diff --git a/drivers/staging/qca-wifi-host-cmn/README.txt b/drivers/staging/qca-wifi-host-cmn/README.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4554fc99fa8a3e1ff5082839c63ef7a0971af5e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/README.txt @@ -0,0 +1 @@ + This is CNSS WLAN Host Driver for products starting from iHelium diff --git a/drivers/staging/qca-wifi-host-cmn/VERSION.txt b/drivers/staging/qca-wifi-host-cmn/VERSION.txt new file mode 100644 index 0000000000000000000000000000000000000000..56884950dded0c1b8a9c3b4825519205aa88e8a6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/VERSION.txt @@ -0,0 +1,2 @@ +Current Component wlan-cmn.driver.lnx.1.0 version 5.1.1.17I +Matches Component wlan-cld3.driver.lnx.1.1 version 5.1.0.22C diff --git a/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_converged.h b/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_converged.h new file mode 100644 index 0000000000000000000000000000000000000000..117828faf48b6b7ea366da4ba7b9b2cb57c204f8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_converged.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: This file contains centralized definitions of converged configuration. + */ + +#ifndef __CFG_CONVERGED_H +#define __CFG_CONVERGED_H + +#define CFG_CONVERGED_ALL \ + /* i.e. CFG_SCAN_ALL etc. */ + +#endif /* __CFG_CONVERGED_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_define.h b/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_define.h new file mode 100644 index 0000000000000000000000000000000000000000..8988399e75afacc13cd0844839af10dcda18934a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_define.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: APIs and macros for defining configuration. + */ + +#ifndef __CFG_DEFINE_H +#define __CFG_DEFINE_H + +enum cfg_fallback_behavior { + CFG_VALUE_OR_CLAMP, + CFG_VALUE_OR_DEFAULT, +}; + +#define rm_parens(...) __VA_ARGS__ +#define __CFG(id, mtype, args...) __CFG_##mtype(id, mtype, args) +#define _CFG(id, args) __CFG(id, args) +#define CFG(id) _CFG(__##id, rm_parens id) + +#define __CFG_INT(args...) __CFG_ANY(args) +#define __CFG_UINT(args...) __CFG_ANY(args) +#define __CFG_BOOL(args...) __CFG_ANY(args) +#define __CFG_STRING(args...) 
__CFG_ANY(args) +#define __CFG_MAC(args...) __CFG_ANY(args) +#define __CFG_IPV4(args...) __CFG_ANY(args) +#define __CFG_IPV6(args...) __CFG_ANY(args) + +#define __CFG_ANY(args...) (args) +#define __CFG_NONE(args...) + +/* configuration available in ini */ +#define CFG_INI_INT(name, min, max, def, fallback, desc) \ + (INT, int32_t, name, min, max, fallback, desc, def) +#define CFG_INI_UINT(name, min, max, def, fallback, desc) \ + (UINT, uint32_t, name, min, max, fallback, desc, def) +#define CFG_INI_BOOL(name, def, desc) \ + (BOOL, bool, name, -1, -1, -1, desc, def) +#define CFG_INI_STRING(name, min_len, max_len, def, desc) \ + (STRING, char *, name, min_len, max_len, -1, desc, def) +#define CFG_INI_MAC(name, def, desc) \ + (MAC, struct qdf_mac_addr, name, -1, -1, -1, desc, def) +#define CFG_INI_IPV4(name, def, desc) \ + (IPV4, struct qdf_ipv4_addr, name, -1, -1, -1, desc, def) +#define CFG_INI_IPV6(name, def, desc) \ + (IPV6, struct qdf_ipv6_addr, name, -1, -1, -1, desc, def) + +/* configuration *not* available in ini */ +#define CFG_INT(name, min, max, def, fallback, desc) \ + (NONE, int32_t, name, min, max, fallback, desc, def) +#define CFG_UINT(name, min, max, def, fallback, desc) \ + (NONE, uint32_t, name, min, max, fallback, desc, def) +#define CFG_BOOL(name, def, desc) \ + (NONE, bool, name, -1, -1, -1, desc, def) +#define CFG_STRING(name, min_len, max_len, def, desc) \ + (NONE, char *, name, min_len, max_len, -1, desc, def) +#define CFG_MAC(name, def, desc) \ + (NONE, struct qdf_mac_addr, name, -1, -1, -1, desc, def) +#define CFG_IPV4(name, def, desc) \ + (NONE, struct qdf_ipv4_addr, name, -1, -1, -1, desc, def) +#define CFG_IPV6(name, def, desc) \ + (NONE, struct qdf_ipv6_addr, name, -1, -1, -1, desc, def) + +/* utility macros/functions */ +#ifdef CONFIG_MCL +#define MCL_OR_WIN_VALUE(mcl_value, win_value) mcl_value +#else +#define MCL_OR_WIN_VALUE(mcl_value, win_value) win_value +#endif + +#endif /* __CFG_DEFINE_H */ + diff --git 
a/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_dispatcher.h b/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_dispatcher.h new file mode 100644 index 0000000000000000000000000000000000000000..c87672cc15398a566b632f58b6567ec9cea36c2b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_dispatcher.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: Dispatcher related handler APIs for the configuration component + */ +#ifndef __CFG_DISPATCHER_H_ +#define __CFG_DISPATCHER_H_ + +#include + +/** + * cfg_dispatcher_init() - Configuration component global init handler + * + * Return: QDF_STATUS + */ +QDF_STATUS cfg_dispatcher_init(void); + +/** + * cfg_dispatcher_deinit() - Configuration component global deinit handler + * + * Return: QDF_STATUS + */ +QDF_STATUS cfg_dispatcher_deinit(void); + +#endif /* __CFG_DISPATCHER_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..11e4dc5784aec43f4e3a3cfab4b6f668423d96c5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/cfg/inc/cfg_ucfg_api.h @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: UCFG APIs for the configuration component. + * + * Logically, configuration exists at the psoc level. This means, each psoc can + * have its own custom configuration, and calls to lookup configuration take a + * psoc parameter for reference. E.g. 
+ * + * int32_t value = cfg_get(psoc, WLAN_SOME_INTEGER_CONFIG_ID); + * + * Configuration is cascading, and lookups happen in this order: + * + * 1) use psoc value, if configured + * 2) use global value, if configured + * 3) fallback to the default value for the configuration item + * + * This means a psoc configuration is a specialization of the global + * configuration, and does not need to explicitly set the same values if they + * would match the global config. + * + * In order to load and parse the global config, call cfg_parse(). In order to + * load and parse psoc configs, call cfg_psoc_parse(). cfg_parse() MUST be + * called before cfg_psoc_parse(), as global configuration will be consulted + * during the psoc parsing process. + * + * There are two basic lifecycles supported: + * + * 1) The type and number of psocs is *not* known at load time + * + * // driver is loading + * cfg_parse("/path/to/config"); + * + * ... + * + * // a psoc has just been created + * cfg_psoc_parse(psoc, "/path/to/psoc/config"); + * + * ... + * + * // driver is unloading + * cfg_release(); + * + * 2) The type and number of psocs *is* known at load time + * + * // driver is loading + * cfg_parse("/path/to/config"); + * + * ... + * + * // for each psoc + * cfg_psoc_parse(psoc, "/path/to/psoc/config"); + * + * // no further psocs will be created after this point + * cfg_release(); + * + * ... + * + * // driver is unloaded later + * + * Each configuration store is reference counted to reduce memory footprint, and + * the configuration component itself will hold one ref count on the global + * config store. All psocs for which psoc-specific configurations have *not* + * been provided will reference the global config store. Psocs for which psoc- + * specific configurations *have* been provided will check for existing stores + * with a matching path to use, before parsing the specified configuration file. 
+ * + * If, at some point in time, it is known that no further psocs will ever be + * created, a call to cfg_release() will release the global ref count held by + * the configuration component. For systems which specify psoc-specific configs + * for all psocs, this will release the unnecessary memory used by the global + * config store. Otherwise, calling cfg_release() at unload time will ensure + * the global config store is properly freed. + */ + +#ifndef __CFG_UCFG_H +#define __CFG_UCFG_H + +#include "cfg_all.h" +#include "cfg_define.h" +#include "i_cfg.h" +#include "qdf_status.h" +#include "qdf_str.h" +#include "qdf_types.h" +#include "wlan_objmgr_psoc_obj.h" + +/** + * cfg_parse() - parse an ini file, and populate the global config store + * @path: The full file path of the ini file to parse + * + * Note: A matching cfg_release() call is required to release allocated + * resources. + * + * The *.ini file format is a simple format consisting of a list of key/value + * pairs, separated by an '=' character. e.g. + * + * gConfigItem1=some string value + * gConfigItem2=0xabc + * + * Comments are also supported, initiated with the '#' character: + * + * # This is a comment. 
It will be ignored by the *.ini parser + * gConfigItem3=aa:bb:cc:dd:ee:ff # this is also a comment + * + * Several datatypes are natively supported: + * + * gInt=-123 # bin (0b), octal (0o), hex (0x), and decimal supported + * gUint=123 # a non-negative integer value + * gBool=y # (1, Y, y) -> true; (0, N, n) -> false + * gString=any string # strings are useful for representing complex types + * gMacAddr=aa:bb:cc:dd:ee:ff # colons are optional, upper and lower case + * gIpv4Addr=127.0.0.1 # uses typical dot-decimal notation + * gIpv6Addr=::1 # typical notation, supporting zero-compression + * + * Return: QDF_STATUS + */ +QDF_STATUS cfg_parse(const char *path); + +/** + * cfg_release() - release the global configuration store + * + * This API releases the configuration component's reference to the global + * config store. + * + * See also: this file's DOC section. + * + * Return: None + */ +void cfg_release(void); + +/** + * cfg_psoc_parse() - specialize the config store for @psoc by parsing @path + * @psoc: The psoc whose config store should be specialized + * @path: The full file path of the ini file to parse + * + * See also: cfg_parse(), and this file's DOC section. + * + * Return: QDF_STATUS + */ +QDF_STATUS cfg_psoc_parse(struct wlan_objmgr_psoc *psoc, const char *path); + +/** + * cfg_get() - lookup the configured value for @id from @psoc + * @psoc: The psoc from which to lookup the configured value + * @id: The id of the configured value to lookup + * + * E.g. + * + * int32_t value = cfg_get(psoc, WLAN_SOME_INTEGER_CONFIG_ID); + * + * Return: The configured value + */ +#define cfg_get(psoc, id) __cfg_get(psoc, __##id) + +/* Configuration Access APIs */ +#define __do_call(op, args...) op(args) +#define do_call(op, args) __do_call(op, rm_parens args) + +#define cfg_id(id) #id + +#define __cfg_mtype(mtype, ctype, name, min, max, fallback, desc, def...) 
mtype +#define cfg_mtype(id) do_call(__cfg_mtype, id) + +#define __cfg_type(mtype, ctype, name, min, max, fallback, desc, def...) ctype +#define cfg_type(id) do_call(__cfg_type, id) + +#define __cfg_name(mtype, ctype, name, min, max, fallback, desc, def...) name +#define cfg_name(id) do_call(__cfg_name, id) + +#define __cfg_min(mtype, ctype, name, min, max, fallback, desc, def...) min +#define cfg_min(id) do_call(__cfg_min, id) + +#define __cfg_max(mtype, ctype, name, min, max, fallback, desc, def...) max +#define cfg_max(id) do_call(__cfg_max, id) + +#define __cfg_fb(mtype, ctype, name, min, max, fallback, desc, def...) fallback +#define cfg_fallback(id) do_call(__cfg_fb, id) + +#define __cfg_desc(mtype, ctype, name, min, max, fallback, desc, def...) desc +#define cfg_description(id) do_call(__cfg_desc, id) + +#define __cfg_def(mtype, ctype, name, min, max, fallback, desc, def...) def +#define cfg_default(id) do_call(__cfg_def, id) + +#define __cfg_str(id...) #id +#define cfg_str(id) #id __cfg_str(id) + +/* validate APIs */ +static inline bool +cfg_string_in_range(const char *value, qdf_size_t min_len, qdf_size_t max_len) +{ + qdf_size_t len = qdf_str_len(value); + + return len >= min_len && len <= max_len; +} + +#define __cfg_int_in_range(value, min, max) (value >= min && value <= max) +#define __cfg_uint_in_range(value, min, max) (value >= min && value <= max) +#define __cfg_string_in_range(value, min_len, max_len) \ + cfg_string_in_range(value, min_len, max_len) + +#define __cfg_in_range(id, value, mtype) \ + __cfg_ ## mtype ## _in_range(value, cfg_min(id), cfg_max(id)) + +/* this may look redundant, but forces @mtype to be expanded */ +#define __cfg_in_range_type(id, value, mtype) \ + __cfg_in_range(id, value, mtype) + +#define cfg_in_range(id, value) __cfg_in_range_type(id, value, cfg_mtype(id)) + +/* Value-or-Default APIs */ +#define __cfg_value_or_default(id, value, def) \ + (cfg_in_range(id, value) ? 
value : def) + +#define cfg_value_or_default(id, value) \ + __cfg_value_or_default(id, value, cfg_default(id)) + +/* Value-or-Clamped APIs */ +#define __cfg_clamp(val, min, max) (val < min ? min : (val > max ? max : val)) +#define cfg_clamp(id, value) __cfg_clamp(value, cfg_min(id), cfg_max(id)) + +#endif /* __CFG_UCFG_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/cfg/inc/i_cfg.h b/drivers/staging/qca-wifi-host-cmn/cfg/inc/i_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..42fa6e995aea88f32359ca79015f2fd004e13fb3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/cfg/inc/i_cfg.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Internal APIs for the configuration component. + */ + +#ifndef __I_CFG_H +#define __I_CFG_H + +#include "cfg_define.h" +#include "qdf_trace.h" +#include "qdf_types.h" +#include "wlan_objmgr_psoc_obj.h" + +#define __cfg_log(level, fmt, args...) \ + QDF_TRACE(QDF_MODULE_ID_CONFIG, level, FL(fmt), ##args) +#define cfg_err(fmt, args...) __cfg_log(QDF_TRACE_LEVEL_ERROR, fmt, ##args) +#define cfg_info(fmt, args...) __cfg_log(QDF_TRACE_LEVEL_INFO, fmt, ##args) +#define cfg_debug(fmt, args...) 
__cfg_log(QDF_TRACE_LEVEL_DEBUG, fmt, ##args) +#define cfg_enter() cfg_debug("enter") +#define cfg_exit() cfg_debug("exit") + +/* define global config values structure */ + +#undef __CFG_STRING +#define __CFG_STRING(id, mtype, ctype, name, min, max, fallback, desc, def...) \ + const char id##_internal[max + 1]; +#undef __CFG_ANY +#define __CFG_ANY(id, mtype, ctype, name, min, max, fallback, desc, def...) \ + const ctype id##_internal; + +struct cfg_values { + /* e.g. const int32_t __CFG_SCAN_DWELL_TIME_internal; */ + CFG_ALL +}; + +#undef __CFG_STRING +#define __CFG_STRING(args...) __CFG_ANY(args) +#undef __CFG_ANY +#define __CFG_ANY(args...) (args) + +struct cfg_values *cfg_psoc_get_values(struct wlan_objmgr_psoc *psoc); + +#define __cfg_get(psoc, id) (cfg_psoc_get_values(psoc)->id##_internal) + +#endif /* __I_CFG_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/cfg/inc/i_cfg_objmgr.h b/drivers/staging/qca-wifi-host-cmn/cfg/inc/i_cfg_objmgr.h new file mode 100644 index 0000000000000000000000000000000000000000..330321b5bf0395f42bdca0ca71d7809ab1cd268a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/cfg/inc/i_cfg_objmgr.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: This file contains various object manager related wrappers and helpers + */ + +#ifndef __CFG_OBJMGR_H +#define __CFG_OBJMGR_H + +#include "wlan_cmn.h" +#include "wlan_objmgr_global_obj.h" +#include "wlan_objmgr_psoc_obj.h" + +/* Private Data */ + +#define cfg_psoc_get_priv(psoc) \ + wlan_objmgr_psoc_get_comp_private_obj((psoc), WLAN_UMAC_COMP_CONFIG) +#define cfg_psoc_set_priv(psoc, priv) \ + wlan_objmgr_psoc_component_obj_attach((psoc), WLAN_UMAC_COMP_CONFIG, \ + (priv), QDF_STATUS_SUCCESS) +#define cfg_psoc_unset_priv(psoc, priv) \ + wlan_objmgr_psoc_component_obj_detach((psoc), WLAN_UMAC_COMP_CONFIG, \ + (priv)) + +/* event registration */ + +#define cfg_psoc_register_create(callback) \ + wlan_objmgr_register_psoc_create_handler(WLAN_UMAC_COMP_CONFIG, \ + (callback), NULL) +#define cfg_psoc_register_destroy(callback) \ + wlan_objmgr_register_psoc_destroy_handler(WLAN_UMAC_COMP_CONFIG, \ + (callback), NULL) + +/* event de-registration */ + +#define cfg_psoc_unregister_create(callback) \ + wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_CONFIG, \ + (callback), NULL) +#define cfg_psoc_unregister_destroy(callback) \ + wlan_objmgr_unregister_psoc_destroy_handler(WLAN_UMAC_COMP_CONFIG, \ + (callback), NULL) + +#endif /* __CFG_OBJMGR_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/cfg/src/cfg.c b/drivers/staging/qca-wifi-host-cmn/cfg/src/cfg.c new file mode 100644 index 0000000000000000000000000000000000000000..fbda51826df7a3ef0991afbc06e56c810aa1cd52 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/cfg/src/cfg.c @@ -0,0 +1,724 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "cfg_all.h" +#include "cfg_define.h" +#include "cfg_dispatcher.h" +#include "cfg_ucfg_api.h" +#include "i_cfg.h" +#include "i_cfg_objmgr.h" +#include "qdf_atomic.h" +#include "qdf_list.h" +#include "qdf_mem.h" +#include "qdf_module.h" +#include "qdf_parse.h" +#include "qdf_status.h" +#include "qdf_str.h" +#include "qdf_trace.h" +#include "qdf_types.h" +#include "wlan_objmgr_psoc_obj.h" + +/** + * struct cfg_value_store - backing store for an ini file + * @path: file path of the ini file + * @node: internal list node for keeping track of all the allocated stores + * @users: number of references on the store + * @values: a values struct containing the parsed values from the ini file + */ +struct cfg_value_store { + char *path; + qdf_list_node_t node; + qdf_atomic_t users; + struct cfg_values values; +}; + +/* define/populate dynamic metadata lookup table */ + +/** + * struct cfg_meta - configuration item metadata for dynamic lookup during parse + * @name: name of the config item used in the ini file (i.e. 
"gScanDwellTime") + * @item_handler: parsing callback based on the type of the config item + * @min: minimum value for use in bounds checking (min_len for strings) + * @max: maximum value for use in bounds checking (max_len for strings) + * @fallback: the fallback behavior to use when configured values are invalid + */ +struct cfg_meta { + const char *name; + const uint32_t field_offset; + void (*const item_handler)(struct cfg_value_store *store, + const struct cfg_meta *meta, + const char *value); + const int32_t min; + const int32_t max; + const enum cfg_fallback_behavior fallback; +}; + +/* ini item handler functions */ + +#define cfg_value_ptr(store, meta) \ + ((void *)&(store)->values + (meta)->field_offset) + +static void cfg_int_item_handler(struct cfg_value_store *store, + const struct cfg_meta *meta, + const char *str_value) +{ + QDF_STATUS status; + int32_t *store_value = cfg_value_ptr(store, meta); + int32_t value; + + status = qdf_int32_parse(str_value, &value); + if (QDF_IS_STATUS_ERROR(status)) { + cfg_err("%s=%s - Invalid format (status %d); Using default %d", + meta->name, str_value, status, *store_value); + return; + } + + QDF_BUG(meta->min <= meta->max); + if (meta->min > meta->max) { + cfg_err("Invalid config item meta for %s", meta->name); + return; + } + + if (value >= meta->min && value <= meta->max) { + *store_value = value; + return; + } + + switch (meta->fallback) { + default: + QDF_DEBUG_PANIC("Unknown fallback method %d for cfg item '%s'", + meta->fallback, meta->name); + /* fall through */ + case CFG_VALUE_OR_DEFAULT: + /* store already contains default */ + break; + case CFG_VALUE_OR_CLAMP: + *store_value = __cfg_clamp(value, meta->min, meta->max); + break; + } + + cfg_err("%s=%d - Out of range [%d, %d]; Using %d", + meta->name, value, meta->min, meta->max, *store_value); +} + +static void cfg_uint_item_handler(struct cfg_value_store *store, + const struct cfg_meta *meta, + const char *str_value) +{ + QDF_STATUS status; + uint32_t 
*store_value = cfg_value_ptr(store, meta); + uint32_t value; + uint32_t min; + uint32_t max; + + /** + * Since meta min and max are of type int32_t + * We need explicit type casting to avoid + * implicit wrap around for uint32_t type cfg data. + */ + min = (uint32_t)meta->min; + max = (uint32_t)meta->max; + + status = qdf_uint32_parse(str_value, &value); + if (QDF_IS_STATUS_ERROR(status)) { + cfg_err("%s=%s - Invalid format (status %d); Using default %u", + meta->name, str_value, status, *store_value); + return; + } + + QDF_BUG(min <= max); + if (min > max) { + cfg_err("Invalid config item meta for %s", meta->name); + return; + } + + if (value >= min && value <= max) { + *store_value = value; + return; + } + + switch (meta->fallback) { + default: + QDF_DEBUG_PANIC("Unknown fallback method %d for cfg item '%s'", + meta->fallback, meta->name); + /* fall through */ + case CFG_VALUE_OR_DEFAULT: + /* store already contains default */ + break; + case CFG_VALUE_OR_CLAMP: + *store_value = __cfg_clamp(value, min, max); + break; + } + + cfg_err("%s=%u - Out of range [%d, %d]; Using %u", + meta->name, value, min, max, *store_value); +} + +static void cfg_bool_item_handler(struct cfg_value_store *store, + const struct cfg_meta *meta, + const char *str_value) +{ + QDF_STATUS status; + bool *store_value = cfg_value_ptr(store, meta); + + status = qdf_bool_parse(str_value, store_value); + if (QDF_IS_STATUS_SUCCESS(status)) + return; + + cfg_err("%s=%s - Invalid format (status %d); Using default '%s'", + meta->name, str_value, status, *store_value ? 
"true" : "false"); +} + +static void cfg_string_item_handler(struct cfg_value_store *store, + const struct cfg_meta *meta, + const char *str_value) +{ + char *store_value = cfg_value_ptr(store, meta); + qdf_size_t len; + + QDF_BUG(meta->min >= 0); + QDF_BUG(meta->min <= meta->max); + if (meta->min < 0 || meta->min > meta->max) { + cfg_err("Invalid config item meta for %s", meta->name); + return; + } + + /* ensure min length */ + len = qdf_str_nlen(str_value, meta->min); + if (len < meta->min) { + cfg_err("%s=%s - Too short; Using default '%s'", + meta->name, str_value, store_value); + return; + } + + /* check max length */ + len += qdf_str_nlen(str_value + meta->min, meta->max - meta->min + 1); + if (len > meta->max) { + cfg_err("%s=%s - Too long; Using default '%s'", + meta->name, str_value, store_value); + return; + } + + qdf_str_lcopy(store_value, str_value, meta->max + 1); +} + +static void cfg_mac_item_handler(struct cfg_value_store *store, + const struct cfg_meta *meta, + const char *str_value) +{ + QDF_STATUS status; + struct qdf_mac_addr *store_value = cfg_value_ptr(store, meta); + + status = qdf_mac_parse(str_value, store_value); + if (QDF_IS_STATUS_SUCCESS(status)) + return; + + cfg_err("%s=%s - Invalid format (status %d); Using default " + QDF_MAC_ADDR_STR, meta->name, str_value, status, + QDF_MAC_ADDR_ARRAY(store_value->bytes)); +} + +static void cfg_ipv4_item_handler(struct cfg_value_store *store, + const struct cfg_meta *meta, + const char *str_value) +{ + QDF_STATUS status; + struct qdf_ipv4_addr *store_value = cfg_value_ptr(store, meta); + + status = qdf_ipv4_parse(str_value, store_value); + if (QDF_IS_STATUS_SUCCESS(status)) + return; + + cfg_err("%s=%s - Invalid format (status %d); Using default " + QDF_IPV4_ADDR_STR, meta->name, str_value, status, + QDF_IPV4_ADDR_ARRAY(store_value->bytes)); +} + +static void cfg_ipv6_item_handler(struct cfg_value_store *store, + const struct cfg_meta *meta, + const char *str_value) +{ + QDF_STATUS status; + 
/* continuation of cfg_ipv6_item_handler(); signature declared above */
	struct qdf_ipv6_addr *store_value = cfg_value_ptr(store, meta);

	status = qdf_ipv6_parse(str_value, store_value);
	if (QDF_IS_STATUS_SUCCESS(status))
		return;

	/* on parse failure, keep the default already present in the store */
	cfg_err("%s=%s - Invalid format (status %d); Using default "
		QDF_IPV6_ADDR_STR, meta->name, str_value, status,
		QDF_IPV6_ADDR_ARRAY(store_value->bytes));
}

/*
 * populate metadata lookup table
 *
 * X-macro trick: redefine __CFG_ANY so that each CFG() entry in CFG_ALL
 * expands to one designated-initializer element of cfg_meta_lookup_table.
 */
#undef __CFG_ANY
#define __CFG_ANY(_id, _mtype, _ctype, _name, _min, _max, _fallback, ...) \
{ \
	.name = _name, \
	.field_offset = qdf_offsetof(struct cfg_values, _id##_internal), \
	.item_handler = cfg_ ## _mtype ## _item_handler, \
	.min = _min, \
	.max = _max, \
	.fallback = _fallback, \
},

/* map each metadata type tag (INT, UINT, ...) to its handler function */
#define cfg_INT_item_handler cfg_int_item_handler
#define cfg_UINT_item_handler cfg_uint_item_handler
#define cfg_BOOL_item_handler cfg_bool_item_handler
#define cfg_STRING_item_handler cfg_string_item_handler
#define cfg_MAC_item_handler cfg_mac_item_handler
#define cfg_IPV4_item_handler cfg_ipv4_item_handler
#define cfg_IPV6_item_handler cfg_ipv6_item_handler

/* one entry per configuration item declared in CFG_ALL */
static const struct cfg_meta cfg_meta_lookup_table[] = {
	CFG_ALL
};

/* default store initializer */

/**
 * cfg_store_set_defaults() - write every item's default value into @store
 * @store: value store to initialize
 *
 * Expands CFG_ALL twice: the first expansion declares one local variable
 * per item, initialized to that item's default; the second expansion
 * copies each local into the store (strings via bounded copy, all other
 * types via a typed assignment).
 */
static void cfg_store_set_defaults(struct cfg_value_store *store)
{
#undef __CFG_ANY
#define __CFG_ANY(id, mtype, ctype, name, min, max, fallback, desc, def...) \
	ctype id = def;

	CFG_ALL

#undef __CFG_STRING
#define __CFG_STRING(id, mtype, ctype, name, min_len, max_len, ...) \
	qdf_str_lcopy((char *)&store->values.id##_internal, id, max_len + 1);
#undef __CFG_ANY
#define __CFG_ANY(id, mtype, ctype, name, min, max, fallback, desc, def...) \
	*(ctype *)&store->values.id##_internal = id;

	CFG_ALL
}

/**
 * cfg_lookup_meta() - find the metadata entry for config item @name
 * @name: ini key to look up
 *
 * Return: pointer into cfg_meta_lookup_table, or NULL if not found
 */
static const struct cfg_meta *cfg_lookup_meta(const char *name)
{
	int i;

	QDF_BUG(name);
	if (!name)
		return NULL;

	/* linear search for now; optimize in the future if needed */
	for (i = 0; i < QDF_ARRAY_SIZE(cfg_meta_lookup_table); i++) {
		const struct cfg_meta *meta = &cfg_meta_lookup_table[i];

		if (qdf_str_eq(name, meta->name))
			return meta;
	}

	return NULL;
}

/**
 * cfg_ini_item_handler() - qdf_ini_parse() callback for each key=value pair
 * @context: the struct cfg_value_store being populated
 * @key: ini key
 * @value: raw string value
 *
 * Unknown keys are logged and skipped; always returns success so parsing
 * continues for the remaining items.
 */
static QDF_STATUS
cfg_ini_item_handler(void *context, const char *key, const char *value)
{
	struct cfg_value_store *store = context;
	const struct cfg_meta *meta;

	meta = cfg_lookup_meta(key);
	if (!meta) {
		/* TODO: promote to 'err' or 'warn' once legacy is ported */
		cfg_info("Unknown config item '%s'", key);
		return QDF_STATUS_SUCCESS;
	}

	QDF_BUG(meta->item_handler);
	if (!meta->item_handler)
		return QDF_STATUS_SUCCESS;

	meta->item_handler(store, meta, value);

	return QDF_STATUS_SUCCESS;
}

/* ini [section] markers are not part of this config format; log and ignore */
static QDF_STATUS cfg_ini_section_handler(void *context, const char *name)
{
	cfg_err("Unexpected section '%s'. Sections are not supported.", name);

	return QDF_STATUS_SUCCESS;
}
Sections are not supported.", name); + + return QDF_STATUS_SUCCESS; +} + +#define cfg_assert_success(expr) \ +do { \ + QDF_STATUS __assert_status = (expr); \ + QDF_BUG(QDF_IS_STATUS_SUCCESS(__assert_status)); \ +} while (0) + +static bool __cfg_is_init; +static struct cfg_value_store *__cfg_global_store; +static qdf_list_t __cfg_stores_list; +static qdf_spinlock_t __cfg_stores_lock; + +struct cfg_psoc_ctx { + struct cfg_value_store *store; +}; + +static QDF_STATUS +cfg_store_alloc(const char *path, struct cfg_value_store **out_store) +{ + QDF_STATUS status; + struct cfg_value_store *store; + + cfg_enter(); + + store = qdf_mem_malloc(sizeof(*store)); + if (!store) { + cfg_err("Out of memory"); + return QDF_STATUS_E_NOMEM; + } + + status = qdf_str_dup(&store->path, path); + if (QDF_IS_STATUS_ERROR(status)) + goto free_store; + + status = qdf_atomic_init(&store->users); + if (QDF_IS_STATUS_ERROR(status)) + goto free_path; + qdf_atomic_inc(&store->users); + + qdf_spin_lock_bh(&__cfg_stores_lock); + status = qdf_list_insert_back(&__cfg_stores_list, &store->node); + qdf_spin_unlock_bh(&__cfg_stores_lock); + if (QDF_IS_STATUS_ERROR(status)) + goto free_path; + + *out_store = store; + + return QDF_STATUS_SUCCESS; + +free_path: + qdf_mem_free(store->path); + +free_store: + qdf_mem_free(store); + + return status; +} + +static void cfg_store_free(struct cfg_value_store *store) +{ + QDF_STATUS status; + + cfg_enter(); + + qdf_spin_lock_bh(&__cfg_stores_lock); + status = qdf_list_remove_node(&__cfg_stores_list, &store->node); + qdf_spin_unlock_bh(&__cfg_stores_lock); + if (QDF_IS_STATUS_ERROR(status)) + QDF_DEBUG_PANIC("Failed config store list removal; status:%d", + status); + + qdf_mem_free(store->path); + qdf_mem_free(store); +} + +static QDF_STATUS +cfg_store_get(const char *path, struct cfg_value_store **out_store) +{ + QDF_STATUS status; + qdf_list_node_t *node; + + *out_store = NULL; + + qdf_spin_lock_bh(&__cfg_stores_lock); + status = 
qdf_list_peek_front(&__cfg_stores_list, &node); + while (QDF_IS_STATUS_SUCCESS(status)) { + struct cfg_value_store *store = + qdf_container_of(node, struct cfg_value_store, node); + + if (qdf_str_eq(path, store->path)) { + qdf_atomic_inc(&store->users); + *out_store = store; + break; + } + + status = qdf_list_peek_next(&__cfg_stores_list, node, &node); + } + qdf_spin_unlock_bh(&__cfg_stores_lock); + + return status; +} + +static void cfg_store_put(struct cfg_value_store *store) +{ + if (qdf_atomic_dec_and_test(&store->users)) + cfg_store_free(store); +} + +static struct cfg_psoc_ctx *cfg_psoc_get_ctx(struct wlan_objmgr_psoc *psoc) +{ + struct cfg_psoc_ctx *psoc_ctx; + + psoc_ctx = cfg_psoc_get_priv(psoc); + QDF_BUG(psoc_ctx); + + return psoc_ctx; +} + +struct cfg_values *cfg_psoc_get_values(struct wlan_objmgr_psoc *psoc) +{ + return &cfg_psoc_get_ctx(psoc)->store->values; +} +qdf_export_symbol(cfg_psoc_get_values); + +static QDF_STATUS +cfg_ini_parse_to_store(const char *path, struct cfg_value_store *store) +{ + QDF_STATUS status; + + status = qdf_ini_parse(path, store, cfg_ini_item_handler, + cfg_ini_section_handler); + if (QDF_IS_STATUS_ERROR(status)) + cfg_err("Failed to parse *.ini file @ %s; status:%d", + path, status); + + return status; +} + +static void cfg_init(void) +{ + qdf_list_create(&__cfg_stores_list, 0); + qdf_spinlock_create(&__cfg_stores_lock); +} + +static void cfg_deinit(void) +{ + qdf_spinlock_destroy(&__cfg_stores_lock); + qdf_list_destroy(&__cfg_stores_list); +} + +static void cfg_try_deinit(void) +{ + bool empty; + + qdf_spin_lock_bh(&__cfg_stores_lock); + empty = qdf_list_empty(&__cfg_stores_list); + qdf_spin_unlock_bh(&__cfg_stores_lock); + + if (empty) + cfg_deinit(); +} + +static QDF_STATUS +cfg_on_psoc_create(struct wlan_objmgr_psoc *psoc, void *context) +{ + QDF_STATUS status; + struct cfg_psoc_ctx *psoc_ctx; + + cfg_enter(); + + QDF_BUG(__cfg_global_store); + if (!__cfg_global_store) + return QDF_STATUS_E_FAILURE; + + psoc_ctx = 
qdf_mem_malloc(sizeof(*psoc_ctx)); + if (!psoc_ctx) { + cfg_err("Out of memory"); + return QDF_STATUS_E_NOMEM; + } + + qdf_atomic_inc(&__cfg_global_store->users); + psoc_ctx->store = __cfg_global_store; + + status = cfg_psoc_set_priv(psoc, psoc_ctx); + if (QDF_IS_STATUS_ERROR(status)) + goto put_store; + + return QDF_STATUS_SUCCESS; + +put_store: + cfg_store_put(__cfg_global_store); + qdf_mem_free(psoc_ctx); + + return status; +} + +static QDF_STATUS +cfg_on_psoc_destroy(struct wlan_objmgr_psoc *psoc, void *context) +{ + QDF_STATUS status; + struct cfg_psoc_ctx *psoc_ctx; + + cfg_enter(); + + psoc_ctx = cfg_psoc_get_ctx(psoc); + status = cfg_psoc_unset_priv(psoc, psoc_ctx); + + cfg_store_put(psoc_ctx->store); + qdf_mem_free(psoc_ctx); + + return status; +} + +QDF_STATUS cfg_dispatcher_init(void) +{ + QDF_STATUS status; + + cfg_enter(); + + QDF_BUG(!__cfg_is_init); + if (__cfg_is_init) + return QDF_STATUS_E_INVAL; + + status = cfg_psoc_register_create(cfg_on_psoc_create); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = cfg_psoc_register_destroy(cfg_on_psoc_destroy); + if (QDF_IS_STATUS_ERROR(status)) + goto unreg_create; + + __cfg_is_init = true; + + return QDF_STATUS_SUCCESS; + +unreg_create: + cfg_assert_success(cfg_psoc_unregister_create(cfg_on_psoc_create)); + + return status; +} + +QDF_STATUS cfg_dispatcher_deinit(void) +{ + cfg_enter(); + + QDF_BUG(__cfg_is_init); + if (!__cfg_is_init) + return QDF_STATUS_E_INVAL; + + __cfg_is_init = false; + + cfg_assert_success(cfg_psoc_unregister_create(cfg_on_psoc_create)); + cfg_assert_success(cfg_psoc_unregister_destroy(cfg_on_psoc_destroy)); + + cfg_try_deinit(); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS cfg_parse(const char *path) +{ + QDF_STATUS status; + struct cfg_value_store *store; + + cfg_enter(); + + QDF_BUG(!__cfg_global_store); + if (__cfg_global_store) + return QDF_STATUS_E_INVAL; + + cfg_init(); + + status = cfg_store_alloc(path, &store); + if (QDF_IS_STATUS_ERROR(status)) + goto 
deinit; + + cfg_store_set_defaults(store); + + status = cfg_ini_parse_to_store(path, store); + if (QDF_IS_STATUS_ERROR(status)) + goto free_store; + + __cfg_global_store = store; + + return QDF_STATUS_SUCCESS; + +free_store: + cfg_store_free(store); + +deinit: + cfg_deinit(); + + return status; +} + +void cfg_release(void) +{ + cfg_enter(); + + QDF_BUG(__cfg_global_store); + if (!__cfg_global_store) + return; + + cfg_store_put(__cfg_global_store); + __cfg_global_store = NULL; + + cfg_try_deinit(); +} + +QDF_STATUS cfg_psoc_parse(struct wlan_objmgr_psoc *psoc, const char *path) +{ + QDF_STATUS status; + struct cfg_value_store *store; + struct cfg_psoc_ctx *psoc_ctx; + + cfg_enter(); + + QDF_BUG(__cfg_global_store); + if (!__cfg_global_store) + return QDF_STATUS_E_INVAL; + + QDF_BUG(__cfg_is_init); + if (!__cfg_is_init) + return QDF_STATUS_E_INVAL; + + QDF_BUG(psoc); + if (!psoc) + return QDF_STATUS_E_INVAL; + + QDF_BUG(path); + if (!path) + return QDF_STATUS_E_INVAL; + + psoc_ctx = cfg_psoc_get_ctx(psoc); + + QDF_BUG(psoc_ctx->store == __cfg_global_store); + if (psoc_ctx->store != __cfg_global_store) + return QDF_STATUS_SUCCESS; + + /* check if @path has been parsed before */ + status = cfg_store_get(path, &store); + if (QDF_IS_STATUS_ERROR(status)) { + status = cfg_store_alloc(path, &store); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + /* inherit global configuration */ + qdf_mem_copy(&store->values, &__cfg_global_store->values, + sizeof(store->values)); + + status = cfg_ini_parse_to_store(path, store); + if (QDF_IS_STATUS_ERROR(status)) + goto put_store; + } + + psoc_ctx->store = store; + cfg_store_put(__cfg_global_store); + + return QDF_STATUS_SUCCESS; + +put_store: + cfg_store_put(store); + + return status; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_bus.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_bus.h new file mode 100644 index 0000000000000000000000000000000000000000..f6b9a3efe3e195616644234a3daa6d17f6a2d48f --- 
/dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_bus.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_bus.h + * @brief Define the host data path bus related functions + */ +#ifndef _CDP_TXRX_BUS_H_ +#define _CDP_TXRX_BUS_H_ + +/** + * cdp_bus_suspend() - suspend bus + * @soc - data path soc handle + * @ppdev: data path pdev handle + * + * suspend bus + * + * return QDF_STATUS_SUCCESS suspend is not implemented or suspend done + */ +static inline QDF_STATUS cdp_bus_suspend(ol_txrx_soc_handle soc, + struct cdp_pdev *ppdev) +{ + if (!soc || !soc->ops || !soc->ops->bus_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->bus_ops->bus_suspend) + return soc->ops->bus_ops->bus_suspend(ppdev); + return QDF_STATUS_E_NOSUPPORT; +} + +/** + * cdp_bus_resume() - resume bus + * @soc - data path soc handle + * @ppdev: data path pdev handle + * + * resume bus + * + * return QDF_STATUS_SUCCESS resume is not implemented or suspend done + */ +static inline QDF_STATUS cdp_bus_resume(ol_txrx_soc_handle soc, + struct cdp_pdev *ppdev) +{ + if (!soc || !soc->ops || 
!soc->ops->bus_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->bus_ops->bus_resume) + return soc->ops->bus_ops->bus_resume(ppdev); + return QDF_STATUS_E_NOSUPPORT; +} + +#endif /* _CDP_TXRX_BUS_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cfg.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..984639dbc5198174bb3af568e768a8910afc2e3b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cfg.h @@ -0,0 +1,422 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * @file cdp_txrx_cfg.h + * @brief Define the host data path configuration API functions + */ +#ifndef _CDP_TXRX_CFG_H_ +#define _CDP_TXRX_CFG_H_ +#include "cdp_txrx_handle.h" +/** + * cdp_cfg_set_rx_fwd_disabled() - enable/disable rx forwarding + * @soc - data path soc handle + * @pdev - data path device instance + * @disable_rx_fwd - enable or disable rx forwarding + * + * enable/disable rx forwarding + * + * return NONE + */ +static inline void +cdp_cfg_set_rx_fwd_disabled(ol_txrx_soc_handle soc, struct cdp_cfg *cfg_pdev, + uint8_t disable_rx_fwd) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->set_cfg_rx_fwd_disabled) + return; + + soc->ops->cfg_ops->set_cfg_rx_fwd_disabled(cfg_pdev, + disable_rx_fwd); +} + +/** + * cdp_cfg_set_packet_log_enabled() - enable/disable packet log + * @soc - data path soc handle + * @pdev - data path device instance + * @val - enable or disable packet log + * + * packet log enable or disable + * + * return NONE + */ +static inline void +cdp_cfg_set_packet_log_enabled(ol_txrx_soc_handle soc, + struct cdp_cfg *cfg_pdev, uint8_t val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->set_cfg_packet_log_enabled) + return; + + soc->ops->cfg_ops->set_cfg_packet_log_enabled(cfg_pdev, + val); +} + +/** + * cdp_cfg_attach() - attach config module + * @soc - data path soc handle + * @osdev - os instance + * @cfg_param - configuration parameter should be propagated + * + * Allocate configuration module instance, and propagate configuration values + * + * return soc configuration module instance + */ +static inline struct cdp_cfg +*cdp_cfg_attach(ol_txrx_soc_handle soc, + qdf_device_t osdev, void *cfg_param) +{ + if (!soc || !soc->ops) { 
+ QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return NULL; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->cfg_attach) + return NULL; + + return soc->ops->cfg_ops->cfg_attach(osdev, cfg_param); +} + +/** + * cdp_cfg_vdev_rx_set_intrabss_fwd() - enable/disable intra bass forwarding + * @soc - data path soc handle + * @vdev - virtual interface instance + * @val - enable or disable intra bss forwarding + * + * ap isolate, do not forward intra bss traffic + * + * return NONE + */ +static inline void +cdp_cfg_vdev_rx_set_intrabss_fwd(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, bool val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->vdev_rx_set_intrabss_fwd) + return; + + soc->ops->cfg_ops->vdev_rx_set_intrabss_fwd(vdev, val); +} + +/** + * cdp_cfg_is_rx_fwd_disabled() - get vdev rx forward + * @soc - data path soc handle + * @vdev - virtual interface instance + * + * Return rx forward feature enable status + * + * return 1 enabled + * 0 disabled + */ +static inline uint8_t +cdp_cfg_is_rx_fwd_disabled(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->is_rx_fwd_disabled) + return 0; + + return soc->ops->cfg_ops->is_rx_fwd_disabled(vdev); + +} + +/** + * cdp_cfg_tx_set_is_mgmt_over_wmi_enabled() - mgmt tx over wmi enable/disable + * @soc - data path soc handle + * @value - feature enable or disable + * + * Enable or disable management packet TX over WMI feature + * + * return None + */ +static inline void +cdp_cfg_tx_set_is_mgmt_over_wmi_enabled(ol_txrx_soc_handle soc, + uint8_t value) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, 
QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->tx_set_is_mgmt_over_wmi_enabled) + return; + + soc->ops->cfg_ops->tx_set_is_mgmt_over_wmi_enabled(value); +} + +/** + * cdp_cfg_is_high_latency() - query data path is in high or low latency + * @soc - data path soc handle + * @pdev - data path device instance + * + * query data path is in high or low latency + * + * return 1 high latency data path, usb or sdio + * 0 low latency data path + */ +static inline int +cdp_cfg_is_high_latency(ol_txrx_soc_handle soc, struct cdp_cfg *cfg_pdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->is_high_latency) + return 0; + + return soc->ops->cfg_ops->is_high_latency(cfg_pdev); +} + +/** + * cdp_cfg_set_flow_control_parameters() - set flow control params + * @soc - data path soc handle + * @cfg - dp config module instance + * @param - parameters should set + * + * set flow control params + * + * return None + */ +static inline void +cdp_cfg_set_flow_control_parameters(ol_txrx_soc_handle soc, + struct cdp_cfg *cfg_pdev, void *param) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->set_flow_control_parameters) + return; + + soc->ops->cfg_ops->set_flow_control_parameters(cfg_pdev, + param); +} + +/** + * cdp_cfg_set_flow_steering - Set Rx flow steering config based on CFG ini + * config. 
+ * + * @pdev - handle to the physical device + * @val - 0 - disable, 1 - enable + * + * Return: None + */ +static inline void cdp_cfg_set_flow_steering(ol_txrx_soc_handle soc, + struct cdp_cfg *cfg_pdev, uint8_t val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->set_flow_steering) + return; + + soc->ops->cfg_ops->set_flow_steering(cfg_pdev, val); +} + +static inline void cdp_cfg_get_max_peer_id(ol_txrx_soc_handle soc, + struct cdp_cfg *cfg_pdev) +{ +} + +/** + * cdp_cfg_set_ptp_rx_opt_enabled() - enable/disable ptp rx timestamping + * @soc - data path soc handle + * @pdev - data path device instance + * @val - enable or disable packet log + * + * ptp rx timestamping enable or disable + * + * return NONE + */ +static inline void +cdp_cfg_set_ptp_rx_opt_enabled(ol_txrx_soc_handle soc, + struct cdp_cfg *cfg_pdev, uint8_t val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->set_ptp_rx_opt_enabled) + return; + + soc->ops->cfg_ops->set_ptp_rx_opt_enabled(cfg_pdev, val); +} + +/** + * cdp_cfg_set_new_htt_msg_format() - set htt h2t msg feature + * @soc - datapath soc handle + * @val - enable or disable new htt h2t msg feature + * + * Enable whether htt h2t message length includes htc header length + * + * return NONE + */ +static inline void +cdp_cfg_set_new_htt_msg_format(ol_txrx_soc_handle soc, + uint8_t val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->set_new_htt_msg_format) + return; + + soc->ops->cfg_ops->set_new_htt_msg_format(val); +} + +/** + * cdp_cfg_set_peer_unmap_conf_support() - set peer unmap conf feature + * @soc - 
datapath soc handle + * @val - enable or disable peer unmap conf feature + * + * Set if peer unmap confirmation feature is supported by both FW and in INI + * + * return NONE + */ +static inline void +cdp_cfg_set_peer_unmap_conf_support(ol_txrx_soc_handle soc, bool val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->set_peer_unmap_conf_support) + return; + + soc->ops->cfg_ops->set_peer_unmap_conf_support(val); +} + +/** + * cdp_cfg_get_peer_unmap_conf_support() - check peer unmap conf feature + * @soc - datapath soc handle + * + * Check if peer unmap confirmation feature is enabled + * + * return true is peer unmap confirmation feature is enabled else false + */ +static inline bool +cdp_cfg_get_peer_unmap_conf_support(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return false; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->get_peer_unmap_conf_support) + return false; + + return soc->ops->cfg_ops->get_peer_unmap_conf_support(); +} + +static inline void +cdp_cfg_set_tx_compl_tsf64(ol_txrx_soc_handle soc, + uint8_t val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->set_tx_compl_tsf64) + return; + + soc->ops->cfg_ops->set_tx_compl_tsf64(val); +} + +static inline bool +cdp_cfg_get_tx_compl_tsf64(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return false; + } + + if (!soc->ops->cfg_ops || + !soc->ops->cfg_ops->get_tx_compl_tsf64) + return false; + + return soc->ops->cfg_ops->get_tx_compl_tsf64(); +} + +#endif /* _CDP_TXRX_CFG_H_ */ diff 
--git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn.h new file mode 100644 index 0000000000000000000000000000000000000000..d286871789f02533d610d0b828be72b7f2607ff9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn.h @@ -0,0 +1,1653 @@ +/* + * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * @file cdp_txrx_cmn.h + * @brief Define the host data path converged API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_CMN_H_ +#define _CDP_TXRX_CMN_H_ + +#include "qdf_types.h" +#include "qdf_nbuf.h" +#include "cdp_txrx_ops.h" +#include "cdp_txrx_handle.h" +#include "cdp_txrx_cmn_struct.h" +/****************************************************************************** + * + * Common Data Path Header File + * + *****************************************************************************/ + +static inline int +cdp_soc_attach_target(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_soc_attach_target) + return 0; + + return soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc); + +} + +static inline int +cdp_soc_get_nss_cfg(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg) + return 0; + + return soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg(soc); +} + +static inline void +cdp_soc_set_nss_cfg(ol_txrx_soc_handle soc, uint32_t config) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg) + return; + + soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg(soc, config); +} + +static inline struct cdp_vdev * +cdp_vdev_attach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", 
__func__); + QDF_BUG(0); + return NULL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_vdev_attach) + return NULL; + + return soc->ops->cmn_drv_ops->txrx_vdev_attach(pdev, + vdev_mac_addr, vdev_id, op_mode); +} +#ifndef CONFIG_WIN +/** + * cdp_flow_pool_map() - Create flow pool for vdev + * @soc - data path soc handle + * @pdev + * @vdev_id - vdev_id corresponding to vdev start + * + * Create per vdev flow pool. + * + * return none + */ +static inline QDF_STATUS cdp_flow_pool_map(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return QDF_STATUS_E_INVAL; + } + + if (!soc->ops->flowctl_ops || + !soc->ops->flowctl_ops->flow_pool_map_handler) + return QDF_STATUS_E_INVAL; + + return soc->ops->flowctl_ops->flow_pool_map_handler(soc, pdev, vdev_id); +} + +/** + * cdp_flow_pool_unmap() - Delete flow pool + * @soc - data path soc handle + * @pdev + * @vdev_id - vdev_id corresponding to vdev start + * + * Delete flow pool + * + * return none + */ +static inline void cdp_flow_pool_unmap(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->flowctl_ops || + !soc->ops->flowctl_ops->flow_pool_unmap_handler) + return; + + return soc->ops->flowctl_ops->flow_pool_unmap_handler(soc, pdev, + vdev_id); +} +#endif + +static inline void +cdp_vdev_detach(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, + ol_txrx_vdev_delete_cb callback, void *cb_context) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_vdev_detach) + return; + + soc->ops->cmn_drv_ops->txrx_vdev_detach(vdev, 
+ callback, cb_context); +} + +static inline int +cdp_pdev_attach_target(ol_txrx_soc_handle soc, struct cdp_pdev *pdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_pdev_attach_target) + return 0; + + return soc->ops->cmn_drv_ops->txrx_pdev_attach_target(pdev); +} + +static inline struct cdp_pdev *cdp_pdev_attach + (ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev, + HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return NULL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_pdev_attach) + return NULL; + + return soc->ops->cmn_drv_ops->txrx_pdev_attach(soc, ctrl_pdev, + htc_pdev, osdev, pdev_id); +} + +static inline int cdp_pdev_post_attach(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_pdev_post_attach) + return 0; + + return soc->ops->cmn_drv_ops->txrx_pdev_post_attach(pdev); +} + +static inline void +cdp_pdev_pre_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_pdev_pre_detach) + return; + + soc->ops->cmn_drv_ops->txrx_pdev_pre_detach(pdev, force); +} + +static inline void +cdp_pdev_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + 
return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_pdev_detach) + return; + + soc->ops->cmn_drv_ops->txrx_pdev_detach(pdev, force); +} + +static inline void *cdp_peer_create + (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, + uint8_t *peer_mac_addr) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return NULL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_create) + return NULL; + + return soc->ops->cmn_drv_ops->txrx_peer_create(vdev, + peer_mac_addr); +} + +static inline void cdp_peer_setup + (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_setup) + return; + + soc->ops->cmn_drv_ops->txrx_peer_setup(vdev, + peer); +} + +static inline void *cdp_peer_ast_hash_find + (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return NULL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_ast_hash_find) + return NULL; + + return soc->ops->cmn_drv_ops->txrx_peer_ast_hash_find(soc, + ast_mac_addr); +} + +static inline int cdp_peer_add_ast + (ol_txrx_soc_handle soc, struct cdp_peer *peer_handle, + uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, uint32_t flags) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_add_ast) + return 0; + + return soc->ops->cmn_drv_ops->txrx_peer_add_ast(soc, + peer_handle, + mac_addr, + type, + flags); +} + +static inline void cdp_peer_reset_ast + 
(ol_txrx_soc_handle soc, uint8_t *wds_macaddr, void *vdev_hdl) +{ + + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_reset_ast) + return; + + soc->ops->cmn_drv_ops->txrx_peer_reset_ast(soc, wds_macaddr, vdev_hdl); +} + +static inline void cdp_peer_reset_ast_table + (ol_txrx_soc_handle soc, void *vdev_hdl) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_reset_ast_table) + return; + + soc->ops->cmn_drv_ops->txrx_peer_reset_ast_table(soc, vdev_hdl); +} + +static inline void cdp_peer_flush_ast_table + (ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_flush_ast_table) + return; + + soc->ops->cmn_drv_ops->txrx_peer_flush_ast_table(soc); +} + +static inline int cdp_peer_update_ast + (ol_txrx_soc_handle soc, uint8_t *wds_macaddr, + struct cdp_peer *peer_handle, uint32_t flags) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_update_ast) + return 0; + + + return soc->ops->cmn_drv_ops->txrx_peer_update_ast(soc, + peer_handle, + wds_macaddr, + flags); +} + +static inline void cdp_peer_del_ast + (ol_txrx_soc_handle soc, void *ast_handle) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_del_ast) + return; + + 
soc->ops->cmn_drv_ops->txrx_peer_del_ast(soc, ast_handle); +} + + +static inline uint8_t cdp_peer_ast_get_pdev_id + (ol_txrx_soc_handle soc, void *ast_handle) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0xff; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_ast_get_pdev_id) + return 0xff; + + return soc->ops->cmn_drv_ops->txrx_peer_ast_get_pdev_id(soc, + ast_handle); +} + +static inline uint8_t cdp_peer_ast_get_next_hop + (ol_txrx_soc_handle soc, void *ast_handle) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0xff; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_ast_get_next_hop) + return 0xff; + + return soc->ops->cmn_drv_ops->txrx_peer_ast_get_next_hop(soc, + ast_handle); +} + +static inline void cdp_peer_ast_set_type + (ol_txrx_soc_handle soc, void *ast_handle, + enum cdp_txrx_ast_entry_type type) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_ast_set_type) + return; + + soc->ops->cmn_drv_ops->txrx_peer_ast_set_type(soc, ast_handle, type); +} + +static inline void cdp_peer_teardown + (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_teardown) + return; + + soc->ops->cmn_drv_ops->txrx_peer_teardown(vdev, peer); +} + +static inline void +cdp_peer_delete(ol_txrx_soc_handle soc, void *peer, uint32_t bitmap) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", 
__func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_delete) + return; + + soc->ops->cmn_drv_ops->txrx_peer_delete(peer, bitmap); +} + +static inline void +cdp_peer_delete_sync(ol_txrx_soc_handle soc, void *peer, + QDF_STATUS(*delete_cb)( + uint8_t vdev_id, + uint32_t peerid_cnt, + uint16_t *peerid_list), + uint32_t bitmap) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_delete_sync) + return; + + soc->ops->cmn_drv_ops->txrx_peer_delete_sync(peer, + delete_cb, + bitmap); +} + +static inline int +cdp_set_monitor_mode(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, + uint8_t smart_monitor) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_set_monitor_mode) + return 0; + + return soc->ops->cmn_drv_ops->txrx_set_monitor_mode(vdev, + smart_monitor); +} + +static inline void +cdp_set_curchan(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, + uint32_t chan_mhz) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_set_curchan) + return; + + soc->ops->cmn_drv_ops->txrx_set_curchan(pdev, chan_mhz); +} + +static inline void +cdp_set_privacy_filters(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, + void *filter, uint32_t num) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_set_privacy_filters) + return; + + soc->ops->cmn_drv_ops->txrx_set_privacy_filters(vdev, + 
filter, num); +} + +static inline int +cdp_set_monitor_filter(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + struct cdp_monitor_filter *filter_val) +{ + if (soc->ops->mon_ops->txrx_set_advance_monitor_filter) + return soc->ops->mon_ops->txrx_set_advance_monitor_filter(pdev, + filter_val); + return 0; +} + + +/****************************************************************************** + * Data Interface (B Interface) + *****************************************************************************/ +static inline void +cdp_vdev_register(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, + void *osif_vdev, struct ol_txrx_ops *txrx_ops) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_vdev_register) + return; + + soc->ops->cmn_drv_ops->txrx_vdev_register(vdev, + osif_vdev, txrx_ops); +} + +static inline int +cdp_mgmt_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, + qdf_nbuf_t tx_mgmt_frm, uint8_t type) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_mgmt_send) + return 0; + + return soc->ops->cmn_drv_ops->txrx_mgmt_send(vdev, + tx_mgmt_frm, type); +} + +static inline int +cdp_mgmt_send_ext(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, + qdf_nbuf_t tx_mgmt_frm, uint8_t type, + uint8_t use_6mbps, uint16_t chanfreq) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_mgmt_send_ext) + return 0; + + return soc->ops->cmn_drv_ops->txrx_mgmt_send_ext + (vdev, tx_mgmt_frm, type, use_6mbps, chanfreq); +} + + +static inline void +cdp_mgmt_tx_cb_set(ol_txrx_soc_handle soc, struct 
cdp_pdev *pdev, + uint8_t type, ol_txrx_mgmt_tx_cb download_cb, + ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set) + return; + + soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set + (pdev, type, download_cb, ota_ack_cb, ctxt); +} + +static inline void +cdp_peer_unmap_sync_cb_set(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, + QDF_STATUS(*unmap_resp_cb)( + uint8_t vdev_id, + uint32_t peerid_cnt, + uint16_t *peerid_list)) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_peer_unmap_sync_cb_set) + return; + + soc->ops->cmn_drv_ops->txrx_peer_unmap_sync_cb_set(pdev, unmap_resp_cb); +} + +static inline int cdp_get_tx_pending(ol_txrx_soc_handle soc, +struct cdp_pdev *pdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_get_tx_pending) + return 0; + + + return soc->ops->cmn_drv_ops->txrx_get_tx_pending(pdev); +} + +static inline void +cdp_data_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_vdev *data_vdev, + ol_txrx_data_tx_cb callback, void *ctxt) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_data_tx_cb_set) + return; + + soc->ops->cmn_drv_ops->txrx_data_tx_cb_set(data_vdev, + callback, ctxt); +} + +/****************************************************************************** + * Statistics and Debugging Interface (C Interface) + 
*****************************************************************************/ +/** + * External Device physical address types + * + * Currently, both MAC and IPA uController use the same size addresses + * and descriptors are exchanged between these two depending on the mode. + * + * Rationale: qdf_dma_addr_t is the type used internally on the host for DMA + * operations. However, external device physical address sizes + * may be different from host-specific physical address sizes. + * This calls for the following definitions for target devices + * (MAC, IPA uc). + */ +#if HTT_PADDR64 +typedef uint64_t target_paddr_t; +#else +typedef uint32_t target_paddr_t; +#endif /*HTT_PADDR64 */ + +static inline int +cdp_aggr_cfg(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, + int max_subfrms_ampdu, + int max_subfrms_amsdu) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_aggr_cfg) + return 0; + + return soc->ops->cmn_drv_ops->txrx_aggr_cfg(vdev, + max_subfrms_ampdu, max_subfrms_amsdu); +} + +static inline int +cdp_fw_stats_get(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, + struct ol_txrx_stats_req *req, bool per_vdev, + bool response_expected) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_fw_stats_get) + return 0; + + return soc->ops->cmn_drv_ops->txrx_fw_stats_get(vdev, req, + per_vdev, response_expected); +} + +static inline int +cdp_debug(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, int debug_specs) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_debug) + return 0; + + 
return soc->ops->cmn_drv_ops->txrx_debug(vdev, debug_specs); +} + +static inline void cdp_fw_stats_cfg(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, uint8_t cfg_stats_type, uint32_t cfg_val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_fw_stats_cfg) + return; + + soc->ops->cmn_drv_ops->txrx_fw_stats_cfg(vdev, + cfg_stats_type, cfg_val); +} + +static inline void cdp_print_level_set(ol_txrx_soc_handle soc, unsigned level) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_print_level_set) + return; + + soc->ops->cmn_drv_ops->txrx_print_level_set(level); +} + +static inline uint8_t * +cdp_get_vdev_mac_addr(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return NULL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr) + return NULL; + + return soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr(vdev); + +} + +/** + * cdp_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of + * vdev + * @vdev: vdev handle + * + * Return: Handle to struct qdf_mac_addr + */ +static inline struct qdf_mac_addr *cdp_get_vdev_struct_mac_addr + (ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return NULL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr) + return NULL; + + return soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr + (vdev); + +} + +/** + * cdp_get_pdev_from_vdev() - Return handle to pdev 
of vdev + * @vdev: vdev handle + * + * Return: Handle to pdev + */ +static inline struct cdp_pdev *cdp_get_pdev_from_vdev + (ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return NULL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev) + return NULL; + + return soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev(vdev); +} + +/** + * cdp_get_ctrl_pdev_from_vdev() - Return control pdev of vdev + * @vdev: vdev handle + * + * Return: Handle to control pdev + */ +static inline struct cdp_cfg * +cdp_get_ctrl_pdev_from_vdev(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return NULL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev) + return NULL; + + return soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev + (vdev); +} + +static inline struct cdp_vdev * +cdp_get_vdev_from_vdev_id(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return NULL; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id) + return NULL; + + return soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id + (pdev, vdev_id); +} + +static inline void +cdp_soc_detach(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_soc_detach) + return; + + soc->ops->cmn_drv_ops->txrx_soc_detach((void *)soc); +} + +static inline int cdp_addba_requestprocess(ol_txrx_soc_handle soc, + void *peer_handle, 
uint8_t dialogtoken, uint16_t tid, + uint16_t batimeout, uint16_t buffersize, uint16_t startseqnum) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->addba_requestprocess) + return 0; + + return soc->ops->cmn_drv_ops->addba_requestprocess(peer_handle, + dialogtoken, tid, batimeout, buffersize, startseqnum); +} + +static inline void cdp_addba_responsesetup(ol_txrx_soc_handle soc, + void *peer_handle, uint8_t tid, uint8_t *dialogtoken, + uint16_t *statuscode, uint16_t *buffersize, uint16_t *batimeout) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->addba_responsesetup) + return; + + soc->ops->cmn_drv_ops->addba_responsesetup(peer_handle, tid, + dialogtoken, statuscode, buffersize, batimeout); +} + +static inline int cdp_delba_process(ol_txrx_soc_handle soc, + void *peer_handle, int tid, uint16_t reasoncode) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->delba_process) + return 0; + + return soc->ops->cmn_drv_ops->delba_process(peer_handle, + tid, reasoncode); +} + +static inline void cdp_set_addbaresponse(ol_txrx_soc_handle soc, + void *peer_handle, int tid, uint16_t statuscode) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_addba_response) + return; + + soc->ops->cmn_drv_ops->set_addba_response(peer_handle, tid, statuscode); +} + +/** + * cdp_get_peer_mac_addr_frm_id: function to return vdev id and and peer + * mac address 
+ * @soc: SOC handle + * @peer_id: peer id of the peer for which mac_address is required + * @mac_addr: reference to mac address + * + * reutm: vdev_id of the vap + */ +static inline uint8_t +cdp_get_peer_mac_addr_frm_id(ol_txrx_soc_handle soc, uint16_t peer_id, + uint8_t *mac_addr) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return CDP_INVALID_VDEV_ID; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id) + return CDP_INVALID_VDEV_ID; + + return soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id(soc, + peer_id, mac_addr); +} + +/** + * cdp_set_vdev_dscp_tid_map(): function to set DSCP-tid map in the vap + * @vdev: vdev handle + * @map_id: id of the tid map + * + * Return: void + */ +static inline void cdp_set_vdev_dscp_tid_map(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, uint8_t map_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map) + return; + + soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map(vdev, + map_id); +} + +/** + * cdp_set_pdev_dscp_tid_map(): function to change tid values in DSCP-tid map + * @pdev: pdev handle + * @map_id: id of the tid map + * @tos: index value in map that needs to be changed + * @tid: tid value passed by user + * + * Return: void + */ +static inline void cdp_set_pdev_dscp_tid_map(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, uint8_t map_id, uint8_t tos, uint8_t tid) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map) + return; + + soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map(pdev, + map_id, tos, tid); +} + +/** + * cdp_flush_cache_rx_queue() - 
flush cache rx queue frame + * + * Return: None + */ +static inline void cdp_flush_cache_rx_queue(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->flush_cache_rx_queue) + return; + soc->ops->cmn_drv_ops->flush_cache_rx_queue(); +} + +/** + * cdp_txrx_stats_request(): function to map to host and firmware statistics + * @soc: soc handle + * @vdev: virtual device + * @req: stats request container + * + * return: status + */ +static inline +int cdp_txrx_stats_request(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, + struct cdp_txrx_stats_req *req) +{ + if (!soc || !soc->ops || !soc->ops->cmn_drv_ops || !req) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_ASSERT(0); + return 0; + } + + if (soc->ops->cmn_drv_ops->txrx_stats_request) + return soc->ops->cmn_drv_ops->txrx_stats_request(vdev, req); + + return 0; +} + +/** + * cdp_txrx_intr_attach(): function to attach and configure interrupt + * @soc: soc handle + */ +static inline QDF_STATUS cdp_txrx_intr_attach(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_intr_attach) + return 0; + + return soc->ops->cmn_drv_ops->txrx_intr_attach(soc); +} + +/** + * cdp_txrx_intr_detach(): function to detach interrupt + * @soc: soc handle + */ +static inline void cdp_txrx_intr_detach(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_intr_detach) + return; + + soc->ops->cmn_drv_ops->txrx_intr_detach(soc); +} + +/** + * 
cdp_display_stats(): function to map to dump stats + * @soc: soc handle + * @value: statistics option + */ +static inline QDF_STATUS +cdp_display_stats(ol_txrx_soc_handle soc, uint16_t value, + enum qdf_stats_verbosity_level level) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->display_stats) + return 0; + + return soc->ops->cmn_drv_ops->display_stats(soc, value, level); +} + + +/** + * cdp_set_pn_check(): function to set pn check + * @soc: soc handle + * @sec_type: security type + * #rx_pn: receive pn + */ +static inline int cdp_set_pn_check(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, uint32_t *rx_pn) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_pn_check) + return 0; + + soc->ops->cmn_drv_ops->set_pn_check(vdev, peer_handle, + sec_type, rx_pn); + return 0; +} + +/** + * cdp_update_config_parameters(): function to propagate configuration + * parameters to datapath + * @soc: opaque soc handle + * @cfg: configuration handle + * + * Return: status: 0 - Success, non-zero: Failure + */ +static inline +QDF_STATUS cdp_update_config_parameters(ol_txrx_soc_handle soc, + struct cdp_config_params *cfg) +{ + struct cdp_soc *psoc = (struct cdp_soc *)soc; + + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->update_config_parameters) + return QDF_STATUS_SUCCESS; + + return soc->ops->cmn_drv_ops->update_config_parameters(psoc, + cfg); +} + +/** + * cdp_pdev_get_dp_txrx_handle() - get advanced dp handle from pdev + * @soc: opaque soc 
handle + * @pdev: data path pdev handle + * + * Return: opaque dp handle + */ +static inline void * +cdp_pdev_get_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (soc->ops->cmn_drv_ops->get_dp_txrx_handle) + return soc->ops->cmn_drv_ops->get_dp_txrx_handle(pdev); + + return 0; +} + +/** + * cdp_pdev_set_dp_txrx_handle() - set advanced dp handle in pdev + * @soc: opaque soc handle + * @pdev: data path pdev handle + * @dp_hdl: opaque pointer for dp_txrx_handle + * + * Return: void + */ +static inline void +cdp_pdev_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev, void *dp_hdl) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_dp_txrx_handle) + return; + + soc->ops->cmn_drv_ops->set_dp_txrx_handle(pdev, dp_hdl); +} + +/* + * cdp_soc_get_dp_txrx_handle() - get extended dp handle from soc + * @soc: opaque soc handle + * + * Return: opaque extended dp handle + */ +static inline void * +cdp_soc_get_dp_txrx_handle(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return NULL; + } + + if (soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle) + return soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle( + (struct cdp_soc *) soc); + + return NULL; +} + +/** + * cdp_soc_set_dp_txrx_handle() - set advanced dp handle in soc + * @soc: opaque soc handle + * @dp_hdl: opaque pointer for dp_txrx_handle + * + * Return: void + */ +static inline void +cdp_soc_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *dp_handle) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + 
} + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle) + return; + + soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle((struct cdp_soc *)soc, + dp_handle); +} + +/** + * cdp_tx_send() - enqueue frame for transmission + * @soc: soc opaque handle + * @vdev: VAP device + * @nbuf: nbuf to be enqueued + * + * This API is used by Extended Datapath modules to enqueue frame for + * transmission + * + * Return: void + */ +static inline void +cdp_tx_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, qdf_nbuf_t nbuf) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->tx_send) + return; + + soc->ops->cmn_drv_ops->tx_send(vdev, nbuf); +} + +/* + * cdp_get_pdev_id_frm_pdev() - return pdev_id from pdev + * @soc: opaque soc handle + * @pdev: data path pdev handle + * + * Return: pdev_id + */ +static inline +uint8_t cdp_get_pdev_id_frm_pdev(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev) +{ + if (soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev) + return soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev(pdev); + return 0; +} + +/** + * cdp_set_nac() - set nac + * @soc: opaque soc handle + * @peer: data path peer handle + * + */ +static inline +void cdp_set_nac(ol_txrx_soc_handle soc, + struct cdp_peer *peer) +{ + if (soc->ops->cmn_drv_ops->txrx_set_nac) + soc->ops->cmn_drv_ops->txrx_set_nac(peer); +} + +/** + * cdp_set_pdev_tx_capture() - set pdev tx_capture + * @soc: opaque soc handle + * @pdev: data path pdev handle + * @val: value of pdev_tx_capture + * + * Return: void + */ +static inline +void cdp_set_pdev_tx_capture(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, int val) +{ + if (soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture) + return soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture(pdev, + val); + +} + +/** + * cdp_get_peer_mac_from_peer_id() - get peer mac addr from peer id + * @soc: 
opaque soc handle + * @pdev: data path pdev handle + * @peer_id: data path peer id + * @peer_mac: peer_mac + * + * Return: void + */ +static inline +void cdp_get_peer_mac_from_peer_id(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev_handle, + uint32_t peer_id, uint8_t *peer_mac) +{ + if (soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id) + soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id( + pdev_handle, peer_id, peer_mac); +} + +/** + * cdp_vdev_tx_lock() - acquire lock + * @soc: opaque soc handle + * @vdev: data path vdev handle + * + * Return: void + */ +static inline +void cdp_vdev_tx_lock(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev) +{ + if (soc->ops->cmn_drv_ops->txrx_vdev_tx_lock) + soc->ops->cmn_drv_ops->txrx_vdev_tx_lock(vdev); +} + +/** + * cdp_vdev_tx_unlock() - release lock + * @soc: opaque soc handle + * @vdev: data path vdev handle + * + * Return: void + */ +static inline +void cdp_vdev_tx_unlock(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev) +{ + if (soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock) + soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock(vdev); +} + +/** + * cdp_ath_getstats() - get updated athstats + * @soc: opaque soc handle + * @dev: dp interface handle + * @stats: cdp network device stats structure + * @type: device type pdev/vdev + * + * Return: void + */ +static inline void cdp_ath_getstats(ol_txrx_soc_handle soc, + void *dev, struct cdp_dev_stats *stats, + uint8_t type) +{ + if (soc && soc->ops && soc->ops->cmn_drv_ops->txrx_ath_getstats) + soc->ops->cmn_drv_ops->txrx_ath_getstats(dev, stats, type); +} + +/** + * cdp_set_gid_flag() - set groupid flag + * @soc: opaque soc handle + * @pdev: data path pdev handle + * @mem_status: member status from grp management frame + * @user_position: user position from grp management frame + * + * Return: void + */ +static inline +void cdp_set_gid_flag(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, u_int8_t *mem_status, + u_int8_t *user_position) +{ + if 
(soc->ops->cmn_drv_ops->txrx_set_gid_flag) + soc->ops->cmn_drv_ops->txrx_set_gid_flag(pdev, mem_status, user_position); +} + +/** + * cdp_fw_supported_enh_stats_version() - returns the fw enhanced stats version + * @soc: opaque soc handle + * @pdev: data path pdev handle + * + */ +static inline +uint32_t cdp_fw_supported_enh_stats_version(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev) +{ + if (soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version) + return soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version(pdev); + return 0; +} + +/** + * cdp_get_pdev_id_frm_pdev() - return pdev_id from pdev + * @soc: opaque soc handle + * @ni: associated node + * @force: number of frame in SW queue + * Return: void + */ +static inline +void cdp_if_mgmt_drain(ol_txrx_soc_handle soc, + void *ni, int force) +{ + if (soc->ops->cmn_drv_ops->txrx_if_mgmt_drain) + soc->ops->cmn_drv_ops->txrx_if_mgmt_drain(ni, force); +} + +static inline void +cdp_peer_map_attach(ol_txrx_soc_handle soc, uint32_t max_peers) +{ + if (soc && soc->ops && soc->ops->cmn_drv_ops && + soc->ops->cmn_drv_ops->txrx_peer_map_attach) + soc->ops->cmn_drv_ops->txrx_peer_map_attach(soc, max_peers); +} + +#ifdef RECEIVE_OFFLOAD +/** + * cdp_register_rx_offld_flush_cb() - register LRO/GRO flush cb function pointer + * @soc - data path soc handle + * @pdev - device instance pointer + * + * register rx offload flush callback function pointer + * + * return none + */ +static inline void cdp_register_rx_offld_flush_cb(ol_txrx_soc_handle soc, + void (rx_ol_flush_cb)(void *)) +{ + if (!soc || !soc->ops || !soc->ops->rx_offld_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->rx_offld_ops->register_rx_offld_flush_cb) + return soc->ops->rx_offld_ops->register_rx_offld_flush_cb( + rx_ol_flush_cb); +} + +/** + * cdp_deregister_rx_offld_flush_cb() - deregister Rx offld flush cb function + * @soc - data path soc handle + * + * deregister rx 
offload flush callback function pointer + * + * return none + */ +static inline void cdp_deregister_rx_offld_flush_cb(ol_txrx_soc_handle soc) +{ + if (!soc || !soc->ops || !soc->ops->rx_offld_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb) + return soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb(); +} +#endif /* RECEIVE_OFFLOAD */ +#endif /* _CDP_TXRX_CMN_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn_reg.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..cb0b4a42f9835b98f848e99bc3e6d7d82f607541 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn_reg.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + + /** + * @file cdp_txrx_cmn.h + * @brief Define the host data path converged API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_CMN_REG_H_ +#define _CDP_TXRX_CMN_REG_H_ + +#include "hif_main.h" + +#define MOB_DRV_LEGACY_DP 0xdeed/*FIXME Add MCL device IDs */ +#define LITHIUM_DP 0xfffe/*FIXME Add Litium device ID */ +/* Use these device IDs for attach in future */ + +ol_txrx_soc_handle ol_txrx_soc_attach(void *scn_handle, struct ol_if_ops *dp_ol_if_ops); + +#ifdef QCA_WIFI_QCA8074 +void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle, + HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, + struct ol_if_ops *ol_ops); +#else +/* + * dp_soc_attach_wifi3() - Attach txrx SOC + * @ctrl_psoc: Opaque SOC handle from Ctrl plane + * @htc_handle: Opaque HTC handle + * @hif_handle: Opaque HIF handle + * @qdf_osdev: QDF device + * + * Return: DP SOC handle on success, NULL on failure + */ +static inline void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle, + HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, + struct ol_if_ops *ol_ops) +{ + return NULL; +} +#endif /* QCA_WIFI_QCA8074 */ + +static inline ol_txrx_soc_handle cdp_soc_attach(u_int16_t devid, + void *hif_handle, void *psoc, void *htc_handle, + qdf_device_t qdf_dev, struct ol_if_ops *dp_ol_if_ops) +{ + switch (devid) { + case LITHIUM_DP: /*FIXME Add lithium devide IDs */ + case QCA8074_DEVICE_ID: /* Hawekeye */ + case QCA6290_DEVICE_ID: + case RUMIM2M_DEVICE_ID_NODE0: /*lithium emulation */ + case RUMIM2M_DEVICE_ID_NODE1: /*lithium emulation */ + case RUMIM2M_DEVICE_ID_NODE2: /*lithium emulation */ + case RUMIM2M_DEVICE_ID_NODE3: /*lithium emulation */ + return dp_soc_attach_wifi3(psoc, hif_handle, htc_handle, + qdf_dev, dp_ol_if_ops); + break; + default: + return ol_txrx_soc_attach(psoc, dp_ol_if_ops); + } + return NULL; +} + +#endif /*_CDP_TXRX_CMN_REG_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn_struct.h 
b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn_struct.h new file mode 100644 index 0000000000000000000000000000000000000000..5675a74e41310fcd110b9d8bfe905734bc7d0533 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_cmn_struct.h @@ -0,0 +1,1237 @@ +/* + * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * @file cdp_txrx_cmn_struct.h + * @brief Define the host data path converged API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_CMN_STRUCT_H_ +#define _CDP_TXRX_CMN_STRUCT_H_ + +/** + * For WIN legacy header compilation + * Temporary add dummy definitions + * should be removed properly WIN legacy code handle + */ + +#include "htc_api.h" +#include "qdf_types.h" +#include "qdf_nbuf.h" +#include "qdf_atomic.h" +#ifndef CONFIG_WIN +#include +#endif /* CONFIG_WIN */ +#include + +#ifndef OL_TXRX_NUM_LOCAL_PEER_IDS +/* + * Each AP will occupy one ID, so it will occupy two IDs for AP-AP mode. + * Clients will be assigned max 32 IDs. + * STA(associated)/P2P DEV(self-PEER) will get one ID. 
+ */ +#define OL_TXRX_NUM_LOCAL_PEER_IDS (32 + 1 + 1 + 1) +#endif + +#define CDP_BA_256_BIT_MAP_SIZE_DWORDS 8 +#define CDP_BA_64_BIT_MAP_SIZE_DWORDS 2 + +#define OL_TXRX_INVALID_LOCAL_PEER_ID 0xffff +#define CDP_INVALID_VDEV_ID 0xff +/* Options for Dump Statistics */ +#define CDP_HDD_STATS 0 +#define CDP_TXRX_PATH_STATS 1 +#define CDP_TXRX_HIST_STATS 2 +#define CDP_TXRX_TSO_STATS 3 +#define CDP_HDD_NETIF_OPER_HISTORY 4 +#define CDP_DUMP_TX_FLOW_POOL_INFO 5 +#define CDP_TXRX_DESC_STATS 6 +#define CDP_HIF_STATS 7 +#define CDP_LRO_STATS 8 +#define CDP_NAPI_STATS 9 +#define CDP_WLAN_RX_BUF_DEBUG_STATS 10 +#define CDP_RX_RING_STATS 11 +#define CDP_DP_NAPI_STATS 12 +#define CDP_SCHEDULER_STATS 21 +#define CDP_TX_QUEUE_STATS 22 +#define CDP_BUNDLE_STATS 23 +#define CDP_CREDIT_STATS 24 +#define CDP_DISCONNECT_STATS 25 + +#define WME_AC_TO_TID(_ac) ( \ + ((_ac) == WME_AC_VO) ? 6 : \ + ((_ac) == WME_AC_VI) ? 5 : \ + ((_ac) == WME_AC_BK) ? 1 : \ + 0) + +#define TID_TO_WME_AC(_tid) ( \ + (((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \ + (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \ + (((_tid) == 4) || ((_tid) == 5)) ? 
WME_AC_VI : \ + WME_AC_VO) + +#define CDP_MU_MAX_USERS 8 +#define CDP_MU_MAX_USER_INDEX (CDP_MU_MAX_USERS - 1) +#define CDP_INVALID_PEER 0xffff +#define CDP_INVALID_TID 31 + +#define CDP_DATA_TID_MAX 8 +/* + * advance rx monitor filter + * */ +#define MON_FILTER_PASS 0x0001 +#define MON_FILTER_OTHER 0x0002 +#define MON_FILTER_ALL 0x0003 + +#define FILTER_MGMT_ALL 0xFFFF +#define FILTER_MGMT_ASSOC_REQ 0x0001 +#define FILTER_MGMT_ASSOC_RES 0x0002 +#define FILTER_MGMT_REASSOC_REQ 0x0004 +#define FILTER_MGMT_REASSOC_RES 0x0008 +#define FILTER_MGMT_PROBE_REQ 0x0010 +#define FILTER_MGMT_PROBE_RES 0x0020 +#define FILTER_MGMT_TIM_ADVT 0x0040 +#define FILTER_MGMT_RESERVED_7 0x0080 +#define FILTER_MGMT_BEACON 0x0100 +#define FILTER_MGMT_ATIM 0x0200 +#define FILTER_MGMT_DISASSOC 0x0400 +#define FILTER_MGMT_AUTH 0x0800 +#define FILTER_MGMT_DEAUTH 0x1000 +#define FILTER_MGMT_ACTION 0x2000 +#define FILTER_MGMT_ACT_NO_ACK 0x4000 +#define FILTER_MGMT_RESERVED_15 0x8000 + +#define FILTER_CTRL_ALL 0xFFFF +#define FILTER_CTRL_RESERVED_1 0x0001 +#define FILTER_CTRL_RESERVED_2 0x0002 +#define FILTER_CTRL_TRIGGER 0x0004 +#define FILTER_CTRL_RESERVED_4 0x0008 +#define FILTER_CTRL_BF_REP_POLL 0x0010 +#define FILTER_CTRL_VHT_NDP 0x0020 +#define FILTER_CTRL_FRAME_EXT 0x0040 +#define FILTER_CTRL_CTRLWRAP 0x0080 +#define FILTER_CTRL_BA_REQ 0x0100 +#define FILTER_CTRL_BA 0x0200 +#define FILTER_CTRL_PSPOLL 0x0400 +#define FILTER_CTRL_RTS 0x0800 +#define FILTER_CTRL_CTS 0x1000 +#define FILTER_CTRL_ACK 0x2000 +#define FILTER_CTRL_CFEND 0x4000 +#define FILTER_CTRL_CFEND_CFACK 0x8000 + +#define FILTER_DATA_ALL 0xFFFF +#define FILTER_DATA_MCAST 0x4000 +#define FILTER_DATA_UCAST 0x8000 +#define FILTER_DATA_DATA 0x0001 +#define FILTER_DATA_NULL 0x0008 + +/* + * DP configuration parameters + */ +enum cdp_cfg_param_type { + CDP_CFG_MAX_PEER_ID, + CDP_CFG_CCE_DISABLE, + CDP_CFG_NUM_PARAMS +}; + +/* + * htt_dbg_stats_type - + * bit positions for each stats type within a stats type bitmask + * The bitmask 
contains 24 bits. + */ +enum htt_cmn_dbg_stats_type { + HTT_DBG_CMN_STATS_WAL_PDEV_TXRX = 0, /* bit 0 -> 0x1 */ + HTT_DBG_CMN_STATS_RX_REORDER = 1, /* bit 1 -> 0x2 */ + HTT_DBG_CMN_STATS_RX_RATE_INFO = 2, /* bit 2 -> 0x4 */ + HTT_DBG_CMN_STATS_TX_PPDU_LOG = 3, /* bit 3 -> 0x8 */ + HTT_DBG_CMN_STATS_TX_RATE_INFO = 4, /* bit 4 -> 0x10 */ + HTT_DBG_CMN_STATS_TIDQ = 5, /* bit 5 -> 0x20 */ + HTT_DBG_CMN_STATS_TXBF_INFO = 6, /* bit 6 -> 0x40 */ + HTT_DBG_CMN_STATS_SND_INFO = 7, /* bit 7 -> 0x80 */ + HTT_DBG_CMN_STATS_ERROR_INFO = 8, /* bit 8 -> 0x100 */ + HTT_DBG_CMN_STATS_TX_SELFGEN_INFO = 9, /* bit 9 -> 0x200 */ + HTT_DBG_CMN_STATS_TX_MU_INFO = 10, /* bit 10 -> 0x400 */ + HTT_DBG_CMN_STATS_SIFS_RESP_INFO = 11, /* bit 11 -> 0x800 */ + HTT_DBG_CMN_STATS_RESET_INFO = 12, /* bit 12 -> 0x1000 */ + HTT_DBG_CMN_STATS_MAC_WDOG_INFO = 13, /* bit 13 -> 0x2000 */ + HTT_DBG_CMN_STATS_TX_DESC_INFO = 14, /* bit 14 -> 0x4000 */ + HTT_DBG_CMN_STATS_TX_FETCH_MGR_INFO = 15, /* bit 15 -> 0x8000 */ + HTT_DBG_CMN_STATS_TX_PFSCHED_INFO = 16, /* bit 16 -> 0x10000 */ + HTT_DBG_CMN_STATS_TX_PATH_STATS_INFO = 17, /* bit 17 -> 0x20000 */ + /* bits 18-23 currently reserved */ + + /* keep this last */ + HTT_DBG_CMN_NUM_STATS, + HTT_DBG_CMN_NUM_STATS_INVALID = 31, /* bit 31 -> 0x80000000 */ +}; + +/* + * cdp_host_txrx_stats: Different types of host stats + * @TXRX_HOST_STATS_INVALID: Invalid option + * @TXRX_RX_RATE_STATS: Rx rate info + * @TXRX_TX_RATE_STATS: Tx rate info + * @TXRX_TX_HOST_STATS: Print Tx stats + * @TXRX_RX_HOST_STATS: Print host Rx stats + * @TXRX_CLEAR_STATS: clear all host stats + * @TXRX_SRNG_PTR_STATS: Print SRNG pointer stats + * @TXRX_RX_MON_STATS: Print monitor mode stats +*/ +enum cdp_host_txrx_stats { + TXRX_HOST_STATS_INVALID = -1, + TXRX_CLEAR_STATS = 0, + TXRX_RX_RATE_STATS = 1, + TXRX_TX_RATE_STATS = 2, + TXRX_TX_HOST_STATS = 3, + TXRX_RX_HOST_STATS = 4, + TXRX_AST_STATS = 5, + TXRX_SRNG_PTR_STATS = 6, + TXRX_RX_MON_STATS = 7, + TXRX_HOST_STATS_MAX, +}; + +/* + * 
cdp_ppdu_ftype: PPDU Frame Type + * @CDP_PPDU_FTYPE_DATA: SU or MU Data Frame + * @CDP_PPDU_FTYPE_CTRL: Control/Management Frames +*/ +enum cdp_ppdu_ftype { + CDP_PPDU_FTYPE_CTRL, + CDP_PPDU_FTYPE_DATA, + CDP_PPDU_FTYPE_MAX +}; + + +/** + * @brief General specification of the tx frame contents + * + * @details + * for efficiency, the HTT packet type values correspond + * to the bit positions of the WAL packet type values, so the + * translation is a simple shift operation. + */ +enum htt_cmn_pkt_type { + htt_cmn_pkt_type_raw = 0, + htt_cmn_pkt_type_native_wifi = 1, + htt_cmn_pkt_type_ethernet = 2, + htt_cmn_pkt_type_mgmt = 3, + htt_cmn_pkt_type_eth2 = 4, + + /* keep this last */ + htt_cmn_pkt_num_types +}; + +/** + * @General description of HTT received packets status + * It is similar to htt_rx_status enum + * but is added as a cdp enum can be freely used in OL_IF layer + */ +enum htt_cmn_rx_status { + htt_cmn_rx_status_unknown = 0x0, + htt_cmn_rx_status_ok, + htt_cmn_rx_status_err_fcs, + htt_cmn_rx_status_err_dup, + htt_cmn_rx_status_err_replay, + htt_cmn_rx_status_inv_peer, + htt_cmn_rx_status_ctrl_mgmt_null = 0x08, + htt_cmn_rx_status_tkip_mic_err = 0x09, + htt_cmn_rx_status_decrypt_err = 0x0A, + htt_cmn_rx_status_mpdu_length_err = 0x0B, + htt_cmn_rx_status_err_misc = 0xFF +}; + + +enum cdp_host_reo_dest_ring { + cdp_host_reo_dest_ring_unknown = 0, + cdp_host_reo_dest_ring_1 = 1, + cdp_host_reo_dest_ring_2 = 2, + cdp_host_reo_dest_ring_3 = 3, + cdp_host_reo_dest_ring_4 = 4, +}; + +enum htt_cmn_t2h_en_stats_type { + /* keep this alwyas first */ + HTT_CMN_T2H_EN_STATS_TYPE_START = 0, + + /** ppdu_common_stats is the payload */ + HTT_CMN_T2H_EN_STATS_TYPE_COMMON = 1, + /** ppdu_sant_stats is the payload */ + HTT_CMN_T2H_EN_STATS_TYPE_SANT = 2, + /** ppdu_common_stats_v2 is the payload */ + HTT_CMN_T2H_EN_STATS_TYPE_COMMON_V2 = 3, + + /* Keep this last */ + HTT_CMN_T2H_EN_STATS_TYPE_END = 0x1f, +}; + +enum htt_cmn_t2h_en_stats_status { + /* Keep this first always 
*/ + HTT_CMN_T2H_EN_STATS_STATUS_PARTIAL = 0, + HTT_CMN_T2H_EN_STATS_STATUS_PRESENT = 1, + HTT_CMN_T2H_EN_STATS_STATUS_ERROR = 2, + HTT_CMN_T2H_EN_STATS_STATUS_INVALID = 3, + + + /* keep this always last */ + HTT_CMN_T2H_EN_STATS_STATUS_SERIES_DONE = 7, +}; + +/** + * struct ol_txrx_peer_state - Peer state information + */ +enum ol_txrx_peer_state { + OL_TXRX_PEER_STATE_INVALID, + OL_TXRX_PEER_STATE_DISC, /* initial state */ + OL_TXRX_PEER_STATE_CONN, /* authentication in progress */ + OL_TXRX_PEER_STATE_AUTH, /* authentication successful */ +}; + +/** + * struct ol_txrx_ast_type - AST entry type information + */ +enum cdp_txrx_ast_entry_type { + CDP_TXRX_AST_TYPE_NONE, /* static ast entry for connected peer */ + CDP_TXRX_AST_TYPE_STATIC, /* static ast entry for connected peer */ + CDP_TXRX_AST_TYPE_WDS, /* WDS peer ast entry type*/ + CDP_TXRX_AST_TYPE_MEC, /* Multicast echo ast entry type */ + CDP_TXRX_AST_TYPE_WDS_HM, /* HM WDS entry */ + CDP_TXRX_AST_TYPE_MAX +}; + +/** + * struct cdp_sec_type - security type information + */ +enum cdp_sec_type { + cdp_sec_type_none, + cdp_sec_type_wep128, + cdp_sec_type_wep104, + cdp_sec_type_wep40, + cdp_sec_type_tkip, + cdp_sec_type_tkip_nomic, + cdp_sec_type_aes_ccmp, + cdp_sec_type_wapi, + cdp_sec_type_aes_ccmp_256, + cdp_sec_type_aes_gcmp, + cdp_sec_type_aes_gcmp_256, + + /* keep this last! */ + cdp_num_sec_types +}; + +/** + * struct cdp_tx_exception_metadata - Exception path parameters + * @peer_id: Peer id of the peer + * @tid: Transmit Identifier + * @tx_encap_type: Transmit encap type (i.e. 
Raw, Native Wi-Fi, Ethernet) + * @sec_type: sec_type to be passed to HAL + * + * This structure holds the parameters needed in the exception path of tx + * + */ +struct cdp_tx_exception_metadata { + uint16_t peer_id; + uint8_t tid; + uint16_t tx_encap_type; + enum cdp_sec_type sec_type; +}; + +typedef struct cdp_soc_t *ol_txrx_soc_handle; + +/** + * ol_txrx_vdev_delete_cb - callback registered during vdev + * detach + */ +typedef void (*ol_txrx_vdev_delete_cb)(void *context); + +/** + * ol_txrx_peer_unmap_sync_cb - callback registered during peer detach sync + */ +typedef QDF_STATUS(*ol_txrx_peer_unmap_sync_cb)(uint8_t vdev_id, + uint32_t peer_id_cnt, + uint16_t *peer_id_list); + +/** + * ol_osif_vdev_handle - paque handle for OS shim virtual device + * object + */ +struct ol_osif_vdev_t; +typedef struct ol_osif_vdev_t *ol_osif_vdev_handle; + +/** + * wlan_op_mode - Virtual device operation mode + * @wlan_op_mode_unknown: Unknown mode + * @wlan_op_mode_ap: AP mode + * @wlan_op_mode_ibss: IBSS mode + * @wlan_op_mode_sta: STA (client) mode + * @wlan_op_mode_monitor: Monitor mode + * @wlan_op_mode_ocb: OCB mode + */ +enum wlan_op_mode { + wlan_op_mode_unknown, + wlan_op_mode_ap, + wlan_op_mode_ibss, + wlan_op_mode_sta, + wlan_op_mode_monitor, + wlan_op_mode_ocb, + wlan_op_mode_ndi, +}; + +/** + * connectivity_stats_pkt_status - data pkt type + * @PKT_TYPE_REQ: Request packet + * @PKT_TYPE_RSP: Response packet + * @PKT_TYPE_TX_DROPPED: TX packet dropped + * @PKT_TYPE_RX_DROPPED: RX packet dropped + * @PKT_TYPE_RX_DELIVERED: RX packet delivered + * @PKT_TYPE_RX_REFUSED: RX packet refused + * @PKT_TYPE_TX_HOST_FW_SENT: TX packet FW sent + * @PKT_TYPE_TX_ACK_CNT:TC packet acked + * @PKT_TYPE_NONE: Invalid packet + */ +enum connectivity_stats_pkt_status { + PKT_TYPE_REQ, + PKT_TYPE_RSP, + PKT_TYPE_TX_DROPPED, + PKT_TYPE_RX_DROPPED, + PKT_TYPE_RX_DELIVERED, + PKT_TYPE_RX_REFUSED, + PKT_TYPE_TX_HOST_FW_SENT, + PKT_TYPE_TX_ACK_CNT, + PKT_TYPE_NONE, +}; + +/** + * 
cdp_mgmt_tx_cb - tx management delivery notification + * callback function + */ +typedef void +(*ol_txrx_mgmt_tx_cb)(void *ctxt, qdf_nbuf_t tx_mgmt_frm, int had_error); + +/** + * ol_txrx_data_tx_cb - Function registered with the data path + * that is called when tx frames marked as "no free" are + * done being transmitted + */ +typedef void +(*ol_txrx_data_tx_cb)(void *ctxt, qdf_nbuf_t tx_frm, int had_error); + +/** + * ol_txrx_tx_fp - top-level transmit function + * @data_vdev - handle to the virtual device object + * @msdu_list - list of network buffers + */ +typedef qdf_nbuf_t (*ol_txrx_tx_fp)(void *data_vdev, + qdf_nbuf_t msdu_list); + +/** + * ol_txrx_tx_exc_fp - top-level transmit function on exception path + * @data_vdev - handle to the virtual device object + * @msdu_list - list of network buffers + * @tx_exc_metadata - structure that holds parameters to exception path + */ +typedef qdf_nbuf_t (*ol_txrx_tx_exc_fp)(void *data_vdev, + qdf_nbuf_t msdu_list, + struct cdp_tx_exception_metadata + *tx_exc_metadata); + +/** + * ol_txrx_completion_fp - top-level transmit function + * for tx completion + * @skb: skb data + * @osif_dev: the virtual device's OS shim object + */ +typedef void (*ol_txrx_completion_fp)(qdf_nbuf_t skb, + void *osif_dev); +/** + * ol_txrx_tx_flow_control_fp - tx flow control notification + * function from txrx to OS shim + * @osif_dev - the virtual device's OS shim object + * @tx_resume - tx os q should be resumed or not + */ +typedef void (*ol_txrx_tx_flow_control_fp)(void *osif_dev, + bool tx_resume); + +/** + * ol_txrx_tx_flow_control_is_pause_fp - is tx paused by flow control + * function from txrx to OS shim + * @osif_dev - the virtual device's OS shim object + * + * Return: true if tx is paused by flow control + */ +typedef bool (*ol_txrx_tx_flow_control_is_pause_fp)(void *osif_dev); + +/** + * ol_txrx_rx_fp - receive function to hand batches of data + * frames from txrx to OS shim + * @data_vdev - handle to the OSIF virtual device 
object + * @msdu_list - list of network buffers + */ +typedef QDF_STATUS(*ol_txrx_rx_fp)(void *osif_dev, qdf_nbuf_t msdu_list); + +/** + * ol_txrx_stats_rx_fp - receive function to hand batches of data + * frames from txrx to OS shim + * @skb: skb data + * @osif_dev: the virtual device's OS shim object + * @action: data packet type + * @pkt_type: packet data type + */ +typedef void (*ol_txrx_stats_rx_fp)(struct sk_buff *skb, + void *osif_dev, enum connectivity_stats_pkt_status action, + uint8_t *pkt_type); + +/** + * ol_txrx_get_key_fp - function to gey key based on keyix and peer + * mac address + * @osif_dev - the virtual device's OS shim object + * @key_buf - pointer to store key + * @mac_addr - pointer to mac address + * @keyix - key id + */ +typedef QDF_STATUS(*ol_txrx_get_key_fp)(void *osif_dev, uint8_t *key_buf, uint8_t *mac_addr, uint8_t keyix); + +/** + * ol_txrx_rsim_rx_decap_fp - raw mode simulation function to decap the + * packets in receive path. + * @osif_dev - the virtual device's OS shim object + * @list_head - poniter to head of receive packet queue to decap + * @list_tail - poniter to tail of receive packet queue to decap + * @peer - Peer handler + */ +typedef QDF_STATUS(*ol_txrx_rsim_rx_decap_fp)(void *osif_dev, + qdf_nbuf_t *list_head, + qdf_nbuf_t *list_tail, + struct cdp_peer *peer); + +/* ol_txrx_rx_fp - external tx free function to read per packet stats and + * free tx buffer externally + * @netbuf - tx network buffer + */ +typedef void (*ol_txrx_tx_free_ext_fp)(qdf_nbuf_t netbuf); + +/** + * ol_txrx_rx_check_wai_fp - OSIF WAPI receive function +*/ +typedef bool (*ol_txrx_rx_check_wai_fp)(ol_osif_vdev_handle vdev, + qdf_nbuf_t mpdu_head, + qdf_nbuf_t mpdu_tail); +/** + * ol_txrx_rx_mon_fp - OSIF monitor mode receive function for single + * MPDU (802.11 format) + */ +typedef void (*ol_txrx_rx_mon_fp)(ol_osif_vdev_handle vdev, + qdf_nbuf_t mpdu, + void *rx_status); + +/** + * ol_txrx_proxy_arp_fp - proxy arp function pointer +*/ +typedef int 
(*ol_txrx_proxy_arp_fp)(ol_osif_vdev_handle vdev, + qdf_nbuf_t netbuf); + +/* + * ol_txrx_mcast_me_fp - function pointer for multicast enhancement + */ +typedef int (*ol_txrx_mcast_me_fp)(ol_osif_vdev_handle vdev, + qdf_nbuf_t netbuf); + +/** + * ol_txrx_stats_callback - statistics notify callback + */ +typedef void (*ol_txrx_stats_callback)(void *ctxt, + enum htt_cmn_dbg_stats_type type, + uint8_t *buf, int bytes); + +/** + * ol_txrx_ops - (pointers to) the functions used for tx and rx + * data xfer + * + * There are two portions of these txrx operations. + * The rx portion is filled in by OSIF SW before calling + * ol_txrx_osif_vdev_register; inside the ol_txrx_osif_vdev_register + * the txrx SW stores a copy of these rx function pointers, to use + * as it delivers rx data frames to the OSIF SW. + * The tx portion is filled in by the txrx SW inside + * ol_txrx_osif_vdev_register; when the function call returns, + * the OSIF SW stores a copy of these tx functions to use as it + * delivers tx data frames to the txrx SW. + * + * @tx.std - the tx function pointer for standard data + * frames This function pointer is set by the txrx SW + * perform host-side transmit operations based on + * whether a HL or LL host/target interface is in use. + * @tx.flow_control_cb - the transmit flow control + * function that is registered by the + * OSIF which is called from txrx to + * indicate whether the transmit OS + * queues should be paused/resumed + * @rx.std - the OS shim rx function to deliver rx data + * frames to. This can have different values for + * different virtual devices, e.g. so one virtual + * device's OS shim directly hands rx frames to the OS, + * but another virtual device's OS shim filters out P2P + * messages before sending the rx frames to the OS. 
The + * netbufs delivered to the osif_rx function are in the + * format specified by the OS to use for tx and rx + * frames (either 802.3 or native WiFi) + * @rx.wai_check - the tx function pointer for WAPI frames + * @rx.mon - the OS shim rx monitor function to deliver + * monitor data to Though in practice, it is probable + * that the same function will be used for delivering + * rx monitor data for all virtual devices, in theory + * each different virtual device can have a different + * OS shim function for accepting rx monitor data. The + * netbufs delivered to the osif_rx_mon function are in + * 802.11 format. Each netbuf holds a 802.11 MPDU, not + * an 802.11 MSDU. Depending on compile-time + * configuration, each netbuf may also have a + * monitor-mode encapsulation header such as a radiotap + * header added before the MPDU contents. + * @rx.std - the OS shim rx function to deliver rx data + * @proxy_arp - proxy arp function pointer - specified by + * OS shim, stored by txrx + * @get_key - function pointer to get key of the peer with + * specific key index + */ +struct ol_txrx_ops { + /* tx function pointers - specified by txrx, stored by OS shim */ + struct { + ol_txrx_tx_fp tx; + ol_txrx_tx_exc_fp tx_exception; + ol_txrx_tx_free_ext_fp tx_free_ext; + ol_txrx_completion_fp tx_comp; + } tx; + + /* rx function pointers - specified by OS shim, stored by txrx */ + struct { + ol_txrx_rx_fp rx; + ol_txrx_rx_check_wai_fp wai_check; + ol_txrx_rx_mon_fp mon; + ol_txrx_stats_rx_fp stats_rx; + ol_txrx_rsim_rx_decap_fp rsim_rx_decap; + } rx; + + /* proxy arp function pointer - specified by OS shim, stored by txrx */ + ol_txrx_proxy_arp_fp proxy_arp; + ol_txrx_mcast_me_fp me_convert; + + ol_txrx_get_key_fp get_key; +}; + +/** + * ol_txrx_stats_req - specifications of the requested + * statistics + */ +struct ol_txrx_stats_req { + uint32_t stats_type_upload_mask; /* which stats to upload */ + uint32_t stats_type_reset_mask; /* which stats to reset */ + + /* stats will be 
printed if either print element is set */ + struct { + int verbose; /* verbose stats printout */ + int concise; /* concise stats printout (takes precedence) */ + } print; /* print uploaded stats */ + + /* stats notify callback will be invoked if fp is non-NULL */ + struct { + ol_txrx_stats_callback fp; + void *ctxt; + } callback; + + /* stats will be copied into the specified buffer if buf is non-NULL */ + struct { + uint8_t *buf; + int byte_limit; /* don't copy more than this */ + } copy; + + /* + * If blocking is true, the caller will take the specified semaphore + * to wait for the stats to be uploaded, and the driver will release + * the semaphore when the stats are done being uploaded. + */ + struct { + int blocking; + /*Note: this needs to change to some qdf_* type */ + qdf_semaphore_t *sem_ptr; + } wait; +}; + + +/* DP soc struct definition */ +struct cdp_soc_t { + struct cdp_ops *ops; + struct ol_if_ops *ol_ops; +}; + +/* + * cdp_pdev_param_type: different types of parameters + * to set values in pdev + * @CDP_CONFIG_DEBUG_SNIFFER: Enable debug sniffer feature + */ +enum cdp_pdev_param_type { + CDP_CONFIG_DEBUG_SNIFFER, +}; + +/* + * cdp_vdev_param_type: different types of parameters + * to set values in vdev + * @CDP_ENABLE_NAWDS: set nawds enable/disable + * @CDP_ENABLE_MCAST_EN: enable/disable multicast enhancement + * @CDP_ENABLE_WDS: wds sta + * @CDP_ENABLE_PROXYSTA: proxy sta + * @CDP_UPDATE_TDLS_FLAGS: tdls link flags + * @CDP_ENABLE_AP_BRIDGE: set ap_bridging enable/disable + * @CDP_ENABLE_CIPHER : set cipher type based on security + * @CDP_ENABLE_QWRAP_ISOLATION: qwrap isolation mode + */ +enum cdp_vdev_param_type { + CDP_ENABLE_NAWDS, + CDP_ENABLE_MCAST_EN, + CDP_ENABLE_WDS, + CDP_ENABLE_PROXYSTA, + CDP_UPDATE_TDLS_FLAGS, + CDP_CFG_WDS_AGING_TIMER, + CDP_ENABLE_AP_BRIDGE, + CDP_ENABLE_CIPHER, + CDP_ENABLE_QWRAP_ISOLATION +}; + +#define TXRX_FW_STATS_TXSTATS 1 +#define TXRX_FW_STATS_RXSTATS 2 +#define TXRX_FW_STATS_RX_RATE_INFO 3 +#define 
TXRX_FW_STATS_PHYSTATS 4 +#define TXRX_FW_STATS_PHYSTATS_CONCISE 5 +#define TXRX_FW_STATS_TX_RATE_INFO 6 +#define TXRX_FW_STATS_TID_STATE 7 +#define TXRX_FW_STATS_HOST_STATS 8 +#define TXRX_FW_STATS_CLEAR_HOST_STATS 9 +#define TXRX_FW_STATS_CE_STATS 10 +#define TXRX_FW_STATS_VOW_UMAC_COUNTER 11 +#define TXRX_FW_STATS_ME_STATS 12 +#define TXRX_FW_STATS_TXBF_INFO 13 +#define TXRX_FW_STATS_SND_INFO 14 +#define TXRX_FW_STATS_ERROR_INFO 15 +#define TXRX_FW_STATS_TX_SELFGEN_INFO 16 +#define TXRX_FW_STATS_TX_MU_INFO 17 +#define TXRX_FW_SIFS_RESP_INFO 18 +#define TXRX_FW_RESET_STATS 19 +#define TXRX_FW_MAC_WDOG_STATS 20 +#define TXRX_FW_MAC_DESC_STATS 21 +#define TXRX_FW_MAC_FETCH_MGR_STATS 22 +#define TXRX_FW_MAC_PREFETCH_MGR_STATS 23 +#define TXRX_FW_STATS_DURATION_INFO 24 +#define TXRX_FW_STATS_DURATION_INFO_RESET 25 +#define TXRX_FW_HALPHY_STATS 26 +#define TXRX_FW_COEX_STATS 27 + +#define PER_RADIO_FW_STATS_REQUEST 0 +#define PER_VDEV_FW_STATS_REQUEST 1 + +/** + * enum data_stall_log_event_indicator - Module triggering data stall + * @DATA_STALL_LOG_INDICATOR_UNUSED: Unused + * @DATA_STALL_LOG_INDICATOR_HOST_DRIVER: Host driver indicates data stall + * @DATA_STALL_LOG_INDICATOR_FIRMWARE: FW indicates data stall + * @DATA_STALL_LOG_INDICATOR_FRAMEWORK: Framework indicates data stall + * + * Enum indicating the module that indicates data stall event + */ +enum data_stall_log_event_indicator { + DATA_STALL_LOG_INDICATOR_UNUSED, + DATA_STALL_LOG_INDICATOR_HOST_DRIVER, + DATA_STALL_LOG_INDICATOR_FIRMWARE, + DATA_STALL_LOG_INDICATOR_FRAMEWORK, +}; + +/** + * enum data_stall_log_event_type - data stall event type + * @DATA_STALL_LOG_NONE + * @DATA_STALL_LOG_FW_VDEV_PAUSE + * @DATA_STALL_LOG_HWSCHED_CMD_FILTER + * @DATA_STALL_LOG_HWSCHED_CMD_FLUSH + * @DATA_STALL_LOG_FW_RX_REFILL_FAILED + * @DATA_STALL_LOG_FW_RX_FCS_LEN_ERROR + * @DATA_STALL_LOG_FW_WDOG_ERRORS + * @DATA_STALL_LOG_BB_WDOG_ERROR + * @DATA_STALL_LOG_POST_TIM_NO_TXRX_ERROR + * @DATA_STALL_LOG_HOST_STA_TX_TIMEOUT 
+ * @DATA_STALL_LOG_HOST_SOFTAP_TX_TIMEOUT + * @DATA_STALL_LOG_NUD_FAILURE + * + * Enum indicating data stall event type + */ +enum data_stall_log_event_type { + DATA_STALL_LOG_NONE, + DATA_STALL_LOG_FW_VDEV_PAUSE, + DATA_STALL_LOG_HWSCHED_CMD_FILTER, + DATA_STALL_LOG_HWSCHED_CMD_FLUSH, + DATA_STALL_LOG_FW_RX_REFILL_FAILED, + DATA_STALL_LOG_FW_RX_FCS_LEN_ERROR, + DATA_STALL_LOG_FW_WDOG_ERRORS, + DATA_STALL_LOG_BB_WDOG_ERROR, + DATA_STALL_LOG_POST_TIM_NO_TXRX_ERROR, + /* Stall events triggered by host/framework start from 0x100 onwards. */ + DATA_STALL_LOG_HOST_STA_TX_TIMEOUT = 0x100, + DATA_STALL_LOG_HOST_SOFTAP_TX_TIMEOUT, + DATA_STALL_LOG_NUD_FAILURE, +}; + +/** + * enum data_stall_log_recovery_type - data stall recovery type + * @DATA_STALL_LOG_RECOVERY_NONE, + * @DATA_STALL_LOG_RECOVERY_CONNECT_DISCONNECT, + * @DATA_STALL_LOG_RECOVERY_TRIGGER_PDR + * + * Enum indicating data stall recovery type + */ +enum data_stall_log_recovery_type { + DATA_STALL_LOG_RECOVERY_NONE = 0, + DATA_STALL_LOG_RECOVERY_CONNECT_DISCONNECT, + DATA_STALL_LOG_RECOVERY_TRIGGER_PDR, +}; + +/** + * struct data_stall_event_info - data stall info + * @indicator: Module triggering data stall + * @data_stall_type: data stall event type + * @vdev_id_bitmap: vdev_id_bitmap + * @pdev_id: pdev id + * @recovery_type: data stall recovery type + */ +struct data_stall_event_info { + uint32_t indicator; + uint32_t data_stall_type; + uint32_t vdev_id_bitmap; + uint32_t pdev_id; + uint32_t recovery_type; +}; + +typedef void (*data_stall_detect_cb)(struct data_stall_event_info *); + +/* + * cdp_stats - options for host and firmware + * statistics +*/ +enum cdp_stats { + CDP_TXRX_STATS_0 = 0, + CDP_TXRX_STATS_1, + CDP_TXRX_STATS_2, + CDP_TXRX_STATS_3, + CDP_TXRX_STATS_4, + CDP_TXRX_STATS_5, + CDP_TXRX_STATS_6, + CDP_TXRX_STATS_7, + CDP_TXRX_STATS_8, + CDP_TXRX_STATS_9, + CDP_TXRX_STATS_10, + CDP_TXRX_STATS_11, + CDP_TXRX_STATS_12, + CDP_TXRX_STATS_13, + CDP_TXRX_STATS_14, + CDP_TXRX_STATS_15, + 
CDP_TXRX_STATS_16, + CDP_TXRX_STATS_17, + CDP_TXRX_STATS_18, + CDP_TXRX_STATS_19, + CDP_TXRX_STATS_20, + CDP_TXRX_STATS_21, + CDP_TXRX_STATS_22, + CDP_TXRX_STATS_23, + CDP_TXRX_STATS_24, + CDP_TXRX_STATS_25, + CDP_TXRX_STATS_26, + CDP_TXRX_STATS_27, + CDP_TXRX_STATS_HTT_MAX = 256, + CDP_TXRX_MAX_STATS = 512, +}; + +/* + * Different Stat update types sent to OL_IF + * @UPDATE_PEER_STATS: update peer stats + * @UPDATE_VDEV_STATS: update vdev stats + * @UPDATE_PDE_STATS: Update pdev stats + */ +enum cdp_stat_update_type { + UPDATE_PEER_STATS = 0, + UPDATE_VDEV_STATS = 1, + UPDATE_PDEV_STATS = 2, +}; + +/** + * struct cdp_tx_completion_ppdu_user - Tx PPDU completion per-user information + * @completion_status: completion status - OK/Filter/Abort/Timeout + * @tid: TID number + * @peer_id: Peer ID + * @frame_ctrl: frame control field in 802.11 header + * @qos_ctrl: QoS control field in 802.11 header + * @mpdu_tried: number of mpdus tried + * @mpdu_success: number of mpdus successfully transmitted + * @long_retries: long retries + * @short_retries: short retries + * @is_ampdu: mpdu aggregate or non-aggregate? 
+ * @success_bytes: bytes successfully transmitted + * @retry_bytes: bytes retried + * @failed_msdus: MSDUs failed transmission + * @duration: user duration in ppdu + * @ltf_size: ltf_size + * @stbc: stbc + * @he_re: he_re (range extension) + * @txbf: txbf + * @bw: Transmission bandwidth + * + * + * + * + * @nss: NSS 1,2, ...8 + * @mcs: MCS index + * @preamble: preamble + * @gi: guard interval 800/400/1600/3200 ns + * @dcm: dcm + * @ldpc: ldpc + * @ppdu_type: SU/MU_MIMO/MU_OFDMA/MU_MIMO_OFDMA/UL_TRIG/BURST_BCN/UL_BSR_RESP/ + * UL_BSR_TRIG/UNKNOWN + * @ba_seq_no: Block Ack sequence number + * @ba_bitmap: Block Ack bitmap + * @start_seqa: Sequence number of first MPDU + * @enq_bitmap: Enqueue MPDU bitmap + * @is_mcast: MCAST or UCAST + * @tx_rate: Transmission Rate + */ +struct cdp_tx_completion_ppdu_user { + uint32_t completion_status:8, + tid:8, + peer_id:16; + uint8_t mac_addr[6]; + uint32_t frame_ctrl:16, + qos_ctrl:16; + uint32_t mpdu_tried_ucast:16, + mpdu_tried_mcast:16; + uint16_t mpdu_success:16; + uint32_t long_retries:4, + short_retries:4, + tx_ratecode:8, + is_ampdu:1, + ppdu_type:5; + uint32_t success_bytes; + uint32_t retry_bytes; + uint32_t failed_bytes; + uint32_t success_msdus:16, + retry_msdus:16; + uint32_t failed_msdus:16, + duration:16; + uint32_t ltf_size:2, + stbc:1, + he_re:1, + txbf:4, + bw:4, + nss:4, + mcs:4, + preamble:4, + gi:4, + dcm:1, + ldpc:1; + uint32_t ba_seq_no; + uint32_t ba_bitmap[CDP_BA_256_BIT_MAP_SIZE_DWORDS]; + uint32_t start_seq; + uint32_t enq_bitmap[CDP_BA_256_BIT_MAP_SIZE_DWORDS]; + uint32_t num_mpdu:9, + num_msdu:16; + uint32_t tx_duration; + uint16_t ru_tones; + bool is_mcast; + uint32_t tx_rate; +}; + +/** + * struct cdp_tx_completion_ppdu - Tx PPDU completion information + * @completion_status: completion status - OK/Filter/Abort/Timeout + * @ppdu_id: PPDU Id + * @vdev_id: VAP Id + * @num_users: Number of users + * @num_mpdu: Number of MPDUs in PPDU + * @num_msdu: Number of MSDUs in PPDU + * @frame_type: frame SU or 
MU + * @frame_ctrl: frame control of 80211 header + * @channel: Channel informartion + * @ack_rssi: RSSI value of last ack packet (units=dB above noise floor) + * @tx_duration: PPDU airtime + * @ppdu_start_timestamp: TSF at PPDU start + * @ppdu_end_timestamp: TSF at PPDU end + * @ack_timestamp: TSF at the reception of ACK + * @user: per-User stats (array of per-user structures) + */ +struct cdp_tx_completion_ppdu { + uint32_t ppdu_id; + uint16_t vdev_id; + uint32_t num_users; + uint32_t num_mpdu:9, + num_msdu:16; + uint16_t frame_type; + uint16_t frame_ctrl; + uint16_t channel; + uint16_t phy_mode; + uint32_t ack_rssi; + uint32_t tx_duration; + uint32_t ppdu_start_timestamp; + uint32_t ppdu_end_timestamp; + uint32_t ack_timestamp; + struct cdp_tx_completion_ppdu_user user[CDP_MU_MAX_USERS]; +}; + +/** + * struct cdp_dev_stats - Network device stats structure + * @tx_packets: Tx total packets transmitted + * @tx_bytes : Tx total bytes transmitted + * @tx_errors : Tx error due to FW tx failure, Ring failure DMA etc + * @tx_dropped: Tx dropped is same as tx errors as above + * @rx_packets: Rx total packets transmitted + * @rx_bytes : Rx total bytes transmitted + */ +struct cdp_dev_stats { + uint32_t tx_packets; + uint32_t tx_bytes; + uint32_t tx_errors; + uint32_t tx_dropped; + uint32_t rx_packets; + uint32_t rx_bytes; +}; + +/** + * struct cdp_rate_stats - Tx/Rx Rate statistics + * @bw: Indicates the BW of the upcoming transmission - + * + * + * + * + * @pkt_type: Transmit Packet Type + * @stbc: When set, STBC transmission rate was used + * @ldpc: When set, use LDPC transmission rates + * @sgi: Legacy normal GI + * Legacy short GI + * HE related GI + * HE + * @mcs: Transmit MCS Rate + * @ofdma: Set when the transmission was an OFDMA transmission + * @tones_in_ru: The number of tones in the RU used. 
+ * @tsf: Lower 32 bits of the TSF (timestamp when ppdu transmission finished) + * @peer_id: Peer ID of the flow or MPDU queue + * @tid: TID of the flow or MPDU queue + */ +struct cdp_rate_stats { + uint32_t rate_stats_info_valid:1, + bw:2, + pkt_type:4, + stbc:1, + ldpc:1, + sgi:2, + mcs:4, + ofdma:1, + tones_in_ru:12, + resvd0:4; + uint32_t tsf; + uint16_t peer_id; + uint8_t tid; +}; + +/** + * struct cdp_tx_completion_msdu - Tx MSDU completion descriptor + * @ppdu_id: PPDU to which this MSDU belongs + * @transmit_cnt: Number of times this frame has been transmitted + * @ack_frame_rssi: RSSI of the received ACK or BA frame + * @first_msdu: Indicates this MSDU is the first MSDU in AMSDU + * @last_msdu: Indicates this MSDU is the last MSDU in AMSDU + * @msdu_part_of_amsdu : Indicates this MSDU was part of an A-MSDU in MPDU + * @extd: Extended structure containing rate statistics + */ +struct cdp_tx_completion_msdu { + uint32_t ppdu_id; + uint8_t transmit_cnt; + uint32_t ack_frame_rssi:8, + resvd0:1, + first_msdu:1, + last_msdu:1, + msdu_part_of_amsdu:1, + resvd1:20; + struct cdp_rate_stats extd; +}; + +/** + * struct cdp_rx_indication_ppdu - Rx PPDU indication structure + * @ppdu_id: PPDU Id + * @is_ampdu: mpdu aggregate or non-aggregate? 
+ * @num_mpdu: Number of MPDUs in PPDU + * @reserved: Reserved bits for future use + * @num_msdu: Number of MSDUs in PPDU + * @udp_msdu_count: Number of UDP MSDUs in PPDU + * @tcp_msdu_count: Number of TCP MSDUs in PPDU + * @other_msdu_count: Number of MSDUs other than UDP and TCP MSDUs in PPDU + * @duration: PPDU duration + * @tid: TID number + * @peer_id: Peer ID + * @vdev_id: VAP ID + * @mac_addr: Peer MAC Address + * @first_data_seq_ctrl: Sequence control field of first data frame + * @ltf_size: ltf_size + * @stbc: When set, STBC rate was used + * @he_re: he_re (range extension) + * @bw: Bandwidth + * + * + * + * + * @nss: NSS 1,2, ...8 + * @mcs: MCS index + * @preamble: preamble + * @gi: Legacy normal GI + * Legacy short GI + * HE related GI + * HE + * @dcm: dcm + * @ldpc: ldpc + * @ppdu_type: SU/MU_MIMO/MU_OFDMA/MU_MIMO_OFDMA/UL_TRIG/BURST_BCN/UL_BSR_RESP/ + * UL_BSR_TRIG/UNKNOWN + * @rssi: RSSI value (units = dB above noise floor) + * @timestamp: TSF at the reception of PPDU + * @length: PPDU length + * @channel: Channel informartion + * @lsig_A: L-SIG in 802.11 PHY header + */ +struct cdp_rx_indication_ppdu { + uint32_t ppdu_id; + uint16_t is_ampdu:1, + num_mpdu:9, + reserved:6; + uint32_t num_msdu; + uint16_t udp_msdu_count; + uint16_t tcp_msdu_count; + uint16_t other_msdu_count; + uint16_t duration; + uint32_t tid:8, + peer_id:16; + uint8_t vdev_id; + uint8_t mac_addr[6]; + uint16_t first_data_seq_ctrl; + union { + uint32_t rate_info; + struct { + uint32_t ltf_size:2, + stbc:1, + he_re:1, + bw:4, + nss:4, + mcs:4, + preamble:4, + gi:4, + dcm:1, + ldpc:1, + ppdu_type:2; + }; + } u; + uint32_t lsig_a; + uint32_t rssi; + uint64_t timestamp; + uint32_t length; + uint8_t channel; + uint8_t beamformed; +}; + +/** + * struct cdp_rx_indication_msdu - Rx MSDU info + * @ppdu_id: PPDU to which the MSDU belongs + * @msdu_len: Length of MSDU in bytes + * @ack_frame_rssi: RSSI of the received ACK or BA frame + * @first_msdu: Indicates this MSDU is the first MSDU in 
AMSDU + * @last_msdu: Indicates this MSDU is the last MSDU in AMSDU + * @msdu_part_of_amsdu : Indicates this MSDU was part of an A-MSDU in MPDU + * @extd: Extended structure containing rate statistics + */ +struct cdp_rx_indication_msdu { + uint32_t ppdu_id; + uint16_t msdu_len; + uint32_t ack_frame_rssi:8, + resvd0:1, + first_msdu:1, + last_msdu:1, + msdu_part_of_amsdu:1, + msdu_part_of_ampdu:1, + resvd1:19; + struct cdp_rate_stats extd; +}; + +/** + * struct cdp_config_params - Propagate configuration parameters to datapath + * @tso_enable: Enable/Disable TSO + * @lro_enable: Enable/Disable LRO + * @flow_steering_enable: Enable/Disable Rx Hash + * @tcp_Udp_ChecksumOffload: Enable/Disable tcp-Udp checksum Offload + * @napi_enable: Enable/Disable Napi + * @tx_flow_stop_queue_threshold: Value to Pause tx queues + * @tx_flow_start_queue_offset: Available Tx descriptors to unpause + * tx queue + */ +struct cdp_config_params { + unsigned int tso_enable:1; + unsigned int lro_enable:1; + unsigned int flow_steering_enable:1; + unsigned int tcp_udp_checksumoffload:1; + unsigned int napi_enable:1; + /* Set when QCA_LL_TX_FLOW_CONTROL_V2 is enabled */ + uint8_t tx_flow_stop_queue_threshold; + uint8_t tx_flow_start_queue_offset; +}; + +/** + * cdp_txrx_stats_req: stats request wrapper + * used to pass request information to cdp layer + * @stats: type of stats requested + * @param0: opaque argument 0 to be passed to htt + * @param1: opaque argument 1 to be passed to htt + * @param2: opaque argument 2 to be passed to htt + * @param3: opaque argument 3 to be passed to htt + * @mac id: mac_id + */ +struct cdp_txrx_stats_req { + enum cdp_stats stats; + uint32_t param0; + uint32_t param1; + uint32_t param2; + uint32_t param3; + uint32_t cookie_val; + uint8_t mac_id; +}; + +/** + * struct cdp_monitor_filter - monitor filter info + * @mode: set filter mode + * @fp_mgmt: set Filter Pass MGMT Configuration + * @fp_ctrl: set Filter Pass CTRL Configuration + * @fp_data: set Filter Pass 
DATA Configuration + * @mo_mgmt: set Monitor Other MGMT Configuration + * @mo_ctrl: set Monitor Other CTRL Configuration + * @mo_data: set Monitor other DATA Configuration + * + */ +struct cdp_monitor_filter { + uint16_t mode; + uint16_t fp_mgmt; + uint16_t fp_ctrl; + uint16_t fp_data; + uint16_t mo_mgmt; + uint16_t mo_ctrl; + uint16_t mo_data; +}; +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ctrl.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..aec17fc1e28984481433e2afaa040cdd82582ae5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ctrl.h @@ -0,0 +1,763 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * @file cdp_txrx_ctrl.h + * @brief Define the host data path control API functions + * called by the host control SW and the OS interface module + */ + +#ifndef _CDP_TXRX_CTRL_H_ +#define _CDP_TXRX_CTRL_H_ +#include "cdp_txrx_handle.h" +#include "cdp_txrx_cmn_struct.h" +#include "cdp_txrx_ops.h" + +static inline int cdp_is_target_ar900b + (ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_is_target_ar900b) + return 0; + + return soc->ops->ctrl_ops->txrx_is_target_ar900b(vdev); +} + + +/* WIN */ +static inline int +cdp_mempools_attach(ol_txrx_soc_handle soc, void *ctrl_pdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_mempools_attach) + return 0; + + return soc->ops->ctrl_ops->txrx_mempools_attach(ctrl_pdev); +} + +/** + * @brief set filter neighbour peers + * @details + * This defines interface function to set neighbour peer filtering. 
+ * + * @param soc - the pointer to soc object + * @param pdev - the pointer physical device object + * @param val - the enable/disable value + * @return - int + */ +static inline int +cdp_set_filter_neighbour_peers(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, u_int32_t val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_filter_neighbour_peers) + return 0; + + return soc->ops->ctrl_ops->txrx_set_filter_neighbour_peers + (pdev, val); +} + +/** + * @brief update the neighbour peer addresses + * @details + * This defines interface function to update neighbour peers addresses + * which needs to be filtered + * + * @param soc - the pointer to soc object + * @param pdev - the pointer to physical device object + * @param cmd - add/del entry into peer table + * @param macaddr - the address of neighbour peer + * @return - int + */ +static inline int +cdp_update_filter_neighbour_peers(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, uint32_t cmd, uint8_t *macaddr) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_update_filter_neighbour_peers) + return 0; + + return soc->ops->ctrl_ops->txrx_update_filter_neighbour_peers + (pdev, cmd, macaddr); +} + +/** + * @brief set the safemode of the device + * @details + * This flag is used to bypass the encrypt and decrypt processes when send and + * receive packets. It works like open AUTH mode, HW will treate all packets + * as non-encrypt frames because no key installed. For rx fragmented frames, + * it bypasses all the rx defragmentaion. 
+ * + * @param vdev - the data virtual device object + * @param val - the safemode state + * @return - void + */ +static inline void +cdp_set_safemode(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, u_int32_t val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_safemode) + return; + + soc->ops->ctrl_ops->txrx_set_safemode(vdev, val); +} +/** + * @brief configure the drop unencrypted frame flag + * @details + * Rx related. When set this flag, all the unencrypted frames + * received over a secure connection will be discarded + * + * @param vdev - the data virtual device object + * @param val - flag + * @return - void + */ +static inline void +cdp_set_drop_unenc(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, u_int32_t val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_drop_unenc) + return; + + soc->ops->ctrl_ops->txrx_set_drop_unenc(vdev, val); +} + + +/** + * @brief set the Tx encapsulation type of the VDEV + * @details + * This will be used to populate the HTT desc packet type field during Tx + * + * @param vdev - the data virtual device object + * @param val - the Tx encap type (htt_cmn_pkt_type) + * @return - void + */ +static inline void +cdp_set_tx_encap_type(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, enum htt_cmn_pkt_type val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_tx_encap_type) + return; + + soc->ops->ctrl_ops->txrx_set_tx_encap_type(vdev, val); +} + +/** + * @brief set the Rx decapsulation type of the VDEV + * @details + * This will be used to configure 
into firmware and hardware which format to + * decap all Rx packets into, for all peers under the VDEV. + * + * @param vdev - the data virtual device object + * @param val - the Rx decap mode (htt_cmn_pkt_type) + * @return - void + */ +static inline void +cdp_set_vdev_rx_decap_type(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, enum htt_cmn_pkt_type val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_vdev_rx_decap_type) + return; + + soc->ops->ctrl_ops->txrx_set_vdev_rx_decap_type + (vdev, val); +} + +/** + * @brief get the Rx decapsulation type of the VDEV + * + * @param vdev - the data virtual device object + * @return - the Rx decap type (htt_cmn_pkt_type) + */ +static inline enum htt_cmn_pkt_type +cdp_get_vdev_rx_decap_type(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_get_vdev_rx_decap_type) + return 0; + + return soc->ops->ctrl_ops->txrx_get_vdev_rx_decap_type(vdev); +} + +/** + * @brief set the Reo Destination ring for the pdev + * @details + * This will be used to configure the Reo Destination ring for this pdev. 
+ * + * @param soc - pointer to the soc + * @param pdev - the data physical device object + * @param val - the Reo destination ring index (1 to 4) + * @return - void + */ +static inline void +cdp_set_pdev_reo_dest(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, enum cdp_host_reo_dest_ring val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_pdev_reo_dest) + return; + + soc->ops->ctrl_ops->txrx_set_pdev_reo_dest + (pdev, val); +} + +/** + * @brief get the Reo Destination ring for the pdev + * + * @param soc - pointer to the soc + * @param pdev - the data physical device object + * @return - the Reo destination ring index (1 to 4), 0 if not supported. + */ +static inline enum cdp_host_reo_dest_ring +cdp_get_pdev_reo_dest(ol_txrx_soc_handle soc, struct cdp_pdev *pdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return cdp_host_reo_dest_ring_unknown; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_get_pdev_reo_dest) + return cdp_host_reo_dest_ring_unknown; + + return soc->ops->ctrl_ops->txrx_get_pdev_reo_dest(pdev); +} + +/* Is this similar to ol_txrx_peer_state_update() in MCL */ +/** + * @brief Update the authorize peer object at association time + * @details + * For the host-based implementation of rate-control, it + * updates the peer/node-related parameters within rate-control + * context of the peer at association. 
+ * + * @param peer - pointer to the node's object + * @authorize - either to authorize or unauthorize peer + * + * @return none + */ +static inline void +cdp_peer_authorize(ol_txrx_soc_handle soc, + struct cdp_peer *peer, u_int32_t authorize) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_peer_authorize) + return; + + soc->ops->ctrl_ops->txrx_peer_authorize + (peer, authorize); +} + +static inline bool +cdp_set_inact_params(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + u_int16_t inact_check_interval, + u_int16_t inact_normal, + u_int16_t inact_overload) +{ + if (!soc || !pdev || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return false; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_inact_params) + return false; + + return soc->ops->ctrl_ops->txrx_set_inact_params + (pdev, inact_check_interval, inact_normal, + inact_overload); +} + +static inline bool +cdp_start_inact_timer(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, + bool enable) +{ + if (!soc || !pdev || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return false; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_start_inact_timer) + return false; + + return soc->ops->ctrl_ops->txrx_start_inact_timer + (pdev, enable); +} + +/** + * @brief Set the overload status of the radio + * @details + * Set the overload status of the radio, updating the inactivity + * threshold and inactivity count for each node. 
+ * + * @param pdev - the data physical device object + * @param overload - whether the radio is overloaded or not + */ +static inline void +cdp_set_overload(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + bool overload) +{ + if (!soc || !pdev || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_overload) + return; + + soc->ops->ctrl_ops->txrx_set_overload(pdev, overload); +} + +/** + * @brief Check the inactivity status of the peer/node + * + * @param peer - pointer to the node's object + * @return true if the node is inactive; otherwise return false + */ +static inline bool +cdp_peer_is_inact(ol_txrx_soc_handle soc, void *peer) +{ + if (!soc || !peer || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return false; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_peer_is_inact) + return false; + + return soc->ops->ctrl_ops->txrx_peer_is_inact(peer); +} + +/** + * @brief Mark inactivity status of the peer/node + * @details + * If it becomes active, reset inactivity count to reload value; + * if the inactivity status changed, notify umac band steering. 
+ * + * @param peer - pointer to the node's object + * @param inactive - whether the node is inactive or not + */ +static inline void +cdp_mark_peer_inact(ol_txrx_soc_handle soc, + void *peer, + bool inactive) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_mark_peer_inact) + return; + + soc->ops->ctrl_ops->txrx_mark_peer_inact + (peer, inactive); +} + + +/* Should be ol_txrx_ctrl_api.h */ +static inline void cdp_set_mesh_mode +(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, u_int32_t val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_mesh_mode) + return; + + soc->ops->ctrl_ops->txrx_set_mesh_mode(vdev, val); +} + +/** + * @brief set mesh rx filter + * @details based on the bits enabled in the filter packets has to be dropped. 
+ * + * @param soc - pointer to the soc + * @param vdev - the data virtual device object + * @param val - value to be set + * @return - void + */ +static inline +void cdp_set_mesh_rx_filter(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, uint32_t val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_mesh_rx_filter) + return; + + soc->ops->ctrl_ops->txrx_set_mesh_rx_filter(vdev, val); +} + +static inline void cdp_tx_flush_buffers +(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->tx_flush_buffers) + return; + + soc->ops->ctrl_ops->tx_flush_buffers(vdev); +} + +static inline void cdp_txrx_set_vdev_param(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, enum cdp_vdev_param_type type, + uint32_t val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_vdev_param) + return; + + soc->ops->ctrl_ops->txrx_set_vdev_param(vdev, type, val); +} + +static inline void +cdp_peer_set_nawds(ol_txrx_soc_handle soc, + struct cdp_peer *peer, uint8_t value) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_peer_set_nawds) + return; + + soc->ops->ctrl_ops->txrx_peer_set_nawds + (peer, value); +} + +static inline void cdp_txrx_set_pdev_param(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, enum cdp_pdev_param_type type, + uint8_t val) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, 
QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_set_pdev_param) + return; + + soc->ops->ctrl_ops->txrx_set_pdev_param + (pdev, type, val); +} + +/** + * @brief Subscribe to a specified WDI event. + * @details + * This function adds the provided wdi_event_subscribe object to a list of + * subscribers for the specified WDI event. + * When the event in question happens, each subscriber for the event will + * have their callback function invoked. + * The order in which callback functions from multiple subscribers are + * invoked is unspecified. + * + * @param soc - pointer to the soc + * @param pdev - the data physical device object + * @param event_cb_sub - the callback and context for the event subscriber + * @param event - which event's notifications are being subscribed to + * @return - int + */ +static inline int +cdp_wdi_event_sub(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, void *event_cb_sub, uint32_t event) +{ + + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_wdi_event_sub) + return 0; + + return soc->ops->ctrl_ops->txrx_wdi_event_sub + (pdev, event_cb_sub, event); +} + +/** + * @brief Unsubscribe from a specified WDI event. + * @details + * This function removes the provided event subscription object from the + * list of subscribers for its event. + * This function shall only be called if there was a successful prior call + * to event_sub() on the same wdi_event_subscribe object. 
+ * + * @param soc - pointer to the soc + * @param pdev - the data physical device object + * @param event_cb_sub - the callback and context for the event subscriber + * @param event - which event's notifications are being subscribed to + * @return - int + */ +static inline int +cdp_wdi_event_unsub(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, void *event_cb_sub, uint32_t event) +{ + + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_wdi_event_unsub) + return 0; + + return soc->ops->ctrl_ops->txrx_wdi_event_unsub + (pdev, event_cb_sub, event); +} + +/** + * @brief Get security type from the from peer. + * @details + * This function gets the Security information from the peer handler. + * The security information is got from the rx descriptor and filled in + * to the peer handler. + * + * @param soc - pointer to the soc + * @param peer - peer handler + * @param sec_idx - mcast or ucast frame type. 
+ * @return - int + */ +static inline int +cdp_get_sec_type(ol_txrx_soc_handle soc, struct cdp_peer *peer, uint8_t sec_idx) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return A_ERROR; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_get_sec_type) + return A_ERROR; + + return soc->ops->ctrl_ops->txrx_get_sec_type + (peer, sec_idx); +} + +/** + * cdp_set_mgmt_tx_power(): function to set tx power for mgmt frames + * @vdev_handle: vdev handle + * @subtype_index: subtype + * @tx_power: Tx power + * Return: None + */ +static inline int cdp_set_mgmt_tx_power(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, uint8_t subtype, uint8_t tx_power) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_update_mgmt_txpow_vdev) + return 0; + + soc->ops->ctrl_ops->txrx_update_mgmt_txpow_vdev(vdev, + subtype, tx_power); + return 0; +} + +static inline void * +cdp_get_pldev(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + QDF_BUG(0); + return NULL; + } + + if (!soc->ops->ctrl_ops || !soc->ops->ctrl_ops->txrx_get_pldev) + return NULL; + + return soc->ops->ctrl_ops->txrx_get_pldev(pdev); +} + +#ifdef ATH_SUPPORT_NAC_RSSI +/** + * cdp_vdev_config_for_nac_rssi(): To invoke dp callback for nac rssi config + * @soc: soc pointer + * @vdev: vdev pointer + * @nac_cmd: specfies nac_rss config action add, del, list + * @bssid: Neighbour bssid + * @client_macaddr: Non-Associated client MAC + * @chan_num: channel number to scan + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS cdp_vdev_config_for_nac_rssi(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, enum cdp_nac_param_cmd nac_cmd, + char *bssid, char 
*client_macaddr, uint8_t chan_num) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + if (!soc->ops->ctrl_ops || + !soc->ops->ctrl_ops->txrx_vdev_config_for_nac_rssi) + return QDF_STATUS_E_FAILURE; + + return soc->ops->ctrl_ops->txrx_vdev_config_for_nac_rssi(vdev, + nac_cmd, bssid, client_macaddr, chan_num); +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ctrl_def.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ctrl_def.h new file mode 100644 index 0000000000000000000000000000000000000000..d91e7cce06025430a26e2bde7bf374b72092cd7a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ctrl_def.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2011-2016,2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + + /** + * @file cdp_txrx_ctrl.h + * @brief Define the host data path control API functions + * called by the host control SW and the OS interface module + */ + +#ifndef _CDP_TXRX_CTRL_DEF_H_ +#define _CDP_TXRX_CTRL_DEF_H_ +/* TODO: adf need to be replaced with qdf */ +/* + * Cleanups -- Might need cleanup + */ +#if !QCA_OL_TX_PDEV_LOCK && QCA_NSS_PLATFORM || \ + (defined QCA_PARTNER_PLATFORM && QCA_PARTNER_SUPPORT_FAST_TX) +#define VAP_TX_SPIN_LOCK(_x) spin_lock(_x) +#define VAP_TX_SPIN_UNLOCK(_x) spin_unlock(_x) +#else /* QCA_OL_TX_PDEV_LOCK */ +#define VAP_TX_SPIN_LOCK(_x) +#define VAP_TX_SPIN_UNLOCK(_x) +#endif /* QCA_OL_TX_PDEV_LOCK */ + +#if QCA_OL_TX_PDEV_LOCK +void ol_ll_pdev_tx_lock(void *); +void ol_ll_pdev_tx_unlock(void *); +#define OL_TX_LOCK(_x) ol_ll_pdev_tx_lock(_x) +#define OL_TX_UNLOCK(_x) ol_ll_pdev_tx_unlock(_x) + +#define OL_TX_PDEV_LOCK(_x) qdf_spin_lock_bh(_x) +#define OL_TX_PDEV_UNLOCK(_x) qdf_spin_unlock_bh(_x) +#else +#define OL_TX_PDEV_LOCK(_x) +#define OL_TX_PDEV_UNLOCK(_x) + +#define OL_TX_LOCK(_x) +#define OL_TX_UNLOCK(_x) +#endif /* QCA_OL_TX_PDEV_LOCK */ + +#if !QCA_OL_TX_PDEV_LOCK +#define OL_TX_FLOW_CTRL_LOCK(_x) qdf_spin_lock_bh(_x) +#define OL_TX_FLOW_CTRL_UNLOCK(_x) qdf_spin_unlock_bh(_x) + +#define OL_TX_DESC_LOCK(_x) qdf_spin_lock_bh(_x) +#define OL_TX_DESC_UNLOCK(_x) qdf_spin_unlock_bh(_x) + +#define OSIF_VAP_TX_LOCK(_y, _x) spin_lock(&((_x)->tx_lock)) +#define OSIF_VAP_TX_UNLOCK(_y, _x) spin_unlock(&((_x)->tx_lock)) + +#define OL_TX_PEER_LOCK(_x, _id) qdf_spin_lock_bh(&((_x)->peer_lock[_id])) +#define OL_TX_PEER_UNLOCK(_x, _id) qdf_spin_unlock_bh(&((_x)->peer_lock[_id])) + +#define OL_TX_PEER_UPDATE_LOCK(_x, _id) \ + qdf_spin_lock_bh(&((_x)->peer_lock[_id])) +#define OL_TX_PEER_UPDATE_UNLOCK(_x, _id) \ + qdf_spin_unlock_bh(&((_x)->peer_lock[_id])) + +#else +#define OSIF_VAP_TX_LOCK(_y, _x) cdp_vdev_tx_lock( \ + _y, wlan_vdev_get_dp_handle((_x)->os_if_vdev)) +#define OSIF_VAP_TX_UNLOCK(_y, _x) cdp_vdev_tx_unlock( \ + 
_y, wlan_vdev_get_dp_handle((_x)->os_if_vdev)) + +#define OL_TX_FLOW_CTRL_LOCK(_x) +#define OL_TX_FLOW_CTRL_UNLOCK(_x) + +#define OL_TX_DESC_LOCK(_x) +#define OL_TX_DESC_UNLOCK(_x) + +#define OL_TX_PEER_LOCK(_x, _id) +#define OL_TX_PEER_UNLOCK(_x, _id) + +#define OL_TX_PEER_UPDATE_LOCK(_x, _id) qdf_spin_lock_bh(&((_x)->tx_lock)) +#define OL_TX_PEER_UPDATE_UNLOCK(_x, _id) qdf_spin_unlock_bh(&((_x)->tx_lock)) + +#endif /* !QCA_OL_TX_PDEV_LOCK */ +#endif + diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_flow_ctrl_legacy.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_flow_ctrl_legacy.h new file mode 100644 index 0000000000000000000000000000000000000000..93faf7d54c4d1f3be616d49415ca0c71f4d3ef2f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_flow_ctrl_legacy.h @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * @file cdp_txrx_flow_ctrl_legacy.h + * @brief Define the host data path legacy flow control API + * functions + */ +#ifndef _CDP_TXRX_FC_LEG_H_ +#define _CDP_TXRX_FC_LEG_H_ +#include +#include "cdp_txrx_handle.h" + +/** + * cdp_fc_register() - Register flow control callback function pointer + * @soc - data path soc handle + * @vdev_id - virtual interface id to register flow control + * @flowControl - callback function pointer + * @osif_fc_ctx - client context pointer + * @flow_control_is_pause: is vdev paused by flow control + * + * Register flow control callback function pointer and client context pointer + * + * return 0 success + */ +static inline int +cdp_fc_register(ol_txrx_soc_handle soc, uint8_t vdev_id, + ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx, + ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->l_flowctl_ops || + !soc->ops->l_flowctl_ops->register_tx_flow_control) + return 0; + + return soc->ops->l_flowctl_ops->register_tx_flow_control( + vdev_id, flowControl, osif_fc_ctx, + flow_control_is_pause); +} + +/** + * cdp_fc_deregister() - remove flow control instance + * @soc - data path soc handle + * @vdev_id - virtual interface id to register flow control + * + * remove flow control instance + * + * return 0 success + */ +static inline int +cdp_fc_deregister(ol_txrx_soc_handle soc, uint8_t vdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->l_flowctl_ops || + !soc->ops->l_flowctl_ops->deregister_tx_flow_control_cb) + return 0; + + return soc->ops->l_flowctl_ops->deregister_tx_flow_control_cb( + vdev_id); +} + +/** + * cdp_fc_get_tx_resource() - get data path resource count + * @soc - data path soc handle + * @sta_id - local peer 
id
+ * @low_watermark - low resource threshold
+ * @high_watermark_offset - high resource threshold
+ *
+ * get data path resource count
+ *
+ * return true enough data path resource available
+ *        false resource is not available
+ */
+static inline bool
+cdp_fc_get_tx_resource(ol_txrx_soc_handle soc, uint8_t sta_id,
+		unsigned int low_watermark, unsigned int high_watermark_offset)
+{
+	if (!soc || !soc->ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
+			  "%s invalid instance", __func__);
+		QDF_BUG(0);
+		return false;
+	}
+
+	if (!soc->ops->l_flowctl_ops ||
+	    !soc->ops->l_flowctl_ops->get_tx_resource)
+		return false;
+
+	return soc->ops->l_flowctl_ops->get_tx_resource(sta_id,
+			low_watermark, high_watermark_offset);
+}
+
+/**
+ * cdp_fc_ll_set_tx_pause_q_depth() - set pause queue depth
+ * @soc - data path soc handle
+ * @vdev_id - virtual interface id to register flow control
+ * @pause_q_depth - pending tx queue depth
+ *
+ * set pause queue depth
+ *
+ * return 0 success
+ */
+static inline int
+cdp_fc_ll_set_tx_pause_q_depth(ol_txrx_soc_handle soc,
+		uint8_t vdev_id, int pause_q_depth)
+{
+	if (!soc || !soc->ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
+			  "%s invalid instance", __func__);
+		QDF_BUG(0);
+		return 0;
+	}
+
+	if (!soc->ops->l_flowctl_ops ||
+	    !soc->ops->l_flowctl_ops->ll_set_tx_pause_q_depth)
+		return 0;
+
+	return soc->ops->l_flowctl_ops->ll_set_tx_pause_q_depth(
+			vdev_id, pause_q_depth);
+}
+
+/**
+ * cdp_fc_vdev_flush() - flush tx queue
+ * @soc - data path soc handle
+ * @vdev - virtual interface context pointer
+ *
+ * flush tx queue
+ *
+ * return None
+ */
+static inline void
+cdp_fc_vdev_flush(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
+{
+	if (!soc || !soc->ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
+			  "%s invalid instance", __func__);
+		QDF_BUG(0);
+		return;
+	}
+
+	if (!soc->ops->l_flowctl_ops ||
+	    !soc->ops->l_flowctl_ops->vdev_flush)
+		return;
+
+	soc->ops->l_flowctl_ops->vdev_flush(vdev);
+} + +/** + * cdp_fc_vdev_pause() - pause tx scheduler on vdev + * @soc - data path soc handle + * @vdev - virtual interface context pointer + * @reason - pause reason + * + * pause tx scheduler on vdev + * + * return None + */ +static inline void +cdp_fc_vdev_pause(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, + uint32_t reason) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->l_flowctl_ops || + !soc->ops->l_flowctl_ops->vdev_pause) + return; + + soc->ops->l_flowctl_ops->vdev_pause(vdev, reason); +} + +/** + * cdp_fc_vdev_unpause() - resume tx scheduler on vdev + * @soc - data path soc handle + * @vdev - virtual interface context pointer + * @reason - pause reason + * + * resume tx scheduler on vdev + * + * return None + */ +static inline void +cdp_fc_vdev_unpause(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, + uint32_t reason) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + return; + } + + if (!soc->ops->l_flowctl_ops || + !soc->ops->l_flowctl_ops->vdev_unpause) + return; + + soc->ops->l_flowctl_ops->vdev_unpause(vdev, reason); +} +#endif /* _CDP_TXRX_FC_LEG_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_flow_ctrl_v2.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_flow_ctrl_v2.h new file mode 100644 index 0000000000000000000000000000000000000000..e3a9e0d3ba419ab7c1e86e6f7af1893bf93ade09 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_flow_ctrl_v2.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_flow_ctrl_v2.h + * @brief Define the host data path flow control version 2 API + * functions + */ +#ifndef _CDP_TXRX_FC_V2_H_ +#define _CDP_TXRX_FC_V2_H_ +#include + +/** + * cdp_register_pause_cb() - Register flow control callback function pointer + * @soc - data path soc handle + * @pause_cb - Pause callback intend to register + * + * Register flow control callback function pointer and client context pointer + * + * return QDF_STATUS_SUCCESS success + */ +static inline QDF_STATUS +cdp_register_pause_cb(ol_txrx_soc_handle soc, + tx_pause_callback pause_cb) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return QDF_STATUS_E_INVAL; + } + + if (!soc->ops->flowctl_ops || + !soc->ops->flowctl_ops->register_pause_cb) + return QDF_STATUS_SUCCESS; + + return soc->ops->flowctl_ops->register_pause_cb(soc, pause_cb); + +} + +/** + * cdp_set_desc_global_pool_size() - set global device pool size + * @soc - data path soc handle + * @num_msdu_desc - descriptor pool size + * + * set global device pool size + * + * return none + */ +static inline void +cdp_set_desc_global_pool_size(ol_txrx_soc_handle soc, + uint32_t num_msdu_desc) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->flowctl_ops || + 
!soc->ops->flowctl_ops->set_desc_global_pool_size) + return; + + soc->ops->flowctl_ops->set_desc_global_pool_size( + num_msdu_desc); +} + +/** + * cdp_dump_flow_pool_info() - dump flow pool information + * @soc - data path soc handle + * + * dump flow pool information + * + * return none + */ +static inline void +cdp_dump_flow_pool_info(struct cdp_soc_t *soc) +{ + void *dp_soc = (void *)soc; + + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s invalid instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->flowctl_ops || + !soc->ops->flowctl_ops->dump_flow_pool_info) + return; + + soc->ops->flowctl_ops->dump_flow_pool_info(dp_soc); +} +#endif /* _CDP_TXRX_FC_V2_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_handle.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_handle.h new file mode 100644 index 0000000000000000000000000000000000000000..8f63fd0d677570295faf97e54a990fe8e9ff2d40 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_handle.h @@ -0,0 +1,36 @@ + +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + + /** + * @file cdp_txrx_handle.h + * @brief Holds the forward structure declarations for handles + * passed from the upper layers + */ + +#ifndef CDP_TXRX_HANDLE_H +#define CDP_TXRX_HANDLE_H + +struct cdp_cfg; +struct cdp_pdev; +struct cdp_vdev; +struct cdp_peer; +struct cdp_raw_ast; +struct cdp_soc; + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_host_stats.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_host_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..2944527349d01a65c1d97bf99df2ab527f054b83 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_host_stats.h @@ -0,0 +1,422 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * @file cdp_txrx_host_stats.h + * @brief Define the host data path stats API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_HOST_STATS_H_ +#define _CDP_TXRX_HOST_STATS_H_ +#include "cdp_txrx_handle.h" +/** + * cdp_host_stats_get: cdp call to get host stats + * @soc: SOC handle + * @req: Requirement type + * + * return: 0 for Success, Failure returns error message + */ +static inline int cdp_host_stats_get(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, + struct ol_txrx_stats_req *req) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_host_stats_get) + return 0; + + return soc->ops->host_stats_ops->txrx_host_stats_get(vdev, req); +} + +/** + * cdp_host_stats_clr: cdp call to clear host stats + * @vdev: vdev handle + * + * return: void + */ +static inline void +cdp_host_stats_clr(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_host_stats_clr) + return; + + soc->ops->host_stats_ops->txrx_host_stats_clr(vdev); +} + +static inline void +cdp_host_ce_stats(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_host_ce_stats) + return; + + soc->ops->host_stats_ops->txrx_host_ce_stats(vdev); +} + +static inline int cdp_stats_publish + (ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + struct ol_txrx_stats *buf) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", 
__func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_stats_publish) + return 0; + + return soc->ops->host_stats_ops->txrx_stats_publish(pdev, buf); +} + +/** + * @brief Enable enhanced stats functionality. + * + * @param pdev - the physical device object + * @return - void + */ +static inline void +cdp_enable_enhanced_stats(ol_txrx_soc_handle soc, struct cdp_pdev *pdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_enable_enhanced_stats) + return; + + soc->ops->host_stats_ops->txrx_enable_enhanced_stats + (pdev); +} + +/** + * @brief Disable enhanced stats functionality. + * + * @param pdev - the physical device object + * @return - void + */ +static inline void +cdp_disable_enhanced_stats(ol_txrx_soc_handle soc, struct cdp_pdev *pdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_disable_enhanced_stats) + return; + + soc->ops->host_stats_ops->txrx_disable_enhanced_stats + (pdev); +} + +/** + * @brief Get the desired stats from the message. + * + * @param pdev - the physical device object + * @param stats_base - stats buffer received from FW + * @param type - stats type. 
+ * @return - pointer to requested stat identified by type + */ +static inline uint32_t *cdp_get_stats_base + (ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + uint32_t *stats_base, uint32_t msg_len, uint8_t type) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_get_stats_base) + return 0; + + return (uint32_t *)soc->ops->host_stats_ops->txrx_get_stats_base + (pdev, stats_base, msg_len, type); +} + +static inline void +cdp_tx_print_tso_stats(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->tx_print_tso_stats) + return; + + soc->ops->host_stats_ops->tx_print_tso_stats(vdev); +} + +static inline void +cdp_tx_rst_tso_stats(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->tx_rst_tso_stats) + return; + + soc->ops->host_stats_ops->tx_rst_tso_stats(vdev); +} + +static inline void +cdp_tx_print_sg_stats(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->tx_print_sg_stats) + return; + + soc->ops->host_stats_ops->tx_print_sg_stats(vdev); +} + +static inline void +cdp_tx_rst_sg_stats(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + 
+ if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->tx_rst_sg_stats) + return; + + soc->ops->host_stats_ops->tx_rst_sg_stats(vdev); +} + +static inline void +cdp_print_rx_cksum_stats(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->print_rx_cksum_stats) + return; + + soc->ops->host_stats_ops->print_rx_cksum_stats(vdev); +} + +static inline void +cdp_rst_rx_cksum_stats(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->rst_rx_cksum_stats) + return; + + soc->ops->host_stats_ops->rst_rx_cksum_stats(vdev); +} + +static inline A_STATUS +cdp_host_me_stats(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_host_me_stats) + return 0; + + return soc->ops->host_stats_ops->txrx_host_me_stats(vdev); +} + +static inline void cdp_per_peer_stats + (ol_txrx_soc_handle soc, struct cdp_pdev *pdev, char *addr) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_per_peer_stats) + return; + + soc->ops->host_stats_ops->txrx_per_peer_stats + (pdev, addr); +} + +static inline int cdp_host_msdu_ttl_stats(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, + struct ol_txrx_stats_req *req) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid 
Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->txrx_host_msdu_ttl_stats) + return 0; + + return soc->ops->host_stats_ops->txrx_host_msdu_ttl_stats + (vdev, req); +} + +static inline void +cdp_print_lro_stats(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->print_lro_stats) + return; + + soc->ops->host_stats_ops->print_lro_stats(vdev); +} + +static inline void +cdp_reset_lro_stats(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->reset_lro_stats) + return; + + soc->ops->host_stats_ops->reset_lro_stats(vdev); +} + +static inline void cdp_get_dp_fw_peer_stats(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, uint8_t *mac, uint32_t caps) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->host_stats_ops || + !soc->ops->host_stats_ops->get_fw_peer_stats) + return; + + soc->ops->host_stats_ops->get_fw_peer_stats + (pdev, mac, caps); +} + +static inline void cdp_get_dp_htt_stats(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, + void *data, uint32_t data_len) +{ + if (soc && soc->ops && soc->ops->host_stats_ops && + soc->ops->host_stats_ops->get_htt_stats) + return soc->ops->host_stats_ops->get_htt_stats + (pdev, data, data_len); + return; +} + +/** + * @brief Parse the stats header and get the payload from the message. 
+ * + * @param pdev - the physical device object + * @param msg_word - stats buffer received from FW + * @param msg_len - length of the message + * @param type - place holder for parsed message type + * @param status - place holder for parsed message status + * @return - pointer to received stat payload + */ + +#if defined(QCA_SUPPORT_SON) || defined(ENHANCED_STATS) +uint32_t *ol_txrx_get_en_stats_base(struct cdp_pdev *pdev, uint32_t *msg_word, + uint32_t msg_len, enum htt_cmn_t2h_en_stats_type *type, enum htt_cmn_t2h_en_stats_status *status); +#endif +#endif /* _CDP_TXRX_HOST_STATS_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ipa.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ipa.h new file mode 100644 index 0000000000000000000000000000000000000000..1109ad002af6169f5257850fa98a59bc72753155 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ipa.h @@ -0,0 +1,578 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * @file cdp_txrx_ipa.h + * @brief Define the host data path IP Acceleraor API functions + */ +#ifndef _CDP_TXRX_IPA_H_ +#define _CDP_TXRX_IPA_H_ + +#ifdef IPA_OFFLOAD +#ifdef CONFIG_IPA_WDI_UNIFIED_API +#include +#else +#include +#endif +#include +#include "cdp_txrx_handle.h" + +/** + * cdp_ipa_get_resource() - Get allocated WLAN resources for IPA data path + * @soc - data path soc handle + * @pdev - device instance pointer + * + * Get allocated WLAN resources for IPA data path + * + * return QDF_STATUS_SUCCESS + */ +static inline QDF_STATUS +cdp_ipa_get_resource(ol_txrx_soc_handle soc, struct cdp_pdev *pdev) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_get_resource) + return soc->ops->ipa_ops->ipa_get_resource(pdev); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_set_doorbell_paddr() - give IPA db paddr to FW + * @soc - data path soc handle + * @pdev - device instance pointer + * + * give IPA db paddr to FW + * + * return QDF_STATUS_SUCCESS + */ +static inline QDF_STATUS +cdp_ipa_set_doorbell_paddr(ol_txrx_soc_handle soc, struct cdp_pdev *pdev) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_set_doorbell_paddr) + return soc->ops->ipa_ops->ipa_set_doorbell_paddr(pdev); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_set_active() - activate/de-ctivate IPA offload path + * @soc - data path soc handle + * @pdev - device instance pointer + * @uc_active - activate or de-activate + * @is_tx - toggle tx or rx data path + * + * activate/de-ctivate IPA offload path + * + * return QDF_STATUS_SUCCESS + */ +static inline QDF_STATUS +cdp_ipa_set_active(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + bool uc_active, bool 
is_tx) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_set_active) + return soc->ops->ipa_ops->ipa_set_active(pdev, uc_active, + is_tx); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_op_response() - event handler from FW + * @soc - data path soc handle + * @pdev - device instance pointer + * @op_msg - event contents from firmware + * + * event handler from FW + * + * return QDF_STATUS_SUCCESS + */ +static inline QDF_STATUS +cdp_ipa_op_response(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + uint8_t *op_msg) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_op_response) + return soc->ops->ipa_ops->ipa_op_response(pdev, op_msg); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_register_op_cb() - register event handler function pointer + * @soc - data path soc handle + * @pdev - device instance pointer + * @op_cb - event handler callback function pointer + * @usr_ctxt - user context to registered + * + * register event handler function pointer + * + * return QDF_STATUS_SUCCESS + */ +static inline QDF_STATUS +cdp_ipa_register_op_cb(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + ipa_uc_op_cb_type op_cb, void *usr_ctxt) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_register_op_cb) + return soc->ops->ipa_ops->ipa_register_op_cb(pdev, op_cb, + usr_ctxt); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_get_stat() - get IPA data path stats from FW + * @soc - data path soc handle + * @pdev - device instance pointer + * + * get IPA data path stats from FW async + 
* + * return QDF_STATUS_SUCCESS + */ +static inline QDF_STATUS +cdp_ipa_get_stat(ol_txrx_soc_handle soc, struct cdp_pdev *pdev) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_get_stat) + return soc->ops->ipa_ops->ipa_get_stat(pdev); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_tx_send_ipa_data_frame() - send IPA data frame + * @vdev: vdev + * @skb: skb + * + * Return: skb/ NULL is for success + */ +static inline qdf_nbuf_t cdp_ipa_tx_send_data_frame(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, qdf_nbuf_t skb) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops || !vdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return skb; + } + + if (soc->ops->ipa_ops->ipa_tx_data_frame) + return soc->ops->ipa_ops->ipa_tx_data_frame(vdev, skb); + + return skb; +} + +/** + * cdp_ipa_set_uc_tx_partition_base() - set tx packet partition base + * @pdev: physical device instance + * @value: partition base value + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_set_uc_tx_partition_base(ol_txrx_soc_handle soc, + struct cdp_cfg *cfg_pdev, uint32_t value) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops || !cfg_pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_set_uc_tx_partition_base) + soc->ops->ipa_ops->ipa_set_uc_tx_partition_base(cfg_pdev, + value); + + return QDF_STATUS_SUCCESS; +} + +#ifdef FEATURE_METERING +/** + * cdp_ipa_uc_get_share_stats() - get Tx/Rx byte stats from FW + * @pdev: physical device instance + * @value: reset stats + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_uc_get_share_stats(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, uint8_t value) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops || 
!pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_uc_get_share_stats) + return soc->ops->ipa_ops->ipa_uc_get_share_stats(pdev, + value); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_uc_set_quota() - set quota limit to FW + * @pdev: physical device instance + * @value: quota limit bytes + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_uc_set_quota(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, uint64_t value) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_uc_set_quota) + return soc->ops->ipa_ops->ipa_uc_set_quota(pdev, + value); + + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * cdp_ipa_enable_autonomy() - Enable autonomy RX data path + * @soc: data path soc handle + * @pdev: handle to the device instance + * + * IPA Data path is enabled and resumed. + * All autonomy data path elements are ready to deliver packet + * All RX packet should routed to IPA_REO ring, then IPA can receive packet + * from WLAN + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_enable_autonomy(ol_txrx_soc_handle soc, struct cdp_pdev *pdev) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_enable_autonomy) + return soc->ops->ipa_ops->ipa_enable_autonomy(pdev); + + return QDF_STATUS_SUCCESS; +} + +/** + * cdp_ipa_disable_autonomy() - Disable autonomy RX data path + * @soc: data path soc handle + * @pdev: handle to the device instance + * + * IPA Data path is enabled and resumed. 
+ * All autonomy datapath elements are ready to deliver packet
+ * All RX packet should routed to IPA_REO ring, then IPA can receive packet
+ * from WLAN
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+cdp_ipa_disable_autonomy(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
+{
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	/* guard the op actually invoked (was checking ipa_enable_autonomy,
+	 * a copy-paste error from cdp_ipa_enable_autonomy above)
+	 */
+	if (soc->ops->ipa_ops->ipa_disable_autonomy)
+		return soc->ops->ipa_ops->ipa_disable_autonomy(pdev);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+#ifdef CONFIG_IPA_WDI_UNIFIED_API
+/**
+ * cdp_ipa_setup() - Setup and connect IPA pipes
+ * @soc: data path soc handle
+ * @pdev: handle to the device instance
+ * @ipa_i2w_cb: IPA to WLAN callback
+ * @ipa_w2i_cb: WLAN to IPA callback
+ * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback
+ * @ipa_desc_size: IPA descriptor size
+ * @ipa_priv: handle to the HTT instance
+ * @is_rm_enabled: Is IPA RM enabled or not
+ * @tx_pipe_handle: pointer to Tx pipe handle
+ * @rx_pipe_handle: pointer to Rx pipe handle
+ * @is_smmu_enabled: Is SMMU enabled or not
+ * @sys_in: parameters to setup sys pipe in mcc mode
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+cdp_ipa_setup(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, void *ipa_i2w_cb,
+	      void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
+	      uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
+	      uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle,
+	      bool is_smmu_enabled, qdf_ipa_sys_connect_params_t *sys_in)
+{
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc->ops->ipa_ops->ipa_setup)
+		return soc->ops->ipa_ops->ipa_setup(pdev, ipa_i2w_cb,
+						    ipa_w2i_cb,
+						    ipa_wdi_meter_notifier_cb,
+						    ipa_desc_size, ipa_priv,
+						    is_rm_enabled,
+						    tx_pipe_handle,
+						    rx_pipe_handle,
+
is_smmu_enabled, + sys_in); + + return QDF_STATUS_SUCCESS; +} +#else /* CONFIG_IPA_WDI_UNIFIED_API */ +/** + * cdp_ipa_setup() - Setup and connect IPA pipes + * @soc: data path soc handle + * @pdev: handle to the device instance + * @ipa_i2w_cb: IPA to WLAN callback + * @ipa_w2i_cb: WLAN to IPA callback + * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback + * @ipa_desc_size: IPA descriptor size + * @ipa_priv: handle to the HTT instance + * @is_rm_enabled: Is IPA RM enabled or not + * @tx_pipe_handle: pointer to Tx pipe handle + * @rx_pipe_handle: pointer to Rx pipe handle + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_setup(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, void *ipa_i2w_cb, + void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb, + uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled, + uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_setup) + return soc->ops->ipa_ops->ipa_setup(pdev, ipa_i2w_cb, + ipa_w2i_cb, + ipa_wdi_meter_notifier_cb, + ipa_desc_size, ipa_priv, + is_rm_enabled, + tx_pipe_handle, + rx_pipe_handle); + + return QDF_STATUS_SUCCESS; +} +#endif /* CONFIG_IPA_WDI_UNIFIED_API */ + +/** + * cdp_ipa_cleanup() - Disconnect IPA pipes + * @soc: data path soc handle + * @tx_pipe_handle: Tx pipe handle + * @rx_pipe_handle: Rx pipe handle + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +cdp_ipa_cleanup(ol_txrx_soc_handle soc, uint32_t tx_pipe_handle, + uint32_t rx_pipe_handle) +{ + if (!soc || !soc->ops || !soc->ops->ipa_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (soc->ops->ipa_ops->ipa_cleanup) + return soc->ops->ipa_ops->ipa_cleanup(tx_pipe_handle, + rx_pipe_handle); + + return QDF_STATUS_SUCCESS; +} 
+
+/**
+ * cdp_ipa_setup_iface() - Setup IPA header and register interface
+ * @soc: data path soc handle
+ * @ifname: Interface name
+ * @mac_addr: Interface MAC address
+ * @prod_client: IPA prod client type
+ * @cons_client: IPA cons client type
+ * @session_id: Session ID
+ * @is_ipv6_enabled: Is IPV6 enabled or not
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+cdp_ipa_setup_iface(ol_txrx_soc_handle soc, char *ifname, uint8_t *mac_addr,
+		    qdf_ipa_client_type_t prod_client,
+		    qdf_ipa_client_type_t cons_client,
+		    uint8_t session_id, bool is_ipv6_enabled)
+{
+	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc->ops->ipa_ops->ipa_setup_iface)
+		return soc->ops->ipa_ops->ipa_setup_iface(ifname, mac_addr,
+							  prod_client,
+							  cons_client,
+							  session_id,
+							  is_ipv6_enabled);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
+ * @soc: data path soc handle
+ * @ifname: Interface name
+ * @is_ipv6_enabled: Is IPV6 enabled or not
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+cdp_ipa_cleanup_iface(ol_txrx_soc_handle soc, char *ifname,
+		      bool is_ipv6_enabled)
+{
+	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc->ops->ipa_ops->ipa_cleanup_iface)
+		return soc->ops->ipa_ops->ipa_cleanup_iface(ifname,
+							    is_ipv6_enabled);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdp_ipa_enable_pipes() - Enable and resume traffic on Tx/Rx pipes
+ * @soc: data path soc handle
+ * @pdev: handle to the device instance
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+cdp_ipa_enable_pipes(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
+{
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc->ops->ipa_ops->ipa_enable_pipes)
+		return soc->ops->ipa_ops->ipa_enable_pipes(pdev);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdp_ipa_disable_pipes() - Suspend traffic and disable Tx/Rx pipes
+ * @soc: data path soc handle
+ * @pdev: handle to the device instance
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+cdp_ipa_disable_pipes(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
+{
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc->ops->ipa_ops->ipa_disable_pipes)
+		return soc->ops->ipa_ops->ipa_disable_pipes(pdev);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
+ * @soc: data path soc handle
+ * @client: WLAN Client ID
+ * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
+ *
+ * Return: QDF_STATUS (QDF_STATUS_E_FAILURE on invalid instance)
+ */
+static inline QDF_STATUS
+cdp_ipa_set_perf_level(ol_txrx_soc_handle soc, int client,
+		       uint32_t max_supported_bw_mbps)
+{
+	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc->ops->ipa_ops->ipa_set_perf_level)
+		return soc->ops->ipa_ops->ipa_set_perf_level(client,
+				max_supported_bw_mbps);
+
+	return QDF_STATUS_SUCCESS;
+}
+#endif /* IPA_OFFLOAD */
+
+#endif /* _CDP_TXRX_IPA_H_ */
+
diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_me.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_me.h
new file mode 100644
index 0000000000000000000000000000000000000000..2e40067e6fedcb7c8806bb7578b2bb506dc7e5e5
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_me.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2016-2017 The Linux Foundation.
All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * @file cdp_txrx_me.h
+ * @brief Define the host data path mcast enhance API functions
+ * called by the host control SW and the OS interface module
+ */
+#ifndef _CDP_TXRX_ME_H_
+#define _CDP_TXRX_ME_H_
+
+#include	/* NOTE(review): included header name lost in formatting — restore from upstream */
+/* TODO: adf need to be replaced with qdf */
+#include "cdp_txrx_handle.h"
+
+static inline u_int16_t
+cdp_tx_desc_alloc_and_mark_for_mcast_clone(ol_txrx_soc_handle soc,
+	struct cdp_pdev *pdev, u_int16_t buf_count)
+{
+	if (!soc || !soc->ops) {
+		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
+			  "%s: Invalid Instance", __func__);
+		QDF_BUG(0);
+		return 0;
+	}
+
+	if (!soc->ops->me_ops ||
+	    !soc->ops->me_ops->tx_desc_alloc_and_mark_for_mcast_clone)
+		return 0;
+
+	return soc->ops->me_ops->
+			tx_desc_alloc_and_mark_for_mcast_clone
+			(pdev, buf_count);
+}
+
+static inline u_int16_t
+cdp_tx_desc_free_and_unmark_for_mcast_clone(ol_txrx_soc_handle soc,
+	struct cdp_pdev *pdev, u_int16_t buf_count)
+{
+	if (!soc || !soc->ops) {
+		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
+			  "%s: Invalid Instance", __func__);
+		QDF_BUG(0);
+		return 0;
+	}
+
+	if (!soc->ops->me_ops ||
+	    !soc->ops->me_ops->tx_desc_free_and_unmark_for_mcast_clone)
+		return 0;
+
+	return soc->ops->me_ops->
+			tx_desc_free_and_unmark_for_mcast_clone
+			(pdev, buf_count);
+}
+
+static inline u_int16_t
+cdp_tx_get_mcast_buf_allocated_marked(ol_txrx_soc_handle soc,
+	struct cdp_pdev *pdev)
+{
+	if (!soc || !soc->ops) {
+		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
+			  "%s: Invalid Instance", __func__);
+		QDF_BUG(0);
+		return 0;
+	}
+
+	if (!soc->ops->me_ops ||
+	    !soc->ops->me_ops->tx_get_mcast_buf_allocated_marked)
+		return 0;
+
+	return soc->ops->me_ops->tx_get_mcast_buf_allocated_marked
+			(pdev);
+}
+
+static inline void
+cdp_tx_me_alloc_descriptor(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
+{
+	if (!soc || !soc->ops) {
+		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
+			  "%s: Invalid Instance", __func__);
+		QDF_BUG(0);
+		return;
+	}
+
+	if (!soc->ops->me_ops ||
+	    !soc->ops->me_ops->tx_me_alloc_descriptor)
+		return;
+
+	soc->ops->me_ops->tx_me_alloc_descriptor(pdev);
+}
+
+static inline void
+cdp_tx_me_free_descriptor(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
+{
+	if (!soc || !soc->ops) {
+		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
+			  "%s: Invalid Instance", __func__);
+		QDF_BUG(0);
+		return;
+	}
+
+	if (!soc->ops->me_ops ||
+	    !soc->ops->me_ops->tx_me_free_descriptor)
+		return;
+
+	soc->ops->me_ops->tx_me_free_descriptor(pdev);
+}
+
+static inline uint16_t
+cdp_tx_me_convert_ucast(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
+	qdf_nbuf_t wbuf, u_int8_t newmac[][6], uint8_t newmaccnt)
+{
+	if (!soc || !soc->ops) {
+		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
+			  "%s: Invalid Instance", __func__);
+		QDF_BUG(0);
+		return 0;
+	}
+
+	if (!soc->ops->me_ops ||
+	    !soc->ops->me_ops->tx_me_convert_ucast)
+		return 0;
+
+	return soc->ops->me_ops->tx_me_convert_ucast
+			(vdev, wbuf, newmac, newmaccnt);
+}
+
+/* Should be a function pointer in ol_txrx_osif_ops{} */
+/**
+ * @brief notify mcast frame indication from FW.
+ * @details
+ * This notification will be used to convert
+ * multicast frame to unicast.
+ *
+ * @param pdev - handle to the ctrl SW's physical device object
+ * @param vdev_id - ID of the virtual device received the special data
+ * @param msdu - the multicast msdu returned by FW for host inspect
+ */
+
+static inline int cdp_mcast_notify(ol_txrx_soc_handle soc,
+	struct cdp_pdev *pdev, u_int8_t vdev_id, qdf_nbuf_t msdu)
+{
+	if (!soc || !soc->ops) {
+		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
+			  "%s: Invalid Instance", __func__);
+		QDF_BUG(0);
+		return 0;
+	}
+
+	if (!soc->ops->me_ops ||
+	    !soc->ops->me_ops->mcast_notify)
+		return 0;
+
+	return soc->ops->me_ops->mcast_notify(pdev, vdev_id, msdu);
+}
+#endif
diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_misc.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_misc.h
new file mode 100644
index 0000000000000000000000000000000000000000..446d341ae0f6eb452d7f967d91ca913e8a4e2f6d
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_misc.h
@@ -0,0 +1,553 @@
+/*
+ * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * @file cdp_txrx_misc.h
+ * @brief Define the host data path miscellaneous API functions
+ * called by the host control SW and the OS interface module
+ */
+#ifndef _CDP_TXRX_MISC_H_
+#define _CDP_TXRX_MISC_H_
+
+#include "cdp_txrx_handle.h"
+/**
+ * cdp_tx_non_std() - Allow the control-path SW to send data frames
+ *
+ * @soc - data path soc handle
+ * @vdev - which vdev should transmit the tx data frames
+ * @tx_spec - what non-standard handling to apply to the tx data frames
+ * @msdu_list - NULL-terminated list of tx MSDUs
+ *
+ * Generally, all tx data frames come from the OS shim into the txrx layer.
+ * However, there are rare cases such as TDLS messaging where the UMAC
+ * control-path SW creates tx data frames.
+ * This UMAC SW can call this function to provide the tx data frames to
+ * the txrx layer.
+ * The UMAC SW can request a callback for these data frames after their
+ * transmission completes, by using the ol_txrx_data_tx_cb_set function
+ * to register a tx completion callback, and by specifying
+ * ol_tx_spec_no_free as the tx_spec arg when giving the frames to
+ * ol_tx_non_std.
+ * The MSDUs need to have the appropriate L2 header type (802.3 vs. 802.11),
+ * as specified by ol_cfg_frame_type().
+ *
+ * Return: null - success, skb - failure
+ */
+static inline qdf_nbuf_t
+cdp_tx_non_std(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
+	       enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
+			  "%s invalid instance", __func__);
+		return NULL;
+	}
+
+	if (soc->ops->misc_ops->tx_non_std)
+		return soc->ops->misc_ops->tx_non_std(
+			vdev, tx_spec, msdu_list);
+	return NULL;
+}
+
+/**
+ * cdp_set_ibss_vdev_heart_beat_timer() - Update ibss vdev heart
+ * beat timer
+ * @soc - data path soc handle
+ * @vdev - vdev handle
+ * @timer_value_sec - new heart beat timer value
+ *
+ * Return: Old timer value set in vdev.
+ */
+static inline uint16_t
+cdp_set_ibss_vdev_heart_beat_timer(ol_txrx_soc_handle soc,
+	struct cdp_vdev *vdev, uint16_t timer_value_sec)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return 0;
+	}
+
+	if (soc->ops->misc_ops->set_ibss_vdev_heart_beat_timer)
+		return soc->ops->misc_ops->set_ibss_vdev_heart_beat_timer(
+			vdev, timer_value_sec);
+
+	return 0;
+}
+
+/**
+ * cdp_set_wisa_mode() - set wisa mode
+ * @soc - data path soc handle
+ * @vdev - vdev handle
+ * @enable - enable or disable
+ *
+ * Return: QDF_STATUS_SUCCESS mode enable success
+ */
+static inline QDF_STATUS
+cdp_set_wisa_mode(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, bool enable)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	if (soc->ops->misc_ops->set_wisa_mode)
+		return soc->ops->misc_ops->set_wisa_mode(vdev, enable);
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdp_data_stall_cb_register() - register data stall callback
+ * @soc - data path soc handle
+ * @cb - callback function
+ *
+ * Return: QDF_STATUS_SUCCESS register success
+ */
+static inline QDF_STATUS cdp_data_stall_cb_register(ol_txrx_soc_handle soc,
+						    data_stall_detect_cb cb)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	if (soc->ops->misc_ops->txrx_data_stall_cb_register)
+		return soc->ops->misc_ops->txrx_data_stall_cb_register(cb);
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdp_data_stall_cb_deregister() - de-register data stall callback
+ * @soc - data path soc handle
+ * @cb - callback function
+ *
+ * Return: QDF_STATUS_SUCCESS de-register success
+ */
+static inline QDF_STATUS cdp_data_stall_cb_deregister(ol_txrx_soc_handle soc,
+						      data_stall_detect_cb cb)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	if (soc->ops->misc_ops->txrx_data_stall_cb_deregister)
+		return soc->ops->misc_ops->txrx_data_stall_cb_deregister(cb);
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdp_post_data_stall_event() - post data stall event
+ * @soc - data path soc handle
+ * @indicator: Module triggering data stall
+ * @data_stall_type: data stall event type
+ * @pdev_id: pdev id
+ * @vdev_id_bitmap: vdev id bitmap
+ * @recovery_type: data stall recovery type
+ *
+ * Return: None
+ */
+static inline void
+cdp_post_data_stall_event(ol_txrx_soc_handle soc,
+			  enum data_stall_log_event_indicator indicator,
+			  enum data_stall_log_event_type data_stall_type,
+			  uint32_t pdev_id, uint32_t vdev_id_bitmap,
+			  enum data_stall_log_recovery_type recovery_type)
+{
+	if (!soc || !soc->ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		QDF_BUG(0);
+		return;
+	}
+
+	if (!soc->ops->misc_ops ||
+	    !soc->ops->misc_ops->txrx_post_data_stall_event)
+		return;
+
+	soc->ops->misc_ops->txrx_post_data_stall_event(
+			indicator, data_stall_type, pdev_id,
+			vdev_id_bitmap, recovery_type);
+}
+
+/**
+ * cdp_set_wmm_param() - set wmm parameter
+ * @soc - data path soc handle
+ * @pdev - device instance pointer
+ * @wmm_param - wmm parameter
+ *
+ * Return: none
+ */
+static inline void
+cdp_set_wmm_param(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
+		  struct ol_tx_wmm_param_t wmm_param)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return;
+	}
+
+	if (soc->ops->misc_ops->set_wmm_param)
+		return soc->ops->misc_ops->set_wmm_param(
+			pdev, wmm_param);
+
+	return;
+}
+
+/**
+ * cdp_runtime_suspend() - suspend
+ * @soc - data path soc handle
+ * @pdev - device instance pointer
+ *
+ * Return: QDF_STATUS_SUCCESS suspend
success
+ */
+static inline QDF_STATUS cdp_runtime_suspend(ol_txrx_soc_handle soc,
+					     struct cdp_pdev *pdev)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	if (soc->ops->misc_ops->runtime_suspend)
+		return soc->ops->misc_ops->runtime_suspend(pdev);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdp_runtime_resume() - resume
+ * @soc - data path soc handle
+ * @pdev - device instance pointer
+ *
+ * Return: QDF_STATUS_SUCCESS suspend success
+ */
+static inline QDF_STATUS cdp_runtime_resume(ol_txrx_soc_handle soc,
+					    struct cdp_pdev *pdev)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	if (soc->ops->misc_ops->runtime_resume)
+		return soc->ops->misc_ops->runtime_resume(pdev);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdp_hl_tdls_flag_reset() - tdls flag reset
+ * @soc - data path soc handle
+ * @vdev - virtual interface handle pointer
+ * @flag
+ *
+ * Return: none
+ */
+static inline void
+cdp_hl_tdls_flag_reset(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, bool flag)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return;
+	}
+
+	if (soc->ops->misc_ops->hl_tdls_flag_reset)
+		return soc->ops->misc_ops->hl_tdls_flag_reset(vdev, flag);
+
+	return;
+}
+
+/**
+ * cdp_get_opmode() - get vdev operation mode
+ * @soc - data path soc handle
+ * @vdev - virtual interface instance
+ *
+ * Return virtual device operational mode
+ *      op_mode_ap,
+ *      op_mode_ibss,
+ *      op_mode_sta,
+ *      op_mode_monitor,
+ *      op_mode_ocb,
+ *
+ * return interface id
+ *      0 unknown interface
+ */
+static inline int
+cdp_get_opmode(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return 0;
+	}
+
+	if (soc->ops->misc_ops->get_opmode)
+		return soc->ops->misc_ops->get_opmode(vdev);
+	return 0;
+}
+
+/**
+ * cdp_get_vdev_id() - get vdev id
+ * @soc - data path soc handle
+ * @vdev - virtual interface instance
+ *
+ * get virtual interface id
+ *
+ * return interface id
+ *      0 unknown interface
+ */
+static inline uint16_t
+cdp_get_vdev_id(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return 0;
+	}
+
+	if (soc->ops->misc_ops->get_vdev_id)
+		return soc->ops->misc_ops->get_vdev_id(vdev);
+	return 0;
+}
+
+/**
+ * cdp_get_tx_ack_stats() - get tx ack count for vdev
+ * @soc - data path soc handle
+ * @vdev_id - vdev id
+ *
+ * return tx ack count
+ *          0 invalid count
+ */
+static inline uint32_t
+cdp_get_tx_ack_stats(ol_txrx_soc_handle soc, uint8_t vdev_id)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return 0;
+	}
+
+	if (soc->ops->misc_ops->get_tx_ack_stats)
+		return soc->ops->misc_ops->get_tx_ack_stats(vdev_id);
+	return 0;
+}
+
+/**
+ * cdp_bad_peer_txctl_set_setting() - TBD
+ * @soc - data path soc handle
+ * @pdev - data path device instance
+ * @enable -
+ * @period -
+ * @txq_limit -
+ *
+ * TBD
+ *
+ * Return: none
+ */
+static inline void
+cdp_bad_peer_txctl_set_setting(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
+			       int enable, int period, int txq_limit)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return;
+	}
+
+	if (soc->ops->misc_ops->bad_peer_txctl_set_setting)
+		return soc->ops->misc_ops->bad_peer_txctl_set_setting(pdev,
+			enable, period, txq_limit);
+	return;
+}
+
+/**
+ * cdp_bad_peer_txctl_update_threshold()
- TBD
+ * @soc - data path soc handle
+ * @pdev - data path device instance
+ * @level -
+ * @tput_thresh -
+ * @tx_limit -
+ *
+ * TBD
+ *
+ * Return: none
+ */
+static inline void
+cdp_bad_peer_txctl_update_threshold(ol_txrx_soc_handle soc,
+				    struct cdp_pdev *pdev,
+				    int level, int tput_thresh, int tx_limit)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return;
+	}
+
+	if (soc->ops->misc_ops->bad_peer_txctl_update_threshold)
+		return soc->ops->misc_ops->bad_peer_txctl_update_threshold(
+			pdev, level, tput_thresh, tx_limit);
+	return;
+}
+
+/**
+ * cdp_mark_first_wakeup_packet() - set flag to indicate that
+ *    fw is compatible for marking first packet after wow wakeup
+ * @soc - data path soc handle
+ * @value: 1 for enabled/ 0 for disabled
+ *
+ * Return: None
+ */
+static inline void cdp_mark_first_wakeup_packet(ol_txrx_soc_handle soc,
+						uint8_t value)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return;
+	}
+
+	if (soc->ops->misc_ops->mark_first_wakeup_packet)
+		return soc->ops->misc_ops->mark_first_wakeup_packet(value);
+	return;
+}
+
+
+/**
+ * cdp_update_mac_id() - update mac_id for vdev
+ * @soc - data path soc handle
+ * @vdev_id: vdev id
+ * @mac_id: mac id
+ *
+ * Return: none
+ */
+static inline void cdp_update_mac_id(void *psoc, uint8_t vdev_id,
+				     uint8_t mac_id)
+{
+	ol_txrx_soc_handle soc = psoc;
+
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return;
+	}
+
+	if (soc->ops->misc_ops->update_mac_id)
+		return soc->ops->misc_ops->update_mac_id(vdev_id, mac_id);
+	return;
+}
+
+/**
+ * cdp_flush_rx_frames() - flush cached rx frames
+ * @soc - data path soc handle
+ * @peer: peer
+ * @drop: set flag to drop frames
+ *
+ * Return: None
+ */
+static inline void cdp_flush_rx_frames(ol_txrx_soc_handle soc, void *peer,
+				       bool drop)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return;
+	}
+
+	if (soc->ops->misc_ops->flush_rx_frames)
+		return soc->ops->misc_ops->flush_rx_frames(peer, drop);
+	return;
+}
+
+/*
+ * cdp_get_intra_bss_fwd_pkts_count() - to get the total tx and rx packets
+ *   that has been forwarded from txrx layer without going to upper layers.
+ * @vdev_id: vdev id
+ * @fwd_tx_packets: pointer to forwarded tx packets count parameter
+ * @fwd_rx_packets: pointer to forwarded rx packets count parameter
+ *
+ * Return: status -> A_OK - success, A_ERROR - failure
+ */
+static inline A_STATUS cdp_get_intra_bss_fwd_pkts_count(
+	ol_txrx_soc_handle soc, uint8_t vdev_id,
+	uint64_t *fwd_tx_packets, uint64_t *fwd_rx_packets)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return 0;
+	}
+
+	if (soc->ops->misc_ops->get_intra_bss_fwd_pkts_count)
+		return soc->ops->misc_ops->get_intra_bss_fwd_pkts_count(
+			vdev_id, fwd_tx_packets, fwd_rx_packets);
+
+	return 0;
+}
+
+/**
+ * cdp_pkt_log_init() - API to initialize packet log
+ * @handle: pdev handle
+ * @scn: HIF context
+ *
+ * Return: void
+ */
+static inline void cdp_pkt_log_init(ol_txrx_soc_handle soc,
+				    struct cdp_pdev *pdev, void *scn)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return;
+	}
+
+	if (soc->ops->misc_ops->pkt_log_init)
+		return soc->ops->misc_ops->pkt_log_init(pdev, scn);
+
+	return;
+}
+
+/**
+ * cdp_pkt_log_con_service() - API to connect packet log service
+ * @handle: pdev handle
+ * @scn: HIF context
+ *
+ * Return: void
+ */
+static inline void cdp_pkt_log_con_service(ol_txrx_soc_handle soc,
+					   struct cdp_pdev *pdev, void *scn)
+{
+	if (!soc || !soc->ops || !soc->ops->misc_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return;
+	}
+
+	if (soc->ops->misc_ops->pkt_log_con_service)
+		return soc->ops->misc_ops->pkt_log_con_service(pdev, scn);
+
+	return;
+}
+#endif /* _CDP_TXRX_MISC_H_ */
diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mob_def.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mob_def.h
new file mode 100644
index 0000000000000000000000000000000000000000..de27af20e6638266e752bac586e549f8642ded8a
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mob_def.h
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __CDP_TXRX_MOB_DEF_H
+#define __CDP_TXRX_MOB_DEF_H
+#include	/* NOTE(review): header name lost in formatting — restore from upstream */
+#include	/* NOTE(review): header name lost in formatting — restore from upstream */
+
+#define TX_WMM_AC_NUM	4
+
+#define OL_TXQ_PAUSE_REASON_FW                (1 << 0)
+#define OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED (1 << 1)
+#define OL_TXQ_PAUSE_REASON_TX_ABORT          (1 << 2)
+#define OL_TXQ_PAUSE_REASON_VDEV_STOP         (1 << 3)
+#define OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION (1 << 4)
+
+#define OL_TXRX_INVALID_NUM_PEERS (-1)
+
+#define OL_TXRX_MAC_ADDR_LEN 6
+
+
+/* Maximum number of station supported by data path, including BC.
*/
+#define WLAN_MAX_STA_COUNT  (HAL_NUM_STA)
+
+/* The symbolic station ID return to HDD to specify the packet is bc/mc */
+#define WLAN_RX_BCMC_STA_ID (WLAN_MAX_STA_COUNT + 1)
+
+/* The symbolic station ID return to HDD to specify the packet is
+       to soft-AP itself */
+#define WLAN_RX_SAP_SELF_STA_ID (WLAN_MAX_STA_COUNT + 2)
+
+/* is 802.11 address multicast/broadcast? */
+#define IEEE80211_IS_MULTICAST(_a)  (*(_a) & 0x01)
+
+#define MAX_PEERS 32
+
+/*
+ * Bins used for reporting delay histogram:
+ * bin 0:  0 - 10  ms delay
+ * bin 1: 10 - 20  ms delay
+ * bin 2: 20 - 40  ms delay
+ * bin 3: 40 - 80  ms delay
+ * bin 4: 80 - 160 ms delay
+ * bin 5: > 160 ms delay
+ */
+#define QCA_TX_DELAY_HIST_REPORT_BINS 6
+
+/* BA actions */
+#define IEEE80211_ACTION_BA_ADDBA_REQUEST       0   /* ADDBA request */
+#define IEEE80211_ACTION_BA_ADDBA_RESPONSE      1   /* ADDBA response */
+#define IEEE80211_ACTION_BA_DELBA               2   /* DELBA */
+
+#define IEEE80211_BA_POLICY_DELAYED      0
+#define IEEE80211_BA_POLICY_IMMEDIATE    1
+#define IEEE80211_BA_AMSDU_SUPPORTED     1
+
+/**
+ * enum netif_action_type - Type of actions on netif queues
+ * @WLAN_STOP_ALL_NETIF_QUEUE: stop all netif queues
+ * @WLAN_START_ALL_NETIF_QUEUE: start all netif queues
+ * @WLAN_WAKE_ALL_NETIF_QUEUE: wake all netif queues
+ * @WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER: stop all queues and off carrier
+ * @WLAN_START_ALL_NETIF_QUEUE_N_CARRIER: start all queues and on carrier
+ * @WLAN_NETIF_TX_DISABLE: disable tx
+ * @WLAN_NETIF_TX_DISABLE_N_CARRIER: disable tx and off carrier
+ * @WLAN_NETIF_CARRIER_ON: on carrier
+ * @WLAN_NETIF_CARRIER_OFF: off carrier
+ * @WLAN_NETIF_PRIORITY_QUEUE_ON: start priority netif queues
+ * @WLAN_NETIF_PRIORITY_QUEUE_OFF: stop priority netif queues
+ * @WLAN_WAKE_NON_PRIORITY_QUEUE: wake non priority netif queues
+ * @WLAN_STOP_NON_PRIORITY_QUEUE: stop non priority netif queues
+ */
+enum netif_action_type {
+	WLAN_STOP_ALL_NETIF_QUEUE = 1,
+	WLAN_START_ALL_NETIF_QUEUE,
+	WLAN_WAKE_ALL_NETIF_QUEUE,
+	WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER,
+	WLAN_START_ALL_NETIF_QUEUE_N_CARRIER,
+	WLAN_NETIF_TX_DISABLE,
+	WLAN_NETIF_TX_DISABLE_N_CARRIER,
+	WLAN_NETIF_CARRIER_ON,
+	WLAN_NETIF_CARRIER_OFF,
+	WLAN_NETIF_PRIORITY_QUEUE_ON,
+	WLAN_NETIF_PRIORITY_QUEUE_OFF,
+	WLAN_WAKE_NON_PRIORITY_QUEUE,
+	WLAN_STOP_NON_PRIORITY_QUEUE,
+	WLAN_NETIF_ACTION_TYPE_MAX,
+};
+
+/**
+ * enum netif_reason_type - reason for netif queue action
+ * @WLAN_CONTROL_PATH: action from control path
+ * @WLAN_DATA_FLOW_CONTROL: because of flow control
+ * @WLAN_FW_PAUSE: because of firmware pause
+ * @WLAN_TX_ABORT: because of tx abort
+ * @WLAN_VDEV_STOP: because of vdev stop
+ * @WLAN_PEER_UNAUTHORISED: because of peer is unauthorised
+ * @WLAN_THERMAL_MITIGATION: because of thermal mitigation
+ */
+enum netif_reason_type {
+	WLAN_CONTROL_PATH = 1,
+	WLAN_DATA_FLOW_CONTROL,
+	WLAN_FW_PAUSE,
+	WLAN_TX_ABORT,
+	WLAN_VDEV_STOP,
+	WLAN_PEER_UNAUTHORISED,
+	WLAN_THERMAL_MITIGATION,
+	WLAN_DATA_FLOW_CONTROL_PRIORITY,
+	WLAN_REASON_TYPE_MAX,
+};
+
+enum ol_rx_err_type {
+	OL_RX_ERR_DEFRAG_MIC,
+	OL_RX_ERR_PN,
+	OL_RX_ERR_UNKNOWN_PEER,
+	OL_RX_ERR_MALFORMED,
+	OL_RX_ERR_TKIP_MIC,
+	OL_RX_ERR_DECRYPT,
+	OL_RX_ERR_MPDU_LENGTH,
+	OL_RX_ERR_ENCRYPT_REQUIRED,
+	OL_RX_ERR_DUP,
+	OL_RX_ERR_UNKNOWN,
+	OL_RX_ERR_FCS,
+	OL_RX_ERR_PRIVACY,
+	OL_RX_ERR_NONE_FRAG,
+	OL_RX_ERR_NONE = 0xFF
+};
+
+enum throttle_level {
+	THROTTLE_LEVEL_0,
+	THROTTLE_LEVEL_1,
+	THROTTLE_LEVEL_2,
+	THROTTLE_LEVEL_3,
+	/* Invalid */
+	THROTTLE_LEVEL_MAX,
+};
+
+enum {
+	OL_TX_WMM_AC_BE,
+	OL_TX_WMM_AC_BK,
+	OL_TX_WMM_AC_VI,
+	OL_TX_WMM_AC_VO,
+	OL_TX_NUM_WMM_AC
+};
+
+/**
+ * @enum ol_tx_spec
+ * @brief indicate what non-standard transmission actions to apply
+ * @details
+ *  Indicate one or more of the following:
+ *    - The tx frame already has a complete 802.11 header.
+ *      Thus, skip 802.3/native-WiFi to 802.11 header encapsulation and
+ *      A-MSDU aggregation.
+ *    - The tx frame should not be aggregated (A-MPDU or A-MSDU)
+ *    - The tx frame is already encrypted - don't attempt encryption.
+ *    - The tx frame is a segment of a TCP jumbo frame.
+ *    - This tx frame should not be unmapped and freed by the txrx layer
+ *      after transmission, but instead given to a registered tx completion
+ *      callback.
+ *  More than one of these specification can apply, though typically
+ *  only a single specification is applied to a tx frame.
+ *  A compound specification can be created, as a bit-OR of these
+ *  specifications.
+ */
+enum ol_tx_spec {
+	OL_TX_SPEC_STD = 0x0,	/* do regular processing */
+	OL_TX_SPEC_RAW = 0x1,	/* skip encap + A-MSDU aggr */
+	OL_TX_SPEC_NO_AGGR = 0x2,	/* skip encap + all aggr */
+	OL_TX_SPEC_NO_ENCRYPT = 0x4,	/* skip encap + encrypt */
+	OL_TX_SPEC_TSO = 0x8,	/* TCP segmented */
+	OL_TX_SPEC_NWIFI_NO_ENCRYPT = 0x10,	/* skip encrypt for nwifi */
+	OL_TX_SPEC_NO_FREE = 0x20,	/* give to cb rather than free */
+};
+
+/**
+ * @enum peer_debug_id_type: debug ids to track peer get_ref and release_ref
+ * @brief Unique peer debug IDs to track the callers. Each new usage can add to
+ *        this enum list to create a new "PEER_DEBUG_ID_".
+ * @PEER_DEBUG_ID_OL_INTERNAL: debug id for OL internal usage
+ * @PEER_DEBUG_ID_WMA_PKT_DROP: debug id for wma_is_pkt_drop_candidate API
+ * @PEER_DEBUG_ID_WMA_ADDBA_REQ: debug id for ADDBA request
+ * @PEER_DEBUG_ID_WMA_DELBA_REQ: debug id for DELBA request
+ * @PEER_DEBUG_ID_LIM_SEND_ADDBA_RESP: debug id for send ADDBA response
+ * @PEER_DEBUG_ID_OL_RX_THREAD: debug id for rx thread
+ * @PEER_DEBUG_ID_WMA_CCMP_REPLAY_ATTACK: debug id for CCMP replay
+ * @PEER_DEBUG_ID_WMA_DEL_BSS:debug id for remove BSS
+ * @PEER_DEBUG_ID_WMA_VDEV_STOP_RESP:debug id for vdev stop response handler
+ * @PEER_DEBUG_ID_OL_PEER_MAP:debug id for peer map/unmap
+ * @PEER_DEBUG_ID_OL_PEER_ATTACH: debug id for peer attach/detach
+ * @PEER_DEBUG_ID_OL_TXQ_VDEV_FL: debug id for vdev flush
+ * @PEER_DEBUG_ID_OL_HASH_ERS:debug id for peer find hash erase
+ * @PEER_DEBUG_ID_MAX: debug id MAX
+ */
+enum peer_debug_id_type {
+	PEER_DEBUG_ID_OL_INTERNAL,
+	PEER_DEBUG_ID_WMA_PKT_DROP,
+	PEER_DEBUG_ID_WMA_ADDBA_REQ,
+	PEER_DEBUG_ID_WMA_DELBA_REQ,
+	PEER_DEBUG_ID_LIM_SEND_ADDBA_RESP,
+	PEER_DEBUG_ID_OL_RX_THREAD,
+	PEER_DEBUG_ID_WMA_CCMP_REPLAY_ATTACK,
+	PEER_DEBUG_ID_WMA_DEL_BSS,
+	PEER_DEBUG_ID_WMA_VDEV_STOP_RESP,
+	PEER_DEBUG_ID_OL_PEER_MAP,
+	PEER_DEBUG_ID_OL_PEER_ATTACH,
+	PEER_DEBUG_ID_OL_TXQ_VDEV_FL,
+	PEER_DEBUG_ID_OL_HASH_ERS,
+	PEER_DEBUG_ID_OL_UNMAP_TIMER_WORK,
+	PEER_DEBUG_ID_MAX
+};
+
+/**
+ * struct ol_txrx_desc_type - txrx descriptor type
+ * @sta_id: sta id
+ * @is_qos_enabled: is station qos enabled
+ * @is_wapi_supported: is station wapi supported
+ */
+struct ol_txrx_desc_type {
+	uint8_t sta_id;
+	uint8_t is_qos_enabled;
+	uint8_t is_wapi_supported;
+};
+
+/**
+ * struct ol_tx_sched_wrr_ac_specs_t - the wrr ac specs params structure
+ * @wrr_skip_weight: map to ol_tx_sched_wrr_adv_category_info_t.specs.
+ *                            wrr_skip_weight
+ * @credit_threshold: map to ol_tx_sched_wrr_adv_category_info_t.specs.
 *			credit_threshold
 * @send_limit: map to ol_tx_sched_wrr_adv_category_info_t.specs.
 *			send_limit
 * @credit_reserve: map to ol_tx_sched_wrr_adv_category_info_t.specs.
 *			credit_reserve
 * @discard_weight: map to ol_tx_sched_wrr_adv_category_info_t.specs.
 *			discard_weight
 *
 * This structure is for wrr ac specs params set from user, it will update
 * its content corresponding to the ol_tx_sched_wrr_adv_category_info_t.specs.
 */
struct ol_tx_sched_wrr_ac_specs_t {
	int wrr_skip_weight;
	uint32_t credit_threshold;
	uint16_t send_limit;
	int credit_reserve;
	int discard_weight;
};

/**
 * struct txrx_pdev_cfg_param_t - configuration information
 * passed to the data path
 */
struct txrx_pdev_cfg_param_t {
	/* full rx reorder offload enable flag */
	uint8_t is_full_reorder_offload;
	/* IPA Micro controller data path offload enable flag */
	uint8_t is_uc_offload_enabled;
	/* IPA Micro controller data path offload TX buffer count */
	uint32_t uc_tx_buffer_count;
	/* IPA Micro controller data path offload TX buffer size */
	uint32_t uc_tx_buffer_size;
	/* IPA Micro controller data path offload RX indication ring count */
	uint32_t uc_rx_indication_ring_count;
	/* IPA Micro controller data path offload TX partition base */
	uint32_t uc_tx_partition_base;
	/* IP, TCP and UDP checksum offload */
	bool ip_tcp_udp_checksum_offload;
	/* Rx processing in thread from TXRX */
	bool enable_rxthread;
	/* CE classification enabled through INI */
	bool ce_classify_enabled;
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
	/* Threshold to stop queue in percentage */
	uint32_t tx_flow_stop_queue_th;
	/* Start queue offset in percentage */
	uint32_t tx_flow_start_queue_offset;
#endif

	/* per-access-category WRR scheduler tuning, one entry per WMM AC */
	struct ol_tx_sched_wrr_ac_specs_t ac_specs[TX_WMM_AC_NUM];
};

#ifdef IPA_OFFLOAD
/**
 * ol_txrx_ipa_resources - Resources needed for IPA
 */
struct ol_txrx_ipa_resources {
	qdf_shared_mem_t *ce_sr;
	uint32_t ce_sr_ring_size;
	qdf_dma_addr_t ce_reg_paddr;

	qdf_shared_mem_t *tx_comp_ring;
	uint32_t tx_num_alloc_buffer;

	qdf_shared_mem_t *rx_rdy_ring;
	qdf_shared_mem_t *rx_proc_done_idx;

	qdf_shared_mem_t *rx2_rdy_ring;
	qdf_shared_mem_t *rx2_proc_done_idx;

	/* IPA UC doorbell registers paddr */
	qdf_dma_addr_t tx_comp_doorbell_dmaaddr;
	qdf_dma_addr_t rx_ready_doorbell_dmaaddr;

	uint32_t tx_pipe_handle;
	uint32_t rx_pipe_handle;
};
#endif

/* per-channel OCB info; disable_rx_stats_hdr suppresses the rx stats header */
struct ol_txrx_ocb_chan_info {
	uint32_t chan_freq;
	uint16_t disable_rx_stats_hdr:1;
};

/**
 * ol_mic_error_info - carries the information associated with
 * a MIC error
 * @vdev_id: virtual device ID
 * @key_id: Key ID
 * @pn: packet number
 * @sa: source address
 * @da: destination address
 * @ta: transmitter address
 */
struct ol_mic_error_info {
	uint8_t vdev_id;
	uint32_t key_id;
	uint64_t pn;
	uint8_t sa[OL_TXRX_MAC_ADDR_LEN];
	uint8_t da[OL_TXRX_MAC_ADDR_LEN];
	uint8_t ta[OL_TXRX_MAC_ADDR_LEN];
};

/**
 * ol_error_info - carries the information associated with an
 * error indicated by the firmware
 * @mic_err: MIC error information
 */
struct ol_error_info {
	union {
		struct ol_mic_error_info mic_err;
	} u;
};


/**
 * struct ol_txrx_ocb_set_chan - txrx OCB channel info
 * @ocb_channel_count: Channel count
 * @ocb_channel_info: OCB channel info
 */
struct ol_txrx_ocb_set_chan {
	uint32_t ocb_channel_count;
	struct ol_txrx_ocb_chan_info *ocb_channel_info;
};

/**
 * @brief Parameter type to pass WMM setting to ol_txrx_set_wmm_param
 * @details
 * The struct is used to specify information to update TX WMM scheduler.
 */
/* per-access-category WMM parameters (AIFS, CWmin, CWmax) */
struct ol_tx_ac_param_t {
	uint32_t aifs;
	uint32_t cwmin;
	uint32_t cwmax;
};

struct ol_tx_wmm_param_t {
	struct ol_tx_ac_param_t ac[OL_TX_NUM_WMM_AC];
};

/* ADDBA parameter set field; bit-field order flips with byte order so the
 * in-memory layout matches the over-the-air encoding on both endiannesses */
struct ieee80211_ba_parameterset {
#if _BYTE_ORDER == _BIG_ENDIAN
	uint16_t buffersize:10,	/* B6-15 buffer size */
		 tid:4,		/* B2-5 TID */
		 bapolicy:1,	/* B1 block ack policy */
		 amsdusupported:1;	/* B0 amsdu supported */
#else
	uint16_t amsdusupported:1,	/* B0 amsdu supported */
		 bapolicy:1,	/* B1 block ack policy */
		 tid:4,		/* B2-5 TID */
		 buffersize:10;	/* B6-15 buffer size */
#endif
} __packed;

/* Block-ack starting sequence control field */
struct ieee80211_ba_seqctrl {
#if _BYTE_ORDER == _BIG_ENDIAN
	uint16_t startseqnum:12,	/* B4-15 starting sequence number */
		 fragnum:4;	/* B0-3 fragment number */
#else
	uint16_t fragnum:4,	/* B0-3 fragment number */
		 startseqnum:12;	/* B4-15 starting sequence number */
#endif
} __packed;

/* DELBA parameter set field */
struct ieee80211_delba_parameterset {
#if _BYTE_ORDER == _BIG_ENDIAN
	uint16_t tid:4,		/* B12-15 tid */
		 initiator:1,	/* B11 initiator */
		 reserved0:11;	/* B0-10 reserved */
#else
	uint16_t reserved0:11,	/* B0-10 reserved */
		 initiator:1,	/* B11 initiator */
		 tid:4;		/* B12-15 tid */
#endif
} __packed;

/**
 * ol_txrx_vdev_peer_remove_cb - wma_remove_peer callback
 */
typedef void (*ol_txrx_vdev_peer_remove_cb)(void *handle, uint8_t *bssid,
					    uint8_t vdev_id, void *peer);

/**
 * @typedef tx_pause_callback
 * @brief OSIF function registered with the data path
 */
typedef void (*tx_pause_callback)(uint8_t vdev_id,
				  enum netif_action_type action,
				  enum netif_reason_type reason);

/* IPA micro-controller op callback; op_msg is the firmware op message */
typedef void (*ipa_uc_op_cb_type)(uint8_t *op_msg,
				  void *osif_ctxt);

#endif /* __CDP_TXRX_MOB_DEF_H */
diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mon.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mon.h
new file mode 100644
index 0000000000000000000000000000000000000000..d78cbeba0267afec45fa355bca5d566aa5569a49
--- /dev/null
+++
b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mon.h
@@ -0,0 +1,152 @@
/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file cdp_txrx_mon.h
 * @brief Define the monitor mode API functions
 * called by the host control SW and the OS interface module
 */

#ifndef _CDP_TXRX_MON_H_
#define _CDP_TXRX_MON_H_
#include "cdp_txrx_handle.h"

/**
 * cdp_monitor_set_filter_ucast_data() - set monitor filter for unicast data
 * @soc: data path soc handle
 * @pdev: physical device handle
 * @val: filter value
 *
 * No-op (after asserting) when soc/ops are invalid or the op is unregistered.
 *
 * Return: none
 */
static inline void cdp_monitor_set_filter_ucast_data
	(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, u_int8_t val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->mon_ops ||
	    !soc->ops->mon_ops->txrx_monitor_set_filter_ucast_data)
		return;

	soc->ops->mon_ops->txrx_monitor_set_filter_ucast_data
			(pdev, val);
}

/**
 * cdp_monitor_set_filter_mcast_data() - set monitor filter for multicast data
 * @soc: data path soc handle
 * @pdev: physical device handle
 * @val: filter value
 *
 * Return: none
 */
static inline void cdp_monitor_set_filter_mcast_data
	(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, u_int8_t val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->mon_ops ||
	    !soc->ops->mon_ops->txrx_monitor_set_filter_mcast_data)
		return;

	soc->ops->mon_ops->txrx_monitor_set_filter_mcast_data
			(pdev, val);
}

/**
 * cdp_monitor_set_filter_non_data() - set monitor filter for non-data frames
 * @soc: data path soc handle
 * @pdev: physical device handle
 * @val: filter value
 *
 * Return: none
 */
static inline void cdp_monitor_set_filter_non_data
	(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, u_int8_t val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->mon_ops ||
	    !soc->ops->mon_ops->txrx_monitor_set_filter_non_data)
		return;

	soc->ops->mon_ops->txrx_monitor_set_filter_non_data
			(pdev, val);
}

/**
 * cdp_monitor_get_filter_ucast_data() - get monitor unicast data filter
 * @soc: data path soc handle
 * @vdev_txrx_handle: virtual device handle
 *
 * Return: filter state; 0 (false) when soc/ops are invalid or unregistered
 */
static inline bool cdp_monitor_get_filter_ucast_data
(ol_txrx_soc_handle soc, struct cdp_vdev *vdev_txrx_handle)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->mon_ops ||
	    !soc->ops->mon_ops->txrx_monitor_get_filter_ucast_data)
		return 0;

	return soc->ops->mon_ops->txrx_monitor_get_filter_ucast_data
			(vdev_txrx_handle);
}

/**
 * cdp_monitor_get_filter_mcast_data() - get monitor multicast data filter
 * @soc: data path soc handle
 * @vdev_txrx_handle: virtual device handle
 *
 * Return: filter state; 0 (false) when soc/ops are invalid or unregistered
 */
static inline bool cdp_monitor_get_filter_mcast_data
(ol_txrx_soc_handle soc, struct cdp_vdev *vdev_txrx_handle)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->mon_ops ||
	    !soc->ops->mon_ops->txrx_monitor_get_filter_mcast_data)
		return 0;

	return soc->ops->mon_ops->txrx_monitor_get_filter_mcast_data
			(vdev_txrx_handle);
}

/**
 * cdp_monitor_get_filter_non_data() - get monitor non-data filter
 * @soc: data path soc handle
 * @vdev_txrx_handle: virtual device handle
 *
 * Return: filter state; 0 (false) when soc/ops are invalid or unregistered
 */
static inline bool cdp_monitor_get_filter_non_data
(ol_txrx_soc_handle soc, struct cdp_vdev *vdev_txrx_handle)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->mon_ops ||
	    !soc->ops->mon_ops->txrx_monitor_get_filter_non_data)
		return 0;

	return soc->ops->mon_ops->txrx_monitor_get_filter_non_data
			(vdev_txrx_handle);
}

/**
 * cdp_reset_monitor_mode() - reset monitor mode on the pdev
 * @soc: data path soc handle
 * @pdev: physical device handle
 *
 * Return: op's return value; 0 when soc/ops are invalid or unregistered
 */
static inline int cdp_reset_monitor_mode
(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->mon_ops ||
	    !soc->ops->mon_ops->txrx_reset_monitor_mode)
		return 0;

	return soc->ops->mon_ops->txrx_reset_monitor_mode(pdev);
}
#endif
diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mon_struct.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mon_struct.h
new file mode 100644
index 0000000000000000000000000000000000000000..01e7ae101a57f66fa23440bf5d5d83d4b66f3aeb
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_mon_struct.h
@@ -0,0 +1,230 @@
/*
 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file cdp_txrx_mon_struct.h
 * @brief Define the monitor mode API structure
 * shared by data path and the OS interface module
 */

#ifndef _CDP_TXRX_MON_STRUCT_H_
#define _CDP_TXRX_MON_STRUCT_H_
/* XXX not really a mode; there are really multiple PHY's */
enum cdp_mon_phymode {
	/* autoselect */
	CDP_IEEE80211_MODE_AUTO = 0,
	/* 5GHz, OFDM */
	CDP_IEEE80211_MODE_11A = 1,
	/* 2GHz, CCK */
	CDP_IEEE80211_MODE_11B = 2,
	/* 2GHz, OFDM */
	CDP_IEEE80211_MODE_11G = 3,
	/* 2GHz, GFSK */
	CDP_IEEE80211_MODE_FH = 4,
	/* 5GHz, OFDM, 2x clock dynamic turbo */
	CDP_IEEE80211_MODE_TURBO_A = 5,
	/* 2GHz, OFDM, 2x clock dynamic turbo */
	CDP_IEEE80211_MODE_TURBO_G = 6,
	/* 5Ghz, HT20 */
	CDP_IEEE80211_MODE_11NA_HT20 = 7,
	/* 2Ghz, HT20 */
	CDP_IEEE80211_MODE_11NG_HT20 = 8,
	/* 5Ghz, HT40 (ext ch +1) */
	CDP_IEEE80211_MODE_11NA_HT40PLUS = 9,
	/* 5Ghz, HT40 (ext ch -1) */
	CDP_IEEE80211_MODE_11NA_HT40MINUS = 10,
	/* 2Ghz, HT40 (ext ch +1) */
	CDP_IEEE80211_MODE_11NG_HT40PLUS = 11,
	/* 2Ghz, HT40 (ext ch -1) */
	CDP_IEEE80211_MODE_11NG_HT40MINUS = 12,
	/* 2Ghz, Auto HT40 */
	CDP_IEEE80211_MODE_11NG_HT40 = 13,
	/* 5Ghz, Auto HT40 */
	CDP_IEEE80211_MODE_11NA_HT40 = 14,
	/* 5Ghz, VHT20 */
	CDP_IEEE80211_MODE_11AC_VHT20 = 15,
	/* 5Ghz, VHT40 (Ext ch +1) */
	CDP_IEEE80211_MODE_11AC_VHT40PLUS = 16,
	/* 5Ghz VHT40 (Ext ch -1) */
	CDP_IEEE80211_MODE_11AC_VHT40MINUS = 17,
	/* 5Ghz, VHT40 */
	CDP_IEEE80211_MODE_11AC_VHT40 = 18,
	/* 5Ghz, VHT80 */
	CDP_IEEE80211_MODE_11AC_VHT80 = 19,
	/* 5Ghz, VHT160 */
	CDP_IEEE80211_MODE_11AC_VHT160 = 20,
	/* 5Ghz, VHT80_80 */
	CDP_IEEE80211_MODE_11AC_VHT80_80 = 21,
};

/* CDP_PKT_TYPE_*: preamble/modulation type of a received PPDU */
enum {
	CDP_PKT_TYPE_OFDM = 0,
	CDP_PKT_TYPE_CCK,
	CDP_PKT_TYPE_HT,
	CDP_PKT_TYPE_VHT,
	CDP_PKT_TYPE_HE,
};

/* CDP_SGI_*: guard interval duration of a received PPDU */
enum {
	CDP_SGI_0_8_US = 0,
	CDP_SGI_0_4_US,
	CDP_SGI_1_6_US,
	CDP_SGI_3_2_US,
};

/* CDP_RX_TYPE_*: single-user vs. multi-user reception type */
enum {
	CDP_RX_TYPE_SU = 0,
	CDP_RX_TYPE_MU_MIMO,
	CDP_RX_TYPE_MU_OFDMA,
	CDP_RX_TYPE_MU_OFDMA_MIMO,
};

/* CDP_FULL_RX_BW_*: full receive bandwidth of a received PPDU */
enum {
	CDP_FULL_RX_BW_20 = 0,
	CDP_FULL_RX_BW_40,
	CDP_FULL_RX_BW_80,
	CDP_FULL_RX_BW_160,
};

/* per-frame monitor-mode rx status delivered with each captured frame */
struct cdp_mon_status {
	int rs_numchains;
	int rs_flags;
#define IEEE80211_RX_FCS_ERROR      0x01
#define IEEE80211_RX_MIC_ERROR      0x02
#define IEEE80211_RX_DECRYPT_ERROR  0x04
/* holes in flags here between, ATH_RX_XXXX to IEEE80211_RX_XXX */
#define IEEE80211_RX_KEYMISS        0x200
	int rs_rssi;		/* RSSI (noise floor adjusted) */
	int rs_abs_rssi;	/* absolute RSSI */
	int rs_datarate;	/* data rate received */
	int rs_rateieee;
	int rs_ratephy1;
	int rs_ratephy2;
	int rs_ratephy3;

/* Keep the same as ATH_MAX_ANTENNA */
#define IEEE80211_MAX_ANTENNA 3
	/* RSSI (noise floor adjusted) */
	u_int8_t rs_rssictl[IEEE80211_MAX_ANTENNA];
	/* RSSI (noise floor adjusted) */
	u_int8_t rs_rssiextn[IEEE80211_MAX_ANTENNA];
	/* rs_rssi is valid or not */
	u_int8_t rs_isvalidrssi;

	enum cdp_mon_phymode rs_phymode;
	int rs_freq;

	union {
		u_int8_t data[8];
		u_int64_t tsf;
	} rs_tstamp;

	/*
	 * Detail channel structure of recv frame.
	 * It could be NULL if not available
	 */


#ifdef ATH_SUPPORT_AOW
	u_int16_t rs_rxseq;	/* WLAN Sequence number */
#endif
#ifdef ATH_VOW_EXT_STATS
	/* Lower 16 bits holds the udp checksum offset in the data pkt */
	u_int32_t vow_extstats_offset;
	/* Higher 16 bits contains offset in the data pkt at which vow
	 * ext stats are embedded
	 */
#endif
	u_int8_t rs_isaggr;
	u_int8_t rs_isapsd;
	int16_t rs_noisefloor;
	u_int16_t rs_channel;
#ifdef ATH_SUPPORT_TxBF
	u_int32_t rs_rpttstamp;	/* txbf report time stamp*/
#endif

	/* The following counts are meant to assist in stats calculation.
	 * These variables are incremented only in specific situations, and
	 * should not be relied upon for any purpose other than the original
	 * stats related purpose they have been introduced for.
	 */

	u_int16_t rs_cryptodecapcount;	/* Crypto bytes decapped/demic'ed. */
	u_int8_t rs_padspace;	/* No. of padding bytes present after
				   header in wbuf. */
	u_int8_t rs_qosdecapcount;	/* QoS/HTC bytes decapped. */

	/* End of stats calculation related counts. */

	/*
	 * uint8_t rs_lsig[IEEE80211_LSIG_LEN];
	 * uint8_t rs_htsig[IEEE80211_HTSIG_LEN];
	 * uint8_t rs_servicebytes[IEEE80211_SB_LEN];
	 * uint8_t rs_fcs_error;
	 */

	/* cdp convergence monitor mode status */
	union {
		u_int8_t cdp_data[8];
		u_int64_t cdp_tsf;
	} cdp_rs_tstamp;

	uint8_t cdp_rs_pream_type;
	uint32_t cdp_rs_user_rssi;
	uint8_t cdp_rs_stbc;
	uint8_t cdp_rs_sgi;
	/* NOTE(review): "cdf_" looks like a typo for "cdp_"; renaming would
	 * break existing users of this field, so it is left as-is.
	 */
	uint32_t cdf_rs_rate_mcs;
	uint32_t cdp_rs_reception_type;
	uint32_t cdp_rs_bw;
	uint32_t cdp_rs_nss;
	uint8_t cdp_rs_fcs_err;

};

/* status-ring PPDU TLV state machine values (see status_ppdu_state) */
enum {
	CDP_MON_PPDU_START = 0,
	CDP_MON_PPDU_END,
};

/**
 * struct cdp_pdev_mon_stats
 * @status_ppdu_state: state on PPDU start and end
 * @status_ppdu_start: status ring PPDU start TLV count
 * @status_ppdu_end: status ring PPDU end TLV count
 * @status_ppdu_compl: status ring matching start and end count on PPDU
 * @status_ppdu_start_mis: status ring missing start TLV count on PPDU
 * @status_ppdu_end_mis: status ring missing end TLV count on PPDU
 * @status_ppdu_done: status ring PPDU done TLV count
 * @dest_ppdu_done: destination ring PPDU count
 * @dest_mpdu_done: destination ring MPDU count
 * @dest_mpdu_drop: destination ring MPDU drop count
 */
struct cdp_pdev_mon_stats {
#ifndef REMOVE_MON_DBG_STATS
	uint32_t status_ppdu_state;
	uint32_t status_ppdu_start;
	uint32_t status_ppdu_end;
	uint32_t status_ppdu_compl;
	uint32_t status_ppdu_start_mis;
	uint32_t status_ppdu_end_mis;
#endif
	uint32_t status_ppdu_done;
	uint32_t dest_ppdu_done;
	uint32_t dest_mpdu_done;
	uint32_t dest_mpdu_drop;
};
#endif
diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ocb.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ocb.h
new file mode 100644
index 0000000000000000000000000000000000000000..5fd6d682f0cac3bcc511f8f54ba7d0cef51ffe6f
--- /dev/null
+++
b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ocb.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _CDP_TXRX_OCB_H_ +#define _CDP_TXRX_OCB_H_ +#include +#include "cdp_txrx_handle.h" +/** + * cdp_set_ocb_chan_info() - set OCB channel info to vdev. + * @soc - data path soc handle + * @vdev: vdev handle + * @ocb_set_chan: OCB channel information to be set in vdev. 
+ * + * Return: NONE + */ +static inline void +cdp_set_ocb_chan_info(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, + struct ol_txrx_ocb_set_chan ocb_set_chan) +{ + if (!soc || !soc->ops || !soc->ops->ocb_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->ocb_ops->set_ocb_chan_info) + soc->ops->ocb_ops->set_ocb_chan_info(vdev, + ocb_set_chan); + +} +/** + * cdp_get_ocb_chan_info() - return handle to vdev ocb_channel_info + * @soc - data path soc handle + * @vdev: vdev handle + * + * Return: handle to struct ol_txrx_ocb_chan_info + */ +static inline struct ol_txrx_ocb_chan_info * +cdp_get_ocb_chan_info(ol_txrx_soc_handle soc, struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops || !soc->ops->ocb_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return NULL; + } + + if (soc->ops->ocb_ops->get_ocb_chan_info) + return soc->ops->ocb_ops->get_ocb_chan_info(vdev); + + return NULL; +} +#endif /* _CDP_TXRX_OCB_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ops.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..af6fd6f967da15b600c9974906594593f502f52e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_ops.h @@ -0,0 +1,1158 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_ops.h + * @brief Define the host data path converged API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_CMN_OPS_H_ +#define _CDP_TXRX_CMN_OPS_H_ + +#include +#include +#include "cdp_txrx_handle.h" +#include +#include "wlan_objmgr_psoc_obj.h" + +#ifdef IPA_OFFLOAD +#ifdef CONFIG_IPA_WDI_UNIFIED_API +#include +#else +#include +#endif +#endif + +/** + * bitmap values to indicate special handling of peer_delete + */ +#define CDP_PEER_DELETE_NO_SPECIAL 0 +#define CDP_PEER_DO_NOT_START_UNMAP_TIMER 1 + +/* same as ieee80211_nac_param */ +enum cdp_nac_param_cmd { + /* IEEE80211_NAC_PARAM_ADD */ + CDP_NAC_PARAM_ADD = 1, + /* IEEE80211_NAC_PARAM_DEL */ + CDP_NAC_PARAM_DEL, + /* IEEE80211_NAC_PARAM_LIST */ + CDP_NAC_PARAM_LIST, +}; +/****************************************************************************** + * + * Control Interface (A Interface) + * + *****************************************************************************/ + +struct cdp_cmn_ops { + + int (*txrx_soc_attach_target)(ol_txrx_soc_handle soc); + + int (*txrx_pdev_attach_target)(struct cdp_pdev *pdev); + + struct cdp_vdev *(*txrx_vdev_attach) + (struct cdp_pdev *pdev, uint8_t *vdev_mac_addr, + uint8_t vdev_id, enum wlan_op_mode op_mode); + + void (*txrx_vdev_detach) + (struct cdp_vdev *vdev, ol_txrx_vdev_delete_cb callback, + void *cb_context); + + struct cdp_pdev *(*txrx_pdev_attach) + (ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev, + HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id); + + int (*txrx_pdev_post_attach)(struct cdp_pdev *pdev); + + void (*txrx_pdev_pre_detach)(struct 
cdp_pdev *pdev, int force); + + void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force); + + void *(*txrx_peer_create) + (struct cdp_vdev *vdev, uint8_t *peer_mac_addr); + + void (*txrx_peer_setup) + (struct cdp_vdev *vdev_hdl, void *peer_hdl); + + void (*txrx_peer_teardown) + (struct cdp_vdev *vdev_hdl, void *peer_hdl); + + int (*txrx_peer_add_ast) + (ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl, + uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, + uint32_t flags); + + void (*txrx_peer_del_ast) + (ol_txrx_soc_handle soc, void *ast_hdl); + + int (*txrx_peer_update_ast) + (ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl, + uint8_t *mac_addr, uint32_t flags); + + + void *(*txrx_peer_ast_hash_find) + (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr); + + uint8_t (*txrx_peer_ast_get_pdev_id) + (ol_txrx_soc_handle soc, void *ast_hdl); + + uint8_t (*txrx_peer_ast_get_next_hop) + (ol_txrx_soc_handle soc, void *ast_hdl); + + void (*txrx_peer_ast_set_type) + (ol_txrx_soc_handle soc, void *ast_hdl, + enum cdp_txrx_ast_entry_type type); + + void (*txrx_peer_delete)(void *peer, uint32_t bitmap); + + int (*txrx_set_monitor_mode)(struct cdp_vdev *vdev, + uint8_t smart_monitor); + + void (*txrx_peer_delete_sync)(void *peer, + QDF_STATUS(*delete_cb)( + uint8_t vdev_id, + uint32_t peerid_cnt, + uint16_t *peerid_list), + uint32_t bitmap); + + void (*txrx_peer_unmap_sync_cb_set)(struct cdp_pdev *pdev, + QDF_STATUS(*unmap_resp_cb)( + uint8_t vdev_id, + uint32_t peerid_cnt, + uint16_t *peerid_list)); + + uint8_t (*txrx_get_pdev_id_frm_pdev)(struct cdp_pdev *pdev); + + void (*txrx_set_nac)(struct cdp_peer *peer); + + void (*txrx_set_pdev_tx_capture)(struct cdp_pdev *pdev, int val); + + void (*txrx_get_peer_mac_from_peer_id) + (struct cdp_pdev *pdev_handle, + uint32_t peer_id, uint8_t *peer_mac); + + void (*txrx_vdev_tx_lock)(struct cdp_vdev *vdev); + + void (*txrx_vdev_tx_unlock)(struct cdp_vdev *vdev); + + void (*txrx_ath_getstats)(void *pdev, + struct cdp_dev_stats 
*stats, uint8_t type); + + void (*txrx_set_gid_flag)(struct cdp_pdev *pdev, u_int8_t *mem_status, + u_int8_t *user_position); + + uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_pdev *pdev); + + void (*txrx_if_mgmt_drain)(void *ni, int force); + + void (*txrx_set_curchan)(struct cdp_pdev *pdev, uint32_t chan_mhz); + + void (*txrx_set_privacy_filters) + (struct cdp_vdev *vdev, void *filter, uint32_t num); + + /******************************************************************** + * Data Interface (B Interface) + ********************************************************************/ + + void (*txrx_vdev_register)(struct cdp_vdev *vdev, + void *osif_vdev, struct ol_txrx_ops *txrx_ops); + + int (*txrx_mgmt_send)(struct cdp_vdev *vdev, + qdf_nbuf_t tx_mgmt_frm, uint8_t type); + + int (*txrx_mgmt_send_ext)(struct cdp_vdev *vdev, + qdf_nbuf_t tx_mgmt_frm, uint8_t type, uint8_t use_6mbps, + uint16_t chanfreq); + + /** + * ol_txrx_mgmt_tx_cb - tx management delivery notification + * callback function + */ + + void (*txrx_mgmt_tx_cb_set)(struct cdp_pdev *pdev, uint8_t type, + ol_txrx_mgmt_tx_cb download_cb, + ol_txrx_mgmt_tx_cb ota_ack_cb, + void *ctxt); + + int (*txrx_get_tx_pending)(struct cdp_pdev *pdev); + + /** + * ol_txrx_data_tx_cb - Function registered with the data path + * that is called when tx frames marked as "no free" are + * done being transmitted + */ + + void (*txrx_data_tx_cb_set)(struct cdp_vdev *data_vdev, + ol_txrx_data_tx_cb callback, void *ctxt); + + /******************************************************************* + * Statistics and Debugging Interface (C Interface) + ********************************************************************/ + + int (*txrx_aggr_cfg)(struct cdp_vdev *vdev, int max_subfrms_ampdu, + int max_subfrms_amsdu); + + A_STATUS (*txrx_fw_stats_get)(struct cdp_vdev *vdev, + struct ol_txrx_stats_req *req, + bool per_vdev, bool response_expected); + + int (*txrx_debug)(struct cdp_vdev *vdev, int debug_specs); + + void 
(*txrx_fw_stats_cfg)(struct cdp_vdev *vdev, + uint8_t cfg_stats_type, uint32_t cfg_val); + + void (*txrx_print_level_set)(unsigned level); + + /** + * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev + * @vdev: vdev handle + * + * Return: vdev mac address + */ + uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_vdev *vdev); + + /** + * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of + * vdev + * @vdev: vdev handle + * + * Return: Handle to struct qdf_mac_addr + */ + struct qdf_mac_addr * + (*txrx_get_vdev_struct_mac_addr)(struct cdp_vdev *vdev); + + /** + * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev + * @vdev: vdev handle + * + * Return: Handle to pdev + */ + struct cdp_pdev *(*txrx_get_pdev_from_vdev) + (struct cdp_vdev *vdev); + + /** + * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev + * @vdev: vdev handle + * + * Return: Handle to control pdev + */ + struct cdp_cfg * + (*txrx_get_ctrl_pdev_from_vdev)(struct cdp_vdev *vdev); + + struct cdp_vdev * + (*txrx_get_vdev_from_vdev_id)(struct cdp_pdev *pdev, + uint8_t vdev_id); + + void (*txrx_soc_detach)(void *soc); + + int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken, + uint16_t tid, uint16_t batimeout, uint16_t buffersize, + uint16_t startseqnum); + + void (*addba_responsesetup)(void *peer_handle, uint8_t tid, + uint8_t *dialogtoken, uint16_t *statuscode, + uint16_t *buffersize, uint16_t *batimeout); + + int (*delba_process)(void *peer_handle, + int tid, uint16_t reasoncode); + + void (*set_addba_response)(void *peer_handle, + uint8_t tid, uint16_t statuscode); + + uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle, + uint16_t peer_id, uint8_t *mac_addr); + + void (*set_vdev_dscp_tid_map)(struct cdp_vdev *vdev_handle, + uint8_t map_id); + + void (*flush_cache_rx_queue)(void); + void (*set_pdev_dscp_tid_map)(struct cdp_pdev *pdev, uint8_t map_id, + uint8_t tos, uint8_t tid); + + int (*txrx_stats_request)(struct cdp_vdev 
*vdev, + struct cdp_txrx_stats_req *req); + + QDF_STATUS (*display_stats)(void *psoc, uint16_t value, + enum qdf_stats_verbosity_level level); + void (*txrx_soc_set_nss_cfg)(ol_txrx_soc_handle soc, int config); + + int(*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc); + QDF_STATUS (*txrx_intr_attach)(void *soc); + void (*txrx_intr_detach)(void *soc); + void (*set_pn_check)(struct cdp_vdev *vdev, + struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, + uint32_t *rx_pn); + QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc, + struct cdp_config_params *params); + + void *(*get_dp_txrx_handle)(struct cdp_pdev *pdev_hdl); + void (*set_dp_txrx_handle)(struct cdp_pdev *pdev_hdl, + void *dp_txrx_hdl); + + void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle); + void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle, + void *dp_txrx_handle); + + void (*txrx_peer_reset_ast) + (ol_txrx_soc_handle soc, uint8_t *ast_macaddr, void *vdev_hdl); + + void (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc, + void *vdev_hdl); + + void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc); + + QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc, + uint32_t num_peers); + + ol_txrx_tx_fp tx_send; +}; + +struct cdp_ctrl_ops { + + int + (*txrx_mempools_attach)(void *ctrl_pdev); + int + (*txrx_set_filter_neighbour_peers)( + struct cdp_pdev *pdev, + uint32_t val); + int + (*txrx_update_filter_neighbour_peers)( + struct cdp_pdev *pdev, + uint32_t cmd, uint8_t *macaddr); + /** + * @brief set the safemode of the device + * @details + * This flag is used to bypass the encrypt and decrypt processes when + * send and receive packets. It works like open AUTH mode, HW will + * ctreate all packets as non-encrypt frames because no key installed. + * For rx fragmented frames,it bypasses all the rx defragmentaion. 
+ * + * @param vdev - the data virtual device object + * @param val - the safemode state + * @return - void + */ + + void + (*txrx_set_safemode)( + struct cdp_vdev *vdev, + u_int32_t val); + /** + * @brief configure the drop unencrypted frame flag + * @details + * Rx related. When set this flag, all the unencrypted frames + * received over a secure connection will be discarded + * + * @param vdev - the data virtual device object + * @param val - flag + * @return - void + */ + void + (*txrx_set_drop_unenc)( + struct cdp_vdev *vdev, + u_int32_t val); + + + /** + * @brief set the Tx encapsulation type of the VDEV + * @details + * This will be used to populate the HTT desc packet type field + * during Tx + * @param vdev - the data virtual device object + * @param val - the Tx encap type + * @return - void + */ + void + (*txrx_set_tx_encap_type)( + struct cdp_vdev *vdev, + enum htt_cmn_pkt_type val); + /** + * @brief set the Rx decapsulation type of the VDEV + * @details + * This will be used to configure into firmware and hardware + * which format to decap all Rx packets into, for all peers under + * the VDEV. + * @param vdev - the data virtual device object + * @param val - the Rx decap mode + * @return - void + */ + void + (*txrx_set_vdev_rx_decap_type)( + struct cdp_vdev *vdev, + enum htt_cmn_pkt_type val); + + /** + * @brief get the Rx decapsulation type of the VDEV + * + * @param vdev - the data virtual device object + * @return - the Rx decap type + */ + enum htt_cmn_pkt_type + (*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev); + + /* Is this similar to ol_txrx_peer_state_update() in MCL */ + /** + * @brief Update the authorize peer object at association time + * @details + * For the host-based implementation of rate-control, it + * updates the peer/node-related parameters within rate-control + * context of the peer at association. 
+ * + * @param peer - pointer to the node's object + * @authorize - either to authorize or unauthorize peer + * + * @return none + */ + void + (*txrx_peer_authorize)(struct cdp_peer *peer, + u_int32_t authorize); + + bool + (*txrx_set_inact_params)(struct cdp_pdev *pdev, + u_int16_t inact_check_interval, + u_int16_t inact_normal, + u_int16_t inact_overload); + bool + (*txrx_start_inact_timer)( + struct cdp_pdev *pdev, + bool enable); + + + /** + * @brief Set the overload status of the radio + * @details + * Set the overload status of the radio, updating the inactivity + * threshold and inactivity count for each node. + * + * @param pdev - the data physical device object + * @param overload - whether the radio is overloaded or not + */ + void (*txrx_set_overload)( + struct cdp_pdev *pdev, + bool overload); + /** + * @brief Check the inactivity status of the peer/node + * + * @param peer - pointer to the node's object + * @return true if the node is inactive; otherwise return false + */ + bool + (*txrx_peer_is_inact)(void *peer); + + /** + * @brief Mark inactivity status of the peer/node + * @details + * If it becomes active, reset inactivity count to reload value; + * if the inactivity status changed, notify umac band steering. + * + * @param peer - pointer to the node's object + * @param inactive - whether the node is inactive or not + */ + void (*txrx_mark_peer_inact)( + void *peer, + bool inactive); + + + /* Should be ol_txrx_ctrl_api.h */ + void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val); + + /** + * @brief setting mesh rx filter + * @details + * based on the bits enabled in the filter packets has to be dropped. 
+ * + * @param vdev - the data virtual device object + * @param val - value to set + */ + void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val); + + void (*tx_flush_buffers)(struct cdp_vdev *vdev); + + int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev); + + void (*txrx_set_vdev_param)(struct cdp_vdev *vdev, + enum cdp_vdev_param_type param, uint32_t val); + + void (*txrx_peer_set_nawds)(struct cdp_peer *peer, uint8_t value); + /** + * @brief Set the reo dest ring num of the radio + * @details + * Set the reo destination ring no on which we will receive + * pkts for this radio. + * + * @param pdev - the data physical device object + * @param reo_dest_ring_num - value ranges between 1 - 4 + */ + void (*txrx_set_pdev_reo_dest)( + struct cdp_pdev *pdev, + enum cdp_host_reo_dest_ring reo_dest_ring_num); + + /** + * @brief Get the reo dest ring num of the radio + * @details + * Get the reo destination ring no on which we will receive + * pkts for this radio. + * + * @param pdev - the data physical device object + * @return the reo destination ring number + */ + enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)( + struct cdp_pdev *pdev); + + int (*txrx_wdi_event_sub)(struct cdp_pdev *pdev, void *event_cb_sub, + uint32_t event); + + int (*txrx_wdi_event_unsub)(struct cdp_pdev *pdev, void *event_cb_sub, + uint32_t event); + int (*txrx_get_sec_type)(struct cdp_peer *peer, uint8_t sec_idx); + + void (*txrx_update_mgmt_txpow_vdev)(struct cdp_vdev *vdev, + uint8_t subtype, uint8_t tx_power); + + void (*txrx_set_pdev_param)(struct cdp_pdev *pdev, + enum cdp_pdev_param_type type, uint8_t val); + void * (*txrx_get_pldev)(struct cdp_pdev *pdev); + +#ifdef ATH_SUPPORT_NAC_RSSI + QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_vdev *vdev, + enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr, + uint8_t chan_num); +#endif +}; + +struct cdp_me_ops { + + u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone) + (struct cdp_pdev *pdev, u_int16_t 
buf_count); + + u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)( + struct cdp_pdev *pdev, + u_int16_t buf_count); + + u_int16_t + (*tx_get_mcast_buf_allocated_marked) + (struct cdp_pdev *pdev); + void + (*tx_me_alloc_descriptor)(struct cdp_pdev *pdev); + + void + (*tx_me_free_descriptor)(struct cdp_pdev *pdev); + + uint16_t + (*tx_me_convert_ucast)(struct cdp_vdev *vdev, + qdf_nbuf_t wbuf, u_int8_t newmac[][6], + uint8_t newmaccnt); + /* Should be a function pointer in ol_txrx_osif_ops{} */ + /** + * @brief notify mcast frame indication from FW. + * @details + * This notification will be used to convert + * multicast frame to unicast. + * + * @param pdev - handle to the ctrl SW's physical device object + * @param vdev_id - ID of the virtual device received the special data + * @param msdu - the multicast msdu returned by FW for host inspect + */ + + int (*mcast_notify)(struct cdp_pdev *pdev, + u_int8_t vdev_id, qdf_nbuf_t msdu); +}; + +struct cdp_mon_ops { + + void (*txrx_monitor_set_filter_ucast_data) + (struct cdp_pdev *, u_int8_t val); + void (*txrx_monitor_set_filter_mcast_data) + (struct cdp_pdev *, u_int8_t val); + void (*txrx_monitor_set_filter_non_data) + (struct cdp_pdev *, u_int8_t val); + + bool (*txrx_monitor_get_filter_ucast_data) + (struct cdp_vdev *vdev_txrx_handle); + bool (*txrx_monitor_get_filter_mcast_data) + (struct cdp_vdev *vdev_txrx_handle); + bool (*txrx_monitor_get_filter_non_data) + (struct cdp_vdev *vdev_txrx_handle); + int (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev); + + /* HK advance monitor filter support */ + int (*txrx_set_advance_monitor_filter) + (struct cdp_pdev *pdev, struct cdp_monitor_filter *filter_val); +}; + +struct cdp_host_stats_ops { + int (*txrx_host_stats_get)(struct cdp_vdev *vdev, + struct ol_txrx_stats_req *req); + + void (*txrx_host_stats_clr)(struct cdp_vdev *vdev); + + void (*txrx_host_ce_stats)(struct cdp_vdev *vdev); + + int (*txrx_stats_publish)(struct cdp_pdev *pdev, + void *buf); + /** + * @brief 
Enable enhanced stats functionality. + * + * @param pdev - the physical device object + * @return - void + */ + void (*txrx_enable_enhanced_stats)(struct cdp_pdev *pdev); + + /** + * @brief Disable enhanced stats functionality. + * + * @param pdev - the physical device object + * @return - void + */ + void (*txrx_disable_enhanced_stats)(struct cdp_pdev *pdev); + + /** + * @brief Get the desired stats from the message. + * + * @param pdev - the physical device object + * @param stats_base - stats buffer received from FW + * @param type - stats type. + * @return - pointer to requested stat identified by type + */ + uint32_t * (*txrx_get_stats_base)(struct cdp_pdev *pdev, + uint32_t *stats_base, uint32_t msg_len, uint8_t type); + void + (*tx_print_tso_stats)(struct cdp_vdev *vdev); + + void + (*tx_rst_tso_stats)(struct cdp_vdev *vdev); + + void + (*tx_print_sg_stats)(struct cdp_vdev *vdev); + + void + (*tx_rst_sg_stats)(struct cdp_vdev *vdev); + + void + (*print_rx_cksum_stats)(struct cdp_vdev *vdev); + + void + (*rst_rx_cksum_stats)(struct cdp_vdev *vdev); + + A_STATUS + (*txrx_host_me_stats)(struct cdp_vdev *vdev); + void + (*txrx_per_peer_stats)(struct cdp_pdev *pdev, char *addr); + int (*txrx_host_msdu_ttl_stats)(struct cdp_vdev *vdev, + struct ol_txrx_stats_req *req); + + void + (*print_lro_stats)(struct cdp_vdev *vdev); + + void + (*reset_lro_stats)(struct cdp_vdev *vdev); + + void + (*get_fw_peer_stats)(struct cdp_pdev *pdev, uint8_t *addr, + uint32_t cap); + void + (*get_htt_stats)(struct cdp_pdev *pdev, void *data, + uint32_t data_len); +}; + +struct cdp_wds_ops { + void + (*txrx_set_wds_rx_policy)(struct cdp_vdev *vdev, + u_int32_t val); + void + (*txrx_wds_peer_tx_policy_update)(struct cdp_peer *peer, + int wds_tx_ucast, int wds_tx_mcast); + int (*vdev_set_wds)(void *vdev, uint32_t val); +}; + +struct cdp_raw_ops { + int (*txrx_get_nwifi_mode)(struct cdp_vdev *vdev); + + void (*rsim_get_astentry)(struct cdp_vdev *vdev, + qdf_nbuf_t *pnbuf, + struct 
cdp_raw_ast *raw_ast); +}; + +#ifdef CONFIG_WIN +struct cdp_pflow_ops { + uint32_t(*pflow_update_pdev_params)(void *, + enum _ol_ath_param_t, uint32_t, void *); +}; +#endif /* CONFIG_WIN */ + +#define LRO_IPV4_SEED_ARR_SZ 5 +#define LRO_IPV6_SEED_ARR_SZ 11 + +/** + * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters + * @lro_enable: indicates whether rx_offld is enabled + * @tcp_flag: If the TCP flags from the packet do not match + * the values in this field after masking with TCP flags mask + * below, packet is not rx_offld eligible + * @tcp_flag_mask: field for comparing the TCP values provided + * above with the TCP flags field in the received packet + * @toeplitz_hash_ipv4: contains seed needed to compute the flow id + * 5-tuple toeplitz hash for ipv4 packets + * @toeplitz_hash_ipv6: contains seed needed to compute the flow id + * 5-tuple toeplitz hash for ipv6 packets + */ +struct cdp_lro_hash_config { + uint32_t lro_enable; + uint32_t tcp_flag:9, + tcp_flag_mask:9; + uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ]; + uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ]; +}; + +struct ol_if_ops { + void (*peer_set_default_routing)(void *scn_handle, + uint8_t *peer_macaddr, uint8_t vdev_id, + bool hash_based, uint8_t ring_num); + int (*peer_rx_reorder_queue_setup)(void *scn_handle, + uint8_t vdev_id, uint8_t *peer_mac, + qdf_dma_addr_t hw_qdesc, int tid, uint16_t queue_num); + int (*peer_rx_reorder_queue_remove)(void *scn_handle, + uint8_t vdev_id, uint8_t *peer_macaddr, + uint32_t tid_mask); + int (*peer_unref_delete)(void *scn_handle, uint8_t vdev_id, + uint8_t *peer_macaddr); + bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc); + int (*peer_add_wds_entry)(void *ol_soc_handle, + const uint8_t *dest_macaddr, uint8_t *peer_macaddr, + uint32_t flags); + int (*peer_update_wds_entry)(void *ol_soc_handle, + uint8_t *dest_macaddr, uint8_t *peer_macaddr, + uint32_t flags); + void (*peer_del_wds_entry)(void *ol_soc_handle, + uint8_t 
*wds_macaddr); + QDF_STATUS (*lro_hash_config)(void *scn_handle, + struct cdp_lro_hash_config *rx_offld_hash); + void (*update_dp_stats)(void *soc, void *stats, uint16_t id, + uint8_t type); + uint8_t (*rx_invalid_peer)(void *osif_pdev, void *msg); + + int (*peer_map_event)(void *ol_soc_handle, uint16_t peer_id, uint16_t hw_peer_id, + uint8_t vdev_id, uint8_t *peer_mac_addr, + enum cdp_txrx_ast_entry_type peer_type); + int (*peer_unmap_event)(void *ol_soc_handle, uint16_t peer_id); + + int (*get_dp_cfg_param)(void *ol_soc_handle, enum cdp_cfg_param_type param_num); + + void (*rx_mic_error)(void *ol_soc_handle, + uint16_t vdev_id, void *wh); + uint8_t (*freq_to_channel)(void *ol_soc_handle, uint16_t vdev_id); + + void (*record_act_change)(struct wlan_objmgr_pdev *pdev, + u_int8_t *dstmac, bool active); +#ifdef ATH_SUPPORT_NAC_RSSI + int (*config_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev, + u_int8_t vdev_id, enum cdp_nac_param_cmd cmd, char *bssid, + char *client_macaddr, uint8_t chan_num); + int (*config_bssid_in_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev, + u_int8_t vdev_id, enum cdp_nac_param_cmd cmd, char *bssid); +#endif + int (*peer_sta_kickout)(void *osif_pdev, uint8_t *peer_macaddr); + + /* TODO: Add any other control path calls required to OL_IF/WMA layer */ +}; + +#ifndef CONFIG_WIN +/* From here MCL specific OPs */ +/** + * struct cdp_misc_ops - mcl ops not classified + * @set_ibss_vdev_heart_beat_timer: + * @bad_peer_txctl_set_setting: + * @bad_peer_txctl_update_threshold: + * @hl_tdls_flag_reset: + * @tx_non_std: + * @get_vdev_id: + * @set_wisa_mode: + * @txrx_data_stall_cb_register: + * @txrx_data_stall_cb_deregister: + * @txrx_post_data_stall_event + * @runtime_suspend: + * @runtime_resume: + */ +struct cdp_misc_ops { + uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_vdev *vdev, + uint16_t timer_value_sec); + void (*set_wmm_param)(struct cdp_pdev *cfg_pdev, + struct ol_tx_wmm_param_t wmm_param); + void 
(*bad_peer_txctl_set_setting)(struct cdp_pdev *pdev, int enable, + int period, int txq_limit); + void (*bad_peer_txctl_update_threshold)(struct cdp_pdev *pdev, + int level, int tput_thresh, int tx_limit); + void (*hl_tdls_flag_reset)(struct cdp_vdev *vdev, bool flag); + qdf_nbuf_t (*tx_non_std)(struct cdp_vdev *vdev, + enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list); + uint16_t (*get_vdev_id)(struct cdp_vdev *vdev); + uint32_t (*get_tx_ack_stats)(uint8_t vdev_id); + QDF_STATUS (*set_wisa_mode)(struct cdp_vdev *vdev, bool enable); + QDF_STATUS (*txrx_data_stall_cb_register)(data_stall_detect_cb cb); + QDF_STATUS (*txrx_data_stall_cb_deregister)(data_stall_detect_cb cb); + void (*txrx_post_data_stall_event)( + enum data_stall_log_event_indicator indicator, + enum data_stall_log_event_type data_stall_type, + uint32_t pdev_id, uint32_t vdev_id_bitmap, + enum data_stall_log_recovery_type recovery_type); + QDF_STATUS (*runtime_suspend)(struct cdp_pdev *pdev); + QDF_STATUS (*runtime_resume)(struct cdp_pdev *pdev); + int (*get_opmode)(struct cdp_vdev *vdev); + void (*mark_first_wakeup_packet)(uint8_t value); + void (*update_mac_id)(uint8_t vdev_id, uint8_t mac_id); + void (*flush_rx_frames)(void *peer, bool drop); + A_STATUS (*get_intra_bss_fwd_pkts_count)(uint8_t vdev_id, + uint64_t *fwd_tx_packets, uint64_t *fwd_rx_packets); + void (*pkt_log_init)(struct cdp_pdev *handle, void *scn); + void (*pkt_log_con_service)(struct cdp_pdev *pdev, void *scn); +}; + +/** + * struct cdp_tx_delay_ops - mcl tx delay ops + * @tx_delay: + * @tx_delay_hist: + * @tx_packet_count: + * @tx_set_compute_interval: + */ +struct cdp_tx_delay_ops { + void (*tx_delay)(struct cdp_pdev *pdev, uint32_t *queue_delay_microsec, + uint32_t *tx_delay_microsec, int category); + void (*tx_delay_hist)(struct cdp_pdev *pdev, + uint16_t *bin_values, int category); + void (*tx_packet_count)(struct cdp_pdev *pdev, + uint16_t *out_packet_count, + uint16_t *out_packet_loss_count, int category); + void 
(*tx_set_compute_interval)(struct cdp_pdev *pdev, + uint32_t interval); +}; + +/** + * struct cdp_pmf_ops - mcl protected management frame ops + * @get_pn_info: + */ +struct cdp_pmf_ops { + void (*get_pn_info)(void *peer, uint8_t **last_pn_valid, + uint64_t **last_pn, uint32_t **rmf_pn_replays); +}; + +/** + * struct cdp_cfg_ops - mcl configuration ops + * @set_cfg_rx_fwd_disabled: set rx_fwd_disabled flag + * @set_cfg_packet_log_enabled: set is_packet_log_enabled flag + * @cfg_attach: hardcode the configuration parameters + * @vdev_rx_set_intrabss_fwd: set disable_intrabss_fwd flag + * @is_rx_fwd_disabled: get the rx_fwd_disabled flag, + * 1 enabled, 0 disabled. + * @tx_set_is_mgmt_over_wmi_enabled: set is_mgmt_over_wmi_enabled flag to + * indicate that mgmt over wmi is enabled + * or not, + * 1 for enabled, 0 for disable + * @is_high_latency: get device is high or low latency device, + * 1 high latency bus, 0 low latency bus + * @set_flow_control_parameters: set flow control parameters + * @set_flow_steering: set flow_steering_enabled flag + * @set_ptp_rx_opt_enabled: set is_ptp_rx_opt_enabled flag + * @set_new_htt_msg_format: set new_htt_msg_format flag + * @set_peer_unmap_conf_support: set enable_peer_unmap_conf_support flag + * @get_peer_unmap_conf_support: get enable_peer_unmap_conf_support flag + * @set_tx_compl_tsf64: set enable_tx_compl_tsf64 flag, + * 1 enabled, 0 disabled. + * @get_tx_compl_tsf64: get enable_tx_compl_tsf64 flag, + * 1 enabled, 0 disabled. 
+ */ +struct cdp_cfg_ops { + void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev, + uint8_t disable_rx_fwd); + void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev, + uint8_t val); + struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param); + void (*vdev_rx_set_intrabss_fwd)(struct cdp_vdev *vdev, bool val); + uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev); + void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value); + int (*is_high_latency)(struct cdp_cfg *cfg_pdev); + void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev, + void *param); + void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val); + void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val); + void (*set_new_htt_msg_format)(uint8_t val); + void (*set_peer_unmap_conf_support)(bool val); + bool (*get_peer_unmap_conf_support)(void); + void (*set_tx_compl_tsf64)(bool val); + bool (*get_tx_compl_tsf64)(void); +}; + +/** + * struct cdp_flowctl_ops - mcl flow control + * @register_pause_cb: + * @set_desc_global_pool_size: + * @dump_flow_pool_info: + */ +struct cdp_flowctl_ops { + QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc, + struct cdp_pdev *pdev, + uint8_t vdev_id); + void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc, + struct cdp_pdev *pdev, + uint8_t vdev_id); + QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc, + tx_pause_callback); + void (*set_desc_global_pool_size)(uint32_t num_msdu_desc); + + void (*dump_flow_pool_info)(void *); +}; + +/** + * struct cdp_lflowctl_ops - mcl legacy flow control ops + * @register_tx_flow_control: + * @deregister_tx_flow_control_cb: + * @flow_control_cb: + * @get_tx_resource: + * @ll_set_tx_pause_q_depth: + * @vdev_flush: + * @vdev_pause: + * @vdev_unpause: + */ +struct cdp_lflowctl_ops { + int (*register_tx_flow_control)(uint8_t vdev_id, + ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx, + ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause); + int 
(*deregister_tx_flow_control_cb)(uint8_t vdev_id); + void (*flow_control_cb)(struct cdp_vdev *vdev, bool tx_resume); + bool (*get_tx_resource)(uint8_t sta_id, + unsigned int low_watermark, + unsigned int high_watermark_offset); + int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth); + void (*vdev_flush)(struct cdp_vdev *vdev); + void (*vdev_pause)(struct cdp_vdev *vdev, uint32_t reason); + void (*vdev_unpause)(struct cdp_vdev *vdev, uint32_t reason); +}; + +#ifdef IPA_OFFLOAD +/** + * struct cdp_ipa_ops - mcl ipa data path ops + * @ipa_get_resource: + * @ipa_set_doorbell_paddr: + * @ipa_set_active: + * @ipa_op_response: + * @ipa_register_op_cb: + * @ipa_get_stat: + * @ipa_tx_data_frame: + */ +struct cdp_ipa_ops { + QDF_STATUS (*ipa_get_resource)(struct cdp_pdev *pdev); + QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_pdev *pdev); + QDF_STATUS (*ipa_set_active)(struct cdp_pdev *pdev, bool uc_active, + bool is_tx); + QDF_STATUS (*ipa_op_response)(struct cdp_pdev *pdev, uint8_t *op_msg); + QDF_STATUS (*ipa_register_op_cb)(struct cdp_pdev *pdev, + void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt), + void *usr_ctxt); + QDF_STATUS (*ipa_get_stat)(struct cdp_pdev *pdev); + qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_vdev *vdev, qdf_nbuf_t skb); + void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev, + uint32_t value); +#ifdef FEATURE_METERING + QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_pdev *pdev, + uint8_t reset_stats); + QDF_STATUS (*ipa_uc_set_quota)(struct cdp_pdev *pdev, + uint64_t quota_bytes); +#endif + QDF_STATUS (*ipa_enable_autonomy)(struct cdp_pdev *pdev); + QDF_STATUS (*ipa_disable_autonomy)(struct cdp_pdev *pdev); +#ifdef CONFIG_IPA_WDI_UNIFIED_API + QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb, + void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb, + uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled, + uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle, + bool is_smmu_enabled, 
qdf_ipa_sys_connect_params_t *sys_in); +#else /* CONFIG_IPA_WDI_UNIFIED_API */ + QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb, + void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb, + uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled, + uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle); +#endif /* CONFIG_IPA_WDI_UNIFIED_API */ + QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle, + uint32_t rx_pipe_handle); + QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr, + qdf_ipa_client_type_t prod_client, + qdf_ipa_client_type_t cons_client, + uint8_t session_id, bool is_ipv6_enabled); + QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled); + QDF_STATUS (*ipa_enable_pipes)(struct cdp_pdev *pdev); + QDF_STATUS (*ipa_disable_pipes)(struct cdp_pdev *pdev); + QDF_STATUS (*ipa_set_perf_level)(int client, + uint32_t max_supported_bw_mbps); +}; +#endif + +/** + * struct cdp_bus_ops - mcl bus suspend/resume ops + * @bus_suspend: + * @bus_resume: + */ +struct cdp_bus_ops { + QDF_STATUS (*bus_suspend)(struct cdp_pdev *opaque_pdev); + QDF_STATUS (*bus_resume)(struct cdp_pdev *opaque_pdev); +}; + +/** + * struct cdp_ocb_ops - mcl ocb ops + * @set_ocb_chan_info: + * @get_ocb_chan_info: + */ +struct cdp_ocb_ops { + void (*set_ocb_chan_info)(struct cdp_vdev *vdev, + struct ol_txrx_ocb_set_chan ocb_set_chan); + struct ol_txrx_ocb_chan_info * + (*get_ocb_chan_info)(struct cdp_vdev *vdev); +}; + +/** + * struct cdp_peer_ops - mcl peer related ops + * @register_peer: + * @clear_peer: + * @cfg_attach: + * @find_peer_by_addr: + * @find_peer_by_addr_and_vdev: + * @local_peer_id: + * @peer_find_by_local_id: + * @peer_state_update: + * @get_vdevid: + * @get_vdev_by_sta_id: + * @register_ocb_peer: + * @peer_get_peer_mac_addr: + * @get_peer_state: + * @get_vdev_for_peer: + * @update_ibss_add_peer_num_of_vdev: + * @remove_peers_for_vdev: + * @remove_peers_for_vdev_no_lock: + * @copy_mac_addr_raw: + * @add_last_real_peer: + * 
@is_vdev_restore_last_peer: + * @update_last_real_peer: + */ +struct cdp_peer_ops { + QDF_STATUS (*register_peer)(struct cdp_pdev *pdev, + struct ol_txrx_desc_type *sta_desc); + QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev, uint8_t sta_id); + QDF_STATUS (*change_peer_state)(uint8_t sta_id, + enum ol_txrx_peer_state sta_state, + bool roam_synch_in_progress); + void * (*peer_get_ref_by_addr)(struct cdp_pdev *pdev, + u8 *peer_addr, uint8_t *peer_id, + enum peer_debug_id_type debug_id); + void (*peer_release_ref)(void *peer, enum peer_debug_id_type debug_id); + void * (*find_peer_by_addr)(struct cdp_pdev *pdev, + uint8_t *peer_addr, uint8_t *peer_id); + void * (*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev, + struct cdp_vdev *vdev, + uint8_t *peer_addr, uint8_t *peer_id); + uint16_t (*local_peer_id)(void *peer); + void * (*peer_find_by_local_id)(struct cdp_pdev *pdev, + uint8_t local_peer_id); + QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev, + uint8_t *peer_addr, + enum ol_txrx_peer_state state); + QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id); + struct cdp_vdev * (*get_vdev_by_sta_id)(struct cdp_pdev *pdev, + uint8_t sta_id); + QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr, uint8_t *peer_id); + uint8_t * (*peer_get_peer_mac_addr)(void *peer); + int (*get_peer_state)(void *peer); + struct cdp_vdev * (*get_vdev_for_peer)(void *peer); + int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev, + int16_t peer_num_delta); + void (*remove_peers_for_vdev)(struct cdp_vdev *vdev, + ol_txrx_vdev_peer_remove_cb callback, + void *callback_context, bool remove_last_peer); + void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev, + ol_txrx_vdev_peer_remove_cb callback, + void *callback_context); + void (*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr); + void (*add_last_real_peer)(struct cdp_pdev *pdev, + struct cdp_vdev *vdev, uint8_t *peer_id); + bool (*is_vdev_restore_last_peer)(void *peer); + void 
(*update_last_real_peer)(struct cdp_pdev *pdev, void *peer,
+ uint8_t *peer_id, bool restore_last_peer);
+ void (*peer_detach_force_delete)(void *peer);
+};
+
+/**
+ * struct cdp_throttle_ops - mcl throttle ops
+ * @throttle_init_period:
+ * @throttle_set_level:
+ */
+struct cdp_throttle_ops {
+ void (*throttle_init_period)(struct cdp_pdev *pdev, int period,
+ uint8_t *dutycycle_level);
+ void (*throttle_set_level)(struct cdp_pdev *pdev, int level);
+};
+
+/**
+ * struct cdp_mob_stats_ops - mcl mob stats ops
+ * @clear_stats:
+ * @stats:
+ */
+struct cdp_mob_stats_ops {
+ void (*clear_stats)(uint16_t bitmap);
+ int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
+};
+#endif /* CONFIG_WIN */
+
+#ifdef RECEIVE_OFFLOAD
+/**
+ * struct cdp_rx_offld_ops - mcl receive offload ops
+ * @register_rx_offld_flush_cb:
+ * @deregister_rx_offld_flush_cb:
+ */
+struct cdp_rx_offld_ops {
+ void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
+ void (*deregister_rx_offld_flush_cb)(void);
+};
+#endif
+
+struct cdp_ops {
+ struct cdp_cmn_ops *cmn_drv_ops;
+ struct cdp_ctrl_ops *ctrl_ops;
+ struct cdp_me_ops *me_ops;
+ struct cdp_mon_ops *mon_ops;
+ struct cdp_host_stats_ops *host_stats_ops;
+ struct cdp_wds_ops *wds_ops;
+ struct cdp_raw_ops *raw_ops;
+ struct cdp_pflow_ops *pflow_ops;
+#ifndef CONFIG_WIN
+ struct cdp_misc_ops *misc_ops;
+ struct cdp_cfg_ops *cfg_ops;
+ struct cdp_flowctl_ops *flowctl_ops;
+ struct cdp_lflowctl_ops *l_flowctl_ops;
+#ifdef IPA_OFFLOAD
+ struct cdp_ipa_ops *ipa_ops;
+#endif
+#ifdef RECEIVE_OFFLOAD
+ struct cdp_rx_offld_ops *rx_offld_ops;
+#endif
+ struct cdp_bus_ops *bus_ops;
+ struct cdp_ocb_ops *ocb_ops;
+ struct cdp_peer_ops *peer_ops;
+ struct cdp_throttle_ops *throttle_ops;
+ struct cdp_mob_stats_ops *mob_stats_ops;
+ struct cdp_tx_delay_ops *delay_ops;
+ struct cdp_pmf_ops *pmf_ops;
+#endif /* CONFIG_WIN */
+};
+#endif
diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_peer_ops.h
b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_peer_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..fb46834b32fea470a306fcd0db28124f7cb60693 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_peer_ops.h @@ -0,0 +1,649 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */
+
+/**
+ * @file cdp_txrx_peer_ops.h
+ * @brief Define the host data path peer API functions
+ * called by the host control SW and the OS interface module
+ */
+#ifndef _CDP_TXRX_PEER_H_
+#define _CDP_TXRX_PEER_H_
+#include
+#include "cdp_txrx_handle.h"
+
+/**
+ * cdp_peer_register() - Register peer into physical device
+ * @soc - data path soc handle
+ * @pdev - data path device instance
+ * @sta_desc - peer description
+ *
+ * Register peer into physical device
+ *
+ * Return: QDF_STATUS_SUCCESS registration success
+ * QDF_STATUS_E_NOSUPPORT not support this feature
+ */
+static inline QDF_STATUS
+cdp_peer_register(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
+ struct ol_txrx_desc_type *sta_desc)
+{
+ if (!soc || !soc->ops || !soc->ops->peer_ops) {
+ QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+ "%s invalid instance", __func__);
+ return QDF_STATUS_E_INVAL;
+ }
+
+ if (soc->ops->peer_ops->register_peer)
+ return soc->ops->peer_ops->register_peer(pdev, sta_desc);
+
+ return QDF_STATUS_E_NOSUPPORT;
+}
+
+/**
+ * cdp_clear_peer() - remove peer from physical device
+ * @soc - data path soc handle
+ * @pdev - data path device instance
+ * @sta_id - local peer id
+ *
+ * remove peer from physical device
+ *
+ * Return: QDF_STATUS_SUCCESS registration success
+ * QDF_STATUS_E_NOSUPPORT not support this feature
+ */
+static inline QDF_STATUS
+cdp_clear_peer(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, uint8_t sta_id)
+{
+ if (!soc || !soc->ops || !soc->ops->peer_ops) {
+ QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+ "%s invalid instance", __func__);
+ return QDF_STATUS_E_INVAL;
+ }
+
+ if (soc->ops->peer_ops->clear_peer)
+ return soc->ops->peer_ops->clear_peer(pdev, sta_id);
+
+ return QDF_STATUS_E_NOSUPPORT;
+}
+
+/**
+ * cdp_peer_register_ocb_peer() - register ocb peer from physical device
+ * @soc - data path soc handle
+ * @cds_ctx - cds void context
+ * @mac_addr - mac address for ocb self peer
+ * @peer_id - local peer id
+ *
+ * register ocb
peer from physical device + * + * Return: QDF_STATUS_SUCCESS registration success + * QDF_STATUS_E_NOSUPPORT not support this feature + */ +static inline QDF_STATUS +cdp_peer_register_ocb_peer(ol_txrx_soc_handle soc, + uint8_t *mac_addr, uint8_t *peer_id) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->peer_ops->register_ocb_peer) + return soc->ops->peer_ops->register_ocb_peer(mac_addr, peer_id); + + return QDF_STATUS_E_NOSUPPORT; +} + +/** + * cdp_peer_remove_for_vdev() - remove peer instance from virtual interface + * @soc - data path soc handle + * @vdev - virtual interface instance + * @callback - remove done notification callback function pointer + * @callback_context - callback caller context + * @remove_last_peer - removed peer is last peer or not + * + * remove peer instance from virtual interface + * + * Return: NONE + */ +static inline void +cdp_peer_remove_for_vdev(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, ol_txrx_vdev_peer_remove_cb callback, + void *callback_context, bool remove_last_peer) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->peer_ops->remove_peers_for_vdev) + return soc->ops->peer_ops->remove_peers_for_vdev( + vdev, callback, callback_context, remove_last_peer); + + return; +} + +/** + * cdp_peer_remove_for_vdev_no_lock() - remove peer instance from vdev + * @soc - data path soc handle + * @vdev - virtual interface instance + * @callback - remove done notification callback function pointer + * @callback_context - callback caller context + * + * remove peer instance from virtual interface without lock + * + * Return: NONE + */ +static inline void +cdp_peer_remove_for_vdev_no_lock(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, + ol_txrx_vdev_peer_remove_cb callback, + 
void *callback_context) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->peer_ops->remove_peers_for_vdev_no_lock) + return soc->ops->peer_ops->remove_peers_for_vdev_no_lock( + vdev, callback, callback_context); +} + +/** + * cdp_peer_get_ref_by_addr() - Find peer by peer mac address and inc peer ref + * @soc - data path soc handle + * @pdev - data path device instance + * @peer_addr - peer mac address + * @peer_id - local peer id with target mac address + * @debug_id - debug_id to track caller + * + * To release the peer ref, cdp_peer_release_ref needs to be called. + * + * Return: peer instance void pointer + * NULL cannot find target peer + */ +static inline void +*cdp_peer_get_ref_by_addr(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + u8 *peer_addr, u8 *peer_id, + enum peer_debug_id_type debug_id) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return NULL; + } + + if (soc->ops->peer_ops->peer_get_ref_by_addr) + return soc->ops->peer_ops->peer_get_ref_by_addr( + pdev, peer_addr, peer_id, debug_id); + + return NULL; +} + +/** + * cdp_peer_release_ref() - Release peer reference + * @soc - data path soc handle + * @peer - peer pointer + * @debug_id - debug_id to track caller + * + * Return:void + */ +static inline void +cdp_peer_release_ref(ol_txrx_soc_handle soc, void *peer, + enum peer_debug_id_type debug_id) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->peer_ops->peer_release_ref) + soc->ops->peer_ops->peer_release_ref(peer, debug_id); +} + +/** + * cdp_peer_find_by_addr() - Find peer by peer mac address + * @soc - data path soc handle + * @pdev - data path device instance + * @peer_addr - peer mac address + * 
@peer_id - local peer id with target mac address + * + * Find peer and local peer id by peer mac address + * + * Return: peer instance void pointer + * NULL cannot find target peer + */ +static inline void +*cdp_peer_find_by_addr(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + uint8_t *peer_addr, uint8_t *peer_id) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return NULL; + } + + if (soc->ops->peer_ops->find_peer_by_addr) + return soc->ops->peer_ops->find_peer_by_addr( + pdev, peer_addr, peer_id); + + return NULL; +} + +/** + * cdp_peer_find_by_addr_and_vdev() - Find peer by peer mac address within vdev + * @soc - data path soc handle + * @pdev - data path device instance + * @vdev - virtual interface instance + * @peer_addr - peer mac address + * @peer_id - local peer id with target mac address + * + * Find peer by peer mac address within vdev + * + * Return: peer instance void pointer + * NULL cannot find target peer + */ +static inline void +*cdp_peer_find_by_addr_and_vdev(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + struct cdp_vdev *vdev, uint8_t *peer_addr, uint8_t *peer_id) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return NULL; + } + + if (soc->ops->peer_ops->find_peer_by_addr_and_vdev) + return soc->ops->peer_ops->find_peer_by_addr_and_vdev( + pdev, vdev, peer_addr, peer_id); + + return NULL; +} + +/** + * cdp_peer_find_by_local_id() - Find peer by local peer id + * @soc - data path soc handle + * @pdev - data path device instance + * @local_peer_id - local peer id want to find + * + * Find peer by local peer id within physical device + * + * Return: peer instance void pointer + * NULL cannot find target peer + */ +static inline void +*cdp_peer_find_by_local_id(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + uint8_t local_peer_id) +{ + if (!soc || 
!soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return NULL; + } + + if (soc->ops->peer_ops->peer_find_by_local_id) + return soc->ops->peer_ops->peer_find_by_local_id( + pdev, local_peer_id); + + return NULL; +} + +/** + * cdp_peer_state_update() - update peer local state + * @soc - data path soc handle + * @pdev - data path device instance + * @peer_addr - peer mac address + * @state - new peer local state + * + * update peer local state + * + * Return: QDF_STATUS_SUCCESS registration success + * QDF_STATUS_E_NOSUPPORT not support this feature + */ +static inline QDF_STATUS +cdp_peer_state_update(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + uint8_t *peer_addr, enum ol_txrx_peer_state state) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->peer_ops->peer_state_update) + return soc->ops->peer_ops->peer_state_update( + pdev, peer_addr, state); + + return QDF_STATUS_E_NOSUPPORT; +} + +/** + * cdp_peer_state_get() - Get local peer state + * @soc - data path soc handle + * @peer - peer instance + * + * Get local peer state + * + * Return: peer status + */ +static inline int +cdp_peer_state_get(ol_txrx_soc_handle soc, void *peer) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return 0; + } + + if (soc->ops->peer_ops->get_peer_state) + return soc->ops->peer_ops->get_peer_state(peer); + + return 0; +} + +/** + * cdp_peer_get_local_peer_id() - Find local peer id within peer instance + * @soc - data path soc handle + * @peer - peer instance + * + * Find local peer id within peer instance + * + * Return: local peer id + * HTT_INVALID_PEER Invalid peer + */ +static inline uint16_t +cdp_peer_get_local_peer_id(ol_txrx_soc_handle soc, void *peer) +{ + if (!soc 
|| !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return HTT_INVALID_PEER; + } + + if (soc->ops->peer_ops->local_peer_id) + return soc->ops->peer_ops->local_peer_id(peer); + + return HTT_INVALID_PEER; +} + +/** + * cdp_peer_get_vdevid() - Get virtual interface id which peer registered + * @soc - data path soc handle + * @peer - peer instance + * @vdev_id - virtual interface id which peer registered + * + * Get virtual interface id which peer registered + * + * Return: QDF_STATUS_SUCCESS registration success + * QDF_STATUS_E_NOSUPPORT not support this feature + */ +static inline QDF_STATUS +cdp_peer_get_vdevid(ol_txrx_soc_handle soc, void *peer, uint8_t *vdev_id) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return QDF_STATUS_E_INVAL; + } + + if (soc->ops->peer_ops->get_vdevid) + return soc->ops->peer_ops->get_vdevid(peer, vdev_id); + + return QDF_STATUS_E_NOSUPPORT; +} + +/** + * cdp_peer_get_vdev_by_sta_id() - Get vdev instance by local peer id + * @soc - data path soc handle + * @pdev - data path device instance + * @sta_id - local peer id + * + * Get virtual interface id by local peer id + * + * Return: Virtual interface instance + * NULL in case cannot find + */ +static inline struct cdp_vdev +*cdp_peer_get_vdev_by_sta_id(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + uint8_t sta_id) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return NULL; + } + + if (soc->ops->peer_ops->get_vdev_by_sta_id) + return soc->ops->peer_ops->get_vdev_by_sta_id(pdev, sta_id); + + return NULL; +} + +/** + * cdp_peer_get_peer_mac_addr() - Get peer mac address + * @soc - data path soc handle + * @peer - peer instance + * + * Get peer mac address + * + * Return: peer mac address pointer + * NULL in case cannot 
find + */ +static inline uint8_t +*cdp_peer_get_peer_mac_addr(ol_txrx_soc_handle soc, void *peer) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return NULL; + } + + if (soc->ops->peer_ops->peer_get_peer_mac_addr) + return soc->ops->peer_ops->peer_get_peer_mac_addr(peer); + + return NULL; +} + +/** + * cdp_peer_get_vdev() - Get virtual interface instance which peer belongs + * @soc - data path soc handle + * @peer - peer instance + * + * Get virtual interface instance which peer belongs + * + * Return: virtual interface instance pointer + * NULL in case cannot find + */ +static inline struct cdp_vdev +*cdp_peer_get_vdev(ol_txrx_soc_handle soc, void *peer) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return NULL; + } + + if (soc->ops->peer_ops->get_vdev_for_peer) + return soc->ops->peer_ops->get_vdev_for_peer(peer); + + return NULL; +} + +/** + * cdp_peer_update_ibss_add_peer_num_of_vdev() - update number of peer + * @soc - data path soc handle + * @vdev - virtual interface instance + * @peer_num_delta - number of peer should be updated + * + * update number of peer + * + * Return: updated number of peer + * 0 fail + */ +static inline int16_t +cdp_peer_update_ibss_add_peer_num_of_vdev(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, int16_t peer_num_delta) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return 0; + } + + if (soc->ops->peer_ops->update_ibss_add_peer_num_of_vdev) + return soc->ops->peer_ops->update_ibss_add_peer_num_of_vdev( + vdev, peer_num_delta); + + return 0; +} + +/** + * cdp_peer_copy_mac_addr_raw() - copy peer mac address + * @soc - data path soc handle + * @vdev - virtual interface instance + * @bss_addr - mac address should be copied + * + * copy peer mac 
address + * + * Return: none + */ +static inline void +cdp_peer_copy_mac_addr_raw(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, uint8_t *bss_addr) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->peer_ops->copy_mac_addr_raw) + return soc->ops->peer_ops->copy_mac_addr_raw(vdev, bss_addr); + + return; +} + +/** + * cdp_peer_add_last_real_peer() - Add peer with last peer marking + * @soc - data path soc handle + * @pdev - data path device instance + * @vdev - virtual interface instance + * @peer_id - local peer id + * + * copy peer mac address + * + * Return: none + */ +static inline void +cdp_peer_add_last_real_peer(ol_txrx_soc_handle soc, + struct cdp_pdev *pdev, struct cdp_vdev *vdev, uint8_t *peer_id) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->peer_ops->add_last_real_peer) + return soc->ops->peer_ops->add_last_real_peer( + pdev, vdev, peer_id); + return; +} + +/** + * cdp_peer_is_vdev_restore_last_peer() - restore last peer + * @soc - data path soc handle + * @peer - peer instance pointer + * + * restore last peer + * + * Return: true, restore success + * fasle, restore fail + */ +static inline bool +cdp_peer_is_vdev_restore_last_peer(ol_txrx_soc_handle soc, void *peer) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return false; + } + + if (soc->ops->peer_ops->is_vdev_restore_last_peer) + return soc->ops->peer_ops->is_vdev_restore_last_peer(peer); + + return false; +} + +/** + * cdp_peer_update_last_real_peer() - update last real peer + * @soc - data path soc handle + * @pdev - data path device instance + * @peer - peer instance pointer + * @peer_id - local peer id + * @restore_last_peer - restore last peer or not 
+ * + * update last real peer + * + * Return: none + */ +static inline void +cdp_peer_update_last_real_peer(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + void *peer, uint8_t *peer_id, bool restore_last_peer) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->peer_ops->update_last_real_peer) + return soc->ops->peer_ops->update_last_real_peer(pdev, peer, + peer_id, restore_last_peer); + + return; +} + +/** + * ol_txrx_peer_detach_force_delete() - Detach and delete a peer's data object + * @peer - the object to detach + * + * Detach a peer and force the peer object to be removed. It is called during + * roaming scenario when the firmware has already deleted a peer. + * Peer object is freed immediately to avoid duplicate peers during roam sync + * indication processing. + * + * Return: None + */ +static inline void cdp_peer_detach_force_delete(ol_txrx_soc_handle soc, + void *peer) +{ + if (!soc || !soc->ops || !soc->ops->peer_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->peer_ops->peer_detach_force_delete) + return soc->ops->peer_ops->peer_detach_force_delete(peer); + + return; +} +#endif /* _CDP_TXRX_PEER_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_pflow.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_pflow.h new file mode 100644 index 0000000000000000000000000000000000000000..1a7ea9038aa702cb7e3008f1b34dd21e623f2bb2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_pflow.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_pflow.h + * @brief Define the host data path peer flow API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_PFLOW_H_ +#define _CDP_TXRX_PFLOW_H_ + +#include +#include "cdp_txrx_ops.h" +#include "cdp_txrx_handle.h" + +static inline uint32_t cdp_pflow_update_pdev_params + (ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + enum _ol_ath_param_t param, uint32_t val, void *ctx) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->pflow_ops || + !soc->ops->pflow_ops->pflow_update_pdev_params) + return 0; + + return soc->ops->pflow_ops->pflow_update_pdev_params + (pdev, param, val, ctx); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_pmf.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_pmf.h new file mode 100644 index 0000000000000000000000000000000000000000..48f644bbd0fc640e41a96385a3d1897cd1a6b2e9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_pmf.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _CDP_TXRX_PMF_H_ +#define _CDP_TXRX_PMF_H_ + +/** + * cdp_get_pn_info() - Returns pn info from peer + * @soc - data path soc handle + * @peer: handle to peer + * @last_pn_valid: return last_rmf_pn_valid value from peer. + * @last_pn: return last_rmf_pn value from peer. + * @rmf_pn_replays: return rmf_pn_replays value from peer. + * + * Return: NONE + */ +static inline void +cdp_get_pn_info(ol_txrx_soc_handle soc, void *peer, uint8_t **last_pn_valid, + uint64_t **last_pn, uint32_t **rmf_pn_replays) +{ + if (!soc || !soc->ops || !soc->ops->pmf_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->pmf_ops->get_pn_info) + return soc->ops->pmf_ops->get_pn_info( + peer, last_pn_valid, last_pn, rmf_pn_replays); + + return; +} +#endif /* _CDP_TXRX_PMF_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_raw.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_raw.h new file mode 100644 index 0000000000000000000000000000000000000000..a15e37a3713ea144cf87a2ab8593aa82a546ea9c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_raw.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_raw.h + * @brief Define the host data path raw mode API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_RAW_H_ +#define _CDP_TXRX_RAW_H_ + +#include "cdp_txrx_handle.h" +#include "cdp_txrx_ops.h" +/* TODO: adf need to be replaced with qdf */ +static inline int cdp_get_nwifi_mode(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->raw_ops || + !soc->ops->raw_ops->txrx_get_nwifi_mode) + return 0; + + return soc->ops->raw_ops->txrx_get_nwifi_mode(vdev); +} + +/** + * @brief finds the ast entry for the packet + * @details: Finds the ast entry i.e 4th address for the packet based on the + * details in the netbuf. + * + * @param vdev - the data virtual device object + * @param pnbuf - pointer to nbuf + * @param raw_ast - pointer to fill ast information + * + * @return - 0 on success, -1 on error, 1 if more nbufs need to be consumed. 
+ */ + +static inline void +cdp_rawsim_get_astentry (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, + qdf_nbuf_t *pnbuf, struct cdp_raw_ast *raw_ast) +{ + + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->raw_ops || + !soc->ops->raw_ops->rsim_get_astentry) + return; + + soc->ops->raw_ops->rsim_get_astentry(vdev, pnbuf, raw_ast); +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_stats.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..69814a071e22dd0a16d25dca813065b2132388d8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_stats.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * @file cdp_txrx_stats.h + * @brief Define the host data path statistics API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_STATS_H_ +#define _CDP_TXRX_STATS_H_ +#include + +static inline void +cdp_clear_stats(ol_txrx_soc_handle soc, uint16_t bitmap) +{ + + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return; + } + + if (!soc->ops->mob_stats_ops || + !soc->ops->mob_stats_ops->clear_stats) + return; + + soc->ops->mob_stats_ops->clear_stats(bitmap); +} + +static inline int +cdp_stats(ol_txrx_soc_handle soc, uint8_t vdev_id, char *buffer, + unsigned int buf_len) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->mob_stats_ops || + !soc->ops->mob_stats_ops->stats) + return 0; + + return soc->ops->mob_stats_ops->stats(vdev_id, buffer, buf_len); +} + +#endif /* _CDP_TXRX_STATS_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_stats_struct.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_stats_struct.h new file mode 100644 index 0000000000000000000000000000000000000000..33b6f5bf8c408295ff4085335c34977543681821 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_stats_struct.h @@ -0,0 +1,1256 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_stats_struct.h + * @brief Define the host data path stats API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_STATS_STRUCT_H_ +#define _CDP_TXRX_STATS_STRUCT_H_ +#ifndef CONFIG_WIN +#include +#endif + +#define TXRX_STATS_LEVEL_OFF 0 +#define TXRX_STATS_LEVEL_BASIC 1 +#define TXRX_STATS_LEVEL_FULL 2 + +#define BSS_CHAN_INFO_READ 1 +#define BSS_CHAN_INFO_READ_AND_CLEAR 2 + +#define TX_FRAME_TYPE_DATA 0 +#define TX_FRAME_TYPE_MGMT 1 +#define TX_FRAME_TYPE_BEACON 2 + +#ifndef TXRX_STATS_LEVEL +#define TXRX_STATS_LEVEL TXRX_STATS_LEVEL_BASIC +#endif + +/* 1 additional MCS is for invalid values */ +#define MAX_MCS (12 + 1) +#define MAX_MCS_11A 8 +#define MAX_MCS_11B 7 +#define MAX_MCS_11AC 12 +/* 1 additional GI is for invalid values */ +#define MAX_GI (4 + 1) +#define SS_COUNT 8 +#define MAX_BW 7 +#define MAX_RECEPTION_TYPES 4 + +/* WME stream classes */ +#define WME_AC_BE 0 /* best effort */ +#define WME_AC_BK 1 /* background */ +#define WME_AC_VI 2 /* video */ +#define WME_AC_VO 3 /* voice */ +#define WME_AC_MAX 4 /* MAX AC Value */ + +#define CDP_MAX_RX_RINGS 4 + +/* + * Number of TLVs sent by FW. 
Needs to reflect + * HTT_PPDU_STATS_MAX_TAG declared in FW + */ +#define CDP_PPDU_STATS_MAX_TAG 14 + +/* Different Packet Types */ +enum cdp_packet_type { + DOT11_A = 0, + DOT11_B = 1, + DOT11_N = 2, + DOT11_AC = 3, + DOT11_AX = 4, + DOT11_MAX = 5, +}; + +/* packet info */ +struct cdp_pkt_info { + /*no of packets*/ + uint32_t num; + /* total no of bytes */ + uint64_t bytes; +}; + +/* Tx Stats */ +struct cdp_tx_stats { + /* Pkt Info for which completions were received */ + struct cdp_pkt_info comp_pkt; + /* Unicast Packet Count */ + struct cdp_pkt_info ucast; + /* Multicast Packet Count */ + struct cdp_pkt_info mcast; + /* Broadcast Packet Count*/ + struct cdp_pkt_info bcast; + /*NAWDS Multicast Packet Count */ + struct cdp_pkt_info nawds_mcast; + /*NAWDS Multicast Drop Count */ + uint32_t nawds_mcast_drop; + /* Successful Tx Packets */ + struct cdp_pkt_info tx_success; + /* Total Tx failure */ + uint32_t tx_failed; + /* Total Packets as ofdma*/ + uint32_t ofdma; + /* Packets in STBC */ + uint32_t stbc; + /* Packets in LDPC */ + uint32_t ldpc; + /* Packet retries */ + uint32_t retries; + /* Number of MSDUs with no MSDU level aggregation */ + uint32_t non_amsdu_cnt; + /* Number of MSDUs part of AMSDU*/ + uint32_t amsdu_cnt; + /* Tx Rate */ + uint32_t tx_rate; + + /* RSSI of last packet */ + uint32_t last_ack_rssi; + + /* Packet Type */ + struct { + /* MCS Count */ + uint32_t mcs_count[MAX_MCS]; + } pkt_type[DOT11_MAX]; + + /* SGI count */ + uint32_t sgi_count[MAX_GI]; + + /* Packet count for different num_spatial_stream values */ + uint32_t nss[SS_COUNT]; + + /* Packet Count for different bandwidths */ + uint32_t bw[MAX_BW]; + + /* Wireless Multimedia type Count */ + uint32_t wme_ac_type[WME_AC_MAX]; + + /* Wireless Multimedia type Count */ + uint32_t excess_retries_per_ac[WME_AC_MAX]; + + /* Packets dropped on the Tx side */ + struct { + /* Discarded by firmware */ + uint32_t fw_rem; + /* firmware_discard_untransmitted */ + uint32_t fw_rem_notx; + /* 
firmware_discard_transmitted */ + uint32_t fw_rem_tx; + /* aged out in mpdu/msdu queues*/ + uint32_t age_out; + /* discarded by firmware reason 1 */ + uint32_t fw_reason1; + /* discarded by firmware reason 2 */ + uint32_t fw_reason2; + /* discarded by firmware reason 3 */ + uint32_t fw_reason3; + } dropped; +}; + +/* Rx Level Stats */ +struct cdp_rx_stats { + /* Total packets sent up the stack */ + struct cdp_pkt_info to_stack; + /* Packets received on the reo ring */ + struct cdp_pkt_info rcvd_reo[CDP_MAX_RX_RINGS]; + /* Total unicast packets */ + struct cdp_pkt_info unicast; + /* Total multicast packets */ + struct cdp_pkt_info multicast; + /* Broadcast Packet Count*/ + struct cdp_pkt_info bcast; + /* Raw Pakets received */ + struct cdp_pkt_info raw; + /* Total multicast packets */ + uint32_t nawds_mcast_drop; + + struct { + /* Intra BSS packets received */ + struct cdp_pkt_info pkts; + struct cdp_pkt_info fail; + } intra_bss; + + /* Errors */ + struct { + /* Rx MIC errors */ + uint32_t mic_err; + /* Rx Decryption Errors */ + uint32_t decrypt_err; + } err; + + /* Wireless Multimedia type Count */ + uint32_t wme_ac_type[WME_AC_MAX]; + /* Reception type os packets */ + uint32_t reception_type[MAX_RECEPTION_TYPES]; + /* Packet Type */ + struct { + /* MCS Count */ + uint32_t mcs_count[MAX_MCS]; + } pkt_type[DOT11_MAX]; + /* SGI count */ + uint32_t sgi_count[MAX_GI]; + /* Packet count in spatiel Streams */ + uint32_t nss[SS_COUNT]; + /* Packet Count in different bandwidths */ + uint32_t bw[MAX_BW]; + /* Number of MSDUs with no MPDU level aggregation */ + uint32_t non_ampdu_cnt; + /* Number of MSDUs part of AMSPU */ + uint32_t ampdu_cnt; + /* Number of MSDUs with no MSDU level aggregation */ + uint32_t non_amsdu_cnt; + /* Number of MSDUs part of AMSDU*/ + uint32_t amsdu_cnt; + /* Number of bar received */ + uint32_t bar_recv_cnt; + /* RSSI of received signal */ + uint32_t rssi; + /*Rx rate */ + uint32_t rx_rate; +}; + +/* Tx ingress Stats */ +struct 
cdp_tx_ingress_stats { + /* Total packets received for transmission */ + struct cdp_pkt_info rcvd; + /* Tx packets processed*/ + struct cdp_pkt_info processed; + /* Total packets passed Reinject handler */ + struct cdp_pkt_info reinject_pkts; + /* Total packets passed to inspect handler */ + struct cdp_pkt_info inspect_pkts; + /*NAWDS Multicast Packet Count */ + struct cdp_pkt_info nawds_mcast; + /* Number of broadcast packets */ + struct cdp_pkt_info bcast; + + struct { + /* Total Raw packets */ + struct cdp_pkt_info raw_pkt; + /* DMA map error */ + uint32_t dma_map_error; + } raw; + + /* TSO packets info */ + struct { + /* No of segments in TSO packets */ + uint32_t num_seg; + /* total no of TSO packets */ + struct cdp_pkt_info tso_pkt; + /* TSO packets dropped by host */ + uint32_t dropped_host; + /* TSO packets dropped by target */ + uint32_t dropped_target; + } tso; + + /* Scatter Gather packet info */ + struct { + /* Total scatter gather packets */ + struct cdp_pkt_info sg_pkt; + /* SG packets dropped by host */ + uint32_t dropped_host; + /* SG packets dropped by target */ + uint32_t dropped_target; + /* Dma map error */ + uint32_t dma_map_error; + } sg; + + /* Multicast Enhancement packets info */ + struct { + /* total no of multicast conversion packets */ + struct cdp_pkt_info mcast_pkt; + /* packets dropped due to map error */ + uint32_t dropped_map_error; + /* packets dropped due to self Mac address */ + uint32_t dropped_self_mac; + /* Packets dropped due to send fail */ + uint32_t dropped_send_fail; + /* total unicast packets transmitted */ + uint32_t ucast; + /* Segment allocation failure */ + uint32_t fail_seg_alloc; + /* NBUF clone failure */ + uint32_t clone_fail; + } mcast_en; + + /* Packets dropped on the Tx side */ + struct { + /* Total scatter gather packets */ + struct cdp_pkt_info dropped_pkt; + /* Desc Not Available */ + uint32_t desc_na; + /* Ring Full */ + uint32_t ring_full; + /* Hwenqueue failed */ + uint32_t enqueue_fail; + /* DMA failed 
*/ + uint32_t dma_error; + /* Resource Full: Congestion Control */ + uint32_t res_full; + } dropped; + + /* Mesh packets info */ + struct { + /* packets sent to fw */ + uint32_t exception_fw; + /* packets completions received from fw */ + uint32_t completion_fw; + } mesh; + + /*Number of packets classified by CCE*/ + uint32_t cce_classified; + + /*Number of raw packets classified by CCE*/ + uint32_t cce_classified_raw; +}; + +struct cdp_vdev_stats { + /* Tx ingress stats */ + struct cdp_tx_ingress_stats tx_i; + /* CDP Tx Stats */ + struct cdp_tx_stats tx; + /* CDP Rx Stats */ + struct cdp_rx_stats rx; +}; + +struct cdp_peer_stats { + /* CDP Tx Stats */ + struct cdp_tx_stats tx; + /* CDP Rx Stats */ + struct cdp_rx_stats rx; +}; + +/* Tx completions per interrupt */ +struct cdp_hist_tx_comp { + uint32_t pkts_1; + uint32_t pkts_2_20; + uint32_t pkts_21_40; + uint32_t pkts_41_60; + uint32_t pkts_61_80; + uint32_t pkts_81_100; + uint32_t pkts_101_200; + uint32_t pkts_201_plus; +}; + +/* Rx ring descriptors reaped per interrupt */ +struct cdp_hist_rx_ind { + uint32_t pkts_1; + uint32_t pkts_2_20; + uint32_t pkts_21_40; + uint32_t pkts_41_60; + uint32_t pkts_61_80; + uint32_t pkts_81_100; + uint32_t pkts_101_200; + uint32_t pkts_201_plus; +}; + +struct cdp_htt_tlv_hdr { + /* BIT [11 : 0] :- tag + * BIT [23 : 12] :- length + * BIT [31 : 24] :- reserved + */ + uint32_t tag__length; +}; + +#define HTT_STATS_SUBTYPE_MAX 16 + +struct cdp_htt_rx_pdev_fw_stats_tlv { + struct cdp_htt_tlv_hdr tlv_hdr; + + /* BIT [ 7 : 0] :- mac_id + * BIT [31 : 8] :- reserved + */ + uint32_t mac_id__word; + /* Num PPDU status processed from HW */ + uint32_t ppdu_recvd; + /* Num MPDU across PPDUs with FCS ok */ + uint32_t mpdu_cnt_fcs_ok; + /* Num MPDU across PPDUs with FCS err */ + uint32_t mpdu_cnt_fcs_err; + /* Num MSDU across PPDUs */ + uint32_t tcp_msdu_cnt; + /* Num MSDU across PPDUs */ + uint32_t tcp_ack_msdu_cnt; + /* Num MSDU across PPDUs */ + uint32_t udp_msdu_cnt; + /* Num MSDU across 
PPDUs */ + uint32_t other_msdu_cnt; + /* Num MPDU on FW ring indicated */ + uint32_t fw_ring_mpdu_ind; + /* Num MGMT MPDU given to protocol */ + uint32_t fw_ring_mgmt_subtype[HTT_STATS_SUBTYPE_MAX]; + /* Num ctrl MPDU given to protocol */ + uint32_t fw_ring_ctrl_subtype[HTT_STATS_SUBTYPE_MAX]; + /* Num mcast data packet received */ + uint32_t fw_ring_mcast_data_msdu; + /* Num broadcast data packet received */ + uint32_t fw_ring_bcast_data_msdu; + /* Num unicat data packet received */ + uint32_t fw_ring_ucast_data_msdu; + /* Num null data packet received */ + uint32_t fw_ring_null_data_msdu; + /* Num MPDU on FW ring dropped */ + uint32_t fw_ring_mpdu_drop; + + /* Num buf indication to offload */ + uint32_t ofld_local_data_ind_cnt; + /* Num buf recycle from offload */ + uint32_t ofld_local_data_buf_recycle_cnt; + /* Num buf indication to data_rx */ + uint32_t drx_local_data_ind_cnt; + /* Num buf recycle from data_rx */ + uint32_t drx_local_data_buf_recycle_cnt; + /* Num buf indication to protocol */ + uint32_t local_nondata_ind_cnt; + /* Num buf recycle from protocol */ + uint32_t local_nondata_buf_recycle_cnt; + + /* Num buf fed */ + uint32_t fw_status_buf_ring_refill_cnt; + /* Num ring empty encountered */ + uint32_t fw_status_buf_ring_empty_cnt; + /* Num buf fed */ + uint32_t fw_pkt_buf_ring_refill_cnt; + /* Num ring empty encountered */ + uint32_t fw_pkt_buf_ring_empty_cnt; + /* Num buf fed */ + uint32_t fw_link_buf_ring_refill_cnt; + /* Num ring empty encountered */ + uint32_t fw_link_buf_ring_empty_cnt; + + /* Num buf fed */ + uint32_t host_pkt_buf_ring_refill_cnt; + /* Num ring empty encountered */ + uint32_t host_pkt_buf_ring_empty_cnt; + /* Num buf fed */ + uint32_t mon_pkt_buf_ring_refill_cnt; + /* Num ring empty encountered */ + uint32_t mon_pkt_buf_ring_empty_cnt; + /* Num buf fed */ + uint32_t mon_status_buf_ring_refill_cnt; + /* Num ring empty encountered */ + uint32_t mon_status_buf_ring_empty_cnt; + /* Num buf fed */ + uint32_t 
mon_desc_buf_ring_refill_cnt; + /* Num ring empty encountered */ + uint32_t mon_desc_buf_ring_empty_cnt; + /* Num buf fed */ + uint32_t mon_dest_ring_update_cnt; + /* Num ring full encountered */ + uint32_t mon_dest_ring_full_cnt; + + /* Num rx suspend is attempted */ + uint32_t rx_suspend_cnt; + /* Num rx suspend failed */ + uint32_t rx_suspend_fail_cnt; + /* Num rx resume attempted */ + uint32_t rx_resume_cnt; + /* Num rx resume failed */ + uint32_t rx_resume_fail_cnt; + /* Num rx ring switch */ + uint32_t rx_ring_switch_cnt; + /* Num rx ring restore */ + uint32_t rx_ring_restore_cnt; + /* Num rx flush issued */ + uint32_t rx_flush_cnt; +}; + +/* == TX PDEV STATS == */ +struct cdp_htt_tx_pdev_stats_cmn_tlv { + struct cdp_htt_tlv_hdr tlv_hdr; + + /* BIT [ 7 : 0] :- mac_id + * BIT [31 : 8] :- reserved + */ + uint32_t mac_id__word; + /* Num queued to HW */ + uint32_t hw_queued; + /* Num PPDU reaped from HW */ + uint32_t hw_reaped; + /* Num underruns */ + uint32_t underrun; + /* Num HW Paused counter. */ + uint32_t hw_paused; + /* Num HW flush counter. */ + uint32_t hw_flush; + /* Num HW filtered counter. 
*/ + uint32_t hw_filt; + /* Num PPDUs cleaned up in TX abort */ + uint32_t tx_abort; + /* Num MPDUs requed by SW */ + uint32_t mpdu_requed; + /* excessive retries */ + uint32_t tx_xretry; + /* Last used data hw rate code */ + uint32_t data_rc; + /* frames dropped due to excessive sw retries */ + uint32_t mpdu_dropped_xretry; + /* illegal rate phy errors */ + uint32_t illgl_rate_phy_err; + /* wal pdev continuous xretry */ + uint32_t cont_xretry; + /* wal pdev continuous xretry */ + uint32_t tx_timeout; + /* wal pdev resets */ + uint32_t pdev_resets; + /* PhY/BB underrun */ + uint32_t phy_underrun; + /* MPDU is more than txop limit */ + uint32_t txop_ovf; + /* Number of Sequences posted */ + uint32_t seq_posted; + /* Number of Sequences failed queueing */ + uint32_t seq_failed_queueing; + /* Number of Sequences completed */ + uint32_t seq_completed; + /* Number of Sequences restarted */ + uint32_t seq_restarted; + /* Number of MU Sequences posted */ + uint32_t mu_seq_posted; + /* Number of time HW ring is paused between seq switch within ISR */ + uint32_t seq_switch_hw_paused; + /* Number of times seq continuation in DSR */ + uint32_t next_seq_posted_dsr; + /* Number of times seq continuation in ISR */ + uint32_t seq_posted_isr; + /* Number of seq_ctrl cached. 
*/ + uint32_t seq_ctrl_cached; + /* Number of MPDUs successfully transmitted */ + uint32_t mpdu_count_tqm; + /* Number of MSDUs successfully transmitted */ + uint32_t msdu_count_tqm; + /* Number of MPDUs dropped */ + uint32_t mpdu_removed_tqm; + /* Number of MSDUs dropped */ + uint32_t msdu_removed_tqm; + /* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT (Reset,channel change) */ + uint32_t mpdus_sw_flush; + /* Num MPDUs filtered by HW, all filter condition (TTL expired) */ + uint32_t mpdus_hw_filter; + /* Num MPDUs truncated by PDG (TXOP, TBTT, PPDU_duration based on rate, dyn_bw) */ + uint32_t mpdus_truncated; + /* Num MPDUs that was tried but didn't receive ACK or BA */ + uint32_t mpdus_ack_failed; + /* Num MPDUs that was dropped due to expiry (MSDU TTL). */ + uint32_t mpdus_expired; + /* Num MPDUs that was retried within seq_ctrl (MGMT/LEGACY) */ + uint32_t mpdus_seq_hw_retry; + /* Num of TQM acked cmds processed */ + uint32_t ack_tlv_proc; + /* coex_abort_mpdu_cnt valid. */ + uint32_t coex_abort_mpdu_cnt_valid; + /* coex_abort_mpdu_cnt from TX FES stats. 
*/ + uint32_t coex_abort_mpdu_cnt; + /* Number of total PPDUs(DATA, MGMT, excludes selfgen) tried over the air (OTA) */ + uint32_t num_total_ppdus_tried_ota; + /* Number of data PPDUs tried over the air (OTA) */ + uint32_t num_data_ppdus_tried_ota; + /* Num Local control/mgmt frames (MSDUs) queued */ + uint32_t local_ctrl_mgmt_enqued; + /* local_ctrl_mgmt_freed: + * Num Local control/mgmt frames (MSDUs) done + * It includes all local ctrl/mgmt completions + * (acked, no ack, flush, TTL, etc) + */ + uint32_t local_ctrl_mgmt_freed; + /* Num Local data frames (MSDUs) queued */ + uint32_t local_data_enqued; + /* local_data_freed: + * Num Local data frames (MSDUs) done + * It includes all local data completions + * (acked, no ack, flush, TTL, etc) + */ + uint32_t local_data_freed; +}; + +struct cdp_htt_tx_pdev_stats_urrn_tlv_v { + struct cdp_htt_tlv_hdr tlv_hdr; + uint32_t urrn_stats[1]; /* HTT_TX_PDEV_MAX_URRN_STATS */ +}; + +/* NOTE: Variable length TLV, use length spec to infer array size */ +struct cdp_htt_tx_pdev_stats_flush_tlv_v { + struct cdp_htt_tlv_hdr tlv_hdr; + uint32_t flush_errs[1]; /* HTT_TX_PDEV_MAX_FLUSH_REASON_STATS */ +}; + +/* NOTE: Variable length TLV, use length spec to infer array size */ +struct cdp_htt_tx_pdev_stats_sifs_tlv_v { + struct cdp_htt_tlv_hdr tlv_hdr; + uint32_t sifs_status[1]; /* HTT_TX_PDEV_MAX_SIFS_BURST_STATS */ +}; + +/* NOTE: Variable length TLV, use length spec to infer array size */ +struct cdp_htt_tx_pdev_stats_phy_err_tlv_v { + struct cdp_htt_tlv_hdr tlv_hdr; + uint32_t phy_errs[1]; /* HTT_TX_PDEV_MAX_PHY_ERR_STATS */ +}; + +/* == RX PDEV/SOC STATS == */ +/* HTT_STATS_RX_SOC_FW_STATS_TAG */ +struct cdp_htt_rx_soc_fw_stats_tlv { + struct cdp_htt_tlv_hdr tlv_hdr; + /* Num Packets received on REO FW ring */ + uint32_t fw_reo_ring_data_msdu; + /* Num bc/mc packets indicated from fw to host */ + uint32_t fw_to_host_data_msdu_bcmc; + /* Num unicast packets indicated from fw to host */ + uint32_t fw_to_host_data_msdu_uc; + /* Num 
remote buf recycle from offload */ + uint32_t ofld_remote_data_buf_recycle_cnt; + /* Num remote free buf given to offload */ + uint32_t ofld_remote_free_buf_indication_cnt; +}; + +struct cdp_htt_rx_soc_fw_refill_ring_num_refill_tlv_v { + struct cdp_htt_tlv_hdr tlv_hdr; + /* Num total buf refilled from refill ring */ + uint32_t refill_ring_num_refill[1]; /* HTT_RX_STATS_REFILL_MAX_RING */ +}; + +struct cdp_htt_rx_pdev_fw_ring_mpdu_err_tlv_v { + struct cdp_htt_tlv_hdr tlv_hdr; + /* Num error MPDU for each RxDMA error type */ + uint32_t fw_ring_mpdu_err[1]; /* HTT_RX_STATS_RXDMA_MAX_ERR */ +}; + +struct cdp_htt_rx_pdev_fw_mpdu_drop_tlv_v { + struct cdp_htt_tlv_hdr tlv_hdr; + /* Num MPDU dropped */ + uint32_t fw_mpdu_drop[1]; /* HTT_RX_STATS_FW_DROP_REASON_MAX */ +}; + +#define HTT_STATS_PHY_ERR_MAX 43 + +struct cdp_htt_rx_pdev_fw_stats_phy_err_tlv { + struct cdp_htt_tlv_hdr tlv_hdr; + + /* BIT [ 7 : 0] :- mac_id + * BIT [31 : 8] :- reserved + */ + uint32_t mac_id__word; + /* Num of phy err */ + uint32_t total_phy_err_cnt; + /* Counts of different types of phy errs + * The mapping of PHY error types to phy_err array elements is HW dependent. 
+ * The only currently-supported mapping is shown below: + * + * 0 phyrx_err_phy_off Reception aborted due to receiving a PHY_OFF TLV + * 1 phyrx_err_synth_off + * 2 phyrx_err_ofdma_timing + * 3 phyrx_err_ofdma_signal_parity + * 4 phyrx_err_ofdma_rate_illegal + * 5 phyrx_err_ofdma_length_illegal + * 6 phyrx_err_ofdma_restart + * 7 phyrx_err_ofdma_service + * 8 phyrx_err_ppdu_ofdma_power_drop + * 9 phyrx_err_cck_blokker + * 10 phyrx_err_cck_timing + * 11 phyrx_err_cck_header_crc + * 12 phyrx_err_cck_rate_illegal + * 13 phyrx_err_cck_length_illegal + * 14 phyrx_err_cck_restart + * 15 phyrx_err_cck_service + * 16 phyrx_err_cck_power_drop + * 17 phyrx_err_ht_crc_err + * 18 phyrx_err_ht_length_illegal + * 19 phyrx_err_ht_rate_illegal + * 20 phyrx_err_ht_zlf + * 21 phyrx_err_false_radar_ext + * 22 phyrx_err_green_field + * 23 phyrx_err_bw_gt_dyn_bw + * 24 phyrx_err_leg_ht_mismatch + * 25 phyrx_err_vht_crc_error + * 26 phyrx_err_vht_siga_unsupported + * 27 phyrx_err_vht_lsig_len_invalid + * 28 phyrx_err_vht_ndp_or_zlf + * 29 phyrx_err_vht_nsym_lt_zero + * 30 phyrx_err_vht_rx_extra_symbol_mismatch + * 31 phyrx_err_vht_rx_skip_group_id0 + * 32 phyrx_err_vht_rx_skip_group_id1to62 + * 33 phyrx_err_vht_rx_skip_group_id63 + * 34 phyrx_err_ofdm_ldpc_decoder_disabled + * 35 phyrx_err_defer_nap + * 36 phyrx_err_fdomain_timeout + * 37 phyrx_err_lsig_rel_check + * 38 phyrx_err_bt_collision + * 39 phyrx_err_unsupported_mu_feedback + * 40 phyrx_err_ppdu_tx_interrupt_rx + * 41 phyrx_err_unsupported_cbf + * 42 phyrx_err_other + */ + uint32_t phy_err[HTT_STATS_PHY_ERR_MAX]; +}; + +struct cdp_htt_rx_soc_fw_refill_ring_empty_tlv_v { + struct cdp_htt_tlv_hdr tlv_hdr; + /* Num ring empty encountered */ + uint32_t refill_ring_empty_cnt[1]; /* HTT_RX_STATS_REFILL_MAX_RING */ +}; + +struct cdp_htt_tx_pdev_stats { + struct cdp_htt_tx_pdev_stats_cmn_tlv cmn_tlv; + struct cdp_htt_tx_pdev_stats_urrn_tlv_v underrun_tlv; + struct cdp_htt_tx_pdev_stats_sifs_tlv_v sifs_tlv; + struct 
cdp_htt_tx_pdev_stats_flush_tlv_v flush_tlv; + struct cdp_htt_tx_pdev_stats_phy_err_tlv_v phy_err_tlv; +}; + +struct cdp_htt_rx_soc_stats_t { + struct cdp_htt_rx_soc_fw_stats_tlv fw_tlv; + struct cdp_htt_rx_soc_fw_refill_ring_empty_tlv_v fw_refill_ring_empty_tlv; + struct cdp_htt_rx_soc_fw_refill_ring_num_refill_tlv_v fw_refill_ring_num_refill_tlv; +}; + +struct cdp_htt_rx_pdev_stats { + struct cdp_htt_rx_soc_stats_t soc_stats; + struct cdp_htt_rx_pdev_fw_stats_tlv fw_stats_tlv; + struct cdp_htt_rx_pdev_fw_ring_mpdu_err_tlv_v fw_ring_mpdu_err_tlv; + struct cdp_htt_rx_pdev_fw_mpdu_drop_tlv_v fw_ring_mpdu_drop; + struct cdp_htt_rx_pdev_fw_stats_phy_err_tlv fw_stats_phy_err_tlv; +}; + +struct cdp_pdev_stats { + /* packets dropped on rx */ + struct { + /* packets dropped because nsdu_done bit not set */ + uint32_t msdu_not_done; + /* Multicast Echo check */ + uint32_t mec; + /* Mesh Filtered packets */ + uint32_t mesh_filter; + /* packets dropped on monitor vap */ + uint32_t mon_rx_drop; + } dropped; + + struct { + /* total packets replnished */ + struct cdp_pkt_info pkts; + /* rxdma errors */ + uint32_t rxdma_err; + /* nbuf alloc failed */ + uint32_t nbuf_alloc_fail; + /* Mapping failure */ + uint32_t map_err; + /* x86 failures */ + uint32_t x86_fail; + /* low threshold interrupts */ + uint32_t low_thresh_intrs; + } replenish; + + /* Rx Raw Packets */ + uint32_t rx_raw_pkts; + /* Mesh Rx Stats Alloc fail */ + uint32_t mesh_mem_alloc; + + /* Rx errors */ + struct { + /* desc alloc failed errors */ + uint32_t desc_alloc_fail; + /* ip csum errors */ + uint32_t ip_csum_err; + /* tcp/udp csum errors */ + uint32_t tcp_udp_csum_err; + } err; + + /* buffers added back in freelist */ + uint32_t buf_freelist; + /* Tx Ingress stats */ + struct cdp_tx_ingress_stats tx_i; + /* CDP Tx Stats */ + struct cdp_tx_stats tx; + /* CDP Rx Stats */ + struct cdp_rx_stats rx; + /* Number of Tx completions per interrupt */ + struct cdp_hist_tx_comp tx_comp_histogram; + /* Number of Rx ring 
descriptors reaped per interrupt */ + struct cdp_hist_rx_ind rx_ind_histogram; + uint64_t ppdu_stats_counter[CDP_PPDU_STATS_MAX_TAG]; + + struct cdp_htt_tx_pdev_stats htt_tx_pdev_stats; + struct cdp_htt_rx_pdev_stats htt_rx_pdev_stats; +}; + +#ifndef BIG_ENDIAN_HOST +typedef struct { + uint64_t pkts; + uint64_t bytes; +} ol_txrx_stats_elem; +#else +struct ol_txrx_elem_t { + uint64_t pkts; + uint64_t bytes; +}; +typedef struct ol_txrx_elem_t ol_txrx_stats_elem; +#endif + +#ifndef CONFIG_MCL +/** + * @brief data stats published by the host txrx layer + */ +struct ol_txrx_stats { + struct { + /* MSDUs received from the stack */ + ol_txrx_stats_elem from_stack; + /* MSDUs successfully sent across the WLAN */ + ol_txrx_stats_elem delivered; + struct { + /* MSDUs that the host did not accept */ + ol_txrx_stats_elem host_reject; + /* MSDUs which could not be downloaded to the target */ + ol_txrx_stats_elem download_fail; + /* + * MSDUs which the target discarded + * (lack of mem or old age) + */ + ol_txrx_stats_elem target_discard; + /* + * MSDUs which the target sent but couldn't get + * an ack for + */ + ol_txrx_stats_elem no_ack; + /* MSDUs dropped in NSS-FW */ + ol_txrx_stats_elem nss_ol_dropped; + } dropped; + u_int32_t desc_in_use; + u_int32_t desc_alloc_fails; + u_int32_t ce_ring_full; + u_int32_t dma_map_error; + /* MSDUs given to the txrx layer by the management stack */ + ol_txrx_stats_elem mgmt; + struct { + /* TSO applied jumbo packets received from NW Stack */ + ol_txrx_stats_elem tso_pkts; + /* Non - TSO packets */ + ol_txrx_stats_elem non_tso_pkts; + /* TSO packets : Dropped during TCP segmentation*/ + ol_txrx_stats_elem tso_dropped; + /* TSO Descriptors */ + u_int32_t tso_desc_cnt; + } tso; + + struct { + /* TSO applied jumbo packets received from NW Stack */ + ol_txrx_stats_elem sg_pkts; + /* Non - TSO packets */ + ol_txrx_stats_elem non_sg_pkts; + /* TSO packets : Dropped during TCP segmentation*/ + ol_txrx_stats_elem sg_dropped; + /* TSO Descriptors */ 
+ u_int32_t sg_desc_cnt; + } sg; + struct { + /* packets enqueued for flow control */ + u_int32_t fl_ctrl_enqueue; + /* packets discarded for flow control is full */ + u_int32_t fl_ctrl_discard; + /* packets sent to CE without flow control */ + u_int32_t fl_ctrl_avoid; + } fl_ctrl; + } tx; + struct { + /* MSDUs given to the OS shim */ + ol_txrx_stats_elem delivered; + /* MSDUs forwarded from the rx path to the tx path */ + ol_txrx_stats_elem forwarded; + /* MSDUs in which ipv4 chksum error detected by HW */ + ol_txrx_stats_elem ipv4_cksum_err; + /* MSDUs in which tcp chksum error detected by HW */ + ol_txrx_stats_elem tcp_ipv4_cksum_err; + /* MSDUs in which udp chksum error detected by HW */ + ol_txrx_stats_elem udp_ipv4_cksum_err; + /* MSDUs in which tcp V6 chksum error detected by HW */ + ol_txrx_stats_elem tcp_ipv6_cksum_err; + /* MSDUs in which UDP V6 chksum error detected by HW */ + ol_txrx_stats_elem udp_ipv6_cksum_err; + } rx; + struct { + /* Number of mcast received for conversion */ + u_int32_t num_me_rcvd; + /* Number of unicast sent as part of mcast conversion */ + u_int32_t num_me_ucast; + /* Number of multicast frames dropped due to dma_map failure */ + u_int32_t num_me_dropped_m; + /* + * Number of multicast frames dropped due to allocation + * failure + */ + u_int32_t num_me_dropped_a; + /* Number of multicast frames dropped due to internal failure */ + u_int32_t num_me_dropped_i; + /* Number of me buf currently in use */ + u_int32_t num_me_buf; + /* Number of me buf frames to self mac address */ + u_int32_t num_me_dropped_s; + /* Number of me buf in use in non pool based allocation*/ + u_int32_t num_me_nonpool; + /* Number of me buf allocated using non pool based allocation*/ + u_int32_t num_me_nonpool_count; + } mcast_enhance; +}; + +struct ol_ath_dbg_rx_rssi { + uint8_t rx_rssi_pri20; + uint8_t rx_rssi_sec20; + uint8_t rx_rssi_sec40; + uint8_t rx_rssi_sec80; +}; + +struct ol_ath_radiostats { + uint64_t tx_beacon; + uint32_t be_nobuf; + uint32_t 
tx_buf_count; + uint32_t tx_packets; + uint32_t rx_packets; + int32_t tx_mgmt; + uint32_t tx_num_data; + uint32_t rx_num_data; + int32_t rx_mgmt; + uint32_t rx_num_mgmt; + uint32_t rx_num_ctl; + uint32_t tx_rssi; + uint32_t tx_mcs[10]; + uint32_t rx_mcs[10]; + uint32_t rx_rssi_comb; + struct ol_ath_dbg_rx_rssi rx_rssi_chain0; + struct ol_ath_dbg_rx_rssi rx_rssi_chain1; + struct ol_ath_dbg_rx_rssi rx_rssi_chain2; + struct ol_ath_dbg_rx_rssi rx_rssi_chain3; + uint64_t rx_bytes; + uint64_t tx_bytes; + uint32_t tx_compaggr; + uint32_t rx_aggr; + uint32_t tx_bawadv; + uint32_t tx_compunaggr; + uint32_t rx_overrun; + uint32_t rx_badcrypt; + uint32_t rx_badmic; + uint32_t rx_crcerr; + uint32_t rx_phyerr; + uint32_t ackRcvBad; + uint32_t rtsBad; + uint32_t rtsGood; + uint32_t fcsBad; + uint32_t noBeacons; + uint32_t mib_int_count; + uint32_t rx_looplimit_start; + uint32_t rx_looplimit_end; + uint8_t ap_stats_tx_cal_enable; + uint8_t self_bss_util; + uint8_t obss_util; + uint32_t tgt_asserts; + int16_t chan_nf; + uint32_t rx_last_msdu_unset_cnt; + int16_t chan_nf_sec80; + uint64_t wmi_tx_mgmt; + uint64_t wmi_tx_mgmt_completions; + uint32_t wmi_tx_mgmt_completion_err; + uint32_t peer_delete_req; + uint32_t peer_delete_resp; + uint32_t rx_mgmt_rssi_drop; + uint32_t tx_retries; + uint32_t rx_data_bytes; + uint32_t tx_frame_count; + uint32_t rx_frame_count; + uint32_t rx_clear_count; + uint32_t cycle_count; + uint32_t phy_err_count; + uint32_t chan_tx_pwr; +}; + +/* + * Enumeration of PDEV Configuration parameter + */ +enum _ol_ath_param_t { + OL_ATH_PARAM_TXCHAINMASK = 1, + OL_ATH_PARAM_RXCHAINMASK = 2, + OL_ATH_PARAM_AMPDU = 6, + OL_ATH_PARAM_AMPDU_LIMIT = 7, + OL_ATH_PARAM_AMPDU_SUBFRAMES = 8, + OL_ATH_PARAM_TXPOWER_LIMIT2G = 12, + OL_ATH_PARAM_TXPOWER_LIMIT5G = 13, + OL_ATH_PARAM_LDPC = 32, + OL_ATH_PARAM_VOW_EXT_STATS = 45, + OL_ATH_PARAM_DYN_TX_CHAINMASK = 73, + OL_ATH_PARAM_BURST_ENABLE = 77, + OL_ATH_PARAM_BURST_DUR = 78, + OL_ATH_PARAM_BCN_BURST = 80, + 
OL_ATH_PARAM_DCS = 82, +#if UMAC_SUPPORT_PERIODIC_PERFSTATS + OL_ATH_PARAM_PRDPERFSTAT_THRPUT_ENAB = 83, + OL_ATH_PARAM_PRDPERFSTAT_THRPUT_WIN = 84, + OL_ATH_PARAM_PRDPERFSTAT_THRPUT = 85, + OL_ATH_PARAM_PRDPERFSTAT_PER_ENAB = 86, + OL_ATH_PARAM_PRDPERFSTAT_PER_WIN = 87, + OL_ATH_PARAM_PRDPERFSTAT_PER = 88, +#endif + /* UMAC_SUPPORT_PERIODIC_PERFSTATS */ + OL_ATH_PARAM_TOTAL_PER = 89, + /* set manual rate for rts frame */ + OL_ATH_PARAM_RTS_CTS_RATE = 92, + /* co channel interference threshold level */ + OL_ATH_PARAM_DCS_COCH_THR = 93, + /* transmit error threshold */ + OL_ATH_PARAM_DCS_TXERR_THR = 94, + /* phy error threshold */ + OL_ATH_PARAM_DCS_PHYERR_THR = 95, + /* + * The IOCTL number is 114, it is made 114, inorder to make the IOCTL + * number same as Direct-attach IOCTL. + * Please, don't change number. This IOCTL gets the Interface code path + * it should be either DIRECT-ATTACH or OFF-LOAD. + */ + OL_ATH_PARAM_GET_IF_ID = 114, + /* Enable Acs back Ground Channel selection Scan timer in AP mode*/ + OL_ATH_PARAM_ACS_ENABLE_BK_SCANTIMEREN = 118, + /* ACS scan timer value in Seconds */ + OL_ATH_PARAM_ACS_SCANTIME = 119, + /* Negligence Delta RSSI between two channel */ + OL_ATH_PARAM_ACS_RSSIVAR = 120, + /* Negligence Delta Channel load between two channel*/ + OL_ATH_PARAM_ACS_CHLOADVAR = 121, + /* Enable Limited OBSS check */ + OL_ATH_PARAM_ACS_LIMITEDOBSS = 122, + /* Acs control flag for Scan timer */ + OL_ATH_PARAM_ACS_CTRLFLAG = 123, + /* Acs Run time Debug level*/ + OL_ATH_PARAM_ACS_DEBUGTRACE = 124, + OL_ATH_PARAM_SET_FW_HANG_ID = 137, + /* Radio type 1:11ac 0:11abgn */ + OL_ATH_PARAM_RADIO_TYPE = 138, + OL_ATH_PARAM_IGMPMLD_OVERRIDE, /* IGMP/MLD packet override */ + OL_ATH_PARAM_IGMPMLD_TID, /* IGMP/MLD packet TID no */ + OL_ATH_PARAM_ARPDHCP_AC_OVERRIDE, + OL_ATH_PARAM_NON_AGG_SW_RETRY_TH, + OL_ATH_PARAM_AGG_SW_RETRY_TH, + /* Dont change this number it as per sync with DA + Blocking certian channel from ic channel list */ + OL_ATH_PARAM_DISABLE_DFS = 
144, + OL_ATH_PARAM_ENABLE_AMSDU = 145, + OL_ATH_PARAM_ENABLE_AMPDU = 146, + OL_ATH_PARAM_STA_KICKOUT_TH, + OL_ATH_PARAM_WLAN_PROF_ENABLE, + OL_ATH_PARAM_LTR_ENABLE, + OL_ATH_PARAM_LTR_AC_LATENCY_BE = 150, + OL_ATH_PARAM_LTR_AC_LATENCY_BK, + OL_ATH_PARAM_LTR_AC_LATENCY_VI, + OL_ATH_PARAM_LTR_AC_LATENCY_VO, + OL_ATH_PARAM_LTR_AC_LATENCY_TIMEOUT, + OL_ATH_PARAM_LTR_TX_ACTIVITY_TIMEOUT = 155, + OL_ATH_PARAM_LTR_SLEEP_OVERRIDE, + OL_ATH_PARAM_LTR_RX_OVERRIDE, + OL_ATH_PARAM_L1SS_ENABLE, + OL_ATH_PARAM_DSLEEP_ENABLE, + /* radar error threshold */ + OL_ATH_PARAM_DCS_RADAR_ERR_THR = 160, + /* Tx channel utilization due to AP's tx and rx */ + OL_ATH_PARAM_DCS_USERMAX_CU_THR, + /* interference detection threshold */ + OL_ATH_PARAM_DCS_INTR_DETECT_THR, + /* sampling window, default 10secs */ + OL_ATH_PARAM_DCS_SAMPLE_WINDOW, + /* debug logs enable/disable */ + OL_ATH_PARAM_DCS_DEBUG, + OL_ATH_PARAM_ANI_ENABLE = 165, + OL_ATH_PARAM_ANI_POLL_PERIOD, + OL_ATH_PARAM_ANI_LISTEN_PERIOD, + OL_ATH_PARAM_ANI_OFDM_LEVEL, + OL_ATH_PARAM_ANI_CCK_LEVEL, + OL_ATH_PARAM_DSCP_TID_MAP = 170, + OL_ATH_PARAM_TXPOWER_SCALE, + /* Phy error penalty */ + OL_ATH_PARAM_DCS_PHYERR_PENALTY, +#if ATH_SUPPORT_DSCP_OVERRIDE + /* set/get TID for sending HMMC packets */ + OL_ATH_PARAM_HMMC_DSCP_TID_MAP, + /* set/get DSCP mapping override */ + OL_ATH_PARAM_DSCP_OVERRIDE, + /* set/get HMMC-DSCP mapping override */ + OL_ATH_PARAM_HMMC_DSCP_OVERRIDE = 175, +#endif +#if ATH_RX_LOOPLIMIT_TIMER + OL_ATH_PARAM_LOOPLIMIT_NUM, +#endif + OL_ATH_PARAM_ANTENNA_GAIN_2G, + OL_ATH_PARAM_ANTENNA_GAIN_5G, + OL_ATH_PARAM_RX_FILTER, +#if ATH_SUPPORT_HYFI_ENHANCEMENTS + OL_ATH_PARAM_BUFF_THRESH = 180, + OL_ATH_PARAM_BLK_REPORT_FLOOD, + OL_ATH_PARAM_DROP_STA_QUERY, +#endif + OL_ATH_PARAM_QBOOST, + OL_ATH_PARAM_SIFS_FRMTYPE, + OL_ATH_PARAM_SIFS_UAPSD = 185, + OL_ATH_PARAM_FW_RECOVERY_ID, + OL_ATH_PARAM_RESET_OL_STATS, + OL_ATH_PARAM_AGGR_BURST, + /* Number of deauth sent in consecutive rx_peer_invalid */ + 
OL_ATH_PARAM_DEAUTH_COUNT, + OL_ATH_PARAM_BLOCK_INTERBSS = 190, + /* Firmware reset control for Bmiss / timeout / reset */ + OL_ATH_PARAM_FW_DISABLE_RESET, + OL_ATH_PARAM_MSDU_TTL, + OL_ATH_PARAM_PPDU_DURATION, + OL_ATH_PARAM_SET_TXBF_SND_PERIOD, + OL_ATH_PARAM_ALLOW_PROMISC = 195, + OL_ATH_PARAM_BURST_MODE, + OL_ATH_PARAM_DYN_GROUPING, + OL_ATH_PARAM_DPD_ENABLE, + OL_ATH_PARAM_DBGLOG_RATELIM, + /* firmware should intimate us about ps state change for node */ + OL_ATH_PARAM_PS_STATE_CHANGE = 200, + OL_ATH_PARAM_MCAST_BCAST_ECHO, + /* OBSS RSSI threshold for 20/40 coexistence */ + OL_ATH_PARAM_OBSS_RSSI_THRESHOLD, + /* Link/node RX RSSI threshold for 20/40 coexistence */ + OL_ATH_PARAM_OBSS_RX_RSSI_THRESHOLD, +#if ATH_CHANNEL_BLOCKING + OL_ATH_PARAM_ACS_BLOCK_MODE = 205, +#endif + OL_ATH_PARAM_ACS_TX_POWER_OPTION, + /* + * Default Antenna Polarization MSB 8 bits (24:31) specifying + * enable/disable ; LSB 24 bits (0:23) antenna mask value + */ + OL_ATH_PARAM_ANT_POLARIZATION, + /* rate limit mute type error prints */ + OL_ATH_PARAM_PRINT_RATE_LIMIT, + OL_ATH_PARAM_PDEV_RESET, /* Reset FW PDEV*/ + /* Do not crash host when target assert happened*/ + OL_ATH_PARAM_FW_DUMP_NO_HOST_CRASH = 210, + /* Consider OBSS non-erp to change to long slot*/ + OL_ATH_PARAM_CONSIDER_OBSS_NON_ERP_LONG_SLOT = 211, + OL_ATH_PARAM_STATS_FC, + OL_ATH_PARAM_QFLUSHINTERVAL, + OL_ATH_PARAM_TOTAL_Q_SIZE, + OL_ATH_PARAM_TOTAL_Q_SIZE_RANGE0, + OL_ATH_PARAM_TOTAL_Q_SIZE_RANGE1, + OL_ATH_PARAM_TOTAL_Q_SIZE_RANGE2, + OL_ATH_PARAM_TOTAL_Q_SIZE_RANGE3, + OL_ATH_PARAM_MIN_THRESHOLD, + OL_ATH_PARAM_MAX_Q_LIMIT, + OL_ATH_PARAM_MIN_Q_LIMIT, + OL_ATH_PARAM_CONG_CTRL_TIMER_INTV, + OL_ATH_PARAM_STATS_TIMER_INTV, + OL_ATH_PARAM_ROTTING_TIMER_INTV, + OL_ATH_PARAM_LATENCY_PROFILE, + OL_ATH_PARAM_HOSTQ_DUMP, + OL_ATH_PARAM_TIDQ_MAP, + OL_ATH_PARAM_DBG_ARP_SRC_ADDR, /* ARP DEBUG source address*/ + OL_ATH_PARAM_DBG_ARP_DST_ADDR, /* ARP DEBUG destination address*/ + OL_ATH_PARAM_ARP_DBG_CONF, /* ARP debug 
configuration */ + OL_ATH_PARAM_DISABLE_STA_VAP_AMSDU, /* Disable AMSDU for station vap */ +#if ATH_SUPPORT_DFS && ATH_SUPPORT_STA_DFS + OL_ATH_PARAM_STADFS_ENABLE = 300, /* STA DFS is enabled or not */ +#endif +#if QCA_AIRTIME_FAIRNESS + OL_ATH_PARAM_ATF_STRICT_SCHED = 301, + OL_ATH_PARAM_ATF_GROUP_POLICY = 302, +#endif +#if DBDC_REPEATER_SUPPORT + OL_ATH_PARAM_PRIMARY_RADIO, + OL_ATH_PARAM_DBDC_ENABLE, +#endif + OL_ATH_PARAM_TXPOWER_DBSCALE, + OL_ATH_PARAM_CTL_POWER_SCALE, +#if QCA_AIRTIME_FAIRNESS + OL_ATH_PARAM_ATF_OBSS_SCHED = 307, + OL_ATH_PARAM_ATF_OBSS_SCALE = 308, +#endif + OL_ATH_PARAM_PHY_OFDM_ERR = 309, + OL_ATH_PARAM_PHY_CCK_ERR = 310, + OL_ATH_PARAM_FCS_ERR = 311, + OL_ATH_PARAM_CHAN_UTIL = 312, +#if DBDC_REPEATER_SUPPORT + OL_ATH_PARAM_CLIENT_MCAST, +#endif + OL_ATH_PARAM_EMIWAR_80P80 = 314, + OL_ATH_PARAM_BATCHMODE = 315, + OL_ATH_PARAM_PACK_AGGR_DELAY = 316, +#if UMAC_SUPPORT_ACFG + OL_ATH_PARAM_DIAG_ENABLE = 317, +#endif +#if ATH_SUPPORT_VAP_QOS + OL_ATH_PARAM_VAP_QOS = 318, +#endif + OL_ATH_PARAM_CHAN_STATS_TH = 319, + /* Passive scan is enabled or disabled */ + OL_ATH_PARAM_PASSIVE_SCAN_ENABLE = 320, + OL_ATH_MIN_RSSI_ENABLE = 321, + OL_ATH_MIN_RSSI = 322, + OL_ATH_PARAM_ACS_2G_ALLCHAN = 323, +#if DBDC_REPEATER_SUPPORT + OL_ATH_PARAM_DELAY_STAVAP_UP = 324, +#endif + /* It is used to set the channel switch options */ + OL_ATH_PARAM_CHANSWITCH_OPTIONS = 327, + OL_ATH_BTCOEX_ENABLE = 328, + OL_ATH_BTCOEX_WL_PRIORITY = 329, + OL_ATH_PARAM_TID_OVERRIDE_QUEUE_MAPPING = 330, + OL_ATH_PARAM_CAL_VER_CHECK = 331, + OL_ATH_PARAM_NO_VLAN = 332, + OL_ATH_PARAM_CCA_THRESHOLD = 333, + OL_ATH_PARAM_ATF_LOGGING = 334, + OL_ATH_PARAM_STRICT_DOTH = 335, + OL_ATH_PARAM_DISCONNECTION_TIMEOUT = 336, + OL_ATH_PARAM_RECONFIGURATION_TIMEOUT = 337, + OL_ATH_PARAM_CHANNEL_SWITCH_COUNT = 338, + OL_ATH_PARAM_ALWAYS_PRIMARY = 339, + OL_ATH_PARAM_FAST_LANE = 340, + OL_ATH_GET_BTCOEX_DUTY_CYCLE = 341, + OL_ATH_PARAM_SECONDARY_OFFSET_IE = 342, + 
OL_ATH_PARAM_WIDE_BAND_SUB_ELEMENT = 343, + OL_ATH_PARAM_PREFERRED_UPLINK = 344, + OL_ATH_PARAM_PRECAC_ENABLE = 345, + OL_ATH_PARAM_PRECAC_TIMEOUT = 346, + OL_ATH_COEX_VER_CFG = 347, + OL_ATH_PARAM_DUMP_TARGET = 348, + OL_ATH_PARAM_PDEV_TO_REO_DEST = 349, + OL_ATH_PARAM_DUMP_CHAINMASK_TABLES = 350, + OL_ATH_PARAM_DUMP_OBJECTS = 351, + OL_ATH_PARAM_ACS_SRLOADVAR = 352, + OL_ATH_PARAM_MGMT_RSSI_THRESHOLD = 353, + OL_ATH_PARAM_EXT_NSS_CAPABLE = 354, + OL_ATH_PARAM_MGMT_PDEV_STATS_TIMER = 355, + OL_ATH_PARAM_TXACKTIMEOUT = 356, + OL_ATH_PARAM_ICM_ACTIVE = 357, + OL_ATH_PARAM_NOMINAL_NOISEFLOOR = 358, + OL_ATH_PARAM_CHAN_INFO = 359, + OL_ATH_PARAM_ACS_RANK = 360, + OL_ATH_PARAM_TXCHAINSOFT = 361, + OL_ATH_PARAM_WIDE_BAND_SCAN = 362, + OL_ATH_PARAM_CCK_TX_ENABLE = 363, + OL_ATH_PARAM_PAPI_ENABLE = 364, + OL_ATH_PARAM_ISOLATION = 365, + OL_ATH_PARAM_MAX_CLIENTS_PER_RADIO = 366, +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) + OL_ATH_PARAM_DFS_HOST_WAIT_TIMEOUT = 367, +#endif + OL_ATH_PARAM_NF_THRESH = 368, +}; + +/* Enumeration of PDEV Configuration parameter */ +enum _ol_hal_param_t { + OL_HAL_CONFIG_DMA_BEACON_RESPONSE_TIME = 0 +}; +#endif + +/* Bitmasks for stats that can block */ +#define EXT_TXRX_FW_STATS 0x0001 +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_tx_delay.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_tx_delay.h new file mode 100644 index 0000000000000000000000000000000000000000..34e3cbe9355b7bd867bd4cc4796c1ad2d45d6c4c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_tx_delay.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_tx_delay.h + * @brief Define the host data path histogram API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_COMPUTE_TX_DELAY_H_ +#define _CDP_TXRX_COMPUTE_TX_DELAY_H_ +#include "cdp_txrx_handle.h" +/** + * cdp_tx_delay() - get tx packet delay + * @soc: data path soc handle + * @pdev: physical device instance + * @queue_delay_microsec: tx packet delay within queue, usec + * @tx_delay_microsec: tx packet delay, usec + * @category: packet category + * + * Return: NONE + */ +static inline void +cdp_tx_delay(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + uint32_t *queue_delay_microsec, uint32_t *tx_delay_microsec, + int category) +{ + if (!soc || !soc->ops || !soc->ops->delay_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->delay_ops->tx_delay) + return soc->ops->delay_ops->tx_delay(pdev, + queue_delay_microsec, tx_delay_microsec, category); + return; +} + +/** + * cdp_tx_delay_hist() - get tx packet delay histogram + * @soc: data path soc handle + * @pdev: physical device instance + * @bin_values: bin + * @category: packet category + * + * Return: NONE + */ +static inline void +cdp_tx_delay_hist(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + uint16_t *bin_values, int category) +{ + if (!soc || !soc->ops || !soc->ops->delay_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid 
instance", __func__); + return; + } + + if (soc->ops->delay_ops->tx_delay_hist) + return soc->ops->delay_ops->tx_delay_hist(pdev, + bin_values, category); + return; +} + +/** + * cdp_tx_packet_count() - get tx packet count + * @soc: data path soc handle + * @pdev: physical device instance + * @out_packet_loss_count: packet loss count + * @category: packet category + * + * Return: NONE + */ +static inline void +cdp_tx_packet_count(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + uint16_t *out_packet_count, uint16_t *out_packet_loss_count, + int category) +{ + if (!soc || !soc->ops || !soc->ops->delay_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->delay_ops->tx_packet_count) + return soc->ops->delay_ops->tx_packet_count(pdev, + out_packet_count, out_packet_loss_count, category); + return; +} + +/** + * cdp_tx_set_compute_interval() - set tx packet stat compute interval + * @soc: data path soc handle + * @pdev: physical device instance + * @interval: compute interval + * + * Return: NONE + */ +static inline void +cdp_tx_set_compute_interval(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + uint32_t interval) +{ + if (!soc || !soc->ops || !soc->ops->delay_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->delay_ops->tx_set_compute_interval) + return soc->ops->delay_ops->tx_set_compute_interval(pdev, + interval); + return; +} +#endif /* _CDP_TXRX_COMPUTE_TX_DELAY_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_tx_throttle.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_tx_throttle.h new file mode 100644 index 0000000000000000000000000000000000000000..9716f27c4bd017ba648b81c995631b1566565a06 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_tx_throttle.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_tx_throttle.h + * @brief Define the host data path transmit throttle API + * functions called by the host control SW and the OS interface + * module + */ +#ifndef _CDP_TXRX_TX_THROTTLE_H_ +#define _CDP_TXRX_TX_THROTTLE_H_ +#include +#include "cdp_txrx_handle.h" +/** + * cdp_throttle_init_period() - init tx throttle period + * @soc: data path soc handle + * @pdev: physical device instance + * @period: throttle period + * @dutycycle_level: duty cycle level + * + * Return: NONE + */ +static inline void +cdp_throttle_init_period(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, + int period, uint8_t *dutycycle_level) +{ + if (!soc || !soc->ops || !soc->ops->throttle_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->throttle_ops->throttle_init_period) + return soc->ops->throttle_ops->throttle_init_period(pdev, + period, dutycycle_level); + return; +} + +/** + * cdp_throttle_init_period() - init tx throttle period + * @soc: data path soc handle + * @pdev: physical device instance + * @level: throttle level + * + * Return: NONE + */ +static inline void +cdp_throttle_set_level(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int 
level) +{ + if (!soc || !soc->ops || !soc->ops->throttle_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->throttle_ops->throttle_set_level) + return soc->ops->throttle_ops->throttle_set_level(pdev, level); + return; +} + +#endif /* _CDP_TXRX_TX_THROTTLE_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_wds.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_wds.h new file mode 100644 index 0000000000000000000000000000000000000000..db28e80bed9a07ebea9d8ed1783718d83b6e03db --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_wds.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file cdp_txrx_wds.h + * @brief Define the host data path WDS API functions + * called by the host control SW and the OS interface module + */ +#ifndef _CDP_TXRX_WDS_H_ +#define _CDP_TXRX_WDS_H_ +#include "cdp_txrx_handle.h" +/** + * @brief set the wds rx filter policy of the device + * @details + * This flag sets the wds rx policy on the vdev. Rx frames not compliant + * with the policy will be dropped. 
+ * + * @param vdev - the data virtual device object + * @param val - the wds rx policy bitmask + * @return - void + */ +static inline void +cdp_set_wds_rx_policy(ol_txrx_soc_handle soc, + struct cdp_vdev *vdev, + u_int32_t val) +{ + if (!soc || !soc->ops || !soc->ops->wds_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->wds_ops->txrx_set_wds_rx_policy) + return soc->ops->wds_ops->txrx_set_wds_rx_policy(vdev, val); + return; +} + +/** + * @brief set the wds rx filter policy of the device + * @details + * This flag sets the wds rx policy on the vdev. Rx frames not compliant + * with the policy will be dropped. + * + * @param vdev - the data virtual device object + * @param val - the wds rx policy bitmask + * @return - void + */ +static inline void +cdp_set_wds_tx_policy_update(ol_txrx_soc_handle soc, + struct cdp_peer *peer, + int wds_tx_ucast, int wds_tx_mcast) +{ + if (!soc || !soc->ops || !soc->ops->wds_ops) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s invalid instance", __func__); + return; + } + + if (soc->ops->wds_ops->txrx_wds_peer_tx_policy_update) + return soc->ops->wds_ops->txrx_wds_peer_tx_policy_update( + peer, wds_tx_ucast, wds_tx_mcast); + return; +} + +/** + * cdp_vdev_set_wds() - Set/unset wds_enable flag in vdev + * @soc - data path soc handle + * @vdev - data path vap handle + * @val - value to be set in wds_en flag + * + * This flag enables WDS source port learning feature on a vdev + * + * return 1 on success + */ +static inline int +cdp_vdev_set_wds(ol_txrx_soc_handle soc, void *vdev, uint32_t val) +{ + if (soc->ops->wds_ops->vdev_set_wds) + return soc->ops->wds_ops->vdev_set_wds(vdev, val); + return 0; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c new file mode 100644 index 0000000000000000000000000000000000000000..df5cd2d02048a279451cba5f9e7accc791aa62c5 --- 
/dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.c @@ -0,0 +1,3277 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include "dp_htt.h" +#include "dp_peer.h" +#include "dp_types.h" +#include "dp_internal.h" +#include "dp_rx_mon.h" +#include "htt_stats.h" +#include "htt_ppdu_stats.h" +#include "qdf_mem.h" /* qdf_mem_malloc,free */ +#include "cdp_txrx_cmn_struct.h" + +#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE + +#define HTT_HTC_PKT_POOL_INIT_SIZE 64 +#define HTT_T2H_MAX_MSG_SIZE 2048 + +#define HTT_MSG_BUF_SIZE(msg_bytes) \ + ((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING) + +#define HTT_PID_BIT_MASK 0x3 + +#define DP_EXT_MSG_LENGTH 2048 +#define DP_HTT_SEND_HTC_PKT(soc, pkt) \ +do { \ + if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) == \ + QDF_STATUS_SUCCESS) \ + htt_htc_misc_pkt_list_add(soc, pkt); \ +} while (0) + +#define HTT_MGMT_CTRL_TLV_RESERVERD_LEN 12 +/** + * Bitmap of HTT PPDU TLV types for Default mode + */ +#define HTT_PPDU_DEFAULT_TLV_BITMAP \ + (1 << HTT_PPDU_STATS_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ + (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ + 
(1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) + +/** + * Bitmap of HTT PPDU TLV types for Sniffer mode + */ +#define HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP \ + (1 << HTT_PPDU_STATS_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_RATE_TLV) | \ + (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV) | \ + (1 << HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV) | \ + (1 << HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV) + +#define HTT_FRAMECTRL_DATATYPE 0x08 +#define HTT_PPDU_DESC_MAX_DEPTH 16 + +/* + * dp_tx_stats_update() - Update per-peer statistics + * @soc: Datapath soc handle + * @peer: Datapath peer handle + * @ppdu: PPDU Descriptor + * @ack_rssi: RSSI of last ack received + * + * Return: None + */ +#ifdef FEATURE_PERPKT_INFO +static void dp_tx_stats_update(struct dp_soc *soc, struct dp_peer *peer, + struct cdp_tx_completion_ppdu_user *ppdu, uint32_t ack_rssi) +{ + struct dp_pdev *pdev = peer->vdev->pdev; + uint8_t preamble, mcs; + uint16_t num_msdu; + + preamble = ppdu->preamble; + mcs = ppdu->mcs; + num_msdu = ppdu->num_msdu; + + /* If the peer statistics are already processed as part of + * per-MSDU completion handler, do not process these again in per-PPDU + * indications */ + if (soc->process_tx_status) + return; + + DP_STATS_INC_PKT(peer, tx.comp_pkt, + num_msdu, (ppdu->success_bytes + + ppdu->retry_bytes + ppdu->failed_bytes)); + DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus); + DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate); + DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu); + DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu); + DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu); + DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], num_msdu); + DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc); + 
DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc); + if (!(ppdu->is_mcast)) + DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi); + + DP_STATS_INC(peer, tx.retries, + (ppdu->long_retries + ppdu->short_retries)); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, + ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < MAX_MCS_11A) && (preamble == DOT11_A))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, + ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, + ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < MAX_MCS_11A) && (preamble == DOT11_N))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, + ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, + ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX))); + + if (soc->cdp_soc.ol_ops->update_dp_stats) { + soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev, + &peer->stats, ppdu->peer_id, + UPDATE_PEER_STATS); + + } +} +#endif + +/* + * htt_htc_pkt_alloc() - Allocate HTC packet buffer + * @htt_soc: HTT SOC handle + * + * Return: Pointer to htc packet buffer + */ +static struct dp_htt_htc_pkt * +htt_htc_pkt_alloc(struct htt_soc *soc) +{ + struct dp_htt_htc_pkt_union *pkt = NULL; + + HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); + if (soc->htt_htc_pkt_freelist) { + 
pkt = soc->htt_htc_pkt_freelist; + soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next; + } + HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); + + if (pkt == NULL) + pkt = qdf_mem_malloc(sizeof(*pkt)); + return &pkt->u.pkt; /* not actually a dereference */ +} + +/* + * htt_htc_pkt_free() - Free HTC packet buffer + * @htt_soc: HTT SOC handle + */ +static void +htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt) +{ + struct dp_htt_htc_pkt_union *u_pkt = + (struct dp_htt_htc_pkt_union *)pkt; + + HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); + u_pkt->u.next = soc->htt_htc_pkt_freelist; + soc->htt_htc_pkt_freelist = u_pkt; + HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); +} + +/* + * htt_htc_pkt_pool_free() - Free HTC packet pool + * @htt_soc: HTT SOC handle + */ +static void +htt_htc_pkt_pool_free(struct htt_soc *soc) +{ + struct dp_htt_htc_pkt_union *pkt, *next; + pkt = soc->htt_htc_pkt_freelist; + while (pkt) { + next = pkt->u.next; + qdf_mem_free(pkt); + pkt = next; + } + soc->htt_htc_pkt_freelist = NULL; +} + +/* + * htt_htc_misc_pkt_list_trim() - trim misc list + * @htt_soc: HTT SOC handle + * @level: max no. 
of pkts in list + */ +static void +htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level) +{ + struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL; + int i = 0; + qdf_nbuf_t netbuf; + + HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); + pkt = soc->htt_htc_pkt_misclist; + while (pkt) { + next = pkt->u.next; + /* trim the out grown list*/ + if (++i > level) { + netbuf = + (qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext); + qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE); + qdf_nbuf_free(netbuf); + qdf_mem_free(pkt); + pkt = NULL; + if (prev) + prev->u.next = NULL; + } + prev = pkt; + pkt = next; + } + HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); +} + +/* + * htt_htc_misc_pkt_list_add() - Add pkt to misc list + * @htt_soc: HTT SOC handle + * @dp_htt_htc_pkt: pkt to be added to list + */ +static void +htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt) +{ + struct dp_htt_htc_pkt_union *u_pkt = + (struct dp_htt_htc_pkt_union *)pkt; + int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc, + pkt->htc_pkt.Endpoint) + + DP_HTT_HTC_PKT_MISCLIST_SIZE; + + HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex); + if (soc->htt_htc_pkt_misclist) { + u_pkt->u.next = soc->htt_htc_pkt_misclist; + soc->htt_htc_pkt_misclist = u_pkt; + } else { + soc->htt_htc_pkt_misclist = u_pkt; + } + HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex); + + /* only ce pipe size + tx_queue_depth could possibly be in use + * free older packets in the misclist + */ + htt_htc_misc_pkt_list_trim(soc, misclist_trim_level); +} + +/* + * htt_htc_misc_pkt_pool_free() - free pkts in misc list + * @htt_soc: HTT SOC handle + */ +static void +htt_htc_misc_pkt_pool_free(struct htt_soc *soc) +{ + struct dp_htt_htc_pkt_union *pkt, *next; + qdf_nbuf_t netbuf; + + pkt = soc->htt_htc_pkt_misclist; + + while (pkt) { + next = pkt->u.next; + netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext); + qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE); + + soc->stats.htc_pkt_free++; + 
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, + "%s: Pkt free count %d\n", + __func__, soc->stats.htc_pkt_free); + + qdf_nbuf_free(netbuf); + qdf_mem_free(pkt); + pkt = next; + } + soc->htt_htc_pkt_misclist = NULL; +} + +/* + * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ + * @tgt_mac_addr: Target MAC + * @buffer: Output buffer + */ +static u_int8_t * +htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer) +{ +#ifdef BIG_ENDIAN_HOST + /* + * The host endianness is opposite of the target endianness. + * To make u_int32_t elements come out correctly, the target->host + * upload has swizzled the bytes in each u_int32_t element of the + * message. + * For byte-array message fields like the MAC address, this + * upload swizzling puts the bytes in the wrong order, and needs + * to be undone. + */ + buffer[0] = tgt_mac_addr[3]; + buffer[1] = tgt_mac_addr[2]; + buffer[2] = tgt_mac_addr[1]; + buffer[3] = tgt_mac_addr[0]; + buffer[4] = tgt_mac_addr[7]; + buffer[5] = tgt_mac_addr[6]; + return buffer; +#else + /* + * The host endianness matches the target endianness - + * we can use the mac addr directly from the message buffer. 
+ */ + return tgt_mac_addr; +#endif +} + +/* + * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer + * @soc: SOC handle + * @status: Completion status + * @netbuf: HTT buffer + */ +static void +dp_htt_h2t_send_complete_free_netbuf( + void *soc, A_STATUS status, qdf_nbuf_t netbuf) +{ + qdf_nbuf_free(netbuf); +} + +/* + * dp_htt_h2t_send_complete() - H2T completion handler + * @context: Opaque context (HTT SOC handle) + * @htc_pkt: HTC packet + */ +static void +dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt) +{ + void (*send_complete_part2)( + void *soc, A_STATUS status, qdf_nbuf_t msdu); + struct htt_soc *soc = (struct htt_soc *) context; + struct dp_htt_htc_pkt *htt_pkt; + qdf_nbuf_t netbuf; + + send_complete_part2 = htc_pkt->pPktContext; + + htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt); + + /* process (free or keep) the netbuf that held the message */ + netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext; + /* + * adf sendcomplete is required for windows only + */ + /* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */ + if (send_complete_part2 != NULL) { + send_complete_part2( + htt_pkt->soc_ctxt, htc_pkt->Status, netbuf); + } + /* free the htt_htc_pkt / HTC_PACKET object */ + htt_htc_pkt_free(soc, htt_pkt); +} + +/* + * htt_h2t_ver_req_msg() - Send HTT version request message to target + * @htt_soc: HTT SOC handle + * + * Return: 0 on success; error code on failure + */ +static int htt_h2t_ver_req_msg(struct htt_soc *soc) +{ + struct dp_htt_htc_pkt *pkt; + qdf_nbuf_t msg; + uint32_t *msg_word; + + msg = qdf_nbuf_alloc( + soc->osdev, + HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES), + /* reserve room for the HTC header */ + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); + if (!msg) + return QDF_STATUS_E_NOMEM; + + /* + * Set the length of the message. + * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added + * separately during the below call to qdf_nbuf_push_head. 
+ * The contribution from the HTC header is added separately inside HTC. + */ + if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg\n", + __func__); + return QDF_STATUS_E_FAILURE; + } + + /* fill in the message contents */ + msg_word = (u_int32_t *) qdf_nbuf_data(msg); + + /* rewind beyond alignment pad to get to the HTC header reserved area */ + qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); + + *msg_word = 0; + HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ); + + pkt = htt_htc_pkt_alloc(soc); + if (!pkt) { + qdf_nbuf_free(msg); + return QDF_STATUS_E_FAILURE; + } + pkt->soc_ctxt = NULL; /* not used during send-done callback */ + + SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, + dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg), + qdf_nbuf_len(msg), soc->htc_endpoint, + 1); /* tag - not relevant here */ + + SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); + DP_HTT_SEND_HTC_PKT(soc, pkt); + return 0; +} + +/* + * htt_srng_setup() - Send SRNG setup message to target + * @htt_soc: HTT SOC handle + * @mac_id: MAC Id + * @hal_srng: Opaque HAL SRNG pointer + * @hal_ring_type: SRNG ring type + * + * Return: 0 on success; error code on failure + */ +int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng, + int hal_ring_type) +{ + struct htt_soc *soc = (struct htt_soc *)htt_soc; + struct dp_htt_htc_pkt *pkt; + qdf_nbuf_t htt_msg; + uint32_t *msg_word; + struct hal_srng_params srng_params; + qdf_dma_addr_t hp_addr, tp_addr; + uint32_t ring_entry_size = + hal_srng_get_entrysize(soc->hal_soc, hal_ring_type); + int htt_ring_type, htt_ring_id; + + /* Sizes should be set in 4-byte words */ + ring_entry_size = ring_entry_size >> 2; + + htt_msg = qdf_nbuf_alloc(soc->osdev, + HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ), + /* reserve room for the HTC header */ + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); + if (!htt_msg) + goto 
fail0; + + hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params); + hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng); + tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng); + + switch (hal_ring_type) { + case RXDMA_BUF: +#ifdef QCA_HOST2FW_RXBUF_RING + if (srng_params.ring_id == + (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) { + htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING; + htt_ring_type = HTT_SW_TO_SW_RING; +#ifdef IPA_OFFLOAD + } else if (srng_params.ring_id == + (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) { + htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING; + htt_ring_type = HTT_SW_TO_SW_RING; +#endif +#else + if (srng_params.ring_id == + (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 + + (mac_id * HAL_MAX_RINGS_PER_LMAC))) { + htt_ring_id = HTT_RXDMA_HOST_BUF_RING; + htt_ring_type = HTT_SW_TO_HW_RING; +#endif + } else if (srng_params.ring_id == +#ifdef IPA_OFFLOAD + (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 + +#else + (HAL_SRNG_WMAC1_SW2RXDMA1_BUF + +#endif + (mac_id * HAL_MAX_RINGS_PER_LMAC))) { + htt_ring_id = HTT_RXDMA_HOST_BUF_RING; + htt_ring_type = HTT_SW_TO_HW_RING; + } else { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Ring %d currently not supported\n", + __func__, srng_params.ring_id); + goto fail1; + } + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: ring_type %d ring_id %d\n", + __func__, hal_ring_type, srng_params.ring_id); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: hp_addr 0x%llx tp_addr 0x%llx\n", + __func__, (uint64_t)hp_addr, (uint64_t)tp_addr); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: htt_ring_id %d\n", __func__, htt_ring_id); + break; + case RXDMA_MONITOR_BUF: + htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING; + htt_ring_type = HTT_SW_TO_HW_RING; + break; + case RXDMA_MONITOR_STATUS: + htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING; + htt_ring_type = HTT_SW_TO_HW_RING; + break; + case RXDMA_MONITOR_DST: + htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING; + htt_ring_type = HTT_HW_TO_SW_RING; + break; + case RXDMA_MONITOR_DESC: + 
htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING; + htt_ring_type = HTT_SW_TO_HW_RING; + break; + case RXDMA_DST: + htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING; + htt_ring_type = HTT_HW_TO_SW_RING; + break; + + default: + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Ring currently not supported\n", __func__); + goto fail1; + } + + /* + * Set the length of the message. + * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added + * separately during the below call to qdf_nbuf_push_head. + * The contribution from the HTC header is added separately inside HTC. + */ + if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Failed to expand head for SRING_SETUP msg\n", + __func__); + return QDF_STATUS_E_FAILURE; + } + + msg_word = (uint32_t *)qdf_nbuf_data(htt_msg); + + /* rewind beyond alignment pad to get to the HTC header reserved area */ + qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING); + + /* word 0 */ + *msg_word = 0; + HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP); + + if ((htt_ring_type == HTT_SW_TO_HW_RING) || + (htt_ring_type == HTT_HW_TO_SW_RING)) + HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, + DP_SW2HW_MACID(mac_id)); + else + HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id); + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: mac_id %d\n", __func__, mac_id); + HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type); + /* TODO: Discuss with FW on changing this to unique ID and using + * htt_ring_type to send the type of ring + */ + HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id); + + /* word 1 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word, + srng_params.ring_base_paddr & 0xffffffff); + + /* word 2 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word, + (uint64_t)srng_params.ring_base_paddr >> 32); + + /* word 3 */ + msg_word++; + *msg_word = 0; + 
HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size); + HTT_SRING_SETUP_RING_SIZE_SET(*msg_word, + (ring_entry_size * srng_params.num_entries)); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: entry_size %d\n", __func__, + ring_entry_size); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: num_entries %d\n", __func__, + srng_params.num_entries); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: ring_size %d\n", __func__, + (ring_entry_size * srng_params.num_entries)); + if (htt_ring_type == HTT_SW_TO_HW_RING) + HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET( + *msg_word, 1); + HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word, + !!(srng_params.flags & HAL_SRNG_MSI_SWAP)); + HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word, + !!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP)); + HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word, + !!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP)); + + /* word 4 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word, + hp_addr & 0xffffffff); + + /* word 5 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word, + (uint64_t)hp_addr >> 32); + + /* word 6 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word, + tp_addr & 0xffffffff); + + /* word 7 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word, + (uint64_t)tp_addr >> 32); + + /* word 8 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word, + srng_params.msi_addr & 0xffffffff); + + /* word 9 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word, + (uint64_t)(srng_params.msi_addr) >> 32); + + /* word 10 */ + msg_word++; + *msg_word = 0; + HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word, + srng_params.msi_data); + + /* word 11 */ + msg_word++; + *msg_word = 0; + 
HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word, + srng_params.intr_batch_cntr_thres_entries * + ring_entry_size); + HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word, + srng_params.intr_timer_thres_us >> 3); + + /* word 12 */ + msg_word++; + *msg_word = 0; + if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) { + /* TODO: Setting low threshold to 1/8th of ring size - see + * if this needs to be configurable + */ + HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word, + srng_params.low_threshold); + } + /* "response_required" field should be set if a HTT response message is + * required after setting up the ring. + */ + pkt = htt_htc_pkt_alloc(soc); + if (!pkt) + goto fail1; + + pkt->soc_ctxt = NULL; /* not used during send-done callback */ + + SET_HTC_PACKET_INFO_TX( + &pkt->htc_pkt, + dp_htt_h2t_send_complete_free_netbuf, + qdf_nbuf_data(htt_msg), + qdf_nbuf_len(htt_msg), + soc->htc_endpoint, + HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */ + + SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg); + DP_HTT_SEND_HTC_PKT(soc, pkt); + + return QDF_STATUS_SUCCESS; + +fail1: + qdf_nbuf_free(htt_msg); +fail0: + return QDF_STATUS_E_FAILURE; +} + +/* + * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter + * config message to target + * @htt_soc: HTT SOC handle + * @pdev_id: PDEV Id + * @hal_srng: Opaque HAL SRNG pointer + * @hal_ring_type: SRNG ring type + * @ring_buf_size: SRNG buffer size + * @htt_tlv_filter: Rx SRNG TLV and filter setting + * Return: 0 on success; error code on failure + */ +int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng, + int hal_ring_type, int ring_buf_size, + struct htt_rx_ring_tlv_filter *htt_tlv_filter) +{ + struct htt_soc *soc = (struct htt_soc *)htt_soc; + struct dp_htt_htc_pkt *pkt; + qdf_nbuf_t htt_msg; + uint32_t *msg_word; + struct hal_srng_params srng_params; + uint32_t htt_ring_type, htt_ring_id; + uint32_t tlv_filter; + + htt_msg = qdf_nbuf_alloc(soc->osdev, + 
HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ), + /* reserve room for the HTC header */ + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); + if (!htt_msg) + goto fail0; + + hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params); + + switch (hal_ring_type) { + case RXDMA_BUF: +#if QCA_HOST2FW_RXBUF_RING + htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING; + htt_ring_type = HTT_SW_TO_SW_RING; +#else + htt_ring_id = HTT_RXDMA_HOST_BUF_RING; + htt_ring_type = HTT_SW_TO_HW_RING; +#endif + break; + case RXDMA_MONITOR_BUF: + htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING; + htt_ring_type = HTT_SW_TO_HW_RING; + break; + case RXDMA_MONITOR_STATUS: + htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING; + htt_ring_type = HTT_SW_TO_HW_RING; + break; + case RXDMA_MONITOR_DST: + htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING; + htt_ring_type = HTT_HW_TO_SW_RING; + break; + case RXDMA_MONITOR_DESC: + htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING; + htt_ring_type = HTT_SW_TO_HW_RING; + break; + case RXDMA_DST: + htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING; + htt_ring_type = HTT_HW_TO_SW_RING; + break; + + default: + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Ring currently not supported\n", __func__); + goto fail1; + } + + /* + * Set the length of the message. + * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added + * separately during the below call to qdf_nbuf_push_head. + * The contribution from the HTC header is added separately inside HTC. 
+ */ + if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Failed to expand head for RX Ring Cfg msg\n", + __func__); + goto fail1; /* failure */ + } + + msg_word = (uint32_t *)qdf_nbuf_data(htt_msg); + + /* rewind beyond alignment pad to get to the HTC header reserved area */ + qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING); + + /* word 0 */ + *msg_word = 0; + HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG); + + /* + * pdev_id is indexed from 0 whereas mac_id is indexed from 1 + * SW_TO_SW and SW_TO_HW rings are unaffected by this + */ + if (htt_ring_type == HTT_SW_TO_SW_RING || + htt_ring_type == HTT_SW_TO_HW_RING) + HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word, + DP_SW2HW_MACID(pdev_id)); + + /* TODO: Discuss with FW on changing this to unique ID and using + * htt_ring_type to send the type of ring + */ + HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id); + + HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word, + !!(srng_params.flags & HAL_SRNG_MSI_SWAP)); + + HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word, + !!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP)); + + /* word 1 */ + msg_word++; + *msg_word = 0; + HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word, + ring_buf_size); + + /* word 2 */ + msg_word++; + *msg_word = 0; + + if (htt_tlv_filter->enable_fp) { + /* TYPE: MGMT */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 0000, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_ASSOC_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 0001, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_ASSOC_RES) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 0010, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_REASSOC_REQ) ? 
1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 0011, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_REASSOC_RES) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 0100, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_PROBE_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 0101, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_PROBE_RES) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 0110, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_TIM_ADVT) ? 1 : 0); + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP, + MGMT, 0111, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_RESERVED_7) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 1000, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_BEACON) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + FP, MGMT, 1001, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_ATIM) ? 
1 : 0); + } + + if (htt_tlv_filter->enable_md) { + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, + MGMT, 0000, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, + MGMT, 0001, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, + MGMT, 0010, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, + MGMT, 0011, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, + MGMT, 0100, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, + MGMT, 0101, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, + MGMT, 0110, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, + MGMT, 0111, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, + MGMT, 1000, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD, + MGMT, 1001, 1); + } + + if (htt_tlv_filter->enable_mo) { + /* TYPE: MGMT */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 0000, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_ASSOC_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 0001, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_ASSOC_RES) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 0010, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_REASSOC_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 0011, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_REASSOC_RES) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 0100, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_PROBE_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 0101, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_PROBE_RES) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 0110, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_TIM_ADVT) ? 
1 : 0); + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO, + MGMT, 0111, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_RESERVED_7) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 1000, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_BEACON) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, + MO, MGMT, 1001, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_ATIM) ? 1 : 0); + } + + /* word 3 */ + msg_word++; + *msg_word = 0; + + if (htt_tlv_filter->enable_fp) { + /* TYPE: MGMT */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + FP, MGMT, 1010, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_DISASSOC) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + FP, MGMT, 1011, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_AUTH) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + FP, MGMT, 1100, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_DEAUTH) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + FP, MGMT, 1101, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_ACTION) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + FP, MGMT, 1110, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_ACT_NO_ACK) ? 1 : 0); + /* reserved*/ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP, + MGMT, 1111, + (htt_tlv_filter->fp_mgmt_filter & + FILTER_MGMT_RESERVED_15) ? 
1 : 0); + } + + if (htt_tlv_filter->enable_md) { + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD, + MGMT, 1010, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD, + MGMT, 1011, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD, + MGMT, 1100, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD, + MGMT, 1101, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD, + MGMT, 1110, 1); + } + + if (htt_tlv_filter->enable_mo) { + /* TYPE: MGMT */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + MO, MGMT, 1010, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_DISASSOC) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + MO, MGMT, 1011, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_AUTH) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + MO, MGMT, 1100, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_DEAUTH) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + MO, MGMT, 1101, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_ACTION) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, + MO, MGMT, 1110, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_ACT_NO_ACK) ? 1 : 0); + /* reserved*/ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO, + MGMT, 1111, + (htt_tlv_filter->mo_mgmt_filter & + FILTER_MGMT_RESERVED_15) ? 1 : 0); + } + + /* word 4 */ + msg_word++; + *msg_word = 0; + + if (htt_tlv_filter->enable_fp) { + /* TYPE: CTRL */ + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 0000, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_RESERVED_1) ? 1 : 0); + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 0001, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_RESERVED_2) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 0010, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_TRIGGER) ? 
1 : 0); + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 0011, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_RESERVED_4) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 0100, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_BF_REP_POLL) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 0101, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_VHT_NDP) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 0110, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_FRAME_EXT) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 0111, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_CTRLWRAP) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 1000, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_BA_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP, + CTRL, 1001, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_BA) ? 
1 : 0); + } + + if (htt_tlv_filter->enable_md) { + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0000, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0001, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0010, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0011, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0100, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0101, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0110, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, + CTRL, 0111, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, + CTRL, 1000, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD, + CTRL, 1001, 1); + } + + if (htt_tlv_filter->enable_mo) { + /* TYPE: CTRL */ + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0000, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_RESERVED_1) ? 1 : 0); + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0001, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_RESERVED_2) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0010, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_TRIGGER) ? 1 : 0); + /* reserved */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0011, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_RESERVED_4) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0100, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_BF_REP_POLL) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0101, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_VHT_NDP) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0110, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_FRAME_EXT) ? 
1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 0111, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_CTRLWRAP) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 1000, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_BA_REQ) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO, + CTRL, 1001, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_BA) ? 1 : 0); + } + + /* word 5 */ + msg_word++; + *msg_word = 0; + if (htt_tlv_filter->enable_fp) { + /* TYPE: CTRL */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + CTRL, 1010, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_PSPOLL) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + CTRL, 1011, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_RTS) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + CTRL, 1100, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_CTS) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + CTRL, 1101, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_ACK) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + CTRL, 1110, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_CFEND) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + CTRL, 1111, + (htt_tlv_filter->fp_ctrl_filter & + FILTER_CTRL_CFEND_CFACK) ? 1 : 0); + /* TYPE: DATA */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + DATA, MCAST, + (htt_tlv_filter->fp_data_filter & + FILTER_DATA_MCAST) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + DATA, UCAST, + (htt_tlv_filter->fp_data_filter & + FILTER_DATA_UCAST) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP, + DATA, NULL, + (htt_tlv_filter->fp_data_filter & + FILTER_DATA_NULL) ? 
1 : 0); + } + + if (htt_tlv_filter->enable_md) { + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + CTRL, 1010, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + CTRL, 1011, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + CTRL, 1100, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + CTRL, 1101, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + CTRL, 1110, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + CTRL, 1111, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + DATA, MCAST, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + DATA, UCAST, 1); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD, + DATA, NULL, 1); + } + + if (htt_tlv_filter->enable_mo) { + /* TYPE: CTRL */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + CTRL, 1010, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_PSPOLL) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + CTRL, 1011, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_RTS) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + CTRL, 1100, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_CTS) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + CTRL, 1101, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_ACK) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + CTRL, 1110, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_CFEND) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + CTRL, 1111, + (htt_tlv_filter->mo_ctrl_filter & + FILTER_CTRL_CFEND_CFACK) ? 1 : 0); + /* TYPE: DATA */ + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + DATA, MCAST, + (htt_tlv_filter->mo_data_filter & + FILTER_DATA_MCAST) ? 1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + DATA, UCAST, + (htt_tlv_filter->mo_data_filter & + FILTER_DATA_UCAST) ? 
1 : 0); + htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO, + DATA, NULL, + (htt_tlv_filter->mo_data_filter & + FILTER_DATA_NULL) ? 1 : 0); + } + + /* word 6 */ + msg_word++; + *msg_word = 0; + tlv_filter = 0; + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START, + htt_tlv_filter->mpdu_start); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START, + htt_tlv_filter->msdu_start); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET, + htt_tlv_filter->packet); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END, + htt_tlv_filter->msdu_end); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END, + htt_tlv_filter->mpdu_end); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER, + htt_tlv_filter->packet_header); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION, + htt_tlv_filter->attention); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START, + htt_tlv_filter->ppdu_start); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END, + htt_tlv_filter->ppdu_end); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS, + htt_tlv_filter->ppdu_end_user_stats); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, + PPDU_END_USER_STATS_EXT, + htt_tlv_filter->ppdu_end_user_stats_ext); + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE, + htt_tlv_filter->ppdu_end_status_done); + /* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/ + htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED, + htt_tlv_filter->header_per_msdu); + + HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter); + + /* "response_required" field should be set if a HTT response message is + * required after setting up the ring. 
+ */ + pkt = htt_htc_pkt_alloc(soc); + if (!pkt) + goto fail1; + + pkt->soc_ctxt = NULL; /* not used during send-done callback */ + + SET_HTC_PACKET_INFO_TX( + &pkt->htc_pkt, + dp_htt_h2t_send_complete_free_netbuf, + qdf_nbuf_data(htt_msg), + qdf_nbuf_len(htt_msg), + soc->htc_endpoint, + 1); /* tag - not relevant here */ + + SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg); + DP_HTT_SEND_HTC_PKT(soc, pkt); + return QDF_STATUS_SUCCESS; + +fail1: + qdf_nbuf_free(htt_msg); +fail0: + return QDF_STATUS_E_FAILURE; +} + +#if defined(CONFIG_WIN) && WDI_EVENT_ENABLE +static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats, + struct dp_soc *soc, qdf_nbuf_t htt_msg) + +{ + uint32_t pdev_id; + uint32_t *msg_word = NULL; + uint32_t msg_remain_len = 0; + + msg_word = (uint32_t *) qdf_nbuf_data(htt_msg); + + /*COOKIE MSB*/ + pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK; + + /* stats message length + 16 size of HTT header*/ + msg_remain_len = qdf_min(htt_stats->msg_len + 16, + (uint32_t)DP_EXT_MSG_LENGTH); + + dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc, + msg_word, msg_remain_len, + WDI_NO_VAL, pdev_id); + + if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) { + htt_stats->msg_len -= DP_EXT_MSG_LENGTH; + } + /* Need to be freed here as WDI handler will + * make a copy of pkt to send data to application + */ + qdf_nbuf_free(htt_msg); + return QDF_STATUS_SUCCESS; +} +#else +static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats, + struct dp_soc *soc, qdf_nbuf_t htt_msg) +{ + return QDF_STATUS_E_NOSUPPORT; +} +#endif + +/** + * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats + * @htt_stats: htt stats info + * + * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message + * contains sub messages which are identified by a TLV header. + * In this function we will process the stream of T2H messages and read all the + * TLV contained in the message. 
+ * + * THe following cases have been taken care of + * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer + * In this case the buffer will contain multiple tlvs. + * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer. + * Only one tlv will be contained in the HTT message and this tag + * will extend onto the next buffer. + * Case 3: When the buffer is the continuation of the previous message + * Case 4: tlv length is 0. which will indicate the end of message + * + * return: void + */ +static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats, + struct dp_soc *soc) +{ + htt_tlv_tag_t tlv_type = 0xff; + qdf_nbuf_t htt_msg = NULL; + uint32_t *msg_word; + uint8_t *tlv_buf_head = NULL; + uint8_t *tlv_buf_tail = NULL; + uint32_t msg_remain_len = 0; + uint32_t tlv_remain_len = 0; + uint32_t *tlv_start; + int cookie_val; + int cookie_msb; + int pdev_id; + bool copy_stats = false; + struct dp_pdev *pdev; + + /* Process node in the HTT message queue */ + while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg)) + != NULL) { + msg_word = (uint32_t *) qdf_nbuf_data(htt_msg); + cookie_val = *(msg_word + 1); + htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET( + *(msg_word + + HTT_T2H_EXT_STATS_TLV_START_OFFSET)); + + if (cookie_val) { + if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg) + == QDF_STATUS_SUCCESS) { + continue; + } + } + + cookie_msb = *(msg_word + 2); + pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK; + pdev = soc->pdev_list[pdev_id]; + + if (cookie_msb >> 2) { + copy_stats = true; + } + + /* read 5th word */ + msg_word = msg_word + 4; + msg_remain_len = qdf_min(htt_stats->msg_len, + (uint32_t) DP_EXT_MSG_LENGTH); + /* Keep processing the node till node length is 0 */ + while (msg_remain_len) { + /* + * if message is not a continuation of previous message + * read the tlv type and tlv length + */ + if (!tlv_buf_head) { + tlv_type = HTT_STATS_TLV_TAG_GET( + *msg_word); + tlv_remain_len = 
HTT_STATS_TLV_LENGTH_GET( + *msg_word); + } + + if (tlv_remain_len == 0) { + msg_remain_len = 0; + + if (tlv_buf_head) { + qdf_mem_free(tlv_buf_head); + tlv_buf_head = NULL; + tlv_buf_tail = NULL; + } + + goto error; + } + + if (!tlv_buf_head) + tlv_remain_len += HTT_TLV_HDR_LEN; + + if ((tlv_remain_len <= msg_remain_len)) { + /* Case 3 */ + if (tlv_buf_head) { + qdf_mem_copy(tlv_buf_tail, + (uint8_t *)msg_word, + tlv_remain_len); + tlv_start = (uint32_t *)tlv_buf_head; + } else { + /* Case 1 */ + tlv_start = msg_word; + } + + if (copy_stats) + dp_htt_stats_copy_tag(pdev, tlv_type, tlv_start); + else + dp_htt_stats_print_tag(tlv_type, tlv_start); + + msg_remain_len -= tlv_remain_len; + + msg_word = (uint32_t *) + (((uint8_t *)msg_word) + + tlv_remain_len); + + tlv_remain_len = 0; + + if (tlv_buf_head) { + qdf_mem_free(tlv_buf_head); + tlv_buf_head = NULL; + tlv_buf_tail = NULL; + } + + } else { /* tlv_remain_len > msg_remain_len */ + /* Case 2 & 3 */ + if (!tlv_buf_head) { + tlv_buf_head = qdf_mem_malloc( + tlv_remain_len); + + if (!tlv_buf_head) { + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + "Alloc failed"); + goto error; + } + + tlv_buf_tail = tlv_buf_head; + } + + qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word, + msg_remain_len); + tlv_remain_len -= msg_remain_len; + tlv_buf_tail += msg_remain_len; + } + } + + if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) { + htt_stats->msg_len -= DP_EXT_MSG_LENGTH; + } + + qdf_nbuf_free(htt_msg); + } + return; + +error: + qdf_nbuf_free(htt_msg); + while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg)) + != NULL) + qdf_nbuf_free(htt_msg); +} + +void htt_t2h_stats_handler(void *context) +{ + struct dp_soc *soc = (struct dp_soc *)context; + struct htt_stats_context htt_stats; + uint32_t *msg_word; + qdf_nbuf_t htt_msg = NULL; + uint8_t done; + uint8_t rem_stats; + + if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "soc: 0x%pK, init_done: %d", soc, + 
qdf_atomic_read(&soc->cmn_init_done)); + return; + } + + qdf_mem_zero(&htt_stats, sizeof(htt_stats)); + qdf_nbuf_queue_init(&htt_stats.msg); + + /* pull one completed stats from soc->htt_stats_msg and process */ + qdf_spin_lock_bh(&soc->htt_stats.lock); + if (!soc->htt_stats.num_stats) { + qdf_spin_unlock_bh(&soc->htt_stats.lock); + return; + } + while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) { + msg_word = (uint32_t *) qdf_nbuf_data(htt_msg); + msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET; + done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word); + qdf_nbuf_queue_add(&htt_stats.msg, htt_msg); + /* + * Done bit signifies that this is the last T2H buffer in the + * stream of HTT EXT STATS message + */ + if (done) + break; + } + rem_stats = --soc->htt_stats.num_stats; + qdf_spin_unlock_bh(&soc->htt_stats.lock); + + dp_process_htt_stat_msg(&htt_stats, soc); + /* If there are more stats to process, schedule stats work again */ + if (rem_stats) + qdf_sched_work(0, &soc->htt_stats.work); +} + +/* + * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU, + * if a new peer id arrives in a PPDU + * pdev: DP pdev handle + * @peer_id : peer unique identifier + * @ppdu_info: per ppdu tlv structure + * + * return:user index to be populated + */ +#ifdef FEATURE_PERPKT_INFO +static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev, + uint16_t peer_id, + struct ppdu_info *ppdu_info) +{ + uint8_t user_index = 0; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + while ((user_index + 1) <= ppdu_info->last_user) { + ppdu_user_desc = &ppdu_desc->user[user_index]; + if (ppdu_user_desc->peer_id != peer_id) { + user_index++; + continue; + } else { + /* Max users possible is 8 so user array index should + * not exceed 7 + */ + qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX); + return 
user_index; + } + } + + ppdu_info->last_user++; + /* Max users possible is 8 so last user should not exceed 8 */ + qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS); + return ppdu_info->last_user - 1; +} + +/* + * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv + * pdev: DP pdev handle + * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev, + uint32_t *tag_buf, struct ppdu_info *ppdu_info) +{ + uint16_t frame_type; + uint16_t freq; + struct dp_soc *soc = NULL; + struct cdp_tx_completion_ppdu *ppdu_desc = NULL; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf += 2; + ppdu_desc->num_users = + HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf); + tag_buf++; + frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf); + + if ((frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) || + (frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU)) + ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA; + else + ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL; + + tag_buf += 2; + ppdu_desc->tx_duration = *tag_buf; + tag_buf += 3; + ppdu_desc->ppdu_start_timestamp = *tag_buf; + + ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp + + ppdu_desc->tx_duration; + /* Ack time stamp is same as end time stamp*/ + ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp; + + tag_buf++; + + freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf); + if (freq != ppdu_desc->channel) { + soc = pdev->soc; + ppdu_desc->channel = freq; + if (soc && soc->cdp_soc.ol_ops->freq_to_channel) + pdev->operating_channel = + soc->cdp_soc.ol_ops->freq_to_channel(pdev->osif_pdev, freq); + } + + ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf); +} + +/* + * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common + * @tag_buf: buffer containing the tlv 
htt_ppdu_stats_user_common_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_common_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + uint16_t peer_id; + struct dp_peer *peer; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + peer_id = HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_GET(*tag_buf); + peer = dp_peer_find_by_id(pdev->soc, peer_id); + + if (!peer) + return; + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + + ppdu_user_desc->peer_id = peer_id; + + tag_buf++; + + if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) { + ppdu_user_desc->is_mcast = true; + ppdu_user_desc->mpdu_tried_mcast = + HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf); + ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast; + } else { + ppdu_user_desc->mpdu_tried_ucast = + HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf); + } + + tag_buf++; + + ppdu_user_desc->qos_ctrl = + HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf); + ppdu_user_desc->frame_ctrl = + HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf); + ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl; +} + + +/** + * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv + * @pdev: DP pdev handle + * @tag_buf: T2H message buffer carrying the user rate TLV + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev, + uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + uint16_t peer_id; + struct dp_peer *peer; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + + ppdu_desc = 
(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf); + peer = dp_peer_find_by_id(pdev->soc, peer_id); + + if (!peer) + return; + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->peer_id = peer_id; + + ppdu_user_desc->tid = + HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf); + + qdf_mem_copy(ppdu_user_desc->mac_addr, peer->mac_addr.raw, + DP_MAC_ADDR_LEN); + + tag_buf += 2; + + ppdu_user_desc->ru_tones = (HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) - + HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1; + + tag_buf += 2; + + ppdu_user_desc->ppdu_type = + HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf); + + tag_buf++; + ppdu_user_desc->tx_rate = *tag_buf; + + ppdu_user_desc->ltf_size = + HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf); + ppdu_user_desc->stbc = + HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf); + ppdu_user_desc->he_re = + HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf); + ppdu_user_desc->txbf = + HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf); + ppdu_user_desc->bw = + HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf); + ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf); + ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf); + ppdu_user_desc->preamble = + HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf); + ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf); + ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf); + ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf); +} + +/* + * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process + * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv + * pdev: DP PDEV handle + * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void 
dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf = + (htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf; + + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + uint16_t peer_id; + struct dp_peer *peer; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + + peer_id = + HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); + + peer = dp_peer_find_by_id(pdev->soc, peer_id); + + if (!peer) + return; + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->peer_id = peer_id; + + ppdu_user_desc->start_seq = dp_stats_buf->start_seq; + qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap, + CDP_BA_64_BIT_MAP_SIZE_DWORDS); +} + +/* + * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process + * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv + * soc: DP SOC handle + * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf = + (htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf; + + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + uint16_t peer_id; + struct dp_peer *peer; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + + peer_id = + HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); + + peer = dp_peer_find_by_id(pdev->soc, peer_id); + + if (!peer) + return; + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, 
ppdu_info); + + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->peer_id = peer_id; + + ppdu_user_desc->start_seq = dp_stats_buf->start_seq; + qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap, + CDP_BA_256_BIT_MAP_SIZE_DWORDS); +} + +/* + * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process + * htt_ppdu_stats_user_cmpltn_common_tlv + * soc: DP SOC handle + * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_cmpltn_common_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + uint16_t peer_id; + struct dp_peer *peer; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf = + (htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + peer_id = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf); + peer = dp_peer_find_by_id(pdev->soc, peer_id); + + if (!peer) + return; + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->peer_id = peer_id; + + ppdu_user_desc->completion_status = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET( + *tag_buf); + + ppdu_user_desc->tid = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf); + + + tag_buf++; + ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi; + + tag_buf++; + + ppdu_user_desc->mpdu_success = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf); + + tag_buf++; + + ppdu_user_desc->long_retries = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf); + + ppdu_user_desc->short_retries = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf); + 
ppdu_user_desc->retry_msdus = + ppdu_user_desc->long_retries + ppdu_user_desc->short_retries; + + ppdu_user_desc->is_ampdu = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf); + ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu; + +} + +/* + * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process + * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv + * pdev: DP PDEV handle + * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf = + (htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + struct cdp_tx_completion_ppdu *ppdu_desc; + uint8_t curr_user_index = 0; + uint16_t peer_id; + struct dp_peer *peer; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + + peer_id = + HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); + + peer = dp_peer_find_by_id(pdev->soc, peer_id); + + if (!peer) + return; + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->peer_id = peer_id; + + ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no; + qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap, + CDP_BA_64_BIT_MAP_SIZE_DWORDS); +} + +/* + * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process + * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv + * pdev: DP PDEV handle + * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + 
htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf = + (htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + struct cdp_tx_completion_ppdu *ppdu_desc; + uint8_t curr_user_index = 0; + uint16_t peer_id; + struct dp_peer *peer; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + + peer_id = + HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); + + peer = dp_peer_find_by_id(pdev->soc, peer_id); + + if (!peer) + return; + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->peer_id = peer_id; + + ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no; + qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap, + CDP_BA_256_BIT_MAP_SIZE_DWORDS); +} + +/* + * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process + * htt_ppdu_stats_user_compltn_ack_ba_status_tlv + * pdev: DP PDE handle + * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + uint16_t peer_id; + struct dp_peer *peer; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf += 2; + peer_id = + HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf); + + + peer = dp_peer_find_by_id(pdev->soc, peer_id); + + if (!peer) + return; + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->peer_id = peer_id; + + tag_buf++; + ppdu_user_desc->tid = + 
HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_GET(*tag_buf); + ppdu_user_desc->num_mpdu = + HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf); + + ppdu_user_desc->num_msdu = + HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf); + + ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu; + + tag_buf += 2; + ppdu_user_desc->success_bytes = *tag_buf; + +} + +/* + * dp_process_ppdu_stats_user_common_array_tlv: Process + * htt_ppdu_stats_user_common_array_tlv + * pdev: DP PDEV handle + * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_common_array_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + uint32_t peer_id; + struct dp_peer *peer; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + struct htt_tx_ppdu_stats_info *dp_stats_buf; + + ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf; + tag_buf += 3; + peer_id = + HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf); + + peer = dp_peer_find_by_id(pdev->soc, peer_id); + + if (!peer) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "Invalid peer"); + return; + } + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + + ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes; + ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes; + + tag_buf++; + + ppdu_user_desc->success_msdus = + HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf); + ppdu_user_desc->retry_bytes = + HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf); + tag_buf++; + ppdu_user_desc->failed_msdus = + HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf); 
+} + +/* + * dp_process_ppdu_stats_flush_tlv: Process + * htt_ppdu_stats_flush_tlv + * @pdev: DP PDEV handle + * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv + * + * return:void + */ +static void dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev, + uint32_t *tag_buf) +{ + uint32_t peer_id; + uint32_t drop_reason; + uint8_t tid; + uint32_t num_msdu; + struct dp_peer *peer; + + tag_buf++; + drop_reason = *tag_buf; + + tag_buf++; + num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf); + + tag_buf++; + peer_id = + HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf); + + peer = dp_peer_find_by_id(pdev->soc, peer_id); + if (!peer) + return; + + tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf); + + if (drop_reason == HTT_FLUSH_EXCESS_RETRIES) { + DP_STATS_INC(peer, tx.excess_retries_per_ac[TID_TO_WME_AC(tid)], + num_msdu); + } +} + +/* + * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process + * htt_ppdu_stats_tx_mgmtctrl_payload_tlv + * @pdev: DP PDEV handle + * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv + * @length: tlv_length + * + * return:void + */ +static void +dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev, + qdf_nbuf_t tag_buf, + uint32_t length, + uint32_t ppdu_id) +{ + uint32_t *nbuf_ptr; + + if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode)) + return; + + if (qdf_nbuf_pull_head(tag_buf, HTT_MGMT_CTRL_TLV_RESERVERD_LEN + 4) + == NULL) + return; + + nbuf_ptr = (uint32_t *)qdf_nbuf_push_head( + tag_buf, sizeof(ppdu_id)); + *nbuf_ptr = ppdu_id; + + dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc, + tag_buf, HTT_INVALID_PEER, + WDI_NO_VAL, pdev->pdev_id); +} + +/** + * dp_process_ppdu_tag(): Function to process the PPDU TLVs + * @pdev: DP pdev handle + * @tag_buf: TLV buffer + * @tlv_len: length of tlv + * @ppdu_info: per ppdu tlv structure + * + * return: void + */ +static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf, + uint32_t tlv_len, struct 
ppdu_info *ppdu_info) +{ + uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); + + switch (tlv_type) { + case HTT_PPDU_STATS_COMMON_TLV: + qdf_assert_always(tlv_len == + sizeof(htt_ppdu_stats_common_tlv)); + dp_process_ppdu_stats_common_tlv(pdev, tag_buf, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMMON_TLV: + qdf_assert_always(tlv_len == + sizeof(htt_ppdu_stats_user_common_tlv)); + dp_process_ppdu_stats_user_common_tlv( + pdev, tag_buf, ppdu_info); + break; + case HTT_PPDU_STATS_USR_RATE_TLV: + qdf_assert_always(tlv_len == + sizeof(htt_ppdu_stats_user_rate_tlv)); + dp_process_ppdu_stats_user_rate_tlv(pdev, tag_buf, ppdu_info); + break; + case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV: + qdf_assert_always(tlv_len == + sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv)); + dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv( + pdev, tag_buf, ppdu_info); + break; + case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV: + qdf_assert_always(tlv_len == + sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv)); + dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv( + pdev, tag_buf, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV: + qdf_assert_always(tlv_len == + sizeof(htt_ppdu_stats_user_cmpltn_common_tlv)); + dp_process_ppdu_stats_user_cmpltn_common_tlv( + pdev, tag_buf, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV: + qdf_assert_always(tlv_len == + sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv)); + dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv( + pdev, tag_buf, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV: + qdf_assert_always(tlv_len == + sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv)); + dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv( + pdev, tag_buf, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV: + qdf_assert_always(tlv_len == + sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv)); + dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv( + pdev, tag_buf, ppdu_info); + 
break; + case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV: + qdf_assert_always(tlv_len == + sizeof(htt_ppdu_stats_usr_common_array_tlv_v)); + dp_process_ppdu_stats_user_common_array_tlv( + pdev, tag_buf, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV: + qdf_assert_always(tlv_len == + sizeof(htt_ppdu_stats_flush_tlv)); + dp_process_ppdu_stats_user_compltn_flush_tlv( + pdev, tag_buf); + break; + default: + break; + } +} + +/** + * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor + * to upper layer + * @pdev: DP pdev handle + * @ppdu_info: per PPDU TLV descriptor + * + * return: void + */ +static +void dp_ppdu_desc_deliver(struct dp_pdev *pdev, + struct ppdu_info *ppdu_info) +{ + struct cdp_tx_completion_ppdu *ppdu_desc = NULL; + struct dp_peer *peer = NULL; + qdf_nbuf_t nbuf; + uint16_t i; + + ppdu_desc = (struct cdp_tx_completion_ppdu *) + qdf_nbuf_data(ppdu_info->nbuf); + + ppdu_desc->num_users = ppdu_info->last_user; + ppdu_desc->ppdu_id = ppdu_info->ppdu_id; + + for (i = 0; i < ppdu_desc->num_users; i++) { + + + ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu; + ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu; + + if (ppdu_desc->user[i].tid < CDP_DATA_TID_MAX) { + peer = dp_peer_find_by_id(pdev->soc, + ppdu_desc->user[i].peer_id); + /** + * This check is to make sure peer is not deleted + * after processing the TLVs. + */ + if (!peer) + continue; + + dp_tx_stats_update(pdev->soc, peer, + &ppdu_desc->user[i], + ppdu_desc->ack_rssi); + } + } + + /* + * Remove from the list + */ + TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem); + nbuf = ppdu_info->nbuf; + pdev->list_depth--; + qdf_mem_free(ppdu_info); + + qdf_assert_always(nbuf); + + ppdu_desc = (struct cdp_tx_completion_ppdu *) + qdf_nbuf_data(nbuf); + + /** + * Deliver PPDU stats only for valid (acked) data frames if + * sniffer mode is not enabled. 
+ * If sniffer mode is enabled, PPDU stats for all frames + * including mgmt/control frames should be delivered to upper layer + */ + if (pdev->tx_sniffer_enable || pdev->mcopy_mode) { + dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc, + nbuf, HTT_INVALID_PEER, + WDI_NO_VAL, pdev->pdev_id); + } else { + if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 && + ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) { + + dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, + pdev->soc, nbuf, HTT_INVALID_PEER, + WDI_NO_VAL, pdev->pdev_id); + } else + qdf_nbuf_free(nbuf); + } + return; +} + +/** + * dp_get_ppdu_desc(): Function to allocate new PPDU status + * desc for new ppdu id + * @pdev: DP pdev handle + * @ppdu_id: PPDU unique identifier + * @tlv_type: TLV type received + * + * return: ppdu_info per ppdu tlv structure + */ +static +struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id, + uint8_t tlv_type) +{ + struct ppdu_info *ppdu_info = NULL; + + /* + * Find ppdu_id node exists or not + */ + TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) { + + if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) { + break; + } + } + + if (ppdu_info) { + /** + * if we get tlv_type that is already been processed for ppdu, + * that means we got a new ppdu with same ppdu id. 
+ * Hence Flush the older ppdu + */ + if (ppdu_info->tlv_bitmap & (1 << tlv_type)) + dp_ppdu_desc_deliver(pdev, ppdu_info); + else + return ppdu_info; + } + + /** + * Flush the head ppdu descriptor if ppdu desc list reaches max + * threshold + */ + if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) { + ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list); + dp_ppdu_desc_deliver(pdev, ppdu_info); + } + + /* + * Allocate new ppdu_info node + */ + ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info)); + if (!ppdu_info) + return NULL; + + ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, + sizeof(struct cdp_tx_completion_ppdu), 0, 4, + TRUE); + if (!ppdu_info->nbuf) { + qdf_mem_free(ppdu_info); + return NULL; + } + + qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), + sizeof(struct cdp_tx_completion_ppdu)); + + if (qdf_nbuf_put_tail(ppdu_info->nbuf, + sizeof(struct cdp_tx_completion_ppdu)) == NULL) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "No tailroom for HTT PPDU"); + qdf_nbuf_free(ppdu_info->nbuf); + ppdu_info->nbuf = NULL; + ppdu_info->last_user = 0; + qdf_mem_free(ppdu_info); + return NULL; + } + + /** + * No lock is needed because all PPDU TLVs are processed in + * same context and this list is updated in same context + */ + TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info, + ppdu_info_list_elem); + pdev->list_depth++; + return ppdu_info; +} + +/** + * dp_htt_process_tlv(): Function to process each PPDU TLVs + * @pdev: DP pdev handle + * @htt_t2h_msg: HTT target to host message + * + * return: ppdu_info per ppdu tlv structure + */ + +static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev, + qdf_nbuf_t htt_t2h_msg, bool *free_buf) +{ + uint32_t length; + uint32_t ppdu_id; + uint8_t tlv_type; + uint32_t tlv_length, tlv_bitmap_expected; + uint8_t *tlv_buf; + struct ppdu_info *ppdu_info = NULL; + + uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg); + + length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word); + + msg_word = msg_word + 1; + 
ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word); + + + msg_word = msg_word + 3; + while (length > 0) { + tlv_buf = (uint8_t *)msg_word; + tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word); + tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word); + if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG)) + pdev->stats.ppdu_stats_counter[tlv_type]++; + + if (tlv_length == 0) + break; + + tlv_length += HTT_TLV_HDR_LEN; + + /** + * Not allocating separate ppdu descriptor for MGMT Payload + * TLV as this is sent as separate WDI indication and it + * doesn't contain any ppdu information + */ + if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) { + dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(pdev, + htt_t2h_msg, tlv_length, ppdu_id); + msg_word = + (uint32_t *)((uint8_t *)tlv_buf + tlv_length); + length -= (tlv_length); + *free_buf = false; + return NULL; + } + + ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type); + if (!ppdu_info) + return NULL; + ppdu_info->ppdu_id = ppdu_id; + ppdu_info->tlv_bitmap |= (1 << tlv_type); + + dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info); + + /** + * Increment pdev level tlv count to monitor + * missing TLVs + */ + pdev->tlv_count++; + ppdu_info->last_tlv_cnt = pdev->tlv_count; + + msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length); + length -= (tlv_length); + } + + if (!ppdu_info) + return NULL; + + pdev->last_ppdu_id = ppdu_id; + + tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP; + + if (pdev->tx_sniffer_enable || pdev->mcopy_mode) { + if (ppdu_info->is_ampdu) + tlv_bitmap_expected = HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP; + } + + /** + * Once all the TLVs for a given PPDU has been processed, + * return PPDU status to be delivered to higher layer + */ + if (ppdu_info->tlv_bitmap == tlv_bitmap_expected) + return ppdu_info; + + return NULL; +} +#endif /* FEATURE_PERPKT_INFO */ + +/** + * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW + * @soc: DP SOC handle + * @pdev_id: pdev id + * @htt_t2h_msg: HTT 
message nbuf + * + * return:void + */ +#if defined(WDI_EVENT_ENABLE) +#ifdef FEATURE_PERPKT_INFO +static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc, + uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg) +{ + struct dp_pdev *pdev = soc->pdev_list[pdev_id]; + struct ppdu_info *ppdu_info = NULL; + bool free_buf = true; + + if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && + !pdev->mcopy_mode) + return free_buf; + + ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg, &free_buf); + if (ppdu_info) + dp_ppdu_desc_deliver(pdev, ppdu_info); + + return free_buf; +} +#else +static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc, + uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg) +{ + return true; +} +#endif +#endif + +/** + * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats + * @soc: DP SOC handle + * @htt_t2h_msg: HTT message nbuf + * + * return:void + */ +static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc, + qdf_nbuf_t htt_t2h_msg) +{ + uint8_t done; + qdf_nbuf_t msg_copy; + uint32_t *msg_word; + + msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg); + msg_word = msg_word + 3; + done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word); + + /* + * HTT EXT stats response comes as stream of TLVs which span over + * multiple T2H messages. + * The first message will carry length of the response. + * For rest of the messages length will be zero. + * + * Clone the T2H message buffer and store it in a list to process + * it later. 
+ * + * The original T2H message buffers gets freed in the T2H HTT event + * handler + */ + msg_copy = qdf_nbuf_clone(htt_t2h_msg); + + if (!msg_copy) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, + "T2H messge clone failed for HTT EXT STATS"); + goto error; + } + + qdf_spin_lock_bh(&soc->htt_stats.lock); + qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy); + /* + * Done bit signifies that this is the last T2H buffer in the stream of + * HTT EXT STATS message + */ + if (done) { + soc->htt_stats.num_stats++; + qdf_sched_work(0, &soc->htt_stats.work); + } + qdf_spin_unlock_bh(&soc->htt_stats.lock); + + return; + +error: + qdf_spin_lock_bh(&soc->htt_stats.lock); + while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) + != NULL) { + qdf_nbuf_free(msg_copy); + } + soc->htt_stats.num_stats = 0; + qdf_spin_unlock_bh(&soc->htt_stats.lock); + return; + +} + +/* + * htt_soc_attach_target() - SOC level HTT setup + * @htt_soc: HTT SOC handle + * + * Return: 0 on success; error code on failure + */ +int htt_soc_attach_target(void *htt_soc) +{ + struct htt_soc *soc = (struct htt_soc *)htt_soc; + + return htt_h2t_ver_req_msg(soc); +} + + +#if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG) +/* + * dp_ppdu_stats_ind_handler() - PPDU stats msg handler + * @htt_soc: HTT SOC handle + * @msg_word: Pointer to payload + * @htt_t2h_msg: HTT msg nbuf + * + * Return: True if buffer should be freed by caller. 
+ */ +static bool +dp_ppdu_stats_ind_handler(struct htt_soc *soc, + uint32_t *msg_word, + qdf_nbuf_t htt_t2h_msg) +{ + u_int8_t pdev_id; + bool free_buf; + qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, + "received HTT_T2H_MSG_TYPE_PPDU_STATS_IND\n"); + pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word); + pdev_id = DP_HW2SW_MACID(pdev_id); + free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id, + htt_t2h_msg); + dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc, + htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL, + pdev_id); + return free_buf; +} +#else +static bool +dp_ppdu_stats_ind_handler(struct htt_soc *soc, + uint32_t *msg_word, + qdf_nbuf_t htt_t2h_msg) +{ + return true; +} +#endif + +#if defined(WDI_EVENT_ENABLE) && \ + !defined(REMOVE_PKT_LOG) && defined(CONFIG_WIN) +/* + * dp_pktlog_msg_handler() - Pktlog msg handler + * @htt_soc: HTT SOC handle + * @msg_word: Pointer to payload + * + * Return: None + */ +static void +dp_pktlog_msg_handler(struct htt_soc *soc, + uint32_t *msg_word) +{ + uint8_t pdev_id; + uint32_t *pl_hdr; + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, + "received HTT_T2H_MSG_TYPE_PKTLOG\n"); + pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word); + pdev_id = DP_HW2SW_MACID(pdev_id); + pl_hdr = (msg_word + 1); + dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc, + pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL, + pdev_id); +} +#else +static void +dp_pktlog_msg_handler(struct htt_soc *soc, + uint32_t *msg_word) +{ +} +#endif + +/* + * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler + * @context: Opaque context (HTT SOC handle) + * @pkt: HTC packet + */ +static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt) +{ + struct htt_soc *soc = (struct htt_soc *) context; + qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext; + u_int32_t *msg_word; + enum htt_t2h_msg_type msg_type; + bool free_buf = true; + + /* check for successful message reception 
*/ + if (pkt->Status != QDF_STATUS_SUCCESS) { + if (pkt->Status != QDF_STATUS_E_CANCELED) + soc->stats.htc_err_cnt++; + + qdf_nbuf_free(htt_t2h_msg); + return; + } + + /* TODO: Check if we should pop the HTC/HTT header alignment padding */ + + msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg); + msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word); + switch (msg_type) { + case HTT_T2H_MSG_TYPE_PEER_MAP: + { + u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN]; + u_int8_t *peer_mac_addr; + u_int16_t peer_id; + u_int16_t hw_peer_id; + u_int8_t vdev_id; + + peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word); + hw_peer_id = + HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2)); + vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word); + peer_mac_addr = htt_t2h_mac_addr_deswizzle( + (u_int8_t *) (msg_word+1), + &mac_addr_deswizzle_buf[0]); + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_INFO, + "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n", + peer_id, vdev_id); + + dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id, + vdev_id, peer_mac_addr); + break; + } + case HTT_T2H_MSG_TYPE_PEER_UNMAP: + { + u_int16_t peer_id; + peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word); + + dp_rx_peer_unmap_handler(soc->dp_soc, peer_id); + break; + } + case HTT_T2H_MSG_TYPE_SEC_IND: + { + u_int16_t peer_id; + enum htt_sec_type sec_type; + int is_unicast; + + peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word); + sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word); + is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word); + /* point to the first part of the Michael key */ + msg_word++; + dp_rx_sec_ind_handler( + soc->dp_soc, peer_id, sec_type, is_unicast, + msg_word, msg_word + 2); + break; + } + + case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: + { + free_buf = dp_ppdu_stats_ind_handler(soc, msg_word, + htt_t2h_msg); + break; + } + + case HTT_T2H_MSG_TYPE_PKTLOG: + { + dp_pktlog_msg_handler(soc, msg_word); + break; + } + + case HTT_T2H_MSG_TYPE_VERSION_CONF: + { + htc_pm_runtime_put(soc->htc_soc); + soc->tgt_ver.major 
= HTT_VER_CONF_MAJOR_GET(*msg_word); + soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, + "target uses HTT version %d.%d; host uses %d.%d\n", + soc->tgt_ver.major, soc->tgt_ver.minor, + HTT_CURRENT_VERSION_MAJOR, + HTT_CURRENT_VERSION_MINOR); + if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) { + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + "*** Incompatible host/target HTT versions!\n"); + } + /* abort if the target is incompatible with the host */ + qdf_assert(soc->tgt_ver.major == + HTT_CURRENT_VERSION_MAJOR); + if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) { + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_WARN, + "*** Warning: host/target HTT versions" + " are different, though compatible!\n"); + } + break; + } + case HTT_T2H_MSG_TYPE_RX_ADDBA: + { + uint16_t peer_id; + uint8_t tid; + uint8_t win_sz; + uint16_t status; + struct dp_peer *peer; + + /* + * Update REO Queue Desc with new values + */ + peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word); + tid = HTT_RX_ADDBA_TID_GET(*msg_word); + win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word); + peer = dp_peer_find_by_id(soc->dp_soc, peer_id); + + /* + * Window size needs to be incremented by 1 + * since fw needs to represent a value of 256 + * using just 8 bits + */ + if (peer) { + status = dp_addba_requestprocess_wifi3(peer, + 0, tid, 0, win_sz + 1, 0xffff); + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_INFO, + FL("PeerID %d BAW %d TID %d stat %d\n"), + peer_id, win_sz, tid, status); + + } else { + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_ERROR, + FL("Peer not found peer id %d\n"), + peer_id); + } + break; + } + case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: + { + dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg); + break; + } + default: + break; + }; + + /* Free the indication buffer */ + if (free_buf) + qdf_nbuf_free(htt_t2h_msg); +} + +/* + * dp_htt_h2t_full() - Send full handler (called from HTC) + * @context: Opaque context 
(HTT SOC handle) + * @pkt: HTC packet + * + * Return: enum htc_send_full_action + */ +static enum htc_send_full_action +dp_htt_h2t_full(void *context, HTC_PACKET *pkt) +{ + return HTC_SEND_FULL_KEEP; +} + +/* + * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages + * @context: Opaque context (HTT SOC handle) + * @nbuf: nbuf containing T2H message + * @pipe_id: HIF pipe ID + * + * Return: QDF_STATUS + * + * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which + * will be used for packet log and other high-priority HTT messages. Proper + * HTC connection to be added later once required FW changes are available + */ +static QDF_STATUS +dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id) +{ + A_STATUS rc = QDF_STATUS_SUCCESS; + HTC_PACKET htc_pkt; + + qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE); + qdf_mem_zero(&htc_pkt, sizeof(htc_pkt)); + htc_pkt.Status = QDF_STATUS_SUCCESS; + htc_pkt.pPktContext = (void *)nbuf; + dp_htt_t2h_msg_handler(context, &htc_pkt); + + return rc; +} + +/* + * htt_htc_soc_attach() - Register SOC level HTT instance with HTC + * @htt_soc: HTT SOC handle + * + * Return: 0 on success; error code on failure + */ +static int +htt_htc_soc_attach(struct htt_soc *soc) +{ + struct htc_service_connect_req connect; + struct htc_service_connect_resp response; + A_STATUS status; + struct dp_soc *dpsoc = soc->dp_soc; + + qdf_mem_set(&connect, sizeof(connect), 0); + qdf_mem_set(&response, sizeof(response), 0); + + connect.pMetaData = NULL; + connect.MetaDataLength = 0; + connect.EpCallbacks.pContext = soc; + connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete; + connect.EpCallbacks.EpTxCompleteMultiple = NULL; + connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler; + + /* rx buffers currently are provided by HIF, not by EpRecvRefill */ + connect.EpCallbacks.EpRecvRefill = NULL; + + /* N/A, fill is done by HIF */ + connect.EpCallbacks.RecvRefillWaterMark = 1; + + 
connect.EpCallbacks.EpSendFull = dp_htt_h2t_full; + /* + * Specify how deep to let a queue get before htc_send_pkt will + * call the EpSendFull function due to excessive send queue depth. + */ + connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH; + + /* disable flow control for HTT data message service */ + connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL; + + /* connect to control service */ + connect.service_id = HTT_DATA_MSG_SVC; + + status = htc_connect_service(soc->htc_soc, &connect, &response); + + if (status != A_OK) + return QDF_STATUS_E_FAILURE; + + soc->htc_endpoint = response.Endpoint; + + hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint); + dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc, + dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE); + + return 0; /* success */ +} + +/* + * htt_soc_attach() - SOC level HTT initialization + * @dp_soc: Opaque Data path SOC handle + * @ctrl_psoc: Opaque ctrl SOC handle + * @htc_soc: SOC level HTC handle + * @hal_soc: Opaque HAL SOC handle + * @osdev: QDF device + * + * Return: HTT handle on success; NULL on failure + */ +void * +htt_soc_attach(void *dp_soc, void *ctrl_psoc, HTC_HANDLE htc_soc, + void *hal_soc, qdf_device_t osdev) +{ + struct htt_soc *soc; + int i; + + soc = qdf_mem_malloc(sizeof(*soc)); + + if (!soc) + goto fail1; + + soc->osdev = osdev; + soc->ctrl_psoc = ctrl_psoc; + soc->dp_soc = dp_soc; + soc->htc_soc = htc_soc; + soc->hal_soc = hal_soc; + + /* TODO: See if any NSS related context is required in htt_soc */ + + soc->htt_htc_pkt_freelist = NULL; + + if (htt_htc_soc_attach(soc)) + goto fail2; + + /* TODO: See if any Rx data specific intialization is required. 
For + * MCL use cases, the data will be received as single packet and + * should not required any descriptor or reorder handling + */ + + HTT_TX_MUTEX_INIT(&soc->htt_tx_mutex); + + /* pre-allocate some HTC_PACKET objects */ + for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) { + struct dp_htt_htc_pkt_union *pkt; + pkt = qdf_mem_malloc(sizeof(*pkt)); + if (!pkt) + break; + + htt_htc_pkt_free(soc, &pkt->u.pkt); + } + + return soc; + +fail2: + qdf_mem_free(soc); + +fail1: + return NULL; +} + + +/* + * htt_soc_detach() - Detach SOC level HTT + * @htt_soc: HTT SOC handle + */ +void +htt_soc_detach(void *htt_soc) +{ + struct htt_soc *soc = (struct htt_soc *)htt_soc; + + htt_htc_misc_pkt_pool_free(soc); + htt_htc_pkt_pool_free(soc); + HTT_TX_MUTEX_DESTROY(&soc->htt_tx_mutex); + qdf_mem_free(soc); +} + +/** + * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW + * @pdev: DP PDEV handle + * @stats_type_upload_mask: stats type requested by user + * @config_param_0: extra configuration parameters + * @config_param_1: extra configuration parameters + * @config_param_2: extra configuration parameters + * @config_param_3: extra configuration parameters + * @mac_id: mac number + * + * return: QDF STATUS + */ +QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev, + uint32_t stats_type_upload_mask, uint32_t config_param_0, + uint32_t config_param_1, uint32_t config_param_2, + uint32_t config_param_3, int cookie_val, int cookie_msb, + uint8_t mac_id) +{ + struct htt_soc *soc = pdev->soc->htt_handle; + struct dp_htt_htc_pkt *pkt; + qdf_nbuf_t msg; + uint32_t *msg_word; + uint8_t pdev_mask = 0; + + msg = qdf_nbuf_alloc( + soc->osdev, + HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ), + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE); + + if (!msg) + return QDF_STATUS_E_NOMEM; + + /*TODO:Add support for SOC stats + * Bit 0: SOC Stats + * Bit 1: Pdev stats for pdev id 0 + * Bit 2: Pdev stats for pdev id 1 + * Bit 3: Pdev stats for pdev id 2 + */ + 
mac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id); + + pdev_mask = 1 << DP_SW2HW_MACID(mac_id); + /* + * Set the length of the message. + * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added + * separately during the below call to qdf_nbuf_push_head. + * The contribution from the HTC header is added separately inside HTC. + */ + if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "Failed to expand head for HTT_EXT_STATS"); + qdf_nbuf_free(msg); + return QDF_STATUS_E_FAILURE; + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n" + "config_param_1 %u\n config_param_2 %u\n" + "config_param_4 %u\n -------------\n", + __func__, __LINE__, cookie_val, config_param_0, + config_param_1, config_param_2, config_param_3); + + msg_word = (uint32_t *) qdf_nbuf_data(msg); + + qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); + *msg_word = 0; + HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ); + HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask); + HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask); + + /* word 1 */ + msg_word++; + *msg_word = 0; + HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0); + + /* word 2 */ + msg_word++; + *msg_word = 0; + HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1); + + /* word 3 */ + msg_word++; + *msg_word = 0; + HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2); + + /* word 4 */ + msg_word++; + *msg_word = 0; + HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3); + + HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0); + + /* word 5 */ + msg_word++; + + /* word 6 */ + msg_word++; + *msg_word = 0; + HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val); + + /* word 7 */ + msg_word++; + *msg_word = 0; + /*Using last 2 bits for pdev_id */ + cookie_msb = ((cookie_msb << 2) | pdev->pdev_id); 
+ HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb); + + pkt = htt_htc_pkt_alloc(soc); + if (!pkt) { + qdf_nbuf_free(msg); + return QDF_STATUS_E_NOMEM; + } + + pkt->soc_ctxt = NULL; /* not used during send-done callback */ + + SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, + dp_htt_h2t_send_complete_free_netbuf, + qdf_nbuf_data(msg), qdf_nbuf_len(msg), + soc->htc_endpoint, + 1); /* tag - not relevant here */ + + SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); + DP_HTT_SEND_HTC_PKT(soc, pkt); + return 0; +} + +/* This macro will revert once proper HTT header will define for + * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in htt.h file + * */ +#if defined(WDI_EVENT_ENABLE) +/** + * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW + * @pdev: DP PDEV handle + * @stats_type_upload_mask: stats type requested by user + * @mac_id: Mac id number + * + * return: QDF STATUS + */ +QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, + uint32_t stats_type_upload_mask, uint8_t mac_id) +{ + struct htt_soc *soc = pdev->soc->htt_handle; + struct dp_htt_htc_pkt *pkt; + qdf_nbuf_t msg; + uint32_t *msg_word; + uint8_t pdev_mask; + + msg = qdf_nbuf_alloc( + soc->osdev, + HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ), + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true); + + if (!msg) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer\n"); + qdf_assert(0); + return QDF_STATUS_E_NOMEM; + } + + /*TODO:Add support for SOC stats + * Bit 0: SOC Stats + * Bit 1: Pdev stats for pdev id 0 + * Bit 2: Pdev stats for pdev id 1 + * Bit 3: Pdev stats for pdev id 2 + */ + pdev_mask = 1 << DP_SW2HW_MACID(mac_id); + + /* + * Set the length of the message. + * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added + * separately during the below call to qdf_nbuf_push_head. + * The contribution from the HTC header is added separately inside HTC. 
+ */ + if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Failed to expand head for HTT_CFG_STATS\n"); + qdf_nbuf_free(msg); + return QDF_STATUS_E_FAILURE; + } + + msg_word = (uint32_t *) qdf_nbuf_data(msg); + + qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING); + *msg_word = 0; + HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG); + HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask); + HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word, + stats_type_upload_mask); + + pkt = htt_htc_pkt_alloc(soc); + if (!pkt) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Fail to allocate dp_htt_htc_pkt buffer\n"); + qdf_assert(0); + qdf_nbuf_free(msg); + return QDF_STATUS_E_NOMEM; + } + + pkt->soc_ctxt = NULL; /* not used during send-done callback */ + + SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt, + dp_htt_h2t_send_complete_free_netbuf, + qdf_nbuf_data(msg), qdf_nbuf_len(msg), + soc->htc_endpoint, + 1); /* tag - not relevant here */ + + SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg); + DP_HTT_SEND_HTC_PKT(soc, pkt); + return 0; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.h new file mode 100644 index 0000000000000000000000000000000000000000..6ce2c3aab3c94ced315b9e606829a74602dee855 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_htt.h @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DP_HTT_H_ +#define _DP_HTT_H_ + +#include +#include +#include +#include + +#define HTT_TX_MUTEX_TYPE qdf_spinlock_t + +#define HTT_TX_MUTEX_INIT(_mutex) \ + qdf_spinlock_create(_mutex) + +#define HTT_TX_MUTEX_ACQUIRE(_mutex) \ + qdf_spin_lock_bh(_mutex) + +#define HTT_TX_MUTEX_RELEASE(_mutex) \ + qdf_spin_unlock_bh(_mutex) + +#define HTT_TX_MUTEX_DESTROY(_mutex) \ + qdf_spinlock_destroy(_mutex) + +#define DP_HTT_MAX_SEND_QUEUE_DEPTH 64 + +#ifndef HTT_MAC_ADDR_LEN +#define HTT_MAC_ADDR_LEN 6 +#endif + +#define DP_HTT_HTC_PKT_MISCLIST_SIZE 256 + +#define HTT_T2H_EXT_STATS_TLV_START_OFFSET 3 + +struct dp_htt_htc_pkt { + void *soc_ctxt; + qdf_dma_addr_t nbuf_paddr; + HTC_PACKET htc_pkt; +}; + +struct dp_htt_htc_pkt_union { + union { + struct dp_htt_htc_pkt pkt; + struct dp_htt_htc_pkt_union *next; + } u; +}; + +struct htt_soc { + void *ctrl_psoc; + void *dp_soc; + void *hal_soc; + HTC_HANDLE htc_soc; + qdf_device_t osdev; + HTC_ENDPOINT_ID htc_endpoint; + struct dp_htt_htc_pkt_union *htt_htc_pkt_freelist; + struct dp_htt_htc_pkt_union *htt_htc_pkt_misclist; + struct { + u_int8_t major; + u_int8_t minor; + } tgt_ver; + struct { + u_int8_t major; + u_int8_t minor; + } wifi_ip_ver; + + struct { + int htc_err_cnt; + int htc_pkt_free; + } stats; + + HTT_TX_MUTEX_TYPE htt_tx_mutex; +}; + +/** + * struct htt_rx_ring_tlv_filter - Rx ring TLV filter + * enable/disable. 
+ * @mpdu_start: enable/disable MPDU start TLV + * @msdu_start: enable/disable MSDU start TLV + * @packet: enable/disable PACKET TLV + * @msdu_end: enable/disable MSDU end TLV + * @mpdu_end: enable/disable MPDU end TLV + * @packet_header: enable/disable PACKET header TLV + * @attention: enable/disable ATTENTION TLV + * @ppdu_start: enable/disable PPDU start TLV + * @ppdu_end: enable/disable PPDU end TLV + * @ppdu_end_user_stats: enable/disable PPDU user stats TLV + * @ppdu_end_user_stats_ext: enable/disable PPDU user stats ext TLV + * @ppdu_end_status_done: enable/disable PPDU end status done TLV + * @enable_fp: enable/disable FP packet + * @enable_md: enable/disable MD packet + * @enable_mo: enable/disable MO packet + * @enable_mgmt: enable/disable MGMT packet + * @enable_ctrl: enable/disable CTRL packet + * @enable_data: enable/disable DATA packet + */ +struct htt_rx_ring_tlv_filter { + u_int32_t mpdu_start:1, + msdu_start:1, + packet:1, + msdu_end:1, + mpdu_end:1, + packet_header:1, + attention:1, + ppdu_start:1, + ppdu_end:1, + ppdu_end_user_stats:1, + ppdu_end_user_stats_ext:1, + ppdu_end_status_done:1, + header_per_msdu:1, + enable_fp:1, + enable_md:1, + enable_mo:1; + u_int32_t fp_mgmt_filter:16, + mo_mgmt_filter:16; + u_int32_t fp_ctrl_filter:16, + mo_ctrl_filter:16; + u_int32_t fp_data_filter:16, + mo_data_filter:16; +}; + +void * +htt_soc_attach(void *txrx_soc, void *ctrl_psoc, HTC_HANDLE htc_soc, + void *hal_soc, qdf_device_t osdev); + +void htt_soc_detach(void *soc); + +int htt_srng_setup(void *htt_soc, int pdev_id, void *hal_srng, + int hal_ring_type); + +int htt_soc_attach_target(void *htt_soc); + +/* + * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter + * config message to target + * @htt_soc: HTT SOC handle + * @pdev_id: PDEV Id + * @hal_srng: Opaque HAL SRNG pointer + * @hal_ring_type: SRNG ring type + * @ring_buf_size: SRNG buffer size + * @htt_tlv_filter: Rx SRNG TLV and filter setting + * + * Return: 0 on success; error code on failure + 
*/ +int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng, + int hal_ring_type, int ring_buf_size, + struct htt_rx_ring_tlv_filter *htt_tlv_filter); + +/* + * htt_t2h_stats_handler() - target to host stats work handler + * @context: context (dp soc context) + * + * Return: void + */ +void htt_t2h_stats_handler(void *context); + +/** + * struct htt_stats_context - htt stats information + * @soc: Size of each descriptor in the pool + * @msg: T2H Ext stats message queue + * @msg_len: T2H Ext stats message length + */ +struct htt_stats_context { + struct dp_soc *soc; + qdf_nbuf_queue_t msg; + uint32_t msg_len; +}; + +#endif /* _DP_HTT_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_internal.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..2aacb15a0790587e1d6517e0194a59f7370e5cda --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_internal.h @@ -0,0 +1,560 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */
+
+#ifndef _DP_INTERNAL_H_
+#define _DP_INTERNAL_H_
+
+#include "dp_types.h"
+
+#define RX_BUFFER_SIZE_PKTLOG_LITE 1024
+
+/* Macro For NYSM value received in VHT TLV */
+#define VHT_SGI_NYSM 3
+
+#if DP_PRINT_ENABLE
+#include <stdarg.h> /* va_list */
+#include <qdf_types.h> /* qdf_vprint */
+#include <cdp_txrx_handle.h> /* NOTE(review): include targets reconstructed, the <...> names were stripped; confirm against upstream */
+
+enum {
+	/* FATAL_ERR - print only irrecoverable error messages */
+	DP_PRINT_LEVEL_FATAL_ERR,
+
+	/* ERR - include non-fatal err messages */
+	DP_PRINT_LEVEL_ERR,
+
+	/* WARN - include warnings */
+	DP_PRINT_LEVEL_WARN,
+
+	/* INFO1 - include fundamental, infrequent events */
+	DP_PRINT_LEVEL_INFO1,
+
+	/* INFO2 - include non-fundamental but infrequent events */
+	DP_PRINT_LEVEL_INFO2,
+};
+
+
+#define dp_print(level, fmt, ...) do { \
+	if (level <= g_txrx_print_level) \
+		qdf_print(fmt, ## __VA_ARGS__); \
+} while (0)
+#define DP_PRINT(level, fmt, ...) do { \
+	dp_print(level, "DP: " fmt, ## __VA_ARGS__); \
+} while (0)
+#else
+#define DP_PRINT(level, fmt, ...)
+#endif /* DP_PRINT_ENABLE */
+
+#define DP_TRACE(LVL, fmt, args ...) \
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL, \
+		fmt, ## args)
+
+#define DP_TRACE_STATS(LVL, fmt, args ...) \
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL, \
+		fmt, ## args)
+
+#define DP_PRINT_STATS(fmt, args ...)
\ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, \ + fmt, ## args) + +#define DP_STATS_INIT(_handle) \ + qdf_mem_set(&((_handle)->stats), sizeof((_handle)->stats), 0x0) + +#define DP_STATS_CLR(_handle) \ + qdf_mem_set(&((_handle)->stats), sizeof((_handle)->stats), 0x0) + +#ifndef DISABLE_DP_STATS +#define DP_STATS_INC(_handle, _field, _delta) \ +{ \ + if (likely(_handle)) \ + _handle->stats._field += _delta; \ +} + +#define DP_STATS_INCC(_handle, _field, _delta, _cond) \ +{ \ + if (_cond && likely(_handle)) \ + _handle->stats._field += _delta; \ +} + +#define DP_STATS_DEC(_handle, _field, _delta) \ +{ \ + if (likely(_handle)) \ + _handle->stats._field -= _delta; \ +} + +#define DP_STATS_UPD(_handle, _field, _delta) \ +{ \ + if (likely(_handle)) \ + _handle->stats._field = _delta; \ +} + +#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \ +{ \ + DP_STATS_INC(_handle, _field.num, _count); \ + DP_STATS_INC(_handle, _field.bytes, _bytes) \ +} + +#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \ +{ \ + DP_STATS_INCC(_handle, _field.num, _count, _cond); \ + DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \ +} + +#define DP_STATS_AGGR(_handle_a, _handle_b, _field) \ +{ \ + _handle_a->stats._field += _handle_b->stats._field; \ +} + +#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \ +{ \ + DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \ + DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\ +} + +#define DP_STATS_UPD_STRUCT(_handle_a, _handle_b, _field) \ +{ \ + _handle_a->stats._field = _handle_b->stats._field; \ +} + +#define DP_HIST_INIT() \ + uint32_t num_of_packets[MAX_PDEV_CNT] = {0}; + +#define DP_HIST_PACKET_COUNT_INC(_pdev_id) \ +{ \ + ++num_of_packets[_pdev_id]; \ +} + +#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \ + do { \ + if (_p_cntrs == 1) { \ + DP_STATS_INC(_pdev, \ + tx_comp_histogram.pkts_1, 1); \ + } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \ + DP_STATS_INC(_pdev, \ + tx_comp_histogram.pkts_2_20, 1); \ 
+ } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \ + DP_STATS_INC(_pdev, \ + tx_comp_histogram.pkts_21_40, 1); \ + } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \ + DP_STATS_INC(_pdev, \ + tx_comp_histogram.pkts_41_60, 1); \ + } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \ + DP_STATS_INC(_pdev, \ + tx_comp_histogram.pkts_61_80, 1); \ + } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \ + DP_STATS_INC(_pdev, \ + tx_comp_histogram.pkts_81_100, 1); \ + } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \ + DP_STATS_INC(_pdev, \ + tx_comp_histogram.pkts_101_200, 1); \ + } else if (_p_cntrs > 200) { \ + DP_STATS_INC(_pdev, \ + tx_comp_histogram.pkts_201_plus, 1); \ + } \ + } while (0) + +#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) \ + do { \ + if (_p_cntrs == 1) { \ + DP_STATS_INC(_pdev, \ + rx_ind_histogram.pkts_1, 1); \ + } else if (_p_cntrs > 1 && _p_cntrs <= 20) { \ + DP_STATS_INC(_pdev, \ + rx_ind_histogram.pkts_2_20, 1); \ + } else if (_p_cntrs > 20 && _p_cntrs <= 40) { \ + DP_STATS_INC(_pdev, \ + rx_ind_histogram.pkts_21_40, 1); \ + } else if (_p_cntrs > 40 && _p_cntrs <= 60) { \ + DP_STATS_INC(_pdev, \ + rx_ind_histogram.pkts_41_60, 1); \ + } else if (_p_cntrs > 60 && _p_cntrs <= 80) { \ + DP_STATS_INC(_pdev, \ + rx_ind_histogram.pkts_61_80, 1); \ + } else if (_p_cntrs > 80 && _p_cntrs <= 100) { \ + DP_STATS_INC(_pdev, \ + rx_ind_histogram.pkts_81_100, 1); \ + } else if (_p_cntrs > 100 && _p_cntrs <= 200) { \ + DP_STATS_INC(_pdev, \ + rx_ind_histogram.pkts_101_200, 1); \ + } else if (_p_cntrs > 200) { \ + DP_STATS_INC(_pdev, \ + rx_ind_histogram.pkts_201_plus, 1); \ + } \ + } while (0) + +#define DP_TX_HIST_STATS_PER_PDEV() \ + do { \ + uint8_t hist_stats = 0; \ + for (hist_stats = 0; hist_stats < soc->pdev_count; \ + hist_stats++) { \ + DP_TX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \ + num_of_packets[hist_stats]); \ + } \ + } while (0) + + +#define DP_RX_HIST_STATS_PER_PDEV() \ + do { \ + uint8_t hist_stats = 0; \ + for (hist_stats = 0; hist_stats < 
soc->pdev_count; \ + hist_stats++) { \ + DP_RX_HISTOGRAM_UPDATE(soc->pdev_list[hist_stats], \ + num_of_packets[hist_stats]); \ + } \ + } while (0) + + +#else +#define DP_STATS_INC(_handle, _field, _delta) +#define DP_STATS_INCC(_handle, _field, _delta, _cond) +#define DP_STATS_DEC(_handle, _field, _delta) +#define DP_STATS_UPD(_handle, _field, _delta) +#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) +#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) +#define DP_STATS_AGGR(_handle_a, _handle_b, _field) +#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) +#define DP_HIST_INIT() +#define DP_HIST_PACKET_COUNT_INC(_pdev_id) +#define DP_TX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) +#define DP_RX_HISTOGRAM_UPDATE(_pdev, _p_cntrs) +#define DP_RX_HIST_STATS_PER_PDEV() +#define DP_TX_HIST_STATS_PER_PDEV() +#endif + +#define DP_HTT_T2H_HP_PIPE 5 + +#define DP_UPDATE_STATS(_tgtobj, _srcobj) \ + do { \ + uint8_t i; \ + uint8_t pream_type; \ + for (pream_type = 0; pream_type < DOT11_MAX; pream_type++) { \ + for (i = 0; i < MAX_MCS; i++) { \ + DP_STATS_AGGR(_tgtobj, _srcobj, \ + tx.pkt_type[pream_type].mcs_count[i]); \ + DP_STATS_AGGR(_tgtobj, _srcobj, \ + rx.pkt_type[pream_type].mcs_count[i]); \ + } \ + } \ + \ + for (i = 0; i < MAX_BW; i++) { \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.bw[i]); \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.bw[i]); \ + } \ + \ + for (i = 0; i < SS_COUNT; i++) { \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.nss[i]); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.nss[i]); \ + } \ + for (i = 0; i < WME_AC_MAX; i++) { \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \ + \ + } \ + \ + for (i = 0; i < MAX_GI; i++) { \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.sgi_count[i]); \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.sgi_count[i]); \ + } \ + \ + for (i = 0; i < MAX_RECEPTION_TYPES; i++) \ + DP_STATS_AGGR(_tgtobj, _srcobj, 
rx.reception_type[i]); \ + \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.comp_pkt); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.ucast); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.mcast); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.bcast); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_success); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.nawds_mcast); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.tx_failed); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.ofdma); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.stbc); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.ldpc); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.retries); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.non_amsdu_cnt); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.amsdu_cnt); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_tx); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_rem_notx); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason1); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason2); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.fw_reason3); \ + DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \ + \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \ + DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rssi); \ + DP_STATS_UPD_STRUCT(_tgtobj, _srcobj, rx.rx_rate); \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_ampdu_cnt); \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.ampdu_cnt); \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.non_amsdu_cnt); \ + DP_STATS_AGGR(_tgtobj, _srcobj, rx.amsdu_cnt); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.to_stack); \ + \ + for (i = 0; i < CDP_MAX_RX_RINGS; i++) \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.rcvd_reo[i]); \ + \ + _srcobj->stats.rx.unicast.num = \ + _srcobj->stats.rx.to_stack.num - \ + (_srcobj->stats.rx.multicast.num + \ + _srcobj->stats.rx.bcast.num); \ + _srcobj->stats.rx.unicast.bytes = \ + _srcobj->stats.rx.to_stack.bytes - \ + (_srcobj->stats.rx.multicast.bytes + \ + _srcobj->stats.rx.bcast.bytes); 
\ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.unicast); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.multicast); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.bcast); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.raw); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.pkts); \ + DP_STATS_AGGR_PKT(_tgtobj, _srcobj, rx.intra_bss.fail); \ + \ + _tgtobj->stats.tx.last_ack_rssi = \ + _srcobj->stats.tx.last_ack_rssi; \ + } while (0) + +extern int dp_peer_find_attach(struct dp_soc *soc); +extern void dp_peer_find_detach(struct dp_soc *soc); +extern void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer); +extern void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer); +extern void dp_peer_find_hash_erase(struct dp_soc *soc); +extern void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer); +extern void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer); +extern void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer); +extern void dp_peer_unref_delete(void *peer_handle); +extern void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, + unsigned tid, qdf_nbuf_t msdu_list); +extern void *dp_find_peer_by_addr(struct cdp_pdev *dev, + uint8_t *peer_mac_addr, uint8_t *peer_id); +extern struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc, + uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id); + +#ifndef CONFIG_WIN +QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle, + struct ol_txrx_desc_type *sta_desc); +QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id); +void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle, + struct cdp_vdev *vdev, + uint8_t *peer_addr, uint8_t *local_id); +uint16_t dp_local_peer_id(void *peer); +void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id); +QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac, + enum ol_txrx_peer_state state); +QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t 
*vdev_id); +struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle, + uint8_t sta_id); +struct cdp_vdev *dp_get_vdev_for_peer(void *peer); +uint8_t *dp_peer_get_peer_mac_addr(void *peer); +int dp_get_peer_state(void *peer_handle); +void dp_local_peer_id_pool_init(struct dp_pdev *pdev); +void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer); +void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer); +#endif +extern int dp_addba_requestprocess_wifi3(void *peer_handle, + uint8_t dialogtoken, uint16_t tid, uint16_t batimeout, + uint16_t buffersize, uint16_t startseqnum); +extern void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid, + uint8_t *dialogtoken, uint16_t *statuscode, + uint16_t *buffersize, uint16_t *batimeout); +extern void dp_set_addba_response(void *peer_handle, uint8_t tid, + uint16_t statuscode); +extern int dp_delba_process_wifi3(void *peer_handle, + int tid, uint16_t reasoncode); + +extern int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid, + uint32_t ba_window_size, uint32_t start_seq); + +extern QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, + enum hal_reo_cmd_type type, struct hal_reo_cmd_params *params, + void (*callback_fn), void *data); + +extern void dp_reo_cmdlist_destroy(struct dp_soc *soc); +extern void dp_reo_status_ring_handler(struct dp_soc *soc); +void dp_aggregate_vdev_stats(struct dp_vdev *vdev); +void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt, + union hal_reo_status *reo_status); +void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt, + union hal_reo_status *reo_status); +uint16_t dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, + qdf_nbuf_t nbuf, uint8_t newmac[][DP_MAC_ADDR_LEN], + uint8_t new_mac_cnt); +void dp_tx_me_alloc_descriptor(struct cdp_pdev *pdev); + +void dp_tx_me_free_descriptor(struct cdp_pdev *pdev); +QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev, + uint32_t stats_type_upload_mask, uint32_t config_param_0, + uint32_t 
config_param_1, uint32_t config_param_2, + uint32_t config_param_3, int cookie, int cookie_msb, + uint8_t mac_id); +void dp_htt_stats_print_tag(uint8_t tag_type, uint32_t *tag_buf); +void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf); +void dp_peer_rxtid_stats(struct dp_peer *peer, void (*callback_fn), + void *cb_ctxt); +void dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, + struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, + uint32_t *rx_pn); + +void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id); +void dp_mark_peer_inact(void *peer_handle, bool inactive); +bool dp_set_inact_params(struct cdp_pdev *pdev_handle, + u_int16_t inact_check_interval, + u_int16_t inact_normal, u_int16_t inact_overload); +bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable); +void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload); +bool dp_peer_is_inact(void *peer_handle); +void dp_init_inact_timer(struct dp_soc *soc); +void dp_free_inact_timer(struct dp_soc *soc); + +/* + * dp_get_mac_id_for_pdev() - Return mac corresponding to pdev for mac + * + * @mac_id: MAC id + * @pdev_id: pdev_id corresponding to pdev, 0 for MCL + * + * Single pdev using both MACs will operate on both MAC rings, + * which is the case for MCL. + * For WIN each PDEV will operate one ring, so index is zero. + * + */ +static inline int dp_get_mac_id_for_pdev(uint32_t mac_id, uint32_t pdev_id) +{ + if (mac_id && pdev_id) { + qdf_print("Both mac_id and pdev_id cannot be non zero"); + QDF_BUG(0); + return 0; + } + return (mac_id + pdev_id); +} + +/* + * dp_get_mac_id_for_mac() - Return mac corresponding WIN and MCL mac_ids + * + * @soc: handle to DP soc + * @mac_id: MAC id + * + * Single pdev using both MACs will operate on both MAC rings, + * which is the case for MCL. + * For WIN each PDEV will operate one ring, so index is zero. 
+ * + */ +static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id) +{ + /* + * Single pdev using both MACs will operate on both MAC rings, + * which is the case for MCL. + */ + if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) + return mac_id; + + /* For WIN each PDEV will operate one ring, so index is zero. */ + return 0; +} + +#ifdef WDI_EVENT_ENABLE +QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, + uint32_t stats_type_upload_mask, + uint8_t mac_id); + +int dp_wdi_event_unsub(struct cdp_pdev *txrx_pdev_handle, + void *event_cb_sub_handle, + uint32_t event); + +int dp_wdi_event_sub(struct cdp_pdev *txrx_pdev_handle, + void *event_cb_sub_handle, + uint32_t event); + +void dp_wdi_event_handler(enum WDI_EVENT event, void *soc, + void *data, u_int16_t peer_id, + int status, u_int8_t pdev_id); + +int dp_wdi_event_attach(struct dp_pdev *txrx_pdev); +int dp_wdi_event_detach(struct dp_pdev *txrx_pdev); +int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, + bool enable); +void *dp_get_pldev(struct cdp_pdev *txrx_pdev); +void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn); + +static inline void dp_hif_update_pipe_callback(void *soc, void *cb_context, + QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), uint8_t pipe_id) +{ + struct hif_msg_callbacks hif_pipe_callbacks; + struct dp_soc *dp_soc = (struct dp_soc *)soc; + + /* TODO: Temporary change to bypass HTC connection for this new + * HIF pipe, which will be used for packet log and other high- + * priority HTT messages. 
Proper HTC connection to be added
+ * later once required FW changes are available
+ */
+	hif_pipe_callbacks.rxCompletionHandler = callback;
+	hif_pipe_callbacks.Context = cb_context;
+	hif_update_pipe_callback(dp_soc->hif_handle,
+		DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks);
+}
+
+#else
+static inline int dp_wdi_event_unsub(struct cdp_pdev *txrx_pdev_handle,
+	void *event_cb_sub_handle,
+	uint32_t event)
+{
+	return 0;
+}
+
+static inline int dp_wdi_event_sub(struct cdp_pdev *txrx_pdev_handle,
+	void *event_cb_sub_handle,
+	uint32_t event)
+{
+	return 0;
+}
+
+static inline void dp_wdi_event_handler(enum WDI_EVENT event, void *soc,
+	void *data, u_int16_t peer_id,
+	int status, u_int8_t pdev_id)
+{
+}
+
+static inline int dp_wdi_event_attach(struct dp_pdev *txrx_pdev)
+{
+	return 0;
+}
+
+static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev)
+{
+	return 0;
+}
+
+static inline int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
+	bool enable)
+{
+	return 0;
+}
+static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
+	uint32_t stats_type_upload_mask, uint8_t mac_id)
+{
+	return 0;
+}
+static inline void dp_hif_update_pipe_callback(void *soc, void *cb_context,
+	QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), uint8_t pipe_id)
+{
+}
+#endif /* WDI_EVENT_ENABLE */
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+void dp_tx_dump_flow_pool_info(void *soc);
+int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
+	bool force);
+#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
+#endif /* #ifndef _DP_INTERNAL_H_ */
diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.c
new file mode 100644
index 0000000000000000000000000000000000000000..b8df3700874803664e63e985be1f28c0e3c19a6e
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.c
@@ -0,0 +1,935 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifdef IPA_OFFLOAD + +#include +#include +#include +#include +#include +#include +#include +#include +#include "dp_types.h" +#include "dp_htt.h" +#include "dp_tx.h" +#include "dp_ipa.h" + +/* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */ +#define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT (2048) + +/** + * dp_tx_ipa_uc_detach - Free autonomy TX resources + * @soc: data path instance + * @pdev: core txrx pdev context + * + * Free allocated TX buffers with WBM SRNG + * + * Return: none + */ +static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) +{ + int idx; + uint32_t ring_base_align = 8; + /* + * Uncomment when dp_ops_cfg.cfg_attach is implemented + * unsigned int uc_tx_buf_sz = + * dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev); + */ + unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT; + unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1; + + for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) { + if (soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx]) { + qdf_mem_free_consistent( + soc->osdev, soc->osdev->dev, + alloc_size, + soc->ipa_uc_tx_rsc. + tx_buf_pool_vaddr_unaligned[idx], + soc->ipa_uc_tx_rsc. 
+ tx_buf_pool_paddr_unaligned[idx], + 0); + + soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] = + (void *)NULL; + soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned[idx] = + (qdf_dma_addr_t)NULL; + } + } + + qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned); + soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL; + qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned); + soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned = NULL; +} + +/** + * dp_rx_ipa_uc_detach - free autonomy RX resources + * @soc: data path instance + * @pdev: core txrx pdev context + * + * This function will detach DP RX into main device context + * will free DP Rx resources. + * + * Return: none + */ +static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) +{ +} + +int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) +{ + /* TX resource detach */ + dp_tx_ipa_uc_detach(soc, pdev); + + /* RX resource detach */ + dp_rx_ipa_uc_detach(soc, pdev); + + return QDF_STATUS_SUCCESS; /* success */ +} + +/** + * dp_tx_ipa_uc_attach - Allocate autonomy TX resources + * @soc: data path instance + * @pdev: Physical device handle + * + * Allocate TX buffer from non-cacheable memory + * Attache allocated TX buffers with WBM SRNG + * + * Return: int + */ +static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev) +{ + uint32_t tx_buffer_count; + uint32_t ring_base_align = 8; + void *buffer_vaddr_unaligned; + void *buffer_vaddr; + qdf_dma_addr_t buffer_paddr_unaligned; + qdf_dma_addr_t buffer_paddr; + struct hal_srng *wbm_srng = + soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; + struct hal_srng_params srng_params; + uint32_t paddr_lo; + uint32_t paddr_hi; + void *ring_entry; + int num_entries; + int retval = QDF_STATUS_SUCCESS; + /* + * Uncomment when dp_ops_cfg.cfg_attach is implemented + * unsigned int uc_tx_buf_sz = + * dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev); + */ + unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT; + unsigned int 
alloc_size = uc_tx_buf_sz + ring_base_align - 1; + + hal_get_srng_params(soc->hal_soc, (void *)wbm_srng, &srng_params); + num_entries = srng_params.num_entries; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: requested %d buffers to be posted to wbm ring", + __func__, num_entries); + + soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = + qdf_mem_malloc(num_entries * + sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned)); + if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: IPA WBM Ring Tx buf pool vaddr alloc fail", + __func__); + return -ENOMEM; + } + + soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned = + qdf_mem_malloc(num_entries * + sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned)); + if (!soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: IPA WBM Ring Tx buf pool paddr alloc fail", + __func__); + qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned); + return -ENOMEM; + } + + hal_srng_access_start(soc->hal_soc, (void *)wbm_srng); + + /* + * Allocate Tx buffers as many as possible + * Populate Tx buffers into WBM2IPA ring + * This initial buffer population will simulate H/W as source ring, + * and update HP + */ + for (tx_buffer_count = 0; + tx_buffer_count < num_entries - 1; tx_buffer_count++) { + buffer_vaddr_unaligned = qdf_mem_alloc_consistent(soc->osdev, + soc->osdev->dev, alloc_size, &buffer_paddr_unaligned); + if (!buffer_vaddr_unaligned) + break; + + ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc, + (void *)wbm_srng); + if (!ring_entry) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: Failed to get WBM ring entry\n", + __func__); + qdf_mem_free_consistent(soc->osdev, soc->osdev->dev, + alloc_size, buffer_vaddr_unaligned, + buffer_paddr_unaligned, 0); + break; + } + + buffer_vaddr = (void *)qdf_align((unsigned long) + buffer_vaddr_unaligned, ring_base_align); + buffer_paddr = 
buffer_paddr_unaligned + + ((unsigned long)(buffer_vaddr) - + (unsigned long)buffer_vaddr_unaligned); + + paddr_lo = ((u64)buffer_paddr & 0x00000000ffffffff); + paddr_hi = ((u64)buffer_paddr & 0x0000001f00000000) >> 32; + HAL_WBM_PADDR_LO_SET(ring_entry, paddr_lo); + HAL_WBM_PADDR_HI_SET(ring_entry, paddr_hi); + + soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count] + = buffer_vaddr_unaligned; + soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned[tx_buffer_count] + = buffer_paddr_unaligned; + } + + hal_srng_access_end(soc->hal_soc, wbm_srng); + + soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count; + + if (tx_buffer_count) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: IPA WDI TX buffer: %d allocated\n", + __func__, tx_buffer_count); + } else { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: No IPA WDI TX buffer allocated\n", + __func__); + qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned); + soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL; + qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned); + soc->ipa_uc_tx_rsc.tx_buf_pool_paddr_unaligned = NULL; + retval = -ENOMEM; + } + + return retval; +} + +/** + * dp_rx_ipa_uc_attach - Allocate autonomy RX resources + * @soc: data path instance + * @pdev: core txrx pdev context + * + * This function will attach a DP RX instance into the main + * device (SOC) context. 
+ * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev) +{ + int error; + + /* TX resource attach */ + error = dp_tx_ipa_uc_attach(soc, pdev); + if (error) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: DP IPA UC TX attach fail code %d\n", + __func__, error); + return error; + } + + /* RX resource attach */ + error = dp_rx_ipa_uc_attach(soc, pdev); + if (error) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: DP IPA UC RX attach fail code %d\n", + __func__, error); + dp_tx_ipa_uc_detach(soc, pdev); + return error; + } + + return QDF_STATUS_SUCCESS; /* success */ +} + +/* + * dp_ipa_ring_resource_setup() - setup IPA ring resources + * @soc: data path SoC handle + * + * Return: none + */ +int dp_ipa_ring_resource_setup(struct dp_soc *soc, + struct dp_pdev *pdev) +{ + struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc; + struct hal_srng *hal_srng; + struct hal_srng_params srng_params; + qdf_dma_addr_t hp_addr; + unsigned long addr_offset, dev_base_paddr; + + /* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */ + hal_srng = soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng; + hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params); + + soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr = + srng_params.ring_base_paddr; + soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr = + srng_params.ring_base_vaddr; + soc->ipa_uc_tx_rsc.ipa_tcl_ring_size = + (srng_params.num_entries * srng_params.entry_size) << 2; + /* + * For the register backed memory addresses, use the scn->mem_pa to + * calculate the physical address of the shadow registers + */ + dev_base_paddr = + (unsigned long) + ((struct hif_softc *)(hal_soc->hif_handle))->mem_pa; + addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) - + (unsigned long)(hal_soc->dev_base_addr); + 
soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr = + (qdf_dma_addr_t)(addr_offset + dev_base_paddr); + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, + "%s: addr_offset=%x, dev_base_paddr=%x, ipa_tcl_hp_paddr=%x", + __func__, (unsigned int)addr_offset, + (unsigned int)dev_base_paddr, + (unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr)); + + /* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */ + hal_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; + hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params); + + soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr = + srng_params.ring_base_paddr; + soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr = + srng_params.ring_base_vaddr; + soc->ipa_uc_tx_rsc.ipa_wbm_ring_size = + (srng_params.num_entries * srng_params.entry_size) << 2; + addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) - + (unsigned long)(hal_soc->dev_base_addr); + soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr = + (qdf_dma_addr_t)(addr_offset + dev_base_paddr); + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, + "%s: addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x", + __func__, (unsigned int)addr_offset, + (unsigned int)dev_base_paddr, + (unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr)); + + /* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */ + hal_srng = soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng; + hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params); + + soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr = + srng_params.ring_base_paddr; + soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr = + srng_params.ring_base_vaddr; + soc->ipa_uc_rx_rsc.ipa_reo_ring_size = + (srng_params.num_entries * srng_params.entry_size) << 2; + addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) - + (unsigned long)(hal_soc->dev_base_addr); + soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr = + (qdf_dma_addr_t)(addr_offset + dev_base_paddr); + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, + "%s: addr_offset=%x, dev_base_paddr=%x, ipa_reo_tp_paddr=%x", + __func__, (unsigned 
int)addr_offset, + (unsigned int)dev_base_paddr, + (unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr)); + + hal_srng = pdev->rx_refill_buf_ring2.hal_srng; + hal_get_srng_params(hal_soc, (void *)hal_srng, &srng_params); + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr = + srng_params.ring_base_paddr; + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr = + srng_params.ring_base_vaddr; + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size = + (srng_params.num_entries * srng_params.entry_size) << 2; + hp_addr = hal_srng_get_hp_addr(hal_soc, (void *)hal_srng); + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr = hp_addr; + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, + "%s: ipa_rx_refill_buf_hp_paddr=%x", __func__, + (unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr)); + + return 0; +} + +/** + * dp_ipa_uc_get_resource() - Client request resource information + * @ppdev - handle to the device instance + * + * IPA client will request IPA UC related resource information + * Resource information will be distributed to IPA module + * All of the required resources should be pre-allocated + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_ipa_get_resource(struct cdp_pdev *ppdev) +{ + struct dp_pdev *pdev = (struct dp_pdev *)ppdev; + struct dp_soc *soc = pdev->soc; + struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; + + ipa_res->tx_ring_base_paddr = + soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr; + ipa_res->tx_ring_size = + soc->ipa_uc_tx_rsc.ipa_tcl_ring_size; + ipa_res->tx_num_alloc_buffer = + (uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; + + ipa_res->tx_comp_ring_base_paddr = + soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr; + ipa_res->tx_comp_ring_size = + soc->ipa_uc_tx_rsc.ipa_wbm_ring_size; + + ipa_res->rx_rdy_ring_base_paddr = + soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr; + ipa_res->rx_rdy_ring_size = + soc->ipa_uc_rx_rsc.ipa_reo_ring_size; + + ipa_res->rx_refill_ring_base_paddr = + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr; + 
ipa_res->rx_refill_ring_size = + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size; + + if ((0 == ipa_res->tx_comp_ring_base_paddr) || + (0 == ipa_res->rx_rdy_ring_base_paddr)) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_ipa_set_doorbell_paddr () - Set doorbell register physical address to SRNG + * @ppdev - handle to the device instance + * + * Set TX_COMP_DOORBELL register physical address to WBM Head_Ptr_MemAddr_LSB + * Set RX_READ_DOORBELL register physical address to REO Head_Ptr_MemAddr_LSB + * + * Return: none + */ +QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_pdev *ppdev) +{ + struct dp_pdev *pdev = (struct dp_pdev *)ppdev; + struct dp_soc *soc = pdev->soc; + struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; + struct hal_srng *wbm_srng = + soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng; + struct hal_srng *reo_srng = + soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng; + + hal_srng_dst_set_hp_paddr(wbm_srng, ipa_res->tx_comp_doorbell_paddr); + hal_srng_dst_init_hp(wbm_srng, ipa_res->tx_comp_doorbell_vaddr); + hal_srng_dst_set_hp_paddr(reo_srng, ipa_res->rx_ready_doorbell_paddr); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_ipa_op_response() - Handle OP command response from firmware + * @ppdev - handle to the device instance + * @op_msg: op response message from firmware + * + * Return: none + */ +QDF_STATUS dp_ipa_op_response(struct cdp_pdev *ppdev, uint8_t *op_msg) +{ + struct dp_pdev *pdev = (struct dp_pdev *)ppdev; + + if (pdev->ipa_uc_op_cb) { + pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt); + } else { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: IPA callback function is not registered", __func__); + qdf_mem_free(op_msg); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_ipa_register_op_cb() - Register OP handler function + * @ppdev - handle to the device instance + * @op_cb: handler function pointer + * + * Return: none + */ +QDF_STATUS 
dp_ipa_register_op_cb(struct cdp_pdev *ppdev, + ipa_uc_op_cb_type op_cb, + void *usr_ctxt) +{ + struct dp_pdev *pdev = (struct dp_pdev *)ppdev; + + pdev->ipa_uc_op_cb = op_cb; + pdev->usr_ctxt = usr_ctxt; + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_ipa_get_stat() - Get firmware wdi status + * @ppdev - handle to the device instance + * + * Return: none + */ +QDF_STATUS dp_ipa_get_stat(struct cdp_pdev *ppdev) +{ + /* TBD */ + return QDF_STATUS_SUCCESS; +} + +/** + * dp_tx_send_ipa_data_frame() - send IPA data frame + * @vdev: vdev + * @skb: skb + * + * Return: skb/ NULL is for success + */ +qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_vdev *vdev, qdf_nbuf_t skb) +{ + qdf_nbuf_t ret; + + /* Terminate the (single-element) list of tx frames */ + qdf_nbuf_set_next(skb, NULL); + ret = dp_tx_send((struct dp_vdev_t *)vdev, skb); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Failed to tx", __func__); + return ret; + } + + return NULL; +} + +/** + * dp_ipa_enable_autonomy() – Enable autonomy RX path + * @pdev - handle to the device instance + * + * Set all RX packet route to IPA REO ring + * Program Destination_Ring_Ctrl_IX_0 REO register to point IPA REO ring + * Return: none + */ +QDF_STATUS dp_ipa_enable_autonomy(struct cdp_pdev *ppdev) +{ + struct dp_pdev *pdev = (struct dp_pdev *)ppdev; + struct dp_soc *soc = pdev->soc; + uint32_t remap_val; + + /* Call HAL API to remap REO rings to REO2IPA ring */ + remap_val = HAL_REO_REMAP_VAL(REO_REMAP_TCL, REO_REMAP_TCL) | + HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW4) | + HAL_REO_REMAP_VAL(REO_REMAP_SW2, REO_REMAP_SW4) | + HAL_REO_REMAP_VAL(REO_REMAP_SW3, REO_REMAP_SW4) | + HAL_REO_REMAP_VAL(REO_REMAP_SW4, REO_REMAP_SW4) | + HAL_REO_REMAP_VAL(REO_REMAP_RELEASE, REO_REMAP_RELEASE) | + HAL_REO_REMAP_VAL(REO_REMAP_FW, REO_REMAP_FW) | + HAL_REO_REMAP_VAL(REO_REMAP_UNUSED, REO_REMAP_FW); + hal_reo_remap_IX0(soc->hal_soc, remap_val); + + return QDF_STATUS_SUCCESS; +} + +/** + * 
dp_ipa_disable_autonomy() – Disable autonomy RX path + * @ppdev - handle to the device instance + * + * Disable RX packet routing to IPA REO + * Program Destination_Ring_Ctrl_IX_0 REO register to disable + * Return: none + */ +QDF_STATUS dp_ipa_disable_autonomy(struct cdp_pdev *ppdev) +{ + struct dp_pdev *pdev = (struct dp_pdev *)ppdev; + struct dp_soc *soc = pdev->soc; + uint32_t remap_val; + + /* Call HAL API to remap REO rings to REO2IPA ring */ + remap_val = HAL_REO_REMAP_VAL(REO_REMAP_TCL, REO_REMAP_TCL) | + HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW1) | + HAL_REO_REMAP_VAL(REO_REMAP_SW2, REO_REMAP_SW2) | + HAL_REO_REMAP_VAL(REO_REMAP_SW3, REO_REMAP_SW3) | + HAL_REO_REMAP_VAL(REO_REMAP_SW4, REO_REMAP_SW2) | + HAL_REO_REMAP_VAL(REO_REMAP_RELEASE, REO_REMAP_RELEASE) | + HAL_REO_REMAP_VAL(REO_REMAP_FW, REO_REMAP_FW) | + HAL_REO_REMAP_VAL(REO_REMAP_UNUSED, REO_REMAP_FW); + hal_reo_remap_IX0(soc->hal_soc, remap_val); + + return QDF_STATUS_SUCCESS; +} + +/* This should be configurable per H/W configuration enable status */ +#define L3_HEADER_PADDING 2 + +/** + * dp_ipa_setup() - Setup and connect IPA pipes + * @ppdev - handle to the device instance + * @ipa_i2w_cb: IPA to WLAN callback + * @ipa_w2i_cb: WLAN to IPA callback + * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback + * @ipa_desc_size: IPA descriptor size + * @ipa_priv: handle to the HTT instance + * @is_rm_enabled: Is IPA RM enabled or not + * @tx_pipe_handle: pointer to Tx pipe handle + * @rx_pipe_handle: pointer to Rx pipe handle + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb, + void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb, + uint32_t ipa_desc_size, void *ipa_priv, + bool is_rm_enabled, uint32_t *tx_pipe_handle, + uint32_t *rx_pipe_handle) +{ + struct dp_pdev *pdev = (struct dp_pdev *)ppdev; + struct dp_soc *soc = pdev->soc; + struct dp_ipa_resources *ipa_res = &pdev->ipa_resource; + qdf_ipa_wdi3_setup_info_t *tx; + qdf_ipa_wdi3_setup_info_t 
*rx;
+	qdf_ipa_wdi3_conn_in_params_t pipe_in;
+	qdf_ipa_wdi3_conn_out_params_t pipe_out;
+	struct tcl_data_cmd *tcl_desc_ptr;
+	uint8_t *desc_addr;
+	uint32_t desc_size;
+	int ret;
+
+	/* tx and rx point into pipe_in; zeroing pipe_in below initializes
+	 * them. Never memset &tx/&rx with the struct size: stack overrun. */
+	qdf_mem_zero(&pipe_in, sizeof(pipe_in));
+	qdf_mem_zero(&pipe_out, sizeof(pipe_out));
+
+	/* TX PIPE */
+	/**
+	 * Transfer Ring: WBM Ring
+	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
+	 * Event Ring: TCL ring
+	 * Event Ring Doorbell PA: TCL Head Pointer Address
+	 */
+	tx = &QDF_IPA_WDI3_CONN_IN_PARAMS_TX(&pipe_in);
+	QDF_IPA_WDI3_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT;
+	QDF_IPA_WDI3_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
+	QDF_IPA_WDI3_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0;
+	QDF_IPA_WDI3_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0;
+	QDF_IPA_WDI3_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0;
+	QDF_IPA_WDI3_SETUP_INFO_MODE(tx) = IPA_BASIC;
+	QDF_IPA_WDI3_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true;
+	QDF_IPA_WDI3_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
+	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
+		ipa_res->tx_comp_ring_base_paddr;
+	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
+		ipa_res->tx_comp_ring_size;
+	/* WBM Tail Pointer Address */
+	QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
+		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
+	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
+		ipa_res->tx_ring_base_paddr;
+	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size;
+	/* TCL Head Pointer Address */
+	QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
+		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
+	QDF_IPA_WDI3_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
+		ipa_res->tx_num_alloc_buffer;
+	QDF_IPA_WDI3_SETUP_INFO_PKT_OFFSET(tx) = 0;
+
+	/* Preprogram TCL descriptor */
+	desc_addr =
+		(uint8_t *)QDF_IPA_WDI3_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
+	desc_size = sizeof(struct tcl_data_cmd);
+	HAL_TX_DESC_SET_TLV_HDR(desc_addr,
HAL_TX_TCL_DATA_TAG, desc_size); + tcl_desc_ptr = (struct tcl_data_cmd *) + (QDF_IPA_WDI3_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1); + tcl_desc_ptr->buf_addr_info.return_buffer_manager = + HAL_RX_BUF_RBM_SW2_BM; + tcl_desc_ptr->addrx_en = 1; /* Address X search enable in ASE */ + tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET; + tcl_desc_ptr->packet_offset = 2; /* padding for alignment */ + + /* RX PIPE */ + /** + * Transfer Ring: REO Ring + * Transfer Ring Doorbell PA: REO Tail Pointer Address + * Event Ring: FW ring + * Event Ring Doorbell PA: FW Head Pointer Address + */ + rx = &QDF_IPA_WDI3_CONN_IN_PARAMS_RX(&pipe_in); + QDF_IPA_WDI3_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT; + QDF_IPA_WDI3_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_TX_HDR_LEN; + QDF_IPA_WDI3_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0; + QDF_IPA_WDI3_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1; + QDF_IPA_WDI3_SETUP_INFO_MODE(rx) = IPA_BASIC; + QDF_IPA_WDI3_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD; + QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) = ipa_res->rx_rdy_ring_base_paddr; + QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_SIZE(rx) = ipa_res->rx_rdy_ring_size; + QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) = /* REO Tail Pointer Address */ + soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr; + QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_BASE_PA(rx) = ipa_res->rx_refill_ring_base_paddr; + QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_SIZE(rx) = ipa_res->rx_refill_ring_size; + QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) = /* FW Head Pointer Address */ + soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr; + QDF_IPA_WDI3_SETUP_INFO_PKT_OFFSET(rx) = RX_PKT_TLVS_LEN + L3_HEADER_PADDING; + + QDF_IPA_WDI3_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb; + QDF_IPA_WDI3_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv; + + /* Connect WDI IPA PIPE */ + ret = qdf_ipa_wdi3_conn_pipes(&pipe_in, &pipe_out); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: ipa_wdi3_conn_pipes: IPA pipe setup failed: ret=%d", + 
__func__, ret); + return QDF_STATUS_E_FAILURE; + } + + /* IPA uC Doorbell registers */ + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Tx DB PA=0x%x, Rx DB PA=0x%x", + __func__, + (unsigned int)QDF_IPA_WDI3_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out), + (unsigned int)QDF_IPA_WDI3_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out)); + + ipa_res->tx_comp_doorbell_paddr = + QDF_IPA_WDI3_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out); + ipa_res->tx_comp_doorbell_vaddr = + QDF_IPA_WDI3_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out); + ipa_res->rx_ready_doorbell_paddr = + QDF_IPA_WDI3_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out); + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK", + __func__, + "transfer_ring_base_pa", + (void *)QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_BASE_PA(tx), + "transfer_ring_size", + QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_SIZE(tx), + "transfer_ring_doorbell_pa", + (void *)QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx), + "event_ring_base_pa", + (void *)QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_BASE_PA(tx), + "event_ring_size", + QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_SIZE(tx), + "event_ring_doorbell_pa", + (void *)QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx), + "num_pkt_buffers", + QDF_IPA_WDI3_SETUP_INFO_NUM_PKT_BUFFERS(tx), + "tx_comp_doorbell_paddr", + (void *)ipa_res->tx_comp_doorbell_paddr); + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK", + __func__, + "transfer_ring_base_pa", + (void *)QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_BASE_PA(rx), + "transfer_ring_size", + QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_SIZE(rx), + "transfer_ring_doorbell_pa", + (void *)QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx), + "event_ring_base_pa", + (void *)QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_BASE_PA(rx), + "event_ring_size", + QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_SIZE(rx), + "event_ring_doorbell_pa", + (void 
*)QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx),
+		  "num_pkt_buffers",
+		  QDF_IPA_WDI3_SETUP_INFO_NUM_PKT_BUFFERS(rx),
+		  "rx_ready_doorbell_paddr",
+		  (void *)ipa_res->rx_ready_doorbell_paddr);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_ipa_cleanup() - Disconnect IPA pipes
+ * @tx_pipe_handle: Tx pipe handle
+ * @rx_pipe_handle: Rx pipe handle
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_ipa_cleanup(uint32_t tx_pipe_handle, uint32_t rx_pipe_handle)
+{
+	int ret;
+
+	ret = qdf_ipa_wdi3_disconn_pipes();
+	if (ret) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		"%s: ipa_wdi3_disconn_pipes: IPA pipe cleanup failed: ret=%d",
+			  __func__, ret);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_ipa_setup_iface() - Setup IPA header and register interface
+ * @ifname: Interface name
+ * @mac_addr: Interface MAC address
+ * @prod_client: IPA prod client type
+ * @cons_client: IPA cons client type
+ * @session_id: Session ID
+ * @is_ipv6_enabled: Is IPV6 enabled or not
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
+			      qdf_ipa_client_type_t prod_client,
+			      qdf_ipa_client_type_t cons_client,
+			      uint8_t session_id, bool is_ipv6_enabled)
+{
+	qdf_ipa_wdi3_reg_intf_in_params_t in;
+	qdf_ipa_wdi3_hdr_info_t hdr_info;
+	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
+	int ret = -EINVAL;
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
+		  "%s: Add Partial hdr: %s, %pM",
+		  __func__, ifname, mac_addr);
+
+	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi3_hdr_info_t));
+	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
+
+	/* IPV4 header */
+	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
+
+	QDF_IPA_WDI3_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
+	QDF_IPA_WDI3_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
+	QDF_IPA_WDI3_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
+	QDF_IPA_WDI3_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
+		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
+
+	
QDF_IPA_WDI3_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname; + memcpy(&(QDF_IPA_WDI3_REG_INTF_IN_PARAMS_HDR_INFO(&in)[0]), &hdr_info, + sizeof(qdf_ipa_wdi3_hdr_info_t)); + QDF_IPA_WDI3_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1; + QDF_IPA_WDI3_REG_INTF_IN_PARAMS_META_DATA(&in) = + htonl(session_id << 16); + QDF_IPA_WDI3_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000); + + /* IPV6 header */ + if (is_ipv6_enabled) { + uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IPV6); + memcpy(&(QDF_IPA_WDI3_REG_INTF_IN_PARAMS_HDR_INFO(&in)[1]), + &hdr_info, sizeof(qdf_ipa_wdi3_hdr_info_t)); + } + + ret = qdf_ipa_wdi3_reg_intf(&in); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: ipa_wdi3_reg_intf: register IPA interface falied: ret=%d", + __func__, ret); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface + * @ifname: Interface name + * @is_ipv6_enabled: Is IPV6 enabled or not + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled) +{ + int ret; + + ret = qdf_ipa_wdi3_dereg_intf(ifname); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: ipa_wdi3_dereg_intf: IPA pipe deregistration failed: ret=%d", + __func__, ret); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + + /** + * dp_ipa_uc_enable_pipes() - Enable and resume traffic on Tx/Rx pipes + * @ppdev - handle to the device instance + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_ipa_enable_pipes(struct cdp_pdev *ppdev) +{ + QDF_STATUS result; + + result = qdf_ipa_wdi3_enable_pipes(); + if (result) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Enable WDI PIPE fail, code %d", + __func__, result); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_ipa_uc_disable_pipes() – Suspend traffic and disable Tx/Rx pipes + * @ppdev - handle to the device instance + * + * 
Return: QDF_STATUS + */ +QDF_STATUS dp_ipa_disable_pipes(struct cdp_pdev *ppdev) +{ + QDF_STATUS result; + + result = qdf_ipa_wdi3_disable_pipes(); + if (result) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Disable WDI PIPE fail, code %d", + __func__, result); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates + * @client: Client type + * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps) + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps) +{ + qdf_ipa_wdi3_perf_profile_t profile; + QDF_STATUS result; + + profile.client = client; + profile.max_supported_bw_mbps = max_supported_bw_mbps; + + result = qdf_ipa_wdi3_set_perf_profile(&profile); + if (result) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: ipa_wdi3_set_perf_profile fail, code %d", + __func__, result); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.h new file mode 100644 index 0000000000000000000000000000000000000000..b9013374a1d1a0b3079b48440ce1c1fe3c03e7d9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_ipa.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DP_IPA_H_ +#define _DP_IPA_H_ + +#ifdef IPA_OFFLOAD + +#define IPA_TCL_DATA_RING_IDX 2 +#define IPA_TX_COMP_RING_IDX 2 +#define IPA_REO_DEST_RING_IDX 3 +#define IPA_RX_REFILL_BUF_RING_IDX 2 + +/** + * struct dp_ipa_uc_tx_hdr - full tx header registered to IPA hardware + * @eth: ether II header + */ +struct dp_ipa_uc_tx_hdr { + struct ethhdr eth; +} __packed; + +/** + * struct dp_ipa_uc_rx_hdr - full rx header registered to IPA hardware + * @eth: ether II header + */ +struct dp_ipa_uc_rx_hdr { + struct ethhdr eth; +} __packed; + +#define DP_IPA_UC_WLAN_TX_HDR_LEN sizeof(struct dp_ipa_uc_tx_hdr) +#define DP_IPA_UC_WLAN_RX_HDR_LEN sizeof(struct dp_ipa_uc_rx_hdr) +#define DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET 0 + +QDF_STATUS dp_ipa_get_resource(struct cdp_pdev *pdev); +QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_pdev *pdev); +QDF_STATUS dp_ipa_uc_set_active(struct cdp_pdev *pdev, bool uc_active, + bool is_tx); +QDF_STATUS dp_ipa_op_response(struct cdp_pdev *pdev, uint8_t *op_msg); +QDF_STATUS dp_ipa_register_op_cb(struct cdp_pdev *pdev, ipa_uc_op_cb_type op_cb, + void *usr_ctxt); +QDF_STATUS dp_ipa_get_stat(struct cdp_pdev *pdev); +qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_vdev *vdev, qdf_nbuf_t skb); +QDF_STATUS dp_ipa_enable_autonomy(struct cdp_pdev *pdev); +QDF_STATUS dp_ipa_disable_autonomy(struct cdp_pdev *pdev); +QDF_STATUS dp_ipa_setup(struct cdp_pdev *pdev, void *ipa_i2w_cb, + void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb, + uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled, + uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle); +QDF_STATUS dp_ipa_cleanup(uint32_t tx_pipe_handle, + uint32_t 
rx_pipe_handle); +QDF_STATUS dp_ipa_remove_header(char *name); +int dp_ipa_add_header_info(char *ifname, uint8_t *mac_addr, + uint8_t session_id, bool is_ipv6_enabled); +int dp_ipa_register_interface(char *ifname, bool is_ipv6_enabled); +QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr, + qdf_ipa_client_type_t prod_client, + qdf_ipa_client_type_t cons_client, + uint8_t session_id, bool is_ipv6_enabled); +QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled); +QDF_STATUS dp_ipa_enable_pipes(struct cdp_pdev *pdev); +QDF_STATUS dp_ipa_disable_pipes(struct cdp_pdev *pdev); +QDF_STATUS dp_ipa_set_perf_level(int client, + uint32_t max_supported_bw_mbps); +int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev); +int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev); +int dp_ipa_ring_resource_setup(struct dp_soc *soc, + struct dp_pdev *pdev); +#else +static inline int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static inline int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static inline int dp_ipa_ring_resource_setup(struct dp_soc *soc, + struct dp_pdev *pdev) +{ + return 0; +} +#endif +#endif /* _DP_IPA_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c new file mode 100644 index 0000000000000000000000000000000000000000..f91f727335156cf3a130004813c3bd428f5ce361 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c @@ -0,0 +1,7759 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "dp_htt.h" +#include "dp_types.h" +#include "dp_internal.h" +#include "dp_tx.h" +#include "dp_tx_desc.h" +#include "dp_rx.h" +#include +#include +#include "cdp_txrx_cmn_struct.h" +#include "cdp_txrx_stats_struct.h" +#include +#include "dp_peer.h" +#include "dp_rx_mon.h" +#include "htt_stats.h" +#include "qdf_mem.h" /* qdf_mem_malloc,free */ +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +#include "cdp_txrx_flow_ctrl_v2.h" +#else +static inline void +cdp_dump_flow_pool_info(struct cdp_soc_t *soc) +{ + return; +} +#endif +#include "dp_ipa.h" + +#ifdef CONFIG_MCL +static void dp_service_mon_rings(void *arg); +#ifndef REMOVE_PKT_LOG +#include +#include +static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn); +#endif +#endif +static void dp_pktlogmod_exit(struct dp_pdev *handle); +static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle, + uint8_t *peer_mac_addr); +static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap); + +#define DP_INTR_POLL_TIMER_MS 10 +#define DP_WDS_AGING_TIMER_DEFAULT_MS 120000 +#define DP_MCS_LENGTH (6*MAX_MCS) +#define DP_NSS_LENGTH (6*SS_COUNT) +#define DP_RXDMA_ERR_LENGTH (6*HAL_RXDMA_ERR_MAX) +#define DP_REO_ERR_LENGTH (6*HAL_REO_ERR_MAX) +#define DP_MAX_MCS_STRING_LEN 30 +#define DP_CURR_FW_STATS_AVAIL 19 +#define DP_HTT_DBG_EXT_STATS_MAX 256 +#define DP_MAX_SLEEP_TIME 100 + +#ifdef 
IPA_OFFLOAD +/* Exclude IPA rings from the interrupt context */ +#define TX_RING_MASK_VAL 0xb +#define RX_RING_MASK_VAL 0x7 +#else +#define TX_RING_MASK_VAL 0xF +#define RX_RING_MASK_VAL 0xF +#endif + +bool rx_hash = 1; +qdf_declare_param(rx_hash, bool); + +#define STR_MAXLEN 64 + +#define DP_PPDU_STATS_CFG_ALL 0xFFFF + +/* PPDU stats mask sent to FW to enable enhanced stats */ +#define DP_PPDU_STATS_CFG_ENH_STATS 0xE67 +/* PPDU stats mask sent to FW to support debug sniffer feature */ +#define DP_PPDU_STATS_CFG_SNIFFER 0x2FFF +/** + * default_dscp_tid_map - Default DSCP-TID mapping + * + * DSCP TID + * 000000 0 + * 001000 1 + * 010000 2 + * 011000 3 + * 100000 4 + * 101000 5 + * 110000 6 + * 111000 7 + */ +static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, + 5, 5, 5, 5, 5, 5, 5, 5, + 6, 6, 6, 6, 6, 6, 6, 6, + 7, 7, 7, 7, 7, 7, 7, 7, +}; + +/* + * struct dp_rate_debug + * + * @mcs_type: print string for a given mcs + * @valid: valid mcs rate? 
+ */ +struct dp_rate_debug { + char mcs_type[DP_MAX_MCS_STRING_LEN]; + uint8_t valid; +}; + +#define MCS_VALID 1 +#define MCS_INVALID 0 + +static const struct dp_rate_debug dp_rate_string[DOT11_MAX][MAX_MCS] = { + + { + {"OFDM 48 Mbps", MCS_VALID}, + {"OFDM 24 Mbps", MCS_VALID}, + {"OFDM 12 Mbps", MCS_VALID}, + {"OFDM 6 Mbps ", MCS_VALID}, + {"OFDM 54 Mbps", MCS_VALID}, + {"OFDM 36 Mbps", MCS_VALID}, + {"OFDM 18 Mbps", MCS_VALID}, + {"OFDM 9 Mbps ", MCS_VALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_VALID}, + }, + { + {"CCK 11 Mbps Long ", MCS_VALID}, + {"CCK 5.5 Mbps Long ", MCS_VALID}, + {"CCK 2 Mbps Long ", MCS_VALID}, + {"CCK 1 Mbps Long ", MCS_VALID}, + {"CCK 11 Mbps Short ", MCS_VALID}, + {"CCK 5.5 Mbps Short", MCS_VALID}, + {"CCK 2 Mbps Short ", MCS_VALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_VALID}, + }, + { + {"HT MCS 0 (BPSK 1/2) ", MCS_VALID}, + {"HT MCS 1 (QPSK 1/2) ", MCS_VALID}, + {"HT MCS 2 (QPSK 3/4) ", MCS_VALID}, + {"HT MCS 3 (16-QAM 1/2)", MCS_VALID}, + {"HT MCS 4 (16-QAM 3/4)", MCS_VALID}, + {"HT MCS 5 (64-QAM 2/3)", MCS_VALID}, + {"HT MCS 6 (64-QAM 3/4)", MCS_VALID}, + {"HT MCS 7 (64-QAM 5/6)", MCS_VALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_INVALID}, + {"INVALID ", MCS_VALID}, + }, + { + {"VHT MCS 0 (BPSK 1/2) ", MCS_VALID}, + {"VHT MCS 1 (QPSK 1/2) ", MCS_VALID}, + {"VHT MCS 2 (QPSK 3/4) ", MCS_VALID}, + {"VHT MCS 3 (16-QAM 1/2) ", MCS_VALID}, + {"VHT MCS 4 (16-QAM 3/4) ", MCS_VALID}, + {"VHT MCS 5 (64-QAM 2/3) ", MCS_VALID}, + {"VHT MCS 6 (64-QAM 3/4) ", MCS_VALID}, + {"VHT MCS 7 (64-QAM 5/6) ", MCS_VALID}, + {"VHT MCS 8 (256-QAM 3/4) ", MCS_VALID}, + {"VHT MCS 9 (256-QAM 5/6) ", MCS_VALID}, + {"VHT MCS 10 (1024-QAM 3/4)", MCS_VALID}, + {"VHT MCS 11 
(1024-QAM 5/6)", MCS_VALID}, + {"INVALID ", MCS_VALID}, + }, + { + {"HE MCS 0 (BPSK 1/2) ", MCS_VALID}, + {"HE MCS 1 (QPSK 1/2) ", MCS_VALID}, + {"HE MCS 2 (QPSK 3/4) ", MCS_VALID}, + {"HE MCS 3 (16-QAM 1/2) ", MCS_VALID}, + {"HE MCS 4 (16-QAM 3/4) ", MCS_VALID}, + {"HE MCS 5 (64-QAM 2/3) ", MCS_VALID}, + {"HE MCS 6 (64-QAM 3/4) ", MCS_VALID}, + {"HE MCS 7 (64-QAM 5/6) ", MCS_VALID}, + {"HE MCS 8 (256-QAM 3/4) ", MCS_VALID}, + {"HE MCS 9 (256-QAM 5/6) ", MCS_VALID}, + {"HE MCS 10 (1024-QAM 3/4)", MCS_VALID}, + {"HE MCS 11 (1024-QAM 5/6)", MCS_VALID}, + {"INVALID ", MCS_VALID}, + } +}; + +/** + * @brief Cpu ring map types + */ +enum dp_cpu_ring_map_types { + DP_DEFAULT_MAP, + DP_NSS_FIRST_RADIO_OFFLOADED_MAP, + DP_NSS_SECOND_RADIO_OFFLOADED_MAP, + DP_NSS_ALL_RADIO_OFFLOADED_MAP, + DP_CPU_RING_MAP_MAX +}; + +/** + * @brief Cpu to tx ring map + */ +static uint8_t dp_cpu_ring_map[DP_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS] = { + {0x0, 0x1, 0x2, 0x0}, + {0x1, 0x2, 0x1, 0x2}, + {0x0, 0x2, 0x0, 0x2}, + {0x2, 0x2, 0x2, 0x2} +}; + +/** + * @brief Select the type of statistics + */ +enum dp_stats_type { + STATS_FW = 0, + STATS_HOST = 1, + STATS_TYPE_MAX = 2, +}; + +/** + * @brief General Firmware statistics options + * + */ +enum dp_fw_stats { + TXRX_FW_STATS_INVALID = -1, +}; + +/** + * dp_stats_mapping_table - Firmware and Host statistics + * currently supported + */ +const int dp_stats_mapping_table[][STATS_TYPE_MAX] = { + {HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_TX_RATE, 
TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID}, + {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID}, + {HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID}, + /* Last ENUM for HTT FW STATS */ + {DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID}, + {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_AST_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS}, + {TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS}, +}; + +static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl, + struct cdp_peer *peer_hdl, + uint8_t *mac_addr, + enum cdp_txrx_ast_entry_type type, + uint32_t flags) +{ + + return dp_peer_add_ast((struct dp_soc *)soc_hdl, + (struct dp_peer *)peer_hdl, + mac_addr, + type, + flags); +} + +static void dp_peer_del_ast_wifi3(struct cdp_soc_t *soc_hdl, + void *ast_entry_hdl) +{ + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + qdf_spin_lock_bh(&soc->ast_lock); + dp_peer_del_ast((struct dp_soc *)soc_hdl, + (struct dp_ast_entry *)ast_entry_hdl); + qdf_spin_unlock_bh(&soc->ast_lock); +} + + +static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl, + struct cdp_peer *peer_hdl, + uint8_t *wds_macaddr, + uint32_t flags) +{ + int status; + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + struct dp_ast_entry *ast_entry = NULL; + + qdf_spin_lock_bh(&soc->ast_lock); + ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr); + + status = dp_peer_update_ast(soc, + (struct 
dp_peer *)peer_hdl, + ast_entry, + flags); + qdf_spin_unlock_bh(&soc->ast_lock); + + return status; +} + +/* + * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry + * @soc_handle: Datapath SOC handle + * @wds_macaddr: MAC address of the WDS entry to be added + * @vdev_hdl: vdev handle + * Return: None + */ +static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl, + uint8_t *wds_macaddr, void *vdev_hdl) +{ + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + struct dp_ast_entry *ast_entry = NULL; + + qdf_spin_lock_bh(&soc->ast_lock); + ast_entry = dp_peer_ast_hash_find(soc, wds_macaddr); + + if (ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) { + ast_entry->is_active = TRUE; + } + qdf_spin_unlock_bh(&soc->ast_lock); +} + +/* + * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry + * @soc: Datapath SOC handle + * @vdev_hdl: vdev handle + * + * Return: None + */ +static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl, + void *vdev_hdl) +{ + struct dp_soc *soc = (struct dp_soc *) soc_hdl; + struct dp_pdev *pdev; + struct dp_vdev *vdev; + struct dp_peer *peer; + struct dp_ast_entry *ase, *temp_ase; + int i; + + qdf_spin_lock_bh(&soc->ast_lock); + + for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) { + pdev = soc->pdev_list[i]; + qdf_spin_lock_bh(&pdev->vdev_list_lock); + DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) { + DP_VDEV_ITERATE_PEER_LIST(vdev, peer) { + DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) { + if (ase->type == + CDP_TXRX_AST_TYPE_STATIC) + continue; + ase->is_active = TRUE; + } + } + } + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + } + + qdf_spin_unlock_bh(&soc->ast_lock); +} + +/* + * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry + * @soc: Datapath SOC handle + * + * Return: None + */ +static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t *soc_hdl) +{ + struct dp_soc *soc = (struct dp_soc *) soc_hdl; + struct dp_pdev *pdev; + struct dp_vdev *vdev; + struct dp_peer *peer; + 
struct dp_ast_entry *ase, *temp_ase; + int i; + + qdf_spin_lock_bh(&soc->ast_lock); + + for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) { + pdev = soc->pdev_list[i]; + qdf_spin_lock_bh(&pdev->vdev_list_lock); + DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) { + DP_VDEV_ITERATE_PEER_LIST(vdev, peer) { + DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) { + if (ase->type == + CDP_TXRX_AST_TYPE_STATIC) + continue; + dp_peer_del_ast(soc, ase); + } + } + } + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + } + + qdf_spin_unlock_bh(&soc->ast_lock); +} + +static void *dp_peer_ast_hash_find_wifi3(struct cdp_soc_t *soc_hdl, + uint8_t *ast_mac_addr) +{ + struct dp_ast_entry *ast_entry; + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + qdf_spin_lock_bh(&soc->ast_lock); + ast_entry = dp_peer_ast_hash_find(soc, ast_mac_addr); + qdf_spin_unlock_bh(&soc->ast_lock); + return (void *)ast_entry; +} + +static uint8_t dp_peer_ast_get_pdev_id_wifi3(struct cdp_soc_t *soc_hdl, + void *ast_entry_hdl) +{ + return dp_peer_ast_get_pdev_id((struct dp_soc *)soc_hdl, + (struct dp_ast_entry *)ast_entry_hdl); +} + +static uint8_t dp_peer_ast_get_next_hop_wifi3(struct cdp_soc_t *soc_hdl, + void *ast_entry_hdl) +{ + return dp_peer_ast_get_next_hop((struct dp_soc *)soc_hdl, + (struct dp_ast_entry *)ast_entry_hdl); +} + +static void dp_peer_ast_set_type_wifi3( + struct cdp_soc_t *soc_hdl, + void *ast_entry_hdl, + enum cdp_txrx_ast_entry_type type) +{ + dp_peer_ast_set_type((struct dp_soc *)soc_hdl, + (struct dp_ast_entry *)ast_entry_hdl, + type); +} + + + +/** + * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs + * @ring_num: ring num of the ring being queried + * @grp_mask: the grp_mask array for the ring type in question. + * + * The grp_mask array is indexed by group number and the bit fields correspond + * to ring numbers. We are finding which interrupt group a ring belongs to. + * + * Return: the index in the grp_mask array with the ring number. 
+ * -QDF_STATUS_E_NOENT if no entry is found + */ +static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask) +{ + int ext_group_num; + int mask = 1 << ring_num; + + for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS; + ext_group_num++) { + if (mask & grp_mask[ext_group_num]) + return ext_group_num; + } + + return -QDF_STATUS_E_NOENT; +} + +static int dp_srng_calculate_msi_group(struct dp_soc *soc, + enum hal_ring_type ring_type, + int ring_num) +{ + int *grp_mask; + + switch (ring_type) { + case WBM2SW_RELEASE: + /* dp_tx_comp_handler - soc->tx_comp_ring */ + if (ring_num < 3) + grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0]; + + /* dp_rx_wbm_err_process - soc->rx_rel_ring */ + else if (ring_num == 3) { + /* sw treats this as a separate ring type */ + grp_mask = &soc->wlan_cfg_ctx-> + int_rx_wbm_rel_ring_mask[0]; + ring_num = 0; + } else { + qdf_assert(0); + return -QDF_STATUS_E_NOENT; + } + break; + + case REO_EXCEPTION: + /* dp_rx_err_process - &soc->reo_exception_ring */ + grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0]; + break; + + case REO_DST: + /* dp_rx_process - soc->reo_dest_ring */ + grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0]; + break; + + case REO_STATUS: + /* dp_reo_status_ring_handler - soc->reo_status_ring */ + grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0]; + break; + + /* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/ + case RXDMA_MONITOR_STATUS: + /* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */ + case RXDMA_MONITOR_DST: + /* dp_mon_process */ + grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0]; + break; + case RXDMA_DST: + /* dp_rxdma_err_process */ + grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0]; + break; + + case RXDMA_BUF: + grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0]; + break; + + case RXDMA_MONITOR_BUF: + /* TODO: support low_thresh interrupt */ + return -QDF_STATUS_E_NOENT; + break; + + case TCL_DATA: + case TCL_CMD: + case REO_CMD: 
+ case SW2WBM_RELEASE: + case WBM_IDLE_LINK: + /* normally empty SW_TO_HW rings */ + return -QDF_STATUS_E_NOENT; + break; + + case TCL_STATUS: + case REO_REINJECT: + /* misc unused rings */ + return -QDF_STATUS_E_NOENT; + break; + + case CE_SRC: + case CE_DST: + case CE_DST_STATUS: + /* CE_rings - currently handled by hif */ + default: + return -QDF_STATUS_E_NOENT; + break; + } + + return dp_srng_find_ring_in_mask(ring_num, grp_mask); +} + +static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params + *ring_params, int ring_type, int ring_num) +{ + int msi_group_number; + int msi_data_count; + int ret; + uint32_t msi_data_start, msi_irq_start, addr_low, addr_high; + + ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP", + &msi_data_count, &msi_data_start, + &msi_irq_start); + + if (ret) + return; + + msi_group_number = dp_srng_calculate_msi_group(soc, ring_type, + ring_num); + if (msi_group_number < 0) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, + FL("ring not part of an ext_group; ring_type: %d,ring_num %d"), + ring_type, ring_num); + ring_params->msi_addr = 0; + ring_params->msi_data = 0; + return; + } + + if (msi_group_number > msi_data_count) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN, + FL("2 msi_groups will share an msi; msi_group_num %d"), + msi_group_number); + + QDF_ASSERT(0); + } + + pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high); + + ring_params->msi_addr = addr_low; + ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32); + ring_params->msi_data = (msi_group_number % msi_data_count) + + msi_data_start; + ring_params->flags |= HAL_SRNG_MSI_INTR; +} + +/** + * dp_print_ast_stats() - Dump AST table contents + * @soc: Datapath soc handle + * + * return void + */ +#ifdef FEATURE_AST +static void dp_print_ast_stats(struct dp_soc *soc) +{ + uint8_t i; + uint8_t num_entries = 0; + struct dp_vdev *vdev; + struct dp_pdev *pdev; + struct dp_peer *peer; + struct dp_ast_entry *ase, *tmp_ase; + 
char type[5][10] = {"NONE", "STATIC", "WDS", "MEC", "HMWDS"}; + + DP_PRINT_STATS("AST Stats:"); + DP_PRINT_STATS(" Entries Added = %d", soc->stats.ast.added); + DP_PRINT_STATS(" Entries Deleted = %d", soc->stats.ast.deleted); + DP_PRINT_STATS(" Entries Agedout = %d", soc->stats.ast.aged_out); + DP_PRINT_STATS("AST Table:"); + for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) { + pdev = soc->pdev_list[i]; + qdf_spin_lock_bh(&pdev->vdev_list_lock); + DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) { + DP_VDEV_ITERATE_PEER_LIST(vdev, peer) { + DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) { + DP_PRINT_STATS("%6d mac_addr = %pM" + " peer_mac_addr = %pM" + " type = %s" + " next_hop = %d" + " is_active = %d" + " is_bss = %d" + " ast_idx = %d" + " pdev_id = %d" + " vdev_id = %d", + ++num_entries, + ase->mac_addr.raw, + ase->peer->mac_addr.raw, + type[ase->type], + ase->next_hop, + ase->is_active, + ase->is_bss, + ase->ast_idx, + ase->pdev_id, + ase->vdev_id); + } + } + } + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + } +} +#else +static void dp_print_ast_stats(struct dp_soc *soc) +{ + DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST"); + return; +} +#endif + +static void dp_print_peer_table(struct dp_vdev *vdev) +{ + struct dp_peer *peer = NULL; + + DP_PRINT_STATS("Dumping Peer Table Stats:"); + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + if (!peer) { + DP_PRINT_STATS("Invalid Peer"); + return; + } + DP_PRINT_STATS(" peer_mac_addr = %pM" + " nawds_enabled = %d" + " bss_peer = %d" + " wapi = %d" + " wds_enabled = %d" + " delete in progress = %d", + peer->mac_addr.raw, + peer->nawds_enabled, + peer->bss_peer, + peer->wapi, + peer->wds_enabled, + peer->delete_in_progress); + } +} + +/* + * dp_setup_srng - Internal function to setup SRNG rings used by data path + */ +static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng, + int ring_type, int ring_num, int mac_id, uint32_t num_entries) +{ + void *hal_soc = soc->hal_soc; + uint32_t entry_size = 
hal_srng_get_entrysize(hal_soc, ring_type); + /* TODO: See if we should get align size from hal */ + uint32_t ring_base_align = 8; + struct hal_srng_params ring_params; + uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type); + + /* TODO: Currently hal layer takes care of endianness related settings. + * See if these settings need to passed from DP layer + */ + ring_params.flags = 0; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, + FL("Ring type: %d, num:%d"), ring_type, ring_num); + + num_entries = (num_entries > max_entries) ? max_entries : num_entries; + srng->hal_srng = NULL; + srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1; + srng->num_entries = num_entries; + srng->base_vaddr_unaligned = qdf_mem_alloc_consistent( + soc->osdev, soc->osdev->dev, srng->alloc_size, + &(srng->base_paddr_unaligned)); + + if (!srng->base_vaddr_unaligned) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("alloc failed - ring_type: %d, ring_num %d"), + ring_type, ring_num); + return QDF_STATUS_E_NOMEM; + } + + ring_params.ring_base_vaddr = srng->base_vaddr_unaligned + + ((unsigned long)srng->base_vaddr_unaligned % ring_base_align); + ring_params.ring_base_paddr = srng->base_paddr_unaligned + + ((unsigned long)(ring_params.ring_base_vaddr) - + (unsigned long)srng->base_vaddr_unaligned); + ring_params.num_entries = num_entries; + + if (soc->intr_mode == DP_INTR_MSI) { + dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Using MSI for ring_type: %d, ring_num %d"), + ring_type, ring_num); + + } else { + ring_params.msi_data = 0; + ring_params.msi_addr = 0; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Skipping MSI for ring_type: %d, ring_num %d"), + ring_type, ring_num); + } + + /* + * Setup interrupt timer and batch counter thresholds for + * interrupt mitigation based on ring type + */ + if (ring_type == REO_DST) { + ring_params.intr_timer_thres_us = + 
wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx); + ring_params.intr_batch_cntr_thres_entries = + wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx); + } else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) { + ring_params.intr_timer_thres_us = + wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx); + ring_params.intr_batch_cntr_thres_entries = + wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx); + } else { + ring_params.intr_timer_thres_us = + wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx); + ring_params.intr_batch_cntr_thres_entries = + wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx); + } + + /* Enable low threshold interrupts for rx buffer rings (regular and + * monitor buffer rings. + * TODO: See if this is required for any other ring + */ + if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) || + (ring_type == RXDMA_MONITOR_STATUS)) { + /* TODO: Setting low threshold to 1/8th of ring size + * see if this needs to be configurable + */ + ring_params.low_threshold = num_entries >> 3; + ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE; + ring_params.intr_timer_thres_us = + wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx); + ring_params.intr_batch_cntr_thres_entries = 0; + } + + srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num, + mac_id, &ring_params); + + if (!srng->hal_srng) { + qdf_mem_free_consistent(soc->osdev, soc->osdev->dev, + srng->alloc_size, + srng->base_vaddr_unaligned, + srng->base_paddr_unaligned, 0); + } + + return 0; +} + +/** + * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path + * Any buffers allocated and attached to ring entries are expected to be freed + * before calling this function. 
+ */ +static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng, + int ring_type, int ring_num) +{ + if (!srng->hal_srng) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Ring type: %d, num:%d not setup"), + ring_type, ring_num); + return; + } + + hal_srng_cleanup(soc->hal_soc, srng->hal_srng); + + qdf_mem_free_consistent(soc->osdev, soc->osdev->dev, + srng->alloc_size, + srng->base_vaddr_unaligned, + srng->base_paddr_unaligned, 0); + srng->hal_srng = NULL; +} + +/* TODO: Need this interface from HIF */ +void *hif_get_hal_handle(void *hif_handle); + +/* + * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts + * @dp_ctx: DP SOC handle + * @budget: Number of frames/descriptors that can be processed in one shot + * + * Return: remaining budget/quota for the soc device + */ +static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget) +{ + struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx; + struct dp_soc *soc = int_ctx->soc; + int ring = 0; + uint32_t work_done = 0; + int budget = dp_budget; + uint8_t tx_mask = int_ctx->tx_ring_mask; + uint8_t rx_mask = int_ctx->rx_ring_mask; + uint8_t rx_err_mask = int_ctx->rx_err_ring_mask; + uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask; + uint8_t reo_status_mask = int_ctx->reo_status_ring_mask; + uint32_t remaining_quota = dp_budget; + struct dp_pdev *pdev = NULL; + int mac_id; + + /* Process Tx completion interrupts first to return back buffers */ + while (tx_mask) { + if (tx_mask & 0x1) { + work_done = dp_tx_comp_handler(soc, + soc->tx_comp_ring[ring].hal_srng, + remaining_quota); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "tx mask 0x%x ring %d, budget %d, work_done %d", + tx_mask, ring, budget, work_done); + + budget -= work_done; + if (budget <= 0) + goto budget_done; + + remaining_quota = budget; + } + tx_mask = tx_mask >> 1; + ring++; + } + + + /* Process REO Exception ring interrupt */ + if (rx_err_mask) { + work_done = dp_rx_err_process(soc, + 
soc->reo_exception_ring.hal_srng, + remaining_quota); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "REO Exception Ring: work_done %d budget %d", + work_done, budget); + + budget -= work_done; + if (budget <= 0) { + goto budget_done; + } + remaining_quota = budget; + } + + /* Process Rx WBM release ring interrupt */ + if (rx_wbm_rel_mask) { + work_done = dp_rx_wbm_err_process(soc, + soc->rx_rel_ring.hal_srng, remaining_quota); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "WBM Release Ring: work_done %d budget %d", + work_done, budget); + + budget -= work_done; + if (budget <= 0) { + goto budget_done; + } + remaining_quota = budget; + } + + /* Process Rx interrupts */ + if (rx_mask) { + for (ring = 0; ring < soc->num_reo_dest_rings; ring++) { + if (rx_mask & (1 << ring)) { + work_done = dp_rx_process(int_ctx, + soc->reo_dest_ring[ring].hal_srng, + remaining_quota); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "rx mask 0x%x ring %d, work_done %d budget %d", + rx_mask, ring, work_done, budget); + + budget -= work_done; + if (budget <= 0) + goto budget_done; + remaining_quota = budget; + } + } + for (ring = 0; ring < MAX_RX_MAC_RINGS; ring++) { + work_done = dp_rxdma_err_process(soc, ring, + remaining_quota); + budget -= work_done; + } + } + + if (reo_status_mask) + dp_reo_status_ring_handler(soc); + + /* Process LMAC interrupts */ + for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) { + pdev = soc->pdev_list[ring]; + if (pdev == NULL) + continue; + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, + pdev->pdev_id); + + if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) { + work_done = dp_mon_process(soc, mac_for_pdev, + remaining_quota); + budget -= work_done; + if (budget <= 0) + goto budget_done; + remaining_quota = budget; + } + + if (int_ctx->rxdma2host_ring_mask & + (1 << mac_for_pdev)) { + work_done = dp_rxdma_err_process(soc, + mac_for_pdev, + remaining_quota); + 
budget -= work_done; + if (budget <= 0) + goto budget_done; + remaining_quota = budget; + } + + if (int_ctx->host2rxdma_ring_mask & + (1 << mac_for_pdev)) { + union dp_rx_desc_list_elem_t *desc_list = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + struct dp_srng *rx_refill_buf_ring = + &pdev->rx_refill_buf_ring; + + DP_STATS_INC(pdev, replenish.low_thresh_intrs, + 1); + dp_rx_buffers_replenish(soc, mac_for_pdev, + rx_refill_buf_ring, + &soc->rx_desc_buf[mac_for_pdev], 0, + &desc_list, &tail); + } + } + } + + qdf_lro_flush(int_ctx->lro_ctx); + +budget_done: + return dp_budget - budget; +} + +#ifdef DP_INTR_POLL_BASED +/* dp_interrupt_timer()- timer poll for interrupts + * + * @arg: SoC Handle + * + * Return: + * + */ +static void dp_interrupt_timer(void *arg) +{ + struct dp_soc *soc = (struct dp_soc *) arg; + int i; + + if (qdf_atomic_read(&soc->cmn_init_done)) { + for (i = 0; + i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) + dp_service_srngs(&soc->intr_ctx[i], 0xffff); + + qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS); + } +} + +/* + * dp_soc_interrupt_attach_poll() - Register handlers for DP interrupts + * @txrx_soc: DP SOC handle + * + * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS†number of NAPI + * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and + * rx_monitor_ring mask to indicate the rings that are processed by the handler. + * + * Return: 0 for success. nonzero for failure. 
+ */ +static QDF_STATUS dp_soc_interrupt_attach_poll(void *txrx_soc) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + int i; + + soc->intr_mode = DP_INTR_POLL; + + for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) { + soc->intr_ctx[i].dp_intr_id = i; + soc->intr_ctx[i].tx_ring_mask = + wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i); + soc->intr_ctx[i].rx_ring_mask = + wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i); + soc->intr_ctx[i].rx_mon_ring_mask = + wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i); + soc->intr_ctx[i].rx_err_ring_mask = + wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i); + soc->intr_ctx[i].rx_wbm_rel_ring_mask = + wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i); + soc->intr_ctx[i].reo_status_ring_mask = + wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i); + soc->intr_ctx[i].rxdma2host_ring_mask = + wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i); + soc->intr_ctx[i].soc = soc; + soc->intr_ctx[i].lro_ctx = qdf_lro_init(); + } + + qdf_timer_init(soc->osdev, &soc->int_timer, + dp_interrupt_timer, (void *)soc, + QDF_TIMER_TYPE_WAKE_APPS); + + return QDF_STATUS_SUCCESS; +} + +#if defined(CONFIG_MCL) +extern int con_mode_monitor; +static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc); +/* + * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts + * @txrx_soc: DP SOC handle + * + * Call the appropriate attach function based on the mode of operation. + * This is a WAR for enabling monitor mode. + * + * Return: 0 for success. nonzero for failure. 
+ */ +static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + + if (!(soc->wlan_cfg_ctx->napi_enabled) || + con_mode_monitor == QDF_GLOBAL_MONITOR_MODE) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: Poll mode", __func__); + return dp_soc_interrupt_attach_poll(txrx_soc); + } else { + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: Interrupt mode", __func__); + return dp_soc_interrupt_attach(txrx_soc); + } +} +#else +static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc) +{ + return dp_soc_interrupt_attach_poll(txrx_soc); +} +#endif +#endif + +static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc, + int intr_ctx_num, int *irq_id_map, int *num_irq_r) +{ + int j; + int num_irq = 0; + + int tx_mask = + wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num); + int rx_mask = + wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num); + int rx_mon_mask = + wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num); + int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + + for (j = 0; j < HIF_MAX_GRP_IRQ; j++) { + + if (tx_mask & (1 << j)) { + irq_id_map[num_irq++] = + (wbm2host_tx_completions_ring1 - j); + } + + if (rx_mask & (1 << j)) { + irq_id_map[num_irq++] = + (reo2host_destination_ring1 - j); + } + + if (rxdma2host_ring_mask & (1 << j)) { + irq_id_map[num_irq++] = + rxdma2host_destination_ring_mac1 - + wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j); + } + + if (host2rxdma_ring_mask & (1 << j)) { + 
irq_id_map[num_irq++] = + host2rxdma_host_buf_ring_mac1 - + wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j); + } + + if (rx_mon_mask & (1 << j)) { + irq_id_map[num_irq++] = + ppdu_end_interrupts_mac1 - + wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j); + irq_id_map[num_irq++] = + rxdma2host_monitor_status_ring_mac1 - + wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j); + } + + if (rx_wbm_rel_ring_mask & (1 << j)) + irq_id_map[num_irq++] = wbm2host_rx_release; + + if (rx_err_ring_mask & (1 << j)) + irq_id_map[num_irq++] = reo2host_exception; + + if (reo_status_ring_mask & (1 << j)) + irq_id_map[num_irq++] = reo2host_status; + + } + *num_irq_r = num_irq; +} + +static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc, + int intr_ctx_num, int *irq_id_map, int *num_irq_r, + int msi_vector_count, int msi_vector_start) +{ + int tx_mask = wlan_cfg_get_tx_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int rx_mask = wlan_cfg_get_rx_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask( + soc->wlan_cfg_ctx, intr_ctx_num); + + unsigned int vector = + (intr_ctx_num % msi_vector_count) + msi_vector_start; + int num_irq = 0; + + soc->intr_mode = DP_INTR_MSI; + + if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask | + rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask) + irq_id_map[num_irq++] = + pld_get_msi_irq(soc->osdev->dev, vector); + + *num_irq_r = num_irq; +} + +static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num, + int *irq_id_map, int *num_irq) +{ + int msi_vector_count, ret; + uint32_t 
msi_base_data, msi_vector_start; + + ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP", + &msi_vector_count, + &msi_base_data, + &msi_vector_start); + if (ret) + return dp_soc_interrupt_map_calculate_integrated(soc, + intr_ctx_num, irq_id_map, num_irq); + + else + dp_soc_interrupt_map_calculate_msi(soc, + intr_ctx_num, irq_id_map, num_irq, + msi_vector_count, msi_vector_start); +} + +/* + * dp_soc_interrupt_attach() - Register handlers for DP interrupts + * @txrx_soc: DP SOC handle + * + * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS†number of NAPI + * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and + * rx_monitor_ring mask to indicate the rings that are processed by the handler. + * + * Return: 0 for success. nonzero for failure. + */ +static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + + int i = 0; + int num_irq = 0; + + for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) { + int ret = 0; + + /* Map of IRQ ids registered with one interrupt context */ + int irq_id_map[HIF_MAX_GRP_IRQ]; + + int tx_mask = + wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i); + int rx_mask = + wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i); + int rx_mon_mask = + wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i); + int rx_err_ring_mask = + wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i); + int rx_wbm_rel_ring_mask = + wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i); + int reo_status_ring_mask = + wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i); + int rxdma2host_ring_mask = + wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i); + int host2rxdma_ring_mask = + wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i); + + + soc->intr_ctx[i].dp_intr_id = i; + soc->intr_ctx[i].tx_ring_mask = tx_mask; + soc->intr_ctx[i].rx_ring_mask = rx_mask; + soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask; + soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask; + 
soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask; + soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask; + soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask; + soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask; + + soc->intr_ctx[i].soc = soc; + + num_irq = 0; + + dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0], + &num_irq); + + ret = hif_register_ext_group(soc->hif_handle, + num_irq, irq_id_map, dp_service_srngs, + &soc->intr_ctx[i], "dp_intr", + HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT); + + if (ret) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("failed, ret = %d"), ret); + + return QDF_STATUS_E_FAILURE; + } + soc->intr_ctx[i].lro_ctx = qdf_lro_init(); + } + + hif_configure_ext_group_interrupts(soc->hif_handle); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts + * @txrx_soc: DP SOC handle + * + * Return: void + */ +static void dp_soc_interrupt_detach(void *txrx_soc) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + int i; + + if (soc->intr_mode == DP_INTR_POLL) { + qdf_timer_stop(&soc->int_timer); + qdf_timer_free(&soc->int_timer); + } else { + hif_deregister_exec_group(soc->hif_handle, "dp_intr"); + } + + for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) { + soc->intr_ctx[i].tx_ring_mask = 0; + soc->intr_ctx[i].rx_ring_mask = 0; + soc->intr_ctx[i].rx_mon_ring_mask = 0; + soc->intr_ctx[i].rx_err_ring_mask = 0; + soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0; + soc->intr_ctx[i].reo_status_ring_mask = 0; + soc->intr_ctx[i].rxdma2host_ring_mask = 0; + soc->intr_ctx[i].host2rxdma_ring_mask = 0; + + qdf_lro_deinit(soc->intr_ctx[i].lro_ctx); + } +} + +#define AVG_MAX_MPDUS_PER_TID 128 +#define AVG_TIDS_PER_CLIENT 2 +#define AVG_FLOWS_PER_TID 2 +#define AVG_MSDUS_PER_FLOW 128 +#define AVG_MSDUS_PER_MPDU 4 + +/* + * Allocate and setup link descriptor pool that will be used by HW for + * various link and queue 
descriptors and managed by WBM + */ +static int dp_hw_link_desc_pool_setup(struct dp_soc *soc) +{ + int link_desc_size = hal_get_link_desc_size(soc->hal_soc); + int link_desc_align = hal_get_link_desc_align(soc->hal_soc); + uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx); + uint32_t num_mpdus_per_link_desc = + hal_num_mpdus_per_link_desc(soc->hal_soc); + uint32_t num_msdus_per_link_desc = + hal_num_msdus_per_link_desc(soc->hal_soc); + uint32_t num_mpdu_links_per_queue_desc = + hal_num_mpdu_links_per_queue_desc(soc->hal_soc); + uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx); + uint32_t total_link_descs, total_mem_size; + uint32_t num_mpdu_link_descs, num_mpdu_queue_descs; + uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs; + uint32_t num_link_desc_banks; + uint32_t last_bank_size = 0; + uint32_t entry_size, num_entries; + int i; + uint32_t desc_id = 0; + + /* Only Tx queue descriptors are allocated from common link descriptor + * pool Rx queue descriptors are not included in this because (REO queue + * extension descriptors) they are expected to be allocated contiguously + * with REO queue descriptors + */ + num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT * + AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc; + + num_mpdu_queue_descs = num_mpdu_link_descs / + num_mpdu_links_per_queue_desc; + + num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT * + AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) / + num_msdus_per_link_desc; + + num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT * + AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6; + + num_entries = num_mpdu_link_descs + num_mpdu_queue_descs + + num_tx_msdu_link_descs + num_rx_msdu_link_descs; + + /* Round up to power of 2 */ + total_link_descs = 1; + while (total_link_descs < num_entries) + total_link_descs <<= 1; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + FL("total_link_descs: %u, link_desc_size: %d"), + total_link_descs, 
link_desc_size); + total_mem_size = total_link_descs * link_desc_size; + + total_mem_size += link_desc_align; + + if (total_mem_size <= max_alloc_size) { + num_link_desc_banks = 0; + last_bank_size = total_mem_size; + } else { + num_link_desc_banks = (total_mem_size) / + (max_alloc_size - link_desc_align); + last_bank_size = total_mem_size % + (max_alloc_size - link_desc_align); + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + FL("total_mem_size: %d, num_link_desc_banks: %u"), + total_mem_size, num_link_desc_banks); + + for (i = 0; i < num_link_desc_banks; i++) { + soc->link_desc_banks[i].base_vaddr_unaligned = + qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev, + max_alloc_size, + &(soc->link_desc_banks[i].base_paddr_unaligned)); + soc->link_desc_banks[i].size = max_alloc_size; + + soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)( + soc->link_desc_banks[i].base_vaddr_unaligned) + + ((unsigned long)( + soc->link_desc_banks[i].base_vaddr_unaligned) % + link_desc_align)); + + soc->link_desc_banks[i].base_paddr = (unsigned long)( + soc->link_desc_banks[i].base_paddr_unaligned) + + ((unsigned long)(soc->link_desc_banks[i].base_vaddr) - + (unsigned long)( + soc->link_desc_banks[i].base_vaddr_unaligned)); + + if (!soc->link_desc_banks[i].base_vaddr_unaligned) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Link descriptor memory alloc failed")); + goto fail; + } + } + + if (last_bank_size) { + /* Allocate last bank in case total memory required is not exact + * multiple of max_alloc_size + */ + soc->link_desc_banks[i].base_vaddr_unaligned = + qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev, + last_bank_size, + &(soc->link_desc_banks[i].base_paddr_unaligned)); + soc->link_desc_banks[i].size = last_bank_size; + + soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long) + (soc->link_desc_banks[i].base_vaddr_unaligned) + + ((unsigned long)( + soc->link_desc_banks[i].base_vaddr_unaligned) % + link_desc_align)); + + 
soc->link_desc_banks[i].base_paddr = + (unsigned long)( + soc->link_desc_banks[i].base_paddr_unaligned) + + ((unsigned long)(soc->link_desc_banks[i].base_vaddr) - + (unsigned long)( + soc->link_desc_banks[i].base_vaddr_unaligned)); + } + + + /* Allocate and setup link descriptor idle list for HW internal use */ + entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK); + total_mem_size = entry_size * total_link_descs; + + if (total_mem_size <= max_alloc_size) { + void *desc; + + if (dp_srng_setup(soc, &soc->wbm_idle_link_ring, + WBM_IDLE_LINK, 0, 0, total_link_descs)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Link desc idle ring setup failed")); + goto fail; + } + + hal_srng_access_start_unlocked(soc->hal_soc, + soc->wbm_idle_link_ring.hal_srng); + + for (i = 0; i < MAX_LINK_DESC_BANKS && + soc->link_desc_banks[i].base_paddr; i++) { + uint32_t num_entries = (soc->link_desc_banks[i].size - + ((unsigned long)( + soc->link_desc_banks[i].base_vaddr) - + (unsigned long)( + soc->link_desc_banks[i].base_vaddr_unaligned))) + / link_desc_size; + unsigned long paddr = (unsigned long)( + soc->link_desc_banks[i].base_paddr); + + while (num_entries && (desc = hal_srng_src_get_next( + soc->hal_soc, + soc->wbm_idle_link_ring.hal_srng))) { + hal_set_link_desc_addr(desc, + LINK_DESC_COOKIE(desc_id, i), paddr); + num_entries--; + desc_id++; + paddr += link_desc_size; + } + } + hal_srng_access_end_unlocked(soc->hal_soc, + soc->wbm_idle_link_ring.hal_srng); + } else { + uint32_t num_scatter_bufs; + uint32_t num_entries_per_buf; + uint32_t rem_entries; + uint8_t *scatter_buf_ptr; + uint16_t scatter_buf_num; + + soc->wbm_idle_scatter_buf_size = + hal_idle_list_scatter_buf_size(soc->hal_soc); + num_entries_per_buf = hal_idle_scatter_buf_num_entries( + soc->hal_soc, soc->wbm_idle_scatter_buf_size); + num_scatter_bufs = hal_idle_list_num_scatter_bufs( + soc->hal_soc, total_mem_size, + soc->wbm_idle_scatter_buf_size); + + if (num_scatter_bufs > 
MAX_IDLE_SCATTER_BUFS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("scatter bufs size out of bounds")); + goto fail; + } + + for (i = 0; i < num_scatter_bufs; i++) { + soc->wbm_idle_scatter_buf_base_vaddr[i] = + qdf_mem_alloc_consistent(soc->osdev, + soc->osdev->dev, + soc->wbm_idle_scatter_buf_size, + &(soc->wbm_idle_scatter_buf_base_paddr[i])); + if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL("Scatter list memory alloc failed")); + goto fail; + } + } + + /* Populate idle list scatter buffers with link descriptor + * pointers + */ + scatter_buf_num = 0; + scatter_buf_ptr = (uint8_t *)( + soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]); + rem_entries = num_entries_per_buf; + + for (i = 0; i < MAX_LINK_DESC_BANKS && + soc->link_desc_banks[i].base_paddr; i++) { + uint32_t num_link_descs = + (soc->link_desc_banks[i].size - + ((unsigned long)( + soc->link_desc_banks[i].base_vaddr) - + (unsigned long)( + soc->link_desc_banks[i].base_vaddr_unaligned))) + / link_desc_size; + unsigned long paddr = (unsigned long)( + soc->link_desc_banks[i].base_paddr); + + while (num_link_descs) { + hal_set_link_desc_addr((void *)scatter_buf_ptr, + LINK_DESC_COOKIE(desc_id, i), paddr); + num_link_descs--; + desc_id++; + paddr += link_desc_size; + rem_entries--; + if (rem_entries) { + scatter_buf_ptr += entry_size; + } else { + rem_entries = num_entries_per_buf; + scatter_buf_num++; + + if (scatter_buf_num >= num_scatter_bufs) + break; + + scatter_buf_ptr = (uint8_t *)( + soc->wbm_idle_scatter_buf_base_vaddr[ + scatter_buf_num]); + } + } + } + /* Setup link descriptor idle list in HW */ + hal_setup_link_idle_list(soc->hal_soc, + soc->wbm_idle_scatter_buf_base_paddr, + soc->wbm_idle_scatter_buf_base_vaddr, + num_scatter_bufs, soc->wbm_idle_scatter_buf_size, + (uint32_t)(scatter_buf_ptr - + (uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[ + scatter_buf_num-1])), total_link_descs); + } + return 0; + 
+fail: + if (soc->wbm_idle_link_ring.hal_srng) { + dp_srng_cleanup(soc->hal_soc, &soc->wbm_idle_link_ring, + WBM_IDLE_LINK, 0); + } + + for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) { + if (soc->wbm_idle_scatter_buf_base_vaddr[i]) { + qdf_mem_free_consistent(soc->osdev, soc->osdev->dev, + soc->wbm_idle_scatter_buf_size, + soc->wbm_idle_scatter_buf_base_vaddr[i], + soc->wbm_idle_scatter_buf_base_paddr[i], 0); + soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL; + } + } + + for (i = 0; i < MAX_LINK_DESC_BANKS; i++) { + if (soc->link_desc_banks[i].base_vaddr_unaligned) { + qdf_mem_free_consistent(soc->osdev, soc->osdev->dev, + soc->link_desc_banks[i].size, + soc->link_desc_banks[i].base_vaddr_unaligned, + soc->link_desc_banks[i].base_paddr_unaligned, + 0); + soc->link_desc_banks[i].base_vaddr_unaligned = NULL; + } + } + return QDF_STATUS_E_FAILURE; +} + +/* + * Free link descriptor pool that was setup HW + */ +static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc) +{ + int i; + + if (soc->wbm_idle_link_ring.hal_srng) { + dp_srng_cleanup(soc, &soc->wbm_idle_link_ring, + WBM_IDLE_LINK, 0); + } + + for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) { + if (soc->wbm_idle_scatter_buf_base_vaddr[i]) { + qdf_mem_free_consistent(soc->osdev, soc->osdev->dev, + soc->wbm_idle_scatter_buf_size, + soc->wbm_idle_scatter_buf_base_vaddr[i], + soc->wbm_idle_scatter_buf_base_paddr[i], 0); + soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL; + } + } + + for (i = 0; i < MAX_LINK_DESC_BANKS; i++) { + if (soc->link_desc_banks[i].base_vaddr_unaligned) { + qdf_mem_free_consistent(soc->osdev, soc->osdev->dev, + soc->link_desc_banks[i].size, + soc->link_desc_banks[i].base_vaddr_unaligned, + soc->link_desc_banks[i].base_paddr_unaligned, + 0); + soc->link_desc_banks[i].base_vaddr_unaligned = NULL; + } + } +} + +/* TODO: Following should be configurable */ +#define WBM_RELEASE_RING_SIZE 64 +#define TCL_CMD_RING_SIZE 32 +#define TCL_STATUS_RING_SIZE 32 +#if defined(QCA_WIFI_QCA6290) +#define 
#else
#define REO_DST_RING_SIZE 2048
#endif
#define REO_REINJECT_RING_SIZE 32
#define RX_RELEASE_RING_SIZE 1024
#define REO_EXCEPTION_RING_SIZE 128
#define REO_CMD_RING_SIZE 64
#define REO_STATUS_RING_SIZE 128
#define RXDMA_BUF_RING_SIZE 1024
#define RXDMA_REFILL_RING_SIZE 4096
#define RXDMA_MONITOR_BUF_RING_SIZE 4096
#define RXDMA_MONITOR_DST_RING_SIZE 2048
#define RXDMA_MONITOR_STATUS_RING_SIZE 1024
#define RXDMA_MONITOR_DESC_RING_SIZE 4096
#define RXDMA_ERR_DST_RING_SIZE 1024

/*
 * dp_wds_aging_timer_fn() - Timer callback function for WDS aging
 * @soc_hdl: Datapath SOC handle (opaque)
 *
 * This is a timer function used to age out stale AST nodes from
 * AST table. A WDS entry that was not marked active since the previous
 * pass is deleted; entries seen active get one more grace period.
 * Re-arms itself while common init is done.
 */
#ifdef FEATURE_WDS
static void dp_wds_aging_timer_fn(void *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	qdf_spin_lock_bh(&soc->ast_lock);

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					/*
					 * Do not expire static ast entries
					 * and HM WDS entries
					 */
					if (ase->type != CDP_TXRX_AST_TYPE_WDS)
						continue;

					/* First strike: clear the active flag
					 * and give one more aging period */
					if (ase->is_active) {
						ase->is_active = FALSE;
						continue;
					}

					DP_STATS_INC(soc, ast.aged_out, 1);
					dp_peer_del_ast(soc, ase);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	if (qdf_atomic_read(&soc->cmn_init_done))
		qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
}


/*
 * dp_soc_wds_attach() - Setup WDS timer and AST table
 * @soc: Datapath SOC handle
 *
 * Return: None
 */
static void dp_soc_wds_attach(struct dp_soc *soc)
{
	qdf_timer_init(soc->osdev, &soc->wds_aging_timer,
		dp_wds_aging_timer_fn, (void *)soc,
		QDF_TIMER_TYPE_WAKE_APPS);

	qdf_timer_mod(&soc->wds_aging_timer, DP_WDS_AGING_TIMER_DEFAULT_MS);
}

/*
 * dp_soc_wds_detach() - Detach WDS data structures and timers
 * @soc: Datapath SOC handle
 *
 * Return: None
 */
static void dp_soc_wds_detach(struct dp_soc *soc)
{
	qdf_timer_stop(&soc->wds_aging_timer);
	qdf_timer_free(&soc->wds_aging_timer);
}
#else
static void dp_soc_wds_attach(struct dp_soc *soc)
{
}

static void dp_soc_wds_detach(struct dp_soc *soc)
{
}
#endif

/*
 * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
 * @soc: Datapath soc handler
 *
 * This api resets the default cpu ring map depending on how many radios
 * are offloaded to NSS (nss_config 1, 2, or all).
 */

static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
{
	uint8_t i;
	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
		if (nss_config == 1) {
			/*
			 * Setting Tx ring map for one nss offloaded radio
			 */
			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
		} else if (nss_config == 2) {
			/*
			 * Setting Tx ring for two nss offloaded radios
			 */
			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
		} else {
			/*
			 * Setting Tx ring map for all nss offloaded radios
			 */
			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_ALL_RADIO_OFFLOADED_MAP][i];
		}
	}
}

/*
 * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
 * @soc - DP soc handle
 * @ring_type - ring type
 * @ring_num - ring_num
 *
 * return 0 or 1 (non-zero when bit ring_num of the nss config is set for
 * an offload-capable ring type)
 */
static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
{
	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
	uint8_t status = 0;

	switch (ring_type) {
	case WBM2SW_RELEASE:
	case REO_DST:
	case RXDMA_BUF:
		status = ((nss_config) & (1 << ring_num));
		break;
	default:
		break;
	}

	return status;
}

/*
 * dp_soc_reset_intr_mask() - reset interrupt mask
 * @dp_soc - DP Soc
handle + * + * Return: Return void + */ +static void dp_soc_reset_intr_mask(struct dp_soc *soc) +{ + uint8_t j; + int *grp_mask = NULL; + int group_number, mask, num_ring; + + /* number of tx ring */ + num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx); + + /* + * group mask for tx completion ring. + */ + grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0]; + + /* loop and reset the mask for only offloaded ring */ + for (j = 0; j < num_ring; j++) { + if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) { + continue; + } + + /* + * Group number corresponding to tx offloaded ring. + */ + group_number = dp_srng_find_ring_in_mask(j, grp_mask); + if (group_number < 0) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + FL("ring not part of any group; ring_type: %d,ring_num %d"), + WBM2SW_RELEASE, j); + return; + } + + /* reset the tx mask for offloaded ring */ + mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number); + mask &= (~(1 << j)); + + /* + * reset the interrupt mask for offloaded ring. + */ + wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask); + } + + /* number of rx rings */ + num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx); + + /* + * group mask for reo destination ring. + */ + grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0]; + + /* loop and reset the mask for only offloaded ring */ + for (j = 0; j < num_ring; j++) { + if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) { + continue; + } + + /* + * Group number corresponding to rx offloaded ring. + */ + group_number = dp_srng_find_ring_in_mask(j, grp_mask); + if (group_number < 0) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + FL("ring not part of any group; ring_type: %d,ring_num %d"), + REO_DST, j); + return; + } + + /* set the interrupt mask for offloaded ring */ + mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number); + mask &= (~(1 << j)); + + /* + * set the interrupt mask to zero for rx offloaded radio. 
+ */ + wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask); + } + + /* + * group mask for Rx buffer refill ring + */ + grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0]; + + /* loop and reset the mask for only offloaded ring */ + for (j = 0; j < MAX_PDEV_CNT; j++) { + if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) { + continue; + } + + /* + * Group number corresponding to rx offloaded ring. + */ + group_number = dp_srng_find_ring_in_mask(j, grp_mask); + if (group_number < 0) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + FL("ring not part of any group; ring_type: %d,ring_num %d"), + REO_DST, j); + return; + } + + /* set the interrupt mask for offloaded ring */ + mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, + group_number); + mask &= (~(1 << j)); + + /* + * set the interrupt mask to zero for rx offloaded radio. + */ + wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx, + group_number, mask); + } +} + +#ifdef IPA_OFFLOAD +/** + * dp_reo_remap_config() - configure reo remap register value based + * nss configuration. + * based on offload_radio value below remap configuration + * get applied. + * 0 - both Radios handled by host (remap rings 1, 2, 3 & 4) + * 1 - 1st Radio handled by NSS (remap rings 2, 3 & 4) + * 2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4) + * 3 - both Radios handled by NSS (remap not required) + * 4 - IPA OFFLOAD enabled (remap rings 1,2 & 3) + * + * @remap1: output parameter indicates reo remap 1 register value + * @remap2: output parameter indicates reo remap 2 register value + * Return: bool type, true if remap is configured else false. 
 */
static bool dp_reo_remap_config(struct dp_soc *soc,
				uint32_t *remap1,
				uint32_t *remap2)
{
	/* IPA case: remap onto rings 1/2/3 only (ring 4 reserved for IPA) */

	*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) | (0x1 << 9) |
		(0x2 << 12) | (0x3 << 15) | (0x1 << 18) | (0x2 << 21)) << 8;

	*remap2 = ((0x3 << 0) | (0x1 << 3) | (0x2 << 6) | (0x3 << 9) |
		(0x1 << 12) | (0x2 << 15) | (0x3 << 18) | (0x1 << 21)) << 8;

	return true;
}
#else
static bool dp_reo_remap_config(struct dp_soc *soc,
				uint32_t *remap1,
				uint32_t *remap2)
{
	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);

	switch (offload_radio) {
	case 0:
		/* no NSS offload: spread across rings 1-4 */
		*remap1 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
			(0x3 << 18) | (0x4 << 21)) << 8;

		*remap2 = ((0x1 << 0) | (0x2 << 3) | (0x3 << 6) |
			(0x4 << 9) | (0x1 << 12) | (0x2 << 15) |
			(0x3 << 18) | (0x4 << 21)) << 8;
		break;

	case 1:
		/* first radio offloaded: skip ring 1 */
		*remap1 = ((0x2 << 0) | (0x3 << 3) | (0x4 << 6) |
			(0x2 << 9) | (0x3 << 12) | (0x4 << 15) |
			(0x2 << 18) | (0x3 << 21)) << 8;

		*remap2 = ((0x4 << 0) | (0x2 << 3) | (0x3 << 6) |
			(0x4 << 9) | (0x2 << 12) | (0x3 << 15) |
			(0x4 << 18) | (0x2 << 21)) << 8;
		break;

	case 2:
		/* second radio offloaded: skip ring 2 */
		*remap1 = ((0x1 << 0) | (0x3 << 3) | (0x4 << 6) |
			(0x1 << 9) | (0x3 << 12) | (0x4 << 15) |
			(0x1 << 18) | (0x3 << 21)) << 8;

		*remap2 = ((0x4 << 0) | (0x1 << 3) | (0x3 << 6) |
			(0x4 << 9) | (0x1 << 12) | (0x3 << 15) |
			(0x4 << 18) | (0x1 << 21)) << 8;
		break;

	case 3:
		/* return false if both radios are offloaded to NSS */
		return false;
	}
	return true;
}
#endif

/*
 * dp_reo_frag_dst_set() - configure reo register to set the
 *	fragment destination ring
 * @soc : Datapath soc
 * @frag_dst_ring : output parameter to set fragment destination ring
 *
 * Based on offload_radio below fragment destination rings is selected
 * 0 - TCL
 * 1 - SW1
 * 2 - SW2
 * 3 - SW3
 * 4 - SW4
 * 5 - Release
 * 6 - FW
 * 7 - alternate select
 *
 * return: void
 */
static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
{
	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);

	switch (offload_radio) {
	case 0:
		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
		break;
	case 3:
		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
		break;
	default:
		/* NOTE(review): offload_radio 1/2 intentionally fall here
		 * and leave *frag_dst_ring unset — confirm callers
		 * zero-initialize reo_params. */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_reo_frag_dst_set invalid offload radio config"));
		break;
	}
}

/*
 * dp_soc_cmn_setup() - Common SoC level initializion
 * @soc: Datapath SOC handle
 *
 * This is an internal function used to setup common SOC data structures,
 * to be called from PDEV attach after receiving HW mode capabilities from FW
 */
static int dp_soc_cmn_setup(struct dp_soc *soc)
{
	int i;
	struct hal_reo_params reo_params;
	int tx_ring_size;
	int tx_comp_ring_size;

	/* Idempotent: only the first pdev attach performs common setup */
	if (qdf_atomic_read(&soc->cmn_init_done))
		return 0;

	if (dp_hw_link_desc_pool_setup(soc))
		goto fail1;

	/* Setup SRNG rings */
	/* Common rings */
	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
		WBM_RELEASE_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
		goto fail1;
	}


	soc->num_tcl_data_rings = 0;
	/* Tx data rings */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		soc->num_tcl_data_rings =
			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
		tx_comp_ring_size =
			wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx);
		tx_ring_size =
			wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx);
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
				TCL_DATA, i, 0, tx_ring_size)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
				goto fail1;
			}
			/*
			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
			 * count
			 */
			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
				goto fail1;
			}
		}
	} else {
		/* This will be incremented during per pdev ring setup */
		soc->num_tcl_data_rings = 0;
	}

	if (dp_tx_soc_attach(soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_tx_soc_attach failed"));
		goto fail1;
	}

	/* TCL command and status rings */
	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
		TCL_CMD_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for tcl_cmd_ring"));
		goto fail1;
	}

	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
		TCL_STATUS_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for tcl_status_ring"));
		goto fail1;
	}


	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
	 * descriptors
	 */

	/* Rx data rings */
	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
		QDF_TRACE(QDF_MODULE_ID_DP,
			QDF_TRACE_LEVEL_ERROR,
			FL("num_reo_dest_rings %d\n"), soc->num_reo_dest_rings);
		for (i = 0; i < soc->num_reo_dest_rings; i++) {
			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
				i, 0, REO_DST_RING_SIZE)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					FL("dp_srng_setup failed for reo_dest_ring[%d]"), i);
				goto fail1;
			}
		}
	} else {
		/* This will be incremented during per pdev ring setup */
		soc->num_reo_dest_rings = 0;
	}

	/* LMAC RxDMA to SW Rings configuration */
	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
		/* Only valid for MCL */
		struct dp_pdev *pdev = soc->pdev_list[0];

		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
				RXDMA_DST, 0, i, RXDMA_ERR_DST_RING_SIZE)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					FL("dp_srng_setup failed for rxdma_err_dst_ring"));
				goto fail1;
			}
		}
	}
	/* TBD: call dp_rx_init to setup Rx SW descriptors */

	/* REO reinjection ring */
	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
		REO_REINJECT_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for reo_reinject_ring"));
		goto fail1;
	}


	/* Rx release ring */
	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
		RX_RELEASE_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for rx_rel_ring"));
		goto fail1;
	}


	/* Rx exception ring */
	if (dp_srng_setup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
		MAX_REO_DEST_RINGS, REO_EXCEPTION_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for reo_exception_ring"));
		goto fail1;
	}


	/* REO command and status rings */
	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
		REO_CMD_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for reo_cmd_ring"));
		goto fail1;
	}

	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
	TAILQ_INIT(&soc->rx.reo_cmd_list);
	qdf_spinlock_create(&soc->rx.reo_cmd_lock);

	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
		REO_STATUS_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for reo_status_ring"));
		goto fail1;
	}

	qdf_spinlock_create(&soc->ast_lock);
	dp_soc_wds_attach(soc);

	/* Reset the cpu ring map if radio is NSS offloaded */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		dp_soc_reset_cpu_ring_map(soc);
		dp_soc_reset_intr_mask(soc);
	}

	/* Setup HW REO */
	qdf_mem_zero(&reo_params, sizeof(reo_params));

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {

		/*
		 * Reo ring remap is not required if both radios
		 * are offloaded to NSS
		 */
		if (!dp_reo_remap_config(soc,
					&reo_params.remap1,
					&reo_params.remap2))
			goto out;

		reo_params.rx_hash_enabled = true;
	}

	/* setup the global rx defrag waitlist */
	TAILQ_INIT(&soc->rx.defrag.waitlist);
	soc->rx.defrag.timeout_ms =
		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
	soc->rx.flags.defrag_timeout_check =
		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);

out:
	/*
	 * set the fragment destination ring
	 */
	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);

	hal_reo_setup(soc->hal_soc, &reo_params);

	qdf_atomic_set(&soc->cmn_init_done, 1);
	qdf_nbuf_queue_init(&soc->htt_stats.msg);
	return 0;
fail1:
	/*
	 * Cleanup will be done as part of soc_detach, which will
	 * be called on pdev attach failure
	 */
	return QDF_STATUS_E_FAILURE;
}

static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);

/*
 * dp_lro_hash_setup() - Program the LRO/RX-hash (toeplitz) configuration
 * @soc: Datapath SOC handle
 *
 * No-op when both LRO and RX hash are disabled; otherwise fills a
 * cdp_lro_hash_config with random toeplitz seeds and pushes it to the
 * control plane via ol_ops->lro_hash_config.
 */
static void dp_lro_hash_setup(struct dp_soc *soc)
{
	struct cdp_lro_hash_config lro_hash;

	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
		!wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("LRO disabled RX hash disabled"));
		return;
	}

	qdf_mem_zero(&lro_hash, sizeof(lro_hash));

	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
		lro_hash.lro_enable = 1;
		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
			QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
			QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, FL("enabled"));
	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
		(sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
		LRO_IPV4_SEED_ARR_SZ));
	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
		(sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
		LRO_IPV6_SEED_ARR_SZ));

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		"lro_hash: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
		lro_hash.lro_enable, lro_hash.tcp_flag,
		lro_hash.tcp_flag_mask);

qdf_trace_hex_dump(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + (void *)lro_hash.toeplitz_hash_ipv4, + (sizeof(lro_hash.toeplitz_hash_ipv4[0]) * + LRO_IPV4_SEED_ARR_SZ)); + + qdf_trace_hex_dump(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + (void *)lro_hash.toeplitz_hash_ipv6, + (sizeof(lro_hash.toeplitz_hash_ipv6[0]) * + LRO_IPV6_SEED_ARR_SZ)); + + qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config); + + if (soc->cdp_soc.ol_ops->lro_hash_config) + (void)soc->cdp_soc.ol_ops->lro_hash_config + (soc->ctrl_psoc, &lro_hash); +} + +/* +* dp_rxdma_ring_setup() - configure the RX DMA rings +* @soc: data path SoC handle +* @pdev: Physical device handle +* +* Return: 0 - success, > 0 - failure +*/ +#ifdef QCA_HOST2FW_RXBUF_RING +static int dp_rxdma_ring_setup(struct dp_soc *soc, + struct dp_pdev *pdev) +{ + int max_mac_rings = + wlan_cfg_get_num_mac_rings + (pdev->wlan_cfg_ctx); + int i; + + for (i = 0; i < max_mac_rings; i++) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: pdev_id %d mac_id %d\n", + __func__, pdev->pdev_id, i); + if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i], + RXDMA_BUF, 1, i, RXDMA_BUF_RING_SIZE)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL("failed rx mac ring setup")); + return QDF_STATUS_E_FAILURE; + } + } + return QDF_STATUS_SUCCESS; +} +#else +static int dp_rxdma_ring_setup(struct dp_soc *soc, + struct dp_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps + * @pdev - DP_PDEV handle + * + * Return: void + */ +static inline void +dp_dscp_tid_map_setup(struct dp_pdev *pdev) +{ + uint8_t map_id; + for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) { + qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map, + sizeof(default_dscp_tid_map)); + } + for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) { + hal_tx_set_dscp_tid_map(pdev->soc->hal_soc, + pdev->dscp_tid_map[map_id], + map_id); + } +} + +#ifdef QCA_SUPPORT_SON +/** + * 
 * dp_mark_peer_inact(): Update peer inactivity status
 * @peer_handle - datapath peer handle
 * @inactive - new inactivity state
 *
 * Return: void
 */
void dp_mark_peer_inact(void *peer_handle, bool inactive)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	bool inactive_old;

	if (!peer)
		return;

	pdev = peer->vdev->pdev;
	soc = pdev->soc;

	inactive_old = peer->peer_bs_inact_flag == 1;
	/* becoming active: reload the countdown */
	if (!inactive)
		peer->peer_bs_inact = soc->pdev_bs_inact_reload;
	peer->peer_bs_inact_flag = inactive ? 1 : 0;

	if (inactive_old != inactive) {
		/**
		 * Note: a node lookup can happen in RX datapath context
		 * when a node changes from inactive to active (at most once
		 * per inactivity timeout threshold)
		 */
		if (soc->cdp_soc.ol_ops->record_act_change) {
			soc->cdp_soc.ol_ops->record_act_change(pdev->osif_pdev,
				peer->mac_addr.raw, !inactive);
		}
	}
}

/**
 * dp_txrx_peer_find_inact_timeout_handler(): Inactivity timeout function
 *
 * Periodically checks the inactivity status of every authorized peer on
 * AP vdevs, decrements its countdown and marks it inactive at zero.
 */
static os_timer_func(dp_txrx_peer_find_inact_timeout_handler)
{
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_soc *soc;
	int i;

	OS_GET_TIMER_ARG(soc, struct dp_soc *);

	qdf_spin_lock(&soc->peer_ref_mutex);

	for (i = 0; i < soc->pdev_count; i++) {
	pdev = soc->pdev_list[i];
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (vdev->opmode != wlan_op_mode_ap)
			continue;

		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if (!peer->authorize) {
				/**
				 * Inactivity check only interested in
				 * connected node
				 */
				continue;
			}
			if (peer->peer_bs_inact > soc->pdev_bs_inact_reload) {
				/**
				 * This check ensures we do not wait extra long
				 * due to the potential race condition
				 */
				peer->peer_bs_inact = soc->pdev_bs_inact_reload;
			}
			if (peer->peer_bs_inact > 0) {
				/* Do not let it wrap around */
				peer->peer_bs_inact--;
			}
			if (peer->peer_bs_inact == 0)
				dp_mark_peer_inact(peer, true);
		}
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock(&soc->peer_ref_mutex);
	qdf_timer_mod(&soc->pdev_bs_inact_timer,
		soc->pdev_bs_inact_interval * 1000);
}


/**
 * dp_free_inact_timer(): free inact timer
 * @soc - Datapath SOC handle
 *
 * Return: void
 */
void dp_free_inact_timer(struct dp_soc *soc)
{
	qdf_timer_free(&soc->pdev_bs_inact_timer);
}
#else

void dp_mark_peer_inact(void *peer, bool inactive)
{
	return;
}

void dp_free_inact_timer(struct dp_soc *soc)
{
	return;
}

#endif

#ifdef IPA_OFFLOAD
/**
 * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
	struct dp_pdev *pdev)
{
	/* Setup second Rx refill buffer ring */
	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
		IPA_RX_REFILL_BUF_RING_IDX,
		pdev->pdev_id, RXDMA_REFILL_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed second rx refill ring"));
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Return: void
 */
static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
	struct dp_pdev *pdev)
{
	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
		IPA_RX_REFILL_BUF_RING_IDX);
}

#else

static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
	struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
	struct dp_pdev *pdev)
{
}

#endif

/*
* dp_pdev_attach_wifi3() - attach txrx pdev
* @ctrl_pdev: Opaque PDEV object
*
@txrx_soc: Datapath SOC handle +* @htc_handle: HTC handle for host-target interface +* @qdf_osdev: QDF OS device +* @pdev_id: PDEV ID +* +* Return: DP PDEV handle on success, NULL on failure +*/ +static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc, + struct cdp_cfg *ctrl_pdev, + HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id) +{ + int tx_ring_size; + int tx_comp_ring_size; + + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev)); + int mac_id; + + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("DP PDEV memory allocation failed")); + goto fail0; + } + + pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(); + + if (!pdev->wlan_cfg_ctx) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("pdev cfg_attach failed")); + + qdf_mem_free(pdev); + goto fail0; + } + + /* + * set nss pdev config based on soc config + */ + wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx, + (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx) & (1 << pdev_id))); + + pdev->soc = soc; + pdev->osif_pdev = ctrl_pdev; + pdev->pdev_id = pdev_id; + soc->pdev_list[pdev_id] = pdev; + soc->pdev_count++; + + TAILQ_INIT(&pdev->vdev_list); + qdf_spinlock_create(&pdev->vdev_list_lock); + pdev->vdev_count = 0; + + qdf_spinlock_create(&pdev->tx_mutex); + qdf_spinlock_create(&pdev->neighbour_peer_mutex); + TAILQ_INIT(&pdev->neighbour_peers_list); + + if (dp_soc_cmn_setup(soc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_soc_cmn_setup failed")); + goto fail1; + } + + /* Setup per PDEV TCL rings if configured */ + if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { + tx_ring_size = + wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx); + tx_comp_ring_size = + wlan_cfg_tx_comp_ring_size(soc->wlan_cfg_ctx); + + if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA, + pdev_id, pdev_id, tx_ring_size)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for 
tcl_data_ring")); + goto fail1; + } + if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id], + WBM2SW_RELEASE, pdev_id, pdev_id, tx_comp_ring_size)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for tx_comp_ring")); + goto fail1; + } + soc->num_tcl_data_rings++; + } + + /* Tx specific init */ + if (dp_tx_pdev_attach(pdev)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_tx_pdev_attach failed")); + goto fail1; + } + + /* Setup per PDEV REO rings if configured */ + if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) { + if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST, + pdev_id, pdev_id, REO_DST_RING_SIZE)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for reo_dest_ringn")); + goto fail1; + } + soc->num_reo_dest_rings++; + + } + if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id, + RXDMA_REFILL_RING_SIZE)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed rx refill ring")); + goto fail1; + } + + if (dp_rxdma_ring_setup(soc, pdev)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("RXDMA ring config failed")); + goto fail1; + } + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id); + + if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring[mac_id], + RXDMA_MONITOR_BUF, 0, mac_for_pdev, + RXDMA_MONITOR_BUF_RING_SIZE)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for rxdma_mon_buf_ring")); + goto fail1; + } + + if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring[mac_id], + RXDMA_MONITOR_DST, 0, mac_for_pdev, + RXDMA_MONITOR_DST_RING_SIZE)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for rxdma_mon_dst_ring")); + goto fail1; + } + + + if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring[mac_id], + RXDMA_MONITOR_STATUS, 0, mac_for_pdev, + RXDMA_MONITOR_STATUS_RING_SIZE)) { + 
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for rxdma_mon_status_ring")); + goto fail1; + } + + if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring[mac_id], + RXDMA_MONITOR_DESC, 0, mac_for_pdev, + RXDMA_MONITOR_DESC_RING_SIZE)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "dp_srng_setup failed for rxdma_mon_desc_ring\n"); + goto fail1; + } + } + + if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { + if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST, + 0, pdev_id, RXDMA_ERR_DST_RING_SIZE)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for rxdma_err_dst_ring")); + goto fail1; + } + } + + if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev)) + goto fail1; + + if (dp_ipa_ring_resource_setup(soc, pdev)) + goto fail1; + + if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_ipa_uc_attach failed")); + goto fail1; + } + + /* Rx specific init */ + if (dp_rx_pdev_attach(pdev)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_rx_pdev_attach failed")); + goto fail0; + } + DP_STATS_INIT(pdev); + + /* Monitor filter init */ + pdev->mon_filter_mode = MON_FILTER_ALL; + pdev->fp_mgmt_filter = FILTER_MGMT_ALL; + pdev->fp_ctrl_filter = FILTER_CTRL_ALL; + pdev->fp_data_filter = FILTER_DATA_ALL; + pdev->mo_mgmt_filter = FILTER_MGMT_ALL; + pdev->mo_ctrl_filter = FILTER_CTRL_ALL; + pdev->mo_data_filter = FILTER_DATA_ALL; + +#ifndef CONFIG_WIN + /* MCL */ + dp_local_peer_id_pool_init(pdev); +#endif + dp_dscp_tid_map_setup(pdev); + + /* Rx monitor mode specific init */ + if (dp_rx_pdev_mon_attach(pdev)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "dp_rx_pdev_attach failed\n"); + goto fail1; + } + + if (dp_wdi_event_attach(pdev)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "dp_wdi_evet_attach failed\n"); + goto fail1; + } + + /* set the reo destination during initialization */ + 
pdev->reo_dest = pdev->pdev_id + 1; + + /* + * initialize ppdu tlv list + */ + TAILQ_INIT(&pdev->ppdu_info_list); + pdev->tlv_count = 0; + pdev->list_depth = 0; + + return (struct cdp_pdev *)pdev; + +fail1: + dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0); + +fail0: + return NULL; +} + +/* +* dp_rxdma_ring_cleanup() - configure the RX DMA rings +* @soc: data path SoC handle +* @pdev: Physical device handle +* +* Return: void +*/ +#ifdef QCA_HOST2FW_RXBUF_RING +static void dp_rxdma_ring_cleanup(struct dp_soc *soc, + struct dp_pdev *pdev) +{ + int max_mac_rings = + wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx); + int i; + + max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ? + max_mac_rings : MAX_RX_MAC_RINGS; + for (i = 0; i < MAX_RX_MAC_RINGS; i++) + dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i], + RXDMA_BUF, 1); + + qdf_timer_free(&soc->mon_reap_timer); +} +#else +static void dp_rxdma_ring_cleanup(struct dp_soc *soc, + struct dp_pdev *pdev) +{ +} +#endif + +/* + * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients) + * @pdev: device object + * + * Return: void + */ +static void dp_neighbour_peers_detach(struct dp_pdev *pdev) +{ + struct dp_neighbour_peer *peer = NULL; + struct dp_neighbour_peer *temp_peer = NULL; + + TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list, + neighbour_peer_list_elem, temp_peer) { + /* delete this peer from the list */ + TAILQ_REMOVE(&pdev->neighbour_peers_list, + peer, neighbour_peer_list_elem); + qdf_mem_free(peer); + } + + qdf_spinlock_destroy(&pdev->neighbour_peer_mutex); +} + +/** +* dp_htt_ppdu_stats_detach() - detach stats resources +* @pdev: Datapath PDEV handle +* +* Return: void +*/ +static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev) +{ + struct ppdu_info *ppdu_info, *ppdu_info_next; + + TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list, + ppdu_info_list_elem, ppdu_info_next) { + if (!ppdu_info) + break; + qdf_assert_always(ppdu_info->nbuf); + qdf_nbuf_free(ppdu_info->nbuf); + 
qdf_mem_free(ppdu_info); + } +} + +/* +* dp_pdev_detach_wifi3() - detach txrx pdev +* @txrx_pdev: Datapath PDEV handle +* @force: Force detach +* +*/ +static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force) +{ + struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev; + struct dp_soc *soc = pdev->soc; + qdf_nbuf_t curr_nbuf, next_nbuf; + int mac_id; + + dp_wdi_event_detach(pdev); + + dp_tx_pdev_detach(pdev); + + if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { + dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id], + TCL_DATA, pdev->pdev_id); + dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id], + WBM2SW_RELEASE, pdev->pdev_id); + } + + dp_pktlogmod_exit(pdev); + + dp_rx_pdev_detach(pdev); + + dp_rx_pdev_mon_detach(pdev); + + dp_neighbour_peers_detach(pdev); + qdf_spinlock_destroy(&pdev->tx_mutex); + qdf_spinlock_destroy(&pdev->vdev_list_lock); + + dp_ipa_uc_detach(soc, pdev); + + dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev); + + /* Cleanup per PDEV REO rings if configured */ + if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) { + dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id], + REO_DST, pdev->pdev_id); + } + + dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0); + + dp_rxdma_ring_cleanup(soc, pdev); + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring[mac_id], + RXDMA_MONITOR_BUF, 0); + + dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring[mac_id], + RXDMA_MONITOR_DST, 0); + + dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring[mac_id], + RXDMA_MONITOR_STATUS, 0); + + dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring[mac_id], + RXDMA_MONITOR_DESC, 0); + + dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id], + RXDMA_DST, 0); + } + + curr_nbuf = pdev->invalid_peer_head_msdu; + while (curr_nbuf) { + next_nbuf = qdf_nbuf_next(curr_nbuf); + qdf_nbuf_free(curr_nbuf); + curr_nbuf = next_nbuf; + } + + dp_htt_ppdu_stats_detach(pdev); + + soc->pdev_list[pdev->pdev_id] = NULL; 
+ soc->pdev_count--; + wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx); + qdf_mem_free(pdev->dp_txrx_handle); + qdf_mem_free(pdev); +} + +/* + * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist + * @soc: DP SOC handle + */ +static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc) +{ + struct reo_desc_list_node *desc; + struct dp_rx_tid *rx_tid; + + qdf_spin_lock_bh(&soc->reo_desc_freelist_lock); + while (qdf_list_remove_front(&soc->reo_desc_freelist, + (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) { + rx_tid = &desc->rx_tid; + qdf_mem_unmap_nbytes_single(soc->osdev, + rx_tid->hw_qdesc_paddr, + QDF_DMA_BIDIRECTIONAL, + rx_tid->hw_qdesc_alloc_size); + qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned); + qdf_mem_free(desc); + } + qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock); + qdf_list_destroy(&soc->reo_desc_freelist); + qdf_spinlock_destroy(&soc->reo_desc_freelist_lock); +} + +/* + * dp_soc_detach_wifi3() - Detach txrx SOC + * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc. 
 */
static void dp_soc_detach_wifi3(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	/* Stop treating the SOC as initialized before tearing anything down */
	qdf_atomic_set(&soc->cmn_init_done, 0);

	qdf_flush_work(&soc->htt_stats.work);
	qdf_disable_work(&soc->htt_stats.work);

	/* Free pending htt stats messages */
	qdf_nbuf_queue_free(&soc->htt_stats.msg);

	dp_free_inact_timer(soc);

	/* Detach every still-attached pdev (forced) before SOC teardown */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		if (soc->pdev_list[i])
			dp_pdev_detach_wifi3(
				(struct cdp_pdev *)soc->pdev_list[i], 1);
	}

	dp_peer_find_detach(soc);

	/* TBD: Call Tx and Rx cleanup functions to free buffers and
	 * SW descriptors
	 */

	/* Free the ring memories */
	/* Common rings */
	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);

	dp_tx_soc_detach(soc);
	/* Tx data rings (only when not allocated per-pdev) */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
				TCL_DATA, i);
			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
				WBM2SW_RELEASE, i);
		}
	}

	/* TCL command and status rings */
	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);

	/* Rx data rings (only when not allocated per-pdev) */
	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
		for (i = 0; i < soc->num_reo_dest_rings; i++) {
			/* TODO: Get number of rings and ring sizes
			 * from wlan_cfg
			 */
			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
				REO_DST, i);
		}
	}
	/* REO reinjection ring */
	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);

	/* Rx release ring */
	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);

	/* Rx exception ring */
	/* TODO: Better to store ring_type and ring_num in
	 * dp_srng during setup
	 */
	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);

	/* REO command and status rings */
	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
	dp_hw_link_desc_pool_cleanup(soc);

	qdf_spinlock_destroy(&soc->peer_ref_mutex);
	qdf_spinlock_destroy(&soc->htt_stats.lock);

	htt_soc_detach(soc->htt_handle);

	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);

	dp_reo_cmdlist_destroy(soc);
	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
	dp_reo_desc_freelist_destroy(soc);

	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);

	dp_soc_wds_detach(soc);
	qdf_spinlock_destroy(&soc->ast_lock);

	qdf_mem_free(soc);
}

/*
 * dp_rxdma_ring_config() - configure the RX DMA rings
 *
 * This function is used to configure the MAC rings.
 * On MCL host provides buffers in Host2FW ring
 * FW refills (copies) buffers to the ring and updates
 * ring_idx in register
 *
 * @soc: data path SoC handle
 *
 * Return: void
 */
#ifdef QCA_HOST2FW_RXBUF_RING
static void dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			int mac_id;
			bool dbs_enable = 0;
			int max_mac_rings =
				wlan_cfg_get_num_mac_rings
				(pdev->wlan_cfg_ctx);

			htt_srng_setup(soc->htt_handle, 0,
				pdev->rx_refill_buf_ring.hal_srng,
				RXDMA_BUF);

			/* Second refill ring exists only with IPA offload */
			if (pdev->rx_refill_buf_ring2.hal_srng)
				htt_srng_setup(soc->htt_handle, 0,
					pdev->rx_refill_buf_ring2.hal_srng,
					RXDMA_BUF);

			if (soc->cdp_soc.ol_ops->
				is_hw_dbs_2x2_capable) {
				dbs_enable = soc->cdp_soc.ol_ops->
					is_hw_dbs_2x2_capable(soc->ctrl_psoc);
			}

			/* Without DBS only a single MAC ring is programmed */
			if (dbs_enable) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					FL("DBS enabled max_mac_rings %d\n"),
					max_mac_rings);
			} else {
				max_mac_rings = 1;
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					FL("DBS disabled, max_mac_rings %d\n"),
					max_mac_rings);
			}

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("pdev_id %d max_mac_rings %d\n"),
				pdev->pdev_id, max_mac_rings);

			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
					mac_id, pdev->pdev_id);

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					FL("mac_id %d\n"), mac_for_pdev);
				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rx_mac_buf_ring[mac_id]
					.hal_srng,
					RXDMA_BUF);
				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_err_dst_ring[mac_id]
					.hal_srng,
					RXDMA_DST);

				/* Configure monitor mode rings */
				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
					RXDMA_MONITOR_BUF);

				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
					RXDMA_MONITOR_DST);

				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_mon_status_ring[mac_id].hal_srng,
					RXDMA_MONITOR_STATUS);

				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
					RXDMA_MONITOR_DESC);
			}
		}
	}

	/*
	 * Timer to reap rxdma status rings.
	 * Needed until we enable ppdu end interrupts
	 */
	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
		dp_service_mon_rings, (void *)soc,
		QDF_TIMER_TYPE_WAKE_APPS);
	soc->reap_timer_init = 1;
}
#else
/* This is only for WIN */
static void dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	int mac_id;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev == NULL)
			continue;

		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);

			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);

			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
				RXDMA_MONITOR_BUF);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
				RXDMA_MONITOR_DST);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
				RXDMA_MONITOR_STATUS);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
				RXDMA_MONITOR_DESC);
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
				RXDMA_DST);
		}
	}
}
#endif

/*
 * dp_soc_attach_target_wifi3() - SOC initialization in the target
 * @txrx_soc: Datapath SOC handle
 *
 * Return: 0 (always succeeds)
 */
static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;

	htt_soc_attach_target(soc->htt_handle);

	dp_rxdma_ring_config(soc);

	DP_STATS_INIT(soc);

	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return 0;
}

/*
 * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
 * @txrx_soc: Datapath SOC handle
 *
 * Return: current NSS offload configuration bitmap
 */
static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
{
	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
}
/*
 * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
 * @txrx_soc: Datapath SOC handle
 * @nss_cfg: nss config
 *
 * Return: void
 */
static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
{
	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;

	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);

	/*
	 * TODO: masked out based on the per offloaded radio
	 */
	/* With both radios offloaded the host owns no Tx descriptors */
	if (config == dp_nss_cfg_dbdc) {
		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		FL("nss-wifi<0> nss config is enabled"));
}
/*
* dp_vdev_attach_wifi3() - attach txrx vdev
* @txrx_pdev: Datapath PDEV handle
* @vdev_mac_addr: MAC address of the virtual interface
* @vdev_id: VDEV Id
* @wlan_op_mode: VDEV operating mode
*
* Return: DP VDEV handle on success, NULL on failure
*/
+static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev, + uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode) +{ + struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev; + struct dp_soc *soc = pdev->soc; + struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev)); + + if (!vdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("DP VDEV memory allocation failed")); + goto fail0; + } + + vdev->pdev = pdev; + vdev->vdev_id = vdev_id; + vdev->opmode = op_mode; + vdev->osdev = soc->osdev; + + vdev->osif_rx = NULL; + vdev->osif_rsim_rx_decap = NULL; + vdev->osif_get_key = NULL; + vdev->osif_rx_mon = NULL; + vdev->osif_tx_free_ext = NULL; + vdev->osif_vdev = NULL; + + vdev->delete.pending = 0; + vdev->safemode = 0; + vdev->drop_unenc = 1; + vdev->sec_type = cdp_sec_type_none; +#ifdef notyet + vdev->filters_num = 0; +#endif + + qdf_mem_copy( + &vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN); + + vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx); + vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx); + vdev->dscp_tid_map_id = 0; + vdev->mcast_enhancement_en = 0; + + /* TODO: Initialize default HTT meta data that will be used in + * TCL descriptors for packets transmitted from this VDEV + */ + + TAILQ_INIT(&vdev->peer_list); + + qdf_spin_lock_bh(&pdev->vdev_list_lock); + /* add this vdev into the pdev's list */ + TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem); + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + pdev->vdev_count++; + + dp_tx_vdev_attach(vdev); + + + if ((soc->intr_mode == DP_INTR_POLL) && + wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) { + if (pdev->vdev_count == 1) + qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS); + } + + dp_lro_hash_setup(soc); + + /* LRO */ + if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) && + wlan_op_mode_sta == vdev->opmode) + vdev->lro_enable = true; + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "LRO: vdev_id %d lro_enable %d", vdev_id, 
vdev->lro_enable); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw); + DP_STATS_INIT(vdev); + + if (wlan_op_mode_sta == vdev->opmode) + dp_peer_create_wifi3((struct cdp_vdev *)vdev, + vdev->mac_addr.raw); + + return (struct cdp_vdev *)vdev; + +fail0: + return NULL; +} + +/** + * dp_vdev_register_wifi3() - Register VDEV operations from osif layer + * @vdev: Datapath VDEV handle + * @osif_vdev: OSIF vdev handle + * @txrx_ops: Tx and Rx operations + * + * Return: DP VDEV handle on success, NULL on failure + */ +static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle, + void *osif_vdev, + struct ol_txrx_ops *txrx_ops) +{ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + vdev->osif_vdev = osif_vdev; + vdev->osif_rx = txrx_ops->rx.rx; + vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap; + vdev->osif_get_key = txrx_ops->get_key; + vdev->osif_rx_mon = txrx_ops->rx.mon; + vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext; +#ifdef notyet +#if ATH_SUPPORT_WAPI + vdev->osif_check_wai = txrx_ops->rx.wai_check; +#endif +#endif +#ifdef UMAC_SUPPORT_PROXY_ARP + vdev->osif_proxy_arp = txrx_ops->proxy_arp; +#endif + vdev->me_convert = txrx_ops->me_convert; + + /* TODO: Enable the following once Tx code is integrated */ + if (vdev->mesh_vdev) + txrx_ops->tx.tx = dp_tx_send_mesh; + else + txrx_ops->tx.tx = dp_tx_send; + + txrx_ops->tx.tx_exception = dp_tx_send_exception; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, + "DP Vdev Register success"); +} + +/** + * dp_vdev_flush_peers() - Forcibily Flush peers of vdev + * @vdev: Datapath VDEV handle + * + * Return: void + */ +static void dp_vdev_flush_peers(struct dp_vdev *vdev) +{ + struct dp_pdev *pdev = vdev->pdev; + struct dp_soc *soc = pdev->soc; + struct dp_peer *peer; + uint16_t *peer_ids; + uint8_t i = 0, j = 0; + + peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0])); + if (!peer_ids) { + QDF_TRACE(QDF_MODULE_ID_DP, 
QDF_TRACE_LEVEL_ERROR, + "DP alloc failure - unable to flush peers"); + return; + } + + qdf_spin_lock_bh(&soc->peer_ref_mutex); + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) + if (peer->peer_ids[i] != HTT_INVALID_PEER) + if (j < soc->max_peers) + peer_ids[j++] = peer->peer_ids[i]; + } + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + + for (i = 0; i < j ; i++) + dp_rx_peer_unmap_handler(soc, peer_ids[i]); + + qdf_mem_free(peer_ids); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + FL("Flushed peers for vdev object %pK "), vdev); +} + +/* + * dp_vdev_detach_wifi3() - Detach txrx vdev + * @txrx_vdev: Datapath VDEV handle + * @callback: Callback OL_IF on completion of detach + * @cb_context: Callback context + * + */ +static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle, + ol_txrx_vdev_delete_cb callback, void *cb_context) +{ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + struct dp_pdev *pdev = vdev->pdev; + struct dp_soc *soc = pdev->soc; + + /* preconditions */ + qdf_assert(vdev); + + qdf_spin_lock_bh(&pdev->vdev_list_lock); + /* remove the vdev from its parent pdev's list */ + TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem); + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + + if (wlan_op_mode_sta == vdev->opmode) + dp_peer_delete_wifi3(vdev->vap_bss_peer, 0); + + /* + * If Target is hung, flush all peers before detaching vdev + * this will free all references held due to missing + * unmap commands from Target + */ + if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET) + dp_vdev_flush_peers(vdev); + + /* + * Use peer_ref_mutex while accessing peer_list, in case + * a peer is in the process of being removed from the list. 
+ */ + qdf_spin_lock_bh(&soc->peer_ref_mutex); + /* check that the vdev has no peers allocated */ + if (!TAILQ_EMPTY(&vdev->peer_list)) { + /* debug print - will be removed later */ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN, + FL("not deleting vdev object %pK (%pM)" + "until deletion finishes for all its peers"), + vdev, vdev->mac_addr.raw); + /* indicate that the vdev needs to be deleted */ + vdev->delete.pending = 1; + vdev->delete.callback = callback; + vdev->delete.context = cb_context; + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + return; + } + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + + dp_tx_vdev_detach(vdev); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw); + + qdf_mem_free(vdev); + + if (callback) + callback(cb_context); +} + +/* + * dp_peer_create_wifi3() - attach txrx peer + * @txrx_vdev: Datapath VDEV handle + * @peer_mac_addr: Peer MAC address + * + * Return: DP peeer handle on success, NULL on failure + */ +static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle, + uint8_t *peer_mac_addr) +{ + struct dp_peer *peer; + int i; + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + struct dp_pdev *pdev; + struct dp_soc *soc; + + /* preconditions */ + qdf_assert(vdev); + qdf_assert(peer_mac_addr); + + pdev = vdev->pdev; + soc = pdev->soc; + + peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, + 0, vdev->vdev_id); + + if (peer) { + peer->delete_in_progress = false; + + qdf_spin_lock_bh(&soc->ast_lock); + TAILQ_INIT(&peer->ast_entry_list); + qdf_spin_unlock_bh(&soc->ast_lock); + + /* + * on peer create, peer ref count decrements, sice new peer is not + * getting created earlier reference is reused, peer_unref_delete will + * take care of incrementing count + * */ + if (soc->cdp_soc.ol_ops->peer_unref_delete) { + soc->cdp_soc.ol_ops->peer_unref_delete(pdev->osif_pdev, + vdev->vdev_id, peer->mac_addr.raw); + } + + DP_STATS_INIT(peer); + return (void *)peer; + } 
+ +#ifdef notyet + peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev, + soc->mempool_ol_ath_peer); +#else + peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer)); +#endif + + if (!peer) + return NULL; /* failure */ + + qdf_mem_zero(peer, sizeof(struct dp_peer)); + + TAILQ_INIT(&peer->ast_entry_list); + + /* store provided params */ + peer->vdev = vdev; + + dp_peer_add_ast(soc, peer, peer_mac_addr, CDP_TXRX_AST_TYPE_STATIC, 0); + + qdf_spinlock_create(&peer->peer_info_lock); + + qdf_mem_copy( + &peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN); + + /* TODO: See of rx_opt_proc is really required */ + peer->rx_opt_proc = soc->rx_opt_proc; + + /* initialize the peer_id */ + for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) + peer->peer_ids[i] = HTT_INVALID_PEER; + + qdf_spin_lock_bh(&soc->peer_ref_mutex); + + qdf_atomic_init(&peer->ref_cnt); + + /* keep one reference for attach */ + qdf_atomic_inc(&peer->ref_cnt); + + /* add this peer into the vdev's list */ + if (wlan_op_mode_sta == vdev->opmode) + TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem); + else + TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem); + + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + + /* TODO: See if hash based search is required */ + dp_peer_find_hash_add(soc, peer); + + /* Initialize the peer state */ + peer->state = OL_TXRX_PEER_STATE_DISC; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + "vdev %pK created peer %pK (%pM) ref_cnt: %d", + vdev, peer, peer->mac_addr.raw, + qdf_atomic_read(&peer->ref_cnt)); + /* + * For every peer MAp message search and set if bss_peer + */ + if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + "vdev bss_peer!!!!"); + peer->bss_peer = 1; + vdev->vap_bss_peer = peer; + } + + +#ifndef CONFIG_WIN + dp_local_peer_id_alloc(pdev, peer); +#endif + DP_STATS_INIT(peer); + return (void *)peer; +} + +/* + * dp_peer_setup_wifi3() - initialize the peer + * @vdev_hdl: 
virtual device object
 * @peer: Peer object
 *
 * Return: void
 */
static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
{
	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	bool hash_based = 0;
	enum cdp_host_reo_dest_ring reo_dest;

	/* preconditions */
	qdf_assert(vdev);
	qdf_assert(peer);

	pdev = vdev->pdev;
	soc = pdev->soc;

	peer->last_assoc_rcvd = 0;
	peer->last_disassoc_rcvd = 0;
	peer->last_deauth_rcvd = 0;

	/*
	 * hash based steering is disabled for Radios which are offloaded
	 * to NSS
	 */
	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
		hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		FL("hash based steering for pdev: %d is %d\n"),
		pdev->pdev_id, hash_based);

	/*
	 * Below line of code will ensure the proper reo_dest ring is chosen
	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
	 */
	reo_dest = pdev->reo_dest;

	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
		/* TODO: Check the destination ring number to be passed to FW */
		soc->cdp_soc.ol_ops->peer_set_default_routing(
			pdev->osif_pdev, peer->mac_addr.raw,
			peer->vdev->vdev_id, hash_based, reo_dest);
	}

	dp_peer_rx_init(pdev, peer);
	return;
}

/*
 * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
 * @vdev_handle: virtual device object
 * @htt_pkt_type: type of pkt
 *
 * Return: void
 */
static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
	enum htt_cmn_pkt_type val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	vdev->tx_encap_type = val;
}

/*
 * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
 * @vdev_handle: virtual device object
 * @htt_pkt_type: type of pkt
 *
 * Return: void
 */
static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
	enum htt_cmn_pkt_type val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	vdev->rx_decap_type = val;
}

/*
 * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
 * @pdev_handle: physical device object
 * @val: reo destination ring index (1 - 4)
 *
 * Return: void
 */
static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
	enum cdp_host_reo_dest_ring val)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	if (pdev)
		pdev->reo_dest = val;
}

/*
 * dp_get_pdev_reo_dest() - get the reo destination for this pdev
 * @pdev_handle: physical device object
 *
 * Return: reo destination ring index, or
 * cdp_host_reo_dest_ring_unknown for a NULL pdev
 */
static enum cdp_host_reo_dest_ring
dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	if (pdev)
		return pdev->reo_dest;
	else
		return cdp_host_reo_dest_ring_unknown;
}

#ifdef QCA_SUPPORT_SON
/* Reset a newly authorized peer's inactivity state to "active" */
static void dp_son_peer_authorize(struct dp_peer *peer)
{
	struct dp_soc *soc;
	soc = peer->vdev->pdev->soc;
	peer->peer_bs_inact_flag = 0;
	peer->peer_bs_inact = soc->pdev_bs_inact_reload;
	return;
}
#else
static void dp_son_peer_authorize(struct dp_peer *peer)
{
	return;
}
#endif
/*
 * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
 * @pdev_handle: device object
 * @val: value to be set
 *
 * Return: 0 (always succeeds)
 */
static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
	uint32_t val)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	/* Enable/Disable smart mesh filtering. This flag will be checked
	 * during rx processing to check if packets are from NAC clients.
	 */
	pdev->filter_neighbour_peers = val;
	return 0;
}

/*
 * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
 * address for smart mesh filtering
 * @pdev_handle: device object
 * @cmd: Add/Del command
 * @macaddr: nac client mac address
 *
 * Return: 1 when the add/delete succeeded, 0 on failure or unknown cmd
 */
static int dp_update_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
	uint32_t cmd, uint8_t *macaddr)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_neighbour_peer *peer = NULL;

	if (!macaddr)
		goto fail0;

	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */
	if (cmd == DP_NAC_PARAM_ADD) {
		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
			sizeof(*peer));

		if (!peer) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("DP neighbour peer node memory allocation failed"));
			goto fail0;
		}

		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
			macaddr, DP_MAC_ADDR_LEN);


		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		/* add this neighbour peer into the list */
		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
			neighbour_peer_list_elem);
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		return 1;

	} else if (cmd == DP_NAC_PARAM_DEL) {
		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
			neighbour_peer_list_elem) {
			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				macaddr, DP_MAC_ADDR_LEN)) {
				/* delete this peer from the list */
				TAILQ_REMOVE(&pdev->neighbour_peers_list,
					peer, neighbour_peer_list_elem);
				qdf_mem_free(peer);
				break;
			}
		}
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		return 1;

	}

fail0:
	return 0;
}

/*
 * dp_get_sec_type() - Get the security type
 * @peer: Datapath peer handle
 * @sec_idx: Security id (mcast, ucast)
 *
 * return sec_type: Security type
 */
static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
{
	struct dp_peer
*dpeer = (struct dp_peer *)peer; + + return dpeer->security[sec_idx].sec_type; +} + +/* + * dp_peer_authorize() - authorize txrx peer + * @peer_handle: Datapath peer handle + * @authorize + * + */ +static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize) +{ + struct dp_peer *peer = (struct dp_peer *)peer_handle; + struct dp_soc *soc; + + if (peer != NULL) { + soc = peer->vdev->pdev->soc; + qdf_spin_lock_bh(&soc->peer_ref_mutex); + dp_son_peer_authorize(peer); + peer->authorize = authorize ? 1 : 0; + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + } +} + +#ifdef QCA_SUPPORT_SON +/* + * dp_txrx_update_inact_threshold() - Update inact timer threshold + * @pdev_handle: Device handle + * @new_threshold : updated threshold value + * + */ +static void +dp_txrx_update_inact_threshold(struct cdp_pdev *pdev_handle, + u_int16_t new_threshold) +{ + struct dp_vdev *vdev; + struct dp_peer *peer; + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + struct dp_soc *soc = pdev->soc; + u_int16_t old_threshold = soc->pdev_bs_inact_reload; + + if (old_threshold == new_threshold) + return; + + soc->pdev_bs_inact_reload = new_threshold; + + qdf_spin_lock_bh(&soc->peer_ref_mutex); + qdf_spin_lock_bh(&pdev->vdev_list_lock); + TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { + if (vdev->opmode != wlan_op_mode_ap) + continue; + + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + if (!peer->authorize) + continue; + + if (old_threshold - peer->peer_bs_inact >= + new_threshold) { + dp_mark_peer_inact((void *)peer, true); + peer->peer_bs_inact = 0; + } else { + peer->peer_bs_inact = new_threshold - + (old_threshold - peer->peer_bs_inact); + } + } + } + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + qdf_spin_unlock_bh(&soc->peer_ref_mutex); +} + +/** + * dp_txrx_reset_inact_count(): Reset inact count + * @pdev_handle - device handle + * + * Return: void + */ +static void +dp_txrx_reset_inact_count(struct cdp_pdev *pdev_handle) +{ + struct dp_vdev *vdev = NULL; 
+ struct dp_peer *peer = NULL; + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + struct dp_soc *soc = pdev->soc; + + qdf_spin_lock_bh(&soc->peer_ref_mutex); + qdf_spin_lock_bh(&pdev->vdev_list_lock); + TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { + if (vdev->opmode != wlan_op_mode_ap) + continue; + + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + if (!peer->authorize) + continue; + + peer->peer_bs_inact = soc->pdev_bs_inact_reload; + } + } + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + qdf_spin_unlock_bh(&soc->peer_ref_mutex); +} + +/** + * dp_set_inact_params(): set inactivity params + * @pdev_handle - device handle + * @inact_check_interval - inactivity interval + * @inact_normal - Inactivity normal + * @inact_overload - Inactivity overload + * + * Return: bool + */ +bool dp_set_inact_params(struct cdp_pdev *pdev_handle, + u_int16_t inact_check_interval, + u_int16_t inact_normal, u_int16_t inact_overload) +{ + struct dp_soc *soc; + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + + if (!pdev) + return false; + + soc = pdev->soc; + if (!soc) + return false; + + soc->pdev_bs_inact_interval = inact_check_interval; + soc->pdev_bs_inact_normal = inact_normal; + soc->pdev_bs_inact_overload = inact_overload; + + dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev, + soc->pdev_bs_inact_normal); + + return true; +} + +/** + * dp_start_inact_timer(): Inactivity timer start + * @pdev_handle - device handle + * @enable - Inactivity timer start/stop + * + * Return: bool + */ +bool dp_start_inact_timer(struct cdp_pdev *pdev_handle, bool enable) +{ + struct dp_soc *soc; + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + + if (!pdev) + return false; + + soc = pdev->soc; + if (!soc) + return false; + + if (enable) { + dp_txrx_reset_inact_count((struct cdp_pdev *)pdev); + qdf_timer_mod(&soc->pdev_bs_inact_timer, + soc->pdev_bs_inact_interval * 1000); + } else { + qdf_timer_stop(&soc->pdev_bs_inact_timer); + } + + return true; +} + 
+/** + * dp_set_overload(): Set inactivity overload + * @pdev_handle - device handle + * @overload - overload status + * + * Return: void + */ +void dp_set_overload(struct cdp_pdev *pdev_handle, bool overload) +{ + struct dp_soc *soc; + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + + if (!pdev) + return; + + soc = pdev->soc; + if (!soc) + return; + + dp_txrx_update_inact_threshold((struct cdp_pdev *)pdev, + overload ? soc->pdev_bs_inact_overload : + soc->pdev_bs_inact_normal); +} + +/** + * dp_peer_is_inact(): check whether peer is inactive + * @peer_handle - datapath peer handle + * + * Return: bool + */ +bool dp_peer_is_inact(void *peer_handle) +{ + struct dp_peer *peer = (struct dp_peer *)peer_handle; + + if (!peer) + return false; + + return peer->peer_bs_inact_flag == 1; +} + +/** + * dp_init_inact_timer: initialize the inact timer + * @soc - SOC handle + * + * Return: void + */ +void dp_init_inact_timer(struct dp_soc *soc) +{ + qdf_timer_init(soc->osdev, &soc->pdev_bs_inact_timer, + dp_txrx_peer_find_inact_timeout_handler, + (void *)soc, QDF_TIMER_TYPE_WAKE_APPS); +} + +#else + +bool dp_set_inact_params(struct cdp_pdev *pdev, u_int16_t inact_check_interval, + u_int16_t inact_normal, u_int16_t inact_overload) +{ + return false; +} + +bool dp_start_inact_timer(struct cdp_pdev *pdev, bool enable) +{ + return false; +} + +void dp_set_overload(struct cdp_pdev *pdev, bool overload) +{ + return; +} + +void dp_init_inact_timer(struct dp_soc *soc) +{ + return; +} + +bool dp_peer_is_inact(void *peer) +{ + return false; +} +#endif + +/* + * dp_peer_unref_delete() - unref and delete peer + * @peer_handle: Datapath peer handle + * + */ +void dp_peer_unref_delete(void *peer_handle) +{ + struct dp_peer *peer = (struct dp_peer *)peer_handle; + struct dp_peer *bss_peer = NULL; + struct dp_vdev *vdev = peer->vdev; + struct dp_pdev *pdev = vdev->pdev; + struct dp_soc *soc = pdev->soc; + struct dp_peer *tmppeer; + int found = 0; + uint16_t peer_id; + uint16_t vdev_id; 
+ + /* + * Hold the lock all the way from checking if the peer ref count + * is zero until the peer references are removed from the hash + * table and vdev list (if the peer ref count is zero). + * This protects against a new HL tx operation starting to use the + * peer object just after this function concludes it's done being used. + * Furthermore, the lock needs to be held while checking whether the + * vdev's list of peers is empty, to make sure that list is not modified + * concurrently with the empty check. + */ + qdf_spin_lock_bh(&soc->peer_ref_mutex); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: peer %pK ref_cnt(before decrement): %d\n", __func__, + peer, qdf_atomic_read(&peer->ref_cnt)); + if (qdf_atomic_dec_and_test(&peer->ref_cnt)) { + peer_id = peer->peer_ids[0]; + vdev_id = vdev->vdev_id; + + /* + * Make sure that the reference to the peer in + * peer object map is removed + */ + if (peer_id != HTT_INVALID_PEER) + soc->peer_id_to_obj_map[peer_id] = NULL; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw); + + /* remove the reference to the peer from the hash table */ + dp_peer_find_hash_remove(soc, peer); + + TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) { + if (tmppeer == peer) { + found = 1; + break; + } + } + if (found) { + TAILQ_REMOVE(&peer->vdev->peer_list, peer, + peer_list_elem); + } else { + /*Ignoring the remove operation as peer not found*/ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN, + "peer %pK not found in vdev (%pK)->peer_list:%pK", + peer, vdev, &peer->vdev->peer_list); + } + + /* cleanup the peer data */ + dp_peer_cleanup(vdev, peer); + + /* check whether the parent vdev has no peers left */ + if (TAILQ_EMPTY(&vdev->peer_list)) { + /* + * Now that there are no references to the peer, we can + * release the peer reference lock. 
+ */ + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + /* + * Check if the parent vdev was waiting for its peers + * to be deleted, in order for it to be deleted too. + */ + if (vdev->delete.pending) { + ol_txrx_vdev_delete_cb vdev_delete_cb = + vdev->delete.callback; + void *vdev_delete_context = + vdev->delete.context; + + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_INFO_HIGH, + FL("deleting vdev object %pK (%pM)" + " - its last peer is done"), + vdev, vdev->mac_addr.raw); + /* all peers are gone, go ahead and delete it */ + dp_tx_flow_pool_unmap_handler(pdev, vdev_id, + FLOW_TYPE_VDEV, + vdev_id); + dp_tx_vdev_detach(vdev); + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_INFO_HIGH, + FL("deleting vdev object %pK (%pM)"), + vdev, vdev->mac_addr.raw); + + qdf_mem_free(vdev); + vdev = NULL; + if (vdev_delete_cb) + vdev_delete_cb(vdev_delete_context); + } + } else { + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + } + + if (vdev) { + if (vdev->vap_bss_peer == peer) { + vdev->vap_bss_peer = NULL; + } + } + + if (soc->cdp_soc.ol_ops->peer_unref_delete) { + soc->cdp_soc.ol_ops->peer_unref_delete(pdev->osif_pdev, + vdev_id, peer->mac_addr.raw); + } + + if (!vdev || !vdev->vap_bss_peer) { + goto free_peer; + } + +#ifdef notyet + qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer); +#else + bss_peer = vdev->vap_bss_peer; + DP_UPDATE_STATS(bss_peer, peer); + +free_peer: + qdf_mem_free(peer); + +#endif + } else { + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + } +} + +/* + * dp_peer_detach_wifi3() – Detach txrx peer + * @peer_handle: Datapath peer handle + * @bitmap: bitmap indicating special handling of request. 
+ * + */ +static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap) +{ + struct dp_peer *peer = (struct dp_peer *)peer_handle; + + /* redirect the peer's rx delivery function to point to a + * discard func + */ + + peer->rx_opt_proc = dp_rx_discard; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + FL("peer %pK (%pM)"), peer, peer->mac_addr.raw); + +#ifndef CONFIG_WIN + dp_local_peer_id_free(peer->vdev->pdev, peer); +#endif + qdf_spinlock_destroy(&peer->peer_info_lock); + + /* + * Remove the reference added during peer_attach. + * The peer will still be left allocated until the + * PEER_UNMAP message arrives to remove the other + * reference, added by the PEER_MAP message. + */ + dp_peer_unref_delete(peer_handle); +} + +/* + * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer + * @peer_handle: Datapath peer handle + * + */ +static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev) +{ + struct dp_vdev *vdev = (struct dp_vdev *)pvdev; + return vdev->mac_addr.raw; +} + +/* + * dp_vdev_set_wds() - Enable per packet stats + * @vdev_handle: DP VDEV handle + * @val: value + * + * Return: none + */ +static int dp_vdev_set_wds(void *vdev_handle, uint32_t val) +{ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + + vdev->wds_enabled = val; + return 0; +} + +/* + * dp_get_vdev_from_vdev_id_wifi3() – Detach txrx peer + * @peer_handle: Datapath peer handle + * + */ +static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev, + uint8_t vdev_id) +{ + struct dp_pdev *pdev = (struct dp_pdev *)dev; + struct dp_vdev *vdev = NULL; + + if (qdf_unlikely(!pdev)) + return NULL; + + qdf_spin_lock_bh(&pdev->vdev_list_lock); + TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { + if (vdev->vdev_id == vdev_id) + break; + } + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + + return (struct cdp_vdev *)vdev; +} + +static int dp_get_opmode(struct cdp_vdev *vdev_handle) +{ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + + return 
vdev->opmode; +} + +static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev) +{ + struct dp_vdev *vdev = (struct dp_vdev *)pvdev; + struct dp_pdev *pdev = vdev->pdev; + + return (struct cdp_cfg *)pdev->wlan_cfg_ctx; +} + +/** + * dp_reset_monitor_mode() - Disable monitor mode + * @pdev_handle: Datapath PDEV handle + * + * Return: 0 on success, not 0 on failure + */ +static int dp_reset_monitor_mode(struct cdp_pdev *pdev_handle) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + struct htt_rx_ring_tlv_filter htt_tlv_filter; + struct dp_soc *soc = pdev->soc; + uint8_t pdev_id; + int mac_id; + + pdev_id = pdev->pdev_id; + soc = pdev->soc; + + qdf_spin_lock_bh(&pdev->mon_lock); + + qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0); + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id); + + htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, + pdev->rxdma_mon_buf_ring[mac_id].hal_srng, + RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter); + + htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, + pdev->rxdma_mon_status_ring[mac_id].hal_srng, + RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter); + } + + pdev->monitor_vdev = NULL; + + qdf_spin_unlock_bh(&pdev->mon_lock); + + return 0; +} + +/** + * dp_set_nac() - set peer_nac + * @peer_handle: Datapath PEER handle + * + * Return: void + */ +static void dp_set_nac(struct cdp_peer *peer_handle) +{ + struct dp_peer *peer = (struct dp_peer *)peer_handle; + + peer->nac = 1; +} + +/** + * dp_get_tx_pending() - read pending tx + * @pdev_handle: Datapath PDEV handle + * + * Return: outstanding tx + */ +static int dp_get_tx_pending(struct cdp_pdev *pdev_handle) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + + return qdf_atomic_read(&pdev->num_tx_outstanding); +} + +/** + * dp_get_peer_mac_from_peer_id() - get peer mac + * @pdev_handle: Datapath PDEV handle + * @peer_id: Peer ID + * @peer_mac: MAC 
addr of PEER + * + * Return: void + */ +static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle, + uint32_t peer_id, uint8_t *peer_mac) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + struct dp_peer *peer; + + if (pdev && peer_mac) { + peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id); + if (peer && peer->mac_addr.raw) { + qdf_mem_copy(peer_mac, peer->mac_addr.raw, + DP_MAC_ADDR_LEN); + } + } +} + +/** + * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode + * @vdev_handle: Datapath VDEV handle + * @smart_monitor: Flag to denote if its smart monitor mode + * + * Return: 0 on success, not 0 on failure + */ +static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle, + uint8_t smart_monitor) +{ + /* Many monitor VAPs can exists in a system but only one can be up at + * anytime + */ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + struct dp_pdev *pdev; + struct htt_rx_ring_tlv_filter htt_tlv_filter; + struct dp_soc *soc; + uint8_t pdev_id; + int mac_id; + + qdf_assert(vdev); + + pdev = vdev->pdev; + pdev_id = pdev->pdev_id; + soc = pdev->soc; + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN, + "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n", + pdev, pdev_id, soc, vdev); + + /*Check if current pdev's monitor_vdev exists */ + if (pdev->monitor_vdev) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "vdev=%pK\n", vdev); + qdf_assert(vdev); + } + + pdev->monitor_vdev = vdev; + + /* If smart monitor mode, do not configure monitor ring */ + if (smart_monitor) + return QDF_STATUS_SUCCESS; + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, + "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n", + pdev->mon_filter_mode, pdev->fp_mgmt_filter, + pdev->fp_ctrl_filter, pdev->fp_data_filter, + pdev->mo_mgmt_filter, pdev->mo_ctrl_filter, + pdev->mo_data_filter); + + qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0); + + htt_tlv_filter.mpdu_start = 1; + htt_tlv_filter.msdu_start = 1; + 
htt_tlv_filter.packet = 1; + htt_tlv_filter.msdu_end = 1; + htt_tlv_filter.mpdu_end = 1; + htt_tlv_filter.packet_header = 1; + htt_tlv_filter.attention = 1; + htt_tlv_filter.ppdu_start = 0; + htt_tlv_filter.ppdu_end = 0; + htt_tlv_filter.ppdu_end_user_stats = 0; + htt_tlv_filter.ppdu_end_user_stats_ext = 0; + htt_tlv_filter.ppdu_end_status_done = 0; + htt_tlv_filter.header_per_msdu = 1; + htt_tlv_filter.enable_fp = + (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0; + htt_tlv_filter.enable_md = 0; + htt_tlv_filter.enable_mo = + (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0; + htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter; + htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter; + htt_tlv_filter.fp_data_filter = pdev->fp_data_filter; + htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter; + htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter; + htt_tlv_filter.mo_data_filter = pdev->mo_data_filter; + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id); + + htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, + pdev->rxdma_mon_buf_ring[mac_id].hal_srng, + RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter); + } + + qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0); + + htt_tlv_filter.mpdu_start = 1; + htt_tlv_filter.msdu_start = 0; + htt_tlv_filter.packet = 0; + htt_tlv_filter.msdu_end = 0; + htt_tlv_filter.mpdu_end = 0; + htt_tlv_filter.attention = 0; + htt_tlv_filter.ppdu_start = 1; + htt_tlv_filter.ppdu_end = 1; + htt_tlv_filter.ppdu_end_user_stats = 1; + htt_tlv_filter.ppdu_end_user_stats_ext = 1; + htt_tlv_filter.ppdu_end_status_done = 1; + htt_tlv_filter.enable_fp = 1; + htt_tlv_filter.enable_md = 0; + htt_tlv_filter.enable_mo = 1; + if (pdev->mcopy_mode) { + htt_tlv_filter.packet_header = 1; + } + htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL; + htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL; + htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL; + 
htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL; + htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL; + htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL; + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, + pdev->pdev_id); + + htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, + pdev->rxdma_mon_status_ring[mac_id].hal_srng, + RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter + * @pdev_handle: Datapath PDEV handle + * @filter_val: Flag to select Filter for monitor mode + * Return: 0 on success, not 0 on failure + */ +static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle, + struct cdp_monitor_filter *filter_val) +{ + /* Many monitor VAPs can exists in a system but only one can be up at + * anytime + */ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + struct dp_vdev *vdev = pdev->monitor_vdev; + struct htt_rx_ring_tlv_filter htt_tlv_filter; + struct dp_soc *soc; + uint8_t pdev_id; + int mac_id; + + pdev_id = pdev->pdev_id; + soc = pdev->soc; + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN, + "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n", + pdev, pdev_id, soc, vdev); + + /*Check if current pdev's monitor_vdev exists */ + if (!pdev->monitor_vdev) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "vdev=%pK\n", vdev); + qdf_assert(vdev); + } + + /* update filter mode, type in pdev structure */ + pdev->mon_filter_mode = filter_val->mode; + pdev->fp_mgmt_filter = filter_val->fp_mgmt; + pdev->fp_ctrl_filter = filter_val->fp_ctrl; + pdev->fp_data_filter = filter_val->fp_data; + pdev->mo_mgmt_filter = filter_val->mo_mgmt; + pdev->mo_ctrl_filter = filter_val->mo_ctrl; + pdev->mo_data_filter = filter_val->mo_data; + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, + "MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n", + pdev->mon_filter_mode, 
pdev->fp_mgmt_filter, + pdev->fp_ctrl_filter, pdev->fp_data_filter, + pdev->mo_mgmt_filter, pdev->mo_ctrl_filter, + pdev->mo_data_filter); + + qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0); + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id); + + htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, + pdev->rxdma_mon_buf_ring[mac_id].hal_srng, + RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter); + + htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, + pdev->rxdma_mon_status_ring[mac_id].hal_srng, + RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter); + } + + htt_tlv_filter.mpdu_start = 1; + htt_tlv_filter.msdu_start = 1; + htt_tlv_filter.packet = 1; + htt_tlv_filter.msdu_end = 1; + htt_tlv_filter.mpdu_end = 1; + htt_tlv_filter.packet_header = 1; + htt_tlv_filter.attention = 1; + htt_tlv_filter.ppdu_start = 0; + htt_tlv_filter.ppdu_end = 0; + htt_tlv_filter.ppdu_end_user_stats = 0; + htt_tlv_filter.ppdu_end_user_stats_ext = 0; + htt_tlv_filter.ppdu_end_status_done = 0; + htt_tlv_filter.header_per_msdu = 1; + htt_tlv_filter.enable_fp = + (pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0; + htt_tlv_filter.enable_md = 0; + htt_tlv_filter.enable_mo = + (pdev->mon_filter_mode & MON_FILTER_OTHER) ? 
1 : 0; + htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter; + htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter; + htt_tlv_filter.fp_data_filter = pdev->fp_data_filter; + htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter; + htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter; + htt_tlv_filter.mo_data_filter = pdev->mo_data_filter; + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id); + + htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, + pdev->rxdma_mon_buf_ring[mac_id].hal_srng, + RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter); + } + + qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0); + + htt_tlv_filter.mpdu_start = 1; + htt_tlv_filter.msdu_start = 0; + htt_tlv_filter.packet = 0; + htt_tlv_filter.msdu_end = 0; + htt_tlv_filter.mpdu_end = 0; + htt_tlv_filter.attention = 0; + htt_tlv_filter.ppdu_start = 1; + htt_tlv_filter.ppdu_end = 1; + htt_tlv_filter.ppdu_end_user_stats = 1; + htt_tlv_filter.ppdu_end_user_stats_ext = 1; + htt_tlv_filter.ppdu_end_status_done = 1; + htt_tlv_filter.enable_fp = 1; + htt_tlv_filter.enable_md = 0; + htt_tlv_filter.enable_mo = 1; + if (pdev->mcopy_mode) { + htt_tlv_filter.packet_header = 1; + } + htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL; + htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL; + htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL; + htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL; + htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL; + htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL; + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, + pdev->pdev_id); + + htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, + pdev->rxdma_mon_status_ring[mac_id].hal_srng, + RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_get_pdev_id_frm_pdev() - get pdev_id + * @pdev_handle: Datapath PDEV handle + * + * Return: 
pdev_id + */ +static +uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + + return pdev->pdev_id; +} + +/** + * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter + * @vdev_handle: Datapath VDEV handle + * Return: true on ucast filter flag set + */ +static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle) +{ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + struct dp_pdev *pdev; + + pdev = vdev->pdev; + + if ((pdev->fp_data_filter & FILTER_DATA_UCAST) || + (pdev->mo_data_filter & FILTER_DATA_UCAST)) + return true; + + return false; +} + +/** + * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter + * @vdev_handle: Datapath VDEV handle + * Return: true on mcast filter flag set + */ +static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle) +{ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + struct dp_pdev *pdev; + + pdev = vdev->pdev; + + if ((pdev->fp_data_filter & FILTER_DATA_MCAST) || + (pdev->mo_data_filter & FILTER_DATA_MCAST)) + return true; + + return false; +} + +/** + * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter + * @vdev_handle: Datapath VDEV handle + * Return: true on non data filter flag set + */ +static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle) +{ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + struct dp_pdev *pdev; + + pdev = vdev->pdev; + + if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) || + (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) { + if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) || + (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) { + return true; + } + } + + return false; +} + +#ifdef MESH_MODE_SUPPORT +void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val) +{ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + FL("val %d"), val); + vdev->mesh_vdev = val; +} + +/* + * 
dp_peer_set_mesh_rx_filter() - to set the mesh rx filter + * @vdev_hdl: virtual device object + * @val: value to be set + * + * Return: void + */ +void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val) +{ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + FL("val %d"), val); + vdev->mesh_rx_filter = val; +} +#endif + +/* + * dp_aggregate_pdev_ctrl_frames_stats()- function to agreegate peer stats + * Current scope is bar received count + * + * @pdev_handle: DP_PDEV handle + * + * Return: void + */ +#define STATS_PROC_TIMEOUT (HZ/1000) + +static void +dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev) +{ + struct dp_vdev *vdev; + struct dp_peer *peer; + uint32_t waitcnt; + + TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + if (!peer) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("DP Invalid Peer refernce")); + return; + } + + if (peer->delete_in_progress) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("DP Peer deletion in progress")); + continue; + } + + qdf_atomic_inc(&peer->ref_cnt); + waitcnt = 0; + dp_peer_rxtid_stats(peer, dp_rx_bar_stats_cb, pdev); + while (!(qdf_atomic_read(&(pdev->stats_cmd_complete))) + && waitcnt < 10) { + schedule_timeout_interruptible( + STATS_PROC_TIMEOUT); + waitcnt++; + } + qdf_atomic_set(&(pdev->stats_cmd_complete), 0); + dp_peer_unref_delete(peer); + } + } +} + +/** + * dp_rx_bar_stats_cb(): BAR received stats callback + * @soc: SOC handle + * @cb_ctxt: Call back context + * @reo_status: Reo status + * + * return: void + */ +void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt, + union hal_reo_status *reo_status) +{ + struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt; + struct hal_reo_queue_status *queue_status = &(reo_status->queue_status); + + if (queue_status->header.status != HAL_REO_CMD_SUCCESS) { + DP_TRACE_STATS(FATAL, "REO stats failure %d \n", + 
queue_status->header.status); + qdf_atomic_set(&(pdev->stats_cmd_complete), 1); + return; + } + + pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt; + qdf_atomic_set(&(pdev->stats_cmd_complete), 1); + +} + +/** + * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level + * @vdev: DP VDEV handle + * + * return: void + */ +void dp_aggregate_vdev_stats(struct dp_vdev *vdev) +{ + struct dp_peer *peer = NULL; + struct dp_soc *soc = vdev->pdev->soc; + + qdf_mem_set(&(vdev->stats.tx), sizeof(vdev->stats.tx), 0x0); + qdf_mem_set(&(vdev->stats.rx), sizeof(vdev->stats.rx), 0x0); + + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) + DP_UPDATE_STATS(vdev, peer); + + if (soc->cdp_soc.ol_ops->update_dp_stats) + soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev, + &vdev->stats, (uint16_t) vdev->vdev_id, + UPDATE_VDEV_STATS); + +} + +/** + * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level + * @pdev: DP PDEV handle + * + * return: void + */ +static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev) +{ + struct dp_vdev *vdev = NULL; + struct dp_soc *soc = pdev->soc; + + qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0); + qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0); + qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0); + + qdf_spin_lock_bh(&pdev->vdev_list_lock); + TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { + + dp_aggregate_vdev_stats(vdev); + DP_UPDATE_STATS(pdev, vdev); + + DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast); + + DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd); + DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed); + DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts); + DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts); + DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt); + DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error); + DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt); + DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host); + DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target); + 
DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host); + DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target); + DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt); + DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt); + DP_STATS_AGGR(pdev, vdev, + tx_i.mcast_en.dropped_map_error); + DP_STATS_AGGR(pdev, vdev, + tx_i.mcast_en.dropped_self_mac); + DP_STATS_AGGR(pdev, vdev, + tx_i.mcast_en.dropped_send_fail); + DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast); + DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error); + DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full); + DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail); + DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na); + DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full); + DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified); + DP_STATS_AGGR(pdev, vdev, tx_i.cce_classified_raw); + DP_STATS_AGGR(pdev, vdev, tx_i.mesh.exception_fw); + DP_STATS_AGGR(pdev, vdev, tx_i.mesh.completion_fw); + + pdev->stats.tx_i.dropped.dropped_pkt.num = + pdev->stats.tx_i.dropped.dma_error + + pdev->stats.tx_i.dropped.ring_full + + pdev->stats.tx_i.dropped.enqueue_fail + + pdev->stats.tx_i.dropped.desc_na + + pdev->stats.tx_i.dropped.res_full; + + pdev->stats.tx.last_ack_rssi = + vdev->stats.tx.last_ack_rssi; + pdev->stats.tx_i.tso.num_seg = + vdev->stats.tx_i.tso.num_seg; + } + qdf_spin_unlock_bh(&pdev->vdev_list_lock); + if (soc->cdp_soc.ol_ops->update_dp_stats) + soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev, + &pdev->stats, pdev->pdev_id, UPDATE_PDEV_STATS); + +} + +/** + * dp_vdev_getstats() - get vdev packet level stats + * @vdev_handle: Datapath VDEV handle + * @stats: cdp network device stats structure + * + * Return: void + */ +static void dp_vdev_getstats(void *vdev_handle, + struct cdp_dev_stats *stats) +{ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + + dp_aggregate_vdev_stats(vdev); +} + + +/** + * dp_pdev_getstats() - get pdev packet level stats + * @pdev_handle: Datapath PDEV handle + * @stats: cdp network device stats 
structure + * + * Return: void + */ +static void dp_pdev_getstats(void *pdev_handle, + struct cdp_dev_stats *stats) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + + dp_aggregate_pdev_stats(pdev); + + stats->tx_packets = pdev->stats.tx_i.rcvd.num; + stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes; + + stats->tx_errors = pdev->stats.tx.tx_failed + + pdev->stats.tx_i.dropped.dropped_pkt.num; + stats->tx_dropped = stats->tx_errors; + + stats->rx_packets = pdev->stats.rx.unicast.num + + pdev->stats.rx.multicast.num + + pdev->stats.rx.bcast.num; + stats->rx_bytes = pdev->stats.rx.unicast.bytes + + pdev->stats.rx.multicast.bytes + + pdev->stats.rx.bcast.bytes; +} + +/** + * dp_get_device_stats() - get interface level packet stats + * @handle: device handle + * @stats: cdp network device stats structure + * @type: device type pdev/vdev + * + * Return: void + */ +static void dp_get_device_stats(void *handle, + struct cdp_dev_stats *stats, uint8_t type) +{ + switch (type) { + case UPDATE_VDEV_STATS: + dp_vdev_getstats(handle, stats); + break; + case UPDATE_PDEV_STATS: + dp_pdev_getstats(handle, stats); + break; + default: + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "apstats cannot be updated for this input " + "type %d\n", type); + break; + } + +} + + +/** + * dp_print_pdev_tx_stats(): Print Pdev level TX stats + * @pdev: DP_PDEV Handle + * + * Return:void + */ +static inline void +dp_print_pdev_tx_stats(struct dp_pdev *pdev) +{ + uint8_t index = 0; + DP_PRINT_STATS("PDEV Tx Stats:\n"); + DP_PRINT_STATS("Received From Stack:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.tx_i.rcvd.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx_i.rcvd.bytes); + DP_PRINT_STATS("Processed:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.tx_i.processed.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx_i.processed.bytes); + DP_PRINT_STATS("Total Completions:"); + DP_PRINT_STATS(" Packets = %u", + pdev->stats.tx.comp_pkt.num); + DP_PRINT_STATS(" 
Bytes = %llu", + pdev->stats.tx.comp_pkt.bytes); + DP_PRINT_STATS("Successful Completions:"); + DP_PRINT_STATS(" Packets = %u", + pdev->stats.tx.tx_success.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx.tx_success.bytes); + DP_PRINT_STATS("Dropped:"); + DP_PRINT_STATS(" Total = %d", + pdev->stats.tx_i.dropped.dropped_pkt.num); + DP_PRINT_STATS(" Dma_map_error = %d", + pdev->stats.tx_i.dropped.dma_error); + DP_PRINT_STATS(" Ring Full = %d", + pdev->stats.tx_i.dropped.ring_full); + DP_PRINT_STATS(" Descriptor Not available = %d", + pdev->stats.tx_i.dropped.desc_na); + DP_PRINT_STATS(" HW enqueue failed= %d", + pdev->stats.tx_i.dropped.enqueue_fail); + DP_PRINT_STATS(" Resources Full = %d", + pdev->stats.tx_i.dropped.res_full); + DP_PRINT_STATS(" FW removed = %d", + pdev->stats.tx.dropped.fw_rem); + DP_PRINT_STATS(" FW removed transmitted = %d", + pdev->stats.tx.dropped.fw_rem_tx); + DP_PRINT_STATS(" FW removed untransmitted = %d", + pdev->stats.tx.dropped.fw_rem_notx); + DP_PRINT_STATS(" FW removed untransmitted fw_reason1 = %d", + pdev->stats.tx.dropped.fw_reason1); + DP_PRINT_STATS(" FW removed untransmitted fw_reason2 = %d", + pdev->stats.tx.dropped.fw_reason2); + DP_PRINT_STATS(" FW removed untransmitted fw_reason3 = %d", + pdev->stats.tx.dropped.fw_reason3); + DP_PRINT_STATS(" Aged Out from msdu/mpdu queues = %d", + pdev->stats.tx.dropped.age_out); + DP_PRINT_STATS("Scatter Gather:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.tx_i.sg.sg_pkt.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx_i.sg.sg_pkt.bytes); + DP_PRINT_STATS(" Dropped By Host = %d", + pdev->stats.tx_i.sg.dropped_host); + DP_PRINT_STATS(" Dropped By Target = %d", + pdev->stats.tx_i.sg.dropped_target); + DP_PRINT_STATS("TSO:"); + DP_PRINT_STATS(" Number of Segments = %d", + pdev->stats.tx_i.tso.num_seg); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.tx_i.tso.tso_pkt.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx_i.tso.tso_pkt.bytes); + DP_PRINT_STATS(" Dropped 
By Host = %d", + pdev->stats.tx_i.tso.dropped_host); + DP_PRINT_STATS("Mcast Enhancement:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.tx_i.mcast_en.mcast_pkt.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx_i.mcast_en.mcast_pkt.bytes); + DP_PRINT_STATS(" Dropped: Map Errors = %d", + pdev->stats.tx_i.mcast_en.dropped_map_error); + DP_PRINT_STATS(" Dropped: Self Mac = %d", + pdev->stats.tx_i.mcast_en.dropped_self_mac); + DP_PRINT_STATS(" Dropped: Send Fail = %d", + pdev->stats.tx_i.mcast_en.dropped_send_fail); + DP_PRINT_STATS(" Unicast sent = %d", + pdev->stats.tx_i.mcast_en.ucast); + DP_PRINT_STATS("Raw:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.tx_i.raw.raw_pkt.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx_i.raw.raw_pkt.bytes); + DP_PRINT_STATS(" DMA map error = %d", + pdev->stats.tx_i.raw.dma_map_error); + DP_PRINT_STATS("Reinjected:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.tx_i.reinject_pkts.num); + DP_PRINT_STATS(" Bytes = %llu\n", + pdev->stats.tx_i.reinject_pkts.bytes); + DP_PRINT_STATS("Inspected:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.tx_i.inspect_pkts.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx_i.inspect_pkts.bytes); + DP_PRINT_STATS("Nawds Multicast:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.tx_i.nawds_mcast.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.tx_i.nawds_mcast.bytes); + DP_PRINT_STATS("CCE Classified:"); + DP_PRINT_STATS(" CCE Classified Packets: %u", + pdev->stats.tx_i.cce_classified); + DP_PRINT_STATS(" RAW CCE Classified Packets: %u", + pdev->stats.tx_i.cce_classified_raw); + DP_PRINT_STATS("Mesh stats:"); + DP_PRINT_STATS(" frames to firmware: %u", + pdev->stats.tx_i.mesh.exception_fw); + DP_PRINT_STATS(" completions from fw: %u", + pdev->stats.tx_i.mesh.completion_fw); + DP_PRINT_STATS("PPDU stats counter"); + for (index = 0; index < CDP_PPDU_STATS_MAX_TAG; index++) { + DP_PRINT_STATS(" Tag[%d] = %llu", index, + pdev->stats.ppdu_stats_counter[index]); + } 
+} + +/** + * dp_print_pdev_rx_stats(): Print Pdev level RX stats + * @pdev: DP_PDEV Handle + * + * Return: void + */ +static inline void +dp_print_pdev_rx_stats(struct dp_pdev *pdev) +{ + DP_PRINT_STATS("PDEV Rx Stats:\n"); + DP_PRINT_STATS("Received From HW (Per Rx Ring):"); + DP_PRINT_STATS(" Packets = %d %d %d %d", + pdev->stats.rx.rcvd_reo[0].num, + pdev->stats.rx.rcvd_reo[1].num, + pdev->stats.rx.rcvd_reo[2].num, + pdev->stats.rx.rcvd_reo[3].num); + DP_PRINT_STATS(" Bytes = %llu %llu %llu %llu", + pdev->stats.rx.rcvd_reo[0].bytes, + pdev->stats.rx.rcvd_reo[1].bytes, + pdev->stats.rx.rcvd_reo[2].bytes, + pdev->stats.rx.rcvd_reo[3].bytes); + DP_PRINT_STATS("Replenished:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.replenish.pkts.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.replenish.pkts.bytes); + DP_PRINT_STATS(" Buffers Added To Freelist = %d", + pdev->stats.buf_freelist); + DP_PRINT_STATS(" Low threshold intr = %d", + pdev->stats.replenish.low_thresh_intrs); + DP_PRINT_STATS("Dropped:"); + DP_PRINT_STATS(" msdu_not_done = %d", + pdev->stats.dropped.msdu_not_done); + DP_PRINT_STATS(" mon_rx_drop = %d", + pdev->stats.dropped.mon_rx_drop); + DP_PRINT_STATS("Sent To Stack:"); + DP_PRINT_STATS(" Packets = %d", + pdev->stats.rx.to_stack.num); + DP_PRINT_STATS(" Bytes = %llu", + pdev->stats.rx.to_stack.bytes); + DP_PRINT_STATS("Multicast/Broadcast:"); + DP_PRINT_STATS(" Packets = %d", + (pdev->stats.rx.multicast.num + + pdev->stats.rx.bcast.num)); + DP_PRINT_STATS(" Bytes = %llu", + (pdev->stats.rx.multicast.bytes + + pdev->stats.rx.bcast.bytes)); + DP_PRINT_STATS("Errors:"); + DP_PRINT_STATS(" Rxdma Ring Un-inititalized = %d", + pdev->stats.replenish.rxdma_err); + DP_PRINT_STATS(" Desc Alloc Failed: = %d", + pdev->stats.err.desc_alloc_fail); + DP_PRINT_STATS(" IP checksum error = %d", + pdev->stats.err.ip_csum_err); + DP_PRINT_STATS(" TCP/UDP checksum error = %d", + pdev->stats.err.tcp_udp_csum_err); + + /* Get bar_recv_cnt */ + 
dp_aggregate_pdev_ctrl_frames_stats(pdev); + DP_PRINT_STATS("BAR Received Count: = %d", + pdev->stats.rx.bar_recv_cnt); + +} + +/** + * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats + * @pdev: DP_PDEV Handle + * + * Return: void + */ +static inline void +dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev) +{ + struct cdp_pdev_mon_stats *rx_mon_stats; + + rx_mon_stats = &pdev->rx_mon_stats; + + DP_PRINT_STATS("PDEV Rx Monitor Stats:\n"); + + dp_rx_mon_print_dbg_ppdu_stats(rx_mon_stats); + + DP_PRINT_STATS("status_ppdu_done_cnt = %d", + rx_mon_stats->status_ppdu_done); + DP_PRINT_STATS("dest_ppdu_done_cnt = %d", + rx_mon_stats->dest_ppdu_done); + DP_PRINT_STATS("dest_mpdu_done_cnt = %d", + rx_mon_stats->dest_mpdu_done); + DP_PRINT_STATS("dest_mpdu_drop_cnt = %d", + rx_mon_stats->dest_mpdu_drop); +} + +/** + * dp_print_soc_tx_stats(): Print SOC level stats + * @soc DP_SOC Handle + * + * Return: void + */ +static inline void +dp_print_soc_tx_stats(struct dp_soc *soc) +{ + uint8_t desc_pool_id; + soc->stats.tx.desc_in_use = 0; + + DP_PRINT_STATS("SOC Tx Stats:\n"); + + for (desc_pool_id = 0; + desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); + desc_pool_id++) + soc->stats.tx.desc_in_use += + soc->tx_desc[desc_pool_id].num_allocated; + + DP_PRINT_STATS("Tx Descriptors In Use = %d", + soc->stats.tx.desc_in_use); + DP_PRINT_STATS("Invalid peer:"); + DP_PRINT_STATS(" Packets = %d", + soc->stats.tx.tx_invalid_peer.num); + DP_PRINT_STATS(" Bytes = %llu", + soc->stats.tx.tx_invalid_peer.bytes); + DP_PRINT_STATS("Packets dropped due to TCL ring full = %d %d %d", + soc->stats.tx.tcl_ring_full[0], + soc->stats.tx.tcl_ring_full[1], + soc->stats.tx.tcl_ring_full[2]); + +} +/** + * dp_print_soc_rx_stats: Print SOC level Rx stats + * @soc: DP_SOC Handle + * + * Return:void + */ +static inline void +dp_print_soc_rx_stats(struct dp_soc *soc) +{ + uint32_t i; + char reo_error[DP_REO_ERR_LENGTH]; + char rxdma_error[DP_RXDMA_ERR_LENGTH]; + uint8_t index = 0; 
+ + DP_PRINT_STATS("SOC Rx Stats:\n"); + DP_PRINT_STATS("Errors:\n"); + DP_PRINT_STATS("Rx Decrypt Errors = %d", + (soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_DECRYPT] + + soc->stats.rx.err.rxdma_error[HAL_RXDMA_ERR_TKIP_MIC])); + DP_PRINT_STATS("Invalid RBM = %d", + soc->stats.rx.err.invalid_rbm); + DP_PRINT_STATS("Invalid Vdev = %d", + soc->stats.rx.err.invalid_vdev); + DP_PRINT_STATS("Invalid Pdev = %d", + soc->stats.rx.err.invalid_pdev); + DP_PRINT_STATS("Invalid Peer = %d", + soc->stats.rx.err.rx_invalid_peer.num); + DP_PRINT_STATS("HAL Ring Access Fail = %d", + soc->stats.rx.err.hal_ring_access_fail); + + for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) { + index += qdf_snprint(&rxdma_error[index], + DP_RXDMA_ERR_LENGTH - index, + " %d", soc->stats.rx.err.rxdma_error[i]); + } + DP_PRINT_STATS("RXDMA Error (0-31):%s", + rxdma_error); + + index = 0; + for (i = 0; i < HAL_REO_ERR_MAX; i++) { + index += qdf_snprint(&reo_error[index], + DP_REO_ERR_LENGTH - index, + " %d", soc->stats.rx.err.reo_error[i]); + } + DP_PRINT_STATS("REO Error(0-14):%s", + reo_error); +} + + +/** + * dp_print_ring_stat_from_hal(): Print hal level ring stats + * @soc: DP_SOC handle + * @srng: DP_SRNG handle + * @ring_name: SRNG name + * + * Return: void + */ +static inline void +dp_print_ring_stat_from_hal(struct dp_soc *soc, struct dp_srng *srng, + char *ring_name) +{ + uint32_t tailp; + uint32_t headp; + + if (srng->hal_srng != NULL) { + hal_api_get_tphp(soc->hal_soc, srng->hal_srng, &tailp, &headp); + DP_PRINT_STATS("%s : Head pointer = %d Tail Pointer = %d\n", + ring_name, headp, tailp); + } +} + +/** + * dp_print_ring_stats(): Print tail and head pointer + * @pdev: DP_PDEV handle + * + * Return:void + */ +static inline void +dp_print_ring_stats(struct dp_pdev *pdev) +{ + uint32_t i; + char ring_name[STR_MAXLEN + 1]; + int mac_id; + + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->reo_exception_ring, + "Reo Exception Ring"); + dp_print_ring_stat_from_hal(pdev->soc, + 
&pdev->soc->reo_reinject_ring, + "Reo Inject Ring"); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->reo_cmd_ring, + "Reo Command Ring"); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->reo_status_ring, + "Reo Status Ring"); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->rx_rel_ring, + "Rx Release ring"); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->tcl_cmd_ring, + "Tcl command Ring"); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->tcl_status_ring, + "Tcl Status Ring"); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->wbm_desc_rel_ring, + "Wbm Desc Rel Ring"); + for (i = 0; i < MAX_REO_DEST_RINGS; i++) { + snprintf(ring_name, STR_MAXLEN, "Reo Dest Ring %d", i); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->reo_dest_ring[i], + ring_name); + } + for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) { + snprintf(ring_name, STR_MAXLEN, "Tcl Data Ring %d", i); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->tcl_data_ring[i], + ring_name); + } + for (i = 0; i < MAX_TCL_DATA_RINGS; i++) { + snprintf(ring_name, STR_MAXLEN, "Tx Comp Ring %d", i); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->soc->tx_comp_ring[i], + ring_name); + } + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->rx_refill_buf_ring, + "Rx Refill Buf Ring"); + + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->rx_refill_buf_ring2, + "Second Rx Refill Buf Ring"); + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->rxdma_mon_buf_ring[mac_id], + "Rxdma Mon Buf Ring"); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->rxdma_mon_dst_ring[mac_id], + "Rxdma Mon Dst Ring"); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->rxdma_mon_status_ring[mac_id], + "Rxdma Mon Status Ring"); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->rxdma_mon_desc_ring[mac_id], + "Rxdma mon desc Ring"); + } + + for (i = 0; i < MAX_RX_MAC_RINGS; i++) { + snprintf(ring_name, STR_MAXLEN, "Rxdma err dst ring %d", 
i); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->rxdma_err_dst_ring[i], + ring_name); + } + + for (i = 0; i < MAX_RX_MAC_RINGS; i++) { + snprintf(ring_name, STR_MAXLEN, "Rx mac buf ring %d", i); + dp_print_ring_stat_from_hal(pdev->soc, + &pdev->rx_mac_buf_ring[i], + ring_name); + } +} + +/** + * dp_txrx_host_stats_clr(): Reinitialize the txrx stats + * @vdev: DP_VDEV handle + * + * Return:void + */ +static inline void +dp_txrx_host_stats_clr(struct dp_vdev *vdev) +{ + struct dp_peer *peer = NULL; + struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc; + + DP_STATS_CLR(vdev->pdev); + DP_STATS_CLR(vdev->pdev->soc); + DP_STATS_CLR(vdev); + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + if (!peer) + return; + DP_STATS_CLR(peer); + + if (soc->cdp_soc.ol_ops->update_dp_stats) { + soc->cdp_soc.ol_ops->update_dp_stats( + vdev->pdev->osif_pdev, + &peer->stats, + peer->peer_ids[0], + UPDATE_PEER_STATS); + } + + } + + if (soc->cdp_soc.ol_ops->update_dp_stats) + soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev, + &vdev->stats, (uint16_t)vdev->vdev_id, + UPDATE_VDEV_STATS); +} + +/** + * dp_print_rx_rates(): Print Rx rate stats + * @vdev: DP_VDEV handle + * + * Return:void + */ +static inline void +dp_print_rx_rates(struct dp_vdev *vdev) +{ + struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev; + uint8_t i, mcs, pkt_type; + uint8_t index = 0; + char nss[DP_NSS_LENGTH]; + + DP_PRINT_STATS("Rx Rate Info:\n"); + + for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) { + index = 0; + for (mcs = 0; mcs < MAX_MCS; mcs++) { + if (!dp_rate_string[pkt_type][mcs].valid) + continue; + + DP_PRINT_STATS(" %s = %d", + dp_rate_string[pkt_type][mcs].mcs_type, + pdev->stats.rx.pkt_type[pkt_type]. 
+ mcs_count[mcs]); + } + + DP_PRINT_STATS("\n"); + } + + index = 0; + for (i = 0; i < SS_COUNT; i++) { + index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index, + " %d", pdev->stats.rx.nss[i]); + } + DP_PRINT_STATS("NSS(1-8) = %s", + nss); + + DP_PRINT_STATS("SGI =" + " 0.8us %d," + " 0.4us %d," + " 1.6us %d," + " 3.2us %d,", + pdev->stats.rx.sgi_count[0], + pdev->stats.rx.sgi_count[1], + pdev->stats.rx.sgi_count[2], + pdev->stats.rx.sgi_count[3]); + DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d", + pdev->stats.rx.bw[0], pdev->stats.rx.bw[1], + pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]); + DP_PRINT_STATS("Reception Type =" + " SU: %d," + " MU_MIMO:%d," + " MU_OFDMA:%d," + " MU_OFDMA_MIMO:%d\n", + pdev->stats.rx.reception_type[0], + pdev->stats.rx.reception_type[1], + pdev->stats.rx.reception_type[2], + pdev->stats.rx.reception_type[3]); + DP_PRINT_STATS("Aggregation:\n"); + DP_PRINT_STATS("Number of Msdu's Part of Ampdus = %d", + pdev->stats.rx.ampdu_cnt); + DP_PRINT_STATS("Number of Msdu's With No Mpdu Level Aggregation : %d", + pdev->stats.rx.non_ampdu_cnt); + DP_PRINT_STATS("Number of Msdu's Part of Amsdu: %d", + pdev->stats.rx.amsdu_cnt); + DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation: %d", + pdev->stats.rx.non_amsdu_cnt); +} + +/** + * dp_print_tx_rates(): Print tx rates + * @vdev: DP_VDEV handle + * + * Return:void + */ +static inline void +dp_print_tx_rates(struct dp_vdev *vdev) +{ + struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev; + uint8_t mcs, pkt_type; + uint32_t index; + + DP_PRINT_STATS("Tx Rate Info:\n"); + + for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) { + index = 0; + for (mcs = 0; mcs < MAX_MCS; mcs++) { + if (!dp_rate_string[pkt_type][mcs].valid) + continue; + + DP_PRINT_STATS(" %s = %d", + dp_rate_string[pkt_type][mcs].mcs_type, + pdev->stats.tx.pkt_type[pkt_type]. 
+ mcs_count[mcs]); + } + + DP_PRINT_STATS("\n"); + } + + DP_PRINT_STATS("SGI =" + " 0.8us %d" + " 0.4us %d" + " 1.6us %d" + " 3.2us %d", + pdev->stats.tx.sgi_count[0], + pdev->stats.tx.sgi_count[1], + pdev->stats.tx.sgi_count[2], + pdev->stats.tx.sgi_count[3]); + + DP_PRINT_STATS("BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d", + pdev->stats.tx.bw[0], pdev->stats.tx.bw[1], + pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]); + + DP_PRINT_STATS("OFDMA = %d", pdev->stats.tx.ofdma); + DP_PRINT_STATS("STBC = %d", pdev->stats.tx.stbc); + DP_PRINT_STATS("LDPC = %d", pdev->stats.tx.ldpc); + DP_PRINT_STATS("Retries = %d", pdev->stats.tx.retries); + DP_PRINT_STATS("Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi); + + DP_PRINT_STATS("Aggregation:\n"); + DP_PRINT_STATS("Number of Msdu's Part of Amsdu = %d", + pdev->stats.tx.amsdu_cnt); + DP_PRINT_STATS("Number of Msdu's With No Msdu Level Aggregation = %d", + pdev->stats.tx.non_amsdu_cnt); +} + +/** + * dp_print_peer_stats():print peer stats + * @peer: DP_PEER handle + * + * return void + */ +static inline void dp_print_peer_stats(struct dp_peer *peer) +{ + uint8_t i, mcs, pkt_type; + uint32_t index; + char nss[DP_NSS_LENGTH]; + DP_PRINT_STATS("Node Tx Stats:\n"); + DP_PRINT_STATS("Total Packet Completions = %d", + peer->stats.tx.comp_pkt.num); + DP_PRINT_STATS("Total Bytes Completions = %llu", + peer->stats.tx.comp_pkt.bytes); + DP_PRINT_STATS("Success Packets = %d", + peer->stats.tx.tx_success.num); + DP_PRINT_STATS("Success Bytes = %llu", + peer->stats.tx.tx_success.bytes); + DP_PRINT_STATS("Unicast Success Packets = %d", + peer->stats.tx.ucast.num); + DP_PRINT_STATS("Unicast Success Bytes = %llu", + peer->stats.tx.ucast.bytes); + DP_PRINT_STATS("Multicast Success Packets = %d", + peer->stats.tx.mcast.num); + DP_PRINT_STATS("Multicast Success Bytes = %llu", + peer->stats.tx.mcast.bytes); + DP_PRINT_STATS("Broadcast Success Packets = %d", + peer->stats.tx.bcast.num); + DP_PRINT_STATS("Broadcast Success Bytes = %llu", + 
peer->stats.tx.bcast.bytes); + DP_PRINT_STATS("Packets Failed = %d", + peer->stats.tx.tx_failed); + DP_PRINT_STATS("Packets In OFDMA = %d", + peer->stats.tx.ofdma); + DP_PRINT_STATS("Packets In STBC = %d", + peer->stats.tx.stbc); + DP_PRINT_STATS("Packets In LDPC = %d", + peer->stats.tx.ldpc); + DP_PRINT_STATS("Packet Retries = %d", + peer->stats.tx.retries); + DP_PRINT_STATS("MSDU's Part of AMSDU = %d", + peer->stats.tx.amsdu_cnt); + DP_PRINT_STATS("Last Packet RSSI = %d", + peer->stats.tx.last_ack_rssi); + DP_PRINT_STATS("Dropped At FW: Removed = %d", + peer->stats.tx.dropped.fw_rem); + DP_PRINT_STATS("Dropped At FW: Removed transmitted = %d", + peer->stats.tx.dropped.fw_rem_tx); + DP_PRINT_STATS("Dropped At FW: Removed Untransmitted = %d", + peer->stats.tx.dropped.fw_rem_notx); + DP_PRINT_STATS("Dropped : Age Out = %d", + peer->stats.tx.dropped.age_out); + DP_PRINT_STATS("NAWDS : "); + DP_PRINT_STATS(" Nawds multicast Drop Tx Packet = %d", + peer->stats.tx.nawds_mcast_drop); + DP_PRINT_STATS(" Nawds multicast Tx Packet Count = %d", + peer->stats.tx.nawds_mcast.num); + DP_PRINT_STATS(" Nawds multicast Tx Packet Bytes = %llu", + peer->stats.tx.nawds_mcast.bytes); + + DP_PRINT_STATS("Rate Info:"); + + for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) { + index = 0; + for (mcs = 0; mcs < MAX_MCS; mcs++) { + if (!dp_rate_string[pkt_type][mcs].valid) + continue; + + DP_PRINT_STATS(" %s = %d", + dp_rate_string[pkt_type][mcs].mcs_type, + peer->stats.tx.pkt_type[pkt_type]. 
+ mcs_count[mcs]); + } + + DP_PRINT_STATS("\n"); + } + + DP_PRINT_STATS("SGI = " + " 0.8us %d" + " 0.4us %d" + " 1.6us %d" + " 3.2us %d", + peer->stats.tx.sgi_count[0], + peer->stats.tx.sgi_count[1], + peer->stats.tx.sgi_count[2], + peer->stats.tx.sgi_count[3]); + DP_PRINT_STATS("Excess Retries per AC "); + DP_PRINT_STATS(" Best effort = %d", + peer->stats.tx.excess_retries_per_ac[0]); + DP_PRINT_STATS(" Background= %d", + peer->stats.tx.excess_retries_per_ac[1]); + DP_PRINT_STATS(" Video = %d", + peer->stats.tx.excess_retries_per_ac[2]); + DP_PRINT_STATS(" Voice = %d", + peer->stats.tx.excess_retries_per_ac[3]); + DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n", + peer->stats.tx.bw[2], peer->stats.tx.bw[3], + peer->stats.tx.bw[4], peer->stats.tx.bw[5]); + + index = 0; + for (i = 0; i < SS_COUNT; i++) { + index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index, + " %d", peer->stats.tx.nss[i]); + } + DP_PRINT_STATS("NSS(1-8) = %s", + nss); + + DP_PRINT_STATS("Aggregation:"); + DP_PRINT_STATS(" Number of Msdu's Part of Amsdu = %d", + peer->stats.tx.amsdu_cnt); + DP_PRINT_STATS(" Number of Msdu's With No Msdu Level Aggregation = %d\n", + peer->stats.tx.non_amsdu_cnt); + + DP_PRINT_STATS("Node Rx Stats:"); + DP_PRINT_STATS("Packets Sent To Stack = %d", + peer->stats.rx.to_stack.num); + DP_PRINT_STATS("Bytes Sent To Stack = %llu", + peer->stats.rx.to_stack.bytes); + for (i = 0; i < CDP_MAX_RX_RINGS; i++) { + DP_PRINT_STATS("Ring Id = %d", i); + DP_PRINT_STATS(" Packets Received = %d", + peer->stats.rx.rcvd_reo[i].num); + DP_PRINT_STATS(" Bytes Received = %llu", + peer->stats.rx.rcvd_reo[i].bytes); + } + DP_PRINT_STATS("Multicast Packets Received = %d", + peer->stats.rx.multicast.num); + DP_PRINT_STATS("Multicast Bytes Received = %llu", + peer->stats.rx.multicast.bytes); + DP_PRINT_STATS("Broadcast Packets Received = %d", + peer->stats.rx.bcast.num); + DP_PRINT_STATS("Broadcast Bytes Received = %llu", + peer->stats.rx.bcast.bytes); + 
DP_PRINT_STATS("Intra BSS Packets Received = %d", + peer->stats.rx.intra_bss.pkts.num); + DP_PRINT_STATS("Intra BSS Bytes Received = %llu", + peer->stats.rx.intra_bss.pkts.bytes); + DP_PRINT_STATS("Raw Packets Received = %d", + peer->stats.rx.raw.num); + DP_PRINT_STATS("Raw Bytes Received = %llu", + peer->stats.rx.raw.bytes); + DP_PRINT_STATS("Errors: MIC Errors = %d", + peer->stats.rx.err.mic_err); + DP_PRINT_STATS("Erros: Decryption Errors = %d", + peer->stats.rx.err.decrypt_err); + DP_PRINT_STATS("Msdu's Received As Part of Ampdu = %d", + peer->stats.rx.non_ampdu_cnt); + DP_PRINT_STATS("Msdu's Recived As Ampdu = %d", + peer->stats.rx.ampdu_cnt); + DP_PRINT_STATS("Msdu's Received Not Part of Amsdu's = %d", + peer->stats.rx.non_amsdu_cnt); + DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d", + peer->stats.rx.amsdu_cnt); + DP_PRINT_STATS("NAWDS : "); + DP_PRINT_STATS(" Nawds multicast Drop Rx Packet = %d", + peer->stats.rx.nawds_mcast_drop); + DP_PRINT_STATS("SGI =" + " 0.8us %d" + " 0.4us %d" + " 1.6us %d" + " 3.2us %d", + peer->stats.rx.sgi_count[0], + peer->stats.rx.sgi_count[1], + peer->stats.rx.sgi_count[2], + peer->stats.rx.sgi_count[3]); + DP_PRINT_STATS("BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d", + peer->stats.rx.bw[0], peer->stats.rx.bw[1], + peer->stats.rx.bw[2], peer->stats.rx.bw[3]); + DP_PRINT_STATS("Reception Type =" + " SU %d," + " MU_MIMO %d," + " MU_OFDMA %d," + " MU_OFDMA_MIMO %d", + peer->stats.rx.reception_type[0], + peer->stats.rx.reception_type[1], + peer->stats.rx.reception_type[2], + peer->stats.rx.reception_type[3]); + + + for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) { + index = 0; + for (mcs = 0; mcs < MAX_MCS; mcs++) { + if (!dp_rate_string[pkt_type][mcs].valid) + continue; + + DP_PRINT_STATS(" %s = %d", + dp_rate_string[pkt_type][mcs].mcs_type, + peer->stats.rx.pkt_type[pkt_type]. 
+ mcs_count[mcs]); + } + + DP_PRINT_STATS("\n"); + } + + index = 0; + for (i = 0; i < SS_COUNT; i++) { + index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index, + " %d", peer->stats.rx.nss[i]); + } + DP_PRINT_STATS("NSS(1-8) = %s", + nss); + + DP_PRINT_STATS("Aggregation:"); + DP_PRINT_STATS(" Msdu's Part of Ampdu = %d", + peer->stats.rx.ampdu_cnt); + DP_PRINT_STATS(" Msdu's With No Mpdu Level Aggregation = %d", + peer->stats.rx.non_ampdu_cnt); + DP_PRINT_STATS(" Msdu's Part of Amsdu = %d", + peer->stats.rx.amsdu_cnt); + DP_PRINT_STATS(" Msdu's With No Msdu Level Aggregation = %d", + peer->stats.rx.non_amsdu_cnt); +} + +/** + * dp_print_host_stats()- Function to print the stats aggregated at host + * @vdev_handle: DP_VDEV handle + * @type: host stats type + * + * Available Stat types + * TXRX_CLEAR_STATS : Clear the stats + * TXRX_RX_RATE_STATS: Print Rx Rate Info + * TXRX_TX_RATE_STATS: Print Tx Rate Info + * TXRX_TX_HOST_STATS: Print Tx Stats + * TXRX_RX_HOST_STATS: Print Rx Stats + * TXRX_AST_STATS: Print AST Stats + * TXRX_SRNG_PTR_STATS: Print SRNG ring pointer stats + * + * Return: 0 on success, print error message in case of failure + */ +static int +dp_print_host_stats(struct cdp_vdev *vdev_handle, enum cdp_host_txrx_stats type) +{ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev; + + dp_aggregate_pdev_stats(pdev); + + switch (type) { + case TXRX_CLEAR_STATS: + dp_txrx_host_stats_clr(vdev); + break; + case TXRX_RX_RATE_STATS: + dp_print_rx_rates(vdev); + break; + case TXRX_TX_RATE_STATS: + dp_print_tx_rates(vdev); + break; + case TXRX_TX_HOST_STATS: + dp_print_pdev_tx_stats(pdev); + dp_print_soc_tx_stats(pdev->soc); + break; + case TXRX_RX_HOST_STATS: + dp_print_pdev_rx_stats(pdev); + dp_print_soc_rx_stats(pdev->soc); + break; + case TXRX_AST_STATS: + dp_print_ast_stats(pdev->soc); + dp_print_peer_table(vdev); + break; + case TXRX_SRNG_PTR_STATS: + dp_print_ring_stats(pdev); + break; + case 
TXRX_RX_MON_STATS: + dp_print_pdev_rx_mon_stats(pdev); + break; + default: + DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats"); + break; + } + return 0; +} + +/* + * dp_get_host_peer_stats()- function to print peer stats + * @pdev_handle: DP_PDEV handle + * @mac_addr: mac address of the peer + * + * Return: void + */ +static void +dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr) +{ + struct dp_peer *peer; + uint8_t local_id; + peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr, + &local_id); + + if (!peer) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Invalid peer\n", __func__); + return; + } + + dp_print_peer_stats(peer); + dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL); + return; +} + +/* + * dp_ppdu_ring_reset()- Reset PPDU Stats ring + * @pdev: DP_PDEV handle + * + * Return: void + */ +static void +dp_ppdu_ring_reset(struct dp_pdev *pdev) +{ + struct htt_rx_ring_tlv_filter htt_tlv_filter; + int mac_id; + + qdf_mem_set(&(htt_tlv_filter), sizeof(htt_tlv_filter), 0x0); + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, + pdev->pdev_id); + + htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev, + pdev->rxdma_mon_status_ring[mac_id].hal_srng, + RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter); + } +} + +/* + * dp_ppdu_ring_cfg()- Configure PPDU Stats ring + * @pdev: DP_PDEV handle + * + * Return: void + */ +static void +dp_ppdu_ring_cfg(struct dp_pdev *pdev) +{ + struct htt_rx_ring_tlv_filter htt_tlv_filter = {0}; + int mac_id; + + htt_tlv_filter.mpdu_start = 1; + htt_tlv_filter.msdu_start = 0; + htt_tlv_filter.packet = 0; + htt_tlv_filter.msdu_end = 0; + htt_tlv_filter.mpdu_end = 0; + htt_tlv_filter.attention = 0; + htt_tlv_filter.ppdu_start = 1; + htt_tlv_filter.ppdu_end = 1; + htt_tlv_filter.ppdu_end_user_stats = 1; + htt_tlv_filter.ppdu_end_user_stats_ext = 1; + htt_tlv_filter.ppdu_end_status_done = 1; + 
htt_tlv_filter.enable_fp = 1; + htt_tlv_filter.enable_md = 0; + if (pdev->mcopy_mode) { + htt_tlv_filter.packet_header = 1; + htt_tlv_filter.enable_mo = 1; + } + htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL; + htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL; + htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL; + htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL; + htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL; + htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL; + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, + pdev->pdev_id); + + htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev, + pdev->rxdma_mon_status_ring[mac_id].hal_srng, + RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter); + } +} + +/* + * dp_config_debug_sniffer()- API to enable/disable debug sniffer + * @pdev_handle: DP_PDEV handle + * @val: user provided value + * + * Return: void + */ +static void +dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + + switch (val) { + case 0: + pdev->tx_sniffer_enable = 0; + pdev->mcopy_mode = 0; + + if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en) { + dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id); + dp_ppdu_ring_reset(pdev); + } else if (pdev->enhanced_stats_en) { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id); + } + break; + + case 1: + pdev->tx_sniffer_enable = 1; + pdev->mcopy_mode = 0; + + if (!pdev->pktlog_ppdu_stats) + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id); + break; + case 2: + pdev->mcopy_mode = 1; + pdev->tx_sniffer_enable = 0; + if (!pdev->enhanced_stats_en) + dp_ppdu_ring_cfg(pdev); + + if (!pdev->pktlog_ppdu_stats) + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id); + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid value\n"); + break; + } +} + +/* + * 
dp_enable_enhanced_stats()- API to enable enhanced statistcs + * @pdev_handle: DP_PDEV handle + * + * Return: void + */ +static void +dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + pdev->enhanced_stats_en = 1; + + if (!pdev->mcopy_mode) + dp_ppdu_ring_cfg(pdev); + + if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) + dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id); +} + +/* + * dp_disable_enhanced_stats()- API to disable enhanced statistcs + * @pdev_handle: DP_PDEV handle + * + * Return: void + */ +static void +dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + + pdev->enhanced_stats_en = 0; + + if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) + dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id); + + if (!pdev->mcopy_mode) + dp_ppdu_ring_reset(pdev); +} + +/* + * dp_get_fw_peer_stats()- function to print peer stats + * @pdev_handle: DP_PDEV handle + * @mac_addr: mac address of the peer + * @cap: Type of htt stats requested + * + * Currently Supporting only MAC ID based requests Only + * 1: HTT_PEER_STATS_REQ_MODE_NO_QUERY + * 2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM + * 3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM + * + * Return: void + */ +static void +dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr, + uint32_t cap) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + int i; + uint32_t config_param0 = 0; + uint32_t config_param1 = 0; + uint32_t config_param2 = 0; + uint32_t config_param3 = 0; + + HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1); + config_param0 |= (1 << (cap + 1)); + + for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) { + config_param1 |= (1 << i); + } + + config_param2 |= (mac_addr[0] & 0x000000ff); + config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00); + config_param2 |= ((mac_addr[2] << 16) & 
0x00ff0000);
	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);

	config_param3 |= (mac_addr[4] & 0x000000ff);
	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);

	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
			config_param0, config_param1, config_param2,
			config_param3, 0, 0, 0);

}

/* This struct definition will be removed from here
 * once it gets added in FW headers
 */
struct httstats_cmd_req {
	uint32_t config_param0;
	uint32_t config_param1;
	uint32_t config_param2;
	uint32_t config_param3;
	int cookie;
	u_int8_t stats_id;
};

/*
 * dp_get_htt_stats: forward an httstats request to FW
 * @pdev_handle: DP pdev handle
 * @data: pointer to request data (struct httstats_cmd_req)
 * @data_len: length for request data; must equal
 *            sizeof(struct httstats_cmd_req)
 *
 * return: void
 */
static void
dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;

	/* Only a debug assert; a wrong data_len is not rejected at runtime */
	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
				req->config_param0, req->config_param1,
				req->config_param2, req->config_param3,
				req->cookie, 0, 0);
}
/*
 * dp_set_pdev_param: function to set parameters in pdev
 * @pdev_handle: DP pdev handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * return: void
 */
static void dp_set_pdev_param(struct cdp_pdev *pdev_handle,
		enum cdp_pdev_param_type param, uint8_t val)
{
	switch (param) {
	case CDP_CONFIG_DEBUG_SNIFFER:
		dp_config_debug_sniffer(pdev_handle, val);
		break;
	default:
		/* all other pdev params are silently ignored */
		break;
	}
}

/*
 * dp_set_vdev_param: function to set parameters in vdev
 * @vdev_handle: DP vdev handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * return: void
 */
static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
		enum cdp_vdev_param_type param, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	switch
(param) { + case CDP_ENABLE_WDS: + vdev->wds_enabled = val; + break; + case CDP_ENABLE_NAWDS: + vdev->nawds_enabled = val; + break; + case CDP_ENABLE_MCAST_EN: + vdev->mcast_enhancement_en = val; + break; + case CDP_ENABLE_PROXYSTA: + vdev->proxysta_vdev = val; + break; + case CDP_UPDATE_TDLS_FLAGS: + vdev->tdls_link_connected = val; + break; + case CDP_CFG_WDS_AGING_TIMER: + if (val == 0) + qdf_timer_stop(&vdev->pdev->soc->wds_aging_timer); + else if (val != vdev->wds_aging_timer_val) + qdf_timer_mod(&vdev->pdev->soc->wds_aging_timer, val); + + vdev->wds_aging_timer_val = val; + break; + case CDP_ENABLE_AP_BRIDGE: + if (wlan_op_mode_sta != vdev->opmode) + vdev->ap_bridge_enabled = val; + else + vdev->ap_bridge_enabled = false; + break; + case CDP_ENABLE_CIPHER: + vdev->sec_type = val; + break; + case CDP_ENABLE_QWRAP_ISOLATION: + vdev->isolation_vdev = val; + break; + default: + break; + } + + dp_tx_vdev_update_search_flags(vdev); +} + +/** + * dp_peer_set_nawds: set nawds bit in peer + * @peer_handle: pointer to peer + * @value: enable/disable nawds + * + * return: void + */ +static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value) +{ + struct dp_peer *peer = (struct dp_peer *)peer_handle; + peer->nawds_enabled = value; +} + +/* + * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev + * @vdev_handle: DP_VDEV handle + * @map_id:ID of map that needs to be updated + * + * Return: void + */ +static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle, + uint8_t map_id) +{ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + vdev->dscp_tid_map_id = map_id; + return; +} + +/* + * dp_txrx_stats_publish(): publish pdev stats into a buffer + * @pdev_handle: DP_PDEV handle + * @buf: to hold pdev_stats + * + * Return: int + */ +static int +dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, void *buf) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + struct cdp_pdev_stats *buffer = (struct 
cdp_pdev_stats *) buf;
	struct cdp_txrx_stats_req req = {0,};

	dp_aggregate_pdev_stats(pdev);

	/* Request FW TX stats, then RX stats, sleeping a fixed interval
	 * after each request.
	 * NOTE(review): assumes the FW response lands within
	 * DP_MAX_SLEEP_TIME - confirm against the HTT response path.
	 */
	req.stats = HTT_DBG_EXT_STATS_PDEV_TX;
	req.cookie_val = 1;
	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
				req.param1, req.param2, req.param3, 0,
				req.cookie_val, 0);

	msleep(DP_MAX_SLEEP_TIME);

	req.stats = HTT_DBG_EXT_STATS_PDEV_RX;
	req.cookie_val = 1;
	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
				req.param1, req.param2, req.param3, 0,
				req.cookie_val, 0);

	msleep(DP_MAX_SLEEP_TIME);
	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));

	return TXRX_STATS_LEVEL;
}

/**
 * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
 * @pdev_handle: DP_PDEV handle
 * @map_id: ID of map that needs to be updated
 * @tos: index value in map
 * @tid: tid value passed by the user
 *
 * Return: void
 */
static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
		uint8_t map_id, uint8_t tos, uint8_t tid)
{
	uint8_t dscp;
	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;

	/* DSCP is extracted from the IP TOS byte via shift/mask */
	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
	pdev->dscp_tid_map[map_id][dscp] = tid;

	/* Only maps resident in HW are mirrored into HAL */
	if (map_id < HAL_MAX_HW_DSCP_TID_MAPS)
		hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
				map_id, dscp);
	return;
}

/**
 * dp_fw_stats_process(): Process TxRX FW stats request
 * @vdev_handle: DP VDEV handle
 * @req: stats request
 *
 * return: int (0 on HTT send success; 1 when vdev is NULL)
 */
static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
		struct cdp_txrx_stats_req *req)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = NULL;
	uint32_t stats = req->stats;
	uint8_t mac_id = req->mac_id;

	if (!vdev) {
		DP_TRACE(NONE, "VDEV not found");
		return 1;
	}
	pdev = vdev->pdev;

	/*
	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
	 * from param0 to param3 according to below rule:
	 *
	 * PARAM:
	 *   - config_param0 : start_offset (stats type)
	 *   - config_param1 : stats bmask from start offset
	 *   -
config_param2 : stats bmask from start offset + 32
	 *   - config_param3 : stats bmask from start offset + 64
	 */
	if (req->stats == CDP_TXRX_STATS_0) {
		/* "reset all": select PDEV_TX and set every bmask bit */
		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
		req->param1 = 0xFFFFFFFF;
		req->param2 = 0xFFFFFFFF;
		req->param3 = 0xFFFFFFFF;
	}

	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
		req->param1, req->param2, req->param3,
		0, 0, mac_id);
}

/**
 * dp_txrx_stats_request - function to map to firmware and host stats
 * @vdev: virtual handle
 * @req: stats request
 *
 * Maps the generic cdp stats id to either a FW HTT stats request or a
 * host stats dump, and dispatches to the matching handler.
 *
 * Return: integer
 */
static int dp_txrx_stats_request(struct cdp_vdev *vdev,
		struct cdp_txrx_stats_req *req)
{
	int host_stats;
	int fw_stats;
	enum cdp_stats stats;

	if (!vdev || !req) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Invalid vdev/req instance");
		return 0;
	}

	stats = req->stats;
	if (stats >= CDP_TXRX_MAX_STATS)
		return 0;

	/*
	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
	 * has to be updated if new FW HTT stats added
	 */
	if (stats > CDP_TXRX_STATS_HTT_MAX)
		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
	host_stats = dp_stats_mapping_table[stats][STATS_HOST];

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		 "stats: %u fw_stats_type: %d host_stats_type: %d",
		  stats, fw_stats, host_stats);

	if (fw_stats != TXRX_FW_STATS_INVALID) {
		/* update request with FW stats type */
		req->stats = fw_stats;
		return dp_fw_stats_process(vdev, req);
	}

	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
			(host_stats <= TXRX_HOST_STATS_MAX))
		return dp_print_host_stats(vdev, host_stats);
	else
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"Wrong Input for TxRx Stats");

	return 0;
}

/*
 * dp_print_napi_stats(): NAPI stats
 * @soc - soc handle
 */
static void dp_print_napi_stats(struct dp_soc *soc)
{
	hif_print_napi_stats(soc->hif_handle);
}

/*
 *
dp_print_per_ring_stats(): Packet count per ring + * @soc - soc handle + */ +static void dp_print_per_ring_stats(struct dp_soc *soc) +{ + uint8_t ring; + uint16_t core; + uint64_t total_packets; + + DP_TRACE(FATAL, "Reo packets per ring:"); + for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) { + total_packets = 0; + DP_TRACE(FATAL, "Packets on ring %u:", ring); + for (core = 0; core < NR_CPUS; core++) { + DP_TRACE(FATAL, "Packets arriving on core %u: %llu", + core, soc->stats.rx.ring_packets[core][ring]); + total_packets += soc->stats.rx.ring_packets[core][ring]; + } + DP_TRACE(FATAL, "Total packets on ring %u: %llu", + ring, total_packets); + } +} + +/* + * dp_txrx_path_stats() - Function to display dump stats + * @soc - soc handle + * + * return: none + */ +static void dp_txrx_path_stats(struct dp_soc *soc) +{ + uint8_t error_code; + uint8_t loop_pdev; + struct dp_pdev *pdev; + uint8_t i; + + for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) { + + pdev = soc->pdev_list[loop_pdev]; + dp_aggregate_pdev_stats(pdev); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Tx path Statistics:"); + + DP_TRACE(FATAL, "from stack: %u msdus (%llu bytes)", + pdev->stats.tx_i.rcvd.num, + pdev->stats.tx_i.rcvd.bytes); + DP_TRACE(FATAL, "processed from host: %u msdus (%llu bytes)", + pdev->stats.tx_i.processed.num, + pdev->stats.tx_i.processed.bytes); + DP_TRACE(FATAL, "successfully transmitted: %u msdus (%llu bytes)", + pdev->stats.tx.tx_success.num, + pdev->stats.tx.tx_success.bytes); + + DP_TRACE(FATAL, "Dropped in host:"); + DP_TRACE(FATAL, "Total packets dropped: %u,", + pdev->stats.tx_i.dropped.dropped_pkt.num); + DP_TRACE(FATAL, "Descriptor not available: %u", + pdev->stats.tx_i.dropped.desc_na); + DP_TRACE(FATAL, "Ring full: %u", + pdev->stats.tx_i.dropped.ring_full); + DP_TRACE(FATAL, "Enqueue fail: %u", + pdev->stats.tx_i.dropped.enqueue_fail); + DP_TRACE(FATAL, "DMA Error: %u", + pdev->stats.tx_i.dropped.dma_error); + + DP_TRACE(FATAL, "Dropped in 
hardware:"); + DP_TRACE(FATAL, "total packets dropped: %u", + pdev->stats.tx.tx_failed); + DP_TRACE(FATAL, "mpdu age out: %u", + pdev->stats.tx.dropped.age_out); + DP_TRACE(FATAL, "firmware removed: %u", + pdev->stats.tx.dropped.fw_rem); + DP_TRACE(FATAL, "firmware removed tx: %u", + pdev->stats.tx.dropped.fw_rem_tx); + DP_TRACE(FATAL, "firmware removed notx %u", + pdev->stats.tx.dropped.fw_rem_notx); + DP_TRACE(FATAL, "peer_invalid: %u", + pdev->soc->stats.tx.tx_invalid_peer.num); + + + DP_TRACE(FATAL, "Tx packets sent per interrupt:"); + DP_TRACE(FATAL, "Single Packet: %u", + pdev->stats.tx_comp_histogram.pkts_1); + DP_TRACE(FATAL, "2-20 Packets: %u", + pdev->stats.tx_comp_histogram.pkts_2_20); + DP_TRACE(FATAL, "21-40 Packets: %u", + pdev->stats.tx_comp_histogram.pkts_21_40); + DP_TRACE(FATAL, "41-60 Packets: %u", + pdev->stats.tx_comp_histogram.pkts_41_60); + DP_TRACE(FATAL, "61-80 Packets: %u", + pdev->stats.tx_comp_histogram.pkts_61_80); + DP_TRACE(FATAL, "81-100 Packets: %u", + pdev->stats.tx_comp_histogram.pkts_81_100); + DP_TRACE(FATAL, "101-200 Packets: %u", + pdev->stats.tx_comp_histogram.pkts_101_200); + DP_TRACE(FATAL, " 201+ Packets: %u", + pdev->stats.tx_comp_histogram.pkts_201_plus); + + DP_TRACE(FATAL, "Rx path statistics"); + + DP_TRACE(FATAL, "delivered %u msdus ( %llu bytes),", + pdev->stats.rx.to_stack.num, + pdev->stats.rx.to_stack.bytes); + for (i = 0; i < CDP_MAX_RX_RINGS; i++) + DP_TRACE(FATAL, "received on reo[%d] %u msdus ( %llu bytes),", + i, pdev->stats.rx.rcvd_reo[i].num, + pdev->stats.rx.rcvd_reo[i].bytes); + DP_TRACE(FATAL, "intra-bss packets %u msdus ( %llu bytes),", + pdev->stats.rx.intra_bss.pkts.num, + pdev->stats.rx.intra_bss.pkts.bytes); + DP_TRACE(FATAL, "intra-bss fails %u msdus ( %llu bytes),", + pdev->stats.rx.intra_bss.fail.num, + pdev->stats.rx.intra_bss.fail.bytes); + DP_TRACE(FATAL, "raw packets %u msdus ( %llu bytes),", + pdev->stats.rx.raw.num, + pdev->stats.rx.raw.bytes); + DP_TRACE(FATAL, "dropped: error %u msdus", 
+ pdev->stats.rx.err.mic_err); + DP_TRACE(FATAL, "peer invalid %u", + pdev->soc->stats.rx.err.rx_invalid_peer.num); + + DP_TRACE(FATAL, "Reo Statistics"); + DP_TRACE(FATAL, "rbm error: %u msdus", + pdev->soc->stats.rx.err.invalid_rbm); + DP_TRACE(FATAL, "hal ring access fail: %u msdus", + pdev->soc->stats.rx.err.hal_ring_access_fail); + + for (error_code = 0; error_code < HAL_REO_ERR_MAX; + error_code++) { + if (!pdev->soc->stats.rx.err.reo_error[error_code]) + continue; + DP_TRACE(FATAL, "Reo error number (%u): %u msdus", + error_code, + pdev->soc->stats.rx.err.reo_error[error_code]); + } + + for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX; + error_code++) { + if (!pdev->soc->stats.rx.err.rxdma_error[error_code]) + continue; + DP_TRACE(FATAL, "Rxdma error number (%u): %u msdus", + error_code, + pdev->soc->stats.rx.err + .rxdma_error[error_code]); + } + + DP_TRACE(FATAL, "Rx packets reaped per interrupt:"); + DP_TRACE(FATAL, "Single Packet: %u", + pdev->stats.rx_ind_histogram.pkts_1); + DP_TRACE(FATAL, "2-20 Packets: %u", + pdev->stats.rx_ind_histogram.pkts_2_20); + DP_TRACE(FATAL, "21-40 Packets: %u", + pdev->stats.rx_ind_histogram.pkts_21_40); + DP_TRACE(FATAL, "41-60 Packets: %u", + pdev->stats.rx_ind_histogram.pkts_41_60); + DP_TRACE(FATAL, "61-80 Packets: %u", + pdev->stats.rx_ind_histogram.pkts_61_80); + DP_TRACE(FATAL, "81-100 Packets: %u", + pdev->stats.rx_ind_histogram.pkts_81_100); + DP_TRACE(FATAL, "101-200 Packets: %u", + pdev->stats.rx_ind_histogram.pkts_101_200); + DP_TRACE(FATAL, " 201+ Packets: %u", + pdev->stats.rx_ind_histogram.pkts_201_plus); + + DP_TRACE_STATS(ERROR, "%s: tso_enable: %u lro_enable: %u rx_hash: %u napi_enable: %u", + __func__, + pdev->soc->wlan_cfg_ctx->tso_enabled, + pdev->soc->wlan_cfg_ctx->lro_enabled, + pdev->soc->wlan_cfg_ctx->rx_hash, + pdev->soc->wlan_cfg_ctx->napi_enabled); +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 + DP_TRACE_STATS(ERROR, "%s: Tx flow stop queue: %u tx flow start queue offset: %u", + __func__, + 
pdev->soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold, + pdev->soc->wlan_cfg_ctx->tx_flow_start_queue_offset); +#endif + } +} + +/* + * dp_txrx_dump_stats() - Dump statistics + * @value - Statistics option + */ +static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value, + enum qdf_stats_verbosity_level level) +{ + struct dp_soc *soc = + (struct dp_soc *)psoc; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!soc) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: soc is NULL", __func__); + return QDF_STATUS_E_INVAL; + } + + switch (value) { + case CDP_TXRX_PATH_STATS: + dp_txrx_path_stats(soc); + break; + + case CDP_RX_RING_STATS: + dp_print_per_ring_stats(soc); + break; + + case CDP_TXRX_TSO_STATS: + /* TODO: NOT IMPLEMENTED */ + break; + + case CDP_DUMP_TX_FLOW_POOL_INFO: + cdp_dump_flow_pool_info((struct cdp_soc_t *)soc); + break; + + case CDP_DP_NAPI_STATS: + dp_print_napi_stats(soc); + break; + + case CDP_TXRX_DESC_STATS: + /* TODO: NOT IMPLEMENTED */ + break; + + default: + status = QDF_STATUS_E_INVAL; + break; + } + + return status; + +} + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +/** + * dp_update_flow_control_parameters() - API to store datapath + * config parameters + * @soc: soc handle + * @cfg: ini parameter handle + * + * Return: void + */ +static inline +void dp_update_flow_control_parameters(struct dp_soc *soc, + struct cdp_config_params *params) +{ + soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold = + params->tx_flow_stop_queue_threshold; + soc->wlan_cfg_ctx->tx_flow_start_queue_offset = + params->tx_flow_start_queue_offset; +} +#else +static inline +void dp_update_flow_control_parameters(struct dp_soc *soc, + struct cdp_config_params *params) +{ +} +#endif + +/** + * dp_update_config_parameters() - API to store datapath + * config parameters + * @soc: soc handle + * @cfg: ini parameter handle + * + * Return: status + */ +static +QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc, + struct cdp_config_params *params) +{ + struct 
dp_soc *soc = (struct dp_soc *)psoc; + + if (!(soc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Invalid handle", __func__); + return QDF_STATUS_E_INVAL; + } + + soc->wlan_cfg_ctx->tso_enabled = params->tso_enable; + soc->wlan_cfg_ctx->lro_enabled = params->lro_enable; + soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable; + soc->wlan_cfg_ctx->tcp_udp_checksumoffload = + params->tcp_udp_checksumoffload; + soc->wlan_cfg_ctx->napi_enabled = params->napi_enable; + + dp_update_flow_control_parameters(soc, params); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_txrx_set_wds_rx_policy() - API to store datapath + * config parameters + * @vdev_handle - datapath vdev handle + * @cfg: ini parameter handle + * + * Return: status + */ +#ifdef WDS_VENDOR_EXTENSION +void +dp_txrx_set_wds_rx_policy( + struct cdp_vdev *vdev_handle, + u_int32_t val) +{ + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + struct dp_peer *peer; + if (vdev->opmode == wlan_op_mode_ap) { + /* for ap, set it on bss_peer */ + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + if (peer->bss_peer) { + peer->wds_ecm.wds_rx_filter = 1; + peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0; + peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 1:0; + break; + } + } + } else if (vdev->opmode == wlan_op_mode_sta) { + peer = TAILQ_FIRST(&vdev->peer_list); + peer->wds_ecm.wds_rx_filter = 1; + peer->wds_ecm.wds_rx_ucast_4addr = (val & WDS_POLICY_RX_UCAST_4ADDR) ? 1:0; + peer->wds_ecm.wds_rx_mcast_4addr = (val & WDS_POLICY_RX_MCAST_4ADDR) ? 
1:0; + } +} + +/** + * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy + * + * @peer_handle - datapath peer handle + * @wds_tx_ucast: policy for unicast transmission + * @wds_tx_mcast: policy for multicast transmission + * + * Return: void + */ +void +dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle, + int wds_tx_ucast, int wds_tx_mcast) +{ + struct dp_peer *peer = (struct dp_peer *)peer_handle; + if (wds_tx_ucast || wds_tx_mcast) { + peer->wds_enabled = 1; + peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast; + peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast; + } else { + peer->wds_enabled = 0; + peer->wds_ecm.wds_tx_ucast_4addr = 0; + peer->wds_ecm.wds_tx_mcast_4addr = 0; + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + FL("Policy Update set to :\ + peer->wds_enabled %d\ + peer->wds_ecm.wds_tx_ucast_4addr %d\ + peer->wds_ecm.wds_tx_mcast_4addr %d\n"), + peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr, + peer->wds_ecm.wds_tx_mcast_4addr); + return; +} +#endif + +static struct cdp_wds_ops dp_ops_wds = { + .vdev_set_wds = dp_vdev_set_wds, +#ifdef WDS_VENDOR_EXTENSION + .txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy, + .txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update, +#endif +}; + +/* + * dp_peer_delete_ast_entries(): Delete all AST entries for a peer + * @soc - datapath soc handle + * @peer - datapath peer handle + * + * Delete the AST entries belonging to a peer + */ +#ifdef FEATURE_AST +static inline void dp_peer_delete_ast_entries(struct dp_soc *soc, + struct dp_peer *peer) +{ + struct dp_ast_entry *ast_entry, *temp_ast_entry; + + qdf_spin_lock_bh(&soc->ast_lock); + DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) + dp_peer_del_ast(soc, ast_entry); + + qdf_spin_unlock_bh(&soc->ast_lock); +} +#else +static inline void dp_peer_delete_ast_entries(struct dp_soc *soc, + struct dp_peer *peer) +{ +} +#endif + +/* + * dp_txrx_data_tx_cb_set(): set the callback for non standard tx + * @vdev_handle 
- datapath vdev handle
 * @callback - callback function for non standard tx completions
 * @ctxt: opaque context passed back to @callback
 *
 */
static void
dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
		ol_txrx_data_tx_cb callback, void *ctxt)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	vdev->tx_non_std_data_callback.func = callback;
	vdev->tx_non_std_data_callback.ctxt = ctxt;
}

/**
 * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
 * @pdev_hdl: datapath pdev handle
 *
 * Return: opaque pointer to dp txrx handle
 */
static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;

	return pdev->dp_txrx_handle;
}

/**
 * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
 * @pdev_hdl: datapath pdev handle
 * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
 *
 * Return: void
 */
static void
dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;

	pdev->dp_txrx_handle = dp_txrx_hdl;
}

/**
 * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
 * @soc_handle: datapath soc handle
 *
 * Return: opaque pointer to external dp (non-core DP)
 */
static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	return soc->external_txrx_handle;
}

/**
 * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
 * @soc_handle: datapath soc handle
 * @txrx_handle: opaque pointer to external dp (non-core DP)
 *
 * Return: void
 */
static void
dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->external_txrx_handle = txrx_handle;
}

#ifdef FEATURE_AST
/* dp_peer_teardown_wifi3() - mark a peer for deletion and flush its AST
 * entries; see the refcount note in the body for the bss-peer exception.
 */
static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
{
	struct dp_vdev *vdev = (struct dp_vdev *) vdev_hdl;
	struct dp_peer *peer = (struct dp_peer *) peer_hdl;
	struct
dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;

	/*
	 * For BSS peer, new peer is not created on alloc_node if the
	 * peer with same address already exists, instead refcnt is
	 * increased for existing peer. Correspondingly in delete path,
	 * only refcnt is decreased; and peer is only deleted, when all
	 * references are deleted. So delete_in_progress should not be set
	 * for bss_peer, unless only 2 references remain (peer map reference
	 * and peer hash table reference).
	 */
	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2)) {
		return;
	}

	peer->delete_in_progress = true;
	dp_peer_delete_ast_entries(soc, peer);
}
#endif

#ifdef ATH_SUPPORT_NAC_RSSI
/*
 * dp_config_for_nac_rssi() - add/remove a neighbour (NAC) client for RSSI
 * tracking and forward the BSSID to FW.
 * @vdev_handle: DP VDEV handle
 * @cmd: CDP_NAC_PARAM_ADD / CDP_NAC_PARAM_DEL
 * @bssid: bssid passed through to FW
 * @client_macaddr: neighbour client mac address
 * @chan_num: channel number (unused in this function - TODO confirm)
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
		uint8_t chan_num)
{

	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;

	/* NOTE(review): filtering is enabled unconditionally, even for the
	 * DEL command - confirm this is intended.
	 */
	pdev->nac_rssi_filtering = 1;
	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
+ */ + + if (cmd == CDP_NAC_PARAM_ADD) { + qdf_mem_copy(vdev->cdp_nac_rssi.client_mac, + client_macaddr, DP_MAC_ADDR_LEN); + vdev->cdp_nac_rssi_enabled = 1; + } else if (cmd == CDP_NAC_PARAM_DEL) { + if (!qdf_mem_cmp(vdev->cdp_nac_rssi.client_mac, + client_macaddr, DP_MAC_ADDR_LEN)) { + /* delete this peer from the list */ + qdf_mem_zero(vdev->cdp_nac_rssi.client_mac, + DP_MAC_ADDR_LEN); + } + vdev->cdp_nac_rssi_enabled = 0; + } + + if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi) + soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi + (vdev->pdev->osif_pdev, vdev->vdev_id, cmd, bssid); + + return QDF_STATUS_SUCCESS; +} +#endif + +static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl, + uint32_t max_peers) +{ + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + + soc->max_peers = max_peers; + + qdf_print ("%s max_peers %u\n", __func__, max_peers); + + if (dp_peer_find_attach(soc)) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +static struct cdp_cmn_ops dp_ops_cmn = { + .txrx_soc_attach_target = dp_soc_attach_target_wifi3, + .txrx_vdev_attach = dp_vdev_attach_wifi3, + .txrx_vdev_detach = dp_vdev_detach_wifi3, + .txrx_pdev_attach = dp_pdev_attach_wifi3, + .txrx_pdev_detach = dp_pdev_detach_wifi3, + .txrx_peer_create = dp_peer_create_wifi3, + .txrx_peer_setup = dp_peer_setup_wifi3, +#ifdef FEATURE_AST + .txrx_peer_teardown = dp_peer_teardown_wifi3, +#else + .txrx_peer_teardown = NULL, +#endif + .txrx_peer_add_ast = dp_peer_add_ast_wifi3, + .txrx_peer_del_ast = dp_peer_del_ast_wifi3, + .txrx_peer_update_ast = dp_peer_update_ast_wifi3, + .txrx_peer_ast_hash_find = dp_peer_ast_hash_find_wifi3, + .txrx_peer_ast_get_pdev_id = dp_peer_ast_get_pdev_id_wifi3, + .txrx_peer_ast_get_next_hop = dp_peer_ast_get_next_hop_wifi3, + .txrx_peer_ast_set_type = dp_peer_ast_set_type_wifi3, + .txrx_peer_delete = dp_peer_delete_wifi3, + .txrx_vdev_register = dp_vdev_register_wifi3, + .txrx_soc_detach = dp_soc_detach_wifi3, + 
.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3, + .txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3, + .txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3, + .txrx_ath_getstats = dp_get_device_stats, + .addba_requestprocess = dp_addba_requestprocess_wifi3, + .addba_responsesetup = dp_addba_responsesetup_wifi3, + .delba_process = dp_delba_process_wifi3, + .set_addba_response = dp_set_addba_response, + .get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id, + .flush_cache_rx_queue = NULL, + /* TODO: get API's for dscp-tid need to be added*/ + .set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3, + .set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3, + .txrx_stats_request = dp_txrx_stats_request, + .txrx_set_monitor_mode = dp_vdev_set_monitor_mode, + .txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev, + .txrx_set_nac = dp_set_nac, + .txrx_get_tx_pending = dp_get_tx_pending, + .txrx_set_pdev_tx_capture = dp_config_debug_sniffer, + .txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id, + .display_stats = dp_txrx_dump_stats, + .txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3, + .txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3, +#ifdef DP_INTR_POLL_BASED + .txrx_intr_attach = dp_soc_interrupt_attach_wrapper, +#else + .txrx_intr_attach = dp_soc_interrupt_attach, +#endif + .txrx_intr_detach = dp_soc_interrupt_detach, + .set_pn_check = dp_set_pn_check_wifi3, + .update_config_parameters = dp_update_config_parameters, + /* TODO: Add other functions */ + .txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set, + .get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle, + .set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle, + .get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle, + .set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle, + .tx_send = dp_tx_send, + .txrx_peer_reset_ast = dp_wds_reset_ast_wifi3, + .txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3, + .txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3, + .txrx_peer_map_attach = 
dp_peer_map_attach_wifi3, +}; + +static struct cdp_ctrl_ops dp_ops_ctrl = { + .txrx_peer_authorize = dp_peer_authorize, +#ifdef QCA_SUPPORT_SON + .txrx_set_inact_params = dp_set_inact_params, + .txrx_start_inact_timer = dp_start_inact_timer, + .txrx_set_overload = dp_set_overload, + .txrx_peer_is_inact = dp_peer_is_inact, + .txrx_mark_peer_inact = dp_mark_peer_inact, +#endif + .txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type, + .txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type, +#ifdef MESH_MODE_SUPPORT + .txrx_set_mesh_mode = dp_peer_set_mesh_mode, + .txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter, +#endif + .txrx_set_vdev_param = dp_set_vdev_param, + .txrx_peer_set_nawds = dp_peer_set_nawds, + .txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest, + .txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest, + .txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers, + .txrx_update_filter_neighbour_peers = + dp_update_filter_neighbour_peers, + .txrx_get_sec_type = dp_get_sec_type, + /* TODO: Add other functions */ + .txrx_wdi_event_sub = dp_wdi_event_sub, + .txrx_wdi_event_unsub = dp_wdi_event_unsub, +#ifdef WDI_EVENT_ENABLE + .txrx_get_pldev = dp_get_pldev, +#endif + .txrx_set_pdev_param = dp_set_pdev_param, +#ifdef ATH_SUPPORT_NAC_RSSI + .txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi, +#endif +}; + +static struct cdp_me_ops dp_ops_me = { +#ifdef ATH_SUPPORT_IQUE + .tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor, + .tx_me_free_descriptor = dp_tx_me_free_descriptor, + .tx_me_convert_ucast = dp_tx_me_send_convert_ucast, +#endif +}; + +static struct cdp_mon_ops dp_ops_mon = { + .txrx_monitor_set_filter_ucast_data = NULL, + .txrx_monitor_set_filter_mcast_data = NULL, + .txrx_monitor_set_filter_non_data = NULL, + .txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data, + .txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data, + .txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data, + .txrx_reset_monitor_mode = 
dp_reset_monitor_mode, + /* Added support for HK advance filter */ + .txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter, +}; + +static struct cdp_host_stats_ops dp_ops_host_stats = { + .txrx_per_peer_stats = dp_get_host_peer_stats, + .get_fw_peer_stats = dp_get_fw_peer_stats, + .get_htt_stats = dp_get_htt_stats, + .txrx_enable_enhanced_stats = dp_enable_enhanced_stats, + .txrx_disable_enhanced_stats = dp_disable_enhanced_stats, + .txrx_stats_publish = dp_txrx_stats_publish, + /* TODO */ +}; + +static struct cdp_raw_ops dp_ops_raw = { + /* TODO */ +}; + +#ifdef CONFIG_WIN +static struct cdp_pflow_ops dp_ops_pflow = { + /* TODO */ +}; +#endif /* CONFIG_WIN */ + +#ifdef FEATURE_RUNTIME_PM +/** + * dp_runtime_suspend() - ensure DP is ready to runtime suspend + * @opaque_pdev: DP pdev context + * + * DP is ready to runtime suspend if there are no pending TX packets. + * + * Return: QDF_STATUS + */ +static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev) +{ + struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev; + struct dp_soc *soc = pdev->soc; + + /* Call DP TX flow control API to check if there is any + pending packets */ + + if (soc->intr_mode == DP_INTR_POLL) + qdf_timer_stop(&soc->int_timer); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_runtime_resume() - ensure DP is ready to runtime resume + * @opaque_pdev: DP pdev context + * + * Resume DP for runtime PM. 
+ * + * Return: QDF_STATUS + */ +static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev) +{ + struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev; + struct dp_soc *soc = pdev->soc; + void *hal_srng; + int i; + + if (soc->intr_mode == DP_INTR_POLL) + qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS); + + for (i = 0; i < MAX_TCL_DATA_RINGS; i++) { + hal_srng = soc->tcl_data_ring[i].hal_srng; + if (hal_srng) { + /* We actually only need to acquire the lock */ + hal_srng_access_start(soc->hal_soc, hal_srng); + /* Update SRC ring head pointer for HW to send + all pending packets */ + hal_srng_access_end(soc->hal_soc, hal_srng); + } + } + + return QDF_STATUS_SUCCESS; +} +#endif /* FEATURE_RUNTIME_PM */ + +static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev) +{ + struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev; + struct dp_soc *soc = pdev->soc; + + if (soc->intr_mode == DP_INTR_POLL) + qdf_timer_stop(&soc->int_timer); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev) +{ + struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev; + struct dp_soc *soc = pdev->soc; + + if (soc->intr_mode == DP_INTR_POLL) + qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS); + + return QDF_STATUS_SUCCESS; +} + +#ifndef CONFIG_WIN +static struct cdp_misc_ops dp_ops_misc = { + .tx_non_std = dp_tx_non_std, + .get_opmode = dp_get_opmode, +#ifdef FEATURE_RUNTIME_PM + .runtime_suspend = dp_runtime_suspend, + .runtime_resume = dp_runtime_resume, +#endif /* FEATURE_RUNTIME_PM */ + .pkt_log_init = dp_pkt_log_init, + .pkt_log_con_service = dp_pkt_log_con_service, +}; + +static struct cdp_flowctl_ops dp_ops_flowctl = { + /* WIFI 3.0 DP implement as required. 
*/ +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 + .flow_pool_map_handler = dp_tx_flow_pool_map, + .flow_pool_unmap_handler = dp_tx_flow_pool_unmap, + .register_pause_cb = dp_txrx_register_pause_cb, + .dump_flow_pool_info = dp_tx_dump_flow_pool_info, +#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ +}; + +static struct cdp_lflowctl_ops dp_ops_l_flowctl = { + /* WIFI 3.0 DP NOT IMPLEMENTED YET */ +}; + +#ifdef IPA_OFFLOAD +static struct cdp_ipa_ops dp_ops_ipa = { + .ipa_get_resource = dp_ipa_get_resource, + .ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr, + .ipa_op_response = dp_ipa_op_response, + .ipa_register_op_cb = dp_ipa_register_op_cb, + .ipa_get_stat = dp_ipa_get_stat, + .ipa_tx_data_frame = dp_tx_send_ipa_data_frame, + .ipa_enable_autonomy = dp_ipa_enable_autonomy, + .ipa_disable_autonomy = dp_ipa_disable_autonomy, + .ipa_setup = dp_ipa_setup, + .ipa_cleanup = dp_ipa_cleanup, + .ipa_setup_iface = dp_ipa_setup_iface, + .ipa_cleanup_iface = dp_ipa_cleanup_iface, + .ipa_enable_pipes = dp_ipa_enable_pipes, + .ipa_disable_pipes = dp_ipa_disable_pipes, + .ipa_set_perf_level = dp_ipa_set_perf_level +}; +#endif + +static struct cdp_bus_ops dp_ops_bus = { + .bus_suspend = dp_bus_suspend, + .bus_resume = dp_bus_resume +}; + +static struct cdp_ocb_ops dp_ops_ocb = { + /* WIFI 3.0 DP NOT IMPLEMENTED YET */ +}; + + +static struct cdp_throttle_ops dp_ops_throttle = { + /* WIFI 3.0 DP NOT IMPLEMENTED YET */ +}; + +static struct cdp_mob_stats_ops dp_ops_mob_stats = { + /* WIFI 3.0 DP NOT IMPLEMENTED YET */ +}; + +static struct cdp_cfg_ops dp_ops_cfg = { + /* WIFI 3.0 DP NOT IMPLEMENTED YET */ +}; + +/* + * dp_wrapper_peer_get_ref_by_addr - wrapper function to get to peer + * @dev: physical device instance + * @peer_mac_addr: peer mac address + * @local_id: local id for the peer + * @debug_id: to track enum peer access + + * Return: peer instance pointer + */ +static inline void * +dp_wrapper_peer_get_ref_by_addr(struct cdp_pdev *dev, u8 *peer_mac_addr, + u8 *local_id, + enum 
peer_debug_id_type debug_id) +{ + /* + * Currently this function does not implement the "get ref" + * functionality and is mapped to dp_find_peer_by_addr which does not + * increment the peer ref count. So the peer state is uncertain after + * calling this API. The functionality needs to be implemented. + * Accordingly the corresponding release_ref function is NULL. + */ + return dp_find_peer_by_addr(dev, peer_mac_addr, local_id); +} + +static struct cdp_peer_ops dp_ops_peer = { + .register_peer = dp_register_peer, + .clear_peer = dp_clear_peer, + .find_peer_by_addr = dp_find_peer_by_addr, + .find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev, + .peer_get_ref_by_addr = dp_wrapper_peer_get_ref_by_addr, + .peer_release_ref = NULL, + .local_peer_id = dp_local_peer_id, + .peer_find_by_local_id = dp_peer_find_by_local_id, + .peer_state_update = dp_peer_state_update, + .get_vdevid = dp_get_vdevid, + .get_vdev_by_sta_id = dp_get_vdev_by_sta_id, + .peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr, + .get_vdev_for_peer = dp_get_vdev_for_peer, + .get_peer_state = dp_get_peer_state, +}; +#endif + +static struct cdp_ops dp_txrx_ops = { + .cmn_drv_ops = &dp_ops_cmn, + .ctrl_ops = &dp_ops_ctrl, + .me_ops = &dp_ops_me, + .mon_ops = &dp_ops_mon, + .host_stats_ops = &dp_ops_host_stats, + .wds_ops = &dp_ops_wds, + .raw_ops = &dp_ops_raw, +#ifdef CONFIG_WIN + .pflow_ops = &dp_ops_pflow, +#endif /* CONFIG_WIN */ +#ifndef CONFIG_WIN + .misc_ops = &dp_ops_misc, + .cfg_ops = &dp_ops_cfg, + .flowctl_ops = &dp_ops_flowctl, + .l_flowctl_ops = &dp_ops_l_flowctl, +#ifdef IPA_OFFLOAD + .ipa_ops = &dp_ops_ipa, +#endif + .bus_ops = &dp_ops_bus, + .ocb_ops = &dp_ops_ocb, + .peer_ops = &dp_ops_peer, + .throttle_ops = &dp_ops_throttle, + .mob_stats_ops = &dp_ops_mob_stats, +#endif +}; + +/* + * dp_soc_set_txrx_ring_map() + * @dp_soc: DP handler for soc + * + * Return: Void + */ +static void dp_soc_set_txrx_ring_map(struct dp_soc *soc) +{ + uint32_t i; + for (i = 0; i < 
WLAN_CFG_INT_NUM_CONTEXTS; i++) { + soc->tx_ring_map[i] = dp_cpu_ring_map[DP_DEFAULT_MAP][i]; + } +} + +/* + * dp_soc_attach_wifi3() - Attach txrx SOC + * @ctrl_psoc: Opaque SOC handle from control plane + * @htc_handle: Opaque HTC handle + * @hif_handle: Opaque HIF handle + * @qdf_osdev: QDF device + * + * Return: DP SOC handle on success, NULL on failure + */ +/* + * Local prototype added to temporarily address warning caused by + * -Wmissing-prototypes. A more correct solution, namely to expose + * a prototype in an appropriate header file, will come later. + */ +void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle, + HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, + struct ol_if_ops *ol_ops); +void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle, + HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, + struct ol_if_ops *ol_ops) +{ + struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc)); + + if (!soc) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("DP SOC memory allocation failed")); + goto fail0; + } + + soc->cdp_soc.ops = &dp_txrx_ops; + soc->cdp_soc.ol_ops = ol_ops; + soc->ctrl_psoc = ctrl_psoc; + soc->osdev = qdf_osdev; + soc->hif_handle = hif_handle; + + soc->hal_soc = hif_get_hal_handle(hif_handle); + soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle, + soc->hal_soc, qdf_osdev); + if (!soc->htt_handle) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("HTT attach failed")); + goto fail1; + } + + soc->wlan_cfg_ctx = wlan_cfg_soc_attach(); + if (!soc->wlan_cfg_ctx) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("wlan_cfg_soc_attach failed")); + goto fail2; + } + + wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx, rx_hash); + soc->cce_disable = false; + + if (soc->cdp_soc.ol_ops->get_dp_cfg_param) { + int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc, + CDP_CFG_MAX_PEER_ID); + + if (ret != -EINVAL) { + wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret); + } + + ret = 
soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc, + CDP_CFG_CCE_DISABLE); + if (ret == 1) + soc->cce_disable = true; + } + + qdf_spinlock_create(&soc->peer_ref_mutex); + + qdf_spinlock_create(&soc->reo_desc_freelist_lock); + qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE); + + /* fill the tx/rx cpu ring map*/ + dp_soc_set_txrx_ring_map(soc); + + qdf_spinlock_create(&soc->htt_stats.lock); + /* initialize work queue for stats processing */ + qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc); + + /*Initialize inactivity timer for wifison */ + dp_init_inact_timer(soc); + + return (void *)soc; + +fail2: + htt_soc_detach(soc->htt_handle); +fail1: + qdf_mem_free(soc); +fail0: + return NULL; +} + +/* + * dp_get_pdev_for_mac_id() - Return pdev for mac_id + * + * @soc: handle to DP soc + * @mac_id: MAC id + * + * Return: Return pdev corresponding to MAC + */ +void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id) +{ + if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) + return soc->pdev_list[mac_id]; + + /* Typically for MCL as there only 1 PDEV*/ + return soc->pdev_list[0]; +} + +/* + * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported + * @soc: DP SoC context + * @max_mac_rings: No of MAC rings + * + * Return: None + */ +static +void dp_is_hw_dbs_enable(struct dp_soc *soc, + int *max_mac_rings) +{ + bool dbs_enable = false; + if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable) + dbs_enable = soc->cdp_soc.ol_ops-> + is_hw_dbs_2x2_capable(soc->ctrl_psoc); + + *max_mac_rings = (dbs_enable)?(*max_mac_rings):1; +} + +/* +* dp_set_pktlog_wifi3() - attach txrx vdev +* @pdev: Datapath PDEV handle +* @event: which event's notifications are being subscribed to +* @enable: WDI event subscribe or not. 
(True or False) +* +* Return: Success, NULL on failure +*/ +#ifdef WDI_EVENT_ENABLE +int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, + bool enable) +{ + struct dp_soc *soc = pdev->soc; + struct htt_rx_ring_tlv_filter htt_tlv_filter = {0}; + int max_mac_rings = wlan_cfg_get_num_mac_rings + (pdev->wlan_cfg_ctx); + uint8_t mac_id = 0; + + dp_is_hw_dbs_enable(soc, &max_mac_rings); + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + FL("Max_mac_rings %d \n"), + max_mac_rings); + + if (enable) { + switch (event) { + case WDI_EVENT_RX_DESC: + if (pdev->monitor_vdev) { + /* Nothing needs to be done if monitor mode is + * enabled + */ + return 0; + } + if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) { + pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL; + htt_tlv_filter.mpdu_start = 1; + htt_tlv_filter.msdu_start = 1; + htt_tlv_filter.msdu_end = 1; + htt_tlv_filter.mpdu_end = 1; + htt_tlv_filter.packet_header = 1; + htt_tlv_filter.attention = 1; + htt_tlv_filter.ppdu_start = 1; + htt_tlv_filter.ppdu_end = 1; + htt_tlv_filter.ppdu_end_user_stats = 1; + htt_tlv_filter.ppdu_end_user_stats_ext = 1; + htt_tlv_filter.ppdu_end_status_done = 1; + htt_tlv_filter.enable_fp = 1; + htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL; + htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL; + htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL; + htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL; + htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL; + htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL; + + for (mac_id = 0; mac_id < max_mac_rings; + mac_id++) { + int mac_for_pdev = + dp_get_mac_id_for_pdev(mac_id, + pdev->pdev_id); + + htt_h2t_rx_ring_cfg(soc->htt_handle, + mac_for_pdev, + pdev->rxdma_mon_status_ring[mac_id] + .hal_srng, + RXDMA_MONITOR_STATUS, + RX_BUFFER_SIZE, + &htt_tlv_filter); + + } + + if (soc->reap_timer_init) + qdf_timer_mod(&soc->mon_reap_timer, + DP_INTR_POLL_TIMER_MS); + } + break; + + case WDI_EVENT_LITE_RX: + if (pdev->monitor_vdev) { + /* Nothing needs to be done if 
monitor mode is + * enabled + */ + return 0; + } + + if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) { + pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE; + + htt_tlv_filter.ppdu_start = 1; + htt_tlv_filter.ppdu_end = 1; + htt_tlv_filter.ppdu_end_user_stats = 1; + htt_tlv_filter.ppdu_end_user_stats_ext = 1; + htt_tlv_filter.ppdu_end_status_done = 1; + htt_tlv_filter.mpdu_start = 1; + htt_tlv_filter.enable_fp = 1; + htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL; + htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL; + htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL; + htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL; + htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL; + htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL; + + for (mac_id = 0; mac_id < max_mac_rings; + mac_id++) { + int mac_for_pdev = + dp_get_mac_id_for_pdev(mac_id, + pdev->pdev_id); + + htt_h2t_rx_ring_cfg(soc->htt_handle, + mac_for_pdev, + pdev->rxdma_mon_status_ring[mac_id] + .hal_srng, + RXDMA_MONITOR_STATUS, + RX_BUFFER_SIZE_PKTLOG_LITE, + &htt_tlv_filter); + } + + if (soc->reap_timer_init) + qdf_timer_mod(&soc->mon_reap_timer, + DP_INTR_POLL_TIMER_MS); + } + break; + + case WDI_EVENT_LITE_T2H: + if (pdev->monitor_vdev) { + /* Nothing needs to be done if monitor mode is + * enabled + */ + return 0; + } + + for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { + int mac_for_pdev = dp_get_mac_id_for_pdev( + mac_id, pdev->pdev_id); + + pdev->pktlog_ppdu_stats = true; + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_TXLITE_STATS_BITMASK_CFG, + mac_for_pdev); + } + break; + + default: + /* Nothing needs to be done for other pktlog types */ + break; + } + } else { + switch (event) { + case WDI_EVENT_RX_DESC: + case WDI_EVENT_LITE_RX: + if (pdev->monitor_vdev) { + /* Nothing needs to be done if monitor mode is + * enabled + */ + return 0; + } + if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) { + pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED; + + for (mac_id = 0; mac_id < max_mac_rings; + mac_id++) { + int mac_for_pdev = + 
dp_get_mac_id_for_pdev(mac_id, + pdev->pdev_id); + + htt_h2t_rx_ring_cfg(soc->htt_handle, + mac_for_pdev, + pdev->rxdma_mon_status_ring[mac_id] + .hal_srng, + RXDMA_MONITOR_STATUS, + RX_BUFFER_SIZE, + &htt_tlv_filter); + } + + if (soc->reap_timer_init) + qdf_timer_stop(&soc->mon_reap_timer); + } + break; + case WDI_EVENT_LITE_T2H: + if (pdev->monitor_vdev) { + /* Nothing needs to be done if monitor mode is + * enabled + */ + return 0; + } + /* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW + * passing value 0. Once these macros will define in htt + * header file will use proper macros + */ + for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { + int mac_for_pdev = + dp_get_mac_id_for_pdev(mac_id, + pdev->pdev_id); + + pdev->pktlog_ppdu_stats = false; + if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) { + dp_h2t_cfg_stats_msg_send(pdev, 0, + mac_for_pdev); + } else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) { + dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER, + mac_for_pdev); + } else if (pdev->enhanced_stats_en) { + dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, + mac_for_pdev); + } + } + + break; + default: + /* Nothing needs to be done for other pktlog types */ + break; + } + } + return 0; +} +#endif + +#ifdef CONFIG_MCL +/* + * dp_service_mon_rings()- timer to reap monitor rings + * reqd as we are not getting ppdu end interrupts + * @arg: SoC Handle + * + * Return: + * + */ +static void dp_service_mon_rings(void *arg) +{ + struct dp_soc *soc = (struct dp_soc *) arg; + int ring = 0, work_done, mac_id; + struct dp_pdev *pdev = NULL; + + for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) { + pdev = soc->pdev_list[ring]; + if (pdev == NULL) + continue; + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, + pdev->pdev_id); + work_done = dp_mon_process(soc, mac_for_pdev, + QCA_NAPI_BUDGET); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + 
FL("Reaped %d descs from Monitor rings"), + work_done); + } + } + + qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS); +} + +#ifndef REMOVE_PKT_LOG +/** + * dp_pkt_log_init() - API to initialize packet log + * @ppdev: physical device handle + * @scn: HIF context + * + * Return: none + */ +void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn) +{ + struct dp_pdev *handle = (struct dp_pdev *)ppdev; + + if (handle->pkt_log_init) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Packet log not initialized", __func__); + return; + } + + pktlog_sethandle(&handle->pl_dev, scn); + pktlog_set_callback_regtype(PKTLOG_LITE_CALLBACK_REGISTRATION); + + if (pktlogmod_init(scn)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: pktlogmod_init failed", __func__); + handle->pkt_log_init = false; + } else { + handle->pkt_log_init = true; + } +} + +/** + * dp_pkt_log_con_service() - connect packet log service + * @ppdev: physical device handle + * @scn: device context + * + * Return: none + */ +static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn) +{ + struct dp_pdev *pdev = (struct dp_pdev *)ppdev; + + dp_pkt_log_init((struct cdp_pdev *)pdev, scn); + pktlog_htc_attach(); +} + +/** + * dp_pktlogmod_exit() - API to cleanup pktlog info + * @handle: Pdev handle + * + * Return: none + */ +static void dp_pktlogmod_exit(struct dp_pdev *handle) +{ + void *scn = (void *)handle->soc->hif_handle; + + if (!scn) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Invalid hif(scn) handle", __func__); + return; + } + + pktlogmod_exit(scn); + handle->pkt_log_init = false; +} +#endif +#else +static void dp_pktlogmod_exit(struct dp_pdev *handle) { } +#endif + diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c new file mode 100644 index 0000000000000000000000000000000000000000..44522b98bc439f6d7f9a55f44567e901ef304a19 --- /dev/null +++ 
b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.c @@ -0,0 +1,2333 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include "dp_htt.h" +#include "dp_types.h" +#include "dp_internal.h" +#include "dp_peer.h" +#include "dp_rx_defrag.h" +#include +#include +#ifdef CONFIG_MCL +#include +#include +#endif +#include +#include + +#ifdef DP_LFR +static inline void +dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params, + uint8_t valid) +{ + params->u.upd_queue_params.update_svld = 1; + params->u.upd_queue_params.svld = valid; + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Setting SSN valid bit to %d\n", + __func__, valid); +} +#else +static inline void +dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params, + uint8_t valid) {}; +#endif + +static inline int dp_peer_find_mac_addr_cmp( + union dp_align_mac_addr *mac_addr1, + union dp_align_mac_addr *mac_addr2) +{ + return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd) + /* + * Intentionally use & rather than &&. + * because the operands are binary rather than generic boolean, + * the functionality is equivalent. 
+ * Using && has the advantage of short-circuited evaluation, + * but using & has the advantage of no conditional branching, + * which is a more significant benefit. + */ + & + (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef)); +} + +static int dp_peer_find_map_attach(struct dp_soc *soc) +{ + uint32_t max_peers, peer_map_size; + + max_peers = soc->max_peers; + /* allocate the peer ID -> peer object map */ + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, + "\n<=== cfg max peer id %d ====>\n", max_peers); + peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]); + soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size); + if (!soc->peer_id_to_obj_map) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: peer map memory allocation failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + /* + * The peer_id_to_obj_map doesn't really need to be initialized, + * since elements are only used after they have been individually + * initialized. + * However, it is convenient for debugging to have all elements + * that are not in use set to 0. 
+ */ + qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size); + return 0; /* success */ +} + +static int dp_log2_ceil(unsigned value) +{ + unsigned tmp = value; + int log2 = -1; + + while (tmp) { + log2++; + tmp >>= 1; + } + if (1 << log2 != value) + log2++; + return log2; +} + +static int dp_peer_find_add_id_to_obj( + struct dp_peer *peer, + uint16_t peer_id) +{ + int i; + + for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) { + if (peer->peer_ids[i] == HTT_INVALID_PEER) { + peer->peer_ids[i] = peer_id; + return 0; /* success */ + } + } + return QDF_STATUS_E_FAILURE; /* failure */ +} + +#define DP_PEER_HASH_LOAD_MULT 2 +#define DP_PEER_HASH_LOAD_SHIFT 0 + +#define DP_AST_HASH_LOAD_MULT 2 +#define DP_AST_HASH_LOAD_SHIFT 0 + +static int dp_peer_find_hash_attach(struct dp_soc *soc) +{ + int i, hash_elems, log2; + + /* allocate the peer MAC address -> peer object hash table */ + hash_elems = soc->max_peers; + hash_elems *= DP_PEER_HASH_LOAD_MULT; + hash_elems >>= DP_PEER_HASH_LOAD_SHIFT; + log2 = dp_log2_ceil(hash_elems); + hash_elems = 1 << log2; + + soc->peer_hash.mask = hash_elems - 1; + soc->peer_hash.idx_bits = log2; + /* allocate an array of TAILQ peer object lists */ + soc->peer_hash.bins = qdf_mem_malloc( + hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer))); + if (!soc->peer_hash.bins) + return QDF_STATUS_E_NOMEM; + + for (i = 0; i < hash_elems; i++) + TAILQ_INIT(&soc->peer_hash.bins[i]); + + return 0; +} + +static void dp_peer_find_hash_detach(struct dp_soc *soc) +{ + qdf_mem_free(soc->peer_hash.bins); +} + +static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc, + union dp_align_mac_addr *mac_addr) +{ + unsigned index; + + index = + mac_addr->align2.bytes_ab ^ + mac_addr->align2.bytes_cd ^ + mac_addr->align2.bytes_ef; + index ^= index >> soc->peer_hash.idx_bits; + index &= soc->peer_hash.mask; + return index; +} + + +void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer) +{ + unsigned index; + + index = 
dp_peer_find_hash_index(soc, &peer->mac_addr); + qdf_spin_lock_bh(&soc->peer_ref_mutex); + /* + * It is important to add the new peer at the tail of the peer list + * with the bin index. Together with having the hash_find function + * search from head to tail, this ensures that if two entries with + * the same MAC address are stored, the one added first will be + * found first. + */ + TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem); + qdf_spin_unlock_bh(&soc->peer_ref_mutex); +} + +#ifdef FEATURE_AST +/* + * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table + * @soc: SoC handle + * + * Return: None + */ +static int dp_peer_ast_hash_attach(struct dp_soc *soc) +{ + int i, hash_elems, log2; + + hash_elems = ((soc->max_peers * DP_AST_HASH_LOAD_MULT) >> + DP_AST_HASH_LOAD_SHIFT); + + log2 = dp_log2_ceil(hash_elems); + hash_elems = 1 << log2; + + soc->ast_hash.mask = hash_elems - 1; + soc->ast_hash.idx_bits = log2; + + /* allocate an array of TAILQ peer object lists */ + soc->ast_hash.bins = qdf_mem_malloc( + hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, + dp_ast_entry))); + + if (!soc->ast_hash.bins) + return QDF_STATUS_E_NOMEM; + + for (i = 0; i < hash_elems; i++) + TAILQ_INIT(&soc->ast_hash.bins[i]); + + return 0; +} + +/* + * dp_peer_ast_hash_detach() - Free AST Hash table + * @soc: SoC handle + * + * Return: None + */ +static void dp_peer_ast_hash_detach(struct dp_soc *soc) +{ + qdf_mem_free(soc->ast_hash.bins); +} + +/* + * dp_peer_ast_hash_index() - Compute the AST hash from MAC address + * @soc: SoC handle + * + * Return: AST hash + */ +static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc, + union dp_align_mac_addr *mac_addr) +{ + uint32_t index; + + index = + mac_addr->align2.bytes_ab ^ + mac_addr->align2.bytes_cd ^ + mac_addr->align2.bytes_ef; + index ^= index >> soc->ast_hash.idx_bits; + index &= soc->ast_hash.mask; + return index; +} + +/* + * dp_peer_ast_hash_add() - Add AST entry into hash table + * @soc: 
SoC handle + * + * This function adds the AST entry into SoC AST hash table + * It assumes caller has taken the ast lock to protect the access to this table + * + * Return: None + */ +static inline void dp_peer_ast_hash_add(struct dp_soc *soc, + struct dp_ast_entry *ase) +{ + uint32_t index; + + index = dp_peer_ast_hash_index(soc, &ase->mac_addr); + TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem); +} + +/* + * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table + * @soc: SoC handle + * + * This function removes the AST entry from soc AST hash table + * It assumes caller has taken the ast lock to protect the access to this table + * + * Return: None + */ +static inline void dp_peer_ast_hash_remove(struct dp_soc *soc, + struct dp_ast_entry *ase) +{ + unsigned index; + struct dp_ast_entry *tmpase; + int found = 0; + + index = dp_peer_ast_hash_index(soc, &ase->mac_addr); + /* Check if tail is not empty before delete*/ + QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index])); + + TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) { + if (tmpase == ase) { + found = 1; + break; + } + } + + QDF_ASSERT(found); + TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem); +} + +/* + * dp_peer_ast_hash_find() - Find AST entry by MAC address + * @soc: SoC handle + * + * It assumes caller has taken the ast lock to protect the access to + * AST hash table + * + * Return: AST entry + */ +struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc, + uint8_t *ast_mac_addr) +{ + union dp_align_mac_addr local_mac_addr_aligned, *mac_addr; + unsigned index; + struct dp_ast_entry *ase; + + qdf_mem_copy(&local_mac_addr_aligned.raw[0], + ast_mac_addr, DP_MAC_ADDR_LEN); + mac_addr = &local_mac_addr_aligned; + + index = dp_peer_ast_hash_index(soc, mac_addr); + TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) { + if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) { + return ase; + } + } + + return NULL; +} + 
+/* + * dp_peer_map_ast() - Map the ast entry with HW AST Index + * @soc: SoC handle + * @peer: peer to which ast node belongs + * @mac_addr: MAC address of ast node + * @hw_peer_id: HW AST Index returned by target in peer map event + * @vdev_id: vdev id for VAP to which the peer belongs to + * + * Return: None + */ +static inline void dp_peer_map_ast(struct dp_soc *soc, + struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id, + uint8_t vdev_id) +{ + struct dp_ast_entry *ast_entry; + enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC; + bool ast_entry_found = FALSE; + + if (!peer) { + return; + } + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x\n", + __func__, peer, hw_peer_id, vdev_id, mac_addr[0], + mac_addr[1], mac_addr[2], mac_addr[3], + mac_addr[4], mac_addr[5]); + + qdf_spin_lock_bh(&soc->ast_lock); + TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) { + if (!(qdf_mem_cmp(mac_addr, ast_entry->mac_addr.raw, + DP_MAC_ADDR_LEN))) { + ast_entry->ast_idx = hw_peer_id; + soc->ast_table[hw_peer_id] = ast_entry; + ast_entry->is_active = TRUE; + peer_type = ast_entry->type; + ast_entry_found = TRUE; + } + } + + if (ast_entry_found || (peer->vdev && peer->vdev->proxysta_vdev)) { + if (soc->cdp_soc.ol_ops->peer_map_event) { + soc->cdp_soc.ol_ops->peer_map_event( + soc->ctrl_psoc, peer->peer_ids[0], + hw_peer_id, vdev_id, + mac_addr, peer_type); + } + } else { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "AST entry not found\n"); + } + + qdf_spin_unlock_bh(&soc->ast_lock); + return; +} + +/* + * dp_peer_add_ast() - Allocate and add AST entry into peer list + * @soc: SoC handle + * @peer: peer to which ast node belongs + * @mac_addr: MAC address of ast node + * @is_self: Is this base AST entry with peer mac address + * + * This API is used by WDS source port learning function to + * add a new AST entry into peer AST list + * + * Return: 0 if new entry is 
allocated, + * -1 if entry add failed + */ +int dp_peer_add_ast(struct dp_soc *soc, + struct dp_peer *peer, + uint8_t *mac_addr, + enum cdp_txrx_ast_entry_type type, + uint32_t flags) +{ + struct dp_ast_entry *ast_entry; + struct dp_vdev *vdev = peer->vdev; + uint8_t next_node_mac[6]; + int ret = -1; + + if (!vdev) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + FL("Peers vdev is NULL")); + QDF_ASSERT(0); + return ret; + } + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: peer %pK mac %02x:%02x:%02x:%02x:%02x:%02x\n", + __func__, peer, mac_addr[0], mac_addr[1], mac_addr[2], + mac_addr[3], mac_addr[4], mac_addr[5]); + + qdf_spin_lock_bh(&soc->ast_lock); + + /* If AST entry already exists , just return from here */ + ast_entry = dp_peer_ast_hash_find(soc, mac_addr); + + if (ast_entry) { + if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC) + ast_entry->is_active = TRUE; + + qdf_spin_unlock_bh(&soc->ast_lock); + return 0; + } + + ast_entry = (struct dp_ast_entry *) + qdf_mem_malloc(sizeof(struct dp_ast_entry)); + + if (!ast_entry) { + qdf_spin_unlock_bh(&soc->ast_lock); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + FL("fail to allocate ast_entry")); + QDF_ASSERT(0); + return ret; + } + + qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, DP_MAC_ADDR_LEN); + ast_entry->peer = peer; + ast_entry->pdev_id = vdev->pdev->pdev_id; + ast_entry->vdev_id = vdev->vdev_id; + + switch (type) { + case CDP_TXRX_AST_TYPE_STATIC: + peer->self_ast_entry = ast_entry; + ast_entry->type = CDP_TXRX_AST_TYPE_STATIC; + break; + case CDP_TXRX_AST_TYPE_WDS: + ast_entry->next_hop = 1; + ast_entry->type = CDP_TXRX_AST_TYPE_WDS; + break; + case CDP_TXRX_AST_TYPE_WDS_HM: + ast_entry->next_hop = 1; + ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM; + break; + case CDP_TXRX_AST_TYPE_MEC: + ast_entry->next_hop = 1; + ast_entry->type = CDP_TXRX_AST_TYPE_MEC; + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Incorrect AST entry type")); + } + + 
ast_entry->is_active = TRUE; + TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem); + DP_STATS_INC(soc, ast.added, 1); + dp_peer_ast_hash_add(soc, ast_entry); + qdf_spin_unlock_bh(&soc->ast_lock); + + if (ast_entry->type == CDP_TXRX_AST_TYPE_MEC) + qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6); + else + qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6); + + if (ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) { + if (QDF_STATUS_SUCCESS == + soc->cdp_soc.ol_ops->peer_add_wds_entry( + peer->vdev->osif_vdev, + mac_addr, + next_node_mac, + flags)) + return 0; + } + + return ret; +} + +/* + * dp_peer_del_ast() - Delete and free AST entry + * @soc: SoC handle + * @ast_entry: AST entry of the node + * + * This function removes the AST entry from peer and soc tables + * It assumes caller has taken the ast lock to protect the access to these + * tables + * + * Return: None + */ +void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry) +{ + struct dp_peer *peer = ast_entry->peer; + + if (ast_entry->next_hop) + soc->cdp_soc.ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev, + ast_entry->mac_addr.raw); + + soc->ast_table[ast_entry->ast_idx] = NULL; + TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem); + DP_STATS_INC(soc, ast.deleted, 1); + dp_peer_ast_hash_remove(soc, ast_entry); + qdf_mem_free(ast_entry); +} + +/* + * dp_peer_update_ast() - Delete and free AST entry + * @soc: SoC handle + * @peer: peer to which ast node belongs + * @ast_entry: AST entry of the node + * @flags: wds or hmwds + * + * This function update the AST entry to the roamed peer and soc tables + * It assumes caller has taken the ast lock to protect the access to these + * tables + * + * Return: 0 if ast entry is updated successfully + * -1 failure + */ +int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer, + struct dp_ast_entry *ast_entry, uint32_t flags) +{ + int ret = -1; + struct dp_peer *old_peer; + struct dp_peer *sa_peer; + + if 
 (ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) {
		sa_peer = ast_entry->peer;

		/*
		 * Kickout, when direct associated peer(SA) roams
		 * to another AP and reachable via TA peer
		 */
		if (!sa_peer->delete_in_progress) {
			sa_peer->delete_in_progress = true;
			/* kickout op is optional — guard before calling */
			if (soc->cdp_soc.ol_ops->peer_sta_kickout) {
				soc->cdp_soc.ol_ops->peer_sta_kickout(
					sa_peer->vdev->pdev->osif_pdev,
					ast_entry->mac_addr.raw);
			}
			return 0;
		}
	}

	/* Re-home the entry: unlink from the old peer, retarget the entry
	 * at the new peer's vdev/pdev, then relink.
	 */
	old_peer = ast_entry->peer;
	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);

	ast_entry->peer = peer;
	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
	ast_entry->vdev_id = peer->vdev->vdev_id;
	ast_entry->is_active = TRUE;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);

	/* mirror the update to the target */
	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
		peer->vdev->osif_vdev,
		ast_entry->mac_addr.raw,
		peer->mac_addr.raw,
		flags);

	return ret;
}

/*
 * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the pdev_id from the ast entry.
 *
 * Return: (uint8_t) pdev_id
 */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
	struct dp_ast_entry *ast_entry)
{
	return ast_entry->pdev_id;
}

/*
 * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the next hop from the ast entry.
 *
 * Return: (uint8_t) next_hop
 */
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
	struct dp_ast_entry *ast_entry)
{
	return ast_entry->next_hop;
}

/*
 * dp_peer_ast_set_type() - set type from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function sets the type in the ast entry.
+ * + * Return: + */ +void dp_peer_ast_set_type(struct dp_soc *soc, + struct dp_ast_entry *ast_entry, + enum cdp_txrx_ast_entry_type type) +{ + ast_entry->type = type; +} + +#else +int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer, + uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, + uint32_t flags) +{ + return 1; +} + +void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry) +{ +} + +int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer, + struct dp_ast_entry *ast_entry, uint32_t flags) +{ + return 1; +} + +struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc, + uint8_t *ast_mac_addr) +{ + return NULL; +} + +static int dp_peer_ast_hash_attach(struct dp_soc *soc) +{ + return 0; +} + +static inline void dp_peer_map_ast(struct dp_soc *soc, + struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id, + uint8_t vdev_id) +{ + return; +} + +static void dp_peer_ast_hash_detach(struct dp_soc *soc) +{ +} + +void dp_peer_ast_set_type(struct dp_soc *soc, + struct dp_ast_entry *ast_entry, + enum cdp_txrx_ast_entry_type type) +{ +} + +uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc, + struct dp_ast_entry *ast_entry) +{ + return 0xff; +} + + +uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc, + struct dp_ast_entry *ast_entry) +{ + return 0xff; +} +#endif + +struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc, + uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id) +{ + union dp_align_mac_addr local_mac_addr_aligned, *mac_addr; + unsigned index; + struct dp_peer *peer; + + if (mac_addr_is_aligned) { + mac_addr = (union dp_align_mac_addr *) peer_mac_addr; + } else { + qdf_mem_copy( + &local_mac_addr_aligned.raw[0], + peer_mac_addr, DP_MAC_ADDR_LEN); + mac_addr = &local_mac_addr_aligned; + } + index = dp_peer_find_hash_index(soc, mac_addr); + qdf_spin_lock_bh(&soc->peer_ref_mutex); + TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) { +#if ATH_SUPPORT_WRAP + /* ProxySTA may have multiple BSS 
peer with same MAC address, + * modified find will take care of finding the correct BSS peer. + */ + if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 && + ((peer->vdev->vdev_id == vdev_id) || + (vdev_id == DP_VDEV_ALL))) { +#else + if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) { +#endif + /* found it - increment the ref count before releasing + * the lock + */ + qdf_atomic_inc(&peer->ref_cnt); + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + return peer; + } + } + qdf_spin_unlock_bh(&soc->peer_ref_mutex); + return NULL; /* failure */ +} + +void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer) +{ + unsigned index; + struct dp_peer *tmppeer = NULL; + int found = 0; + + index = dp_peer_find_hash_index(soc, &peer->mac_addr); + /* Check if tail is not empty before delete*/ + QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index])); + /* + * DO NOT take the peer_ref_mutex lock here - it needs to be taken + * by the caller. + * The caller needs to hold the lock from the time the peer object's + * reference count is decremented and tested up through the time the + * reference to the peer object is removed from the hash table, by + * this function. + * Holding the lock only while removing the peer object reference + * from the hash table keeps the hash table consistent, but does not + * protect against a new HL tx context starting to use the peer object + * if it looks up the peer object from its MAC address just after the + * peer ref count is decremented to zero, but just before the peer + * object reference is removed from the hash table. 
+ */ + TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) { + if (tmppeer == peer) { + found = 1; + break; + } + } + QDF_ASSERT(found); + TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem); +} + +void dp_peer_find_hash_erase(struct dp_soc *soc) +{ + int i; + + /* + * Not really necessary to take peer_ref_mutex lock - by this point, + * it's known that the soc is no longer in use. + */ + for (i = 0; i <= soc->peer_hash.mask; i++) { + if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) { + struct dp_peer *peer, *peer_next; + + /* + * TAILQ_FOREACH_SAFE must be used here to avoid any + * memory access violation after peer is freed + */ + TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i], + hash_list_elem, peer_next) { + /* + * Don't remove the peer from the hash table - + * that would modify the list we are currently + * traversing, and it's not necessary anyway. + */ + /* + * Artificially adjust the peer's ref count to + * 1, so it will get deleted by + * dp_peer_unref_delete. 
+ */ + /* set to zero */ + qdf_atomic_init(&peer->ref_cnt); + /* incr to one */ + qdf_atomic_inc(&peer->ref_cnt); + dp_peer_unref_delete(peer); + } + } + } +} + +static void dp_peer_find_map_detach(struct dp_soc *soc) +{ + qdf_mem_free(soc->peer_id_to_obj_map); +} + +int dp_peer_find_attach(struct dp_soc *soc) +{ + if (dp_peer_find_map_attach(soc)) + return 1; + + if (dp_peer_find_hash_attach(soc)) { + dp_peer_find_map_detach(soc); + return 1; + } + + if (dp_peer_ast_hash_attach(soc)) { + dp_peer_find_hash_detach(soc); + dp_peer_find_map_detach(soc); + return 1; + } + return 0; /* success */ +} + +void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt, + union hal_reo_status *reo_status) +{ + struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt; + struct hal_reo_queue_status *queue_status = &(reo_status->queue_status); + + if (queue_status->header.status != HAL_REO_CMD_SUCCESS) { + DP_TRACE_STATS(FATAL, "REO stats failure %d for TID %d\n", + queue_status->header.status, rx_tid->tid); + return; + } + + DP_TRACE_STATS(FATAL, "REO queue stats (TID: %d): \n" + "ssn: %d\n" + "curr_idx : %d\n" + "pn_31_0 : %08x\n" + "pn_63_32 : %08x\n" + "pn_95_64 : %08x\n" + "pn_127_96 : %08x\n" + "last_rx_enq_tstamp : %08x\n" + "last_rx_deq_tstamp : %08x\n" + "rx_bitmap_31_0 : %08x\n" + "rx_bitmap_63_32 : %08x\n" + "rx_bitmap_95_64 : %08x\n" + "rx_bitmap_127_96 : %08x\n" + "rx_bitmap_159_128 : %08x\n" + "rx_bitmap_191_160 : %08x\n" + "rx_bitmap_223_192 : %08x\n" + "rx_bitmap_255_224 : %08x\n", + rx_tid->tid, + queue_status->ssn, queue_status->curr_idx, + queue_status->pn_31_0, queue_status->pn_63_32, + queue_status->pn_95_64, queue_status->pn_127_96, + queue_status->last_rx_enq_tstamp, + queue_status->last_rx_deq_tstamp, + queue_status->rx_bitmap_31_0, queue_status->rx_bitmap_63_32, + queue_status->rx_bitmap_95_64, queue_status->rx_bitmap_127_96, + queue_status->rx_bitmap_159_128, + queue_status->rx_bitmap_191_160, + queue_status->rx_bitmap_223_192, + 
queue_status->rx_bitmap_255_224); + + DP_TRACE_STATS(FATAL, + "curr_mpdu_cnt : %d\n" + "curr_msdu_cnt : %d\n" + "fwd_timeout_cnt : %d\n" + "fwd_bar_cnt : %d\n" + "dup_cnt : %d\n" + "frms_in_order_cnt : %d\n" + "bar_rcvd_cnt : %d\n" + "mpdu_frms_cnt : %d\n" + "msdu_frms_cnt : %d\n" + "total_byte_cnt : %d\n" + "late_recv_mpdu_cnt : %d\n" + "win_jump_2k : %d\n" + "hole_cnt : %d\n", + queue_status->curr_mpdu_cnt, queue_status->curr_msdu_cnt, + queue_status->fwd_timeout_cnt, queue_status->fwd_bar_cnt, + queue_status->dup_cnt, queue_status->frms_in_order_cnt, + queue_status->bar_rcvd_cnt, queue_status->mpdu_frms_cnt, + queue_status->msdu_frms_cnt, queue_status->total_cnt, + queue_status->late_recv_mpdu_cnt, queue_status->win_jump_2k, + queue_status->hole_cnt); + + DP_PRINT_STATS("Num of Addba Req = %d\n", rx_tid->num_of_addba_req); + DP_PRINT_STATS("Num of Addba Resp = %d\n", rx_tid->num_of_addba_resp); + DP_PRINT_STATS("Num of Delba Req = %d\n", rx_tid->num_of_delba_req); + DP_PRINT_STATS("BA window size = %d\n", rx_tid->ba_win_size); + DP_PRINT_STATS("Pn size = %d\n", rx_tid->pn_size); +} + +static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc, + uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id, + uint8_t vdev_id) +{ + struct dp_peer *peer; + + QDF_ASSERT(peer_id <= soc->max_peers); + /* check if there's already a peer object with this MAC address */ + peer = dp_peer_find_hash_find(soc, peer_mac_addr, + 0 /* is aligned */, vdev_id); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x\n", + __func__, peer, peer_id, vdev_id, peer_mac_addr[0], + peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3], + peer_mac_addr[4], peer_mac_addr[5]); + + if (peer) { + /* peer's ref count was already incremented by + * peer_find_hash_find + */ + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: ref_cnt: %d", __func__, + qdf_atomic_read(&peer->ref_cnt)); + 
soc->peer_id_to_obj_map[peer_id] = peer; + + if (dp_peer_find_add_id_to_obj(peer, peer_id)) { + /* TBDXXX: assert for now */ + QDF_ASSERT(0); + } + + return peer; + } + + return NULL; +} + +/** + * dp_rx_peer_map_handler() - handle peer map event from firmware + * @soc_handle - genereic soc handle + * @peeri_id - peer_id from firmware + * @hw_peer_id - ast index for this peer + * vdev_id - vdev ID + * peer_mac_addr - macc assress of the peer + * + * associate the peer_id that firmware provided with peer entry + * and update the ast table in the host with the hw_peer_id. + * + * Return: none + */ + +void +dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id, uint16_t hw_peer_id, + uint8_t vdev_id, uint8_t *peer_mac_addr) +{ + struct dp_soc *soc = (struct dp_soc *)soc_handle; + struct dp_peer *peer = NULL; + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, + "peer_map_event (soc:%pK): peer_id %di, hw_peer_id %d, peer_mac " + "%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d\n", soc, peer_id, + hw_peer_id, peer_mac_addr[0], peer_mac_addr[1], + peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4], + peer_mac_addr[5], vdev_id); + + peer = soc->peer_id_to_obj_map[peer_id]; + + if ((hw_peer_id < 0) || (hw_peer_id > (WLAN_UMAC_PSOC_MAX_PEERS * 2))) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "invalid hw_peer_id: %d", hw_peer_id); + qdf_assert_always(0); + } + + /* + * check if peer already exists for this peer_id, if so + * this peer map event is in response for a wds peer add + * wmi command sent during wds source port learning. + * in this case just add the ast entry to the existing + * peer ast_list. 
+ */ + if (!peer) + peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id, + hw_peer_id, vdev_id); + + if (peer) { + qdf_assert_always(peer->vdev); + /* + * For every peer MAp message search and set if bss_peer + */ + if (!(qdf_mem_cmp(peer->mac_addr.raw, peer->vdev->mac_addr.raw, + DP_MAC_ADDR_LEN))) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + "vdev bss_peer!!!!"); + peer->bss_peer = 1; + peer->vdev->vap_bss_peer = peer; + } + } + + dp_peer_map_ast(soc, peer, peer_mac_addr, + hw_peer_id, vdev_id); +} + +void +dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id) +{ + struct dp_peer *peer; + struct dp_soc *soc = (struct dp_soc *)soc_handle; + uint8_t i; + + peer = __dp_peer_find_by_id(soc, peer_id); + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, + "peer_unmap_event (soc:%pK) peer_id %d peer %pK\n", + soc, peer_id, peer); + + /* + * Currently peer IDs are assigned for vdevs as well as peers. + * If the peer ID is for a vdev, then the peer pointer stored + * in peer_id_to_obj_map will be NULL. + */ + if (!peer) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Received unmap event for invalid peer_id" + " %u\n", __func__, peer_id); + return; + } + + soc->peer_id_to_obj_map[peer_id] = NULL; + for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) { + if (peer->peer_ids[i] == peer_id) { + peer->peer_ids[i] = HTT_INVALID_PEER; + break; + } + } + + if (soc->cdp_soc.ol_ops->peer_unmap_event) { + soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc, + peer_id); + } + + /* + * Remove a reference to the peer. + * If there are no more references, delete the peer object. 
+ */ + dp_peer_unref_delete(peer); +} + +void +dp_peer_find_detach(struct dp_soc *soc) +{ + dp_peer_find_map_detach(soc); + dp_peer_find_hash_detach(soc); + dp_peer_ast_hash_detach(soc); +} + +static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt, + union hal_reo_status *reo_status) +{ + struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt; + + if ((reo_status->rx_queue_status.header.status != + HAL_REO_CMD_SUCCESS) && + (reo_status->rx_queue_status.header.status != + HAL_REO_CMD_DRAIN)) { + /* Should not happen normally. Just print error for now */ + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Rx tid HW desc update failed(%d): tid %d\n", + __func__, + reo_status->rx_queue_status.header.status, + rx_tid->tid); + } +} + +/* + * dp_find_peer_by_addr - find peer instance by mac address + * @dev: physical device instance + * @peer_mac_addr: peer mac address + * @local_id: local id for the peer + * + * Return: peer instance pointer + */ +void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr, + uint8_t *local_id) +{ + struct dp_pdev *pdev = (struct dp_pdev *)dev; + struct dp_peer *peer; + + peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL); + + if (!peer) + return NULL; + + /* Multiple peer ids? How can know peer id? */ + *local_id = peer->local_id; + DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id); + + /* ref_cnt is incremented inside dp_peer_find_hash_find(). + * Decrement it here. 
+ */ + qdf_atomic_dec(&peer->ref_cnt); + + return peer; +} + +/* + * dp_rx_tid_update_wifi3() – Update receive TID state + * @peer: Datapath peer handle + * @tid: TID + * @ba_window_size: BlockAck window size + * @start_seq: Starting sequence number + * + * Return: 0 on success, error code on failure + */ +static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t + ba_window_size, uint32_t start_seq) +{ + struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; + struct dp_soc *soc = peer->vdev->pdev->soc; + struct hal_reo_cmd_params params; + + qdf_mem_zero(¶ms, sizeof(params)); + + params.std.need_status = 1; + params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff; + params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; + params.u.upd_queue_params.update_ba_window_size = 1; + params.u.upd_queue_params.ba_window_size = ba_window_size; + + if (start_seq < IEEE80211_SEQ_MAX) { + params.u.upd_queue_params.update_ssn = 1; + params.u.upd_queue_params.ssn = start_seq; + } + + dp_set_ssn_valid_flag(¶ms, 0); + + dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, ¶ms, dp_rx_tid_update_cb, rx_tid); + return 0; +} + +/* + * dp_reo_desc_free() - Callback free reo descriptor memory after + * HW cache flush + * + * @soc: DP SOC handle + * @cb_ctxt: Callback context + * @reo_status: REO command status + */ +static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt, + union hal_reo_status *reo_status) +{ + struct reo_desc_list_node *freedesc = + (struct reo_desc_list_node *)cb_ctxt; + struct dp_rx_tid *rx_tid = &freedesc->rx_tid; + + if ((reo_status->fl_cache_status.header.status != + HAL_REO_CMD_SUCCESS) && + (reo_status->fl_cache_status.header.status != + HAL_REO_CMD_DRAIN)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Rx tid HW desc flush failed(%d): tid %d\n", + __func__, + reo_status->rx_queue_status.header.status, + freedesc->rx_tid.tid); + } + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: hw_qdesc_paddr: %pK, tid:%d\n", 
__func__, + (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid); + qdf_mem_unmap_nbytes_single(soc->osdev, + rx_tid->hw_qdesc_paddr, + QDF_DMA_BIDIRECTIONAL, + rx_tid->hw_qdesc_alloc_size); + qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned); + qdf_mem_free(freedesc); +} + +#if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86) +/* Hawkeye emulation requires bus address to be >= 0x50000000 */ +static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr) +{ + if (dma_addr < 0x50000000) + return QDF_STATUS_E_FAILURE; + else + return QDF_STATUS_SUCCESS; +} +#else +static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr) +{ + return QDF_STATUS_SUCCESS; +} +#endif + + +/* + * dp_rx_tid_setup_wifi3() – Setup receive TID state + * @peer: Datapath peer handle + * @tid: TID + * @ba_window_size: BlockAck window size + * @start_seq: Starting sequence number + * + * Return: 0 on success, error code on failure + */ +int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid, + uint32_t ba_window_size, uint32_t start_seq) +{ + struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; + struct dp_vdev *vdev = peer->vdev; + struct dp_soc *soc = vdev->pdev->soc; + uint32_t hw_qdesc_size; + uint32_t hw_qdesc_align; + int hal_pn_type; + void *hw_qdesc_vaddr; + uint32_t alloc_tries = 0; + + if (peer->delete_in_progress) + return QDF_STATUS_E_FAILURE; + + rx_tid->ba_win_size = ba_window_size; + if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) + return dp_rx_tid_update_wifi3(peer, tid, ba_window_size, + start_seq); + rx_tid->num_of_addba_req = 0; + rx_tid->num_of_delba_req = 0; + rx_tid->num_of_addba_resp = 0; +#ifdef notyet + hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, ba_window_size); +#else + /* TODO: Allocating HW queue descriptors based on max BA window size + * for all QOS TIDs so that same descriptor can be used later when + * ADDBA request is recevied. 
This should be changed to allocate HW + * queue descriptors based on BA window size being negotiated (0 for + * non BA cases), and reallocate when BA window size changes and also + * send WMI message to FW to change the REO queue descriptor in Rx + * peer entry as part of dp_rx_tid_update. + */ + if (tid != DP_NON_QOS_TID) + hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, + HAL_RX_MAX_BA_WINDOW); + else + hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, + ba_window_size); +#endif + + hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc); + /* To avoid unnecessary extra allocation for alignment, try allocating + * exact size and see if we already have aligned address. + */ + rx_tid->hw_qdesc_alloc_size = hw_qdesc_size; + +try_desc_alloc: + rx_tid->hw_qdesc_vaddr_unaligned = + qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size); + + if (!rx_tid->hw_qdesc_vaddr_unaligned) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Rx tid HW desc alloc failed: tid %d\n", + __func__, tid); + return QDF_STATUS_E_NOMEM; + } + + if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) % + hw_qdesc_align) { + /* Address allocated above is not alinged. Allocate extra + * memory for alignment + */ + qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned); + rx_tid->hw_qdesc_vaddr_unaligned = + qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size + + hw_qdesc_align - 1); + + if (!rx_tid->hw_qdesc_vaddr_unaligned) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Rx tid HW desc alloc failed: tid %d\n", + __func__, tid); + return QDF_STATUS_E_NOMEM; + } + + hw_qdesc_vaddr = (void *)qdf_align((unsigned long) + rx_tid->hw_qdesc_vaddr_unaligned, + hw_qdesc_align); + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Total Size %d Aligned Addr %pK\n", + __func__, rx_tid->hw_qdesc_alloc_size, + hw_qdesc_vaddr); + + } else { + hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned; + } + + /* TODO: Ensure that sec_type is set before ADDBA is received. 
+ * Currently this is set based on htt indication + * HTT_T2H_MSG_TYPE_SEC_IND from target + */ + switch (peer->security[dp_sec_ucast].sec_type) { + case cdp_sec_type_tkip_nomic: + case cdp_sec_type_aes_ccmp: + case cdp_sec_type_aes_ccmp_256: + case cdp_sec_type_aes_gcmp: + case cdp_sec_type_aes_gcmp_256: + hal_pn_type = HAL_PN_WPA; + break; + case cdp_sec_type_wapi: + if (vdev->opmode == wlan_op_mode_ap) + hal_pn_type = HAL_PN_WAPI_EVEN; + else + hal_pn_type = HAL_PN_WAPI_UNEVEN; + break; + default: + hal_pn_type = HAL_PN_NONE; + break; + } + + hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq, + hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type); + + qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr, + QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size, + &(rx_tid->hw_qdesc_paddr)); + + if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) != + QDF_STATUS_SUCCESS) { + if (alloc_tries++ < 10) + goto try_desc_alloc; + else { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Rx tid HW desc alloc failed (lowmem): tid %d\n", + __func__, tid); + return QDF_STATUS_E_NOMEM; + } + } + + if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) { + soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup( + vdev->pdev->osif_pdev, + peer->vdev->vdev_id, peer->mac_addr.raw, + rx_tid->hw_qdesc_paddr, tid, tid); + + } + return 0; +} + +/* + * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache + * after deleting the entries (ie., setting valid=0) + * + * @soc: DP SOC handle + * @cb_ctxt: Callback context + * @reo_status: REO command status + */ +static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt, + union hal_reo_status *reo_status) +{ + struct reo_desc_list_node *freedesc = + (struct reo_desc_list_node *)cb_ctxt; + uint32_t list_size; + struct reo_desc_list_node *desc; + unsigned long curr_ts = qdf_get_system_timestamp(); + uint32_t desc_size, tot_desc_size; + struct hal_reo_cmd_params params; + + if 
(reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) { + qdf_mem_zero(reo_status, sizeof(*reo_status)); + reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN; + dp_reo_desc_free(soc, (void *)freedesc, reo_status); + return; + } else if (reo_status->rx_queue_status.header.status != + HAL_REO_CMD_SUCCESS) { + /* Should not happen normally. Just print error for now */ + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Rx tid HW desc deletion failed(%d): tid %d\n", + __func__, + reo_status->rx_queue_status.header.status, + freedesc->rx_tid.tid); + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, + "%s: rx_tid: %d status: %d\n", __func__, + freedesc->rx_tid.tid, + reo_status->rx_queue_status.header.status); + + qdf_spin_lock_bh(&soc->reo_desc_freelist_lock); + freedesc->free_ts = curr_ts; + qdf_list_insert_back_size(&soc->reo_desc_freelist, + (qdf_list_node_t *)freedesc, &list_size); + + while ((qdf_list_peek_front(&soc->reo_desc_freelist, + (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) && + ((list_size >= REO_DESC_FREELIST_SIZE) || + ((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) { + struct dp_rx_tid *rx_tid; + + qdf_list_remove_front(&soc->reo_desc_freelist, + (qdf_list_node_t **)&desc); + list_size--; + rx_tid = &desc->rx_tid; + + /* Flush and invalidate REO descriptor from HW cache: Base and + * extension descriptors should be flushed separately */ + tot_desc_size = hal_get_reo_qdesc_size(soc->hal_soc, + rx_tid->ba_win_size); + desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0); + + /* Flush reo extension descriptors */ + while ((tot_desc_size -= desc_size) > 0) { + qdf_mem_zero(¶ms, sizeof(params)); + params.std.addr_lo = + ((uint64_t)(rx_tid->hw_qdesc_paddr) + + tot_desc_size) & 0xffffffff; + params.std.addr_hi = + (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; + + if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc, + CMD_FLUSH_CACHE, + ¶ms, + NULL, + NULL)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + 
"%s: fail to send CMD_CACHE_FLUSH:" + "tid %d desc %pK\n", __func__, + rx_tid->tid, + (void *)(rx_tid->hw_qdesc_paddr)); + } + } + + /* Flush base descriptor */ + qdf_mem_zero(¶ms, sizeof(params)); + params.std.need_status = 1; + params.std.addr_lo = + (uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff; + params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; + + if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc, + CMD_FLUSH_CACHE, + ¶ms, + dp_reo_desc_free, + (void *)desc)) { + union hal_reo_status reo_status; + /* + * If dp_reo_send_cmd return failure, related TID queue desc + * should be unmapped. Also locally reo_desc, together with + * TID queue desc also need to be freed accordingly. + * + * Here invoke desc_free function directly to do clean up. + */ + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: fail to send REO cmd to flush cache: tid %d\n", + __func__, rx_tid->tid); + qdf_mem_zero(&reo_status, sizeof(reo_status)); + reo_status.fl_cache_status.header.status = 0; + dp_reo_desc_free(soc, (void *)desc, &reo_status); + } + } + qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock); +} + +/* + * dp_rx_tid_delete_wifi3() – Delete receive TID queue + * @peer: Datapath peer handle + * @tid: TID + * + * Return: 0 on success, error code on failure + */ +static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid) +{ + struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]); + struct dp_soc *soc = peer->vdev->pdev->soc; + struct hal_reo_cmd_params params; + struct reo_desc_list_node *freedesc = + qdf_mem_malloc(sizeof(*freedesc)); + + if (!freedesc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: malloc failed for freedesc: tid %d\n", + __func__, tid); + return -ENOMEM; + } + + freedesc->rx_tid = *rx_tid; + + qdf_mem_zero(¶ms, sizeof(params)); + + params.std.need_status = 0; + params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff; + params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; + params.u.upd_queue_params.update_vld = 1; + 
	/* invalidate the queue so HW stops using it before the flush */
	params.u.upd_queue_params.vld = 0;

	dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
		dp_rx_tid_delete_cb, (void *)freedesc);

	/* ownership of the descriptor memory moved to freedesc/callback */
	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
	rx_tid->hw_qdesc_alloc_size = 0;
	rx_tid->hw_qdesc_paddr = 0;

	return 0;
}

#ifdef DP_LFR
static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
{
	int tid;

	/* TID 0 and the default TID are set up by dp_peer_rx_init; set up
	 * the remaining QOS TIDs here with a 1-frame BA window.
	 * NOTE(review): the bound DP_MAX_TIDS-1 skips the last TID —
	 * presumably the non-QOS TID handled separately; confirm.
	 */
	for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"Setting up TID %d for peer %pK peer->local_id %d\n",
			tid, peer, peer->local_id);
	}
}
#else
static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {};
#endif
/*
 * dp_peer_rx_init() - Initialize receive TID state
 * @pdev: Datapath pdev
 * @peer: Datapath peer
 *
 */
void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int tid;
	struct dp_rx_tid *rx_tid;
	/* reset per-TID reorder state to a known-inactive baseline */
	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		rx_tid->array = &rx_tid->base;
		rx_tid->base.head = rx_tid->base.tail = NULL;
		rx_tid->tid = tid;
		rx_tid->defrag_timeout_ms = 0;
		rx_tid->ba_win_size = 0;
		rx_tid->ba_status = DP_RX_BA_INACTIVE;

		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;

#ifdef notyet /* TODO: See if this is required for exception handling */
		/* invalid sequence number */
		peer->tids_last_seq[tid] = 0xffff;
#endif
	}

	/* Setup default (non-qos) rx tid queue */
	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);

	/* Setup rx tid queue for TID 0.
	 * Other queues will be setup on receiving first packet, which will cause
	 * NULL REO queue error
	 */
	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);

	/*
	 * Setup the rest of TID's to handle LFR
	 */
	dp_peer_setup_remaining_tids(peer);

	/*
	 * Set security defaults: no PN check, no security. The target may
	 * send a HTT SEC_IND message to overwrite these defaults.
 */
	peer->security[dp_sec_ucast].sec_type =
		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
}

/*
 * dp_peer_rx_cleanup() - Cleanup receive TID state
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	int tid;
	uint32_t tid_delete_mask = 0;
	/* tear down only TIDs whose REO queue descriptor was allocated */
	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) {
			dp_rx_tid_delete_wifi3(peer, tid);

			/* Cleanup defrag related resource */
			dp_rx_defrag_waitlist_remove(peer, tid);
			dp_rx_reorder_flush_frag(peer, tid);

			tid_delete_mask |= (1 << tid);
		}
	}
#ifdef notyet /* See if FW can remove queues as part of peer cleanup */
	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
		soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->osif_pdev,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			tid_delete_mask);
	}
#endif
}

/*
 * dp_peer_cleanup() - Cleanup peer information
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	/* reset management-frame bookkeeping */
	peer->last_assoc_rcvd = 0;
	peer->last_disassoc_rcvd = 0;
	peer->last_deauth_rcvd = 0;

	/* cleanup the Rx reorder queues for this peer */
	dp_peer_rx_cleanup(vdev, peer);
}

/*
* dp_rx_addba_requestprocess_wifi3() - Process ADDBA request from peer
*
* @peer: Datapath peer handle
* @dialogtoken: dialogtoken from ADDBA frame
* @tid: TID number
* @startseqnum: Start seq.
number received in BA sequence control +* in ADDBA frame +* +* Return: 0 on success, error code on failure +*/ +int dp_addba_requestprocess_wifi3(void *peer_handle, + uint8_t dialogtoken, uint16_t tid, uint16_t batimeout, + uint16_t buffersize, uint16_t startseqnum) +{ + struct dp_peer *peer = (struct dp_peer *)peer_handle; + struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; + + if ((rx_tid->ba_status == DP_RX_BA_ACTIVE) && + (rx_tid->hw_qdesc_vaddr_unaligned != NULL)) + rx_tid->ba_status = DP_RX_BA_INACTIVE; + + if (dp_rx_tid_setup_wifi3(peer, tid, buffersize, + startseqnum)) { + /* TODO: Should we send addba reject in this case */ + return QDF_STATUS_E_FAILURE; + } + + if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS) + rx_tid->statuscode = rx_tid->userstatuscode; + else + rx_tid->statuscode = IEEE80211_STATUS_SUCCESS; + + rx_tid->dialogtoken = dialogtoken; + rx_tid->ba_status = DP_RX_BA_ACTIVE; + rx_tid->num_of_addba_req++; + + return 0; +} + +/* +* dp_rx_addba_responsesetup_wifi3() – Process ADDBA request from peer +* +* @peer: Datapath peer handle +* @tid: TID number +* @dialogtoken: output dialogtoken +* @statuscode: output dialogtoken +* @buffersize: Output BA window size +* @batimeout: Output BA timeout +*/ +void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid, + uint8_t *dialogtoken, uint16_t *statuscode, + uint16_t *buffersize, uint16_t *batimeout) +{ + struct dp_peer *peer = (struct dp_peer *)peer_handle; + struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; + + rx_tid->num_of_addba_resp++; + /* setup ADDBA response parameters */ + *dialogtoken = rx_tid->dialogtoken; + *statuscode = rx_tid->statuscode; + *buffersize = rx_tid->ba_win_size; + *batimeout = 0; +} + +/* +* dp_set_addba_response() – Set a user defined ADDBA response status code +* +* @peer: Datapath peer handle +* @tid: TID number +* @statuscode: response status code to be set +*/ +void dp_set_addba_response(void *peer_handle, uint8_t tid, + uint16_t statuscode) +{ + struct dp_peer 
*peer = (struct dp_peer *)peer_handle; + struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; + + rx_tid->userstatuscode = statuscode; +} + +/* +* dp_delba_process_wifi3() - Process DELBA from peer +* @peer: Datapath peer handle +* @tid: TID number +* @reasoncode: Reason code received in DELBA frame +* +* Return: 0 on success, error code on failure +*/ +int dp_delba_process_wifi3(void *peer_handle, + int tid, uint16_t reasoncode) +{ + struct dp_peer *peer = (struct dp_peer *)peer_handle; + struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; + + if (rx_tid->ba_status != DP_RX_BA_ACTIVE) + return QDF_STATUS_E_FAILURE; + + /* TODO: See if we can delete the existing REO queue descriptor and + * replace with a new one without queue extension descriptor to save + * memory + */ + rx_tid->num_of_delba_req++; + /* Shrink the REO queue back to a BA window of 1 now the session ended */ + dp_rx_tid_update_wifi3(peer, tid, 1, 0); + + rx_tid->ba_status = DP_RX_BA_INACTIVE; + + return 0; +} + +/* + * dp_rx_discard() - Drop and free a chain of rx MSDUs + * @vdev: Datapath vdev + * @peer: Datapath peer the frames arrived from + * @tid: TID number + * @msdu_list: chain of MSDUs to free + * + * Return: none + */ +void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid, + qdf_nbuf_t msdu_list) +{ + while (msdu_list) { + qdf_nbuf_t msdu = msdu_list; + + msdu_list = qdf_nbuf_next(msdu_list); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, + "discard rx %pK from partly-deleted peer %pK " + "(%02x:%02x:%02x:%02x:%02x:%02x)\n", + msdu, peer, + peer->mac_addr.raw[0], peer->mac_addr.raw[1], + peer->mac_addr.raw[2], peer->mac_addr.raw[3], + peer->mac_addr.raw[4], peer->mac_addr.raw[5]); + qdf_nbuf_free(msdu); + } +} + + +/** + * dp_set_pn_check_wifi3() - enable PN check in REO for security + * @vdev: Datapath vdev handle + * @peer: Datapath peer handle + * @sec_type: security type + * @rx_pn: Receive pn starting number + * + * Return: none + */ + +void +dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, uint32_t *rx_pn) +{ + struct dp_peer *peer = (struct dp_peer *)peer_handle; + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + struct dp_pdev *pdev; + struct dp_soc *soc; + int i;
+ uint8_t pn_size; + struct hal_reo_cmd_params params; + + /* preconditions */ + qdf_assert(vdev); + + pdev = vdev->pdev; + soc = pdev->soc; + + + qdf_mem_zero(¶ms, sizeof(params)); + + params.std.need_status = 1; + params.u.upd_queue_params.update_pn_valid = 1; + params.u.upd_queue_params.update_pn_size = 1; + params.u.upd_queue_params.update_pn = 1; + params.u.upd_queue_params.update_pn_check_needed = 1; + + peer->security[dp_sec_ucast].sec_type = sec_type; + + switch (sec_type) { + case cdp_sec_type_tkip_nomic: + case cdp_sec_type_aes_ccmp: + case cdp_sec_type_aes_ccmp_256: + case cdp_sec_type_aes_gcmp: + case cdp_sec_type_aes_gcmp_256: + params.u.upd_queue_params.pn_check_needed = 1; + params.u.upd_queue_params.pn_size = 48; + pn_size = 48; + break; + case cdp_sec_type_wapi: + params.u.upd_queue_params.pn_check_needed = 1; + params.u.upd_queue_params.pn_size = 128; + pn_size = 128; + if (vdev->opmode == wlan_op_mode_ap) { + params.u.upd_queue_params.pn_even = 1; + params.u.upd_queue_params.update_pn_even = 1; + } else { + params.u.upd_queue_params.pn_uneven = 1; + params.u.upd_queue_params.update_pn_uneven = 1; + } + break; + default: + params.u.upd_queue_params.pn_check_needed = 0; + pn_size = 0; + break; + } + + + for (i = 0; i < DP_MAX_TIDS; i++) { + struct dp_rx_tid *rx_tid = &peer->rx_tid[i]; + if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) { + params.std.addr_lo = + rx_tid->hw_qdesc_paddr & 0xffffffff; + params.std.addr_hi = + (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; + + if (sec_type != cdp_sec_type_wapi) { + params.u.upd_queue_params.update_pn_valid = 0; + } else { + /* + * Setting PN valid bit for WAPI sec_type, + * since WAPI PN has to be started with + * predefined value + */ + params.u.upd_queue_params.update_pn_valid = 1; + params.u.upd_queue_params.pn_31_0 = rx_pn[0]; + params.u.upd_queue_params.pn_63_32 = rx_pn[1]; + params.u.upd_queue_params.pn_95_64 = rx_pn[2]; + params.u.upd_queue_params.pn_127_96 = rx_pn[3]; + } + rx_tid->pn_size = pn_size; + 
dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, ¶ms, + dp_rx_tid_update_cb, rx_tid); + } else { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, + "PN Check not setup for TID :%d \n", i); + } + } +} + + +void +dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id, + enum htt_sec_type sec_type, int is_unicast, u_int32_t *michael_key, + u_int32_t *rx_pn) +{ + struct dp_soc *soc = (struct dp_soc *)soc_handle; + struct dp_peer *peer; + int sec_index; + + peer = dp_peer_find_by_id(soc, peer_id); + if (!peer) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "Couldn't find peer from ID %d - skipping security inits\n", + peer_id); + return; + } + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, + "sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): " + "%s key of type %d\n", + peer, + peer->mac_addr.raw[0], peer->mac_addr.raw[1], + peer->mac_addr.raw[2], peer->mac_addr.raw[3], + peer->mac_addr.raw[4], peer->mac_addr.raw[5], + is_unicast ? "ucast" : "mcast", + sec_type); + sec_index = is_unicast ? 
dp_sec_ucast : dp_sec_mcast; + peer->security[sec_index].sec_type = sec_type; +#ifdef notyet /* TODO: See if this is required for defrag support */ + /* michael key only valid for TKIP, but for simplicity, + * copy it anyway + */ + qdf_mem_copy( + &peer->security[sec_index].michael_key[0], + michael_key, + sizeof(peer->security[sec_index].michael_key)); +#ifdef BIG_ENDIAN_HOST + OL_IF_SWAPBO(peer->security[sec_index].michael_key[0], + sizeof(peer->security[sec_index].michael_key)); +#endif /* BIG_ENDIAN_HOST */ +#endif + +#ifdef notyet /* TODO: Check if this is required for wifi3.0 */ + if (sec_type != htt_sec_type_wapi) { + qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00); + } else { + for (i = 0; i < DP_MAX_TIDS; i++) { + /* + * Setting PN valid bit for WAPI sec_type, + * since WAPI PN has to be started with predefined value + */ + peer->tids_last_pn_valid[i] = 1; + qdf_mem_copy( + (u_int8_t *) &peer->tids_last_pn[i], + (u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t)); + peer->tids_last_pn[i].pn128[1] = + qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]); + peer->tids_last_pn[i].pn128[0] = + qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]); + } + } +#endif + /* TODO: Update HW TID queue with PN check parameters (pn type for + * all security types and last pn for WAPI) once REO command API + * is available + */ +} + +#ifndef CONFIG_WIN +/** + * dp_register_peer() - Register peer into physical device + * @pdev - data path device instance + * @sta_desc - peer description + * + * Register peer into physical device + * + * Return: QDF_STATUS_SUCCESS registration success + * QDF_STATUS_E_FAULT peer not found + */ +QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle, + struct ol_txrx_desc_type *sta_desc) +{ + struct dp_peer *peer; + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + + peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, + sta_desc->sta_id); + if (!peer) + return QDF_STATUS_E_FAULT; + + qdf_spin_lock_bh(&peer->peer_info_lock); + 
peer->state = OL_TXRX_PEER_STATE_CONN; + qdf_spin_unlock_bh(&peer->peer_info_lock); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_clear_peer() - remove peer from physical device + * @pdev - data path device instance + * @sta_id - local peer id + * + * remove peer from physical device + * + * Return: QDF_STATUS_SUCCESS registration success + * QDF_STATUS_E_FAULT peer not found + */ +QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id) +{ + struct dp_peer *peer; + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + + peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id); + if (!peer) + return QDF_STATUS_E_FAULT; + + qdf_spin_lock_bh(&peer->peer_info_lock); + peer->state = OL_TXRX_PEER_STATE_DISC; + qdf_spin_unlock_bh(&peer->peer_info_lock); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev + * @pdev - data path device instance + * @vdev - virtual interface instance + * @peer_addr - peer mac address + * @peer_id - local peer id with target mac address + * + * Find peer by peer mac address within vdev + * + * Return: peer instance void pointer + * NULL cannot find target peer + */ +void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle, + struct cdp_vdev *vdev_handle, + uint8_t *peer_addr, uint8_t *local_id) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle; + struct dp_peer *peer; + + DP_TRACE(INFO, "vdev %pK peer_addr %pK", vdev, peer_addr); + peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0); + DP_TRACE(INFO, "peer %pK vdev %pK", peer, vdev); + + if (!peer) + return NULL; + + if (peer->vdev != vdev) + return NULL; + + *local_id = peer->local_id; + DP_TRACE(INFO, "peer %pK vdev %pK local id %d", peer, vdev, *local_id); + + /* ref_cnt is incremented inside dp_peer_find_hash_find(). + * Decrement it here. 
+ */ + qdf_atomic_dec(&peer->ref_cnt); + + return peer; +} + +/** + * dp_local_peer_id() - Find local peer id within peer instance + * @peer - peer instance + * + * Find local peer id within peer instance + * + * Return: local peer id + */ +uint16_t dp_local_peer_id(void *peer) +{ + return ((struct dp_peer *)peer)->local_id; +} + +/** + * dp_peer_find_by_local_id() - Find peer by local peer id + * @pdev - data path device instance + * @local_peer_id - local peer id want to find + * + * Find peer by local peer id within physical device + * + * Return: peer instance void pointer + * NULL cannot find target peer + */ +void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id) +{ + struct dp_peer *peer; + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + + if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) { + DP_TRACE(DEBUG, "Incorrect local id %d", local_id); + QDF_BUG(0); + return NULL; + } + qdf_spin_lock_bh(&pdev->local_peer_ids.lock); + peer = pdev->local_peer_ids.map[local_id]; + qdf_spin_unlock_bh(&pdev->local_peer_ids.lock); + DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id); + return peer; +} + +/** + * dp_peer_state_update() - update peer local state + * @pdev - data path device instance + * @peer_addr - peer mac address + * @state - new peer local state + * + * update peer local state + * + * Return: QDF_STATUS_SUCCESS registration success + */ +QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac, + enum ol_txrx_peer_state state) +{ + struct dp_peer *peer; + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + + peer = dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL); + if (NULL == peer) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "Failed to find peer for: [%pM]", peer_mac); + return QDF_STATUS_E_FAILURE; + } + peer->state = state; + + DP_TRACE(INFO, "peer %pK state %d", peer, peer->state); + /* ref_cnt is incremented inside dp_peer_find_hash_find(). + * Decrement it here. 
+ */ + qdf_atomic_dec(&peer->ref_cnt); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_get_vdevid() - Get virtual interface id which peer registered + * @peer - peer instance + * @vdev_id - virtual interface id which peer registered + * + * Get virtual interface id which peer registered + * + * Return: QDF_STATUS_SUCCESS registration success + */ +QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id) +{ + struct dp_peer *peer = peer_handle; + + DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d", + peer, peer->vdev, peer->vdev->vdev_id); + *vdev_id = peer->vdev->vdev_id; + return QDF_STATUS_SUCCESS; +} + +struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle, + uint8_t sta_id) +{ + struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; + struct dp_peer *peer = NULL; + + if (sta_id >= WLAN_MAX_STA_COUNT) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, + "Invalid sta id passed"); + return NULL; + } + + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, + "PDEV not found for sta_id [%d]", sta_id); + return NULL; + } + + peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id); + if (!peer) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, + "PEER [%d] not found", sta_id); + return NULL; + } + + return (struct cdp_vdev *)peer->vdev; +} + +/** + * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs + * @peer - peer instance + * + * Get virtual interface instance which peer belongs + * + * Return: virtual interface instance pointer + * NULL in case cannot find + */ +struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle) +{ + struct dp_peer *peer = peer_handle; + + DP_TRACE(INFO, "peer %pK vdev %pK", peer, peer->vdev); + return (struct cdp_vdev *)peer->vdev; +} + +/** + * dp_peer_get_peer_mac_addr() - Get peer mac address + * @peer - peer instance + * + * Get peer mac address + * + * Return: peer mac address pointer + * NULL in case cannot find + */ +uint8_t 
*dp_peer_get_peer_mac_addr(void *peer_handle) +{ + struct dp_peer *peer = peer_handle; + uint8_t *mac; + + mac = peer->mac_addr.raw; + DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x", + peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + return peer->mac_addr.raw; +} + +/** + * dp_get_peer_state() - Get local peer state + * @peer - peer instance + * + * Get local peer state + * + * Return: peer status + */ +int dp_get_peer_state(void *peer_handle) +{ + struct dp_peer *peer = peer_handle; + + DP_TRACE(DEBUG, "peer %pK stats %d", peer, peer->state); + return peer->state; +} + +/** + * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device + * @pdev - data path device instance + * + * local peer id pool alloc for physical device + * + * Return: none + */ +void dp_local_peer_id_pool_init(struct dp_pdev *pdev) +{ + int i; + + /* point the freelist to the first ID */ + pdev->local_peer_ids.freelist = 0; + + /* link each ID to the next one */ + for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) { + pdev->local_peer_ids.pool[i] = i + 1; + pdev->local_peer_ids.map[i] = NULL; + } + + /* link the last ID to itself, to mark the end of the list */ + i = OL_TXRX_NUM_LOCAL_PEER_IDS; + pdev->local_peer_ids.pool[i] = i; + + qdf_spinlock_create(&pdev->local_peer_ids.lock); + DP_TRACE(INFO, "Peer pool init"); +} + +/** + * dp_local_peer_id_alloc() - allocate local peer id + * @pdev - data path device instance + * @peer - new peer instance + * + * allocate local peer id + * + * Return: none + */ +void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer) +{ + int i; + + qdf_spin_lock_bh(&pdev->local_peer_ids.lock); + i = pdev->local_peer_ids.freelist; + if (pdev->local_peer_ids.pool[i] == i) { + /* the list is empty, except for the list-end marker */ + peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID; + } else { + /* take the head ID and advance the freelist */ + peer->local_id = i; + pdev->local_peer_ids.freelist = 
pdev->local_peer_ids.pool[i]; + pdev->local_peer_ids.map[i] = peer; + } + qdf_spin_unlock_bh(&pdev->local_peer_ids.lock); + DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id); +} + +/** + * dp_local_peer_id_free() - remove local peer id + * @pdev - data path device instance + * @peer - peer instance whose local id should be released + * + * Puts the peer's local id back on the head of the freelist; peers + * without a valid local id are ignored. + * + * Return: none + */ +void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer) +{ + int i = peer->local_id; + if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) || + (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) { + return; + } + + /* put this ID on the head of the freelist */ + qdf_spin_lock_bh(&pdev->local_peer_ids.lock); + pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist; + pdev->local_peer_ids.freelist = i; + pdev->local_peer_ids.map[i] = NULL; + qdf_spin_unlock_bh(&pdev->local_peer_ids.lock); +} +#endif + +/** + * dp_get_peer_mac_addr_frm_id(): get mac address of the peer + * @soc_handle: DP SOC handle + * @peer_id: peer_id of the peer + * @peer_mac: buffer the 6-byte peer mac address is copied into + * + * return: vdev_id of the vap the peer belongs to, + * CDP_INVALID_VDEV_ID if the peer is not found + */ +uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle, + uint16_t peer_id, uint8_t *peer_mac) +{ + struct dp_soc *soc = (struct dp_soc *)soc_handle; + struct dp_peer *peer; + + peer = dp_peer_find_by_id(soc, peer_id); + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "soc %pK peer_id %d", soc, peer_id); + + if (!peer) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "peer not found "); + return CDP_INVALID_VDEV_ID; + } + + qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6); + return peer->vdev->vdev_id; +} + +/** + * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW + * @peer: DP peer handle + * @dp_stats_cmd_cb: REO command callback function + * @cb_ctxt: Callback context + * + * Return: none + */ +void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb), + void *cb_ctxt) +{ + struct dp_soc *soc = peer->vdev->pdev->soc; + struct hal_reo_cmd_params params; + int i; + + if
(!dp_stats_cmd_cb) + return; + + qdf_mem_zero(¶ms, sizeof(params)); + for (i = 0; i < DP_MAX_TIDS; i++) { + struct dp_rx_tid *rx_tid = &peer->rx_tid[i]; + if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) { + params.std.need_status = 1; + params.std.addr_lo = + rx_tid->hw_qdesc_paddr & 0xffffffff; + params.std.addr_hi = + (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; + + if (cb_ctxt) { + dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS, + ¶ms, dp_stats_cmd_cb, cb_ctxt); + } else { + dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS, + ¶ms, dp_stats_cmd_cb, rx_tid); + } + + /* Flush REO descriptor from HW cache to update stats + * in descriptor memory. This is to help debugging */ + qdf_mem_zero(¶ms, sizeof(params)); + params.std.need_status = 0; + params.std.addr_lo = + rx_tid->hw_qdesc_paddr & 0xffffffff; + params.std.addr_hi = + (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32; + params.u.fl_cache_params.flush_no_inval = 1; + dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, ¶ms, NULL, + NULL); + } + } +} + diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.h new file mode 100644 index 0000000000000000000000000000000000000000..f58901fea542693dde4540d7c851a9d7c6e95fad --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_peer.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef _DP_PEER_H_ +#define _DP_PEER_H_ + +#include +#include +#include "dp_types.h" + +#define DP_INVALID_PEER_ID 0xffff +/** + * __dp_peer_find_by_id() - Returns peer object given the peer id + * + * @soc : core DP soc context + * @peer_id : peer id from peer object can be retrieved + * + * Return: struct dp_peer*: Pointer to DP peer object + */ +static inline struct dp_peer * +__dp_peer_find_by_id(struct dp_soc *soc, + uint16_t peer_id) +{ + struct dp_peer *peer; + + /* TODO: Hold lock */ + peer = (peer_id >= soc->max_peers) ? NULL : + soc->peer_id_to_obj_map[peer_id]; + + return peer; +} + +/** + * dp_peer_find_by_id() - Returns peer object given the peer id + * if delete_in_progress in not set for peer + * + * @soc : core DP soc context + * @peer_id : peer id from peer object can be retrieved + * + * Return: struct dp_peer*: Pointer to DP peer object + */ +static inline struct dp_peer * +dp_peer_find_by_id(struct dp_soc *soc, + uint16_t peer_id) +{ + struct dp_peer *peer; + + peer = __dp_peer_find_by_id (soc, peer_id); + + if (peer && peer->delete_in_progress) { + return NULL; + } + + return peer; +} + +void dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id, + uint16_t hw_peer_id, uint8_t vdev_id, uint8_t *peer_mac_addr); +void dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id); +void dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id, + enum htt_sec_type sec_type, int is_unicast, + u_int32_t *michael_key, u_int32_t *rx_pn); +uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle, + uint16_t peer_id, uint8_t *peer_mac); + +int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer, + 
uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, + uint32_t flags); + +void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry); + +int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer, + struct dp_ast_entry *ast_entry, uint32_t flags); + +struct dp_ast_entry *dp_peer_ast_hash_find(struct dp_soc *soc, + uint8_t *ast_mac_addr); + +uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc, + struct dp_ast_entry *ast_entry); + + +uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc, + struct dp_ast_entry *ast_entry); + +void dp_peer_ast_set_type(struct dp_soc *soc, + struct dp_ast_entry *ast_entry, + enum cdp_txrx_ast_entry_type type); + +/* + * dp_get_vdev_from_soc_vdev_id_wifi3() - + * Returns vdev object given the vdev id + * vdev id is unique across pdev's + * + * @soc : core DP soc context + * @vdev_id : vdev id from vdev object can be retrieved + * + * Return: struct dp_vdev*: Pointer to DP vdev object + */ +static inline struct dp_vdev * +dp_get_vdev_from_soc_vdev_id_wifi3(struct dp_soc *soc, + uint8_t vdev_id) +{ + struct dp_pdev *pdev = NULL; + struct dp_vdev *vdev = NULL; + int i; + + for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) { + pdev = soc->pdev_list[i]; + TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { + if (vdev->vdev_id == vdev_id) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_INFO, + FL("Found vdev 0x%pK on pdev %d\n"), + vdev, i); + return vdev; + } + } + } + return NULL; + +} +#endif /* _DP_PEER_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_reo.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_reo.c new file mode 100644 index 0000000000000000000000000000000000000000..d7fc4d3eb63e27a184211b9428cf5c26f01c6162 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_reo.c @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "dp_types.h" +#include "hal_reo.h" +#include "dp_internal.h" + +QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, enum hal_reo_cmd_type type, + struct hal_reo_cmd_params *params, + void (*callback_fn), void *data) +{ + struct dp_reo_cmd_info *reo_cmd; + int num; + + switch (type) { + case CMD_GET_QUEUE_STATS: + num = hal_reo_cmd_queue_stats(soc->reo_cmd_ring.hal_srng, + soc->hal_soc, params); + break; + case CMD_FLUSH_QUEUE: + num = hal_reo_cmd_flush_queue(soc->reo_cmd_ring.hal_srng, + soc->hal_soc, params); + break; + case CMD_FLUSH_CACHE: + num = hal_reo_cmd_flush_cache(soc->reo_cmd_ring.hal_srng, + soc->hal_soc, params); + break; + case CMD_UNBLOCK_CACHE: + num = hal_reo_cmd_unblock_cache(soc->reo_cmd_ring.hal_srng, + soc->hal_soc, params); + break; + case CMD_FLUSH_TIMEOUT_LIST: + num = hal_reo_cmd_flush_timeout_list(soc->reo_cmd_ring.hal_srng, + soc->hal_soc, params); + break; + case CMD_UPDATE_RX_REO_QUEUE: + num = hal_reo_cmd_update_rx_queue(soc->reo_cmd_ring.hal_srng, + soc->hal_soc, params); + break; + default: + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Invalid REO command type\n", __func__); + return QDF_STATUS_E_FAILURE; + }; + + if (num < 0) { + qdf_print("%s: Error with sending REO 
command\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (callback_fn) { + reo_cmd = qdf_mem_malloc(sizeof(*reo_cmd)); + if (!reo_cmd) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: alloc failed for REO cmd:%d!!\n", + __func__, type); + return QDF_STATUS_E_NOMEM; + } + + reo_cmd->cmd = num; + reo_cmd->cmd_type = type; + reo_cmd->handler = callback_fn; + reo_cmd->data = data; + qdf_spin_lock_bh(&soc->rx.reo_cmd_lock); + TAILQ_INSERT_TAIL(&soc->rx.reo_cmd_list, reo_cmd, + reo_cmd_list_elem); + qdf_spin_unlock_bh(&soc->rx.reo_cmd_lock); + } + + return QDF_STATUS_SUCCESS; +} + +void dp_reo_status_ring_handler(struct dp_soc *soc) +{ + uint32_t *reo_desc; + struct dp_reo_cmd_info *reo_cmd = NULL; + union hal_reo_status reo_status; + int num; + + if (hal_srng_access_start(soc->hal_soc, + soc->reo_status_ring.hal_srng)) { + return; + } + reo_desc = hal_srng_dst_get_next(soc->hal_soc, + soc->reo_status_ring.hal_srng); + + while (reo_desc) { + uint16_t tlv = HAL_GET_TLV(reo_desc); + + switch (tlv) { + case HAL_REO_QUEUE_STATS_STATUS_TLV: + hal_reo_queue_stats_status(reo_desc, + &reo_status.queue_status); + num = reo_status.queue_status.header.cmd_num; + break; + case HAL_REO_FLUSH_QUEUE_STATUS_TLV: + hal_reo_flush_queue_status(reo_desc, + &reo_status.fl_queue_status); + num = reo_status.fl_queue_status.header.cmd_num; + break; + case HAL_REO_FLUSH_CACHE_STATUS_TLV: + hal_reo_flush_cache_status(reo_desc, soc->hal_soc, + &reo_status.fl_cache_status); + num = reo_status.fl_cache_status.header.cmd_num; + break; + case HAL_REO_UNBLK_CACHE_STATUS_TLV: + hal_reo_unblock_cache_status(reo_desc, soc->hal_soc, + &reo_status.unblk_cache_status); + num = reo_status.unblk_cache_status.header.cmd_num; + break; + case HAL_REO_TIMOUT_LIST_STATUS_TLV: + hal_reo_flush_timeout_list_status(reo_desc, + &reo_status.fl_timeout_status); + num = reo_status.fl_timeout_status.header.cmd_num; + break; + case HAL_REO_DESC_THRES_STATUS_TLV: + 
hal_reo_desc_thres_reached_status(reo_desc, + &reo_status.thres_status); + num = reo_status.thres_status.header.cmd_num; + break; + case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV: + hal_reo_rx_update_queue_status(reo_desc, + &reo_status.rx_queue_status); + num = reo_status.rx_queue_status.header.cmd_num; + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN, + "%s, no handler for TLV:%d\n", __func__, tlv); + goto next; + } /* switch */ + + qdf_spin_lock_bh(&soc->rx.reo_cmd_lock); + TAILQ_FOREACH(reo_cmd, &soc->rx.reo_cmd_list, + reo_cmd_list_elem) { + if (reo_cmd->cmd == num) { + TAILQ_REMOVE(&soc->rx.reo_cmd_list, reo_cmd, + reo_cmd_list_elem); + break; + } + } + qdf_spin_unlock_bh(&soc->rx.reo_cmd_lock); + + if (reo_cmd) { + reo_cmd->handler(soc, reo_cmd->data, + &reo_status); + qdf_mem_free(reo_cmd); + } + +next: + reo_desc = hal_srng_dst_get_next(soc, + soc->reo_status_ring.hal_srng); + } /* while */ + + hal_srng_access_end(soc->hal_soc, soc->reo_status_ring.hal_srng); +} + +/** + * dp_reo_cmdlist_destroy - Free REO commands in the queue + * @soc: DP SoC hanle + * + */ +void dp_reo_cmdlist_destroy(struct dp_soc *soc) +{ + struct dp_reo_cmd_info *reo_cmd = NULL; + struct dp_reo_cmd_info *tmp_cmd = NULL; + union hal_reo_status reo_status; + + reo_status.queue_status.header.status = + HAL_REO_CMD_DRAIN; + + qdf_spin_lock_bh(&soc->rx.reo_cmd_lock); + TAILQ_FOREACH_SAFE(reo_cmd, &soc->rx.reo_cmd_list, + reo_cmd_list_elem, tmp_cmd) { + TAILQ_REMOVE(&soc->rx.reo_cmd_list, reo_cmd, + reo_cmd_list_elem); + reo_cmd->handler(soc, reo_cmd->data, &reo_status); + qdf_mem_free(reo_cmd); + } + qdf_spin_unlock_bh(&soc->rx.reo_cmd_lock); +} diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.c new file mode 100644 index 0000000000000000000000000000000000000000..2d47beb287bf5d20c9bbea0fa7dd423d1f3631fb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.c @@ -0,0 +1,1807 @@ +/* + * 
Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "dp_types.h" +#include "dp_rx.h" +#include "dp_peer.h" +#include "hal_rx.h" +#include "hal_api.h" +#include "qdf_nbuf.h" +#ifdef MESH_MODE_SUPPORT +#include "if_meta_hdr.h" +#endif +#include "dp_internal.h" +#include "dp_rx_mon.h" +#ifdef RX_DESC_DEBUG_CHECK +static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf) +{ + rx_desc->magic = DP_RX_DESC_MAGIC; + rx_desc->nbuf = nbuf; +} +#else +static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf) +{ + rx_desc->nbuf = nbuf; +} +#endif + +#ifdef CONFIG_WIN +static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev) +{ + return vdev->ap_bridge_enabled; +} +#else +static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev) +{ + if (vdev->opmode != wlan_op_mode_sta) + return true; + else + return false; +} +#endif +/* + * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs + * called during dp rx initialization + * and at the end of dp_rx_process. 
+ * + * @soc: core txrx main context + * @mac_id: mac_id which is one of 3 mac_ids + * @dp_rxdma_srng: dp rxdma circular ring + * @rx_desc_pool: Pointer to free Rx descriptor pool + * @num_req_buffers: number of buffer to be replenished + * @desc_list: list of descs if called from dp_rx_process + * or NULL during dp rx initialization or out of buffer + * interrupt. + * @tail: tail of descs list + * Return: return success or failure + */ +QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, + struct dp_srng *dp_rxdma_srng, + struct rx_desc_pool *rx_desc_pool, + uint32_t num_req_buffers, + union dp_rx_desc_list_elem_t **desc_list, + union dp_rx_desc_list_elem_t **tail) +{ + uint32_t num_alloc_desc; + uint16_t num_desc_to_free = 0; + struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id); + uint32_t num_entries_avail; + uint32_t count; + int sync_hw_ptr = 1; + qdf_dma_addr_t paddr; + qdf_nbuf_t rx_netbuf; + void *rxdma_ring_entry; + union dp_rx_desc_list_elem_t *next; + QDF_STATUS ret; + + void *rxdma_srng; + + rxdma_srng = dp_rxdma_srng->hal_srng; + + if (!rxdma_srng) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "rxdma srng not initialized"); + DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); + return QDF_STATUS_E_FAILURE; + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "requested %d buffers for replenish", num_req_buffers); + + hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); + num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, + rxdma_srng, + sync_hw_ptr); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "no of available entries in rxdma ring: %d", + num_entries_avail); + + if (!(*desc_list) && (num_entries_avail > + ((dp_rxdma_srng->num_entries * 3) / 4))) { + num_req_buffers = num_entries_avail; + } else if (num_entries_avail < num_req_buffers) { + num_desc_to_free = num_req_buffers - num_entries_avail; + num_req_buffers = num_entries_avail; + } + + if 
(qdf_unlikely(!num_req_buffers)) { + num_desc_to_free = num_req_buffers; + hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); + goto free_descs; + } + + /* + * if desc_list is NULL, allocate the descs from freelist + */ + if (!(*desc_list)) { + num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id, + rx_desc_pool, + num_req_buffers, + desc_list, + tail); + + if (!num_alloc_desc) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "no free rx_descs in freelist"); + DP_STATS_INC(dp_pdev, err.desc_alloc_fail, + num_req_buffers); + hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); + return QDF_STATUS_E_NOMEM; + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%d rx desc allocated", num_alloc_desc); + num_req_buffers = num_alloc_desc; + } + + + count = 0; + + while (count < num_req_buffers) { + rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev, + RX_BUFFER_SIZE, + RX_BUFFER_RESERVATION, + RX_BUFFER_ALIGNMENT, + FALSE); + + if (rx_netbuf == NULL) { + DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1); + continue; + } + + ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf, + QDF_DMA_BIDIRECTIONAL); + if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { + qdf_nbuf_free(rx_netbuf); + DP_STATS_INC(dp_pdev, replenish.map_err, 1); + continue; + } + + paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0); + + /* + * check if the physical address of nbuf->data is + * less then 0x50000000 then free the nbuf and try + * allocating new nbuf. We can try for 100 times. + * this is a temp WAR till we fix it properly. 
+ */ + ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev); + if (ret == QDF_STATUS_E_FAILURE) { + DP_STATS_INC(dp_pdev, replenish.x86_fail, 1); + break; + } + + count++; + + rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc, + rxdma_srng); + qdf_assert_always(rxdma_ring_entry); + + next = (*desc_list)->next; + + dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf); + (*desc_list)->rx_desc.in_use = 1; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d", + rx_netbuf, qdf_nbuf_data(rx_netbuf), + (unsigned long long)paddr, (*desc_list)->rx_desc.cookie); + + hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr, + (*desc_list)->rx_desc.cookie, + rx_desc_pool->owner); + + *desc_list = next; + } + + hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "successfully replenished %d buffers", num_req_buffers); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%d rx desc added back to free list", num_desc_to_free); + + DP_STATS_INC_PKT(dp_pdev, replenish.pkts, num_req_buffers, + (RX_BUFFER_SIZE * num_req_buffers)); + +free_descs: + DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free); + /* + * add any available free desc back to the free list + */ + if (*desc_list) + dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail, + mac_id, rx_desc_pool); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_deliver_raw() - process RAW mode pkts and hand over the + * pkts to RAW mode simulation to + * decapsulate the pkt. 
+ * + * @vdev: vdev on which RAW mode is enabled + * @nbuf_list: list of RAW pkts to process + * @peer: peer object from which the pkt is rx + * + * Return: void + */ +void +dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list, + struct dp_peer *peer) +{ + qdf_nbuf_t deliver_list_head = NULL; + qdf_nbuf_t deliver_list_tail = NULL; + qdf_nbuf_t nbuf; + + nbuf = nbuf_list; + while (nbuf) { + qdf_nbuf_t next = qdf_nbuf_next(nbuf); + + DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf); + + DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1); + DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf)); + /* + * reset the chfrag_start and chfrag_end bits in nbuf cb + * as this is a non-amsdu pkt and RAW mode simulation expects + * these bit s to be 0 for non-amsdu pkt. + */ + if (qdf_nbuf_is_rx_chfrag_start(nbuf) && + qdf_nbuf_is_rx_chfrag_end(nbuf)) { + qdf_nbuf_set_rx_chfrag_start(nbuf, 0); + qdf_nbuf_set_rx_chfrag_end(nbuf, 0); + } + + nbuf = next; + } + + vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head, + &deliver_list_tail, (struct cdp_peer*) peer); + + vdev->osif_rx(vdev->osif_vdev, deliver_list_head); +} + + +#ifdef DP_LFR +/* + * In case of LFR, data of a new peer might be sent up + * even before peer is added. 
 */
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
			uint16_t peer_id,
			struct dp_peer *peer,
			struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;

	/* LFR: peer may not exist yet; fall back to the vdev id carried in
	 * the MPDU peer metadata so the frame can still be delivered. */
	if (unlikely(!peer)) {
		if (peer_id != HTT_INVALID_PEER) {
			vdev_id = DP_PEER_METADATA_ID_GET(
					mpdu_desc_info.peer_meta_data);
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_DEBUG,
				FL("PeerID %d not found use vdevID %d"),
				peer_id, vdev_id);
			/* NOTE(review): may still return NULL if the vdev id
			 * lookup fails — callers must handle that. */
			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
							vdev_id);
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_DEBUG,
				FL("Invalid PeerID %d"),
				peer_id);
			return NULL;
		}
	} else {
		vdev = peer->vdev;
	}
	return vdev;
}
#else
/* Non-LFR: a missing peer is simply an error; no metadata fallback. */
static inline struct dp_vdev *
dp_get_vdev_from_peer(struct dp_soc *soc,
			uint16_t peer_id,
			struct dp_peer *peer,
			struct hal_rx_mpdu_desc_info mpdu_desc_info)
{
	if (unlikely(!peer)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			QDF_TRACE_LEVEL_DEBUG,
			FL("Peer not found for peerID %d"),
			peer_id);
		return NULL;
	} else {
		return peer->vdev;
	}
}
#endif

/**
 * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
 *
 * @soc: core txrx main context
 * @sa_peer : source peer entry
 * @rx_tlv_hdr : start address of rx tlvs
 * @nbuf : nbuf that has to be intrabss forwarded
 *
 * Return: bool: true if it is forwarded else false
 */
static bool
dp_rx_intrabss_fwd(struct dp_soc *soc,
			struct dp_peer *sa_peer,
			uint8_t *rx_tlv_hdr,
			qdf_nbuf_t nbuf)
{
	uint16_t da_idx;
	uint16_t len;
	struct dp_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	qdf_nbuf_t nbuf_copy;
	struct dp_vdev *vdev = sa_peer->vdev;

	/*
	 * intrabss forwarding is not applicable if
	 * vap is nawds enabled or ap_bridge is false.
	 * NOTE(review): only the nawds flag is checked here; the ap_bridge
	 * condition mentioned above appears to be enforced by the caller —
	 * confirm.
	 */
	if (vdev->nawds_enabled)
		return false;


	/* check if the destination peer is available in peer table
	 * and also check if the source peer and destination peer
	 * belong to the same vap and destination peer is not bss peer.
	 */

	if ((hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
		!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		da_idx = hal_rx_msdu_end_da_idx_get(rx_tlv_hdr);

		ast_entry = soc->ast_table[da_idx];
		if (!ast_entry)
			return false;

		da_peer = ast_entry->peer;

		if (!da_peer)
			return false;

		if (da_peer->vdev == sa_peer->vdev && !da_peer->bss_peer) {
			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
			len = qdf_nbuf_len(nbuf);

			/* linearize the nbuf just before we send to
			 * dp_tx_send()
			 */
			if (qdf_unlikely(qdf_nbuf_get_ext_list(nbuf))) {
				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
					return false;

				nbuf = qdf_nbuf_unshare(nbuf);
				if (!nbuf) {
					DP_STATS_INC_PKT(sa_peer,
							 rx.intra_bss.fail,
							 1,
							 len);
					/* return true even though the pkt is
					 * not forwarded. Basically skb_unshare
					 * failed and we want to continue with
					 * next nbuf.
					 */
					return true;
				}
			}

			if (!dp_tx_send(sa_peer->vdev, nbuf)) {
				DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts,
						1, len);
				return true;
			} else {
				DP_STATS_INC_PKT(sa_peer, rx.intra_bss.fail, 1,
						len);
				return false;
			}
		}
	}
	/* if it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and original pkt up the network stack
	 * Note: how do we handle multicast pkts. do we forward
	 * all multicast pkts as is or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement.
	 */
	else if (qdf_unlikely((hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
		!sa_peer->bss_peer))) {
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			return false;
		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
		len = qdf_nbuf_len(nbuf_copy);

		if (dp_tx_send(sa_peer->vdev, nbuf_copy)) {
			DP_STATS_INC_PKT(sa_peer, rx.intra_bss.fail, 1, len);
			qdf_nbuf_free(nbuf_copy);
		} else
			DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts, 1, len);
	}
	/* return false as we have to still send the original pkt
	 * up the stack
	 */
	return false;
}

#ifdef MESH_MODE_SUPPORT

/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: pointer to peer
 *
 * This function allocated memory for mesh receive stats and fill the
 * required stats. Stores the memory address in skb cb.
 *
 * Return: void
 */

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible to free this memory */

	if (rx_info == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Memory allocation failed for mesh rx stats");
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	/* Export the decryption key via the OS callback only for frames the
	 * hardware reports as decrypted. */
	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
		rx_info->rs_flags |= MESH_RX_DECRYPTED;
		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
		if (vdev->osif_get_key)
			vdev->osif_get_key(vdev->osif_vdev,
					&rx_info->rs_decryptkey[0],
					&peer->mac_addr.raw[0],
					rx_info->rs_keyix);
	}

	/* Record signal/rate info from the rx TLVs and pack rate, nss,
	 * pkt_type and bw into the single rs_ratephy1 word. */
	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
	rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(rx_tlv_hdr);
	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
				(bw << 24);

	/* Stash the stats block in the skb cb; the upper layer frees it. */
	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
						rx_info->rs_flags,
						rx_info->rs_rssi,
						rx_info->rs_channel,
						rx_info->rs_ratephy1,
						rx_info->rs_keyix);

}

/**
 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet is matching any filter out
 * category and drop the packet if it matches.
 *
 * Return: status(0 indicates drop, 1 indicate to no drop)
 */

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;

	/* Each filter bit selects a class of frames (by To/From-DS bits or
	 * by RA/TA match against this vdev) to be dropped.
	 * NOTE(review): QDF_STATUS_SUCCESS here means "filter matched, drop"
	 * per the doc above — inverted from the usual success meaning. */
	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(rx_tlv_hdr)
				&& !hal_rx_mpdu_get_to_ds(rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(rx_tlv_hdr,
					&mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					&vdev->mac_addr.raw[0],
					DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(rx_tlv_hdr,
					&mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					&vdev->mac_addr.raw[0],
					DP_MAC_ADDR_LEN))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

#else
/* Mesh support compiled out: stats fill is a no-op and filtering never
 * matches (E_FAILURE == "do not drop"). */
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}

#endif

#ifdef CONFIG_WIN
/**
 * dp_rx_nac_filter(): Function to perform filtering of non-associated
 * clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
		uint8_t *rx_pkt_hdr)
{
	struct ieee80211_frame *wh;
	struct dp_neighbour_peer *peer = NULL;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	/* Only To-DS frames are candidates for NAC monitoring. */
	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
		return
 NULL;

	/* Match the transmitter address against the configured neighbour
	 * (non-associated client) list under the neighbour peer lock. */
	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
			neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				wh->i_addr2, DP_MAC_ADDR_LEN) == 0) {
			QDF_TRACE(
				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
				peer->neighbour_peers_macaddr.raw[0],
				peer->neighbour_peers_macaddr.raw[1],
				peer->neighbour_peers_macaddr.raw[2],
				peer->neighbour_peers_macaddr.raw[3],
				peer->neighbour_peers_macaddr.raw[4],
				peer->neighbour_peers_macaddr.raw[5]);

			qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

			return pdev->monitor_vdev;
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	return NULL;
}

/**
 * dp_rx_process_nac_rssi_frames(): Store RSSI for configured NAC
 * @pdev: DP pdev handle
 * @rx_tlv_hdr: tlv hdr buf
 *
 * return: None
 */
#ifdef ATH_SUPPORT_NAC_RSSI
static void dp_rx_process_nac_rssi_frames(struct dp_pdev *pdev, uint8_t *rx_tlv_hdr)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc = pdev->soc;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	/* For each vdev tracking this client (addr1 match), cache the RSSI
	 * from the rx TLVs and raise a WDI NAC_RSSI event.
	 * NOTE(review): vdev_list is walked without a lock here — presumably
	 * safe in this caller context; confirm. */
	if (pdev->nac_rssi_filtering) {
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (vdev->cdp_nac_rssi_enabled &&
				(qdf_mem_cmp(vdev->cdp_nac_rssi.client_mac,
					wh->i_addr1, DP_MAC_ADDR_LEN) == 0)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_DEBUG, "RSSI updated");
				vdev->cdp_nac_rssi.vdev_id = vdev->vdev_id;
				vdev->cdp_nac_rssi.client_rssi =
					hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
				dp_wdi_event_handler(WDI_EVENT_NAC_RSSI, soc,
					(void *)&vdev->cdp_nac_rssi,
					HTT_INVALID_PEER, WDI_NO_VAL,
					pdev->pdev_id);
			}
		}
	}
}
#else
static void dp_rx_process_nac_rssi_frames(struct dp_pdev *pdev, uint8_t *rx_tlv_hdr)
{
}
#endif

/**
 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 *
 * return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	struct dp_invalid_peer_msg msg;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t i;
	qdf_nbuf_t curr_nbuf, next_nbuf;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"NAWDS valid only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Invalid nbuf length");
		goto free;
	}


	/* Scan all pdevs: first give the NAC filter a chance to claim the
	 * frame for the monitor vdev; otherwise look for a vdev whose MAC
	 * matches the frame's addr1 so umac can be notified. */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"PDEV not found");
			continue;
		}

		if (pdev->filter_neighbour_peers) {
			/* Next Hop scenario not yet handle */
			vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
			if (vdev) {
				dp_rx_mon_deliver(soc, i,
						pdev->invalid_peer_head_msdu,
						pdev->invalid_peer_tail_msdu);

				pdev->invalid_peer_head_msdu = NULL;
				pdev->invalid_peer_tail_msdu = NULL;

				return 0;
			}
		}


		dp_rx_process_nac_rssi_frames(pdev, rx_tlv_hdr);

		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
						DP_MAC_ADDR_LEN) == 0) {
				goto out;
			}
		}
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"VDEV not found");
		goto free;
	}

out:
	/* Strip the rx TLVs and hand the frame to umac's invalid-peer
	 * handler so the peer can be (re)created. */
	msg.wh = wh;
	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
	msg.nbuf = mpdu;
	msg.vdev_id = vdev->vdev_id;
	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(pdev->osif_pdev, &msg);

free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}

/**
 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 *
 * return: integer type
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* Only trigger the process when mpdu is completed */
	if (mpdu_done)
		dp_rx_process_invalid_peer(soc, mpdu);
}
#else
/* Non-WIN: no NAC filtering — just count, free the chain and reset the
 * per-pdev invalid peer accumulation pointers. */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu)
{
	qdf_nbuf_t curr_nbuf, next_nbuf;
	struct dp_pdev *pdev;
	uint8_t i;

	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		/* Drop and free packet */
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				qdf_nbuf_len(curr_nbuf));
		qdf_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	/* reset the head and tail pointers */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		pdev = soc->pdev_list[i];
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_ERROR,
				"PDEV not found");
			continue;
		}

		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}
	return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done)
{
	/* To avoid compiler warning */
	mpdu_done = mpdu_done;

	/* Process the nbuf */
	dp_rx_process_invalid_peer(soc, mpdu);
}
#endif

#if defined(FEATURE_LRO)
/* Dump the LRO-relevant TLV fields at debug level. */
static void dp_rx_print_lro_info(uint8_t *rx_tlv)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
	FL("----------------------RX DESC LRO----------------------\n"));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("lro_eligible 0x%x"), HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("pure_ack 0x%x"), HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("chksum 0x%x"), HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("TCP seq num 0x%x"), HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("TCP ack num 0x%x"), HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("TCP window 0x%x"), HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("TCP protocol 0x%x"), HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("TCP offset 0x%x"), HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		FL("toeplitz 0x%x"), HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
	FL("---------------------------------------------------------\n"));
}

/**
 * dp_rx_lro() - LRO related processing
 * @rx_tlv: TLV data extracted from the rx packet
 * @peer: destination peer of the msdu
 * @msdu: network buffer
 * @ctx: LRO context
 *
 * This function performs the LRO related processing of the msdu. It copies
 * the hardware-parsed TCP/flow fields from the rx TLVs into the nbuf cb so
 * the LRO layer can aggregate without re-parsing.
 *
 * Return: void
 */
static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
		 qdf_nbuf_t msdu, qdf_lro_ctx_t ctx)
{
	/* Without a peer/vdev or with LRO disabled, mark the buffer
	 * ineligible and bail out. */
	if (!peer || !peer->vdev || !peer->vdev->lro_enable) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			 FL("no peer, no vdev or LRO disabled"));
		QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = 0;
		return;
	}
	qdf_assert(rx_tlv);
	dp_rx_print_lro_info(rx_tlv);

	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
		 HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);

	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
			HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);

	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
			 HAL_RX_TLV_GET_TCP_CHKSUM(rx_tlv);
	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
			 HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
			 HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
			 HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
			 HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
			 HAL_RX_TLV_GET_IPV6(rx_tlv);
	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
			 HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
			 HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);
	QDF_NBUF_CB_RX_LRO_CTX(msdu) = (unsigned char *)ctx;

}
#else
static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
		 qdf_nbuf_t msdu, qdf_lro_ctx_t ctx)
{
}
#endif

/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length (remaining, decremented by one buffer's payload
 *	      on each call)
 *
 * Return: returns true if nbuf is last msdu of mpdu else returns false.
 */
static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
{
	bool last_nbuf;

	if (*mpdu_len >= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
		qdf_nbuf_set_pktlen(nbuf, RX_BUFFER_SIZE);
		last_nbuf = false;
	} else {
		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
		last_nbuf = true;
	}

	/* NOTE(review): on the last nbuf this subtraction underflows the
	 * unsigned remainder; callers stop consuming *mpdu_len once
	 * last_nbuf is true, so the stale value is never read — confirm. */
	*mpdu_len -= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN);

	return last_nbuf;
}

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		     multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_t parent, next, frag_list;
	uint16_t frag_list_len = 0;
	uint16_t mpdu_len;
	bool last_nbuf;

	/*
	 * this is a case where the complete msdu fits in one single nbuf.
	 * in this case HW sets both start and end bit and we only need to
	 * reset these bits for RAW mode simulator to decap the pkt
	 */
	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
					qdf_nbuf_is_rx_chfrag_end(nbuf)) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
		qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		return nbuf;
	}

	/*
	 * This is a case where we have multiple msdus (A-MSDU) spread across
	 * multiple nbufs. here we create a fraglist out of these nbufs.
	 *
	 * the moment we encounter a nbuf with continuation bit set we
	 * know for sure we have an MSDU which is spread across multiple
	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
	 */
	parent = nbuf;
	frag_list = nbuf->next;
	nbuf = nbuf->next;

	/*
	 * set the start bit in the first nbuf we encounter with continuation
	 * bit set. This has the proper mpdu length set as it is the first
	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
	 * nbufs will form the frag_list of the parent nbuf.
	 */
	qdf_nbuf_set_rx_chfrag_start(parent, 1);
	mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);

	/*
	 * this is where we set the length of the fragments which are
	 * associated to the parent nbuf. We iterate through the frag_list
	 * till we hit the last_nbuf of the list.
	 */
	do {
		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
		/* each fragment's rx TLV header is stripped; only the parent
		 * keeps (and later strips) its own. */
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
		frag_list_len += qdf_nbuf_len(nbuf);

		if (last_nbuf) {
			next = nbuf->next;
			nbuf->next = NULL;
			break;
		}

		nbuf = nbuf->next;
	} while (!last_nbuf);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
	/* detach the chain from any following, unrelated buffers */
	parent->next = next;

	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
	return parent;
}

/* Deliver a chain of nbufs to the OS shim, running the RAW-mode decap
 * simulation first for raw/native-wifi decap types. Frees the chain if no
 * rx callback is registered. */
static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
						struct dp_peer *peer,
						qdf_nbuf_t nbuf_head,
						qdf_nbuf_t nbuf_tail)
{
	/*
	 * highly unlikely to have a vdev without a registered rx
	 * callback function. if so let us free the nbuf_list.
	 */
	if (qdf_unlikely(!vdev->osif_rx)) {
		qdf_nbuf_t nbuf;
		do {
			nbuf = nbuf_head;
			nbuf_head = nbuf_head->next;
			qdf_nbuf_free(nbuf);
		} while (nbuf_head);

		return;
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
			(vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
				&nbuf_tail, (struct cdp_peer *) peer);
	}

	vdev->osif_rx(vdev->osif_vdev, nbuf_head);

}

/**
 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * The ipsumed field of the skb is set based on whether HW validated the
 * IP/TCP/UDP checksum.
+ * + * Return: void + */ +static inline void dp_rx_cksum_offload(struct dp_pdev *pdev, + qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr) +{ + qdf_nbuf_rx_cksum_t cksum = {0}; + bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr); + bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr); + + if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) { + cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY; + qdf_nbuf_set_rx_cksum(nbuf, &cksum); + } else { + DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err); + DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er); + } +} + +/** + * dp_rx_msdu_stats_update() - update per msdu stats. + * @soc: core txrx main context + * @nbuf: pointer to the first msdu of an amsdu. + * @rx_tlv_hdr: pointer to the start of RX TLV headers. + * @peer: pointer to the peer object. + * @ring_id: reo dest ring number on which pkt is reaped. + * + * update all the per msdu stats for that nbuf. + * Return: void + */ +static void dp_rx_msdu_stats_update(struct dp_soc *soc, + qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr, + struct dp_peer *peer, + uint8_t ring_id) +{ + bool is_ampdu, is_not_amsdu; + uint16_t peer_id; + uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type; + struct dp_vdev *vdev = peer->vdev; + struct ether_header *eh; + uint16_t msdu_len = qdf_nbuf_len(nbuf); + + peer_id = DP_PEER_METADATA_PEER_ID_GET( + hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr)); + + is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) & + qdf_nbuf_is_rx_chfrag_end(nbuf); + + DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len); + DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu); + DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu); + + if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) && + (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) { + eh = (struct ether_header *)qdf_nbuf_data(nbuf); + if (IEEE80211_IS_BROADCAST(eh->ether_dhost)) { + DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len); + } else { + 
DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len); + } + } + + /* + * currently we can return from here as we have similar stats + * updated at per ppdu level instead of msdu level + */ + if (!soc->process_rx_status) + return; + + is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr); + DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu); + DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu)); + + sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr); + mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr); + tid = hal_rx_mpdu_start_tid_get(rx_tlv_hdr); + bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr); + reception_type = hal_rx_msdu_start_reception_type_get(rx_tlv_hdr); + nss = hal_rx_msdu_start_nss_get(rx_tlv_hdr); + pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr); + + /* Save tid to skb->priority */ + DP_RX_TID_SAVE(nbuf, tid); + + DP_STATS_INC(peer, rx.nss[nss], 1); + DP_STATS_INC(peer, rx.sgi_count[sgi], 1); + DP_STATS_INCC(peer, rx.err.mic_err, 1, + hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr)); + DP_STATS_INCC(peer, rx.err.decrypt_err, 1, + hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr)); + + DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1); + DP_STATS_INC(peer, rx.bw[bw], 1); + DP_STATS_INC(peer, rx.reception_type[reception_type], 1); + + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1, + ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A))); + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A))); + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1, + ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B))); + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B))); + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1, + ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N))); + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N))); + DP_STATS_INCC(peer, 
rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1, + ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC))); + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC))); + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1, + ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX))); + DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs <= MAX_MCS) && (pkt_type == DOT11_AX))); + + if ((soc->process_rx_status) && + hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) { + if (soc->cdp_soc.ol_ops->update_dp_stats) { + soc->cdp_soc.ol_ops->update_dp_stats( + vdev->pdev->osif_pdev, + &peer->stats, + peer_id, + UPDATE_PEER_STATS); + } + } +} + +#ifdef WDS_VENDOR_EXTENSION +int dp_wds_rx_policy_check( + uint8_t *rx_tlv_hdr, + struct dp_vdev *vdev, + struct dp_peer *peer, + int rx_mcast + ) +{ + struct dp_peer *bss_peer; + int fr_ds, to_ds, rx_3addr, rx_4addr; + int rx_policy_ucast, rx_policy_mcast; + + if (vdev->opmode == wlan_op_mode_ap) { + TAILQ_FOREACH(bss_peer, &vdev->peer_list, peer_list_elem) { + if (bss_peer->bss_peer) { + /* if wds policy check is not enabled on this vdev, accept all frames */ + if (!bss_peer->wds_ecm.wds_rx_filter) { + return 1; + } + break; + } + } + rx_policy_ucast = bss_peer->wds_ecm.wds_rx_ucast_4addr; + rx_policy_mcast = bss_peer->wds_ecm.wds_rx_mcast_4addr; + } else { /* sta mode */ + if (!peer->wds_ecm.wds_rx_filter) { + return 1; + } + rx_policy_ucast = peer->wds_ecm.wds_rx_ucast_4addr; + rx_policy_mcast = peer->wds_ecm.wds_rx_mcast_4addr; + } + + /* ------------------------------------------------ + * self + * peer- rx rx- + * wds ucast mcast dir policy accept note + * ------------------------------------------------ + * 1 1 0 11 x1 1 AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept + * 1 1 0 01 x1 0 AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop + * 1 1 0 10 x1 0 AP configured to accept ds-to-ds Rx ucast from 
wds peers, constraint not met; so, drop + * 1 1 0 00 x1 0 bad frame, won't see it + * 1 0 1 11 1x 1 AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept + * 1 0 1 01 1x 0 AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop + * 1 0 1 10 1x 0 AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop + * 1 0 1 00 1x 0 bad frame, won't see it + * 1 1 0 11 x0 0 AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop + * 1 1 0 01 x0 0 AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop + * 1 1 0 10 x0 1 AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept + * 1 1 0 00 x0 0 bad frame, won't see it + * 1 0 1 11 0x 0 AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop + * 1 0 1 01 0x 0 AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop + * 1 0 1 10 0x 1 AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept + * 1 0 1 00 0x 0 bad frame, won't see it + * + * 0 x x 11 xx 0 we only accept td-ds Rx frames from non-wds peers in mode. 
+ * 0 x x 01 xx 1 + * 0 x x 10 xx 0 + * 0 x x 00 xx 0 bad frame, won't see it + * ------------------------------------------------ + */ + + fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr); + to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr); + rx_3addr = fr_ds ^ to_ds; + rx_4addr = fr_ds & to_ds; + + if (vdev->opmode == wlan_op_mode_ap) { + if ((!peer->wds_enabled && rx_3addr && to_ds) || + (peer->wds_enabled && !rx_mcast && (rx_4addr == rx_policy_ucast)) || + (peer->wds_enabled && rx_mcast && (rx_4addr == rx_policy_mcast))) { + return 1; + } + } else { /* sta mode */ + if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) || + (rx_mcast && (rx_4addr == rx_policy_mcast))) { + return 1; + } + } + return 0; +} +#else +int dp_wds_rx_policy_check( + uint8_t *rx_tlv_hdr, + struct dp_vdev *vdev, + struct dp_peer *peer, + int rx_mcast + ) +{ + return 1; +} +#endif + +/** + * dp_rx_process() - Brain of the Rx processing functionality + * Called from the bottom half (tasklet/NET_RX_SOFTIRQ) + * @soc: core txrx main context + * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced + * @quota: No. of units (packets) that can be serviced in one shot. + * + * This function implements the core of Rx functionality. This is + * expected to handle only non-error frames. + * + * Return: uint32_t: No. 
of elements processed + */ +uint32_t +dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota) +{ + void *hal_soc; + void *ring_desc; + struct dp_rx_desc *rx_desc = NULL; + qdf_nbuf_t nbuf, next; + union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL }; + union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL }; + uint32_t rx_bufs_used = 0, rx_buf_cookie; + uint32_t l2_hdr_offset = 0; + uint16_t msdu_len; + uint16_t peer_id; + struct dp_peer *peer = NULL; + struct dp_vdev *vdev = NULL; + uint32_t pkt_len; + struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 }; + struct hal_rx_msdu_desc_info msdu_desc_info = { 0 }; + enum hal_reo_error_status error; + uint32_t peer_mdata; + uint8_t *rx_tlv_hdr; + uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 }; + uint8_t mac_id = 0; + struct dp_pdev *pdev; + struct dp_srng *dp_rxdma_srng; + struct rx_desc_pool *rx_desc_pool; + struct dp_soc *soc = int_ctx->soc; + uint8_t ring_id = 0; + uint8_t core_id = 0; + qdf_nbuf_t nbuf_head = NULL; + qdf_nbuf_t nbuf_tail = NULL; + qdf_nbuf_t deliver_list_head = NULL; + qdf_nbuf_t deliver_list_tail = NULL; + + DP_HIST_INIT(); + /* Debug -- Remove later */ + qdf_assert(soc && hal_ring); + + hal_soc = soc->hal_soc; + + /* Debug -- Remove later */ + qdf_assert(hal_soc); + + hif_pm_runtime_mark_last_busy(soc->osdev->dev); + + if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) { + + /* + * Need API to convert from hal_ring pointer to + * Ring Type / Ring Id combo + */ + DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + FL("HAL RING Access Failed -- %pK"), hal_ring); + hal_srng_access_end(hal_soc, hal_ring); + goto done; + } + + /* + * start reaping the buffers from reo ring and queue + * them in per vdev queue. + * Process the received pkts in a different per vdev loop. 
+ */ + while (qdf_likely(quota && (ring_desc = + hal_srng_dst_get_next(hal_soc, hal_ring)))) { + + error = HAL_RX_ERROR_STATUS_GET(ring_desc); + ring_id = hal_srng_ring_id_get(hal_ring); + + if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("HAL RING 0x%pK:error %d"), hal_ring, error); + DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1); + /* Don't know how to deal with this -- assert */ + qdf_assert(0); + } + + rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc); + + rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie); + + + qdf_assert(rx_desc); + rx_bufs_reaped[rx_desc->pool_id]++; + + /* TODO */ + /* + * Need a separate API for unmapping based on + * phyiscal address + */ + qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf, + QDF_DMA_BIDIRECTIONAL); + + core_id = smp_processor_id(); + DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1); + + /* Get MPDU DESC info */ + hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info); + + hal_rx_mpdu_peer_meta_data_set(qdf_nbuf_data(rx_desc->nbuf), + mpdu_desc_info.peer_meta_data); + + /* Get MSDU DESC info */ + hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info); + + /* + * save msdu flags first, last and continuation msdu in + * nbuf->cb + */ + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU) + qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1); + + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) + qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1); + + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU) + qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1); + + DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf); + + /* + * if continuation bit is set then we have MSDU spread + * across multiple buffers, let us not decrement quota + * till we reap all buffers of that MSDU. 
+ */ + if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf))) + quota -= 1; + + + dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], + &tail[rx_desc->pool_id], + rx_desc); + } +done: + hal_srng_access_end(hal_soc, hal_ring); + + /* Update histogram statistics by looping through pdev's */ + DP_RX_HIST_STATS_PER_PDEV(); + + for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { + /* + * continue with next mac_id if no pkts were reaped + * from that pool + */ + if (!rx_bufs_reaped[mac_id]) + continue; + + pdev = soc->pdev_list[mac_id]; + dp_rxdma_srng = &pdev->rx_refill_buf_ring; + rx_desc_pool = &soc->rx_desc_buf[mac_id]; + + dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, + rx_desc_pool, rx_bufs_reaped[mac_id], + &head[mac_id], &tail[mac_id]); + } + + /* Peer can be NULL is case of LFR */ + if (qdf_likely(peer != NULL)) + vdev = NULL; + + /* + * BIG loop where each nbuf is dequeued from global queue, + * processed and queued back on a per vdev basis. These nbufs + * are sent to stack as and when we run out of nbufs + * or a new nbuf dequeued from global queue has a different + * vdev when compared to previous nbuf. 
+ */ + nbuf = nbuf_head; + while (nbuf) { + next = nbuf->next; + rx_tlv_hdr = qdf_nbuf_data(nbuf); + + /* + * Check if DMA completed -- msdu_done is the last bit + * to be written + */ + if (qdf_unlikely(!hal_rx_attn_msdu_done_get(rx_tlv_hdr))) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("MSDU DONE failure")); + hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO); + qdf_assert(0); + } + + peer_mdata = hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr); + peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata); + peer = dp_peer_find_by_id(soc, peer_id); + + if (peer) { + QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false; + qdf_dp_trace_set_track(nbuf, QDF_RX); + QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1; + QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) = + QDF_NBUF_RX_PKT_DATA_TRACK; + } + + rx_bufs_used++; + + if (deliver_list_head && peer && (vdev != peer->vdev)) { + dp_rx_deliver_to_stack(vdev, peer, deliver_list_head, + deliver_list_tail); + deliver_list_head = NULL; + deliver_list_tail = NULL; + } + + if (qdf_likely(peer != NULL)) { + vdev = peer->vdev; + } else { + qdf_nbuf_free(nbuf); + nbuf = next; + continue; + } + + if (qdf_unlikely(vdev == NULL)) { + qdf_nbuf_free(nbuf); + nbuf = next; + DP_STATS_INC(soc, rx.err.invalid_vdev, 1); + continue; + } + + DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id); + /* + * The below condition happens when an MSDU is spread + * across multiple buffers. This can happen in two cases + * 1. The nbuf size is smaller then the received msdu. + * ex: we have set the nbuf size to 2048 during + * nbuf_alloc. but we received an msdu which is + * 2304 bytes in size then this msdu is spread + * across 2 nbufs. + * + * 2. AMSDUs when RAW mode is enabled. + * ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread + * across 1st nbuf and 2nd nbuf and last MSDU is + * spread across 2nd nbuf and 3rd nbuf. 
+ * + * for these scenarios let us create a skb frag_list and + * append these buffers till the last MSDU of the AMSDU + */ + if (qdf_unlikely(vdev->rx_decap_type == + htt_cmn_pkt_type_raw)) { + + DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1); + DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf)); + + nbuf = dp_rx_sg_create(nbuf, rx_tlv_hdr); + next = nbuf->next; + } + + if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer, + hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL("Policy Check Drop pkt")); + /* Drop & free packet */ + qdf_nbuf_free(nbuf); + /* Statistics */ + nbuf = next; + continue; + } + + if (qdf_unlikely(peer && peer->bss_peer)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL("received pkt with same src MAC")); + DP_STATS_INC(vdev->pdev, dropped.mec, 1); + + /* Drop & free packet */ + qdf_nbuf_free(nbuf); + /* Statistics */ + nbuf = next; + continue; + } + + if (qdf_unlikely(peer && (peer->nawds_enabled == true) && + (hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) && + (hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr) == false))) { + DP_STATS_INC(peer, rx.nawds_mcast_drop, 1); + qdf_nbuf_free(nbuf); + nbuf = next; + continue; + } + + dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr); + + dp_set_rx_queue(nbuf, ring_id); + + /* + * HW structures call this L3 header padding -- + * even though this is actually the offset from + * the buffer beginning where the L2 header + * begins. 
+ */ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + FL("rxhash: flow id toeplitz: 0x%x\n"), + hal_rx_msdu_start_toeplitz_get(rx_tlv_hdr)); + + /*L2 header offset will not be set in raw mode*/ + if (qdf_likely(vdev->rx_decap_type != + htt_cmn_pkt_type_raw)) { + l2_hdr_offset = + hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr); + } + + msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr); + pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN; + + if (unlikely(qdf_nbuf_get_ext_list(nbuf))) + qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN); + else { + qdf_nbuf_set_pktlen(nbuf, pkt_len); + qdf_nbuf_pull_head(nbuf, + RX_PKT_TLVS_LEN + + l2_hdr_offset); + } + + dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, ring_id); + + if (qdf_unlikely(vdev->mesh_vdev)) { + if (dp_rx_filter_mesh_packets(vdev, nbuf, + rx_tlv_hdr) + == QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_INFO_MED, + FL("mesh pkt filtered")); + DP_STATS_INC(vdev->pdev, dropped.mesh_filter, + 1); + + qdf_nbuf_free(nbuf); + nbuf = next; + continue; + } + dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); + } + +#ifdef QCA_WIFI_NAPIER_EMULATION_DBG /* Debug code, remove later */ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "p_id %d msdu_len %d hdr_off %d", + peer_id, msdu_len, l2_hdr_offset); + + print_hex_dump(KERN_ERR, + "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4, + qdf_nbuf_data(nbuf), 128, false); +#endif /* NAPIER_EMULATION */ + + if (qdf_likely(vdev->rx_decap_type == + htt_cmn_pkt_type_ethernet) && + (qdf_likely(!vdev->mesh_vdev))) { + /* WDS Source Port Learning */ + dp_rx_wds_srcport_learn(soc, + rx_tlv_hdr, + peer, + nbuf); + + /* Intrabss-fwd */ + if (dp_rx_check_ap_bridge(vdev)) + if (dp_rx_intrabss_fwd(soc, + peer, + rx_tlv_hdr, + nbuf)) { + nbuf = next; + continue; /* Get next desc */ + } + } + + dp_rx_lro(rx_tlv_hdr, peer, nbuf, int_ctx->lro_ctx); + + DP_RX_LIST_APPEND(deliver_list_head, + deliver_list_tail, + nbuf); + + DP_STATS_INC_PKT(peer, rx.to_stack, 1, + 
qdf_nbuf_len(nbuf)); + + nbuf = next; + } + + if (deliver_list_head) + dp_rx_deliver_to_stack(vdev, peer, deliver_list_head, + deliver_list_tail); + + return rx_bufs_used; /* Assume no scale factor for now */ +} + +/** + * dp_rx_detach() - detach dp rx + * @pdev: core txrx pdev context + * + * This function will detach DP RX into main device context + * will free DP Rx resources. + * + * Return: void + */ +void +dp_rx_pdev_detach(struct dp_pdev *pdev) +{ + uint8_t pdev_id = pdev->pdev_id; + struct dp_soc *soc = pdev->soc; + struct rx_desc_pool *rx_desc_pool; + + rx_desc_pool = &soc->rx_desc_buf[pdev_id]; + + if (rx_desc_pool->pool_size != 0) { + dp_rx_desc_pool_free(soc, pdev_id, rx_desc_pool); + } + + return; +} + +/** + * dp_rx_attach() - attach DP RX + * @pdev: core txrx pdev context + * + * This function will attach a DP RX instance into the main + * device (SOC) context. Will allocate dp rx resource and + * initialize resources. + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS +dp_rx_pdev_attach(struct dp_pdev *pdev) +{ + uint8_t pdev_id = pdev->pdev_id; + struct dp_soc *soc = pdev->soc; + struct dp_srng rxdma_srng; + uint32_t rxdma_entries; + union dp_rx_desc_list_elem_t *desc_list = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + struct dp_srng *dp_rxdma_srng; + struct rx_desc_pool *rx_desc_pool; + + if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "nss-wifi<4> skip Rx refil %d", pdev_id); + return QDF_STATUS_SUCCESS; + } + + pdev = soc->pdev_list[pdev_id]; + rxdma_srng = pdev->rx_refill_buf_ring; + soc->process_rx_status = CONFIG_PROCESS_RX_STATUS; + rxdma_entries = rxdma_srng.alloc_size/hal_srng_get_entrysize( + soc->hal_soc, RXDMA_BUF); + + rx_desc_pool = &soc->rx_desc_buf[pdev_id]; + + dp_rx_desc_pool_alloc(soc, pdev_id, rxdma_entries*3, rx_desc_pool); + + rx_desc_pool->owner = DP_WBM2SW_RBM; + /* For Rx buffers, WBM release ring is 
SW RING 3,for all pdev's */ + dp_rxdma_srng = &pdev->rx_refill_buf_ring; + dp_rx_buffers_replenish(soc, pdev_id, dp_rxdma_srng, rx_desc_pool, + 0, &desc_list, &tail); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_nbuf_prepare() - prepare RX nbuf + * @soc: core txrx main context + * @pdev: core txrx pdev context + * + * This function alloc & map nbuf for RX dma usage, retry it if failed + * until retry times reaches max threshold or succeeded. + * + * Return: qdf_nbuf_t pointer if succeeded, NULL if failed. + */ +qdf_nbuf_t +dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev) +{ + uint8_t *buf; + int32_t nbuf_retry_count; + QDF_STATUS ret; + qdf_nbuf_t nbuf = NULL; + + for (nbuf_retry_count = 0; nbuf_retry_count < + QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD; + nbuf_retry_count++) { + /* Allocate a new skb */ + nbuf = qdf_nbuf_alloc(soc->osdev, + RX_BUFFER_SIZE, + RX_BUFFER_RESERVATION, + RX_BUFFER_ALIGNMENT, + FALSE); + + if (nbuf == NULL) { + DP_STATS_INC(pdev, + replenish.nbuf_alloc_fail, 1); + continue; + } + + buf = qdf_nbuf_data(nbuf); + + memset(buf, 0, RX_BUFFER_SIZE); + + ret = qdf_nbuf_map_single(soc->osdev, nbuf, + QDF_DMA_BIDIRECTIONAL); + + /* nbuf map failed */ + if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) { + qdf_nbuf_free(nbuf); + DP_STATS_INC(pdev, replenish.map_err, 1); + continue; + } + /* qdf_nbuf alloc and map succeeded */ + break; + } + + /* qdf_nbuf still alloc or map failed */ + if (qdf_unlikely(nbuf_retry_count >= + QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD)) + return NULL; + + return nbuf; +} diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.h new file mode 100644 index 0000000000000000000000000000000000000000..df239f82134a0b0caf2592104ccbddf750ee36d9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.h @@ -0,0 +1,784 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DP_RX_H +#define _DP_RX_H + +#include "hal_rx.h" +#include "dp_tx.h" +#include "dp_peer.h" +#include "dp_internal.h" + +#ifdef RXDMA_OPTIMIZATION +#define RX_BUFFER_ALIGNMENT 128 +#else /* RXDMA_OPTIMIZATION */ +#define RX_BUFFER_ALIGNMENT 4 +#endif /* RXDMA_OPTIMIZATION */ + +#ifdef QCA_HOST2FW_RXBUF_RING +#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW1_BM +#else +#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW3_BM +#endif +#define RX_BUFFER_SIZE 2048 +#define RX_BUFFER_RESERVATION 0 + +#define DP_PEER_METADATA_PEER_ID_MASK 0x0000ffff +#define DP_PEER_METADATA_PEER_ID_SHIFT 0 +#define DP_PEER_METADATA_VDEV_ID_MASK 0x00070000 +#define DP_PEER_METADATA_VDEV_ID_SHIFT 16 + +#define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata) \ + (((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK) \ + >> DP_PEER_METADATA_PEER_ID_SHIFT) + +#define DP_PEER_METADATA_ID_GET(_peer_metadata) \ + (((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK) \ + >> DP_PEER_METADATA_VDEV_ID_SHIFT) + +#define DP_RX_DESC_MAGIC 0xdec0de + +/** + * struct dp_rx_desc + * + * @nbuf : VA of the "skb" posted + * @rx_buf_start : VA of the original Rx buffer, before + * movement of any skb->data pointer + * @cookie : index into the sw array which holds + * the sw Rx 
descriptors + * Cookie space is 21 bits: + * lower 18 bits -- index + * upper 3 bits -- pool_id + * @pool_id : pool Id for which this allocated. + * Can only be used if there is no flow + * steering + * @in_use rx_desc is in use + * @unmapped used to mark rx_desc an unmapped if the corresponding + * nbuf is already unmapped + */ +struct dp_rx_desc { + qdf_nbuf_t nbuf; + uint8_t *rx_buf_start; + uint32_t cookie; + uint8_t pool_id; +#ifdef RX_DESC_DEBUG_CHECK + uint32_t magic; +#endif + uint8_t in_use:1, + unmapped:1; +}; + +#define RX_DESC_COOKIE_INDEX_SHIFT 0 +#define RX_DESC_COOKIE_INDEX_MASK 0x3ffff /* 18 bits */ +#define RX_DESC_COOKIE_POOL_ID_SHIFT 18 +#define RX_DESC_COOKIE_POOL_ID_MASK 0x1c0000 + +#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie) \ + (((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >> \ + RX_DESC_COOKIE_POOL_ID_SHIFT) + +#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie) \ + (((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >> \ + RX_DESC_COOKIE_INDEX_SHIFT) + +/* + *dp_rx_xor_block() - xor block of data + *@b: destination data block + *@a: source data block + *@len: length of the data to process + * + *Returns: None + */ +static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len) +{ + qdf_size_t i; + + for (i = 0; i < len; i++) + b[i] ^= a[i]; +} + +/* + *dp_rx_rotl() - rotate the bits left + *@val: unsigned integer input value + *@bits: number of bits + * + *Returns: Integer with left rotated by number of 'bits' + */ +static inline uint32_t dp_rx_rotl(uint32_t val, int bits) +{ + return (val << bits) | (val >> (32 - bits)); +} + +/* + *dp_rx_rotr() - rotate the bits right + *@val: unsigned integer input value + *@bits: number of bits + * + *Returns: Integer with right rotated by number of 'bits' + */ +static inline uint32_t dp_rx_rotr(uint32_t val, int bits) +{ + return (val >> bits) | (val << (32 - bits)); +} + +/* + * dp_set_rx_queue() - set queue_mapping in skb + * @nbuf: skb + * @queue_id: rx queue_id + * + * Return: void + */ +#ifdef 
QCA_OL_RX_MULTIQ_SUPPORT
+/* Record the hw rx ring id in the skb so the stack can select a queue. */
+static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
+{
+	qdf_nbuf_record_rx_queue(nbuf, queue_id);
+	return;
+}
+#else
+/* Multiqueue support disabled: the ring id is not recorded. */
+static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
+{
+}
+#endif
+
+/*
+ * dp_rx_xswap() - swap each pair of adjacent bytes of a 32-bit word
+ * @val: unsigned integer input value
+ *
+ * Used as the byte-swap step of the Michael block function below.
+ *
+ * Returns: @val with bytes 0<->1 and 2<->3 exchanged
+ */
+static inline uint32_t dp_rx_xswap(uint32_t val)
+{
+	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
+}
+
+/*
+ * dp_rx_get_le32_split() - assemble 32 bits from four little-endian bytes
+ * @b0: byte 0 (least significant)
+ * @b1: byte 1
+ * @b2: byte 2
+ * @b3: byte 3 (most significant)
+ *
+ * NOTE(review): @b3 is promoted to signed int before `<< 24`, so a value
+ * with the top bit set shifts into the sign bit; a (uint32_t) cast would
+ * be strictly cleaner -- confirm against project compiler flags.
+ *
+ * Returns: the 32-bit little-endian combination of the four bytes
+ */
+static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
+					uint8_t b3)
+{
+	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
+}
+
+/*
+ * dp_rx_get_le32() - read a little-endian 32-bit value from a byte array
+ * @p: pointer to at least 4 readable bytes
+ *
+ * Returns: the value stored little-endian at @p
+ */
+static inline uint32_t dp_rx_get_le32(const uint8_t *p)
+{
+	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
+}
+
+/*
+ * dp_rx_put_le32() - store a 32-bit value little-endian into a byte array
+ * @p: destination char array (at least 4 writable bytes)
+ * @v: source 32-bit integer
+ *
+ * Returns: None
+ */
+static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
+{
+	p[0] = (v) & 0xff;
+	p[1] = (v >> 8) & 0xff;
+	p[2] = (v >> 16) & 0xff;
+	p[3] = (v >> 24) & 0xff;
+}
+
+/* One round of the Michael MIC block function on the halves l and r */
+#define dp_rx_michael_block(l, r)	\
+	do {					\
+		r ^= dp_rx_rotl(l, 17);	\
+		l += r;				\
+		r ^= dp_rx_xswap(l);		\
+		l += r;				\
+		r ^= dp_rx_rotl(l, 3);	\
+		l += r;				\
+		r ^= dp_rx_rotr(l, 2);	\
+		l += r;				\
+	} while (0)
+
+/**
+ * struct dp_rx_desc_list_elem_t
+ *
+ * @next	: Next pointer to form free list
+ * @rx_desc	: DP Rx descriptor
+ */
+union dp_rx_desc_list_elem_t {
+	union dp_rx_desc_list_elem_t *next;
+	struct dp_rx_desc rx_desc;
+};
+
+/**
+ * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address
of
+ * the Rx descriptor on Rx DMA source ring buffer
+ * @soc: core txrx main context
+ * @cookie: cookie used to lookup virtual address
+ *
+ * Return: void *: Virtual Address of the Rx descriptor, or NULL when the
+ *	   cookie carries an out-of-range pool id or index
+ */
+static inline
+void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
+{
+	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
+	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
+	struct rx_desc_pool *rx_desc_pool;
+
+	/* reject cookies that point outside the per-soc descriptor pools */
+	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
+		return NULL;
+
+	rx_desc_pool = &soc->rx_desc_buf[pool_id];
+
+	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
+		return NULL;
+
+	return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
+}
+
+/**
+ * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
+ *			 the Rx descriptor on monitor ring buffer
+ * @soc: core txrx main context
+ * @cookie: cookie used to lookup virtual address
+ *
+ * NOTE(review): unlike dp_rx_cookie_2_va_rxdma_buf() there is no bounds
+ * check on pool_id/index here -- callers must guarantee a valid cookie.
+ *
+ * Return: void *: Virtual Address of the Rx descriptor
+ */
+static inline
+void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
+{
+	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
+	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
+	/* TODO */
+	/* Add sanity for pool_id & index */
+	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
+}
+
+/**
+ * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
+ *			 the Rx descriptor on monitor status ring buffer
+ * @soc: core txrx main context
+ * @cookie: cookie used to lookup virtual address
+ *
+ * NOTE(review): no bounds check on pool_id/index here either -- callers
+ * must guarantee a valid cookie.
+ *
+ * Return: void *: Virtual Address of the Rx descriptor
+ */
+static inline
+void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
+{
+	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
+	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
+	/* TODO */
+	/* Add sanity for pool_id & index */
+	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
+}
+
+void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
+				union dp_rx_desc_list_elem_t
**local_desc_list, + union dp_rx_desc_list_elem_t **tail, + uint16_t pool_id, + struct rx_desc_pool *rx_desc_pool); + +uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id, + struct rx_desc_pool *rx_desc_pool, + uint16_t num_descs, + union dp_rx_desc_list_elem_t **desc_list, + union dp_rx_desc_list_elem_t **tail); + + +QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev); + +void dp_rx_pdev_detach(struct dp_pdev *pdev); + + +uint32_t +dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota); + +uint32_t dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota); + +uint32_t +dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota); + +/** + * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across + * multiple nbufs. + * @nbuf: pointer to the first msdu of an amsdu. + * @rx_tlv_hdr: pointer to the start of RX TLV headers. + * + * This function implements the creation of RX frag_list for cases + * where an MSDU is spread across multiple nbufs. + * + * Return: returns the head nbuf which contains complete frag_list. 
+ */ +qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr); + +QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, + uint32_t pool_id, + uint32_t pool_size, + struct rx_desc_pool *rx_desc_pool); + +void dp_rx_desc_pool_free(struct dp_soc *soc, + uint32_t pool_id, + struct rx_desc_pool *rx_desc_pool); + +void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list, + struct dp_peer *peer); + +/** + * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list + * + * @head: pointer to the head of local free list + * @tail: pointer to the tail of local free list + * @new: new descriptor that is added to the free list + * + * Return: void: + */ +static inline +void dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head, + union dp_rx_desc_list_elem_t **tail, + struct dp_rx_desc *new) +{ + qdf_assert(head && new); + + new->nbuf = NULL; + new->in_use = 0; + new->unmapped = 0; + + ((union dp_rx_desc_list_elem_t *)new)->next = *head; + *head = (union dp_rx_desc_list_elem_t *)new; + if (*tail == NULL) + *tail = *head; + +} + +/** + * dp_rx_wds_srcport_learn() - Add or update the STA PEER which + * is behind the WDS repeater. 
+ * + * @soc: core txrx main context + * @rx_tlv_hdr: base address of RX TLV header + * @ta_peer: WDS repeater peer + * @nbuf: rx pkt + * + * Return: void: + */ +#ifdef FEATURE_WDS +static inline void +dp_rx_wds_srcport_learn(struct dp_soc *soc, + uint8_t *rx_tlv_hdr, + struct dp_peer *ta_peer, + qdf_nbuf_t nbuf) +{ + uint16_t sa_sw_peer_id = hal_rx_msdu_end_sa_sw_peer_id_get(rx_tlv_hdr); + uint32_t flags = IEEE80211_NODE_F_WDS_HM; + uint32_t ret = 0; + uint8_t wds_src_mac[IEEE80211_ADDR_LEN]; + struct dp_ast_entry *ast; + uint16_t sa_idx; + + /* Do wds source port learning only if it is a 4-address mpdu */ + if (!(qdf_nbuf_is_rx_chfrag_start(nbuf) && + hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr))) + return; + + memcpy(wds_src_mac, (qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN), + IEEE80211_ADDR_LEN); + + if (qdf_unlikely(!hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr))) { + ret = dp_peer_add_ast(soc, + ta_peer, + wds_src_mac, + CDP_TXRX_AST_TYPE_WDS, + flags); + return; + + } + + /* + * Get the AST entry from HW SA index and mark it as active + */ + sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr); + + qdf_spin_lock_bh(&soc->ast_lock); + ast = soc->ast_table[sa_idx]; + + if (!ast) { + qdf_spin_unlock_bh(&soc->ast_lock); + return; + } + + /* + * Ensure we are updating the right AST entry by + * validating ast_idx. 
+ * There is a possibility we might arrive here without + * AST MAP event , so this check is mandatory + */ + if (ast->ast_idx == sa_idx) + ast->is_active = TRUE; + + /* Handle client roaming */ + if (sa_sw_peer_id != ta_peer->peer_ids[0]) + dp_peer_update_ast(soc, ta_peer, ast, flags); + + qdf_spin_unlock_bh(&soc->ast_lock); + + return; +} +#else +static inline void +dp_rx_wds_srcport_learn(struct dp_soc *soc, + uint8_t *rx_tlv_hdr, + struct dp_peer *ta_peer, + qdf_nbuf_t nbuf) +{ +} +#endif + +uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf); +void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, + qdf_nbuf_t mpdu, bool mpdu_done); +void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr); + +#define DP_RX_LIST_APPEND(head, tail, elem) \ + do { \ + if (!(head)) { \ + (head) = (elem); \ + } else { \ + qdf_nbuf_set_next((tail), (elem)); \ + } \ + (tail) = (elem); \ + qdf_nbuf_set_next((tail), NULL); \ + } while (0) + +#ifndef BUILD_X86 +static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf, + qdf_dma_addr_t *paddr, struct dp_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#else +#define MAX_RETRY 100 +static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf, + qdf_dma_addr_t *paddr, struct dp_pdev *pdev) +{ + uint32_t nbuf_retry = 0; + int32_t ret; + const uint32_t x86_phy_addr = 0x50000000; + /* + * in M2M emulation platforms (x86) the memory below 0x50000000 + * is reserved for target use, so any memory allocated in this + * region should not be used by host + */ + do { + if (qdf_likely(*paddr > x86_phy_addr)) + return QDF_STATUS_SUCCESS; + else { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "phy addr %pK exceeded 0x50000000 trying again\n", + paddr); + + nbuf_retry++; + if ((*rx_netbuf)) { + qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf, + QDF_DMA_BIDIRECTIONAL); + /* Not freeing buffer intentionally. 
+ * Observed that same buffer is getting + * re-allocated resulting in longer load time + * WMI init timeout. + * This buffer is anyway not useful so skip it. + **/ + } + + *rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev, + RX_BUFFER_SIZE, + RX_BUFFER_RESERVATION, + RX_BUFFER_ALIGNMENT, + FALSE); + + if (qdf_unlikely(!(*rx_netbuf))) + return QDF_STATUS_E_FAILURE; + + ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf, + QDF_DMA_BIDIRECTIONAL); + + if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) { + qdf_nbuf_free(*rx_netbuf); + *rx_netbuf = NULL; + continue; + } + + *paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0); + } + } while (nbuf_retry < MAX_RETRY); + + if ((*rx_netbuf)) { + qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf, + QDF_DMA_BIDIRECTIONAL); + qdf_nbuf_free(*rx_netbuf); + } + + return QDF_STATUS_E_FAILURE; +} +#endif + +/** + * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of + * the MSDU Link Descriptor + * @soc: core txrx main context + * @buf_info: buf_info include cookie that used to lookup virtual address of + * link descriptor Normally this is just an index into a per SOC array. + * + * This is the VA of the link descriptor, that HAL layer later uses to + * retrieve the list of MSDU's for a given MPDU. + * + * Return: void *: Virtual Address of the Rx descriptor + */ +static inline +void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc, + struct hal_buf_info *buf_info) +{ + void *link_desc_va; + uint32_t bank_id = LINK_DESC_COOKIE_BANK_ID(buf_info->sw_cookie); + + + /* TODO */ + /* Add sanity for cookie */ + + link_desc_va = soc->link_desc_banks[bank_id].base_vaddr + + (buf_info->paddr - + soc->link_desc_banks[bank_id].base_paddr); + + return link_desc_va; +} + +/** + * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of + * the MSDU Link Descriptor + * @pdev: core txrx pdev context + * @buf_info: buf_info includes cookie that used to lookup virtual address of + * link descriptor. 
Normally this is just an index into a per pdev array. + * + * This is the VA of the link descriptor in monitor mode destination ring, + * that HAL layer later uses to retrieve the list of MSDU's for a given MPDU. + * + * Return: void *: Virtual Address of the Rx descriptor + */ +static inline +void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev, + struct hal_buf_info *buf_info, + int mac_id) +{ + void *link_desc_va; + int mac_for_pdev = dp_get_mac_id_for_mac(pdev->soc, mac_id); + + /* TODO */ + /* Add sanity for cookie */ + + link_desc_va = + pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_vaddr + + (buf_info->paddr - + pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_paddr); + + return link_desc_va; +} + +/** + * dp_rx_defrag_concat() - Concatenate the fragments + * + * @dst: destination pointer to the buffer + * @src: source pointer from where the fragment payload is to be copied + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src) +{ + /* + * Inside qdf_nbuf_cat, if it is necessary to reallocate dst + * to provide space for src, the headroom portion is copied from + * the original dst buffer to the larger new dst buffer. + * (This is needed, because the headroom of the dst buffer + * contains the rx desc.) + */ + if (qdf_nbuf_cat(dst, src)) + return QDF_STATUS_E_DEFRAG_ERROR; + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_ast_set_active() - set the active flag of the astentry + * corresponding to a hw index. + * @soc: core txrx main context + * @sa_idx: hw idx + * @is_active: active flag + * + */ +#ifdef FEATURE_WDS +static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active) +{ + struct dp_ast_entry *ast; + qdf_spin_lock_bh(&soc->ast_lock); + ast = soc->ast_table[sa_idx]; + + /* + * Ensure we are updating the right AST entry by + * validating ast_idx. 
+ * There is a possibility we might arrive here without + * AST MAP event , so this check is mandatory + */ + if (ast && (ast->ast_idx == sa_idx)) { + ast->is_active = is_active; + qdf_spin_unlock_bh(&soc->ast_lock); + return QDF_STATUS_SUCCESS; + } + + qdf_spin_unlock_bh(&soc->ast_lock); + return QDF_STATUS_E_FAILURE; +} +#else +static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/* + * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet. + * In qwrap mode, packets originated from + * any vdev should not loopback and + * should be dropped. + * @vdev: vdev on which rx packet is received + * @nbuf: rx pkt + * + */ +#if ATH_SUPPORT_WRAP +static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev, + qdf_nbuf_t nbuf) +{ + struct dp_vdev *psta_vdev; + struct dp_pdev *pdev = vdev->pdev; + struct dp_soc *soc = pdev->soc; + uint8_t *data = qdf_nbuf_data(nbuf); + uint8_t i; + + for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) { + pdev = soc->pdev_list[i]; + if (qdf_unlikely(vdev->proxysta_vdev)) { + /* In qwrap isolation mode, allow loopback packets as all + * packets go to RootAP and Loopback on the mpsta. + */ + if (vdev->isolation_vdev) + return false; + TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) { + if (qdf_unlikely(psta_vdev->proxysta_vdev && + !qdf_mem_cmp(psta_vdev->mac_addr.raw, + &data[DP_MAC_ADDR_LEN], DP_MAC_ADDR_LEN))) { + /* Drop packet if source address is equal to + * any of the vdev addresses. + */ + return true; + } + } + } + } + return false; +} +#else +static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev, + qdf_nbuf_t nbuf) +{ + return false; +} +#endif + +/* + * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs + * called during dp rx initialization + * and at the end of dp_rx_process. 
+ * + * @soc: core txrx main context + * @mac_id: mac_id which is one of 3 mac_ids + * @dp_rxdma_srng: dp rxdma circular ring + * @rx_desc_pool: Pointer to free Rx descriptor pool + * @num_req_buffers: number of buffer to be replenished + * @desc_list: list of descs if called from dp_rx_process + * or NULL during dp rx initialization or out of buffer + * interrupt. + * @tail: tail of descs list + * Return: return success or failure + */ +QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, + struct dp_srng *dp_rxdma_srng, + struct rx_desc_pool *rx_desc_pool, + uint32_t num_req_buffers, + union dp_rx_desc_list_elem_t **desc_list, + union dp_rx_desc_list_elem_t **tail); + +/** + * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW + * (WBM), following error handling + * + * @soc: core DP main context + * @buf_addr_info: opaque pointer to the REO error ring descriptor + * @buf_addr_info: void pointer to the buffer_addr_info + * @bm_action: put to idle_list or release to msdu_list + * Return: QDF_STATUS + */ +QDF_STATUS +dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action); + +QDF_STATUS +dp_rx_link_desc_buf_return(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng, + void *buf_addr_info, uint8_t bm_action); +/** + * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to + * (WBM) by address + * + * @soc: core DP main context + * @link_desc_addr: link descriptor addr + * + * Return: QDF_STATUS + */ +QDF_STATUS +dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr, + uint8_t bm_action); + +uint32_t +dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, + uint32_t quota); + +void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr, struct dp_peer *peer); +QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr); + +int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev, + struct dp_peer *peer, 
int rx_mcast); + +qdf_nbuf_t +dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev); + +#endif /* _DP_RX_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.c new file mode 100644 index 0000000000000000000000000000000000000000..0f35d9cd59d22a3c569b9ad41e7d4e09a2155112 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.c @@ -0,0 +1,1648 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "dp_types.h" +#include "dp_rx.h" +#include "dp_peer.h" +#include "hal_api.h" +#include "qdf_trace.h" +#include "qdf_nbuf.h" +#include "dp_rx_defrag.h" +#include /* LLC_SNAP_HDR_LEN */ +#include "dp_rx_defrag.h" + +const struct dp_rx_defrag_cipher dp_f_ccmp = { + "AES-CCM", + IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN, + IEEE80211_WEP_MICLEN, + 0, +}; + +const struct dp_rx_defrag_cipher dp_f_tkip = { + "TKIP", + IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN, + IEEE80211_WEP_CRCLEN, + IEEE80211_WEP_MICLEN, +}; + +const struct dp_rx_defrag_cipher dp_f_wep = { + "WEP", + IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN, + IEEE80211_WEP_CRCLEN, + 0, +}; + +/* + * dp_rx_defrag_frames_free(): Free fragment chain + * @frames: Fragment chain + * + * Iterates through the fragment chain and frees them + * Returns: None + */ +static void dp_rx_defrag_frames_free(qdf_nbuf_t frames) +{ + qdf_nbuf_t next, frag = frames; + + while (frag) { + next = qdf_nbuf_next(frag); + qdf_nbuf_free(frag); + frag = next; + } +} + +/* + * dp_rx_clear_saved_desc_info(): Clears descriptor info + * @peer: Pointer to the peer data structure + * @tid: Transmit ID (TID) + * + * Saves MPDU descriptor info and MSDU link pointer from REO + * ring descriptor. 
The cache is created per peer, per TID + * + * Returns: None + */ +static void dp_rx_clear_saved_desc_info(struct dp_peer *peer, unsigned tid) +{ + if (peer->rx_tid[tid].dst_ring_desc) + qdf_mem_free(peer->rx_tid[tid].dst_ring_desc); + + peer->rx_tid[tid].dst_ring_desc = NULL; +} + +/* + * dp_rx_reorder_flush_frag(): Flush the frag list + * @peer: Pointer to the peer data structure + * @tid: Transmit ID (TID) + * + * Flush the per-TID frag list + * + * Returns: None + */ +void dp_rx_reorder_flush_frag(struct dp_peer *peer, + unsigned int tid) +{ + struct dp_soc *soc; + struct dp_srng *dp_rxdma_srng; + struct rx_desc_pool *rx_desc_pool; + struct dp_pdev *pdev; + union dp_rx_desc_list_elem_t *head = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Flushing TID %d"), tid); + + if (!peer) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: NULL peer\n", __func__); + return; + } + + pdev = peer->vdev->pdev; + soc = pdev->soc; + + if (peer->rx_tid[tid].dst_ring_desc) { + if (dp_rx_link_desc_return(soc, + peer->rx_tid[tid].dst_ring_desc, + HAL_BM_ACTION_PUT_IN_IDLE_LIST) != + QDF_STATUS_SUCCESS) + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Failed to return link desc\n", + __func__); + } + + if (peer->rx_tid[tid].head_frag_desc) { + dp_rxdma_srng = &pdev->rx_refill_buf_ring; + rx_desc_pool = &soc->rx_desc_buf[0]; + + dp_rx_add_to_free_desc_list(&head, &tail, + peer->rx_tid[tid].head_frag_desc); + dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool, + 1, &head, &tail); + } + + dp_rx_defrag_cleanup(peer, tid); +} + +/* + * dp_rx_defrag_waitlist_flush(): Flush SOC defrag wait list + * @soc: DP SOC + * + * Flush fragments of all waitlisted TID's + * + * Returns: None + */ +void dp_rx_defrag_waitlist_flush(struct dp_soc *soc) +{ + struct dp_rx_tid *rx_reorder; + struct dp_rx_tid *tmp; + uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()); + TAILQ_HEAD(, dp_rx_tid) 
temp_list; + + TAILQ_INIT(&temp_list); + + qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock); + TAILQ_FOREACH_SAFE(rx_reorder, &soc->rx.defrag.waitlist, + defrag_waitlist_elem, tmp) { + unsigned int tid; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Current time %u"), now_ms); + + if (rx_reorder->defrag_timeout_ms > now_ms) + break; + + tid = rx_reorder->tid; + if (tid >= DP_MAX_TIDS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: TID out of bounds: %d", __func__, tid); + qdf_assert(0); + continue; + } + + TAILQ_REMOVE(&soc->rx.defrag.waitlist, rx_reorder, + defrag_waitlist_elem); + + /* Move to temp list and clean-up later */ + TAILQ_INSERT_TAIL(&temp_list, rx_reorder, + defrag_waitlist_elem); + } + qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock); + + TAILQ_FOREACH_SAFE(rx_reorder, &temp_list, + defrag_waitlist_elem, tmp) { + struct dp_peer *peer; + + /* get address of current peer */ + peer = + container_of(rx_reorder, struct dp_peer, + rx_tid[rx_reorder->tid]); + dp_rx_reorder_flush_frag(peer, rx_reorder->tid); + } +} + +/* + * dp_rx_defrag_waitlist_add(): Update per-PDEV defrag wait list + * @peer: Pointer to the peer data structure + * @tid: Transmit ID (TID) + * + * Appends per-tid fragments to global fragment wait list + * + * Returns: None + */ +static void dp_rx_defrag_waitlist_add(struct dp_peer *peer, unsigned tid) +{ + struct dp_soc *psoc = peer->vdev->pdev->soc; + struct dp_rx_tid *rx_reorder = &peer->rx_tid[tid]; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Adding TID %u to waitlist for peer %pK"), + tid, peer); + + /* TODO: use LIST macros instead of TAIL macros */ + qdf_spin_lock_bh(&psoc->rx.defrag.defrag_lock); + TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, rx_reorder, + defrag_waitlist_elem); + qdf_spin_unlock_bh(&psoc->rx.defrag.defrag_lock); +} + +/* + * dp_rx_defrag_waitlist_remove(): Remove fragments from waitlist + * @peer: Pointer to the peer data structure + * @tid: Transmit ID (TID) + * + * Remove 
fragments from waitlist + * + * Returns: None + */ +void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid) +{ + struct dp_pdev *pdev = peer->vdev->pdev; + struct dp_soc *soc = pdev->soc; + struct dp_rx_tid *rx_reorder; + + if (tid > DP_MAX_TIDS) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "TID out of bounds: %d", tid); + qdf_assert(0); + return; + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Remove TID %u from waitlist for peer %pK"), + tid, peer); + + qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock); + TAILQ_FOREACH(rx_reorder, &soc->rx.defrag.waitlist, + defrag_waitlist_elem) { + struct dp_peer *peer_on_waitlist; + + /* get address of current peer */ + peer_on_waitlist = + container_of(rx_reorder, struct dp_peer, + rx_tid[rx_reorder->tid]); + + /* Ensure it is TID for same peer */ + if (peer_on_waitlist == peer && rx_reorder->tid == tid) + TAILQ_REMOVE(&soc->rx.defrag.waitlist, + rx_reorder, defrag_waitlist_elem); + } + qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock); +} + +/* + * dp_rx_defrag_fraglist_insert(): Create a per-sequence fragment list + * @peer: Pointer to the peer data structure + * @tid: Transmit ID (TID) + * @head_addr: Pointer to head list + * @tail_addr: Pointer to tail list + * @frag: Incoming fragment + * @all_frag_present: Flag to indicate whether all fragments are received + * + * Build a per-tid, per-sequence fragment list. 
+ * + * Returns: Success, if inserted + */ +static QDF_STATUS dp_rx_defrag_fraglist_insert(struct dp_peer *peer, unsigned tid, + qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr, qdf_nbuf_t frag, + uint8_t *all_frag_present) +{ + qdf_nbuf_t next; + qdf_nbuf_t prev = NULL; + qdf_nbuf_t cur; + uint16_t head_fragno, cur_fragno, next_fragno; + uint8_t last_morefrag = 1, count = 0; + struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; + uint8_t *rx_desc_info; + + + qdf_assert(frag); + qdf_assert(head_addr); + qdf_assert(tail_addr); + + *all_frag_present = 0; + rx_desc_info = qdf_nbuf_data(frag); + cur_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info); + + /* If this is the first fragment */ + if (!(*head_addr)) { + *head_addr = *tail_addr = frag; + qdf_nbuf_set_next(*tail_addr, NULL); + rx_tid->curr_frag_num = cur_fragno; + + goto insert_done; + } + + /* In sequence fragment */ + if (cur_fragno > rx_tid->curr_frag_num) { + qdf_nbuf_set_next(*tail_addr, frag); + *tail_addr = frag; + qdf_nbuf_set_next(*tail_addr, NULL); + rx_tid->curr_frag_num = cur_fragno; + } else { + /* Out of sequence fragment */ + cur = *head_addr; + rx_desc_info = qdf_nbuf_data(cur); + head_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info); + + if (cur_fragno == head_fragno) { + qdf_nbuf_free(frag); + goto insert_fail; + } else if (head_fragno > cur_fragno) { + qdf_nbuf_set_next(frag, cur); + cur = frag; + *head_addr = frag; /* head pointer to be updated */ + } else { + while ((cur_fragno > head_fragno) && cur != NULL) { + prev = cur; + cur = qdf_nbuf_next(cur); + rx_desc_info = qdf_nbuf_data(cur); + head_fragno = + dp_rx_frag_get_mpdu_frag_number( + rx_desc_info); + } + + if (cur_fragno == head_fragno) { + qdf_nbuf_free(frag); + goto insert_fail; + } + + qdf_nbuf_set_next(prev, frag); + qdf_nbuf_set_next(frag, cur); + } + } + + next = qdf_nbuf_next(*head_addr); + + rx_desc_info = qdf_nbuf_data(*tail_addr); + last_morefrag = dp_rx_frag_get_more_frag_bit(rx_desc_info); + + /* TODO: optimize the loop 
*/ + if (!last_morefrag) { + /* Check if all fragments are present */ + do { + rx_desc_info = qdf_nbuf_data(next); + next_fragno = + dp_rx_frag_get_mpdu_frag_number(rx_desc_info); + count++; + + if (next_fragno != count) + break; + + next = qdf_nbuf_next(next); + } while (next); + + if (!next) { + *all_frag_present = 1; + return QDF_STATUS_SUCCESS; + } + } + +insert_done: + return QDF_STATUS_SUCCESS; + +insert_fail: + return QDF_STATUS_E_FAILURE; +} + + +/* + * dp_rx_defrag_tkip_decap(): decap tkip encrypted fragment + * @msdu: Pointer to the fragment + * @hdrlen: 802.11 header length (mostly useful in 4 addr frames) + * + * decap tkip encrypted fragment + * + * Returns: QDF_STATUS + */ +static QDF_STATUS dp_rx_defrag_tkip_decap(qdf_nbuf_t msdu, uint16_t hdrlen) +{ + uint8_t *ivp, *orig_hdr; + int rx_desc_len = sizeof(struct rx_pkt_tlvs); + + /* start of 802.11 header info */ + orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len); + + /* TKIP header is located post 802.11 header */ + ivp = orig_hdr + hdrlen; + if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "IEEE80211_WEP_EXTIV is missing in TKIP fragment"); + return QDF_STATUS_E_DEFRAG_ERROR; + } + + qdf_mem_move(orig_hdr + dp_f_tkip.ic_header, orig_hdr, hdrlen); + + qdf_nbuf_pull_head(msdu, dp_f_tkip.ic_header); + qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_defrag_ccmp_demic(): Remove MIC information from CCMP fragment + * @nbuf: Pointer to the fragment buffer + * @hdrlen: 802.11 header length (mostly useful in 4 addr frames) + * + * Remove MIC information from CCMP fragment + * + * Returns: QDF_STATUS + */ +static QDF_STATUS dp_rx_defrag_ccmp_demic(qdf_nbuf_t nbuf, uint16_t hdrlen) +{ + uint8_t *ivp, *orig_hdr; + int rx_desc_len = sizeof(struct rx_pkt_tlvs); + + /* start of the 802.11 header */ + orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len); + + /* CCMP header is located after 
802.11 header */ + ivp = orig_hdr + hdrlen; + if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) + return QDF_STATUS_E_DEFRAG_ERROR; + + qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_defrag_ccmp_decap(): decap CCMP encrypted fragment + * @nbuf: Pointer to the fragment + * @hdrlen: length of the header information + * + * decap CCMP encrypted fragment + * + * Returns: QDF_STATUS + */ +static QDF_STATUS dp_rx_defrag_ccmp_decap(qdf_nbuf_t nbuf, uint16_t hdrlen) +{ + uint8_t *ivp, *origHdr; + int rx_desc_len = sizeof(struct rx_pkt_tlvs); + + origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len); + ivp = origHdr + hdrlen; + + if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) + return QDF_STATUS_E_DEFRAG_ERROR; + + /* Let's pull the header later */ + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_defrag_wep_decap(): decap WEP encrypted fragment + * @msdu: Pointer to the fragment + * @hdrlen: length of the header information + * + * decap WEP encrypted fragment + * + * Returns: QDF_STATUS + */ +static QDF_STATUS dp_rx_defrag_wep_decap(qdf_nbuf_t msdu, uint16_t hdrlen) +{ + uint8_t *origHdr; + int rx_desc_len = sizeof(struct rx_pkt_tlvs); + + origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len); + qdf_mem_move(origHdr + dp_f_wep.ic_header, origHdr, hdrlen); + + qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_defrag_hdrsize(): Calculate the header size of the received fragment + * @nbuf: Pointer to the fragment + * + * Calculate the header size of the received fragment + * + * Returns: header size (uint16_t) + */ +static uint16_t dp_rx_defrag_hdrsize(qdf_nbuf_t nbuf) +{ + uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf); + uint16_t size = sizeof(struct ieee80211_frame); + uint16_t fc = 0; + uint32_t to_ds, fr_ds; + uint8_t frm_ctrl_valid; + uint16_t frm_ctrl_field; + + to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr); + fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr); + 
frm_ctrl_valid = hal_rx_get_mpdu_frame_control_valid(rx_tlv_hdr); + frm_ctrl_field = hal_rx_get_frame_ctrl_field(rx_tlv_hdr); + + if (to_ds && fr_ds) + size += IEEE80211_ADDR_LEN; + + if (frm_ctrl_valid) { + fc = frm_ctrl_field; + + /* use 1-st byte for validation */ + if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) { + size += sizeof(uint16_t); + /* use 2-nd byte for validation */ + if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER) + size += sizeof(struct ieee80211_htc); + } + } + + return size; +} + +/* + * dp_rx_defrag_michdr(): Calculate a pseudo MIC header + * @wh0: Pointer to the wireless header of the fragment + * @hdr: Array to hold the pseudo header + * + * Calculate a pseudo MIC header + * + * Returns: None + */ +static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0, + uint8_t hdr[]) +{ + const struct ieee80211_frame_addr4 *wh = + (const struct ieee80211_frame_addr4 *)wh0; + + switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) { + case IEEE80211_FC1_DIR_NODS: + DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */ + DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, + wh->i_addr2); + break; + case IEEE80211_FC1_DIR_TODS: + DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */ + DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, + wh->i_addr2); + break; + case IEEE80211_FC1_DIR_FROMDS: + DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */ + DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, + wh->i_addr3); + break; + case IEEE80211_FC1_DIR_DSTODS: + DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */ + DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, + wh->i_addr4); + break; + } + + /* + * Bit 7 is IEEE80211_FC0_SUBTYPE_QOS for data frame, but + * it could also be set for deauth, disassoc, action, etc. for + * a mgt type frame. It comes into picture for MFP. 
+ */ + if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) { + const struct ieee80211_qosframe *qwh = + (const struct ieee80211_qosframe *)wh; + hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID; + } else { + hdr[12] = 0; + } + + hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ +} + +/* + * dp_rx_defrag_mic(): Calculate MIC header + * @key: Pointer to the key + * @wbuf: fragment buffer + * @off: Offset + * @data_len: Data length + * @mic: Array to hold MIC + * + * Calculate a pseudo MIC header + * + * Returns: QDF_STATUS + */ +static QDF_STATUS dp_rx_defrag_mic(const uint8_t *key, qdf_nbuf_t wbuf, + uint16_t off, uint16_t data_len, uint8_t mic[]) +{ + uint8_t hdr[16] = { 0, }; + uint32_t l, r; + const uint8_t *data; + uint32_t space; + int rx_desc_len = sizeof(struct rx_pkt_tlvs); + + dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf) + + rx_desc_len), hdr); + l = dp_rx_get_le32(key); + r = dp_rx_get_le32(key + 4); + + /* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */ + l ^= dp_rx_get_le32(hdr); + dp_rx_michael_block(l, r); + l ^= dp_rx_get_le32(&hdr[4]); + dp_rx_michael_block(l, r); + l ^= dp_rx_get_le32(&hdr[8]); + dp_rx_michael_block(l, r); + l ^= dp_rx_get_le32(&hdr[12]); + dp_rx_michael_block(l, r); + + /* first buffer has special handling */ + data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len + off; + space = qdf_nbuf_len(wbuf) - rx_desc_len - off; + + for (;; ) { + if (space > data_len) + space = data_len; + + /* collect 32-bit blocks from current buffer */ + while (space >= sizeof(uint32_t)) { + l ^= dp_rx_get_le32(data); + dp_rx_michael_block(l, r); + data += sizeof(uint32_t); + space -= sizeof(uint32_t); + data_len -= sizeof(uint32_t); + } + if (data_len < sizeof(uint32_t)) + break; + + wbuf = qdf_nbuf_next(wbuf); + if (wbuf == NULL) + return QDF_STATUS_E_DEFRAG_ERROR; + + if (space != 0) { + const uint8_t *data_next; + /* + * Block straddles buffers, split references. 
+ */ + data_next = + (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len; + if ((qdf_nbuf_len(wbuf) - rx_desc_len) < + sizeof(uint32_t) - space) { + return QDF_STATUS_E_DEFRAG_ERROR; + } + switch (space) { + case 1: + l ^= dp_rx_get_le32_split(data[0], + data_next[0], data_next[1], + data_next[2]); + data = data_next + 3; + space = (qdf_nbuf_len(wbuf) - rx_desc_len) + - 3; + break; + case 2: + l ^= dp_rx_get_le32_split(data[0], data[1], + data_next[0], data_next[1]); + data = data_next + 2; + space = (qdf_nbuf_len(wbuf) - rx_desc_len) + - 2; + break; + case 3: + l ^= dp_rx_get_le32_split(data[0], data[1], + data[2], data_next[0]); + data = data_next + 1; + space = (qdf_nbuf_len(wbuf) - rx_desc_len) + - 1; + break; + } + dp_rx_michael_block(l, r); + data_len -= sizeof(uint32_t); + } else { + /* + * Setup for next buffer. + */ + data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len; + space = qdf_nbuf_len(wbuf) - rx_desc_len; + } + } + /* Last block and padding (0x5a, 4..7 x 0) */ + switch (data_len) { + case 0: + l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0); + break; + case 1: + l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0); + break; + case 2: + l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0); + break; + case 3: + l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a); + break; + } + dp_rx_michael_block(l, r); + dp_rx_michael_block(l, r); + dp_rx_put_le32(mic, l); + dp_rx_put_le32(mic + 4, r); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_defrag_tkip_demic(): Remove MIC header from the TKIP frame + * @key: Pointer to the key + * @msdu: fragment buffer + * @hdrlen: Length of the header information + * + * Remove MIC information from the TKIP frame + * + * Returns: QDF_STATUS + */ +static QDF_STATUS dp_rx_defrag_tkip_demic(const uint8_t *key, + qdf_nbuf_t msdu, uint16_t hdrlen) +{ + QDF_STATUS status; + uint32_t pktlen; + uint8_t mic[IEEE80211_WEP_MICLEN]; + uint8_t mic0[IEEE80211_WEP_MICLEN]; + int rx_desc_len = sizeof(struct rx_pkt_tlvs); + + pktlen = 
qdf_nbuf_len(msdu) - rx_desc_len; + + status = dp_rx_defrag_mic(key, msdu, hdrlen, + pktlen - (hdrlen + dp_f_tkip.ic_miclen), mic); + + if (QDF_IS_STATUS_ERROR(status)) + return status; + + qdf_nbuf_copy_bits(msdu, pktlen - dp_f_tkip.ic_miclen + rx_desc_len, + dp_f_tkip.ic_miclen, (caddr_t)mic0); + + if (!qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen)) + return QDF_STATUS_E_DEFRAG_ERROR; + + qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_miclen); + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_frag_pull_hdr(): Pulls the RXTLV & the 802.11 headers + * @nbuf: buffer pointer + * @hdrsize: size of the header to be pulled + * + * Pull the RXTLV & the 802.11 headers + * + * Returns: None + */ +static void dp_rx_frag_pull_hdr(qdf_nbuf_t nbuf, uint16_t hdrsize) +{ + qdf_nbuf_pull_head(nbuf, + RX_PKT_TLVS_LEN + hdrsize); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: final pktlen %d .11len %d\n", + __func__, + (uint32_t)qdf_nbuf_len(nbuf), hdrsize); +} + +/* + * dp_rx_construct_fraglist(): Construct a nbuf fraglist + * @peer: Pointer to the peer + * @head: Pointer to list of fragments + * @hdrsize: Size of the header to be pulled + * + * Construct a nbuf fraglist + * + * Returns: None + */ +static void +dp_rx_construct_fraglist(struct dp_peer *peer, + qdf_nbuf_t head, uint16_t hdrsize) +{ + qdf_nbuf_t msdu = qdf_nbuf_next(head); + qdf_nbuf_t rx_nbuf = msdu; + uint32_t len = 0; + + while (msdu) { + dp_rx_frag_pull_hdr(msdu, hdrsize); + len += qdf_nbuf_len(msdu); + msdu = qdf_nbuf_next(msdu); + } + + qdf_nbuf_append_ext_list(head, rx_nbuf, len); + qdf_nbuf_set_next(head, NULL); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: head len %d ext len %d data len %d \n", + __func__, + (uint32_t)qdf_nbuf_len(head), + (uint32_t)qdf_nbuf_len(rx_nbuf), + (uint32_t)(head->data_len)); +} + +/** + * dp_rx_defrag_err() - rx err handler + * @pdev: handle to pdev object + * @vdev_id: vdev id + * @peer_mac_addr: peer mac address + * @tid: TID + * @tsf32: TSF + * @err_type: 
error type + * @rx_frame: rx frame + * @pn: PN Number + * @key_id: key id + * + * This function handles rx error and send MIC error notification + * + * Return: None + */ +static void dp_rx_defrag_err(uint8_t vdev_id, uint8_t *peer_mac_addr, + int tid, uint32_t tsf32, uint32_t err_type, qdf_nbuf_t rx_frame, + uint64_t *pn, uint8_t key_id) +{ + /* TODO: Who needs to know about the TKIP MIC error */ +} + + +/* + * dp_rx_defrag_nwifi_to_8023(): Transcap 802.11 to 802.3 + * @nbuf: Pointer to the fragment buffer + * @hdrsize: Size of headers + * + * Transcap the fragment from 802.11 to 802.3 + * + * Returns: None + */ +static void +dp_rx_defrag_nwifi_to_8023(qdf_nbuf_t nbuf, uint16_t hdrsize) +{ + struct llc_snap_hdr_t *llchdr; + struct ethernet_hdr_t *eth_hdr; + uint8_t ether_type[2]; + uint16_t fc = 0; + union dp_align_mac_addr mac_addr; + uint8_t *rx_desc_info = qdf_mem_malloc(RX_PKT_TLVS_LEN); + + if (rx_desc_info == NULL) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Memory alloc failed ! 
\n", __func__); + QDF_ASSERT(0); + return; + } + + qdf_mem_copy(rx_desc_info, qdf_nbuf_data(nbuf), RX_PKT_TLVS_LEN); + + llchdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(nbuf) + + RX_PKT_TLVS_LEN + hdrsize); + qdf_mem_copy(ether_type, llchdr->ethertype, 2); + + qdf_nbuf_pull_head(nbuf, (RX_PKT_TLVS_LEN + hdrsize + + sizeof(struct llc_snap_hdr_t) - + sizeof(struct ethernet_hdr_t))); + + eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(nbuf)); + + if (hal_rx_get_mpdu_frame_control_valid(rx_desc_info)) + fc = hal_rx_get_frame_ctrl_field(rx_desc_info); + + switch (((fc & 0xff00) >> 8) & IEEE80211_FC1_DIR_MASK) { + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: frame control type: 0x%x", __func__, fc); + + case IEEE80211_FC1_DIR_NODS: + hal_rx_mpdu_get_addr1(rx_desc_info, + &mac_addr.raw[0]); + qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], + IEEE80211_ADDR_LEN); + hal_rx_mpdu_get_addr2(rx_desc_info, + &mac_addr.raw[0]); + qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], + IEEE80211_ADDR_LEN); + break; + case IEEE80211_FC1_DIR_TODS: + hal_rx_mpdu_get_addr3(rx_desc_info, + &mac_addr.raw[0]); + qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], + IEEE80211_ADDR_LEN); + hal_rx_mpdu_get_addr2(rx_desc_info, + &mac_addr.raw[0]); + qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], + IEEE80211_ADDR_LEN); + break; + case IEEE80211_FC1_DIR_FROMDS: + hal_rx_mpdu_get_addr1(rx_desc_info, + &mac_addr.raw[0]); + qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], + IEEE80211_ADDR_LEN); + hal_rx_mpdu_get_addr3(rx_desc_info, + &mac_addr.raw[0]); + qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], + IEEE80211_ADDR_LEN); + break; + + case IEEE80211_FC1_DIR_DSTODS: + hal_rx_mpdu_get_addr3(rx_desc_info, + &mac_addr.raw[0]); + qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0], + IEEE80211_ADDR_LEN); + hal_rx_mpdu_get_addr4(rx_desc_info, + &mac_addr.raw[0]); + qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0], + IEEE80211_ADDR_LEN); + break; + + default: + 
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: Unknown frame control type: 0x%x", __func__, fc); + } + + qdf_mem_copy(eth_hdr->ethertype, ether_type, + sizeof(ether_type)); + + qdf_nbuf_push_head(nbuf, RX_PKT_TLVS_LEN); + qdf_mem_copy(qdf_nbuf_data(nbuf), rx_desc_info, RX_PKT_TLVS_LEN); + qdf_mem_free(rx_desc_info); +} + +/* + * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO + * @peer: Pointer to the peer + * @tid: Transmit Identifier + * @head: Buffer to be reinjected back + * + * Reinject the fragment chain back into REO + * + * Returns: QDF_STATUS + */ + static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer, + unsigned tid, qdf_nbuf_t head) +{ + struct dp_pdev *pdev = peer->vdev->pdev; + struct dp_soc *soc = pdev->soc; + struct hal_buf_info buf_info; + void *link_desc_va; + void *msdu0, *msdu_desc_info; + void *ent_ring_desc, *ent_mpdu_desc_info, *ent_qdesc_addr; + void *dst_mpdu_desc_info, *dst_qdesc_addr; + qdf_dma_addr_t paddr; + uint32_t nbuf_len, seq_no, dst_ind; + uint32_t *mpdu_wrd; + uint32_t ret, cookie; + + void *dst_ring_desc = + peer->rx_tid[tid].dst_ring_desc; + void *hal_srng = soc->reo_reinject_ring.hal_srng; + + hal_rx_reo_buf_paddr_get(dst_ring_desc, &buf_info); + + link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info); + + qdf_assert(link_desc_va); + + msdu0 = (uint8_t *)link_desc_va + + RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET; + + nbuf_len = qdf_nbuf_len(head) - RX_PKT_TLVS_LEN; + + HAL_RX_UNIFORM_HDR_SET(link_desc_va, OWNER, UNI_DESC_OWNER_SW); + HAL_RX_UNIFORM_HDR_SET(link_desc_va, BUFFER_TYPE, + UNI_DESC_BUF_TYPE_RX_MSDU_LINK); + + /* msdu reconfig */ + msdu_desc_info = (uint8_t *)msdu0 + + RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET; + + dst_ind = hal_rx_msdu_reo_dst_ind_get(link_desc_va); + + qdf_mem_zero(msdu_desc_info, sizeof(struct rx_msdu_desc_info)); + + HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, + FIRST_MSDU_IN_MPDU_FLAG, 1); + 
HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, + LAST_MSDU_IN_MPDU_FLAG, 1); + HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, + MSDU_CONTINUATION, 0x0); + HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, + REO_DESTINATION_INDICATION, dst_ind); + HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, + MSDU_LENGTH, nbuf_len); + HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, + SA_IS_VALID, 1); + HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info, + DA_IS_VALID, 1); + + /* change RX TLV's */ + hal_rx_msdu_start_msdu_len_set( + qdf_nbuf_data(head), nbuf_len); + + cookie = HAL_RX_BUF_COOKIE_GET(msdu0); + + /* map the nbuf before reinject it into HW */ + ret = qdf_nbuf_map_single(soc->osdev, head, + QDF_DMA_BIDIRECTIONAL); + + if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: nbuf map failed !\n", __func__); + qdf_nbuf_free(head); + return QDF_STATUS_E_FAILURE; + } + + paddr = qdf_nbuf_get_frag_paddr(head, 0); + + ret = check_x86_paddr(soc, &head, &paddr, pdev); + + if (ret == QDF_STATUS_E_FAILURE) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: x86 check failed !\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + hal_rxdma_buff_addr_info_set(msdu0, paddr, cookie, DP_WBM2SW_RBM); + + /* Lets fill entrance ring now !!! 
*/ + if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "HAL RING Access For REO entrance SRNG Failed: %pK", + hal_srng); + + return QDF_STATUS_E_FAILURE; + } + + ent_ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng); + + qdf_assert(ent_ring_desc); + + paddr = (uint64_t)buf_info.paddr; + /* buf addr */ + hal_rxdma_buff_addr_info_set(ent_ring_desc, paddr, + buf_info.sw_cookie, + HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST); + /* mpdu desc info */ + ent_mpdu_desc_info = (uint8_t *)ent_ring_desc + + RX_MPDU_DETAILS_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET; + + dst_mpdu_desc_info = (uint8_t *)dst_ring_desc + + REO_DESTINATION_RING_2_RX_MPDU_DESC_INFO_RX_MPDU_DESC_INFO_DETAILS_OFFSET; + + qdf_mem_copy(ent_mpdu_desc_info, dst_mpdu_desc_info, + sizeof(struct rx_mpdu_desc_info)); + qdf_mem_zero(ent_mpdu_desc_info, sizeof(uint32_t)); + + mpdu_wrd = (uint32_t *)dst_mpdu_desc_info; + seq_no = HAL_RX_MPDU_SEQUENCE_NUMBER_GET(mpdu_wrd); + + HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, + MSDU_COUNT, 0x1); + HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, + MPDU_SEQUENCE_NUMBER, seq_no); + + /* unset frag bit */ + HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, + FRAGMENT_FLAG, 0x0); + + /* set sa/da valid bits */ + HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, + SA_IS_VALID, 0x1); + HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, + DA_IS_VALID, 0x1); + HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info, + RAW_MPDU, 0x0); + + /* qdesc addr */ + ent_qdesc_addr = (uint8_t *)ent_ring_desc + + REO_ENTRANCE_RING_4_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET; + + dst_qdesc_addr = (uint8_t *)dst_ring_desc + + REO_DESTINATION_RING_6_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET; + + qdf_mem_copy(ent_qdesc_addr, dst_qdesc_addr, 8); + + HAL_RX_FLD_SET(ent_ring_desc, REO_ENTRANCE_RING_5, + REO_DESTINATION_INDICATION, dst_ind); + + hal_srng_access_end(soc->hal_soc, hal_srng); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: 
reinjection done !\n", __func__);
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag(): Defragment the fragment chain
 * @peer: Pointer to the peer
 * @tid: Transmit Identifier
 * @frag_list_head: Pointer to head list
 * @frag_list_tail: Pointer to tail list
 *
 * Strips the FCS from every fragment, runs the per-security-type
 * decap/demic over the chain, converts the leading 802.11 header to
 * 802.3 and links the fragments into one frag-list ready for reinject.
 *
 * Returns: QDF_STATUS_SUCCESS, or QDF_STATUS_E_DEFRAG_ERROR on any
 * decap/demic failure (stored fragments are freed on failure).
 */
static QDF_STATUS dp_rx_defrag(struct dp_peer *peer, unsigned tid,
			qdf_nbuf_t frag_list_head, qdf_nbuf_t frag_list_tail)
{
	qdf_nbuf_t tmp_next, prev;
	qdf_nbuf_t cur = frag_list_head, msdu;
	uint32_t index, tkip_demic = 0;
	uint16_t hdr_space;
	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
	struct dp_vdev *vdev = peer->vdev;

	hdr_space = dp_rx_defrag_hdrsize(cur);
	/* pick unicast vs multicast security context for this chain */
	index = hal_rx_msdu_is_wlan_mcast(cur) ?
		dp_sec_mcast : dp_sec_ucast;

	/* Remove FCS from all fragments */
	while (cur) {
		tmp_next = qdf_nbuf_next(cur);
		qdf_nbuf_set_next(cur, NULL);
		qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
		/* NOTE(review): prev is assigned but never read in this
		 * function — candidate for removal
		 */
		prev = cur;
		qdf_nbuf_set_next(cur, tmp_next);
		cur = tmp_next;
	}
	cur = frag_list_head;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"%s: Security type: %d\n", __func__,
		peer->security[index].sec_type);

	/* Temporary fix to drop TKIP encrypted packets */
	if (peer->security[index].sec_type ==
			htt_sec_type_tkip) {
		return QDF_STATUS_E_DEFRAG_ERROR;
	}

	/* NOTE(review): because of the temporary TKIP drop above, the
	 * htt_sec_type_tkip case below is currently unreachable; remove
	 * the early return together when TKIP defrag is re-enabled.
	 */
	switch (peer->security[index].sec_type) {
	case htt_sec_type_tkip:
		tkip_demic = 1;
		/* fall through — TKIP shares the nomic decap path and
		 * additionally verifies the Michael MIC afterwards
		 */

	case htt_sec_type_tkip_nomic:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (dp_rx_defrag_tkip_decap(cur, hdr_space)) {

				/* TKIP decap failed, discard frags */
				dp_rx_defrag_frames_free(frag_list_head);

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"dp_rx_defrag: TKIP decap failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			cur = tmp_next;
		}
		break;

	case htt_sec_type_aes_ccmp:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (dp_rx_defrag_ccmp_demic(cur, hdr_space)) {

				/* CCMP demic failed, discard frags */
				dp_rx_defrag_frames_free(frag_list_head);

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"dp_rx_defrag: CCMP demic failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			if (dp_rx_defrag_ccmp_decap(cur, hdr_space)) {

				/* CCMP decap failed, discard frags */
				dp_rx_defrag_frames_free(frag_list_head);

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"dp_rx_defrag: CCMP decap failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			cur = tmp_next;
		}

		/* If success, increment header to be stripped later */
		hdr_space += dp_f_ccmp.ic_header;
		break;
	case htt_sec_type_wep40:
	case htt_sec_type_wep104:
	case htt_sec_type_wep128:
		while (cur) {
			tmp_next = qdf_nbuf_next(cur);
			if (dp_rx_defrag_wep_decap(cur, hdr_space)) {

				/* WEP decap failed, discard frags */
				dp_rx_defrag_frames_free(frag_list_head);

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"dp_rx_defrag: WEP decap failed");

				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			cur = tmp_next;
		}

		/* If success, increment header to be stripped later */
		hdr_space += dp_f_wep.ic_header;
		break;
	default:
		/* open / unknown security: nothing to strip, continue */
		QDF_TRACE(QDF_MODULE_ID_TXRX,
			QDF_TRACE_LEVEL_ERROR,
			"dp_rx_defrag: Did not match any security type");
		break;
	}

	if (tkip_demic) {
		msdu = frag_list_tail; /* Only last fragment has the MIC */

		qdf_mem_copy(key,
			peer->security[index].michael_key,
			sizeof(peer->security[index].michael_key));
		if (dp_rx_defrag_tkip_demic(key, msdu, hdr_space)) {
			/* NOTE(review): only the tail nbuf is freed here
			 * while earlier fragments of the chain appear to be
			 * left to the caller — confirm ownership on this path
			 */
			qdf_nbuf_free(msdu);
			dp_rx_defrag_err(vdev->vdev_id, peer->mac_addr.raw,
				tid, 0, QDF_STATUS_E_DEFRAG_ERROR, msdu,
				NULL, 0);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"dp_rx_defrag: TKIP demic failed");
			return QDF_STATUS_E_DEFRAG_ERROR;
		}
	}

	/* Convert the header to 802.3 header */
	dp_rx_defrag_nwifi_to_8023(frag_list_head, hdr_space);
	dp_rx_construct_fraglist(peer, frag_list_head, hdr_space);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_cleanup(): Clean up
activities
 * @peer: Pointer to the peer
 * @tid: Transmit Identifier
 *
 * Returns: None
 */
void dp_rx_defrag_cleanup(struct dp_peer *peer, unsigned tid)
{
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
		peer->rx_tid[tid].array;

	/* Free up nbufs */
	dp_rx_defrag_frames_free(rx_reorder_array_elem->head);

	/* Free up saved ring descriptors */
	dp_rx_clear_saved_desc_info(peer, tid);

	/* reset the per-TID defrag state for the next sequence */
	rx_reorder_array_elem->head = NULL;
	rx_reorder_array_elem->tail = NULL;
	peer->rx_tid[tid].defrag_timeout_ms = 0;
	peer->rx_tid[tid].curr_frag_num = 0;
	peer->rx_tid[tid].curr_seq_num = 0;
	peer->rx_tid[tid].head_frag_desc = NULL;
}

/*
 * dp_rx_defrag_save_info_from_ring_desc(): Save info from REO ring descriptor
 * @ring_desc: Pointer to the dst ring descriptor
 * @rx_desc: rx descriptor of the head fragment
 * @peer: Pointer to the peer
 * @tid: Transmit Identifier
 *
 * Makes a heap copy of the REO destination ring entry so it can be
 * reused at reinjection time; freed later via dp_rx_clear_saved_desc_info().
 *
 * Returns: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOMEM
 */
static QDF_STATUS dp_rx_defrag_save_info_from_ring_desc(void *ring_desc,
	struct dp_rx_desc *rx_desc, struct dp_peer *peer, unsigned tid)
{
	void *dst_ring_desc = qdf_mem_malloc(
			sizeof(struct reo_destination_ring));

	if (dst_ring_desc == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: Memory alloc failed !\n", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_copy(dst_ring_desc, ring_desc,
		sizeof(struct reo_destination_ring));

	peer->rx_tid[tid].dst_ring_desc = dst_ring_desc;
	peer->rx_tid[tid].head_frag_desc = rx_desc;

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_defrag_store_fragment(): Store incoming fragments
 * @soc: Pointer to the SOC data structure
 * @ring_desc: Pointer to the ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @mpdu_desc_info: MPDU descriptor info
 * @tid: Traffic Identifier
 * @rx_desc: Pointer to rx descriptor
 * @rx_bfs: out-flag, set to 1 when the rx buffer was consumed (freed or
 *          returned to the free-list) so the caller can count it
 *
 * Validates the fragment, inserts it into the per-TID reorder array and,
 * once all fragments of the sequence are present, defragments and
 * reinjects the reassembled MPDU into REO.
 *
 * Returns: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_store_fragment(struct dp_soc *soc,
			void *ring_desc,
			union dp_rx_desc_list_elem_t **head,
			union dp_rx_desc_list_elem_t **tail,
			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			unsigned tid, struct dp_rx_desc *rx_desc,
			uint32_t *rx_bfs)
{
	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
	struct dp_pdev *pdev;
	struct dp_peer *peer;
	uint16_t peer_id;
	uint8_t fragno, more_frag, all_frag_present = 0;
	uint16_t rxseq = mpdu_desc_info->mpdu_seq;
	QDF_STATUS status;
	struct dp_rx_tid *rx_tid;
	uint8_t mpdu_sequence_control_valid;
	uint8_t mpdu_frame_control_valid;
	qdf_nbuf_t frag = rx_desc->nbuf;

	/* Check if the packet is from a valid peer */
	peer_id = DP_PEER_METADATA_PEER_ID_GET(
					mpdu_desc_info->peer_meta_data);
	/* NOTE(review): no matching unref for dp_peer_find_by_id in this
	 * function — confirm reference semantics of this lookup
	 */
	peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		/* We should not receive anything from unknown peer
		 * however, that might happen while we are in the monitor mode.
		 * We don't need to handle that here
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Unknown peer, dropping the fragment");

		qdf_nbuf_free(frag);
		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
		*rx_bfs = 1;

		return QDF_STATUS_E_DEFRAG_ERROR;
	}

	pdev = peer->vdev->pdev;
	rx_tid = &peer->rx_tid[tid];

	rx_reorder_array_elem = peer->rx_tid[tid].array;

	mpdu_sequence_control_valid =
		hal_rx_get_mpdu_sequence_control_valid(rx_desc->rx_buf_start);

	/* Invalid MPDU sequence control field, MPDU is of no use */
	if (!mpdu_sequence_control_valid) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Invalid MPDU seq control field, dropping MPDU");
		qdf_nbuf_free(frag);
		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
		*rx_bfs = 1;

		qdf_assert(0);
		goto end;
	}

	mpdu_frame_control_valid =
		hal_rx_get_mpdu_frame_control_valid(rx_desc->rx_buf_start);

	/* Invalid frame control field */
	if (!mpdu_frame_control_valid) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Invalid frame control field, dropping MPDU");
		qdf_nbuf_free(frag);
		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
		*rx_bfs = 1;

		qdf_assert(0);
		goto end;
	}

	/* Current mpdu sequence */
	more_frag = dp_rx_frag_get_more_frag_bit(rx_desc->rx_buf_start);

	/* HW does not populate the fragment number as of now
	 * need to get from the 802.11 header
	 */
	fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc->rx_buf_start);

	/*
	 * !more_frag: no more fragments to be delivered
	 * !frag_no: packet is not fragmented
	 * !rx_reorder_array_elem->head: no saved fragments so far
	 */
	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
		/* We should not get into this situation here.
		 * It means an unfragmented packet with fragment flag
		 * is delivered over the REO exception ring.
		 * Typically it follows normal rx path.
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Rcvd unfragmented pkt on REO Err srng, dropping");
		qdf_nbuf_free(frag);
		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
		*rx_bfs = 1;

		qdf_assert(0);
		goto end;
	}

	/* Check if the fragment is for the same sequence or a different one */
	if (rx_reorder_array_elem->head) {
		if (rxseq != rx_tid->curr_seq_num) {

			/* Drop stored fragments if out of sequence
			 * fragment is received
			 */
			dp_rx_defrag_frames_free(rx_reorder_array_elem->head);

			rx_reorder_array_elem->head = NULL;
			rx_reorder_array_elem->tail = NULL;

			/* NOTE(review): within this branch the ternary is
			 * always false, so this always logs "seq number"
			 */
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s mismatch, dropping earlier sequence ",
				(rxseq == rx_tid->curr_seq_num)
				? "address"
				: "seq number");

			/*
			 * The sequence number for this fragment becomes the
			 * new sequence number to be processed
			 */
			rx_tid->curr_seq_num = rxseq;

		}
	} else {
		/* Start of a new sequence */
		dp_rx_defrag_cleanup(peer, tid);
		rx_tid->curr_seq_num = rxseq;
	}

	/*
	 * If the earlier sequence was dropped, this will be the fresh start.
	 * Else, continue with next fragment in a given sequence
	 */
	status = dp_rx_defrag_fraglist_insert(peer, tid, &rx_reorder_array_elem->head,
			&rx_reorder_array_elem->tail, frag,
			&all_frag_present);

	/*
	 * Currently, we can have only 6 MSDUs per-MPDU, if the current
	 * packet sequence has more than 6 MSDUs for some reason, we will
	 * have to use the next MSDU link descriptor and chain them together
	 * before reinjection
	 */
	if ((fragno == 0) && (status == QDF_STATUS_SUCCESS) &&
			(rx_reorder_array_elem->head == frag)) {

		/* head fragment: snapshot the REO dst ring entry for the
		 * later reinjection
		 */
		status = dp_rx_defrag_save_info_from_ring_desc(ring_desc,
					rx_desc, peer, tid);

		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: Unable to store ring desc !\n", __func__);
			goto end;
		}
	} else {
		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
		*rx_bfs = 1;

		/* Return the non-head link desc */
		if (dp_rx_link_desc_return(soc, ring_desc,
				HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
				QDF_STATUS_SUCCESS)
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: Failed to return link desc\n",
				__func__);

	}

	if (pdev->soc->rx.flags.defrag_timeout_check)
		dp_rx_defrag_waitlist_remove(peer, tid);

	/* Yet to receive more fragments for this sequence number */
	if (!all_frag_present) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		/* arm the defrag timeout for this TID and wait for the
		 * rest of the sequence
		 */
		peer->rx_tid[tid].defrag_timeout_ms =
			now_ms + pdev->soc->rx.defrag.timeout_ms;

		dp_rx_defrag_waitlist_add(peer, tid);

		return QDF_STATUS_SUCCESS;
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"All fragments received for sequence: %d", rxseq);

	/* Process the fragments */
	status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head,
		rx_reorder_array_elem->tail);
	if (QDF_IS_STATUS_ERROR(status)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Fragment processing failed");

		dp_rx_add_to_free_desc_list(head, tail,
				peer->rx_tid[tid].head_frag_desc);
		*rx_bfs = 1;

		if (dp_rx_link_desc_return(soc,
				peer->rx_tid[tid].dst_ring_desc,
				HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
				QDF_STATUS_SUCCESS)
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: Failed to return link desc\n",
				__func__);
		dp_rx_defrag_cleanup(peer, tid);
		goto end;
	}

	/* Re-inject the fragments back to REO for further processing */
	status = dp_rx_defrag_reo_reinject(peer, tid,
			rx_reorder_array_elem->head);
	if (QDF_IS_STATUS_SUCCESS(status)) {
		rx_reorder_array_elem->head = NULL;
		rx_reorder_array_elem->tail = NULL;
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			"Fragmented sequence successfully reinjected");
	}
	else
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Fragmented sequence reinjection failed");

	dp_rx_defrag_cleanup(peer, tid);
	return QDF_STATUS_SUCCESS;

end:
	return QDF_STATUS_E_DEFRAG_ERROR;
}

/**
 * dp_rx_frag_handle() - Handles fragmented Rx frames
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements RX 802.11 fragmentation handling
 * The handling is mostly same as legacy fragmentation handling.
 * If required, this function can re-inject the frames back to
 * REO ring (with proper setting to by-pass fragmentation check
 * but use duplicate detection / re-ordering and routing these frames
 * to a different core.
 *
 * Return: uint32_t: No.
of elements processed
 */
uint32_t dp_rx_frag_handle(struct dp_soc *soc, void *ring_desc,
		struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		union dp_rx_desc_list_elem_t **head,
		union dp_rx_desc_list_elem_t **tail,
		uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list; /* per MPDU list of MSDUs */
	qdf_nbuf_t msdu = NULL;
	uint32_t tid, msdu_len;
	int idx, rx_bfs = 0;
	QDF_STATUS status;

	qdf_assert(soc);
	qdf_assert(mpdu_desc_info);

	/* Fragment from a valid peer */
	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	qdf_assert(link_desc_va);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"Number of MSDUs to process, num_msdus: %d",
		mpdu_desc_info->msdu_count);


	if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"Not sufficient MSDUs to process");
		return rx_bufs_used;
	}

	/* Get msdu_list for the given MPDU */
	hal_rx_msdu_list_get(link_desc_va, &msdu_list,
		&mpdu_desc_info->msdu_count);

	/* Process all MSDUs in the current MPDU.
	 * NOTE(review): quota-- is evaluated (and consumed) on every loop
	 * test, including the failing one — confirm that off-by-one on the
	 * caller's budget is acceptable.
	 */
	for (idx = 0; (idx < mpdu_desc_info->msdu_count) && quota--; idx++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
				msdu_list.sw_cookie[idx]);

		qdf_assert(rx_desc);

		msdu = rx_desc->nbuf;

		/* buffer was DMA-mapped at refill time; unmap before the
		 * CPU touches the payload
		 */
		qdf_nbuf_unmap_single(soc->osdev, msdu,
				QDF_DMA_BIDIRECTIONAL);

		rx_desc->rx_buf_start = qdf_nbuf_data(msdu);

		msdu_len = hal_rx_msdu_start_msdu_len_get(
			rx_desc->rx_buf_start);

		/* pkt length = payload length + the RX TLV header area */
		qdf_nbuf_set_pktlen(msdu, (msdu_len + RX_PKT_TLVS_LEN));

		tid = hal_rx_mpdu_start_tid_get(rx_desc->rx_buf_start);

		/* Process fragment-by-fragment */
		status = dp_rx_defrag_store_fragment(soc, ring_desc,
						head, tail, mpdu_desc_info,
						tid, rx_desc, &rx_bfs);

		if (rx_bfs)
			rx_bufs_used++;

		if (!QDF_IS_STATUS_SUCCESS(status)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_ERROR,
				"Rx Defrag err seq#:0x%x msdu_count:%d flags:%d",
				mpdu_desc_info->mpdu_seq,
				mpdu_desc_info->msdu_count,
				mpdu_desc_info->mpdu_flags);

			/* No point in processing rest of the fragments */
			break;
		}
	}

	return rx_bufs_used;
}
diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.h
new file mode 100644
index 0000000000000000000000000000000000000000..5ff3c0b7de1722716675a007aa5c084fd5057f05
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.h
@@ -0,0 +1,147 @@
/*
 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_DEFRAG_H
#define _DP_RX_DEFRAG_H

#include "hal_rx.h"

/* NOTE(review): the include targets below were lost in extraction (the
 * angle-bracketed filenames are missing) — restore the ieee80211 header
 * includes before building; verify against the upstream file.
 */
#ifdef CONFIG_MCL
#include
#else
#include
#endif

#define DEFRAG_IEEE80211_ADDR_LEN	6
#define DEFRAG_IEEE80211_KEY_LEN	8
#define DEFRAG_IEEE80211_FCS_LEN	4

#define DP_RX_DEFRAG_IEEE80211_ADDR_COPY(dst, src) \
	qdf_mem_copy(dst, src, IEEE80211_ADDR_LEN)

/* true when the frame control word indicates a QoS data frame
 * (QoS data frames carry a sequence-control field after the QoS control)
 */
#define DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(wh) \
	(((wh) & \
	(IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_QOS)) == \
	(IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS))

#define UNI_DESC_OWNER_SW 0x1
#define UNI_DESC_BUF_TYPE_RX_MSDU_LINK 0x6
/**
 * struct dp_rx_defrag_cipher: structure to indicate cipher header
 * @ic_name: Name
 * @ic_header: header length
 * @ic_trailer: trail length
 * @ic_miclen: MIC length
 */
struct dp_rx_defrag_cipher {
	const char *ic_name;
	uint16_t ic_header;
	uint8_t ic_trailer;
	uint8_t ic_miclen;
};

uint32_t dp_rx_frag_handle(struct dp_soc *soc, void *ring_desc,
		struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		union dp_rx_desc_list_elem_t **head,
		union dp_rx_desc_list_elem_t **tail,
		uint32_t quota);

/*
 * dp_rx_frag_get_mac_hdr() - Return pointer to the mac hdr
 * @rx_desc_info: Pointer to the pkt_tlvs in the
 * nbuf (pkt_tlvs->mac_hdr->data)
 *
 * It is inefficient to peek into the packet for received
 * frames but these APIs are required to get to some of
 * 802.11 fields that hardware does not populate in the
 * rx meta data.
 *
 * Returns: pointer to ieee80211_frame
 */
static inline
struct ieee80211_frame *dp_rx_frag_get_mac_hdr(uint8_t *rx_desc_info)
{
	/* the 802.11 header starts right after the fixed-size RX TLV area */
	int rx_desc_len = hal_rx_get_desc_len();
	return (struct ieee80211_frame *)(rx_desc_info + rx_desc_len);
}

/*
 * dp_rx_frag_get_mpdu_seq_number() - Get mpdu sequence number
 * @rx_desc_info: Pointer to the pkt_tlvs in the
 * nbuf (pkt_tlvs->mac_hdr->data)
 *
 * Returns: uint16_t, rx sequence number
 */
static inline
uint16_t dp_rx_frag_get_mpdu_seq_number(uint8_t *rx_desc_info)
{
	struct ieee80211_frame *mac_hdr;
	mac_hdr = dp_rx_frag_get_mac_hdr(rx_desc_info);

	/* NOTE(review): 16-bit read through a cast of the i_seq byte
	 * array — presumably the header is 2-byte aligned here; confirm
	 * on platforms that fault on unaligned access
	 */
	return qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) >>
		IEEE80211_SEQ_SEQ_SHIFT;
}

/*
 * dp_rx_frag_get_mpdu_frag_number() - Get mpdu fragment number
 * @rx_desc_info: Pointer to the pkt_tlvs in the
 * nbuf (pkt_tlvs->mac_hdr->data)
 *
 * Returns: uint8_t, receive fragment number
 */
static inline
uint8_t dp_rx_frag_get_mpdu_frag_number(uint8_t *rx_desc_info)
{
	struct ieee80211_frame *mac_hdr;
	mac_hdr = dp_rx_frag_get_mac_hdr(rx_desc_info);

	return qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
		IEEE80211_SEQ_FRAG_MASK;
}

/*
 * dp_rx_frag_get_more_frag_bit() - Get more fragment bit
 * @rx_desc_info: Pointer to the pkt_tlvs in the
 * nbuf (pkt_tlvs->mac_hdr->data)
 *
 * Returns: uint8_t, get more fragment bit
 */
static inline
uint8_t dp_rx_frag_get_more_frag_bit(uint8_t *rx_desc_info)
{
	struct ieee80211_frame *mac_hdr;
	mac_hdr = dp_rx_frag_get_mac_hdr(rx_desc_info);

	/* shift normalizes the masked bit down to 0/1 */
	return (mac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG) >> 2;
}

/*
 * dp_rx_get_pkt_dir() - Get the ToDS/FromDS direction bits
 * @rx_desc_info: Pointer to the pkt_tlvs in the
 * nbuf (pkt_tlvs->mac_hdr->data)
 *
 * Returns: uint8_t, FC1 direction field (IEEE80211_FC1_DIR_MASK bits)
 */
static inline
uint8_t dp_rx_get_pkt_dir(uint8_t *rx_desc_info)
{
	struct ieee80211_frame *mac_hdr;
	mac_hdr = dp_rx_frag_get_mac_hdr(rx_desc_info);

	return mac_hdr->i_fc[1] & IEEE80211_FC1_DIR_MASK;
}

void dp_rx_defrag_waitlist_flush(struct dp_soc *soc);
void dp_rx_reorder_flush_frag(struct dp_peer *peer,
			unsigned int tid);
void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid);
void dp_rx_defrag_cleanup(struct dp_peer *peer, unsigned tid);
#endif /* _DP_RX_DEFRAG_H */
diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_desc.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_desc.c
new file mode 100644
index 0000000000000000000000000000000000000000..dda5b71d520e1e39943db61223c0de370605a9ed
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_desc.c
@@ -0,0 +1,167 @@
/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
+ */ + +#include "dp_types.h" +#include "dp_rx.h" + +/* + * dp_rx_desc_pool_alloc() - create a pool of software rx_descs + * at the time of dp rx initialization + * + * @soc: core txrx main context + * @pool_id: pool_id which is one of 3 mac_ids + * @pool_size: number of Rx descriptor in the pool + * @rx_desc_pool: rx descriptor pool pointer + * + * return success or failure + */ +QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id, + uint32_t pool_size, struct rx_desc_pool *rx_desc_pool) +{ + uint32_t i; + + rx_desc_pool->array = + qdf_mem_malloc(pool_size*sizeof(union dp_rx_desc_list_elem_t)); + + if (!(rx_desc_pool->array)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, + "%s: RX Desc Pool[%d] allocation failed\n", + __func__, pool_id); + + return QDF_STATUS_E_NOMEM; + } + + /* Initialize the lock */ + qdf_spinlock_create(&rx_desc_pool->lock); + + qdf_spin_lock_bh(&rx_desc_pool->lock); + rx_desc_pool->pool_size = pool_size; + + /* link SW rx descs into a freelist */ + rx_desc_pool->freelist = &rx_desc_pool->array[0]; + for (i = 0; i < rx_desc_pool->pool_size-1; i++) { + rx_desc_pool->array[i].next = &rx_desc_pool->array[i+1]; + rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18); + rx_desc_pool->array[i].rx_desc.pool_id = pool_id; + rx_desc_pool->array[i].rx_desc.in_use = 0; + } + + rx_desc_pool->array[i].next = NULL; + rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18); + rx_desc_pool->array[i].rx_desc.pool_id = pool_id; + qdf_spin_unlock_bh(&rx_desc_pool->lock); + return QDF_STATUS_SUCCESS; +} + +/* + * dp_rx_desc_pool_free() - free the sw rx desc pool called during + * de-initialization of wifi module. 
+ * + * @soc: core txrx main context + * @pool_id: pool_id which is one of 3 mac_ids + * @rx_desc_pool: rx descriptor pool pointer + */ +void dp_rx_desc_pool_free(struct dp_soc *soc, uint32_t pool_id, + struct rx_desc_pool *rx_desc_pool) +{ + int i; + + qdf_spin_lock_bh(&rx_desc_pool->lock); + for (i = 0; i < rx_desc_pool->pool_size; i++) { + if (rx_desc_pool->array[i].rx_desc.in_use) { + if (!(rx_desc_pool->array[i].rx_desc.unmapped)) + qdf_nbuf_unmap_single(soc->osdev, + rx_desc_pool->array[i].rx_desc.nbuf, + QDF_DMA_BIDIRECTIONAL); + qdf_nbuf_free(rx_desc_pool->array[i].rx_desc.nbuf); + } + } + qdf_mem_free(rx_desc_pool->array); + qdf_spin_unlock_bh(&rx_desc_pool->lock); + qdf_spinlock_destroy(&rx_desc_pool->lock); +} + +/* + * dp_rx_get_free_desc_list() - provide a list of descriptors from + * the free rx desc pool. + * + * @soc: core txrx main context + * @pool_id: pool_id which is one of 3 mac_ids + * @rx_desc_pool: rx descriptor pool pointer + * @num_descs: number of descs requested from freelist + * @desc_list: attach the descs to this list (output parameter) + * @tail: attach the point to last desc of free list (output parameter) + * + * Return: number of descs allocated from free list. + */ +uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id, + struct rx_desc_pool *rx_desc_pool, + uint16_t num_descs, + union dp_rx_desc_list_elem_t **desc_list, + union dp_rx_desc_list_elem_t **tail) +{ + uint16_t count; + + qdf_spin_lock_bh(&rx_desc_pool->lock); + + *desc_list = *tail = rx_desc_pool->freelist; + + for (count = 0; count < num_descs; count++) { + + if (qdf_unlikely(!rx_desc_pool->freelist)) { + qdf_spin_unlock_bh(&rx_desc_pool->lock); + return count; + } + *tail = rx_desc_pool->freelist; + rx_desc_pool->freelist = rx_desc_pool->freelist->next; + } + (*tail)->next = NULL; + qdf_spin_unlock_bh(&rx_desc_pool->lock); + return count; +} + +/* + * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to + * freelist. 
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: attach the point to last desc of local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);


	/* prepend the caller's chain to the pool freelist:
	 * freelist = local list, old freelist hangs off the local tail.
	 * NOTE(review): (*tail) is dereferenced unconditionally (also in
	 * the debug trace) — callers must never pass an empty list here.
	 */
	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
	"temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK\n",
	temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c
new file mode 100644
index 0000000000000000000000000000000000000000..c3a5edaf9068f46fc9857b089c0e91177020fb64
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c
@@ -0,0 +1,1414 @@
/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "dp_types.h" +#include "dp_rx.h" +#include "dp_peer.h" +#include "dp_internal.h" +#include "hal_api.h" +#include "qdf_trace.h" +#include "qdf_nbuf.h" +#ifdef CONFIG_MCL +#include +#else +#include +#endif +#include "dp_rx_defrag.h" +#include /* LLC_SNAP_HDR_LEN */ + +#ifdef RX_DESC_DEBUG_CHECK +static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc) +{ + if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC)) { + return false; + } + rx_desc->magic = 0; + return true; +} +#else +static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc) +{ + return true; +} +#endif + +/** + * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop + * back on same vap or a different vap. + * + * @soc: core DP main context + * @peer: dp peer handler + * @rx_tlv_hdr: start of the rx TLV header + * @nbuf: pkt buffer + * + * Return: bool (true if it is a looped back pkt else false) + * + */ +static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc, + struct dp_peer *peer, + uint8_t *rx_tlv_hdr, + qdf_nbuf_t nbuf) +{ + struct dp_vdev *vdev = peer->vdev; + struct dp_ast_entry *ase; + uint16_t sa_idx = 0; + uint8_t *data; + + /* + * Multicast Echo Check is required only if vdev is STA and + * received pkt is a multicast/broadcast pkt. otherwise + * skip the MEC check. 
+ */ + if (vdev->opmode != wlan_op_mode_sta) + return false; + + if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) + return false; + + data = qdf_nbuf_data(nbuf); + /* + * if the received pkts src mac addr matches with vdev + * mac address then drop the pkt as it is looped back + */ + if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN], + vdev->mac_addr.raw, + DP_MAC_ADDR_LEN))) + return true; + + /* + * In case of qwrap isolation mode, donot drop loopback packets. + * In isolation mode, all packets from the wired stations need to go + * to rootap and loop back to reach the wireless stations and + * vice-versa. + */ + if (qdf_unlikely(vdev->isolation_vdev)) + return false; + + /* if the received pkts src mac addr matches with the + * wired PCs MAC addr which is behind the STA or with + * wireless STAs MAC addr which are behind the Repeater, + * then drop the pkt as it is looped back + */ + qdf_spin_lock_bh(&soc->ast_lock); + if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) { + sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr); + + if ((sa_idx < 0) || + (sa_idx >= (WLAN_UMAC_PSOC_MAX_PEERS * 2))) { + qdf_spin_unlock_bh(&soc->ast_lock); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "invalid sa_idx: %d", sa_idx); + qdf_assert_always(0); + } + + ase = soc->ast_table[sa_idx]; + if (!ase) { + /* We do not get a peer map event for STA and without + * this event we don't know what is STA's sa_idx. + * For this reason the AST is still not associated to + * any index postion in ast_table. + * In these kind of scenarios where sa is valid but + * ast is not in ast_table, we use the below API to get + * AST entry for STA's own mac_address. 
+ */ + ase = dp_peer_ast_hash_find(soc, + &data[DP_MAC_ADDR_LEN]); + + } + } else + ase = dp_peer_ast_hash_find(soc, &data[DP_MAC_ADDR_LEN]); + + if (ase) { + ase->ast_idx = sa_idx; + soc->ast_table[sa_idx] = ase; + + if (ase->pdev_id != vdev->pdev->pdev_id) { + qdf_spin_unlock_bh(&soc->ast_lock); + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_INFO, + "Detected DBDC Root AP %pM, %d %d", + &data[DP_MAC_ADDR_LEN], vdev->pdev->pdev_id, + ase->pdev_id); + return false; + } + + if ((ase->type == CDP_TXRX_AST_TYPE_MEC) || + (ase->peer != peer)) { + qdf_spin_unlock_bh(&soc->ast_lock); + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_INFO, + "received pkt with same src mac %pM", + &data[DP_MAC_ADDR_LEN]); + + return true; + } + } + qdf_spin_unlock_bh(&soc->ast_lock); + return false; +} + +/** + * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to + * (WBM) by address + * + * @soc: core DP main context + * @link_desc_addr: link descriptor addr + * + * Return: QDF_STATUS + */ +QDF_STATUS +dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr, + uint8_t bm_action) +{ + struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring; + void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng; + void *hal_soc = soc->hal_soc; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + void *src_srng_desc; + + if (!wbm_rel_srng) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "WBM RELEASE RING not initialized"); + return status; + } + + if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) { + + /* TODO */ + /* + * Need API to convert from hal_ring pointer to + * Ring Type / Ring Id combo + */ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("HAL RING Access For WBM Release SRNG Failed - %pK"), + wbm_rel_srng); + DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); + goto done; + } + src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng); + if (qdf_likely(src_srng_desc)) { + /* Return link descriptor through WBM ring (SW2WBM)*/ + 
		/* Hand the MSDU link descriptor back to HW through the WBM
		 * release ring entry, with the requested buffer-manager action.
		 */
		hal_rx_msdu_link_desc_set(hal_soc,
				src_srng_desc, link_desc_addr, bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		/* Release ring is full: dump HP/TP state to help debug WBM
		 * backpressure. NOTE(review): the link descriptor is NOT
		 * returned on this path -- confirm it is reclaimed elsewhere.
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			*srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			*srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;

}

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			(WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer-manager action to apply (e.g. put in idle list,
 *	       release MSDU list) -- forwarded unchanged
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action)
{
	/* Extract the buffer-address-info from the REO ring entry and reuse
	 * the by-address return path.
	 */
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}

/**
 * dp_rx_msdus_drop() - Drops all MSDU's per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDU in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
		struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		union dp_rx_desc_list_elem_t **head,
		union dp_rx_desc_list_elem_t **tail,
		uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(link_desc_va, &msdu_list,
		&mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
			msdu_list.sw_cookie[i]);

		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					FL("Invalid rx_desc cookie=%d"),
					msdu_list.sw_cookie[i]);
			/* NOTE(review): returning here skips the link-desc
			 * return below -- confirm the descriptor is not
			 * leaked on this error path.
			 */
			return rx_bufs_used;
		}

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(rx_tlv_hdr))
			hal_rx_print_pn(rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM)*/
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling
 * If the peer is configured to ignore the PN check errors
 * or if DP feels, that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		union dp_rx_desc_list_elem_t **head,
		union dp_rx_desc_list_elem_t **tail,
		uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);


	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"discard rx due to PN error for peer %pK "
			"(%02x:%02x:%02x:%02x:%02x:%02x)\n",
			peer,
			peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);

	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						head, tail, quota);

	return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when sequence number
 * of the MPDU jumps suddenly by 2K.Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame
 * For case B), the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
		struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		union dp_rx_desc_list_elem_t **head,
		union dp_rx_desc_list_elem_t **tail,
		uint32_t quota)
{
	/* Current implementation simply drops the whole MPDU; the
	 * DELBA/DEAUTH generation described above is not done here.
	 */
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
			head, tail, quota);
}

/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *			to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	if (!dp_pdev->first_nbuf) {
		/* First MSDU of a new MPDU: latch the PPDU id and mark the
		 * chain-fragment start on the nbuf.
		 */
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * new a-msdu even before the previous a-msdu is completely
		 * received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;

		hal_rx_mon_hw_desc_get_mpdu_status(rx_tlv_hdr,
			&(dp_pdev->ppdu_info.rx_status));

	}

	/* MPDU is complete once the PPDU id matches and HW has written the
	 * msdu_done attention bit for this buffer.
	 */
	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
				dp_pdev->invalid_peer_tail_msdu,
				nbuf);

	return mpdu_done;
}

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 *
 * This function handles NULL queue descriptor violations arising out
 * a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is setup. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: void
 */
static void
dp_rx_null_q_desc_handle(struct dp_soc *soc,
			qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr,
			uint8_t pool_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	uint8_t tid;

	/* Propagate first/last-MSDU indications from the RX TLVs */
	qdf_nbuf_set_rx_chfrag_start(nbuf,
			hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
			hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("peer is NULL"));

		/* Chain MSDUs on the pdev invalid-peer list until the full
		 * MPDU has been reaped, then hand the chain to the
		 * invalid-peer handler.
		 */
		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		qdf_nbuf_free(nbuf);
		return;
	}
	/*
	 * In qwrap mode if the received packet matches with any of the vdev
	 * mac addresses, drop it. Donot receive multicast packets originated
	 * from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		qdf_nbuf_free(nbuf);
		return;
	}


	if (qdf_unlikely((peer->nawds_enabled == true) &&
			hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			QDF_TRACE_LEVEL_DEBUG,
			"%s free buffer for multicast packet",
			__func__);
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		qdf_nbuf_free(nbuf);
		return;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
			hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			QDF_TRACE_LEVEL_ERROR,
			FL("mcast Policy Check Drop pkt"));
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
		/* TODO: Assuming that qos_control_valid also indicates
		 * unicast. Should we check this?
		 */
		tid = hal_rx_mpdu_start_tid_get(rx_tlv_hdr);
		if (peer &&
			peer->rx_tid[tid].hw_qdesc_vaddr_unaligned == NULL) {
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		}
	}

#ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"%s: p_id %d msdu_len %d hdr_off %d",
		__func__, peer_id, msdu_len, l2_hdr_offset);

	print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
			qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (qdf_unlikely(peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_INFO,
				FL("received pkt with same src MAC"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			return;
		}

		if (vdev->osif_rx) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				FL("vdev %pK osif_rx %pK"), vdev,
				vdev->osif_rx);
			qdf_nbuf_set_next(nbuf, NULL);
			vdev->osif_rx(vdev->osif_vdev, nbuf);
			DP_STATS_INCC_PKT(vdev->pdev, rx.multicast, 1,
				qdf_nbuf_len(nbuf),
				hal_rx_msdu_end_da_is_mcbc_get(
					rx_tlv_hdr));
			DP_STATS_INC_PKT(vdev->pdev, rx.to_stack, 1,
				qdf_nbuf_len(nbuf));
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("INVALID vdev %pK OR osif_rx"), vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		}
	}
	return;
}

/**
* dp_rx_err_deliver() - Function to deliver error frames to OS
*
* @soc: core DP main context
* @rx_desc : pointer to the sw rx descriptor
* @head: pointer to head of rx descriptors to be added to free list
* @tail: pointer to tail of rx descriptors to be added to free list
* quota: upper limit of descriptors that can be reaped
*
* Return: uint32_t: No.
* of Rx buffers reaped
*/
/* NOTE(review): the kernel-doc above is stale -- the actual signature is
 * (soc, nbuf, rx_tlv_hdr) and the function returns void. It delivers error
 * frames to the OS only for mesh vdevs; all other frames are dropped.
 */
static void
dp_rx_err_deliver(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	struct ether_header *eh;
	bool isBroadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	/* Treated as a single-buffer MPDU for delivery purposes */
	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("peer is NULL"));
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/* Drop & free packet if mesh mode not enabled */
	if (!vdev->mesh_vdev) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
			== QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
			FL("mesh pkt filtered"));
		DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

		qdf_nbuf_free(nbuf);
		return;

	}
	dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);

	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			(vdev->rx_decap_type ==
				htt_cmn_pkt_type_ethernet))) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		isBroadcast = (IEEE80211_IS_BROADCAST
				(eh->ether_dhost)) ? 1 : 0 ;
		if (isBroadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		/* NOTE(review): vdev->osif_rx is called unconditionally here,
		 * unlike dp_rx_null_q_desc_handle which checks it first --
		 * confirm osif_rx is guaranteed registered for mesh vdevs.
		 */
		DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}

	return;
}

/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: DP SOC handle
 * @rx_desc : pointer to the sw rx descriptor
 * @head: pointer to head of rx descriptors to be added to free list
 * @tail: pointer to tail of rx descriptors to be added to free list
 *
 * return: void
 */
void
dp_rx_process_mic_error(struct dp_soc *soc,
			qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_pkt_hdr;
	struct dp_peer *peer;
	uint16_t peer_id;

	/* Only report the MIC error once per MPDU (on its first MSDU) */
	if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr))
		return;

	rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf));
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"PDEV not found"); + goto fail; + } + + tops = pdev->soc->cdp_soc.ol_ops; + if (tops->rx_mic_error) + tops->rx_mic_error(pdev->osif_pdev, vdev->vdev_id, wh); + +fail: + qdf_nbuf_free(nbuf); + return; +} + +/** + * dp_rx_err_process() - Processes error frames routed to REO error ring + * + * @soc: core txrx main context + * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced + * @quota: No. of units (packets) that can be serviced in one shot. + * + * This function implements error processing and top level demultiplexer + * for all the frames routed to REO error ring. + * + * Return: uint32_t: No. of elements processed + */ +uint32_t +dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota) +{ + void *hal_soc; + void *ring_desc; + union dp_rx_desc_list_elem_t *head = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + uint32_t rx_bufs_used = 0; + uint8_t buf_type; + uint8_t error, rbm; + struct hal_rx_mpdu_desc_info mpdu_desc_info; + struct hal_buf_info hbi; + struct dp_pdev *dp_pdev; + struct dp_srng *dp_rxdma_srng; + struct rx_desc_pool *rx_desc_pool; + uint32_t cookie = 0; + void *link_desc_va; + struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */ + uint16_t num_msdus; + + /* Debug -- Remove later */ + qdf_assert(soc && hal_ring); + + hal_soc = soc->hal_soc; + + /* Debug -- Remove later */ + qdf_assert(hal_soc); + + if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) { + + /* TODO */ + /* + * Need API to convert from hal_ring pointer to + * Ring Type / Ring Id combo + */ + DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("HAL RING Access Failed -- %pK"), hal_ring); + goto done; + } + + while (qdf_likely(quota-- && (ring_desc = + hal_srng_dst_get_next(hal_soc, hal_ring)))) { + + DP_STATS_INC(soc, rx.err_ring_pkts, 1); + + error = HAL_RX_ERROR_STATUS_GET(ring_desc); + + qdf_assert(error == HAL_REO_ERROR_DETECTED); + + buf_type = 
HAL_RX_REO_BUF_TYPE_GET(ring_desc); + /* + * For REO error ring, expect only MSDU LINK DESC + */ + qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE); + + cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc); + /* + * check for the magic number in the sw cookie + */ + qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) & + LINK_DESC_ID_START); + + /* + * Check if the buffer is to be processed on this processor + */ + rbm = hal_rx_ret_buf_manager_get(ring_desc); + + hal_rx_reo_buf_paddr_get(ring_desc, &hbi); + link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi); + hal_rx_msdu_list_get(link_desc_va, &msdu_list, &num_msdus); + + if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) && + (msdu_list.rbm[0] != + HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) { + /* TODO */ + /* Call appropriate handler */ + DP_STATS_INC(soc, rx.err.invalid_rbm, 1); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Invalid RBM %d"), msdu_list.rbm[0]); + + /* Return link descriptor through WBM ring (SW2WBM)*/ + dp_rx_link_desc_return(soc, ring_desc, + HAL_BM_ACTION_RELEASE_MSDU_LIST); + continue; + } + + /* Get the MPDU DESC info */ + hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info); + + if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) { + /* TODO */ + rx_bufs_used += dp_rx_frag_handle(soc, + ring_desc, &mpdu_desc_info, + &head, &tail, quota); + DP_STATS_INC(soc, rx.rx_frags, 1); + continue; + } + + if (hal_rx_reo_is_pn_error(ring_desc)) { + /* TOD0 */ + DP_STATS_INC(soc, + rx.err. + reo_error[HAL_REO_ERR_PN_CHECK_FAILED], + 1); + rx_bufs_used += dp_rx_pn_error_handle(soc, + ring_desc, &mpdu_desc_info, + &head, &tail, quota); + continue; + } + + if (hal_rx_reo_is_2k_jump(ring_desc)) { + /* TOD0 */ + DP_STATS_INC(soc, + rx.err. 
+ reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP], + 1); + rx_bufs_used += dp_rx_2k_jump_handle(soc, + ring_desc, &mpdu_desc_info, + &head, &tail, quota); + continue; + } + } + +done: + hal_srng_access_end(hal_soc, hal_ring); + + if (soc->rx.flags.defrag_timeout_check) + dp_rx_defrag_waitlist_flush(soc); + + /* Assume MAC id = 0, owner = 0 */ + if (rx_bufs_used) { + dp_pdev = soc->pdev_list[0]; + dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring; + rx_desc_pool = &soc->rx_desc_buf[0]; + + dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool, + rx_bufs_used, &head, &tail); + } + + return rx_bufs_used; /* Assume no scale factor for now */ +} + +/** + * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring + * + * @soc: core txrx main context + * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced + * @quota: No. of units (packets) that can be serviced in one shot. + * + * This function implements error processing and top level demultiplexer + * for all the frames routed to WBM2HOST sw release ring. + * + * Return: uint32_t: No. 
 * of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	/* Pass 1: reap all buffers from the ring, stash the WBM error info
	 * in each nbuf's TLV area, and queue the rx descriptors for refill.
	 */
	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_RXDMA) ||
				(HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					FL("Invalid rx_desc cookie=%d"),
					rx_buf_cookie);
			continue;
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					&wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					&tail[rx_desc->pool_id],
					rx_desc);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	/* Replenish the RXDMA refill rings before the (possibly slow)
	 * per-nbuf error handling below.
	 */
	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	/* Pass 2: demultiplex each reaped nbuf by the error info stored in
	 * its TLV area (REO vs RXDMA source, then per error code).
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;
		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
				== HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					rx.err.reo_error
					[wbm_err_info.reo_err_code], 1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_WARN,
						"Got pkt with REO ERROR: %d",
						wbm_err_info.reo_err_code);
					dp_rx_null_q_desc_handle(soc,
							nbuf,
							rx_tlv_hdr,
							pool_id);
					nbuf = next;
					continue;
				/* TODO */
				/* Add per error code accounting */

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"REO error %d detected",
						wbm_err_info.reo_err_code);
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
					HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
				== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				struct dp_peer *peer = NULL;
				uint16_t peer_id = 0xFFFF;

				DP_STATS_INC(soc,
					rx.err.rxdma_error
					[wbm_err_info.rxdma_err_code], 1);
				peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
				peer = dp_peer_find_by_id(soc, peer_id);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
					dp_rx_err_deliver(soc,
							nbuf,
							rx_tlv_hdr);
					nbuf = next;
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc,
							nbuf,
							rx_tlv_hdr);
					nbuf = next;
					if (peer)
						DP_STATS_INC(peer, rx.err.mic_err, 1);
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (peer)
						DP_STATS_INC(peer, rx.err.decrypt_err, 1);
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"Packet received with Decrypt error");
					break;

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"RXDMA error %d",
						wbm_err_info.
						rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		/* Frames not consumed by a handler above are dumped and
		 * dropped here.
		 */
		hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of decs list to be freed

 * Return: number of msdu in MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
	void *rxdma_dst_ring_desc,
	union dp_rx_desc_list_elem_t **head,
	union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
		&p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	/* Walk the chain of MSDU link descriptors, freeing (or deferring)
	 * the MSDUs of each and returning each link descriptor to HW.
	 */
	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(rx_msdu_link_desc, &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* if the msdus belongs to NSS offloaded radio &&
			 * the rbm is not SW1_BM then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). let
			 * these buffers be given to NSS completion ring for
			 * NSS to free them.
			 * else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
				wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert(rx_desc);
					msdu = rx_desc->nbuf;

					/* NOTE(review): unmap direction here is
					 * FROM_DEVICE while the WBM path uses
					 * BIDIRECTIONAL -- confirm this matches
					 * how the buffer was mapped.
					 */
					qdf_nbuf_unmap_single(soc->osdev, msdu,
						QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"[%s][%d] msdu_nbuf=%pK \n",
						__func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
						tail, rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
			&p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

/**
* dp_rxdma_err_process() - RxDMA error processing functionality
*
* @soc: core txrx main contex
* @mac_id: mac id which is one of 3 mac_ids
* @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
* @quota: No. of units (packets) that can be serviced in one shot.
+ +* Return: num of buffers processed +*/ +uint32_t +dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) +{ + struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id); + int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id); + void *hal_soc; + void *rxdma_dst_ring_desc; + void *err_dst_srng; + union dp_rx_desc_list_elem_t *head = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + struct dp_srng *dp_rxdma_srng; + struct rx_desc_pool *rx_desc_pool; + uint32_t work_done = 0; + uint32_t rx_bufs_used = 0; + +#ifdef DP_INTR_POLL_BASED + if (!pdev) + return 0; +#endif + err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng; + + if (!err_dst_srng) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s %d : HAL Monitor Destination Ring Init \ + Failed -- %pK\n", + __func__, __LINE__, err_dst_srng); + return 0; + } + + hal_soc = soc->hal_soc; + + qdf_assert(hal_soc); + + if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s %d : HAL Monitor Destination Ring Init \ + Failed -- %pK\n", + __func__, __LINE__, err_dst_srng); + return 0; + } + + while (qdf_likely(quota-- && (rxdma_dst_ring_desc = + hal_srng_dst_get_next(hal_soc, err_dst_srng)))) { + + rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id, + rxdma_dst_ring_desc, + &head, &tail); + } + + hal_srng_access_end(hal_soc, err_dst_srng); + + if (rx_bufs_used) { + dp_rxdma_srng = &pdev->rx_refill_buf_ring; + rx_desc_pool = &soc->rx_desc_buf[mac_id]; + + dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, + rx_desc_pool, rx_bufs_used, &head, &tail); + + work_done += rx_bufs_used; + } + + return work_done; +} diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon.h new file mode 100644 index 0000000000000000000000000000000000000000..8269c34450261c25b70433bec8955513af231651 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon.h @@ 
-0,0 +1,150 @@
/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_MON_H_
#define _DP_RX_MON_H_

#ifdef CONFIG_MCL
/* NOTE(review): the header name after #include was lost in extraction --
 * restore the original MCL include before building.
 */
#include
#endif

/**
* dp_rx_mon_dest_process() - Brain of the Rx processing functionality
*	Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
* @soc: core txrx main context
* @mac_id: mac id of the ring to be serviced
* @quota: No. of units (packets) that can be serviced in one shot.
*
* This function implements the core of Rx functionality. This is
* expected to handle only non-error frames.
*
* Return: uint32_t: No. of elements processed
*/
void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id,
	uint32_t quota);

QDF_STATUS dp_rx_pdev_mon_attach(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_mon_detach(struct dp_pdev *pdev);

QDF_STATUS dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev, int mac_id);
QDF_STATUS dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev, int mac_id);

uint32_t dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota);
QDF_STATUS dp_rx_mon_deliver(struct dp_soc *soc, uint32_t mac_id,
	qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu);

uint32_t dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id,
	uint32_t quota);

#ifndef REMOVE_MON_DBG_STATS
/*
 * dp_rx_mon_update_dbg_ppdu_stats() - Update status ring TLV count
 * @ppdu_info: HAL RX PPDU info retrieved from status ring TLV
 * @rx_mon_stats: monitor mode status/destination ring PPDU and MPDU count
 *
 * Update status ring PPDU start and end count. Keep track TLV state on
 * PPDU start and end to find out if start and end is matching. Keep
 * track missing PPDU start and end count. Keep track matching PPDU
 * start and end count.
+ * + * Return: None + */ +static inline void +dp_rx_mon_update_dbg_ppdu_stats(struct hal_rx_ppdu_info *ppdu_info, + struct cdp_pdev_mon_stats *rx_mon_stats) +{ + if (ppdu_info->rx_state == + HAL_RX_MON_PPDU_START) { + rx_mon_stats->status_ppdu_start++; + if (rx_mon_stats->status_ppdu_state + != CDP_MON_PPDU_END) + rx_mon_stats->status_ppdu_end_mis++; + rx_mon_stats->status_ppdu_state + = CDP_MON_PPDU_START; + } else if (ppdu_info->rx_state == + HAL_RX_MON_PPDU_END) { + rx_mon_stats->status_ppdu_end++; + if (rx_mon_stats->status_ppdu_state + != CDP_MON_PPDU_START) + rx_mon_stats->status_ppdu_start_mis++; + else + rx_mon_stats->status_ppdu_compl++; + rx_mon_stats->status_ppdu_state + = CDP_MON_PPDU_END; + } +} + +/* + * dp_rx_mon_init_dbg_ppdu_stats() - initialization for monitor mode stats + * @ppdu_info: HAL RX PPDU info retrieved from status ring TLV + * @rx_mon_stats: monitor mode status/destination ring PPDU and MPDU count + * + * Return: None + */ +static inline void +dp_rx_mon_init_dbg_ppdu_stats(struct hal_rx_ppdu_info *ppdu_info, + struct cdp_pdev_mon_stats *rx_mon_stats) +{ + ppdu_info->rx_state = HAL_RX_MON_PPDU_END; + rx_mon_stats->status_ppdu_state + = CDP_MON_PPDU_END; +} + +/* + * dp_rx_mon_dbg_dbg_ppdu_stats() - Print monitor mode status ring stats + * @ppdu_info: HAL RX PPDU info retrieved from status ring TLV + * @rx_mon_stats: monitor mode status/destination ring PPDU and MPDU count + * + * Print monitor mode PPDU start and end TLV count + * Return: None + */ +static inline void +dp_rx_mon_print_dbg_ppdu_stats(struct cdp_pdev_mon_stats *rx_mon_stats) +{ + DP_PRINT_STATS("status_ppdu_compl_cnt = %d", + rx_mon_stats->status_ppdu_compl); + DP_PRINT_STATS("status_ppdu_start_cnt = %d", + rx_mon_stats->status_ppdu_start); + DP_PRINT_STATS("status_ppdu_end_cnt = %d", + rx_mon_stats->status_ppdu_end); + DP_PRINT_STATS("status_ppdu_start_mis_cnt = %d", + rx_mon_stats->status_ppdu_start_mis); + DP_PRINT_STATS("status_ppdu_end_mis_cnt = %d", + 
rx_mon_stats->status_ppdu_end_mis); +} + +#else +static inline void +dp_rx_mon_update_dbg_ppdu_stats(struct hal_rx_ppdu_info *ppdu_info, + struct cdp_pdev_mon_stats *rx_mon_stats) +{ +} + +static inline void +dp_rx_mon_init_dbg_ppdu_stats(struct hal_rx_ppdu_info *ppdu_info, + struct cdp_pdev_mon_stats *rx_mon_stats) +{ +} + +static inline void +dp_rx_mon_print_dbg_ppdu_stats(struct hal_rx_ppdu_info *ppdu_info, + struct cdp_pdev_mon_stats *rx_mon_stats) +{ +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon_dest.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon_dest.c new file mode 100644 index 0000000000000000000000000000000000000000..1a53c59ef6d02840d539ffdf24da1a8453a96a22 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon_dest.c @@ -0,0 +1,1202 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "dp_types.h" +#include "dp_rx.h" +#include "dp_peer.h" +#include "hal_rx.h" +#include "hal_api.h" +#include "qdf_trace.h" +#include "qdf_nbuf.h" +#include "hal_api_mon.h" +#include "dp_rx_mon.h" +#include "wlan_cfg.h" +#include "dp_internal.h" + +/** + * dp_rx_mon_link_desc_return() - Return a MPDU link descriptor to HW + * (WBM), following error handling + * + * @dp_pdev: core txrx pdev context + * @buf_addr_info: void pointer to monitor link descriptor buf addr info + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev, + void *buf_addr_info, int mac_id) +{ + struct dp_srng *dp_srng; + void *hal_srng; + void *hal_soc; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + void *src_srng_desc; + int mac_for_pdev = dp_get_mac_id_for_mac(dp_pdev->soc, mac_id); + + hal_soc = dp_pdev->soc->hal_soc; + + dp_srng = &dp_pdev->rxdma_mon_desc_ring[mac_for_pdev]; + hal_srng = dp_srng->hal_srng; + + qdf_assert(hal_srng); + + if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_srng))) { + + /* TODO */ + /* + * Need API to convert from hal_ring pointer to + * Ring Type / Ring Id combo + */ + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s %d : \ + HAL RING Access For WBM Release SRNG Failed -- %pK\n", + __func__, __LINE__, hal_srng); + goto done; + } + + src_srng_desc = hal_srng_src_get_next(hal_soc, hal_srng); + + if (qdf_likely(src_srng_desc)) { + /* Return link descriptor through WBM ring (SW2WBM)*/ + hal_rx_mon_msdu_link_desc_set(hal_soc, + src_srng_desc, buf_addr_info); + status = QDF_STATUS_SUCCESS; + } else { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s %d -- Monitor Link Desc WBM Release Ring Full\n", + __func__, __LINE__); + } +done: + hal_srng_access_end(hal_soc, hal_srng); + return status; +} + +/** + * dp_mon_adjust_frag_len() - MPDU and MSDU may spread across + * multiple nbufs. 
This function + * is to return data length in + * fragmented buffer + * + * @total_len: pointer to remaining data length. + * @frag_len: pointer to data length in this fragment. +*/ +static inline void dp_mon_adjust_frag_len(uint32_t *total_len, +uint32_t *frag_len) +{ + if (*total_len >= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) { + *frag_len = RX_BUFFER_SIZE - RX_PKT_TLVS_LEN; + *total_len -= *frag_len; + } else { + *frag_len = *total_len; + *total_len = 0; + } +} + +/** + * dp_rx_mon_mpdu_pop() - Return a MPDU link descriptor to HW + * (WBM), following error handling + * + * @soc: core DP main context + * @mac_id: mac id which is one of 3 mac_ids + * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info + * @head_msdu: head of msdu to be popped + * @tail_msdu: tail of msdu to be popped + * @npackets: number of packet to be popped + * @ppdu_id: ppdu id of processing ppdu + * @head: head of descs list to be freed + * @tail: tail of decs list to be freed + * Return: number of msdu in MPDU to be popped + */ +static inline uint32_t +dp_rx_mon_mpdu_pop(struct dp_soc *soc, uint32_t mac_id, + void *rxdma_dst_ring_desc, qdf_nbuf_t *head_msdu, + qdf_nbuf_t *tail_msdu, uint32_t *npackets, uint32_t *ppdu_id, + union dp_rx_desc_list_elem_t **head, + union dp_rx_desc_list_elem_t **tail) +{ + struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id); + void *rx_desc_tlv; + void *rx_msdu_link_desc; + qdf_nbuf_t msdu; + qdf_nbuf_t last; + struct hal_rx_msdu_list msdu_list; + uint16_t num_msdus; + uint32_t rx_buf_size, rx_pkt_offset; + struct hal_buf_info buf_info; + void *p_buf_addr_info; + void *p_last_buf_addr_info; + uint32_t rx_bufs_used = 0; + uint32_t msdu_ppdu_id, msdu_cnt, last_ppdu_id; + uint8_t *data; + uint32_t i; + uint32_t total_frag_len = 0, frag_len = 0; + bool is_frag, is_first_msdu; + bool drop_mpdu = false; + + msdu = 0; + last_ppdu_id = dp_pdev->ppdu_info.com_info.last_ppdu_id; + + last = NULL; + + 
hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info, + &p_last_buf_addr_info, &msdu_cnt); + + if ((hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc) == + HAL_RX_WBM_RXDMA_PSH_RSN_ERROR)) { + uint8_t rxdma_err = + hal_rx_reo_ent_rxdma_error_code_get( + rxdma_dst_ring_desc); + if (qdf_unlikely((rxdma_err == HAL_RXDMA_ERR_FLUSH_REQUEST) || + (rxdma_err == HAL_RXDMA_ERR_MPDU_LENGTH) || + (rxdma_err == HAL_RXDMA_ERR_OVERFLOW))) { + drop_mpdu = true; + dp_pdev->rx_mon_stats.dest_mpdu_drop++; + } + } + + is_frag = false; + is_first_msdu = true; + + do { + rx_msdu_link_desc = + dp_rx_cookie_2_mon_link_desc_va(dp_pdev, &buf_info, + mac_id); + + qdf_assert(rx_msdu_link_desc); + + hal_rx_msdu_list_get(rx_msdu_link_desc, &msdu_list, &num_msdus); + + for (i = 0; i < num_msdus; i++) { + uint32_t l2_hdr_offset; + struct dp_rx_desc *rx_desc = + dp_rx_cookie_2_va_mon_buf(soc, + msdu_list.sw_cookie[i]); + + qdf_assert(rx_desc); + msdu = rx_desc->nbuf; + + if (rx_desc->unmapped == 0) { + qdf_nbuf_unmap_single(soc->osdev, msdu, + QDF_DMA_FROM_DEVICE); + rx_desc->unmapped = 1; + } + + if (drop_mpdu) { + qdf_nbuf_free(msdu); + msdu = NULL; + goto next_msdu; + } + + data = qdf_nbuf_data(msdu); + + rx_desc_tlv = HAL_RX_MON_DEST_GET_DESC(data); + + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + "[%s] i=%d, ppdu_id=%x, " + "last_ppdu_id=%x num_msdus = %u\n", + __func__, i, *ppdu_id, + last_ppdu_id, num_msdus); + + if (is_first_msdu) { + msdu_ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET( + rx_desc_tlv); + is_first_msdu = false; + + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + "[%s] msdu_ppdu_id=%x\n", + __func__, msdu_ppdu_id); + + if (*ppdu_id > msdu_ppdu_id) + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] ppdu_id=%d " + "msdu_ppdu_id=%d\n", + __func__, __LINE__, *ppdu_id, + msdu_ppdu_id); + + if ((*ppdu_id < msdu_ppdu_id) && (*ppdu_id > + last_ppdu_id)) { + *ppdu_id = msdu_ppdu_id; + return rx_bufs_used; + } + } + + if 
(hal_rx_desc_is_first_msdu(rx_desc_tlv)) + hal_rx_mon_hw_desc_get_mpdu_status(rx_desc_tlv, + &(dp_pdev->ppdu_info.rx_status)); + + + if (msdu_list.msdu_info[i].msdu_flags & + HAL_MSDU_F_MSDU_CONTINUATION) { + if (!is_frag) { + total_frag_len = + msdu_list.msdu_info[i].msdu_len; + is_frag = true; + } + dp_mon_adjust_frag_len( + &total_frag_len, &frag_len); + } else { + if (is_frag) { + dp_mon_adjust_frag_len( + &total_frag_len, &frag_len); + } else { + frag_len = + msdu_list.msdu_info[i].msdu_len; + } + is_frag = false; + msdu_cnt--; + } + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s total_len %u frag_len %u flags %u", + __func__, total_frag_len, frag_len, + msdu_list.msdu_info[i].msdu_flags); + + rx_pkt_offset = HAL_RX_MON_HW_RX_DESC_SIZE(); + /* + * HW structures call this L3 header padding + * -- even though this is actually the offset + * from the buffer beginning where the L2 + * header begins. + */ + l2_hdr_offset = + hal_rx_msdu_end_l3_hdr_padding_get(data); + + rx_buf_size = rx_pkt_offset + l2_hdr_offset + + frag_len; + + qdf_nbuf_set_pktlen(msdu, rx_buf_size); + +#if 0 + /* Disble it.see packet on msdu done set to 0 */ + /* + * Check if DMA completed -- msdu_done is the + * last bit to be written + */ + if (!hal_rx_attn_msdu_done_get(rx_desc_tlv)) { + + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + "%s:%d: Pkt Desc\n", + __func__, __LINE__); + + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + rx_desc_tlv, 128); + + qdf_assert_always(0); + } +#endif + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + "%s: rx_pkt_offset=%d, l2_hdr_offset=%d, msdu_len=%d, addr=%pK skb->len %lu", + __func__, rx_pkt_offset, l2_hdr_offset, + msdu_list.msdu_info[i].msdu_len, + qdf_nbuf_data(msdu), qdf_nbuf_len(msdu)); + + if (head_msdu && *head_msdu == NULL) { + *head_msdu = msdu; + } else { + if (last) + qdf_nbuf_set_next(last, msdu); + } + + last = msdu; +next_msdu: + rx_bufs_used++; + dp_rx_add_to_free_desc_list(head, + tail, rx_desc); 
+ } + + hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info, + &p_buf_addr_info); + + if (dp_rx_mon_link_desc_return(dp_pdev, p_last_buf_addr_info, + mac_id) != QDF_STATUS_SUCCESS) + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "dp_rx_mon_link_desc_return failed\n"); + + p_last_buf_addr_info = p_buf_addr_info; + + } while (buf_info.paddr && msdu_cnt); + + if (last) + qdf_nbuf_set_next(last, NULL); + + *tail_msdu = msdu; + + return rx_bufs_used; + +} + +static inline +void dp_rx_msdus_set_payload(qdf_nbuf_t msdu) +{ + uint8_t *data; + uint32_t rx_pkt_offset, l2_hdr_offset; + + data = qdf_nbuf_data(msdu); + rx_pkt_offset = HAL_RX_MON_HW_RX_DESC_SIZE(); + l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(data); + qdf_nbuf_pull_head(msdu, rx_pkt_offset + l2_hdr_offset); +} + +static inline +qdf_nbuf_t dp_rx_mon_restitch_mpdu_from_msdus(struct dp_soc *soc, + uint32_t mac_id, qdf_nbuf_t head_msdu, qdf_nbuf_t last_msdu, + struct cdp_mon_status *rx_status) +{ + qdf_nbuf_t msdu, mpdu_buf, prev_buf, msdu_orig, head_frag_list; + uint32_t decap_format, wifi_hdr_len, sec_hdr_len, msdu_llc_len, + mpdu_buf_len, decap_hdr_pull_bytes, frag_list_sum_len, dir, + is_amsdu, is_first_frag, amsdu_pad; + void *rx_desc; + char *hdr_desc; + unsigned char *dest; + struct ieee80211_frame *wh; + struct ieee80211_qoscntl *qos; + struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id); + head_frag_list = NULL; + mpdu_buf = NULL; + + /* The nbuf has been pulled just beyond the status and points to the + * payload + */ + if (!head_msdu) + goto mpdu_stitch_fail; + + msdu_orig = head_msdu; + + rx_desc = qdf_nbuf_data(msdu_orig); + + if (HAL_RX_DESC_GET_MPDU_LENGTH_ERR(rx_desc)) { + /* It looks like there is some issue on MPDU len err */ + /* Need further investigate if drop the packet */ + DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1); + return NULL; + } + + rx_desc = qdf_nbuf_data(last_msdu); + + rx_status->cdp_rs_fcs_err = HAL_RX_DESC_GET_MPDU_FCS_ERR(rx_desc); + 
dp_pdev->ppdu_info.rx_status.rs_fcs_err = + HAL_RX_DESC_GET_MPDU_FCS_ERR(rx_desc); + + /* Fill out the rx_status from the PPDU start and end fields */ + /* HAL_RX_GET_PPDU_STATUS(soc, mac_id, rx_status); */ + + rx_desc = qdf_nbuf_data(head_msdu); + + decap_format = HAL_RX_DESC_GET_DECAP_FORMAT(rx_desc); + + /* Easy case - The MSDU status indicates that this is a non-decapped + * packet in RAW mode. + */ + if (decap_format == HAL_HW_RX_DECAP_FORMAT_RAW) { + /* Note that this path might suffer from headroom unavailabilty + * - but the RX status is usually enough + */ + + dp_rx_msdus_set_payload(head_msdu); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] decap format raw head %pK head->next %pK last_msdu %pK last_msdu->next %pK", + __func__, __LINE__, head_msdu, head_msdu->next, + last_msdu, last_msdu->next); + + mpdu_buf = head_msdu; + + prev_buf = mpdu_buf; + + frag_list_sum_len = 0; + msdu = qdf_nbuf_next(head_msdu); + is_first_frag = 1; + + while (msdu) { + + dp_rx_msdus_set_payload(msdu); + + if (is_first_frag) { + is_first_frag = 0; + head_frag_list = msdu; + } + + frag_list_sum_len += qdf_nbuf_len(msdu); + + /* Maintain the linking of the cloned MSDUS */ + qdf_nbuf_set_next_ext(prev_buf, msdu); + + /* Move to the next */ + prev_buf = msdu; + msdu = qdf_nbuf_next(msdu); + } + + qdf_nbuf_trim_tail(prev_buf, HAL_RX_FCS_LEN); + + /* If there were more fragments to this RAW frame */ + if (head_frag_list) { + if (frag_list_sum_len < + sizeof(struct ieee80211_frame_min_one)) { + DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1); + return NULL; + } + frag_list_sum_len -= HAL_RX_FCS_LEN; + qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list, + frag_list_sum_len); + qdf_nbuf_set_next(mpdu_buf, NULL); + } + + goto mpdu_stitch_done; + } + + /* Decap mode: + * Calculate the amount of header in decapped packet to knock off based + * on the decap type and the corresponding number of raw bytes to copy + * status header + */ + rx_desc = qdf_nbuf_data(head_msdu); + 
+ hdr_desc = HAL_RX_DESC_GET_80211_HDR(rx_desc); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] decap format not raw", + __func__, __LINE__); + + + /* Base size */ + wifi_hdr_len = sizeof(struct ieee80211_frame); + wh = (struct ieee80211_frame *)hdr_desc; + + dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; + + if (dir == IEEE80211_FC1_DIR_DSTODS) + wifi_hdr_len += 6; + + is_amsdu = 0; + if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) { + qos = (struct ieee80211_qoscntl *) + (hdr_desc + wifi_hdr_len); + wifi_hdr_len += 2; + + is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU); + } + + /*Calculate security header length based on 'Protected' + * and 'EXT_IV' flag + * */ + if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + char *iv = (char *)wh + wifi_hdr_len; + + if (iv[3] & KEY_EXTIV) + sec_hdr_len = 8; + else + sec_hdr_len = 4; + } else { + sec_hdr_len = 0; + } + wifi_hdr_len += sec_hdr_len; + + /* MSDU related stuff LLC - AMSDU subframe header etc */ + msdu_llc_len = is_amsdu ? (14 + 8) : 8; + + mpdu_buf_len = wifi_hdr_len + msdu_llc_len; + + /* "Decap" header to remove from MSDU buffer */ + decap_hdr_pull_bytes = 14; + + /* Allocate a new nbuf for holding the 802.11 header retrieved from the + * status of the now decapped first msdu. 
Leave enough headroom for + * accomodating any radio-tap /prism like PHY header + */ +#define MAX_MONITOR_HEADER (512) + mpdu_buf = qdf_nbuf_alloc(soc->osdev, + MAX_MONITOR_HEADER + mpdu_buf_len, + MAX_MONITOR_HEADER, 4, FALSE); + + if (!mpdu_buf) + goto mpdu_stitch_done; + + /* Copy the MPDU related header and enc headers into the first buffer + * - Note that there can be a 2 byte pad between heaader and enc header + */ + + prev_buf = mpdu_buf; + dest = qdf_nbuf_put_tail(prev_buf, wifi_hdr_len); + if (!dest) + goto mpdu_stitch_fail; + + qdf_mem_copy(dest, hdr_desc, wifi_hdr_len); + hdr_desc += wifi_hdr_len; + +#if 0 + dest = qdf_nbuf_put_tail(prev_buf, sec_hdr_len); + adf_os_mem_copy(dest, hdr_desc, sec_hdr_len); + hdr_desc += sec_hdr_len; +#endif + + /* The first LLC len is copied into the MPDU buffer */ + frag_list_sum_len = 0; + + msdu_orig = head_msdu; + is_first_frag = 1; + amsdu_pad = 0; + + while (msdu_orig) { + + /* TODO: intra AMSDU padding - do we need it ??? */ + + msdu = msdu_orig; + + if (is_first_frag) { + head_frag_list = msdu; + } else { + /* Reload the hdr ptr only on non-first MSDUs */ + rx_desc = qdf_nbuf_data(msdu_orig); + hdr_desc = HAL_RX_DESC_GET_80211_HDR(rx_desc); + } + + /* Copy this buffers MSDU related status into the prev buffer */ + + if (is_first_frag) { + is_first_frag = 0; + } + + dest = qdf_nbuf_put_tail(prev_buf, + msdu_llc_len + amsdu_pad); + + if (!dest) + goto mpdu_stitch_fail; + + dest += amsdu_pad; + qdf_mem_copy(dest, hdr_desc, msdu_llc_len); + + dp_rx_msdus_set_payload(msdu); + + /* Push the MSDU buffer beyond the decap header */ + qdf_nbuf_pull_head(msdu, decap_hdr_pull_bytes); + frag_list_sum_len += msdu_llc_len + qdf_nbuf_len(msdu) + + amsdu_pad; + + /* Set up intra-AMSDU pad to be added to start of next buffer - + * AMSDU pad is 4 byte pad on AMSDU subframe */ + amsdu_pad = (msdu_llc_len + qdf_nbuf_len(msdu)) & 0x3; + amsdu_pad = amsdu_pad ? 
(4 - amsdu_pad) : 0; + + /* TODO FIXME How do we handle MSDUs that have fraglist - Should + * probably iterate all the frags cloning them along the way and + * and also updating the prev_buf pointer + */ + + /* Move to the next */ + prev_buf = msdu; + msdu_orig = qdf_nbuf_next(msdu_orig); + + } + +#if 0 + /* Add in the trailer section - encryption trailer + FCS */ + qdf_nbuf_put_tail(prev_buf, HAL_RX_FCS_LEN); + frag_list_sum_len += HAL_RX_FCS_LEN; +#endif + + frag_list_sum_len -= msdu_llc_len; + + /* TODO: Convert this to suitable adf routines */ + qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list, + frag_list_sum_len); + +mpdu_stitch_done: + /* Check if this buffer contains the PPDU end status for TSF */ + /* Need revist this code to see where we can get tsf timestamp */ +#if 0 + /* PPDU end TLV will be retrieved from monitor status ring */ + last_mpdu = + (*(((u_int32_t *)&rx_desc->attention)) & + RX_ATTENTION_0_LAST_MPDU_MASK) >> + RX_ATTENTION_0_LAST_MPDU_LSB; + + if (last_mpdu) + rx_status->rs_tstamp.tsf = rx_desc->ppdu_end.tsf_timestamp; + +#endif + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s %d mpdu_buf %pK mpdu_buf->len %u", + __func__, __LINE__, + mpdu_buf, mpdu_buf->len); + return mpdu_buf; + +mpdu_stitch_fail: + if ((mpdu_buf) && (decap_format != HAL_HW_RX_DECAP_FORMAT_RAW)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s mpdu_stitch_fail mpdu_buf %pK", + __func__, mpdu_buf); + /* Free the head buffer */ + qdf_nbuf_free(mpdu_buf); + } + return NULL; +} + +/** + * dp_rx_extract_radiotap_info(): Extract and populate information in + * struct mon_rx_status type + * @rx_status: Receive status + * @mon_rx_status: Monitor mode status + * + * Returns: None + */ +static inline +void dp_rx_extract_radiotap_info(struct cdp_mon_status *rx_status, + struct mon_rx_status *rx_mon_status) +{ + rx_mon_status->tsft = rx_status->cdp_rs_tstamp.cdp_tsf; + rx_mon_status->chan_freq = rx_status->rs_freq; + rx_mon_status->chan_num = 
rx_status->rs_channel; + rx_mon_status->chan_flags = rx_status->rs_flags; + rx_mon_status->rate = rx_status->rs_datarate; + /* TODO: rx_mon_status->ant_signal_db */ + /* TODO: rx_mon_status->nr_ant */ + rx_mon_status->mcs = rx_status->cdf_rs_rate_mcs; + rx_mon_status->is_stbc = rx_status->cdp_rs_stbc; + rx_mon_status->sgi = rx_status->cdp_rs_sgi; + /* TODO: rx_mon_status->ldpc */ + /* TODO: rx_mon_status->beamformed */ + /* TODO: rx_mon_status->vht_flags */ + /* TODO: rx_mon_status->vht_flag_values1 */ +} + +QDF_STATUS dp_rx_mon_deliver(struct dp_soc *soc, uint32_t mac_id, + qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu) +{ + struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id); + struct cdp_mon_status *rs = &pdev->rx_mon_recv_status; + qdf_nbuf_t mon_skb, skb_next; + qdf_nbuf_t mon_mpdu = NULL; + + if ((pdev->monitor_vdev == NULL) || + (pdev->monitor_vdev->osif_rx_mon == NULL)) { + goto mon_deliver_fail; + } + + /* restitch mon MPDU for delivery via monitor interface */ + mon_mpdu = dp_rx_mon_restitch_mpdu_from_msdus(soc, mac_id, head_msdu, + tail_msdu, rs); + + if (mon_mpdu && pdev->monitor_vdev && pdev->monitor_vdev->osif_vdev) { + pdev->ppdu_info.rx_status.ppdu_id = + pdev->ppdu_info.com_info.ppdu_id; + qdf_nbuf_update_radiotap(&(pdev->ppdu_info.rx_status), + mon_mpdu, sizeof(struct rx_pkt_tlvs)); + pdev->monitor_vdev->osif_rx_mon( + pdev->monitor_vdev->osif_vdev, mon_mpdu, NULL); + } else { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] mon_mpdu=%pK pdev->monitor_vdev %pK osif_vdev %pK", + __func__, __LINE__, mon_mpdu, pdev->monitor_vdev, + pdev->monitor_vdev->osif_vdev); + goto mon_deliver_fail; + } + + return QDF_STATUS_SUCCESS; + +mon_deliver_fail: + mon_skb = head_msdu; + while (mon_skb) { + skb_next = qdf_nbuf_next(mon_skb); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] mon_skb=%pK len %u", __func__, __LINE__, + mon_skb, mon_skb->len); + + qdf_nbuf_free(mon_skb); + mon_skb = skb_next; + } + return 
QDF_STATUS_E_INVAL; +} + +/** +* dp_rx_mon_dest_process() - Brain of the Rx processing functionality +* Called from the bottom half (tasklet/NET_RX_SOFTIRQ) +* @soc: core txrx main contex +* @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced +* @quota: No. of units (packets) that can be serviced in one shot. +* +* This function implements the core of Rx functionality. This is +* expected to handle only non-error frames. +* +* Return: none +*/ +void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) +{ + struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id); + void *hal_soc; + void *rxdma_dst_ring_desc; + void *mon_dst_srng; + union dp_rx_desc_list_elem_t *head = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + uint32_t ppdu_id; + uint32_t rx_bufs_used; + int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id); + struct cdp_pdev_mon_stats *rx_mon_stats; + + mon_dst_srng = pdev->rxdma_mon_dst_ring[mac_for_pdev].hal_srng; + + if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s %d : HAL Monitor Destination Ring Init Failed -- %pK\n", + __func__, __LINE__, mon_dst_srng); + return; + } + + hal_soc = soc->hal_soc; + + qdf_assert(hal_soc); + + qdf_spin_lock_bh(&pdev->mon_lock); + if (pdev->monitor_vdev == NULL) { + qdf_spin_unlock(&pdev->mon_lock); + return; + } + + if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_dst_srng))) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s %d : HAL Monitor Destination Ring access Failed -- %pK\n", + __func__, __LINE__, mon_dst_srng); + return; + } + + ppdu_id = pdev->ppdu_info.com_info.ppdu_id; + rx_bufs_used = 0; + rx_mon_stats = &pdev->rx_mon_stats; + + while (qdf_likely(rxdma_dst_ring_desc = + hal_srng_dst_peek(hal_soc, mon_dst_srng))) { + qdf_nbuf_t head_msdu, tail_msdu; + uint32_t npackets; + head_msdu = (qdf_nbuf_t) NULL; + tail_msdu = (qdf_nbuf_t) NULL; + + rx_bufs_used += dp_rx_mon_mpdu_pop(soc, 
mac_id, + rxdma_dst_ring_desc, + &head_msdu, &tail_msdu, + &npackets, &ppdu_id, + &head, &tail); + + if (ppdu_id != pdev->ppdu_info.com_info.ppdu_id) { + pdev->mon_ppdu_status = DP_PPDU_STATUS_START; + qdf_mem_zero(&(pdev->ppdu_info.rx_status), + sizeof(pdev->ppdu_info.rx_status)); + pdev->ppdu_info.com_info.last_ppdu_id = + pdev->ppdu_info.com_info.ppdu_id; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s %d ppdu_id %x != ppdu_info.com_info .ppdu_id %x", + __func__, __LINE__, + ppdu_id, pdev->ppdu_info.com_info.ppdu_id); + break; + } + + if (qdf_likely((head_msdu != NULL) && (tail_msdu != NULL))) { + rx_mon_stats->dest_mpdu_done++; + dp_rx_mon_deliver(soc, mac_id, head_msdu, tail_msdu); + } + + rxdma_dst_ring_desc = hal_srng_dst_get_next(hal_soc, + mon_dst_srng); + } + hal_srng_access_end(hal_soc, mon_dst_srng); + + qdf_spin_unlock_bh(&pdev->mon_lock); + + if (rx_bufs_used) { + rx_mon_stats->dest_ppdu_done++; + dp_rx_buffers_replenish(soc, mac_id, + &pdev->rxdma_mon_buf_ring[mac_for_pdev], + &soc->rx_desc_mon[mac_id], rx_bufs_used, &head, &tail); + } +} + +static QDF_STATUS +dp_rx_pdev_mon_buf_attach(struct dp_pdev *pdev, int mac_id) { + uint8_t pdev_id = pdev->pdev_id; + struct dp_soc *soc = pdev->soc; + union dp_rx_desc_list_elem_t *desc_list = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + struct dp_srng *rxdma_srng; + uint32_t rxdma_entries; + struct rx_desc_pool *rx_desc_pool; + QDF_STATUS status; + uint8_t mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id); + + rxdma_srng = &pdev->rxdma_mon_buf_ring[mac_for_pdev]; + + rxdma_entries = rxdma_srng->alloc_size/hal_srng_get_entrysize( + soc->hal_soc, + RXDMA_MONITOR_BUF); + + rx_desc_pool = &soc->rx_desc_mon[mac_id]; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, + "%s: Mon RX Desc Pool[%d] allocation size=%d" + , __func__, pdev_id, rxdma_entries*3); + + status = dp_rx_desc_pool_alloc(soc, mac_id, + rxdma_entries*3, rx_desc_pool); + if (!QDF_IS_STATUS_SUCCESS(status)) { + 
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: dp_rx_desc_pool_alloc() failed \n", __func__); + return status; + } + + rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, + "%s: Mon RX Buffers Replenish pdev_id=%d", + __func__, pdev_id); + + + status = dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, + rxdma_entries, &desc_list, &tail); + + if (!QDF_IS_STATUS_SUCCESS(status)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: dp_rx_buffers_replenish() failed", + __func__); + return status; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +dp_rx_pdev_mon_buf_detach(struct dp_pdev *pdev, int mac_id) +{ + struct dp_soc *soc = pdev->soc; + struct rx_desc_pool *rx_desc_pool; + + rx_desc_pool = &soc->rx_desc_mon[mac_id]; + if (rx_desc_pool->pool_size != 0) + dp_rx_desc_pool_free(soc, mac_id, rx_desc_pool); + + return QDF_STATUS_SUCCESS; +} + +/* + * Allocate and setup link descriptor pool that will be used by HW for + * various link and queue descriptors and managed by WBM + */ +static int dp_mon_link_desc_pool_setup(struct dp_soc *soc, uint32_t mac_id) +{ + struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id); + int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id); + int link_desc_size = hal_get_link_desc_size(soc->hal_soc); + int link_desc_align = hal_get_link_desc_align(soc->hal_soc); + uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx); + uint32_t total_link_descs, total_mem_size; + uint32_t num_link_desc_banks; + uint32_t last_bank_size = 0; + uint32_t entry_size, num_entries; + void *mon_desc_srng; + uint32_t num_replenish_buf; + struct dp_srng *dp_srng; + int i; + + dp_srng = &dp_pdev->rxdma_mon_desc_ring[mac_for_pdev]; + + num_entries = dp_srng->alloc_size/hal_srng_get_entrysize( + soc->hal_soc, RXDMA_MONITOR_DESC); + + /* Round up to power of 2 */ + total_link_descs = 1; + while (total_link_descs < num_entries) + total_link_descs <<= 1; + 
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH, + "%s: total_link_descs: %u, link_desc_size: %d\n", + __func__, total_link_descs, link_desc_size); + + total_mem_size = total_link_descs * link_desc_size; + + total_mem_size += link_desc_align; + + if (total_mem_size <= max_alloc_size) { + num_link_desc_banks = 0; + last_bank_size = total_mem_size; + } else { + num_link_desc_banks = (total_mem_size) / + (max_alloc_size - link_desc_align); + last_bank_size = total_mem_size % + (max_alloc_size - link_desc_align); + } + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN, + "%s: total_mem_size: %d, num_link_desc_banks: %u, \ + max_alloc_size: %d last_bank_size: %d\n", + __func__, total_mem_size, num_link_desc_banks, max_alloc_size, + last_bank_size); + + for (i = 0; i < num_link_desc_banks; i++) { + dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr_unaligned = + qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev, + max_alloc_size, + &(dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_paddr_unaligned)); + + if (!dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_vaddr_unaligned) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Link desc memory allocation failed\n", + __func__); + goto fail; + } + dp_pdev->link_desc_banks[mac_for_pdev][i].size = max_alloc_size; + + dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr = + (void *)((unsigned long) + (dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_vaddr_unaligned) + + ((unsigned long) + (dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_vaddr_unaligned) % + link_desc_align)); + + dp_pdev->link_desc_banks[mac_for_pdev][i].base_paddr = + (unsigned long) + (dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_paddr_unaligned) + + ((unsigned long) + (dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr) - + (unsigned long) + (dp_pdev->link_desc_banks[mac_for_pdev][i]. 
+ base_vaddr_unaligned)); + } + + if (last_bank_size) { + /* Allocate last bank in case total memory required is not exact + * multiple of max_alloc_size + */ + dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr_unaligned = + qdf_mem_alloc_consistent(soc->osdev, + soc->osdev->dev, last_bank_size, + &(dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_paddr_unaligned)); + + if (dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_vaddr_unaligned == NULL) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: allocation failed for mon link desc pool\n", + __func__); + goto fail; + } + dp_pdev->link_desc_banks[mac_for_pdev][i].size = last_bank_size; + + dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr = + (void *)((unsigned long) + (dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_vaddr_unaligned) + + ((unsigned long) + (dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_vaddr_unaligned) % + link_desc_align)); + + dp_pdev->link_desc_banks[mac_for_pdev][i].base_paddr = + (unsigned long) + (dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_paddr_unaligned) + + ((unsigned long) + (dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr) - + (unsigned long) + (dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_vaddr_unaligned)); + } + + /* Allocate and setup link descriptor idle list for HW internal use */ + entry_size = hal_srng_get_entrysize(soc->hal_soc, RXDMA_MONITOR_DESC); + total_mem_size = entry_size * total_link_descs; + + mon_desc_srng = dp_pdev->rxdma_mon_desc_ring[mac_for_pdev].hal_srng; + + num_replenish_buf = 0; + + if (total_mem_size <= max_alloc_size) { + void *desc; + + + for (i = 0; + i < MAX_MON_LINK_DESC_BANKS && + dp_pdev->link_desc_banks[mac_for_pdev][i].base_paddr; + i++) { + uint32_t num_entries = + (dp_pdev->link_desc_banks[mac_for_pdev][i].size - + (unsigned long) + (dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr) - + (unsigned long) + (dp_pdev->link_desc_banks[mac_for_pdev][i]. 
+ base_vaddr_unaligned)) / link_desc_size; + unsigned long paddr = + (unsigned long) + (dp_pdev->link_desc_banks[mac_for_pdev][i].base_paddr); + unsigned long vaddr = + (unsigned long) + (dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr); + + hal_srng_access_start_unlocked(soc->hal_soc, + mon_desc_srng); + + while (num_entries && (desc = + hal_srng_src_get_next(soc->hal_soc, + mon_desc_srng))) { + + hal_set_link_desc_addr(desc, i, paddr); + num_entries--; + num_replenish_buf++; + paddr += link_desc_size; + vaddr += link_desc_size; + } + hal_srng_access_end_unlocked(soc->hal_soc, + mon_desc_srng); + } + } else { + qdf_assert(0); + } + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN, + "%s: successfully replenished %d buffer\n", + __func__, num_replenish_buf); + + return 0; + +fail: + for (i = 0; i < MAX_MON_LINK_DESC_BANKS; i++) { + if (dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_vaddr_unaligned) { + qdf_mem_free_consistent(soc->osdev, soc->osdev->dev, + dp_pdev->link_desc_banks[mac_for_pdev][i].size, + dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_vaddr_unaligned, + dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_paddr_unaligned, 0); + dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_vaddr_unaligned = NULL; + } + } + return QDF_STATUS_E_FAILURE; +} + +/* + * Free link descriptor pool that was setup HW + */ +static void dp_mon_link_desc_pool_cleanup(struct dp_soc *soc, uint32_t mac_id) +{ + struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id); + int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id); + int i; + + for (i = 0; i < MAX_MON_LINK_DESC_BANKS; i++) { + if (dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_vaddr_unaligned) { + qdf_mem_free_consistent(soc->osdev, soc->osdev->dev, + dp_pdev->link_desc_banks[mac_for_pdev][i].size, + dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_vaddr_unaligned, + dp_pdev->link_desc_banks[mac_for_pdev][i]. + base_paddr_unaligned, 0); + dp_pdev->link_desc_banks[mac_for_pdev][i]. 
+ base_vaddr_unaligned = NULL; + } + } +} + +/** + * dp_rx_pdev_mon_attach() - attach DP RX for monitor mode + * @pdev: core txrx pdev context + * + * This function will attach a DP RX for monitor mode instance into + * the main device (SOC) context. Will allocate dp rx resource and + * initialize resources. + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ + +QDF_STATUS +dp_rx_pdev_mon_attach(struct dp_pdev *pdev) { + struct dp_soc *soc = pdev->soc; + QDF_STATUS status; + uint8_t pdev_id = pdev->pdev_id; + int mac_id; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN, + "%s: pdev attach id=%d\n", __func__, pdev_id); + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id); + + status = dp_rx_pdev_mon_buf_attach(pdev, mac_for_pdev); + if (!QDF_IS_STATUS_SUCCESS(status)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: dp_rx_pdev_mon_buf_attach() failed\n", + __func__); + return status; + } + + status = dp_rx_pdev_mon_status_attach(pdev, mac_for_pdev); + if (!QDF_IS_STATUS_SUCCESS(status)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: dp_rx_pdev_mon_status_attach() failed\n", + __func__); + return status; + } + + status = dp_mon_link_desc_pool_setup(soc, mac_for_pdev); + if (!QDF_IS_STATUS_SUCCESS(status)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: dp_mon_link_desc_pool_setup() failed\n", + __func__); + return status; + } + } + qdf_spinlock_create(&pdev->mon_lock); + return QDF_STATUS_SUCCESS; +} +/** + * dp_rx_pdev_mon_detach() - detach dp rx for monitor mode + * @pdev: core txrx pdev context + * + * This function will detach DP RX for monitor mode from + * main device context. 
will free DP Rx resources for + * monitor mode + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS +dp_rx_pdev_mon_detach(struct dp_pdev *pdev) { + uint8_t pdev_id = pdev->pdev_id; + struct dp_soc *soc = pdev->soc; + int mac_id; + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id); + + qdf_spinlock_destroy(&pdev->mon_lock); + dp_mon_link_desc_pool_cleanup(soc, mac_for_pdev); + dp_rx_pdev_mon_status_detach(pdev, mac_for_pdev); + dp_rx_pdev_mon_buf_detach(pdev, mac_for_pdev); + } + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon_status.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon_status.c new file mode 100644 index 0000000000000000000000000000000000000000..6c55cb35b253b3c2443cfe929827a73506b1a356 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_mon_status.c @@ -0,0 +1,856 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ +#include "dp_types.h" +#include "dp_rx.h" +#include "dp_peer.h" +#include "hal_rx.h" +#include "hal_api.h" +#include "qdf_trace.h" +#include "qdf_nbuf.h" +#include "hal_api_mon.h" +#include "dp_rx_mon.h" +#include "dp_internal.h" +#include "qdf_mem.h" /* qdf_mem_malloc,free */ + +/** +* dp_rx_populate_cdp_indication_ppdu() - Populate cdp rx indication structure +* @pdev: pdev ctx +* @ppdu_info: ppdu info structure from ppdu ring +* @ppdu_nbuf: qdf nbuf abstraction for linux skb +* +* Return: none +*/ +#ifdef FEATURE_PERPKT_INFO +static inline void +dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + qdf_nbuf_t ppdu_nbuf) +{ + struct dp_peer *peer; + struct dp_soc *soc = pdev->soc; + struct dp_ast_entry *ast_entry; + struct cdp_rx_indication_ppdu *cdp_rx_ppdu; + uint32_t ast_index; + + cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data; + + cdp_rx_ppdu->first_data_seq_ctrl = + ppdu_info->rx_status.first_data_seq_ctrl; + cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id; + cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len; + cdp_rx_ppdu->duration = ppdu_info->rx_status.duration; + cdp_rx_ppdu->u.bw = ppdu_info->rx_status.bw; + cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count; + cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count; + cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count; + cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss; + cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs; + if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) && + (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC)) + cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US; + else + cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi; + cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc; + cdp_rx_ppdu->u.preamble = ppdu_info->rx_status.preamble_type; + cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type; + cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb; + cdp_rx_ppdu->timestamp = 
ppdu_info->rx_status.tsft; + cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num; + cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed; + cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count + + cdp_rx_ppdu->udp_msdu_count + + cdp_rx_ppdu->other_msdu_count); + + cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok; + if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1) + cdp_rx_ppdu->is_ampdu = 1; + else + cdp_rx_ppdu->is_ampdu = 0; + + cdp_rx_ppdu->tid = ppdu_info->rx_status.tid; + cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate; + + ast_index = ppdu_info->rx_status.ast_index; + if (ast_index > (WLAN_UMAC_PSOC_MAX_PEERS * 2)) { + cdp_rx_ppdu->peer_id = HTT_INVALID_PEER; + return; + } + + ast_entry = soc->ast_table[ast_index]; + if (!ast_entry) { + cdp_rx_ppdu->peer_id = HTT_INVALID_PEER; + return; + } + peer = ast_entry->peer; + if (!peer || peer->peer_ids[0] == HTT_INVALID_PEER) { + cdp_rx_ppdu->peer_id = HTT_INVALID_PEER; + return; + } + + qdf_mem_copy(cdp_rx_ppdu->mac_addr, + peer->mac_addr.raw, DP_MAC_ADDR_LEN); + cdp_rx_ppdu->peer_id = peer->peer_ids[0]; + cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id; +} +#else +static inline void +dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, + qdf_nbuf_t ppdu_nbuf) +{ +} +#endif +/** + * dp_rx_stats_update() - Update per-peer statistics + * @soc: Datapath SOC handle + * @peer: Datapath peer handle + * @ppdu: PPDU Descriptor + * + * Return: None + */ +#ifdef FEATURE_PERPKT_INFO +static void dp_rx_stats_update(struct dp_soc *soc, struct dp_peer *peer, + struct cdp_rx_indication_ppdu *ppdu) +{ + struct dp_pdev *pdev = NULL; + uint8_t mcs, preamble, ac = 0; + uint16_t num_msdu; + + mcs = ppdu->u.mcs; + preamble = ppdu->u.preamble; + num_msdu = ppdu->num_msdu; + + if (!peer) + return; + + pdev = peer->vdev->pdev; + + dp_mark_peer_inact(peer, false); + + if (soc->process_rx_status) + return; + + DP_STATS_UPD(peer, rx.rssi, ppdu->rssi); + + if ((preamble == DOT11_A) || 
(preamble == DOT11_B)) + ppdu->u.nss = 1; + + if (ppdu->u.nss) + DP_STATS_INC(peer, rx.nss[ppdu->u.nss - 1], num_msdu); + + DP_STATS_INC(peer, rx.sgi_count[ppdu->u.gi], num_msdu); + DP_STATS_INC(peer, rx.bw[ppdu->u.bw], num_msdu); + DP_STATS_INC(peer, rx.reception_type[ppdu->u.ppdu_type], num_msdu); + DP_STATS_INCC(peer, rx.ampdu_cnt, num_msdu, ppdu->is_ampdu); + DP_STATS_INCC(peer, rx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu)); + DP_STATS_UPD(peer, rx.rx_rate, mcs); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, + ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < MAX_MCS_11A) && (preamble == DOT11_A))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, + ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < MAX_MCS_11B) && (preamble == DOT11_B))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, + ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < MAX_MCS_11A) && (preamble == DOT11_N))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, + ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu, + ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX))); + DP_STATS_INCC(peer, + rx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX))); + /* + * If invalid TID, it could be a non-qos frame, hence do not update + * any AC counters + */ + ac = TID_TO_WME_AC(ppdu->tid); + if (ppdu->tid != HAL_TID_INVALID) + DP_STATS_INC(peer, rx.wme_ac_type[ac], num_msdu); + + if 
(soc->cdp_soc.ol_ops->update_dp_stats) { + soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev, + &peer->stats, ppdu->peer_id, + UPDATE_PEER_STATS); + + } +} +#endif + +/** + * dp_rx_handle_mcopy_mode() - Allocate and deliver first MSDU payload + * @soc: core txrx main context + * @pdev: pdev strcuture + * @ppdu_info: structure for rx ppdu ring + * + * Return: QDF_STATUS_SUCCESS - If nbuf to be freed by caller + * QDF_STATUS_E_ALREADY - If nbuf not to be freed by caller + */ +#ifdef FEATURE_PERPKT_INFO +static inline QDF_STATUS +dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf) +{ + uint8_t size = 0; + + if (ppdu_info->msdu_info.first_msdu_payload == NULL) + return QDF_STATUS_SUCCESS; + + if (pdev->m_copy_id.rx_ppdu_id == ppdu_info->com_info.ppdu_id) + return QDF_STATUS_SUCCESS; + + pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id; + + /* Include 2 bytes of reserved space appended to the msdu payload */ + size = (ppdu_info->msdu_info.first_msdu_payload - + qdf_nbuf_data(nbuf)) + 2; + ppdu_info->msdu_info.first_msdu_payload = NULL; + + if (qdf_nbuf_pull_head(nbuf, size) == NULL) + return QDF_STATUS_SUCCESS; + + /* only retain RX MSDU payload in the skb */ + qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) - + ppdu_info->msdu_info.payload_len); + dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc, + nbuf, HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id); + return QDF_STATUS_E_ALREADY; +} +#else +static inline QDF_STATUS +dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf) +{ + return QDF_STATUS_SUCCESS; +} +#endif + + +/** +* dp_rx_handle_ppdu_stats() - Allocate and deliver ppdu stats to cdp layer +* @soc: core txrx main context +* @pdev: pdev strcuture +* @ppdu_info: structure for rx ppdu ring +* +* Return: none +*/ +#ifdef FEATURE_PERPKT_INFO +static inline void +dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev, + 
struct hal_rx_ppdu_info *ppdu_info) +{ + qdf_nbuf_t ppdu_nbuf; + struct dp_peer *peer; + struct cdp_rx_indication_ppdu *cdp_rx_ppdu; + + /* + * Do not allocate if fcs error, + * ast idx invalid / fctl invalid + */ + if (ppdu_info->com_info.mpdu_cnt_fcs_ok == 0) + return; + + if (!pdev->mcopy_mode) { + if (!ppdu_info->rx_status.frame_control_info_valid) + return; + + if (ppdu_info->rx_status.ast_index == HAL_AST_IDX_INVALID) + return; + } + ppdu_nbuf = qdf_nbuf_alloc(soc->osdev, + sizeof(struct hal_rx_ppdu_info), 0, 0, FALSE); + if (ppdu_nbuf) { + dp_rx_populate_cdp_indication_ppdu(pdev, ppdu_info, ppdu_nbuf); + qdf_nbuf_put_tail(ppdu_nbuf, + sizeof(struct cdp_rx_indication_ppdu)); + cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data; + + peer = dp_peer_find_by_id(soc, cdp_rx_ppdu->peer_id); + if (peer && cdp_rx_ppdu->peer_id != HTT_INVALID_PEER) { + dp_rx_stats_update(soc, peer, cdp_rx_ppdu); + dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc, + ppdu_nbuf, cdp_rx_ppdu->peer_id, + WDI_NO_VAL, pdev->pdev_id); + } else if (pdev->mcopy_mode) { + dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc, + ppdu_nbuf, HTT_INVALID_PEER, + WDI_NO_VAL, pdev->pdev_id); + } else { + qdf_nbuf_free(ppdu_nbuf); + } + } +} +#else +static inline void +dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev, + struct hal_rx_ppdu_info *ppdu_info) +{ +} +#endif + +/** +* dp_rx_mon_status_process_tlv() - Process status TLV in status +* buffer on Rx status Queue posted by status SRNG processing. 
+* @soc: core txrx main context +* @mac_id: mac_id which is one of 3 mac_ids _ring +* +* Return: none +*/ +static inline void +dp_rx_mon_status_process_tlv(struct dp_soc *soc, uint32_t mac_id, + uint32_t quota) +{ + struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id); + struct hal_rx_ppdu_info *ppdu_info; + qdf_nbuf_t status_nbuf; + uint8_t *rx_tlv; + uint8_t *rx_tlv_start; + uint32_t tlv_status = HAL_TLV_STATUS_BUF_DONE; + QDF_STATUS m_copy_status = QDF_STATUS_SUCCESS; + struct cdp_pdev_mon_stats *rx_mon_stats; + + ppdu_info = &pdev->ppdu_info; + rx_mon_stats = &pdev->rx_mon_stats; + + if (pdev->mon_ppdu_status != DP_PPDU_STATUS_START) + return; + + while (!qdf_nbuf_is_queue_empty(&pdev->rx_status_q)) { + + status_nbuf = qdf_nbuf_queue_remove(&pdev->rx_status_q); + rx_tlv = qdf_nbuf_data(status_nbuf); + rx_tlv_start = rx_tlv; + +#ifndef REMOVE_PKT_LOG +#if defined(WDI_EVENT_ENABLE) + dp_wdi_event_handler(WDI_EVENT_RX_DESC, soc, + status_nbuf, HTT_INVALID_PEER, WDI_NO_VAL, mac_id); +#endif +#endif + if ((pdev->monitor_vdev != NULL) || (pdev->enhanced_stats_en) || + pdev->mcopy_mode) { + + do { + tlv_status = hal_rx_status_get_tlv_info(rx_tlv, + ppdu_info); + + dp_rx_mon_update_dbg_ppdu_stats(ppdu_info, + rx_mon_stats); + + rx_tlv = hal_rx_status_get_next_tlv(rx_tlv); + + if ((rx_tlv - rx_tlv_start) >= RX_BUFFER_SIZE) + break; + + } while (tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE); + } + + if (pdev->mcopy_mode) { + m_copy_status = dp_rx_handle_mcopy_mode(soc, + pdev, ppdu_info, status_nbuf); + if (m_copy_status == QDF_STATUS_SUCCESS) + qdf_nbuf_free(status_nbuf); + } else { + qdf_nbuf_free(status_nbuf); + } + + if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) { + rx_mon_stats->status_ppdu_done++; + if (pdev->enhanced_stats_en || + pdev->mcopy_mode) + dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info); + + pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE; + dp_rx_mon_dest_process(soc, mac_id, quota); + pdev->mon_ppdu_status = DP_PPDU_STATUS_START; + 
pdev->ppdu_info.com_info.last_ppdu_id = + pdev->ppdu_info.com_info.ppdu_id; + } + } + return; +} + +/* + * dp_rx_mon_status_srng_process() - Process monitor status ring + * post the status ring buffer to Rx status Queue for later + * processing when status ring is filled with status TLV. + * Allocate a new buffer to status ring if the filled buffer + * is posted. + * + * @soc: core txrx main context + * @mac_id: mac_id which is one of 3 mac_ids + * @quota: No. of ring entry that can be serviced in one shot. + + * Return: uint32_t: No. of ring entry that is processed. + */ +static inline uint32_t +dp_rx_mon_status_srng_process(struct dp_soc *soc, uint32_t mac_id, + uint32_t quota) +{ + struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id); + void *hal_soc; + void *mon_status_srng; + void *rxdma_mon_status_ring_entry; + QDF_STATUS status; + uint32_t work_done = 0; + int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id); + + mon_status_srng = pdev->rxdma_mon_status_ring[mac_for_pdev].hal_srng; + + qdf_assert(mon_status_srng); + if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) { + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s %d : HAL Monitor Status Ring Init Failed -- %pK\n", + __func__, __LINE__, mon_status_srng); + return work_done; + } + + hal_soc = soc->hal_soc; + + qdf_assert(hal_soc); + + if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng))) + goto done; + + /* mon_status_ring_desc => WBM_BUFFER_RING STRUCT => + * BUFFER_ADDR_INFO STRUCT + */ + while (qdf_likely((rxdma_mon_status_ring_entry = + hal_srng_src_peek(hal_soc, mon_status_srng)) + && quota--)) { + uint32_t rx_buf_cookie; + qdf_nbuf_t status_nbuf; + struct dp_rx_desc *rx_desc; + uint8_t *status_buf; + qdf_dma_addr_t paddr; + uint64_t buf_addr; + + buf_addr = + (HAL_RX_BUFFER_ADDR_31_0_GET( + rxdma_mon_status_ring_entry) | + ((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET( + rxdma_mon_status_ring_entry)) << 32)); + + if (qdf_likely(buf_addr)) { + + rx_buf_cookie = 
+ HAL_RX_BUF_COOKIE_GET( + rxdma_mon_status_ring_entry); + rx_desc = dp_rx_cookie_2_va_mon_status(soc, + rx_buf_cookie); + + qdf_assert(rx_desc); + + status_nbuf = rx_desc->nbuf; + + qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf, + QDF_DMA_FROM_DEVICE); + + status_buf = qdf_nbuf_data(status_nbuf); + + status = hal_get_rx_status_done(status_buf); + + if (status != QDF_STATUS_SUCCESS) { + uint32_t hp, tp; + hal_api_get_tphp(hal_soc, mon_status_srng, + &tp, &hp); + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + "[%s][%d] status not done - hp:%u, tp:%u", + __func__, __LINE__, hp, tp); + /* WAR for missing status: Skip status entry */ + hal_srng_src_get_next(hal_soc, mon_status_srng); + continue; + } + qdf_nbuf_set_pktlen(status_nbuf, RX_BUFFER_SIZE); + + qdf_nbuf_unmap_single(soc->osdev, status_nbuf, + QDF_DMA_FROM_DEVICE); + + /* Put the status_nbuf to queue */ + qdf_nbuf_queue_add(&pdev->rx_status_q, status_nbuf); + + } else { + union dp_rx_desc_list_elem_t *desc_list = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + struct rx_desc_pool *rx_desc_pool; + uint32_t num_alloc_desc; + + rx_desc_pool = &soc->rx_desc_status[mac_id]; + + num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id, + rx_desc_pool, + 1, + &desc_list, + &tail); + + rx_desc = &desc_list->rx_desc; + } + + status_nbuf = dp_rx_nbuf_prepare(soc, pdev); + + /* + * qdf_nbuf alloc or map failed, + * free the dp rx desc to free list, + * fill in NULL dma address at current HP entry, + * keep HP in mon_status_ring unchanged, + * wait next time dp_rx_mon_status_srng_process + * to fill in buffer at current HP. 
+ */ + if (qdf_unlikely(status_nbuf == NULL)) { + union dp_rx_desc_list_elem_t *desc_list = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + struct rx_desc_pool *rx_desc_pool; + + rx_desc_pool = &soc->rx_desc_status[mac_id]; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: fail to allocate or map qdf_nbuf", + __func__); + dp_rx_add_to_free_desc_list(&desc_list, + &tail, rx_desc); + dp_rx_add_desc_list_to_free_list(soc, &desc_list, + &tail, mac_id, rx_desc_pool); + + hal_rxdma_buff_addr_info_set( + rxdma_mon_status_ring_entry, + 0, 0, HAL_RX_BUF_RBM_SW3_BM); + work_done++; + break; + } + + paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0); + + rx_desc->nbuf = status_nbuf; + rx_desc->in_use = 1; + + hal_rxdma_buff_addr_info_set(rxdma_mon_status_ring_entry, + paddr, rx_desc->cookie, HAL_RX_BUF_RBM_SW3_BM); + + hal_srng_src_get_next(hal_soc, mon_status_srng); + work_done++; + } +done: + + hal_srng_access_end(hal_soc, mon_status_srng); + + return work_done; + +} +/* + * dp_rx_mon_status_process() - Process monitor status ring and + * TLV in status ring. + * + * @soc: core txrx main context + * @mac_id: mac_id which is one of 3 mac_ids + * @quota: No. of ring entry that can be serviced in one shot. + + * Return: uint32_t: No. of ring entry that is processed. + */ +static inline uint32_t +dp_rx_mon_status_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) { + uint32_t work_done; + + work_done = dp_rx_mon_status_srng_process(soc, mac_id, quota); + quota -= work_done; + dp_rx_mon_status_process_tlv(soc, mac_id, quota); + + return work_done; +} +/** + * dp_mon_process() - Main monitor mode processing roution. + * This call monitor status ring process then monitor + * destination ring process. + * Called from the bottom half (tasklet/NET_RX_SOFTIRQ) + * @soc: core txrx main context + * @mac_id: mac_id which is one of 3 mac_ids + * @quota: No. of status ring entry that can be serviced in one shot. + + * Return: uint32_t: No. 
of ring entry that is processed.
+ */
+uint32_t
+dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
+	return dp_rx_mon_status_process(soc, mac_id, quota);
+}
+
+/**
+ * dp_rx_pdev_mon_status_detach() - detach dp rx status ring
+ * @pdev: core txrx pdev context
+ * @mac_id: mac_id/pdev_id correspondingly for MCL and WIN
+ *
+ * This function will detach DP RX status ring from
+ * main device context. It will free DP Rx resources for
+ * status ring
+ *
+ * Return: QDF_STATUS_SUCCESS: success
+ *         QDF_STATUS_E_RESOURCES: Error return
+ */
+QDF_STATUS
+dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev, int mac_id)
+{
+	struct dp_soc *soc = pdev->soc;
+	struct rx_desc_pool *rx_desc_pool;
+
+	rx_desc_pool = &soc->rx_desc_status[mac_id];
+	if (rx_desc_pool->pool_size != 0)
+		dp_rx_desc_pool_free(soc, mac_id, rx_desc_pool);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/*
+ * dp_rx_mon_status_buffers_replenish() - replenish monitor status ring with
+ *				rx nbufs called during dp rx
+ *				monitor status ring initialization
+ *
+ * @soc: core txrx main context
+ * @mac_id: mac_id which is one of 3 mac_ids
+ * @dp_rxdma_srng: dp monitor status circular ring
+ * @rx_desc_pool: Pointer to Rx descriptor pool
+ * @num_req_buffers: number of buffers to be replenished
+ * @desc_list: list of descs if called from dp rx monitor status
+ *	       process or NULL during dp rx initialization or
+ *	       out of buffer interrupt
+ * @tail: tail of descs list
+ * @owner: who owns the nbuf (host, NSS etc...)
+ * Return: return success or failure + */ +static inline +QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc, + uint32_t mac_id, + struct dp_srng *dp_rxdma_srng, + struct rx_desc_pool *rx_desc_pool, + uint32_t num_req_buffers, + union dp_rx_desc_list_elem_t **desc_list, + union dp_rx_desc_list_elem_t **tail, + uint8_t owner) +{ + uint32_t num_alloc_desc; + uint16_t num_desc_to_free = 0; + uint32_t num_entries_avail; + uint32_t count = 0; + int sync_hw_ptr = 1; + qdf_dma_addr_t paddr; + qdf_nbuf_t rx_netbuf; + void *rxdma_ring_entry; + union dp_rx_desc_list_elem_t *next; + void *rxdma_srng; + struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id); + + rxdma_srng = dp_rxdma_srng->hal_srng; + + qdf_assert(rxdma_srng); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] requested %d buffers for replenish\n", + __func__, __LINE__, num_req_buffers); + + /* + * if desc_list is NULL, allocate the descs from freelist + */ + if (!(*desc_list)) { + + num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id, + rx_desc_pool, + num_req_buffers, + desc_list, + tail); + + if (!num_alloc_desc) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "[%s][%d] no free rx_descs in freelist\n", + __func__, __LINE__); + return QDF_STATUS_E_NOMEM; + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] %d rx desc allocated\n", __func__, __LINE__, + num_alloc_desc); + + num_req_buffers = num_alloc_desc; + } + + hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); + num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, + rxdma_srng, sync_hw_ptr); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] no of available entries in rxdma ring: %d\n", + __func__, __LINE__, num_entries_avail); + + if (num_entries_avail < num_req_buffers) { + num_desc_to_free = num_req_buffers - num_entries_avail; + num_req_buffers = num_entries_avail; + } + + while (count < num_req_buffers) { + rx_netbuf = dp_rx_nbuf_prepare(dp_soc, 
dp_pdev); + + /* + * qdf_nbuf alloc or map failed, + * keep HP in mon_status_ring unchanged, + * wait dp_rx_mon_status_srng_process + * to fill in buffer at current HP. + */ + if (qdf_unlikely(rx_netbuf == NULL)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: qdf_nbuf allocate or map fail, count %d", + __func__, count); + break; + } + + paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0); + + next = (*desc_list)->next; + rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc, + rxdma_srng); + + if (qdf_unlikely(rxdma_ring_entry == NULL)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "[%s][%d] rxdma_ring_entry is NULL, count - %d\n", + __func__, __LINE__, count); + qdf_nbuf_unmap_single(dp_soc->osdev, rx_netbuf, + QDF_DMA_BIDIRECTIONAL); + qdf_nbuf_free(rx_netbuf); + break; + } + + (*desc_list)->rx_desc.nbuf = rx_netbuf; + (*desc_list)->rx_desc.in_use = 1; + count++; + + hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr, + (*desc_list)->rx_desc.cookie, owner); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] rx_desc=%pK, cookie=%d, nbuf=%pK, \ + paddr=%pK\n", + __func__, __LINE__, &(*desc_list)->rx_desc, + (*desc_list)->rx_desc.cookie, rx_netbuf, + (void *)paddr); + + *desc_list = next; + } + + hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "successfully replenished %d buffers\n", num_req_buffers); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%d rx desc added back to free list\n", num_desc_to_free); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] desc_list=%pK, tail=%pK rx_desc=%pK, cookie=%d\n", + __func__, __LINE__, desc_list, tail, &(*desc_list)->rx_desc, + (*desc_list)->rx_desc.cookie); + + /* + * add any available free desc back to the free list + */ + if (*desc_list) { + dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail, + mac_id, rx_desc_pool); + } + + return QDF_STATUS_SUCCESS; +} +/** + * 
dp_rx_pdev_mon_status_attach() - attach DP RX monitor status ring + * @pdev: core txrx pdev context + * + * This function will attach a DP RX monitor status ring into pDEV + * and replenish monitor status ring with buffer. + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS +dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev, int ring_id) { + struct dp_soc *soc = pdev->soc; + union dp_rx_desc_list_elem_t *desc_list = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + struct dp_srng *rxdma_srng; + uint32_t rxdma_entries; + struct rx_desc_pool *rx_desc_pool; + QDF_STATUS status; + int mac_for_pdev = dp_get_mac_id_for_mac(soc, ring_id); + + rxdma_srng = &pdev->rxdma_mon_status_ring[mac_for_pdev]; + + rxdma_entries = rxdma_srng->alloc_size/hal_srng_get_entrysize( + soc->hal_soc, RXDMA_MONITOR_STATUS); + + rx_desc_pool = &soc->rx_desc_status[ring_id]; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, + "%s: Mon RX Status Pool[%d] allocation size=%d\n", + __func__, ring_id, rxdma_entries); + + status = dp_rx_desc_pool_alloc(soc, ring_id, rxdma_entries+1, + rx_desc_pool); + if (!QDF_IS_STATUS_SUCCESS(status)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: dp_rx_desc_pool_alloc() failed \n", __func__); + return status; + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW, + "%s: Mon RX Status Buffers Replenish ring_id=%d\n", + __func__, ring_id); + + status = dp_rx_mon_status_buffers_replenish(soc, ring_id, rxdma_srng, + rx_desc_pool, rxdma_entries, &desc_list, &tail, + HAL_RX_BUF_RBM_SW3_BM); + if (!QDF_IS_STATUS_SUCCESS(status)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: dp_rx_buffers_replenish() failed \n", __func__); + return status; + } + + qdf_nbuf_queue_init(&pdev->rx_status_q); + + pdev->mon_ppdu_status = DP_PPDU_STATUS_START; + pdev->ppdu_info.com_info.last_ppdu_id = 0; + + qdf_mem_zero(&(pdev->ppdu_info.rx_status), + sizeof(pdev->ppdu_info.rx_status)); + + 
qdf_mem_zero(&pdev->rx_mon_stats, + sizeof(pdev->rx_mon_stats)); + + dp_rx_mon_init_dbg_ppdu_stats(&pdev->ppdu_info, + &pdev->rx_mon_stats); + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_stats.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..f4fc044840c2236cfdb57552c0e6450b4e5b27fb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_stats.c @@ -0,0 +1,3332 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ +#include "qdf_types.h" +#include "htt_stats.h" +#include "dp_types.h" +#include "dp_internal.h" + +#define DP_MAX_STRING_LEN 500 + +#define DP_HTT_HW_INTR_NAME_LEN HTT_STATS_MAX_HW_INTR_NAME_LEN +#define DP_HTT_HW_MODULE_NAME_LEN HTT_STATS_MAX_HW_MODULE_NAME_LEN +#define DP_HTT_COUNTER_NAME_LEN HTT_MAX_COUNTER_NAME +#define DP_HTT_LOW_WM_HIT_COUNT_LEN HTT_STATS_LOW_WM_BINS +#define DP_HTT_HIGH_WM_HIT_COUNT_LEN HTT_STATS_HIGH_WM_BINS +#define DP_HTT_TX_MCS_LEN HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS +#define DP_HTT_TX_SU_MCS_LEN HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS +#define DP_HTT_TX_MU_MCS_LEN HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS +#define DP_HTT_TX_NSS_LEN HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS +#define DP_HTT_TX_BW_LEN HTT_TX_PDEV_STATS_NUM_BW_COUNTERS +#define DP_HTT_TX_PREAM_LEN HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES +#define DP_HTT_TX_PDEV_GI_LEN HTT_TX_PDEV_STATS_NUM_GI_COUNTERS +#define DP_HTT_TX_DCM_LEN HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS +#define DP_HTT_RX_MCS_LEN HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS +#define DP_HTT_RX_NSS_LEN HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS +#define DP_HTT_RX_DCM_LEN HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS +#define DP_HTT_RX_BW_LEN HTT_RX_PDEV_STATS_NUM_BW_COUNTERS +#define DP_HTT_RX_PREAM_LEN HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES +#define DP_HTT_RSSI_CHAIN_LEN HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS +#define DP_HTT_RX_GI_LEN HTT_RX_PDEV_STATS_NUM_GI_COUNTERS +#define DP_HTT_FW_RING_MGMT_SUBTYPE_LEN HTT_STATS_SUBTYPE_MAX +#define DP_HTT_FW_RING_CTRL_SUBTYPE_LEN HTT_STATS_SUBTYPE_MAX +#define DP_HTT_FW_RING_MPDU_ERR_LEN HTT_RX_STATS_RXDMA_MAX_ERR +#define DP_HTT_TID_NAME_LEN MAX_HTT_TID_NAME +#define DP_HTT_PEER_NUM_SS HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS +#define DP_HTT_PDEV_TX_GI_LEN HTT_TX_PDEV_STATS_NUM_GI_COUNTERS + +/* + * dp_print_stats_string_tlv: display htt_stats_string_tlv + * @tag_buf: buffer containing the tlv htt_stats_string_tlv + * + * return:void + */ +static inline void dp_print_stats_string_tlv(uint32_t *tag_buf) +{ + 
htt_stats_string_tlv *dp_stats_buf = + (htt_stats_string_tlv *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *data = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!data) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + DP_TRACE_STATS(FATAL, "HTT_STATS_STRING_TLV:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&data[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->data[i]); + } + DP_TRACE_STATS(FATAL, "data = %s\n", data); + qdf_mem_free(data); +} + +/* + * dp_print_tx_pdev_stats_cmn_tlv: display htt_tx_pdev_stats_cmn_tlv + * @tag_buf: buffer containing the tlv htt_tx_pdev_stats_cmn_tlv + * + * return:void + */ +static inline void dp_print_tx_pdev_stats_cmn_tlv(uint32_t *tag_buf) +{ + htt_tx_pdev_stats_cmn_tlv *dp_stats_buf = + (htt_tx_pdev_stats_cmn_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_PDEV_STATS_CMN_TLV:"); + DP_TRACE_STATS(FATAL, "mac_id__word = %d", + dp_stats_buf->mac_id__word); + DP_TRACE_STATS(FATAL, "hw_queued = %d", + dp_stats_buf->hw_queued); + DP_TRACE_STATS(FATAL, "hw_reaped = %d", + dp_stats_buf->hw_reaped); + DP_TRACE_STATS(FATAL, "underrun = %d", + dp_stats_buf->underrun); + DP_TRACE_STATS(FATAL, "hw_paused = %d", + dp_stats_buf->hw_paused); + DP_TRACE_STATS(FATAL, "hw_flush = %d", + dp_stats_buf->hw_flush); + DP_TRACE_STATS(FATAL, "hw_filt = %d", + dp_stats_buf->hw_filt); + DP_TRACE_STATS(FATAL, "tx_abort = %d", + dp_stats_buf->tx_abort); + DP_TRACE_STATS(FATAL, "mpdu_requeued = %d", + dp_stats_buf->mpdu_requed); + DP_TRACE_STATS(FATAL, "tx_xretry = %d", + dp_stats_buf->tx_xretry); + DP_TRACE_STATS(FATAL, "data_rc = %d", + dp_stats_buf->data_rc); + DP_TRACE_STATS(FATAL, "mpdu_dropped_xretry = %d", + dp_stats_buf->mpdu_dropped_xretry); + DP_TRACE_STATS(FATAL, "illegal_rate_phy_err = %d", + dp_stats_buf->illgl_rate_phy_err); + DP_TRACE_STATS(FATAL, "cont_xretry = %d", + 
dp_stats_buf->cont_xretry); + DP_TRACE_STATS(FATAL, "tx_timeout = %d", + dp_stats_buf->tx_timeout); + DP_TRACE_STATS(FATAL, "pdev_resets = %d", + dp_stats_buf->pdev_resets); + DP_TRACE_STATS(FATAL, "phy_underrun = %d", + dp_stats_buf->phy_underrun); + DP_TRACE_STATS(FATAL, "txop_ovf = %d", + dp_stats_buf->txop_ovf); + DP_TRACE_STATS(FATAL, "seq_posted = %d", + dp_stats_buf->seq_posted); + DP_TRACE_STATS(FATAL, "seq_failed_queueing = %d", + dp_stats_buf->seq_failed_queueing); + DP_TRACE_STATS(FATAL, "seq_completed = %d", + dp_stats_buf->seq_completed); + DP_TRACE_STATS(FATAL, "seq_restarted = %d", + dp_stats_buf->seq_restarted); + DP_TRACE_STATS(FATAL, "mu_seq_posted = %d", + dp_stats_buf->mu_seq_posted); + DP_TRACE_STATS(FATAL, "seq_switch_hw_paused = %d", + dp_stats_buf->seq_switch_hw_paused); + DP_TRACE_STATS(FATAL, "next_seq_posted_dsr = %d", + dp_stats_buf->next_seq_posted_dsr); + DP_TRACE_STATS(FATAL, "seq_posted_isr = %d", + dp_stats_buf->seq_posted_isr); + DP_TRACE_STATS(FATAL, "seq_ctrl_cached = %d", + dp_stats_buf->seq_ctrl_cached); + DP_TRACE_STATS(FATAL, "mpdu_count_tqm = %d", + dp_stats_buf->mpdu_count_tqm); + DP_TRACE_STATS(FATAL, "msdu_count_tqm = %d", + dp_stats_buf->msdu_count_tqm); + DP_TRACE_STATS(FATAL, "mpdu_removed_tqm = %d", + dp_stats_buf->mpdu_removed_tqm); + DP_TRACE_STATS(FATAL, "msdu_removed_tqm = %d", + dp_stats_buf->msdu_removed_tqm); + DP_TRACE_STATS(FATAL, "mpdus_sw_flush = %d", + dp_stats_buf->mpdus_sw_flush); + DP_TRACE_STATS(FATAL, "mpdus_hw_filter = %d", + dp_stats_buf->mpdus_hw_filter); + DP_TRACE_STATS(FATAL, "mpdus_truncated = %d", + dp_stats_buf->mpdus_truncated); + DP_TRACE_STATS(FATAL, "mpdus_ack_failed = %d", + dp_stats_buf->mpdus_ack_failed); + DP_TRACE_STATS(FATAL, "mpdus_expired = %d", + dp_stats_buf->mpdus_expired); + DP_TRACE_STATS(FATAL, "mpdus_seq_hw_retry = %d", + dp_stats_buf->mpdus_seq_hw_retry); + DP_TRACE_STATS(FATAL, "ack_tlv_proc = %d", + dp_stats_buf->ack_tlv_proc); + DP_TRACE_STATS(FATAL, 
"coex_abort_mpdu_cnt_valid = %d", + dp_stats_buf->coex_abort_mpdu_cnt_valid); + DP_TRACE_STATS(FATAL, "coex_abort_mpdu_cnt = %d\n", + dp_stats_buf->coex_abort_mpdu_cnt); +} + +/* + * dp_print_tx_pdev_stats_urrn_tlv_v: display htt_tx_pdev_stats_urrn_tlv_v + * @tag_buf: buffer containing the tlv htt_tx_pdev_stats_urrn_tlv_v + * + * return:void + */ +static inline void dp_print_tx_pdev_stats_urrn_tlv_v(uint32_t *tag_buf) +{ + htt_tx_pdev_stats_urrn_tlv_v *dp_stats_buf = + (htt_tx_pdev_stats_urrn_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *urrn_stats = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!urrn_stats) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_PDEV_MAX_URRN_STATS); + DP_TRACE_STATS(FATAL, "HTT_TX_PDEV_STATS_URRN_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&urrn_stats[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->urrn_stats[i]); + } + DP_TRACE_STATS(FATAL, "urrn_stats = %s\n", urrn_stats); + qdf_mem_free(urrn_stats); +} + +/* + * dp_print_tx_pdev_stats_flush_tlv_v: display htt_tx_pdev_stats_flush_tlv_v + * @tag_buf: buffer containing the tlv htt_tx_pdev_stats_flush_tlv_v + * + * return:void + */ +static inline void dp_print_tx_pdev_stats_flush_tlv_v(uint32_t *tag_buf) +{ + htt_tx_pdev_stats_flush_tlv_v *dp_stats_buf = + (htt_tx_pdev_stats_flush_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *flush_errs = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!flush_errs) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + tag_len = qdf_min(tag_len, + (uint32_t)HTT_TX_PDEV_MAX_FLUSH_REASON_STATS); + + DP_TRACE_STATS(FATAL, "HTT_TX_PDEV_STATS_FLUSH_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += 
qdf_snprint(&flush_errs[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->flush_errs[i]); + } + DP_TRACE_STATS(FATAL, "flush_errs = %s\n", flush_errs); + qdf_mem_free(flush_errs); +} + +/* + * dp_print_tx_pdev_stats_sifs_tlv_v: display htt_tx_pdev_stats_sifs_tlv_v + * @tag_buf: buffer containing the tlv htt_tx_pdev_stats_sifs_tlv_v + * + * return:void + */ +static inline void dp_print_tx_pdev_stats_sifs_tlv_v(uint32_t *tag_buf) +{ + htt_tx_pdev_stats_sifs_tlv_v *dp_stats_buf = + (htt_tx_pdev_stats_sifs_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *sifs_status = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!sifs_status) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_PDEV_MAX_SIFS_BURST_STATS); + + DP_TRACE_STATS(FATAL, "HTT_TX_PDEV_STATS_SIFS_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&sifs_status[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->sifs_status[i]); + } + DP_TRACE_STATS(FATAL, "sifs_status = %s\n", sifs_status); + qdf_mem_free(sifs_status); +} + +/* + * dp_print_tx_pdev_stats_phy_err_tlv_v: display htt_tx_pdev_stats_phy_err_tlv_v + * @tag_buf: buffer containing the tlv htt_tx_pdev_stats_phy_err_tlv_v + * + * return:void + */ +static inline void dp_print_tx_pdev_stats_phy_err_tlv_v(uint32_t *tag_buf) +{ + htt_tx_pdev_stats_phy_err_tlv_v *dp_stats_buf = + (htt_tx_pdev_stats_phy_err_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *phy_errs = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!phy_errs) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_PDEV_MAX_PHY_ERR_STATS); + + DP_TRACE_STATS(FATAL, "HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:"); + for 
(i = 0; i < tag_len; i++) { + index += qdf_snprint(&phy_errs[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->phy_errs[i]); + } + DP_TRACE_STATS(FATAL, "phy_errs = %s\n", phy_errs); + qdf_mem_free(phy_errs); +} + +/* + * dp_print_hw_stats_intr_misc_tlv: display htt_hw_stats_intr_misc_tlv + * @tag_buf: buffer containing the tlv htt_hw_stats_intr_misc_tlv + * + * return:void + */ +static inline void dp_print_hw_stats_intr_misc_tlv(uint32_t *tag_buf) +{ + htt_hw_stats_intr_misc_tlv *dp_stats_buf = + (htt_hw_stats_intr_misc_tlv *)tag_buf; + uint8_t i; + uint16_t index = 0; + char *hw_intr_name = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!hw_intr_name) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + DP_TRACE_STATS(FATAL, "HTT_HW_STATS_INTR_MISC_TLV:"); + for (i = 0; i < DP_HTT_HW_INTR_NAME_LEN; i++) { + index += qdf_snprint(&hw_intr_name[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->hw_intr_name[i]); + } + DP_TRACE_STATS(FATAL, "hw_intr_name = %s ", hw_intr_name); + DP_TRACE_STATS(FATAL, "mask = %d", + dp_stats_buf->mask); + DP_TRACE_STATS(FATAL, "count = %d\n", + dp_stats_buf->count); + qdf_mem_free(hw_intr_name); +} + +/* + * dp_print_hw_stats_wd_timeout_tlv: display htt_hw_stats_wd_timeout_tlv + * @tag_buf: buffer containing the tlv htt_hw_stats_wd_timeout_tlv + * + * return:void + */ +static inline void dp_print_hw_stats_wd_timeout_tlv(uint32_t *tag_buf) +{ + htt_hw_stats_wd_timeout_tlv *dp_stats_buf = + (htt_hw_stats_wd_timeout_tlv *)tag_buf; + uint8_t i; + uint16_t index = 0; + char *hw_module_name = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!hw_module_name) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + DP_TRACE_STATS(FATAL, "HTT_HW_STATS_WD_TIMEOUT_TLV:"); + for (i = 0; i < DP_HTT_HW_MODULE_NAME_LEN; i++) { + index += qdf_snprint(&hw_module_name[index], + DP_MAX_STRING_LEN - index, + " 
%d:%d,", i, dp_stats_buf->hw_module_name[i]); + } + DP_TRACE_STATS(FATAL, "hw_module_name = %s ", hw_module_name); + DP_TRACE_STATS(FATAL, "count = %d", + dp_stats_buf->count); + qdf_mem_free(hw_module_name); +} + +/* + * dp_print_hw_stats_pdev_errs_tlv: display htt_hw_stats_pdev_errs_tlv + * @tag_buf: buffer containing the tlv htt_hw_stats_pdev_errs_tlv + * + * return:void + */ +static inline void dp_print_hw_stats_pdev_errs_tlv(uint32_t *tag_buf) +{ + htt_hw_stats_pdev_errs_tlv *dp_stats_buf = + (htt_hw_stats_pdev_errs_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_HW_STATS_PDEV_ERRS_TLV:"); + DP_TRACE_STATS(FATAL, "mac_id__word = %d", + dp_stats_buf->mac_id__word); + DP_TRACE_STATS(FATAL, "tx_abort = %d", + dp_stats_buf->tx_abort); + DP_TRACE_STATS(FATAL, "tx_abort_fail_count = %d", + dp_stats_buf->tx_abort_fail_count); + DP_TRACE_STATS(FATAL, "rx_abort = %d", + dp_stats_buf->rx_abort); + DP_TRACE_STATS(FATAL, "rx_abort_fail_count = %d", + dp_stats_buf->rx_abort_fail_count); + DP_TRACE_STATS(FATAL, "warm_reset = %d", + dp_stats_buf->warm_reset); + DP_TRACE_STATS(FATAL, "cold_reset = %d", + dp_stats_buf->cold_reset); + DP_TRACE_STATS(FATAL, "tx_flush = %d", + dp_stats_buf->tx_flush); + DP_TRACE_STATS(FATAL, "tx_glb_reset = %d", + dp_stats_buf->tx_glb_reset); + DP_TRACE_STATS(FATAL, "tx_txq_reset = %d", + dp_stats_buf->tx_txq_reset); + DP_TRACE_STATS(FATAL, "rx_timeout_reset = %d\n", + dp_stats_buf->rx_timeout_reset); +} + +/* + * dp_print_msdu_flow_stats_tlv: display htt_msdu_flow_stats_tlv + * @tag_buf: buffer containing the tlv htt_msdu_flow_stats_tlv + * + * return:void + */ +static inline void dp_print_msdu_flow_stats_tlv(uint32_t *tag_buf) +{ + htt_msdu_flow_stats_tlv *dp_stats_buf = + (htt_msdu_flow_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_MSDU_FLOW_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "last_update_timestamp = %d", + dp_stats_buf->last_update_timestamp); + DP_TRACE_STATS(FATAL, "last_add_timestamp = %d", + dp_stats_buf->last_add_timestamp); + 
DP_TRACE_STATS(FATAL, "last_remove_timestamp = %d", + dp_stats_buf->last_remove_timestamp); + DP_TRACE_STATS(FATAL, "total_processed_msdu_count = %d", + dp_stats_buf->total_processed_msdu_count); + DP_TRACE_STATS(FATAL, "cur_msdu_count_in_flowq = %d", + dp_stats_buf->cur_msdu_count_in_flowq); + DP_TRACE_STATS(FATAL, "sw_peer_id = %d", + dp_stats_buf->sw_peer_id); + DP_TRACE_STATS(FATAL, "tx_flow_no__tid_num__drop_rule = %d\n", + dp_stats_buf->tx_flow_no__tid_num__drop_rule); +} + +/* + * dp_print_tx_tid_stats_tlv: display htt_tx_tid_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_tid_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_tid_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_tid_stats_tlv *dp_stats_buf = + (htt_tx_tid_stats_tlv *)tag_buf; + uint8_t i; + uint16_t index = 0; + char *tid_name = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!tid_name) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + DP_TRACE_STATS(FATAL, "HTT_TX_TID_STATS_TLV:"); + for (i = 0; i < DP_HTT_TID_NAME_LEN; i++) { + index += qdf_snprint(&tid_name[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->tid_name[i]); + } + DP_TRACE_STATS(FATAL, "tid_name = %s ", tid_name); + DP_TRACE_STATS(FATAL, "sw_peer_id__tid_num = %d", + dp_stats_buf->sw_peer_id__tid_num); + DP_TRACE_STATS(FATAL, "num_sched_pending__num_ppdu_in_hwq = %d", + dp_stats_buf->num_sched_pending__num_ppdu_in_hwq); + DP_TRACE_STATS(FATAL, "tid_flags = %d", + dp_stats_buf->tid_flags); + DP_TRACE_STATS(FATAL, "hw_queued = %d", + dp_stats_buf->hw_queued); + DP_TRACE_STATS(FATAL, "hw_reaped = %d", + dp_stats_buf->hw_reaped); + DP_TRACE_STATS(FATAL, "mpdus_hw_filter = %d", + dp_stats_buf->mpdus_hw_filter); + DP_TRACE_STATS(FATAL, "qdepth_bytes = %d", + dp_stats_buf->qdepth_bytes); + DP_TRACE_STATS(FATAL, "qdepth_num_msdu = %d", + dp_stats_buf->qdepth_num_msdu); + DP_TRACE_STATS(FATAL, "qdepth_num_mpdu = %d", + 
dp_stats_buf->qdepth_num_mpdu); + DP_TRACE_STATS(FATAL, "last_scheduled_tsmp = %d", + dp_stats_buf->last_scheduled_tsmp); + DP_TRACE_STATS(FATAL, "pause_module_id = %d", + dp_stats_buf->pause_module_id); + DP_TRACE_STATS(FATAL, "block_module_id = %d\n", + dp_stats_buf->block_module_id); + DP_TRACE_STATS(FATAL, "tid_tx_airtime = %d\n", + dp_stats_buf->tid_tx_airtime); + qdf_mem_free(tid_name); +} + +#ifdef CONFIG_WIN +/* + * dp_print_tx_tid_stats_v1_tlv: display htt_tx_tid_stats_v1_tlv + * @tag_buf: buffer containing the tlv htt_tx_tid_stats_v1_tlv + * + * return:void + */ +static inline void dp_print_tx_tid_stats_v1_tlv(uint32_t *tag_buf) +{ + htt_tx_tid_stats_v1_tlv *dp_stats_buf = + (htt_tx_tid_stats_v1_tlv *)tag_buf; + uint8_t i; + uint16_t index = 0; + char *tid_name = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!tid_name) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + DP_TRACE_STATS(FATAL, "HTT_TX_TID_STATS_V1_TLV:"); + for (i = 0; i < DP_HTT_TID_NAME_LEN; i++) { + index += qdf_snprint(&tid_name[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->tid_name[i]); + } + DP_TRACE_STATS(FATAL, "tid_name = %s ", tid_name); + DP_TRACE_STATS(FATAL, "sw_peer_id__tid_num = %d", + dp_stats_buf->sw_peer_id__tid_num); + DP_TRACE_STATS(FATAL, "num_sched_pending__num_ppdu_in_hwq = %d", + dp_stats_buf->num_sched_pending__num_ppdu_in_hwq); + DP_TRACE_STATS(FATAL, "tid_flags = %d", + dp_stats_buf->tid_flags); + DP_TRACE_STATS(FATAL, "max_qdepth_bytes = %d", + dp_stats_buf->max_qdepth_bytes); + DP_TRACE_STATS(FATAL, "max_qdepth_n_msdus = %d", + dp_stats_buf->max_qdepth_n_msdus); + DP_TRACE_STATS(FATAL, "rsvd = %d", + dp_stats_buf->rsvd); + DP_TRACE_STATS(FATAL, "qdepth_bytes = %d", + dp_stats_buf->qdepth_bytes); + DP_TRACE_STATS(FATAL, "qdepth_num_msdu = %d", + dp_stats_buf->qdepth_num_msdu); + DP_TRACE_STATS(FATAL, "qdepth_num_mpdu = %d", + dp_stats_buf->qdepth_num_mpdu); + DP_TRACE_STATS(FATAL, 
"last_scheduled_tsmp = %d", + dp_stats_buf->last_scheduled_tsmp); + DP_TRACE_STATS(FATAL, "pause_module_id = %d", + dp_stats_buf->pause_module_id); + DP_TRACE_STATS(FATAL, "block_module_id = %d\n", + dp_stats_buf->block_module_id); + DP_TRACE_STATS(FATAL, "tid_tx_airtime = %d\n", + dp_stats_buf->tid_tx_airtime); + qdf_mem_free(tid_name); +} +#endif + +/* + * dp_print_rx_tid_stats_tlv: display htt_rx_tid_stats_tlv + * @tag_buf: buffer containing the tlv htt_rx_tid_stats_tlv + * + * return:void + */ +static inline void dp_print_rx_tid_stats_tlv(uint32_t *tag_buf) +{ + htt_rx_tid_stats_tlv *dp_stats_buf = + (htt_rx_tid_stats_tlv *)tag_buf; + uint8_t i; + uint16_t index = 0; + char *tid_name = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!tid_name) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + DP_TRACE_STATS(FATAL, "HTT_RX_TID_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "sw_peer_id__tid_num = %d", + dp_stats_buf->sw_peer_id__tid_num); + for (i = 0; i < DP_HTT_TID_NAME_LEN; i++) { + index += qdf_snprint(&tid_name[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->tid_name[i]); + } + DP_TRACE_STATS(FATAL, "tid_name = %s ", tid_name); + DP_TRACE_STATS(FATAL, "dup_in_reorder = %d", + dp_stats_buf->dup_in_reorder); + DP_TRACE_STATS(FATAL, "dup_past_outside_window = %d", + dp_stats_buf->dup_past_outside_window); + DP_TRACE_STATS(FATAL, "dup_past_within_window = %d", + dp_stats_buf->dup_past_within_window); + DP_TRACE_STATS(FATAL, "rxdesc_err_decrypt = %d\n", + dp_stats_buf->rxdesc_err_decrypt); + qdf_mem_free(tid_name); +} + +/* + * dp_print_counter_tlv: display htt_counter_tlv + * @tag_buf: buffer containing the tlv htt_counter_tlv + * + * return:void + */ +static inline void dp_print_counter_tlv(uint32_t *tag_buf) +{ + htt_counter_tlv *dp_stats_buf = + (htt_counter_tlv *)tag_buf; + uint8_t i; + uint16_t index = 0; + char *counter_name = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!counter_name) { + 
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + DP_TRACE_STATS(FATAL, "HTT_COUNTER_TLV:"); + for (i = 0; i < DP_HTT_COUNTER_NAME_LEN; i++) { + index += qdf_snprint(&counter_name[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->counter_name[i]); + } + DP_TRACE_STATS(FATAL, "counter_name = %s ", counter_name); + DP_TRACE_STATS(FATAL, "count = %d\n", + dp_stats_buf->count); + qdf_mem_free(counter_name); +} + +/* + * dp_print_peer_stats_cmn_tlv: display htt_peer_stats_cmn_tlv + * @tag_buf: buffer containing the tlv htt_peer_stats_cmn_tlv + * + * return:void + */ +static inline void dp_print_peer_stats_cmn_tlv(uint32_t *tag_buf) +{ + htt_peer_stats_cmn_tlv *dp_stats_buf = + (htt_peer_stats_cmn_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_PEER_STATS_CMN_TLV:"); + DP_TRACE_STATS(FATAL, "ppdu_cnt = %d", + dp_stats_buf->ppdu_cnt); + DP_TRACE_STATS(FATAL, "mpdu_cnt = %d", + dp_stats_buf->mpdu_cnt); + DP_TRACE_STATS(FATAL, "msdu_cnt = %d", + dp_stats_buf->msdu_cnt); + DP_TRACE_STATS(FATAL, "pause_bitmap = %d", + dp_stats_buf->pause_bitmap); + DP_TRACE_STATS(FATAL, "block_bitmap = %d", + dp_stats_buf->block_bitmap); + DP_TRACE_STATS(FATAL, "current_timestamp = %d\n", + dp_stats_buf->current_timestamp); +} + +/* + * dp_print_peer_details_tlv: display htt_peer_details_tlv + * @tag_buf: buffer containing the tlv htt_peer_details_tlv + * + * return:void + */ +static inline void dp_print_peer_details_tlv(uint32_t *tag_buf) +{ + htt_peer_details_tlv *dp_stats_buf = + (htt_peer_details_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_PEER_DETAILS_TLV:"); + DP_TRACE_STATS(FATAL, "peer_type = %d", + dp_stats_buf->peer_type); + DP_TRACE_STATS(FATAL, "sw_peer_id = %d", + dp_stats_buf->sw_peer_id); + DP_TRACE_STATS(FATAL, "vdev_pdev_ast_idx = %d", + dp_stats_buf->vdev_pdev_ast_idx); + DP_TRACE_STATS(FATAL, "mac_addr(upper 4 bytes) = %d", + dp_stats_buf->mac_addr.mac_addr31to0); + DP_TRACE_STATS(FATAL, 
"mac_addr(lower 2 bytes) = %d", + dp_stats_buf->mac_addr.mac_addr47to32); + DP_TRACE_STATS(FATAL, "peer_flags = %d", + dp_stats_buf->peer_flags); + DP_TRACE_STATS(FATAL, "qpeer_flags = %d\n", + dp_stats_buf->qpeer_flags); +} + +/* + * dp_print_tx_peer_rate_stats_tlv: display htt_tx_peer_rate_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_peer_rate_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_peer_rate_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_peer_rate_stats_tlv *dp_stats_buf = + (htt_tx_peer_rate_stats_tlv *)tag_buf; + uint8_t i, j; + uint16_t index = 0; + char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS]; + char *str_buf = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!str_buf) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + for (i = 0; i < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; i++) + tx_gi[i] = (char *)qdf_mem_malloc(DP_MAX_STRING_LEN); + + DP_TRACE_STATS(FATAL, "HTT_TX_PEER_RATE_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "tx_ldpc = %d", + dp_stats_buf->tx_ldpc); + DP_TRACE_STATS(FATAL, "rts_cnt = %d", + dp_stats_buf->rts_cnt); + DP_TRACE_STATS(FATAL, "ack_rssi = %d", + dp_stats_buf->ack_rssi); + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_TX_MCS_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->tx_mcs[i]); + } + DP_TRACE_STATS(FATAL, "tx_mcs = %s ", str_buf); + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_TX_SU_MCS_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->tx_su_mcs[i]); + } + DP_TRACE_STATS(FATAL, "tx_su_mcs = %s ", str_buf); + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_TX_MU_MCS_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->tx_mu_mcs[i]); + } + DP_TRACE_STATS(FATAL, 
"tx_mu_mcs = %s ", str_buf); + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_TX_NSS_LEN; i++) { + /* 0 stands for NSS 1, 1 stands for NSS 2, etc. */ + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", (i + 1), + dp_stats_buf->tx_nss[i]); + } + DP_TRACE_STATS(FATAL, "tx_nss = %s ", str_buf); + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_TX_BW_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->tx_bw[i]); + } + DP_TRACE_STATS(FATAL, "tx_bw = %s ", str_buf); + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->tx_stbc[i]); + } + DP_TRACE_STATS(FATAL, "tx_stbc = %s ", str_buf); + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + + for (i = 0; i < DP_HTT_TX_PREAM_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->tx_pream[i]); + } + DP_TRACE_STATS(FATAL, "tx_pream = %s ", str_buf); + + for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) { + index = 0; + for (i = 0; i < HTT_TX_PEER_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&tx_gi[j][index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->tx_gi[j][i]); + } + DP_TRACE_STATS(FATAL, "tx_gi[%d] = %s ", j, tx_gi[j]); + } + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_TX_DCM_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->tx_dcm[i]); + } + DP_TRACE_STATS(FATAL, "tx_dcm = %s\n", str_buf); + for (i = 0; i < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; i++) + qdf_mem_free(tx_gi[i]); + + qdf_mem_free(str_buf); +} + +/* + * dp_print_rx_peer_rate_stats_tlv: display htt_rx_peer_rate_stats_tlv + * @tag_buf: buffer containing the 
tlv htt_rx_peer_rate_stats_tlv + * + * return:void + */ +static inline void dp_print_rx_peer_rate_stats_tlv(uint32_t *tag_buf) +{ + htt_rx_peer_rate_stats_tlv *dp_stats_buf = + (htt_rx_peer_rate_stats_tlv *)tag_buf; + uint8_t i, j; + uint16_t index = 0; + char *rssi_chain[DP_HTT_PEER_NUM_SS]; + char *rx_gi[HTT_RX_PEER_STATS_NUM_GI_COUNTERS]; + char *str_buf = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!str_buf) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + for (i = 0; i < DP_HTT_PEER_NUM_SS; i++) + rssi_chain[i] = qdf_mem_malloc(DP_MAX_STRING_LEN); + for (i = 0; i < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; i++) + rx_gi[i] = qdf_mem_malloc(DP_MAX_STRING_LEN); + + DP_TRACE_STATS(FATAL, "HTT_RX_PEER_RATE_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "nsts = %d", + dp_stats_buf->nsts); + DP_TRACE_STATS(FATAL, "rx_ldpc = %d", + dp_stats_buf->rx_ldpc); + DP_TRACE_STATS(FATAL, "rts_cnt = %d", + dp_stats_buf->rts_cnt); + DP_TRACE_STATS(FATAL, "rssi_mgmt = %d", + dp_stats_buf->rssi_mgmt); + DP_TRACE_STATS(FATAL, "rssi_data = %d", + dp_stats_buf->rssi_data); + DP_TRACE_STATS(FATAL, "rssi_comb = %d", + dp_stats_buf->rssi_comb); + + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_RX_MCS_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->rx_mcs[i]); + } + DP_TRACE_STATS(FATAL, "rx_mcs = %s ", str_buf); + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_RX_NSS_LEN; i++) { + /* 0 stands for NSS 1, 1 stands for NSS 2, etc. 
*/ + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", (i + 1), + dp_stats_buf->rx_nss[i]); + } + DP_TRACE_STATS(FATAL, "rx_nss = %s ", str_buf); + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_RX_DCM_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->rx_dcm[i]); + } + DP_TRACE_STATS(FATAL, "rx_dcm = %s ", str_buf); + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->rx_stbc[i]); + } + DP_TRACE_STATS(FATAL, "rx_stbc = %s ", str_buf); + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_RX_BW_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->rx_bw[i]); + } + DP_TRACE_STATS(FATAL, "rx_bw = %s ", str_buf); + + for (j = 0; j < DP_HTT_PEER_NUM_SS; j++) { + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + index = 0; + for (i = 0; i < HTT_RX_PEER_STATS_NUM_BW_COUNTERS; i++) { + index += qdf_snprint(&rssi_chain[j][index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->rssi_chain[j][i]); + } + DP_TRACE_STATS(FATAL, "rssi_chain[%d] = %s ", j, rssi_chain[j]); + } + + for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) { + index = 0; + for (i = 0; i < HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&rx_gi[j][index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->rx_gi[j][i]); + } + DP_TRACE_STATS(FATAL, "rx_gi[%d] = %s ", j, rx_gi[j]); + } + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_RX_PREAM_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->rx_pream[i]); + } + DP_TRACE_STATS(FATAL, "rx_pream = %s\n", str_buf); + + for (i = 0; i < 
DP_HTT_PEER_NUM_SS; i++) + qdf_mem_free(rssi_chain[i]); + for (i = 0; i < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; i++) + qdf_mem_free(rx_gi[i]); + + qdf_mem_free(str_buf); +} + +/* + * dp_print_tx_hwq_mu_mimo_sch_stats_tlv: display htt_tx_hwq_mu_mimo_sch_stats + * @tag_buf: buffer containing the tlv htt_tx_hwq_mu_mimo_sch_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_hwq_mu_mimo_sch_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_hwq_mu_mimo_sch_stats_tlv *dp_stats_buf = + (htt_tx_hwq_mu_mimo_sch_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "mu_mimo_sch_posted = %d", + dp_stats_buf->mu_mimo_sch_posted); + DP_TRACE_STATS(FATAL, "mu_mimo_sch_failed = %d", + dp_stats_buf->mu_mimo_sch_failed); + DP_TRACE_STATS(FATAL, "mu_mimo_ppdu_posted = %d\n", + dp_stats_buf->mu_mimo_ppdu_posted); +} + +/* + * dp_print_tx_hwq_mu_mimo_mpdu_stats_tlv: display htt_tx_hwq_mu_mimo_mpdu_stats + * @tag_buf: buffer containing the tlv htt_tx_hwq_mu_mimo_mpdu_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_hwq_mu_mimo_mpdu_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_hwq_mu_mimo_mpdu_stats_tlv *dp_stats_buf = + (htt_tx_hwq_mu_mimo_mpdu_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "mu_mimo_mpdus_queued_usr = %d", + dp_stats_buf->mu_mimo_mpdus_queued_usr); + DP_TRACE_STATS(FATAL, "mu_mimo_mpdus_tried_usr = %d", + dp_stats_buf->mu_mimo_mpdus_tried_usr); + DP_TRACE_STATS(FATAL, "mu_mimo_mpdus_failed_usr = %d", + dp_stats_buf->mu_mimo_mpdus_failed_usr); + DP_TRACE_STATS(FATAL, "mu_mimo_mpdus_requeued_usr = %d", + dp_stats_buf->mu_mimo_mpdus_requeued_usr); + DP_TRACE_STATS(FATAL, "mu_mimo_err_no_ba_usr = %d", + dp_stats_buf->mu_mimo_err_no_ba_usr); + DP_TRACE_STATS(FATAL, "mu_mimo_mpdu_underrun_usr = %d", + dp_stats_buf->mu_mimo_mpdu_underrun_usr); + DP_TRACE_STATS(FATAL, "mu_mimo_ampdu_underrun_usr = %d\n", + 
dp_stats_buf->mu_mimo_ampdu_underrun_usr); +} + +/* + * dp_print_tx_hwq_mu_mimo_cmn_stats_tlv: display htt_tx_hwq_mu_mimo_cmn_stats + * @tag_buf: buffer containing the tlv htt_tx_hwq_mu_mimo_cmn_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_hwq_mu_mimo_cmn_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_hwq_mu_mimo_cmn_stats_tlv *dp_stats_buf = + (htt_tx_hwq_mu_mimo_cmn_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "mac_id__hwq_id__word = %d\n", + dp_stats_buf->mac_id__hwq_id__word); +} + +/* + * dp_print_tx_hwq_stats_cmn_tlv: display htt_tx_hwq_stats_cmn_tlv + * @tag_buf: buffer containing the tlv htt_tx_hwq_stats_cmn_tlv + * + * return:void + */ +static inline void dp_print_tx_hwq_stats_cmn_tlv(uint32_t *tag_buf) +{ + htt_tx_hwq_stats_cmn_tlv *dp_stats_buf = + (htt_tx_hwq_stats_cmn_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_HWQ_STATS_CMN_TLV:"); + DP_TRACE_STATS(FATAL, "mac_id__hwq_id__word = %d", + dp_stats_buf->mac_id__hwq_id__word); + DP_TRACE_STATS(FATAL, "xretry = %d", + dp_stats_buf->xretry); + DP_TRACE_STATS(FATAL, "underrun_cnt = %d", + dp_stats_buf->underrun_cnt); + DP_TRACE_STATS(FATAL, "flush_cnt = %d", + dp_stats_buf->flush_cnt); + DP_TRACE_STATS(FATAL, "filt_cnt = %d", + dp_stats_buf->filt_cnt); + DP_TRACE_STATS(FATAL, "null_mpdu_bmap = %d", + dp_stats_buf->null_mpdu_bmap); + DP_TRACE_STATS(FATAL, "user_ack_failure = %d", + dp_stats_buf->user_ack_failure); + DP_TRACE_STATS(FATAL, "ack_tlv_proc = %d", + dp_stats_buf->ack_tlv_proc); + DP_TRACE_STATS(FATAL, "sched_id_proc = %d", + dp_stats_buf->sched_id_proc); + DP_TRACE_STATS(FATAL, "null_mpdu_tx_count = %d", + dp_stats_buf->null_mpdu_tx_count); + DP_TRACE_STATS(FATAL, "mpdu_bmap_not_recvd = %d", + dp_stats_buf->mpdu_bmap_not_recvd); + DP_TRACE_STATS(FATAL, "num_bar = %d", + dp_stats_buf->num_bar); + DP_TRACE_STATS(FATAL, "rts = %d", + dp_stats_buf->rts); + DP_TRACE_STATS(FATAL, "cts2self = %d", + 
dp_stats_buf->cts2self); + DP_TRACE_STATS(FATAL, "qos_null = %d", + dp_stats_buf->qos_null); + DP_TRACE_STATS(FATAL, "mpdu_tried_cnt = %d", + dp_stats_buf->mpdu_tried_cnt); + DP_TRACE_STATS(FATAL, "mpdu_queued_cnt = %d", + dp_stats_buf->mpdu_queued_cnt); + DP_TRACE_STATS(FATAL, "mpdu_ack_fail_cnt = %d", + dp_stats_buf->mpdu_ack_fail_cnt); + DP_TRACE_STATS(FATAL, "mpdu_filt_cnt = %d", + dp_stats_buf->mpdu_filt_cnt); + DP_TRACE_STATS(FATAL, "false_mpdu_ack_count = %d\n", + dp_stats_buf->false_mpdu_ack_count); +} + +/* + * dp_print_tx_hwq_difs_latency_stats_tlv_v: display + * htt_tx_hwq_difs_latency_stats_tlv_v + * @tag_buf: buffer containing the tlv htt_tx_hwq_difs_latency_stats_tlv_v + * + *return:void + */ +static inline void dp_print_tx_hwq_difs_latency_stats_tlv_v(uint32_t *tag_buf) +{ + htt_tx_hwq_difs_latency_stats_tlv_v *dp_stats_buf = + (htt_tx_hwq_difs_latency_stats_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *difs_latency_hist = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!difs_latency_hist) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + tag_len = qdf_min(tag_len, + (uint32_t)HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS); + + DP_TRACE_STATS(FATAL, "HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:"); + DP_TRACE_STATS(FATAL, "hist_intvl = %d", + dp_stats_buf->hist_intvl); + + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&difs_latency_hist[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->difs_latency_hist[i]); + } + DP_TRACE_STATS(FATAL, "difs_latency_hist = %s\n", difs_latency_hist); + qdf_mem_free(difs_latency_hist); +} + +/* + * dp_print_tx_hwq_cmd_result_stats_tlv_v: display htt_tx_hwq_cmd_result_stats + * @tag_buf: buffer containing the tlv htt_tx_hwq_cmd_result_stats_tlv_v + * + * return:void + */ +static inline void dp_print_tx_hwq_cmd_result_stats_tlv_v(uint32_t *tag_buf) +{ + 
htt_tx_hwq_cmd_result_stats_tlv_v *dp_stats_buf = + (htt_tx_hwq_cmd_result_stats_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *cmd_result = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!cmd_result) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_HWQ_MAX_CMD_RESULT_STATS); + + DP_TRACE_STATS(FATAL, "HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&cmd_result[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->cmd_result[i]); + } + DP_TRACE_STATS(FATAL, "cmd_result = %s ", cmd_result); + qdf_mem_free(cmd_result); +} + +/* + * dp_print_tx_hwq_cmd_stall_stats_tlv_v: display htt_tx_hwq_cmd_stall_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_hwq_cmd_stall_stats_tlv_v + * + * return:void + */ +static inline void dp_print_tx_hwq_cmd_stall_stats_tlv_v(uint32_t *tag_buf) +{ + htt_tx_hwq_cmd_stall_stats_tlv_v *dp_stats_buf = + (htt_tx_hwq_cmd_stall_stats_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *cmd_stall_status = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!cmd_stall_status) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_HWQ_MAX_CMD_STALL_STATS); + + DP_TRACE_STATS(FATAL, "HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&cmd_stall_status[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->cmd_stall_status[i]); + } + DP_TRACE_STATS(FATAL, "cmd_stall_status = %s\n", cmd_stall_status); + qdf_mem_free(cmd_stall_status); +} + +/* + * dp_print_tx_hwq_fes_result_stats_tlv_v: display htt_tx_hwq_fes_result_stats + * @tag_buf: buffer containing the tlv 
htt_tx_hwq_fes_result_stats_tlv_v + * + * return:void + */ +static inline void dp_print_tx_hwq_fes_result_stats_tlv_v(uint32_t *tag_buf) +{ + htt_tx_hwq_fes_result_stats_tlv_v *dp_stats_buf = + (htt_tx_hwq_fes_result_stats_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *fes_result = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!fes_result) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_HWQ_MAX_FES_RESULT_STATS); + + DP_TRACE_STATS(FATAL, "HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&fes_result[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->fes_result[i]); + } + DP_TRACE_STATS(FATAL, "fes_result = %s ", fes_result); + qdf_mem_free(fes_result); +} + +/* + * dp_print_tx_selfgen_cmn_stats_tlv: display htt_tx_selfgen_cmn_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_selfgen_cmn_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_selfgen_cmn_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_selfgen_cmn_stats_tlv *dp_stats_buf = + (htt_tx_selfgen_cmn_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_SELFGEN_CMN_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "mac_id__word = %d", + dp_stats_buf->mac_id__word); + DP_TRACE_STATS(FATAL, "su_bar = %d", + dp_stats_buf->su_bar); + DP_TRACE_STATS(FATAL, "rts = %d", + dp_stats_buf->rts); + DP_TRACE_STATS(FATAL, "cts2self = %d", + dp_stats_buf->cts2self); + DP_TRACE_STATS(FATAL, "qos_null = %d", + dp_stats_buf->qos_null); + DP_TRACE_STATS(FATAL, "delayed_bar_1 = %d", + dp_stats_buf->delayed_bar_1); + DP_TRACE_STATS(FATAL, "delayed_bar_2 = %d", + dp_stats_buf->delayed_bar_2); + DP_TRACE_STATS(FATAL, "delayed_bar_3 = %d", + dp_stats_buf->delayed_bar_3); + DP_TRACE_STATS(FATAL, "delayed_bar_4 = %d", + dp_stats_buf->delayed_bar_4); + DP_TRACE_STATS(FATAL, "delayed_bar_5 = 
%d", + dp_stats_buf->delayed_bar_5); + DP_TRACE_STATS(FATAL, "delayed_bar_6 = %d", + dp_stats_buf->delayed_bar_6); + DP_TRACE_STATS(FATAL, "delayed_bar_7 = %d\n", + dp_stats_buf->delayed_bar_7); +} + +/* + * dp_print_tx_selfgen_ac_stats_tlv: display htt_tx_selfgen_ac_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_selfgen_ac_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_selfgen_ac_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_selfgen_ac_stats_tlv *dp_stats_buf = + (htt_tx_selfgen_ac_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_SELFGEN_AC_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "ac_su_ndpa = %d", + dp_stats_buf->ac_su_ndpa); + DP_TRACE_STATS(FATAL, "ac_su_ndp = %d", + dp_stats_buf->ac_su_ndp); + DP_TRACE_STATS(FATAL, "ac_mu_mimo_ndpa = %d", + dp_stats_buf->ac_mu_mimo_ndpa); + DP_TRACE_STATS(FATAL, "ac_mu_mimo_ndp = %d", + dp_stats_buf->ac_mu_mimo_ndp); + DP_TRACE_STATS(FATAL, "ac_mu_mimo_brpoll_1 = %d", + dp_stats_buf->ac_mu_mimo_brpoll_1); + DP_TRACE_STATS(FATAL, "ac_mu_mimo_brpoll_2 = %d", + dp_stats_buf->ac_mu_mimo_brpoll_2); + DP_TRACE_STATS(FATAL, "ac_mu_mimo_brpoll_3 = %d\n", + dp_stats_buf->ac_mu_mimo_brpoll_3); +} + +/* + * dp_print_tx_selfgen_ax_stats_tlv: display htt_tx_selfgen_ax_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_selfgen_ax_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_selfgen_ax_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_selfgen_ax_stats_tlv *dp_stats_buf = + (htt_tx_selfgen_ax_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_SELFGEN_AX_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "ax_su_ndpa = %d", + dp_stats_buf->ax_su_ndpa); + DP_TRACE_STATS(FATAL, "ax_su_ndp = %d", + dp_stats_buf->ax_su_ndp); + DP_TRACE_STATS(FATAL, "ax_mu_mimo_ndpa = %d", + dp_stats_buf->ax_mu_mimo_ndpa); + DP_TRACE_STATS(FATAL, "ax_mu_mimo_ndp = %d", + dp_stats_buf->ax_mu_mimo_ndp); + DP_TRACE_STATS(FATAL, "ax_mu_mimo_brpoll_1 = %d", + dp_stats_buf->ax_mu_mimo_brpoll_1); + DP_TRACE_STATS(FATAL, 
"ax_mu_mimo_brpoll_2 = %d", + dp_stats_buf->ax_mu_mimo_brpoll_2); + DP_TRACE_STATS(FATAL, "ax_mu_mimo_brpoll_3 = %d", + dp_stats_buf->ax_mu_mimo_brpoll_3); + DP_TRACE_STATS(FATAL, "ax_mu_mimo_brpoll_4 = %d", + dp_stats_buf->ax_mu_mimo_brpoll_4); + DP_TRACE_STATS(FATAL, "ax_mu_mimo_brpoll_5 = %d", + dp_stats_buf->ax_mu_mimo_brpoll_5); + DP_TRACE_STATS(FATAL, "ax_mu_mimo_brpoll_6 = %d", + dp_stats_buf->ax_mu_mimo_brpoll_6); + DP_TRACE_STATS(FATAL, "ax_mu_mimo_brpoll_7 = %d", + dp_stats_buf->ax_mu_mimo_brpoll_7); + DP_TRACE_STATS(FATAL, "ax_basic_trigger = %d", + dp_stats_buf->ax_basic_trigger); + DP_TRACE_STATS(FATAL, "ax_bsr_trigger = %d", + dp_stats_buf->ax_bsr_trigger); + DP_TRACE_STATS(FATAL, "ax_mu_bar_trigger = %d", + dp_stats_buf->ax_mu_bar_trigger); + DP_TRACE_STATS(FATAL, "ax_mu_rts_trigger = %d\n", + dp_stats_buf->ax_mu_rts_trigger); +} + +/* + * dp_print_tx_selfgen_ac_err_stats_tlv: display htt_tx_selfgen_ac_err_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_selfgen_ac_err_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_selfgen_ac_err_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_selfgen_ac_err_stats_tlv *dp_stats_buf = + (htt_tx_selfgen_ac_err_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "ac_su_ndp_err = %d", + dp_stats_buf->ac_su_ndp_err); + DP_TRACE_STATS(FATAL, "ac_su_ndpa_err = %d", + dp_stats_buf->ac_su_ndpa_err); + DP_TRACE_STATS(FATAL, "ac_mu_mimo_ndpa_err = %d", + dp_stats_buf->ac_mu_mimo_ndpa_err); + DP_TRACE_STATS(FATAL, "ac_mu_mimo_ndp_err = %d", + dp_stats_buf->ac_mu_mimo_ndp_err); + DP_TRACE_STATS(FATAL, "ac_mu_mimo_brp1_err = %d", + dp_stats_buf->ac_mu_mimo_brp1_err); + DP_TRACE_STATS(FATAL, "ac_mu_mimo_brp2_err = %d", + dp_stats_buf->ac_mu_mimo_brp2_err); + DP_TRACE_STATS(FATAL, "ac_mu_mimo_brp3_err = %d\n", + dp_stats_buf->ac_mu_mimo_brp3_err); +} + +/* + * dp_print_tx_selfgen_ax_err_stats_tlv: display htt_tx_selfgen_ax_err_stats_tlv + * @tag_buf: buffer 
containing the tlv htt_tx_selfgen_ax_err_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_selfgen_ax_err_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_selfgen_ax_err_stats_tlv *dp_stats_buf = + (htt_tx_selfgen_ax_err_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "ax_su_ndp_err = %d", + dp_stats_buf->ax_su_ndp_err); + DP_TRACE_STATS(FATAL, "ax_su_ndpa_err = %d", + dp_stats_buf->ax_su_ndpa_err); + DP_TRACE_STATS(FATAL, "ax_mu_mimo_ndpa_err = %d", + dp_stats_buf->ax_mu_mimo_ndpa_err); + DP_TRACE_STATS(FATAL, "ax_mu_mimo_ndp_err = %d", + dp_stats_buf->ax_mu_mimo_ndp_err); + DP_TRACE_STATS(FATAL, "ax_mu_mimo_brp1_err = %d", + dp_stats_buf->ax_mu_mimo_brp1_err); + DP_TRACE_STATS(FATAL, "ax_mu_mimo_brp2_err = %d", + dp_stats_buf->ax_mu_mimo_brp2_err); + DP_TRACE_STATS(FATAL, "ax_mu_mimo_brp3_err = %d", + dp_stats_buf->ax_mu_mimo_brp3_err); + DP_TRACE_STATS(FATAL, "ax_mu_mimo_brp4_err = %d", + dp_stats_buf->ax_mu_mimo_brp4_err); + DP_TRACE_STATS(FATAL, "ax_mu_mimo_brp5_err = %d", + dp_stats_buf->ax_mu_mimo_brp5_err); + DP_TRACE_STATS(FATAL, "ax_mu_mimo_brp6_err = %d", + dp_stats_buf->ax_mu_mimo_brp6_err); + DP_TRACE_STATS(FATAL, "ax_mu_mimo_brp7_err = %d", + dp_stats_buf->ax_mu_mimo_brp7_err); + DP_TRACE_STATS(FATAL, "ax_basic_trigger_err = %d", + dp_stats_buf->ax_basic_trigger_err); + DP_TRACE_STATS(FATAL, "ax_bsr_trigger_err = %d", + dp_stats_buf->ax_bsr_trigger_err); + DP_TRACE_STATS(FATAL, "ax_mu_bar_trigger_err = %d", + dp_stats_buf->ax_mu_bar_trigger_err); + DP_TRACE_STATS(FATAL, "ax_mu_rts_trigger_err = %d\n", + dp_stats_buf->ax_mu_rts_trigger_err); +} + +/* + * dp_print_tx_pdev_mu_mimo_sch_stats_tlv: display htt_tx_pdev_mu_mimo_sch_stats + * @tag_buf: buffer containing the tlv htt_tx_pdev_mu_mimo_sch_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_pdev_mu_mimo_sch_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_pdev_mu_mimo_sch_stats_tlv *dp_stats_buf = + 
(htt_tx_pdev_mu_mimo_sch_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "mu_mimo_sch_posted = %d", + dp_stats_buf->mu_mimo_sch_posted); + DP_TRACE_STATS(FATAL, "mu_mimo_sch_failed = %d", + dp_stats_buf->mu_mimo_sch_failed); + DP_TRACE_STATS(FATAL, "mu_mimo_ppdu_posted = %d\n", + dp_stats_buf->mu_mimo_ppdu_posted); +} + +/* + * dp_print_tx_pdev_mu_mimo_mpdu_stats_tlv: display + * htt_tx_pdev_mu_mimo_mpdu_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_pdev_mu_mimo_mpdu_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_pdev_mu_mimo_mpdu_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_pdev_mu_mimo_mpdu_stats_tlv *dp_stats_buf = + (htt_tx_pdev_mu_mimo_mpdu_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_PDEV_MU_MIMO_MPDU_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "mu_mimo_mpdus_queued_usr = %d", + dp_stats_buf->mu_mimo_mpdus_queued_usr); + DP_TRACE_STATS(FATAL, "mu_mimo_mpdus_tried_usr = %d", + dp_stats_buf->mu_mimo_mpdus_tried_usr); + DP_TRACE_STATS(FATAL, "mu_mimo_mpdus_failed_usr = %d", + dp_stats_buf->mu_mimo_mpdus_failed_usr); + DP_TRACE_STATS(FATAL, "mu_mimo_mpdus_requeued_usr = %d", + dp_stats_buf->mu_mimo_mpdus_requeued_usr); + DP_TRACE_STATS(FATAL, "mu_mimo_err_no_ba_usr = %d", + dp_stats_buf->mu_mimo_err_no_ba_usr); + DP_TRACE_STATS(FATAL, "mu_mimo_mpdu_underrun_usr = %d", + dp_stats_buf->mu_mimo_mpdu_underrun_usr); + DP_TRACE_STATS(FATAL, "mu_mimo_ampdu_underrun_usr = %d\n", + dp_stats_buf->mu_mimo_ampdu_underrun_usr); +} + +/* + * dp_print_sched_txq_cmd_posted_tlv_v: display htt_sched_txq_cmd_posted_tlv_v + * @tag_buf: buffer containing the tlv htt_sched_txq_cmd_posted_tlv_v + * + * return:void + */ +static inline void dp_print_sched_txq_cmd_posted_tlv_v(uint32_t *tag_buf) +{ + htt_sched_txq_cmd_posted_tlv_v *dp_stats_buf = + (htt_sched_txq_cmd_posted_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char 
*sched_cmd_posted = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!sched_cmd_posted) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_PDEV_SCHED_TX_MODE_MAX); + + DP_TRACE_STATS(FATAL, "HTT_SCHED_TXQ_CMD_POSTED_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&sched_cmd_posted[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->sched_cmd_posted[i]); + } + DP_TRACE_STATS(FATAL, "sched_cmd_posted = %s\n", sched_cmd_posted); + qdf_mem_free(sched_cmd_posted); +} + +/* + * dp_print_sched_txq_cmd_reaped_tlv_v: display htt_sched_txq_cmd_reaped_tlv_v + * @tag_buf: buffer containing the tlv htt_sched_txq_cmd_reaped_tlv_v + * + * return:void + */ +static inline void dp_print_sched_txq_cmd_reaped_tlv_v(uint32_t *tag_buf) +{ + htt_sched_txq_cmd_reaped_tlv_v *dp_stats_buf = + (htt_sched_txq_cmd_reaped_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *sched_cmd_reaped = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!sched_cmd_reaped) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_PDEV_SCHED_TX_MODE_MAX); + + DP_TRACE_STATS(FATAL, "HTT_SCHED_TXQ_CMD_REAPED_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&sched_cmd_reaped[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->sched_cmd_reaped[i]); + } + DP_TRACE_STATS(FATAL, "sched_cmd_reaped = %s\n", sched_cmd_reaped); + qdf_mem_free(sched_cmd_reaped); +} + +/* + * dp_print_tx_pdev_stats_sched_per_txq_tlv: display + * htt_tx_pdev_stats_sched_per_txq_tlv + * @tag_buf: buffer containing the tlv htt_tx_pdev_stats_sched_per_txq_tlv + * + * return:void + */ +static inline void dp_print_tx_pdev_stats_sched_per_txq_tlv(uint32_t *tag_buf) +{ + htt_tx_pdev_stats_sched_per_txq_tlv 
*dp_stats_buf = + (htt_tx_pdev_stats_sched_per_txq_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:"); + DP_TRACE_STATS(FATAL, "mac_id__txq_id__word = %d", + dp_stats_buf->mac_id__txq_id__word); + DP_TRACE_STATS(FATAL, "sched_policy = %d", + dp_stats_buf->sched_policy); + DP_TRACE_STATS(FATAL, "last_sched_cmd_posted_timestamp = %d", + dp_stats_buf->last_sched_cmd_posted_timestamp); + DP_TRACE_STATS(FATAL, "last_sched_cmd_compl_timestamp = %d", + dp_stats_buf->last_sched_cmd_compl_timestamp); + DP_TRACE_STATS(FATAL, "sched_2_tac_lwm_count = %d", + dp_stats_buf->sched_2_tac_lwm_count); + DP_TRACE_STATS(FATAL, "sched_2_tac_ring_full = %d", + dp_stats_buf->sched_2_tac_ring_full); + DP_TRACE_STATS(FATAL, "sched_cmd_post_failure = %d", + dp_stats_buf->sched_cmd_post_failure); + DP_TRACE_STATS(FATAL, "num_active_tids = %d", + dp_stats_buf->num_active_tids); + DP_TRACE_STATS(FATAL, "num_ps_schedules = %d", + dp_stats_buf->num_ps_schedules); + DP_TRACE_STATS(FATAL, "sched_cmds_pending = %d", + dp_stats_buf->sched_cmds_pending); + DP_TRACE_STATS(FATAL, "num_tid_register = %d", + dp_stats_buf->num_tid_register); + DP_TRACE_STATS(FATAL, "num_tid_unregister = %d", + dp_stats_buf->num_tid_unregister); + DP_TRACE_STATS(FATAL, "num_qstats_queried = %d", + dp_stats_buf->num_qstats_queried); + DP_TRACE_STATS(FATAL, "qstats_update_pending = %d", + dp_stats_buf->qstats_update_pending); + DP_TRACE_STATS(FATAL, "last_qstats_query_timestamp = %d", + dp_stats_buf->last_qstats_query_timestamp); + DP_TRACE_STATS(FATAL, "num_tqm_cmdq_full = %d", + dp_stats_buf->num_tqm_cmdq_full); + DP_TRACE_STATS(FATAL, "num_de_sched_algo_trigger = %d", + dp_stats_buf->num_de_sched_algo_trigger); + DP_TRACE_STATS(FATAL, "num_rt_sched_algo_trigger = %d", + dp_stats_buf->num_rt_sched_algo_trigger); + DP_TRACE_STATS(FATAL, "num_tqm_sched_algo_trigger = %d", + dp_stats_buf->num_tqm_sched_algo_trigger); + DP_TRACE_STATS(FATAL, "notify_sched = %d\n", + dp_stats_buf->notify_sched); +} 
+ +/* + * dp_print_stats_tx_sched_cmn_tlv: display htt_stats_tx_sched_cmn_tlv + * @tag_buf: buffer containing the tlv htt_stats_tx_sched_cmn_tlv + * + * return:void + */ +static inline void dp_print_stats_tx_sched_cmn_tlv(uint32_t *tag_buf) +{ + htt_stats_tx_sched_cmn_tlv *dp_stats_buf = + (htt_stats_tx_sched_cmn_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_STATS_TX_SCHED_CMN_TLV:"); + DP_TRACE_STATS(FATAL, "mac_id__word = %d", + dp_stats_buf->mac_id__word); + DP_TRACE_STATS(FATAL, "current_timestamp = %d\n", + dp_stats_buf->current_timestamp); +} + +/* + * dp_print_tx_tqm_gen_mpdu_stats_tlv_v: display htt_tx_tqm_gen_mpdu_stats_tlv_v + * @tag_buf: buffer containing the tlv htt_tx_tqm_gen_mpdu_stats_tlv_v + * + * return:void + */ +static inline void dp_print_tx_tqm_gen_mpdu_stats_tlv_v(uint32_t *tag_buf) +{ + htt_tx_tqm_gen_mpdu_stats_tlv_v *dp_stats_buf = + (htt_tx_tqm_gen_mpdu_stats_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *gen_mpdu_end_reason = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!gen_mpdu_end_reason) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + tag_len = qdf_min(tag_len, + (uint32_t)HTT_TX_TQM_MAX_GEN_MPDU_END_REASON); + + DP_TRACE_STATS(FATAL, "HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&gen_mpdu_end_reason[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->gen_mpdu_end_reason[i]); + } + DP_TRACE_STATS(FATAL, "gen_mpdu_end_reason = %s\n", gen_mpdu_end_reason); + qdf_mem_free(gen_mpdu_end_reason); +} + +/* + * dp_print_tx_tqm_list_mpdu_stats_tlv_v: display htt_tx_tqm_list_mpdu_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_tqm_list_mpdu_stats_tlv_v + * + * return:void + */ +static inline void dp_print_tx_tqm_list_mpdu_stats_tlv_v(uint32_t *tag_buf) +{ + htt_tx_tqm_list_mpdu_stats_tlv_v *dp_stats_buf = + 
(htt_tx_tqm_list_mpdu_stats_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *list_mpdu_end_reason = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!list_mpdu_end_reason) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + tag_len = qdf_min(tag_len, + (uint32_t)HTT_TX_TQM_MAX_LIST_MPDU_END_REASON); + + DP_TRACE_STATS(FATAL, "HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&list_mpdu_end_reason[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->list_mpdu_end_reason[i]); + } + DP_TRACE_STATS(FATAL, "list_mpdu_end_reason = %s\n", + list_mpdu_end_reason); + qdf_mem_free(list_mpdu_end_reason); +} + +/* + * dp_print_tx_tqm_list_mpdu_cnt_tlv_v: display htt_tx_tqm_list_mpdu_cnt_tlv_v + * @tag_buf: buffer containing the tlv htt_tx_tqm_list_mpdu_cnt_tlv_v + * + * return:void + */ +static inline void dp_print_tx_tqm_list_mpdu_cnt_tlv_v(uint32_t *tag_buf) +{ + htt_tx_tqm_list_mpdu_cnt_tlv_v *dp_stats_buf = + (htt_tx_tqm_list_mpdu_cnt_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *list_mpdu_cnt_hist = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!list_mpdu_cnt_hist) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + tag_len = qdf_min(tag_len, + (uint32_t)HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS); + + DP_TRACE_STATS(FATAL, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&list_mpdu_cnt_hist[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->list_mpdu_cnt_hist[i]); + } + DP_TRACE_STATS(FATAL, "list_mpdu_cnt_hist = %s\n", list_mpdu_cnt_hist); + qdf_mem_free(list_mpdu_cnt_hist); +} + +/* + * dp_print_tx_tqm_pdev_stats_tlv_v: display htt_tx_tqm_pdev_stats_tlv_v + * @tag_buf: buffer containing 
the tlv htt_tx_tqm_pdev_stats_tlv_v + * + * return:void + */ +static inline void dp_print_tx_tqm_pdev_stats_tlv_v(uint32_t *tag_buf) +{ + htt_tx_tqm_pdev_stats_tlv_v *dp_stats_buf = + (htt_tx_tqm_pdev_stats_tlv_v *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_TQM_PDEV_STATS_TLV_V:"); + DP_TRACE_STATS(FATAL, "msdu_count = %d", + dp_stats_buf->msdu_count); + DP_TRACE_STATS(FATAL, "mpdu_count = %d", + dp_stats_buf->mpdu_count); + DP_TRACE_STATS(FATAL, "remove_msdu = %d", + dp_stats_buf->remove_msdu); + DP_TRACE_STATS(FATAL, "remove_mpdu = %d", + dp_stats_buf->remove_mpdu); + DP_TRACE_STATS(FATAL, "remove_msdu_ttl = %d", + dp_stats_buf->remove_msdu_ttl); + DP_TRACE_STATS(FATAL, "send_bar = %d", + dp_stats_buf->send_bar); + DP_TRACE_STATS(FATAL, "bar_sync = %d", + dp_stats_buf->bar_sync); + DP_TRACE_STATS(FATAL, "notify_mpdu = %d", + dp_stats_buf->notify_mpdu); + DP_TRACE_STATS(FATAL, "sync_cmd = %d", + dp_stats_buf->sync_cmd); + DP_TRACE_STATS(FATAL, "write_cmd = %d", + dp_stats_buf->write_cmd); + DP_TRACE_STATS(FATAL, "hwsch_trigger = %d", + dp_stats_buf->hwsch_trigger); + DP_TRACE_STATS(FATAL, "ack_tlv_proc = %d", + dp_stats_buf->ack_tlv_proc); + DP_TRACE_STATS(FATAL, "gen_mpdu_cmd = %d", + dp_stats_buf->gen_mpdu_cmd); + DP_TRACE_STATS(FATAL, "gen_list_cmd = %d", + dp_stats_buf->gen_list_cmd); + DP_TRACE_STATS(FATAL, "remove_mpdu_cmd = %d", + dp_stats_buf->remove_mpdu_cmd); + DP_TRACE_STATS(FATAL, "remove_mpdu_tried_cmd = %d", + dp_stats_buf->remove_mpdu_tried_cmd); + DP_TRACE_STATS(FATAL, "mpdu_queue_stats_cmd = %d", + dp_stats_buf->mpdu_queue_stats_cmd); + DP_TRACE_STATS(FATAL, "mpdu_head_info_cmd = %d", + dp_stats_buf->mpdu_head_info_cmd); + DP_TRACE_STATS(FATAL, "msdu_flow_stats_cmd = %d", + dp_stats_buf->msdu_flow_stats_cmd); + DP_TRACE_STATS(FATAL, "remove_msdu_cmd = %d", + dp_stats_buf->remove_msdu_cmd); + DP_TRACE_STATS(FATAL, "remove_msdu_ttl_cmd = %d", + dp_stats_buf->remove_msdu_ttl_cmd); + DP_TRACE_STATS(FATAL, "flush_cache_cmd = %d", + 
dp_stats_buf->flush_cache_cmd); + DP_TRACE_STATS(FATAL, "update_mpduq_cmd = %d", + dp_stats_buf->update_mpduq_cmd); + DP_TRACE_STATS(FATAL, "enqueue = %d", + dp_stats_buf->enqueue); + DP_TRACE_STATS(FATAL, "enqueue_notify = %d", + dp_stats_buf->enqueue_notify); + DP_TRACE_STATS(FATAL, "notify_mpdu_at_head = %d", + dp_stats_buf->notify_mpdu_at_head); + DP_TRACE_STATS(FATAL, "notify_mpdu_state_valid = %d\n", + dp_stats_buf->notify_mpdu_state_valid); +} + +/* + * dp_print_tx_tqm_cmn_stats_tlv: display htt_tx_tqm_cmn_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_tqm_cmn_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_tqm_cmn_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_tqm_cmn_stats_tlv *dp_stats_buf = + (htt_tx_tqm_cmn_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_TQM_CMN_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "mac_id__word = %d", + dp_stats_buf->mac_id__word); + DP_TRACE_STATS(FATAL, "max_cmdq_id = %d", + dp_stats_buf->max_cmdq_id); + DP_TRACE_STATS(FATAL, "list_mpdu_cnt_hist_intvl = %d", + dp_stats_buf->list_mpdu_cnt_hist_intvl); + DP_TRACE_STATS(FATAL, "add_msdu = %d", + dp_stats_buf->add_msdu); + DP_TRACE_STATS(FATAL, "q_empty = %d", + dp_stats_buf->q_empty); + DP_TRACE_STATS(FATAL, "q_not_empty = %d", + dp_stats_buf->q_not_empty); + DP_TRACE_STATS(FATAL, "drop_notification = %d", + dp_stats_buf->drop_notification); + DP_TRACE_STATS(FATAL, "desc_threshold = %d\n", + dp_stats_buf->desc_threshold); +} + +/* + * dp_print_tx_tqm_error_stats_tlv: display htt_tx_tqm_error_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_tqm_error_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_tqm_error_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_tqm_error_stats_tlv *dp_stats_buf = + (htt_tx_tqm_error_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_TQM_ERROR_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "q_empty_failure = %d", + dp_stats_buf->q_empty_failure); + DP_TRACE_STATS(FATAL, "q_not_empty_failure = %d", + 
dp_stats_buf->q_not_empty_failure); + DP_TRACE_STATS(FATAL, "add_msdu_failure = %d\n", + dp_stats_buf->add_msdu_failure); +} + +/* + * dp_print_tx_tqm_cmdq_status_tlv: display htt_tx_tqm_cmdq_status_tlv + * @tag_buf: buffer containing the tlv htt_tx_tqm_cmdq_status_tlv + * + * return:void + */ +static inline void dp_print_tx_tqm_cmdq_status_tlv(uint32_t *tag_buf) +{ + htt_tx_tqm_cmdq_status_tlv *dp_stats_buf = + (htt_tx_tqm_cmdq_status_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_TQM_CMDQ_STATUS_TLV:"); + DP_TRACE_STATS(FATAL, "mac_id__cmdq_id__word = %d", + dp_stats_buf->mac_id__cmdq_id__word); + DP_TRACE_STATS(FATAL, "sync_cmd = %d", + dp_stats_buf->sync_cmd); + DP_TRACE_STATS(FATAL, "write_cmd = %d", + dp_stats_buf->write_cmd); + DP_TRACE_STATS(FATAL, "gen_mpdu_cmd = %d", + dp_stats_buf->gen_mpdu_cmd); + DP_TRACE_STATS(FATAL, "mpdu_queue_stats_cmd = %d", + dp_stats_buf->mpdu_queue_stats_cmd); + DP_TRACE_STATS(FATAL, "mpdu_head_info_cmd = %d", + dp_stats_buf->mpdu_head_info_cmd); + DP_TRACE_STATS(FATAL, "msdu_flow_stats_cmd = %d", + dp_stats_buf->msdu_flow_stats_cmd); + DP_TRACE_STATS(FATAL, "remove_mpdu_cmd = %d", + dp_stats_buf->remove_mpdu_cmd); + DP_TRACE_STATS(FATAL, "remove_msdu_cmd = %d", + dp_stats_buf->remove_msdu_cmd); + DP_TRACE_STATS(FATAL, "flush_cache_cmd = %d", + dp_stats_buf->flush_cache_cmd); + DP_TRACE_STATS(FATAL, "update_mpduq_cmd = %d", + dp_stats_buf->update_mpduq_cmd); + DP_TRACE_STATS(FATAL, "update_msduq_cmd = %d\n", + dp_stats_buf->update_msduq_cmd); +} + +/* + * dp_print_tx_de_eapol_packets_stats_tlv: display htt_tx_de_eapol_packets_stats + * @tag_buf: buffer containing the tlv htt_tx_de_eapol_packets_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_de_eapol_packets_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_de_eapol_packets_stats_tlv *dp_stats_buf = + (htt_tx_de_eapol_packets_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "m1_packets = %d", + 
dp_stats_buf->m1_packets); + DP_TRACE_STATS(FATAL, "m2_packets = %d", + dp_stats_buf->m2_packets); + DP_TRACE_STATS(FATAL, "m3_packets = %d", + dp_stats_buf->m3_packets); + DP_TRACE_STATS(FATAL, "m4_packets = %d", + dp_stats_buf->m4_packets); + DP_TRACE_STATS(FATAL, "g1_packets = %d", + dp_stats_buf->g1_packets); + DP_TRACE_STATS(FATAL, "g2_packets = %d\n", + dp_stats_buf->g2_packets); +} + +/* + * dp_print_tx_de_classify_failed_stats_tlv: display + * htt_tx_de_classify_failed_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_de_classify_failed_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_de_classify_failed_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_de_classify_failed_stats_tlv *dp_stats_buf = + (htt_tx_de_classify_failed_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "ap_bss_peer_not_found = %d", + dp_stats_buf->ap_bss_peer_not_found); + DP_TRACE_STATS(FATAL, "ap_bcast_mcast_no_peer = %d", + dp_stats_buf->ap_bcast_mcast_no_peer); + DP_TRACE_STATS(FATAL, "sta_delete_in_progress = %d", + dp_stats_buf->sta_delete_in_progress); + DP_TRACE_STATS(FATAL, "ibss_no_bss_peer = %d", + dp_stats_buf->ibss_no_bss_peer); + DP_TRACE_STATS(FATAL, "invaild_vdev_type = %d", + dp_stats_buf->invaild_vdev_type); + DP_TRACE_STATS(FATAL, "invalid_ast_peer_entry = %d", + dp_stats_buf->invalid_ast_peer_entry); + DP_TRACE_STATS(FATAL, "peer_entry_invalid = %d", + dp_stats_buf->peer_entry_invalid); + DP_TRACE_STATS(FATAL, "ethertype_not_ip = %d", + dp_stats_buf->ethertype_not_ip); + DP_TRACE_STATS(FATAL, "eapol_lookup_failed = %d", + dp_stats_buf->eapol_lookup_failed); + DP_TRACE_STATS(FATAL, "qpeer_not_allow_data = %d", + dp_stats_buf->qpeer_not_allow_data); + DP_TRACE_STATS(FATAL, "fse_tid_override = %d\n", + dp_stats_buf->fse_tid_override); +} + +/* + * dp_print_tx_de_classify_stats_tlv: display htt_tx_de_classify_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_de_classify_stats_tlv + * + 
* return:void + */ +static inline void dp_print_tx_de_classify_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_de_classify_stats_tlv *dp_stats_buf = + (htt_tx_de_classify_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_DE_CLASSIFY_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "arp_packets = %d", + dp_stats_buf->arp_packets); + DP_TRACE_STATS(FATAL, "igmp_packets = %d", + dp_stats_buf->igmp_packets); + DP_TRACE_STATS(FATAL, "dhcp_packets = %d", + dp_stats_buf->dhcp_packets); + DP_TRACE_STATS(FATAL, "host_inspected = %d", + dp_stats_buf->host_inspected); + DP_TRACE_STATS(FATAL, "htt_included = %d", + dp_stats_buf->htt_included); + DP_TRACE_STATS(FATAL, "htt_valid_mcs = %d", + dp_stats_buf->htt_valid_mcs); + DP_TRACE_STATS(FATAL, "htt_valid_nss = %d", + dp_stats_buf->htt_valid_nss); + DP_TRACE_STATS(FATAL, "htt_valid_preamble_type = %d", + dp_stats_buf->htt_valid_preamble_type); + DP_TRACE_STATS(FATAL, "htt_valid_chainmask = %d", + dp_stats_buf->htt_valid_chainmask); + DP_TRACE_STATS(FATAL, "htt_valid_guard_interval = %d", + dp_stats_buf->htt_valid_guard_interval); + DP_TRACE_STATS(FATAL, "htt_valid_retries = %d", + dp_stats_buf->htt_valid_retries); + DP_TRACE_STATS(FATAL, "htt_valid_bw_info = %d", + dp_stats_buf->htt_valid_bw_info); + DP_TRACE_STATS(FATAL, "htt_valid_power = %d", + dp_stats_buf->htt_valid_power); + DP_TRACE_STATS(FATAL, "htt_valid_key_flags = %d", + dp_stats_buf->htt_valid_key_flags); + DP_TRACE_STATS(FATAL, "htt_valid_no_encryption = %d", + dp_stats_buf->htt_valid_no_encryption); + DP_TRACE_STATS(FATAL, "fse_entry_count = %d", + dp_stats_buf->fse_entry_count); + DP_TRACE_STATS(FATAL, "fse_priority_be = %d", + dp_stats_buf->fse_priority_be); + DP_TRACE_STATS(FATAL, "fse_priority_high = %d", + dp_stats_buf->fse_priority_high); + DP_TRACE_STATS(FATAL, "fse_priority_low = %d", + dp_stats_buf->fse_priority_low); + DP_TRACE_STATS(FATAL, "fse_traffic_ptrn_be = %d", + dp_stats_buf->fse_traffic_ptrn_be); + DP_TRACE_STATS(FATAL, "fse_traffic_ptrn_over_sub = %d", + 
dp_stats_buf->fse_traffic_ptrn_over_sub); + DP_TRACE_STATS(FATAL, "fse_traffic_ptrn_bursty = %d", + dp_stats_buf->fse_traffic_ptrn_bursty); + DP_TRACE_STATS(FATAL, "fse_traffic_ptrn_interactive = %d", + dp_stats_buf->fse_traffic_ptrn_interactive); + DP_TRACE_STATS(FATAL, "fse_traffic_ptrn_periodic = %d", + dp_stats_buf->fse_traffic_ptrn_periodic); + DP_TRACE_STATS(FATAL, "fse_hwqueue_alloc = %d", + dp_stats_buf->fse_hwqueue_alloc); + DP_TRACE_STATS(FATAL, "fse_hwqueue_created = %d", + dp_stats_buf->fse_hwqueue_created); + DP_TRACE_STATS(FATAL, "fse_hwqueue_send_to_host = %d", + dp_stats_buf->fse_hwqueue_send_to_host); + DP_TRACE_STATS(FATAL, "mcast_entry = %d", + dp_stats_buf->mcast_entry); + DP_TRACE_STATS(FATAL, "bcast_entry = %d\n", + dp_stats_buf->bcast_entry); +} + +/* + * dp_print_tx_de_classify_status_stats_tlv: display + * htt_tx_de_classify_status_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_de_classify_status_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_de_classify_status_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_de_classify_status_stats_tlv *dp_stats_buf = + (htt_tx_de_classify_status_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "eok = %d", + dp_stats_buf->eok); + DP_TRACE_STATS(FATAL, "classify_done = %d", + dp_stats_buf->classify_done); + DP_TRACE_STATS(FATAL, "lookup_failed = %d", + dp_stats_buf->lookup_failed); + DP_TRACE_STATS(FATAL, "send_host_dhcp = %d", + dp_stats_buf->send_host_dhcp); + DP_TRACE_STATS(FATAL, "send_host_mcast = %d", + dp_stats_buf->send_host_mcast); + DP_TRACE_STATS(FATAL, "send_host_unknown_dest = %d", + dp_stats_buf->send_host_unknown_dest); + DP_TRACE_STATS(FATAL, "send_host = %d", + dp_stats_buf->send_host); + DP_TRACE_STATS(FATAL, "status_invalid = %d\n", + dp_stats_buf->status_invalid); +} + +/* + * dp_print_tx_de_enqueue_packets_stats_tlv: display + * htt_tx_de_enqueue_packets_stats_tlv + * @tag_buf: buffer containing the 
tlv htt_tx_de_enqueue_packets_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_de_enqueue_packets_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_de_enqueue_packets_stats_tlv *dp_stats_buf = + (htt_tx_de_enqueue_packets_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "enqueued_pkts = %d", + dp_stats_buf->enqueued_pkts); + DP_TRACE_STATS(FATAL, "to_tqm = %d", + dp_stats_buf->to_tqm); + DP_TRACE_STATS(FATAL, "to_tqm_bypass = %d\n", + dp_stats_buf->to_tqm_bypass); +} + +/* + * dp_print_tx_de_enqueue_discard_stats_tlv: display + * htt_tx_de_enqueue_discard_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_de_enqueue_discard_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_de_enqueue_discard_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_de_enqueue_discard_stats_tlv *dp_stats_buf = + (htt_tx_de_enqueue_discard_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "discarded_pkts = %d", + dp_stats_buf->discarded_pkts); + DP_TRACE_STATS(FATAL, "local_frames = %d", + dp_stats_buf->local_frames); + DP_TRACE_STATS(FATAL, "is_ext_msdu = %d\n", + dp_stats_buf->is_ext_msdu); +} + +/* + * dp_print_tx_de_compl_stats_tlv: display htt_tx_de_compl_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_de_compl_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_de_compl_stats_tlv(uint32_t *tag_buf) +{ + htt_tx_de_compl_stats_tlv *dp_stats_buf = + (htt_tx_de_compl_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_TX_DE_COMPL_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "tcl_dummy_frame = %d", + dp_stats_buf->tcl_dummy_frame); + DP_TRACE_STATS(FATAL, "tqm_dummy_frame = %d", + dp_stats_buf->tqm_dummy_frame); + DP_TRACE_STATS(FATAL, "tqm_notify_frame = %d", + dp_stats_buf->tqm_notify_frame); + DP_TRACE_STATS(FATAL, "fw2wbm_enq = %d", + dp_stats_buf->fw2wbm_enq); + DP_TRACE_STATS(FATAL, "tqm_bypass_frame = %d\n", + 
dp_stats_buf->tqm_bypass_frame);
+}
+
+/*
+ * dp_print_tx_de_cmn_stats_tlv: display htt_tx_de_cmn_stats_tlv
+ * @tag_buf: buffer containing the tlv htt_tx_de_cmn_stats_tlv
+ *
+ * return:void
+ */
+static inline void dp_print_tx_de_cmn_stats_tlv(uint32_t *tag_buf)
+{
+	htt_tx_de_cmn_stats_tlv *dp_stats_buf =
+		(htt_tx_de_cmn_stats_tlv *)tag_buf;
+
+	DP_TRACE_STATS(FATAL, "HTT_TX_DE_CMN_STATS_TLV:");
+	DP_TRACE_STATS(FATAL, "mac_id__word = %d",
+		       dp_stats_buf->mac_id__word);
+	DP_TRACE_STATS(FATAL, "tcl2fw_entry_count = %d",
+		       dp_stats_buf->tcl2fw_entry_count);
+	DP_TRACE_STATS(FATAL, "not_to_fw = %d",
+		       dp_stats_buf->not_to_fw);
+	DP_TRACE_STATS(FATAL, "invalid_pdev_vdev_peer = %d",
+		       dp_stats_buf->invalid_pdev_vdev_peer);
+	DP_TRACE_STATS(FATAL, "tcl_res_invalid_addrx = %d",
+		       dp_stats_buf->tcl_res_invalid_addrx);
+	DP_TRACE_STATS(FATAL, "wbm2fw_entry_count = %d",
+		       dp_stats_buf->wbm2fw_entry_count);
+	DP_TRACE_STATS(FATAL, "invalid_pdev = %d\n",
+		       dp_stats_buf->invalid_pdev);
+}
+
+/*
+ * dp_print_ring_if_stats_tlv: display htt_ring_if_stats_tlv
+ * @tag_buf: buffer containing the tlv htt_ring_if_stats_tlv
+ *
+ * return:void
+ */
+static inline void dp_print_ring_if_stats_tlv(uint32_t *tag_buf)
+{
+	htt_ring_if_stats_tlv *dp_stats_buf =
+		(htt_ring_if_stats_tlv *)tag_buf;
+	uint8_t i;
+	uint16_t index = 0;
+	char *wm_hit_count = qdf_mem_malloc(DP_MAX_STRING_LEN);
+
+	if (!wm_hit_count) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			  FL("Output buffer not allocated\n"));
+		return;
+	}
+
+	DP_TRACE_STATS(FATAL, "HTT_RING_IF_STATS_TLV:");
+	DP_TRACE_STATS(FATAL, "base_addr = %d",
+		       dp_stats_buf->base_addr);
+	DP_TRACE_STATS(FATAL, "elem_size = %d",
+		       dp_stats_buf->elem_size);
+	DP_TRACE_STATS(FATAL, "num_elems__prefetch_tail_idx = %d",
+		       dp_stats_buf->num_elems__prefetch_tail_idx);
+	DP_TRACE_STATS(FATAL, "head_idx__tail_idx = %d",
+		       dp_stats_buf->head_idx__tail_idx);
+	DP_TRACE_STATS(FATAL, "shadow_head_idx__shadow_tail_idx = %d",
+		       dp_stats_buf->shadow_head_idx__shadow_tail_idx);
+	DP_TRACE_STATS(FATAL, "num_tail_incr = %d",
+		       dp_stats_buf->num_tail_incr);
+	DP_TRACE_STATS(FATAL, "lwm_thresh__hwm_thresh = %d",
+		       dp_stats_buf->lwm_thresh__hwm_thresh);
+	DP_TRACE_STATS(FATAL, "overrun_hit_count = %d",
+		       dp_stats_buf->overrun_hit_count);
+	DP_TRACE_STATS(FATAL, "underrun_hit_count = %d",
+		       dp_stats_buf->underrun_hit_count);
+	DP_TRACE_STATS(FATAL, "prod_blockwait_count = %d",
+		       dp_stats_buf->prod_blockwait_count);
+	DP_TRACE_STATS(FATAL, "cons_blockwait_count = %d",
+		       dp_stats_buf->cons_blockwait_count);
+
+	for (i = 0; i < DP_HTT_LOW_WM_HIT_COUNT_LEN; i++) {
+		index += qdf_snprint(&wm_hit_count[index],
+				     DP_MAX_STRING_LEN - index,
+				     " %d:%d,", i,
+				     dp_stats_buf->low_wm_hit_count[i]);
+	}
+	DP_TRACE_STATS(FATAL, "low_wm_hit_count = %s ", wm_hit_count);
+
+	qdf_mem_zero(wm_hit_count, DP_MAX_STRING_LEN);
+
+	index = 0;
+	for (i = 0; i < DP_HTT_HIGH_WM_HIT_COUNT_LEN; i++) {
+		index += qdf_snprint(&wm_hit_count[index],
+				     DP_MAX_STRING_LEN - index,
+				     " %d:%d,", i,
+				     dp_stats_buf->high_wm_hit_count[i]);
+	}
+	DP_TRACE_STATS(FATAL, "high_wm_hit_count = %s\n", wm_hit_count);
+
+	/* wm_hit_count was heap-allocated above and previously leaked on
+	 * every invocation; release it like the sibling printers do
+	 * (e.g. dp_print_sfm_client_user_tlv_v).
+	 */
+	qdf_mem_free(wm_hit_count);
+}
+
+/*
+ * dp_print_ring_if_cmn_tlv: display htt_ring_if_cmn_tlv
+ * @tag_buf: buffer containing the tlv htt_ring_if_cmn_tlv
+ *
+ * return:void
+ */
+static inline void dp_print_ring_if_cmn_tlv(uint32_t *tag_buf)
+{
+	htt_ring_if_cmn_tlv *dp_stats_buf =
+		(htt_ring_if_cmn_tlv *)tag_buf;
+
+	DP_TRACE_STATS(FATAL, "HTT_RING_IF_CMN_TLV:");
+	DP_TRACE_STATS(FATAL, "mac_id__word = %d",
+		       dp_stats_buf->mac_id__word);
+	DP_TRACE_STATS(FATAL, "num_records = %d\n",
+		       dp_stats_buf->num_records);
+}
+
+/*
+ * dp_print_sfm_client_user_tlv_v: display htt_sfm_client_user_tlv_v
+ * @tag_buf: buffer containing the tlv htt_sfm_client_user_tlv_v
+ *
+ * return:void
+ */
+static inline void dp_print_sfm_client_user_tlv_v(uint32_t *tag_buf)
+{
+	htt_sfm_client_user_tlv_v *dp_stats_buf =
+		(htt_sfm_client_user_tlv_v *)tag_buf;
+	uint8_t i;
+	uint16_t
index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *dwords_used_by_user_n = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!dwords_used_by_user_n) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + DP_TRACE_STATS(FATAL, "HTT_SFM_CLIENT_USER_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&dwords_used_by_user_n[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->dwords_used_by_user_n[i]); + } + DP_TRACE_STATS(FATAL, "dwords_used_by_user_n = %s\n", + dwords_used_by_user_n); + qdf_mem_free(dwords_used_by_user_n); +} + +/* + * dp_print_sfm_client_tlv: display htt_sfm_client_tlv + * @tag_buf: buffer containing the tlv htt_sfm_client_tlv + * + * return:void + */ +static inline void dp_print_sfm_client_tlv(uint32_t *tag_buf) +{ + htt_sfm_client_tlv *dp_stats_buf = + (htt_sfm_client_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_SFM_CLIENT_TLV:"); + DP_TRACE_STATS(FATAL, "client_id = %d", + dp_stats_buf->client_id); + DP_TRACE_STATS(FATAL, "buf_min = %d", + dp_stats_buf->buf_min); + DP_TRACE_STATS(FATAL, "buf_max = %d", + dp_stats_buf->buf_max); + DP_TRACE_STATS(FATAL, "buf_busy = %d", + dp_stats_buf->buf_busy); + DP_TRACE_STATS(FATAL, "buf_alloc = %d", + dp_stats_buf->buf_alloc); + DP_TRACE_STATS(FATAL, "buf_avail = %d", + dp_stats_buf->buf_avail); + DP_TRACE_STATS(FATAL, "num_users = %d\n", + dp_stats_buf->num_users); +} + +/* + * dp_print_sfm_cmn_tlv: display htt_sfm_cmn_tlv + * @tag_buf: buffer containing the tlv htt_sfm_cmn_tlv + * + * return:void + */ +static inline void dp_print_sfm_cmn_tlv(uint32_t *tag_buf) +{ + htt_sfm_cmn_tlv *dp_stats_buf = + (htt_sfm_cmn_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_SFM_CMN_TLV:"); + DP_TRACE_STATS(FATAL, "mac_id__word = %d", + dp_stats_buf->mac_id__word); + DP_TRACE_STATS(FATAL, "buf_total = %d", + dp_stats_buf->buf_total); + DP_TRACE_STATS(FATAL, "mem_empty = %d", + dp_stats_buf->mem_empty); + 
DP_TRACE_STATS(FATAL, "deallocate_bufs = %d", + dp_stats_buf->deallocate_bufs); + DP_TRACE_STATS(FATAL, "num_records = %d\n", + dp_stats_buf->num_records); +} + +/* + * dp_print_sring_stats_tlv: display htt_sring_stats_tlv + * @tag_buf: buffer containing the tlv htt_sring_stats_tlv + * + * return:void + */ +static inline void dp_print_sring_stats_tlv(uint32_t *tag_buf) +{ + htt_sring_stats_tlv *dp_stats_buf = + (htt_sring_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_SRING_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "mac_id__ring_id__arena__ep = %d", + dp_stats_buf->mac_id__ring_id__arena__ep); + DP_TRACE_STATS(FATAL, "base_addr_lsb = %d", + dp_stats_buf->base_addr_lsb); + DP_TRACE_STATS(FATAL, "base_addr_msb = %d", + dp_stats_buf->base_addr_msb); + DP_TRACE_STATS(FATAL, "ring_size = %d", + dp_stats_buf->ring_size); + DP_TRACE_STATS(FATAL, "elem_size = %d", + dp_stats_buf->elem_size); + DP_TRACE_STATS(FATAL, "num_avail_words__num_valid_words = %d", + dp_stats_buf->num_avail_words__num_valid_words); + DP_TRACE_STATS(FATAL, "head_ptr__tail_ptr = %d", + dp_stats_buf->head_ptr__tail_ptr); + DP_TRACE_STATS(FATAL, "consumer_empty__producer_full = %d", + dp_stats_buf->consumer_empty__producer_full); + DP_TRACE_STATS(FATAL, "prefetch_count__internal_tail_ptr = %d\n", + dp_stats_buf->prefetch_count__internal_tail_ptr); +} + +/* + * dp_print_sring_cmn_tlv: display htt_sring_cmn_tlv + * @tag_buf: buffer containing the tlv htt_sring_cmn_tlv + * + * return:void + */ +static inline void dp_print_sring_cmn_tlv(uint32_t *tag_buf) +{ + htt_sring_cmn_tlv *dp_stats_buf = + (htt_sring_cmn_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_SRING_CMN_TLV:"); + DP_TRACE_STATS(FATAL, "num_records = %d\n", + dp_stats_buf->num_records); +} + +/* + * dp_print_tx_pdev_rate_stats_tlv: display htt_tx_pdev_rate_stats_tlv + * @tag_buf: buffer containing the tlv htt_tx_pdev_rate_stats_tlv + * + * return:void + */ +static inline void dp_print_tx_pdev_rate_stats_tlv(uint32_t *tag_buf) +{ + 
htt_tx_pdev_rate_stats_tlv *dp_stats_buf = + (htt_tx_pdev_rate_stats_tlv *)tag_buf; + uint8_t i, j; + uint16_t index = 0; + char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS]; + char *str_buf = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!str_buf) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + for (i = 0; i < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; i++) { + tx_gi[i] = (char *)qdf_mem_malloc(DP_MAX_STRING_LEN); + } + + DP_TRACE_STATS(FATAL, "HTT_TX_PDEV_RATE_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "mac_id__word = %d", + dp_stats_buf->mac_id__word); + DP_TRACE_STATS(FATAL, "tx_ldpc = %d", + dp_stats_buf->tx_ldpc); + DP_TRACE_STATS(FATAL, "rts_cnt = %d", + dp_stats_buf->rts_cnt); + DP_TRACE_STATS(FATAL, "rts_success = %d", + dp_stats_buf->rts_success); + + DP_TRACE_STATS(FATAL, "ack_rssi = %d", + dp_stats_buf->ack_rssi); + + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_TX_MCS_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->tx_mcs[i]); + } + DP_TRACE_STATS(FATAL, "tx_mcs = %s ", str_buf); + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_TX_SU_MCS_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->tx_su_mcs[i]); + } + DP_TRACE_STATS(FATAL, "tx_su_mcs = %s ", str_buf); + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_TX_MU_MCS_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->tx_mu_mcs[i]); + } + DP_TRACE_STATS(FATAL, "tx_mu_mcs = %s ", str_buf); + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_TX_NSS_LEN; i++) { + /* 0 stands for NSS 1, 1 stands for NSS 2, etc. 
*/ + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", (i + 1), + dp_stats_buf->tx_nss[i]); + } + DP_TRACE_STATS(FATAL, "tx_nss = %s ", str_buf); + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_TX_BW_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->tx_bw[i]); + } + DP_TRACE_STATS(FATAL, "tx_bw = %s ", str_buf); + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->tx_stbc[i]); + } + DP_TRACE_STATS(FATAL, "tx_stbc = %s ", str_buf); + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_TX_PREAM_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->tx_pream[i]); + } + DP_TRACE_STATS(FATAL, "tx_pream = %s ", str_buf); + + for (j = 0; j < DP_HTT_PDEV_TX_GI_LEN; j++) { + index = 0; + for (i = 0; i < HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&tx_gi[j][index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->tx_gi[j][i]); + } + DP_TRACE_STATS(FATAL, "tx_gi[%d] = %s ", j, tx_gi[j]); + } + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_TX_DCM_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->tx_dcm[i]); + } + DP_TRACE_STATS(FATAL, "tx_dcm = %s\n", str_buf); + + for (i = 0; i < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; i++) + qdf_mem_free(tx_gi[i]); + + qdf_mem_free(str_buf); +} + +/* + * dp_print_rx_pdev_rate_stats_tlv: display htt_rx_pdev_rate_stats_tlv + * @tag_buf: buffer containing the tlv htt_rx_pdev_rate_stats_tlv + * + * return:void + */ +static inline void dp_print_rx_pdev_rate_stats_tlv(uint32_t *tag_buf) +{ + htt_rx_pdev_rate_stats_tlv *dp_stats_buf = + 
(htt_rx_pdev_rate_stats_tlv *)tag_buf; + uint8_t i, j; + uint16_t index = 0; + char *rssi_chain[DP_HTT_RSSI_CHAIN_LEN]; + char *rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]; + char *str_buf = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!str_buf) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + for (i = 0; i < DP_HTT_RSSI_CHAIN_LEN; i++) + rssi_chain[i] = qdf_mem_malloc(DP_MAX_STRING_LEN); + for (i = 0; i < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; i++) + rx_gi[i] = qdf_mem_malloc(DP_MAX_STRING_LEN); + + DP_TRACE_STATS(FATAL, "HTT_RX_PDEV_RATE_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "mac_id__word = %d", + dp_stats_buf->mac_id__word); + DP_TRACE_STATS(FATAL, "nsts = %d", + dp_stats_buf->nsts); + DP_TRACE_STATS(FATAL, "rx_ldpc = %d", + dp_stats_buf->rx_ldpc); + DP_TRACE_STATS(FATAL, "rts_cnt = %d", + dp_stats_buf->rts_cnt); + DP_TRACE_STATS(FATAL, "rssi_mgmt = %d", + dp_stats_buf->rssi_mgmt); + DP_TRACE_STATS(FATAL, "rssi_data = %d", + dp_stats_buf->rssi_data); + DP_TRACE_STATS(FATAL, "rssi_comb = %d", + dp_stats_buf->rssi_comb); + + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_RX_MCS_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->rx_mcs[i]); + } + DP_TRACE_STATS(FATAL, "rx_mcs = %s ", str_buf); + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_RX_NSS_LEN; i++) { + /* 0 stands for NSS 1, 1 stands for NSS 2, etc. 
*/ + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", (i + 1), + dp_stats_buf->rx_nss[i]); + } + DP_TRACE_STATS(FATAL, "rx_nss = %s ", str_buf); + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_RX_DCM_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->rx_dcm[i]); + } + DP_TRACE_STATS(FATAL, "rx_dcm = %s ", str_buf); + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->rx_stbc[i]); + } + DP_TRACE_STATS(FATAL, "rx_stbc = %s ", str_buf); + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_RX_BW_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->rx_bw[i]); + } + DP_TRACE_STATS(FATAL, "rx_bw = %s ", str_buf); + + for (j = 0; j < DP_HTT_RSSI_CHAIN_LEN; j++) { + index = 0; + for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++) { + index += qdf_snprint(&rssi_chain[j][index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->rssi_chain[j][i]); + } + DP_TRACE_STATS(FATAL, "rssi_chain[%d] = %s ", j, rssi_chain[j]); + } + + for (j = 0; j < DP_HTT_RX_GI_LEN; j++) { + index = 0; + for (i = 0; i < HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS; i++) { + index += qdf_snprint(&rx_gi[j][index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->rx_gi[j][i]); + } + DP_TRACE_STATS(FATAL, "rx_gi[%d] = %s ", j, rx_gi[j]); + } + + index = 0; + qdf_mem_set(str_buf, DP_MAX_STRING_LEN, 0x0); + for (i = 0; i < DP_HTT_RX_PREAM_LEN; i++) { + index += qdf_snprint(&str_buf[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->rx_pream[i]); + } + DP_TRACE_STATS(FATAL, "rx_pream = %s\n", str_buf); + for (i = 0; i < DP_HTT_RSSI_CHAIN_LEN; i++) + qdf_mem_free(rssi_chain[i]); + for (i = 
0; i < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; i++) + qdf_mem_free(rx_gi[i]); + + qdf_mem_free(str_buf); +} + +/* + * dp_print_rx_soc_fw_stats_tlv: display htt_rx_soc_fw_stats_tlv + * @tag_buf: buffer containing the tlv htt_rx_soc_fw_stats_tlv + * + * return:void + */ +static inline void dp_print_rx_soc_fw_stats_tlv(uint32_t *tag_buf) +{ + htt_rx_soc_fw_stats_tlv *dp_stats_buf = + (htt_rx_soc_fw_stats_tlv *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_RX_SOC_FW_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "fw_reo_ring_data_msdu = %d", + dp_stats_buf->fw_reo_ring_data_msdu); + DP_TRACE_STATS(FATAL, "fw_to_host_data_msdu_bcmc = %d", + dp_stats_buf->fw_to_host_data_msdu_bcmc); + DP_TRACE_STATS(FATAL, "fw_to_host_data_msdu_uc = %d", + dp_stats_buf->fw_to_host_data_msdu_uc); + DP_TRACE_STATS(FATAL, "ofld_remote_data_buf_recycle_cnt = %d", + dp_stats_buf->ofld_remote_data_buf_recycle_cnt); + DP_TRACE_STATS(FATAL, "ofld_remote_free_buf_indication_cnt = %d", + dp_stats_buf->ofld_remote_free_buf_indication_cnt); + DP_TRACE_STATS(FATAL, "ofld_buf_to_host_data_msdu_uc = %d ", + dp_stats_buf->ofld_buf_to_host_data_msdu_uc); + DP_TRACE_STATS(FATAL, "reo_fw_ring_to_host_data_msdu_uc = %d ", + dp_stats_buf->reo_fw_ring_to_host_data_msdu_uc); + DP_TRACE_STATS(FATAL, "wbm_sw_ring_reap = %d ", + dp_stats_buf->wbm_sw_ring_reap); + DP_TRACE_STATS(FATAL, "wbm_forward_to_host_cnt = %d ", + dp_stats_buf->wbm_forward_to_host_cnt); + DP_TRACE_STATS(FATAL, "wbm_target_recycle_cnt = %d ", + dp_stats_buf->wbm_target_recycle_cnt); + DP_TRACE_STATS(FATAL, "target_refill_ring_recycle_cnt = %d", + dp_stats_buf->target_refill_ring_recycle_cnt); + +} + +/* + * dp_print_rx_soc_fw_refill_ring_empty_tlv_v: display + * htt_rx_soc_fw_refill_ring_empty_tlv_v + * @tag_buf: buffer containing the tlv htt_rx_soc_fw_refill_ring_empty_tlv_v + * + * return:void + */ +static inline void dp_print_rx_soc_fw_refill_ring_empty_tlv_v(uint32_t *tag_buf) +{ + htt_rx_soc_fw_refill_ring_empty_tlv_v *dp_stats_buf = + 
(htt_rx_soc_fw_refill_ring_empty_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *refill_ring_empty_cnt = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!refill_ring_empty_cnt) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_RX_STATS_REFILL_MAX_RING); + + DP_TRACE_STATS(FATAL, "HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&refill_ring_empty_cnt[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->refill_ring_empty_cnt[i]); + } + DP_TRACE_STATS(FATAL, "refill_ring_empty_cnt = %s\n", + refill_ring_empty_cnt); + qdf_mem_free(refill_ring_empty_cnt); +} + +/* + * dp_print_rx_soc_fw_refill_ring_num_refill_tlv_v: display + * htt_rx_soc_fw_refill_ring_num_refill_tlv_v + * @tag_buf: buffer containing the tlv htt_rx_soc_fw_refill_ring_num_refill_tlv + * + * return:void + */ +static inline void dp_print_rx_soc_fw_refill_ring_num_refill_tlv_v( + uint32_t *tag_buf) +{ + htt_rx_soc_fw_refill_ring_num_refill_tlv_v *dp_stats_buf = + (htt_rx_soc_fw_refill_ring_num_refill_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *refill_ring_num_refill = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!refill_ring_num_refill) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_TX_PDEV_MAX_URRN_STATS); + + DP_TRACE_STATS(FATAL, "HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&refill_ring_num_refill[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->refill_ring_num_refill[i]); + } + DP_TRACE_STATS(FATAL, "refill_ring_num_refill = %s\n", + refill_ring_num_refill); + qdf_mem_free(refill_ring_num_refill); +} + 
+/* + * dp_print_rx_pdev_fw_stats_tlv: display htt_rx_pdev_fw_stats_tlv + * @tag_buf: buffer containing the tlv htt_rx_pdev_fw_stats_tlv + * + * return:void + */ +static inline void dp_print_rx_pdev_fw_stats_tlv(uint32_t *tag_buf) +{ + htt_rx_pdev_fw_stats_tlv *dp_stats_buf = + (htt_rx_pdev_fw_stats_tlv *)tag_buf; + uint8_t i; + uint16_t index = 0; + char fw_ring_mgmt_subtype[DP_MAX_STRING_LEN]; + char fw_ring_ctrl_subtype[DP_MAX_STRING_LEN]; + + DP_TRACE_STATS(FATAL, "HTT_RX_PDEV_FW_STATS_TLV:"); + DP_TRACE_STATS(FATAL, "mac_id__word = %d", + dp_stats_buf->mac_id__word); + DP_TRACE_STATS(FATAL, "ppdu_recvd = %d", + dp_stats_buf->ppdu_recvd); + DP_TRACE_STATS(FATAL, "mpdu_cnt_fcs_ok = %d", + dp_stats_buf->mpdu_cnt_fcs_ok); + DP_TRACE_STATS(FATAL, "mpdu_cnt_fcs_err = %d", + dp_stats_buf->mpdu_cnt_fcs_err); + DP_TRACE_STATS(FATAL, "tcp_msdu_cnt = %d", + dp_stats_buf->tcp_msdu_cnt); + DP_TRACE_STATS(FATAL, "tcp_ack_msdu_cnt = %d", + dp_stats_buf->tcp_ack_msdu_cnt); + DP_TRACE_STATS(FATAL, "udp_msdu_cnt = %d", + dp_stats_buf->udp_msdu_cnt); + DP_TRACE_STATS(FATAL, "other_msdu_cnt = %d", + dp_stats_buf->other_msdu_cnt); + DP_TRACE_STATS(FATAL, "fw_ring_mpdu_ind = %d", + dp_stats_buf->fw_ring_mpdu_ind); + + for (i = 0; i < DP_HTT_FW_RING_MGMT_SUBTYPE_LEN; i++) { + index += qdf_snprint(&fw_ring_mgmt_subtype[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->fw_ring_mgmt_subtype[i]); + } + DP_TRACE_STATS(FATAL, "fw_ring_mgmt_subtype = %s ", fw_ring_mgmt_subtype); + + index = 0; + for (i = 0; i < DP_HTT_FW_RING_CTRL_SUBTYPE_LEN; i++) { + index += qdf_snprint(&fw_ring_ctrl_subtype[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->fw_ring_ctrl_subtype[i]); + } + DP_TRACE_STATS(FATAL, "fw_ring_ctrl_subtype = %s ", fw_ring_ctrl_subtype); + DP_TRACE_STATS(FATAL, "fw_ring_mcast_data_msdu = %d", + dp_stats_buf->fw_ring_mcast_data_msdu); + DP_TRACE_STATS(FATAL, "fw_ring_bcast_data_msdu = %d", + dp_stats_buf->fw_ring_bcast_data_msdu); + 
DP_TRACE_STATS(FATAL, "fw_ring_ucast_data_msdu = %d", + dp_stats_buf->fw_ring_ucast_data_msdu); + DP_TRACE_STATS(FATAL, "fw_ring_null_data_msdu = %d", + dp_stats_buf->fw_ring_null_data_msdu); + DP_TRACE_STATS(FATAL, "fw_ring_mpdu_drop = %d", + dp_stats_buf->fw_ring_mpdu_drop); + DP_TRACE_STATS(FATAL, "ofld_local_data_ind_cnt = %d", + dp_stats_buf->ofld_local_data_ind_cnt); + DP_TRACE_STATS(FATAL, "ofld_local_data_buf_recycle_cnt = %d", + dp_stats_buf->ofld_local_data_buf_recycle_cnt); + DP_TRACE_STATS(FATAL, "drx_local_data_ind_cnt = %d", + dp_stats_buf->drx_local_data_ind_cnt); + DP_TRACE_STATS(FATAL, "drx_local_data_buf_recycle_cnt = %d", + dp_stats_buf->drx_local_data_buf_recycle_cnt); + DP_TRACE_STATS(FATAL, "local_nondata_ind_cnt = %d", + dp_stats_buf->local_nondata_ind_cnt); + DP_TRACE_STATS(FATAL, "local_nondata_buf_recycle_cnt = %d", + dp_stats_buf->local_nondata_buf_recycle_cnt); + DP_TRACE_STATS(FATAL, "fw_status_buf_ring_refill_cnt = %d", + dp_stats_buf->fw_status_buf_ring_refill_cnt); + DP_TRACE_STATS(FATAL, "fw_status_buf_ring_empty_cnt = %d", + dp_stats_buf->fw_status_buf_ring_empty_cnt); + DP_TRACE_STATS(FATAL, "fw_pkt_buf_ring_refill_cnt = %d", + dp_stats_buf->fw_pkt_buf_ring_refill_cnt); + DP_TRACE_STATS(FATAL, "fw_pkt_buf_ring_empty_cnt = %d", + dp_stats_buf->fw_pkt_buf_ring_empty_cnt); + DP_TRACE_STATS(FATAL, "fw_link_buf_ring_refill_cnt = %d", + dp_stats_buf->fw_link_buf_ring_refill_cnt); + DP_TRACE_STATS(FATAL, "fw_link_buf_ring_empty_cnt = %d", + dp_stats_buf->fw_link_buf_ring_empty_cnt); + DP_TRACE_STATS(FATAL, "host_pkt_buf_ring_refill_cnt = %d", + dp_stats_buf->host_pkt_buf_ring_refill_cnt); + DP_TRACE_STATS(FATAL, "host_pkt_buf_ring_empty_cnt = %d", + dp_stats_buf->host_pkt_buf_ring_empty_cnt); + DP_TRACE_STATS(FATAL, "mon_pkt_buf_ring_refill_cnt = %d", + dp_stats_buf->mon_pkt_buf_ring_refill_cnt); + DP_TRACE_STATS(FATAL, "mon_pkt_buf_ring_empty_cnt = %d", + dp_stats_buf->mon_pkt_buf_ring_empty_cnt); + DP_TRACE_STATS(FATAL, 
"mon_status_buf_ring_refill_cnt = %d", + dp_stats_buf->mon_status_buf_ring_refill_cnt); + DP_TRACE_STATS(FATAL, "mon_status_buf_ring_empty_cnt = %d", + dp_stats_buf->mon_status_buf_ring_empty_cnt); + DP_TRACE_STATS(FATAL, "mon_desc_buf_ring_refill_cnt = %d", + dp_stats_buf->mon_desc_buf_ring_refill_cnt); + DP_TRACE_STATS(FATAL, "mon_desc_buf_ring_empty_cnt = %d", + dp_stats_buf->mon_desc_buf_ring_empty_cnt); + DP_TRACE_STATS(FATAL, "mon_dest_ring_update_cnt = %d", + dp_stats_buf->mon_dest_ring_update_cnt); + DP_TRACE_STATS(FATAL, "mon_dest_ring_full_cnt = %d", + dp_stats_buf->mon_dest_ring_full_cnt); + DP_TRACE_STATS(FATAL, "rx_suspend_cnt = %d", + dp_stats_buf->rx_suspend_cnt); + DP_TRACE_STATS(FATAL, "rx_suspend_fail_cnt = %d", + dp_stats_buf->rx_suspend_fail_cnt); + DP_TRACE_STATS(FATAL, "rx_resume_cnt = %d", + dp_stats_buf->rx_resume_cnt); + DP_TRACE_STATS(FATAL, "rx_resume_fail_cnt = %d", + dp_stats_buf->rx_resume_fail_cnt); + DP_TRACE_STATS(FATAL, "rx_ring_switch_cnt = %d", + dp_stats_buf->rx_ring_switch_cnt); + DP_TRACE_STATS(FATAL, "rx_ring_restore_cnt = %d", + dp_stats_buf->rx_ring_restore_cnt); + DP_TRACE_STATS(FATAL, "rx_flush_cnt = %d\n", + dp_stats_buf->rx_flush_cnt); +} + +/* + * dp_print_rx_pdev_fw_ring_mpdu_err_tlv_v: display + * htt_rx_pdev_fw_ring_mpdu_err_tlv_v + * @tag_buf: buffer containing the tlv htt_rx_pdev_fw_ring_mpdu_err_tlv_v + * + * return:void + */ +static inline void dp_print_rx_pdev_fw_ring_mpdu_err_tlv_v(uint32_t *tag_buf) +{ + htt_rx_pdev_fw_ring_mpdu_err_tlv_v *dp_stats_buf = + (htt_rx_pdev_fw_ring_mpdu_err_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + char *fw_ring_mpdu_err = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!fw_ring_mpdu_err) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + DP_TRACE_STATS(FATAL, "HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:"); + for (i = 0; i < DP_HTT_FW_RING_MPDU_ERR_LEN; i++) { + index += qdf_snprint(&fw_ring_mpdu_err[index], + 
DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->fw_ring_mpdu_err[i]); + } + DP_TRACE_STATS(FATAL, "fw_ring_mpdu_err = %s\n", fw_ring_mpdu_err); + qdf_mem_free(fw_ring_mpdu_err); +} + +/* + * dp_print_rx_pdev_fw_mpdu_drop_tlv_v: display htt_rx_pdev_fw_mpdu_drop_tlv_v + * @tag_buf: buffer containing the tlv htt_rx_pdev_fw_mpdu_drop_tlv_v + * + * return:void + */ +static inline void dp_print_rx_pdev_fw_mpdu_drop_tlv_v(uint32_t *tag_buf) +{ + htt_rx_pdev_fw_mpdu_drop_tlv_v *dp_stats_buf = + (htt_rx_pdev_fw_mpdu_drop_tlv_v *)tag_buf; + uint8_t i; + uint16_t index = 0; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + char *fw_mpdu_drop = qdf_mem_malloc(DP_MAX_STRING_LEN); + + if (!fw_mpdu_drop) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Output buffer not allocated\n")); + return; + } + + tag_len = qdf_min(tag_len, (uint32_t)HTT_RX_STATS_FW_DROP_REASON_MAX); + + DP_TRACE_STATS(FATAL, "HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:"); + for (i = 0; i < tag_len; i++) { + index += qdf_snprint(&fw_mpdu_drop[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->fw_mpdu_drop[i]); + } + DP_TRACE_STATS(FATAL, "fw_mpdu_drop = %s\n", fw_mpdu_drop); + qdf_mem_free(fw_mpdu_drop); +} + +/* + * dp_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv() - Accounts for rxdma error + * packets + * + * tag_buf - Buffer + * Return - NULL + */ +static inline void dp_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv(uint32_t *tag_buf) +{ + htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v *dp_stats_buf = + (htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v *)tag_buf; + + uint8_t i; + uint16_t index = 0; + char rxdma_err_cnt[DP_MAX_STRING_LEN]; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + + tag_len = qdf_min(tag_len, (uint32_t)HTT_RX_RXDMA_MAX_ERR_CODE); + + DP_TRACE_STATS(FATAL, "HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V"); + + for (i = 0; i < tag_len; i++) { + index += snprintf(&rxdma_err_cnt[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", 
i, + dp_stats_buf->rxdma_err[i]); + } + + DP_TRACE_STATS(FATAL, "rxdma_err = %s\n", rxdma_err_cnt); +} + +/* + * dp_print_rx_soc_fw_refill_ring_num_reo_err_tlv() - Accounts for reo error + * packets + * + * tag_buf - Buffer + * Return - NULL + */ +static inline void dp_print_rx_soc_fw_refill_ring_num_reo_err_tlv(uint32_t *tag_buf) +{ + htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v *dp_stats_buf = + (htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v *)tag_buf; + + uint8_t i; + uint16_t index = 0; + char reo_err_cnt[DP_MAX_STRING_LEN]; + uint32_t tag_len = (HTT_STATS_TLV_LENGTH_GET(*tag_buf) >> 2); + + tag_len = qdf_min(tag_len, (uint32_t)HTT_RX_REO_MAX_ERR_CODE); + + DP_TRACE_STATS(FATAL, "HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V"); + + for (i = 0; i < tag_len; i++) { + index += snprintf(&reo_err_cnt[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, + dp_stats_buf->reo_err[i]); + } + + DP_TRACE_STATS(FATAL, "reo_err = %s\n", reo_err_cnt); +} + +/* + * dp_print_rx_reo_debug_stats_tlv() - REO Statistics + * + * tag_buf - Buffer + * Return - NULL + */ +static inline void dp_print_rx_reo_debug_stats_tlv(uint32_t *tag_buf) +{ + htt_rx_reo_resource_stats_tlv_v *dp_stats_buf = + (htt_rx_reo_resource_stats_tlv_v *)tag_buf; + + DP_TRACE_STATS(FATAL, "HTT_RX_REO_RESOURCE_STATS_TLV"); + + DP_TRACE_STATS(FATAL, "sample_id: %d ", + dp_stats_buf->sample_id); + DP_TRACE_STATS(FATAL, "total_max: %d ", + dp_stats_buf->total_max); + DP_TRACE_STATS(FATAL, "total_avg: %d ", + dp_stats_buf->total_avg); + DP_TRACE_STATS(FATAL, "total_sample: %d ", + dp_stats_buf->total_sample); + DP_TRACE_STATS(FATAL, "non_zeros_avg: %d ", + dp_stats_buf->non_zeros_avg); + DP_TRACE_STATS(FATAL, "non_zeros_sample: %d ", + dp_stats_buf->non_zeros_sample); + DP_TRACE_STATS(FATAL, "last_non_zeros_max: %d ", + dp_stats_buf->last_non_zeros_max); + DP_TRACE_STATS(FATAL, "last_non_zeros_min: %d ", + dp_stats_buf->last_non_zeros_min); + DP_TRACE_STATS(FATAL, "last_non_zeros_avg: %d ", + 
dp_stats_buf->last_non_zeros_avg); + DP_TRACE_STATS(FATAL, "last_non_zeros_sample: %d\n ", + dp_stats_buf->last_non_zeros_sample); +} + +/* + * dp_print_rx_pdev_fw_stats_phy_err_tlv() - Accounts for phy errors + * + * tag_buf - Buffer + * Return - NULL + */ +static inline void dp_print_rx_pdev_fw_stats_phy_err_tlv(uint32_t *tag_buf) +{ + htt_rx_pdev_fw_stats_phy_err_tlv *dp_stats_buf = + (htt_rx_pdev_fw_stats_phy_err_tlv *)tag_buf; + + uint8_t i = 0; + uint16_t index = 0; + char phy_errs[DP_MAX_STRING_LEN]; + + DP_TRACE_STATS(FATAL, "HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV"); + + DP_TRACE_STATS(FATAL, "mac_id_word: %d", + dp_stats_buf->mac_id__word); + DP_TRACE_STATS(FATAL, "total_phy_err_cnt: %d", + dp_stats_buf->total_phy_err_cnt); + + for (i = 0; i < HTT_STATS_PHY_ERR_MAX; i++) { + index += snprintf(&phy_errs[index], + DP_MAX_STRING_LEN - index, + " %d:%d,", i, dp_stats_buf->phy_err[i]); + } + + DP_TRACE_STATS(FATAL, "phy_errs: %s\n", phy_errs); +} + +/* + * dp_htt_stats_print_tag: function to select the tag type and + * print the corresponding tag structure + * @tag_type: tag type that is to be printed + * @tag_buf: pointer to the tag structure + * + * return: void + */ +void dp_htt_stats_print_tag(uint8_t tag_type, uint32_t *tag_buf) +{ + switch (tag_type) { + case HTT_STATS_TX_PDEV_CMN_TAG: + dp_print_tx_pdev_stats_cmn_tlv(tag_buf); + break; + case HTT_STATS_TX_PDEV_UNDERRUN_TAG: + dp_print_tx_pdev_stats_urrn_tlv_v(tag_buf); + break; + case HTT_STATS_TX_PDEV_SIFS_TAG: + dp_print_tx_pdev_stats_sifs_tlv_v(tag_buf); + break; + case HTT_STATS_TX_PDEV_FLUSH_TAG: + dp_print_tx_pdev_stats_flush_tlv_v(tag_buf); + break; + + case HTT_STATS_TX_PDEV_PHY_ERR_TAG: + dp_print_tx_pdev_stats_phy_err_tlv_v(tag_buf); + break; + + case HTT_STATS_STRING_TAG: + dp_print_stats_string_tlv(tag_buf); + break; + + case HTT_STATS_TX_HWQ_CMN_TAG: + dp_print_tx_hwq_stats_cmn_tlv(tag_buf); + break; + + case HTT_STATS_TX_HWQ_DIFS_LATENCY_TAG: + dp_print_tx_hwq_difs_latency_stats_tlv_v(tag_buf); + 
		break;

	case HTT_STATS_TX_HWQ_CMD_RESULT_TAG:
		dp_print_tx_hwq_cmd_result_stats_tlv_v(tag_buf);
		break;

	case HTT_STATS_TX_HWQ_CMD_STALL_TAG:
		dp_print_tx_hwq_cmd_stall_stats_tlv_v(tag_buf);
		break;

	case HTT_STATS_TX_HWQ_FES_STATUS_TAG:
		dp_print_tx_hwq_fes_result_stats_tlv_v(tag_buf);
		break;

	/* TX TQM (queue manager) stats */
	case HTT_STATS_TX_TQM_GEN_MPDU_TAG:
		dp_print_tx_tqm_gen_mpdu_stats_tlv_v(tag_buf);
		break;

	case HTT_STATS_TX_TQM_LIST_MPDU_TAG:
		dp_print_tx_tqm_list_mpdu_stats_tlv_v(tag_buf);
		break;

	case HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG:
		dp_print_tx_tqm_list_mpdu_cnt_tlv_v(tag_buf);
		break;

	case HTT_STATS_TX_TQM_CMN_TAG:
		dp_print_tx_tqm_cmn_stats_tlv(tag_buf);
		break;

	case HTT_STATS_TX_TQM_PDEV_TAG:
		dp_print_tx_tqm_pdev_stats_tlv_v(tag_buf);
		break;

	case HTT_STATS_TX_TQM_CMDQ_STATUS_TAG:
		dp_print_tx_tqm_cmdq_status_tlv(tag_buf);
		break;

	/* TX DE (data engine / classifier) stats */
	case HTT_STATS_TX_DE_EAPOL_PACKETS_TAG:
		dp_print_tx_de_eapol_packets_stats_tlv(tag_buf);
		break;

	case HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG:
		dp_print_tx_de_classify_failed_stats_tlv(tag_buf);
		break;

	case HTT_STATS_TX_DE_CLASSIFY_STATS_TAG:
		dp_print_tx_de_classify_stats_tlv(tag_buf);
		break;

	case HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG:
		dp_print_tx_de_classify_status_stats_tlv(tag_buf);
		break;

	case HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG:
		dp_print_tx_de_enqueue_packets_stats_tlv(tag_buf);
		break;

	case HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG:
		dp_print_tx_de_enqueue_discard_stats_tlv(tag_buf);
		break;

	case HTT_STATS_TX_DE_CMN_TAG:
		dp_print_tx_de_cmn_stats_tlv(tag_buf);
		break;

	case HTT_STATS_RING_IF_TAG:
		dp_print_ring_if_stats_tlv(tag_buf);
		break;

	case HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG:
		dp_print_tx_pdev_mu_mimo_sch_stats_tlv(tag_buf);
		break;

	case HTT_STATS_SFM_CMN_TAG:
		dp_print_sfm_cmn_tlv(tag_buf);
		break;

	case HTT_STATS_SRING_STATS_TAG:
		dp_print_sring_stats_tlv(tag_buf);
		break;

	/* RX firmware / SoC-level stats */
	case HTT_STATS_RX_PDEV_FW_STATS_TAG:
		dp_print_rx_pdev_fw_stats_tlv(tag_buf);
		break;

	case HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG:
		dp_print_rx_pdev_fw_ring_mpdu_err_tlv_v(tag_buf);
		break;

	case HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG:
		dp_print_rx_pdev_fw_mpdu_drop_tlv_v(tag_buf);
		break;

	case HTT_STATS_RX_SOC_FW_STATS_TAG:
		dp_print_rx_soc_fw_stats_tlv(tag_buf);
		break;

	case HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG:
		dp_print_rx_soc_fw_refill_ring_empty_tlv_v(tag_buf);
		break;

	case HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG:
		dp_print_rx_soc_fw_refill_ring_num_refill_tlv_v(
				tag_buf);
		break;

	/* Rate stats */
	case HTT_STATS_TX_PDEV_RATE_STATS_TAG:
		dp_print_tx_pdev_rate_stats_tlv(tag_buf);
		break;

	case HTT_STATS_RX_PDEV_RATE_STATS_TAG:
		dp_print_rx_pdev_rate_stats_tlv(tag_buf);
		break;

	/* Scheduler stats */
	case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG:
		dp_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf);
		break;

	case HTT_STATS_TX_SCHED_CMN_TAG:
		dp_print_stats_tx_sched_cmn_tlv(tag_buf);
		break;

	case HTT_STATS_TX_PDEV_MUMIMO_MPDU_STATS_TAG:
		dp_print_tx_pdev_mu_mimo_mpdu_stats_tlv(tag_buf);
		break;

	case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG:
		dp_print_sched_txq_cmd_posted_tlv_v(tag_buf);
		break;

	case HTT_STATS_RING_IF_CMN_TAG:
		dp_print_ring_if_cmn_tlv(tag_buf);
		break;

	case HTT_STATS_SFM_CLIENT_USER_TAG:
		dp_print_sfm_client_user_tlv_v(tag_buf);
		break;

	case HTT_STATS_SFM_CLIENT_TAG:
		dp_print_sfm_client_tlv(tag_buf);
		break;

	case HTT_STATS_TX_TQM_ERROR_STATS_TAG:
		dp_print_tx_tqm_error_stats_tlv(tag_buf);
		break;

	case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG:
		dp_print_sched_txq_cmd_reaped_tlv_v(tag_buf);
		break;

	case HTT_STATS_SRING_CMN_TAG:
		dp_print_sring_cmn_tlv(tag_buf);
		break;

	/* Self-generated (trigger/NDP etc.) frame stats */
	case HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG:
		dp_print_tx_selfgen_ac_err_stats_tlv(tag_buf);
		break;

	case HTT_STATS_TX_SELFGEN_CMN_STATS_TAG:
		dp_print_tx_selfgen_cmn_stats_tlv(tag_buf);
		break;

	case HTT_STATS_TX_SELFGEN_AC_STATS_TAG:
		dp_print_tx_selfgen_ac_stats_tlv(tag_buf);
		break;

	case HTT_STATS_TX_SELFGEN_AX_STATS_TAG:
		dp_print_tx_selfgen_ax_stats_tlv(tag_buf);
		break;

	case HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG:
		dp_print_tx_selfgen_ax_err_stats_tlv(tag_buf);
		break;

	/* MU-MIMO HWQ stats */
	case HTT_STATS_TX_HWQ_MUMIMO_SCH_STATS_TAG:
		dp_print_tx_hwq_mu_mimo_sch_stats_tlv(tag_buf);
		break;

	case HTT_STATS_TX_HWQ_MUMIMO_MPDU_STATS_TAG:
		dp_print_tx_hwq_mu_mimo_mpdu_stats_tlv(tag_buf);
		break;

	case HTT_STATS_TX_HWQ_MUMIMO_CMN_STATS_TAG:
		dp_print_tx_hwq_mu_mimo_cmn_stats_tlv(tag_buf);
		break;

	/* Hardware error / interrupt stats */
	case HTT_STATS_HW_INTR_MISC_TAG:
		dp_print_hw_stats_intr_misc_tlv(tag_buf);
		break;

	case HTT_STATS_HW_WD_TIMEOUT_TAG:
		dp_print_hw_stats_wd_timeout_tlv(tag_buf);
		break;

	case HTT_STATS_HW_PDEV_ERRS_TAG:
		dp_print_hw_stats_pdev_errs_tlv(tag_buf);
		break;

	case HTT_STATS_COUNTER_NAME_TAG:
		dp_print_counter_tlv(tag_buf);
		break;

	/* Per-TID and per-peer stats */
	case HTT_STATS_TX_TID_DETAILS_TAG:
		dp_print_tx_tid_stats_tlv(tag_buf);
		break;

#ifdef CONFIG_WIN
	case HTT_STATS_TX_TID_DETAILS_V1_TAG:
		dp_print_tx_tid_stats_v1_tlv(tag_buf);
		break;
#endif

	case HTT_STATS_RX_TID_DETAILS_TAG:
		dp_print_rx_tid_stats_tlv(tag_buf);
		break;

	case HTT_STATS_PEER_STATS_CMN_TAG:
		dp_print_peer_stats_cmn_tlv(tag_buf);
		break;

	case HTT_STATS_PEER_DETAILS_TAG:
		dp_print_peer_details_tlv(tag_buf);
		break;

	case HTT_STATS_PEER_MSDU_FLOWQ_TAG:
		dp_print_msdu_flow_stats_tlv(tag_buf);
		break;

	case HTT_STATS_PEER_TX_RATE_STATS_TAG:
		dp_print_tx_peer_rate_stats_tlv(tag_buf);
		break;

	case HTT_STATS_PEER_RX_RATE_STATS_TAG:
		dp_print_rx_peer_rate_stats_tlv(tag_buf);
		break;

	case HTT_STATS_TX_DE_COMPL_STATS_TAG:
		dp_print_tx_de_compl_stats_tlv(tag_buf);
		break;

	case HTT_STATS_RX_REFILL_RXDMA_ERR_TAG:
		dp_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv(tag_buf);
		break;

	case HTT_STATS_RX_REFILL_REO_ERR_TAG:
		dp_print_rx_soc_fw_refill_ring_num_reo_err_tlv(tag_buf);
		break;

	case HTT_STATS_RX_REO_RESOURCE_STATS_TAG:
		dp_print_rx_reo_debug_stats_tlv(tag_buf);
		break;

	case HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG:
		dp_print_rx_pdev_fw_stats_phy_err_tlv(tag_buf);
		break;

	/* Unknown tags are silently ignored by design */
	default:
		break;
	}
}

/*
 * dp_htt_stats_copy_tag: function to select the tag type and
 * copy the corresponding tag structure
 * @pdev: DP_PDEV handle
 * @tag_type: tag type whose TLV payload is to be copied
 * @tag_buf: pointer to the tag structure
 *
 * return: void
 */
void dp_htt_stats_copy_tag(struct dp_pdev *pdev, uint8_t tag_type, uint32_t *tag_buf)
{
	/* Destination inside pdev->stats for this tag; stays NULL (and no
	 * copy happens) for tag types that are not cached on the host.
	 */
	void *dest_ptr = NULL;
	uint32_t size = 0;

	switch (tag_type) {
	case HTT_STATS_TX_PDEV_CMN_TAG:
		dest_ptr = &pdev->stats.htt_tx_pdev_stats.cmn_tlv;
		size = sizeof(htt_tx_pdev_stats_cmn_tlv);
		break;
	case HTT_STATS_TX_PDEV_UNDERRUN_TAG:
		dest_ptr = &pdev->stats.htt_tx_pdev_stats.underrun_tlv;
		size = sizeof(htt_tx_pdev_stats_urrn_tlv_v);
		break;
	case HTT_STATS_TX_PDEV_SIFS_TAG:
		dest_ptr = &pdev->stats.htt_tx_pdev_stats.sifs_tlv;
		size = sizeof(htt_tx_pdev_stats_sifs_tlv_v);
		break;
	case HTT_STATS_TX_PDEV_FLUSH_TAG:
		dest_ptr = &pdev->stats.htt_tx_pdev_stats.flush_tlv;
		size = sizeof(htt_tx_pdev_stats_flush_tlv_v);
		break;
	case HTT_STATS_TX_PDEV_PHY_ERR_TAG:
		dest_ptr = &pdev->stats.htt_tx_pdev_stats.phy_err_tlv;
		size = sizeof(htt_tx_pdev_stats_phy_err_tlv_v);
		break;
	case HTT_STATS_RX_PDEV_FW_STATS_TAG:
		dest_ptr = &pdev->stats.htt_rx_pdev_stats.fw_stats_tlv;
		size = sizeof(htt_rx_pdev_fw_stats_tlv);
		break;
	case HTT_STATS_RX_SOC_FW_STATS_TAG:
		dest_ptr = &pdev->stats.htt_rx_pdev_stats.soc_stats.fw_tlv;
		size = sizeof(htt_rx_soc_fw_stats_tlv);
		break;
	case HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG:
		dest_ptr = &pdev->stats.htt_rx_pdev_stats.soc_stats.fw_refill_ring_empty_tlv;
		size = sizeof(htt_rx_soc_fw_refill_ring_empty_tlv_v);
		break;
	case HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG:
		dest_ptr =
		&pdev->stats.htt_rx_pdev_stats.soc_stats.fw_refill_ring_num_refill_tlv;
		size = sizeof(htt_rx_soc_fw_refill_ring_num_refill_tlv_v);
		break;
	case HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG:
		dest_ptr = &pdev->stats.htt_rx_pdev_stats.fw_ring_mpdu_err_tlv;
		size = sizeof(htt_rx_pdev_fw_ring_mpdu_err_tlv_v);
		break;
	case HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG:
		dest_ptr = &pdev->stats.htt_rx_pdev_stats.fw_ring_mpdu_drop;
		size = sizeof(htt_rx_pdev_fw_mpdu_drop_tlv_v);
		break;
	default:
		break;
	}

	/* NOTE(review): assumes tag_buf holds at least 'size' bytes for the
	 * matched tag — the TLV length is not re-validated here.
	 */
	if (dest_ptr)
		qdf_mem_copy(dest_ptr, tag_buf, size);
}
diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c
new file mode 100644
index 0000000000000000000000000000000000000000..572b56a3bd41a3f5c2df6cf87c1df9ceb1c3902d
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.c
@@ -0,0 +1,3658 @@
/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
+ */ + +#include "htt.h" +#include "dp_tx.h" +#include "dp_tx_desc.h" +#include "dp_peer.h" +#include "dp_types.h" +#include "hal_tx.h" +#include "qdf_mem.h" +#include "qdf_nbuf.h" +#include "qdf_net_types.h" +#include +#ifdef MESH_MODE_SUPPORT +#include "if_meta_hdr.h" +#endif + +#define DP_TX_QUEUE_MASK 0x3 + +/* TODO Add support in TSO */ +#define DP_DESC_NUM_FRAG(x) 0 + +/* disable TQM_BYPASS */ +#define TQM_BYPASS_WAR 0 + +/* invalid peer id for reinject*/ +#define DP_INVALID_PEER 0XFFFE + +/*mapping between hal encrypt type and cdp_sec_type*/ +#define MAX_CDP_SEC_TYPE 12 +static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = { + HAL_TX_ENCRYPT_TYPE_NO_CIPHER, + HAL_TX_ENCRYPT_TYPE_WEP_128, + HAL_TX_ENCRYPT_TYPE_WEP_104, + HAL_TX_ENCRYPT_TYPE_WEP_40, + HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC, + HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC, + HAL_TX_ENCRYPT_TYPE_AES_CCMP_128, + HAL_TX_ENCRYPT_TYPE_WAPI, + HAL_TX_ENCRYPT_TYPE_AES_CCMP_256, + HAL_TX_ENCRYPT_TYPE_AES_GCMP_128, + HAL_TX_ENCRYPT_TYPE_AES_GCMP_256, + HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4}; + +/** + * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame + * @vdev: DP Virtual device handle + * @nbuf: Buffer pointer + * @queue: queue ids container for nbuf + * + * TX packet queue has 2 instances, software descriptors id and dma ring id + * Based on tx feature and hardware configuration queue id combination could be + * different. 
+ * For example - + * With XPS enabled,all TX descriptor pools and dma ring are assigned per cpu id + * With no XPS,lock based resource protection, Descriptor pool ids are different + * for each vdev, dma ring id will be same as single pdev id + * + * Return: None + */ +#ifdef QCA_OL_TX_MULTIQ_SUPPORT +static inline void dp_tx_get_queue(struct dp_vdev *vdev, + qdf_nbuf_t nbuf, struct dp_tx_queue *queue) +{ + uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK; + queue->desc_pool_id = queue_offset; + queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset]; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s, pool_id:%d ring_id: %d", + __func__, queue->desc_pool_id, queue->ring_id); + + return; +} +#else /* QCA_OL_TX_MULTIQ_SUPPORT */ +static inline void dp_tx_get_queue(struct dp_vdev *vdev, + qdf_nbuf_t nbuf, struct dp_tx_queue *queue) +{ + /* get flow id */ + queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev); + queue->ring_id = DP_TX_GET_RING_ID(vdev); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s, pool_id:%d ring_id: %d", + __func__, queue->desc_pool_id, queue->ring_id); + + return; +} +#endif + +#if defined(FEATURE_TSO) +/** + * dp_tx_tso_desc_release() - Release the tso segment + * after unmapping all the fragments + * + * @pdev - physical device handle + * @tx_desc - Tx software descriptor + */ +static void dp_tx_tso_desc_release(struct dp_soc *soc, + struct dp_tx_desc_s *tx_desc) +{ + TSO_DEBUG("%s: Free the tso descriptor", __func__); + if (qdf_unlikely(tx_desc->tso_desc == NULL)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s %d TSO desc is NULL!", + __func__, __LINE__); + qdf_assert(0); + } else if (qdf_unlikely(tx_desc->tso_num_desc == NULL)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s %d TSO common info is NULL!", + __func__, __LINE__); + qdf_assert(0); + } else { + struct qdf_tso_num_seg_elem_t *tso_num_desc = + (struct qdf_tso_num_seg_elem_t *) tx_desc->tso_num_desc; + 
+ if (tso_num_desc->num_seg.tso_cmn_num_seg > 1) { + tso_num_desc->num_seg.tso_cmn_num_seg--; + qdf_nbuf_unmap_tso_segment(soc->osdev, + tx_desc->tso_desc, false); + } else { + tso_num_desc->num_seg.tso_cmn_num_seg--; + qdf_assert(tso_num_desc->num_seg.tso_cmn_num_seg == 0); + qdf_nbuf_unmap_tso_segment(soc->osdev, + tx_desc->tso_desc, true); + dp_tso_num_seg_free(soc, tx_desc->pool_id, + tx_desc->tso_num_desc); + tx_desc->tso_num_desc = NULL; + } + dp_tx_tso_desc_free(soc, + tx_desc->pool_id, tx_desc->tso_desc); + tx_desc->tso_desc = NULL; + } +} +#else +static void dp_tx_tso_desc_release(struct dp_soc *soc, + struct dp_tx_desc_s *tx_desc) +{ + return; +} +#endif +/** + * dp_tx_desc_release() - Release Tx Descriptor + * @tx_desc : Tx Descriptor + * @desc_pool_id: Descriptor Pool ID + * + * Deallocate all resources attached to Tx descriptor and free the Tx + * descriptor. + * + * Return: + */ +static void +dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id) +{ + struct dp_pdev *pdev = tx_desc->pdev; + struct dp_soc *soc; + uint8_t comp_status = 0; + + qdf_assert(pdev); + + soc = pdev->soc; + + if (tx_desc->frm_type == dp_tx_frm_tso) + dp_tx_tso_desc_release(soc, tx_desc); + + if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) + dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id); + + if (tx_desc->flags & DP_TX_DESC_FLAG_ME) + dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer); + + qdf_atomic_dec(&pdev->num_tx_outstanding); + + if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) + qdf_atomic_dec(&pdev->num_tx_exception); + + if (HAL_TX_COMP_RELEASE_SOURCE_TQM == + hal_tx_comp_get_buffer_source(&tx_desc->comp)) + comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp); + else + comp_status = HAL_TX_COMP_RELEASE_REASON_FW; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "Tx Completion Release desc %d status %d outstanding %d", + tx_desc->id, comp_status, + qdf_atomic_read(&pdev->num_tx_outstanding)); + + dp_tx_desc_free(soc, tx_desc, 
			desc_pool_id);
	return;
}

/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @meta_data: HTT MSDU extension descriptor contents to be prepended
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently needs this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size (aligned), or 0 on failure / when no
 *         metadata was added
 *
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		uint32_t *meta_data)
{
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
		(struct htt_tx_msdu_desc_ext2_t *) meta_data;

	uint8_t htt_desc_size;

	/* Size rounded of multiple of 8 bytes */
	uint8_t htt_desc_size_aligned;

	uint8_t *hdr = NULL;

	/*
	 * Metadata - HTT MSDU Extension header
	 */
	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
	/* round up to the next multiple of 8 (HW alignment requirement) */
	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

	if (vdev->mesh_vdev) {

		/* Fill and add HTT metaheader */
		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
		if (hdr == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"Error in filling HTT metadata\n");

			return 0;
		}
		qdf_mem_copy(hdr, desc_ext, htt_desc_size);

	} else if (vdev->opmode == wlan_op_mode_ocb) {
		/* Todo - Add support for DSRC */
	}

	return htt_desc_size_aligned;
}

/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
		void *ext_desc)
{
	uint8_t num_frag;
	uint32_t tso_flags;

	/*
	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
	 * tcp_flag_mask
	 *
	 * Checksum enable flags are set in TCL descriptor and not in Extension
	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
	 */
	tso_flags =
*(uint32_t *) &tso_seg->tso_flags; + + hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags); + + hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len, + tso_seg->tso_flags.ip_len); + + hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num); + hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id); + + + for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) { + uint32_t lo = 0; + uint32_t hi = 0; + + qdf_dmaaddr_to_32s( + tso_seg->tso_frags[num_frag].paddr, &lo, &hi); + hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi, + tso_seg->tso_frags[num_frag].length); + } + + return; +} +#else +static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg, + void *ext_desc) +{ + return; +} +#endif + +#if defined(FEATURE_TSO) +/** + * dp_tx_free_tso_seg() - Loop through the tso segments + * allocated and free them + * + * @soc: soc handle + * @free_seg: list of tso segments + * @msdu_info: msdu descriptor + * + * Return - void + */ +static void dp_tx_free_tso_seg(struct dp_soc *soc, + struct qdf_tso_seg_elem_t *free_seg, + struct dp_tx_msdu_info_s *msdu_info) +{ + struct qdf_tso_seg_elem_t *next_seg; + + while (free_seg) { + next_seg = free_seg->next; + dp_tx_tso_desc_free(soc, + msdu_info->tx_queue.desc_pool_id, + free_seg); + free_seg = next_seg; + } +} + +/** + * dp_tx_free_tso_num_seg() - Loop through the tso num segments + * allocated and free them + * + * @soc: soc handle + * @free_seg: list of tso segments + * @msdu_info: msdu descriptor + * Return - void + */ +static void dp_tx_free_tso_num_seg(struct dp_soc *soc, + struct qdf_tso_num_seg_elem_t *free_seg, + struct dp_tx_msdu_info_s *msdu_info) +{ + struct qdf_tso_num_seg_elem_t *next_seg; + + while (free_seg) { + next_seg = free_seg->next; + dp_tso_num_seg_free(soc, + msdu_info->tx_queue.desc_pool_id, + free_seg); + free_seg = next_seg; + } +} + +/** + * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info + * @vdev: virtual device handle + * @msdu: 
network buffer + * @msdu_info: meta data associated with the msdu + * + * Return: QDF_STATUS_SUCCESS success + */ +static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev, + qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info) +{ + struct qdf_tso_seg_elem_t *tso_seg; + int num_seg = qdf_nbuf_get_tso_num_seg(msdu); + struct dp_soc *soc = vdev->pdev->soc; + struct qdf_tso_info_t *tso_info; + struct qdf_tso_num_seg_elem_t *tso_num_seg; + + tso_info = &msdu_info->u.tso_info; + tso_info->curr_seg = NULL; + tso_info->tso_seg_list = NULL; + tso_info->num_segs = num_seg; + msdu_info->frm_type = dp_tx_frm_tso; + tso_info->tso_num_seg_list = NULL; + + TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg); + + while (num_seg) { + tso_seg = dp_tx_tso_desc_alloc( + soc, msdu_info->tx_queue.desc_pool_id); + if (tso_seg) { + tso_seg->next = tso_info->tso_seg_list; + tso_info->tso_seg_list = tso_seg; + num_seg--; + } else { + struct qdf_tso_seg_elem_t *free_seg = + tso_info->tso_seg_list; + + dp_tx_free_tso_seg(soc, free_seg, msdu_info); + + return QDF_STATUS_E_NOMEM; + } + } + + TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg); + + tso_num_seg = dp_tso_num_seg_alloc(soc, + msdu_info->tx_queue.desc_pool_id); + + if (tso_num_seg) { + tso_num_seg->next = tso_info->tso_num_seg_list; + tso_info->tso_num_seg_list = tso_num_seg; + } else { + /* Bug: free tso_num_seg and tso_seg */ + /* Free the already allocated num of segments */ + struct qdf_tso_seg_elem_t *free_seg = + tso_info->tso_seg_list; + + TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet", + __func__); + dp_tx_free_tso_seg(soc, free_seg, msdu_info); + + return QDF_STATUS_E_NOMEM; + } + + msdu_info->num_seg = + qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info); + + TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__, + msdu_info->num_seg); + + if (!(msdu_info->num_seg)) { + dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info); + dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list, + msdu_info); + return 
QDF_STATUS_E_INVAL; + } + + tso_info->curr_seg = tso_info->tso_seg_list; + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev, + qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info) +{ + return QDF_STATUS_E_NOMEM; +} +#endif + +/** + * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor + * @vdev: DP Vdev handle + * @msdu_info: MSDU info to be setup in MSDU extension descriptor + * @desc_pool_id: Descriptor Pool ID + * + * Return: + */ +static +struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev, + struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id) +{ + uint8_t i; + uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA]; + struct dp_tx_seg_info_s *seg_info; + struct dp_tx_ext_desc_elem_s *msdu_ext_desc; + struct dp_soc *soc = vdev->pdev->soc; + + /* Allocate an extension descriptor */ + msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id); + qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA); + + if (!msdu_ext_desc) { + DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1); + return NULL; + } + + if (msdu_info->exception_fw && + qdf_unlikely(vdev->mesh_vdev)) { + qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES], + &msdu_info->meta_data[0], + sizeof(struct htt_tx_msdu_desc_ext2_t)); + qdf_atomic_inc(&vdev->pdev->num_tx_exception); + } + + switch (msdu_info->frm_type) { + case dp_tx_frm_sg: + case dp_tx_frm_me: + case dp_tx_frm_raw: + seg_info = msdu_info->u.sg_info.curr_seg; + /* Update the buffer pointers in MSDU Extension Descriptor */ + for (i = 0; i < seg_info->frag_cnt; i++) { + hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i, + seg_info->frags[i].paddr_lo, + seg_info->frags[i].paddr_hi, + seg_info->frags[i].len); + } + + break; + + case dp_tx_frm_tso: + dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg, + &cached_ext_desc[0]); + break; + + + default: + break; + } + + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, 
QDF_TRACE_LEVEL_DEBUG, + cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA); + + hal_tx_ext_desc_sync(&cached_ext_desc[0], + msdu_ext_desc->vaddr); + + return msdu_ext_desc; +} + +/** + * dp_tx_trace_pkt() - Trace TX packet at DP layer + * + * @skb: skb to be traced + * @msdu_id: msdu_id of the packet + * @vdev_id: vdev_id of the packet + * + * Return: None + */ +static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id, + uint8_t vdev_id) +{ + QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK; + QDF_NBUF_CB_TX_DP_TRACE(skb) = 1; + DPTRACE(qdf_dp_trace_ptr(skb, + QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD, + QDF_TRACE_DEFAULT_PDEV_ID, + qdf_nbuf_data_addr(skb), + sizeof(qdf_nbuf_data(skb)), + msdu_id, vdev_id)); + + qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID); + + DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID, + QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD, + msdu_id, QDF_TX)); +} + +/** + * dp_tx_desc_prepare_single - Allocate and prepare Tx descriptor + * @vdev: DP vdev handle + * @nbuf: skb + * @desc_pool_id: Descriptor pool ID + * @meta_data: Metadata to the fw + * @tx_exc_metadata: Handle that holds exception path metadata + * Allocate and prepare Tx descriptor with msdu information. 
+ * + * Return: Pointer to Tx Descriptor on success, + * NULL on failure + */ +static +struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev, + qdf_nbuf_t nbuf, uint8_t desc_pool_id, + struct dp_tx_msdu_info_s *msdu_info, + struct cdp_tx_exception_metadata *tx_exc_metadata) +{ + uint8_t align_pad; + uint8_t is_exception = 0; + uint8_t htt_hdr_size; + struct ether_header *eh; + struct dp_tx_desc_s *tx_desc; + struct dp_pdev *pdev = vdev->pdev; + struct dp_soc *soc = pdev->soc; + + /* Allocate software Tx descriptor */ + tx_desc = dp_tx_desc_alloc(soc, desc_pool_id); + if (qdf_unlikely(!tx_desc)) { + DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1); + return NULL; + } + + /* Flow control/Congestion Control counters */ + qdf_atomic_inc(&pdev->num_tx_outstanding); + + /* Initialize the SW tx descriptor */ + tx_desc->nbuf = nbuf; + tx_desc->frm_type = dp_tx_frm_std; + tx_desc->tx_encap_type = (tx_exc_metadata ? + tx_exc_metadata->tx_encap_type : vdev->tx_encap_type); + tx_desc->vdev = vdev; + tx_desc->pdev = pdev; + tx_desc->msdu_ext_desc = NULL; + tx_desc->pkt_offset = 0; + + dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id); + + /* + * For special modes (vdev_type == ocb or mesh), data frames should be + * transmitted using varying transmit parameters (tx spec) which include + * transmit rate, power, priority, channel, channel bandwidth , nss etc. + * These are filled in HTT MSDU descriptor and sent in frame pre-header. + * These frames are sent as exception packets to firmware. + * + * HW requirement is that metadata should always point to a + * 8-byte aligned address. So we add alignment pad to start of buffer. 
+ * HTT Metadata should be ensured to be multiple of 8-bytes, + * to get 8-byte aligned start address along with align_pad added + * + * |-----------------------------| + * | | + * |-----------------------------| <-----Buffer Pointer Address given + * | | ^ in HW descriptor (aligned) + * | HTT Metadata | | + * | | | + * | | | Packet Offset given in descriptor + * | | | + * |-----------------------------| | + * | Alignment Pad | v + * |-----------------------------| <----- Actual buffer start address + * | SKB Data | (Unaligned) + * | | + * | | + * | | + * | | + * | | + * |-----------------------------| + */ + if (qdf_unlikely((msdu_info->exception_fw)) || + (vdev->opmode == wlan_op_mode_ocb)) { + align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7; + if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "qdf_nbuf_push_head failed\n"); + goto failure; + } + + htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf, + msdu_info->meta_data); + if (htt_hdr_size == 0) + goto failure; + tx_desc->pkt_offset = align_pad + htt_hdr_size; + tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; + is_exception = 1; + } + + if (qdf_unlikely(QDF_STATUS_SUCCESS != + qdf_nbuf_map(soc->osdev, nbuf, + QDF_DMA_TO_DEVICE))) { + /* Handle failure */ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "qdf_nbuf_map failed\n"); + DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1); + goto failure; + } + + if (qdf_unlikely(vdev->nawds_enabled)) { + eh = (struct ether_header *) qdf_nbuf_data(nbuf); + if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) { + tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; + is_exception = 1; + } + } + +#if !TQM_BYPASS_WAR + if (is_exception || tx_exc_metadata) +#endif + { + /* Temporary WAR due to TQM VP issues */ + tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; + qdf_atomic_inc(&pdev->num_tx_exception); + } + + return tx_desc; + +failure: + dp_tx_desc_release(tx_desc, desc_pool_id); + return NULL; +} + +/** + * dp_tx_prepare_desc() - 
Allocate and prepare Tx descriptor for multisegment frame + * @vdev: DP vdev handle + * @nbuf: skb + * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor + * @desc_pool_id : Descriptor Pool ID + * + * Allocate and prepare Tx descriptor with msdu and fragment descritor + * information. For frames wth fragments, allocate and prepare + * an MSDU extension descriptor + * + * Return: Pointer to Tx Descriptor on success, + * NULL on failure + */ +static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev, + qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info, + uint8_t desc_pool_id) +{ + struct dp_tx_desc_s *tx_desc; + struct dp_tx_ext_desc_elem_s *msdu_ext_desc; + struct dp_pdev *pdev = vdev->pdev; + struct dp_soc *soc = pdev->soc; + + /* Allocate software Tx descriptor */ + tx_desc = dp_tx_desc_alloc(soc, desc_pool_id); + if (!tx_desc) { + DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1); + return NULL; + } + + /* Flow control/Congestion Control counters */ + qdf_atomic_inc(&pdev->num_tx_outstanding); + + /* Initialize the SW tx descriptor */ + tx_desc->nbuf = nbuf; + tx_desc->frm_type = msdu_info->frm_type; + tx_desc->tx_encap_type = vdev->tx_encap_type; + tx_desc->vdev = vdev; + tx_desc->pdev = pdev; + tx_desc->pkt_offset = 0; + tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg; + tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list; + + dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id); + + /* Handle scattered frames - TSO/SG/ME */ + /* Allocate and prepare an extension descriptor for scattered frames */ + msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id); + if (!msdu_ext_desc) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s Tx Extension Descriptor Alloc Fail\n", + __func__); + goto failure; + } + +#if TQM_BYPASS_WAR + /* Temporary WAR due to TQM VP issues */ + tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; + qdf_atomic_inc(&pdev->num_tx_exception); +#endif + if 
(qdf_unlikely(msdu_info->exception_fw)) + tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; + + tx_desc->msdu_ext_desc = msdu_ext_desc; + tx_desc->flags |= DP_TX_DESC_FLAG_FRAG; + + return tx_desc; +failure: + dp_tx_desc_release(tx_desc, desc_pool_id); + return NULL; +} + +/** + * dp_tx_prepare_raw() - Prepare RAW packet TX + * @vdev: DP vdev handle + * @nbuf: buffer pointer + * @seg_info: Pointer to Segment info Descriptor to be prepared + * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension + * descriptor + * + * Return: + */ +static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info) +{ + qdf_nbuf_t curr_nbuf = NULL; + uint16_t total_len = 0; + qdf_dma_addr_t paddr; + int32_t i; + int32_t mapped_buf_num = 0; + + struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info; + qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data; + + DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf)); + + /* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */ + if (qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) + qos_wh->i_fc[1] |= IEEE80211_FC1_WEP; + + for (curr_nbuf = nbuf, i = 0; curr_nbuf; + curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) { + + if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf, + QDF_DMA_TO_DEVICE)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s dma map error \n", __func__); + DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1); + mapped_buf_num = i; + goto error; + } + + paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0); + seg_info->frags[i].paddr_lo = paddr; + seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32); + seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf); + seg_info->frags[i].vaddr = (void *) curr_nbuf; + total_len += qdf_nbuf_len(curr_nbuf); + } + + seg_info->frag_cnt = i; + seg_info->total_len = total_len; + seg_info->next = NULL; + + sg_info->curr_seg = seg_info; + + msdu_info->frm_type = dp_tx_frm_raw; 
	msdu_info->num_seg = 1;

	return nbuf;

error:
	/*
	 * Unwind a partially-prepared RAW chain: only the first
	 * mapped_buf_num buffers were DMA-mapped, so unmap exactly those,
	 * then free every buffer in the chain.
	 */
	i = 0;
	while (nbuf) {
		curr_nbuf = nbuf;
		if (i < mapped_buf_num) {
			qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
			i++;
		}
		nbuf = qdf_nbuf_next(nbuf);
		qdf_nbuf_free(curr_nbuf);
	}
	return NULL;

}

/**
 * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
 * @soc: DP Soc Handle
 * @vdev: DP vdev handle
 * @tx_desc: Tx Descriptor Handle
 * @tid: TID from HLOS for overriding default DSCP-TID mapping
 * @fw_metadata: Metadata to send to Target Firmware along with frame
 * @ring_id: Ring ID of H/W ring to which we enqueue the packet
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Gets the next free TCL HW DMA descriptor and sets up required parameters
 * from software Tx descriptor
 *
 * Return: QDF_STATUS_SUCCESS when the frame was posted to the TCL ring,
 *	   QDF_STATUS_E_RESOURCES when the ring is full
 */
static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
				   struct dp_tx_desc_s *tx_desc, uint8_t tid,
				   uint16_t fw_metadata, uint8_t ring_id,
				   struct cdp_tx_exception_metadata
					*tx_exc_metadata)
{
	uint8_t type;
	uint16_t length;
	void *hal_tx_desc, *hal_tx_desc_cached;
	qdf_dma_addr_t dma_addr;
	/* Descriptor is composed on the stack, then copied ("synced") into
	 * the ring slot in one go below.
	 */
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];

	/* Exception-path callers may override the vdev security type */
	enum cdp_sec_type sec_type = (tx_exc_metadata ?
			tx_exc_metadata->sec_type : vdev->sec_type);

	/* Return Buffer Manager ID */
	uint8_t bm_id = ring_id;
	void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;

	hal_tx_desc_cached = (void *) cached_desc;
	qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);

	/* Fragmented/TSO frames point at an MSDU extension descriptor;
	 * simple frames point directly at the (already mapped) buffer.
	 */
	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
		length = HAL_TX_EXT_DESC_WITH_META_DATA;
		type = HAL_TX_BUF_TYPE_EXT_DESC;
		dma_addr = tx_desc->msdu_ext_desc->paddr;
	} else {
		length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
		type = HAL_TX_BUF_TYPE_BUFFER;
		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
	}

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
			dma_addr, bm_id, tx_desc->id, type);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_lmac_id(hal_tx_desc_cached,
			HAL_TX_DESC_DEFAULT_LMAC_ID);
	hal_tx_desc_set_dscp_tid_table_id(hal_tx_desc_cached,
			vdev->dscp_tid_map_id);
	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
			sec_type_map[sec_type]);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
			__func__, length, type, (uint64_t)dma_addr,
			tx_desc->pkt_offset, tx_desc->id);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
			vdev->hal_desc_addr_search_flags);

	/* verify checksum offload configuration*/
	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
		((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
		|| qdf_nbuf_is_tso(tx_desc->nbuf))) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);

	/* Sync cached descriptor with HW */
	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);

	if (!hal_tx_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s TCL ring full ring_id:%d\n", __func__, ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		return QDF_STATUS_E_RESOURCES;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;

	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);

	/*
	 * If one packet is enqueued in HW, PM usage count needs to be
	 * incremented by one to prevent future runtime suspend. This
	 * should be tied with the success of enqueuing. It will be
	 * decremented after the packet has been sent.
	 */
	hif_pm_runtime_get_noresume(soc->hif_handle);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_cce_classify() - Classify the frame based on CCE rules
 * @vdev: DP vdev handle
 * @nbuf: skb
 *
 * Classify frames based on CCE rules
 * Return: bool( true if classified,
 *	   else false)
 */
static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct ether_header *eh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	qdf_nbuf_t nbuf_clone = NULL;
	qdf_dot3_qosframe_t *qos_wh = NULL;

	/* for mesh packets don't do any classification */
	if (qdf_unlikely(vdev->mesh_vdev))
		return false;

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (struct ether_header *) qdf_nbuf_data(nbuf);
		ether_type = eh->ether_type;
		llcHdr = (qdf_llc_t *)(nbuf->data +
					sizeof(struct ether_header));
	} else {
		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
		/* For encrypted packets don't do any classification */
		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
			return false;

		if (qdf_unlikely(qos_wh->i_fc[0]
			& QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
			/* 4-address (WDS) frames carry a longer 802.11
			 * header than 3-address frames.
			 */
			if (qdf_unlikely(
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {

				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_4ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
						QDF_IEEE80211_4ADDR_HDR_LEN);
			} else {
				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_3ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
					QDF_IEEE80211_3ADDR_HDR_LEN);
			}

			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
				&& (ether_type ==
				qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {

				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
				return true;
			}
		}

		return false;
	}

	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
		ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN +
				sizeof(*llcHdr));
		/* Clone so the packet-type helpers below can inspect the
		 * payload with the LLC/VLAN headers stripped, without
		 * mutating the frame that is actually transmitted.
		 */
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (qdf_unlikely(nbuf_clone)) {
			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));

			if (ether_type == htons(ETHERTYPE_8021Q)) {
				qdf_nbuf_pull_head(nbuf_clone,
						sizeof(qdf_net_vlanhdr_t));
			}
		}
	} else {
		if (ether_type == htons(ETHERTYPE_8021Q)) {
			nbuf_clone = qdf_nbuf_clone(nbuf);
			if (qdf_unlikely(nbuf_clone)) {
				qdf_nbuf_pull_head(nbuf_clone,
					sizeof(qdf_net_vlanhdr_t));
			}
		}
	}

	if (qdf_unlikely(nbuf_clone))
		nbuf = nbuf_clone;

	/* Control/management-over-data frames (EAPOL, ARP, WAPI, TDLS,
	 * DHCPv4/v6) are CCE-classified.
	 */
	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
		if (qdf_unlikely(nbuf_clone != NULL))
			qdf_nbuf_free(nbuf_clone);
		return true;
	}

	if (qdf_unlikely(nbuf_clone != NULL))
		qdf_nbuf_free(nbuf_clone);

	return false;
}

/**
 * dp_tx_classify_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 *
 * 
Extract the DSCP or PCP information from frame and map into TID value. + * Software based TID classification is required when more than 2 DSCP-TID + * mapping tables are needed. + * Hardware supports 2 DSCP-TID mapping tables + * + * Return: void + */ +static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_msdu_info_s *msdu_info) +{ + uint8_t tos = 0, dscp_tid_override = 0; + uint8_t *hdr_ptr, *L3datap; + uint8_t is_mcast = 0; + struct ether_header *eh = NULL; + qdf_ethervlan_header_t *evh = NULL; + uint16_t ether_type; + qdf_llc_t *llcHdr; + struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev; + + DP_TX_TID_OVERRIDE(msdu_info, nbuf); + + if (vdev->dscp_tid_map_id <= 1) + return; + + /* for mesh packets don't do any classification */ + if (qdf_unlikely(vdev->mesh_vdev)) + return; + + if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) { + eh = (struct ether_header *) nbuf->data; + hdr_ptr = eh->ether_dhost; + L3datap = hdr_ptr + sizeof(struct ether_header); + } else { + qdf_dot3_qosframe_t *qos_wh = + (qdf_dot3_qosframe_t *) nbuf->data; + msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ? + qos_wh->i_qos[0] & DP_QOS_TID : 0; + return; + } + + is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr); + ether_type = eh->ether_type; + + llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(struct ether_header)); + /* + * Check if packet is dot3 or eth2 type. 
+ */ + if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) { + ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN + + sizeof(*llcHdr)); + + if (ether_type == htons(ETHERTYPE_8021Q)) { + L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) + + sizeof(*llcHdr); + ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN + + sizeof(*llcHdr) + + sizeof(qdf_net_vlanhdr_t)); + } else { + L3datap = hdr_ptr + sizeof(struct ether_header) + + sizeof(*llcHdr); + } + } else { + if (ether_type == htons(ETHERTYPE_8021Q)) { + evh = (qdf_ethervlan_header_t *) eh; + ether_type = evh->ether_type; + L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t); + } + } + + /* + * Find priority from IP TOS DSCP field + */ + if (qdf_nbuf_is_ipv4_pkt(nbuf)) { + qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap; + if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) { + /* Only for unicast frames */ + if (!is_mcast) { + /* send it on VO queue */ + msdu_info->tid = DP_VO_TID; + } + } else { + /* + * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7 + * from TOS byte. 
+ */ + tos = ip->ip_tos; + dscp_tid_override = 1; + + } + } else if (qdf_nbuf_is_ipv6_pkt(nbuf)) { + /* TODO + * use flowlabel + *igmpmld cases to be handled in phase 2 + */ + unsigned long ver_pri_flowlabel; + unsigned long pri; + ver_pri_flowlabel = *(unsigned long *) L3datap; + pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >> + DP_IPV6_PRIORITY_SHIFT; + tos = pri; + dscp_tid_override = 1; + } else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) + msdu_info->tid = DP_VO_TID; + else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) { + /* Only for unicast frames */ + if (!is_mcast) { + /* send ucast arp on VO queue */ + msdu_info->tid = DP_VO_TID; + } + } + + /* + * Assign all MCAST packets to BE + */ + if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) { + if (is_mcast) { + tos = 0; + dscp_tid_override = 1; + } + } + + if (dscp_tid_override == 1) { + tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK; + msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos]; + } + return; +} + +#ifdef CONVERGED_TDLS_ENABLE +/** + * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame + * @tx_desc: TX descriptor + * + * Return: None + */ +static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc) +{ + if (tx_desc->vdev) { + if (tx_desc->vdev->is_tdls_frame) + tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME; + tx_desc->vdev->is_tdls_frame = false; + } +} + +/** + * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer + * @tx_desc: TX descriptor + * @vdev: datapath vdev handle + * + * Return: None + */ +static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc, + struct dp_vdev *vdev) +{ + struct hal_tx_completion_status ts = {0}; + qdf_nbuf_t nbuf = tx_desc->nbuf; + + hal_tx_comp_get_status(&tx_desc->comp, &ts); + if (vdev->tx_non_std_data_callback.func) { + qdf_nbuf_set_next(tx_desc->nbuf, NULL); + vdev->tx_non_std_data_callback.func( + vdev->tx_non_std_data_callback.ctxt, + nbuf, ts.status); + return; + } +} +#endif + +/** + * 
dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info (tid, fw metadata and tx queue are taken from here)
 * @peer_id: peer_id of the peer in case of NAWDS frames
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Return: NULL on success,
 *	   nbuf when it fails to send
 */
static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	QDF_STATUS status;
	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
	uint16_t htt_tcl_metadata = 0;
	uint8_t tid = msdu_info->tid;

	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
			msdu_info, tx_exc_metadata);
	if (!tx_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s Tx_desc prepare Fail vdev %pK queue %d\n",
			  __func__, vdev, tx_q->desc_pool_id);
		return nbuf;
	}

	/* When the HW CCE classifier is disabled, classify in SW and route
	 * matched frames to FW on the VO queue.
	 */
	if (qdf_unlikely(soc->cce_disable)) {
		if (dp_cce_classify(vdev, nbuf) == true) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			tid = DP_VO_TID;
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		}
	}

	dp_tx_update_tdls_flags(tx_desc);

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RING Access Failed -- %pK\n",
			  __func__, __LINE__, hal_srng);
		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
		goto fail_return;
	}

	/* DP_INVALID_PEER: host-inspected frame; a valid NAWDS peer_id
	 * selects peer-based TCL metadata; otherwise use the vdev default.
	 */
	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
		htt_tcl_metadata = vdev->htt_tcl_metadata;
		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
				HTT_TCL_METADATA_TYPE_PEER_BASED);
		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
				peer_id);
	} else
		htt_tcl_metadata = vdev->htt_tcl_metadata;

	if (msdu_info->exception_fw) {
		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
	}

	/* Enqueue the Tx MSDU descriptor to HW for transmit */
	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
			htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);

	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
			  __func__, tx_desc, tx_q->ring_id);
		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
		goto fail_return;
	}

	/* Ownership passed to HW */
	nbuf = NULL;

fail_return:
	/* End ring access; when runtime PM is not active we can do the full
	 * access_end (with doorbell), otherwise only reap.
	 */
	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
		hal_srng_access_end(soc->hal_soc, hal_srng);
		hif_pm_runtime_put(soc->hif_handle);
	} else {
		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
	}

	return nbuf;
}

/**
 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 *
 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
 *
 * Return: NULL on success,
 *	   nbuf when it fails to send
 */
#if QDF_LOCK_STATS
static noinline
#else
static
#endif
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t i;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	bool is_cce_classified = false;
	QDF_STATUS status;
	uint16_t htt_tcl_metadata = 0;

	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RING Access Failed -- %pK\n",
			  __func__, __LINE__, hal_srng);
		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
		return nbuf;
	}

	if (qdf_unlikely(soc->cce_disable)) {
		is_cce_classified = dp_cce_classify(vdev, nbuf);
		if (is_cce_classified) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			msdu_info->tid = DP_VO_TID;
		}
	}

	if (msdu_info->frm_type == dp_tx_frm_me)
		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;

	i = 0;
	/* Print statement to track i and num_seg */
	/*
	 * For each segment (maps to 1 MSDU) , prepare software and hardware
	 * descriptors using information in msdu_info
	 */
	while (i < msdu_info->num_seg) {
		/*
		 * Setup Tx descriptor for an MSDU, and MSDU extension
		 * descriptor
		 */
		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
				tx_q->desc_pool_id);

		if (!tx_desc) {
			/* ME frames carry a driver-owned buffer that must be
			 * returned to its pool on failure.
			 */
			if (msdu_info->frm_type == dp_tx_frm_me) {
				dp_tx_me_free_buf(pdev,
					(void *)(msdu_info->u.sg_info
						.curr_seg->frags[0].vaddr));
			}
			goto done;
		}

		if (msdu_info->frm_type == dp_tx_frm_me) {
			tx_desc->me_buffer =
				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
		}

		if (is_cce_classified)
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

		htt_tcl_metadata = vdev->htt_tcl_metadata;
		if (msdu_info->exception_fw) {
			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
		}

		/*
		 * Enqueue the Tx MSDU descriptor to HW for transmit
		 */
		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
				htt_tcl_metadata, tx_q->ring_id, NULL);

		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
				  __func__, tx_desc, tx_q->ring_id);

			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);

			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
			goto done;
		}

		/*
		 * TODO
		 * if tso_info structure can be modified to have curr_seg
		 * as first element, following 2 blocks of code (for TSO and SG)
		 * can be combined into 1
		 */

		/*
		 * For frames with multiple segments (TSO, ME), jump to next
		 * segment.
		 */
		if (msdu_info->frm_type == dp_tx_frm_tso) {
			if (msdu_info->u.tso_info.curr_seg->next) {
				msdu_info->u.tso_info.curr_seg =
					msdu_info->u.tso_info.curr_seg->next;

				/*
				 * If this is a jumbo nbuf, then increment the number of
				 * nbuf users for each additional segment of the msdu.
				 * This will ensure that the skb is freed only after
				 * receiving tx completion for all segments of an nbuf
				 */
				qdf_nbuf_inc_users(nbuf);

				/* Check with MCL if this is needed */
				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
			}
		}

		/*
		 * For Multicast-Unicast converted packets,
		 * each converted frame (for a client) is represented as
		 * 1 segment
		 */
		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
				(msdu_info->frm_type == dp_tx_frm_me)) {
			if (msdu_info->u.sg_info.curr_seg->next) {
				msdu_info->u.sg_info.curr_seg =
					msdu_info->u.sg_info.curr_seg->next;
				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
			}
		}
		i++;
	}

	nbuf = NULL;

done:
	/* See dp_tx_send_msdu_single(): full access_end only when runtime
	 * PM will not suspend underneath us.
	 */
	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
		hal_srng_access_end(soc->hal_soc, hal_srng);
		hif_pm_runtime_put(soc->hif_handle);
	} else {
		hal_srng_access_end_reap(soc->hal_soc, hal_srng);
	}

	return nbuf;
}

/**
 * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
 *	for SG frames
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
+ * + * Return: NULL on success, + * nbuf when it fails to send + */ +static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info) +{ + uint32_t cur_frag, nr_frags; + qdf_dma_addr_t paddr; + struct dp_tx_sg_info_s *sg_info; + + sg_info = &msdu_info->u.sg_info; + nr_frags = qdf_nbuf_get_nr_frags(nbuf); + + if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf, + QDF_DMA_TO_DEVICE)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "dma map error\n"); + DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1); + + qdf_nbuf_free(nbuf); + return NULL; + } + + paddr = qdf_nbuf_get_frag_paddr(nbuf, 0); + seg_info->frags[0].paddr_lo = paddr; + seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32; + seg_info->frags[0].len = qdf_nbuf_headlen(nbuf); + seg_info->frags[0].vaddr = (void *) nbuf; + + for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) { + if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev, + nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "frag dma map error\n"); + DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1); + qdf_nbuf_free(nbuf); + return NULL; + } + + paddr = qdf_nbuf_get_frag_paddr(nbuf, 0); + seg_info->frags[cur_frag + 1].paddr_lo = paddr; + seg_info->frags[cur_frag + 1].paddr_hi = + ((uint64_t) paddr) >> 32; + seg_info->frags[cur_frag + 1].len = + qdf_nbuf_get_frag_size(nbuf, cur_frag); + } + + seg_info->frag_cnt = (cur_frag + 1); + seg_info->total_len = qdf_nbuf_len(nbuf); + seg_info->next = NULL; + + sg_info->curr_seg = seg_info; + + msdu_info->frm_type = dp_tx_frm_sg; + msdu_info->num_seg = 1; + + return nbuf; +} + +#ifdef MESH_MODE_SUPPORT + +/** + * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf + and prepare msdu_info for mesh frames. + * @vdev: DP vdev handle + * @nbuf: skb + * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc. 
+ * + * Return: NULL on failure, + * nbuf when extracted successfully + */ +static +qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_msdu_info_s *msdu_info) +{ + struct meta_hdr_s *mhdr; + struct htt_tx_msdu_desc_ext2_t *meta_data = + (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0]; + + mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf); + + if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) { + msdu_info->exception_fw = 0; + goto remove_meta_hdr; + } + + msdu_info->exception_fw = 1; + + qdf_mem_set(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t), 0); + + meta_data->host_tx_desc_pool = 1; + meta_data->update_peer_cache = 1; + meta_data->learning_frame = 1; + + if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) { + meta_data->power = mhdr->power; + + meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs; + meta_data->nss_mask = 1 << mhdr->rate_info[0].nss; + meta_data->pream_type = mhdr->rate_info[0].preamble_type; + meta_data->retry_limit = mhdr->rate_info[0].max_tries; + + meta_data->dyn_bw = 1; + + meta_data->valid_pwr = 1; + meta_data->valid_mcs_mask = 1; + meta_data->valid_nss_mask = 1; + meta_data->valid_preamble_type = 1; + meta_data->valid_retries = 1; + meta_data->valid_bw_info = 1; + } + + if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) { + meta_data->encrypt_type = 0; + meta_data->valid_encrypt_type = 1; + meta_data->learning_frame = 0; + } + + meta_data->valid_key_flags = 1; + meta_data->key_flags = (mhdr->keyix & 0x3); + +remove_meta_hdr: + if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "qdf_nbuf_pull_head failed\n"); + qdf_nbuf_free(nbuf); + return NULL; + } + + if (mhdr->flags & METAHDR_FLAG_NOQOS) + msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST; + else + msdu_info->tid = qdf_nbuf_get_priority(nbuf); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH, + "%s , Meta hdr %0x %0x %0x %0x %0x %0x" + " tid %d to_fw %d\n", + 
__func__, msdu_info->meta_data[0], + msdu_info->meta_data[1], + msdu_info->meta_data[2], + msdu_info->meta_data[3], + msdu_info->meta_data[4], + msdu_info->meta_data[5], + msdu_info->tid, msdu_info->exception_fw); + + return nbuf; +} +#else +static +qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_msdu_info_s *msdu_info) +{ + return nbuf; +} + +#endif + +#ifdef DP_FEATURE_NAWDS_TX +/** + * dp_tx_prepare_nawds(): Tramit NAWDS frames + * @vdev: dp_vdev handle + * @nbuf: skb + * @tid: TID from HLOS for overriding default DSCP-TID mapping + * @tx_q: Tx queue to be used for this Tx frame + * @meta_data: Meta date for mesh + * @peer_id: peer_id of the peer in case of NAWDS frames + * + * return: NULL on success nbuf on failure + */ +static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf, + struct dp_tx_msdu_info_s *msdu_info) +{ + struct dp_peer *peer = NULL; + struct dp_soc *soc = vdev->pdev->soc; + struct dp_ast_entry *ast_entry = NULL; + struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf); + uint16_t peer_id = HTT_INVALID_PEER; + + struct dp_peer *sa_peer = NULL; + qdf_nbuf_t nbuf_copy; + + qdf_spin_lock_bh(&(soc->ast_lock)); + ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost)); + + if (ast_entry) + sa_peer = ast_entry->peer; + + qdf_spin_unlock_bh(&(soc->ast_lock)); + + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + if ((peer->peer_ids[0] != HTT_INVALID_PEER) && + (peer->nawds_enabled)) { + if (sa_peer == peer) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + " %s: broadcast multicast packet", + __func__); + DP_STATS_INC(peer, tx.nawds_mcast_drop, 1); + continue; + } + + nbuf_copy = qdf_nbuf_copy(nbuf); + if (!nbuf_copy) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + "nbuf copy failed"); + } + + peer_id = peer->peer_ids[0]; + nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy, + msdu_info, peer_id, NULL); + if (nbuf_copy != NULL) { + 
qdf_nbuf_free(nbuf_copy); + continue; + } + DP_STATS_INC_PKT(peer, tx.nawds_mcast, + 1, qdf_nbuf_len(nbuf)); + } + } + if (peer_id == HTT_INVALID_PEER) + return nbuf; + + return NULL; +} +#endif + +/** + * dp_check_exc_metadata() - Checks if parameters are valid + * @tx_exc - holds all exception path parameters + * + * Returns true when all the parameters are valid else false + * + */ +static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc) +{ + if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) || + tx_exc->tx_encap_type > htt_cmn_pkt_num_types || + tx_exc->sec_type > cdp_num_sec_types) { + return false; + } + + return true; +} + +/** + * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path + * @vap_dev: DP vdev handle + * @nbuf: skb + * @tx_exc_metadata: Handle that holds exception path meta data + * + * Entry point for Core Tx layer (DP_TX) invoked from + * hard_start_xmit in OSIF/HDD to transmit frames through fw + * + * Return: NULL on success, + * nbuf when it fails to send + */ +qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf, + struct cdp_tx_exception_metadata *tx_exc_metadata) +{ + struct ether_header *eh = NULL; + struct dp_vdev *vdev = (struct dp_vdev *) vap_dev; + struct dp_tx_msdu_info_s msdu_info; + + qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0); + + msdu_info.tid = tx_exc_metadata->tid; + + eh = (struct ether_header *)qdf_nbuf_data(nbuf); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s , skb %pM", + __func__, nbuf->data); + + DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf)); + + if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid parameters in exception path"); + goto fail; + } + + /* Basic sanity checks for unsupported packets */ + + /* MESH mode */ + if (qdf_unlikely(vdev->mesh_vdev)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Mesh mode is not supported in exception 
path"); + goto fail; + } + + /* TSO or SG */ + if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) || + qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "TSO and SG are not supported in exception path"); + + goto fail; + } + + /* RAW */ + if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Raw frame is not supported in exception path"); + goto fail; + } + + + /* Mcast enhancement*/ + if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) { + if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW\n"); + } + } + + /* + * Get HW Queue to use for this frame. + * TCL supports upto 4 DMA rings, out of which 3 rings are + * dedicated for data and 1 for command. + * "queue_id" maps to one hardware ring. + * With each ring, we also associate a unique Tx descriptor pool + * to minimize lock contention for these resources. + */ + dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue); + + /* Reset the control block */ + qdf_nbuf_reset_ctxt(nbuf); + + /* Single linear frame */ + /* + * If nbuf is a simple linear frame, use send_single function to + * prepare direct-buffer type TCL descriptor and enqueue to TCL + * SRNG. There is no need to setup a MSDU extension descriptor. 
+ */ + nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, + tx_exc_metadata->peer_id, tx_exc_metadata); + + return nbuf; + +fail: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "pkt send failed"); + return nbuf; +} + +/** + * dp_tx_send_mesh() - Transmit mesh frame on a given VAP + * @vap_dev: DP vdev handle + * @nbuf: skb + * + * Entry point for Core Tx layer (DP_TX) invoked from + * hard_start_xmit in OSIF/HDD + * + * Return: NULL on success, + * nbuf when it fails to send + */ +#ifdef MESH_MODE_SUPPORT +qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf) +{ + struct meta_hdr_s *mhdr; + qdf_nbuf_t nbuf_mesh = NULL; + qdf_nbuf_t nbuf_clone = NULL; + struct dp_vdev *vdev = (struct dp_vdev *) vap_dev; + uint8_t no_enc_frame = 0; + + nbuf_mesh = qdf_nbuf_unshare(nbuf); + if (nbuf_mesh == NULL) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "qdf_nbuf_unshare failed\n"); + return nbuf; + } + nbuf = nbuf_mesh; + + mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf); + + if ((vdev->sec_type != cdp_sec_type_none) && + (mhdr->flags & METAHDR_FLAG_NOENCRYPT)) + no_enc_frame = 1; + + if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) && + !no_enc_frame) { + nbuf_clone = qdf_nbuf_clone(nbuf); + if (nbuf_clone == NULL) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "qdf_nbuf_clone failed\n"); + return nbuf; + } + qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO); + } + + if (nbuf_clone) { + if (!dp_tx_send(vap_dev, nbuf_clone)) { + DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1); + } else { + qdf_nbuf_free(nbuf_clone); + } + } + + if (no_enc_frame) + qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO); + else + qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID); + + nbuf = dp_tx_send(vap_dev, nbuf); + if ((nbuf == NULL) && no_enc_frame) { + DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1); + } + + return nbuf; +} + +#else + +qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf) +{ + return dp_tx_send(vap_dev, nbuf); +} + +#endif + +/** + * 
dp_tx_send() - Transmit a frame on a given VAP
 * @vap_dev: DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
 * cases
 *
 * Return: NULL on success,
 *	   nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
{
	struct ether_header *eh = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_tx_seg_info_s seg_info;
	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
	uint16_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf_mesh = NULL;

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);

	eh = (struct ether_header *)qdf_nbuf_data(nbuf);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s , skb %pM",
		  __func__, nbuf->data);

	/*
	 * Set Default Host TID value to invalid TID
	 * (TID override disabled)
	 */
	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(vdev->mesh_vdev)) {
		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
				&msdu_info);
		if (nbuf_mesh == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  "Extracting mesh metadata failed\n");
			return nbuf;
		}
		nbuf = nbuf_mesh;
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports upto 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * TCL H/W supports 2 DSCP-TID mapping tables.
	 * Table 1 - Default DSCP-TID mapping table
	 * Table 2 - 1 DSCP-TID override table
	 *
	 * If we need a different DSCP-TID mapping for this vap,
	 * call tid_classify to extract DSCP/ToS from frame and
	 * map to a TID and store in msdu_info. This is later used
	 * to fill in TCL Input descriptor (per-packet TID override).
	 */
	dp_tx_classify_tid(vdev, nbuf, &msdu_info);

	/* Reset the control block */
	qdf_nbuf_reset_ctxt(nbuf);

	/*
	 * Classify the frame and call corresponding
	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO , SG, ME, or Raw)
	 * into MSDU_INFO structure which is later used to fill
	 * SW and HW descriptors.
	 */
	if (qdf_nbuf_is_tso(nbuf)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s TSO frame %pK\n", __func__, vdev);
		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
				qdf_nbuf_len(nbuf));

		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
			DP_STATS_INC(vdev, tx_i.tso.dropped_host, 1);
			return nbuf;
		}

		goto send_multiple;
	}

	/* SG */
	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s non-TSO SG frame %pK\n", __func__, vdev);

		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
				qdf_nbuf_len(nbuf));

		goto send_multiple;
	}

#ifdef ATH_SUPPORT_IQUE
	/* Mcast to Ucast Conversion*/
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  "%s Mcast frm for ME %pK\n", __func__, vdev);

			DP_STATS_INC_PKT(vdev,
					tx_i.mcast_en.mcast_pkt, 1,
					qdf_nbuf_len(nbuf));
			/* ME takes ownership of the frame on success */
			if (dp_tx_prepare_send_me(vdev, nbuf) ==
					QDF_STATUS_SUCCESS) {
				return NULL;
			}
		}
	}
#endif

	/* RAW */
	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
		if (nbuf == NULL)
			return NULL;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s Raw frame %pK\n", __func__, vdev);

		goto send_multiple;

	}

	/* Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
	 * SRNG. There is no need to setup a MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);

	return nbuf;

send_multiple:
	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	return nbuf;
}

/**
 * dp_tx_reinject_handler() - Tx Reinject Handler
 * @tx_desc: software descriptor head pointer
 * @status : Tx completion status from HTT descriptor
 *
 * This function reinjects frames back to Target.
 * Todo - Host queue needs to be added
 *
 * Return: none
 */
static
void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	struct dp_vdev *vdev;
	struct dp_peer *peer = NULL;
	uint32_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	qdf_nbuf_t nbuf_copy = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_peer *sa_peer = NULL;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_soc *soc = NULL;
	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
#ifdef WDS_VENDOR_EXTENSION
	int is_mcast = 0, is_ucast = 0;
	int num_peers_3addr = 0;
	struct ether_header *eth_hdr = (struct ether_header *)(qdf_nbuf_data(nbuf));
	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
#endif

	vdev = tx_desc->vdev;
	soc = vdev->pdev->soc;

	qdf_assert(vdev);

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);

	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s Tx reinject path\n", __func__);

	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
			qdf_nbuf_len(tx_desc->nbuf));

	qdf_spin_lock_bh(&(soc->ast_lock));

	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));

	if (ast_entry)
		sa_peer = ast_entry->peer;

	qdf_spin_unlock_bh(&(soc->ast_lock));

#ifdef WDS_VENDOR_EXTENSION
	if (qdf_unlikely(vdev->tx_encap_type != 
htt_cmn_pkt_type_raw)) { + is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0; + } else { + is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0; + } + is_ucast = !is_mcast; + + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + if (peer->bss_peer) + continue; + + /* Detect wds peers that use 3-addr framing for mcast. + * if there are any, the bss_peer is used to send the + * the mcast frame using 3-addr format. all wds enabled + * peers that use 4-addr framing for mcast frames will + * be duplicated and sent as 4-addr frames below. + */ + if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) { + num_peers_3addr = 1; + break; + } + } +#endif + + if (qdf_unlikely(vdev->mesh_vdev)) { + DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf); + } else { + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + if ((peer->peer_ids[0] != HTT_INVALID_PEER) && +#ifdef WDS_VENDOR_EXTENSION + /* + * . if 3-addr STA, then send on BSS Peer + * . if Peer WDS enabled and accept 4-addr mcast, + * send mcast on that peer only + * . 
if Peer WDS enabled and accept 4-addr ucast, + * send ucast on that peer only + */ + ((peer->bss_peer && num_peers_3addr && is_mcast) || + (peer->wds_enabled && + ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) || + (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) { +#else + ((peer->bss_peer && + !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) || + peer->nawds_enabled)) { +#endif + peer_id = DP_INVALID_PEER; + + if (peer->nawds_enabled) { + peer_id = peer->peer_ids[0]; + if (sa_peer == peer) { + QDF_TRACE( + QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + " %s: multicast packet", + __func__); + DP_STATS_INC(peer, + tx.nawds_mcast_drop, 1); + continue; + } + } + + nbuf_copy = qdf_nbuf_copy(nbuf); + + if (!nbuf_copy) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + FL("nbuf copy failed")); + break; + } + + nbuf_copy = dp_tx_send_msdu_single(vdev, + nbuf_copy, + &msdu_info, + peer_id, + NULL); + + if (nbuf_copy) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + FL("pkt send failed")); + qdf_nbuf_free(nbuf_copy); + } else { + if (peer_id != DP_INVALID_PEER) + DP_STATS_INC_PKT(peer, + tx.nawds_mcast, + 1, qdf_nbuf_len(nbuf)); + } + } + } + } + + if (vdev->nawds_enabled) { + peer_id = DP_INVALID_PEER; + + DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast, + 1, qdf_nbuf_len(nbuf)); + + nbuf = dp_tx_send_msdu_single(vdev, + nbuf, + &msdu_info, + peer_id, NULL); + + if (nbuf) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_DEBUG, + FL("pkt send failed")); + qdf_nbuf_free(nbuf); + } + } else + qdf_nbuf_free(nbuf); + + dp_tx_desc_release(tx_desc, tx_desc->pool_id); +} + +/** + * dp_tx_inspect_handler() - Tx Inspect Handler + * @tx_desc: software descriptor head pointer + * @status : Tx completion status from HTT descriptor + * + * Handles Tx frames sent back to Host for inspection + * (ProxyARP) + * + * Return: none + */ +static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status) +{ + + struct dp_soc *soc; + struct dp_pdev *pdev = 
tx_desc->pdev;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s Tx inspect path\n",
			__func__);

	qdf_assert(pdev);

	soc = pdev->soc;

	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
			qdf_nbuf_len(tx_desc->nbuf));

	/* Frame is only accounted and freed; nothing is forwarded here */
	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}

#ifdef FEATURE_PERPKT_INFO
/**
 * dp_get_completion_indication_for_stack() - send completion to stack
 * @soc : dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @ppdu_id: ppdu_id
 * @first_msdu: first msdu
 * @last_msdu: last msdu
 * @netbuf: Buffer pointer for free
 *
 * This function is used for indication whether buffer needs to be
 * send to stack for free or not. On success a tx_capture_hdr is
 * pushed into the headroom of @netbuf (TA = vdev mac, RA = peer mac).
 *
 * Return: QDF_STATUS_SUCCESS when the caller must hand the buffer to the
 * stack; an error status when the caller should free it normally.
 */
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc, struct dp_pdev *pdev,
		uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
		uint8_t last_msdu, qdf_nbuf_t netbuf)
{
	struct tx_capture_hdr *ppdu_hdr;
	struct dp_peer *peer = NULL;

	/* Fast exit when neither tx sniffer nor m_copy mode is active */
	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode))
		return QDF_STATUS_E_NOSUPPORT;

	peer = (peer_id == HTT_INVALID_PEER) ? NULL :
			dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Peer Invalid"));
		return QDF_STATUS_E_INVAL;
	}

	if (pdev->mcopy_mode) {
		/* m_copy: deliver only one MSDU per (ppdu_id, peer_id) pair */
		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
			(pdev->m_copy_id.tx_peer_id == peer_id)) {
			return QDF_STATUS_E_INVAL;
		}

		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
		pdev->m_copy_id.tx_peer_id = peer_id;
	}

	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("No headroom"));
		return QDF_STATUS_E_NOMEM;
	}

	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
					IEEE80211_ADDR_LEN);
	ppdu_hdr->ppdu_id = ppdu_id;
	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
			IEEE80211_ADDR_LEN);
	ppdu_hdr->peer_id = peer_id;
	ppdu_hdr->first_msdu = first_msdu;
	ppdu_hdr->last_msdu = last_msdu;

	return QDF_STATUS_SUCCESS;
}


/**
 * dp_send_completion_to_stack() - send completion to stack
 * @soc : dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @ppdu_id: ppdu_id
 * @netbuf: Buffer pointer for free
 *
 * This function is used to send completion to stack
 * to free buffer (delivered through the WDI TX_DATA event).
 */
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
		uint16_t peer_id, uint32_t ppdu_id,
		qdf_nbuf_t netbuf)
{
	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
				netbuf, peer_id,
				WDI_NO_VAL, pdev->pdev_id);
}
#else
/* FEATURE_PERPKT_INFO disabled: stubs keep callers unconditional */
static QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc, struct dp_pdev *pdev,
		uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
		uint8_t last_msdu, qdf_nbuf_t netbuf)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static void
dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
		uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
{
}
#endif

/**
 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx
Descriptor
 * @soc: Soc handle
 * @desc: software Tx descriptor to be processed
 *
 * Return: none
 */
static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
		struct dp_tx_desc_s *desc)
{
	struct dp_vdev *vdev = desc->vdev;
	qdf_nbuf_t nbuf = desc->nbuf;

	/* If it is TDLS mgmt, don't unmap or free the frame */
	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
		return dp_non_std_tx_comp_free_buff(desc, vdev);

	/* 0 : MSDU buffer, 1 : MLE */
	if (desc->msdu_ext_desc) {
		/* TSO free */
		if (hal_tx_ext_desc_get_tso_enable(
					desc->msdu_ext_desc->vaddr)) {
			/* If remaining number of segment is 0
			 * actual TSO may unmap and free */
			if (qdf_nbuf_get_users(nbuf) == 1)
				__qdf_nbuf_unmap_single(soc->osdev,
						nbuf,
						QDF_DMA_TO_DEVICE);

			qdf_nbuf_free(nbuf);
			return;
		}
	}

	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);

	if (qdf_likely(!vdev->mesh_vdev))
		qdf_nbuf_free(nbuf);
	else {
		/* Mesh: FW-consumed frames are freed here, others are handed
		 * back to the OS shim's extended free callback */
		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
			qdf_nbuf_free(nbuf);
			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
		} else
			vdev->osif_tx_free_ext((nbuf));
	}
}

/**
 * dp_tx_mec_handler() - Tx MEC Notify Handler
 * @vdev: pointer to dp dev handler
 * @status : Tx completion status from HTT descriptor
 *
 * Handles MEC notify event sent from fw to Host
 *
 * Return: none
 */
#ifdef FEATURE_WDS
void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{

	struct dp_soc *soc;
	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
	struct dp_peer *peer;
	uint8_t mac_addr[DP_MAC_ADDR_LEN], i;

	if (!vdev->wds_enabled)
		return;

	soc = vdev->pdev->soc;
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	peer = TAILQ_FIRST(&vdev->peer_list);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				FL("peer is NULL"));
		return;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s Tx MEC Handler\n",
			__func__);

	/* Extract the MAC address from the HTT status words, reversing the
	 * byte order (firmware reports it in reverse).
	 * NOTE(review): the offset (DP_MAC_ADDR_LEN - 2) into status[] is
	 * taken on trust from the HTT layout — confirm against the HTT spec.
	 */
	for (i = 0; i < DP_MAC_ADDR_LEN; i++)
		mac_addr[(DP_MAC_ADDR_LEN - 1) - i] =
			status[(DP_MAC_ADDR_LEN - 2) + i];

	/* Do not add a MEC AST entry for our own vdev address */
	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, DP_MAC_ADDR_LEN))
		dp_peer_add_ast(soc,
				peer,
				mac_addr,
				CDP_TXRX_AST_TYPE_MEC,
				flags);
}
#endif

/**
 * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
 * @tx_desc: software descriptor head pointer
 * @status : Tx completion status from HTT descriptor
 *
 * This function will process HTT Tx indication messages from Target
 *
 * Return: none
 */
static
void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	uint8_t tx_status;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_soc *soc;
	uint32_t *htt_status_word = (uint32_t *) status;

	qdf_assert(tx_desc->pdev);

	pdev = tx_desc->pdev;
	vdev = tx_desc->vdev;
	soc = pdev->soc;

	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_status_word[0]);

	switch (tx_status) {
	/* Terminal statuses: release the buffer and the descriptor */
	case HTT_TX_FW2WBM_TX_STATUS_OK:
	case HTT_TX_FW2WBM_TX_STATUS_DROP:
	case HTT_TX_FW2WBM_TX_STATUS_TTL:
	{
		dp_tx_comp_free_buf(soc, tx_desc);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
	{
		/* reinject handler owns buffer + descriptor release */
		dp_tx_reinject_handler(tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
	{
		/* inspect handler owns buffer + descriptor release */
		dp_tx_inspect_handler(tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
	{
		/* MEC notify carries no frame; nothing to free here */
		dp_tx_mec_handler(vdev, status);
		break;
	}
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"%s Invalid HTT tx_status %d\n",
				__func__, tx_status);
		break;
	}
}

#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
 * in mesh meta header
 * @tx_desc: software descriptor head pointer
 * @ts: pointer to tx completion stats
 * Return: none
 */
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
		struct hal_tx_completion_status *ts)
{
	struct meta_hdr_s *mhdr;
qdf_nbuf_t netbuf = tx_desc->nbuf; + + if (!tx_desc->msdu_ext_desc) { + if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "netbuf %pK offset %d\n", + netbuf, tx_desc->pkt_offset); + return; + } + } + if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "netbuf %pK offset %d\n", netbuf, + sizeof(struct meta_hdr_s)); + return; + } + + mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf); + mhdr->rssi = ts->ack_frame_rssi; + mhdr->channel = tx_desc->pdev->operating_channel; +} + +#else +static +void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc, + struct hal_tx_completion_status *ts) +{ +} + +#endif + +/** + * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications + * @peer: Handle to DP peer + * @ts: pointer to HAL Tx completion stats + * @length: MSDU length + * + * Return: None + */ +static void dp_tx_update_peer_stats(struct dp_peer *peer, + struct hal_tx_completion_status *ts, uint32_t length) +{ + struct dp_pdev *pdev = peer->vdev->pdev; + struct dp_soc *soc = pdev->soc; + uint8_t mcs, pkt_type; + + mcs = ts->mcs; + pkt_type = ts->pkt_type; + + if (!ts->release_src == HAL_TX_COMP_RELEASE_SOURCE_TQM) + return; + + if (peer->bss_peer) { + DP_STATS_INC_PKT(peer, tx.mcast, 1, length); + } else { + if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) { + DP_STATS_INC_PKT(peer, tx.tx_success, 1, length); + } + DP_STATS_INC_PKT(peer, tx.ucast, 1, length); + } + + DP_STATS_INCC(peer, tx.dropped.age_out, 1, + (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED)); + + DP_STATS_INCC(peer, tx.dropped.fw_rem, 1, + (ts->status == HAL_TX_TQM_RR_REM_CMD_REM)); + + DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1, + (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX)); + + DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1, + (ts->status == HAL_TX_TQM_RR_REM_CMD_TX)); + + DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1, + (ts->status == 
HAL_TX_TQM_RR_FW_REASON1)); + + DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1, + (ts->status == HAL_TX_TQM_RR_FW_REASON2)); + + DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1, + (ts->status == HAL_TX_TQM_RR_FW_REASON3)); + + if (!ts->status == HAL_TX_TQM_RR_FRAME_ACKED) + return; + + DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma); + + DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu); + DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu); + + if (!(soc->process_tx_status)) + return; + + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, + ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, + ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, + ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, + ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, + ((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX))); + DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, + ((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX))); + DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1); + DP_STATS_INC(peer, tx.bw[ts->bw], 1); + DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi); + DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1); + DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc); + DP_STATS_INCC(peer, tx.ldpc, 1, 
ts->ldpc); + DP_STATS_INC_PKT(peer, tx.tx_success, 1, length); + DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1); + + if (soc->cdp_soc.ol_ops->update_dp_stats) { + soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev, + &peer->stats, ts->peer_id, + UPDATE_PEER_STATS); + } +} + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +/** + * dp_tx_flow_pool_lock() - take flow pool lock + * @soc: core txrx main context + * @tx_desc: tx desc + * + * Return: None + */ +static inline +void dp_tx_flow_pool_lock(struct dp_soc *soc, + struct dp_tx_desc_s *tx_desc) +{ + struct dp_tx_desc_pool_s *pool; + uint8_t desc_pool_id; + + desc_pool_id = tx_desc->pool_id; + pool = &soc->tx_desc[desc_pool_id]; + + qdf_spin_lock_bh(&pool->flow_pool_lock); +} + +/** + * dp_tx_flow_pool_unlock() - release flow pool lock + * @soc: core txrx main context + * @tx_desc: tx desc + * + * Return: None + */ +static inline +void dp_tx_flow_pool_unlock(struct dp_soc *soc, + struct dp_tx_desc_s *tx_desc) +{ + struct dp_tx_desc_pool_s *pool; + uint8_t desc_pool_id; + + desc_pool_id = tx_desc->pool_id; + pool = &soc->tx_desc[desc_pool_id]; + + qdf_spin_unlock_bh(&pool->flow_pool_lock); +} +#else +static inline +void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc) +{ +} + +static inline +void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc) +{ +} +#endif + +/** + * dp_tx_notify_completion() - Notify tx completion for this desc + * @soc: core txrx main context + * @tx_desc: tx desc + * @netbuf: buffer + * + * Return: none + */ +static inline void dp_tx_notify_completion(struct dp_soc *soc, + struct dp_tx_desc_s *tx_desc, + qdf_nbuf_t netbuf) +{ + void *osif_dev; + ol_txrx_completion_fp tx_compl_cbk = NULL; + + qdf_assert(tx_desc); + + dp_tx_flow_pool_lock(soc, tx_desc); + + if (!tx_desc->vdev || + !tx_desc->vdev->osif_vdev) { + dp_tx_flow_pool_unlock(soc, tx_desc); + return; + } + + osif_dev = tx_desc->vdev->osif_vdev; + tx_compl_cbk = tx_desc->vdev->tx_comp; + 
	dp_tx_flow_pool_unlock(soc, tx_desc);

	/* Invoke the OS-shim completion callback outside the lock */
	if (tx_compl_cbk)
		tx_compl_cbk(netbuf, osif_dev);
}

/**
 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
 * @tx_desc: software descriptor head pointer
 * @length: packet length
 *
 * Extracts the HAL completion status and updates SoC-, packet- (mesh) and
 * peer-level statistics accordingly.
 *
 * Return: none
 */
static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
		uint32_t length)
{
	struct hal_tx_completion_status ts;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = tx_desc->vdev;
	struct dp_peer *peer = NULL;
	struct ether_header *eh =
		(struct ether_header *)qdf_nbuf_data(tx_desc->nbuf);

	hal_tx_comp_get_status(&tx_desc->comp, &ts);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"-------------------- \n"
				"Tx Completion Stats: \n"
				"-------------------- \n"
				"ack_frame_rssi = %d \n"
				"first_msdu = %d \n"
				"last_msdu = %d \n"
				"msdu_part_of_amsdu = %d \n"
				"rate_stats valid = %d \n"
				"bw = %d \n"
				"pkt_type = %d \n"
				"stbc = %d \n"
				"ldpc = %d \n"
				"sgi = %d \n"
				"mcs = %d \n"
				"ofdma = %d \n"
				"tones_in_ru = %d \n"
				"tsf = %d \n"
				"ppdu_id = %d \n"
				"transmit_cnt = %d \n"
				"tid = %d \n"
				"peer_id = %d \n",
				ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu,
				ts.msdu_part_of_amsdu, ts.valid, ts.bw,
				ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi,
				ts.mcs, ts.ofdma, ts.tones_in_ru, ts.tsf,
				ts.ppdu_id, ts.transmit_cnt, ts.tid,
				ts.peer_id);

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"invalid vdev");
		goto out;
	}

	soc = vdev->pdev->soc;

	/* Update SoC level stats */
	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
			(ts.status == HAL_TX_TQM_RR_REM_CMD_REM));

	/* Update per-packet stats */
	if (qdf_unlikely(vdev->mesh_vdev) &&
			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
		dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts);

	/* Update peer level stats */
	peer = dp_peer_find_by_id(soc, ts.peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"invalid peer");
		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
		goto out;
	}

	if (qdf_likely(peer->vdev->tx_encap_type ==
				htt_cmn_pkt_type_ethernet)) {
		if (peer->bss_peer && IEEE80211_IS_BROADCAST(eh->ether_dhost))
			DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
	}

	dp_tx_update_peer_stats(peer, &ts, length);

out:
	return;
}

/**
 * dp_tx_comp_process_desc() - Tx complete software descriptor handler
 * @soc: core txrx main context
 * @comp_head: software descriptor head pointer
 *
 * This function will process batch of descriptors reaped by dp_tx_comp_handler
 * and release the software descriptors after processing is complete
 *
 * Return: none
 */
static void dp_tx_comp_process_desc(struct dp_soc *soc,
		struct dp_tx_desc_s *comp_head)
{
	struct dp_tx_desc_s *desc;
	struct dp_tx_desc_s *next;
	struct hal_tx_completion_status ts = {0};
	uint32_t length;
	struct dp_peer *peer;

	DP_HIST_INIT();
	desc = comp_head;

	while (desc) {
		hal_tx_comp_get_status(&desc->comp, &ts);
		/* NOTE(review): peer looked up here is never used in this
		 * loop body — possibly leftover from a removed change. */
		peer = dp_peer_find_by_id(soc, ts.peer_id);
		length = qdf_nbuf_len(desc->nbuf);

		/* check tx completion notification */
		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(desc->nbuf))
			dp_tx_notify_completion(soc, desc, desc->nbuf);

		dp_tx_comp_process_tx_status(desc, length);

		DPTRACE(qdf_dp_trace_ptr
				(desc->nbuf,
				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(desc->nbuf),
				 sizeof(qdf_nbuf_data(desc->nbuf)),
				 desc->id, ts.status)
			);

		/*currently m_copy/tx_capture is not supported for scatter gather packets*/
		if (!(desc->msdu_ext_desc) && (dp_get_completion_indication_for_stack(soc,
					desc->pdev, ts.peer_id, ts.ppdu_id,
					ts.first_msdu, ts.last_msdu,
					desc->nbuf) == QDF_STATUS_SUCCESS)) {
			qdf_nbuf_unmap(soc->osdev, desc->nbuf,
						QDF_DMA_TO_DEVICE);

			dp_send_completion_to_stack(soc, desc->pdev, ts.peer_id,
				ts.ppdu_id, desc->nbuf);
		} else {
			dp_tx_comp_free_buf(soc, desc);
		}

DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id); + + next = desc->next; + dp_tx_desc_release(desc, desc->pool_id); + desc = next; + } + DP_TX_HIST_STATS_PER_PDEV(); +} + +/** + * dp_tx_comp_handler() - Tx completion handler + * @soc: core txrx main context + * @ring_id: completion ring id + * @quota: No. of packets/descriptors that can be serviced in one loop + * + * This function will collect hardware release ring element contents and + * handle descriptor contents. Based on contents, free packet or handle error + * conditions + * + * Return: none + */ +uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota) +{ + void *tx_comp_hal_desc; + uint8_t buffer_src; + uint8_t pool_id; + uint32_t tx_desc_id; + struct dp_tx_desc_s *tx_desc = NULL; + struct dp_tx_desc_s *head_desc = NULL; + struct dp_tx_desc_s *tail_desc = NULL; + uint32_t num_processed; + uint32_t count; + + if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s %d : HAL RING Access Failed -- %pK\n", + __func__, __LINE__, hal_srng); + return 0; + } + + num_processed = 0; + count = 0; + + /* Find head descriptor from completion ring */ + while (qdf_likely(tx_comp_hal_desc = + hal_srng_dst_get_next(soc->hal_soc, hal_srng))) { + + buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc); + + /* If this buffer was not released by TQM or FW, then it is not + * Tx completion indication, assert */ + if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) && + (buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) { + + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_FATAL, + "Tx comp release_src != TQM | FW"); + + qdf_assert_always(0); + } + + /* Get descriptor id */ + tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc); + pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >> + DP_TX_DESC_ID_POOL_OS; + + /* Pool ID is out of limit. 
Error */ + if (pool_id > wlan_cfg_get_num_tx_desc_pool( + soc->wlan_cfg_ctx)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_FATAL, + "Tx Comp pool id %d not valid", + pool_id); + + qdf_assert_always(0); + } + + /* Find Tx descriptor */ + tx_desc = dp_tx_desc_find(soc, pool_id, + (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >> + DP_TX_DESC_ID_PAGE_OS, + (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >> + DP_TX_DESC_ID_OFFSET_OS); + + /* + * If the release source is FW, process the HTT status + */ + if (qdf_unlikely(buffer_src == + HAL_TX_COMP_RELEASE_SOURCE_FW)) { + uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN]; + hal_tx_comp_get_htt_desc(tx_comp_hal_desc, + htt_tx_status); + dp_tx_process_htt_completion(tx_desc, + htt_tx_status); + } else { + /* Pool id is not matching. Error */ + if (tx_desc->pool_id != pool_id) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_FATAL, + "Tx Comp pool id %d not matched %d", + pool_id, tx_desc->pool_id); + + qdf_assert_always(0); + } + + if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) || + !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_FATAL, + "Txdesc invalid, flgs = %x,id = %d", + tx_desc->flags, tx_desc_id); + qdf_assert_always(0); + } + + /* First ring descriptor on the cycle */ + if (!head_desc) { + head_desc = tx_desc; + tail_desc = tx_desc; + } + + tail_desc->next = tx_desc; + tx_desc->next = NULL; + tail_desc = tx_desc; + + /* Collect hw completion contents */ + hal_tx_comp_desc_sync(tx_comp_hal_desc, + &tx_desc->comp, 1); + + } + + num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK); + /* Decrement PM usage count if the packet has been sent.*/ + hif_pm_runtime_put(soc->hif_handle); + + /* + * Processed packet count is more than given quota + * stop to processing + */ + if ((num_processed >= quota)) + break; + + count++; + } + + hal_srng_access_end(soc->hal_soc, hal_srng); + + /* Process the reaped descriptors */ + if (head_desc) + dp_tx_comp_process_desc(soc, head_desc); + 
	return num_processed;
}

#ifdef CONVERGED_TDLS_ENABLE
/**
 * dp_tx_non_std() - Allow the control-path SW to send data frames
 *
 * @data_vdev - which vdev should transmit the tx data frames
 * @tx_spec - what non-standard handling to apply to the tx data frames
 * @msdu_list - NULL-terminated list of tx MSDUs
 *
 * Return: NULL on success,
 * nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
		enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;

	/* Mark the vdev so the completion path skips unmap/free (TDLS) */
	if (tx_spec & OL_TX_SPEC_NO_FREE)
		vdev->is_tdls_frame = true;
	return dp_tx_send(vdev_handle, msdu_list);
}
#endif

/**
 * dp_tx_vdev_attach() - attach vdev to dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	/*
	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
	 */
	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
			HTT_TCL_METADATA_TYPE_VDEV_BASED);

	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
			vdev->vdev_id);

	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
			DP_SW2HW_MACID(vdev->pdev->pdev_id));

	/*
	 * Set HTT Extension Valid bit to 0 by default
	 */
	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);

	dp_tx_vdev_update_search_flags(vdev);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
 * @vdev: virtual device instance
 *
 * Return: void
 *
 */
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
	/*
	 * Enable both AddrY (SA based search) and AddrX (Da based search)
	 * for TDLS link
	 *
	 * Enable AddrY (SA based search) only for non-WDS STA and
	 * ProxySTA VAP modes.
	 *
	 * In all other VAP modes, only DA based search should be
	 * enabled
	 */
	if (vdev->opmode == wlan_op_mode_sta &&
	    vdev->tdls_link_connected)
		vdev->hal_desc_addr_search_flags =
			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
	else if ((vdev->opmode == wlan_op_mode_sta &&
				(!vdev->wds_enabled || vdev->proxysta_vdev)))
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
	else
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Flow-pool descriptors are torn down by the flow control module */
static void dp_tx_desc_flush(struct dp_vdev *vdev)
{
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */

/* dp_tx_desc_flush() - release resources associated
 * to tx_desc
 * @vdev: virtual device instance
 *
 * This function will free all outstanding Tx buffers,
 * including ME buffer for which either free during
 * completion didn't happened or completion is not
 * received.
 */
static void dp_tx_desc_flush(struct dp_vdev *vdev)
{
	uint8_t i, num_pool;
	uint32_t j;
	uint32_t num_desc;
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	/* Walk every descriptor of every pool; only descriptors that are
	 * still allocated AND belong to this vdev are reclaimed. */
	for (i = 0; i < num_pool; i++) {
		for (j = 0; j < num_desc; j++) {
			tx_desc_pool = &((soc)->tx_desc[(i)]);
			if (tx_desc_pool &&
				tx_desc_pool->desc_pages.cacheable_pages) {
				tx_desc = dp_tx_desc_find(soc, i,
					(j & DP_TX_DESC_ID_PAGE_MASK) >>
					DP_TX_DESC_ID_PAGE_OS,
					(j & DP_TX_DESC_ID_OFFSET_MASK) >>
					DP_TX_DESC_ID_OFFSET_OS);

				if (tx_desc && (tx_desc->vdev == vdev) &&
					(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				}
			}
		}
	}
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	/* Reclaim any Tx descriptors still owned by this vdev */
	dp_tx_desc_flush(vdev);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_pdev_attach() - attach pdev to dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Initialize Flow control counters */
	qdf_atomic_init(&pdev->num_tx_exception);
	qdf_atomic_init(&pdev->num_tx_outstanding);

	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		/* Initialize descriptors in TCL Ring */
		hal_tx_init_data_ring(soc->hal_soc,
				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_pdev_detach() - detach pdev from dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
{
	/* Release mcast-enhancement buffers owned by this pdev */
	dp_tx_me_exit(pdev);
	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
				int num_desc)
{
	uint8_t i;

	/* Only the per-pool locks/state are set up here; descriptors are
	 * allocated on demand by the flow control module */
	for (i = 0; i < num_pool; i++) {
		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
	}

	return 0;
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2!
*/
static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
				int num_desc)
{
	uint8_t i;

	/* Allocate software Tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"%s Tx Desc Pool alloc %d failed %pK\n",
					__func__, i, soc);
			/* NOTE(review): returns positive ENOMEM; callers only
			 * test for non-zero, so this is consistent here */
			return ENOMEM;
		}
	}
	return 0;
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		/* All descriptors must have been returned before teardown */
		qdf_assert_always(!soc->tx_desc[i].num_allocated);
		if (dp_tx_desc_pool_free(soc, i)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"%s Tx Desc Pool Free failed\n", __func__);
		}
	}
}

#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_soc_detach() - detach soc from dp tx
 * @soc: core txrx main context
 *
 * This function will detach dp tx into main device context
 * will free dp tx resource and initialize resources
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint16_t num_desc;
	uint16_t num_ext_desc;
	uint8_t i;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_delete_static_pools(soc, num_pool);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s Tx Desc Pool Free num_pool = %d, descs = %d\n",
			__func__, num_pool, num_desc);

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_ext_desc_pool_free(soc, i)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
					"%s Tx Ext Desc Pool Free failed\n",
					__func__);
			return QDF_STATUS_E_RESOURCES;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		"%s MSDU Ext Desc Pool %d Free descs = %d\n",
		__func__, num_pool, num_ext_desc);

	for (i = 0; i < num_pool;
i++) { + dp_tx_tso_desc_pool_free(soc, i); + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s TSO Desc Pool %d Free descs = %d\n", + __func__, num_pool, num_desc); + + + for (i = 0; i < num_pool; i++) + dp_tx_tso_num_seg_pool_free(soc, i); + + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s TSO Num of seg Desc Pool %d Free descs = %d\n", + __func__, num_pool, num_desc); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_tx_soc_attach() - attach soc to dp tx + * @soc: core txrx main context + * + * This function will attach dp tx into main device context + * will allocate dp tx resource and initialize resources + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc) +{ + uint8_t i; + uint8_t num_pool; + uint32_t num_desc; + uint32_t num_ext_desc; + + num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); + num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); + num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx); + + if (dp_tx_alloc_static_pools(soc, num_pool, num_desc)) + goto fail; + + dp_tx_flow_control_init(soc); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s Tx Desc Alloc num_pool = %d, descs = %d\n", + __func__, num_pool, num_desc); + + /* Allocate extension tx descriptor pools */ + for (i = 0; i < num_pool; i++) { + if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "MSDU Ext Desc Pool alloc %d failed %pK\n", + i, soc); + + goto fail; + } + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s MSDU Ext Desc Alloc %d, descs = %d\n", + __func__, num_pool, num_ext_desc); + + for (i = 0; i < num_pool; i++) { + if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "TSO Desc Pool alloc %d failed %pK\n", + i, soc); + + goto fail; + } + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s TSO Desc Alloc %d, 
descs = %d\n", + __func__, num_pool, num_desc); + + for (i = 0; i < num_pool; i++) { + if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "TSO Num of seg Pool alloc %d failed %pK\n", + i, soc); + + goto fail; + } + } + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s TSO Num of seg pool Alloc %d, descs = %d\n", + __func__, num_pool, num_desc); + + /* Initialize descriptors in TCL Rings */ + if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { + for (i = 0; i < soc->num_tcl_data_rings; i++) { + hal_tx_init_data_ring(soc->hal_soc, + soc->tcl_data_ring[i].hal_srng); + } + } + + /* + * todo - Add a runtime config option to enable this. + */ + /* + * Due to multiple issues on NPR EMU, enable it selectively + * only for NPR EMU, should be removed, once NPR platforms + * are stable. + */ + soc->process_tx_status = CONFIG_PROCESS_TX_STATUS; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s HAL Tx init Success\n", __func__); + + return QDF_STATUS_SUCCESS; + +fail: + /* Detach will take care of freeing only allocated resources */ + dp_tx_soc_detach(soc); + return QDF_STATUS_E_RESOURCES; +} + +/* + * dp_tx_me_mem_free(): Function to free allocated memory in mcast enhancement + * pdev: pointer to DP PDEV structure + * seg_info_head: Pointer to the head of list + * + * return: void + */ +static void dp_tx_me_mem_free(struct dp_pdev *pdev, + struct dp_tx_seg_info_s *seg_info_head) +{ + struct dp_tx_me_buf_t *mc_uc_buf; + struct dp_tx_seg_info_s *seg_info_new = NULL; + qdf_nbuf_t nbuf = NULL; + uint64_t phy_addr; + + while (seg_info_head) { + nbuf = seg_info_head->nbuf; + mc_uc_buf = (struct dp_tx_me_buf_t *) + seg_info_head->frags[0].vaddr; + phy_addr = seg_info_head->frags[0].paddr_hi; + phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo; + qdf_mem_unmap_nbytes_single(pdev->soc->osdev, + phy_addr, + QDF_DMA_TO_DEVICE , DP_MAC_ADDR_LEN); + dp_tx_me_free_buf(pdev, mc_uc_buf); + 
qdf_nbuf_free(nbuf); + seg_info_new = seg_info_head; + seg_info_head = seg_info_head->next; + qdf_mem_free(seg_info_new); + } +} + +/** + * dp_tx_me_send_convert_ucast(): function to convert multicast to unicast + * @vdev: DP VDEV handle + * @nbuf: Multicast nbuf + * @newmac: Table of the clients to which packets have to be sent + * @new_mac_cnt: No of clients + * + * return: no of converted packets + */ +uint16_t +dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf, + uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt) +{ + struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle; + struct dp_pdev *pdev = vdev->pdev; + struct ether_header *eh; + uint8_t *data; + uint16_t len; + + /* reference to frame dst addr */ + uint8_t *dstmac; + /* copy of original frame src addr */ + uint8_t srcmac[DP_MAC_ADDR_LEN]; + + /* local index into newmac */ + uint8_t new_mac_idx = 0; + struct dp_tx_me_buf_t *mc_uc_buf; + qdf_nbuf_t nbuf_clone; + struct dp_tx_msdu_info_s msdu_info; + struct dp_tx_seg_info_s *seg_info_head = NULL; + struct dp_tx_seg_info_s *seg_info_tail = NULL; + struct dp_tx_seg_info_s *seg_info_new; + struct dp_tx_frag_info_s data_frag; + qdf_dma_addr_t paddr_data; + qdf_dma_addr_t paddr_mcbuf = 0; + uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0}; + QDF_STATUS status; + + qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0); + + dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue); + + eh = (struct ether_header *) nbuf; + qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN); + + len = qdf_nbuf_len(nbuf); + + data = qdf_nbuf_data(nbuf); + + status = qdf_nbuf_map(vdev->osdev, nbuf, + QDF_DMA_TO_DEVICE); + + if (status) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Mapping failure Error:%d", status); + DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1); + qdf_nbuf_free(nbuf); + return 1; + } + + paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN; + + /*preparing data fragment*/ + data_frag.vaddr = qdf_nbuf_data(nbuf) + 
IEEE80211_ADDR_LEN; + data_frag.paddr_lo = (uint32_t)paddr_data; + data_frag.paddr_hi = (((uint64_t) paddr_data) >> 32); + data_frag.len = len - DP_MAC_ADDR_LEN; + + for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) { + dstmac = newmac[new_mac_idx]; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "added mac addr (%pM)", dstmac); + + /* Check for NULL Mac Address */ + if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN)) + continue; + + /* frame to self mac. skip */ + if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN)) + continue; + + /* + * TODO: optimize to avoid malloc in per-packet path + * For eg. seg_pool can be made part of vdev structure + */ + seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new)); + + if (!seg_info_new) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "alloc failed"); + DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1); + goto fail_seg_alloc; + } + + mc_uc_buf = dp_tx_me_alloc_buf(pdev); + if (mc_uc_buf == NULL) + goto fail_buf_alloc; + + /* + * TODO: Check if we need to clone the nbuf + * Or can we just use the reference for all cases + */ + if (new_mac_idx < (new_mac_cnt - 1)) { + nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf); + if (nbuf_clone == NULL) { + DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1); + goto fail_clone; + } + } else { + /* + * Update the ref + * to account for frame sent without cloning + */ + qdf_nbuf_ref(nbuf); + nbuf_clone = nbuf; + } + + qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN); + + status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data, + QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN, + &paddr_mcbuf); + + if (status) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Mapping failure Error:%d", status); + DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1); + goto fail_map; + } + + seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf; + seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf; + seg_info_new->frags[0].paddr_hi = + ((uint64_t) paddr_mcbuf >> 
32); + seg_info_new->frags[0].len = DP_MAC_ADDR_LEN; + + seg_info_new->frags[1] = data_frag; + seg_info_new->nbuf = nbuf_clone; + seg_info_new->frag_cnt = 2; + seg_info_new->total_len = len; + + seg_info_new->next = NULL; + + if (seg_info_head == NULL) + seg_info_head = seg_info_new; + else + seg_info_tail->next = seg_info_new; + + seg_info_tail = seg_info_new; + } + + if (!seg_info_head) { + goto free_return; + } + + msdu_info.u.sg_info.curr_seg = seg_info_head; + msdu_info.num_seg = new_mac_cnt; + msdu_info.frm_type = dp_tx_frm_me; + + DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt); + dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info); + + while (seg_info_head->next) { + seg_info_new = seg_info_head; + seg_info_head = seg_info_head->next; + qdf_mem_free(seg_info_new); + } + qdf_mem_free(seg_info_head); + + qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE); + qdf_nbuf_free(nbuf); + return new_mac_cnt; + +fail_map: + qdf_nbuf_free(nbuf_clone); + +fail_clone: + dp_tx_me_free_buf(pdev, mc_uc_buf); + +fail_buf_alloc: + qdf_mem_free(seg_info_new); + +fail_seg_alloc: + dp_tx_me_mem_free(pdev, seg_info_head); + +free_return: + qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE); + qdf_nbuf_free(nbuf); + return 1; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.h new file mode 100644 index 0000000000000000000000000000000000000000..8d67465983969d57cebcc1697aa25df7df96d624 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx.h @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef __DP_TX_H +#define __DP_TX_H + +#include +#include +#include "dp_types.h" + + +#define DP_TX_MAX_NUM_FRAGS 6 + +#define DP_TX_DESC_FLAG_ALLOCATED 0x1 +#define DP_TX_DESC_FLAG_TO_FW 0x2 +#define DP_TX_DESC_FLAG_FRAG 0x4 +#define DP_TX_DESC_FLAG_RAW 0x8 +#define DP_TX_DESC_FLAG_MESH 0x10 +#define DP_TX_DESC_FLAG_QUEUED_TX 0x20 +#define DP_TX_DESC_FLAG_COMPLETED_TX 0x40 +#define DP_TX_DESC_FLAG_ME 0x80 +#define DP_TX_DESC_FLAG_TDLS_FRAME 0x100 + +#define DP_TX_FREE_SINGLE_BUF(soc, buf) \ +do { \ + qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE); \ + qdf_nbuf_free(buf); \ +} while (0) + +#define OCB_HEADER_VERSION 1 + +/** + * struct dp_tx_frag_info_s + * @vaddr: hlos vritual address for buffer + * @paddr_lo: physical address lower 32bits + * @paddr_hi: physical address higher bits + * @len: length of the buffer + */ +struct dp_tx_frag_info_s { + uint8_t *vaddr; + uint32_t paddr_lo; + uint16_t paddr_hi; + uint16_t len; +}; + +/** + * struct dp_tx_seg_info_s - Segmentation Descriptor + * @nbuf: NBUF pointer if segment corresponds to separate nbuf + * @frag_cnt: Fragment count in this segment + * @total_len: Total length of segment + * @frags: per-Fragment information + * @next: pointer to next MSDU segment + */ +struct dp_tx_seg_info_s { + qdf_nbuf_t nbuf; + uint16_t frag_cnt; + uint16_t total_len; + struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS]; + struct dp_tx_seg_info_s *next; +}; + +/** + * struct dp_tx_sg_info_s - Scatter Gather 
Descriptor + * @num_segs: Number of segments (TSO/ME) in the frame + * @total_len: Total length of the frame + * @curr_seg: Points to current segment descriptor to be processed. Chain of + * descriptors for SG frames/multicast-unicast converted packets. + * + * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to + * carry fragmentation information + * Raw Frames will be handed over to driver as an SKB chain with MPDU boundaries + * indicated through flags in SKB CB (first_msdu and last_msdu). This will be + * converted into set of skb sg (nr_frags) structures. + */ +struct dp_tx_sg_info_s { + uint32_t num_segs; + uint32_t total_len; + struct dp_tx_seg_info_s *curr_seg; +}; + +/** + * struct dp_tx_queue - Tx queue + * @desc_pool_id: Descriptor Pool to be used for the tx queue + * @ring_id: TCL descriptor ring ID corresponding to the tx queue + * + * Tx queue contains information of the software (Descriptor pool) + * and hardware resources (TCL ring id) to be used for a particular + * transmit queue (obtained from skb_queue_mapping in case of linux) + */ +struct dp_tx_queue { + uint8_t desc_pool_id; + uint8_t ring_id; +}; + +/** + * struct dp_tx_msdu_info_s - MSDU Descriptor + * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement + * @tx_queue: Tx queue on which this MSDU should be transmitted + * @num_seg: Number of segments (TSO) + * @tid: TID (override) that is sent from HLOS + * @u.tso_info: TSO information for TSO frame types + * (chain of the TSO segments, number of segments) + * @u.sg_info: Scatter Gather information for non-TSO SG frames + * @meta_data: Mesh meta header information + * @exception_fw: Duplicate frame to be sent to firmware + * + * This structure holds the complete MSDU information needed to program the + * Hardware TCL and MSDU extension descriptors for different frame types + * + */ +struct dp_tx_msdu_info_s { + enum dp_tx_frm_type frm_type; + struct dp_tx_queue tx_queue; + uint32_t num_seg; + uint8_t tid; + 
union { + struct qdf_tso_info_t tso_info; + struct dp_tx_sg_info_s sg_info; + } u; + uint32_t meta_data[6]; + uint8_t exception_fw; +}; + +QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev); +QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev); +void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev); + +QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc); +QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc); + +QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev); +QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev); + +qdf_nbuf_t dp_tx_send(void *data_vdev, qdf_nbuf_t nbuf); +qdf_nbuf_t dp_tx_send_exception(void *data_vdev, qdf_nbuf_t nbuf, + struct cdp_tx_exception_metadata *tx_exc); +qdf_nbuf_t dp_tx_send_mesh(void *data_vdev, qdf_nbuf_t nbuf); + +#ifdef CONVERGED_TDLS_ENABLE +qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle, + enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list); +#endif + +uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota); + +QDF_STATUS +dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf); + +#ifndef CONVERGED_TDLS_ENABLE + +static inline void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc) +{ + return; +} + +static inline void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc, + struct dp_vdev *vdev) +{ + return; +} + +#endif + + + +#ifdef FEATURE_WDS +void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status); +#else +static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status) +{ + return; +} +#endif + +#ifdef ATH_SUPPORT_IQUE +void dp_tx_me_exit(struct dp_pdev *pdev); +#else +static inline void dp_tx_me_exit(struct dp_pdev *pdev) +{ + return; +} +#endif + +#ifdef FEATURE_PERPKT_INFO +QDF_STATUS +dp_get_completion_indication_for_stack(struct dp_soc *soc, struct dp_pdev *pdev, + uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu, + uint8_t last_msdu, qdf_nbuf_t netbuf); + +void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev, + uint16_t peer_id, uint32_t 
ppdu_id, + qdf_nbuf_t netbuf); +#endif + +#ifdef ATH_TX_PRI_OVERRIDE +#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \ + ((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf)) +#else +#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) +#endif + +#ifdef ATH_RX_PRI_SAVE +#define DP_RX_TID_SAVE(_nbuf, _tid) \ + (qdf_nbuf_set_priority(_nbuf, _tid)) +#else +#define DP_RX_TID_SAVE(_nbuf, _tid) +#endif + +/* TODO TX_FEATURE_NOT_YET */ +static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc) +{ + return; +} +/* TODO TX_FEATURE_NOT_YET */ +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.c new file mode 100644 index 0000000000000000000000000000000000000000..8ae7065254462122367be6eac39936112519b4cb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.c @@ -0,0 +1,533 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "dp_types.h" +#include "dp_tx_desc.h" + +#ifndef DESC_PARTITION +#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a) +#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) \ +do { \ + uint8_t sig_bit; \ + soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \ + /* Calculate page divider to find page number */ \ + sig_bit = 0; \ + while (num_desc_per_page) { \ + sig_bit++; \ + num_desc_per_page = num_desc_per_page >> 1; \ + } \ + soc->tx_desc[pool_id].page_divider = (sig_bit - 1); \ +} while (0) +#else +#define DP_TX_DESC_SIZE(a) a +#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {} +#endif /* DESC_PARTITION */ + +/** + * dp_tx_desc_pool_counter_initialize() - Initialize counters + * @tx_desc_pool Handle to DP tx_desc_pool structure + * @num_elem Number of descriptor elements per pool + * + * Return: None + */ +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +static void +dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool, + uint16_t num_elem) +{ +} +#else +static void +dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool, + uint16_t num_elem) +{ + tx_desc_pool->num_free = num_elem; + tx_desc_pool->num_allocated = 0; +} +#endif + +/** + * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s) + * @soc Handle to DP SoC structure + * @num_pool Number of pools to allocate + * @num_elem Number of descriptor elements per pool + * + * This function allocates memory for SW tx descriptors + * (used within host for tx data path). + * The number of tx descriptors required will be large + * since based on number of clients (1024 clients x 3 radios), + * outstanding MSDUs stored in TQM queues and LMAC queues will be significantly + * large. + * + * To avoid allocating a large contiguous memory, it uses multi_page_alloc qdf + * function to allocate memory + * in multiple pages. 
It then iterates through the memory allocated across pages + * and links each descriptor + * to next descriptor, taking care of page boundaries. + * + * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are allocated, + * one for each ring; + * This minimizes lock contention when hard_start_xmit is called + * from multiple CPUs. + * Alternately, multiple pools can be used for multiple VDEVs for VDEV level + * flow control. + * + * Return: Status code. 0 for success. + */ +QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem) +{ + uint32_t id, count, page_id, offset, pool_id_32; + uint16_t num_page, num_desc_per_page; + struct dp_tx_desc_s *tx_desc_elem; + uint32_t desc_size; + struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]); + + desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem)); + tx_desc_pool->elem_size = desc_size; + qdf_mem_multi_pages_alloc(soc->osdev, + &tx_desc_pool->desc_pages, desc_size, num_elem, + 0, true); + if (!tx_desc_pool->desc_pages.num_pages) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Multi page alloc fail, tx desc"); + goto fail_exit; + } + + + num_page = tx_desc_pool->desc_pages.num_pages; + num_desc_per_page = + tx_desc_pool->desc_pages.num_element_per_page; + tx_desc_pool->freelist = (struct dp_tx_desc_s *) + *tx_desc_pool->desc_pages.cacheable_pages; + if (qdf_mem_multi_page_link(soc->osdev, + &tx_desc_pool->desc_pages, desc_size, num_elem, true)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "invalid tx desc allocation - overflow num link"); + goto free_tx_desc; + } + + /* Set unique IDs for each Tx descriptor */ + tx_desc_elem = tx_desc_pool->freelist; + count = 0; + pool_id_32 = (uint32_t)pool_id; + while (tx_desc_elem) { + page_id = count / num_desc_per_page; + offset = count % num_desc_per_page; + id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) | + (page_id << DP_TX_DESC_ID_PAGE_OS) | offset); + + tx_desc_elem->id = id; + tx_desc_elem->pool_id = 
pool_id; + tx_desc_elem = tx_desc_elem->next; + count++; + } + + dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem); + TX_DESC_LOCK_CREATE(&tx_desc_pool->lock); + return QDF_STATUS_SUCCESS; + +free_tx_desc: + qdf_mem_multi_pages_free(soc->osdev, + &tx_desc_pool->desc_pages, 0, true); + +fail_exit: + return QDF_STATUS_E_FAULT; +} + +/** + * dp_tx_desc_pool_free() - Free the memory pool allocated for Tx Descriptors + * + * @soc Handle to DP SoC structure + * @pool_id + * + * Return: + */ +QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id) +{ + struct dp_tx_desc_pool_s *tx_desc_pool = + &((soc)->tx_desc[(pool_id)]); + + qdf_mem_multi_pages_free(soc->osdev, + &tx_desc_pool->desc_pages, 0, true); + TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock); + TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool); + return QDF_STATUS_SUCCESS; +} + +/** + * dp_tx_ext_desc_pool_alloc() - Allocate tx ext descriptor pool + * @soc Handle to DP SoC structure + * @pool_id + * + * Return: NONE + */ +QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem) +{ + uint16_t num_page; + uint32_t count; + struct dp_tx_ext_desc_elem_s *c_elem, *p_elem; + struct qdf_mem_dma_page_t *page_info; + struct qdf_mem_multi_page_t *pages; + QDF_STATUS status; + + /* Coherent tx extension descriptor alloc */ + soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXT_DESC_WITH_META_DATA; + soc->tx_ext_desc[pool_id].elem_count = num_elem; + qdf_mem_multi_pages_alloc(soc->osdev, + &soc->tx_ext_desc[pool_id].desc_pages, + soc->tx_ext_desc[pool_id].elem_size, + soc->tx_ext_desc[pool_id].elem_count, + qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx), + false); + if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "ext desc page alloc fail"); + status = QDF_STATUS_E_NOMEM; + goto fail_exit; + } + + num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages; + /* + * Cacheable ext descriptor link alloc + * This 
structure also large size already + * single element is 24bytes, 2K elements are 48Kbytes + * Have to alloc multi page cacheable memory + */ + soc->tx_ext_desc[pool_id].link_elem_size = + sizeof(struct dp_tx_ext_desc_elem_s); + qdf_mem_multi_pages_alloc(soc->osdev, + &soc->tx_ext_desc[pool_id].desc_link_pages, + soc->tx_ext_desc[pool_id].link_elem_size, + soc->tx_ext_desc[pool_id].elem_count, 0, + true); + if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "ext link desc page alloc fail"); + status = QDF_STATUS_E_NOMEM; + goto free_ext_desc_page; + } + + /* link tx descriptors into a freelist */ + soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *) + *soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages; + if (qdf_mem_multi_page_link(soc->osdev, + &soc->tx_ext_desc[pool_id].desc_link_pages, + soc->tx_ext_desc[pool_id].link_elem_size, + soc->tx_ext_desc[pool_id].elem_count, true)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "ext link desc page linking fail"); + status = QDF_STATUS_E_FAULT; + goto free_ext_link_desc_page; + } + + /* Assign coherent memory pointer into linked free list */ + pages = &soc->tx_ext_desc[pool_id].desc_pages; + page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages; + c_elem = soc->tx_ext_desc[pool_id].freelist; + p_elem = c_elem; + for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) { + if (!(count % pages->num_element_per_page)) { + /** + * First element for new page, + * should point next page + */ + if (!pages->dma_pages->page_v_addr_start) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + "link over flow"); + status = QDF_STATUS_E_FAULT; + goto free_ext_link_desc_page; + } + c_elem->vaddr = (void *)page_info->page_v_addr_start; + c_elem->paddr = page_info->page_p_addr; + page_info++; + } else { + c_elem->vaddr = (void *)(p_elem->vaddr + + soc->tx_ext_desc[pool_id].elem_size); + c_elem->paddr = 
(p_elem->paddr + + soc->tx_ext_desc[pool_id].elem_size); + } + p_elem = c_elem; + c_elem = c_elem->next; + if (!c_elem) + break; + } + + soc->tx_ext_desc[pool_id].num_free = num_elem; + qdf_spinlock_create(&soc->tx_ext_desc[pool_id].lock); + return QDF_STATUS_SUCCESS; + +free_ext_link_desc_page: + qdf_mem_multi_pages_free(soc->osdev, + &soc->tx_ext_desc[pool_id].desc_link_pages, 0, true); + +free_ext_desc_page: + qdf_mem_multi_pages_free(soc->osdev, + &soc->tx_ext_desc[pool_id].desc_pages, + qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx), + false); + +fail_exit: + return status; + +} + +/** + * dp_tx_ext_desc_pool_free() - free tx ext descriptor pool + * @soc: Handle to DP SoC structure + * @pool_id: extension descriptor pool id + * + * Return: NONE + */ +QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id) +{ + qdf_mem_multi_pages_free(soc->osdev, + &soc->tx_ext_desc[pool_id].desc_link_pages, 0, true); + + qdf_mem_multi_pages_free(soc->osdev, + &soc->tx_ext_desc[pool_id].desc_pages, + qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx), + false); + + qdf_spinlock_destroy(&soc->tx_ext_desc[pool_id].lock); + return QDF_STATUS_SUCCESS; +} + +/** + * dp_tx_tso_desc_pool_alloc() - allocate tx tso descriptor pool + * @soc: Handle to DP SoC structure + * @pool_id: tso descriptor pool id + * @num_elem: number of element + * + * Return: QDF_STATUS_SUCCESS + */ +#if defined(FEATURE_TSO) +QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem) +{ + int i; + struct qdf_tso_seg_elem_t *c_element; + struct qdf_tso_seg_elem_t *temp; + + soc->tx_tso_desc[pool_id].num_free = 0; + c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t)); + + if (!c_element) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + FL("Alloc Failed %pK pool_id %d"), + soc, pool_id); + return QDF_STATUS_E_NOMEM; + } + + soc->tx_tso_desc[pool_id].freelist = c_element; + soc->tx_tso_desc[pool_id].num_free++; + for 
(i = 0; i < (num_elem - 1); i++) { + c_element->next = + qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t)); + if (!c_element->next) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + FL("Alloc Failed %pK pool_id %d"), + soc, pool_id); + goto fail; + } + + soc->tx_tso_desc[pool_id].num_free++; + c_element = c_element->next; + c_element->next = NULL; + + } + TSO_DEBUG("Number of free descriptors: %u\n", + soc->tx_tso_desc[pool_id].num_free); + soc->tx_tso_desc[pool_id].pool_size = num_elem; + qdf_spinlock_create(&soc->tx_tso_desc[pool_id].lock); + + return QDF_STATUS_SUCCESS; + +fail: + c_element = soc->tx_tso_desc[pool_id].freelist; + while (c_element) { + temp = c_element->next; + qdf_mem_free(c_element); + c_element = temp; + } + + return QDF_STATUS_E_NOMEM; +} + +/** + * dp_tx_tso_desc_pool_free() - free tx tso descriptor pool + * @soc: Handle to DP SoC structure + * @pool_id: extension descriptor pool id + * + * Return: NONE + */ +void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id) +{ + int i; + struct qdf_tso_seg_elem_t *c_element; + struct qdf_tso_seg_elem_t *temp; + + qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock); + c_element = soc->tx_tso_desc[pool_id].freelist; + + if (!c_element) { + qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Desc Pool Corrupt %d"), pool_id); + return; + } + + for (i = 0; i < soc->tx_tso_desc[pool_id].pool_size; i++) { + temp = c_element->next; + qdf_mem_free(c_element); + c_element = temp; + if (!c_element) + break; + } + + soc->tx_tso_desc[pool_id].freelist = NULL; + soc->tx_tso_desc[pool_id].num_free = 0; + soc->tx_tso_desc[pool_id].pool_size = 0; + qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock); + qdf_spinlock_destroy(&soc->tx_tso_desc[pool_id].lock); + return; +} +/** + * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that tracks the + * fragments in each tso segment + * + * @soc: handle to dp soc structure + * @pool_id: 
descriptor pool id + * @num_elem: total number of descriptors to be allocated + */ +QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem) +{ + + int i; + struct qdf_tso_num_seg_elem_t *c_element; + struct qdf_tso_num_seg_elem_t *temp; + + soc->tx_tso_num_seg[pool_id].num_free = 0; + c_element = qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t)); + + if (!c_element) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + FL("Alloc Failed %pK pool_id %d"), + soc, pool_id); + return QDF_STATUS_E_NOMEM; + } + + soc->tx_tso_num_seg[pool_id].freelist = c_element; + soc->tx_tso_num_seg[pool_id].num_free++; + for (i = 0; i < (num_elem - 1); i++) { + c_element->next = + qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t)); + + if (!c_element->next) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + FL("Alloc Failed %pK pool_id %d"), + soc, pool_id); + goto fail; + } + soc->tx_tso_num_seg[pool_id].num_free++; + + c_element = c_element->next; + c_element->next = NULL; + } + + soc->tx_tso_num_seg[pool_id].num_seg_pool_size = num_elem; + qdf_spinlock_create(&soc->tx_tso_num_seg[pool_id].lock); + + return QDF_STATUS_SUCCESS; + +fail: + c_element = soc->tx_tso_num_seg[pool_id].freelist; + while (c_element) { + temp = c_element->next; + qdf_mem_free(c_element); + c_element = temp; + } + return QDF_STATUS_E_NOMEM; +} + +/** + * dp_tx_tso_num_seg_pool_free() - free pool of descriptors that tracks + * the fragments in tso segment + * + * + * @soc: handle to dp soc structure + * @pool_id: descriptor pool_id + */ +void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id) +{ + int i; + struct qdf_tso_num_seg_elem_t *c_element; + struct qdf_tso_num_seg_elem_t *temp; + + qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock); + c_element = soc->tx_tso_num_seg[pool_id].freelist; + + if (!c_element) { + qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Desc 
Pool Corrupt %d"), pool_id); + return; + } + + for (i = 0; i < soc->tx_tso_num_seg[pool_id].num_seg_pool_size; i++) { + temp = c_element->next; + qdf_mem_free(c_element); + c_element = temp; + if (!c_element) + break; + } + + soc->tx_tso_num_seg[pool_id].freelist = NULL; + soc->tx_tso_num_seg[pool_id].num_free = 0; + soc->tx_tso_num_seg[pool_id].num_seg_pool_size = 0; + qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock); + qdf_spinlock_destroy(&soc->tx_tso_num_seg[pool_id].lock); + return; +} + +#else +QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem) +{ + return QDF_STATUS_SUCCESS; +} + +void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id) +{ + return; +} + +QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem) +{ + return QDF_STATUS_SUCCESS; +} + +void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id) +{ + return; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.h new file mode 100644 index 0000000000000000000000000000000000000000..9d7c0a1d5d5c0019e6b69a0a35ba4573c2b2ee92 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.h @@ -0,0 +1,598 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef DP_TX_DESC_H +#define DP_TX_DESC_H + +#include "dp_types.h" +#include "dp_tx.h" +#include "dp_internal.h" + +#ifdef TX_PER_PDEV_DESC_POOL +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id) +#else /* QCA_LL_TX_FLOW_CONTROL_V2 */ +#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id) +#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ + #define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id) +#else + #ifdef TX_PER_VDEV_DESC_POOL + #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id) + #define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id) + #endif /* TX_PER_VDEV_DESC_POOL */ +#endif /* TX_PER_PDEV_DESC_POOL */ + +/** + * 21 bits cookie + * 2 bits pool id 0 ~ 3, + * 10 bits page id 0 ~ 1023 + * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32) + */ +/* ???Ring ID needed??? 
*/ +#define DP_TX_DESC_ID_POOL_MASK 0x018000 +#define DP_TX_DESC_ID_POOL_OS 15 +#define DP_TX_DESC_ID_PAGE_MASK 0x007FE0 +#define DP_TX_DESC_ID_PAGE_OS 5 +#define DP_TX_DESC_ID_OFFSET_MASK 0x00001F +#define DP_TX_DESC_ID_OFFSET_OS 0 + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +#define TX_DESC_LOCK_CREATE(lock) +#define TX_DESC_LOCK_DESTROY(lock) +#define TX_DESC_LOCK_LOCK(lock) +#define TX_DESC_LOCK_UNLOCK(lock) +#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \ +do { \ + (_tx_desc_pool)->elem_size = 0; \ + (_tx_desc_pool)->freelist = NULL; \ + (_tx_desc_pool)->pool_size = 0; \ + (_tx_desc_pool)->avail_desc = 0; \ + (_tx_desc_pool)->start_th = 0; \ + (_tx_desc_pool)->stop_th = 0; \ + (_tx_desc_pool)->status = FLOW_POOL_INACTIVE; \ +} while (0) +#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */ +#define TX_DESC_LOCK_CREATE(lock) qdf_spinlock_create(lock) +#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock) +#define TX_DESC_LOCK_LOCK(lock) qdf_spin_lock_bh(lock) +#define TX_DESC_LOCK_UNLOCK(lock) qdf_spin_unlock_bh(lock) +#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \ +do { \ + (_tx_desc_pool)->elem_size = 0; \ + (_tx_desc_pool)->num_allocated = 0; \ + (_tx_desc_pool)->freelist = NULL; \ + (_tx_desc_pool)->elem_count = 0; \ + (_tx_desc_pool)->num_free = 0; \ +} while (0) +#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */ +#define MAX_POOL_BUFF_COUNT 10000 + +QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem); +QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id); +QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem); +QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id); +QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem); +void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id); +QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem); +void 
dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id); + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +void dp_tx_flow_control_init(struct dp_soc *); +void dp_tx_flow_control_deinit(struct dp_soc *); + +QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc, + tx_pause_callback pause_cb); +QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, struct cdp_pdev *pdev, + uint8_t vdev_id); +void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev, + uint8_t vdev_id); +void dp_tx_clear_flow_pool_stats(struct dp_soc *soc); +struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc, + uint8_t flow_pool_id, uint16_t flow_pool_size); + +QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id, + uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size); +void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id, + uint8_t flow_type, uint8_t flow_pool_id); + +/** + * dp_tx_get_desc_flow_pool() - get descriptor from flow pool + * @pool: flow pool + * + * Caller needs to take lock and do sanity checks. + * + * Return: tx descriptor + */ +static inline +struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool) +{ + struct dp_tx_desc_s *tx_desc = pool->freelist; + + pool->freelist = pool->freelist->next; + pool->avail_desc--; + return tx_desc; +} + +/** + * ol_tx_put_desc_flow_pool() - put descriptor to flow pool freelist + * @pool: flow pool + * @tx_desc: tx descriptor + * + * Caller needs to take lock and do sanity checks. 
+ * + * Return: none + */ +static inline +void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool, + struct dp_tx_desc_s *tx_desc) +{ + tx_desc->next = pool->freelist; + pool->freelist = tx_desc; + pool->avail_desc++; +} + + +/** + * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool + * + * @soc Handle to DP SoC structure + * @pool_id + * + * Return: + */ +static inline struct dp_tx_desc_s * +dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id) +{ + struct dp_tx_desc_s *tx_desc = NULL; + struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id]; + + if (pool) { + qdf_spin_lock_bh(&pool->flow_pool_lock); + if (pool->status <= FLOW_POOL_ACTIVE_PAUSED && + pool->avail_desc) { + tx_desc = dp_tx_get_desc_flow_pool(pool); + tx_desc->pool_id = desc_pool_id; + tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED; + if (qdf_unlikely(pool->avail_desc < pool->stop_th)) { + pool->status = FLOW_POOL_ACTIVE_PAUSED; + qdf_spin_unlock_bh(&pool->flow_pool_lock); + /* pause network queues */ + soc->pause_cb(desc_pool_id, + WLAN_STOP_ALL_NETIF_QUEUE, + WLAN_DATA_FLOW_CONTROL); + } else { + qdf_spin_unlock_bh(&pool->flow_pool_lock); + } + } else { + pool->pkt_drop_no_desc++; + qdf_spin_unlock_bh(&pool->flow_pool_lock); + } + } else { + soc->pool_stats.pkt_drop_no_pool++; + } + + + return tx_desc; +} + +/** + * dp_tx_desc_free() - Fee a tx descriptor and attach it to free list + * + * @soc Handle to DP SoC structure + * @pool_id + * @tx_desc + * + * Return: None + */ +static inline void +dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc, + uint8_t desc_pool_id) +{ + struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id]; + + qdf_spin_lock_bh(&pool->flow_pool_lock); + tx_desc->flags = 0; + dp_tx_put_desc_flow_pool(pool, tx_desc); + switch (pool->status) { + case FLOW_POOL_ACTIVE_PAUSED: + if (pool->avail_desc > pool->start_th) { + soc->pause_cb(pool->flow_pool_id, + WLAN_WAKE_ALL_NETIF_QUEUE, + WLAN_DATA_FLOW_CONTROL); + pool->status = 
FLOW_POOL_ACTIVE_UNPAUSED; + } + break; + case FLOW_POOL_INVALID: + if (pool->avail_desc == pool->pool_size) { + dp_tx_desc_pool_free(soc, desc_pool_id); + qdf_spin_unlock_bh(&pool->flow_pool_lock); + qdf_print("%s %d pool is freed!!\n", + __func__, __LINE__); + return; + } + break; + + case FLOW_POOL_ACTIVE_UNPAUSED: + break; + default: + qdf_print("%s %d pool is INACTIVE State!!\n", + __func__, __LINE__); + break; + }; + + qdf_spin_unlock_bh(&pool->flow_pool_lock); + +} +#else /* QCA_LL_TX_FLOW_CONTROL_V2 */ + +static inline void dp_tx_flow_control_init(struct dp_soc *handle) +{ +} + +static inline void dp_tx_flow_control_deinit(struct dp_soc *handle) +{ +} + +static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, + uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id, + uint16_t flow_pool_size) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, + uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id) +{ +} + +/** + * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool + * + * @param soc Handle to DP SoC structure + * @param pool_id + * + * Return: + */ +static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc, + uint8_t desc_pool_id) +{ + struct dp_tx_desc_s *tx_desc = NULL; + + TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock); + + tx_desc = soc->tx_desc[desc_pool_id].freelist; + + /* Pool is exhausted */ + if (!tx_desc) { + TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); + return NULL; + } + + soc->tx_desc[desc_pool_id].freelist = + soc->tx_desc[desc_pool_id].freelist->next; + soc->tx_desc[desc_pool_id].num_allocated++; + soc->tx_desc[desc_pool_id].num_free--; + + tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED; + + TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); + + return tx_desc; +} + +/** + * dp_tx_desc_alloc_multiple() - Allocate batch of software Tx Descriptors + * from given pool + * @soc: Handle to DP SoC structure + * 
@pool_id: pool id should pick up + * @num_requested: number of required descriptor + * + * allocate multiple tx descriptor and make a link + * + * Return: h_desc first descriptor pointer + */ +static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple( + struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested) +{ + struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL; + uint8_t count; + + TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock); + + if ((num_requested == 0) || + (soc->tx_desc[desc_pool_id].num_free < num_requested)) { + TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s, No Free Desc: Available(%d) num_requested(%d)", + __func__, soc->tx_desc[desc_pool_id].num_free, + num_requested); + return NULL; + } + + h_desc = soc->tx_desc[desc_pool_id].freelist; + + /* h_desc should never be NULL since num_free > requested */ + qdf_assert_always(h_desc); + + c_desc = h_desc; + for (count = 0; count < (num_requested - 1); count++) { + c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED; + c_desc = c_desc->next; + } + soc->tx_desc[desc_pool_id].num_free -= count; + soc->tx_desc[desc_pool_id].num_allocated += count; + soc->tx_desc[desc_pool_id].freelist = c_desc->next; + c_desc->next = NULL; + + TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); + return h_desc; +} + +/** + * dp_tx_desc_free() - Fee a tx descriptor and attach it to free list + * + * @soc Handle to DP SoC structure + * @pool_id + * @tx_desc + */ +static inline void +dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc, + uint8_t desc_pool_id) +{ + TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock); + + tx_desc->flags = 0; + tx_desc->next = soc->tx_desc[desc_pool_id].freelist; + soc->tx_desc[desc_pool_id].freelist = tx_desc; + soc->tx_desc[desc_pool_id].num_allocated--; + soc->tx_desc[desc_pool_id].num_free++; + + + TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); +} +#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ + +/** + * 
dp_tx_desc_find() - find dp tx descriptor from cokie + * @soc - handle for the device sending the data + * @tx_desc_id - the ID of the descriptor in question + * @return the descriptor object that has the specified ID + * + * Use a tx descriptor ID to find the corresponding descriptor object. + * + */ +static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc, + uint8_t pool_id, uint16_t page_id, uint16_t offset) +{ + struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]); + + return tx_desc_pool->desc_pages.cacheable_pages[page_id] + + tx_desc_pool->elem_size * offset; +} + +/** + * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool + * @soc: handle for the device sending the data + * @pool_id: target pool id + * + * Return: None + */ +static inline +struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc, + uint8_t desc_pool_id) +{ + struct dp_tx_ext_desc_elem_s *c_elem; + + qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock); + if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) { + qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock); + return NULL; + } + c_elem = soc->tx_ext_desc[desc_pool_id].freelist; + soc->tx_ext_desc[desc_pool_id].freelist = + soc->tx_ext_desc[desc_pool_id].freelist->next; + soc->tx_ext_desc[desc_pool_id].num_free--; + qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock); + return c_elem; +} + +/** + * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool + * @soc: handle for the device sending the data + * @pool_id: target pool id + * @elem: ext descriptor pointer should release + * + * Return: None + */ +static inline void dp_tx_ext_desc_free(struct dp_soc *soc, + struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id) +{ + qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock); + elem->next = soc->tx_ext_desc[desc_pool_id].freelist; + soc->tx_ext_desc[desc_pool_id].freelist = elem; + soc->tx_ext_desc[desc_pool_id].num_free++; + 
qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock); + return; +} + +/** + * dp_tx_ext_desc_free_multiple() - Fee multiple tx extension descriptor and + * attach it to free list + * @soc: Handle to DP SoC structure + * @desc_pool_id: pool id should pick up + * @elem: tx descriptor should be freed + * @num_free: number of descriptors should be freed + * + * Return: none + */ +static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc, + struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id, + uint8_t num_free) +{ + struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem; + uint8_t freed = num_free; + + /* caller should always guarantee atleast list of num_free nodes */ + qdf_assert_always(head); + + head = elem; + c_elem = head; + tail = head; + while (c_elem && freed) { + tail = c_elem; + c_elem = c_elem->next; + freed--; + } + + /* caller should always guarantee atleast list of num_free nodes */ + qdf_assert_always(tail); + + qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock); + tail->next = soc->tx_ext_desc[desc_pool_id].freelist; + soc->tx_ext_desc[desc_pool_id].freelist = head; + soc->tx_ext_desc[desc_pool_id].num_free += num_free; + qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock); + + return; +} + +#if defined(FEATURE_TSO) +/** + * dp_tx_tso_desc_alloc() - function to allocate a TSO segment + * @soc: device soc instance + * @pool_id: pool id should pick up tso descriptor + * + * Allocates a TSO segment element from the free list held in + * the soc + * + * Return: tso_seg, tso segment memory pointer + */ +static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc( + struct dp_soc *soc, uint8_t pool_id) +{ + struct qdf_tso_seg_elem_t *tso_seg = NULL; + + qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock); + if (soc->tx_tso_desc[pool_id].freelist) { + soc->tx_tso_desc[pool_id].num_free--; + tso_seg = soc->tx_tso_desc[pool_id].freelist; + soc->tx_tso_desc[pool_id].freelist = + soc->tx_tso_desc[pool_id].freelist->next; + } + 
qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock); + + return tso_seg; +} + +/** + * dp_tx_tso_desc_free() - function to free a TSO segment + * @soc: device soc instance + * @pool_id: pool id should pick up tso descriptor + * @tso_seg: tso segment memory pointer + * + * Returns a TSO segment element to the free list held in the + * HTT pdev + * + * Return: none + */ +static inline void dp_tx_tso_desc_free(struct dp_soc *soc, + uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg) +{ + qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock); + tso_seg->next = soc->tx_tso_desc[pool_id].freelist; + soc->tx_tso_desc[pool_id].freelist = tso_seg; + soc->tx_tso_desc[pool_id].num_free++; + qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock); +} + +static inline +struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc, + uint8_t pool_id) +{ + struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL; + + qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock); + if (soc->tx_tso_num_seg[pool_id].freelist) { + soc->tx_tso_num_seg[pool_id].num_free--; + tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist; + soc->tx_tso_num_seg[pool_id].freelist = + soc->tx_tso_num_seg[pool_id].freelist->next; + } + qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock); + + return tso_num_seg; +} + +static inline +void dp_tso_num_seg_free(struct dp_soc *soc, + uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg) +{ + qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock); + tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist; + soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg; + soc->tx_tso_num_seg[pool_id].num_free++; + qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock); +} +#endif + +/* + * dp_tx_me_alloc_buf() Alloc descriptor from me pool + * @pdev DP_PDEV handle for datapath + * + * Return:dp_tx_me_buf_t(buf) + */ +static inline struct dp_tx_me_buf_t* +dp_tx_me_alloc_buf(struct dp_pdev *pdev) +{ + struct dp_tx_me_buf_t *buf = NULL; + qdf_spin_lock_bh(&pdev->tx_mutex); + 
if (pdev->me_buf.freelist) { + buf = pdev->me_buf.freelist; + pdev->me_buf.freelist = pdev->me_buf.freelist->next; + pdev->me_buf.buf_in_use++; + } else { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Error allocating memory in pool"); + qdf_spin_unlock_bh(&pdev->tx_mutex); + return NULL; + } + qdf_spin_unlock_bh(&pdev->tx_mutex); + return buf; +} + +/* + * dp_tx_me_free_buf() - Free me descriptor and add it to pool + * @pdev: DP_PDEV handle for datapath + * @buf : Allocated ME BUF + * + * Return:void + */ +static inline void +dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf) +{ + qdf_spin_lock_bh(&pdev->tx_mutex); + buf->next = pdev->me_buf.freelist; + pdev->me_buf.freelist = buf; + pdev->me_buf.buf_in_use--; + qdf_spin_unlock_bh(&pdev->tx_mutex); +} +#endif /* DP_TX_DESC_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_flow_control.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_flow_control.c new file mode 100644 index 0000000000000000000000000000000000000000..e554a327b0cdfcc2b797ee726d5e23665ac3f4f5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_flow_control.c @@ -0,0 +1,442 @@ +/* + * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +/* OS abstraction libraries */ +#include /* qdf_nbuf_t, etc. */ +#include /* qdf_atomic_read, etc. */ +#include /* qdf_unlikely */ +#include "dp_types.h" +#include "dp_tx_desc.h" + +#include +#include "dp_internal.h" +#define INVALID_FLOW_ID 0xFF +#define MAX_INVALID_BIN 3 + +/** + * dp_tx_dump_flow_pool_info() - dump global_pool and flow_pool info + * + * @ctx: Handle to struct dp_soc. + * + * Return: none + */ +void dp_tx_dump_flow_pool_info(void *ctx) +{ + struct dp_soc *soc = ctx; + struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats; + struct dp_tx_desc_pool_s *pool = NULL; + struct dp_tx_desc_pool_s tmp_pool; + int i; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "No of pool map received %d", pool_stats->pool_map_count); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "No of pool unmap received %d", pool_stats->pool_unmap_count); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Pkt dropped due to unavailablity of pool %d", + pool_stats->pkt_drop_no_pool); + + /* + * Nested spin lock. + * Always take in below order. 
+ * flow_pool_array_lock -> flow_pool_lock + */ + qdf_spin_lock_bh(&soc->flow_pool_array_lock); + for (i = 0; i < MAX_TXDESC_POOLS; i++) { + pool = &soc->tx_desc[i]; + if (pool->status > FLOW_POOL_INVALID) + continue; + qdf_spin_lock_bh(&pool->flow_pool_lock); + qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool)); + qdf_spin_unlock_bh(&pool->flow_pool_lock); + qdf_spin_unlock_bh(&soc->flow_pool_array_lock); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n"); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Flow_pool_id %d :: status %d", + tmp_pool.flow_pool_id, tmp_pool.status); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Total %d :: Available %d", + tmp_pool.pool_size, tmp_pool.avail_desc); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Start threshold %d :: Stop threshold %d", + tmp_pool.start_th, tmp_pool.stop_th); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Member flow_id %d :: flow_type %d", + tmp_pool.flow_pool_id, tmp_pool.flow_type); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Pkt dropped due to unavailablity of descriptors %d", + tmp_pool.pkt_drop_no_desc); + qdf_spin_lock_bh(&soc->flow_pool_array_lock); + } + qdf_spin_unlock_bh(&soc->flow_pool_array_lock); +} + +/** + * dp_tx_clear_flow_pool_stats() - clear flow pool statistics + * + * @soc: Handle to struct dp_soc. 
+ * + * Return: None + */ +void dp_tx_clear_flow_pool_stats(struct dp_soc *soc) +{ + + if (!soc) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: soc is null\n", __func__); + return; + } + qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats)); +} + +/** + * dp_tx_create_flow_pool() - create flow pool + * @soc: Handle to struct dp_soc + * @flow_pool_id: flow pool id + * @flow_pool_size: flow pool size + * + * Return: flow_pool pointer / NULL for error + */ +struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc, + uint8_t flow_pool_id, uint16_t flow_pool_size) +{ + struct dp_tx_desc_pool_s *pool; + uint32_t stop_threshold; + uint32_t start_threshold; + + if (flow_pool_id >= MAX_TXDESC_POOLS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: invalid flow_pool_id %d", __func__, flow_pool_id); + return NULL; + } + pool = &soc->tx_desc[flow_pool_id]; + qdf_spin_lock_bh(&pool->flow_pool_lock); + if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: flow pool already allocated, attached %d times\n", + __func__, pool->pool_create_cnt); + if (pool->avail_desc > pool->start_th) + pool->status = FLOW_POOL_ACTIVE_UNPAUSED; + else + pool->status = FLOW_POOL_ACTIVE_PAUSED; + qdf_spin_unlock_bh(&pool->flow_pool_lock); + return pool; + } + + if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size)) { + qdf_spin_unlock_bh(&pool->flow_pool_lock); + return NULL; + } + + stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx); + start_threshold = stop_threshold + + wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx); + + pool->flow_pool_id = flow_pool_id; + pool->pool_size = flow_pool_size; + pool->avail_desc = flow_pool_size; + pool->status = FLOW_POOL_ACTIVE_UNPAUSED; + /* INI is in percentage so divide by 100 */ + pool->start_th = (start_threshold * flow_pool_size)/100; + pool->stop_th = (stop_threshold * flow_pool_size)/100; + 
pool->pool_create_cnt++; + + qdf_spin_unlock_bh(&pool->flow_pool_lock); + + return pool; +} + +/** + * dp_tx_delete_flow_pool() - delete flow pool + * @soc: Handle to struct dp_soc + * @pool: flow pool pointer + * @force: free pool forcefully + * + * Delete flow_pool if all tx descriptors are available. + * Otherwise put it in FLOW_POOL_INVALID state. + * If force is set then pull all available descriptors to + * global pool. + * + * Return: 0 for success or error + */ +int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool, + bool force) +{ + if (!soc || !pool) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: pool or soc is NULL\n", __func__); + QDF_ASSERT(0); + return ENOMEM; + } + + qdf_spin_lock_bh(&pool->flow_pool_lock); + if (!pool->pool_create_cnt) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "flow pool either not created or alread deleted"); + qdf_spin_unlock_bh(&pool->flow_pool_lock); + return -ENOENT; + } + pool->pool_create_cnt--; + if (pool->pool_create_cnt) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: pool is still attached, pending detach %d\n", + __func__, pool->pool_create_cnt); + qdf_spin_unlock_bh(&pool->flow_pool_lock); + return -EAGAIN; + } + + if (pool->avail_desc < pool->pool_size) { + pool->status = FLOW_POOL_INVALID; + qdf_spin_unlock_bh(&pool->flow_pool_lock); + return -EAGAIN; + } + + /* We have all the descriptors for the pool, we can delete the pool */ + dp_tx_desc_pool_free(soc, pool->flow_pool_id); + qdf_spin_unlock_bh(&pool->flow_pool_lock); + return 0; +} + +/** + * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev + * @pdev: Handle to struct dp_pdev + * @pool: flow_pool + * @vdev_id: flow_id /vdev_id + * + * Return: none + */ +static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev, + struct dp_tx_desc_pool_s *pool, uint8_t vdev_id) +{ + struct dp_vdev *vdev; + struct dp_soc *soc = pdev->soc; + + vdev = (struct dp_vdev *)cdp_get_vdev_from_vdev_id((void *)soc, + 
(struct cdp_pdev *)pdev, vdev_id); + if (!vdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: invalid vdev_id %d\n", + __func__, vdev_id); + return; + } + + vdev->pool = pool; + qdf_spin_lock_bh(&pool->flow_pool_lock); + pool->pool_owner_ctx = soc; + pool->flow_pool_id = vdev_id; + qdf_spin_unlock_bh(&pool->flow_pool_lock); +} + +/** + * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev + * @pdev: Handle to struct dp_pdev + * @pool: flow_pool + * @vdev_id: flow_id /vdev_id + * + * Return: none + */ +static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev, + struct dp_tx_desc_pool_s *pool, uint8_t vdev_id) +{ + struct dp_vdev *vdev; + struct dp_soc *soc = pdev->soc; + + vdev = (struct dp_vdev *)cdp_get_vdev_from_vdev_id((void *)soc, + (struct cdp_pdev *)pdev, vdev_id); + if (!vdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: invalid vdev_id %d\n", + __func__, vdev_id); + return; + } + + vdev->pool = NULL; +} + +/** + * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors + * @pdev: Handle to struct dp_pdev + * @flow_id: flow id + * @flow_type: flow type + * @flow_pool_id: pool id + * @flow_pool_size: pool size + * + * Process below target to host message + * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP + * + * Return: none + */ +QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id, + uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size) +{ + struct dp_soc *soc = pdev->soc; + struct dp_tx_desc_pool_s *pool; + enum htt_flow_type type = flow_type; + + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d\n", + __func__, flow_id, flow_type, flow_pool_id, flow_pool_size); + + if (qdf_unlikely(!soc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: soc is NULL", __func__); + return QDF_STATUS_E_FAULT; + } + soc->pool_stats.pool_map_count++; + + pool = dp_tx_create_flow_pool(soc, flow_pool_id, + flow_pool_size); + if 
(pool == NULL) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: creation of flow_pool %d size %d failed\n", + __func__, flow_pool_id, flow_pool_size); + return QDF_STATUS_E_RESOURCES; + } + + switch (type) { + + case FLOW_TYPE_VDEV: + dp_tx_flow_pool_vdev_map(pdev, pool, flow_id); + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: flow type %d not supported !!!\n", + __func__, type); + break; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors + * @pdev: Handle to struct dp_pdev + * @flow_id: flow id + * @flow_type: flow type + * @flow_pool_id: pool id + * + * Process below target to host message + * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP + * + * Return: none + */ +void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id, + uint8_t flow_type, uint8_t flow_pool_id) +{ + struct dp_soc *soc = pdev->soc; + struct dp_tx_desc_pool_s *pool; + enum htt_flow_type type = flow_type; + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "%s: flow_id %d flow_type %d flow_pool_id %d\n", + __func__, flow_id, flow_type, flow_pool_id); + + if (qdf_unlikely(!pdev)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: pdev is NULL", __func__); + return; + } + soc->pool_stats.pool_unmap_count++; + + pool = &soc->tx_desc[flow_pool_id]; + if (!pool) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: flow_pool not available flow_pool_id %d\n", + __func__, type); + return; + } + + switch (type) { + + case FLOW_TYPE_VDEV: + dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id); + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: flow type %d not supported !!!\n", + __func__, type); + return; + } + + /* only delete if all descriptors are available */ + dp_tx_delete_flow_pool(soc, pool, false); +} + +/** + * dp_tx_flow_control_init() - Initialize tx flow control + * @tx_desc_pool: Handle to flow_pool + * + * Return: none + */ 
+void dp_tx_flow_control_init(struct dp_soc *soc) +{ + qdf_spinlock_create(&soc->flow_pool_array_lock); +} + +/** + * dp_tx_flow_control_deinit() - Deregister fw based tx flow control + * @tx_desc_pool: Handle to flow_pool + * + * Return: none + */ +void dp_tx_flow_control_deinit(struct dp_soc *soc) +{ + qdf_spinlock_destroy(&soc->flow_pool_array_lock); +} + +/** + * dp_txrx_register_pause_cb() - Register pause callback + * @ctx: Handle to struct dp_soc + * @pause_cb: Tx pause_cb + * + * Return: none + */ +QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle, + tx_pause_callback pause_cb) +{ + struct dp_soc *soc = (struct dp_soc *)handle; + + if (!soc || !pause_cb) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("soc or pause_cb is NULL")); + return QDF_STATUS_E_INVAL; + } + soc->pause_cb = pause_cb; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, struct cdp_pdev *pdev, + uint8_t vdev_id) +{ + struct dp_soc *soc = (struct dp_soc *)handle; + int tx_ring_size = wlan_cfg_tx_ring_size(soc->wlan_cfg_ctx); + + return (dp_tx_flow_pool_map_handler((struct dp_pdev *)pdev, vdev_id, + FLOW_TYPE_VDEV, vdev_id, tx_ring_size)); +} + +void dp_tx_flow_pool_unmap(struct cdp_soc_t *soc, struct cdp_pdev *pdev, + uint8_t vdev_id) +{ + return(dp_tx_flow_pool_unmap_handler((struct dp_pdev *)pdev, vdev_id, + FLOW_TYPE_VDEV, vdev_id)); +} diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_me.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_me.c new file mode 100644 index 0000000000000000000000000000000000000000..ff769a57a3e95dd4db3a7784df6576e397b7f88e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_me.c @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "dp_types.h" +#include "qdf_nbuf.h" +#include "qdf_atomic.h" +#include "qdf_types.h" +#include "dp_tx.h" +#include "dp_tx_desc.h" +#include "dp_internal.h" + +#ifdef ATH_SUPPORT_IQUE +#define MAX_ME_BUF_CHUNK 1424 +#define ME_US_TO_SEC(_x) ((_x)/(1000 * 1000)) +#define ME_CLEAN_WAIT_TIMEOUT (200000) /*200ms*/ +#define ME_CLEAN_WAIT_COUNT 400 + +/** + * dp_tx_me_init():Initialize ME buffer ppol + * @pdev: DP PDEV handle + * + * Return:0 on Succes 1 on failure + */ +static inline uint16_t +dp_tx_me_init(struct dp_pdev *pdev) +{ + + uint16_t i, mc_uc_buf_len, num_pool_elems; + uint32_t pool_size; + + struct dp_tx_me_buf_t *p; + + mc_uc_buf_len = sizeof(struct dp_tx_me_buf_t); + + num_pool_elems = MAX_ME_BUF_CHUNK; + /* Add flow control buffer count */ + pool_size = (mc_uc_buf_len) * num_pool_elems; + pdev->me_buf.size = mc_uc_buf_len; + if (pdev->me_buf.vaddr == NULL) { + qdf_spin_lock_bh(&pdev->tx_mutex); + pdev->me_buf.vaddr = qdf_mem_malloc(pool_size); + if (pdev->me_buf.vaddr == NULL) { + qdf_spin_unlock_bh(&pdev->tx_mutex); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "Error allocating memory pool"); + return 1; + } + pdev->me_buf.buf_in_use = 0; + pdev->me_buf.freelist = + (struct dp_tx_me_buf_t *) 
pdev->me_buf.vaddr; + /* + * me_buf looks like this + * |=======+==========================| + * | ptr | Dst MAC | + * |=======+==========================| + */ + p = pdev->me_buf.freelist; + for (i = 0; i < num_pool_elems-1; i++) { + p->next = (struct dp_tx_me_buf_t *) + ((char *)p + pdev->me_buf.size); + p = p->next; + } + p->next = NULL; + qdf_spin_unlock_bh(&pdev->tx_mutex); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "ME Pool successfully initialized vaddr - %x \ + paddr - %x\n num_elems = %d buf_size - %d" + "pool_size = %d", + pdev->me_buf.vaddr, + (unsigned int)pdev->me_buf.paddr, + (unsigned int)num_pool_elems, + (unsigned int)pdev->me_buf.size, + (unsigned int)pool_size); + } else { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "ME Already Enabled!!"); + } + return 0; +} + +/** + * dp_tx_me_alloc_descriptor():Allocate ME descriptor + * @pdev_handle: DP PDEV handle + * + * Return:void + */ +void +dp_tx_me_alloc_descriptor(struct cdp_pdev *pdev_handle) +{ + struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle; + if (qdf_atomic_read(&pdev->mc_num_vap_attached) == 0) { + dp_tx_me_init(pdev); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + FL("Enable MCAST_TO_UCAST ")); + } + qdf_atomic_inc(&pdev->mc_num_vap_attached); +} + +/** + * dp_tx_me_exit():Free memory and other cleanup required for + * multicast unicast conversion + * @pdev - DP_PDEV handle + * + * Return:void + */ +void +dp_tx_me_exit(struct dp_pdev *pdev) +{ + /* Add flow control buffer count */ + uint32_t wait_time = ME_US_TO_SEC(ME_CLEAN_WAIT_TIMEOUT * + ME_CLEAN_WAIT_COUNT); + + if (pdev->me_buf.vaddr) { + uint16_t wait_cnt = 0; + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "Disabling Mcastenhance" + "This may take some time"); + qdf_spin_lock_bh(&pdev->tx_mutex); + while ((pdev->me_buf.buf_in_use > 0) && + (wait_cnt < ME_CLEAN_WAIT_COUNT)) { + qdf_spin_unlock_bh(&pdev->tx_mutex); + OS_SLEEP(ME_CLEAN_WAIT_TIMEOUT); + wait_cnt++; + 
qdf_spin_lock_bh(&pdev->tx_mutex); + } + if (pdev->me_buf.buf_in_use > 0) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "Tx-comp pending for %d " + "ME frames after waiting %ds!!\n", + pdev->me_buf.buf_in_use, wait_time); + qdf_assert_always(0); + } + + qdf_mem_free(pdev->me_buf.vaddr); + pdev->me_buf.vaddr = NULL; + pdev->me_buf.freelist = NULL; + qdf_spin_unlock_bh(&pdev->tx_mutex); + } else { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "ME Already Disabled !!!"); + } +} + +/** + * dp_tx_me_free_descriptor():free ME descriptor + * @pdev_handle:DP_PDEV handle + * + * Return:void + */ +void +dp_tx_me_free_descriptor(struct cdp_pdev *pdev_handle) +{ + struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle; + qdf_atomic_dec(&pdev->mc_num_vap_attached); + if (atomic_read(&pdev->mc_num_vap_attached) == 0) { + dp_tx_me_exit(pdev); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, + "Disable MCAST_TO_UCAST"); + } +} + +/** + * dp_tx_prepare_send_me(): Call to the umac to get the list of clients + * @vdev: DP VDEV handle + * @nbuf: Multicast buffer + * + * Return: no of packets transmitted + */ +QDF_STATUS +dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf) +{ + if (vdev->me_convert) { + if (vdev->me_convert(vdev->osif_vdev, nbuf) > 0) + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_types.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_types.h new file mode 100644 index 0000000000000000000000000000000000000000..2fbbe72b7bcf9a934c7426a40e2a9f8a3fc41a38 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_types.h @@ -0,0 +1,1447 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DP_TYPES_H_ +#define _DP_TYPES_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#ifdef CONFIG_MCL +#include +#else +#include +#endif + +#ifndef CONFIG_WIN +#include /* WDI subscriber event list */ +#endif + +#include +#include +#include "wlan_cfg.h" +#include "hal_rx.h" +#include +#include +#include "hal_rx.h" + +#define MAX_BW 7 +#define MAX_RETRIES 4 +#define MAX_RECEPTION_TYPES 4 + +#ifndef REMOVE_PKT_LOG +#include +#endif + +#define REPT_MU_MIMO 1 +#define REPT_MU_OFDMA_MIMO 3 +#define DP_VO_TID 6 + +#define DP_MAX_INTERRUPT_CONTEXTS 8 +#define DP_MAX_TID_MAPS 16 /* MAX TID MAPS AVAILABLE PER PDEV*/ +#define DSCP_TID_MAP_MAX (64) +#define DP_IP_DSCP_SHIFT 2 +#define DP_IP_DSCP_MASK 0x3f +#define DP_FC0_SUBTYPE_QOS 0x80 +#define DP_QOS_TID 0x0f +#define DP_IPV6_PRIORITY_SHIFT 20 +#define MAX_MON_LINK_DESC_BANKS 2 +#define DP_VDEV_ALL 0xff + +#if defined(CONFIG_MCL) +#define MAX_PDEV_CNT 1 +#else +#define MAX_PDEV_CNT 3 +#endif + +#define MAX_LINK_DESC_BANKS 8 +#define MAX_TXDESC_POOLS 4 +#define MAX_RXDESC_POOLS 4 +#define MAX_REO_DEST_RINGS 4 +#define MAX_TCL_DATA_RINGS 4 +#define MAX_IDLE_SCATTER_BUFS 16 +#define DP_MAX_IRQ_PER_CONTEXT 12 +#define DP_MAX_INTERRUPT_CONTEXTS 8 +#define DEFAULT_HW_PEER_ID 0xffff + +#define MAX_TX_HW_QUEUES MAX_TCL_DATA_RINGS + +#define DP_MAX_INTERRUPT_CONTEXTS 8 + +#ifndef REMOVE_PKT_LOG +enum rx_pktlog_mode { + DP_RX_PKTLOG_DISABLED = 0, + 
DP_RX_PKTLOG_FULL, + DP_RX_PKTLOG_LITE, +}; +#endif + +struct dp_soc_cmn; +struct dp_pdev; +struct dp_vdev; +struct dp_tx_desc_s; +struct dp_soc; +union dp_rx_desc_list_elem_t; + +#define DP_PDEV_ITERATE_VDEV_LIST(_pdev, _vdev) \ + TAILQ_FOREACH((_vdev), &(_pdev)->vdev_list, vdev_list_elem) + +#define DP_VDEV_ITERATE_PEER_LIST(_vdev, _peer) \ + TAILQ_FOREACH((_peer), &(_vdev)->peer_list, peer_list_elem) + +#define DP_PEER_ITERATE_ASE_LIST(_peer, _ase, _temp_ase) \ + TAILQ_FOREACH_SAFE((_ase), &peer->ast_entry_list, ase_list_elem, (_temp_ase)) + +#define DP_MUTEX_TYPE qdf_spinlock_t + +#define DP_FRAME_IS_MULTICAST(_a) (*(_a) & 0x01) +#define DP_FRAME_IS_IPV4_MULTICAST(_a) (*(_a) == 0x01) + +#define DP_FRAME_IS_IPV6_MULTICAST(_a) \ + ((_a)[0] == 0x33 && \ + (_a)[1] == 0x33) + +#define DP_FRAME_IS_BROADCAST(_a) \ + ((_a)[0] == 0xff && \ + (_a)[1] == 0xff && \ + (_a)[2] == 0xff && \ + (_a)[3] == 0xff && \ + (_a)[4] == 0xff && \ + (_a)[5] == 0xff) +#define DP_FRAME_IS_SNAP(_llc) ((_llc)->llc_dsap == 0xaa && \ + (_llc)->llc_ssap == 0xaa && \ + (_llc)->llc_un.type_snap.control == 0x3) +#define DP_FRAME_IS_LLC(typeorlen) ((typeorlen) >= 0x600) +#define DP_FRAME_FC0_TYPE_MASK 0x0c +#define DP_FRAME_FC0_TYPE_DATA 0x08 +#define DP_FRAME_IS_DATA(_frame) \ + (((_frame)->i_fc[0] & DP_FRAME_FC0_TYPE_MASK) == DP_FRAME_FC0_TYPE_DATA) + +/** + * macros to convert hw mac id to sw mac id: + * mac ids used by hardware start from a value of 1 while + * those in host software start from a value of 0. Use the + * macros below to convert between mac ids used by software and + * hardware + */ +#define DP_SW2HW_MACID(id) ((id) + 1) +#define DP_HW2SW_MACID(id) ((id) > 0 ? 
((id) - 1) : 0) +#define DP_MAC_ADDR_LEN 6 + +/** + * enum dp_intr_mode + * @DP_INTR_LEGACY: Legacy/Line interrupts, for WIN + * @DP_INTR_MSI: MSI interrupts, for MCL + * @DP_INTR_POLL: Polling + */ +enum dp_intr_mode { + DP_INTR_LEGACY = 0, + DP_INTR_MSI, + DP_INTR_POLL, +}; + +/** + * enum dp_tx_frm_type + * @dp_tx_frm_std: Regular frame, no added header fragments + * @dp_tx_frm_tso: TSO segment, with a modified IP header added + * @dp_tx_frm_sg: SG segment + * @dp_tx_frm_audio: Audio frames, a custom LLC/SNAP header added + * @dp_tx_frm_me: Multicast to Unicast Converted frame + * @dp_tx_frm_raw: Raw Frame + */ +enum dp_tx_frm_type { + dp_tx_frm_std = 0, + dp_tx_frm_tso, + dp_tx_frm_sg, + dp_tx_frm_audio, + dp_tx_frm_me, + dp_tx_frm_raw, +}; + +/** + * enum dp_ast_type + * @dp_ast_type_wds: WDS peer AST type + * @dp_ast_type_static: static ast entry type + * @dp_ast_type_mec: Multicast echo ast entry type + */ +enum dp_ast_type { + dp_ast_type_wds = 0, + dp_ast_type_static, + dp_ast_type_mec, +}; + +/** + * enum dp_nss_cfg + * @dp_nss_cfg_default: No radios are offloaded + * @dp_nss_cfg_first_radio: First radio offloaded + * @dp_nss_cfg_second_radio: Second radio offloaded + * @dp_nss_cfg_dbdc: Dual radios offloaded + */ +enum dp_nss_cfg { + dp_nss_cfg_default, + dp_nss_cfg_first_radio, + dp_nss_cfg_second_radio, + dp_nss_cfg_dbdc, +}; + +/** + * struct rx_desc_pool + * @pool_size: number of RX descriptor in the pool + * @array: pointer to array of RX descriptor + * @freelist: pointer to free RX descriptor link list + * @lock: Protection for the RX descriptor pool + * @owner: owner for nbuf + */ +struct rx_desc_pool { + uint32_t pool_size; + union dp_rx_desc_list_elem_t *array; + union dp_rx_desc_list_elem_t *freelist; + qdf_spinlock_t lock; + uint8_t owner; +}; + +/** + * struct dp_tx_ext_desc_elem_s + * @next: next extension descriptor pointer + * @vaddr: hlos virtual address pointer + * @paddr: physical address pointer for descriptor + */ +struct 
dp_tx_ext_desc_elem_s { + struct dp_tx_ext_desc_elem_s *next; + void *vaddr; + qdf_dma_addr_t paddr; +}; + +/** + * struct dp_tx_ext_desc_s - Tx Extension Descriptor Pool + * @elem_count: Number of descriptors in the pool + * @elem_size: Size of each descriptor + * @num_free: Number of free descriptors + * @msdu_ext_desc: MSDU extension descriptor + * @desc_pages: multiple page allocation information for actual descriptors + * @link_elem_size: size of the link descriptor in cacheable memory used for + * chaining the extension descriptors + * @desc_link_pages: multiple page allocation information for link descriptors + */ +struct dp_tx_ext_desc_pool_s { + uint16_t elem_count; + int elem_size; + uint16_t num_free; + struct qdf_mem_multi_page_t desc_pages; + int link_elem_size; + struct qdf_mem_multi_page_t desc_link_pages; + struct dp_tx_ext_desc_elem_s *freelist; + qdf_spinlock_t lock; + qdf_dma_mem_context(memctx); +}; + +/** + * struct dp_tx_desc_s - Tx Descriptor + * @next: Next in the chain of descriptors in freelist or in the completion list + * @nbuf: Buffer Address + * @msdu_ext_desc: MSDU extension descriptor + * @id: Descriptor ID + * @vdev: vdev over which the packet was transmitted + * @pdev: Handle to pdev + * @pool_id: Pool ID - used when releasing the descriptor + * @flags: Flags to track the state of descriptor and special frame handling + * @comp: Pool ID - used when releasing the descriptor + * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet). + * This is maintained in descriptor to allow more efficient + * processing in completion event processing code. + * This field is filled in with the htt_pkt_type enum. + * @frm_type: Frame Type - ToDo check if this is redundant + * @pkt_offset: Offset from which the actual packet data starts + * @me_buffer: Pointer to ME buffer - store this so that it can be freed on + * Tx completion of ME packet + * @pool: handle to flow_pool this descriptor belongs to. 
+ */ +struct dp_tx_desc_s { + struct dp_tx_desc_s *next; + qdf_nbuf_t nbuf; + struct dp_tx_ext_desc_elem_s *msdu_ext_desc; + uint32_t id; + struct dp_vdev *vdev; + struct dp_pdev *pdev; + uint8_t pool_id; + uint16_t flags; + struct hal_tx_desc_comp_s comp; + uint16_t tx_encap_type; + uint8_t frm_type; + uint8_t pkt_offset; + void *me_buffer; + void *tso_desc; + void *tso_num_desc; +}; + +/** + * enum flow_pool_status - flow pool status + * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors) + * and network queues are unpaused + * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors) + * and network queues are paused + * @FLOW_POOL_INVALID: pool is invalid (put descriptor) + * @FLOW_POOL_INACTIVE: pool is inactive (pool is free) + */ +enum flow_pool_status { + FLOW_POOL_ACTIVE_UNPAUSED = 0, + FLOW_POOL_ACTIVE_PAUSED = 1, + FLOW_POOL_INVALID = 2, + FLOW_POOL_INACTIVE = 3, +}; + +/** + * struct dp_tx_tso_seg_pool_s + * @pool_size: total number of pool elements + * @num_free: free element count + * @freelist: first free element pointer + * @lock: lock for accessing the pool + */ +struct dp_tx_tso_seg_pool_s { + uint16_t pool_size; + uint16_t num_free; + struct qdf_tso_seg_elem_t *freelist; + qdf_spinlock_t lock; +}; + +/** + * struct dp_tx_tso_num_seg_pool_s { + * @num_seg_pool_size: total number of pool elements + * @num_free: free element count + * @freelist: first free element pointer + * @lock: lock for accessing the pool + */ + +struct dp_tx_tso_num_seg_pool_s { + uint16_t num_seg_pool_size; + uint16_t num_free; + struct qdf_tso_num_seg_elem_t *freelist; + /*tso mutex */ + qdf_spinlock_t lock; +}; + +/** + * struct dp_tx_desc_pool_s - Tx Descriptor pool information + * @elem_size: Size of each descriptor in the pool + * @pool_size: Total number of descriptors in the pool + * @num_free: Number of free descriptors + * @num_allocated: Number of used descriptors + * @freelist: Chain of free descriptors + * @desc_pages: multiple page 
allocation information for actual descriptors + * @num_invalid_bin: Deleted pool with pending Tx completions. + * @flow_pool_array_lock: Lock when operating on flow_pool_array. + * @flow_pool_array: List of allocated flow pools + * @lock- Lock for descriptor allocation/free from/to the pool + */ +struct dp_tx_desc_pool_s { + uint16_t elem_size; + uint32_t num_allocated; + struct dp_tx_desc_s *freelist; + struct qdf_mem_multi_page_t desc_pages; +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 + uint16_t pool_size; + uint8_t flow_pool_id; + uint8_t num_invalid_bin; + uint16_t avail_desc; + enum flow_pool_status status; + enum htt_flow_type flow_type; + uint16_t stop_th; + uint16_t start_th; + uint16_t pkt_drop_no_desc; + qdf_spinlock_t flow_pool_lock; + uint8_t pool_create_cnt; + void *pool_owner_ctx; +#else + uint16_t elem_count; + uint32_t num_free; + qdf_spinlock_t lock; +#endif +}; + +/** + * struct dp_txrx_pool_stats - flow pool related statistics + * @pool_map_count: flow pool map received + * @pool_unmap_count: flow pool unmap received + * @pkt_drop_no_pool: packets dropped due to unavailablity of pool + */ +struct dp_txrx_pool_stats { + uint16_t pool_map_count; + uint16_t pool_unmap_count; + uint16_t pkt_drop_no_pool; +}; + +struct dp_srng { + void *hal_srng; + void *base_vaddr_unaligned; + qdf_dma_addr_t base_paddr_unaligned; + uint32_t alloc_size; + int irq; + uint32_t num_entries; +}; + +struct dp_rx_reorder_array_elem { + qdf_nbuf_t head; + qdf_nbuf_t tail; +}; + +#define DP_RX_BA_INACTIVE 0 +#define DP_RX_BA_ACTIVE 1 +struct dp_reo_cmd_info { + uint16_t cmd; + enum hal_reo_cmd_type cmd_type; + void *data; + void (*handler)(struct dp_soc *, void *, union hal_reo_status *); + TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem; +}; + +/* Rx TID */ +struct dp_rx_tid { + /* TID */ + int tid; + + /* Num of addba requests */ + uint32_t num_of_addba_req; + + /* Num of addba responses */ + uint32_t num_of_addba_resp; + + /* Num of delba requests */ + uint32_t num_of_delba_req; + + 
/* pn size */ + uint8_t pn_size; + /* REO TID queue descriptors */ + void *hw_qdesc_vaddr_unaligned; + qdf_dma_addr_t hw_qdesc_paddr_unaligned; + qdf_dma_addr_t hw_qdesc_paddr; + uint32_t hw_qdesc_alloc_size; + + /* RX ADDBA session state */ + int ba_status; + + /* RX BA window size */ + uint16_t ba_win_size; + + /* TODO: Check the following while adding defragmentation support */ + struct dp_rx_reorder_array_elem *array; + /* base - single rx reorder element used for non-aggr cases */ + struct dp_rx_reorder_array_elem base; + + /* only used for defrag right now */ + TAILQ_ENTRY(dp_rx_tid) defrag_waitlist_elem; + + /* Store dst desc for reinjection */ + void *dst_ring_desc; + struct dp_rx_desc *head_frag_desc; + + /* Sequence and fragments that are being processed currently */ + uint32_t curr_seq_num; + uint32_t curr_frag_num; + + uint32_t defrag_timeout_ms; + uint16_t dialogtoken; + uint16_t statuscode; + /* user defined ADDBA response status code */ + uint16_t userstatuscode; +}; + +/* per interrupt context */ +struct dp_intr { + uint8_t tx_ring_mask; /* WBM Tx completion rings (0-2) + associated with this napi context */ + uint8_t rx_ring_mask; /* Rx REO rings (0-3) associated + with this interrupt context */ + uint8_t rx_mon_ring_mask; /* Rx monitor ring mask (0-2) */ + uint8_t rx_err_ring_mask; /* REO Exception Ring */ + uint8_t rx_wbm_rel_ring_mask; /* WBM2SW Rx Release Ring */ + uint8_t reo_status_ring_mask; /* REO command response ring */ + uint8_t rxdma2host_ring_mask; /* RXDMA to host destination ring */ + uint8_t host2rxdma_ring_mask; /* Host to RXDMA buffer ring */ + struct dp_soc *soc; /* Reference to SoC structure , + to get DMA ring handles */ + qdf_lro_ctx_t lro_ctx; + uint8_t dp_intr_id; +}; + +#define REO_DESC_FREELIST_SIZE 64 +#define REO_DESC_FREE_DEFER_MS 1000 +struct reo_desc_list_node { + qdf_list_node_t node; + unsigned long free_ts; + struct dp_rx_tid rx_tid; +}; + +/* SoC level data path statistics */ +struct dp_soc_stats { + struct { + 
uint32_t added; + uint32_t deleted; + uint32_t aged_out; + } ast; + + /* SOC level TX stats */ + struct { + /* packets dropped on tx because of no peer */ + struct cdp_pkt_info tx_invalid_peer; + /* descriptors in each tcl ring */ + uint32_t tcl_ring_full[MAX_TCL_DATA_RINGS]; + /* Descriptors in use at soc */ + uint32_t desc_in_use; + /* tqm_release_reason == FW removed */ + uint32_t dropped_fw_removed; + + } tx; + + /* SOC level RX stats */ + struct { + /* Rx errors */ + /* Total Packets in Rx Error ring */ + uint32_t err_ring_pkts; + /* No of Fragments */ + uint32_t rx_frags; + struct { + /* Invalid RBM error count */ + uint32_t invalid_rbm; + /* Invalid VDEV Error count */ + uint32_t invalid_vdev; + /* Invalid PDEV error count */ + uint32_t invalid_pdev; + /* Invalid PEER Error count */ + struct cdp_pkt_info rx_invalid_peer; + /* HAL ring access Fail error count */ + uint32_t hal_ring_access_fail; + /* RX DMA error count */ + uint32_t rxdma_error[HAL_RXDMA_ERR_MAX]; + /* REO Error count */ + uint32_t reo_error[HAL_REO_ERR_MAX]; + /* HAL REO ERR Count */ + uint32_t hal_reo_error[MAX_REO_DEST_RINGS]; + } err; + + /* packet count per core - per ring */ + uint64_t ring_packets[NR_CPUS][MAX_REO_DEST_RINGS]; + } rx; +}; + +#define DP_MAC_ADDR_LEN 6 +union dp_align_mac_addr { + uint8_t raw[DP_MAC_ADDR_LEN]; + struct { + uint16_t bytes_ab; + uint16_t bytes_cd; + uint16_t bytes_ef; + } align2; + struct { + uint32_t bytes_abcd; + uint16_t bytes_ef; + } align4; + struct { + uint16_t bytes_ab; + uint32_t bytes_cdef; + } align4_2; +}; + +/* + * dp_ast_entry + * + * @ast_idx: Hardware AST Index + * @mac_addr: MAC Address for this AST entry + * @peer: Next Hop peer (for non-WDS nodes, this will be point to + * associated peer with this MAC address) + * @next_hop: Set to 1 if this is for a WDS node + * @is_active: flag to indicate active data traffic on this node + * (used for aging out/expiry) + * @ase_list_elem: node in peer AST list + * @is_bss: flag to indicate if entry 
corresponds to bss peer + * @pdev_id: pdev ID + * @vdev_id: vdev ID + * @type: flag to indicate type of the entry(static/WDS/MEC) + * @hash_list_elem: node in soc AST hash list (mac address used as hash) + */ +struct dp_ast_entry { + uint16_t ast_idx; + /* MAC address */ + union dp_align_mac_addr mac_addr; + struct dp_peer *peer; + bool next_hop; + bool is_active; + bool is_bss; + uint8_t pdev_id; + uint8_t vdev_id; + enum cdp_txrx_ast_entry_type type; + TAILQ_ENTRY(dp_ast_entry) ase_list_elem; + TAILQ_ENTRY(dp_ast_entry) hash_list_elem; +}; + +/* SOC level htt stats */ +struct htt_t2h_stats { + /* lock to protect htt_stats_msg update */ + qdf_spinlock_t lock; + + /* work queue to process htt stats */ + qdf_work_t work; + + /* T2H Ext stats message queue */ + qdf_nbuf_queue_t msg; + + /* number of completed stats in htt_stats_msg */ + uint32_t num_stats; +}; + +/* SOC level structure for data path */ +struct dp_soc { + /* Common base structure - Should be the first member */ + struct cdp_soc_t cdp_soc; + + /* SoC Obj */ + void *ctrl_psoc; + + /* OS device abstraction */ + qdf_device_t osdev; + + /* WLAN config context */ + struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx; + + /* HTT handle for host-fw interaction */ + void *htt_handle; + + /* Commint init done */ + qdf_atomic_t cmn_init_done; + + /* Opaque hif handle */ + struct hif_opaque_softc *hif_handle; + + /* PDEVs on this SOC */ + struct dp_pdev *pdev_list[MAX_PDEV_CNT]; + + /* Number of PDEVs */ + uint8_t pdev_count; + + /*cce disable*/ + bool cce_disable; + + /* Link descriptor memory banks */ + struct { + void *base_vaddr_unaligned; + void *base_vaddr; + qdf_dma_addr_t base_paddr_unaligned; + qdf_dma_addr_t base_paddr; + uint32_t size; + } link_desc_banks[MAX_LINK_DESC_BANKS]; + + /* Link descriptor Idle list for HW internal use (SRNG mode) */ + struct dp_srng wbm_idle_link_ring; + + /* Link descriptor Idle list for HW internal use (scatter buffer mode) + */ + qdf_dma_addr_t 
wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS]; + void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS]; + uint32_t wbm_idle_scatter_buf_size; + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 + qdf_spinlock_t flow_pool_array_lock; + tx_pause_callback pause_cb; + struct dp_txrx_pool_stats pool_stats; +#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */ + /* Tx SW descriptor pool */ + struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS]; + + /* Tx MSDU Extension descriptor pool */ + struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS]; + + /* Tx TSO descriptor pool */ + struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS]; + + /* Tx TSO Num of segments pool */ + struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS]; + + /* Tx H/W queues lock */ + qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES]; + + /* Rx SW descriptor pool for RXDMA buffer */ + struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS]; + + /* Rx SW descriptor pool for RXDMA monitor buffer */ + struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS]; + + /* Rx SW descriptor pool for RXDMA status buffer */ + struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS]; + + /* HAL SOC handle */ + void *hal_soc; + + /* DP Interrupts */ + struct dp_intr intr_ctx[DP_MAX_INTERRUPT_CONTEXTS]; + + /* REO destination rings */ + struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS]; + + /* Number of REO destination rings */ + uint8_t num_reo_dest_rings; + + /* REO exception ring - See if should combine this with reo_dest_ring */ + struct dp_srng reo_exception_ring; + + /* REO reinjection ring */ + struct dp_srng reo_reinject_ring; + + /* REO command ring */ + struct dp_srng reo_cmd_ring; + + /* REO command status ring */ + struct dp_srng reo_status_ring; + + /* WBM Rx release ring */ + struct dp_srng rx_rel_ring; + + /* Number of TCL data rings */ + uint8_t num_tcl_data_rings; + + /* TCL data ring */ + struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS]; + + /* TCL command ring */ + struct dp_srng tcl_cmd_ring; + + /* TCL command 
status ring */ + struct dp_srng tcl_status_ring; + + /* WBM Tx completion rings */ + struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS]; + + /* Common WBM link descriptor release ring (SW to WBM) */ + struct dp_srng wbm_desc_rel_ring; + + /* Tx ring map for interrupt processing */ + uint8_t tx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS]; + + /* Rx ring map for interrupt processing */ + uint8_t rx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS]; + + /* peer ID to peer object map (array of pointers to peer objects) */ + struct dp_peer **peer_id_to_obj_map; + + struct { + unsigned mask; + unsigned idx_bits; + TAILQ_HEAD(, dp_peer) * bins; + } peer_hash; + + /* rx defrag state – TBD: do we need this per radio? */ + struct { + struct { + TAILQ_HEAD(, dp_rx_tid) waitlist; + uint32_t timeout_ms; + qdf_spinlock_t defrag_lock; + } defrag; + struct { + int defrag_timeout_check; + int dup_check; + } flags; + TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list; + qdf_spinlock_t reo_cmd_lock; + } rx; + + /* optional rx processing function */ + void (*rx_opt_proc)( + struct dp_vdev *vdev, + struct dp_peer *peer, + unsigned tid, + qdf_nbuf_t msdu_list); + + /* pool addr for mcast enhance buff */ + struct { + int size; + uint32_t paddr; + uint32_t *vaddr; + struct dp_tx_me_buf_t *freelist; + int buf_in_use; + qdf_dma_mem_context(memctx); + } me_buf; + + /** + * peer ref mutex: + * 1. Protect peer object lookups until the returned peer object's + * reference count is incremented. + * 2. Provide mutex when accessing peer object lookup structures. 
+ */ + DP_MUTEX_TYPE peer_ref_mutex; + + /* maximum value for peer_id */ + uint32_t max_peers; + + /* SoC level data path statistics */ + struct dp_soc_stats stats; + + /* Enable processing of Tx completion status words */ + bool process_tx_status; + bool process_rx_status; + struct dp_ast_entry *ast_table[WLAN_UMAC_PSOC_MAX_PEERS * 2]; + struct { + unsigned mask; + unsigned idx_bits; + TAILQ_HEAD(, dp_ast_entry) * bins; + } ast_hash; + + qdf_spinlock_t ast_lock; + qdf_timer_t wds_aging_timer; + + /*interrupt timer*/ + qdf_timer_t mon_reap_timer; + uint8_t reap_timer_init; + qdf_timer_t int_timer; + uint8_t intr_mode; + + qdf_list_t reo_desc_freelist; + qdf_spinlock_t reo_desc_freelist_lock; + +#ifdef QCA_SUPPORT_SON + /* The timer to check station's inactivity status */ + os_timer_t pdev_bs_inact_timer; + /* The current inactivity count reload value + based on overload condition */ + u_int16_t pdev_bs_inact_reload; + + /* The inactivity timer value when not overloaded */ + u_int16_t pdev_bs_inact_normal; + + /* The inactivity timer value when overloaded */ + u_int16_t pdev_bs_inact_overload; + + /* The inactivity timer check interval */ + u_int16_t pdev_bs_inact_interval; + /* Inactivity timer */ +#endif /* QCA_SUPPORT_SON */ + + /* htt stats */ + struct htt_t2h_stats htt_stats; + + void *external_txrx_handle; /* External data path handle */ +#ifdef IPA_OFFLOAD + /* IPA uC datapath offload Wlan Tx resources */ + struct { + /* Resource info to be passed to IPA */ + qdf_dma_addr_t ipa_tcl_ring_base_paddr; + void *ipa_tcl_ring_base_vaddr; + uint32_t ipa_tcl_ring_size; + qdf_dma_addr_t ipa_tcl_hp_paddr; + uint32_t alloc_tx_buf_cnt; + + qdf_dma_addr_t ipa_wbm_ring_base_paddr; + void *ipa_wbm_ring_base_vaddr; + uint32_t ipa_wbm_ring_size; + qdf_dma_addr_t ipa_wbm_tp_paddr; + + /* TX buffers populated into the WBM ring */ + void **tx_buf_pool_vaddr_unaligned; + qdf_dma_addr_t *tx_buf_pool_paddr_unaligned; + } ipa_uc_tx_rsc; + + /* IPA uC datapath offload Wlan Rx 
resources */ + struct { + /* Resource info to be passed to IPA */ + qdf_dma_addr_t ipa_reo_ring_base_paddr; + void *ipa_reo_ring_base_vaddr; + uint32_t ipa_reo_ring_size; + qdf_dma_addr_t ipa_reo_tp_paddr; + + /* Resource info to be passed to firmware and IPA */ + qdf_dma_addr_t ipa_rx_refill_buf_ring_base_paddr; + void *ipa_rx_refill_buf_ring_base_vaddr; + uint32_t ipa_rx_refill_buf_ring_size; + qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr; + } ipa_uc_rx_rsc; +#endif +}; + +#ifdef IPA_OFFLOAD +/** + * dp_ipa_resources - Resources needed for IPA + */ +struct dp_ipa_resources { + qdf_dma_addr_t tx_ring_base_paddr; + uint32_t tx_ring_size; + uint32_t tx_num_alloc_buffer; + + qdf_dma_addr_t tx_comp_ring_base_paddr; + uint32_t tx_comp_ring_size; + + qdf_dma_addr_t rx_rdy_ring_base_paddr; + uint32_t rx_rdy_ring_size; + + qdf_dma_addr_t rx_refill_ring_base_paddr; + uint32_t rx_refill_ring_size; + + /* IPA UC doorbell registers paddr */ + qdf_dma_addr_t tx_comp_doorbell_paddr; + uint32_t *tx_comp_doorbell_vaddr; + qdf_dma_addr_t rx_ready_doorbell_paddr; +}; +#endif + +#define MAX_RX_MAC_RINGS 2 +/* Same as NAC_MAX_CLENT */ +#define DP_NAC_MAX_CLIENT 24 + +/* + * Macros to setup link descriptor cookies - for link descriptors, we just + * need first 3 bits to store bank ID. 
The remaining bytes will be used set a + * unique ID, which will be useful in debugging + */ +#define LINK_DESC_BANK_ID_MASK 0x7 +#define LINK_DESC_ID_SHIFT 3 +#define LINK_DESC_ID_START 0x8000 + +#define LINK_DESC_COOKIE(_desc_id, _bank_id) \ + ((((_desc_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_bank_id)) + +#define LINK_DESC_COOKIE_BANK_ID(_cookie) \ + ((_cookie) & LINK_DESC_BANK_ID_MASK) + +/* same as ieee80211_nac_param */ +enum dp_nac_param_cmd { + /* IEEE80211_NAC_PARAM_ADD */ + DP_NAC_PARAM_ADD = 1, + /* IEEE80211_NAC_PARAM_DEL */ + DP_NAC_PARAM_DEL, + /* IEEE80211_NAC_PARAM_LIST */ + DP_NAC_PARAM_LIST, +}; + +/** + * struct dp_neighbour_peer - neighbour peer list type for smart mesh + * @neighbour_peers_macaddr: neighbour peer's mac address + * @neighbour_peer_list_elem: neighbour peer list TAILQ element + */ +struct dp_neighbour_peer { + /* MAC address of neighbour's peer */ + union dp_align_mac_addr neighbour_peers_macaddr; + /* node in the list of neighbour's peer */ + TAILQ_ENTRY(dp_neighbour_peer) neighbour_peer_list_elem; +}; + +/** + * struct ppdu_info - PPDU Status info descriptor + * @ppdu_id - Unique ppduid assigned by firmware for every tx packet + * @max_ppdu_id - wrap around for ppdu id + * @last_tlv_cnt - Keep track for missing ppdu tlvs + * @last_user - last ppdu processed for user + * @is_ampdu - set if Ampdu aggregate + * @nbuf - ppdu descriptor payload + * @ppdu_desc - ppdu descriptor + * @ppdu_info_list_elem - linked list of ppdu tlvs + */ +struct ppdu_info { + uint32_t ppdu_id; + uint32_t max_ppdu_id; + uint16_t tlv_bitmap; + uint16_t last_tlv_cnt; + uint16_t last_user:8, + is_ampdu:1; + qdf_nbuf_t nbuf; + struct cdp_tx_completion_ppdu *ppdu_desc; + TAILQ_ENTRY(ppdu_info) ppdu_info_list_elem; +}; + +/* PDEV level structure for data path */ +struct dp_pdev { + /* PDEV handle from OSIF layer TBD: see if we really need osif_pdev */ + void *osif_pdev; + + /* PDEV Id */ + int pdev_id; + + /* TXRX SOC handle */ + struct dp_soc *soc; 
+ + /* Ring used to replenish rx buffers (maybe to the firmware of MAC) */ + struct dp_srng rx_refill_buf_ring; + + /* Second ring used to replenish rx buffers */ + struct dp_srng rx_refill_buf_ring2; + + /* Empty ring used by firmware to post rx buffers to the MAC */ + struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS]; + + /* wlan_cfg pdev ctxt*/ + struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx; + + /* RXDMA monitor buffer replenish ring */ + struct dp_srng rxdma_mon_buf_ring[NUM_RXDMA_RINGS_PER_PDEV]; + + /* RXDMA monitor destination ring */ + struct dp_srng rxdma_mon_dst_ring[NUM_RXDMA_RINGS_PER_PDEV]; + + /* RXDMA monitor status ring. TBD: Check format of this ring */ + struct dp_srng rxdma_mon_status_ring[NUM_RXDMA_RINGS_PER_PDEV]; + + struct dp_srng rxdma_mon_desc_ring[NUM_RXDMA_RINGS_PER_PDEV]; + + /* RXDMA error destination ring */ + struct dp_srng rxdma_err_dst_ring[NUM_RXDMA_RINGS_PER_PDEV]; + + /* Link descriptor memory banks */ + struct { + void *base_vaddr_unaligned; + void *base_vaddr; + qdf_dma_addr_t base_paddr_unaligned; + qdf_dma_addr_t base_paddr; + uint32_t size; + } link_desc_banks[NUM_RXDMA_RINGS_PER_PDEV][MAX_MON_LINK_DESC_BANKS]; + + + /** + * TODO: See if we need a ring map here for LMAC rings. + * 1. Monitor rings are currently planning to be processed on receiving + * PPDU end interrupts and hence wont need ring based interrupts. + * 2. 
Rx buffer rings will be replenished during REO destination + * processing and doesn't require regular interrupt handling - we will + * only handle low water mark interrupts which is not expected + * frequently + */ + + /* VDEV list */ + TAILQ_HEAD(, dp_vdev) vdev_list; + + /* vdev list lock */ + qdf_spinlock_t vdev_list_lock; + + /* Number of vdevs this device have */ + uint16_t vdev_count; + + /* PDEV transmit lock */ + qdf_spinlock_t tx_lock; + +#ifndef REMOVE_PKT_LOG + bool pkt_log_init; + /* Pktlog pdev */ + struct pktlog_dev_t *pl_dev; +#endif /* #ifndef REMOVE_PKT_LOG */ + + /* Monitor mode interface and status storage */ + struct dp_vdev *monitor_vdev; + + /* monitor mode lock */ + qdf_spinlock_t mon_lock; + + /*tx_mutex for me*/ + DP_MUTEX_TYPE tx_mutex; + + /* Smart Mesh */ + bool filter_neighbour_peers; + /* smart mesh mutex */ + qdf_spinlock_t neighbour_peer_mutex; + /* Neighnour peer list */ + TAILQ_HEAD(, dp_neighbour_peer) neighbour_peers_list; + /* msdu chain head & tail */ + qdf_nbuf_t invalid_peer_head_msdu; + qdf_nbuf_t invalid_peer_tail_msdu; + + /* Band steering */ + /* TBD */ + + /* PDEV level data path statistics */ + struct cdp_pdev_stats stats; + + /* Global RX decap mode for the device */ + enum htt_pkt_type rx_decap_mode; + + /* Enhanced Stats is enabled */ + bool enhanced_stats_en; + + /* advance filter mode and type*/ + uint8_t mon_filter_mode; + uint16_t fp_mgmt_filter; + uint16_t fp_ctrl_filter; + uint16_t fp_data_filter; + uint16_t mo_mgmt_filter; + uint16_t mo_ctrl_filter; + uint16_t mo_data_filter; + + qdf_atomic_t num_tx_outstanding; + + qdf_atomic_t num_tx_exception; + + /* MCL specific local peer handle */ + struct { + uint8_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1]; + uint8_t freelist; + qdf_spinlock_t lock; + struct dp_peer *map[OL_TXRX_NUM_LOCAL_PEER_IDS]; + } local_peer_ids; + + /* dscp_tid_map_*/ + uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX]; + + struct hal_rx_ppdu_info ppdu_info; + + /* operating channel */ + 
uint8_t operating_channel; + + qdf_nbuf_queue_t rx_status_q; + uint32_t mon_ppdu_status; + struct cdp_mon_status rx_mon_recv_status; + /* monitor mode status/destination ring PPDU and MPDU count */ + struct cdp_pdev_mon_stats rx_mon_stats; + + /* pool addr for mcast enhance buff */ + struct { + int size; + uint32_t paddr; + char *vaddr; + struct dp_tx_me_buf_t *freelist; + int buf_in_use; + qdf_dma_mem_context(memctx); + } me_buf; + + /* Number of VAPs with mcast enhancement enabled */ + qdf_atomic_t mc_num_vap_attached; + + qdf_atomic_t stats_cmd_complete; + +#ifdef IPA_OFFLOAD + ipa_uc_op_cb_type ipa_uc_op_cb; + void *usr_ctxt; + struct dp_ipa_resources ipa_resource; +#endif + + /* TBD */ + + /* map this pdev to a particular Reo Destination ring */ + enum cdp_host_reo_dest_ring reo_dest; + +#ifndef REMOVE_PKT_LOG + /* Packet log mode */ + uint8_t rx_pktlog_mode; +#endif + + /* WDI event handlers */ + struct wdi_event_subscribe_t **wdi_event_list; + + /* ppdu_id of last received HTT TX stats */ + uint32_t last_ppdu_id; + struct { + uint8_t last_user; + qdf_nbuf_t buf; + } tx_ppdu_info; + + bool tx_sniffer_enable; + /* mirror copy mode */ + bool mcopy_mode; + + struct { + uint16_t tx_ppdu_id; + uint16_t tx_peer_id; + uint16_t rx_ppdu_id; + } m_copy_id; + + /* To check if PPDU Tx stats are enabled for Pktlog */ + bool pktlog_ppdu_stats; + + void *dp_txrx_handle; /* Advanced data path handle */ + +#ifdef ATH_SUPPORT_NAC_RSSI + bool nac_rssi_filtering; +#endif + /* list of ppdu tlvs */ + TAILQ_HEAD(, ppdu_info) ppdu_info_list; + uint32_t tlv_count; + uint32_t list_depth; + uint32_t ppdu_id; + bool first_nbuf; +}; + +struct dp_peer; + +/* VDEV structure for data path state */ +struct dp_vdev { + /* OS device abstraction */ + qdf_device_t osdev; + /* physical device that is the parent of this virtual device */ + struct dp_pdev *pdev; + + /* Handle to the OS shim SW's virtual device */ + ol_osif_vdev_handle osif_vdev; + + /* vdev_id - ID used to specify a particular vdev 
to the target */ + uint8_t vdev_id; + + /* MAC address */ + union dp_align_mac_addr mac_addr; + + /* node in the pdev's list of vdevs */ + TAILQ_ENTRY(dp_vdev) vdev_list_elem; + + /* dp_peer list */ + TAILQ_HEAD(, dp_peer) peer_list; + + /* callback to hand rx frames to the OS shim */ + ol_txrx_rx_fp osif_rx; + ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap; + ol_txrx_get_key_fp osif_get_key; + ol_txrx_tx_free_ext_fp osif_tx_free_ext; + +#ifdef notyet + /* callback to check if the msdu is an WAI (WAPI) frame */ + ol_rx_check_wai_fp osif_check_wai; +#endif + + /* proxy arp function */ + ol_txrx_proxy_arp_fp osif_proxy_arp; + + /* callback to hand rx monitor 802.11 MPDU to the OS shim */ + ol_txrx_rx_mon_fp osif_rx_mon; + + ol_txrx_mcast_me_fp me_convert; + + /* completion function used by this vdev*/ + ol_txrx_completion_fp tx_comp; + + /* deferred vdev deletion state */ + struct { + /* VDEV delete pending */ + int pending; + /* + * callback and a context argument to provide a + * notification for when the vdev is deleted. 
+ */ + ol_txrx_vdev_delete_cb callback; + void *context; + } delete; + + /* tx data delivery notification callback function */ + struct { + ol_txrx_data_tx_cb func; + void *ctxt; + } tx_non_std_data_callback; + + + /* safe mode control to bypass the encrypt and decipher process*/ + uint32_t safemode; + + /* rx filter related */ + uint32_t drop_unenc; +#ifdef notyet + privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS]; + uint32_t filters_num; +#endif + /* TDLS Link status */ + bool tdls_link_connected; + bool is_tdls_frame; + + + /* VDEV operating mode */ + enum wlan_op_mode opmode; + + /* Tx encapsulation type for this VAP */ + enum htt_cmn_pkt_type tx_encap_type; + /* Rx Decapsulation type for this VAP */ + enum htt_cmn_pkt_type rx_decap_type; + + /* BSS peer */ + struct dp_peer *vap_bss_peer; + + /* WDS enabled */ + bool wds_enabled; + + /* WDS Aging timer period */ + uint32_t wds_aging_timer_val; + + /* NAWDS enabled */ + bool nawds_enabled; + + /* Default HTT meta data for this VDEV */ + /* TBD: check alignment constraints */ + uint16_t htt_tcl_metadata; + + /* Mesh mode vdev */ + uint32_t mesh_vdev; + + /* Mesh mode rx filter setting */ + uint32_t mesh_rx_filter; + + /* DSCP-TID mapping table ID */ + uint8_t dscp_tid_map_id; + + /* Multicast enhancement enabled */ + uint8_t mcast_enhancement_en; + + /* per vdev rx nbuf queue */ + qdf_nbuf_queue_t rxq; + + uint8_t tx_ring_id; + struct dp_tx_desc_pool_s *tx_desc; + struct dp_tx_ext_desc_pool_s *tx_ext_desc; + + /* VDEV Stats */ + struct cdp_vdev_stats stats; + bool lro_enable; + + /* Is this a proxySTA VAP */ + bool proxysta_vdev; + /* Is isolation mode enabled */ + bool isolation_vdev; + + /* Address search flags to be configured in HAL descriptor */ + uint8_t hal_desc_addr_search_flags; +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 + struct dp_tx_desc_pool_s *pool; +#endif + /* AP BRIDGE enabled */ + uint32_t ap_bridge_enabled; + + enum cdp_sec_type sec_type; + +#ifdef ATH_SUPPORT_NAC_RSSI + bool cdp_nac_rssi_enabled; 
+ struct { + uint8_t bssid_mac[6]; + uint8_t client_mac[6]; + uint8_t chan_num; + uint8_t client_rssi_valid; + uint8_t client_rssi; + uint8_t vdev_id; + } cdp_nac_rssi; +#endif +}; + + +enum { + dp_sec_mcast = 0, + dp_sec_ucast +}; + +#ifdef WDS_VENDOR_EXTENSION +typedef struct { + uint8_t wds_tx_mcast_4addr:1, + wds_tx_ucast_4addr:1, + wds_rx_filter:1, /* enforce rx filter */ + wds_rx_ucast_4addr:1, /* when set, accept 4addr unicast frames */ + wds_rx_mcast_4addr:1; /* when set, accept 4addr multicast frames */ + +} dp_ecm_policy; +#endif + +/* Peer structure for data path state */ +struct dp_peer { + /* VDEV to which this peer is associated */ + struct dp_vdev *vdev; + + struct dp_ast_entry *self_ast_entry; + + qdf_atomic_t ref_cnt; + + /* TODO: See if multiple peer IDs are required in wifi3.0 */ + /* peer ID(s) for this peer */ + uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER]; + + union dp_align_mac_addr mac_addr; + + /* node in the vdev's list of peers */ + TAILQ_ENTRY(dp_peer) peer_list_elem; + /* node in the hash table bin's list of peers */ + TAILQ_ENTRY(dp_peer) hash_list_elem; + + /* TID structures */ + struct dp_rx_tid rx_tid[DP_MAX_TIDS]; + + /* TBD: No transmit TID state required? */ + + struct { + enum htt_sec_type sec_type; + u_int32_t michael_key[2]; /* relevant for TKIP */ + } security[2]; /* 0 -> multicast, 1 -> unicast */ + + /* + * rx proc function: this either is a copy of pdev's rx_opt_proc for + * regular rx processing, or has been redirected to a /dev/null discard + * function when peer deletion is in progress. 
+ */ + void (*rx_opt_proc)(struct dp_vdev *vdev, struct dp_peer *peer, + unsigned tid, qdf_nbuf_t msdu_list); + + /* set when node is authorized */ + uint8_t authorize:1; + + u_int8_t nac; + + /* Band steering: Set when node is inactive */ + uint8_t peer_bs_inact_flag:1; + u_int16_t peer_bs_inact; /* inactivity mark count */ + + /* NAWDS Flag and Bss Peer bit */ + uint8_t nawds_enabled:1, + bss_peer:1, + wapi:1, + wds_enabled:1; + + /* MCL specific peer local id */ + uint16_t local_id; + enum ol_txrx_peer_state state; + qdf_spinlock_t peer_info_lock; + + qdf_time_t last_assoc_rcvd; + qdf_time_t last_disassoc_rcvd; + qdf_time_t last_deauth_rcvd; + /* Peer Stats */ + struct cdp_peer_stats stats; + + TAILQ_HEAD(, dp_ast_entry) ast_entry_list; + /* TBD */ + +#ifdef WDS_VENDOR_EXTENSION + dp_ecm_policy wds_ecm; +#endif + bool delete_in_progress; +}; + +#ifdef CONFIG_WIN +/* + * dp_invalid_peer_msg + * @nbuf: data buffer + * @wh: 802.11 header + * @vdev_id: id of vdev + */ +struct dp_invalid_peer_msg { + qdf_nbuf_t nbuf; + struct ieee80211_frame *wh; + uint8_t vdev_id; +}; +#endif + +/* + * dp_tx_me_buf_t: ME buffer + * next: pointer to next buffer + * data: Destination Mac address + */ +struct dp_tx_me_buf_t { + /* Note: ME buf pool initialization logic expects next pointer to + * be the first element. Dont add anything before next */ + struct dp_tx_me_buf_t *next; + uint8_t data[DP_MAC_ADDR_LEN]; +}; + +#endif /* _DP_TYPES_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_wdi_event.c b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_wdi_event.c new file mode 100644 index 0000000000000000000000000000000000000000..52b36e931fa1d448a8567917a7d23d0d615e48ff --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/dp_wdi_event.c @@ -0,0 +1,308 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + +#include "dp_internal.h" +#include "qdf_mem.h" /* qdf_mem_malloc,free */ + +#ifdef WDI_EVENT_ENABLE +void *dp_get_pldev(struct cdp_pdev *txrx_pdev) +{ + struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev; + return pdev->pl_dev; +} +/* + * dp_wdi_event_next_sub() - Return handle for Next WDI event + * @wdi_sub: WDI Event handle + * + * Return handle for next WDI event in list + * + * Return: Next WDI event to be subscribe + */ +static inline wdi_event_subscribe * +dp_wdi_event_next_sub(wdi_event_subscribe *wdi_sub) +{ + if (!wdi_sub) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid subscriber in %s\n", __func__); + return NULL; + } + return wdi_sub->priv.next; +} + + +/* + * dp_wdi_event_del_subs() -Delete Event subscription + * @wdi_sub: WDI Event handle + * @event_index: Event index from list + * + * This API will delete subscribed event from list + * Return: None + */ +static inline void +dp_wdi_event_del_subs(wdi_event_subscribe *wdi_sub, int event_index) +{ + /* Subscribers should take care of deletion */ +} + + +/* + * dp_wdi_event_iter_sub() - Iterate through all WDI event in the list + * and pass WDI event to callback function + * @pdev: DP pdev handle + * @event_index: Event index in list + * 
@wdi_event: WDI event handle + * @data: pointer to data + * @peer_id: peer id number + * @status: HTT rx status + * + * + * Return: None + */ +static inline void +dp_wdi_event_iter_sub( + struct dp_pdev *pdev, + uint32_t event_index, + wdi_event_subscribe *wdi_sub, + void *data, + uint16_t peer_id, + int status) +{ + enum WDI_EVENT event = event_index + WDI_EVENT_BASE; + + if (wdi_sub) { + do { + wdi_sub->callback(wdi_sub->context, event, data, + peer_id, status); + } while ((wdi_sub = dp_wdi_event_next_sub(wdi_sub))); + } +} + + +/* + * dp_wdi_event_handler() - Event handler for WDI event + * @event: wdi event number + * @soc: soc handle + * @data: pointer to data + * @peer_id: peer id number + * @status: HTT rx status + * @pdev_id: id of pdev + * + * It will be called to register WDI event + * + * Return: None + */ +void +dp_wdi_event_handler( + enum WDI_EVENT event, + void *soc, + void *data, + uint16_t peer_id, + int status, uint8_t pdev_id) +{ + uint32_t event_index; + wdi_event_subscribe *wdi_sub; + struct dp_pdev *txrx_pdev; + struct dp_soc *soc_t = (struct dp_soc *)soc; + txrx_pdev = dp_get_pdev_for_mac_id(soc_t, pdev_id); + + if (!event) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid WDI event in %s\n", __func__); + return; + } + if (!txrx_pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid pdev in WDI event handler\n"); + return; + } + + /* + * There can be NULL data, so no validation for the data + * Subscribers must do the sanity based on the requirements + */ + event_index = event - WDI_EVENT_BASE; + if (!(txrx_pdev->wdi_event_list[event_index]) && + (event == WDI_EVENT_RX_DESC)) { + /* WDI_EVEN_RX_DESC is indicated for RX_LITE also */ + event_index = WDI_EVENT_LITE_RX - WDI_EVENT_BASE; + } + wdi_sub = txrx_pdev->wdi_event_list[event_index]; + + /* Find the subscriber */ + dp_wdi_event_iter_sub(txrx_pdev, event_index, wdi_sub, data, + peer_id, status); +} + + +/* + * dp_wdi_event_sub() - Subscribe WDI event + * 
@txrx_pdev_handle: cdp_pdev handle
+ * @event_cb_sub_handle: subscriber event handle (wdi_event_subscribe *)
+ * @event: Event to be subscribed
+ *
+ * Return: 0 for success. nonzero for failure.
+ */
+int
+dp_wdi_event_sub(
+	struct cdp_pdev *txrx_pdev_handle,
+	void *event_cb_sub_handle,
+	uint32_t event)
+{
+	uint32_t event_index;
+	wdi_event_subscribe *wdi_sub;
+	struct dp_pdev *txrx_pdev = (struct dp_pdev *)txrx_pdev_handle;
+	wdi_event_subscribe *event_cb_sub =
+			(wdi_event_subscribe *) event_cb_sub_handle;
+
+	/* Reject NULL pdev/callback and events outside
+	 * [WDI_EVENT_BASE, WDI_EVENT_LAST) before indexing wdi_event_list.
+	 */
+	if (!txrx_pdev) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"Invalid txrx_pdev in %s", __func__);
+		return -EINVAL;
+	}
+	if (!event_cb_sub) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"Invalid callback in %s", __func__);
+		return -EINVAL;
+	}
+	if ((!event) || (event >= WDI_EVENT_LAST) || (event < WDI_EVENT_BASE)) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"Invalid event in %s", __func__);
+		return -EINVAL;
+	}
+	dp_set_pktlog_wifi3(txrx_pdev, event, true);
+	event_index = event - WDI_EVENT_BASE;
+	wdi_sub = txrx_pdev->wdi_event_list[event_index];
+
+	/*
+	 * Check if it is the first subscriber of the event
+	 */
+	if (!wdi_sub) {
+		wdi_sub = event_cb_sub;
+		wdi_sub->priv.next = NULL;
+		wdi_sub->priv.prev = NULL;
+		txrx_pdev->wdi_event_list[event_index] = wdi_sub;
+		return 0;
+	}
+	/* NOTE(review): no duplicate-subscription check here — subscribing
+	 * the same handle twice makes priv.next point back into the list and
+	 * corrupts it; callers must not double-subscribe. TODO confirm
+	 * whether a guard should be added.
+	 */
+	/* New subscribers are inserted at the head of the per-event list */
+	event_cb_sub->priv.next = wdi_sub;
+	event_cb_sub->priv.prev = NULL;
+	wdi_sub->priv.prev = event_cb_sub;
+	txrx_pdev->wdi_event_list[event_index] = event_cb_sub;
+	return 0;
+
+}
+
+/*
+ * dp_wdi_event_unsub() - WDI event unsubscribe
+ * @txrx_pdev_handle: cdp_pdev handle
+ * @event_cb_sub_handle: subscribed event handle
+ * @event: Event to be unsubscribe
+ *
+ *
+ * Return: 0 for success. nonzero for failure.
+ */
+int
+dp_wdi_event_unsub(
+	struct cdp_pdev *txrx_pdev_handle,
+	void *event_cb_sub_handle,
+	uint32_t event)
+{
+	uint32_t event_index;
+	struct dp_pdev *txrx_pdev = (struct dp_pdev *)txrx_pdev_handle;
+	wdi_event_subscribe *event_cb_sub =
+		(wdi_event_subscribe *) event_cb_sub_handle;
+
+	/* Validate inputs the same way dp_wdi_event_sub() does: the
+	 * original only checked the callback, so a NULL pdev was
+	 * dereferenced and an out-of-range event indexed past
+	 * wdi_event_list.
+	 */
+	if (!txrx_pdev) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"Invalid txrx_pdev in %s", __func__);
+		return -EINVAL;
+	}
+	if (!event_cb_sub) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"Invalid callback in %s", __func__);
+		return -EINVAL;
+	}
+	if ((!event) || (event >= WDI_EVENT_LAST) || (event < WDI_EVENT_BASE)) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"Invalid event in %s", __func__);
+		return -EINVAL;
+	}
+	event_index = event - WDI_EVENT_BASE;
+
+	dp_set_pktlog_wifi3(txrx_pdev, event, false);
+
+	/* Unlink the subscriber from the per-event doubly linked list */
+	if (!event_cb_sub->priv.prev) {
+		/* Subscriber is at the head of the list */
+		txrx_pdev->wdi_event_list[event_index] = event_cb_sub->priv.next;
+	} else {
+		event_cb_sub->priv.prev->priv.next = event_cb_sub->priv.next;
+	}
+	if (event_cb_sub->priv.next) {
+		event_cb_sub->priv.next->priv.prev = event_cb_sub->priv.prev;
+	}
+
+	return 0;
+}
+
+
+/*
+ * dp_wdi_event_attach() - Attach wdi event
+ * @txrx_pdev: DP pdev handle
+ *
+ * Allocates one subscriber-list head pointer per WDI event.
+ *
+ * Return: 0 for success. nonzero for failure.
+ */
+int
+dp_wdi_event_attach(struct dp_pdev *txrx_pdev)
+{
+	if (!txrx_pdev) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"Invalid device in %s\nWDI event attach failed\n",
+			__func__);
+		return -EINVAL;
+	}
+	/* Separate subscriber list for each event */
+	txrx_pdev->wdi_event_list = (wdi_event_subscribe **)
+		qdf_mem_malloc(
+			sizeof(wdi_event_subscribe *) * WDI_NUM_EVENTS);
+	if (!txrx_pdev->wdi_event_list) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"Insufficient memory for the WDI event lists\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+
+/*
+ * dp_wdi_event_detach() - Detach WDI event
+ * @txrx_pdev: DP pdev handle
+ *
+ * Return: 0 for success. nonzero for failure.
+ */ +int +dp_wdi_event_detach(struct dp_pdev *txrx_pdev) +{ + int i; + wdi_event_subscribe *wdi_sub; + if (!txrx_pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid device in %s\nWDI attach failed", __func__); + return -EINVAL; + } + if (!txrx_pdev->wdi_event_list) { + return -EINVAL; + } + for (i = 0; i < WDI_NUM_EVENTS; i++) { + wdi_sub = txrx_pdev->wdi_event_list[i]; + /* Delete all the subscribers */ + dp_wdi_event_del_subs(wdi_sub, i); + } + if (txrx_pdev->wdi_event_list) { + qdf_mem_free(txrx_pdev->wdi_event_list); + } + return 0; +} +#endif /* CONFIG_WIN */ diff --git a/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/hal_rx.h b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/hal_rx.h new file mode 100644 index 0000000000000000000000000000000000000000..bc4bebc24b20147e1fed418943fb457c66bcf297 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/dp/wifi3.0/hal_rx.h @@ -0,0 +1,3586 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _HAL_RX_H_ +#define _HAL_RX_H_ + +#include + +/** + * struct hal_wbm_err_desc_info: structure to hold wbm error codes and reasons + * + * @reo_psh_rsn: REO push reason + * @reo_err_code: REO Error code + * @rxdma_psh_rsn: RXDMA push reason + * @rxdma_err_code: RXDMA Error code + * @reserved_1: Reserved bits + * @wbm_err_src: WBM error source + * @pool_id: pool ID, indicates which rxdma pool + * @reserved_2: Reserved bits + */ +struct hal_wbm_err_desc_info { + uint16_t reo_psh_rsn:2, + reo_err_code:5, + rxdma_psh_rsn:2, + rxdma_err_code:5, + reserved_1:2; + uint8_t wbm_err_src:3, + pool_id:2, + reserved_2:3; +}; + +/** + * enum hal_reo_error_code: Enum which encapsulates "reo_push_reason" + * + * @ HAL_REO_ERROR_DETECTED: Packets arrived because of an error detected + * @ HAL_REO_ROUTING_INSTRUCTION: Packets arrived because of REO routing + */ +enum hal_reo_error_status { + HAL_REO_ERROR_DETECTED = 0, + HAL_REO_ROUTING_INSTRUCTION = 1, +}; + +/** + * @msdu_flags: [0] first_msdu_in_mpdu + * [1] last_msdu_in_mpdu + * [2] msdu_continuation - MSDU spread across buffers + * [23] sa_is_valid - SA match in peer table + * [24] sa_idx_timeout - Timeout while searching for SA match + * [25] da_is_valid - Used to identtify intra-bss forwarding + * [26] da_is_MCBC + * [27] da_idx_timeout - Timeout while searching for DA match + * + */ +struct hal_rx_msdu_desc_info { + uint32_t msdu_flags; + uint16_t msdu_len; /* 14 bits for length */ +}; + +/** + * enum hal_rx_msdu_desc_flags: Enum for flags in MSDU_DESC_INFO + * + * @ HAL_MSDU_F_FIRST_MSDU_IN_MPDU: First MSDU in MPDU + * @ HAL_MSDU_F_LAST_MSDU_IN_MPDU: Last MSDU in MPDU + * @ HAL_MSDU_F_MSDU_CONTINUATION: MSDU continuation + * @ HAL_MSDU_F_SA_IS_VALID: Found match for SA in AST + * @ HAL_MSDU_F_SA_IDX_TIMEOUT: AST search for SA timed out + * @ HAL_MSDU_F_DA_IS_VALID: Found match for DA in AST + * @ HAL_MSDU_F_DA_IS_MCBC: DA is MC/BC address + * @ HAL_MSDU_F_DA_IDX_TIMEOUT: AST search for DA timed out + */ +enum 
hal_rx_msdu_desc_flags { + HAL_MSDU_F_FIRST_MSDU_IN_MPDU = (0x1 << 0), + HAL_MSDU_F_LAST_MSDU_IN_MPDU = (0x1 << 1), + HAL_MSDU_F_MSDU_CONTINUATION = (0x1 << 2), + HAL_MSDU_F_SA_IS_VALID = (0x1 << 23), + HAL_MSDU_F_SA_IDX_TIMEOUT = (0x1 << 24), + HAL_MSDU_F_DA_IS_VALID = (0x1 << 25), + HAL_MSDU_F_DA_IS_MCBC = (0x1 << 26), + HAL_MSDU_F_DA_IDX_TIMEOUT = (0x1 << 27) +}; + +/* + * @msdu_count: no. of msdus in the MPDU + * @mpdu_seq: MPDU sequence number + * @mpdu_flags [0] Fragment flag + * [1] MPDU_retry_bit + * [2] AMPDU flag + * [3] raw_ampdu + * @peer_meta_data: Upper bits containing peer id, vdev id + */ +struct hal_rx_mpdu_desc_info { + uint16_t msdu_count; + uint16_t mpdu_seq; /* 12 bits for length */ + uint32_t mpdu_flags; + uint32_t peer_meta_data; /* sw progamed meta-data:MAC Id & peer Id */ +}; + +/** + * enum hal_rx_mpdu_desc_flags: Enum for flags in MPDU_DESC_INFO + * + * @ HAL_MPDU_F_FRAGMENT: Fragmented MPDU (802.11 fragemtation) + * @ HAL_MPDU_F_RETRY_BIT: Retry bit is set in FC of MPDU + * @ HAL_MPDU_F_AMPDU_FLAG: MPDU received as part of A-MPDU + * @ HAL_MPDU_F_RAW_AMPDU: MPDU is a Raw MDPU + */ +enum hal_rx_mpdu_desc_flags { + HAL_MPDU_F_FRAGMENT = (0x1 << 20), + HAL_MPDU_F_RETRY_BIT = (0x1 << 21), + HAL_MPDU_F_AMPDU_FLAG = (0x1 << 22), + HAL_MPDU_F_RAW_AMPDU = (0x1 << 30) +}; + +/** + * enum hal_rx_ret_buf_manager: Enum for return_buffer_manager field in + * BUFFER_ADDR_INFO structure + * + * @ HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list + * @ HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST: Descriptor returned to WBM idle + * descriptor list + * @ HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW + * @ HAL_RX_BUF_RBM_SW0_BM: For Tx completion -- returned to host + * @ HAL_RX_BUF_RBM_SW1_BM: For Tx completion -- returned to host + * @ HAL_RX_BUF_RBM_SW2_BM: For Tx completion -- returned to host + * @ HAL_RX_BUF_RBM_SW3_BM: For Rx release -- returned to host + */ +enum hal_rx_ret_buf_manager { + HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST = 0, + 
HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST = 1, + HAL_RX_BUF_RBM_FW_BM = 2, + HAL_RX_BUF_RBM_SW0_BM = 3, + HAL_RX_BUF_RBM_SW1_BM = 4, + HAL_RX_BUF_RBM_SW2_BM = 5, + HAL_RX_BUF_RBM_SW3_BM = 6, +}; + +/* + * Given the offset of a field in bytes, returns uint8_t * + */ +#define _OFFSET_TO_BYTE_PTR(_ptr, _off_in_bytes) \ + (((uint8_t *)(_ptr)) + (_off_in_bytes)) + +/* + * Given the offset of a field in bytes, returns uint32_t * + */ +#define _OFFSET_TO_WORD_PTR(_ptr, _off_in_bytes) \ + (((uint32_t *)(_ptr)) + ((_off_in_bytes) >> 2)) + +#define _HAL_MS(_word, _mask, _shift) \ + (((_word) & (_mask)) >> (_shift)) + +/* + * macro to set the LSW of the nbuf data physical address + * to the rxdma ring entry + */ +#define HAL_RXDMA_PADDR_LO_SET(buff_addr_info, paddr_lo) \ + ((*(((unsigned int *) buff_addr_info) + \ + (BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_OFFSET >> 2))) = \ + (paddr_lo << BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB) & \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK) + +/* + * macro to set the LSB of MSW of the nbuf data physical address + * to the rxdma ring entry + */ +#define HAL_RXDMA_PADDR_HI_SET(buff_addr_info, paddr_hi) \ + ((*(((unsigned int *) buff_addr_info) + \ + (BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_OFFSET >> 2))) = \ + (paddr_hi << BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB) & \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK) + +/* + * macro to set the cookie into the rxdma ring entry + */ +#define HAL_RXDMA_COOKIE_SET(buff_addr_info, cookie) \ + ((*(((unsigned int *) buff_addr_info) + \ + (BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_OFFSET >> 2))) &= \ + ~BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK); \ + ((*(((unsigned int *) buff_addr_info) + \ + (BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_OFFSET >> 2))) |= \ + (cookie << BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB) & \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK) + +/* + * macro to set the LSW of the nbuf data physical address + * to the WBM ring entry + */ +#define HAL_WBM_PADDR_LO_SET(buff_addr_info, paddr_lo) \ + ((*(((unsigned int *) 
buff_addr_info) + \ + (BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_OFFSET >> 2))) = \ + (paddr_lo << BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB) & \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK) + +/* + * macro to set the LSB of MSW of the nbuf data physical address + * to the WBM ring entry + */ +#define HAL_WBM_PADDR_HI_SET(buff_addr_info, paddr_hi) \ + ((*(((unsigned int *) buff_addr_info) + \ + (BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_OFFSET >> 2))) = \ + (paddr_hi << BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB) & \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK) + +/* + * macro to set the manager into the rxdma ring entry + */ +#define HAL_RXDMA_MANAGER_SET(buff_addr_info, manager) \ + ((*(((unsigned int *) buff_addr_info) + \ + (BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_OFFSET >> 2))) &= \ + ~BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK); \ + ((*(((unsigned int *) buff_addr_info) + \ + (BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_OFFSET >> 2))) |= \ + (manager << BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB) & \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK) + +#define HAL_RX_ERROR_STATUS_GET(reo_desc) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(reo_desc, \ + REO_DESTINATION_RING_7_REO_PUSH_REASON_OFFSET)),\ + REO_DESTINATION_RING_7_REO_PUSH_REASON_MASK, \ + REO_DESTINATION_RING_7_REO_PUSH_REASON_LSB)) + +#define HAL_RX_BUF_COOKIE_GET(buff_addr_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(buff_addr_info, \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_OFFSET)), \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK, \ + BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB)) + +#define HAL_RX_BUFFER_ADDR_39_32_GET(buff_addr_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(buff_addr_info, \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_OFFSET)), \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK, \ + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB)) + +#define HAL_RX_BUFFER_ADDR_31_0_GET(buff_addr_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(buff_addr_info, \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_OFFSET)), \ + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK, \ + 
BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB)) + +#define HAL_RX_BUF_RBM_GET(buff_addr_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(buff_addr_info, \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_OFFSET)),\ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_MASK, \ + BUFFER_ADDR_INFO_1_RETURN_BUFFER_MANAGER_LSB)) + +/* TODO: Convert the following structure fields accesseses to offsets */ + +#define HAL_RX_REO_BUFFER_ADDR_39_32_GET(reo_desc) \ + (HAL_RX_BUFFER_ADDR_39_32_GET(& \ + (((struct reo_destination_ring *) \ + reo_desc)->buf_or_link_desc_addr_info))) + +#define HAL_RX_REO_BUFFER_ADDR_31_0_GET(reo_desc) \ + (HAL_RX_BUFFER_ADDR_31_0_GET(& \ + (((struct reo_destination_ring *) \ + reo_desc)->buf_or_link_desc_addr_info))) + +#define HAL_RX_REO_BUF_COOKIE_GET(reo_desc) \ + (HAL_RX_BUF_COOKIE_GET(& \ + (((struct reo_destination_ring *) \ + reo_desc)->buf_or_link_desc_addr_info))) + +#define HAL_RX_MPDU_SEQUENCE_NUMBER_GET(mpdu_info_ptr) \ + ((mpdu_info_ptr \ + [RX_MPDU_DESC_INFO_0_MPDU_SEQUENCE_NUMBER_OFFSET >> 2] & \ + RX_MPDU_DESC_INFO_0_MPDU_SEQUENCE_NUMBER_MASK) >> \ + RX_MPDU_DESC_INFO_0_MPDU_SEQUENCE_NUMBER_LSB) + +#define HAL_RX_MPDU_DESC_PEER_META_DATA_GET(mpdu_info_ptr) \ + ((mpdu_info_ptr \ + [RX_MPDU_DESC_INFO_1_PEER_META_DATA_OFFSET >> 2] & \ + RX_MPDU_DESC_INFO_1_PEER_META_DATA_MASK) >> \ + RX_MPDU_DESC_INFO_1_PEER_META_DATA_LSB) + +#define HAL_RX_MPDU_MSDU_COUNT_GET(mpdu_info_ptr) \ + ((mpdu_info_ptr[RX_MPDU_DESC_INFO_0_MSDU_COUNT_OFFSET >> 2] & \ + RX_MPDU_DESC_INFO_0_MSDU_COUNT_MASK) >> \ + RX_MPDU_DESC_INFO_0_MSDU_COUNT_LSB) + +#define HAL_RX_MPDU_FRAGMENT_FLAG_GET(mpdu_info_ptr) \ + (mpdu_info_ptr[RX_MPDU_DESC_INFO_0_FRAGMENT_FLAG_OFFSET >> 2] & \ + RX_MPDU_DESC_INFO_0_FRAGMENT_FLAG_MASK) + +#define HAL_RX_MPDU_RETRY_BIT_GET(mpdu_info_ptr) \ + (mpdu_info_ptr[RX_MPDU_DESC_INFO_0_MPDU_RETRY_BIT_OFFSET >> 2] & \ + RX_MPDU_DESC_INFO_0_MPDU_RETRY_BIT_MASK) + +#define HAL_RX_MPDU_AMPDU_FLAG_GET(mpdu_info_ptr) \ + (mpdu_info_ptr[RX_MPDU_DESC_INFO_0_AMPDU_FLAG_OFFSET >> 2] 
& \ + RX_MPDU_DESC_INFO_0_AMPDU_FLAG_MASK) + +#define HAL_RX_MPDU_RAW_MPDU_GET(mpdu_info_ptr) \ + (mpdu_info_ptr[RX_MPDU_DESC_INFO_0_RAW_MPDU_OFFSET >> 2] & \ + RX_MPDU_DESC_INFO_0_RAW_MPDU_MASK) + +#define HAL_RX_MPDU_FLAGS_GET(mpdu_info_ptr) \ + (HAL_RX_MPDU_FRAGMENT_FLAG_GET(mpdu_info_ptr) | \ + HAL_RX_MPDU_RETRY_BIT_GET(mpdu_info_ptr) | \ + HAL_RX_MPDU_AMPDU_FLAG_GET(mpdu_info_ptr) | \ + HAL_RX_MPDU_RAW_MPDU_GET(mpdu_info_ptr)) + + +#define HAL_RX_MSDU_PKT_LENGTH_GET(msdu_info_ptr) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_MSDU_LENGTH_OFFSET)), \ + RX_MSDU_DESC_INFO_0_MSDU_LENGTH_MASK, \ + RX_MSDU_DESC_INFO_0_MSDU_LENGTH_LSB)) + +/* + * NOTE: None of the following _GET macros need a right + * shift by the corresponding _LSB. This is because, they are + * finally taken and "OR'ed" into a single word again. + */ +#define HAL_RX_FIRST_MSDU_IN_MPDU_FLAG_SET(msdu_info_ptr, val) \ + ((*(((uint32_t *)msdu_info_ptr) + \ + (RX_MSDU_DESC_INFO_0_FIRST_MSDU_IN_MPDU_FLAG_OFFSET >> 2))) |= \ + (val << RX_MSDU_DESC_INFO_0_FIRST_MSDU_IN_MPDU_FLAG_LSB) & \ + RX_MSDU_DESC_INFO_0_FIRST_MSDU_IN_MPDU_FLAG_MASK) + +#define HAL_RX_LAST_MSDU_IN_MPDU_FLAG_SET(msdu_info_ptr, val) \ + ((*(((uint32_t *)msdu_info_ptr) + \ + (RX_MSDU_DESC_INFO_0_LAST_MSDU_IN_MPDU_FLAG_OFFSET >> 2))) |= \ + (val << RX_MSDU_DESC_INFO_0_LAST_MSDU_IN_MPDU_FLAG_LSB) & \ + RX_MSDU_DESC_INFO_0_LAST_MSDU_IN_MPDU_FLAG_MASK) + +#define HAL_RX_MSDU_CONTINUATION_FLAG_SET(msdu_info_ptr, val) \ + ((*(((uint32_t *)msdu_info_ptr) + \ + (RX_MSDU_DESC_INFO_0_MSDU_CONTINUATION_OFFSET >> 2))) |= \ + (val << RX_MSDU_DESC_INFO_0_MSDU_CONTINUATION_LSB) & \ + RX_MSDU_DESC_INFO_0_MSDU_CONTINUATION_MASK) + + +#define HAL_RX_FIRST_MSDU_IN_MPDU_FLAG_GET(msdu_info_ptr) \ + ((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_FIRST_MSDU_IN_MPDU_FLAG_OFFSET)) & \ + RX_MSDU_DESC_INFO_0_FIRST_MSDU_IN_MPDU_FLAG_MASK) + +#define HAL_RX_LAST_MSDU_IN_MPDU_FLAG_GET(msdu_info_ptr) \ + 
((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_LAST_MSDU_IN_MPDU_FLAG_OFFSET)) & \ + RX_MSDU_DESC_INFO_0_LAST_MSDU_IN_MPDU_FLAG_MASK) + +#define HAL_RX_MSDU_CONTINUATION_FLAG_GET(msdu_info_ptr) \ + ((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_MSDU_CONTINUATION_OFFSET)) & \ + RX_MSDU_DESC_INFO_0_MSDU_CONTINUATION_MASK) + +#define HAL_RX_MSDU_REO_DST_IND_GET(msdu_info_ptr) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_REO_DESTINATION_INDICATION_OFFSET)), \ + RX_MSDU_DESC_INFO_0_REO_DESTINATION_INDICATION_MASK, \ + RX_MSDU_DESC_INFO_0_REO_DESTINATION_INDICATION_LSB)) + +#define HAL_RX_MSDU_SA_IS_VALID_FLAG_GET(msdu_info_ptr) \ + ((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_SA_IS_VALID_OFFSET)) & \ + RX_MSDU_DESC_INFO_0_SA_IS_VALID_MASK) + +#define HAL_RX_MSDU_SA_IDX_TIMEOUT_FLAG_GET(msdu_info_ptr) \ + ((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_SA_IDX_TIMEOUT_OFFSET)) & \ + RX_MSDU_DESC_INFO_0_SA_IDX_TIMEOUT_MASK) + +#define HAL_RX_MSDU_DA_IS_VALID_FLAG_GET(msdu_info_ptr) \ + ((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_DA_IS_VALID_OFFSET)) & \ + RX_MSDU_DESC_INFO_0_DA_IS_VALID_MASK) + +#define HAL_RX_MSDU_DA_IS_MCBC_FLAG_GET(msdu_info_ptr) \ + ((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_DA_IS_MCBC_OFFSET)) & \ + RX_MSDU_DESC_INFO_0_DA_IS_MCBC_MASK) + +#define HAL_RX_MSDU_DA_IDX_TIMEOUT_FLAG_GET(msdu_info_ptr) \ + ((*_OFFSET_TO_WORD_PTR(msdu_info_ptr, \ + RX_MSDU_DESC_INFO_0_DA_IDX_TIMEOUT_OFFSET)) & \ + RX_MSDU_DESC_INFO_0_DA_IDX_TIMEOUT_MASK) + + +#define HAL_RX_MSDU_FLAGS_GET(msdu_info_ptr) \ + (HAL_RX_FIRST_MSDU_IN_MPDU_FLAG_GET(msdu_info_ptr) | \ + HAL_RX_LAST_MSDU_IN_MPDU_FLAG_GET(msdu_info_ptr) | \ + HAL_RX_MSDU_CONTINUATION_FLAG_GET(msdu_info_ptr) | \ + HAL_RX_MSDU_SA_IS_VALID_FLAG_GET(msdu_info_ptr) | \ + HAL_RX_MSDU_SA_IDX_TIMEOUT_FLAG_GET(msdu_info_ptr) | \ + HAL_RX_MSDU_DA_IS_VALID_FLAG_GET(msdu_info_ptr) | \ + 
HAL_RX_MSDU_DA_IS_MCBC_FLAG_GET(msdu_info_ptr) | \ + HAL_RX_MSDU_DA_IDX_TIMEOUT_FLAG_GET(msdu_info_ptr)) + +#define HAL_RX_MSDU_DESC_INFO_GET(msdu_details_ptr) \ + ((struct rx_msdu_desc_info *) \ + _OFFSET_TO_BYTE_PTR(msdu_details_ptr, \ +RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET)) + + +#define HAL_RX_MPDU_PN_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_4_PN_31_0_OFFSET)), \ + RX_MPDU_INFO_4_PN_31_0_MASK, \ + RX_MPDU_INFO_4_PN_31_0_LSB)) + +#define HAL_RX_MPDU_PN_63_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_5_PN_63_32_OFFSET)), \ + RX_MPDU_INFO_5_PN_63_32_MASK, \ + RX_MPDU_INFO_5_PN_63_32_LSB)) + +#define HAL_RX_MPDU_PN_95_64_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_6_PN_95_64_OFFSET)), \ + RX_MPDU_INFO_6_PN_95_64_MASK, \ + RX_MPDU_INFO_6_PN_95_64_LSB)) + +#define HAL_RX_MPDU_PN_127_96_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_7_PN_127_96_OFFSET)), \ + RX_MPDU_INFO_7_PN_127_96_MASK, \ + RX_MPDU_INFO_7_PN_127_96_LSB)) + +#define HAL_RX_MPDU_ENCRYPT_TYPE_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_3_ENCRYPT_TYPE_OFFSET)), \ + RX_MPDU_INFO_3_ENCRYPT_TYPE_MASK, \ + RX_MPDU_INFO_3_ENCRYPT_TYPE_LSB)) + +#define HAL_RX_MPDU_ENCRYPTION_INFO_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_OFFSET)), \ + RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_MASK, \ + RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_LSB)) + +#define HAL_RX_FLD_SET(_ptr, _wrd, _field, _val) \ + (*(uint32_t *)(((uint8_t *)_ptr) + \ + _wrd ## _ ## _field ## _OFFSET) |= \ + ((_val << _wrd ## _ ## _field ## _LSB) & \ + _wrd ## _ ## _field ## _MASK)) + +#define HAL_RX_UNIFORM_HDR_SET(_rx_msdu_link, _field, _val) \ + HAL_RX_FLD_SET(_rx_msdu_link, UNIFORM_DESCRIPTOR_HEADER_0, \ + _field, _val) + 
#define HAL_RX_MSDU_DESC_INFO_SET(_msdu_info_ptr, _field, _val) \
	HAL_RX_FLD_SET(_msdu_info_ptr, RX_MSDU_DESC_INFO_0, \
			_field, _val)

#define HAL_RX_MPDU_DESC_INFO_SET(_mpdu_info_ptr, _field, _val) \
	HAL_RX_FLD_SET(_mpdu_info_ptr, RX_MPDU_DESC_INFO_0, \
			_field, _val)

/*
 * hal_rx_mpdu_desc_info_get(): Extracts MPDU descriptor info (msdu
 * count, sequence number, flags, peer meta data) from a REO
 * destination ring entry into the caller-supplied structure.
 *
 * @desc_addr: pointer to the REO destination ring entry
 * @mpdu_desc_info: output structure, filled from the entry's
 *                  rx_mpdu_desc_info_details
 * Return: void
 */
static inline void hal_rx_mpdu_desc_info_get(void *desc_addr,
				struct hal_rx_mpdu_desc_info *mpdu_desc_info)
{
	struct reo_destination_ring *reo_dst_ring;
	uint32_t mpdu_info[NUM_OF_DWORDS_RX_MPDU_DESC_INFO];

	reo_dst_ring = (struct reo_destination_ring *) desc_addr;

	/* Work on a local copy of the descriptor words */
	qdf_mem_copy(&mpdu_info,
		(const void *)&reo_dst_ring->rx_mpdu_desc_info_details,
		sizeof(struct rx_mpdu_desc_info));

	mpdu_desc_info->msdu_count = HAL_RX_MPDU_MSDU_COUNT_GET(mpdu_info);
	mpdu_desc_info->mpdu_seq = HAL_RX_MPDU_SEQUENCE_NUMBER_GET(mpdu_info);
	mpdu_desc_info->mpdu_flags = HAL_RX_MPDU_FLAGS_GET(mpdu_info);
	mpdu_desc_info->peer_meta_data =
		HAL_RX_MPDU_DESC_PEER_META_DATA_GET(mpdu_info);
}

/*
 * hal_rx_msdu_desc_info_get(): Gets the flags related to MSDU descriptor.
 * Specifically flags needed are: first_msdu_in_mpdu,
 * last_msdu_in_mpdu, msdu_continuation, sa_is_valid,
 * sa_idx_timeout, da_is_valid, da_idx_timeout, da_is_MCBC
 *
 * @desc_addr: pointer to the REO destination ring entry from which the
 *             MSDU descriptor details are read
 * @msdu_desc_info: Holds MSDU descriptor info from HAL Rx descriptor
 * Return: void
 */
static inline void hal_rx_msdu_desc_info_get(void *desc_addr,
			struct hal_rx_msdu_desc_info *msdu_desc_info)
{
	struct reo_destination_ring *reo_dst_ring;
	uint32_t msdu_info[NUM_OF_DWORDS_RX_MSDU_DESC_INFO];

	reo_dst_ring = (struct reo_destination_ring *) desc_addr;

	/* Work on a local copy of the descriptor words */
	qdf_mem_copy(&msdu_info,
		(const void *)&reo_dst_ring->rx_msdu_desc_info_details,
		sizeof(struct rx_msdu_desc_info));

	msdu_desc_info->msdu_flags = HAL_RX_MSDU_FLAGS_GET(msdu_info);
	msdu_desc_info->msdu_len = HAL_RX_MSDU_PKT_LENGTH_GET(msdu_info);
}

/*
 * hal_rxdma_buff_addr_info_set() - set the buffer_addr_info of the
 * rxdma ring entry.
 * @rxdma_entry: descriptor entry
 * @paddr: physical address of nbuf data pointer.
 * @cookie: SW cookie used as a index to SW rx desc.
 * @manager: who owns the nbuf (host, NSS, etc...).
 *
 * Return: void
 */
static inline void hal_rxdma_buff_addr_info_set(void *rxdma_entry,
			qdf_dma_addr_t paddr, uint32_t cookie, uint8_t manager)
{
	/* Split the 64-bit DMA address into the two 32-bit HW fields */
	uint32_t paddr_lo = ((u64)paddr & 0x00000000ffffffff);
	uint32_t paddr_hi = ((u64)paddr & 0xffffffff00000000) >> 32;

	HAL_RXDMA_PADDR_LO_SET(rxdma_entry, paddr_lo);
	HAL_RXDMA_PADDR_HI_SET(rxdma_entry, paddr_hi);
	HAL_RXDMA_COOKIE_SET(rxdma_entry, cookie);
	HAL_RXDMA_MANAGER_SET(rxdma_entry, manager);
}

/*
 * Structures & Macros to obtain fields from the TLV's in the Rx packet
 * pre-header.
 */

/*
 * Every Rx packet starts at an offset from the top of the buffer.
+ * If the host hasn't subscribed to any specific TLV, there is + * still space reserved for the following TLV's from the start of + * the buffer: + * -- RX ATTENTION + * -- RX MPDU START + * -- RX MSDU START + * -- RX MSDU END + * -- RX MPDU END + * -- RX PACKET HEADER (802.11) + * If the host subscribes to any of the TLV's above, that TLV + * if populated by the HW + */ + +#define NUM_DWORDS_TAG 1 + +/* By default the packet header TLV is 128 bytes */ +#define NUM_OF_BYTES_RX_802_11_HDR_TLV 128 +#define NUM_OF_DWORDS_RX_802_11_HDR_TLV \ + (NUM_OF_BYTES_RX_802_11_HDR_TLV >> 2) + +#define RX_PKT_OFFSET_WORDS \ + ( \ + NUM_OF_DWORDS_RX_ATTENTION + NUM_DWORDS_TAG \ + NUM_OF_DWORDS_RX_MPDU_START + NUM_DWORDS_TAG \ + NUM_OF_DWORDS_RX_MSDU_START + NUM_DWORDS_TAG \ + NUM_OF_DWORDS_RX_MSDU_END + NUM_DWORDS_TAG \ + NUM_OF_DWORDS_RX_MPDU_END + NUM_DWORDS_TAG \ + NUM_OF_DWORDS_RX_802_11_HDR_TLV + NUM_DWORDS_TAG \ + ) + +#define RX_PKT_OFFSET_BYTES \ + (RX_PKT_OFFSET_WORDS << 2) + +#define RX_PKT_HDR_TLV_LEN 120 + +/* + * Each RX descriptor TLV is preceded by 1 DWORD "tag" + */ +struct rx_attention_tlv { + uint32_t tag; + struct rx_attention rx_attn; +}; + +struct rx_mpdu_start_tlv { + uint32_t tag; + struct rx_mpdu_start rx_mpdu_start; +}; + +struct rx_msdu_start_tlv { + uint32_t tag; + struct rx_msdu_start rx_msdu_start; +}; + +struct rx_msdu_end_tlv { + uint32_t tag; + struct rx_msdu_end rx_msdu_end; +}; + +struct rx_mpdu_end_tlv { + uint32_t tag; + struct rx_mpdu_end rx_mpdu_end; +}; + +struct rx_pkt_hdr_tlv { + uint32_t tag; /* 4 B */ + uint32_t phy_ppdu_id; /* 4 B */ + char rx_pkt_hdr[RX_PKT_HDR_TLV_LEN]; /* 120 B */ +}; + + +#define RXDMA_OPTIMIZATION + +#ifdef RXDMA_OPTIMIZATION +/* + * The RX_PADDING_BYTES is required so that the TLV's don't + * spread across the 128 byte boundary + * RXDMA optimization requires: + * 1) MSDU_END & ATTENTION TLV's follow in that order + * 2) TLV's don't span across 128 byte lines + * 3) Rx Buffer is nicely aligned on the 128 byte 
boundary
 */
#if defined(WCSS_VERSION) && \
	((defined(CONFIG_WIN) && (WCSS_VERSION >= 96)) || \
	 (defined(CONFIG_MCL) && (WCSS_VERSION >= 72)))
#define RX_PADDING0_BYTES	4
#endif
#define RX_PADDING1_BYTES	16
/* TLV order below is the RXDMA-optimized layout (MSDU_END first) */
struct rx_pkt_tlvs {
	struct rx_msdu_end_tlv   msdu_end_tlv;	/* 72 bytes */
	struct rx_attention_tlv  attn_tlv;	/* 16 bytes */
	struct rx_msdu_start_tlv msdu_start_tlv;/* 40 bytes */
#if defined(WCSS_VERSION) && \
	((defined(CONFIG_WIN) && (WCSS_VERSION >= 96)) || \
	 (defined(CONFIG_MCL) && (WCSS_VERSION >= 72)))
	uint8_t rx_padding0[RX_PADDING0_BYTES];	/* 4 bytes */
#endif
	struct rx_mpdu_start_tlv mpdu_start_tlv;/* 96 bytes */
	struct rx_mpdu_end_tlv   mpdu_end_tlv;	/* 12 bytes */
	uint8_t rx_padding1[RX_PADDING1_BYTES];	/* 16 bytes */
	struct rx_pkt_hdr_tlv	 pkt_hdr_tlv;	/* 128 bytes */
};
#else /* RXDMA_OPTIMIZATION */
struct rx_pkt_tlvs {
	struct rx_attention_tlv  attn_tlv;
	struct rx_mpdu_start_tlv mpdu_start_tlv;
	struct rx_msdu_start_tlv msdu_start_tlv;
	struct rx_msdu_end_tlv   msdu_end_tlv;
	struct rx_mpdu_end_tlv   mpdu_end_tlv;
	struct rx_pkt_hdr_tlv    pkt_hdr_tlv;
};
#endif /* RXDMA_OPTIMIZATION */

#define RX_PKT_TLVS_LEN		(sizeof(struct rx_pkt_tlvs))

/* Returns a pointer to the raw 802.11 packet header inside the TLVs */
static inline uint8_t
*hal_rx_pkt_hdr_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;

	return pkt_tlvs->pkt_hdr_tlv.rx_pkt_hdr;

}

/*
 * Returns a pointer to the rx_padding0 area.
 * NOTE(review): rx_padding0 only exists when RXDMA_OPTIMIZATION and the
 * WCSS_VERSION condition above both hold; this helper will not compile
 * in other configurations — confirm against supported build configs.
 */
static inline uint8_t
*hal_rx_padding0_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	return pkt_tlvs->rx_padding0;
}

/*
 * @ hal_rx_encryption_info_valid: Returns encryption type.
 *
 * @ buf: rx_tlv_hdr of the received packet
 * @ Return: encryption type
 */
static inline uint32_t
hal_rx_encryption_info_valid(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_mpdu_start *mpdu_start =
		&pkt_tlvs->mpdu_start_tlv.rx_mpdu_start;
	struct rx_mpdu_info *mpdu_info = &(mpdu_start->rx_mpdu_info_details);
	uint32_t encryption_info = HAL_RX_MPDU_ENCRYPTION_INFO_VALID(mpdu_info);

	return encryption_info;
}

/*
 * @ hal_rx_print_pn: Prints the PN (packet number) of the rx packet,
 * read as four 32-bit words from RX_MPDU_INFO.
 *
 * @ buf: rx_tlv_hdr of the received packet
 * @ Return: void
 */
static inline void
hal_rx_print_pn(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_mpdu_start *mpdu_start =
		&pkt_tlvs->mpdu_start_tlv.rx_mpdu_start;
	struct rx_mpdu_info *mpdu_info = &(mpdu_start->rx_mpdu_info_details);

	uint32_t pn_31_0 = HAL_RX_MPDU_PN_31_0_GET(mpdu_info);
	uint32_t pn_63_32 = HAL_RX_MPDU_PN_63_32_GET(mpdu_info);
	uint32_t pn_95_64 = HAL_RX_MPDU_PN_95_64_GET(mpdu_info);
	uint32_t pn_127_96 = HAL_RX_MPDU_PN_127_96_GET(mpdu_info);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"PN number pn_127_96 0x%x pn_95_64 0x%x pn_63_32 0x%x pn_31_0 0x%x \n",
		pn_127_96, pn_95_64, pn_63_32, pn_31_0);
}

/*
 * Get msdu_done bit from the RX_ATTENTION TLV
 */
#define HAL_RX_ATTN_MSDU_DONE_GET(_rx_attn)	\
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_attn, \
		RX_ATTENTION_2_MSDU_DONE_OFFSET)), \
		RX_ATTENTION_2_MSDU_DONE_MASK, \
		RX_ATTENTION_2_MSDU_DONE_LSB))

/* Returns the msdu_done bit (HW finished writing this MSDU's TLVs) */
static inline uint32_t
hal_rx_attn_msdu_done_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn;
	uint32_t msdu_done;

	msdu_done = HAL_RX_ATTN_MSDU_DONE_GET(rx_attn);

	return msdu_done;
}

#define HAL_RX_ATTN_FIRST_MPDU_GET(_rx_attn) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_attn, \
		RX_ATTENTION_1_FIRST_MPDU_OFFSET)), \
		RX_ATTENTION_1_FIRST_MPDU_MASK, \
		RX_ATTENTION_1_FIRST_MPDU_LSB))

/*
 * hal_rx_attn_first_mpdu_get(): get first_mpdu bit from rx attention
 * @buf: pointer to rx_pkt_tlvs
 *
 * Return: uint32_t(first_mpdu)
 */
static inline uint32_t
hal_rx_attn_first_mpdu_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn;
	uint32_t first_mpdu;

	first_mpdu = HAL_RX_ATTN_FIRST_MPDU_GET(rx_attn);

	return first_mpdu;
}

#define HAL_RX_ATTN_TCP_UDP_CKSUM_FAIL_GET(_rx_attn) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_attn, \
		RX_ATTENTION_1_TCP_UDP_CHKSUM_FAIL_OFFSET)), \
		RX_ATTENTION_1_TCP_UDP_CHKSUM_FAIL_MASK, \
		RX_ATTENTION_1_TCP_UDP_CHKSUM_FAIL_LSB))

/*
 * hal_rx_attn_tcp_udp_cksum_fail_get(): get tcp_udp cksum fail bit
 * from rx attention
 * @buf: pointer to rx_pkt_tlvs
 *
 * Return: tcp_udp_cksum_fail
 */
static inline bool
hal_rx_attn_tcp_udp_cksum_fail_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn;
	bool tcp_udp_cksum_fail;

	tcp_udp_cksum_fail = HAL_RX_ATTN_TCP_UDP_CKSUM_FAIL_GET(rx_attn);

	return tcp_udp_cksum_fail;
}

#define HAL_RX_ATTN_IP_CKSUM_FAIL_GET(_rx_attn) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_attn, \
		RX_ATTENTION_1_IP_CHKSUM_FAIL_OFFSET)), \
		RX_ATTENTION_1_IP_CHKSUM_FAIL_MASK, \
		RX_ATTENTION_1_IP_CHKSUM_FAIL_LSB))

/*
 * hal_rx_attn_ip_cksum_fail_get(): get ip cksum fail bit
 * from rx attention
 * @buf: pointer to rx_pkt_tlvs
 *
 * Return: ip_cksum_fail
 */
static inline bool
hal_rx_attn_ip_cksum_fail_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn;
	bool ip_cksum_fail;

	ip_cksum_fail = HAL_RX_ATTN_IP_CKSUM_FAIL_GET(rx_attn);

	return ip_cksum_fail;
}

#define HAL_RX_ATTN_PHY_PPDU_ID_GET(_rx_attn) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_attn, \
		RX_ATTENTION_0_PHY_PPDU_ID_OFFSET)), \
		RX_ATTENTION_0_PHY_PPDU_ID_MASK, \
		RX_ATTENTION_0_PHY_PPDU_ID_LSB))

/*
 * hal_rx_attn_phy_ppdu_id_get(): get phy_ppdu_id value
 * from rx attention
 * @buf: pointer to rx_pkt_tlvs
 *
 * Return: phy_ppdu_id
 */
static inline uint16_t
hal_rx_attn_phy_ppdu_id_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn;
	uint16_t phy_ppdu_id;

	phy_ppdu_id = HAL_RX_ATTN_PHY_PPDU_ID_GET(rx_attn);

	return phy_ppdu_id;
}

/*
 * Get peer_meta_data from RX_MPDU_INFO within RX_MPDU_START
 */
#define HAL_RX_MPDU_PEER_META_DATA_GET(_rx_mpdu_info) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \
		RX_MPDU_INFO_8_PEER_META_DATA_OFFSET)), \
		RX_MPDU_INFO_8_PEER_META_DATA_MASK, \
		RX_MPDU_INFO_8_PEER_META_DATA_LSB))

/* Returns the SW-programmed peer meta data word from RX_MPDU_INFO */
static inline uint32_t
hal_rx_mpdu_peer_meta_data_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_mpdu_start *mpdu_start =
		&pkt_tlvs->mpdu_start_tlv.rx_mpdu_start;

	struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details;
	uint32_t peer_meta_data;

	peer_meta_data = HAL_RX_MPDU_PEER_META_DATA_GET(mpdu_info);

	return peer_meta_data;
}

#define HAL_RX_MPDU_INFO_AMPDU_FLAG_GET(_rx_mpdu_info) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \
		RX_MPDU_INFO_12_AMPDU_FLAG_OFFSET)), \
		RX_MPDU_INFO_12_AMPDU_FLAG_MASK, \
		RX_MPDU_INFO_12_AMPDU_FLAG_LSB))
/**
 * hal_rx_mpdu_info_ampdu_flag_get(): get ampdu flag bit
 * from rx mpdu info
 * @buf: pointer to rx_pkt_tlvs
 *
 * Return: ampdu flag
 */
static inline bool
hal_rx_mpdu_info_ampdu_flag_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_mpdu_start *mpdu_start =
		&pkt_tlvs->mpdu_start_tlv.rx_mpdu_start;

	struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details;
	bool ampdu_flag;

	ampdu_flag = HAL_RX_MPDU_INFO_AMPDU_FLAG_GET(mpdu_info);

	return ampdu_flag;
}

/* Plain assignment (not OR) — the field occupies the word per the mask */
#define HAL_RX_MPDU_PEER_META_DATA_SET(_rx_mpdu_info, peer_mdata) \
	((*(((uint32_t *)_rx_mpdu_info) + \
		(RX_MPDU_INFO_8_PEER_META_DATA_OFFSET >> 2))) = \
		(peer_mdata << RX_MPDU_INFO_8_PEER_META_DATA_LSB) & \
		RX_MPDU_INFO_8_PEER_META_DATA_MASK)

/*
 * @ hal_rx_mpdu_peer_meta_data_set: set peer meta data in RX mpdu start tlv
 *
 * @ buf: rx_tlv_hdr of the received packet
 * @ peer_mdata: peer meta data to be set.
 * @ Return: void
 */
static inline void
hal_rx_mpdu_peer_meta_data_set(uint8_t *buf, uint32_t peer_mdata)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_mpdu_start *mpdu_start =
		&pkt_tlvs->mpdu_start_tlv.rx_mpdu_start;

	struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details;

	HAL_RX_MPDU_PEER_META_DATA_SET(mpdu_info, peer_mdata);
}

/* l3_header_padding moved words between WCSS revisions; pick per version */
#if defined(WCSS_VERSION) && \
	((defined(CONFIG_WIN) && (WCSS_VERSION > 81)) || \
	 (defined(CONFIG_MCL) && (WCSS_VERSION >= 72)))
#define HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(_rx_msdu_end) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \
		RX_MSDU_END_5_L3_HEADER_PADDING_OFFSET)), \
		RX_MSDU_END_5_L3_HEADER_PADDING_MASK, \
		RX_MSDU_END_5_L3_HEADER_PADDING_LSB))
#else
#define HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(_rx_msdu_end) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \
		RX_MSDU_END_9_L3_HEADER_PADDING_OFFSET)), \
		RX_MSDU_END_9_L3_HEADER_PADDING_MASK, \
		RX_MSDU_END_9_L3_HEADER_PADDING_LSB))
#endif

/**
* LRO information needed from the TLVs
*/
#define HAL_RX_TLV_GET_LRO_ELIGIBLE(buf) \
	(_HAL_MS( \
		(*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
			msdu_end_tlv.rx_msdu_end), \
			RX_MSDU_END_9_LRO_ELIGIBLE_OFFSET)), \
		RX_MSDU_END_9_LRO_ELIGIBLE_MASK, \
		RX_MSDU_END_9_LRO_ELIGIBLE_LSB))

#define HAL_RX_TLV_GET_TCP_CHKSUM(buf) \
	(_HAL_MS( \
		(*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
			msdu_end_tlv.rx_msdu_end), \
			RX_MSDU_END_1_TCP_UDP_CHKSUM_OFFSET)), \
		RX_MSDU_END_1_TCP_UDP_CHKSUM_MASK, \
		RX_MSDU_END_1_TCP_UDP_CHKSUM_LSB))

#define HAL_RX_TLV_GET_TCP_ACK(buf) \
	(_HAL_MS( \
		(*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
			msdu_end_tlv.rx_msdu_end), \
			RX_MSDU_END_8_TCP_ACK_NUMBER_OFFSET)), \
		RX_MSDU_END_8_TCP_ACK_NUMBER_MASK, \
		RX_MSDU_END_8_TCP_ACK_NUMBER_LSB))

#define HAL_RX_TLV_GET_TCP_SEQ(buf) \
	(_HAL_MS( \
		(*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
			msdu_end_tlv.rx_msdu_end), \
			RX_MSDU_END_7_TCP_SEQ_NUMBER_OFFSET)), \
		RX_MSDU_END_7_TCP_SEQ_NUMBER_MASK, \
		RX_MSDU_END_7_TCP_SEQ_NUMBER_LSB))

#define HAL_RX_TLV_GET_TCP_WIN(buf) \
	(_HAL_MS( \
		(*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
			msdu_end_tlv.rx_msdu_end), \
			RX_MSDU_END_9_WINDOW_SIZE_OFFSET)), \
		RX_MSDU_END_9_WINDOW_SIZE_MASK, \
		RX_MSDU_END_9_WINDOW_SIZE_LSB))

#define HAL_RX_TLV_GET_TCP_PURE_ACK(buf) \
	(_HAL_MS( \
		(*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
			msdu_start_tlv.rx_msdu_start), \
			RX_MSDU_START_2_TCP_ONLY_ACK_OFFSET)), \
		RX_MSDU_START_2_TCP_ONLY_ACK_MASK, \
		RX_MSDU_START_2_TCP_ONLY_ACK_LSB))

#define HAL_RX_TLV_GET_TCP_PROTO(buf) \
	(_HAL_MS( \
		(*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
			msdu_start_tlv.rx_msdu_start), \
			RX_MSDU_START_2_TCP_PROTO_OFFSET)), \
		RX_MSDU_START_2_TCP_PROTO_MASK, \
		RX_MSDU_START_2_TCP_PROTO_LSB))

#define HAL_RX_TLV_GET_IPV6(buf) \
	(_HAL_MS( \
		(*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
			msdu_start_tlv.rx_msdu_start), \
			RX_MSDU_START_2_IPV6_PROTO_OFFSET)), \
		RX_MSDU_START_2_IPV6_PROTO_MASK, \
		RX_MSDU_START_2_IPV6_PROTO_LSB))

#define HAL_RX_TLV_GET_IP_OFFSET(buf) \
	(_HAL_MS( \
		(*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
			msdu_start_tlv.rx_msdu_start), \
			RX_MSDU_START_1_L3_OFFSET_OFFSET)), \
		RX_MSDU_START_1_L3_OFFSET_MASK, \
		RX_MSDU_START_1_L3_OFFSET_LSB))

#define HAL_RX_TLV_GET_TCP_OFFSET(buf) \
	(_HAL_MS( \
		(*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
			msdu_start_tlv.rx_msdu_start), \
			RX_MSDU_START_1_L4_OFFSET_OFFSET)), \
		RX_MSDU_START_1_L4_OFFSET_MASK, \
		RX_MSDU_START_1_L4_OFFSET_LSB))

#define HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(buf) \
	(_HAL_MS( \
		(*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
			msdu_start_tlv.rx_msdu_start), \
			RX_MSDU_START_4_FLOW_ID_TOEPLITZ_OFFSET)), \
		RX_MSDU_START_4_FLOW_ID_TOEPLITZ_MASK, \
		RX_MSDU_START_4_FLOW_ID_TOEPLITZ_LSB))

 /**
 * hal_rx_msdu_end_l3_hdr_padding_get(): API to get the
 * l3_header padding from rx_msdu_end TLV
 *
 * @ buf: pointer to the start of RX PKT TLV headers
 * Return: number of l3 header padding bytes
 */
static inline uint32_t
hal_rx_msdu_end_l3_hdr_padding_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end;
	uint32_t l3_header_padding;

	l3_header_padding = HAL_RX_MSDU_END_L3_HEADER_PADDING_GET(msdu_end);

	return l3_header_padding;
}

#define HAL_RX_MSDU_END_SA_IDX_GET(_rx_msdu_end) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \
		RX_MSDU_END_13_SA_IDX_OFFSET)), \
		RX_MSDU_END_13_SA_IDX_MASK, \
		RX_MSDU_END_13_SA_IDX_LSB))

 /**
 * hal_rx_msdu_end_sa_idx_get(): API to get the
 * sa_idx from rx_msdu_end TLV
 *
 * @ buf: pointer to the start of RX PKT TLV headers
 * Return: sa_idx (SA AST index)
 */
static inline uint16_t
hal_rx_msdu_end_sa_idx_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end;
	uint16_t sa_idx;

	sa_idx = HAL_RX_MSDU_END_SA_IDX_GET(msdu_end);

	return sa_idx;
}

#define HAL_RX_MSDU_END_SA_IS_VALID_GET(_rx_msdu_end) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \
		RX_MSDU_END_5_SA_IS_VALID_OFFSET)), \
		RX_MSDU_END_5_SA_IS_VALID_MASK, \
		RX_MSDU_END_5_SA_IS_VALID_LSB))

 /**
 *
 hal_rx_msdu_end_sa_is_valid_get(): API to get the
 * sa_is_valid bit from rx_msdu_end TLV
 *
 * @ buf: pointer to the start of RX PKT TLV headers
 * Return: sa_is_valid bit
 */
static inline uint8_t
hal_rx_msdu_end_sa_is_valid_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end;
	uint8_t sa_is_valid;

	sa_is_valid = HAL_RX_MSDU_END_SA_IS_VALID_GET(msdu_end);

	return sa_is_valid;
}

#define HAL_RX_MSDU_END_SA_SW_PEER_ID_GET(_rx_msdu_end) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \
		RX_MSDU_END_16_SA_SW_PEER_ID_OFFSET)), \
		RX_MSDU_END_16_SA_SW_PEER_ID_MASK, \
		RX_MSDU_END_16_SA_SW_PEER_ID_LSB))

 /**
 * hal_rx_msdu_end_sa_sw_peer_id_get(): API to get the
 * sa_sw_peer_id from rx_msdu_end TLV
 *
 * @ buf: pointer to the start of RX PKT TLV headers
 * Return: sa_sw_peer_id index
 */
static inline uint32_t
hal_rx_msdu_end_sa_sw_peer_id_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end;
	uint32_t sa_sw_peer_id;

	sa_sw_peer_id = HAL_RX_MSDU_END_SA_SW_PEER_ID_GET(msdu_end);

	return sa_sw_peer_id;
}

#define HAL_RX_MSDU_START_MSDU_LEN_GET(_rx_msdu_start) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_start, \
		RX_MSDU_START_1_MSDU_LENGTH_OFFSET)), \
		RX_MSDU_START_1_MSDU_LENGTH_MASK, \
		RX_MSDU_START_1_MSDU_LENGTH_LSB))

 /**
 * hal_rx_msdu_start_msdu_len_get(): API to get the MSDU length
 * from rx_msdu_start TLV
 *
 * @ buf: pointer to the start of RX PKT TLV headers
 * Return: msdu length
 */
static inline uint32_t
hal_rx_msdu_start_msdu_len_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_msdu_start *msdu_start =
		&pkt_tlvs->msdu_start_tlv.rx_msdu_start;
	uint32_t msdu_len;

	msdu_len = HAL_RX_MSDU_START_MSDU_LEN_GET(msdu_start);

	return msdu_len;
}

 /**
 *
hal_rx_msdu_start_msdu_len_set(): API to set the MSDU length + * from rx_msdu_start TLV + * + * @buf: pointer to the start of RX PKT TLV headers + * @len: msdu length + * + * Return: none + */ +static inline void +hal_rx_msdu_start_msdu_len_set(uint8_t *buf, uint32_t len) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + void *wrd1; + + wrd1 = (uint8_t *)msdu_start + RX_MSDU_START_1_MSDU_LENGTH_OFFSET; + *(uint32_t *)wrd1 &= (~RX_MSDU_START_1_MSDU_LENGTH_MASK); + *(uint32_t *)wrd1 |= len; +} + +#define HAL_RX_MSDU_START_BW_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start),\ + RX_MSDU_START_5_RECEIVE_BANDWIDTH_OFFSET)), \ + RX_MSDU_START_5_RECEIVE_BANDWIDTH_MASK, \ + RX_MSDU_START_5_RECEIVE_BANDWIDTH_LSB)) + +/* + * hal_rx_msdu_start_bw_get(): API to get the Bandwidth + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(bw) + */ +static inline uint32_t +hal_rx_msdu_start_bw_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t bw; + + bw = HAL_RX_MSDU_START_BW_GET(msdu_start); + + return bw; +} + +#define HAL_RX_MSDU_START_RECEPTION_TYPE_GET(_rx_msdu_start) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start), \ + RX_MSDU_START_5_RECEPTION_TYPE_OFFSET)), \ + RX_MSDU_START_5_RECEPTION_TYPE_MASK, \ + RX_MSDU_START_5_RECEPTION_TYPE_LSB)) + +/* + * hal_rx_msdu_start_reception_type_get(): API to get the reception type + * Interval from rx_msdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(reception_type) + */ +static inline uint32_t +hal_rx_msdu_start_reception_type_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + uint32_t reception_type; 

	reception_type = HAL_RX_MSDU_START_RECEPTION_TYPE_GET(msdu_start);

	return reception_type;
}

#define HAL_RX_MSDU_START_FLOWID_TOEPLITZ_GET(_rx_msdu_start) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_start, \
		RX_MSDU_START_4_FLOW_ID_TOEPLITZ_OFFSET)), \
		RX_MSDU_START_4_FLOW_ID_TOEPLITZ_MASK, \
		RX_MSDU_START_4_FLOW_ID_TOEPLITZ_LSB))

 /**
 * hal_rx_msdu_start_toeplitz_get: API to get the toeplitz hash
 * from rx_msdu_start TLV
 *
 * @ buf: pointer to the start of RX PKT TLV headers
 * Return: toeplitz hash
 */
static inline uint32_t
hal_rx_msdu_start_toeplitz_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_msdu_start *msdu_start =
		&pkt_tlvs->msdu_start_tlv.rx_msdu_start;

	return HAL_RX_MSDU_START_FLOWID_TOEPLITZ_GET(msdu_start);
}

/*
 * Get qos_control_valid from RX_MPDU_START
 */
#define HAL_RX_MPDU_INFO_QOS_CONTROL_VALID_GET(_rx_mpdu_info) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \
		RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_OFFSET)), \
		RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_MASK, \
		RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_LSB))

/*
 * Returns the mpdu_sequence_control_valid bit from RX_MPDU_INFO.
 * NOTE(review): the macro name says "QOS_CONTROL" but it reads the
 * SEQUENCE_CONTROL_VALID field — confirm which is intended at callers.
 */
static inline uint32_t
hal_rx_mpdu_start_mpdu_qos_control_valid_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_mpdu_start *mpdu_start =
		&pkt_tlvs->mpdu_start_tlv.rx_mpdu_start;
	uint32_t qos_control_valid;

	qos_control_valid = HAL_RX_MPDU_INFO_QOS_CONTROL_VALID_GET(
		&(mpdu_start->rx_mpdu_info_details));

	return qos_control_valid;
}

/*
 * Get tid from RX_MPDU_START
 */
#define HAL_RX_MPDU_INFO_TID_GET(_rx_mpdu_info) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \
		RX_MPDU_INFO_3_TID_OFFSET)), \
		RX_MPDU_INFO_3_TID_MASK, \
		RX_MPDU_INFO_3_TID_LSB))

/* Returns the TID of the MPDU from RX_MPDU_INFO */
static inline uint32_t
hal_rx_mpdu_start_tid_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_mpdu_start *mpdu_start =
		&pkt_tlvs->mpdu_start_tlv.rx_mpdu_start;
	uint32_t tid;

	tid = HAL_RX_MPDU_INFO_TID_GET(
		&(mpdu_start->rx_mpdu_info_details));

	return tid;
}

/*
 * Get SW peer id from RX_MPDU_START
 */
#define HAL_RX_MPDU_INFO_SW_PEER_ID_GET(_rx_mpdu_info) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_info), \
		RX_MPDU_INFO_1_SW_PEER_ID_OFFSET)), \
		RX_MPDU_INFO_1_SW_PEER_ID_MASK, \
		RX_MPDU_INFO_1_SW_PEER_ID_LSB))

/* Returns the SW peer id from RX_MPDU_INFO */
static inline uint32_t
hal_rx_mpdu_start_sw_peer_id_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_mpdu_start *mpdu_start =
		&pkt_tlvs->mpdu_start_tlv.rx_mpdu_start;
	uint32_t sw_peer_id;

	sw_peer_id = HAL_RX_MPDU_INFO_SW_PEER_ID_GET(
		&(mpdu_start->rx_mpdu_info_details));

	return sw_peer_id;
}

/* SGI field moved words between WCSS revisions; pick per version */
#if defined(WCSS_VERSION) && \
	((defined(CONFIG_WIN) && (WCSS_VERSION > 81)) || \
	 (defined(CONFIG_MCL) && (WCSS_VERSION >= 72)))
#define HAL_RX_MSDU_START_SGI_GET(_rx_msdu_start) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start),\
		RX_MSDU_START_5_SGI_OFFSET)), \
		RX_MSDU_START_5_SGI_MASK, \
		RX_MSDU_START_5_SGI_LSB))
#else
#define HAL_RX_MSDU_START_SGI_GET(_rx_msdu_start) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start),\
		RX_MSDU_START_6_SGI_OFFSET)), \
		RX_MSDU_START_6_SGI_MASK, \
		RX_MSDU_START_6_SGI_LSB))
#endif
/**
 * hal_rx_msdu_start_sgi_get(): API to get the Short Guard
 * Interval from rx_msdu_start TLV
 *
 * @buf: pointer to the start of RX PKT TLV headers
 * Return: uint32_t(sgi)
 */
static inline uint32_t
hal_rx_msdu_start_sgi_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_msdu_start *msdu_start =
		&pkt_tlvs->msdu_start_tlv.rx_msdu_start;
	uint32_t sgi;

	sgi = HAL_RX_MSDU_START_SGI_GET(msdu_start);

	return sgi;
}

#if defined(WCSS_VERSION) && \
	((defined(CONFIG_WIN) && (WCSS_VERSION > 81)) || \
	 (defined(CONFIG_MCL) && (WCSS_VERSION >= 72)))
#define HAL_RX_MSDU_START_RATE_MCS_GET(_rx_msdu_start) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start),\
		RX_MSDU_START_5_RATE_MCS_OFFSET)), \
		RX_MSDU_START_5_RATE_MCS_MASK, \
		RX_MSDU_START_5_RATE_MCS_LSB))
#else
#define HAL_RX_MSDU_START_RATE_MCS_GET(_rx_msdu_start) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start),\
		RX_MSDU_START_6_RATE_MCS_OFFSET)), \
		RX_MSDU_START_6_RATE_MCS_MASK, \
		RX_MSDU_START_6_RATE_MCS_LSB))
#endif
/**
 * hal_rx_msdu_start_rate_mcs_get(): API to get the MCS rate
 * from rx_msdu_start TLV
 *
 * @buf: pointer to the start of RX PKT TLV headers
 * Return: uint32_t(rate_mcs)
 */
static inline uint32_t
hal_rx_msdu_start_rate_mcs_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_msdu_start *msdu_start =
		&pkt_tlvs->msdu_start_tlv.rx_msdu_start;
	uint32_t rate_mcs;

	rate_mcs = HAL_RX_MSDU_START_RATE_MCS_GET(msdu_start);

	return rate_mcs;
}

#define HAL_RX_ATTN_DECRYPT_STATUS_GET(_rx_attn) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_attn, \
		RX_ATTENTION_2_DECRYPT_STATUS_CODE_OFFSET)), \
		RX_ATTENTION_2_DECRYPT_STATUS_CODE_MASK, \
		RX_ATTENTION_2_DECRYPT_STATUS_CODE_LSB))

/*
 * hal_rx_attn_msdu_get_is_decrypted(): API to get the decrypt status of
 * the packet from rx_attention
 *
 * @buf: pointer to the start of RX PKT TLV header
 * Return: uint32_t — 1 if decrypt status code is 0 (success), else 0
 */

static inline uint32_t
hal_rx_attn_msdu_get_is_decrypted(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn;
	uint32_t is_decrypt = 0;
	uint32_t decrypt_status;

	decrypt_status = HAL_RX_ATTN_DECRYPT_STATUS_GET(rx_attn);

	/* status code 0 means decryption succeeded */
	if (!decrypt_status)
		is_decrypt = 1;

	return is_decrypt;
}

/*
 * Get key index from RX_MSDU_END
 */
#define HAL_RX_MSDU_END_KEYID_OCTET_GET(_rx_msdu_end) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \
		RX_MSDU_END_2_KEY_ID_OCTET_OFFSET)), \
		RX_MSDU_END_2_KEY_ID_OCTET_MASK, \
		RX_MSDU_END_2_KEY_ID_OCTET_LSB))
/*
 * hal_rx_msdu_get_keyid(): API to get the key id of the decrypted packet
 * from rx_msdu_end
 *
 * @buf: pointer to the start of RX PKT TLV header
 * Return: uint32_t(key id)
 *         NOTE(review): returns the low 2 bits of the key_id_octet
 *         field — assumes HW already places the key id there; confirm
 *         against the target's rx_msdu_end field definition.
 */

static inline uint32_t
hal_rx_msdu_get_keyid(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end;
	uint32_t keyid_octet;

	keyid_octet = HAL_RX_MSDU_END_KEYID_OCTET_GET(msdu_end);

	return keyid_octet & 0x3;
}

#define HAL_RX_MSDU_START_RSSI_GET(_rx_msdu_start) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_start, \
		RX_MSDU_START_5_USER_RSSI_OFFSET)), \
		RX_MSDU_START_5_USER_RSSI_MASK, \
		RX_MSDU_START_5_USER_RSSI_LSB))
/*
 * hal_rx_msdu_start_get_rssi(): API to get the rssi of received pkt
 * from rx_msdu_start
 *
 * @buf: pointer to the start of RX PKT TLV header
 * Return: uint32_t(rssi)
 */

static inline uint32_t
hal_rx_msdu_start_get_rssi(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_msdu_start *msdu_start =
		&pkt_tlvs->msdu_start_tlv.rx_msdu_start;
	uint32_t rssi;

	rssi = HAL_RX_MSDU_START_RSSI_GET(msdu_start);

	return rssi;

}

#define HAL_RX_MSDU_START_FREQ_GET(_rx_msdu_start) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_start, \
		RX_MSDU_START_7_SW_PHY_META_DATA_OFFSET)), \
		RX_MSDU_START_7_SW_PHY_META_DATA_MASK, \
		RX_MSDU_START_7_SW_PHY_META_DATA_LSB))

/*
 * hal_rx_msdu_start_get_freq(): API to get the frequency of operating
 * channel from rx_msdu_start (sw_phy_meta_data field)
 *
 * @buf: pointer to the start of RX PKT TLV header
 * Return: uint32_t(frequency)
 */

static inline uint32_t
hal_rx_msdu_start_get_freq(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_msdu_start *msdu_start =
		&pkt_tlvs->msdu_start_tlv.rx_msdu_start;
	uint32_t freq;

	freq = HAL_RX_MSDU_START_FREQ_GET(msdu_start);

	return freq;
}


#define HAL_RX_MSDU_START_PKT_TYPE_GET(_rx_msdu_start) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_start, \
		RX_MSDU_START_5_PKT_TYPE_OFFSET)), \
		RX_MSDU_START_5_PKT_TYPE_MASK, \
		RX_MSDU_START_5_PKT_TYPE_LSB))

/*
 * hal_rx_msdu_start_get_pkt_type(): API to get the pkt type
 * from rx_msdu_start
 *
 * @buf: pointer to the start of RX PKT TLV header
 * Return: uint32_t(pkt type)
 */

static inline uint32_t
hal_rx_msdu_start_get_pkt_type(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_msdu_start *msdu_start =
		&pkt_tlvs->msdu_start_tlv.rx_msdu_start;
	uint32_t pkt_type;

	pkt_type = HAL_RX_MSDU_START_PKT_TYPE_GET(msdu_start);

	return pkt_type;
}

#define HAL_RX_MSDU_START_NSS_GET(_rx_msdu_start) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start),\
		RX_MSDU_START_5_NSS_OFFSET)), \
		RX_MSDU_START_5_NSS_MASK, \
		RX_MSDU_START_5_NSS_LSB))

/*
 * hal_rx_msdu_start_nss_get(): API to get the number of spatial
 * streams from rx_msdu_start
 *
 * @buf: pointer to the start of RX PKT TLV header
 * Return: uint32_t(nss)
 */

#if !defined(QCA_WIFI_QCA6290_11AX)
static inline uint32_t
hal_rx_msdu_start_nss_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_msdu_start *msdu_start =
		&pkt_tlvs->msdu_start_tlv.rx_msdu_start;
	uint32_t nss;

	nss = HAL_RX_MSDU_START_NSS_GET(msdu_start);
	return nss;
}
#else
/* 11AX targets expose a per-stream bitmap instead of an nss count */
#define HAL_RX_MSDU_START_MIMO_SS_BITMAP(_rx_msdu_start) \
	(_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start),\
		RX_MSDU_START_5_MIMO_SS_BITMAP_OFFSET)), \
		RX_MSDU_START_5_MIMO_SS_BITMAP_MASK, \
		RX_MSDU_START_5_MIMO_SS_BITMAP_LSB))

static inline uint32_t
hal_rx_msdu_start_nss_get(uint8_t *buf)
{
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
	struct rx_msdu_start *msdu_start =
		&pkt_tlvs->msdu_start_tlv.rx_msdu_start;
	uint8_t mimo_ss_bitmap;

	mimo_ss_bitmap = HAL_RX_MSDU_START_MIMO_SS_BITMAP(msdu_start);

	/* nss = population count of the spatial-stream bitmap */
	return qdf_get_hweight8(mimo_ss_bitmap);
}
#endif

#define
HAL_RX_MPDU_GET_TODS(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_TO_DS_OFFSET)), \ + RX_MPDU_INFO_2_TO_DS_MASK, \ + RX_MPDU_INFO_2_TO_DS_LSB)) + +/* + * hal_rx_mpdu_get_tods(): API to get the tods info + * from rx_mpdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(to_ds) + */ + +static inline uint32_t +hal_rx_mpdu_get_to_ds(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + uint32_t to_ds; + + to_ds = HAL_RX_MPDU_GET_TODS(mpdu_info); + + return to_ds; +} + +#define HAL_RX_MPDU_GET_FROMDS(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_FR_DS_OFFSET)), \ + RX_MPDU_INFO_2_FR_DS_MASK, \ + RX_MPDU_INFO_2_FR_DS_LSB)) + +/* + * hal_rx_mpdu_get_fr_ds(): API to get the from ds info + * from rx_mpdu_start + * + * @buf: pointer to the start of RX PKT TLV header + * Return: uint32_t(fr_ds) + */ + +static inline uint32_t +hal_rx_mpdu_get_fr_ds(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + uint32_t fr_ds; + + fr_ds = HAL_RX_MPDU_GET_FROMDS(mpdu_info); + + return fr_ds; +} + +#define HAL_RX_MPDU_MAC_ADDR_AD1_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD1_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD1_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD1_VALID_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD2_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD2_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD2_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD2_VALID_LSB)) + +#define 
HAL_RX_MPDU_MAC_ADDR_AD3_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD3_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD3_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD3_VALID_LSB)) + +#define HAL_RX_MPDU_MAC_ADDR_AD4_VALID_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_LSB)) + +#define HAL_RX_MPDU_AD1_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_OFFSET)), \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_MASK, \ + RX_MPDU_INFO_15_MAC_ADDR_AD1_31_0_LSB)) + +#define HAL_RX_MPDU_AD1_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_OFFSET)), \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_MASK, \ + RX_MPDU_INFO_16_MAC_ADDR_AD1_47_32_LSB)) + +#define HAL_RX_MPDU_AD2_15_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_OFFSET)), \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_MASK, \ + RX_MPDU_INFO_16_MAC_ADDR_AD2_15_0_LSB)) + +#define HAL_RX_MPDU_AD2_47_16_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_OFFSET)), \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_MASK, \ + RX_MPDU_INFO_17_MAC_ADDR_AD2_47_16_LSB)) + +#define HAL_RX_MPDU_AD3_31_0_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_OFFSET)), \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_MASK, \ + RX_MPDU_INFO_18_MAC_ADDR_AD3_31_0_LSB)) + +#define HAL_RX_MPDU_AD3_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_OFFSET)), \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_MASK, \ + RX_MPDU_INFO_19_MAC_ADDR_AD3_47_32_LSB)) + +#define HAL_RX_MPDU_AD4_31_0_GET(_rx_mpdu_info) \ + 
(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_OFFSET)), \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_MASK, \ + RX_MPDU_INFO_20_MAC_ADDR_AD4_31_0_LSB)) + +#define HAL_RX_MPDU_AD4_47_32_GET(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_OFFSET)), \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_MASK, \ + RX_MPDU_INFO_21_MAC_ADDR_AD4_47_32_LSB)) + +/* + * hal_rx_mpdu_get_addr1(): API to check get address1 of the mpdu + * + * @buf: pointer to the start of RX PKT TLV headera + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static inline +QDF_STATUS hal_rx_mpdu_get_addr1(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr1 { + uint32_t ad1_31_0; + uint16_t ad1_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr1 *addr = (struct hal_addr1 *)mac_addr; + uint32_t mac_addr_ad1_valid; + + mac_addr_ad1_valid = HAL_RX_MPDU_MAC_ADDR_AD1_VALID_GET(mpdu_info); + + if (mac_addr_ad1_valid) { + addr->ad1_31_0 = HAL_RX_MPDU_AD1_31_0_GET(mpdu_info); + addr->ad1_47_32 = HAL_RX_MPDU_AD1_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr2(): API to check get address2 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static inline +QDF_STATUS hal_rx_mpdu_get_addr2(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr2 { + uint16_t ad2_15_0; + uint32_t ad2_47_16; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = 
&mpdu_start->rx_mpdu_info_details; + struct hal_addr2 *addr = (struct hal_addr2 *)mac_addr; + uint32_t mac_addr_ad2_valid; + + mac_addr_ad2_valid = HAL_RX_MPDU_MAC_ADDR_AD2_VALID_GET(mpdu_info); + + if (mac_addr_ad2_valid) { + addr->ad2_15_0 = HAL_RX_MPDU_AD2_15_0_GET(mpdu_info); + addr->ad2_47_16 = HAL_RX_MPDU_AD2_47_16_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr3(): API to get address3 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static inline +QDF_STATUS hal_rx_mpdu_get_addr3(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr3 { + uint32_t ad3_31_0; + uint16_t ad3_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + struct hal_addr3 *addr = (struct hal_addr3 *)mac_addr; + uint32_t mac_addr_ad3_valid; + + mac_addr_ad3_valid = HAL_RX_MPDU_MAC_ADDR_AD3_VALID_GET(mpdu_info); + + if (mac_addr_ad3_valid) { + addr->ad3_31_0 = HAL_RX_MPDU_AD3_31_0_GET(mpdu_info); + addr->ad3_47_32 = HAL_RX_MPDU_AD3_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +/* + * hal_rx_mpdu_get_addr4(): API to get address4 of the mpdu + * in the packet + * + * @buf: pointer to the start of RX PKT TLV header + * @mac_addr: pointer to mac address + * Return: success/failure + */ +static inline +QDF_STATUS hal_rx_mpdu_get_addr4(uint8_t *buf, uint8_t *mac_addr) +{ + struct __attribute__((__packed__)) hal_addr4 { + uint32_t ad4_31_0; + uint16_t ad4_47_32; + }; + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + + struct rx_mpdu_info *mpdu_info = &mpdu_start->rx_mpdu_info_details; + 
struct hal_addr4 *addr = (struct hal_addr4 *)mac_addr; + uint32_t mac_addr_ad4_valid; + + mac_addr_ad4_valid = HAL_RX_MPDU_MAC_ADDR_AD4_VALID_GET(mpdu_info); + + if (mac_addr_ad4_valid) { + addr->ad4_31_0 = HAL_RX_MPDU_AD4_31_0_GET(mpdu_info); + addr->ad4_47_32 = HAL_RX_MPDU_AD4_47_32_GET(mpdu_info); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +#define HAL_RX_MSDU_END_DA_IDX_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_13_DA_IDX_OFFSET)), \ + RX_MSDU_END_13_DA_IDX_MASK, \ + RX_MSDU_END_13_DA_IDX_LSB)) + + /** + * hal_rx_msdu_end_da_idx_get: API to get da_idx + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da index + */ +static inline uint16_t +hal_rx_msdu_end_da_idx_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint16_t da_idx; + + da_idx = HAL_RX_MSDU_END_DA_IDX_GET(msdu_end); + + return da_idx; +} + +#define HAL_RX_MSDU_END_DA_IS_VALID_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_DA_IS_VALID_OFFSET)), \ + RX_MSDU_END_5_DA_IS_VALID_MASK, \ + RX_MSDU_END_5_DA_IS_VALID_LSB)) + + /** + * hal_rx_msdu_end_da_is_valid_get: API to check if da is valid + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da_is_valid + */ +static inline uint8_t +hal_rx_msdu_end_da_is_valid_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t da_is_valid; + + da_is_valid = HAL_RX_MSDU_END_DA_IS_VALID_GET(msdu_end); + + return da_is_valid; +} + +#define HAL_RX_MSDU_END_DA_IS_MCBC_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_DA_IS_MCBC_OFFSET)), \ + RX_MSDU_END_5_DA_IS_MCBC_MASK, \ + RX_MSDU_END_5_DA_IS_MCBC_LSB)) + + /** + * 
hal_rx_msdu_end_da_is_mcbc_get: API to check if pkt is MCBC + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: da_is_mcbc + */ +static inline uint8_t +hal_rx_msdu_end_da_is_mcbc_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t da_is_mcbc; + + da_is_mcbc = HAL_RX_MSDU_END_DA_IS_MCBC_GET(msdu_end); + + return da_is_mcbc; +} + +#define HAL_RX_MSDU_END_FIRST_MSDU_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_FIRST_MSDU_OFFSET)), \ + RX_MSDU_END_5_FIRST_MSDU_MASK, \ + RX_MSDU_END_5_FIRST_MSDU_LSB)) + + /** + * hal_rx_msdu_end_first_msdu_get: API to get first msdu status + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: first_msdu + */ +static inline uint8_t +hal_rx_msdu_end_first_msdu_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t first_msdu; + + first_msdu = HAL_RX_MSDU_END_FIRST_MSDU_GET(msdu_end); + + return first_msdu; +} + +#define HAL_RX_MSDU_END_LAST_MSDU_GET(_rx_msdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_end, \ + RX_MSDU_END_5_LAST_MSDU_OFFSET)), \ + RX_MSDU_END_5_LAST_MSDU_MASK, \ + RX_MSDU_END_5_LAST_MSDU_LSB)) + + /** + * hal_rx_msdu_end_last_msdu_get: API to get last msdu status + * from rx_msdu_end TLV + * + * @ buf: pointer to the start of RX PKT TLV headers + * Return: last_msdu + */ +static inline uint8_t +hal_rx_msdu_end_last_msdu_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + uint8_t last_msdu; + + last_msdu = HAL_RX_MSDU_END_LAST_MSDU_GET(msdu_end); + + return last_msdu; +} +/******************************************************************************* + * RX ERROR APIS + 
******************************************************************************/ + +#define HAL_RX_MPDU_END_DECRYPT_ERR_GET(_rx_mpdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_end),\ + RX_MPDU_END_1_RX_IN_TX_DECRYPT_BYP_OFFSET)), \ + RX_MPDU_END_1_RX_IN_TX_DECRYPT_BYP_MASK, \ + RX_MPDU_END_1_RX_IN_TX_DECRYPT_BYP_LSB)) + +/** + * hal_rx_mpdu_end_decrypt_err_get(): API to get the Decrypt ERR + * from rx_mpdu_end TLV + * + * @buf: pointer to the start of RX PKT TLV headers + * Return: uint32_t(decrypt_err) + */ +static inline uint32_t +hal_rx_mpdu_end_decrypt_err_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_end *mpdu_end = + &pkt_tlvs->mpdu_end_tlv.rx_mpdu_end; + uint32_t decrypt_err; + + decrypt_err = HAL_RX_MPDU_END_DECRYPT_ERR_GET(mpdu_end); + + return decrypt_err; +} + +#define HAL_RX_MPDU_END_MIC_ERR_GET(_rx_mpdu_end) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_end),\ + RX_MPDU_END_1_TKIP_MIC_ERR_OFFSET)), \ + RX_MPDU_END_1_TKIP_MIC_ERR_MASK, \ + RX_MPDU_END_1_TKIP_MIC_ERR_LSB)) + +/** + * hal_rx_mpdu_end_mic_err_get(): API to get the MIC ERR + * from rx_mpdu_end TLV + * + * @buf: pointer to the start of RX PKT TLV headers + * Return: uint32_t(mic_err) + */ +static inline uint32_t +hal_rx_mpdu_end_mic_err_get(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_mpdu_end *mpdu_end = + &pkt_tlvs->mpdu_end_tlv.rx_mpdu_end; + uint32_t mic_err; + + mic_err = HAL_RX_MPDU_END_MIC_ERR_GET(mpdu_end); + + return mic_err; +} + +/******************************************************************************* + * RX REO ERROR APIS + ******************************************************************************/ + +#define HAL_RX_LINK_DESC_MSDU0_PTR(link_desc) \ + ((struct rx_msdu_details *) \ + _OFFSET_TO_BYTE_PTR((link_desc),\ + RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET)) + +#define HAL_RX_NUM_MSDU_DESC 6 +#define HAL_RX_MAX_SAVED_RING_DESC 16 + +/* TODO: rework the structure 
*/ +struct hal_rx_msdu_list { + struct hal_rx_msdu_desc_info msdu_info[HAL_RX_NUM_MSDU_DESC]; + uint32_t sw_cookie[HAL_RX_NUM_MSDU_DESC]; + uint8_t rbm[HAL_RX_NUM_MSDU_DESC]; +}; + +struct hal_buf_info { + uint64_t paddr; + uint32_t sw_cookie; +}; + +/* This special cookie value will be used to indicate FW allocated buffers + * received through RXDMA2SW ring for RXDMA WARs */ +#define HAL_RX_COOKIE_SPECIAL 0x1fffff + +/** + * hal_rx_msdu_link_desc_get(): API to get the MSDU information + * from the MSDU link descriptor + * + * @msdu_link_desc: Opaque pointer used by HAL to get to the + * MSDU link descriptor (struct rx_msdu_link) + * + * @msdu_list: Return the list of MSDUs contained in this link descriptor + * + * @num_msdus: Number of MSDUs in the MPDU + * + * Return: void + */ +static inline void hal_rx_msdu_list_get(void *msdu_link_desc, + struct hal_rx_msdu_list *msdu_list, uint16_t *num_msdus) +{ + struct rx_msdu_details *msdu_details; + struct rx_msdu_desc_info *msdu_desc_info; + struct rx_msdu_link *msdu_link = (struct rx_msdu_link *)msdu_link_desc; + int i; + + msdu_details = HAL_RX_LINK_DESC_MSDU0_PTR(msdu_link); + + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] msdu_link=%pK msdu_details=%pK\n", + __func__, __LINE__, msdu_link, msdu_details); + + for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { + /* num_msdus received in mpdu descriptor may be incorrect + * sometimes due to HW issue. 
Check msdu buffer address also */ + if (HAL_RX_BUFFER_ADDR_31_0_GET( + &msdu_details[i].buffer_addr_info_details) == 0) { + /* set the last msdu bit in the prev msdu_desc_info */ + msdu_desc_info = + HAL_RX_MSDU_DESC_INFO_GET(&msdu_details[i - 1]); + HAL_RX_LAST_MSDU_IN_MPDU_FLAG_SET(msdu_desc_info, 1); + break; + } + msdu_desc_info = HAL_RX_MSDU_DESC_INFO_GET(&msdu_details[i]); + + /* set first MSDU bit or the last MSDU bit */ + if (!i) + HAL_RX_FIRST_MSDU_IN_MPDU_FLAG_SET(msdu_desc_info, 1); + else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) + HAL_RX_LAST_MSDU_IN_MPDU_FLAG_SET(msdu_desc_info, 1); + + msdu_list->msdu_info[i].msdu_flags = + HAL_RX_MSDU_FLAGS_GET(msdu_desc_info); + msdu_list->msdu_info[i].msdu_len = + HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info); + msdu_list->sw_cookie[i] = + HAL_RX_BUF_COOKIE_GET( + &msdu_details[i].buffer_addr_info_details); + msdu_list->rbm[i] = HAL_RX_BUF_RBM_GET( + &msdu_details[i].buffer_addr_info_details); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] i=%d sw_cookie=%d\n", + __func__, __LINE__, i, msdu_list->sw_cookie[i]); + } + *num_msdus = i; +} + +/** + * hal_rx_msdu_reo_dst_ind_get: Gets the REO + * destination ring ID from the msdu desc info + * + * @msdu_link_desc : Opaque cookie pointer used by HAL to get to + * the current descriptor + * + * Return: dst_ind (REO destination ring ID) + */ +static inline uint32_t +hal_rx_msdu_reo_dst_ind_get(void *msdu_link_desc) +{ + struct rx_msdu_details *msdu_details; + struct rx_msdu_desc_info *msdu_desc_info; + struct rx_msdu_link *msdu_link = (struct rx_msdu_link *)msdu_link_desc; + uint32_t dst_ind; + + msdu_details = HAL_RX_LINK_DESC_MSDU0_PTR(msdu_link); + + /* The first msdu in the link should exsist */ + msdu_desc_info = HAL_RX_MSDU_DESC_INFO_GET(&msdu_details[0]); + dst_ind = HAL_RX_MSDU_REO_DST_IND_GET(msdu_desc_info); + return dst_ind; +} + +/** + * hal_rx_reo_buf_paddr_get: Gets the physical address and + * cookie from the REO destination ring element + * + * @ 
hal_rx_desc_cookie: Opaque cookie pointer used by HAL to get to + * the current descriptor + * @ buf_info: structure to return the buffer information + * Return: void + */ +static inline void hal_rx_reo_buf_paddr_get(void *rx_desc, + struct hal_buf_info *buf_info) +{ + struct reo_destination_ring *reo_ring = + (struct reo_destination_ring *)rx_desc; + + buf_info->paddr = + (HAL_RX_REO_BUFFER_ADDR_31_0_GET(reo_ring) | + ((uint64_t)(HAL_RX_REO_BUFFER_ADDR_39_32_GET(reo_ring)) << 32)); + + buf_info->sw_cookie = HAL_RX_REO_BUF_COOKIE_GET(reo_ring); +} + +/** + * enum hal_reo_error_code: Indicates that type of buffer or descriptor + * + * @ HAL_RX_MSDU_BUF_ADDR_TYPE : Reo buffer address points to the MSDU buffer + * @ HAL_RX_MSDU_LINK_DESC_TYPE: Reo buffer address points to the link + * descriptor + */ +enum hal_rx_reo_buf_type { + HAL_RX_REO_MSDU_BUF_ADDR_TYPE = 0, + HAL_RX_REO_MSDU_LINK_DESC_TYPE, +}; + +#define HAL_RX_REO_BUF_TYPE_GET(reo_desc) (((*(((uint32_t *) reo_desc)+ \ + (REO_DESTINATION_RING_7_REO_DEST_BUFFER_TYPE_OFFSET >> 2))) & \ + REO_DESTINATION_RING_7_REO_DEST_BUFFER_TYPE_MASK) >> \ + REO_DESTINATION_RING_7_REO_DEST_BUFFER_TYPE_LSB) + +/** + * enum hal_reo_error_code: Error code describing the type of error detected + * + * @ HAL_REO_ERR_QUEUE_DESC_ADDR_0 : Reo queue descriptor provided in the + * REO_ENTRANCE ring is set to 0 + * @ HAL_REO_ERR_QUEUE_DESC_INVALID: Reo queue descriptor valid bit is NOT set + * @ HAL_REO_ERR_AMPDU_IN_NON_BA : AMPDU frame received without BA session + * having been setup + * @ HAL_REO_ERR_NON_BA_DUPLICATE : Non-BA session, SN equal to SSN, + * Retry bit set: duplicate frame + * @ HAL_REO_ERR_BA_DUPLICATE : BA session, duplicate frame + * @ HAL_REO_ERR_REGULAR_FRAME_2K_JUMP : A normal (management/data frame) + * received with 2K jump in SN + * @ HAL_REO_ERR_BAR_FRAME_2K_JUMP : A bar received with 2K jump in SSN + * @ HAL_REO_ERR_REGULAR_FRAME_OOR : A normal (management/data frame) received + * with SN falling within the OOR 
window + * @ HAL_REO_ERR_BAR_FRAME_OOR : A bar received with SSN falling within the + * OOR window + * @ HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION : A bar received without a BA session + * @ HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN : A bar received with SSN equal to SN + * @ HAL_REO_ERR_PN_CHECK_FAILED : PN Check Failed packet + * @ HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET : Frame is forwarded as a result + * of the Seq_2k_error_detected_flag been set in the REO Queue descriptor + * @ HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET : Frame is forwarded as a result + * of the pn_error_detected_flag been set in the REO Queue descriptor + * @ HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET : Frame is forwarded as a result of + * the queue descriptor(address) being blocked as SW/FW seems to be currently + * in the process of making updates to this descriptor + */ +enum hal_reo_error_code { + HAL_REO_ERR_QUEUE_DESC_ADDR_0 = 0, + HAL_REO_ERR_QUEUE_DESC_INVALID, + HAL_REO_ERR_AMPDU_IN_NON_BA, + HAL_REO_ERR_NON_BA_DUPLICATE, + HAL_REO_ERR_BA_DUPLICATE, + HAL_REO_ERR_REGULAR_FRAME_2K_JUMP, + HAL_REO_ERR_BAR_FRAME_2K_JUMP, + HAL_REO_ERR_REGULAR_FRAME_OOR, + HAL_REO_ERR_BAR_FRAME_OOR, + HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION, + HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN, + HAL_REO_ERR_PN_CHECK_FAILED, + HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET, + HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET, + HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET, + HAL_REO_ERR_MAX +}; + +/** + * enum hal_rxdma_error_code: Code describing the type of RxDMA error detected + * + * @HAL_RXDMA_ERR_OVERFLOW: MPDU frame is not complete due to a FIFO overflow + * @ HAL_RXDMA_ERR_OVERFLOW : MPDU frame is not complete due to a FIFO + * overflow + * @ HAL_RXDMA_ERR_MPDU_LENGTH : MPDU frame is not complete due to receiving + * incomplete + * MPDU from the PHY + * @ HAL_RXDMA_ERR_FCS : FCS check on the MPDU frame failed + * @ HAL_RXDMA_ERR_DECRYPT : Decryption error + * @ HAL_RXDMA_ERR_TKIP_MIC : TKIP MIC error + * @ HAL_RXDMA_ERR_UNENCRYPTED : Received a frame that was 
expected to be + * encrypted but wasn’t + * @ HAL_RXDMA_ERR_MSDU_LEN : MSDU related length error + * @ HAL_RXDMA_ERR_MSDU_LIMIT : Number of MSDUs in the MPDUs exceeded + * the max allowed + * @ HAL_RXDMA_ERR_WIFI_PARSE : wifi parsing error + * @ HAL_RXDMA_ERR_AMSDU_PARSE : Amsdu parsing error + * @ HAL_RXDMA_ERR_SA_TIMEOUT : Source Address search timeout + * @ HAL_RXDMA_ERR_DA_TIMEOUT : Destination Address search timeout + * @ HAL_RXDMA_ERR_FLOW_TIMEOUT : Flow Search Timeout + * @ HAL_RXDMA_ERR_FLUSH_REQUEST : RxDMA FIFO Flush request + * @ HAL_RXDMA_ERR_WAR : RxDMA WAR dummy errors + */ +enum hal_rxdma_error_code { + HAL_RXDMA_ERR_OVERFLOW = 0, + HAL_RXDMA_ERR_MPDU_LENGTH, + HAL_RXDMA_ERR_FCS, + HAL_RXDMA_ERR_DECRYPT, + HAL_RXDMA_ERR_TKIP_MIC, + HAL_RXDMA_ERR_UNENCRYPTED, + HAL_RXDMA_ERR_MSDU_LEN, + HAL_RXDMA_ERR_MSDU_LIMIT, + HAL_RXDMA_ERR_WIFI_PARSE, + HAL_RXDMA_ERR_AMSDU_PARSE, + HAL_RXDMA_ERR_SA_TIMEOUT, + HAL_RXDMA_ERR_DA_TIMEOUT, + HAL_RXDMA_ERR_FLOW_TIMEOUT, + HAL_RXDMA_ERR_FLUSH_REQUEST, + HAL_RXDMA_ERR_WAR = 31, + HAL_RXDMA_ERR_MAX +}; + +/** + * HW BM action settings in WBM release ring + */ +#define HAL_BM_ACTION_PUT_IN_IDLE_LIST 0 +#define HAL_BM_ACTION_RELEASE_MSDU_LIST 1 + +/** + * enum hal_rx_wbm_error_source: Indicates which module initiated the + * release of this buffer or descriptor + * + * @ HAL_RX_WBM_ERR_SRC_TQM : TQM released this buffer or descriptor + * @ HAL_RX_WBM_ERR_SRC_RXDMA: RXDMA released this buffer or descriptor + * @ HAL_RX_WBM_ERR_SRC_REO: REO released this buffer or descriptor + * @ HAL_RX_WBM_ERR_SRC_FW: FW released this buffer or descriptor + * @ HAL_RX_WBM_ERR_SRC_SW: SW released this buffer or descriptor + */ +enum hal_rx_wbm_error_source { + HAL_RX_WBM_ERR_SRC_TQM = 0, + HAL_RX_WBM_ERR_SRC_RXDMA, + HAL_RX_WBM_ERR_SRC_REO, + HAL_RX_WBM_ERR_SRC_FW, + HAL_RX_WBM_ERR_SRC_SW, +}; + +/** + * enum hal_rx_wbm_buf_type: Indicates that type of buffer or descriptor + * released + * + * @ HAL_RX_WBM_ERR_SRC_TQM : TQM released this 
buffer or descriptor + * @ HAL_RX_WBM_ERR_SRC_RXDMA: RXDMA released this buffer or descriptor + * @ HAL_RX_WBM_ERR_SRC_REO: REO released this buffer or descriptor + * @ HAL_RX_WBM_ERR_SRC_FW: FW released this buffer or descriptor + * @ HAL_RX_WBM_ERR_SRC_SW: SW released this buffer or descriptor + */ +enum hal_rx_wbm_buf_type { + HAL_RX_WBM_BUF_TYPE_REL_BUF = 0, + HAL_RX_WBM_BUF_TYPE_MSDU_LINK_DESC, + HAL_RX_WBM_BUF_TYPE_MPDU_LINK_DESC, + HAL_RX_WBM_BUF_TYPE_MSDU_EXT_DESC, + HAL_RX_WBM_BUF_TYPE_Q_EXT_DESC, +}; + +#define HAL_RX_REO_ERROR_GET(reo_desc) (((*(((uint32_t *) reo_desc)+ \ + (REO_DESTINATION_RING_7_REO_ERROR_CODE_OFFSET >> 2))) & \ + REO_DESTINATION_RING_7_REO_ERROR_CODE_MASK) >> \ + REO_DESTINATION_RING_7_REO_ERROR_CODE_LSB) + +/** + * hal_rx_is_pn_error() - Indicate if this error was caused by a + * PN check failure + * + * @reo_desc: opaque pointer used by HAL to get the REO destination entry + * + * Return: true: error caused by PN check, false: other error + */ +static inline bool hal_rx_reo_is_pn_error(void *rx_desc) +{ + struct reo_destination_ring *reo_desc = + (struct reo_destination_ring *)rx_desc; + + return ((HAL_RX_REO_ERROR_GET(reo_desc) == + HAL_REO_ERR_PN_CHECK_FAILED) | + (HAL_RX_REO_ERROR_GET(reo_desc) == + HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET)) ? + true : false; +} + +/** + * hal_rx_is_2k_jump() - Indicate if this error was caused by a 2K jump in + * the sequence number + * + * @ring_desc: opaque pointer used by HAL to get the REO destination entry + * + * Return: true: error caused by 2K jump, false: other error + */ +static inline bool hal_rx_reo_is_2k_jump(void *rx_desc) +{ + struct reo_destination_ring *reo_desc = + (struct reo_destination_ring *)rx_desc; + + return ((HAL_RX_REO_ERROR_GET(reo_desc) == + HAL_REO_ERR_REGULAR_FRAME_2K_JUMP) | + (HAL_RX_REO_ERROR_GET(reo_desc) == + HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET)) ? 
+ true : false; +} + +/** + * hal_rx_msdu_link_desc_set: Retrieves MSDU Link Descriptor to WBM + * + * @ soc : HAL version of the SOC pointer + * @ src_srng_desc : void pointer to the WBM Release Ring descriptor + * @ buf_addr_info : void pointer to the buffer_addr_info + * @ bm_action : put in IDLE list or release to MSDU_LIST + * + * Return: void + */ +/* look at implementation at dp_hw_link_desc_pool_setup()*/ +static inline void hal_rx_msdu_link_desc_set(struct hal_soc *soc, + void *src_srng_desc, void *buf_addr_info, + uint8_t bm_action) +{ + struct wbm_release_ring *wbm_rel_srng = + (struct wbm_release_ring *)src_srng_desc; + + /* Structure copy !!! */ + wbm_rel_srng->released_buff_or_desc_addr_info = + *((struct buffer_addr_info *)buf_addr_info); + HAL_DESC_SET_FIELD(src_srng_desc, WBM_RELEASE_RING_2, + RELEASE_SOURCE_MODULE, HAL_RX_WBM_ERR_SRC_SW); + HAL_DESC_SET_FIELD(src_srng_desc, WBM_RELEASE_RING_2, BM_ACTION, + bm_action); + HAL_DESC_SET_FIELD(src_srng_desc, WBM_RELEASE_RING_2, + BUFFER_OR_DESC_TYPE, HAL_RX_WBM_BUF_TYPE_MSDU_LINK_DESC); +} + +/* + * hal_rx_msdu_link_desc_reinject: Re-injects the MSDU link descriptor to + * REO entrance ring + * + * @ soc: HAL version of the SOC pointer + * @ pa: Physical address of the MSDU Link Descriptor + * @ cookie: SW cookie to get to the virtual address + * @ error_enabled_reo_q: Argument to determine whether this needs to go + * to the error enabled REO queue + * + * Return: void + */ +static inline void hal_rx_msdu_link_desc_reinject(struct hal_soc *soc, + uint64_t pa, uint32_t cookie, bool error_enabled_reo_q) +{ + /* TODO */ +} + +/** + * HAL_RX_BUF_ADDR_INFO_GET: Returns the address of the + * BUFFER_ADDR_INFO, give the RX descriptor + * (Assumption -- BUFFER_ADDR_INFO is the + * first field in the descriptor structure) + */ +#define HAL_RX_BUF_ADDR_INFO_GET(ring_desc) ((void *)(ring_desc)) + +#define HAL_RX_REO_BUF_ADDR_INFO_GET HAL_RX_BUF_ADDR_INFO_GET + +#define HAL_RX_WBM_BUF_ADDR_INFO_GET 
HAL_RX_BUF_ADDR_INFO_GET

/**
 * hal_rx_ret_buf_manager_get: Returns the "return_buffer_manager"
 * from the BUFFER_ADDR_INFO structure
 * given a REO destination ring descriptor.
 * @ ring_desc: RX(REO/WBM release) destination ring descriptor
 *
 * Return: uint8_t (value of the return_buffer_manager)
 */
static inline
uint8_t hal_rx_ret_buf_manager_get(void *ring_desc)
{
	/*
	 * The following macro takes buf_addr_info as argument,
	 * but since buf_addr_info is the first field in ring_desc
	 * Hence the following call is OK
	 */
	return HAL_RX_BUF_RBM_GET(ring_desc);
}


/*******************************************************************************
 * RX WBM ERROR APIS
 ******************************************************************************/

/* extract the releasing module id from WBM release ring word 2 */
#define HAL_RX_WBM_ERR_SRC_GET(wbm_desc) (((*(((uint32_t *) wbm_desc)+ \
	(WBM_RELEASE_RING_2_RELEASE_SOURCE_MODULE_OFFSET >> 2))) & \
	WBM_RELEASE_RING_2_RELEASE_SOURCE_MODULE_MASK) >> \
	WBM_RELEASE_RING_2_RELEASE_SOURCE_MODULE_LSB)

/* extract the released buffer/descriptor type from WBM release ring word 2 */
#define HAL_RX_WBM_BUF_TYPE_GET(wbm_desc) (((*(((uint32_t *) wbm_desc)+ \
	(WBM_RELEASE_RING_2_BUFFER_OR_DESC_TYPE_OFFSET >> 2))) & \
	WBM_RELEASE_RING_2_BUFFER_OR_DESC_TYPE_MASK) >> \
	WBM_RELEASE_RING_2_BUFFER_OR_DESC_TYPE_LSB)

/**
 * enum - hal_rx_wbm_reo_push_reason: Indicates why REO pushed
 * the frame to this release ring
 *
 * @ HAL_RX_WBM_REO_PSH_RSN_ERROR : Reo detected an error and pushed this
 * frame to this queue
 * @ HAL_RX_WBM_REO_PSH_RSN_ROUTE: Reo pushed the frame to this queue per
 * received routing instructions. No error within REO was detected
 */
enum hal_rx_wbm_reo_push_reason {
	HAL_RX_WBM_REO_PSH_RSN_ERROR = 0,
	HAL_RX_WBM_REO_PSH_RSN_ROUTE,
};

#define HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc) (((*(((uint32_t *) wbm_desc)+ \
	(WBM_RELEASE_RING_2_REO_PUSH_REASON_OFFSET >> 2))) & \
	WBM_RELEASE_RING_2_REO_PUSH_REASON_MASK) >> \
	WBM_RELEASE_RING_2_REO_PUSH_REASON_LSB)

#define HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc) (((*(((uint32_t *) wbm_desc)+ \
	(WBM_RELEASE_RING_2_REO_ERROR_CODE_OFFSET >> 2))) & \
	WBM_RELEASE_RING_2_REO_ERROR_CODE_MASK) >> \
	WBM_RELEASE_RING_2_REO_ERROR_CODE_LSB)

/**
 * enum hal_rx_wbm_rxdma_push_reason: Indicates why REO pushed the frame to
 * this release ring
 *
 * @ HAL_RX_WBM_RXDMA_PSH_RSN_ERROR : RXDMA detected an error and pushed
 * this frame to this queue
 * @ HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE: RXDMA pushed the frame to this queue
 * per received routing instructions. No error within RXDMA was detected
 */
enum hal_rx_wbm_rxdma_push_reason {
	HAL_RX_WBM_RXDMA_PSH_RSN_ERROR = 0,
	HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE,
};

#define HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc) \
	(((*(((uint32_t *) wbm_desc) + \
	(WBM_RELEASE_RING_2_RXDMA_PUSH_REASON_OFFSET >> 2))) & \
	WBM_RELEASE_RING_2_RXDMA_PUSH_REASON_MASK) >> \
	WBM_RELEASE_RING_2_RXDMA_PUSH_REASON_LSB)

#define HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc) \
	(((*(((uint32_t *) wbm_desc) + \
	(WBM_RELEASE_RING_2_RXDMA_ERROR_CODE_OFFSET >> 2))) & \
	WBM_RELEASE_RING_2_RXDMA_ERROR_CODE_MASK) >> \
	WBM_RELEASE_RING_2_RXDMA_ERROR_CODE_LSB)

/* first/last MSDU indications live in WBM release ring word 4 */
#define HAL_RX_WBM_FIRST_MSDU_GET(wbm_desc) \
	(((*(((uint32_t *) wbm_desc) + \
	(WBM_RELEASE_RING_4_FIRST_MSDU_OFFSET >> 2))) & \
	WBM_RELEASE_RING_4_FIRST_MSDU_MASK) >> \
	WBM_RELEASE_RING_4_FIRST_MSDU_LSB)

#define HAL_RX_WBM_LAST_MSDU_GET(wbm_desc) \
	(((*(((uint32_t *) wbm_desc) + \
	(WBM_RELEASE_RING_4_LAST_MSDU_OFFSET >> 2))) & \
	WBM_RELEASE_RING_4_LAST_MSDU_MASK) >> \
	WBM_RELEASE_RING_4_LAST_MSDU_LSB)

#define HAL_RX_WBM_BUF_COOKIE_GET(wbm_desc) \
	HAL_RX_BUF_COOKIE_GET(&((struct wbm_release_ring *) \
	wbm_desc)->released_buff_or_desc_addr_info)

/**
 * hal_rx_dump_rx_attention_tlv: dump RX attention TLV in structured
 * human readable format.
 * @ rx_attn: pointer the rx_attention TLV in pkt.
 * @ dbg_level: log level.
 *
 * Return: void
 */
static inline void hal_rx_dump_rx_attention_tlv(struct rx_attention *rx_attn,
	uint8_t dbg_level)
{
	/* NOTE(review): single trace call; the field order in the format
	 * string must stay in sync with the argument list below
	 */
	QDF_TRACE(QDF_MODULE_ID_DP, dbg_level,
		"rx_attention tlv="
		"rxpcu_mpdu_filter_in_category: %d "
		"sw_frame_group_id: %d "
		"reserved_0: %d "
		"phy_ppdu_id: %d "
		"first_mpdu : %d "
		"reserved_1a: %d "
		"mcast_bcast: %d "
		"ast_index_not_found: %d "
		"ast_index_timeout: %d "
		"power_mgmt: %d "
		"non_qos: %d "
		"null_data: %d "
		"mgmt_type: %d "
		"ctrl_type: %d "
		"more_data: %d "
		"eosp: %d "
		"a_msdu_error: %d "
		"fragment_flag: %d "
		"order: %d "
		"cce_match: %d "
		"overflow_err: %d "
		"msdu_length_err: %d "
		"tcp_udp_chksum_fail: %d "
		"ip_chksum_fail: %d "
		"sa_idx_invalid: %d "
		"da_idx_invalid: %d "
		"reserved_1b: %d "
		"rx_in_tx_decrypt_byp: %d "
		"encrypt_required: %d "
		"directed: %d "
		"buffer_fragment: %d "
		"mpdu_length_err: %d "
		"tkip_mic_err: %d "
		"decrypt_err: %d "
		"unencrypted_frame_err: %d "
		"fcs_err: %d "
		"flow_idx_timeout: %d "
		"flow_idx_invalid: %d "
		"wifi_parser_error: %d "
		"amsdu_parser_error: %d "
		"sa_idx_timeout: %d "
		"da_idx_timeout: %d "
		"msdu_limit_error: %d "
		"da_is_valid: %d "
		"da_is_mcbc: %d "
		"sa_is_valid: %d "
		"decrypt_status_code: %d "
		"rx_bitmap_not_updated: %d "
		"reserved_2: %d "
		"msdu_done: %d ",
		rx_attn->rxpcu_mpdu_filter_in_category,
		rx_attn->sw_frame_group_id,
		rx_attn->reserved_0,
		rx_attn->phy_ppdu_id,
		rx_attn->first_mpdu,
		rx_attn->reserved_1a,
		rx_attn->mcast_bcast,
		rx_attn->ast_index_not_found,
		rx_attn->ast_index_timeout,
		rx_attn->power_mgmt,
		rx_attn->non_qos,
		rx_attn->null_data,
		rx_attn->mgmt_type,
		rx_attn->ctrl_type,
		rx_attn->more_data,
		rx_attn->eosp,
		rx_attn->a_msdu_error,
		rx_attn->fragment_flag,
		rx_attn->order,
		rx_attn->cce_match,
		rx_attn->overflow_err,
		rx_attn->msdu_length_err,
		rx_attn->tcp_udp_chksum_fail,
		rx_attn->ip_chksum_fail,
		rx_attn->sa_idx_invalid,
		rx_attn->da_idx_invalid,
		rx_attn->reserved_1b,
		rx_attn->rx_in_tx_decrypt_byp,
		rx_attn->encrypt_required,
		rx_attn->directed,
		rx_attn->buffer_fragment,
		rx_attn->mpdu_length_err,
		rx_attn->tkip_mic_err,
		rx_attn->decrypt_err,
		rx_attn->unencrypted_frame_err,
		rx_attn->fcs_err,
		rx_attn->flow_idx_timeout,
		rx_attn->flow_idx_invalid,
		rx_attn->wifi_parser_error,
		rx_attn->amsdu_parser_error,
		rx_attn->sa_idx_timeout,
		rx_attn->da_idx_timeout,
		rx_attn->msdu_limit_error,
		rx_attn->da_is_valid,
		rx_attn->da_is_mcbc,
		rx_attn->sa_is_valid,
		rx_attn->decrypt_status_code,
		rx_attn->rx_bitmap_not_updated,
		rx_attn->reserved_2,
		rx_attn->msdu_done);


}

/**
 * hal_rx_dump_mpdu_start_tlv: dump RX mpdu_start TLV in structured
 * human readable format.
 * @ mpdu_start: pointer the rx_attention TLV in pkt.
 * @ dbg_level: log level.
+ * + * Return: void + */ +static inline void hal_rx_dump_mpdu_start_tlv(struct rx_mpdu_start *mpdu_start, +uint8_t dbg_level) +{ + struct rx_mpdu_info *mpdu_info = + (struct rx_mpdu_info *) &mpdu_start->rx_mpdu_info_details; + QDF_TRACE(QDF_MODULE_ID_DP, dbg_level, + "rx_mpdu_start tlv - " + "rxpcu_mpdu_filter_in_category: %d " + "sw_frame_group_id: %d " + "ndp_frame: %d " + "phy_err: %d " + "phy_err_during_mpdu_header: %d " + "protocol_version_err: %d " + "ast_based_lookup_valid: %d " + "phy_ppdu_id: %d " + "ast_index: %d " + "sw_peer_id: %d " + "mpdu_frame_control_valid: %d " + "mpdu_duration_valid: %d " + "mac_addr_ad1_valid: %d " + "mac_addr_ad2_valid: %d " + "mac_addr_ad3_valid: %d " + "mac_addr_ad4_valid: %d " + "mpdu_sequence_control_valid: %d " + "mpdu_qos_control_valid: %d " + "mpdu_ht_control_valid: %d " + "frame_encryption_info_valid: %d " + "fr_ds: %d " + "to_ds: %d " + "encrypted: %d " + "mpdu_retry: %d " + "mpdu_sequence_number: %d " + "epd_en: %d " + "all_frames_shall_be_encrypted: %d " + "encrypt_type: %d " + "mesh_sta: %d " + "bssid_hit: %d " + "bssid_number: %d " + "tid: %d " + "pn_31_0: %d " + "pn_63_32: %d " + "pn_95_64: %d " + "pn_127_96: %d " + "peer_meta_data: %d " + "rxpt_classify_info.reo_destination_indication: %d " + "rxpt_classify_info.use_flow_id_toeplitz_clfy: %d " + "rx_reo_queue_desc_addr_31_0: %d " + "rx_reo_queue_desc_addr_39_32: %d " + "receive_queue_number: %d " + "pre_delim_err_warning: %d " + "first_delim_err: %d " + "key_id_octet: %d " + "new_peer_entry: %d " + "decrypt_needed: %d " + "decap_type: %d " + "rx_insert_vlan_c_tag_padding: %d " + "rx_insert_vlan_s_tag_padding: %d " + "strip_vlan_c_tag_decap: %d " + "strip_vlan_s_tag_decap: %d " + "pre_delim_count: %d " + "ampdu_flag: %d " + "bar_frame: %d " + "mpdu_length: %d " + "first_mpdu: %d " + "mcast_bcast: %d " + "ast_index_not_found: %d " + "ast_index_timeout: %d " + "power_mgmt: %d " + "non_qos: %d " + "null_data: %d " + "mgmt_type: %d " + "ctrl_type: %d " + "more_data: 
%d " + "eosp: %d " + "fragment_flag: %d " + "order: %d " + "u_apsd_trigger: %d " + "encrypt_required: %d " + "directed: %d " + "mpdu_frame_control_field: %d " + "mpdu_duration_field: %d " + "mac_addr_ad1_31_0: %d " + "mac_addr_ad1_47_32: %d " + "mac_addr_ad2_15_0: %d " + "mac_addr_ad2_47_16: %d " + "mac_addr_ad3_31_0: %d " + "mac_addr_ad3_47_32: %d " + "mpdu_sequence_control_field: %d " + "mac_addr_ad4_31_0: %d " + "mac_addr_ad4_47_32: %d " + "mpdu_qos_control_field: %d " + "mpdu_ht_control_field: %d ", + mpdu_info->rxpcu_mpdu_filter_in_category, + mpdu_info->sw_frame_group_id, + mpdu_info->ndp_frame, + mpdu_info->phy_err, + mpdu_info->phy_err_during_mpdu_header, + mpdu_info->protocol_version_err, + mpdu_info->ast_based_lookup_valid, + mpdu_info->phy_ppdu_id, + mpdu_info->ast_index, + mpdu_info->sw_peer_id, + mpdu_info->mpdu_frame_control_valid, + mpdu_info->mpdu_duration_valid, + mpdu_info->mac_addr_ad1_valid, + mpdu_info->mac_addr_ad2_valid, + mpdu_info->mac_addr_ad3_valid, + mpdu_info->mac_addr_ad4_valid, + mpdu_info->mpdu_sequence_control_valid, + mpdu_info->mpdu_qos_control_valid, + mpdu_info->mpdu_ht_control_valid, + mpdu_info->frame_encryption_info_valid, + mpdu_info->fr_ds, + mpdu_info->to_ds, + mpdu_info->encrypted, + mpdu_info->mpdu_retry, + mpdu_info->mpdu_sequence_number, + mpdu_info->epd_en, + mpdu_info->all_frames_shall_be_encrypted, + mpdu_info->encrypt_type, + mpdu_info->mesh_sta, + mpdu_info->bssid_hit, + mpdu_info->bssid_number, + mpdu_info->tid, + mpdu_info->pn_31_0, + mpdu_info->pn_63_32, + mpdu_info->pn_95_64, + mpdu_info->pn_127_96, + mpdu_info->peer_meta_data, + mpdu_info->rxpt_classify_info_details.reo_destination_indication, + mpdu_info->rxpt_classify_info_details.use_flow_id_toeplitz_clfy, + mpdu_info->rx_reo_queue_desc_addr_31_0, + mpdu_info->rx_reo_queue_desc_addr_39_32, + mpdu_info->receive_queue_number, + mpdu_info->pre_delim_err_warning, + mpdu_info->first_delim_err, + mpdu_info->key_id_octet, + mpdu_info->new_peer_entry, + 
mpdu_info->decrypt_needed, + mpdu_info->decap_type, + mpdu_info->rx_insert_vlan_c_tag_padding, + mpdu_info->rx_insert_vlan_s_tag_padding, + mpdu_info->strip_vlan_c_tag_decap, + mpdu_info->strip_vlan_s_tag_decap, + mpdu_info->pre_delim_count, + mpdu_info->ampdu_flag, + mpdu_info->bar_frame, + mpdu_info->mpdu_length, + mpdu_info->first_mpdu, + mpdu_info->mcast_bcast, + mpdu_info->ast_index_not_found, + mpdu_info->ast_index_timeout, + mpdu_info->power_mgmt, + mpdu_info->non_qos, + mpdu_info->null_data, + mpdu_info->mgmt_type, + mpdu_info->ctrl_type, + mpdu_info->more_data, + mpdu_info->eosp, + mpdu_info->fragment_flag, + mpdu_info->order, + mpdu_info->u_apsd_trigger, + mpdu_info->encrypt_required, + mpdu_info->directed, + mpdu_info->mpdu_frame_control_field, + mpdu_info->mpdu_duration_field, + mpdu_info->mac_addr_ad1_31_0, + mpdu_info->mac_addr_ad1_47_32, + mpdu_info->mac_addr_ad2_15_0, + mpdu_info->mac_addr_ad2_47_16, + mpdu_info->mac_addr_ad3_31_0, + mpdu_info->mac_addr_ad3_47_32, + mpdu_info->mpdu_sequence_control_field, + mpdu_info->mac_addr_ad4_31_0, + mpdu_info->mac_addr_ad4_47_32, + mpdu_info->mpdu_qos_control_field, + mpdu_info->mpdu_ht_control_field); +} + +/** + * hal_rx_dump_msdu_start_tlv: dump RX msdu_start TLV in structured + * human readable format. + * @ msdu_start: pointer the msdu_start TLV in pkt. + * @ dbg_level: log level. 
+ * + * Return: void + */ +static void hal_rx_dump_msdu_start_tlv(struct rx_msdu_start *msdu_start, + uint8_t dbg_level) +{ + QDF_TRACE(QDF_MODULE_ID_DP, dbg_level, + "rx_msdu_start tlv - " + "rxpcu_mpdu_filter_in_category: %d " + "sw_frame_group_id: %d " + "phy_ppdu_id: %d " + "msdu_length: %d " + "ipsec_esp: %d " + "l3_offset: %d " + "ipsec_ah: %d " + "l4_offset: %d " + "msdu_number: %d " + "decap_format: %d " + "ipv4_proto: %d " + "ipv6_proto: %d " + "tcp_proto: %d " + "udp_proto: %d " + "ip_frag: %d " + "tcp_only_ack: %d " + "da_is_bcast_mcast: %d " + "ip4_protocol_ip6_next_header: %d " + "toeplitz_hash_2_or_4: %d " + "flow_id_toeplitz: %d " + "user_rssi: %d " + "pkt_type: %d " + "stbc: %d " + "sgi: %d " + "rate_mcs: %d " + "receive_bandwidth: %d " + "reception_type: %d " +#if !defined(QCA_WIFI_QCA6290_11AX) + "toeplitz_hash: %d " + "nss: %d " +#endif + "ppdu_start_timestamp: %d " + "sw_phy_meta_data: %d ", + msdu_start->rxpcu_mpdu_filter_in_category, + msdu_start->sw_frame_group_id, + msdu_start->phy_ppdu_id, + msdu_start->msdu_length, + msdu_start->ipsec_esp, + msdu_start->l3_offset, + msdu_start->ipsec_ah, + msdu_start->l4_offset, + msdu_start->msdu_number, + msdu_start->decap_format, + msdu_start->ipv4_proto, + msdu_start->ipv6_proto, + msdu_start->tcp_proto, + msdu_start->udp_proto, + msdu_start->ip_frag, + msdu_start->tcp_only_ack, + msdu_start->da_is_bcast_mcast, + msdu_start->ip4_protocol_ip6_next_header, + msdu_start->toeplitz_hash_2_or_4, + msdu_start->flow_id_toeplitz, + msdu_start->user_rssi, + msdu_start->pkt_type, + msdu_start->stbc, + msdu_start->sgi, + msdu_start->rate_mcs, + msdu_start->receive_bandwidth, + msdu_start->reception_type, +#if !defined(QCA_WIFI_QCA6290_11AX) + msdu_start->toeplitz_hash, + msdu_start->nss, +#endif + msdu_start->ppdu_start_timestamp, + msdu_start->sw_phy_meta_data); +} + +/** + * hal_rx_dump_msdu_end_tlv: dump RX msdu_end TLV in structured + * human readable format. + * @ msdu_end: pointer the msdu_end TLV in pkt. 
+ * @ dbg_level: log level. + * + * Return: void + */ +static inline void hal_rx_dump_msdu_end_tlv(struct rx_msdu_end *msdu_end, + uint8_t dbg_level) +{ + QDF_TRACE(QDF_MODULE_ID_DP, dbg_level, + "rx_msdu_end tlv - " + "rxpcu_mpdu_filter_in_category: %d " + "sw_frame_group_id: %d " + "phy_ppdu_id: %d " + "ip_hdr_chksum: %d " + "tcp_udp_chksum: %d " + "key_id_octet: %d " + "cce_super_rule: %d " + "cce_classify_not_done_truncat: %d " + "cce_classify_not_done_cce_dis: %d " + "ext_wapi_pn_63_48: %d " + "ext_wapi_pn_95_64: %d " + "ext_wapi_pn_127_96: %d " + "reported_mpdu_length: %d " + "first_msdu: %d " + "last_msdu: %d " + "sa_idx_timeout: %d " + "da_idx_timeout: %d " + "msdu_limit_error: %d " + "flow_idx_timeout: %d " + "flow_idx_invalid: %d " + "wifi_parser_error: %d " + "amsdu_parser_error: %d " + "sa_is_valid: %d " + "da_is_valid: %d " + "da_is_mcbc: %d " + "l3_header_padding: %d " + "ipv6_options_crc: %d " + "tcp_seq_number: %d " + "tcp_ack_number: %d " + "tcp_flag: %d " + "lro_eligible: %d " + "window_size: %d " + "da_offset: %d " + "sa_offset: %d " + "da_offset_valid: %d " + "sa_offset_valid: %d " + "rule_indication_31_0: %d " + "rule_indication_63_32: %d " + "sa_idx: %d " + "da_idx: %d " + "msdu_drop: %d " + "reo_destination_indication: %d " + "flow_idx: %d " + "fse_metadata: %d " + "cce_metadata: %d " + "sa_sw_peer_id: %d ", + msdu_end->rxpcu_mpdu_filter_in_category, + msdu_end->sw_frame_group_id, + msdu_end->phy_ppdu_id, + msdu_end->ip_hdr_chksum, + msdu_end->tcp_udp_chksum, + msdu_end->key_id_octet, + msdu_end->cce_super_rule, + msdu_end->cce_classify_not_done_truncate, + msdu_end->cce_classify_not_done_cce_dis, + msdu_end->ext_wapi_pn_63_48, + msdu_end->ext_wapi_pn_95_64, + msdu_end->ext_wapi_pn_127_96, + msdu_end->reported_mpdu_length, + msdu_end->first_msdu, + msdu_end->last_msdu, + msdu_end->sa_idx_timeout, + msdu_end->da_idx_timeout, + msdu_end->msdu_limit_error, + msdu_end->flow_idx_timeout, + msdu_end->flow_idx_invalid, + msdu_end->wifi_parser_error, 
+ msdu_end->amsdu_parser_error, + msdu_end->sa_is_valid, + msdu_end->da_is_valid, + msdu_end->da_is_mcbc, + msdu_end->l3_header_padding, + msdu_end->ipv6_options_crc, + msdu_end->tcp_seq_number, + msdu_end->tcp_ack_number, + msdu_end->tcp_flag, + msdu_end->lro_eligible, + msdu_end->window_size, + msdu_end->da_offset, + msdu_end->sa_offset, + msdu_end->da_offset_valid, + msdu_end->sa_offset_valid, + msdu_end->rule_indication_31_0, + msdu_end->rule_indication_63_32, + msdu_end->sa_idx, + msdu_end->da_idx, + msdu_end->msdu_drop, + msdu_end->reo_destination_indication, + msdu_end->flow_idx, + msdu_end->fse_metadata, + msdu_end->cce_metadata, + msdu_end->sa_sw_peer_id); +} + +/** + * hal_rx_dump_mpdu_end_tlv: dump RX mpdu_end TLV in structured + * human readable format. + * @ mpdu_end: pointer the mpdu_end TLV in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static inline void hal_rx_dump_mpdu_end_tlv(struct rx_mpdu_end *mpdu_end, + uint8_t dbg_level) +{ + QDF_TRACE(QDF_MODULE_ID_DP, dbg_level, + "rx_mpdu_end tlv - " + "rxpcu_mpdu_filter_in_category: %d " + "sw_frame_group_id: %d " + "phy_ppdu_id: %d " + "unsup_ktype_short_frame: %d " + "rx_in_tx_decrypt_byp: %d " + "overflow_err: %d " + "mpdu_length_err: %d " + "tkip_mic_err: %d " + "decrypt_err: %d " + "unencrypted_frame_err: %d " + "pn_fields_contain_valid_info: %d " + "fcs_err: %d " + "msdu_length_err: %d " + "rxdma0_destination_ring: %d " + "rxdma1_destination_ring: %d " + "decrypt_status_code: %d " + "rx_bitmap_not_updated: %d ", + mpdu_end->rxpcu_mpdu_filter_in_category, + mpdu_end->sw_frame_group_id, + mpdu_end->phy_ppdu_id, + mpdu_end->unsup_ktype_short_frame, + mpdu_end->rx_in_tx_decrypt_byp, + mpdu_end->overflow_err, + mpdu_end->mpdu_length_err, + mpdu_end->tkip_mic_err, + mpdu_end->decrypt_err, + mpdu_end->unencrypted_frame_err, + mpdu_end->pn_fields_contain_valid_info, + mpdu_end->fcs_err, + mpdu_end->msdu_length_err, + mpdu_end->rxdma0_destination_ring, + mpdu_end->rxdma1_destination_ring, + 
mpdu_end->decrypt_status_code, + mpdu_end->rx_bitmap_not_updated); +} + +/** + * hal_rx_dump_pkt_hdr_tlv: dump RX pkt header TLV in hex format + * @ pkt_hdr_tlv: pointer the pkt_hdr_tlv in pkt. + * @ dbg_level: log level. + * + * Return: void + */ +static inline void hal_rx_dump_pkt_hdr_tlv(struct rx_pkt_hdr_tlv *pkt_hdr_tlv, + uint8_t dbg_level) +{ + QDF_TRACE(QDF_MODULE_ID_DP, dbg_level, + "\n---------------\n" + "rx_pkt_hdr_tlv \n" + "---------------\n" + "phy_ppdu_id %d \n", + pkt_hdr_tlv->phy_ppdu_id); + + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, dbg_level, + pkt_hdr_tlv->rx_pkt_hdr, 128); +} + +/** + * hal_rx_dump_pkt_tlvs: API to print all member elements of + * RX TLVs + * @ buf: pointer the pkt buffer. + * @ dbg_level: log level. + * + * Return: void + */ +static inline void hal_rx_dump_pkt_tlvs(uint8_t *buf, uint8_t dbg_level) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *) buf; + struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn; + struct rx_mpdu_start *mpdu_start = + &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start; + struct rx_msdu_start *msdu_start = + &pkt_tlvs->msdu_start_tlv.rx_msdu_start; + struct rx_mpdu_end *mpdu_end = &pkt_tlvs->mpdu_end_tlv.rx_mpdu_end; + struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end; + struct rx_pkt_hdr_tlv *pkt_hdr_tlv = &pkt_tlvs->pkt_hdr_tlv; + + hal_rx_dump_rx_attention_tlv(rx_attn, dbg_level); + hal_rx_dump_mpdu_start_tlv(mpdu_start, dbg_level); + hal_rx_dump_msdu_start_tlv(msdu_start, dbg_level); + hal_rx_dump_mpdu_end_tlv(mpdu_end, dbg_level); + hal_rx_dump_msdu_end_tlv(msdu_end, dbg_level); + hal_rx_dump_pkt_hdr_tlv(pkt_hdr_tlv, dbg_level); +} + +/** + * hal_srng_ring_id_get: API to retrieve ring id from hal ring + * structure + * @hal_ring: pointer to hal_srng structure + * + * Return: ring_id + */ +static inline uint8_t hal_srng_ring_id_get(void *hal_ring) +{ + return ((struct hal_srng *)hal_ring)->ring_id; +} + +/* Rx MSDU link pointer info */ +struct hal_rx_msdu_link_ptr_info { + struct 
rx_msdu_link msdu_link; + struct hal_buf_info msdu_link_buf_info; +}; + +/** + * hal_rx_get_pkt_tlvs(): Function to retrieve pkt tlvs from nbuf + * + * @nbuf: Pointer to data buffer field + * Returns: pointer to rx_pkt_tlvs + */ +static inline +struct rx_pkt_tlvs *hal_rx_get_pkt_tlvs(uint8_t *rx_buf_start) +{ + return (struct rx_pkt_tlvs *)rx_buf_start; +} + +/** + * hal_rx_get_mpdu_info(): Function to retrieve mpdu info from pkt tlvs + * + * @pkt_tlvs: Pointer to pkt_tlvs + * Returns: pointer to rx_mpdu_info structure + */ +static inline +struct rx_mpdu_info *hal_rx_get_mpdu_info(struct rx_pkt_tlvs *pkt_tlvs) +{ + return &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start.rx_mpdu_info_details; +} + +/** + * hal_rx_get_rx_sequence(): Function to retrieve rx sequence number + * + * @nbuf: Network buffer + * Returns: rx sequence number + */ +#define DOT11_SEQ_FRAG_MASK 0x000f +#define DOT11_FC1_MORE_FRAG_OFFSET 0x04 + +#define HAL_RX_MPDU_GET_SEQUENCE_NUMBER(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_MASK, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_LSB)) +static inline +uint16_t hal_rx_get_rx_sequence(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + uint16_t seq_number = 0; + + seq_number = + HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info) >> 4; + + /* Skip first 4-bits for fragment number */ + return seq_number; +} + +/** + * hal_rx_get_rx_fragment_number(): Function to retrieve rx fragment number + * + * @nbuf: Network buffer + * Returns: rx fragment number + */ +static inline +uint8_t hal_rx_get_rx_fragment_number(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + uint8_t frag_number = 0; + + frag_number = HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info) & + DOT11_SEQ_FRAG_MASK; + 
+ /* Return first 4 bits as fragment number */ + return frag_number; +} + +#define HAL_RX_MPDU_GET_FRAME_CONTROL_FIELD(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_14_MPDU_FRAME_CONTROL_FIELD_OFFSET)), \ + RX_MPDU_INFO_14_MPDU_FRAME_CONTROL_FIELD_MASK, \ + RX_MPDU_INFO_14_MPDU_FRAME_CONTROL_FIELD_LSB)) +/** + * hal_rx_get_rx_more_frag_bit(): Function to retrieve more fragment bit + * + * @nbuf: Network buffer + * Returns: rx more fragment bit + */ +static inline +uint8_t hal_rx_get_rx_more_frag_bit(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + uint16_t frame_ctrl = 0; + + frame_ctrl = HAL_RX_MPDU_GET_FRAME_CONTROL_FIELD(rx_mpdu_info) >> + DOT11_FC1_MORE_FRAG_OFFSET; + + /* more fragment bit if at offset bit 4 */ + return frame_ctrl; +} + +/** + * hal_rx_get_frame_ctrl_field(): Function to retrieve frame control field + * + * @nbuf: Network buffer + * Returns: rx more fragment bit + * + */ +static inline +uint16_t hal_rx_get_frame_ctrl_field(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + uint16_t frame_ctrl = 0; + + frame_ctrl = HAL_RX_MPDU_GET_FRAME_CONTROL_FIELD(rx_mpdu_info); + + return frame_ctrl; +} + +/* + * hal_rx_msdu_is_wlan_mcast(): Check if the buffer is for multicast address + * + * @nbuf: Network buffer + * Returns: flag to indicate whether the nbuf has MC/BC address + */ +static inline +uint32_t hal_rx_msdu_is_wlan_mcast(qdf_nbuf_t nbuf) +{ + uint8 *buf = qdf_nbuf_data(nbuf); + + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn; + + return rx_attn->mcast_bcast; +} + +#define HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_OFFSET)), \ + 
RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_LSB)) +/* + * hal_rx_get_mpdu_sequence_control_valid(): Get mpdu sequence control valid + * + * @nbuf: Network buffer + * Returns: value of sequence control valid field + */ +static inline +uint8_t hal_rx_get_mpdu_sequence_control_valid(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + uint8_t seq_ctrl_valid = 0; + + seq_ctrl_valid = + HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(rx_mpdu_info); + + return seq_ctrl_valid; +} + +#define HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_MASK, \ + RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_LSB)) +/* + * hal_rx_get_mpdu_frame_control_valid(): Retrieves mpdu frame control valid + * + * @nbuf: Network buffer + * Returns: value of frame control valid field + */ +static inline +uint8_t hal_rx_get_mpdu_frame_control_valid(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + uint8_t frm_ctrl_valid = 0; + + frm_ctrl_valid = + HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(rx_mpdu_info); + + return frm_ctrl_valid; +} + +#define HAL_RX_MPDU_GET_MAC_AD4_VALID(_rx_mpdu_info) \ + (_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_OFFSET)), \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_MASK, \ + RX_MPDU_INFO_2_MAC_ADDR_AD4_VALID_LSB)) +/* + * hal_rx_get_mpdu_mac_ad4_valid(): Retrieves if mpdu 4th addr is valid + * + * @nbuf: Network buffer + * Returns: value of mpdu 4th address valid field + */ +static inline +bool hal_rx_get_mpdu_mac_ad4_valid(uint8_t *buf) +{ + struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf); + struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs); + bool 
ad4_valid = 0; + + ad4_valid = HAL_RX_MPDU_GET_MAC_AD4_VALID(rx_mpdu_info); + + return ad4_valid; +} + +/* + * hal_rx_clear_mpdu_desc_info(): Clears mpdu_desc_info + * + * @rx_mpdu_desc_info: HAL view of rx mpdu desc info + * Returns: None + */ +static inline +void hal_rx_clear_mpdu_desc_info( + struct hal_rx_mpdu_desc_info *rx_mpdu_desc_info) +{ + qdf_mem_zero(rx_mpdu_desc_info, + sizeof(*rx_mpdu_desc_info)); +} + +/* + * hal_rx_clear_msdu_link_ptr(): Clears msdu_link_ptr + * + * @msdu_link_ptr: HAL view of msdu link ptr + * @size: number of msdu link pointers + * Returns: None + */ +static inline +void hal_rx_clear_msdu_link_ptr(struct hal_rx_msdu_link_ptr_info *msdu_link_ptr, + int size) +{ + qdf_mem_zero(msdu_link_ptr, + (sizeof(*msdu_link_ptr) * size)); +} + +/* + * hal_rx_chain_msdu_links() - Chains msdu link pointers + * @msdu_link_ptr: msdu link pointer + * @mpdu_desc_info: mpdu descriptor info + * + * Build a list of msdus using msdu link pointer. If the + * number of msdus are more, chain them together + * + * Returns: Number of processed msdus + */ +static inline +int hal_rx_chain_msdu_links(qdf_nbuf_t msdu, + struct hal_rx_msdu_link_ptr_info *msdu_link_ptr_info, + struct hal_rx_mpdu_desc_info *mpdu_desc_info) +{ + int j; + struct rx_msdu_link *msdu_link_ptr = + &msdu_link_ptr_info->msdu_link; + struct rx_msdu_link *prev_msdu_link_ptr = NULL; + struct rx_msdu_details *msdu_details = + HAL_RX_LINK_DESC_MSDU0_PTR(msdu_link_ptr); + uint8_t num_msdus = mpdu_desc_info->msdu_count; + struct rx_msdu_desc_info *msdu_desc_info; + uint8_t fragno, more_frag; + uint8_t *rx_desc_info; + struct hal_rx_msdu_list msdu_list; + + for (j = 0; j < num_msdus; j++) { + msdu_desc_info = + HAL_RX_MSDU_DESC_INFO_GET(&msdu_details[j]); + msdu_list.msdu_info[j].msdu_flags = + HAL_RX_MSDU_FLAGS_GET(msdu_desc_info); + msdu_list.msdu_info[j].msdu_len = + HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info); + msdu_list.sw_cookie[j] = HAL_RX_BUF_COOKIE_GET( + 
&msdu_details[j].buffer_addr_info_details); + } + + /* Chain msdu links together */ + if (prev_msdu_link_ptr) { + /* 31-0 bits of the physical address */ + prev_msdu_link_ptr-> + next_msdu_link_desc_addr_info.buffer_addr_31_0 = + msdu_link_ptr_info->msdu_link_buf_info.paddr & + BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK; + /* 39-32 bits of the physical address */ + prev_msdu_link_ptr-> + next_msdu_link_desc_addr_info.buffer_addr_39_32 + = ((msdu_link_ptr_info->msdu_link_buf_info.paddr + >> 32) && + BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK); + prev_msdu_link_ptr-> + next_msdu_link_desc_addr_info.sw_buffer_cookie = + msdu_link_ptr_info->msdu_link_buf_info.sw_cookie; + } + + /* There is space for only 6 MSDUs in a MSDU link descriptor */ + if (num_msdus < HAL_RX_NUM_MSDU_DESC) { + /* mark first and last MSDUs */ + rx_desc_info = qdf_nbuf_data(msdu); + fragno = hal_rx_get_rx_fragment_number(rx_desc_info); + more_frag = hal_rx_get_rx_more_frag_bit(rx_desc_info); + + /* TODO: create skb->fragslist[] */ + + if (more_frag == 0) { + msdu_list.msdu_info[num_msdus].msdu_flags |= + RX_MSDU_DESC_INFO_0_LAST_MSDU_IN_MPDU_FLAG_MASK; + } else if (fragno == 1) { + msdu_list.msdu_info[num_msdus].msdu_flags |= + RX_MSDU_DESC_INFO_0_FIRST_MSDU_IN_MPDU_FLAG_MASK; + + msdu_list.msdu_info[num_msdus].msdu_flags |= + RX_MSDU_DESC_INFO_0_MSDU_CONTINUATION_MASK; + } + + num_msdus++; + + /* Number of MSDUs per mpdu descriptor is updated */ + mpdu_desc_info->msdu_count += num_msdus; + } else { + num_msdus = 0; + prev_msdu_link_ptr = msdu_link_ptr; + } + + return num_msdus; +} + +/* + * hal_rx_defrag_update_src_ring_desc(): updates reo src ring desc + * + * @ring_desc: HAL view of ring descriptor + * @mpdu_des_info: saved mpdu desc info + * @msdu_link_ptr: saved msdu link ptr + * + * API used explicitly for rx defrag to update ring desc with + * mpdu desc info and msdu link ptr before reinjecting the + * packet back to REO + * + * Returns: None + */ +static inline +void 
hal_rx_defrag_update_src_ring_desc(void *ring_desc, + void *saved_mpdu_desc_info, + struct hal_rx_msdu_link_ptr_info *saved_msdu_link_ptr) +{ + struct reo_entrance_ring *reo_ent_ring; + struct rx_mpdu_desc_info *reo_ring_mpdu_desc_info; + struct hal_buf_info buf_info; + + reo_ent_ring = (struct reo_entrance_ring *)ring_desc; + reo_ring_mpdu_desc_info = &reo_ent_ring-> + reo_level_mpdu_frame_info.rx_mpdu_desc_info_details; + + qdf_mem_copy(&reo_ring_mpdu_desc_info, saved_mpdu_desc_info, + sizeof(*reo_ring_mpdu_desc_info)); + + /* + * TODO: Check for additional fields that need configuration in + * reo_ring_mpdu_desc_info + */ + + /* Update msdu_link_ptr in the reo entrance ring */ + hal_rx_reo_buf_paddr_get(ring_desc, &buf_info); + buf_info.paddr = saved_msdu_link_ptr->msdu_link_buf_info.paddr; + buf_info.sw_cookie = + saved_msdu_link_ptr->msdu_link_buf_info.sw_cookie; +} + +/* + * hal_rx_defrag_save_info_from_ring_desc(): Saves info from ring desc + * + * @msdu_link_desc_va: msdu link descriptor handle + * @msdu_link_ptr_info: HAL view of msdu link pointer info + * + * API used to save msdu link information along with physical + * address. The API also copues the sw cookie. 
+ * + * Returns: None + */ +static inline +void hal_rx_defrag_save_info_from_ring_desc(void *msdu_link_desc_va, + struct hal_rx_msdu_link_ptr_info *msdu_link_ptr_info, + struct hal_buf_info *hbi) +{ + struct rx_msdu_link *msdu_link_ptr = + (struct rx_msdu_link *)msdu_link_desc_va; + + qdf_mem_copy(&msdu_link_ptr_info->msdu_link, msdu_link_ptr, + sizeof(struct rx_msdu_link)); + + msdu_link_ptr_info->msdu_link_buf_info.paddr = hbi->paddr; + msdu_link_ptr_info->msdu_link_buf_info.sw_cookie = hbi->sw_cookie; +} + +/* + * hal_rx_get_desc_len(): Returns rx descriptor length + * + * Returns the size of rx_pkt_tlvs which follows the + * data in the nbuf + * + * Returns: Length of rx descriptor + */ +static inline +uint16_t hal_rx_get_desc_len(void) +{ + return sizeof(struct rx_pkt_tlvs); +} + +/* + * hal_rx_reo_ent_rxdma_push_reason_get(): Retrieves RXDMA push reason from + * reo_entrance_ring descriptor + * + * @reo_ent_desc: reo_entrance_ring descriptor + * Returns: value of rxdma_push_reason + */ +static inline +uint8_t hal_rx_reo_ent_rxdma_push_reason_get(void *reo_ent_desc) +{ + return _HAL_MS((*_OFFSET_TO_WORD_PTR(reo_ent_desc, + REO_ENTRANCE_RING_6_RXDMA_PUSH_REASON_OFFSET)), + REO_ENTRANCE_RING_6_RXDMA_PUSH_REASON_MASK, + REO_ENTRANCE_RING_6_RXDMA_PUSH_REASON_LSB); +} + +/** + * hal_rx_reo_ent_rxdma_error_code_get(): Retrieves RXDMA error code from + * reo_entrance_ring descriptor + * @reo_ent_desc: reo_entrance_ring descriptor + * Return: value of rxdma_error_code + */ +static inline +uint8_t hal_rx_reo_ent_rxdma_error_code_get(void *reo_ent_desc) +{ + return _HAL_MS((*_OFFSET_TO_WORD_PTR(reo_ent_desc, + REO_ENTRANCE_RING_6_RXDMA_ERROR_CODE_OFFSET)), + REO_ENTRANCE_RING_6_RXDMA_ERROR_CODE_MASK, + REO_ENTRANCE_RING_6_RXDMA_ERROR_CODE_LSB); +} + +/** + * hal_rx_wbm_err_info_get(): Retrieves WBM error code and reason and + * save it to hal_wbm_err_desc_info structure passed by caller + * @wbm_desc: wbm ring descriptor + * @wbm_er_info: hal_wbm_err_desc_info 
structure, output parameter. + * Return: void + */ +static inline void hal_rx_wbm_err_info_get(void *wbm_desc, + struct hal_wbm_err_desc_info *wbm_er_info) +{ + wbm_er_info->wbm_err_src = HAL_RX_WBM_ERR_SRC_GET(wbm_desc); + wbm_er_info->reo_psh_rsn = HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc); + wbm_er_info->reo_err_code = HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc); + wbm_er_info->rxdma_psh_rsn = HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc); + wbm_er_info->rxdma_err_code = HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc); +} + +/** + * hal_rx_wbm_err_info_set_in_tlv(): Save the wbm error codes and reason to + * the reserved bytes of rx_tlv_hdr + * @buf: start of rx_tlv_hdr + * @wbm_er_info: hal_wbm_err_desc_info structure + * Return: void + */ +static inline void hal_rx_wbm_err_info_set_in_tlv(uint8_t *buf, + struct hal_wbm_err_desc_info *wbm_er_info) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + + qdf_mem_copy(pkt_tlvs->rx_padding0, wbm_er_info, + sizeof(struct hal_wbm_err_desc_info)); +} + +/** + * hal_rx_wbm_err_info_get_from_tlv(): retrieve wbm error codes and reason from + * the reserved bytes of rx_tlv_hdr. + * @buf: start of rx_tlv_hdr + * @wbm_er_info: hal_wbm_err_desc_info structure, output parameter. + * Return: void + */ +static inline void hal_rx_wbm_err_info_get_from_tlv(uint8_t *buf, + struct hal_wbm_err_desc_info *wbm_er_info) +{ + struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; + + qdf_mem_copy(wbm_er_info, pkt_tlvs->rx_padding0, + sizeof(struct hal_wbm_err_desc_info)); +} + +#endif /* _HAL_RX_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/ftm/core/src/wlan_ftm_svc.c b/drivers/staging/qca-wifi-host-cmn/ftm/core/src/wlan_ftm_svc.c new file mode 100644 index 0000000000000000000000000000000000000000..a055642a1319db426c5125ce5e1cb95e48263177 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/ftm/core/src/wlan_ftm_svc.c @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. 
+ * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This implementation of init/deint functions for FTM services. + */ + +#include "wlan_ftm_svc_i.h" +#include +#include + +static inline struct wlan_lmac_if_ftm_tx_ops * +wlan_psoc_get_ftm_txops(struct wlan_objmgr_psoc *psoc) +{ + return &((psoc->soc_cb.tx_ops.ftm_tx_ops)); +} + +static QDF_STATUS +ftm_pdev_obj_init(struct wifi_ftm_pdev_priv_obj *ftm_pdev_obj) +{ + ftm_pdev_obj->data = qdf_mem_malloc(FTM_CMD_MAX_BUF_LENGTH); + if (!ftm_pdev_obj->data) { + ftm_err("Memory alloc failed for ftm pdev obj data"); + return QDF_STATUS_E_NOMEM; + } + + ftm_pdev_obj->length = 0; + + ftm_pdev_obj->cmd_type = WIFI_FTM_CMD_UNKNOWN; + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_ftm_pdev_obj_create_notification(struct wlan_objmgr_pdev *pdev, + void *arg_list) +{ + QDF_STATUS status; + struct wifi_ftm_pdev_priv_obj *ftm_pdev_obj; + + ftm_pdev_obj = qdf_mem_malloc(sizeof(*ftm_pdev_obj)); + + if (!ftm_pdev_obj) { + ftm_err("Memory alloc failed for ftm pdev obj"); + return QDF_STATUS_E_NOMEM; + } + + ftm_pdev_obj->pdev = pdev; + status = ftm_pdev_obj_init(ftm_pdev_obj); + + if (QDF_IS_STATUS_ERROR(status)) { + ftm_err("ftm pdev obj init failed"); + qdf_mem_free(ftm_pdev_obj); + return status; + } + + status = 
wlan_objmgr_pdev_component_obj_attach(pdev, + WLAN_UMAC_COMP_FTM, + ftm_pdev_obj, + QDF_STATUS_SUCCESS); + + if (QDF_IS_STATUS_ERROR(status)) { + ftm_err("ftm pdev obj attach failed"); + qdf_mem_free(ftm_pdev_obj); + return status; + } + + return status; +} + +static QDF_STATUS +ftm_pdev_obj_deinit(struct wifi_ftm_pdev_priv_obj *ftm_pdev_obj) +{ + if (ftm_pdev_obj->data) { + qdf_mem_free(ftm_pdev_obj->data); + + ftm_pdev_obj->data = NULL; + ftm_pdev_obj->length = 0; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_ftm_pdev_obj_destroy_notification(struct wlan_objmgr_pdev *pdev, + void *arg_list) +{ + QDF_STATUS status; + struct wifi_ftm_pdev_priv_obj *ftm_pdev_obj = + wlan_objmgr_pdev_get_comp_private_obj(pdev, WLAN_UMAC_COMP_FTM); + + if (NULL == ftm_pdev_obj) { + ftm_err("invalid wifi ftm obj"); + return QDF_STATUS_E_FAULT; + } + + status = wlan_objmgr_pdev_component_obj_detach(pdev, WLAN_UMAC_COMP_FTM, + ftm_pdev_obj); + + status = ftm_pdev_obj_deinit(ftm_pdev_obj); + ftm_pdev_obj->pdev = NULL; + + qdf_mem_free(ftm_pdev_obj); + + return status; +} + +QDF_STATUS +wlan_ftm_testmode_attach(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_ftm_tx_ops *ftm_tx_ops; + + ftm_tx_ops = wlan_psoc_get_ftm_txops(psoc); + + if (ftm_tx_ops->ftm_attach) + return ftm_tx_ops->ftm_attach(psoc); + else + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_ftm_testmode_detach(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_ftm_tx_ops *ftm_tx_ops; + + ftm_tx_ops = wlan_psoc_get_ftm_txops(psoc); + + if (ftm_tx_ops->ftm_detach) + return ftm_tx_ops->ftm_detach(psoc); + else + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_ftm_cmd_send(struct wlan_objmgr_pdev *pdev, uint8_t *buf, + uint32_t len, uint8_t pdev_id) +{ + struct wlan_lmac_if_ftm_tx_ops *ftm_tx_ops; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) + return QDF_STATUS_E_NOENT; + + ftm_tx_ops = wlan_psoc_get_ftm_txops(psoc); + + if (ftm_tx_ops->ftm_cmd_send) + return 
ftm_tx_ops->ftm_cmd_send(pdev, buf, len, pdev_id); + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/ftm/core/src/wlan_ftm_svc_i.h b/drivers/staging/qca-wifi-host-cmn/ftm/core/src/wlan_ftm_svc_i.h new file mode 100644 index 0000000000000000000000000000000000000000..bd8be913e46ea783577924e0a68d81c7f38d7737 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/ftm/core/src/wlan_ftm_svc_i.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: declare the ftm service data structure and apis + */ +#ifndef _WLAN_FTM_SVC_H_ +#define _WLAN_FTM_SVC_H_ + +#include +#include +#include + +/** + * struct ftm_seg_hdr_info - the segment header for the event from FW + * @len: length of the segment header + * @msgref: message reference + * @segment_info: segment information + * @pad: padding + * + */ +struct ftm_seg_hdr_info { + uint32_t len; + uint32_t msgref; + uint32_t segment_info; + uint32_t pad; +}; + +/** + * wlan_ftm_pdev_obj_create_notification() - ftm pdev create handler + * @pdev: pdev pointer + * @arg_list: argument list + * + * return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wlan_ftm_pdev_obj_create_notification(struct wlan_objmgr_pdev *pdev, + void *arg_list); + +/** + * wlan_ftm_pdev_obj_destroy_notification() - ftm pdev destroy handler + * @pdev: pdev pointer + * @arg_list: argument list + * + * return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wlan_ftm_pdev_obj_destroy_notification(struct wlan_objmgr_pdev *pdev, + void *arg_list); + +/** + * wlan_ftm_cmd_send() - send ftm command to target_if layer + * @pdev: pdev pointer + * @buf: data buffer + * @len: event length + * + * return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wlan_ftm_cmd_send(struct wlan_objmgr_pdev *pdev, uint8_t *buf, + uint32_t len, uint8_t pdev_id); + +/** + * wlan_ftm_testmode_attach() - Attach FTM UTF handle + * @psoc: psoc pointer + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_ftm_testmode_attach(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_ftm_testmode_detach() - Attach FTM UTF handle + * @psoc: psoc pointer + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_ftm_testmode_detach(struct wlan_objmgr_psoc *psoc); +#endif /* _WLAN_FTM_SVC_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/inc/wlan_ftm_init_deinit_api.h 
b/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/inc/wlan_ftm_init_deinit_api.h new file mode 100644 index 0000000000000000000000000000000000000000..0487af6478d10b1caaffa32a65479df90f534ef6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/inc/wlan_ftm_init_deinit_api.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: declare the ftm service data structure and apis + */ +#ifndef _WLAN_FTM_UCFG_API_H_ +#define _WLAN_FTM_UCFG_API_H_ + +#include +#include +#include + +/** + * dispatcher_ftm_init() - FTM testmode initialization API + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS dispatcher_ftm_init(void); + +/** + * dispatcher_ftm_deinit() - FTM testmode deinitialization API + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS dispatcher_ftm_deinit(void); + +/** + * dispatcher_ftm_psoc_open() - FTM module open API + * @psoc: psoc object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS dispatcher_ftm_psoc_open(struct wlan_objmgr_psoc *psoc); + +/** + * dispatcher_ftm_psoc_close() - FTM module close API + * @psoc: psoc object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS dispatcher_ftm_psoc_close(struct wlan_objmgr_psoc *psoc); +#endif /* _WLAN_FTM_UCFG_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/inc/wlan_ftm_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/inc/wlan_ftm_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..34a6489c6d20eaef31eeb3e2891dbf37838dddcf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/inc/wlan_ftm_ucfg_api.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declare the ftm service data structure and apis + */ +#ifndef _WLAN_FTM_UCFG_API_H_ +#define _WLAN_FTM_UCFG_API_H_ + +#include +#include +#include + +#define FTM_DEBUG 0 + +#if FTM_DEBUG +#define ftm_log(level, args...) \ + QDF_TRACE(QDF_MODULE_ID_FTM, level, ## args) +#define ftm_logfl(level, format, args...) \ + ftm_log(level, FL(format), ## args) + +#define ftm_alert(format, args...) \ + ftm_logfl(QDF_TRACE_LEVEL_FATAL, format, ## args) +#define ftm_err(format, args...) \ + ftm_logfl(QDF_TRACE_LEVEL_ERROR, format, ## args) +#define ftm_warn(format, args...) \ + ftm_logfl(QDF_TRACE_LEVEL_WARN, format, ## args) +#define ftm_notice(format, args...) \ + ftm_logfl(QDF_TRACE_LEVEL_INFO, format, ## args) +#define ftm_debug(format, args...) \ + ftm_logfl(QDF_TRACE_LEVEL_DEBUG, format, ## args) +#else +#define ftm_alert(format, args...) +#define ftm_err(format, args...) +#define ftm_warn(format, args...) +#define ftm_notice(format, args...) +#define ftm_debug(format, args...) 
+#endif + +#define FTM_IOCTL_UNIFIED_UTF_CMD 0x1000 +#define FTM_IOCTL_UNIFIED_UTF_RSP 0x1001 +#define FTM_CMD_MAX_BUF_LENGTH 2048 + +/** + * enum wifi_ftm_cmd_type - the enumeration of the command source per pdev + * @WIFI_FTM_CMD_IOCTL: command from ioctl on the pdev + * @WIFI_FTM_CMD_NL80211: command from nl80211 on the pdev + * + */ +enum wifi_ftm_pdev_cmd_type { + WIFI_FTM_CMD_IOCTL = 1, + WIFI_FTM_CMD_NL80211, + + /* command should be added above */ + WIFI_FTM_CMD_UNKNOWN, +}; + +/** + * struct wifi_ftm_pdev_priv_obj - wifi ftm pdev utf event info + * @pdev: pointer to pdev + * @data: data ptr + * @current_seq: curent squence + * @expected_seq: expected sequence + * @length: length + * @offset: offset + * @cmd_type: command type from either ioctl or nl80211 + */ +struct wifi_ftm_pdev_priv_obj { + struct wlan_objmgr_pdev *pdev; + uint8_t *data; + uint8_t current_seq; + uint8_t expected_seq; + qdf_size_t length; + qdf_size_t offset; + enum wifi_ftm_pdev_cmd_type cmd_type; +}; + +/** + * wlan_ftm_testmode_cmd() - handle FTM testmode command + * @pdev: pdev pointer + * @data: data + * @len: data length + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS ucfg_wlan_ftm_testmode_cmd(struct wlan_objmgr_pdev *pdev, + uint8_t *data, uint32_t len); + +/** + * wlan_ftm_testmode_rsp() - handle FTM testmode command + * @pdev: pdev pointer + * @data: data + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS ucfg_wlan_ftm_testmode_rsp(struct wlan_objmgr_pdev *pdev, + uint8_t *data); + +/** + * wlan_ftm_process_utf_event() - process ftm UTF event + * @scn_handle: scn handle + * @event: event buffer + * @len: event length + * + * return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wlan_ftm_process_utf_event(struct wlan_objmgr_pdev *pdev, + uint8_t *event_buf, uint32_t len); +#endif /* _WLAN_FTM_UCFG_API_H_ */ diff --git 
a/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/src/wlan_ftm_init_deinit.c b/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/src/wlan_ftm_init_deinit.c new file mode 100644 index 0000000000000000000000000000000000000000..47364e07284dde2cae184265b2e9716160019a75 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/src/wlan_ftm_init_deinit.c @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This implementation of init/deint functions for FTM services. 
+ */ + +#include +#include +#include +#include "../../core/src/wlan_ftm_svc_i.h" +#include +#include + +QDF_STATUS dispatcher_ftm_init(void) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = wlan_objmgr_register_pdev_create_handler(WLAN_UMAC_COMP_FTM, + wlan_ftm_pdev_obj_create_notification, NULL); + + if (QDF_IS_STATUS_ERROR(status)) + goto err_pdev_create; + + status = wlan_objmgr_register_pdev_destroy_handler(WLAN_UMAC_COMP_FTM, + wlan_ftm_pdev_obj_destroy_notification, NULL); + + if (QDF_IS_STATUS_ERROR(status)) + goto err_pdev_delete; + + return QDF_STATUS_SUCCESS; + +err_pdev_delete: + wlan_objmgr_unregister_pdev_create_handler(WLAN_UMAC_COMP_FTM, + wlan_ftm_pdev_obj_create_notification, NULL); +err_pdev_create: + return status; +} + +QDF_STATUS dispatcher_ftm_deinit(void) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = wlan_objmgr_unregister_pdev_create_handler(WLAN_UMAC_COMP_FTM, + wlan_ftm_pdev_obj_create_notification, NULL); + + if (QDF_IS_STATUS_ERROR(status)) + return QDF_STATUS_E_FAILURE; + + status = wlan_objmgr_unregister_pdev_destroy_handler(WLAN_UMAC_COMP_FTM, + wlan_ftm_pdev_obj_destroy_notification, NULL); + + if (QDF_IS_STATUS_ERROR(status)) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS dispatcher_ftm_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + /* calling the wmi event handler registration */ + return wlan_ftm_testmode_attach(psoc); +} + +QDF_STATUS dispatcher_ftm_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + /* calling the wmi event handler de-registration */ + return wlan_ftm_testmode_detach(psoc); +} diff --git a/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/src/wlan_ftm_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/src/wlan_ftm_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..cb327bd21a78e8a8ee8f1fd50938d17d66955c78 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/ftm/dispatcher/src/wlan_ftm_ucfg_api.c @@ -0,0 +1,144 @@ +/* + * 
Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This implementation of init/deint functions for FTM services. + */ + +#include +#include +#include "../../core/src/wlan_ftm_svc_i.h" +#include +#include + +QDF_STATUS ucfg_wlan_ftm_testmode_cmd(struct wlan_objmgr_pdev *pdev, + uint8_t *data, uint32_t len) +{ + struct wifi_ftm_pdev_priv_obj *ftm_pdev_obj; + uint8_t pdev_id; + + ftm_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_FTM); + if (!ftm_pdev_obj) { + ftm_err("Failed to get ftm pdev component"); + return QDF_STATUS_E_FAILURE; + } + + ftm_pdev_obj->length = 0; + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + return wlan_ftm_cmd_send(pdev, data, len, pdev_id); +} + +QDF_STATUS ucfg_wlan_ftm_testmode_rsp(struct wlan_objmgr_pdev *pdev, + uint8_t *data) +{ + struct wifi_ftm_pdev_priv_obj *ftm_pdev_obj; + uint32_t *len; + + ftm_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_FTM); + if (!ftm_pdev_obj) { + ftm_err("Failed to get ftm pdev component"); + return QDF_STATUS_E_FAILURE; + } + + if (ftm_pdev_obj->length) { + len = (uint32_t *)data; + *len = ftm_pdev_obj->length; + qdf_mem_copy((data + 4), ftm_pdev_obj->data, + ftm_pdev_obj->length); 
+ + ftm_pdev_obj->length = 0; + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wlan_ftm_process_utf_event(struct wlan_objmgr_pdev *pdev, + uint8_t *event_buf, uint32_t len) +{ + struct wifi_ftm_pdev_priv_obj *ftm_pdev_obj; + uint32_t utf_datalen; + uint8_t *utf_data; + struct ftm_seg_hdr_info seghdr_info; + u_int8_t total_segments, current_seq; + + ftm_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_FTM); + if (!ftm_pdev_obj) { + ftm_err("Failed to get ftm pdev component"); + return QDF_STATUS_E_FAILURE; + } + + utf_data = event_buf; + seghdr_info = *(struct ftm_seg_hdr_info *)(event_buf); + ftm_pdev_obj->current_seq = (seghdr_info.segment_info & 0xF); + + current_seq = (seghdr_info.segment_info & 0xF); + total_segments = (seghdr_info.segment_info >> 4) & 0xF; + + utf_datalen = len - sizeof(seghdr_info); + + if (current_seq == 0) { + ftm_pdev_obj->expected_seq = 0; + ftm_pdev_obj->offset = 0; + } else { + if (ftm_pdev_obj->expected_seq != current_seq) { + ftm_debug("seq mismatch exp Seq %d got seq %d\n", + ftm_pdev_obj->expected_seq, current_seq); + } + } + + if ((len > FTM_CMD_MAX_BUF_LENGTH) || + (ftm_pdev_obj->offset > (FTM_CMD_MAX_BUF_LENGTH - utf_datalen))) { + ftm_err("Invalid utf data len :%d", len); + return QDF_STATUS_E_FAILURE; + } + qdf_mem_copy(&ftm_pdev_obj->data[ftm_pdev_obj->offset], + &utf_data[sizeof(seghdr_info)], utf_datalen); + + ftm_pdev_obj->offset = ftm_pdev_obj->offset + utf_datalen; + ftm_pdev_obj->expected_seq++; + + if (ftm_pdev_obj->expected_seq == total_segments) { + if (ftm_pdev_obj->offset != seghdr_info.len) { + ftm_debug("len mismatch len %zu total len %d\n", + ftm_pdev_obj->offset, seghdr_info.len); + } + + ftm_pdev_obj->length = ftm_pdev_obj->offset; + + /** + * If the repsonse is for a command from FTM daemon, + * send this repsonse data to cfg80211 + */ + if (ftm_pdev_obj->cmd_type == WIFI_FTM_CMD_NL80211) { + if (wlan_cfg80211_ftm_rx_event(pdev, ftm_pdev_obj->data, + 
ftm_pdev_obj->length) != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + ftm_pdev_obj->cmd_type = WIFI_FTM_CMD_UNKNOWN; + } + } + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/global_lmac_if/inc/wlan_global_lmac_if_api.h b/drivers/staging/qca-wifi-host-cmn/global_lmac_if/inc/wlan_global_lmac_if_api.h new file mode 100644 index 0000000000000000000000000000000000000000..aadfe7def89c2a041fa39164cc36e6a08d7c2ad5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/global_lmac_if/inc/wlan_global_lmac_if_api.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_GLOBAL_LMAC_IF_API_H_ +#define _WLAN_GLOBAL_LMAC_IF_API_H_ + +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_psoc_obj.h" + +/** + * wlan_global_lmac_if_open() - global lmac_if open + * @psoc: psoc context + * + * Opens up lmac_if southbound layer. This function calls OL,DA and UMAC + * modules to register respective tx and rx callbacks. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_global_lmac_if_open(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_global_lmac_if_rx_ops_register() - UMAC rx handler register + * @rx_ops: Pointer to rx_ops structure to be populated + * + * Register umac RX callabacks which will be called by DA/OL/WMA/WMI + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_global_lmac_if_rx_ops_register + (struct wlan_lmac_if_rx_ops *rx_ops); + +/** + * wlan_global_lmac_if_close() - Close global lmac_if + * @psoc: psoc context + * + * Deregister global lmac_if TX and RX handlers + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_global_lmac_if_close(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_global_lmac_if_set_txops_registration_cb() -tx + * registration callback assignment + * @dev_type: Dev type can be either Direct attach or Offload + * @handler: handler to be called for LMAC tx ops registration + * + * API to assign appropriate tx registration callback handler based on the + * device type(Offload or Direct attach) + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_global_lmac_if_set_txops_registration_cb(WLAN_DEV_TYPE dev_type, + QDF_STATUS (*handler)(struct wlan_lmac_if_tx_ops *)); + +#ifdef WLAN_CONV_SPECTRAL_ENABLE +/** + * wlan_lmac_if_sptrl_set_rx_ops_register_cb ()- Spectral LMAC Rx ops + * registration callback assignment + * @handler: Handler to be called for spectral LMAC rx ops registration + * + * API to assign appropriate Spectral LMAC rx ops registration callback handler + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_lmac_if_sptrl_set_rx_ops_register_cb(void (*handler) + (struct wlan_lmac_if_rx_ops *)); +#endif /* WLAN_CONV_SPECTRAL_ENABLE */ +#endif /* _WLAN_LMAC_IF_API_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/global_lmac_if/src/wlan_global_lmac_if.c b/drivers/staging/qca-wifi-host-cmn/global_lmac_if/src/wlan_global_lmac_if.c new file 
mode 100644 index 0000000000000000000000000000000000000000..d02bfae42aa5d3bacb135b2fab02638a126f0836 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/global_lmac_if/src/wlan_global_lmac_if.c @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "qdf_mem.h" +#include "qdf_module.h" +#include "wlan_lmac_if_def.h" +#include "wlan_lmac_if_api.h" +#include "wlan_global_lmac_if_api.h" +#ifdef WLAN_CONV_SPECTRAL_ENABLE +#include +#endif +/* Function pointer to call DA/OL specific tx_ops registration function */ +QDF_STATUS (*wlan_global_lmac_if_tx_ops_register[MAX_DEV_TYPE]) + (struct wlan_lmac_if_tx_ops *tx_ops); + +#ifdef WLAN_CONV_SPECTRAL_ENABLE +/* Function pointer for spectral rx_ops registration function */ +void (*wlan_lmac_if_sptrl_rx_ops)(struct wlan_lmac_if_rx_ops *rx_ops); + +QDF_STATUS wlan_lmac_if_sptrl_set_rx_ops_register_cb(void (*handler) + (struct wlan_lmac_if_rx_ops *)) +{ + wlan_lmac_if_sptrl_rx_ops = handler; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_lmac_if_sptrl_set_rx_ops_register_cb); +#endif /* WLAN_CONV_SPECTRAL_ENABLE */ + +/* + * spectral scan is built as separate .ko for WIN where + * MCL it is part of wlan.ko so the registration of +.* rx ops to global lmac if layer is different between WIN + * and MCL + */ +#ifdef WLAN_CONV_SPECTRAL_ENABLE +/** + * wlan_spectral_register_rx_ops() - Register spectral component RX OPS + * @rx_ops: lmac if receive ops + * + * Return: None + */ +#ifdef CONFIG_WIN +static void wlan_spectral_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops) +{ + wlan_lmac_if_sptrl_rx_ops(rx_ops); +} +#else +static void wlan_spectral_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops) +{ + wlan_lmac_if_sptrl_register_rx_ops(rx_ops); +} +#endif /*CONFIG_WIN*/ +#else +/** + * wlan_spectral_register_rx_ops() - Dummy api to register spectral RX OPS + * @rx_ops: lmac if receive ops + * + * Return: None + */ +static void wlan_spectral_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif /*WLAN_CONV_SPECTRAL_ENABLE*/ + +/** + * wlan_global_lmac_if_rx_ops_register() - Global lmac_if + * rx handler register + * @rx_ops: Pointer to rx_ops structure to be populated + * + * Register lmac_if RX callabacks which will be called by 
DA/OL/WMA/WMI + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS +wlan_global_lmac_if_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + /* + * Component specific public api's to be called to register + * respective callbacks + * Ex: rx_ops->fp = function; + */ + if (!rx_ops) { + qdf_print("%s: lmac if rx ops pointer is NULL", __func__); + return QDF_STATUS_E_INVAL; + } + /* Registeration for UMAC componets */ + wlan_lmac_if_umac_rx_ops_register(rx_ops); + + /* spectral rx_ops registration*/ + wlan_spectral_register_rx_ops(rx_ops); + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_global_lmac_if_open() - global lmac_if open + * @psoc: psoc context + * + * Opens up lmac_if southbound layer. This function calls OL,DA and UMAC + * modules to register respective tx and rx callbacks. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_global_lmac_if_open(struct wlan_objmgr_psoc *psoc) +{ + WLAN_DEV_TYPE dev_type; + + dev_type = psoc->soc_nif.phy_type; + + if (dev_type == WLAN_DEV_DA || dev_type == WLAN_DEV_OL) { + wlan_global_lmac_if_tx_ops_register[dev_type] + (&psoc->soc_cb.tx_ops); + } else { + /* Control should ideally not reach here */ + qdf_print("Invalid device type"); + return QDF_STATUS_E_INVAL; + } + + /* Function call to register rx-ops handlers */ + wlan_global_lmac_if_rx_ops_register(&psoc->soc_cb.rx_ops); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_global_lmac_if_open); + +/** + * wlan_global_lmac_if_close() - Close global lmac_if + * @psoc: psoc context + * + * Deregister lmac_if TX and RX handlers + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_global_lmac_if_close(struct wlan_objmgr_psoc *psoc) +{ + qdf_mem_set(&psoc->soc_cb.tx_ops, sizeof(psoc->soc_cb.tx_ops), 0); + qdf_mem_set(&psoc->soc_cb.rx_ops, sizeof(psoc->soc_cb.rx_ops), 0); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_global_lmac_if_close); + +/** + * wlan_global_lmac_if_set_txops_registration_cb() - tx + * 
registration callback assignment + * @dev_type: Dev type can be either Direct attach or Offload + * @handler: handler to be called for LMAC tx ops registration + * + * API to assign appropriate tx registration callback handler based on the + * device type(Offload or Direct attach) + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_global_lmac_if_set_txops_registration_cb(WLAN_DEV_TYPE dev_type, + QDF_STATUS (*handler)(struct wlan_lmac_if_tx_ops *)) +{ + wlan_global_lmac_if_tx_ops_register[dev_type] = handler; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_global_lmac_if_set_txops_registration_cb); diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_api.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_api.h new file mode 100644 index 0000000000000000000000000000000000000000..dba2718bd6f840c3a404e95f230c09562dec29b9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_api.h @@ -0,0 +1,1150 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _HAL_API_H_ +#define _HAL_API_H_ + +#include "qdf_types.h" +#include "qdf_util.h" +#include "hal_internal.h" +#include "rx_msdu_link.h" +#include "rx_reo_queue.h" +#include "rx_reo_queue_ext.h" + +#define MAX_UNWINDOWED_ADDRESS 0x80000 +#define WINDOW_ENABLE_BIT 0x80000000 +#define WINDOW_REG_ADDRESS 0x310C +#define WINDOW_SHIFT 19 +#define WINDOW_VALUE_MASK 0x3F +#define WINDOW_START MAX_UNWINDOWED_ADDRESS +#define WINDOW_RANGE_MASK 0x7FFFF + +static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset) +{ + uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK; + if (window != hal_soc->register_window) { + qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS, + WINDOW_ENABLE_BIT | window); + hal_soc->register_window = window; + } +} + +/** + * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) -1 + * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS + * note3: WINDOW_VALUE_MASK = big enough that trying to write past that window + * would be a bug + */ +static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset, + uint32_t value) +{ + + if (!hal_soc->use_register_windowing || + offset < MAX_UNWINDOWED_ADDRESS) { + 
qdf_iowrite32(hal_soc->dev_base_addr + offset, value); + } else { + qdf_spin_lock_irqsave(&hal_soc->register_access_lock); + hal_select_window(hal_soc, offset); + qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START + + (offset & WINDOW_RANGE_MASK), value); + qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock); + } +} + +/** + * hal_write_address_32_mb - write a value to a register + * + */ +static inline void hal_write_address_32_mb(struct hal_soc *hal_soc, + void __iomem *addr, uint32_t value) +{ + uint32_t offset; + + if (!hal_soc->use_register_windowing) + return qdf_iowrite32(addr, value); + + offset = addr - hal_soc->dev_base_addr; + hal_write32_mb(hal_soc, offset, value); +} + +static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset) +{ + uint32_t ret; + + if (!hal_soc->use_register_windowing || + offset < MAX_UNWINDOWED_ADDRESS) { + return qdf_ioread32(hal_soc->dev_base_addr + offset); + } + + qdf_spin_lock_irqsave(&hal_soc->register_access_lock); + hal_select_window(hal_soc, offset); + ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START + + (offset & WINDOW_RANGE_MASK)); + qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock); + + return ret; +} + +#include "hif_io32.h" + +/** + * hal_attach - Initialize HAL layer + * @hif_handle: Opaque HIF handle + * @qdf_dev: QDF device + * + * Return: Opaque HAL SOC handle + * NULL on failure (if given ring is not available) + * + * This function should be called as part of HIF initialization (for accessing + * copy engines). 
DP layer will get hal_soc handle using hif_get_hal_handle() + */ +extern void *hal_attach(void *hif_handle, qdf_device_t qdf_dev); + +/** + * hal_detach - Detach HAL layer + * @hal_soc: HAL SOC handle + * + * This function should be called as part of HIF detach + * + */ +extern void hal_detach(void *hal_soc); + +/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */ +enum hal_ring_type { + REO_DST, + REO_EXCEPTION, + REO_REINJECT, + REO_CMD, + REO_STATUS, + TCL_DATA, + TCL_CMD, + TCL_STATUS, + CE_SRC, + CE_DST, + CE_DST_STATUS, + WBM_IDLE_LINK, + SW2WBM_RELEASE, + WBM2SW_RELEASE, + RXDMA_BUF, + RXDMA_DST, + RXDMA_MONITOR_BUF, + RXDMA_MONITOR_STATUS, + RXDMA_MONITOR_DST, + RXDMA_MONITOR_DESC, + DIR_BUF_RX_DMA_SRC, +#ifdef WLAN_FEATURE_CIF_CFR + WIFI_POS_SRC, +#endif + MAX_RING_TYPES +}; + +/* SRNG flags passed in hal_srng_params.flags */ +#define HAL_SRNG_MSI_SWAP 0x00000008 +#define HAL_SRNG_RING_PTR_SWAP 0x00000010 +#define HAL_SRNG_DATA_TLV_SWAP 0x00000020 +#define HAL_SRNG_LOW_THRES_INTR_ENABLE 0x00010000 +#define HAL_SRNG_MSI_INTR 0x00020000 + +#define PN_SIZE_24 0 +#define PN_SIZE_48 1 +#define PN_SIZE_128 2 + +/** + * hal_srng_get_entrysize - Returns size of ring entry in bytes. 
Should be
 * used by callers for calculating the size of memory to be allocated before
 * calling hal_srng_setup to setup the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 */
extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);

/**
 * hal_srng_max_entries - Returns maximum possible number of ring entries
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Maximum number of entries for the given ring_type
 */
uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);

/**
 * hal_srng_dump - Dump ring status
 * @srng: hal srng pointer
 */
void hal_srng_dump(struct hal_srng *srng);

/**
 * hal_srng_get_dir - Returns the direction of the ring
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Ring direction
 */
enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);

/* HAL memory information */
struct hal_mem_info {
	/* dev base virtual addr */
	void *dev_base_addr;
	/* dev base physical addr */
	void *dev_base_paddr;
	/* Remote virtual pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_vaddr;
	/* Remote physical pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_paddr;
	/* Shared memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_vaddr;
	/* Shared physical memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_paddr;
};

/* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold - in micro seconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold - in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold - in number of ring entries
	 * (valid for src rings only)
	 */
	uint32_t low_threshold;
	/* Misc flags */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
	/* Source or Destination ring */
	enum hal_srng_dir ring_dir;
	/* Size of ring entry */
	uint32_t entry_size;
	/* hw register base address */
	void *hwreg_base[MAX_SRNG_REG_GROUPS];
};

/* hal_construct_shadow_config() - initialize the shadow registers for dp rings
 * @hal_soc: hal handle
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);

/* hal_set_one_shadow_config() - add a config for the specified ring
 * @hal_soc: hal handle
 * @ring_type: ring type
 * @ring_num: ring num
 *
 * The ring type and ring num uniquely specify the ring. After this call,
 * the hp/tp will be added as the next entry in the shadow register
 * configuration table. The hal code will use the shadow register address
 * in place of the hp/tp address.
 *
 * This function is exposed, so that the CE module can skip configuring shadow
 * registers for unused ring and rings assigned to the firmware.
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
					    int ring_num);
/**
 * hal_get_shadow_config() - retrieve the config table
 * @hal_soc: hal handle
 * @shadow_config: will point to the table after
 * @num_shadow_registers_configured: will contain the number of valid entries
 */
extern void hal_get_shadow_config(void *hal_soc,
				  struct pld_shadow_reg_v2_cfg **shadow_config,
				  int *num_shadow_registers_configured);
/**
 * hal_srng_setup - Initialize HW SRNG ring.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of
 *		same type (starting from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
 * structure. Ring base address should be 8 byte aligned and size of each ring
 * entry should be queried using the API hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params);

/* Remapping ids of REO rings */
#define REO_REMAP_TCL 0
#define REO_REMAP_SW1 1
#define REO_REMAP_SW2 2
#define REO_REMAP_SW3 3
#define REO_REMAP_SW4 4
#define REO_REMAP_RELEASE 5
#define REO_REMAP_FW 6
#define REO_REMAP_UNUSED 7

/*
 * currently this macro only works for IX0 since all the rings we are remapping
 * can be remapped from HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
 */
#define HAL_REO_REMAP_VAL(_ORIGINAL_DEST, _NEW_DEST) \
	HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST)
/* allow the destination macros to be expanded */
#define HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST) \
	(_NEW_DEST << \
	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
	  _ORIGINAL_DEST ## _SHFT))

/**
 * hal_reo_remap_IX0 - Remap REO ring destination
 * @hal: HAL SOC handle
 * @remap_val: Remap value
 */
extern void hal_reo_remap_IX0(struct hal_soc *hal, uint32_t remap_val);

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest SRNG head pointer
 * @sring: sring pointer
 * @paddr: physical address
 */
extern void
hal_srng_dst_set_hp_paddr(struct hal_srng *sring, uint64_t paddr); + +/** + * hal_srng_dst_init_hp() - Initilaize head pointer with cached head pointer + * @srng: sring pointer + * @vaddr: virtual address + */ +extern void hal_srng_dst_init_hp(struct hal_srng *srng, uint32_t *vaddr); + +/** + * hal_srng_cleanup - Deinitialize HW SRNG ring. + * @hal_soc: Opaque HAL SOC handle + * @hal_srng: Opaque HAL SRNG pointer + */ +extern void hal_srng_cleanup(void *hal_soc, void *hal_srng); + +static inline bool hal_srng_initialized(void *hal_ring) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + + return !!srng->initialized; +} + +/** + * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use + * hal_srng_access_start if locked access is required + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Ring pointer (Source or Destination ring) + * + * Return: 0 on success; error on failire + */ +static inline int hal_srng_access_start_unlocked(void *hal_soc, void *hal_ring) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + + if (srng->ring_dir == HAL_SRNG_SRC_RING) + srng->u.src_ring.cached_tp = + *(volatile uint32_t *)(srng->u.src_ring.tp_addr); + else + srng->u.dst_ring.cached_hp = + *(volatile uint32_t *)(srng->u.dst_ring.hp_addr); + + return 0; +} + +/** + * hal_srng_access_start - Start (locked) ring access + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Ring pointer (Source or Destination ring) + * + * Return: 0 on success; error on failire + */ +static inline int hal_srng_access_start(void *hal_soc, void *hal_ring) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + + SRNG_LOCK(&(srng->lock)); + + return hal_srng_access_start_unlocked(hal_soc, hal_ring); +} + +/** + * hal_srng_dst_get_next - Get next entry from a destination ring and move + * cached tail pointer + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Destination ring pointer + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ 
+static inline void *hal_srng_dst_get_next(void *hal_soc, void *hal_ring) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + uint32_t *desc; + + if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) { + desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]); + /* TODO: Using % is expensive, but we have to do this since + * size of some SRNG rings is not power of 2 (due to descriptor + * sizes). Need to create separate API for rings used + * per-packet, with sizes power of 2 (TCL2SW, REO2SW, + * SW2RXDMA and CE rings) + */ + srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) % + srng->ring_size; + + return (void *)desc; + } + + return NULL; +} + +/** + * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move + * cached head pointer + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Destination ring pointer + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ +static inline void *hal_srng_dst_get_next_hp(void *hal_soc, void *hal_ring) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + uint32_t *desc; + /* TODO: Using % is expensive, but we have to do this since + * size of some SRNG rings is not power of 2 (due to descriptor + * sizes). Need to create separate API for rings used + * per-packet, with sizes power of 2 (TCL2SW, REO2SW, + * SW2RXDMA and CE rings) + */ + uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) % + srng->ring_size; + + if (next_hp != srng->u.dst_ring.tp) { + desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]); + srng->u.dst_ring.cached_hp = next_hp; + return (void *)desc; + } + + return NULL; +} + +/** + * hal_srng_dst_peek - Get next entry from a ring without moving tail pointer. 
+ * hal_srng_dst_get_next should be called subsequently to move the tail pointer + * TODO: See if we need an optimized version of get_next that doesn't check for + * loop_cnt + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Destination ring pointer + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ +static inline void *hal_srng_dst_peek(void *hal_soc, void *hal_ring) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + + if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) + return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp])); + + return NULL; +} + +/** + * hal_srng_dst_num_valid - Returns number of valid entries (to be processed + * by SW) in destination ring + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Destination ring pointer + * @sync_hw_ptr: Sync cached head pointer with HW + * + */ +static inline uint32_t hal_srng_dst_num_valid(void *hal_soc, void *hal_ring, + int sync_hw_ptr) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + uint32 hp; + uint32 tp = srng->u.dst_ring.tp; + + if (sync_hw_ptr) { + hp = *(srng->u.dst_ring.hp_addr); + srng->u.dst_ring.cached_hp = hp; + } else { + hp = srng->u.dst_ring.cached_hp; + } + + if (hp >= tp) + return (hp - tp) / srng->entry_size; + else + return (srng->ring_size - tp + hp) / srng->entry_size; +} + +/** + * hal_srng_src_reap_next - Reap next entry from a source ring and move reap + * pointer. This can be used to release any buffers associated with completed + * ring entries. Note that this should not be used for posting new descriptor + * entries. Posting of new entries should be done only using + * hal_srng_src_get_next_reaped when this function is used for reaping. 
+ * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Source ring pointer + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ +static inline void *hal_srng_src_reap_next(void *hal_soc, void *hal_ring) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + uint32_t *desc; + + /* TODO: Using % is expensive, but we have to do this since + * size of some SRNG rings is not power of 2 (due to descriptor + * sizes). Need to create separate API for rings used + * per-packet, with sizes power of 2 (TCL2SW, REO2SW, + * SW2RXDMA and CE rings) + */ + uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) % + srng->ring_size; + + if (next_reap_hp != srng->u.src_ring.cached_tp) { + desc = &(srng->ring_base_vaddr[next_reap_hp]); + srng->u.src_ring.reap_hp = next_reap_hp; + return (void *)desc; + } + + return NULL; +} + +/** + * hal_srng_src_get_next_reaped - Get next entry from a source ring that is + * already reaped using hal_srng_src_reap_next, for posting new entries to + * the ring + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Source ring pointer + * + * Return: Opaque pointer for next (reaped) source ring entry; NULL on failire + */ +static inline void *hal_srng_src_get_next_reaped(void *hal_soc, void *hal_ring) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + uint32_t *desc; + + if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) { + desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]); + srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) % + srng->ring_size; + + return (void *)desc; + } + + return NULL; +} + +/** + * hal_srng_src_pending_reap_next - Reap next entry from a source ring and + * move reap pointer. This API is used in detach path to release any buffers + * associated with ring entries which are pending reap. 
+ * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Source ring pointer + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ +static inline void *hal_srng_src_pending_reap_next(void *hal_soc, void *hal_ring) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + uint32_t *desc; + + uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) % + srng->ring_size; + + if (next_reap_hp != srng->u.src_ring.hp) { + desc = &(srng->ring_base_vaddr[next_reap_hp]); + srng->u.src_ring.reap_hp = next_reap_hp; + return (void *)desc; + } + + return NULL; +} + +/** + * hal_srng_src_done_val - + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Source ring pointer + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ +static inline uint32_t hal_srng_src_done_val(void *hal_soc, void *hal_ring) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + /* TODO: Using % is expensive, but we have to do this since + * size of some SRNG rings is not power of 2 (due to descriptor + * sizes). Need to create separate API for rings used + * per-packet, with sizes power of 2 (TCL2SW, REO2SW, + * SW2RXDMA and CE rings) + */ + uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) % + srng->ring_size; + + if (next_reap_hp == srng->u.src_ring.cached_tp) + return 0; + + if (srng->u.src_ring.cached_tp > next_reap_hp) + return (srng->u.src_ring.cached_tp - next_reap_hp) / + srng->entry_size; + else + return ((srng->ring_size - next_reap_hp) + + srng->u.src_ring.cached_tp) / srng->entry_size; +} + +/** + * hal_api_get_tphp - Get head and tail pointer location for any ring + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Source ring pointer + * @tailp: Tail Pointer + * @headp: Head Pointer + * + * Return: Update tail pointer and head pointer in arguments. 
+ */ +static inline void hal_api_get_tphp(void *hal_soc, void *hal_ring, + uint32_t *tailp, uint32_t *headp) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + + if (srng->ring_dir == HAL_SRNG_SRC_RING) { + *headp = srng->u.src_ring.hp / srng->entry_size; + *tailp = *(srng->u.src_ring.tp_addr) / srng->entry_size; + } else { + *tailp = srng->u.dst_ring.tp / srng->entry_size; + *headp = *(srng->u.dst_ring.hp_addr) / srng->entry_size; + } +} + +/** + * hal_srng_src_get_next - Get next entry from a source ring and move cached tail pointer + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Source ring pointer + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ +static inline void *hal_srng_src_get_next(void *hal_soc, void *hal_ring) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + uint32_t *desc; + /* TODO: Using % is expensive, but we have to do this since + * size of some SRNG rings is not power of 2 (due to descriptor + * sizes). Need to create separate API for rings used + * per-packet, with sizes power of 2 (TCL2SW, REO2SW, + * SW2RXDMA and CE rings) + */ + uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) % + srng->ring_size; + + if (next_hp != srng->u.src_ring.cached_tp) { + desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]); + srng->u.src_ring.hp = next_hp; + /* TODO: Since reap function is not used by all rings, we can + * remove the following update of reap_hp in this function + * if we can ensure that only hal_srng_src_get_next_reaped + * is used for the rings requiring reap functionality + */ + srng->u.src_ring.reap_hp = next_hp; + return (void *)desc; + } + + return NULL; +} + +/** + * hal_srng_src_peek - Get next entry from a ring without moving head pointer. 
+ * hal_srng_src_get_next should be called subsequently to move the head pointer + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Source ring pointer + * + * Return: Opaque pointer for next ring entry; NULL on failire + */ +static inline void *hal_srng_src_peek(void *hal_soc, void *hal_ring) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + uint32_t *desc; + + /* TODO: Using % is expensive, but we have to do this since + * size of some SRNG rings is not power of 2 (due to descriptor + * sizes). Need to create separate API for rings used + * per-packet, with sizes power of 2 (TCL2SW, REO2SW, + * SW2RXDMA and CE rings) + */ + if (((srng->u.src_ring.hp + srng->entry_size) % + srng->ring_size) != srng->u.src_ring.cached_tp) { + desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]); + return (void *)desc; + } + + return NULL; +} + +/** + * hal_srng_src_num_avail - Returns number of available entries in src ring + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Source ring pointer + * @sync_hw_ptr: Sync cached tail pointer with HW + * + */ +static inline uint32_t hal_srng_src_num_avail(void *hal_soc, + void *hal_ring, int sync_hw_ptr) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + uint32 tp; + uint32 hp = srng->u.src_ring.hp; + + if (sync_hw_ptr) { + tp = *(srng->u.src_ring.tp_addr); + srng->u.src_ring.cached_tp = tp; + } else { + tp = srng->u.src_ring.cached_tp; + } + + if (tp > hp) + return ((tp - hp) / srng->entry_size) - 1; + else + return ((srng->ring_size - hp + tp) / srng->entry_size) - 1; +} + +/** + * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached + * ring head/tail pointers to HW. 
+ * This should be used only if hal_srng_access_start_unlocked to start ring + * access + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Ring pointer (Source or Destination ring) + * + * Return: 0 on success; error on failire + */ +static inline void hal_srng_access_end_unlocked(void *hal_soc, void *hal_ring) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + + /* TODO: See if we need a write memory barrier here */ + if (srng->flags & HAL_SRNG_LMAC_RING) { + /* For LMAC rings, ring pointer updates are done through FW and + * hence written to a shared memory location that is read by FW + */ + if (srng->ring_dir == HAL_SRNG_SRC_RING) { + *(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp; + } else { + *(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp; + } + } else { + if (srng->ring_dir == HAL_SRNG_SRC_RING) + hal_write_address_32_mb(hal_soc, + srng->u.src_ring.hp_addr, + srng->u.src_ring.hp); + else + hal_write_address_32_mb(hal_soc, + srng->u.dst_ring.tp_addr, + srng->u.dst_ring.tp); + } +} + +/** + * hal_srng_access_end - Unlock ring access and update cached ring head/tail + * pointers to HW + * This should be used only if hal_srng_access_start to start ring access + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Ring pointer (Source or Destination ring) + * + * Return: 0 on success; error on failire + */ +static inline void hal_srng_access_end(void *hal_soc, void *hal_ring) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + + hal_srng_access_end_unlocked(hal_soc, hal_ring); + SRNG_UNLOCK(&(srng->lock)); +} + +/** + * hal_srng_access_end_reap - Unlock ring access + * This should be used only if hal_srng_access_start to start ring access + * and should be used only while reaping SRC ring completions + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Ring pointer (Source or Destination ring) + * + * Return: 0 on success; error on failire + */ +static inline void hal_srng_access_end_reap(void *hal_soc, void *hal_ring) +{ + struct 
hal_srng *srng = (struct hal_srng *)hal_ring;

	SRNG_UNLOCK(&(srng->lock));
}

/* TODO: Check if the following definitions is available in HW headers */
#define WBM_IDLE_DESC_LIST 1
#define WBM_IDLE_SCATTER_BUF_SIZE 32704
#define NUM_MPDUS_PER_LINK_DESC 6
#define NUM_MSDUS_PER_LINK_DESC 7
#define REO_QUEUE_DESC_ALIGN 128

/* link descriptor size: RX_MSDU_LINK dword count converted to bytes */
#define LINK_DESC_SIZE (NUM_OF_DWORDS_RX_MSDU_LINK << 2)
#define LINK_DESC_ALIGN 128

#define ADDRESS_MATCH_TAG_VAL 0x5
/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
 * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
 */
#define NUM_MPDU_LINKS_PER_QUEUE_DESC 12

/* TODO: Check with HW team on the scatter buffer size supported. As per WBM
 * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
 * should be specified in 16 word units. But the number of bits defined for
 * this field in HW header files is 5.
 */
#define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8

/**
 * hal_set_link_desc_addr - Setup link descriptor in a buffer_addr_info
 * HW structure
 *
 * @desc: Descriptor entry (from WBM_IDLE_LINK ring)
 * @cookie: SW cookie for the buffer/descriptor
 * @link_desc_paddr: Physical address of link descriptor entry
 *
 */
static inline void hal_set_link_desc_addr(void *desc, uint32_t cookie,
					  qdf_dma_addr_t link_desc_paddr)
{
	uint32_t *buf_addr = (uint32_t *)desc;

	/* split the physical address across the 31:0 and 39:32 fields */
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_0, BUFFER_ADDR_31_0,
			   link_desc_paddr & 0xffffffff);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, BUFFER_ADDR_39_32,
			   (uint64_t)link_desc_paddr >> 32);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, RETURN_BUFFER_MANAGER,
			   WBM_IDLE_DESC_LIST);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, SW_BUFFER_COOKIE,
			   cookie);
}

/**
 * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
 * in an idle list
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: scatter buffer size in bytes
 */
static inline uint32_t
hal_idle_list_scatter_buf_size(void *hal_soc)
{
	return WBM_IDLE_SCATTER_BUF_SIZE;
}

/**
 * hal_get_link_desc_size - Get the size of each link descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_link_desc_size(void *hal_soc)
{
	return LINK_DESC_SIZE;
}

/**
 * hal_get_link_desc_align - Get the required start address alignment for
 * link descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_link_desc_align(void *hal_soc)
{
	return LINK_DESC_ALIGN;
}

/**
 * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_mpdus_per_link_desc(void *hal_soc)
{
	return NUM_MPDUS_PER_LINK_DESC;
}

/**
 * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_msdus_per_link_desc(void *hal_soc)
{
	return NUM_MSDUS_PER_LINK_DESC;
}

/**
 * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
 * descriptor can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_mpdu_links_per_queue_desc(void *hal_soc)
{
	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
}

/**
 * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
 * that fit in the given buffer size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_buf_size: Size of scatter buffer
 *
 * Return: number of WBM_IDLE_LINK entries per scatter buffer, after
 *	   reserving space for the next-pointer at the buffer end
 */
static inline uint32_t hal_idle_scatter_buf_num_entries(void *hal_soc,
	uint32_t scatter_buf_size)
{
	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
		hal_srng_get_entrysize(hal_soc, WBM_IDLE_LINK);
}

/**
 * hal_idle_list_num_scatter_bufs - Get the number of scatter buffers needed
 * to hold the given amount of memory
 *
 * @hal_soc: Opaque HAL SOC handle
 * @total_mem: size of memory to be scattered
 * @scatter_buf_size: Size of scatter buffer
 *
 * Return: buffer count, rounded up when total_mem is not a multiple of the
 *	   usable (next-pointer-excluded) buffer size
 */
static inline uint32_t hal_idle_list_num_scatter_bufs(void *hal_soc,
	uint32_t total_mem, uint32_t scatter_buf_size)
{
	uint8_t rem = (total_mem % (scatter_buf_size -
			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;

	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;

	return num_scatter_bufs;
}

/**
 * hal_setup_link_idle_list - Setup scattered idle list using the buffer list
 * provided
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_bufs_base_paddr: Array of physical base addresses
 * @scatter_bufs_base_vaddr: Array of virtual base addresses
 * @num_scatter_bufs: Number of scatter buffers in the above lists
 * @scatter_buf_size: Size of each scatter buffer
 * @last_buf_end_offset: Offset to the last entry
 * @num_entries: Total entries of all scatter bufs
 *
 */
extern void hal_setup_link_idle_list(void *hal_soc,
	qdf_dma_addr_t scatter_bufs_base_paddr[],
	void *scatter_bufs_base_vaddr[], uint32_t num_scatter_bufs,
	uint32_t scatter_buf_size, uint32_t last_buf_end_offset,
	uint32_t num_entries);

/* REO parameters to be passed to hal_reo_setup */
struct hal_reo_params {
	/** rx hash steering enabled or disabled */
	bool rx_hash_enabled;
	/** reo remap 1 register */
	uint32_t remap1;
	/** reo remap 2 register */
	uint32_t remap2;
	/** fragment destination ring */
	uint8_t frag_dst_ring;
	/** padding */
	uint8_t padding[3];
};

/**
 * hal_reo_setup - Initialize HW REO block
 *
 * @hal_soc: Opaque HAL SOC handle
 * @reo_params: parameters needed by HAL for REO config
 */
extern void hal_reo_setup(void *hal_soc,
	struct hal_reo_params *reo_params);

enum hal_pn_type {
	HAL_PN_NONE,
	HAL_PN_WPA,
	HAL_PN_WAPI_EVEN,
	HAL_PN_WAPI_UNEVEN,
};

#define HAL_RX_MAX_BA_WINDOW 256
/**
 * hal_get_reo_qdesc_size - Get size of reo queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ba_window_size: BlockAck window size
 *
 * Return: base descriptor size plus 0-3 extension descriptors depending on
 *	   the BA window size (thresholds 1/105/210 — presumably from the HW
 *	   reo queue layout; TODO confirm against HW headers)
 */
static inline uint32_t hal_get_reo_qdesc_size(void *hal_soc,
	uint32_t ba_window_size)
{
	if (ba_window_size <= 1)
		return sizeof(struct rx_reo_queue);

	if (ba_window_size <= 105)
		return sizeof(struct rx_reo_queue) +
			sizeof(struct rx_reo_queue_ext);

	if (ba_window_size <= 210)
		return sizeof(struct rx_reo_queue) +
			(2 * sizeof(struct rx_reo_queue_ext));

	return sizeof(struct rx_reo_queue) +
		(3 * sizeof(struct rx_reo_queue_ext));
}

/**
 * hal_get_reo_qdesc_align - Get start address alignment for reo
 * queue descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_reo_qdesc_align(void *hal_soc)
{
	return REO_QUEUE_DESC_ALIGN;
}

/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @tid: TID for which the queue descriptor is set up
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
 *
 */
extern void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
	int pn_type);

/**
 * hal_srng_get_hp_addr - Get head pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: physical address corresponding to the ring's hp_addr, derived by
 *	   offsetting into the matching shadow (wrptr for src, rdptr for dst)
 *	   physical region
 */
static inline qdf_dma_addr_t hal_srng_get_hp_addr(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_wrptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.hp_addr) -
			 (unsigned long)(hal->shadow_wrptr_mem_vaddr));
	} else {
		return hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.dst_ring.hp_addr) -
			 (unsigned long)(hal->shadow_rdptr_mem_vaddr));
	}
}

/**
 * hal_srng_get_tp_addr - 
Get tail pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: physical address corresponding to the ring's tp_addr, derived by
 *	   offsetting into the matching shadow (rdptr for src, wrptr for dst)
 *	   physical region
 */
static inline qdf_dma_addr_t hal_srng_get_tp_addr(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.tp_addr) -
			 (unsigned long)(hal->shadow_rdptr_mem_vaddr));
	} else {
		return hal->shadow_wrptr_mem_paddr +
			((unsigned long)(srng->u.dst_ring.tp_addr) -
			 (unsigned long)(hal->shadow_wrptr_mem_vaddr));
	}
}

/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
extern void hal_get_srng_params(void *hal_soc, void *hal_ring,
	struct hal_srng_params *ring_params);

/**
 * hal_get_meminfo - Retrieve hal memory base address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
extern void hal_get_meminfo(void *hal_soc, struct hal_mem_info *mem);
/* NOTE(review): guard comment '_HAL_APIH_' looks misspelled (expected
 * '_HAL_API_H_'?) — confirm it matches this header's #ifndef guard
 */
#endif /* _HAL_APIH_ */
diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_api_mon.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_api_mon.h
new file mode 100644
index 0000000000000000000000000000000000000000..0d8c06d758ac08380989f1021b2374e3cee45c03
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_api_mon.h
@@ -0,0 +1,1400 @@
/*
 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _HAL_API_MON_H_
#define _HAL_API_MON_H_

#include "qdf_types.h"
#include "hal_internal.h"

/* helpers composing <block>_<field>_{OFFSET,LSB,MASK} names from HW headers */
#define HAL_RX_OFFSET(block, field) block##_##field##_OFFSET
#define HAL_RX_LSB(block, field) block##_##field##_LSB
/* NOTE(review): lowercase 'k' in HAL_RX_MASk is inconsistent with the other
 * accessor names, but HAL_RX_GET below depends on this exact spelling
 */
#define HAL_RX_MASk(block, field) block##_##field##_MASK

/* read a HW-described field: index to the word, mask, then shift down */
#define HAL_RX_GET(_ptr, block, field) \
	(((*((volatile uint32_t *)_ptr + (HAL_RX_OFFSET(block, field)>>2))) & \
	HAL_RX_MASk(block, field)) >> \
	HAL_RX_LSB(block, field))

#define HAL_RX_PHY_DATA_RADAR 0x01
#define HAL_SU_MU_CODING_LDPC 0x01

#define HAL_RX_FCS_LEN (4)
#define KEY_EXTIV 0x20

/* TLV32 header bitfields: type bits [9:1], len bits [21:10], user id
 * bits [31:26]
 */
#define HAL_RX_USER_TLV32_TYPE_OFFSET		0x00000000
#define HAL_RX_USER_TLV32_TYPE_LSB		1
#define HAL_RX_USER_TLV32_TYPE_MASK		0x000003FE

#define HAL_RX_USER_TLV32_LEN_OFFSET		0x00000000
#define HAL_RX_USER_TLV32_LEN_LSB		10
#define HAL_RX_USER_TLV32_LEN_MASK		0x003FFC00

#define HAL_RX_USER_TLV32_USERID_OFFSET		0x00000000
#define HAL_RX_USER_TLV32_USERID_LSB		26
#define HAL_RX_USER_TLV32_USERID_MASK		0xFC000000

/* NOTE(review): the (uint32) cast truncates operands wider than 32 bits —
 * confirm callers only align 32-bit quantities
 */
#define HAL_ALIGN(x, a)				HAL_ALIGN_MASK(x, (a)-1)
#define HAL_ALIGN_MASK(x, mask)	(typeof(x))(((uint32)(x) + (mask)) & ~(mask))

#define HAL_RX_TLV32_HDR_SIZE			4

#define HAL_RX_GET_USER_TLV32_TYPE(rx_status_tlv_ptr) \
		((*((uint32_t *)(rx_status_tlv_ptr)) & \
		HAL_RX_USER_TLV32_TYPE_MASK) >> \
		HAL_RX_USER_TLV32_TYPE_LSB)

#define HAL_RX_GET_USER_TLV32_LEN(rx_status_tlv_ptr) \
		((*((uint32_t *)(rx_status_tlv_ptr)) & \
HAL_RX_USER_TLV32_LEN_MASK) >> \ + HAL_RX_USER_TLV32_LEN_LSB) + +#define HAL_RX_GET_USER_TLV32_USERID(rx_status_tlv_ptr) \ + ((*((uint32_t *)(rx_status_tlv_ptr)) & \ + HAL_RX_USER_TLV32_USERID_MASK) >> \ + HAL_RX_USER_TLV32_USERID_LSB) + +#define HAL_TLV_STATUS_PPDU_NOT_DONE 0 +#define HAL_TLV_STATUS_PPDU_DONE 1 +#define HAL_TLV_STATUS_BUF_DONE 2 + +#define HAL_MAX_UL_MU_USERS 8 + +#define HAL_RX_PKT_TYPE_11A 0 +#define HAL_RX_PKT_TYPE_11B 1 +#define HAL_RX_PKT_TYPE_11N 2 +#define HAL_RX_PKT_TYPE_11AC 3 +#define HAL_RX_PKT_TYPE_11AX 4 + +#define HAL_RX_RECEPTION_TYPE_SU 0 +#define HAL_RX_RECEPTION_TYPE_MU_MIMO 1 +#define HAL_RX_RECEPTION_TYPE_OFDMA 2 +#define HAL_RX_RECEPTION_TYPE_MU_OFDMA 3 + +/* Multiply rate by 2 to avoid float point + * and get rate in units of 500kbps + */ +#define HAL_11B_RATE_0MCS 11*2 +#define HAL_11B_RATE_1MCS 5.5*2 +#define HAL_11B_RATE_2MCS 2*2 +#define HAL_11B_RATE_3MCS 1*2 +#define HAL_11B_RATE_4MCS 11*2 +#define HAL_11B_RATE_5MCS 5.5*2 +#define HAL_11B_RATE_6MCS 2*2 + +#define HAL_11A_RATE_0MCS 48*2 +#define HAL_11A_RATE_1MCS 24*2 +#define HAL_11A_RATE_2MCS 12*2 +#define HAL_11A_RATE_3MCS 6*2 +#define HAL_11A_RATE_4MCS 54*2 +#define HAL_11A_RATE_5MCS 36*2 +#define HAL_11A_RATE_6MCS 18*2 +#define HAL_11A_RATE_7MCS 9*2 + +#define HE_GI_0_8 0 +#define HE_GI_1_6 1 +#define HE_GI_3_2 2 + +#define HT_SGI_PRESENT 0x80 + +#define HE_LTF_1_X 0 +#define HE_LTF_2_X 1 +#define HE_LTF_4_X 2 +#define VHT_SIG_SU_NSS_MASK 0x7 + +#define HAL_TID_INVALID 31 +#define HAL_AST_IDX_INVALID 0xFFFF + +#ifdef GET_MSDU_AGGREGATION +#define HAL_RX_GET_MSDU_AGGREGATION(rx_desc, rs)\ +{\ + struct rx_msdu_end *rx_msdu_end;\ + bool first_msdu, last_msdu; \ + rx_msdu_end = &rx_desc->msdu_end_tlv.rx_msdu_end;\ + first_msdu = HAL_RX_GET(rx_msdu_end, RX_MSDU_END_5, FIRST_MSDU);\ + last_msdu = HAL_RX_GET(rx_msdu_end, RX_MSDU_END_5, LAST_MSDU);\ + if (first_msdu && last_msdu)\ + rs->rs_flags &= (~IEEE80211_AMSDU_FLAG);\ + else\ + rs->rs_flags |= (IEEE80211_AMSDU_FLAG); \ 
+} \ + +#else +#define HAL_RX_GET_MSDU_AGGREGATION(rx_desc, rs) +#endif + +enum { + HAL_HW_RX_DECAP_FORMAT_RAW = 0, + HAL_HW_RX_DECAP_FORMAT_NWIFI, + HAL_HW_RX_DECAP_FORMAT_ETH2, + HAL_HW_RX_DECAP_FORMAT_8023, +}; + +enum { + DP_PPDU_STATUS_START, + DP_PPDU_STATUS_DONE, +}; + +static inline +uint32_t HAL_RX_MON_HW_RX_DESC_SIZE(void) +{ + /* return the HW_RX_DESC size */ + return sizeof(struct rx_pkt_tlvs); +} + +static inline +uint8_t *HAL_RX_MON_DEST_GET_DESC(uint8_t *data) +{ + return data; +} + +static inline +uint32_t HAL_RX_DESC_GET_MPDU_LENGTH_ERR(void *hw_desc_addr) +{ + struct rx_attention *rx_attn; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + + rx_attn = &rx_desc->attn_tlv.rx_attn; + + return HAL_RX_GET(rx_attn, RX_ATTENTION_1, MPDU_LENGTH_ERR); +} + +static inline +uint32_t HAL_RX_DESC_GET_MPDU_FCS_ERR(void *hw_desc_addr) +{ + struct rx_attention *rx_attn; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + + rx_attn = &rx_desc->attn_tlv.rx_attn; + + return HAL_RX_GET(rx_attn, RX_ATTENTION_1, FCS_ERR); +} + +static inline +uint32_t +HAL_RX_DESC_GET_DECAP_FORMAT(void *hw_desc_addr) { + struct rx_msdu_start *rx_msdu_start; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + + rx_msdu_start = &rx_desc->msdu_start_tlv.rx_msdu_start; + + return HAL_RX_GET(rx_msdu_start, RX_MSDU_START_2, DECAP_FORMAT); +} + +static inline +uint8_t * +HAL_RX_DESC_GET_80211_HDR(void *hw_desc_addr) { + uint8_t *rx_pkt_hdr; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + + rx_pkt_hdr = &rx_desc->pkt_hdr_tlv.rx_pkt_hdr[0]; + + return rx_pkt_hdr; +} + +static inline +uint32_t HAL_RX_HW_DESC_GET_PPDUID_GET(void *hw_desc_addr) +{ + struct rx_mpdu_info *rx_mpdu_info; + struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr; + + rx_mpdu_info = + &rx_desc->mpdu_start_tlv.rx_mpdu_start.rx_mpdu_info_details; + + return HAL_RX_GET(rx_mpdu_info, RX_MPDU_INFO_0, PHY_PPDU_ID); +} + +/* TODO: Move all Rx 
descriptor functions to hal_rx.h to avoid duplication */
+/* FIRST_MSDU flag from the RX_MSDU_END TLV */
+static inline
+uint32_t hal_rx_desc_is_first_msdu(void *hw_desc_addr)
+{
+	struct rx_pkt_tlvs *rx_tlvs = (struct rx_pkt_tlvs *)hw_desc_addr;
+	struct rx_msdu_end *msdu_end = &rx_tlvs->msdu_end_tlv.rx_msdu_end;
+
+	return HAL_RX_GET(msdu_end, RX_MSDU_END_5, FIRST_MSDU);
+}
+
+#define HAL_RX_BUFFER_ADDR_31_0_GET(buff_addr_info)	\
+	(_HAL_MS((*_OFFSET_TO_WORD_PTR(buff_addr_info,	\
+		BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_OFFSET)),	\
+		BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK,	\
+		BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB))
+
+/* Accessors for the msdu_link_desc_addr_info embedded inside a REO
+ * entrance-ring descriptor */
+#define HAL_RX_REO_ENT_BUFFER_ADDR_39_32_GET(reo_ent_desc)	\
+	(HAL_RX_BUFFER_ADDR_39_32_GET(&	\
+	(((struct reo_entrance_ring *)reo_ent_desc)	\
+	->reo_level_mpdu_frame_info.msdu_link_desc_addr_info)))
+
+#define HAL_RX_REO_ENT_BUFFER_ADDR_31_0_GET(reo_ent_desc)	\
+	(HAL_RX_BUFFER_ADDR_31_0_GET(&	\
+	(((struct reo_entrance_ring *)reo_ent_desc)	\
+	->reo_level_mpdu_frame_info.msdu_link_desc_addr_info)))
+
+#define HAL_RX_REO_ENT_BUF_COOKIE_GET(reo_ent_desc)	\
+	(HAL_RX_BUF_COOKIE_GET(&	\
+	(((struct reo_entrance_ring *)reo_ent_desc)	\
+	->reo_level_mpdu_frame_info.msdu_link_desc_addr_info)))
+
+/**
+ * hal_rx_reo_ent_buf_paddr_get: Gets the physical address and
+ * cookie from the REO entrance ring element
+ *
+ * @rx_desc: Opaque pointer to the REO entrance ring descriptor
+ * @buf_info: structure to return the buffer information
+ * @pp_buf_addr_info: returns pointer to the embedded buffer_addr_info
+ * @msdu_cnt: pointer to msdu count in MPDU
+ * Return: void
+ */
+static inline
+void hal_rx_reo_ent_buf_paddr_get(void *rx_desc,
+	struct hal_buf_info *buf_info,
+	void **pp_buf_addr_info,
+	uint32_t *msdu_cnt
+)
+{
+	struct reo_entrance_ring *reo_ent_ring =
+		(struct reo_entrance_ring *)rx_desc;
+	struct buffer_addr_info *buf_addr_info;
+	struct rx_mpdu_desc_info *rx_mpdu_desc_info_details;
+	uint32_t loop_cnt;
+
+	rx_mpdu_desc_info_details =
+	&reo_ent_ring->reo_level_mpdu_frame_info.rx_mpdu_desc_info_details;
+
+	*msdu_cnt = HAL_RX_GET(rx_mpdu_desc_info_details,
+				RX_MPDU_DESC_INFO_0, MSDU_COUNT);
+
+	/* loop_cnt is fetched only for the debug trace below */
+	loop_cnt = HAL_RX_GET(reo_ent_ring, REO_ENTRANCE_RING_7, LOOPING_COUNT);
+
+	buf_addr_info =
+		&reo_ent_ring->reo_level_mpdu_frame_info.msdu_link_desc_addr_info;
+
+	/* Combine the two address words into a 40-bit physical address */
+	buf_info->paddr =
+		(HAL_RX_BUFFER_ADDR_31_0_GET(buf_addr_info) |
+		((uint64_t)
+		(HAL_RX_BUFFER_ADDR_39_32_GET(buf_addr_info)) << 32));
+
+	buf_info->sw_cookie = HAL_RX_BUF_COOKIE_GET(buf_addr_info);
+
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
+		"[%s][%d] ReoAddr=%pK, addrInfo=%pK, paddr=0x%llx, loopcnt=%d\n",
+		__func__, __LINE__, reo_ent_ring, buf_addr_info,
+		(unsigned long long)buf_info->paddr, loop_cnt);
+
+	*pp_buf_addr_info = (void *)buf_addr_info;
+}
+
+/* Follow the next-link pointer of an MSDU link descriptor; returns the
+ * 40-bit paddr and sw cookie of the next link descriptor via @buf_info */
+static inline
+void hal_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
+	struct hal_buf_info *buf_info, void **pp_buf_addr_info)
+{
+	struct rx_msdu_link *msdu_link =
+		(struct rx_msdu_link *)rx_msdu_link_desc;
+	struct buffer_addr_info *buf_addr_info;
+
+	buf_addr_info = &msdu_link->next_msdu_link_desc_addr_info;
+
+	buf_info->paddr =
+		(HAL_RX_BUFFER_ADDR_31_0_GET(buf_addr_info) |
+		((uint64_t)
+		(HAL_RX_BUFFER_ADDR_39_32_GET(buf_addr_info)) << 32));
+
+	buf_info->sw_cookie = HAL_RX_BUF_COOKIE_GET(buf_addr_info);
+
+	*pp_buf_addr_info = (void *)buf_addr_info;
+}
+
+/**
+ * hal_rx_mon_msdu_link_desc_set: Writes an MSDU link descriptor's
+ * buffer_addr_info into a WBM release ring entry (structure copy)
+ *
+ * @soc: HAL version of the SOC pointer
+ * @src_srng_desc: void pointer to the WBM Release Ring descriptor
+ * @buf_addr_info: void pointer to the buffer_addr_info
+ *
+ * Return: void
+ */
+static inline void hal_rx_mon_msdu_link_desc_set(struct hal_soc *soc,
+			void *src_srng_desc, void *buf_addr_info)
+{
+	struct buffer_addr_info *wbm_srng_buffer_addr_info =
+			(struct buffer_addr_info *)src_srng_desc;
+	uint64_t paddr;
+	struct buffer_addr_info *p_buffer_addr_info =
+			(struct buffer_addr_info *)buf_addr_info;
+
+	/* paddr is computed only for the debug trace below */
+	paddr =
+		(HAL_RX_BUFFER_ADDR_31_0_GET(buf_addr_info) |
+		((uint64_t)
+		(HAL_RX_BUFFER_ADDR_39_32_GET(buf_addr_info)) << 32));
+
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
+		"[%s][%d] src_srng_desc=%pK, buf_addr=0x%llx, cookie=0x%llx\n",
+		__func__, __LINE__, src_srng_desc, (unsigned long long)paddr,
+		(unsigned long long)p_buffer_addr_info->sw_buffer_cookie);
+
+	/* Structure copy !!! */
+	*wbm_srng_buffer_addr_info =
+		*((struct buffer_addr_info *)buf_addr_info);
+}
+
+static inline
+uint32 hal_get_rx_msdu_link_desc_size(void)
+{
+	/* NOTE(review): return type is 'uint32' (qdf typedef), not uint32_t */
+	return sizeof(struct rx_msdu_link);
+}
+
+/* PHY preamble types as decoded from the L-SIG/HT/VHT/HE signal TLVs */
+enum {
+	HAL_PKT_TYPE_OFDM = 0,
+	HAL_PKT_TYPE_CCK,
+	HAL_PKT_TYPE_HT,
+	HAL_PKT_TYPE_VHT,
+	HAL_PKT_TYPE_HE,
+};
+
+enum {
+	HAL_SGI_0_8_US,
+	HAL_SGI_0_4_US,
+	HAL_SGI_1_6_US,
+	HAL_SGI_3_2_US,
+};
+
+enum {
+	HAL_FULL_RX_BW_20,
+	HAL_FULL_RX_BW_40,
+	HAL_FULL_RX_BW_80,
+	HAL_FULL_RX_BW_160,
+};
+
+enum {
+	HAL_RX_TYPE_SU,
+	HAL_RX_TYPE_MU_MIMO,
+	HAL_RX_TYPE_MU_OFDMA,
+	HAL_RX_TYPE_MU_OFDMA_MIMO,
+};
+
+/**
+ * enum
+ * @HAL_RX_MON_PPDU_START: PPDU start TLV is decoded in HAL
+ * @HAL_RX_MON_PPDU_END: PPDU end TLV is decided in HAL
+ */
+enum {
+	HAL_RX_MON_PPDU_START = 0,
+	HAL_RX_MON_PPDU_END,
+};
+
+/**
+ * hal_rx_mon_hw_desc_get_mpdu_status: Retrieve MPDU status
+ *
+ * @hw_desc_addr: Start address of Rx HW TLVs
+ * @rs: Status for monitor mode
+ *
+ * Return: void
+ */
+static inline
+void hal_rx_mon_hw_desc_get_mpdu_status(void *hw_desc_addr,
+		struct mon_rx_status *rs)
+{
+	struct rx_msdu_start *rx_msdu_start;
+	struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr;
+	uint32_t reg_value;
+	/* Maps the hardware SGI encoding to the CDP SGI enumeration */
+	static uint32_t sgi_hw_to_cdp[] = {
+		CDP_SGI_0_8_US,
+		CDP_SGI_0_4_US,
+		CDP_SGI_1_6_US,
+		CDP_SGI_3_2_US,
+	};
+
+	rx_msdu_start = &rx_desc->msdu_start_tlv.rx_msdu_start;
+	HAL_RX_GET_MSDU_AGGREGATION(rx_desc, rs);
+
+	rs->ant_signal_db = HAL_RX_GET(rx_msdu_start,
+				RX_MSDU_START_5, USER_RSSI);
+	rs->is_stbc = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, STBC);
+
+	reg_value = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, SGI);
+	rs->sgi = sgi_hw_to_cdp[reg_value];
+#if !defined(QCA_WIFI_QCA6290_11AX)
+	rs->nr_ant = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, NSS);
+#endif
+
+	reg_value = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, RECEPTION_TYPE);
+	rs->beamformed = (reg_value == HAL_RX_RECEPTION_TYPE_MU_MIMO) ? 1 : 0;
+	/* TODO: rs->beamformed should be set for SU beamforming also */
+	hal_rx_dump_pkt_tlvs((uint8_t *)rx_desc, QDF_TRACE_LEVEL_DEBUG);
+}
+
+/* Per-user state for UL MU PPDUs; currently an empty placeholder */
+struct hal_rx_ppdu_user_info {
+
+};
+
+/* PPDU-wide identifiers and FCS counters shared by all users */
+struct hal_rx_ppdu_common_info {
+	uint32_t ppdu_id;
+	uint32_t last_ppdu_id;
+	uint32_t ppdu_timestamp;
+	uint32_t mpdu_cnt_fcs_ok;
+	uint32_t mpdu_cnt_fcs_err;
+};
+
+/* First MSDU payload captured from the WIFIRX_HEADER status TLV */
+struct hal_rx_msdu_payload_info {
+	uint8_t *first_msdu_payload;
+	uint32_t payload_len;
+};
+
+struct hal_rx_ppdu_info {
+	struct hal_rx_ppdu_common_info com_info;
+	struct hal_rx_ppdu_user_info user_info[HAL_MAX_UL_MU_USERS];
+	struct mon_rx_status rx_status;
+	struct hal_rx_msdu_payload_info msdu_info;
+	/* status ring PPDU start and end state */
+	uint32_t rx_state;
+};
+
+static inline uint32_t
+hal_get_rx_status_buf_size(void) {
+	/* RX status buffer size is hard coded for now */
+	return 2048;
+}
+
+/* Advance to the next status-ring TLV, rounding up to 4-byte alignment */
+static inline uint8_t*
+hal_rx_status_get_next_tlv(uint8_t *rx_tlv) {
+	uint32_t tlv_len, tlv_tag;
+
+	tlv_len = HAL_RX_GET_USER_TLV32_LEN(rx_tlv);
+	tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv);
+
+	/* The actual length of PPDU_END is the combined length of many PHY
+	 * TLVs that follow. Skip the TLV header and
+	 * rx_rxpcu_classification_overview that follows the header to get to
+	 * next TLV.
+	 */
+	if (tlv_tag == WIFIRX_PPDU_END_E)
+		tlv_len = sizeof(struct rx_rxpcu_classification_overview);
+
+	return (uint8_t *)(((unsigned long)(rx_tlv + tlv_len +
+			HAL_RX_TLV32_HDR_SIZE + 3)) & (~((unsigned long)3)));
+}
+
+#ifdef QCA_WIFI_QCA6290_11AX
+/**
+ * hal_rx_proc_phyrx_other_receive_info_tlv() - process other receive info TLV
+ * @rx_tlv_hdr: pointer to TLV header
+ * @ppdu_info: pointer to ppdu_info
+ *
+ * Return: None
+ */
+static void hal_rx_proc_phyrx_other_receive_info_tlv(void *rx_tlv_hdr,
+				struct hal_rx_ppdu_info *ppdu_info)
+{
+	uint32_t tlv_tag, tlv_len;
+	uint32_t temp_len, other_tlv_len, other_tlv_tag;
+	void *rx_tlv = (uint8_t *)rx_tlv_hdr + HAL_RX_TLV32_HDR_SIZE;
+	void *other_tlv_hdr = NULL;
+	void *other_tlv = NULL;
+	uint32_t ru_details_channel_0;
+
+	tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv_hdr);
+	tlv_len = HAL_RX_GET_USER_TLV32_LEN(rx_tlv_hdr);
+	temp_len = 0;
+
+	/* The payload of this TLV is itself a nested TLV */
+	other_tlv_hdr = rx_tlv + HAL_RX_TLV32_HDR_SIZE;
+
+	other_tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(other_tlv_hdr);
+	other_tlv_len = HAL_RX_GET_USER_TLV32_LEN(other_tlv_hdr);
+	temp_len += other_tlv_len;
+	other_tlv = other_tlv_hdr + HAL_RX_TLV32_HDR_SIZE;
+
+	switch (other_tlv_tag) {
+	case WIFIPHYRX_OTHER_RECEIVE_INFO_RU_DETAILS_E:
+		ru_details_channel_0 =
+			HAL_RX_GET(other_tlv,
+				PHYRX_OTHER_RECEIVE_INFO_RU_DETAILS_0,
+				RU_DETAILS_CHANNEL_0);
+
+		qdf_mem_copy(ppdu_info->rx_status.he_RU,
+				&ru_details_channel_0,
+				sizeof(ppdu_info->rx_status.he_RU));
+
+		/* Mark additional RUs known as the reported bandwidth grows */
+		if (ppdu_info->rx_status.bw >= HAL_FULL_RX_BW_20)
+			ppdu_info->rx_status.he_sig_b_common_known |=
+				QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU0;
+
+		if (ppdu_info->rx_status.bw >= HAL_FULL_RX_BW_40)
+			ppdu_info->rx_status.he_sig_b_common_known |=
+				QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU1;
+
+		if (ppdu_info->rx_status.bw >= HAL_FULL_RX_BW_80)
+			ppdu_info->rx_status.he_sig_b_common_known |=
+				QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU2;
+
+		if (ppdu_info->rx_status.bw >= HAL_FULL_RX_BW_160)
ppdu_info->rx_status.he_sig_b_common_known |= + QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU3; + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s unhandled TLV type: %d, TLV len:%d", + __func__, other_tlv_tag, other_tlv_len); + break; + } + +} +#else +static inline void +hal_rx_proc_phyrx_other_receive_info_tlv(void *rx_tlv_hdr, + struct hal_rx_ppdu_info *ppdu_info) +{ +} +#endif /* QCA_WIFI_QCA6290_11AX */ + +/** + * hal_rx_status_get_tlv_info() - process receive info TLV + * @rx_tlv_hdr: pointer to TLV header + * @ppdu_info: pointer to ppdu_info + * + * Return: HAL_TLV_STATUS_PPDU_NOT_DONE or HAL_TLV_STATUS_PPDU_DONE from tlv + */ +static inline uint32_t +hal_rx_status_get_tlv_info(void *rx_tlv_hdr, struct hal_rx_ppdu_info *ppdu_info) +{ + uint32_t tlv_tag, user_id, tlv_len, value; + uint8_t group_id = 0; + uint8_t he_dcm = 0; + uint8_t he_stbc = 0; + uint16_t he_gi = 0; + uint16_t he_ltf = 0; + void *rx_tlv; + bool unhandled = false; + + + tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv_hdr); + user_id = HAL_RX_GET_USER_TLV32_USERID(rx_tlv_hdr); + tlv_len = HAL_RX_GET_USER_TLV32_LEN(rx_tlv_hdr); + + rx_tlv = (uint8_t *)rx_tlv_hdr + HAL_RX_TLV32_HDR_SIZE; + switch (tlv_tag) { + + case WIFIRX_PPDU_START_E: + ppdu_info->com_info.ppdu_id = + HAL_RX_GET(rx_tlv, RX_PPDU_START_0, + PHY_PPDU_ID); + /* channel number is set in PHY meta data */ + ppdu_info->rx_status.chan_num = + HAL_RX_GET(rx_tlv, RX_PPDU_START_1, + SW_PHY_META_DATA); + ppdu_info->com_info.ppdu_timestamp = + HAL_RX_GET(rx_tlv, RX_PPDU_START_2, + PPDU_START_TIMESTAMP); + ppdu_info->rx_state = HAL_RX_MON_PPDU_START; + break; + + case WIFIRX_PPDU_START_USER_INFO_E: + break; + + case WIFIRX_PPDU_END_E: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "[%s][%d] ppdu_end_e len=%d", + __func__, __LINE__, tlv_len); + /* This is followed by sub-TLVs of PPDU_END */ + ppdu_info->rx_state = HAL_RX_MON_PPDU_END; + break; + + case WIFIRXPCU_PPDU_END_INFO_E: + ppdu_info->rx_status.tsft = + 
HAL_RX_GET(rx_tlv, RXPCU_PPDU_END_INFO_1, + WB_TIMESTAMP_UPPER_32); + ppdu_info->rx_status.tsft = (ppdu_info->rx_status.tsft << 32) | + HAL_RX_GET(rx_tlv, RXPCU_PPDU_END_INFO_0, + WB_TIMESTAMP_LOWER_32); + ppdu_info->rx_status.duration = + HAL_RX_GET(rx_tlv, RXPCU_PPDU_END_INFO_8, + RX_PPDU_DURATION); + break; + + case WIFIRX_PPDU_END_USER_STATS_E: + { + unsigned long tid = 0; + uint16_t seq = 0; + + ppdu_info->rx_status.ast_index = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_4, + AST_INDEX); + + tid = HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_12, + RECEIVED_QOS_DATA_TID_BITMAP); + ppdu_info->rx_status.tid = qdf_find_first_bit(&tid, sizeof(tid)*8); + + if (ppdu_info->rx_status.tid == (sizeof(tid) * 8)) + ppdu_info->rx_status.tid = HAL_TID_INVALID; + + ppdu_info->rx_status.tcp_msdu_count = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_9, + TCP_MSDU_COUNT) + + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_10, + TCP_ACK_MSDU_COUNT); + ppdu_info->rx_status.udp_msdu_count = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_9, + UDP_MSDU_COUNT); + ppdu_info->rx_status.other_msdu_count = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_10, + OTHER_MSDU_COUNT); + + ppdu_info->rx_status.frame_control_info_valid = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_3, + DATA_SEQUENCE_CONTROL_INFO_VALID); + + seq = HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_5, + FIRST_DATA_SEQ_CTRL); + if (ppdu_info->rx_status.frame_control_info_valid) + ppdu_info->rx_status.first_data_seq_ctrl = seq; + + ppdu_info->rx_status.preamble_type = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_3, + HT_CONTROL_FIELD_PKT_TYPE); + switch (ppdu_info->rx_status.preamble_type) { + case HAL_RX_PKT_TYPE_11N: + ppdu_info->rx_status.ht_flags = 1; + ppdu_info->rx_status.rtap_flags |= HT_SGI_PRESENT; + break; + case HAL_RX_PKT_TYPE_11AC: + ppdu_info->rx_status.vht_flags = 1; + break; + case HAL_RX_PKT_TYPE_11AX: + ppdu_info->rx_status.he_flags = 1; + break; + default: + break; + } + + ppdu_info->com_info.mpdu_cnt_fcs_ok = + 
HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_3, + MPDU_CNT_FCS_OK); + ppdu_info->com_info.mpdu_cnt_fcs_err = + HAL_RX_GET(rx_tlv, RX_PPDU_END_USER_STATS_2, + MPDU_CNT_FCS_ERR); + if ((ppdu_info->com_info.mpdu_cnt_fcs_ok | + ppdu_info->com_info.mpdu_cnt_fcs_err) > 1) + ppdu_info->rx_status.rs_flags |= IEEE80211_AMPDU_FLAG; + else + ppdu_info->rx_status.rs_flags &= + (~IEEE80211_AMPDU_FLAG); + break; + } + + case WIFIRX_PPDU_END_USER_STATS_EXT_E: + break; + + case WIFIRX_PPDU_END_STATUS_DONE_E: + return HAL_TLV_STATUS_PPDU_DONE; + + case WIFIDUMMY_E: + return HAL_TLV_STATUS_BUF_DONE; + + case WIFIPHYRX_HT_SIG_E: + { + uint8_t *ht_sig_info = (uint8_t *)rx_tlv + + HAL_RX_OFFSET(PHYRX_HT_SIG_0, + HT_SIG_INFO_PHYRX_HT_SIG_INFO_DETAILS); + value = HAL_RX_GET(ht_sig_info, HT_SIG_INFO_1, + FEC_CODING); + ppdu_info->rx_status.ldpc = (value == HAL_SU_MU_CODING_LDPC) ? + 1 : 0; + ppdu_info->rx_status.mcs = HAL_RX_GET(ht_sig_info, + HT_SIG_INFO_0, MCS); + ppdu_info->rx_status.bw = HAL_RX_GET(ht_sig_info, + HT_SIG_INFO_0, CBW); + ppdu_info->rx_status.sgi = HAL_RX_GET(ht_sig_info, + HT_SIG_INFO_1, SHORT_GI); + break; + } + + case WIFIPHYRX_L_SIG_B_E: + { + uint8_t *l_sig_b_info = (uint8_t *)rx_tlv + + HAL_RX_OFFSET(PHYRX_L_SIG_B_0, + L_SIG_B_INFO_PHYRX_L_SIG_B_INFO_DETAILS); + + value = HAL_RX_GET(l_sig_b_info, L_SIG_B_INFO_0, RATE); + switch (value) { + case 1: + ppdu_info->rx_status.rate = HAL_11B_RATE_3MCS; + break; + case 2: + ppdu_info->rx_status.rate = HAL_11B_RATE_2MCS; + break; + case 3: + ppdu_info->rx_status.rate = HAL_11B_RATE_1MCS; + break; + case 4: + ppdu_info->rx_status.rate = HAL_11B_RATE_0MCS; + break; + case 5: + ppdu_info->rx_status.rate = HAL_11B_RATE_6MCS; + break; + case 6: + ppdu_info->rx_status.rate = HAL_11B_RATE_5MCS; + break; + case 7: + ppdu_info->rx_status.rate = HAL_11B_RATE_4MCS; + break; + default: + break; + } + ppdu_info->rx_status.cck_flag = 1; + break; + } + + case WIFIPHYRX_L_SIG_A_E: + { + uint8_t *l_sig_a_info = (uint8_t *)rx_tlv + + 
HAL_RX_OFFSET(PHYRX_L_SIG_A_0, + L_SIG_A_INFO_PHYRX_L_SIG_A_INFO_DETAILS); + + value = HAL_RX_GET(l_sig_a_info, L_SIG_A_INFO_0, RATE); + switch (value) { + case 8: + ppdu_info->rx_status.rate = HAL_11A_RATE_0MCS; + break; + case 9: + ppdu_info->rx_status.rate = HAL_11A_RATE_1MCS; + break; + case 10: + ppdu_info->rx_status.rate = HAL_11A_RATE_2MCS; + break; + case 11: + ppdu_info->rx_status.rate = HAL_11A_RATE_3MCS; + break; + case 12: + ppdu_info->rx_status.rate = HAL_11A_RATE_4MCS; + break; + case 13: + ppdu_info->rx_status.rate = HAL_11A_RATE_5MCS; + break; + case 14: + ppdu_info->rx_status.rate = HAL_11A_RATE_6MCS; + break; + case 15: + ppdu_info->rx_status.rate = HAL_11A_RATE_7MCS; + break; + default: + break; + } + ppdu_info->rx_status.ofdm_flag = 1; + break; + } + + case WIFIPHYRX_VHT_SIG_A_E: + { + uint8_t *vht_sig_a_info = (uint8_t *)rx_tlv + + HAL_RX_OFFSET(PHYRX_VHT_SIG_A_0, + VHT_SIG_A_INFO_PHYRX_VHT_SIG_A_INFO_DETAILS); + + value = HAL_RX_GET(vht_sig_a_info, VHT_SIG_A_INFO_1, + SU_MU_CODING); + ppdu_info->rx_status.ldpc = (value == HAL_SU_MU_CODING_LDPC) ? 
+ 1 : 0; + group_id = HAL_RX_GET(vht_sig_a_info, VHT_SIG_A_INFO_0, GROUP_ID); + ppdu_info->rx_status.vht_flag_values5 = group_id; + ppdu_info->rx_status.mcs = HAL_RX_GET(vht_sig_a_info, + VHT_SIG_A_INFO_1, MCS); + ppdu_info->rx_status.sgi = HAL_RX_GET(vht_sig_a_info, + VHT_SIG_A_INFO_1, GI_SETTING); +#if !defined(QCA_WIFI_QCA6290_11AX) + value = HAL_RX_GET(vht_sig_a_info, + VHT_SIG_A_INFO_0, N_STS); + ppdu_info->rx_status.nss = ((value & VHT_SIG_SU_NSS_MASK) + 1); +#else + ppdu_info->rx_status.nss = 0; +#endif + ppdu_info->rx_status.vht_flag_values3[0] = + (((ppdu_info->rx_status.mcs) << 4) + | ppdu_info->rx_status.nss); + ppdu_info->rx_status.bw = HAL_RX_GET(vht_sig_a_info, + VHT_SIG_A_INFO_0, BANDWIDTH); + ppdu_info->rx_status.vht_flag_values2 = + ppdu_info->rx_status.bw; + ppdu_info->rx_status.vht_flag_values4 = + HAL_RX_GET(vht_sig_a_info, + VHT_SIG_A_INFO_1, SU_MU_CODING); + + ppdu_info->rx_status.beamformed = HAL_RX_GET(vht_sig_a_info, + VHT_SIG_A_INFO_1, BEAMFORMED); + + break; + } + case WIFIPHYRX_HE_SIG_A_SU_E: + { + uint8_t *he_sig_a_su_info = (uint8_t *)rx_tlv + + HAL_RX_OFFSET(PHYRX_HE_SIG_A_SU_0, + HE_SIG_A_SU_INFO_PHYRX_HE_SIG_A_SU_INFO_DETAILS); + ppdu_info->rx_status.he_flags = 1; + value = HAL_RX_GET(he_sig_a_su_info, HE_SIG_A_SU_INFO_0, + FORMAT_INDICATION); + if (value == 0) { + ppdu_info->rx_status.he_data1 = + QDF_MON_STATUS_HE_TRIG_FORMAT_TYPE; + } else { + ppdu_info->rx_status.he_data1 = + QDF_MON_STATUS_HE_SU_FORMAT_TYPE; + } + + /* data1 */ + ppdu_info->rx_status.he_data1 |= + QDF_MON_STATUS_HE_BSS_COLOR_KNOWN | + QDF_MON_STATUS_HE_BEAM_CHANGE_KNOWN | + QDF_MON_STATUS_HE_DL_UL_KNOWN | + QDF_MON_STATUS_HE_MCS_KNOWN | + QDF_MON_STATUS_HE_DCM_KNOWN | + QDF_MON_STATUS_HE_CODING_KNOWN | + QDF_MON_STATUS_HE_LDPC_EXTRA_SYMBOL_KNOWN | + QDF_MON_STATUS_HE_STBC_KNOWN | + QDF_MON_STATUS_HE_DATA_BW_RU_KNOWN | + QDF_MON_STATUS_HE_DOPPLER_KNOWN; + + /* data2 */ + ppdu_info->rx_status.he_data2 = + QDF_MON_STATUS_HE_GI_KNOWN; + 
ppdu_info->rx_status.he_data2 |= + QDF_MON_STATUS_TXBF_KNOWN | + QDF_MON_STATUS_PE_DISAMBIGUITY_KNOWN | + QDF_MON_STATUS_TXOP_KNOWN | + QDF_MON_STATUS_LTF_SYMBOLS_KNOWN | + QDF_MON_STATUS_PRE_FEC_PADDING_KNOWN | + QDF_MON_STATUS_MIDABLE_PERIODICITY_KNOWN; + + /* data3 */ + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_0, BSS_COLOR_ID); + ppdu_info->rx_status.he_data3 = value; + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_0, BEAM_CHANGE); + value = value << QDF_MON_STATUS_BEAM_CHANGE_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_0, DL_UL_FLAG); + value = value << QDF_MON_STATUS_DL_UL_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_0, TRANSMIT_MCS); + ppdu_info->rx_status.mcs = value; + value = value << QDF_MON_STATUS_TRANSMIT_MCS_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_0, DCM); + he_dcm = value; + value = value << QDF_MON_STATUS_DCM_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_1, CODING); + value = value << QDF_MON_STATUS_CODING_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_1, + LDPC_EXTRA_SYMBOL); + value = value << QDF_MON_STATUS_LDPC_EXTRA_SYMBOL_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_1, STBC); + he_stbc = value; + value = value << QDF_MON_STATUS_STBC_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + /* data4 */ + value = HAL_RX_GET(he_sig_a_su_info, HE_SIG_A_SU_INFO_0, + SPATIAL_REUSE); + ppdu_info->rx_status.he_data4 = value; + + /* data5 */ + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_0, TRANSMIT_BW); + ppdu_info->rx_status.he_data5 = value; + ppdu_info->rx_status.bw = value; + value = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_0, 
CP_LTF_SIZE); + switch (value) { + case 0: + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_1_X; + break; + case 1: + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_2_X; + break; + case 2: + he_gi = HE_GI_1_6; + he_ltf = HE_LTF_2_X; + break; + case 3: + if (he_dcm && he_stbc) { + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_4_X; + } else { + he_gi = HE_GI_3_2; + he_ltf = HE_LTF_4_X; + } + break; + } + ppdu_info->rx_status.sgi = he_gi; + value = he_gi << QDF_MON_STATUS_GI_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + value = he_ltf << QDF_MON_STATUS_HE_LTF_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + value = HAL_RX_GET(he_sig_a_su_info, HE_SIG_A_SU_INFO_1, + PACKET_EXTENSION_A_FACTOR); + value = value << QDF_MON_STATUS_PRE_FEC_PAD_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + + value = HAL_RX_GET(he_sig_a_su_info, HE_SIG_A_SU_INFO_1, TXBF); + value = value << QDF_MON_STATUS_TXBF_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + value = HAL_RX_GET(he_sig_a_su_info, HE_SIG_A_SU_INFO_1, + PACKET_EXTENSION_PE_DISAMBIGUITY); + value = value << QDF_MON_STATUS_PE_DISAMBIGUITY_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + + /* data6 */ + value = HAL_RX_GET(he_sig_a_su_info, HE_SIG_A_SU_INFO_0, NSTS); + value++; + ppdu_info->rx_status.nss = value; + ppdu_info->rx_status.he_data6 = value; + value = HAL_RX_GET(he_sig_a_su_info, HE_SIG_A_SU_INFO_1, + DOPPLER_INDICATION); + value = value << QDF_MON_STATUS_DOPPLER_SHIFT; + ppdu_info->rx_status.he_data6 |= value; + value = HAL_RX_GET(he_sig_a_su_info, HE_SIG_A_SU_INFO_1, + TXOP_DURATION); + value = value << QDF_MON_STATUS_TXOP_SHIFT; + ppdu_info->rx_status.he_data6 |= value; + + ppdu_info->rx_status.beamformed = HAL_RX_GET(he_sig_a_su_info, + HE_SIG_A_SU_INFO_1, TXBF); + break; + } + case WIFIPHYRX_HE_SIG_A_MU_DL_E: + { + uint8_t *he_sig_a_mu_dl_info = (uint8_t *)rx_tlv + + HAL_RX_OFFSET(PHYRX_HE_SIG_A_MU_DL_0, + HE_SIG_A_MU_DL_INFO_PHYRX_HE_SIG_A_MU_DL_INFO_DETAILS); + + ppdu_info->rx_status.he_mu_flags = 1; + + /* HE Flags */ + 
/*data1*/ + ppdu_info->rx_status.he_data1 = + QDF_MON_STATUS_HE_MU_FORMAT_TYPE; + ppdu_info->rx_status.he_data1 |= + QDF_MON_STATUS_HE_BSS_COLOR_KNOWN | + QDF_MON_STATUS_HE_DL_UL_KNOWN | + QDF_MON_STATUS_HE_LDPC_EXTRA_SYMBOL_KNOWN | + QDF_MON_STATUS_HE_STBC_KNOWN | + QDF_MON_STATUS_HE_DATA_BW_RU_KNOWN | + QDF_MON_STATUS_HE_DOPPLER_KNOWN; + + /* data2 */ + ppdu_info->rx_status.he_data2 = + QDF_MON_STATUS_HE_GI_KNOWN; + ppdu_info->rx_status.he_data2 |= + QDF_MON_STATUS_LTF_SYMBOLS_KNOWN | + QDF_MON_STATUS_PRE_FEC_PADDING_KNOWN | + QDF_MON_STATUS_PE_DISAMBIGUITY_KNOWN | + QDF_MON_STATUS_TXOP_KNOWN | + QDF_MON_STATUS_MIDABLE_PERIODICITY_KNOWN; + + /*data3*/ + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_0, BSS_COLOR_ID); + ppdu_info->rx_status.he_data3 = value; + + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_0, DL_UL_FLAG); + value = value << QDF_MON_STATUS_DL_UL_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_1, + LDPC_EXTRA_SYMBOL); + value = value << QDF_MON_STATUS_LDPC_EXTRA_SYMBOL_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_1, STBC); + he_stbc = value; + value = value << QDF_MON_STATUS_STBC_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + /*data4*/ + value = HAL_RX_GET(he_sig_a_mu_dl_info, HE_SIG_A_MU_DL_INFO_0, + SPATIAL_REUSE); + ppdu_info->rx_status.he_data4 = value; + + /*data5*/ + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_0, TRANSMIT_BW); + ppdu_info->rx_status.he_data5 = value; + ppdu_info->rx_status.bw = value; + + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_0, CP_LTF_SIZE); + switch (value) { + case 0: + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_4_X; + break; + case 1: + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_2_X; + break; + case 2: + he_gi = HE_GI_1_6; + he_ltf = HE_LTF_2_X; + break; + case 3: + he_gi = HE_GI_3_2; + he_ltf = HE_LTF_4_X; + break; + } + 
ppdu_info->rx_status.sgi = he_gi; + value = he_gi << QDF_MON_STATUS_GI_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + + value = he_ltf << QDF_MON_STATUS_HE_LTF_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + + value = HAL_RX_GET(he_sig_a_mu_dl_info, HE_SIG_A_MU_DL_INFO_1, + PACKET_EXTENSION_A_FACTOR); + value = value << QDF_MON_STATUS_PRE_FEC_PAD_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + + + value = HAL_RX_GET(he_sig_a_mu_dl_info, HE_SIG_A_MU_DL_INFO_1, + PACKET_EXTENSION_PE_DISAMBIGUITY); + value = value << QDF_MON_STATUS_PE_DISAMBIGUITY_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + + /*data6*/ + value = HAL_RX_GET(he_sig_a_mu_dl_info, HE_SIG_A_MU_DL_INFO_0, + DOPPLER_INDICATION); + value = value << QDF_MON_STATUS_DOPPLER_SHIFT; + ppdu_info->rx_status.he_data6 |= value; + + value = HAL_RX_GET(he_sig_a_mu_dl_info, HE_SIG_A_MU_DL_INFO_1, + TXOP_DURATION); + value = value << QDF_MON_STATUS_TXOP_SHIFT; + ppdu_info->rx_status.he_data6 |= value; + + /* HE-MU Flags */ + /* HE-MU-flags1 */ + ppdu_info->rx_status.he_flags1 = + QDF_MON_STATUS_SIG_B_MCS_KNOWN | + QDF_MON_STATUS_SIG_B_DCM_KNOWN | + QDF_MON_STATUS_SIG_B_COMPRESSION_FLAG_1_KNOWN | + QDF_MON_STATUS_SIG_B_SYM_NUM_KNOWN | + QDF_MON_STATUS_RU_0_KNOWN; + + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_0, MCS_OF_SIG_B); + ppdu_info->rx_status.he_flags1 |= value; + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_0, DCM_OF_SIG_B); + value = value << QDF_MON_STATUS_DCM_FLAG_1_SHIFT; + ppdu_info->rx_status.he_flags1 |= value; + + /* HE-MU-flags2 */ + ppdu_info->rx_status.he_flags2 = + QDF_MON_STATUS_BW_KNOWN; + + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_0, TRANSMIT_BW); + ppdu_info->rx_status.he_flags2 |= value; + value = HAL_RX_GET(he_sig_a_mu_dl_info, + HE_SIG_A_MU_DL_INFO_0, COMP_MODE_SIG_B); + value = value << QDF_MON_STATUS_SIG_B_COMPRESSION_FLAG_2_SHIFT; + ppdu_info->rx_status.he_flags2 |= value; + value = HAL_RX_GET(he_sig_a_mu_dl_info, + 
HE_SIG_A_MU_DL_INFO_0, NUM_SIG_B_SYMBOLS); + value = value - 1; + value = value << QDF_MON_STATUS_NUM_SIG_B_SYMBOLS_SHIFT; + ppdu_info->rx_status.he_flags2 |= value; + break; + } + case WIFIPHYRX_HE_SIG_B1_MU_E: + { + + uint8_t *he_sig_b1_mu_info = (uint8_t *)rx_tlv + + HAL_RX_OFFSET(PHYRX_HE_SIG_B1_MU_0, + HE_SIG_B1_MU_INFO_PHYRX_HE_SIG_B1_MU_INFO_DETAILS); + + ppdu_info->rx_status.he_sig_b_common_known |= + QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU0; + /* TODO: Check on the availability of other fields in + * sig_b_common + */ + + value = HAL_RX_GET(he_sig_b1_mu_info, + HE_SIG_B1_MU_INFO_0, RU_ALLOCATION); + ppdu_info->rx_status.he_RU[0] = value; + break; + } + case WIFIPHYRX_HE_SIG_B2_MU_E: + { + uint8_t *he_sig_b2_mu_info = (uint8_t *)rx_tlv + + HAL_RX_OFFSET(PHYRX_HE_SIG_B2_MU_0, + HE_SIG_B2_MU_INFO_PHYRX_HE_SIG_B2_MU_INFO_DETAILS); + /* + * Not all "HE" fields can be updated from + * WIFIPHYRX_HE_SIG_A_MU_DL_E TLV. Use WIFIPHYRX_HE_SIG_B2_MU_E + * to populate rest of the "HE" fields for MU scenarios. 
+ */ + + /* HE-data1 */ + ppdu_info->rx_status.he_data1 |= + QDF_MON_STATUS_HE_MCS_KNOWN | + QDF_MON_STATUS_HE_CODING_KNOWN; + + /* HE-data2 */ + + /* HE-data3 */ + value = HAL_RX_GET(he_sig_b2_mu_info, + HE_SIG_B2_MU_INFO_0, STA_MCS); + ppdu_info->rx_status.mcs = value; + value = value << QDF_MON_STATUS_TRANSMIT_MCS_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + + value = HAL_RX_GET(he_sig_b2_mu_info, + HE_SIG_B2_MU_INFO_0, STA_CODING); + value = value << QDF_MON_STATUS_CODING_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + /* HE-data4 */ + value = HAL_RX_GET(he_sig_b2_mu_info, + HE_SIG_B2_MU_INFO_0, STA_ID); + value = value << QDF_MON_STATUS_STA_ID_SHIFT; + ppdu_info->rx_status.he_data4 |= value; + + /* HE-data5 */ + + /* HE-data6 */ + value = HAL_RX_GET(he_sig_b2_mu_info, + HE_SIG_B2_MU_INFO_0, NSTS); + /* value n indicates n+1 spatial streams */ + value++; + ppdu_info->rx_status.nss = value; + ppdu_info->rx_status.he_data6 |= value; + + break; + + } + case WIFIPHYRX_HE_SIG_B2_OFDMA_E: + { + uint8_t *he_sig_b2_ofdma_info = + (uint8_t *)rx_tlv + + HAL_RX_OFFSET(PHYRX_HE_SIG_B2_OFDMA_0, + HE_SIG_B2_OFDMA_INFO_PHYRX_HE_SIG_B2_OFDMA_INFO_DETAILS); + + /* + * Not all "HE" fields can be updated from + * WIFIPHYRX_HE_SIG_A_MU_DL_E TLV. Use WIFIPHYRX_HE_SIG_B2_MU_E + * to populate rest of "HE" fields for MU OFDMA scenarios. 
+ */ + + /* HE-data1 */ + ppdu_info->rx_status.he_data1 |= + QDF_MON_STATUS_HE_MCS_KNOWN | + QDF_MON_STATUS_HE_DCM_KNOWN | + QDF_MON_STATUS_HE_CODING_KNOWN; + + /* HE-data2 */ + ppdu_info->rx_status.he_data2 |= + QDF_MON_STATUS_TXBF_KNOWN; + + /* HE-data3 */ + value = HAL_RX_GET(he_sig_b2_ofdma_info, + HE_SIG_B2_OFDMA_INFO_0, STA_MCS); + ppdu_info->rx_status.mcs = value; + value = value << QDF_MON_STATUS_TRANSMIT_MCS_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + value = HAL_RX_GET(he_sig_b2_ofdma_info, + HE_SIG_B2_OFDMA_INFO_0, STA_DCM); + he_dcm = value; + value = value << QDF_MON_STATUS_DCM_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + value = HAL_RX_GET(he_sig_b2_ofdma_info, + HE_SIG_B2_OFDMA_INFO_0, STA_CODING); + value = value << QDF_MON_STATUS_CODING_SHIFT; + ppdu_info->rx_status.he_data3 |= value; + + /* HE-data4 */ + value = HAL_RX_GET(he_sig_b2_ofdma_info, + HE_SIG_B2_OFDMA_INFO_0, STA_ID); + value = value << QDF_MON_STATUS_STA_ID_SHIFT; + ppdu_info->rx_status.he_data4 |= value; + + /* HE-data5 */ + value = HAL_RX_GET(he_sig_b2_ofdma_info, + HE_SIG_B2_OFDMA_INFO_0, TXBF); + value = value << QDF_MON_STATUS_TXBF_SHIFT; + ppdu_info->rx_status.he_data5 |= value; + + /* HE-data6 */ + value = HAL_RX_GET(he_sig_b2_ofdma_info, + HE_SIG_B2_OFDMA_INFO_0, NSTS); + /* value n indicates n+1 spatial streams */ + value++; + ppdu_info->rx_status.nss = value; + ppdu_info->rx_status.he_data6 |= value; + + break; + } + case WIFIPHYRX_RSSI_LEGACY_E: + { + uint8_t *rssi_info_tlv = (uint8_t *)rx_tlv + + HAL_RX_OFFSET(PHYRX_RSSI_LEGACY_3, + RECEIVE_RSSI_INFO_PRE_RSSI_INFO_DETAILS); + + ppdu_info->rx_status.rssi_comb = HAL_RX_GET(rx_tlv, + PHYRX_RSSI_LEGACY_35, RSSI_COMB); + ppdu_info->rx_status.bw = HAL_RX_GET(rx_tlv, +#if !defined(QCA_WIFI_QCA6290_11AX) + PHYRX_RSSI_LEGACY_35, RECEIVE_BANDWIDTH); +#else + PHYRX_RSSI_LEGACY_0, RECEIVE_BANDWIDTH); +#endif + ppdu_info->rx_status.he_re = 0; + + ppdu_info->rx_status.reception_type = HAL_RX_GET(rx_tlv, + 
PHYRX_RSSI_LEGACY_0, RECEPTION_TYPE); + + value = HAL_RX_GET(rssi_info_tlv, + RECEIVE_RSSI_INFO_0, RSSI_PRI20_CHAIN0); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "RSSI_PRI20_CHAIN0: %d\n", value); + + value = HAL_RX_GET(rssi_info_tlv, + RECEIVE_RSSI_INFO_0, RSSI_EXT20_CHAIN0); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "RSSI_EXT20_CHAIN0: %d\n", value); + + value = HAL_RX_GET(rssi_info_tlv, + RECEIVE_RSSI_INFO_0, RSSI_EXT40_LOW20_CHAIN0); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "RSSI_EXT40_LOW20_CHAIN0: %d\n", value); + + value = HAL_RX_GET(rssi_info_tlv, + RECEIVE_RSSI_INFO_0, RSSI_EXT40_HIGH20_CHAIN0); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "RSSI_EXT40_HIGH20_CHAIN0: %d\n", value); + + value = HAL_RX_GET(rssi_info_tlv, + RECEIVE_RSSI_INFO_1, RSSI_EXT80_LOW20_CHAIN0); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "RSSI_EXT80_LOW20_CHAIN0: %d\n", value); + + value = HAL_RX_GET(rssi_info_tlv, + RECEIVE_RSSI_INFO_1, RSSI_EXT80_LOW_HIGH20_CHAIN0); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "RSSI_EXT80_LOW_HIGH20_CHAIN0: %d\n", value); + + value = HAL_RX_GET(rssi_info_tlv, + RECEIVE_RSSI_INFO_1, RSSI_EXT80_HIGH_LOW20_CHAIN0); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "RSSI_EXT80_HIGH_LOW20_CHAIN0: %d\n", value); + + value = HAL_RX_GET(rssi_info_tlv, + RECEIVE_RSSI_INFO_1, RSSI_EXT80_HIGH20_CHAIN0); + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "RSSI_EXT80_HIGH20_CHAIN0: %d\n", value); + break; + } + case WIFIPHYRX_OTHER_RECEIVE_INFO_E: + hal_rx_proc_phyrx_other_receive_info_tlv(rx_tlv_hdr, ppdu_info); + break; + case WIFIRX_HEADER_E: + ppdu_info->msdu_info.first_msdu_payload = rx_tlv; + ppdu_info->msdu_info.payload_len = tlv_len; + break; + case WIFIRX_MPDU_START_E: + { + uint8_t *rx_mpdu_start = + (uint8_t *)rx_tlv + HAL_RX_OFFSET(RX_MPDU_START_0, + RX_MPDU_INFO_RX_MPDU_INFO_DETAILS); + uint32_t ppdu_id = HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_0, + PHY_PPDU_ID); + + if 
(ppdu_info->rx_status.prev_ppdu_id != ppdu_id) { + ppdu_info->rx_status.prev_ppdu_id = ppdu_id; + ppdu_info->rx_status.ppdu_len = + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_13, + MPDU_LENGTH); + } else { + ppdu_info->rx_status.ppdu_len += + HAL_RX_GET(rx_mpdu_start, RX_MPDU_INFO_13, + MPDU_LENGTH); + } + break; + } + case 0: + return HAL_TLV_STATUS_PPDU_DONE; + + default: + unhandled = true; + break; + } + + if (!unhandled) + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, + "%s TLV type: %d, TLV len:%d %s", + __func__, tlv_tag, tlv_len, + unhandled == true ? "unhandled" : ""); + + qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, rx_tlv, tlv_len); + + return HAL_TLV_STATUS_PPDU_NOT_DONE; +} + +static inline +uint32_t hal_get_rx_status_done_tlv_size(void *hal_soc) +{ + return HAL_RX_TLV32_HDR_SIZE; +} + +static inline QDF_STATUS +hal_get_rx_status_done(uint8_t *rx_tlv) +{ + uint32_t tlv_tag; + + tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv); + + if (tlv_tag == WIFIRX_STATUS_BUFFER_DONE_E) + return QDF_STATUS_SUCCESS; + else + return QDF_STATUS_E_EMPTY; +} + +static inline QDF_STATUS +hal_clear_rx_status_done(uint8_t *rx_tlv) +{ + *(uint32_t *)rx_tlv = 0; + return QDF_STATUS_SUCCESS; +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_internal.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..45e547a05cd100bb0a8b9b63bfa894563f959473 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_internal.h @@ -0,0 +1,395 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _HAL_INTERNAL_H_ +#define _HAL_INTERNAL_H_ + +#include "qdf_types.h" +#include "qdf_lock.h" +#include "qdf_mem.h" +#include "qdf_nbuf.h" +#include "wcss_seq_hwiobase.h" +#include "tlv_hdr.h" +#include "tlv_tag_def.h" +#include "reo_destination_ring.h" +#include "reo_reg_seq_hwioreg.h" +#include "reo_entrance_ring.h" +#include "reo_get_queue_stats.h" +#include "reo_get_queue_stats_status.h" +#include "tcl_data_cmd.h" +#include "tcl_gse_cmd.h" +#include "tcl_status_ring.h" +#include "mac_tcl_reg_seq_hwioreg.h" +#include "ce_src_desc.h" +#include "ce_stat_desc.h" +#include "wfss_ce_reg_seq_hwioreg.h" +#include "wbm_link_descriptor_ring.h" +#include "wbm_reg_seq_hwioreg.h" +#include "wbm_buffer_ring.h" +#include "wbm_release_ring.h" +#include "rx_msdu_desc_info.h" +#include "rx_mpdu_start.h" +#include "rx_mpdu_end.h" +#include "rx_msdu_start.h" +#include "rx_msdu_end.h" +#include "rx_attention.h" +#include "rx_ppdu_start.h" +#include "rx_ppdu_start_user_info.h" +#include "rx_ppdu_end_user_stats.h" +#include "rx_ppdu_end_user_stats_ext.h" +#include "rx_mpdu_desc_info.h" +#include "rxpcu_ppdu_end_info.h" +#include "phyrx_he_sig_a_su.h" +#include "phyrx_he_sig_a_mu_dl.h" +#include "phyrx_he_sig_b1_mu.h" +#include "phyrx_he_sig_b2_mu.h" +#include "phyrx_he_sig_b2_ofdma.h" +#include "phyrx_l_sig_a.h" +#include "phyrx_l_sig_b.h" +#include "phyrx_vht_sig_a.h" +#include "phyrx_ht_sig.h" +#include "tx_msdu_extension.h" +#include "receive_rssi_info.h" +#include "phyrx_pkt_end.h" +#include "phyrx_rssi_legacy.h" +#include "wcss_version.h" +#include "pld_common.h" +#include "rx_msdu_link.h" + +#ifdef QCA_WIFI_QCA6290_11AX +#include "phyrx_other_receive_info_ru_details.h" +#endif /* QCA_WIFI_QCA6290_11AX */ + +/* TBD: This should be movded to shared HW header file */ +enum hal_srng_ring_id { + /* UMAC rings */ + HAL_SRNG_REO2SW1 = 0, + HAL_SRNG_REO2SW2 = 1, + HAL_SRNG_REO2SW3 = 2, + HAL_SRNG_REO2SW4 = 3, + HAL_SRNG_REO2TCL = 4, + HAL_SRNG_SW2REO = 5, + /* 6-7 unused 
*/ + HAL_SRNG_REO_CMD = 8, + HAL_SRNG_REO_STATUS = 9, + /* 10-15 unused */ + HAL_SRNG_SW2TCL1 = 16, + HAL_SRNG_SW2TCL2 = 17, + HAL_SRNG_SW2TCL3 = 18, + HAL_SRNG_SW2TCL4 = 19, /* FW2TCL ring */ + /* 20-23 unused */ + HAL_SRNG_SW2TCL_CMD = 24, + HAL_SRNG_TCL_STATUS = 25, + /* 26-31 unused */ + HAL_SRNG_CE_0_SRC = 32, + HAL_SRNG_CE_1_SRC = 33, + HAL_SRNG_CE_2_SRC = 34, + HAL_SRNG_CE_3_SRC = 35, + HAL_SRNG_CE_4_SRC = 36, + HAL_SRNG_CE_5_SRC = 37, + HAL_SRNG_CE_6_SRC = 38, + HAL_SRNG_CE_7_SRC = 39, + HAL_SRNG_CE_8_SRC = 40, + HAL_SRNG_CE_9_SRC = 41, + HAL_SRNG_CE_10_SRC = 42, + HAL_SRNG_CE_11_SRC = 43, + /* 44-55 unused */ + HAL_SRNG_CE_0_DST = 56, + HAL_SRNG_CE_1_DST = 57, + HAL_SRNG_CE_2_DST = 58, + HAL_SRNG_CE_3_DST = 59, + HAL_SRNG_CE_4_DST = 60, + HAL_SRNG_CE_5_DST = 61, + HAL_SRNG_CE_6_DST = 62, + HAL_SRNG_CE_7_DST = 63, + HAL_SRNG_CE_8_DST = 64, + HAL_SRNG_CE_9_DST = 65, + HAL_SRNG_CE_10_DST = 66, + HAL_SRNG_CE_11_DST = 67, + /* 68-79 unused */ + HAL_SRNG_CE_0_DST_STATUS = 80, + HAL_SRNG_CE_1_DST_STATUS = 81, + HAL_SRNG_CE_2_DST_STATUS = 82, + HAL_SRNG_CE_3_DST_STATUS = 83, + HAL_SRNG_CE_4_DST_STATUS = 84, + HAL_SRNG_CE_5_DST_STATUS = 85, + HAL_SRNG_CE_6_DST_STATUS = 86, + HAL_SRNG_CE_7_DST_STATUS = 87, + HAL_SRNG_CE_8_DST_STATUS = 88, + HAL_SRNG_CE_9_DST_STATUS = 89, + HAL_SRNG_CE_10_DST_STATUS = 90, + HAL_SRNG_CE_11_DST_STATUS = 91, + /* 92-103 unused */ + HAL_SRNG_WBM_IDLE_LINK = 104, + HAL_SRNG_WBM_SW_RELEASE = 105, + HAL_SRNG_WBM2SW0_RELEASE = 106, + HAL_SRNG_WBM2SW1_RELEASE = 107, + HAL_SRNG_WBM2SW2_RELEASE = 108, + HAL_SRNG_WBM2SW3_RELEASE = 109, + /* 110-127 unused */ + HAL_SRNG_UMAC_ID_END = 127, + /* LMAC rings - The following set will be replicated for each LMAC */ + HAL_SRNG_LMAC1_ID_START = 128, + HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 = HAL_SRNG_LMAC1_ID_START, +#ifdef IPA_OFFLOAD + HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 = (HAL_SRNG_LMAC1_ID_START + 1), + HAL_SRNG_WMAC1_SW2RXDMA0_BUF2 = (HAL_SRNG_LMAC1_ID_START + 2), + HAL_SRNG_WMAC1_SW2RXDMA1_BUF = 
(HAL_SRNG_WMAC1_SW2RXDMA0_BUF2 + 1), +#else + HAL_SRNG_WMAC1_SW2RXDMA1_BUF = (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 + 1), +#endif + HAL_SRNG_WMAC1_SW2RXDMA2_BUF = (HAL_SRNG_WMAC1_SW2RXDMA1_BUF + 1), + HAL_SRNG_WMAC1_SW2RXDMA0_STATBUF = (HAL_SRNG_WMAC1_SW2RXDMA2_BUF + 1), + HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF = + (HAL_SRNG_WMAC1_SW2RXDMA0_STATBUF + 1), + HAL_SRNG_WMAC1_RXDMA2SW0 = (HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF + 1), + HAL_SRNG_WMAC1_RXDMA2SW1 = (HAL_SRNG_WMAC1_RXDMA2SW0 + 1), + HAL_SRNG_WMAC1_SW2RXDMA1_DESC = (HAL_SRNG_WMAC1_RXDMA2SW1 + 1), +#ifdef WLAN_FEATURE_CIF_CFR + HAL_SRNG_WIFI_POS_SRC_DMA_RING = (HAL_SRNG_WMAC1_SW2RXDMA1_DESC + 1), + HAL_SRNG_DIR_BUF_RX_SRC_DMA_RING = (HAL_SRNG_WIFI_POS_SRC_DMA_RING + 1), +#else + HAL_SRNG_DIR_BUF_RX_SRC_DMA_RING = (HAL_SRNG_WMAC1_SW2RXDMA1_DESC + 1), +#endif + /* -142 unused */ + HAL_SRNG_LMAC1_ID_END = 143 +}; + +#define HAL_SRNG_REO_EXCEPTION HAL_SRNG_REO2SW1 +#define HAL_SRNG_REO_ALTERNATE_SELECT 0x7 + +#define HAL_MAX_LMACS 3 +#define HAL_MAX_RINGS_PER_LMAC (HAL_SRNG_LMAC1_ID_END - HAL_SRNG_LMAC1_ID_START) +#define HAL_MAX_LMAC_RINGS (HAL_MAX_LMACS * HAL_MAX_RINGS_PER_LMAC) + +#define HAL_SRNG_ID_MAX (HAL_SRNG_UMAC_ID_END + HAL_MAX_LMAC_RINGS) + +enum hal_srng_dir { + HAL_SRNG_SRC_RING, + HAL_SRNG_DST_RING +}; + +/* Lock wrappers for SRNG */ +#define hal_srng_lock_t qdf_spinlock_t +#define SRNG_LOCK_INIT(_lock) qdf_spinlock_create(_lock) +#define SRNG_LOCK(_lock) qdf_spin_lock_bh(_lock) +#define SRNG_UNLOCK(_lock) qdf_spin_unlock_bh(_lock) +#define SRNG_LOCK_DESTROY(_lock) qdf_spinlock_destroy(_lock) + +#define MAX_SRNG_REG_GROUPS 2 + +/* Common SRNG ring structure for source and destination rings */ +struct hal_srng { + /* Unique SRNG ring ID */ + uint8_t ring_id; + + /* Ring initialization done */ + uint8_t initialized; + + /* Interrupt/MSI value assigned to this ring */ + int irq; + + /* Physical base address of the ring */ + qdf_dma_addr_t ring_base_paddr; + + /* Virtual base address of the ring */ + uint32_t 
*ring_base_vaddr; + + /* Number of entries in ring */ + uint32_t num_entries; + + /* Ring size */ + uint32_t ring_size; + + /* Ring size mask */ + uint32_t ring_size_mask; + + /* Size of ring entry */ + uint32_t entry_size; + + /* Interrupt timer threshold – in micro seconds */ + uint32_t intr_timer_thres_us; + + /* Interrupt batch counter threshold – in number of ring entries */ + uint32_t intr_batch_cntr_thres_entries; + + /* MSI Address */ + qdf_dma_addr_t msi_addr; + + /* MSI data */ + uint32_t msi_data; + + /* Misc flags */ + uint32_t flags; + + /* Lock for serializing ring index updates */ + hal_srng_lock_t lock; + + /* Start offset of SRNG register groups for this ring + * TBD: See if this is required - register address can be derived + * from ring ID + */ + void *hwreg_base[MAX_SRNG_REG_GROUPS]; + + /* Source or Destination ring */ + enum hal_srng_dir ring_dir; + + union { + struct { + /* SW tail pointer */ + uint32_t tp; + + /* Shadow head pointer location to be updated by HW */ + uint32_t *hp_addr; + + /* Cached head pointer */ + uint32_t cached_hp; + + /* Tail pointer location to be updated by SW – This + * will be a register address and need not be + * accessed through SW structure */ + uint32_t *tp_addr; + + /* Current SW loop cnt */ + uint32_t loop_cnt; + + /* max transfer size */ + uint16_t max_buffer_length; + } dst_ring; + + struct { + /* SW head pointer */ + uint32_t hp; + + /* SW reap head pointer */ + uint32_t reap_hp; + + /* Shadow tail pointer location to be updated by HW */ + uint32_t *tp_addr; + + /* Cached tail pointer */ + uint32_t cached_tp; + + /* Head pointer location to be updated by SW – This + * will be a register address and need not be accessed + * through SW structure */ + uint32_t *hp_addr; + + /* Low threshold – in number of ring entries */ + uint32_t low_threshold; + } src_ring; + } u; + + struct hal_soc *hal_soc; +}; + +/* HW SRNG configuration table */ +struct hal_hw_srng_config { + int start_ring_id; + uint16_t max_rings; + 
uint16_t entry_size; + uint32_t reg_start[MAX_SRNG_REG_GROUPS]; + uint16_t reg_size[MAX_SRNG_REG_GROUPS]; + uint8_t lmac_ring; + enum hal_srng_dir ring_dir; +}; + +/* calculate the register address offset from bar0 of shadow register x */ +#define SHADOW_REGISTER(x) (0x00003024 + (4*x)) +#define MAX_SHADOW_REGISTERS 36 + +/** + * HAL context to be used to access SRNG APIs (currently used by data path + * and transport (CE) modules) + */ +struct hal_soc { + /* HIF handle to access HW registers */ + void *hif_handle; + + /* QDF device handle */ + qdf_device_t qdf_dev; + + /* Device base address */ + void *dev_base_addr; + + /* HAL internal state for all SRNG rings. + * TODO: See if this is required + */ + struct hal_srng srng_list[HAL_SRNG_ID_MAX]; + + /* Remote pointer memory for HW/FW updates */ + uint32_t *shadow_rdptr_mem_vaddr; + qdf_dma_addr_t shadow_rdptr_mem_paddr; + + /* Shared memory for ring pointer updates from host to FW */ + uint32_t *shadow_wrptr_mem_vaddr; + qdf_dma_addr_t shadow_wrptr_mem_paddr; + + /* REO blocking resource index */ + uint8_t reo_res_bitmap; + uint8_t index; + + /* shadow register configuration */ + struct pld_shadow_reg_v2_cfg shadow_config[MAX_SHADOW_REGISTERS]; + int num_shadow_registers_configured; + bool use_register_windowing; + uint32_t register_window; + qdf_spinlock_t register_access_lock; +}; + +/* TODO: Check if the following can be provided directly by HW headers */ +#define SRNG_LOOP_CNT_MASK REO_DESTINATION_RING_15_LOOPING_COUNT_MASK +#define SRNG_LOOP_CNT_LSB REO_DESTINATION_RING_15_LOOPING_COUNT_LSB + +#define HAL_SRNG_LMAC_RING 0x80000000 + +#define HAL_DEFAULT_REO_TIMEOUT_MS 40 /* milliseconds */ + +#define HAL_DESC_SET_FIELD(_desc, _word, _fld, _value) do { \ + ((uint32_t *)(_desc))[(_word ## _ ## _fld ## _OFFSET) >> 2] &= \ + ~(_word ## _ ## _fld ## _MASK); \ + ((uint32_t *)(_desc))[(_word ## _ ## _fld ## _OFFSET) >> 2] |= \ + ((_value) << _word ## _ ## _fld ## _LSB); \ +} while (0) + +#define HAL_SM(_reg, _fld, 
_val) \ + (((_val) << (_reg ## _ ## _fld ## _SHFT)) & \ + (_reg ## _ ## _fld ## _BMSK)) + +#define HAL_MS(_reg, _fld, _val) \ + (((_val) & (_reg ## _ ## _fld ## _BMSK)) >> \ + (_reg ## _ ## _fld ## _SHFT)) + +#define HAL_REG_WRITE(_soc, _reg, _value) \ + hal_write32_mb(_soc, (_reg), (_value)) + +#define HAL_REG_READ(_soc, _offset) \ + hal_read32_mb(_soc, (_offset)) + +#endif /* _HAL_INTERNAL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_reo.c b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_reo.c new file mode 100644 index 0000000000000000000000000000000000000000..7b4bc5ac2c564efbb77d21f6114fa561e0158cd9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_reo.c @@ -0,0 +1,988 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
/* Only the low 4 bits are meaningful: REO HW exposes 4 blocking resources. */
#define BLOCK_RES_MASK	0xF

/**
 * hal_find_one_bit() - Position of the least-significant set bit of @x
 * @x: bitmap of blocking resources
 *
 * Only bits covered by BLOCK_RES_MASK are considered. Returns 0xFF
 * (i.e. (uint8_t)-1) when no masked bit is set; callers treat any
 * value > 3 as "no resource".
 */
static inline uint8_t hal_find_one_bit(uint8_t x)
{
	uint8_t isolated = (x & (~x + 1)) & BLOCK_RES_MASK;
	uint8_t bit = 0;

	while (isolated) {
		bit++;
		isolated >>= 1;
	}

	return bit - 1;
}

/**
 * hal_find_zero_bit() - Position of the least-significant clear bit of @x
 * @x: bitmap of blocking resources
 *
 * Only bits covered by BLOCK_RES_MASK are considered. Returns 0xFF
 * when all masked bits are set; callers treat any value > 3 as
 * "no free resource".
 */
static inline uint8_t hal_find_zero_bit(uint8_t x)
{
	uint8_t isolated = (~x & (x + 1)) & BLOCK_RES_MASK;
	uint8_t bit = 0;

	while (isolated) {
		bit++;
		isolated >>= 1;
	}

	return bit - 1;
}
/**
 * hal_reo_cmd_flush_queue() - Post a REO FLUSH_QUEUE command descriptor
 * @reo_ring: REO command SRNG to post the command into
 * @soc: HAL SoC context
 * @cmd: command parameters; consumes std.{need_status, addr_lo, addr_hi}
 *       and u.fl_queue_params.{block_use_after_flush, index}
 *
 * Return: value of the REO_CMD_NUMBER field read back from the posted
 * descriptor header, or -EBUSY when no command ring entry is available.
 *
 * NOTE(review): the descriptor is zeroed below and REO_CMD_NUMBER is never
 * written by this function before it is read back — confirm who fills it.
 */
inline int hal_reo_cmd_flush_queue(void *reo_ring, struct hal_soc *soc,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		/* Ring full: bail out before touching any descriptor */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			sizeof(struct reo_flush_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after the TLV header, so skip past it first. */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_queue));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	/* Program the 40-bit address of the REO queue descriptor to flush */
	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo,
		cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
		BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
		cmd->u.fl_queue_params.block_use_after_flush);

	/* Optional: pin the flushed descriptor to a blocking resource */
	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			BLOCK_RESOURCE_INDEX, cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_flush_queue);
/**
 * hal_reo_cmd_unblock_cache() - Post a REO UNBLOCK_CACHE command descriptor
 * @reo_ring: REO command SRNG to post the command into
 * @soc: HAL SoC context
 * @cmd: command parameters; consumes std.need_status and
 *       u.unblk_cache_params.{type, index}
 *
 * When unblocking by resource index, validates via soc->reo_res_bitmap
 * that a blocking resource is actually in use before posting.
 *
 * Return: value of the REO_CMD_NUMBER field read back from the posted
 * descriptor header, or -EBUSY when no resource is blocked or the
 * command ring is full.
 *
 * NOTE(review): the locally computed 'index' from hal_find_one_bit() is
 * only used for validation; the descriptor is programmed with
 * cmd->u.unblk_cache_params.index instead — confirm this is intended.
 * NOTE(review): soc->reo_res_bitmap is not cleared here — presumably the
 * status handler releases the bit; verify against the caller.
 */
inline int hal_reo_cmd_unblock_cache(void *reo_ring, struct hal_soc *soc,
		struct hal_reo_cmd_params *cmd)

{
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(soc, reo_ring);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		index = hal_find_one_bit(soc->reo_res_bitmap);
		if (index > 3) {
			/* No blocking resource currently held */
			hal_srng_access_end(soc, reo_ring);
			qdf_print("%s: No blocking resource to unblock!\n",
				__func__);
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
		sizeof(struct reo_unblock_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after the TLV header, so skip past it first. */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_unblock_cache));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
		UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			CACHE_BLOCK_RESOURCE_INDEX,
			cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_unblock_cache);

/**
 * hal_reo_cmd_flush_timeout_list() - Post a REO FLUSH_TIMEOUT_LIST command
 * @reo_ring: REO command SRNG to post the command into
 * @soc: HAL SoC context
 * @cmd: command parameters; consumes std.need_status and
 *       u.fl_tim_list_params.{ac_list, min_rel_desc, min_fwd_buf}
 *
 * Return: value of the REO_CMD_NUMBER field read back from the posted
 * descriptor header, or -EBUSY when the command ring is full.
 */
inline int hal_reo_cmd_flush_timeout_list(void *reo_ring, struct hal_soc *soc,
		struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
		sizeof(struct reo_flush_timeout_list));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after the TLV header, so skip past it first. */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_timeout_list));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	/* AC_TIMOUT_LIST: (sic) macro name as defined in the HW header */
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
		cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_RELEASE_DESC_COUNT,
		cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_FORWARD_BUF_COUNT,
		cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_flush_timeout_list);
+ + hal_srng_access_start(soc, reo_ring); + reo_desc = hal_srng_src_get_next(soc, reo_ring); + if (!reo_desc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Out of cmd ring entries\n", __func__); + hal_srng_access_end(soc, reo_ring); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E, + sizeof(struct reo_update_rx_reo_queue)); + + /* Offsets of descriptor fields defined in HW headers start from + * the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((void *)reo_desc, sizeof(struct reo_update_rx_reo_queue)); + + HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, + REO_STATUS_REQUIRED, cmd->std.need_status); + + hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE, + cmd->std.addr_lo, cmd->std.addr_hi); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD, + p->update_vld); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER, + p->update_assoc_link_desc); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_DISABLE_DUPLICATE_DETECTION, + p->update_disable_dup_detect); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_DISABLE_DUPLICATE_DETECTION, + p->update_disable_dup_detect); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_SOFT_REORDER_ENABLE, + p->update_soft_reorder_enab); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_AC, p->update_ac); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_BAR, p->update_bar); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_BAR, p->update_bar); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_RTY, p->update_rty); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_CHK_2K_MODE, p->update_chk_2k_mode); 
+ + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_OOR_MODE, p->update_oor_mode); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_SIZE, p->update_pn_size); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_SVLD, p->update_svld); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_SSN, p->update_ssn); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_SEQ_2K_ERROR_DETECTED_FLAG, + p->update_seq_2k_err_detect); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_VALID, p->update_pn_valid); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN, p->update_pn); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + RECEIVE_QUEUE_NUMBER, p->rx_queue_num); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + VLD, p->vld); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + ASSOCIATED_LINK_DESCRIPTOR_COUNTER, + p->assoc_link_desc); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + SOFT_REORDER_ENABLE, p->soft_reorder_enab); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac); + + HAL_DESC_SET_FIELD(reo_desc, 
REO_UPDATE_RX_REO_QUEUE_3, + BAR, p->bar); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + CHK_2K_MODE, p->chk_2k_mode); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + RTY, p->rty); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + OOR_MODE, p->oor_mode); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + PN_CHECK_NEEDED, p->pn_check_needed); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + PN_SHALL_BE_EVEN, p->pn_even); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + PN_SHALL_BE_UNEVEN, p->pn_uneven); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + PN_HANDLING_ENABLE, p->pn_hand_enab); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + IGNORE_AMPDU_FLAG, p->ignore_ampdu); + + if (p->ba_window_size < 1) + p->ba_window_size = 1; + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + BA_WINDOW_SIZE, p->ba_window_size - 1); + + if (p->pn_size == 24) + p->pn_size = PN_SIZE_24; + else if (p->pn_size == 48) + p->pn_size = PN_SIZE_48; + else if (p->pn_size == 128) + p->pn_size = PN_SIZE_128; + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + PN_SIZE, p->pn_size); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + SVLD, p->svld); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + SSN, p->ssn); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + PN_ERROR_DETECTED_FLAG, p->pn_err_detect); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5, + PN_31_0, p->pn_31_0); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6, + PN_63_32, p->pn_63_32); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7, + PN_95_64, p->pn_95_64); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8, + PN_127_96, p->pn_127_96); + + hal_srng_access_end(soc, reo_ring); + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return 
HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, + val); +} +qdf_export_symbol(hal_reo_cmd_update_rx_queue); + +inline void hal_reo_queue_stats_status(uint32_t *reo_desc, + struct hal_reo_queue_status *st) +{ + uint32_t val; + + /* Offsets of descriptor fields defined in HW headers start + * from the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + HAL_REO_STATUS_GET_HEADER(reo_desc, REO_GET_QUEUE_STATS, st->header); + + /* SSN */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)]; + st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val); + + /* current index */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, + CURRENT_INDEX)]; + st->curr_idx = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, + CURRENT_INDEX, val); + + /* PN bits */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3, + PN_31_0)]; + st->pn_31_0 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3, + PN_31_0, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4, + PN_63_32)]; + st->pn_63_32 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4, + PN_63_32, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5, + PN_95_64)]; + st->pn_95_64 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5, + PN_95_64, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6, + PN_127_96)]; + st->pn_127_96 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6, + PN_127_96, val); + + /* timestamps */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7, + LAST_RX_ENQUEUE_TIMESTAMP)]; + st->last_rx_enq_tstamp = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7, + LAST_RX_ENQUEUE_TIMESTAMP, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8, + LAST_RX_DEQUEUE_TIMESTAMP)]; + st->last_rx_deq_tstamp = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8, + LAST_RX_DEQUEUE_TIMESTAMP, val); + + /* rx bitmap */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9, + RX_BITMAP_31_0)]; + 
st->rx_bitmap_31_0 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9, + RX_BITMAP_31_0, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10, + RX_BITMAP_63_32)]; + st->rx_bitmap_63_32 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10, + RX_BITMAP_63_32, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11, + RX_BITMAP_95_64)]; + st->rx_bitmap_95_64 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11, + RX_BITMAP_95_64, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12, + RX_BITMAP_127_96)]; + st->rx_bitmap_127_96 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12, + RX_BITMAP_127_96, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13, + RX_BITMAP_159_128)]; + st->rx_bitmap_159_128 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13, + RX_BITMAP_159_128, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14, + RX_BITMAP_191_160)]; + st->rx_bitmap_191_160 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14, + RX_BITMAP_191_160, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15, + RX_BITMAP_223_192)]; + st->rx_bitmap_223_192 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15, + RX_BITMAP_223_192, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16, + RX_BITMAP_255_224)]; + st->rx_bitmap_255_224 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16, + RX_BITMAP_255_224, val); + + /* various counts */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17, + CURRENT_MPDU_COUNT)]; + st->curr_mpdu_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17, + CURRENT_MPDU_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17, + CURRENT_MSDU_COUNT)]; + st->curr_msdu_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17, + CURRENT_MSDU_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18, + TIMEOUT_COUNT)]; + st->fwd_timeout_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18, + TIMEOUT_COUNT, val); + + val = 
reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18, + FORWARD_DUE_TO_BAR_COUNT)]; + st->fwd_bar_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18, + FORWARD_DUE_TO_BAR_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18, + DUPLICATE_COUNT)]; + st->dup_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18, + DUPLICATE_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19, + FRAMES_IN_ORDER_COUNT)]; + st->frms_in_order_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19, + FRAMES_IN_ORDER_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19, + BAR_RECEIVED_COUNT)]; + st->bar_rcvd_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19, + BAR_RECEIVED_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20, + MPDU_FRAMES_PROCESSED_COUNT)]; + st->mpdu_frms_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20, + MPDU_FRAMES_PROCESSED_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21, + MSDU_FRAMES_PROCESSED_COUNT)]; + st->msdu_frms_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21, + MSDU_FRAMES_PROCESSED_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22, + TOTAL_PROCESSED_BYTE_COUNT)]; + st->total_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22, + TOTAL_PROCESSED_BYTE_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23, + LATE_RECEIVE_MPDU_COUNT)]; + st->late_recv_mpdu_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23, + LATE_RECEIVE_MPDU_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23, + WINDOW_JUMP_2K)]; + st->win_jump_2k = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23, + WINDOW_JUMP_2K, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23, + HOLE_COUNT)]; + st->hole_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23, + HOLE_COUNT, val); +} +qdf_export_symbol(hal_reo_queue_stats_status); + +inline void hal_reo_flush_queue_status(uint32_t *reo_desc, + 
struct hal_reo_flush_queue_status *st) +{ + uint32_t val; + + /* Offsets of descriptor fields defined in HW headers start + * from the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_QUEUE, st->header); + + /* error bit */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_2, + ERROR_DETECTED)]; + st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED, + val); +} +qdf_export_symbol(hal_reo_flush_queue_status); + +inline void hal_reo_flush_cache_status(uint32_t *reo_desc, struct hal_soc *soc, + struct hal_reo_flush_cache_status *st) +{ + uint32_t val; + + /* Offsets of descriptor fields defined in HW headers start + * from the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_CACHE, st->header); + + /* error bit */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, + ERROR_DETECTED)]; + st->error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, ERROR_DETECTED, + val); + + /* block error */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, + BLOCK_ERROR_DETAILS)]; + st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, + BLOCK_ERROR_DETAILS, + val); + if (!st->block_error) + qdf_set_bit(soc->index, (unsigned long *)&soc->reo_res_bitmap); + + /* cache flush status */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_STATUS_HIT)]; + st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_STATUS_HIT, + val); + + /* cache flush descriptor type */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)]; + st->cache_flush_status_desc_type = + HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE, + val); + + /* cache flush count */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_COUNT)]; + 
st->cache_flush_cnt = + HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_COUNT, + val); + +} +qdf_export_symbol(hal_reo_flush_cache_status); + +inline void hal_reo_unblock_cache_status(uint32_t *reo_desc, + struct hal_soc *soc, + struct hal_reo_unblk_cache_status *st) +{ + uint32_t val; + + /* Offsets of descriptor fields defined in HW headers start + * from the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + HAL_REO_STATUS_GET_HEADER(reo_desc, REO_UNBLOCK_CACHE, st->header); + + /* error bit */ + val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2, + ERROR_DETECTED)]; + st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2, + ERROR_DETECTED, + val); + + /* unblock type */ + val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2, + UNBLOCK_TYPE)]; + st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2, + UNBLOCK_TYPE, + val); + + if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX)) + qdf_clear_bit(soc->index, + (unsigned long *)&soc->reo_res_bitmap); +} +qdf_export_symbol(hal_reo_unblock_cache_status); + +inline void hal_reo_flush_timeout_list_status( + uint32_t *reo_desc, + struct hal_reo_flush_timeout_list_status *st) + +{ + uint32_t val; + + /* Offsets of descriptor fields defined in HW headers start + * from the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_TIMEOUT_LIST, st->header); + + /* error bit */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2, + ERROR_DETECTED)]; + st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2, + ERROR_DETECTED, + val); + + /* list empty */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2, + TIMOUT_LIST_EMPTY)]; + st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2, + TIMOUT_LIST_EMPTY, + val); + + /* release descriptor count */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3, + RELEASE_DESC_COUNT)]; + 
st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3, + RELEASE_DESC_COUNT, + val); + + /* forward buf count */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3, + FORWARD_BUF_COUNT)]; + st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3, + FORWARD_BUF_COUNT, + val); +} +qdf_export_symbol(hal_reo_flush_timeout_list_status); + +inline void hal_reo_desc_thres_reached_status( + uint32_t *reo_desc, + struct hal_reo_desc_thres_reached_status *st) +{ + uint32_t val; + + /* Offsets of descriptor fields defined in HW headers start + * from the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + HAL_REO_STATUS_GET_HEADER(reo_desc, + REO_DESCRIPTOR_THRESHOLD_REACHED, st->header); + + /* threshold index */ + val = reo_desc[HAL_OFFSET_DW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2, + THRESHOLD_INDEX)]; + st->thres_index = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2, + THRESHOLD_INDEX, + val); + + /* link desc counters */ + val = reo_desc[HAL_OFFSET_DW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3, + LINK_DESCRIPTOR_COUNTER0)]; + st->link_desc_counter0 = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3, + LINK_DESCRIPTOR_COUNTER0, + val); + + val = reo_desc[HAL_OFFSET_DW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4, + LINK_DESCRIPTOR_COUNTER1)]; + st->link_desc_counter1 = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4, + LINK_DESCRIPTOR_COUNTER1, + val); + + val = reo_desc[HAL_OFFSET_DW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5, + LINK_DESCRIPTOR_COUNTER2)]; + st->link_desc_counter2 = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5, + LINK_DESCRIPTOR_COUNTER2, + val); + + val = reo_desc[HAL_OFFSET_DW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6, + LINK_DESCRIPTOR_COUNTER_SUM)]; + st->link_desc_counter_sum = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6, + LINK_DESCRIPTOR_COUNTER_SUM, + val); +} 
+qdf_export_symbol(hal_reo_desc_thres_reached_status); + +inline void hal_reo_rx_update_queue_status(uint32_t *reo_desc, + struct hal_reo_update_rx_queue_status *st) +{ + /* Offsets of descriptor fields defined in HW headers start + * from the field after TLV header */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + HAL_REO_STATUS_GET_HEADER(reo_desc, + REO_UPDATE_RX_REO_QUEUE, st->header); +} +qdf_export_symbol(hal_reo_rx_update_queue_status); + +/** + * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG + * with command number + * @hal_soc: Handle to HAL SoC structure + * @hal_ring: Handle to HAL SRNG structure + * + * Return: none + */ +inline void hal_reo_init_cmd_ring(struct hal_soc *soc, void *hal_srng) +{ + int cmd_num; + uint32_t *desc_addr; + struct hal_srng_params srng_params; + uint32_t desc_size; + uint32_t num_desc; + + hal_get_srng_params(soc, hal_srng, &srng_params); + + desc_addr = (uint32_t *)(srng_params.ring_base_vaddr); + desc_addr += (sizeof(struct tlv_32_hdr) >> 2); + desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2; + num_desc = srng_params.num_entries; + cmd_num = 1; + while (num_desc) { + /* Offsets of descriptor fields defined in HW headers start + * from the field after TLV header */ + HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0, + REO_CMD_NUMBER, cmd_num); + desc_addr += desc_size; + num_desc--; cmd_num++; + } + + soc->reo_res_bitmap = 0; +} +qdf_export_symbol(hal_reo_init_cmd_ring); diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_reo.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_reo.h new file mode 100644 index 0000000000000000000000000000000000000000..fa672ec17a71f12ee8071757076fb4b5e92f3f52 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_reo.h @@ -0,0 +1,547 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HAL_REO_H_ +#define _HAL_REO_H_ + +#include +/* HW headers */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* SW headers */ +#include "hal_api.h" + +/*--------------------------------------------------------------------------- + Preprocessor definitions and constants + ---------------------------------------------------------------------------*/ + +/* TLV values */ +#define HAL_REO_GET_QUEUE_STATS_TLV WIFIREO_GET_QUEUE_STATS_E +#define HAL_REO_FLUSH_QUEUE_TLV WIFIREO_FLUSH_QUEUE_E +#define HAL_REO_FLUSH_CACHE_TLV WIFIREO_FLUSH_CACHE_E +#define HAL_REO_UNBLOCK_CACHE_TLV WIFIREO_UNBLOCK_CACHE_E +#define HAL_REO_FLUSH_TIMEOUT_LIST_TLV WIFIREO_FLUSH_TIMEOUT_LIST_E +#define HAL_REO_RX_UPDATE_QUEUE_TLV WIFIREO_UPDATE_RX_REO_QUEUE_E + +#define HAL_REO_QUEUE_STATS_STATUS_TLV WIFIREO_GET_QUEUE_STATS_STATUS_E +#define HAL_REO_FLUSH_QUEUE_STATUS_TLV WIFIREO_FLUSH_QUEUE_STATUS_E +#define HAL_REO_FLUSH_CACHE_STATUS_TLV WIFIREO_FLUSH_CACHE_STATUS_E +#define HAL_REO_UNBLK_CACHE_STATUS_TLV WIFIREO_UNBLOCK_CACHE_STATUS_E +#define HAL_REO_TIMOUT_LIST_STATUS_TLV WIFIREO_FLUSH_TIMEOUT_LIST_STATUS_E +#define 
HAL_REO_DESC_THRES_STATUS_TLV \ + WIFIREO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_E +#define HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV WIFIREO_UPDATE_RX_REO_QUEUE_STATUS_E + +#define HAL_SET_FIELD(block, field, value) \ + (((value) << (block ## _ ## field ## _LSB)) & \ + (block ## _ ## field ## _MASK)) + +#define HAL_GET_FIELD(block, field, value) \ + (((value) & (block ## _ ## field ## _MASK)) >> \ + (block ## _ ## field ## _LSB)) + +#define HAL_SET_TLV_HDR(desc, tag, len) \ + do { \ + ((struct tlv_32_hdr *) desc)->tlv_tag = tag; \ + ((struct tlv_32_hdr *) desc)->tlv_len = len; \ + } while (0) + +#define HAL_GET_TLV(desc) (((struct tlv_32_hdr *) desc)->tlv_tag) + +#define HAL_OFFSET_DW(_block, _field) (HAL_OFFSET(_block, _field) >> 2) +/* dword offsets in REO cmd TLV */ +#define CMD_HEADER_DW_OFFSET 0 + +#define HAL_REO_STATUS_GET_HEADER(d, b, h) do { \ + uint32_t val1 = d[HAL_OFFSET_DW(b ##_STATUS_0, \ + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; \ + h.cmd_num = \ + HAL_GET_FIELD( \ + UNIFORM_REO_STATUS_HEADER_0, REO_STATUS_NUMBER, \ + val1); \ + h.exec_time = \ + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0, \ + CMD_EXECUTION_TIME, val1); \ + h.status = \ + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0, \ + REO_CMD_EXECUTION_STATUS, val1); \ + val1 = d[HAL_OFFSET_DW(b ##_STATUS_1, \ + UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)]; \ + h.tstamp = \ + HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_1, TIMESTAMP, val1); \ +} while (0) + +/** + * enum reo_unblock_cache_type: Enum for unblock type in REO unblock command + * @UNBLOCK_RES_INDEX: Unblock a block resource + * @UNBLOCK_CACHE: Unblock cache + */ +enum reo_unblock_cache_type { + UNBLOCK_RES_INDEX = 0, + UNBLOCK_CACHE = 1 +}; + +/** + * enum reo_thres_index_reg: Enum for reo descriptor usage counter for + * which threshold status is being indicated. 
+ * @reo_desc_counter0_threshold: counter0 reached threshold + * @reo_desc_counter1_threshold: counter1 reached threshold + * @reo_desc_counter2_threshold: counter2 reached threshold + * @reo_desc_counter_sum_threshold: Total count reached threshold + */ +enum reo_thres_index_reg { + reo_desc_counter0_threshold = 0, + reo_desc_counter1_threshold = 1, + reo_desc_counter2_threshold = 2, + reo_desc_counter_sum_threshold = 3 +}; + +/** + * enum reo_cmd_exec_status: Enum for execution status of REO command + * + * @HAL_REO_CMD_SUCCESS: Command has successfully been executed + * @HAL_REO_CMD_BLOCKED: Command could not be executed as the queue or cache + * was blocked + * @HAL_REO_CMD_FAILED: Command has encountered problems when executing, like + * the queue descriptor not being valid + */ +enum reo_cmd_exec_status { + HAL_REO_CMD_SUCCESS = 0, + HAL_REO_CMD_BLOCKED = 1, + HAL_REO_CMD_FAILED = 2, + HAL_REO_CMD_RESOURCE_BLOCKED = 3, + HAL_REO_CMD_DRAIN = 0xff +}; + +/** + * enum hal_reo_cmd_type: Enum for REO command type + * @CMD_GET_QUEUE_STATS: Get REO queue status/stats + * @CMD_FLUSH_QUEUE: Flush all frames in REO queue + * @CMD_FLUSH_CACHE: Flush descriptor entries in the cache + * @CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked + * earlier with a 'REO_FLUSH_CACHE' command + * @CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list + * @CMD_UPDATE_RX_REO_QUEUE: Update REO queue settings + */ +enum hal_reo_cmd_type { + CMD_GET_QUEUE_STATS = 0, + CMD_FLUSH_QUEUE = 1, + CMD_FLUSH_CACHE = 2, + CMD_UNBLOCK_CACHE = 3, + CMD_FLUSH_TIMEOUT_LIST = 4, + CMD_UPDATE_RX_REO_QUEUE = 5 +}; + +/** + * struct hal_reo_cmd_params_std: Standard REO command parameters + * @need_status: Status required for the command + * @addr_lo: Lower 32 bits of REO queue descriptor address + * @addr_hi: Upper 8 bits of REO queue descriptor address + */ +struct hal_reo_cmd_params_std { + bool need_status; + uint32_t addr_lo; + uint8_t addr_hi; +}; + +/** + * struct 
hal_reo_cmd_get_queue_stats_params: Parameters to + * CMD_GET_QUEUE_STATS command + * @clear: Clear stats after retrieving + */ +struct hal_reo_cmd_get_queue_stats_params { + bool clear; +}; + +/** + * struct hal_reo_cmd_flush_queue_params: Parameters to CMD_FLUSH_QUEUE + * @block_use_after_flush: Block usage after flush till unblock command + * @index: Blocking resource to be used + */ +struct hal_reo_cmd_flush_queue_params { + bool block_use_after_flush; + uint8_t index; +}; + +/** + * struct hal_reo_cmd_flush_cache_params: Parameters to CMD_FLUSH_CACHE + * @fwd_mpdus_in_queue: Forward MPDUs before flushing descriptor + * @rel_block_index: Release blocking resource used earlier + * @cache_block_res_index: Blocking resource to be used + * @flush_no_inval: Flush without invalidating descriptor + * @block_use_after_flush: Block usage after flush till unblock command + * @flush_all: Flush entire REO cache + */ +struct hal_reo_cmd_flush_cache_params { + bool fwd_mpdus_in_queue; + bool rel_block_index; + uint8_t cache_block_res_index; + bool flush_no_inval; + bool block_use_after_flush; + bool flush_all; +}; + +/** + * struct hal_reo_cmd_unblock_cache_params: Parameters to CMD_UNBLOCK_CACHE + * @type: Unblock type (enum reo_unblock_cache_type) + * @index: Blocking index to be released + */ +struct hal_reo_cmd_unblock_cache_params { + enum reo_unblock_cache_type type; + uint8_t index; +}; + +/** + * struct hal_reo_cmd_flush_timeout_list_params: Parameters to + * CMD_FLUSH_TIMEOUT_LIST + * @ac_list: AC timeout list to be flushed + * @min_rel_desc: Min. number of link descriptors to be released + * @min_fwd_buf: Min. 
number of buffers to be forwarded + */ +struct hal_reo_cmd_flush_timeout_list_params { + uint8_t ac_list; + uint16_t min_rel_desc; + uint16_t min_fwd_buf; +}; + +/** + * struct hal_reo_cmd_update_queue_params: Parameters to CMD_UPDATE_RX_REO_QUEUE + * @update_rx_queue_num: Update receive queue number + * @update_vld: Update valid bit + * @update_assoc_link_desc: Update associated link descriptor + * @update_disable_dup_detect: Update duplicate detection + * @update_soft_reorder_enab: Update soft reorder enable + * @update_ac: Update access category + * @update_bar: Update BAR received bit + * @update_rty: Update retry bit + * @update_chk_2k_mode: Update chk_2k_mode setting + * @update_oor_mode: Update OOR mode setting + * @update_ba_window_size: Update BA window size + * @update_pn_check_needed: Update pn_check_needed + * @update_pn_even: Update pn_even + * @update_pn_uneven: Update pn_uneven + * @update_pn_hand_enab: Update pn_handling_enable + * @update_pn_size: Update pn_size + * @update_ignore_ampdu: Update ignore_ampdu + * @update_svld: update svld + * @update_ssn: Update SSN + * @update_seq_2k_err_detect: Update seq_2k_err_detected flag + * @update_pn_err_detect: Update pn_err_detected flag + * @update_pn_valid: Update pn_valid + * @update_pn: Update PN + * @rx_queue_num: rx_queue_num to be updated + * @vld: valid bit to be updated + * @assoc_link_desc: assoc_link_desc counter + * @disable_dup_detect: disable_dup_detect to be updated + * @soft_reorder_enab: soft_reorder_enab to be updated + * @ac: AC to be updated + * @bar: BAR flag to be updated + * @rty: RTY flag to be updated + * @chk_2k_mode: check_2k_mode setting to be updated + * @oor_mode: oor_mode to be updated + * @pn_check_needed: pn_check_needed to be updated + * @pn_even: pn_even to be updated + * @pn_uneven: pn_uneven to be updated + * @pn_hand_enab: pn_handling_enable to be updated + * @ignore_ampdu: ignore_ampdu to be updated + * @ba_window_size: BA window size to be updated + * @pn_size: 
pn_size to be updated + * @svld: svld flag to be updated + * @ssn: SSN to be updated + * @seq_2k_err_detect: seq_2k_err_detected flag to be updated + * @pn_err_detect: pn_err_detected flag to be updated + * @pn_31_0: PN bits 31-0 + * @pn_63_32: PN bits 63-32 + * @pn_95_64: PN bits 95-64 + * @pn_127_96: PN bits 127-96 + */ +struct hal_reo_cmd_update_queue_params { + uint32_t update_rx_queue_num:1, + update_vld:1, + update_assoc_link_desc:1, + update_disable_dup_detect:1, + update_soft_reorder_enab:1, + update_ac:1, + update_bar:1, + update_rty:1, + update_chk_2k_mode:1, + update_oor_mode:1, + update_ba_window_size:1, + update_pn_check_needed:1, + update_pn_even:1, + update_pn_uneven:1, + update_pn_hand_enab:1, + update_pn_size:1, + update_ignore_ampdu:1, + update_svld:1, + update_ssn:1, + update_seq_2k_err_detect:1, + update_pn_err_detect:1, + update_pn_valid:1, + update_pn:1; + uint32_t rx_queue_num:16, + vld:1, + assoc_link_desc:2, + disable_dup_detect:1, + soft_reorder_enab:1, + ac:2, + bar:1, + rty:1, + chk_2k_mode:1, + oor_mode:1, + pn_check_needed:1, + pn_even:1, + pn_uneven:1, + pn_hand_enab:1, + ignore_ampdu:1; + uint32_t ba_window_size:8, + pn_size:8, + svld:1, + ssn:12, + seq_2k_err_detect:1, + pn_err_detect:1; + uint32_t pn_31_0:32; + uint32_t pn_63_32:32; + uint32_t pn_95_64:32; + uint32_t pn_127_96:32; +}; + +/** + * struct hal_reo_cmd_params: Common structure to pass REO command parameters + * @hal_reo_cmd_params_std: Standard parameters + * @u: Union of various REO command parameters + */ +struct hal_reo_cmd_params { + struct hal_reo_cmd_params_std std; + union { + struct hal_reo_cmd_get_queue_stats_params stats_params; + struct hal_reo_cmd_flush_queue_params fl_queue_params; + struct hal_reo_cmd_flush_cache_params fl_cache_params; + struct hal_reo_cmd_unblock_cache_params unblk_cache_params; + struct hal_reo_cmd_flush_timeout_list_params fl_tim_list_params; + struct hal_reo_cmd_update_queue_params upd_queue_params; + } u; +}; + +/** + * struct 
hal_reo_status_header: Common REO status header + * @cmd_num: Command number + * @exec_time: execution time + * @status: command execution status + * @tstamp: Timestamp of status updated + */ +struct hal_reo_status_header { + uint16_t cmd_num; + uint16_t exec_time; + enum reo_cmd_exec_status status; + uint32_t tstamp; +}; + +/** + * struct hal_reo_queue_status: REO queue status structure + * @header: Common REO status header + * @ssn: SSN of current BA window + * @curr_idx: last forwarded pkt + * @pn_31_0, pn_63_32, pn_95_64, pn_127_96: + * PN number bits extracted from IV field + * @last_rx_enq_tstamp: Last enqueue timestamp + * @last_rx_deq_tstamp: Last dequeue timestamp + * @rx_bitmap_31_0, rx_bitmap_63_32, rx_bitmap_95_64 + * @rx_bitmap_127_96, rx_bitmap_159_128, rx_bitmap_191_160 + * @rx_bitmap_223_192, rx_bitmap_255_224: Each bit corresponds to a frame + * held in re-order queue + * @curr_mpdu_cnt, curr_msdu_cnt: Number of MPDUs and MSDUs in the queue + * @fwd_timeout_cnt: Frames forwarded due to timeout + * @fwd_bar_cnt: Frames forwarded due to BAR frame + * @dup_cnt: duplicate frames detected + * @frms_in_order_cnt: Frames received in order + * @bar_rcvd_cnt: BAR frame count + * @mpdu_frms_cnt, msdu_frms_cnt, total_cnt: MPDU, MSDU, total frames + * processed by REO + * @late_recv_mpdu_cnt: received after window had moved on + * @win_jump_2k: 2K jump count + * @hole_cnt: sequence hole count + */ +struct hal_reo_queue_status { + struct hal_reo_status_header header; + uint16_t ssn; + uint8_t curr_idx; + uint32_t pn_31_0, pn_63_32, pn_95_64, pn_127_96; + uint32_t last_rx_enq_tstamp, last_rx_deq_tstamp; + uint32_t rx_bitmap_31_0, rx_bitmap_63_32, rx_bitmap_95_64; + uint32_t rx_bitmap_127_96, rx_bitmap_159_128, rx_bitmap_191_160; + uint32_t rx_bitmap_223_192, rx_bitmap_255_224; + uint8_t curr_mpdu_cnt, curr_msdu_cnt; + uint8_t fwd_timeout_cnt, fwd_bar_cnt; + uint16_t dup_cnt; + uint32_t frms_in_order_cnt; + uint8_t bar_rcvd_cnt; + uint32_t mpdu_frms_cnt, msdu_frms_cnt, 
total_cnt; + uint16_t late_recv_mpdu_cnt; + uint8_t win_jump_2k; + uint16_t hole_cnt; +}; + +/** + * struct hal_reo_flush_queue_status: FLUSH_QUEUE status structure + * @header: Common REO status header + * @error: Error detected + */ +struct hal_reo_flush_queue_status { + struct hal_reo_status_header header; + bool error; +}; + +/** + * struct hal_reo_flush_cache_status: FLUSH_CACHE status structure + * @header: Common REO status header + * @error: Error detected + * @block_error: Blocking related error + * @cache_flush_status: Cache hit/miss + * @cache_flush_status_desc_type: type of descriptor flushed + * @cache_flush_cnt: number of lines actually flushed + */ +struct hal_reo_flush_cache_status { + struct hal_reo_status_header header; + bool error; + uint8_t block_error; + bool cache_flush_status; + uint8_t cache_flush_status_desc_type; + uint8_t cache_flush_cnt; +}; + +/** + * struct hal_reo_unblk_cache_status: UNBLOCK_CACHE status structure + * @header: Common REO status header + * @error: error detected + * @unblock_type: resource or cache + */ +struct hal_reo_unblk_cache_status { + struct hal_reo_status_header header; + bool error; + enum reo_unblock_cache_type unblock_type; +}; + +/** + * struct hal_reo_flush_timeout_list_status: FLUSH_TIMEOUT_LIST status structure + * @header: Common REO status header + * @error: error detected + * @list_empty: timeout list empty + * @rel_desc_cnt: number of link descriptors released + * @fwd_buf_cnt: number of buffers forwarded to REO destination ring + */ +struct hal_reo_flush_timeout_list_status { + struct hal_reo_status_header header; + bool error; + bool list_empty; + uint16_t rel_desc_cnt; + uint16_t fwd_buf_cnt; +}; + +/** + * struct hal_reo_desc_thres_reached_status: desc_thres_reached status structure + * @header: Common REO status header + * @thres_index: Index of descriptor threshold counter + * @link_desc_counter0, link_desc_counter1, link_desc_counter2: descriptor + * counter values + * @link_desc_counter_sum: 
overall descriptor count + */ +struct hal_reo_desc_thres_reached_status { + struct hal_reo_status_header header; + enum reo_thres_index_reg thres_index; + uint32_t link_desc_counter0, link_desc_counter1, link_desc_counter2; + uint32_t link_desc_counter_sum; +}; + +/** + * struct hal_reo_update_rx_queue_status: UPDATE_RX_QUEUE status structure + * @header: Common REO status header + */ +struct hal_reo_update_rx_queue_status { + struct hal_reo_status_header header; +}; + +/** + * union hal_reo_status: Union to pass REO status to callbacks + * @queue_status: Refer to struct hal_reo_queue_status + * @fl_cache_status: Refer to struct hal_reo_flush_cache_status + * @fl_queue_status: Refer to struct hal_reo_flush_queue_status + * @fl_timeout_status: Refer to struct hal_reo_flush_timeout_list_status + * @unblk_cache_status: Refer to struct hal_reo_unblk_cache_status + * @thres_status: struct hal_reo_desc_thres_reached_status + * @rx_queue_status: struct hal_reo_update_rx_queue_status + */ +union hal_reo_status { + struct hal_reo_queue_status queue_status; + struct hal_reo_flush_cache_status fl_cache_status; + struct hal_reo_flush_queue_status fl_queue_status; + struct hal_reo_flush_timeout_list_status fl_timeout_status; + struct hal_reo_unblk_cache_status unblk_cache_status; + struct hal_reo_desc_thres_reached_status thres_status; + struct hal_reo_update_rx_queue_status rx_queue_status; +}; + +/* Prototypes */ +/* REO command ring routines */ +int hal_reo_cmd_queue_stats(void *reo_ring, struct hal_soc *soc, + struct hal_reo_cmd_params *cmd); +int hal_reo_cmd_flush_queue(void *reo_ring, struct hal_soc *soc, + struct hal_reo_cmd_params *cmd); +int hal_reo_cmd_flush_cache(void *reo_ring, struct hal_soc *soc, + struct hal_reo_cmd_params *cmd); +int hal_reo_cmd_unblock_cache(void *reo_ring, struct hal_soc *soc, + struct hal_reo_cmd_params *cmd); +int hal_reo_cmd_flush_timeout_list(void *reo_ring, struct hal_soc *soc, + struct hal_reo_cmd_params *cmd); +int 
hal_reo_cmd_update_rx_queue(void *reo_ring, struct hal_soc *soc, + struct hal_reo_cmd_params *cmd); + +/* REO status ring routines */ +void hal_reo_queue_stats_status(uint32_t *reo_desc, + struct hal_reo_queue_status *st); +void hal_reo_flush_queue_status(uint32_t *reo_desc, + struct hal_reo_flush_queue_status *st); +void hal_reo_flush_cache_status(uint32_t *reo_desc, struct hal_soc *soc, + struct hal_reo_flush_cache_status *st); +void hal_reo_unblock_cache_status(uint32_t *reo_desc, struct hal_soc *soc, + struct hal_reo_unblk_cache_status *st); +void hal_reo_flush_timeout_list_status( + uint32_t *reo_desc, + struct hal_reo_flush_timeout_list_status *st); +void hal_reo_desc_thres_reached_status( + uint32_t *reo_desc, + struct hal_reo_desc_thres_reached_status *st); +void hal_reo_rx_update_queue_status(uint32_t *reo_desc, + struct hal_reo_update_rx_queue_status *st); + +void hal_reo_init_cmd_ring(struct hal_soc *soc, void *hal_srng); + +#endif /* _HAL_REO_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_rx.c b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_rx.c new file mode 100644 index 0000000000000000000000000000000000000000..44cac9ea9244926d25d1468c7dcfd550f571bb6b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_rx.c @@ -0,0 +1,326 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hal_api.h" +#include "qdf_module.h" + +/* TODO: See if the following definition is available in HW headers */ +#define HAL_REO_OWNED 4 +#define HAL_REO_QUEUE_DESC 8 +#define HAL_REO_QUEUE_EXT_DESC 9 + +/* TODO: Using associated link desc counter 1 for Rx. Check with FW on + * how these counters are assigned + */ +#define HAL_RX_LINK_DESC_CNTR 1 +/* TODO: Following definition should be from HW headers */ +#define HAL_DESC_REO_OWNED 4 + +/* TODO: Move this to common header file */ +static inline void hal_uniform_desc_hdr_setup(uint32_t *desc, uint32_t owner, + uint32_t buffer_type) +{ + HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, OWNER, + owner); + HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, BUFFER_TYPE, + buffer_type); +} + +#ifndef TID_TO_WME_AC +#define WME_AC_BE 0 /* best effort */ +#define WME_AC_BK 1 /* background */ +#define WME_AC_VI 2 /* video */ +#define WME_AC_VO 3 /* voice */ + +#define TID_TO_WME_AC(_tid) ( \ + (((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \ + (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \ + (((_tid) == 4) || ((_tid) == 5)) ? 
WME_AC_VI : \ + WME_AC_VO) +#endif +#define HAL_NON_QOS_TID 16 + +/** + * hal_reo_qdesc_setup - Setup HW REO queue descriptor + * + * @hal_soc: Opaque HAL SOC handle + * @ba_window_size: BlockAck window size + * @start_seq: Starting sequence number + * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory + * @hw_qdesc_paddr: Physical address of REO queue descriptor memory + * @tid: TID + * + */ +void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size, + uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr, + int pn_type) +{ + uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr; + uint32_t *reo_queue_ext_desc; + uint32_t reg_val; + uint32_t pn_enable; + uint32_t pn_size = 0; + + qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue)); + + hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED, + HAL_REO_QUEUE_DESC); + /* Fixed pattern in reserved bits for debugging */ + HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0, + RESERVED_0A, 0xDDBEEF); + + /* This a just a SW meta data and will be copied to REO destination + * descriptors indicated by hardware. + * TODO: Setting TID in this field. See if we should set something else. + */ + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1, + RECEIVE_QUEUE_NUMBER, tid); + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, + VLD, 1); + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, + ASSOCIATED_LINK_DESCRIPTOR_COUNTER, HAL_RX_LINK_DESC_CNTR); + + /* + * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0 + */ + + reg_val = TID_TO_WME_AC(tid); + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val); + + if (ba_window_size < 1) + ba_window_size = 1; + + /* Set RTY bit for non-BA case. Duplicate detection is currently not + * done by HW in non-BA case if RTY bit is not set. + * TODO: This is a temporary War and should be removed once HW fix is + * made to check and discard duplicates even if RTY bit is not set. 
+ */ + if (ba_window_size == 1) + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1); + + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE, + ba_window_size - 1); + + switch (pn_type) { + case HAL_PN_WPA: + pn_enable = 1; + pn_size = PN_SIZE_48; + case HAL_PN_WAPI_EVEN: + case HAL_PN_WAPI_UNEVEN: + pn_enable = 1; + pn_size = PN_SIZE_128; + default: + pn_enable = 0; + } + + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED, + pn_enable); + + if (pn_type == HAL_PN_WAPI_EVEN) + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, + PN_SHALL_BE_EVEN, 1); + else if (pn_type == HAL_PN_WAPI_UNEVEN) + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, + PN_SHALL_BE_UNEVEN, 1); + + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_HANDLING_ENABLE, + pn_enable); + + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE, + pn_size); + + /* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG need to be set + * based on BA window size and/or AMPDU capabilities + */ + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, + IGNORE_AMPDU_FLAG, 1); + + if (start_seq <= 0xfff) + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN, + start_seq); + + /* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA, + * but REO is not delivering packets if we set it to 1. 
Need to enable + * this once the issue is resolved */ + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0); + + /* TODO: Check if we should set start PN for WAPI */ + +#ifdef notyet + /* Setup first queue extension if BA window size is more than 1 */ + if (ba_window_size > 1) { + reo_queue_ext_desc = + (uint32_t *)(((struct rx_reo_queue *)reo_queue_desc) + + 1); + qdf_mem_zero(reo_queue_ext_desc, + sizeof(struct rx_reo_queue_ext)); + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); + } + /* Setup second queue extension if BA window size is more than 105 */ + if (ba_window_size > 105) { + reo_queue_ext_desc = (uint32_t *) + (((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1); + qdf_mem_zero(reo_queue_ext_desc, + sizeof(struct rx_reo_queue_ext)); + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); + } + /* Setup third queue extension if BA window size is more than 210 */ + if (ba_window_size > 210) { + reo_queue_ext_desc = (uint32_t *) + (((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1); + qdf_mem_zero(reo_queue_ext_desc, + sizeof(struct rx_reo_queue_ext)); + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); + } +#else + /* TODO: HW queue descriptors are currently allocated for max BA + * window size for all QOS TIDs so that same descriptor can be used + * later when ADDBA request is recevied. This should be changed to + * allocate HW queue descriptors based on BA window size being + * negotiated (0 for non BA cases), and reallocate when BA window + * size changes and also send WMI message to FW to change the REO + * queue descriptor in Rx peer entry as part of dp_rx_tid_update. 
+ */ + if (tid != HAL_NON_QOS_TID) { + reo_queue_ext_desc = (uint32_t *) + (((struct rx_reo_queue *)reo_queue_desc) + 1); + qdf_mem_zero(reo_queue_ext_desc, 3 * + sizeof(struct rx_reo_queue_ext)); + /* Initialize first reo queue extension descriptor */ + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); + /* Fixed pattern in reserved bits for debugging */ + HAL_DESC_SET_FIELD(reo_queue_ext_desc, + UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xADBEEF); + /* Initialize second reo queue extension descriptor */ + reo_queue_ext_desc = (uint32_t *) + (((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1); + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); + /* Fixed pattern in reserved bits for debugging */ + HAL_DESC_SET_FIELD(reo_queue_ext_desc, + UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xBDBEEF); + /* Initialize third reo queue extension descriptor */ + reo_queue_ext_desc = (uint32_t *) + (((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1); + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); + /* Fixed pattern in reserved bits for debugging */ + HAL_DESC_SET_FIELD(reo_queue_ext_desc, + UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xCDBEEF); + } +#endif +} +qdf_export_symbol(hal_reo_qdesc_setup); + + +/** + * hal_reo_setup - Initialize HW REO block + * + * @hal_soc: Opaque HAL SOC handle + * @reo_params: parameters needed by HAL for REO config + */ +void hal_reo_setup(void *hal_soc, + struct hal_reo_params *reo_params) +{ + struct hal_soc *soc = (struct hal_soc *)hal_soc; + + HAL_REG_WRITE(soc, HWIO_REO_R0_GENERAL_ENABLE_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, + FRAGMENT_DEST_RING, reo_params->frag_dst_ring) | + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, AGING_LIST_ENABLE, 1) | + HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, AGING_FLUSH_ENABLE, 1)); + /* Other ring enable bits and REO_ENABLE will be set by FW */ + + /* 
TODO: Setup destination ring mapping if enabled */

	/* TODO: Error destination ring setting is left to default.
	 * Default setting is to send all errors to release ring.
	 */

	HAL_REG_WRITE(soc,
		HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
		SEQ_WCSS_UMAC_REO_REG_OFFSET),
		HAL_DEFAULT_REO_TIMEOUT_MS * 1000);

	HAL_REG_WRITE(soc,
		HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
		SEQ_WCSS_UMAC_REO_REG_OFFSET),
		(HAL_DEFAULT_REO_TIMEOUT_MS * 1000));

	HAL_REG_WRITE(soc,
		HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
		SEQ_WCSS_UMAC_REO_REG_OFFSET),
		(HAL_DEFAULT_REO_TIMEOUT_MS * 1000));

	HAL_REG_WRITE(soc,
		HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
		SEQ_WCSS_UMAC_REO_REG_OFFSET),
		(HAL_DEFAULT_REO_TIMEOUT_MS * 1000));

	/*
	 * When hash based routing is enabled, routing of the rx packet
	 * is done based on the following value: 1 _ _ _ _ The last 4
	 * bits are based on hash[3:0]. This means the possible values
	 * are 0x10 to 0x1f. This value is used to look-up the
	 * ring ID configured in Destination_Ring_Ctrl_IX_* register.
	 * The Destination_Ring_Ctrl_IX_2 and Destination_Ring_Ctrl_IX_3
	 * registers need to be configured to set-up the 16 entries to
	 * map the hash values to a ring number. There are 3 bits per
	 * hash entry - which are mapped as follows:
	 * 0: TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW(WIFI),
	 * 7: NOT_USED.
	 */
	if (reo_params->rx_hash_enabled) {
		HAL_REG_WRITE(soc,
			HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
			SEQ_WCSS_UMAC_REO_REG_OFFSET),
			reo_params->remap1);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR 0x%x\n"),
			HAL_REG_READ(soc,
			HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
			SEQ_WCSS_UMAC_REO_REG_OFFSET)));

		HAL_REG_WRITE(soc,
			HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
			SEQ_WCSS_UMAC_REO_REG_OFFSET),
			reo_params->remap2);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR 0x%x\n"),
			HAL_REG_READ(soc,
			HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
			SEQ_WCSS_UMAC_REO_REG_OFFSET)));
	}


	/* TODO: Check if the following registers should be setup by host:
	 * AGING_CONTROL
	 * HIGH_MEMORY_THRESHOLD
	 * GLOBAL_LINK_DESC_COUNT_THRESH_IX_0[1,2]
	 * GLOBAL_LINK_DESC_COUNT_CTRL
	 */
}
qdf_export_symbol(hal_reo_setup);

diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_srng.c b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_srng.c
new file mode 100644
index 0000000000000000000000000000000000000000..fb2b5116d50979b9316973e9eb055b1069a9923c
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_srng.c
@@ -0,0 +1,1394 @@
/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "hal_api.h" +#include "target_type.h" +#include "wcss_version.h" +#include "qdf_module.h" + +/** + * Common SRNG register access macros: + * The SRNG registers are distributed across various UMAC and LMAC HW blocks, + * but the register group and format is exactly same for all rings, with some + * difference between producer rings (these are 'producer rings' with respect + * to HW and referred as 'destination rings' in SW) and consumer rings (these + * are 'consumer rings' with respect to HW and referred as 'source rings' in SW). + * The following macros provide uniform access to all SRNG rings. 
+ */ + +/* SRNG registers are split among two groups R0 and R2 and following + * definitions identify the group to which each register belongs to + */ +#define R0_INDEX 0 +#define R2_INDEX 1 + +#define HWREG_INDEX(_reg_group) _reg_group ## _ ## INDEX + +/* Registers in R0 group */ +#define BASE_LSB_GROUP R0 +#define BASE_MSB_GROUP R0 +#define ID_GROUP R0 +#define STATUS_GROUP R0 +#define MISC_GROUP R0 +#define HP_ADDR_LSB_GROUP R0 +#define HP_ADDR_MSB_GROUP R0 +#define PRODUCER_INT_SETUP_GROUP R0 +#define PRODUCER_INT_STATUS_GROUP R0 +#define PRODUCER_FULL_COUNTER_GROUP R0 +#define MSI1_BASE_LSB_GROUP R0 +#define MSI1_BASE_MSB_GROUP R0 +#define MSI1_DATA_GROUP R0 +#define HP_TP_SW_OFFSET_GROUP R0 +#define TP_ADDR_LSB_GROUP R0 +#define TP_ADDR_MSB_GROUP R0 +#define CONSUMER_INT_SETUP_IX0_GROUP R0 +#define CONSUMER_INT_SETUP_IX1_GROUP R0 +#define CONSUMER_INT_STATUS_GROUP R0 +#define CONSUMER_EMPTY_COUNTER_GROUP R0 +#define CONSUMER_PREFETCH_TIMER_GROUP R0 +#define CONSUMER_PREFETCH_STATUS_GROUP R0 + +/* Registers in R2 group */ +#define HP_GROUP R2 +#define TP_GROUP R2 + +/** + * Register definitions for all SRNG based rings are same, except few + * differences between source (HW consumer) and destination (HW producer) + * registers. Following macros definitions provide generic access to all + * SRNG based rings. + * For source rings, we will use the register/field definitions of SW2TCL1 + * ring defined in the HW header file mac_tcl_reg_seq_hwioreg.h. To setup + * individual fields, SRNG_SM macros should be used with fields specified + * using SRNG_SRC_FLD(, ), Register writes should be done + * using SRNG_SRC_REG_WRITE(, , ). + * Similarly for destination rings we will use definitions of REO2SW1 ring + * defined in the register reo_destination_ring.h. To setup individual + * fields SRNG_SM macros should be used with fields specified using + * SRNG_DST_FLD(, ). Register writes should be done using + * SRNG_DST_REG_WRITE(, , ). 
+ */ + +#define SRNG_DST_REG_OFFSET(_reg, _reg_group) \ + HWIO_REO_ ## _reg_group ## _REO2SW1_RING_ ## _reg##_ADDR(0) + +#define SRNG_SRC_REG_OFFSET(_reg, _reg_group) \ + HWIO_TCL_ ## _reg_group ## _SW2TCL1_RING_ ## _reg ## _ADDR(0) + +#define _SRNG_DST_FLD(_reg_group, _reg_fld) \ + HWIO_REO_ ## _reg_group ## _REO2SW1_RING_ ## _reg_fld +#define _SRNG_SRC_FLD(_reg_group, _reg_fld) \ + HWIO_TCL_ ## _reg_group ## _SW2TCL1_RING_ ## _reg_fld + +#define _SRNG_FLD(_reg_group, _reg_fld, _dir) \ + _SRNG_ ## _dir ## _FLD(_reg_group, _reg_fld) + +#define SRNG_DST_FLD(_reg, _f) _SRNG_FLD(_reg ## _GROUP, _reg ## _ ## _f, DST) +#define SRNG_SRC_FLD(_reg, _f) _SRNG_FLD(_reg ## _GROUP, _reg ## _ ## _f, SRC) + +#define SRNG_SRC_R0_START_OFFSET SRNG_SRC_REG_OFFSET(BASE_LSB, R0) +#define SRNG_DST_R0_START_OFFSET SRNG_DST_REG_OFFSET(BASE_LSB, R0) + +#define SRNG_SRC_R2_START_OFFSET SRNG_SRC_REG_OFFSET(HP, R2) +#define SRNG_DST_R2_START_OFFSET SRNG_DST_REG_OFFSET(HP, R2) + +#define SRNG_SRC_START_OFFSET(_reg_group) \ + SRNG_SRC_ ## _reg_group ## _START_OFFSET +#define SRNG_DST_START_OFFSET(_reg_group) \ + SRNG_DST_ ## _reg_group ## _START_OFFSET + +#define SRNG_REG_ADDR(_srng, _reg, _reg_group, _dir) \ + ((_srng)->hwreg_base[HWREG_INDEX(_reg_group)] + \ + SRNG_ ## _dir ## _REG_OFFSET(_reg, _reg_group) - \ + SRNG_ ## _dir ## _START_OFFSET(_reg_group)) + +#define SRNG_DST_ADDR(_srng, _reg) \ + SRNG_REG_ADDR(_srng, _reg, _reg ## _GROUP, DST) + +#define SRNG_SRC_ADDR(_srng, _reg) \ + SRNG_REG_ADDR(_srng, _reg, _reg ## _GROUP, SRC) + +#define SRNG_REG_WRITE(_srng, _reg, _value, _dir) \ + hal_write_address_32_mb(_srng->hal_soc, SRNG_ ## _dir ## _ADDR(_srng, _reg), (_value)) + +#define SRNG_REG_READ(_srng, _reg, _dir) \ + hal_read_address_32_mb(_srng->hal_soc, SRNG_ ## _dir ## _ADDR(_srng, _reg)) + +#define SRNG_SRC_REG_WRITE(_srng, _reg, _value) \ + SRNG_REG_WRITE(_srng, _reg, _value, SRC) + +#define SRNG_DST_REG_WRITE(_srng, _reg, _value) \ + SRNG_REG_WRITE(_srng, _reg, _value, DST) + 
+#define SRNG_SRC_REG_READ(_srng, _reg) \ + SRNG_REG_READ(_srng, _reg, SRC) + +#define _SRNG_FM(_reg_fld) _reg_fld ## _BMSK +#define _SRNG_FS(_reg_fld) _reg_fld ## _SHFT + +#define SRNG_SM(_reg_fld, _val) \ + (((_val) << _SRNG_FS(_reg_fld)) & _SRNG_FM(_reg_fld)) + +#define SRNG_MS(_reg_fld, _val) \ + (((_val) & _SRNG_FM(_reg_fld)) >> _SRNG_FS(_reg_fld)) + +#define SRNG_MAX_SIZE_DWORDS \ + (SRNG_MS(SRNG_SRC_FLD(BASE_MSB, RING_SIZE), 0xffffffff)) + +/** + * HW ring configuration table to identify hardware ring attributes like + * register addresses, number of rings, ring entry size etc., for each type + * of SRNG ring. + * + * Currently there is just one HW ring table, but there could be multiple + * configurations in future based on HW variants from the same wifi3.0 family + * and hence need to be attached with hal_soc based on HW type + */ +#define HAL_SRNG_CONFIG(_hal_soc, _ring_type) (&hw_srng_table[_ring_type]) +static struct hal_hw_srng_config hw_srng_table[] = { + /* TODO: max_rings can populated by querying HW capabilities */ + { /* REO_DST */ + .start_ring_id = HAL_SRNG_REO2SW1, + .max_rings = 4, + .entry_size = sizeof(struct reo_destination_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO2SW1_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + .reg_size = { + HWIO_REO_R0_REO2SW2_RING_BASE_LSB_ADDR(0) - + HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR(0), + HWIO_REO_R2_REO2SW2_RING_HP_ADDR(0) - + HWIO_REO_R2_REO2SW1_RING_HP_ADDR(0), + }, + }, + { /* REO_EXCEPTION */ + /* Designating REO2TCL ring as exception ring. This ring is + * similar to other REO2SW rings though it is named as REO2TCL. + * Any of theREO2SW rings can be used as exception ring. 
+ */ + .start_ring_id = HAL_SRNG_REO2TCL, + .max_rings = 1, + .entry_size = sizeof(struct reo_destination_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO2TCL_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO2TCL_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported */ + .reg_size = {}, + }, + { /* REO_REINJECT */ + .start_ring_id = HAL_SRNG_SW2REO, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_REO_R0_SW2REO_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_SW2REO_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET) + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported */ + .reg_size = {}, + }, + { /* REO_CMD */ + .start_ring_id = HAL_SRNG_REO_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct reo_get_queue_stats)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_REO_R0_REO_CMD_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO_CMD_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported */ + .reg_size = {}, + }, + { /* REO_STATUS */ + .start_ring_id = HAL_SRNG_REO_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct reo_get_queue_stats_status)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_REO_R0_REO_STATUS_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + HWIO_REO_R2_REO_STATUS_RING_HP_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported */ + .reg_size = {}, + }, + { /* TCL_DATA */ + .start_ring_id = HAL_SRNG_SW2TCL1, + .max_rings = 3, + 
.entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_data_cmd)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + .reg_size = { + HWIO_TCL_R0_SW2TCL2_RING_BASE_LSB_ADDR(0) - + HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR(0), + HWIO_TCL_R2_SW2TCL2_RING_HP_ADDR(0) - + HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR(0), + }, + }, + { /* TCL_CMD */ + .start_ring_id = HAL_SRNG_SW2TCL_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_gse_cmd)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_SW2TCL_CMD_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported */ + .reg_size = {}, + }, + { /* TCL_STATUS */ + .start_ring_id = HAL_SRNG_TCL_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct tlv_32_hdr) + + sizeof(struct tcl_status_ring)) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_TCL_R0_TCL_STATUS1_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + HWIO_TCL_R2_TCL_STATUS1_RING_HP_ADDR( + SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported */ + .reg_size = {}, + }, + { /* CE_SRC */ + .start_ring_id = HAL_SRNG_CE_0_SRC, + .max_rings = 12, + .entry_size = sizeof(struct ce_src_desc) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET), + }, + .reg_size = { + 
SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET, + }, + }, + { /* CE_DST */ + .start_ring_id = HAL_SRNG_CE_0_DST, + .max_rings = 12, + .entry_size = 8 >> 2, + /*TODO: entry_size above should actually be + * sizeof(struct ce_dst_desc) >> 2, but couldn't find definition + * of struct ce_dst_desc in HW header files + */ + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + }, + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + }, + }, + { /* CE_DST_STATUS */ + .start_ring_id = HAL_SRNG_CE_0_DST_STATUS, + .max_rings = 12, + .entry_size = sizeof(struct ce_stat_desc) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_LSB_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + HWIO_WFSS_CE_CHANNEL_DST_R2_STATUS_RING_HP_ADDR( + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET), + }, + /* TODO: check destination status ring registers */ + .reg_size = { + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET - + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET, + }, + }, + { /* WBM_IDLE_LINK */ + .start_ring_id = HAL_SRNG_WBM_IDLE_LINK, + .max_rings = 1, + 
.entry_size = sizeof(struct wbm_link_descriptor_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM_IDLE_LINK_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported */ + .reg_size = {}, + }, + { /* SW2WBM_RELEASE */ + .start_ring_id = HAL_SRNG_WBM_SW_RELEASE, + .max_rings = 1, + .entry_size = sizeof(struct wbm_release_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_SRC_RING, + .reg_start = { + HWIO_WBM_R0_SW_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_SW_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + /* Single ring - provide ring size if multiple rings of this + * type are supported */ + .reg_size = {}, + }, + { /* WBM2SW_RELEASE */ + .start_ring_id = HAL_SRNG_WBM2SW0_RELEASE, + .max_rings = 4, + .entry_size = sizeof(struct wbm_release_ring) >> 2, + .lmac_ring = FALSE, + .ring_dir = HAL_SRNG_DST_RING, + .reg_start = { + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + .reg_size = { + HWIO_WBM_R0_WBM2SW1_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) - + HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + HWIO_WBM_R2_WBM2SW1_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) - + HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET), + }, + }, + { /* RXDMA_BUF */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA0_BUF0, +#ifdef IPA_OFFLOAD + .max_rings = 3, +#else + .max_rings = 2, +#endif + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + }, + { /* RXDMA_DST */ + .start_ring_id = 
HAL_SRNG_WMAC1_RXDMA2SW0, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_DST_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + }, + { /* RXDMA_MONITOR_BUF */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA2_BUF, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + }, + { /* RXDMA_MONITOR_STATUS */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + }, + { /* RXDMA_MONITOR_DST */ + .start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW1, + .max_rings = 1, + .entry_size = sizeof(struct reo_entrance_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_DST_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + }, + { /* RXDMA_MONITOR_DESC */ + .start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_DESC, + .max_rings = 1, + .entry_size = sizeof(struct wbm_buffer_ring) >> 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + }, + { /* DIR_BUF_RX_DMA_SRC */ + .start_ring_id = HAL_SRNG_DIR_BUF_RX_SRC_DMA_RING, + .max_rings = 1, + .entry_size = 2, + .lmac_ring = TRUE, + .ring_dir = HAL_SRNG_SRC_RING, + /* reg_start is not set because LMAC rings are not accessed + * from host + */ + .reg_start = {}, + .reg_size = {}, + }, +#ifdef WLAN_FEATURE_CIF_CFR + { /* WIFI_POS_SRC */ + .start_ring_id = HAL_SRNG_WIFI_POS_SRC_DMA_RING, + .max_rings = 1, 
	.entry_size = sizeof(wmi_oem_dma_buf_release_entry) >> 2,
	.lmac_ring = TRUE,
	.ring_dir = HAL_SRNG_SRC_RING,
	/* reg_start is not set because LMAC rings are not accessed
	 * from host
	 */
	.reg_start = {},
	.reg_size = {},
	},
#endif
};

/**
 * hal_get_srng_ring_id() - get the ring id of a described ring
 * @hal: hal_soc data structure
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings)
 *
 * Return: the ring id or -EINVAL if the ring does not exist.
 */
static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
	int ring_num, int mac_id)
{
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	int ring_id;

	if (ring_num >= ring_config->max_rings) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: ring_num exceeded maximum no. of supported rings\n",
			__func__);
		/* TODO: This is a programming error. Assert if this happens */
		return -EINVAL;
	}

	if (ring_config->lmac_ring) {
		/* LMAC rings are laid out per-mac; offset by mac_id */
		ring_id = ring_config->start_ring_id + ring_num +
			(mac_id * HAL_MAX_RINGS_PER_LMAC);
	} else {
		ring_id = ring_config->start_ring_id + ring_num;
	}

	return ring_id;
}

/**
 * hal_get_srng() - look up the SRNG entry for a ring id
 * @hal: hal_soc data structure
 * @ring_id: ring id (index into hal->srng_list)
 *
 * Return: pointer to the srng_list entry; no bounds check is done here
 */
static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id)
{
	/* TODO: Should we allocate srng structures dynamically? */
	return &(hal->srng_list[ring_id]);
}

#define HP_OFFSET_IN_REG_START 1
#define OFFSET_FROM_HP_TO_TP 4
/**
 * hal_update_srng_hp_tp_address() - point a ring's HP/TP address at its
 *	shadow register
 * @hal_soc: opaque HAL SOC handle
 * @shadow_config_index: index of the shadow register to use
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 *
 * For destination rings the tail pointer address is redirected to the
 * shadow register; for source rings the head pointer address.
 */
static void hal_update_srng_hp_tp_address(void *hal_soc,
	int shadow_config_index,
	int ring_type,
	int ring_num)
{
	struct hal_srng *srng;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	int ring_id;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0);
	if (ring_id < 0)
		return;

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->ring_dir == HAL_SRNG_DST_RING)
		srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index) +
			hal->dev_base_addr;
	else
		srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index) +
			hal->dev_base_addr;
}

/**
 * hal_set_one_shadow_config() - configure one shadow register for a ring's
 *	HP (source ring) or TP (destination ring)
 * @hal_soc: opaque HAL SOC handle
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_RESOURCES when all
 *	MAX_SHADOW_REGISTERS entries are already in use
 */
QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
	int ring_type,
	int ring_num)
{
	uint32_t target_register;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *srng_config = &hw_srng_table[ring_type];
	int shadow_config_index = hal->num_shadow_registers_configured;

	if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
		QDF_ASSERT(0);
		return QDF_STATUS_E_RESOURCES;
	}

	hal->num_shadow_registers_configured++;

	target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
			*ring_num);

	/* if the ring is a dst ring, we need to shadow the tail pointer */
	if (srng_config->ring_dir == HAL_SRNG_DST_RING)
		target_register += OFFSET_FROM_HP_TO_TP;

	hal->shadow_config[shadow_config_index].addr = target_register;

	/* update hp/tp addr in the hal_soc structure */
	hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
		ring_num);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"%s: target_reg %x, shadow_index %x, ring_type %d, ring num %d\n",
		__func__, target_register, shadow_config_index,
		ring_type, ring_num);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS hal_construct_shadow_config(void *hal_soc)
{
	int ring_type,
ring_num; + + for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) { + struct hal_hw_srng_config *srng_config = + &hw_srng_table[ring_type]; + + if (ring_type == CE_SRC || + ring_type == CE_DST || + ring_type == CE_DST_STATUS) + continue; + + if (srng_config->lmac_ring) + continue; + + for (ring_num = 0; ring_num < srng_config->max_rings; + ring_num++) + hal_set_one_shadow_config(hal_soc, ring_type, ring_num); + } + + return QDF_STATUS_SUCCESS; +} + +void hal_get_shadow_config(void *hal_soc, + struct pld_shadow_reg_v2_cfg **shadow_config, + int *num_shadow_registers_configured) +{ + struct hal_soc *hal = (struct hal_soc *)hal_soc; + + *shadow_config = hal->shadow_config; + *num_shadow_registers_configured = + hal->num_shadow_registers_configured; + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s\n", __func__); +} + + +static void hal_validate_shadow_register(struct hal_soc *hal, + uint32_t *destination, + uint32_t *shadow_address) +{ + unsigned int index; + uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr; + int destination_ba_offset = + ((char *)destination) - (char *)hal->dev_base_addr; + + index = shadow_address - shadow_0_offset; + + if (index >= MAX_SHADOW_REGISTERS) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: index %x out of bounds\n", __func__, index); + goto error; + } else if (hal->shadow_config[index].addr != destination_ba_offset) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: sanity check failure, expected %x, found %x\n", + __func__, destination_ba_offset, + hal->shadow_config[index].addr); + goto error; + } + return; +error: + qdf_print("%s: baddr %pK, desination %pK, shadow_address %pK s0offset %pK index %x", + __func__, hal->dev_base_addr, destination, shadow_address, + shadow_0_offset, index); + QDF_BUG(0); + return; +} + +static void hal_target_based_configure(struct hal_soc *hal) +{ + struct hif_target_info *tgt_info = + hif_get_target_info_handle(hal->hif_handle); + + 
switch (tgt_info->target_type) { + case TARGET_TYPE_QCA6290: + hal->use_register_windowing = true; + break; + default: + break; + } +} + +/** + * hal_attach - Initialize HAL layer + * @hif_handle: Opaque HIF handle + * @qdf_dev: QDF device + * + * Return: Opaque HAL SOC handle + * NULL on failure (if given ring is not available) + * + * This function should be called as part of HIF initialization (for accessing + * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle() + * + */ +void *hal_attach(void *hif_handle, qdf_device_t qdf_dev) +{ + struct hal_soc *hal; + int i; + + hal = qdf_mem_malloc(sizeof(*hal)); + + if (!hal) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: hal_soc allocation failed\n", __func__); + goto fail0; + } + hal->hif_handle = hif_handle; + hal->dev_base_addr = hif_get_dev_ba(hif_handle); + hal->qdf_dev = qdf_dev; + hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent( + qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) * + HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr)); + if (!hal->shadow_rdptr_mem_paddr) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: hal->shadow_rdptr_mem_paddr allocation failed\n", + __func__); + goto fail1; + } + + hal->shadow_wrptr_mem_vaddr = + (uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev, + sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS, + &(hal->shadow_wrptr_mem_paddr)); + if (!hal->shadow_wrptr_mem_vaddr) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: hal->shadow_wrptr_mem_vaddr allocation failed\n", + __func__); + goto fail2; + } + + for (i = 0; i < HAL_SRNG_ID_MAX; i++) { + hal->srng_list[i].initialized = 0; + hal->srng_list[i].ring_id = i; + } + + qdf_spinlock_create(&hal->register_access_lock); + hal->register_window = 0; + + hal_target_based_configure(hal); + + return (void *)hal; + +fail2: + qdf_mem_free_consistent(qdf_dev, qdf_dev->dev, + sizeof(*(hal->shadow_rdptr_mem_vaddr)) * 
HAL_SRNG_ID_MAX, + hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0); +fail1: + qdf_mem_free(hal); +fail0: + return NULL; +} +qdf_export_symbol(hal_attach); + +/** + * hal_mem_info - Retrieve hal memory base address + * + * @hal_soc: Opaque HAL SOC handle + * @mem: pointer to structure to be updated with hal mem info + */ +void hal_get_meminfo(void *hal_soc, struct hal_mem_info *mem ) +{ + struct hal_soc *hal = (struct hal_soc *)hal_soc; + mem->dev_base_addr = (void *)hal->dev_base_addr; + mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr; + mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr; + mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr; + mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr; + hif_read_phy_mem_base(hal->hif_handle, (qdf_dma_addr_t *)&mem->dev_base_paddr); + return; +} +qdf_export_symbol(hal_get_meminfo); + +/** + * hal_detach - Detach HAL layer + * @hal_soc: HAL SOC handle + * + * Return: Opaque HAL SOC handle + * NULL on failure (if given ring is not available) + * + * This function should be called as part of HIF initialization (for accessing + * copy engines). 
DP layer will get hal_soc handle using hif_get_hal_handle() + * + */ +extern void hal_detach(void *hal_soc) +{ + struct hal_soc *hal = (struct hal_soc *)hal_soc; + + qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev, + sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX, + hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0); + qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev, + sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS, + hal->shadow_wrptr_mem_vaddr, hal->shadow_wrptr_mem_paddr, 0); + qdf_mem_free(hal); + + return; +} +qdf_export_symbol(hal_detach); + +/** + * hal_srng_src_hw_init - Private function to initialize SRNG + * source ring HW + * @hal_soc: HAL SOC handle + * @srng: SRNG ring pointer + */ +static inline void hal_srng_src_hw_init(struct hal_soc *hal, + struct hal_srng *srng) +{ + uint32_t reg_val = 0; + uint64_t tp_addr = 0; + + HIF_DBG("%s: hw_init srng %d", __func__, srng->ring_id); + + if (srng->flags & HAL_SRNG_MSI_INTR) { + SRNG_SRC_REG_WRITE(srng, MSI1_BASE_LSB, + srng->msi_addr & 0xffffffff); + reg_val = SRNG_SM(SRNG_SRC_FLD(MSI1_BASE_MSB, ADDR), + (uint64_t)(srng->msi_addr) >> 32) | + SRNG_SM(SRNG_SRC_FLD(MSI1_BASE_MSB, + MSI1_ENABLE), 1); + SRNG_SRC_REG_WRITE(srng, MSI1_BASE_MSB, reg_val); + SRNG_SRC_REG_WRITE(srng, MSI1_DATA, srng->msi_data); + } + + SRNG_SRC_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff); + reg_val = SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_BASE_ADDR_MSB), + ((uint64_t)(srng->ring_base_paddr) >> 32)) | + SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_SIZE), + srng->entry_size * srng->num_entries); + SRNG_SRC_REG_WRITE(srng, BASE_MSB, reg_val); + +#if defined(WCSS_VERSION) && \ + ((defined(CONFIG_WIN) && (WCSS_VERSION > 81)) || \ + (defined(CONFIG_MCL) && (WCSS_VERSION >= 72))) + reg_val = SRNG_SM(SRNG_SRC_FLD(ID, ENTRY_SIZE), srng->entry_size); +#else + reg_val = SRNG_SM(SRNG_SRC_FLD(ID, RING_ID), srng->ring_id) | + SRNG_SM(SRNG_SRC_FLD(ID, ENTRY_SIZE), srng->entry_size); +#endif + 
	SRNG_SRC_REG_WRITE(srng, ID, reg_val);

	/**
	 * Interrupt setup:
	 * Default interrupt mode is 'pulse'. Need to setup SW_INTERRUPT_MODE
	 * if level mode is required
	 */
	reg_val = 0;

	/*
	 * WAR - Hawkeye v1 has a hardware bug which requires timer value to be
	 * programmed in terms of 1us resolution instead of 8us resolution as
	 * given in MLD.
	 */
	if (srng->intr_timer_thres_us) {
		reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX0,
			INTERRUPT_TIMER_THRESHOLD),
			srng->intr_timer_thres_us);
		/* For HK v2 this should be (srng->intr_timer_thres_us >> 3) */
	}

	if (srng->intr_batch_cntr_thres_entries) {
		/* HW counts in descriptor words, SW in entries */
		reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX0,
			BATCH_COUNTER_THRESHOLD),
			srng->intr_batch_cntr_thres_entries *
			srng->entry_size);
	}
	SRNG_SRC_REG_WRITE(srng, CONSUMER_INT_SETUP_IX0, reg_val);

	reg_val = 0;
	if (srng->flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
		reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX1,
			LOW_THRESHOLD), srng->u.src_ring.low_threshold);
	}

	SRNG_SRC_REG_WRITE(srng, CONSUMER_INT_SETUP_IX1, reg_val);

	/* As per HW team, TP_ADDR and HP_ADDR for Idle link ring should
	 * remain 0 to avoid some WBM stability issues. Remote head/tail
	 * pointers are not required since this ring is completely managed
	 * by WBM HW
	 */
	if (srng->ring_id != HAL_SRNG_WBM_IDLE_LINK) {
		/* Translate the shadow rdptr virtual slot into its physical
		 * address so HW can post tail-pointer updates to the host.
		 */
		tp_addr = (uint64_t)(hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.tp_addr) -
			(unsigned long)(hal->shadow_rdptr_mem_vaddr)));
		SRNG_SRC_REG_WRITE(srng, TP_ADDR_LSB, tp_addr & 0xffffffff);
		SRNG_SRC_REG_WRITE(srng, TP_ADDR_MSB, tp_addr >> 32);
	}

	/* Initialize head and tail pointers to indicate ring is empty */
	SRNG_SRC_REG_WRITE(srng, HP, 0);
	SRNG_SRC_REG_WRITE(srng, TP, 0);
	*(srng->u.src_ring.tp_addr) = 0;

	reg_val = ((srng->flags & HAL_SRNG_DATA_TLV_SWAP) ?
		SRNG_SM(SRNG_SRC_FLD(MISC, DATA_TLV_SWAP_BIT), 1) : 0) |
		((srng->flags & HAL_SRNG_RING_PTR_SWAP) ?
		SRNG_SM(SRNG_SRC_FLD(MISC, HOST_FW_SWAP_BIT), 1) : 0) |
		((srng->flags & HAL_SRNG_MSI_SWAP) ?
		SRNG_SM(SRNG_SRC_FLD(MISC, MSI_SWAP_BIT), 1) : 0);

	/* Loop count is not used for SRC rings */
	reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, LOOPCNT_DISABLE), 1);

	/*
	 * reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, SRNG_ENABLE), 1);
	 * todo: update fw_api and replace with above line
	 * (when SRNG_ENABLE field for the MISC register is available in fw_api)
	 * (WCSS_UMAC_CE_0_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_MISC)
	 */
	reg_val |= 0x40;

	SRNG_SRC_REG_WRITE(srng, MISC, reg_val);

}

/**
 * hal_ce_dst_setup - Initialize CE destination ring registers
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 * @ring_num: CE ring number (selects the per-ring register bank)
 */
static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
				    int ring_num)
{
	uint32_t reg_val = 0;
	uint32_t reg_addr;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, CE_DST);

	/* set DEST_MAX_LENGTH according to ce assignment */
	reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_ADDR(
			ring_config->reg_start[R0_INDEX] +
			(ring_num * ring_config->reg_size[R0_INDEX]));

	/* read-modify-write: only the MAX_LENGTH field is changed */
	reg_val = HAL_REG_READ(hal, reg_addr);
	reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	reg_val |= srng->u.dst_ring.max_buffer_length &
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	HAL_REG_WRITE(hal, reg_addr, reg_val);
}

/**
 * hal_reo_remap_IX0 - Remap REO ring destination
 * @hal: HAL SOC handle
 * @remap_val: Remap value
 */
void hal_reo_remap_IX0(struct hal_soc *hal, uint32_t remap_val)
{
	uint32_t reg_offset = HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
		SEQ_WCSS_UMAC_REO_REG_OFFSET);

	HAL_REG_WRITE(hal, reg_offset, remap_val);
}

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest ring head pointer
 * @srng: sring pointer
 * @paddr: physical address
 */
void hal_srng_dst_set_hp_paddr(struct hal_srng *srng,
			       uint64_t paddr)
{
	SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB,
			   paddr & 0xffffffff);
	SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB,
			   paddr >> 32);
}

/**
 * hal_srng_dst_init_hp() - Initialize destination ring head pointer
 * @srng: sring pointer
 * @vaddr: virtual address
 */
void hal_srng_dst_init_hp(struct hal_srng *srng,
			  uint32_t *vaddr)
{
	srng->u.dst_ring.hp_addr = vaddr;
	/* Seed both the HW register and the host copy from the cached value */
	SRNG_DST_REG_WRITE(srng, HP, srng->u.dst_ring.cached_hp);
	*(srng->u.dst_ring.hp_addr) = srng->u.dst_ring.cached_hp;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "hp_addr=%pK, cached_hp=%d, hp=%d\n",
		  (void *)srng->u.dst_ring.hp_addr, srng->u.dst_ring.cached_hp,
		  *(srng->u.dst_ring.hp_addr));
}

/**
 * hal_srng_dst_hw_init - Private function to initialize SRNG
 * destination ring HW
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	uint32_t reg_val = 0;
	uint64_t hp_addr = 0;

	HIF_DBG("%s: hw_init srng %d", __func__, srng->ring_id);

	if (srng->flags & HAL_SRNG_MSI_INTR) {
		SRNG_DST_REG_WRITE(srng, MSI1_BASE_LSB,
			srng->msi_addr & 0xffffffff);
		reg_val = SRNG_SM(SRNG_DST_FLD(MSI1_BASE_MSB, ADDR),
			(uint64_t)(srng->msi_addr) >> 32) |
			SRNG_SM(SRNG_DST_FLD(MSI1_BASE_MSB,
			MSI1_ENABLE), 1);
		SRNG_DST_REG_WRITE(srng, MSI1_BASE_MSB, reg_val);
		SRNG_DST_REG_WRITE(srng, MSI1_DATA, srng->msi_data);
	}

	SRNG_DST_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff);
	reg_val = SRNG_SM(SRNG_DST_FLD(BASE_MSB, RING_BASE_ADDR_MSB),
		((uint64_t)(srng->ring_base_paddr) >> 32)) |
		SRNG_SM(SRNG_DST_FLD(BASE_MSB, RING_SIZE),
		srng->entry_size * srng->num_entries);
	SRNG_DST_REG_WRITE(srng, BASE_MSB, reg_val);

	reg_val = SRNG_SM(SRNG_DST_FLD(ID, RING_ID), srng->ring_id) |
		SRNG_SM(SRNG_DST_FLD(ID, ENTRY_SIZE), srng->entry_size);
	SRNG_DST_REG_WRITE(srng, ID, reg_val);


	/**
	 * Interrupt setup:
	 * Default interrupt mode is 'pulse'.
 Need to setup SW_INTERRUPT_MODE
	 * if level mode is required
	 */
	reg_val = 0;
	if (srng->intr_timer_thres_us) {
		/* Timer threshold is programmed in 8us units on DST rings */
		reg_val |= SRNG_SM(SRNG_DST_FLD(PRODUCER_INT_SETUP,
			INTERRUPT_TIMER_THRESHOLD),
			srng->intr_timer_thres_us >> 3);
	}

	if (srng->intr_batch_cntr_thres_entries) {
		/* HW counts in descriptor words, SW in entries */
		reg_val |= SRNG_SM(SRNG_DST_FLD(PRODUCER_INT_SETUP,
			BATCH_COUNTER_THRESHOLD),
			srng->intr_batch_cntr_thres_entries *
			srng->entry_size);
	}

	SRNG_DST_REG_WRITE(srng, PRODUCER_INT_SETUP, reg_val);

	/* Translate the shadow rdptr virtual slot holding this ring's HP into
	 * its physical address so HW can post head-pointer updates to host.
	 */
	hp_addr = (uint64_t)(hal->shadow_rdptr_mem_paddr +
		((unsigned long)(srng->u.dst_ring.hp_addr) -
		(unsigned long)(hal->shadow_rdptr_mem_vaddr)));
	SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB, hp_addr & 0xffffffff);
	SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB, hp_addr >> 32);

	/* Initialize head and tail pointers to indicate ring is empty */
	SRNG_DST_REG_WRITE(srng, HP, 0);
	SRNG_DST_REG_WRITE(srng, TP, 0);
	*(srng->u.dst_ring.hp_addr) = 0;

	reg_val = ((srng->flags & HAL_SRNG_DATA_TLV_SWAP) ?
		SRNG_SM(SRNG_DST_FLD(MISC, DATA_TLV_SWAP_BIT), 1) : 0) |
		((srng->flags & HAL_SRNG_RING_PTR_SWAP) ?
		SRNG_SM(SRNG_DST_FLD(MISC, HOST_FW_SWAP_BIT), 1) : 0) |
		((srng->flags & HAL_SRNG_MSI_SWAP) ?
		SRNG_SM(SRNG_DST_FLD(MISC, MSI_SWAP_BIT), 1) : 0);

	/*
	 * reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, SRNG_ENABLE), 1);
	 * todo: update fw_api and replace with above line
	 * (when SRNG_ENABLE field for the MISC register is available in fw_api)
	 * (WCSS_UMAC_CE_0_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_MISC)
	 */
	reg_val |= 0x40;

	SRNG_DST_REG_WRITE(srng, MISC, reg_val);

}

/**
 * hal_srng_hw_init - Private function to initialize SRNG HW
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 *
 * Dispatches to the source- or destination-ring initializer based on
 * ring direction.
 */
static inline void hal_srng_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		hal_srng_src_hw_init(hal, srng);
	else
		hal_srng_dst_hw_init(hal, srng);
}

#ifdef CONFIG_SHADOW_V2
#define ignore_shadow false
#define CHECK_SHADOW_REGISTERS true
#else
#define ignore_shadow true
#define CHECK_SHADOW_REGISTERS false
#endif

/**
 * hal_srng_setup - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (staring
 * from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.

 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
 * hal_srng_params structure.
 Ring base address should be 8 byte aligned
 * and size of each ring entry should be queried using the API
 * hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params)
{
	int ring_id;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_srng *srng;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	void *dev_base_addr;
	int i;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
	if (ring_id < 0)
		return NULL;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: mac_id %d ring_id %d\n",
		  __func__, mac_id, ring_id);

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->initialized) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Ring (ring_type, ring_num) already initialized\n",
			  __func__);
		return NULL;
	}

	/* Cache caller-supplied parameters in the SW ring state */
	dev_base_addr = hal->dev_base_addr;
	srng->ring_id = ring_id;
	srng->ring_dir = ring_config->ring_dir;
	srng->ring_base_paddr = ring_params->ring_base_paddr;
	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
	srng->entry_size = ring_config->entry_size;
	srng->num_entries = ring_params->num_entries;
	srng->ring_size = srng->num_entries * srng->entry_size;
	srng->ring_size_mask = srng->ring_size - 1;
	srng->msi_addr = ring_params->msi_addr;
	srng->msi_data = ring_params->msi_data;
	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
	srng->intr_batch_cntr_thres_entries =
		ring_params->intr_batch_cntr_thres_entries;
	srng->hal_soc = hal_soc;

	for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) {
		srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
			+ (ring_num * ring_config->reg_size[i]);
	}

	/* Zero out the entire ring memory */
	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
		srng->num_entries) << 2);

	srng->flags = ring_params->flags;
#ifdef BIG_ENDIAN_HOST
	/* TODO: See if we should we get these flags from caller */
	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
	srng->flags |= HAL_SRNG_MSI_SWAP;
	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size -
			srng->entry_size;
		srng->u.src_ring.tp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		srng->u.src_ring.low_threshold =
			ring_params->low_threshold * srng->entry_size;
		if (ring_config->lmac_ring) {
			/* For LMAC rings, head pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.src_ring.hp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) {
			/* hp_addr may have been pre-set to a shadow register
			 * by hal_update_srng_hp_tp_address(); otherwise fall
			 * back to the real device HP register.
			 */
			srng->u.src_ring.hp_addr = SRNG_SRC_ADDR(srng, HP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config\n",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
				SRNG_SRC_ADDR(srng, HP),
				srng->u.src_ring.hp_addr);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.hp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		if (ring_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.dst_ring.tp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) {
			/* Same shadow-register fallback as the SRC case */
			srng->u.dst_ring.tp_addr = SRNG_DST_ADDR(srng, TP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config\n",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
				SRNG_DST_ADDR(srng, TP),
				srng->u.dst_ring.tp_addr);
		}
	}

	/* LMAC rings are programmed by FW, not from the host */
	if (!(ring_config->lmac_ring)) {
		hal_srng_hw_init(hal, srng);

		if (ring_type == CE_DST) {
			srng->u.dst_ring.max_buffer_length = ring_params->max_buffer_length;
			hal_ce_dst_setup(hal, srng, ring_num);
		}
	}

	SRNG_LOCK_INIT(&srng->lock);

	srng->initialized = true;

	return (void *)srng;
}
qdf_export_symbol(hal_srng_setup);

/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
+ * @hal_soc: Opaque HAL SOC handle + * @hal_srng: Opaque HAL SRNG pointer + */ +void hal_srng_cleanup(void *hal_soc, void *hal_srng) +{ + struct hal_srng *srng = (struct hal_srng *)hal_srng; + SRNG_LOCK_DESTROY(&srng->lock); + srng->initialized = 0; +} +qdf_export_symbol(hal_srng_cleanup); + +/** + * hal_srng_get_entrysize - Returns size of ring entry in bytes + * @hal_soc: Opaque HAL SOC handle + * @ring_type: one of the types from hal_ring_type + * + */ +uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type) +{ + struct hal_hw_srng_config *ring_config = + HAL_SRNG_CONFIG(hal, ring_type); + return ring_config->entry_size << 2; +} +qdf_export_symbol(hal_srng_get_entrysize); + +/** + * hal_srng_max_entries - Returns maximum possible number of ring entries + * @hal_soc: Opaque HAL SOC handle + * @ring_type: one of the types from hal_ring_type + * + * Return: Maximum number of entries for the given ring_type + */ +uint32_t hal_srng_max_entries(void *hal_soc, int ring_type) +{ + struct hal_hw_srng_config *ring_config = HAL_SRNG_CONFIG(hal, ring_type); + return SRNG_MAX_SIZE_DWORDS / ring_config->entry_size; +} +qdf_export_symbol(hal_srng_max_entries); + +enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type) +{ + struct hal_hw_srng_config *ring_config = + HAL_SRNG_CONFIG(hal, ring_type); + + return ring_config->ring_dir; +} + +/** + * hal_srng_dump - Dump ring status + * @srng: hal srng pointer + */ +void hal_srng_dump(struct hal_srng *srng) +{ + if (srng->ring_dir == HAL_SRNG_SRC_RING) { + qdf_print("=== SRC RING %d ===", srng->ring_id); + qdf_print("hp %u, reap_hp %u, tp %u, cached tp %u", + srng->u.src_ring.hp, + srng->u.src_ring.reap_hp, + *srng->u.src_ring.tp_addr, + srng->u.src_ring.cached_tp); + } else { + qdf_print("=== DST RING %d ===", srng->ring_id); + qdf_print("tp %u, hp %u, cached tp %u, loop_cnt %u", + srng->u.dst_ring.tp, + *srng->u.dst_ring.hp_addr, + srng->u.dst_ring.cached_hp, + srng->u.dst_ring.loop_cnt); + } +} + +/** + * 
hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL + * + * @hal_soc: Opaque HAL SOC handle + * @hal_ring: Ring pointer (Source or Destination ring) + * @ring_params: SRNG parameters will be returned through this structure + */ +extern void hal_get_srng_params(void *hal_soc, void *hal_ring, + struct hal_srng_params *ring_params) +{ + struct hal_srng *srng = (struct hal_srng *)hal_ring; + int i =0; + ring_params->ring_id = srng->ring_id; + ring_params->ring_dir = srng->ring_dir; + ring_params->entry_size = srng->entry_size; + + ring_params->ring_base_paddr = srng->ring_base_paddr; + ring_params->ring_base_vaddr = srng->ring_base_vaddr; + ring_params->num_entries = srng->num_entries; + ring_params->msi_addr = srng->msi_addr; + ring_params->msi_data = srng->msi_data; + ring_params->intr_timer_thres_us = srng->intr_timer_thres_us; + ring_params->intr_batch_cntr_thres_entries = + srng->intr_batch_cntr_thres_entries; + ring_params->low_threshold = srng->u.src_ring.low_threshold; + ring_params->flags = srng->flags; + ring_params->ring_id = srng->ring_id; + for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) + ring_params->hwreg_base[i] = srng->hwreg_base[i]; +} +qdf_export_symbol(hal_get_srng_params); diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_tx.h b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_tx.h new file mode 100644 index 0000000000000000000000000000000000000000..6097509b7a5233e177d6b5a77d6799e22cf8679d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_tx.h @@ -0,0 +1,1228 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#if !defined(HAL_TX_H)
#define HAL_TX_H

/*---------------------------------------------------------------------------
  Include files
  ---------------------------------------------------------------------------*/
#include "hal_api.h"
#include "wcss_version.h"

#define WBM_RELEASE_RING_5_TX_RATE_STATS_OFFSET	0x00000014
#define WBM_RELEASE_RING_5_TX_RATE_STATS_LSB	0
#define WBM_RELEASE_RING_5_TX_RATE_STATS_MASK	0xffffffff


/*---------------------------------------------------------------------------
  Preprocessor definitions and constants
  ---------------------------------------------------------------------------*/
/* Byte offset of <block>_<field> within a descriptor */
#define HAL_OFFSET(block, field) block ## _ ## field ## _OFFSET

/* lvalue access to a 32-bit descriptor field */
#define HAL_SET_FLD(desc, block , field) \
	(*(uint32_t *) ((uint8_t *) desc + HAL_OFFSET(block, field)))

#define HAL_SET_FLD_OFFSET(desc, block , field, offset) \
	(*(uint32_t *) ((uint8_t *) desc + HAL_OFFSET(block, field) + (offset)))

#define HAL_TX_DESC_SET_TLV_HDR(desc, tag, len) \
do {                                            \
	((struct tlv_32_hdr *) desc)->tlv_tag = (tag); \
	((struct tlv_32_hdr *) desc)->tlv_len = (len); \
} while (0)

#define HAL_TX_TCL_DATA_TAG WIFITCL_DATA_CMD_E
#define HAL_TX_TCL_CMD_TAG WIFITCL_GSE_CMD_E

/* Shift a value into field position (set-mask) */
#define HAL_TX_SM(block, field, value) \
	((value << (block ## _ ## field ## _LSB)) & \
	 (block ## _ ## field ## _MASK))

/* Extract a field value (mask-shift) */
#define HAL_TX_MS(block, field, value) \
	(((value) & (block ## _ ## field ## _MASK)) >> \
	 (block ## _ ## field ## _LSB))

#define HAL_TX_DESC_GET(desc, block, field) \
	HAL_TX_MS(block, field, HAL_SET_FLD(desc, block, field))

#define HAL_TX_DESC_SUBBLOCK_GET(desc, block, sub, field) \
	HAL_TX_MS(sub, field, HAL_SET_FLD(desc, block, sub))

#define HAL_TX_BUF_TYPE_BUFFER 0
#define HAL_TX_BUF_TYPE_EXT_DESC 1

#define HAL_TX_DESC_LEN_DWORDS (NUM_OF_DWORDS_TCL_DATA_CMD)
#define HAL_TX_DESC_LEN_BYTES  (NUM_OF_DWORDS_TCL_DATA_CMD * 4)
#define HAL_TX_EXTENSION_DESC_LEN_DWORDS (NUM_OF_DWORDS_TX_MSDU_EXTENSION)
#define HAL_TX_EXTENSION_DESC_LEN_BYTES (NUM_OF_DWORDS_TX_MSDU_EXTENSION * 4)

#define HAL_TX_COMPLETION_DESC_LEN_DWORDS (NUM_OF_DWORDS_WBM_RELEASE_RING)
#define HAL_TX_COMPLETION_DESC_LEN_BYTES (NUM_OF_DWORDS_WBM_RELEASE_RING*4)
#define HAL_TX_BITS_PER_TID 3
#define HAL_TX_TID_BITS_MASK ((1 << HAL_TX_BITS_PER_TID) - 1)
#define HAL_TX_NUM_DSCP_PER_REGISTER 10
#define HAL_MAX_HW_DSCP_TID_MAPS 2
#define HAL_MAX_HW_DSCP_TID_MAPS_11AX 32

#define HTT_META_HEADER_LEN_BYTES 64
#define HAL_TX_EXT_DESC_WITH_META_DATA \
	(HTT_META_HEADER_LEN_BYTES + HAL_TX_EXTENSION_DESC_LEN_BYTES)

/* Length of WBM release ring without the status words */
#define HAL_TX_COMPLETION_DESC_BASE_LEN 12

#define HAL_TX_COMP_RELEASE_SOURCE_TQM 0
#define HAL_TX_COMP_RELEASE_SOURCE_FW 3

/* Define a place-holder release reason for FW */
#define HAL_TX_COMP_RELEASE_REASON_FW 99

/*
 * Offset of HTT Tx Descriptor in WBM Completion
 * HTT Tx Desc structure is passed from firmware to host overlayed
 * on wbm_release_ring DWORDs 2,3 ,4 and 5for software based completions
 * (Exception frames and TQM bypass frames)
 */
#define HAL_TX_COMP_HTT_STATUS_OFFSET 8
#define HAL_TX_COMP_HTT_STATUS_LEN 16

/* NOTE(review): duplicate of the HAL_TX_BUF_TYPE_* defines above (same
 * values, harmless redefinition) — candidate for removal upstream.
 */
#define HAL_TX_BUF_TYPE_BUFFER 0
#define HAL_TX_BUF_TYPE_EXT_DESC 1

#define HAL_TX_EXT_DESC_BUF_OFFSET TX_MSDU_EXTENSION_6_BUF0_PTR_31_0_OFFSET
#define HAL_TX_EXT_BUF_LOW_MASK TX_MSDU_EXTENSION_6_BUF0_PTR_31_0_MASK
#define HAL_TX_EXT_BUF_HI_MASK TX_MSDU_EXTENSION_7_BUF0_PTR_39_32_MASK
#define HAL_TX_EXT_BUF_LEN_MASK TX_MSDU_EXTENSION_7_BUF0_LEN_MASK
#define HAL_TX_EXT_BUF_LEN_LSB TX_MSDU_EXTENSION_7_BUF0_LEN_LSB
#define HAL_TX_EXT_BUF_WD_SIZE 2

#define HAL_TX_DESC_ADDRX_EN 0x1
#define HAL_TX_DESC_ADDRY_EN 0x2
#define HAL_TX_DESC_DEFAULT_LMAC_ID 0x3

/* WBM return-buffer-manager ids assigned to SW pools */
enum hal_tx_ret_buf_manager {
	HAL_WBM_SW0_BM_ID = 3,
	HAL_WBM_SW1_BM_ID = 4,
	HAL_WBM_SW2_BM_ID = 5,
	HAL_WBM_SW3_BM_ID = 6,
};

/*---------------------------------------------------------------------------
  Structures
  ---------------------------------------------------------------------------*/
/**
 * struct hal_tx_completion_status - HAL Tx completion descriptor contents
 * @status: frame acked/failed
 * @release_src: release source = TQM/FW
 * @ack_frame_rssi: RSSI of the received ACK or BA frame
 * @first_msdu: Indicates this MSDU is the first MSDU in AMSDU
 * @last_msdu: Indicates this MSDU is the last MSDU in AMSDU
 * @msdu_part_of_amsdu : Indicates this MSDU was part of an A-MSDU in MPDU
 * @bw: Indicates the BW of the upcoming transmission
 * @pkt_type: Transmit Packet Type
 * @stbc: When set, STBC transmission rate was used
 * @ldpc: When set, use LDPC transmission rates
 * @sgi: Legacy normal GI
 *       Legacy short GI
 *       HE related GI
 *       HE
 * @mcs: Transmit MCS Rate
 * @ofdma: Set when the transmission was an OFDMA transmission
 * @tones_in_ru: The number of tones in the RU used.
 * @tsf: Lower 32 bits of the TSF
 * @ppdu_id: TSF, snapshot of this value when transmission of the
 *	     PPDU containing the frame finished.
 * @transmit_cnt: Number of times this frame has been transmitted
 * @tid: TID of the flow or MPDU queue
 * @peer_id: Peer ID of the flow or MPDU queue
 */
struct hal_tx_completion_status {
	uint8_t status;
	uint8_t release_src;
	uint8_t ack_frame_rssi;
	uint8_t first_msdu:1,
		last_msdu:1,
		msdu_part_of_amsdu:1;
	uint32_t bw:2,
		 pkt_type:4,
		 stbc:1,
		 ldpc:1,
		 sgi:2,
		 mcs:4,
		 ofdma:1,
		 tones_in_ru:12,
		 valid:1;
	uint32_t tsf;
	uint32_t ppdu_id;
	uint8_t transmit_cnt;
	uint8_t tid;
	uint16_t peer_id;
};

/**
 * struct hal_tx_desc_comp_s - hal tx completion descriptor contents
 * @desc: Transmit status information from descriptor
 */
struct hal_tx_desc_comp_s {
	uint32_t desc[HAL_TX_COMPLETION_DESC_LEN_DWORDS];
};

/*
 * enum hal_tx_encrypt_type - Type of decrypt cipher used (valid only for RAW)
 * @HAL_TX_ENCRYPT_TYPE_WEP_40: WEP 40-bit
 * @HAL_TX_ENCRYPT_TYPE_WEP_104: WEP 104-bit
 * @HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC: TKIP without MIC
 * @HAL_TX_ENCRYPT_TYPE_WEP_128: WEP_128
 * @HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC: TKIP_WITH_MIC
 * @HAL_TX_ENCRYPT_TYPE_WAPI: WAPI
 * @HAL_TX_ENCRYPT_TYPE_AES_CCMP_128: AES_CCMP_128
 * @HAL_TX_ENCRYPT_TYPE_NO_CIPHER: NO CIPHER
 * @HAL_TX_ENCRYPT_TYPE_AES_CCMP_256: AES_CCMP_256
 * @HAL_TX_ENCRYPT_TYPE_AES_GCMP_128: AES_GCMP_128
 * @HAL_TX_ENCRYPT_TYPE_AES_GCMP_256: AES_GCMP_256
 * @HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4: WAPI GCM SM4
 */
enum hal_tx_encrypt_type {
	HAL_TX_ENCRYPT_TYPE_WEP_40 = 0,
	HAL_TX_ENCRYPT_TYPE_WEP_104 = 1,
	HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC = 2,
	HAL_TX_ENCRYPT_TYPE_WEP_128 = 3,
	HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC = 4,
	HAL_TX_ENCRYPT_TYPE_WAPI = 5,
	HAL_TX_ENCRYPT_TYPE_AES_CCMP_128 = 6,
	HAL_TX_ENCRYPT_TYPE_NO_CIPHER = 7,
	HAL_TX_ENCRYPT_TYPE_AES_CCMP_256 = 8,
	HAL_TX_ENCRYPT_TYPE_AES_GCMP_128 = 9,
	HAL_TX_ENCRYPT_TYPE_AES_GCMP_256 = 10,
	HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4 = 11,
};

/*
 * enum hal_tx_encap_type - Encapsulation type that HW will perform
 * @HAL_TX_ENCAP_TYPE_RAW: Raw Packet Type
 * @HAL_TX_ENCAP_TYPE_NWIFI: Native WiFi Type
 * @HAL_TX_ENCAP_TYPE_ETHERNET: Ethernet
 * @HAL_TX_ENCAP_TYPE_802_3: 802.3 Frame
 */
enum hal_tx_encap_type {
	HAL_TX_ENCAP_TYPE_RAW = 0,
	HAL_TX_ENCAP_TYPE_NWIFI = 1,
	HAL_TX_ENCAP_TYPE_ETHERNET = 2,
	HAL_TX_ENCAP_TYPE_802_3 = 3,
};

/**
 * enum hal_tx_tqm_release_reason - TQM Release reason codes
 *
 * @HAL_TX_TQM_RR_FRAME_ACKED : ACK of BA for it was received
 * @HAL_TX_TQM_RR_REM_CMD_REM : Remove cmd of type "Remove_mpdus" initiated
 *				by SW
 * @HAL_TX_TQM_RR_REM_CMD_TX : Remove command of type Remove_transmitted_mpdus
 *				initiated by SW
 * @HAL_TX_TQM_RR_REM_CMD_NOTX : Remove cmd of type Remove_untransmitted_mpdus
 *				initiated by SW
 * @HAL_TX_TQM_RR_REM_CMD_AGED : Remove command of type "Remove_aged_mpdus" or
 *				"Remove_aged_msdus" initiated by SW
 * @HAL_TX_TQM_RR_FW_REASON1 : Remove command where fw indicated that
 *				remove reason is fw_reason1
 * @HAL_TX_TQM_RR_FW_REASON2 : Remove command where fw indicated that
 *				remove reason is fw_reason2
 * @HAL_TX_TQM_RR_FW_REASON3 : Remove command where fw indicated that
 *				remove reason is fw_reason3
 */
enum hal_tx_tqm_release_reason {
	HAL_TX_TQM_RR_FRAME_ACKED,
	HAL_TX_TQM_RR_REM_CMD_REM,
	HAL_TX_TQM_RR_REM_CMD_TX,
	HAL_TX_TQM_RR_REM_CMD_NOTX,
	HAL_TX_TQM_RR_REM_CMD_AGED,
	HAL_TX_TQM_RR_FW_REASON1,
	HAL_TX_TQM_RR_FW_REASON2,
	HAL_TX_TQM_RR_FW_REASON3,
};

/* enum - Table IDs for 2 DSCP-TID mapping Tables that TCL H/W supports
 * @HAL_TX_DSCP_TID_MAP_TABLE_DEFAULT: Default DSCP-TID mapping table
 * @HAL_TX_DSCP_TID_MAP_TABLE_OVERRIDE: DSCP-TID map override table
 */
enum hal_tx_dscp_tid_table_id {
	HAL_TX_DSCP_TID_MAP_TABLE_DEFAULT,
	HAL_TX_DSCP_TID_MAP_TABLE_OVERRIDE,
};

/*---------------------------------------------------------------------------
  Function declarations and documentation
  ---------------------------------------------------------------------------*/

+/*---------------------------------------------------------------------------
+  TCL Descriptor accessor APIs
+  ---------------------------------------------------------------------------*/
+/**
+ * hal_tx_desc_set_buf_addr - Fill Buffer Address information in Tx Descriptor
+ * @desc: Handle to Tx Descriptor
+ * @paddr: Physical Address
+ * @pool_id: Return Buffer Manager ID
+ * @desc_id: Descriptor ID
+ * @type: 0 - Address points to a MSDU buffer
+ *	  1 - Address points to MSDU extension descriptor
+ *
+ * Return: void
+ */
+static inline void hal_tx_desc_set_buf_addr(void *desc,
+		dma_addr_t paddr, uint8_t pool_id,
+		uint32_t desc_id, uint8_t type)
+{
+	/* Set buffer_addr_info.buffer_addr_31_0 (plain assignment: this is
+	 * the first write to the word and initializes it)
+	 */
+	HAL_SET_FLD(desc, TCL_DATA_CMD_0, BUFFER_ADDR_INFO_BUF_ADDR_INFO) =
+		HAL_TX_SM(BUFFER_ADDR_INFO_0, BUFFER_ADDR_31_0, paddr);
+
+	/* Set buffer_addr_info.buffer_addr_39_32 */
+	HAL_SET_FLD(desc, TCL_DATA_CMD_1,
+			BUFFER_ADDR_INFO_BUF_ADDR_INFO) |=
+		HAL_TX_SM(BUFFER_ADDR_INFO_1, BUFFER_ADDR_39_32,
+			(((uint64_t) paddr) >> 32));
+
+	/* Set buffer_addr_info.return_buffer_manager = pool id */
+	HAL_SET_FLD(desc, TCL_DATA_CMD_1,
+			BUFFER_ADDR_INFO_BUF_ADDR_INFO) |=
+		HAL_TX_SM(BUFFER_ADDR_INFO_1,
+			RETURN_BUFFER_MANAGER, (pool_id + HAL_WBM_SW0_BM_ID));
+
+	/* Set buffer_addr_info.sw_buffer_cookie = desc_id */
+	HAL_SET_FLD(desc, TCL_DATA_CMD_1,
+			BUFFER_ADDR_INFO_BUF_ADDR_INFO) |=
+		HAL_TX_SM(BUFFER_ADDR_INFO_1, SW_BUFFER_COOKIE, desc_id);
+
+	/* Set Buffer or Ext Descriptor Type */
+	HAL_SET_FLD(desc, TCL_DATA_CMD_2,
+			BUF_OR_EXT_DESC_TYPE) |=
+		HAL_TX_SM(TCL_DATA_CMD_2, BUF_OR_EXT_DESC_TYPE, type);
+}
+
+/**
+ * hal_tx_desc_set_buf_length - Set Data length in bytes in Tx Descriptor
+ * @desc: Handle to Tx Descriptor
+ * @data_length: MSDU length in case of direct descriptor.
+ *	Length of link extension descriptor in case of Link extension
+ *	descriptor. Includes the length of Metadata
+ *
+ * Return: None
+ */
+static inline void hal_tx_desc_set_buf_length(void *desc,
+		uint16_t data_length)
+{
+	HAL_SET_FLD(desc, TCL_DATA_CMD_3, DATA_LENGTH) |=
+		HAL_TX_SM(TCL_DATA_CMD_3, DATA_LENGTH, data_length);
+}
+
+/**
+ * hal_tx_desc_set_buf_offset - Sets Packet Offset field in Tx descriptor
+ * @desc: Handle to Tx Descriptor
+ * @offset: Packet offset from Metadata in case of direct buffer descriptor.
+ *
+ * Return: void
+ */
+static inline void hal_tx_desc_set_buf_offset(void *desc,
+		uint8_t offset)
+{
+	HAL_SET_FLD(desc, TCL_DATA_CMD_3, PACKET_OFFSET) |=
+		HAL_TX_SM(TCL_DATA_CMD_3, PACKET_OFFSET, offset);
+}
+
+/**
+ * hal_tx_desc_set_encap_type - Set encapsulation type in Tx Descriptor
+ * @desc: Handle to Tx Descriptor
+ * @encap_type: Encapsulation that HW will perform
+ *
+ * Return: void
+ *
+ */
+static inline void hal_tx_desc_set_encap_type(void *desc,
+		enum hal_tx_encap_type encap_type)
+{
+	HAL_SET_FLD(desc, TCL_DATA_CMD_2, ENCAP_TYPE) |=
+		HAL_TX_SM(TCL_DATA_CMD_2, ENCAP_TYPE, encap_type);
+}
+
+/**
+ * hal_tx_desc_set_encrypt_type - Sets the Encrypt Type in Tx Descriptor
+ * @desc: Handle to Tx Descriptor
+ * @type: Encrypt Type
+ *
+ * Return: void
+ */
+static inline void hal_tx_desc_set_encrypt_type(void *desc,
+		enum hal_tx_encrypt_type type)
+{
+	HAL_SET_FLD(desc, TCL_DATA_CMD_2, ENCRYPT_TYPE) |=
+		HAL_TX_SM(TCL_DATA_CMD_2, ENCRYPT_TYPE, type);
+}
+
+/**
+ * hal_tx_desc_set_addr_search_flags - Enable AddrX and AddrY search flags
+ * @desc: Handle to Tx Descriptor
+ * @flags: Bit 0 - AddrX search enable, Bit 1 - AddrY search enable
+ *
+ * Return: void
+ */
+static inline void hal_tx_desc_set_addr_search_flags(void *desc,
+		uint8_t flags)
+{
+	HAL_SET_FLD(desc, TCL_DATA_CMD_2, ADDRX_EN) |=
+		HAL_TX_SM(TCL_DATA_CMD_2, ADDRX_EN, (flags & 0x1));
+
+	HAL_SET_FLD(desc, TCL_DATA_CMD_2,
+		ADDRY_EN, (flags >> 1));
+}
+
+/**
+ * hal_tx_desc_set_l4_checksum_en - Set TCP/IP checksum enable flags
+ * Tx Descriptor for MSDU_buffer type
+ * @desc: Handle to Tx Descriptor
+ * @en: UDP/TCP over ipv4/ipv6 checksum enable flags (5 bits)
+ *
+ * Return: void
+ */
+static inline void hal_tx_desc_set_l4_checksum_en(void *desc,
+		uint8_t en)
+{
+	/* The four L4 checksum-enable fields are packed into the same dword
+	 * that IPV4_CHECKSUM_EN addresses, so one OR-write sets all of them.
+	 * NOTE(review): confirm the field macros share one register word.
+	 */
+	HAL_SET_FLD(desc, TCL_DATA_CMD_3, IPV4_CHECKSUM_EN) |=
+		(HAL_TX_SM(TCL_DATA_CMD_3, UDP_OVER_IPV4_CHECKSUM_EN, en) |
+		 HAL_TX_SM(TCL_DATA_CMD_3, UDP_OVER_IPV6_CHECKSUM_EN, en) |
+		 HAL_TX_SM(TCL_DATA_CMD_3, TCP_OVER_IPV4_CHECKSUM_EN, en) |
+		 HAL_TX_SM(TCL_DATA_CMD_3, TCP_OVER_IPV6_CHECKSUM_EN, en));
+}
+
+/**
+ * hal_tx_desc_set_l3_checksum_en - Set IPv4 checksum enable flag in
+ * Tx Descriptor for MSDU_buffer type
+ * @desc: Handle to Tx Descriptor
+ * @en: ipv4 checksum enable flag
+ *
+ * Return: void
+ */
+static inline void hal_tx_desc_set_l3_checksum_en(void *desc,
+		uint8_t en)
+{
+	HAL_SET_FLD(desc, TCL_DATA_CMD_3, IPV4_CHECKSUM_EN) |=
+		HAL_TX_SM(TCL_DATA_CMD_3, IPV4_CHECKSUM_EN, en);
+}
+
+/**
+ * hal_tx_desc_set_fw_metadata- Sets the metadata that is part of TCL descriptor
+ * @desc: Handle to Tx Descriptor
+ * @metadata: Metadata to be sent to Firmware
+ *
+ * Return: void
+ */
+static inline void hal_tx_desc_set_fw_metadata(void *desc,
+		uint16_t metadata)
+{
+	HAL_SET_FLD(desc, TCL_DATA_CMD_2, TCL_CMD_NUMBER) |=
+		HAL_TX_SM(TCL_DATA_CMD_2, TCL_CMD_NUMBER, metadata);
+}
+
+/**
+ * hal_tx_desc_set_to_fw - Set To_FW bit in Tx Descriptor.
+ * @desc: Handle to Tx Descriptor
+ * @to_fw: if set, Forward packet to FW along with classification result
+ *
+ * Return: void
+ */
+static inline void hal_tx_desc_set_to_fw(void *desc, uint8_t to_fw)
+{
+	HAL_SET_FLD(desc, TCL_DATA_CMD_3, TO_FW) |=
+		HAL_TX_SM(TCL_DATA_CMD_3, TO_FW, to_fw);
+}
+
+/**
+ * hal_tx_desc_set_dscp_tid_table_id - Sets DSCP to TID conversion table ID
+ * @desc: Handle to Tx Descriptor
+ * @id: DSCP to tid conversion table to be used for this frame
+ *
+ * Return: void
+ */
+#if !defined(QCA_WIFI_QCA6290_11AX)
+static inline void hal_tx_desc_set_dscp_tid_table_id(void *desc,
+		uint8_t id)
+{
+	HAL_SET_FLD(desc, TCL_DATA_CMD_3,
+			DSCP_TO_TID_PRIORITY_TABLE_ID) |=
+		HAL_TX_SM(TCL_DATA_CMD_3,
+			DSCP_TO_TID_PRIORITY_TABLE_ID, id);
+}
+#else
+static inline void hal_tx_desc_set_dscp_tid_table_id(void *desc,
+		uint8_t id)
+{
+	HAL_SET_FLD(desc, TCL_DATA_CMD_5,
+			DSCP_TID_TABLE_NUM) |=
+		HAL_TX_SM(TCL_DATA_CMD_5,
+			DSCP_TID_TABLE_NUM, id);
+}
+#endif
+
+/**
+ * hal_tx_desc_set_mesh_en - Set mesh_enable flag in Tx descriptor
+ * @desc: Handle to Tx Descriptor
+ * @en: For raw WiFi frames, this indicates transmission to a mesh STA,
+ *	enabling the interpretation of the 'Mesh Control Present' bit
+ *	(bit 8) of QoS Control (otherwise this bit is ignored),
+ *	For native WiFi frames, this indicates that a 'Mesh Control' field
+ *	is present between the header and the LLC.
+ *
+ * Return: void
+ */
+static inline void hal_tx_desc_set_mesh_en(void *desc, uint8_t en)
+{
+	HAL_SET_FLD(desc, TCL_DATA_CMD_4, MESH_ENABLE) |=
+		HAL_TX_SM(TCL_DATA_CMD_4, MESH_ENABLE, en);
+}
+
+/**
+ * hal_tx_desc_set_hlos_tid - Set the TID value (override DSCP/PCP fields in
+ * frame) to be used for Tx Frame
+ * @desc: Handle to Tx Descriptor
+ * @hlos_tid: HLOS TID
+ *
+ * Return: void
+ */
+static inline void hal_tx_desc_set_hlos_tid(void *desc,
+		uint8_t hlos_tid)
+{
+	HAL_SET_FLD(desc, TCL_DATA_CMD_4, HLOS_TID) |=
+		HAL_TX_SM(TCL_DATA_CMD_4, HLOS_TID, hlos_tid);
+
+	/* also force HW to honor the HLOS-provided TID */
+	HAL_SET_FLD(desc, TCL_DATA_CMD_4, HLOS_TID_OVERWRITE) |=
+		HAL_TX_SM(TCL_DATA_CMD_4, HLOS_TID_OVERWRITE, 1);
+}
+
+#ifdef QCA_WIFI_QCA6290_11AX
+/**
+ * hal_tx_desc_set_lmac_id - Set the lmac_id value
+ * @desc: Handle to Tx Descriptor
+ * @lmac_id: mac Id to ast matching
+ *	b00 - mac 0
+ *	b01 - mac 1
+ *	b10 - mac 2
+ *	b11 - all macs (legacy HK way)
+ *
+ * Return: void
+ */
+static inline void hal_tx_desc_set_lmac_id(void *desc,
+		uint8_t lmac_id)
+{
+	HAL_SET_FLD(desc, TCL_DATA_CMD_4, LMAC_ID) |=
+		HAL_TX_SM(TCL_DATA_CMD_4, LMAC_ID, lmac_id);
+}
+#else
+/* Non-11AX targets have no LMAC_ID field in TCL_DATA_CMD: no-op stub */
+static inline void hal_tx_desc_set_lmac_id(void *desc,
+		uint8_t lmac_id)
+{
+}
+#endif
+/**
+ * hal_tx_desc_sync - Commit the descriptor to Hardware
+ * @hal_tx_desc_cached: Cached descriptor that software maintains
+ * @hw_desc: Hardware descriptor to be updated
+ *
+ * Return: none
+ */
+static inline void hal_tx_desc_sync(void *hal_tx_desc_cached,
+		void *hw_desc)
+{
+	/* copies 20 bytes past the TLV header - presumably the programmed
+	 * portion of TCL_DATA_CMD; TODO confirm against descriptor layout
+	 */
+	qdf_mem_copy((hw_desc + sizeof(struct tlv_32_hdr)),
+			hal_tx_desc_cached, 20);
+}
+
+/*---------------------------------------------------------------------------
+  Tx MSDU Extension Descriptor accessor APIs
+  ---------------------------------------------------------------------------*/
+/**
+ * hal_tx_ext_desc_set_tso_enable() - Set TSO Enable Flag
+ * @desc: Handle to Tx MSDU Extension Descriptor
+ * @tso_en: bool value set to true if TSO is enabled
+ *
+ * Return: none
+ */
+static
inline void hal_tx_ext_desc_set_tso_enable(void *desc,
+		uint8_t tso_en)
+{
+	HAL_SET_FLD(desc, TX_MSDU_EXTENSION_0, TSO_ENABLE) |=
+		HAL_TX_SM(TX_MSDU_EXTENSION_0, TSO_ENABLE, tso_en);
+}
+
+/**
+ * hal_tx_ext_desc_set_tso_flags() - Set TSO Flags
+ * @desc: Handle to Tx MSDU Extension Descriptor
+ * @tso_flags: 32-bit word with all TSO flags consolidated
+ *
+ * Return: none
+ */
+static inline void hal_tx_ext_desc_set_tso_flags(void *desc,
+		uint32_t tso_flags)
+{
+	/* plain assignment: overwrites the whole first dword of the
+	 * extension descriptor with the consolidated flags word
+	 */
+	HAL_SET_FLD_OFFSET(desc, TX_MSDU_EXTENSION_0, TSO_ENABLE, 0) =
+		tso_flags;
+}
+
+/**
+ * hal_tx_ext_desc_set_tcp_flags() - Enable HW Checksum offload
+ * @desc: Handle to Tx MSDU Extension Descriptor
+ * @tcp_flags: TCP flags {NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN}
+ * @mask: TCP flag mask. Tcp_flag is inserted into the header
+ *	based on the mask, if tso is enabled
+ *
+ * Return: none
+ */
+static inline void hal_tx_ext_desc_set_tcp_flags(void *desc,
+		uint16_t tcp_flags,
+		uint16_t mask)
+{
+	HAL_SET_FLD(desc, TX_MSDU_EXTENSION_0, TCP_FLAG) |=
+		((HAL_TX_SM(TX_MSDU_EXTENSION_0, TCP_FLAG, tcp_flags)) |
+		 (HAL_TX_SM(TX_MSDU_EXTENSION_0, TCP_FLAG_MASK, mask)));
+}
+
+/**
+ * hal_tx_ext_desc_set_msdu_length() - Set L2 and IP Lengths
+ * @desc: Handle to Tx MSDU Extension Descriptor
+ * @l2_len: L2 length for the msdu, if tso is enabled
+ * @ip_len: IP length for the msdu, if tso is enabled
+ *
+ * Return: none
+ */
+static inline void hal_tx_ext_desc_set_msdu_length(void *desc,
+		uint16_t l2_len,
+		uint16_t ip_len)
+{
+	HAL_SET_FLD(desc, TX_MSDU_EXTENSION_1, L2_LENGTH) |=
+		((HAL_TX_SM(TX_MSDU_EXTENSION_1, L2_LENGTH, l2_len)) |
+		 (HAL_TX_SM(TX_MSDU_EXTENSION_1, IP_LENGTH, ip_len)));
+}
+
+/**
+ * hal_tx_ext_desc_set_tcp_seq() - Set TCP Sequence number
+ * @desc: Handle to Tx MSDU Extension Descriptor
+ * @seq_num: Tcp_seq_number for the msdu, if tso is enabled
+ *
+ * Return: none
+ */
+static inline void hal_tx_ext_desc_set_tcp_seq(void *desc,
+		uint32_t seq_num)
+{
+	HAL_SET_FLD(desc, TX_MSDU_EXTENSION_2, TCP_SEQ_NUMBER) |=
+		((HAL_TX_SM(TX_MSDU_EXTENSION_2, TCP_SEQ_NUMBER, seq_num)));
+}
+
+
+/**
+ * hal_tx_ext_desc_set_ip_id() - Set IP Identification field
+ * @desc: Handle to Tx MSDU Extension Descriptor
+ * @id: IP Id field for the msdu, if tso is enabled
+ *
+ * Return: none
+ */
+static inline void hal_tx_ext_desc_set_ip_id(void *desc,
+		uint16_t id)
+{
+	HAL_SET_FLD(desc, TX_MSDU_EXTENSION_3, IP_IDENTIFICATION) |=
+		((HAL_TX_SM(TX_MSDU_EXTENSION_3, IP_IDENTIFICATION, id)));
+}
+/**
+ * hal_tx_ext_desc_set_buffer() - Set Buffer Pointer and Length for a fragment
+ * @desc: Handle to Tx MSDU Extension Descriptor
+ * @frag_num: Fragment number (value can be 0 to 5)
+ * @paddr_lo: Lower 32-bit of Buffer Physical address
+ * @paddr_hi: Upper 32-bit of Buffer Physical address
+ * @length: Buffer Length
+ *
+ * Return: none
+ */
+static inline void hal_tx_ext_desc_set_buffer(void *desc,
+		uint8_t frag_num,
+		uint32_t paddr_lo,
+		uint16_t paddr_hi,
+		uint16_t length)
+{
+	/* (frag_num << 3): each fragment entry occupies 8 bytes (2 dwords) */
+	HAL_SET_FLD_OFFSET(desc, TX_MSDU_EXTENSION_6, BUF0_PTR_31_0,
+			(frag_num << 3)) |=
+		((HAL_TX_SM(TX_MSDU_EXTENSION_6, BUF0_PTR_31_0, paddr_lo)));
+
+	HAL_SET_FLD_OFFSET(desc, TX_MSDU_EXTENSION_7, BUF0_PTR_39_32,
+			(frag_num << 3)) |=
+		((HAL_TX_SM(TX_MSDU_EXTENSION_7, BUF0_PTR_39_32,
+			(paddr_hi))));
+
+	HAL_SET_FLD_OFFSET(desc, TX_MSDU_EXTENSION_7, BUF0_LEN,
+			(frag_num << 3)) |=
+		((HAL_TX_SM(TX_MSDU_EXTENSION_7, BUF0_LEN, length)));
+}
+
+/**
+ * hal_tx_ext_desc_set_buffer0_param() - Set Buffer 0 Pointer and Length
+ * @desc: Handle to Tx MSDU Extension Descriptor
+ * @paddr_lo: Lower 32-bit of Buffer Physical address
+ * @paddr_hi: Upper 32-bit of Buffer Physical address
+ * @length: Buffer 0 Length
+ *
+ * Return: none
+ */
+static inline void hal_tx_ext_desc_set_buffer0_param(void *desc,
+		uint32_t paddr_lo,
+		uint16_t paddr_hi,
+		uint16_t length)
+{
+	HAL_SET_FLD(desc, TX_MSDU_EXTENSION_6, BUF0_PTR_31_0) |=
+		((HAL_TX_SM(TX_MSDU_EXTENSION_6, BUF0_PTR_31_0, paddr_lo)));
+
+	HAL_SET_FLD(desc, TX_MSDU_EXTENSION_7, BUF0_PTR_39_32) |=
+		((HAL_TX_SM(TX_MSDU_EXTENSION_7,
+			BUF0_PTR_39_32, paddr_hi)));
+
+	HAL_SET_FLD(desc, TX_MSDU_EXTENSION_7, BUF0_LEN) |=
+		((HAL_TX_SM(TX_MSDU_EXTENSION_7, BUF0_LEN, length)));
+}
+
+/**
+ * hal_tx_ext_desc_set_buffer1_param() - Set Buffer 1 Pointer and Length
+ * @desc: Handle to Tx MSDU Extension Descriptor
+ * @paddr_lo: Lower 32-bit of Buffer Physical address
+ * @paddr_hi: Upper 32-bit of Buffer Physical address
+ * @length: Buffer 1 Length
+ *
+ * Return: none
+ */
+static inline void hal_tx_ext_desc_set_buffer1_param(void *desc,
+		uint32_t paddr_lo,
+		uint16_t paddr_hi,
+		uint16_t length)
+{
+	HAL_SET_FLD(desc, TX_MSDU_EXTENSION_8, BUF1_PTR_31_0) |=
+		((HAL_TX_SM(TX_MSDU_EXTENSION_8, BUF1_PTR_31_0, paddr_lo)));
+
+	HAL_SET_FLD(desc, TX_MSDU_EXTENSION_9, BUF1_PTR_39_32) |=
+		((HAL_TX_SM(TX_MSDU_EXTENSION_9,
+			BUF1_PTR_39_32, paddr_hi)));
+
+	HAL_SET_FLD(desc, TX_MSDU_EXTENSION_9, BUF1_LEN) |=
+		((HAL_TX_SM(TX_MSDU_EXTENSION_9, BUF1_LEN, length)));
+}
+
+/**
+ * hal_tx_ext_desc_set_buffer2_param() - Set Buffer 2 Pointer and Length
+ * @desc: Handle to Tx MSDU Extension Descriptor
+ * @paddr_lo: Lower 32-bit of Buffer Physical address
+ * @paddr_hi: Upper 32-bit of Buffer Physical address
+ * @length: Buffer 2 Length
+ *
+ * Return: none
+ */
+static inline void hal_tx_ext_desc_set_buffer2_param(void *desc,
+		uint32_t paddr_lo,
+		uint16_t paddr_hi,
+		uint16_t length)
+{
+	HAL_SET_FLD(desc, TX_MSDU_EXTENSION_10, BUF2_PTR_31_0) |=
+		((HAL_TX_SM(TX_MSDU_EXTENSION_10, BUF2_PTR_31_0,
+			paddr_lo)));
+
+	HAL_SET_FLD(desc, TX_MSDU_EXTENSION_11, BUF2_PTR_39_32) |=
+		((HAL_TX_SM(TX_MSDU_EXTENSION_11, BUF2_PTR_39_32,
+			paddr_hi)));
+
+	HAL_SET_FLD(desc, TX_MSDU_EXTENSION_11, BUF2_LEN) |=
+		((HAL_TX_SM(TX_MSDU_EXTENSION_11, BUF2_LEN, length)));
+}
+
+/**
+ * hal_tx_ext_desc_sync - Commit the descriptor to Hardware
+ * @desc_cached: Cached descriptor that software maintains
+ * @hw_desc: Hardware descriptor to be
updated
+ *
+ * Return: none
+ */
+static inline void hal_tx_ext_desc_sync(uint8_t *desc_cached,
+		uint8_t *hw_desc)
+{
+	qdf_mem_copy(&hw_desc[0], &desc_cached[0],
+			HAL_TX_EXT_DESC_WITH_META_DATA);
+}
+
+/**
+ * hal_tx_ext_desc_get_tso_enable() - Get TSO Enable Flag
+ * @hal_tx_ext_desc: Handle to Tx MSDU Extension Descriptor
+ *
+ * Return: tso_enable value in the descriptor
+ */
+static inline uint32_t hal_tx_ext_desc_get_tso_enable(void *hal_tx_ext_desc)
+{
+	uint32_t *desc = (uint32_t *) hal_tx_ext_desc;
+	return (*desc & TX_MSDU_EXTENSION_0_TSO_ENABLE_MASK) >>
+		TX_MSDU_EXTENSION_0_TSO_ENABLE_LSB;
+}
+
+/*---------------------------------------------------------------------------
+  WBM Descriptor accessor APIs for Tx completions
+  ---------------------------------------------------------------------------*/
+/**
+ * hal_tx_comp_get_desc_id() - Get TX descriptor id within comp descriptor
+ * @hal_desc: completion ring descriptor pointer
+ *
+ * This function will return the tx descriptor id (cookie) within the
+ * hardware completion descriptor
+ *
+ * Return: cookie
+ */
+static inline uint32_t hal_tx_comp_get_desc_id(void *hal_desc)
+{
+	uint32_t comp_desc =
+		*(uint32_t *) (((uint8_t *) hal_desc) +
+			       BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_OFFSET);
+
+	/* Cookie is placed on 2nd word */
+	return (comp_desc & BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK) >>
+		BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB;
+}
+
+/**
+ * hal_tx_comp_get_paddr() - Get paddr within comp descriptor
+ * @hal_desc: completion ring descriptor pointer
+ *
+ * This function will get buffer physical address within hardware completion
+ * descriptor
+ *
+ * Return: Buffer physical address
+ */
+static inline qdf_dma_addr_t hal_tx_comp_get_paddr(void *hal_desc)
+{
+	uint32_t paddr_lo;
+	uint32_t paddr_hi;
+
+	paddr_lo = *(uint32_t *) (((uint8_t *) hal_desc) +
+			BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_OFFSET);
+
+	paddr_hi = *(uint32_t *) (((uint8_t *) hal_desc) +
+			BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_OFFSET);
+
+	paddr_hi = (paddr_hi & BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK) >>
+		BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB;
+
+	return (qdf_dma_addr_t) (paddr_lo | (((uint64_t) paddr_hi) << 32));
+}
+
+/**
+ * hal_tx_comp_get_buffer_source() - Get buffer release source value
+ * @hal_desc: completion ring descriptor pointer
+ *
+ * This function will get buffer release source from Tx completion descriptor
+ *
+ * Return: buffer release source
+ */
+static inline uint32_t hal_tx_comp_get_buffer_source(void *hal_desc)
+{
+	uint32_t comp_desc =
+		*(uint32_t *) (((uint8_t *) hal_desc) +
+			       WBM_RELEASE_RING_2_RELEASE_SOURCE_MODULE_OFFSET);
+
+	return (comp_desc & WBM_RELEASE_RING_2_RELEASE_SOURCE_MODULE_MASK) >>
+		WBM_RELEASE_RING_2_RELEASE_SOURCE_MODULE_LSB;
+}
+
+/**
+ * hal_tx_comp_get_buffer_type() - Buffer or Descriptor type
+ * @hal_desc: completion ring descriptor pointer
+ *
+ * This function will return the type of pointer - buffer or descriptor
+ *
+ * Return: buffer type
+ */
+static inline uint32_t hal_tx_comp_get_buffer_type(void *hal_desc)
+{
+	uint32_t comp_desc =
+		*(uint32_t *) (((uint8_t *) hal_desc) +
+			       WBM_RELEASE_RING_2_BUFFER_OR_DESC_TYPE_OFFSET);
+
+	return (comp_desc & WBM_RELEASE_RING_2_BUFFER_OR_DESC_TYPE_MASK) >>
+		WBM_RELEASE_RING_2_BUFFER_OR_DESC_TYPE_LSB;
+}
+
+/**
+ * hal_tx_comp_get_release_reason() - TQM Release reason
+ * @hal_desc: completion ring descriptor pointer
+ *
+ * This function will return the TQM release reason from the Tx completion
+ * descriptor (see enum hal_tx_tqm_release_reason)
+ *
+ * Return: release reason
+ */
+static inline uint8_t hal_tx_comp_get_release_reason(void *hal_desc)
+{
+	uint32_t comp_desc =
+		*(uint32_t *) (((uint8_t *) hal_desc) +
+			       WBM_RELEASE_RING_2_TQM_RELEASE_REASON_OFFSET);
+
+	return (comp_desc & WBM_RELEASE_RING_2_TQM_RELEASE_REASON_MASK) >>
+		WBM_RELEASE_RING_2_TQM_RELEASE_REASON_LSB;
+}
+
+/**
+ * hal_tx_comp_get_status() - Get the Tx completion status
+ * @hal_desc: completion ring Tx status
+ *
+ * This function will parse the WBM completion descriptor and populate in
+ * HAL structure
+ *
+ * Return: none
+ */
+#if defined(WCSS_VERSION) && \
+	((defined(CONFIG_WIN) && (WCSS_VERSION > 81)) || \
+	(defined(CONFIG_MCL) && (WCSS_VERSION >= 72)))
+static inline void hal_tx_comp_get_status(void *desc,
+		struct hal_tx_completion_status *ts)
+{
+	uint8_t rate_stats_valid = 0;
+	uint32_t rate_stats = 0;
+
+	ts->ppdu_id = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_3,
+			TQM_STATUS_NUMBER);
+	ts->ack_frame_rssi = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_4,
+			ACK_FRAME_RSSI);
+	ts->first_msdu = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_4, FIRST_MSDU);
+	ts->last_msdu = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_4, LAST_MSDU);
+	ts->msdu_part_of_amsdu = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_4,
+			MSDU_PART_OF_AMSDU);
+
+	ts->peer_id = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_7, SW_PEER_ID);
+	ts->tid = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_7, TID);
+	ts->transmit_cnt = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_3,
+			TRANSMIT_COUNT);
+
+	rate_stats = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_5,
+			TX_RATE_STATS);
+
+	rate_stats_valid = HAL_TX_MS(TX_RATE_STATS_INFO_0,
+			TX_RATE_STATS_INFO_VALID, rate_stats);
+
+	ts->valid = rate_stats_valid;
+
+	/* rate-stats fields only carry meaningful data when HW set the
+	 * valid bit in the rate-stats word
+	 */
+	if (rate_stats_valid) {
+		ts->bw = HAL_TX_MS(TX_RATE_STATS_INFO_0, TRANSMIT_BW,
+				rate_stats);
+		ts->pkt_type = HAL_TX_MS(TX_RATE_STATS_INFO_0,
+				TRANSMIT_PKT_TYPE, rate_stats);
+		ts->stbc = HAL_TX_MS(TX_RATE_STATS_INFO_0,
+				TRANSMIT_STBC, rate_stats);
+		ts->ldpc = HAL_TX_MS(TX_RATE_STATS_INFO_0, TRANSMIT_LDPC,
+				rate_stats);
+		ts->sgi = HAL_TX_MS(TX_RATE_STATS_INFO_0, TRANSMIT_SGI,
+				rate_stats);
+		ts->mcs = HAL_TX_MS(TX_RATE_STATS_INFO_0, TRANSMIT_MCS,
+				rate_stats);
+		ts->ofdma = HAL_TX_MS(TX_RATE_STATS_INFO_0, OFDMA_TRANSMISSION,
+				rate_stats);
+		ts->tones_in_ru = HAL_TX_MS(TX_RATE_STATS_INFO_0, TONES_IN_RU,
+				rate_stats);
+	}
+
+	ts->release_src = hal_tx_comp_get_buffer_source(desc);
+	ts->status = hal_tx_comp_get_release_reason(desc);
+
+	/* NOTE(review): the field macro names a rate-stats word - confirm
+	 * that WBM_RELEASE_RING_6 actually holds the lower 32 bits of TSF
+	 */
+	ts->tsf = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_6,
+			TX_RATE_STATS_INFO_TX_RATE_STATS);
+}
+#else
+/* Older WCSS: no rate-stats words in the release ring; only the basic
+ * completion fields are populated
+ */
+static inline void hal_tx_comp_get_status(void *desc,
+		struct hal_tx_completion_status *ts)
+{
+
+	ts->ppdu_id = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_3,
+			TQM_STATUS_NUMBER);
+	ts->ack_frame_rssi = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_4,
+			ACK_FRAME_RSSI);
+	ts->first_msdu = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_4, FIRST_MSDU);
+	ts->last_msdu = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_4, LAST_MSDU);
+	ts->msdu_part_of_amsdu = HAL_TX_DESC_GET(desc, WBM_RELEASE_RING_4,
+			MSDU_PART_OF_AMSDU);
+
+	ts->release_src = hal_tx_comp_get_buffer_source(desc);
+	ts->status = hal_tx_comp_get_release_reason(desc);
+}
+#endif
+
+/**
+ * hal_tx_comp_desc_sync() - collect hardware descriptor contents
+ * @hw_desc: hardware descriptor pointer
+ * @comp: software descriptor pointer
+ * @read_status: 0 - Do not read status words from descriptors
+ *		 1 - Enable reading of status words from descriptor
+ *
+ * This function will collect hardware release ring element contents and
+ * translate to software descriptor content
+ *
+ * Return: none
+ */
+
+static inline void hal_tx_comp_desc_sync(void *hw_desc,
+		struct hal_tx_desc_comp_s *comp,
+		bool read_status)
+{
+	if (!read_status)
+		qdf_mem_copy(comp, hw_desc, HAL_TX_COMPLETION_DESC_BASE_LEN);
+	else
+		qdf_mem_copy(comp, hw_desc, HAL_TX_COMPLETION_DESC_LEN_BYTES);
+}
+
+/**
+ * hal_tx_comp_get_htt_desc() - Read the HTT portion of WBM Descriptor
+ * @hw_desc: Hardware (WBM) descriptor pointer
+ * @htt_desc: Software HTT descriptor pointer
+ *
+ * This function will read the HTT structure overlaid on WBM descriptor
+ * into a cached software descriptor
+ *
+ * Return: none
+ */
+static inline void hal_tx_comp_get_htt_desc(void *hw_desc, uint8_t *htt_desc)
+{
+	uint8_t *desc = hw_desc + HAL_TX_COMP_HTT_STATUS_OFFSET;
+
+	qdf_mem_copy(htt_desc, desc, HAL_TX_COMP_HTT_STATUS_LEN);
+}
+
+#if !defined(QCA_WIFI_QCA6290_11AX)
+/**
+ * hal_tx_set_dscp_tid_map() - Configure default DSCP to TID map table
+
 * @hal_soc: HAL SoC context
+ * @map: DSCP-TID mapping table
+ * @id: mapping table ID - 0,1
+ *
+ * DSCP are mapped to 8 TID values using TID values programmed
+ * in two set of mapping registers DSCP_TID1_MAP_<0 to 6> (id = 0)
+ * and DSCP_TID2_MAP_<0 to 6> (id = 1)
+ * Each mapping register has TID mapping for 10 DSCP values
+ *
+ * Return: none
+ */
+static inline void hal_tx_set_dscp_tid_map(void *hal_soc, uint8_t *map,
+		uint8_t id)
+{
+	int i;
+	uint32_t addr;
+	uint32_t value;
+
+	struct hal_soc *soc = (struct hal_soc *)hal_soc;
+
+	if (id == HAL_TX_DSCP_TID_MAP_TABLE_DEFAULT) {
+		addr =
+			HWIO_TCL_R0_DSCP_TID1_MAP_0_ADDR(
+					SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET);
+	} else {
+		addr =
+			HWIO_TCL_R0_DSCP_TID2_MAP_0_ADDR(
+					SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET);
+	}
+
+	/* 10 DSCP entries (3 bits of TID each) are packed per register */
+	for (i = 0; i < 64; i += 10) {
+		value = (map[i] |
+			(map[i+1] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_1_SHFT) |
+			(map[i+2] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_2_SHFT) |
+			(map[i+3] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_3_SHFT) |
+			(map[i+4] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_4_SHFT) |
+			(map[i+5] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_5_SHFT) |
+			(map[i+6] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_6_SHFT) |
+			(map[i+7] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_7_SHFT) |
+			(map[i+8] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_8_SHFT) |
+			(map[i+9] << HWIO_TCL_R0_DSCP_TID1_MAP_0_DSCP_9_SHFT));
+
+		HAL_REG_WRITE(soc, addr,
+				(value & HWIO_TCL_R0_DSCP_TID1_MAP_1_RMSK));
+
+		addr += 4;
+	}
+}
+
+/**
+ * hal_tx_update_dscp_tid() - Update the dscp tid map table as updated by user
+ * @hal_soc: HAL SoC context
+ * @tid: TID to be programmed for the given DSCP
+ * @id: MAP ID (table to update)
+ * @dscp: DSCP index within the table
+ *
+ * Return: void
+ */
+static inline void hal_tx_update_dscp_tid(void *hal_soc, uint8_t tid,
+		uint8_t id, uint8_t dscp)
+{
+	int index;
+	uint32_t addr;
+	uint32_t value;
+	uint32_t regval;
+
+	struct hal_soc *soc = (struct hal_soc *)hal_soc;
+
+	if (id == HAL_TX_DSCP_TID_MAP_TABLE_DEFAULT)
+		addr =
+			HWIO_TCL_R0_DSCP_TID1_MAP_0_ADDR(
+					SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET);
+	else
+		addr =
+			HWIO_TCL_R0_DSCP_TID2_MAP_0_ADDR(
+					SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET);
+
+	index = dscp % HAL_TX_NUM_DSCP_PER_REGISTER;
+	addr += 4 * (dscp/HAL_TX_NUM_DSCP_PER_REGISTER);
+	value = tid << (HAL_TX_BITS_PER_TID * index);
+
+	/* Read back previous DSCP TID config and update
+	 * with new config.
+	 */
+	regval = HAL_REG_READ(soc, addr);
+	regval &= ~(HAL_TX_TID_BITS_MASK << (HAL_TX_BITS_PER_TID * index));
+	regval |= value;
+
+	HAL_REG_WRITE(soc, addr,
+			(regval & HWIO_TCL_R0_DSCP_TID1_MAP_1_RMSK));
+}
+#else
+
+#define DSCP_TID_TABLE_SIZE 24
+#define NUM_WORDS_PER_DSCP_TID_TABLE (DSCP_TID_TABLE_SIZE/4)
+
+/**
+ * hal_tx_set_dscp_tid_map() - Configure default DSCP to TID map table
+ * @hal_soc: HAL SoC context
+ * @map: DSCP-TID mapping table
+ * @id: mapping table ID - 0-31
+ *
+ * DSCP are mapped to 8 TID values using TID values programmed
+ * in any of the 32 DSCP_TID_MAPS (id = 0-31).
+ *
+ * Return: none
+ */
+static inline void hal_tx_set_dscp_tid_map(void *hal_soc, uint8_t *map,
+		uint8_t id)
+{
+	int i;
+	uint32_t addr, cmn_reg_addr;
+	uint32_t value = 0, regval;
+	uint8_t val[DSCP_TID_TABLE_SIZE], cnt = 0;
+
+	struct hal_soc *soc = (struct hal_soc *)hal_soc;
+
+	if (id >= HAL_MAX_HW_DSCP_TID_MAPS_11AX) {
+		return;
+	}
+
+	cmn_reg_addr = HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_ADDR(
+				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET);
+
+	addr = HWIO_TCL_R0_DSCP_TID_MAP_n_ADDR(
+				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET,
+				id * NUM_WORDS_PER_DSCP_TID_TABLE);
+
+	/* Enable read/write access */
+	regval = HAL_REG_READ(soc, cmn_reg_addr);
+	regval |=
+	(1 << HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_SHFT);
+
+	HAL_REG_WRITE(soc, cmn_reg_addr, regval);
+
+	/* Write 8 (24 bits) DSCP-TID mappings in each iteration */
+	for (i = 0; i < 64; i += 8) {
+		value = (map[i] |
+			(map[i+1] << 0x3) |
+			(map[i+2] << 0x6) |
+			(map[i+3] << 0x9) |
+			(map[i+4] << 0xc) |
+			(map[i+5] << 0xf) |
+			(map[i+6] << 0x12) |
+			(map[i+7] << 0x15));
+
+		qdf_mem_copy(&val[cnt], (void *)&value, 3);
+		cnt += 3;
+	}
+
+	for (i = 0; i < DSCP_TID_TABLE_SIZE; i += 4) {
+		regval = *(uint32_t *)(val + i);
+		HAL_REG_WRITE(soc, addr,
+				(regval & HWIO_TCL_R0_DSCP_TID_MAP_n_RMSK));
+		addr += 4;
+	}
+
+	/* Disable read/write access */
+	regval = HAL_REG_READ(soc, cmn_reg_addr);
+	regval &=
+	~(HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_DSCP_TID_MAP_PROGRAM_EN_BMSK);
+
+	HAL_REG_WRITE(soc, cmn_reg_addr, regval);
+}
+
+/**
+ * hal_tx_update_dscp_tid() - Update one DSCP's TID in the given map table
+ * @hal_soc: HAL SoC context
+ * @tid: TID to be programmed for the given DSCP
+ * @id: MAP ID (table to update)
+ * @dscp: DSCP index within the table
+ *
+ * Return: void
+ */
+static inline void hal_tx_update_dscp_tid(void *hal_soc, uint8_t tid,
+		uint8_t id, uint8_t dscp)
+{
+	int index;
+	uint32_t addr;
+	uint32_t value;
+	uint32_t regval;
+
+	struct hal_soc *soc = (struct hal_soc *)hal_soc;
+	addr = HWIO_TCL_R0_DSCP_TID_MAP_n_ADDR(
+				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET, id);
+
+	index = dscp % HAL_TX_NUM_DSCP_PER_REGISTER;
+	addr += 4 * (dscp/HAL_TX_NUM_DSCP_PER_REGISTER);
+	value = tid << (HAL_TX_BITS_PER_TID * index);
+
+	/* read-modify-write: preserve the other DSCP entries in the word */
+	regval = HAL_REG_READ(soc, addr);
+	regval &= ~(HAL_TX_TID_BITS_MASK << (HAL_TX_BITS_PER_TID * index));
+	regval |= value;
+
+	HAL_REG_WRITE(soc, addr,
+			(regval & HWIO_TCL_R0_DSCP_TID_MAP_n_RMSK));
+}
+#endif
+
+/**
+ * hal_tx_init_data_ring() - Initialize all the TCL Descriptors in SRNG
+ * @hal_soc: Handle to HAL SoC structure
+ * @hal_srng: Handle to HAL SRNG structure
+ *
+ * Return: none
+ */
+static inline void hal_tx_init_data_ring(void *hal_soc, void *hal_srng)
+{
+	uint8_t *desc_addr;
+	struct hal_srng_params srng_params;
+	uint32_t desc_size;
+	uint32_t num_desc;
+
+	hal_get_srng_params(hal_soc, hal_srng, &srng_params);
+
+	desc_addr = (uint8_t *) srng_params.ring_base_vaddr;
+	desc_size = sizeof(struct tcl_data_cmd);
+	num_desc = srng_params.num_entries;
+
+	/* pre-stamp every ring entry with a TLV header so HW can parse it */
+	while (num_desc) {
+		HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG,
+				desc_size);
+		desc_addr += (desc_size + sizeof(struct tlv_32_hdr));
+		num_desc--;
+	}
+}
+#endif /* HAL_TX_H */
diff --git a/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_wbm.c
b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_wbm.c new file mode 100644 index 0000000000000000000000000000000000000000..a25fb8ce9a5c3a45213f3612cf9b80ef1e48b7ce --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hal/wifi3.0/hal_wbm.c @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */
+
+#include "hal_api.h"
+#include "qdf_module.h"
+
+/**
+ * hal_setup_link_idle_list - Setup scattered idle list using the
+ * buffer list provided
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @scatter_bufs_base_paddr: Array of physical base addresses
+ * @scatter_bufs_base_vaddr: Array of virtual base addresses
+ * @num_scatter_bufs: Number of scatter buffers in the above lists
+ * @scatter_buf_size: Size of each scatter buffer
+ * @last_buf_end_offset: Offset to the last entry
+ * @num_entries: Total entries of all scatter bufs
+ *
+ * Return: none
+ */
+void hal_setup_link_idle_list(void *hal_soc,
+	qdf_dma_addr_t scatter_bufs_base_paddr[],
+	void *scatter_bufs_base_vaddr[], uint32_t num_scatter_bufs,
+	uint32_t scatter_buf_size, uint32_t last_buf_end_offset,
+	uint32_t num_entries)
+{
+	int i;
+	uint32_t *prev_buf_link_ptr = NULL;
+	struct hal_soc *soc = (struct hal_soc *)hal_soc;
+	uint32_t reg_scatter_buf_size, reg_tot_scatter_buf_size;
+
+	/* Link the scatter buffers: the last two dwords of each buffer hold
+	 * the 40-bit physical address of the next buffer plus the match tag
+	 */
+	for (i = 0; i < num_scatter_bufs; i++) {
+		if (i > 0) {
+			prev_buf_link_ptr[0] =
+				scatter_bufs_base_paddr[i] & 0xffffffff;
+			prev_buf_link_ptr[1] = HAL_SM(
+				HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB,
+				BASE_ADDRESS_39_32,
+				((uint64_t)(scatter_bufs_base_paddr[i])
+				>> 32)) | HAL_SM(
+				HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB,
+				ADDRESS_MATCH_TAG,
+				ADDRESS_MATCH_TAG_VAL);
+		}
+		prev_buf_link_ptr = (uint32_t *)(scatter_bufs_base_vaddr[i] +
+			scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE);
+	}
+
+	/* TBD: Register programming partly based on MLD & the rest based on
+	 * inputs from HW team. Not complete yet.
+	 */
+
+	/* sizes below are programmed in units of 64 bytes */
+	reg_scatter_buf_size = (scatter_buf_size -
+		WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)/64;
+	reg_tot_scatter_buf_size = ((scatter_buf_size -
+		WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) * num_scatter_bufs)/64;
+
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_IDLE_LIST_CONTROL_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		HAL_SM(HWIO_WBM_R0_IDLE_LIST_CONTROL, SCATTER_BUFFER_SIZE,
+		reg_scatter_buf_size) |
+		HAL_SM(HWIO_WBM_R0_IDLE_LIST_CONTROL, LINK_DESC_IDLE_LIST_MODE,
+		0x1));
+
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_IDLE_LIST_SIZE_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		HAL_SM(HWIO_WBM_R0_IDLE_LIST_SIZE,
+		SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
+		reg_tot_scatter_buf_size));
+
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_LSB_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		scatter_bufs_base_paddr[0] & 0xffffffff);
+
+	/* NOTE(review): BASE_MSB is written twice; the second write below
+	 * also sets ADDRESS_MATCH_TAG - confirm this first write is needed
+	 */
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		((uint64_t)(scatter_bufs_base_paddr[0]) >> 32) &
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB_BASE_ADDRESS_39_32_BMSK);
+
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB,
+		BASE_ADDRESS_39_32, ((uint64_t)(scatter_bufs_base_paddr[0])
+		>> 32)) |
+		HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB,
+		ADDRESS_MATCH_TAG, ADDRESS_MATCH_TAG_VAL));
+
+	/* ADDRESS_MATCH_TAG field in the above register is expected to match
+	 * with the upper bits of link pointer. The above write sets this field
+	 * to zero and we are also setting the upper bits of link pointers to
+	 * zero while setting up the link list of scatter buffers above
+	 */
+
+	/* Setup head and tail pointers for the idle list */
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX0_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		scatter_bufs_base_paddr[num_scatter_bufs-1] & 0xffffffff);
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX1_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX1,
+		BUFFER_ADDRESS_39_32,
+		((uint64_t)(scatter_bufs_base_paddr[num_scatter_bufs-1])
+		>> 32)) |
+		HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX1,
+		HEAD_POINTER_OFFSET, last_buf_end_offset >> 2));
+
+	/* NOTE(review): HEAD_INFO_IX0 was just written with the last scatter
+	 * buffer address and is overwritten here with the first buffer's -
+	 * confirm which value is intended
+	 */
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX0_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		scatter_bufs_base_paddr[0] & 0xffffffff);
+
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_TAIL_INFO_IX0_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		scatter_bufs_base_paddr[0] & 0xffffffff);
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_TAIL_INFO_IX1_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_TAIL_INFO_IX1,
+		BUFFER_ADDRESS_39_32,
+		((uint64_t)(scatter_bufs_base_paddr[0]) >>
+		32)) | HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_TAIL_INFO_IX1,
+		TAIL_POINTER_OFFSET, 0));
+
+	/* HP is presumably in dwords: 2 dwords per idle-list entry -
+	 * TODO confirm
+	 */
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HP_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		2*num_entries);
+
+	/* Enable the SRNG (0x40: presumably the ring-enable bit in MISC -
+	 * confirm against the register definition)
+	 */
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_WBM_IDLE_LINK_RING_MISC_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		0x40);
+}
+qdf_export_symbol(hal_setup_link_idle_list);
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/hif.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/hif.h
new file mode 100644
index 0000000000000000000000000000000000000000..f561c54122e90c3799e3a27f1836f3e792897acd
--- /dev/null
+++
b/drivers/staging/qca-wifi-host-cmn/hif/inc/hif.h @@ -0,0 +1,982 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HIF_H_ +#define _HIF_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* Header files */ +#include +#include "qdf_nbuf.h" +#include "qdf_lro.h" +#include "ol_if_athvar.h" +#include +#ifdef HIF_PCI +#include +#endif /* HIF_PCI */ +#ifdef HIF_USB +#include +#endif /* HIF_USB */ +#ifdef IPA_OFFLOAD +#include +#endif +#define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1 + +typedef void __iomem *A_target_id_t; +typedef void *hif_handle_t; + +#define HIF_TYPE_AR6002 2 +#define HIF_TYPE_AR6003 3 +#define HIF_TYPE_AR6004 5 +#define HIF_TYPE_AR9888 6 +#define HIF_TYPE_AR6320 7 +#define HIF_TYPE_AR6320V2 8 +/* For attaching Peregrine 2.0 board host_reg_tbl only */ +#define HIF_TYPE_AR9888V2 9 +#define HIF_TYPE_ADRASTEA 10 +#define HIF_TYPE_AR900B 11 +#define HIF_TYPE_QCA9984 12 +#define HIF_TYPE_IPQ4019 13 +#define HIF_TYPE_QCA9888 14 +#define HIF_TYPE_QCA8074 15 +#define HIF_TYPE_QCA6290 16 + +#ifdef IPA_OFFLOAD +#define DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE 37 +#define DMA_COHERENT_MASK_BELOW_IPA_VER_3 32 +#endif + +/* enum hif_ic_irq - enum defining integrated chip irq 
numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 18,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};

struct CE_state;
#define CE_COUNT_MAX 12
#define HIF_MAX_GRP_IRQ 16
#define HIF_MAX_GROUP 8

#ifdef CONFIG_SLUB_DEBUG_ON
#ifndef CONFIG_WIN
#define HIF_CONFIG_SLUB_DEBUG_ON
#endif
#endif

/* NAPI poll-work scaling: the bin shift is smaller (finer yield granularity)
 * on debug (SLUB) and WIN builds than on perf builds.
 */
#ifndef NAPI_YIELD_BUDGET_BASED
#ifdef HIF_CONFIG_SLUB_DEBUG_ON
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
#else /* PERF build */
#ifdef CONFIG_WIN
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
#else
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4
#endif /* CONFIG_WIN */
#endif /* SLUB_DEBUG_ON */
#else /* NAPI_YIELD_BUDGET_BASED */
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
#endif /* NAPI_YIELD_BUDGET_BASED */
#define QCA_NAPI_BUDGET 64
#define QCA_NAPI_DEF_SCALE \
	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)

#define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
/* NOTE: "napi->scale" can be changed,
 * but this does not change the number of buckets
 */
#define QCA_NAPI_NUM_BUCKETS 4
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules - number of times the schedule function is called
 * @napi_polls - number of times the execution context runs
 * @napi_completes - number of times that the generating interrupt is reenabled
 * @napi_workdone - cumulative of all work done reported by handler
 * @cpu_corrected - incremented when execution context runs on a different core
 *                  than the one that its irq is affined to.
 * @napi_budget_uses - histogram of work done per execution run
 * @time_limit_reached - count of yields due to time limit thresholds
 * @rxpkt_thresh_reached - count of yields due to a work limit
 * @napi_max_poll_time - longest single poll observed (units per the stats
 *                       collection code — TODO confirm)
 *
 * needs to be renamed
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
};


/**
 * per NAPI instance data structure
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device netdev; /* dummy net_dev */
	void *hif_ctx;
	struct napi_struct napi;
	uint8_t scale; /* currently same on all instances */
	uint8_t id;
	uint8_t cpu;
	int irq;
	cpumask_t cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct rx_thread_napi;
	struct net_device rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t lro_ctx;
};

enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP
};

/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state: hotplug state of this core (up/down/uninitialized)
 * @core_id: physical core id of the core
 * @cluster_id: cluster this core belongs to
 * @core_mask: mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq: maximum clock this core can be clocked at
 *            same for all cpus of the same core.
 * @napis: bitmap of napi instances on this core
 * @execs: bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int core_id;
	int cluster_id;
	cpumask_t core_mask;
	cpumask_t thread_mask;
	unsigned int max_freq;
	uint32_t napis;
	uint32_t execs;
	int cluster_nxt;  /* index, not pointer */
};

/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: per-pipe array of created napi instances
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head:
 * @bigcl_head:
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: feature flags (semantics defined at the points of use)
 */
struct qca_napi_data {
	struct hif_softc *hif_softc;
	qdf_spinlock_t lock;
	uint32_t state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t ce_map;
	uint32_t exec_map;
	uint32_t user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu napi_cpu[NR_CPUS];
	int lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t flags;
};

/**
 * struct hif_config_info - Place Holder for hif configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable runtime PM (FEATURE_RUNTIME_PM builds only)
 * @runtime_pm_delay: Runtime PM inactivity delay
 *
 * Structure for holding hif ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	bool enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
};

/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: human-readable hardware name string
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};

/* Opaque per-instance handle exposed to HIF clients */
struct hif_opaque_softc {
};

/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
 *
 * @HIF_DEVICE_POWER_UP: HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT: HIF layer should initiate bus-specific AND/OR
 *                        platform-specific measures to completely power-off
 *                        the module and associated hardware (i.e. cut power
 *                        supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};

/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};

/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_MBOX_BLOCK_SIZE: get mbox block size
 * @HIF_DEVICE_GET_MBOX_ADDR: get mbox block address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
	HIF_DEVICE_GET_MBOX_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/* NOTE(review): name reads "HID" — looks like a typo for "HIF" carried in
 * the ABI; renaming would touch all debug users, so it is documented here
 * instead.
 */
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
#endif

void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
		uint32_t value);
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);

#define HIF_MAX_DEVICES 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context: context to pass to the dsrhandler
 *           note : rwCompletionHandler is provided the context
 *           passed to hif_read_write
 * @rwCompletionHandler: Read / write completion handler
 * @dsrHandler: DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS(*rwCompletionHandler)(void *rwContext, QDF_STATUS status);
	QDF_STATUS(*dsrHandler)(void *context);
};

/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * 
@is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 *
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
};

/* This API detaches the HTC layer from the HIF device */
void hif_detach_htc(struct hif_opaque_softc *hif_ctx);

/****************************************************************/
/* BMI and Diag window abstraction                              */
/****************************************************************/

#define HIF_BMI_EXCHANGE_NO_TIMEOUT  ((uint32_t)(0))

#define DIAG_TRANSFER_LIMIT 2048U /* maximum number of bytes that can be
				   * handled atomically by
				   * DiagRead/DiagWrite
				   */

/*
 * API to handle HIF-specific BMI message exchanges, this API is synchronous
 * and only allowed to be called from a context that can block (sleep)
 */
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
				uint8_t *pSendMessage, uint32_t Length,
				uint8_t *pResponseMessage,
				uint32_t *pResponseLength, uint32_t TimeoutMS);
void hif_register_bmi_callbacks(struct hif_softc *hif_sc);
/*
 * APIs to handle HIF specific diagnostic read accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep). They are not high performance APIs.
 *
 * hif_diag_read_access reads a 4 Byte aligned/length value from a
 * Target register or memory word.
 *
 * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
				uint32_t address, uint32_t *data);
QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
			     uint8_t *data, int nbytes);
void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
			    void *ramdump_base, uint32_t address, uint32_t size);
/*
 * APIs to handle HIF specific diagnostic write accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep).
 * They are not high performance APIs.
 *
 * hif_diag_write_access writes a 4 Byte aligned/length value to a
 * Target register or memory word.
 *
 * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
				 uint32_t address, uint32_t data);
QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
			      uint32_t address, uint8_t *data, int nbytes);

typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);

void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);

/*
 * Set the FASTPATH_mode_on flag in sc, for use by data path
 */
#ifdef WLAN_FEATURE_FASTPATH
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler, void *context);
#else
static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
					      fastpath_msg_handler handler,
					      void *context)
{
	return QDF_STATUS_E_FAILURE;
}
static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}

#endif

/*
 * Enable/disable CDC max performance workaround
 * For max-performance set this to 0
 * To allow SoC to enter sleep
set this to 1
 */
#define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0

void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
			     qdf_shared_mem_t **ce_sr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr);

/**
 * @brief List of callbacks - filled in by HTC.
 */
struct hif_msg_callbacks {
	void *Context;
	/**< context meaningful to HTC */
	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint32_t transferID,
					uint32_t toeplitz_hash_result);
	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					uint8_t pipeID);
	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
	void (*fwEventHandler)(void *context, QDF_STATUS status);
};

enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0, /* target connected */
	TARGET_STATUS_RESET, /* target got reset */
	TARGET_STATUS_EJECT, /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspend */
};

/**
 * enum hif_attribute_flags: configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
 *                                + No pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};

/* Bit-field setters for CE data descriptor attributes; shift positions
 * follow the hardware descriptor layout.
 */
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	(attr |= (v & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	(attr |= (v & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	(attr |= (v & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	(attr |= (v & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	(attr |= (v & 0x01) << 30)

struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index; /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index; /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};

#ifdef CONFIG_SLUB_DEBUG_ON
#define MSG_FLUSH_NUM 16
#else /* PERF build */
#define MSG_FLUSH_NUM 32
#endif /* SLUB_DEBUG_ON */

struct hif_bus_id;

void hif_claim_device(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
		     int opcode, void *config, uint32_t config_len);
void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
		   struct hif_msg_callbacks *callbacks);
QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
void hif_stop(struct hif_opaque_softc *hif_ctx);
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start);

QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
				  uint32_t transferID, uint32_t nbytes,
				  qdf_nbuf_t wbuf, uint32_t data_attr);
void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
			     int force);
void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
			  uint8_t *DLPipe);
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
			int *dl_is_polled);
uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
		     bool wait_for_it);
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
#ifndef HIF_PCI
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
#else
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
			u32 *revision, const char **target_name);

#ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - Register the offld flush callback
 * @scn: HIF opaque context
 * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
 *			 Or GRO/LRO flush when RxThread is not enabled. Called
 *			 with corresponding context for flush.
 * Return: None
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *ol_ctx));

/**
 * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
 * @scn: HIF opaque context
 *
 * Return: None
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
#endif

void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
				      int htc_htt_tx_endpoint);
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk);
void hif_close(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
		      void *bdev, const struct hif_bus_id *bid,
		      enum qdf_bus_type bus_type,
		      enum hif_enable_type type);
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
void hif_display_stats(struct hif_opaque_softc *hif_ctx);
void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
#ifdef FEATURE_RUNTIME_PM
struct hif_pm_runtime_lock;
void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx);
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock, unsigned int delay);
#else
/* Stub lock object used when runtime PM is compiled out */
struct hif_pm_runtime_lock {
	const char
*name;
};
/* No-op stubs: keep callers buildable when FEATURE_RUNTIME_PM is disabled */
static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
static inline void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
{}

static inline int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
					const char *name)
{ return 0; }
static inline void
hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			struct hif_pm_runtime_lock *lock) {}

static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int
hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock, unsigned int delay)
{ return 0; }
#endif

void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
				 bool is_packet_log_enabled);
void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);

void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);

#ifdef IPA_OFFLOAD
/**
 * hif_get_ipa_hw_type() - get IPA hw type
 *
 * This API returns the IPA hw type.
 *
 * Return: IPA hw type
 */
static inline
enum ipa_hw_type hif_get_ipa_hw_type(void)
{
	return ipa_get_hw_type();
}

/**
 * hif_get_ipa_present() - get IPA hw status
 *
 * This API returns the IPA hw status.
 *
 * Return: true if IPA is present or false otherwise
 */
static inline
bool hif_get_ipa_present(void)
{
	/* -EPERM from the readiness-callback probe means no IPA uC support */
	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
		return true;
	else
		return false;
}
#endif
int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
/**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
 */
int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_late_resume() - resume non wmi traffic
 * @hif_ctx: hif context
 */
int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_irqs_enable() - Enables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_irqs_disable() - Disables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);

#ifdef FEATURE_RUNTIME_PM
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
#endif

int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
int hif_dump_registers(struct hif_opaque_softc *scn);
int ol_copy_ramdump(struct hif_opaque_softc *scn);
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
/* NOTE(review): duplicate declaration — hif_get_hw_info is already declared
 * earlier in this header with the identical signature; harmless, but one of
 * the two could be dropped.
 */
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);
bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
						   scn);
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
			   hif_target_status);
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg);
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
		uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
int hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, uint32_t
		transfer_id, u_int32_t len);
int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
	uint32_t transfer_id, uint32_t download_len);
void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
void hif_ce_war_disable(void);
void hif_ce_war_enable(void);
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
		uint32_t pipe_num);
int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */

void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
				int rx_bundle_cnt);
int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);

void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);

enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,
	HIF_EXEC_TASKLET_TYPE,
};

typedef uint32_t (*ext_intr_handler)(void *, uint32_t);
uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
		void *cb_ctx, const char *context_name,
		enum hif_exec_type type, uint32_t scale);

void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
				const char *context_name);

void hif_update_pipe_callback(struct hif_opaque_softc *osc,
				u_int8_t pipeid,
				struct hif_msg_callbacks *callbacks);

void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
#ifdef __cplusplus
}
#endif

void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);

/**
 * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
 * @hif_ctx - the HIF context to assign the callback to
 * @callback - the callback to assign
 * @priv - the private data to pass to the callback when invoked
* + * Return: None + */ +void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx, + void (*callback)(void *), + void *priv); +#ifndef CONFIG_WIN +#ifndef HIF_CE_DEBUG_DATA_BUF +#define HIF_CE_DEBUG_DATA_BUF 0 +#endif +#endif +/* + * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked + * for defined here + */ +#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) +ssize_t hif_dump_desc_trace_buf(struct device *dev, + struct device_attribute *attr, char *buf); +ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn, + const char *buf, size_t size); +ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, + const char *buf, size_t size); +ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf); +ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf); +#endif /* Note: for MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */ + +/** + * hif_set_ce_service_max_yield_time() - sets CE service max yield time + * @hif: hif context + * @ce_service_max_yield_time: CE service max yield time to set + * + * This API storess CE service max yield time in hif context based + * on ini value. + * + * Return: void + */ +void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif, + uint32_t ce_service_max_yield_time); + +/** + * hif_get_ce_service_max_yield_time() - get CE service max yield time + * @hif: hif context + * + * This API returns CE service max yield time. + * + * Return: CE service max yield time + */ +unsigned long long +hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif); + +/** + * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush + * @hif: hif context + * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set + * + * This API stores CE service max rx ind flush in hif context based + * on ini value. 
+ * + * Return: void + */ +void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif, + uint8_t ce_service_max_rx_ind_flush); +#endif /* _HIF_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/hif_unit_test_suspend.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/hif_unit_test_suspend.h new file mode 100644 index 0000000000000000000000000000000000000000..cb036ecabb459b9952e9364315e83b8eab5f58de --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/hif_unit_test_suspend.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Public unit-test related APIs for triggering WoW suspend/resume while + * the application processor is still up. + */ + +#ifndef _HIF_UNIT_TEST_SUSPEND_H_ +#define _HIF_UNIT_TEST_SUSPEND_H_ + +#ifdef WLAN_SUSPEND_RESUME_TEST + +#include "qdf_status.h" +#include "hif.h" + +typedef void (*hif_ut_resume_callback)(void); + +/** + * hif_ut_apps_suspend() - Setup unit-test related suspend state. + * @opaque_scn: The HIF context to operate on + * @callback: The function to call when unit-test resume is triggered + * + * Call after a normal WoW suspend has been completed. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS hif_ut_apps_suspend(struct hif_opaque_softc *opaque_scn, + hif_ut_resume_callback callback); + +/** + * hif_ut_apps_resume() - Cleanup unit-test related suspend state. + * @opaque_scn: The HIF context to operate on + * + * Call before doing a normal WoW resume if suspend was initiated via + * unit-test suspend. + * + * Return: QDF_STATUS + */ +QDF_STATUS hif_ut_apps_resume(struct hif_opaque_softc *opaque_scn); + +#endif /* WLAN_SUSPEND_RESUME_TEST */ + +#endif /* _HIF_UNIT_TEST_SUSPEND_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/host_reg_init.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/host_reg_init.h new file mode 100644 index 0000000000000000000000000000000000000000..f154192c9f9f958844c029997559a6713af4d3d4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/host_reg_init.h @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef HOST_REG_INIT_H +#define HOST_REG_INIT_H + +#include "reg_struct.h" +#include "targaddrs.h" + +#if defined(MY_HOST_DEF) + +#if !defined(FW_IND_HOST_READY) +#define FW_IND_HOST_READY 0 +#endif + +#if !defined(PCIE_LOCAL_BASE_ADDRESS) +#define PCIE_LOCAL_BASE_ADDRESS 0 +#define PCIE_SOC_WAKE_RESET 0 +#define PCIE_SOC_WAKE_ADDRESS 0 +#define PCIE_SOC_WAKE_V_MASK 0 +#define RTC_STATE_ADDRESS 0 +#define RTC_STATE_COLD_RESET_MASK 0 +#define RTC_STATE_V_MASK 0 +#define RTC_STATE_V_LSB 0 +#define RTC_STATE_V_ON 0 +#define SOC_GLOBAL_RESET_ADDRESS 0 +#endif + +#if !defined(CE_COUNT) +#define CE_COUNT 0 +#endif + +#if !defined(TRANSACTION_ID_MASK) +#define TRANSACTION_ID_MASK 0xfff +#endif + +static struct hostdef_s my_host_def = { + .d_INT_STATUS_ENABLE_ERROR_LSB = INT_STATUS_ENABLE_ERROR_LSB, + .d_INT_STATUS_ENABLE_ERROR_MASK = INT_STATUS_ENABLE_ERROR_MASK, + .d_INT_STATUS_ENABLE_CPU_LSB = INT_STATUS_ENABLE_CPU_LSB, + .d_INT_STATUS_ENABLE_CPU_MASK = INT_STATUS_ENABLE_CPU_MASK, + .d_INT_STATUS_ENABLE_COUNTER_LSB = INT_STATUS_ENABLE_COUNTER_LSB, + .d_INT_STATUS_ENABLE_COUNTER_MASK = INT_STATUS_ENABLE_COUNTER_MASK, + .d_INT_STATUS_ENABLE_MBOX_DATA_LSB = INT_STATUS_ENABLE_MBOX_DATA_LSB, + .d_INT_STATUS_ENABLE_MBOX_DATA_MASK = INT_STATUS_ENABLE_MBOX_DATA_MASK, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB + = ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK + = ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB + = ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK + = ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, + .d_COUNTER_INT_STATUS_ENABLE_BIT_LSB + = COUNTER_INT_STATUS_ENABLE_BIT_LSB, + .d_COUNTER_INT_STATUS_ENABLE_BIT_MASK + = COUNTER_INT_STATUS_ENABLE_BIT_MASK, + .d_INT_STATUS_ENABLE_ADDRESS = INT_STATUS_ENABLE_ADDRESS, + .d_CPU_INT_STATUS_ENABLE_BIT_LSB = CPU_INT_STATUS_ENABLE_BIT_LSB, + .d_CPU_INT_STATUS_ENABLE_BIT_MASK = CPU_INT_STATUS_ENABLE_BIT_MASK, + 
.d_HOST_INT_STATUS_ADDRESS = HOST_INT_STATUS_ADDRESS, + .d_CPU_INT_STATUS_ADDRESS = CPU_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_ADDRESS = ERROR_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_WAKEUP_MASK = ERROR_INT_STATUS_WAKEUP_MASK, + .d_ERROR_INT_STATUS_WAKEUP_LSB = ERROR_INT_STATUS_WAKEUP_LSB, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK + = ERROR_INT_STATUS_RX_UNDERFLOW_MASK, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB + = ERROR_INT_STATUS_RX_UNDERFLOW_LSB, + .d_ERROR_INT_STATUS_TX_OVERFLOW_MASK + = ERROR_INT_STATUS_TX_OVERFLOW_MASK, + .d_ERROR_INT_STATUS_TX_OVERFLOW_LSB = ERROR_INT_STATUS_TX_OVERFLOW_LSB, + .d_COUNT_DEC_ADDRESS = COUNT_DEC_ADDRESS, + .d_HOST_INT_STATUS_CPU_MASK = HOST_INT_STATUS_CPU_MASK, + .d_HOST_INT_STATUS_CPU_LSB = HOST_INT_STATUS_CPU_LSB, + .d_HOST_INT_STATUS_ERROR_MASK = HOST_INT_STATUS_ERROR_MASK, + .d_HOST_INT_STATUS_ERROR_LSB = HOST_INT_STATUS_ERROR_LSB, + .d_HOST_INT_STATUS_COUNTER_MASK = HOST_INT_STATUS_COUNTER_MASK, + .d_HOST_INT_STATUS_COUNTER_LSB = HOST_INT_STATUS_COUNTER_LSB, + .d_RX_LOOKAHEAD_VALID_ADDRESS = RX_LOOKAHEAD_VALID_ADDRESS, + .d_WINDOW_DATA_ADDRESS = WINDOW_DATA_ADDRESS, + .d_WINDOW_READ_ADDR_ADDRESS = WINDOW_READ_ADDR_ADDRESS, + .d_WINDOW_WRITE_ADDR_ADDRESS = WINDOW_WRITE_ADDR_ADDRESS, + .d_SOC_GLOBAL_RESET_ADDRESS = SOC_GLOBAL_RESET_ADDRESS, + .d_RTC_STATE_ADDRESS = RTC_STATE_ADDRESS, + .d_RTC_STATE_COLD_RESET_MASK = RTC_STATE_COLD_RESET_MASK, + .d_PCIE_LOCAL_BASE_ADDRESS = PCIE_LOCAL_BASE_ADDRESS, + .d_PCIE_SOC_WAKE_RESET = PCIE_SOC_WAKE_RESET, + .d_PCIE_SOC_WAKE_ADDRESS = PCIE_SOC_WAKE_ADDRESS, + .d_PCIE_SOC_WAKE_V_MASK = PCIE_SOC_WAKE_V_MASK, + .d_RTC_STATE_V_MASK = RTC_STATE_V_MASK, + .d_RTC_STATE_V_LSB = RTC_STATE_V_LSB, + .d_FW_IND_EVENT_PENDING = FW_IND_EVENT_PENDING, + .d_FW_IND_INITIALIZED = FW_IND_INITIALIZED, + .d_RTC_STATE_V_ON = RTC_STATE_V_ON, +#if defined(SDIO_3_0) + .d_HOST_INT_STATUS_MBOX_DATA_MASK = HOST_INT_STATUS_MBOX_DATA_MASK, + .d_HOST_INT_STATUS_MBOX_DATA_LSB = HOST_INT_STATUS_MBOX_DATA_LSB, +#endif + 
.d_FW_IND_HOST_READY = FW_IND_HOST_READY, + .d_HOST_CE_COUNT = CE_COUNT, + .d_TRANSACTION_ID_MASK = TRANSACTION_ID_MASK, +}; + +struct hostdef_s *MY_HOST_DEF = &my_host_def; +#else /* MY_HOST_DEF */ +#endif /* MY_HOST_DEF */ + + + +#if defined(MY_HOST_SHADOW_REGS) +struct host_shadow_regs_s my_host_shadow_regs = { + .d_A_LOCAL_SHADOW_REG_VALUE_0 = A_LOCAL_SHADOW_REG_VALUE_0; + .d_A_LOCAL_SHADOW_REG_VALUE_1 = A_LOCAL_SHADOW_REG_VALUE_1; + .d_A_LOCAL_SHADOW_REG_VALUE_2 = A_LOCAL_SHADOW_REG_VALUE_2; + .d_A_LOCAL_SHADOW_REG_VALUE_3 = A_LOCAL_SHADOW_REG_VALUE_3; + .d_A_LOCAL_SHADOW_REG_VALUE_4 = A_LOCAL_SHADOW_REG_VALUE_4; + .d_A_LOCAL_SHADOW_REG_VALUE_5 = A_LOCAL_SHADOW_REG_VALUE_5; + .d_A_LOCAL_SHADOW_REG_VALUE_6 = A_LOCAL_SHADOW_REG_VALUE_6; + .d_A_LOCAL_SHADOW_REG_VALUE_7 = A_LOCAL_SHADOW_REG_VALUE_7; + .d_A_LOCAL_SHADOW_REG_VALUE_8 = A_LOCAL_SHADOW_REG_VALUE_8; + .d_A_LOCAL_SHADOW_REG_VALUE_9 = A_LOCAL_SHADOW_REG_VALUE_9; + .d_A_LOCAL_SHADOW_REG_VALUE_10 = A_LOCAL_SHADOW_REG_VALUE_10; + .d_A_LOCAL_SHADOW_REG_VALUE_11 = A_LOCAL_SHADOW_REG_VALUE_11; + .d_A_LOCAL_SHADOW_REG_VALUE_12 = A_LOCAL_SHADOW_REG_VALUE_12; + .d_A_LOCAL_SHADOW_REG_VALUE_13 = A_LOCAL_SHADOW_REG_VALUE_13; + .d_A_LOCAL_SHADOW_REG_VALUE_14 = A_LOCAL_SHADOW_REG_VALUE_14; + .d_A_LOCAL_SHADOW_REG_VALUE_15 = A_LOCAL_SHADOW_REG_VALUE_15; + .d_A_LOCAL_SHADOW_REG_VALUE_16 = A_LOCAL_SHADOW_REG_VALUE_16; + .d_A_LOCAL_SHADOW_REG_VALUE_17 = A_LOCAL_SHADOW_REG_VALUE_17; + .d_A_LOCAL_SHADOW_REG_VALUE_18 = A_LOCAL_SHADOW_REG_VALUE_18; + .d_A_LOCAL_SHADOW_REG_VALUE_19 = A_LOCAL_SHADOW_REG_VALUE_19; + .d_A_LOCAL_SHADOW_REG_VALUE_20 = A_LOCAL_SHADOW_REG_VALUE_20; + .d_A_LOCAL_SHADOW_REG_VALUE_21 = A_LOCAL_SHADOW_REG_VALUE_21; + .d_A_LOCAL_SHADOW_REG_VALUE_22 = A_LOCAL_SHADOW_REG_VALUE_22; + .d_A_LOCAL_SHADOW_REG_VALUE_23 = A_LOCAL_SHADOW_REG_VALUE_23; + .d_A_LOCAL_SHADOW_REG_ADDRESS_0 = A_LOCAL_SHADOW_REG_ADDRESS_0; + .d_A_LOCAL_SHADOW_REG_ADDRESS_1 = A_LOCAL_SHADOW_REG_ADDRESS_1; + .d_A_LOCAL_SHADOW_REG_ADDRESS_2 = 
A_LOCAL_SHADOW_REG_ADDRESS_2; + .d_A_LOCAL_SHADOW_REG_ADDRESS_3 = A_LOCAL_SHADOW_REG_ADDRESS_3; + .d_A_LOCAL_SHADOW_REG_ADDRESS_4 = A_LOCAL_SHADOW_REG_ADDRESS_4; + .d_A_LOCAL_SHADOW_REG_ADDRESS_5 = A_LOCAL_SHADOW_REG_ADDRESS_5; + .d_A_LOCAL_SHADOW_REG_ADDRESS_6 = A_LOCAL_SHADOW_REG_ADDRESS_6; + .d_A_LOCAL_SHADOW_REG_ADDRESS_7 = A_LOCAL_SHADOW_REG_ADDRESS_7; + .d_A_LOCAL_SHADOW_REG_ADDRESS_8 = A_LOCAL_SHADOW_REG_ADDRESS_8; + .d_A_LOCAL_SHADOW_REG_ADDRESS_9 = A_LOCAL_SHADOW_REG_ADDRESS_9; + .d_A_LOCAL_SHADOW_REG_ADDRESS_10 = A_LOCAL_SHADOW_REG_ADDRESS_10; + .d_A_LOCAL_SHADOW_REG_ADDRESS_11 = A_LOCAL_SHADOW_REG_ADDRESS_11; + .d_A_LOCAL_SHADOW_REG_ADDRESS_12 = A_LOCAL_SHADOW_REG_ADDRESS_12; + .d_A_LOCAL_SHADOW_REG_ADDRESS_13 = A_LOCAL_SHADOW_REG_ADDRESS_13; + .d_A_LOCAL_SHADOW_REG_ADDRESS_14 = A_LOCAL_SHADOW_REG_ADDRESS_14; + .d_A_LOCAL_SHADOW_REG_ADDRESS_15 = A_LOCAL_SHADOW_REG_ADDRESS_15; + .d_A_LOCAL_SHADOW_REG_ADDRESS_16 = A_LOCAL_SHADOW_REG_ADDRESS_16; + .d_A_LOCAL_SHADOW_REG_ADDRESS_17 = A_LOCAL_SHADOW_REG_ADDRESS_17; + .d_A_LOCAL_SHADOW_REG_ADDRESS_18 = A_LOCAL_SHADOW_REG_ADDRESS_18; + .d_A_LOCAL_SHADOW_REG_ADDRESS_19 = A_LOCAL_SHADOW_REG_ADDRESS_19; + .d_A_LOCAL_SHADOW_REG_ADDRESS_20 = A_LOCAL_SHADOW_REG_ADDRESS_20; + .d_A_LOCAL_SHADOW_REG_ADDRESS_21 = A_LOCAL_SHADOW_REG_ADDRESS_21; + .d_A_LOCAL_SHADOW_REG_ADDRESS_22 = A_LOCAL_SHADOW_REG_ADDRESS_22; + .d_A_LOCAL_SHADOW_REG_ADDRESS_23 = A_LOCAL_SHADOW_REG_ADDRESS_23; +}; + +struct hostdef_s *MY_HOST_SHADOW_REGS = &my_host_shadow_regs; +#else /* MY_HOST_SHADOW_REGS */ +#endif /* MY_HOST_SHADOW_REGS */ +#endif /* HOST_REG_INIT_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/hostdef.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/hostdef.h new file mode 100644 index 0000000000000000000000000000000000000000..82f331e39e42ae994560da97dba0074f313f3722 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/hostdef.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2013-2016 The Linux Foundation. 
All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HOSTDEFS_H_
+#define HOSTDEFS_H_
+
+/*
+ * NOTE(review): the three angle-bracket #include directives below lost
+ * their targets in this paste -- a bare "#include" is invalid C and will
+ * not preprocess. Restore the header names from the original upstream
+ * qca-wifi-host-cmn patch before applying; TODO confirm targets.
+ */
+#include
+#include
+#include
+#include "host_reg_init.h"
+
+/*
+ * Per-chip host register definition tables (struct hostdef_s is declared
+ * in reg_struct.h; presumably each pointer is defined in a target-specific
+ * source that includes host_reg_init.h -- verify against the build).
+ */
+extern struct hostdef_s *AR6002_HOSTdef;
+extern struct hostdef_s *AR6003_HOSTdef;
+extern struct hostdef_s *AR6004_HOSTdef;
+extern struct hostdef_s *AR9888_HOSTdef;
+extern struct hostdef_s *AR9888V2_HOSTdef;
+extern struct hostdef_s *AR6320_HOSTdef;
+extern struct hostdef_s *AR900B_HOSTdef;
+extern struct hostdef_s *QCA9984_HOSTdef;
+extern struct hostdef_s *QCA9888_HOSTdef;
+extern struct hostdef_s *QCA6290_HOSTdef;
+#ifdef ATH_AHB
+extern struct hostdef_s *IPQ4019_HOSTdef;
+#endif
+extern struct hostdef_s *QCA8074_HOSTdef;
+
+#endif
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/reg_struct.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/reg_struct.h
new file mode 100644
index 0000000000000000000000000000000000000000..d3cb668d4b91c648a7c5312e83cc01246cdd63d1
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/reg_struct.h
@@ -0,0 +1,664 @@
+/*
+ * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef REG_STRUCT_H +#define REG_STRUCT_H + +#define MISSING_REGISTER 0 +#define UNSUPPORTED_REGISTER_OFFSET 0xffffffff + +/** + * is_register_supported() - return true if the register offset is valid + * @reg: register address being checked + * + * Return: true if the register offset is valid + */ +static inline bool is_register_supported(uint32_t reg) +{ + return (reg != MISSING_REGISTER) && + (reg != UNSUPPORTED_REGISTER_OFFSET); +} + +struct targetdef_s { + uint32_t d_RTC_SOC_BASE_ADDRESS; + uint32_t d_RTC_WMAC_BASE_ADDRESS; + uint32_t d_SYSTEM_SLEEP_OFFSET; + uint32_t d_WLAN_SYSTEM_SLEEP_OFFSET; + uint32_t d_WLAN_SYSTEM_SLEEP_DISABLE_LSB; + uint32_t d_WLAN_SYSTEM_SLEEP_DISABLE_MASK; + uint32_t d_CLOCK_CONTROL_OFFSET; + uint32_t d_CLOCK_CONTROL_SI0_CLK_MASK; + uint32_t d_RESET_CONTROL_OFFSET; + uint32_t d_RESET_CONTROL_MBOX_RST_MASK; + uint32_t d_RESET_CONTROL_SI0_RST_MASK; + uint32_t d_WLAN_RESET_CONTROL_OFFSET; + uint32_t d_WLAN_RESET_CONTROL_COLD_RST_MASK; + uint32_t d_WLAN_RESET_CONTROL_WARM_RST_MASK; + uint32_t d_GPIO_BASE_ADDRESS; + uint32_t d_GPIO_PIN0_OFFSET; + uint32_t d_GPIO_PIN1_OFFSET; + uint32_t d_GPIO_PIN0_CONFIG_MASK; + uint32_t d_GPIO_PIN1_CONFIG_MASK; + uint32_t d_SI_CONFIG_BIDIR_OD_DATA_LSB; + 
uint32_t d_SI_CONFIG_BIDIR_OD_DATA_MASK; + uint32_t d_SI_CONFIG_I2C_LSB; + uint32_t d_SI_CONFIG_I2C_MASK; + uint32_t d_SI_CONFIG_POS_SAMPLE_LSB; + uint32_t d_SI_CONFIG_POS_SAMPLE_MASK; + uint32_t d_SI_CONFIG_INACTIVE_CLK_LSB; + uint32_t d_SI_CONFIG_INACTIVE_CLK_MASK; + uint32_t d_SI_CONFIG_INACTIVE_DATA_LSB; + uint32_t d_SI_CONFIG_INACTIVE_DATA_MASK; + uint32_t d_SI_CONFIG_DIVIDER_LSB; + uint32_t d_SI_CONFIG_DIVIDER_MASK; + uint32_t d_SI_BASE_ADDRESS; + uint32_t d_SI_CONFIG_OFFSET; + uint32_t d_SI_TX_DATA0_OFFSET; + uint32_t d_SI_TX_DATA1_OFFSET; + uint32_t d_SI_RX_DATA0_OFFSET; + uint32_t d_SI_RX_DATA1_OFFSET; + uint32_t d_SI_CS_OFFSET; + uint32_t d_SI_CS_DONE_ERR_MASK; + uint32_t d_SI_CS_DONE_INT_MASK; + uint32_t d_SI_CS_START_LSB; + uint32_t d_SI_CS_START_MASK; + uint32_t d_SI_CS_RX_CNT_LSB; + uint32_t d_SI_CS_RX_CNT_MASK; + uint32_t d_SI_CS_TX_CNT_LSB; + uint32_t d_SI_CS_TX_CNT_MASK; + uint32_t d_BOARD_DATA_SZ; + uint32_t d_BOARD_EXT_DATA_SZ; + uint32_t d_MBOX_BASE_ADDRESS; + uint32_t d_LOCAL_SCRATCH_OFFSET; + uint32_t d_CPU_CLOCK_OFFSET; + uint32_t d_LPO_CAL_OFFSET; + uint32_t d_GPIO_PIN10_OFFSET; + uint32_t d_GPIO_PIN11_OFFSET; + uint32_t d_GPIO_PIN12_OFFSET; + uint32_t d_GPIO_PIN13_OFFSET; + uint32_t d_CLOCK_GPIO_OFFSET; + uint32_t d_CPU_CLOCK_STANDARD_LSB; + uint32_t d_CPU_CLOCK_STANDARD_MASK; + uint32_t d_LPO_CAL_ENABLE_LSB; + uint32_t d_LPO_CAL_ENABLE_MASK; + uint32_t d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB; + uint32_t d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK; + uint32_t d_ANALOG_INTF_BASE_ADDRESS; + uint32_t d_WLAN_MAC_BASE_ADDRESS; + uint32_t d_FW_INDICATOR_ADDRESS; + uint32_t d_FW_CPU_PLL_CONFIG; + uint32_t d_DRAM_BASE_ADDRESS; + uint32_t d_SOC_CORE_BASE_ADDRESS; + uint32_t d_CORE_CTRL_ADDRESS; + uint32_t d_CE_COUNT; + uint32_t d_MSI_NUM_REQUEST; + uint32_t d_MSI_ASSIGN_FW; + uint32_t d_MSI_ASSIGN_CE_INITIAL; + uint32_t d_PCIE_INTR_ENABLE_ADDRESS; + uint32_t d_PCIE_INTR_CLR_ADDRESS; + uint32_t d_PCIE_INTR_FIRMWARE_MASK; + uint32_t d_PCIE_INTR_CE_MASK_ALL; + uint32_t 
d_CORE_CTRL_CPU_INTR_MASK; + uint32_t d_WIFICMN_PCIE_BAR_REG_ADDRESS; + /* htt_rx.c */ + /* htt tx */ + uint32_t d_MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK; + uint32_t d_MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK; + uint32_t d_MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK; + uint32_t d_MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK; + uint32_t d_MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB; + uint32_t d_MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB; + uint32_t d_MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB; + uint32_t d_MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB; + /* copy_engine.c */ + uint32_t d_SR_WR_INDEX_ADDRESS; + uint32_t d_DST_WATERMARK_ADDRESS; + /* htt_rx.c */ + uint32_t d_RX_MSDU_END_4_FIRST_MSDU_MASK; + uint32_t d_RX_MSDU_END_4_FIRST_MSDU_LSB; + uint32_t d_RX_MPDU_START_0_RETRY_LSB; + uint32_t d_RX_MPDU_START_0_RETRY_MASK; + uint32_t d_RX_MPDU_START_0_SEQ_NUM_MASK; + uint32_t d_RX_MPDU_START_0_SEQ_NUM_LSB; + uint32_t d_RX_MPDU_START_2_PN_47_32_LSB; + uint32_t d_RX_MPDU_START_2_PN_47_32_MASK; + uint32_t d_RX_MPDU_START_2_TID_LSB; + uint32_t d_RX_MPDU_START_2_TID_MASK; + uint32_t d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK; + uint32_t d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB; + uint32_t d_RX_MSDU_END_1_KEY_ID_OCT_MASK; + uint32_t d_RX_MSDU_END_1_KEY_ID_OCT_LSB; + uint32_t d_RX_MSDU_END_4_LAST_MSDU_MASK; + uint32_t d_RX_MSDU_END_4_LAST_MSDU_LSB; + uint32_t d_RX_ATTENTION_0_MCAST_BCAST_MASK; + uint32_t d_RX_ATTENTION_0_MCAST_BCAST_LSB; + uint32_t d_RX_ATTENTION_0_FRAGMENT_MASK; + uint32_t d_RX_ATTENTION_0_FRAGMENT_LSB; + uint32_t d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK; + uint32_t d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK; + uint32_t d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB; + uint32_t d_RX_MSDU_START_0_MSDU_LENGTH_MASK; + uint32_t d_RX_MSDU_START_0_MSDU_LENGTH_LSB; + uint32_t d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET; + uint32_t d_RX_MSDU_START_2_DECAP_FORMAT_MASK; + uint32_t d_RX_MSDU_START_2_DECAP_FORMAT_LSB; + uint32_t d_RX_MPDU_START_0_ENCRYPTED_MASK; + 
uint32_t d_RX_MPDU_START_0_ENCRYPTED_LSB; + uint32_t d_RX_ATTENTION_0_MORE_DATA_MASK; + uint32_t d_RX_ATTENTION_0_MSDU_DONE_MASK; + uint32_t d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK; + /* end */ + + /* PLL start */ + uint32_t d_EFUSE_OFFSET; + uint32_t d_EFUSE_XTAL_SEL_MSB; + uint32_t d_EFUSE_XTAL_SEL_LSB; + uint32_t d_EFUSE_XTAL_SEL_MASK; + uint32_t d_BB_PLL_CONFIG_OFFSET; + uint32_t d_BB_PLL_CONFIG_OUTDIV_MSB; + uint32_t d_BB_PLL_CONFIG_OUTDIV_LSB; + uint32_t d_BB_PLL_CONFIG_OUTDIV_MASK; + uint32_t d_BB_PLL_CONFIG_FRAC_MSB; + uint32_t d_BB_PLL_CONFIG_FRAC_LSB; + uint32_t d_BB_PLL_CONFIG_FRAC_MASK; + uint32_t d_WLAN_PLL_SETTLE_TIME_MSB; + uint32_t d_WLAN_PLL_SETTLE_TIME_LSB; + uint32_t d_WLAN_PLL_SETTLE_TIME_MASK; + uint32_t d_WLAN_PLL_SETTLE_OFFSET; + uint32_t d_WLAN_PLL_SETTLE_SW_MASK; + uint32_t d_WLAN_PLL_SETTLE_RSTMASK; + uint32_t d_WLAN_PLL_SETTLE_RESET; + uint32_t d_WLAN_PLL_CONTROL_NOPWD_MSB; + uint32_t d_WLAN_PLL_CONTROL_NOPWD_LSB; + uint32_t d_WLAN_PLL_CONTROL_NOPWD_MASK; + uint32_t d_WLAN_PLL_CONTROL_BYPASS_MSB; + uint32_t d_WLAN_PLL_CONTROL_BYPASS_LSB; + uint32_t d_WLAN_PLL_CONTROL_BYPASS_MASK; + uint32_t d_WLAN_PLL_CONTROL_BYPASS_RESET; + uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_MSB; + uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_LSB; + uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_MASK; + uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_RESET; + uint32_t d_WLAN_PLL_CONTROL_REFDIV_MSB; + uint32_t d_WLAN_PLL_CONTROL_REFDIV_LSB; + uint32_t d_WLAN_PLL_CONTROL_REFDIV_MASK; + uint32_t d_WLAN_PLL_CONTROL_REFDIV_RESET; + uint32_t d_WLAN_PLL_CONTROL_DIV_MSB; + uint32_t d_WLAN_PLL_CONTROL_DIV_LSB; + uint32_t d_WLAN_PLL_CONTROL_DIV_MASK; + uint32_t d_WLAN_PLL_CONTROL_DIV_RESET; + uint32_t d_WLAN_PLL_CONTROL_OFFSET; + uint32_t d_WLAN_PLL_CONTROL_SW_MASK; + uint32_t d_WLAN_PLL_CONTROL_RSTMASK; + uint32_t d_WLAN_PLL_CONTROL_RESET; + uint32_t d_SOC_CORE_CLK_CTRL_OFFSET; + uint32_t d_SOC_CORE_CLK_CTRL_DIV_MSB; + uint32_t d_SOC_CORE_CLK_CTRL_DIV_LSB; + uint32_t d_SOC_CORE_CLK_CTRL_DIV_MASK; + uint32_t 
d_RTC_SYNC_STATUS_PLL_CHANGING_MSB; + uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_LSB; + uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_MASK; + uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_RESET; + uint32_t d_RTC_SYNC_STATUS_OFFSET; + uint32_t d_SOC_CPU_CLOCK_OFFSET; + uint32_t d_SOC_CPU_CLOCK_STANDARD_MSB; + uint32_t d_SOC_CPU_CLOCK_STANDARD_LSB; + uint32_t d_SOC_CPU_CLOCK_STANDARD_MASK; + /* PLL end */ + + uint32_t d_SOC_POWER_REG_OFFSET; + uint32_t d_PCIE_INTR_CAUSE_ADDRESS; + uint32_t d_SOC_RESET_CONTROL_ADDRESS; + uint32_t d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK; + uint32_t d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB; + uint32_t d_SOC_RESET_CONTROL_CE_RST_MASK; + uint32_t d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK; + uint32_t d_CPU_INTR_ADDRESS; + uint32_t d_SOC_LF_TIMER_CONTROL0_ADDRESS; + uint32_t d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK; + + /* chip id start */ + uint32_t d_SI_CONFIG_ERR_INT_MASK; + uint32_t d_SI_CONFIG_ERR_INT_LSB; + uint32_t d_GPIO_ENABLE_W1TS_LOW_ADDRESS; + uint32_t d_GPIO_PIN0_CONFIG_LSB; + uint32_t d_GPIO_PIN0_PAD_PULL_LSB; + uint32_t d_GPIO_PIN0_PAD_PULL_MASK; + + uint32_t d_SOC_CHIP_ID_ADDRESS; + uint32_t d_SOC_CHIP_ID_VERSION_MASK; + uint32_t d_SOC_CHIP_ID_VERSION_LSB; + uint32_t d_SOC_CHIP_ID_REVISION_MASK; + uint32_t d_SOC_CHIP_ID_REVISION_LSB; + uint32_t d_SOC_CHIP_ID_REVISION_MSB; + uint32_t d_FW_AXI_MSI_ADDR; + uint32_t d_FW_AXI_MSI_DATA; + uint32_t d_WLAN_SUBSYSTEM_CORE_ID_ADDRESS; + + /* chip id end */ + + uint32_t d_A_SOC_CORE_SCRATCH_0_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_1_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_2_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_3_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_4_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_5_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_6_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_7_ADDRESS; + uint32_t d_A_SOC_CORE_SPARE_0_REGISTER; + uint32_t d_PCIE_INTR_FIRMWARE_ROUTE_MASK; + uint32_t d_A_SOC_CORE_PCIE_INTR_CAUSE_GRP1; + uint32_t d_A_SOC_CORE_SPARE_1_REGISTER; + uint32_t 
d_A_SOC_CORE_PCIE_INTR_CLR_GRP1; + uint32_t d_A_SOC_CORE_PCIE_INTR_ENABLE_GRP1; + uint32_t d_A_SOC_PCIE_PCIE_SCRATCH_0; + uint32_t d_A_SOC_PCIE_PCIE_SCRATCH_1; + uint32_t d_A_WIFI_APB_1_A_WFSS_CE_TARGET_HOST_DELTA; + uint32_t d_A_SOC_PCIE_PCIE_SCRATCH_2; + uint32_t d_A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK; + + uint32_t d_WLAN_DEBUG_INPUT_SEL_OFFSET; + uint32_t d_WLAN_DEBUG_INPUT_SEL_SRC_MSB; + uint32_t d_WLAN_DEBUG_INPUT_SEL_SRC_LSB; + uint32_t d_WLAN_DEBUG_INPUT_SEL_SRC_MASK; + uint32_t d_WLAN_DEBUG_CONTROL_OFFSET; + uint32_t d_WLAN_DEBUG_CONTROL_ENABLE_MSB; + uint32_t d_WLAN_DEBUG_CONTROL_ENABLE_LSB; + uint32_t d_WLAN_DEBUG_CONTROL_ENABLE_MASK; + uint32_t d_WLAN_DEBUG_OUT_OFFSET; + uint32_t d_WLAN_DEBUG_OUT_DATA_MSB; + uint32_t d_WLAN_DEBUG_OUT_DATA_LSB; + uint32_t d_WLAN_DEBUG_OUT_DATA_MASK; + uint32_t d_AMBA_DEBUG_BUS_OFFSET; + uint32_t d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB; + uint32_t d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB; + uint32_t d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK; + uint32_t d_AMBA_DEBUG_BUS_SEL_MSB; + uint32_t d_AMBA_DEBUG_BUS_SEL_LSB; + uint32_t d_AMBA_DEBUG_BUS_SEL_MASK; + +#ifdef QCA_WIFI_3_0_ADRASTEA + uint32_t d_Q6_ENABLE_REGISTER_0; + uint32_t d_Q6_ENABLE_REGISTER_1; + uint32_t d_Q6_CAUSE_REGISTER_0; + uint32_t d_Q6_CAUSE_REGISTER_1; + uint32_t d_Q6_CLEAR_REGISTER_0; + uint32_t d_Q6_CLEAR_REGISTER_1; +#endif +#ifdef CONFIG_BYPASS_QMI + uint32_t d_BYPASS_QMI_TEMP_REGISTER; +#endif + uint32_t d_WIFICMN_INT_STATUS_ADDRESS; +}; + +struct hostdef_s { + uint32_t d_INT_STATUS_ENABLE_ERROR_LSB; + uint32_t d_INT_STATUS_ENABLE_ERROR_MASK; + uint32_t d_INT_STATUS_ENABLE_CPU_LSB; + uint32_t d_INT_STATUS_ENABLE_CPU_MASK; + uint32_t d_INT_STATUS_ENABLE_COUNTER_LSB; + uint32_t d_INT_STATUS_ENABLE_COUNTER_MASK; + uint32_t d_INT_STATUS_ENABLE_MBOX_DATA_LSB; + uint32_t d_INT_STATUS_ENABLE_MBOX_DATA_MASK; + uint32_t d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB; + uint32_t d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK; + uint32_t d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB; + uint32_t 
d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK; + uint32_t d_COUNTER_INT_STATUS_ENABLE_BIT_LSB; + uint32_t d_COUNTER_INT_STATUS_ENABLE_BIT_MASK; + uint32_t d_INT_STATUS_ENABLE_ADDRESS; + uint32_t d_CPU_INT_STATUS_ENABLE_BIT_LSB; + uint32_t d_CPU_INT_STATUS_ENABLE_BIT_MASK; + uint32_t d_HOST_INT_STATUS_ADDRESS; + uint32_t d_CPU_INT_STATUS_ADDRESS; + uint32_t d_ERROR_INT_STATUS_ADDRESS; + uint32_t d_ERROR_INT_STATUS_WAKEUP_MASK; + uint32_t d_ERROR_INT_STATUS_WAKEUP_LSB; + uint32_t d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK; + uint32_t d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB; + uint32_t d_ERROR_INT_STATUS_TX_OVERFLOW_MASK; + uint32_t d_ERROR_INT_STATUS_TX_OVERFLOW_LSB; + uint32_t d_COUNT_DEC_ADDRESS; + uint32_t d_HOST_INT_STATUS_CPU_MASK; + uint32_t d_HOST_INT_STATUS_CPU_LSB; + uint32_t d_HOST_INT_STATUS_ERROR_MASK; + uint32_t d_HOST_INT_STATUS_ERROR_LSB; + uint32_t d_HOST_INT_STATUS_COUNTER_MASK; + uint32_t d_HOST_INT_STATUS_COUNTER_LSB; + uint32_t d_RX_LOOKAHEAD_VALID_ADDRESS; + uint32_t d_WINDOW_DATA_ADDRESS; + uint32_t d_WINDOW_READ_ADDR_ADDRESS; + uint32_t d_WINDOW_WRITE_ADDR_ADDRESS; + uint32_t d_SOC_GLOBAL_RESET_ADDRESS; + uint32_t d_RTC_STATE_ADDRESS; + uint32_t d_RTC_STATE_COLD_RESET_MASK; + uint32_t d_PCIE_LOCAL_BASE_ADDRESS; + uint32_t d_PCIE_SOC_WAKE_RESET; + uint32_t d_PCIE_SOC_WAKE_ADDRESS; + uint32_t d_PCIE_SOC_WAKE_V_MASK; + uint32_t d_RTC_STATE_V_MASK; + uint32_t d_RTC_STATE_V_LSB; + uint32_t d_FW_IND_EVENT_PENDING; + uint32_t d_FW_IND_INITIALIZED; + uint32_t d_FW_IND_HELPER; + uint32_t d_RTC_STATE_V_ON; +#if defined(SDIO_3_0) + uint32_t d_HOST_INT_STATUS_MBOX_DATA_MASK; + uint32_t d_HOST_INT_STATUS_MBOX_DATA_LSB; +#endif + uint32_t d_PCIE_SOC_RDY_STATUS_ADDRESS; + uint32_t d_PCIE_SOC_RDY_STATUS_BAR_MASK; + uint32_t d_SOC_PCIE_BASE_ADDRESS; + uint32_t d_MSI_MAGIC_ADR_ADDRESS; + uint32_t d_MSI_MAGIC_ADDRESS; + uint32_t d_HOST_CE_COUNT; + uint32_t d_ENABLE_MSI; + uint32_t d_MUX_ID_MASK; + uint32_t d_TRANSACTION_ID_MASK; + uint32_t d_DESC_DATA_FLAG_MASK; + uint32_t 
d_A_SOC_PCIE_PCIE_BAR0_START; + uint32_t d_FW_IND_HOST_READY; +}; + +struct host_shadow_regs_s { + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_0; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_1; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_2; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_3; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_4; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_5; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_6; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_7; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_8; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_9; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_10; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_11; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_12; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_13; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_14; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_15; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_16; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_17; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_18; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_19; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_20; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_21; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_22; + uint32_t d_A_LOCAL_SHADOW_REG_VALUE_23; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_0; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_1; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_2; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_3; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_4; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_5; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_6; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_7; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_8; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_9; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_10; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_11; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_12; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_13; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_14; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_15; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_16; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_17; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_18; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_19; + uint32_t 
d_A_LOCAL_SHADOW_REG_ADDRESS_20; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_21; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_22; + uint32_t d_A_LOCAL_SHADOW_REG_ADDRESS_23; +}; + + +/* + * @d_DST_WR_INDEX_ADDRESS: Destination ring write index + * + * @d_SRC_WATERMARK_ADDRESS: Source ring watermark + * + * @d_SRC_WATERMARK_LOW_MASK: Bits indicating low watermark from Source ring + * watermark + * + * @d_SRC_WATERMARK_HIGH_MASK: Bits indicating high watermark from Source ring + * watermark + * + * @d_DST_WATERMARK_LOW_MASK: Bits indicating low watermark from Destination + * ring watermark + * + * @d_DST_WATERMARK_HIGH_MASK: Bits indicating high watermark from Destination + * ring watermark + * + * @d_CURRENT_SRRI_ADDRESS: Current source ring read index. The Start Offset + * will be reflected after a CE transfer is completed. + * + * @d_CURRENT_DRRI_ADDRESS: Current Destination ring read index. The Start + * Offset will be reflected after a CE transfer + * is completed. + * + * @d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK: Source ring high watermark + * Interrupt Status + * + * @d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK: Source ring low watermark + * Interrupt Status + * + * @d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK: Destination ring high watermark + * Interrupt Status + * + * @d_HOST_IS_DST_RING_LOW_WATERMARK_MASK: Destination ring low watermark + * Interrupt Status + * + * @d_HOST_IS_ADDRESS: Host Interrupt Status Register + * + * @d_MISC_IS_ADDRESS: Miscellaneous Interrupt Status Register + * + * @d_HOST_IS_COPY_COMPLETE_MASK: Bits indicating Copy complete interrupt + * status from the Host Interrupt Status + * register + * + * @d_CE_WRAPPER_BASE_ADDRESS: Copy Engine Wrapper Base Address + * + * @d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS: CE Wrapper summary for interrupts + * to host + * + * @d_CE_WRAPPER_INDEX_BASE_LOW: The LSB Base address to which source and + * destination read indices are written + * + * @d_CE_WRAPPER_INDEX_BASE_HIGH: The MSB Base address to which source and + * 
destination read indices are written + * + * @d_HOST_IE_ADDRESS: Host Line Interrupt Enable Register + * + * @d_HOST_IE_COPY_COMPLETE_MASK: Bits indicating Copy complete interrupt + * enable from the IE register + * + * @d_SR_BA_ADDRESS: LSB of Source Ring Base Address + * + * @d_SR_BA_ADDRESS_HIGH: MSB of Source Ring Base Address + * + * @d_SR_SIZE_ADDRESS: Source Ring size - number of entries and Start Offset + * + * @d_CE_CTRL1_ADDRESS: CE Control register + * + * @d_CE_CTRL1_DMAX_LENGTH_MASK: Destination buffer Max Length used for error + * check + * + * @d_DR_BA_ADDRESS: Destination Ring Base Address Low + * + * @d_DR_BA_ADDRESS_HIGH: Destination Ring Base Address High + * + * @d_DR_SIZE_ADDRESS: Destination Ring size - number of entries Start Offset + * + * @d_CE_CMD_REGISTER: Implements commands to all CE Halt Flush + * + * @d_CE_MSI_ADDRESS: CE MSI LOW Address register + * + * @d_CE_MSI_ADDRESS_HIGH: CE MSI High Address register + * + * @d_CE_MSI_DATA: CE MSI Data Register + * + * @d_CE_MSI_ENABLE_BIT: Bit in CTRL1 register indication the MSI enable + * + * @d_MISC_IE_ADDRESS: Miscellaneous Interrupt Enable Register + * + * @d_MISC_IS_AXI_ERR_MASK: + * Bit in Misc IS indicating AXI Timeout Interrupt status + * + * @d_MISC_IS_DST_ADDR_ERR_MASK: + * Bit in Misc IS indicating Destination Address Error + * + * @d_MISC_IS_SRC_LEN_ERR_MASK: Bit in Misc IS indicating Source Zero Length + * Error Interrupt status + * + * @d_MISC_IS_DST_MAX_LEN_VIO_MASK: Bit in Misc IS indicating Destination Max + * Length Violated Interrupt status + * + * @d_MISC_IS_DST_RING_OVERFLOW_MASK: Bit in Misc IS indicating Destination + * Ring Overflow Interrupt status + * + * @d_MISC_IS_SRC_RING_OVERFLOW_MASK: Bit in Misc IS indicating Source Ring + * Overflow Interrupt status + * + * @d_SRC_WATERMARK_LOW_LSB: Source Ring Low Watermark LSB + * + * @d_SRC_WATERMARK_HIGH_LSB: Source Ring High Watermark LSB + * + * @d_DST_WATERMARK_LOW_LSB: Destination Ring Low Watermark LSB + * + * 
@d_DST_WATERMARK_HIGH_LSB: Destination Ring High Watermark LSB + * + * @d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK: + * Bits in d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDR + * indicating Copy engine miscellaneous interrupt summary + * + * @d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB: + * Bits in d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDR + * indicating Host interrupts summary + * + * @d_CE_CTRL1_DMAX_LENGTH_LSB: + * LSB of Destination buffer Max Length used for error check + * + * @d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK: + * Bits indicating Source ring Byte Swap enable. + * Treats source ring memory organisation as big-endian. + * + * @d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK: + * Bits indicating Destination ring byte swap enable. + * Treats destination ring memory organisation as big-endian + * + * @d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB: + * LSB of Source ring Byte Swap enable + * + * @d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB: + * LSB of Destination ring Byte Swap enable + * + * @d_CE_WRAPPER_DEBUG_OFFSET: Offset of CE OBS BUS Select register + * + * @d_CE_WRAPPER_DEBUG_SEL_MSB: + * MSB of Control register selecting inputs for trace/debug + * + * @d_CE_WRAPPER_DEBUG_SEL_LSB: + * LSB of Control register selecting inputs for trace/debug + * + * @d_CE_WRAPPER_DEBUG_SEL_MASK: + * Bit mask for trace/debug Control register + * + * @d_CE_DEBUG_OFFSET: Offset of Copy Engine FSM Debug Status + * + * @d_CE_DEBUG_SEL_MSB: MSB of Copy Engine FSM Debug Status + * + * @d_CE_DEBUG_SEL_LSB: LSB of Copy Engine FSM Debug Status + * + * @d_CE_DEBUG_SEL_MASK: Bits indicating Copy Engine FSM Debug Status + * + */ +struct ce_reg_def { + /* copy_engine.c */ + uint32_t d_DST_WR_INDEX_ADDRESS; + uint32_t d_SRC_WATERMARK_ADDRESS; + uint32_t d_SRC_WATERMARK_LOW_MASK; + uint32_t d_SRC_WATERMARK_HIGH_MASK; + uint32_t d_DST_WATERMARK_LOW_MASK; + uint32_t d_DST_WATERMARK_HIGH_MASK; + uint32_t d_CURRENT_SRRI_ADDRESS; + uint32_t d_CURRENT_DRRI_ADDRESS; + uint32_t d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK; + uint32_t 
d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK; + uint32_t d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK; + uint32_t d_HOST_IS_DST_RING_LOW_WATERMARK_MASK; + uint32_t d_HOST_IS_ADDRESS; + uint32_t d_MISC_IS_ADDRESS; + uint32_t d_HOST_IS_COPY_COMPLETE_MASK; + uint32_t d_CE_WRAPPER_BASE_ADDRESS; + uint32_t d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS; + uint32_t d_CE_DDR_ADDRESS_FOR_RRI_LOW; + uint32_t d_CE_DDR_ADDRESS_FOR_RRI_HIGH; + uint32_t d_HOST_IE_ADDRESS; + uint32_t d_HOST_IE_ADDRESS_2; + uint32_t d_HOST_IE_COPY_COMPLETE_MASK; + uint32_t d_SR_BA_ADDRESS; + uint32_t d_SR_BA_ADDRESS_HIGH; + uint32_t d_SR_SIZE_ADDRESS; + uint32_t d_CE_CTRL1_ADDRESS; + uint32_t d_CE_CTRL1_DMAX_LENGTH_MASK; + uint32_t d_DR_BA_ADDRESS; + uint32_t d_DR_BA_ADDRESS_HIGH; + uint32_t d_DR_SIZE_ADDRESS; + uint32_t d_CE_CMD_REGISTER; + uint32_t d_CE_MSI_ADDRESS; + uint32_t d_CE_MSI_ADDRESS_HIGH; + uint32_t d_CE_MSI_DATA; + uint32_t d_CE_MSI_ENABLE_BIT; + uint32_t d_MISC_IE_ADDRESS; + uint32_t d_MISC_IS_AXI_ERR_MASK; + uint32_t d_MISC_IS_DST_ADDR_ERR_MASK; + uint32_t d_MISC_IS_SRC_LEN_ERR_MASK; + uint32_t d_MISC_IS_DST_MAX_LEN_VIO_MASK; + uint32_t d_MISC_IS_DST_RING_OVERFLOW_MASK; + uint32_t d_MISC_IS_SRC_RING_OVERFLOW_MASK; + uint32_t d_SRC_WATERMARK_LOW_LSB; + uint32_t d_SRC_WATERMARK_HIGH_LSB; + uint32_t d_DST_WATERMARK_LOW_LSB; + uint32_t d_DST_WATERMARK_HIGH_LSB; + uint32_t d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK; + uint32_t d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB; + uint32_t d_CE_CTRL1_DMAX_LENGTH_LSB; + uint32_t d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK; + uint32_t d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK; + uint32_t d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB; + uint32_t d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB; + uint32_t d_CE_CTRL1_IDX_UPD_EN_MASK; + uint32_t d_CE_WRAPPER_DEBUG_OFFSET; + uint32_t d_CE_WRAPPER_DEBUG_SEL_MSB; + uint32_t d_CE_WRAPPER_DEBUG_SEL_LSB; + uint32_t d_CE_WRAPPER_DEBUG_SEL_MASK; + uint32_t d_CE_DEBUG_OFFSET; + uint32_t d_CE_DEBUG_SEL_MSB; + uint32_t d_CE_DEBUG_SEL_LSB; + uint32_t 
d_CE_DEBUG_SEL_MASK; + uint32_t d_CE0_BASE_ADDRESS; + uint32_t d_CE1_BASE_ADDRESS; + uint32_t d_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES; + uint32_t d_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS; + uint32_t d_HOST_IE_ADDRESS_3; + uint32_t d_HOST_IE_REG1_CE_LSB; + uint32_t d_HOST_IE_REG2_CE_LSB; + uint32_t d_HOST_IE_REG3_CE_LSB; +}; + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/regtable.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/regtable.h new file mode 100644 index 0000000000000000000000000000000000000000..31cc83609cfe6d9b46ec6c4f492ee3a343d1f4bc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/regtable.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _REGTABLE_H_ +#define _REGTABLE_H_ + +#ifdef HIF_SDIO +#include "regtable_sdio.h" +#endif + +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) +#include "reg_struct.h" +#include "regtable_pcie.h" +#endif + +#if defined(HIF_USB) +#include "regtable_usb.h" +#endif + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/regtable_pcie.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/regtable_pcie.h new file mode 100644 index 0000000000000000000000000000000000000000..cd8b61317b0f34f276627914ca270891336360b3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/regtable_pcie.h @@ -0,0 +1,742 @@ +/* + * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _REGTABLE_PCIE_H_ +#define _REGTABLE_PCIE_H_ + +#define MISSING 0 + +#define A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK \ + (scn->targetdef->d_A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK) +#define A_SOC_CORE_PCIE_INTR_CAUSE_GRP1 \ + (scn->targetdef->d_A_SOC_CORE_PCIE_INTR_CAUSE_GRP1) +#define A_SOC_CORE_SPARE_1_REGISTER \ + (scn->targetdef->d_A_SOC_CORE_SPARE_1_REGISTER) +#define A_SOC_CORE_PCIE_INTR_CLR_GRP1 \ + (scn->targetdef->d_A_SOC_CORE_PCIE_INTR_CLR_GRP1) +#define A_SOC_CORE_PCIE_INTR_ENABLE_GRP1 \ + (scn->targetdef->d_A_SOC_CORE_PCIE_INTR_ENABLE_GRP1) +#define A_SOC_PCIE_PCIE_SCRATCH_0 \ + (scn->targetdef->d_A_SOC_PCIE_PCIE_SCRATCH_0) +#define A_SOC_PCIE_PCIE_SCRATCH_1 \ + (scn->targetdef->d_A_SOC_PCIE_PCIE_SCRATCH_1) +#define A_WIFI_APB_1_A_WFSS_CE_TARGET_HOST_DELTA \ + (scn->targetdef->d_A_WIFI_APB_1_A_WFSS_CE_TARGET_HOST_DELTA) +#define A_SOC_PCIE_PCIE_SCRATCH_2 \ + (scn->targetdef->d_A_SOC_PCIE_PCIE_SCRATCH_2) +/* end Q6 iHelium emu registers */ + +#define PCIE_INTR_FIRMWARE_ROUTE_MASK \ + (scn->targetdef->d_PCIE_INTR_FIRMWARE_ROUTE_MASK) +#define A_SOC_CORE_SPARE_0_REGISTER \ + (scn->targetdef->d_A_SOC_CORE_SPARE_0_REGISTER) +#define A_SOC_CORE_SCRATCH_0_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_0_ADDRESS) +#define A_SOC_CORE_SCRATCH_1_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_1_ADDRESS) +#define A_SOC_CORE_SCRATCH_2_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_2_ADDRESS) +#define A_SOC_CORE_SCRATCH_3_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_3_ADDRESS) +#define A_SOC_CORE_SCRATCH_4_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_4_ADDRESS) +#define A_SOC_CORE_SCRATCH_5_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_5_ADDRESS) +#define A_SOC_CORE_SCRATCH_6_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_6_ADDRESS) +#define A_SOC_CORE_SCRATCH_7_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_7_ADDRESS) +#define RTC_SOC_BASE_ADDRESS (scn->targetdef->d_RTC_SOC_BASE_ADDRESS) +#define RTC_WMAC_BASE_ADDRESS 
(scn->targetdef->d_RTC_WMAC_BASE_ADDRESS) +#define SYSTEM_SLEEP_OFFSET (scn->targetdef->d_SYSTEM_SLEEP_OFFSET) +#define WLAN_SYSTEM_SLEEP_OFFSET \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_OFFSET) +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_DISABLE_LSB) +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_DISABLE_MASK) +#define CLOCK_CONTROL_OFFSET (scn->targetdef->d_CLOCK_CONTROL_OFFSET) +#define CLOCK_CONTROL_SI0_CLK_MASK \ + (scn->targetdef->d_CLOCK_CONTROL_SI0_CLK_MASK) +#define RESET_CONTROL_OFFSET (scn->targetdef->d_RESET_CONTROL_OFFSET) +#define RESET_CONTROL_MBOX_RST_MASK \ + (scn->targetdef->d_RESET_CONTROL_MBOX_RST_MASK) +#define RESET_CONTROL_SI0_RST_MASK \ + (scn->targetdef->d_RESET_CONTROL_SI0_RST_MASK) +#define WLAN_RESET_CONTROL_OFFSET \ + (scn->targetdef->d_WLAN_RESET_CONTROL_OFFSET) +#define WLAN_RESET_CONTROL_COLD_RST_MASK \ + (scn->targetdef->d_WLAN_RESET_CONTROL_COLD_RST_MASK) +#define WLAN_RESET_CONTROL_WARM_RST_MASK \ + (scn->targetdef->d_WLAN_RESET_CONTROL_WARM_RST_MASK) +#define GPIO_BASE_ADDRESS (scn->targetdef->d_GPIO_BASE_ADDRESS) +#define GPIO_PIN0_OFFSET (scn->targetdef->d_GPIO_PIN0_OFFSET) +#define GPIO_PIN1_OFFSET (scn->targetdef->d_GPIO_PIN1_OFFSET) +#define GPIO_PIN0_CONFIG_MASK (scn->targetdef->d_GPIO_PIN0_CONFIG_MASK) +#define GPIO_PIN1_CONFIG_MASK (scn->targetdef->d_GPIO_PIN1_CONFIG_MASK) +#define A_SOC_CORE_SCRATCH_0 (scn->targetdef->d_A_SOC_CORE_SCRATCH_0) +#define SI_CONFIG_BIDIR_OD_DATA_LSB \ + (scn->targetdef->d_SI_CONFIG_BIDIR_OD_DATA_LSB) +#define SI_CONFIG_BIDIR_OD_DATA_MASK \ + (scn->targetdef->d_SI_CONFIG_BIDIR_OD_DATA_MASK) +#define SI_CONFIG_I2C_LSB (scn->targetdef->d_SI_CONFIG_I2C_LSB) +#define SI_CONFIG_I2C_MASK \ + (scn->targetdef->d_SI_CONFIG_I2C_MASK) +#define SI_CONFIG_POS_SAMPLE_LSB \ + (scn->targetdef->d_SI_CONFIG_POS_SAMPLE_LSB) +#define SI_CONFIG_POS_SAMPLE_MASK \ + (scn->targetdef->d_SI_CONFIG_POS_SAMPLE_MASK) +#define SI_CONFIG_INACTIVE_CLK_LSB \ 
+ (scn->targetdef->d_SI_CONFIG_INACTIVE_CLK_LSB) +#define SI_CONFIG_INACTIVE_CLK_MASK \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_CLK_MASK) +#define SI_CONFIG_INACTIVE_DATA_LSB \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_DATA_LSB) +#define SI_CONFIG_INACTIVE_DATA_MASK \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_DATA_MASK) +#define SI_CONFIG_DIVIDER_LSB (scn->targetdef->d_SI_CONFIG_DIVIDER_LSB) +#define SI_CONFIG_DIVIDER_MASK (scn->targetdef->d_SI_CONFIG_DIVIDER_MASK) +#define SI_BASE_ADDRESS (scn->targetdef->d_SI_BASE_ADDRESS) +#define SI_CONFIG_OFFSET (scn->targetdef->d_SI_CONFIG_OFFSET) +#define SI_TX_DATA0_OFFSET (scn->targetdef->d_SI_TX_DATA0_OFFSET) +#define SI_TX_DATA1_OFFSET (scn->targetdef->d_SI_TX_DATA1_OFFSET) +#define SI_RX_DATA0_OFFSET (scn->targetdef->d_SI_RX_DATA0_OFFSET) +#define SI_RX_DATA1_OFFSET (scn->targetdef->d_SI_RX_DATA1_OFFSET) +#define SI_CS_OFFSET (scn->targetdef->d_SI_CS_OFFSET) +#define SI_CS_DONE_ERR_MASK (scn->targetdef->d_SI_CS_DONE_ERR_MASK) +#define SI_CS_DONE_INT_MASK (scn->targetdef->d_SI_CS_DONE_INT_MASK) +#define SI_CS_START_LSB (scn->targetdef->d_SI_CS_START_LSB) +#define SI_CS_START_MASK (scn->targetdef->d_SI_CS_START_MASK) +#define SI_CS_RX_CNT_LSB (scn->targetdef->d_SI_CS_RX_CNT_LSB) +#define SI_CS_RX_CNT_MASK (scn->targetdef->d_SI_CS_RX_CNT_MASK) +#define SI_CS_TX_CNT_LSB (scn->targetdef->d_SI_CS_TX_CNT_LSB) +#define SI_CS_TX_CNT_MASK (scn->targetdef->d_SI_CS_TX_CNT_MASK) +#define EEPROM_SZ (scn->targetdef->d_BOARD_DATA_SZ) +#define EEPROM_EXT_SZ (scn->targetdef->d_BOARD_EXT_DATA_SZ) +#define MBOX_BASE_ADDRESS (scn->targetdef->d_MBOX_BASE_ADDRESS) +#define LOCAL_SCRATCH_OFFSET (scn->targetdef->d_LOCAL_SCRATCH_OFFSET) +#define CPU_CLOCK_OFFSET (scn->targetdef->d_CPU_CLOCK_OFFSET) +#define LPO_CAL_OFFSET (scn->targetdef->d_LPO_CAL_OFFSET) +#define GPIO_PIN10_OFFSET (scn->targetdef->d_GPIO_PIN10_OFFSET) +#define GPIO_PIN11_OFFSET (scn->targetdef->d_GPIO_PIN11_OFFSET) +#define GPIO_PIN12_OFFSET (scn->targetdef->d_GPIO_PIN12_OFFSET) 
+#define GPIO_PIN13_OFFSET (scn->targetdef->d_GPIO_PIN13_OFFSET) +#define CLOCK_GPIO_OFFSET (scn->targetdef->d_CLOCK_GPIO_OFFSET) +#define CPU_CLOCK_STANDARD_LSB (scn->targetdef->d_CPU_CLOCK_STANDARD_LSB) +#define CPU_CLOCK_STANDARD_MASK (scn->targetdef->d_CPU_CLOCK_STANDARD_MASK) +#define LPO_CAL_ENABLE_LSB (scn->targetdef->d_LPO_CAL_ENABLE_LSB) +#define LPO_CAL_ENABLE_MASK (scn->targetdef->d_LPO_CAL_ENABLE_MASK) +#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB \ + (scn->targetdef->d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB) +#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK \ + (scn->targetdef->d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK) +#define ANALOG_INTF_BASE_ADDRESS (scn->targetdef->d_ANALOG_INTF_BASE_ADDRESS) +#define WLAN_MAC_BASE_ADDRESS (scn->targetdef->d_WLAN_MAC_BASE_ADDRESS) +#define FW_INDICATOR_ADDRESS (scn->targetdef->d_FW_INDICATOR_ADDRESS) +#define DRAM_BASE_ADDRESS (scn->targetdef->d_DRAM_BASE_ADDRESS) +#define SOC_CORE_BASE_ADDRESS (scn->targetdef->d_SOC_CORE_BASE_ADDRESS) +#define CORE_CTRL_ADDRESS (scn->targetdef->d_CORE_CTRL_ADDRESS) +#define CE_COUNT (scn->targetdef->d_CE_COUNT) +#define PCIE_INTR_ENABLE_ADDRESS (scn->targetdef->d_PCIE_INTR_ENABLE_ADDRESS) +#define PCIE_INTR_CLR_ADDRESS (scn->targetdef->d_PCIE_INTR_CLR_ADDRESS) +#define PCIE_INTR_FIRMWARE_MASK (scn->targetdef->d_PCIE_INTR_FIRMWARE_MASK) +#define PCIE_INTR_CE_MASK_ALL (scn->targetdef->d_PCIE_INTR_CE_MASK_ALL) +#define CORE_CTRL_CPU_INTR_MASK (scn->targetdef->d_CORE_CTRL_CPU_INTR_MASK) +#define PCIE_INTR_CAUSE_ADDRESS (scn->targetdef->d_PCIE_INTR_CAUSE_ADDRESS) +#define SOC_RESET_CONTROL_ADDRESS (scn->targetdef->d_SOC_RESET_CONTROL_ADDRESS) +#define HOST_GROUP0_MASK (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL | \ + A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK) +#define SOC_RESET_CONTROL_CE_RST_MASK \ + (scn->targetdef->d_SOC_RESET_CONTROL_CE_RST_MASK) +#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK \ + (scn->targetdef->d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK) +#define CPU_INTR_ADDRESS (scn->targetdef->d_CPU_INTR_ADDRESS) 
+#define SOC_LF_TIMER_CONTROL0_ADDRESS \ + (scn->targetdef->d_SOC_LF_TIMER_CONTROL0_ADDRESS) +#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK \ + (scn->targetdef->d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK) +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB \ + (scn->targetdef->d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB) +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK \ + (scn->targetdef->d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK) + +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_GET(x) \ + (((x) & SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK) >> \ + SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB) +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_SET(x) \ + (((x) << SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB) & \ + SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK) + +/* hif_pci.c */ +#define CHIP_ID_ADDRESS (scn->targetdef->d_SOC_CHIP_ID_ADDRESS) +#define SOC_CHIP_ID_REVISION_MASK (scn->targetdef->d_SOC_CHIP_ID_REVISION_MASK) +#define SOC_CHIP_ID_REVISION_LSB (scn->targetdef->d_SOC_CHIP_ID_REVISION_LSB) +#define SOC_CHIP_ID_VERSION_MASK (scn->targetdef->d_SOC_CHIP_ID_VERSION_MASK) +#define SOC_CHIP_ID_VERSION_LSB (scn->targetdef->d_SOC_CHIP_ID_VERSION_LSB) +#define CHIP_ID_REVISION_GET(x) \ + (((x) & SOC_CHIP_ID_REVISION_MASK) >> SOC_CHIP_ID_REVISION_LSB) +#define CHIP_ID_VERSION_GET(x) \ + (((x) & SOC_CHIP_ID_VERSION_MASK) >> SOC_CHIP_ID_VERSION_LSB) +/* hif_pci.c end */ + +/* misc */ +#define SR_WR_INDEX_ADDRESS (scn->targetdef->d_SR_WR_INDEX_ADDRESS) +#define DST_WATERMARK_ADDRESS (scn->targetdef->d_DST_WATERMARK_ADDRESS) +#define SOC_POWER_REG_OFFSET (scn->targetdef->d_SOC_POWER_REG_OFFSET) +/* end */ + +#if !defined(CONFIG_WIN) +/* htt_rx.c */ +#define RX_MSDU_END_4_FIRST_MSDU_MASK \ + (pdev->targetdef->d_RX_MSDU_END_4_FIRST_MSDU_MASK) +#define RX_MSDU_END_4_FIRST_MSDU_LSB \ + (pdev->targetdef->d_RX_MSDU_END_4_FIRST_MSDU_LSB) +#define RX_MPDU_START_0_RETRY_LSB \ + (pdev->targetdef->d_RX_MPDU_START_0_RETRY_LSB) +#define RX_MPDU_START_0_RETRY_MASK \ + 
(pdev->targetdef->d_RX_MPDU_START_0_RETRY_MASK) +#define RX_MPDU_START_0_SEQ_NUM_MASK \ + (pdev->targetdef->d_RX_MPDU_START_0_SEQ_NUM_MASK) +#define RX_MPDU_START_0_SEQ_NUM_LSB \ + (pdev->targetdef->d_RX_MPDU_START_0_SEQ_NUM_LSB) +#define RX_MPDU_START_2_PN_47_32_LSB \ + (pdev->targetdef->d_RX_MPDU_START_2_PN_47_32_LSB) +#define RX_MPDU_START_2_PN_47_32_MASK \ + (pdev->targetdef->d_RX_MPDU_START_2_PN_47_32_MASK) +#define RX_MPDU_START_2_TID_LSB \ + (pdev->targetdef->d_RX_MPDU_START_2_TID_LSB) +#define RX_MPDU_START_2_TID_MASK \ + (pdev->targetdef->d_RX_MPDU_START_2_TID_MASK) +#define RX_MSDU_END_1_KEY_ID_OCT_MASK \ + (pdev->targetdef->d_RX_MSDU_END_1_KEY_ID_OCT_MASK) +#define RX_MSDU_END_1_KEY_ID_OCT_LSB \ + (pdev->targetdef->d_RX_MSDU_END_1_KEY_ID_OCT_LSB) +#define RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK \ + (pdev->targetdef->d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK) +#define RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB \ + (pdev->targetdef->d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB) +#define RX_MSDU_END_4_LAST_MSDU_MASK \ + (pdev->targetdef->d_RX_MSDU_END_4_LAST_MSDU_MASK) +#define RX_MSDU_END_4_LAST_MSDU_LSB \ + (pdev->targetdef->d_RX_MSDU_END_4_LAST_MSDU_LSB) +#define RX_ATTENTION_0_MCAST_BCAST_MASK \ + (pdev->targetdef->d_RX_ATTENTION_0_MCAST_BCAST_MASK) +#define RX_ATTENTION_0_MCAST_BCAST_LSB \ + (pdev->targetdef->d_RX_ATTENTION_0_MCAST_BCAST_LSB) +#define RX_ATTENTION_0_FRAGMENT_MASK \ + (pdev->targetdef->d_RX_ATTENTION_0_FRAGMENT_MASK) +#define RX_ATTENTION_0_FRAGMENT_LSB \ + (pdev->targetdef->d_RX_ATTENTION_0_FRAGMENT_LSB) +#define RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK \ + (pdev->targetdef->d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK) +#define RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK \ + (pdev->targetdef->d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK) +#define RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB \ + (pdev->targetdef->d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB) +#define RX_MSDU_START_0_MSDU_LENGTH_MASK \ + (pdev->targetdef->d_RX_MSDU_START_0_MSDU_LENGTH_MASK) +#define 
RX_MSDU_START_0_MSDU_LENGTH_LSB \ + (pdev->targetdef->d_RX_MSDU_START_0_MSDU_LENGTH_LSB) +#define RX_MPDU_START_0_ENCRYPTED_MASK \ + (pdev->targetdef->d_RX_MPDU_START_0_ENCRYPTED_MASK) +#define RX_MPDU_START_0_ENCRYPTED_LSB \ + (pdev->targetdef->d_RX_MPDU_START_0_ENCRYPTED_LSB) +#define RX_ATTENTION_0_MORE_DATA_MASK \ + (pdev->targetdef->d_RX_ATTENTION_0_MORE_DATA_MASK) +#define RX_ATTENTION_0_MSDU_DONE_MASK \ + (pdev->targetdef->d_RX_ATTENTION_0_MSDU_DONE_MASK) +#define RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK \ + (pdev->targetdef->d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK) +#if !defined(QCA6290_HEADERS_DEF) +#ifndef RX_MSDU_START_2_DECAP_FORMAT_OFFSET +#define RX_MSDU_START_2_DECAP_FORMAT_OFFSET \ + (pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET) +#endif +#ifndef RX_MSDU_START_2_DECAP_FORMAT_LSB +#define RX_MSDU_START_2_DECAP_FORMAT_LSB \ + (pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_LSB) +#endif +#ifndef RX_MSDU_START_2_DECAP_FORMAT_MASK +#define RX_MSDU_START_2_DECAP_FORMAT_MASK \ + (pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_MASK) +#endif +#endif /*!QCA6290_HEADERS_DEF*/ +/* end */ +#endif + +/* copy_engine.c */ +/* end */ +/* PLL start */ +#define EFUSE_OFFSET (scn->targetdef->d_EFUSE_OFFSET) +#define EFUSE_XTAL_SEL_MSB (scn->targetdef->d_EFUSE_XTAL_SEL_MSB) +#define EFUSE_XTAL_SEL_LSB (scn->targetdef->d_EFUSE_XTAL_SEL_LSB) +#define EFUSE_XTAL_SEL_MASK (scn->targetdef->d_EFUSE_XTAL_SEL_MASK) +#define BB_PLL_CONFIG_OFFSET (scn->targetdef->d_BB_PLL_CONFIG_OFFSET) +#define BB_PLL_CONFIG_OUTDIV_MSB (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_MSB) +#define BB_PLL_CONFIG_OUTDIV_LSB (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_LSB) +#define BB_PLL_CONFIG_OUTDIV_MASK (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_MASK) +#define BB_PLL_CONFIG_FRAC_MSB (scn->targetdef->d_BB_PLL_CONFIG_FRAC_MSB) +#define BB_PLL_CONFIG_FRAC_LSB (scn->targetdef->d_BB_PLL_CONFIG_FRAC_LSB) +#define BB_PLL_CONFIG_FRAC_MASK (scn->targetdef->d_BB_PLL_CONFIG_FRAC_MASK) +#define 
WLAN_PLL_SETTLE_TIME_MSB (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_MSB) +#define WLAN_PLL_SETTLE_TIME_LSB (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_LSB) +#define WLAN_PLL_SETTLE_TIME_MASK (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_MASK) +#define WLAN_PLL_SETTLE_OFFSET (scn->targetdef->d_WLAN_PLL_SETTLE_OFFSET) +#define WLAN_PLL_SETTLE_SW_MASK (scn->targetdef->d_WLAN_PLL_SETTLE_SW_MASK) +#define WLAN_PLL_SETTLE_RSTMASK (scn->targetdef->d_WLAN_PLL_SETTLE_RSTMASK) +#define WLAN_PLL_SETTLE_RESET (scn->targetdef->d_WLAN_PLL_SETTLE_RESET) +#define WLAN_PLL_CONTROL_NOPWD_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_MSB) +#define WLAN_PLL_CONTROL_NOPWD_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_LSB) +#define WLAN_PLL_CONTROL_NOPWD_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_MASK) +#define WLAN_PLL_CONTROL_BYPASS_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_MSB) +#define WLAN_PLL_CONTROL_BYPASS_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_LSB) +#define WLAN_PLL_CONTROL_BYPASS_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_MASK) +#define WLAN_PLL_CONTROL_BYPASS_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_RESET) +#define WLAN_PLL_CONTROL_CLK_SEL_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_MSB) +#define WLAN_PLL_CONTROL_CLK_SEL_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_LSB) +#define WLAN_PLL_CONTROL_CLK_SEL_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_MASK) +#define WLAN_PLL_CONTROL_CLK_SEL_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_RESET) +#define WLAN_PLL_CONTROL_REFDIV_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_MSB) +#define WLAN_PLL_CONTROL_REFDIV_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_LSB) +#define WLAN_PLL_CONTROL_REFDIV_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_MASK) +#define WLAN_PLL_CONTROL_REFDIV_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_RESET) +#define WLAN_PLL_CONTROL_DIV_MSB (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_MSB) +#define 
WLAN_PLL_CONTROL_DIV_LSB (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_LSB) +#define WLAN_PLL_CONTROL_DIV_MASK (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_MASK) +#define WLAN_PLL_CONTROL_DIV_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_RESET) +#define WLAN_PLL_CONTROL_OFFSET (scn->targetdef->d_WLAN_PLL_CONTROL_OFFSET) +#define WLAN_PLL_CONTROL_SW_MASK (scn->targetdef->d_WLAN_PLL_CONTROL_SW_MASK) +#define WLAN_PLL_CONTROL_RSTMASK (scn->targetdef->d_WLAN_PLL_CONTROL_RSTMASK) +#define WLAN_PLL_CONTROL_RESET (scn->targetdef->d_WLAN_PLL_CONTROL_RESET) +#define SOC_CORE_CLK_CTRL_OFFSET (scn->targetdef->d_SOC_CORE_CLK_CTRL_OFFSET) +#define SOC_CORE_CLK_CTRL_DIV_MSB (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_MSB) +#define SOC_CORE_CLK_CTRL_DIV_LSB (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_LSB) +#define SOC_CORE_CLK_CTRL_DIV_MASK \ + (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_MSB \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_MSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_LSB \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_LSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_MASK \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_RESET \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_RESET) +#define RTC_SYNC_STATUS_OFFSET (scn->targetdef->d_RTC_SYNC_STATUS_OFFSET) +#define SOC_CPU_CLOCK_OFFSET (scn->targetdef->d_SOC_CPU_CLOCK_OFFSET) +#define SOC_CPU_CLOCK_STANDARD_MSB \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_MSB) +#define SOC_CPU_CLOCK_STANDARD_LSB \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_LSB) +#define SOC_CPU_CLOCK_STANDARD_MASK \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_MASK) +/* PLL end */ + +#define FW_CPU_PLL_CONFIG \ + (scn->targetdef->d_FW_CPU_PLL_CONFIG) + +#define WIFICMN_PCIE_BAR_REG_ADDRESS \ + (sc->targetdef->d_WIFICMN_PCIE_BAR_REG_ADDRESS) + + /* htt tx */ +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK \ + 
(pdev->targetdef->d_MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK) +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK) +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK) +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK) +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB) +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB) +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB) +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB \ + (pdev->targetdef->d_MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB) + +#define CE_CMD_ADDRESS \ + (scn->targetdef->d_CE_CMD_ADDRESS) +#define CE_CMD_HALT_MASK \ + (scn->targetdef->d_CE_CMD_HALT_MASK) +#define CE_CMD_HALT_STATUS_MASK \ + (scn->targetdef->d_CE_CMD_HALT_STATUS_MASK) +#define CE_CMD_HALT_STATUS_LSB \ + (scn->targetdef->d_CE_CMD_HALT_STATUS_LSB) + +#define SI_CONFIG_ERR_INT_MASK \ + (scn->targetdef->d_SI_CONFIG_ERR_INT_MASK) +#define SI_CONFIG_ERR_INT_LSB \ + (scn->targetdef->d_SI_CONFIG_ERR_INT_LSB) +#define GPIO_ENABLE_W1TS_LOW_ADDRESS \ + (scn->targetdef->d_GPIO_ENABLE_W1TS_LOW_ADDRESS) +#define GPIO_PIN0_CONFIG_LSB \ + (scn->targetdef->d_GPIO_PIN0_CONFIG_LSB) +#define GPIO_PIN0_PAD_PULL_LSB \ + (scn->targetdef->d_GPIO_PIN0_PAD_PULL_LSB) +#define GPIO_PIN0_PAD_PULL_MASK \ + (scn->targetdef->d_GPIO_PIN0_PAD_PULL_MASK) + +#define SOC_CHIP_ID_REVISION_MSB \ + (scn->targetdef->d_SOC_CHIP_ID_REVISION_MSB) + +#define FW_AXI_MSI_ADDR \ + (scn->targetdef->d_FW_AXI_MSI_ADDR) +#define FW_AXI_MSI_DATA \ + (scn->targetdef->d_FW_AXI_MSI_DATA) +#define WLAN_SUBSYSTEM_CORE_ID_ADDRESS \ + 
(scn->targetdef->d_WLAN_SUBSYSTEM_CORE_ID_ADDRESS) +#define FPGA_VERSION_ADDRESS \ + (scn->targetdef->d_FPGA_VERSION_ADDRESS) + +/* SET macros */ +#define WLAN_SYSTEM_SLEEP_DISABLE_SET(x) \ + (((x) << WLAN_SYSTEM_SLEEP_DISABLE_LSB) & \ + WLAN_SYSTEM_SLEEP_DISABLE_MASK) +#define SI_CONFIG_BIDIR_OD_DATA_SET(x) \ + (((x) << SI_CONFIG_BIDIR_OD_DATA_LSB) & SI_CONFIG_BIDIR_OD_DATA_MASK) +#define SI_CONFIG_I2C_SET(x) (((x) << SI_CONFIG_I2C_LSB) & SI_CONFIG_I2C_MASK) +#define SI_CONFIG_POS_SAMPLE_SET(x) \ + (((x) << SI_CONFIG_POS_SAMPLE_LSB) & SI_CONFIG_POS_SAMPLE_MASK) +#define SI_CONFIG_INACTIVE_CLK_SET(x) \ + (((x) << SI_CONFIG_INACTIVE_CLK_LSB) & SI_CONFIG_INACTIVE_CLK_MASK) +#define SI_CONFIG_INACTIVE_DATA_SET(x) \ + (((x) << SI_CONFIG_INACTIVE_DATA_LSB) & SI_CONFIG_INACTIVE_DATA_MASK) +#define SI_CONFIG_DIVIDER_SET(x) \ + (((x) << SI_CONFIG_DIVIDER_LSB) & SI_CONFIG_DIVIDER_MASK) +#define SI_CS_START_SET(x) (((x) << SI_CS_START_LSB) & SI_CS_START_MASK) +#define SI_CS_RX_CNT_SET(x) (((x) << SI_CS_RX_CNT_LSB) & SI_CS_RX_CNT_MASK) +#define SI_CS_TX_CNT_SET(x) (((x) << SI_CS_TX_CNT_LSB) & SI_CS_TX_CNT_MASK) +#define LPO_CAL_ENABLE_SET(x) \ + (((x) << LPO_CAL_ENABLE_LSB) & LPO_CAL_ENABLE_MASK) +#define CPU_CLOCK_STANDARD_SET(x) \ + (((x) << CPU_CLOCK_STANDARD_LSB) & CPU_CLOCK_STANDARD_MASK) +#define CLOCK_GPIO_BT_CLK_OUT_EN_SET(x) \ + (((x) << CLOCK_GPIO_BT_CLK_OUT_EN_LSB) & CLOCK_GPIO_BT_CLK_OUT_EN_MASK) +/* copy_engine.c */ +/* end */ +/* PLL start */ +#define EFUSE_XTAL_SEL_GET(x) \ + (((x) & EFUSE_XTAL_SEL_MASK) >> EFUSE_XTAL_SEL_LSB) +#define EFUSE_XTAL_SEL_SET(x) \ + (((x) << EFUSE_XTAL_SEL_LSB) & EFUSE_XTAL_SEL_MASK) +#define BB_PLL_CONFIG_OUTDIV_GET(x) \ + (((x) & BB_PLL_CONFIG_OUTDIV_MASK) >> BB_PLL_CONFIG_OUTDIV_LSB) +#define BB_PLL_CONFIG_OUTDIV_SET(x) \ + (((x) << BB_PLL_CONFIG_OUTDIV_LSB) & BB_PLL_CONFIG_OUTDIV_MASK) +#define BB_PLL_CONFIG_FRAC_GET(x) \ + (((x) & BB_PLL_CONFIG_FRAC_MASK) >> BB_PLL_CONFIG_FRAC_LSB) +#define BB_PLL_CONFIG_FRAC_SET(x) \ + (((x) 
<< BB_PLL_CONFIG_FRAC_LSB) & BB_PLL_CONFIG_FRAC_MASK) +#define WLAN_PLL_SETTLE_TIME_GET(x) \ + (((x) & WLAN_PLL_SETTLE_TIME_MASK) >> WLAN_PLL_SETTLE_TIME_LSB) +#define WLAN_PLL_SETTLE_TIME_SET(x) \ + (((x) << WLAN_PLL_SETTLE_TIME_LSB) & WLAN_PLL_SETTLE_TIME_MASK) +#define WLAN_PLL_CONTROL_NOPWD_GET(x) \ + (((x) & WLAN_PLL_CONTROL_NOPWD_MASK) >> WLAN_PLL_CONTROL_NOPWD_LSB) +#define WLAN_PLL_CONTROL_NOPWD_SET(x) \ + (((x) << WLAN_PLL_CONTROL_NOPWD_LSB) & WLAN_PLL_CONTROL_NOPWD_MASK) +#define WLAN_PLL_CONTROL_BYPASS_GET(x) \ + (((x) & WLAN_PLL_CONTROL_BYPASS_MASK) >> WLAN_PLL_CONTROL_BYPASS_LSB) +#define WLAN_PLL_CONTROL_BYPASS_SET(x) \ + (((x) << WLAN_PLL_CONTROL_BYPASS_LSB) & WLAN_PLL_CONTROL_BYPASS_MASK) +#define WLAN_PLL_CONTROL_CLK_SEL_GET(x) \ + (((x) & WLAN_PLL_CONTROL_CLK_SEL_MASK) >> WLAN_PLL_CONTROL_CLK_SEL_LSB) +#define WLAN_PLL_CONTROL_CLK_SEL_SET(x) \ + (((x) << WLAN_PLL_CONTROL_CLK_SEL_LSB) & WLAN_PLL_CONTROL_CLK_SEL_MASK) +#define WLAN_PLL_CONTROL_REFDIV_GET(x) \ + (((x) & WLAN_PLL_CONTROL_REFDIV_MASK) >> WLAN_PLL_CONTROL_REFDIV_LSB) +#define WLAN_PLL_CONTROL_REFDIV_SET(x) \ + (((x) << WLAN_PLL_CONTROL_REFDIV_LSB) & WLAN_PLL_CONTROL_REFDIV_MASK) +#define WLAN_PLL_CONTROL_DIV_GET(x) \ + (((x) & WLAN_PLL_CONTROL_DIV_MASK) >> WLAN_PLL_CONTROL_DIV_LSB) +#define WLAN_PLL_CONTROL_DIV_SET(x) \ + (((x) << WLAN_PLL_CONTROL_DIV_LSB) & WLAN_PLL_CONTROL_DIV_MASK) +#define SOC_CORE_CLK_CTRL_DIV_GET(x) \ + (((x) & SOC_CORE_CLK_CTRL_DIV_MASK) >> SOC_CORE_CLK_CTRL_DIV_LSB) +#define SOC_CORE_CLK_CTRL_DIV_SET(x) \ + (((x) << SOC_CORE_CLK_CTRL_DIV_LSB) & SOC_CORE_CLK_CTRL_DIV_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_GET(x) \ + (((x) & RTC_SYNC_STATUS_PLL_CHANGING_MASK) >> \ + RTC_SYNC_STATUS_PLL_CHANGING_LSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_SET(x) \ + (((x) << RTC_SYNC_STATUS_PLL_CHANGING_LSB) & \ + RTC_SYNC_STATUS_PLL_CHANGING_MASK) +#define SOC_CPU_CLOCK_STANDARD_GET(x) \ + (((x) & SOC_CPU_CLOCK_STANDARD_MASK) >> SOC_CPU_CLOCK_STANDARD_LSB) +#define 
SOC_CPU_CLOCK_STANDARD_SET(x) \ + (((x) << SOC_CPU_CLOCK_STANDARD_LSB) & SOC_CPU_CLOCK_STANDARD_MASK) +/* PLL end */ +#define WLAN_GPIO_PIN0_CONFIG_SET(x) \ + (((x) << GPIO_PIN0_CONFIG_LSB) & GPIO_PIN0_CONFIG_MASK) +#define WLAN_GPIO_PIN0_PAD_PULL_SET(x) \ + (((x) << GPIO_PIN0_PAD_PULL_LSB) & GPIO_PIN0_PAD_PULL_MASK) +#define SI_CONFIG_ERR_INT_SET(x) \ + (((x) << SI_CONFIG_ERR_INT_LSB) & SI_CONFIG_ERR_INT_MASK) + + +#ifdef QCA_WIFI_3_0_ADRASTEA +#define Q6_ENABLE_REGISTER_0 \ + (scn->targetdef->d_Q6_ENABLE_REGISTER_0) +#define Q6_ENABLE_REGISTER_1 \ + (scn->targetdef->d_Q6_ENABLE_REGISTER_1) +#define Q6_CAUSE_REGISTER_0 \ + (scn->targetdef->d_Q6_CAUSE_REGISTER_0) +#define Q6_CAUSE_REGISTER_1 \ + (scn->targetdef->d_Q6_CAUSE_REGISTER_1) +#define Q6_CLEAR_REGISTER_0 \ + (scn->targetdef->d_Q6_CLEAR_REGISTER_0) +#define Q6_CLEAR_REGISTER_1 \ + (scn->targetdef->d_Q6_CLEAR_REGISTER_1) +#endif + +#ifdef CONFIG_BYPASS_QMI +#define BYPASS_QMI_TEMP_REGISTER \ + (scn->targetdef->d_BYPASS_QMI_TEMP_REGISTER) +#endif + +#define A_SOC_PCIE_PCIE_BAR0_START (scn->hostdef->d_A_SOC_PCIE_PCIE_BAR0_START) +#define DESC_DATA_FLAG_MASK (scn->hostdef->d_DESC_DATA_FLAG_MASK) +#define MUX_ID_MASK (scn->hostdef->d_MUX_ID_MASK) +#define TRANSACTION_ID_MASK (scn->hostdef->d_TRANSACTION_ID_MASK) +#define HOST_CE_COUNT (scn->hostdef->d_HOST_CE_COUNT) +#define ENABLE_MSI (scn->hostdef->d_ENABLE_MSI) +#define INT_STATUS_ENABLE_ERROR_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_ERROR_LSB) +#define INT_STATUS_ENABLE_ERROR_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_ERROR_MASK) +#define INT_STATUS_ENABLE_CPU_LSB (scn->hostdef->d_INT_STATUS_ENABLE_CPU_LSB) +#define INT_STATUS_ENABLE_CPU_MASK (scn->hostdef->d_INT_STATUS_ENABLE_CPU_MASK) +#define INT_STATUS_ENABLE_COUNTER_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_COUNTER_LSB) +#define INT_STATUS_ENABLE_COUNTER_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_COUNTER_MASK) +#define INT_STATUS_ENABLE_MBOX_DATA_LSB \ + 
(scn->hostdef->d_INT_STATUS_ENABLE_MBOX_DATA_LSB) +#define INT_STATUS_ENABLE_MBOX_DATA_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_MBOX_DATA_MASK) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB \ + (scn->hostdef->d_COUNTER_INT_STATUS_ENABLE_BIT_LSB) +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK \ + (scn->hostdef->d_COUNTER_INT_STATUS_ENABLE_BIT_MASK) +#define INT_STATUS_ENABLE_ADDRESS \ + (scn->hostdef->d_INT_STATUS_ENABLE_ADDRESS) +#define CPU_INT_STATUS_ENABLE_BIT_LSB \ + (scn->hostdef->d_CPU_INT_STATUS_ENABLE_BIT_LSB) +#define CPU_INT_STATUS_ENABLE_BIT_MASK \ + (scn->hostdef->d_CPU_INT_STATUS_ENABLE_BIT_MASK) +#define HOST_INT_STATUS_ADDRESS (scn->hostdef->d_HOST_INT_STATUS_ADDRESS) +#define CPU_INT_STATUS_ADDRESS (scn->hostdef->d_CPU_INT_STATUS_ADDRESS) +#define ERROR_INT_STATUS_ADDRESS (scn->hostdef->d_ERROR_INT_STATUS_ADDRESS) +#define ERROR_INT_STATUS_WAKEUP_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_WAKEUP_MASK) +#define ERROR_INT_STATUS_WAKEUP_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_WAKEUP_LSB) +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK) +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB) +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_TX_OVERFLOW_MASK) +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_TX_OVERFLOW_LSB) +#define COUNT_DEC_ADDRESS (scn->hostdef->d_COUNT_DEC_ADDRESS) +#define HOST_INT_STATUS_CPU_MASK (scn->hostdef->d_HOST_INT_STATUS_CPU_MASK) +#define 
HOST_INT_STATUS_CPU_LSB (scn->hostdef->d_HOST_INT_STATUS_CPU_LSB) +#define HOST_INT_STATUS_ERROR_MASK (scn->hostdef->d_HOST_INT_STATUS_ERROR_MASK) +#define HOST_INT_STATUS_ERROR_LSB (scn->hostdef->d_HOST_INT_STATUS_ERROR_LSB) +#define HOST_INT_STATUS_COUNTER_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_COUNTER_MASK) +#define HOST_INT_STATUS_COUNTER_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_COUNTER_LSB) +#define RX_LOOKAHEAD_VALID_ADDRESS (scn->hostdef->d_RX_LOOKAHEAD_VALID_ADDRESS) +#define WINDOW_DATA_ADDRESS (scn->hostdef->d_WINDOW_DATA_ADDRESS) +#define WINDOW_READ_ADDR_ADDRESS (scn->hostdef->d_WINDOW_READ_ADDR_ADDRESS) +#define WINDOW_WRITE_ADDR_ADDRESS (scn->hostdef->d_WINDOW_WRITE_ADDR_ADDRESS) +#define SOC_GLOBAL_RESET_ADDRESS (scn->hostdef->d_SOC_GLOBAL_RESET_ADDRESS) +#define RTC_STATE_ADDRESS (scn->hostdef->d_RTC_STATE_ADDRESS) +#define RTC_STATE_COLD_RESET_MASK (scn->hostdef->d_RTC_STATE_COLD_RESET_MASK) +#define PCIE_LOCAL_BASE_ADDRESS (scn->hostdef->d_PCIE_LOCAL_BASE_ADDRESS) +#define PCIE_SOC_WAKE_RESET (scn->hostdef->d_PCIE_SOC_WAKE_RESET) +#define PCIE_SOC_WAKE_ADDRESS (scn->hostdef->d_PCIE_SOC_WAKE_ADDRESS) +#define PCIE_SOC_WAKE_V_MASK (scn->hostdef->d_PCIE_SOC_WAKE_V_MASK) +#define RTC_STATE_V_MASK (scn->hostdef->d_RTC_STATE_V_MASK) +#define RTC_STATE_V_LSB (scn->hostdef->d_RTC_STATE_V_LSB) +#define FW_IND_EVENT_PENDING (scn->hostdef->d_FW_IND_EVENT_PENDING) +#define FW_IND_INITIALIZED (scn->hostdef->d_FW_IND_INITIALIZED) +#define FW_IND_HELPER (scn->hostdef->d_FW_IND_HELPER) +#define RTC_STATE_V_ON (scn->hostdef->d_RTC_STATE_V_ON) + +#define FW_IND_HOST_READY (scn->hostdef->d_FW_IND_HOST_READY) + +#if defined(SDIO_3_0) +#define HOST_INT_STATUS_MBOX_DATA_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_MBOX_DATA_MASK) +#define HOST_INT_STATUS_MBOX_DATA_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_MBOX_DATA_LSB) +#endif + +#if !defined(SOC_PCIE_BASE_ADDRESS) +#define SOC_PCIE_BASE_ADDRESS 0 +#endif + +#if !defined(PCIE_SOC_RDY_STATUS_ADDRESS) +#define 
PCIE_SOC_RDY_STATUS_ADDRESS 0 +#define PCIE_SOC_RDY_STATUS_BAR_MASK 0 +#endif + +#if !defined(MSI_MAGIC_ADR_ADDRESS) +#define MSI_MAGIC_ADR_ADDRESS 0 +#define MSI_MAGIC_ADDRESS 0 +#endif + +/* SET/GET macros */ +#define INT_STATUS_ENABLE_ERROR_SET(x) \ + (((x) << INT_STATUS_ENABLE_ERROR_LSB) & INT_STATUS_ENABLE_ERROR_MASK) +#define INT_STATUS_ENABLE_CPU_SET(x) \ + (((x) << INT_STATUS_ENABLE_CPU_LSB) & INT_STATUS_ENABLE_CPU_MASK) +#define INT_STATUS_ENABLE_COUNTER_SET(x) \ + (((x) << INT_STATUS_ENABLE_COUNTER_LSB) & \ + INT_STATUS_ENABLE_COUNTER_MASK) +#define INT_STATUS_ENABLE_MBOX_DATA_SET(x) \ + (((x) << INT_STATUS_ENABLE_MBOX_DATA_LSB) & \ + INT_STATUS_ENABLE_MBOX_DATA_MASK) +#define CPU_INT_STATUS_ENABLE_BIT_SET(x) \ + (((x) << CPU_INT_STATUS_ENABLE_BIT_LSB) & \ + CPU_INT_STATUS_ENABLE_BIT_MASK) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(x) \ + (((x) << ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) & \ + ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(x) \ + (((x) << ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) & \ + ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) +#define COUNTER_INT_STATUS_ENABLE_BIT_SET(x) \ + (((x) << COUNTER_INT_STATUS_ENABLE_BIT_LSB) & \ + COUNTER_INT_STATUS_ENABLE_BIT_MASK) +#define ERROR_INT_STATUS_WAKEUP_GET(x) \ + (((x) & ERROR_INT_STATUS_WAKEUP_MASK) >> \ + ERROR_INT_STATUS_WAKEUP_LSB) +#define ERROR_INT_STATUS_RX_UNDERFLOW_GET(x) \ + (((x) & ERROR_INT_STATUS_RX_UNDERFLOW_MASK) >> \ + ERROR_INT_STATUS_RX_UNDERFLOW_LSB) +#define ERROR_INT_STATUS_TX_OVERFLOW_GET(x) \ + (((x) & ERROR_INT_STATUS_TX_OVERFLOW_MASK) >> \ + ERROR_INT_STATUS_TX_OVERFLOW_LSB) +#define HOST_INT_STATUS_CPU_GET(x) \ + (((x) & HOST_INT_STATUS_CPU_MASK) >> HOST_INT_STATUS_CPU_LSB) +#define HOST_INT_STATUS_ERROR_GET(x) \ + (((x) & HOST_INT_STATUS_ERROR_MASK) >> HOST_INT_STATUS_ERROR_LSB) +#define HOST_INT_STATUS_COUNTER_GET(x) \ + (((x) & HOST_INT_STATUS_COUNTER_MASK) >> HOST_INT_STATUS_COUNTER_LSB) +#define RTC_STATE_V_GET(x) \ + (((x) & 
RTC_STATE_V_MASK) >> RTC_STATE_V_LSB) +#if defined(SDIO_3_0) +#define HOST_INT_STATUS_MBOX_DATA_GET(x) \ + (((x) & HOST_INT_STATUS_MBOX_DATA_MASK) >> \ + HOST_INT_STATUS_MBOX_DATA_LSB) +#endif + +#define INVALID_REG_LOC_DUMMY_DATA 0xAA + +#define AR6320_CORE_CLK_DIV_ADDR 0x403fa8 +#define AR6320_CPU_PLL_INIT_DONE_ADDR 0x403fd0 +#define AR6320_CPU_SPEED_ADDR 0x403fa4 +#define AR6320V2_CORE_CLK_DIV_ADDR 0x403fd8 +#define AR6320V2_CPU_PLL_INIT_DONE_ADDR 0x403fd0 +#define AR6320V2_CPU_SPEED_ADDR 0x403fd4 +#define AR6320V3_CORE_CLK_DIV_ADDR 0x404028 +#define AR6320V3_CPU_PLL_INIT_DONE_ADDR 0x404020 +#define AR6320V3_CPU_SPEED_ADDR 0x404024 + +enum a_refclk_speed_t { + SOC_REFCLK_UNKNOWN = -1, /* Unsupported ref clock -- use PLL Bypass */ + SOC_REFCLK_48_MHZ = 0, + SOC_REFCLK_19_2_MHZ = 1, + SOC_REFCLK_24_MHZ = 2, + SOC_REFCLK_26_MHZ = 3, + SOC_REFCLK_37_4_MHZ = 4, + SOC_REFCLK_38_4_MHZ = 5, + SOC_REFCLK_40_MHZ = 6, + SOC_REFCLK_52_MHZ = 7, +}; + +#define A_REFCLK_UNKNOWN SOC_REFCLK_UNKNOWN +#define A_REFCLK_48_MHZ SOC_REFCLK_48_MHZ +#define A_REFCLK_19_2_MHZ SOC_REFCLK_19_2_MHZ +#define A_REFCLK_24_MHZ SOC_REFCLK_24_MHZ +#define A_REFCLK_26_MHZ SOC_REFCLK_26_MHZ +#define A_REFCLK_37_4_MHZ SOC_REFCLK_37_4_MHZ +#define A_REFCLK_38_4_MHZ SOC_REFCLK_38_4_MHZ +#define A_REFCLK_40_MHZ SOC_REFCLK_40_MHZ +#define A_REFCLK_52_MHZ SOC_REFCLK_52_MHZ + +#define TARGET_CPU_FREQ 176000000 + +struct wlan_pll_s { + uint32_t refdiv; + uint32_t div; + uint32_t rnfrac; + uint32_t outdiv; +}; + +struct cmnos_clock_s { + enum a_refclk_speed_t refclk_speed; + uint32_t refclk_hz; + uint32_t pll_settling_time; /* 50us */ + struct wlan_pll_s wlan_pll; +}; + +struct tgt_reg_section { + uint32_t start_addr; + uint32_t end_addr; +}; + +struct tgt_reg_table { + const struct tgt_reg_section *section; + uint32_t section_size; +}; + +struct hif_softc; +void hif_target_register_tbl_attach(struct hif_softc *scn, u32 target_type); +void hif_register_tbl_attach(struct hif_softc *scn, u32 hif_type); + 
+#endif /* _REGTABLE_PCIE_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/target_reg_init.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/target_reg_init.h new file mode 100644 index 0000000000000000000000000000000000000000..efd09f308db4d394d3f36ff0bf1105b3ff9239e8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/target_reg_init.h @@ -0,0 +1,465 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef TARGET_REG_INIT_H +#define TARGET_REG_INIT_H +#include "reg_struct.h" +#include "targaddrs.h" +/*** WARNING : Add to the end of the TABLE! 
do not change the order ****/ +struct targetdef_s; + + + +#define ATH_UNSUPPORTED_REG_OFFSET UNSUPPORTED_REGISTER_OFFSET +#define ATH_SUPPORTED_BY_TARGET(reg_offset) \ + ((reg_offset) != ATH_UNSUPPORTED_REG_OFFSET) + +#if defined(MY_TARGET_DEF) + +/* Cross-platform compatibility */ +#if !defined(SOC_RESET_CONTROL_OFFSET) && defined(RESET_CONTROL_OFFSET) +#define SOC_RESET_CONTROL_OFFSET RESET_CONTROL_OFFSET +#endif + +#if !defined(CLOCK_GPIO_OFFSET) +#define CLOCK_GPIO_OFFSET ATH_UNSUPPORTED_REG_OFFSET +#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0 +#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0 +#endif + +#if !defined(WLAN_MAC_BASE_ADDRESS) +#define WLAN_MAC_BASE_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(CE0_BASE_ADDRESS) +#define CE0_BASE_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#define CE1_BASE_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#define CE_COUNT 0 +#endif + +#if !defined(MSI_NUM_REQUEST) +#define MSI_NUM_REQUEST 0 +#define MSI_ASSIGN_FW 0 +#define MSI_ASSIGN_CE_INITIAL 0 +#endif + +#if !defined(FW_INDICATOR_ADDRESS) +#define FW_INDICATOR_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(FW_CPU_PLL_CONFIG) +#define FW_CPU_PLL_CONFIG ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(DRAM_BASE_ADDRESS) +#define DRAM_BASE_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(SOC_CORE_BASE_ADDRESS) +#define SOC_CORE_BASE_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(CPU_INTR_ADDRESS) +#define CPU_INTR_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(SOC_LF_TIMER_CONTROL0_ADDRESS) +#define SOC_LF_TIMER_CONTROL0_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(SOC_RESET_CONTROL_ADDRESS) +#define SOC_RESET_CONTROL_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#define SOC_RESET_CONTROL_CE_RST_MASK ATH_UNSUPPORTED_REG_OFFSET +#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(CORE_CTRL_ADDRESS) +#define CORE_CTRL_ADDRESS 
ATH_UNSUPPORTED_REG_OFFSET +#define CORE_CTRL_CPU_INTR_MASK 0 +#endif + +#if !defined(PCIE_INTR_ENABLE_ADDRESS) +#define PCIE_INTR_ENABLE_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#define PCIE_INTR_CLR_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#define PCIE_INTR_FIRMWARE_MASK ATH_UNSUPPORTED_REG_OFFSET +#define PCIE_INTR_CE_MASK_ALL ATH_UNSUPPORTED_REG_OFFSET +#define PCIE_INTR_CAUSE_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(WIFICMN_PCIE_BAR_REG_ADDRESS) +#define WIFICMN_PCIE_BAR_REG_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(WIFICMN_INT_STATUS_ADDRESS) +#define WIFICMN_INT_STATUS_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(FW_AXI_MSI_ADDR) +#define FW_AXI_MSI_ADDR ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(FW_AXI_MSI_DATA) +#define FW_AXI_MSI_DATA ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(WLAN_SUBSYSTEM_CORE_ID_ADDRESS) +#define WLAN_SUBSYSTEM_CORE_ID_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(FPGA_VERSION_ADDRESS) +#define FPGA_VERSION_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#if !defined(SI_CONFIG_ADDRESS) +#define SI_CONFIG_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#define SI_CONFIG_BIDIR_OD_DATA_LSB 0 +#define SI_CONFIG_BIDIR_OD_DATA_MASK 0 +#define SI_CONFIG_I2C_LSB 0 +#define SI_CONFIG_I2C_MASK 0 +#define SI_CONFIG_POS_SAMPLE_LSB 0 +#define SI_CONFIG_POS_SAMPLE_MASK 0 +#define SI_CONFIG_INACTIVE_CLK_LSB 0 +#define SI_CONFIG_INACTIVE_CLK_MASK 0 +#define SI_CONFIG_INACTIVE_DATA_LSB 0 +#define SI_CONFIG_INACTIVE_DATA_MASK 0 +#define SI_CONFIG_DIVIDER_LSB 0 +#define SI_CONFIG_DIVIDER_MASK 0 +#define SI_CONFIG_OFFSET 0 +#define SI_TX_DATA0_OFFSET ATH_UNSUPPORTED_REG_OFFSET +#define SI_TX_DATA1_OFFSET ATH_UNSUPPORTED_REG_OFFSET +#define SI_RX_DATA0_OFFSET ATH_UNSUPPORTED_REG_OFFSET +#define SI_RX_DATA1_OFFSET ATH_UNSUPPORTED_REG_OFFSET +#define SI_CS_OFFSET ATH_UNSUPPORTED_REG_OFFSET +#define SI_CS_DONE_ERR_MASK 0 +#define SI_CS_DONE_INT_MASK 0 +#define SI_CS_START_LSB 0 +#define SI_CS_START_MASK 0 
+#define SI_CS_RX_CNT_LSB 0 +#define SI_CS_RX_CNT_MASK 0 +#define SI_CS_TX_CNT_LSB 0 +#define SI_CS_TX_CNT_MASK 0 +#endif + +#ifndef SI_BASE_ADDRESS +#define SI_BASE_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#ifndef WLAN_GPIO_PIN10_ADDRESS +#define WLAN_GPIO_PIN10_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#ifndef WLAN_GPIO_PIN11_ADDRESS +#define WLAN_GPIO_PIN11_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#ifndef WLAN_GPIO_PIN12_ADDRESS +#define WLAN_GPIO_PIN12_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#ifndef WLAN_GPIO_PIN13_ADDRESS +#define WLAN_GPIO_PIN13_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +#ifndef WIFICMN_INT_STATUS_ADDRESS +#define WIFICMN_INT_STATUS_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif + +static struct targetdef_s my_target_def = { + .d_RTC_SOC_BASE_ADDRESS = RTC_SOC_BASE_ADDRESS, + .d_RTC_WMAC_BASE_ADDRESS = RTC_WMAC_BASE_ADDRESS, + .d_SYSTEM_SLEEP_OFFSET = WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_OFFSET = WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_DISABLE_LSB = WLAN_SYSTEM_SLEEP_DISABLE_LSB, + .d_WLAN_SYSTEM_SLEEP_DISABLE_MASK = WLAN_SYSTEM_SLEEP_DISABLE_MASK, + .d_CLOCK_CONTROL_OFFSET = CLOCK_CONTROL_OFFSET, + .d_CLOCK_CONTROL_SI0_CLK_MASK = CLOCK_CONTROL_SI0_CLK_MASK, + .d_RESET_CONTROL_OFFSET = SOC_RESET_CONTROL_OFFSET, + .d_RESET_CONTROL_SI0_RST_MASK = RESET_CONTROL_SI0_RST_MASK, + .d_WLAN_RESET_CONTROL_OFFSET = WLAN_RESET_CONTROL_OFFSET, + .d_WLAN_RESET_CONTROL_COLD_RST_MASK = WLAN_RESET_CONTROL_COLD_RST_MASK, + .d_WLAN_RESET_CONTROL_WARM_RST_MASK = WLAN_RESET_CONTROL_WARM_RST_MASK, + .d_GPIO_BASE_ADDRESS = GPIO_BASE_ADDRESS, + .d_GPIO_PIN0_OFFSET = GPIO_PIN0_OFFSET, + .d_GPIO_PIN1_OFFSET = GPIO_PIN1_OFFSET, + .d_GPIO_PIN0_CONFIG_MASK = GPIO_PIN0_CONFIG_MASK, + .d_GPIO_PIN1_CONFIG_MASK = GPIO_PIN1_CONFIG_MASK, + .d_SI_CONFIG_BIDIR_OD_DATA_LSB = SI_CONFIG_BIDIR_OD_DATA_LSB, + .d_SI_CONFIG_BIDIR_OD_DATA_MASK = SI_CONFIG_BIDIR_OD_DATA_MASK, + .d_SI_CONFIG_I2C_LSB = SI_CONFIG_I2C_LSB, + .d_SI_CONFIG_I2C_MASK = 
SI_CONFIG_I2C_MASK, + .d_SI_CONFIG_POS_SAMPLE_LSB = SI_CONFIG_POS_SAMPLE_LSB, + .d_SI_CONFIG_POS_SAMPLE_MASK = SI_CONFIG_POS_SAMPLE_MASK, + .d_SI_CONFIG_INACTIVE_CLK_LSB = SI_CONFIG_INACTIVE_CLK_LSB, + .d_SI_CONFIG_INACTIVE_CLK_MASK = SI_CONFIG_INACTIVE_CLK_MASK, + .d_SI_CONFIG_INACTIVE_DATA_LSB = SI_CONFIG_INACTIVE_DATA_LSB, + .d_SI_CONFIG_INACTIVE_DATA_MASK = SI_CONFIG_INACTIVE_DATA_MASK, + .d_SI_CONFIG_DIVIDER_LSB = SI_CONFIG_DIVIDER_LSB, + .d_SI_CONFIG_DIVIDER_MASK = SI_CONFIG_DIVIDER_MASK, + .d_SI_BASE_ADDRESS = SI_BASE_ADDRESS, + .d_SI_CONFIG_OFFSET = SI_CONFIG_OFFSET, + .d_SI_TX_DATA0_OFFSET = SI_TX_DATA0_OFFSET, + .d_SI_TX_DATA1_OFFSET = SI_TX_DATA1_OFFSET, + .d_SI_RX_DATA0_OFFSET = SI_RX_DATA0_OFFSET, + .d_SI_RX_DATA1_OFFSET = SI_RX_DATA1_OFFSET, + .d_SI_CS_OFFSET = SI_CS_OFFSET, + .d_SI_CS_DONE_ERR_MASK = SI_CS_DONE_ERR_MASK, + .d_SI_CS_DONE_INT_MASK = SI_CS_DONE_INT_MASK, + .d_SI_CS_START_LSB = SI_CS_START_LSB, + .d_SI_CS_START_MASK = SI_CS_START_MASK, + .d_SI_CS_RX_CNT_LSB = SI_CS_RX_CNT_LSB, + .d_SI_CS_RX_CNT_MASK = SI_CS_RX_CNT_MASK, + .d_SI_CS_TX_CNT_LSB = SI_CS_TX_CNT_LSB, + .d_SI_CS_TX_CNT_MASK = SI_CS_TX_CNT_MASK, + .d_BOARD_DATA_SZ = MY_TARGET_BOARD_DATA_SZ, + .d_BOARD_EXT_DATA_SZ = MY_TARGET_BOARD_EXT_DATA_SZ, + .d_MBOX_BASE_ADDRESS = MBOX_BASE_ADDRESS, + .d_LOCAL_SCRATCH_OFFSET = LOCAL_SCRATCH_OFFSET, + .d_CPU_CLOCK_OFFSET = CPU_CLOCK_OFFSET, + .d_GPIO_PIN10_OFFSET = GPIO_PIN10_OFFSET, + .d_GPIO_PIN11_OFFSET = GPIO_PIN11_OFFSET, + .d_GPIO_PIN12_OFFSET = GPIO_PIN12_OFFSET, + .d_GPIO_PIN13_OFFSET = GPIO_PIN13_OFFSET, + .d_CLOCK_GPIO_OFFSET = CLOCK_GPIO_OFFSET, + .d_CPU_CLOCK_STANDARD_LSB = CPU_CLOCK_STANDARD_LSB, + .d_CPU_CLOCK_STANDARD_MASK = CPU_CLOCK_STANDARD_MASK, + .d_LPO_CAL_ENABLE_LSB = LPO_CAL_ENABLE_LSB, + .d_LPO_CAL_ENABLE_MASK = LPO_CAL_ENABLE_MASK, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB = CLOCK_GPIO_BT_CLK_OUT_EN_LSB, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK = CLOCK_GPIO_BT_CLK_OUT_EN_MASK, + .d_ANALOG_INTF_BASE_ADDRESS = 
ANALOG_INTF_BASE_ADDRESS, + .d_WLAN_MAC_BASE_ADDRESS = WLAN_MAC_BASE_ADDRESS, + .d_FW_INDICATOR_ADDRESS = FW_INDICATOR_ADDRESS, + .d_FW_CPU_PLL_CONFIG = FW_CPU_PLL_CONFIG, + .d_DRAM_BASE_ADDRESS = DRAM_BASE_ADDRESS, + .d_SOC_CORE_BASE_ADDRESS = SOC_CORE_BASE_ADDRESS, + .d_CORE_CTRL_ADDRESS = CORE_CTRL_ADDRESS, + .d_CE_COUNT = CE_COUNT, + .d_MSI_NUM_REQUEST = MSI_NUM_REQUEST, + .d_MSI_ASSIGN_FW = MSI_ASSIGN_FW, + .d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL, + .d_PCIE_INTR_ENABLE_ADDRESS = PCIE_INTR_ENABLE_ADDRESS, + .d_PCIE_INTR_CLR_ADDRESS = PCIE_INTR_CLR_ADDRESS, + .d_PCIE_INTR_FIRMWARE_MASK = PCIE_INTR_FIRMWARE_MASK, + .d_PCIE_INTR_CE_MASK_ALL = PCIE_INTR_CE_MASK_ALL, + .d_CORE_CTRL_CPU_INTR_MASK = CORE_CTRL_CPU_INTR_MASK, + .d_WIFICMN_PCIE_BAR_REG_ADDRESS = WIFICMN_PCIE_BAR_REG_ADDRESS, + /* htt_rx.c */ + /* htt tx */ + .d_MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK + = MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK, + .d_MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK + = MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK, + .d_MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK + = MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK, + .d_MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK + = MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK, + .d_MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB + = MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB, + .d_MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB + = MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB, + .d_MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB + = MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB, + .d_MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB + = MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB, + /* copy_engine.c */ + .d_SR_WR_INDEX_ADDRESS = SR_WR_INDEX_ADDRESS, + .d_DST_WATERMARK_ADDRESS = DST_WATERMARK_ADDRESS, + + .d_PCIE_INTR_CAUSE_ADDRESS = PCIE_INTR_CAUSE_ADDRESS, + .d_SOC_RESET_CONTROL_ADDRESS = SOC_RESET_CONTROL_ADDRESS, + .d_SOC_RESET_CONTROL_CE_RST_MASK = SOC_RESET_CONTROL_CE_RST_MASK, + 
.d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK + = SOC_RESET_CONTROL_CPU_WARM_RST_MASK, + .d_CPU_INTR_ADDRESS = CPU_INTR_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ADDRESS = SOC_LF_TIMER_CONTROL0_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK + = SOC_LF_TIMER_CONTROL0_ENABLE_MASK, + .d_SI_CONFIG_ERR_INT_MASK = SI_CONFIG_ERR_INT_MASK, + .d_SI_CONFIG_ERR_INT_LSB = SI_CONFIG_ERR_INT_LSB, + .d_GPIO_ENABLE_W1TS_LOW_ADDRESS = GPIO_ENABLE_W1TS_LOW_ADDRESS, + .d_GPIO_PIN0_CONFIG_LSB = GPIO_PIN0_CONFIG_LSB, + .d_GPIO_PIN0_PAD_PULL_LSB = GPIO_PIN0_PAD_PULL_LSB, + .d_GPIO_PIN0_PAD_PULL_MASK = GPIO_PIN0_PAD_PULL_MASK, + .d_SOC_CHIP_ID_ADDRESS = SOC_CHIP_ID_ADDRESS, + .d_SOC_CHIP_ID_REVISION_MASK = SOC_CHIP_ID_REVISION_MASK, + .d_SOC_CHIP_ID_REVISION_LSB = SOC_CHIP_ID_REVISION_LSB, + .d_SOC_CHIP_ID_REVISION_MSB = SOC_CHIP_ID_REVISION_MSB, + .d_WIFICMN_PCIE_BAR_REG_ADDRESS = WIFICMN_PCIE_BAR_REG_ADDRESS, + .d_FW_AXI_MSI_ADDR = FW_AXI_MSI_ADDR, + .d_FW_AXI_MSI_DATA = FW_AXI_MSI_DATA, + .d_WLAN_SUBSYSTEM_CORE_ID_ADDRESS = WLAN_SUBSYSTEM_CORE_ID_ADDRESS, + .d_WIFICMN_INT_STATUS_ADDRESS = WIFICMN_INT_STATUS_ADDRESS, +}; + +struct targetdef_s *MY_TARGET_DEF = &my_target_def; +#else +#endif + +#if defined(MY_CEREG_DEF) + +#if !defined(CE_DDR_ADDRESS_FOR_RRI_LOW) +#define CE_DDR_ADDRESS_FOR_RRI_LOW ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(CE_DDR_ADDRESS_FOR_RRI_HIGH) +#define CE_DDR_ADDRESS_FOR_RRI_HIGH ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(SR_BA_ADDRESS_HIGH) +#define SR_BA_ADDRESS_HIGH ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(DR_BA_ADDRESS_HIGH) +#define DR_BA_ADDRESS_HIGH ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(CE_CMD_REGISTER) +#define CE_CMD_REGISTER ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(CE_MSI_ADDRESS) +#define CE_MSI_ADDRESS ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(CE_MSI_ADDRESS_HIGH) +#define CE_MSI_ADDRESS_HIGH ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(CE_MSI_DATA) +#define CE_MSI_DATA ATH_UNSUPPORTED_REG_OFFSET +#endif +#if 
!defined(CE_MSI_ENABLE_BIT) +#define CE_MSI_ENABLE_BIT ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(CE_CTRL1_IDX_UPD_EN_MASK) +#define CE_CTRL1_IDX_UPD_EN_MASK ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(CE_WRAPPER_DEBUG_OFFSET) +#define CE_WRAPPER_DEBUG_OFFSET ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(CE_DEBUG_OFFSET) +#define CE_DEBUG_OFFSET ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES) +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS) +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(HOST_IE_ADDRESS_2) +#define HOST_IE_ADDRESS_2 ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(HOST_IE_ADDRESS_3) +#define HOST_IE_ADDRESS_3 ATH_UNSUPPORTED_REG_OFFSET +#endif +#if !defined(HOST_IE_REG1_CE_LSB) +#define HOST_IE_REG1_CE_LSB 0 +#endif +#if !defined(HOST_IE_REG2_CE_LSB) +#define HOST_IE_REG2_CE_LSB 0 +#endif +#if !defined(HOST_IE_REG3_CE_LSB) +#define HOST_IE_REG3_CE_LSB 0 +#endif + +static struct ce_reg_def my_ce_reg_def = { + /* copy_engine.c */ + .d_DST_WR_INDEX_ADDRESS = DST_WR_INDEX_ADDRESS, + .d_SRC_WATERMARK_ADDRESS = SRC_WATERMARK_ADDRESS, + .d_SRC_WATERMARK_LOW_MASK = SRC_WATERMARK_LOW_MASK, + .d_SRC_WATERMARK_HIGH_MASK = SRC_WATERMARK_HIGH_MASK, + .d_DST_WATERMARK_LOW_MASK = DST_WATERMARK_LOW_MASK, + .d_DST_WATERMARK_HIGH_MASK = DST_WATERMARK_HIGH_MASK, + .d_CURRENT_SRRI_ADDRESS = CURRENT_SRRI_ADDRESS, + .d_CURRENT_DRRI_ADDRESS = CURRENT_DRRI_ADDRESS, + .d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK + = HOST_IS_SRC_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK + = HOST_IS_SRC_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK + = HOST_IS_DST_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_DST_RING_LOW_WATERMARK_MASK + = HOST_IS_DST_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_ADDRESS = HOST_IS_ADDRESS, + .d_MISC_IS_ADDRESS = MISC_IS_ADDRESS, + 
.d_HOST_IS_COPY_COMPLETE_MASK = HOST_IS_COPY_COMPLETE_MASK, + .d_CE_WRAPPER_BASE_ADDRESS = CE_WRAPPER_BASE_ADDRESS, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS + = CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS, + .d_CE_DDR_ADDRESS_FOR_RRI_LOW = CE_DDR_ADDRESS_FOR_RRI_LOW, + .d_CE_DDR_ADDRESS_FOR_RRI_HIGH = CE_DDR_ADDRESS_FOR_RRI_HIGH, + .d_HOST_IE_ADDRESS = HOST_IE_ADDRESS, + .d_HOST_IE_REG1_CE_LSB = HOST_IE_REG1_CE_LSB, + .d_HOST_IE_ADDRESS_2 = HOST_IE_ADDRESS_2, + .d_HOST_IE_REG2_CE_LSB = HOST_IE_REG2_CE_LSB, + .d_HOST_IE_ADDRESS_3 = HOST_IE_ADDRESS_3, + .d_HOST_IE_REG3_CE_LSB = HOST_IE_REG3_CE_LSB, + .d_HOST_IE_COPY_COMPLETE_MASK = HOST_IE_COPY_COMPLETE_MASK, + .d_SR_BA_ADDRESS = SR_BA_ADDRESS, + .d_SR_BA_ADDRESS_HIGH = SR_BA_ADDRESS_HIGH, + .d_SR_SIZE_ADDRESS = SR_SIZE_ADDRESS, + .d_CE_CTRL1_ADDRESS = CE_CTRL1_ADDRESS, + .d_CE_CTRL1_DMAX_LENGTH_MASK = CE_CTRL1_DMAX_LENGTH_MASK, + .d_DR_BA_ADDRESS = DR_BA_ADDRESS, + .d_DR_BA_ADDRESS_HIGH = DR_BA_ADDRESS_HIGH, + .d_DR_SIZE_ADDRESS = DR_SIZE_ADDRESS, + .d_CE_CMD_REGISTER = CE_CMD_REGISTER, + .d_CE_MSI_ADDRESS = CE_MSI_ADDRESS, + .d_CE_MSI_ADDRESS_HIGH = CE_MSI_ADDRESS_HIGH, + .d_CE_MSI_DATA = CE_MSI_DATA, + .d_CE_MSI_ENABLE_BIT = CE_MSI_ENABLE_BIT, + .d_MISC_IE_ADDRESS = MISC_IE_ADDRESS, + .d_MISC_IS_AXI_ERR_MASK = MISC_IS_AXI_ERR_MASK, + .d_MISC_IS_DST_ADDR_ERR_MASK = MISC_IS_DST_ADDR_ERR_MASK, + .d_MISC_IS_SRC_LEN_ERR_MASK = MISC_IS_SRC_LEN_ERR_MASK, + .d_MISC_IS_DST_MAX_LEN_VIO_MASK = MISC_IS_DST_MAX_LEN_VIO_MASK, + .d_MISC_IS_DST_RING_OVERFLOW_MASK = MISC_IS_DST_RING_OVERFLOW_MASK, + .d_MISC_IS_SRC_RING_OVERFLOW_MASK = MISC_IS_SRC_RING_OVERFLOW_MASK, + .d_SRC_WATERMARK_LOW_LSB = SRC_WATERMARK_LOW_LSB, + .d_SRC_WATERMARK_HIGH_LSB = SRC_WATERMARK_HIGH_LSB, + .d_DST_WATERMARK_LOW_LSB = DST_WATERMARK_LOW_LSB, + .d_DST_WATERMARK_HIGH_LSB = DST_WATERMARK_HIGH_LSB, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK + = CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB + = 
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB, + .d_CE_CTRL1_DMAX_LENGTH_LSB = CE_CTRL1_DMAX_LENGTH_LSB, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK + = CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK + = CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB + = CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB + = CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB, + .d_CE_CTRL1_IDX_UPD_EN_MASK = CE_CTRL1_IDX_UPD_EN_MASK, + .d_CE_WRAPPER_DEBUG_OFFSET = CE_WRAPPER_DEBUG_OFFSET, + .d_CE_WRAPPER_DEBUG_SEL_MSB = CE_WRAPPER_DEBUG_SEL_MSB, + .d_CE_WRAPPER_DEBUG_SEL_LSB = CE_WRAPPER_DEBUG_SEL_LSB, + .d_CE_WRAPPER_DEBUG_SEL_MASK = CE_WRAPPER_DEBUG_SEL_MASK, + .d_CE_DEBUG_OFFSET = CE_DEBUG_OFFSET, + .d_CE_DEBUG_SEL_MSB = CE_DEBUG_SEL_MSB, + .d_CE_DEBUG_SEL_LSB = CE_DEBUG_SEL_LSB, + .d_CE_DEBUG_SEL_MASK = CE_DEBUG_SEL_MASK, + .d_CE0_BASE_ADDRESS = CE0_BASE_ADDRESS, + .d_CE1_BASE_ADDRESS = CE1_BASE_ADDRESS, + .d_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES + = A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES, + .d_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS + = A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS +}; + +struct ce_reg_def *MY_CEREG_DEF = &my_ce_reg_def; + +#else +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/target_type.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/target_type.h new file mode 100644 index 0000000000000000000000000000000000000000..74a92b8bb3a1f93ebcafcc69ac477f82be4d7c56 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/target_type.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _TARGET_TYPE_H_ +#define _TARGET_TYPE_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* Header files */ + +/* TARGET definition needs to be abstracted in fw common + * header files, below is the placeholder till WIN codebase + * moved to latest copy of fw common header files. + */ +#ifdef CONFIG_WIN +#define TARGET_TYPE_UNKNOWN 0 +#define TARGET_TYPE_AR6001 1 +#define TARGET_TYPE_AR6002 2 +#define TARGET_TYPE_AR6003 3 +#define TARGET_TYPE_AR6004 5 +#define TARGET_TYPE_AR6006 6 +#define TARGET_TYPE_AR9888 7 +#define TARGET_TYPE_AR900B 9 +#define TARGET_TYPE_QCA9984 10 +#define TARGET_TYPE_IPQ4019 11 +#define TARGET_TYPE_QCA9888 12 +/* For attach Peregrine 2.0 board target_reg_tbl only */ +#define TARGET_TYPE_AR9888V2 13 +/* For attach Rome1.0 target_reg_tbl only*/ +#define TARGET_TYPE_AR6320V1 14 +/* For Rome2.0/2.1 target_reg_tbl ID*/ +#define TARGET_TYPE_AR6320V2 15 +/* For Rome3.0 target_reg_tbl ID*/ +#define TARGET_TYPE_AR6320V3 16 +/* For Tufello1.0 target_reg_tbl ID*/ +#define TARGET_TYPE_QCA9377V1 17 +#endif /* CONFIG_WIN */ +#define TARGET_TYPE_AR6320 8 +/* For Adrastea target */ +#define TARGET_TYPE_ADRASTEA 19 +#ifndef TARGET_TYPE_QCA8074 +#define TARGET_TYPE_QCA8074 20 +#endif +#ifndef TARGET_TYPE_QCA6290 +#define TARGET_TYPE_QCA6290 21 +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _TARGET_TYPE_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/targetdef.h 
b/drivers/staging/qca-wifi-host-cmn/hif/inc/targetdef.h new file mode 100644 index 0000000000000000000000000000000000000000..341e3a4a7ebbd83c13ad458a0625463c4b62ba3b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/targetdef.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef TARGETDEFS_H_ +#define TARGETDEFS_H_ + +#include +#include +#include +#include "target_reg_init.h" + +extern struct targetdef_s *AR6002_TARGETdef; +extern struct targetdef_s *AR6003_TARGETdef; +extern struct targetdef_s *AR6004_TARGETdef; +extern struct targetdef_s *AR9888_TARGETdef; +extern struct targetdef_s *AR9888V2_TARGETdef; +extern struct targetdef_s *AR6320_TARGETdef; +extern struct targetdef_s *AR900B_TARGETdef; +extern struct targetdef_s *QCA9984_TARGETdef; +extern struct targetdef_s *QCA9888_TARGETdef; +extern struct targetdef_s *QCA6290_TARGETdef; +#ifdef ATH_AHB +extern struct targetdef_s *IPQ4019_TARGETdef; +#endif +extern struct targetdef_s *QCA8074_TARGETdef; + +extern struct ce_reg_def *AR6002_CE_TARGETdef; +extern struct ce_reg_def *AR6003_CE_TARGETdef; +extern struct ce_reg_def *AR6004_CE_TARGETdef; +extern struct ce_reg_def *AR9888_CE_TARGETdef; +extern struct ce_reg_def *AR9888V2_CE_TARGETdef; +extern struct ce_reg_def *AR6320_CE_TARGETdef; +extern struct ce_reg_def *AR900B_CE_TARGETdef; +extern struct ce_reg_def *QCA9984_CE_TARGETdef; +extern struct ce_reg_def *QCA9888_CE_TARGETdef; +extern struct ce_reg_def *QCA6290_CE_TARGETdef; +#ifdef ATH_AHB +extern struct ce_reg_def *IPQ4019_CE_TARGETdef; +#endif +extern struct ce_reg_def *QCA8074_CE_TARGETdef; + + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/adrastea_reg_def.h b/drivers/staging/qca-wifi-host-cmn/hif/src/adrastea_reg_def.h new file mode 100644 index 0000000000000000000000000000000000000000..ab399fed86987466ca02d6d8b4cd43254d2dab71 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/adrastea_reg_def.h @@ -0,0 +1,2367 @@ +/* + * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef ADRASTEA_REG_DEF_H +#define ADRASTEA_REG_DEF_H + +/* + * Start auto-generated headers from register parser + * + * DO NOT CHANGE MANUALLY +*/ + + +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__SRC_FLUSH___S 1 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW (0x00241000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__SRC_RING_BYTE_SWAP_EN___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE___M 0x00000001 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__CE_INTR_MISC_P___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2 (0x00030028) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__FORCE_WAKE_CLEAR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__EXTERNAL_INTR___POR 0x000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW (0x00244000) +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SOFT_RESET___M 0x00000001 +#define 
ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR___M 0x000003FF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_CLEAR___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_DIRTY___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__HALT_STATUS___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE (0x00032060) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6 (0x00030038) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__CE_INTR_TIMEOUT_P___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SW_SCRATCH (0x00032064) +#define ADRASTEA_A_WCSS_SR_APSS_ADDRESS_VALID__BITS___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_DIRTY___M 0x00FFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__ERR_RESP_CLEAR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__FORCE_WAKE_CLEAR___S 1 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22___RWC QCSR_REG_RW +#define 
ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__ECAHB_TIMEOUT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__MCIM_INT___M 0x00000010 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__WLAN2_HW2SW_GRANT___M 0x00000080 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE__SIZE___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__ECAHB_TIMEOUT___M 0x00000010 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__SRC_RING_BYTE_SWAP_EN___S 17 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS___M 0x0003FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_HIGH_WATERMARK___M 0x00000002 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX__SRC_WR_INDEX___M 0x0000FFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY___M 0x00FFF000 +#define ADRASTEA_A_WCSS_SR_APSS_ADDRESS_VALID___M 0x00FFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH__BASE_ADDR_HIGH___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4 (0x00030030) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_LEN_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__CE_INTR_TIMEOUT_P___M 0x00000100 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__ECAHB_TIMEOUT___S 4 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_LOW_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__DIRTY_BIT_SET___M 0x00000001 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX (0x00240040) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS (0x00240038) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__INVALID_BB_1_INTR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SR_PLL_REF_MUX_SEL___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_CLEAR (0x002F1008) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_MAX_LEN_VIO___M 0x00000080 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14___POR 0x00000000 +#define 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__DST_RING_LOW_WATERMARK___M 0x00000010 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_MSB__STATUS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_HIGH_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_LSB___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_CLEAR___M 0x00000FFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__SRC_FLUSH___M 0x00000002 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20 (0x00030070) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12 (0x00032030) +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__CLOCK_GATE_DISABLE___M 0x00000002 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_SET__CE_INTR_LINE_HOST_P___POR 0x000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__EXTERNAL_INTR___S 18 +#define ADRASTEA_A_WCSS_SR_APSS_SR_TESTBUS__SELECT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX__DST_WR_INDEX___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES___M 0x00000FFF +#define 
ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_LSB (0x00032070) +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__WLAN2_HW2SW_GRANT___M 0x00000080 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_OVERFLOW___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__WLAN2_HW2SW_GRANT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_CLEAR__CE_INTR_LINE_HOST_P___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__PMM_SR_XO_SETTLE_TIMEOUT___S 9 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__COPY_COMPLETE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH__BASE_ADDR_HIGH___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE__SIZE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX___M 
0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__DST_RING_HIGH_WATERMARK___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13 (0x00032034) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3 (0x0003002C) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_ADDRESS_VALID (0x000300E0) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22 (0x00032058) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4___RWC QCSR_REG_RO +#define 
ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_OVERFLOW___M 0x00000020 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__MSI_EN___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_MSB___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__VALUE_REG_UPDATED_WITH_INVALID_ADDR___M 0x00000020 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_CLEAR___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE__SIZE___M 0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__VALUE_REG_UPDATED_WITH_INVALID_ADDR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__SRC_RING_LOW_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14 (0x00030058) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH___M 0x0000007F +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__CLOCK_GATE_DISABLE___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__IDX_UPD_EN___S 19 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_MSB__STATUS___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0__VALUE_REGISTER___M 0xFFFFFFFF +#define 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1 (0x00032004) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_OVERFLOW___S 5 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK__SR_HIGH_WATER_MARK_THRESHOLD___M 0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__BMH_INT___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS__WCSS_CORE_WAKE_SLEEP_STATE___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_LOW_WATERMARK___M 0x00000004 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__DIRTY_BIT_SET_ENABLE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_SET__CE_INTR_LINE_HOST_P___M 0x00000FFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14 (0x00032038) +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SR_RF_XO_MUX_SEL___M 0x00000010 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__COPY_COMPLETE___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12___M 0x003FFFFF +#define 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__MISC___M 0x00000FFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__PMM_SR_XO_SETTLE_TIMEOUT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_COMMIT_REPLAY__ENABLE___M 0x00000001 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_ADDRESS_VALID___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LCMH_STROBE_INTERRUPT___S 1 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_MSB___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__DST_RING_LOW_WATERMARK___M 0x00000010 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__DST_RING_LOW_WATERMARK___S 4 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS___M 0x0000001F +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23 (0x0003205C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE (0x00240034) +#define 
ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_MSB___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY (0x0024D000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15 (0x0003005C) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WLAN1_SLP_TMR_INTR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_COMMIT_REPLAY___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI__CURRENT_SRRI___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE (0x0024002C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_LOW_WATERMARK___S 4 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__PMM_SR_XO_SETTLE_TIMEOUT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__WLAN2_HW2SW_GRANT___S 7 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11__VALUE_REGISTER___S 0 +#define 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH___M 0x0000001F +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_HIGH_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__ADDRESS_BITS_17_TO_2___M 0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__ECAHB_TIMEOUT___M 0x00000010 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY___M 0x01FFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__PARSER_INT___POR 0x000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__COPY_COMPLETE___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__ADDRESS_BITS_17_TO_2___POR 0x0000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__ILL_REG___S 24 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_OVERFLOW___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_CLEAR___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS__STATE___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0 (0x00032000) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK__SR_LOW_WATER_MARK_THRESOLD___M 0xFFFF0000 +#define 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR (0x00030014) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__CE_INTR_MISC_P___M 0x00000080 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20 (0x00032050) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LMH_INT___S 3 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI__CURRENT_DRRI___M 0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__ERR_RESP_ENABLE___S 2 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WLAN2_SLP_TMR_INTR___M 0x00008000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX__SRC_WR_INDEX___POR 0x0000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__SRC_RING_BYTE_SWAP_EN___M 0x00020000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW (0x0024B000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_SET (0x002F1004) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2 (0x00032008) +#define 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH__DESC_SKIP_DWORD___S 5 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__SRC_RING_HIGH_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_TESTBUS___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK__DR_LOW_WATER_MARK_THRESHOLD___M 0xFFFF0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE__FORCE_WAKE_ENABLE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH__BASE_ADDR_HIGH___POR 0x00 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__FORCE_WAKE_ENABLE___M 0x00000002 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__BMH_INT___M 0x00000001 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2__DST_AXI_MAX_LEN___S 2 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH__DESC_SKIP_DWORD___M 0x00000060 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__SMH_INT___S 6 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__EXTERNAL_INTR___M 0x0FFC0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI__CURRENT_DRRI___POR 0x0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17__ADDRESS_REGISTER___POR 0x000000 
+#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__INVALID_ADDR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__WLAN1_HW2SW_GRANT___S 6 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__PMM_WCSS_WAKEUP_IRQ_ACK___S 8 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW (0x00245000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__SRC_RING_LOW_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22 (0x00030078) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__SMH_INT___M 0x00000040 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2___POR 0x00000005 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_COMMIT_REPLAY___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__HALT_STATUS___S 3 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__DIRTY_BIT_SET___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__MISC___POR 0x000 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_LSB___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__FORCE_WAKE_ENABLE___S 1 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_OVERFLOW___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__ADDRESS_BITS_17_TO_2___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW___RWC QCSR_REG_RW +#define 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2__DST_AXI_MAX_LEN___POR 0x1 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK (0x0024004C) +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__ERR_RESP___M 0x00000004 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__PMM_WCSS_WAKEUP_IRQ_ACK___M 0x00000100 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__INVALID_BB_2_INTR___S 11 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_LOW_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17 (0x00030064) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW (0x0024000C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__INVALID_BB_1_INTR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_ADDRESS_VALID__BITS___M 0x00FFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH__DESC_SKIP_DWORD___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW (0x0024A000) +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__WLAN1_HW2SW_GRANT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15___M 
0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__INVALID_ADDR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__MCIM_INT___S 4 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2__SRC_AXI_MAX_LEN___M 0x00000003 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_OVERFLOW___M 0x00000040 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__PMM_WCSS_WAKEUP_IRQ_ACK___M 0x00000100 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS (0x00030008) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SR_PLL_REF_MUX_SEL___S 3 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8__ADDRESS_REGISTER___POR 0x000000 +#define 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__MSI_EN___M 0x00010000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__PARSER_INT___M 0x000FF800 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_LEN_ERR___S 8 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__ECAHB_TIMEOUT___M 0x00000010 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19 (0x0003204C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__DST_RING_HIGH_WATERMARK___S 3 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5 (0x00030034) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_OVERFLOW___M 0x00000020 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_CLEAR__CE_INTR_LINE_HOST_P___M 0x00000FFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SW_SCRATCH___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__HOST___M 0x00FFF000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SR_RF_XO_MUX_SEL___S 4 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__MSI_EN___S 16 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__VALUE_REG_UPDATED_WITH_INVALID_ADDR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE___M 0x000FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__PARSER_INT___M 0x000FF800 +#define 
ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK__SR_HIGH_WATER_MARK_THRESHOLD___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WFSS_DBG_INTR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE__SIZE___M 0x0000FFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__SW_SLP_TMR_INTR___M 0x00010000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK__DR_HIGH_WATER_MARK_THRESHOLD___POR 0x0000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__PMM_WCSS_WAKEUP_IRQ_ACK___S 8 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_LSB__STATUS___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_MAX_LEN_VIO___M 0x00000080 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE__SIZE___POR 0x0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS___RWC QCSR_REG_RW +#define 
ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5 (0x00032014) +#define ADRASTEA_A_WCSS_SR_APSS_SR_TESTBUS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS (0x0003000C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__DIRTY_BIT_SET_CLEAR___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH__BASE_ADDR_HIGH___M 0x0000001F +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__SRC_FLUSH___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK__DR_LOW_WATER_MARK_THRESHOLD___S 16 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9 (0x00032024) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__COPY_COMPLETE___M 0x00000001 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__WRITE_ACCESS___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE___M 0x0000001F +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__COPY_COMPLETE___M 0x00000001 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21__VALUE_REGISTER___S 0 +#define 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__PARSER_INT___S 11 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__HOST___POR 0x000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_OVERFLOW___S 6 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WLAN1_SLP_TMR_INTR___M 0x00004000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__ERR_RESP_CLEAR___M 0x00000004 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__WRITE_ACCESS___M 0x00020000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_ADDRESS_VALID___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__VALUE_REG_UPDATED_WITH_INVALID_ADDR___S 5 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DEST_MAX_LENGTH___M 0x0000FFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__HALT_STATUS___M 0x00000008 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LCMH_WCI2_INTERRUPT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2__ADDRESS_REGISTER___POR 0x000000 +#define 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__DST_RING_HIGH_WATERMARK___M 0x00000008 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1___POR 0x00000080 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_DIRTY___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__DIRTY_BIT_SET_CLEAR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__PMM_SR_XO_SETTLE_TIMEOUT___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18 (0x00032048) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__NOC_WCMN_INTR___M 0x00001000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK (0x00240050) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__DIRTY_BIT_SET___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_LSB__STATUS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__DIRTY_BIT_SET_ENABLE___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__IDX_UPD_EN___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__WLAN1_HW2SW_GRANT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW (0x00243000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE__START_OFFSET___S 16 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5___M 
0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_DIRTY (0x00030080) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_LSB___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW (0x00248000) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_SET___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__SRC_RING_LOW_WATERMARK___S 2 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16 (0x00032040) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__DST_RING_HIGH_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS___M 0x0000000F +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__DST_RING_HIGH_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE__START_OFFSET___S 16 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY 
(0x0024C000) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__WLAN2_HW2SW_GRANT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__COPY_COMPLETE___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__INVALID_BB_2_INTR___M 0x00000800 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DST_RING_BYTE_SWAP_EN___S 18 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS (0x00032078) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_MSB__STATUS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SW_SCRATCH__SPARE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_ADDRESS_VALID__BITS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SR_TESTBUS__SELECT___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_LOW_WATERMARK___S 2 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LMH_INT___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_ADDRESS_VALID___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_LSB__STATUS___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18___M 
0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12 (0x00030050) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_TESTBUS__SELECT___M 0x00000007 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__PMH_INT___M 0x00000020 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_MSB___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__ENABLE_APSS_FULL_ACCESS___M 0x00000004 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WLAN1_SLP_TMR_INTR___S 14 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__SRC_RING_LOW_WATERMARK___M 0x00000004 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_MSB___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DST_RING_BYTE_SWAP_EN___M 0x00040000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__PMM_WCSS_WAKEUP_IRQ_ACK___S 8 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_LEN_ERR___M 0x00000100 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_COMMIT_REPLAY__ENABLE___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7 (0x0003003C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI__CURRENT_DRRI___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK___S 0 
+#define ADRASTEA_A_WCSS_SR_APSS_SW_SCRATCH__SPARE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_MAX_LEN_VIO___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET (0x002F0084) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_SET__CE_INTR_LINE_HOST_P___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK__DR_HIGH_WATER_MARK_THRESHOLD___M 0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__READ_ACCESS___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE__SIZE___POR 0x0000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__ENABLE_APSS_FULL_ACCESS___S 2 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES__CE_INTR_LINE_HOST_P___POR 0x000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_COMMIT_REPLAY__ENABLE___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8 (0x00030040) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS (0x00240030) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_LOW_WATERMARK___M 0x00000010 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SOFT_RESET___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15 (0x0003203C) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2___POR 0x00000000 
+#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_HIGH_WATERMARK___M 0x00000002 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__COPY_COMPLETE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH (0x00240004) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS__STATE___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__WRITE_ACCESS___S 17 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_SET___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_COMMIT_REPLAY___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_OVERFLOW___S 6 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__PMM_SR_XO_SETTLE_TIMEOUT___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__ECAHB_TIMEOUT___S 4 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DEST_MAX_LENGTH___POR 0x0080 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK__DR_LOW_WATER_MARK_THRESHOLD___POR 0x0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__PMM_SR_XO_SETTLE_TIMEOUT___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__WLAN1_HW2SW_GRANT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_HIGH_WATERMARK___S 3 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22___POR 0x00000000 +#define 
ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__DST_RING_HIGH_WATERMARK___S 3 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__PMM_WCSS_WAKEUP_IRQ_ACK___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW (0x00240000) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13 (0x00030054) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WLAN2_SLP_TMR_INTR___S 15 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH (0x00240010) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__PARSER_INT___S 11 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__ERR_RESP_CLEAR___S 2 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4 (0x00032010) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_COMMIT_REPLAY___M 0x00000001 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5___RWC QCSR_REG_RO +#define 
ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__PMM_WCSS_WAKEUP_IRQ_ACK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_LSB___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__SW_SLP_TMR_INTR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__AXI_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_MAX_LEN_VIO___S 7 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__DST_RING_LOW_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_MAX_LEN_VIO___S 7 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW (0x00246000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DST_RING_BYTE_SWAP_EN___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__INVALID_BB_2_INTR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__PMM_WCSS_WAKEUP_IRQ_ACK___POR 0x0 +#define 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY__HOST___POR 0x000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__NOC_WCMN_INTR___S 12 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__PARSER_INT___POR 0x000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__DST_FLUSH___M 0x00000004 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SW_SCRATCH___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23 (0x0003007C) +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL___M 0x0000001F +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_OVERFLOW___M 0x00000040 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__SMH_INT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__SW_SLP_TMR_INTR___S 16 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY__HOST___S 12 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_LSB___RWC QCSR_REG_RO +#define 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH__BASE_ADDR_HIGH___POR 0x00 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_LEN_ERR___M 0x00000100 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__VALUE_REG_UPDATED_WITH_INVALID_ADDR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SW_SCRATCH__SPARE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__SRC_RING_HIGH_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE__FORCE_WAKE_ENABLE___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX__SRC_WR_INDEX___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_MSB__STATUS___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__ERR_RESP_ENABLE___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI___M 0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__ILL_REG___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2__SRC_AXI_MAX_LEN___POR 0x1 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1 (0x00240018) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19 (0x0003006C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX (0x0024003C) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20__ADDRESS_REGISTER___M 
0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD___M 0x0000000F +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__VALUE_REG_UPDATED_WITH_INVALID_ADDR___S 5 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI__CURRENT_SRRI___POR 0x0000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SR_RF_XO_MUX_SEL___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2___M 0x0000000F +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__INVALID_ADDR___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET___M 0x0FFFDDFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7 (0x0003201C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__SRC_RING_LOW_WATERMARK___M 0x00000004 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__INVALID_ADDR___S 3 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2__DST_AXI_MAX_LEN___M 0x0000000C +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX___POR 0x00000000 +#define 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__MISC___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__COPY_COMPLETE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK__SR_HIGH_WATER_MARK_THRESHOLD___POR 0x0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__CLOCK_GATE_DISABLE___S 1 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__DIRTY_BIT_SET_CLEAR___M 0x00000001 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__ECAHB_TIMEOUT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__FORCE_WAKE___S 1 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_MSB__STATUS___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__PMM_SR_XO_SETTLE_TIMEOUT___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__VALUE_REG_UPDATED_WITH_INVALID_ADDR___S 5 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_MSB__STATUS___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__VALUE_REG_UPDATED_WITH_INVALID_ADDR___M 
0x00000020 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__CE_INTR_MISC_P___S 7 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_OVERFLOW___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__INVALID_ADDR___S 3 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX__DST_WR_INDEX___M 0x0000FFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LCMH_STROBE_INTERRUPT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR___RWC QCSR_REG_WO +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__PMM_SR_XO_SETTLE_TIMEOUT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES__CE_INTR_LINE_HOST_P___M 0x00000FFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK__SR_LOW_WATER_MARK_THRESOLD___S 16 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__FORCE_WAKE___M 0x00000002 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16 (0x00030060) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_TESTBUS (0x00030144) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE__START_OFFSET___POR 0x0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SR_PLL_REF_MUX_SEL___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13___RWC QCSR_REG_RO +#define 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17 (0x00032044) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS___M 0x000003FF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WFSS_DBG_INTR___M 0x00020000 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_LSB__STATUS___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__ENABLE_APSS_FULL_ACCESS___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__SRC_RING_HIGH_WATERMARK___M 0x00000002 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE__START_OFFSET___M 0xFFFF0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__HALT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__SRC_RING_HIGH_WATERMARK___S 1 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__CE_INTR_TIMEOUT_P___S 8 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__COPY_COMPLETE___M 0x00000001 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE__START_OFFSET___POR 0x0000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS___M 0x000FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_HIGH_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__ECAHB_TIMEOUT___S 4 
+#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1___M 0x000FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__FORCE_WAKE_ENABLE___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_LSB__STATUS___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__INVALID_ADDR___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_MSB___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_TESTBUS___M 0x00000007 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__WLAN1_HW2SW_GRANT___S 6 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_HIGH_WATERMARK___S 1 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW (0x00242000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__WLAN2_HW2SW_GRANT___S 7 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_LOW_WATERMARK___S 4 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DEST_MAX_LENGTH___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE 
(0x00030010) +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__FORCE_WAKE_CLEAR___M 0x00000002 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8 (0x00032020) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_SET___M 0x00000FFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__IDX_UPD_EN___M 0x00080000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__BMH_INT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL__SOFT_RESET___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__WLAN1_HW2SW_GRANT___S 6 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0 (0x00030020) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__PMH_INT___S 5 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9___RWC 
QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY__HOST___M 0x00FFF000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX__DST_WR_INDEX___POR 0x0000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK__DR_HIGH_WATER_MARK_THRESHOLD___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__ILL_REG___M 0x01000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__DST_RING_LOW_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WLAN2_SLP_TMR_INTR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11 (0x0003004C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__PMH_INT___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__INVALID_BB_1_INTR___M 0x00000400 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK__SR_LOW_WATER_MARK_THRESOLD___POR 0x0000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_MSB (0x0003206C) +#define 
ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__PMM_WCSS_WAKEUP_IRQ_ACK___M 0x00000100 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_LOW_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI___M 0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__DIRTY_BIT_SET_ENABLE___M 0x00000001 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX___M 0x0000FFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE (0x00240014) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS__STATE___M 0x00000007 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11 (0x0003202C) +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI (0x00240048) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW__BASE_ADDR_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS__WCSS_CORE_WAKE_SLEEP_STATE___S 3 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW (0x00249000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__DST_RING_LOW_WATERMARK___S 4 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_LEN_ERR___S 8 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_MISC_IS__AXI_TIMEOUT_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__ERR_RESP_ENABLE___M 0x00000004 +#define 
ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LCMH_WCI2_INTERRUPT___S 2 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__HALT___M 0x00000001 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__SRC_RING_HIGH_WATERMARK___S 1 +#define ADRASTEA_A_WCSS_SR_APSS_DIRTY___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS__WCSS_CORE_WAKE_SLEEP_STATE___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__READ_ACCESS___S 16 +#define ADRASTEA_A_WCSS_SR_APSS_SR_TESTBUS___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_INVALID_ADDR_ACCESS__READ_ACCESS___M 0x00010000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_HIGH_WATERMARK___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_CLEAR__CE_INTR_LINE_HOST_P___POR 0x000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL (0x00030000) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI (0x00240044) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_MAX_LEN_VIO___POR 0x0 +#define 
ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_LSB___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_DIRTY__BITS___M 0x00FFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE___M 0x000003FF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD (0x00240020) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_LOW_WATERMARK___M 0x00000004 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__NOC_WCMN_INTR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_SECURE_WRAPPER_CE_WRAPPER_INTERRUPT_SUMMARY__HOST___S 12 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_LSB___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__COPY_COMPLETE___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOWREG_STATUS___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_LSB__STATUS___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES (0x002F1000) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_MISC_IS__AXI_TIMEOUT_ERR___M 0x00000400 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18___POR 0x00000000 +#define 
ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6__ADDRESS_REGISTER___POR 0x000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9 (0x00030044) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6 (0x00032018) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21 (0x00032054) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH__BASE_ADDR_HIGH___M 0x0000001F +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY___S 12 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__HALT___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW (0x00247000) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18 (0x00030068) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__AXI_ERR___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__WLAN1_HW2SW_GRANT___M 0x00000040 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_LOW_WATERMARK___M 0x00000010 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__INVALID_ADDR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_LSB___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_HIGH_WATERMARK___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__ERR_RESP___S 2 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE (0x00240008) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23__VALUE_REGISTER___M 0xFFFFFFFF +#define 
ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SR_CONTROL___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_DIRTY__BITS___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_HIGH_WATERMARK___S 3 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__WLAN1_HW2SW_GRANT___M 0x00000040 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LMH_INT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__WLAN2_HW2SW_GRANT___M 0x00000080 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1 (0x00030024) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2 (0x0024001C) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES__CE_INTR_LINE_HOST_P___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0__ADDRESS_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__WLAN2_HW2SW_GRANT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SW_SCRATCH___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES_SET___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LCMH_STROBE_INTERRUPT___M 0x00000002 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__VALUE_REG_UPDATED_WITH_INVALID_ADDR___M 0x00000020 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__MCIM_INT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__FORCE_WAKE___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6___POR 0x00000000 
+#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_MSB___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21 (0x00030074) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_DIRTY__BITS___POR 0x000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__DST_FLUSH___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__AXI_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__WLAN2_HW2SW_GRANT___S 7 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12__VALUE_REGISTER___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_LEN_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_MSB___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_WCSSAON_SR_MSB (0x00032074) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_HIGH_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE__FORCE_WAKE_ENABLE___M 0x00000001 +#define 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD__DST_FLUSH___S 2 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__SRC_RING_LOW_WATERMARK___S 2 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE__START_OFFSET___M 0xFFFF0000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10 (0x00030048) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_OVERFLOW___S 5 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI__CURRENT_SRRI___M 0x0000FFFF +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4__ADDRESS_REGISTER___M 0x003FFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__SRC_RING_HIGH_WATERMARK___M 0x00000002 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__COPY_COMPLETE___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__ERR_RESP___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__COPY_COMPLETE___M 0x00000001 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20__VALUE_REGISTER___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW__BASE_ADDR_LOW___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_MISC_IS__AXI_TIMEOUT_ERR___S 10 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW__BASE_ADDR_LOW___M 0xFFFFFFFF +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__INVALID_ADDR___S 3 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1___RWC QCSR_REG_RO +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__ECAHB_TIMEOUT___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SW_SCRATCH___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__AXI_BUS_ERR___S 9 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3__ADDRESS_REGISTER___POR 0x000000 +#define 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_LOW_WATERMARK___S 2 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL2__SRC_AXI_MAX_LEN___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_PMM_SR_LSB (0x00032068) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__WFSS_DBG_INTR___S 17 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_CLEAR__PMM_SR_XO_SETTLE_TIMEOUT___M 0x00000200 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH___S 0 +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_ENABLE__WLAN1_HW2SW_GRANT___M 0x00000040 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_MISC_IS__AXI_BUS_ERR___M 0x00000200 +#define ADRASTEA_A_WCSS_SR_APSS_COMMIT_REPLAY (0x00030004) +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW___RWC QCSR_REG_RW +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21___S 0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_MISC_IS__AXI_BUS_ERR___POR 0x0 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_3_A_WCMN_QDSP_ERROR_INTR_ENABLES_SET__LCMH_WCI2_INTERRUPT___M 0x00000004 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__DST_RING_LOW_WATERMARK___POR 0x0 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10 (0x00032028) +#define ADRASTEA_A_WCSS_SR_APSS_SR_INTERRUPT_STATUS__INVALID_ADDR___M 0x00000008 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3 (0x0003200C) +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16__VALUE_REGISTER___POR 0x00000000 +#define ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE__SRC_RING_HIGH_WATERMARK___S 1 +#define ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12__ADDRESS_REGISTER___S 0 + + +/* End auto-generated headers from register parser */ + +#define A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_INDEX_BASE_LOW 0x0024C004 +#define A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_INDEX_BASE_HIGH 0x0024C008 + +#define 
MISSING 0 +#define MISSING_FOR_ADRASTEA MISSING +#define ADRASTEA_PCIE_LOCAL_REG_BASE_ADDRESS 0 +#define ADRASTEA_WIFI_RTC_REG_BASE_ADDRESS 0x45000 +#define ADRASTEA_RTC_SOC_REG_BASE_ADDRESS 0x113000 +#define ADRASTEA_GPIO_ATHR_WLAN_REG_BASE_ADDRESS 0x85000 +#define ADRASTEA_SI_REG_BASE_ADDRESS 0x84000 +#define ADRASTEA_SOC_CORE_REG_BASE_ADDRESS 0x113000 +#define ADRASTEA_CE_WRAPPER_REG_CSR_BASE_ADDRESS 0xC000 +#define ADRASTEA_MAC_WIFICMN_REG_BASE_ADDRESS MISSING + +/* Base Addresses */ +#define ADRASTEA_RTC_SOC_BASE_ADDRESS 0x00000000 +#define ADRASTEA_RTC_WMAC_BASE_ADDRESS 0x00000000 +#define ADRASTEA_MAC_COEX_BASE_ADDRESS 0x0000f000 +#define ADRASTEA_BT_COEX_BASE_ADDRESS 0x00002000 +#define ADRASTEA_SOC_PCIE_BASE_ADDRESS 0x00130000 +#define ADRASTEA_SOC_CORE_BASE_ADDRESS 0x00000000 +#define ADRASTEA_WLAN_UART_BASE_ADDRESS 0x00111000 +#define ADRASTEA_WLAN_SI_BASE_ADDRESS 0x00010000 +#define ADRASTEA_WLAN_GPIO_BASE_ADDRESS 0x00000000 +#define ADRASTEA_WLAN_ANALOG_INTF_BASE_ADDRESS 0x00000000 +#define ADRASTEA_WLAN_MAC_BASE_ADDRESS 0x00000000 +#define ADRASTEA_EFUSE_BASE_ADDRESS 0x00024000 +#define ADRASTEA_FPGA_REG_BASE_ADDRESS 0x00039000 +#define ADRASTEA_WLAN_UART2_BASE_ADDRESS 0x00054c00 + +#define ADRASTEA_CE_WRAPPER_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY +#define ADRASTEA_CE0_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW +#define ADRASTEA_CE1_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE1_SR_BA_LOW +#define ADRASTEA_CE2_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE2_SR_BA_LOW +#define ADRASTEA_CE3_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE3_SR_BA_LOW +#define ADRASTEA_CE4_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE4_SR_BA_LOW +#define ADRASTEA_CE5_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE5_SR_BA_LOW +#define ADRASTEA_CE6_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE6_SR_BA_LOW 
+#define ADRASTEA_CE7_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE7_SR_BA_LOW +#define ADRASTEA_CE8_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE8_SR_BA_LOW +#define ADRASTEA_CE9_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE9_SR_BA_LOW +#define ADRASTEA_CE10_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE10_SR_BA_LOW +#define ADRASTEA_CE11_BASE_ADDRESS \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE11_SR_BA_LOW + +#define ADRASTEA_A_SOC_PCIE_SOC_PCIE_REG MISSING +#define ADRASTEA_DBI_BASE_ADDRESS MISSING +#define ADRASTEA_WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS MISSING +#define ADRASTEA_WIFICMN_BASE_ADDRESS MISSING +#define ADRASTEA_BOARD_DATA_SZ MISSING +#define ADRASTEA_BOARD_EXT_DATA_SZ MISSING +#define ADRASTEA_A_SOC_PCIE_PCIE_BAR0_START MISSING +#define ADRASTEA_A_SOC_CORE_SCRATCH_0_ADDRESS MISSING +#define ADRASTEA_A_SOC_CORE_SPARE_0_REGISTER MISSING +#define ADRASTEA_PCIE_INTR_FIRMWARE_ROUTE_MASK MISSING +#define ADRASTEA_SCRATCH_3_ADDRESS MISSING +#define ADRASTEA_TARG_DRAM_START 0x00400000 +#define ADRASTEA_SOC_SYSTEM_SLEEP_OFFSET 0x000000c0 +#define ADRASTEA_SOC_RESET_CONTROL_OFFSET \ + (0x00000000 + ADRASTEA_RTC_SOC_REG_BASE_ADDRESS) +#define ADRASTEA_SOC_CLOCK_CONTROL_OFFSET \ + (0x00000028 + ADRASTEA_RTC_SOC_REG_BASE_ADDRESS) +#define ADRASTEA_SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001 +#define ADRASTEA_SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001 +#define ADRASTEA_WLAN_GPIO_PIN0_ADDRESS \ + (0x50 + ADRASTEA_GPIO_ATHR_WLAN_REG_BASE_ADDRESS) +#define ADRASTEA_WLAN_GPIO_PIN1_ADDRESS \ + (0x54 + ADRASTEA_GPIO_ATHR_WLAN_REG_BASE_ADDRESS) +#define ADRASTEA_WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800 +#define ADRASTEA_WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800 +#define ADRASTEA_SOC_CPU_CLOCK_OFFSET 0x00000020 +#define ADRASTEA_SOC_LPO_CAL_OFFSET \ + (0xe0 + ADRASTEA_RTC_SOC_REG_BASE_ADDRESS) +#define ADRASTEA_WLAN_GPIO_PIN10_ADDRESS \ + (0x78 + ADRASTEA_GPIO_ATHR_WLAN_REG_BASE_ADDRESS) +#define ADRASTEA_WLAN_GPIO_PIN11_ADDRESS \ 
+ (0x7c + ADRASTEA_GPIO_ATHR_WLAN_REG_BASE_ADDRESS) +#define ADRASTEA_WLAN_GPIO_PIN12_ADDRESS \ + (0x80 + ADRASTEA_GPIO_ATHR_WLAN_REG_BASE_ADDRESS) +#define ADRASTEA_WLAN_GPIO_PIN13_ADDRESS \ + (0x84 + ADRASTEA_GPIO_ATHR_WLAN_REG_BASE_ADDRESS) +#define ADRASTEA_SOC_CPU_CLOCK_STANDARD_LSB 0 +#define ADRASTEA_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 +#define ADRASTEA_SOC_LPO_CAL_ENABLE_LSB 20 +#define ADRASTEA_SOC_LPO_CAL_ENABLE_MASK 0x00100000 + +#define ADRASTEA_WLAN_SYSTEM_SLEEP_DISABLE_LSB 0 +#define ADRASTEA_WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001 +#define ADRASTEA_WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000002 +#define ADRASTEA_WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000001 +#define ADRASTEA_SI_CONFIG_BIDIR_OD_DATA_LSB 18 +#define ADRASTEA_SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000 +#define ADRASTEA_SI_CONFIG_I2C_LSB 16 +#define ADRASTEA_SI_CONFIG_I2C_MASK 0x00010000 +#define ADRASTEA_SI_CONFIG_POS_SAMPLE_LSB 7 +#define ADRASTEA_SI_CONFIG_POS_SAMPLE_MASK 0x00000080 +#define ADRASTEA_SI_CONFIG_INACTIVE_CLK_LSB 4 +#define ADRASTEA_SI_CONFIG_INACTIVE_CLK_MASK 0x00000010 +#define ADRASTEA_SI_CONFIG_INACTIVE_DATA_LSB 5 +#define ADRASTEA_SI_CONFIG_INACTIVE_DATA_MASK 0x00000020 +#define ADRASTEA_SI_CONFIG_DIVIDER_LSB 0 +#define ADRASTEA_SI_CONFIG_DIVIDER_MASK 0x0000000f +#define ADRASTEA_SI_CONFIG_OFFSET (0x00000000 + ADRASTEA_SI_REG_BASE_ADDRESS) +#define ADRASTEA_SI_TX_DATA0_OFFSET (0x00000008 + ADRASTEA_SI_REG_BASE_ADDRESS) +#define ADRASTEA_SI_TX_DATA1_OFFSET (0x0000000c + ADRASTEA_SI_REG_BASE_ADDRESS) +#define ADRASTEA_SI_RX_DATA0_OFFSET (0x00000010 + ADRASTEA_SI_REG_BASE_ADDRESS) +#define ADRASTEA_SI_RX_DATA1_OFFSET (0x00000014 + ADRASTEA_SI_REG_BASE_ADDRESS) +#define ADRASTEA_SI_CS_OFFSET (0x00000004 + ADRASTEA_SI_REG_BASE_ADDRESS) +#define ADRASTEA_SI_CS_DONE_ERR_MASK 0x00000400 +#define ADRASTEA_SI_CS_DONE_INT_MASK 0x00000200 +#define ADRASTEA_SI_CS_START_LSB 8 +#define ADRASTEA_SI_CS_START_MASK 0x00000100 +#define ADRASTEA_SI_CS_RX_CNT_LSB 4 +#define 
ADRASTEA_SI_CS_RX_CNT_MASK 0x000000f0 +#define ADRASTEA_SI_CS_TX_CNT_LSB 0 +#define ADRASTEA_SI_CS_TX_CNT_MASK 0x0000000f +#define ADRASTEA_CE_COUNT 12 +#define ADRASTEA_SR_WR_INDEX_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WR_INDEX \ + - ADRASTEA_CE0_BASE_ADDRESS) +#define ADRASTEA_DST_WATERMARK_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK \ + - ADRASTEA_CE0_BASE_ADDRESS) +#define ADRASTEA_RX_MSDU_END_4_FIRST_MSDU_LSB 14 +#define ADRASTEA_RX_MSDU_END_4_FIRST_MSDU_MASK 0x00004000 +#define ADRASTEA_RX_MPDU_START_0_SEQ_NUM_LSB 16 +#define ADRASTEA_RX_MPDU_START_0_SEQ_NUM_MASK 0x0fff0000 +#define ADRASTEA_RX_MPDU_START_2_PN_47_32_LSB 0 +#define ADRASTEA_RX_MPDU_START_2_PN_47_32_MASK 0x0000ffff +#define ADRASTEA_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB 16 +#define ADRASTEA_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK 0xffff0000 +#define ADRASTEA_RX_MSDU_END_4_LAST_MSDU_LSB 15 +#define ADRASTEA_RX_MSDU_END_4_LAST_MSDU_MASK 0x00008000 +#define ADRASTEA_RX_ATTENTION_0_MCAST_BCAST_LSB 2 +#define ADRASTEA_RX_ATTENTION_0_MCAST_BCAST_MASK 0x00000004 +#define ADRASTEA_RX_ATTENTION_0_FRAGMENT_LSB 13 +#define ADRASTEA_RX_ATTENTION_0_FRAGMENT_MASK 0x00002000 +#define ADRASTEA_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK 0x08000000 +#define ADRASTEA_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB 16 +#define ADRASTEA_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK 0x00ff0000 +#define ADRASTEA_RX_MSDU_START_0_MSDU_LENGTH_LSB 0 +#define ADRASTEA_RX_MSDU_START_0_MSDU_LENGTH_MASK 0x00003fff + +#define ADRASTEA_RX_MSDU_START_2_DECAP_FORMAT_OFFSET 0x00000008 +#define ADRASTEA_RX_MSDU_START_2_DECAP_FORMAT_LSB 8 +#define ADRASTEA_RX_MSDU_START_2_DECAP_FORMAT_MASK 0x00000300 +#define ADRASTEA_RX_MPDU_START_0_ENCRYPTED_LSB 13 +#define ADRASTEA_RX_MPDU_START_0_ENCRYPTED_MASK 0x00002000 +#define ADRASTEA_RX_ATTENTION_0_MORE_DATA_MASK 0x00000400 +#define ADRASTEA_RX_ATTENTION_0_MSDU_DONE_MASK 0x80000000 +#define ADRASTEA_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK 0x00040000 + +#define 
ADRASTEA_DST_WR_INDEX_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WR_INDEX\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_SRC_WATERMARK_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_SRC_WATERMARK_LOW_MASK 0xffff0000 +#define ADRASTEA_SRC_WATERMARK_HIGH_MASK 0x0000ffff +#define ADRASTEA_DST_WATERMARK_LOW_MASK 0xffff0000 +#define ADRASTEA_DST_WATERMARK_HIGH_MASK 0x0000ffff + +#define ADRASTEA_CURRENT_SRRI_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_SRRI\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_CURRENT_DRRI_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CURRENT_DRRI\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002 +#define ADRASTEA_HOST_IS_SRC_RING_LOW_WATERMARK_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__SRC_RING_LOW_WATERMARK___M + +#define ADRASTEA_HOST_IS_DST_RING_HIGH_WATERMARK_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__DST_RING_HIGH_WATERMARK___M + +#define ADRASTEA_HOST_IS_DST_RING_LOW_WATERMARK_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__DST_RING_LOW_WATERMARK___M + +#define ADRASTEA_HOST_IS_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS \ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_MISC_IS_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS \ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_HOST_IS_COPY_COMPLETE_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IS__COPY_COMPLETE___M + +#define ADRASTEA_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS_OFFSET \ + (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY\ + - ADRASTEA_CE_WRAPPER_BASE_ADDRESS) + +/* + * Base address where the CE source and destination ring read + * indices are written to be viewed by host. 
+ */ + +#define ADRASTEA_CE_DDR_ADDRESS_FOR_RRI_LOW \ + (A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_INDEX_BASE_LOW\ + - ADRASTEA_CE_WRAPPER_BASE_ADDRESS) + +#define ADRASTEA_CE_DDR_ADDRESS_FOR_RRI_HIGH \ + (A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_INDEX_BASE_HIGH - ADRASTEA_CE_WRAPPER_BASE_ADDRESS) + +#define ADRASTEA_HOST_IE_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_HOST_IE_COPY_COMPLETE_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_HOST_IE__COPY_COMPLETE___M + +#define ADRASTEA_SR_BA_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_LOW\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_SR_BA_HIGH_OFFSET \ + (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_BA_HIGH \ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_SR_SIZE_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SR_SIZE \ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_CE_CTRL1_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1 \ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_CE_CTRL1_DMAX_LENGTH_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DEST_MAX_LENGTH___M + +#define ADRASTEA_DR_BA_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_LOW\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_DR_BA_HIGH_OFFSET \ + (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_BA_HIGH\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_DR_SIZE_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DR_SIZE\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_CE_CMD_REGISTER_OFFSET (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CMD\ + - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_MISC_IE_OFFSET \ + (ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IE - ADRASTEA_CE0_BASE_ADDRESS) + +#define ADRASTEA_MISC_IS_AXI_ERR_MASK 0x00000100 + +#define ADRASTEA_MISC_IS_DST_ADDR_ERR_MASK 0x00000200 + +#define ADRASTEA_MISC_IS_AXI_TIMEOUT_ERR \ + 
ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__AXI_TIMEOUT_ERR___M + +#define ADRASTEA_MISC_IS_SRC_LEN_ERR_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_LEN_ERR___M + +#define ADRASTEA_MISC_IS_DST_MAX_LEN_VIO_MASK\ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_MAX_LEN_VIO___M + +#define ADRASTEA_MISC_IS_DST_RING_OVERFLOW_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__DST_RING_OVERFLOW___M + +#define ADRASTEA_MISC_IS_SRC_RING_OVERFLOW_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_MISC_IS__SRC_RING_OVERFLOW___M + +#define ADRASTEA_SRC_WATERMARK_LOW_LSB \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK__SR_LOW_WATER_MARK_THRESOLD___S + +#define ADRASTEA_SRC_WATERMARK_HIGH_LSB \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_SRC_WATERMARK__SR_HIGH_WATER_MARK_THRESHOLD___S + +#define ADRASTEA_DST_WATERMARK_LOW_LSB \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK__DR_LOW_WATER_MARK_THRESHOLD___S + +#define ADRASTEA_DST_WATERMARK_HIGH_LSB \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_DST_WATERMARK__DR_HIGH_WATER_MARK_THRESHOLD___S + +#define ADRASTEA_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY__HOST___M + +#define ADRASTEA_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE_COMMON_WRAPPER_CE_WRAPPER_HOST_INTERRUPT_SUMMARY__HOST___S + +#define ADRASTEA_CE_CTRL1_DMAX_LENGTH_LSB \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DEST_MAX_LENGTH___S + +#define ADRASTEA_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__SRC_RING_BYTE_SWAP_EN___M + +#define ADRASTEA_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DST_RING_BYTE_SWAP_EN___M + +#define ADRASTEA_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__SRC_RING_BYTE_SWAP_EN___S + 
+#define ADRASTEA_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB \ + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__DST_RING_BYTE_SWAP_EN___S + +#define ADRASTEA_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK 0x0000004 +#define ADRASTEA_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB 2 +#define ADRASTEA_SOC_GLOBAL_RESET_ADDRESS \ + (0x0008 + ADRASTEA_PCIE_LOCAL_REG_BASE_ADDRESS) +#define ADRASTEA_RTC_STATE_ADDRESS \ + (0x0000 + ADRASTEA_PCIE_LOCAL_REG_BASE_ADDRESS) +#define ADRASTEA_RTC_STATE_COLD_RESET_MASK 0x400 + +#define ADRASTEA_PCIE_SOC_WAKE_RESET 0x00000000 +#define ADRASTEA_PCIE_SOC_WAKE_ADDRESS (ADRASTEA_A_WCSS_SR_APSS_FORCE_WAKE) +#define ADRASTEA_PCIE_SOC_WAKE_V_MASK 0x00000001 + +#define ADRASTEA_RTC_STATE_V_MASK 0x00000007 +#define ADRASTEA_RTC_STATE_V_LSB 0 +#define ADRASTEA_RTC_STATE_V_ON 5 +#define ADRASTEA_PCIE_LOCAL_BASE_ADDRESS 0x0 +#define ADRASTEA_FW_IND_EVENT_PENDING 1 +#define ADRASTEA_FW_IND_INITIALIZED 2 +#define ADRASTEA_FW_IND_HELPER 4 + +#define ADRASTEA_PCIE_INTR_FIRMWARE_MASK 0x00000000 +#define ADRASTEA_PCIE_INTR_CE0_MASK 0x00000100 +#define ADRASTEA_PCIE_INTR_CE_MASK_ALL 0x00001ffe + +#define ADRASTEA_CPU_INTR_ADDRESS 0xffffffff +#define ADRASTEA_SOC_LF_TIMER_CONTROL0_ADDRESS 0xffffffff +#define ADRASTEA_SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0xffffffff +#define ADRASTEA_SOC_RESET_CONTROL_ADDRESS \ + (0x00000000 + ADRASTEA_RTC_SOC_REG_BASE_ADDRESS) +#define ADRASTEA_SOC_RESET_CONTROL_CE_RST_MASK 0x0100 +#define ADRASTEA_SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040 +#define ADRASTEA_CORE_CTRL_ADDRESS (0x0000 + ADRASTEA_SOC_CORE_REG_BASE_ADDRESS) +#define ADRASTEA_CORE_CTRL_CPU_INTR_MASK 0x00002000 +#define ADRASTEA_LOCAL_SCRATCH_OFFSET 0x00000018 +#define ADRASTEA_CLOCK_GPIO_OFFSET 0xffffffff +#define ADRASTEA_CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0 +#define ADRASTEA_CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0 +#define ADRASTEA_SOC_CHIP_ID_ADDRESS 0x000000f0 +#define ADRASTEA_SOC_CHIP_ID_VERSION_MASK 0xfffc0000 +#define ADRASTEA_SOC_CHIP_ID_VERSION_LSB 18 +#define 
ADRASTEA_SOC_CHIP_ID_REVISION_MASK 0x00000f00 +#define ADRASTEA_SOC_CHIP_ID_REVISION_LSB 8 +#define ADRASTEA_SOC_POWER_REG_OFFSET 0x0000010c + +/* Copy Engine Debug */ +#define ADRASTEA_WLAN_DEBUG_INPUT_SEL_OFFSET 0x0000010c +#define ADRASTEA_WLAN_DEBUG_INPUT_SEL_SRC_MSB 3 +#define ADRASTEA_WLAN_DEBUG_INPUT_SEL_SRC_LSB 0 +#define ADRASTEA_WLAN_DEBUG_INPUT_SEL_SRC_MASK 0x0000000f +#define ADRASTEA_WLAN_DEBUG_CONTROL_OFFSET 0x00000108 +#define ADRASTEA_WLAN_DEBUG_CONTROL_ENABLE_MSB 0 +#define ADRASTEA_WLAN_DEBUG_CONTROL_ENABLE_LSB 0 +#define ADRASTEA_WLAN_DEBUG_CONTROL_ENABLE_MASK 0x00000001 +#define ADRASTEA_WLAN_DEBUG_OUT_OFFSET 0x00000110 +#define ADRASTEA_WLAN_DEBUG_OUT_DATA_MSB 19 +#define ADRASTEA_WLAN_DEBUG_OUT_DATA_LSB 0 +#define ADRASTEA_WLAN_DEBUG_OUT_DATA_MASK 0x000fffff +#define ADRASTEA_AMBA_DEBUG_BUS_OFFSET 0x0000011c +#define ADRASTEA_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB 13 +#define ADRASTEA_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB 8 +#define ADRASTEA_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK 0x00003f00 +#define ADRASTEA_AMBA_DEBUG_BUS_SEL_MSB 4 +#define ADRASTEA_AMBA_DEBUG_BUS_SEL_LSB 0 +#define ADRASTEA_AMBA_DEBUG_BUS_SEL_MASK 0x0000001f +#define ADRASTEA_CE_WRAPPER_DEBUG_OFFSET 0x0008 +#define ADRASTEA_CE_WRAPPER_DEBUG_SEL_MSB 4 +#define ADRASTEA_CE_WRAPPER_DEBUG_SEL_LSB 0 +#define ADRASTEA_CE_WRAPPER_DEBUG_SEL_MASK 0x0000001f +#define ADRASTEA_CE_DEBUG_OFFSET 0x0054 +#define ADRASTEA_CE_DEBUG_SEL_MSB 5 +#define ADRASTEA_CE_DEBUG_SEL_LSB 0 +#define ADRASTEA_CE_DEBUG_SEL_MASK 0x0000003f +/* End */ + +/* PLL start */ +#define ADRASTEA_EFUSE_OFFSET 0x0000032c +#define ADRASTEA_EFUSE_XTAL_SEL_MSB 10 +#define ADRASTEA_EFUSE_XTAL_SEL_LSB 8 +#define ADRASTEA_EFUSE_XTAL_SEL_MASK 0x00000700 +#define ADRASTEA_BB_PLL_CONFIG_OFFSET 0x000002f4 +#define ADRASTEA_BB_PLL_CONFIG_OUTDIV_MSB 20 +#define ADRASTEA_BB_PLL_CONFIG_OUTDIV_LSB 18 +#define ADRASTEA_BB_PLL_CONFIG_OUTDIV_MASK 0x001c0000 +#define ADRASTEA_BB_PLL_CONFIG_FRAC_MSB 17 +#define ADRASTEA_BB_PLL_CONFIG_FRAC_LSB 0 
+#define ADRASTEA_BB_PLL_CONFIG_FRAC_MASK 0x0003ffff +#define ADRASTEA_WLAN_PLL_SETTLE_TIME_MSB 10 +#define ADRASTEA_WLAN_PLL_SETTLE_TIME_LSB 0 +#define ADRASTEA_WLAN_PLL_SETTLE_TIME_MASK 0x000007ff +#define ADRASTEA_WLAN_PLL_SETTLE_OFFSET 0x0018 +#define ADRASTEA_WLAN_PLL_SETTLE_SW_MASK 0x000007ff +#define ADRASTEA_WLAN_PLL_SETTLE_RSTMASK 0xffffffff +#define ADRASTEA_WLAN_PLL_SETTLE_RESET 0x00000400 +#define ADRASTEA_WLAN_PLL_CONTROL_NOPWD_MSB 18 +#define ADRASTEA_WLAN_PLL_CONTROL_NOPWD_LSB 18 +#define ADRASTEA_WLAN_PLL_CONTROL_NOPWD_MASK 0x00040000 +#define ADRASTEA_WLAN_PLL_CONTROL_BYPASS_MSB 16 +#define ADRASTEA_WLAN_PLL_CONTROL_BYPASS_LSB 16 +#define ADRASTEA_WLAN_PLL_CONTROL_BYPASS_MASK 0x00010000 +#define ADRASTEA_WLAN_PLL_CONTROL_BYPASS_RESET 0x1 +#define ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_MSB 15 +#define ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_LSB 14 +#define ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_MASK 0x0000c000 +#define ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_RESET 0x0 +#define ADRASTEA_WLAN_PLL_CONTROL_REFDIV_MSB 13 +#define ADRASTEA_WLAN_PLL_CONTROL_REFDIV_LSB 10 +#define ADRASTEA_WLAN_PLL_CONTROL_REFDIV_MASK 0x00003c00 +#define ADRASTEA_WLAN_PLL_CONTROL_REFDIV_RESET 0x0 +#define ADRASTEA_WLAN_PLL_CONTROL_DIV_MSB 9 +#define ADRASTEA_WLAN_PLL_CONTROL_DIV_LSB 0 +#define ADRASTEA_WLAN_PLL_CONTROL_DIV_MASK 0x000003ff +#define ADRASTEA_WLAN_PLL_CONTROL_DIV_RESET 0x11 +#define ADRASTEA_WLAN_PLL_CONTROL_OFFSET 0x0014 +#define ADRASTEA_WLAN_PLL_CONTROL_SW_MASK 0x001fffff +#define ADRASTEA_WLAN_PLL_CONTROL_RSTMASK 0xffffffff +#define ADRASTEA_WLAN_PLL_CONTROL_RESET 0x00010011 +#define ADRASTEA_SOC_CORE_CLK_CTRL_OFFSET 0x00000114 +#define ADRASTEA_SOC_CORE_CLK_CTRL_DIV_MSB 2 +#define ADRASTEA_SOC_CORE_CLK_CTRL_DIV_LSB 0 +#define ADRASTEA_SOC_CORE_CLK_CTRL_DIV_MASK 0x00000007 +#define ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_MSB 5 +#define ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_LSB 5 +#define ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020 +#define 
ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_RESET 0x0 +#define ADRASTEA_RTC_SYNC_STATUS_OFFSET 0x0244 +#define ADRASTEA_SOC_CPU_CLOCK_OFFSET 0x00000020 +#define ADRASTEA_SOC_CPU_CLOCK_STANDARD_MSB 1 +#define ADRASTEA_SOC_CPU_CLOCK_STANDARD_LSB 0 +#define ADRASTEA_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 +/* PLL end */ + +#define ADRASTEA_PCIE_INTR_CE_MASK(n) (ADRASTEA_PCIE_INTR_CE0_MASK << (n)) +#define ADRASTEA_DRAM_BASE_ADDRESS ADRASTEA_TARG_DRAM_START +#define ADRASTEA_FW_INDICATOR_ADDRESS \ + (ADRASTEA_WIFICMN_BASE_ADDRESS + ADRASTEA_SCRATCH_3_ADDRESS) +#define ADRASTEA_SYSTEM_SLEEP_OFFSET ADRASTEA_SOC_SYSTEM_SLEEP_OFFSET +#define ADRASTEA_WLAN_SYSTEM_SLEEP_OFFSET (0x002c + ADRASTEA_WIFI_RTC_REG_BASE_ADDRESS) +#define ADRASTEA_WLAN_RESET_CONTROL_OFFSET (0x0000 + ADRASTEA_WIFI_RTC_REG_BASE_ADDRESS) +#define ADRASTEA_CLOCK_CONTROL_OFFSET ADRASTEA_SOC_CLOCK_CONTROL_OFFSET +#define ADRASTEA_CLOCK_CONTROL_SI0_CLK_MASK \ + ADRASTEA_SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define ADRASTEA_RESET_CONTROL_MBOX_RST_MASK 0x00000004 +#define ADRASTEA_RESET_CONTROL_SI0_RST_MASK \ + ADRASTEA_SOC_RESET_CONTROL_SI0_RST_MASK +#define ADRASTEA_GPIO_BASE_ADDRESS ADRASTEA_WLAN_GPIO_BASE_ADDRESS +#define ADRASTEA_GPIO_PIN0_OFFSET ADRASTEA_WLAN_GPIO_PIN0_ADDRESS +#define ADRASTEA_GPIO_PIN1_OFFSET ADRASTEA_WLAN_GPIO_PIN1_ADDRESS +#define ADRASTEA_GPIO_PIN0_CONFIG_MASK ADRASTEA_WLAN_GPIO_PIN0_CONFIG_MASK +#define ADRASTEA_GPIO_PIN1_CONFIG_MASK ADRASTEA_WLAN_GPIO_PIN1_CONFIG_MASK +#define ADRASTEA_SI_BASE_ADDRESS 0x00000000 +#define ADRASTEA_CPU_CLOCK_OFFSET (0x20 + ADRASTEA_RTC_SOC_REG_BASE_ADDRESS) +#define ADRASTEA_LPO_CAL_OFFSET ADRASTEA_SOC_LPO_CAL_OFFSET +#define ADRASTEA_GPIO_PIN10_OFFSET ADRASTEA_WLAN_GPIO_PIN10_ADDRESS +#define ADRASTEA_GPIO_PIN11_OFFSET ADRASTEA_WLAN_GPIO_PIN11_ADDRESS +#define ADRASTEA_GPIO_PIN12_OFFSET ADRASTEA_WLAN_GPIO_PIN12_ADDRESS +#define ADRASTEA_GPIO_PIN13_OFFSET ADRASTEA_WLAN_GPIO_PIN13_ADDRESS +#define ADRASTEA_CPU_CLOCK_STANDARD_LSB 0 +#define 
ADRASTEA_CPU_CLOCK_STANDARD_MASK 0x1 +#define ADRASTEA_LPO_CAL_ENABLE_LSB ADRASTEA_SOC_LPO_CAL_ENABLE_LSB +#define ADRASTEA_LPO_CAL_ENABLE_MASK ADRASTEA_SOC_LPO_CAL_ENABLE_MASK +#define ADRASTEA_ANALOG_INTF_BASE_ADDRESS ADRASTEA_WLAN_ANALOG_INTF_BASE_ADDRESS +#define ADRASTEA_MBOX_BASE_ADDRESS 0x00008000 +#define ADRASTEA_INT_STATUS_ENABLE_ERROR_LSB MISSING +#define ADRASTEA_INT_STATUS_ENABLE_ERROR_MASK MISSING +#define ADRASTEA_INT_STATUS_ENABLE_CPU_LSB MISSING +#define ADRASTEA_INT_STATUS_ENABLE_CPU_MASK MISSING +#define ADRASTEA_INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define ADRASTEA_INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define ADRASTEA_INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define ADRASTEA_INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ADRASTEA_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ADRASTEA_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ADRASTEA_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ADRASTEA_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define ADRASTEA_COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define ADRASTEA_COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define ADRASTEA_INT_STATUS_ENABLE_ADDRESS MISSING +#define ADRASTEA_CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define ADRASTEA_CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define ADRASTEA_HOST_INT_STATUS_ADDRESS MISSING +#define ADRASTEA_CPU_INT_STATUS_ADDRESS MISSING +#define ADRASTEA_ERROR_INT_STATUS_ADDRESS MISSING +#define ADRASTEA_ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ADRASTEA_ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ADRASTEA_ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ADRASTEA_ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ADRASTEA_ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ADRASTEA_ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define ADRASTEA_COUNT_DEC_ADDRESS MISSING +#define ADRASTEA_HOST_INT_STATUS_CPU_MASK MISSING +#define ADRASTEA_HOST_INT_STATUS_CPU_LSB MISSING +#define ADRASTEA_HOST_INT_STATUS_ERROR_MASK MISSING +#define 
ADRASTEA_HOST_INT_STATUS_ERROR_LSB MISSING +#define ADRASTEA_HOST_INT_STATUS_COUNTER_MASK MISSING +#define ADRASTEA_HOST_INT_STATUS_COUNTER_LSB MISSING +#define ADRASTEA_RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define ADRASTEA_WINDOW_DATA_ADDRESS MISSING +#define ADRASTEA_WINDOW_READ_ADDR_ADDRESS MISSING +#define ADRASTEA_WINDOW_WRITE_ADDR_ADDRESS MISSING + +/* Shadow Registers - Start */ +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_0 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE0 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_1 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE1 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_2 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE2 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_3 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE3 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_4 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE4 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_5 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE5 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_6 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE6 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_7 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE7 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_8 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE8 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_9 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE9 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_10 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE10 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_11 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE11 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_12 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE12 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_13 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE13 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_14 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE14 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_15 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE15 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_16 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE16 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_17 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE17 +#define 
ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_18 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE18 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_19 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE19 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_20 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE20 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_21 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE21 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_22 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE22 +#define ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_23 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_VALUE23 + +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_0 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS0 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_1 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS1 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_2 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS2 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_3 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS3 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_4 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS4 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_5 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS5 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_6 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS6 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_7 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS7 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_8 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS8 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_9 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS9 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_10 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS10 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_11 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS11 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_12 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS12 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_13 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS13 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_14 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS14 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_15 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS15 
+#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_16 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS16 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_17 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS17 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_18 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS18 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_19 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS19 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_20 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS20 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_21 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS21 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_22 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS22 +#define ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_23 \ + ADRASTEA_A_WCSS_SR_APSS_SHADOW_ADDRESS23 + +/* Q6 iHelium emulation registers */ +#define ADRASTEA_A_SOC_CORE_PCIE_INTR_CAUSE_GRP1 0x00113018 +#define ADRASTEA_A_SOC_CORE_SPARE_1_REGISTER 0x00113184 +#define ADRASTEA_A_SOC_CORE_PCIE_INTR_CLR_GRP1 0x00113020 +#define ADRASTEA_A_SOC_CORE_PCIE_INTR_ENABLE_GRP1 0x00113010 +#define ADRASTEA_A_SOC_PCIE_PCIE_SCRATCH_0 0x00130040 +#define ADRASTEA_A_SOC_PCIE_PCIE_SCRATCH_1 0x00130044 + +#define ADRASTEA_HOST_ENABLE_REGISTER 0x00188000 +#define ADRASTEA_Q6_ENABLE_REGISTER_0 0x00188004 +#define ADRASTEA_Q6_ENABLE_REGISTER_1 0x00188008 +#define ADRASTEA_HOST_CAUSE_REGISTER 0x0018800c +#define ADRASTEA_Q6_CAUSE_REGISTER_0 0x00188010 +#define ADRASTEA_Q6_CAUSE_REGISTER_1 0x00188014 +#define ADRASTEA_HOST_CLEAR_REGISTER 0x00188018 +#define ADRASTEA_Q6_CLEAR_REGISTER_0 0x0018801c +#define ADRASTEA_Q6_CLEAR_REGISTER_1 0x00188020 + +#define ADRASTEA_A_WIFI_APB_1_A_WFSS_CE_TARGET_HOST_DELTA 0x08 +#define ADRASTEA_A_SOC_PCIE_PCIE_SCRATCH_2 0x0013005C +#define ADRASTEA_A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK 0x0 +/* end: Q6 iHelium emulation registers */ + +#define ADRASTEA_BYPASS_QMI_TEMP_REGISTER 0x00032064 + +struct targetdef_s adrastea_targetdef = { + .d_RTC_SOC_BASE_ADDRESS = ADRASTEA_RTC_SOC_BASE_ADDRESS, + .d_RTC_WMAC_BASE_ADDRESS = 
ADRASTEA_RTC_WMAC_BASE_ADDRESS, + .d_SYSTEM_SLEEP_OFFSET = ADRASTEA_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_OFFSET = ADRASTEA_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_DISABLE_LSB = + ADRASTEA_WLAN_SYSTEM_SLEEP_DISABLE_LSB, + .d_WLAN_SYSTEM_SLEEP_DISABLE_MASK = + ADRASTEA_WLAN_SYSTEM_SLEEP_DISABLE_MASK, + .d_CLOCK_CONTROL_OFFSET = ADRASTEA_CLOCK_CONTROL_OFFSET, + .d_CLOCK_CONTROL_SI0_CLK_MASK = ADRASTEA_CLOCK_CONTROL_SI0_CLK_MASK, + .d_RESET_CONTROL_OFFSET = ADRASTEA_SOC_RESET_CONTROL_OFFSET, + .d_RESET_CONTROL_MBOX_RST_MASK = ADRASTEA_RESET_CONTROL_MBOX_RST_MASK, + .d_RESET_CONTROL_SI0_RST_MASK = ADRASTEA_RESET_CONTROL_SI0_RST_MASK, + .d_WLAN_RESET_CONTROL_OFFSET = ADRASTEA_WLAN_RESET_CONTROL_OFFSET, + .d_WLAN_RESET_CONTROL_COLD_RST_MASK = + ADRASTEA_WLAN_RESET_CONTROL_COLD_RST_MASK, + .d_WLAN_RESET_CONTROL_WARM_RST_MASK = + ADRASTEA_WLAN_RESET_CONTROL_WARM_RST_MASK, + .d_GPIO_BASE_ADDRESS = ADRASTEA_GPIO_BASE_ADDRESS, + .d_GPIO_PIN0_OFFSET = ADRASTEA_GPIO_PIN0_OFFSET, + .d_GPIO_PIN1_OFFSET = ADRASTEA_GPIO_PIN1_OFFSET, + .d_GPIO_PIN0_CONFIG_MASK = ADRASTEA_GPIO_PIN0_CONFIG_MASK, + .d_GPIO_PIN1_CONFIG_MASK = ADRASTEA_GPIO_PIN1_CONFIG_MASK, + .d_SI_CONFIG_BIDIR_OD_DATA_LSB = ADRASTEA_SI_CONFIG_BIDIR_OD_DATA_LSB, + .d_SI_CONFIG_BIDIR_OD_DATA_MASK = ADRASTEA_SI_CONFIG_BIDIR_OD_DATA_MASK, + .d_SI_CONFIG_I2C_LSB = ADRASTEA_SI_CONFIG_I2C_LSB, + .d_SI_CONFIG_I2C_MASK = ADRASTEA_SI_CONFIG_I2C_MASK, + .d_SI_CONFIG_POS_SAMPLE_LSB = ADRASTEA_SI_CONFIG_POS_SAMPLE_LSB, + .d_SI_CONFIG_POS_SAMPLE_MASK = ADRASTEA_SI_CONFIG_POS_SAMPLE_MASK, + .d_SI_CONFIG_INACTIVE_CLK_LSB = ADRASTEA_SI_CONFIG_INACTIVE_CLK_LSB, + .d_SI_CONFIG_INACTIVE_CLK_MASK = ADRASTEA_SI_CONFIG_INACTIVE_CLK_MASK, + .d_SI_CONFIG_INACTIVE_DATA_LSB = ADRASTEA_SI_CONFIG_INACTIVE_DATA_LSB, + .d_SI_CONFIG_INACTIVE_DATA_MASK = ADRASTEA_SI_CONFIG_INACTIVE_DATA_MASK, + .d_SI_CONFIG_DIVIDER_LSB = ADRASTEA_SI_CONFIG_DIVIDER_LSB, + .d_SI_CONFIG_DIVIDER_MASK = ADRASTEA_SI_CONFIG_DIVIDER_MASK, + 
.d_SI_BASE_ADDRESS = ADRASTEA_SI_BASE_ADDRESS, + .d_SI_CONFIG_OFFSET = ADRASTEA_SI_CONFIG_OFFSET, + .d_SI_TX_DATA0_OFFSET = ADRASTEA_SI_TX_DATA0_OFFSET, + .d_SI_TX_DATA1_OFFSET = ADRASTEA_SI_TX_DATA1_OFFSET, + .d_SI_RX_DATA0_OFFSET = ADRASTEA_SI_RX_DATA0_OFFSET, + .d_SI_RX_DATA1_OFFSET = ADRASTEA_SI_RX_DATA1_OFFSET, + .d_SI_CS_OFFSET = ADRASTEA_SI_CS_OFFSET, + .d_SI_CS_DONE_ERR_MASK = ADRASTEA_SI_CS_DONE_ERR_MASK, + .d_SI_CS_DONE_INT_MASK = ADRASTEA_SI_CS_DONE_INT_MASK, + .d_SI_CS_START_LSB = ADRASTEA_SI_CS_START_LSB, + .d_SI_CS_START_MASK = ADRASTEA_SI_CS_START_MASK, + .d_SI_CS_RX_CNT_LSB = ADRASTEA_SI_CS_RX_CNT_LSB, + .d_SI_CS_RX_CNT_MASK = ADRASTEA_SI_CS_RX_CNT_MASK, + .d_SI_CS_TX_CNT_LSB = ADRASTEA_SI_CS_TX_CNT_LSB, + .d_SI_CS_TX_CNT_MASK = ADRASTEA_SI_CS_TX_CNT_MASK, + .d_BOARD_DATA_SZ = ADRASTEA_BOARD_DATA_SZ, + .d_BOARD_EXT_DATA_SZ = ADRASTEA_BOARD_EXT_DATA_SZ, + .d_MBOX_BASE_ADDRESS = ADRASTEA_MBOX_BASE_ADDRESS, + .d_LOCAL_SCRATCH_OFFSET = ADRASTEA_LOCAL_SCRATCH_OFFSET, + .d_CPU_CLOCK_OFFSET = ADRASTEA_CPU_CLOCK_OFFSET, + .d_LPO_CAL_OFFSET = ADRASTEA_LPO_CAL_OFFSET, + .d_GPIO_PIN10_OFFSET = ADRASTEA_GPIO_PIN10_OFFSET, + .d_GPIO_PIN11_OFFSET = ADRASTEA_GPIO_PIN11_OFFSET, + .d_GPIO_PIN12_OFFSET = ADRASTEA_GPIO_PIN12_OFFSET, + .d_GPIO_PIN13_OFFSET = ADRASTEA_GPIO_PIN13_OFFSET, + .d_CLOCK_GPIO_OFFSET = ADRASTEA_CLOCK_GPIO_OFFSET, + .d_CPU_CLOCK_STANDARD_LSB = ADRASTEA_CPU_CLOCK_STANDARD_LSB, + .d_CPU_CLOCK_STANDARD_MASK = ADRASTEA_CPU_CLOCK_STANDARD_MASK, + .d_LPO_CAL_ENABLE_LSB = ADRASTEA_LPO_CAL_ENABLE_LSB, + .d_LPO_CAL_ENABLE_MASK = ADRASTEA_LPO_CAL_ENABLE_MASK, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB = ADRASTEA_CLOCK_GPIO_BT_CLK_OUT_EN_LSB, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK = + ADRASTEA_CLOCK_GPIO_BT_CLK_OUT_EN_MASK, + .d_ANALOG_INTF_BASE_ADDRESS = ADRASTEA_ANALOG_INTF_BASE_ADDRESS, + .d_WLAN_MAC_BASE_ADDRESS = ADRASTEA_WLAN_MAC_BASE_ADDRESS, + .d_FW_INDICATOR_ADDRESS = ADRASTEA_FW_INDICATOR_ADDRESS, + .d_DRAM_BASE_ADDRESS = ADRASTEA_DRAM_BASE_ADDRESS, + 
.d_SOC_CORE_BASE_ADDRESS = ADRASTEA_SOC_CORE_BASE_ADDRESS, + .d_CORE_CTRL_ADDRESS = ADRASTEA_CORE_CTRL_ADDRESS, + .d_CE_COUNT = ADRASTEA_CE_COUNT, + .d_MSI_NUM_REQUEST = MSI_NUM_REQUEST, + .d_MSI_ASSIGN_FW = MSI_ASSIGN_FW, + .d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL, + .d_PCIE_INTR_ENABLE_ADDRESS = ADRASTEA_HOST_ENABLE_REGISTER, + .d_PCIE_INTR_CLR_ADDRESS = ADRASTEA_HOST_CLEAR_REGISTER, + .d_PCIE_INTR_FIRMWARE_MASK = ADRASTEA_PCIE_INTR_FIRMWARE_MASK, + .d_PCIE_INTR_CE_MASK_ALL = ADRASTEA_PCIE_INTR_CE_MASK_ALL, + .d_CORE_CTRL_CPU_INTR_MASK = ADRASTEA_CORE_CTRL_CPU_INTR_MASK, + .d_SR_WR_INDEX_ADDRESS = ADRASTEA_SR_WR_INDEX_OFFSET, + .d_DST_WATERMARK_ADDRESS = ADRASTEA_DST_WATERMARK_OFFSET, + /* htt_rx.c */ + .d_RX_MSDU_END_4_FIRST_MSDU_MASK = + ADRASTEA_RX_MSDU_END_4_FIRST_MSDU_MASK, + .d_RX_MSDU_END_4_FIRST_MSDU_LSB = ADRASTEA_RX_MSDU_END_4_FIRST_MSDU_LSB, + .d_RX_MPDU_START_0_SEQ_NUM_MASK = ADRASTEA_RX_MPDU_START_0_SEQ_NUM_MASK, + .d_RX_MPDU_START_0_SEQ_NUM_LSB = ADRASTEA_RX_MPDU_START_0_SEQ_NUM_LSB, + .d_RX_MPDU_START_2_PN_47_32_LSB = ADRASTEA_RX_MPDU_START_2_PN_47_32_LSB, + .d_RX_MPDU_START_2_PN_47_32_MASK = + ADRASTEA_RX_MPDU_START_2_PN_47_32_MASK, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK = + ADRASTEA_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB = + ADRASTEA_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB, + .d_RX_MSDU_END_4_LAST_MSDU_MASK = ADRASTEA_RX_MSDU_END_4_LAST_MSDU_MASK, + .d_RX_MSDU_END_4_LAST_MSDU_LSB = ADRASTEA_RX_MSDU_END_4_LAST_MSDU_LSB, + .d_RX_ATTENTION_0_MCAST_BCAST_MASK = + ADRASTEA_RX_ATTENTION_0_MCAST_BCAST_MASK, + .d_RX_ATTENTION_0_MCAST_BCAST_LSB = + ADRASTEA_RX_ATTENTION_0_MCAST_BCAST_LSB, + .d_RX_ATTENTION_0_FRAGMENT_MASK = ADRASTEA_RX_ATTENTION_0_FRAGMENT_MASK, + .d_RX_ATTENTION_0_FRAGMENT_LSB = ADRASTEA_RX_ATTENTION_0_FRAGMENT_LSB, + .d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK = + ADRASTEA_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK = + 
ADRASTEA_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB = + ADRASTEA_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB, + .d_RX_MSDU_START_0_MSDU_LENGTH_MASK = + ADRASTEA_RX_MSDU_START_0_MSDU_LENGTH_MASK, + .d_RX_MSDU_START_0_MSDU_LENGTH_LSB = + ADRASTEA_RX_MSDU_START_0_MSDU_LENGTH_LSB, + .d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET = + ADRASTEA_RX_MSDU_START_2_DECAP_FORMAT_OFFSET, + .d_RX_MSDU_START_2_DECAP_FORMAT_MASK = + ADRASTEA_RX_MSDU_START_2_DECAP_FORMAT_MASK, + .d_RX_MSDU_START_2_DECAP_FORMAT_LSB = + ADRASTEA_RX_MSDU_START_2_DECAP_FORMAT_LSB, + .d_RX_MPDU_START_0_ENCRYPTED_MASK = + ADRASTEA_RX_MPDU_START_0_ENCRYPTED_MASK, + .d_RX_MPDU_START_0_ENCRYPTED_LSB = + ADRASTEA_RX_MPDU_START_0_ENCRYPTED_LSB, + .d_RX_ATTENTION_0_MORE_DATA_MASK = + ADRASTEA_RX_ATTENTION_0_MORE_DATA_MASK, + .d_RX_ATTENTION_0_MSDU_DONE_MASK = + ADRASTEA_RX_ATTENTION_0_MSDU_DONE_MASK, + .d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK = + ADRASTEA_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK, + + /* PLL start */ + .d_EFUSE_OFFSET = ADRASTEA_EFUSE_OFFSET, + .d_EFUSE_XTAL_SEL_MSB = ADRASTEA_EFUSE_XTAL_SEL_MSB, + .d_EFUSE_XTAL_SEL_LSB = ADRASTEA_EFUSE_XTAL_SEL_LSB, + .d_EFUSE_XTAL_SEL_MASK = ADRASTEA_EFUSE_XTAL_SEL_MASK, + .d_BB_PLL_CONFIG_OFFSET = ADRASTEA_BB_PLL_CONFIG_OFFSET, + .d_BB_PLL_CONFIG_OUTDIV_MSB = ADRASTEA_BB_PLL_CONFIG_OUTDIV_MSB, + .d_BB_PLL_CONFIG_OUTDIV_LSB = ADRASTEA_BB_PLL_CONFIG_OUTDIV_LSB, + .d_BB_PLL_CONFIG_OUTDIV_MASK = ADRASTEA_BB_PLL_CONFIG_OUTDIV_MASK, + .d_BB_PLL_CONFIG_FRAC_MSB = ADRASTEA_BB_PLL_CONFIG_FRAC_MSB, + .d_BB_PLL_CONFIG_FRAC_LSB = ADRASTEA_BB_PLL_CONFIG_FRAC_LSB, + .d_BB_PLL_CONFIG_FRAC_MASK = ADRASTEA_BB_PLL_CONFIG_FRAC_MASK, + .d_WLAN_PLL_SETTLE_TIME_MSB = ADRASTEA_WLAN_PLL_SETTLE_TIME_MSB, + .d_WLAN_PLL_SETTLE_TIME_LSB = ADRASTEA_WLAN_PLL_SETTLE_TIME_LSB, + .d_WLAN_PLL_SETTLE_TIME_MASK = ADRASTEA_WLAN_PLL_SETTLE_TIME_MASK, + .d_WLAN_PLL_SETTLE_OFFSET = ADRASTEA_WLAN_PLL_SETTLE_OFFSET, + .d_WLAN_PLL_SETTLE_SW_MASK = ADRASTEA_WLAN_PLL_SETTLE_SW_MASK, 
+ .d_WLAN_PLL_SETTLE_RSTMASK = ADRASTEA_WLAN_PLL_SETTLE_RSTMASK, + .d_WLAN_PLL_SETTLE_RESET = ADRASTEA_WLAN_PLL_SETTLE_RESET, + .d_WLAN_PLL_CONTROL_NOPWD_MSB = ADRASTEA_WLAN_PLL_CONTROL_NOPWD_MSB, + .d_WLAN_PLL_CONTROL_NOPWD_LSB = ADRASTEA_WLAN_PLL_CONTROL_NOPWD_LSB, + .d_WLAN_PLL_CONTROL_NOPWD_MASK = ADRASTEA_WLAN_PLL_CONTROL_NOPWD_MASK, + .d_WLAN_PLL_CONTROL_BYPASS_MSB = ADRASTEA_WLAN_PLL_CONTROL_BYPASS_MSB, + .d_WLAN_PLL_CONTROL_BYPASS_LSB = ADRASTEA_WLAN_PLL_CONTROL_BYPASS_LSB, + .d_WLAN_PLL_CONTROL_BYPASS_MASK = ADRASTEA_WLAN_PLL_CONTROL_BYPASS_MASK, + .d_WLAN_PLL_CONTROL_BYPASS_RESET = + ADRASTEA_WLAN_PLL_CONTROL_BYPASS_RESET, + .d_WLAN_PLL_CONTROL_CLK_SEL_MSB = ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_MSB, + .d_WLAN_PLL_CONTROL_CLK_SEL_LSB = ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_LSB, + .d_WLAN_PLL_CONTROL_CLK_SEL_MASK = + ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_MASK, + .d_WLAN_PLL_CONTROL_CLK_SEL_RESET = + ADRASTEA_WLAN_PLL_CONTROL_CLK_SEL_RESET, + .d_WLAN_PLL_CONTROL_REFDIV_MSB = ADRASTEA_WLAN_PLL_CONTROL_REFDIV_MSB, + .d_WLAN_PLL_CONTROL_REFDIV_LSB = ADRASTEA_WLAN_PLL_CONTROL_REFDIV_LSB, + .d_WLAN_PLL_CONTROL_REFDIV_MASK = ADRASTEA_WLAN_PLL_CONTROL_REFDIV_MASK, + .d_WLAN_PLL_CONTROL_REFDIV_RESET = + ADRASTEA_WLAN_PLL_CONTROL_REFDIV_RESET, + .d_WLAN_PLL_CONTROL_DIV_MSB = ADRASTEA_WLAN_PLL_CONTROL_DIV_MSB, + .d_WLAN_PLL_CONTROL_DIV_LSB = ADRASTEA_WLAN_PLL_CONTROL_DIV_LSB, + .d_WLAN_PLL_CONTROL_DIV_MASK = ADRASTEA_WLAN_PLL_CONTROL_DIV_MASK, + .d_WLAN_PLL_CONTROL_DIV_RESET = ADRASTEA_WLAN_PLL_CONTROL_DIV_RESET, + .d_WLAN_PLL_CONTROL_OFFSET = ADRASTEA_WLAN_PLL_CONTROL_OFFSET, + .d_WLAN_PLL_CONTROL_SW_MASK = ADRASTEA_WLAN_PLL_CONTROL_SW_MASK, + .d_WLAN_PLL_CONTROL_RSTMASK = ADRASTEA_WLAN_PLL_CONTROL_RSTMASK, + .d_WLAN_PLL_CONTROL_RESET = ADRASTEA_WLAN_PLL_CONTROL_RESET, + .d_SOC_CORE_CLK_CTRL_OFFSET = ADRASTEA_SOC_CORE_CLK_CTRL_OFFSET, + .d_SOC_CORE_CLK_CTRL_DIV_MSB = ADRASTEA_SOC_CORE_CLK_CTRL_DIV_MSB, + .d_SOC_CORE_CLK_CTRL_DIV_LSB = ADRASTEA_SOC_CORE_CLK_CTRL_DIV_LSB, + 
.d_SOC_CORE_CLK_CTRL_DIV_MASK = ADRASTEA_SOC_CORE_CLK_CTRL_DIV_MASK, + .d_RTC_SYNC_STATUS_PLL_CHANGING_MSB = + ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_MSB, + .d_RTC_SYNC_STATUS_PLL_CHANGING_LSB = + ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_LSB, + .d_RTC_SYNC_STATUS_PLL_CHANGING_MASK = + ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_MASK, + .d_RTC_SYNC_STATUS_PLL_CHANGING_RESET = + ADRASTEA_RTC_SYNC_STATUS_PLL_CHANGING_RESET, + .d_RTC_SYNC_STATUS_OFFSET = ADRASTEA_RTC_SYNC_STATUS_OFFSET, + .d_SOC_CPU_CLOCK_OFFSET = ADRASTEA_SOC_CPU_CLOCK_OFFSET, + .d_SOC_CPU_CLOCK_STANDARD_MSB = ADRASTEA_SOC_CPU_CLOCK_STANDARD_MSB, + .d_SOC_CPU_CLOCK_STANDARD_LSB = ADRASTEA_SOC_CPU_CLOCK_STANDARD_LSB, + .d_SOC_CPU_CLOCK_STANDARD_MASK = ADRASTEA_SOC_CPU_CLOCK_STANDARD_MASK, + /* PLL end */ + .d_SOC_POWER_REG_OFFSET = ADRASTEA_SOC_POWER_REG_OFFSET, + .d_PCIE_INTR_CAUSE_ADDRESS = ADRASTEA_HOST_CAUSE_REGISTER, + .d_SOC_RESET_CONTROL_ADDRESS = ADRASTEA_SOC_RESET_CONTROL_ADDRESS, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK = + ADRASTEA_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB = + ADRASTEA_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB, + .d_SOC_RESET_CONTROL_CE_RST_MASK = + ADRASTEA_SOC_RESET_CONTROL_CE_RST_MASK, + .d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK = + ADRASTEA_SOC_RESET_CONTROL_CPU_WARM_RST_MASK, + .d_CPU_INTR_ADDRESS = ADRASTEA_CPU_INTR_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ADDRESS = + ADRASTEA_SOC_LF_TIMER_CONTROL0_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK = + ADRASTEA_SOC_LF_TIMER_CONTROL0_ENABLE_MASK, + /* chip id start */ + .d_SOC_CHIP_ID_ADDRESS = ADRASTEA_SOC_CHIP_ID_ADDRESS, + .d_SOC_CHIP_ID_VERSION_MASK = ADRASTEA_SOC_CHIP_ID_VERSION_MASK, + .d_SOC_CHIP_ID_VERSION_LSB = ADRASTEA_SOC_CHIP_ID_VERSION_LSB, + .d_SOC_CHIP_ID_REVISION_MASK = ADRASTEA_SOC_CHIP_ID_REVISION_MASK, + .d_SOC_CHIP_ID_REVISION_LSB = ADRASTEA_SOC_CHIP_ID_REVISION_LSB, + /* chip id end */ + .d_A_SOC_CORE_SCRATCH_0_ADDRESS = ADRASTEA_A_SOC_CORE_SCRATCH_0_ADDRESS, + 
.d_A_SOC_CORE_SPARE_0_REGISTER = ADRASTEA_A_SOC_CORE_SPARE_0_REGISTER, + .d_PCIE_INTR_FIRMWARE_ROUTE_MASK = + ADRASTEA_PCIE_INTR_FIRMWARE_ROUTE_MASK, + .d_A_SOC_CORE_PCIE_INTR_CAUSE_GRP1 = + ADRASTEA_A_SOC_CORE_PCIE_INTR_CAUSE_GRP1, + .d_A_SOC_CORE_SPARE_1_REGISTER = + ADRASTEA_A_SOC_CORE_SPARE_1_REGISTER, + .d_A_SOC_CORE_PCIE_INTR_CLR_GRP1 = + ADRASTEA_A_SOC_CORE_PCIE_INTR_CLR_GRP1, + .d_A_SOC_CORE_PCIE_INTR_ENABLE_GRP1 = + ADRASTEA_A_SOC_CORE_PCIE_INTR_ENABLE_GRP1, + .d_A_SOC_PCIE_PCIE_SCRATCH_0 = ADRASTEA_A_SOC_PCIE_PCIE_SCRATCH_0, + .d_A_SOC_PCIE_PCIE_SCRATCH_1 = ADRASTEA_A_SOC_PCIE_PCIE_SCRATCH_1, + .d_A_WIFI_APB_1_A_WFSS_CE_TARGET_HOST_DELTA = + ADRASTEA_A_WIFI_APB_1_A_WFSS_CE_TARGET_HOST_DELTA, + .d_A_SOC_PCIE_PCIE_SCRATCH_2 = ADRASTEA_A_SOC_PCIE_PCIE_SCRATCH_2, + .d_A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK = + ADRASTEA_A_SOC_CORE_PCIE_INTR_ENABLE_GRP0_Q6_MASK, + .d_WLAN_DEBUG_INPUT_SEL_OFFSET = ADRASTEA_WLAN_DEBUG_INPUT_SEL_OFFSET, + .d_WLAN_DEBUG_INPUT_SEL_SRC_MSB = ADRASTEA_WLAN_DEBUG_INPUT_SEL_SRC_MSB, + .d_WLAN_DEBUG_INPUT_SEL_SRC_LSB = ADRASTEA_WLAN_DEBUG_INPUT_SEL_SRC_LSB, + .d_WLAN_DEBUG_INPUT_SEL_SRC_MASK = + ADRASTEA_WLAN_DEBUG_INPUT_SEL_SRC_MASK, + .d_WLAN_DEBUG_CONTROL_OFFSET = ADRASTEA_WLAN_DEBUG_CONTROL_OFFSET, + .d_WLAN_DEBUG_CONTROL_ENABLE_MSB = + ADRASTEA_WLAN_DEBUG_CONTROL_ENABLE_MSB, + .d_WLAN_DEBUG_CONTROL_ENABLE_LSB = + ADRASTEA_WLAN_DEBUG_CONTROL_ENABLE_LSB, + .d_WLAN_DEBUG_CONTROL_ENABLE_MASK = + ADRASTEA_WLAN_DEBUG_CONTROL_ENABLE_MASK, + .d_WLAN_DEBUG_OUT_OFFSET = ADRASTEA_WLAN_DEBUG_OUT_OFFSET, + .d_WLAN_DEBUG_OUT_DATA_MSB = ADRASTEA_WLAN_DEBUG_OUT_DATA_MSB, + .d_WLAN_DEBUG_OUT_DATA_LSB = ADRASTEA_WLAN_DEBUG_OUT_DATA_LSB, + .d_WLAN_DEBUG_OUT_DATA_MASK = ADRASTEA_WLAN_DEBUG_OUT_DATA_MASK, + .d_AMBA_DEBUG_BUS_OFFSET = ADRASTEA_AMBA_DEBUG_BUS_OFFSET, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB = + ADRASTEA_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB = + ADRASTEA_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB, + 
.d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK = + ADRASTEA_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK, + .d_AMBA_DEBUG_BUS_SEL_MSB = ADRASTEA_AMBA_DEBUG_BUS_SEL_MSB, + .d_AMBA_DEBUG_BUS_SEL_LSB = ADRASTEA_AMBA_DEBUG_BUS_SEL_LSB, + .d_AMBA_DEBUG_BUS_SEL_MASK = ADRASTEA_AMBA_DEBUG_BUS_SEL_MASK, + +#ifdef QCA_WIFI_3_0_ADRASTEA + .d_Q6_ENABLE_REGISTER_0 = ADRASTEA_Q6_ENABLE_REGISTER_0, + .d_Q6_ENABLE_REGISTER_1 = ADRASTEA_Q6_ENABLE_REGISTER_1, + .d_Q6_CAUSE_REGISTER_0 = ADRASTEA_Q6_CAUSE_REGISTER_0, + .d_Q6_CAUSE_REGISTER_1 = ADRASTEA_Q6_CAUSE_REGISTER_1, + .d_Q6_CLEAR_REGISTER_0 = ADRASTEA_Q6_CLEAR_REGISTER_0, + .d_Q6_CLEAR_REGISTER_1 = ADRASTEA_Q6_CLEAR_REGISTER_1, +#endif + +#ifdef CONFIG_BYPASS_QMI + .d_BYPASS_QMI_TEMP_REGISTER = ADRASTEA_BYPASS_QMI_TEMP_REGISTER, +#endif +}; + +struct hostdef_s adrastea_hostdef = { + .d_INT_STATUS_ENABLE_ERROR_LSB = ADRASTEA_INT_STATUS_ENABLE_ERROR_LSB, + .d_INT_STATUS_ENABLE_ERROR_MASK = ADRASTEA_INT_STATUS_ENABLE_ERROR_MASK, + .d_INT_STATUS_ENABLE_CPU_LSB = ADRASTEA_INT_STATUS_ENABLE_CPU_LSB, + .d_INT_STATUS_ENABLE_CPU_MASK = ADRASTEA_INT_STATUS_ENABLE_CPU_MASK, + .d_INT_STATUS_ENABLE_COUNTER_LSB = + ADRASTEA_INT_STATUS_ENABLE_COUNTER_LSB, + .d_INT_STATUS_ENABLE_COUNTER_MASK = + ADRASTEA_INT_STATUS_ENABLE_COUNTER_MASK, + .d_INT_STATUS_ENABLE_MBOX_DATA_LSB = + ADRASTEA_INT_STATUS_ENABLE_MBOX_DATA_LSB, + .d_INT_STATUS_ENABLE_MBOX_DATA_MASK = + ADRASTEA_INT_STATUS_ENABLE_MBOX_DATA_MASK, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB = + ADRASTEA_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK = + ADRASTEA_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB = + ADRASTEA_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK = + ADRASTEA_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, + .d_COUNTER_INT_STATUS_ENABLE_BIT_LSB = + ADRASTEA_COUNTER_INT_STATUS_ENABLE_BIT_LSB, + .d_COUNTER_INT_STATUS_ENABLE_BIT_MASK = + ADRASTEA_COUNTER_INT_STATUS_ENABLE_BIT_MASK, + 
.d_INT_STATUS_ENABLE_ADDRESS = ADRASTEA_INT_STATUS_ENABLE_ADDRESS, + .d_CPU_INT_STATUS_ENABLE_BIT_LSB = + ADRASTEA_CPU_INT_STATUS_ENABLE_BIT_LSB, + .d_CPU_INT_STATUS_ENABLE_BIT_MASK = + ADRASTEA_CPU_INT_STATUS_ENABLE_BIT_MASK, + .d_HOST_INT_STATUS_ADDRESS = ADRASTEA_HOST_INT_STATUS_ADDRESS, + .d_CPU_INT_STATUS_ADDRESS = ADRASTEA_CPU_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_ADDRESS = ADRASTEA_ERROR_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_WAKEUP_MASK = ADRASTEA_ERROR_INT_STATUS_WAKEUP_MASK, + .d_ERROR_INT_STATUS_WAKEUP_LSB = ADRASTEA_ERROR_INT_STATUS_WAKEUP_LSB, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK = + ADRASTEA_ERROR_INT_STATUS_RX_UNDERFLOW_MASK, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB = + ADRASTEA_ERROR_INT_STATUS_RX_UNDERFLOW_LSB, + .d_ERROR_INT_STATUS_TX_OVERFLOW_MASK = + ADRASTEA_ERROR_INT_STATUS_TX_OVERFLOW_MASK, + .d_ERROR_INT_STATUS_TX_OVERFLOW_LSB = + ADRASTEA_ERROR_INT_STATUS_TX_OVERFLOW_LSB, + .d_COUNT_DEC_ADDRESS = ADRASTEA_COUNT_DEC_ADDRESS, + .d_HOST_INT_STATUS_CPU_MASK = ADRASTEA_HOST_INT_STATUS_CPU_MASK, + .d_HOST_INT_STATUS_CPU_LSB = ADRASTEA_HOST_INT_STATUS_CPU_LSB, + .d_HOST_INT_STATUS_ERROR_MASK = ADRASTEA_HOST_INT_STATUS_ERROR_MASK, + .d_HOST_INT_STATUS_ERROR_LSB = ADRASTEA_HOST_INT_STATUS_ERROR_LSB, + .d_HOST_INT_STATUS_COUNTER_MASK = ADRASTEA_HOST_INT_STATUS_COUNTER_MASK, + .d_HOST_INT_STATUS_COUNTER_LSB = ADRASTEA_HOST_INT_STATUS_COUNTER_LSB, + .d_RX_LOOKAHEAD_VALID_ADDRESS = ADRASTEA_RX_LOOKAHEAD_VALID_ADDRESS, + .d_WINDOW_DATA_ADDRESS = ADRASTEA_WINDOW_DATA_ADDRESS, + .d_WINDOW_READ_ADDR_ADDRESS = ADRASTEA_WINDOW_READ_ADDR_ADDRESS, + .d_WINDOW_WRITE_ADDR_ADDRESS = ADRASTEA_WINDOW_WRITE_ADDR_ADDRESS, + .d_SOC_GLOBAL_RESET_ADDRESS = ADRASTEA_SOC_GLOBAL_RESET_ADDRESS, + .d_RTC_STATE_ADDRESS = ADRASTEA_RTC_STATE_ADDRESS, + .d_RTC_STATE_COLD_RESET_MASK = ADRASTEA_RTC_STATE_COLD_RESET_MASK, + .d_PCIE_LOCAL_BASE_ADDRESS = ADRASTEA_PCIE_LOCAL_BASE_ADDRESS, + .d_PCIE_SOC_WAKE_RESET = ADRASTEA_PCIE_SOC_WAKE_RESET, + .d_PCIE_SOC_WAKE_ADDRESS = 
ADRASTEA_PCIE_SOC_WAKE_ADDRESS, + .d_PCIE_SOC_WAKE_V_MASK = ADRASTEA_PCIE_SOC_WAKE_V_MASK, + .d_RTC_STATE_V_MASK = ADRASTEA_RTC_STATE_V_MASK, + .d_RTC_STATE_V_LSB = ADRASTEA_RTC_STATE_V_LSB, + .d_FW_IND_EVENT_PENDING = ADRASTEA_FW_IND_EVENT_PENDING, + .d_FW_IND_INITIALIZED = ADRASTEA_FW_IND_INITIALIZED, + .d_FW_IND_HELPER = ADRASTEA_FW_IND_HELPER, + .d_RTC_STATE_V_ON = ADRASTEA_RTC_STATE_V_ON, +#if defined(SDIO_3_0) + .d_HOST_INT_STATUS_MBOX_DATA_MASK = + ADRASTEA_HOST_INT_STATUS_MBOX_DATA_MASK, + .d_HOST_INT_STATUS_MBOX_DATA_LSB = + ADRASTEA_HOST_INT_STATUS_MBOX_DATA_LSB, +#endif + .d_PCIE_SOC_RDY_STATUS_ADDRESS = PCIE_SOC_RDY_STATUS_ADDRESS, + .d_PCIE_SOC_RDY_STATUS_BAR_MASK = PCIE_SOC_RDY_STATUS_BAR_MASK, + .d_SOC_PCIE_BASE_ADDRESS = SOC_PCIE_BASE_ADDRESS, + .d_MSI_MAGIC_ADR_ADDRESS = MSI_MAGIC_ADR_ADDRESS, + .d_MSI_MAGIC_ADDRESS = MSI_MAGIC_ADDRESS, + .d_HOST_CE_COUNT = ADRASTEA_CE_COUNT, + .d_ENABLE_MSI = 0, + .d_MUX_ID_MASK = 0xf000, + .d_TRANSACTION_ID_MASK = 0x0fff, + .d_DESC_DATA_FLAG_MASK = 0x1FFFE3E0, + .d_A_SOC_PCIE_PCIE_BAR0_START = ADRASTEA_A_SOC_PCIE_PCIE_BAR0_START, +}; + + +struct ce_reg_def adrastea_ce_targetdef = { + /* copy_engine.c */ + .d_DST_WR_INDEX_ADDRESS = ADRASTEA_DST_WR_INDEX_OFFSET, + .d_SRC_WATERMARK_ADDRESS = ADRASTEA_SRC_WATERMARK_OFFSET, + .d_SRC_WATERMARK_LOW_MASK = ADRASTEA_SRC_WATERMARK_LOW_MASK, + .d_SRC_WATERMARK_HIGH_MASK = ADRASTEA_SRC_WATERMARK_HIGH_MASK, + .d_DST_WATERMARK_LOW_MASK = ADRASTEA_DST_WATERMARK_LOW_MASK, + .d_DST_WATERMARK_HIGH_MASK = ADRASTEA_DST_WATERMARK_HIGH_MASK, + .d_CURRENT_SRRI_ADDRESS = ADRASTEA_CURRENT_SRRI_OFFSET, + .d_CURRENT_DRRI_ADDRESS = ADRASTEA_CURRENT_DRRI_OFFSET, + .d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK = + ADRASTEA_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK = + ADRASTEA_HOST_IS_SRC_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK = + ADRASTEA_HOST_IS_DST_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_DST_RING_LOW_WATERMARK_MASK = + 
ADRASTEA_HOST_IS_DST_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_ADDRESS = ADRASTEA_HOST_IS_OFFSET, + .d_MISC_IS_ADDRESS = ADRASTEA_MISC_IS_OFFSET, + .d_HOST_IS_COPY_COMPLETE_MASK = ADRASTEA_HOST_IS_COPY_COMPLETE_MASK, + .d_CE_WRAPPER_BASE_ADDRESS = ADRASTEA_CE_WRAPPER_BASE_ADDRESS, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS = + ADRASTEA_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS_OFFSET, + .d_CE_DDR_ADDRESS_FOR_RRI_LOW = + ADRASTEA_CE_DDR_ADDRESS_FOR_RRI_LOW, + .d_CE_DDR_ADDRESS_FOR_RRI_HIGH = + ADRASTEA_CE_DDR_ADDRESS_FOR_RRI_HIGH, + .d_HOST_IE_ADDRESS = ADRASTEA_HOST_IE_OFFSET, + .d_HOST_IE_COPY_COMPLETE_MASK = ADRASTEA_HOST_IE_COPY_COMPLETE_MASK, + .d_SR_BA_ADDRESS = ADRASTEA_SR_BA_OFFSET, + .d_SR_BA_ADDRESS_HIGH = ADRASTEA_SR_BA_HIGH_OFFSET, + .d_SR_SIZE_ADDRESS = ADRASTEA_SR_SIZE_OFFSET, + .d_CE_CTRL1_ADDRESS = ADRASTEA_CE_CTRL1_OFFSET, + .d_CE_CTRL1_DMAX_LENGTH_MASK = ADRASTEA_CE_CTRL1_DMAX_LENGTH_MASK, + .d_DR_BA_ADDRESS = ADRASTEA_DR_BA_OFFSET, + .d_DR_BA_ADDRESS_HIGH = ADRASTEA_DR_BA_HIGH_OFFSET, + .d_DR_SIZE_ADDRESS = ADRASTEA_DR_SIZE_OFFSET, + .d_CE_CMD_REGISTER = ADRASTEA_CE_CMD_REGISTER_OFFSET, + .d_CE_MSI_ADDRESS = MISSING_FOR_ADRASTEA, + .d_CE_MSI_ADDRESS_HIGH = MISSING_FOR_ADRASTEA, + .d_CE_MSI_DATA = MISSING_FOR_ADRASTEA, + .d_CE_MSI_ENABLE_BIT = MISSING_FOR_ADRASTEA, + .d_MISC_IE_ADDRESS = ADRASTEA_MISC_IE_OFFSET, + .d_MISC_IS_AXI_ERR_MASK = ADRASTEA_MISC_IS_AXI_ERR_MASK, + .d_MISC_IS_DST_ADDR_ERR_MASK = ADRASTEA_MISC_IS_DST_ADDR_ERR_MASK, + .d_MISC_IS_SRC_LEN_ERR_MASK = ADRASTEA_MISC_IS_SRC_LEN_ERR_MASK, + .d_MISC_IS_DST_MAX_LEN_VIO_MASK = ADRASTEA_MISC_IS_DST_MAX_LEN_VIO_MASK, + .d_MISC_IS_DST_RING_OVERFLOW_MASK = + ADRASTEA_MISC_IS_DST_RING_OVERFLOW_MASK, + .d_MISC_IS_SRC_RING_OVERFLOW_MASK = + ADRASTEA_MISC_IS_SRC_RING_OVERFLOW_MASK, + .d_SRC_WATERMARK_LOW_LSB = ADRASTEA_SRC_WATERMARK_LOW_LSB, + .d_SRC_WATERMARK_HIGH_LSB = ADRASTEA_SRC_WATERMARK_HIGH_LSB, + .d_DST_WATERMARK_LOW_LSB = ADRASTEA_DST_WATERMARK_LOW_LSB, + .d_DST_WATERMARK_HIGH_LSB = 
ADRASTEA_DST_WATERMARK_HIGH_LSB, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK = + ADRASTEA_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB = + ADRASTEA_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB, + .d_CE_CTRL1_DMAX_LENGTH_LSB = ADRASTEA_CE_CTRL1_DMAX_LENGTH_LSB, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK = + ADRASTEA_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK = + ADRASTEA_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB = + ADRASTEA_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB = + ADRASTEA_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB, + .d_CE_CTRL1_IDX_UPD_EN_MASK = + ADRASTEA_A_WCSS_HM_A_WIFI_APB_1_A_WFSS_CE0_CE_CTRL1__IDX_UPD_EN___M, + .d_CE_WRAPPER_DEBUG_OFFSET = ADRASTEA_CE_WRAPPER_DEBUG_OFFSET, + .d_CE_WRAPPER_DEBUG_SEL_MSB = ADRASTEA_CE_WRAPPER_DEBUG_SEL_MSB, + .d_CE_WRAPPER_DEBUG_SEL_LSB = ADRASTEA_CE_WRAPPER_DEBUG_SEL_LSB, + .d_CE_WRAPPER_DEBUG_SEL_MASK = ADRASTEA_CE_WRAPPER_DEBUG_SEL_MASK, + .d_CE_DEBUG_OFFSET = ADRASTEA_CE_DEBUG_OFFSET, + .d_CE_DEBUG_SEL_MSB = ADRASTEA_CE_DEBUG_SEL_MSB, + .d_CE_DEBUG_SEL_LSB = ADRASTEA_CE_DEBUG_SEL_LSB, + .d_CE_DEBUG_SEL_MASK = ADRASTEA_CE_DEBUG_SEL_MASK, + .d_CE0_BASE_ADDRESS = ADRASTEA_CE0_BASE_ADDRESS, + .d_CE1_BASE_ADDRESS = ADRASTEA_CE1_BASE_ADDRESS, + .d_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES = + MISSING_FOR_ADRASTEA, + .d_A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS = + MISSING_FOR_ADRASTEA, +}; + + +struct host_shadow_regs_s adrastea_host_shadow_regs = { + .d_A_LOCAL_SHADOW_REG_VALUE_0 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_0, + .d_A_LOCAL_SHADOW_REG_VALUE_1 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_1, + .d_A_LOCAL_SHADOW_REG_VALUE_2 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_2, + .d_A_LOCAL_SHADOW_REG_VALUE_3 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_3, + .d_A_LOCAL_SHADOW_REG_VALUE_4 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_4, + .d_A_LOCAL_SHADOW_REG_VALUE_5 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_5, + 
.d_A_LOCAL_SHADOW_REG_VALUE_6 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_6, + .d_A_LOCAL_SHADOW_REG_VALUE_7 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_7, + .d_A_LOCAL_SHADOW_REG_VALUE_8 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_8, + .d_A_LOCAL_SHADOW_REG_VALUE_9 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_9, + .d_A_LOCAL_SHADOW_REG_VALUE_10 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_10, + .d_A_LOCAL_SHADOW_REG_VALUE_11 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_11, + .d_A_LOCAL_SHADOW_REG_VALUE_12 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_12, + .d_A_LOCAL_SHADOW_REG_VALUE_13 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_13, + .d_A_LOCAL_SHADOW_REG_VALUE_14 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_14, + .d_A_LOCAL_SHADOW_REG_VALUE_15 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_15, + .d_A_LOCAL_SHADOW_REG_VALUE_16 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_16, + .d_A_LOCAL_SHADOW_REG_VALUE_17 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_17, + .d_A_LOCAL_SHADOW_REG_VALUE_18 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_18, + .d_A_LOCAL_SHADOW_REG_VALUE_19 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_19, + .d_A_LOCAL_SHADOW_REG_VALUE_20 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_20, + .d_A_LOCAL_SHADOW_REG_VALUE_21 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_21, + .d_A_LOCAL_SHADOW_REG_VALUE_22 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_22, + .d_A_LOCAL_SHADOW_REG_VALUE_23 = + ADRASTEA_A_LOCAL_SHADOW_REG_VALUE_23, + .d_A_LOCAL_SHADOW_REG_ADDRESS_0 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_0, + .d_A_LOCAL_SHADOW_REG_ADDRESS_1 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_1, + .d_A_LOCAL_SHADOW_REG_ADDRESS_2 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_2, + .d_A_LOCAL_SHADOW_REG_ADDRESS_3 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_3, + .d_A_LOCAL_SHADOW_REG_ADDRESS_4 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_4, + .d_A_LOCAL_SHADOW_REG_ADDRESS_5 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_5, + .d_A_LOCAL_SHADOW_REG_ADDRESS_6 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_6, + .d_A_LOCAL_SHADOW_REG_ADDRESS_7 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_7, + .d_A_LOCAL_SHADOW_REG_ADDRESS_8 = + 
ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_8, + .d_A_LOCAL_SHADOW_REG_ADDRESS_9 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_9, + .d_A_LOCAL_SHADOW_REG_ADDRESS_10 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_10, + .d_A_LOCAL_SHADOW_REG_ADDRESS_11 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_11, + .d_A_LOCAL_SHADOW_REG_ADDRESS_12 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_12, + .d_A_LOCAL_SHADOW_REG_ADDRESS_13 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_13, + .d_A_LOCAL_SHADOW_REG_ADDRESS_14 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_14, + .d_A_LOCAL_SHADOW_REG_ADDRESS_15 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_15, + .d_A_LOCAL_SHADOW_REG_ADDRESS_16 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_16, + .d_A_LOCAL_SHADOW_REG_ADDRESS_17 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_17, + .d_A_LOCAL_SHADOW_REG_ADDRESS_18 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_18, + .d_A_LOCAL_SHADOW_REG_ADDRESS_19 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_19, + .d_A_LOCAL_SHADOW_REG_ADDRESS_20 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_20, + .d_A_LOCAL_SHADOW_REG_ADDRESS_21 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_21, + .d_A_LOCAL_SHADOW_REG_ADDRESS_22 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_22, + .d_A_LOCAL_SHADOW_REG_ADDRESS_23 = + ADRASTEA_A_LOCAL_SHADOW_REG_ADDRESS_23 +}; + +#endif /* ADRASTEA_REG_DEF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ar6004def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ar6004def.c new file mode 100644 index 0000000000000000000000000000000000000000..88bdcc5df2320dfbbe3a0574e4b27a1d4a5eb646 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ar6004def.c @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2013,2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if defined(AR6004_HEADERS_DEF) +#define AR6004 1 + +#define WLAN_HEADERS 1 +#include "common_drv.h" +#include "AR6004/hw/apb_map.h" +#include "AR6004/hw/gpio_reg.h" +#include "AR6004/hw/rtc_reg.h" +#include "AR6004/hw/si_reg.h" +#include "AR6004/hw/mbox_reg.h" +#include "AR6004/hw/mbox_wlan_host_reg.h" + +#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET +#define SCRATCH_BASE_ADDRESS MBOX_BASE_ADDRESS + +#define MY_TARGET_DEF AR6004_TARGETdef +#define MY_HOST_DEF AR6004_HOSTdef +#define MY_CEREG_DEF AR6004_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ AR6004_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ AR6004_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *AR6004_TARGETdef; +struct hostdef_s *AR6004_HOSTdef; +#endif /*AR6004_HEADERS_DEF */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320def.c new file mode 100644 index 0000000000000000000000000000000000000000..5440ecdecd9a1462abfcfaa892d23e3ef1e919b3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320def.c @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2013,2016 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if defined(AR6320_HEADERS_DEF) +#define AR6320 1 + +#define WLAN_HEADERS 1 +#include "common_drv.h" +#include "AR6320/hw/apb_map.h" +#include "AR6320/hw/gpio_reg.h" +#include "AR6320/hw/rtc_reg.h" +#include "AR6320/extra/hw/si_reg.h" +#include "AR6320/hw/mbox_reg.h" +#include "AR6320/extra/hw/ce_reg_csr.h" +#include "AR6320/hw/mbox_wlan_host_reg.h" +#include "soc_addrs.h" +#include "AR6320/extra/hw/soc_core_reg.h" +#include "AR6320/hw/pcie_local_reg.h" +#include "AR6320/hw/soc_pcie_reg.h" + +#ifndef SYSTEM_SLEEP_OFFSET +#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET +#endif +#ifndef WLAN_SYSTEM_SLEEP_OFFSET +#define WLAN_SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET +#endif +#ifndef WLAN_RESET_CONTROL_OFFSET +#define WLAN_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_OFFSET +#endif +#ifndef RESET_CONTROL_SI0_RST_MASK +#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK +#endif +#ifndef SI_BASE_ADDRESS +#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS +#endif +#ifndef PCIE_LOCAL_BASE_ADDRESS +/* TBDXXX: Eventually, this Base Address will be defined in HW header files */ +#define PCIE_LOCAL_BASE_ADDRESS 0x80000 +#endif +#ifndef RTC_STATE_V_ON +#define RTC_STATE_V_ON 3 +#endif + +#define MY_TARGET_DEF 
AR6320_TARGETdef +#define MY_HOST_DEF AR6320_HOSTdef +#define MY_CEREG_DEF AR6320_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ AR6320_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ AR6320_BOARD_EXT_DATA_SZ +#define DRAM_BASE_ADDRESS TARG_DRAM_START +#include "targetdef.h" +#include "hostdef.h" +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *AR6320_TARGETdef; +struct hostdef_s *AR6320_HOSTdef; +#endif /* AR6320_HEADERS_DEF */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320def.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320def.h new file mode 100644 index 0000000000000000000000000000000000000000..77ab1c52722575a7e741e5a0f026c725d3e384cc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320def.h @@ -0,0 +1,796 @@ +/* + * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _AR6320DEF_H_ +#define _AR6320DEF_H_ + +/* Base Addresses */ +#define AR6320_RTC_SOC_BASE_ADDRESS 0x00000000 +#define AR6320_RTC_WMAC_BASE_ADDRESS 0x00001000 +#define AR6320_MAC_COEX_BASE_ADDRESS 0x0000f000 +#define AR6320_BT_COEX_BASE_ADDRESS 0x00002000 +#define AR6320_SOC_CORE_BASE_ADDRESS 0x0003a000 +#define AR6320_WLAN_UART_BASE_ADDRESS 0x0000c000 +#define AR6320_WLAN_SI_BASE_ADDRESS 0x00010000 +#define AR6320_WLAN_GPIO_BASE_ADDRESS 0x00005000 +#define AR6320_WLAN_ANALOG_INTF_BASE_ADDRESS 0x00006000 +#define AR6320_WLAN_MAC_BASE_ADDRESS 0x00010000 +#define AR6320_EFUSE_BASE_ADDRESS 0x00024000 +#define AR6320_FPGA_REG_BASE_ADDRESS 0x00039000 +#define AR6320_WLAN_UART2_BASE_ADDRESS 0x00054c00 +#define AR6320_DBI_BASE_ADDRESS 0x0003c000 + +#define AR6320_SCRATCH_3_ADDRESS 0x0028 +#define AR6320_TARG_DRAM_START 0x00400000 +#define AR6320_SOC_SYSTEM_SLEEP_OFFSET 0x000000c0 +#define AR6320_SOC_RESET_CONTROL_OFFSET 0x00000000 +#define AR6320_SOC_CLOCK_CONTROL_OFFSET 0x00000028 +#define AR6320_SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001 +#define AR6320_SOC_RESET_CONTROL_SI0_RST_MASK 0x00000000 +#define AR6320_WLAN_GPIO_PIN0_ADDRESS 0x00000068 +#define AR6320_WLAN_GPIO_PIN1_ADDRESS 0x0000006c +#define AR6320_WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800 +#define AR6320_WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800 +#define AR6320_SOC_CPU_CLOCK_OFFSET 0x00000020 +#define AR6320_SOC_LPO_CAL_OFFSET 0x000000e0 +#define AR6320_WLAN_GPIO_PIN10_ADDRESS 0x00000090 +#define AR6320_WLAN_GPIO_PIN11_ADDRESS 0x00000094 +#define AR6320_WLAN_GPIO_PIN12_ADDRESS 0x00000098 +#define AR6320_WLAN_GPIO_PIN13_ADDRESS 0x0000009c +#define AR6320_SOC_CPU_CLOCK_STANDARD_LSB 0 +#define AR6320_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 +#define AR6320_SOC_LPO_CAL_ENABLE_LSB 20 +#define AR6320_SOC_LPO_CAL_ENABLE_MASK 0x00100000 + +#define AR6320_WLAN_SYSTEM_SLEEP_DISABLE_LSB 0 +#define AR6320_WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001 +#define AR6320_WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008 +#define 
AR6320_WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004 +#define AR6320_SI_CONFIG_BIDIR_OD_DATA_LSB 18 +#define AR6320_SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000 +#define AR6320_SI_CONFIG_I2C_LSB 16 +#define AR6320_SI_CONFIG_I2C_MASK 0x00010000 +#define AR6320_SI_CONFIG_POS_SAMPLE_LSB 7 +#define AR6320_SI_CONFIG_POS_SAMPLE_MASK 0x00000080 +#define AR6320_SI_CONFIG_INACTIVE_CLK_LSB 4 +#define AR6320_SI_CONFIG_INACTIVE_CLK_MASK 0x00000010 +#define AR6320_SI_CONFIG_INACTIVE_DATA_LSB 5 +#define AR6320_SI_CONFIG_INACTIVE_DATA_MASK 0x00000020 +#define AR6320_SI_CONFIG_DIVIDER_LSB 0 +#define AR6320_SI_CONFIG_DIVIDER_MASK 0x0000000f +#define AR6320_SI_CONFIG_OFFSET 0x00000000 +#define AR6320_SI_TX_DATA0_OFFSET 0x00000008 +#define AR6320_SI_TX_DATA1_OFFSET 0x0000000c +#define AR6320_SI_RX_DATA0_OFFSET 0x00000010 +#define AR6320_SI_RX_DATA1_OFFSET 0x00000014 +#define AR6320_SI_CS_OFFSET 0x00000004 +#define AR6320_SI_CS_DONE_ERR_MASK 0x00000400 +#define AR6320_SI_CS_DONE_INT_MASK 0x00000200 +#define AR6320_SI_CS_START_LSB 8 +#define AR6320_SI_CS_START_MASK 0x00000100 +#define AR6320_SI_CS_RX_CNT_LSB 4 +#define AR6320_SI_CS_RX_CNT_MASK 0x000000f0 +#define AR6320_SI_CS_TX_CNT_LSB 0 +#define AR6320_SI_CS_TX_CNT_MASK 0x0000000f +#define AR6320_SR_WR_INDEX_ADDRESS 0x003c +#define AR6320_DST_WATERMARK_ADDRESS 0x0050 +#define AR6320_RX_MSDU_END_4_FIRST_MSDU_LSB 14 +#define AR6320_RX_MSDU_END_4_FIRST_MSDU_MASK 0x00004000 +#define AR6320_RX_MPDU_START_0_RETRY_LSB 14 +#define AR6320_RX_MPDU_START_0_RETRY_MASK 0x00004000 +#define AR6320_RX_MPDU_START_0_SEQ_NUM_LSB 16 +#define AR6320_RX_MPDU_START_0_SEQ_NUM_MASK 0x0fff0000 +#define AR6320_RX_MPDU_START_2_TID_LSB 28 +#define AR6320_RX_MPDU_START_2_TID_MASK 0xf0000000 +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) +#define AR6320_SOC_PCIE_BASE_ADDRESS 0x00038000 +#define AR6320_CE_WRAPPER_BASE_ADDRESS 0x00034000 +#define AR6320_CE0_BASE_ADDRESS 0x00034400 +#define AR6320_CE1_BASE_ADDRESS 0x00034800 +#define AR6320_CE2_BASE_ADDRESS 
0x00034c00 +#define AR6320_CE3_BASE_ADDRESS 0x00035000 +#define AR6320_CE4_BASE_ADDRESS 0x00035400 +#define AR6320_CE5_BASE_ADDRESS 0x00035800 +#define AR6320_CE6_BASE_ADDRESS 0x00035c00 +#define AR6320_CE7_BASE_ADDRESS 0x00036000 +#define AR6320_WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x00007800 +#define AR6320_CE_COUNT 8 +#define AR6320_CE_CTRL1_ADDRESS 0x0010 +#define AR6320_CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff +#define AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000 +#define AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00 +#define AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8 +#define AR6320_CE_CTRL1_DMAX_LENGTH_LSB 0 +#define AR6320_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000 +#define AR6320_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000 +#define AR6320_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16 +#define AR6320_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17 +#define AR6320_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK 0x00000020 +#define AR6320_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB 5 +#define AR6320_PCIE_SOC_WAKE_RESET 0x00000000 +#define AR6320_PCIE_SOC_WAKE_ADDRESS 0x0004 +#define AR6320_PCIE_SOC_WAKE_V_MASK 0x00000001 +#define AR6320_MUX_ID_MASK 0x0000 +#define AR6320_TRANSACTION_ID_MASK 0x3fff +#define AR6320_PCIE_LOCAL_BASE_ADDRESS 0x80000 +#define AR6320_FW_IND_HELPER 4 +#define AR6320_PCIE_INTR_ENABLE_ADDRESS 0x0008 +#define AR6320_PCIE_INTR_CLR_ADDRESS 0x0014 +#define AR6320_PCIE_INTR_FIRMWARE_MASK 0x00000400 +#define AR6320_PCIE_INTR_CE0_MASK 0x00000800 +#define AR6320_PCIE_INTR_CE_MASK_ALL 0x0007f800 +#define AR6320_PCIE_INTR_CAUSE_ADDRESS 0x000c +#define AR6320_SOC_RESET_CONTROL_CE_RST_MASK 0x00000001 +#endif +#define AR6320_RX_MPDU_START_2_PN_47_32_LSB 0 +#define AR6320_RX_MPDU_START_2_PN_47_32_MASK 0x0000ffff +#define AR6320_RX_MSDU_END_1_KEY_ID_OCT_MASK 0x000000ff +#define AR6320_RX_MSDU_END_1_KEY_ID_OCT_LSB 0 +#define AR6320_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB 16 +#define AR6320_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK 0xffff0000 +#define 
AR6320_RX_MSDU_END_4_LAST_MSDU_LSB 15 +#define AR6320_RX_MSDU_END_4_LAST_MSDU_MASK 0x00008000 +#define AR6320_RX_ATTENTION_0_MCAST_BCAST_LSB 2 +#define AR6320_RX_ATTENTION_0_MCAST_BCAST_MASK 0x00000004 +#define AR6320_RX_ATTENTION_0_FRAGMENT_LSB 13 +#define AR6320_RX_ATTENTION_0_FRAGMENT_MASK 0x00002000 +#define AR6320_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK 0x08000000 +#define AR6320_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB 16 +#define AR6320_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK 0x00ff0000 +#define AR6320_RX_MSDU_START_0_MSDU_LENGTH_LSB 0 +#define AR6320_RX_MSDU_START_0_MSDU_LENGTH_MASK 0x00003fff +#define AR6320_RX_MSDU_START_2_DECAP_FORMAT_OFFSET 0x00000008 +#define AR6320_RX_MSDU_START_2_DECAP_FORMAT_LSB 8 +#define AR6320_RX_MSDU_START_2_DECAP_FORMAT_MASK 0x00000300 +#define AR6320_RX_MPDU_START_0_ENCRYPTED_LSB 13 +#define AR6320_RX_MPDU_START_0_ENCRYPTED_MASK 0x00002000 +#define AR6320_RX_ATTENTION_0_MORE_DATA_MASK 0x00000400 +#define AR6320_RX_ATTENTION_0_MSDU_DONE_MASK 0x80000000 +#define AR6320_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK 0x00040000 +#define AR6320_DST_WR_INDEX_ADDRESS 0x0040 +#define AR6320_SRC_WATERMARK_ADDRESS 0x004c +#define AR6320_SRC_WATERMARK_LOW_MASK 0xffff0000 +#define AR6320_SRC_WATERMARK_HIGH_MASK 0x0000ffff +#define AR6320_DST_WATERMARK_LOW_MASK 0xffff0000 +#define AR6320_DST_WATERMARK_HIGH_MASK 0x0000ffff +#define AR6320_CURRENT_SRRI_ADDRESS 0x0044 +#define AR6320_CURRENT_DRRI_ADDRESS 0x0048 +#define AR6320_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002 +#define AR6320_HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004 +#define AR6320_HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008 +#define AR6320_HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010 +#define AR6320_HOST_IS_ADDRESS 0x0030 +#define AR6320_HOST_IS_COPY_COMPLETE_MASK 0x00000001 +#define AR6320_HOST_IE_ADDRESS 0x002c +#define AR6320_HOST_IE_COPY_COMPLETE_MASK 0x00000001 +#define AR6320_SR_BA_ADDRESS 0x0000 +#define AR6320_SR_SIZE_ADDRESS 0x0004 +#define AR6320_DR_BA_ADDRESS 0x0008 
+#define AR6320_DR_SIZE_ADDRESS 0x000c +#define AR6320_MISC_IE_ADDRESS 0x0034 +#define AR6320_MISC_IS_AXI_ERR_MASK 0x00000400 +#define AR6320_MISC_IS_DST_ADDR_ERR_MASK 0x00000200 +#define AR6320_MISC_IS_SRC_LEN_ERR_MASK 0x00000100 +#define AR6320_MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080 +#define AR6320_MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040 +#define AR6320_MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020 +#define AR6320_SRC_WATERMARK_LOW_LSB 16 +#define AR6320_SRC_WATERMARK_HIGH_LSB 0 +#define AR6320_DST_WATERMARK_LOW_LSB 16 +#define AR6320_DST_WATERMARK_HIGH_LSB 0 +#define AR6320_SOC_GLOBAL_RESET_ADDRESS 0x0008 +#define AR6320_RTC_STATE_ADDRESS 0x0000 +#define AR6320_RTC_STATE_COLD_RESET_MASK 0x00002000 +#define AR6320_RTC_STATE_V_MASK 0x00000007 +#define AR6320_RTC_STATE_V_LSB 0 +#define AR6320_RTC_STATE_V_ON 3 +#define AR6320_FW_IND_EVENT_PENDING 1 +#define AR6320_FW_IND_INITIALIZED 2 +#define AR6320_CPU_INTR_ADDRESS 0x0010 +#define AR6320_SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050 +#define AR6320_SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004 +#define AR6320_SOC_RESET_CONTROL_ADDRESS 0x00000000 +#define AR6320_SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040 +#define AR6320_CORE_CTRL_ADDRESS 0x0000 +#define AR6320_CORE_CTRL_CPU_INTR_MASK 0x00002000 +#define AR6320_LOCAL_SCRATCH_OFFSET 0x000000c0 +#define AR6320_CLOCK_GPIO_OFFSET 0xffffffff +#define AR6320_CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0 +#define AR6320_CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0 +#define AR6320_SOC_CHIP_ID_ADDRESS 0x000000f0 +#define AR6320_SOC_CHIP_ID_VERSION_MASK 0xfffc0000 +#define AR6320_SOC_CHIP_ID_VERSION_LSB 18 +#define AR6320_SOC_CHIP_ID_REVISION_MASK 0x00000f00 +#define AR6320_SOC_CHIP_ID_REVISION_LSB 8 +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) +#define AR6320_SOC_POWER_REG_OFFSET 0x0000010c +/* Copy Engine Debug */ +#define AR6320_WLAN_DEBUG_INPUT_SEL_OFFSET 0x0000010c +#define AR6320_WLAN_DEBUG_INPUT_SEL_SRC_MSB 3 +#define AR6320_WLAN_DEBUG_INPUT_SEL_SRC_LSB 0 +#define 
AR6320_WLAN_DEBUG_INPUT_SEL_SRC_MASK 0x0000000f +#define AR6320_WLAN_DEBUG_CONTROL_OFFSET 0x00000108 +#define AR6320_WLAN_DEBUG_CONTROL_ENABLE_MSB 0 +#define AR6320_WLAN_DEBUG_CONTROL_ENABLE_LSB 0 +#define AR6320_WLAN_DEBUG_CONTROL_ENABLE_MASK 0x00000001 +#define AR6320_WLAN_DEBUG_OUT_OFFSET 0x00000110 +#define AR6320_WLAN_DEBUG_OUT_DATA_MSB 19 +#define AR6320_WLAN_DEBUG_OUT_DATA_LSB 0 +#define AR6320_WLAN_DEBUG_OUT_DATA_MASK 0x000fffff +#define AR6320_AMBA_DEBUG_BUS_OFFSET 0x0000011c +#define AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB 13 +#define AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB 8 +#define AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK 0x00003f00 +#define AR6320_AMBA_DEBUG_BUS_SEL_MSB 4 +#define AR6320_AMBA_DEBUG_BUS_SEL_LSB 0 +#define AR6320_AMBA_DEBUG_BUS_SEL_MASK 0x0000001f +#define AR6320_CE_WRAPPER_DEBUG_OFFSET 0x0008 +#define AR6320_CE_WRAPPER_DEBUG_SEL_MSB 5 +#define AR6320_CE_WRAPPER_DEBUG_SEL_LSB 0 +#define AR6320_CE_WRAPPER_DEBUG_SEL_MASK 0x0000003f +#define AR6320_CE_DEBUG_OFFSET 0x0054 +#define AR6320_CE_DEBUG_SEL_MSB 5 +#define AR6320_CE_DEBUG_SEL_LSB 0 +#define AR6320_CE_DEBUG_SEL_MASK 0x0000003f +/* End */ + +/* PLL start */ +#define AR6320_EFUSE_OFFSET 0x0000032c +#define AR6320_EFUSE_XTAL_SEL_MSB 10 +#define AR6320_EFUSE_XTAL_SEL_LSB 8 +#define AR6320_EFUSE_XTAL_SEL_MASK 0x00000700 +#define AR6320_BB_PLL_CONFIG_OFFSET 0x000002f4 +#define AR6320_BB_PLL_CONFIG_OUTDIV_MSB 20 +#define AR6320_BB_PLL_CONFIG_OUTDIV_LSB 18 +#define AR6320_BB_PLL_CONFIG_OUTDIV_MASK 0x001c0000 +#define AR6320_BB_PLL_CONFIG_FRAC_MSB 17 +#define AR6320_BB_PLL_CONFIG_FRAC_LSB 0 +#define AR6320_BB_PLL_CONFIG_FRAC_MASK 0x0003ffff +#define AR6320_WLAN_PLL_SETTLE_TIME_MSB 10 +#define AR6320_WLAN_PLL_SETTLE_TIME_LSB 0 +#define AR6320_WLAN_PLL_SETTLE_TIME_MASK 0x000007ff +#define AR6320_WLAN_PLL_SETTLE_OFFSET 0x0018 +#define AR6320_WLAN_PLL_SETTLE_SW_MASK 0x000007ff +#define AR6320_WLAN_PLL_SETTLE_RSTMASK 0xffffffff +#define AR6320_WLAN_PLL_SETTLE_RESET 0x00000400 +#define 
AR6320_WLAN_PLL_CONTROL_NOPWD_MSB 18 +#define AR6320_WLAN_PLL_CONTROL_NOPWD_LSB 18 +#define AR6320_WLAN_PLL_CONTROL_NOPWD_MASK 0x00040000 +#define AR6320_WLAN_PLL_CONTROL_BYPASS_MSB 16 +#define AR6320_WLAN_PLL_CONTROL_BYPASS_LSB 16 +#define AR6320_WLAN_PLL_CONTROL_BYPASS_MASK 0x00010000 +#define AR6320_WLAN_PLL_CONTROL_BYPASS_RESET 0x1 +#define AR6320_WLAN_PLL_CONTROL_CLK_SEL_MSB 15 +#define AR6320_WLAN_PLL_CONTROL_CLK_SEL_LSB 14 +#define AR6320_WLAN_PLL_CONTROL_CLK_SEL_MASK 0x0000c000 +#define AR6320_WLAN_PLL_CONTROL_CLK_SEL_RESET 0x0 +#define AR6320_WLAN_PLL_CONTROL_REFDIV_MSB 13 +#define AR6320_WLAN_PLL_CONTROL_REFDIV_LSB 10 +#define AR6320_WLAN_PLL_CONTROL_REFDIV_MASK 0x00003c00 +#define AR6320_WLAN_PLL_CONTROL_REFDIV_RESET 0x0 +#define AR6320_WLAN_PLL_CONTROL_DIV_MSB 9 +#define AR6320_WLAN_PLL_CONTROL_DIV_LSB 0 +#define AR6320_WLAN_PLL_CONTROL_DIV_MASK 0x000003ff +#define AR6320_WLAN_PLL_CONTROL_DIV_RESET 0x11 +#define AR6320_WLAN_PLL_CONTROL_OFFSET 0x0014 +#define AR6320_WLAN_PLL_CONTROL_SW_MASK 0x001fffff +#define AR6320_WLAN_PLL_CONTROL_RSTMASK 0xffffffff +#define AR6320_WLAN_PLL_CONTROL_RESET 0x00010011 +#define AR6320_SOC_CORE_CLK_CTRL_OFFSET 0x00000114 +#define AR6320_SOC_CORE_CLK_CTRL_DIV_MSB 2 +#define AR6320_SOC_CORE_CLK_CTRL_DIV_LSB 0 +#define AR6320_SOC_CORE_CLK_CTRL_DIV_MASK 0x00000007 +#define AR6320_RTC_SYNC_STATUS_PLL_CHANGING_MSB 5 +#define AR6320_RTC_SYNC_STATUS_PLL_CHANGING_LSB 5 +#define AR6320_RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020 +#define AR6320_RTC_SYNC_STATUS_PLL_CHANGING_RESET 0x0 +#define AR6320_RTC_SYNC_STATUS_OFFSET 0x0244 +#define AR6320_SOC_CPU_CLOCK_OFFSET 0x00000020 +#define AR6320_SOC_CPU_CLOCK_STANDARD_MSB 1 +#define AR6320_SOC_CPU_CLOCK_STANDARD_LSB 0 +#define AR6320_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 +/* PLL end */ +#define AR6320_PCIE_INTR_CE_MASK(n) \ + (AR6320_PCIE_INTR_CE0_MASK << (n)) +#endif +#define AR6320_DRAM_BASE_ADDRESS AR6320_TARG_DRAM_START +#define AR6320_FW_INDICATOR_ADDRESS \ + 
(AR6320_SOC_CORE_BASE_ADDRESS + AR6320_SCRATCH_3_ADDRESS) +#define AR6320_SYSTEM_SLEEP_OFFSET AR6320_SOC_SYSTEM_SLEEP_OFFSET +#define AR6320_WLAN_SYSTEM_SLEEP_OFFSET 0x002c +#define AR6320_WLAN_RESET_CONTROL_OFFSET AR6320_SOC_RESET_CONTROL_OFFSET +#define AR6320_CLOCK_CONTROL_OFFSET AR6320_SOC_CLOCK_CONTROL_OFFSET +#define AR6320_CLOCK_CONTROL_SI0_CLK_MASK AR6320_SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define AR6320_RESET_CONTROL_MBOX_RST_MASK 0x00000004 +#define AR6320_RESET_CONTROL_SI0_RST_MASK AR6320_SOC_RESET_CONTROL_SI0_RST_MASK +#define AR6320_GPIO_BASE_ADDRESS AR6320_WLAN_GPIO_BASE_ADDRESS +#define AR6320_GPIO_PIN0_OFFSET AR6320_WLAN_GPIO_PIN0_ADDRESS +#define AR6320_GPIO_PIN1_OFFSET AR6320_WLAN_GPIO_PIN1_ADDRESS +#define AR6320_GPIO_PIN0_CONFIG_MASK AR6320_WLAN_GPIO_PIN0_CONFIG_MASK +#define AR6320_GPIO_PIN1_CONFIG_MASK AR6320_WLAN_GPIO_PIN1_CONFIG_MASK +#define AR6320_SI_BASE_ADDRESS 0x00050000 +#define AR6320_CPU_CLOCK_OFFSET AR6320_SOC_CPU_CLOCK_OFFSET +#define AR6320_LPO_CAL_OFFSET AR6320_SOC_LPO_CAL_OFFSET +#define AR6320_GPIO_PIN10_OFFSET AR6320_WLAN_GPIO_PIN10_ADDRESS +#define AR6320_GPIO_PIN11_OFFSET AR6320_WLAN_GPIO_PIN11_ADDRESS +#define AR6320_GPIO_PIN12_OFFSET AR6320_WLAN_GPIO_PIN12_ADDRESS +#define AR6320_GPIO_PIN13_OFFSET AR6320_WLAN_GPIO_PIN13_ADDRESS +#define AR6320_CPU_CLOCK_STANDARD_LSB AR6320_SOC_CPU_CLOCK_STANDARD_LSB +#define AR6320_CPU_CLOCK_STANDARD_MASK AR6320_SOC_CPU_CLOCK_STANDARD_MASK +#define AR6320_LPO_CAL_ENABLE_LSB AR6320_SOC_LPO_CAL_ENABLE_LSB +#define AR6320_LPO_CAL_ENABLE_MASK AR6320_SOC_LPO_CAL_ENABLE_MASK +#define AR6320_ANALOG_INTF_BASE_ADDRESS AR6320_WLAN_ANALOG_INTF_BASE_ADDRESS +#define AR6320_MBOX_BASE_ADDRESS 0x00008000 +#define AR6320_INT_STATUS_ENABLE_ERROR_LSB 7 +#define AR6320_INT_STATUS_ENABLE_ERROR_MASK 0x00000080 +#define AR6320_INT_STATUS_ENABLE_CPU_LSB 6 +#define AR6320_INT_STATUS_ENABLE_CPU_MASK 0x00000040 +#define AR6320_INT_STATUS_ENABLE_COUNTER_LSB 4 +#define AR6320_INT_STATUS_ENABLE_COUNTER_MASK 0x00000010 
+#define AR6320_INT_STATUS_ENABLE_MBOX_DATA_LSB 0 +#define AR6320_INT_STATUS_ENABLE_MBOX_DATA_MASK 0x0000000f +#define AR6320_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB 17 +#define AR6320_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK 0x00020000 +#define AR6320_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB 16 +#define AR6320_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK 0x00010000 +#define AR6320_COUNTER_INT_STATUS_ENABLE_BIT_LSB 24 +#define AR6320_COUNTER_INT_STATUS_ENABLE_BIT_MASK 0xff000000 +#define AR6320_INT_STATUS_ENABLE_ADDRESS 0x0828 +#define AR6320_CPU_INT_STATUS_ENABLE_BIT_LSB 8 +#define AR6320_CPU_INT_STATUS_ENABLE_BIT_MASK 0x0000ff00 +#define AR6320_HOST_INT_STATUS_ADDRESS 0x0800 +#define AR6320_CPU_INT_STATUS_ADDRESS 0x0801 +#define AR6320_ERROR_INT_STATUS_ADDRESS 0x0802 +#define AR6320_ERROR_INT_STATUS_WAKEUP_MASK 0x00040000 +#define AR6320_ERROR_INT_STATUS_WAKEUP_LSB 18 +#define AR6320_ERROR_INT_STATUS_RX_UNDERFLOW_MASK 0x00020000 +#define AR6320_ERROR_INT_STATUS_RX_UNDERFLOW_LSB 17 +#define AR6320_ERROR_INT_STATUS_TX_OVERFLOW_MASK 0x00010000 +#define AR6320_ERROR_INT_STATUS_TX_OVERFLOW_LSB 16 +#define AR6320_COUNT_DEC_ADDRESS 0x0840 +#define AR6320_HOST_INT_STATUS_CPU_MASK 0x00000040 +#define AR6320_HOST_INT_STATUS_CPU_LSB 6 +#define AR6320_HOST_INT_STATUS_ERROR_MASK 0x00000080 +#define AR6320_HOST_INT_STATUS_ERROR_LSB 7 +#define AR6320_HOST_INT_STATUS_COUNTER_MASK 0x00000010 +#define AR6320_HOST_INT_STATUS_COUNTER_LSB 4 +#define AR6320_RX_LOOKAHEAD_VALID_ADDRESS 0x0805 +#define AR6320_WINDOW_DATA_ADDRESS 0x0874 +#define AR6320_WINDOW_READ_ADDR_ADDRESS 0x087c +#define AR6320_WINDOW_WRITE_ADDR_ADDRESS 0x0878 +#define AR6320_HOST_INT_STATUS_MBOX_DATA_MASK 0x0f +#define AR6320_HOST_INT_STATUS_MBOX_DATA_LSB 0 + +struct targetdef_s ar6320_targetdef = { + .d_RTC_SOC_BASE_ADDRESS = AR6320_RTC_SOC_BASE_ADDRESS, + .d_RTC_WMAC_BASE_ADDRESS = AR6320_RTC_WMAC_BASE_ADDRESS, + .d_SYSTEM_SLEEP_OFFSET = AR6320_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_OFFSET = 
AR6320_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_DISABLE_LSB = + AR6320_WLAN_SYSTEM_SLEEP_DISABLE_LSB, + .d_WLAN_SYSTEM_SLEEP_DISABLE_MASK = + AR6320_WLAN_SYSTEM_SLEEP_DISABLE_MASK, + .d_CLOCK_CONTROL_OFFSET = AR6320_CLOCK_CONTROL_OFFSET, + .d_CLOCK_CONTROL_SI0_CLK_MASK = AR6320_CLOCK_CONTROL_SI0_CLK_MASK, + .d_RESET_CONTROL_OFFSET = AR6320_SOC_RESET_CONTROL_OFFSET, + .d_RESET_CONTROL_MBOX_RST_MASK = AR6320_RESET_CONTROL_MBOX_RST_MASK, + .d_RESET_CONTROL_SI0_RST_MASK = AR6320_RESET_CONTROL_SI0_RST_MASK, + .d_WLAN_RESET_CONTROL_OFFSET = AR6320_WLAN_RESET_CONTROL_OFFSET, + .d_WLAN_RESET_CONTROL_COLD_RST_MASK = + AR6320_WLAN_RESET_CONTROL_COLD_RST_MASK, + .d_WLAN_RESET_CONTROL_WARM_RST_MASK = + AR6320_WLAN_RESET_CONTROL_WARM_RST_MASK, + .d_GPIO_BASE_ADDRESS = AR6320_GPIO_BASE_ADDRESS, + .d_GPIO_PIN0_OFFSET = AR6320_GPIO_PIN0_OFFSET, + .d_GPIO_PIN1_OFFSET = AR6320_GPIO_PIN1_OFFSET, + .d_GPIO_PIN0_CONFIG_MASK = AR6320_GPIO_PIN0_CONFIG_MASK, + .d_GPIO_PIN1_CONFIG_MASK = AR6320_GPIO_PIN1_CONFIG_MASK, + .d_SI_CONFIG_BIDIR_OD_DATA_LSB = AR6320_SI_CONFIG_BIDIR_OD_DATA_LSB, + .d_SI_CONFIG_BIDIR_OD_DATA_MASK = AR6320_SI_CONFIG_BIDIR_OD_DATA_MASK, + .d_SI_CONFIG_I2C_LSB = AR6320_SI_CONFIG_I2C_LSB, + .d_SI_CONFIG_I2C_MASK = AR6320_SI_CONFIG_I2C_MASK, + .d_SI_CONFIG_POS_SAMPLE_LSB = AR6320_SI_CONFIG_POS_SAMPLE_LSB, + .d_SI_CONFIG_POS_SAMPLE_MASK = AR6320_SI_CONFIG_POS_SAMPLE_MASK, + .d_SI_CONFIG_INACTIVE_CLK_LSB = AR6320_SI_CONFIG_INACTIVE_CLK_LSB, + .d_SI_CONFIG_INACTIVE_CLK_MASK = AR6320_SI_CONFIG_INACTIVE_CLK_MASK, + .d_SI_CONFIG_INACTIVE_DATA_LSB = AR6320_SI_CONFIG_INACTIVE_DATA_LSB, + .d_SI_CONFIG_INACTIVE_DATA_MASK = AR6320_SI_CONFIG_INACTIVE_DATA_MASK, + .d_SI_CONFIG_DIVIDER_LSB = AR6320_SI_CONFIG_DIVIDER_LSB, + .d_SI_CONFIG_DIVIDER_MASK = AR6320_SI_CONFIG_DIVIDER_MASK, + .d_SI_BASE_ADDRESS = AR6320_SI_BASE_ADDRESS, + .d_SI_CONFIG_OFFSET = AR6320_SI_CONFIG_OFFSET, + .d_SI_TX_DATA0_OFFSET = AR6320_SI_TX_DATA0_OFFSET, + .d_SI_TX_DATA1_OFFSET = 
AR6320_SI_TX_DATA1_OFFSET, + .d_SI_RX_DATA0_OFFSET = AR6320_SI_RX_DATA0_OFFSET, + .d_SI_RX_DATA1_OFFSET = AR6320_SI_RX_DATA1_OFFSET, + .d_SI_CS_OFFSET = AR6320_SI_CS_OFFSET, + .d_SI_CS_DONE_ERR_MASK = AR6320_SI_CS_DONE_ERR_MASK, + .d_SI_CS_DONE_INT_MASK = AR6320_SI_CS_DONE_INT_MASK, + .d_SI_CS_START_LSB = AR6320_SI_CS_START_LSB, + .d_SI_CS_START_MASK = AR6320_SI_CS_START_MASK, + .d_SI_CS_RX_CNT_LSB = AR6320_SI_CS_RX_CNT_LSB, + .d_SI_CS_RX_CNT_MASK = AR6320_SI_CS_RX_CNT_MASK, + .d_SI_CS_TX_CNT_LSB = AR6320_SI_CS_TX_CNT_LSB, + .d_SI_CS_TX_CNT_MASK = AR6320_SI_CS_TX_CNT_MASK, + .d_BOARD_DATA_SZ = AR6320_BOARD_DATA_SZ, + .d_BOARD_EXT_DATA_SZ = AR6320_BOARD_EXT_DATA_SZ, + .d_MBOX_BASE_ADDRESS = AR6320_MBOX_BASE_ADDRESS, + .d_LOCAL_SCRATCH_OFFSET = AR6320_LOCAL_SCRATCH_OFFSET, + .d_CPU_CLOCK_OFFSET = AR6320_CPU_CLOCK_OFFSET, + .d_LPO_CAL_OFFSET = AR6320_LPO_CAL_OFFSET, + .d_GPIO_PIN10_OFFSET = AR6320_GPIO_PIN10_OFFSET, + .d_GPIO_PIN11_OFFSET = AR6320_GPIO_PIN11_OFFSET, + .d_GPIO_PIN12_OFFSET = AR6320_GPIO_PIN12_OFFSET, + .d_GPIO_PIN13_OFFSET = AR6320_GPIO_PIN13_OFFSET, + .d_CLOCK_GPIO_OFFSET = AR6320_CLOCK_GPIO_OFFSET, + .d_CPU_CLOCK_STANDARD_LSB = AR6320_CPU_CLOCK_STANDARD_LSB, + .d_CPU_CLOCK_STANDARD_MASK = AR6320_CPU_CLOCK_STANDARD_MASK, + .d_LPO_CAL_ENABLE_LSB = AR6320_LPO_CAL_ENABLE_LSB, + .d_LPO_CAL_ENABLE_MASK = AR6320_LPO_CAL_ENABLE_MASK, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB = AR6320_CLOCK_GPIO_BT_CLK_OUT_EN_LSB, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK = + AR6320_CLOCK_GPIO_BT_CLK_OUT_EN_MASK, + .d_ANALOG_INTF_BASE_ADDRESS = AR6320_ANALOG_INTF_BASE_ADDRESS, + .d_WLAN_MAC_BASE_ADDRESS = AR6320_WLAN_MAC_BASE_ADDRESS, + .d_FW_INDICATOR_ADDRESS = AR6320_FW_INDICATOR_ADDRESS, + .d_DRAM_BASE_ADDRESS = AR6320_DRAM_BASE_ADDRESS, + .d_SOC_CORE_BASE_ADDRESS = AR6320_SOC_CORE_BASE_ADDRESS, + .d_CORE_CTRL_ADDRESS = AR6320_CORE_CTRL_ADDRESS, +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) + .d_MSI_NUM_REQUEST = MSI_NUM_REQUEST, + .d_MSI_ASSIGN_FW = MSI_ASSIGN_FW, 
+#endif + .d_CORE_CTRL_CPU_INTR_MASK = AR6320_CORE_CTRL_CPU_INTR_MASK, + .d_SR_WR_INDEX_ADDRESS = AR6320_SR_WR_INDEX_ADDRESS, + .d_DST_WATERMARK_ADDRESS = AR6320_DST_WATERMARK_ADDRESS, + /* htt_rx.c */ + .d_RX_MSDU_END_4_FIRST_MSDU_MASK = + AR6320_RX_MSDU_END_4_FIRST_MSDU_MASK, + .d_RX_MSDU_END_4_FIRST_MSDU_LSB = AR6320_RX_MSDU_END_4_FIRST_MSDU_LSB, + .d_RX_MPDU_START_0_RETRY_LSB = AR6320_RX_MPDU_START_0_RETRY_LSB, + .d_RX_MPDU_START_0_RETRY_MASK = AR6320_RX_MPDU_START_0_RETRY_MASK, + .d_RX_MPDU_START_0_SEQ_NUM_MASK = AR6320_RX_MPDU_START_0_SEQ_NUM_MASK, + .d_RX_MPDU_START_0_SEQ_NUM_LSB = AR6320_RX_MPDU_START_0_SEQ_NUM_LSB, + .d_RX_MPDU_START_2_PN_47_32_LSB = AR6320_RX_MPDU_START_2_PN_47_32_LSB, + .d_RX_MPDU_START_2_PN_47_32_MASK = + AR6320_RX_MPDU_START_2_PN_47_32_MASK, + .d_RX_MPDU_START_2_TID_LSB = AR6320_RX_MPDU_START_2_TID_LSB, + .d_RX_MPDU_START_2_TID_MASK = AR6320_RX_MPDU_START_2_TID_MASK, + .d_RX_MSDU_END_1_KEY_ID_OCT_MASK = + AR6320_RX_MSDU_END_1_KEY_ID_OCT_MASK, + .d_RX_MSDU_END_1_KEY_ID_OCT_LSB = AR6320_RX_MSDU_END_1_KEY_ID_OCT_LSB, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK = + AR6320_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB = + AR6320_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB, + .d_RX_MSDU_END_4_LAST_MSDU_MASK = AR6320_RX_MSDU_END_4_LAST_MSDU_MASK, + .d_RX_MSDU_END_4_LAST_MSDU_LSB = AR6320_RX_MSDU_END_4_LAST_MSDU_LSB, + .d_RX_ATTENTION_0_MCAST_BCAST_MASK = + AR6320_RX_ATTENTION_0_MCAST_BCAST_MASK, + .d_RX_ATTENTION_0_MCAST_BCAST_LSB = + AR6320_RX_ATTENTION_0_MCAST_BCAST_LSB, + .d_RX_ATTENTION_0_FRAGMENT_MASK = AR6320_RX_ATTENTION_0_FRAGMENT_MASK, + .d_RX_ATTENTION_0_FRAGMENT_LSB = AR6320_RX_ATTENTION_0_FRAGMENT_LSB, + .d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK = + AR6320_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK = + AR6320_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB = + AR6320_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB, + .d_RX_MSDU_START_0_MSDU_LENGTH_MASK = + 
AR6320_RX_MSDU_START_0_MSDU_LENGTH_MASK, + .d_RX_MSDU_START_0_MSDU_LENGTH_LSB = + AR6320_RX_MSDU_START_0_MSDU_LENGTH_LSB, + .d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET = + AR6320_RX_MSDU_START_2_DECAP_FORMAT_OFFSET, + .d_RX_MSDU_START_2_DECAP_FORMAT_MASK = + AR6320_RX_MSDU_START_2_DECAP_FORMAT_MASK, + .d_RX_MSDU_START_2_DECAP_FORMAT_LSB = + AR6320_RX_MSDU_START_2_DECAP_FORMAT_LSB, + .d_RX_MPDU_START_0_ENCRYPTED_MASK = + AR6320_RX_MPDU_START_0_ENCRYPTED_MASK, + .d_RX_MPDU_START_0_ENCRYPTED_LSB = + AR6320_RX_MPDU_START_0_ENCRYPTED_LSB, + .d_RX_ATTENTION_0_MORE_DATA_MASK = + AR6320_RX_ATTENTION_0_MORE_DATA_MASK, + .d_RX_ATTENTION_0_MSDU_DONE_MASK = + AR6320_RX_ATTENTION_0_MSDU_DONE_MASK, + .d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK = + AR6320_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK, +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) + .d_CE_COUNT = AR6320_CE_COUNT, + .d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL, + .d_PCIE_INTR_ENABLE_ADDRESS = AR6320_PCIE_INTR_ENABLE_ADDRESS, + .d_PCIE_INTR_CLR_ADDRESS = AR6320_PCIE_INTR_CLR_ADDRESS, + .d_PCIE_INTR_FIRMWARE_MASK = AR6320_PCIE_INTR_FIRMWARE_MASK, + .d_PCIE_INTR_CE_MASK_ALL = AR6320_PCIE_INTR_CE_MASK_ALL, + /* PLL start */ + .d_EFUSE_OFFSET = AR6320_EFUSE_OFFSET, + .d_EFUSE_XTAL_SEL_MSB = AR6320_EFUSE_XTAL_SEL_MSB, + .d_EFUSE_XTAL_SEL_LSB = AR6320_EFUSE_XTAL_SEL_LSB, + .d_EFUSE_XTAL_SEL_MASK = AR6320_EFUSE_XTAL_SEL_MASK, + .d_BB_PLL_CONFIG_OFFSET = AR6320_BB_PLL_CONFIG_OFFSET, + .d_BB_PLL_CONFIG_OUTDIV_MSB = AR6320_BB_PLL_CONFIG_OUTDIV_MSB, + .d_BB_PLL_CONFIG_OUTDIV_LSB = AR6320_BB_PLL_CONFIG_OUTDIV_LSB, + .d_BB_PLL_CONFIG_OUTDIV_MASK = AR6320_BB_PLL_CONFIG_OUTDIV_MASK, + .d_BB_PLL_CONFIG_FRAC_MSB = AR6320_BB_PLL_CONFIG_FRAC_MSB, + .d_BB_PLL_CONFIG_FRAC_LSB = AR6320_BB_PLL_CONFIG_FRAC_LSB, + .d_BB_PLL_CONFIG_FRAC_MASK = AR6320_BB_PLL_CONFIG_FRAC_MASK, + .d_WLAN_PLL_SETTLE_TIME_MSB = AR6320_WLAN_PLL_SETTLE_TIME_MSB, + .d_WLAN_PLL_SETTLE_TIME_LSB = AR6320_WLAN_PLL_SETTLE_TIME_LSB, + .d_WLAN_PLL_SETTLE_TIME_MASK 
= AR6320_WLAN_PLL_SETTLE_TIME_MASK, + .d_WLAN_PLL_SETTLE_OFFSET = AR6320_WLAN_PLL_SETTLE_OFFSET, + .d_WLAN_PLL_SETTLE_SW_MASK = AR6320_WLAN_PLL_SETTLE_SW_MASK, + .d_WLAN_PLL_SETTLE_RSTMASK = AR6320_WLAN_PLL_SETTLE_RSTMASK, + .d_WLAN_PLL_SETTLE_RESET = AR6320_WLAN_PLL_SETTLE_RESET, + .d_WLAN_PLL_CONTROL_NOPWD_MSB = AR6320_WLAN_PLL_CONTROL_NOPWD_MSB, + .d_WLAN_PLL_CONTROL_NOPWD_LSB = AR6320_WLAN_PLL_CONTROL_NOPWD_LSB, + .d_WLAN_PLL_CONTROL_NOPWD_MASK = AR6320_WLAN_PLL_CONTROL_NOPWD_MASK, + .d_WLAN_PLL_CONTROL_BYPASS_MSB = AR6320_WLAN_PLL_CONTROL_BYPASS_MSB, + .d_WLAN_PLL_CONTROL_BYPASS_LSB = AR6320_WLAN_PLL_CONTROL_BYPASS_LSB, + .d_WLAN_PLL_CONTROL_BYPASS_MASK = AR6320_WLAN_PLL_CONTROL_BYPASS_MASK, + .d_WLAN_PLL_CONTROL_BYPASS_RESET = + AR6320_WLAN_PLL_CONTROL_BYPASS_RESET, + .d_WLAN_PLL_CONTROL_CLK_SEL_MSB = AR6320_WLAN_PLL_CONTROL_CLK_SEL_MSB, + .d_WLAN_PLL_CONTROL_CLK_SEL_LSB = AR6320_WLAN_PLL_CONTROL_CLK_SEL_LSB, + .d_WLAN_PLL_CONTROL_CLK_SEL_MASK = + AR6320_WLAN_PLL_CONTROL_CLK_SEL_MASK, + .d_WLAN_PLL_CONTROL_CLK_SEL_RESET = + AR6320_WLAN_PLL_CONTROL_CLK_SEL_RESET, + .d_WLAN_PLL_CONTROL_REFDIV_MSB = AR6320_WLAN_PLL_CONTROL_REFDIV_MSB, + .d_WLAN_PLL_CONTROL_REFDIV_LSB = AR6320_WLAN_PLL_CONTROL_REFDIV_LSB, + .d_WLAN_PLL_CONTROL_REFDIV_MASK = AR6320_WLAN_PLL_CONTROL_REFDIV_MASK, + .d_WLAN_PLL_CONTROL_REFDIV_RESET = + AR6320_WLAN_PLL_CONTROL_REFDIV_RESET, + .d_WLAN_PLL_CONTROL_DIV_MSB = AR6320_WLAN_PLL_CONTROL_DIV_MSB, + .d_WLAN_PLL_CONTROL_DIV_LSB = AR6320_WLAN_PLL_CONTROL_DIV_LSB, + .d_WLAN_PLL_CONTROL_DIV_MASK = AR6320_WLAN_PLL_CONTROL_DIV_MASK, + .d_WLAN_PLL_CONTROL_DIV_RESET = AR6320_WLAN_PLL_CONTROL_DIV_RESET, + .d_WLAN_PLL_CONTROL_OFFSET = AR6320_WLAN_PLL_CONTROL_OFFSET, + .d_WLAN_PLL_CONTROL_SW_MASK = AR6320_WLAN_PLL_CONTROL_SW_MASK, + .d_WLAN_PLL_CONTROL_RSTMASK = AR6320_WLAN_PLL_CONTROL_RSTMASK, + .d_WLAN_PLL_CONTROL_RESET = AR6320_WLAN_PLL_CONTROL_RESET, + .d_SOC_CORE_CLK_CTRL_OFFSET = AR6320_SOC_CORE_CLK_CTRL_OFFSET, + .d_SOC_CORE_CLK_CTRL_DIV_MSB = 
AR6320_SOC_CORE_CLK_CTRL_DIV_MSB, + .d_SOC_CORE_CLK_CTRL_DIV_LSB = AR6320_SOC_CORE_CLK_CTRL_DIV_LSB, + .d_SOC_CORE_CLK_CTRL_DIV_MASK = AR6320_SOC_CORE_CLK_CTRL_DIV_MASK, + .d_RTC_SYNC_STATUS_PLL_CHANGING_MSB = + AR6320_RTC_SYNC_STATUS_PLL_CHANGING_MSB, + .d_RTC_SYNC_STATUS_PLL_CHANGING_LSB = + AR6320_RTC_SYNC_STATUS_PLL_CHANGING_LSB, + .d_RTC_SYNC_STATUS_PLL_CHANGING_MASK = + AR6320_RTC_SYNC_STATUS_PLL_CHANGING_MASK, + .d_RTC_SYNC_STATUS_PLL_CHANGING_RESET = + AR6320_RTC_SYNC_STATUS_PLL_CHANGING_RESET, + .d_RTC_SYNC_STATUS_OFFSET = AR6320_RTC_SYNC_STATUS_OFFSET, + .d_SOC_CPU_CLOCK_OFFSET = AR6320_SOC_CPU_CLOCK_OFFSET, + .d_SOC_CPU_CLOCK_STANDARD_MSB = AR6320_SOC_CPU_CLOCK_STANDARD_MSB, + .d_SOC_CPU_CLOCK_STANDARD_LSB = AR6320_SOC_CPU_CLOCK_STANDARD_LSB, + .d_SOC_CPU_CLOCK_STANDARD_MASK = AR6320_SOC_CPU_CLOCK_STANDARD_MASK, + /* PLL end */ + .d_SOC_POWER_REG_OFFSET = AR6320_SOC_POWER_REG_OFFSET, + .d_PCIE_INTR_CAUSE_ADDRESS = AR6320_PCIE_INTR_CAUSE_ADDRESS, + .d_SOC_RESET_CONTROL_ADDRESS = AR6320_SOC_RESET_CONTROL_ADDRESS, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK = + AR6320_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB = + AR6320_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB, + .d_SOC_RESET_CONTROL_CE_RST_MASK = + AR6320_SOC_RESET_CONTROL_CE_RST_MASK, + .d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK = + AR6320_SOC_RESET_CONTROL_CPU_WARM_RST_MASK, + .d_CPU_INTR_ADDRESS = AR6320_CPU_INTR_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ADDRESS = + AR6320_SOC_LF_TIMER_CONTROL0_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK = + AR6320_SOC_LF_TIMER_CONTROL0_ENABLE_MASK, + + .d_WLAN_DEBUG_INPUT_SEL_OFFSET = AR6320_WLAN_DEBUG_INPUT_SEL_OFFSET, + .d_WLAN_DEBUG_INPUT_SEL_SRC_MSB = AR6320_WLAN_DEBUG_INPUT_SEL_SRC_MSB, + .d_WLAN_DEBUG_INPUT_SEL_SRC_LSB = AR6320_WLAN_DEBUG_INPUT_SEL_SRC_LSB, + .d_WLAN_DEBUG_INPUT_SEL_SRC_MASK = + AR6320_WLAN_DEBUG_INPUT_SEL_SRC_MASK, + .d_WLAN_DEBUG_CONTROL_OFFSET = AR6320_WLAN_DEBUG_CONTROL_OFFSET, + 
.d_WLAN_DEBUG_CONTROL_ENABLE_MSB = + AR6320_WLAN_DEBUG_CONTROL_ENABLE_MSB, + .d_WLAN_DEBUG_CONTROL_ENABLE_LSB = + AR6320_WLAN_DEBUG_CONTROL_ENABLE_LSB, + .d_WLAN_DEBUG_CONTROL_ENABLE_MASK = + AR6320_WLAN_DEBUG_CONTROL_ENABLE_MASK, + .d_WLAN_DEBUG_OUT_OFFSET = AR6320_WLAN_DEBUG_OUT_OFFSET, + .d_WLAN_DEBUG_OUT_DATA_MSB = AR6320_WLAN_DEBUG_OUT_DATA_MSB, + .d_WLAN_DEBUG_OUT_DATA_LSB = AR6320_WLAN_DEBUG_OUT_DATA_LSB, + .d_WLAN_DEBUG_OUT_DATA_MASK = AR6320_WLAN_DEBUG_OUT_DATA_MASK, + .d_AMBA_DEBUG_BUS_OFFSET = AR6320_AMBA_DEBUG_BUS_OFFSET, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB = + AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB = + AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK = + AR6320_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK, + .d_AMBA_DEBUG_BUS_SEL_MSB = AR6320_AMBA_DEBUG_BUS_SEL_MSB, + .d_AMBA_DEBUG_BUS_SEL_LSB = AR6320_AMBA_DEBUG_BUS_SEL_LSB, + .d_AMBA_DEBUG_BUS_SEL_MASK = AR6320_AMBA_DEBUG_BUS_SEL_MASK, +#endif + /* chip id start */ + .d_SOC_CHIP_ID_ADDRESS = AR6320_SOC_CHIP_ID_ADDRESS, + .d_SOC_CHIP_ID_VERSION_MASK = AR6320_SOC_CHIP_ID_VERSION_MASK, + .d_SOC_CHIP_ID_VERSION_LSB = AR6320_SOC_CHIP_ID_VERSION_LSB, + .d_SOC_CHIP_ID_REVISION_MASK = AR6320_SOC_CHIP_ID_REVISION_MASK, + .d_SOC_CHIP_ID_REVISION_LSB = AR6320_SOC_CHIP_ID_REVISION_LSB, + /* chip id end */ +}; + +struct hostdef_s ar6320_hostdef = { + .d_INT_STATUS_ENABLE_ERROR_LSB = AR6320_INT_STATUS_ENABLE_ERROR_LSB, + .d_INT_STATUS_ENABLE_ERROR_MASK = AR6320_INT_STATUS_ENABLE_ERROR_MASK, + .d_INT_STATUS_ENABLE_CPU_LSB = AR6320_INT_STATUS_ENABLE_CPU_LSB, + .d_INT_STATUS_ENABLE_CPU_MASK = AR6320_INT_STATUS_ENABLE_CPU_MASK, + .d_INT_STATUS_ENABLE_COUNTER_LSB = + AR6320_INT_STATUS_ENABLE_COUNTER_LSB, + .d_INT_STATUS_ENABLE_COUNTER_MASK = + AR6320_INT_STATUS_ENABLE_COUNTER_MASK, + .d_INT_STATUS_ENABLE_MBOX_DATA_LSB = + AR6320_INT_STATUS_ENABLE_MBOX_DATA_LSB, + .d_INT_STATUS_ENABLE_MBOX_DATA_MASK = + AR6320_INT_STATUS_ENABLE_MBOX_DATA_MASK, + 
.d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB = + AR6320_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK = + AR6320_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB = + AR6320_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK = + AR6320_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, + .d_COUNTER_INT_STATUS_ENABLE_BIT_LSB = + AR6320_COUNTER_INT_STATUS_ENABLE_BIT_LSB, + .d_COUNTER_INT_STATUS_ENABLE_BIT_MASK = + AR6320_COUNTER_INT_STATUS_ENABLE_BIT_MASK, + .d_INT_STATUS_ENABLE_ADDRESS = AR6320_INT_STATUS_ENABLE_ADDRESS, + .d_CPU_INT_STATUS_ENABLE_BIT_LSB = + AR6320_CPU_INT_STATUS_ENABLE_BIT_LSB, + .d_CPU_INT_STATUS_ENABLE_BIT_MASK = + AR6320_CPU_INT_STATUS_ENABLE_BIT_MASK, + .d_HOST_INT_STATUS_ADDRESS = AR6320_HOST_INT_STATUS_ADDRESS, + .d_CPU_INT_STATUS_ADDRESS = AR6320_CPU_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_ADDRESS = AR6320_ERROR_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_WAKEUP_MASK = AR6320_ERROR_INT_STATUS_WAKEUP_MASK, + .d_ERROR_INT_STATUS_WAKEUP_LSB = AR6320_ERROR_INT_STATUS_WAKEUP_LSB, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK = + AR6320_ERROR_INT_STATUS_RX_UNDERFLOW_MASK, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB = + AR6320_ERROR_INT_STATUS_RX_UNDERFLOW_LSB, + .d_ERROR_INT_STATUS_TX_OVERFLOW_MASK = + AR6320_ERROR_INT_STATUS_TX_OVERFLOW_MASK, + .d_ERROR_INT_STATUS_TX_OVERFLOW_LSB = + AR6320_ERROR_INT_STATUS_TX_OVERFLOW_LSB, + .d_COUNT_DEC_ADDRESS = AR6320_COUNT_DEC_ADDRESS, + .d_HOST_INT_STATUS_CPU_MASK = AR6320_HOST_INT_STATUS_CPU_MASK, + .d_HOST_INT_STATUS_CPU_LSB = AR6320_HOST_INT_STATUS_CPU_LSB, + .d_HOST_INT_STATUS_ERROR_MASK = AR6320_HOST_INT_STATUS_ERROR_MASK, + .d_HOST_INT_STATUS_ERROR_LSB = AR6320_HOST_INT_STATUS_ERROR_LSB, + .d_HOST_INT_STATUS_COUNTER_MASK = AR6320_HOST_INT_STATUS_COUNTER_MASK, + .d_HOST_INT_STATUS_COUNTER_LSB = AR6320_HOST_INT_STATUS_COUNTER_LSB, + .d_RX_LOOKAHEAD_VALID_ADDRESS = AR6320_RX_LOOKAHEAD_VALID_ADDRESS, + .d_WINDOW_DATA_ADDRESS = 
AR6320_WINDOW_DATA_ADDRESS, + .d_WINDOW_READ_ADDR_ADDRESS = AR6320_WINDOW_READ_ADDR_ADDRESS, + .d_WINDOW_WRITE_ADDR_ADDRESS = AR6320_WINDOW_WRITE_ADDR_ADDRESS, + .d_SOC_GLOBAL_RESET_ADDRESS = AR6320_SOC_GLOBAL_RESET_ADDRESS, + .d_RTC_STATE_ADDRESS = AR6320_RTC_STATE_ADDRESS, + .d_RTC_STATE_COLD_RESET_MASK = AR6320_RTC_STATE_COLD_RESET_MASK, +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) + .d_PCIE_LOCAL_BASE_ADDRESS = AR6320_PCIE_LOCAL_BASE_ADDRESS, + .d_PCIE_SOC_WAKE_RESET = AR6320_PCIE_SOC_WAKE_RESET, + .d_PCIE_SOC_WAKE_ADDRESS = AR6320_PCIE_SOC_WAKE_ADDRESS, + .d_PCIE_SOC_WAKE_V_MASK = AR6320_PCIE_SOC_WAKE_V_MASK, + .d_MUX_ID_MASK = AR6320_MUX_ID_MASK, + .d_TRANSACTION_ID_MASK = AR6320_TRANSACTION_ID_MASK, + .d_FW_IND_HELPER = AR6320_FW_IND_HELPER, + .d_PCIE_SOC_RDY_STATUS_ADDRESS = PCIE_SOC_RDY_STATUS_ADDRESS, + .d_PCIE_SOC_RDY_STATUS_BAR_MASK = PCIE_SOC_RDY_STATUS_BAR_MASK, + .d_SOC_PCIE_BASE_ADDRESS = SOC_PCIE_BASE_ADDRESS, + .d_MSI_MAGIC_ADR_ADDRESS = MSI_MAGIC_ADR_ADDRESS, + .d_MSI_MAGIC_ADDRESS = MSI_MAGIC_ADDRESS, + .d_HOST_CE_COUNT = 8, + .d_ENABLE_MSI = 0, +#endif + .d_RTC_STATE_V_MASK = AR6320_RTC_STATE_V_MASK, + .d_RTC_STATE_V_LSB = AR6320_RTC_STATE_V_LSB, + .d_FW_IND_EVENT_PENDING = AR6320_FW_IND_EVENT_PENDING, + .d_FW_IND_INITIALIZED = AR6320_FW_IND_INITIALIZED, + .d_RTC_STATE_V_ON = AR6320_RTC_STATE_V_ON, +#if defined(SDIO_3_0) + .d_HOST_INT_STATUS_MBOX_DATA_MASK = + AR6320_HOST_INT_STATUS_MBOX_DATA_MASK, + .d_HOST_INT_STATUS_MBOX_DATA_LSB = + AR6320_HOST_INT_STATUS_MBOX_DATA_LSB, +#endif +}; + +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) +struct ce_reg_def ar6320_ce_targetdef = { + /* copy_engine.c */ + .d_DST_WR_INDEX_ADDRESS = AR6320_DST_WR_INDEX_ADDRESS, + .d_SRC_WATERMARK_ADDRESS = AR6320_SRC_WATERMARK_ADDRESS, + .d_SRC_WATERMARK_LOW_MASK = AR6320_SRC_WATERMARK_LOW_MASK, + .d_SRC_WATERMARK_HIGH_MASK = AR6320_SRC_WATERMARK_HIGH_MASK, + .d_DST_WATERMARK_LOW_MASK = AR6320_DST_WATERMARK_LOW_MASK, + 
.d_DST_WATERMARK_HIGH_MASK = AR6320_DST_WATERMARK_HIGH_MASK, + .d_CURRENT_SRRI_ADDRESS = AR6320_CURRENT_SRRI_ADDRESS, + .d_CURRENT_DRRI_ADDRESS = AR6320_CURRENT_DRRI_ADDRESS, + .d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK = + AR6320_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK = + AR6320_HOST_IS_SRC_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK = + AR6320_HOST_IS_DST_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_DST_RING_LOW_WATERMARK_MASK = + AR6320_HOST_IS_DST_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_ADDRESS = AR6320_HOST_IS_ADDRESS, + .d_HOST_IS_COPY_COMPLETE_MASK = AR6320_HOST_IS_COPY_COMPLETE_MASK, + .d_CE_WRAPPER_BASE_ADDRESS = AR6320_CE_WRAPPER_BASE_ADDRESS, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS = + AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS, + .d_HOST_IE_ADDRESS = AR6320_HOST_IE_ADDRESS, + .d_HOST_IE_COPY_COMPLETE_MASK = AR6320_HOST_IE_COPY_COMPLETE_MASK, + .d_SR_BA_ADDRESS = AR6320_SR_BA_ADDRESS, + .d_SR_SIZE_ADDRESS = AR6320_SR_SIZE_ADDRESS, + .d_CE_CTRL1_ADDRESS = AR6320_CE_CTRL1_ADDRESS, + .d_CE_CTRL1_DMAX_LENGTH_MASK = AR6320_CE_CTRL1_DMAX_LENGTH_MASK, + .d_DR_BA_ADDRESS = AR6320_DR_BA_ADDRESS, + .d_DR_SIZE_ADDRESS = AR6320_DR_SIZE_ADDRESS, + .d_MISC_IE_ADDRESS = AR6320_MISC_IE_ADDRESS, + .d_MISC_IS_AXI_ERR_MASK = AR6320_MISC_IS_AXI_ERR_MASK, + .d_MISC_IS_DST_ADDR_ERR_MASK = AR6320_MISC_IS_DST_ADDR_ERR_MASK, + .d_MISC_IS_SRC_LEN_ERR_MASK = AR6320_MISC_IS_SRC_LEN_ERR_MASK, + .d_MISC_IS_DST_MAX_LEN_VIO_MASK = AR6320_MISC_IS_DST_MAX_LEN_VIO_MASK, + .d_MISC_IS_DST_RING_OVERFLOW_MASK = + AR6320_MISC_IS_DST_RING_OVERFLOW_MASK, + .d_MISC_IS_SRC_RING_OVERFLOW_MASK = + AR6320_MISC_IS_SRC_RING_OVERFLOW_MASK, + .d_SRC_WATERMARK_LOW_LSB = AR6320_SRC_WATERMARK_LOW_LSB, + .d_SRC_WATERMARK_HIGH_LSB = AR6320_SRC_WATERMARK_HIGH_LSB, + .d_DST_WATERMARK_LOW_LSB = AR6320_DST_WATERMARK_LOW_LSB, + .d_DST_WATERMARK_HIGH_LSB = AR6320_DST_WATERMARK_HIGH_LSB, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK = + 
AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB = + AR6320_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB, + .d_CE_CTRL1_DMAX_LENGTH_LSB = AR6320_CE_CTRL1_DMAX_LENGTH_LSB, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK = + AR6320_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK = + AR6320_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB = + AR6320_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB = + AR6320_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB, + .d_CE_WRAPPER_DEBUG_OFFSET = AR6320_CE_WRAPPER_DEBUG_OFFSET, + .d_CE_WRAPPER_DEBUG_SEL_MSB = AR6320_CE_WRAPPER_DEBUG_SEL_MSB, + .d_CE_WRAPPER_DEBUG_SEL_LSB = AR6320_CE_WRAPPER_DEBUG_SEL_LSB, + .d_CE_WRAPPER_DEBUG_SEL_MASK = AR6320_CE_WRAPPER_DEBUG_SEL_MASK, + .d_CE_DEBUG_OFFSET = AR6320_CE_DEBUG_OFFSET, + .d_CE_DEBUG_SEL_MSB = AR6320_CE_DEBUG_SEL_MSB, + .d_CE_DEBUG_SEL_LSB = AR6320_CE_DEBUG_SEL_LSB, + .d_CE_DEBUG_SEL_MASK = AR6320_CE_DEBUG_SEL_MASK, + .d_CE0_BASE_ADDRESS = AR6320_CE0_BASE_ADDRESS, + .d_CE1_BASE_ADDRESS = AR6320_CE1_BASE_ADDRESS, + +}; +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320v2def.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320v2def.h new file mode 100644 index 0000000000000000000000000000000000000000..5ad0b98fd4ab9a473d260fca1fe0bd08b76f0aa7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ar6320v2def.h @@ -0,0 +1,821 @@ +/* + * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _AR6320V2DEF_H_ +#define _AR6320V2DEF_H_ + +/* Base Addresses */ +#define AR6320V2_RTC_SOC_BASE_ADDRESS 0x00000800 +#define AR6320V2_RTC_WMAC_BASE_ADDRESS 0x00001000 +#define AR6320V2_MAC_COEX_BASE_ADDRESS 0x0000f000 +#define AR6320V2_BT_COEX_BASE_ADDRESS 0x00002000 +#define AR6320V2_SOC_PCIE_BASE_ADDRESS 0x00038000 +#define AR6320V2_SOC_CORE_BASE_ADDRESS 0x0003a000 +#define AR6320V2_WLAN_UART_BASE_ADDRESS 0x0000c000 +#define AR6320V2_WLAN_SI_BASE_ADDRESS 0x00010000 +#define AR6320V2_WLAN_GPIO_BASE_ADDRESS 0x00005000 +#define AR6320V2_WLAN_ANALOG_INTF_BASE_ADDRESS 0x00006000 +#define AR6320V2_WLAN_MAC_BASE_ADDRESS 0x00010000 +#define AR6320V2_EFUSE_BASE_ADDRESS 0x00024000 +#define AR6320V2_FPGA_REG_BASE_ADDRESS 0x00039000 +#define AR6320V2_WLAN_UART2_BASE_ADDRESS 0x00054c00 +#define AR6320V2_DBI_BASE_ADDRESS 0x0003c000 + +#define AR6320V2_SCRATCH_3_ADDRESS 0x0028 +#define AR6320V2_TARG_DRAM_START 0x00400000 +#define AR6320V2_SOC_SYSTEM_SLEEP_OFFSET 0x000000c0 +#define AR6320V2_SOC_RESET_CONTROL_OFFSET 0x00000000 +#define AR6320V2_SOC_CLOCK_CONTROL_OFFSET 0x00000028 +#define AR6320V2_SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001 +#define AR6320V2_SOC_RESET_CONTROL_SI0_RST_MASK 0x00000000 +#define AR6320V2_WLAN_GPIO_PIN0_ADDRESS 0x00000068 +#define AR6320V2_WLAN_GPIO_PIN1_ADDRESS 0x0000006c +#define AR6320V2_WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800 +#define AR6320V2_WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800 +#define AR6320V2_SOC_CPU_CLOCK_OFFSET 0x00000020 +#define AR6320V2_SOC_LPO_CAL_OFFSET 0x000000e0 +#define AR6320V2_WLAN_GPIO_PIN10_ADDRESS 0x00000090 +#define AR6320V2_WLAN_GPIO_PIN11_ADDRESS 0x00000094 +#define 
AR6320V2_WLAN_GPIO_PIN12_ADDRESS 0x00000098 +#define AR6320V2_WLAN_GPIO_PIN13_ADDRESS 0x0000009c +#define AR6320V2_SOC_CPU_CLOCK_STANDARD_LSB 0 +#define AR6320V2_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 +#define AR6320V2_SOC_LPO_CAL_ENABLE_LSB 20 +#define AR6320V2_SOC_LPO_CAL_ENABLE_MASK 0x00100000 + +#define AR6320V2_WLAN_SYSTEM_SLEEP_DISABLE_LSB 0 +#define AR6320V2_WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001 +#define AR6320V2_WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008 +#define AR6320V2_WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004 +#define AR6320V2_SI_CONFIG_BIDIR_OD_DATA_LSB 18 +#define AR6320V2_SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000 +#define AR6320V2_SI_CONFIG_I2C_LSB 16 +#define AR6320V2_SI_CONFIG_I2C_MASK 0x00010000 +#define AR6320V2_SI_CONFIG_POS_SAMPLE_LSB 7 +#define AR6320V2_SI_CONFIG_POS_SAMPLE_MASK 0x00000080 +#define AR6320V2_SI_CONFIG_INACTIVE_CLK_LSB 4 +#define AR6320V2_SI_CONFIG_INACTIVE_CLK_MASK 0x00000010 +#define AR6320V2_SI_CONFIG_INACTIVE_DATA_LSB 5 +#define AR6320V2_SI_CONFIG_INACTIVE_DATA_MASK 0x00000020 +#define AR6320V2_SI_CONFIG_DIVIDER_LSB 0 +#define AR6320V2_SI_CONFIG_DIVIDER_MASK 0x0000000f +#define AR6320V2_SI_CONFIG_OFFSET 0x00000000 +#define AR6320V2_SI_TX_DATA0_OFFSET 0x00000008 +#define AR6320V2_SI_TX_DATA1_OFFSET 0x0000000c +#define AR6320V2_SI_RX_DATA0_OFFSET 0x00000010 +#define AR6320V2_SI_RX_DATA1_OFFSET 0x00000014 +#define AR6320V2_SI_CS_OFFSET 0x00000004 +#define AR6320V2_SI_CS_DONE_ERR_MASK 0x00000400 +#define AR6320V2_SI_CS_DONE_INT_MASK 0x00000200 +#define AR6320V2_SI_CS_START_LSB 8 +#define AR6320V2_SI_CS_START_MASK 0x00000100 +#define AR6320V2_SI_CS_RX_CNT_LSB 4 +#define AR6320V2_SI_CS_RX_CNT_MASK 0x000000f0 +#define AR6320V2_SI_CS_TX_CNT_LSB 0 +#define AR6320V2_SI_CS_TX_CNT_MASK 0x0000000f +#define AR6320V2_CE_COUNT 8 +#define AR6320V2_SR_WR_INDEX_ADDRESS 0x003c +#define AR6320V2_DST_WATERMARK_ADDRESS 0x0050 +#define AR6320V2_RX_MSDU_END_4_FIRST_MSDU_LSB 14 +#define AR6320V2_RX_MSDU_END_4_FIRST_MSDU_MASK 0x00004000 +#define 
AR6320V2_RX_MPDU_START_0_RETRY_LSB 14 +#define AR6320V2_RX_MPDU_START_0_RETRY_MASK 0x00004000 +#define AR6320V2_RX_MPDU_START_0_SEQ_NUM_LSB 16 +#define AR6320V2_RX_MPDU_START_0_SEQ_NUM_MASK 0x0fff0000 +#define AR6320V2_RX_MPDU_START_2_PN_47_32_LSB 0 +#define AR6320V2_RX_MPDU_START_2_PN_47_32_MASK 0x0000ffff +#define AR6320V2_RX_MPDU_START_2_TID_LSB 28 +#define AR6320V2_RX_MPDU_START_2_TID_MASK 0xf0000000 +#define AR6320V2_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB 16 +#define AR6320V2_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK 0xffff0000 +#define AR6320V2_RX_MSDU_END_4_LAST_MSDU_LSB 15 +#define AR6320V2_RX_MSDU_END_4_LAST_MSDU_MASK 0x00008000 +#define AR6320V2_RX_ATTENTION_0_MCAST_BCAST_LSB 2 +#define AR6320V2_RX_ATTENTION_0_MCAST_BCAST_MASK 0x00000004 +#define AR6320V2_RX_ATTENTION_0_FRAGMENT_LSB 13 +#define AR6320V2_RX_ATTENTION_0_FRAGMENT_MASK 0x00002000 +#define AR6320V2_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK 0x08000000 +#define AR6320V2_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB 16 +#define AR6320V2_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK 0x00ff0000 +#define AR6320V2_RX_MSDU_START_0_MSDU_LENGTH_LSB 0 +#define AR6320V2_RX_MSDU_START_0_MSDU_LENGTH_MASK 0x00003fff + +#define AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_OFFSET 0x00000008 +#define AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_LSB 8 +#define AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_MASK 0x00000300 +#define AR6320V2_RX_MPDU_START_0_ENCRYPTED_LSB 13 +#define AR6320V2_RX_MPDU_START_0_ENCRYPTED_MASK 0x00002000 +#define AR6320V2_RX_ATTENTION_0_MORE_DATA_MASK 0x00000400 +#define AR6320V2_RX_ATTENTION_0_MSDU_DONE_MASK 0x80000000 +#define AR6320V2_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK 0x00040000 +#define AR6320V2_DST_WR_INDEX_ADDRESS 0x0040 +#define AR6320V2_SRC_WATERMARK_ADDRESS 0x004c +#define AR6320V2_SRC_WATERMARK_LOW_MASK 0xffff0000 +#define AR6320V2_SRC_WATERMARK_HIGH_MASK 0x0000ffff +#define AR6320V2_DST_WATERMARK_LOW_MASK 0xffff0000 +#define AR6320V2_DST_WATERMARK_HIGH_MASK 0x0000ffff +#define AR6320V2_CURRENT_SRRI_ADDRESS 0x0044 +#define 
AR6320V2_CURRENT_DRRI_ADDRESS 0x0048 +#define AR6320V2_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002 +#define AR6320V2_HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004 +#define AR6320V2_HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008 +#define AR6320V2_HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010 +#define AR6320V2_HOST_IS_ADDRESS 0x0030 +#define AR6320V2_HOST_IS_COPY_COMPLETE_MASK 0x00000001 +#define AR6320V2_HOST_IE_ADDRESS 0x002c +#define AR6320V2_HOST_IE_COPY_COMPLETE_MASK 0x00000001 +#define AR6320V2_SR_BA_ADDRESS 0x0000 +#define AR6320V2_SR_SIZE_ADDRESS 0x0004 +#define AR6320V2_DR_BA_ADDRESS 0x0008 +#define AR6320V2_DR_SIZE_ADDRESS 0x000c +#define AR6320V2_MISC_IE_ADDRESS 0x0034 +#define AR6320V2_MISC_IS_AXI_ERR_MASK 0x00000400 +#define AR6320V2_MISC_IS_DST_ADDR_ERR_MASK 0x00000200 +#define AR6320V2_MISC_IS_SRC_LEN_ERR_MASK 0x00000100 +#define AR6320V2_MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080 +#define AR6320V2_MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040 +#define AR6320V2_MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020 +#define AR6320V2_SRC_WATERMARK_LOW_LSB 16 +#define AR6320V2_SRC_WATERMARK_HIGH_LSB 0 +#define AR6320V2_DST_WATERMARK_LOW_LSB 16 +#define AR6320V2_DST_WATERMARK_HIGH_LSB 0 +#define AR6320V2_SOC_GLOBAL_RESET_ADDRESS 0x0008 +#define AR6320V2_RTC_STATE_ADDRESS 0x0000 +#define AR6320V2_RTC_STATE_COLD_RESET_MASK 0x00002000 +#define AR6320V2_RTC_STATE_V_MASK 0x00000007 +#define AR6320V2_RTC_STATE_V_LSB 0 +#define AR6320V2_RTC_STATE_V_ON 3 +#define AR6320V2_FW_IND_EVENT_PENDING 1 +#define AR6320V2_FW_IND_INITIALIZED 2 +#define AR6320V2_CPU_INTR_ADDRESS 0x0010 +#define AR6320V2_SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050 +#define AR6320V2_SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004 +#define AR6320V2_SOC_RESET_CONTROL_ADDRESS 0x00000000 +#define AR6320V2_SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040 +#define AR6320V2_CORE_CTRL_ADDRESS 0x0000 +#define AR6320V2_CORE_CTRL_CPU_INTR_MASK 0x00002000 +#define AR6320V2_LOCAL_SCRATCH_OFFSET 0x000000c0 +#define 
AR6320V2_CLOCK_GPIO_OFFSET 0xffffffff +#define AR6320V2_CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0 +#define AR6320V2_CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0 +#define AR6320V2_SOC_CHIP_ID_ADDRESS 0x000000f0 +#define AR6320V2_SOC_CHIP_ID_VERSION_MASK 0xfffc0000 +#define AR6320V2_SOC_CHIP_ID_VERSION_LSB 18 +#define AR6320V2_SOC_CHIP_ID_REVISION_MASK 0x00000f00 +#define AR6320V2_SOC_CHIP_ID_REVISION_LSB 8 +#if defined(HIF_SDIO) +#define AR6320V2_FW_IND_HELPER 4 +#endif +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) +#define AR6320V2_CE_WRAPPER_BASE_ADDRESS 0x00034000 +#define AR6320V2_CE0_BASE_ADDRESS 0x00034400 +#define AR6320V2_CE1_BASE_ADDRESS 0x00034800 +#define AR6320V2_CE2_BASE_ADDRESS 0x00034c00 +#define AR6320V2_CE3_BASE_ADDRESS 0x00035000 +#define AR6320V2_CE4_BASE_ADDRESS 0x00035400 +#define AR6320V2_CE5_BASE_ADDRESS 0x00035800 +#define AR6320V2_CE6_BASE_ADDRESS 0x00035c00 +#define AR6320V2_CE7_BASE_ADDRESS 0x00036000 +#define AR6320V2_WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x00007800 +#define AR6320V2_CE_CTRL1_ADDRESS 0x0010 +#define AR6320V2_CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff +#define AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000 +#define AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00 +#define AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8 +#define AR6320V2_CE_CTRL1_DMAX_LENGTH_LSB 0 +#define AR6320V2_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000 +#define AR6320V2_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000 +#define AR6320V2_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16 +#define AR6320V2_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17 +#define AR6320V2_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK 0x00000020 +#define AR6320V2_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB 5 +#define AR6320V2_PCIE_SOC_WAKE_RESET 0x00000000 +#define AR6320V2_PCIE_SOC_WAKE_ADDRESS 0x0004 +#define AR6320V2_PCIE_SOC_WAKE_V_MASK 0x00000001 +#define AR6320V2_MUX_ID_MASK 0x0000 +#define AR6320V2_TRANSACTION_ID_MASK 0x3fff +#define AR6320V2_PCIE_LOCAL_BASE_ADDRESS 0x80000 +#define 
AR6320V2_FW_IND_HELPER 4 +#define AR6320V2_PCIE_INTR_ENABLE_ADDRESS 0x0008 +#define AR6320V2_PCIE_INTR_CLR_ADDRESS 0x0014 +#define AR6320V2_PCIE_INTR_FIRMWARE_MASK 0x00000400 +#define AR6320V2_PCIE_INTR_CE0_MASK 0x00000800 +#define AR6320V2_PCIE_INTR_CE_MASK_ALL 0x0007f800 +#define AR6320V2_PCIE_INTR_CAUSE_ADDRESS 0x000c +#define AR6320V2_SOC_RESET_CONTROL_CE_RST_MASK 0x00000001 +#define AR6320V2_SOC_POWER_REG_OFFSET 0x0000010c +/* Copy Engine Debug */ +#define AR6320V2_WLAN_DEBUG_INPUT_SEL_OFFSET 0x0000010c +#define AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_MSB 3 +#define AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_LSB 0 +#define AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_MASK 0x0000000f +#define AR6320V2_WLAN_DEBUG_CONTROL_OFFSET 0x00000108 +#define AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_MSB 0 +#define AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_LSB 0 +#define AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_MASK 0x00000001 +#define AR6320V2_WLAN_DEBUG_OUT_OFFSET 0x00000110 +#define AR6320V2_WLAN_DEBUG_OUT_DATA_MSB 19 +#define AR6320V2_WLAN_DEBUG_OUT_DATA_LSB 0 +#define AR6320V2_WLAN_DEBUG_OUT_DATA_MASK 0x000fffff +#define AR6320V2_AMBA_DEBUG_BUS_OFFSET 0x0000011c +#define AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB 13 +#define AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB 8 +#define AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK 0x00003f00 +#define AR6320V2_AMBA_DEBUG_BUS_SEL_MSB 4 +#define AR6320V2_AMBA_DEBUG_BUS_SEL_LSB 0 +#define AR6320V2_AMBA_DEBUG_BUS_SEL_MASK 0x0000001f +#define AR6320V2_CE_WRAPPER_DEBUG_OFFSET 0x0008 +#define AR6320V2_CE_WRAPPER_DEBUG_SEL_MSB 5 +#define AR6320V2_CE_WRAPPER_DEBUG_SEL_LSB 0 +#define AR6320V2_CE_WRAPPER_DEBUG_SEL_MASK 0x0000003f +#define AR6320V2_CE_DEBUG_OFFSET 0x0054 +#define AR6320V2_CE_DEBUG_SEL_MSB 5 +#define AR6320V2_CE_DEBUG_SEL_LSB 0 +#define AR6320V2_CE_DEBUG_SEL_MASK 0x0000003f +/* End */ + +/* PLL start */ +#define AR6320V2_EFUSE_OFFSET 0x0000032c +#define AR6320V2_EFUSE_XTAL_SEL_MSB 10 +#define AR6320V2_EFUSE_XTAL_SEL_LSB 8 +#define AR6320V2_EFUSE_XTAL_SEL_MASK 0x00000700 
+#define AR6320V2_BB_PLL_CONFIG_OFFSET 0x000002f4 +#define AR6320V2_BB_PLL_CONFIG_OUTDIV_MSB 20 +#define AR6320V2_BB_PLL_CONFIG_OUTDIV_LSB 18 +#define AR6320V2_BB_PLL_CONFIG_OUTDIV_MASK 0x001c0000 +#define AR6320V2_BB_PLL_CONFIG_FRAC_MSB 17 +#define AR6320V2_BB_PLL_CONFIG_FRAC_LSB 0 +#define AR6320V2_BB_PLL_CONFIG_FRAC_MASK 0x0003ffff +#define AR6320V2_WLAN_PLL_SETTLE_TIME_MSB 10 +#define AR6320V2_WLAN_PLL_SETTLE_TIME_LSB 0 +#define AR6320V2_WLAN_PLL_SETTLE_TIME_MASK 0x000007ff +#define AR6320V2_WLAN_PLL_SETTLE_OFFSET 0x0018 +#define AR6320V2_WLAN_PLL_SETTLE_SW_MASK 0x000007ff +#define AR6320V2_WLAN_PLL_SETTLE_RSTMASK 0xffffffff +#define AR6320V2_WLAN_PLL_SETTLE_RESET 0x00000400 +#define AR6320V2_WLAN_PLL_CONTROL_NOPWD_MSB 18 +#define AR6320V2_WLAN_PLL_CONTROL_NOPWD_LSB 18 +#define AR6320V2_WLAN_PLL_CONTROL_NOPWD_MASK 0x00040000 +#define AR6320V2_WLAN_PLL_CONTROL_BYPASS_MSB 16 +#define AR6320V2_WLAN_PLL_CONTROL_BYPASS_LSB 16 +#define AR6320V2_WLAN_PLL_CONTROL_BYPASS_MASK 0x00010000 +#define AR6320V2_WLAN_PLL_CONTROL_BYPASS_RESET 0x1 +#define AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_MSB 15 +#define AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_LSB 14 +#define AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_MASK 0x0000c000 +#define AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_RESET 0x0 +#define AR6320V2_WLAN_PLL_CONTROL_REFDIV_MSB 13 +#define AR6320V2_WLAN_PLL_CONTROL_REFDIV_LSB 10 +#define AR6320V2_WLAN_PLL_CONTROL_REFDIV_MASK 0x00003c00 +#define AR6320V2_WLAN_PLL_CONTROL_REFDIV_RESET 0x0 +#define AR6320V2_WLAN_PLL_CONTROL_DIV_MSB 9 +#define AR6320V2_WLAN_PLL_CONTROL_DIV_LSB 0 +#define AR6320V2_WLAN_PLL_CONTROL_DIV_MASK 0x000003ff +#define AR6320V2_WLAN_PLL_CONTROL_DIV_RESET 0x11 +#define AR6320V2_WLAN_PLL_CONTROL_OFFSET 0x0014 +#define AR6320V2_WLAN_PLL_CONTROL_SW_MASK 0x001fffff +#define AR6320V2_WLAN_PLL_CONTROL_RSTMASK 0xffffffff +#define AR6320V2_WLAN_PLL_CONTROL_RESET 0x00010011 +#define AR6320V2_SOC_CORE_CLK_CTRL_OFFSET 0x00000114 +#define AR6320V2_SOC_CORE_CLK_CTRL_DIV_MSB 2 +#define 
AR6320V2_SOC_CORE_CLK_CTRL_DIV_LSB 0 +#define AR6320V2_SOC_CORE_CLK_CTRL_DIV_MASK 0x00000007 +#define AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_MSB 5 +#define AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_LSB 5 +#define AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020 +#define AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_RESET 0x0 +#define AR6320V2_RTC_SYNC_STATUS_OFFSET 0x0244 +#define AR6320V2_SOC_CPU_CLOCK_OFFSET 0x00000020 +#define AR6320V2_SOC_CPU_CLOCK_STANDARD_MSB 1 +#define AR6320V2_SOC_CPU_CLOCK_STANDARD_LSB 0 +#define AR6320V2_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 +/* PLL end */ + +#define AR6320V2_PCIE_INTR_CE_MASK(n) \ + (AR6320V2_PCIE_INTR_CE0_MASK << (n)) +#endif +#define AR6320V2_DRAM_BASE_ADDRESS AR6320V2_TARG_DRAM_START +#define AR6320V2_FW_INDICATOR_ADDRESS \ + (AR6320V2_SOC_CORE_BASE_ADDRESS + AR6320V2_SCRATCH_3_ADDRESS) +#define AR6320V2_SYSTEM_SLEEP_OFFSET AR6320V2_SOC_SYSTEM_SLEEP_OFFSET +#define AR6320V2_WLAN_SYSTEM_SLEEP_OFFSET 0x002c +#define AR6320V2_WLAN_RESET_CONTROL_OFFSET AR6320V2_SOC_RESET_CONTROL_OFFSET +#define AR6320V2_CLOCK_CONTROL_OFFSET AR6320V2_SOC_CLOCK_CONTROL_OFFSET +#define AR6320V2_CLOCK_CONTROL_SI0_CLK_MASK \ + AR6320V2_SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define AR6320V2_RESET_CONTROL_MBOX_RST_MASK 0x00000004 +#define AR6320V2_RESET_CONTROL_SI0_RST_MASK \ + AR6320V2_SOC_RESET_CONTROL_SI0_RST_MASK +#define AR6320V2_GPIO_BASE_ADDRESS AR6320V2_WLAN_GPIO_BASE_ADDRESS +#define AR6320V2_GPIO_PIN0_OFFSET AR6320V2_WLAN_GPIO_PIN0_ADDRESS +#define AR6320V2_GPIO_PIN1_OFFSET AR6320V2_WLAN_GPIO_PIN1_ADDRESS +#define AR6320V2_GPIO_PIN0_CONFIG_MASK AR6320V2_WLAN_GPIO_PIN0_CONFIG_MASK +#define AR6320V2_GPIO_PIN1_CONFIG_MASK AR6320V2_WLAN_GPIO_PIN1_CONFIG_MASK +#define AR6320V2_SI_BASE_ADDRESS 0x00050000 +#define AR6320V2_CPU_CLOCK_OFFSET AR6320V2_SOC_CPU_CLOCK_OFFSET +#define AR6320V2_LPO_CAL_OFFSET AR6320V2_SOC_LPO_CAL_OFFSET +#define AR6320V2_GPIO_PIN10_OFFSET AR6320V2_WLAN_GPIO_PIN10_ADDRESS +#define AR6320V2_GPIO_PIN11_OFFSET 
AR6320V2_WLAN_GPIO_PIN11_ADDRESS +#define AR6320V2_GPIO_PIN12_OFFSET AR6320V2_WLAN_GPIO_PIN12_ADDRESS +#define AR6320V2_GPIO_PIN13_OFFSET AR6320V2_WLAN_GPIO_PIN13_ADDRESS +#define AR6320V2_CPU_CLOCK_STANDARD_LSB AR6320V2_SOC_CPU_CLOCK_STANDARD_LSB +#define AR6320V2_CPU_CLOCK_STANDARD_MASK AR6320V2_SOC_CPU_CLOCK_STANDARD_MASK +#define AR6320V2_LPO_CAL_ENABLE_LSB AR6320V2_SOC_LPO_CAL_ENABLE_LSB +#define AR6320V2_LPO_CAL_ENABLE_MASK AR6320V2_SOC_LPO_CAL_ENABLE_MASK +#define AR6320V2_ANALOG_INTF_BASE_ADDRESS \ + AR6320V2_WLAN_ANALOG_INTF_BASE_ADDRESS +#define AR6320V2_MBOX_BASE_ADDRESS 0x00008000 +#define AR6320V2_INT_STATUS_ENABLE_ERROR_LSB 7 +#define AR6320V2_INT_STATUS_ENABLE_ERROR_MASK 0x00000080 +#define AR6320V2_INT_STATUS_ENABLE_CPU_LSB 6 +#define AR6320V2_INT_STATUS_ENABLE_CPU_MASK 0x00000040 +#define AR6320V2_INT_STATUS_ENABLE_COUNTER_LSB 4 +#define AR6320V2_INT_STATUS_ENABLE_COUNTER_MASK 0x00000010 +#define AR6320V2_INT_STATUS_ENABLE_MBOX_DATA_LSB 0 +#define AR6320V2_INT_STATUS_ENABLE_MBOX_DATA_MASK 0x0000000f +#define AR6320V2_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB 17 +#define AR6320V2_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK 0x00020000 +#define AR6320V2_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB 16 +#define AR6320V2_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK 0x00010000 +#define AR6320V2_COUNTER_INT_STATUS_ENABLE_BIT_LSB 24 +#define AR6320V2_COUNTER_INT_STATUS_ENABLE_BIT_MASK 0xff000000 +#define AR6320V2_INT_STATUS_ENABLE_ADDRESS 0x0828 +#define AR6320V2_CPU_INT_STATUS_ENABLE_BIT_LSB 8 +#define AR6320V2_CPU_INT_STATUS_ENABLE_BIT_MASK 0x0000ff00 +#define AR6320V2_HOST_INT_STATUS_ADDRESS 0x0800 +#define AR6320V2_CPU_INT_STATUS_ADDRESS 0x0801 +#define AR6320V2_ERROR_INT_STATUS_ADDRESS 0x0802 +#define AR6320V2_ERROR_INT_STATUS_WAKEUP_MASK 0x00040000 +#define AR6320V2_ERROR_INT_STATUS_WAKEUP_LSB 18 +#define AR6320V2_ERROR_INT_STATUS_RX_UNDERFLOW_MASK 0x00020000 +#define AR6320V2_ERROR_INT_STATUS_RX_UNDERFLOW_LSB 17 +#define AR6320V2_ERROR_INT_STATUS_TX_OVERFLOW_MASK 0x00010000 
+#define AR6320V2_ERROR_INT_STATUS_TX_OVERFLOW_LSB 16 +#define AR6320V2_COUNT_DEC_ADDRESS 0x0840 +#define AR6320V2_HOST_INT_STATUS_CPU_MASK 0x00000040 +#define AR6320V2_HOST_INT_STATUS_CPU_LSB 6 +#define AR6320V2_HOST_INT_STATUS_ERROR_MASK 0x00000080 +#define AR6320V2_HOST_INT_STATUS_ERROR_LSB 7 +#define AR6320V2_HOST_INT_STATUS_COUNTER_MASK 0x00000010 +#define AR6320V2_HOST_INT_STATUS_COUNTER_LSB 4 +#define AR6320V2_RX_LOOKAHEAD_VALID_ADDRESS 0x0805 +#define AR6320V2_WINDOW_DATA_ADDRESS 0x0874 +#define AR6320V2_WINDOW_READ_ADDR_ADDRESS 0x087c +#define AR6320V2_WINDOW_WRITE_ADDR_ADDRESS 0x0878 +#define AR6320V2_HOST_INT_STATUS_MBOX_DATA_MASK 0x0f +#define AR6320V2_HOST_INT_STATUS_MBOX_DATA_LSB 0 + +struct targetdef_s ar6320v2_targetdef = { + .d_RTC_SOC_BASE_ADDRESS = AR6320V2_RTC_SOC_BASE_ADDRESS, + .d_RTC_WMAC_BASE_ADDRESS = AR6320V2_RTC_WMAC_BASE_ADDRESS, + .d_SYSTEM_SLEEP_OFFSET = AR6320V2_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_OFFSET = AR6320V2_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_DISABLE_LSB = + AR6320V2_WLAN_SYSTEM_SLEEP_DISABLE_LSB, + .d_WLAN_SYSTEM_SLEEP_DISABLE_MASK = + AR6320V2_WLAN_SYSTEM_SLEEP_DISABLE_MASK, + .d_CLOCK_CONTROL_OFFSET = AR6320V2_CLOCK_CONTROL_OFFSET, + .d_CLOCK_CONTROL_SI0_CLK_MASK = AR6320V2_CLOCK_CONTROL_SI0_CLK_MASK, + .d_RESET_CONTROL_OFFSET = AR6320V2_SOC_RESET_CONTROL_OFFSET, + .d_RESET_CONTROL_MBOX_RST_MASK = AR6320V2_RESET_CONTROL_MBOX_RST_MASK, + .d_RESET_CONTROL_SI0_RST_MASK = AR6320V2_RESET_CONTROL_SI0_RST_MASK, + .d_WLAN_RESET_CONTROL_OFFSET = AR6320V2_WLAN_RESET_CONTROL_OFFSET, + .d_WLAN_RESET_CONTROL_COLD_RST_MASK = + AR6320V2_WLAN_RESET_CONTROL_COLD_RST_MASK, + .d_WLAN_RESET_CONTROL_WARM_RST_MASK = + AR6320V2_WLAN_RESET_CONTROL_WARM_RST_MASK, + .d_GPIO_BASE_ADDRESS = AR6320V2_GPIO_BASE_ADDRESS, + .d_GPIO_PIN0_OFFSET = AR6320V2_GPIO_PIN0_OFFSET, + .d_GPIO_PIN1_OFFSET = AR6320V2_GPIO_PIN1_OFFSET, + .d_GPIO_PIN0_CONFIG_MASK = AR6320V2_GPIO_PIN0_CONFIG_MASK, + .d_GPIO_PIN1_CONFIG_MASK = 
AR6320V2_GPIO_PIN1_CONFIG_MASK, + .d_SI_CONFIG_BIDIR_OD_DATA_LSB = AR6320V2_SI_CONFIG_BIDIR_OD_DATA_LSB, + .d_SI_CONFIG_BIDIR_OD_DATA_MASK = + AR6320V2_SI_CONFIG_BIDIR_OD_DATA_MASK, + .d_SI_CONFIG_I2C_LSB = AR6320V2_SI_CONFIG_I2C_LSB, + .d_SI_CONFIG_I2C_MASK = AR6320V2_SI_CONFIG_I2C_MASK, + .d_SI_CONFIG_POS_SAMPLE_LSB = AR6320V2_SI_CONFIG_POS_SAMPLE_LSB, + .d_SI_CONFIG_POS_SAMPLE_MASK = AR6320V2_SI_CONFIG_POS_SAMPLE_MASK, + .d_SI_CONFIG_INACTIVE_CLK_LSB = AR6320V2_SI_CONFIG_INACTIVE_CLK_LSB, + .d_SI_CONFIG_INACTIVE_CLK_MASK = AR6320V2_SI_CONFIG_INACTIVE_CLK_MASK, + .d_SI_CONFIG_INACTIVE_DATA_LSB = AR6320V2_SI_CONFIG_INACTIVE_DATA_LSB, + .d_SI_CONFIG_INACTIVE_DATA_MASK = + AR6320V2_SI_CONFIG_INACTIVE_DATA_MASK, + .d_SI_CONFIG_DIVIDER_LSB = AR6320V2_SI_CONFIG_DIVIDER_LSB, + .d_SI_CONFIG_DIVIDER_MASK = AR6320V2_SI_CONFIG_DIVIDER_MASK, + .d_SI_BASE_ADDRESS = AR6320V2_SI_BASE_ADDRESS, + .d_SI_CONFIG_OFFSET = AR6320V2_SI_CONFIG_OFFSET, + .d_SI_TX_DATA0_OFFSET = AR6320V2_SI_TX_DATA0_OFFSET, + .d_SI_TX_DATA1_OFFSET = AR6320V2_SI_TX_DATA1_OFFSET, + .d_SI_RX_DATA0_OFFSET = AR6320V2_SI_RX_DATA0_OFFSET, + .d_SI_RX_DATA1_OFFSET = AR6320V2_SI_RX_DATA1_OFFSET, + .d_SI_CS_OFFSET = AR6320V2_SI_CS_OFFSET, + .d_SI_CS_DONE_ERR_MASK = AR6320V2_SI_CS_DONE_ERR_MASK, + .d_SI_CS_DONE_INT_MASK = AR6320V2_SI_CS_DONE_INT_MASK, + .d_SI_CS_START_LSB = AR6320V2_SI_CS_START_LSB, + .d_SI_CS_START_MASK = AR6320V2_SI_CS_START_MASK, + .d_SI_CS_RX_CNT_LSB = AR6320V2_SI_CS_RX_CNT_LSB, + .d_SI_CS_RX_CNT_MASK = AR6320V2_SI_CS_RX_CNT_MASK, + .d_SI_CS_TX_CNT_LSB = AR6320V2_SI_CS_TX_CNT_LSB, + .d_SI_CS_TX_CNT_MASK = AR6320V2_SI_CS_TX_CNT_MASK, + .d_BOARD_DATA_SZ = AR6320_BOARD_DATA_SZ, + .d_BOARD_EXT_DATA_SZ = AR6320_BOARD_EXT_DATA_SZ, + .d_MBOX_BASE_ADDRESS = AR6320V2_MBOX_BASE_ADDRESS, + .d_LOCAL_SCRATCH_OFFSET = AR6320V2_LOCAL_SCRATCH_OFFSET, + .d_CPU_CLOCK_OFFSET = AR6320V2_CPU_CLOCK_OFFSET, + .d_LPO_CAL_OFFSET = AR6320V2_LPO_CAL_OFFSET, + .d_GPIO_PIN10_OFFSET = AR6320V2_GPIO_PIN10_OFFSET, + 
.d_GPIO_PIN11_OFFSET = AR6320V2_GPIO_PIN11_OFFSET, + .d_GPIO_PIN12_OFFSET = AR6320V2_GPIO_PIN12_OFFSET, + .d_GPIO_PIN13_OFFSET = AR6320V2_GPIO_PIN13_OFFSET, + .d_CLOCK_GPIO_OFFSET = AR6320V2_CLOCK_GPIO_OFFSET, + .d_CPU_CLOCK_STANDARD_LSB = AR6320V2_CPU_CLOCK_STANDARD_LSB, + .d_CPU_CLOCK_STANDARD_MASK = AR6320V2_CPU_CLOCK_STANDARD_MASK, + .d_LPO_CAL_ENABLE_LSB = AR6320V2_LPO_CAL_ENABLE_LSB, + .d_LPO_CAL_ENABLE_MASK = AR6320V2_LPO_CAL_ENABLE_MASK, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB = + AR6320V2_CLOCK_GPIO_BT_CLK_OUT_EN_LSB, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK = + AR6320V2_CLOCK_GPIO_BT_CLK_OUT_EN_MASK, + .d_ANALOG_INTF_BASE_ADDRESS = AR6320V2_ANALOG_INTF_BASE_ADDRESS, + .d_WLAN_MAC_BASE_ADDRESS = AR6320V2_WLAN_MAC_BASE_ADDRESS, + .d_FW_INDICATOR_ADDRESS = AR6320V2_FW_INDICATOR_ADDRESS, + .d_DRAM_BASE_ADDRESS = AR6320V2_DRAM_BASE_ADDRESS, + .d_SOC_CORE_BASE_ADDRESS = AR6320V2_SOC_CORE_BASE_ADDRESS, + .d_CORE_CTRL_ADDRESS = AR6320V2_CORE_CTRL_ADDRESS, +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) + .d_MSI_NUM_REQUEST = MSI_NUM_REQUEST, + .d_MSI_ASSIGN_FW = MSI_ASSIGN_FW, +#endif + .d_CORE_CTRL_CPU_INTR_MASK = AR6320V2_CORE_CTRL_CPU_INTR_MASK, + .d_SR_WR_INDEX_ADDRESS = AR6320V2_SR_WR_INDEX_ADDRESS, + .d_DST_WATERMARK_ADDRESS = AR6320V2_DST_WATERMARK_ADDRESS, + /* htt_rx.c */ + .d_RX_MSDU_END_4_FIRST_MSDU_MASK = + AR6320V2_RX_MSDU_END_4_FIRST_MSDU_MASK, + .d_RX_MSDU_END_4_FIRST_MSDU_LSB = + AR6320V2_RX_MSDU_END_4_FIRST_MSDU_LSB, + .d_RX_MPDU_START_0_RETRY_MASK = + AR6320V2_RX_MPDU_START_0_RETRY_MASK, + .d_RX_MPDU_START_0_SEQ_NUM_MASK = + AR6320V2_RX_MPDU_START_0_SEQ_NUM_MASK, + .d_RX_MPDU_START_0_SEQ_NUM_MASK = + AR6320V2_RX_MPDU_START_0_SEQ_NUM_MASK, + .d_RX_MPDU_START_0_SEQ_NUM_LSB = AR6320V2_RX_MPDU_START_0_SEQ_NUM_LSB, + .d_RX_MPDU_START_2_PN_47_32_LSB = + AR6320V2_RX_MPDU_START_2_PN_47_32_LSB, + .d_RX_MPDU_START_2_PN_47_32_MASK = + AR6320V2_RX_MPDU_START_2_PN_47_32_MASK, + .d_RX_MPDU_START_2_TID_LSB = + AR6320V2_RX_MPDU_START_2_TID_LSB, + 
.d_RX_MPDU_START_2_TID_MASK = + AR6320V2_RX_MPDU_START_2_TID_MASK, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK = + AR6320V2_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB = + AR6320V2_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB, + .d_RX_MSDU_END_4_LAST_MSDU_MASK = + AR6320V2_RX_MSDU_END_4_LAST_MSDU_MASK, + .d_RX_MSDU_END_4_LAST_MSDU_LSB = AR6320V2_RX_MSDU_END_4_LAST_MSDU_LSB, + .d_RX_ATTENTION_0_MCAST_BCAST_MASK = + AR6320V2_RX_ATTENTION_0_MCAST_BCAST_MASK, + .d_RX_ATTENTION_0_MCAST_BCAST_LSB = + AR6320V2_RX_ATTENTION_0_MCAST_BCAST_LSB, + .d_RX_ATTENTION_0_FRAGMENT_MASK = + AR6320V2_RX_ATTENTION_0_FRAGMENT_MASK, + .d_RX_ATTENTION_0_FRAGMENT_LSB = AR6320V2_RX_ATTENTION_0_FRAGMENT_LSB, + .d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK = + AR6320V2_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK = + AR6320V2_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB = + AR6320V2_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB, + .d_RX_MSDU_START_0_MSDU_LENGTH_MASK = + AR6320V2_RX_MSDU_START_0_MSDU_LENGTH_MASK, + .d_RX_MSDU_START_0_MSDU_LENGTH_LSB = + AR6320V2_RX_MSDU_START_0_MSDU_LENGTH_LSB, + .d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET = + AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_OFFSET, + .d_RX_MSDU_START_2_DECAP_FORMAT_MASK = + AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_MASK, + .d_RX_MSDU_START_2_DECAP_FORMAT_LSB = + AR6320V2_RX_MSDU_START_2_DECAP_FORMAT_LSB, + .d_RX_MPDU_START_0_ENCRYPTED_MASK = + AR6320V2_RX_MPDU_START_0_ENCRYPTED_MASK, + .d_RX_MPDU_START_0_ENCRYPTED_LSB = + AR6320V2_RX_MPDU_START_0_ENCRYPTED_LSB, + .d_RX_ATTENTION_0_MORE_DATA_MASK = + AR6320V2_RX_ATTENTION_0_MORE_DATA_MASK, + .d_RX_ATTENTION_0_MSDU_DONE_MASK = + AR6320V2_RX_ATTENTION_0_MSDU_DONE_MASK, + .d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK = + AR6320V2_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK, +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) + .d_CE_COUNT = AR6320V2_CE_COUNT, + .d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL, + 
.d_PCIE_INTR_ENABLE_ADDRESS = AR6320V2_PCIE_INTR_ENABLE_ADDRESS, + .d_PCIE_INTR_CLR_ADDRESS = AR6320V2_PCIE_INTR_CLR_ADDRESS, + .d_PCIE_INTR_FIRMWARE_MASK = AR6320V2_PCIE_INTR_FIRMWARE_MASK, + .d_PCIE_INTR_CE_MASK_ALL = AR6320V2_PCIE_INTR_CE_MASK_ALL, + /* PLL start */ + .d_EFUSE_OFFSET = AR6320V2_EFUSE_OFFSET, + .d_EFUSE_XTAL_SEL_MSB = AR6320V2_EFUSE_XTAL_SEL_MSB, + .d_EFUSE_XTAL_SEL_LSB = AR6320V2_EFUSE_XTAL_SEL_LSB, + .d_EFUSE_XTAL_SEL_MASK = AR6320V2_EFUSE_XTAL_SEL_MASK, + .d_BB_PLL_CONFIG_OFFSET = AR6320V2_BB_PLL_CONFIG_OFFSET, + .d_BB_PLL_CONFIG_OUTDIV_MSB = AR6320V2_BB_PLL_CONFIG_OUTDIV_MSB, + .d_BB_PLL_CONFIG_OUTDIV_LSB = AR6320V2_BB_PLL_CONFIG_OUTDIV_LSB, + .d_BB_PLL_CONFIG_OUTDIV_MASK = AR6320V2_BB_PLL_CONFIG_OUTDIV_MASK, + .d_BB_PLL_CONFIG_FRAC_MSB = AR6320V2_BB_PLL_CONFIG_FRAC_MSB, + .d_BB_PLL_CONFIG_FRAC_LSB = AR6320V2_BB_PLL_CONFIG_FRAC_LSB, + .d_BB_PLL_CONFIG_FRAC_MASK = AR6320V2_BB_PLL_CONFIG_FRAC_MASK, + .d_WLAN_PLL_SETTLE_TIME_MSB = AR6320V2_WLAN_PLL_SETTLE_TIME_MSB, + .d_WLAN_PLL_SETTLE_TIME_LSB = AR6320V2_WLAN_PLL_SETTLE_TIME_LSB, + .d_WLAN_PLL_SETTLE_TIME_MASK = AR6320V2_WLAN_PLL_SETTLE_TIME_MASK, + .d_WLAN_PLL_SETTLE_OFFSET = AR6320V2_WLAN_PLL_SETTLE_OFFSET, + .d_WLAN_PLL_SETTLE_SW_MASK = AR6320V2_WLAN_PLL_SETTLE_SW_MASK, + .d_WLAN_PLL_SETTLE_RSTMASK = AR6320V2_WLAN_PLL_SETTLE_RSTMASK, + .d_WLAN_PLL_SETTLE_RESET = AR6320V2_WLAN_PLL_SETTLE_RESET, + .d_WLAN_PLL_CONTROL_NOPWD_MSB = AR6320V2_WLAN_PLL_CONTROL_NOPWD_MSB, + .d_WLAN_PLL_CONTROL_NOPWD_LSB = AR6320V2_WLAN_PLL_CONTROL_NOPWD_LSB, + .d_WLAN_PLL_CONTROL_NOPWD_MASK = AR6320V2_WLAN_PLL_CONTROL_NOPWD_MASK, + .d_WLAN_PLL_CONTROL_BYPASS_MSB = AR6320V2_WLAN_PLL_CONTROL_BYPASS_MSB, + .d_WLAN_PLL_CONTROL_BYPASS_LSB = AR6320V2_WLAN_PLL_CONTROL_BYPASS_LSB, + .d_WLAN_PLL_CONTROL_BYPASS_MASK = + AR6320V2_WLAN_PLL_CONTROL_BYPASS_MASK, + .d_WLAN_PLL_CONTROL_BYPASS_RESET = + AR6320V2_WLAN_PLL_CONTROL_BYPASS_RESET, + .d_WLAN_PLL_CONTROL_CLK_SEL_MSB = + AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_MSB, + 
.d_WLAN_PLL_CONTROL_CLK_SEL_LSB = + AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_LSB, + .d_WLAN_PLL_CONTROL_CLK_SEL_MASK = + AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_MASK, + .d_WLAN_PLL_CONTROL_CLK_SEL_RESET = + AR6320V2_WLAN_PLL_CONTROL_CLK_SEL_RESET, + .d_WLAN_PLL_CONTROL_REFDIV_MSB = AR6320V2_WLAN_PLL_CONTROL_REFDIV_MSB, + .d_WLAN_PLL_CONTROL_REFDIV_LSB = AR6320V2_WLAN_PLL_CONTROL_REFDIV_LSB, + .d_WLAN_PLL_CONTROL_REFDIV_MASK = + AR6320V2_WLAN_PLL_CONTROL_REFDIV_MASK, + .d_WLAN_PLL_CONTROL_REFDIV_RESET = + AR6320V2_WLAN_PLL_CONTROL_REFDIV_RESET, + .d_WLAN_PLL_CONTROL_DIV_MSB = AR6320V2_WLAN_PLL_CONTROL_DIV_MSB, + .d_WLAN_PLL_CONTROL_DIV_LSB = AR6320V2_WLAN_PLL_CONTROL_DIV_LSB, + .d_WLAN_PLL_CONTROL_DIV_MASK = AR6320V2_WLAN_PLL_CONTROL_DIV_MASK, + .d_WLAN_PLL_CONTROL_DIV_RESET = AR6320V2_WLAN_PLL_CONTROL_DIV_RESET, + .d_WLAN_PLL_CONTROL_OFFSET = AR6320V2_WLAN_PLL_CONTROL_OFFSET, + .d_WLAN_PLL_CONTROL_SW_MASK = AR6320V2_WLAN_PLL_CONTROL_SW_MASK, + .d_WLAN_PLL_CONTROL_RSTMASK = AR6320V2_WLAN_PLL_CONTROL_RSTMASK, + .d_WLAN_PLL_CONTROL_RESET = AR6320V2_WLAN_PLL_CONTROL_RESET, + .d_SOC_CORE_CLK_CTRL_OFFSET = AR6320V2_SOC_CORE_CLK_CTRL_OFFSET, + .d_SOC_CORE_CLK_CTRL_DIV_MSB = AR6320V2_SOC_CORE_CLK_CTRL_DIV_MSB, + .d_SOC_CORE_CLK_CTRL_DIV_LSB = AR6320V2_SOC_CORE_CLK_CTRL_DIV_LSB, + .d_SOC_CORE_CLK_CTRL_DIV_MASK = AR6320V2_SOC_CORE_CLK_CTRL_DIV_MASK, + .d_RTC_SYNC_STATUS_PLL_CHANGING_MSB = + AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_MSB, + .d_RTC_SYNC_STATUS_PLL_CHANGING_LSB = + AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_LSB, + .d_RTC_SYNC_STATUS_PLL_CHANGING_MASK = + AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_MASK, + .d_RTC_SYNC_STATUS_PLL_CHANGING_RESET = + AR6320V2_RTC_SYNC_STATUS_PLL_CHANGING_RESET, + .d_RTC_SYNC_STATUS_OFFSET = AR6320V2_RTC_SYNC_STATUS_OFFSET, + .d_SOC_CPU_CLOCK_OFFSET = AR6320V2_SOC_CPU_CLOCK_OFFSET, + .d_SOC_CPU_CLOCK_STANDARD_MSB = AR6320V2_SOC_CPU_CLOCK_STANDARD_MSB, + .d_SOC_CPU_CLOCK_STANDARD_LSB = AR6320V2_SOC_CPU_CLOCK_STANDARD_LSB, + .d_SOC_CPU_CLOCK_STANDARD_MASK = 
AR6320V2_SOC_CPU_CLOCK_STANDARD_MASK, + /* PLL end */ + .d_SOC_POWER_REG_OFFSET = AR6320V2_SOC_POWER_REG_OFFSET, + .d_PCIE_INTR_CAUSE_ADDRESS = AR6320V2_PCIE_INTR_CAUSE_ADDRESS, + .d_SOC_RESET_CONTROL_ADDRESS = AR6320V2_SOC_RESET_CONTROL_ADDRESS, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK = + AR6320V2_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB = + AR6320V2_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB, + .d_SOC_RESET_CONTROL_CE_RST_MASK = + AR6320V2_SOC_RESET_CONTROL_CE_RST_MASK, + .d_WLAN_DEBUG_INPUT_SEL_OFFSET = AR6320V2_WLAN_DEBUG_INPUT_SEL_OFFSET, + .d_WLAN_DEBUG_INPUT_SEL_SRC_MSB = + AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_MSB, + .d_WLAN_DEBUG_INPUT_SEL_SRC_LSB = + AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_LSB, + .d_WLAN_DEBUG_INPUT_SEL_SRC_MASK = + AR6320V2_WLAN_DEBUG_INPUT_SEL_SRC_MASK, + .d_WLAN_DEBUG_CONTROL_OFFSET = AR6320V2_WLAN_DEBUG_CONTROL_OFFSET, + .d_WLAN_DEBUG_CONTROL_ENABLE_MSB = + AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_MSB, + .d_WLAN_DEBUG_CONTROL_ENABLE_LSB = + AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_LSB, + .d_WLAN_DEBUG_CONTROL_ENABLE_MASK = + AR6320V2_WLAN_DEBUG_CONTROL_ENABLE_MASK, + .d_WLAN_DEBUG_OUT_OFFSET = AR6320V2_WLAN_DEBUG_OUT_OFFSET, + .d_WLAN_DEBUG_OUT_DATA_MSB = AR6320V2_WLAN_DEBUG_OUT_DATA_MSB, + .d_WLAN_DEBUG_OUT_DATA_LSB = AR6320V2_WLAN_DEBUG_OUT_DATA_LSB, + .d_WLAN_DEBUG_OUT_DATA_MASK = AR6320V2_WLAN_DEBUG_OUT_DATA_MASK, + .d_AMBA_DEBUG_BUS_OFFSET = AR6320V2_AMBA_DEBUG_BUS_OFFSET, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB = + AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB = + AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB, + .d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK = + AR6320V2_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK, + .d_AMBA_DEBUG_BUS_SEL_MSB = AR6320V2_AMBA_DEBUG_BUS_SEL_MSB, + .d_AMBA_DEBUG_BUS_SEL_LSB = AR6320V2_AMBA_DEBUG_BUS_SEL_LSB, + .d_AMBA_DEBUG_BUS_SEL_MASK = AR6320V2_AMBA_DEBUG_BUS_SEL_MASK, +#endif + .d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK = + 
AR6320V2_SOC_RESET_CONTROL_CPU_WARM_RST_MASK, + .d_CPU_INTR_ADDRESS = AR6320V2_CPU_INTR_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ADDRESS = + AR6320V2_SOC_LF_TIMER_CONTROL0_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK = + AR6320V2_SOC_LF_TIMER_CONTROL0_ENABLE_MASK, + /* chip id start */ + .d_SOC_CHIP_ID_ADDRESS = AR6320V2_SOC_CHIP_ID_ADDRESS, + .d_SOC_CHIP_ID_VERSION_MASK = AR6320V2_SOC_CHIP_ID_VERSION_MASK, + .d_SOC_CHIP_ID_VERSION_LSB = AR6320V2_SOC_CHIP_ID_VERSION_LSB, + .d_SOC_CHIP_ID_REVISION_MASK = AR6320V2_SOC_CHIP_ID_REVISION_MASK, + .d_SOC_CHIP_ID_REVISION_LSB = AR6320V2_SOC_CHIP_ID_REVISION_LSB, + /* chip id end */ +}; + +struct hostdef_s ar6320v2_hostdef = { + .d_INT_STATUS_ENABLE_ERROR_LSB = AR6320V2_INT_STATUS_ENABLE_ERROR_LSB, + .d_INT_STATUS_ENABLE_ERROR_MASK = + AR6320V2_INT_STATUS_ENABLE_ERROR_MASK, + .d_INT_STATUS_ENABLE_CPU_LSB = AR6320V2_INT_STATUS_ENABLE_CPU_LSB, + .d_INT_STATUS_ENABLE_CPU_MASK = AR6320V2_INT_STATUS_ENABLE_CPU_MASK, + .d_INT_STATUS_ENABLE_COUNTER_LSB = + AR6320V2_INT_STATUS_ENABLE_COUNTER_LSB, + .d_INT_STATUS_ENABLE_COUNTER_MASK = + AR6320V2_INT_STATUS_ENABLE_COUNTER_MASK, + .d_INT_STATUS_ENABLE_MBOX_DATA_LSB = + AR6320V2_INT_STATUS_ENABLE_MBOX_DATA_LSB, + .d_INT_STATUS_ENABLE_MBOX_DATA_MASK = + AR6320V2_INT_STATUS_ENABLE_MBOX_DATA_MASK, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB = + AR6320V2_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK = + AR6320V2_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB = + AR6320V2_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK = + AR6320V2_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, + .d_COUNTER_INT_STATUS_ENABLE_BIT_LSB = + AR6320V2_COUNTER_INT_STATUS_ENABLE_BIT_LSB, + .d_COUNTER_INT_STATUS_ENABLE_BIT_MASK = + AR6320V2_COUNTER_INT_STATUS_ENABLE_BIT_MASK, + .d_INT_STATUS_ENABLE_ADDRESS = AR6320V2_INT_STATUS_ENABLE_ADDRESS, + .d_CPU_INT_STATUS_ENABLE_BIT_LSB = + AR6320V2_CPU_INT_STATUS_ENABLE_BIT_LSB, + 
.d_CPU_INT_STATUS_ENABLE_BIT_MASK = + AR6320V2_CPU_INT_STATUS_ENABLE_BIT_MASK, + .d_HOST_INT_STATUS_ADDRESS = AR6320V2_HOST_INT_STATUS_ADDRESS, + .d_CPU_INT_STATUS_ADDRESS = AR6320V2_CPU_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_ADDRESS = AR6320V2_ERROR_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_WAKEUP_MASK = + AR6320V2_ERROR_INT_STATUS_WAKEUP_MASK, + .d_ERROR_INT_STATUS_WAKEUP_LSB = AR6320V2_ERROR_INT_STATUS_WAKEUP_LSB, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK = + AR6320V2_ERROR_INT_STATUS_RX_UNDERFLOW_MASK, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB = + AR6320V2_ERROR_INT_STATUS_RX_UNDERFLOW_LSB, + .d_ERROR_INT_STATUS_TX_OVERFLOW_MASK = + AR6320V2_ERROR_INT_STATUS_TX_OVERFLOW_MASK, + .d_ERROR_INT_STATUS_TX_OVERFLOW_LSB = + AR6320V2_ERROR_INT_STATUS_TX_OVERFLOW_LSB, + .d_COUNT_DEC_ADDRESS = AR6320V2_COUNT_DEC_ADDRESS, + .d_HOST_INT_STATUS_CPU_MASK = AR6320V2_HOST_INT_STATUS_CPU_MASK, + .d_HOST_INT_STATUS_CPU_LSB = AR6320V2_HOST_INT_STATUS_CPU_LSB, + .d_HOST_INT_STATUS_ERROR_MASK = AR6320V2_HOST_INT_STATUS_ERROR_MASK, + .d_HOST_INT_STATUS_ERROR_LSB = AR6320V2_HOST_INT_STATUS_ERROR_LSB, + .d_HOST_INT_STATUS_COUNTER_MASK = + AR6320V2_HOST_INT_STATUS_COUNTER_MASK, + .d_HOST_INT_STATUS_COUNTER_LSB = AR6320V2_HOST_INT_STATUS_COUNTER_LSB, + .d_RX_LOOKAHEAD_VALID_ADDRESS = AR6320V2_RX_LOOKAHEAD_VALID_ADDRESS, + .d_WINDOW_DATA_ADDRESS = AR6320V2_WINDOW_DATA_ADDRESS, + .d_WINDOW_READ_ADDR_ADDRESS = AR6320V2_WINDOW_READ_ADDR_ADDRESS, + .d_WINDOW_WRITE_ADDR_ADDRESS = AR6320V2_WINDOW_WRITE_ADDR_ADDRESS, + .d_SOC_GLOBAL_RESET_ADDRESS = AR6320V2_SOC_GLOBAL_RESET_ADDRESS, + .d_RTC_STATE_ADDRESS = AR6320V2_RTC_STATE_ADDRESS, + .d_RTC_STATE_COLD_RESET_MASK = AR6320V2_RTC_STATE_COLD_RESET_MASK, + .d_RTC_STATE_V_MASK = AR6320V2_RTC_STATE_V_MASK, + .d_RTC_STATE_V_LSB = AR6320V2_RTC_STATE_V_LSB, + .d_FW_IND_EVENT_PENDING = AR6320V2_FW_IND_EVENT_PENDING, + .d_FW_IND_INITIALIZED = AR6320V2_FW_IND_INITIALIZED, + .d_RTC_STATE_V_ON = AR6320V2_RTC_STATE_V_ON, +#if defined(SDIO_3_0) + 
.d_HOST_INT_STATUS_MBOX_DATA_MASK = + AR6320V2_HOST_INT_STATUS_MBOX_DATA_MASK, + .d_HOST_INT_STATUS_MBOX_DATA_LSB = + AR6320V2_HOST_INT_STATUS_MBOX_DATA_LSB, +#endif +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) /* fields only used by memory-mapped bus interfaces */ + .d_FW_IND_HELPER = AR6320V2_FW_IND_HELPER, + .d_MUX_ID_MASK = AR6320V2_MUX_ID_MASK, + .d_TRANSACTION_ID_MASK = AR6320V2_TRANSACTION_ID_MASK, + .d_PCIE_LOCAL_BASE_ADDRESS = AR6320V2_PCIE_LOCAL_BASE_ADDRESS, + .d_PCIE_SOC_WAKE_RESET = AR6320V2_PCIE_SOC_WAKE_RESET, + .d_PCIE_SOC_WAKE_ADDRESS = AR6320V2_PCIE_SOC_WAKE_ADDRESS, + .d_PCIE_SOC_WAKE_V_MASK = AR6320V2_PCIE_SOC_WAKE_V_MASK, + .d_PCIE_SOC_RDY_STATUS_ADDRESS = PCIE_SOC_RDY_STATUS_ADDRESS, + .d_PCIE_SOC_RDY_STATUS_BAR_MASK = PCIE_SOC_RDY_STATUS_BAR_MASK, + .d_SOC_PCIE_BASE_ADDRESS = SOC_PCIE_BASE_ADDRESS, + .d_MSI_MAGIC_ADR_ADDRESS = MSI_MAGIC_ADR_ADDRESS, + .d_MSI_MAGIC_ADDRESS = MSI_MAGIC_ADDRESS, + .d_HOST_CE_COUNT = 8, /* 8 host-side copy engines on this target */ + .d_ENABLE_MSI = 0, /* MSI delivery disabled (value 0) */ +#endif +#if defined(HIF_SDIO) + .d_FW_IND_HELPER = AR6320V2_FW_IND_HELPER, +#endif +}; + +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) +struct ce_reg_def ar6320v2_ce_targetdef = { /* AR6320 v2 copy-engine (CE) register offset/mask table */ + /* copy_engine.c */ + .d_DST_WR_INDEX_ADDRESS = AR6320V2_DST_WR_INDEX_ADDRESS, + .d_SRC_WATERMARK_ADDRESS = AR6320V2_SRC_WATERMARK_ADDRESS, + .d_SRC_WATERMARK_LOW_MASK = AR6320V2_SRC_WATERMARK_LOW_MASK, + .d_SRC_WATERMARK_HIGH_MASK = AR6320V2_SRC_WATERMARK_HIGH_MASK, + .d_DST_WATERMARK_LOW_MASK = AR6320V2_DST_WATERMARK_LOW_MASK, + .d_DST_WATERMARK_HIGH_MASK = AR6320V2_DST_WATERMARK_HIGH_MASK, + .d_CURRENT_SRRI_ADDRESS = AR6320V2_CURRENT_SRRI_ADDRESS, + .d_CURRENT_DRRI_ADDRESS = AR6320V2_CURRENT_DRRI_ADDRESS, + .d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK = + AR6320V2_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK = + AR6320V2_HOST_IS_SRC_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK = + AR6320V2_HOST_IS_DST_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_DST_RING_LOW_WATERMARK_MASK = + 
AR6320V2_HOST_IS_DST_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_ADDRESS = AR6320V2_HOST_IS_ADDRESS, + .d_HOST_IS_COPY_COMPLETE_MASK = AR6320V2_HOST_IS_COPY_COMPLETE_MASK, + .d_CE_WRAPPER_BASE_ADDRESS = AR6320V2_CE_WRAPPER_BASE_ADDRESS, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS = + AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS, + .d_HOST_IE_ADDRESS = AR6320V2_HOST_IE_ADDRESS, + .d_HOST_IE_COPY_COMPLETE_MASK = AR6320V2_HOST_IE_COPY_COMPLETE_MASK, + .d_SR_BA_ADDRESS = AR6320V2_SR_BA_ADDRESS, + .d_SR_SIZE_ADDRESS = AR6320V2_SR_SIZE_ADDRESS, + .d_CE_CTRL1_ADDRESS = AR6320V2_CE_CTRL1_ADDRESS, + .d_CE_CTRL1_DMAX_LENGTH_MASK = AR6320V2_CE_CTRL1_DMAX_LENGTH_MASK, + .d_DR_BA_ADDRESS = AR6320V2_DR_BA_ADDRESS, + .d_DR_SIZE_ADDRESS = AR6320V2_DR_SIZE_ADDRESS, + .d_MISC_IE_ADDRESS = AR6320V2_MISC_IE_ADDRESS, + .d_MISC_IS_AXI_ERR_MASK = AR6320V2_MISC_IS_AXI_ERR_MASK, + .d_MISC_IS_DST_ADDR_ERR_MASK = AR6320V2_MISC_IS_DST_ADDR_ERR_MASK, + .d_MISC_IS_SRC_LEN_ERR_MASK = AR6320V2_MISC_IS_SRC_LEN_ERR_MASK, + .d_MISC_IS_DST_MAX_LEN_VIO_MASK = + AR6320V2_MISC_IS_DST_MAX_LEN_VIO_MASK, + .d_MISC_IS_DST_RING_OVERFLOW_MASK = + AR6320V2_MISC_IS_DST_RING_OVERFLOW_MASK, + .d_MISC_IS_SRC_RING_OVERFLOW_MASK = + AR6320V2_MISC_IS_SRC_RING_OVERFLOW_MASK, + .d_SRC_WATERMARK_LOW_LSB = AR6320V2_SRC_WATERMARK_LOW_LSB, + .d_SRC_WATERMARK_HIGH_LSB = AR6320V2_SRC_WATERMARK_HIGH_LSB, + .d_DST_WATERMARK_LOW_LSB = AR6320V2_DST_WATERMARK_LOW_LSB, + .d_DST_WATERMARK_HIGH_LSB = AR6320V2_DST_WATERMARK_HIGH_LSB, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK = + AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB = + AR6320V2_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB, + .d_CE_CTRL1_DMAX_LENGTH_LSB = AR6320V2_CE_CTRL1_DMAX_LENGTH_LSB, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK = + AR6320V2_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK = + AR6320V2_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB = + 
AR6320V2_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB = + AR6320V2_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB, + .d_CE_WRAPPER_DEBUG_OFFSET = AR6320V2_CE_WRAPPER_DEBUG_OFFSET, + .d_CE_WRAPPER_DEBUG_SEL_MSB = AR6320V2_CE_WRAPPER_DEBUG_SEL_MSB, + .d_CE_WRAPPER_DEBUG_SEL_LSB = AR6320V2_CE_WRAPPER_DEBUG_SEL_LSB, + .d_CE_WRAPPER_DEBUG_SEL_MASK = AR6320V2_CE_WRAPPER_DEBUG_SEL_MASK, + .d_CE_DEBUG_OFFSET = AR6320V2_CE_DEBUG_OFFSET, + .d_CE_DEBUG_SEL_MSB = AR6320V2_CE_DEBUG_SEL_MSB, + .d_CE_DEBUG_SEL_LSB = AR6320V2_CE_DEBUG_SEL_LSB, + .d_CE_DEBUG_SEL_MASK = AR6320V2_CE_DEBUG_SEL_MASK, + .d_CE0_BASE_ADDRESS = AR6320V2_CE0_BASE_ADDRESS, + .d_CE1_BASE_ADDRESS = AR6320V2_CE1_BASE_ADDRESS, /* NOTE(review): only CE0/CE1 bases here; per-CE bases presumably derived by stride — confirm in copy_engine.c */ + +}; +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ar900Bdef.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ar900Bdef.c new file mode 100644 index 0000000000000000000000000000000000000000..e2087d2615986286f2e2f8c97e49c25d6cac53b2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ar900Bdef.c @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2010, 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "qdf_module.h" + +#if defined(AR900B_HEADERS_DEF) +#define AR900B 1 + +#define WLAN_HEADERS 1 +#include "common_drv.h" +#include "AR900B/soc_addrs.h" +#include "AR900B/extra/hw/apb_map.h" +#include "AR900B/hw/gpio_athr_wlan_reg.h" +#ifdef WLAN_HEADERS +#include "AR900B/extra/hw/wifi_top_reg_map.h" +#include "AR900B/hw/rtc_soc_reg.h" +#endif +#include "AR900B/hw/si_reg.h" +#include "AR900B/extra/hw/pcie_local_reg.h" +#include "AR900B/hw/ce_wrapper_reg_csr.h" +/* TODO + * #include "hw/soc_core_reg.h" + * #include "hw/soc_pcie_reg.h" + * #include "hw/ce_reg_csr.h" + */ + +#include "AR900B/extra/hw/soc_core_reg.h" +#include "AR900B/hw/soc_pcie_reg.h" +#include "AR900B/extra/hw/ce_reg_csr.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Base address is defined in pcie_local_reg.h. Macros which access the + * registers include the base address in their definition. + */ +#define PCIE_LOCAL_BASE_ADDRESS 0 + +#define FW_EVENT_PENDING_ADDRESS (WIFICMN_SCRATCH_3_ADDRESS) +#define DRAM_BASE_ADDRESS TARG_DRAM_START + +/* Backwards compatibility -- TBDXXX */ + +#define MISSING 0 + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB WIFI_SYSTEM_SLEEP_DISABLE_LSB +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK WIFI_SYSTEM_SLEEP_DISABLE_MASK +#define WLAN_RESET_CONTROL_COLD_RST_MASK WIFI_RESET_CONTROL_MAC_COLD_RST_MASK +#define WLAN_RESET_CONTROL_WARM_RST_MASK WIFI_RESET_CONTROL_MAC_WARM_RST_MASK +#define SOC_CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_ADDRESS +#define SOC_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_ADDRESS +#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_ADDRESS +#define SOC_LPO_CAL_OFFSET SOC_LPO_CAL_ADDRESS +#define SOC_RESET_CONTROL_CE_RST_MASK WIFI_RESET_CONTROL_CE_RESET_MASK +#define WLAN_SYSTEM_SLEEP_OFFSET WIFI_SYSTEM_SLEEP_ADDRESS +#define WLAN_RESET_CONTROL_OFFSET WIFI_RESET_CONTROL_ADDRESS +#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET +#define 
CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK +#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS +#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS +#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS +#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK +#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK +#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS +#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS +#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS +#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS +#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS +#define SI_CONFIG_OFFSET SI_CONFIG_ADDRESS +#define SI_TX_DATA0_OFFSET SI_TX_DATA0_ADDRESS +#define SI_TX_DATA1_OFFSET SI_TX_DATA1_ADDRESS +#define SI_RX_DATA0_OFFSET SI_RX_DATA0_ADDRESS +#define SI_RX_DATA1_OFFSET SI_RX_DATA1_ADDRESS +#define SI_CS_OFFSET SI_CS_ADDRESS +#define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB +#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK +#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB +#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK +#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define 
COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* MAC Descriptor */ +#define RX_PPDU_END_ANTENNA_OFFSET_DWORD (RX_PPDU_END_25_RX_ANTENNA_OFFSET >> 2) +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS WLAN_GPIO_ENABLE_W1TS_LOW_ADDRESS +#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB +#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB +#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK +/* CE descriptor */ +#define CE_SRC_DESC_SIZE_DWORD 2 +#define CE_DEST_DESC_SIZE_DWORD 2 +#define CE_SRC_DESC_SRC_PTR_OFFSET_DWORD 0 +#define CE_SRC_DESC_INFO_OFFSET_DWORD 1 +#define CE_DEST_DESC_DEST_PTR_OFFSET_DWORD 0 +#define CE_DEST_DESC_INFO_OFFSET_DWORD 1 +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_SRC_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 16 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 15 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define 
CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK 0x00002000 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT 13 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00001000 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT 12 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0x00000FFF +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_SRC_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 0 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 16 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK 0x00040000 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT 18 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00080000 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT 19 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0xFFF00000 +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 20 +#endif +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_DEST_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 16 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 15 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK 0x00002000 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT 13 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00001000 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT 12 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0x00000FFF +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_DEST_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 0 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 16 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK 0x00040000 +#define 
CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT 18 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00080000 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT 19 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0xFFF00000 +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 20 +#endif + +#define MY_TARGET_DEF AR900B_TARGETdef +#define MY_HOST_DEF AR900B_HOSTdef +#define MY_CEREG_DEF AR900B_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ AR900B_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ AR900B_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +qdf_export_symbol(AR900B_CE_TARGETdef); +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *AR900B_TARGETdef; +struct hostdef_s *AR900B_HOSTdef; +#endif /*AR900B_HEADERS_DEF */ +qdf_export_symbol(AR900B_TARGETdef); +qdf_export_symbol(AR900B_HOSTdef); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ar9888def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ar9888def.c new file mode 100644 index 0000000000000000000000000000000000000000..e0266ae58a86d5edd9ed6c837d3cc3bcdb909576 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ar9888def.c @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2013,2016,2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_module.h" + +#if defined(AR9888_HEADERS_DEF) +#define AR9888 1 + +#define WLAN_HEADERS 1 +#include "common_drv.h" +#include "AR9888/v2/soc_addrs.h" +#include "AR9888/v2/hw/apb_athr_wlan_map.h" +#include "AR9888/v2/hw/gpio_athr_wlan_reg.h" +#include "AR9888/v2/hw/rtc_soc_reg.h" +#include "AR9888/v2/hw/rtc_wlan_reg.h" +#include "AR9888/v2/hw/si_reg.h" +#include "AR9888/v2/extra/hw/pcie_local_reg.h" + +#include "AR9888/v2/extra/hw/soc_core_reg.h" +#include "AR9888/v2/hw/soc_pcie_reg.h" +#include "AR9888/v2/extra/hw/ce_reg_csr.h" +#include "AR9888/v2/hw/ce_wrapper_reg_csr.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +/* TBDXXX: Eventually, this Base Address will be defined in HW header files */ +#define PCIE_LOCAL_BASE_ADDRESS 0x80000 + +#define FW_EVENT_PENDING_ADDRESS (SOC_CORE_BASE_ADDRESS+SCRATCH_3_ADDRESS) +#define DRAM_BASE_ADDRESS TARG_DRAM_START + +/* Backwards compatibility -- TBDXXX */ + +#define MISSING 0 + +#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET +#define WLAN_SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET +#define WLAN_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_OFFSET +#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET +#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define RESET_CONTROL_MBOX_RST_MASK MISSING +#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK +#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS +#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS +#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS +#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK +#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK 
+#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS +#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS +#define LOCAL_SCRATCH_OFFSET 0x18 +#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_OFFSET +#define LPO_CAL_OFFSET SOC_LPO_CAL_OFFSET +#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS +#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS +#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS +#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS +#define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB +#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK +#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB +#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK +#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING 
+#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* MAC descriptor */ +#define RX_ATTENTION_0_PHY_DATA_TYPE_MASK MISSING +#define RX_MSDU_END_8_LRO_ELIGIBLE_MASK MISSING +#define RX_MSDU_END_8_LRO_ELIGIBLE_LSB MISSING +#define RX_MSDU_END_8_L3_HEADER_PADDING_LSB MISSING +#define RX_MSDU_END_8_L3_HEADER_PADDING_MASK MISSING +#define RX_PPDU_END_ANTENNA_OFFSET_DWORD (RX_PPDU_END_19_RX_ANTENNA_OFFSET >> 2) +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB MISSING +/* GPIO Register */ + +#define GPIO_ENABLE_W1TS_LOW_ADDRESS WLAN_GPIO_ENABLE_W1TS_LOW_ADDRESS +#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB +#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB +#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK +/* CE descriptor */ +#define CE_SRC_DESC_SIZE_DWORD 2 +#define CE_DEST_DESC_SIZE_DWORD 2 +#define CE_SRC_DESC_SRC_PTR_OFFSET_DWORD 0 +#define CE_SRC_DESC_INFO_OFFSET_DWORD 1 +#define CE_DEST_DESC_DEST_PTR_OFFSET_DWORD 0 +#define CE_DEST_DESC_INFO_OFFSET_DWORD 1 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK MISSING +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT MISSING +#define 
CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK MISSING +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT MISSING +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK MISSING +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT MISSING +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK MISSING +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT MISSING +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_SRC_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 16 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 15 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0x00003FFF +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_SRC_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 0 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 16 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0xFFFC0000 +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 18 +#endif +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_DEST_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 16 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 15 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0x00003FFF +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_DEST_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 0 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 16 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0xFFFC0000 +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 18 +#endif + +#define MY_TARGET_DEF AR9888_TARGETdef 
+#define MY_HOST_DEF AR9888_HOSTdef +#define MY_CEREG_DEF AR9888_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ AR9888_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ AR9888_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +qdf_export_symbol(AR9888_CE_TARGETdef); +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *AR9888_TARGETdef; +struct hostdef_s *AR9888_HOSTdef; +#endif /*AR9888_HEADERS_DEF */ +qdf_export_symbol(AR9888_TARGETdef); +qdf_export_symbol(AR9888_HOSTdef); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ar9888def.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ar9888def.h new file mode 100644 index 0000000000000000000000000000000000000000..ce2e891c9f425010c31504feb277027df5fe36ae --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ar9888def.h @@ -0,0 +1,591 @@ +/* + * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _AR9888DEF_H_ +#define _AR9888DEF_H_ + +/* Base Addresses */ +#define AR9888_RTC_SOC_BASE_ADDRESS 0x00004000 +#define AR9888_RTC_WMAC_BASE_ADDRESS 0x00005000 +#define AR9888_MAC_COEX_BASE_ADDRESS 0x00006000 +#define AR9888_BT_COEX_BASE_ADDRESS 0x00007000 +#define AR9888_SOC_PCIE_BASE_ADDRESS 0x00008000 +#define AR9888_SOC_CORE_BASE_ADDRESS 0x00009000 +#define AR9888_WLAN_UART_BASE_ADDRESS 0x0000c000 +#define AR9888_WLAN_SI_BASE_ADDRESS 0x00010000 +#define AR9888_WLAN_GPIO_BASE_ADDRESS 0x00014000 +#define AR9888_WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000 +#define AR9888_WLAN_MAC_BASE_ADDRESS 0x00020000 +#define AR9888_EFUSE_BASE_ADDRESS 0x00030000 +#define AR9888_FPGA_REG_BASE_ADDRESS 0x00039000 +#define AR9888_WLAN_UART2_BASE_ADDRESS 0x00054c00 +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) +#define AR9888_CE_WRAPPER_BASE_ADDRESS 0x00057000 +#define AR9888_CE0_BASE_ADDRESS 0x00057400 +#define AR9888_CE1_BASE_ADDRESS 0x00057800 +#define AR9888_CE2_BASE_ADDRESS 0x00057c00 +#define AR9888_CE3_BASE_ADDRESS 0x00058000 +#define AR9888_CE4_BASE_ADDRESS 0x00058400 +#define AR9888_CE5_BASE_ADDRESS 0x00058800 +#define AR9888_CE6_BASE_ADDRESS 0x00058c00 +#define AR9888_CE7_BASE_ADDRESS 0x00059000 +#define AR9888_WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000 +#define AR9888_CE_CTRL1_ADDRESS 0x0010 +#define AR9888_CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff +#define AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000 +#define AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00 +#define AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8 +#define AR9888_CE_CTRL1_DMAX_LENGTH_LSB 0 +#define AR9888_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000 +#define AR9888_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000 +#define AR9888_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16 +#define AR9888_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17 +#define AR9888_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK 0x00000004 +#define AR9888_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB 2 +#define 
AR9888_PCIE_SOC_WAKE_RESET 0x00000000 +#define AR9888_PCIE_SOC_WAKE_ADDRESS 0x0004 +#define AR9888_PCIE_SOC_WAKE_V_MASK 0x00000001 +#define AR9888_PCIE_INTR_ENABLE_ADDRESS 0x0008 +#define AR9888_PCIE_INTR_CLR_ADDRESS 0x0014 +#define AR9888_PCIE_INTR_FIRMWARE_MASK 0x00000400 +#define AR9888_PCIE_INTR_CE0_MASK 0x00000800 +#define AR9888_PCIE_INTR_CE_MASK_ALL 0x0007f800 +#define AR9888_PCIE_INTR_CAUSE_ADDRESS 0x000c +#define AR9888_MUX_ID_MASK 0x0000 +#define AR9888_TRANSACTION_ID_MASK 0x3fff +#define AR9888_PCIE_LOCAL_BASE_ADDRESS 0x80000 +#define AR9888_SOC_RESET_CONTROL_CE_RST_MASK 0x00040000 +#define AR9888_PCIE_INTR_CE_MASK(n) (AR9888_PCIE_INTR_CE0_MASK << (n)) +#endif +#define AR9888_DBI_BASE_ADDRESS 0x00060000 +#define AR9888_SCRATCH_3_ADDRESS 0x0030 +#define AR9888_TARG_DRAM_START 0x00400000 +#define AR9888_SOC_SYSTEM_SLEEP_OFFSET 0x000000c4 +#define AR9888_SOC_RESET_CONTROL_OFFSET 0x00000000 +#define AR9888_SOC_CLOCK_CONTROL_OFFSET 0x00000028 +#define AR9888_SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001 +#define AR9888_SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001 +#define AR9888_WLAN_GPIO_BASE_ADDRESS 0x00014000 +#define AR9888_WLAN_GPIO_PIN0_ADDRESS 0x00000028 +#define AR9888_WLAN_GPIO_PIN1_ADDRESS 0x0000002c +#define AR9888_WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800 +#define AR9888_WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800 +#define AR9888_WLAN_SI_BASE_ADDRESS 0x00010000 +#define AR9888_SOC_CPU_CLOCK_OFFSET 0x00000020 +#define AR9888_SOC_LPO_CAL_OFFSET 0x000000e0 +#define AR9888_WLAN_GPIO_PIN10_ADDRESS 0x00000050 +#define AR9888_WLAN_GPIO_PIN11_ADDRESS 0x00000054 +#define AR9888_WLAN_GPIO_PIN12_ADDRESS 0x00000058 +#define AR9888_WLAN_GPIO_PIN13_ADDRESS 0x0000005c +#define AR9888_SOC_CPU_CLOCK_STANDARD_LSB 0 +#define AR9888_SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 +#define AR9888_SOC_LPO_CAL_ENABLE_LSB 20 +#define AR9888_SOC_LPO_CAL_ENABLE_MASK 0x00100000 +#define AR9888_WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000 + +#define AR9888_WLAN_SYSTEM_SLEEP_DISABLE_LSB 0 +#define 
AR9888_WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001 +#define AR9888_WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008 +#define AR9888_WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004 +#define AR9888_SI_CONFIG_BIDIR_OD_DATA_LSB 18 +#define AR9888_SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000 +#define AR9888_SI_CONFIG_I2C_LSB 16 +#define AR9888_SI_CONFIG_I2C_MASK 0x00010000 +#define AR9888_SI_CONFIG_POS_SAMPLE_LSB 7 +#define AR9888_SI_CONFIG_POS_SAMPLE_MASK 0x00000080 +#define AR9888_SI_CONFIG_INACTIVE_CLK_LSB 4 +#define AR9888_SI_CONFIG_INACTIVE_CLK_MASK 0x00000010 +#define AR9888_SI_CONFIG_INACTIVE_DATA_LSB 5 +#define AR9888_SI_CONFIG_INACTIVE_DATA_MASK 0x00000020 +#define AR9888_SI_CONFIG_DIVIDER_LSB 0 +#define AR9888_SI_CONFIG_DIVIDER_MASK 0x0000000f +#define AR9888_SI_CONFIG_OFFSET 0x00000000 +#define AR9888_SI_TX_DATA0_OFFSET 0x00000008 +#define AR9888_SI_TX_DATA1_OFFSET 0x0000000c +#define AR9888_SI_RX_DATA0_OFFSET 0x00000010 +#define AR9888_SI_RX_DATA1_OFFSET 0x00000014 +#define AR9888_SI_CS_OFFSET 0x00000004 +#define AR9888_SI_CS_DONE_ERR_MASK 0x00000400 +#define AR9888_SI_CS_DONE_INT_MASK 0x00000200 +#define AR9888_SI_CS_START_LSB 8 +#define AR9888_SI_CS_START_MASK 0x00000100 +#define AR9888_SI_CS_RX_CNT_LSB 4 +#define AR9888_SI_CS_RX_CNT_MASK 0x000000f0 +#define AR9888_SI_CS_TX_CNT_LSB 0 +#define AR9888_SI_CS_TX_CNT_MASK 0x0000000f +#define AR9888_CE_COUNT 8 +#define AR9888_SR_WR_INDEX_ADDRESS 0x003c +#define AR9888_DST_WATERMARK_ADDRESS 0x0050 +#define AR9888_RX_MSDU_END_4_FIRST_MSDU_LSB 14 +#define AR9888_RX_MSDU_END_4_FIRST_MSDU_MASK 0x00004000 +#define AR9888_RX_MPDU_START_0_SEQ_NUM_LSB 16 +#define AR9888_RX_MPDU_START_0_SEQ_NUM_MASK 0x0fff0000 +#define AR9888_RX_MPDU_START_2_PN_47_32_LSB 0 +#define AR9888_RX_MPDU_START_2_PN_47_32_MASK 0x0000ffff +#define AR9888_RX_MSDU_END_1_KEY_ID_OCT_MASK 0x000000ff +#define AR9888_RX_MSDU_END_1_KEY_ID_OCT_LSB 0 +#define AR9888_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB 16 +#define AR9888_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK 0xffff0000 
+#define AR9888_RX_MSDU_END_4_LAST_MSDU_LSB 15 +#define AR9888_RX_MSDU_END_4_LAST_MSDU_MASK 0x00008000 +#define AR9888_RX_ATTENTION_0_MCAST_BCAST_LSB 2 +#define AR9888_RX_ATTENTION_0_MCAST_BCAST_MASK 0x00000004 +#define AR9888_RX_ATTENTION_0_FRAGMENT_LSB 13 +#define AR9888_RX_ATTENTION_0_FRAGMENT_MASK 0x00002000 +#define AR9888_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK 0x08000000 +#define AR9888_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB 16 +#define AR9888_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK 0x00ff0000 +#define AR9888_RX_MSDU_START_0_MSDU_LENGTH_LSB 0 +#define AR9888_RX_MSDU_START_0_MSDU_LENGTH_MASK 0x00003fff +#define AR9888_RX_MSDU_START_2_DECAP_FORMAT_OFFSET 0x00000008 +#define AR9888_RX_MSDU_START_2_DECAP_FORMAT_LSB 8 +#define AR9888_RX_MSDU_START_2_DECAP_FORMAT_MASK 0x00000300 +#define AR9888_RX_MPDU_START_0_ENCRYPTED_LSB 13 +#define AR9888_RX_MPDU_START_0_ENCRYPTED_MASK 0x00002000 +#define AR9888_RX_ATTENTION_0_MORE_DATA_MASK 0x00000400 +#define AR9888_RX_ATTENTION_0_MSDU_DONE_MASK 0x80000000 +#define AR9888_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK 0x00040000 +#define AR9888_DST_WR_INDEX_ADDRESS 0x0040 +#define AR9888_SRC_WATERMARK_ADDRESS 0x004c +#define AR9888_SRC_WATERMARK_LOW_MASK 0xffff0000 +#define AR9888_SRC_WATERMARK_HIGH_MASK 0x0000ffff +#define AR9888_DST_WATERMARK_LOW_MASK 0xffff0000 +#define AR9888_DST_WATERMARK_HIGH_MASK 0x0000ffff +#define AR9888_CURRENT_SRRI_ADDRESS 0x0044 +#define AR9888_CURRENT_DRRI_ADDRESS 0x0048 +#define AR9888_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002 +#define AR9888_HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004 +#define AR9888_HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008 +#define AR9888_HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010 +#define AR9888_HOST_IS_ADDRESS 0x0030 +#define AR9888_HOST_IS_COPY_COMPLETE_MASK 0x00000001 +#define AR9888_HOST_IE_ADDRESS 0x002c +#define AR9888_HOST_IE_COPY_COMPLETE_MASK 0x00000001 +#define AR9888_SR_BA_ADDRESS 0x0000 +#define AR9888_SR_SIZE_ADDRESS 0x0004 +#define AR9888_DR_BA_ADDRESS 
0x0008 +#define AR9888_DR_SIZE_ADDRESS 0x000c +#define AR9888_MISC_IE_ADDRESS 0x0034 +#define AR9888_MISC_IS_AXI_ERR_MASK 0x00000400 +#define AR9888_MISC_IS_DST_ADDR_ERR_MASK 0x00000200 +#define AR9888_MISC_IS_SRC_LEN_ERR_MASK 0x00000100 +#define AR9888_MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080 +#define AR9888_MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040 +#define AR9888_MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020 +#define AR9888_SRC_WATERMARK_LOW_LSB 16 +#define AR9888_SRC_WATERMARK_HIGH_LSB 0 +#define AR9888_DST_WATERMARK_LOW_LSB 16 +#define AR9888_DST_WATERMARK_HIGH_LSB 0 +#define AR9888_SOC_GLOBAL_RESET_ADDRESS 0x0008 +#define AR9888_RTC_STATE_ADDRESS 0x0000 +#define AR9888_RTC_STATE_COLD_RESET_MASK 0x00000400 + +#define AR9888_RTC_STATE_V_MASK 0x00000007 +#define AR9888_RTC_STATE_V_LSB 0 +#define AR9888_RTC_STATE_V_ON 3 +#define AR9888_FW_IND_EVENT_PENDING 1 +#define AR9888_FW_IND_INITIALIZED 2 +#define AR9888_CPU_INTR_ADDRESS 0x0010 +#define AR9888_SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050 +#define AR9888_SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004 +#define AR9888_SOC_RESET_CONTROL_ADDRESS 0x00000000 +#define AR9888_SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040 +#define AR9888_CORE_CTRL_ADDRESS 0x0000 +#define AR9888_CORE_CTRL_CPU_INTR_MASK 0x00002000 +#define AR9888_LOCAL_SCRATCH_OFFSET 0x18 +#define AR9888_CLOCK_GPIO_OFFSET 0xffffffff +#define AR9888_CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0 +#define AR9888_CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0 + +#define AR9888_FW_EVENT_PENDING_ADDRESS \ + (AR9888_SOC_CORE_BASE_ADDRESS + AR9888_SCRATCH_3_ADDRESS) +#define AR9888_DRAM_BASE_ADDRESS AR9888_TARG_DRAM_START +#define AR9888_FW_INDICATOR_ADDRESS \ + (AR9888_SOC_CORE_BASE_ADDRESS + AR9888_SCRATCH_3_ADDRESS) +#define AR9888_SYSTEM_SLEEP_OFFSET AR9888_SOC_SYSTEM_SLEEP_OFFSET +#define AR9888_WLAN_SYSTEM_SLEEP_OFFSET AR9888_SOC_SYSTEM_SLEEP_OFFSET +#define AR9888_WLAN_RESET_CONTROL_OFFSET AR9888_SOC_RESET_CONTROL_OFFSET +#define AR9888_CLOCK_CONTROL_OFFSET AR9888_SOC_CLOCK_CONTROL_OFFSET 
+#define AR9888_CLOCK_CONTROL_SI0_CLK_MASK AR9888_SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define AR9888_RESET_CONTROL_MBOX_RST_MASK MISSING +#define AR9888_RESET_CONTROL_SI0_RST_MASK AR9888_SOC_RESET_CONTROL_SI0_RST_MASK +#define AR9888_GPIO_BASE_ADDRESS AR9888_WLAN_GPIO_BASE_ADDRESS +#define AR9888_GPIO_PIN0_OFFSET AR9888_WLAN_GPIO_PIN0_ADDRESS +#define AR9888_GPIO_PIN1_OFFSET AR9888_WLAN_GPIO_PIN1_ADDRESS +#define AR9888_GPIO_PIN0_CONFIG_MASK AR9888_WLAN_GPIO_PIN0_CONFIG_MASK +#define AR9888_GPIO_PIN1_CONFIG_MASK AR9888_WLAN_GPIO_PIN1_CONFIG_MASK +#define AR9888_SI_BASE_ADDRESS AR9888_WLAN_SI_BASE_ADDRESS +#define AR9888_SCRATCH_BASE_ADDRESS AR9888_SOC_CORE_BASE_ADDRESS +#define AR9888_CPU_CLOCK_OFFSET AR9888_SOC_CPU_CLOCK_OFFSET +#define AR9888_LPO_CAL_OFFSET AR9888_SOC_LPO_CAL_OFFSET +#define AR9888_GPIO_PIN10_OFFSET AR9888_WLAN_GPIO_PIN10_ADDRESS +#define AR9888_GPIO_PIN11_OFFSET AR9888_WLAN_GPIO_PIN11_ADDRESS +#define AR9888_GPIO_PIN12_OFFSET AR9888_WLAN_GPIO_PIN12_ADDRESS +#define AR9888_GPIO_PIN13_OFFSET AR9888_WLAN_GPIO_PIN13_ADDRESS +#define AR9888_CPU_CLOCK_STANDARD_LSB AR9888_SOC_CPU_CLOCK_STANDARD_LSB +#define AR9888_CPU_CLOCK_STANDARD_MASK AR9888_SOC_CPU_CLOCK_STANDARD_MASK +#define AR9888_LPO_CAL_ENABLE_LSB AR9888_SOC_LPO_CAL_ENABLE_LSB +#define AR9888_LPO_CAL_ENABLE_MASK AR9888_SOC_LPO_CAL_ENABLE_MASK +#define AR9888_ANALOG_INTF_BASE_ADDRESS AR9888_WLAN_ANALOG_INTF_BASE_ADDRESS +#define AR9888_MBOX_BASE_ADDRESS MISSING +#define AR9888_INT_STATUS_ENABLE_ERROR_LSB MISSING +#define AR9888_INT_STATUS_ENABLE_ERROR_MASK MISSING +#define AR9888_INT_STATUS_ENABLE_CPU_LSB MISSING +#define AR9888_INT_STATUS_ENABLE_CPU_MASK MISSING +#define AR9888_INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define AR9888_INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define AR9888_INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define AR9888_INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define AR9888_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define AR9888_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK 
MISSING +#define AR9888_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define AR9888_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define AR9888_COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define AR9888_COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define AR9888_INT_STATUS_ENABLE_ADDRESS MISSING +#define AR9888_CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define AR9888_CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define AR9888_HOST_INT_STATUS_ADDRESS MISSING +#define AR9888_CPU_INT_STATUS_ADDRESS MISSING +#define AR9888_ERROR_INT_STATUS_ADDRESS MISSING +#define AR9888_ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define AR9888_ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define AR9888_ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define AR9888_ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define AR9888_ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define AR9888_ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define AR9888_COUNT_DEC_ADDRESS MISSING +#define AR9888_HOST_INT_STATUS_CPU_MASK MISSING +#define AR9888_HOST_INT_STATUS_CPU_LSB MISSING +#define AR9888_HOST_INT_STATUS_ERROR_MASK MISSING +#define AR9888_HOST_INT_STATUS_ERROR_LSB MISSING +#define AR9888_HOST_INT_STATUS_COUNTER_MASK MISSING +#define AR9888_HOST_INT_STATUS_COUNTER_LSB MISSING +#define AR9888_RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define AR9888_WINDOW_DATA_ADDRESS MISSING +#define AR9888_WINDOW_READ_ADDR_ADDRESS MISSING +#define AR9888_WINDOW_WRITE_ADDR_ADDRESS MISSING +#define AR9888_HOST_INT_STATUS_MBOX_DATA_MASK 0x0f +#define AR9888_HOST_INT_STATUS_MBOX_DATA_LSB 0 + +struct targetdef_s ar9888_targetdef = { + .d_RTC_SOC_BASE_ADDRESS = AR9888_RTC_SOC_BASE_ADDRESS, + .d_RTC_WMAC_BASE_ADDRESS = AR9888_RTC_WMAC_BASE_ADDRESS, + .d_SYSTEM_SLEEP_OFFSET = AR9888_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_OFFSET = AR9888_WLAN_SYSTEM_SLEEP_OFFSET, + .d_WLAN_SYSTEM_SLEEP_DISABLE_LSB = + AR9888_WLAN_SYSTEM_SLEEP_DISABLE_LSB, + .d_WLAN_SYSTEM_SLEEP_DISABLE_MASK = + AR9888_WLAN_SYSTEM_SLEEP_DISABLE_MASK, + .d_CLOCK_CONTROL_OFFSET = 
AR9888_CLOCK_CONTROL_OFFSET, + .d_CLOCK_CONTROL_SI0_CLK_MASK = AR9888_CLOCK_CONTROL_SI0_CLK_MASK, + .d_RESET_CONTROL_OFFSET = AR9888_SOC_RESET_CONTROL_OFFSET, + .d_RESET_CONTROL_MBOX_RST_MASK = AR9888_RESET_CONTROL_MBOX_RST_MASK, + .d_RESET_CONTROL_SI0_RST_MASK = AR9888_RESET_CONTROL_SI0_RST_MASK, + .d_WLAN_RESET_CONTROL_OFFSET = AR9888_WLAN_RESET_CONTROL_OFFSET, + .d_WLAN_RESET_CONTROL_COLD_RST_MASK = + AR9888_WLAN_RESET_CONTROL_COLD_RST_MASK, + .d_WLAN_RESET_CONTROL_WARM_RST_MASK = + AR9888_WLAN_RESET_CONTROL_WARM_RST_MASK, + .d_GPIO_BASE_ADDRESS = AR9888_GPIO_BASE_ADDRESS, + .d_GPIO_PIN0_OFFSET = AR9888_GPIO_PIN0_OFFSET, + .d_GPIO_PIN1_OFFSET = AR9888_GPIO_PIN1_OFFSET, + .d_GPIO_PIN0_CONFIG_MASK = AR9888_GPIO_PIN0_CONFIG_MASK, + .d_GPIO_PIN1_CONFIG_MASK = AR9888_GPIO_PIN1_CONFIG_MASK, + .d_SI_CONFIG_BIDIR_OD_DATA_LSB = AR9888_SI_CONFIG_BIDIR_OD_DATA_LSB, + .d_SI_CONFIG_BIDIR_OD_DATA_MASK = AR9888_SI_CONFIG_BIDIR_OD_DATA_MASK, + .d_SI_CONFIG_I2C_LSB = AR9888_SI_CONFIG_I2C_LSB, + .d_SI_CONFIG_I2C_MASK = AR9888_SI_CONFIG_I2C_MASK, + .d_SI_CONFIG_POS_SAMPLE_LSB = AR9888_SI_CONFIG_POS_SAMPLE_LSB, + .d_SI_CONFIG_POS_SAMPLE_MASK = AR9888_SI_CONFIG_POS_SAMPLE_MASK, + .d_SI_CONFIG_INACTIVE_CLK_LSB = AR9888_SI_CONFIG_INACTIVE_CLK_LSB, + .d_SI_CONFIG_INACTIVE_CLK_MASK = AR9888_SI_CONFIG_INACTIVE_CLK_MASK, + .d_SI_CONFIG_INACTIVE_DATA_LSB = AR9888_SI_CONFIG_INACTIVE_DATA_LSB, + .d_SI_CONFIG_INACTIVE_DATA_MASK = AR9888_SI_CONFIG_INACTIVE_DATA_MASK, + .d_SI_CONFIG_DIVIDER_LSB = AR9888_SI_CONFIG_DIVIDER_LSB, + .d_SI_CONFIG_DIVIDER_MASK = AR9888_SI_CONFIG_DIVIDER_MASK, + .d_SI_BASE_ADDRESS = AR9888_SI_BASE_ADDRESS, + .d_SI_CONFIG_OFFSET = AR9888_SI_CONFIG_OFFSET, + .d_SI_TX_DATA0_OFFSET = AR9888_SI_TX_DATA0_OFFSET, + .d_SI_TX_DATA1_OFFSET = AR9888_SI_TX_DATA1_OFFSET, + .d_SI_RX_DATA0_OFFSET = AR9888_SI_RX_DATA0_OFFSET, + .d_SI_RX_DATA1_OFFSET = AR9888_SI_RX_DATA1_OFFSET, + .d_SI_CS_OFFSET = AR9888_SI_CS_OFFSET, + .d_SI_CS_DONE_ERR_MASK = AR9888_SI_CS_DONE_ERR_MASK, + 
.d_SI_CS_DONE_INT_MASK = AR9888_SI_CS_DONE_INT_MASK, + .d_SI_CS_START_LSB = AR9888_SI_CS_START_LSB, + .d_SI_CS_START_MASK = AR9888_SI_CS_START_MASK, + .d_SI_CS_RX_CNT_LSB = AR9888_SI_CS_RX_CNT_LSB, + .d_SI_CS_RX_CNT_MASK = AR9888_SI_CS_RX_CNT_MASK, + .d_SI_CS_TX_CNT_LSB = AR9888_SI_CS_TX_CNT_LSB, + .d_SI_CS_TX_CNT_MASK = AR9888_SI_CS_TX_CNT_MASK, + .d_BOARD_DATA_SZ = AR9888_BOARD_DATA_SZ, + .d_BOARD_EXT_DATA_SZ = AR9888_BOARD_EXT_DATA_SZ, + .d_MBOX_BASE_ADDRESS = AR9888_MBOX_BASE_ADDRESS, + .d_LOCAL_SCRATCH_OFFSET = AR9888_LOCAL_SCRATCH_OFFSET, + .d_CPU_CLOCK_OFFSET = AR9888_CPU_CLOCK_OFFSET, + .d_LPO_CAL_OFFSET = AR9888_LPO_CAL_OFFSET, + .d_GPIO_PIN10_OFFSET = AR9888_GPIO_PIN10_OFFSET, + .d_GPIO_PIN11_OFFSET = AR9888_GPIO_PIN11_OFFSET, + .d_GPIO_PIN12_OFFSET = AR9888_GPIO_PIN12_OFFSET, + .d_GPIO_PIN13_OFFSET = AR9888_GPIO_PIN13_OFFSET, + .d_CLOCK_GPIO_OFFSET = AR9888_CLOCK_GPIO_OFFSET, + .d_CPU_CLOCK_STANDARD_LSB = AR9888_CPU_CLOCK_STANDARD_LSB, + .d_CPU_CLOCK_STANDARD_MASK = AR9888_CPU_CLOCK_STANDARD_MASK, + .d_LPO_CAL_ENABLE_LSB = AR9888_LPO_CAL_ENABLE_LSB, + .d_LPO_CAL_ENABLE_MASK = AR9888_LPO_CAL_ENABLE_MASK, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB = AR9888_CLOCK_GPIO_BT_CLK_OUT_EN_LSB, + .d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK = + AR9888_CLOCK_GPIO_BT_CLK_OUT_EN_MASK, + .d_ANALOG_INTF_BASE_ADDRESS = AR9888_ANALOG_INTF_BASE_ADDRESS, + .d_WLAN_MAC_BASE_ADDRESS = AR9888_WLAN_MAC_BASE_ADDRESS, + .d_FW_INDICATOR_ADDRESS = AR9888_FW_INDICATOR_ADDRESS, + .d_DRAM_BASE_ADDRESS = AR9888_DRAM_BASE_ADDRESS, + .d_SOC_CORE_BASE_ADDRESS = AR9888_SOC_CORE_BASE_ADDRESS, + .d_CORE_CTRL_ADDRESS = AR9888_CORE_CTRL_ADDRESS, +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) + .d_MSI_NUM_REQUEST = MSI_NUM_REQUEST, + .d_MSI_ASSIGN_FW = MSI_ASSIGN_FW, +#endif + .d_CORE_CTRL_CPU_INTR_MASK = AR9888_CORE_CTRL_CPU_INTR_MASK, + .d_SR_WR_INDEX_ADDRESS = AR9888_SR_WR_INDEX_ADDRESS, + .d_DST_WATERMARK_ADDRESS = AR9888_DST_WATERMARK_ADDRESS, + /* htt_rx.c */ + .d_RX_MSDU_END_4_FIRST_MSDU_MASK 
= + AR9888_RX_MSDU_END_4_FIRST_MSDU_MASK, + .d_RX_MSDU_END_4_FIRST_MSDU_LSB = AR9888_RX_MSDU_END_4_FIRST_MSDU_LSB, + .d_RX_MPDU_START_0_SEQ_NUM_MASK = AR9888_RX_MPDU_START_0_SEQ_NUM_MASK, + .d_RX_MPDU_START_0_SEQ_NUM_LSB = AR9888_RX_MPDU_START_0_SEQ_NUM_LSB, + .d_RX_MPDU_START_2_PN_47_32_LSB = AR9888_RX_MPDU_START_2_PN_47_32_LSB, + .d_RX_MPDU_START_2_PN_47_32_MASK = + AR9888_RX_MPDU_START_2_PN_47_32_MASK, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK = + AR9888_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK, + .d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB = + AR9888_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB, + .d_RX_MSDU_END_1_KEY_ID_OCT_MASK = + AR9888_RX_MSDU_END_1_KEY_ID_OCT_MASK, + .d_RX_MSDU_END_1_KEY_ID_OCT_LSB = AR9888_RX_MSDU_END_1_KEY_ID_OCT_LSB, + .d_RX_MSDU_END_4_LAST_MSDU_MASK = AR9888_RX_MSDU_END_4_LAST_MSDU_MASK, + .d_RX_MSDU_END_4_LAST_MSDU_LSB = AR9888_RX_MSDU_END_4_LAST_MSDU_LSB, + .d_RX_ATTENTION_0_MCAST_BCAST_MASK = + AR9888_RX_ATTENTION_0_MCAST_BCAST_MASK, + .d_RX_ATTENTION_0_MCAST_BCAST_LSB = + AR9888_RX_ATTENTION_0_MCAST_BCAST_LSB, + .d_RX_ATTENTION_0_FRAGMENT_MASK = AR9888_RX_ATTENTION_0_FRAGMENT_MASK, + .d_RX_ATTENTION_0_FRAGMENT_LSB = AR9888_RX_ATTENTION_0_FRAGMENT_LSB, + .d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK = + AR9888_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK = + AR9888_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK, + .d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB = + AR9888_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB, + .d_RX_MSDU_START_0_MSDU_LENGTH_MASK = + AR9888_RX_MSDU_START_0_MSDU_LENGTH_MASK, + .d_RX_MSDU_START_0_MSDU_LENGTH_LSB = + AR9888_RX_MSDU_START_0_MSDU_LENGTH_LSB, + .d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET = + AR9888_RX_MSDU_START_2_DECAP_FORMAT_OFFSET, + .d_RX_MSDU_START_2_DECAP_FORMAT_MASK = + AR9888_RX_MSDU_START_2_DECAP_FORMAT_MASK, + .d_RX_MSDU_START_2_DECAP_FORMAT_LSB = + AR9888_RX_MSDU_START_2_DECAP_FORMAT_LSB, + .d_RX_MPDU_START_0_ENCRYPTED_MASK = + AR9888_RX_MPDU_START_0_ENCRYPTED_MASK, + .d_RX_MPDU_START_0_ENCRYPTED_LSB = + 
AR9888_RX_MPDU_START_0_ENCRYPTED_LSB, + .d_RX_ATTENTION_0_MORE_DATA_MASK = + AR9888_RX_ATTENTION_0_MORE_DATA_MASK, + .d_RX_ATTENTION_0_MSDU_DONE_MASK = + AR9888_RX_ATTENTION_0_MSDU_DONE_MASK, + .d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK = + AR9888_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK, +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) + .d_CE_COUNT = AR9888_CE_COUNT, + .d_MSI_ASSIGN_CE_INITIAL = MSI_ASSIGN_CE_INITIAL, + .d_PCIE_INTR_ENABLE_ADDRESS = AR9888_PCIE_INTR_ENABLE_ADDRESS, + .d_PCIE_INTR_CLR_ADDRESS = AR9888_PCIE_INTR_CLR_ADDRESS, + .d_PCIE_INTR_FIRMWARE_MASK = AR9888_PCIE_INTR_FIRMWARE_MASK, + .d_PCIE_INTR_CE_MASK_ALL = AR9888_PCIE_INTR_CE_MASK_ALL, + .d_PCIE_INTR_CAUSE_ADDRESS = AR9888_PCIE_INTR_CAUSE_ADDRESS, + .d_SOC_RESET_CONTROL_ADDRESS = AR9888_SOC_RESET_CONTROL_ADDRESS, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK = + AR9888_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK, + .d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB = + AR9888_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB, + .d_SOC_RESET_CONTROL_CE_RST_MASK = + AR9888_SOC_RESET_CONTROL_CE_RST_MASK, +#endif + .d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK = + AR9888_SOC_RESET_CONTROL_CPU_WARM_RST_MASK, + .d_CPU_INTR_ADDRESS = AR9888_CPU_INTR_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ADDRESS = + AR9888_SOC_LF_TIMER_CONTROL0_ADDRESS, + .d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK = + AR9888_SOC_LF_TIMER_CONTROL0_ENABLE_MASK, +}; + +struct hostdef_s ar9888_hostdef = { + .d_INT_STATUS_ENABLE_ERROR_LSB = AR9888_INT_STATUS_ENABLE_ERROR_LSB, + .d_INT_STATUS_ENABLE_ERROR_MASK = AR9888_INT_STATUS_ENABLE_ERROR_MASK, + .d_INT_STATUS_ENABLE_CPU_LSB = AR9888_INT_STATUS_ENABLE_CPU_LSB, + .d_INT_STATUS_ENABLE_CPU_MASK = AR9888_INT_STATUS_ENABLE_CPU_MASK, + .d_INT_STATUS_ENABLE_COUNTER_LSB = + AR9888_INT_STATUS_ENABLE_COUNTER_LSB, + .d_INT_STATUS_ENABLE_COUNTER_MASK = + AR9888_INT_STATUS_ENABLE_COUNTER_MASK, + .d_INT_STATUS_ENABLE_MBOX_DATA_LSB = + AR9888_INT_STATUS_ENABLE_MBOX_DATA_LSB, + .d_INT_STATUS_ENABLE_MBOX_DATA_MASK 
= + AR9888_INT_STATUS_ENABLE_MBOX_DATA_MASK, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB = + AR9888_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK = + AR9888_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB = + AR9888_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB, + .d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK = + AR9888_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, + .d_COUNTER_INT_STATUS_ENABLE_BIT_LSB = + AR9888_COUNTER_INT_STATUS_ENABLE_BIT_LSB, + .d_COUNTER_INT_STATUS_ENABLE_BIT_MASK = + AR9888_COUNTER_INT_STATUS_ENABLE_BIT_MASK, + .d_INT_STATUS_ENABLE_ADDRESS = AR9888_INT_STATUS_ENABLE_ADDRESS, + .d_CPU_INT_STATUS_ENABLE_BIT_LSB = + AR9888_CPU_INT_STATUS_ENABLE_BIT_LSB, + .d_CPU_INT_STATUS_ENABLE_BIT_MASK = + AR9888_CPU_INT_STATUS_ENABLE_BIT_MASK, + .d_HOST_INT_STATUS_ADDRESS = AR9888_HOST_INT_STATUS_ADDRESS, + .d_CPU_INT_STATUS_ADDRESS = AR9888_CPU_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_ADDRESS = AR9888_ERROR_INT_STATUS_ADDRESS, + .d_ERROR_INT_STATUS_WAKEUP_MASK = AR9888_ERROR_INT_STATUS_WAKEUP_MASK, + .d_ERROR_INT_STATUS_WAKEUP_LSB = AR9888_ERROR_INT_STATUS_WAKEUP_LSB, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK = + AR9888_ERROR_INT_STATUS_RX_UNDERFLOW_MASK, + .d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB = + AR9888_ERROR_INT_STATUS_RX_UNDERFLOW_LSB, + .d_ERROR_INT_STATUS_TX_OVERFLOW_MASK = + AR9888_ERROR_INT_STATUS_TX_OVERFLOW_MASK, + .d_ERROR_INT_STATUS_TX_OVERFLOW_LSB = + AR9888_ERROR_INT_STATUS_TX_OVERFLOW_LSB, + .d_COUNT_DEC_ADDRESS = AR9888_COUNT_DEC_ADDRESS, + .d_HOST_INT_STATUS_CPU_MASK = AR9888_HOST_INT_STATUS_CPU_MASK, + .d_HOST_INT_STATUS_CPU_LSB = AR9888_HOST_INT_STATUS_CPU_LSB, + .d_HOST_INT_STATUS_ERROR_MASK = AR9888_HOST_INT_STATUS_ERROR_MASK, + .d_HOST_INT_STATUS_ERROR_LSB = AR9888_HOST_INT_STATUS_ERROR_LSB, + .d_HOST_INT_STATUS_COUNTER_MASK = AR9888_HOST_INT_STATUS_COUNTER_MASK, + .d_HOST_INT_STATUS_COUNTER_LSB = AR9888_HOST_INT_STATUS_COUNTER_LSB, + .d_RX_LOOKAHEAD_VALID_ADDRESS = 
AR9888_RX_LOOKAHEAD_VALID_ADDRESS, + .d_WINDOW_DATA_ADDRESS = AR9888_WINDOW_DATA_ADDRESS, + .d_WINDOW_READ_ADDR_ADDRESS = AR9888_WINDOW_READ_ADDR_ADDRESS, + .d_WINDOW_WRITE_ADDR_ADDRESS = AR9888_WINDOW_WRITE_ADDR_ADDRESS, + .d_SOC_GLOBAL_RESET_ADDRESS = AR9888_SOC_GLOBAL_RESET_ADDRESS, + .d_RTC_STATE_ADDRESS = AR9888_RTC_STATE_ADDRESS, + .d_RTC_STATE_COLD_RESET_MASK = AR9888_RTC_STATE_COLD_RESET_MASK, + .d_RTC_STATE_V_MASK = AR9888_RTC_STATE_V_MASK, + .d_RTC_STATE_V_LSB = AR9888_RTC_STATE_V_LSB, + .d_FW_IND_EVENT_PENDING = AR9888_FW_IND_EVENT_PENDING, + .d_FW_IND_INITIALIZED = AR9888_FW_IND_INITIALIZED, + .d_RTC_STATE_V_ON = AR9888_RTC_STATE_V_ON, +#if defined(SDIO_3_0) + .d_HOST_INT_STATUS_MBOX_DATA_MASK = + AR9888_HOST_INT_STATUS_MBOX_DATA_MASK, + .d_HOST_INT_STATUS_MBOX_DATA_LSB = + AR9888_HOST_INT_STATUS_MBOX_DATA_LSB, +#endif +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) + .d_MUX_ID_MASK = AR9888_MUX_ID_MASK, + .d_TRANSACTION_ID_MASK = AR9888_TRANSACTION_ID_MASK, + .d_PCIE_LOCAL_BASE_ADDRESS = AR9888_PCIE_LOCAL_BASE_ADDRESS, + .d_PCIE_SOC_WAKE_RESET = AR9888_PCIE_SOC_WAKE_RESET, + .d_PCIE_SOC_WAKE_ADDRESS = AR9888_PCIE_SOC_WAKE_ADDRESS, + .d_PCIE_SOC_WAKE_V_MASK = AR9888_PCIE_SOC_WAKE_V_MASK, + .d_PCIE_SOC_RDY_STATUS_ADDRESS = PCIE_SOC_RDY_STATUS_ADDRESS, + .d_PCIE_SOC_RDY_STATUS_BAR_MASK = PCIE_SOC_RDY_STATUS_BAR_MASK, + .d_SOC_PCIE_BASE_ADDRESS = SOC_PCIE_BASE_ADDRESS, + .d_MSI_MAGIC_ADR_ADDRESS = MSI_MAGIC_ADR_ADDRESS, + .d_MSI_MAGIC_ADDRESS = MSI_MAGIC_ADDRESS, + .d_HOST_CE_COUNT = 8, + .d_ENABLE_MSI = 0, +#endif +}; + +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) +struct ce_reg_def ar9888_ce_targetdef = { + /* copy_engine.c */ + .d_DST_WR_INDEX_ADDRESS = AR9888_DST_WR_INDEX_ADDRESS, + .d_SRC_WATERMARK_ADDRESS = AR9888_SRC_WATERMARK_ADDRESS, + .d_SRC_WATERMARK_LOW_MASK = AR9888_SRC_WATERMARK_LOW_MASK, + .d_SRC_WATERMARK_HIGH_MASK = AR9888_SRC_WATERMARK_HIGH_MASK, + .d_DST_WATERMARK_LOW_MASK = AR9888_DST_WATERMARK_LOW_MASK, 
+ .d_DST_WATERMARK_HIGH_MASK = AR9888_DST_WATERMARK_HIGH_MASK, + .d_CURRENT_SRRI_ADDRESS = AR9888_CURRENT_SRRI_ADDRESS, + .d_CURRENT_DRRI_ADDRESS = AR9888_CURRENT_DRRI_ADDRESS, + .d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK = + AR9888_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK = + AR9888_HOST_IS_SRC_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK = + AR9888_HOST_IS_DST_RING_HIGH_WATERMARK_MASK, + .d_HOST_IS_DST_RING_LOW_WATERMARK_MASK = + AR9888_HOST_IS_DST_RING_LOW_WATERMARK_MASK, + .d_HOST_IS_ADDRESS = AR9888_HOST_IS_ADDRESS, + .d_HOST_IS_COPY_COMPLETE_MASK = AR9888_HOST_IS_COPY_COMPLETE_MASK, + .d_CE_WRAPPER_BASE_ADDRESS = AR9888_CE_WRAPPER_BASE_ADDRESS, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS = + AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS, + .d_HOST_IE_ADDRESS = AR9888_HOST_IE_ADDRESS, + .d_HOST_IE_COPY_COMPLETE_MASK = AR9888_HOST_IE_COPY_COMPLETE_MASK, + .d_SR_BA_ADDRESS = AR9888_SR_BA_ADDRESS, + .d_SR_SIZE_ADDRESS = AR9888_SR_SIZE_ADDRESS, + .d_CE_CTRL1_ADDRESS = AR9888_CE_CTRL1_ADDRESS, + .d_CE_CTRL1_DMAX_LENGTH_MASK = AR9888_CE_CTRL1_DMAX_LENGTH_MASK, + .d_DR_BA_ADDRESS = AR9888_DR_BA_ADDRESS, + .d_DR_SIZE_ADDRESS = AR9888_DR_SIZE_ADDRESS, + .d_MISC_IE_ADDRESS = AR9888_MISC_IE_ADDRESS, + .d_MISC_IS_AXI_ERR_MASK = AR9888_MISC_IS_AXI_ERR_MASK, + .d_MISC_IS_DST_ADDR_ERR_MASK = AR9888_MISC_IS_DST_ADDR_ERR_MASK, + .d_MISC_IS_SRC_LEN_ERR_MASK = AR9888_MISC_IS_SRC_LEN_ERR_MASK, + .d_MISC_IS_DST_MAX_LEN_VIO_MASK = AR9888_MISC_IS_DST_MAX_LEN_VIO_MASK, + .d_MISC_IS_DST_RING_OVERFLOW_MASK = + AR9888_MISC_IS_DST_RING_OVERFLOW_MASK, + .d_MISC_IS_SRC_RING_OVERFLOW_MASK = + AR9888_MISC_IS_SRC_RING_OVERFLOW_MASK, + .d_SRC_WATERMARK_LOW_LSB = AR9888_SRC_WATERMARK_LOW_LSB, + .d_SRC_WATERMARK_HIGH_LSB = AR9888_SRC_WATERMARK_HIGH_LSB, + .d_DST_WATERMARK_LOW_LSB = AR9888_DST_WATERMARK_LOW_LSB, + .d_DST_WATERMARK_HIGH_LSB = AR9888_DST_WATERMARK_HIGH_LSB, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK = + 
AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK, + .d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB = + AR9888_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB, + .d_CE_CTRL1_DMAX_LENGTH_LSB = AR9888_CE_CTRL1_DMAX_LENGTH_LSB, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK = + AR9888_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK = + AR9888_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK, + .d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB = + AR9888_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB, + .d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB = + AR9888_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB, + .d_CE0_BASE_ADDRESS = AR9888_CE0_BASE_ADDRESS, + .d_CE1_BASE_ADDRESS = AR9888_CE1_BASE_ADDRESS, + +}; +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ath_procfs.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ath_procfs.c new file mode 100644 index 0000000000000000000000000000000000000000..3c1cfad94c369fe96e04cb9057c8ab2317adfadf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ath_procfs.c @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#if defined(CONFIG_ATH_PROCFS_DIAG_SUPPORT) +#include /* Specifically, a module */ +#include /* We're doing kernel work */ +#include /* We're doing kernel work */ +#include /* Necessary because we use the proc fs */ +#include /* for copy_from_user */ +#include "hif.h" +#include "hif_main.h" +#if defined(HIF_USB) +#include "if_usb.h" +#endif +#if defined(HIF_SDIO) +#include "if_sdio.h" +#endif +#include "hif_debug.h" +#include "pld_common.h" +#include "target_type.h" + +#define PROCFS_NAME "athdiagpfs" +#ifdef MULTI_IF_NAME +#define PROCFS_DIR "cld" MULTI_IF_NAME +#else +#define PROCFS_DIR "cld" +#endif + +/** + * This structure hold information about the /proc file + * + */ +static struct proc_dir_entry *proc_file, *proc_dir; + +static void *get_hif_hdl_from_file(struct file *file) +{ + struct hif_opaque_softc *scn; + + scn = (struct hif_opaque_softc *)PDE_DATA(file_inode(file)); + return (void *)scn; +} + +static ssize_t ath_procfs_diag_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + hif_handle_t hif_hdl; + int rv; + uint8_t *read_buffer = NULL; + struct hif_softc *scn; + uint32_t offset = 0, memtype = 0; + struct hif_target_info *tgt_info; + + hif_hdl = get_hif_hdl_from_file(file); + scn = HIF_GET_SOFTC(hif_hdl); + + if (scn->bus_ops.hif_addr_in_boundary(hif_hdl, (uint32_t)(*pos))) + return -EINVAL; + + read_buffer = qdf_mem_malloc(count); + if (NULL == read_buffer) { + HIF_ERROR("%s: cdf_mem_alloc failed", __func__); + return -ENOMEM; + } + + HIF_DBG("rd buff 0x%pK cnt %zu offset 0x%x buf 0x%pK", + read_buffer, count, (int)*pos, buf); + + tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(hif_hdl)); + if (scn->bus_type == QDF_BUS_TYPE_SNOC || + (scn->bus_type == QDF_BUS_TYPE_PCI && + (tgt_info->target_type == TARGET_TYPE_QCA6290 || + tgt_info->target_type == TARGET_TYPE_QCA8074))) { + memtype = ((uint32_t)(*pos) & 0xff000000) >> 24; + offset = (uint32_t)(*pos) & 0xffffff; + HIF_TRACE("%s: offset 0x%x memtype 0x%x, 
datalen %zu\n", + __func__, offset, memtype, count); + rv = pld_athdiag_read(scn->qdf_dev->dev, + offset, memtype, count, + (uint8_t *)read_buffer); + goto out; + } + + if ((count == 4) && ((((uint32_t) (*pos)) & 3) == 0)) { + /* reading a word? */ + rv = hif_diag_read_access(hif_hdl, (uint32_t)(*pos), + (uint32_t *)read_buffer); + } else { + rv = hif_diag_read_mem(hif_hdl, (uint32_t)(*pos), + (uint8_t *)read_buffer, count); + } + +out: + if (rv) { + qdf_mem_free(read_buffer); + return -EIO; + } + + if (copy_to_user(buf, read_buffer, count)) { + qdf_mem_free(read_buffer); + HIF_ERROR("%s: copy_to_user error in /proc/%s", + __func__, PROCFS_NAME); + return -EFAULT; + } + qdf_mem_free(read_buffer); + return count; +} + +static ssize_t ath_procfs_diag_write(struct file *file, + const char __user *buf, + size_t count, loff_t *pos) +{ + hif_handle_t hif_hdl; + int rv; + uint8_t *write_buffer = NULL; + struct hif_softc *scn; + uint32_t offset = 0, memtype = 0; + struct hif_target_info *tgt_info; + + hif_hdl = get_hif_hdl_from_file(file); + scn = HIF_GET_SOFTC(hif_hdl); + + if (scn->bus_ops.hif_addr_in_boundary(hif_hdl, (uint32_t)(*pos))) + return -EINVAL; + + write_buffer = qdf_mem_malloc(count); + if (NULL == write_buffer) { + HIF_ERROR("%s: cdf_mem_alloc failed", __func__); + return -ENOMEM; + } + if (copy_from_user(write_buffer, buf, count)) { + qdf_mem_free(write_buffer); + HIF_ERROR("%s: copy_to_user error in /proc/%s", + __func__, PROCFS_NAME); + return -EFAULT; + } + + HIF_DBG("wr buff 0x%pK buf 0x%pK cnt %zu offset 0x%x value 0x%x", + write_buffer, buf, count, + (int)*pos, *((uint32_t *) write_buffer)); + + tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(hif_hdl)); + if (scn->bus_type == QDF_BUS_TYPE_SNOC || + (scn->bus_type == QDF_BUS_TYPE_PCI && + (tgt_info->target_type == TARGET_TYPE_QCA6290 || + tgt_info->target_type == TARGET_TYPE_QCA8074))) { + memtype = ((uint32_t)(*pos) & 0xff000000) >> 24; + offset = (uint32_t)(*pos) & 0xffffff; + HIF_TRACE("%s: 
offset 0x%x memtype 0x%x, datalen %zu\n", + __func__, offset, memtype, count); + rv = pld_athdiag_write(scn->qdf_dev->dev, + offset, memtype, count, + (uint8_t *)write_buffer); + goto out; + } + + if ((count == 4) && ((((uint32_t) (*pos)) & 3) == 0)) { + /* reading a word? */ + uint32_t value = *((uint32_t *)write_buffer); + + rv = hif_diag_write_access(hif_hdl, (uint32_t)(*pos), value); + } else { + rv = hif_diag_write_mem(hif_hdl, (uint32_t)(*pos), + (uint8_t *)write_buffer, count); + } + +out: + + qdf_mem_free(write_buffer); + if (rv == 0) + return count; + else + return -EIO; +} + +static const struct file_operations athdiag_fops = { + .read = ath_procfs_diag_read, + .write = ath_procfs_diag_write, +}; + +/* + * This function is called when the module is loaded + * + */ +int athdiag_procfs_init(void *scn) +{ + proc_dir = proc_mkdir(PROCFS_DIR, NULL); + if (proc_dir == NULL) { + remove_proc_entry(PROCFS_DIR, NULL); + HIF_ERROR("%s: Error: Could not initialize /proc/%s", + __func__, PROCFS_DIR); + return -ENOMEM; + } + + proc_file = proc_create_data(PROCFS_NAME, 0600, proc_dir, + &athdiag_fops, (void *)scn); + if (proc_file == NULL) { + remove_proc_entry(PROCFS_NAME, proc_dir); + HIF_ERROR("%s: Could not initialize /proc/%s", + __func__, PROCFS_NAME); + return -ENOMEM; + } + + HIF_DBG("/proc/%s/%s created", PROCFS_DIR, PROCFS_NAME); + return 0; /* everything is ok */ +} + +/* + * This function is called when the module is unloaded + * + */ +void athdiag_procfs_remove(void) +{ + if (proc_dir != NULL) { + remove_proc_entry(PROCFS_NAME, proc_dir); + HIF_DBG("/proc/%s/%s removed", PROCFS_DIR, PROCFS_NAME); + remove_proc_entry(PROCFS_DIR, NULL); + HIF_DBG("/proc/%s removed", PROCFS_DIR); + proc_dir = NULL; + } +} +#else +int athdiag_procfs_init(void *scn) +{ + return 0; +} +void athdiag_procfs_remove(void) {} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_api.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_api.h new file mode 100644 index 
0000000000000000000000000000000000000000..2b50d0c9cb7ef1696b4bd7035302365298a3bc8d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_api.h @@ -0,0 +1,544 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __COPY_ENGINE_API_H__ +#define __COPY_ENGINE_API_H__ + +#include "pld_common.h" +#include "ce_main.h" +#include "hif_main.h" + +/* TBDXXX: Use int return values for consistency with Target */ + +/* TBDXXX: Perhaps merge Host/Target-->common */ + +/* + * Copy Engine support: low-level Target-side Copy Engine API. + * This is a hardware access layer used by code that understands + * how to use copy engines. + */ + +/* + * A "struct CE_handle *" serves as an opaque pointer-sized + * handle to a specific copy engine. + */ +struct CE_handle; + +/* + * "Send Completion" callback type for Send Completion Notification. + * + * If a Send Completion callback is registered and one or more sends + * have completed, the callback is invoked. + * + * per_ce_send_context is a context supplied by the calling layer + * (via ce_send_cb_register). It is associated with a copy engine. + * + * per_transfer_send_context is context supplied by the calling layer + * (via the "send" call). 
It may be different for each invocation + * of send. + * + * The buffer parameter is the first byte sent of the first buffer + * sent (if more than one buffer). + * + * nbytes is the number of bytes of that buffer that were sent. + * + * transfer_id matches the value used when the buffer or + * buf_list was sent. + * + * Implementation note: Pops 1 completed send buffer from Source ring + */ +typedef void (*ce_send_cb)(struct CE_handle *copyeng, + void *per_ce_send_context, + void *per_transfer_send_context, + qdf_dma_addr_t buffer, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int sw_index, + unsigned int hw_index, + uint32_t toeplitz_hash_result); + +/* + * "Buffer Received" callback type for Buffer Received Notification. + * + * Implementation note: Pops 1 completed recv buffer from Dest ring + */ +typedef void (*CE_recv_cb)(struct CE_handle *copyeng, + void *per_CE_recv_context, + void *per_transfer_recv_context, + qdf_dma_addr_t buffer, + unsigned int nbytes, + unsigned int transfer_id, + unsigned int flags); + +/* + * Copy Engine Watermark callback type. + * + * Allows upper layers to be notified when watermarks are reached: + * space is available and/or running short in a source ring + * buffers are exhausted and/or abundant in a destination ring + * + * The flags parameter indicates which condition triggered this + * callback. See CE_WM_FLAG_*. + * + * Watermark APIs are provided to allow upper layers "batch" + * descriptor processing and to allow upper layers to + * throttle/unthrottle. 
 */
typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
                                void *per_CE_wm_context, unsigned int flags);

/* Watermark-event bits reported via a CE_watermark_cb's "flags" argument */
#define CE_WM_FLAG_SEND_HIGH 1
#define CE_WM_FLAG_SEND_LOW 2
#define CE_WM_FLAG_RECV_HIGH 4
#define CE_WM_FLAG_RECV_LOW 8
#define CE_HTT_TX_CE 4

/* A list of buffers to be gathered and sent */
struct ce_sendlist;

/* Copy Engine settable attributes */
struct CE_attr;

/*==================Send=====================================================*/

/* ce_send flags */
/* disable ring's byte swap, even if the default policy is to swap */
#define CE_SEND_FLAG_SWAP_DISABLE 1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 * copyeng - which copy engine to use
 * buffer - address of buffer
 * nbytes - number of bytes to send
 * transfer_id - arbitrary ID; reflected to destination
 * flags - CE_SEND_FLAG_* values
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, use CE's default data swap mode.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
int ce_send(struct CE_handle *copyeng,
            void *per_transfer_send_context,
            qdf_dma_addr_t buffer,
            unsigned int nbytes,
            unsigned int transfer_id,
            unsigned int flags,
            unsigned int user_flags);

#ifdef WLAN_FEATURE_FASTPATH
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
                 unsigned int transfer_id, uint32_t download_len);

#endif

void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);
extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
                                qdf_nbuf_t msdu,
                                uint32_t transfer_id,
                                uint32_t len,
                                uint32_t sendhead);

extern int ce_send_single(struct CE_handle *ce_tx_hdl,
                          qdf_nbuf_t msdu,
                          uint32_t transfer_id,
                          uint32_t len);
/*
 * Register a Send Callback function.
 * This function is called as soon as the contents of a Send
 * have reached the destination, unless disable_interrupts is
 * requested.  In this case, the callback is invoked when the
 * send status is polled, shortly after the send completes.
 */
void ce_send_cb_register(struct CE_handle *copyeng,
                         ce_send_cb fn_ptr,
                         void *per_ce_send_context, int disable_interrupts);

/*
 * Return the size of a SendList. This allows the caller to allocate
 * a SendList while the SendList structure remains opaque.
 */
unsigned int ce_sendlist_sizeof(void);

/* Initialize a sendlist */
void ce_sendlist_init(struct ce_sendlist *sendlist);

/* Append a simple buffer (address/length) to a sendlist. */
int ce_sendlist_buf_add(struct ce_sendlist *sendlist,
                        qdf_dma_addr_t buffer,
                        unsigned int nbytes,
                        /* OR-ed with internal flags */
                        uint32_t flags,
                        uint32_t user_flags);

/*
 * Queue a "sendlist" of buffers to be sent using gather to a single
 * anonymous destination buffer
 * copyeng - which copy engine to use
 * sendlist - list of simple buffers to send using gather
 * transfer_id - arbitrary ID; reflected to destination
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 */
int ce_sendlist_send(struct CE_handle *copyeng,
                     void *per_transfer_send_context,
                     struct ce_sendlist *sendlist,
                     unsigned int transfer_id);

/*==================Recv=====================================================*/

/*
 * Make a buffer available to receive. The buffer must be at least of a
 * minimal size appropriate for this copy engine (src_sz_max attribute).
 * copyeng - which copy engine to use
 * per_transfer_recv_context - context passed back to caller's recv_cb
 * buffer - address of buffer in CE space
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes a buffer to Dest ring.
 */
int ce_recv_buf_enqueue(struct CE_handle *copyeng,
                        void *per_transfer_recv_context,
                        qdf_dma_addr_t buffer);

/*
 * Register a Receive Callback function.
 * This function is called as soon as data is received
 * from the source.
 */
void ce_recv_cb_register(struct CE_handle *copyeng,
                         CE_recv_cb fn_ptr,
                         void *per_CE_recv_context,
                         int disable_interrupts);

/*==================CE Watermark=============================================*/

/*
 * Register a Watermark Callback function.
 * This function is called as soon as a watermark level
 * is crossed.  A Watermark Callback function is free to
 * handle received data "en masse"; but then some coordination
 * is required with a registered Receive Callback function.
 * [Suggestion: Either handle Receives in a Receive Callback
 * or en masse in a Watermark Callback; but not both.]
 */
void ce_watermark_cb_register(struct CE_handle *copyeng,
                              CE_watermark_cb fn_ptr,
                              void *per_CE_wm_context);

/*
 * Set low/high watermarks for the send/source side of a copy engine.
 *
 * Typically, the destination side CPU manages watermarks for
 * the receive side and the source side CPU manages watermarks
 * for the send side.
 *
 * A low watermark of 0 is never hit (so the watermark function
 * will never be called for a Low Watermark condition).
 *
 * A high watermark equal to nentries is never hit (so the
 * watermark function will never be called for a High Watermark
 * condition).
 */
void ce_send_watermarks_set(struct CE_handle *copyeng,
                            unsigned int low_alert_nentries,
                            unsigned int high_alert_nentries);

/* Set low/high watermarks for the receive/destination side of copy engine. */
void ce_recv_watermarks_set(struct CE_handle *copyeng,
                            unsigned int low_alert_nentries,
                            unsigned int high_alert_nentries);

/*
 * Return the number of entries that can be queued
 * to a ring at an instant in time.
 *
 * For source ring, does not imply that destination-side
 * buffers are available; merely indicates descriptor space
 * in the source ring.
 *
 * For destination ring, does not imply that previously
 * received buffers have been processed; merely indicates
 * descriptor space in destination ring.
 *
 * Mainly for use with CE Watermark callback.
 */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);

/*
 * Return the number of entries in the ring that are ready
 * to be processed by software.
 *
 * For source ring, the number of descriptors that have
 * been completed and can now be overwritten with new send
 * descriptors.
 *
 * For destination ring, the number of descriptors that
 * are available to be processed (newly received buffers).
 */
unsigned int ce_send_entries_done(struct CE_handle *copyeng);
unsigned int ce_recv_entries_done(struct CE_handle *copyeng);

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED 1

/*
 * Supply data for the next completed unprocessed receive descriptor.
 *
 * For use
 *    with CE Watermark callback,
 *    in a recv_cb function when processing buf_lists
 *    in a recv_cb function in order to mitigate recv_cb's.
 *
 * Implementation note: Pops buffer from Dest ring.
 */
int ce_completed_recv_next(struct CE_handle *copyeng,
                           void **per_CE_contextp,
                           void **per_transfer_contextp,
                           qdf_dma_addr_t *bufferp,
                           unsigned int *nbytesp,
                           unsigned int *transfer_idp,
                           unsigned int *flagsp);

/*
 * Supply data for the next completed unprocessed send descriptor.
 *
 * For use
 *    with CE Watermark callback
 *    in a send_cb function in order to mitigate send_cb's.
 *
 * Implementation note: Pops 1 completed send buffer from Source ring
 */
int ce_completed_send_next(struct CE_handle *copyeng,
                           void **per_CE_contextp,
                           void **per_transfer_contextp,
                           qdf_dma_addr_t *bufferp,
                           unsigned int *nbytesp,
                           unsigned int *transfer_idp,
                           unsigned int *sw_idx,
                           unsigned int *hw_idx,
                           uint32_t *toeplitz_hash_result);

/*==================CE Engine Initialization=================================*/

/* Initialize an instance of a CE */
struct CE_handle *ce_init(struct hif_softc *scn,
                          unsigned int CE_id, struct CE_attr *attr);

/*==================CE Engine Shutdown=======================================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers.  Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
                    void **per_CE_contextp,
                    void **per_transfer_contextp,
                    qdf_dma_addr_t *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends.  Target DMA must be stopped before using
 * this API.
 */
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
                    void **per_CE_contextp,
                    void **per_transfer_contextp,
                    qdf_dma_addr_t *bufferp,
                    unsigned int *nbytesp,
                    unsigned int *transfer_idp,
                    uint32_t *toeplitz_hash_result);

void ce_fini(struct CE_handle *copyeng);

/*==================CE Interrupt Handlers====================================*/
void ce_per_engine_service_any(int irq, struct hif_softc *scn);
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);

/*===================CE cmpl interrupt Enable/Disable =======================*/
void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);

/* API to check if any of the copy engine pipes has
 * pending frames for processing
 */
bool ce_get_rx_pending(struct hif_softc *scn);

/* CE_attr.flags values */
#define CE_ATTR_NO_SNOOP 0x01            /* Use NonSnooping PCIe accesses? */
#define CE_ATTR_BYTE_SWAP_DATA 0x02      /* Byte swap data words */
#define CE_ATTR_SWIZZLE_DESCRIPTORS 0x04 /* Swizzle descriptors? */
#define CE_ATTR_DISABLE_INTR 0x08        /* no interrupt on copy completion */
#define CE_ATTR_ENABLE_POLL 0x10         /* poll for residue descriptors */
#define CE_ATTR_DIAG 0x20                /* Diag CE */

/**
 * struct CE_attr - Attributes of an instance of a Copy Engine
 * @flags: CE_ATTR_* values
 * @priority: TBD
 * @src_nentries: #entries in source ring - Must be a power of 2
 * @src_sz_max: Max source send size for this CE.  This is also the minimum
 *	size of a destination buffer
 * @dest_nentries: #entries in destination ring - Must be a power of 2
 * @reserved: Future Use
 */
struct CE_attr {
    unsigned int flags;
    unsigned int priority;
    unsigned int src_nentries;
    unsigned int src_sz_max;
    unsigned int dest_nentries;
    void *reserved;
};

/*
 * When using sendlist_send to transfer multiple buffer fragments, the
 * transfer context of each fragment, except last one, will be filled
 * with CE_SENDLIST_ITEM_CTXT.  CE_completed_send will return success for
 * each fragment done with send and the transfer context would be
 * CE_SENDLIST_ITEM_CTXT.  Upper layer could use this to identify the
 * status of a send completion.
 */
#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)

/*
 * This is an opaque type that is at least large enough to hold
 * a sendlist.  A sendlist can only be accessed through CE APIs,
 * but this allows a sendlist to be allocated on the run-time
 * stack.  TBDXXX: un-opaque would be simpler...
 */
struct ce_sendlist {
    unsigned int word[62];
};

#define ATH_ISR_NOSCHED 0x0000 /* Do not schedule bottom half/DPC */
#define ATH_ISR_SCHED 0x0001   /* Schedule the bottom half for execution */
#define ATH_ISR_NOTMINE 0x0002 /* for shared IRQ's */

#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
                         qdf_shared_mem_t **ce_sr,
                         uint32_t *ce_sr_ring_size,
                         qdf_dma_addr_t *ce_reg_paddr);
#else
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * Copy engine should release resource to micro controller
 * Micro controller needs
 *  - Copy engine source descriptor base address
 *  - Copy engine source descriptor size
 *  - PCI BAR address to access copy engine register
 *
 * Return: None
 */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
                                       qdf_shared_mem_t **ce_sr,
                                       uint32_t *ce_sr_ring_size,
                                       qdf_dma_addr_t *ce_reg_paddr)
{
}
#endif /* IPA_OFFLOAD */

/* Bump the HIF "no pipe resource" packet-error counter; error codes other
 * than HIF_PIPE_NO_RESOURCE are ignored here.
 */
static inline void ce_pkt_error_count_incr(
    struct HIF_CE_state *_hif_state,
    enum ol_ath_hif_pkt_ecodes _hif_ecode)
{
    struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);

    if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
        (scn->pkt_stats.hif_pipe_no_resrc_count)
        += 1;
}

bool ce_check_rx_pending(struct CE_state *CE_state);
void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id);
struct ce_ops *ce_services_srng(void);
struct ce_ops *ce_services_legacy(void);
bool ce_srng_based(struct hif_softc *scn);
/* Forward declaration */
struct CE_ring_state;

/* Dispatch table of copy-engine service routines; one instance is returned
 * by ce_services_srng() and one by ce_services_legacy().
 */
struct ce_ops {
    uint32_t (*ce_get_desc_size)(uint8_t ring_type);
    int (*ce_ring_setup)(struct hif_softc *scn, uint8_t ring_type,
                         uint32_t ce_id, struct CE_ring_state *ring,
                         struct CE_attr *attr);
    int (*ce_send_nolock)(struct CE_handle *copyeng,
                          void *per_transfer_context,
                          qdf_dma_addr_t buffer,
                          uint32_t nbytes,
                          uint32_t transfer_id,
                          uint32_t flags,
                          uint32_t user_flags);
    int (*ce_sendlist_send)(struct CE_handle *copyeng,
                            void *per_transfer_context,
                            struct ce_sendlist *sendlist,
                            unsigned int transfer_id);
    QDF_STATUS (*ce_revoke_recv_next)(struct CE_handle *copyeng,
                                      void **per_CE_contextp,
                                      void **per_transfer_contextp,
                                      qdf_dma_addr_t *bufferp);
    QDF_STATUS (*ce_cancel_send_next)(struct CE_handle *copyeng,
                                      void **per_CE_contextp,
                                      void **per_transfer_contextp,
                                      qdf_dma_addr_t *bufferp,
                                      unsigned int *nbytesp,
                                      unsigned int *transfer_idp,
                                      uint32_t *toeplitz_hash_result);
    int (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
                               void *per_recv_context,
                               qdf_dma_addr_t buffer);
    bool (*watermark_int)(struct CE_state *CE_state, unsigned int *flags);
    int (*ce_completed_recv_next_nolock)(struct CE_state *CE_state,
                                         void **per_CE_contextp,
                                         void **per_transfer_contextp,
                                         qdf_dma_addr_t *bufferp,
                                         unsigned int *nbytesp,
                                         unsigned int *transfer_idp,
                                         unsigned int *flagsp);
    int (*ce_completed_send_next_nolock)(struct CE_state *CE_state,
                                         void **per_CE_contextp,
                                         void **per_transfer_contextp,
                                         qdf_dma_addr_t *bufferp,
                                         unsigned int *nbytesp,
                                         unsigned int *transfer_idp,
                                         unsigned int *sw_idx,
                                         unsigned int *hw_idx,
                                         uint32_t *toeplitz_hash_result);
    unsigned int (*ce_recv_entries_done_nolock)(struct hif_softc *scn,
                                                struct CE_state *CE_state);
    unsigned int (*ce_send_entries_done_nolock)(struct hif_softc *scn,
                                                struct CE_state *CE_state);
    void (*ce_per_engine_handler_adjust)(struct CE_state *CE_state,
                                         int disable_copy_compl_intr);
    void (*ce_prepare_shadow_register_v2_cfg)(struct hif_softc *scn,
                struct pld_shadow_reg_v2_cfg **shadow_config,
                int *num_shadow_registers_configured);

};

int hif_ce_bus_early_suspend(struct hif_softc *scn);
int hif_ce_bus_late_resume(struct hif_softc *scn);
#endif /* __COPY_ENGINE_API_H__ */
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_assignment.h
b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_assignment.h
new file mode 100644
index 0000000000000000000000000000000000000000..b0a365c01289133cb76abd1fc0cd746f59a59230
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_assignment.h
@@ -0,0 +1,888 @@
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Implementation of the Host-side Host InterFace (HIF) API
 * for a Host/Target interconnect using Copy Engines over PCIe.
 */

#ifndef __HIF_PCI_INTERNAL_H__
#define __HIF_PCI_INTERNAL_H__

#ifndef CONFIG_WIN
#ifndef PEER_CACHEING_HOST_ENABLE
#define PEER_CACHEING_HOST_ENABLE 0
#endif
#endif

#define HIF_PCI_DEBUG ATH_DEBUG_MAKE_MODULE_MASK(0)
#define HIF_PCI_IPA_UC_ASSIGNED_CE 5

#if defined(WLAN_DEBUG) || defined(DEBUG)
static ATH_DEBUG_MASK_DESCRIPTION g_hif_debug_description[] = {
    {HIF_PCI_DEBUG, "hif_pci"},
};

ATH_DEBUG_INSTANTIATE_MODULE_VAR(hif, "hif", "PCIe Host Interface",
                                 ATH_DEBUG_MASK_DEFAULTS | ATH_DEBUG_INFO,
                                 ATH_DEBUG_DESCRIPTION_COUNT
                                         (g_hif_debug_description),
                                 g_hif_debug_description);
#endif

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/* globals are initialized to 0 by the compiler */;
spinlock_t pcie_access_log_lock;
unsigned int pcie_access_log_seqnum;
struct HIF_ACCESS_LOG pcie_access_log[PCIE_ACCESS_LOG_NUM];
static void hif_target_dump_access_log(void);
#endif

/*
 * Host software's Copy Engine configuration.
 * This table is derived from the CE_PCI TABLE, above.
 */
#ifdef BIG_ENDIAN_HOST
#define CE_ATTR_FLAGS CE_ATTR_BYTE_SWAP_DATA
#else
#define CE_ATTR_FLAGS 0
#endif

/* Maximum number of Copy Engine's supported */
#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048
#define CE_HTT_H2T_MSG_SRC_NENTRIES_AR900B 4096

#define EPPING_CE_FLAGS_POLL \
    (CE_ATTR_DISABLE_INTR|CE_ATTR_ENABLE_POLL|CE_ATTR_FLAGS)

#ifdef CONFIG_WIN
#define PIPEDIR_INOUT_H2H 4
#endif

#define CE_ATTR_DIAG_FLAGS \
    (CE_ATTR_FLAGS | CE_ATTR_DIAG)

#ifdef QCA_WIFI_3_0
static struct CE_attr host_ce_config_wlan[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,},
    /* target->host HTT + HTC control */
    { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,},
    /* target->host WMI */
    { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,},
    /* host->target WMI */
    { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
    /* host->target HTT */
    { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,
        CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
    /* ipa_uc->target HTC control */
    { /* CE5 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,
        CE_HTT_H2T_MSG_SRC_NENTRIES, 512, 0, NULL,},
    /* Target autonomous HIF_memcpy */
    { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
    /* ce_diag, the Diagnostic Window */
    { /* CE7 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
    /* Target to uMC */
    { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
    /* target->host HTT */
    { /* CE9 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,},
    /* target->host HTT */
    { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,},
    /* target -> host PKTLOG */
    { /* CE11 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,},
};

static struct CE_pipe_config target_ce_config_wlan[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* target->host HTT */
    { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* target->host WMI + HTC control */
    { /* CE2 */ 2, PIPEDIR_IN, 64, 2048, CE_ATTR_FLAGS, 0,},
    /* host->target WMI */
    { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* host->target HTT */
    { /* CE4 */ 4, PIPEDIR_OUT, 256, 256,
        (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,},
    /* NB: 50% of src nentries, since tx has 2 frags */
    /* ipa_uc->target */
    { /* CE5 */ 5, PIPEDIR_OUT, 1024, 64,
        (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,},
    /* Reserved for target autonomous HIF_memcpy */
    { /* CE6 */ 6, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,},
    /* CE7 used only by Host */
    { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0,
        (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,},
    /* CE8 used only by IPA */
    { /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* CE9 target->host HTT */
    { /* CE9 */ 9, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* CE10 target->host HTT */
    { /* CE10 */ 10, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* Target -> host PKTLOG */
    { /* CE11 */ 11, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
};

#ifdef WLAN_FEATURE_EPPING
static struct CE_attr host_ce_config_wlan_epping_poll[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,},
    /* target->host EP-ping */
    { /* CE1 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 128, NULL,},
    /* target->host EP-ping */
    { /* CE2 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 128, NULL,},
    /* host->target EP-ping */
    { /* CE3 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,},
    /* host->target EP-ping */
    { /* CE4 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,},
    /* EP-ping heartbeat */
    { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,},
    /* unused */
    { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
    /* ce_diag, the Diagnostic Window */
    { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
};

static struct CE_attr host_ce_config_wlan_epping_irq[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,},
    /* target->host EP-ping */
    { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,},
    /* target->host EP-ping */
    { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,},
    /* host->target EP-ping */
    { /* CE3 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,},
    /* host->target EP-ping */
    { /* CE4 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,},
    /* EP-ping heartbeat */
    { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,},
    /* unused */
    { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
    /* ce_diag, the Diagnostic Window */
    { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
};
/*
 * EP-ping firmware's CE configuration
 */
static struct CE_pipe_config target_ce_config_wlan_epping[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ 0, PIPEDIR_OUT, 16, 2048, CE_ATTR_FLAGS, 0,},
    /* target->host EP-ping */
    { /* CE1 */ 1, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,},
    /* target->host EP-ping */
    { /* CE2 */ 2, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,},
    /* host->target EP-ping */
    { /* CE3 */ 3, PIPEDIR_OUT, 128, 2048, CE_ATTR_FLAGS, 0,},
    /* host->target EP-ping */
    { /* CE4 */ 4, PIPEDIR_OUT, 128, 2048, CE_ATTR_FLAGS, 0,},
    /* EP-ping heartbeat */
    { /* CE5 */ 5, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,},
    /* unused */
    { /* CE6 */ 6, PIPEDIR_INOUT, 0, 0, CE_ATTR_FLAGS, 0,},
    /* CE7 used only by Host */
    { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, 0, 0,},
    /* CE8 used only by IPA */
    { /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}
};
#endif
#else
static struct CE_attr host_ce_config_wlan[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
    /* target->host HTT + HTC control */
    { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,},
    /* target->host WMI */
    { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
    /* host->target WMI */
    { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
    /* host->target HTT */
    { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0,
        CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
    /* ipa_uc->target HTC control */
    { /* CE5 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0,
        1024, 512, 0, NULL,},
    /* Target autonomous HIF_memcpy */
    { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
    /* ce_diag, the Diagnostic Window */
    { /* CE7 */ (CE_ATTR_DIAG_FLAGS | CE_ATTR_DISABLE_INTR),
        0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
};

static struct CE_pipe_config target_ce_config_wlan[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
    /* target->host HTT + HTC control */
    { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* target->host WMI */
    { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* host->target WMI */
    { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* host->target HTT */
    { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
    /* NB: 50% of src nentries, since tx has 2 frags */
    /* ipa_uc->target HTC control */
    { /* CE5 */ 5, PIPEDIR_OUT, 1024, 64, CE_ATTR_FLAGS, 0,},
    /* Reserved for target autonomous HIF_memcpy */
    { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
    /* CE7 used only by Host */
    { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, 0, 0,},
    /* CE8 used only by IPA */
    { /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}
};

#ifdef WLAN_FEATURE_EPPING
static struct CE_attr host_ce_config_wlan_epping_poll[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
    /* target->host EP-ping */
    { /* CE1 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 128, NULL,},
    /* target->host EP-ping */
    { /* CE2 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 128, NULL,},
    /* host->target EP-ping */
    { /* CE3 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,},
    /* host->target EP-ping */
    { /* CE4 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,},
    /* EP-ping heartbeat */
    { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,},
    /* unused */
    { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
    /* ce_diag, the Diagnostic Window */
    { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
    { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
    /* The following CEs are not being used yet */
    { /* CE9 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
    { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
    { /* CE11 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
};
static struct CE_attr host_ce_config_wlan_epping_irq[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
    /* target->host EP-ping */
    { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,},
    /* target->host EP-ping */
    { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,},
    /* host->target EP-ping */
    { /* CE3 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,},
    /* host->target EP-ping */
    { /* CE4 */ CE_ATTR_FLAGS, 0, 128, 2048, 0, NULL,},
    /* EP-ping heartbeat */
    { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,},
    /* unused */
    { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
    /* ce_diag, the Diagnostic Window */
    { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
    { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
    /* The following CEs are not being used yet */
    { /* CE9 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
    { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
    { /* CE11 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
};
/*
 * EP-ping firmware's CE configuration
 */
static struct CE_pipe_config target_ce_config_wlan_epping[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ 0, PIPEDIR_OUT, 16, 256, CE_ATTR_FLAGS, 0,},
    /* target->host EP-ping */
    { /* CE1 */ 1, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,},
    /* target->host EP-ping */
    { /* CE2 */ 2, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,},
    /* host->target EP-ping */
    { /* CE3 */ 3, PIPEDIR_OUT, 128, 2048, CE_ATTR_FLAGS, 0,},
    /* host->target EP-ping */
    { /* CE4 */ 4, PIPEDIR_OUT, 128, 2048, CE_ATTR_FLAGS, 0,},
    /* EP-ping heartbeat */
    { /* CE5 */ 5, PIPEDIR_IN, 128, 2048, CE_ATTR_FLAGS, 0,},
    /* unused */
    { /* CE6 */ 6, PIPEDIR_INOUT, 0, 0, CE_ATTR_FLAGS, 0,},
    /* CE7 used only by Host */
    { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, 0, 0,},
    /* CE8 used only by IPA */
    { /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
    { /* CE9 */ 9, PIPEDIR_IN, 0, 0, CE_ATTR_FLAGS, 0,},
    { /* CE10 */ 10, PIPEDIR_IN, 0, 0, CE_ATTR_FLAGS, 0,},
    { /* CE11 */ 11, PIPEDIR_IN, 0, 0, CE_ATTR_FLAGS, 0,},
};
#endif
#endif

static struct CE_attr host_ce_config_wlan_ar9888[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL, },
    /* target->host BMI + HTC control */
    /* could be moved to share CE3 */
    { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, },
    /* target->host WMI */
    { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL, },
    /* host->target WMI */
    { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL, },
    /* host->target HTT */
    { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0,
        CE_HTT_H2T_MSG_SRC_NENTRIES_AR900B, 256, 0, NULL, },
#ifdef WLAN_FEATURE_FASTPATH
    /* target->host HTT messages */
    { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, },
#else /* WLAN_FEATURE_FASTPATH */
    /* unused */
    { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
#endif /* WLAN_FEATURE_FASTPATH */
    /* Target autonomous HIF_memcpy */
    { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
    /* ce_diag, the Diagnostic Window */
    { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL, },
    /* Target autonomous HIF_memcpy */
    { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
};

static struct CE_attr host_ce_config_wlan_ar900b[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL, },
    /* target->host BMI + HTC control */
    /* could be moved to share CE3 */
    { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, },
    /* target->host WMI */
    { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL, },
    /* host->target WMI */
    { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL, },
    /* host->target HTT */
    { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0,
        CE_HTT_H2T_MSG_SRC_NENTRIES_AR900B, 256, 0, NULL, },
#ifdef WLAN_FEATURE_FASTPATH
    /* target->host HTT messages */
    { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, },
#else /* WLAN_FEATURE_FASTPATH */
    /* unused */
    { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
#endif /* WLAN_FEATURE_FASTPATH */
    /* Target autonomous HIF_memcpy */
    { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
    /* ce_diag, the Diagnostic Window */
    { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL, },
    /* target->host pktlog */
    { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL, },
    /* Target autonomous HIF_memcpy */
    { /* CE9 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
    /* Target autonomous HIF_memcpy */
    { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
    /* Target autonomous HIF_memcpy */
    { /* CE11 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
};

static struct CE_attr host_lowdesc_ce_cfg_wlan_ar9888[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL, },
    /* could be moved to share CE3 */
#ifdef WLAN_FEATURE_FASTPATH
    /* target->host BMI + HTC control */
    { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 64, NULL, },
#else
    /* target->host BMI + HTC control */
    { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, },
#endif
    /* target->host WMI */
    { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL, },
    /* host->target WMI */
    { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL, },
    /* host->target HTT */
    { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0,
        CE_HTT_H2T_MSG_SRC_NENTRIES_AR900B, 256, 0, NULL, },
#ifdef WLAN_FEATURE_FASTPATH
    /* target->host HTT messages */
    { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, },
#else /* WLAN_FEATURE_FASTPATH */
    /* unused */
    { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
#endif /* WLAN_FEATURE_FASTPATH */
    /* Target autonomous HIF_memcpy */
    { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
    /* ce_diag, the Diagnostic Window */
    { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL, },
    /* Target autonomous HIF_memcpy */
    { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
};

static struct CE_attr host_lowdesc_ce_cfg_wlan_ar900b[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL, },
    /* could be moved to share CE3 */
#ifdef WLAN_FEATURE_FASTPATH
    /* target->host BMI + HTC control */
    { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 64, NULL, },
#else
    /* target->host BMI + HTC control */
    { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, },
#endif
    /* target->host WMI */
    { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL, },
    /* host->target WMI */
    { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL, },
    /* host->target HTT */
    { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0,
        CE_HTT_H2T_MSG_SRC_NENTRIES_AR900B, 256, 0, NULL, },
#ifdef WLAN_FEATURE_FASTPATH
    /* target->host HTT messages */
    { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, },
#else /* WLAN_FEATURE_FASTPATH */
    /* unused */
    { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
#endif /* WLAN_FEATURE_FASTPATH */
    /* Target autonomous HIF_memcpy */
    { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
    /* ce_diag, the Diagnostic Window */
    { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL, },
    /* target->host pktlog */
    { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL, },
    /* Target autonomous HIF_memcpy */
    { /* CE9 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
    /* Target autonomous HIF_memcpy */
    { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
    /* Target autonomous HIF_memcpy */
    { /* CE11 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
};

static struct CE_attr host_lowdesc_ce_cfg_wlan_ar900b_nopktlog[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL, },
    /* could be moved to share CE3 */
#ifdef WLAN_FEATURE_FASTPATH
    /* target->host BMI + HTC control */
    { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 64, NULL, },
#else
    /* target->host BMI + HTC control */
    { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, },
#endif
    /* target->host WMI */
    { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL, },
    /* host->target WMI */
    { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL, },
    /* host->target HTT */
    { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0,
        CE_HTT_H2T_MSG_SRC_NENTRIES_AR900B, 256, 0, NULL, },
#ifdef WLAN_FEATURE_FASTPATH
    /* target->host HTT messages */
    { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL, },
#else /* WLAN_FEATURE_FASTPATH */
    /* unused */
    { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
#endif /* WLAN_FEATURE_FASTPATH */
    /* Target autonomous HIF_memcpy */
    { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
    /* ce_diag, the Diagnostic Window */
    { /* CE7 */ CE_ATTR_DIAG_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL, },
    /* target->host pktlog */
    { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
    /* Target autonomous HIF_memcpy */
    { /* CE9 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
    /* Target autonomous HIF_memcpy */
    { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
    /* Target autonomous HIF_memcpy */
    { /* CE11 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL, },
};

static struct CE_pipe_config target_ce_config_wlan_ar9888[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0, },
    /* target->host HTC control */
    { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0, },
    /* target->host WMI */
    { /* CE2 */ 2, PIPEDIR_IN, 64, 2048, CE_ATTR_FLAGS, 0, },
    /* host->target WMI */
    { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0, },
    /* host->target HTT */
    { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0, },
    /* NB: 50% of src nentries, since tx has 2 frags */
#ifdef WLAN_FEATURE_FASTPATH
    /* target->host HTT */
    { /* CE5 */ 5, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0, },
#else
    /* unused */
    { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0, },
#endif
    /* Reserved for target autonomous HIF_memcpy */
    { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0, },
    /* CE7 used only by Host */
};

static struct CE_pipe_config target_ce_config_wlan_ar900b[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0, },
    /* target->host HTC control */
    { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0, },
    /* target->host WMI */
    { /* CE2 */ 2, PIPEDIR_IN, 64, 2048, CE_ATTR_FLAGS, 0, },
    /* host->target WMI */
    { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0, },
    /* host->target HTT */
    { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0, },
    /* NB: 50% of src nentries, since tx has 2 frags */
#ifdef WLAN_FEATURE_FASTPATH
    /* target->host HTT */
    { /* CE5 */ 5, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0, },
#else
    /* unused */
    { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0, },
#endif
    /* Reserved for target autonomous HIF_memcpy */
    { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0, },
    /* CE7 used only by Host */
    { /* CE7 */ 7, PIPEDIR_INOUT, 0, 0, 0, 0, },
    { /* CE8 */ 8, PIPEDIR_IN, 64, 2048, CE_ATTR_FLAGS
        /* target->host pktlog */
        | CE_ATTR_DISABLE_INTR, 0, },
#if PEER_CACHEING_HOST_ENABLE
    /* target autonomous qcache memcpy */
    { /* CE9 */ 9, PIPEDIR_INOUT, 32, 2048, CE_ATTR_FLAGS |
        CE_ATTR_DISABLE_INTR, 0, },
#endif
};

static struct CE_attr host_ce_config_wlan_qca8074[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,},
    /* target->host HTT + HTC control */
    { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,},
    /* target->host WMI */
    { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 256, NULL,},
    /* host->target WMI (mac0) */
    { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
    /* host->target HTT */
    { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,
        CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
    /* target -> host PKTLOG */
    { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,},
    /* Target autonomous HIF_memcpy */
    { /* CE6 */ CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR, 0, 0,
        0, 0, NULL,},
    /* host->target WMI (mac1) */
    { /* CE7 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
    /* Target to uMC */
    { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
    /* host->target WMI (mac2) */
    { /* CE9 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
    /* target->host HTT */
    { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,},
    /* CE11 unused */
};

static struct CE_pipe_config target_ce_config_wlan_qca8074[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* target->host HTT */
    { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* target->host WMI + HTC control */
    { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* host->target WMI */
    { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* host->target HTT */
    { /* CE4 */ 4, PIPEDIR_OUT, 256, 256,
        (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,},
    /* NB: 50% of src nentries, since tx has 2 frags */
    /* Target -> host PKTLOG */
    { /* CE5 */ 5, PIPEDIR_IN, 32, 2048, 0, 0,},
    /* Reserved for target autonomous HIF_memcpy */
    { /* CE6 */ 6, PIPEDIR_INOUT, 32, 65535, 64, 0,},
    /* CE7 used only by Host */
    { /* CE7 */ 7, PIPEDIR_OUT, 32, 2048,
        8192, 0,},
    /* CE8 used only by IPA */
    { /* CE8 */ 8, PIPEDIR_INOUT, 32, 65535, 112, 0,},
    /* CE9 target->host HTT */
    { /* CE9 */ 9, PIPEDIR_OUT, 32, 2048, 8192, 0,},
    /* CE10 target->host HTT */
    { /* CE10 */ 10, PIPEDIR_INOUT_H2H, 0, 0, 0, 0,},
};

static struct CE_attr host_ce_config_wlan_qca8074_pci[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ EPPING_CE_FLAGS_POLL, 0, 16, 2048, 0, NULL,},
    /* target->host HTT + HTC control */
    { /* CE1 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 512, NULL,},
    /* target->host WMI */
    { /* CE2 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 256, NULL,},
    /* host->target WMI (mac0) */
    { /* CE3 */ EPPING_CE_FLAGS_POLL, 0, 32, 2048, 0, NULL,},
    /* host->target HTT */
    { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,
        CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
    /* target -> host PKTLOG */
    { /* CE5 */ EPPING_CE_FLAGS_POLL, 0, 0, 2048, 512, NULL,},
    /* Target autonomous HIF_memcpy */
    { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
    /* host->target WMI (mac1) */
    { /* CE7 */ EPPING_CE_FLAGS_POLL, 0, 32, 2048, 0, NULL,},
    /* Target to uMC */
    { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
    /* host->target WMI (mac2) */
    { /* CE9 */ EPPING_CE_FLAGS_POLL, 0, 32, 2048, 0, NULL,},
    /* target->host HTT */
    { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,},
    /* CE11 unused */
};

static struct CE_pipe_config target_ce_config_wlan_qca8074_pci[] = {
    /* host->target HTC control and raw streams */
    { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* target->host HTT */
    { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* target->host WMI + HTC control */
    { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* host->target WMI */
    { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* host->target HTT */
    { /* CE4 */ 4, PIPEDIR_OUT, 256, 256,
        (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,},
    /* NB: 50% of src nentries, since tx has 2 frags */
    /* ipa_uc->target */
    { /* CE5 */ 5, PIPEDIR_OUT, 1024, 64,
        (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,},
    /* Reserved for target autonomous HIF_memcpy */
    { /* CE6 */ 6, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,},
    /* CE7 used only by Host */
    { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0,
        (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,},
    /* CE8 used only by IPA */
    { /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* CE9 target->host HTT */
    { /* CE9 */ 9, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
    /* CE10 target->host HTT */
    { /* CE10 */ 10, PIPEDIR_IN,
32, 2048, CE_ATTR_FLAGS, 0,}, + /* Target -> host PKTLOG */ + { /* CE11 */ 11, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, +}; + +static struct CE_attr host_lowdesc_ce_config_wlan_adrastea_nopktlog[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,}, + /* target->host HTT + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 256, NULL,}, + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 64, NULL,}, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* host->target HTT */ + { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, + /* ipa_uc->target HTC control */ + { /* CE5 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 512, 0, NULL,}, + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* Target to uMC */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* target->host HTT */ + { /* CE9 */ CE_ATTR_FLAGS, 0, 0, 2048, 64, NULL,}, + /* target->host HTT */ + { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 2048, 64, NULL,}, + /* target -> host PKTLOG */ + { /* CE11 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + 0, 2048, 0, NULL,}, +}; + +static struct CE_attr host_ce_config_wlan_adrastea[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,}, + /* target->host HTT + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 128, NULL,}, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* host->target HTT */ + { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, + /* ipa_uc->target HTC control */ + { /* CE5 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + 
CE_HTT_H2T_MSG_SRC_NENTRIES, 512, 0, NULL,}, + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* Target to uMC */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* target->host HTT */ + { /* CE9 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target->host HTT */ + { /* CE10 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target -> host PKTLOG */ + { /* CE11 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, +}; + +static struct CE_pipe_config + target_lowdesc_ce_config_wlan_adrastea_nopktlog[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host HTT */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host WMI + HTC control */ + { /* CE2 */ 2, PIPEDIR_IN, 64, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* NB: 50% of src nentries, since tx has 2 frags */ + /* ipa_uc->target */ + { /* CE5 */ 5, PIPEDIR_OUT, 1024, 64, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* CE8 used only by IPA */ + { /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* CE9 target->host HTT */ + { /* CE9 */ 9, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* CE10 target->host HTT */ + { /* CE10 */ 10, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* Target -> host PKTLOG */ + { /* CE11 */ 11, PIPEDIR_IN, 32, 2048, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, +}; + +static struct CE_pipe_config target_ce_config_wlan_adrastea[] = { + /* host->target HTC control 
and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host HTT */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host WMI + HTC control */ + { /* CE2 */ 2, PIPEDIR_IN, 64, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* NB: 50% of src nentries, since tx has 2 frags */ + /* ipa_uc->target */ + { /* CE5 */ 5, PIPEDIR_OUT, 1024, 64, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* CE8 used only by IPA */ + { /* CE8 */ 8, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* CE9 target->host HTT */ + { /* CE9 */ 9, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* CE10 target->host HTT */ + { /* CE10 */ 10, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* Target -> host PKTLOG */ + { /* CE11 */ 11, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, +}; + +#define QCA_6290_CE_COUNT 9 +#ifdef CONFIG_WIN +static struct CE_attr host_ce_config_wlan_qca6290[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,}, + /* target->host HTT + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,}, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* host->target HTT */ + { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, + /* target -> host PKTLOG */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* host->target WMI 
(mac1) */ + { /* CE7 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* Reserved for target */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; + +static struct CE_pipe_config target_ce_config_wlan_qca6290[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host HTT */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host WMI + HTC control */ + { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Target -> host PKTLOG */ + { /* CE5 */ 5, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_OUT, 32, 2048, + 8192, 0,}, + /* Reserved for target */ + { /* CE8 */ 8, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; +#else +static struct CE_attr host_ce_config_wlan_qca6290[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 2048, 0, NULL,}, + /* target->host HTT + HTC control */ + { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* target->host WMI */ + { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,}, + /* host->target WMI */ + { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, + /* host->target HTT */ + { /* CE4 */ (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0, + CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, + /* target -> host PKTLOG */ + { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 2048, 512, NULL,}, + /* Target autonomous HIF_memcpy */ + { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* ce_diag, the Diagnostic Window */ + { /* CE7 */ (CE_ATTR_DIAG_FLAGS | CE_ATTR_DISABLE_INTR), 0, 
+ 2, DIAG_TRANSFER_LIMIT, 2, NULL,}, + /* Reserved for target */ + { /* CE8 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; + +static struct CE_pipe_config target_ce_config_wlan_qca6290[] = { + /* host->target HTC control and raw streams */ + { /* CE0 */ 0, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host HTT */ + { /* CE1 */ 1, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* target->host WMI + HTC control */ + { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target WMI */ + { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* host->target HTT */ + { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Target -> host PKTLOG */ + { /* CE5 */ 5, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, + /* Reserved for target autonomous HIF_memcpy */ + { /* CE6 */ 6, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE7 used only by Host */ + { /* CE7 */ 7, PIPEDIR_INOUT_H2H, 0, 0, + (CE_ATTR_FLAGS | CE_ATTR_DISABLE_INTR), 0,}, + /* Reserved for target */ + { /* CE8 */ 8, PIPEDIR_INOUT, 32, 16384, CE_ATTR_FLAGS, 0,}, + /* CE 9, 10, 11 belong to CoreBsp & MHI driver */ +}; +#endif +#endif /* __HIF_PCI_INTERNAL_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_bmi.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_bmi.c new file mode 100644 index 0000000000000000000000000000000000000000..49004bd2503da98eea495f60d686e69d6ffdd345 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_bmi.c @@ -0,0 +1,316 @@ +/* + * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * NOTE(review): several #include directives below lost their angle-bracket
 * header names during patch extraction (e.g. the one annotated
 * "qdf_atomic_read" is presumably <qdf_atomic.h>) — restore from the
 * upstream qca-wifi-host-cmn tree before building.
 */
#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include "qdf_status.h"
#include /* qdf_atomic_read */
#include 
#include "hif_io32.h"
#include 
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include 
#include "hif_main.h"
#include "ce_api.h"
#include "ce_bmi.h"
#include "qdf_trace.h"
#include "hif_debug.h"
#include "bmi_msg.h"
#include "qdf_module.h"

/* Track a BMI transaction that is in progress */
#ifndef BIT
#define BIT(n) (1 << (n))
#endif

/* Completion flags OR'ed into BMI_transaction.bmi_transaction_flags; the
 * transaction semaphore is only released once the required flags are set.
 */
enum {
	BMI_REQ_SEND_DONE = BIT(0),   /* the BMI tx (send) has completed */
	BMI_RESP_RECV_DONE = BIT(1),  /* the BMI response has been received */
};

/* State of one in-flight BMI request/response exchange. Allocated per call
 * in hif_exchange_bmi_msg() and freed there on every exit path.
 */
struct BMI_transaction {
	struct HIF_CE_state *hif_state;
	/* Signalled from CE completion callbacks; waited on by the caller */
	qdf_semaphore_t bmi_transaction_sem;
	uint8_t *bmi_request_host;	  /* Req BMI msg in Host addr space */
	qdf_dma_addr_t bmi_request_CE;	  /* Req BMI msg in CE addr space */
	uint32_t bmi_request_length;	  /* Length of BMI request */
	uint8_t *bmi_response_host;	  /* Rsp BMI msg in Host addr space */
	qdf_dma_addr_t bmi_response_CE;	  /* Rsp BMI msg in CE addr space */
	unsigned int bmi_response_length; /* Length of received response */
	unsigned int bmi_timeout_ms;
	uint32_t bmi_transaction_flags;	  /* BMI_REQ_SEND_DONE and friends */
};

/*
 * send/recv completion functions for BMI.
 * NB: The "net_buf" parameter is actually just a
 * straight buffer, not an sk_buff.
 */

/**
 * hif_bmi_send_done() - CE send-completion callback for a BMI request
 * @copyeng: copy engine handle the request was sent on
 * @ce_context: CE registration context (unused here)
 * @transfer_context: the struct BMI_transaction for this exchange
 * @data: DMA address of the sent buffer
 * @nbytes: number of bytes sent
 * @transfer_id/@sw_index/@hw_index/@toeplitz_hash_result: CE bookkeeping
 *
 * Releases the transaction semaphore either unconditionally (polling
 * build) or once no response is outstanding (interrupt build).
 */
void hif_bmi_send_done(struct CE_handle *copyeng, void *ce_context,
		       void *transfer_context, qdf_dma_addr_t data,
		       unsigned int nbytes,
		       unsigned int transfer_id, unsigned int sw_index,
		       unsigned int hw_index, uint32_t toeplitz_hash_result)
{
	struct BMI_transaction *transaction =
		(struct BMI_transaction *)transfer_context;

#ifdef BMI_RSP_POLLING
	/*
	 * Fix EV118783: release the semaphore right after sending,
	 * no matter whether a response is expected now (the response,
	 * if any, is polled for in hif_exchange_bmi_msg()).
	 */
	qdf_semaphore_release(&transaction->bmi_transaction_sem);
#else
	/*
	 * If a response is anticipated, we'll complete the
	 * transaction if the response has been received.
	 * If no response is anticipated, complete the
	 * transaction now.
	 */
	transaction->bmi_transaction_flags |= BMI_REQ_SEND_DONE;

	/* resp isn't needed or has already been received,
	 * never assume resp comes later than this
	 */
	if (!transaction->bmi_response_CE ||
	    (transaction->bmi_transaction_flags & BMI_RESP_RECV_DONE)) {
		qdf_semaphore_release(&transaction->bmi_transaction_sem);
	}
#endif
}

#ifndef BMI_RSP_POLLING
/**
 * hif_bmi_recv_data() - CE recv-completion callback for a BMI response
 * @copyeng: copy engine handle the response arrived on
 * @ce_context: CE registration context (unused here)
 * @transfer_context: the struct BMI_transaction for this exchange
 * @data: DMA address of the received buffer
 * @nbytes: number of bytes received
 * @transfer_id/@flags: CE bookkeeping
 *
 * Records the response length and wakes the waiter once the matching
 * send has also completed.
 */
void hif_bmi_recv_data(struct CE_handle *copyeng, void *ce_context,
		       void *transfer_context, qdf_dma_addr_t data,
		       unsigned int nbytes,
		       unsigned int transfer_id, unsigned int flags)
{
	struct BMI_transaction *transaction =
		(struct BMI_transaction *)transfer_context;

	transaction->bmi_response_length = nbytes;
	transaction->bmi_transaction_flags |= BMI_RESP_RECV_DONE;

	/* when both send/recv are done, the sem can be released */
	if (transaction->bmi_transaction_flags & BMI_REQ_SEND_DONE)
		qdf_semaphore_release(&transaction->bmi_transaction_sem);
}
#endif

/* Timeout for BMI message exchange, in milliseconds */
#define HIF_EXCHANGE_BMI_MSG_TIMEOUT 6000

QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
				qdf_dma_addr_t bmi_cmd_da,
				qdf_dma_addr_t bmi_rsp_da,
				uint8_t *bmi_request,
				uint32_t request_length,
				uint8_t *bmi_response,
				uint32_t
					*bmi_response_lengthp,
				uint32_t TimeoutMS)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *send_pipe_info =
		&(hif_state->pipe_info[BMI_CE_NUM_TO_TARG]);
	struct CE_handle *ce_send_hdl = send_pipe_info->ce_hdl;
	qdf_dma_addr_t CE_request, CE_response = 0;
	struct BMI_transaction *transaction = NULL;
	int status = QDF_STATUS_SUCCESS;
	struct HIF_CE_pipe_info *recv_pipe_info =
		&(hif_state->pipe_info[BMI_CE_NUM_TO_HOST]);
	struct CE_handle *ce_recv = recv_pipe_info->ce_hdl;
	unsigned int mux_id = 0;
	unsigned int transaction_id = 0xffff;
	unsigned int user_flags = 0;
#ifdef BMI_RSP_POLLING
	qdf_dma_addr_t buf;
	unsigned int completed_nbytes, id, flags;
	int i;
#endif

	transaction =
		(struct BMI_transaction *)qdf_mem_malloc(sizeof(*transaction));
	if (unlikely(!transaction)) {
		HIF_ERROR("%s: no memory", __func__);
		return QDF_STATUS_E_NOMEM;
	}
	transaction_id = (mux_id & MUX_ID_MASK) |
		(transaction_id & TRANSACTION_ID_MASK);
#ifdef QCA_WIFI_3_0
	user_flags &= DESC_DATA_FLAG_MASK;
#endif
	A_TARGET_ACCESS_LIKELY(scn);

	/* Initialize bmi_transaction_sem to block; it is released from the
	 * CE completion callbacks (hif_bmi_send_done / hif_bmi_recv_data).
	 */
	qdf_semaphore_init(&transaction->bmi_transaction_sem);
	qdf_semaphore_acquire(&transaction->bmi_transaction_sem);

	transaction->hif_state = hif_state;
	transaction->bmi_request_host = bmi_request;
	transaction->bmi_request_length = request_length;
	transaction->bmi_response_length = 0;
	transaction->bmi_timeout_ms = TimeoutMS;
	transaction->bmi_transaction_flags = 0;

	/* The caller supplies pre-mapped DMA addresses, so no mapping is
	 * done here — the commented call shows the equivalent operation:
	 * CE_request = dma_map_single(dev,
	 * (void *)bmi_request, request_length, DMA_TO_DEVICE);
	 */
	CE_request = bmi_cmd_da;
	transaction->bmi_request_CE = CE_request;

	if (bmi_response) {

		/*
		 * CE_response = dma_map_single(dev, bmi_response,
		 * BMI_DATASZ_MAX, DMA_FROM_DEVICE);
		 */
		CE_response = bmi_rsp_da;
		transaction->bmi_response_host = bmi_response;
		transaction->bmi_response_CE = CE_response;
		/* dma_cache_sync(dev, bmi_response,
		 * BMI_DATASZ_MAX, DMA_FROM_DEVICE);
		 */
		qdf_mem_dma_sync_single_for_device(scn->qdf_dev,
						   CE_response,
						   BMI_DATASZ_MAX,
						   DMA_FROM_DEVICE);
		/* Post the response buffer before sending the request so the
		 * target's reply always has somewhere to land.
		 * NOTE(review): return value is ignored — enqueue failure
		 * would surface later as a transaction timeout.
		 */
		ce_recv_buf_enqueue(ce_recv, transaction,
				    transaction->bmi_response_CE);
		/* NB: see HIF_BMI_recv_done */
	} else {
		transaction->bmi_response_host = NULL;
		transaction->bmi_response_CE = 0;
	}

	/* dma_cache_sync(dev, bmi_request, request_length, DMA_TO_DEVICE); */
	qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_request,
					   request_length, DMA_TO_DEVICE);

	status =
		ce_send(ce_send_hdl, transaction,
			CE_request, request_length,
			transaction_id, 0, user_flags);
	ASSERT(status == QDF_STATUS_SUCCESS);
	/* NB: see hif_bmi_send_done */

	/* TBDXXX: handle timeout */

	/* Wait for BMI request/response transaction to complete */
	/* Always just wait for BMI request here if
	 * BMI_RSP_POLLING is defined
	 */
	if (qdf_semaphore_acquire_timeout
		    (&transaction->bmi_transaction_sem,
		     HIF_EXCHANGE_BMI_MSG_TIMEOUT)) {
		HIF_ERROR("%s: Fatal error, BMI transaction timeout. Please check the HW interface!!",
			  __func__);
		qdf_mem_free(transaction);
		return QDF_STATUS_E_TIMEOUT;
	}

	if (bmi_response) {
#ifdef BMI_RSP_POLLING
		/* Fix EV118783, do not wait a semaphore for the BMI response
		 * since the relative interruption may be lost.
		 * poll the BMI response instead.
		 */
		i = 0;
		while (ce_completed_recv_next(
			ce_recv, NULL, NULL, &buf,
			&completed_nbytes, &id,
			&flags) != QDF_STATUS_SUCCESS) {
			if (i++ > BMI_RSP_TO_MILLISEC) {
				HIF_ERROR("%s:error, can't get bmi response",
					__func__);
				status = QDF_STATUS_E_BUSY;
				break;
			}
			OS_DELAY(1000);
		}

		if ((status == QDF_STATUS_SUCCESS) && bmi_response_lengthp)
			*bmi_response_lengthp = completed_nbytes;
#else
		if ((status == QDF_STATUS_SUCCESS) && bmi_response_lengthp) {
			*bmi_response_lengthp =
				transaction->bmi_response_length;
		}
#endif

	}

	/* Buffers were caller-mapped, so no unmap here; the commented calls
	 * show what a self-mapping implementation would undo:
	 * dma_unmap_single(dev, transaction->bmi_request_CE,
	 * request_length, DMA_TO_DEVICE);
	 * bus_unmap_single(scn->sc_osdev,
	 * transaction->bmi_request_CE,
	 * request_length, BUS_DMA_TODEVICE);
	 */

	if (status != QDF_STATUS_SUCCESS) {
		qdf_dma_addr_t unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;
		unsigned int toeplitz_hash_result;

		/* Reclaim the posted send descriptor on failure */
		ce_cancel_send_next(ce_send_hdl,
				    NULL, NULL, &unused_buffer,
				    &unused_nbytes, &unused_id,
				    &toeplitz_hash_result);
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	qdf_mem_free(transaction);
	return status;
}
qdf_export_symbol(hif_exchange_bmi_msg);

/* In polling builds the recv callback is never used; see
 * hif_register_bmi_callbacks() below.
 */
#ifdef BMI_RSP_POLLING
#define BMI_RSP_CB_REGISTER 0
#else
#define BMI_RSP_CB_REGISTER 1
#endif

/**
 * hif_register_bmi_callbacks() - register bmi callbacks
 * @hif_sc: hif context
 *
 * Bmi phase uses different copy complete callbacks than mission mode.
 */
void hif_register_bmi_callbacks(struct hif_softc *hif_sc)
{
	struct HIF_CE_pipe_info *pipe_info;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	/*
	 * Initially, establish CE completion handlers for use with BMI.
	 * These are overwritten with generic handlers after we exit BMI phase.
+ */ + pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_TARG]; + ce_send_cb_register(pipe_info->ce_hdl, hif_bmi_send_done, pipe_info, 0); + + if (BMI_RSP_CB_REGISTER) { + pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_HOST]; + ce_recv_cb_register( + pipe_info->ce_hdl, hif_bmi_recv_data, pipe_info, 0); + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_bmi.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_bmi.h new file mode 100644 index 0000000000000000000000000000000000000000..7d330cb1a2687289e8d7f75f45aee9b2c81a1e50 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_bmi.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2015 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __CE_BMI_H__ +#define __CE_BMI_H__ + +#include /* qdf_atomic_read */ +#include "qdf_lock.h" +#include "ce_api.h" +#include "cepci.h" + +void hif_bmi_recv_data(struct CE_handle *copyeng, void *ce_context, + void *transfer_context, qdf_dma_addr_t data, + unsigned int nbytes, + unsigned int transfer_id, unsigned int flags); +void hif_bmi_send_done(struct CE_handle *copyeng, void *ce_context, + void *transfer_context, qdf_dma_addr_t data, + unsigned int nbytes, + unsigned int transfer_id, unsigned int sw_index, + unsigned int hw_index, uint32_t toeplitz_hash_result); +#endif /* __CE_BMI_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_diag.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_diag.c new file mode 100644 index 0000000000000000000000000000000000000000..86aa0c81c46d5df28fface4195d57a8bd78a3980 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_diag.c @@ -0,0 +1,529 @@ +/* + * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
 */

/*
 * NOTE(review): the bare #include lines below lost their angle-bracket
 * header names during patch extraction — restore from the upstream
 * qca-wifi-host-cmn tree before building.
 */
#include "targcfg.h"
#include "target_type.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include "qdf_status.h"
#include /* qdf_atomic_read */
#include 
#include "hif_io32.h"
#include 
#include "regtable.h"
#include 
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "hif_debug.h"
#include "qdf_module.h"

/**
 * hif_ce_dump_target_memory() - copy a window of target memory to a buffer
 * @scn: hif context
 * @ramdump_base: destination buffer; must hold at least @size bytes
 * @address: offset into the target's register/memory window (scn->mem)
 * @size: number of bytes to copy; effectively rounded to 32-bit words
 *
 * Reads target memory one 32-bit word at a time through the mapped BAR
 * and appends each word to @ramdump_base. Returns silently (partial/no
 * dump) if target access cannot be acquired.
 */
void
hif_ce_dump_target_memory(struct hif_softc *scn, void *ramdump_base,
			  uint32_t address, uint32_t size)
{
	uint32_t loc = address;
	uint32_t val = 0;
	uint32_t j = 0;
	u8 *temp = ramdump_base;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	while (j < size) {
		val = hif_read32_mb(scn->mem + loc + j);
		qdf_mem_copy(temp, &val, 4);
		j += 4;
		temp += 4;
	}

	Q_TARGET_ACCESS_END(scn);
}
/*
 * TBDXXX: Should be a function call specific to each Target-type.
 * This convoluted macro converts from Target CPU Virtual Address
 * Space to CE Address Space. As part of this process, we
 * conservatively fetch the current PCIE_BAR. MOST of the time,
 * this should match the upper bits of PCI space for this device;
 * but that's not guaranteed.
 */
#ifdef QCA_WIFI_3_0
/* On 3.0 targets the translation is a plain offset from the device's
 * physical base; note this variant references the local 'scn', not the
 * pci_addr argument.
 */
#define TARG_CPU_SPACE_TO_CE_SPACE(pci_addr, addr) \
	(scn->mem_pa + addr)
#else
#define TARG_CPU_SPACE_TO_CE_SPACE(pci_addr, addr) \
	(((hif_read32_mb((pci_addr) + \
	(SOC_CORE_BASE_ADDRESS|CORE_CTRL_ADDRESS)) & 0x7ff) << 21) \
	| 0x100000 | ((addr) & 0xfffff))
#endif

#define TARG_CPU_SPACE_TO_CE_SPACE_IPQ4019(pci_addr, addr) \
	(hif_read32_mb((pci_addr)+(WIFICMN_PCIE_BAR_REG_ADDRESS)) \
	| ((addr) & 0xfffff))

#define TARG_CPU_SPACE_TO_CE_SPACE_AR900B(pci_addr, addr) \
	(hif_read32_mb((pci_addr)+(WIFICMN_PCIE_BAR_REG_ADDRESS)) \
	| 0x100000 | ((addr) & 0xfffff))

/* SRAM window and per-radio BAR bases used by the IPQ4019 special case */
#define SRAM_BASE_ADDRESS 0xc0000
#define SRAM_END_ADDRESS 0x100000
#define WIFI0_IPQ4019_BAR 0xa000000
#define WIFI1_IPQ4019_BAR 0xa800000

/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10

/**
 * get_ce_phy_addr() - get the physical address of an soc virtual address
 * @sc: hif context
 * @address: soc virtual address
 * @target_type: target type being used.
 *
 * Selects the appropriate TARG_CPU_SPACE_TO_CE_SPACE* translation based
 * on target type, with a dedicated path for IPQ4019 SRAM-region
 * addresses.
 *
 * Return: soc physical address
 */
static qdf_dma_addr_t get_ce_phy_addr(struct hif_softc *sc, uint32_t address,
				      unsigned int target_type)
{
	qdf_dma_addr_t ce_phy_addr;
	/* 'scn' must exist by this name: the QCA_WIFI_3_0 variant of
	 * TARG_CPU_SPACE_TO_CE_SPACE expands to scn->mem_pa.
	 */
	struct hif_softc *scn = sc;
	unsigned int region = address & 0xfffff;	/* low 20 bits */
	unsigned int bar = address & 0xfff00000;	/* high 12 bits */
	unsigned int sramregion = 0;

	if ((target_type == TARGET_TYPE_IPQ4019) &&
	    (region >= SRAM_BASE_ADDRESS && region <= SRAM_END_ADDRESS)
	    && (bar == WIFI0_IPQ4019_BAR ||
		bar == WIFI1_IPQ4019_BAR || bar == 0)) {
		sramregion = 1;
	}

	if ((target_type == TARGET_TYPE_IPQ4019) && sramregion == 1) {
		ce_phy_addr =
			TARG_CPU_SPACE_TO_CE_SPACE_IPQ4019(sc->mem, address);
	} else if ((target_type == TARGET_TYPE_AR900B) ||
		   (target_type == TARGET_TYPE_QCA9984) ||
		   (target_type == TARGET_TYPE_IPQ4019) ||
		   (target_type == TARGET_TYPE_QCA9888)) {
		ce_phy_addr =
			TARG_CPU_SPACE_TO_CE_SPACE_AR900B(sc->mem, address);
	} else {
		ce_phy_addr =
			TARG_CPU_SPACE_TO_CE_SPACE(sc->mem, address);
	}

	return ce_phy_addr;
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */

#define FW_SRAM_ADDRESS 0x000C0000

/**
 * hif_diag_read_mem() - read a block of target memory via the diag CE
 * @hif_ctx: hif context
 * @address: target (soc virtual) address to read from
 * @data: host destination buffer
 * @nbytes: number of bytes to read
 *
 * Reads below the target's memory boundary are redirected word-by-word
 * to hif_diag_read_access(); larger memory-space reads are DMA'ed in
 * DIAG_TRANSFER_LIMIT chunks through the diagnostic copy engine into a
 * bounce buffer, then copied to @data on success.
 *
 * Return: QDF_STATUS_SUCCESS or a QDF error code.
 */
QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx,
			     uint32_t address, uint8_t *data, int nbytes)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_addr_t buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct CE_handle *ce_diag;
	qdf_dma_addr_t CE_data;	/* Host buffer address in CE space */
	qdf_dma_addr_t CE_data_base = 0;
	void *data_buf = NULL;
	int i;
	unsigned int mux_id = 0;
	unsigned int transaction_id = 0xffff;
	qdf_dma_addr_t ce_phy_addr = address;
	unsigned int toeplitz_hash_result;
	unsigned int user_flags = 0;
	unsigned int target_type = 0;
	unsigned int boundary_addr = 0;

	ce_diag = hif_state->ce_diag;
	if (ce_diag == NULL) {
		HIF_ERROR("%s: DIAG CE not present", __func__);
		return QDF_STATUS_E_INVAL;
	}
	/* not supporting diag ce on srng based systems, therefore we know this
	 * isn't an srng based system */

	transaction_id = (mux_id & MUX_ID_MASK) |
		(transaction_id & TRANSACTION_ID_MASK);
#ifdef QCA_WIFI_3_0
	user_flags &= DESC_DATA_FLAG_MASK;
#endif
	target_type = (hif_get_target_info_handle(hif_ctx))->target_type;

	/* This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn
	 */
	if ((target_type == TARGET_TYPE_IPQ4019) ||
	    (target_type == TARGET_TYPE_AR900B) ||
	    (target_type == TARGET_TYPE_QCA9984) ||
	    (target_type == TARGET_TYPE_AR9888) ||
	    (target_type == TARGET_TYPE_QCA9888))
		boundary_addr = FW_SRAM_ADDRESS;
	else
		boundary_addr = DRAM_BASE_ADDRESS;

	if (address < boundary_addr) {

		/* register path requires 4-byte alignment of both sides */
		if ((address & 0x3) || ((uintptr_t) data & 0x3))
			return QDF_STATUS_E_INVAL;

		while ((nbytes >= 4) &&
		       (QDF_STATUS_SUCCESS == (status =
				hif_diag_read_access(hif_ctx, address,
						     (uint32_t *)data)))) {

			nbytes -= sizeof(uint32_t);
			address += sizeof(uint32_t);
			data += sizeof(uint32_t);

		}

		return status;
	}

	A_TARGET_ACCESS_LIKELY(scn);

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 * 1) 4-byte alignment
	 * 2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					    orig_nbytes, &CE_data_base);
	if (!data_buf) {
		status = QDF_STATUS_E_NOMEM;
		goto done;
	}
	qdf_mem_set(data_buf, orig_nbytes, 0);
	qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data_base,
					   orig_nbytes, DMA_FROM_DEVICE);

	remaining_bytes = orig_nbytes;
	CE_data = CE_data_base;
	while (remaining_bytes) {
		nbytes = min(remaining_bytes, DIAG_TRANSFER_LIMIT);
		{
			/* post the destination buffer before triggering the
			 * copy, so the CE has somewhere to write
			 */
			status = ce_recv_buf_enqueue(ce_diag, NULL, CE_data);
			if (status != QDF_STATUS_SUCCESS)
				goto done;
		}

		if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
			status = QDF_STATUS_E_FAILURE;
			goto done;
		}

		/* convert soc virtual address to physical address */
		ce_phy_addr = get_ce_phy_addr(scn, address, target_type);

		if (Q_TARGET_ACCESS_END(scn) < 0) {
			status = QDF_STATUS_E_FAILURE;
			goto done;
		}

		/* Request CE to send from Target(!)
		 * address to Host buffer
		 */
		status = ce_send(ce_diag, NULL, ce_phy_addr, nbytes,
				 transaction_id, 0, user_flags);
		if (status != QDF_STATUS_SUCCESS)
			goto done;

		/* poll for send completion, up to the diag timeout */
		i = 0;
		while (ce_completed_send_next(ce_diag, NULL, NULL, &buf,
				&completed_nbytes, &id, NULL, NULL,
				&toeplitz_hash_result) != QDF_STATUS_SUCCESS) {
			qdf_mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				status = QDF_STATUS_E_BUSY;
				goto done;
			}
		}
		if (nbytes != completed_nbytes) {
			status = QDF_STATUS_E_FAILURE;
			goto done;
		}
		if (buf != ce_phy_addr) {
			status = QDF_STATUS_E_FAILURE;
			goto done;
		}

		/* poll for recv completion, up to the diag timeout */
		i = 0;
		while (ce_completed_recv_next
			       (ce_diag, NULL, NULL, &buf,
				&completed_nbytes, &id,
				&flags) != QDF_STATUS_SUCCESS) {
			qdf_mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				status = QDF_STATUS_E_BUSY;
				goto done;
			}
		}
		if (nbytes != completed_nbytes) {
			status = QDF_STATUS_E_FAILURE;
			goto done;
		}
		if (buf != CE_data) {
			status = QDF_STATUS_E_FAILURE;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		CE_data += nbytes;
	}

done:
	A_TARGET_ACCESS_UNLIKELY(scn);

	if (status == QDF_STATUS_SUCCESS)
		qdf_mem_copy(data, data_buf, orig_nbytes);
	else
		HIF_ERROR("%s failure (0x%x)", __func__, address);

	if (data_buf)
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					orig_nbytes, data_buf, CE_data_base, 0);

	return status;
}
qdf_export_symbol(hif_diag_read_mem);

/* Read 4-byte aligned data from Target memory or register */
QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
				uint32_t address, uint32_t *data)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (address >= DRAM_BASE_ADDRESS) {
		/* Assume range doesn't cross this boundary */
		return hif_diag_read_mem(hif_ctx, address, (uint8_t *) data,
					 sizeof(uint32_t));
	} else {
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return QDF_STATUS_E_FAILURE;
		*data = A_TARGET_READ(scn, address);
		if (Q_TARGET_ACCESS_END(scn) < 0)
return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; + } +} + +/** + * hif_diag_write_mem() - write data into the soc memory + * @hif_ctx: hif context + * @address: soc virtual address + * @data: data to copy into the soc address + * @nbytes: number of bytes to coppy + */ +QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx, + uint32_t address, uint8_t *data, int nbytes) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + QDF_STATUS status = QDF_STATUS_SUCCESS; + qdf_dma_addr_t buf; + unsigned int completed_nbytes, orig_nbytes, remaining_bytes; + unsigned int id; + unsigned int flags; + struct CE_handle *ce_diag; + void *data_buf = NULL; + qdf_dma_addr_t CE_data; /* Host buffer address in CE space */ + qdf_dma_addr_t CE_data_base = 0; + int i; + unsigned int mux_id = 0; + unsigned int transaction_id = 0xffff; + qdf_dma_addr_t ce_phy_addr = address; + unsigned int toeplitz_hash_result; + unsigned int user_flags = 0; + unsigned int target_type = 0; + + ce_diag = hif_state->ce_diag; + if (ce_diag == NULL) { + HIF_ERROR("%s: DIAG CE not present", __func__); + return QDF_STATUS_E_INVAL; + } + /* not supporting diag ce on srng based systems, therefore we know this + * isn't an srng based system */ + + transaction_id = (mux_id & MUX_ID_MASK) | + (transaction_id & TRANSACTION_ID_MASK); +#ifdef QCA_WIFI_3_0 + user_flags &= DESC_DATA_FLAG_MASK; +#endif + + A_TARGET_ACCESS_LIKELY(scn); + + /* + * Allocate a temporary bounce buffer to hold caller's data + * to be DMA'ed to Target. 
This guarantees + * 1) 4-byte alignment + * 2) Buffer in DMA-able space + */ + orig_nbytes = nbytes; + data_buf = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev, + orig_nbytes, &CE_data_base); + if (!data_buf) { + status = QDF_STATUS_E_NOMEM; + goto done; + } + + /* Copy caller's data to allocated DMA buf */ + qdf_mem_copy(data_buf, data, orig_nbytes); + qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data_base, + orig_nbytes, DMA_TO_DEVICE); + + target_type = (hif_get_target_info_handle(hif_ctx))->target_type; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + + /* convert soc virtual address to physical address */ + ce_phy_addr = get_ce_phy_addr(scn, address, target_type); + + if (Q_TARGET_ACCESS_END(scn) < 0) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + + remaining_bytes = orig_nbytes; + CE_data = CE_data_base; + while (remaining_bytes) { + nbytes = min(remaining_bytes, DIAG_TRANSFER_LIMIT); + + /* Set up to receive directly into Target(!) address */ + status = ce_recv_buf_enqueue(ce_diag, NULL, ce_phy_addr); + if (status != QDF_STATUS_SUCCESS) + goto done; + + /* + * Request CE to send caller-supplied data that + * was copied to bounce buffer to Target(!) address. 
+ */ + status = ce_send(ce_diag, NULL, (qdf_dma_addr_t) CE_data, + nbytes, transaction_id, 0, user_flags); + + if (status != QDF_STATUS_SUCCESS) + goto done; + + /* poll for transfer complete */ + i = 0; + while (ce_completed_send_next(ce_diag, NULL, NULL, &buf, + &completed_nbytes, &id, + NULL, NULL, &toeplitz_hash_result) != + QDF_STATUS_SUCCESS) { + qdf_mdelay(1); + if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + status = QDF_STATUS_E_BUSY; + goto done; + } + } + + if (nbytes != completed_nbytes) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + + if (buf != CE_data) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + + i = 0; + while (ce_completed_recv_next + (ce_diag, NULL, NULL, &buf, + &completed_nbytes, &id, + &flags) != QDF_STATUS_SUCCESS) { + qdf_mdelay(1); + if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { + status = QDF_STATUS_E_BUSY; + goto done; + } + } + + if (nbytes != completed_nbytes) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + + if (buf != ce_phy_addr) { + status = QDF_STATUS_E_FAILURE; + goto done; + } + + remaining_bytes -= nbytes; + address += nbytes; + CE_data += nbytes; + } + +done: + A_TARGET_ACCESS_UNLIKELY(scn); + + if (data_buf) { + qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, + orig_nbytes, data_buf, CE_data_base, 0); + } + + if (status != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s failure (0x%llx)", __func__, + (uint64_t)ce_phy_addr); + } + + return status; +} + +/* Write 4B data to Target memory or register */ +QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx, + uint32_t address, uint32_t data) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (address >= DRAM_BASE_ADDRESS) { + /* Assume range doesn't cross this boundary */ + uint32_t data_buf = data; + + return hif_diag_write_mem(hif_ctx, address, + (uint8_t *) &data_buf, + sizeof(uint32_t)); + } else { + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return QDF_STATUS_E_FAILURE; + A_TARGET_WRITE(scn, address, data); + if (Q_TARGET_ACCESS_END(scn) < 0) + 
return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_internal.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..cde67cee22973f3a7cca0929fb110c4e654ac9bf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_internal.h @@ -0,0 +1,550 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __COPY_ENGINE_INTERNAL_H__ +#define __COPY_ENGINE_INTERNAL_H__ + +#include /* A_TARGET_WRITE */ + +/* Copy Engine operational state */ +enum CE_op_state { + CE_UNUSED, + CE_PAUSED, + CE_RUNNING, + CE_PENDING, +}; + +enum ol_ath_hif_ce_ecodes { + CE_RING_DELTA_FAIL = 0 +}; + +struct CE_src_desc; + +/* Copy Engine Ring internal state */ +struct CE_ring_state { + + /* Number of entries in this ring; must be power of 2 */ + unsigned int nentries; + unsigned int nentries_mask; + + /* + * For dest ring, this is the next index to be processed + * by software after it was/is received into. + * + * For src ring, this is the last descriptor that was sent + * and completion processed by software. 
+ * + * Regardless of src or dest ring, this is an invariant + * (modulo ring size): + * write index >= read index >= sw_index + */ + unsigned int sw_index; + unsigned int write_index; /* cached copy */ + /* + * For src ring, this is the next index not yet processed by HW. + * This is a cached copy of the real HW index (read index), used + * for avoiding reading the HW index register more often than + * necessary. + * This extends the invariant: + * write index >= read index >= hw_index >= sw_index + * + * For dest ring, this is currently unused. + */ + unsigned int hw_index; /* cached copy */ + + /* Start of DMA-coherent area reserved for descriptors */ + void *base_addr_owner_space_unaligned; /* Host address space */ + qdf_dma_addr_t base_addr_CE_space_unaligned; /* CE address space */ + + /* + * Actual start of descriptors. + * Aligned to descriptor-size boundary. + * Points into reserved DMA-coherent area, above. + */ + void *base_addr_owner_space; /* Host address space */ + qdf_dma_addr_t base_addr_CE_space; /* CE address space */ + /* + * Start of shadow copy of descriptors, within regular memory. + * Aligned to descriptor-size boundary. + */ + char *shadow_base_unaligned; + struct CE_src_desc *shadow_base; + + unsigned int low_water_mark_nentries; + unsigned int high_water_mark_nentries; + void *srng_ctx; + void **per_transfer_context; + OS_DMA_MEM_CONTEXT(ce_dmacontext); /* OS Specific DMA context */ +}; + +/* Copy Engine internal state */ +struct CE_state { + struct hif_softc *scn; + unsigned int id; + unsigned int attr_flags; /* CE_ATTR_* */ + uint32_t ctrl_addr; /* relative to BAR */ + enum CE_op_state state; + +#ifdef WLAN_FEATURE_FASTPATH + fastpath_msg_handler fastpath_handler; + void *context; +#endif /* WLAN_FEATURE_FASTPATH */ + qdf_work_t oom_allocation_work; + + ce_send_cb send_cb; + void *send_context; + + CE_recv_cb recv_cb; + void *recv_context; + + /* misc_cbs - are any callbacks besides send and recv enabled? 
*/ + uint8_t misc_cbs; + + CE_watermark_cb watermark_cb; + void *wm_context; + + /*Record the state of the copy compl interrupt */ + int disable_copy_compl_intr; + + unsigned int src_sz_max; + struct CE_ring_state *src_ring; + struct CE_ring_state *dest_ring; + struct CE_ring_state *status_ring; + atomic_t rx_pending; + + qdf_spinlock_t ce_index_lock; + /* Flag to indicate whether to break out the DPC context */ + bool force_break; + + /* time in nanoseconds to yield control of napi poll */ + unsigned long long ce_service_yield_time; + /* CE service start time in nanoseconds */ + unsigned long long ce_service_start_time; + /* Num Of Receive Buffers handled for one interrupt DPC routine */ + unsigned int receive_count; + /* epping */ + bool timer_inited; + qdf_timer_t poll_timer; + + /* datapath - for faster access, use bools instead of a bitmap */ + bool htt_tx_data; + bool htt_rx_data; + qdf_lro_ctx_t lro_data; +}; + +/* Descriptor rings must be aligned to this boundary */ +#define CE_DESC_RING_ALIGN 8 +#define CLOCK_OVERRIDE 0x2 + +#ifdef QCA_WIFI_3_0 +#define HIF_CE_DESC_ADDR_TO_DMA(desc) \ + (qdf_dma_addr_t)(((uint64_t)(desc)->buffer_addr + \ + ((uint64_t)((desc)->buffer_addr_hi & 0x1F) << 32))) +#else +#define HIF_CE_DESC_ADDR_TO_DMA(desc) \ + (qdf_dma_addr_t)((desc)->buffer_addr) +#endif + +#ifdef QCA_WIFI_3_0 +struct CE_src_desc { + uint32_t buffer_addr:32; +#if _BYTE_ORDER == _BIG_ENDIAN + uint32_t gather:1, + enable_11h:1, + meta_data_low:2, /* fw_metadata_low */ + packet_result_offset:12, + toeplitz_hash_enable:1, + addr_y_search_disable:1, + addr_x_search_disable:1, + misc_int_disable:1, + target_int_disable:1, + host_int_disable:1, + dest_byte_swap:1, + byte_swap:1, + type:2, + tx_classify:1, + buffer_addr_hi:5; + uint32_t meta_data:16, /* fw_metadata_high */ + nbytes:16; /* length in register map */ +#else + uint32_t buffer_addr_hi:5, + tx_classify:1, + type:2, + byte_swap:1, /* src_byte_swap */ + dest_byte_swap:1, + host_int_disable:1, + 
target_int_disable:1, + misc_int_disable:1, + addr_x_search_disable:1, + addr_y_search_disable:1, + toeplitz_hash_enable:1, + packet_result_offset:12, + meta_data_low:2, /* fw_metadata_low */ + enable_11h:1, + gather:1; + uint32_t nbytes:16, /* length in register map */ + meta_data:16; /* fw_metadata_high */ +#endif + uint32_t toeplitz_hash_result:32; +}; + +struct CE_dest_desc { + uint32_t buffer_addr:32; +#if _BYTE_ORDER == _BIG_ENDIAN + uint32_t gather:1, + enable_11h:1, + meta_data_low:2, /* fw_metadata_low */ + packet_result_offset:12, + toeplitz_hash_enable:1, + addr_y_search_disable:1, + addr_x_search_disable:1, + misc_int_disable:1, + target_int_disable:1, + host_int_disable:1, + byte_swap:1, + src_byte_swap:1, + type:2, + tx_classify:1, + buffer_addr_hi:5; + uint32_t meta_data:16, /* fw_metadata_high */ + nbytes:16; /* length in register map */ +#else + uint32_t buffer_addr_hi:5, + tx_classify:1, + type:2, + src_byte_swap:1, + byte_swap:1, /* dest_byte_swap */ + host_int_disable:1, + target_int_disable:1, + misc_int_disable:1, + addr_x_search_disable:1, + addr_y_search_disable:1, + toeplitz_hash_enable:1, + packet_result_offset:12, + meta_data_low:2, /* fw_metadata_low */ + enable_11h:1, + gather:1; + uint32_t nbytes:16, /* length in register map */ + meta_data:16; /* fw_metadata_high */ +#endif + uint32_t toeplitz_hash_result:32; +}; +#else +struct CE_src_desc { + uint32_t buffer_addr; +#if _BYTE_ORDER == _BIG_ENDIAN + uint32_t meta_data:12, + target_int_disable:1, + host_int_disable:1, + byte_swap:1, + gather:1, + nbytes:16; +#else + + uint32_t nbytes:16, + gather:1, + byte_swap:1, + host_int_disable:1, + target_int_disable:1, + meta_data:12; +#endif +}; + +struct CE_dest_desc { + uint32_t buffer_addr; +#if _BYTE_ORDER == _BIG_ENDIAN + uint32_t meta_data:12, + target_int_disable:1, + host_int_disable:1, + byte_swap:1, + gather:1, + nbytes:16; +#else + uint32_t nbytes:16, + gather:1, + byte_swap:1, + host_int_disable:1, + target_int_disable:1, + 
meta_data:12; +#endif +}; +#endif /* QCA_WIFI_3_0 */ + +struct ce_srng_src_desc { + uint32_t buffer_addr_lo; +#if _BYTE_ORDER == _BIG_ENDIAN + uint32_t nbytes:16, + rsvd:4, + gather:1, + dest_swap:1, + byte_swap:1, + toeplitz_hash_enable:1, + buffer_addr_hi:8; + uint32_t rsvd1:16, + meta_data:16; + uint32_t loop_count:4, + ring_id:8, + rsvd3:20; +#else + uint32_t buffer_addr_hi:8, + toeplitz_hash_enable:1, + byte_swap:1, + dest_swap:1, + gather:1, + rsvd:4, + nbytes:16; + uint32_t meta_data:16, + rsvd1:16; + uint32_t rsvd3:20, + ring_id:8, + loop_count:4; +#endif +}; +struct ce_srng_dest_desc { + uint32_t buffer_addr_lo; +#if _BYTE_ORDER == _BIG_ENDIAN + uint32_t loop_count:4, + ring_id:8, + rsvd1:12, + buffer_addr_hi:8; +#else + uint32_t buffer_addr_hi:8, + rsvd1:12, + ring_id:8, + loop_count:4; +#endif +}; +struct ce_srng_dest_status_desc { +#if _BYTE_ORDER == _BIG_ENDIAN + uint32_t nbytes:16, + rsvd:4, + gather:1, + dest_swap:1, + byte_swap:1, + toeplitz_hash_enable:1, + rsvd0:8; + uint32_t rsvd1:16, + meta_data:16; +#else + uint32_t rsvd0:8, + toeplitz_hash_enable:1, + byte_swap:1, + dest_swap:1, + gather:1, + rsvd:4, + nbytes:16; + uint32_t meta_data:16, + rsvd1:16; +#endif + uint32_t toeplitz_hash; +#if _BYTE_ORDER == _BIG_ENDIAN + uint32_t loop_count:4, + ring_id:8, + rsvd3:20; +#else + uint32_t rsvd3:20, + ring_id:8, + loop_count:4; +#endif +}; + +#define CE_SENDLIST_ITEMS_MAX 12 + +/** + * union ce_desc - unified data type for ce descriptors + * + * Both src and destination descriptors follow the same format. + * They use different data structures for different access symantics. + * Here we provice a unifying data type. + */ +union ce_desc { + struct CE_src_desc src_desc; + struct CE_dest_desc dest_desc; +}; + +/** + * enum hif_ce_event_type - HIF copy engine event type + * @HIF_RX_DESC_POST: event recorded before updating write index of RX ring. + * @HIF_RX_DESC_COMPLETION: event recorded before updating sw index of RX ring. 
+ * @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update) + * @HIF_TX_DESC_POST: event recorded before updating write index of TX ring. + * @HIF_TX_DESC_SOFTWARE_POST: event recorded when dropping a write to the write + * index in a normal tx + * @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring. + * @FAST_RX_WRITE_INDEX_UPDATE: event recorded before updating the write index + * of the RX ring in fastpath + * @FAST_RX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software + * index of the RX ring in fastpath + * @FAST_TX_WRITE_INDEX_UPDATE: event recorded before updating the write index + * of the TX ring in fastpath + * @FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE: recored when dropping a write to + * the write index in fastpath + * @FAST_TX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software + * index of the RX ring in fastpath + * @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh + * @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet + * @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet + * @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule + * @HIF_CE_REAP_ENTRY: records when we process completion outside of a bh + * @HIF_CE_REAP_EXIT: records when we process completion outside of a bh + * @NAPI_SCHEDULE: records when napi is scheduled from the irq context + * @NAPI_POLL_ENTER: records the start of the napi poll function + * @NAPI_COMPLETE: records when interrupts are reenabled + * @NAPI_POLL_EXIT: records when the napi poll function returns + */ +enum hif_ce_event_type { + HIF_RX_DESC_POST, + HIF_RX_DESC_COMPLETION, + HIF_TX_GATHER_DESC_POST, + HIF_TX_DESC_POST, + HIF_TX_DESC_SOFTWARE_POST, + HIF_TX_DESC_COMPLETION, + FAST_RX_WRITE_INDEX_UPDATE, + FAST_RX_SOFTWARE_INDEX_UPDATE, + FAST_TX_WRITE_INDEX_UPDATE, + FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE, + FAST_TX_SOFTWARE_INDEX_UPDATE, + RESUME_WRITE_INDEX_UPDATE, + + HIF_IRQ_EVENT = 
0x10, + HIF_CE_TASKLET_ENTRY, + HIF_CE_TASKLET_RESCHEDULE, + HIF_CE_TASKLET_EXIT, + HIF_CE_REAP_ENTRY, + HIF_CE_REAP_EXIT, + NAPI_SCHEDULE, + NAPI_POLL_ENTER, + NAPI_COMPLETE, + NAPI_POLL_EXIT, + + HIF_RX_NBUF_ALLOC_FAILURE = 0x20, + HIF_RX_NBUF_MAP_FAILURE, + HIF_RX_NBUF_ENQUEUE_FAILURE, +}; + +void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size); +void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id); +void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id, + enum hif_ce_event_type type, + union ce_desc *descriptor, void *memory, + int index, int len); + +enum ce_sendlist_type_e { + CE_SIMPLE_BUFFER_TYPE, + /* TBDXXX: CE_RX_DESC_LIST, */ +}; + +/* + * There's a public "ce_sendlist" and a private "ce_sendlist_s". + * The former is an opaque structure with sufficient space + * to hold the latter. The latter is the actual structure + * definition and it is only used internally. The opaque version + * of the structure allows callers to allocate an instance on the + * run-time stack without knowing any of the details of the + * structure layout. + */ +struct ce_sendlist_s { + unsigned int num_items; + struct ce_sendlist_item { + enum ce_sendlist_type_e send_type; + dma_addr_t data; /* e.g. buffer or desc list */ + union { + unsigned int nbytes; /* simple buffer */ + unsigned int ndesc; /* Rx descriptor list */ + } u; + /* flags: externally-specified flags; + * OR-ed with internal flags + */ + uint32_t flags; + uint32_t user_flags; + } item[CE_SENDLIST_ITEMS_MAX]; +}; + +bool hif_ce_service_should_yield(struct hif_softc *scn, struct CE_state + *ce_state); + +#ifdef WLAN_FEATURE_FASTPATH +void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl); +void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl); +#else +static inline void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl) +{ +} + +static inline void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl) +{ +} +#endif + +/* which ring of a CE? 
*/ +#define CE_RING_SRC 0 +#define CE_RING_DEST 1 +#define CE_RING_STATUS 2 + +#define CDC_WAR_MAGIC_STR 0xceef0000 +#define CDC_WAR_DATA_CE 4 + +/* Additional internal-only ce_send flags */ +#define CE_SEND_FLAG_GATHER 0x00010000 /* Use Gather */ + +/** + * hif_get_wake_ce_id() - gets the copy engine id used for waking up + * @scn: The hif context to use + * @ce_id: a pointer where the copy engine Id should be populated + * + * Return: errno + */ +int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id); + +/* + * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked + * for defined here + */ +#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) + +#define HIF_CE_HISTORY_MAX 512 + +#define CE_DEBUG_MAX_DATA_BUF_SIZE 64 +/** + * struct hif_ce_desc_event - structure for detailing a ce event + * @type: what the event was + * @time: when it happened + * @descriptor: descriptor enqueued or dequeued + * @memory: virtual address that was used + * @index: location of the descriptor in the ce ring; + * @data: data pointed by descriptor + * @actual_data_len: length of the data + */ +struct hif_ce_desc_event { + uint16_t index; + enum hif_ce_event_type type; + uint64_t time; + union ce_desc descriptor; + void *memory; +#if HIF_CE_DEBUG_DATA_BUF + uint8_t *data; + ssize_t actual_data_len; +#endif +}; + +#if HIF_CE_DEBUG_DATA_BUF +QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id); +void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id); +#endif /*HIF_CE_DEBUG_DATA_BUF*/ +#endif /* #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */ +#endif /* __COPY_ENGINE_INTERNAL_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_main.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_main.c new file mode 100644 index 0000000000000000000000000000000000000000..f63194eae3d3ea6fe0b3f9fa266b2494573fdc98 --- /dev/null +++ 
b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_main.c @@ -0,0 +1,3602 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "targcfg.h" +#include "qdf_lock.h" +#include "qdf_status.h" +#include "qdf_status.h" +#include /* qdf_atomic_read */ +#include +#include "hif_io32.h" +#include +#include +#include "regtable.h" +#define ATH_MODULE_NAME hif +#include +#include "hif_main.h" +#include "ce_api.h" +#include "qdf_trace.h" +#include "pld_common.h" +#include "hif_debug.h" +#include "ce_internal.h" +#include "ce_reg.h" +#include "ce_assignment.h" +#include "ce_tasklet.h" +#ifndef CONFIG_WIN +#include "qwlan_version.h" +#endif +#include "qdf_module.h" + +#define CE_POLL_TIMEOUT 10 /* ms */ + +#define AGC_DUMP 1 +#define CHANINFO_DUMP 2 +#define BB_WATCHDOG_DUMP 3 +#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG +#define PCIE_ACCESS_DUMP 4 +#endif +#include "mp_dev.h" + +#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \ + !defined(QCA_WIFI_SUPPORT_SRNG) +#define QCA_WIFI_SUPPORT_SRNG +#endif + +/* Forward references */ +QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info); + +/* + * Fix EV118783, poll to check whether a BMI response comes + * other than waiting 
for the interruption which may be lost. + */ +/* #define BMI_RSP_POLLING */ +#define BMI_RSP_TO_MILLISEC 1000 + +#ifdef CONFIG_BYPASS_QMI +#define BYPASS_QMI 1 +#else +#define BYPASS_QMI 0 +#endif + +#ifdef CONFIG_WIN +#if ENABLE_10_4_FW_HDR +#define WDI_IPA_SERVICE_GROUP 5 +#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0) +#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1) +#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2) +#endif /* ENABLE_10_4_FW_HDR */ +#endif + +QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn); +static void hif_config_rri_on_ddr(struct hif_softc *scn); + +/** + * hif_target_access_log_dump() - dump access log + * + * dump access log + * + * Return: n/a + */ +#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG +static void hif_target_access_log_dump(void) +{ + hif_target_dump_access_log(); +} +#endif + + +void hif_trigger_dump(struct hif_opaque_softc *hif_ctx, + uint8_t cmd_id, bool start) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + switch (cmd_id) { + case AGC_DUMP: + if (start) + priv_start_agc(scn); + else + priv_dump_agc(scn); + break; + case CHANINFO_DUMP: + if (start) + priv_start_cap_chaninfo(scn); + else + priv_dump_chaninfo(scn); + break; + case BB_WATCHDOG_DUMP: + priv_dump_bbwatchdog(scn); + break; +#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG + case PCIE_ACCESS_DUMP: + hif_target_access_log_dump(); + break; +#endif + default: + HIF_ERROR("%s: Invalid htc dump command", __func__); + break; + } +} + +static void ce_poll_timeout(void *arg) +{ + struct CE_state *CE_state = (struct CE_state *)arg; + + if (CE_state->timer_inited) { + ce_per_engine_service(CE_state->scn, CE_state->id); + qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT); + } +} + +static unsigned int roundup_pwr2(unsigned int n) +{ + int i; + unsigned int test_pwr2; + + if (!(n & (n - 1))) + return n; /* already a power of 2 */ + + test_pwr2 = 4; + for (i = 0; i < 29; i++) { + if (test_pwr2 > n) + return test_pwr2; + test_pwr2 = 
test_pwr2 << 1; + } + + QDF_ASSERT(0); /* n too large */ + return 0; +} + +#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C +#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40 + +static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = { + { 0, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 3, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 4, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 5, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 7, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 1, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 2, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 7, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 8, ADRASTEA_DST_WR_INDEX_OFFSET}, +#ifdef QCA_WIFI_3_0_ADRASTEA + { 9, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 10, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 11, ADRASTEA_DST_WR_INDEX_OFFSET}, +#endif +}; + +#ifdef WLAN_FEATURE_EPPING +static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = { + { 0, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 3, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 4, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 7, ADRASTEA_SRC_WR_INDEX_OFFSET}, + { 1, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 2, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 5, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 7, ADRASTEA_DST_WR_INDEX_OFFSET}, + { 8, ADRASTEA_DST_WR_INDEX_OFFSET}, +}; +#endif + +/* CE_PCI TABLE */ +/* + * NOTE: the table below is out of date, though still a useful reference. + * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual + * mapping of HTC services to HIF pipes. + */ +/* + * This authoritative table defines Copy Engine configuration and the mapping + * of services/endpoints to CEs. A subset of this information is passed to + * the Target during startup as a prerequisite to entering BMI phase. 
+ * See: + * target_service_to_ce_map - Target-side mapping + * hif_map_service_to_pipe - Host-side mapping + * target_ce_config - Target-side configuration + * host_ce_config - Host-side configuration + ============================================================================ + Purpose | Service / Endpoint | CE | Dire | Xfer | Xfer + | | | ctio | Size | Frequency + | | | n | | + ============================================================================ + tx | HTT_DATA (downlink) | CE 0 | h->t | medium - | very frequent + descriptor | | | | O(100B) | and regular + download | | | | | + ---------------------------------------------------------------------------- + rx | HTT_DATA (uplink) | CE 1 | t->h | small - | frequent and + indication | | | | O(10B) | regular + upload | | | | | + ---------------------------------------------------------------------------- + MSDU | DATA_BK (uplink) | CE 2 | t->h | large - | rare + upload | | | | O(1000B) | (frequent + e.g. noise | | | | | during IP1.0 + packets | | | | | testing) + ---------------------------------------------------------------------------- + MSDU | DATA_BK (downlink) | CE 3 | h->t | large - | very rare + download | | | | O(1000B) | (frequent + e.g. | | | | | during IP1.0 + misdirecte | | | | | testing) + d EAPOL | | | | | + packets | | | | | + ---------------------------------------------------------------------------- + n/a | DATA_BE, DATA_VI | CE 2 | t->h | | never(?) + | DATA_VO (uplink) | | | | + ---------------------------------------------------------------------------- + n/a | DATA_BE, DATA_VI | CE 3 | h->t | | never(?) 
+ | DATA_VO (downlink) | | | | + ---------------------------------------------------------------------------- + WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent + | | | | O(100B) | + ---------------------------------------------------------------------------- + WMI | WMI_CONTROL | CE 5 | h->t | medium - | infrequent + messages | (downlink) | | | O(100B) | + | | | | | + ---------------------------------------------------------------------------- + n/a | HTC_CTRL_RSVD, | CE 1 | t->h | | never(?) + | HTC_RAW_STREAMS | | | | + | (uplink) | | | | + ---------------------------------------------------------------------------- + n/a | HTC_CTRL_RSVD, | CE 0 | h->t | | never(?) + | HTC_RAW_STREAMS | | | | + | (downlink) | | | | + ---------------------------------------------------------------------------- + diag | none (raw CE) | CE 7 | t<>h | 4 | Diag Window + | | | | | infrequent + ============================================================================ + */ + +/* + * Map from service/endpoint to Copy Engine. + * This table is derived from the CE_PCI TABLE, above. + * It is passed to the Target at startup for use by firmware. 
+ */ +static struct service_to_pipe target_service_to_ce_map_wlan[] = { + { + WMI_DATA_VO_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_DATA_VO_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + WMI_DATA_BK_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_DATA_BK_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + WMI_DATA_BE_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_DATA_BE_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + WMI_DATA_VI_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_DATA_VI_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + WMI_CONTROL_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_CONTROL_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + HTC_CTRL_RSVD_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 0, /* could be moved to 3 (share with WMI) */ + }, + { + HTC_CTRL_RSVD_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + HTC_RAW_STREAMS_SVC, /* not currently used */ + PIPEDIR_OUT, /* out = UL = host -> target */ + 0, + }, + { + HTC_RAW_STREAMS_SVC, /* not currently used */ + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + HTT_DATA_MSG_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 4, + }, + { + HTT_DATA_MSG_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 1, + }, + { + WDI_IPA_TX_SVC, + PIPEDIR_OUT, /* in = DL = target -> host */ + 5, + }, +#if defined(QCA_WIFI_3_0_ADRASTEA) + { + HTT_DATA2_MSG_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 9, + }, + { + HTT_DATA3_MSG_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 10, + }, + { + PACKET_LOG_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 11, + }, +#endif + /* (Additions here) */ + + { /* Must be last */ + 0, + 0, + 0, + }, +}; + +/* PIPEDIR_OUT = HOST to Target */ +/* PIPEDIR_IN = TARGET to HOST */ +#if (defined(QCA_WIFI_QCA8074)) +static struct 
service_to_pipe target_service_to_ce_map_qca8074[] = { + { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, + { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, + { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, + { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7}, + { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2}, + { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, }, + { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0}, + { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 }, + { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, + { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, + { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, + /* (Additions here) */ + { 0, 0, 0, }, +}; +#else +static struct service_to_pipe target_service_to_ce_map_qca8074[] = { +}; +#endif + +#if (defined(QCA_WIFI_QCA6290)) +#ifdef CONFIG_WIN +static struct service_to_pipe target_service_to_ce_map_qca6290[] = { + { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VO_SVC, PIPEDIR_IN , 2, }, + { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BK_SVC, PIPEDIR_IN , 2, }, + { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BE_SVC, PIPEDIR_IN , 2, }, + { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VI_SVC, PIPEDIR_IN , 2, }, + { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, + { WMI_CONTROL_SVC, PIPEDIR_IN , 2, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_IN , 2, }, + { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, + { HTT_DATA_MSG_SVC, PIPEDIR_IN , 1, }, + { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7}, + { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2}, + { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, + /* (Additions here) */ + { 0, 0, 0, }, +}; +#else +static struct service_to_pipe target_service_to_ce_map_qca6290[] = { + { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BK_SVC, 
PIPEDIR_OUT, 3, }, + { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, + { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, + { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, + { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, + { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, + { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, }, + { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, + { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, + /* (Additions here) */ + { 0, 0, 0, }, +}; +#endif +#else +static struct service_to_pipe target_service_to_ce_map_qca6290[] = { +}; +#endif + +static struct service_to_pipe target_service_to_ce_map_ar900b[] = { + { + WMI_DATA_VO_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_DATA_VO_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + WMI_DATA_BK_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_DATA_BK_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + WMI_DATA_BE_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_DATA_BE_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + WMI_DATA_VI_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_DATA_VI_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + WMI_CONTROL_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 3, + }, + { + WMI_CONTROL_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 2, + }, + { + HTC_CTRL_RSVD_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 0, /* could be moved to 3 (share with WMI) */ + }, + { + HTC_CTRL_RSVD_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 1, + }, + { + HTC_RAW_STREAMS_SVC, /* not currently used */ + PIPEDIR_OUT, /* out = UL = host -> target */ + 0, + }, + { + HTC_RAW_STREAMS_SVC, /* not currently used */ + PIPEDIR_IN, /* in = DL = target -> host */ + 1, + }, + { + HTT_DATA_MSG_SVC, + PIPEDIR_OUT, /* out = UL = host -> target */ + 4, + }, +#ifdef WLAN_FEATURE_FASTPATH + { + 
HTT_DATA_MSG_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 5, + }, +#else /* WLAN_FEATURE_FASTPATH */ + { + HTT_DATA_MSG_SVC, + PIPEDIR_IN, /* in = DL = target -> host */ + 1, + }, +#endif /* WLAN_FEATURE_FASTPATH */ + + /* (Additions here) */ + + { /* Must be last */ + 0, + 0, + 0, + }, +}; + +static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map; +static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map); + +#ifdef WLAN_FEATURE_EPPING +static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = { + {WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */ + {WMI_DATA_VO_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ + {WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */ + {WMI_DATA_BK_SVC, PIPEDIR_IN, 1,}, /* in = DL = target -> host */ + {WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */ + {WMI_DATA_BE_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ + {WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */ + {WMI_DATA_VI_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ + {WMI_CONTROL_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */ + {WMI_CONTROL_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ + {HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */ + {HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ + {HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */ + {HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ + {HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */ + {HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,}, /* in = DL = target -> host */ + {0, 0, 0,}, /* Must be last */ +}; + +void hif_select_epping_service_to_pipe_map(struct service_to_pipe + **tgt_svc_map_to_use, + uint32_t *sz_tgt_svc_map_to_use) +{ + *tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping; + *sz_tgt_svc_map_to_use = + sizeof(target_service_to_ce_map_wlan_epping); +} +#endif + +static void 
hif_select_service_to_pipe_map(struct hif_softc *scn,
			       struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	/* epping (loopback test) mode replaces the normal per-target
	 * service->CE map with its own dedicated map
	 */
	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
						      sz_tgt_svc_map_to_use);
	} else {
		/* select the service->CE map matching the attached target;
		 * unknown targets fall back to the generic wlan map
		 */
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		}
	}
}

/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state : pointer to the state context of the CE
 *
 * Description:
 * Sets htt_rx_data attribute of the state structure if the
 * CE serves one of the HTT DATA services.
 *
 * Return:
 *  false (attribute set to false)
 *  true  (attribute set to true);
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	bool rc = false;

	if (ce_state != NULL) {
		/* map_sz is returned in bytes; convert to entry count */
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			/* only HTT DATA services mark a CE as datapath */
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	/* a freshly initialized ring must start with both indexes at 0 */
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			  ce_id, type, ring->sw_index, ring->write_index);
	/* mismatched indexes mean the ring is not empty -> fatal */
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries should be allocated
 * @desc_size: ce desc size
 *
 * The CE reserved for IPA uses shared memory so that the IPA uC can
 * access the ring as well; all other CEs use ordinary coherent DMA
 * memory.
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
		/* extra CE_DESC_RING_ALIGN bytes allow aligning the base */
		scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(scn->qdf_dev,
			nentries * desc_size + CE_DESC_RING_ALIGN);
		if (!scn->ipa_ce_ring) {
			HIF_ERROR("%s: Failed to allocate memory for IPA ce ring",
				  __func__);
			return QDF_STATUS_E_NOMEM;
		}
		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
						  &scn->ipa_ce_ring->mem_info);
		ce_ring->base_addr_owner_space_unaligned =
						scn->ipa_ce_ring->vaddr;
	} else {
		ce_ring->base_addr_owner_space_unaligned =
			qdf_mem_alloc_consistent(scn->qdf_dev,
						 scn->qdf_dev->dev,
						 (nentries * desc_size +
						  CE_DESC_RING_ALIGN),
						 base_addr);
		if (!ce_ring->base_addr_owner_space_unaligned) {
			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
				  __func__, CE_id);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned
int CE_id, + struct CE_ring_state *ce_ring, uint32_t desc_size) +{ + if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) { + qdf_mem_shared_mem_free(scn->qdf_dev, + scn->ipa_ce_ring); + ce_ring->base_addr_owner_space_unaligned = NULL; + } else { + qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, + ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN, + ce_ring->base_addr_owner_space_unaligned, + ce_ring->base_addr_CE_space, 0); + ce_ring->base_addr_owner_space_unaligned = NULL; + } +} +#else +static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id, + qdf_dma_addr_t *base_addr, + struct CE_ring_state *ce_ring, + unsigned int nentries, uint32_t desc_size) +{ + ce_ring->base_addr_owner_space_unaligned = + qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev, + (nentries * desc_size + + CE_DESC_RING_ALIGN), base_addr); + if (!ce_ring->base_addr_owner_space_unaligned) { + HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u", + __func__, CE_id); + return QDF_STATUS_E_NOMEM; + } + return QDF_STATUS_SUCCESS; +} + +static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id, + struct CE_ring_state *ce_ring, uint32_t desc_size) +{ + qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, + ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN, + ce_ring->base_addr_owner_space_unaligned, + ce_ring->base_addr_CE_space, 0); + ce_ring->base_addr_owner_space_unaligned = NULL; +} +#endif /* IPA_OFFLOAD */ + +/** + * ce_srng_based() - Does this target use srng + * @ce_state : pointer to the state context of the CE + * + * Description: + * returns true if the target is SRNG based + * + * Return: + * false (attribute set to false) + * true (attribute set to true); + */ +bool ce_srng_based(struct hif_softc *scn) +{ + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); + + switch (tgt_info->target_type) { + case TARGET_TYPE_QCA8074: + case TARGET_TYPE_QCA6290: 
+ return true; + default: + return false; + } + return false; +} +qdf_export_symbol(ce_srng_based); + +#ifdef QCA_WIFI_SUPPORT_SRNG +static struct ce_ops *ce_services_attach(struct hif_softc *scn) +{ + if (ce_srng_based(scn)) + return ce_services_srng(); + + return ce_services_legacy(); +} + + +#else /* QCA_LITHIUM */ +static struct ce_ops *ce_services_attach(struct hif_softc *scn) +{ + return ce_services_legacy(); +} +#endif /* QCA_LITHIUM */ + +static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn, + struct pld_shadow_reg_v2_cfg **shadow_config, + int *num_shadow_registers_configured) { + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg( + scn, shadow_config, num_shadow_registers_configured); +} + +static inline uint32_t ce_get_desc_size(struct hif_softc *scn, + uint8_t ring_type) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + return hif_state->ce_services->ce_get_desc_size(ring_type); +} + + +static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state, + uint8_t ring_type, uint32_t nentries) +{ + uint32_t ce_nbytes; + char *ptr; + qdf_dma_addr_t base_addr; + struct CE_ring_state *ce_ring; + uint32_t desc_size; + struct hif_softc *scn = CE_state->scn; + + ce_nbytes = sizeof(struct CE_ring_state) + + (nentries * sizeof(void *)); + ptr = qdf_mem_malloc(ce_nbytes); + if (!ptr) + return NULL; + + ce_ring = (struct CE_ring_state *)ptr; + ptr += sizeof(struct CE_ring_state); + ce_ring->nentries = nentries; + ce_ring->nentries_mask = nentries - 1; + + ce_ring->low_water_mark_nentries = 0; + ce_ring->high_water_mark_nentries = nentries; + ce_ring->per_transfer_context = (void **)ptr; + + desc_size = ce_get_desc_size(scn, ring_type); + + /* Legacy platforms that do not support cache + * coherent DMA are unsupported + */ + if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr, + ce_ring, nentries, + desc_size) != + QDF_STATUS_SUCCESS) { + 
HIF_ERROR("%s: ring has no DMA mem", + __func__); + qdf_mem_free(ce_ring); + return NULL; + } + ce_ring->base_addr_CE_space_unaligned = base_addr; + + /* Correctly initialize memory to 0 to + * prevent garbage data crashing system + * when download firmware + */ + qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned, + nentries * desc_size + + CE_DESC_RING_ALIGN); + + if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) { + + ce_ring->base_addr_CE_space = + (ce_ring->base_addr_CE_space_unaligned + + CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1); + + ce_ring->base_addr_owner_space = (void *) + (((size_t) ce_ring->base_addr_owner_space_unaligned + + CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1)); + } else { + ce_ring->base_addr_CE_space = + ce_ring->base_addr_CE_space_unaligned; + ce_ring->base_addr_owner_space = + ce_ring->base_addr_owner_space_unaligned; + } + + return ce_ring; +} + +static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type, + uint32_t ce_id, struct CE_ring_state *ring, + struct CE_attr *attr) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id, + ring, attr); +} + +int hif_ce_bus_early_suspend(struct hif_softc *scn) +{ + uint8_t ul_pipe, dl_pipe; + int ce_id, status, ul_is_polled, dl_is_polled; + struct CE_state *ce_state; + + status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC, + &ul_pipe, &dl_pipe, + &ul_is_polled, &dl_is_polled); + if (status) { + HIF_ERROR("%s: pipe_mapping failure", __func__); + return status; + } + + for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { + if (ce_id == ul_pipe) + continue; + if (ce_id == dl_pipe) + continue; + + ce_state = scn->ce_id_to_state[ce_id]; + qdf_spin_lock_bh(&ce_state->ce_index_lock); + if (ce_state->state == CE_RUNNING) + ce_state->state = CE_PAUSED; + qdf_spin_unlock_bh(&ce_state->ce_index_lock); + } + + return status; +} + +int hif_ce_bus_late_resume(struct hif_softc *scn) +{ 
	int ce_id;
	struct CE_state *ce_state;
	int write_index;
	bool index_updated;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_PENDING) {
			/* flush the write index that was held back while
			 * the bus was suspended, then resume the CE
			 */
			write_index = ce_state->src_ring->write_index;
			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
						  write_index);
			ce_state->state = CE_RUNNING;
			index_updated = true;
		} else {
			index_updated = false;
		}

		if (ce_state->state == CE_PAUSED)
			ce_state->state = CE_RUNNING;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		/* record outside the lock; only needs the saved index */
		if (index_updated)
			hif_record_ce_desc_event(scn, ce_id,
						 RESUME_WRITE_INDEX_UPDATE,
						 NULL, NULL, write_index, 0);
	}

	return 0;
}

/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
	struct CE_state *ce_state = context;
	struct hif_softc *scn = ce_state->scn;
	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&ce_softc->pipe_info[ce_state->id];

	hif_post_recv_buffers_for_pipe(pipe_info);
}

#if HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
 * the CE descriptors.
+ * Allocate HIF_CE_HISTORY_MAX records by CE_DEBUG_MAX_DATA_BUF_SIZE + * @scn: hif scn handle + * ce_id: Copy Engine Id + * + * Return: QDF_STATUS + */ +QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id) +{ + struct hif_ce_desc_event *event = NULL; + struct hif_ce_desc_event *hist_ev = NULL; + uint32_t index = 0; + + hist_ev = + (struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id]; + + if (!hist_ev) + return QDF_STATUS_E_NOMEM; + + for (index = 0; index < HIF_CE_HISTORY_MAX; index++) { + event = &hist_ev[index]; + event->data = + (uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE); + if (event->data == NULL) + return QDF_STATUS_E_NOMEM; + } + return QDF_STATUS_SUCCESS; +} + +/** + * free_mem_ce_debug_hist_data() - Free mem of the data pointed by + * the CE descriptors. + * @scn: hif scn handle + * ce_id: Copy Engine Id + * + * Return: + */ +void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id) +{ + struct hif_ce_desc_event *event = NULL; + struct hif_ce_desc_event *hist_ev = NULL; + uint32_t index = 0; + + hist_ev = + (struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id]; + + if (!hist_ev) + return; + + for (index = 0; index < HIF_CE_HISTORY_MAX; index++) { + event = &hist_ev[index]; + if (event->data != NULL) + qdf_mem_free(event->data); + event->data = NULL; + event = NULL; + } +} +#endif /* HIF_CE_DEBUG_DATA_BUF */ + +#if defined(HIF_CONFIG_SLUB_DEBUG_ON) /* MCL */ +struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX]; + +/** + * alloc_mem_ce_debug_history() - Allocate CE descriptor history + * @scn: hif scn handle + * @ce_id: Copy Engine Id + * + * Return: QDF_STATUS + */ +static QDF_STATUS +alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id) +{ + struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; + + ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id]; + ce_hist->enable[ce_id] = 1; + + return QDF_STATUS_SUCCESS; +} + +/** + * 
free_mem_ce_debug_history() - Free CE descriptor history + * @scn: hif scn handle + * @ce_id: Copy Engine Id + * + * Return: None + */ +static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id) +{ + struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; + + ce_hist->enable[ce_id] = 0; + ce_hist->hist_ev[ce_id] = NULL; +} + +#elif HIF_CE_DEBUG_DATA_BUF /* WIN */ + +static QDF_STATUS +alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) +{ + scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *) + qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event)); + + if (scn->hif_ce_desc_hist.hist_ev[CE_id] == NULL) { + scn->hif_ce_desc_hist.enable[CE_id] = 0; + return QDF_STATUS_E_NOMEM; + } else { + scn->hif_ce_desc_hist.enable[CE_id] = 1; + return QDF_STATUS_SUCCESS; + } +} + +static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) +{ + struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; + struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id]; + + if (!hist_ev) + return; + + if (ce_hist->data_enable[CE_id] == 1) { + ce_hist->data_enable[CE_id] = 0; + free_mem_ce_debug_hist_data(scn, CE_id); + } + + ce_hist->enable[CE_id] = 0; + qdf_mem_free(ce_hist->hist_ev[CE_id]); + ce_hist->hist_ev[CE_id] = NULL; +} + +#else /* Disabled */ + +static inline QDF_STATUS +alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void +free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { } +#endif + +#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF +/** + * reset_ce_debug_history() - reset the index and ce id used for dumping the + * CE records on the console using sysfs. 
 * @scn: hif scn handle
 *
 * Return: none
 */
static inline void reset_ce_debug_history(struct hif_softc *scn)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing
	 */
	ce_hist->hist_index = 0;
	ce_hist->hist_id = 0;
}
#else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
static inline void reset_ce_debug_history(struct hif_softc *scn) { }
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
+ */ +struct CE_handle *ce_init(struct hif_softc *scn, + unsigned int CE_id, struct CE_attr *attr) +{ + struct CE_state *CE_state; + uint32_t ctrl_addr; + unsigned int nentries; + bool malloc_CE_state = false; + bool malloc_src_ring = false; + int status; + + QDF_ASSERT(CE_id < scn->ce_count); + ctrl_addr = CE_BASE_ADDRESS(CE_id); + CE_state = scn->ce_id_to_state[CE_id]; + + if (!CE_state) { + CE_state = + (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state)); + if (!CE_state) { + HIF_ERROR("%s: CE_state has no mem", __func__); + return NULL; + } + malloc_CE_state = true; + qdf_spinlock_create(&CE_state->ce_index_lock); + + CE_state->id = CE_id; + CE_state->ctrl_addr = ctrl_addr; + CE_state->state = CE_RUNNING; + CE_state->attr_flags = attr->flags; + } + CE_state->scn = scn; + + qdf_atomic_init(&CE_state->rx_pending); + if (attr == NULL) { + /* Already initialized; caller wants the handle */ + return (struct CE_handle *)CE_state; + } + + if (CE_state->src_sz_max) + QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max); + else + CE_state->src_sz_max = attr->src_sz_max; + + ce_init_ce_desc_event_log(scn, CE_id, + attr->src_nentries + attr->dest_nentries); + + /* source ring setup */ + nentries = attr->src_nentries; + if (nentries) { + struct CE_ring_state *src_ring; + + nentries = roundup_pwr2(nentries); + if (CE_state->src_ring) { + QDF_ASSERT(CE_state->src_ring->nentries == nentries); + } else { + src_ring = CE_state->src_ring = + ce_alloc_ring_state(CE_state, + CE_RING_SRC, + nentries); + if (!src_ring) { + /* cannot allocate src ring. If the + * CE_state is allocated locally free + * CE_State and return error. + */ + HIF_ERROR("%s: src ring has no mem", __func__); + if (malloc_CE_state) { + /* allocated CE_state locally */ + qdf_mem_free(CE_state); + malloc_CE_state = false; + } + return NULL; + } + /* we can allocate src ring. 
Mark that the src ring is + * allocated locally + */ + malloc_src_ring = true; + + /* + * Also allocate a shadow src ring in + * regular mem to use for faster access. + */ + src_ring->shadow_base_unaligned = + qdf_mem_malloc(nentries * + sizeof(struct CE_src_desc) + + CE_DESC_RING_ALIGN); + if (src_ring->shadow_base_unaligned == NULL) { + HIF_ERROR("%s: src ring no shadow_base mem", + __func__); + goto error_no_dma_mem; + } + src_ring->shadow_base = (struct CE_src_desc *) + (((size_t) src_ring->shadow_base_unaligned + + CE_DESC_RING_ALIGN - 1) & + ~(CE_DESC_RING_ALIGN - 1)); + + status = ce_ring_setup(scn, CE_RING_SRC, CE_id, + src_ring, attr); + if (status < 0) + goto error_target_access; + + ce_ring_test_initial_indexes(CE_id, src_ring, + "src_ring"); + } + } + + /* destination ring setup */ + nentries = attr->dest_nentries; + if (nentries) { + struct CE_ring_state *dest_ring; + + nentries = roundup_pwr2(nentries); + if (CE_state->dest_ring) { + QDF_ASSERT(CE_state->dest_ring->nentries == nentries); + } else { + dest_ring = CE_state->dest_ring = + ce_alloc_ring_state(CE_state, + CE_RING_DEST, + nentries); + if (!dest_ring) { + /* cannot allocate dst ring. If the CE_state + * or src ring is allocated locally free + * CE_State and src ring and return error. + */ + HIF_ERROR("%s: dest ring has no mem", + __func__); + goto error_no_dma_mem; + } + + status = ce_ring_setup(scn, CE_RING_DEST, CE_id, + dest_ring, attr); + if (status < 0) + goto error_target_access; + + ce_ring_test_initial_indexes(CE_id, dest_ring, + "dest_ring"); + + /* For srng based target, init status ring here */ + if (ce_srng_based(CE_state->scn)) { + CE_state->status_ring = + ce_alloc_ring_state(CE_state, + CE_RING_STATUS, + nentries); + if (CE_state->status_ring == NULL) { + /*Allocation failed. 
Cleanup*/ + qdf_mem_free(CE_state->dest_ring); + if (malloc_src_ring) { + qdf_mem_free + (CE_state->src_ring); + CE_state->src_ring = NULL; + malloc_src_ring = false; + } + if (malloc_CE_state) { + /* allocated CE_state locally */ + scn->ce_id_to_state[CE_id] = + NULL; + qdf_mem_free(CE_state); + malloc_CE_state = false; + } + + return NULL; + } + + status = ce_ring_setup(scn, CE_RING_STATUS, + CE_id, CE_state->status_ring, + attr); + if (status < 0) + goto error_target_access; + + } + + /* epping */ + /* poll timer */ + if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL) || + scn->polled_mode_on) { + qdf_timer_init(scn->qdf_dev, + &CE_state->poll_timer, + ce_poll_timeout, + CE_state, + QDF_TIMER_TYPE_SW); + CE_state->timer_inited = true; + qdf_timer_mod(&CE_state->poll_timer, + CE_POLL_TIMEOUT); + } + } + } + + if (!ce_srng_based(scn)) { + /* Enable CE error interrupts */ + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + goto error_target_access; + CE_ERROR_INTR_ENABLE(scn, ctrl_addr); + if (Q_TARGET_ACCESS_END(scn) < 0) + goto error_target_access; + } + + qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work, + ce_oom_recovery, CE_state); + + /* update the htt_data attribute */ + ce_mark_datapath(CE_state); + scn->ce_id_to_state[CE_id] = CE_state; + + alloc_mem_ce_debug_history(scn, CE_id); + + return (struct CE_handle *)CE_state; + +error_target_access: +error_no_dma_mem: + ce_fini((struct CE_handle *)CE_state); + return NULL; +} + +#ifdef WLAN_FEATURE_FASTPATH +/** + * hif_enable_fastpath() Update that we have enabled fastpath mode + * @hif_ctx: HIF context + * + * For use in data path + * + * Retrun: void + */ +void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (ce_srng_based(scn)) { + HIF_INFO("%s, srng rings do not support fastpath", __func__); + return; + } + HIF_DBG("%s, Enabling fastpath mode", __func__); + scn->fastpath_mode_on = true; +} + +void hif_enable_polled_mode(struct hif_opaque_softc 
*hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	HIF_DBG("%s, Enabling polled mode", __func__);

	scn->polled_mode_on = true;
}

/**
 * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->fastpath_mode_on;
}

/**
 * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
 * @hif_ctx: HIF Context
 *
 * Return: bool
 */
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->polled_mode_on;
}

/**
 * hif_get_ce_handle - API to get CE handle for FastPath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: void
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}

/**
 * ce_h2t_tx_ce_cleanup() Place holder function for H2T CE cleanup.
 * No processing is required inside this function.
 * @ce_hdl: Copy engine handle
 * Using an assert, this function makes sure that,
 * the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring
 * therefore no locking is needed.
+ * + * Return: none + */ +void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl) +{ + struct CE_state *ce_state = (struct CE_state *)ce_hdl; + struct CE_ring_state *src_ring = ce_state->src_ring; + struct hif_softc *sc = ce_state->scn; + uint32_t sw_index, write_index; + + if (hif_is_nss_wifi_enabled(sc)) + return; + + if (sc->fastpath_mode_on && ce_state->htt_tx_data) { + HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE", + __func__, __LINE__); + sw_index = src_ring->sw_index; + write_index = src_ring->sw_index; + + /* At this point Tx CE should be clean */ + qdf_assert_always(sw_index == write_index); + } +} + +/** + * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue. + * @ce_hdl: Handle to CE + * + * These buffers are never allocated on the fly, but + * are allocated only once during HIF start and freed + * only once during HIF stop. + * NOTE: + * The assumption here is there is no in-flight DMA in progress + * currently, so that buffers can be freed up safely. + * + * Return: NONE + */ +void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl) +{ + struct CE_state *ce_state = (struct CE_state *)ce_hdl; + struct CE_ring_state *dst_ring = ce_state->dest_ring; + qdf_nbuf_t nbuf; + int i; + + if (ce_state->scn->fastpath_mode_on == false) + return; + + if (!ce_state->htt_rx_data) + return; + + /* + * when fastpath_mode is on and for datapath CEs. Unlike other CE's, + * this CE is completely full: does not leave one blank space, to + * distinguish between empty queue & full queue. So free all the + * entries. + */ + for (i = 0; i < dst_ring->nentries; i++) { + nbuf = dst_ring->per_transfer_context[i]; + + /* + * The reasons for doing this check are: + * 1) Protect against calling cleanup before allocating buffers + * 2) In a corner case, FASTPATH_mode_on may be set, but we + * could have a partially filled ring, because of a memory + * allocation failure in the middle of allocating ring. 
 * This check accounts for that case, checking
 * fastpath_mode_on flag or started flag would not have
 * covered that case. This is not in performance path,
 * so OK to do this.
 */
		if (nbuf) {
			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
		}
	}
}

/**
 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
 * @scn: HIF handle
 *
 * Datapath Rx CEs are special case, where we reuse all the message buffers.
 * Hence we have to post all the entries in the pipe, even, in the beginning
 * unlike for other CE pipes where one less than dest_nentries are filled in
 * the beginning.
 *
 * Return: None
 */
static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
	int pipe_num;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	/* No adjustment needed unless fastpath was enabled at init time */
	if (scn->fastpath_mode_on == false)
		return;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info =
			&hif_state->pipe_info[pipe_num];
		struct CE_state *ce_state =
			scn->ce_id_to_state[pipe_info->pipe_num];

		/* only the HTT Rx data CEs recycle their buffers */
		if (ce_state->htt_rx_data)
			atomic_inc(&pipe_info->recv_bufs_needed);
	}
}
#else
/* Stubs used when WLAN_FEATURE_FASTPATH is compiled out. */
static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}

static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

/**
 * ce_fini() - free all host-side resources owned by a copy engine
 * @copyeng: opaque CE handle for the engine being torn down
 *
 * Unhooks the CE from the id lookup table, stops poll-mode processing,
 * and releases whichever of the source/destination/status rings were
 * allocated, including their shadow copies and descriptor memory.
 * Must only be called once all interrupt/tasklet contexts for this CE
 * have been quiesced.
 */
void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct hif_softc *scn = CE_state->scn;
	uint32_t desc_size;

	/* remember whether the poll timer was armed before clearing it */
	bool inited = CE_state->timer_inited;
	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	/* Set the flag to false first to stop processing in ce_poll_timeout */
	CE_state->timer_inited = false;
	qdf_lro_deinit(CE_state->lro_data);

	if (CE_state->src_ring) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
		if (CE_state->src_ring->shadow_base_unaligned)
			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->src_ring,
					  desc_size);
		qdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		/* Cleanup the datapath Rx ring */
		ce_t2h_msg_ce_cleanup(copyeng);

		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->dest_ring,
					  desc_size);
		qdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (inited) {
			qdf_timer_free(&CE_state->poll_timer);
		}
	}
	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
		/* Cleanup the datapath Tx ring
		 * NOTE(review): ce_h2t_tx_ce_cleanup() may already have run
		 * in the src_ring branch above; confirm the repeat call for
		 * the srng status ring is intentional.
		 */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->status_ring->shadow_base_unaligned)
			qdf_mem_free(
				CE_state->status_ring->shadow_base_unaligned);

		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
		if (CE_state->status_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->status_ring,
					  desc_size);
		qdf_mem_free(CE_state->status_ring);
	}

	free_mem_ce_debug_history(scn, CE_id);
	reset_ce_debug_history(scn);
	ce_deinit_ce_desc_event_log(scn, CE_id);

	qdf_spinlock_destroy(&CE_state->ce_index_lock);
	qdf_mem_free(CE_state);
}

/**
 * hif_detach_htc() - drop all HTC message callbacks
 * @hif_ctx: opaque HIF handle
 *
 * Zeroes both the pending and the currently installed callback sets so
 * no further send/receive completions are delivered to the upper layer.
 */
void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	qdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}

/* Send the first nbytes bytes of the buffer */
QDF_STATUS
hif_send_head(struct hif_opaque_softc *hif_ctx,
	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
qdf_nbuf_t nbuf, unsigned int data_attr) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); + struct CE_handle *ce_hdl = pipe_info->ce_hdl; + int bytes = nbytes, nfrags = 0; + struct ce_sendlist sendlist; + int status, i = 0; + unsigned int mux_id = 0; + + QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf)); + + transfer_id = + (mux_id & MUX_ID_MASK) | + (transfer_id & TRANSACTION_ID_MASK); + data_attr &= DESC_DATA_FLAG_MASK; + /* + * The common case involves sending multiple fragments within a + * single download (the tx descriptor and the tx frame header). + * So, optimize for the case of multiple fragments by not even + * checking whether it's necessary to use a sendlist. + * The overhead of using a sendlist for a single buffer download + * is not a big deal, since it happens rarely (for WMI messages). + */ + ce_sendlist_init(&sendlist); + do { + qdf_dma_addr_t frag_paddr; + int frag_bytes; + + frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags); + frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags); + /* + * Clear the packet offset for all but the first CE desc. + */ + if (i++ > 0) + data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M; + + status = ce_sendlist_buf_add(&sendlist, frag_paddr, + frag_bytes > + bytes ? bytes : frag_bytes, + qdf_nbuf_get_frag_is_wordstream + (nbuf, + nfrags) ? 
0 : + CE_SEND_FLAG_SWAP_DISABLE, + data_attr); + if (status != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: error, frag_num %d larger than limit", + __func__, nfrags); + return status; + } + bytes -= frag_bytes; + nfrags++; + } while (bytes > 0); + + /* Make sure we have resources to handle this request */ + qdf_spin_lock_bh(&pipe_info->completion_freeq_lock); + if (pipe_info->num_sends_allowed < nfrags) { + qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); + ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE); + return QDF_STATUS_E_RESOURCES; + } + pipe_info->num_sends_allowed -= nfrags; + qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); + + if (qdf_unlikely(ce_hdl == NULL)) { + HIF_ERROR("%s: error CE handle is null", __func__); + return A_ERROR; + } + + QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF); + DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD, + QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf), + sizeof(qdf_nbuf_data(nbuf)), QDF_TX)); + status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id); + QDF_ASSERT(status == QDF_STATUS_SUCCESS); + + return status; +} + +void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe, + int force) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + + if (!force) { + int resources; + /* + * Decide whether to actually poll for completions, or just + * wait for a later chance. If there seem to be plenty of + * resources left, then just wait, since checking involves + * reading a CE register, which is a relatively expensive + * operation. + */ + resources = hif_get_free_queue_number(hif_ctx, pipe); + /* + * If at least 50% of the total resources are still available, + * don't bother checking again yet. 
+ */ + if (resources > (hif_state->host_ce_config[pipe].src_nentries >> + 1)) + return; + } +#if ATH_11AC_TXCOMPACT + ce_per_engine_servicereap(scn, pipe); +#else + ce_per_engine_service(scn, pipe); +#endif +} + +uint16_t +hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); + uint16_t rv; + + qdf_spin_lock_bh(&pipe_info->completion_freeq_lock); + rv = pipe_info->num_sends_allowed; + qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); + return rv; +} + +/* Called by lower (CE) layer when a send to Target completes. */ +static void +hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context, + void *transfer_context, qdf_dma_addr_t CE_data, + unsigned int nbytes, unsigned int transfer_id, + unsigned int sw_index, unsigned int hw_index, + unsigned int toeplitz_hash_result) +{ + struct HIF_CE_pipe_info *pipe_info = + (struct HIF_CE_pipe_info *)ce_context; + struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state; + struct hif_softc *scn = HIF_GET_SOFTC(hif_state); + unsigned int sw_idx = sw_index, hw_idx = hw_index; + struct hif_msg_callbacks *msg_callbacks = + &pipe_info->pipe_callbacks; + + do { + /* + * The upper layer callback will be triggered + * when last fragment is complteted. 
+ */ + if (transfer_context != CE_SENDLIST_ITEM_CTXT) { + if (scn->target_status == TARGET_STATUS_RESET) { + + qdf_nbuf_unmap_single(scn->qdf_dev, + transfer_context, + QDF_DMA_TO_DEVICE); + qdf_nbuf_free(transfer_context); + } else + msg_callbacks->txCompletionHandler( + msg_callbacks->Context, + transfer_context, transfer_id, + toeplitz_hash_result); + } + + qdf_spin_lock(&pipe_info->completion_freeq_lock); + pipe_info->num_sends_allowed++; + qdf_spin_unlock(&pipe_info->completion_freeq_lock); + } while (ce_completed_send_next(copyeng, + &ce_context, &transfer_context, + &CE_data, &nbytes, &transfer_id, + &sw_idx, &hw_idx, + &toeplitz_hash_result) == QDF_STATUS_SUCCESS); +} + +/** + * hif_ce_do_recv(): send message from copy engine to upper layers + * @msg_callbacks: structure containing callback and callback context + * @netbuff: skb containing message + * @nbytes: number of bytes in the message + * @pipe_info: used for the pipe_number info + * + * Checks the packet length, configures the length in the netbuff, + * and calls the upper layer callback. + * + * return: None + */ +static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks, + qdf_nbuf_t netbuf, int nbytes, + struct HIF_CE_pipe_info *pipe_info) { + if (nbytes <= pipe_info->buf_sz) { + qdf_nbuf_set_pktlen(netbuf, nbytes); + msg_callbacks-> + rxCompletionHandler(msg_callbacks->Context, + netbuf, pipe_info->pipe_num); + } else { + HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d", + __func__, netbuf, nbytes); + + qdf_nbuf_free(netbuf); + } +} + +/* Called by lower (CE) layer when data is received from the Target. 
 */
static void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
#ifdef HIF_PCI
	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
#endif
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
#ifdef HIF_PCI
		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
#endif
		qdf_nbuf_unmap_single(scn->qdf_dev,
				      (qdf_nbuf_t) transfer_context,
				      QDF_DMA_FROM_DEVICE);

		/* replenish the ring before handing the buffer upstream */
		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (scn->target_status == TARGET_STATUS_RESET)
			qdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set up force_break flag if num of receives reaches
		 * MAX_NUM_OF_RECEIVES
		 */
		ce_state->receive_count++;
		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == QDF_STATUS_SUCCESS);

}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

/**
 * hif_post_init() - stash the upper layer's message callbacks
 * @hif_ctx: opaque HIF handle
 * @unused: unused
 * @callbacks: callback set to save for later installation
 *
 * The callbacks are only copied to the "pending" slot here; they are
 * installed as "current" later by hif_msg_callbacks_install().
 */
void
hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		     sizeof(hif_state->msg_callbacks_pending));

}

/**
 * hif_completion_thread_startup() - register CE send/recv completion handlers
 * @hif_state: HIF CE state
 *
 * Walks every configured CE pipe (except the diagnostic CE) and hooks
 * hif_pci_ce_send_done()/hif_pci_ce_recv_data() into the CE layer, seeds
 * per-pipe send credits, and snapshots the current message callbacks into
 * each pipe.
 *
 * Return: 0 on success, -EINVAL/-EFAULT on bad state
 */
static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
	struct CE_handle *ce_diag = hif_state->ce_diag;
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;

	/* daemonize("hif_compl_thread"); */

	if (scn->ce_count == 0) {
		HIF_ERROR("%s: Invalid ce_count", __func__);
		return -EINVAL;
	}

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		HIF_ERROR("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr attr;
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl == ce_diag)
			continue;       /* Handle Diagnostic CE specially */
		attr = hif_state->host_ce_config[pipe_num];
		if (attr.src_nentries) {
			/* pipe used to send to target */
			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
				__func__, pipe_num, pipe_info);
			ce_send_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_send_done, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
			/* one descriptor is reserved; credits = nentries-1 */
			pipe_info->num_sends_allowed = attr.src_nentries - 1;
		}
		if (attr.dest_nentries) {
			/* pipe used to receive from target */
			ce_recv_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_recv_data, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
		}

		if (attr.src_nentries)
			qdf_spinlock_create(&pipe_info->completion_freeq_lock);

		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
			     sizeof(pipe_info->pipe_callbacks));
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	return 0;
}

/*
 * Install pending msg callbacks.
 *
 * TBDXXX: This hack is needed because upper layers install msg callbacks
 * for use with HTC before BMI is done; yet this HIF implementation
 * needs to continue to use BMI msg callbacks. Really, upper layers
 * should not register HTC callbacks until AFTER BMI phase.
+ */ +static void hif_msg_callbacks_install(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + qdf_mem_copy(&hif_state->msg_callbacks_current, + &hif_state->msg_callbacks_pending, + sizeof(hif_state->msg_callbacks_pending)); +} + +void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe, + uint8_t *DLPipe) +{ + int ul_is_polled, dl_is_polled; + + (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC, + ULPipe, DLPipe, &ul_is_polled, &dl_is_polled); +} + +/** + * hif_dump_pipe_debug_count() - Log error count + * @scn: hif_softc pointer. + * + * Output the pipe error counts of each pipe to log file + * + * Return: N/A + */ +void hif_dump_pipe_debug_count(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + int pipe_num; + + if (hif_state == NULL) { + HIF_ERROR("%s hif_state is NULL", __func__); + return; + } + for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { + struct HIF_CE_pipe_info *pipe_info; + + pipe_info = &hif_state->pipe_info[pipe_num]; + + if (pipe_info->nbuf_alloc_err_count > 0 || + pipe_info->nbuf_dma_err_count > 0 || + pipe_info->nbuf_ce_enqueue_err_count) + HIF_ERROR( + "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u", + __func__, pipe_info->pipe_num, + atomic_read(&pipe_info->recv_bufs_needed), + pipe_info->nbuf_alloc_err_count, + pipe_info->nbuf_dma_err_count, + pipe_info->nbuf_ce_enqueue_err_count); + } +} + +static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info, + void *nbuf, uint32_t *error_cnt, + enum hif_ce_event_type failure_type, + const char *failure_type_string) +{ + int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed); + struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl; + struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); + int ce_id = CE_state->id; + uint32_t error_cnt_tmp; + + 
qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); + error_cnt_tmp = ++(*error_cnt); + qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); + HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s", + __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp, + failure_type_string); + hif_record_ce_desc_event(scn, ce_id, failure_type, + NULL, nbuf, bufs_needed_tmp, 0); + /* if we fail to allocate the last buffer for an rx pipe, + * there is no trigger to refill the ce and we will + * eventually crash + */ + if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1) + qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work); + +} + + + + +QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info) +{ + struct CE_handle *ce_hdl; + qdf_size_t buf_sz; + struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); + QDF_STATUS status; + uint32_t bufs_posted = 0; + + buf_sz = pipe_info->buf_sz; + if (buf_sz == 0) { + /* Unused Copy Engine */ + return QDF_STATUS_SUCCESS; + } + + ce_hdl = pipe_info->ce_hdl; + + qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); + while (atomic_read(&pipe_info->recv_bufs_needed) > 0) { + qdf_dma_addr_t CE_data; /* CE space buffer address */ + qdf_nbuf_t nbuf; + + atomic_dec(&pipe_info->recv_bufs_needed); + qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); + + nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false); + if (!nbuf) { + hif_post_recv_buffers_failure(pipe_info, nbuf, + &pipe_info->nbuf_alloc_err_count, + HIF_RX_NBUF_ALLOC_FAILURE, + "HIF_RX_NBUF_ALLOC_FAILURE"); + return QDF_STATUS_E_NOMEM; + } + + /* + * qdf_nbuf_peek_header(nbuf, &data, &unused); + * CE_data = dma_map_single(dev, data, buf_sz, ); + * DMA_FROM_DEVICE); + */ + status = qdf_nbuf_map_single(scn->qdf_dev, nbuf, + QDF_DMA_FROM_DEVICE); + + if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) { + hif_post_recv_buffers_failure(pipe_info, nbuf, + &pipe_info->nbuf_dma_err_count, + HIF_RX_NBUF_MAP_FAILURE, + 
"HIF_RX_NBUF_MAP_FAILURE"); + qdf_nbuf_free(nbuf); + return status; + } + + CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0); + + qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data, + buf_sz, DMA_FROM_DEVICE); + status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data); + if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) { + hif_post_recv_buffers_failure(pipe_info, nbuf, + &pipe_info->nbuf_ce_enqueue_err_count, + HIF_RX_NBUF_ENQUEUE_FAILURE, + "HIF_RX_NBUF_ENQUEUE_FAILURE"); + + qdf_nbuf_unmap_single(scn->qdf_dev, nbuf, + QDF_DMA_FROM_DEVICE); + qdf_nbuf_free(nbuf); + return status; + } + + qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); + bufs_posted++; + } + pipe_info->nbuf_alloc_err_count = + (pipe_info->nbuf_alloc_err_count > bufs_posted) ? + pipe_info->nbuf_alloc_err_count - bufs_posted : 0; + pipe_info->nbuf_dma_err_count = + (pipe_info->nbuf_dma_err_count > bufs_posted) ? + pipe_info->nbuf_dma_err_count - bufs_posted : 0; + pipe_info->nbuf_ce_enqueue_err_count = + (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ? + pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0; + + qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); + + return QDF_STATUS_SUCCESS; +} + +/* + * Try to post all desired receive buffers for all pipes. + * Returns 0 for non fastpath rx copy engine as + * oom_allocation_work will be scheduled to recover any + * failures, non-zero if unable to completely replenish + * receive buffers for fastpath rx Copy engine. 
+ */ +QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + int pipe_num; + struct CE_state *ce_state = NULL; + QDF_STATUS qdf_status; + + A_TARGET_ACCESS_LIKELY(scn); + for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { + struct HIF_CE_pipe_info *pipe_info; + + ce_state = scn->ce_id_to_state[pipe_num]; + pipe_info = &hif_state->pipe_info[pipe_num]; + + if (hif_is_nss_wifi_enabled(scn) && + ce_state && (ce_state->htt_rx_data)) + continue; + + qdf_status = hif_post_recv_buffers_for_pipe(pipe_info); + if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state && + ce_state->htt_rx_data && + scn->fastpath_mode_on) { + A_TARGET_ACCESS_UNLIKELY(scn); + return qdf_status; + } + } + + A_TARGET_ACCESS_UNLIKELY(scn); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; + + hif_update_fastpath_recv_bufs_cnt(scn); + + hif_msg_callbacks_install(scn); + + if (hif_completion_thread_startup(hif_state)) + return QDF_STATUS_E_FAILURE; + + /* enable buffer cleanup */ + hif_state->started = true; + + /* Post buffers once to start things off. 
*/ + qdf_status = hif_post_recv_buffers(scn); + if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { + /* cleanup is done in hif_ce_disable */ + HIF_ERROR("%s:failed to post buffers", __func__); + return qdf_status; + } + + return qdf_status; +} + +static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info) +{ + struct hif_softc *scn; + struct CE_handle *ce_hdl; + uint32_t buf_sz; + struct HIF_CE_state *hif_state; + qdf_nbuf_t netbuf; + qdf_dma_addr_t CE_data; + void *per_CE_context; + + buf_sz = pipe_info->buf_sz; + /* Unused Copy Engine */ + if (buf_sz == 0) + return; + + + hif_state = pipe_info->HIF_CE_state; + if (!hif_state->started) + return; + + scn = HIF_GET_SOFTC(hif_state); + ce_hdl = pipe_info->ce_hdl; + + if (scn->qdf_dev == NULL) + return; + while (ce_revoke_recv_next + (ce_hdl, &per_CE_context, (void **)&netbuf, + &CE_data) == QDF_STATUS_SUCCESS) { + if (netbuf) { + qdf_nbuf_unmap_single(scn->qdf_dev, netbuf, + QDF_DMA_FROM_DEVICE); + qdf_nbuf_free(netbuf); + } + } +} + +static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info) +{ + struct CE_handle *ce_hdl; + struct HIF_CE_state *hif_state; + struct hif_softc *scn; + qdf_nbuf_t netbuf; + void *per_CE_context; + qdf_dma_addr_t CE_data; + unsigned int nbytes; + unsigned int id; + uint32_t buf_sz; + uint32_t toeplitz_hash_result; + + buf_sz = pipe_info->buf_sz; + if (buf_sz == 0) { + /* Unused Copy Engine */ + return; + } + + hif_state = pipe_info->HIF_CE_state; + if (!hif_state->started) { + return; + } + + scn = HIF_GET_SOFTC(hif_state); + + ce_hdl = pipe_info->ce_hdl; + + while (ce_cancel_send_next + (ce_hdl, &per_CE_context, + (void **)&netbuf, &CE_data, &nbytes, + &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) { + if (netbuf != CE_SENDLIST_ITEM_CTXT) { + /* + * Packets enqueued by htt_h2t_ver_req_msg() and + * htt_h2t_rx_ring_cfg_msg_ll() have already been + * freed in htt_htc_misc_pkt_pool_free() in + * wlantl_close(), so do not free them here again + * by 
checking whether it's the endpoint + * which they are queued in. + */ + if (id == scn->htc_htt_tx_endpoint) + return; + /* Indicate the completion to higher + * layer to free the buffer + */ + if (pipe_info->pipe_callbacks.txCompletionHandler) + pipe_info->pipe_callbacks. + txCompletionHandler(pipe_info-> + pipe_callbacks.Context, + netbuf, id, toeplitz_hash_result); + } + } +} + +/* + * Cleanup residual buffers for device shutdown: + * buffers that were enqueued for receive + * buffers that were to be sent + * Note: Buffers that had completed but which were + * not yet processed are on a completion queue. They + * are handled when the completion thread shuts down. + */ +static void hif_buffer_cleanup(struct HIF_CE_state *hif_state) +{ + int pipe_num; + struct hif_softc *scn = HIF_GET_SOFTC(hif_state); + struct CE_state *ce_state; + + for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { + struct HIF_CE_pipe_info *pipe_info; + + ce_state = scn->ce_id_to_state[pipe_num]; + if (hif_is_nss_wifi_enabled(scn) && ce_state && + ((ce_state->htt_tx_data) || + (ce_state->htt_rx_data))) { + continue; + } + + pipe_info = &hif_state->pipe_info[pipe_num]; + hif_recv_buffer_cleanup_on_pipe(pipe_info); + hif_send_buffer_cleanup_on_pipe(pipe_info); + } +} + +void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + hif_buffer_cleanup(hif_state); +} + +static void hif_destroy_oom_work(struct hif_softc *scn) +{ + struct CE_state *ce_state; + int ce_id; + + for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { + ce_state = scn->ce_id_to_state[ce_id]; + if (ce_state) + qdf_destroy_work(scn->qdf_dev, + &ce_state->oom_allocation_work); + } +} + +void hif_ce_stop(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + int pipe_num; + + /* + * before cleaning up any memory, ensure irq & + * bottom half contexts will not be re-entered + */ 
+ hif_disable_isr(&scn->osc); + hif_destroy_oom_work(scn); + scn->hif_init_done = false; + + /* + * At this point, asynchronous threads are stopped, + * The Target should not DMA nor interrupt, Host code may + * not initiate anything more. So we just need to clean + * up Host-side state. + */ + + if (scn->athdiag_procfs_inited) { + athdiag_procfs_remove(); + scn->athdiag_procfs_inited = false; + } + + hif_buffer_cleanup(hif_state); + + for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { + struct HIF_CE_pipe_info *pipe_info; + struct CE_attr attr; + struct CE_handle *ce_diag = hif_state->ce_diag; + + pipe_info = &hif_state->pipe_info[pipe_num]; + if (pipe_info->ce_hdl) { + if (pipe_info->ce_hdl != ce_diag) { + attr = hif_state->host_ce_config[pipe_num]; + if (attr.src_nentries) + qdf_spinlock_destroy(&pipe_info-> + completion_freeq_lock); + } + ce_fini(pipe_info->ce_hdl); + pipe_info->ce_hdl = NULL; + pipe_info->buf_sz = 0; + qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); + } + } + + if (hif_state->sleep_timer_init) { + qdf_timer_stop(&hif_state->sleep_timer); + qdf_timer_free(&hif_state->sleep_timer); + hif_state->sleep_timer_init = false; + } + + hif_state->started = false; +} + + +/** + * hif_get_target_ce_config() - get copy engine configuration + * @target_ce_config_ret: basic copy engine configuration + * @target_ce_config_sz_ret: size of the basic configuration in bytes + * @target_service_to_ce_map_ret: service mapping for the copy engines + * @target_service_to_ce_map_sz_ret: size of the mapping in bytes + * @target_shadow_reg_cfg_ret: shadow register configuration + * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes + * + * providing accessor to these values outside of this file. + * currently these are stored in static pointers to const sections. + * there are multiple configurations that are selected from at compile time. + * Runtime selection would need to consider mode, target type and bus type. 
+ * + * Return: return by parameter. + */ +void hif_get_target_ce_config(struct hif_softc *scn, + struct CE_pipe_config **target_ce_config_ret, + uint32_t *target_ce_config_sz_ret, + struct service_to_pipe **target_service_to_ce_map_ret, + uint32_t *target_service_to_ce_map_sz_ret, + struct shadow_reg_cfg **target_shadow_reg_cfg_ret, + uint32_t *shadow_cfg_sz_ret) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + *target_ce_config_ret = hif_state->target_ce_config; + *target_ce_config_sz_ret = hif_state->target_ce_config_sz; + + hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret, + target_service_to_ce_map_sz_ret); + + if (target_shadow_reg_cfg_ret) + *target_shadow_reg_cfg_ret = target_shadow_reg_cfg; + + if (shadow_cfg_sz_ret) + *shadow_cfg_sz_ret = shadow_cfg_sz; +} + +#ifdef CONFIG_SHADOW_V2 +static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg) +{ + int i; + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: num_config %d\n", __func__, cfg->num_shadow_reg_v2_cfg); + + for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, + "%s: i %d, val %x\n", __func__, i, + cfg->shadow_reg_v2_cfg[i].addr); + } +} + +#else +static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg) +{ + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: CONFIG_SHADOW_V2 not defined\n", __func__); +} +#endif + +/** + * hif_wlan_enable(): call the platform driver to enable wlan + * @scn: HIF Context + * + * This function passes the con_mode and CE configuration to + * platform driver to enable wlan. 
+ * + * Return: linux error code + */ +int hif_wlan_enable(struct hif_softc *scn) +{ + struct pld_wlan_enable_cfg cfg; + enum pld_driver_mode mode; + uint32_t con_mode = hif_get_conparam(scn); + + hif_get_target_ce_config(scn, + (struct CE_pipe_config **)&cfg.ce_tgt_cfg, + &cfg.num_ce_tgt_cfg, + (struct service_to_pipe **)&cfg.ce_svc_cfg, + &cfg.num_ce_svc_pipe_cfg, + (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg, + &cfg.num_shadow_reg_cfg); + + /* translate from structure size to array size */ + cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config); + cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe); + cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg); + + hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg, + &cfg.num_shadow_reg_v2_cfg); + + hif_print_hal_shadow_register_cfg(&cfg); + + if (QDF_GLOBAL_FTM_MODE == con_mode) + mode = PLD_FTM; + else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode) + mode = PLD_COLDBOOT_CALIBRATION; + else if (QDF_IS_EPPING_ENABLED(con_mode)) + mode = PLD_EPPING; + else + mode = PLD_MISSION; + + if (BYPASS_QMI) + return 0; + else + return pld_wlan_enable(scn->qdf_dev->dev, &cfg, + mode, QWLAN_VERSIONSTR); +} + +#ifdef WLAN_FEATURE_EPPING + +#define CE_EPPING_USES_IRQ true + +void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state) +{ + if (CE_EPPING_USES_IRQ) + hif_state->host_ce_config = host_ce_config_wlan_epping_irq; + else + hif_state->host_ce_config = host_ce_config_wlan_epping_poll; + hif_state->target_ce_config = target_ce_config_wlan_epping; + hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping); + target_shadow_reg_cfg = target_shadow_reg_cfg_epping; + shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping); +} +#endif + +/** + * hif_ce_prepare_config() - load the correct static tables. + * @scn: hif context + * + * Epping uses different static attribute tables than mission mode. 
+ */ +void hif_ce_prepare_config(struct hif_softc *scn) +{ + uint32_t mode = hif_get_conparam(scn); + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + hif_state->ce_services = ce_services_attach(scn); + + scn->ce_count = HOST_CE_COUNT; + /* if epping is enabled we need to use the epping configuration. */ + if (QDF_IS_EPPING_ENABLED(mode)) { + hif_ce_prepare_epping_config(hif_state); + } + + switch (tgt_info->target_type) { + default: + hif_state->host_ce_config = host_ce_config_wlan; + hif_state->target_ce_config = target_ce_config_wlan; + hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan); + break; + case TARGET_TYPE_AR900B: + case TARGET_TYPE_QCA9984: + case TARGET_TYPE_IPQ4019: + case TARGET_TYPE_QCA9888: + if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) { + hif_state->host_ce_config = + host_lowdesc_ce_cfg_wlan_ar900b_nopktlog; + } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { + hif_state->host_ce_config = + host_lowdesc_ce_cfg_wlan_ar900b; + } else { + hif_state->host_ce_config = host_ce_config_wlan_ar900b; + } + + hif_state->target_ce_config = target_ce_config_wlan_ar900b; + hif_state->target_ce_config_sz = + sizeof(target_ce_config_wlan_ar900b); + + break; + + case TARGET_TYPE_AR9888: + case TARGET_TYPE_AR9888V2: + if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { + hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888; + } else { + hif_state->host_ce_config = host_ce_config_wlan_ar9888; + } + + hif_state->target_ce_config = target_ce_config_wlan_ar9888; + hif_state->target_ce_config_sz = + sizeof(target_ce_config_wlan_ar9888); + + break; + + case TARGET_TYPE_QCA8074: + if (scn->bus_type == QDF_BUS_TYPE_PCI) { + hif_state->host_ce_config = + host_ce_config_wlan_qca8074_pci; + hif_state->target_ce_config = + target_ce_config_wlan_qca8074_pci; + 
hif_state->target_ce_config_sz = + sizeof(target_ce_config_wlan_qca8074_pci); + } else { + hif_state->host_ce_config = host_ce_config_wlan_qca8074; + hif_state->target_ce_config = + target_ce_config_wlan_qca8074; + hif_state->target_ce_config_sz = + sizeof(target_ce_config_wlan_qca8074); + } + break; + case TARGET_TYPE_QCA6290: + hif_state->host_ce_config = host_ce_config_wlan_qca6290; + hif_state->target_ce_config = target_ce_config_wlan_qca6290; + hif_state->target_ce_config_sz = + sizeof(target_ce_config_wlan_qca6290); + + scn->ce_count = QCA_6290_CE_COUNT; + break; + case TARGET_TYPE_ADRASTEA: + if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) { + hif_state->host_ce_config = + host_lowdesc_ce_config_wlan_adrastea_nopktlog; + hif_state->target_ce_config = + target_lowdesc_ce_config_wlan_adrastea_nopktlog; + hif_state->target_ce_config_sz = + sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog); + } else { + hif_state->host_ce_config = + host_ce_config_wlan_adrastea; + hif_state->target_ce_config = + target_ce_config_wlan_adrastea; + hif_state->target_ce_config_sz = + sizeof(target_ce_config_wlan_adrastea); + } + break; + + } + QDF_BUG(scn->ce_count <= CE_COUNT_MAX); +} + +/** + * hif_ce_open() - do ce specific allocations + * @hif_sc: pointer to hif context + * + * return: 0 for success or QDF_STATUS_E_NOMEM + */ +QDF_STATUS hif_ce_open(struct hif_softc *hif_sc) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); + + qdf_spinlock_create(&hif_state->irq_reg_lock); + qdf_spinlock_create(&hif_state->keep_awake_lock); + return QDF_STATUS_SUCCESS; +} + +/** + * hif_ce_close() - do ce specific free + * @hif_sc: pointer to hif context + */ +void hif_ce_close(struct hif_softc *hif_sc) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); + + qdf_spinlock_destroy(&hif_state->irq_reg_lock); + qdf_spinlock_destroy(&hif_state->keep_awake_lock); +} + +/** + * hif_unconfig_ce() - ensure resources from hif_config_ce are freed + * @hif_sc: 
hif context + * + * uses state variables to support cleaning up when hif_config_ce fails. + */ +void hif_unconfig_ce(struct hif_softc *hif_sc) +{ + int pipe_num; + struct HIF_CE_pipe_info *pipe_info; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc); + + for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { + pipe_info = &hif_state->pipe_info[pipe_num]; + if (pipe_info->ce_hdl) { + ce_unregister_irq(hif_state, (1 << pipe_num)); + } + } + deinit_tasklet_workers(hif_hdl); + for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { + pipe_info = &hif_state->pipe_info[pipe_num]; + if (pipe_info->ce_hdl) { + ce_fini(pipe_info->ce_hdl); + pipe_info->ce_hdl = NULL; + pipe_info->buf_sz = 0; + qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); + } + } + if (hif_sc->athdiag_procfs_inited) { + athdiag_procfs_remove(); + hif_sc->athdiag_procfs_inited = false; + } +} + +#ifdef CONFIG_BYPASS_QMI +#define FW_SHARED_MEM (2 * 1024 * 1024) + +/** + * hif_post_static_buf_to_target() - post static buffer to WLAN FW + * @scn: pointer to HIF structure + * + * WLAN FW needs 2MB memory from DDR when QMI is disabled. 
+ * + * Return: void + */ +static void hif_post_static_buf_to_target(struct hif_softc *scn) +{ + void *target_va; + phys_addr_t target_pa; + + target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev, + FW_SHARED_MEM, &target_pa); + if (NULL == target_va) { + HIF_TRACE("Memory allocation failed could not post target buf"); + return; + } + hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa); + HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa); +} +#else +static inline void hif_post_static_buf_to_target(struct hif_softc *scn) +{ +} +#endif + +static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok, + bool wait_for_it) +{ + /* todo */ + return 0; +} + +/** + * hif_config_ce() - configure copy engines + * @scn: hif context + * + * Prepares fw, copy engine hardware and host sw according + * to the attributes selected by hif_ce_prepare_config. + * + * also calls athdiag_procfs_init + * + * return: 0 for success nonzero for failure. + */ +int hif_config_ce(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + struct HIF_CE_pipe_info *pipe_info; + int pipe_num; + struct CE_state *ce_state = NULL; + +#ifdef ADRASTEA_SHADOW_REGISTERS + int i; +#endif + QDF_STATUS rv = QDF_STATUS_SUCCESS; + + scn->notice_send = true; + scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM; + + hif_post_static_buf_to_target(scn); + + hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS; + + hif_config_rri_on_ddr(scn); + + if (ce_srng_based(scn)) + scn->bus_ops.hif_target_sleep_state_adjust = + &hif_srng_sleep_state_adjust; + + /* Initialise the CE debug history sysfs interface inputs ce_id and + * index. 
Disable data storing + */ + reset_ce_debug_history(scn); + + for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { + struct CE_attr *attr; + + pipe_info = &hif_state->pipe_info[pipe_num]; + pipe_info->pipe_num = pipe_num; + pipe_info->HIF_CE_state = hif_state; + attr = &hif_state->host_ce_config[pipe_num]; + + pipe_info->ce_hdl = ce_init(scn, pipe_num, attr); + ce_state = scn->ce_id_to_state[pipe_num]; + if (!ce_state) { + A_TARGET_ACCESS_UNLIKELY(scn); + goto err; + } + qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock); + QDF_ASSERT(pipe_info->ce_hdl != NULL); + if (pipe_info->ce_hdl == NULL) { + rv = QDF_STATUS_E_FAILURE; + A_TARGET_ACCESS_UNLIKELY(scn); + goto err; + } + + ce_state->lro_data = qdf_lro_init(); + + if (attr->flags & CE_ATTR_DIAG) { + /* Reserve the ultimate CE for + * Diagnostic Window support + */ + hif_state->ce_diag = pipe_info->ce_hdl; + continue; + } + + if (hif_is_nss_wifi_enabled(scn) && ce_state && + (ce_state->htt_rx_data)) + continue; + + pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max); + if (attr->dest_nentries > 0) { + atomic_set(&pipe_info->recv_bufs_needed, + init_buffer_count(attr->dest_nentries - 1)); + /*SRNG based CE has one entry less */ + if (ce_srng_based(scn)) + atomic_dec(&pipe_info->recv_bufs_needed); + } else { + atomic_set(&pipe_info->recv_bufs_needed, 0); + } + ce_tasklet_init(hif_state, (1 << pipe_num)); + ce_register_irq(hif_state, (1 << pipe_num)); + } + + if (athdiag_procfs_init(scn) != 0) { + A_TARGET_ACCESS_UNLIKELY(scn); + goto err; + } + scn->athdiag_procfs_inited = true; + + HIF_DBG("%s: ce_init done", __func__); + + init_tasklet_workers(hif_hdl); + + HIF_DBG("%s: X, ret = %d", __func__, rv); + +#ifdef ADRASTEA_SHADOW_REGISTERS + HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__); + for (i = 0; i < NUM_SHADOW_REGISTERS; i++) { + HIF_DBG("%s Shadow Register%d is mapped to address %x", + __func__, i, + (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2)); + } +#endif + + return rv != 
QDF_STATUS_SUCCESS; + +err: + /* Failure, so clean up */ + hif_unconfig_ce(scn); + HIF_TRACE("%s: X, ret = %d", __func__, rv); + return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_FEATURE_FASTPATH +/** + * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler + * @handler: Callback function + * @context: handle for callback function + * + * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE + */ +int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx, + fastpath_msg_handler handler, + void *context) +{ + struct CE_state *ce_state; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + int i; + + if (!scn) { + HIF_ERROR("%s: scn is NULL", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + + if (!scn->fastpath_mode_on) { + HIF_WARN("%s: Fastpath mode disabled", __func__); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < scn->ce_count; i++) { + ce_state = scn->ce_id_to_state[i]; + if (ce_state->htt_rx_data) { + ce_state->fastpath_handler = handler; + ce_state->context = context; + } + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(hif_ce_fastpath_cb_register); +#endif + +#ifdef IPA_OFFLOAD +/** + * hif_ce_ipa_get_ce_resource() - get uc resource on hif + * @scn: bus context + * @ce_sr_base_paddr: copyengine source ring base physical address + * @ce_sr_ring_size: copyengine source ring size + * @ce_reg_paddr: copyengine register physical address + * + * IPA micro controller data path offload feature enabled, + * HIF should release copy engine related resource information to IPA UC + * IPA UC will access hardware resource with released information + * + * Return: None + */ +void hif_ce_ipa_get_ce_resource(struct hif_softc *scn, + qdf_shared_mem_t **ce_sr, + uint32_t *ce_sr_ring_size, + qdf_dma_addr_t *ce_reg_paddr) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct HIF_CE_pipe_info *pipe_info = + &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]); + struct CE_handle 
*ce_hdl = pipe_info->ce_hdl; + + ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size, + ce_reg_paddr); +} +#endif /* IPA_OFFLOAD */ + + +#ifdef ADRASTEA_SHADOW_REGISTERS + +/* + * Current shadow register config + * + * ----------------------------------------------------------- + * Shadow Register | CE | src/dst write index + * ----------------------------------------------------------- + * 0 | 0 | src + * 1 No Config - Doesn't point to anything + * 2 No Config - Doesn't point to anything + * 3 | 3 | src + * 4 | 4 | src + * 5 | 5 | src + * 6 No Config - Doesn't point to anything + * 7 | 7 | src + * 8 No Config - Doesn't point to anything + * 9 No Config - Doesn't point to anything + * 10 No Config - Doesn't point to anything + * 11 No Config - Doesn't point to anything + * ----------------------------------------------------------- + * 12 No Config - Doesn't point to anything + * 13 | 1 | dst + * 14 | 2 | dst + * 15 No Config - Doesn't point to anything + * 16 No Config - Doesn't point to anything + * 17 No Config - Doesn't point to anything + * 18 No Config - Doesn't point to anything + * 19 | 7 | dst + * 20 | 8 | dst + * 21 No Config - Doesn't point to anything + * 22 No Config - Doesn't point to anything + * 23 No Config - Doesn't point to anything + * ----------------------------------------------------------- + * + * + * ToDo - Move shadow register config to following in the future + * This helps free up a block of shadow registers towards the end. 
+ * Can be used for other purposes + * + * ----------------------------------------------------------- + * Shadow Register | CE | src/dst write index + * ----------------------------------------------------------- + * 0 | 0 | src + * 1 | 3 | src + * 2 | 4 | src + * 3 | 5 | src + * 4 | 7 | src + * ----------------------------------------------------------- + * 5 | 1 | dst + * 6 | 2 | dst + * 7 | 7 | dst + * 8 | 8 | dst + * ----------------------------------------------------------- + * 9 No Config - Doesn't point to anything + * 12 No Config - Doesn't point to anything + * 13 No Config - Doesn't point to anything + * 14 No Config - Doesn't point to anything + * 15 No Config - Doesn't point to anything + * 16 No Config - Doesn't point to anything + * 17 No Config - Doesn't point to anything + * 18 No Config - Doesn't point to anything + * 19 No Config - Doesn't point to anything + * 20 No Config - Doesn't point to anything + * 21 No Config - Doesn't point to anything + * 22 No Config - Doesn't point to anything + * 23 No Config - Doesn't point to anything + * ----------------------------------------------------------- +*/ + +u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) +{ + u32 addr = 0; + u32 ce = COPY_ENGINE_ID(ctrl_addr); + + switch (ce) { + case 0: + addr = SHADOW_VALUE0; + break; + case 3: + addr = SHADOW_VALUE3; + break; + case 4: + addr = SHADOW_VALUE4; + break; + case 5: + addr = SHADOW_VALUE5; + break; + case 7: + addr = SHADOW_VALUE7; + break; + default: + HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); + QDF_ASSERT(0); + } + return addr; + +} + +u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) +{ + u32 addr = 0; + u32 ce = COPY_ENGINE_ID(ctrl_addr); + + switch (ce) { + case 1: + addr = SHADOW_VALUE13; + break; + case 2: + addr = SHADOW_VALUE14; + break; + case 5: + addr = SHADOW_VALUE17; + break; + case 7: + addr = SHADOW_VALUE19; + break; + case 8: + addr = SHADOW_VALUE20; + break; + case 9: + addr = SHADOW_VALUE21; + 
break; + case 10: + addr = SHADOW_VALUE22; + break; + case 11: + addr = SHADOW_VALUE23; + break; + default: + HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); + QDF_ASSERT(0); + } + + return addr; + +} +#endif + +#if defined(FEATURE_LRO) +void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id) +{ + struct CE_state *ce_state; + struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); + + ce_state = scn->ce_id_to_state[ctx_id]; + + return ce_state->lro_data; +} +#endif + +/** + * hif_map_service_to_pipe() - returns the ce ids pertaining to + * this service + * @scn: hif_softc pointer. + * @svc_id: Service ID for which the mapping is needed. + * @ul_pipe: address of the container in which ul pipe is returned. + * @dl_pipe: address of the container in which dl pipe is returned. + * @ul_is_polled: address of the container in which a bool + * indicating if the UL CE for this service + * is polled is returned. + * @dl_is_polled: address of the container in which a bool + * indicating if the DL CE for this service + * is polled is returned. + * + * Return: Indicates whether the service has been found in the table. + * Upon return, ul_is_polled is updated only if ul_pipe is updated. + * There will be warning logs if either leg has not been updated + * because it missed the entry in the table (but this is not an err). 
+ */ +int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id, + uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled, + int *dl_is_polled) +{ + int status = QDF_STATUS_E_INVAL; + unsigned int i; + struct service_to_pipe element; + struct service_to_pipe *tgt_svc_map_to_use; + uint32_t sz_tgt_svc_map_to_use; + struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + bool dl_updated = false; + bool ul_updated = false; + + hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use, + &sz_tgt_svc_map_to_use); + + *dl_is_polled = 0; /* polling for received messages not supported */ + + for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) { + + memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element)); + if (element.service_id == svc_id) { + if (element.pipedir == PIPEDIR_OUT) { + *ul_pipe = element.pipenum; + *ul_is_polled = + (hif_state->host_ce_config[*ul_pipe].flags & + CE_ATTR_DISABLE_INTR) != 0; + ul_updated = true; + } else if (element.pipedir == PIPEDIR_IN) { + *dl_pipe = element.pipenum; + dl_updated = true; + } + status = QDF_STATUS_SUCCESS; + } + } + if (ul_updated == false) + HIF_DBG("ul pipe is NOT updated for service %d", svc_id); + if (dl_updated == false) + HIF_DBG("dl pipe is NOT updated for service %d", svc_id); + + return status; +} + +#ifdef SHADOW_REG_DEBUG +inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn, + uint32_t CE_ctrl_addr) +{ + uint32_t read_from_hw, srri_from_ddr = 0; + + read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS); + + srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); + + if (read_from_hw != srri_from_ddr) { + HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", + __func__, srri_from_ddr, read_from_hw, + CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); + QDF_ASSERT(0); + } + return srri_from_ddr; +} + + +inline uint32_t 
DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn, + uint32_t CE_ctrl_addr) +{ + uint32_t read_from_hw, drri_from_ddr = 0; + + read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS); + + drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); + + if (read_from_hw != drri_from_ddr) { + HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", + drri_from_ddr, read_from_hw, + CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); + QDF_ASSERT(0); + } + return drri_from_ddr; +} + +#endif + +#ifdef ADRASTEA_RRI_ON_DDR +/** + * hif_get_src_ring_read_index(): Called to get the SRRI + * + * @scn: hif_softc pointer + * @CE_ctrl_addr: base address of the CE whose RRI is to be read + * + * This function returns the SRRI to the caller. For CEs that + * dont have interrupts enabled, we look at the DDR based SRRI + * + * Return: SRRI + */ +inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn, + uint32_t CE_ctrl_addr) +{ + struct CE_attr attr; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)]; + if (attr.flags & CE_ATTR_DISABLE_INTR) { + return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr); + } else { + if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) + return A_TARGET_READ(scn, + (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS); + else + return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, + CE_ctrl_addr); + } +} + +/** + * hif_get_dst_ring_read_index(): Called to get the DRRI + * + * @scn: hif_softc pointer + * @CE_ctrl_addr: base address of the CE whose RRI is to be read + * + * This function returns the DRRI to the caller. 
For CEs that + * dont have interrupts enabled, we look at the DDR based DRRI + * + * Return: DRRI + */ +inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn, + uint32_t CE_ctrl_addr) +{ + struct CE_attr attr; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)]; + + if (attr.flags & CE_ATTR_DISABLE_INTR) { + return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr); + } else { + if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) + return A_TARGET_READ(scn, + (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS); + else + return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, + CE_ctrl_addr); + } +} + +/** + * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism + * + * @scn: hif_softc pointer + * + * This function allocates non cached memory on ddr and sends + * the physical address of this memory to the CE hardware. The + * hardware updates the RRI on this particular location. + * + * Return: None + */ +static inline void hif_config_rri_on_ddr(struct hif_softc *scn) +{ + unsigned int i; + uint32_t high_paddr, low_paddr; + qdf_dma_addr_t paddr_rri_on_ddr = 0; + + scn->vaddr_rri_on_ddr = + (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev, + scn->qdf_dev->dev, (CE_COUNT*sizeof(uint32_t)), + &paddr_rri_on_ddr); + + if (!scn->vaddr_rri_on_ddr) { + HIF_DBG("dmaable page alloc fail"); + return; + } + + scn->paddr_rri_on_ddr = paddr_rri_on_ddr; + low_paddr = BITS0_TO_31(paddr_rri_on_ddr); + high_paddr = BITS32_TO_35(paddr_rri_on_ddr); + + HIF_DBG("%s using srri and drri from DDR", __func__); + + WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr); + WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr); + + for (i = 0; i < CE_COUNT; i++) + CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i)); + + qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t)); + +} +#else + +/** + * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism + * + * @scn: hif_softc pointer + * + * This is a dummy implementation for 
platforms that don't + * support this functionality. + * + * Return: None + */ +static inline void hif_config_rri_on_ddr(struct hif_softc *scn) +{ +} +#endif + +/** + * hif_dump_ce_registers() - dump ce registers + * @scn: hif_opaque_softc pointer. + * + * Output the copy engine registers + * + * Return: 0 for success or error code + */ +int hif_dump_ce_registers(struct hif_softc *scn) +{ + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + uint32_t ce_reg_address = CE0_BASE_ADDRESS; + uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2]; + uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2; + uint16_t i; + QDF_STATUS status; + + for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) { + if (scn->ce_id_to_state[i] == NULL) { + HIF_DBG("CE%d not used.", i); + continue; + } + + status = hif_diag_read_mem(hif_hdl, ce_reg_address, + (uint8_t *) &ce_reg_values[0], + ce_reg_word_size * sizeof(uint32_t)); + + if (status != QDF_STATUS_SUCCESS) { + HIF_ERROR("Dumping CE register failed!"); + return -EACCES; + } + HIF_ERROR("CE%d=>\n", i); + qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG, + (uint8_t *) &ce_reg_values[0], + ce_reg_word_size * sizeof(uint32_t)); + qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d\n", (ce_reg_address + + SR_WR_INDEX_ADDRESS), + ce_reg_values[SR_WR_INDEX_ADDRESS/4]); + qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d\n", (ce_reg_address + + CURRENT_SRRI_ADDRESS), + ce_reg_values[CURRENT_SRRI_ADDRESS/4]); + qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d\n", (ce_reg_address + + DST_WR_INDEX_ADDRESS), + ce_reg_values[DST_WR_INDEX_ADDRESS/4]); + qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d\n", (ce_reg_address + + CURRENT_DRRI_ADDRESS), + ce_reg_values[CURRENT_DRRI_ADDRESS/4]); + qdf_print("---\n"); + } + return 0; +} +qdf_export_symbol(hif_dump_ce_registers); +#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT +struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc, + struct hif_pipe_addl_info *hif_info, uint32_t pipe) +{ + struct hif_softc 
*scn = HIF_GET_SOFTC(osc); + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc); + struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); + struct CE_handle *ce_hdl = pipe_info->ce_hdl; + struct CE_state *ce_state = (struct CE_state *)ce_hdl; + struct CE_ring_state *src_ring = ce_state->src_ring; + struct CE_ring_state *dest_ring = ce_state->dest_ring; + + if (src_ring) { + hif_info->ul_pipe.nentries = src_ring->nentries; + hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask; + hif_info->ul_pipe.sw_index = src_ring->sw_index; + hif_info->ul_pipe.write_index = src_ring->write_index; + hif_info->ul_pipe.hw_index = src_ring->hw_index; + hif_info->ul_pipe.base_addr_CE_space = + src_ring->base_addr_CE_space; + hif_info->ul_pipe.base_addr_owner_space = + src_ring->base_addr_owner_space; + } + + + if (dest_ring) { + hif_info->dl_pipe.nentries = dest_ring->nentries; + hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask; + hif_info->dl_pipe.sw_index = dest_ring->sw_index; + hif_info->dl_pipe.write_index = dest_ring->write_index; + hif_info->dl_pipe.hw_index = dest_ring->hw_index; + hif_info->dl_pipe.base_addr_CE_space = + dest_ring->base_addr_CE_space; + hif_info->dl_pipe.base_addr_owner_space = + dest_ring->base_addr_owner_space; + } + + hif_info->pci_mem = pci_resource_start(sc->pdev, 0); + hif_info->ctrl_addr = ce_state->ctrl_addr; + + return hif_info; +} +qdf_export_symbol(hif_get_addl_pipe_info); + +uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode) +{ + struct hif_softc *scn = HIF_GET_SOFTC(osc); + + scn->nss_wifi_ol_mode = mode; + return 0; +} +qdf_export_symbol(hif_set_nss_wifiol_mode); +#endif + +void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib) +{ + struct hif_softc *scn = HIF_GET_SOFTC(osc); + scn->hif_attribute = hif_attrib; +} + + +/* disable interrupts (only applicable for legacy copy engine currently) */ +void 
hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num) +{ + struct hif_softc *scn = HIF_GET_SOFTC(osc); + struct CE_state *CE_state = scn->ce_id_to_state[pipe_num]; + uint32_t ctrl_addr = CE_state->ctrl_addr; + + Q_TARGET_ACCESS_BEGIN(scn); + CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr); + Q_TARGET_ACCESS_END(scn); +} +qdf_export_symbol(hif_disable_interrupt); + +/** + * hif_fw_event_handler() - hif fw event handler + * @hif_state: pointer to hif ce state structure + * + * Process fw events and raise HTC callback to process fw events. + * + * Return: none + */ +static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state) +{ + struct hif_msg_callbacks *msg_callbacks = + &hif_state->msg_callbacks_current; + + if (!msg_callbacks->fwEventHandler) + return; + + msg_callbacks->fwEventHandler(msg_callbacks->Context, + QDF_STATUS_E_FAILURE); +} + +#ifndef QCA_WIFI_3_0 +/** + * hif_fw_interrupt_handler() - FW interrupt handler + * @irq: irq number + * @arg: the user pointer + * + * Called from the PCI interrupt handler when a + * firmware-generated interrupt to the Host. 
+ * + * only registered for legacy ce devices + * + * Return: status of handled irq + */ +irqreturn_t hif_fw_interrupt_handler(int irq, void *arg) +{ + struct hif_softc *scn = arg; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + uint32_t fw_indicator_address, fw_indicator; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return ATH_ISR_NOSCHED; + + fw_indicator_address = hif_state->fw_indicator_address; + /* For sudden unplug this will return ~0 */ + fw_indicator = A_TARGET_READ(scn, fw_indicator_address); + + if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) { + /* ACK: clear Target-side pending event */ + A_TARGET_WRITE(scn, fw_indicator_address, + fw_indicator & ~FW_IND_EVENT_PENDING); + if (Q_TARGET_ACCESS_END(scn) < 0) + return ATH_ISR_SCHED; + + if (hif_state->started) { + hif_fw_event_handler(hif_state); + } else { + /* + * Probable Target failure before we're prepared + * to handle it. Generally unexpected. + * fw_indicator used as bitmap, and defined as below: + * FW_IND_EVENT_PENDING 0x1 + * FW_IND_INITIALIZED 0x2 + * FW_IND_NEEDRECOVER 0x4 + */ + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: Early firmware event indicated 0x%x\n", + __func__, fw_indicator)); + } + } else { + if (Q_TARGET_ACCESS_END(scn) < 0) + return ATH_ISR_SCHED; + } + + return ATH_ISR_SCHED; +} +#else +irqreturn_t hif_fw_interrupt_handler(int irq, void *arg) +{ + return ATH_ISR_SCHED; +} +#endif /* #ifdef QCA_WIFI_3_0 */ + + +/** + * hif_wlan_disable(): call the platform driver to disable wlan + * @scn: HIF Context + * + * This function passes the con_mode to platform driver to disable + * wlan. 
+ * + * Return: void + */ +void hif_wlan_disable(struct hif_softc *scn) +{ + enum pld_driver_mode mode; + uint32_t con_mode = hif_get_conparam(scn); + + if (scn->target_status == TARGET_STATUS_RESET) + return; + + if (QDF_GLOBAL_FTM_MODE == con_mode) + mode = PLD_FTM; + else if (QDF_IS_EPPING_ENABLED(con_mode)) + mode = PLD_EPPING; + else + mode = PLD_MISSION; + + pld_wlan_disable(scn->qdf_dev->dev, mode); +} + +int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id) +{ + QDF_STATUS status; + uint8_t ul_pipe, dl_pipe; + int ul_is_polled, dl_is_polled; + + /* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */ + status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn), + HTC_CTRL_RSVD_SVC, + &ul_pipe, &dl_pipe, + &ul_is_polled, &dl_is_polled); + if (status) { + HIF_ERROR("%s: failed to map pipe: %d", __func__, status); + return qdf_status_to_os_return(status); + } + + *ce_id = dl_pipe; + + return 0; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_main.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_main.h new file mode 100644 index 0000000000000000000000000000000000000000..58906c52da4fc988443bdcc6cd686a6d993b7d41 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_main.h @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __CE_H__ +#define __CE_H__ + +#include "qdf_atomic.h" +#include "qdf_lock.h" +#include "hif_main.h" +#include "qdf_util.h" +#include "hif_exec.h" + +#define CE_HTT_T2H_MSG 1 +#define CE_HTT_H2T_MSG 4 + +#define CE_OFFSET 0x00000400 +#define CE_USEFUL_SIZE 0x00000058 +#define CE_ALL_BITMAP 0xFFFF + +/** + * enum ce_id_type + * + * @ce_id_type: Copy engine ID + */ +enum ce_id_type { + CE_ID_0, + CE_ID_1, + CE_ID_2, + CE_ID_3, + CE_ID_4, + CE_ID_5, + CE_ID_6, + CE_ID_7, + CE_ID_8, + CE_ID_9, + CE_ID_10, + CE_ID_11, + CE_ID_MAX +}; + +#ifdef CONFIG_WIN +#define QWLAN_VERSIONSTR "WIN" +#endif + +enum ol_ath_hif_pkt_ecodes { + HIF_PIPE_NO_RESOURCE = 0 +}; + +struct HIF_CE_state; + +/* Per-pipe state. */ +struct HIF_CE_pipe_info { + /* Handle of underlying Copy Engine */ + struct CE_handle *ce_hdl; + + /* Our pipe number; facilitates use of pipe_info ptrs. */ + uint8_t pipe_num; + + /* Convenience back pointer to HIF_CE_state. */ + struct HIF_CE_state *HIF_CE_state; + + /* Instantaneous number of receive buffers that should be posted */ + atomic_t recv_bufs_needed; + qdf_size_t buf_sz; + qdf_spinlock_t recv_bufs_needed_lock; + + qdf_spinlock_t completion_freeq_lock; + /* Limit the number of outstanding send requests. 
*/ + int num_sends_allowed; + + /* adding three counts for debugging ring buffer errors */ + uint32_t nbuf_alloc_err_count; + uint32_t nbuf_dma_err_count; + uint32_t nbuf_ce_enqueue_err_count; + struct hif_msg_callbacks pipe_callbacks; +}; + +/** + * struct ce_tasklet_entry + * + * @intr_tq: intr_tq + * @ce_id: ce_id + * @inited: inited + * @hif_ce_state: hif_ce_state + * @from_irq: from_irq + */ +struct ce_tasklet_entry { + struct tasklet_struct intr_tq; + enum ce_id_type ce_id; + bool inited; + void *hif_ce_state; +}; + +static inline bool hif_dummy_grp_done(struct hif_exec_context *grp_entry, int + work_done) +{ + return true; +} + +extern struct hif_execution_ops tasklet_sched_ops; +extern struct hif_execution_ops napi_sched_ops; + +struct ce_stats { + uint32_t ce_per_cpu[CE_COUNT_MAX][QDF_MAX_AVAILABLE_CPU]; +}; + +struct HIF_CE_state { + struct hif_softc ol_sc; + bool started; + struct ce_tasklet_entry tasklets[CE_COUNT_MAX]; + struct hif_exec_context *hif_ext_group[HIF_MAX_GROUP]; + uint32_t hif_num_extgroup; + qdf_spinlock_t keep_awake_lock; + qdf_spinlock_t irq_reg_lock; + unsigned int keep_awake_count; + bool verified_awake; + bool fake_sleep; + qdf_timer_t sleep_timer; + bool sleep_timer_init; + qdf_time_t sleep_ticks; + uint32_t ce_register_irq_done; + + struct CE_pipe_config *target_ce_config; + struct CE_attr *host_ce_config; + uint32_t target_ce_config_sz; + /* Per-pipe state. 
*/ + struct HIF_CE_pipe_info pipe_info[CE_COUNT_MAX]; + /* to be activated after BMI_DONE */ + struct hif_msg_callbacks msg_callbacks_pending; + /* current msg callbacks in use */ + struct hif_msg_callbacks msg_callbacks_current; + + /* Target address used to signal a pending firmware event */ + uint32_t fw_indicator_address; + + /* Copy Engine used for Diagnostic Accesses */ + struct CE_handle *ce_diag; + struct ce_stats stats; + struct ce_ops *ce_services; +}; + +/* + * HIA Map Definition + */ +struct host_interest_area_t { + uint32_t hi_interconnect_state; + uint32_t hi_early_alloc; + uint32_t hi_option_flag2; + uint32_t hi_board_data; + uint32_t hi_board_data_initialized; + uint32_t hi_failure_state; + uint32_t hi_rddi_msi_num; + uint32_t hi_pcie_perst_couple_en; + uint32_t hi_sw_protocol_version; +}; + +struct shadow_reg_cfg { + uint16_t ce_id; + uint16_t reg_offset; +}; + +struct shadow_reg_v2_cfg { + uint32_t reg_value; +}; + +void hif_ce_stop(struct hif_softc *scn); +int hif_dump_ce_registers(struct hif_softc *scn); +void +hif_ce_dump_target_memory(struct hif_softc *scn, void *ramdump_base, + uint32_t address, uint32_t size); + +#ifdef IPA_OFFLOAD +void hif_ce_ipa_get_ce_resource(struct hif_softc *scn, + qdf_shared_mem_t **ce_sr, + uint32_t *ce_sr_ring_size, + qdf_dma_addr_t *ce_reg_paddr); +#else +static inline +void hif_ce_ipa_get_ce_resource(struct hif_softc *scn, + qdf_shared_mem_t **ce_sr, + uint32_t *ce_sr_ring_size, + qdf_dma_addr_t *ce_reg_paddr) +{ +} + +#endif +int hif_wlan_enable(struct hif_softc *scn); +void hif_wlan_disable(struct hif_softc *scn); +void hif_get_target_ce_config(struct hif_softc *scn, + struct CE_pipe_config **target_ce_config_ret, + uint32_t *target_ce_config_sz_ret, + struct service_to_pipe **target_service_to_ce_map_ret, + uint32_t *target_service_to_ce_map_sz_ret, + struct shadow_reg_cfg **target_shadow_reg_cfg_v1_ret, + uint32_t *shadow_cfg_v1_sz_ret); + +#ifdef WLAN_FEATURE_EPPING +void hif_ce_prepare_epping_config(struct 
HIF_CE_state *hif_state); +void hif_select_epping_service_to_pipe_map(struct service_to_pipe + **tgt_svc_map_to_use, + uint32_t *sz_tgt_svc_map_to_use); + +#else +static inline +void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state) +{ } +static inline +void hif_select_epping_service_to_pipe_map(struct service_to_pipe + **tgt_svc_map_to_use, + uint32_t *sz_tgt_svc_map_to_use) +{ } +#endif + +#endif /* __CE_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_reg.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..de2fb844a1521633735d58fba6a157903ddb59d5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_reg.h @@ -0,0 +1,563 @@ +/* + * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __CE_REG_H__ +#define __CE_REG_H__ + +#define COPY_ENGINE_ID(COPY_ENGINE_BASE_ADDRESS) ((COPY_ENGINE_BASE_ADDRESS \ + - CE0_BASE_ADDRESS)/(CE1_BASE_ADDRESS - CE0_BASE_ADDRESS)) + +#define DST_WR_INDEX_ADDRESS (scn->target_ce_def->d_DST_WR_INDEX_ADDRESS) +#define SRC_WATERMARK_ADDRESS (scn->target_ce_def->d_SRC_WATERMARK_ADDRESS) +#define SRC_WATERMARK_LOW_MASK (scn->target_ce_def->d_SRC_WATERMARK_LOW_MASK) +#define SRC_WATERMARK_HIGH_MASK (scn->target_ce_def->d_SRC_WATERMARK_HIGH_MASK) +#define DST_WATERMARK_LOW_MASK (scn->target_ce_def->d_DST_WATERMARK_LOW_MASK) +#define DST_WATERMARK_HIGH_MASK (scn->target_ce_def->d_DST_WATERMARK_HIGH_MASK) +#define CURRENT_SRRI_ADDRESS (scn->target_ce_def->d_CURRENT_SRRI_ADDRESS) +#define CURRENT_DRRI_ADDRESS (scn->target_ce_def->d_CURRENT_DRRI_ADDRESS) + +#define SHADOW_VALUE0 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_0) +#define SHADOW_VALUE1 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_1) +#define SHADOW_VALUE2 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_2) +#define SHADOW_VALUE3 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_3) +#define SHADOW_VALUE4 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_4) +#define SHADOW_VALUE5 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_5) +#define SHADOW_VALUE6 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_6) +#define SHADOW_VALUE7 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_7) +#define SHADOW_VALUE8 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_8) +#define SHADOW_VALUE9 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_9) +#define SHADOW_VALUE10 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_10) +#define SHADOW_VALUE11 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_11) +#define SHADOW_VALUE12 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_12) +#define SHADOW_VALUE13 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_13) +#define SHADOW_VALUE14 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_14) +#define 
SHADOW_VALUE15 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_15) +#define SHADOW_VALUE16 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_16) +#define SHADOW_VALUE17 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_17) +#define SHADOW_VALUE18 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_18) +#define SHADOW_VALUE19 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_19) +#define SHADOW_VALUE20 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_20) +#define SHADOW_VALUE21 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_21) +#define SHADOW_VALUE22 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_22) +#define SHADOW_VALUE23 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_VALUE_23) +#define SHADOW_ADDRESS0 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_0) +#define SHADOW_ADDRESS1 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_1) +#define SHADOW_ADDRESS2 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_2) +#define SHADOW_ADDRESS3 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_3) +#define SHADOW_ADDRESS4 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_4) +#define SHADOW_ADDRESS5 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_5) +#define SHADOW_ADDRESS6 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_6) +#define SHADOW_ADDRESS7 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_7) +#define SHADOW_ADDRESS8 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_8) +#define SHADOW_ADDRESS9 (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_9) +#define SHADOW_ADDRESS10 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_10) +#define SHADOW_ADDRESS11 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_11) +#define SHADOW_ADDRESS12 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_12) +#define SHADOW_ADDRESS13 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_13) +#define SHADOW_ADDRESS14 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_14) +#define SHADOW_ADDRESS15 \ + 
(scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_15) +#define SHADOW_ADDRESS16 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_16) +#define SHADOW_ADDRESS17 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_17) +#define SHADOW_ADDRESS18 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_18) +#define SHADOW_ADDRESS19 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_19) +#define SHADOW_ADDRESS20 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_20) +#define SHADOW_ADDRESS21 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_21) +#define SHADOW_ADDRESS22 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_22) +#define SHADOW_ADDRESS23 \ + (scn->host_shadow_regs->d_A_LOCAL_SHADOW_REG_ADDRESS_23) + +#define SHADOW_ADDRESS(i) \ + (SHADOW_ADDRESS0 + i*(SHADOW_ADDRESS1-SHADOW_ADDRESS0)) + +#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK \ + (scn->target_ce_def->d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK) +#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK \ + (scn->target_ce_def->d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK) +#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK \ + (scn->target_ce_def->d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK) +#define HOST_IS_DST_RING_LOW_WATERMARK_MASK \ + (scn->target_ce_def->d_HOST_IS_DST_RING_LOW_WATERMARK_MASK) +#define MISC_IS_ADDRESS (scn->target_ce_def->d_MISC_IS_ADDRESS) +#define HOST_IS_COPY_COMPLETE_MASK \ + (scn->target_ce_def->d_HOST_IS_COPY_COMPLETE_MASK) +#define CE_WRAPPER_BASE_ADDRESS (scn->target_ce_def->d_CE_WRAPPER_BASE_ADDRESS) +#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS \ + (scn->target_ce_def->d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS) +#define CE_DDR_ADDRESS_FOR_RRI_LOW \ + (scn->target_ce_def->d_CE_DDR_ADDRESS_FOR_RRI_LOW) +#define CE_DDR_ADDRESS_FOR_RRI_HIGH \ + (scn->target_ce_def->d_CE_DDR_ADDRESS_FOR_RRI_HIGH) +#define HOST_IE_COPY_COMPLETE_MASK \ + (scn->target_ce_def->d_HOST_IE_COPY_COMPLETE_MASK) +#define SR_BA_ADDRESS (scn->target_ce_def->d_SR_BA_ADDRESS) +#define SR_BA_ADDRESS_HIGH 
(scn->target_ce_def->d_SR_BA_ADDRESS_HIGH) +#define SR_SIZE_ADDRESS (scn->target_ce_def->d_SR_SIZE_ADDRESS) +#define CE_CTRL1_ADDRESS (scn->target_ce_def->d_CE_CTRL1_ADDRESS) +#define CE_CTRL1_DMAX_LENGTH_MASK \ + (scn->target_ce_def->d_CE_CTRL1_DMAX_LENGTH_MASK) +#define DR_BA_ADDRESS (scn->target_ce_def->d_DR_BA_ADDRESS) +#define DR_BA_ADDRESS_HIGH (scn->target_ce_def->d_DR_BA_ADDRESS_HIGH) +#define DR_SIZE_ADDRESS (scn->target_ce_def->d_DR_SIZE_ADDRESS) +#define CE_CMD_REGISTER (scn->target_ce_def->d_CE_CMD_REGISTER) +#define CE_MSI_ADDRESS (scn->target_ce_def->d_CE_MSI_ADDRESS) +#define CE_MSI_ADDRESS_HIGH (scn->target_ce_def->d_CE_MSI_ADDRESS_HIGH) +#define CE_MSI_DATA (scn->target_ce_def->d_CE_MSI_DATA) +#define CE_MSI_ENABLE_BIT (scn->target_ce_def->d_CE_MSI_ENABLE_BIT) +#define MISC_IE_ADDRESS (scn->target_ce_def->d_MISC_IE_ADDRESS) +#define MISC_IS_AXI_ERR_MASK (scn->target_ce_def->d_MISC_IS_AXI_ERR_MASK) +#define MISC_IS_DST_ADDR_ERR_MASK \ + (scn->target_ce_def->d_MISC_IS_DST_ADDR_ERR_MASK) +#define MISC_IS_SRC_LEN_ERR_MASK \ + (scn->target_ce_def->d_MISC_IS_SRC_LEN_ERR_MASK) +#define MISC_IS_DST_MAX_LEN_VIO_MASK \ + (scn->target_ce_def->d_MISC_IS_DST_MAX_LEN_VIO_MASK) +#define MISC_IS_DST_RING_OVERFLOW_MASK \ + (scn->target_ce_def->d_MISC_IS_DST_RING_OVERFLOW_MASK) +#define MISC_IS_SRC_RING_OVERFLOW_MASK \ + (scn->target_ce_def->d_MISC_IS_SRC_RING_OVERFLOW_MASK) +#define SRC_WATERMARK_LOW_LSB (scn->target_ce_def->d_SRC_WATERMARK_LOW_LSB) +#define SRC_WATERMARK_HIGH_LSB (scn->target_ce_def->d_SRC_WATERMARK_HIGH_LSB) +#define DST_WATERMARK_LOW_LSB (scn->target_ce_def->d_DST_WATERMARK_LOW_LSB) +#define DST_WATERMARK_HIGH_LSB (scn->target_ce_def->d_DST_WATERMARK_HIGH_LSB) +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \ + (scn->target_ce_def->d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \ + (scn->target_ce_def->d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) +#define CE_CTRL1_DMAX_LENGTH_LSB \ + 
(scn->target_ce_def->d_CE_CTRL1_DMAX_LENGTH_LSB) +#define CE_CTRL1_IDX_UPD_EN (scn->target_ce_def->d_CE_CTRL1_IDX_UPD_EN_MASK) +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK \ + (scn->target_ce_def->d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK \ + (scn->target_ce_def->d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB \ + (scn->target_ce_def->d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB \ + (scn->target_ce_def->d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) +#define WLAN_DEBUG_INPUT_SEL_OFFSET \ + (scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_OFFSET) +#define WLAN_DEBUG_INPUT_SEL_SRC_MSB \ + (scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_SRC_MSB) +#define WLAN_DEBUG_INPUT_SEL_SRC_LSB \ + (scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_SRC_LSB) +#define WLAN_DEBUG_INPUT_SEL_SRC_MASK \ + (scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_SRC_MASK) +#define WLAN_DEBUG_CONTROL_OFFSET (scn->targetdef->d_WLAN_DEBUG_CONTROL_OFFSET) +#define WLAN_DEBUG_CONTROL_ENABLE_MSB \ + (scn->targetdef->d_WLAN_DEBUG_CONTROL_ENABLE_MSB) +#define WLAN_DEBUG_CONTROL_ENABLE_LSB \ + (scn->targetdef->d_WLAN_DEBUG_CONTROL_ENABLE_LSB) +#define WLAN_DEBUG_CONTROL_ENABLE_MASK \ + (scn->targetdef->d_WLAN_DEBUG_CONTROL_ENABLE_MASK) +#define WLAN_DEBUG_OUT_OFFSET (scn->targetdef->d_WLAN_DEBUG_OUT_OFFSET) +#define WLAN_DEBUG_OUT_DATA_MSB (scn->targetdef->d_WLAN_DEBUG_OUT_DATA_MSB) +#define WLAN_DEBUG_OUT_DATA_LSB (scn->targetdef->d_WLAN_DEBUG_OUT_DATA_LSB) +#define WLAN_DEBUG_OUT_DATA_MASK (scn->targetdef->d_WLAN_DEBUG_OUT_DATA_MASK) +#define AMBA_DEBUG_BUS_OFFSET (scn->targetdef->d_AMBA_DEBUG_BUS_OFFSET) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB \ + (scn->targetdef->d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB \ + (scn->targetdef->d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK \ + (scn->targetdef->d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK) +#define 
AMBA_DEBUG_BUS_SEL_MSB (scn->targetdef->d_AMBA_DEBUG_BUS_SEL_MSB) +#define AMBA_DEBUG_BUS_SEL_LSB (scn->targetdef->d_AMBA_DEBUG_BUS_SEL_LSB) +#define AMBA_DEBUG_BUS_SEL_MASK (scn->targetdef->d_AMBA_DEBUG_BUS_SEL_MASK) +#define CE_WRAPPER_DEBUG_OFFSET \ + (scn->target_ce_def->d_CE_WRAPPER_DEBUG_OFFSET) +#define CE_WRAPPER_DEBUG_SEL_MSB \ + (scn->target_ce_def->d_CE_WRAPPER_DEBUG_SEL_MSB) +#define CE_WRAPPER_DEBUG_SEL_LSB \ + (scn->target_ce_def->d_CE_WRAPPER_DEBUG_SEL_LSB) +#define CE_WRAPPER_DEBUG_SEL_MASK \ + (scn->target_ce_def->d_CE_WRAPPER_DEBUG_SEL_MASK) +#define CE_DEBUG_OFFSET (scn->target_ce_def->d_CE_DEBUG_OFFSET) +#define CE_DEBUG_SEL_MSB (scn->target_ce_def->d_CE_DEBUG_SEL_MSB) +#define CE_DEBUG_SEL_LSB (scn->target_ce_def->d_CE_DEBUG_SEL_LSB) +#define CE_DEBUG_SEL_MASK (scn->target_ce_def->d_CE_DEBUG_SEL_MASK) +#define HOST_IE_ADDRESS (scn->target_ce_def->d_HOST_IE_ADDRESS) +#define HOST_IE_REG1_CE_LSB (scn->target_ce_def->d_HOST_IE_REG1_CE_LSB) +#define HOST_IE_ADDRESS_2 (scn->target_ce_def->d_HOST_IE_ADDRESS_2) +#define HOST_IE_REG2_CE_LSB (scn->target_ce_def->d_HOST_IE_REG2_CE_LSB) +#define HOST_IE_ADDRESS_3 (scn->target_ce_def->d_HOST_IE_ADDRESS_3) +#define HOST_IE_REG3_CE_LSB (scn->target_ce_def->d_HOST_IE_REG3_CE_LSB) +#define HOST_IS_ADDRESS (scn->target_ce_def->d_HOST_IS_ADDRESS) + +#define SRC_WATERMARK_LOW_SET(x) \ + (((x) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK) +#define SRC_WATERMARK_HIGH_SET(x) \ + (((x) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK) +#define DST_WATERMARK_LOW_SET(x) \ + (((x) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK) +#define DST_WATERMARK_HIGH_SET(x) \ + (((x) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK) +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \ + (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \ + CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) +#define CE_CTRL1_DMAX_LENGTH_SET(x) \ + (((x) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK) +#define 
CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \ + (((x) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \ + CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \ + (((x) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \ + CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) +#define WLAN_DEBUG_INPUT_SEL_SRC_GET(x) \ + (((x) & WLAN_DEBUG_INPUT_SEL_SRC_MASK) >> \ + WLAN_DEBUG_INPUT_SEL_SRC_LSB) +#define WLAN_DEBUG_INPUT_SEL_SRC_SET(x) \ + (((x) << WLAN_DEBUG_INPUT_SEL_SRC_LSB) & \ + WLAN_DEBUG_INPUT_SEL_SRC_MASK) +#define WLAN_DEBUG_CONTROL_ENABLE_GET(x) \ + (((x) & WLAN_DEBUG_CONTROL_ENABLE_MASK) >> \ + WLAN_DEBUG_CONTROL_ENABLE_LSB) +#define WLAN_DEBUG_CONTROL_ENABLE_SET(x) \ + (((x) << WLAN_DEBUG_CONTROL_ENABLE_LSB) & \ + WLAN_DEBUG_CONTROL_ENABLE_MASK) +#define WLAN_DEBUG_OUT_DATA_GET(x) \ + (((x) & WLAN_DEBUG_OUT_DATA_MASK) >> WLAN_DEBUG_OUT_DATA_LSB) +#define WLAN_DEBUG_OUT_DATA_SET(x) \ + (((x) << WLAN_DEBUG_OUT_DATA_LSB) & WLAN_DEBUG_OUT_DATA_MASK) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_GET(x) \ + (((x) & AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK) >> \ + AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(x) \ + (((x) << AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB) & \ + AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK) +#define AMBA_DEBUG_BUS_SEL_GET(x) \ + (((x) & AMBA_DEBUG_BUS_SEL_MASK) >> AMBA_DEBUG_BUS_SEL_LSB) +#define AMBA_DEBUG_BUS_SEL_SET(x) \ + (((x) << AMBA_DEBUG_BUS_SEL_LSB) & AMBA_DEBUG_BUS_SEL_MASK) +#define CE_WRAPPER_DEBUG_SEL_GET(x) \ + (((x) & CE_WRAPPER_DEBUG_SEL_MASK) >> CE_WRAPPER_DEBUG_SEL_LSB) +#define CE_WRAPPER_DEBUG_SEL_SET(x) \ + (((x) << CE_WRAPPER_DEBUG_SEL_LSB) & CE_WRAPPER_DEBUG_SEL_MASK) +#define CE_DEBUG_SEL_GET(x) (((x) & CE_DEBUG_SEL_MASK) >> CE_DEBUG_SEL_LSB) +#define CE_DEBUG_SEL_SET(x) (((x) << CE_DEBUG_SEL_LSB) & CE_DEBUG_SEL_MASK) +#define HOST_IE_REG1_CE_BIT(_ce_id) (1 << (_ce_id + HOST_IE_REG1_CE_LSB)) +#define HOST_IE_REG2_CE_BIT(_ce_id) (1 << (_ce_id + HOST_IE_REG2_CE_LSB)) +#define HOST_IE_REG3_CE_BIT(_ce_id) (1 << (_ce_id + 
HOST_IE_REG3_CE_LSB)) + +uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn, + uint32_t CE_ctrl_addr); +uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn, + uint32_t CE_ctrl_addr); + +#define BITS0_TO_31(val) ((uint32_t)((uint64_t)(paddr_rri_on_ddr)\ + & (uint64_t)(0xFFFFFFFF))) +#define BITS32_TO_35(val) ((uint32_t)(((uint64_t)(paddr_rri_on_ddr)\ + & (uint64_t)(0xF00000000))>>32)) + +#define VADDR_FOR_CE(scn, CE_ctrl_addr)\ + ((scn->vaddr_rri_on_ddr) + COPY_ENGINE_ID(CE_ctrl_addr)) + +#define SRRI_FROM_DDR_ADDR(addr) ((*(addr)) & 0xFFFF) +#define DRRI_FROM_DDR_ADDR(addr) (((*(addr))>>16) & 0xFFFF) + +#define CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS) +#define CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS) + +#ifdef ADRASTEA_RRI_ON_DDR +#ifdef SHADOW_REG_DEBUG +#define CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr)\ + DEBUG_CE_SRC_RING_READ_IDX_GET(scn, CE_ctrl_addr) +#define CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr)\ + DEBUG_CE_DEST_RING_READ_IDX_GET(scn, CE_ctrl_addr) +#else +#define CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr)\ + SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)) +#define CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr)\ + DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)) +#endif + +unsigned int hif_get_src_ring_read_index(struct hif_softc *scn, + uint32_t CE_ctrl_addr); +unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn, + uint32_t CE_ctrl_addr); + +#define CE_SRC_RING_READ_IDX_GET(scn, CE_ctrl_addr)\ + hif_get_src_ring_read_index(scn, CE_ctrl_addr) +#define CE_DEST_RING_READ_IDX_GET(scn, CE_ctrl_addr)\ + hif_get_dst_ring_read_index(scn, CE_ctrl_addr) +#else +#define CE_SRC_RING_READ_IDX_GET(scn, CE_ctrl_addr) \ + CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, CE_ctrl_addr) +#define CE_DEST_RING_READ_IDX_GET(scn, CE_ctrl_addr)\ + 
CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, CE_ctrl_addr) + +/** + * if RRI on DDR is not enabled, get idx from ddr defaults to + * using the register value & force wake must be used for + * non interrupt processing. + */ +#define CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr)\ + A_TARGET_READ(scn, (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS) +#endif + +#define CE_SRC_RING_BASE_ADDR_SET(scn, CE_ctrl_addr, addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + SR_BA_ADDRESS, (addr)) + +#define CE_SRC_RING_BASE_ADDR_HIGH_SET(scn, CE_ctrl_addr, addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + SR_BA_ADDRESS_HIGH, (addr)) + +#define CE_SRC_RING_BASE_ADDR_HIGH_GET(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + SR_BA_ADDRESS_HIGH) + +#define CE_SRC_RING_SZ_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + SR_SIZE_ADDRESS, (n)) + +#define CE_SRC_RING_DMAX_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS, \ + (A_TARGET_READ(scn, (CE_ctrl_addr) + \ + CE_CTRL1_ADDRESS) & ~CE_CTRL1_DMAX_LENGTH_MASK) | \ + CE_CTRL1_DMAX_LENGTH_SET(n)) + +#define CE_IDX_UPD_EN_SET(scn, CE_ctrl_addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS, \ + (A_TARGET_READ(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS) \ + | CE_CTRL1_IDX_UPD_EN)) + +#define CE_CMD_REGISTER_GET(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + CE_CMD_REGISTER) + +#define CE_CMD_REGISTER_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_CMD_REGISTER, n) + +#define CE_MSI_ADDR_LOW_SET(scn, CE_ctrl_addr, addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_MSI_ADDRESS, (addr)) + +#define CE_MSI_ADDR_HIGH_SET(scn, CE_ctrl_addr, addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_MSI_ADDRESS_HIGH, (addr)) + +#define CE_MSI_DATA_SET(scn, CE_ctrl_addr, data) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_MSI_DATA, (data)) + +#define CE_CTRL_REGISTER1_SET(scn, CE_ctrl_addr, val) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS, val) + 
+#define CE_CTRL_REGISTER1_GET(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS) + +#define CE_SRC_RING_BYTE_SWAP_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + CE_CTRL1_ADDRESS, \ + (A_TARGET_READ(scn, \ + (CE_ctrl_addr) + CE_CTRL1_ADDRESS) \ + & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) | \ + CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n)) + +#define CE_DEST_RING_BYTE_SWAP_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr)+CE_CTRL1_ADDRESS, \ + (A_TARGET_READ(scn, \ + (CE_ctrl_addr) + CE_CTRL1_ADDRESS) \ + & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) | \ + CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n)) + + +#define CE_DEST_RING_BASE_ADDR_SET(scn, CE_ctrl_addr, addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + DR_BA_ADDRESS, (addr)) + +#define CE_DEST_RING_BASE_ADDR_HIGH_SET(scn, CE_ctrl_addr, addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + DR_BA_ADDRESS_HIGH, (addr)) + +#define CE_DEST_RING_BASE_ADDR_HIGH_GET(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + DR_BA_ADDRESS_HIGH) + +#define CE_DEST_RING_SZ_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + DR_SIZE_ADDRESS, (n)) + +#define CE_SRC_RING_HIGHMARK_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + SRC_WATERMARK_ADDRESS, \ + (A_TARGET_READ(scn, \ + (CE_ctrl_addr) + SRC_WATERMARK_ADDRESS) \ + & ~SRC_WATERMARK_HIGH_MASK) | \ + SRC_WATERMARK_HIGH_SET(n)) + +#define CE_SRC_RING_LOWMARK_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + SRC_WATERMARK_ADDRESS, \ + (A_TARGET_READ(scn, \ + (CE_ctrl_addr) + SRC_WATERMARK_ADDRESS) \ + & ~SRC_WATERMARK_LOW_MASK) | \ + SRC_WATERMARK_LOW_SET(n)) + +#define CE_DEST_RING_HIGHMARK_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + DST_WATERMARK_ADDRESS, \ + (A_TARGET_READ(scn, \ + (CE_ctrl_addr) + DST_WATERMARK_ADDRESS) \ + & ~DST_WATERMARK_HIGH_MASK) | \ + DST_WATERMARK_HIGH_SET(n)) + +#define CE_DEST_RING_LOWMARK_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, 
(CE_ctrl_addr) + DST_WATERMARK_ADDRESS, \ + (A_TARGET_READ(scn, \ + (CE_ctrl_addr) + DST_WATERMARK_ADDRESS) \ + & ~DST_WATERMARK_LOW_MASK) | \ + DST_WATERMARK_LOW_SET(n)) + +#define CE_COPY_COMPLETE_INTR_ENABLE(scn, CE_ctrl_addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + HOST_IE_ADDRESS, \ + A_TARGET_READ(scn, \ + (CE_ctrl_addr) + HOST_IE_ADDRESS) | \ + HOST_IE_COPY_COMPLETE_MASK) + +#define CE_COPY_COMPLETE_INTR_DISABLE(scn, CE_ctrl_addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + HOST_IE_ADDRESS, \ + A_TARGET_READ(scn, \ + (CE_ctrl_addr) + HOST_IE_ADDRESS) \ + & ~HOST_IE_COPY_COMPLETE_MASK) + +#define CE_BASE_ADDRESS(CE_id) \ + CE0_BASE_ADDRESS + ((CE1_BASE_ADDRESS - \ + CE0_BASE_ADDRESS)*(CE_id)) + +#define CE_WATERMARK_INTR_ENABLE(scn, CE_ctrl_addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + HOST_IE_ADDRESS, \ + A_TARGET_READ(scn, \ + (CE_ctrl_addr) + HOST_IE_ADDRESS) | \ + CE_WATERMARK_MASK) + +#define CE_WATERMARK_INTR_DISABLE(scn, CE_ctrl_addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + HOST_IE_ADDRESS, \ + A_TARGET_READ(scn, \ + (CE_ctrl_addr) + HOST_IE_ADDRESS) \ + & ~CE_WATERMARK_MASK) + +#define CE_ERROR_INTR_ENABLE(scn, CE_ctrl_addr) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + MISC_IE_ADDRESS, \ + A_TARGET_READ(scn, \ + (CE_ctrl_addr) + MISC_IE_ADDRESS) | CE_ERROR_MASK) + +#define CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + MISC_IS_ADDRESS) + +#define CE_ENGINE_INT_STATUS_GET(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + HOST_IS_ADDRESS) + +#define CE_ENGINE_INT_STATUS_CLEAR(scn, CE_ctrl_addr, mask) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + HOST_IS_ADDRESS, (mask)) + +#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK | \ + HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \ + HOST_IS_DST_RING_LOW_WATERMARK_MASK | \ + HOST_IS_DST_RING_HIGH_WATERMARK_MASK) + +#define CE_ERROR_MASK (MISC_IS_AXI_ERR_MASK | \ + MISC_IS_DST_ADDR_ERR_MASK | \ + MISC_IS_SRC_LEN_ERR_MASK | \ + MISC_IS_DST_MAX_LEN_VIO_MASK | \ + 
MISC_IS_DST_RING_OVERFLOW_MASK | \ + MISC_IS_SRC_RING_OVERFLOW_MASK) + +#define CE_SRC_RING_TO_DESC(baddr, idx) \ + (&(((struct CE_src_desc *)baddr)[idx])) +#define CE_DEST_RING_TO_DESC(baddr, idx) \ + (&(((struct CE_dest_desc *)baddr)[idx])) + +/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */ +#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \ + (((int)(toidx)-(int)(fromidx)) & (nentries_mask)) + +#define CE_RING_IDX_INCR(nentries_mask, idx) \ + (((idx) + 1) & (nentries_mask)) + +#define CE_RING_IDX_ADD(nentries_mask, idx, num) \ + (((idx) + (num)) & (nentries_mask)) + +#define CE_INTERRUPT_SUMMARY(scn) \ + CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \ + A_TARGET_READ(scn, CE_WRAPPER_BASE_ADDRESS + \ + CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)) + +#define READ_CE_DDR_ADDRESS_FOR_RRI_LOW(scn) \ + (A_TARGET_READ(scn, \ + CE_WRAPPER_BASE_ADDRESS + CE_DDR_ADDRESS_FOR_RRI_LOW)) + +#define READ_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn) \ + (A_TARGET_READ(scn, \ + CE_WRAPPER_BASE_ADDRESS + CE_DDR_ADDRESS_FOR_RRI_HIGH)) + +#define WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, val) \ + (A_TARGET_WRITE(scn, \ + CE_WRAPPER_BASE_ADDRESS + CE_DDR_ADDRESS_FOR_RRI_LOW, \ + val)) + +#define WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, val) \ + (A_TARGET_WRITE(scn, \ + CE_WRAPPER_BASE_ADDRESS + CE_DDR_ADDRESS_FOR_RRI_HIGH, \ + val)) + +/*Macro to increment CE packet errors*/ +#define OL_ATH_CE_PKT_ERROR_COUNT_INCR(_scn, _ce_ecode) \ + do { if (_ce_ecode == CE_RING_DELTA_FAIL) \ + (_scn->pkt_stats.ce_ring_delta_fail_count) \ + += 1; } while (0) + +/* Given a Copy Engine's ID, determine the interrupt number for that + * copy engine's interrupts. 
+ */ +#define CE_ID_TO_INUM(id) (A_INUM_CE0_COPY_COMP_BASE + (id)) +#define CE_INUM_TO_ID(inum) ((inum) - A_INUM_CE0_COPY_COMP_BASE) +#define CE0_BASE_ADDRESS (scn->target_ce_def->d_CE0_BASE_ADDRESS) +#define CE1_BASE_ADDRESS (scn->target_ce_def->d_CE1_BASE_ADDRESS) + + +#ifdef ADRASTEA_SHADOW_REGISTERS +#define NUM_SHADOW_REGISTERS 24 +u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr); +u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr); +#endif + + +#ifdef ADRASTEA_SHADOW_REGISTERS +#define CE_SRC_RING_WRITE_IDX_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, shadow_sr_wr_ind_addr(scn, CE_ctrl_addr), n) +#define CE_DEST_RING_WRITE_IDX_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, shadow_dst_wr_ind_addr(scn, CE_ctrl_addr), n) + +#else + +#define CE_SRC_RING_WRITE_IDX_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + SR_WR_INDEX_ADDRESS, (n)) +#define CE_DEST_RING_WRITE_IDX_SET(scn, CE_ctrl_addr, n) \ + A_TARGET_WRITE(scn, (CE_ctrl_addr) + DST_WR_INDEX_ADDRESS, (n)) +#endif + +/* The write index read is only needed durring initialization because + * we keep track of the index that was last written. Thus the register + * is the only hardware supported location to read the initial value from. + */ +#define CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + SR_WR_INDEX_ADDRESS) +#define CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, CE_ctrl_addr) \ + A_TARGET_READ(scn, (CE_ctrl_addr) + DST_WR_INDEX_ADDRESS) + +#endif /* __CE_REG_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service.c new file mode 100644 index 0000000000000000000000000000000000000000..0fee1bf0faf1447c356b18189ff1833805867eac --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service.c @@ -0,0 +1,3065 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hif.h" +#include "hif_io32.h" +#include "ce_api.h" +#include "ce_main.h" +#include "ce_internal.h" +#include "ce_reg.h" +#include "qdf_lock.h" +#include "regtable.h" +#include "hif_main.h" +#include "hif_debug.h" +#include "hif_napi.h" +#include "qdf_module.h" + +#ifdef IPA_OFFLOAD +#ifdef QCA_WIFI_3_0 +#define CE_IPA_RING_INIT(ce_desc) \ + do { \ + ce_desc->gather = 0; \ + ce_desc->enable_11h = 0; \ + ce_desc->meta_data_low = 0; \ + ce_desc->packet_result_offset = 64; \ + ce_desc->toeplitz_hash_enable = 0; \ + ce_desc->addr_y_search_disable = 0; \ + ce_desc->addr_x_search_disable = 0; \ + ce_desc->misc_int_disable = 0; \ + ce_desc->target_int_disable = 0; \ + ce_desc->host_int_disable = 0; \ + ce_desc->dest_byte_swap = 0; \ + ce_desc->byte_swap = 0; \ + ce_desc->type = 2; \ + ce_desc->tx_classify = 1; \ + ce_desc->buffer_addr_hi = 0; \ + ce_desc->meta_data = 0; \ + ce_desc->nbytes = 128; \ + } while (0) +#else +#define CE_IPA_RING_INIT(ce_desc) \ + do { \ + ce_desc->byte_swap = 0; \ + ce_desc->nbytes = 60; \ + ce_desc->gather = 0; \ + } while (0) +#endif /* QCA_WIFI_3_0 */ +#endif /* IPA_OFFLOAD */ + +#ifndef DATA_CE_SW_INDEX_NO_INLINE_UPDATE +#define DATA_CE_UPDATE_SWINDEX(x, scn, addr) \ + do { \ + x = 
CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, addr); \ + } while (0); +#else +#define DATA_CE_UPDATE_SWINDEX(x, scn, addr) +#endif + +static int war1_allow_sleep; +/* io32 write workaround */ +static int hif_ce_war1; + +/** + * hif_ce_war_disable() - disable ce war gobally + */ +void hif_ce_war_disable(void) +{ + hif_ce_war1 = 0; +} + +/** + * hif_ce_war_enable() - enable ce war gobally + */ +void hif_ce_war_enable(void) +{ + hif_ce_war1 = 1; +} + +/* + * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked + * for defined here + */ +#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) + +#define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1) +#define CE_DEBUG_DATA_PER_ROW 16 + +qdf_mutex_t ce_dbg_datamem_lock[CE_COUNT_MAX]; +static const char *ce_event_type_to_str(enum hif_ce_event_type type); + +/** + * get_next_record_index() - get the next record index + * @table_index: atomic index variable to increment + * @array_size: array size of the circular buffer + * + * Increment the atomic index and reserve the value. + * Takes care of buffer wrap. + * Guaranteed to be thread safe as long as fewer than array_size contexts + * try to access the array. If there are more than array_size contexts + * trying to access the array, full locking of the recording process would + * be needed to have sane logging. 
+ */ +static int get_next_record_index(qdf_atomic_t *table_index, int array_size) +{ + int record_index = qdf_atomic_inc_return(table_index); + + if (record_index == array_size) + qdf_atomic_sub(array_size, table_index); + + while (record_index >= array_size) + record_index -= array_size; + return record_index; +} + +#if HIF_CE_DEBUG_DATA_BUF +/** + * hif_ce_desc_data_record() - Record data pointed by the CE descriptor + * @event: structure detailing a ce event + * @len: length of the data + * Return: + */ +static void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len) +{ + uint8_t *data = NULL; + + if (!event->data) + return; + + if (event->memory && len > 0) + data = qdf_nbuf_data((qdf_nbuf_t)event->memory); + + event->actual_data_len = 0; + qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE); + + if (data && len > 0) { + qdf_mem_copy(event->data, data, + ((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ? + len : CE_DEBUG_MAX_DATA_BUF_SIZE)); + event->actual_data_len = len; + } +} +#endif + +/** + * hif_record_ce_desc_event() - record ce descriptor events + * @scn: hif_softc + * @ce_id: which ce is the event occurring on + * @type: what happened + * @descriptor: pointer to the descriptor posted/completed + * @memory: virtual address of buffer related to the descriptor + * @index: index that the descriptor was/will be at. 
+ */ +void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id, + enum hif_ce_event_type type, + union ce_desc *descriptor, + void *memory, int index, + int len) +{ + int record_index; + struct hif_ce_desc_event *event; + + struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; + struct hif_ce_desc_event *hist_ev = NULL; + + if (ce_id < CE_COUNT_MAX) + hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id]; + else + return; + + if (ce_id >= CE_COUNT_MAX) + return; + + if (!ce_hist->enable[ce_id]) + return; + + if (!hist_ev) + return; + + record_index = get_next_record_index( + &ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX); + + event = &hist_ev[record_index]; + + event->type = type; + event->time = qdf_get_log_timestamp(); + + if (descriptor != NULL) { + qdf_mem_copy(&event->descriptor, descriptor, sizeof(union ce_desc)); + } else { + qdf_mem_zero(&event->descriptor, sizeof(union ce_desc)); + } + + event->memory = memory; + event->index = index; + +#if HIF_CE_DEBUG_DATA_BUF + if (ce_hist->data_enable[ce_id]) + hif_ce_desc_data_record(event, len); +#endif +} +qdf_export_symbol(hif_record_ce_desc_event); + +/** + * ce_init_ce_desc_event_log() - initialize the ce event log + * @ce_id: copy engine id for which we are initializing the log + * @size: size of array to dedicate + * + * Currently the passed size is ignored in favor of a precompiled value. 
 */
void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	qdf_atomic_init(&ce_hist->history_index[ce_id]);
	qdf_mutex_create(&ce_dbg_datamem_lock[ce_id]);
}

/**
 * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
 * @scn: hif context
 * @ce_id: copy engine id for which we are deinitializing the log
 */
inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
{
	qdf_mutex_destroy(&ce_dbg_datamem_lock[ce_id]);
}

#else /* Note: For MCL, (HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
/* Stub implementations used when CE descriptor history is compiled out. */
void hif_record_ce_desc_event(struct hif_softc *scn,
			      int ce_id, enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index, int len)
{
}
qdf_export_symbol(hif_record_ce_desc_event);

inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
				      int size)
{
}

void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
{
}
#endif /* Note: for MCL, HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */

#ifdef NAPI_YIELD_BUDGET_BASED
/* Budget-based variant: yield decision is based solely on the number of
 * packets received so far in this service pass.
 */
bool hif_ce_service_should_yield(struct hif_softc *scn,
				 struct CE_state *ce_state)
{
	bool yield = hif_max_num_receives_reached(scn, ce_state->receive_count);

	return yield;
}
#else
/**
 * hif_ce_service_should_yield() - return true if the service is hogging the cpu
 * @scn: hif context
 * @ce_state: context of the copy engine being serviced
 *
 * Yields when the time budget (ce_service_yield_time) has expired or,
 * failing that, when the receive-count threshold has been reached.
 *
 * Return: true if the service should yield
 */
bool hif_ce_service_should_yield(struct hif_softc *scn,
				 struct CE_state *ce_state)
{
	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;

	time_limit_reached =
		sched_clock() > ce_state->ce_service_yield_time ? 1 : 0;

	/* Only bother checking the packet threshold if the time budget
	 * has not already expired.
	 */
	if (!time_limit_reached)
		rxpkt_thresh_reached = hif_max_num_receives_reached
					(scn, ce_state->receive_count);

	yield = time_limit_reached || rxpkt_thresh_reached;

	if (yield && ce_state->htt_rx_data)
		hif_napi_update_yield_stats(ce_state,
					    time_limit_reached,
					    rxpkt_thresh_reached);
	return yield;
}
#endif
/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem -- than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * Target TX harsh result toeplitz_hash_result
 */

/*
 * Guts of ce_send, used by both ce_send and ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */

/* Write the source-ring write index, applying the CDC WAR1 hardware
 * workaround (indicator handshake around the register write) when the
 * hif_ce_war1 flag is set.
 */
static
void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
				   u32 ctrl_addr, unsigned int write_index)
{
	if (hif_ce_war1) {
		void __iomem *indicator_addr;

		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;

		if (!war1_allow_sleep
		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
			hif_write32_mb(indicator_addr,
				       (CDC_WAR_MAGIC_STR | write_index));
		} else {
			unsigned long irq_flags;

			local_irq_save(irq_flags);
			hif_write32_mb(indicator_addr, 1);

			/*
			 * PCIE write waits for ACK in IPQ8K, there is no
			 * need to read back value.
			 */
			(void)hif_read32_mb(indicator_addr);
			(void)hif_read32_mb(indicator_addr); /* conservative */

			CE_SRC_RING_WRITE_IDX_SET(scn,
						  ctrl_addr, write_index);

			hif_write32_mb(indicator_addr, 0);
			local_irq_restore(irq_flags);
		}
	} else {
		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
	}
}

#ifdef HIF_CONFIG_SLUB_DEBUG_ON
/**
 * ce_validate_nbytes() - validate nbytes for slub builds on tx descriptors
 * @nbytes: nbytes value being written into a send descriptor
 * @ce_state: context of the copy engine
 *
 * nbytes should be non-zero and less than max configured for the copy engine
 *
 * Return: none
 */
static void ce_validate_nbytes(uint32_t nbytes, struct CE_state *ce_state)
{
	if (nbytes <= 0 || nbytes > ce_state->src_sz_max)
		QDF_BUG(0);
}
#else
/* No-op on non-debug builds. */
static void ce_validate_nbytes(uint32_t nbytes, struct CE_state *ce_state)
{
}
#endif

/* Post one send descriptor on the CE source ring. Caller must hold
 * ce_index_lock; returns QDF_STATUS_E_FAILURE when the ring is full or
 * target access cannot be started.
 */
static int
ce_send_nolock_legacy(struct CE_handle *copyeng,
		      void *per_transfer_context,
		      qdf_dma_addr_t buffer,
		      uint32_t nbytes,
		      uint32_t transfer_id,
		      uint32_t flags,
		      uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type;
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/* Update low 32 bits source descriptor address */
		shadow_src_desc->buffer_addr =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		shadow_src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
		user_flags |= shadow_src_desc->buffer_addr_hi;
		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
		       sizeof(uint32_t));
#endif
		shadow_src_desc->target_int_disable = 0;
		shadow_src_desc->host_int_disable = 0;

		shadow_src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		shadow_src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		shadow_src_desc->nbytes = nbytes;
		ce_validate_nbytes(nbytes, CE_state);

		/* Copy the fully-built shadow descriptor to the uncached
		 * hardware ring in one assignment.
		 */
		*src_desc = *shadow_src_desc;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;

		/* Update Source Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		/* WORKAROUND */
		if (shadow_src_desc->gather) {
			event_type = HIF_TX_GATHER_DESC_POST;
		} else if (qdf_unlikely(CE_state->state != CE_RUNNING)) {
			event_type = HIF_TX_DESC_SOFTWARE_POST;
			CE_state->state = CE_PENDING;
		} else {
			event_type = HIF_TX_DESC_POST;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
		}

		/* src_ring->write index hasn't been updated even though
		 * the register has already been written to.
		 */
		hif_record_ce_desc_event(scn, CE_state->id, event_type,
			(union ce_desc *) shadow_src_desc, per_transfer_context,
			src_ring->write_index, nbytes);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

/* Locked wrapper: dispatches to the registered ce_send_nolock service. */
int
ce_send(struct CE_handle *copyeng,
	void *per_transfer_context,
	qdf_dma_addr_t buffer,
	uint32_t nbytes,
	uint32_t transfer_id,
	uint32_t flags,
	uint32_t user_flag)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status = hif_state->ce_services->ce_send_nolock(copyeng,
			per_transfer_context, buffer, nbytes,
			transfer_id, flags, user_flag);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

/* Size of an opaque sendlist, for callers that allocate one. */
unsigned int ce_sendlist_sizeof(void)
{
	return sizeof(struct ce_sendlist);
}

/* Reset a sendlist to empty. */
void ce_sendlist_init(struct ce_sendlist *sendlist)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;

	sl->num_items = 0;
}

/* Append one simple buffer to a sendlist; fails with E_RESOURCES when the
 * fixed-size item array is full.
 */
int
ce_sendlist_buf_add(struct ce_sendlist *sendlist,
		    qdf_dma_addr_t buffer,
		    uint32_t nbytes,
		    uint32_t flags,
		    uint32_t user_flags)
{
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	unsigned int num_items = sl->num_items;
	struct ce_sendlist_item *item;

	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
		return QDF_STATUS_E_RESOURCES;
	}

	item = &sl->item[num_items];
	item->send_type = CE_SIMPLE_BUFFER_TYPE;
	item->data = buffer;
	item->u.nbytes = nbytes;
	item->flags = flags;
	item->user_flags = user_flags;
	sl->num_items = num_items + 1;
	return QDF_STATUS_SUCCESS;
}

/* Dispatch to the registered ce_sendlist_send service. */
int
ce_sendlist_send(struct CE_handle *copyeng,
		 void *per_transfer_context,
		 struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	return hif_state->ce_services->ce_sendlist_send(copyeng,
			per_transfer_context, sendlist, transfer_id);
}

/* Post an entire sendlist as a hardware gather: all items but the last carry
 * CE_SEND_FLAG_GATHER; the caller's context is attached to the final item.
 * All-or-nothing: if the ring lacks room for every item, nothing is posted.
 */
static int
ce_sendlist_send_legacy(struct CE_handle *copyeng,
			void *per_transfer_context,
			struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);

	/* In fastpath mode the firmware-visible read index lives in DDR;
	 * refresh sw_index from it before computing available space.
	 */
	if (CE_state->scn->fastpath_mode_on && CE_state->htt_tx_data &&
	    Q_TARGET_ACCESS_BEGIN(scn) == 0) {
		src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(
					scn, CE_state->ctrl_addr);
		Q_TARGET_ACCESS_END(scn);
	}

	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_legacy(copyeng,
				CE_SENDLIST_ITEM_CTXT,
				(qdf_dma_addr_t) item->data,
				item->u.nbytes, transfer_id,
				item->flags | CE_SEND_FLAG_GATHER,
				item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_legacy(copyeng, per_transfer_context,
					       (qdf_dma_addr_t) item->data,
					       item->u.nbytes,
					       transfer_id, item->flags,
					       item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					     QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
			QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

#ifdef WLAN_FEATURE_FASTPATH
#ifdef QCA_WIFI_3_0
/* Fill the high address bits and user flags into word 1 of the shadow
 * source descriptor (40-bit DMA addressing).
 */
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
	shadow_src_desc->buffer_addr_hi =
		(uint32_t)((dma_addr >> 32) & 0x1F);
	user_flags |= shadow_src_desc->buffer_addr_hi;
	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
	       sizeof(uint32_t));
}
#else
/* 32-bit addressing: nothing to do. */
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
}
#endif

/* Each datapath TX consumes two source-ring slots: header + payload. */
#define SLOTS_PER_DATAPATH_TX 2

/**
 * ce_send_fast() - CE layer Tx buffer posting function
 * @copyeng: copy engine handle
 * @msdu: msdu to be sent
 * @transfer_id: transfer_id
 * @download_len: packet download length
 *
 * Assumption : Called with an array of MSDU's
 * Function:
 * For each msdu in the array
 * 1. Check no. of available entries
 * 2. Create src ring entries (allocated in consistent memory)
 * 3. Write index to h/w
 *
 * Return: No. of packets that could be sent
 */
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
		 unsigned int transfer_id, uint32_t download_len)
{
	struct CE_state *ce_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = ce_state->scn;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	unsigned int frag_len;
	uint64_t dma_addr;
	uint32_t user_flags;
	enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;
	bool ok_to_send = true;

	/*
	 * Create a log assuming the call will go through, and if not, we would
	 * add an error trace as well.
	 * Please add the same failure log for any additional error paths.
	 */
	DPTRACE(qdf_dp_trace(msdu,
			QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			qdf_nbuf_data_addr(msdu),
			sizeof(qdf_nbuf_data(msdu)), QDF_TX));

	qdf_spin_lock_bh(&ce_state->ce_index_lock);

	/*
	 * Request runtime PM resume if it has already suspended and make
	 * sure there is no PCIe link access.
	 */
	if (hif_pm_runtime_get(hif_hdl) != 0)
		ok_to_send = false;

	if (ok_to_send) {
		Q_TARGET_ACCESS_BEGIN(scn);
		DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
	}

	write_index = src_ring->write_index;
	sw_index = src_ring->sw_index;
	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_TX_SOFTWARE_INDEX_UPDATE,
				 NULL, NULL, sw_index, 0);

	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
			 < SLOTS_PER_DATAPATH_TX)) {
		hif_err_rl("Source ring full, required %d, available %d",
			   SLOTS_PER_DATAPATH_TX,
			   CE_RING_DELTA(nentries_mask, write_index,
					 sw_index - 1));
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		if (ok_to_send)
			Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		DPTRACE(qdf_dp_trace(NULL,
				QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD,
				QDF_TRACE_DEFAULT_PDEV_ID,
				NULL, 0, QDF_TX));

		return 0;
	}

	{
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		hif_pm_runtime_get_noresume(hif_hdl);

		/*
		 * First fill out the ring descriptor for the HTC HTT frame
		 * header. These are uncached writes. Should we use a local
		 * structure instead?
		 */
		/* HTT/HTC header can be passed as a argument */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;
		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
		download_len -= shadow_src_desc->nbytes;
		/*
		 * HTC HTT header is a word stream, so byte swap if CE byte
		 * swap enabled
		 */
		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
					       CE_ATTR_BYTE_SWAP_DATA) != 0);
		/* For the first one, it still does not need to write */
		shadow_src_desc->gather = 1;
		*src_desc = *shadow_src_desc;
		/* By default we could initialize the transfer context to this
		 * value
		 */
		src_ring->per_transfer_context[write_index] =
			CE_SENDLIST_ITEM_CTXT;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
		/*
		 * Now fill out the ring descriptor for the actual data
		 * packet
		 */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		/*
		 * Clear packet offset for all but the first CE desc.
		 */
		user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;

		/* get actual packet length */
		frag_len = qdf_nbuf_get_frag_len(msdu, 1);

		/* download remaining bytes of payload */
		shadow_src_desc->nbytes = download_len;
		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
		if (shadow_src_desc->nbytes > frag_len)
			shadow_src_desc->nbytes = frag_len;

		/* Data packet is a byte stream, so disable byte swap */
		shadow_src_desc->byte_swap = 0;
		/* For the last one, gather is not set */
		shadow_src_desc->gather = 0;
		*src_desc = *shadow_src_desc;
		src_ring->per_transfer_context[write_index] = msdu;

		hif_record_ce_desc_event(scn, ce_state->id, type,
				(union ce_desc *)src_desc,
				src_ring->per_transfer_context[write_index],
				write_index, shadow_src_desc->nbytes);

		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		DPTRACE(qdf_dp_trace(msdu,
				QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
				QDF_TRACE_DEFAULT_PDEV_ID,
				qdf_nbuf_data_addr(msdu),
				sizeof(qdf_nbuf_data(msdu)), QDF_TX));
	}

	src_ring->write_index = write_index;

	if (ok_to_send) {
		if (qdf_likely(ce_state->state == CE_RUNNING)) {
			type = FAST_TX_WRITE_INDEX_UPDATE;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
			Q_TARGET_ACCESS_END(scn);
		} else
			ce_state->state = CE_PENDING;
		hif_pm_runtime_put(hif_hdl);
	}

	qdf_spin_unlock_bh(&ce_state->ce_index_lock);

	/* sent 1 packet */
	return 1;
}

/**
 * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
 * @scn: Handle to HIF context
 *
 * Return: true if fastpath is enabled else false.
 */
static bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return scn->fastpath_mode_on;
}

/**
 * ce_is_fastpath_handler_registered() - return true for datapath CEs and if
 * fastpath is enabled.
 * @ce_state: handle to copy engine
 *
 * Return: true if fastpath handler is registered for datapath CE.
 */
static bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	if (ce_state->fastpath_handler)
		return true;
	else
		return false;
}


#else
/* Fastpath compiled out: report it unavailable. */
static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}

static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif

/**
 * ce_batch_send() - sends bunch of msdus at once
 * @ce_tx_hdl : pointer to CE handle
 * @msdu : list of msdus to be sent
 * @transfer_id : transfer id
 * @len : Downloaded length
 * @sendhead : sendhead
 *
 * Assumption : Called with an array of MSDU's
 * Function:
 * For each msdu in the array
 * 1. Send each msdu
 * 2. Increment write index accordingly.
 *
 * Return: list of msdus not sent
 */
qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
			 uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	/* A_target_id_t targid = TARGID(scn);*/

	uint32_t nentries_mask = src_ring->nentries_mask;
	uint32_t sw_index, write_index;

	struct CE_src_desc *src_desc_base =
		(struct CE_src_desc *)src_ring->base_addr_owner_space;
	uint32_t *src_desc;

	struct CE_src_desc lsrc_desc = {0};
	int deltacount = 0;
	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;

	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);

	while (msdu) {
		tempnext = qdf_nbuf_next(msdu);

		if (deltacount < 2) {
			if (sendhead)
				return msdu;
			HIF_ERROR("%s: Out of descriptors", __func__);
			/* Flush what has been queued so far, then re-read
			 * the indices in case the target has drained slots.
			 */
			src_ring->write_index = write_index;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);

			sw_index = src_ring->sw_index;
			write_index = src_ring->write_index;

			deltacount = CE_RING_DELTA(nentries_mask, write_index,
						   sw_index-1);
			/* Ring still full: move this msdu onto the not-sent
			 * list that is returned to the caller.
			 */
			if (freelist == NULL) {
				freelist = msdu;
				hfreelist = msdu;
			} else {
				qdf_nbuf_set_next(freelist, msdu);
				freelist = msdu;
			}
			qdf_nbuf_set_next(msdu, NULL);
			msdu = tempnext;
			continue;
		}

		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
							   write_index);

		src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);

		lsrc_desc.meta_data = transfer_id;
		if (len > msdu->len)
			len = msdu->len;
		lsrc_desc.nbytes = len;
		/* Data packet is a byte stream, so disable byte swap */
		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
		lsrc_desc.gather = 0; /* For the last one, gather is not set */

		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];


		src_ring->per_transfer_context[write_index] = msdu;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		if (sendhead)
			break;
		qdf_nbuf_set_next(msdu, NULL);
		msdu = tempnext;

	}


	src_ring->write_index = write_index;
	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);

	return hfreelist;
}

/**
 * ce_update_tx_ring() - Advance sw index.
 * @ce_tx_hdl : pointer to CE handle
 * @num_htt_cmpls : htt completions received.
 *
 * Function:
 * Increment the value of sw index of src ring
 * according to number of htt completions
 * received.
 *
 * Return: void
 */
#ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	uint32_t nentries_mask = src_ring->nentries_mask;

	/*
	 * Advance the s/w index:
	 * This effectively simulates completing the CE ring descriptors
	 */
	src_ring->sw_index =
		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
				num_htt_cmpls);
}
#else
/* No-op when sw index is updated inline elsewhere. */
void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
{}
#endif

/**
 * ce_send_single() - sends
 * @ce_tx_hdl : pointer to CE handle
 * @msdu : msdu to be sent
 * @transfer_id : transfer id
 * @len : Downloaded length
 *
 * Function:
 * 1. Send one msdu
 * 2. Increment write index of src ring accordingly.
 *
 * Return: int: CE sent status
 */
int ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
		   uint32_t transfer_id, u_int32_t len)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	uint32_t ctrl_addr = ce_state->ctrl_addr;
	/*A_target_id_t targid = TARGID(scn);*/

	uint32_t nentries_mask = src_ring->nentries_mask;
	uint32_t sw_index, write_index;

	struct CE_src_desc *src_desc_base =
		(struct CE_src_desc *)src_ring->base_addr_owner_space;
	uint32_t *src_desc;

	struct CE_src_desc lsrc_desc = {0};
	enum hif_ce_event_type event_type;

	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
				       sw_index-1) < 1)) {
		/* ol_tx_stats_inc_ring_error(sc->scn->pdev_txrx_handle, 1); */
		HIF_ERROR("%s: ce send fail %d %d %d", __func__, nentries_mask,
			  write_index, sw_index);
		return 1;
	}

	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);

	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);

	lsrc_desc.meta_data = transfer_id;
	lsrc_desc.nbytes = len;
	/* Data packet is a byte stream, so disable byte swap */
	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
	lsrc_desc.gather = 0; /* For the last one, gather is not set */

	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];


	src_ring->per_transfer_context[write_index] = msdu;

	if (((struct CE_src_desc *)src_desc)->gather)
		event_type = HIF_TX_GATHER_DESC_POST;
	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
		event_type = HIF_TX_DESC_SOFTWARE_POST;
	else
		event_type = HIF_TX_DESC_POST;

	hif_record_ce_desc_event(scn, ce_state->id, event_type,
				 (union ce_desc *)src_desc, msdu,
				 write_index, len);

	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	src_ring->write_index = write_index;

	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);

	return QDF_STATUS_SUCCESS;
}

/**
 * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: 0 if the buffer is enqueued
 */
int
ce_recv_buf_enqueue(struct CE_handle *copyeng,
		    void *per_recv_context, qdf_dma_addr_t buffer)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);

	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
			per_recv_context, buffer);
}

/**
 * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: 0 if the buffer is enqueued
 */
static int
ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
			   void *per_recv_context, qdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return -EIO;
	}

	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
	    (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);

		/* Update low 32 bit destination descriptor */
		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		dest_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0x1F);
#endif
		dest_desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
			per_recv_context;

		hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
				(union ce_desc *) dest_desc, per_recv_context,
				write_index, 0);

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		if (write_index != sw_index) {
			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
			dest_ring->write_index = write_index;
		}
		status = QDF_STATUS_SUCCESS;
	} else
		status = QDF_STATUS_E_FAILURE;

	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}

/* Program source-ring low/high watermark interrupt thresholds. */
void
ce_send_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
}

/* Program destination-ring low/high watermark interrupt thresholds. */
void
ce_recv_watermarks_set(struct CE_handle *copyeng,
		       unsigned int low_alert_nentries,
		       unsigned int high_alert_nentries)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
				 low_alert_nentries);
	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
				  high_alert_nentries);
}

/* Number of free slots on the source ring (software view). */
unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	qdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

/* Number of free slots on the destination ring (software view). */
unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;

	qdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
+ */ +static unsigned int +ce_send_entries_done_nolock_legacy(struct hif_softc *scn, + struct CE_state *CE_state) +{ + struct CE_ring_state *src_ring = CE_state->src_ring; + uint32_t ctrl_addr = CE_state->ctrl_addr; + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int sw_index; + unsigned int read_index; + + sw_index = src_ring->sw_index; + read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr); + + return CE_RING_DELTA(nentries_mask, sw_index, read_index); +} + +unsigned int ce_send_entries_done(struct CE_handle *copyeng) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + unsigned int nentries; + struct hif_softc *scn = CE_state->scn; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + qdf_spin_lock(&CE_state->ce_index_lock); + nentries = hif_state->ce_services->ce_send_entries_done_nolock( + CE_state->scn, CE_state); + qdf_spin_unlock(&CE_state->ce_index_lock); + + return nentries; +} + +/* + * Guts of ce_recv_entries_done. + * The caller takes responsibility for any necessary locking. 
 */
/* Count destination-ring entries completed by hardware but not yet reaped:
 * delta between the hardware read index and our sw_index.
 */
static unsigned int
ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
				   struct CE_state *CE_state)
{
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = dest_ring->sw_index;
	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

/* Locked wrapper: query completed recv entries via the registered service. */
unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int nentries;
	struct hif_softc *scn = CE_state->scn;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries = hif_state->ce_services->ce_recv_entries_done_nolock(
			CE_state->scn, CE_state);
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return nentries;
}

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
				     void **per_CE_contextp,
				     void **per_transfer_contextp,
				     qdf_dma_addr_t *bufferp,
				     unsigned int *nbytesp,
				     unsigned int *transfer_idp,
				     unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
	struct CE_dest_desc *dest_desc =
		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
	int nbytes;
	struct CE_dest_desc dest_desc_info;
	/*
	 * By copying the dest_desc_info element to local memory, we could
	 * avoid extra memory read from non-cachable memory.
	 */
	dest_desc_info = *dest_desc;
	nbytes = dest_desc_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
			(union ce_desc *) dest_desc,
			dest_ring->per_transfer_context[sw_index],
			sw_index, 0);

	dest_desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
	*nbytesp = nbytes;
	*transfer_idp = dest_desc_info.meta_data;
	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	if (per_transfer_contextp) {
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];
	}
	dest_ring->per_transfer_context[sw_index] = 0;	/* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	return status;
}

/* Locked wrapper: reap the next completed recv via the registered service. */
int
ce_completed_recv_next(struct CE_handle *copyeng,
		       void **per_CE_contextp,
		       void **per_transfer_contextp,
		       qdf_dma_addr_t *bufferp,
		       unsigned int *nbytesp,
		       unsigned int *transfer_idp, unsigned int *flagsp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;
	struct hif_softc *scn = CE_state->scn;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct ce_ops *ce_services;

	ce_services = hif_state->ce_services;
	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_services->ce_completed_recv_next_nolock(CE_state,
				per_CE_contextp, per_transfer_contextp, bufferp,
				nbytesp, transfer_idp, flagsp);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

QDF_STATUS
ce_revoke_recv_next(struct CE_handle
*copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, qdf_dma_addr_t *bufferp) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn); + + return hif_state->ce_services->ce_revoke_recv_next(copyeng, + per_CE_contextp, per_transfer_contextp, bufferp); +} +/* NB: Modeled after ce_completed_recv_next_nolock */ +static QDF_STATUS +ce_revoke_recv_next_legacy(struct CE_handle *copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, qdf_dma_addr_t *bufferp) +{ + struct CE_state *CE_state; + struct CE_ring_state *dest_ring; + unsigned int nentries_mask; + unsigned int sw_index; + unsigned int write_index; + QDF_STATUS status; + struct hif_softc *scn; + + CE_state = (struct CE_state *)copyeng; + dest_ring = CE_state->dest_ring; + if (!dest_ring) + return QDF_STATUS_E_FAILURE; + + scn = CE_state->scn; + qdf_spin_lock(&CE_state->ce_index_lock); + nentries_mask = dest_ring->nentries_mask; + sw_index = dest_ring->sw_index; + write_index = dest_ring->write_index; + if (write_index != sw_index) { + struct CE_dest_desc *dest_ring_base = + (struct CE_dest_desc *)dest_ring-> + base_addr_owner_space; + struct CE_dest_desc *dest_desc = + CE_DEST_RING_TO_DESC(dest_ring_base, sw_index); + + /* Return data from completed destination descriptor */ + *bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc); + + if (per_CE_contextp) + *per_CE_contextp = CE_state->recv_context; + + if (per_transfer_contextp) { + *per_transfer_contextp = + dest_ring->per_transfer_context[sw_index]; + } + dest_ring->per_transfer_context[sw_index] = 0; /* sanity */ + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + dest_ring->sw_index = sw_index; + status = QDF_STATUS_SUCCESS; + } else { + status = QDF_STATUS_E_FAILURE; + } + qdf_spin_unlock(&CE_state->ce_index_lock); + + return status; +} + +/* + * Guts of ce_completed_send_next. + * The caller takes responsibility for any necessary locking. 
+ */ +static int +ce_completed_send_next_nolock_legacy(struct CE_state *CE_state, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *sw_idx, + unsigned int *hw_idx, + uint32_t *toeplitz_hash_result) +{ + int status = QDF_STATUS_E_FAILURE; + struct CE_ring_state *src_ring = CE_state->src_ring; + uint32_t ctrl_addr = CE_state->ctrl_addr; + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int sw_index = src_ring->sw_index; + unsigned int read_index; + struct hif_softc *scn = CE_state->scn; + + if (src_ring->hw_index == sw_index) { + /* + * The SW completion index has caught up with the cached + * version of the HW completion index. + * Update the cached HW completion index to see whether + * the SW has really caught up to the HW, or if the cached + * value of the HW index has become stale. + */ + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return QDF_STATUS_E_FAILURE; + src_ring->hw_index = + CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr); + if (Q_TARGET_ACCESS_END(scn) < 0) + return QDF_STATUS_E_FAILURE; + } + read_index = src_ring->hw_index; + + if (sw_idx) + *sw_idx = sw_index; + + if (hw_idx) + *hw_idx = read_index; + + if ((read_index != sw_index) && (read_index != 0xffffffff)) { + struct CE_src_desc *shadow_base = + (struct CE_src_desc *)src_ring->shadow_base; + struct CE_src_desc *shadow_src_desc = + CE_SRC_RING_TO_DESC(shadow_base, sw_index); +#ifdef QCA_WIFI_3_0 + struct CE_src_desc *src_ring_base = + (struct CE_src_desc *)src_ring->base_addr_owner_space; + struct CE_src_desc *src_desc = + CE_SRC_RING_TO_DESC(src_ring_base, sw_index); +#endif + hif_record_ce_desc_event(scn, CE_state->id, + HIF_TX_DESC_COMPLETION, + (union ce_desc *) shadow_src_desc, + src_ring->per_transfer_context[sw_index], + sw_index, shadow_src_desc->nbytes); + + /* Return data from completed source descriptor */ + *bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc); + 
*nbytesp = shadow_src_desc->nbytes; + *transfer_idp = shadow_src_desc->meta_data; +#ifdef QCA_WIFI_3_0 + *toeplitz_hash_result = src_desc->toeplitz_hash_result; +#else + *toeplitz_hash_result = 0; +#endif + if (per_CE_contextp) + *per_CE_contextp = CE_state->send_context; + + if (per_transfer_contextp) { + *per_transfer_contextp = + src_ring->per_transfer_context[sw_index]; + } + src_ring->per_transfer_context[sw_index] = 0; /* sanity */ + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + src_ring->sw_index = sw_index; + status = QDF_STATUS_SUCCESS; + } + + return status; +} + +QDF_STATUS +ce_cancel_send_next(struct CE_handle *copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + uint32_t *toeplitz_hash_result) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn); + + return hif_state->ce_services->ce_cancel_send_next + (copyeng, per_CE_contextp, per_transfer_contextp, + bufferp, nbytesp, transfer_idp, toeplitz_hash_result); +} + +/* NB: Modeled after ce_completed_send_next */ +static QDF_STATUS +ce_cancel_send_next_legacy(struct CE_handle *copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + uint32_t *toeplitz_hash_result) +{ + struct CE_state *CE_state; + struct CE_ring_state *src_ring; + unsigned int nentries_mask; + unsigned int sw_index; + unsigned int write_index; + QDF_STATUS status; + struct hif_softc *scn; + + CE_state = (struct CE_state *)copyeng; + src_ring = CE_state->src_ring; + if (!src_ring) + return QDF_STATUS_E_FAILURE; + + scn = CE_state->scn; + qdf_spin_lock(&CE_state->ce_index_lock); + nentries_mask = src_ring->nentries_mask; + sw_index = src_ring->sw_index; + write_index = src_ring->write_index; + + if (write_index != sw_index) { + struct 
CE_src_desc *src_ring_base = + (struct CE_src_desc *)src_ring->base_addr_owner_space; + struct CE_src_desc *src_desc = + CE_SRC_RING_TO_DESC(src_ring_base, sw_index); + + /* Return data from completed source descriptor */ + *bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc); + *nbytesp = src_desc->nbytes; + *transfer_idp = src_desc->meta_data; +#ifdef QCA_WIFI_3_0 + *toeplitz_hash_result = src_desc->toeplitz_hash_result; +#else + *toeplitz_hash_result = 0; +#endif + + if (per_CE_contextp) + *per_CE_contextp = CE_state->send_context; + + if (per_transfer_contextp) { + *per_transfer_contextp = + src_ring->per_transfer_context[sw_index]; + } + src_ring->per_transfer_context[sw_index] = 0; /* sanity */ + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + src_ring->sw_index = sw_index; + status = QDF_STATUS_SUCCESS; + } else { + status = QDF_STATUS_E_FAILURE; + } + qdf_spin_unlock(&CE_state->ce_index_lock); + + return status; +} + +/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */ +#define CE_WM_SHFT 1 + +int +ce_completed_send_next(struct CE_handle *copyeng, + void **per_CE_contextp, + void **per_transfer_contextp, + qdf_dma_addr_t *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *sw_idx, + unsigned int *hw_idx, + unsigned int *toeplitz_hash_result) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct hif_softc *scn = CE_state->scn; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct ce_ops *ce_services; + int status; + + ce_services = hif_state->ce_services; + qdf_spin_lock_bh(&CE_state->ce_index_lock); + status = + ce_services->ce_completed_send_next_nolock(CE_state, + per_CE_contextp, per_transfer_contextp, + bufferp, nbytesp, transfer_idp, sw_idx, + hw_idx, toeplitz_hash_result); + qdf_spin_unlock_bh(&CE_state->ce_index_lock); + + return status; +} + +#ifdef ATH_11AC_TXCOMPACT +/* CE engine descriptor reap + * Similar to ce_per_engine_service , Only 
difference is ce_per_engine_service + * does receive and reaping of completed descriptor , + * This function only handles reaping of Tx complete descriptor. + * The Function is called from threshold reap poll routine + * hif_send_complete_check so should not countain receive functionality + * within it . + */ + +void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id) +{ + void *CE_context; + void *transfer_context; + qdf_dma_addr_t buf; + unsigned int nbytes; + unsigned int id; + unsigned int sw_idx, hw_idx; + uint32_t toeplitz_hash_result; + struct CE_state *CE_state = scn->ce_id_to_state[ce_id]; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY, + NULL, NULL, 0, 0); + + /* Since this function is called from both user context and + * tasklet context the spinlock has to lock the bottom halves. + * This fix assumes that ATH_11AC_TXCOMPACT flag is always + * enabled in TX polling mode. If this is not the case, more + * bottom halve spin lock changes are needed. Due to data path + * performance concern, after internal discussion we've decided + * to make minimum change, i.e., only address the issue occurred + * in this function. The possible negative effect of this minimum + * change is that, in the future, if some other function will also + * be opened to let the user context to use, those cases need to be + * addressed by change spin_lock to spin_lock_bh also. 
+ */ + + qdf_spin_lock_bh(&CE_state->ce_index_lock); + + if (CE_state->send_cb) { + { + struct ce_ops *ce_services = hif_state->ce_services; + /* Pop completed send buffers and call the + * registered send callback for each + */ + while (ce_services->ce_completed_send_next_nolock + (CE_state, &CE_context, + &transfer_context, &buf, + &nbytes, &id, &sw_idx, &hw_idx, + &toeplitz_hash_result) == + QDF_STATUS_SUCCESS) { + if (ce_id != CE_HTT_H2T_MSG) { + qdf_spin_unlock_bh( + &CE_state->ce_index_lock); + CE_state->send_cb( + (struct CE_handle *) + CE_state, CE_context, + transfer_context, buf, + nbytes, id, sw_idx, hw_idx, + toeplitz_hash_result); + qdf_spin_lock_bh( + &CE_state->ce_index_lock); + } else { + struct HIF_CE_pipe_info *pipe_info = + (struct HIF_CE_pipe_info *) + CE_context; + + qdf_spin_lock_bh(&pipe_info-> + completion_freeq_lock); + pipe_info->num_sends_allowed++; + qdf_spin_unlock_bh(&pipe_info-> + completion_freeq_lock); + } + } + } + } + + qdf_spin_unlock_bh(&CE_state->ce_index_lock); + + hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT, + NULL, NULL, 0, 0); + Q_TARGET_ACCESS_END(scn); +} + +#endif /*ATH_11AC_TXCOMPACT */ + +/* + * Number of times to check for any pending tx/rx completion on + * a copy engine, this count should be big enough. Once we hit + * this threashold we'll not check for any Tx/Rx comlpetion in same + * interrupt handling. Note that this threashold is only used for + * Rx interrupt processing, this can be used tor Tx as well if we + * suspect any infinite loop in checking for pending Tx completion. 
+ */ +#define CE_TXRX_COMP_CHECK_THRESHOLD 20 + +#ifdef WLAN_FEATURE_FASTPATH +/** + * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler + * @ce_state: handle to copy engine state + * @cmpl_msdus: Rx msdus + * @num_cmpls: number of Rx msdus + * @ctrl_addr: CE control address + * + * Return: None + */ +static void ce_fastpath_rx_handle(struct CE_state *ce_state, + qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls, + uint32_t ctrl_addr) +{ + struct hif_softc *scn = ce_state->scn; + struct CE_ring_state *dest_ring = ce_state->dest_ring; + uint32_t nentries_mask = dest_ring->nentries_mask; + uint32_t write_index; + + qdf_spin_unlock(&ce_state->ce_index_lock); + (ce_state->fastpath_handler)(ce_state->context, cmpl_msdus, num_cmpls); + qdf_spin_lock(&ce_state->ce_index_lock); + + /* Update Destination Ring Write Index */ + write_index = dest_ring->write_index; + write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls); + + hif_record_ce_desc_event(scn, ce_state->id, + FAST_RX_WRITE_INDEX_UPDATE, + NULL, NULL, write_index, 0); + + CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index); + dest_ring->write_index = write_index; +} + +/** + * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs + * @scn: hif_context + * @ce_id: Copy engine ID + * 1) Go through the CE ring, and find the completions + * 2) For valid completions retrieve context (nbuf) for per_transfer_context[] + * 3) Unmap buffer & accumulate in an array. 
+ * 4) Call message handler when array is full or when exiting the handler + * + * Return: void + */ + +static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id) +{ + struct CE_state *ce_state = scn->ce_id_to_state[ce_id]; + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + struct CE_ring_state *dest_ring = ce_state->dest_ring; + struct CE_dest_desc *dest_ring_base = + (struct CE_dest_desc *)dest_ring->base_addr_owner_space; + + uint32_t nentries_mask = dest_ring->nentries_mask; + uint32_t sw_index = dest_ring->sw_index; + uint32_t nbytes; + qdf_nbuf_t nbuf; + dma_addr_t paddr; + struct CE_dest_desc *dest_desc; + qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM]; + uint32_t ctrl_addr = ce_state->ctrl_addr; + uint32_t nbuf_cmpl_idx = 0; + unsigned int more_comp_cnt = 0; + +more_data: + for (;;) { + + dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, + sw_index); + + /* + * The following 2 reads are from non-cached memory + */ + nbytes = dest_desc->nbytes; + + /* If completion is invalid, break */ + if (qdf_unlikely(nbytes == 0)) + break; + + + /* + * Build the nbuf list from valid completions + */ + nbuf = dest_ring->per_transfer_context[sw_index]; + + /* + * No lock is needed here, since this is the only thread + * that accesses the sw_index + */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + + /* + * CAREFUL : Uncached write, but still less expensive, + * since most modern caches use "write-combining" to + * flush multiple cache-writes all at once. + */ + dest_desc->nbytes = 0; + + /* + * Per our understanding this is not required on our + * since we are doing the same cache invalidation + * operation on the same buffer twice in succession, + * without any modifiication to this buffer by CPU in + * between. + * However, this code with 2 syncs in succession has + * been undergoing some testing at a customer site, + * and seemed to be showing no problems so far. 
Would + * like to validate from the customer, that this line + * is really not required, before we remove this line + * completely. + */ + paddr = QDF_NBUF_CB_PADDR(nbuf); + + qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev, paddr, + (skb_end_pointer(nbuf) - (nbuf)->data), + DMA_FROM_DEVICE); + + qdf_nbuf_put_tail(nbuf, nbytes); + + qdf_assert_always(nbuf->data != NULL); + + QDF_NBUF_CB_RX_CTX_ID(nbuf) = + hif_get_rx_ctx_id(ce_state->id, hif_hdl); + cmpl_msdus[nbuf_cmpl_idx++] = nbuf; + + /* + * we are not posting the buffers back instead + * reusing the buffers + */ + if (nbuf_cmpl_idx == scn->ce_service_max_rx_ind_flush) { + hif_record_ce_desc_event(scn, ce_state->id, + FAST_RX_SOFTWARE_INDEX_UPDATE, + NULL, NULL, sw_index, 0); + dest_ring->sw_index = sw_index; + ce_fastpath_rx_handle(ce_state, cmpl_msdus, + nbuf_cmpl_idx, ctrl_addr); + + ce_state->receive_count += nbuf_cmpl_idx; + if (qdf_unlikely(hif_ce_service_should_yield( + scn, ce_state))) { + ce_state->force_break = 1; + qdf_atomic_set(&ce_state->rx_pending, 1); + return; + } + + nbuf_cmpl_idx = 0; + more_comp_cnt = 0; + } + } + + hif_record_ce_desc_event(scn, ce_state->id, + FAST_RX_SOFTWARE_INDEX_UPDATE, + NULL, NULL, sw_index, 0); + + dest_ring->sw_index = sw_index; + + /* + * If there are not enough completions to fill the array, + * just call the message handler here + */ + if (nbuf_cmpl_idx) { + ce_fastpath_rx_handle(ce_state, cmpl_msdus, + nbuf_cmpl_idx, ctrl_addr); + + ce_state->receive_count += nbuf_cmpl_idx; + if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) { + ce_state->force_break = 1; + qdf_atomic_set(&ce_state->rx_pending, 1); + return; + } + + /* check for more packets after upper layer processing */ + nbuf_cmpl_idx = 0; + more_comp_cnt = 0; + goto more_data; + } + + hif_update_napi_max_poll_time(ce_state, ce_id, qdf_get_cpu()); + + qdf_atomic_set(&ce_state->rx_pending, 0); + if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) { + CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr, + 
HOST_IS_COPY_COMPLETE_MASK); + } else { + hif_err_rl("%s: target access is not allowed", __func__); + return; + } + + if (ce_recv_entries_done_nolock_legacy(scn, ce_state)) { + if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) { + goto more_data; + } else { + HIF_ERROR("%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x", + __func__, nentries_mask, + ce_state->dest_ring->sw_index, + CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr)); + } + } +#ifdef NAPI_YIELD_BUDGET_BASED + /* Caution : Before you modify this code, please refer hif_napi_poll function + to understand how napi_complete gets called and make the necessary changes + Force break has to be done till WIN disables the interrupt at source */ + ce_state->force_break = 1; +#endif +} + +#else +static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id) +{ +} +#endif /* WLAN_FEATURE_FASTPATH */ + +/* + * Guts of interrupt handler for per-engine interrupts on a particular CE. + * + * Invokes registered callbacks for recv_complete, + * send_complete, and watermarks. 
+ * + * Returns: number of messages processed + */ +int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id) +{ + struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; + uint32_t ctrl_addr = CE_state->ctrl_addr; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + void *CE_context; + void *transfer_context; + qdf_dma_addr_t buf; + unsigned int nbytes; + unsigned int id; + unsigned int flags; + unsigned int more_comp_cnt = 0; + unsigned int more_snd_comp_cnt = 0; + unsigned int sw_idx, hw_idx; + uint32_t toeplitz_hash_result; + uint32_t mode = hif_get_conparam(scn); + + if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data)) + return CE_state->receive_count; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) { + HIF_ERROR("[premature rc=0]"); + return 0; /* no work done */ + } + + /* Clear force_break flag and re-initialize receive_count to 0 */ + CE_state->receive_count = 0; + CE_state->force_break = 0; + CE_state->ce_service_start_time = sched_clock(); + CE_state->ce_service_yield_time = + CE_state->ce_service_start_time + + hif_get_ce_service_max_yield_time( + (struct hif_opaque_softc *)scn); + + qdf_spin_lock(&CE_state->ce_index_lock); + /* + * With below check we make sure CE we are handling is datapath CE and + * fastpath is enabled. 
+ */ + if (ce_is_fastpath_handler_registered(CE_state)) { + /* For datapath only Rx CEs */ + ce_per_engine_service_fast(scn, CE_id); + goto unlock_end; + } + +more_completions: + if (CE_state->recv_cb) { + + /* Pop completed recv buffers and call + * the registered recv callback for each + */ + while (hif_state->ce_services->ce_completed_recv_next_nolock + (CE_state, &CE_context, &transfer_context, + &buf, &nbytes, &id, &flags) == + QDF_STATUS_SUCCESS) { + qdf_spin_unlock(&CE_state->ce_index_lock); + CE_state->recv_cb((struct CE_handle *)CE_state, + CE_context, transfer_context, buf, + nbytes, id, flags); + + /* + * EV #112693 - + * [Peregrine][ES1][WB342][Win8x86][Performance] + * BSoD_0x133 occurred in VHT80 UDP_DL + * Break out DPC by force if number of loops in + * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES + * to avoid spending too long time in + * DPC for each interrupt handling. Schedule another + * DPC to avoid data loss if we had taken + * force-break action before apply to Windows OS + * only currently, Linux/MAC os can expand to their + * platform if necessary + */ + + /* Break the receive processes by + * force if force_break set up + */ + if (qdf_unlikely(CE_state->force_break)) { + qdf_atomic_set(&CE_state->rx_pending, 1); + goto target_access_end; + } + qdf_spin_lock(&CE_state->ce_index_lock); + } + } + + /* + * Attention: We may experience potential infinite loop for below + * While Loop during Sending Stress test. 
+ * Resolve the same way as Receive Case (Refer to EV #112693) + */ + + if (CE_state->send_cb) { + /* Pop completed send buffers and call + * the registered send callback for each + */ + +#ifdef ATH_11AC_TXCOMPACT + while (hif_state->ce_services->ce_completed_send_next_nolock + (CE_state, &CE_context, + &transfer_context, &buf, &nbytes, + &id, &sw_idx, &hw_idx, + &toeplitz_hash_result) == QDF_STATUS_SUCCESS) { + + if (CE_id != CE_HTT_H2T_MSG || + QDF_IS_EPPING_ENABLED(mode)) { + qdf_spin_unlock(&CE_state->ce_index_lock); + CE_state->send_cb((struct CE_handle *)CE_state, + CE_context, transfer_context, + buf, nbytes, id, sw_idx, + hw_idx, toeplitz_hash_result); + qdf_spin_lock(&CE_state->ce_index_lock); + } else { + struct HIF_CE_pipe_info *pipe_info = + (struct HIF_CE_pipe_info *)CE_context; + + qdf_spin_lock(&pipe_info-> + completion_freeq_lock); + pipe_info->num_sends_allowed++; + qdf_spin_unlock(&pipe_info-> + completion_freeq_lock); + } + } +#else /*ATH_11AC_TXCOMPACT */ + while (hif_state->ce_services->ce_completed_send_next_nolock + (CE_state, &CE_context, + &transfer_context, &buf, &nbytes, + &id, &sw_idx, &hw_idx, + &toeplitz_hash_result) == QDF_STATUS_SUCCESS) { + qdf_spin_unlock(&CE_state->ce_index_lock); + CE_state->send_cb((struct CE_handle *)CE_state, + CE_context, transfer_context, buf, + nbytes, id, sw_idx, hw_idx, + toeplitz_hash_result); + qdf_spin_lock(&CE_state->ce_index_lock); + } +#endif /*ATH_11AC_TXCOMPACT */ + } + +more_watermarks: + if (CE_state->misc_cbs) { + if (CE_state->watermark_cb && + hif_state->ce_services->watermark_int(CE_state, + &flags)) { + qdf_spin_unlock(&CE_state->ce_index_lock); + /* Convert HW IS bits to software flags */ + CE_state->watermark_cb((struct CE_handle *)CE_state, + CE_state->wm_context, flags); + qdf_spin_lock(&CE_state->ce_index_lock); + } + } + + /* + * Clear the misc interrupts (watermark) that were handled above, + * and that will be checked again below. 
+ * Clear and check for copy-complete interrupts again, just in case + * more copy completions happened while the misc interrupts were being + * handled. + */ + if (!ce_srng_based(scn)) { + if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) { + CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr, + CE_WATERMARK_MASK | + HOST_IS_COPY_COMPLETE_MASK); + } else { + qdf_atomic_set(&CE_state->rx_pending, 0); + hif_err_rl("%s: target access is not allowed", + __func__); + goto unlock_end; + } + } + + /* + * Now that per-engine interrupts are cleared, verify that + * no recv interrupts arrive while processing send interrupts, + * and no recv or send interrupts happened while processing + * misc interrupts.Go back and check again.Keep checking until + * we find no more events to process. + */ + if (CE_state->recv_cb && + hif_state->ce_services->ce_recv_entries_done_nolock(scn, + CE_state)) { + if (QDF_IS_EPPING_ENABLED(mode) || + more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) { + goto more_completions; + } else { + if (!ce_srng_based(scn)) { + HIF_ERROR( + "%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x", + __func__, + CE_state->dest_ring->nentries_mask, + CE_state->dest_ring->sw_index, + CE_DEST_RING_READ_IDX_GET(scn, + CE_state->ctrl_addr)); + } + } + } + + if (CE_state->send_cb && + hif_state->ce_services->ce_send_entries_done_nolock(scn, + CE_state)) { + if (QDF_IS_EPPING_ENABLED(mode) || + more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) { + goto more_completions; + } else { + if (!ce_srng_based(scn)) { + HIF_ERROR( + "%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x", + __func__, + CE_state->src_ring->nentries_mask, + CE_state->src_ring->sw_index, + CE_SRC_RING_READ_IDX_GET(scn, + CE_state->ctrl_addr)); + } + } + } + + if (CE_state->misc_cbs && CE_state->watermark_cb) { + if (hif_state->ce_services->watermark_int(CE_state, &flags)) + goto more_watermarks; + } 
+ + qdf_atomic_set(&CE_state->rx_pending, 0); + +unlock_end: + qdf_spin_unlock(&CE_state->ce_index_lock); +target_access_end: + if (Q_TARGET_ACCESS_END(scn) < 0) + HIF_ERROR("<--[premature rc=%d]", CE_state->receive_count); + return CE_state->receive_count; +} +qdf_export_symbol(ce_per_engine_service); + +/* + * Handler for per-engine interrupts on ALL active CEs. + * This is used in cases where the system is sharing a + * single interrput for all CEs + */ + +void ce_per_engine_service_any(int irq, struct hif_softc *scn) +{ + int CE_id; + uint32_t intr_summary; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + if (!qdf_atomic_read(&scn->tasklet_from_intr)) { + for (CE_id = 0; CE_id < scn->ce_count; CE_id++) { + struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; + + if (qdf_atomic_read(&CE_state->rx_pending)) { + qdf_atomic_set(&CE_state->rx_pending, 0); + ce_per_engine_service(scn, CE_id); + } + } + + Q_TARGET_ACCESS_END(scn); + return; + } + + intr_summary = CE_INTERRUPT_SUMMARY(scn); + + for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) { + if (intr_summary & (1 << CE_id)) + intr_summary &= ~(1 << CE_id); + else + continue; /* no intr pending on this CE */ + + ce_per_engine_service(scn, CE_id); + } + + Q_TARGET_ACCESS_END(scn); +} + +/* + * Adjust interrupts for the copy complete handler. + * If it's needed for either send or recv, then unmask + * this interrupt; otherwise, mask it. + * + * Called with target_lock held. 
+ */ +static void +ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state, + int disable_copy_compl_intr) +{ + uint32_t ctrl_addr = CE_state->ctrl_addr; + struct hif_softc *scn = CE_state->scn; + + CE_state->disable_copy_compl_intr = disable_copy_compl_intr; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) { + hif_err_rl("%s: target access is not allowed", __func__); + return; + } + + if ((!disable_copy_compl_intr) && + (CE_state->send_cb || CE_state->recv_cb)) + CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr); + else + CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr); + + if (CE_state->watermark_cb) + CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr); + else + CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr); + Q_TARGET_ACCESS_END(scn); +} + +/*Iterate the CE_state list and disable the compl interrupt + * if it has been registered already. + */ +void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn) +{ + int CE_id; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + for (CE_id = 0; CE_id < scn->ce_count; CE_id++) { + struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; + uint32_t ctrl_addr = CE_state->ctrl_addr; + + /* if the interrupt is currently enabled, disable it */ + if (!CE_state->disable_copy_compl_intr + && (CE_state->send_cb || CE_state->recv_cb)) + CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr); + + if (CE_state->watermark_cb) + CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr); + } + Q_TARGET_ACCESS_END(scn); +} + +void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn) +{ + int CE_id; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + for (CE_id = 0; CE_id < scn->ce_count; CE_id++) { + struct CE_state *CE_state = scn->ce_id_to_state[CE_id]; + uint32_t ctrl_addr = CE_state->ctrl_addr; + + /* + * If the CE is supposed to have copy complete interrupts + * enabled (i.e. there a callback registered, and the + * "disable" flag is not set), then re-enable the interrupt. 
+ */ + if (!CE_state->disable_copy_compl_intr + && (CE_state->send_cb || CE_state->recv_cb)) + CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr); + + if (CE_state->watermark_cb) + CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr); + } + Q_TARGET_ACCESS_END(scn); +} + +/** + * ce_send_cb_register(): register completion handler + * @copyeng: CE_state representing the ce we are adding the behavior to + * @fn_ptr: callback that the ce should use when processing tx completions + * @disable_interrupts: if the interupts should be enabled or not. + * + * Caller should guarantee that no transactions are in progress before + * switching the callback function. + * + * Registers the send context before the fn pointer so that if the cb is valid + * the context should be valid. + * + * Beware that currently this function will enable completion interrupts. + */ +void +ce_send_cb_register(struct CE_handle *copyeng, + ce_send_cb fn_ptr, + void *ce_send_context, int disable_interrupts) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct hif_softc *scn; + struct HIF_CE_state *hif_state; + + if (CE_state == NULL) { + HIF_ERROR("%s: Error CE state = NULL", __func__); + return; + } + scn = CE_state->scn; + hif_state = HIF_GET_CE_STATE(scn); + if (hif_state == NULL) { + HIF_ERROR("%s: Error HIF state = NULL", __func__); + return; + } + CE_state->send_context = ce_send_context; + CE_state->send_cb = fn_ptr; + hif_state->ce_services->ce_per_engine_handler_adjust(CE_state, + disable_interrupts); +} + +/** + * ce_recv_cb_register(): register completion handler + * @copyeng: CE_state representing the ce we are adding the behavior to + * @fn_ptr: callback that the ce should use when processing rx completions + * @disable_interrupts: if the interupts should be enabled or not. + * + * Registers the send context before the fn pointer so that if the cb is valid + * the context should be valid. 
+ * + * Caller should guarantee that no transactions are in progress before + * switching the callback function. + */ +void +ce_recv_cb_register(struct CE_handle *copyeng, + CE_recv_cb fn_ptr, + void *CE_recv_context, int disable_interrupts) +{ + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct hif_softc *scn; + struct HIF_CE_state *hif_state; + + if (CE_state == NULL) { + HIF_ERROR("%s: ERROR CE state = NULL", __func__); + return; + } + scn = CE_state->scn; + hif_state = HIF_GET_CE_STATE(scn); + if (hif_state == NULL) { + HIF_ERROR("%s: Error HIF state = NULL", __func__); + return; + } + CE_state->recv_context = CE_recv_context; + CE_state->recv_cb = fn_ptr; + hif_state->ce_services->ce_per_engine_handler_adjust(CE_state, + disable_interrupts); +} + +/** + * ce_watermark_cb_register(): register completion handler + * @copyeng: CE_state representing the ce we are adding the behavior to + * @fn_ptr: callback that the ce should use when processing watermark events + * + * Caller should guarantee that no watermark events are being processed before + * switching the callback function. 
 */
void
ce_watermark_cb_register(struct CE_handle *copyeng,
			 CE_watermark_cb fn_ptr, void *CE_wm_context)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = CE_state->scn;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	CE_state->watermark_cb = fn_ptr;
	CE_state->wm_context = CE_wm_context;
	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
							0);
	/* a non-NULL watermark cb marks this CE as having misc callbacks */
	if (fn_ptr)
		CE_state->misc_cbs = 1;
}

/**
 * ce_get_rx_pending() - check whether any copy engine has rx work pending
 * @scn: hif context
 *
 * Return: true if any CE on this device has a pending rx completion.
 */
bool ce_get_rx_pending(struct hif_softc *scn)
{
	int CE_id;

	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];

		if (qdf_atomic_read(&CE_state->rx_pending))
			return true;
	}

	return false;
}

/**
 * ce_check_rx_pending() - ce_check_rx_pending
 * @CE_state: context of the copy engine to check
 *
 * Return: true if there per_engine_service
 * didn't process all the rx descriptors.
 */
bool ce_check_rx_pending(struct CE_state *CE_state)
{
	if (qdf_atomic_read(&CE_state->rx_pending))
		return true;
	else
		return false;
}
qdf_export_symbol(ce_check_rx_pending);

#ifdef IPA_OFFLOAD
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * Copy engine should release resource to micro controller
 * Micro controller needs
 *  - Copy engine source descriptor base address
 *  - Copy engine source descriptor size
 *  - PCI BAR address to access copy engine register
 *
 * Return: None
 */
void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr)
{
	struct CE_state *CE_state = (struct CE_state *)ce;
	uint32_t ring_loop;
	struct CE_src_desc *ce_desc;
	qdf_dma_addr_t phy_mem_base;
	struct hif_softc *scn = CE_state->scn;

	/* unused CE: report a zeroed resource instead of stale addresses */
	if (CE_UNUSED == CE_state->state) {
		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
		*ce_sr_ring_size = 0;
		return;
	}

	/* Update default value for descriptor */
	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
	     ring_loop++) {
		ce_desc = (struct CE_src_desc *)
			  ((char *)CE_state->src_ring->base_addr_owner_space +
			   ring_loop * (sizeof(struct CE_src_desc)));
		CE_IPA_RING_INIT(ce_desc);
	}

	/* Get BAR address */
	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);

	*ce_sr = CE_state->scn->ipa_ce_ring;
	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
		sizeof(struct CE_src_desc));
	*ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
			SR_WR_INDEX_ADDRESS;
}
#endif /* IPA_OFFLOAD */

/**
 * ce_check_int_watermark() - translate HW watermark interrupt status
 * @CE_state: copy engine to query
 * @flags: out - CE_WM_FLAG_* software flags derived from the HW status
 *
 * Return: true if any watermark interrupt status bit was set.
 */
static bool ce_check_int_watermark(struct CE_state *CE_state,
				   unsigned int *flags)
{
	uint32_t ce_int_status;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
	if (ce_int_status & CE_WATERMARK_MASK) {
		/* Convert HW IS bits to software flags */
		*flags =
			(ce_int_status & CE_WATERMARK_MASK) >>
			CE_WM_SHFT;
		return true;
	}

	return false;
}

/* Program a legacy CE source ring: indices, base address, size, watermarks */
static void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
			struct CE_ring_state *src_ring,
			struct CE_attr *attr)
{
	uint32_t ctrl_addr;
	uint64_t dma_addr;

	QDF_ASSERT(ce_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(ce_id);

	src_ring->hw_index =
		CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	src_ring->sw_index = src_ring->hw_index;
	src_ring->write_index =
		CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	dma_addr = src_ring->base_addr_CE_space;
	CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
			(uint32_t)(dma_addr & 0xFFFFFFFF));

	/* if SR_BA_ADDRESS_HIGH register exists */
	if (is_register_supported(SR_BA_ADDRESS_HIGH)) {
		uint32_t tmp;

		tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
				scn, ctrl_addr);
		/* only the low 5 bits of the high address are writable */
		tmp &= ~0x1F;
		dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
		CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
				ctrl_addr, (uint32_t)dma_addr);
	}
	CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries);
	CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
#ifdef BIG_ENDIAN_HOST
	/* Enable source ring byte swap for big endian host */
	CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries);

}

/* Program a legacy CE destination ring: indices, base address, watermarks */
static void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				struct CE_ring_state *dest_ring,
				struct CE_attr *attr)
{
	uint32_t ctrl_addr;
	uint64_t dma_addr;

	QDF_ASSERT(ce_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(ce_id);
	dest_ring->sw_index =
		CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	dest_ring->write_index =
		CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	dma_addr = dest_ring->base_addr_CE_space;
	CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
			(uint32_t)(dma_addr & 0xFFFFFFFF));

	/* if DR_BA_ADDRESS_HIGH exists */
	if (is_register_supported(DR_BA_ADDRESS_HIGH)) {
		uint32_t tmp;

		tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
				ctrl_addr);
		tmp &= ~0x1F;
		dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
		CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
				ctrl_addr, (uint32_t)dma_addr);
	}

	CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries);
#ifdef BIG_ENDIAN_HOST
	/* Enable Dest ring byte swap for big endian host */
	CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries);
}

/**
 * ce_get_desc_size_legacy() - descriptor size for a legacy CE ring type
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 *
 * Return: size in bytes; 0 for status rings (not supported on legacy CE)
 * or unknown types.
 */
static uint32_t ce_get_desc_size_legacy(uint8_t ring_type)
{
	switch (ring_type) {
	case CE_RING_SRC:
		return sizeof(struct CE_src_desc);
	case CE_RING_DEST:
		return sizeof(struct CE_dest_desc);
	case CE_RING_STATUS:
		/* legacy CE has no status ring */
		qdf_assert(0);
		return 0;
	default:
		return 0;
	}

	return 0;
}

static int
ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type, + uint32_t ce_id, struct CE_ring_state *ring, + struct CE_attr *attr) +{ + int status = Q_TARGET_ACCESS_BEGIN(scn); + + if (status < 0) + goto out; + + + switch (ring_type) { + case CE_RING_SRC: + ce_legacy_src_ring_setup(scn, ce_id, ring, attr); + break; + case CE_RING_DEST: + ce_legacy_dest_ring_setup(scn, ce_id, ring, attr); + break; + case CE_RING_STATUS: + default: + qdf_assert(0); + break; + } + + Q_TARGET_ACCESS_END(scn); +out: + return status; +} + +static void ce_prepare_shadow_register_v2_cfg_legacy(struct hif_softc *scn, + struct pld_shadow_reg_v2_cfg **shadow_config, + int *num_shadow_registers_configured) +{ + *num_shadow_registers_configured = 0; + *shadow_config = NULL; +} + +struct ce_ops ce_service_legacy = { + .ce_get_desc_size = ce_get_desc_size_legacy, + .ce_ring_setup = ce_ring_setup_legacy, + .ce_sendlist_send = ce_sendlist_send_legacy, + .ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy, + .ce_revoke_recv_next = ce_revoke_recv_next_legacy, + .ce_cancel_send_next = ce_cancel_send_next_legacy, + .ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy, + .ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy, + .ce_send_nolock = ce_send_nolock_legacy, + .watermark_int = ce_check_int_watermark, + .ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy, + .ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy, + .ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy, + .ce_prepare_shadow_register_v2_cfg = + ce_prepare_shadow_register_v2_cfg_legacy, +}; + + +struct ce_ops *ce_services_legacy() +{ + return &ce_service_legacy; +} + +#if HIF_CE_DEBUG_DATA_BUF +/** + * hif_dump_desc_data_buf() - record ce descriptor events + * @buf: buffer to copy to + * @pos: Current position till which the buf is filled + * @data: Data to be copied + * @data_len: Length of the data to be copied + */ +static uint32_t 
hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
			uint8_t *data, uint32_t data_len)
{
	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
			CE_DEBUG_MAX_DATA_BUF_SIZE);

	if ((data_len > 0) && data) {
		if (data_len < 16) {
			/* single hex row */
			hex_dump_to_buffer(data,
					   CE_DEBUG_DATA_PER_ROW,
					   16, 1, buf + pos,
					   (ssize_t)PAGE_SIZE - pos,
					   false);
			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
		} else {
			uint32_t rows = (data_len / 16) + 1;
			uint32_t row = 0;

			for (row = 0; row < rows; row++) {
				hex_dump_to_buffer(data + (row * 16),
						   CE_DEBUG_DATA_PER_ROW,
						   16, 1, buf + pos,
						   (ssize_t)PAGE_SIZE
						   - pos, false);
				pos +=
				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
				pos += snprintf(buf + pos, PAGE_SIZE - pos,
						"\n");
			}
		}
	}

	return pos;
}
#endif

/*
 * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * for defined here
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/* Map a hif_ce_event_type enum value to its printable name for debug dumps */
static const char *ce_event_type_to_str(enum hif_ce_event_type type)
{
	switch (type) {
	case HIF_RX_DESC_POST:
		return "HIF_RX_DESC_POST";
	case HIF_RX_DESC_COMPLETION:
		return "HIF_RX_DESC_COMPLETION";
	case HIF_TX_GATHER_DESC_POST:
		return "HIF_TX_GATHER_DESC_POST";
	case HIF_TX_DESC_POST:
		return "HIF_TX_DESC_POST";
	case HIF_TX_DESC_SOFTWARE_POST:
		return "HIF_TX_DESC_SOFTWARE_POST";
	case HIF_TX_DESC_COMPLETION:
		return "HIF_TX_DESC_COMPLETION";
	case FAST_RX_WRITE_INDEX_UPDATE:
		return "FAST_RX_WRITE_INDEX_UPDATE";
	case FAST_RX_SOFTWARE_INDEX_UPDATE:
		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
	case FAST_TX_WRITE_INDEX_UPDATE:
		return "FAST_TX_WRITE_INDEX_UPDATE";
	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
	case FAST_TX_SOFTWARE_INDEX_UPDATE:
		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
	case RESUME_WRITE_INDEX_UPDATE:
		return "RESUME_WRITE_INDEX_UPDATE";
	case HIF_IRQ_EVENT:
		return "HIF_IRQ_EVENT";
	case HIF_CE_TASKLET_ENTRY:
		return "HIF_CE_TASKLET_ENTRY";
	case HIF_CE_TASKLET_RESCHEDULE:
		return "HIF_CE_TASKLET_RESCHEDULE";
	case HIF_CE_TASKLET_EXIT:
		return "HIF_CE_TASKLET_EXIT";
	case HIF_CE_REAP_ENTRY:
		return "HIF_CE_REAP_ENTRY";
	case HIF_CE_REAP_EXIT:
		return "HIF_CE_REAP_EXIT";
	case NAPI_SCHEDULE:
		return "NAPI_SCHEDULE";
	case NAPI_POLL_ENTER:
		return "NAPI_POLL_ENTER";
	case NAPI_COMPLETE:
		return "NAPI_COMPLETE";
	case NAPI_POLL_EXIT:
		return "NAPI_POLL_EXIT";
	case HIF_RX_NBUF_ALLOC_FAILURE:
		return "HIF_RX_NBUF_ALLOC_FAILURE";
	case HIF_RX_NBUF_MAP_FAILURE:
		return "HIF_RX_NBUF_MAP_FAILURE";
	case HIF_RX_NBUF_ENQUEUE_FAILURE:
		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
	default:
		return "invalid";
	}
}

/**
 * hif_dump_desc_event() - dump the selected ce descriptor history event
 * @scn: hif context (selects hist_id/hist_index previously stored in it)
 * @buf: buffer to which the formatted event is written
 *
 * Return: number of bytes written to @buf, or -EINVAL on bad state.
 */
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
{
	struct hif_ce_desc_event *event;
	uint64_t secs, usecs;
	ssize_t len = 0;
	struct ce_desc_hist *ce_hist = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;

	if (!scn)
		return -EINVAL;

	ce_hist = &scn->hif_ce_desc_hist;

	if (ce_hist->hist_id >= CE_COUNT_MAX ||
	    ce_hist->hist_index >= HIF_CE_HISTORY_MAX) {
		qdf_print("Invalid values");
		return -EINVAL;
	}

	hist_ev =
		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];

	if (!hist_ev) {
		qdf_print("Low Memory\n");
		return -EINVAL;
	}

	event = &hist_ev[ce_hist->hist_index];

	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);

	len += snprintf(buf, PAGE_SIZE - len,
			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
			secs, usecs, ce_hist->hist_id,
			ce_event_type_to_str(event->type),
			event->index, event->memory);
#if HIF_CE_DEBUG_DATA_BUF
	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%d",
			event->actual_data_len);
#endif

	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");

	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
			   16, 1, buf + len,
			   (ssize_t)PAGE_SIZE - len, false);
	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

#if HIF_CE_DEBUG_DATA_BUF
	if (ce_hist->data_enable[ce_hist->hist_id])
		len = hif_dump_desc_data_buf(buf, len, event->data,
						(event->actual_data_len <
						 CE_DEBUG_MAX_DATA_BUF_SIZE) ?
						event->actual_data_len :
						CE_DEBUG_MAX_DATA_BUF_SIZE);
#endif /*HIF_CE_DEBUG_DATA_BUF*/

	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");

	return len;
}

/*
 * hif_input_desc_trace_buf_index() -
 * API to get the CE id and CE debug storage buffer index
 *
 * @scn: hif context
 * @buf: user input of the form "<hist_id> <hist_index>"
 * @size: number of bytes in @buf
 *
 * Return total length
 */
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
					const char *buf, size_t size)
{
	struct ce_desc_hist *ce_hist = NULL;

	if (!scn)
		return -EINVAL;

	ce_hist = &scn->hif_ce_desc_hist;

	if (!size) {
		pr_err("%s: Invalid input buffer.\n", __func__);
		return -EINVAL;
	}

	if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id,
		   (unsigned int *)&ce_hist->hist_index) != 2) {
		pr_err("%s: Invalid input value.\n", __func__);
		return -EINVAL;
	}
	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
	    (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
		qdf_print("Invalid values\n");
		return -EINVAL;
	}

	return size;
}

#endif /*For MCL, HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */

#if HIF_CE_DEBUG_DATA_BUF
/*
 * hif_ce_en_desc_hist() -
 * API to enable recording the CE desc history
 *
 * @scn: hif context
 * @buf: user input of the form "<ce_id> <1/0>"
 * @size: number of bytes in @buf
+ * + * Starts recording the ce desc history + * + * Return total length copied + */ +ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size) +{ + struct ce_desc_hist *ce_hist = NULL; + uint32_t cfg = 0; + uint32_t ce_id = 0; + + if (!scn) + return -EINVAL; + + ce_hist = &scn->hif_ce_desc_hist; + + if (!size) { + pr_err("%s: Invalid input buffer.\n", __func__); + return -EINVAL; + } + + if (sscanf(buf, "%u %u", (unsigned int *)&ce_id, + (unsigned int *)&cfg) != 2) { + pr_err("%s: Invalid input: Enter CE Id<1/0>.\n", __func__); + return -EINVAL; + } + if (ce_id >= CE_COUNT_MAX) { + qdf_print("Invalid value CE Id\n"); + return -EINVAL; + } + + if ((cfg > 1 || cfg < 0)) { + qdf_print("Invalid values: enter 0 or 1\n"); + return -EINVAL; + } + + if (!ce_hist->hist_ev[ce_id]) + return -EINVAL; + + qdf_mutex_acquire(&ce_dbg_datamem_lock[ce_id]); + if (cfg == 1) { + if (ce_hist->data_enable[ce_id] == 1) { + qdf_print("\nAlready Enabled\n"); + } else { + if (alloc_mem_ce_debug_hist_data(scn, ce_id) + == QDF_STATUS_E_NOMEM){ + ce_hist->data_enable[ce_id] = 0; + qdf_print("%s:Memory Alloc failed\n"); + } else + ce_hist->data_enable[ce_id] = 1; + } + } else if (cfg == 0) { + if (ce_hist->data_enable[ce_id] == 0) { + qdf_print("\nAlready Disabled\n"); + } else { + ce_hist->data_enable[ce_id] = 0; + free_mem_ce_debug_hist_data(scn, ce_id); + } + } + qdf_mutex_release(&ce_dbg_datamem_lock[ce_id]); + + return size; +} + +/* + * hif_disp_ce_enable_desc_data_hist() - + * API to display value of data_enable + * + * @dev: network device + * @attr: sysfs attribute + * @buf: buffer to copy the data. 
+ * + * Return total length copied + */ +ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf) +{ + ssize_t len = 0; + uint32_t ce_id = 0; + struct ce_desc_hist *ce_hist = NULL; + + if (!scn) + return -EINVAL; + + ce_hist = &scn->hif_ce_desc_hist; + + for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) { + len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n", + ce_id, ce_hist->data_enable[ce_id]); + } + + return len; +} +#endif /* HIF_CE_DEBUG_DATA_BUF */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service_srng.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service_srng.c new file mode 100644 index 0000000000000000000000000000000000000000..9f188bb497e896b24a8d42a5262bba1fdaf4672c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service_srng.c @@ -0,0 +1,868 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "hif.h" +#include "hif_io32.h" +#include "reg_struct.h" +#include "ce_api.h" +#include "ce_main.h" +#include "ce_internal.h" +#include "ce_reg.h" +#include "qdf_lock.h" +#include "regtable.h" +#include "hif_main.h" +#include "hif_debug.h" +#include "hal_api.h" +#include "pld_common.h" +#include "qdf_module.h" + +/* + * Support for Copy Engine hardware, which is mainly used for + * communication between Host and Target over a PCIe interconnect. + */ + +/* + * A single CopyEngine (CE) comprises two "rings": + * a source ring + * a destination ring + * + * Each ring consists of a number of descriptors which specify + * an address, length, and meta-data. + * + * Typically, one side of the PCIe interconnect (Host or Target) + * controls one ring and the other side controls the other ring. + * The source side chooses when to initiate a transfer and it + * chooses what to send (buffer address, length). The destination + * side keeps a supply of "anonymous receive buffers" available and + * it handles incoming data as it arrives (when the destination + * receives an interrupt). + * + * The sender may send a simple buffer (address/length) or it may + * send a small list of buffers. When a small list is sent, hardware + * "gathers" these and they end up in a single destination buffer + * with a single interrupt. + * + * There are several "contexts" managed by this layer -- more, it + * may seem -- than should be needed. These are provided mainly for + * maximum flexibility and especially to facilitate a simpler HIF + * implementation. There are per-CopyEngine recv, send, and watermark + * contexts. These are supplied by the caller when a recv, send, + * or watermark handler is established and they are echoed back to + * the caller when the respective callbacks are invoked. There is + * also a per-transfer context supplied by the caller when a buffer + * (or sendlist) is sent and when a buffer is enqueued for recv. 
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * Target TX harsh result toeplitz_hash_result
 */

/* Split a DMA address into the lo/hi fields of an srng CE descriptor */
#define CE_ADDR_COPY(desc, dma_addr) do {\
		(desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
							  0xFFFFFFFF);\
		(desc)->buffer_addr_hi =\
			(uint32_t)(((dma_addr) >> 32) & 0xFF);\
	} while (0)

/*
 * ce_send_nolock_srng() - post one buffer to an srng CE source ring.
 * Caller holds the per-CE index lock; returns a QDF_STATUS value.
 */
static int
ce_send_nolock_srng(struct CE_handle *copyeng,
			   void *per_transfer_context,
			   qdf_dma_addr_t buffer,
			   uint32_t nbytes,
			   uint32_t transfer_id,
			   uint32_t flags,
			   uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
					false) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
		struct ce_srng_src_desc *src_desc;

		if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_FAILURE;
		}

		src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
				src_ring->srng_ctx);
		if (!src_desc) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_INVAL;
		}

		/* Update low 32 bits source descriptor address */
		src_desc->buffer_addr_lo =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
		src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0xFF);

		src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		src_desc->nbytes = nbytes;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

		/* src_ring->write index hasn't been updated even though
		 * the register has already been written to.
		 */
		hif_record_ce_desc_event(scn, CE_state->id, event_type,
			(union ce_desc *)src_desc, per_transfer_context,
			src_ring->write_index, nbytes);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

/*
 * ce_sendlist_send_srng() - post a small gather list to the source ring.
 * All items must fit at once; otherwise nothing is posted and -ENOMEM
 * is returned to the caller.
 */
static int
ce_sendlist_send_srng(struct CE_handle *copyeng,
		  void *per_transfer_context,
		  struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_srng(copyeng,
					CE_SENDLIST_ITEM_CTXT,
					(qdf_dma_addr_t) item->data,
					item->u.nbytes, transfer_id,
					item->flags | CE_SEND_FLAG_GATHER,
					item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_srng(copyeng, per_transfer_context,
					(qdf_dma_addr_t) item->data,
					item->u.nbytes,
					transfer_id, item->flags,
					item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
			QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification. We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
+ */ + } + qdf_spin_unlock_bh(&CE_state->ce_index_lock); + + return status; +} + +#define SLOTS_PER_DATAPATH_TX 2 + +#ifndef AH_NEED_TX_DATA_SWAP +#define AH_NEED_TX_DATA_SWAP 0 +#endif +/** + * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine + * @coyeng: copy engine handle + * @per_recv_context: virtual address of the nbuf + * @buffer: physical address of the nbuf + * + * Return: 0 if the buffer is enqueued + */ +static int +ce_recv_buf_enqueue_srng(struct CE_handle *copyeng, + void *per_recv_context, qdf_dma_addr_t buffer) +{ + int status; + struct CE_state *CE_state = (struct CE_state *)copyeng; + struct CE_ring_state *dest_ring = CE_state->dest_ring; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int write_index; + unsigned int sw_index; + uint64_t dma_addr = buffer; + struct hif_softc *scn = CE_state->scn; + + qdf_spin_lock_bh(&CE_state->ce_index_lock); + write_index = dest_ring->write_index; + sw_index = dest_ring->sw_index; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) { + qdf_spin_unlock_bh(&CE_state->ce_index_lock); + return -EIO; + } + + if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) { + qdf_spin_unlock_bh(&CE_state->ce_index_lock); + return QDF_STATUS_E_FAILURE; + } + + if ((hal_srng_src_num_avail(scn->hal_soc, + dest_ring->srng_ctx, false) > 0)) { + struct ce_srng_dest_desc *dest_desc = + hal_srng_src_get_next(scn->hal_soc, + dest_ring->srng_ctx); + + if (dest_desc == NULL) { + status = QDF_STATUS_E_FAILURE; + } else { + + CE_ADDR_COPY(dest_desc, dma_addr); + + dest_ring->per_transfer_context[write_index] = + per_recv_context; + + /* Update Destination Ring Write Index */ + write_index = CE_RING_IDX_INCR(nentries_mask, + write_index); + status = QDF_STATUS_SUCCESS; + } + } else + status = QDF_STATUS_E_FAILURE; + + dest_ring->write_index = write_index; + hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx); + Q_TARGET_ACCESS_END(scn); + qdf_spin_unlock_bh(&CE_state->ce_index_lock); + return status; +} 

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
			    struct CE_state *CE_state)
{
	struct CE_ring_state *status_ring = CE_state->status_ring;

	/* rx completions are counted via the status ring on srng targets */
	return hal_srng_dst_num_valid(scn->hal_soc,
				status_ring->srng_ctx, false);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_send_entries_done_nolock_srng(struct hif_softc *scn,
					struct CE_state *CE_state)
{

	struct CE_ring_state *src_ring = CE_state->src_ring;
	int count = 0;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
		return 0;

	count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);

	hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

	return count;
}

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	struct CE_ring_state *status_ring = CE_state->status_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_status_desc *dest_status;
	int nbytes;
	struct ce_srng_dest_status_desc dest_status_info;

	/* NOTE(review): on access_start failure this still jumps to 'done',
	 * which calls hal_srng_access_end - confirm that is intended.
	 */
	if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status = hal_srng_dst_get_next(scn->hal_soc,
						status_ring->srng_ctx);

	if (dest_status == NULL) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}
	/*
	 * By copying the dest_desc_info element to local memory, we could
	 * avoid extra memory read from non-cachable memory.
	 */
	dest_status_info = *dest_status;
	nbytes = dest_status_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status->nbytes = 0;

	*nbytesp = nbytes;
	*transfer_idp = dest_status_info.meta_data;
	*flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with status ring.
	 * Get the per trasnfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	dest_ring->per_transfer_context[sw_index] = 0; /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);

	return status;
}

/*
 * ce_revoke_recv_next_srng() - reclaim one posted-but-unused rx buffer
 * during shutdown; returns its per-transfer context to the caller.
 */
static QDF_STATUS
ce_revoke_recv_next_srng(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int sw_index;

	if (!dest_ring)
		return QDF_STATUS_E_FAILURE;

	sw_index = dest_ring->sw_index;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with status ring.
	 * Get the per trasnfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	if (dest_ring->per_transfer_context[sw_index] == NULL)
		return QDF_STATUS_E_FAILURE;

	/* provide end condition */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(dest_ring->nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	return QDF_STATUS_SUCCESS;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx,
			      unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result)
{
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int swi = src_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_src_desc *src_desc;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
	if (src_desc) {
		hif_record_ce_desc_event(scn, CE_state->id,
					HIF_TX_DESC_COMPLETION,
					(union ce_desc *)src_desc,
					src_ring->per_transfer_context[swi],
					swi, src_desc->nbytes);

		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				  0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0; /* sanity */

		/* Update sw_index */
		sw_index =
CE_RING_IDX_INCR(nentries_mask, sw_index); + src_ring->sw_index = sw_index; + status = QDF_STATUS_SUCCESS; + } + hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx); + + return status; +} + +/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */ +#define CE_WM_SHFT 1 + +/* + * Number of times to check for any pending tx/rx completion on + * a copy engine, this count should be big enough. Once we hit + * this threshold we'll not check for any Tx/Rx completion in same + * interrupt handling. Note that this threshold is only used for + * Rx interrupt processing, this can be used for Tx as well if we + * suspect any infinite loop in checking for pending Tx completion. + */ +#define CE_TXRX_COMP_CHECK_THRESHOLD 20 + +/* + * Adjust interrupts for the copy complete handler. + * If it's needed for either send or recv, then unmask + * this interrupt; otherwise, mask it. + * + * Called with target_lock held. + */ +static void +ce_per_engine_handler_adjust_srng(struct CE_state *CE_state, + int disable_copy_compl_intr) +{ +} + +static bool ce_check_int_watermark_srng(struct CE_state *CE_state, + unsigned int *flags) +{ + /*TODO*/ + return false; +} + +static uint32_t ce_get_desc_size_srng(uint8_t ring_type) +{ + switch (ring_type) { + case CE_RING_SRC: + return sizeof(struct ce_srng_src_desc); + case CE_RING_DEST: + return sizeof(struct ce_srng_dest_desc); + case CE_RING_STATUS: + return sizeof(struct ce_srng_dest_status_desc); + default: + return 0; + } + return 0; +} + +static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id, + struct hal_srng_params *ring_params) +{ + uint32_t addr_low; + uint32_t addr_high; + uint32_t msi_data_start; + uint32_t msi_data_count; + uint32_t msi_irq_start; + int ret; + + ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", + &msi_data_count, &msi_data_start, + &msi_irq_start); + + /* msi config not found */ + if (ret) + return; + + pld_get_msi_address(scn->qdf_dev->dev, &addr_low, 
&addr_high); + + ring_params->msi_addr = addr_low; + ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32); + ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start; + ring_params->flags |= HAL_SRNG_MSI_INTR; + + HIF_DBG("%s: ce_id %d, msi_addr %pK, msi_data %d", __func__, ce_id, + (void *)ring_params->msi_addr, ring_params->msi_data); +} + +static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id, + struct CE_ring_state *src_ring, + struct CE_attr *attr) +{ + struct hal_srng_params ring_params = {0}; + + HIF_INFO("%s: ce_id %d", __func__, ce_id); + + ring_params.ring_base_paddr = src_ring->base_addr_CE_space; + ring_params.ring_base_vaddr = src_ring->base_addr_owner_space; + ring_params.num_entries = src_ring->nentries; + /* + * The minimum increment for the timer is 8us + * A default value of 0 disables the timer + * A valid default value caused continuous interrupts to + * fire with MSI enabled. Need to revisit usage of the timer + */ + + if (!(CE_ATTR_DISABLE_INTR & attr->flags)) { + ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params); + + ring_params.intr_timer_thres_us = 0; + ring_params.intr_batch_cntr_thres_entries = 1; + } + + src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0, + &ring_params); +} + +/** + * ce_srng_initialize_dest_timer_interrupt_war() - war initialization + * @dest_ring: ring being initialized + * @ring_params: pointer to initialized parameters + * + * For Napier & Hawkeye v1, the status ring timer interrupts do not work + * As a work arround host configures the destination rings to be a proxy for + * work needing to be done. + * + * The interrupts are setup such that if the destination ring is less than fully + * posted, there is likely undone work for the status ring that the host should + * process. + * + * There is a timing bug in srng based copy engines such that a fully posted + * srng based copy engine has 2 empty entries instead of just one. 
The copy + * engine data sturctures work with 1 empty entry, but the software frequently + * fails to post the last entry due to the race condition. + */ +static void ce_srng_initialize_dest_timer_interrupt_war( + struct CE_ring_state *dest_ring, + struct hal_srng_params *ring_params) { + int num_buffers_when_fully_posted = dest_ring->nentries - 2; + + ring_params->low_threshold = num_buffers_when_fully_posted - 1; + ring_params->intr_timer_thres_us = 1024; + ring_params->intr_batch_cntr_thres_entries = 0; + ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE; +} + +static void ce_srng_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id, + struct CE_ring_state *dest_ring, + struct CE_attr *attr) +{ + struct hal_srng_params ring_params = {0}; + bool status_ring_timer_thresh_work_arround = true; + + HIF_INFO("%s: ce_id %d", __func__, ce_id); + + ring_params.ring_base_paddr = dest_ring->base_addr_CE_space; + ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space; + ring_params.num_entries = dest_ring->nentries; + ring_params.max_buffer_length = attr->src_sz_max; + + if (!(CE_ATTR_DISABLE_INTR & attr->flags)) { + ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params); + if (status_ring_timer_thresh_work_arround) { + ce_srng_initialize_dest_timer_interrupt_war( + dest_ring, &ring_params); + } else { + /* normal behavior for future chips */ + ring_params.low_threshold = dest_ring->nentries >> 3; + ring_params.intr_timer_thres_us = 100000; + ring_params.intr_batch_cntr_thres_entries = 0; + ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE; + } + } + + /*Dest ring is also source ring*/ + dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0, + &ring_params); +} + +static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id, + struct CE_ring_state *status_ring, + struct CE_attr *attr) +{ + struct hal_srng_params ring_params = {0}; + + HIF_INFO("%s: ce_id %d", __func__, ce_id); + + ce_srng_msi_ring_params_setup(scn, ce_id, 
&ring_params); + + ring_params.ring_base_paddr = status_ring->base_addr_CE_space; + ring_params.ring_base_vaddr = status_ring->base_addr_owner_space; + ring_params.num_entries = status_ring->nentries; + + if (!(CE_ATTR_DISABLE_INTR & attr->flags)) { + ring_params.intr_timer_thres_us = 0x1000; + ring_params.intr_batch_cntr_thres_entries = 0x1; + } + + status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS, + ce_id, 0, &ring_params); +} + +static int ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type, + uint32_t ce_id, struct CE_ring_state *ring, + struct CE_attr *attr) +{ + switch (ring_type) { + case CE_RING_SRC: + ce_srng_src_ring_setup(scn, ce_id, ring, attr); + break; + case CE_RING_DEST: + ce_srng_dest_ring_setup(scn, ce_id, ring, attr); + break; + case CE_RING_STATUS: + ce_srng_status_ring_setup(scn, ce_id, ring, attr); + break; + default: + qdf_assert(0); + break; + } + + return 0; +} + +static void ce_construct_shadow_config_srng(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + int ce_id; + + for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { + if (hif_state->host_ce_config[ce_id].src_nentries) + hal_set_one_shadow_config(scn->hal_soc, + CE_SRC, ce_id); + + if (hif_state->host_ce_config[ce_id].dest_nentries) { + hal_set_one_shadow_config(scn->hal_soc, + CE_DST, ce_id); + + hal_set_one_shadow_config(scn->hal_soc, + CE_DST_STATUS, ce_id); + } + } +} + +static void ce_prepare_shadow_register_v2_cfg_srng(struct hif_softc *scn, + struct pld_shadow_reg_v2_cfg **shadow_config, + int *num_shadow_registers_configured) +{ + if (scn->hal_soc == NULL) { + HIF_ERROR("%s: hal not initialized: not initializing shadow config", + __func__); + return; + } + + hal_get_shadow_config(scn->hal_soc, shadow_config, + num_shadow_registers_configured); + + if (*num_shadow_registers_configured != 0) { + HIF_ERROR("%s: hal shadow register configuration allready constructed", + __func__); + + /* return with original 
configuration*/ + return; + } + + hal_construct_shadow_config(scn->hal_soc); + ce_construct_shadow_config_srng(scn); + + /* get updated configuration */ + hal_get_shadow_config(scn->hal_soc, shadow_config, + num_shadow_registers_configured); +} + +static struct ce_ops ce_service_srng = { + .ce_get_desc_size = ce_get_desc_size_srng, + .ce_ring_setup = ce_ring_setup_srng, + .ce_sendlist_send = ce_sendlist_send_srng, + .ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng, + .ce_revoke_recv_next = ce_revoke_recv_next_srng, + .ce_cancel_send_next = ce_cancel_send_next_srng, + .ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng, + .ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng, + .ce_send_nolock = ce_send_nolock_srng, + .watermark_int = ce_check_int_watermark_srng, + .ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng, + .ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng, + .ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng, + .ce_prepare_shadow_register_v2_cfg = + ce_prepare_shadow_register_v2_cfg_srng, +}; + +struct ce_ops *ce_services_srng() +{ + return &ce_service_srng; +} +qdf_export_symbol(ce_services_srng); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c new file mode 100644 index 0000000000000000000000000000000000000000..8ac93bfe0fb204a0f88177abb5a235649654ad5c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c @@ -0,0 +1,520 @@ +/* + * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include "qdf_lock.h" +#include "qdf_types.h" +#include "qdf_status.h" +#include "regtable.h" +#include "hif.h" +#include "hif_io32.h" +#include "ce_main.h" +#include "ce_api.h" +#include "ce_reg.h" +#include "ce_internal.h" +#include "ce_tasklet.h" +#include "pld_common.h" +#include "hif_debug.h" +#include "hif_napi.h" + + +/** + * struct tasklet_work + * + * @id: ce_id + * @work: work + */ +struct tasklet_work { + enum ce_id_type id; + void *data; + struct work_struct work; +}; + + +/** + * reschedule_ce_tasklet_work_handler() - reschedule work + * @work: struct work_struct + * + * Return: N/A + */ +static void reschedule_ce_tasklet_work_handler(struct work_struct *work) +{ + struct tasklet_work *ce_work = container_of(work, struct tasklet_work, + work); + struct hif_softc *scn = ce_work->data; + struct HIF_CE_state *hif_ce_state; + + if (NULL == scn) { + HIF_ERROR("%s: tasklet scn is null", __func__); + return; + } + + hif_ce_state = HIF_GET_CE_STATE(scn); + + if (scn->hif_init_done == false) { + HIF_ERROR("%s: wlan driver is unloaded", __func__); + return; + } + tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq); +} + +static struct tasklet_work tasklet_workers[CE_ID_MAX]; +static bool work_initialized; + +/** + * init_tasklet_work() - init_tasklet_work + * @work: struct work_struct + * @work_handler: work_handler + * + * Return: N/A + */ +static void init_tasklet_work(struct work_struct 
*work, + work_func_t work_handler) +{ + INIT_WORK(work, work_handler); +} + +/** + * init_tasklet_workers() - init_tasklet_workers + * @scn: HIF Context + * + * Return: N/A + */ +void init_tasklet_workers(struct hif_opaque_softc *scn) +{ + uint32_t id; + + for (id = 0; id < CE_ID_MAX; id++) { + tasklet_workers[id].id = id; + tasklet_workers[id].data = scn; + init_tasklet_work(&tasklet_workers[id].work, + reschedule_ce_tasklet_work_handler); + } + work_initialized = true; +} + +/** + * deinit_tasklet_workers() - deinit_tasklet_workers + * @scn: HIF Context + * + * Return: N/A + */ +void deinit_tasklet_workers(struct hif_opaque_softc *scn) +{ + u32 id; + + for (id = 0; id < CE_ID_MAX; id++) + cancel_work_sync(&tasklet_workers[id].work); + + work_initialized = false; +} + +/** + * ce_schedule_tasklet() - schedule ce tasklet + * @tasklet_entry: struct ce_tasklet_entry + * + * Return: N/A + */ +static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry) +{ + tasklet_schedule(&tasklet_entry->intr_tq); +} + +/** + * ce_tasklet() - ce_tasklet + * @data: data + * + * Return: N/A + */ +static void ce_tasklet(unsigned long data) +{ + struct ce_tasklet_entry *tasklet_entry = + (struct ce_tasklet_entry *)data; + struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state); + struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id]; + + hif_record_ce_desc_event(scn, tasklet_entry->ce_id, + HIF_CE_TASKLET_ENTRY, NULL, NULL, 0, 0); + + if (qdf_atomic_read(&scn->link_suspended)) { + HIF_ERROR("%s: ce %d tasklet fired after link suspend.", + __func__, tasklet_entry->ce_id); + QDF_BUG(0); + } + + ce_per_engine_service(scn, tasklet_entry->ce_id); + + if (ce_check_rx_pending(CE_state)) { + /* + * There are frames pending, schedule tasklet to process them. + * Enable the interrupt only when there is no pending frames in + * any of the Copy Engine pipes. 
+ */ + hif_record_ce_desc_event(scn, tasklet_entry->ce_id, + HIF_CE_TASKLET_RESCHEDULE, NULL, NULL, 0, 0); + + ce_schedule_tasklet(tasklet_entry); + return; + } + + if (scn->target_status != TARGET_STATUS_RESET) + hif_irq_enable(scn, tasklet_entry->ce_id); + + hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT, + NULL, NULL, 0, 0); + + qdf_atomic_dec(&scn->active_tasklet_cnt); +} + +/** + * ce_tasklet_init() - ce_tasklet_init + * @hif_ce_state: hif_ce_state + * @mask: mask + * + * Return: N/A + */ +void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask) +{ + int i; + + for (i = 0; i < CE_COUNT_MAX; i++) { + if (mask & (1 << i)) { + hif_ce_state->tasklets[i].ce_id = i; + hif_ce_state->tasklets[i].inited = true; + hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state; + tasklet_init(&hif_ce_state->tasklets[i].intr_tq, + ce_tasklet, + (unsigned long)&hif_ce_state->tasklets[i]); + } + } +} +/** + * ce_tasklet_kill() - ce_tasklet_kill + * @hif_ce_state: hif_ce_state + * + * Return: N/A + */ +void ce_tasklet_kill(struct hif_softc *scn) +{ + int i; + struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn); + + for (i = 0; i < CE_COUNT_MAX; i++) + if (hif_ce_state->tasklets[i].inited) { + tasklet_kill(&hif_ce_state->tasklets[i].intr_tq); + hif_ce_state->tasklets[i].inited = false; + } + qdf_atomic_set(&scn->active_tasklet_cnt, 0); +} + +#define HIF_CE_DRAIN_WAIT_CNT 20 +/** + * hif_drain_tasklets(): wait until no tasklet is pending + * @scn: hif context + * + * Let running tasklets clear pending traffic. + * + * Return: 0 if no bottom half is in progress when it returns. + * -EFAULT if it times out. 
+ */ +int hif_drain_tasklets(struct hif_softc *scn) +{ + uint32_t ce_drain_wait_cnt = 0; + int32_t tasklet_cnt; + + while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) { + if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) { + HIF_ERROR("%s: CE still not done with access: %d", + __func__, tasklet_cnt); + + return -EFAULT; + } + HIF_INFO("%s: Waiting for CE to finish access", __func__); + msleep(10); + } + return 0; +} + +#ifdef WLAN_SUSPEND_RESUME_TEST +/** + * hif_interrupt_is_ut_resume(): Tests if an irq on the given copy engine should + * trigger a unit-test resume. + * @scn: The HIF context to operate on + * @ce_id: The copy engine Id from the originating interrupt + * + * Return: true if the raised irq should trigger a unit-test resume + */ +static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id) +{ + int errno; + uint8_t wake_ce_id; + + if (!hif_is_ut_suspended(scn)) + return false; + + /* ensure passed ce_id matches wake ce_id */ + errno = hif_get_wake_ce_id(scn, &wake_ce_id); + if (errno) { + HIF_ERROR("%s: failed to get wake CE Id: %d", __func__, errno); + return false; + } + + return ce_id == wake_ce_id; +} +#else +static inline bool +hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id) +{ + return false; +} +#endif /* WLAN_SUSPEND_RESUME_TEST */ + +/** + * hif_snoc_interrupt_handler() - hif_snoc_interrupt_handler + * @irq: irq coming from kernel + * @context: context + * + * Return: N/A + */ +static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context) +{ + struct ce_tasklet_entry *tasklet_entry = context; + struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state); + + return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq), + tasklet_entry); +} + +/** + * hif_ce_increment_interrupt_count() - update ce stats + * @hif_ce_state: ce state + * @ce_id: ce id + * + * Return: none + */ +static inline void +hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id) +{ + int 
cpu_id = qdf_get_cpu(); + + hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++; +} + +/** + * hif_display_ce_stats() - display ce stats + * @hif_ce_state: ce state + * + * Return: none + */ +void hif_display_ce_stats(struct HIF_CE_state *hif_ce_state) +{ +#define STR_SIZE 128 + uint8_t i, j, pos; + char str_buffer[STR_SIZE]; + int size, ret; + + qdf_debug("CE interrupt statistics:"); + for (i = 0; i < CE_COUNT_MAX; i++) { + size = STR_SIZE; + pos = 0; + for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) { + ret = snprintf(str_buffer + pos, size, "[%d]:%d ", + j, hif_ce_state->stats.ce_per_cpu[i][j]); + if (ret <= 0 || ret >= size) + break; + size -= ret; + pos += ret; + } + qdf_debug("CE id[%2d] - %s", i, str_buffer); + } +#undef STR_SIZE +} + +/** + * hif_clear_ce_stats() - clear ce stats + * @hif_ce_state: ce state + * + * Return: none + */ +void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state) +{ + qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats)); +} + +/** + * ce_dispatch_interrupt() - dispatch an interrupt to a processing context + * @ce_id: ce_id + * @tasklet_entry: context + * + * Return: N/A + */ +irqreturn_t ce_dispatch_interrupt(int ce_id, + struct ce_tasklet_entry *tasklet_entry) +{ + struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state); + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + + if (tasklet_entry->ce_id != ce_id) { + HIF_ERROR("%s: ce_id (expect %d, received %d) does not match", + __func__, tasklet_entry->ce_id, ce_id); + return IRQ_NONE; + } + if (unlikely(ce_id >= CE_COUNT_MAX)) { + HIF_ERROR("%s: ce_id=%d > CE_COUNT_MAX=%d", + __func__, tasklet_entry->ce_id, CE_COUNT_MAX); + return IRQ_NONE; + } + + hif_irq_disable(scn, ce_id); + + if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) + return IRQ_HANDLED; + + hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT, + NULL, NULL, 0, 0); + hif_ce_increment_interrupt_count(hif_ce_state, ce_id); + + if 
(unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) { + hif_ut_fw_resume(scn); + hif_irq_enable(scn, ce_id); + return IRQ_HANDLED; + } + + qdf_atomic_inc(&scn->active_tasklet_cnt); + + if (hif_napi_enabled(hif_hdl, ce_id)) + hif_napi_schedule(hif_hdl, ce_id); + else + tasklet_schedule(&tasklet_entry->intr_tq); + + return IRQ_HANDLED; +} + +/** + * const char *ce_name + * + * @ce_name: ce_name + */ +const char *ce_name[] = { + "WLAN_CE_0", + "WLAN_CE_1", + "WLAN_CE_2", + "WLAN_CE_3", + "WLAN_CE_4", + "WLAN_CE_5", + "WLAN_CE_6", + "WLAN_CE_7", + "WLAN_CE_8", + "WLAN_CE_9", + "WLAN_CE_10", + "WLAN_CE_11", +}; +/** + * ce_unregister_irq() - ce_unregister_irq + * @hif_ce_state: hif_ce_state copy engine device handle + * @mask: which coppy engines to unregister for. + * + * Unregisters copy engine irqs matching mask. If a 1 is set at bit x, + * unregister for copy engine x. + * + * Return: QDF_STATUS + */ +QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask) +{ + int id; + int ce_count; + int ret; + struct hif_softc *scn; + + if (hif_ce_state == NULL) { + HIF_WARN("%s: hif_ce_state = NULL", __func__); + return QDF_STATUS_SUCCESS; + } + + scn = HIF_GET_SOFTC(hif_ce_state); + ce_count = scn->ce_count; + /* we are removing interrupts, so better stop NAPI */ + ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn), + NAPI_EVT_INT_STATE, (void *)0); + if (ret != 0) + HIF_ERROR("%s: napi_event INT_STATE returned %d", + __func__, ret); + /* this is not fatal, continue */ + + /* filter mask to free only for ce's with irq registered */ + mask &= hif_ce_state->ce_register_irq_done; + for (id = 0; id < ce_count; id++) { + if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) { + ret = pld_ce_free_irq(scn->qdf_dev->dev, id, + &hif_ce_state->tasklets[id]); + if (ret < 0) + HIF_ERROR( + "%s: pld_unregister_irq error - ce_id = %d, ret = %d", + __func__, id, ret); + } + } + hif_ce_state->ce_register_irq_done &= ~mask; + + return QDF_STATUS_SUCCESS; +} +/** + * 
ce_register_irq() - ce_register_irq + * @hif_ce_state: hif_ce_state + * @mask: which coppy engines to unregister for. + * + * Registers copy engine irqs matching mask. If a 1 is set at bit x, + * Register for copy engine x. + * + * Return: QDF_STATUS + */ +QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask) +{ + int id; + int ce_count; + int ret; + unsigned long irqflags = IRQF_TRIGGER_RISING; + uint32_t done_mask = 0; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state); + + ce_count = scn->ce_count; + + for (id = 0; id < ce_count; id++) { + if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) { + ret = pld_ce_request_irq(scn->qdf_dev->dev, id, + hif_snoc_interrupt_handler, + irqflags, ce_name[id], + &hif_ce_state->tasklets[id]); + if (ret) { + HIF_ERROR( + "%s: cannot register CE %d irq handler, ret = %d", + __func__, id, ret); + ce_unregister_irq(hif_ce_state, done_mask); + return QDF_STATUS_E_FAULT; + } + done_mask |= 1 << id; + } + } + hif_ce_state->ce_register_irq_done |= done_mask; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.h new file mode 100644 index 0000000000000000000000000000000000000000..05da16872781430932bba3edce11020403640b42 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2015-2016,2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __CE_TASKLET_H__ +#define __CE_TASKLET_H__ +#include "ce_main.h" +void init_tasklet_workers(struct hif_opaque_softc *scn); +void deinit_tasklet_workers(struct hif_opaque_softc *scn); +void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask); +void ce_tasklet_kill(struct hif_softc *scn); +int hif_drain_tasklets(struct hif_softc *scn); +QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask); +QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask); +irqreturn_t ce_dispatch_interrupt(int irq, + struct ce_tasklet_entry *tasklet_entry); +void hif_display_ce_stats(struct HIF_CE_state *hif_ce_state); +void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state); +#endif /* __CE_TASKLET_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/ahb_api.h b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/ahb_api.h new file mode 100644 index 0000000000000000000000000000000000000000..103114b87e529c9c02a4b3830348ef3d87dcb620 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/ahb_api.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __AHB_API_H +#define __AHB_API_H +struct hif_exec_context; + +QDF_STATUS hif_ahb_open(struct hif_softc *hif_ctx, + enum qdf_bus_type bus_type); +void hif_ahb_close(struct hif_softc *hif_ctx); + + +void hif_ahb_disable_isr(struct hif_softc *hif_ctx); +void hif_ahb_nointrs(struct hif_softc *scn); +void hif_ahb_reset_soc(struct hif_softc *hif_ctx); +QDF_STATUS hif_ahb_enable_bus(struct hif_softc *ol_sc, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type); +void hif_ahb_disable_bus(struct hif_softc *scn); +int hif_ahb_bus_configure(struct hif_softc *scn); +void hif_ahb_irq_disable(struct hif_softc *scn, int ce_id); +void hif_ahb_irq_enable(struct hif_softc *scn, int ce_id); +void hif_ahb_exec_grp_irq_disable(struct hif_exec_context *hif_ext_grp); +void hif_ahb_exec_grp_irq_enable(struct hif_exec_context *hif_ext_grp); +int hif_ahb_dump_registers(struct hif_softc *scn); + +int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc); +int hif_ahb_clk_enable_disable(struct device *dev, int enable); +void hif_ahb_device_reset(struct hif_softc *scn); +int hif_ahb_enable_radio(struct hif_pci_softc *sc, + struct platform_device *pdev, + const struct platform_device_id *id); +int hif_ahb_configure_irq(struct hif_pci_softc *sc); +int hif_ahb_configure_grp_irq(struct hif_softc *scn, + struct hif_exec_context *hif_ext_grp); +bool hif_ahb_needs_bmi(struct hif_softc *scn); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/dummy.c b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/dummy.c new file mode 100644 index 
0000000000000000000000000000000000000000..beb4cfb03c9ee3ed4763d90cba1e81c132f5f7b9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/dummy.c @@ -0,0 +1,370 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_types.h" +#include "dummy.h" +#include "hif_debug.h" + +/** + * hif_dummy_bus_prevent_linkdown() - prevent linkdown + * @hif_ctx: hif context + * @flag: weather to keep the bus alive or not + * + * Dummy function for busses and platforms that do not support + * link down. This may need to be replaced with a wakelock. + */ +void hif_dummy_bus_prevent_linkdown(struct hif_softc *scn, bool flag) +{ + HIF_DBG("wlan: %s pcie power collapse ignored", + (flag ? "disable" : "enable")); +} + +/** + * hif_reset_soc(): reset soc + * + * this function resets soc + * + * @hif_ctx: HIF context + * + * Return: void + */ +/* Function to reset SoC */ +void hif_dummy_reset_soc(struct hif_softc *hif_ctx) +{ +} + +/** + * hif_dummy_suspend() - suspend the bus + * @hif_ctx: hif context + * + * dummy for busses that don't need to suspend. 
+ * + * Return: 0 for success and non-zero for failure + */ +int hif_dummy_bus_suspend(struct hif_softc *hif_ctx) +{ + return 0; +} + +/** + * hif_dummy_resume() - hif resume API + * + * This function resumes the bus. but snoc doesn't need to resume. + * Therefore do nothing. + * + * Return: 0 for success and non-zero for failure + */ +int hif_dummy_bus_resume(struct hif_softc *hif_ctx) +{ + return 0; +} + +/** + * hif_dummy_suspend_noirq() - suspend the bus + * @hif_ctx: hif context + * + * dummy for busses that don't need to synchronize + * with interrupt disable. + * + * Return: 0 for success and non-zero for failure + */ +int hif_dummy_bus_suspend_noirq(struct hif_softc *hif_ctx) +{ + return 0; +} + +/** + * hif_dummy_resume_noirq() - resume the bus + * @hif_ctx: hif context + * + * dummy for busses that don't need to synchronize + * with interrupt disable. + * + * Return: 0 for success and non-zero for failure + */ +int hif_dummy_bus_resume_noirq(struct hif_softc *hif_ctx) +{ + return 0; +} + +/** + * hif_dummy_target_sleep_state_adjust() - api to adjust state of target + * @scn: hif context + * @sleep_ok: allow or deny target to go to sleep + * @wait_for_it: ensure target has change + */ +int hif_dummy_target_sleep_state_adjust(struct hif_softc *scn, + bool sleep_ok, bool wait_for_it) +{ + return 0; +} + +/** + * hif_dummy_enable_power_management - dummy call + * hif_ctx: hif context + * is_packet_log_enabled: true if packet log is enabled + */ +void hif_dummy_enable_power_management(struct hif_softc *hif_ctx, + bool is_packet_log_enabled) +{} + +/** + * hif_dummy_disable_power_management - dummy call + * hif_ctx: hif context + * + * Return: none + */ +void hif_dummy_disable_power_management(struct hif_softc *hif_ctx) +{} + +/** + * hif_dummy_disable_isr - dummy call + * hif_ctx: hif context + * + * Return: none + */ +void hif_dummy_disable_isr(struct hif_softc *scn) +{} + +/** + * hif_dummy_nointrs - dummy call + * hif_sc: hif context + * + * Return: none + 
*/ +void hif_dummy_nointrs(struct hif_softc *hif_sc) +{} + +/** + * hif_dummy_bus_configure - dummy call + * hif_ctx: hif context + * + * Return: 0 for success + */ +int hif_dummy_bus_configure(struct hif_softc *hif_sc) +{ + return 0; +} + +/** + * hif_dummy_get_config_item - dummy call + * @hif_sc: hif context + * @opcode: configuration type + * @config: configuration value to set + * @config_len: configuration length + * + * Return: 0 for success + */ +QDF_STATUS +hif_dummy_get_config_item(struct hif_softc *hif_sc, + int opcode, void *config, uint32_t config_len) +{ + return 0; +} + +/** + * hif_dummy_set_mailbox_swap - dummy call + * @hif_sc: hif context + * + * Return: None + */ +void +hif_dummy_set_mailbox_swap(struct hif_softc *hif_sc) +{ +} + +/** + * hif_dummy_claim_device - dummy call + * @hif_sc: hif context + * + * Return: None + */ +void +hif_dummy_claim_device(struct hif_softc *hif_sc) +{ +} + +/** + * hif_dummy_cancel_deferred_target_sleep - dummy call + * @hif_sc: hif context + * + * Return: None + */ +void +hif_dummy_cancel_deferred_target_sleep(struct hif_softc *hif_sc) +{ +} + +/** + * hif_dummy_irq_enable - dummy call + * hif_ctx: hif context + * @irq_id: irq id + * + * Return: none + */ +void hif_dummy_irq_enable(struct hif_softc *hif_sc, int irq_id) +{} + +/** + * hif_dummy_grp_irq_enable - dummy call + * hif_ctx: hif context + * @irq_id: grp id + * + * Return: none + */ +void hif_dummy_grp_irq_enable(struct hif_softc *hif_sc, uint32_t grp_id) +{} + +/** + * hif_dummy_irq_disable - dummy call + * hif_ctx: hif context + * @irq_id: irq id + * + * Return: none + */ +void hif_dummy_irq_disable(struct hif_softc *hif_sc, int irq_id) +{} + +/** + * hif_dummy_grp_irq_disable- dummy call + * hif_ctx: hif context + * @grp_id: grp id + * + * Return: none + */ +void hif_dummy_grp_irq_disable(struct hif_softc *hif_sc, uint32_t grp_id) +{} + +/** + * hif_dummy_grp_irq_configure - dummy call + * hif_ctx: hif context + * + * Return: none + */ +int 
hif_dummy_grp_irq_configure(struct hif_softc *hif_sc, + struct hif_exec_context *exec) +{ + return 0; +} + +/** + * hif_dummy_dump_registers - dummy call + * hif_sc: hif context + * + * Return: 0 for success + */ +int hif_dummy_dump_registers(struct hif_softc *hif_sc) +{ + return 0; +} + +/** + * hif_dummy_dump_target_memory - dummy call + * @hif_sc: hif context + * @ramdump_base: base + * @address: address + * @size: size + * + * Return: None + */ +void hif_dummy_dump_target_memory(struct hif_softc *hif_sc, void *ramdump_base, + uint32_t address, uint32_t size) +{ +} + +/** + * hif_dummy_ipa_get_ce_resource - dummy call + * @scn: HIF context + * @ce_sr: copyengine source ring resource info + * @sr_ring_size: source ring size + * @reg_paddr: bus physical address + * + * Return: None + */ +void hif_dummy_ipa_get_ce_resource(struct hif_softc *hif_sc, + qdf_shared_mem_t **ce_sr, + uint32_t *sr_ring_size, + qdf_dma_addr_t *reg_paddr) +{ +} + +/** + * hif_dummy_mask_interrupt_call - dummy call + * @hif_sc: hif context + * + * Return: None + */ +void +hif_dummy_mask_interrupt_call(struct hif_softc *hif_sc) +{ +} + +/** + * hif_dummy_display_stats - dummy call + * hif_ctx: hif context + * + * Return: none + */ +void hif_dummy_display_stats(struct hif_softc *hif_ctx) +{} + +/** + * hif_dummy_clear_stats - dummy call + * hif_ctx: hif context + * + * Return: none + */ +void hif_dummy_clear_stats(struct hif_softc *hif_ctx) +{} +/** + * hif_dummy_set_bundle_mode() - dummy call + * @hif_sc: hif context + * @enabled: flag to enable/disable bundling + * @rx_bundle_cnt: bundle count to be used for RX + * + * Return: none + */ +void hif_dummy_set_bundle_mode(struct hif_softc *hif_ctx, + bool enabled, int rx_bundle_cnt) +{ +} + +/** + * hif_dummy_bus_reset_resume() - dummy call + * @hif_sc: hif context + * + * Return: int 0 for success, non zero for failure + */ +int hif_dummy_bus_reset_resume(struct hif_softc *hif_ctx) +{ + return 0; +} + +int hif_dummy_map_ce_to_irq(struct 
hif_softc *scn, int ce_id) +{ + HIF_ERROR("%s: hif_map_ce_to_irq is not implemented on this platform", + __func__); + QDF_BUG(0); + return -(1); +} + +int hif_dummy_addr_in_boundary(struct hif_softc *scn, uint32_t offset) +{ + return 0; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/dummy.h b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/dummy.h new file mode 100644 index 0000000000000000000000000000000000000000..25f3e57542d058d634f66eb0cc66c1653188cca3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/dummy.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +struct hif_softc; +struct hif_exec_context; + +void hif_dummy_bus_prevent_linkdown(struct hif_softc *scn, bool flag); +void hif_dummy_reset_soc(struct hif_softc *scn); +int hif_dummy_bus_suspend(struct hif_softc *hif_ctx); +int hif_dummy_bus_resume(struct hif_softc *hif_ctx); +int hif_dummy_bus_suspend_noirq(struct hif_softc *hif_ctx); +int hif_dummy_bus_resume_noirq(struct hif_softc *hif_ctx); +int hif_dummy_target_sleep_state_adjust(struct hif_softc *scn, + bool sleep_ok, bool wait_for_it); +void hif_dummy_enable_power_management(struct hif_softc *hif_ctx, + bool is_packet_log_enabled); +void hif_dummy_disable_power_management(struct hif_softc *hif_ctx); +void hif_dummy_disable_isr(struct hif_softc *scn); +void hif_dummy_nointrs(struct hif_softc *hif_sc); +int hif_dummy_bus_configure(struct hif_softc *hif_sc); +QDF_STATUS hif_dummy_get_config_item(struct hif_softc *hif_sc, + int opcode, void *config, uint32_t config_len); +void hif_dummy_set_mailbox_swap(struct hif_softc *hif_sc); +void hif_dummy_claim_device(struct hif_softc *hif_sc); +void hif_dummy_cancel_deferred_target_sleep(struct hif_softc *hif_sc); +void hif_dummy_irq_enable(struct hif_softc *hif_sc, int irq_id); +void hif_dummy_irq_disable(struct hif_softc *hif_sc, int irq_id); +void hif_dummy_grp_irq_enable(struct hif_softc *hif_sc, uint32_t grp_id); +void hif_dummy_grp_irq_disable(struct hif_softc *hif_sc, uint32_t grp_id); +int hif_dummy_grp_irq_configure(struct hif_softc *hif_sc, + struct hif_exec_context *exec); +int hif_dummy_dump_registers(struct hif_softc *hif_sc); +void hif_dummy_dump_target_memory(struct hif_softc *hif_sc, void *ramdump_base, + uint32_t address, uint32_t size); +void hif_dummy_ipa_get_ce_resource(struct hif_softc *hif_sc, + qdf_shared_mem_t **ce_sr, + uint32_t *sr_ring_size, + qdf_dma_addr_t *reg_paddr); +void hif_dummy_mask_interrupt_call(struct hif_softc *hif_sc); +void hif_dummy_display_stats(struct hif_softc *hif_ctx); +void hif_dummy_clear_stats(struct hif_softc 
*hif_ctx); +void hif_dummy_set_bundle_mode(struct hif_softc *hif_ctx, + bool enabled, int rx_bundle_cnt); +int hif_dummy_bus_reset_resume(struct hif_softc *hif_ctx); +int hif_dummy_map_ce_to_irq(struct hif_softc *scn, int ce_id); +int hif_dummy_addr_in_boundary(struct hif_softc *scn, uint32_t offset); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus.c b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus.c new file mode 100644 index 0000000000000000000000000000000000000000..bf2b308bc9e96caebbc6fcda025c09e8716e2d69 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus.c @@ -0,0 +1,518 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/* this file dispatches functions to bus specific definitions */ +#include "hif_debug.h" +#include "hif.h" +#include "hif_main.h" +#include "hif_io32.h" +#include "multibus.h" +#include "dummy.h" +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) +#include "ce_main.h" +#include "ce_api.h" +#include "ce_internal.h" +#endif +#include "htc_services.h" +#include "a_types.h" +#include "dummy.h" +#include "qdf_module.h" + +/** + * hif_initialize_default_ops() - initializes default operations values + * + * bus specific features should assign their dummy implementations here. + */ +static void hif_initialize_default_ops(struct hif_softc *hif_sc) +{ + struct hif_bus_ops *bus_ops = &hif_sc->bus_ops; + + /* must be filled in by hif_bus_open */ + bus_ops->hif_bus_close = NULL; + /* dummy implementations */ + bus_ops->hif_display_stats = + &hif_dummy_display_stats; + bus_ops->hif_clear_stats = + &hif_dummy_clear_stats; + bus_ops->hif_set_bundle_mode = &hif_dummy_set_bundle_mode; + bus_ops->hif_bus_reset_resume = &hif_dummy_bus_reset_resume; + bus_ops->hif_bus_suspend_noirq = &hif_dummy_bus_suspend_noirq; + bus_ops->hif_bus_resume_noirq = &hif_dummy_bus_resume_noirq; + bus_ops->hif_bus_early_suspend = &hif_dummy_bus_suspend; + bus_ops->hif_bus_late_resume = &hif_dummy_bus_resume; + bus_ops->hif_map_ce_to_irq = &hif_dummy_map_ce_to_irq; + bus_ops->hif_grp_irq_configure = &hif_dummy_grp_irq_configure; +} + +#define NUM_OPS (sizeof(struct hif_bus_ops) / sizeof(void *)) + +/** + * hif_verify_basic_ops() - ensure required bus apis are defined + * + * all bus operations must be defined to avoid crashes + * itterate over the structure and ensure all function pointers + * are non null. 
+ * + * Return: QDF_STATUS_SUCCESS if all the operations are defined + */ +static QDF_STATUS hif_verify_basic_ops(struct hif_softc *hif_sc) +{ + struct hif_bus_ops *bus_ops = &hif_sc->bus_ops; + void **ops_array = (void *)bus_ops; + QDF_STATUS status = QDF_STATUS_SUCCESS; + int i; + + for (i = 0; i < NUM_OPS; i++) { + if (!ops_array[i]) { + HIF_ERROR("%s: function %d is null", __func__, i); + status = QDF_STATUS_E_NOSUPPORT; + } + } + return status; +} + +/** + * hif_bus_get_context_size - API to return size of the bus specific structure + * + * Return: sizeof of hif_pci_softc + */ +int hif_bus_get_context_size(enum qdf_bus_type bus_type) +{ + switch (bus_type) { + case QDF_BUS_TYPE_PCI: + return hif_pci_get_context_size(); + case QDF_BUS_TYPE_AHB: + return hif_ahb_get_context_size(); + case QDF_BUS_TYPE_SNOC: + return hif_snoc_get_context_size(); + case QDF_BUS_TYPE_SDIO: + return hif_sdio_get_context_size(); + case QDF_BUS_TYPE_USB: + return hif_usb_get_context_size(); + default: + return 0; + } +} + +/** + * hif_bus_open() - initialize the bus_ops and call the bus specific open + * hif_sc: hif_context + * bus_type: type of bus being enumerated + * + * Return: QDF_STATUS_SUCCESS or error + */ +QDF_STATUS hif_bus_open(struct hif_softc *hif_sc, + enum qdf_bus_type bus_type) +{ + QDF_STATUS status = QDF_STATUS_E_INVAL; + + hif_initialize_default_ops(hif_sc); + + switch (bus_type) { + case QDF_BUS_TYPE_PCI: + status = hif_initialize_pci_ops(hif_sc); + break; + case QDF_BUS_TYPE_SNOC: + status = hif_initialize_snoc_ops(&hif_sc->bus_ops); + break; + case QDF_BUS_TYPE_AHB: + status = hif_initialize_ahb_ops(&hif_sc->bus_ops); + break; + case QDF_BUS_TYPE_SDIO: + status = hif_initialize_sdio_ops(hif_sc); + break; + case QDF_BUS_TYPE_USB: + status = hif_initialize_usb_ops(&hif_sc->bus_ops); + break; + default: + status = QDF_STATUS_E_NOSUPPORT; + break; + } + + if (status != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: %d not supported", __func__, bus_type); + return status; + } 
+ + status = hif_verify_basic_ops(hif_sc); + if (status != QDF_STATUS_SUCCESS) + return status; + + return hif_sc->bus_ops.hif_bus_open(hif_sc, bus_type); +} + +/** + * hif_bus_close() - close the bus + * @hif_sc: hif_context + */ +void hif_bus_close(struct hif_softc *hif_sc) +{ + hif_sc->bus_ops.hif_bus_close(hif_sc); +} + +/** + * hif_bus_prevent_linkdown() - prevent linkdown + * @hif_ctx: hif context + * @flag: true = keep bus alive false = let bus go to sleep + * + * Keeps the bus awake durring suspend. + */ +void hif_bus_prevent_linkdown(struct hif_softc *hif_sc, bool flag) +{ + hif_sc->bus_ops.hif_bus_prevent_linkdown(hif_sc, flag); +} + + +void hif_reset_soc(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + hif_sc->bus_ops.hif_reset_soc(hif_sc); +} + +int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + return hif_sc->bus_ops.hif_bus_early_suspend(hif_sc); +} + +int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + return hif_sc->bus_ops.hif_bus_late_resume(hif_sc); +} + +int hif_bus_suspend(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + return hif_sc->bus_ops.hif_bus_suspend(hif_sc); +} + +int hif_bus_resume(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + return hif_sc->bus_ops.hif_bus_resume(hif_sc); +} + +int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + return hif_sc->bus_ops.hif_bus_suspend_noirq(hif_sc); +} + +int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + return hif_sc->bus_ops.hif_bus_resume_noirq(hif_sc); +} + +int hif_target_sleep_state_adjust(struct hif_softc *hif_sc, + bool sleep_ok, bool wait_for_it) +{ + return 
hif_sc->bus_ops.hif_target_sleep_state_adjust(hif_sc, + sleep_ok, wait_for_it); +} +qdf_export_symbol(hif_target_sleep_state_adjust); + +void hif_disable_isr(struct hif_opaque_softc *hif_hdl) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_hdl); + + hif_sc->bus_ops.hif_disable_isr(hif_sc); +} + +void hif_nointrs(struct hif_softc *hif_sc) +{ + hif_sc->bus_ops.hif_nointrs(hif_sc); +} + +QDF_STATUS hif_enable_bus(struct hif_softc *hif_sc, struct device *dev, + void *bdev, const struct hif_bus_id *bid, + enum hif_enable_type type) +{ + return hif_sc->bus_ops.hif_enable_bus(hif_sc, dev, bdev, bid, type); +} + +void hif_disable_bus(struct hif_softc *hif_sc) +{ + hif_sc->bus_ops.hif_disable_bus(hif_sc); +} + +int hif_bus_configure(struct hif_softc *hif_sc) +{ + return hif_sc->bus_ops.hif_bus_configure(hif_sc); +} + +QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx, + int opcode, void *config, uint32_t config_len) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + return hif_sc->bus_ops.hif_get_config_item(hif_sc, opcode, config, + config_len); +} + +void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + hif_sc->bus_ops.hif_set_mailbox_swap(hif_sc); +} + +void hif_claim_device(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + hif_sc->bus_ops.hif_claim_device(hif_sc); +} + +void hif_shutdown_device(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + hif_sc->bus_ops.hif_shutdown_device(hif_sc); +} + +void hif_stop(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx); + + hif_sc->bus_ops.hif_stop(hif_sc); +} + +void hif_cancel_deferred_target_sleep(struct hif_softc *hif_sc) +{ + return hif_sc->bus_ops.hif_cancel_deferred_target_sleep(hif_sc); +} + +void hif_irq_enable(struct hif_softc *hif_sc, int irq_id) +{ + hif_sc->bus_ops.hif_irq_enable(hif_sc, irq_id); +} 
+qdf_export_symbol(hif_irq_enable); + +void hif_irq_disable(struct hif_softc *hif_sc, int irq_id) +{ + hif_sc->bus_ops.hif_irq_disable(hif_sc, irq_id); +} + +int hif_grp_irq_configure(struct hif_softc *hif_sc, + struct hif_exec_context *hif_exec) +{ + return hif_sc->bus_ops.hif_grp_irq_configure(hif_sc, hif_exec); +} + +int hif_dump_registers(struct hif_opaque_softc *hif_hdl) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_hdl); + + return hif_sc->bus_ops.hif_dump_registers(hif_sc); +} + +void hif_dump_target_memory(struct hif_opaque_softc *hif_hdl, + void *ramdump_base, + uint32_t address, uint32_t size) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_hdl); + + hif_sc->bus_ops.hif_dump_target_memory(hif_sc, ramdump_base, + address, size); +} + +void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_hdl, + qdf_shared_mem_t **ce_sr, + uint32_t *ce_sr_ring_size, + qdf_dma_addr_t *ce_reg_paddr) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_hdl); + + hif_sc->bus_ops.hif_ipa_get_ce_resource(hif_sc, ce_sr, + ce_sr_ring_size, ce_reg_paddr); +} + +void hif_mask_interrupt_call(struct hif_opaque_softc *hif_hdl) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_hdl); + + hif_sc->bus_ops.hif_mask_interrupt_call(hif_sc); +} + +void hif_display_bus_stats(struct hif_opaque_softc *scn) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(scn); + + hif_sc->bus_ops.hif_display_stats(hif_sc); +} + +void hif_clear_bus_stats(struct hif_opaque_softc *scn) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(scn); + + hif_sc->bus_ops.hif_clear_stats(hif_sc); +} + +/** + * hif_enable_power_management() - enable power management after driver load + * @hif_hdl: opaque pointer to the hif context + * is_packet_log_enabled: true if packet log is enabled + * + * Driver load and firmware download are done in a high performance mode. + * Enable power management after the driver is loaded. + * packet log can require fewer power management features to be enabled. 
+ */ +void hif_enable_power_management(struct hif_opaque_softc *hif_hdl, + bool is_packet_log_enabled) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_hdl); + + hif_sc->bus_ops.hif_enable_power_management(hif_sc, + is_packet_log_enabled); +} + +/** + * hif_disable_power_management() - reset the bus power management + * @hif_hdl: opaque pointer to the hif context + * + * return the power management of the bus to its default state. + * This isn't necessarily a complete reversal of its counterpart. + * This should be called when unloading the driver. + */ +void hif_disable_power_management(struct hif_opaque_softc *hif_hdl) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_hdl); + + hif_sc->bus_ops.hif_disable_power_management(hif_sc); +} + +/** + * hif_set_bundle_mode() - enable bundling and set default rx bundle cnt + * @scn: pointer to hif_opaque_softc structure + * @enabled: flag to enable/disable bundling + * @rx_bundle_cnt: bundle count to be used for RX + * + * Return: none + */ +void hif_set_bundle_mode(struct hif_opaque_softc *scn, bool enabled, + int rx_bundle_cnt) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(scn); + + hif_sc->bus_ops.hif_set_bundle_mode(hif_sc, enabled, rx_bundle_cnt); +} + +/** + * hif_bus_reset_resume() - resume the bus after reset + * @scn: struct hif_opaque_softc + * + * This function is called to tell the driver that USB device has been resumed + * and it has also been reset. The driver should redo any necessary + * initialization. This function resets WLAN SOC. 
+ * + * Return: int 0 for success, non zero for failure + */ +int hif_bus_reset_resume(struct hif_opaque_softc *scn) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(scn); + + return hif_sc->bus_ops.hif_bus_reset_resume(hif_sc); +} + +int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn; + int i; + + QDF_BUG(hif_ctx); + scn = HIF_GET_SOFTC(hif_ctx); + if (!scn) + return -EINVAL; + + /* if the wake_irq is shared, don't disable it twice */ + disable_irq(scn->wake_irq); + for (i = 0; i < scn->ce_count; ++i) { + int irq = scn->bus_ops.hif_map_ce_to_irq(scn, i); + + if (irq != scn->wake_irq) + disable_irq(irq); + } + + return 0; +} + +int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn; + int i; + + QDF_BUG(hif_ctx); + scn = HIF_GET_SOFTC(hif_ctx); + if (!scn) + return -EINVAL; + + /* if the wake_irq is shared, don't enable it twice */ + enable_irq(scn->wake_irq); + for (i = 0; i < scn->ce_count; ++i) { + int irq = scn->bus_ops.hif_map_ce_to_irq(scn, i); + + if (irq != scn->wake_irq) + enable_irq(irq); + } + + return 0; +} + +int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn; + + QDF_BUG(hif_ctx); + scn = HIF_GET_SOFTC(hif_ctx); + if (!scn) + return -EINVAL; + + disable_irq(scn->wake_irq); + + return 0; +} + +int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn; + + QDF_BUG(hif_ctx); + scn = HIF_GET_SOFTC(hif_ctx); + if (!scn) + return -EINVAL; + + enable_irq(scn->wake_irq); + + return 0; +} + +bool hif_needs_bmi(struct hif_opaque_softc *scn) +{ + struct hif_softc *hif_sc = HIF_GET_SOFTC(scn); + + return hif_sc->bus_ops.hif_needs_bmi(hif_sc); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus.h b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus.h new file mode 100644 index 0000000000000000000000000000000000000000..d029b284531b606b0f787a551429811a02bae5b7 --- /dev/null +++ 
b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus.h
@@ -0,0 +1,199 @@
/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _MULTIBUS_H_
#define _MULTIBUS_H_

#include "osdep.h"
#include "qdf_status.h"
#include "hif_debug.h"

struct hif_softc;
struct hif_exec_context;

/**
 * struct hif_bus_ops - per-bus operations table
 *
 * One instance is embedded in each struct hif_softc and filled in by the
 * bus-specific hif_initialize_*_ops() routines; unimplemented entries are
 * pointed at the hif_dummy_* fallbacks. hif_verify_basic_ops() requires
 * every member to be non-NULL before the bus is opened.
 */
struct hif_bus_ops {
	QDF_STATUS (*hif_bus_open)(struct hif_softc *hif_sc,
				   enum qdf_bus_type bus_type);
	void (*hif_bus_close)(struct hif_softc *hif_sc);
	void (*hif_bus_prevent_linkdown)(struct hif_softc *hif_sc, bool flag);
	void (*hif_reset_soc)(struct hif_softc *hif_sc);
	int (*hif_bus_early_suspend)(struct hif_softc *hif_ctx);
	int (*hif_bus_late_resume)(struct hif_softc *hif_ctx);
	int (*hif_bus_suspend)(struct hif_softc *hif_ctx);
	int (*hif_bus_resume)(struct hif_softc *hif_ctx);
	int (*hif_bus_suspend_noirq)(struct hif_softc *hif_ctx);
	int (*hif_bus_resume_noirq)(struct hif_softc *hif_ctx);
	int (*hif_target_sleep_state_adjust)(struct hif_softc *scn,
					     bool sleep_ok, bool wait_for_it);
	void (*hif_disable_isr)(struct hif_softc *hif_sc);
	void (*hif_nointrs)(struct hif_softc *hif_sc);
	QDF_STATUS (*hif_enable_bus)(struct hif_softc *hif_sc,
				     struct device *dev,
				     void *bdev,
				     const struct hif_bus_id *bid,
				     enum hif_enable_type type);
	void (*hif_disable_bus)(struct hif_softc *hif_sc);
	int (*hif_bus_configure)(struct hif_softc *hif_sc);
	QDF_STATUS (*hif_get_config_item)(struct hif_softc *hif_sc,
					  int opcode, void *config, uint32_t config_len);
	void (*hif_set_mailbox_swap)(struct hif_softc *hif_sc);
	void (*hif_claim_device)(struct hif_softc *hif_sc);
	void (*hif_shutdown_device)(struct hif_softc *hif_sc);
	void (*hif_stop)(struct hif_softc *hif_sc);
	void (*hif_cancel_deferred_target_sleep)(struct hif_softc *hif_sc);
	void (*hif_irq_disable)(struct hif_softc *hif_sc, int ce_id);
	void (*hif_irq_enable)(struct hif_softc *hif_sc, int ce_id);
	int (*hif_grp_irq_configure)(struct hif_softc *hif_sc,
				     struct hif_exec_context *exec);
	int (*hif_dump_registers)(struct hif_softc *hif_sc);
	void (*hif_dump_target_memory)(struct hif_softc *hif_sc,
				       void *ramdump_base,
				       uint32_t address, uint32_t size);
	void (*hif_ipa_get_ce_resource)(struct hif_softc *hif_sc,
					qdf_shared_mem_t **ce_sr,
					uint32_t *sr_ring_size,
					qdf_dma_addr_t *reg_paddr);
	void (*hif_mask_interrupt_call)(struct hif_softc *hif_sc);
	void (*hif_enable_power_management)(struct hif_softc *hif_ctx,
					    bool is_packet_log_enabled);
	void (*hif_disable_power_management)(struct hif_softc *hif_ctx);
	void (*hif_display_stats)(struct hif_softc *hif_ctx);
	void (*hif_clear_stats)(struct hif_softc *hif_ctx);
	void (*hif_set_bundle_mode)(struct hif_softc *hif_ctx, bool enabled,
				    int rx_bundle_cnt);
	int (*hif_bus_reset_resume)(struct hif_softc *hif_ctx);
	int (*hif_map_ce_to_irq)(struct hif_softc *hif_sc, int ce_id);
	int (*hif_addr_in_boundary)(struct hif_softc *scn, uint32_t offset);
	bool (*hif_needs_bmi)(struct hif_softc *hif_sc);
};

#ifdef HIF_SNOC
QDF_STATUS hif_initialize_snoc_ops(struct hif_bus_ops *hif_sc);
int hif_snoc_get_context_size(void);
#else
/**
 * hif_initialize_snoc_ops() - dummy when snoc isn't supported
 *
 * NOTE(review): parameter is named hif_sc but is the ops table — confirm.
 *
 * Return: QDF_STATUS_E_NOSUPPORT
 */
static inline QDF_STATUS hif_initialize_snoc_ops(struct hif_bus_ops *hif_sc)
{
	HIF_ERROR("%s: not supported", __func__);
	return QDF_STATUS_E_NOSUPPORT;
}
/**
 * hif_snoc_get_context_size() - dummy when snoc isn't supported
 *
 * Return: 0 as an invalid size to indicate no support
 */
static inline int hif_snoc_get_context_size(void)
{
	return 0;
}
#endif /* HIF_SNOC */

#ifdef HIF_PCI
QDF_STATUS hif_initialize_pci_ops(struct hif_softc *hif_sc);
int hif_pci_get_context_size(void);
#else
/**
 * hif_initialize_pci_ops() - dummy when pci isn't supported
 *
 * Return: QDF_STATUS_E_NOSUPPORT
 */
static inline QDF_STATUS hif_initialize_pci_ops(struct hif_softc *hif_sc)
{
	HIF_ERROR("%s: not supported", __func__);
	return QDF_STATUS_E_NOSUPPORT;
}
/**
 * hif_pci_get_context_size() - dummy when pci isn't supported
 *
 * Return: 0 as an invalid size to indicate no support
 */
static inline int hif_pci_get_context_size(void)
{
	return 0;
}
#endif /* HIF_PCI */

#ifdef HIF_AHB
QDF_STATUS hif_initialize_ahb_ops(struct hif_bus_ops *bus_ops);
int hif_ahb_get_context_size(void);
#else
/**
 * hif_initialize_ahb_ops() - dummy for when ahb not supported
 *
 * Return: QDF_STATUS_E_NOSUPPORT
 */
static inline QDF_STATUS hif_initialize_ahb_ops(struct hif_bus_ops *bus_ops)
{
	HIF_ERROR("%s: not supported", __func__);
	return QDF_STATUS_E_NOSUPPORT;
}

/**
 * hif_ahb_get_context_size() - dummy for when ahb not supported
 *
 * Return: 0 as an invalid size to indicate no support
 */
static inline int hif_ahb_get_context_size(void)
{
	return 0;
}
#endif

#ifdef HIF_SDIO
QDF_STATUS hif_initialize_sdio_ops(struct hif_softc *hif_sc);
int hif_sdio_get_context_size(void);
#else
/**
 * hif_initialize_sdio_ops() - dummy for when sdio not supported
 *
 * Return: QDF_STATUS_E_NOSUPPORT
 */

static inline QDF_STATUS hif_initialize_sdio_ops(struct hif_softc *hif_sc)
{
	HIF_ERROR("%s: not supported", __func__);
	return QDF_STATUS_E_NOSUPPORT;
}

/**
 * hif_sdio_get_context_size() - dummy when sdio isn't supported
 *
 * Return: 0 as an invalid size to indicate no support
 */
static inline int hif_sdio_get_context_size(void)
{
	return 0;
}
#endif /* HIF_SDIO */

int hif_grp_irq_configure(struct hif_softc *hif_sc,
			  struct hif_exec_context *hif_exec);
#ifdef HIF_USB
QDF_STATUS hif_initialize_usb_ops(struct hif_bus_ops *bus_ops);
int hif_usb_get_context_size(void);
#else
/**
 * hif_initialize_usb_ops() - dummy when usb isn't supported
 *
 * Return: QDF_STATUS_E_NOSUPPORT
 */
static inline QDF_STATUS hif_initialize_usb_ops(struct hif_bus_ops *bus_ops)
{
	HIF_ERROR("%s: not supported", __func__);
	return QDF_STATUS_E_NOSUPPORT;
}
/**
 * hif_usb_get_context_size() - dummy when usb isn't supported
 *
 * Return: 0 as an invalid size to indicate no support
 */
static inline int hif_usb_get_context_size(void)
{
	return 0;
}
#endif /* HIF_USB */
#endif /* _MULTIBUS_H_ */
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_ahb.c b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_ahb.c
new file mode 100644
index 0000000000000000000000000000000000000000..04bf46d7f90bc189436422986d338d27efe0772a
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_ahb.c
@@ -0,0 +1,84 @@
/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
+ */ + +#include "hif.h" +#include "hif_main.h" +#include "multibus.h" +#include "ce_main.h" +#include "if_pci.h" +#include "ahb_api.h" +#include "dummy.h" + +/** + * hif_initialize_ahb_ops() - initialize the ahb ops + * @bus_ops: hif_bus_ops table pointer to initialize + * + * This function will assign the set of callbacks that needs + * to be called for ipq4019 platform + * + * Return: QDF_STATUS_SUCCESS + */ +QDF_STATUS hif_initialize_ahb_ops(struct hif_bus_ops *bus_ops) +{ + bus_ops->hif_bus_open = &hif_ahb_open; + bus_ops->hif_bus_close = &hif_ahb_close; + bus_ops->hif_bus_prevent_linkdown = &hif_dummy_bus_prevent_linkdown; + bus_ops->hif_reset_soc = &hif_ahb_reset_soc; + bus_ops->hif_bus_suspend = &hif_dummy_bus_suspend; + bus_ops->hif_bus_resume = &hif_dummy_bus_resume; + bus_ops->hif_target_sleep_state_adjust = + &hif_dummy_target_sleep_state_adjust; + + bus_ops->hif_disable_isr = &hif_ahb_disable_isr; + bus_ops->hif_nointrs = &hif_ahb_nointrs; + bus_ops->hif_enable_bus = &hif_ahb_enable_bus; + bus_ops->hif_disable_bus = &hif_ahb_disable_bus; + bus_ops->hif_bus_configure = &hif_ahb_bus_configure; + bus_ops->hif_get_config_item = &hif_dummy_get_config_item; + bus_ops->hif_set_mailbox_swap = &hif_dummy_set_mailbox_swap; + bus_ops->hif_claim_device = &hif_dummy_claim_device; + bus_ops->hif_shutdown_device = &hif_ce_stop; + bus_ops->hif_stop = &hif_ce_stop; + bus_ops->hif_cancel_deferred_target_sleep = + &hif_dummy_cancel_deferred_target_sleep; + bus_ops->hif_irq_disable = &hif_ahb_irq_disable; + bus_ops->hif_irq_enable = &hif_ahb_irq_enable; + bus_ops->hif_dump_registers = &hif_ahb_dump_registers; + bus_ops->hif_dump_target_memory = &hif_dummy_dump_target_memory; + bus_ops->hif_ipa_get_ce_resource = &hif_dummy_ipa_get_ce_resource; + bus_ops->hif_mask_interrupt_call = &hif_dummy_mask_interrupt_call; + bus_ops->hif_enable_power_management = + &hif_dummy_enable_power_management; + bus_ops->hif_disable_power_management = + &hif_dummy_disable_power_management; + 
bus_ops->hif_grp_irq_configure = &hif_ahb_configure_grp_irq; + bus_ops->hif_addr_in_boundary = &hif_dummy_addr_in_boundary; + bus_ops->hif_needs_bmi = &hif_ahb_needs_bmi; + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_ahb_get_context_size() - return the size of the snoc context + * + * Return the size of the context. (0 for invalid bus) + */ +int hif_ahb_get_context_size(void) +{ + return sizeof(struct hif_pci_softc); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_pci.c b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_pci.c new file mode 100644 index 0000000000000000000000000000000000000000..7ef683790c312599a92554f15ab42a6b348bd9a7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_pci.c @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "hif.h" +#include "hif_main.h" +#include "multibus.h" +#include "pci_api.h" +#include "hif_io32.h" +#include "dummy.h" +#include "ce_api.h" + +/** + * hif_initialize_pci_ops() - initialize the pci ops + * @bus_ops: hif_bus_ops table pointer to initialize + * + * Return: QDF_STATUS_SUCCESS + */ +QDF_STATUS hif_initialize_pci_ops(struct hif_softc *hif_sc) +{ + struct hif_bus_ops *bus_ops = &hif_sc->bus_ops; + + bus_ops->hif_bus_open = &hif_pci_open; + bus_ops->hif_bus_close = &hif_pci_close; + bus_ops->hif_bus_prevent_linkdown = &hif_pci_prevent_linkdown; + bus_ops->hif_reset_soc = &hif_pci_reset_soc; + bus_ops->hif_bus_suspend = &hif_pci_bus_suspend; + bus_ops->hif_bus_resume = &hif_pci_bus_resume; + bus_ops->hif_bus_suspend_noirq = &hif_pci_bus_suspend_noirq; + bus_ops->hif_bus_resume_noirq = &hif_pci_bus_resume_noirq; + + /* do not put the target to sleep for epping or maxperf mode */ + if (CONFIG_ATH_PCIE_MAX_PERF == 0 && + !QDF_IS_EPPING_ENABLED(hif_get_conparam(hif_sc))) + bus_ops->hif_target_sleep_state_adjust = + &hif_pci_target_sleep_state_adjust; + else + bus_ops->hif_target_sleep_state_adjust = + &hif_dummy_target_sleep_state_adjust; + + bus_ops->hif_disable_isr = &hif_pci_disable_isr; + bus_ops->hif_nointrs = &hif_pci_nointrs; + bus_ops->hif_enable_bus = &hif_pci_enable_bus; + bus_ops->hif_disable_bus = &hif_pci_disable_bus; + bus_ops->hif_bus_configure = &hif_pci_bus_configure; + bus_ops->hif_get_config_item = &hif_dummy_get_config_item; + bus_ops->hif_set_mailbox_swap = &hif_dummy_set_mailbox_swap; + bus_ops->hif_claim_device = &hif_dummy_claim_device; + bus_ops->hif_shutdown_device = &hif_ce_stop; + bus_ops->hif_stop = &hif_ce_stop; + bus_ops->hif_cancel_deferred_target_sleep = + &hif_pci_cancel_deferred_target_sleep; + bus_ops->hif_irq_disable = &hif_pci_irq_disable; + bus_ops->hif_irq_enable = &hif_pci_irq_enable; + bus_ops->hif_dump_registers = &hif_pci_dump_registers; + bus_ops->hif_dump_target_memory = &hif_ce_dump_target_memory; 
+ bus_ops->hif_ipa_get_ce_resource = &hif_ce_ipa_get_ce_resource; + bus_ops->hif_mask_interrupt_call = &hif_dummy_mask_interrupt_call; + bus_ops->hif_enable_power_management = + &hif_pci_enable_power_management; + bus_ops->hif_disable_power_management = + &hif_pci_disable_power_management; + bus_ops->hif_grp_irq_configure = &hif_pci_configure_grp_irq; + bus_ops->hif_display_stats = + &hif_pci_display_stats; + bus_ops->hif_clear_stats = + &hif_pci_clear_stats; + bus_ops->hif_addr_in_boundary = &hif_pci_addr_in_boundary; + + /* default to legacy mapping handler; override as needed */ + bus_ops->hif_map_ce_to_irq = &hif_pci_legacy_map_ce_to_irq; + bus_ops->hif_needs_bmi = &hif_pci_needs_bmi; + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_pci_get_context_size() - return the size of the pci context + * + * Return the size of the context. (0 for invalid bus) + */ +int hif_pci_get_context_size(void) +{ + return sizeof(struct hif_pci_softc); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_sdio.c b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_sdio.c new file mode 100644 index 0000000000000000000000000000000000000000..3c220be0b42ad8d827a6e0bef192443227e802e4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_sdio.c @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hif.h" +#include "hif_main.h" +#include "multibus.h" +#include "sdio_api.h" +#include "hif_io32.h" +#include "dummy.h" +#include "if_sdio.h" + +/** + * hif_initialize_sdio_ops() - initialize the sdio ops + * @hif_sc: hif context whose bus_ops table will be initialized + * + * Return: QDF_STATUS_SUCCESS + */ +QDF_STATUS hif_initialize_sdio_ops(struct hif_softc *hif_sc) +{ + struct hif_bus_ops *bus_ops = &hif_sc->bus_ops; + + bus_ops->hif_bus_open = &hif_sdio_open; + bus_ops->hif_bus_close = &hif_sdio_close; + bus_ops->hif_bus_prevent_linkdown = &hif_dummy_bus_prevent_linkdown; + bus_ops->hif_reset_soc = &hif_dummy_reset_soc; + bus_ops->hif_bus_suspend = &hif_sdio_bus_suspend; + bus_ops->hif_bus_resume = &hif_sdio_bus_resume; + bus_ops->hif_target_sleep_state_adjust = + &hif_dummy_target_sleep_state_adjust; + bus_ops->hif_disable_isr = &hif_dummy_disable_isr; + bus_ops->hif_nointrs = &hif_dummy_nointrs; + bus_ops->hif_enable_bus = &hif_sdio_enable_bus; + bus_ops->hif_disable_bus = &hif_sdio_disable_bus; + bus_ops->hif_bus_configure = &hif_dummy_bus_configure; + bus_ops->hif_get_config_item = &hif_sdio_get_config_item; + bus_ops->hif_set_mailbox_swap = &hif_sdio_set_mailbox_swap; + bus_ops->hif_claim_device = &hif_sdio_claim_device; + bus_ops->hif_shutdown_device = &hif_sdio_shutdown; + bus_ops->hif_stop = &hif_sdio_stop; + bus_ops->hif_cancel_deferred_target_sleep = + &hif_dummy_cancel_deferred_target_sleep; + bus_ops->hif_irq_disable = &hif_dummy_irq_disable; + bus_ops->hif_irq_enable = &hif_dummy_irq_enable; + bus_ops->hif_dump_registers = &hif_dummy_dump_registers; + bus_ops->hif_dump_target_memory = 
&hif_dummy_dump_target_memory; + bus_ops->hif_ipa_get_ce_resource = &hif_dummy_ipa_get_ce_resource; + bus_ops->hif_mask_interrupt_call = &hif_sdio_mask_interrupt_call; + bus_ops->hif_enable_power_management = + &hif_dummy_enable_power_management; + bus_ops->hif_disable_power_management = + &hif_dummy_disable_power_management; + bus_ops->hif_addr_in_boundary = &hif_dummy_addr_in_boundary; + bus_ops->hif_needs_bmi = &hif_sdio_needs_bmi; + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_sdio_get_context_size() - return the size of the sdio context + * + * Return the size of the context. (0 for invalid bus) + */ +int hif_sdio_get_context_size(void) +{ + return sizeof(struct hif_sdio_softc); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_snoc.c b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_snoc.c new file mode 100644 index 0000000000000000000000000000000000000000..3c0253ca060f38104ffa20142943d7a4a2519ef4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_snoc.c @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "hif.h" +#include "hif_main.h" +#include "multibus.h" +#include "ce_main.h" +#include "snoc_api.h" +#include "dummy.h" +#include "ce_api.h" + +/** + * hif_initialize_snoc_ops() - initialize the snoc ops + * @bus_ops: hif_bus_ops table pointer to initialize + * + * Return: QDF_STATUS_SUCCESS + */ +QDF_STATUS hif_initialize_snoc_ops(struct hif_bus_ops *bus_ops) +{ + bus_ops->hif_bus_open = &hif_snoc_open; + bus_ops->hif_bus_close = &hif_snoc_close; + bus_ops->hif_bus_prevent_linkdown = &hif_dummy_bus_prevent_linkdown; + bus_ops->hif_reset_soc = &hif_dummy_reset_soc; + bus_ops->hif_bus_early_suspend = &hif_ce_bus_early_suspend; + bus_ops->hif_bus_late_resume = &hif_ce_bus_late_resume; + bus_ops->hif_bus_suspend = &hif_snoc_bus_suspend; + bus_ops->hif_bus_resume = &hif_snoc_bus_resume; + bus_ops->hif_bus_suspend_noirq = &hif_snoc_bus_suspend_noirq; + /* snoc_bus_resume_noirq had no side effects, use dummy resume_noirq */ + bus_ops->hif_bus_resume_noirq = &hif_dummy_bus_resume_noirq; + bus_ops->hif_target_sleep_state_adjust = + &hif_dummy_target_sleep_state_adjust; + + bus_ops->hif_disable_isr = &hif_snoc_disable_isr; + bus_ops->hif_nointrs = &hif_snoc_nointrs; + bus_ops->hif_enable_bus = &hif_snoc_enable_bus; + bus_ops->hif_disable_bus = &hif_snoc_disable_bus; + bus_ops->hif_bus_configure = &hif_snoc_bus_configure; + bus_ops->hif_get_config_item = &hif_dummy_get_config_item; + bus_ops->hif_set_mailbox_swap = &hif_dummy_set_mailbox_swap; + bus_ops->hif_claim_device = &hif_dummy_claim_device; + bus_ops->hif_shutdown_device = &hif_ce_stop; + bus_ops->hif_stop = &hif_ce_stop; + bus_ops->hif_cancel_deferred_target_sleep = + &hif_dummy_cancel_deferred_target_sleep; + bus_ops->hif_irq_disable = &hif_snoc_irq_disable; + bus_ops->hif_irq_enable = &hif_snoc_irq_enable; + bus_ops->hif_dump_registers = &hif_snoc_dump_registers; + bus_ops->hif_dump_target_memory = &hif_ce_dump_target_memory; + bus_ops->hif_ipa_get_ce_resource = &hif_ce_ipa_get_ce_resource; + 
bus_ops->hif_mask_interrupt_call = &hif_dummy_mask_interrupt_call; + bus_ops->hif_enable_power_management = + &hif_dummy_enable_power_management; + bus_ops->hif_disable_power_management = + &hif_dummy_disable_power_management; + bus_ops->hif_display_stats = + &hif_snoc_display_stats; + bus_ops->hif_clear_stats = + &hif_snoc_clear_stats; + bus_ops->hif_map_ce_to_irq = &hif_snoc_map_ce_to_irq; + bus_ops->hif_addr_in_boundary = &hif_dummy_addr_in_boundary; + bus_ops->hif_needs_bmi = &hif_snoc_needs_bmi; + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_snoc_get_context_size() - return the size of the snoc context + * + * Return the size of the context. (0 for invalid bus) + */ +int hif_snoc_get_context_size(void) +{ + return sizeof(struct HIF_CE_state); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_usb.c b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_usb.c new file mode 100644 index 0000000000000000000000000000000000000000..e632a40134868060b6c1ca2a4b59be0dcecc7632 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/multibus_usb.c @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "hif.h" +#include "hif_main.h" +#include "multibus.h" +#include "usb_api.h" +#include "hif_io32.h" +#include "dummy.h" +#include "if_usb.h" + +/** + * hif_initialize_usb_ops() - initialize the usb ops + * @bus_ops: hif_bus_ops table pointer to initialize + * + * Return: QDF_STATUS_SUCCESS + */ +QDF_STATUS hif_initialize_usb_ops(struct hif_bus_ops *bus_ops) +{ + bus_ops->hif_bus_open = &hif_usb_open; + bus_ops->hif_bus_close = &hif_usb_close; + bus_ops->hif_bus_prevent_linkdown = &hif_dummy_bus_prevent_linkdown; + bus_ops->hif_reset_soc = &hif_dummy_reset_soc; + bus_ops->hif_bus_suspend = &hif_usb_bus_suspend; + bus_ops->hif_bus_resume = &hif_usb_bus_resume; + bus_ops->hif_target_sleep_state_adjust = + &hif_dummy_target_sleep_state_adjust; + bus_ops->hif_disable_isr = &hif_usb_disable_isr; + bus_ops->hif_nointrs = &hif_usb_nointrs; + bus_ops->hif_enable_bus = &hif_usb_enable_bus; + bus_ops->hif_disable_bus = &hif_usb_disable_bus; + bus_ops->hif_bus_configure = &hif_usb_bus_configure; + bus_ops->hif_get_config_item = &hif_dummy_get_config_item; + bus_ops->hif_set_mailbox_swap = &hif_dummy_set_mailbox_swap; + bus_ops->hif_claim_device = &hif_dummy_claim_device; + bus_ops->hif_shutdown_device = &hif_usb_shutdown_bus_device; + bus_ops->hif_stop = &hif_usb_stop_device; + bus_ops->hif_cancel_deferred_target_sleep = + &hif_dummy_cancel_deferred_target_sleep; + bus_ops->hif_irq_disable = &hif_usb_irq_disable; + bus_ops->hif_irq_enable = &hif_usb_irq_enable; + bus_ops->hif_dump_registers = &hif_dummy_dump_registers; + bus_ops->hif_dump_target_memory = &hif_dummy_dump_target_memory; + bus_ops->hif_ipa_get_ce_resource = &hif_dummy_ipa_get_ce_resource; + bus_ops->hif_mask_interrupt_call = &hif_dummy_mask_interrupt_call; + bus_ops->hif_enable_power_management = + &hif_dummy_enable_power_management; + bus_ops->hif_disable_power_management = + &hif_dummy_disable_power_management; + bus_ops->hif_addr_in_boundary = &hif_dummy_addr_in_boundary; + 
bus_ops->hif_set_bundle_mode = &hif_usb_set_bundle_mode; + bus_ops->hif_bus_reset_resume = &hif_usb_bus_reset_resume; + bus_ops->hif_map_ce_to_irq = &hif_dummy_map_ce_to_irq; + bus_ops->hif_needs_bmi = &hif_usb_needs_bmi; + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_usb_get_context_size() - return the size of the usb context + * + * Return the size of the context. (0 for invalid bus) + */ +int hif_usb_get_context_size(void) +{ + return sizeof(struct hif_usb_softc); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/pci_api.h b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/pci_api.h new file mode 100644 index 0000000000000000000000000000000000000000..3a19eeacfb761b5fa0f8873ca421cbb8cde51cd8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/pci_api.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _PCI_API_H_ +#define _PCI_API_H_ +struct hif_exec_context; + +QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, + enum qdf_bus_type bus_type); +void hif_pci_close(struct hif_softc *hif_ctx); +void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag); +void hif_pci_reset_soc(struct hif_softc *ol_sc); +int hif_pci_bus_suspend(struct hif_softc *scn); +int hif_pci_bus_suspend_noirq(struct hif_softc *scn); +int hif_pci_bus_resume(struct hif_softc *scn); +int hif_pci_bus_resume_noirq(struct hif_softc *scn); +int hif_pci_target_sleep_state_adjust(struct hif_softc *scn, + bool sleep_ok, bool wait_for_it); + +void hif_pci_disable_isr(struct hif_softc *scn); +void hif_pci_nointrs(struct hif_softc *scn); +QDF_STATUS hif_pci_enable_bus(struct hif_softc *scn, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type); +void hif_pci_disable_bus(struct hif_softc *scn); +int hif_pci_bus_configure(struct hif_softc *scn); +void hif_pci_irq_disable(struct hif_softc *scn, int ce_id); +void hif_pci_irq_enable(struct hif_softc *scn, int ce_id); +int hif_pci_dump_registers(struct hif_softc *scn); +void hif_pci_enable_power_management(struct hif_softc *hif_ctx, + bool is_packet_log_enabled); +void hif_pci_disable_power_management(struct hif_softc *hif_ctx); +int hif_pci_configure_grp_irq(struct hif_softc *scn, + struct hif_exec_context *exec); +void hif_pci_display_stats(struct hif_softc *hif_ctx); +void hif_pci_clear_stats(struct hif_softc *hif_ctx); +int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id); +bool hif_pci_needs_bmi(struct hif_softc *scn); +#endif /* _PCI_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/sdio_api.h b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/sdio_api.h new file mode 100644 index 0000000000000000000000000000000000000000..76ced83ea2c7ba6d8744a3ee4483045d7b44c743 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/sdio_api.h @@ -0,0 
+1,35 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +QDF_STATUS hif_sdio_open(struct hif_softc *hif_sc, + enum qdf_bus_type bus_type); +void hif_sdio_close(struct hif_softc *hif_sc); +int hif_sdio_bus_suspend(struct hif_softc *hif_ctx); +int hif_sdio_bus_resume(struct hif_softc *hif_ctx); +QDF_STATUS hif_sdio_enable_bus(struct hif_softc *hif_sc, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type); +void hif_sdio_disable_bus(struct hif_softc *hif_sc); +QDF_STATUS +hif_sdio_get_config_item(struct hif_softc *hif_sc, + int opcode, void *config, uint32_t config_len); +void hif_sdio_set_mailbox_swap(struct hif_softc *hif_sc); +void hif_sdio_claim_device(struct hif_softc *hif_sc); +void hif_sdio_mask_interrupt_call(struct hif_softc *scn); +bool hif_sdio_needs_bmi(struct hif_softc *scn); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/snoc_api.h b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/snoc_api.h new file mode 100644 index 0000000000000000000000000000000000000000..9b342f4966601091a81a88b2fb1c16e576fd222a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/snoc_api.h @@ -0,0 +1,42 @@ +/* + * Copyright 
(c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _SNOC_API_H_ +#define _SNOC_API_H_ +QDF_STATUS hif_snoc_open(struct hif_softc *hif_ctx, + enum qdf_bus_type bus_type); +void hif_snoc_close(struct hif_softc *hif_ctx); +int hif_snoc_bus_suspend(struct hif_softc *hif_ctx); +int hif_snoc_bus_resume(struct hif_softc *hif_ctx); +int hif_snoc_bus_suspend_noirq(struct hif_softc *scn); +void hif_snoc_disable_isr(struct hif_softc *hif_ctx); +void hif_snoc_nointrs(struct hif_softc *scn); +QDF_STATUS hif_snoc_enable_bus(struct hif_softc *ol_sc, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type); +void hif_snoc_disable_bus(struct hif_softc *scn); +int hif_snoc_bus_configure(struct hif_softc *scn); +void hif_snoc_irq_disable(struct hif_softc *scn, int ce_id); +void hif_snoc_irq_enable(struct hif_softc *scn, int ce_id); +int hif_snoc_dump_registers(struct hif_softc *scn); +void hif_snoc_display_stats(struct hif_softc *hif_ctx); +void hif_snoc_clear_stats(struct hif_softc *hif_ctx); +int hif_snoc_map_ce_to_irq(struct hif_softc *scn, int ce_id); +bool hif_snoc_needs_bmi(struct hif_softc *scn); +#endif /* _SNOC_API_H_ */ diff --git 
a/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/usb_api.h b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/usb_api.h new file mode 100644 index 0000000000000000000000000000000000000000..a54aa81698e060bd18d216eb99060eba6a5aa162 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/dispatcher/usb_api.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ +#ifndef _USB_API_H_ +#define _USB_API_H_ +#include "if_usb.h" + +QDF_STATUS hif_usb_open(struct hif_softc *hif_ctx, + enum qdf_bus_type bus_type); +void hif_usb_close(struct hif_softc *hif_ctx); + + +void hif_usb_disable_isr(struct hif_softc *hif_ctx); +void hif_usb_nointrs(struct hif_softc *scn); +QDF_STATUS hif_usb_enable_bus(struct hif_softc *ol_sc, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type); +void hif_usb_disable_bus(struct hif_softc *scn); +int hif_usb_bus_configure(struct hif_softc *scn); +void hif_usb_irq_disable(struct hif_softc *scn, int ce_id); +void hif_usb_irq_enable(struct hif_softc *scn, int ce_id); +int hif_usb_dump_registers(struct hif_softc *scn); +int hif_usb_bus_suspend(struct hif_softc *hif_ctx); +int hif_usb_bus_resume(struct hif_softc *hif_ctx); +void hif_usb_stop_device(struct hif_softc *hif_sc); +void hif_usb_shutdown_bus_device(struct hif_softc *scn); +int hif_usb_bus_reset_resume(struct hif_softc *hif_ctx); +void hif_usb_set_bundle_mode(struct hif_softc *scn, + bool enabled, int rx_bundle_cnt); +void hif_usb_reg_tbl_attach(struct hif_softc *scn); +void hif_fw_assert_ramdump_pattern(struct hif_usb_softc *sc); +void hif_usb_ramdump_handler(struct hif_opaque_softc *scn); +bool hif_usb_needs_bmi(struct hif_softc *scn); +#endif /*_USB_API_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_debug.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..6dffa7d81a4fada830b5ec91d241ab7481c44e26 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_debug.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2014, 2016, 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __HIF_DEBUG_H__ +#define __HIF_DEBUG_H__ +#include "qdf_trace.h" + +#define hif_alert_rl(params...) QDF_TRACE_FATAL_RL(QDF_MODULE_ID_HIF, params) +#define hif_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_HIF, params) +#define hif_warn_rl(params...) QDF_TRACE_WARN_RL(QDF_MODULE_ID_HIF, params) +#define hif_info_rl(params...) QDF_TRACE_INFO_RL(QDF_MODULE_ID_HIF, params) +#define hif_debug_rl(params...) QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_HIF, params) + +#define HIF_ERROR(args ...) \ + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR, ## args) +#define HIF_WARN(args ...) \ + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_WARN, ## args) +#define HIF_INFO(args ...) \ + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO, ## args) +#define HIF_INFO_HI(args ...) \ + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH, ## args) +#define HIF_INFO_MED(args ...) \ + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_MED, ## args) +#define HIF_INFO_LO(args ...) \ + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_LOW, ## args) +#define HIF_TRACE(args ...) \ + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO, ## args) +#define HIF_DBG(args ...) \ + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG, ## args) + +#define HIF_ENTER(fmt, ...) QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO, \ + "Enter: %s "fmt, __func__, ## __VA_ARGS__) + +#define HIF_EXIT(fmt, ...) 
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO, \ + "Exit: %s "fmt, __func__, ## __VA_ARGS__) + +#endif /* __HIF_DEBUG_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_exec.c b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_exec.c new file mode 100644 index 0000000000000000000000000000000000000000..f702e1808b593b8dc8531aa76781ccc3b726ef19 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_exec.c @@ -0,0 +1,486 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include "qdf_module.h" + +/* mapping NAPI budget 0 to internal budget 0 + * NAPI budget 1 to internal budget [1,scaler -1] + * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], etc + */ +#define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \ + (((n) << (s)) - 1) +#define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \ + (((n) + 1) >> (s)) + +static struct hif_exec_context *hif_exec_tasklet_create(void); + +/** + * hif_print_napi_stats() - print NAPI stats + * @hif_ctx: hif context + * + * return: void + */ +void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + struct hif_exec_context *hif_ext_group; + struct qca_napi_stat *napi_stats; + int i, j; + + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL, + "NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone\n"); + + for (i = 0; i < hif_state->hif_num_extgroup; i++) { + if (hif_state->hif_ext_group[i]) { + hif_ext_group = hif_state->hif_ext_group[i]; + for (j = 0; j < num_possible_cpus(); j++) { + napi_stats = &(hif_ext_group->stats[j]); + if (napi_stats->napi_schedules != 0) + QDF_TRACE(QDF_MODULE_ID_HIF, + QDF_TRACE_LEVEL_FATAL, + "NAPI[%2d]CPU[%d]: " + "%7d %7d %7d %7d \n", + i, j, + napi_stats->napi_schedules, + napi_stats->napi_polls, + napi_stats->napi_completes, + napi_stats->napi_workdone); + } + } + } +} +qdf_export_symbol(hif_print_napi_stats); + +static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx) +{ + struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx); + + tasklet_schedule(&t_ctx->tasklet); +} + +/** + * hif_exec_tasklet() - grp tasklet + * data: context + * + * return: void + */ +static void hif_exec_tasklet_fn(unsigned long data) +{ + struct hif_exec_context *hif_ext_group = + (struct hif_exec_context *)data; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif); + unsigned int work_done; + + work_done = + hif_ext_group->handler(hif_ext_group->context, 
HIF_MAX_BUDGET); + + if (hif_ext_group->work_complete(hif_ext_group, work_done)) { + qdf_atomic_dec(&(scn->active_grp_tasklet_cnt)); + hif_ext_group->irq_enable(hif_ext_group); + } else { + hif_exec_tasklet_schedule(hif_ext_group); + } +} + +/** + * hif_exec_poll() - napi poll handler for a hif exec group + * @napi: napi struct embedded in hif_napi_exec_context; @budget: NAPI budget + * + * return: NAPI-scale work done (0 when the group completed) + */ +static int hif_exec_poll(struct napi_struct *napi, int budget) +{ + struct hif_napi_exec_context *exec_ctx = + qdf_container_of(napi, struct hif_napi_exec_context, napi); + struct hif_exec_context *hif_ext_group = &exec_ctx->exec_ctx; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif); + int work_done; + int normalized_budget = 0; + int shift = hif_ext_group->scale_bin_shift; + int cpu = smp_processor_id(); + + if (budget) + normalized_budget = NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift); + work_done = hif_ext_group->handler(hif_ext_group->context, + normalized_budget); + + if (work_done < normalized_budget) { + napi_complete(napi); + qdf_atomic_dec(&scn->active_grp_tasklet_cnt); + hif_ext_group->irq_enable(hif_ext_group); + hif_ext_group->stats[cpu].napi_completes++; + } else { + /* if the ext_group supports time based yield, claim full work + * done anyways */ + work_done = normalized_budget; + } + + hif_ext_group->stats[cpu].napi_polls++; + hif_ext_group->stats[cpu].napi_workdone += work_done; + + /* map internal budget to NAPI budget */ + if (work_done) + work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift); + + return work_done; +} + +/** + * hif_exec_napi_schedule() - schedule the napi exec instance + * @ctx: a hif_exec_context known to be of napi type + */ +static void hif_exec_napi_schedule(struct hif_exec_context *ctx) +{ + struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx); + ctx->stats[smp_processor_id()].napi_schedules++; + + napi_schedule(&n_ctx->napi); +} + +/** + * hif_exec_napi_kill() - stop a napi exec context from being rescheduled + * @ctx: a hif_exec_context known to be of napi type + */ +static 
void hif_exec_napi_kill(struct hif_exec_context *ctx) +{ + struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx); + int irq_ind; + + if (ctx->inited) { + napi_disable(&n_ctx->napi); + ctx->inited = 0; + } + + for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++) + hif_irq_affinity_remove(ctx->os_irq[irq_ind]); + + netif_napi_del(&(n_ctx->napi)); +} + +struct hif_execution_ops napi_sched_ops = { + .schedule = &hif_exec_napi_schedule, + .kill = &hif_exec_napi_kill, +}; + +#ifdef FEATURE_NAPI +/** + * hif_exec_napi_create() - allocate and initialize a napi exec context + * @scale: a binary shift factor to map NAPI budget from\to internal + * budget + */ +static struct hif_exec_context *hif_exec_napi_create(uint32_t scale) +{ + struct hif_napi_exec_context *ctx; + + ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context)); + if (ctx == NULL) + return NULL; + + ctx->exec_ctx.sched_ops = &napi_sched_ops; + ctx->exec_ctx.inited = true; + ctx->exec_ctx.scale_bin_shift = scale; + init_dummy_netdev(&(ctx->netdev)); + netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll, + QCA_NAPI_BUDGET); + napi_enable(&ctx->napi); + + return &ctx->exec_ctx; +} +#else +static struct hif_exec_context *hif_exec_napi_create(uint32_t scale) +{ + HIF_WARN("%s: FEATURE_NAPI not defined, making tasklet"); + return hif_exec_tasklet_create(); +} +#endif + + +/** + * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled + * @ctx: a hif_exec_context known to be of tasklet type + */ +static void hif_exec_tasklet_kill(struct hif_exec_context *ctx) +{ + struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx); + int irq_ind; + + if (ctx->inited) { + tasklet_disable(&t_ctx->tasklet); + tasklet_kill(&t_ctx->tasklet); + } + ctx->inited = false; + + for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++) + hif_irq_affinity_remove(ctx->os_irq[irq_ind]); +} + +struct hif_execution_ops tasklet_sched_ops = { + .schedule = &hif_exec_tasklet_schedule, + .kill = 
&hif_exec_tasklet_kill, +}; + +/** + * hif_exec_tasklet_schedule() - allocate and initialize a tasklet exec context + */ +static struct hif_exec_context *hif_exec_tasklet_create(void) +{ + struct hif_tasklet_exec_context *ctx; + + ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context)); + if (ctx == NULL) + return NULL; + + ctx->exec_ctx.sched_ops = &tasklet_sched_ops; + tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn, + (unsigned long)ctx); + + ctx->exec_ctx.inited = true; + + return &ctx->exec_ctx; +} + +/** + * hif_exec_get_ctx() - retrieve an exec context based on an id + * @softc: the hif context owning the exec context + * @id: the id of the exec context + * + * mostly added to make it easier to rename or move the context array + */ +struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc, + uint8_t id) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc); + + if (id < hif_state->hif_num_extgroup) + return hif_state->hif_ext_group[id]; + + return NULL; +} + +/** + * hif_configure_ext_group_interrupts() - API to configure external group + * interrpts + * @hif_ctx : HIF Context + * + * Return: status + */ +uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + struct hif_exec_context *hif_ext_group; + int i, status; + + if (scn->ext_grp_irq_configured) { + HIF_ERROR("%s Called after ext grp irq configured\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < hif_state->hif_num_extgroup; i++) { + hif_ext_group = hif_state->hif_ext_group[i]; + status = 0; + qdf_spinlock_create(&hif_ext_group->irq_lock); + if (hif_ext_group->configured && + hif_ext_group->irq_requested == false) { + hif_ext_group->irq_enabled = true; + status = hif_grp_irq_configure(scn, hif_ext_group); + } + if (status != 0) { + HIF_ERROR("%s: failed for group %d", __func__, i); + hif_ext_group->irq_enabled = 
false; + } + } + + scn->ext_grp_irq_configured = true; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(hif_configure_ext_group_interrupts); + +/** + * hif_ext_group_interrupt_handler() - handler for related interrupts + * @irq: irq number of the interrupt + * @context: the associated hif_exec_group context + * + * This callback function takes care of dissabling the associated interrupts + * and scheduling the expected bottom half for the exec_context. + * This callback function also helps keep track of the count running contexts. + */ +irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context) +{ + struct hif_exec_context *hif_ext_group = context; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif); + + hif_ext_group->irq_disable(hif_ext_group); + qdf_atomic_inc(&scn->active_grp_tasklet_cnt); + + hif_ext_group->sched_ops->schedule(hif_ext_group); + + return IRQ_HANDLED; +} + +/** + * hif_exec_kill() - grp tasklet kill + * scn: hif_softc + * + * return: void + */ +void hif_exec_kill(struct hif_opaque_softc *hif_ctx) +{ + int i; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + + for (i = 0; i < hif_state->hif_num_extgroup; i++) + hif_state->hif_ext_group[i]->sched_ops->kill( + hif_state->hif_ext_group[i]); + + qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0); +} + +/** + * hif_register_ext_group() - API to register external group + * interrupt handler. 
+ * @hif_ctx : HIF Context + * @numirq: number of irq's in the group + * @irq: array of irq values + * @handler: callback interrupt handler function + * @cb_ctx: context to passed in callback + * @type: napi vs tasklet + * + * Return: status + */ +uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx, + uint32_t numirq, uint32_t irq[], ext_intr_handler handler, + void *cb_ctx, const char *context_name, + enum hif_exec_type type, uint32_t scale) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct hif_exec_context *hif_ext_group; + + if (scn->ext_grp_irq_configured) { + HIF_ERROR("%s Called after ext grp irq configured\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) { + HIF_ERROR("%s Max groups reached\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (numirq >= HIF_MAX_GRP_IRQ) { + HIF_ERROR("%s invalid numirq\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + hif_ext_group = hif_exec_create(type, scale); + if (hif_ext_group == NULL) + return QDF_STATUS_E_FAILURE; + + hif_state->hif_ext_group[hif_state->hif_num_extgroup] = + hif_ext_group; + + hif_ext_group->numirq = numirq; + qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0])); + hif_ext_group->context = cb_ctx; + hif_ext_group->handler = handler; + hif_ext_group->configured = true; + hif_ext_group->grp_id = hif_state->hif_num_extgroup; + hif_ext_group->hif = hif_ctx; + hif_ext_group->context_name = context_name; + + hif_state->hif_num_extgroup++; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(hif_register_ext_group); + +/** + * hif_exec_create() - create an execution context + * @type: the type of execution context to create + */ +struct hif_exec_context *hif_exec_create(enum hif_exec_type type, + uint32_t scale) +{ + HIF_INFO("%s: create exec_type %d budget %d\n", + __func__, type, QCA_NAPI_BUDGET * scale); + + switch (type) { + case 
HIF_EXEC_NAPI_TYPE: + return hif_exec_napi_create(scale); + + case HIF_EXEC_TASKLET_TYPE: + return hif_exec_tasklet_create(); + default: + return NULL; + } +} + +/** + * hif_exec_destroy() - free the hif_exec context + * @ctx: context to free + * + * please kill the context before freeing it to avoid a use after free. + */ +void hif_exec_destroy(struct hif_exec_context *ctx) +{ + qdf_spinlock_destroy(&ctx->irq_lock); + qdf_mem_free(ctx); +} + +/** + * hif_deregister_exec_group() - API to free the exec contexts + * @hif_ctx: HIF context + * @context_name: name of the module whose contexts need to be deregistered + * + * This function deregisters the contexts of the requestor identified + * based on the context_name & frees the memory. + * + * Return: void + */ +void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx, + const char *context_name) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct hif_exec_context *hif_ext_group; + int i; + + for (i = 0; i < HIF_MAX_GROUP; i++) { + hif_ext_group = hif_state->hif_ext_group[i]; + + if (!hif_ext_group) + continue; + + HIF_INFO("%s: Deregistering grp id %d name %s\n", + __func__, + hif_ext_group->grp_id, + hif_ext_group->context_name); + + if (strcmp(hif_ext_group->context_name, context_name) == 0) { + hif_ext_group->sched_ops->kill(hif_ext_group); + hif_state->hif_ext_group[i] = NULL; + hif_exec_destroy(hif_ext_group); + hif_state->hif_num_extgroup--; + } + + } +} +qdf_export_symbol(hif_deregister_exec_group); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_exec.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_exec.h new file mode 100644 index 0000000000000000000000000000000000000000..ceec48fbfcf7a5245f932d787bcc4b58730acbf4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_exec.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __HIF_EXEC_H__ +#define __HIF_EXEC_H__ + +#include +#include + +struct hif_exec_context; + +struct hif_execution_ops { + char *context_type; + void (*schedule)(struct hif_exec_context *); + void (*reschedule)(struct hif_exec_context *); + void (*kill)(struct hif_exec_context *); +}; + +/** + * hif_exec_context: only ever allocated as a subtype eg. + * hif_tasklet_exec_context + * + * @context: context for the handler function to use. + * @context_name: a pointer to a const string for debugging. + * this should help whenever there could be ambiguity + * in what type of context the void* context points to + * @irq: irq handle coresponding to hw block + * @os_irq: irq handle for irq_afinity + * @cpu: the cpu this context should be affined to + * @work_complete: Function call called when leaving the execution context to + * determine if this context should reschedule or wait for an interrupt. + * This function may be used as a hook for post processing. + * + * @irq_disable: called before scheduling the context. 
+ * @irq_enable: called when the context leaves polling mode + */ +struct hif_exec_context { + struct hif_execution_ops *sched_ops; + struct hif_opaque_softc *hif; + uint32_t numirq; + uint32_t irq[HIF_MAX_GRP_IRQ]; + uint32_t os_irq[HIF_MAX_GRP_IRQ]; + cpumask_t cpumask; + uint32_t grp_id; + uint32_t scale_bin_shift; + const char *context_name; + void *context; + ext_intr_handler handler; + + bool (*work_complete)(struct hif_exec_context *, int work_done); + void (*irq_enable)(struct hif_exec_context *); + void (*irq_disable)(struct hif_exec_context *); + + uint8_t cpu; + struct qca_napi_stat stats[NR_CPUS]; + bool inited; + bool configured; + bool irq_requested; + bool irq_enabled; + qdf_spinlock_t irq_lock; +}; + +/** + * struct hif_tasklet_exec_context - exec_context for tasklets + * @exec_ctx: inherited data type + * @tasklet: tasklet structure for scheduling + */ +struct hif_tasklet_exec_context { + struct hif_exec_context exec_ctx; + struct tasklet_struct tasklet; +}; + +/** + * struct hif_napi_exec_context - exec_context for tasklets + * @exec_ctx: inherited data type + * @netdev: dummy net device associated with the napi context + * @napi: napi structure used in scheduling + */ +struct hif_napi_exec_context { + struct hif_exec_context exec_ctx; + struct net_device netdev; /* dummy net_dev */ + struct napi_struct napi; +}; + +static inline struct hif_napi_exec_context* + hif_exec_get_napi(struct hif_exec_context *ctx) +{ + return (struct hif_napi_exec_context *) ctx; +} + +static inline struct hif_tasklet_exec_context* + hif_exec_get_tasklet(struct hif_exec_context *ctx) +{ + return (struct hif_tasklet_exec_context *) ctx; +} + +struct hif_exec_context *hif_exec_create(enum hif_exec_type type, + uint32_t scale); + +void hif_exec_destroy(struct hif_exec_context *ctx); + +int hif_grp_irq_configure(struct hif_softc *scn, + struct hif_exec_context *hif_exec); +irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context); + +struct hif_exec_context 
*hif_exec_get_ctx(struct hif_opaque_softc *hif, + uint8_t id); +void hif_exec_kill(struct hif_opaque_softc *scn); + +#endif + diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_hw_version.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_hw_version.h new file mode 100644 index 0000000000000000000000000000000000000000..4a55d7d3e49c9605af09717cec7c20589d14ca2b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_hw_version.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
 */

#ifndef HIF_HW_VERSION_H
#define HIF_HW_VERSION_H

/* Hardware version/revision codes. NOTE(review): these look like values
 * read back from a target version register and matched against the
 * attached chip -- confirm against the probe code that consumes them.
 */
#define AR6004_VERSION_REV1_3 0x31c8088a
#define AR9888_REV2_VERSION 0x4100016c
#define AR9887_REV1_VERSION 0x4100016d
#define AR6320_REV1_VERSION 0x5000000
#define AR6320_REV1_1_VERSION 0x5000001
#define AR6320_REV1_3_VERSION 0x5000003
#define AR6320_REV2_1_VERSION 0x5010000
#define AR6320_REV3_VERSION 0x5020000
#define AR6320_REV3_2_VERSION 0x5030000
#define QCA9379_REV1_VERSION 0x5040000
#define AR6320_DEV_VERSION 0x1000000
#define QCA9377_REV1_1_VERSION 0x5020001
#define WCN3990_v1 0x40000000
#define WCN3990_v2 0x40010000
#define WCN3990_v2_1 0x40010002
#define WCN3998 0x40030001

/**
 * struct qwlan_hw - maps a hardware (id, subid) pair to a printable name
 * @id: primary hardware version id (one of the codes above)
 * @subid: sub-revision id
 * @name: human-readable chip name
 */
struct qwlan_hw {
	u32 id;
	u32 subid;
	const char *name;
};

#endif /* HIF_HW_VERSION_H */
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_io32.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_io32.h
new file mode 100644
index 0000000000000000000000000000000000000000..de704539f947bcaf2d935d4532e7444b490e7d3c
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_io32.h
@@ -0,0 +1,101 @@
/*
 * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
+ */ + +#ifndef __HIF_IO32_H__ +#define __HIF_IO32_H__ + +#include +#include "hif.h" +#include "hif_main.h" + +#define hif_read32_mb(addr) ioread32((void __iomem *)addr) +#define hif_write32_mb(addr, value) \ + iowrite32((u32)(value), (void __iomem *)(addr)) + +#define Q_TARGET_ACCESS_BEGIN(scn) \ + hif_target_sleep_state_adjust(scn, false, true) +#define Q_TARGET_ACCESS_END(scn) \ + hif_target_sleep_state_adjust(scn, true, false) +#define TARGET_REGISTER_ACCESS_ALLOWED(scn)\ + hif_is_target_register_access_allowed(scn) + +/* + * A_TARGET_ACCESS_LIKELY will not wait for the target to wake up before + * continuing execution. Because A_TARGET_ACCESS_LIKELY does not guarantee + * that the target is awake before continuing, Q_TARGET_ACCESS macros must + * protect the actual target access. Since Q_TARGET_ACCESS protect the actual + * target access, A_TARGET_ACCESS_LIKELY hints are optional. + * + * To ignore "LIKELY" hints, set CONFIG_TARGET_ACCESS_LIKELY to 0 + * (slightly worse performance, less power) + * + * To use "LIKELY" hints, set CONFIG_TARGET_ACCESS_LIKELY to 1 + * (slightly better performance, more power) + * + * note: if a bus doesn't use hif_target_sleep_state_adjust, this will have + * no impact. 
+ */ +#define CONFIG_TARGET_ACCESS_LIKELY 0 +#if CONFIG_TARGET_ACCESS_LIKELY +#define A_TARGET_ACCESS_LIKELY(scn) \ + hif_target_sleep_state_adjust(scn, false, false) +#define A_TARGET_ACCESS_UNLIKELY(scn) \ + hif_target_sleep_state_adjust(scn, true, false) +#else /* CONFIG_ATH_PCIE_ACCESS_LIKELY */ +#define A_TARGET_ACCESS_LIKELY(scn) \ + do { \ + unsigned long unused = (unsigned long)(scn); \ + unused = unused; \ + } while (0) + +#define A_TARGET_ACCESS_UNLIKELY(scn) \ + do { \ + unsigned long unused = (unsigned long)(scn); \ + unused = unused; \ + } while (0) +#endif /* CONFIG_ATH_PCIE_ACCESS_LIKELY */ + + +#ifdef HIF_PCI +#include "hif_io32_pci.h" +#endif +#ifdef HIF_SNOC +#include "hif_io32_snoc.h" +#endif /* HIF_PCI */ + +#ifdef CONFIG_IO_MEM_ACCESS_DEBUG +uint32_t hif_target_read_checked(struct hif_softc *scn, + uint32_t offset); +void hif_target_write_checked(struct hif_softc *scn, uint32_t offset, + uint32_t value); + +#define A_TARGET_READ(scn, offset) \ + hif_target_read_checked(scn, (offset)) +#define A_TARGET_WRITE(scn, offset, value) \ + hif_target_write_checked(scn, (offset), (value)) +#else /* CONFIG_ATH_PCIE_ACCESS_DEBUG */ +#define A_TARGET_READ(scn, offset) \ + hif_read32_mb(scn->mem + (offset)) +#define A_TARGET_WRITE(scn, offset, value) \ + hif_write32_mb((scn->mem) + (offset), value) +#endif + +void hif_irq_enable(struct hif_softc *scn, int irq_id); +void hif_irq_disable(struct hif_softc *scn, int irq_id); + +#endif /* __HIF_IO32_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_irq_affinity.c b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_irq_affinity.c new file mode 100644 index 0000000000000000000000000000000000000000..6d23958235e6df584359f83f6da2fb5dcfd392e2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_irq_affinity.c @@ -0,0 +1,545 @@ +/* + * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: hif_irq_afinity.c + * + * This irq afinity implementation is os dependent, so this can be treated as + * an abstraction layer... Should this be moved into a /linux folder? + */ + +#include /* memset */ + +/* Linux headers */ +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_SCHED_CORE_CTL +#include +#endif +#include +#include +#include +#include +#include + +#if defined(FEATURE_NAPI_DEBUG) && defined(HIF_IRQ_AFFINITY) +/* + * Local functions + * - no argument checks, all internal/trusted callers + */ +static void hnc_dump_cpus(struct qca_napi_data *napid) +{ + hif_napi_stats(napid); +} +#else +static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ }; +#endif /* FEATURE_NAPI_DEBUG */ + +#ifdef HIF_IRQ_AFFINITY +/** + * + * hif_exec_event() - reacts to events that impact irq affinity + * @hif : pointer to hif context + * @evnt: event that has been detected + * @data: more data regarding the event + * + * Description: + * This function handles two types of events: + * 1- Events that change the state of NAPI (enabled/disabled): + * {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE} + * The state is retrievable by "hdd_napi_enabled(-1)" + * - NAPI will be on if either INI file is on 
and it has not been disabled + * by a subsequent vendor CMD, + * or it has been enabled by a vendor CMD. + * 2- Events that change the CPU affinity of a NAPI instance/IRQ: + * {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE} + * - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode + * - NAPI will switch throughput mode based on hdd_napi_throughput_policy() + * - In LO tput mode, NAPI will yield control if its interrupts to the system + * management functions. However in HI throughput mode, NAPI will actively + * manage its interrupts/instances (by trying to disperse them out to + * separate performance cores). + * - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events. + * + * + In some cases (roaming peer management is the only case so far), a + * a client can trigger a "SERIALIZE" event. Basically, this means that the + * users is asking NAPI to go into a truly single execution context state. + * So, NAPI indicates to msm-irqbalancer that it wants to be blacklisted, + * (if called for the first time) and then moves all IRQs (for NAPI + * instances) to be collapsed to a single core. If called multiple times, + * it will just re-collapse the CPUs. This is because blacklist-on() API + * is reference-counted, and because the API has already been called. + * + * Such a user, should call "DESERIALIZE" (NORMAL) event, to set NAPI to go + * to its "normal" operation. Optionally, they can give a timeout value (in + * multiples of BusBandwidthCheckPeriod -- 100 msecs by default). In this + * case, NAPI will just set the current throughput state to uninitialized + * and set the delay period. Once policy handler is called, it would skip + * applying the policy delay period times, and otherwise apply the policy. 
+ * + * Return: + * < 0: some error + * = 0: event handled successfully + */ +int hif_exec_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event, + void *data) +{ + int rc = 0; + uint32_t prev_state; + struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); + struct qca_napi_data *napid = &(hif->napi_data); + enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED; + enum { + BLACKLIST_NOT_PENDING, + BLACKLIST_ON_PENDING, + BLACKLIST_OFF_PENDING + } blacklist_pending = BLACKLIST_NOT_PENDING; + + NAPI_DEBUG("%s: -->(event=%d, aux=%pK)", __func__, event, data); + + qdf_spin_lock_bh(&(napid->lock)); + prev_state = napid->state; + switch (event) { + case NAPI_EVT_INI_FILE: + case NAPI_EVT_CMD_STATE: + case NAPI_EVT_INT_STATE: + /* deprecated */ + break; + + case NAPI_EVT_CPU_STATE: { + int cpu = ((unsigned long int)data >> 16); + int val = ((unsigned long int)data & 0x0ff); + + NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d", + __func__, cpu, val); + + /* state has already been set by hnc_cpu_notify_cb */ + if ((val == QCA_NAPI_CPU_DOWN) && + (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */ + (napid->napi_cpu[cpu].napis != 0)) { + NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d", + __func__, cpu); + rc = hif_exec_cpu_migrate(napid, + cpu, + HNC_ACT_RELOCATE); + napid->napi_cpu[cpu].napis = 0; + } + /* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */ + break; + } + + case NAPI_EVT_TPUT_STATE: { + tput_mode = (enum qca_napi_tput_state)data; + if (tput_mode == QCA_NAPI_TPUT_LO) { + /* from TPUT_HI -> TPUT_LO */ + NAPI_DEBUG("%s: Moving to napi_tput_LO state", + __func__); + blacklist_pending = BLACKLIST_OFF_PENDING; + /* + * Ideally we should "collapse" interrupts here, since + * we are "dispersing" interrupts in the "else" case. + * This allows the possibility that our interrupts may + * still be on the perf cluster the next time we enter + * high tput mode. 
However, the irq_balancer is free + * to move our interrupts to power cluster once + * blacklisting has been turned off in the "else" case. + */ + } else { + /* from TPUT_LO -> TPUT->HI */ + NAPI_DEBUG("%s: Moving to napi_tput_HI state", + __func__); + rc = hif_exec_cpu_migrate(napid, + HNC_ANY_CPU, + HNC_ACT_DISPERSE); + + blacklist_pending = BLACKLIST_ON_PENDING; + } + napid->napi_mode = tput_mode; + break; + } + + case NAPI_EVT_USR_SERIAL: { + unsigned long users = (unsigned long)data; + + NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld", + __func__, users); + + rc = hif_exec_cpu_migrate(napid, + HNC_ANY_CPU, + HNC_ACT_COLLAPSE); + if ((users == 0) && (rc == 0)) + blacklist_pending = BLACKLIST_ON_PENDING; + break; + } + case NAPI_EVT_USR_NORMAL: { + NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__); + if (!napid->user_cpu_affin_mask) + blacklist_pending = BLACKLIST_OFF_PENDING; + /* + * Deserialization timeout is handled at hdd layer; + * just mark current mode to uninitialized to ensure + * it will be set when the delay is over + */ + napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED; + break; + } + default: { + HIF_ERROR("%s: unknown event: %d (data=0x%0lx)", + __func__, event, (unsigned long) data); + break; + } /* default */ + }; /* switch */ + + + switch (blacklist_pending) { + case BLACKLIST_ON_PENDING: + /* assume the control of WLAN IRQs */ + hif_napi_cpu_blacklist(napid, BLACKLIST_ON); + break; + case BLACKLIST_OFF_PENDING: + /* yield the control of WLAN IRQs */ + hif_napi_cpu_blacklist(napid, BLACKLIST_OFF); + break; + default: /* nothing to do */ + break; + } /* switch blacklist_pending */ + + qdf_spin_unlock_bh(&(napid->lock)); + + NAPI_DEBUG("<--[rc=%d]", rc); + return rc; +} + +#endif + +/** + * hncm_migrate_to() - migrates a NAPI to a CPU + * @napid: pointer to NAPI block + * @ce_id: CE_id of the NAPI instance + * @didx : index in the CPU topology table for the CPU to migrate to + * + * Migrates NAPI (identified by the CE_id) to the 
destination core + * Updates the napi_map of the destination entry + * + * Return: + * =0 : success + * <0 : error + */ +static int hncm_exec_migrate_to(struct qca_napi_data *napid, uint8_t ctx_id, + int didx) +{ + struct hif_exec_context *exec_ctx; + int rc = 0; + int status = 0; + int ind; + + NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, napi_ce, didx); + + exec_ctx = hif_exec_get_ctx(&napid->hif_softc->osc, ctx_id); + if (exec_ctx == NULL) + return -EINVAL; + + exec_ctx->cpumask.bits[0] = (1 << didx); + + for (ind = 0; ind < exec_ctx->numirq; ind++) { + if (exec_ctx->os_irq[ind]) { + irq_modify_status(exec_ctx->os_irq[ind], + IRQ_NO_BALANCING, 0); + rc = irq_set_affinity_hint(exec_ctx->os_irq[ind], + &exec_ctx->cpumask); + if (rc) + status = rc; + } + } + + /* unmark the napis bitmap in the cpu table */ + napid->napi_cpu[exec_ctx->cpu].napis &= ~(0x01 << ctx_id); + /* mark the napis bitmap for the new designated cpu */ + napid->napi_cpu[didx].napis |= (0x01 << ctx_id); + exec_ctx->cpu = didx; + + NAPI_DEBUG("<--%s[%d]", __func__, rc); + return status; +} + +/** + * hncm_dest_cpu() - finds a destination CPU for NAPI + * @napid: pointer to NAPI block + * @act : RELOCATE | COLLAPSE | DISPERSE + * + * Finds the designated destionation for the next IRQ. 
+ * RELOCATE: translated to either COLLAPSE or DISPERSE based + * on napid->napi_mode (throughput state) + * COLLAPSE: All have the same destination: the first online CPU in lilcl + * DISPERSE: One of the CPU in bigcl, which has the smallest number of + * NAPIs on it + * + * Return: >=0 : index in the cpu topology table + * : < 0 : error + */ +static int hncm_dest_cpu(struct qca_napi_data *napid, int act) +{ + int destidx = -1; + int head, i; + + NAPI_DEBUG("-->%s(act=%d)", __func__, act); + if (act == HNC_ACT_RELOCATE) { + if (napid->napi_mode == QCA_NAPI_TPUT_LO) + act = HNC_ACT_COLLAPSE; + else + act = HNC_ACT_DISPERSE; + NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d", + __func__, act); + } + if (act == HNC_ACT_COLLAPSE) { + head = i = napid->lilcl_head; +retry_collapse: + while (i >= 0) { + if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) { + destidx = i; + break; + } + i = napid->napi_cpu[i].cluster_nxt; + } + if ((destidx < 0) && (head == napid->lilcl_head)) { + NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl", + __func__); + head = i = napid->bigcl_head; + goto retry_collapse; + } + } else { /* HNC_ACT_DISPERSE */ + int smallest = 99; /* all 32 bits full */ + int smallidx = -1; + + head = i = napid->bigcl_head; +retry_disperse: + while (i >= 0) { + if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) && + (hweight32(napid->napi_cpu[i].napis) <= smallest)) { + smallest = napid->napi_cpu[i].napis; + smallidx = i; + } + i = napid->napi_cpu[i].cluster_nxt; + } + destidx = smallidx; + if ((destidx < 0) && (head == napid->bigcl_head)) { + NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl", + __func__); + head = i = napid->lilcl_head; + goto retry_disperse; + } + } + NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx); + return destidx; +} +/** + * hif_napi_cpu_migrate() - migrate IRQs away + * @cpu: -1: all CPUs specific CPU + * @act: COLLAPSE | DISPERSE + * + * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible + * cores. 
Eligible cores are: + * act=COLLAPSE -> the first online core of the little cluster + * act=DISPERSE -> separate cores of the big cluster, so that each core will + * host minimum number of NAPIs/IRQs (napid->cpus[cpu].napis) + * + * Note that this function is called with a spinlock acquired already. + * + * Return: =0: success + * <0: error + */ +int hif_exec_cpu_migrate(struct qca_napi_data *napid, int cpu, int action) +{ + int rc = 0; + struct qca_napi_cpu *cpup; + int i, dind; + uint32_t napis; + + + NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)", + __func__, cpu, action); + + if (napid->exec_map == 0) { + NAPI_DEBUG("%s: datapath contexts to disperse", __func__); + goto hncm_return; + } + cpup = napid->napi_cpu; + + switch (action) { + case HNC_ACT_RELOCATE: + case HNC_ACT_DISPERSE: + case HNC_ACT_COLLAPSE: { + /* first find the src napi set */ + if (cpu == HNC_ANY_CPU) + napis = napid->exec_map; + else + napis = cpup[cpu].napis; + /* then clear the napi bitmap on each CPU */ + for (i = 0; i < NR_CPUS; i++) + cpup[i].napis = 0; + /* then for each of the NAPIs to disperse: */ + for (i = 0; i < HIF_MAX_GROUP; i++) + if (napis & (1 << i)) { + /* find a destination CPU */ + dind = hncm_dest_cpu(napid, action); + if (dind >= 0) { + NAPI_DEBUG("Migrating NAPI ce%d to %d", + i, dind); + rc = hncm_exec_migrate_to(napid, i, + dind); + } else { + NAPI_DEBUG("No dest for NAPI ce%d", i); + hnc_dump_cpus(napid); + rc = -1; + } + } + break; + } + default: { + NAPI_DEBUG("%s: bad action: %d\n", __func__, action); + QDF_BUG(0); + break; + } + } /* switch action */ + +hncm_return: + hnc_dump_cpus(napid); + return rc; +} + + +/** + * hif_exec_bl_irq() - calls irq_modify_status to enable/disable blacklisting + * @napid: pointer to qca_napi_data structure + * @bl_flag: blacklist flag to enable/disable blacklisting + * + * The function enables/disables blacklisting for all the copy engine + * interrupts on which NAPI is enabled. 
+ * + * Return: None + */ +static inline void hif_exec_bl_irq(struct qca_napi_data *napid, bool bl_flag) +{ + int i, j; + struct hif_exec_context *exec_ctx; + + for (i = 0; i < HIF_MAX_GROUP; i++) { + /* check if NAPI is enabled on the CE */ + if (!(napid->exec_map & (0x01 << i))) + continue; + + /*double check that NAPI is allocated for the CE */ + exec_ctx = hif_exec_get_ctx(&napid->hif_softc->osc, i); + if (!(exec_ctx)) + continue; + + if (bl_flag == true) + for (j = 0; j < exec_ctx->numirq; j++) + irq_modify_status(exec_ctx->os_irq[j], + 0, IRQ_NO_BALANCING); + else + for (j = 0; j < exec_ctx->numirq; j++) + irq_modify_status(exec_ctx->os_irq[j], + IRQ_NO_BALANCING, 0); + HIF_DBG("%s: bl_flag %d CE %d", __func__, bl_flag, i); + } +} + +#ifdef CONFIG_SCHED_CORE_CTL +/* Enable this API only if kernel feature - CONFIG_SCHED_CORE_CTL is defined */ +static inline int hif_napi_core_ctl_set_boost(bool boost) +{ + return core_ctl_set_boost(boost); +} +#else +static inline int hif_napi_core_ctl_set_boost(bool boost) +{ + return 0; +} +#endif + +/** + * hif_napi_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts. + * @napid: pointer to qca_napi_data structure + * @op: blacklist operation to perform + * + * The function enables/disables/queries blacklisting for all CE RX + * interrupts with NAPI enabled. Besides blacklisting, it also enables/disables + * core_ctl_set_boost. + * Once blacklisting is enabled, the interrupts will not be managed by the IRQ + * balancer. 
+ * + * Return: -EINVAL, in case IRQ_BLACKLISTING and CORE_CTL_BOOST is not enabled + * for BLACKLIST_QUERY op - blacklist refcount + * for BLACKLIST_ON op - return value from core_ctl_set_boost API + * for BLACKLIST_OFF op - return value from core_ctl_set_boost API + */ +int hif_exec_cpu_blacklist(struct qca_napi_data *napid, + enum qca_blacklist_op op) +{ + int rc = 0; + static int ref_count; /* = 0 by the compiler */ + uint8_t flags = napid->flags; + bool bl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING; + bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST; + + NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op); + + if (!(bl_en && ccb_en)) { + rc = -EINVAL; + goto out; + } + + switch (op) { + case BLACKLIST_QUERY: + rc = ref_count; + break; + case BLACKLIST_ON: + ref_count++; + rc = 0; + if (ref_count == 1) { + rc = hif_napi_core_ctl_set_boost(true); + NAPI_DEBUG("boost_on() returns %d - refcnt=%d", + rc, ref_count); + hif_exec_bl_irq(napid, true); + } + break; + case BLACKLIST_OFF: + if (ref_count) + ref_count--; + rc = 0; + if (ref_count == 0) { + rc = hif_napi_core_ctl_set_boost(false); + NAPI_DEBUG("boost_off() returns %d - refcnt=%d", + rc, ref_count); + hif_exec_bl_irq(napid, false); + } + break; + default: + NAPI_DEBUG("Invalid blacklist op: %d", op); + rc = -EINVAL; + } /* switch */ +out: + NAPI_DEBUG("<--%s[%d]", __func__, rc); + return rc; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_irq_affinity.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_irq_affinity.h new file mode 100644 index 0000000000000000000000000000000000000000..74b0470658b3459ee558a823c53724c6fbe8ef23 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_irq_affinity.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. 
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __HIF_IRQ_AFFINITY_H__
+#define __HIF_IRQ_AFFINITY_H__
+
+#ifdef HIF_IRQ_AFFINITY
+#ifndef FEATURE_NAPI
+#error /*HIF_IRQ_AFFINITY currently relies on FEATURE_NAPI*/
+#endif
+#endif
+
+/* CLD headers */
+#include <hif.h> /* struct hif_opaque_softc; */
+#include <hif_napi.h>
+struct hif_opaque_softc;
+enum qca_blacklist_op;
+
+int hif_exec_cpu_migrate(struct qca_napi_data *napid, int cpu, int action);
+
+int hif_exec_cpu_blacklist(struct qca_napi_data *napid,
+			   enum qca_blacklist_op op);
+
+#ifdef HIF_IRQ_AFFINITY
+int hif_exec_event(struct hif_opaque_softc *hif,
+		   enum qca_napi_event event,
+		   void *data);
+
+
+/* hif_irq_affinity_remove() - remove affinity before freeing the irq
+ * @os_irq: irq number to remove affinity from
+ */
+static inline void hif_irq_affinity_remove(int os_irq)
+{
+	irq_set_affinity_hint(os_irq, NULL);
+}
+#else
+static inline void hif_irq_affinity_remove(int os_irq)
+{
+}
+
+static inline int hif_exec_event(struct hif_opaque_softc *hif,
+				 enum qca_napi_event event,
+				 void *data)
+{
+	return 0;
+}
+#endif
+#endif
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.c b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.c
new file mode 100644
index 
0000000000000000000000000000000000000000..94b4f8ffb1b2c8fa50da22fd8ed75f9ab82ed2df --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.c @@ -0,0 +1,1302 @@ +/* + * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */
+
+#include "targcfg.h"
+#include "qdf_lock.h"
+#include "qdf_status.h"
+#include "qdf_status.h"
+#include <qdf_atomic.h> /* qdf_atomic_read */
+#include <targaddrs.h>
+#include "hif_io32.h"
+#include <hif.h>
+#include <target_type.h>
+#include "regtable.h"
+#define ATH_MODULE_NAME hif
+#include <a_debug.h>
+#include "hif_main.h"
+#include "hif_hw_version.h"
+#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
+#include "ce_tasklet.h"
+#include "ce_api.h"
+#endif
+#include "qdf_trace.h"
+#include "qdf_status.h"
+#include "hif_debug.h"
+#include "mp_dev.h"
+#ifdef QCA_WIFI_QCA8074
+#include "hal_api.h"
+#endif
+#include "hif_napi.h"
+#include "hif_unit_test_suspend_i.h"
+#include "qdf_module.h"
+
+void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
+{
+	hif_trigger_dump(hif_ctx, cmd_id, start);
+}
+
+/**
+ * hif_get_target_id(): hif_get_target_id
+ * @scn: hif_softc
+ *
+ * Return the virtual memory base address to the caller
+ *
+ *
+ * Return: A_target_id_t
+ */
+A_target_id_t hif_get_target_id(struct hif_softc *scn)
+{
+	return scn->mem;
+}
+
+/**
+ * hif_get_targetdef(): hif_get_targetdef
+ * @scn: scn
+ *
+ * Return: void *
+ */
+void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
+{
+	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
+
+	return scn->targetdef;
+}
+
+/**
+ * hif_vote_link_down(): unvote for link up
+ *
+ * Call hif_vote_link_down to release a previous request made using
+ * hif_vote_link_up. A hif_vote_link_down call should only be made
+ * after a corresponding hif_vote_link_up, otherwise you could be
+ * negating a vote from another source. When no votes are present
+ * hif will not guarantee the linkstate after hif_bus_suspend.
+ *
+ * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
+ * and initialization deinitialization sequences.
+ *
+ * Return: n/a
+ */
+void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
+{
+	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
+
+	QDF_BUG(scn);
+	scn->linkstate_vote--;
+	if (scn->linkstate_vote == 0)
+		hif_bus_prevent_linkdown(scn, false);
+}
+
+/**
+ * hif_vote_link_up(): vote to prevent bus from suspending
+ *
+ * Makes hif guarantee that fw can message the host normally
+ * during suspend.
+ *
+ * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
+ * and initialization deinitialization sequences.
+ *
+ * Return: n/a
+ */
+void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
+{
+	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
+
+	QDF_BUG(scn);
+	scn->linkstate_vote++;
+	if (scn->linkstate_vote == 1)
+		hif_bus_prevent_linkdown(scn, true);
+}
+
+/**
+ * hif_can_suspend_link(): query if hif is permitted to suspend the link
+ *
+ * Hif will ensure that the link won't be suspended if the upper layers
+ * don't want it to.
+ *
+ * SYNCHRONIZATION: MC thread is stopped before bus suspend thus
+ * we don't need extra locking to ensure votes don't change while
+ * we are in the process of suspending or resuming.
+ *
+ * Return: false if hif will guarantee link up during suspend.
+ */ +bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + QDF_BUG(scn); + return scn->linkstate_vote == 0; +} + +/** + * hif_hia_item_address(): hif_hia_item_address + * @target_type: target_type + * @item_offset: item_offset + * + * Return: n/a + */ +uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset) +{ + switch (target_type) { + case TARGET_TYPE_AR6002: + return AR6002_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_AR6003: + return AR6003_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_AR6004: + return AR6004_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_AR6006: + return AR6006_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_AR9888: + return AR9888_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_AR6320: + case TARGET_TYPE_AR6320V2: + return AR6320_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_ADRASTEA: + /* ADRASTEA doesn't have a host interest address */ + ASSERT(0); + return 0; + case TARGET_TYPE_AR900B: + return AR900B_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_QCA9984: + return QCA9984_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_QCA9888: + return QCA9888_HOST_INTEREST_ADDRESS + item_offset; + case TARGET_TYPE_IPQ4019: + return IPQ4019_HOST_INTEREST_ADDRESS + item_offset; + + default: + ASSERT(0); + return 0; + } +} + +/** + * hif_max_num_receives_reached() - check max receive is reached + * @scn: HIF Context + * @count: unsigned int. + * + * Output check status as bool + * + * Return: bool + */ +bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count) +{ + if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn))) + return count > 120; + else + return count > MAX_NUM_OF_RECEIVES; +} + +/** + * init_buffer_count() - initial buffer count + * @maxSize: qdf_size_t + * + * routine to modify the initial buffer count to be allocated on an os + * platform basis. 
Platform owner will need to modify this as needed + * + * Return: qdf_size_t + */ +qdf_size_t init_buffer_count(qdf_size_t maxSize) +{ + return maxSize; +} + +/** + * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint + * @hif_ctx: hif context + * @htc_htt_tx_endpoint: htt_tx_endpoint + * + * Return: void + */ +void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx, + int htc_htt_tx_endpoint) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (!scn) { + HIF_ERROR("%s: error: scn or scn->hif_sc is NULL!", + __func__); + return; + } + + scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint; +} +qdf_export_symbol(hif_save_htc_htt_config_endpoint); + +static const struct qwlan_hw qwlan_hw_list[] = { + { + .id = AR6320_REV1_VERSION, + .subid = 0, + .name = "QCA6174_REV1", + }, + { + .id = AR6320_REV1_1_VERSION, + .subid = 0x1, + .name = "QCA6174_REV1_1", + }, + { + .id = AR6320_REV1_3_VERSION, + .subid = 0x2, + .name = "QCA6174_REV1_3", + }, + { + .id = AR6320_REV2_1_VERSION, + .subid = 0x4, + .name = "QCA6174_REV2_1", + }, + { + .id = AR6320_REV2_1_VERSION, + .subid = 0x5, + .name = "QCA6174_REV2_2", + }, + { + .id = AR6320_REV3_VERSION, + .subid = 0x6, + .name = "QCA6174_REV2.3", + }, + { + .id = AR6320_REV3_VERSION, + .subid = 0x8, + .name = "QCA6174_REV3", + }, + { + .id = AR6320_REV3_VERSION, + .subid = 0x9, + .name = "QCA6174_REV3_1", + }, + { + .id = AR6320_REV3_2_VERSION, + .subid = 0xA, + .name = "AR6320_REV3_2_VERSION", + }, + { + .id = WCN3990_v1, + .subid = 0x0, + .name = "WCN3990_V1", + }, + { + .id = WCN3990_v2, + .subid = 0x0, + .name = "WCN3990_V2", + }, + { + .id = WCN3990_v2_1, + .subid = 0x0, + .name = "WCN3990_V2.1", + }, + { + .id = WCN3998, + .subid = 0x0, + .name = "WCN3998", + }, + { + .id = QCA9379_REV1_VERSION, + .subid = 0xC, + .name = "QCA9379_REV1", + }, + { + .id = QCA9379_REV1_VERSION, + .subid = 0xD, + .name = "QCA9379_REV1_1", + } +}; + +/** + * hif_get_hw_name(): get a human readable name for the hardware + * 
@info: Target Info + * + * Return: human readable name for the underlying wifi hardware. + */ +static const char *hif_get_hw_name(struct hif_target_info *info) +{ + int i; + + if (info->hw_name) + return info->hw_name; + + for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) { + if (info->target_version == qwlan_hw_list[i].id && + info->target_revision == qwlan_hw_list[i].subid) { + return qwlan_hw_list[i].name; + } + } + + info->hw_name = qdf_mem_malloc(64); + if (!info->hw_name) + return "Unknown Device (nomem)"; + + i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.", + info->target_version); + if (i < 0) + return "Unknown Device (snprintf failure)"; + else + return info->hw_name; +} + +/** + * hif_get_hw_info(): hif_get_hw_info + * @scn: scn + * @version: version + * @revision: revision + * + * Return: n/a + */ +void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision, + const char **target_name) +{ + struct hif_target_info *info = hif_get_target_info_handle(scn); + struct hif_softc *sc = HIF_GET_SOFTC(scn); + + if (sc->bus_type == QDF_BUS_TYPE_USB) + hif_usb_get_hw_info(sc); + + *version = info->target_version; + *revision = info->target_revision; + *target_name = hif_get_hw_name(info); +} + +/** + * hif_get_dev_ba(): API to get device base address. 
+ * @scn: scn + * @version: version + * @revision: revision + * + * Return: n/a + */ +void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle) +{ + struct hif_softc *scn = (struct hif_softc *)hif_handle; + + return scn->mem; +} +qdf_export_symbol(hif_get_dev_ba); +/** + * hif_open(): hif_open + * @qdf_ctx: QDF Context + * @mode: Driver Mode + * @bus_type: Bus Type + * @cbk: CDS Callbacks + * + * API to open HIF Context + * + * Return: HIF Opaque Pointer + */ +struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode, + enum qdf_bus_type bus_type, + struct hif_driver_state_callbacks *cbk) +{ + struct hif_softc *scn; + QDF_STATUS status = QDF_STATUS_SUCCESS; + int bus_context_size = hif_bus_get_context_size(bus_type); + + if (bus_context_size == 0) { + HIF_ERROR("%s: context size 0 not allowed", __func__); + return NULL; + } + + scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size); + if (!scn) { + HIF_ERROR("%s: cannot alloc memory for HIF context of size:%d", + __func__, bus_context_size); + return GET_HIF_OPAQUE_HDL(scn); + } + + scn->qdf_dev = qdf_ctx; + scn->hif_con_param = mode; + qdf_atomic_init(&scn->active_tasklet_cnt); + qdf_atomic_init(&scn->active_grp_tasklet_cnt); + qdf_atomic_init(&scn->link_suspended); + qdf_atomic_init(&scn->tasklet_from_intr); + qdf_mem_copy(&scn->callbacks, cbk, + sizeof(struct hif_driver_state_callbacks)); + scn->bus_type = bus_type; + status = hif_bus_open(scn, bus_type); + if (status != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: hif_bus_open error = %d, bus_type = %d", + __func__, status, bus_type); + qdf_mem_free(scn); + scn = NULL; + } + + return GET_HIF_OPAQUE_HDL(scn); +} + +#ifdef ADRASTEA_RRI_ON_DDR +/** + * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri + * @scn: hif context + * + * Return: none + */ +void hif_uninit_rri_on_ddr(struct hif_softc *scn) +{ + if (scn->vaddr_rri_on_ddr) + qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, + (CE_COUNT * sizeof(uint32_t)), + 
scn->vaddr_rri_on_ddr, + scn->paddr_rri_on_ddr, 0); + scn->vaddr_rri_on_ddr = NULL; +} +#endif + +/** + * hif_close(): hif_close + * @hif_ctx: hif_ctx + * + * Return: n/a + */ +void hif_close(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (scn == NULL) { + HIF_ERROR("%s: hif_opaque_softc is NULL", __func__); + return; + } + + if (scn->athdiag_procfs_inited) { + athdiag_procfs_remove(); + scn->athdiag_procfs_inited = false; + } + + if (scn->target_info.hw_name) { + char *hw_name = scn->target_info.hw_name; + + scn->target_info.hw_name = "ErrUnloading"; + qdf_mem_free(hw_name); + } + + hif_uninit_rri_on_ddr(scn); + + hif_bus_close(scn); + qdf_mem_free(scn); +} + +#ifdef QCA_WIFI_QCA8074 +static QDF_STATUS hif_hal_attach(struct hif_softc *scn) +{ + if (ce_srng_based(scn)) { + scn->hal_soc = hal_attach(scn, scn->qdf_dev); + if (scn->hal_soc == NULL) + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS hif_hal_detach(struct hif_softc *scn) +{ + if (ce_srng_based(scn)) { + hal_detach(scn->hal_soc); + scn->hal_soc = NULL; + } + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS hif_hal_attach(struct hif_softc *scn) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS hif_hal_detach(struct hif_softc *scn) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * hif_enable(): hif_enable + * @hif_ctx: hif_ctx + * @dev: dev + * @bdev: bus dev + * @bid: bus ID + * @bus_type: bus type + * @type: enable type + * + * Return: QDF_STATUS + */ +QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev, + void *bdev, + const struct hif_bus_id *bid, + enum qdf_bus_type bus_type, + enum hif_enable_type type) +{ + QDF_STATUS status; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (scn == NULL) { + HIF_ERROR("%s: hif_ctx = NULL", __func__); + return QDF_STATUS_E_NULL_VALUE; + } + + status = hif_enable_bus(scn, dev, bdev, bid, type); + if (status != QDF_STATUS_SUCCESS) { + 
HIF_ERROR("%s: hif_enable_bus error = %d", + __func__, status); + return status; + } + + status = hif_hal_attach(scn); + if (status != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: hal attach failed", __func__); + goto disable_bus; + } + + if (hif_bus_configure(scn)) { + HIF_ERROR("%s: Target probe failed.", __func__); + status = QDF_STATUS_E_FAILURE; + goto hal_detach; + } + + hif_ut_suspend_init(scn); + + /* + * Flag to avoid potential unallocated memory access from MSI + * interrupt handler which could get scheduled as soon as MSI + * is enabled, i.e to take care of the race due to the order + * in where MSI is enabled before the memory, that will be + * in interrupt handlers, is allocated. + */ + + scn->hif_init_done = true; + + HIF_DBG("%s: OK", __func__); + + return QDF_STATUS_SUCCESS; + +hal_detach: + hif_hal_detach(scn); +disable_bus: + hif_disable_bus(scn); + return status; +} + +void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (!scn) + return; + + hif_nointrs(scn); + if (scn->hif_init_done == false) + hif_shutdown_device(hif_ctx); + else + hif_stop(hif_ctx); + + hif_hal_detach(scn); + + hif_disable_bus(scn); + + hif_wlan_disable(scn); + + scn->notice_send = false; + + HIF_DBG("%s: X", __func__); +} + +void hif_display_stats(struct hif_opaque_softc *hif_ctx) +{ + hif_display_bus_stats(hif_ctx); +} + +void hif_clear_stats(struct hif_opaque_softc *hif_ctx) +{ + hif_clear_bus_stats(hif_ctx); +} + +/** + * hif_crash_shutdown_dump_bus_register() - dump bus registers + * @hif_ctx: hif_ctx + * + * Return: n/a + */ +#if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) \ +&& defined(DEBUG) + +static void hif_crash_shutdown_dump_bus_register(void *hif_ctx) +{ + struct hif_opaque_softc *scn = hif_ctx; + + if (hif_check_soc_status(scn)) + return; + + if (hif_dump_registers(scn)) + HIF_ERROR("Failed to dump bus registers!"); +} + +/** + * hif_crash_shutdown(): hif_crash_shutdown + * + * This 
function is called by the platform driver to dump CE registers + * + * @hif_ctx: hif_ctx + * + * Return: n/a + */ +void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + if (!hif_ctx) + return; + + if (scn->bus_type == QDF_BUS_TYPE_SNOC) { + HIF_INFO_MED("%s: RAM dump disabled for bustype %d", + __func__, scn->bus_type); + return; + } + + if (TARGET_STATUS_RESET == scn->target_status) { + HIF_INFO_MED("%s: Target is already asserted, ignore!", + __func__); + return; + } + + if (hif_is_load_or_unload_in_progress(scn)) { + HIF_ERROR("%s: Load/unload is in progress, ignore!", __func__); + return; + } + + hif_crash_shutdown_dump_bus_register(hif_ctx); + + if (ol_copy_ramdump(hif_ctx)) + goto out; + + HIF_INFO_MED("%s: RAM dump collecting completed!", __func__); + +out: + return; +} +#else +void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx) +{ + HIF_INFO_MED("%s: Collecting target RAM dump disabled", + __func__); +} +#endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */ + +#ifdef QCA_WIFI_3_0 +/** + * hif_check_fw_reg(): hif_check_fw_reg + * @scn: scn + * @state: + * + * Return: int + */ +int hif_check_fw_reg(struct hif_opaque_softc *scn) +{ + return 0; +} +#endif + +/** + * hif_read_phy_mem_base(): hif_read_phy_mem_base + * @scn: scn + * @phy_mem_base: physical mem base + * + * Return: n/a + */ +void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base) +{ + *phy_mem_base = scn->mem_pa; +} +qdf_export_symbol(hif_read_phy_mem_base); + +/** + * hif_get_device_type(): hif_get_device_type + * @device_id: device_id + * @revision_id: revision_id + * @hif_type: returned hif_type + * @target_type: returned target_type + * + * Return: int + */ +int hif_get_device_type(uint32_t device_id, + uint32_t revision_id, + uint32_t *hif_type, uint32_t *target_type) +{ + int ret = 0; + + switch (device_id) { + case ADRASTEA_DEVICE_ID_P2_E12: + + *hif_type = HIF_TYPE_ADRASTEA; + *target_type = 
TARGET_TYPE_ADRASTEA; + break; + + case AR9888_DEVICE_ID: + *hif_type = HIF_TYPE_AR9888; + *target_type = TARGET_TYPE_AR9888; + break; + + case AR6320_DEVICE_ID: + switch (revision_id) { + case AR6320_FW_1_1: + case AR6320_FW_1_3: + *hif_type = HIF_TYPE_AR6320; + *target_type = TARGET_TYPE_AR6320; + break; + + case AR6320_FW_2_0: + case AR6320_FW_3_0: + case AR6320_FW_3_2: + *hif_type = HIF_TYPE_AR6320V2; + *target_type = TARGET_TYPE_AR6320V2; + break; + + default: + HIF_ERROR("%s: error - dev_id = 0x%x, rev_id = 0x%x", + __func__, device_id, revision_id); + ret = -ENODEV; + goto end; + } + break; + + case AR9887_DEVICE_ID: + *hif_type = HIF_TYPE_AR9888; + *target_type = TARGET_TYPE_AR9888; + HIF_INFO(" *********** AR9887 **************"); + break; + + case QCA9984_DEVICE_ID: + *hif_type = HIF_TYPE_QCA9984; + *target_type = TARGET_TYPE_QCA9984; + HIF_INFO(" *********** QCA9984 *************"); + break; + + case QCA9888_DEVICE_ID: + *hif_type = HIF_TYPE_QCA9888; + *target_type = TARGET_TYPE_QCA9888; + HIF_INFO(" *********** QCA9888 *************"); + break; + + case AR900B_DEVICE_ID: + *hif_type = HIF_TYPE_AR900B; + *target_type = TARGET_TYPE_AR900B; + HIF_INFO(" *********** AR900B *************"); + break; + + case IPQ4019_DEVICE_ID: + *hif_type = HIF_TYPE_IPQ4019; + *target_type = TARGET_TYPE_IPQ4019; + HIF_INFO(" *********** IPQ4019 *************"); + break; + + case QCA8074_DEVICE_ID: + case RUMIM2M_DEVICE_ID_NODE0: + case RUMIM2M_DEVICE_ID_NODE1: + case RUMIM2M_DEVICE_ID_NODE2: + case RUMIM2M_DEVICE_ID_NODE3: + *hif_type = HIF_TYPE_QCA8074; + *target_type = TARGET_TYPE_QCA8074; + HIF_INFO(" *********** QCA8074 *************\n"); + break; + + case QCA6290_EMULATION_DEVICE_ID: + case QCA6290_DEVICE_ID: + *hif_type = HIF_TYPE_QCA6290; + *target_type = TARGET_TYPE_QCA6290; + HIF_INFO(" *********** QCA6290EMU *************\n"); + break; + + default: + HIF_ERROR("%s: Unsupported device ID!", __func__); + ret = -ENODEV; + break; + } + + if (*target_type == 
TARGET_TYPE_UNKNOWN) { + HIF_ERROR("%s: Unsupported target_type!", __func__); + ret = -ENODEV; + } +end: + return ret; +} + +/** + * hif_get_bus_type() - return the bus type + * + * Return: enum qdf_bus_type + */ +enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); + + return scn->bus_type; +} + +/** + * Target info and ini parameters are global to the driver + * Hence these structures are exposed to all the modules in + * the driver and they don't need to maintains multiple copies + * of the same info, instead get the handle from hif and + * modify them in hif + */ + +/** + * hif_get_ini_handle() - API to get hif_config_param handle + * @hif_ctx: HIF Context + * + * Return: pointer to hif_config_info + */ +struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx); + + return &sc->hif_config; +} + +/** + * hif_get_target_info_handle() - API to get hif_target_info handle + * @hif_ctx: HIF context + * + * Return: Pointer to hif_target_info + */ +struct hif_target_info *hif_get_target_info_handle( + struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx); + + return &sc->target_info; + +} +qdf_export_symbol(hif_get_target_info_handle); + +#ifdef RECEIVE_OFFLOAD +void hif_offld_flush_cb_register(struct hif_opaque_softc *scn, + void (offld_flush_handler)(void *)) +{ + if (hif_napi_enabled(scn, -1)) + hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler); + else + HIF_ERROR("NAPI not enabled\n"); +} +qdf_export_symbol(hif_offld_flush_cb_register); + +void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn) +{ + if (hif_napi_enabled(scn, -1)) + hif_napi_rx_offld_flush_cb_deregister(scn); + else + HIF_ERROR("NAPI not enabled\n"); +} +qdf_export_symbol(hif_offld_flush_cb_deregister); + +int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl) +{ + if (hif_napi_enabled(hif_hdl, -1)) 
+ return NAPI_PIPE2ID(ctx_id); + else + return ctx_id; +} +#else /* RECEIVE_OFFLOAD */ +int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl) +{ + return 0; +} +#endif /* RECEIVE_OFFLOAD */ + +#if defined(FEATURE_LRO) + +/** + * hif_get_lro_info - Returns LRO instance for instance ID + * @ctx_id: LRO instance ID + * @hif_hdl: HIF Context + * + * Return: Pointer to LRO instance. + */ +void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl) +{ + void *data; + + if (hif_napi_enabled(hif_hdl, -1)) + data = hif_napi_get_lro_info(hif_hdl, ctx_id); + else + data = hif_ce_get_lro_ctx(hif_hdl, ctx_id); + + return data; +} +#endif + +/** + * hif_get_target_status - API to get target status + * @hif_ctx: HIF Context + * + * Return: enum hif_target_status + */ +enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + return scn->target_status; +} +qdf_export_symbol(hif_get_target_status); + +/** + * hif_set_target_status() - API to set target status + * @hif_ctx: HIF Context + * @status: Target Status + * + * Return: void + */ +void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum + hif_target_status status) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + scn->target_status = status; +} + +/** + * hif_init_ini_config() - API to initialize HIF configuration parameters + * @hif_ctx: HIF Context + * @cfg: HIF Configuration + * + * Return: void + */ +void hif_init_ini_config(struct hif_opaque_softc *hif_ctx, + struct hif_config_info *cfg) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info)); +} + +/** + * hif_get_conparam() - API to get driver mode in HIF + * @scn: HIF Context + * + * Return: driver mode of operation + */ +uint32_t hif_get_conparam(struct hif_softc *scn) +{ + if (!scn) + return 0; + + return scn->hif_con_param; +} + +/** + * hif_get_callbacks_handle() - API to get 
callbacks Handle + * @scn: HIF Context + * + * Return: pointer to HIF Callbacks + */ +struct hif_driver_state_callbacks *hif_get_callbacks_handle( + struct hif_softc *scn) +{ + return &scn->callbacks; +} + +/** + * hif_is_driver_unloading() - API to query upper layers if driver is unloading + * @scn: HIF Context + * + * Return: True/False + */ +bool hif_is_driver_unloading(struct hif_softc *scn) +{ + struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); + + if (cbk && cbk->is_driver_unloading) + return cbk->is_driver_unloading(cbk->context); + + return false; +} + +/** + * hif_is_load_or_unload_in_progress() - API to query upper layers if + * load/unload in progress + * @scn: HIF Context + * + * Return: True/False + */ +bool hif_is_load_or_unload_in_progress(struct hif_softc *scn) +{ + struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); + + if (cbk && cbk->is_load_unload_in_progress) + return cbk->is_load_unload_in_progress(cbk->context); + + return false; +} + +/** + * hif_is_recovery_in_progress() - API to query upper layers if recovery in + * progress + * @scn: HIF Context + * + * Return: True/False + */ +bool hif_is_recovery_in_progress(struct hif_softc *scn) +{ + struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); + + if (cbk && cbk->is_recovery_in_progress) + return cbk->is_recovery_in_progress(cbk->context); + + return false; +} + +#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) + +/** + * hif_update_pipe_callback() - API to register pipe specific callbacks + * @osc: Opaque softc + * @pipeid: pipe id + * @callbacks: callbacks to register + * + * Return: void + */ + +void hif_update_pipe_callback(struct hif_opaque_softc *osc, + u_int8_t pipeid, + struct hif_msg_callbacks *callbacks) +{ + struct hif_softc *scn = HIF_GET_SOFTC(osc); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct HIF_CE_pipe_info *pipe_info; + + QDF_BUG(pipeid < CE_COUNT_MAX); + + HIF_INFO_LO("+%s pipeid 
%d\n", __func__, pipeid); + + pipe_info = &hif_state->pipe_info[pipeid]; + + qdf_mem_copy(&pipe_info->pipe_callbacks, + callbacks, sizeof(pipe_info->pipe_callbacks)); + + HIF_INFO_LO("-%s\n", __func__); +} +qdf_export_symbol(hif_update_pipe_callback); + +/** + * hif_is_target_ready() - API to query if target is in ready state + * progress + * @scn: HIF Context + * + * Return: True/False + */ +bool hif_is_target_ready(struct hif_softc *scn) +{ + struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn); + + if (cbk && cbk->is_target_ready) + return cbk->is_target_ready(cbk->context); + + return false; +} + +/** + * hif_batch_send() - API to access hif specific function + * ce_batch_send. + * @osc: HIF Context + * @msdu : list of msdus to be sent + * @transfer_id : transfer id + * @len : donwloaded length + * + * Return: list of msds not sent + */ +qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, + uint32_t transfer_id, u_int32_t len, uint32_t sendhead) +{ + void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE); + + return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id, + len, sendhead); +} +qdf_export_symbol(hif_batch_send); + +/** + * hif_update_tx_ring() - API to access hif specific function + * ce_update_tx_ring. + * @osc: HIF Context + * @num_htt_cmpls : number of htt compl received. + * + * Return: void + */ +void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls) +{ + void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE); + + ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls); +} +qdf_export_symbol(hif_update_tx_ring); + + +/** + * hif_send_single() - API to access hif specific function + * ce_send_single. 
+ * @osc: HIF Context + * @msdu : msdu to be sent + * @transfer_id: transfer id + * @len : downloaded length + * + * Return: msdu sent status + */ +int hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu, uint32_t + transfer_id, u_int32_t len) +{ + void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE); + + return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id, + len); +} +qdf_export_symbol(hif_send_single); + +#ifdef WLAN_FEATURE_FASTPATH +/** + * hif_send_fast() - API to access hif specific function + * ce_send_fast. + * @osc: HIF Context + * @msdu : array of msdus to be sent + * @num_msdus : number of msdus in an array + * @transfer_id: transfer id + * @download_len: download length + * + * Return: No. of packets that could be sent + */ +int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf, + uint32_t transfer_id, uint32_t download_len) +{ + void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE); + + return ce_send_fast((struct CE_handle *)ce_tx_hdl, nbuf, + transfer_id, download_len); +} +qdf_export_symbol(hif_send_fast); +#endif +#endif + +/** + * hif_reg_write() - API to access hif specific function + * hif_write32_mb. + * @hif_ctx : HIF Context + * @offset : offset on which value has to be written + * @value : value to be written + * + * Return: None + */ +void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset, + uint32_t value) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + hif_write32_mb(scn->mem + offset, value); + +} +qdf_export_symbol(hif_reg_write); + +/** + * hif_reg_read() - API to access hif specific function + * hif_read32_mb. 
+ * @hif_ctx : HIF Context + * @offset : offset from which value has to be read + * + * Return: Read value + */ +uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset) +{ + + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + return hif_read32_mb(scn->mem + offset); +} +qdf_export_symbol(hif_reg_read); + +/** + * hif_ramdump_handler(): generic ramdump handler + * @scn: struct hif_opaque_softc + * + * Return: None + */ +void hif_ramdump_handler(struct hif_opaque_softc *scn) +{ + if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB) + hif_usb_ramdump_handler(scn); +} + +#ifdef WLAN_SUSPEND_RESUME_TEST +irqreturn_t hif_wake_interrupt_handler(int irq, void *context) +{ + struct hif_softc *scn = context; + + HIF_INFO("wake interrupt received on irq %d", irq); + + if (scn->initial_wakeup_cb) + scn->initial_wakeup_cb(scn->initial_wakeup_priv); + + if (hif_is_ut_suspended(scn)) + hif_ut_fw_resume(scn); + + return IRQ_HANDLED; +} +#else /* WLAN_SUSPEND_RESUME_TEST */ +irqreturn_t hif_wake_interrupt_handler(int irq, void *context) +{ + struct hif_softc *scn = context; + + HIF_INFO("wake interrupt received on irq %d", irq); + + if (scn->initial_wakeup_cb) + scn->initial_wakeup_cb(scn->initial_wakeup_priv); + + return IRQ_HANDLED; +} +#endif /* WLAN_SUSPEND_RESUME_TEST */ + +void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx, + void (*callback)(void *), + void *priv) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + scn->initial_wakeup_cb = callback; + scn->initial_wakeup_priv = priv; +} + +void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif, + uint32_t ce_service_max_yield_time) +{ + struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif); + + hif_ctx->ce_service_max_yield_time = + ce_service_max_yield_time * 1000; +} + +unsigned long long +hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif) +{ + struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif); + + return hif_ctx->ce_service_max_yield_time; +} + +void 
hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif, + uint8_t ce_service_max_rx_ind_flush) +{ + struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif); + + if (ce_service_max_rx_ind_flush == 0 || + ce_service_max_rx_ind_flush > MSG_FLUSH_NUM) + hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM; + else + hif_ctx->ce_service_max_rx_ind_flush = + ce_service_max_rx_ind_flush; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.h new file mode 100644 index 0000000000000000000000000000000000000000..3685df1d09970ed5a137160e73339a3218002f8c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.h @@ -0,0 +1,317 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * NB: Inappropriate references to "HTC" are used in this (and other) + * HIF implementations. HTC is typically the calling layer, but it + * theoretically could be some alternative. + */ + +/* + * This holds all state needed to process a pending send/recv interrupt. + * The information is saved here as soon as the interrupt occurs (thus + * allowing the underlying CE to re-use the ring descriptor). 
The + * information here is eventually processed by a completion processing + * thread. + */ + +#ifndef __HIF_MAIN_H__ +#define __HIF_MAIN_H__ + +#include <qdf_atomic.h> /* qdf_atomic_read */ +#include "qdf_lock.h" +#include "cepci.h" +#include "hif.h" +#include "multibus.h" +#include "hif_unit_test_suspend_i.h" + +#define HIF_MIN_SLEEP_INACTIVITY_TIME_MS 50 +#define HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS 60 + +#define HIF_MAX_BUDGET 0xFFFF + +/* + * This macro implementation is exposed for efficiency only. + * The implementation may change and callers should + * consider the targid to be a completely opaque handle. + */ +#define TARGID_TO_PCI_ADDR(targid) (*((A_target_id_t *)(targid))) + +#ifdef QCA_WIFI_3_0 +#define DISABLE_L1SS_STATES 1 +#endif + +#define MAX_NUM_OF_RECEIVES HIF_NAPI_MAX_RECEIVES + +#ifdef QCA_WIFI_3_0_ADRASTEA +#define ADRASTEA_BU 1 +#else +#define ADRASTEA_BU 0 +#endif + +#ifdef QCA_WIFI_3_0 +#define HAS_FW_INDICATOR 0 +#else +#define HAS_FW_INDICATOR 1 +#endif + + +#define AR9888_DEVICE_ID (0x003c) +#define AR6320_DEVICE_ID (0x003e) +#define AR6320_FW_1_1 (0x11) +#define AR6320_FW_1_3 (0x13) +#define AR6320_FW_2_0 (0x20) +#define AR6320_FW_3_0 (0x30) +#define AR6320_FW_3_2 (0x32) +#define QCA6290_EMULATION_DEVICE_ID (0xabcd) +#define QCA6290_DEVICE_ID (0x1100) +#define ADRASTEA_DEVICE_ID_P2_E12 (0x7021) +#define AR9887_DEVICE_ID (0x0050) +#define AR900B_DEVICE_ID (0x0040) +#define QCA9984_DEVICE_ID (0x0046) +#define QCA9888_DEVICE_ID (0x0056) +#ifndef IPQ4019_DEVICE_ID +#define IPQ4019_DEVICE_ID (0x12ef) +#endif +#define QCA8074_DEVICE_ID (0xffff) /* Todo: replace this with + actual number once available.
+ currently defining this to 0xffff for + emulation purpose */ +#define RUMIM2M_DEVICE_ID_NODE0 0xabc0 +#define RUMIM2M_DEVICE_ID_NODE1 0xabc1 +#define RUMIM2M_DEVICE_ID_NODE2 0xabc2 +#define RUMIM2M_DEVICE_ID_NODE3 0xabc3 + +#define HIF_GET_PCI_SOFTC(scn) ((struct hif_pci_softc *)scn) +#define HIF_GET_CE_STATE(scn) ((struct HIF_CE_state *)scn) +#define HIF_GET_SDIO_SOFTC(scn) ((struct hif_sdio_softc *)scn) +#define HIF_GET_USB_SOFTC(scn) ((struct hif_usb_softc *)scn) +#define HIF_GET_USB_DEVICE(scn) ((struct HIF_DEVICE_USB *)scn) +#define HIF_GET_SOFTC(scn) ((struct hif_softc *)scn) +#define GET_HIF_OPAQUE_HDL(scn) ((struct hif_opaque_softc *)scn) + +struct hif_ce_stats { + int hif_pipe_no_resrc_count; + int ce_ring_delta_fail_count; +}; + +/* + * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked + * for defined here + */ +#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) +struct ce_desc_hist { + qdf_atomic_t history_index[CE_COUNT_MAX]; + uint32_t enable[CE_COUNT_MAX]; + uint32_t data_enable[CE_COUNT_MAX]; + uint32_t hist_index; + uint32_t hist_id; + void *hist_ev[CE_COUNT_MAX]; +}; +#endif /* #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */ + +struct hif_softc { + struct hif_opaque_softc osc; + struct hif_config_info hif_config; + struct hif_target_info target_info; + void __iomem *mem; + enum qdf_bus_type bus_type; + struct hif_bus_ops bus_ops; + void *ce_id_to_state[CE_COUNT_MAX]; + qdf_device_t qdf_dev; + bool hif_init_done; + bool request_irq_done; + bool ext_grp_irq_configured; + /* Packet statistics */ + struct hif_ce_stats pkt_stats; + enum hif_target_status target_status; + + struct targetdef_s *targetdef; + struct ce_reg_def *target_ce_def; + struct hostdef_s *hostdef; + struct host_shadow_regs_s *host_shadow_regs; + + bool recovery; + bool notice_send; + bool per_ce_irq; + uint32_t ce_irq_summary; + /* No of copy engines supported */ + unsigned int ce_count; + atomic_t active_tasklet_cnt; + 
atomic_t active_grp_tasklet_cnt; + atomic_t link_suspended; + uint32_t *vaddr_rri_on_ddr; + qdf_dma_addr_t paddr_rri_on_ddr; + int linkstate_vote; + bool fastpath_mode_on; + bool polled_mode_on; + atomic_t tasklet_from_intr; + int htc_htt_tx_endpoint; + qdf_dma_addr_t mem_pa; + bool athdiag_procfs_inited; +#ifdef FEATURE_NAPI + struct qca_napi_data napi_data; +#endif /* FEATURE_NAPI */ + /* stores ce_service_max_yield_time in ns */ + unsigned long long ce_service_max_yield_time; + uint8_t ce_service_max_rx_ind_flush; + struct hif_driver_state_callbacks callbacks; + uint32_t hif_con_param; +#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT + uint32_t nss_wifi_ol_mode; +#endif + void *hal_soc; + struct hif_ut_suspend_context ut_suspend_ctx; + uint32_t hif_attribute; + int wake_irq; + void (*initial_wakeup_cb)(void *); + void *initial_wakeup_priv; +#ifdef REMOVE_PKT_LOG + /* Handle to pktlog device */ + void *pktlog_dev; +#endif + +/* + * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked + * for defined here + */ +#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) + struct ce_desc_hist hif_ce_desc_hist; +#endif /* #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */ +#ifdef IPA_OFFLOAD + qdf_shared_mem_t *ipa_ce_ring; +#endif +}; + +static inline void *hif_get_hal_handle(void *hif_hdl) +{ + struct hif_softc *sc = (struct hif_softc *)hif_hdl; + + if (!sc) + return NULL; + + return sc->hal_soc; +} + +#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT +static inline bool hif_is_nss_wifi_enabled(struct hif_softc *sc) +{ + return !!(sc->nss_wifi_ol_mode); +} +#else +static inline bool hif_is_nss_wifi_enabled(struct hif_softc *sc) +{ + return false; +} +#endif + +static inline uint8_t hif_is_attribute_set(struct hif_softc *sc, + uint32_t hif_attrib) +{ + return sc->hif_attribute == hif_attrib; +} + +A_target_id_t hif_get_target_id(struct hif_softc *scn); +void hif_dump_pipe_debug_count(struct hif_softc *scn); +void hif_display_bus_stats(struct 
hif_opaque_softc *scn); +void hif_clear_bus_stats(struct hif_opaque_softc *scn); +bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count); +void hif_shutdown_device(struct hif_opaque_softc *hif_ctx); +int hif_bus_configure(struct hif_softc *scn); +void hif_cancel_deferred_target_sleep(struct hif_softc *scn); +int hif_config_ce(struct hif_softc *scn); +void hif_unconfig_ce(struct hif_softc *scn); +void hif_ce_prepare_config(struct hif_softc *scn); +QDF_STATUS hif_ce_open(struct hif_softc *scn); +void hif_ce_close(struct hif_softc *scn); +int athdiag_procfs_init(void *scn); +void athdiag_procfs_remove(void); +/* routine to modify the initial buffer count to be allocated on an os + * platform basis. Platform owner will need to modify this as needed + */ +qdf_size_t init_buffer_count(qdf_size_t maxSize); + +irqreturn_t hif_fw_interrupt_handler(int irq, void *arg); +int hif_get_device_type(uint32_t device_id, + uint32_t revision_id, + uint32_t *hif_type, uint32_t *target_type); +/*These functions are exposed to HDD*/ +void hif_nointrs(struct hif_softc *scn); +void hif_bus_close(struct hif_softc *ol_sc); +QDF_STATUS hif_bus_open(struct hif_softc *ol_sc, + enum qdf_bus_type bus_type); +QDF_STATUS hif_enable_bus(struct hif_softc *ol_sc, struct device *dev, + void *bdev, const struct hif_bus_id *bid, enum hif_enable_type type); +void hif_disable_bus(struct hif_softc *scn); +void hif_bus_prevent_linkdown(struct hif_softc *scn, bool flag); +int hif_bus_get_context_size(enum qdf_bus_type bus_type); +void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *bar_value); +uint32_t hif_get_conparam(struct hif_softc *scn); +struct hif_driver_state_callbacks *hif_get_callbacks_handle( + struct hif_softc *scn); +bool hif_is_driver_unloading(struct hif_softc *scn); +bool hif_is_load_or_unload_in_progress(struct hif_softc *scn); +bool hif_is_recovery_in_progress(struct hif_softc *scn); +bool hif_is_target_ready(struct hif_softc *scn); +void 
hif_wlan_disable(struct hif_softc *scn); +int hif_target_sleep_state_adjust(struct hif_softc *scn, + bool sleep_ok, + bool wait_for_it); +/** + * hif_get_rx_ctx_id() - Returns NAPI instance ID based on CE ID + * @ctx_id: Rx CE context ID + * @hif_hdl: HIF Context + * + * Return: Rx instance ID + */ +int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl); +void hif_ramdump_handler(struct hif_opaque_softc *scn); +#ifdef HIF_USB +void hif_usb_get_hw_info(struct hif_softc *scn); +void hif_usb_ramdump_handler(struct hif_opaque_softc *scn); +#else +static inline void hif_usb_get_hw_info(struct hif_softc *scn) {} +static inline void hif_usb_ramdump_handler(struct hif_opaque_softc *scn) {} +#endif + +/** + * hif_wake_interrupt_handler() - interrupt handler for standalone wake irq + * @irq: the irq number that fired + * @context: the opaque pointer passed to request_irq() + * + * Return: an irq return type + */ +irqreturn_t hif_wake_interrupt_handler(int irq, void *context); + +#ifdef HIF_SNOC +bool hif_is_target_register_access_allowed(struct hif_softc *hif_sc); +#else +static inline +bool hif_is_target_register_access_allowed(struct hif_softc *hif_sc) +{ + return true; +} +#endif + +#ifdef ADRASTEA_RRI_ON_DDR +void hif_uninit_rri_on_ddr(struct hif_softc *scn); +#else +static inline +void hif_uninit_rri_on_ddr(struct hif_softc *scn) {} +#endif +#endif /* __HIF_MAIN_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_napi.c b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_napi.c new file mode 100644 index 0000000000000000000000000000000000000000..c7e5c9626c391f0e2bdf427d34461f73712af331 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_napi.c @@ -0,0 +1,1766 @@ +/* + * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: hif_napi.c + * + * HIF NAPI interface implementation + */ + +#include /* memset */ + +/* Linux headers */ +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_SCHED_CORE_CTL +#include +#endif +#include +#include + +/* Driver headers */ +#include +#include +#include +#include +#include +#include +#include "qdf_cpuhp.h" +#include "qdf_module.h" + +enum napi_decision_vector { + HIF_NAPI_NOEVENT = 0, + HIF_NAPI_INITED = 1, + HIF_NAPI_CONF_UP = 2 +}; +#define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP) + +#ifdef RECEIVE_OFFLOAD +/** + * hif_rxthread_napi_poll() - dummy napi poll for rx_thread NAPI + * @napi: Rx_thread NAPI + * @budget: NAPI BUDGET + * + * Return: 0 as it is not supposed to be polled at all as it is not scheduled. 
+ */ +static int hif_rxthread_napi_poll(struct napi_struct *napi, int budget) +{ + HIF_ERROR("This napi_poll should not be polled as we don't schedule it"); + QDF_ASSERT(0); + return 0; +} + +/** + * hif_init_rx_thread_napi() - Initialize dummy Rx_thread NAPI + * @napii: Handle to napi_info holding rx_thread napi + * + * Return: None + */ +static void hif_init_rx_thread_napi(struct qca_napi_info *napii) +{ + init_dummy_netdev(&napii->rx_thread_netdev); + netif_napi_add(&napii->rx_thread_netdev, &napii->rx_thread_napi, + hif_rxthread_napi_poll, 64); + napi_enable(&napii->rx_thread_napi); +} + +/** + * hif_deinit_rx_thread_napi() - Deinitialize dummy Rx_thread NAPI + * @napii: Handle to napi_info holding rx_thread napi + * + * Return: None + */ +static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii) +{ + netif_napi_del(&napii->rx_thread_napi); +} +#else /* RECEIVE_OFFLOAD */ +static void hif_init_rx_thread_napi(struct qca_napi_info *napii) +{ +} + +static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii) +{ +} +#endif + +/** + * hif_napi_create() - creates the NAPI structures for a given CE + * @hif : pointer to hif context + * @pipe_id: the CE id on which the instance will be created + * @poll : poll function to be used for this NAPI instance + * @budget : budget to be registered with the NAPI instance + * @scale : scale factor on the weight (to scaler budget to 1000) + * @flags : feature flags + * + * Description: + * Creates NAPI instances. This function is called + * unconditionally during initialization. It creates + * napi structures through the proper HTC/HIF calls. + * The structures are disabled on creation. 
+ * Note that for each NAPI instance a separate dummy netdev is used + * + * Return: + * < 0: error + * = 0: + * > 0: id of the created object (for multi-NAPI, number of objects created) + */ +int hif_napi_create(struct hif_opaque_softc *hif_ctx, + int (*poll)(struct napi_struct *, int), + int budget, + int scale, + uint8_t flags) +{ + int i; + struct qca_napi_data *napid; + struct qca_napi_info *napii; + struct CE_state *ce_state; + struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); + int rc = 0; + + NAPI_DEBUG("-->(budget=%d, scale=%d)", + budget, scale); + NAPI_DEBUG("hif->napi_data.state = 0x%08x", + hif->napi_data.state); + NAPI_DEBUG("hif->napi_data.ce_map = 0x%08x", + hif->napi_data.ce_map); + + napid = &(hif->napi_data); + if (0 == (napid->state & HIF_NAPI_INITED)) { + memset(napid, 0, sizeof(struct qca_napi_data)); + qdf_spinlock_create(&(napid->lock)); + + napid->state |= HIF_NAPI_INITED; + napid->flags = flags; + + rc = hif_napi_cpu_init(hif_ctx); + if (rc != 0 && rc != -EALREADY) { + HIF_ERROR("NAPI_initialization failed,. 
%d", rc); + rc = napid->ce_map; + goto hnc_err; + } else + rc = 0; + + HIF_DBG("%s: NAPI structures initialized, rc=%d", + __func__, rc); + } + for (i = 0; i < hif->ce_count; i++) { + ce_state = hif->ce_id_to_state[i]; + NAPI_DEBUG("ce %d: htt_rx=%d htt_tx=%d", + i, ce_state->htt_rx_data, + ce_state->htt_tx_data); + if (ce_srng_based(hif)) + continue; + + if (!ce_state->htt_rx_data) + continue; + + /* Now this is a CE where we need NAPI on */ + NAPI_DEBUG("Creating NAPI on pipe %d", i); + napii = qdf_mem_malloc(sizeof(*napii)); + napid->napis[i] = napii; + if (!napii) { + NAPI_DEBUG("NAPI alloc failure %d", i); + rc = -ENOMEM; + goto napii_alloc_failure; + } + } + + for (i = 0; i < hif->ce_count; i++) { + napii = napid->napis[i]; + if (!napii) + continue; + + NAPI_DEBUG("initializing NAPI for pipe %d", i); + memset(napii, 0, sizeof(struct qca_napi_info)); + napii->scale = scale; + napii->id = NAPI_PIPE2ID(i); + napii->hif_ctx = hif_ctx; + napii->irq = pld_get_irq(hif->qdf_dev->dev, i); + + if (napii->irq < 0) + HIF_WARN("%s: bad IRQ value for CE %d: %d", + __func__, i, napii->irq); + + init_dummy_netdev(&(napii->netdev)); + + NAPI_DEBUG("adding napi=%pK to netdev=%pK (poll=%pK, bdgt=%d)", + &(napii->napi), &(napii->netdev), poll, budget); + netif_napi_add(&(napii->netdev), &(napii->napi), poll, budget); + + NAPI_DEBUG("after napi_add"); + NAPI_DEBUG("napi=0x%pK, netdev=0x%pK", + &(napii->napi), &(napii->netdev)); + NAPI_DEBUG("napi.dev_list.prev=0x%pK, next=0x%pK", + napii->napi.dev_list.prev, + napii->napi.dev_list.next); + NAPI_DEBUG("dev.napi_list.prev=0x%pK, next=0x%pK", + napii->netdev.napi_list.prev, + napii->netdev.napi_list.next); + + hif_init_rx_thread_napi(napii); + napii->lro_ctx = qdf_lro_init(); + NAPI_DEBUG("Registering LRO for ce_id %d NAPI callback for %d lro_ctx %pK\n", + i, napii->id, napii->lro_ctx); + + /* It is OK to change the state variable below without + * protection as there should be no-one around yet + */ + napid->ce_map |= (0x01 << i); 
+ HIF_DBG("%s: NAPI id %d created for pipe %d", __func__, + napii->id, i); + } + + /* no ces registered with the napi */ + if (!ce_srng_based(hif) && napid->ce_map == 0) { + HIF_WARN("%s: no napis created for copy engines", __func__); + return -EFAULT; + } + + NAPI_DEBUG("napi map = %x", napid->ce_map); + NAPI_DEBUG("NAPI ids created for all applicable pipes"); + return napid->ce_map; + +napii_alloc_failure: + for (i = 0; i < hif->ce_count; i++) { + napii = napid->napis[i]; + napid->napis[i] = NULL; + if (napii) + qdf_mem_free(napii); + } + +hnc_err: + NAPI_DEBUG("<--napi_instances_map=%x]", napid->ce_map); + return rc; +} +qdf_export_symbol(hif_napi_create); + +#ifdef RECEIVE_OFFLOAD +void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl, + void (offld_flush_handler)(void *)) +{ + int i; + struct CE_state *ce_state; + struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); + struct qca_napi_data *napid; + struct qca_napi_info *napii; + + if (!scn) { + HIF_ERROR("%s: hif_state NULL!", __func__); + QDF_ASSERT(0); + return; + } + + napid = hif_napi_get_all(hif_hdl); + for (i = 0; i < scn->ce_count; i++) { + ce_state = scn->ce_id_to_state[i]; + if (ce_state && (ce_state->htt_rx_data)) { + napii = napid->napis[i]; + napii->offld_flush_cb = offld_flush_handler; + HIF_DBG("Registering offload for ce_id %d NAPI callback for %d flush_cb %pK\n", + i, napii->id, napii->offld_flush_cb); + } + } +} + +void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl) +{ + int i; + struct CE_state *ce_state; + struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); + struct qca_napi_data *napid; + struct qca_napi_info *napii; + + if (!scn) { + HIF_ERROR("%s: hif_state NULL!", __func__); + QDF_ASSERT(0); + return; + } + + napid = hif_napi_get_all(hif_hdl); + for (i = 0; i < scn->ce_count; i++) { + ce_state = scn->ce_id_to_state[i]; + if (ce_state && (ce_state->htt_rx_data)) { + napii = napid->napis[i]; + HIF_DBG("deRegistering offld for ce_id %d NAPI callback for 
%d flush_cb %pK\n", + i, napii->id, napii->offld_flush_cb); + /* Not required */ + napii->offld_flush_cb = NULL; + } + } +} +#endif /* RECEIVE_OFFLOAD */ + +/** + * + * hif_napi_destroy() - destroys the NAPI structures for a given instance + * @hif : pointer to hif context + * @ce_id : the CE id whose napi instance will be destroyed + * @force : if set, will destroy even if entry is active (de-activates) + * + * Description: + * Destroy a given NAPI instance. This function is called + * unconditionally during cleanup. + * Refuses to destroy an entry of it is still enabled (unless force=1) + * Marks the whole napi_data invalid if all instances are destroyed. + * + * Return: + * -EINVAL: specific entry has not been created + * -EPERM : specific entry is still active + * 0 < : error + * 0 = : success + */ +int hif_napi_destroy(struct hif_opaque_softc *hif_ctx, + uint8_t id, + int force) +{ + uint8_t ce = NAPI_ID2PIPE(id); + int rc = 0; + struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); + + NAPI_DEBUG("-->(id=%d, force=%d)", id, force); + + if (0 == (hif->napi_data.state & HIF_NAPI_INITED)) { + HIF_ERROR("%s: NAPI not initialized or entry %d not created", + __func__, id); + rc = -EINVAL; + } else if (0 == (hif->napi_data.ce_map & (0x01 << ce))) { + HIF_ERROR("%s: NAPI instance %d (pipe %d) not created", + __func__, id, ce); + if (hif->napi_data.napis[ce]) + HIF_ERROR("%s: memory allocated but ce_map not set %d (pipe %d)", + __func__, id, ce); + rc = -EINVAL; + } else { + struct qca_napi_data *napid; + struct qca_napi_info *napii; + + napid = &(hif->napi_data); + napii = napid->napis[ce]; + if (!napii) { + if (napid->ce_map & (0x01 << ce)) + HIF_ERROR("%s: napii & ce_map out of sync(ce %d)", + __func__, ce); + return -EINVAL; + } + + + if (hif->napi_data.state == HIF_NAPI_CONF_UP) { + if (force) { + napi_disable(&(napii->napi)); + HIF_DBG("%s: NAPI entry %d force disabled", + __func__, id); + NAPI_DEBUG("NAPI %d force disabled", id); + } else { + HIF_ERROR("%s: Cannot 
destroy active NAPI %d", + __func__, id); + rc = -EPERM; + } + } + if (0 == rc) { + NAPI_DEBUG("before napi_del"); + NAPI_DEBUG("napi.dlist.prv=0x%pK, next=0x%pK", + napii->napi.dev_list.prev, + napii->napi.dev_list.next); + NAPI_DEBUG("dev.napi_l.prv=0x%pK, next=0x%pK", + napii->netdev.napi_list.prev, + napii->netdev.napi_list.next); + + qdf_lro_deinit(napii->lro_ctx); + netif_napi_del(&(napii->napi)); + hif_deinit_rx_thread_napi(napii); + + napid->ce_map &= ~(0x01 << ce); + napid->napis[ce] = NULL; + napii->scale = 0; + qdf_mem_free(napii); + HIF_DBG("%s: NAPI %d destroyed\n", __func__, id); + + /* if there are no active instances and + * if they are all destroyed, + * set the whole structure to uninitialized state + */ + if (napid->ce_map == 0) { + rc = hif_napi_cpu_deinit(hif_ctx); + /* caller is tolerant to receiving !=0 rc */ + + qdf_spinlock_destroy(&(napid->lock)); + memset(napid, + 0, sizeof(struct qca_napi_data)); + HIF_DBG("%s: no NAPI instances. Zapped.", + __func__); + } + } + } + + return rc; +} +qdf_export_symbol(hif_napi_destroy); + +#ifdef FEATURE_LRO +void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); + struct qca_napi_data *napid; + struct qca_napi_info *napii; + + napid = &(scn->napi_data); + napii = napid->napis[NAPI_ID2PIPE(napi_id)]; + + if (napii) + return napii->lro_ctx; + return 0; +} +#endif + +/** + * + * hif_napi_get_all() - returns the address of the whole HIF NAPI structure + * @hif: pointer to hif context + * + * Description: + * Returns the address of the whole structure + * + * Return: + * : address of the whole HIF NAPI structure + */ +inline struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); + + return &(hif->napi_data); +} + +struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid) +{ + int id = NAPI_ID2PIPE(napi_id); + + return napid->napis[id]; +} + +/** + * 
+ * hif_napi_event() - reacts to events that impact NAPI + * @hif : pointer to hif context + * @evnt: event that has been detected + * @data: more data regarding the event + * + * Description: + * This function handles two types of events: + * 1- Events that change the state of NAPI (enabled/disabled): + * {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE} + * The state is retrievable by "hdd_napi_enabled(-1)" + * - NAPI will be on if either INI file is on and it has not been disabled + * by a subsequent vendor CMD, + * or it has been enabled by a vendor CMD. + * 2- Events that change the CPU affinity of a NAPI instance/IRQ: + * {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE} + * - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode + * - NAPI will switch throughput mode based on hdd_napi_throughput_policy() + * - In LO tput mode, NAPI will yield control if its interrupts to the system + * management functions. However in HI throughput mode, NAPI will actively + * manage its interrupts/instances (by trying to disperse them out to + * separate performance cores). + * - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events. + * + * + In some cases (roaming peer management is the only case so far), a + * a client can trigger a "SERIALIZE" event. Basically, this means that the + * users is asking NAPI to go into a truly single execution context state. + * So, NAPI indicates to msm-irqbalancer that it wants to be blacklisted, + * (if called for the first time) and then moves all IRQs (for NAPI + * instances) to be collapsed to a single core. If called multiple times, + * it will just re-collapse the CPUs. This is because blacklist-on() API + * is reference-counted, and because the API has already been called. + * + * Such a user, should call "DESERIALIZE" (NORMAL) event, to set NAPI to go + * to its "normal" operation. Optionally, they can give a timeout value (in + * multiples of BusBandwidthCheckPeriod -- 100 msecs by default). 
In this
 * case, NAPI will just set the current throughput state to uninitialized
 * and set the delay period. Once policy handler is called, it would skip
 * applying the policy delay period times, and otherwise apply the policy.
 *
 * Return:
 * < 0: some error
 * = 0: event handled successfully
 */
int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event,
		   void *data)
{
	int rc = 0;
	uint32_t prev_state;
	int i;
	bool state_changed;
	struct napi_struct *napi;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
	struct qca_napi_data *napid = &(hif->napi_data);
	enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED;
	/* blacklist changes are decided inside the lock but applied after
	 * the event switch, so they are accumulated here first
	 */
	enum {
		BLACKLIST_NOT_PENDING,
		BLACKLIST_ON_PENDING,
		BLACKLIST_OFF_PENDING
	} blacklist_pending = BLACKLIST_NOT_PENDING;

	NAPI_DEBUG("%s: -->(event=%d, aux=%pK)", __func__, event, data);

	/* srng-based targets route their events to the exec layer instead */
	if (ce_srng_based(hif))
		return hif_exec_event(hif_ctx, event, data);

	if ((napid->state & HIF_NAPI_INITED) == 0) {
		NAPI_DEBUG("%s: got event when NAPI not initialized",
			   __func__);
		return -EINVAL;
	}
	qdf_spin_lock_bh(&(napid->lock));
	prev_state = napid->state;
	switch (event) {
	case NAPI_EVT_INI_FILE:
	case NAPI_EVT_CMD_STATE:
	case NAPI_EVT_INT_STATE: {
		int on = (data != ((void *)0));

		HIF_DBG("%s: recved evnt: STATE_CMD %d; v = %d (state=0x%0x)",
			__func__, event,
			on, prev_state);
		if (on)
			if (prev_state & HIF_NAPI_CONF_UP) {
				HIF_DBG("%s: duplicate NAPI conf ON msg",
					__func__);
			} else {
				HIF_DBG("%s: setting state to ON",
					__func__);
				napid->state |= HIF_NAPI_CONF_UP;
			}
		else /* off request */
			if (prev_state & HIF_NAPI_CONF_UP) {
				HIF_DBG("%s: setting state to OFF",
					__func__);
				napid->state &= ~HIF_NAPI_CONF_UP;
			} else {
				HIF_DBG("%s: duplicate NAPI conf OFF msg",
					__func__);
			}
		break;
	}
	/* case NAPI_INIT_FILE/CMD_STATE */

	case NAPI_EVT_CPU_STATE: {
		/* data packs (cpu << 16) | state; see hnc_cpu_before_offline_cb */
		int cpu = ((unsigned long int)data >> 16);
		int val = ((unsigned long int)data & 0x0ff);

		NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d",
			   __func__, cpu, val);

		/* state has already been set by hnc_cpu_notify_cb */
		if ((val == QCA_NAPI_CPU_DOWN) &&
		    (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */
		    (napid->napi_cpu[cpu].napis != 0)) {
			NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d",
				   __func__, cpu);
			rc = hif_napi_cpu_migrate(napid,
						  cpu,
						  HNC_ACT_RELOCATE);
			napid->napi_cpu[cpu].napis = 0;
		}
		/* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */
		break;
	}

	case NAPI_EVT_TPUT_STATE: {
		tput_mode = (enum qca_napi_tput_state)data;
		if (tput_mode == QCA_NAPI_TPUT_LO) {
			/* from TPUT_HI -> TPUT_LO */
			NAPI_DEBUG("%s: Moving to napi_tput_LO state",
				   __func__);
			blacklist_pending = BLACKLIST_OFF_PENDING;
			/*
			 * Ideally we should "collapse" interrupts here, since
			 * we are "dispersing" interrupts in the "else" case.
			 * This allows the possibility that our interrupts may
			 * still be on the perf cluster the next time we enter
			 * high tput mode. However, the irq_balancer is free
			 * to move our interrupts to power cluster once
			 * blacklisting has been turned off in the "else" case.
			 */
		} else {
			/* from TPUT_LO -> TPUT->HI */
			NAPI_DEBUG("%s: Moving to napi_tput_HI state",
				   __func__);
			rc = hif_napi_cpu_migrate(napid,
						  HNC_ANY_CPU,
						  HNC_ACT_DISPERSE);

			blacklist_pending = BLACKLIST_ON_PENDING;
		}
		napid->napi_mode = tput_mode;
		break;
	}

	case NAPI_EVT_USR_SERIAL: {
		unsigned long users = (unsigned long)data;

		NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld",
			   __func__, users);

		rc = hif_napi_cpu_migrate(napid,
					  HNC_ANY_CPU,
					  HNC_ACT_COLLAPSE);
		/* only the first serialize request turns blacklisting on */
		if ((users == 0) && (rc == 0))
			blacklist_pending = BLACKLIST_ON_PENDING;
		break;
	}
	case NAPI_EVT_USR_NORMAL: {
		NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__);
		if (!napid->user_cpu_affin_mask)
			blacklist_pending = BLACKLIST_OFF_PENDING;
		/*
		 * Deserialization timeout is handled at hdd layer;
		 * just mark current mode to uninitialized to ensure
		 * it will be set when the delay is over
		 */
		napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED;
		break;
	}
	default: {
		HIF_ERROR("%s: unknown event: %d (data=0x%0lx)",
			  __func__, event, (unsigned long) data);
		break;
	} /* default */
	}; /* switch */


	/* apply the blacklist decision accumulated by the event handling */
	switch (blacklist_pending) {
	case BLACKLIST_ON_PENDING:
		/* assume the control of WLAN IRQs */
		hif_napi_cpu_blacklist(napid, BLACKLIST_ON);
		break;
	case BLACKLIST_OFF_PENDING:
		/* yield the control of WLAN IRQs */
		hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);
		break;
	default: /* nothing to do */
		break;
	} /* switch blacklist_pending */

	/* we want to perform the comparison in lock:
	 * there is a possiblity of hif_napi_event get called
	 * from two different contexts (driver unload and cpu hotplug
	 * notification) and napid->state get changed
	 * in driver unload context and can lead to race condition
	 * in cpu hotplug context. Therefore, perform the napid->state
	 * comparison before releasing lock.
	 */
	state_changed = (prev_state != napid->state);
	qdf_spin_unlock_bh(&(napid->lock));

	if (state_changed) {
		if (napid->state == ENABLE_NAPI_MASK) {
			/* all conf bits are up: enable every allocated NAPI */
			rc = 1;
			for (i = 0; i < CE_COUNT_MAX; i++) {
				struct qca_napi_info *napii = napid->napis[i];
				if (napii) {
					napi = &(napii->napi);
					NAPI_DEBUG("%s: enabling NAPI %d",
						   __func__, i);
					napi_enable(napi);
				}
			}
		} else {
			rc = 0;
			for (i = 0; i < CE_COUNT_MAX; i++) {
				struct qca_napi_info *napii = napid->napis[i];
				if (napii) {
					napi = &(napii->napi);
					NAPI_DEBUG("%s: disabling NAPI %d",
						   __func__, i);
					napi_disable(napi);
					/* in case it is affined, remove it */
					irq_set_affinity_hint(napii->irq, NULL);
				}
			}
		}
	} else {
		HIF_DBG("%s: no change in hif napi state (still %d)",
			__func__, prev_state);
	}

	NAPI_DEBUG("<--[rc=%d]", rc);
	return rc;
}
qdf_export_symbol(hif_napi_event);

/**
 * hif_napi_enabled() - checks whether NAPI is enabled for given ce or not
 * @hif: hif context
 * @ce : CE instance (or -1, to check if any CEs are enabled)
 *
 * Return: bool
 */
int hif_napi_enabled(struct hif_opaque_softc *hif_ctx, int ce)
{
	int rc;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);

	if (-1 == ce)
		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK));
	else
		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK) &&
		      (hif->napi_data.ce_map & (0x01 << ce)));
	return rc;
}
qdf_export_symbol(hif_napi_enabled);

/**
 * hif_napi_enable_irq() - enables bus interrupts after napi_complete
 *
 * @hif: hif context
 * @id : id of NAPI instance calling this (used to determine the CE)
 *
 * Return: void
 */
inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif);

	hif_irq_enable(scn, NAPI_ID2PIPE(id));
}


/**
 * hif_napi_schedule() - schedules napi, updates stats
 * @hif_ctx: hif context
 * @ce_id: index of napi instance
 *
 * Return: false if the NAPI instance for @ce_id is not allocated,
 *         true after the instance has been scheduled
 */
int hif_napi_schedule(struct hif_opaque_softc
*hif_ctx, int ce_id) +{ + int cpu = smp_processor_id(); + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct qca_napi_info *napii; + + hif_record_ce_desc_event(scn, ce_id, NAPI_SCHEDULE, + NULL, NULL, 0, 0); + + napii = scn->napi_data.napis[ce_id]; + if (qdf_unlikely(!napii)) { + HIF_ERROR("%s, scheduling unallocated napi (ce:%d)", + __func__, ce_id); + qdf_atomic_dec(&scn->active_tasklet_cnt); + return false; + } + + napii->stats[cpu].napi_schedules++; + NAPI_DEBUG("scheduling napi %d (ce:%d)", napii->id, ce_id); + napi_schedule(&(napii->napi)); + + return true; +} +qdf_export_symbol(hif_napi_schedule); + +/** + * hif_napi_correct_cpu() - correct the interrupt affinity for napi if needed + * @napi_info: pointer to qca_napi_info for the napi instance + * + * Return: true => interrupt already on correct cpu, no correction needed + * false => interrupt on wrong cpu, correction done for cpu affinity + * of the interrupt + */ +static inline +bool hif_napi_correct_cpu(struct qca_napi_info *napi_info) +{ + bool right_cpu = true; + int rc = 0; + int cpu; + struct qca_napi_data *napid; + + napid = hif_napi_get_all(GET_HIF_OPAQUE_HDL(napi_info->hif_ctx)); + + if (napid->flags & QCA_NAPI_FEATURE_CPU_CORRECTION) { + + cpu = qdf_get_cpu(); + if (unlikely((hif_napi_cpu_blacklist(napid, + BLACKLIST_QUERY) > 0) && + (cpu != napi_info->cpu))) { + right_cpu = false; + + NAPI_DEBUG("interrupt on wrong CPU, correcting"); + napi_info->cpumask.bits[0] = (0x01 << napi_info->cpu); + + irq_modify_status(napi_info->irq, IRQ_NO_BALANCING, 0); + rc = irq_set_affinity_hint(napi_info->irq, + &napi_info->cpumask); + irq_modify_status(napi_info->irq, 0, IRQ_NO_BALANCING); + + if (rc) + HIF_ERROR("error setting irq affinity hint: %d", + rc); + else + napi_info->stats[cpu].cpu_corrected++; + } + } + return right_cpu; +} + +#ifdef RECEIVE_OFFLOAD +/** + * hif_napi_offld_flush_cb() - Call upper layer flush callback + * @napi_info: Handle to hif_napi_info + * + * Return: None + */ +static void 
hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
{
	if (napi_info->offld_flush_cb)
		napi_info->offld_flush_cb(napi_info);
}
#else
/* stub when RECEIVE_OFFLOAD is compiled out */
static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
{
}
#endif

/**
 * hif_napi_poll() - NAPI poll routine
 * @hif_ctx: hif context
 * @napi : pointer to NAPI struct as kernel holds it
 * @budget:
 *
 * This is the body of the poll function.
 * The poll function is called by kernel. So, there is a wrapper
 * function in HDD, which in turn calls this function.
 * Two main reasons why the whole thing is not implemented in HDD:
 * a) references to things like ce_service that HDD is not aware of
 * b) proximity to the implementation of ce_tasklet, which the body
 *    of this function should be very close to.
 *
 * NOTE TO THE MAINTAINER:
 *  Consider this function and ce_tasklet very tightly coupled pairs.
 *  Any changes to ce_tasklet or this function may likely need to be
 *  reflected in the counterpart.
 *
 * Returns:
 *  int: the amount of work done in this poll (<= budget)
 */
int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
		  struct napi_struct *napi,
		  int budget)
{
	int rc = 0; /* default: no work done, also takes care of error */
	int normalized = 0;
	int bucket;
	int cpu = smp_processor_id();
	bool poll_on_right_cpu;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
	struct qca_napi_info *napi_info;
	struct CE_state *ce_state = NULL;

	if (unlikely(NULL == hif)) {
		HIF_ERROR("%s: hif context is NULL", __func__);
		QDF_ASSERT(0);
		goto out;
	}

	napi_info = (struct qca_napi_info *)
		container_of(napi, struct qca_napi_info, napi);

	NAPI_DEBUG("%s -->(napi(%d, irq=%d), budget=%d)",
		   __func__, napi_info->id, napi_info->irq, budget);

	napi_info->stats[cpu].napi_polls++;

	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
				 NAPI_POLL_ENTER, NULL, NULL, cpu, 0);

	rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id));
	NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs",
		   __func__, rc);

	hif_napi_offld_flush_cb(napi_info);

	/* do not return 0, if there was some work done,
	 * even if it is below the scale
	 */
	if (rc) {
		napi_info->stats[cpu].napi_workdone += rc;
		normalized = (rc / napi_info->scale);
		if (normalized == 0)
			normalized++;
		/* histogram of how much of the budget each poll consumed */
		bucket = normalized / (QCA_NAPI_BUDGET / QCA_NAPI_NUM_BUCKETS);
		if (bucket >= QCA_NAPI_NUM_BUCKETS) {
			bucket = QCA_NAPI_NUM_BUCKETS - 1;
			HIF_ERROR("Bad bucket#(%d) > QCA_NAPI_NUM_BUCKETS(%d)",
				  bucket, QCA_NAPI_NUM_BUCKETS);
		}
		napi_info->stats[cpu].napi_budget_uses[bucket]++;
	} else {
		/* if ce_per engine reports 0, then poll should be terminated */
		NAPI_DEBUG("%s:%d: nothing processed by CE. Completing NAPI",
			   __func__, __LINE__);
	}

	ce_state = hif->ce_id_to_state[NAPI_ID2PIPE(napi_info->id)];

	/*
	 * Not using the API hif_napi_correct_cpu directly in the if statement
	 * below since the API may not get evaluated if put at the end if any
	 * prior condition would evaluate to be true. The CPU correction
	 * check should kick in every poll.
	 */
#ifdef NAPI_YIELD_BUDGET_BASED
	if (ce_state && (ce_state->force_break || 0 == rc)) {
#else
	poll_on_right_cpu = hif_napi_correct_cpu(napi_info);
	if ((ce_state) &&
	    (!ce_check_rx_pending(ce_state) || (0 == rc) ||
	     !poll_on_right_cpu)) {
#endif
		napi_info->stats[cpu].napi_completes++;
#ifdef NAPI_YIELD_BUDGET_BASED
		ce_state->force_break = 0;
#endif

		hif_record_ce_desc_event(hif, ce_state->id, NAPI_COMPLETE,
					 NULL, NULL, 0, 0);
		/* must report < budget, or the kernel keeps us scheduled */
		if (normalized >= budget)
			normalized = budget - 1;

		napi_complete(napi);
		/* enable interrupts */
		hif_napi_enable_irq(hif_ctx, napi_info->id);
		/* support suspend/resume */
		qdf_atomic_dec(&(hif->active_tasklet_cnt));

		NAPI_DEBUG("%s:%d: napi_complete + enabling the interrupts",
			   __func__, __LINE__);
	} else {
		/* 4.4 kernel NAPI implementation requires drivers to
		 * return full work when they ask to be re-scheduled,
		 * or napi_complete and re-start with a fresh interrupt
		 */
		normalized = budget;
	}

	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
				 NAPI_POLL_EXIT, NULL, NULL, normalized, 0);

	NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized);
	return normalized;
out:
	return rc;
}
qdf_export_symbol(hif_napi_poll);

/* track the longest single poll duration seen per (ce, cpu) pair */
void hif_update_napi_max_poll_time(struct CE_state *ce_state,
				   int ce_id,
				   int cpu_id)
{
	struct hif_softc *hif;
	struct qca_napi_info *napi_info;
	unsigned long long napi_poll_time = sched_clock() -
					ce_state->ce_service_start_time;

	hif = ce_state->scn;
	napi_info = hif->napi_data.napis[ce_id];
	if (napi_poll_time >
			napi_info->stats[cpu_id].napi_max_poll_time)
		napi_info->stats[cpu_id].napi_max_poll_time = napi_poll_time;
}

#ifdef HIF_IRQ_AFFINITY
/**
 *
 * hif_napi_update_yield_stats() - update NAPI yield related stats
 * @cpu_id: CPU ID for which stats needs to be updates
 * @ce_id: Copy Engine ID for which yield stats needs to be updates
 * @time_limit_reached: indicates whether the time limit was reached
 *
@rxpkt_thresh_reached: indicates whether rx packet threshold was reached + * + * Return: None + */ +void hif_napi_update_yield_stats(struct CE_state *ce_state, + bool time_limit_reached, + bool rxpkt_thresh_reached) +{ + struct hif_softc *hif; + struct qca_napi_data *napi_data = NULL; + int ce_id = 0; + int cpu_id = 0; + + if (unlikely(NULL == ce_state)) { + QDF_ASSERT(NULL != ce_state); + return; + } + + hif = ce_state->scn; + + if (unlikely(NULL == hif)) { + QDF_ASSERT(NULL != hif); + return; + } + napi_data = &(hif->napi_data); + if (unlikely(NULL == napi_data)) { + QDF_ASSERT(NULL != napi_data); + return; + } + + ce_id = ce_state->id; + cpu_id = qdf_get_cpu(); + + if (unlikely(!napi_data->napis[ce_id])) { + HIF_INFO("%s: NAPI info is NULL for ce id: %d", + __func__, ce_id); + return; + } + + if (time_limit_reached) + napi_data->napis[ce_id]->stats[cpu_id].time_limit_reached++; + else + napi_data->napis[ce_id]->stats[cpu_id].rxpkt_thresh_reached++; + + hif_update_napi_max_poll_time(ce_state, ce_id, + cpu_id); +} + +/** + * + * hif_napi_stats() - display NAPI CPU statistics + * @napid: pointer to qca_napi_data + * + * Description: + * Prints the various CPU cores on which the NAPI instances /CEs interrupts + * are being executed. Can be called from outside NAPI layer. 
+ * + * Return: None + */ +void hif_napi_stats(struct qca_napi_data *napid) +{ + int i; + struct qca_napi_cpu *cpu; + + if (napid == NULL) { + qdf_debug("%s: napiid struct is null", __func__); + return; + } + + cpu = napid->napi_cpu; + qdf_debug("NAPI CPU TABLE"); + qdf_debug("lilclhead=%d, bigclhead=%d", + napid->lilcl_head, napid->bigcl_head); + for (i = 0; i < NR_CPUS; i++) { + qdf_debug("CPU[%02d]: state:%d crid=%02d clid=%02d crmk:0x%0lx thmk:0x%0lx frq:%d napi = 0x%08x lnk:%d", + i, + cpu[i].state, cpu[i].core_id, cpu[i].cluster_id, + cpu[i].core_mask.bits[0], + cpu[i].thread_mask.bits[0], + cpu[i].max_freq, cpu[i].napis, + cpu[i].cluster_nxt); + } +} + +#ifdef FEATURE_NAPI_DEBUG +/* + * Local functions + * - no argument checks, all internal/trusted callers + */ +static void hnc_dump_cpus(struct qca_napi_data *napid) +{ + hif_napi_stats(napid); +} +#else +static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ }; +#endif /* FEATURE_NAPI_DEBUG */ +/** + * hnc_link_clusters() - partitions to cpu table into clusters + * @napid: pointer to NAPI data + * + * Takes in a CPU topology table and builds two linked lists + * (big cluster cores, list-head at bigcl_head, and little cluster + * cores, list-head at lilcl_head) out of it. + * + * If there are more than two clusters: + * - bigcl_head and lilcl_head will be different, + * - the cluster with highest cpufreq will be considered the "big" cluster. + * If there are more than one with the highest frequency, the *last* of such + * clusters will be designated as the "big cluster" + * - the cluster with lowest cpufreq will be considered the "li'l" cluster. 
 * If there are more than one clusters with the lowest cpu freq, the *first*
 * of such clusters will be designated as the "little cluster"
 * - We only support up to 32 clusters
 *   (NOTE(review): HNC_MAX_CLUSTER below caps cluster ids at 1, so in
 *    practice only clusters 0 and 1 are processed — confirm intent)
 * Return: 0 : OK
 *         !0: error (at least one of lil/big clusters could not be found)
 */
#define HNC_MIN_CLUSTER 0
#define HNC_MAX_CLUSTER 1
static int hnc_link_clusters(struct qca_napi_data *napid)
{
	int rc = 0;

	int i;
	int it = 0;
	uint32_t cl_done = 0x0;
	int cl, curcl, curclhead = 0;
	int more;
	unsigned int lilfrq = INT_MAX;
	unsigned int bigfrq = 0;
	unsigned int clfrq = 0;
	int prev = 0;
	struct qca_napi_cpu *cpus = napid->napi_cpu;

	napid->lilcl_head = napid->bigcl_head = -1;

	/* one outer iteration processes exactly one not-yet-done cluster;
	 * loop until a pass finds no unprocessed cluster (more == 0)
	 */
	do {
		more = 0;
		it++; curcl = -1;
		for (i = 0; i < NR_CPUS; i++) {
			cl = cpus[i].cluster_id;
			NAPI_DEBUG("Processing cpu[%d], cluster=%d\n",
				   i, cl);
			if ((cl < HNC_MIN_CLUSTER) || (cl > HNC_MAX_CLUSTER)) {
				NAPI_DEBUG("Bad cluster (%d). SKIPPED\n", cl);
				/* continue if ASSERTs are disabled */
				continue;
			};
			if (cpumask_weight(&(cpus[i].core_mask)) == 0) {
				NAPI_DEBUG("Core mask 0. SKIPPED\n");
				continue;
			}
			if (cl_done & (0x01 << cl)) {
				NAPI_DEBUG("Cluster already processed. SKIPPED\n");
				continue;
			} else {
				if (more == 0) {
					/* first entry of a new cluster */
					more = 1;
					curcl = cl;
					curclhead = i; /* row */
					clfrq = cpus[i].max_freq;
					prev = -1;
				};
				if ((curcl >= 0) && (curcl != cl)) {
					NAPI_DEBUG("Entry cl(%d) != curcl(%d). SKIPPED\n",
						   cl, curcl);
					continue;
				}
				if (cpus[i].max_freq != clfrq)
					NAPI_DEBUG("WARN: frq(%d)!=clfrq(%d)\n",
						   cpus[i].max_freq, clfrq);
				if (clfrq >= bigfrq) {
					bigfrq = clfrq;
					napid->bigcl_head = curclhead;
					NAPI_DEBUG("bigcl=%d\n", curclhead);
				}
				if (clfrq < lilfrq) {
					lilfrq = clfrq;
					napid->lilcl_head = curclhead;
					NAPI_DEBUG("lilcl=%d\n", curclhead);
				}
				/* chain this core onto the cluster list */
				if (prev != -1)
					cpus[prev].cluster_nxt = i;

				prev = i;
			}
		}
		if (curcl >= 0)
			cl_done |= (0x01 << curcl);

	} while (more);

	if (qdf_unlikely((napid->lilcl_head < 0) && (napid->bigcl_head < 0)))
		rc = -EFAULT;

	hnc_dump_cpus(napid); /* if NAPI_DEBUG */
	return rc;
}
#undef HNC_MIN_CLUSTER
#undef HNC_MAX_CLUSTER

/*
 * hotplug function group
 */

/**
 * hnc_cpu_online_cb() - handles CPU hotplug "up" events
 * @context: the associated HIF context
 * @cpu: the CPU Id of the CPU the event happened on
 *
 * Return: None
 */
static void hnc_cpu_online_cb(void *context, uint32_t cpu)
{
	struct hif_softc *hif = context;
	struct qca_napi_data *napid = &hif->napi_data;

	if (cpu >= NR_CPUS)
		return;

	NAPI_DEBUG("-->%s(act=online, cpu=%u)", __func__, cpu);

	napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP;
	NAPI_DEBUG("%s: CPU %u marked %d",
		   __func__, cpu, napid->napi_cpu[cpu].state);

	NAPI_DEBUG("<--%s", __func__);
}

/**
 * hnc_cpu_before_offline_cb() - handles CPU hotplug "prepare down" events
 * @context: the associated HIF context
 * @cpu: the CPU Id of the CPU the event happened on
 *
 * On transtion to offline, we act on PREP events, because we may need to move
 * the irqs/NAPIs to another CPU before it is actually off-lined.
 *
 * Return: None
 */
static void hnc_cpu_before_offline_cb(void *context, uint32_t cpu)
{
	struct hif_softc *hif = context;
	struct qca_napi_data *napid = &hif->napi_data;

	if (cpu >= NR_CPUS)
		return;

	NAPI_DEBUG("-->%s(act=before_offline, cpu=%u)", __func__, cpu);

	napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;

	NAPI_DEBUG("%s: CPU %u marked %d; updating affinity",
		   __func__, cpu, napid->napi_cpu[cpu].state);

	/**
	 * we need to move any NAPIs on this CPU out.
	 * if we are in LO throughput mode, then this is valid
	 * if the CPU is the the low designated CPU.
	 */
	hif_napi_event(GET_HIF_OPAQUE_HDL(hif),
		       NAPI_EVT_CPU_STATE,
		       (void *)
		       ((size_t)cpu << 16 | napid->napi_cpu[cpu].state));

	NAPI_DEBUG("<--%s", __func__);
}

/* registers the two hotplug callbacks above with the qdf cpuhp layer */
static int hnc_hotplug_register(struct hif_softc *hif_sc)
{
	QDF_STATUS status;

	NAPI_DEBUG("-->%s", __func__);

	status = qdf_cpuhp_register(&hif_sc->napi_data.cpuhp_handler,
				    hif_sc,
				    hnc_cpu_online_cb,
				    hnc_cpu_before_offline_cb);

	NAPI_DEBUG("<--%s [%d]", __func__, status);

	return qdf_status_to_os_return(status);
}

static void hnc_hotplug_unregister(struct hif_softc *hif_sc)
{
	NAPI_DEBUG("-->%s", __func__);

	if (hif_sc->napi_data.cpuhp_handler)
		qdf_cpuhp_unregister(&hif_sc->napi_data.cpuhp_handler);

	NAPI_DEBUG("<--%s", __func__);
}

/**
 * hnc_install_tput() - installs a callback in the throughput detector
 * @register: !0 => register; =0: unregister
 *
 * installs a callback to be called when wifi driver throughput (tx+rx)
 * crosses a threshold. Currently, we are using the same criteria as
 * TCP ack suppression (500 packets/100ms by default).
 *
 * Return: 0 : success
 *         <0: failure
 */

static int hnc_tput_hook(int install)
{
	int rc = 0;

	/*
	 * Nothing, until the bw_calculation accepts registration
	 * it is now hardcoded in the wlan_hdd_main.c::hdd_bus_bw_compute_cbk
	 * hdd_napi_throughput_policy(...)
	 */
	return rc;
}

/*
 * Implementation of hif_napi_cpu API
 */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i)
{
	cpumask_copy(&(cpus[i].thread_mask),
		     topology_sibling_cpumask(i));
}
#else
/* sibling topology API is unavailable on older kernels: leave mask empty */
static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i)
{
}
#endif


/**
 * hif_napi_cpu_init() - initialization of irq affinity block
 * @ctx: pointer to qca_napi_data
 *
 * called by hif_napi_create, after the first instance is called
 * - builds napi_rss_cpus table from cpu topology
 * - links cores of the same clusters together
 * - installs hot-plug notifier
 * - installs throughput trigger notifier (when such mechanism exists)
 *
 * Return: 0: OK
 *         <0: error code
 */
int hif_napi_cpu_init(struct hif_opaque_softc *hif)
{
	int rc = 0;
	int i;
	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
	struct qca_napi_cpu *cpus = napid->napi_cpu;

	NAPI_DEBUG("--> ");

	if (cpus[0].state != QCA_NAPI_CPU_UNINITIALIZED) {
		NAPI_DEBUG("NAPI RSS table already initialized.\n");
		rc = -EALREADY;
		goto lab_rss_init;
	}

	/* build CPU topology table */
	for_each_possible_cpu(i) {
		cpus[i].state = ((cpumask_test_cpu(i, cpu_online_mask)
				  ? QCA_NAPI_CPU_UP
				  : QCA_NAPI_CPU_DOWN));
		cpus[i].core_id = topology_core_id(i);
		cpus[i].cluster_id = topology_physical_package_id(i);
		cpumask_copy(&(cpus[i].core_mask),
			     topology_core_cpumask(i));
		record_sibling_cpumask(cpus, i);
		cpus[i].max_freq = cpufreq_quick_get_max(i);
		cpus[i].napis = 0x0;
		cpus[i].cluster_nxt = -1; /* invalid */
	}

	/* link clusters together */
	rc = hnc_link_clusters(napid);
	if (0 != rc)
		goto lab_err_topology;

	/* install hotplug notifier */
	rc = hnc_hotplug_register(HIF_GET_SOFTC(hif));
	if (0 != rc)
		goto lab_err_hotplug;

	/* install throughput notifier */
	rc = hnc_tput_hook(1);
	if (0 == rc)
		goto lab_rss_init;

lab_err_hotplug:
	/* unwind in reverse order of the setup above */
	hnc_tput_hook(0);
	hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
lab_err_topology:
	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
lab_rss_init:
	NAPI_DEBUG("<-- [rc=%d]", rc);
	return rc;
}

/**
 * hif_napi_cpu_deinit() - clean-up of irq affinity block
 *
 * called by hif_napi_destroy, when the last instance is removed
 * - uninstalls throughput and hotplug notifiers
 * - clears cpu topology table
 * Return: 0: OK
 */
int hif_napi_cpu_deinit(struct hif_opaque_softc *hif)
{
	int rc = 0;
	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;

	NAPI_DEBUG("-->%s(...)", __func__);

	/* uninstall tput notifier */
	rc = hnc_tput_hook(0);

	/* uninstall hotplug notifier */
	hnc_hotplug_unregister(HIF_GET_SOFTC(hif));

	/* clear the topology table */
	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);

	NAPI_DEBUG("<--%s[rc=%d]", __func__, rc);

	return rc;
}

/**
 * hncm_migrate_to() - migrates a NAPI to a CPU
 * @napid: pointer to NAPI block
 * @ce_id: CE_id of the NAPI instance
 * @didx : index in the CPU topology table for the CPU to migrate to
 *
 * Migrates NAPI (identified by the CE_id) to the destination core
 * Updates the napi_map of the destination entry
 *
 * Return:
 *  =0 :
success + * <0 : error + */ +static int hncm_migrate_to(struct qca_napi_data *napid, + int napi_ce, + int didx) +{ + int rc = 0; + + NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, napi_ce, didx); + + if (!napid->napis[napi_ce]) + return -EINVAL; + + napid->napis[napi_ce]->cpumask.bits[0] = (1 << didx); + + irq_modify_status(napid->napis[napi_ce]->irq, IRQ_NO_BALANCING, 0); + rc = irq_set_affinity_hint(napid->napis[napi_ce]->irq, + &napid->napis[napi_ce]->cpumask); + + /* unmark the napis bitmap in the cpu table */ + napid->napi_cpu[napid->napis[napi_ce]->cpu].napis &= ~(0x01 << napi_ce); + /* mark the napis bitmap for the new designated cpu */ + napid->napi_cpu[didx].napis |= (0x01 << napi_ce); + napid->napis[napi_ce]->cpu = didx; + + NAPI_DEBUG("<--%s[%d]", __func__, rc); + return rc; +} +/** + * hncm_dest_cpu() - finds a destination CPU for NAPI + * @napid: pointer to NAPI block + * @act : RELOCATE | COLLAPSE | DISPERSE + * + * Finds the designated destionation for the next IRQ. + * RELOCATE: translated to either COLLAPSE or DISPERSE based + * on napid->napi_mode (throughput state) + * COLLAPSE: All have the same destination: the first online CPU in lilcl + * DISPERSE: One of the CPU in bigcl, which has the smallest number of + * NAPIs on it + * + * Return: >=0 : index in the cpu topology table + * : < 0 : error + */ +static int hncm_dest_cpu(struct qca_napi_data *napid, int act) +{ + int destidx = -1; + int head, i; + + NAPI_DEBUG("-->%s(act=%d)", __func__, act); + if (act == HNC_ACT_RELOCATE) { + if (napid->napi_mode == QCA_NAPI_TPUT_LO) + act = HNC_ACT_COLLAPSE; + else + act = HNC_ACT_DISPERSE; + NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d", + __func__, act); + } + if (act == HNC_ACT_COLLAPSE) { + head = i = napid->lilcl_head; +retry_collapse: + while (i >= 0) { + if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) { + destidx = i; + break; + } + i = napid->napi_cpu[i].cluster_nxt; + } + if ((destidx < 0) && (head == napid->lilcl_head)) { + 
NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl", + __func__); + head = i = napid->bigcl_head; + goto retry_collapse; + } + } else { /* HNC_ACT_DISPERSE */ + int smallest = 99; /* all 32 bits full */ + int smallidx = -1; + + head = i = napid->bigcl_head; +retry_disperse: + while (i >= 0) { + if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) && + (hweight32(napid->napi_cpu[i].napis) <= smallest)) { + smallest = napid->napi_cpu[i].napis; + smallidx = i; + } + i = napid->napi_cpu[i].cluster_nxt; + } + /* Check if matches with user sepecified CPU mask */ + smallidx = ((1 << smallidx) & napid->user_cpu_affin_mask) ? + smallidx : -1; + + if ((smallidx < 0) && (head == napid->bigcl_head)) { + NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl", + __func__); + head = i = napid->lilcl_head; + goto retry_disperse; + } + destidx = smallidx; + } + NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx); + return destidx; +} +/** + * hif_napi_cpu_migrate() - migrate IRQs away + * @cpu: -1: all CPUs specific CPU + * @act: COLLAPSE | DISPERSE + * + * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible + * cores. Eligible cores are: + * act=COLLAPSE -> the first online core of the little cluster + * act=DISPERSE -> separate cores of the big cluster, so that each core will + * host minimum number of NAPIs/IRQs (napid->cpus[cpu].napis) + * + * Note that this function is called with a spinlock acquired already. + * + * Return: =0: success + * <0: error + */ + +int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action) +{ + int rc = 0; + struct qca_napi_cpu *cpup; + int i, dind; + uint32_t napis; + + NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)", + __func__, cpu, action); + /* the following is really: hif_napi_enabled() with less overhead */ + if (napid->ce_map == 0) { + NAPI_DEBUG("%s: NAPI disabled. 
Not migrating.", __func__); + goto hncm_return; + } + + cpup = napid->napi_cpu; + + switch (action) { + case HNC_ACT_RELOCATE: + case HNC_ACT_DISPERSE: + case HNC_ACT_COLLAPSE: { + /* first find the src napi set */ + if (cpu == HNC_ANY_CPU) + napis = napid->ce_map; + else + napis = cpup[cpu].napis; + /* then clear the napi bitmap on each CPU */ + for (i = 0; i < NR_CPUS; i++) + cpup[i].napis = 0; + /* then for each of the NAPIs to disperse: */ + for (i = 0; i < CE_COUNT_MAX; i++) + if (napis & (1 << i)) { + /* find a destination CPU */ + dind = hncm_dest_cpu(napid, action); + if (dind >= 0) { + NAPI_DEBUG("Migrating NAPI ce%d to %d", + i, dind); + rc = hncm_migrate_to(napid, i, dind); + } else { + NAPI_DEBUG("No dest for NAPI ce%d", i); + hnc_dump_cpus(napid); + rc = -1; + } + } + break; + } + default: { + NAPI_DEBUG("%s: bad action: %d\n", __func__, action); + QDF_BUG(0); + break; + } + } /* switch action */ + +hncm_return: + hnc_dump_cpus(napid); + return rc; +} + + +/** + * hif_napi_bl_irq() - calls irq_modify_status to enable/disable blacklisting + * @napid: pointer to qca_napi_data structure + * @bl_flag: blacklist flag to enable/disable blacklisting + * + * The function enables/disables blacklisting for all the copy engine + * interrupts on which NAPI is enabled. 
+ * + * Return: None + */ +static inline void hif_napi_bl_irq(struct qca_napi_data *napid, bool bl_flag) +{ + int i; + struct qca_napi_info *napii; + + for (i = 0; i < CE_COUNT_MAX; i++) { + /* check if NAPI is enabled on the CE */ + if (!(napid->ce_map & (0x01 << i))) + continue; + + /*double check that NAPI is allocated for the CE */ + napii = napid->napis[i]; + if (!(napii)) + continue; + + if (bl_flag == true) + irq_modify_status(napii->irq, + 0, IRQ_NO_BALANCING); + else + irq_modify_status(napii->irq, + IRQ_NO_BALANCING, 0); + HIF_DBG("%s: bl_flag %d CE %d", __func__, bl_flag, i); + } +} + +#ifdef CONFIG_SCHED_CORE_CTL +/* Enable this API only if kernel feature - CONFIG_SCHED_CORE_CTL is defined */ +static inline int hif_napi_core_ctl_set_boost(bool boost) +{ + return core_ctl_set_boost(boost); +} +#else +static inline int hif_napi_core_ctl_set_boost(bool boost) +{ + return 0; +} +#endif +/** + * hif_napi_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts. + * @napid: pointer to qca_napi_data structure + * @op: blacklist operation to perform + * + * The function enables/disables/queries blacklisting for all CE RX + * interrupts with NAPI enabled. Besides blacklisting, it also enables/disables + * core_ctl_set_boost. + * Once blacklisting is enabled, the interrupts will not be managed by the IRQ + * balancer. 
+ * + * Return: -EINVAL, in case IRQ_BLACKLISTING and CORE_CTL_BOOST is not enabled + * for BLACKLIST_QUERY op - blacklist refcount + * for BLACKLIST_ON op - return value from core_ctl_set_boost API + * for BLACKLIST_OFF op - return value from core_ctl_set_boost API + */ +int hif_napi_cpu_blacklist(struct qca_napi_data *napid, + enum qca_blacklist_op op) +{ + int rc = 0; + static int ref_count; /* = 0 by the compiler */ + uint8_t flags = napid->flags; + bool bl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING; + bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST; + + NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op); + + if (!(bl_en && ccb_en)) { + rc = -EINVAL; + goto out; + } + + switch (op) { + case BLACKLIST_QUERY: + rc = ref_count; + break; + case BLACKLIST_ON: + ref_count++; + rc = 0; + if (ref_count == 1) { + rc = hif_napi_core_ctl_set_boost(true); + NAPI_DEBUG("boost_on() returns %d - refcnt=%d", + rc, ref_count); + hif_napi_bl_irq(napid, true); + } + break; + case BLACKLIST_OFF: + if (ref_count) { + ref_count--; + rc = 0; + if (ref_count == 0) { + rc = hif_napi_core_ctl_set_boost(false); + NAPI_DEBUG("boost_off() returns %d - refcnt=%d", + rc, ref_count); + hif_napi_bl_irq(napid, false); + } + } + break; + default: + NAPI_DEBUG("Invalid blacklist op: %d", op); + rc = -EINVAL; + } /* switch */ +out: + NAPI_DEBUG("<--%s[%d]", __func__, rc); + return rc; +} + +/** + * hif_napi_serialize() - [de-]serialize NAPI operations + * @hif: context + * @is_on: 1: serialize, 0: deserialize + * + * hif_napi_serialize(hif, 1) can be called multiple times. It will perform the + * following steps (see hif_napi_event for code): + * - put irqs of all NAPI instances on the same CPU + * - only for the first serialize call: blacklist + * + * hif_napi_serialize(hif, 0): + * - start a timer (multiple of BusBandwidthTimer -- default: 100 msec) + * - at the end of the timer, check the current throughput state and + * implement it. 
+ */ +static unsigned long napi_serialize_reqs; +int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on) +{ + int rc = -EINVAL; + + if (hif != NULL) + switch (is_on) { + case 0: { /* de-serialize */ + rc = hif_napi_event(hif, NAPI_EVT_USR_NORMAL, + (void *) 0); + napi_serialize_reqs = 0; + break; + } /* end de-serialize */ + case 1: { /* serialize */ + rc = hif_napi_event(hif, NAPI_EVT_USR_SERIAL, + (void *)napi_serialize_reqs++); + break; + } /* end serialize */ + default: + break; /* no-op */ + } /* switch */ + return rc; +} + +#endif /* ifdef HIF_IRQ_AFFINITY */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_napi.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_napi.h new file mode 100644 index 0000000000000000000000000000000000000000..87f865eb2ca5a83f57fcbb1d39e86d4d7281eced --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_napi.h @@ -0,0 +1,316 @@ +/* + * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __HIF_NAPI_H__ +#define __HIF_NAPI_H__ + +/** + * DOC: hif_napi.h + * + * Interface to HIF implemented functions of NAPI. + * These are used by hdd_napi. 
/**
 * Following are some of NAPI related features controlled using feature flag.
 * These flags need to be enabled in the qca_napi_data->flags variable for the
 * feature to kick in.
 *
 * QCA_NAPI_FEATURE_CPU_CORRECTION   - controls CPU correction logic
 * QCA_NAPI_FEATURE_IRQ_BLACKLISTING - controls call to irq_blacklist_on API
 * QCA_NAPI_FEATURE_CORE_CTL_BOOST   - controls call to core_ctl_set_boost API
 */
#define QCA_NAPI_FEATURE_CPU_CORRECTION BIT(1)
#define QCA_NAPI_FEATURE_IRQ_BLACKLISTING BIT(2)
#define QCA_NAPI_FEATURE_CORE_CTL_BOOST BIT(3)
/**
 * hif_update_napi_max_poll_time() - updates NAPI max poll time
 * @ce_state: ce state
 * @ce_id: Copy engine ID
 * @cpu_id: cpu id
 *
 * This API updates NAPI max poll time per CE per CPU.
 *
 * Return: void
 */
void hif_update_napi_max_poll_time(struct CE_state *ce_state,
				   int ce_id,
				   int cpu_id);
/* NO-OP */ + +static inline int hif_napi_create(struct hif_opaque_softc *hif, + uint8_t pipe_id, + int (*poll)(struct napi_struct *, int), + int budget, + int scale, + uint8_t flags) +{ return -EPERM; } + +static inline int hif_napi_destroy(struct hif_opaque_softc *hif, + uint8_t id, + int force) +{ return -EPERM; } + +static inline struct qca_napi_data *hif_napi_get_all( + struct hif_opaque_softc *hif) +{ return NULL; } + +static inline int hif_napi_event(struct hif_opaque_softc *hif, + enum qca_napi_event event, + void *data) +{ return -EPERM; } + +/* called from the ISR within hif, so, ce is known */ +static inline int hif_napi_enabled(struct hif_opaque_softc *hif, int ce) +{ return 0; } + +/* called from hdd (napi_poll), using napi id as a selector */ +static inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id) +{ return; } + +static inline int hif_napi_schedule(struct hif_opaque_softc *hif, int ce_id) +{ return 0; } + +static inline int hif_napi_poll(struct napi_struct *napi, int budget) +{ return -EPERM; } + +/** + * hif_update_napi_max_poll_time() - updates NAPI max poll time + * @ce_state: ce state + * @ce_id: Copy engine ID + * @cpu_id: cpu id + * + * This API updates NAPI max poll time per CE per SPU. 
+ * + * Return: void + */ +static inline void hif_update_napi_max_poll_time(struct CE_state *ce_state, + int ce_id, + int cpu_id) +{ return; } +#endif /* FEATURE_NAPI */ + +#if defined(HIF_IRQ_AFFINITY) && defined(FEATURE_NAPI) +/* + * prototype signatures + */ +int hif_napi_cpu_init(struct hif_opaque_softc *hif); +int hif_napi_cpu_deinit(struct hif_opaque_softc *hif); + +int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action); +int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on); + +int hif_napi_cpu_blacklist(struct qca_napi_data *napid, + enum qca_blacklist_op op); + +/* not directly related to irq affinity, but oh well */ +void hif_napi_stats(struct qca_napi_data *napid); +void hif_napi_update_yield_stats(struct CE_state *ce_state, + bool time_limit_reached, + bool rxpkt_thresh_reached); +#else +struct qca_napi_data; +static inline int hif_napi_cpu_init(struct hif_opaque_softc *hif) +{ return 0; } + +static inline int hif_napi_cpu_deinit(struct hif_opaque_softc *hif) +{ return 0; } + +static inline int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, + int action) +{ return 0; } + +static inline int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on) +{ return -EPERM; } + +static inline void hif_napi_stats(struct qca_napi_data *napid) { } +static inline void hif_napi_update_yield_stats(struct CE_state *ce_state, + bool time_limit_reached, + bool rxpkt_thresh_reached) { } + +static inline int hif_napi_cpu_blacklist(struct qca_napi_data *napid, + enum qca_blacklist_op op) +{ return 0; } +#endif /* HIF_IRQ_AFFINITY */ + +#endif /* __HIF_NAPI_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_unit_test_suspend.c b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_unit_test_suspend.c new file mode 100644 index 0000000000000000000000000000000000000000..938648c325dec07ab895ce9e3cd89429e2d20a6a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_unit_test_suspend.c @@ -0,0 +1,114 @@ +/* + * 
Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_status.h" +#include "hif_main.h" +#include "hif_unit_test_suspend.h" +#include "hif_unit_test_suspend_i.h" + +enum hif_ut_suspend_state_bits { + UT_SUSPENDED_BIT = 0 +}; + +/** + * hif_ut_fw_resume_work() - Work handler for firmware-triggered resume + * @work: The work struct being passed from the linux kernel + * + * Return: None + */ +static void hif_ut_fw_resume_work(struct work_struct *work) +{ + struct hif_ut_suspend_context *ctx = + container_of(work, struct hif_ut_suspend_context, resume_work); + + QDF_BUG(ctx); + if (!ctx) + return; + + QDF_BUG(ctx->resume_callback); + if (!ctx->resume_callback) + return; + + ctx->resume_callback(); + ctx->resume_callback = NULL; +} + +void hif_ut_suspend_init(struct hif_softc *scn) +{ + INIT_WORK(&scn->ut_suspend_ctx.resume_work, hif_ut_fw_resume_work); +} + +bool hif_is_ut_suspended(struct hif_softc *scn) +{ + QDF_BUG(scn); + if (!scn) + return false; + + return test_bit(UT_SUSPENDED_BIT, &scn->ut_suspend_ctx.state); +} + +QDF_STATUS hif_ut_apps_suspend(struct hif_opaque_softc *opaque_scn, + hif_ut_resume_callback callback) +{ + struct hif_softc *scn = HIF_GET_SOFTC(opaque_scn); + + QDF_BUG(scn); + if 
(!scn) + return QDF_STATUS_E_INVAL; + + QDF_BUG(callback); + if (!callback) + return QDF_STATUS_E_INVAL; + + if (test_and_set_bit(UT_SUSPENDED_BIT, &scn->ut_suspend_ctx.state)) + return QDF_STATUS_E_INVAL; + + scn->ut_suspend_ctx.resume_callback = callback; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS hif_ut_apps_resume(struct hif_opaque_softc *opaque_scn) +{ + struct hif_softc *scn = HIF_GET_SOFTC(opaque_scn); + + QDF_BUG(scn); + if (!scn) + return QDF_STATUS_E_INVAL; + + if (!test_and_clear_bit(UT_SUSPENDED_BIT, &scn->ut_suspend_ctx.state)) + return QDF_STATUS_E_INVAL; + + scn->ut_suspend_ctx.resume_callback = NULL; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS hif_ut_fw_resume(struct hif_softc *scn) +{ + QDF_BUG(scn); + if (!scn) + return QDF_STATUS_E_INVAL; + + if (!test_and_clear_bit(UT_SUSPENDED_BIT, &scn->ut_suspend_ctx.state)) + return QDF_STATUS_E_INVAL; + + schedule_work(&scn->ut_suspend_ctx.resume_work); + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_unit_test_suspend_i.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_unit_test_suspend_i.h new file mode 100644 index 0000000000000000000000000000000000000000..dedb1e51cc92ed9da5150f0702975e4232ce8e78 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_unit_test_suspend_i.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: HIF internal unit-test related APIs for triggering WoW suspend/resume + * while the application processor is still up. + */ + +#ifndef _HIF_UNIT_TEST_SUSPEND_I_H_ +#define _HIF_UNIT_TEST_SUSPEND_I_H_ + +#include "qdf_status.h" +#include "hif_main.h" +#include "hif_unit_test_suspend.h" + +#ifdef WLAN_SUSPEND_RESUME_TEST + +struct hif_ut_suspend_context { + unsigned long state; + hif_ut_resume_callback resume_callback; + struct work_struct resume_work; +}; + +/** + * hif_ut_suspend_init() - Initialize the unit-test suspend context + * @scn: the hif context to initialize + * + * Return: None + */ +void hif_ut_suspend_init(struct hif_softc *scn); + +/** + * hif_is_ut_suspended() - Tests if the given hif context is unit-test suspended + * @scn: The HIF context to check + * + * Return: true, if unit-test suspended, otherwise false + */ +bool hif_is_ut_suspended(struct hif_softc *scn); + +/** + * hif_ut_fw_resume() - Initiate a firmware triggered unit-test resume + * @scn: The HIF context to operate on + * + * This schedules the callback previously registered via a call to + * hif_ut_apps_suspend for execution. 
#else /* WLAN_SUSPEND_RESUME_TEST */

/* Empty placeholder so hif_softc can always embed a context member.
 * NOTE(review): an empty struct is a GNU C extension (zero size) -
 * fine for gcc/clang kernel builds, not strict ISO C.
 */
struct hif_ut_suspend_context {};

/* stub: unit-test suspend support is compiled out; nothing to set up */
static inline void hif_ut_suspend_init(struct hif_softc *scn) {}

/* stub: never suspended when the feature is compiled out */
static inline bool hif_is_ut_suspended(struct hif_softc *scn)
{
	return false;
}

/* stub: resume trivially succeeds when the feature is compiled out */
static inline QDF_STATUS hif_ut_fw_resume(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}

#endif /* WLAN_SUSPEND_RESUME_TEST */

#endif /* _HIF_UNIT_TEST_SUSPEND_I_H_ */
+ */ + +#include "qdf_module.h" + +#if defined(IPQ4019_HEADERS_DEF) +#define AR900B 1 + +#define WLAN_HEADERS 1 +#include "common_drv.h" +#include "IPQ4019/soc_addrs.h" +#include "IPQ4019/extra/hw/apb_map.h" +#ifdef WLAN_HEADERS +#include "IPQ4019/extra/hw/wifi_top_reg_map.h" +#include "IPQ4019/hw/rtc_soc_reg.h" +#endif +#include "IPQ4019/hw/ce_wrapper_reg_csr.h" + +#include "IPQ4019/extra/hw/soc_core_reg.h" +#include "IPQ4019/extra/hw/ce_reg_csr.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Base address is defined in pcie_local_reg.h. Macros which access the + * registers include the base address in their definition. + */ + +#define FW_EVENT_PENDING_ADDRESS (WIFICMN_SCRATCH_3_ADDRESS) +#define DRAM_BASE_ADDRESS TARG_DRAM_START + +/* Backwards compatibility -- TBDXXX */ + +#define MISSING 0 + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB WIFI_SYSTEM_SLEEP_DISABLE_LSB +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK WIFI_SYSTEM_SLEEP_DISABLE_MASK +#define WLAN_RESET_CONTROL_COLD_RST_MASK WIFI_RESET_CONTROL_MAC_COLD_RST_MASK +#define WLAN_RESET_CONTROL_WARM_RST_MASK WIFI_RESET_CONTROL_MAC_WARM_RST_MASK +#define SOC_CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_ADDRESS +#define SOC_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_ADDRESS +#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_ADDRESS +#define SOC_LPO_CAL_OFFSET SOC_LPO_CAL_ADDRESS +#define SOC_RESET_CONTROL_CE_RST_MASK WIFI_RESET_CONTROL_CE_RESET_MASK +#define WLAN_SYSTEM_SLEEP_OFFSET WIFI_SYSTEM_SLEEP_ADDRESS +#define WLAN_RESET_CONTROL_OFFSET WIFI_RESET_CONTROL_ADDRESS +#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET +#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK +#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS +#define GPIO_PIN0_OFFSET MISSING +#define GPIO_PIN1_OFFSET MISSING +#define GPIO_PIN0_CONFIG_MASK MISSING +#define GPIO_PIN1_CONFIG_MASK MISSING 
+#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS +#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS +#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS +#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS +/*TBD:dakota Check if these can be removed for dakota */ +#define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB +#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK +#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB +#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK +#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING 
+#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* MAC Descriptor */ +#define RX_PPDU_END_ANTENNA_OFFSET_DWORD (RX_PPDU_END_25_RX_ANTENNA_OFFSET >> 2) +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS MISSING +#define GPIO_PIN0_CONFIG_LSB MISSING +#define GPIO_PIN0_PAD_PULL_LSB MISSING +#define GPIO_PIN0_PAD_PULL_MASK MISSING +/* SI reg */ +#define SI_CONFIG_ERR_INT_MASK MISSING +#define SI_CONFIG_ERR_INT_LSB MISSING +/* CE descriptor */ +#define CE_SRC_DESC_SIZE_DWORD 2 +#define CE_DEST_DESC_SIZE_DWORD 2 +#define CE_SRC_DESC_SRC_PTR_OFFSET_DWORD 0 +#define CE_SRC_DESC_INFO_OFFSET_DWORD 1 +#define CE_DEST_DESC_DEST_PTR_OFFSET_DWORD 0 +#define CE_DEST_DESC_INFO_OFFSET_DWORD 1 +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_SRC_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 16 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 15 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK 0x00002000 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT 13 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00001000 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT 12 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0x00000FFF +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_SRC_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 0 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 16 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK 
0x00040000 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT 18 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00080000 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT 19 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0xFFF00000 +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 20 +#endif +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_DEST_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 16 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 15 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK 0x00002000 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT 13 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00001000 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT 12 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0x00000FFF +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_DEST_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 0 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 16 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK 0x00040000 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT 18 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00080000 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT 19 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0xFFF00000 +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 20 +#endif + +#define MY_TARGET_DEF IPQ4019_TARGETdef +#define MY_HOST_DEF IPQ4019_HOSTdef +#define MY_CEREG_DEF IPQ4019_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ IPQ4019_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ IPQ4019_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +qdf_export_symbol(IPQ4019_CE_TARGETdef); +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s 
*IPQ4019_TARGETdef; +struct hostdef_s *IPQ4019_HOSTdef; +#endif /* IPQ4019_HEADERS_DEF */ +qdf_export_symbol(IPQ4019_TARGETdef); +qdf_export_symbol(IPQ4019_HOSTdef); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/mp_dev.c b/drivers/staging/qca-wifi-host-cmn/hif/src/mp_dev.c new file mode 100644 index 0000000000000000000000000000000000000000..c8db5d888cf85e5762c6d9ba9d75a7e871ea109e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/mp_dev.c @@ -0,0 +1,321 @@ +/* + * Copyright (c) 2013-2014, 2016-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "hif_io32.h" +#include "hif_debug.h" +#include "mp_dev.h" + +/*chaninfo*/ +#define CHANINFOMEM_S2_READ_MASK 0x00000008 +#define CHANINFO_CTRL_CAPTURE_CHAN_INFO_MASK 0x00000001 +#define CHANINFO_CTRL_CHANINFOMEM_BW_MASK 0x00000030 +#define MULTICHAIN_ENABLE_RX_CHAIN_MASK_MASK 0x00000007 + +/*agc*/ +#define GAINS_MIN_OFFSETS_CF_AGC_HIST_ENABLE_MASK 0x00040000 +#define GAINS_MIN_OFFSETS_CF_AGC_HIST_GC_MASK 0x00080000 +#define GAINS_MIN_OFFSETS_CF_AGC_HIST_VOTING_MASK 0x00100000 +#define GAINS_MIN_OFFSETS_CF_AGC_HIST_PHY_ERR_MASK 0x00200000 +#define AGC_HISTORY_DUMP_MASK (\ + GAINS_MIN_OFFSETS_CF_AGC_HIST_ENABLE_MASK| \ + GAINS_MIN_OFFSETS_CF_AGC_HIST_GC_MASK| \ + GAINS_MIN_OFFSETS_CF_AGC_HIST_VOTING_MASK| \ + GAINS_MIN_OFFSETS_CF_AGC_HIST_PHY_ERR_MASK \ + ) + +#define BB_chaninfo_ctrl 0x1a370 +#define BB_multichain_enable 0x1a2a0 +#define BB_chn_tables_intf_addr 0x19894 +#define BB_chn1_tables_intf_addr 0x1a894 +#define BB_chn_tables_intf_data 0x19898 +#define BB_chn1_tables_intf_data 0x1a898 +#define BB_gains_min_offsets 0x19e08 +#define BB_chaninfo_tab_b0 0x03200 +#define BB_chaninfo_tab_b1 0x03300 +#define BB_watchdog_status 0x1a7c0 +#define BB_watchdog_ctrl_1 0x1a7c4 +#define BB_watchdog_ctrl_2 0x1a7c8 +#define BB_watchdog_status_B 0x1a7e0 + + +#define PHY_BB_CHN_TABLES_INTF_ADDR 0x19894 +#define PHY_BB_CHN_TABLES_INTF_DATA 0x19898 + +#define PHY_BB_CHN1_TABLES_INTF_ADDR 0x1a894 +#define PHY_BB_CHN1_TABLES_INTF_DATA 0x1a898 + + +struct priv_ctrl_ctx { + uint32_t chaninfo_ctrl_orig; + uint32_t gain_min_offsets_orig; + uint32_t anyreg_start; + uint32_t anyreg_len; +}; + +static struct priv_ctrl_ctx g_priv_dump_ctx; + +static inline void set_target_reg_bits(void __iomem *mem, uint32_t reg, + uint32_t bitmask, uint32_t val) +{ + uint32_t value = hif_read32_mb(mem + (reg)); + uint32_t shift = 0; + + value &= ~(bitmask); + while (!((bitmask >> shift) & 0x01)) + shift++; + + value |= (((val) << shift) & (bitmask)); + hif_write32_mb(mem + (reg), value); 
+} + +static inline uint32_t get_target_reg_bits(void __iomem *mem, + uint32_t reg, uint32_t bitmask) +{ + uint32_t value = hif_read32_mb(mem + (reg)); + uint32_t shift = 0; + + while (!((bitmask >> shift) & 0x01)) + shift++; + + return (value >> shift) & bitmask; +} + +void priv_start_cap_chaninfo(struct hif_softc *scn) +{ + set_target_reg_bits(scn->mem, BB_chaninfo_ctrl, + CHANINFO_CTRL_CAPTURE_CHAN_INFO_MASK, 1); +} + +void priv_start_agc(struct hif_softc *scn) +{ + g_priv_dump_ctx.gain_min_offsets_orig = + hif_read32_mb(scn->mem + BB_gains_min_offsets); + set_target_reg_bits(scn->mem, BB_gains_min_offsets, + AGC_HISTORY_DUMP_MASK, + 0x0f); +} + +static void priv_stop_agc(struct hif_softc *scn) +{ + set_target_reg_bits(scn->mem, BB_gains_min_offsets, + AGC_HISTORY_DUMP_MASK, + 0); +} + +void priv_dump_chaninfo(struct hif_softc *scn) +{ + uint32_t bw, val; + uint32_t len, i, tmp; + uint32_t chain_mask; + uint32_t chain0, chain1; + + chain_mask = + get_target_reg_bits(scn->mem, BB_multichain_enable, + MULTICHAIN_ENABLE_RX_CHAIN_MASK_MASK); + chain0 = chain_mask & 1; + chain1 = chain_mask & 2; + + HIF_TRACE("%s: E", __func__); + bw = get_target_reg_bits(scn->mem, BB_chaninfo_ctrl, + CHANINFO_CTRL_CHANINFOMEM_BW_MASK); + + if (bw == 0) + len = 53; + else if (bw == 1) + len = 57; + else if (bw == 2) + len = 59 * 2 - 1; + else + len = 60 * 2 + 61 * 2; + + /* + * each tone is 16 bit valid, write to 32bit buffer each. + * bw==0(legacy20): 53 tones. + * bw==1(ht/vht20): 57 tones. + * bw==2(ht/vht40): 59+58 tones. + * bw==3(vht80): 60*2+61*2 tones. + */ + + if (chain0) { + hif_write32_mb(scn->mem + BB_chn_tables_intf_addr, + 0x80003200); + } + if (chain1) { + hif_write32_mb(scn->mem + BB_chn1_tables_intf_addr, + 0x80003200); + } + + set_target_reg_bits(scn->mem, BB_chaninfo_ctrl, + CHANINFOMEM_S2_READ_MASK, 0); + + if (chain0) { + if (bw < 2) { + len = (bw == 0) ? 
53 : 57; + for (i = 0; i < len; i++) { + val = + hif_read32_mb(scn->mem + + BB_chn_tables_intf_data) & + 0x0000ffff; + qdf_debug("0x%x\t", val); + if (i % 4 == 0) + qdf_debug("\n"); + } + } else { + len = (bw == 2) ? 59 : 60; + for (i = 0; i < len; i++) { + tmp = + hif_read32_mb(scn->mem + + BB_chn_tables_intf_data); + qdf_debug("0x%x\t", ((tmp >> 16) & 0x0000ffff)); + qdf_debug("0x%x\t", (tmp & 0x0000ffff)); + if (i % 2 == 0) + qdf_debug("\n"); + } + if (bw > 2) { + /* bw == 3 for vht80 */ + hif_write32_mb(scn->mem + + BB_chn_tables_intf_addr, + 0x80003300); + len = 61; + for (i = 0; i < len; i++) { + tmp = + hif_read32_mb(scn->mem + + BB_chn_tables_intf_data); + qdf_debug("0x%x\t", + ((tmp >> 16) & 0x0000ffff)); + qdf_debug("0x%x\t", (tmp & 0x0000ffff)); + if (i % 2 == 0) + qdf_debug("\n"); + } + } + } + } + if (chain1) { + if (bw < 2) { + len = (bw == 0) ? 53 : 57; + for (i = 0; i < len; i++) { + val = + hif_read32_mb(scn->mem + + BB_chn1_tables_intf_data) & + 0x0000ffff; + qdf_debug("0x%x\t", val); + if (i % 4 == 0) + qdf_debug("\n"); + } + } else { + len = (bw == 2) ? 
59 : 60; + for (i = 0; i < len; i++) { + tmp = + hif_read32_mb(scn->mem + + BB_chn1_tables_intf_data); + qdf_debug("0x%x\n", (tmp >> 16) & 0x0000ffff); + qdf_debug("0x%x\n", tmp & 0x0000ffff); + if (i % 2 == 0) + qdf_debug("\n"); + } + if (bw > 2) { + /* bw == 3 for vht80 */ + hif_write32_mb(scn->mem + + BB_chn1_tables_intf_addr, + 0x80003300); + len = 61; + for (i = 0; i < len; i++) { + tmp = + hif_read32_mb(scn->mem + + BB_chn1_tables_intf_data); + qdf_debug("0x%x\t", + ((tmp >> 16) & 0x0000ffff)); + qdf_debug("0x%x\t", (tmp & 0x0000ffff)); + if (i % 2 == 0) + qdf_debug("\n"); + } + } + } + } + HIF_TRACE("%s: X", __func__); +} + +void priv_dump_agc(struct hif_softc *scn) +{ + int i, len = 30; /* check this value for Rome and Peregrine */ + uint32_t chain0, chain1, chain_mask, val; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + chain_mask = + get_target_reg_bits(scn->mem, BB_multichain_enable, + MULTICHAIN_ENABLE_RX_CHAIN_MASK_MASK); + chain0 = chain_mask & 1; + chain1 = chain_mask & 2; + + len = len << 1; /* each agc item is 64bit, total*2 */ + priv_stop_agc(scn); + + set_target_reg_bits(scn->mem, BB_chaninfo_ctrl, + CHANINFOMEM_S2_READ_MASK, 0); + + HIF_TRACE("%s: AGC history buffer dump: E", __func__); + if (chain0) { + for (i = 0; i < len; i++) { + hif_write32_mb(scn->mem + + PHY_BB_CHN_TABLES_INTF_ADDR, + BB_chaninfo_tab_b0 + i * 4); + val = hif_read32_mb(scn->mem + + PHY_BB_CHN_TABLES_INTF_DATA); + qdf_debug("0x%x\t", val); + if (i % 4 == 0) + qdf_debug("\n"); + } + } + if (chain1) { + for (i = 0; i < len; i++) { + hif_write32_mb(scn->mem + + PHY_BB_CHN1_TABLES_INTF_ADDR, + BB_chaninfo_tab_b0 + i * 4); + val = hif_read32_mb(scn->mem + + PHY_BB_CHN1_TABLES_INTF_DATA); + qdf_debug("0x%x\t", val); + if (i % 4 == 0) + qdf_debug("\n"); + } + } + HIF_TRACE("%s: AGC history buffer dump X", __func__); + /* restore original value */ + hif_write32_mb(scn->mem + BB_gains_min_offsets, + g_priv_dump_ctx.gain_min_offsets_orig); + + Q_TARGET_ACCESS_END(scn); + +} + 
+void priv_dump_bbwatchdog(struct hif_softc *scn) +{ + uint32_t val; + + HIF_TRACE("%s: BB watchdog dump E", __func__); + val = hif_read32_mb(scn->mem + BB_watchdog_status); + qdf_debug("0x%x\t", val); + val = hif_read32_mb(scn->mem + BB_watchdog_ctrl_1); + qdf_debug("0x%x\t", val); + val = hif_read32_mb(scn->mem + BB_watchdog_ctrl_2); + qdf_debug("0x%x\t", val); + val = hif_read32_mb(scn->mem + BB_watchdog_status_B); + qdf_debug("0x%x", val); + HIF_TRACE("%s: BB watchdog dump X", __func__); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/mp_dev.h b/drivers/staging/qca-wifi-host-cmn/hif/src/mp_dev.h new file mode 100644 index 0000000000000000000000000000000000000000..208c911b57c8c91e45787271731f032f8f294b4e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/mp_dev.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __MP_DEV_H__ +#define __MP_DEV_H__ +void priv_start_agc(struct hif_softc *scn); +void priv_dump_agc(struct hif_softc *scn); +void priv_start_cap_chaninfo(struct hif_softc *scn); +void priv_dump_chaninfo(struct hif_softc *scn); +void priv_dump_bbwatchdog(struct hif_softc *scn); +void hif_shutdown_device(struct hif_opaque_softc *scn); +#endif /* __MP_DEV_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/hif_io32_pci.h b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/hif_io32_pci.h new file mode 100644 index 0000000000000000000000000000000000000000..65f46d024833beb8152347a7c0e62497cf0dd4a5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/hif_io32_pci.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __HIF_IO32_PCI_H__ +#define __HIF_IO32_PCI_H__ + +#ifdef HIF_PCI + +#include "hif_main.h" +#include "regtable.h" +#include "ce_reg.h" +#include "qdf_atomic.h" +#include "if_pci.h" +/* + * For maximum performance and no power management, set this to 1. + * For power management at the cost of performance, set this to 0. 
+ */ +#ifndef CONFIG_ATH_PCIE_MAX_PERF +#define CONFIG_ATH_PCIE_MAX_PERF 0 +#endif + +/* + * For keeping the target awake till the driver is + * loaded, set this to 1 + */ +#ifndef CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD +#define CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD 1 +#endif + +/* + * PCI-E L1 ASPM sub-states + * To enable clock gating in L1 state, set this to 1. + * (less power, slightly more wakeup latency) + * To disable clock gating in L1 state, set this to 0. (slightly more power) + */ +#define CONFIG_PCIE_ENABLE_L1_CLOCK_GATE 1 + +/* + * PCIE_ACCESS_LOG_NUM specifies the number of + * read/write records to store + */ +#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG +#define PCIE_ACCESS_LOG_NUM 500 +#endif + +/* 64-bit MSI support */ +#define CONFIG_PCIE_64BIT_MSI 0 + +/* BAR0 ready checking for AR6320v2 */ +#define PCIE_BAR0_READY_CHECKING 0 + +/* AXI gating when L1, L2 to reduce power consumption */ +#define CONFIG_PCIE_ENABLE_AXI_CLK_GATE 0 + +irqreturn_t hif_fw_interrupt_handler(int irq, void *arg); +#endif /* HIF_PCI */ +#endif /* __HIF_IO32_PCI_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci.c b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci.c new file mode 100644 index 0000000000000000000000000000000000000000..005813386b7cd970e901639a731a8c4419faa3ad --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci.c @@ -0,0 +1,4485 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#ifdef CONFIG_PCI_MSM +#include +#endif +#include "hif_io32.h" +#include "if_pci.h" +#include "hif.h" +#include "target_type.h" +#include "hif_main.h" +#include "ce_main.h" +#include "ce_api.h" +#include "ce_internal.h" +#include "ce_reg.h" +#include "ce_bmi.h" +#include "regtable.h" +#include "hif_hw_version.h" +#include +#include +#include "qdf_status.h" +#include "qdf_atomic.h" +#include "pld_common.h" +#include "mp_dev.h" +#include "hif_debug.h" + +#include "if_pci_internal.h" +#include "ce_tasklet.h" +#include "targaddrs.h" +#include "hif_exec.h" + +#include "pci_api.h" +#include "ahb_api.h" + +/* Maximum ms timeout for host to wake up target */ +#define PCIE_WAKE_TIMEOUT 1000 +#define RAMDUMP_EVENT_TIMEOUT 2500 + +/* Setting SOC_GLOBAL_RESET during driver unload causes intermittent + * PCIe data bus error + * As workaround for this issue - changing the reset sequence to + * use TargetCPU warm reset * instead of SOC_GLOBAL_RESET + */ +#define CPU_WARM_RESET_WAR + +#ifdef CONFIG_WIN +extern int32_t frac, intval, ar900b_20_targ_clk, qca9888_20_targ_clk; +#endif + +/* + * Top-level interrupt handler for all PCI interrupts from a Target. + * When a block of MSI interrupts is allocated, this top-level handler + * is not used; instead, we directly call the correct sub-handler. 
+ */ +struct ce_irq_reg_table { + uint32_t irq_enable; + uint32_t irq_status; +}; + +#ifndef QCA_WIFI_3_0_ADRASTEA +static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc) +{ +} +#else +void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc) +{ + struct hif_softc *scn = HIF_GET_SOFTC(sc); + unsigned int target_enable0, target_enable1; + unsigned int target_cause0, target_cause1; + + target_enable0 = hif_read32_mb(sc->mem + Q6_ENABLE_REGISTER_0); + target_enable1 = hif_read32_mb(sc->mem + Q6_ENABLE_REGISTER_1); + target_cause0 = hif_read32_mb(sc->mem + Q6_CAUSE_REGISTER_0); + target_cause1 = hif_read32_mb(sc->mem + Q6_CAUSE_REGISTER_1); + + if ((target_enable0 & target_cause0) || + (target_enable1 & target_cause1)) { + hif_write32_mb(sc->mem + Q6_ENABLE_REGISTER_0, 0); + hif_write32_mb(sc->mem + Q6_ENABLE_REGISTER_1, 0); + + if (scn->notice_send) + pld_intr_notify_q6(sc->dev); + } +} +#endif + + +/** + * pci_dispatch_ce_irq() - pci_dispatch_ce_irq + * @scn: scn + * + * Return: N/A + */ +static void pci_dispatch_interrupt(struct hif_softc *scn) +{ + uint32_t intr_summary; + int id; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + if (scn->hif_init_done != true) + return; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + intr_summary = CE_INTERRUPT_SUMMARY(scn); + + if (intr_summary == 0) { + if ((scn->target_status != TARGET_STATUS_RESET) && + (!qdf_atomic_read(&scn->link_suspended))) { + + hif_write32_mb(scn->mem + + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS), + HOST_GROUP0_MASK); + + hif_read32_mb(scn->mem + + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS)); + } + Q_TARGET_ACCESS_END(scn); + return; + } + Q_TARGET_ACCESS_END(scn); + + scn->ce_irq_summary = intr_summary; + for (id = 0; intr_summary && (id < scn->ce_count); id++) { + if (intr_summary & (1 << id)) { + intr_summary &= ~(1 << id); + ce_dispatch_interrupt(id, &hif_state->tasklets[id]); + } + } +} + +irqreturn_t 
hif_pci_legacy_ce_interrupt_handler(int irq, void *arg) +{ + struct hif_pci_softc *sc = (struct hif_pci_softc *)arg; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg); + + volatile int tmp; + uint16_t val = 0; + uint32_t bar0 = 0; + uint32_t fw_indicator_address, fw_indicator; + bool ssr_irq = false; + unsigned int host_cause, host_enable; + + if (LEGACY_INTERRUPTS(sc)) { + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return IRQ_HANDLED; + + if (ADRASTEA_BU) { + host_enable = hif_read32_mb(sc->mem + + PCIE_INTR_ENABLE_ADDRESS); + host_cause = hif_read32_mb(sc->mem + + PCIE_INTR_CAUSE_ADDRESS); + if (!(host_enable & host_cause)) { + hif_pci_route_adrastea_interrupt(sc); + return IRQ_HANDLED; + } + } + + /* Clear Legacy PCI line interrupts + * IMPORTANT: INTR_CLR register has to be set + * after INTR_ENABLE is set to 0, + * otherwise interrupt can not be really cleared + */ + hif_write32_mb(sc->mem + + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS), 0); + + hif_write32_mb(sc->mem + + (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS), + ADRASTEA_BU ? 
+ (host_enable & host_cause) : + HOST_GROUP0_MASK); + + if (ADRASTEA_BU) + hif_write32_mb(sc->mem + 0x2f100c, (host_cause >> 1)); + + /* IMPORTANT: this extra read transaction is required to + * flush the posted write buffer + */ + if (!ADRASTEA_BU) { + tmp = + hif_read32_mb(sc->mem + + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS)); + + if (tmp == 0xdeadbeef) { + HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!", + __func__); + + pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val); + HIF_ERROR("%s: PCI Vendor ID = 0x%04x", + __func__, val); + + pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val); + HIF_ERROR("%s: PCI Device ID = 0x%04x", + __func__, val); + + pci_read_config_word(sc->pdev, PCI_COMMAND, &val); + HIF_ERROR("%s: PCI Command = 0x%04x", __func__, + val); + + pci_read_config_word(sc->pdev, PCI_STATUS, &val); + HIF_ERROR("%s: PCI Status = 0x%04x", __func__, + val); + + pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, + &bar0); + HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__, + bar0); + + HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x", + __func__, + hif_read32_mb(sc->mem + + PCIE_LOCAL_BASE_ADDRESS + + RTC_STATE_ADDRESS)); + HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x", + __func__, + hif_read32_mb(sc->mem + + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS)); + HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x", + __func__, + hif_read32_mb(sc->mem + 0x80008), + hif_read32_mb(sc->mem + 0x8000c)); + HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x", + __func__, + hif_read32_mb(sc->mem + 0x80010), + hif_read32_mb(sc->mem + 0x80014)); + HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x", + __func__, + hif_read32_mb(sc->mem + 0x80018), + hif_read32_mb(sc->mem + 0x8001c)); + QDF_BUG(0); + } + + PCI_CLR_CAUSE0_REGISTER(sc); + } + + if (HAS_FW_INDICATOR) { + fw_indicator_address = hif_state->fw_indicator_address; + fw_indicator = A_TARGET_READ(scn, fw_indicator_address); + if ((fw_indicator != ~0) && + (fw_indicator & FW_IND_EVENT_PENDING)) + ssr_irq = true; 
+ } + + if (Q_TARGET_ACCESS_END(scn) < 0) + return IRQ_HANDLED; + } + /* TBDXXX: Add support for WMAC */ + + if (ssr_irq) { + sc->irq_event = irq; + qdf_atomic_set(&scn->tasklet_from_intr, 1); + + qdf_atomic_inc(&scn->active_tasklet_cnt); + tasklet_schedule(&sc->intr_tq); + } else { + pci_dispatch_interrupt(scn); + } + + return IRQ_HANDLED; +} + +static irqreturn_t hif_pci_msi_fw_handler(int irq, void *arg) +{ + struct hif_pci_softc *sc = (struct hif_pci_softc *)arg; + + (irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, arg); + + return IRQ_HANDLED; +} + +bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem) +{ + return 1; /* FIX THIS */ +} + +int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + int i = 0; + + if (!irq || !size) { + return -EINVAL; + } + + if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) { + irq[0] = sc->irq; + return 1; + } + + if (sc->num_msi_intrs > size) { + qdf_print("Not enough space in irq buffer to return irqs\n"); + return -EINVAL; + } + + for (i = 0; i < sc->num_msi_intrs; i++) { + irq[i] = sc->irq + i + MSI_ASSIGN_CE_INITIAL; + } + + return sc->num_msi_intrs; +} + + +/** + * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep + * @scn: hif_softc + * + * Return: void + */ +#if CONFIG_ATH_PCIE_MAX_PERF == 0 +void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + A_target_id_t pci_addr = scn->mem; + + qdf_spin_lock_irqsave(&hif_state->keep_awake_lock); + /* + * If the deferred sleep timer is running cancel it + * and put the soc into sleep. 
+ */ + if (hif_state->fake_sleep == true) { + qdf_timer_stop(&hif_state->sleep_timer); + if (hif_state->verified_awake == false) { + hif_write32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS, + PCIE_SOC_WAKE_RESET); + } + hif_state->fake_sleep = false; + } + qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock); +} +#else +inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn) +{ +} +#endif + +#define A_PCIE_LOCAL_REG_READ(mem, addr) \ + hif_read32_mb((char *)(mem) + \ + PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)) + +#define A_PCIE_LOCAL_REG_WRITE(mem, addr, val) \ + hif_write32_mb(((char *)(mem) + \ + PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val)) + +#ifdef QCA_WIFI_3_0 +/** + * hif_targ_is_awake() - check to see if the target is awake + * @hif_ctx: hif context + * + * emulation never goes to sleep + * + * Return: true if target is awake + */ +static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem) +{ + return true; +} +#else +/** + * hif_targ_is_awake() - check to see if the target is awake + * @hif_ctx: hif context + * + * Return: true if the targets clocks are on + */ +static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem) +{ + uint32_t val; + + if (scn->recovery) + return false; + val = hif_read32_mb(mem + PCIE_LOCAL_BASE_ADDRESS + + RTC_STATE_ADDRESS); + return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON; +} +#endif + +#define ATH_PCI_RESET_WAIT_MAX 10 /* Ms */ +static void hif_pci_device_reset(struct hif_pci_softc *sc) +{ + void __iomem *mem = sc->mem; + int i; + uint32_t val; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + + if (!scn->hostdef) + return; + + /* NB: Don't check resetok here. This form of reset + * is integral to correct operation. 
+ */ + + if (!SOC_GLOBAL_RESET_ADDRESS) + return; + + if (!mem) + return; + + HIF_ERROR("%s: Reset Device", __func__); + + /* + * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first + * writing WAKE_V, the Target may scribble over Host memory! + */ + A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS, + PCIE_SOC_WAKE_V_MASK); + for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { + if (hif_targ_is_awake(scn, mem)) + break; + + qdf_mdelay(1); + } + + /* Put Target, including PCIe, into RESET. */ + val = A_PCIE_LOCAL_REG_READ(mem, SOC_GLOBAL_RESET_ADDRESS); + val |= 1; + A_PCIE_LOCAL_REG_WRITE(mem, SOC_GLOBAL_RESET_ADDRESS, val); + for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { + if (A_PCIE_LOCAL_REG_READ(mem, RTC_STATE_ADDRESS) & + RTC_STATE_COLD_RESET_MASK) + break; + + qdf_mdelay(1); + } + + /* Pull Target, including PCIe, out of RESET. */ + val &= ~1; + A_PCIE_LOCAL_REG_WRITE(mem, SOC_GLOBAL_RESET_ADDRESS, val); + for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { + if (! + (A_PCIE_LOCAL_REG_READ(mem, RTC_STATE_ADDRESS) & + RTC_STATE_COLD_RESET_MASK)) + break; + + qdf_mdelay(1); + } + + A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); +} + +/* CPU warm reset function + * Steps: + * 1. Disable all pending interrupts - so no pending interrupts on WARM reset + * 2. Clear the FW_INDICATOR_ADDRESS -so Target CPU initializes FW + * correctly on WARM reset + * 3. Clear TARGET CPU LF timer interrupt + * 4. Reset all CEs to clear any pending CE transactions + * 5. Warm reset CPU + */ +static void hif_pci_device_warm_reset(struct hif_pci_softc *sc) +{ + void __iomem *mem = sc->mem; + int i; + uint32_t val; + uint32_t fw_indicator; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + + /* NB: Don't check resetok here. This form of reset is + * integral to correct operation. 
+ */ + + if (!mem) + return; + + HIF_INFO_MED("%s: Target Warm Reset", __func__); + + /* + * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first + * writing WAKE_V, the Target may scribble over Host memory! + */ + A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS, + PCIE_SOC_WAKE_V_MASK); + for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { + if (hif_targ_is_awake(scn, mem)) + break; + qdf_mdelay(1); + } + + /* + * Disable Pending interrupts + */ + val = + hif_read32_mb(mem + + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_CAUSE_ADDRESS)); + HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__, + (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val); + /* Target CPU Intr Cause */ + val = hif_read32_mb(mem + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS)); + HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val); + + val = + hif_read32_mb(mem + + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS)); + hif_write32_mb((mem + + (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0); + hif_write32_mb((mem + (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)), + HOST_GROUP0_MASK); + + qdf_mdelay(100); + + /* Clear FW_INDICATOR_ADDRESS */ + if (HAS_FW_INDICATOR) { + fw_indicator = hif_read32_mb(mem + FW_INDICATOR_ADDRESS); + hif_write32_mb(mem + FW_INDICATOR_ADDRESS, 0); + } + + /* Clear Target LF Timer interrupts */ + val = + hif_read32_mb(mem + + (RTC_SOC_BASE_ADDRESS + + SOC_LF_TIMER_CONTROL0_ADDRESS)); + HIF_INFO_MED("%s: addr 0x%x : 0x%x", __func__, + (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val); + val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK; + hif_write32_mb(mem + + (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), + val); + + /* Reset CE */ + val = + hif_read32_mb(mem + + (RTC_SOC_BASE_ADDRESS | + SOC_RESET_CONTROL_ADDRESS)); + val |= SOC_RESET_CONTROL_CE_RST_MASK; + hif_write32_mb((mem + + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)), + val); + val = + hif_read32_mb(mem + + (RTC_SOC_BASE_ADDRESS | + 
SOC_RESET_CONTROL_ADDRESS)); + qdf_mdelay(10); + + /* CE unreset */ + val &= ~SOC_RESET_CONTROL_CE_RST_MASK; + hif_write32_mb(mem + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS), + val); + val = + hif_read32_mb(mem + + (RTC_SOC_BASE_ADDRESS | + SOC_RESET_CONTROL_ADDRESS)); + qdf_mdelay(10); + + /* Read Target CPU Intr Cause */ + val = hif_read32_mb(mem + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS)); + HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x", + __func__, val); + + /* CPU warm RESET */ + val = + hif_read32_mb(mem + + (RTC_SOC_BASE_ADDRESS | + SOC_RESET_CONTROL_ADDRESS)); + val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK; + hif_write32_mb(mem + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS), + val); + val = + hif_read32_mb(mem + + (RTC_SOC_BASE_ADDRESS | + SOC_RESET_CONTROL_ADDRESS)); + HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x", + __func__, val); + + qdf_mdelay(100); + HIF_INFO_MED("%s: Target Warm reset complete", __func__); + +} + +#ifndef QCA_WIFI_3_0 +/* only applicable to legacy ce */ +int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + void __iomem *mem = sc->mem; + uint32_t val; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return ATH_ISR_NOSCHED; + val = hif_read32_mb(mem + FW_INDICATOR_ADDRESS); + if (Q_TARGET_ACCESS_END(scn) < 0) + return ATH_ISR_SCHED; + + HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val); + + if (val & FW_IND_HELPER) + return 0; + + return 1; +} +#endif + +int hif_check_soc_status(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + uint16_t device_id = 0; + uint32_t val; + uint16_t timeout_count = 0; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + + /* Check device ID from PCIe configuration space for link status */ + pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id); + if (device_id != sc->devid) { + HIF_ERROR("%s: device ID 
 does match (read 0x%x, expect 0x%x)", + __func__, device_id, sc->devid); + return -EACCES; + } + + /* Check PCIe local register for bar/memory access */ + val = hif_read32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS + + RTC_STATE_ADDRESS); + HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val); + + /* Try to wake up target if it sleeps */ + hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); + HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__, + hif_read32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS)); + + /* Check if target can be woken up */ + while (!hif_targ_is_awake(scn, sc->mem)) { + if (timeout_count >= PCIE_WAKE_TIMEOUT) { + HIF_ERROR("%s: wake up timeout, %08x, %08x", + __func__, + hif_read32_mb(sc->mem + + PCIE_LOCAL_BASE_ADDRESS + + RTC_STATE_ADDRESS), + hif_read32_mb(sc->mem + + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS)); + return -EACCES; + } + + hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); + + qdf_mdelay(100); + timeout_count += 100; + } + + /* Check Power register for SoC internal bus issues */ + val = + hif_read32_mb(sc->mem + RTC_SOC_BASE_ADDRESS + + SOC_POWER_REG_OFFSET); + HIF_INFO_MED("%s: Power register is %08x", __func__, val); + + return 0; +} + +/** + * __hif_pci_dump_registers(): dump other PCI debug registers + * @scn: struct hif_softc + * + * This function dumps pci debug registers. The parent function + * dumps the copy engine registers before calling this function. 
+ * + * Return: void + */ +static void __hif_pci_dump_registers(struct hif_softc *scn) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + void __iomem *mem = sc->mem; + uint32_t val, i, j; + uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + uint32_t ce_base; + + if (Q_TARGET_ACCESS_BEGIN(scn) < 0) + return; + + /* DEBUG_INPUT_SEL_SRC = 0x6 */ + val = + hif_read32_mb(mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_INPUT_SEL_OFFSET); + val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK; + val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6); + hif_write32_mb(mem + GPIO_BASE_ADDRESS + WLAN_DEBUG_INPUT_SEL_OFFSET, + val); + + /* DEBUG_CONTROL_ENABLE = 0x1 */ + val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_CONTROL_OFFSET); + val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK; + val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1); + hif_write32_mb(mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_CONTROL_OFFSET, val); + + HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__, + hif_read32_mb(mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_INPUT_SEL_OFFSET), + hif_read32_mb(mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_CONTROL_OFFSET)); + + HIF_INFO_MED("%s: Debug CE", __func__); + /* Loop CE debug output */ + /* AMBA_DEBUG_BUS_SEL = 0xc */ + val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET); + val &= ~AMBA_DEBUG_BUS_SEL_MASK; + val |= AMBA_DEBUG_BUS_SEL_SET(0xc); + hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, val); + + for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) { + /* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */ + val = hif_read32_mb(mem + CE_WRAPPER_BASE_ADDRESS + + CE_WRAPPER_DEBUG_OFFSET); + val &= ~CE_WRAPPER_DEBUG_SEL_MASK; + val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]); + hif_write32_mb(mem + CE_WRAPPER_BASE_ADDRESS + + CE_WRAPPER_DEBUG_OFFSET, val); + + HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x", + __func__, wrapper_idx[i], + hif_read32_mb(mem + GPIO_BASE_ADDRESS + + AMBA_DEBUG_BUS_OFFSET), + hif_read32_mb(mem + 
CE_WRAPPER_BASE_ADDRESS + + CE_WRAPPER_DEBUG_OFFSET)); + + if (wrapper_idx[i] <= 7) { + for (j = 0; j <= 5; j++) { + ce_base = CE_BASE_ADDRESS(wrapper_idx[i]); + /* For (j=0~5) write CE_DEBUG_SEL = j */ + val = + hif_read32_mb(mem + ce_base + + CE_DEBUG_OFFSET); + val &= ~CE_DEBUG_SEL_MASK; + val |= CE_DEBUG_SEL_SET(j); + hif_write32_mb(mem + ce_base + CE_DEBUG_OFFSET, + val); + + /* read (@gpio_athr_wlan_reg) + * WLAN_DEBUG_OUT_DATA + */ + val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_OUT_OFFSET); + val = WLAN_DEBUG_OUT_DATA_GET(val); + + HIF_INFO_MED("%s: module%d: cedbg: %x out: %x", + __func__, j, + hif_read32_mb(mem + ce_base + + CE_DEBUG_OFFSET), val); + } + } else { + /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */ + val = + hif_read32_mb(mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_OUT_OFFSET); + val = WLAN_DEBUG_OUT_DATA_GET(val); + + HIF_INFO_MED("%s: out: %x", __func__, val); + } + } + + HIF_INFO_MED("%s: Debug PCIe:", __func__); + /* Loop PCIe debug output */ + /* Write AMBA_DEBUG_BUS_SEL = 0x1c */ + val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET); + val &= ~AMBA_DEBUG_BUS_SEL_MASK; + val |= AMBA_DEBUG_BUS_SEL_SET(0x1c); + hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, val); + + for (i = 0; i <= 8; i++) { + /* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */ + val = + hif_read32_mb(mem + GPIO_BASE_ADDRESS + + AMBA_DEBUG_BUS_OFFSET); + val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK; + val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i); + hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, + val); + + /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */ + val = + hif_read32_mb(mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_OUT_OFFSET); + val = WLAN_DEBUG_OUT_DATA_GET(val); + + HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__, + hif_read32_mb(mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_OUT_OFFSET), val, + hif_read32_mb(mem + GPIO_BASE_ADDRESS + + WLAN_DEBUG_OUT_OFFSET)); + } + + Q_TARGET_ACCESS_END(scn); +} + 
+/** + * hif_dump_registers(): dump bus debug registers + * @scn: struct hif_opaque_softc + * + * This function dumps hif bus debug registers + * + * Return: 0 for success or error code + */ +int hif_pci_dump_registers(struct hif_softc *hif_ctx) +{ + int status; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + status = hif_dump_ce_registers(scn); + + if (status) + HIF_ERROR("%s: Dump CE Registers Failed", __func__); + + /* dump non copy engine pci registers */ + __hif_pci_dump_registers(scn); + + return 0; +} + +/* + * Handler for a per-engine interrupt on a PARTICULAR CE. + * This is used in cases where each CE has a private + * MSI interrupt. + */ +static irqreturn_t ce_per_engine_handler(int irq, void *arg) +{ + int CE_id = irq - MSI_ASSIGN_CE_INITIAL; + + /* + * NOTE: We are able to derive CE_id from irq because we + * use a one-to-one mapping for CE's 0..5. + * CE's 6 & 7 do not use interrupts at all. + * + * This mapping must be kept in sync with the mapping + * used by firmware. 
+ */ + + ce_per_engine_service(arg, CE_id); + + return IRQ_HANDLED; +} + +#ifdef HIF_CONFIG_SLUB_DEBUG_ON + +/* worker thread to schedule wlan_tasklet in SLUB debug build */ +static void reschedule_tasklet_work_handler(void *arg) +{ + struct hif_pci_softc *sc = arg; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + + if (!scn) { + HIF_ERROR("%s: hif_softc is NULL\n", __func__); + return; + } + + if (scn->hif_init_done == false) { + HIF_ERROR("%s: wlan driver is unloaded", __func__); + return; + } + + tasklet_schedule(&sc->intr_tq); +} + +/** + * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet + * work + * @sc: HIF PCI Context + * + * Return: void + */ +static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) +{ + qdf_create_work(0, &sc->reschedule_tasklet_work, + reschedule_tasklet_work_handler, NULL); +} +#else +static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { } +#endif /* HIF_CONFIG_SLUB_DEBUG_ON */ + +void wlan_tasklet(unsigned long data) +{ + struct hif_pci_softc *sc = (struct hif_pci_softc *)data; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + + if (scn->hif_init_done == false) + goto end; + + if (qdf_atomic_read(&scn->link_suspended)) + goto end; + + if (!ADRASTEA_BU) { + (irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, scn); + if (scn->target_status == TARGET_STATUS_RESET) + goto end; + } + +end: + qdf_atomic_set(&scn->tasklet_from_intr, 0); + qdf_atomic_dec(&scn->active_tasklet_cnt); +} + +#ifdef FEATURE_RUNTIME_PM +static const char *hif_pm_runtime_state_to_string(uint32_t state) +{ + switch (state) { + case HIF_PM_RUNTIME_STATE_NONE: + return "INIT_STATE"; + case HIF_PM_RUNTIME_STATE_ON: + return "ON"; + case HIF_PM_RUNTIME_STATE_INPROGRESS: + return "INPROGRESS"; + case HIF_PM_RUNTIME_STATE_SUSPENDED: + return "SUSPENDED"; + default: + return "INVALID STATE"; + } +} + +#define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \ + seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name) +/** + * 
hif_pci_runtime_pm_warn() - Runtime PM Debugging API + * @sc: hif_pci_softc context + * @msg: log message + * + * log runtime pm stats when something seems off. + * + * Return: void + */ +static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg) +{ + struct hif_pm_runtime_lock *ctx; + + HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d", + msg, atomic_read(&sc->dev->power.usage_count), + hif_pm_runtime_state_to_string( + atomic_read(&sc->pm_state)), + sc->prevent_suspend_cnt); + + HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d", + sc->dev->power.runtime_status, + sc->dev->power.runtime_error, + sc->dev->power.disable_depth, + sc->dev->power.autosuspend_delay); + + HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u", + sc->pm_stats.runtime_get, sc->pm_stats.runtime_put, + sc->pm_stats.request_resume); + + HIF_ERROR("allow_suspend: %u, prevent_suspend: %u", + sc->pm_stats.allow_suspend, + sc->pm_stats.prevent_suspend); + + HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u", + sc->pm_stats.prevent_suspend_timeout, + sc->pm_stats.allow_suspend_timeout); + + HIF_ERROR("Suspended: %u, resumed: %u count", + sc->pm_stats.suspended, + sc->pm_stats.resumed); + + HIF_ERROR("suspend_err: %u, runtime_get_err: %u", + sc->pm_stats.suspend_err, + sc->pm_stats.runtime_get_err); + + HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: "); + + list_for_each_entry(ctx, &sc->prevent_suspend_list, list) { + HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout); + } + + WARN_ON(1); +} + +/** + * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm + * @s: file to print to + * @data: unused + * + * debugging tool added to the debug fs for displaying runtimepm stats + * + * Return: 0 + */ +static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data) +{ + struct hif_pci_softc *sc = s->private; + static const char * const autopm_state[] 
= {"NONE", "ON", "INPROGRESS", + "SUSPENDED"}; + unsigned int msecs_age; + int pm_state = atomic_read(&sc->pm_state); + unsigned long timer_expires; + struct hif_pm_runtime_lock *ctx; + + seq_printf(s, "%30s: %s\n", "Runtime PM state", + autopm_state[pm_state]); + seq_printf(s, "%30s: %pf\n", "Last Resume Caller", + sc->pm_stats.last_resume_caller); + + if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) { + msecs_age = jiffies_to_msecs( + jiffies - sc->pm_stats.suspend_jiffies); + seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since", + msecs_age / 1000, msecs_age % 1000); + } + + seq_printf(s, "%30s: %d\n", "PM Usage count", + atomic_read(&sc->dev->power.usage_count)); + + seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt", + sc->prevent_suspend_cnt); + + HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended); + HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err); + HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed); + HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get); + HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put); + HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume); + HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend); + HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend); + HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout); + HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout); + HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err); + + timer_expires = sc->runtime_timer_expires; + if (timer_expires > 0) { + msecs_age = jiffies_to_msecs(timer_expires - jiffies); + seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout", + msecs_age / 1000, msecs_age % 1000); + } + + spin_lock_bh(&sc->runtime_lock); + if (list_empty(&sc->prevent_suspend_list)) { + spin_unlock_bh(&sc->runtime_lock); + return 0; + } + + seq_printf(s, "%30s: ", "Active Wakeup_Sources"); + list_for_each_entry(ctx, &sc->prevent_suspend_list, list) { + seq_printf(s, "%s", ctx->name); + if (ctx->timeout) + seq_printf(s, "(%d ms)", ctx->timeout); + seq_puts(s, " "); + } + seq_puts(s, "\n"); + spin_unlock_bh(&sc->runtime_lock); + + return 0; 
}
#undef HIF_PCI_RUNTIME_PM_STATS

/**
 * hif_pci_runtime_pm_open() - open the runtime pm stats debugfs file
 * @inode: inode of the debugfs node; i_private carries the hif_pci_softc
 * @file: file handle being opened
 *
 * Return: linux error code of single_open.
 */
static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
{
	return single_open(file, hif_pci_pm_runtime_debugfs_show,
			inode->i_private);
}

/* file_operations for the read-only "cnss_runtime_pm" debugfs node */
static const struct file_operations hif_pci_runtime_pm_fops = {
	.owner		= THIS_MODULE,
	.open		= hif_pci_runtime_pm_open,
	.release	= single_release,
	.read		= seq_read,
	.llseek		= seq_lseek,
};

/**
 * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
 * @sc: pci context
 *
 * creates a debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
{
	sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
					0400, NULL, sc,
					&hif_pci_runtime_pm_fops);
}

/**
 * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
 * @sc: pci context
 *
 * removes the debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
{
	debugfs_remove(sc->pm_dentry);
}

/**
 * hif_runtime_init() - enable kernel runtime PM on the device
 * @dev: device handle
 * @delay: autosuspend inactivity delay in milliseconds
 *
 * Drops the usage count held since probe (pm_runtime_put_noidle) so
 * the device is allowed to autosuspend after @delay ms of inactivity.
 */
static void hif_runtime_init(struct device *dev, int delay)
{
	pm_runtime_set_autosuspend_delay(dev, delay);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_noidle(dev);
	pm_suspend_ignore_children(dev, true);
}

/**
 * hif_runtime_exit() - undo hif_runtime_init()
 * @dev: device handle
 *
 * Re-takes a usage-count reference and marks the device active so it
 * can no longer runtime suspend.
 */
static void hif_runtime_exit(struct device *dev)
{
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
}

static void hif_pm_runtime_lock_timeout_fn(void *data);

/**
 * hif_pm_runtime_start(): start the runtime pm
 * @sc: pci context
 *
 * After this call, runtime pm will be active.
 */
static void hif_pm_runtime_start(struct hif_pci_softc *sc)
{
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
	uint32_t mode = hif_get_conparam(ol_sc);

	/* runtime PM is opt-in via ini, and pointless for FTM/EPPING */
	if (!ol_sc->hif_config.enable_runtime_pm) {
		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
		return;
	}

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode)) {
		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
			 __func__);
		return;
	}

	qdf_timer_init(NULL, &sc->runtime_timer,
		       hif_pm_runtime_lock_timeout_fn,
		       sc, QDF_TIMER_TYPE_WAKE_APPS);

	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
		 ol_sc->hif_config.runtime_pm_delay);

	hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
	hif_runtime_pm_debugfs_create(sc);
}

/**
 * hif_pm_runtime_stop(): stop runtime pm
 * @sc: pci context
 *
 * Turns off runtime pm and frees corresponding resources
 * that were acquired by hif_pm_runtime_start().
 */
static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
{
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
	uint32_t mode = hif_get_conparam(ol_sc);

	if (!ol_sc->hif_config.enable_runtime_pm)
		return;

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode))
		return;

	hif_runtime_exit(sc->dev);
	hif_pm_runtime_resume(sc->dev);

	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);

	hif_runtime_pm_debugfs_remove(sc);
	qdf_timer_free(&sc->runtime_timer);
	/* doesn't wait for pending traffic, unlike cld-2.0 */
}

/**
 * hif_pm_runtime_open(): initialize runtime pm
 * @sc: pci data structure
 *
 * Early initialization
 */
static void hif_pm_runtime_open(struct hif_pci_softc *sc)
{
	spin_lock_init(&sc->runtime_lock);

	qdf_atomic_init(&sc->pm_state);
	qdf_runtime_lock_init(&sc->prevent_linkdown_lock);
	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
	INIT_LIST_HEAD(&sc->prevent_suspend_list);
}

/**
 * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
 * @sc: pci context
 *
 * Ensure we have only one vote against runtime suspend before closing
 * the runtime suspend feature.
 *
 * all gets by the wlan driver should have been returned
 * one vote should remain as part of cnss_runtime_exit
 *
 * needs to be revisited if we share the root complex.
 */
static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
{
	struct hif_pm_runtime_lock *ctx, *tmp;

	/* warn (with full stats dump) only when the count is off; a
	 * count of exactly 1 is the expected state and needs no fixup
	 */
	if (atomic_read(&sc->dev->power.usage_count) != 1)
		hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
	else
		return;

	/* NOTE(review): runtime_lock is dropped around each deinit call;
	 * this presumably relies on hif_runtime_lock_deinit() unlinking
	 * ctx before the lock is retaken, otherwise 'tmp' could be stale
	 * when iteration resumes - confirm against that helper.
	 */
	spin_lock_bh(&sc->runtime_lock);
	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
		spin_unlock_bh(&sc->runtime_lock);
		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
		spin_lock_bh(&sc->runtime_lock);
	}
	spin_unlock_bh(&sc->runtime_lock);

	/* ensure 1 and only 1 usage count so that when the wlan
	 * driver is re-insmodded runtime pm won't be
	 * disabled also ensures runtime pm doesn't get
	 * broken on by being less than 1.
	 */
	if (atomic_read(&sc->dev->power.usage_count) <= 0)
		atomic_set(&sc->dev->power.usage_count, 1);
	while (atomic_read(&sc->dev->power.usage_count) > 1)
		hif_pm_runtime_put_auto(sc->dev);
}

static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
					  struct hif_pm_runtime_lock *lock);

/**
 * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
 * @sc: PCIe Context
 *
 * API is used to empty the runtime pm prevent suspend list.
 *
 * Return: void
 */
static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
{
	struct hif_pm_runtime_lock *ctx, *tmp;

	spin_lock_bh(&sc->runtime_lock);
	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
		__hif_pm_runtime_allow_suspend(sc, ctx);
	}
	spin_unlock_bh(&sc->runtime_lock);
}

/**
 * hif_pm_runtime_close(): close runtime pm
 * @sc: pci bus handle
 *
 * ensure runtime_pm is stopped before closing the driver
 */
static void hif_pm_runtime_close(struct hif_pci_softc *sc)
{
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock);
	if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
		return;

	hif_pm_runtime_stop(sc);

	/* on SSR only flush the prevent-suspend list; on a normal unload
	 * also rebalance the PM usage count
	 */
	hif_is_recovery_in_progress(scn) ?
		hif_pm_runtime_sanitize_on_ssr_exit(sc) :
		hif_pm_runtime_sanitize_on_exit(sc);
}
#else
static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
#endif

/**
 * hif_disable_power_gating() - disable HW power gating
 * @hif_ctx: hif context
 *
 * disables pcie L1 power states
 */
static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (NULL == scn) {
		HIF_ERROR("%s: Could not disable ASPM scn is null",
			__func__);
		return;
	}

	/* Disable ASPM when pkt log is enabled */
	pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
	pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
}

/**
 * hif_enable_power_gating() - enable HW power gating
 * @sc: pci context
 *
 * enables pcie L1 power states
 */
static void hif_enable_power_gating(struct hif_pci_softc *sc)
{
	if (NULL == sc) {
		/* NOTE(review): message copy-pasted from the disable path;
		 * it says "disable" although this is the enable path
		 */
		HIF_ERROR("%s: Could not disable ASPM scn is null",
			__func__);
		return;
	}

	/* Re-enable ASPM after firmware/OTP download is complete */
	pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
}

/**
 * hif_enable_power_management() - enable power management
 * @hif_ctx: hif context
 *
 * Enables runtime pm, aspm(PCI.. hif_enable_power_gating) and re-enabling
 * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
 *
 * note: epping mode does not call this function as it does not
 * care about saving power.
+ */ +void hif_pci_enable_power_management(struct hif_softc *hif_sc, + bool is_packet_log_enabled) +{ + struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc); + + if (pci_ctx == NULL) { + HIF_ERROR("%s, hif_ctx null", __func__); + return; + } + + hif_pm_runtime_start(pci_ctx); + + if (!is_packet_log_enabled) + hif_enable_power_gating(pci_ctx); + + if (!CONFIG_ATH_PCIE_MAX_PERF && + CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD && + !ce_srng_based(hif_sc)) { + /* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */ + if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0) + HIF_ERROR("%s, failed to set target to sleep", + __func__); + } +} + +/** + * hif_disable_power_management() - disable power management + * @hif_ctx: hif context + * + * Currently disables runtime pm. Should be updated to behave + * if runtime pm is not started. Should be updated to take care + * of aspm and soc sleep for driver load. + */ +void hif_pci_disable_power_management(struct hif_softc *hif_ctx) +{ + struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx); + + if (pci_ctx == NULL) { + HIF_ERROR("%s, hif_ctx null", __func__); + return; + } + + hif_pm_runtime_stop(pci_ctx); +} + +void hif_pci_display_stats(struct hif_softc *hif_ctx) +{ + struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx); + + if (pci_ctx == NULL) { + HIF_ERROR("%s, hif_ctx null", __func__); + return; + } + hif_display_ce_stats(&pci_ctx->ce_sc); +} + +void hif_pci_clear_stats(struct hif_softc *hif_ctx) +{ + struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx); + + if (pci_ctx == NULL) { + HIF_ERROR("%s, hif_ctx null", __func__); + return; + } + hif_clear_ce_stats(&pci_ctx->ce_sc); +} + +#define ATH_PCI_PROBE_RETRY_MAX 3 +/** + * hif_bus_open(): hif_bus_open + * @scn: scn + * @bus_type: bus type + * + * Return: n/a + */ +QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + hif_ctx->bus_type = bus_type; + 
hif_pm_runtime_open(sc); + + qdf_spinlock_create(&sc->irq_lock); + + return hif_ce_open(hif_ctx); +} + +/** + * hif_wake_target_cpu() - wake the target's cpu + * @scn: hif context + * + * Send an interrupt to the device to wake up the Target CPU + * so it has an opportunity to notice any changed state. + */ +static void hif_wake_target_cpu(struct hif_softc *scn) +{ + QDF_STATUS rv; + uint32_t core_ctrl; + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + + rv = hif_diag_read_access(hif_hdl, + SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS, + &core_ctrl); + QDF_ASSERT(rv == QDF_STATUS_SUCCESS); + /* A_INUM_FIRMWARE interrupt to Target CPU */ + core_ctrl |= CORE_CTRL_CPU_INTR_MASK; + + rv = hif_diag_write_access(hif_hdl, + SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS, + core_ctrl); + QDF_ASSERT(rv == QDF_STATUS_SUCCESS); +} + +/** + * soc_wake_reset() - allow the target to go to sleep + * @scn: hif_softc + * + * Clear the force wake register. This is done by + * hif_sleep_entry and cancel defered timer sleep. + */ +static void soc_wake_reset(struct hif_softc *scn) +{ + hif_write32_mb(scn->mem + + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS, + PCIE_SOC_WAKE_RESET); +} + +/** + * hif_sleep_entry() - gate target sleep + * @arg: hif context + * + * This function is the callback for the sleep timer. + * Check if last force awake critical section was at least + * HIF_MIN_SLEEP_INACTIVITY_TIME_MS time ago. if it was, + * allow the target to go to sleep and cancel the sleep timer. + * otherwise reschedule the sleep timer. 
+ */ +static void hif_sleep_entry(void *arg) +{ + struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg; + struct hif_softc *scn = HIF_GET_SOFTC(hif_state); + uint32_t idle_ms; + + if (scn->recovery) + return; + + if (hif_is_driver_unloading(scn)) + return; + + qdf_spin_lock_irqsave(&hif_state->keep_awake_lock); + if (hif_state->verified_awake == false) { + idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks() + - hif_state->sleep_ticks); + if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) { + if (!qdf_atomic_read(&scn->link_suspended)) { + soc_wake_reset(scn); + hif_state->fake_sleep = false; + } + } else { + qdf_timer_stop(&hif_state->sleep_timer); + qdf_timer_start(&hif_state->sleep_timer, + HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS); + } + } else { + qdf_timer_stop(&hif_state->sleep_timer); + qdf_timer_start(&hif_state->sleep_timer, + HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS); + } + qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock); +} + +#define HIF_HIA_MAX_POLL_LOOP 1000000 +#define HIF_HIA_POLLING_DELAY_MS 10 + +#ifdef CONFIG_WIN +static void hif_set_hia_extnd(struct hif_softc *scn) +{ + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); + uint32_t target_type = tgt_info->target_type; + + HIF_TRACE("%s: E", __func__); + + if ((target_type == TARGET_TYPE_AR900B) || + target_type == TARGET_TYPE_QCA9984 || + target_type == TARGET_TYPE_QCA9888) { + /* CHIP revision is 8-11 bits of the CHIP_ID register 0xec + * in RTC space + */ + tgt_info->target_revision + = CHIP_ID_REVISION_GET(hif_read32_mb(scn->mem + + CHIP_ID_ADDRESS)); + qdf_print(KERN_INFO"chip_id 0x%x chip_revision 0x%x\n", + target_type, tgt_info->target_revision); + } + + { + uint32_t flag2_value = 0; + uint32_t flag2_targ_addr = + host_interest_item_address(target_type, + offsetof(struct host_interest_s, hi_skip_clock_init)); + + if ((ar900b_20_targ_clk != -1) && + (frac != -1) && (intval != -1)) { + 
hif_diag_read_access(hif_hdl, flag2_targ_addr, + &flag2_value); + qdf_print("\n Setting clk_override\n"); + flag2_value |= CLOCK_OVERRIDE; + + hif_diag_write_access(hif_hdl, flag2_targ_addr, + flag2_value); + qdf_print("\n CLOCK PLL val set %d\n", flag2_value); + } else { + qdf_print(KERN_INFO"\n CLOCK PLL skipped\n"); + } + } + + if (target_type == TARGET_TYPE_AR900B + || target_type == TARGET_TYPE_QCA9984 + || target_type == TARGET_TYPE_QCA9888) { + + /* for AR9980_2.0, 300 mhz clock is used, right now we assume + * this would be supplied through module parameters, + * if not supplied assumed default or same behavior as 1.0. + * Assume 1.0 clock can't be tuned, reset to defaults + */ + + qdf_print(KERN_INFO + "%s: setting the target pll frac %x intval %x\n", + __func__, frac, intval); + + /* do not touch frac, and int val, let them be default -1, + * if desired, host can supply these through module params + */ + if (frac != -1 || intval != -1) { + uint32_t flag2_value = 0; + uint32_t flag2_targ_addr; + + flag2_targ_addr = + host_interest_item_address(target_type, + offsetof(struct host_interest_s, + hi_clock_info)); + hif_diag_read_access(hif_hdl, + flag2_targ_addr, &flag2_value); + qdf_print("\n ====> FRAC Val %x Address %x\n", frac, + flag2_value); + hif_diag_write_access(hif_hdl, flag2_value, frac); + qdf_print("\n INT Val %x Address %x\n", + intval, flag2_value + 4); + hif_diag_write_access(hif_hdl, + flag2_value + 4, intval); + } else { + qdf_print(KERN_INFO + "%s: no frac provided, skipping pre-configuring PLL\n", + __func__); + } + + /* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */ + if ((target_type == TARGET_TYPE_AR900B) + && (tgt_info->target_revision == AR900B_REV_2) + && ar900b_20_targ_clk != -1) { + uint32_t flag2_value = 0; + uint32_t flag2_targ_addr; + + flag2_targ_addr + = host_interest_item_address(target_type, + offsetof(struct host_interest_s, + hi_desired_cpu_speed_hz)); + hif_diag_read_access(hif_hdl, flag2_targ_addr, + &flag2_value); 
+ qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x\n", + flag2_value); + hif_diag_write_access(hif_hdl, flag2_value, + ar900b_20_targ_clk/*300000000u*/); + } else if (target_type == TARGET_TYPE_QCA9888) { + uint32_t flag2_targ_addr; + + if (200000000u != qca9888_20_targ_clk) { + qca9888_20_targ_clk = 300000000u; + /* Setting the target clock speed to 300 mhz */ + } + + flag2_targ_addr + = host_interest_item_address(target_type, + offsetof(struct host_interest_s, + hi_desired_cpu_speed_hz)); + hif_diag_write_access(hif_hdl, flag2_targ_addr, + qca9888_20_targ_clk); + } else { + qdf_print(KERN_INFO"%s: targ_clk is not provided, skipping pre-configuring PLL\n", + __func__); + } + } else { + if (frac != -1 || intval != -1) { + uint32_t flag2_value = 0; + uint32_t flag2_targ_addr = + host_interest_item_address(target_type, + offsetof(struct host_interest_s, + hi_clock_info)); + hif_diag_read_access(hif_hdl, flag2_targ_addr, + &flag2_value); + qdf_print("\n ====> FRAC Val %x Address %x\n", frac, + flag2_value); + hif_diag_write_access(hif_hdl, flag2_value, frac); + qdf_print("\n INT Val %x Address %x\n", intval, + flag2_value + 4); + hif_diag_write_access(hif_hdl, flag2_value + 4, + intval); + } + } +} + +#else + +static void hif_set_hia_extnd(struct hif_softc *scn) +{ +} + +#endif + +/** + * hif_set_hia() - fill out the host interest area + * @scn: hif context + * + * This is replaced by hif_wlan_enable for integrated targets. + * This fills out the host interest area. The firmware will + * process these memory addresses when it is first brought out + * of reset. + * + * Return: 0 for success. 
+ */ +static int hif_set_hia(struct hif_softc *scn) +{ + QDF_STATUS rv; + uint32_t interconnect_targ_addr = 0; + uint32_t pcie_state_targ_addr = 0; + uint32_t pipe_cfg_targ_addr = 0; + uint32_t svc_to_pipe_map = 0; + uint32_t pcie_config_flags = 0; + uint32_t flag2_value = 0; + uint32_t flag2_targ_addr = 0; +#ifdef QCA_WIFI_3_0 + uint32_t host_interest_area = 0; + uint8_t i; +#else + uint32_t ealloc_value = 0; + uint32_t ealloc_targ_addr = 0; + uint8_t banks_switched = 1; + uint32_t chip_id; +#endif + uint32_t pipe_cfg_addr; + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); + uint32_t target_type = tgt_info->target_type; + uint32_t target_ce_config_sz, target_service_to_ce_map_sz; + static struct CE_pipe_config *target_ce_config; + struct service_to_pipe *target_service_to_ce_map; + + HIF_TRACE("%s: E", __func__); + + hif_get_target_ce_config(scn, + &target_ce_config, &target_ce_config_sz, + &target_service_to_ce_map, + &target_service_to_ce_map_sz, + NULL, NULL); + + if (ADRASTEA_BU) + return QDF_STATUS_SUCCESS; + +#ifdef QCA_WIFI_3_0 + i = 0; + while (i < HIF_HIA_MAX_POLL_LOOP) { + host_interest_area = hif_read32_mb(scn->mem + + A_SOC_CORE_SCRATCH_0_ADDRESS); + if ((host_interest_area & 0x01) == 0) { + qdf_mdelay(HIF_HIA_POLLING_DELAY_MS); + host_interest_area = 0; + i++; + if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0)) + HIF_ERROR("%s: poll timeout(%d)", __func__, i); + } else { + host_interest_area &= (~0x01); + hif_write32_mb(scn->mem + 0x113014, 0); + break; + } + } + + if (i >= HIF_HIA_MAX_POLL_LOOP) { + HIF_ERROR("%s: hia polling timeout", __func__); + return -EIO; + } + + if (host_interest_area == 0) { + HIF_ERROR("%s: host_interest_area = 0", __func__); + return -EIO; + } + + interconnect_targ_addr = host_interest_area + + offsetof(struct host_interest_area_t, + hi_interconnect_state); + + flag2_targ_addr = host_interest_area + + offsetof(struct host_interest_area_t, 
hi_option_flag2); + +#else + interconnect_targ_addr = hif_hia_item_address(target_type, + offsetof(struct host_interest_s, hi_interconnect_state)); + ealloc_targ_addr = hif_hia_item_address(target_type, + offsetof(struct host_interest_s, hi_early_alloc)); + flag2_targ_addr = hif_hia_item_address(target_type, + offsetof(struct host_interest_s, hi_option_flag2)); +#endif + /* Supply Target-side CE configuration */ + rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr, + &pcie_state_targ_addr); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d", + __func__, interconnect_targ_addr, rv); + goto done; + } + if (pcie_state_targ_addr == 0) { + rv = QDF_STATUS_E_FAILURE; + HIF_ERROR("%s: pcie state addr is 0", __func__); + goto done; + } + pipe_cfg_addr = pcie_state_targ_addr + + offsetof(struct pcie_state_s, + pipe_cfg_addr); + rv = hif_diag_read_access(hif_hdl, + pipe_cfg_addr, + &pipe_cfg_targ_addr); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d", + __func__, pipe_cfg_addr, rv); + goto done; + } + if (pipe_cfg_targ_addr == 0) { + rv = QDF_STATUS_E_FAILURE; + HIF_ERROR("%s: pipe cfg addr is 0", __func__); + goto done; + } + + rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr, + (uint8_t *) target_ce_config, + target_ce_config_sz); + + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv); + goto done; + } + + rv = hif_diag_read_access(hif_hdl, + pcie_state_targ_addr + + offsetof(struct pcie_state_s, + svc_to_pipe_map), + &svc_to_pipe_map); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv); + goto done; + } + if (svc_to_pipe_map == 0) { + rv = QDF_STATUS_E_FAILURE; + HIF_ERROR("%s: svc_to_pipe map is 0", __func__); + goto done; + } + + rv = hif_diag_write_mem(hif_hdl, + svc_to_pipe_map, + (uint8_t *) target_service_to_ce_map, + target_service_to_ce_map_sz); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: write 
svc/pipe map (%d)", __func__, rv); + goto done; + } + + rv = hif_diag_read_access(hif_hdl, + pcie_state_targ_addr + + offsetof(struct pcie_state_s, + config_flags), + &pcie_config_flags); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv); + goto done; + } +#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE) + pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1; +#else + pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1; +#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */ + pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT; +#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE) + pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE; +#endif + rv = hif_diag_write_mem(hif_hdl, + pcie_state_targ_addr + + offsetof(struct pcie_state_s, + config_flags), + (uint8_t *) &pcie_config_flags, + sizeof(pcie_config_flags)); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv); + goto done; + } + +#ifndef QCA_WIFI_3_0 + /* configure early allocation */ + ealloc_targ_addr = hif_hia_item_address(target_type, + offsetof( + struct host_interest_s, + hi_early_alloc)); + + rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr, + &ealloc_value); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: get early alloc val (%d)", __func__, rv); + goto done; + } + + /* 1 bank is switched to IRAM, except ROME 1.0 */ + ealloc_value |= + ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & + HI_EARLY_ALLOC_MAGIC_MASK); + + rv = hif_diag_read_access(hif_hdl, + CHIP_ID_ADDRESS | + RTC_SOC_BASE_ADDRESS, &chip_id); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: get chip id val (%d)", __func__, rv); + goto done; + } + if (CHIP_ID_VERSION_GET(chip_id) == 0xD) { + tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id); + switch (CHIP_ID_REVISION_GET(chip_id)) { + case 0x2: /* ROME 1.3 */ + /* 2 banks are switched to IRAM */ + banks_switched = 2; + break; + case 0x4: /* ROME 2.1 */ + case 0x5: /* ROME 2.2 */ + banks_switched = 6; + break; + case 0x8: 
/* ROME 3.0 */ + case 0x9: /* ROME 3.1 */ + case 0xA: /* ROME 3.2 */ + banks_switched = 9; + break; + case 0x0: /* ROME 1.0 */ + case 0x1: /* ROME 1.1 */ + default: + /* 3 banks are switched to IRAM */ + banks_switched = 3; + break; + } + } + + ealloc_value |= + ((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) + & HI_EARLY_ALLOC_IRAM_BANKS_MASK); + + rv = hif_diag_write_access(hif_hdl, + ealloc_targ_addr, + ealloc_value); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: set early alloc val (%d)", __func__, rv); + goto done; + } +#endif + if ((target_type == TARGET_TYPE_AR900B) + || (target_type == TARGET_TYPE_QCA9984) + || (target_type == TARGET_TYPE_QCA9888) + || (target_type == TARGET_TYPE_AR9888)) { + hif_set_hia_extnd(scn); + } + + /* Tell Target to proceed with initialization */ + flag2_targ_addr = hif_hia_item_address(target_type, + offsetof( + struct host_interest_s, + hi_option_flag2)); + + rv = hif_diag_read_access(hif_hdl, flag2_targ_addr, + &flag2_value); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: get option val (%d)", __func__, rv); + goto done; + } + + flag2_value |= HI_OPTION_EARLY_CFG_DONE; + rv = hif_diag_write_access(hif_hdl, flag2_targ_addr, + flag2_value); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: set option val (%d)", __func__, rv); + goto done; + } + + hif_wake_target_cpu(scn); + +done: + + return rv; +} + +/** + * hif_bus_configure() - configure the pcie bus + * @hif_sc: pointer to the hif context. + * + * return: 0 for success. nonzero for failure. 
+ */ +int hif_pci_bus_configure(struct hif_softc *hif_sc) +{ + int status = 0; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); + struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc); + + hif_ce_prepare_config(hif_sc); + + /* initialize sleep state adjust variables */ + hif_state->sleep_timer_init = true; + hif_state->keep_awake_count = 0; + hif_state->fake_sleep = false; + hif_state->sleep_ticks = 0; + + qdf_timer_init(NULL, &hif_state->sleep_timer, + hif_sleep_entry, (void *)hif_state, + QDF_TIMER_TYPE_WAKE_APPS); + hif_state->sleep_timer_init = true; + + status = hif_wlan_enable(hif_sc); + if (status) { + HIF_ERROR("%s: hif_wlan_enable error = %d", + __func__, status); + goto timer_free; + } + + A_TARGET_ACCESS_LIKELY(hif_sc); + + if ((CONFIG_ATH_PCIE_MAX_PERF || + CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) && + !ce_srng_based(hif_sc)) { + /* + * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature + * prevent sleep when we want to keep firmware always awake + * note: when we want to keep firmware always awake, + * hif_target_sleep_state_adjust will point to a dummy + * function, and hif_pci_target_sleep_state_adjust must + * be called instead. + * note: bus type check is here because AHB bus is reusing + * hif_pci_bus_configure code. 
+ */ + if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) { + if (hif_pci_target_sleep_state_adjust(hif_sc, + false, true) < 0) { + status = -EACCES; + goto disable_wlan; + } + } + } + + /* todo: consider replacing this with an srng field */ + if ((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) && + (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) { + hif_sc->per_ce_irq = true; + } + + status = hif_config_ce(hif_sc); + if (status) + goto disable_wlan; + + /* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */ + if (hif_needs_bmi(hif_osc)) { + status = hif_set_hia(hif_sc); + if (status) + goto unconfig_ce; + + HIF_INFO_MED("%s: hif_set_hia done", __func__); + + hif_register_bmi_callbacks(hif_sc); + } + + if ((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) && + (hif_sc->bus_type == QDF_BUS_TYPE_PCI)) + HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target", + __func__); + else { + status = hif_configure_irq(hif_sc); + if (status < 0) + goto unconfig_ce; + } + + A_TARGET_ACCESS_UNLIKELY(hif_sc); + + return status; + +unconfig_ce: + hif_unconfig_ce(hif_sc); +disable_wlan: + A_TARGET_ACCESS_UNLIKELY(hif_sc); + hif_wlan_disable(hif_sc); + +timer_free: + qdf_timer_stop(&hif_state->sleep_timer); + qdf_timer_free(&hif_state->sleep_timer); + hif_state->sleep_timer_init = false; + + HIF_ERROR("%s: failed, status = %d", __func__, status); + return status; +} + +/** + * hif_bus_close(): hif_bus_close + * + * Return: n/a + */ +void hif_pci_close(struct hif_softc *hif_sc) +{ + struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc); + + hif_pm_runtime_close(hif_pci_sc); + hif_ce_close(hif_sc); +} + +#define BAR_NUM 0 + +#ifndef CONFIG_PLD_PCIE_INIT +static int hif_enable_pci(struct hif_pci_softc *sc, + struct pci_dev *pdev, + const struct pci_device_id *id) +{ + void __iomem *mem; + int ret = 0; + uint16_t device_id = 0; + struct hif_softc *ol_sc = HIF_GET_SOFTC(sc); + + pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id); + if (device_id != id->device) 
{ + HIF_ERROR( + "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x", + __func__, device_id, id->device); + /* pci link is down, so returing with error code */ + return -EIO; + } + + /* FIXME: temp. commenting out assign_resource + * call for dev_attach to work on 2.6.38 kernel + */ +#if (!defined(__LINUX_ARM_ARCH__)) + if (pci_assign_resource(pdev, BAR_NUM)) { + HIF_ERROR("%s: pci_assign_resource error", __func__); + return -EIO; + } +#endif + if (pci_enable_device(pdev)) { + HIF_ERROR("%s: pci_enable_device error", + __func__); + return -EIO; + } + + /* Request MMIO resources */ + ret = pci_request_region(pdev, BAR_NUM, "ath"); + if (ret) { + HIF_ERROR("%s: PCI MMIO reservation error", __func__); + ret = -EIO; + goto err_region; + } + +#ifdef CONFIG_ARM_LPAE + /* if CONFIG_ARM_LPAE is enabled, we have to set 64 bits mask + * for 32 bits device also. + */ + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) { + HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__); + goto err_dma; + } + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) { + HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__); + goto err_dma; + } +#else + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) { + HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__); + goto err_dma; + } + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) { + HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!", + __func__); + goto err_dma; + } +#endif + + PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188); + + /* Set bus master bit in PCI_COMMAND to enable DMA */ + pci_set_master(pdev); + + /* Arrange for access to Target SoC registers. 
*/ + mem = pci_iomap(pdev, BAR_NUM, 0); + if (!mem) { + HIF_ERROR("%s: PCI iomap error", __func__); + ret = -EIO; + goto err_iomap; + } + + pr_err("*****BAR is %pK\n", mem); + + sc->mem = mem; + + HIF_INFO("%s, mem after pci_iomap:%pK\n", + __func__, sc->mem); + + /* Hawkeye emulation specific change */ + if ((device_id == RUMIM2M_DEVICE_ID_NODE0) || + (device_id == RUMIM2M_DEVICE_ID_NODE1) || + (device_id == RUMIM2M_DEVICE_ID_NODE2) || + (device_id == RUMIM2M_DEVICE_ID_NODE3)) { + mem = mem + 0x0c000000; + sc->mem = mem; + HIF_INFO("%s: Changing PCI mem base to %pK\n", + __func__, sc->mem); + } + + sc->mem_len = pci_resource_len(pdev, BAR_NUM); + ol_sc->mem = mem; + ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM); + sc->pci_enabled = true; + return ret; + +err_iomap: + pci_clear_master(pdev); +err_dma: + pci_release_region(pdev, BAR_NUM); +err_region: + pci_disable_device(pdev); + return ret; +} +#else +static int hif_enable_pci(struct hif_pci_softc *sc, + struct pci_dev *pdev, + const struct pci_device_id *id) +{ + PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188); + sc->pci_enabled = true; + return 0; +} +#endif + + +#ifndef CONFIG_PLD_PCIE_INIT +static inline void hif_pci_deinit(struct hif_pci_softc *sc) +{ + pci_iounmap(sc->pdev, sc->mem); + pci_clear_master(sc->pdev); + pci_release_region(sc->pdev, BAR_NUM); + pci_disable_device(sc->pdev); +} +#else +static inline void hif_pci_deinit(struct hif_pci_softc *sc) {} +#endif + +static void hif_disable_pci(struct hif_pci_softc *sc) +{ + struct hif_softc *ol_sc = HIF_GET_SOFTC(sc); + + if (ol_sc == NULL) { + HIF_ERROR("%s: ol_sc = NULL", __func__); + return; + } + hif_pci_device_reset(sc); + + hif_pci_deinit(sc); + + sc->mem = NULL; + ol_sc->mem = NULL; +} + +static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc) +{ + int ret = 0; + int targ_awake_limit = 500; +#ifndef QCA_WIFI_3_0 + uint32_t fw_indicator; +#endif + struct hif_softc *scn = HIF_GET_SOFTC(sc); + + /* + * Verify that the Target was started cleanly.* 
	 * The case where this is most likely is with an AUX-powered
	 * Target and a Host in WoW mode. If the Host crashes,
	 * loses power, or is restarted (without unloading the driver)
	 * then the Target is left (aux) powered and running. On a
	 * subsequent driver load, the Target is in an unexpected state.
	 * We try to catch that here in order to reset the Target and
	 * retry the probe.
	 */
	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
	while (!hif_targ_is_awake(scn, sc->mem)) {
		if (0 == targ_awake_limit) {
			HIF_ERROR("%s: target awake timeout", __func__);
			ret = -EAGAIN;
			goto end;
		}
		qdf_mdelay(1);
		targ_awake_limit--;
	}

#if PCIE_BAR0_READY_CHECKING
	{
		int wait_limit = 200;
		/* Synchronization point: wait the BAR0 is configured */
		while (wait_limit-- &&
		       !(hif_read32_mb(sc->mem +
				       PCIE_LOCAL_BASE_ADDRESS +
				       PCIE_SOC_RDY_STATUS_ADDRESS)
			 & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
			qdf_mdelay(10);
		}
		if (wait_limit < 0) {
			/* AR6320v1 doesn't support checking of BAR0
			 * configuration, takes one sec to wait BAR0 ready
			 */
			HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
				     __func__);
		}
	}
#endif

#ifndef QCA_WIFI_3_0
	fw_indicator = hif_read32_mb(sc->mem + FW_INDICATOR_ADDRESS);
	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);

	if (fw_indicator & FW_IND_INITIALIZED) {
		HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
			  __func__);
		ret = -EAGAIN;
		goto end;
	}
#endif

end:
	return ret;
}

/**
 * wlan_tasklet_msi() - per-MSI-vector tasklet; services FW IRQ or one CE
 * @data: pointer to the hif_tasklet_entry for this vector
 *
 * The last tasklet entry (id == HIF_MAX_TASKLET_NUM) is reserved for the
 * firmware interrupt; entries below ce_count service their copy engine.
 */
static void wlan_tasklet_msi(unsigned long data)
{
	struct hif_tasklet_entry *entry = (struct hif_tasklet_entry *)data;
	struct hif_pci_softc *sc = (struct hif_pci_softc *) entry->hif_handler;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (scn->hif_init_done == false)
		goto irq_handled;

	if (qdf_atomic_read(&scn->link_suspended))
		goto irq_handled;

	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (entry->id == HIF_MAX_TASKLET_NUM) {
		/* the last tasklet is for fw IRQ */
		(irqreturn_t)hif_fw_interrupt_handler(sc->irq_event, scn);
		if (scn->target_status == TARGET_STATUS_RESET)
			goto irq_handled;
	} else if (entry->id < scn->ce_count) {
		ce_per_engine_service(scn, entry->id);
	} else {
		HIF_ERROR("%s: ERROR - invalid CE_id = %d",
			  __func__, entry->id);
	}
	return;

irq_handled:
	qdf_atomic_dec(&scn->active_tasklet_cnt);

}

/* deprecated */
static int hif_configure_msi(struct hif_pci_softc *sc)
{
	int ret = 0;
	int num_msi_desired;
	int rv = -1;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	HIF_TRACE("%s: E", __func__);

	num_msi_desired = MSI_NUM_REQUEST; /* Multiple MSI */
	if (num_msi_desired < 1) {
		HIF_ERROR("%s: MSI is not configured", __func__);
		return -EINVAL;
	}

	if (num_msi_desired > 1) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
		rv = pci_enable_msi_range(sc->pdev, num_msi_desired,
					  num_msi_desired);
#else
		rv = pci_enable_msi_block(sc->pdev, num_msi_desired);
#endif
	}
	HIF_TRACE("%s: num_msi_desired = %d, available_msi = %d",
		  __func__, num_msi_desired, rv);

	if (rv == 0 || rv >= HIF_MAX_TASKLET_NUM) {
		int i;

		sc->num_msi_intrs = HIF_MAX_TASKLET_NUM;
		sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].hif_handler =
			(void *)sc;
		sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].id =
			HIF_MAX_TASKLET_NUM;
		tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
			     (unsigned long)&sc->tasklet_entries[
			     HIF_MAX_TASKLET_NUM-1]);
		ret = request_irq(sc->pdev->irq + MSI_ASSIGN_FW,
				  hif_pci_msi_fw_handler,
				  IRQF_SHARED, "wlan_pci", sc);
		if (ret) {
			HIF_ERROR("%s: request_irq failed", __func__);
			goto err_intr;
		}
		/* NOTE(review): loop bound uses <= so ce_count + 1 vectors
		 * are requested — verify tasklet_entries is sized for this
		 */
		for (i = 0; i <= scn->ce_count; i++) {
			sc->tasklet_entries[i].hif_handler = (void *)sc;
			sc->tasklet_entries[i].id = i;
			tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
				     (unsigned long)&sc->tasklet_entries[i]);
			ret = request_irq((sc->pdev->irq +
					   i + MSI_ASSIGN_CE_INITIAL),
					  ce_per_engine_handler, IRQF_SHARED,
					  "wlan_pci", sc);
			if (ret) {
				HIF_ERROR("%s: request_irq failed", __func__);
				goto err_intr;
			}
		}
	} else if (rv > 0) {
		HIF_TRACE("%s: use single msi", __func__);

		ret = pci_enable_msi(sc->pdev);
		if (ret < 0) {
			HIF_ERROR("%s: single MSI allocation failed",
				  __func__);
			/* Try for legacy PCI line interrupts */
			sc->num_msi_intrs = 0;
		} else {
			sc->num_msi_intrs = 1;
			tasklet_init(&sc->intr_tq,
				     wlan_tasklet, (unsigned long)sc);
			ret = request_irq(sc->pdev->irq,
					  hif_pci_legacy_ce_interrupt_handler,
					  IRQF_SHARED, "wlan_pci", sc);
			if (ret) {
				HIF_ERROR("%s: request_irq failed", __func__);
				goto err_intr;
			}
		}
	} else {
		sc->num_msi_intrs = 0;
		ret = -EIO;
		HIF_ERROR("%s: do not support MSI, rv = %d", __func__, rv);
	}
	/* NOTE(review): this second, unconditional pci_enable_msi() repeats
	 * the single-MSI setup performed in the branch above; looks like an
	 * accidental duplication — confirm before touching (deprecated path)
	 */
	ret = pci_enable_msi(sc->pdev);
	if (ret < 0) {
		HIF_ERROR("%s: single MSI interrupt allocation failed",
			  __func__);
		/* Try for legacy PCI line interrupts */
		sc->num_msi_intrs = 0;
	} else {
		sc->num_msi_intrs = 1;
		tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
		ret = request_irq(sc->pdev->irq,
				  hif_pci_legacy_ce_interrupt_handler,
				  IRQF_SHARED, "wlan_pci", sc);
		if (ret) {
			HIF_ERROR("%s: request_irq failed", __func__);
			goto err_intr;
		}
	}

	if (ret == 0) {
		hif_write32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
			       HOST_GROUP0_MASK);
		hif_write32_mb(sc->mem +
			       PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_RESET);
	}
	HIF_TRACE("%s: X, ret = %d", __func__, ret);

	return ret;

err_intr:
	if (sc->num_msi_intrs >= 1)
		pci_disable_msi(sc->pdev);
	return ret;
}

/**
 * hif_pci_configure_legacy_irq() - set up legacy (line-based) PCI interrupts
 * @sc: HIF PCI context
 *
 * Used when MSI is not supported or MSI setup failed.
 *
 * Return: 0 on success, request_irq() error code otherwise
 */
static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
{
	int ret = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	uint32_t target_type = scn->target_info.target_type;

	HIF_TRACE("%s: E", __func__);

	/* do not support MSI or MSI IRQ failed */
	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
	ret = request_irq(sc->pdev->irq,
			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
			  "wlan_pci", sc);
	if (ret) {
		HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
		goto end;
	}
	scn->wake_irq = sc->pdev->irq;
	/* Use sc->irq instead of sc->pdev->irq
	 * platform_device pdev doesn't have an irq field
	 */
	sc->irq = sc->pdev->irq;
	/* Use Legacy PCI Interrupts */
	hif_write32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
		       HOST_GROUP0_MASK);
	hif_read32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
			       PCIE_INTR_ENABLE_ADDRESS));
	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);

	if ((target_type == TARGET_TYPE_IPQ4019) ||
	    (target_type == TARGET_TYPE_AR900B)  ||
	    (target_type == TARGET_TYPE_QCA9984) ||
	    (target_type == TARGET_TYPE_AR9888) ||
	    (target_type == TARGET_TYPE_QCA9888) ||
	    (target_type == TARGET_TYPE_AR6320V1) ||
	    (target_type == TARGET_TYPE_AR6320V2) ||
	    (target_type == TARGET_TYPE_AR6320V3)) {
		hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
			       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
	}
end:
	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
		  "%s: X, ret = %d", __func__, ret);
	return ret;
}

/**
 * hif_ce_srng_msi_free_irq() - free the MSI IRQs requested for the CEs
 * @scn: hif context
 *
 * Return: 0 on success; the pld error (e.g. -EINVAL when MSI assignment
 *	is unavailable) otherwise, which callers use to fall back.
 */
static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
{
	int ret;
	int ce_id, irq;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);

	ret =
	      pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		return ret;

	/* needs to match the ce_id -> irq data mapping
	 * used in the srng parameter configuration
	 */
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		unsigned int msi_data;

		if (!ce_sc->tasklets[ce_id].inited)
			continue;

		msi_data = (ce_id % msi_data_count) + msi_irq_start;
		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);

		HIF_INFO("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
			 ce_id, msi_data, irq);

		free_irq(irq, &ce_sc->tasklets[ce_id]);
	}

	return ret;
}

/* Free every IRQ held by the registered ext (exec/napi) groups */
static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
{
	int i, j, irq;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		if (hif_ext_group->irq_requested) {
			hif_ext_group->irq_requested = false;
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				free_irq(irq, hif_ext_group);
			}
			hif_ext_group->numirq = 0;
		}
	}
}

/**
 * hif_pci_nointrs(): disable IRQ
 *
 * This function stops interrupt(s)
 *
 * @scn: struct hif_softc
 *
 * Return: none
 */
void hif_pci_nointrs(struct hif_softc *scn)
{
	int i, ret;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	ce_unregister_irq(hif_state, CE_ALL_BITMAP);

	if (scn->request_irq_done == false)
		return;

	hif_pci_deconfigure_grp_irq(scn);

	ret = hif_ce_srng_msi_free_irq(scn);
	if (ret != -EINVAL) {
		/* ce irqs freed in hif_ce_srng_msi_free_irq */

		if (scn->wake_irq)
			free_irq(scn->wake_irq, scn);
		scn->wake_irq = 0;
	} else if (sc->num_msi_intrs > 0) {
		/* MSI interrupt(s) */
		for (i = 0; i < sc->num_msi_intrs; i++)
			free_irq(sc->irq + i, sc);
		sc->num_msi_intrs = 0;
	} else {
		/* Legacy PCI line interrupt
		 * Use sc->irq instead of sc->pdev->irq
		 * platform_device pdev doesn't have an irq field
		 */
		free_irq(sc->irq, sc);
	}
	scn->request_irq_done = false;
}

/**
 * hif_pci_disable_bus(): hif_disable_bus
 *
 * This function disables the bus
 *
 * @scn: hif context
 *
 * Return: none
 */
void hif_pci_disable_bus(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct pci_dev *pdev;
	void __iomem *mem;
	struct hif_target_info *tgt_info = &scn->target_info;

	/* Attach did not succeed, all resources have been
	 * freed in error handler
	 */
	if (!sc)
		return;

	pdev = sc->pdev;
	if (ADRASTEA_BU) {
		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));

		hif_write32_mb(sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
		hif_write32_mb(sc->mem + PCIE_INTR_CLR_ADDRESS,
			       HOST_GROUP0_MASK);
	}

#if defined(CPU_WARM_RESET_WAR)
	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
	 * verified for AR9888_REV1
	 */
	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
	    (tgt_info->target_version == AR9887_REV1_VERSION))
		hif_pci_device_warm_reset(sc);
	else
		hif_pci_device_reset(sc);
#else
	hif_pci_device_reset(sc);
#endif
	mem = (void __iomem *)sc->mem;
	if (mem) {
#ifndef CONFIG_PLD_PCIE_INIT
		pci_disable_msi(pdev);
#endif
		hif_dump_pipe_debug_count(scn);
		if (scn->athdiag_procfs_inited) {
			athdiag_procfs_remove();
			scn->athdiag_procfs_inited = false;
		}
		hif_pci_deinit(sc);
		scn->mem = NULL;
	}
	HIF_INFO("%s: X", __func__);
}

#define OL_ATH_PCI_PM_CONTROL 0x44

#ifdef FEATURE_RUNTIME_PM
/**
 * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occurring
 * @scn: hif context
 * @flag: prevent linkdown if true otherwise allow
 *
 * this api should only be called as part of bus prevent linkdown
 */
static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	struct hif_pci_softc *sc =
		HIF_GET_PCI_SOFTC(scn);

	if (flag)
		qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock);
	else
		qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock);
}
#else
/* runtime pm disabled: nothing to vote on */
static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
{
}
#endif

#if defined(CONFIG_PCI_MSM)
/**
 * hif_pci_prevent_linkdown(): prevent or allow linkdown
 * @flag: true prevents linkdown, false allows
 *
 * Calls into the platform driver to vote against taking down the
 * pcie link.
 *
 * Return: n/a
 */
void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	int errno;

	HIF_DBG("wlan: %s pcie power collapse", flag ? "disable" : "enable");
	hif_runtime_prevent_linkdown(scn, flag);

	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
	if (errno)
		HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d",
			  __func__, errno);
}
#else
void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	HIF_DBG("wlan: %s pcie power collapse",
		(flag ? "disable" : "enable"));
	hif_runtime_prevent_linkdown(scn, flag);
}
#endif

/* Arm scn->wake_irq as a system-wakeup source for suspend */
static int hif_mark_wake_irq_wakeable(struct hif_softc *scn)
{
	int errno;

	errno = enable_irq_wake(scn->wake_irq);
	if (errno) {
		HIF_ERROR("%s: Failed to mark wake IRQ: %d", __func__, errno);
		return errno;
	}

	return 0;
}

/**
 * hif_pci_bus_suspend(): prepare hif for suspend
 *
 * Enables pci bus wake irq based on link suspend voting.
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_pci_bus_suspend(struct hif_softc *scn)
{
	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
		return 0;

	/* pci link is staying up; enable wake irq */
	return hif_mark_wake_irq_wakeable(scn);
}

/**
 * __hif_check_link_status() - API to check if PCIe link is active/not
 * @scn: HIF Context
 *
 * API reads the PCIe config space to verify if PCIe link training is
 * successful or not.
 *
 * Return: Success/Failure
 */
static int __hif_check_link_status(struct hif_softc *scn)
{
	uint16_t dev_id = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (!sc) {
		HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
		return -EINVAL;
	}

	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);

	if (dev_id == sc->devid)
		return 0;

	HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
		  __func__, dev_id);

	scn->recovery = true;

	if (cbk && cbk->set_recovery_in_progress)
		cbk->set_recovery_in_progress(cbk->context, true);
	else
		HIF_ERROR("%s: Driver Global Recovery is not set", __func__);

	pld_is_pci_link_down(sc->dev);
	return -EACCES;
}

/* Disarm scn->wake_irq as a system-wakeup source after resume */
static int hif_unmark_wake_irq_wakeable(struct hif_softc *scn)
{
	int errno;

	errno = disable_irq_wake(scn->wake_irq);
	if (errno) {
		HIF_ERROR("%s: Failed to unmark wake IRQ: %d", __func__, errno);
		return errno;
	}

	return 0;
}

/**
 * hif_pci_bus_resume(): prepare hif for resume
 *
 * Disables pci bus wake irq based on link suspend voting.
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_pci_bus_resume(struct hif_softc *scn)
{
	int ret;

	ret = __hif_check_link_status(scn);
	if (ret)
		return ret;

	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
		return 0;

	/* pci link is up; disable wake irq */
	return hif_unmark_wake_irq_wakeable(scn);
}

/**
 * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
 * @scn: hif context
 *
 * Ensure that if we received the wakeup message before the irq
 * was disabled that the message is processed before suspending.
 *
 * Return: -EBUSY if we fail to flush the tasklets.
 */
int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
{
	if (hif_drain_tasklets(scn) != 0)
		return -EBUSY;

	/* Stop the HIF Sleep Timer */
	hif_cancel_deferred_target_sleep(scn);

	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
		qdf_atomic_set(&scn->link_suspended, 1);

	return 0;
}

/**
 * hif_pci_bus_resume_noirq() - ensure there are no pending transactions
 * @scn: hif context
 *
 * Ensure that if we received the wakeup message before the irq
 * was disabled that the message is processed before suspending.
 *
 * Return: -EBUSY if we fail to flush the tasklets.
 */
int hif_pci_bus_resume_noirq(struct hif_softc *scn)
{
	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
		qdf_atomic_set(&scn->link_suspended, 0);

	return 0;
}

#ifdef FEATURE_RUNTIME_PM
/**
 * __hif_runtime_pm_set_state(): utility function
 * @state: state to set
 *
 * indexes into the runtime pm state and sets it.
 */
static void __hif_runtime_pm_set_state(struct hif_softc *scn,
				       enum hif_pm_runtime_state state)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	if (NULL == sc) {
		HIF_ERROR("%s: HIF_CTX not initialized",
			  __func__);
		return;
	}

	qdf_atomic_set(&sc->pm_state, state);
}

/**
 * hif_runtime_pm_set_state_inprogress(): adjust runtime pm state
 *
 * Notify hif that a runtime pm operation has started
 */
static void hif_runtime_pm_set_state_inprogress(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_INPROGRESS);
}

/**
 * hif_runtime_pm_set_state_on(): adjust runtime pm state
 *
 * Notify hif that the runtime pm state should be on
 */
static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
}

/**
 * hif_runtime_pm_set_state_suspended(): adjust runtime pm state
 *
 * Notify hif that a runtime suspend attempt has been completed successfully
 */
static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
{
	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
}

/**
 * hif_log_runtime_suspend_success() - log a successful runtime suspend
 */
static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (sc == NULL)
		return;

	sc->pm_stats.suspended++;
	sc->pm_stats.suspend_jiffies = jiffies;
}

/**
 * hif_log_runtime_suspend_failure() - log a failed runtime suspend
 *
 * log a failed runtime suspend
 * mark last busy to prevent immediate runtime suspend
 */
static void hif_log_runtime_suspend_failure(void *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (sc == NULL)
		return;

	sc->pm_stats.suspend_err++;
}

/**
 * hif_log_runtime_resume_success() - log a successful runtime resume
 *
 * log a successful runtime resume
 * mark last busy to prevent immediate runtime suspend
 */
static void hif_log_runtime_resume_success(void *hif_ctx)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (sc == NULL)
		return;

	sc->pm_stats.resumed++;
}

/**
 * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
 *
 * Record the failure.
 * mark last busy to delay a retry.
 * adjust the runtime_pm state.
 */
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_suspend_failure(hif_ctx);
	if (hif_pci_sc != NULL)
		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
	hif_runtime_pm_set_state_on(scn);
}

/**
 * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
 *
 * Makes sure that the pci link will be taken down by the suspend operation.
 * If the hif layer is configured to leave the bus on, runtime suspend will
 * not save any power.
 *
 * Set the runtime suspend state to in progress.
 *
 * return -EINVAL if the bus won't go down. otherwise return 0
 */
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_can_suspend_link(hif_ctx)) {
		HIF_ERROR("Runtime PM not supported for link up suspend");
		return -EINVAL;
	}

	hif_runtime_pm_set_state_inprogress(scn);
	return 0;
}

/**
 * hif_process_runtime_suspend_success() - bookkeeping of suspend success
 *
 * Record the success.
 * adjust the runtime_pm state
 */
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_runtime_pm_set_state_suspended(scn);
	hif_log_runtime_suspend_success(scn);
}

/**
 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
 *
 * update the runtime pm state.
 */
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_runtime_pm_set_state_inprogress(scn);
}

/**
 * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
 *
 * record the success.
 * adjust the runtime_pm state
 */
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
{
	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_log_runtime_resume_success(hif_ctx);
	if (hif_pci_sc != NULL)
		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
	hif_runtime_pm_set_state_on(scn);
}

/**
 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
{
	int errno;

	errno = hif_bus_suspend(hif_ctx);
	if (errno) {
		HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
		return errno;
	}

	errno = hif_apps_irqs_disable(hif_ctx);
	if (errno) {
		HIF_ERROR("%s: failed disable irqs: %d", __func__, errno);
		goto bus_resume;
	}

	errno = hif_bus_suspend_noirq(hif_ctx);
	if (errno) {
		HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
		goto irqs_enable;
	}

	/* link should always be down; skip enable wake irq */

	return 0;

irqs_enable:
	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));

bus_resume:
	QDF_BUG(!hif_bus_resume(hif_ctx));

	return errno;
}

/**
 * hif_fastpath_resume() - resume fastpath for runtimepm
 *
 * ensure that the fastpath write index register is up to date
 * since runtime pm may cause ce_send_fast to skip the register
 * write.
 *
 * fastpath only applicable to legacy copy engine
 */
void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct CE_state *ce_state;

	if (!scn)
		return;

	if (scn->fastpath_mode_on) {
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return;

		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);

		/*war_ce_src_ring_write_idx_set */
		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
					  ce_state->src_ring->write_index);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
		Q_TARGET_ACCESS_END(scn);
	}
}

/**
 * hif_runtime_resume() - do the bus resume part of a runtime resume
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
{
	/* link should always be down; skip disable wake irq */

	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
	QDF_BUG(!hif_bus_resume(hif_ctx));
	return 0;
}
#endif /* #ifdef FEATURE_RUNTIME_PM */

#if CONFIG_PCIE_64BIT_MSI
/* Free the coherent "magic" DMA buffer used for 64-bit MSI bookkeeping */
static void hif_free_msi_ctx(struct hif_softc *scn)
{
	/* NOTE(review): other helpers use HIF_GET_PCI_SOFTC(scn); confirm
	 * scn->hif_sc is the equivalent handle here
	 */
	struct hif_pci_softc *sc = scn->hif_sc;
	struct hif_msi_info *info = &sc->msi_info;
	struct device *dev = scn->qdf_dev->dev;

	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
	info->magic = NULL;
	info->magic_dma = 0;
}
#else
static void hif_free_msi_ctx(struct hif_softc *scn)
{
}
#endif

/**
 * hif_pci_disable_isr() - stop all interrupt delivery and pending work
 * @scn: hif context
 */
void hif_pci_disable_isr(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	hif_exec_kill(&scn->osc);
	hif_nointrs(scn);
	hif_free_msi_ctx(scn);
	/* Cancel the pending tasklet */
	ce_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
}

/* Function to reset SoC */
void hif_pci_reset_soc(struct hif_softc *hif_sc)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);

#if defined(CPU_WARM_RESET_WAR)
	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
	 * verified for AR9888_REV1
	 */
	if (tgt_info->target_version == AR9888_REV2_VERSION)
		hif_pci_device_warm_reset(sc);
	else
		hif_pci_device_reset(sc);
#else
	hif_pci_device_reset(sc);
#endif
}

#ifdef CONFIG_PCI_MSM
/* Dump MSM PCIe root-complex debug state for both link partners */
static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
{
	msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
	msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
}
#else
static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {};
#endif

/**
 * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
 * @sc: HIF PCIe Context
 *
 * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
 *
 * Return: Failure to caller
 */
static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
{
	uint16_t val = 0;
	uint32_t bar = 0;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
	A_target_id_t pci_addr = scn->mem;

	HIF_ERROR("%s: keep_awake_count = %d",
		  __func__, hif_state->keep_awake_count);

	pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);

	HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);

	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);

	HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);

	pci_read_config_word(sc->pdev, PCI_COMMAND, &val);

	HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);

	pci_read_config_word(sc->pdev, PCI_STATUS, &val);

	HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);

	pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);

	HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);

	HIF_ERROR("%s: SOC_WAKE_ADDR 0%08x", __func__,
		  hif_read32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
				PCIE_SOC_WAKE_ADDRESS));

	HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
		  hif_read32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
				RTC_STATE_ADDRESS));

	HIF_ERROR("%s:error, wakeup target", __func__);
	hif_msm_pcie_debug_info(sc);

	if (!cfg->enable_self_recovery)
		QDF_BUG(0);

	scn->recovery = true;

	if (cbk->set_recovery_in_progress)
		cbk->set_recovery_in_progress(cbk->context, true);

	pld_is_pci_link_down(sc->dev);
	return -EACCES;
}

/*
 * For now, we use simple on-demand sleep/wake.
 * Some possible improvements:
 *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
 *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
 *   Careful, though, these functions may be used by
 *  interrupt handlers ("atomic")
 *  -Don't use host_reg_table for this code; instead use values directly
 *  -Use a separate timer to track activity and allow Target to sleep only
 *   if it hasn't done anything for a while; may even want to delay some
 *   processing for a short while in order to "batch" (e.g.) transmit
 *   requests with completion processing into "windows of up time".  Costs
 *   some performance, but improves power utilization.
 *  -On some platforms, it might be possible to eliminate explicit
 *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
 *   recover from the failure by forcing the Target awake.
 *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
 *   overhead in some cases. Perhaps this makes more sense when
 *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
 *   disabled.
 *  -It is possible to compile this code out and simply force the Target
 *   to remain awake.  That would yield optimal performance at the cost of
 *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
 *
 * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
 */
/**
 * hif_target_sleep_state_adjust() - on-demand sleep/wake
 * @scn: hif_softc pointer.
 * @sleep_ok: bool
 * @wait_for_it: bool
 *
 * Output the pipe error counts of each pipe to log file
 *
 * Return: int
 */
int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
				      bool sleep_ok, bool wait_for_it)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	A_target_id_t pci_addr = scn->mem;
	static int max_delay;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	static int debug;

	if (scn->recovery)
		return -EACCES;

	if (qdf_atomic_read(&scn->link_suspended)) {
		HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
		debug = true;
		QDF_ASSERT(0);
		return -EACCES;
	}

	if (debug) {
		wait_for_it = true;
		HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
			  __func__);
		QDF_ASSERT(0);
	}

	if (sleep_ok) {
		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
		hif_state->keep_awake_count--;
		if (hif_state->keep_awake_count == 0) {
			/* Allow sleep */
			hif_state->verified_awake = false;
			hif_state->sleep_ticks = qdf_system_ticks();
		}
		if (hif_state->fake_sleep == false) {
			/* Set the Fake Sleep */
			hif_state->fake_sleep = true;

			/* Start the Sleep Timer */
			qdf_timer_stop(&hif_state->sleep_timer);
			qdf_timer_start(&hif_state->sleep_timer,
					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
	} else {
		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);

		if (hif_state->fake_sleep) {
			hif_state->verified_awake = true;
		} else {
			if (hif_state->keep_awake_count == 0) {
				/* Force AWAKE */
				hif_write32_mb(pci_addr +
					       PCIE_LOCAL_BASE_ADDRESS +
					       PCIE_SOC_WAKE_ADDRESS,
					       PCIE_SOC_WAKE_V_MASK);
			}
		}
		hif_state->keep_awake_count++;
		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);

		if (wait_for_it && !hif_state->verified_awake) {
#define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8Ms */
			int tot_delay = 0;
			int curr_delay = 5;

			for (;; ) {
				if (hif_targ_is_awake(scn, pci_addr)) {
					hif_state->verified_awake = true;
					break;
				}
				if (!hif_pci_targ_is_present(scn, pci_addr))
					break;
				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
					return hif_log_soc_wakeup_timeout(sc);

				OS_DELAY(curr_delay);
				tot_delay += curr_delay;

				if (curr_delay < 50)
					curr_delay += 5;
			}

			/*
			 * NB: If Target has to come out of Deep Sleep,
			 * this may take a few Msecs. Typically, though
			 * this delay should be <30us.
			 */
			if (tot_delay > max_delay)
				max_delay = tot_delay;
		}
	}

	if (debug && hif_state->verified_awake) {
		debug = 0;
		HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
			  __func__,
			  hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
					PCIE_INTR_ENABLE_ADDRESS),
			  hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
					PCIE_INTR_CAUSE_ADDRESS),
			  hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
					CPU_INTR_ADDRESS),
			  hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
					PCIE_INTR_CLR_ADDRESS),
			  hif_read32_mb(sc->mem + CE_WRAPPER_BASE_ADDRESS +
					CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
	}

	return 0;
}

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/* Read a target register and record the access in the pcie access log */
uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
{
	uint32_t value;
	void *addr;

	addr = scn->mem + offset;
	value = hif_read32_mb(addr);

	{
		unsigned long irq_flags;
		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;

		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
		pcie_access_log[idx].is_write = false;
		pcie_access_log[idx].addr = addr;
		pcie_access_log[idx].value = value;
		pcie_access_log_seqnum++;
		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
	}

	return value;
}

/* Write a target register and record the access in the pcie access log */
void
hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
{
	void *addr;

	addr = scn->mem + (offset);
	hif_write32_mb(addr, value);

	{
		unsigned long irq_flags;
		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;

		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
		pcie_access_log[idx].is_write = true;
		pcie_access_log[idx].addr = addr;
		pcie_access_log[idx].value = value;
		pcie_access_log_seqnum++;
		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
	}
}

/**
 * hif_target_dump_access_log() - dump access log
 *
 * dump access log
 *
 * Return: n/a
 */
void hif_target_dump_access_log(void)
{
	int idx, len, start_idx, cur_idx;
	unsigned long irq_flags;

	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
		len = PCIE_ACCESS_LOG_NUM;
		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
	} else {
		len = pcie_access_log_seqnum;
		start_idx = 0;
	}

	for (idx = 0; idx < len; idx++) {
		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
		HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.",
			  __func__, idx,
			  pcie_access_log[cur_idx].seqnum,
			  pcie_access_log[cur_idx].is_write,
			  pcie_access_log[cur_idx].addr,
			  pcie_access_log[cur_idx].value);
	}

	pcie_access_log_seqnum = 0;
	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
}
#endif

#ifndef HIF_AHB
/* AHB stubs: these must never be reached on a PCI-only build */
int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
{
	QDF_BUG(0);
	return -EINVAL;
}

int hif_ahb_configure_irq(struct hif_pci_softc *sc)
{
	QDF_BUG(0);
	return -EINVAL;
}
#endif

/* Top-half for per-CE MSI vectors; dispatches to the CE tasklet machinery */
static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;

	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
}

extern const char *ce_name[];
+ +static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id) +{ + struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn); + + return pci_scn->ce_msi_irq_num[ce_id]; +} + +/* hif_srng_msi_irq_disable() - disable the irq for msi + * @hif_sc: hif context + * @ce_id: which ce to disable copy complete interrupts for + * + * since MSI interrupts are not level based, the system can function + * without disabling these interrupts. Interrupt mitigation can be + * added here for better system performance. + */ +static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id) +{ + disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id)); +} + +static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id) +{ + enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id)); +} + +static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id) +{} + +static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id) +{} + +static int hif_ce_msi_configure_irq(struct hif_softc *scn) +{ + int ret; + int ce_id, irq; + uint32_t msi_data_start; + uint32_t msi_data_count; + uint32_t msi_irq_start; + struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn); + struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn); + + /* do wake irq assignment */ + ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE", + &msi_data_count, &msi_data_start, + &msi_irq_start); + if (ret) + return ret; + + scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_irq_start); + ret = request_irq(scn->wake_irq, hif_wake_interrupt_handler, 0, + "wlan_wake_irq", scn); + if (ret) + return ret; + + /* do ce irq assignments */ + ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", + &msi_data_count, &msi_data_start, + &msi_irq_start); + if (ret) + goto free_wake_irq; + + if (ce_srng_based(scn)) { + scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable; + scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable; + } else { + 
scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable; + scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable; + } + + scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq; + + /* needs to match the ce_id -> irq data mapping + * used in the srng parameter configuration + */ + for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { + unsigned int msi_data = (ce_id % msi_data_count) + + msi_irq_start; + irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data); + HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)", + __func__, ce_id, msi_data, irq, + &ce_sc->tasklets[ce_id]); + + /* implies the ce is also initialized */ + if (!ce_sc->tasklets[ce_id].inited) + continue; + + pci_sc->ce_msi_irq_num[ce_id] = irq; + ret = request_irq(irq, hif_ce_interrupt_handler, + IRQF_SHARED, + ce_name[ce_id], + &ce_sc->tasklets[ce_id]); + if (ret) + goto free_irq; + } + + return ret; + +free_irq: + /* the request_irq for the last ce_id failed so skip it. */ + while (ce_id > 0 && ce_id < scn->ce_count) { + unsigned int msi_data; + + ce_id--; + /* NOTE(review): the request path above offsets by msi_irq_start but + * this unwind offsets by msi_data_start, so a different irq may be + * freed than was requested - confirm which base is intended. + */ + msi_data = (ce_id % msi_data_count) + msi_data_start; + irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data); + free_irq(irq, &ce_sc->tasklets[ce_id]); + } + +free_wake_irq: + /* NOTE(review): request_irq() for wake_irq passed dev_id 'scn', but this + * free_irq() passes 'scn->qdf_dev->dev' - dev_id mismatch means the + * handler may not be released; verify against the request path. + */ + free_irq(scn->wake_irq, scn->qdf_dev->dev); + scn->wake_irq = 0; + + return ret; +} + +static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group) +{ + int i; + + for (i = 0; i < hif_ext_group->numirq; i++) + disable_irq_nosync(hif_ext_group->os_irq[i]); +} + +static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group) +{ + int i; + + for (i = 0; i < hif_ext_group->numirq; i++) + enable_irq(hif_ext_group->os_irq[i]); +} + + +int hif_pci_configure_grp_irq(struct hif_softc *scn, + struct hif_exec_context *hif_ext_group) +{ + int ret = 0; + int irq = 0; + int j; + + hif_ext_group->irq_enable = &hif_exec_grp_irq_enable; + hif_ext_group->irq_disable = &hif_exec_grp_irq_disable; + hif_ext_group->work_complete = &hif_dummy_grp_done; + + 
for (j = 0; j < hif_ext_group->numirq; j++) { + irq = hif_ext_group->irq[j]; + + HIF_DBG("%s: request_irq = %d for grp %d", + __func__, irq, hif_ext_group->grp_id); + ret = request_irq(irq, + hif_ext_group_interrupt_handler, + IRQF_SHARED, "wlan_EXT_GRP", + hif_ext_group); + if (ret) { + HIF_ERROR("%s: request_irq failed ret = %d", + __func__, ret); + return -EFAULT; + } + hif_ext_group->os_irq[j] = irq; + } + hif_ext_group->irq_requested = true; + return 0; +} + +/** + * hif_configure_irq() - configure interrupt + * + * This function configures interrupt(s) + * + * @sc: PCIe control struct + * @hif_hdl: struct HIF_CE_state + * + * Return: 0 - for success + */ +int hif_configure_irq(struct hif_softc *scn) +{ + int ret = 0; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + + HIF_TRACE("%s: E", __func__); + if (scn->polled_mode_on) { + scn->request_irq_done = false; + return 0; + } + + hif_init_reschedule_tasklet_work(sc); + + ret = hif_ce_msi_configure_irq(scn); + if (ret == 0) { + goto end; + } + + if (ENABLE_MSI) { + ret = hif_configure_msi(sc); + if (ret == 0) + goto end; + } + /* MSI failed. Try legacy irq */ + switch (scn->target_info.target_type) { + case TARGET_TYPE_IPQ4019: + ret = hif_ahb_configure_legacy_irq(sc); + break; + case TARGET_TYPE_QCA8074: + ret = hif_ahb_configure_irq(sc); + break; + default: + ret = hif_pci_configure_legacy_irq(sc); + break; + } + if (ret < 0) { + HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d", + __func__, ret); + return ret; + } +end: + scn->request_irq_done = true; + return 0; +} + +/** + * hif_target_sync() : ensure the target is ready + * @scn: hif control structure + * + * Informs fw that we plan to use legacy interrupts so that + * it can begin booting. Ensures that the fw finishes booting + * before continuing. Should be called before trying to write + * to the target's other registers for the first time. 
+ * + * Return: none + */ +static void hif_target_sync(struct hif_softc *scn) +{ + hif_write32_mb(scn->mem+(SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS), + PCIE_INTR_FIRMWARE_MASK); + + hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS, + PCIE_SOC_WAKE_V_MASK); + while (!hif_targ_is_awake(scn, scn->mem)) + ; + + if (HAS_FW_INDICATOR) { + int wait_limit = 500; + int fw_ind = 0; + + HIF_TRACE("%s: Loop checking FW signal", __func__); + while (1) { + fw_ind = hif_read32_mb(scn->mem + + FW_INDICATOR_ADDRESS); + if (fw_ind & FW_IND_INITIALIZED) + break; + if (wait_limit-- < 0) + break; + hif_write32_mb(scn->mem+(SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS), + PCIE_INTR_FIRMWARE_MASK); + + qdf_mdelay(10); + } + if (wait_limit < 0) + HIF_TRACE("%s: FW signal timed out", + __func__); + else + HIF_TRACE("%s: Got FW signal, retries = %x", + __func__, 500-wait_limit); + } + hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); +} + +#ifdef CONFIG_PLD_PCIE_INIT +static void hif_pci_get_soc_info(struct hif_pci_softc *sc, struct device *dev) +{ + struct pld_soc_info info; + + pld_get_soc_info(dev, &info); + sc->mem = info.v_addr; + sc->ce_sc.ol_sc.mem = info.v_addr; + sc->ce_sc.ol_sc.mem_pa = info.p_addr; +} +#else +static void hif_pci_get_soc_info(struct hif_pci_softc *sc, struct device *dev) +{} +#endif + +/** + * hif_enable_bus(): enable bus + * + * This function enables the bus + * + * @ol_sc: soft_sc struct + * @dev: device pointer + * @bdev: bus dev pointer + * bid: bus id pointer + * type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE + * Return: QDF_STATUS + */ +QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type) +{ + int ret = 0; + uint32_t hif_type, target_type; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc); + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc); + 
uint16_t revision_id; + int probe_again = 0; + struct pci_dev *pdev = bdev; + const struct pci_device_id *id = (const struct pci_device_id *)bid; + struct hif_target_info *tgt_info; + + if (!ol_sc) { + HIF_ERROR("%s: hif_ctx is NULL", __func__); + return QDF_STATUS_E_NOMEM; + } + + HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x", + __func__, hif_get_conparam(ol_sc), id->device); + + sc->pdev = pdev; + sc->dev = &pdev->dev; + sc->devid = id->device; + sc->cacheline_sz = dma_get_cache_alignment(); + tgt_info = hif_get_target_info_handle(hif_hdl); + hif_pci_get_soc_info(sc, dev); +again: + ret = hif_enable_pci(sc, pdev, id); + if (ret < 0) { + HIF_ERROR("%s: ERROR - hif_enable_pci error = %d", + __func__, ret); + goto err_enable_pci; + } + HIF_TRACE("%s: hif_enable_pci done", __func__); + + /* Temporary FIX: disable ASPM on peregrine. + * Will be removed after the OTP is programmed + */ + hif_disable_power_gating(hif_hdl); + + device_disable_async_suspend(&pdev->dev); + pci_read_config_word(pdev, 0x08, &revision_id); + + ret = hif_get_device_type(id->device, revision_id, + &hif_type, &target_type); + if (ret < 0) { + HIF_ERROR("%s: invalid device id/revision_id", __func__); + goto err_tgtstate; + } + HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x", + __func__, hif_type, target_type); + + hif_register_tbl_attach(ol_sc, hif_type); + hif_target_register_tbl_attach(ol_sc, target_type); + + tgt_info->target_type = target_type; + + if (ce_srng_based(ol_sc)) { + HIF_TRACE("%s:Skip tgt_wake up for srng devices\n", __func__); + } else { + ret = hif_pci_probe_tgt_wakeup(sc); + if (ret < 0) { + HIF_ERROR("%s: ERROR - hif_pci_prob_wakeup error = %d", + __func__, ret); + if (ret == -EAGAIN) + probe_again++; + goto err_tgtstate; + } + HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__); + } + + if (!ol_sc->mem_pa) { + HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__); + ret = -EIO; + goto err_tgtstate; + } + + if (!ce_srng_based(ol_sc)) { + hif_target_sync(ol_sc); + 
+ if (ADRASTEA_BU) + hif_vote_link_up(hif_hdl); + } + + return 0; + +err_tgtstate: + hif_disable_pci(sc); + sc->pci_enabled = false; + HIF_ERROR("%s: error, hif_disable_pci done", __func__); + return QDF_STATUS_E_ABORTED; + +err_enable_pci: + if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) { + int delay_time; + + HIF_INFO("%s: pci reprobe", __func__); + /* 10, 40, 90, 100, 100, ... */ + delay_time = max(100, 10 * (probe_again * probe_again)); + qdf_mdelay(delay_time); + goto again; + } + return ret; +} + +/** + * hif_pci_irq_enable() - ce_irq_enable + * @scn: hif_softc + * @ce_id: ce_id + * + * Return: void + */ +void hif_pci_irq_enable(struct hif_softc *scn, int ce_id) +{ + uint32_t tmp = 1 << ce_id; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + + qdf_spin_lock_irqsave(&sc->irq_lock); + scn->ce_irq_summary &= ~tmp; + if (scn->ce_irq_summary == 0) { + /* Enable Legacy PCI line interrupts */ + if (LEGACY_INTERRUPTS(sc) && + (scn->target_status != TARGET_STATUS_RESET) && + (!qdf_atomic_read(&scn->link_suspended))) { + + hif_write32_mb(scn->mem + + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS), + HOST_GROUP0_MASK); + + hif_read32_mb(scn->mem + + (SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS)); + } + } + if (scn->hif_init_done == true) + Q_TARGET_ACCESS_END(scn); + qdf_spin_unlock_irqrestore(&sc->irq_lock); + + /* check for missed firmware crash */ + hif_fw_interrupt_handler(0, scn); +} + +/** + * hif_pci_irq_disable() - ce_irq_disable + * @scn: hif_softc + * @ce_id: ce_id + * + * only applicable to legacy copy engine... 
+ * + * Return: void + */ +void hif_pci_irq_disable(struct hif_softc *scn, int ce_id) +{ + /* For Rome only need to wake up target */ + /* target access is maintained until interrupts are re-enabled */ + Q_TARGET_ACCESS_BEGIN(scn); +} + +#ifdef FEATURE_RUNTIME_PM + +void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + if (NULL == sc) + return; + + sc->pm_stats.runtime_get++; + pm_runtime_get_noresume(sc->dev); +} + +/** + * hif_pm_runtime_get() - do a get operation on the device + * + * A get operation will prevent a runtime suspend until a + * corresponding put is done. This api should be used when sending + * data. + * + * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED, + * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!! + * + * return: success if the bus is up and a get has been issued + * otherwise an error code. + */ +int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + int ret; + int pm_state; + + if (NULL == scn) { + HIF_ERROR("%s: Could not do runtime get, scn is null", + __func__); + return -EFAULT; + } + + pm_state = qdf_atomic_read(&sc->pm_state); + + if (pm_state == HIF_PM_RUNTIME_STATE_ON || + pm_state == HIF_PM_RUNTIME_STATE_NONE) { + sc->pm_stats.runtime_get++; + ret = __hif_pm_runtime_get(sc->dev); + + /* Get can return 1 if the device is already active, just return + * success in that case + */ + if (ret > 0) + ret = 0; + + if (ret) + hif_pm_runtime_put(hif_ctx); + + if (ret && ret != -EINPROGRESS) { + sc->pm_stats.runtime_get_err++; + HIF_ERROR("%s: Runtime Get PM Error in pm_state:%d ret: %d", + __func__, qdf_atomic_read(&sc->pm_state), ret); + } + + return ret; + } + + sc->pm_stats.request_resume++; + sc->pm_stats.last_resume_caller = (void *)_RET_IP_; + ret = hif_pm_request_resume(sc->dev); + + return -EAGAIN; +} + +/** + * 
hif_pm_runtime_put() - do a put operation on the device + * + * A put operation will allow a runtime suspend after a corresponding + * get was done. This api should be used when sending data. + * + * This api will return a failure if runtime pm is stopped + * This api will return failure if it would decrement the usage count below 0. + * + * return: QDF_STATUS_SUCCESS if the put is performed + */ +int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx) +{ + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + int pm_state, usage_count; + char *error = NULL; + + if (NULL == scn) { + HIF_ERROR("%s: Could not do runtime put, scn is null", + __func__); + return -EFAULT; + } + usage_count = atomic_read(&sc->dev->power.usage_count); + + if (usage_count == 1) { + pm_state = qdf_atomic_read(&sc->pm_state); + + if (pm_state == HIF_PM_RUNTIME_STATE_NONE) + error = "Ignoring unexpected put when runtime pm is disabled"; + + } else if (usage_count == 0) { + error = "PUT Without a Get Operation"; + } + + if (error) { + hif_pci_runtime_pm_warn(sc, error); + return -EINVAL; + } + + sc->pm_stats.runtime_put++; + + hif_pm_runtime_mark_last_busy(sc->dev); + hif_pm_runtime_put_auto(sc->dev); + + return 0; +} + + +/** + * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol + * reason + * @hif_sc: pci context + * @lock: runtime_pm lock being acquired + * + * Return 0 if successful. + */ +static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc + *hif_sc, struct hif_pm_runtime_lock *lock) +{ + int ret = 0; + + /* + * We shouldn't be setting context->timeout to zero here when + * context is active as we will have a case where Timeout API's + * for the same context called back to back. 
+ * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm + * Set context->timeout to zero in hif_pm_runtime_prevent_suspend + * API to ensure the timeout version is no more active and + * list entry of this context will be deleted during allow suspend. + */ + if (lock->active) + return 0; + + ret = __hif_pm_runtime_get(hif_sc->dev); + + /** + * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or + * RPM_SUSPENDING. Any other negative value is an error. + * We shouldn't do runtime_put here as at a later point allow + * suspend gets called with the context and there the usage count + * is decremented, so suspend will be prevented. + */ + + if (ret < 0 && ret != -EINPROGRESS) { + hif_sc->pm_stats.runtime_get_err++; + hif_pci_runtime_pm_warn(hif_sc, + "Prevent Suspend Runtime PM Error"); + } + + hif_sc->prevent_suspend_cnt++; + + lock->active = true; + + list_add_tail(&lock->list, &hif_sc->prevent_suspend_list); + + hif_sc->pm_stats.prevent_suspend++; + + HIF_ERROR("%s: in pm_state:%s ret: %d", __func__, + hif_pm_runtime_state_to_string( + qdf_atomic_read(&hif_sc->pm_state)), + ret); + + return ret; +} + +static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc, + struct hif_pm_runtime_lock *lock) +{ + int ret = 0; + int usage_count; + + if (hif_sc->prevent_suspend_cnt == 0) + return ret; + + if (!lock->active) + return ret; + + usage_count = atomic_read(&hif_sc->dev->power.usage_count); + + /* + * During Driver unload, platform driver increments the usage + * count to prevent any runtime suspend getting called. + * So during driver load in HIF_PM_RUNTIME_STATE_NONE state the + * usage_count should be one. Ideally this shouldn't happen as + * context->active should be active for allow suspend to happen + * Handling this case here to prevent any failures. 
+ */ + if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE + && usage_count == 1) || usage_count == 0) { + hif_pci_runtime_pm_warn(hif_sc, + "Allow without a prevent suspend"); + return -EINVAL; + } + + list_del(&lock->list); + + hif_sc->prevent_suspend_cnt--; + + lock->active = false; + lock->timeout = 0; + + hif_pm_runtime_mark_last_busy(hif_sc->dev); + ret = hif_pm_runtime_put_auto(hif_sc->dev); + + HIF_ERROR("%s: in pm_state:%s ret: %d", __func__, + hif_pm_runtime_state_to_string( + qdf_atomic_read(&hif_sc->pm_state)), + ret); + + hif_sc->pm_stats.allow_suspend++; + return ret; +} + +/** + * hif_pm_runtime_lock_timeout_fn() - callback for the runtime lock timeout + * @data: callback data that is the pci context + * + * if runtime locks are acquired with a timeout, this function releases + * the locks when the last runtime lock expires. + * + * dummy implementation until lock acquisition is implemented. + */ +static void hif_pm_runtime_lock_timeout_fn(void *data) +{ + struct hif_pci_softc *hif_sc = data; + unsigned long timer_expires; + struct hif_pm_runtime_lock *context, *temp; + + spin_lock_bh(&hif_sc->runtime_lock); + + timer_expires = hif_sc->runtime_timer_expires; + + /* Make sure we are not called too early, this should take care of + * following case + * + * CPU0 CPU1 (timeout function) + * ---- ---------------------- + * spin_lock_irq + * timeout function called + * + * mod_timer() + * + * spin_unlock_irq + * spin_lock_irq + */ + if (timer_expires > 0 && !time_after(timer_expires, jiffies)) { + hif_sc->runtime_timer_expires = 0; + list_for_each_entry_safe(context, temp, + &hif_sc->prevent_suspend_list, list) { + if (context->timeout) { + __hif_pm_runtime_allow_suspend(hif_sc, context); + hif_sc->pm_stats.allow_suspend_timeout++; + } + } + } + + spin_unlock_bh(&hif_sc->runtime_lock); +} + +int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc, + struct hif_pm_runtime_lock *data) +{ + struct hif_softc *sc = HIF_GET_SOFTC(ol_sc); + 
struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc); + struct hif_pm_runtime_lock *context = data; + + if (!sc->hif_config.enable_runtime_pm) + return 0; + + if (!context) + return -EINVAL; + + if (in_irq()) + WARN_ON(1); + + spin_lock_bh(&hif_sc->runtime_lock); + context->timeout = 0; + __hif_pm_runtime_prevent_suspend(hif_sc, context); + spin_unlock_bh(&hif_sc->runtime_lock); + + return 0; +} + +int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc, + struct hif_pm_runtime_lock *data) +{ + struct hif_softc *sc = HIF_GET_SOFTC(ol_sc); + struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc); + struct hif_pm_runtime_lock *context = data; + + if (!sc->hif_config.enable_runtime_pm) + return 0; + + if (!context) + return -EINVAL; + + if (in_irq()) + WARN_ON(1); + + spin_lock_bh(&hif_sc->runtime_lock); + + __hif_pm_runtime_allow_suspend(hif_sc, context); + + /* The list can be empty as well in cases where + * we have one context in the list and the allow + * suspend came before the timer expires and we delete + * context above from the list. + * When list is empty prevent_suspend count will be zero. + */ + if (hif_sc->prevent_suspend_cnt == 0 && + hif_sc->runtime_timer_expires > 0) { + qdf_timer_free(&hif_sc->runtime_timer); + hif_sc->runtime_timer_expires = 0; + } + + spin_unlock_bh(&hif_sc->runtime_lock); + + return 0; +} + +/** + * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout + * @ol_sc: HIF context + * @lock: which lock is being acquired + * @delay: Timeout in milliseconds + * + * Prevent runtime suspend with a timeout after which runtime suspend would be + * allowed. This API uses a single timer to allow the suspend and timer is + * modified if the timeout is changed before timer fires. + * If the timeout is less than autosuspend_delay then use mark_last_busy instead + * of starting the timer. + * + * It is wise to try not to use this API and correct the design if possible. 
+ * + * Return: 0 on success and negative error code on failure + */ +int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc, + struct hif_pm_runtime_lock *lock, unsigned int delay) +{ + struct hif_softc *sc = HIF_GET_SOFTC(ol_sc); + struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc); + + int ret = 0; + unsigned long expires; + struct hif_pm_runtime_lock *context = lock; + + if (hif_is_load_or_unload_in_progress(sc)) { + HIF_ERROR("%s: Load/unload in progress, ignore!", + __func__); + return -EINVAL; + } + + if (hif_is_recovery_in_progress(sc)) { + HIF_ERROR("%s: LOGP in progress, ignore!", __func__); + return -EINVAL; + } + + if (!sc->hif_config.enable_runtime_pm) + return 0; + + if (!context) + return -EINVAL; + + if (in_irq()) + WARN_ON(1); + + /* + * Don't use internal timer if the timeout is less than auto suspend + * delay. + */ + if (delay <= hif_sc->dev->power.autosuspend_delay) { + hif_pm_request_resume(hif_sc->dev); + hif_pm_runtime_mark_last_busy(hif_sc->dev); + return ret; + } + + expires = jiffies + msecs_to_jiffies(delay); + expires += !expires; + + spin_lock_bh(&hif_sc->runtime_lock); + + context->timeout = delay; + ret = __hif_pm_runtime_prevent_suspend(hif_sc, context); + hif_sc->pm_stats.prevent_suspend_timeout++; + + /* Modify the timer only if new timeout is after already configured + * timeout + */ + if (time_after(expires, hif_sc->runtime_timer_expires)) { + qdf_timer_mod(&hif_sc->runtime_timer, delay); + hif_sc->runtime_timer_expires = expires; + } + + spin_unlock_bh(&hif_sc->runtime_lock); + + HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__, + hif_pm_runtime_state_to_string( + qdf_atomic_read(&hif_sc->pm_state)), + delay, ret); + + return ret; +} + +/** + * hif_runtime_lock_init() - API to initialize Runtime PM context + * @name: Context name + * + * This API initializes the Runtime PM context of the caller and + * return the pointer. 
+ * + * Return: None + */ +int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name) +{ + struct hif_pm_runtime_lock *context; + + HIF_INFO("Initializing Runtime PM wakelock %s", name); + + context = qdf_mem_malloc(sizeof(*context)); + if (!context) { + HIF_ERROR("%s: No memory for Runtime PM wakelock context", + __func__); + return -ENOMEM; + } + + context->name = name ? name : "Default"; + lock->lock = context; + + return 0; +} + +/** + * hif_runtime_lock_deinit() - This API frees the runtime pm ctx + * @data: Runtime PM context + * + * Return: void + */ +void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx, + struct hif_pm_runtime_lock *data) +{ + struct hif_pm_runtime_lock *context = data; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + if (!context) { + HIF_ERROR("Runtime PM wakelock context is NULL"); + return; + } + + HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name); + + /* + * Ensure to delete the context list entry and reduce the usage count + * before freeing the context if context is active. + */ + if (sc) { + spin_lock_bh(&sc->runtime_lock); + __hif_pm_runtime_allow_suspend(sc, context); + spin_unlock_bh(&sc->runtime_lock); + } + + qdf_mem_free(context); +} +#endif /* FEATURE_RUNTIME_PM */ + +int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id) +{ + struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn); + + /* legacy case only has one irq */ + return pci_scn->irq; +} + +int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + struct hif_target_info *tgt_info; + + tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn)); + + if (tgt_info->target_type == TARGET_TYPE_QCA6290 || + tgt_info->target_type == TARGET_TYPE_QCA8074) { + /* + * Need to consider offset's memtype for QCA6290/QCA8074, + * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be + * well initialized/defined. 
+ */ + return 0; + } + + if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE) + || (offset + sizeof(unsigned int) <= sc->mem_len)) { + return 0; + } + + HIF_TRACE("Refusing to read memory at 0x%x - 0x%lx (max 0x%zx)\n", + offset, offset + sizeof(unsigned int), sc->mem_len); + + return -EINVAL; +} + +/** + * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver + * @scn: hif context + * + * Return: true if soc needs driver bmi otherwise false + */ +bool hif_pci_needs_bmi(struct hif_softc *scn) +{ + return !ce_srng_based(scn); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci.h b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci.h new file mode 100644 index 0000000000000000000000000000000000000000..d490c48b769d0cf54321ffb53d23c455baafb489 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci.h @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __ATH_PCI_H__ +#define __ATH_PCI_H__ + +#include +#include +#include + +#define ATH_DBG_DEFAULT 0 +#define DRAM_SIZE 0x000a8000 +#include "hif.h" +#include "cepci.h" +#include "ce_main.h" + +/* An address (e.g. of a buffer) in Copy Engine space. 
*/ + +#define HIF_MAX_TASKLET_NUM 11 +struct hif_tasklet_entry { + uint8_t id; /* 0 - 9: maps to CE, 10: fw */ + void *hif_handler; /* struct hif_pci_softc */ +}; + +/** + * enum hif_pm_runtime_state - Driver States for Runtime Power Management + * HIF_PM_RUNTIME_STATE_NONE: runtime pm is off + * HIF_PM_RUNTIME_STATE_ON: runtime pm is active and link is active + * HIF_PM_RUNTIME_STATE_INPROGRESS: a runtime suspend or resume is in progress + * HIF_PM_RUNTIME_STATE_SUSPENDED: the driver is runtime suspended + */ +enum hif_pm_runtime_state { + HIF_PM_RUNTIME_STATE_NONE, + HIF_PM_RUNTIME_STATE_ON, + HIF_PM_RUNTIME_STATE_INPROGRESS, + HIF_PM_RUNTIME_STATE_SUSPENDED, +}; + +#ifdef FEATURE_RUNTIME_PM + +/** + * struct hif_pm_runtime_lock - data structure for preventing runtime suspend + * @list - global list of runtime locks + * @active - true if this lock is preventing suspend + * @name - character string for tracking this lock + */ +struct hif_pm_runtime_lock { + struct list_head list; + bool active; + uint32_t timeout; + const char *name; +}; + +/* Debugging stats for Runtime PM */ +struct hif_pci_pm_stats { + u32 suspended; + u32 suspend_err; + u32 resumed; + u32 runtime_get; + u32 runtime_put; + u32 request_resume; + u32 allow_suspend; + u32 prevent_suspend; + u32 prevent_suspend_timeout; + u32 allow_suspend_timeout; + u32 runtime_get_err; + void *last_resume_caller; + unsigned long suspend_jiffies; +}; +#endif + +/** + * struct hif_msi_info - Structure to hold msi info + * @magic: cookie + * @magic_da: dma address + * @dmaContext: dma address + * + * Structure to hold MSI information for PCIe interrupts + */ +struct hif_msi_info { + void *magic; + dma_addr_t magic_da; + OS_DMA_MEM_CONTEXT(dmacontext); +}; + +struct hif_pci_softc { + struct HIF_CE_state ce_sc; + void __iomem *mem; /* PCI address. 
*/ + size_t mem_len; + + struct device *dev; /* For efficiency, should be first in struct */ + struct pci_dev *pdev; + int num_msi_intrs; /* number of MSI interrupts granted */ + /* 0 --> using legacy PCI line interrupts */ + struct tasklet_struct intr_tq; /* tasklet */ + struct hif_msi_info msi_info; + int ce_msi_irq_num[CE_COUNT_MAX]; + int irq; + int irq_event; + int cacheline_sz; + u16 devid; + struct hif_tasklet_entry tasklet_entries[HIF_MAX_TASKLET_NUM]; + bool pci_enabled; + qdf_spinlock_t irq_lock; + qdf_work_t reschedule_tasklet_work; + uint32_t lcr_val; +#ifdef FEATURE_RUNTIME_PM + atomic_t pm_state; + uint32_t prevent_suspend_cnt; + struct hif_pci_pm_stats pm_stats; + struct work_struct pm_work; + spinlock_t runtime_lock; + qdf_timer_t runtime_timer; + struct list_head prevent_suspend_list; + unsigned long runtime_timer_expires; + qdf_runtime_lock_t prevent_linkdown_lock; +#ifdef WLAN_OPEN_SOURCE + struct dentry *pm_dentry; +#endif +#endif +}; + +bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem); +int hif_configure_irq(struct hif_softc *sc); +void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn); +void wlan_tasklet(unsigned long data); +irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg); +int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset); + +/* + * A firmware interrupt to the Host is indicated by the + * low bit of SCRATCH_3_ADDRESS being set. + */ +#define FW_EVENT_PENDING_REG_ADDRESS SCRATCH_3_ADDRESS + +/* + * Typically, MSI Interrupts are used with PCIe. To force use of legacy + * "ABCD" PCI line interrupts rather than MSI, define + * FORCE_LEGACY_PCI_INTERRUPTS. + * Even when NOT forced, the driver may attempt to use legacy PCI interrupts + * if MSI allocation fails + */ +#define LEGACY_INTERRUPTS(sc) ((sc)->num_msi_intrs == 0) + +/* + * There may be some pending tx frames during platform suspend. 
+ * Suspend operation should be delayed until those tx frames are + * transferred from the host to target. This macro specifies how + * long suspend thread has to sleep before checking pending tx + * frame count. + */ +#define OL_ATH_TX_DRAIN_WAIT_DELAY 50 /* ms */ + +#define HIF_CE_DRAIN_WAIT_DELAY 10 /* ms */ +/* + * Wait time (in unit of OL_ATH_TX_DRAIN_WAIT_DELAY) for pending + * tx frame completion before suspend. Refer: hif_pci_suspend() + */ +#ifndef QCA_WIFI_3_0_EMU +#define OL_ATH_TX_DRAIN_WAIT_CNT 10 +#else +#define OL_ATH_TX_DRAIN_WAIT_CNT 60 +#endif + +#ifdef FEATURE_RUNTIME_PM +#include + +static inline int hif_pm_request_resume(struct device *dev) +{ + return pm_request_resume(dev); +} +static inline int __hif_pm_runtime_get(struct device *dev) +{ + return pm_runtime_get(dev); +} + +static inline int hif_pm_runtime_put_auto(struct device *dev) +{ + return pm_runtime_put_autosuspend(dev); +} + +static inline void hif_pm_runtime_mark_last_busy(struct device *dev) +{ + pm_runtime_mark_last_busy(dev); +} + +static inline int hif_pm_runtime_resume(struct device *dev) +{ + return pm_runtime_resume(dev); +} +#else +static inline void hif_pm_runtime_mark_last_busy(struct device *dev) { } +#endif /* FEATURE_RUNTIME_PM */ +#endif /* __ATH_PCI_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci_internal.h b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..f15b03b4fd8464b2d6071bba4fdf6771f64633e1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci_internal.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __IF_PCI_INTERNAL_H__ +#define __IF_PCI_INTERNAL_H__ + +#ifdef DISABLE_L1SS_STATES +#define PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, addr) \ +{ \ + uint32_t lcr_val; \ + pci_read_config_dword(pdev, addr, &lcr_val); \ + pci_write_config_dword(pdev, addr, (lcr_val & ~0x0000000f)); \ +} +#else +#define PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, addr) +#endif + +#ifdef QCA_WIFI_3_0 +#define PCI_CLR_CAUSE0_REGISTER(sc) \ +{ \ + uint32_t tmp_cause0; \ + tmp_cause0 = hif_read32_mb(sc->mem + PCIE_INTR_CAUSE_ADDRESS); \ + hif_write32_mb(sc->mem + PCIE_INTR_CLR_ADDRESS, \ + PCIE_INTR_FIRMWARE_MASK | tmp_cause0); \ + hif_read32_mb(sc->mem + PCIE_INTR_CLR_ADDRESS); \ + hif_write32_mb(sc->mem + PCIE_INTR_CLR_ADDRESS, 0); \ + hif_read32_mb(sc->mem + PCIE_INTR_CLR_ADDRESS); \ +} +#else +#define PCI_CLR_CAUSE0_REGISTER(sc) +#endif +#endif /* __IF_PCI_INTERNAL_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/qca6290def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/qca6290def.c new file mode 100644 index 0000000000000000000000000000000000000000..e755ced1297ad2994a5e69ab345516dc8e301ff2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/qca6290def.c @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if defined(QCA6290_HEADERS_DEF) + +#undef UMAC +#define WLAN_HEADERS 1 + +#include "lithium_top_reg.h" +#include "wcss_version.h" + +#define MISSING 0 + +#define SOC_RESET_CONTROL_OFFSET MISSING +#define GPIO_PIN0_OFFSET MISSING +#define GPIO_PIN1_OFFSET MISSING +#define GPIO_PIN0_CONFIG_MASK MISSING +#define GPIO_PIN1_CONFIG_MASK MISSING +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET MISSING +#define GPIO_PIN11_OFFSET MISSING +#define GPIO_PIN12_OFFSET MISSING +#define GPIO_PIN13_OFFSET MISSING +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK 
MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS MISSING +#define GPIO_PIN0_CONFIG_LSB MISSING +#define GPIO_PIN0_PAD_PULL_LSB MISSING +#define GPIO_PIN0_PAD_PULL_MASK MISSING +/* SI reg */ +#define SI_CONFIG_ERR_INT_MASK MISSING +#define SI_CONFIG_ERR_INT_LSB MISSING + +#define RTC_SOC_BASE_ADDRESS MISSING +#define RTC_WMAC_BASE_ADDRESS MISSING +#define SOC_CORE_BASE_ADDRESS MISSING +#define WLAN_MAC_BASE_ADDRESS MISSING +#define GPIO_BASE_ADDRESS MISSING +#define ANALOG_INTF_BASE_ADDRESS MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define CE_COUNT 12 +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define SI_BASE_ADDRESS MISSING +#define DRAM_BASE_ADDRESS MISSING + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB MISSING +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK MISSING +#define CLOCK_CONTROL_OFFSET MISSING +#define CLOCK_CONTROL_SI0_CLK_MASK MISSING +#define RESET_CONTROL_SI0_RST_MASK MISSING +#define WLAN_RESET_CONTROL_OFFSET 
MISSING +#define WLAN_RESET_CONTROL_COLD_RST_MASK MISSING +#define WLAN_RESET_CONTROL_WARM_RST_MASK MISSING +#define CPU_CLOCK_OFFSET MISSING + +#define CPU_CLOCK_STANDARD_LSB MISSING +#define CPU_CLOCK_STANDARD_MASK MISSING +#define LPO_CAL_ENABLE_LSB MISSING +#define LPO_CAL_ENABLE_MASK MISSING +#define WLAN_SYSTEM_SLEEP_OFFSET MISSING + +#define SOC_CHIP_ID_ADDRESS MISSING +#define SOC_CHIP_ID_REVISION_MASK MISSING +#define SOC_CHIP_ID_REVISION_LSB MISSING +#define SOC_CHIP_ID_REVISION_MSB MISSING + +#define FW_IND_EVENT_PENDING MISSING +#define FW_IND_INITIALIZED MISSING + +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB MISSING + +#define SR_WR_INDEX_ADDRESS MISSING +#define DST_WATERMARK_ADDRESS MISSING + +#define DST_WR_INDEX_ADDRESS MISSING +#define SRC_WATERMARK_ADDRESS MISSING +#define SRC_WATERMARK_LOW_MASK MISSING +#define SRC_WATERMARK_HIGH_MASK MISSING +#define DST_WATERMARK_LOW_MASK MISSING +#define DST_WATERMARK_HIGH_MASK MISSING +#define CURRENT_SRRI_ADDRESS MISSING +#define CURRENT_DRRI_ADDRESS MISSING +#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_ADDRESS MISSING +#define MISC_IS_ADDRESS MISSING +#define HOST_IS_COPY_COMPLETE_MASK MISSING +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS MISSING +#define CE_DDR_ADDRESS_FOR_RRI_LOW MISSING +#define CE_DDR_ADDRESS_FOR_RRI_HIGH MISSING + 
+#if defined(WCSS_VERSION) && \ + ((defined(CONFIG_WIN) && (WCSS_VERSION > 81)) || \ + (defined(CONFIG_MCL) && (WCSS_VERSION >= 72))) +#define HOST_IE_ADDRESS UMAC_CE_COMMON_WFSS_CE_COMMON_R0_CE_HOST_IE_0 +#define HOST_IE_ADDRESS_2 UMAC_CE_COMMON_WFSS_CE_COMMON_R0_CE_HOST_IE_1 +#else /* WCSS_VERSION < 72 */ +#define HOST_IE_ADDRESS UMAC_CE_COMMON_CE_HOST_IE_0 +#define HOST_IE_ADDRESS_2 UMAC_CE_COMMON_CE_HOST_IE_1 +#endif /* WCSS_VERSION */ + +#define HOST_IE_COPY_COMPLETE_MASK MISSING +#define SR_BA_ADDRESS MISSING +#define SR_BA_ADDRESS_HIGH MISSING +#define SR_SIZE_ADDRESS MISSING +#define CE_CTRL1_ADDRESS MISSING +#define CE_CTRL1_DMAX_LENGTH_MASK MISSING +#define DR_BA_ADDRESS MISSING +#define DR_BA_ADDRESS_HIGH MISSING +#define DR_SIZE_ADDRESS MISSING +#define CE_CMD_REGISTER MISSING +#define CE_MSI_ADDRESS MISSING +#define CE_MSI_ADDRESS_HIGH MISSING +#define CE_MSI_DATA MISSING +#define CE_MSI_ENABLE_BIT MISSING +#define MISC_IE_ADDRESS MISSING +#define MISC_IS_AXI_ERR_MASK MISSING +#define MISC_IS_DST_ADDR_ERR_MASK MISSING +#define MISC_IS_SRC_LEN_ERR_MASK MISSING +#define MISC_IS_DST_MAX_LEN_VIO_MASK MISSING +#define MISC_IS_DST_RING_OVERFLOW_MASK MISSING +#define MISC_IS_SRC_RING_OVERFLOW_MASK MISSING +#define SRC_WATERMARK_LOW_LSB MISSING +#define SRC_WATERMARK_HIGH_LSB MISSING +#define DST_WATERMARK_LOW_LSB MISSING +#define DST_WATERMARK_HIGH_LSB MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB MISSING +#define CE_CTRL1_DMAX_LENGTH_LSB MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_IDX_UPD_EN_MASK MISSING +#define CE_WRAPPER_DEBUG_OFFSET MISSING +#define CE_WRAPPER_DEBUG_SEL_MSB MISSING +#define CE_WRAPPER_DEBUG_SEL_LSB MISSING +#define CE_WRAPPER_DEBUG_SEL_MASK MISSING +#define CE_DEBUG_OFFSET 
MISSING +#define CE_DEBUG_SEL_MSB MISSING +#define CE_DEBUG_SEL_LSB MISSING +#define CE_DEBUG_SEL_MASK MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS MISSING + +#define QCA6290_BOARD_DATA_SZ MISSING +#define QCA6290_BOARD_EXT_DATA_SZ MISSING + +#define MY_TARGET_DEF QCA6290_TARGETdef +#define MY_HOST_DEF QCA6290_HOSTdef +#define MY_CEREG_DEF QCA6290_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ QCA6290_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ QCA6290_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *QCA6290_TARGETdef; +struct hostdef_s *QCA6290_HOSTdef; +#endif /*QCA6290_HEADERS_DEF */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/qca8074def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/qca8074def.c new file mode 100644 index 0000000000000000000000000000000000000000..51d32fdd028d0b50120c7713dc5859023cfcb57e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/qca8074def.c @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_module.h" + +#if defined(QCA8074_HEADERS_DEF) + +#undef UMAC +#define WLAN_HEADERS 1 + +#include "wcss_version.h" +#include "wcss_seq_hwiobase.h" +#include "wfss_ce_reg_seq_hwioreg.h" + +#define MISSING 0 + +#define SOC_RESET_CONTROL_OFFSET MISSING +#define GPIO_PIN0_OFFSET MISSING +#define GPIO_PIN1_OFFSET MISSING +#define GPIO_PIN0_CONFIG_MASK MISSING +#define GPIO_PIN1_CONFIG_MASK MISSING +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET MISSING +#define GPIO_PIN11_OFFSET MISSING +#define GPIO_PIN12_OFFSET MISSING +#define GPIO_PIN13_OFFSET MISSING +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define 
ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define WINDOW_WRITE_ADDR_ADDRESS MISSING +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS MISSING +#define GPIO_PIN0_CONFIG_LSB MISSING +#define GPIO_PIN0_PAD_PULL_LSB MISSING +#define GPIO_PIN0_PAD_PULL_MASK MISSING +/* SI reg */ +#define SI_CONFIG_ERR_INT_MASK MISSING +#define SI_CONFIG_ERR_INT_LSB MISSING + +#define RTC_SOC_BASE_ADDRESS MISSING +#define RTC_WMAC_BASE_ADDRESS MISSING +#define SOC_CORE_BASE_ADDRESS MISSING +#define WLAN_MAC_BASE_ADDRESS MISSING +#define GPIO_BASE_ADDRESS MISSING +#define ANALOG_INTF_BASE_ADDRESS MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define CE_COUNT 12 +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define SI_BASE_ADDRESS MISSING +#define DRAM_BASE_ADDRESS MISSING + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB MISSING +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK MISSING +#define CLOCK_CONTROL_OFFSET MISSING +#define CLOCK_CONTROL_SI0_CLK_MASK MISSING +#define RESET_CONTROL_SI0_RST_MASK MISSING +#define WLAN_RESET_CONTROL_OFFSET MISSING +#define WLAN_RESET_CONTROL_COLD_RST_MASK MISSING +#define WLAN_RESET_CONTROL_WARM_RST_MASK MISSING +#define CPU_CLOCK_OFFSET MISSING + +#define CPU_CLOCK_STANDARD_LSB MISSING +#define CPU_CLOCK_STANDARD_MASK MISSING +#define LPO_CAL_ENABLE_LSB MISSING +#define LPO_CAL_ENABLE_MASK MISSING +#define 
WLAN_SYSTEM_SLEEP_OFFSET MISSING + +#define SOC_CHIP_ID_ADDRESS MISSING +#define SOC_CHIP_ID_REVISION_MASK MISSING +#define SOC_CHIP_ID_REVISION_LSB MISSING +#define SOC_CHIP_ID_REVISION_MSB MISSING + +#define FW_IND_EVENT_PENDING MISSING +#define FW_IND_INITIALIZED MISSING + +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_MASK MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_TCP_OVER_IPV6_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV4_CHECKSUM_EN_LSB MISSING +#define MSDU_LINK_EXT_3_UDP_OVER_IPV6_CHECKSUM_EN_LSB MISSING + +#define SR_WR_INDEX_ADDRESS MISSING +#define DST_WATERMARK_ADDRESS MISSING + +#define DST_WR_INDEX_ADDRESS MISSING +#define SRC_WATERMARK_ADDRESS MISSING +#define SRC_WATERMARK_LOW_MASK MISSING +#define SRC_WATERMARK_HIGH_MASK MISSING +#define DST_WATERMARK_LOW_MASK MISSING +#define DST_WATERMARK_HIGH_MASK MISSING +#define CURRENT_SRRI_ADDRESS MISSING +#define CURRENT_DRRI_ADDRESS MISSING +#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK MISSING +#define HOST_IS_DST_RING_LOW_WATERMARK_MASK MISSING +#define HOST_IS_ADDRESS MISSING +#define MISC_IS_ADDRESS MISSING +#define HOST_IS_COPY_COMPLETE_MASK MISSING +#define CE_WRAPPER_BASE_ADDRESS MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS MISSING +#define CE_DDR_ADDRESS_FOR_RRI_LOW MISSING +#define CE_DDR_ADDRESS_FOR_RRI_HIGH MISSING +#if defined(WCSS_VERSION) && (WCSS_VERSION > 68) +#define HOST_IE_ADDRESS \ + HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_ADDR(\ + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_COMMON_REG_OFFSET) +#define HOST_IE_REG1_CE_LSB HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_SRC_RING_IE_SHFT +#define HOST_IE_ADDRESS_2 \ + 
HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_1_ADDR(\ + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_COMMON_REG_OFFSET) +#define HOST_IE_REG2_CE_LSB HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_1_STS_RING_IE_SHFT +#define HOST_IE_ADDRESS_3 \ + HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_ADDR(\ + SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_COMMON_REG_OFFSET) +#define HOST_IE_REG3_CE_LSB HWIO_WFSS_CE_COMMON_R0_CE_HOST_IE_0_DST_RING_IE_SHFT +#else +#define HOST_IE_ADDRESS UMAC_CE_COMMON_CE_HOST_IE_0 +#define HOST_IE_ADDRESS_2 UMAC_CE_COMMON_CE_HOST_IE_1 +#endif +#define HOST_IE_COPY_COMPLETE_MASK MISSING +#define SR_BA_ADDRESS MISSING +#define SR_BA_ADDRESS_HIGH MISSING +#define SR_SIZE_ADDRESS MISSING +#define CE_CTRL1_ADDRESS MISSING +#define CE_CTRL1_DMAX_LENGTH_MASK MISSING +#define DR_BA_ADDRESS MISSING +#define DR_BA_ADDRESS_HIGH MISSING +#define DR_SIZE_ADDRESS MISSING +#define CE_CMD_REGISTER MISSING +#define CE_MSI_ADDRESS MISSING +#define CE_MSI_ADDRESS_HIGH MISSING +#define CE_MSI_DATA MISSING +#define CE_MSI_ENABLE_BIT MISSING +#define MISC_IE_ADDRESS MISSING +#define MISC_IS_AXI_ERR_MASK MISSING +#define MISC_IS_DST_ADDR_ERR_MASK MISSING +#define MISC_IS_SRC_LEN_ERR_MASK MISSING +#define MISC_IS_DST_MAX_LEN_VIO_MASK MISSING +#define MISC_IS_DST_RING_OVERFLOW_MASK MISSING +#define MISC_IS_SRC_RING_OVERFLOW_MASK MISSING +#define SRC_WATERMARK_LOW_LSB MISSING +#define SRC_WATERMARK_HIGH_LSB MISSING +#define DST_WATERMARK_LOW_LSB MISSING +#define DST_WATERMARK_HIGH_LSB MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK MISSING +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB MISSING +#define CE_CTRL1_DMAX_LENGTH_LSB MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK MISSING +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB MISSING +#define CE_CTRL1_IDX_UPD_EN_MASK MISSING +#define CE_WRAPPER_DEBUG_OFFSET MISSING +#define CE_WRAPPER_DEBUG_SEL_MSB MISSING +#define CE_WRAPPER_DEBUG_SEL_LSB MISSING 
+#define CE_WRAPPER_DEBUG_SEL_MASK MISSING +#define CE_DEBUG_OFFSET MISSING +#define CE_DEBUG_SEL_MSB MISSING +#define CE_DEBUG_SEL_LSB MISSING +#define CE_DEBUG_SEL_MASK MISSING +#define CE0_BASE_ADDRESS MISSING +#define CE1_BASE_ADDRESS MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_ENABLES MISSING +#define A_WIFI_APB_3_A_WCMN_APPS_CE_INTR_STATUS MISSING + +#define QCA8074_BOARD_DATA_SZ MISSING +#define QCA8074_BOARD_EXT_DATA_SZ MISSING + +#define MY_TARGET_DEF QCA8074_TARGETdef +#define MY_HOST_DEF QCA8074_HOSTdef +#define MY_CEREG_DEF QCA8074_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ QCA8074_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ QCA8074_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +qdf_export_symbol(QCA8074_CE_TARGETdef); +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *QCA8074_TARGETdef; +struct hostdef_s *QCA8074_HOSTdef; +#endif /*QCA8074_HEADERS_DEF */ +qdf_export_symbol(QCA8074_TARGETdef); +qdf_export_symbol(QCA8074_HOSTdef); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/qca9888def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/qca9888def.c new file mode 100644 index 0000000000000000000000000000000000000000..f44313cbb11c1d0920ae8966f1cc3f5ae6533f0c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/qca9888def.c @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2015,2016,2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +#include "qdf_module.h" + +#if defined(QCA9888_HEADERS_DEF) +#define QCA9888 1 + +#define WLAN_HEADERS 1 +#include "common_drv.h" +#include "QCA9888/v2/soc_addrs.h" +#include "QCA9888/v2/extra/hw/apb_map.h" +#include "QCA9888/v2/hw/gpio_athr_wlan_reg.h" +#ifdef WLAN_HEADERS + +#include "QCA9888/v2/extra/hw/wifi_top_reg_map.h" +#include "QCA9888/v2/hw/rtc_soc_reg.h" + +#endif +#include "QCA9888/v2/hw/si_reg.h" +#include "QCA9888/v2/extra/hw/pcie_local_reg.h" +#include "QCA9888/v2/hw/ce_wrapper_reg_csr.h" + +#include "QCA9888/v2/extra/hw/soc_core_reg.h" +#include "QCA9888/v2/hw/soc_pcie_reg.h" +#include "QCA9888/v2/extra/hw/ce_reg_csr.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Base address is defined in pcie_local_reg.h. Macros which access the + * registers include the base address in their definition. 
+ */ +#define PCIE_LOCAL_BASE_ADDRESS 0 + +#define FW_EVENT_PENDING_ADDRESS (WIFICMN_SCRATCH_3_ADDRESS) +#define DRAM_BASE_ADDRESS TARG_DRAM_START + +/* Backwards compatibility -- TBDXXX */ + +#define MISSING 0 + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB WIFI_SYSTEM_SLEEP_DISABLE_LSB +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK WIFI_SYSTEM_SLEEP_DISABLE_MASK +#define WLAN_RESET_CONTROL_COLD_RST_MASK WIFI_RESET_CONTROL_MAC_COLD_RST_MASK +#define WLAN_RESET_CONTROL_WARM_RST_MASK WIFI_RESET_CONTROL_MAC_WARM_RST_MASK +#define SOC_CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_ADDRESS +#define SOC_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_ADDRESS +#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_ADDRESS +#define SOC_LPO_CAL_OFFSET SOC_LPO_CAL_ADDRESS +#define SOC_RESET_CONTROL_CE_RST_MASK WIFI_RESET_CONTROL_CE_RESET_MASK +#define WLAN_SYSTEM_SLEEP_OFFSET WIFI_SYSTEM_SLEEP_ADDRESS +#define WLAN_RESET_CONTROL_OFFSET WIFI_RESET_CONTROL_ADDRESS +#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET +#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK +#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS +#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS +#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS +#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK +#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK +#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS +#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS +#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS +#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS +#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS +#define SI_CONFIG_OFFSET SI_CONFIG_ADDRESS +#define SI_TX_DATA0_OFFSET SI_TX_DATA0_ADDRESS +#define SI_TX_DATA1_OFFSET SI_TX_DATA1_ADDRESS +#define SI_RX_DATA0_OFFSET SI_RX_DATA0_ADDRESS +#define SI_RX_DATA1_OFFSET SI_RX_DATA1_ADDRESS +#define SI_CS_OFFSET SI_CS_ADDRESS +#define 
CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB +#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK +#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB +#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK +#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define 
WINDOW_WRITE_ADDR_ADDRESS MISSING +/* MAC Descriptor */ +#define RX_PPDU_END_ANTENNA_OFFSET_DWORD (RX_PPDU_END_25_RX_ANTENNA_OFFSET >> 2) +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS WLAN_GPIO_ENABLE_W1TS_LOW_ADDRESS +#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB +#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB +#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK +/* CE descriptor */ +#define CE_SRC_DESC_SIZE_DWORD 2 +#define CE_DEST_DESC_SIZE_DWORD 2 +#define CE_SRC_DESC_SRC_PTR_OFFSET_DWORD 0 +#define CE_SRC_DESC_INFO_OFFSET_DWORD 1 +#define CE_DEST_DESC_DEST_PTR_OFFSET_DWORD 0 +#define CE_DEST_DESC_INFO_OFFSET_DWORD 1 +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_SRC_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 16 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 15 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK 0x00002000 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT 13 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00001000 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT 12 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0x00000FFF +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_SRC_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 0 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 16 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK 0x00040000 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT 18 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00080000 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT 19 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0xFFF00000 +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 20 +#endif +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_DEST_DESC_INFO_NBYTES_MASK 
0xFFFF0000 +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 16 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 15 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK 0x00002000 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT 13 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00001000 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT 12 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0x00000FFF +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_DEST_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 0 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 16 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK 0x00040000 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT 18 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00080000 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT 19 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0xFFF00000 +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 20 +#endif + +#define MY_TARGET_DEF QCA9888_TARGETdef +#define MY_HOST_DEF QCA9888_HOSTdef +#define MY_CEREG_DEF QCA9888_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ QCA9888_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ QCA9888_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +qdf_export_symbol(QCA9888_CE_TARGETdef); +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *QCA9888_TARGETdef; +struct hostdef_s *QCA9888_HOSTdef; +#endif /* QCA9888_HEADERS_DEF */ +qdf_export_symbol(QCA9888_TARGETdef); +qdf_export_symbol(QCA9888_HOSTdef); diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/qca9984def.c b/drivers/staging/qca-wifi-host-cmn/hif/src/qca9984def.c new file mode 100644 index 
0000000000000000000000000000000000000000..572d082171ee8562d4db4537a44950f84e7840ff --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/qca9984def.c @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2015,2016,2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_module.h" + +#if defined(QCA9984_HEADERS_DEF) +#define QCA9984 1 + +#define WLAN_HEADERS 1 +#include "common_drv.h" +#include "QCA9984/soc_addrs.h" +#include "QCA9984/extra/hw/apb_map.h" +#include "QCA9984/hw/gpio_athr_wlan_reg.h" +#ifdef WLAN_HEADERS + +#include "QCA9984/extra/hw/wifi_top_reg_map.h" +#include "QCA9984/hw/rtc_soc_reg.h" + +#endif +#include "QCA9984/hw/si_reg.h" +#include "QCA9984/extra/hw/pcie_local_reg.h" +#include "QCA9984/hw/ce_wrapper_reg_csr.h" + +#include "QCA9984/extra/hw/soc_core_reg.h" +#include "QCA9984/hw/soc_pcie_reg.h" +#include "QCA9984/extra/hw/ce_reg_csr.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Base address is defined in pcie_local_reg.h. Macros which access the + * registers include the base address in their definition. 
+ */ +#define PCIE_LOCAL_BASE_ADDRESS 0 + +#define FW_EVENT_PENDING_ADDRESS (WIFICMN_SCRATCH_3_ADDRESS) +#define DRAM_BASE_ADDRESS TARG_DRAM_START + +/* Backwards compatibility -- TBDXXX */ + +#define MISSING 0 + +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB WIFI_SYSTEM_SLEEP_DISABLE_LSB +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK WIFI_SYSTEM_SLEEP_DISABLE_MASK +#define WLAN_RESET_CONTROL_COLD_RST_MASK WIFI_RESET_CONTROL_MAC_COLD_RST_MASK +#define WLAN_RESET_CONTROL_WARM_RST_MASK WIFI_RESET_CONTROL_MAC_WARM_RST_MASK +#define SOC_CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_ADDRESS +#define SOC_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_ADDRESS +#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_ADDRESS +#define SOC_LPO_CAL_OFFSET SOC_LPO_CAL_ADDRESS +#define SOC_RESET_CONTROL_CE_RST_MASK WIFI_RESET_CONTROL_CE_RESET_MASK +#define WLAN_SYSTEM_SLEEP_OFFSET WIFI_SYSTEM_SLEEP_ADDRESS +#define WLAN_RESET_CONTROL_OFFSET WIFI_RESET_CONTROL_ADDRESS +#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET +#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK +#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK +#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS +#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS +#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS +#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK +#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK +#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS +#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS +#define LOCAL_SCRATCH_OFFSET 0x18 +#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS +#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS +#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS +#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS +#define SI_CONFIG_OFFSET SI_CONFIG_ADDRESS +#define SI_TX_DATA0_OFFSET SI_TX_DATA0_ADDRESS +#define SI_TX_DATA1_OFFSET SI_TX_DATA1_ADDRESS +#define SI_RX_DATA0_OFFSET SI_RX_DATA0_ADDRESS +#define SI_RX_DATA1_OFFSET SI_RX_DATA1_ADDRESS +#define SI_CS_OFFSET SI_CS_ADDRESS +#define 
CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB +#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK +#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB +#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK +#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS +#define MBOX_BASE_ADDRESS MISSING +#define INT_STATUS_ENABLE_ERROR_LSB MISSING +#define INT_STATUS_ENABLE_ERROR_MASK MISSING +#define INT_STATUS_ENABLE_CPU_LSB MISSING +#define INT_STATUS_ENABLE_CPU_MASK MISSING +#define INT_STATUS_ENABLE_COUNTER_LSB MISSING +#define INT_STATUS_ENABLE_COUNTER_MASK MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING +#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING +#define INT_STATUS_ENABLE_ADDRESS MISSING +#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING +#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING +#define HOST_INT_STATUS_ADDRESS MISSING +#define CPU_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_ADDRESS MISSING +#define ERROR_INT_STATUS_WAKEUP_MASK MISSING +#define ERROR_INT_STATUS_WAKEUP_LSB MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING +#define COUNT_DEC_ADDRESS MISSING +#define HOST_INT_STATUS_CPU_MASK MISSING +#define HOST_INT_STATUS_CPU_LSB MISSING +#define HOST_INT_STATUS_ERROR_MASK MISSING +#define HOST_INT_STATUS_ERROR_LSB MISSING +#define HOST_INT_STATUS_COUNTER_MASK MISSING +#define HOST_INT_STATUS_COUNTER_LSB MISSING +#define RX_LOOKAHEAD_VALID_ADDRESS MISSING +#define WINDOW_DATA_ADDRESS MISSING +#define WINDOW_READ_ADDR_ADDRESS MISSING +#define 
WINDOW_WRITE_ADDR_ADDRESS MISSING +/* MAC Descriptor */ +#define RX_PPDU_END_ANTENNA_OFFSET_DWORD (RX_PPDU_END_25_RX_ANTENNA_OFFSET >> 2) +/* GPIO Register */ +#define GPIO_ENABLE_W1TS_LOW_ADDRESS WLAN_GPIO_ENABLE_W1TS_LOW_ADDRESS +#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB +#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB +#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK +/* CE descriptor */ +#define CE_SRC_DESC_SIZE_DWORD 2 +#define CE_DEST_DESC_SIZE_DWORD 2 +#define CE_SRC_DESC_SRC_PTR_OFFSET_DWORD 0 +#define CE_SRC_DESC_INFO_OFFSET_DWORD 1 +#define CE_DEST_DESC_DEST_PTR_OFFSET_DWORD 0 +#define CE_DEST_DESC_INFO_OFFSET_DWORD 1 +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_SRC_DESC_INFO_NBYTES_MASK 0xFFFF0000 +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 16 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 15 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK 0x00002000 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT 13 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00001000 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT 12 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0x00000FFF +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_SRC_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_SRC_DESC_INFO_NBYTES_SHIFT 0 +#define CE_SRC_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_SRC_DESC_INFO_GATHER_SHIFT 16 +#define CE_SRC_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_SRC_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_MASK 0x00040000 +#define CE_SRC_DESC_INFO_HOST_INT_DISABLE_SHIFT 18 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00080000 +#define CE_SRC_DESC_INFO_TARGET_INT_DISABLE_SHIFT 19 +#define CE_SRC_DESC_INFO_META_DATA_MASK 0xFFF00000 +#define CE_SRC_DESC_INFO_META_DATA_SHIFT 20 +#endif +#if _BYTE_ORDER == _BIG_ENDIAN +#define CE_DEST_DESC_INFO_NBYTES_MASK 
0xFFFF0000 +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 16 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00008000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 15 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00004000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 14 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK 0x00002000 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT 13 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00001000 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT 12 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0x00000FFF +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 0 +#else +#define CE_DEST_DESC_INFO_NBYTES_MASK 0x0000FFFF +#define CE_DEST_DESC_INFO_NBYTES_SHIFT 0 +#define CE_DEST_DESC_INFO_GATHER_MASK 0x00010000 +#define CE_DEST_DESC_INFO_GATHER_SHIFT 16 +#define CE_DEST_DESC_INFO_BYTE_SWAP_MASK 0x00020000 +#define CE_DEST_DESC_INFO_BYTE_SWAP_SHIFT 17 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_MASK 0x00040000 +#define CE_DEST_DESC_INFO_HOST_INT_DISABLE_SHIFT 18 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_MASK 0x00080000 +#define CE_DEST_DESC_INFO_TARGET_INT_DISABLE_SHIFT 19 +#define CE_DEST_DESC_INFO_META_DATA_MASK 0xFFF00000 +#define CE_DEST_DESC_INFO_META_DATA_SHIFT 20 +#endif + +#define MY_TARGET_DEF QCA9984_TARGETdef +#define MY_HOST_DEF QCA9984_HOSTdef +#define MY_CEREG_DEF QCA9984_CE_TARGETdef +#define MY_TARGET_BOARD_DATA_SZ QCA9984_BOARD_DATA_SZ +#define MY_TARGET_BOARD_EXT_DATA_SZ QCA9984_BOARD_EXT_DATA_SZ +#include "targetdef.h" +#include "hostdef.h" +qdf_export_symbol(QCA9984_CE_TARGETdef); +#else +#include "common_drv.h" +#include "targetdef.h" +#include "hostdef.h" +struct targetdef_s *QCA9984_TARGETdef; +struct hostdef_s *QCA9984_HOSTdef; +#endif /* QCA9984_HEADERS_DEF */ +qdf_export_symbol(QCA9984_TARGETdef); +qdf_export_symbol(QCA9984_HOSTdef); + diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/regtable.c b/drivers/staging/qca-wifi-host-cmn/hif/src/regtable.c new file mode 100644 index 
0000000000000000000000000000000000000000..ad08b716145a04abf7e684a55bd18da7214a2a4b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/regtable.c @@ -0,0 +1,192 @@
/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targaddrs.h"
#include "target_type.h"
#include "cepci.h"
#include "regtable.h"
#include "ar6320def.h"
#include "ar6320v2def.h"
#include "hif_main.h"
#include "adrastea_reg_def.h"

#include "targetdef.h"
#include "hostdef.h"

/**
 * hif_target_register_tbl_attach() - select target-side register tables
 * @scn: HIF context whose targetdef/target_ce_def pointers are populated
 * @target_type: TARGET_TYPE_* id of the attached chip
 *
 * Points @scn at the static target and copy-engine register definition
 * tables matching @target_type.  Most chip tables are compiled in only
 * when the corresponding *_HEADERS_DEF macro is defined, so unsupported
 * chips simply fall through.  An unrecognized @target_type leaves the
 * pointers untouched (default: break) — callers must have initialized
 * them beforehand.
 */
void hif_target_register_tbl_attach(struct hif_softc *scn, u32 target_type)
{
	switch (target_type) {
	case TARGET_TYPE_AR6320:
		scn->targetdef = &ar6320_targetdef;
		scn->target_ce_def = &ar6320_ce_targetdef;
		break;
	case TARGET_TYPE_AR6320V2:
		scn->targetdef = &ar6320v2_targetdef;
		scn->target_ce_def = &ar6320v2_ce_targetdef;
		break;
	case TARGET_TYPE_ADRASTEA:
		scn->targetdef = &adrastea_targetdef;
		scn->target_ce_def = &adrastea_ce_targetdef;
		break;
#if defined(AR6002_HEADERS_DEF)
	case TARGET_TYPE_AR6002:
		scn->targetdef = AR6002_TARGETdef;
		break;
#endif
#if defined(AR6003_HEADERS_DEF)
	case TARGET_TYPE_AR6003:
		scn->targetdef = AR6003_TARGETdef;
		break;
#endif
#if defined(AR6004_HEADERS_DEF)
	case TARGET_TYPE_AR6004:
		scn->targetdef = AR6004_TARGETdef;
		break;
#endif
#if defined(AR9888_HEADERS_DEF)
	case TARGET_TYPE_AR9888:
		scn->targetdef = AR9888_TARGETdef;
		scn->target_ce_def = AR9888_CE_TARGETdef;
		break;
#endif
#if defined(AR9888V2_HEADERS_DEF)
	case TARGET_TYPE_AR9888V2:
		/* NB: V2 reuses the AR9888 CE table, only targetdef differs */
		scn->targetdef = AR9888V2_TARGETdef;
		scn->target_ce_def = AR9888_CE_TARGETdef;
		break;
#endif
#if defined(AR900B_HEADERS_DEF)
	case TARGET_TYPE_AR900B:
		scn->targetdef = AR900B_TARGETdef;
		scn->target_ce_def = AR900B_CE_TARGETdef;
		break;
#endif
#if defined(QCA9984_HEADERS_DEF)
	case TARGET_TYPE_QCA9984:
		scn->targetdef = QCA9984_TARGETdef;
		scn->target_ce_def = QCA9984_CE_TARGETdef;
		break;
#endif
#if defined(QCA9888_HEADERS_DEF)
	case TARGET_TYPE_QCA9888:
		scn->targetdef = QCA9888_TARGETdef;
		scn->target_ce_def = QCA9888_CE_TARGETdef;
		break;
#endif
#ifdef ATH_AHB
#if defined(IPQ4019_HEADERS_DEF)
	case TARGET_TYPE_IPQ4019:
		scn->targetdef = IPQ4019_TARGETdef;
		scn->target_ce_def = IPQ4019_CE_TARGETdef;
		break;
#endif
#endif
#if defined(QCA8074_HEADERS_DEF)
	case TARGET_TYPE_QCA8074:
		scn->targetdef = QCA8074_TARGETdef;
		scn->target_ce_def = QCA8074_CE_TARGETdef;
		break;
#endif

#if defined(QCA6290_HEADERS_DEF)
	/* use the same defs for HAWKEYE & NAPIER */
	case TARGET_TYPE_QCA6290:
		scn->targetdef = QCA6290_TARGETdef;
		scn->target_ce_def = QCA6290_CE_TARGETdef;
		break;
#endif

	default:
		break;
	}
}

/**
 * hif_register_tbl_attach() - select host-side register tables
 * @scn: HIF context whose hostdef (and, for Adrastea, host_shadow_regs)
 *       pointers are populated
 * @hif_type: HIF_TYPE_* id of the attached chip
 *
 * Host-side counterpart of hif_target_register_tbl_attach(); same
 * conditional-compilation scheme, same "unknown type is a no-op"
 * behavior.
 */
void hif_register_tbl_attach(struct hif_softc *scn, u32 hif_type)
{
	switch (hif_type) {
	case HIF_TYPE_AR6320V2:
		scn->hostdef = &ar6320v2_hostdef;
		break;
	case HIF_TYPE_ADRASTEA:
		scn->hostdef = &adrastea_hostdef;
		scn->host_shadow_regs = &adrastea_host_shadow_regs;
		break;
#if defined(AR6002_HEADERS_DEF)
	case HIF_TYPE_AR6002:
		scn->hostdef = AR6002_HOSTdef;
		break;
#endif
#if defined(AR6003_HEADERS_DEF)
	case HIF_TYPE_AR6003:
		scn->hostdef = AR6003_HOSTdef;
		break;
#endif
#if defined(AR6004_HEADERS_DEF)
	case HIF_TYPE_AR6004:
		scn->hostdef = AR6004_HOSTdef;
		break;
#endif
#if defined(AR9888_HEADERS_DEF)
	case HIF_TYPE_AR9888:
		scn->hostdef = AR9888_HOSTdef;
		break;
#endif
#if defined(AR9888V2_HEADERS_DEF)
	case HIF_TYPE_AR9888V2:
		scn->hostdef = AR9888V2_HOSTdef;
		break;
#endif
#if defined(AR900B_HEADERS_DEF)
	case HIF_TYPE_AR900B:
		scn->hostdef = AR900B_HOSTdef;
		break;
#endif
#if defined(QCA9984_HEADERS_DEF)
	case HIF_TYPE_QCA9984:
		scn->hostdef = QCA9984_HOSTdef;
		break;
#endif
#if defined(QCA9888_HEADERS_DEF)
	case HIF_TYPE_QCA9888:
		scn->hostdef = QCA9888_HOSTdef;
		break;
#endif

#ifdef ATH_AHB
#if defined(IPQ4019_HEADERS_DEF)
	case HIF_TYPE_IPQ4019:
		scn->hostdef = IPQ4019_HOSTdef;
		break;
#endif
#endif
#if defined(QCA8074_HEADERS_DEF)
	case HIF_TYPE_QCA8074:
		scn->hostdef = QCA8074_HOSTdef;
		break;
#endif

#if defined(QCA6290_HEADERS_DEF)
	case HIF_TYPE_QCA6290:
		scn->hostdef = QCA6290_HOSTdef;
		break;
#endif

	default:
		break;
	}
}
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_bmi_reg_access.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_bmi_reg_access.c new file mode 100644 index 0000000000000000000000000000000000000000..5aa5a441da8a89faae6e795deb0416b83a8d03f9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_bmi_reg_access.c @@ -0,0 +1,523 @@
/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "athdefs.h"
#include "a_types.h"
#include "a_osapi.h"
#define ATH_MODULE_NAME hif
#include "a_debug.h"
#define ATH_DEBUG_BMI ATH_DEBUG_MAKE_MODULE_MASK(0)
#include "hif.h"
#include "bmi.h"
#include "htc_api.h"
#include "if_sdio.h"
#include "regtable_sdio.h"

/* Max polls of the BMI credit counter before declaring the target dead */
#define BMI_COMMUNICATION_TIMEOUT       100000

/* One-shot latch: have we already asked HIF for a pending-events callback? */
static bool pending_events_func_check;
/* Credit count read back from the target's counter register (file scope so
 * the buffer handed to hif_read_write() is not on the stack).
 */
static uint32_t command_credits;
static uint32_t *p_bmi_cmd_credits = &command_credits;

/* BMI Access routines */

/**
 * hif_bmi_buffer_send - call to send bmi buffer
 * @device: hif context
 * @buffer: buffer
 * @length: length
 *
 * Polls the target's credit counter until a BMI command credit is
 * available (or BMI_COMMUNICATION_TIMEOUT polls elapse), then writes
 * @buffer to the ENDPOINT1 mailbox.
 *
 * Return: QDF_STATUS_SUCCESS for success.
 */
static QDF_STATUS
hif_bmi_buffer_send(struct hif_sdio_dev *device, char *buffer, uint32_t length)
{
	QDF_STATUS status;
	uint32_t timeout;
	uint32_t address;
	uint32_t mbox_address[HTC_MAILBOX_NUM_MAX];

	hif_configure_device(device, HIF_DEVICE_GET_MBOX_ADDR,
			     &mbox_address[0], sizeof(mbox_address));

	*p_bmi_cmd_credits = 0;
	timeout = BMI_COMMUNICATION_TIMEOUT;

	while (timeout-- && !(*p_bmi_cmd_credits)) {
		/* Read the counter register to get the command credits */
		address =
			COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
		/* hit the credit counter with a 4-byte access, the first
		 * byte read will hit the counter and cause
		 * a decrement, while the remaining 3 bytes has no effect.
		 * The rationale behind this is to make all HIF accesses
		 * 4-byte aligned
		 */
		status =
			hif_read_write(device, address,
				       (uint8_t *) p_bmi_cmd_credits, 4,
				       HIF_RD_SYNC_BYTE_INC, NULL);
		if (status != QDF_STATUS_SUCCESS) {
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("%s:Unable to decrement the credit count register\n",
				__func__));
			return QDF_STATUS_E_FAILURE;
		}
		/* the counter is only 8 bits wide; ignore anything in the
		 * upper 3 bytes
		 */
		(*p_bmi_cmd_credits) &= 0xFF;
	}

	if (*p_bmi_cmd_credits) {
		address = mbox_address[ENDPOINT1];
		status = hif_read_write(device, address, buffer, length,
					HIF_WR_SYNC_BYTE_INC, NULL);
		if (status != QDF_STATUS_SUCCESS) {
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("%s:Unable to send the BMI data to the device\n",
				__func__));
			return QDF_STATUS_E_FAILURE;
		}
	} else {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("%s:BMI Communication timeout - hif_bmi_buffer_send\n",
			__func__));
		return QDF_STATUS_E_FAILURE;
	}

	return status;
}

#if defined(SDIO_3_0)

/*
 * SDIO 3.0 variant: read the host interrupt status register and reduce it
 * to "ENDPOINT1 mailbox has data" (non-zero) / "empty" (zero), stored in
 * the first byte of @buffer.
 */
static QDF_STATUS
hif_bmi_read_write(struct hif_sdio_dev *device,
		   char *buffer, uint32_t length)
{
	QDF_STATUS status;

	status = hif_read_write(device, HOST_INT_STATUS_ADDRESS,
				buffer, length,
				HIF_RD_SYNC_BYTE_INC, NULL);
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("%s:Unable to read int status reg\n",
			__func__));
		return QDF_STATUS_E_FAILURE;
	}
	*buffer = (HOST_INT_STATUS_MBOX_DATA_GET(*buffer) & (1 << ENDPOINT1));
	return status;
}
#else

/*
 * Pre-3.0 variant: same contract as above, but derived from the RX
 * lookahead-valid register instead of the interrupt status register.
 */
static QDF_STATUS
hif_bmi_read_write(struct hif_sdio_dev *device,
		   char *buffer, uint32_t length)
{
	QDF_STATUS status;

	status = hif_read_write(device, RX_LOOKAHEAD_VALID_ADDRESS,
				buffer, length,
				HIF_RD_SYNC_BYTE_INC, NULL);
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("%s:Unable to read rx lookahead reg\n",
			__func__));
		return QDF_STATUS_E_FAILURE;
	}
	*buffer &= (1 << ENDPOINT1);
	return status;
}
#endif

/**
 *
hif_bmi_buffer_receive - call when bmi buffer is received
 * @device: hif context
 * @buffer: buffer
 * @length: length
 * @want_timeout: timeout is needed or not
 *
 * Waits (see the synchronization discussion in the body) until the target
 * has at least 4 bytes of response queued, then reads @length bytes from
 * the ENDPOINT1 mailbox into @buffer.
 *
 * Return: QDF_STATUS_SUCCESS for success.
 */
static QDF_STATUS
hif_bmi_buffer_receive(struct hif_sdio_dev *device,
		       char *buffer, uint32_t length, bool want_timeout)
{
	QDF_STATUS status;
	uint32_t address;
	uint32_t mbox_address[HTC_MAILBOX_NUM_MAX];
	struct _HIF_PENDING_EVENTS_INFO hif_pending_events;

	static HIF_PENDING_EVENTS_FUNC get_pending_events_func;

	if (!pending_events_func_check) {
		/* see if the HIF layer implements an alternative
		 * function to get pending events
		 * do this only once!
		 */
		hif_configure_device(device,
				     HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
				     &get_pending_events_func,
				     sizeof(get_pending_events_func));
		pending_events_func_check = true;
	}

	hif_configure_device(device, HIF_DEVICE_GET_MBOX_ADDR,
			     &mbox_address[0], sizeof(mbox_address));

	/*
	 * During normal bootup, small reads may be required.
	 * Rather than issue an HIF Read and then wait as the Target
	 * adds successive bytes to the FIFO, we wait here until
	 * we know that response data is available.
	 *
	 * This allows us to cleanly timeout on an unexpected
	 * Target failure rather than risk problems at the HIF level. In
	 * particular, this avoids SDIO timeouts and possibly garbage
	 * data on some host controllers. And on an interconnect
	 * such as Compact Flash (as well as some SDIO masters) which
	 * does not provide any indication on data timeout, it avoids
	 * a potential hang or garbage response.
	 *
	 * Synchronization is more difficult for reads larger than the
	 * size of the MBOX FIFO (128B), because the Target is unable
	 * to push the 129th byte of data until AFTER the Host posts an
	 * HIF Read and removes some FIFO data. So for large reads the
	 * Host proceeds to post an HIF Read BEFORE all the data is
	 * actually available to read. Fortunately, large BMI reads do
	 * not occur in practice -- they're supported for debug/development.
	 *
	 * So Host/Target BMI synchronization is divided into these cases:
	 * CASE 1: length < 4
	 *     Should not happen
	 *
	 * CASE 2: 4 <= length <= 128
	 *     Wait for first 4 bytes to be in FIFO
	 *     If CONSERVATIVE_BMI_READ is enabled, also wait for
	 *     a BMI command credit, which indicates that the ENTIRE
	 *     response is available in the FIFO
	 *
	 * CASE 3: length > 128
	 *     Wait for the first 4 bytes to be in FIFO
	 *
	 * For most uses, a small timeout should be sufficient and we will
	 * usually see a response quickly; but there may be some unusual
	 * (debug) cases of BMI_EXECUTE where we want a larger timeout.
	 * For now, we use an unbounded busy loop while waiting for
	 * BMI_EXECUTE.
	 *
	 * If BMI_EXECUTE ever needs to support longer-latency execution,
	 * especially in production, this code needs to be enhanced to sleep
	 * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
	 * a function of Host processor speed.
	 */
	if (length >= 4) {	/* NB: Currently, always true */
		/*
		 * NB: word_available is declared static for esoteric reasons
		 * having to do with protection on some OSes.
		 */
		static uint32_t word_available;
		uint32_t timeout;

		word_available = 0;
		timeout = BMI_COMMUNICATION_TIMEOUT;
		/* when want_timeout is false this polls forever (BMI_EXECUTE
		 * case described above); otherwise it gives up after
		 * BMI_COMMUNICATION_TIMEOUT iterations
		 */
		while ((!want_timeout || timeout--) && !word_available) {

			if (get_pending_events_func != NULL) {
				status = get_pending_events_func(device,
								 &hif_pending_events,
								 NULL);
				if (status != QDF_STATUS_SUCCESS) {
					AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
						("%s:Failed to get pending events\n",
						__func__));
					break;
				}

				if (hif_pending_events.available_recv_bytes >=
				    sizeof(uint32_t)) {
					word_available = 1;
				}
				continue;
			}
			status = hif_bmi_read_write(device,
						    (uint8_t *) &word_available,
						    sizeof(word_available));
			if (status != QDF_STATUS_SUCCESS)
				return QDF_STATUS_E_FAILURE;
		}

		if (!word_available) {
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("%s:BMI Communication timeout FIFO empty\n",
				__func__));
			return QDF_STATUS_E_FAILURE;
		}
	}

	address = mbox_address[ENDPOINT1];
	status = hif_read_write(device, address, buffer, length,
				HIF_RD_SYNC_BYTE_INC, NULL);
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("%s:Unable to read the BMI data from the device\n",
			__func__));
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * hif_reg_based_get_target_info - to retrieve target info
 * @hif_ctx: hif context
 * @targ_info: bmi target info
 *
 * Return: QDF_STATUS_SUCCESS for success.
 */
QDF_STATUS
hif_reg_based_get_target_info(struct hif_opaque_softc *hif_ctx,
			      struct bmi_target_info *targ_info)
{
	QDF_STATUS status;
	uint32_t cid;
	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
	struct hif_sdio_dev *device = scn->hif_handle;

	AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
			("BMI Get Target Info: Enter (device: 0x%pK)\n",
			device));
	/* Send the BMI_GET_TARGET_INFO command id, then read the reply */
	cid = BMI_GET_TARGET_INFO;
	status = hif_bmi_buffer_send(device, (char *) &cid, sizeof(cid));
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("%s:Unable to write to the device\n",
			__func__));
		return QDF_STATUS_E_FAILURE;
	}

	status = hif_bmi_buffer_receive(device,
					(char *) &targ_info->target_ver,
					sizeof(targ_info->target_ver), true);
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("%s:Unable to read Target Version from the device\n",
			__func__));
		return QDF_STATUS_E_FAILURE;
	}

	if (targ_info->target_ver == TARGET_VERSION_SENTINAL) {
		/* Determine how many bytes are in the Target's targ_info */
		status = hif_bmi_buffer_receive(device,
						(char *) &targ_info->
						target_info_byte_count,
						sizeof(targ_info->
						       target_info_byte_count),
						true);
		if (status != QDF_STATUS_SUCCESS) {
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("%s:Unable to read target Info\n",
				__func__));
			return QDF_STATUS_E_FAILURE;
		}

		/*
		 * The Target's targ_info doesn't match the Host's targ_info.
		 * We need to do some backwards compatibility work to make this
		 * OK.
		 *
		 * NOTE(review): the comment above reads oddly next to the
		 * assert below, which requires the byte counts to MATCH —
		 * confirm intent against the cld driver's copy of this code.
		 */
		QDF_ASSERT(targ_info->target_info_byte_count ==
			   sizeof(*targ_info));
		/* Read the remainder of the targ_info */
		status = hif_bmi_buffer_receive(device,
						((char *) targ_info) +
						sizeof(targ_info->
						       target_info_byte_count),
						sizeof(*targ_info) -
						sizeof(targ_info->
						       target_info_byte_count),
						true);
		if (status != QDF_STATUS_SUCCESS) {
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("%s:Unable to read Target Info (%d bytes)\n",
				__func__, targ_info->target_info_byte_count));
			return QDF_STATUS_E_FAILURE;
		}
	} else {
		/*
		 * Target must be an AR6001 whose firmware does not
		 * support BMI_GET_TARGET_INFO. Construct the data
		 * that it would have sent.
		 */
		targ_info->target_info_byte_count = sizeof(*targ_info);
		targ_info->target_type = TARGET_TYPE_AR6001;
	}

	AR_DEBUG_PRINTF(ATH_DEBUG_BMI,
			("BMI Get Target Info: Exit (ver: 0x%x type: 0x%x)\n",
			targ_info->target_ver,
			targ_info->target_type));

	return QDF_STATUS_SUCCESS;
}

/**
 * hif_exchange_bmi_msg - API to handle HIF-specific BMI message exchanges
 * @hif_ctx: hif context
 * @bmi_cmd_da: bmi cmd (DMA address; unused on the SDIO path)
 * @bmi_rsp_da: bmi rsp (DMA address; unused on the SDIO path)
 * @send_message: send message
 * @length: length
 * @response_message: response message, may be NULL for send-only commands
 * @response_length: response length
 * @timeout_ms: timeout in ms; 0 means wait without a poll limit
 *
 * This API is synchronous
 * and only allowed to be called from a context that can block (sleep)
 *
 * Return: QDF_STATUS_SUCCESS for success.
 */
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
				qdf_dma_addr_t bmi_cmd_da,
				qdf_dma_addr_t bmi_rsp_da,
				uint8_t *send_message,
				uint32_t length,
				uint8_t *response_message,
				uint32_t *response_length,
				uint32_t timeout_ms) {
	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
	struct hif_sdio_dev *device = scn->hif_handle;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (device == NULL) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("%s:Null device argument\n",
			__func__));
		return QDF_STATUS_E_INVAL;
	}

	status = hif_bmi_buffer_send(device, send_message, length);
	if (QDF_IS_STATUS_ERROR(status)) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("%s:Unable to Send Message to device\n",
			__func__));
		return status;
	}

	if (response_message != NULL) {
		status = hif_bmi_buffer_receive(device, response_message,
						*response_length,
						timeout_ms ? true : false);
		if (QDF_IS_STATUS_ERROR(status)) {
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("%s:Unable to read response\n",
				__func__));
			return status;
		}
	}

	return status;
}

/**
 * hif_bmi_raw_write - API to handle bmi raw buffer
 * @device: hif context
 * @buffer: buffer
 * @length: length
 *
 * Thin public wrapper over hif_bmi_buffer_send().
 *
 * Return: QDF_STATUS_SUCCESS for success.
 */

QDF_STATUS
hif_bmi_raw_write(struct hif_sdio_dev *device, char *buffer,
		  uint32_t length)
{
	return hif_bmi_buffer_send(device, buffer, length);
}

/**
 * hif_bmi_raw_read - call when bmi buffer is received
 * @device: hif context
 * @buffer: buffer
 * @length: length
 * @want_timeout: timeout is needed or not
 *
 * Thin public wrapper over hif_bmi_buffer_receive().
 *
 * Return: QDF_STATUS_SUCCESS for success.
 */
QDF_STATUS
hif_bmi_raw_read(struct hif_sdio_dev *device, char *buffer,
		 uint32_t length, bool want_timeout)
{
	return hif_bmi_buffer_receive(device, buffer, length,
				      want_timeout);
}

#ifdef BRINGUP_DEBUG
#define SDIO_SCRATCH_1_ADDRESS 0x864
/*Functions used for debugging*/
/**
 * hif_bmi_write_scratch_register - API to write scratch register
 * @device: hif context
 * @buffer: 32-bit value written to the scratch register
 *
 * Return: QDF_STATUS_SUCCESS for success.
 */
QDF_STATUS hif_bmi_write_scratch_register(struct hif_sdio_dev *device,
					  uint32_t buffer) {
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	status = hif_read_write(device, SDIO_SCRATCH_1_ADDRESS,
				(uint8_t *) &buffer, 4,
				HIF_WR_SYNC_BYTE_INC, NULL);
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("%s: Unable to write to 0x%x\n",
			__func__, SDIO_SCRATCH_1_ADDRESS));
		return QDF_STATUS_E_FAILURE;
	}
	AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: wrote 0x%x to 0x%x\n", __func__,
			buffer, SDIO_SCRATCH_1_ADDRESS));

	return status;
}

/**
 * hif_bmi_read_scratch_register - API to read from scratch register
 * @device: hif context
 *
 * Reads the scratch register and logs the value (debug-only helper; the
 * value is not returned to the caller).
 *
 * Return: QDF_STATUS_SUCCESS for success.
 */
QDF_STATUS hif_bmi_read_scratch_register(struct hif_sdio_dev *device)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint32_t buffer = 0;

	status = hif_read_write(device, SDIO_SCRATCH_1_ADDRESS,
				(uint8_t *) &buffer, 4,
				HIF_RD_SYNC_BYTE_INC, NULL);
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("%s: Unable to read from 0x%x\n",
			__func__, SDIO_SCRATCH_1_ADDRESS));
		return QDF_STATUS_E_FAILURE;
	}
	AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: read 0x%x from 0x%x\n", __func__,
			buffer, SDIO_SCRATCH_1_ADDRESS));

	return status;
}
#endif
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_diag_reg_access.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_diag_reg_access.c new file mode 100644 index 0000000000000000000000000000000000000000..0f4d50c83abb402efc57e908694f638b684dfd86 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_diag_reg_access.c @@ -0,0 +1,323 @@
/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
+ */ + +#include "athdefs.h" +#include "a_types.h" +#include "a_osapi.h" +#define ATH_MODULE_NAME hif +#include "a_debug.h" + +#include "targaddrs.h" +#include "hif.h" +#include "if_sdio.h" +#include "regtable_sdio.h" + +#include "qdf_module.h" + +#define CPU_DBG_SEL_ADDRESS 0x00000483 +#define CPU_DBG_ADDRESS 0x00000484 +#define WORD_NON_ALIGNMENT_MASK 0x03 + +/** + * hif_ar6000_set_address_window_register - set the window address register + * (using 4-byte register access). + * @hif_device: hif context + * @register_addr: register address + * @addr: addr + * + * This mitigates host interconnect issues with non-4byte aligned bus requests, + * some interconnects use bus adapters that impose strict limitations. + * Since diag window access is not intended for performance critical operations, + * the 4byte mode should be satisfactory as it generates 4X the bus activity. + * + * Return: QDF_STATUS_SUCCESS for success. + */ +static +QDF_STATUS hif_ar6000_set_address_window_register( + struct hif_sdio_dev *hif_device, + uint32_t register_addr, + uint32_t addr) +{ + QDF_STATUS status; + static uint32_t address; + + address = addr; + /*AR6320,just write the 4-byte address to window register*/ + status = hif_read_write(hif_device, + register_addr, + (char *) (&address), + 4, HIF_WR_SYNC_BYTE_INC, NULL); + + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("Cannot write 0x%x to window reg: 0x%X\n", + addr, register_addr)); + return status; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_diag_read_access - Read from the AR6000 through its diagnostic window. + * @hif_ctx: hif context + * @address: address + * @data: data + * + * No cooperation from the Target is required for this. + * + * Return: QDF_STATUS_SUCCESS for success. 
+ */ +QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx, + uint32_t address, + uint32_t *data) +{ + QDF_STATUS status; + static uint32_t readvalue; + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + struct hif_sdio_dev *hif_device = scn->hif_handle; + + if (address & WORD_NON_ALIGNMENT_MASK) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("[%s]addr is not 4 bytes align.addr[0x%08x]\n", + __func__, address)); + return QDF_STATUS_E_FAILURE; + } + + /* set window register to start read cycle */ + status = hif_ar6000_set_address_window_register(hif_device, + WINDOW_READ_ADDR_ADDRESS, + address); + + if (status != QDF_STATUS_SUCCESS) + return status; + + /* read the data */ + status = hif_read_write(hif_device, + WINDOW_DATA_ADDRESS, + (char *) &readvalue, + sizeof(uint32_t), HIF_RD_SYNC_BYTE_INC, NULL); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("Cannot read from WINDOW_DATA_ADDRESS\n")); + return status; + } + + *data = readvalue; + return status; +} + +/** + * hif_diag_write_access - Write to the AR6000 through its diagnostic window. + * @hif_ctx: hif context + * @address: address + * @data: data + * + * No cooperation from the Target is required for this. + * + * Return: QDF_STATUS_SUCCESS for success. 
 */
QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
				 uint32_t address, uint32_t data)
{
	QDF_STATUS status;
	/* NOTE(review): static, presumably a non-stack (DMA-safe) bounce
	 * buffer for hif_read_write() — confirm. Makes this non-reentrant.
	 */
	static uint32_t write_value;
	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
	struct hif_sdio_dev *hif_device = scn->hif_handle;

	/* diag window access only supports 4-byte-aligned addresses */
	if (address & WORD_NON_ALIGNMENT_MASK) {
		AR_DEBUG_PRINTF(ATH_LOG_ERR,
			("[%s]addr is not 4 bytes align.addr[0x%08x]\n",
			 __func__, address));
		return QDF_STATUS_E_FAILURE;
	}

	write_value = data;

	/* set write data */
	status = hif_read_write(hif_device,
				WINDOW_DATA_ADDRESS,
				(char *) &write_value,
				sizeof(uint32_t), HIF_WR_SYNC_BYTE_INC, NULL);
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_LOG_ERR,
			("Cannot write 0x%x to WINDOW_DATA_ADDRESS\n",
			 data));
		return status;
	}

	/* set window register, which starts the write cycle */
	return hif_ar6000_set_address_window_register(hif_device,
						WINDOW_WRITE_ADDR_ADDRESS,
						address);
}

/**
 * hif_diag_write_mem - Write a block data to the AR6000 through its diagnostic
 * window.
 * @scn: hif context
 * @address: address
 * @data: data
 * @nbytes: nbytes
 *
 * This function may take some time.
 * No cooperation from the Target is required for this.
 *
 * Return: QDF_STATUS_SUCCESS for success.
 */
QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *scn, uint32_t address,
			      uint8_t *data, int nbytes)
{
	QDF_STATUS status;
	int32_t i;
	uint32_t tmp_data;

	/* both the start address and the length must be word aligned */
	if ((address & WORD_NON_ALIGNMENT_MASK) ||
	    (nbytes & WORD_NON_ALIGNMENT_MASK)) {
		AR_DEBUG_PRINTF(ATH_LOG_ERR,
			("[%s]addr or length is not 4 bytes align.addr[0x%08x] len[0x%08x]\n",
			 __func__, address, nbytes));
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < nbytes; i += 4) {
		/* pack four buffer bytes into a little-endian word */
		tmp_data =
			data[i] | (data[i + 1] << 8) | (data[i + 2] << 16) |
			(data[i + 3] << 24);
		status = hif_diag_write_access(scn, address + i, tmp_data);
		if (status != QDF_STATUS_SUCCESS) {
			AR_DEBUG_PRINTF(ATH_LOG_ERR,
				("Diag Write mem failed.addr[0x%08x] value[0x%08x]\n",
				 address + i, tmp_data));
			return status;
		}
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * hif_diag_read_mem - Read a block data to the AR6000 through its diagnostic
 * window.
 * @scn: hif context
 * @address: address to read from
 * @data: caller buffer receiving the bytes read
 * @nbytes: number of bytes to read (must be a multiple of 4)
 *
 * This function may take some time.
 * No cooperation from the Target is required for this.
 *
 * Return: QDF_STATUS_SUCCESS for success.
+ */ +QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *scn, + uint32_t address, uint8_t *data, + int nbytes) +{ + QDF_STATUS status; + int32_t i; + uint32_t tmp_data; + + if ((address & WORD_NON_ALIGNMENT_MASK) || + (nbytes & WORD_NON_ALIGNMENT_MASK)) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("[%s]addr or length is not 4 bytes align.addr[0x%08x] len[0x%08x]\n", + __func__, address, nbytes)); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < nbytes; i += 4) { + status = hif_diag_read_access(scn, address + i, &tmp_data); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("Diag Write mem failed.addr[0x%08x] value[0x%08x]\n", + address + i, tmp_data)); + return status; + } + data[i] = tmp_data & 0xff; + data[i + 1] = tmp_data >> 8 & 0xff; + data[i + 2] = tmp_data >> 16 & 0xff; + data[i + 3] = tmp_data >> 24 & 0xff; + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(hif_diag_read_mem); + +/** + * hif_ar6k_read_target_register - call to read target register values + * @hif_device: hif context + * @regsel: register selection + * @regval: reg value + * + * Return: QDF_STATUS_SUCCESS for success. 
+ */ +static QDF_STATUS hif_ar6k_read_target_register(struct hif_sdio_dev *hif_device, + int regsel, uint32_t *regval) +{ + QDF_STATUS status; + char vals[4]; + char register_selection[4]; + + register_selection[0] = regsel & 0xff; + register_selection[1] = regsel & 0xff; + register_selection[2] = regsel & 0xff; + register_selection[3] = regsel & 0xff; + status = hif_read_write(hif_device, CPU_DBG_SEL_ADDRESS, + register_selection, 4, + HIF_WR_SYNC_BYTE_FIX, NULL); + + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("Cannot write CPU_DBG_SEL (%d)\n", regsel)); + return status; + } + + status = hif_read_write(hif_device, + CPU_DBG_ADDRESS, + (char *) vals, + sizeof(vals), HIF_RD_SYNC_BYTE_INC, NULL); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_LOG_ERR, + ("Cannot read from CPU_DBG_ADDRESS\n")); + return status; + } + + *regval = vals[0] << 0 | vals[1] << 8 | + vals[2] << 16 | vals[3] << 24; + + return status; +} + +/** + * hif_ar6k_fetch_target_regs - call to fetch target reg values + * @hif_device: hif context + * @targregs: target regs + * + * Return: None + */ +void hif_ar6k_fetch_target_regs(struct hif_sdio_dev *hif_device, + uint32_t *targregs) +{ + int i; + uint32_t val; + + for (i = 0; i < AR6003_FETCH_TARG_REGS_COUNT; i++) { + val = 0xffffffff; + hif_ar6k_read_target_register(hif_device, i, &val); + targregs[i] = val; + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio.c new file mode 100644 index 0000000000000000000000000000000000000000..896dff6689b1b297ef14049edf300659a1a993ec --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio.c @@ -0,0 +1,217 @@ +/* + * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved. 
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/* NOTE(review): the angle-bracket include targets below were lost in
 * extraction (<...> stripped); recover the original filenames from the
 * upstream hif_sdio.c before building — TODO confirm.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "qdf_net_types.h"
#include "a_types.h"
#include "athdefs.h"
#include "a_osapi.h"
#include
#include
#include
#include "hif_sdio_dev.h"
#include "if_sdio.h"
#include "regtable_sdio.h"

#define ATH_MODULE_NAME hif_sdio

/**
 * hif_start() - start hif bus interface.
 * @hif_ctx: HIF context
 *
 * Enables hif device interrupts
 *
 * Return: QDF_STATUS_SUCCESS (always; interrupt enabling is fire-and-forget
 * here)
 */
uint32_t hif_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
	struct hif_sdio_dev *hif_device = scn->hif_handle;
	struct hif_sdio_device *htc_sdio_device = hif_dev_from_hif(hif_device);

	HIF_ENTER();
	hif_dev_enable_interrupts(htc_sdio_device);
	HIF_EXIT();
	return QDF_STATUS_SUCCESS;
}

/**
 * hif_flush_surprise_remove() - remove hif bus interface.
 * @hif_ctx: HIF context
 *
 * No-op on SDIO; kept to satisfy the common HIF interface.
 *
 * Return: none
 */
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
{

}

/**
 * hif_sdio_stop() - stop hif bus interface.
 * @hif_ctx: HIF context
 *
 * Disable hif device interrupts and destroy hif context
 *
 * Return: none
 */
void hif_sdio_stop(struct hif_softc *hif_ctx)
{
	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
	struct hif_sdio_dev *hif_device = scn->hif_handle;
	struct hif_sdio_device *htc_sdio_device = hif_dev_from_hif(hif_device);

	HIF_ENTER();
	if (htc_sdio_device != NULL) {
		hif_dev_disable_interrupts(htc_sdio_device);
		hif_dev_destroy(htc_sdio_device);
	}
	HIF_EXIT();
}

/**
 * hif_send_head() - send data on hif bus interface.
 * @hif_ctx: HIF context
 * @pipe: pipe id to send on
 * @transfer_id: endpoint/transfer identifier passed through to the device
 * @nbytes: number of bytes to send
 * @buf: network buffer holding the payload
 * @data_attr: unused on SDIO
 *
 * send tx data on a given pipe id
 *
 * Return: QDF_STATUS of the underlying hif_dev_send_buffer()
 */
QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
			uint32_t transfer_id, uint32_t nbytes, qdf_nbuf_t buf,
			uint32_t data_attr)
{
	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
	struct hif_sdio_dev *hif_device = scn->hif_handle;
	struct hif_sdio_device *htc_sdio_device = hif_dev_from_hif(hif_device);

	return hif_dev_send_buffer(htc_sdio_device,
				transfer_id, pipe,
				nbytes, buf);
}

/**
 * hif_map_service_to_pipe() - maps ul/dl pipe to service id.
 * @hif_hdl: HIF handle
 * @service_id: service index
 * @ul_pipe: uplink pipe id (out)
 * @dl_pipe: downlink pipe id (out)
 * @ul_is_polled: if ul is polling based (unused on SDIO)
 * @dl_is_polled: if dl is polling based (unused on SDIO)
 *
 * Return: 0 on success, non-zero (QDF error) otherwise
 */
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl,
			uint16_t service_id, uint8_t *ul_pipe,
			uint8_t *dl_pipe, int *ul_is_polled,
			int *dl_is_polled)
{
	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_hdl);
	struct hif_sdio_dev *hif_device = scn->hif_handle;
	struct hif_sdio_device *htc_sdio_device = hif_dev_from_hif(hif_device);

	return hif_dev_map_service_to_pipe(htc_sdio_device,
					service_id, ul_pipe, dl_pipe,
					hif_device->swap_mailbox);
}

/**
 * hif_get_default_pipe() - get the default (HTC control) ul/dl pipes.
 * @scn: HIF context
 * @ul_pipe: uplink pipe id (out)
 * @dl_pipe: downlink pipe id (out)
 *
 * Resolves the pipes used by the HTC_CTRL_RSVD_SVC control service.
 *
 * Return: none
 */
void hif_get_default_pipe(struct hif_opaque_softc *scn, uint8_t *ul_pipe,
			  uint8_t *dl_pipe)
{
	hif_map_service_to_pipe(scn, HTC_CTRL_RSVD_SVC,
				ul_pipe, dl_pipe, NULL, NULL);
}

/**
 * hif_post_init() - create hif device after probe.
 * @hif_ctx: HIF context
 * @target: HIF target
 * @callbacks: htc callbacks
 *
 * Creates the hif_sdio_device on first call, then (re)runs device setup.
 *
 * Return: none
 */
void hif_post_init(struct hif_opaque_softc *hif_ctx, void *target,
		  struct hif_msg_callbacks *callbacks)
{
	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
	struct hif_sdio_dev *hif_device = scn->hif_handle;
	struct hif_sdio_device *htc_sdio_device = hif_dev_from_hif(hif_device);

	if (htc_sdio_device == NULL)
		htc_sdio_device = hif_dev_create(hif_device, callbacks, target);

	if (htc_sdio_device)
		hif_dev_setup(htc_sdio_device);
}

/**
 * hif_get_free_queue_number() - report free tx queue slots for a pipe.
 * @hif_ctx: HIF context
 * @pipe: pipe id
 *
 * SDIO uses credit based flow control at the HTC layer
 * so transmit resource checks are bypassed
 *
 * Return: always 1 (a slot is always "available")
 */
uint16_t hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx,
				   uint8_t pipe)
{
	uint16_t rv;

	rv = 1;
	return rv;
}

/**
 * hif_send_complete_check() - check tx complete on a given pipe.
 * @hif_ctx: HIF context
 * @pipe: pipe id
 * @force: check if need to pool for completion
 * Decide whether to actually poll for completions, or just
 * wait for a later chance.
+ * + * Return: int + */ +void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe, + int force) +{ + +} + diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_common.h b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_common.h new file mode 100644 index 0000000000000000000000000000000000000000..ad1088973c86cd2fa76ad894082be79045ce8810 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_common.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _HIF_SDIO_COMMON_H_ +#define _HIF_SDIO_COMMON_H_ + +/* SDIO manufacturer ID and Codes */ +#define MANUFACTURER_ID_AR6002_BASE 0x200 +#define MANUFACTURER_ID_AR6003_BASE 0x300 +#define MANUFACTURER_ID_AR6004_BASE 0x400 +#define MANUFACTURER_ID_AR6320_BASE 0x500 +#define MANUFACTURER_ID_QCA9377_BASE 0x700 +#define MANUFACTURER_ID_QCA9379_BASE 0x800 +#define MANUFACTURER_ID_AR6K_BASE_MASK 0xFF00 +#define MANUFACTURER_ID_AR6K_REV_MASK 0x00FF +#define FUNCTION_CLASS 0x0 +#define MANUFACTURER_CODE 0x271 + + /* Mailbox address in SDIO address space */ +#if defined(SDIO_3_0) +#define HIF_MBOX_BASE_ADDR 0x1000 +#define HIF_MBOX_DUMMY_WIDTH 0x800 +#else +#define HIF_MBOX_BASE_ADDR 0x800 +#define HIF_MBOX_DUMMY_WIDTH 0 +#endif + +#define HIF_MBOX_WIDTH 0x800 + +#define HIF_MBOX_START_ADDR(mbox) \ + (HIF_MBOX_BASE_ADDR + mbox * (HIF_MBOX_WIDTH + HIF_MBOX_DUMMY_WIDTH)) + +#define HIF_MBOX_END_ADDR(mbox) \ + (HIF_MBOX_START_ADDR(mbox) + HIF_MBOX_WIDTH - 1) + + /* extended MBOX address for larger MBOX writes to MBOX 0*/ +#if defined(SDIO_3_0) +#define HIF_MBOX0_EXTENDED_BASE_ADDR 0x5000 +#else +#define HIF_MBOX0_EXTENDED_BASE_ADDR 0x2800 +#endif +#define HIF_MBOX0_EXTENDED_WIDTH_AR6002 (6*1024) +#define HIF_MBOX0_EXTENDED_WIDTH_AR6003 (18*1024) + + /* version 1 of the chip has only a 12K extended mbox range */ +#define HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1 0x4000 +#define HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1 (12*1024) + +#define HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004 0x2800 +#define HIF_MBOX0_EXTENDED_WIDTH_AR6004 (18*1024) + + +#if defined(SDIO_3_0) +#define HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320 0x5000 +#define HIF_MBOX0_EXTENDED_WIDTH_AR6320 (36*1024) +#define HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0 (56*1024) +#define HIF_MBOX1_EXTENDED_WIDTH_AR6320 (36*1024) +#define HIF_MBOX_DUMMY_SPACE_SIZE_AR6320 (2*1024) +#else +#define HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320 0x2800 +#define HIF_MBOX0_EXTENDED_WIDTH_AR6320 (24*1024) +#define HIF_MBOX1_EXTENDED_WIDTH_AR6320 (24*1024) 
+#define HIF_MBOX_DUMMY_SPACE_SIZE_AR6320 0 +#endif + + + /* GMBOX addresses */ +#define HIF_GMBOX_BASE_ADDR 0x7000 +#define HIF_GMBOX_WIDTH 0x4000 + +/* for SDIO we recommend a 128-byte block size */ +#if defined(WITH_BACKPORTS) +#define HIF_DEFAULT_IO_BLOCK_SIZE 128 +#else +#define HIF_DEFAULT_IO_BLOCK_SIZE 256 +#endif + +#define FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868 +#define FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF +#define FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON 0x10000 +/* In SDIO 2.0, asynchronous interrupt is not in SPEC + * requirement, but AR6003 support it, so the register + * is placed in vendor specific field 0xF0(bit0) + * In SDIO 3.0, the register is defined in SPEC, and its + * address is 0x16(bit1) + */ +/* interrupt mode register of AR6003 */ +#define CCCR_SDIO_IRQ_MODE_REG_AR6003 0xF0 +/* mode to enable special 4-bit interrupt assertion without clock */ +#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6003 (1 << 0) + /* interrupt mode register of AR6320 */ +#define CCCR_SDIO_IRQ_MODE_REG_AR6320 0x16 +/* mode to enable special 4-bit interrupt assertion without clock */ +#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6320 (1 << 1) + +#define CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS 0xF0 +#define CCCR_SDIO_ASYNC_INT_DELAY_LSB 0x06 +#define CCCR_SDIO_ASYNC_INT_DELAY_MASK 0xC0 + +/* Vendor Specific Driver Strength Settings */ +#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR 0xf2 +#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_MASK 0x0e +#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A 0x02 +#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C 0x04 +#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D 0x08 + +#endif /* _HIF_SDIO_COMMON_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_dev.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_dev.c new file mode 100644 index 0000000000000000000000000000000000000000..34dce04e93f4fc4d5513fd129f9da68aec5a8773 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_dev.c @@ -0,0 +1,517 
@@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#define ATH_MODULE_NAME hif +#include "a_debug.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hif_sdio_internal.h" +#include "if_sdio.h" +#include "regtable_sdio.h" + +/* under HL SDIO, with Interface Memory support, we have + * the following reasons to support 2 mboxs: + * a) we need place different buffers in different + * mempool, for example, data using Interface Memory, + * desc and other using DRAM, they need different SDIO + * mbox channels. + * b) currently, tx mempool in LL case is separated from + * main mempool, the structure (descs at the beginning + * of every pool buffer) is different, because they only + * need store tx desc from host. To align with LL case, + * we also need 2 mbox support just as PCIe LL cases. + */ + +/** + * hif_dev_map_pipe_to_mail_box() - maps pipe id to mailbox. 
+ * @pdev: sdio device context + * @pipeid: pipe index + * + * + * Return: mailbox index + */ +uint8_t hif_dev_map_pipe_to_mail_box(struct hif_sdio_device *pdev, + uint8_t pipeid) +{ + /* TODO: temp version, should not hardcoded here, will be + * updated after HIF design + */ + if (2 == pipeid || 3 == pipeid) + return 1; + else if (0 == pipeid || 1 == pipeid) + return 0; + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: pipeid=%d,should not happen\n", + __func__, pipeid)); + qdf_assert(0); + return INVALID_MAILBOX_NUMBER; +} + +/** + * hif_dev_map_mail_box_to_pipe() - map sdio mailbox to htc pipe. + * @pdev: sdio device + * @mboxIndex: mailbox index + * @upload: boolean to decide mailbox index + * + * Disable hif device interrupts and destroy hif context + * + * Return: none + */ +uint8_t hif_dev_map_mail_box_to_pipe(struct hif_sdio_device *pdev, + uint8_t mbox_index, + bool upload) +{ + /* TODO: temp version, should not hardcoded here, will be + * updated after HIF design + */ + if (mbox_index == 0) + return upload ? 1 : 0; + else if (mbox_index == 1) + return upload ? 3 : 2; + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s:-----mboxIndex=%d,upload=%d, should not happen\n", + __func__, mbox_index, upload)); + qdf_assert(0); + return 0xff; +} + +/** + * hif_dev_map_service_to_pipe() - maps ul/dl pipe to service id. 
+ * @pDev: sdio device context + * @ServiceId: sevice index + * @ULPipe: uplink pipe id + * @DLPipe: down-linklink pipe id + * @SwapMapping: mailbox swap mapping + * + * Return: int + */ +QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_device *pdev, + uint16_t service_id, + uint8_t *ul_pipe, uint8_t *dl_pipe, + bool swap_mapping) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + switch (service_id) { + case HTT_DATA_MSG_SVC: + if (swap_mapping) { + *ul_pipe = 1; + *dl_pipe = 0; + } else { + *ul_pipe = 3; + *dl_pipe = 2; + } + break; + + case HTC_CTRL_RSVD_SVC: + case HTC_RAW_STREAMS_SVC: + *ul_pipe = 1; + *dl_pipe = 0; + break; + + case WMI_DATA_BE_SVC: + case WMI_DATA_BK_SVC: + case WMI_DATA_VI_SVC: + case WMI_DATA_VO_SVC: + *ul_pipe = 1; + *dl_pipe = 0; + break; + + case WMI_CONTROL_SVC: + if (swap_mapping) { + *ul_pipe = 3; + *dl_pipe = 2; + } else { + *ul_pipe = 1; + *dl_pipe = 0; + } + break; + + default: + status = !QDF_STATUS_SUCCESS; + break; + } + return status; +} + +/** + * hif_dev_alloc_rx_buffer() - allocate rx buffer. + * @pDev: sdio device context + * + * + * Return: htc buffer pointer + */ +HTC_PACKET *hif_dev_alloc_rx_buffer(struct hif_sdio_device *pdev) +{ + HTC_PACKET *packet; + qdf_nbuf_t netbuf; + uint32_t bufsize = 0, headsize = 0; + + bufsize = HIF_SDIO_RX_BUFFER_SIZE + HIF_SDIO_RX_DATA_OFFSET; + headsize = sizeof(HTC_PACKET); + netbuf = qdf_nbuf_alloc(NULL, bufsize + headsize, 0, 4, false); + if (netbuf == NULL) { + hif_err_rl("Allocate netbuf failed"); + return NULL; + } + packet = (HTC_PACKET *) qdf_nbuf_data(netbuf); + qdf_nbuf_reserve(netbuf, headsize); + + SET_HTC_PACKET_INFO_RX_REFILL(packet, + pdev, + qdf_nbuf_data(netbuf), + bufsize, ENDPOINT_0); + SET_HTC_PACKET_NET_BUF_CONTEXT(packet, netbuf); + return packet; +} + +/** + * hif_dev_create() - create hif device after probe. 
 * @hif_device: underlying hif sdio device (doc fixed: was wrongly '@scn')
 * @callbacks: htc callbacks
 * @target: HIF target
 *
 * Return: new hif_sdio_device, or NULL on allocation failure
 */
struct hif_sdio_device *hif_dev_create(struct hif_sdio_dev *hif_device,
		struct hif_msg_callbacks *callbacks, void *target)
{

	QDF_STATUS status;
	struct hif_sdio_device *pdev;

	pdev = qdf_mem_malloc(sizeof(struct hif_sdio_device));
	if (!pdev) {
		A_ASSERT(false);
		return NULL;
	}

	qdf_spinlock_create(&pdev->Lock);
	qdf_spinlock_create(&pdev->TxLock);
	qdf_spinlock_create(&pdev->RxLock);

	pdev->HIFDevice = hif_device;
	pdev->pTarget = target;
	/* register this context with the lower hif device so that
	 * hif_dev_from_hif() can recover it later
	 */
	status = hif_configure_device(hif_device,
				      HIF_DEVICE_SET_HTC_CONTEXT,
				      (void *)pdev, sizeof(pdev));
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("(%s)HIF_DEVICE_SET_HTC_CONTEXT failed!!!\n",
				 __func__));
	}

	A_MEMCPY(&pdev->hif_callbacks, callbacks, sizeof(*callbacks));

	return pdev;
}

/**
 * hif_dev_destroy() - destroy hif device.
 * @pdev: sdio device context
 *
 * Clears the HTC context registration and frees the device structure.
 *
 * Return: none
 */
void hif_dev_destroy(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;

	status = hif_configure_device(pdev->HIFDevice,
				      HIF_DEVICE_SET_HTC_CONTEXT,
				      (void *)NULL, 0);
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("(%s)HIF_DEVICE_SET_HTC_CONTEXT failed!!!\n",
				 __func__));
	}
	qdf_mem_free(pdev);
}

/**
 * hif_dev_from_hif() - get sdio device from hif device.
 * @hif_device: hif device context
 *
 * Return: hif sdio device context (NULL if none was registered)
 */
struct hif_sdio_device *hif_dev_from_hif(struct hif_sdio_dev *hif_device)
{
	struct hif_sdio_device *pdev = NULL;
	QDF_STATUS status;

	status = hif_configure_device(hif_device,
				HIF_DEVICE_GET_HTC_CONTEXT,
				(void **)&pdev, sizeof(struct hif_sdio_device));
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("(%s)HTC_SDIO_CONTEXT is NULL!!!\n",
				 __func__));
	}
	return pdev;
}

/**
 * hif_dev_disable_interrupts() - disable hif device interrupts.
 * @pdev: sdio device context
 *
 * Return: QDF_STATUS of the synchronous register write
 */
QDF_STATUS hif_dev_disable_interrupts(struct hif_sdio_device *pdev)
{
	struct MBOX_IRQ_ENABLE_REGISTERS regs;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	HIF_ENTER();

	LOCK_HIF_DEV(pdev);
	/* Disable all interrupts */
	pdev->IrqEnableRegisters.int_status_enable = 0;
	pdev->IrqEnableRegisters.cpu_int_status_enable = 0;
	pdev->IrqEnableRegisters.error_status_enable = 0;
	pdev->IrqEnableRegisters.counter_int_status_enable = 0;
	/* copy into our temp area so the device write happens
	 * outside the lock
	 */
	A_MEMCPY(&regs,
		 &pdev->IrqEnableRegisters, sizeof(pdev->IrqEnableRegisters));

	UNLOCK_HIF_DEV(pdev);

	/* always synchronous */
	status = hif_read_write(pdev->HIFDevice,
				INT_STATUS_ENABLE_ADDRESS,
				(char *) &regs,
				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
				HIF_WR_SYNC_BYTE_INC, NULL);

	if (status != QDF_STATUS_SUCCESS) {
		/* Can't write it for some reason */
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("Failed to update interrupt control registers err: %d",
			 status));
	}

	/* To Do mask the host controller interrupts */
	hif_mask_interrupt(pdev->HIFDevice);
	HIF_EXIT("status :%d", status);
	return status;
}

/**
 * hif_dev_enable_interrupts() - enables hif device interrupts.
 * @pdev: sdio device context
 *
 * Return: QDF_STATUS of the synchronous register write
 */
QDF_STATUS hif_dev_enable_interrupts(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;
	struct MBOX_IRQ_ENABLE_REGISTERS regs;

	HIF_ENTER();

	/* for good measure, make sure interrupt are disabled
	 * before unmasking at the HIF layer.
	 * The rationale here is that between device insertion
	 * (where we clear the interrupts the first time)
	 * and when HTC is finally ready to handle interrupts,
	 * other software can perform target "soft" resets.
	 * The AR6K interrupt enables reset back to an "enabled"
	 * state when this happens.
	 */
	hif_dev_disable_interrupts(pdev);

	/* Unmask the host controller interrupts */
	hif_un_mask_interrupt(pdev->HIFDevice);

	LOCK_HIF_DEV(pdev);

	/* Enable all the interrupts except for the internal
	 * AR6000 CPU interrupt
	 */
	pdev->IrqEnableRegisters.int_status_enable =
		INT_STATUS_ENABLE_ERROR_SET(0x01) |
		INT_STATUS_ENABLE_CPU_SET(0x01)
		| INT_STATUS_ENABLE_COUNTER_SET(0x01);

	/* enable 2 mboxs INT */
	pdev->IrqEnableRegisters.int_status_enable |=
		INT_STATUS_ENABLE_MBOX_DATA_SET(0x01) |
		INT_STATUS_ENABLE_MBOX_DATA_SET(0x02);

	/* Set up the CPU Interrupt Status Register, enable
	 * CPU sourced interrupt #0, #1.
	 * #0 is used for report assertion from target
	 * #1 is used for inform host that credit arrived
	 */
	pdev->IrqEnableRegisters.cpu_int_status_enable = 0x03;

	/* Set up the Error Interrupt Status Register
	 * (the >>16 shifts the SET macros' field values down to this
	 * one-byte register — presumably the macros target a packed 32-bit
	 * layout; confirm against the register definitions)
	 */
	pdev->IrqEnableRegisters.error_status_enable =
		(ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01)
		 | ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01)) >> 16;

	/* Set up the Counter Interrupt Status Register
	 * (only for debug interrupt to catch fatal errors)
	 */
	pdev->IrqEnableRegisters.counter_int_status_enable =
		(COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK)) >>
		24;

	/* copy into our temp area */
	A_MEMCPY(&regs,
		 &pdev->IrqEnableRegisters,
		 sizeof(struct MBOX_IRQ_ENABLE_REGISTERS));

	UNLOCK_HIF_DEV(pdev);

	/* always synchronous */
	status = hif_read_write(pdev->HIFDevice,
				INT_STATUS_ENABLE_ADDRESS,
				(char *) &regs,
				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
				HIF_WR_SYNC_BYTE_INC, NULL);

	if (status != QDF_STATUS_SUCCESS) {
		/* Can't write it for some reason */
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("Failed to update interrupt control registers err: %d\n",
			 status));

	}
	HIF_EXIT();
	return status;
}

/**
 * hif_dev_setup() - set up sdio device.
 * @pdev: sdio device context
 *
 * Queries mailbox addresses/block sizes, negotiates the IRQ processing
 * mode with the HIF layer, disables interrupts, and attaches the HTC
 * callbacks.
 *
 * Return: QDF_STATUS from hif_attach_htc()
 */
QDF_STATUS hif_dev_setup(struct hif_sdio_device *pdev)
{
	QDF_STATUS status;
	uint32_t blocksizes[MAILBOX_COUNT];
	struct htc_callbacks htc_cbs;
	struct hif_sdio_dev *hif_device = pdev->HIFDevice;

	HIF_ENTER();

	status = hif_configure_device(hif_device,
				      HIF_DEVICE_GET_MBOX_ADDR,
				      &pdev->MailBoxInfo,
				      sizeof(pdev->MailBoxInfo));

	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("(%s)HIF_DEVICE_GET_MBOX_ADDR failed!!!\n",
				 __func__));
		A_ASSERT(false);
	}

	status = hif_configure_device(hif_device,
				      HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
				      blocksizes, sizeof(blocksizes));
	if (status != QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
			("(%s)HIF_DEVICE_GET_MBOX_BLOCK_SIZE failed!!!\n",
			 __func__));
		A_ASSERT(false);
	}

	pdev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];
	pdev->BlockMask = pdev->BlockSize - 1;
	/* block size must be a power of two for the mask to be valid */
	A_ASSERT((pdev->BlockSize & pdev->BlockMask) == 0);

	/* assume we can process HIF interrupt events asynchronously */
	pdev->HifIRQProcessingMode = HIF_DEVICE_IRQ_ASYNC_SYNC;

	/* see if the HIF layer overrides this assumption */
	hif_configure_device(hif_device,
			     HIF_DEVICE_GET_IRQ_PROC_MODE,
			     &pdev->HifIRQProcessingMode,
			     sizeof(pdev->HifIRQProcessingMode));

	switch (pdev->HifIRQProcessingMode) {
	case HIF_DEVICE_IRQ_SYNC_ONLY:
		AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
			("HIF Interrupt processing is SYNC ONLY\n"));
		/* see if HIF layer wants HTC to yield */
		hif_configure_device(hif_device,
				     HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
				     &pdev->HifIRQYieldParams,
				     sizeof(pdev->HifIRQYieldParams));

		if (pdev->HifIRQYieldParams.recv_packet_yield_count > 0) {
			AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
				("HIF req of DSR yield per %d RECV packets\n",
				 pdev->HifIRQYieldParams.
				 recv_packet_yield_count));
			pdev->DSRCanYield = true;
		}
		break;
	case HIF_DEVICE_IRQ_ASYNC_SYNC:
		AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
			("HIF Interrupt processing is ASYNC and SYNC\n"));
		break;
	default:
		A_ASSERT(false);
		break;
	}

	pdev->HifMaskUmaskRecvEvent = NULL;

	/* see if the HIF layer implements the mask/unmask recv
	 * events function
	 */
	hif_configure_device(hif_device,
			     HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
			     &pdev->HifMaskUmaskRecvEvent,
			     sizeof(pdev->HifMaskUmaskRecvEvent));

	status = hif_dev_disable_interrupts(pdev);

	qdf_mem_zero(&htc_cbs, sizeof(struct htc_callbacks));
	/* the device layer handles these */
	htc_cbs.rwCompletionHandler = hif_dev_rw_completion_handler;
	htc_cbs.dsrHandler = hif_dev_dsr_handler;
	htc_cbs.context = pdev;
	status = hif_attach_htc(pdev->HIFDevice, &htc_cbs);

	HIF_EXIT();
	return status;
}
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_dev.h b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_dev.h
new file mode 100644
index 0000000000000000000000000000000000000000..6dc6aaacadcbb32e14be0dad2f89fe299274f2bd
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_dev.h
@@ -0,0 +1,54 @@
/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef HIF_SDIO_DEV_H_ +#define HIF_SDIO_DEV_H_ + +#include "qdf_net_types.h" +#include "a_types.h" +#include "athdefs.h" +#include "a_osapi.h" +#include +#include "athstartpack.h" +#include "hif_internal.h" + +struct hif_sdio_device *hif_dev_from_hif(struct hif_sdio_dev *hif_device); + +struct hif_sdio_device *hif_dev_create(struct hif_sdio_dev *hif_device, + struct hif_msg_callbacks *callbacks, + void *target); + +void hif_dev_destroy(struct hif_sdio_device *htc_sdio_device); + +QDF_STATUS hif_dev_setup(struct hif_sdio_device *htc_sdio_device); + +QDF_STATUS hif_dev_enable_interrupts(struct hif_sdio_device *htc_sdio_device); + +QDF_STATUS hif_dev_disable_interrupts(struct hif_sdio_device *htc_sdio_device); + +QDF_STATUS hif_dev_send_buffer(struct hif_sdio_device *htc_sdio_device, + unsigned int transfer_id, uint8_t pipe, + unsigned int nbytes, qdf_nbuf_t buf); + +QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_device *pdev, + uint16_t service_id, + uint8_t *ul_pipe, + uint8_t *dl_pipe, + bool swap_mapping); + +#endif /* HIF_SDIO_DEV_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_internal.h b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..fb0d76844eab4b8590e0d8e793eead8deef39c9d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_internal.h @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2013-2014, 2016-2017 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HIF_SDIO_INTERNAL_H_ +#define _HIF_SDIO_INTERNAL_H_ + +#include "a_debug.h" +#include "hif_sdio_dev.h" +#include "htc_packet.h" +#include "htc_api.h" +#include "hif_internal.h" + +#define INVALID_MAILBOX_NUMBER 0xFF + +#define HIF_SDIO_RX_BUFFER_SIZE 1792 +#define HIF_SDIO_RX_DATA_OFFSET 64 + +/* TODO: print output level and mask control */ +#define ATH_DEBUG_IRQ ATH_DEBUG_MAKE_MODULE_MASK(4) +#define ATH_DEBUG_XMIT ATH_DEBUG_MAKE_MODULE_MASK(5) +#define ATH_DEBUG_RECV ATH_DEBUG_MAKE_MODULE_MASK(6) + +#define ATH_DEBUG_MAX_MASK 32 + +#define OTHER_INTS_ENABLED (INT_STATUS_ENABLE_ERROR_MASK | \ + INT_STATUS_ENABLE_CPU_MASK | \ + INT_STATUS_ENABLE_COUNTER_MASK) + +/* HTC operational parameters */ +#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */ +#define HTC_TARGET_DEBUG_INTR_MASK 0x01 +#define HTC_TARGET_CREDIT_INTR_MASK 0xF0 + +#define MAILBOX_COUNT 4 +#define MAILBOX_FOR_BLOCK_SIZE 1 +#define MAILBOX_USED_COUNT 2 +#if defined(SDIO_3_0) +#define MAILBOX_LOOKAHEAD_SIZE_IN_WORD 2 +#else +#define MAILBOX_LOOKAHEAD_SIZE_IN_WORD 1 +#endif +#define AR6K_TARGET_DEBUG_INTR_MASK 0x01 + +PREPACK struct MBOX_IRQ_PROC_REGISTERS { + uint8_t host_int_status; + uint8_t cpu_int_status; + uint8_t error_int_status; + uint8_t 
counter_int_status; + uint8_t mbox_frame; + uint8_t rx_lookahead_valid; + uint8_t host_int_status2; + uint8_t gmbox_rx_avail; + uint32_t rx_lookahead[MAILBOX_LOOKAHEAD_SIZE_IN_WORD * MAILBOX_COUNT]; + uint32_t int_status_enable; +} POSTPACK; + +PREPACK struct MBOX_IRQ_ENABLE_REGISTERS { + uint8_t int_status_enable; + uint8_t cpu_int_status_enable; + uint8_t error_status_enable; + uint8_t counter_int_status_enable; +} POSTPACK; + +#define TOTAL_CREDIT_COUNTER_CNT 4 + +PREPACK struct MBOX_COUNTER_REGISTERS { + uint32_t counter[TOTAL_CREDIT_COUNTER_CNT]; +} POSTPACK; + +#define SDIO_NUM_DATA_RX_BUFFERS 64 +#define SDIO_DATA_RX_SIZE 1664 + +struct hif_sdio_device { + struct hif_sdio_dev *HIFDevice; + qdf_spinlock_t Lock; + qdf_spinlock_t TxLock; + qdf_spinlock_t RxLock; + struct MBOX_IRQ_PROC_REGISTERS IrqProcRegisters; + struct MBOX_IRQ_ENABLE_REGISTERS IrqEnableRegisters; + struct MBOX_COUNTER_REGISTERS MailBoxCounterRegisters; + struct hif_msg_callbacks hif_callbacks; + struct hif_device_mbox_info MailBoxInfo; + uint32_t BlockSize; + uint32_t BlockMask; + enum hif_device_irq_mode HifIRQProcessingMode; + struct hif_device_irq_yield_params HifIRQYieldParams; + bool DSRCanYield; + HIF_MASK_UNMASK_RECV_EVENT HifMaskUmaskRecvEvent; + int CurrentDSRRecvCount; + int RecheckIRQStatusCnt; + uint32_t RecvStateFlags; + void *pTarget; +}; + +#define LOCK_HIF_DEV(device) qdf_spin_lock(&(device)->Lock) +#define UNLOCK_HIF_DEV(device) qdf_spin_unlock(&(device)->Lock) +#define LOCK_HIF_DEV_RX(t) qdf_spin_lock(&(t)->RxLock) +#define UNLOCK_HIF_DEV_RX(t) qdf_spin_unlock(&(t)->RxLock) +#define LOCK_HIF_DEV_TX(t) qdf_spin_lock(&(t)->TxLock) +#define UNLOCK_HIF_DEV_TX(t) qdf_spin_unlock(&(t)->TxLock) + +#define DEV_CALC_RECV_PADDED_LEN(pDev, length) \ + (((length) + (pDev)->BlockMask) & (~((pDev)->BlockMask))) +#define DEV_CALC_SEND_PADDED_LEN(pDev, length) \ + DEV_CALC_RECV_PADDED_LEN(pDev, length) +#define DEV_IS_LEN_BLOCK_ALIGNED(pDev, length) \ + (((length) % (pDev)->BlockSize) == 
0) + +#define HTC_RECV_WAIT_BUFFERS (1 << 0) +#define HTC_OP_STATE_STOPPING (1 << 0) + +#define HTC_RX_PKT_IGNORE_LOOKAHEAD (1 << 0) +#define HTC_RX_PKT_REFRESH_HDR (1 << 1) +#define HTC_RX_PKT_PART_OF_BUNDLE (1 << 2) +#define HTC_RX_PKT_NO_RECYCLE (1 << 3) +#define HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK (1 << 4) + +#define IS_DEV_IRQ_PROCESSING_ASYNC_ALLOWED(pDev) \ + ((pDev)->HifIRQProcessingMode != HIF_DEVICE_IRQ_SYNC_ONLY) + +/* hif_sdio_dev.c */ +HTC_PACKET *hif_dev_alloc_rx_buffer(struct hif_sdio_device *pDev); + +uint8_t hif_dev_map_pipe_to_mail_box(struct hif_sdio_device *pDev, + uint8_t pipeid); +uint8_t hif_dev_map_mail_box_to_pipe(struct hif_sdio_device *pDev, + uint8_t mboxIndex, + bool upload); + +/* hif_sdio_recv.c */ +QDF_STATUS hif_dev_rw_completion_handler(void *context, QDF_STATUS status); +QDF_STATUS hif_dev_dsr_handler(void *context); + +#endif /* _HIF_SDIO_INTERNAL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_recv.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_recv.c new file mode 100644 index 0000000000000000000000000000000000000000..d04c87f0fef48be819bb856be39cb8fe1c948ba9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_recv.c @@ -0,0 +1,1538 @@ +/* + * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#define ATH_MODULE_NAME hif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hif_sdio_internal.h" +#include +#include "regtable_sdio.h" +#include "if_sdio.h" + +#define NBUF_ALLOC_FAIL_WAIT_TIME 100 +/* high nibble */ +#define BUNDLE_COUNT_HIGH(f) ((f & 0x0C) << 2) +/* low nibble */ +#define BUNDLE_COUNT_LOW(f) ((f & 0xF0) >> 4) +#define GET_RECV_BUNDLE_COUNT(f) (BUNDLE_COUNT_HIGH(f) + BUNDLE_COUNT_LOW(f)) + +static void hif_dev_dump_registers(struct hif_sdio_device *pdev, + struct MBOX_IRQ_PROC_REGISTERS *irq_proc_regs, + struct MBOX_IRQ_ENABLE_REGISTERS * + irq_enable_regs, + struct MBOX_COUNTER_REGISTERS * + mailbox_counter_registers) +{ + + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("RegTable->")); + + if (irq_proc_regs != NULL) { + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("HostIntStatus: 0x%x ", + irq_proc_regs->host_int_status)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("CPUIntStatus: 0x%x ", + irq_proc_regs->cpu_int_status)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("ErrorIntStatus: 0x%x ", + irq_proc_regs->error_int_status)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("CounterIntStatus: 0x%x ", + irq_proc_regs->counter_int_status)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("MboxFrame: 0x%x ", + irq_proc_regs->mbox_frame)); + + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("\nRegTable->")); + + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("RxLKAValid: 0x%x ", + irq_proc_regs->rx_lookahead_valid)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("RxLKA0: 0x%x", + irq_proc_regs->rx_lookahead[0])); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("RxLKA1: 0x%x ", + 
irq_proc_regs->rx_lookahead[1])); + + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("RxLKA2: 0x%x ", + irq_proc_regs->rx_lookahead[2])); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("RxLKA3: 0x%x", + irq_proc_regs->rx_lookahead[3])); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("\nRegTable->")); + + if (pdev->MailBoxInfo.gmbox_address != 0) { + /* if the target supports GMBOX hardware, + * dump some additional state + */ + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("GMBOX-HostIntStatus2: 0x%x ", + irq_proc_regs->host_int_status2)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("GMBOX-RX-Avail: 0x%x ", + irq_proc_regs->gmbox_rx_avail)); + } + } + + if (irq_enable_regs != NULL) { + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("Int Status Enable: 0x%x\n", + irq_enable_regs->int_status_enable)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("Counter Int Status Enable: 0x%x\n", + irq_enable_regs->counter_int_status_enable)); + } + + if (mailbox_counter_registers != NULL) { + int i; + + for (i = 0; i < 4; i++) { + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("Counter[%d]: 0x%x\n", i, + mailbox_counter_registers-> + counter[i])); + } + } + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("<------------------------------->\n")); +} + +static +QDF_STATUS hif_dev_alloc_and_prepare_rx_packets(struct hif_sdio_device *pdev, + uint32_t look_aheads[], + int messages, + HTC_PACKET_QUEUE *queue) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + HTC_PACKET *packet; + HTC_FRAME_HDR *hdr; + int i, j; + int num_messages; + int full_length; + bool no_recycle; + + /* lock RX while we assemble the packet buffers */ + LOCK_HIF_DEV_RX(pdev); + + for (i = 0; i < messages; i++) { + + hdr = (HTC_FRAME_HDR *) &look_aheads[i]; + if (hdr->EndpointID >= ENDPOINT_MAX) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Invalid Endpoint in look-ahead: %d\n", + hdr->EndpointID)); + /* invalid endpoint */ + status = QDF_STATUS_E_PROTO; + break; + } + + if (hdr->PayloadLen > HTC_MAX_PAYLOAD_LENGTH) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Payload length %d exceeds max HTC : %d !\n", + hdr->PayloadLen, + (uint32_t) 
HTC_MAX_PAYLOAD_LENGTH)); + status = QDF_STATUS_E_PROTO; + break; + } + + if ((hdr->Flags & HTC_FLAGS_RECV_BUNDLE_CNT_MASK) == 0) { + /* HTC header only indicates 1 message to fetch */ + num_messages = 1; + } else { + /* HTC header indicates that every packet to follow + * has the same padded length so that it can + * be optimally fetched as a full bundle + */ + num_messages = GET_RECV_BUNDLE_COUNT(hdr->Flags); + /* the count doesn't include the starter frame, just + * a count of frames to follow + */ + num_messages++; + /* A_ASSERT(numMessages <= target->MaxMsgPerBundle); */ + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, + ("HTC header indicates :%d messages can be fetched as a bundle\n", + num_messages)); + } + + full_length = + DEV_CALC_RECV_PADDED_LEN(pdev, + hdr->PayloadLen + + sizeof(HTC_FRAME_HDR)); + + /* get packet buffers for each message, if there was a + * bundle detected in the header, + * use pHdr as a template to fetch all packets in the bundle + */ + for (j = 0; j < num_messages; j++) { + + /* reset flag, any packets allocated using the + * RecvAlloc() API cannot be recycled on cleanup, + * they must be explicitly returned + */ + no_recycle = false; + packet = hif_dev_alloc_rx_buffer(pdev); + + if (packet == NULL) { + /* No error, simply need to mark that + * we are waiting for buffers. 
+ */ + pdev->RecvStateFlags |= HTC_RECV_WAIT_BUFFERS; + /* pDev->EpWaitingForBuffers = pEndpoint->Id; */ + status = QDF_STATUS_E_RESOURCES; + break; + } + /* AR_DEBUG_ASSERT(pPacket->Endpoint == pEndpoint->Id); + */ + /* clear flags */ + packet->PktInfo.AsRx.HTCRxFlags = 0; + packet->PktInfo.AsRx.IndicationFlags = 0; + packet->Status = QDF_STATUS_SUCCESS; + + if (no_recycle) + /* flag that these packets cannot be recycled, + * they have to be returned to the user + */ + packet->PktInfo.AsRx.HTCRxFlags |= + HTC_RX_PKT_NO_RECYCLE; + /* add packet to queue (also incase we need to + * cleanup down below) + */ + HTC_PACKET_ENQUEUE(queue, packet); + + /* if (HTC_STOPPING(target)) { + * status = QDF_STATUS_E_CANCELED; + * break; + * } + */ + + /* make sure message can fit in the endpoint buffer */ + if ((uint32_t) full_length > packet->BufferLength) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Payload Length Error : header reports payload of: %d (%d) endpoint buffer size: %d\n", + hdr->PayloadLen, full_length, + packet->BufferLength)); + status = QDF_STATUS_E_PROTO; + break; + } + + if (j > 0) { + /* for messages fetched in a bundle the expected + * lookahead is unknown as we are only using the + * lookahead of the first packet as a template + * of what to expect for lengths + */ + packet->PktInfo.AsRx.HTCRxFlags |= + HTC_RX_PKT_REFRESH_HDR; + /* set it to something invalid */ + packet->PktInfo.AsRx.ExpectedHdr = 0xFFFFFFFF; + } else { + packet->PktInfo.AsRx.ExpectedHdr = + look_aheads[i]; + } + /* set the amount of data to fetch */ + packet->ActualLength = + hdr->PayloadLen + HTC_HDR_LENGTH; + if ((j == (num_messages-1)) + && ((hdr->Flags) & HTC_FLAGS_RECV_1MORE_BLOCK)) + packet->PktInfo.AsRx.HTCRxFlags |= + HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK; + packet->Endpoint = hdr->EndpointID; + packet->Completion = NULL; + } + + if (QDF_IS_STATUS_ERROR(status)) { + break; + } + + } + + UNLOCK_HIF_DEV_RX(pdev); + + /* for NO RESOURCE error, no need to flush data queue */ + if 
(QDF_IS_STATUS_ERROR(status) + && (status != QDF_STATUS_E_RESOURCES)) { + while (!HTC_QUEUE_EMPTY(queue)) { + qdf_nbuf_t netbuf; + packet = htc_packet_dequeue(queue); + if (packet == NULL) + break; + netbuf = (qdf_nbuf_t) packet->pNetBufContext; + if (netbuf) + qdf_nbuf_free(netbuf); + } + } + if (status == QDF_STATUS_E_RESOURCES) + status = QDF_STATUS_SUCCESS; + return status; +} + +static inline QDF_STATUS hif_dev_recv_packet(struct hif_sdio_device *pdev, + HTC_PACKET *packet, + uint32_t recv_length, uint8_t mbox_index) +{ + uint32_t padded_length; + QDF_STATUS status; + bool sync = (packet->Completion == NULL) ? true : false; + + /* adjust the length to be a multiple of block size if appropriate */ + padded_length = DEV_CALC_RECV_PADDED_LEN(pdev, recv_length); + + if (padded_length > packet->BufferLength) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("DevRecvPacket, Not enough space for padlen:%d recvlen:%d bufferlen:%d\n", + padded_length, recv_length, + packet->BufferLength)); + if (packet->Completion != NULL) { + COMPLETE_HTC_PACKET(packet, QDF_STATUS_E_INVAL); + return QDF_STATUS_SUCCESS; + } + return QDF_STATUS_E_INVAL; + } + + /* mailbox index is saved in Endpoint member */ + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, + ("hif_dev_recv_packet (0x%lX : hdr:0x%X) Len:%d, Padded Length: %d Mbox:0x%X\n", + (unsigned long)packet, + packet->PktInfo.AsRx.ExpectedHdr, recv_length, + padded_length, + pdev->MailBoxInfo.mbox_addresses[mbox_index])); + status = hif_read_write(pdev->HIFDevice, + pdev->MailBoxInfo.mbox_addresses[mbox_index], + packet->pBuffer, padded_length, + (sync ? HIF_RD_SYNC_BLOCK_FIX : + HIF_RD_ASYNC_BLOCK_FIX), + sync ? 
NULL : packet); + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("EP%d, Seq:%d\n", + ((HTC_FRAME_HDR *) packet->pBuffer)-> + EndpointID, + ((HTC_FRAME_HDR *) packet->pBuffer)-> + ControlBytes1)); + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, + ("hif_dev_recv_packet (0x%lX : hdr:0x%X) Failed\n", + (unsigned long)packet, + packet->PktInfo.AsRx.ExpectedHdr)); + } + if (sync) { + packet->Status = status; + if (status == QDF_STATUS_SUCCESS) { + HTC_FRAME_HDR *hdr = + (HTC_FRAME_HDR *) packet->pBuffer; + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, + ("hif_dev_recv_packet EP:%d,Len:%d,Flag:%d,CB:0x%02X,0x%02X\n", + hdr->EndpointID, hdr->PayloadLen, + hdr->Flags, hdr->ControlBytes0, + hdr->ControlBytes1)); + } + } + + return status; +} + +static inline QDF_STATUS hif_dev_process_trailer(struct hif_sdio_device *pdev, + uint8_t *buffer, int length, + uint32_t *next_look_aheads, + int *num_look_aheads, + HTC_ENDPOINT_ID from_endpoint) +{ + HTC_RECORD_HDR *record; + uint8_t *record_buf; + HTC_LOOKAHEAD_REPORT *look_ahead; + uint8_t *orig_buffer; + int orig_length; + QDF_STATUS status; + + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, + ("+htc_process_trailer (length:%d)\n", length)); + + if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) + AR_DEBUG_PRINTBUF(buffer, length, "Recv Trailer"); + + orig_buffer = buffer; + orig_length = length; + status = QDF_STATUS_SUCCESS; + + while (length > 0) { + + if (length < sizeof(HTC_RECORD_HDR)) { + status = QDF_STATUS_E_PROTO; + break; + } + /* these are byte aligned structs */ + record = (HTC_RECORD_HDR *) buffer; + length -= sizeof(HTC_RECORD_HDR); + buffer += sizeof(HTC_RECORD_HDR); + + if (record->Length > length) { + /* no room left in buffer for record */ + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + (" invalid record len: %d (id:%d) buffer has:%d bytes left\n", + record->Length, record->RecordID, + length)); + status = QDF_STATUS_E_PROTO; + break; + } + /* start of record follows the header */ + record_buf = buffer; + + switch (record->RecordID) { + case 
HTC_RECORD_CREDITS: + /* Process in HTC, ignore here */ + break; + case HTC_RECORD_LOOKAHEAD: + AR_DEBUG_ASSERT(record->Length >= + sizeof(HTC_LOOKAHEAD_REPORT)); + look_ahead = (HTC_LOOKAHEAD_REPORT *) record_buf; + if ((look_ahead->PreValid == + ((~look_ahead->PostValid) & 0xFF)) + && (next_look_aheads != NULL)) { + + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, + (" look_ahead Report (pre valid:0x%X, post valid:0x%X) %d %d\n", + look_ahead->PreValid, + look_ahead->PostValid, + from_endpoint, + look_ahead->LookAhead0)); + /* look ahead bytes are valid, copy them over */ + ((uint8_t *) (&next_look_aheads[0]))[0] = + look_ahead->LookAhead0; + ((uint8_t *) (&next_look_aheads[0]))[1] = + look_ahead->LookAhead1; + ((uint8_t *) (&next_look_aheads[0]))[2] = + look_ahead->LookAhead2; + ((uint8_t *) (&next_look_aheads[0]))[3] = + look_ahead->LookAhead3; + + if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) { + debug_dump_bytes((uint8_t *) + next_look_aheads, 4, + "Next Look Ahead"); + } + /* just one normal lookahead */ + if (num_look_aheads != NULL) + *num_look_aheads = 1; + } + break; + case HTC_RECORD_LOOKAHEAD_BUNDLE: + AR_DEBUG_ASSERT(record->Length >= + sizeof(HTC_BUNDLED_LOOKAHEAD_REPORT)); + if (record->Length >= + sizeof(HTC_BUNDLED_LOOKAHEAD_REPORT) + && (next_look_aheads != NULL)) { + HTC_BUNDLED_LOOKAHEAD_REPORT + *pBundledLookAheadRpt; + int i; + + pBundledLookAheadRpt = + (HTC_BUNDLED_LOOKAHEAD_REPORT *) record_buf; + + if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) { + debug_dump_bytes(record_buf, + record->Length, + "Bundle look_ahead"); + } + + if ((record->Length / + (sizeof(HTC_BUNDLED_LOOKAHEAD_REPORT))) + > HTC_MAX_MSG_PER_BUNDLE_RX) { + /* this should never happen, the target + * restricts the number of messages per + * bundle configured by the host + */ + A_ASSERT(false); + status = QDF_STATUS_E_PROTO; + break; + } + for (i = 0; + i < + (int)(record->Length / + (sizeof + (HTC_BUNDLED_LOOKAHEAD_REPORT))); + i++) { + ((uint8_t *)(&next_look_aheads[i]))[0] = + 
pBundledLookAheadRpt->LookAhead0; + ((uint8_t *)(&next_look_aheads[i]))[1] = + pBundledLookAheadRpt->LookAhead1; + ((uint8_t *)(&next_look_aheads[i]))[2] = + pBundledLookAheadRpt->LookAhead2; + ((uint8_t *)(&next_look_aheads[i]))[3] = + pBundledLookAheadRpt->LookAhead3; + pBundledLookAheadRpt++; + } + if (num_look_aheads) { + *num_look_aheads = i; + } + } + break; + default: + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + (" HIF unhandled record: id:%d length:%d\n", + record->RecordID, record->Length)); + break; + } + + if (QDF_IS_STATUS_ERROR(status)) + break; + + /* advance buffer past this record for next time around */ + buffer += record->Length; + length -= record->Length; + } + + if (QDF_IS_STATUS_ERROR(status)) + debug_dump_bytes(orig_buffer, orig_length, + "BAD Recv Trailer"); + + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-htc_process_trailer\n")); + return status; + +} + +/* process a received message (i.e. strip off header, + * process any trailer data). + * note : locks must be released when this function is called + */ +static QDF_STATUS hif_dev_process_recv_header(struct hif_sdio_device *pdev, + HTC_PACKET *packet, + uint32_t *next_look_aheads, + int *num_look_aheads) +{ + uint8_t temp; + uint8_t *buf; + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint16_t payloadLen; + uint32_t look_ahead, actual_length; + + buf = packet->pBuffer; + actual_length = packet->ActualLength; + + if (num_look_aheads != NULL) + *num_look_aheads = 0; + + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("+HTCProcessRecvHeader\n")); + + if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) + AR_DEBUG_PRINTBUF(buf, packet->ActualLength, "HTC Recv PKT"); + + do { + /* note, we cannot assume the alignment of pBuffer, + * so we use the safe macros to + * retrieve 16 bit fields + */ + payloadLen = HTC_GET_FIELD(buf, HTC_FRAME_HDR, + PAYLOADLEN); + + ((uint8_t *) &look_ahead)[0] = buf[0]; + ((uint8_t *) &look_ahead)[1] = buf[1]; + ((uint8_t *) &look_ahead)[2] = buf[2]; + ((uint8_t *) &look_ahead)[3] = buf[3]; + + if 
(packet->PktInfo.AsRx.HTCRxFlags & HTC_RX_PKT_REFRESH_HDR) { + /* refresh expected hdr, since this was unknown + * at the time we grabbed the packets + * as part of a bundle + */ + packet->PktInfo.AsRx.ExpectedHdr = look_ahead; + /* refresh actual length since we now have the + * real header + */ + packet->ActualLength = payloadLen + HTC_HDR_LENGTH; + + /* validate the actual header that was refreshed */ + if (packet->ActualLength > packet->BufferLength) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Invalid HDR payload length (%d) in bundled RECV (hdr: 0x%X)\n", + payloadLen, look_ahead)); + /* limit this to max buffer just to print out + * some of the buffer + */ + packet->ActualLength = + min(packet->ActualLength, + packet->BufferLength); + status = QDF_STATUS_E_PROTO; + break; + } + + if (packet->Endpoint + != HTC_GET_FIELD(buf, HTC_FRAME_HDR, ENDPOINTID)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Refreshed HDR endpoint (%d) does not match expected endpoint (%d)\n", + HTC_GET_FIELD(buf, + HTC_FRAME_HDR, + ENDPOINTID), + packet->Endpoint)); + status = QDF_STATUS_E_PROTO; + break; + } + } + + if (look_ahead != packet->PktInfo.AsRx.ExpectedHdr) { + /* somehow the lookahead that gave us the full read + * length did not reflect the actual header + * in the pending message + */ + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("hif_dev_process_recv_header, lookahead mismatch! (pPkt:0x%lX flags:0x%X), 0x%08X != 0x%08X\n", + (unsigned long)packet, + packet->PktInfo.AsRx.HTCRxFlags, + look_ahead, + packet->PktInfo.AsRx.ExpectedHdr)); +#ifdef ATH_DEBUG_MODULE + debug_dump_bytes((uint8_t *) &packet->PktInfo.AsRx. 
+ ExpectedHdr, 4, + "Expected Message look_ahead"); + debug_dump_bytes(buf, sizeof(HTC_FRAME_HDR), + "Current Frame Header"); +#ifdef HTC_CAPTURE_LAST_FRAME + debug_dump_bytes((uint8_t *) &target->LastFrameHdr, + sizeof(HTC_FRAME_HDR), + "Last Frame Header"); + if (target->LastTrailerLength != 0) + debug_dump_bytes(target->LastTrailer, + target->LastTrailerLength, + "Last trailer"); +#endif +#endif + status = QDF_STATUS_E_PROTO; + break; + } + + /* get flags */ + temp = HTC_GET_FIELD(buf, HTC_FRAME_HDR, FLAGS); + + if (temp & HTC_FLAGS_RECV_TRAILER) { + /* this packet has a trailer */ + + /* extract the trailer length in control byte 0 */ + temp = + HTC_GET_FIELD(buf, HTC_FRAME_HDR, + CONTROLBYTES0); + + if ((temp < sizeof(HTC_RECORD_HDR)) + || (temp > payloadLen)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("hif_dev_process_recv_header, invalid header(payloadlength should be :%d, CB[0] is:%d)\n", + payloadLen, temp)); + status = QDF_STATUS_E_PROTO; + break; + } + + if (packet->PktInfo.AsRx. + HTCRxFlags & HTC_RX_PKT_IGNORE_LOOKAHEAD) { + /* this packet was fetched as part of an HTC + * bundle as the lookahead is not valid. 
+ * Next packet may have already been fetched as + * part of the bundle + */ + next_look_aheads = NULL; + num_look_aheads = NULL; + } + + /* process trailer data that follows HDR and + * application payload + */ + status = hif_dev_process_trailer(pdev, + (buf + HTC_HDR_LENGTH + + payloadLen - temp), temp, + next_look_aheads, + num_look_aheads, + packet->Endpoint); + + if (QDF_IS_STATUS_ERROR(status)) + break; + } + } while (false); + + if (QDF_IS_STATUS_ERROR(status)) { + /* dump the whole packet */ + debug_dump_bytes(buf, packet->ActualLength, + "BAD HTC Recv PKT"); + } else { + if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) { + if (packet->ActualLength > 0) { + AR_DEBUG_PRINTBUF(packet->pBuffer, + packet->ActualLength, + "HTC - Application Msg"); + } + } + } + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, + ("-hif_dev_process_recv_header\n")); + return status; +} + +static QDF_STATUS hif_dev_issue_recv_packet_bundle(struct hif_sdio_device *pdev, + HTC_PACKET_QUEUE *recv_pkt_queue, + HTC_PACKET_QUEUE * + sync_completion_queue, + uint8_t mail_box_index, + int *num_packets_fetched, + bool partial_bundle) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + int i, total_length = 0; + unsigned char *bundle_buffer = NULL; + HTC_PACKET *packet, *packet_rx_bundle; + HTC_TARGET *target = NULL; + uint32_t padded_length; + int bundleSpaceRemaining = 0; + + target = (HTC_TARGET *) pdev->pTarget; + + if ((HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) - + HTC_MAX_MSG_PER_BUNDLE_RX) > 0) { + partial_bundle = true; + AR_DEBUG_PRINTF(ATH_DEBUG_WARN, + ("%s, partial bundle detected num: %d, %d\n", + __func__, + HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue), + HTC_MAX_MSG_PER_BUNDLE_RX)); + } + + bundleSpaceRemaining = + HTC_MAX_MSG_PER_BUNDLE_RX * target->TargetCreditSize; + packet_rx_bundle = allocate_htc_bundle_packet(target); + if (!packet_rx_bundle) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: packet_rx_bundle is NULL\n", __func__)); + qdf_sleep(NBUF_ALLOC_FAIL_WAIT_TIME); /* 100 msec sleep */ + return 
QDF_STATUS_E_NOMEM; + } + bundle_buffer = packet_rx_bundle->pBuffer; + + for (i = 0; + !HTC_QUEUE_EMPTY(recv_pkt_queue) && i < HTC_MAX_MSG_PER_BUNDLE_RX; + i++) { + packet = htc_packet_dequeue(recv_pkt_queue); + A_ASSERT(packet != NULL); + if (!packet) { + break; + } + padded_length = + DEV_CALC_RECV_PADDED_LEN(pdev, packet->ActualLength); + if (packet->PktInfo.AsRx.HTCRxFlags & + HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK) + padded_length += HIF_MBOX_BLOCK_SIZE; + if ((bundleSpaceRemaining - padded_length) < 0) { + /* exceeds what we can transfer, put the packet back */ + HTC_PACKET_ENQUEUE_TO_HEAD(recv_pkt_queue, packet); + break; + } + bundleSpaceRemaining -= padded_length; + + if (partial_bundle || + HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) > 0) { + packet->PktInfo.AsRx.HTCRxFlags |= + HTC_RX_PKT_IGNORE_LOOKAHEAD; + } + packet->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_PART_OF_BUNDLE; + + if (sync_completion_queue) { + HTC_PACKET_ENQUEUE(sync_completion_queue, packet); + } + total_length += padded_length; + } +#if DEBUG_BUNDLE + qdf_print("Recv bundle count %d, length %d.\n", + sync_completion_queue ? + HTC_PACKET_QUEUE_DEPTH(sync_completion_queue) : 0, + total_length); +#endif + + status = hif_read_write(pdev->HIFDevice, + pdev->MailBoxInfo. 
+ mbox_addresses[(int)mail_box_index], + bundle_buffer, total_length, + HIF_RD_SYNC_BLOCK_FIX, NULL); + + if (status != QDF_STATUS_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s, hif_send Failed status:%d\n", + __func__, status)); + } else { + unsigned char *buffer = bundle_buffer; + *num_packets_fetched = i; + if (sync_completion_queue) { + HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE( + sync_completion_queue, packet) { + padded_length = + DEV_CALC_RECV_PADDED_LEN(pdev, + packet->ActualLength); + if (packet->PktInfo.AsRx.HTCRxFlags & + HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK) + padded_length += + HIF_MBOX_BLOCK_SIZE; + A_MEMCPY(packet->pBuffer, + buffer, padded_length); + buffer += padded_length; + } HTC_PACKET_QUEUE_ITERATE_END; + } + } + /* free bundle space under Sync mode */ + free_htc_bundle_packet(target, packet_rx_bundle); + return status; +} + +static inline void hif_dev_free_recv_pkt_queue(HTC_PACKET_QUEUE *recv_pkt_queue) +{ + HTC_PACKET *packet; + qdf_nbuf_t netbuf; + + while (!HTC_QUEUE_EMPTY(recv_pkt_queue)) { + + packet = htc_packet_dequeue(recv_pkt_queue); + if (packet == NULL) + break; + netbuf = (qdf_nbuf_t) packet-> + pNetBufContext; + if (netbuf) + qdf_nbuf_free(netbuf); + } +} + +static +QDF_STATUS hif_dev_recv_message_pending_handler(struct hif_sdio_device *pdev, + uint8_t mail_box_index, + uint32_t msg_look_aheads[], + int num_look_aheads, + bool *async_proc, + int *num_pkts_fetched) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + HTC_PACKET *packet; + bool asyncProc = false; + uint32_t look_aheads[HTC_MAX_MSG_PER_BUNDLE_RX]; + int pkts_fetched; + HTC_PACKET_QUEUE recv_pkt_queue, sync_completed_pkts_queue; + bool partial_bundle; + HTC_ENDPOINT_ID id; + int total_fetched = 0; + + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, + ("+HTCRecvMessagePendingHandler NumLookAheads: %d\n", + num_look_aheads)); + + if (num_pkts_fetched != NULL) + *num_pkts_fetched = 0; + + if (IS_DEV_IRQ_PROCESSING_ASYNC_ALLOWED(pdev)) { + /* We use async mode to get the packets 
if the + * device layer supports it. The device layer + * interfaces with HIF in which HIF may have + * restrictions on how interrupts are processed + */ + asyncProc = true; + } + + if (async_proc != NULL) + /* indicate to caller how we decided to process this */ + *async_proc = asyncProc; + if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) { + A_ASSERT(false); + return QDF_STATUS_E_PROTO; + } + A_MEMCPY(look_aheads, msg_look_aheads, + (sizeof(uint32_t)) * num_look_aheads); + while (true) { + + /* reset packets queues */ + INIT_HTC_PACKET_QUEUE(&recv_pkt_queue); + INIT_HTC_PACKET_QUEUE(&sync_completed_pkts_queue); + if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) { + status = QDF_STATUS_E_PROTO; + A_ASSERT(false); + break; + } + + /* first lookahead sets the expected endpoint IDs for + * all packets in a bundle + */ + id = ((HTC_FRAME_HDR *) &look_aheads[0])->EndpointID; + + if (id >= ENDPOINT_MAX) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("MsgPend, Invalid Endpoint in lookahead: %d\n", + id)); + status = QDF_STATUS_E_PROTO; + break; + } + /* try to allocate as many HTC RX packets indicated + * by the lookaheads these packets are stored + * in the recvPkt queue + */ + status = hif_dev_alloc_and_prepare_rx_packets(pdev, + look_aheads, + num_look_aheads, + &recv_pkt_queue); + if (QDF_IS_STATUS_ERROR(status)) + break; + total_fetched += HTC_PACKET_QUEUE_DEPTH(&recv_pkt_queue); + + /* we've got packet buffers for all we can currently fetch, + * this count is not valid anymore + */ + num_look_aheads = 0; + partial_bundle = false; + + /* now go fetch the list of HTC packets */ + while (!HTC_QUEUE_EMPTY(&recv_pkt_queue)) { + + pkts_fetched = 0; + if ((HTC_PACKET_QUEUE_DEPTH(&recv_pkt_queue) > 1)) { + /* there are enough packets to attempt a bundle + * transfer and recv bundling is allowed + */ + status = hif_dev_issue_recv_packet_bundle(pdev, + &recv_pkt_queue, + asyncProc ? 
+ NULL : + &sync_completed_pkts_queue, + mail_box_index, + &pkts_fetched, + partial_bundle); + if (QDF_IS_STATUS_ERROR(status)) { + hif_dev_free_recv_pkt_queue( + &recv_pkt_queue); + break; + } + + if (HTC_PACKET_QUEUE_DEPTH(&recv_pkt_queue) != + 0) { + /* we couldn't fetch all packets at one, + * time this creates a broken + * bundle + */ + partial_bundle = true; + } + } + + /* see if the previous operation fetched any + * packets using bundling + */ + if (0 == pkts_fetched) { + /* dequeue one packet */ + packet = htc_packet_dequeue(&recv_pkt_queue); + A_ASSERT(packet != NULL); + if (!packet) { + break; + } + + packet->Completion = NULL; + + if (HTC_PACKET_QUEUE_DEPTH(&recv_pkt_queue) > + 0) { + /* lookaheads in all packets except the + * last one in must be ignored + */ + packet->PktInfo.AsRx.HTCRxFlags |= + HTC_RX_PKT_IGNORE_LOOKAHEAD; + } + + /* go fetch the packet */ + status = + hif_dev_recv_packet(pdev, packet, + packet->ActualLength, + mail_box_index); + if (QDF_IS_STATUS_ERROR(status)) { + while (!HTC_QUEUE_EMPTY(&recv_pkt_queue)) { + qdf_nbuf_t netbuf; + packet = + htc_packet_dequeue(&recv_pkt_queue); + if (packet == NULL) + break; + netbuf = + (qdf_nbuf_t) packet->pNetBufContext; + if (netbuf) + qdf_nbuf_free(netbuf); + } + break; + } + /* sent synchronously, queue this packet for + * synchronous completion + */ + HTC_PACKET_ENQUEUE(&sync_completed_pkts_queue, + packet); + } + } + + /* synchronous handling */ + if (pdev->DSRCanYield) { + /* for the SYNC case, increment count that tracks + * when the DSR should yield + */ + pdev->CurrentDSRRecvCount++; + } + + /* in the sync case, all packet buffers are now filled, + * we can process each packet, check lookahead , then repeat + */ + + /* unload sync completion queue */ + while (!HTC_QUEUE_EMPTY(&sync_completed_pkts_queue)) { + uint8_t pipeid; + qdf_nbuf_t netbuf; + + packet = htc_packet_dequeue(&sync_completed_pkts_queue); + A_ASSERT(packet != NULL); + if (!packet) { + break; + } + + num_look_aheads = 0; 
+ status = + hif_dev_process_recv_header(pdev, packet, + look_aheads, + &num_look_aheads); + if (QDF_IS_STATUS_ERROR(status)) { + HTC_PACKET_ENQUEUE_TO_HEAD(&sync_completed_pkts_queue, + packet); + break; + } + + netbuf = (qdf_nbuf_t) packet->pNetBufContext; + /* set data length */ + qdf_nbuf_put_tail(netbuf, packet->ActualLength); + + if (pdev->hif_callbacks.rxCompletionHandler) { + pipeid = + hif_dev_map_mail_box_to_pipe(pdev, + mail_box_index, + true); + pdev->hif_callbacks.rxCompletionHandler(pdev-> + hif_callbacks. + Context, + netbuf, + pipeid); + } + } + + if (QDF_IS_STATUS_ERROR(status)) { + if (!HTC_QUEUE_EMPTY(&sync_completed_pkts_queue)) + hif_dev_free_recv_pkt_queue( + &sync_completed_pkts_queue); + break; + } + + if (num_look_aheads == 0) { + /* no more look aheads */ + break; + } + /* check whether other OS contexts have queued any WMI + * command/data for WLAN. This check is needed only if WLAN + * Tx and Rx happens in same thread context + */ + /* A_CHECK_DRV_TX(); */ + } + if (num_pkts_fetched != NULL) + *num_pkts_fetched = total_fetched; + + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-HTCRecvMessagePendingHandler\n")); + return status; +} + +/** + * hif_dev_service_cpu_interrupt() - service fatal interrupts + * synchronously + * + * @pDev: hif sdio device context + * + * Return: QDF_STATUS_SUCCESS for success + */ +static QDF_STATUS hif_dev_service_cpu_interrupt(struct hif_sdio_device *pdev) +{ + QDF_STATUS status; + uint8_t cpu_int_status; + uint8_t reg_buffer[4]; + + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("CPU Interrupt\n")); + cpu_int_status = pdev->IrqProcRegisters.cpu_int_status + & pdev->IrqEnableRegisters.cpu_int_status_enable; + A_ASSERT(cpu_int_status); + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, + ("Valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n", + cpu_int_status)); + + /* Clear the interrupt */ + pdev->IrqProcRegisters.cpu_int_status &= ~cpu_int_status; + + /*set up the register transfer buffer to hit the register + * 4 times , this is done to make the 
access 4-byte aligned + * to mitigate issues with host bus interconnects that + * restrict bus transfer lengths to be a multiple of 4-bytes + * set W1C value to clear the interrupt, this hits the register + * first + */ + reg_buffer[0] = cpu_int_status; + /* the remaining 4 values are set to zero which have no-effect */ + reg_buffer[1] = 0; + reg_buffer[2] = 0; + reg_buffer[3] = 0; + + status = hif_read_write(pdev->HIFDevice, + CPU_INT_STATUS_ADDRESS, + reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL); + + A_ASSERT(status == QDF_STATUS_SUCCESS); + + /* The Interrupt sent to the Host is generated via bit0 + * of CPU INT register + */ + if (cpu_int_status & 0x1) { + if (pdev->hif_callbacks.fwEventHandler) + /* It calls into HTC which propagates this + * to ol_target_failure() + */ + pdev->hif_callbacks.fwEventHandler(pdev->hif_callbacks. + Context, QDF_STATUS_E_FAILURE); + } else + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: Unable to call fwEventHandler, invalid input arguments\n", + __func__)); + + return status; +} + +/** + * hif_dev_service_error_interrupt() - service error interrupts + * synchronously + * + * @pDev: hif sdio device context + * + * Return: QDF_STATUS_SUCCESS for success + */ +static QDF_STATUS hif_dev_service_error_interrupt(struct hif_sdio_device *pdev) +{ + QDF_STATUS status; + uint8_t error_int_status; + uint8_t reg_buffer[4]; + + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Error Interrupt\n")); + error_int_status = pdev->IrqProcRegisters.error_int_status & 0x0F; + A_ASSERT(error_int_status); + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, + ("Valid interrupt source in ERROR_INT_STATUS: 0x%x\n", + error_int_status)); + + if (ERROR_INT_STATUS_WAKEUP_GET(error_int_status)) { + /* Wakeup */ + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Error : Wakeup\n")); + } + + if (ERROR_INT_STATUS_RX_UNDERFLOW_GET(error_int_status)) { + /* Rx Underflow */ + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Error : Rx Underflow\n")); + } + + if (ERROR_INT_STATUS_TX_OVERFLOW_GET(error_int_status)) { + /* Tx Overflow */ + 
AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Error : Tx Overflow\n")); + } + + /* Clear the interrupt */ + pdev->IrqProcRegisters.error_int_status &= ~error_int_status; + + /* set up the register transfer buffer to hit the register + * 4 times , this is done to make the access 4-byte + * aligned to mitigate issues with host bus interconnects that + * restrict bus transfer lengths to be a multiple of 4-bytes + */ + + /* set W1C value to clear the interrupt */ + reg_buffer[0] = error_int_status; + /* the remaining 4 values are set to zero which have no-effect */ + reg_buffer[1] = 0; + reg_buffer[2] = 0; + reg_buffer[3] = 0; + + status = hif_read_write(pdev->HIFDevice, + ERROR_INT_STATUS_ADDRESS, + reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL); + + A_ASSERT(status == QDF_STATUS_SUCCESS); + return status; +} + +/** + * hif_dev_service_debug_interrupt() - service debug interrupts + * synchronously + * + * @pDev: hif sdio device context + * + * Return: QDF_STATUS_SUCCESS for success + */ +static QDF_STATUS hif_dev_service_debug_interrupt(struct hif_sdio_device *pdev) +{ + uint32_t dummy; + QDF_STATUS status; + + /* Send a target failure event to the application */ + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Target debug interrupt\n")); + + /* clear the interrupt , the debug error interrupt is counter 0 + * read counter to clear interrupt + */ + status = hif_read_write(pdev->HIFDevice, + COUNT_DEC_ADDRESS, + (uint8_t *) &dummy, + 4, HIF_RD_SYNC_BYTE_INC, NULL); + + A_ASSERT(status == QDF_STATUS_SUCCESS); + return status; +} + +/** + * hif_dev_service_counter_interrupt() - service counter interrupts + * synchronously + * + * @pDev: hif sdio device context + * + * Return: QDF_STATUS_SUCCESS for success + */ +static +QDF_STATUS hif_dev_service_counter_interrupt(struct hif_sdio_device *pdev) +{ + uint8_t counter_int_status; + + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Counter Interrupt\n")); + + counter_int_status = pdev->IrqProcRegisters.counter_int_status & + 
pdev->IrqEnableRegisters.counter_int_status_enable; + + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, + ("Valid interrupt source in COUNTER_INT_STATUS: 0x%x\n", + counter_int_status)); + + /* Check if the debug interrupt is pending + * NOTE: other modules like GMBOX may use the counter interrupt + * for credit flow control on other counters, we only need to + * check for the debug assertion counter interrupt + */ + if (counter_int_status & AR6K_TARGET_DEBUG_INTR_MASK) + return hif_dev_service_debug_interrupt(pdev); + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_dev_process_pending_irqs() - process pending interrupts + * synchronously + * + * @pDev: hif sdio device context + * @pDone: pending irq completion status + * @pASyncProcessing: sync/async processing flag + * Return: QDF_STATUS_SUCCESS for success + */ +static QDF_STATUS hif_dev_process_pending_irqs(struct hif_sdio_device *pdev, + bool *done, + bool *async_processing) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint8_t host_int_status = 0; + uint32_t look_ahead[MAILBOX_USED_COUNT]; + int i; + + qdf_mem_zero(&look_ahead, sizeof(look_ahead)); + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, + ("+ProcessPendingIRQs: (dev: 0x%lX)\n", + (unsigned long)pdev)); + + /* NOTE: the HIF implementation guarantees that the context + * of this call allows us to perform SYNCHRONOUS I/O, + * that is we can block, sleep or call any API that + * can block or switch thread/task ontexts. + * This is a fully schedulable context. + */ + do { + + if (pdev->IrqEnableRegisters.int_status_enable == 0) { + /* interrupt enables have been cleared, do not try + * to process any pending interrupts that + * may result in more bus transactions. + * The target may be unresponsive at this point. 
+ */ + break; + } + status = hif_read_write(pdev->HIFDevice, + HOST_INT_STATUS_ADDRESS, + (uint8_t *) &pdev->IrqProcRegisters, + sizeof(pdev->IrqProcRegisters), + HIF_RD_SYNC_BYTE_INC, NULL); + + if (QDF_IS_STATUS_ERROR(status)) + break; + + if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_IRQ)) { + hif_dev_dump_registers(pdev, + &pdev->IrqProcRegisters, + &pdev->IrqEnableRegisters, + &pdev->MailBoxCounterRegisters); + } + + /* Update only those registers that are enabled */ + host_int_status = pdev->IrqProcRegisters.host_int_status + & pdev->IrqEnableRegisters.int_status_enable; + + /* only look at mailbox status if the HIF layer did not + * provide this function, on some HIF interfaces reading + * the RX lookahead is not valid to do + */ + for (i = 0; i < MAILBOX_USED_COUNT; i++) { + look_ahead[i] = 0; + if (host_int_status & (1 << i)) { + /* mask out pending mailbox value, we use + * "lookAhead" as the real flag for + * mailbox processing below + */ + host_int_status &= ~(1 << i); + if (pdev->IrqProcRegisters. + rx_lookahead_valid & (1 << i)) { + /* mailbox has a message and the + * look ahead is valid + */ + look_ahead[i] = + pdev-> + IrqProcRegisters.rx_lookahead[ + MAILBOX_LOOKAHEAD_SIZE_IN_WORD * + i]; + } + } + } /*end of for loop */ + } while (false); + + do { + bool bLookAheadValid = false; + /* did the interrupt status fetches succeed? 
*/ + if (QDF_IS_STATUS_ERROR(status)) + break; + + for (i = 0; i < MAILBOX_USED_COUNT; i++) { + if (look_ahead[i] != 0) { + bLookAheadValid = true; + break; + } + } + + if ((0 == host_int_status) && !bLookAheadValid) { + /* nothing to process, the caller can use this + * to break out of a loop + */ + *done = true; + break; + } + + if (bLookAheadValid) { + for (i = 0; i < MAILBOX_USED_COUNT; i++) { + int fetched = 0; + + if (look_ahead[i] == 0) + continue; + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, + ("Pending mailbox[%d] message, look_ahead: 0x%X\n", + i, look_ahead[i])); + /* Mailbox Interrupt, the HTC layer may issue + * async requests to empty the mailbox... + * When emptying the recv mailbox we use the + * async handler from the completion routine of + * routine of the callers read request. + * This can improve performance by reducing + * the context switching when we rapidly + * pull packets + */ + status = hif_dev_recv_message_pending_handler( + pdev, i, + &look_ahead + [i], 1, + async_processing, + &fetched); + if (QDF_IS_STATUS_ERROR(status)) + break; + + if (!fetched) { + /* HTC could not pull any messages out + * due to lack of resources force DSR + * handle to ack the interrupt + */ + *async_processing = false; + pdev->RecheckIRQStatusCnt = 0; + } + } + } + + /* now handle the rest of them */ + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, + (" Valid interrupt source for OTHER interrupts: 0x%x\n", + host_int_status)); + + if (HOST_INT_STATUS_CPU_GET(host_int_status)) { + /* CPU Interrupt */ + status = hif_dev_service_cpu_interrupt(pdev); + if (QDF_IS_STATUS_ERROR(status)) + break; + } + + if (HOST_INT_STATUS_ERROR_GET(host_int_status)) { + /* Error Interrupt */ + status = hif_dev_service_error_interrupt(pdev); + if (QDF_IS_STATUS_ERROR(status)) + break; + } + + if (HOST_INT_STATUS_COUNTER_GET(host_int_status)) { + /* Counter Interrupt */ + status = hif_dev_service_counter_interrupt(pdev); + if (QDF_IS_STATUS_ERROR(status)) + break; + } + + } while (false); + + /* an optimization 
to bypass reading the IRQ status registers
+ * unnecessarily which can re-wake the target, if upper layers
+ * determine that we are in a low-throughput mode, we can
+ * rely on taking another interrupt rather than re-checking
+ * the status registers which can re-wake the target.
+ *
+ * NOTE : for host interfaces that use the special
+ * GetPendingEventsFunc, this optimization cannot be used due to
+ * possible side-effects. For example, SPI requires the host
+ * to drain all messages from the mailbox before exiting
+ * the ISR routine.
+ */
+ if (!(*async_processing) && (pdev->RecheckIRQStatusCnt == 0)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+ ("Bypassing IRQ Status re-check, forcing done\n"));
+ *done = true;
+ }
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+ ("-ProcessPendingIRQs: (done:%d, async:%d) status=%d\n",
+ *done, *async_processing, status));
+
+ return status;
+}
+
+#define DEV_CHECK_RECV_YIELD(pdev) \
+ ((pdev)->CurrentDSRRecvCount >= \
+ (pdev)->HifIRQYieldParams.recv_packet_yield_count)
+
+/**
+ * hif_dev_dsr_handler() - Synchronous interrupt handler
+ *
+ * @context: hif sdio device context
+ *
+ * Return: QDF_STATUS_SUCCESS for success and error code for failure
+ */
+QDF_STATUS hif_dev_dsr_handler(void *context)
+{
+ struct hif_sdio_device *pdev = (struct hif_sdio_device *) context;
+ QDF_STATUS status = QDF_STATUS_SUCCESS;
+ bool done = false;
+ bool async_proc = false;
+
+ AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
+ ("+DevDsrHandler: (dev: 0x%lX)\n",
+ (unsigned long)pdev));
+
+ /* reset the recv counter that tracks when we need
+ * to yield from the DSR
+ */
+ pdev->CurrentDSRRecvCount = 0;
+ /* reset counter used to flag a re-scan of IRQ
+ * status registers on the target
+ */
+ pdev->RecheckIRQStatusCnt = 0;
+
+ while (!done) {
+ status = hif_dev_process_pending_irqs(pdev, &done, &async_proc);
+ if (QDF_IS_STATUS_ERROR(status))
+ break;
+
+ if (HIF_DEVICE_IRQ_SYNC_ONLY == pdev->HifIRQProcessingMode) {
+ /* the HIF layer does not allow async IRQ processing,
+ * override the asyncProc flag
+
*/ + async_proc = false; + /* this will cause us to re-enter ProcessPendingIRQ() + * and re-read interrupt status registers. + * This has a nice side effect of blocking us until all + * async read requests are completed. This behavior is + * required as we do not allow ASYNC processing + * in interrupt handlers (like Windows CE) + */ + + if (pdev->DSRCanYield && DEV_CHECK_RECV_YIELD(pdev)) + /* ProcessPendingIRQs() pulled enough recv + * messages to satisfy the yield count, stop + * checking for more messages and return + */ + break; + } + + if (async_proc) { + /* the function does some async I/O for performance, + * we need to exit the ISR immediately, the check below + * will prevent the interrupt from being + * Ack'd while we handle it asynchronously + */ + break; + } + + } + + if (QDF_IS_STATUS_SUCCESS(status) && !async_proc) { + /* Ack the interrupt only if : + * 1. we did not get any errors in processing interrupts + * 2. there are no outstanding async processing requests + */ + if (pdev->DSRCanYield) { + /* if the DSR can yield do not ACK the interrupt, there + * could be more pending messages. The HIF layer + * must ACK the interrupt on behalf of HTC + */ + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, + (" Yield in effect (cur RX count: %d)\n", + pdev->CurrentDSRRecvCount)); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, + (" Acking interrupt from DevDsrHandler\n")); + hif_ack_interrupt(pdev->HIFDevice); + } + } + + AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("-DevDsrHandler\n")); + return status; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_send.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_send.c new file mode 100644 index 0000000000000000000000000000000000000000..1ea149480fe5b10d280997983b3bc70d097f8179 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/hif_sdio_send.c @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#define ATH_MODULE_NAME hif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hif_sdio_internal.h" + +/* + * Data structure to record required sending context data + */ +struct hif_sendContext { + bool bNewAlloc; + struct hif_sdio_device *pDev; + qdf_nbuf_t netbuf; + unsigned int transferID; + unsigned int head_data_len; +}; + +/** + * hif_dev_rw_completion_handler() - Completion routine + * for ALL HIF layer async I/O + * @context: hif send context + * @status: completion routine sync/async context + * + * Return: 0 for success and non-zero for failure + */ +QDF_STATUS hif_dev_rw_completion_handler(void *context, QDF_STATUS status) +{ + struct hif_sendContext *send_context = + (struct hif_sendContext *)context; + unsigned int transfer_id = send_context->transferID; + struct hif_sdio_device *pdev = send_context->pDev; + qdf_nbuf_t buf = send_context->netbuf; + /* Fix Me: Do we need toeplitz_hash_result for SDIO */ + uint32_t toeplitz_hash_result = 0; + + if (send_context->bNewAlloc) + qdf_mem_free((void *)send_context); + else + qdf_nbuf_pull_head(buf, send_context->head_data_len); 
+ if (pdev->hif_callbacks.txCompletionHandler) + pdev->hif_callbacks.txCompletionHandler(pdev->hif_callbacks. + Context, buf, + transfer_id, toeplitz_hash_result); + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_dev_send_buffer() - send buffer to sdio device + * @pDev: sdio function + * @transferID: transfer id + * @pipe: ul/dl pipe + * @nbytes: no of bytes to transfer + * @buf: pointer to buffer + * + * Return: 0 for success and non-zero for failure + */ +QDF_STATUS hif_dev_send_buffer(struct hif_sdio_device *pdev, + unsigned int transfer_id, + uint8_t pipe, unsigned int nbytes, + qdf_nbuf_t buf) +{ + QDF_STATUS status; + uint32_t padded_length; + int frag_count = 0, i, head_data_len; + struct hif_sendContext *send_context; + unsigned char *pData; + uint32_t request = HIF_WR_ASYNC_BLOCK_INC; + uint8_t mbox_index = hif_dev_map_pipe_to_mail_box(pdev, pipe); + + if (mbox_index == INVALID_MAILBOX_NUMBER) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("pipe id(%d) invalid\n", pipe)); + return QDF_STATUS_E_FAILURE; + } + + padded_length = DEV_CALC_SEND_PADDED_LEN(pdev, nbytes); + A_ASSERT(padded_length - nbytes < HIF_DUMMY_SPACE_MASK + 1); + /* + * two most significant bytes to save dummy data count + * data written into the dummy space will not put into + * the final mbox FIFO. 
+ */ + request |= ((padded_length - nbytes) << 16); + + frag_count = qdf_nbuf_get_num_frags(buf); + + if (frag_count > 1) { + /* header data length should be total sending length subtract + * internal data length of netbuf + */ + head_data_len = sizeof(struct hif_sendContext) + + (nbytes - qdf_nbuf_get_frag_len(buf, frag_count - 1)); + } else { + /* + * | hif_sendContext | netbuf->data + */ + head_data_len = sizeof(struct hif_sendContext); + } + + /* Check whether head room is enough to save extra head data */ + if ((head_data_len <= qdf_nbuf_headroom(buf)) && + (qdf_nbuf_tailroom(buf) >= (padded_length - nbytes))) { + send_context = + (struct hif_sendContext *)qdf_nbuf_push_head(buf, + head_data_len); + send_context->bNewAlloc = false; + } else { + send_context = + (struct hif_sendContext *) + qdf_mem_malloc(sizeof(struct hif_sendContext) + + padded_length); + if (send_context) { + send_context->bNewAlloc = true; + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Allocate send context fail %d\n", + (int) sizeof(struct hif_sendContext) + + padded_length)); + return QDF_STATUS_E_NOMEM; + } + } + + send_context->netbuf = buf; + send_context->pDev = pdev; + send_context->transferID = transfer_id; + send_context->head_data_len = head_data_len; + /* + * Copy data to head part of netbuf or head of allocated buffer. + * if buffer is new allocated, the last buffer should be copied also. + * It assume last fragment is internal buffer of netbuf + * sometime total length of fragments larger than nbytes + */ + pData = (unsigned char *)send_context + sizeof(struct hif_sendContext); + for (i = 0; i < (send_context->bNewAlloc ? 
frag_count : frag_count - 1); + i++) { + int frag_len = qdf_nbuf_get_frag_len(buf, i); + unsigned char *frag_addr = qdf_nbuf_get_frag_vaddr(buf, i); + + if (frag_len > nbytes) + frag_len = nbytes; + memcpy(pData, frag_addr, frag_len); + pData += frag_len; + nbytes -= frag_len; + if (nbytes <= 0) + break; + } + + /* Reset pData pointer and send_context out */ + pData = (unsigned char *)send_context + sizeof(struct hif_sendContext); + status = hif_read_write(pdev->HIFDevice, + pdev->MailBoxInfo.mbox_prop[mbox_index]. + extended_address, (char *)pData, padded_length, + request, (void *)send_context); + + if (status == QDF_STATUS_E_PENDING) + /* + * it will return QDF_STATUS_E_PENDING in native HIF + * implementation, which should be treated as successful + * result here. + */ + status = QDF_STATUS_SUCCESS; + /* release buffer or move back data pointer when failed */ + if (status != QDF_STATUS_SUCCESS) { + if (send_context->bNewAlloc) + qdf_mem_free(send_context); + else + qdf_nbuf_pull_head(buf, head_data_len); + } + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/if_sdio.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/if_sdio.c new file mode 100644 index 0000000000000000000000000000000000000000..5808d4175fa5ccc2808b1a073a4b14c9c7ac960b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/if_sdio.c @@ -0,0 +1,590 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef EXPORT_SYMTAB +#define EXPORT_SYMTAB +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "if_sdio.h" +#include +#include +#include "regtable_sdio.h" +#include +#include "target_type.h" +#include "epping_main.h" +#include "pld_sdio.h" +#include "targaddrs.h" +#include "sdio_api.h" +#ifndef REMOVE_PKT_LOG +#include "ol_txrx_types.h" +#include "pktlog_ac_api.h" +#include "pktlog_ac.h" +#endif + +#ifndef ATH_BUS_PM +#ifdef CONFIG_PM +#define ATH_BUS_PM +#endif /* CONFIG_PM */ +#endif /* ATH_BUS_PM */ + +#ifndef REMOVE_PKT_LOG +struct ol_pl_os_dep_funcs *g_ol_pl_os_dep_funcs; +#endif +#define HIF_SDIO_LOAD_TIMEOUT 1000 + +struct hif_sdio_softc *scn; +struct hif_softc *ol_sc; +static atomic_t hif_sdio_load_state; +/* Wait queue for MC thread */ +wait_queue_head_t sync_wait_queue; + +/** + * hif_sdio_probe() - configure sdio device + * @context: sdio device context + * @hif_handle: pointer to hif handle + * + * Return: 0 for success and non-zero for failure + */ +static A_STATUS hif_sdio_probe(void *context, void *hif_handle) +{ + int ret = 0; + struct HIF_DEVICE_OS_DEVICE_INFO os_dev_info; + struct sdio_func *func = NULL; + const struct sdio_device_id *id; + uint32_t target_type; + + HIF_ENTER(); + scn = (struct hif_sdio_softc *)qdf_mem_malloc(sizeof(*scn)); + if (!scn) { + ret = -ENOMEM; + goto err_alloc; + } + + scn->hif_handle = hif_handle; + hif_configure_device(hif_handle, HIF_DEVICE_GET_OS_DEVICE, + &os_dev_info, + sizeof(os_dev_info)); + + scn->aps_osdev.device = os_dev_info.os_dev; + scn->aps_osdev.bc.bc_bustype = 
QDF_BUS_TYPE_SDIO; + spin_lock_init(&scn->target_lock); + ol_sc = qdf_mem_malloc(sizeof(*ol_sc)); + if (!ol_sc) { + ret = -ENOMEM; + goto err_attach; + } + OS_MEMZERO(ol_sc, sizeof(*ol_sc)); + + { + /* + * Attach Target register table. This is needed early on + * even before BMI since PCI and HIF initialization + * directly access Target registers. + * + * TBDXXX: targetdef should not be global -- should be stored + * in per-device struct so that we can support multiple + * different Target types with a single Host driver. + * The whole notion of an "hif type" -- (not as in the hif + * module, but generic "Host Interface Type") is bizarre. + * At first, one one expect it to be things like SDIO, USB, PCI. + * But instead, it's an actual platform type. Inexplicably, the + * values used for HIF platform types are *different* from the + * values used for Target Types. + */ + +#if defined(CONFIG_AR9888_SUPPORT) + hif_register_tbl_attach(ol_sc, HIF_TYPE_AR9888); + target_register_tbl_attach(ol_sc, TARGET_TYPE_AR9888); + target_type = TARGET_TYPE_AR9888; +#elif defined(CONFIG_AR6320_SUPPORT) + id = ((struct hif_sdio_dev *) hif_handle)->id; + if (((id->device & MANUFACTURER_ID_AR6K_BASE_MASK) == + MANUFACTURER_ID_QCA9377_BASE) || + ((id->device & MANUFACTURER_ID_AR6K_BASE_MASK) == + MANUFACTURER_ID_QCA9379_BASE)) { + hif_register_tbl_attach(ol_sc, HIF_TYPE_AR6320V2); + target_register_tbl_attach(ol_sc, TARGET_TYPE_AR6320V2); + } else if ((id->device & MANUFACTURER_ID_AR6K_BASE_MASK) == + MANUFACTURER_ID_AR6320_BASE) { + int ar6kid = id->device & MANUFACTURER_ID_AR6K_REV_MASK; + + if (ar6kid >= 1) { + /* v2 or higher silicon */ + hif_register_tbl_attach(ol_sc, + HIF_TYPE_AR6320V2); + target_register_tbl_attach(ol_sc, + TARGET_TYPE_AR6320V2); + } else { + /* legacy v1 silicon */ + hif_register_tbl_attach(ol_sc, + HIF_TYPE_AR6320); + target_register_tbl_attach(ol_sc, + TARGET_TYPE_AR6320); + } + } + target_type = TARGET_TYPE_AR6320; + +#endif + } + func = ((struct 
hif_sdio_dev *) hif_handle)->func; + scn->targetdef = ol_sc->targetdef; + scn->hostdef = ol_sc->hostdef; + scn->aps_osdev.bdev = func; + ol_sc->bus_type = scn->aps_osdev.bc.bc_bustype; + scn->ol_sc = *ol_sc; + ol_sc->target_info.target_type = target_type; + + scn->ramdump_base = pld_hif_sdio_get_virt_ramdump_mem( + scn->aps_osdev.device, + &scn->ramdump_size); + if (scn->ramdump_base == NULL || !scn->ramdump_size) { + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR, + "%s: Failed to get RAM dump memory address or size!\n", + __func__); + } else { + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO, + "%s: ramdump base 0x%pK size %d\n", __func__, + scn->ramdump_base, (int)scn->ramdump_size); + } + + if (athdiag_procfs_init(scn) != 0) { + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR, + "%s athdiag_procfs_init failed", __func__); + ret = QDF_STATUS_E_FAILURE; + goto err_attach1; + } + + atomic_set(&hif_sdio_load_state, true); + wake_up_interruptible(&sync_wait_queue); + + return 0; + +err_attach1: + if (scn->ramdump_base) + pld_hif_sdio_release_ramdump_mem(scn->ramdump_base); + qdf_mem_free(ol_sc); +err_attach: + qdf_mem_free(scn); + scn = NULL; +err_alloc: + return ret; +} + +/** + * hif_sdio_remove() - remove sdio device + * @conext: sdio device context + * @hif_handle: pointer to sdio function + * + * Return: 0 for success and non-zero for failure + */ +static A_STATUS hif_sdio_remove(void *context, void *hif_handle) +{ + HIF_ENTER(); + + if (!scn) { + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR, + "Global SDIO context is NULL"); + return A_ERROR; + } + + atomic_set(&hif_sdio_load_state, false); + athdiag_procfs_remove(); + +#ifndef TARGET_DUMP_FOR_NON_QC_PLATFORM + iounmap(scn->ramdump_base); +#endif + + if (ol_sc) { + qdf_mem_free(ol_sc); + ol_sc = NULL; + } + + if (scn) { + qdf_mem_free(scn); + scn = NULL; + } + + HIF_EXIT(); + + return 0; +} + +/** + * hif_sdio_suspend() - sdio suspend routine + * @context: sdio device context + * + * Return: 0 
for success and non-zero for failure
+ */
+static A_STATUS hif_sdio_suspend(void *context)
+{
+ return 0;
+}
+
+/**
+ * hif_sdio_resume() - sdio resume routine
+ * @context: sdio device context
+ *
+ * Return: 0 for success and non-zero for failure
+ */
+static A_STATUS hif_sdio_resume(void *context)
+{
+ return 0;
+}
+
+/**
+ * hif_sdio_power_change() - change power state of sdio bus
+ * @context: sdio device context
+ * @config: power state configuration
+ *
+ * Return: 0 for success and non-zero for failure
+ */
+static A_STATUS hif_sdio_power_change(void *context, uint32_t config)
+{
+ return 0;
+}
+
+/*
+ * Module glue.
+ */
+#include
+static char *version = "HIF (Atheros/multi-bss)";
+static char *dev_info = "ath_hif_sdio";
+
+/**
+ * init_ath_hif_sdio() - initialize hif sdio callbacks
+ * @param: none
+ *
+ * Return: 0 for success and non-zero for failure
+ */
+static int init_ath_hif_sdio(void)
+{
+ QDF_STATUS status;
+ struct osdrv_callbacks osdrv_callbacks;
+
+ HIF_ENTER();
+ qdf_mem_zero(&osdrv_callbacks, sizeof(osdrv_callbacks));
+ osdrv_callbacks.device_inserted_handler = hif_sdio_probe;
+ osdrv_callbacks.device_removed_handler = hif_sdio_remove;
+ osdrv_callbacks.device_suspend_handler = hif_sdio_suspend;
+ osdrv_callbacks.device_resume_handler = hif_sdio_resume;
+ osdrv_callbacks.device_power_change_handler = hif_sdio_power_change;
+
+ QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO, "%s %d", __func__,
+ __LINE__);
+ status = hif_init(&osdrv_callbacks);
+ if (status != QDF_STATUS_SUCCESS) {
+ QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
+ "%s hif_init failed!", __func__);
+ return -ENODEV;
+ }
+ QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
+ "%s: %s\n", dev_info, version);
+
+ return 0;
+}
+
+/**
+ * hif_sdio_bus_suspend() - suspend the bus
+ *
+ * This function suspends the bus, but sdio doesn't need to suspend.
+ * Therefore do nothing.
+ * + * Return: 0 for success and non-zero for failure + */ +int hif_sdio_bus_suspend(struct hif_softc *hif_ctx) +{ + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + struct hif_sdio_dev *hif_device = scn->hif_handle; + struct device *dev = &hif_device->func->dev; + + hif_device_suspend(dev); + return 0; +} + + +/** + * hif_sdio_bus_resume() - hif resume API + * + * This function resumes the bus. but sdio doesn't need to resume. + * Therefore do nothing. + * + * Return: 0 for success and non-zero for failure + */ +int hif_sdio_bus_resume(struct hif_softc *hif_ctx) +{ + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + struct hif_sdio_dev *hif_device = scn->hif_handle; + struct device *dev = &hif_device->func->dev; + + hif_device_resume(dev); + return 0; +} + +/** + * hif_enable_power_gating() - enable HW power gating + * + * Return: n/a + */ +void hif_enable_power_gating(void *hif_ctx) +{ +} + +/** + * hif_sdio_close() - hif_bus_close + * + * Return: None + */ +void hif_sdio_close(struct hif_softc *hif_sc) +{ + if (ol_sc) { + qdf_mem_free(ol_sc); + ol_sc = NULL; + } + + if (scn) { + qdf_mem_free(scn); + scn = NULL; + } +} + +/** + * hif_sdio_open() - hif_bus_open + * @hif_sc: hif context + * @bus_type: bus type + * + * Return: QDF status + */ +QDF_STATUS hif_sdio_open(struct hif_softc *hif_sc, + enum qdf_bus_type bus_type) +{ + QDF_STATUS status; + + hif_sc->bus_type = bus_type; + status = init_ath_hif_sdio(); + + return status; +} + +void hif_get_target_revision(struct hif_softc *ol_sc) +{ + struct hif_softc *ol_sc_local = (struct hif_softc *)ol_sc; + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc_local); + uint32_t chip_id = 0; + QDF_STATUS rv; + + rv = hif_diag_read_access(hif_hdl, + (CHIP_ID_ADDRESS | RTC_SOC_BASE_ADDRESS), &chip_id); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s[%d]: get chip id fail\n", __func__, __LINE__); + } else { + ol_sc_local->target_info.target_revision = + CHIP_ID_REVISION_GET(chip_id); + } +} + 
+/** + * hif_sdio_enable_bus() - hif_enable_bus + * @hif_sc: hif context + * @dev: dev + * @bdev: bus dev + * @bid: bus id + * @type: bus type + * + * Return: QDF_STATUS + */ +QDF_STATUS hif_sdio_enable_bus(struct hif_softc *hif_sc, + struct device *dev, void *bdev, const struct hif_bus_id *bid, + enum hif_enable_type type) +{ + int ret = 0; + const struct sdio_device_id *id = (const struct sdio_device_id *)bid; + struct hif_sdio_softc *sc = HIF_GET_SDIO_SOFTC(hif_sc); + + init_waitqueue_head(&sync_wait_queue); + if (hif_sdio_device_inserted(dev, id)) { + HIF_ERROR("wlan: %s hif_sdio_device_inserted failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + wait_event_interruptible_timeout(sync_wait_queue, + atomic_read(&hif_sdio_load_state) == true, + HIF_SDIO_LOAD_TIMEOUT); + hif_sc->hostdef = ol_sc->hostdef; + hif_sc->targetdef = ol_sc->targetdef; + hif_sc->bus_type = ol_sc->bus_type; + hif_sc->target_info.target_type = ol_sc->target_info.target_type; + + sc->hif_handle = scn->hif_handle; + sc->aps_osdev.device = scn->aps_osdev.device; + sc->aps_osdev.bc.bc_bustype = scn->aps_osdev.bc.bc_bustype; + sc->target_lock = scn->target_lock; + sc->targetdef = scn->targetdef; + sc->hostdef = scn->hostdef; + sc->aps_osdev.bdev = scn->aps_osdev.bdev; + sc->ramdump_size = scn->ramdump_size; + sc->ramdump_base = scn->ramdump_base; + + return ret; +} + + +/** + * hif_sdio_disable_bus() - sdio disable bus + * @hif_sc: hif softc pointer + * + * Return: none + */ +void hif_sdio_disable_bus(struct hif_softc *hif_sc) +{ + struct hif_sdio_softc *sc = HIF_GET_SDIO_SOFTC(hif_sc); + struct sdio_func *func = sc->aps_osdev.bdev; + + hif_sdio_device_removed(func); +} + +/** + * hif_sdio_get_config_item - sdio configure bus + * @hif_sc: hif context + * @opcode: configuration type + * @config: configuration value to set + * @config_len: configuration length + * + * Return: QDF_STATUS_SUCCESS for success + */ +QDF_STATUS hif_sdio_get_config_item(struct hif_softc *hif_sc, + int opcode, void 
*config, uint32_t config_len) +{ + struct hif_sdio_softc *sc = HIF_GET_SDIO_SOFTC(hif_sc); + struct hif_sdio_dev *hif_device = sc->hif_handle; + + return hif_configure_device(hif_device, + opcode, config, config_len); +} + +/** + * hif_sdio_set_mailbox_swap - set mailbox swap + * @hif_sc: hif context + * + * Return: None + */ +void hif_sdio_set_mailbox_swap(struct hif_softc *hif_sc) +{ + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_sc); + struct hif_sdio_dev *hif_device = scn->hif_handle; + + hif_device->swap_mailbox = true; +} + +/** + * hif_sdio_claim_device - claim hif device + * @hif_sc: hif context + * + * Return: None + */ +void hif_sdio_claim_device(struct hif_softc *hif_sc) +{ + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_sc); + struct hif_sdio_dev *hif_device = scn->hif_handle; + + hif_device->claimed_ctx = hif_sc; +} + +/** + * hif_sdio_mask_interrupt_call() - disable hif device irq + * @scn: pointer to softc structure + * + * Return: None + */ +void hif_sdio_mask_interrupt_call(struct hif_softc *scn) +{ + struct hif_sdio_softc *hif_ctx = HIF_GET_SDIO_SOFTC(scn); + struct hif_sdio_dev *hif_device = hif_ctx->hif_handle; + + hif_mask_interrupt(hif_device); +} + +/** + * hif_trigger_dump() - trigger various dump cmd + * @scn: struct hif_opaque_softc + * @cmd_id: dump command id + * @start: start/stop dump + * + * Return: None + */ +void hif_trigger_dump(struct hif_opaque_softc *scn, uint8_t cmd_id, bool start) +{ +} + +/** + * hif_check_fw_reg() - check fw selfrecovery indication + * @hif_ctx: hif_opaque_softc + * + * Return: int + */ +int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx) +{ + int ret = 1; + uint32_t fw_indication = 0; + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + + if (hif_diag_read_access(hif_ctx, FW_INDICATOR_ADDRESS, + &fw_indication) != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s Get fw indication failed\n", __func__); + return 1; + } + HIF_INFO("%s: fw indication is 0x%x def 0x%x.\n", __func__, + fw_indication, 
FW_IND_HELPER); + if (fw_indication & FW_IND_HELPER) + ret = 0; + + return ret; +} + +/** + * hif_wlan_disable() - call the platform driver to disable wlan + * @scn: scn + * + * Return: void + */ +void hif_wlan_disable(struct hif_softc *scn) +{ +} + +/** + * hif_sdio_needs_bmi() - return true if the soc needs bmi through the driver + * @scn: hif context + * + * Return: true if soc needs driver bmi otherwise false + */ +bool hif_sdio_needs_bmi(struct hif_softc *scn) +{ + return true; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/if_sdio.h b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/if_sdio.h new file mode 100644 index 0000000000000000000000000000000000000000..301d15906a04842c47fdb141d7da08637b88b9e7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/if_sdio.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __IF_SDIO_H__ +#define __IF_SDIO_H__ + +#include +#include +#include +#include +#include +#include +#include "a_osapi.h" +#include "hif_internal.h" + + +#define AR6320_HEADERS_DEF + +#define ATH_DBG_DEFAULT 0 + +#define RAMDUMP_ADDR 0x8F000000 +#define RAMDUMP_SIZE 0x700000 + +struct hif_sdio_softc { + struct hif_softc ol_sc; + struct device *dev; + struct _NIC_DEV aps_osdev; + struct tasklet_struct intr_tq; /* tasklet */ + + int irq; + /* + * Guard changes to Target HW state and to software + * structures that track hardware state. + */ + spinlock_t target_lock; + void *hif_handle; + void *ramdump_base; + unsigned long ramdump_address; + unsigned long ramdump_size; + struct targetdef_s *targetdef; + struct hostdef_s *hostdef; +}; + +#if defined(CONFIG_ATH_PROCFS_DIAG_SUPPORT) +int athdiag_procfs_init(void *scn); +void athdiag_procfs_remove(void); +#else +static inline int athdiag_procfs_init(void *scn) +{ + return 0; +} + +static inline void athdiag_procfs_remove(void) +{ +} +#endif + +#define DMA_MAPPING_ERROR(dev, addr) dma_mapping_error((dev), (addr)) + +int ath_sdio_probe(void *context, void *hif_handle); +void ath_sdio_remove(void *context, void *hif_handle); +int ath_sdio_suspend(void *context); +int ath_sdio_resume(void *context); + +/*These functions are exposed to HDD*/ +void hif_init_qdf_ctx(qdf_device_t qdf_dev, void *ol_sc); +void hif_deinit_qdf_ctx(void *ol_sc); + +int hif_sdio_device_inserted(struct device *dev, + const struct sdio_device_id *id); +void hif_sdio_stop(struct hif_softc *hif_ctx); +void hif_sdio_shutdown(struct hif_softc *hif_ctx); +void hif_sdio_device_removed(struct sdio_func *func); +int hif_device_suspend(struct device *dev); +int hif_device_resume(struct device *dev); +void hif_register_tbl_attach(struct hif_softc *scn, + u32 hif_type); +void target_register_tbl_attach(struct hif_softc *scn, + u32 target_type); +void hif_enable_power_gating(void *hif_ctx); +void hif_sdio_close(struct hif_softc *hif_sc); +QDF_STATUS 
hif_sdio_open(struct hif_softc *hif_sc, + enum qdf_bus_type bus_type); +void hif_ar6k_fetch_target_regs(struct hif_sdio_dev *hif_device, + uint32_t *targregs); +QDF_STATUS hif_reg_based_get_target_info(struct hif_opaque_softc *hif_ctx, + struct bmi_target_info *targ_info); +QDF_STATUS +hif_bmi_raw_write(struct hif_sdio_dev *device, char *buffer, + uint32_t length); +QDF_STATUS +hif_bmi_raw_read(struct hif_sdio_dev *device, char *buffer, + u32 length, bool want_timeout); + +#endif /* __IF_SDIO_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/include/hif_internal.h b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/include/hif_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..5d6f77f8d534e7871d9e055d1d64372940e999bb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/include/hif_internal.h @@ -0,0 +1,418 @@ +/* + * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HIF_INTERNAL_H_ +#define _HIF_INTERNAL_H_ + +#include "athdefs.h" +#include "a_types.h" +#include "a_osapi.h" +#include /* qdf_device_t, qdf_print */ +#include /* qdf_system_ticks, etc. 
*/ +#include +#include +#include +#include "hif.h" +#include "hif_debug.h" +#include "hif_sdio_common.h" +#include +#include "hif_main.h" + +#define HIF_LINUX_MMC_SCATTER_SUPPORT + +#define BUS_REQUEST_MAX_NUM 105 + +#define SDIO_CLOCK_FREQUENCY_DEFAULT 25000000 +#define SDWLAN_ENABLE_DISABLE_TIMEOUT 20 +#define FLAGS_CARD_ENAB 0x02 +#define FLAGS_CARD_IRQ_UNMSK 0x04 + +#define HIF_MBOX_BLOCK_SIZE HIF_DEFAULT_IO_BLOCK_SIZE +#define HIF_MBOX0_BLOCK_SIZE 1 +#define HIF_MBOX1_BLOCK_SIZE HIF_MBOX_BLOCK_SIZE +#define HIF_MBOX2_BLOCK_SIZE HIF_MBOX_BLOCK_SIZE +#define HIF_MBOX3_BLOCK_SIZE HIF_MBOX_BLOCK_SIZE + +/* + * direction - Direction of transfer (HIF_SDIO_READ/HIF_SDIO_WRITE). + */ +#define HIF_SDIO_READ 0x00000001 +#define HIF_SDIO_WRITE 0x00000002 +#define HIF_SDIO_DIR_MASK (HIF_SDIO_READ | HIF_SDIO_WRITE) + +/* + * type - An interface may support different kind of rd/wr commands. + * For example: SDIO supports CMD52/CMD53s. In case of MSIO it + * translates to using different kinds of TPCs. The command type + * is thus divided into a basic and an extended command and can + * be specified using HIF_BASIC_IO/HIF_EXTENDED_IO. + */ +#define HIF_BASIC_IO 0x00000004 +#define HIF_EXTENDED_IO 0x00000008 +#define HIF_TYPE_MASK (HIF_BASIC_IO | HIF_EXTENDED_IO) + +/* + * This indicates the whether the command is to be executed in a + * blocking or non-blocking fashion (HIF_SYNCHRONOUS/ + * HIF_ASYNCHRONOUS). The read/write data paths in HTC have been + * implemented using the asynchronous mode allowing the the bus + * driver to indicate the completion of operation through the + * registered callback routine. The requirement primarily comes + * from the contexts these operations get called from (a driver's + * transmit context or the ISR context in case of receive). + * Support for both of these modes is essential. 
+ */ +#define HIF_SYNCHRONOUS 0x00000010 +#define HIF_ASYNCHRONOUS 0x00000020 +#define HIF_EMODE_MASK (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS) + +/* + * An interface may support different kinds of commands based on + * the tradeoff between the amount of data it can carry and the + * setup time. Byte and Block modes are supported (HIF_BYTE_BASIS/ + * HIF_BLOCK_BASIS). In case of latter, the data is rounded off + * to the nearest block size by padding. The size of the block is + * configurable at compile time using the HIF_BLOCK_SIZE and is + * negotiated with the target during initialization after the + * AR6000 interrupts are enabled. + */ +#define HIF_BYTE_BASIS 0x00000040 +#define HIF_BLOCK_BASIS 0x00000080 +#define HIF_DMODE_MASK (HIF_BYTE_BASIS | HIF_BLOCK_BASIS) + +/* + * This indicates if the address has to be incremented on AR6000 + * after every read/write operation (HIF?FIXED_ADDRESS/ + * HIF_INCREMENTAL_ADDRESS). + */ +#define HIF_FIXED_ADDRESS 0x00000100 +#define HIF_INCREMENTAL_ADDRESS 0x00000200 +#define HIF_AMODE_MASK (HIF_FIXED_ADDRESS | \ + HIF_INCREMENTAL_ADDRESS) + +/* + * data written into the dummy space will not put into the final mbox FIFO + */ +#define HIF_DUMMY_SPACE_MASK 0xFFFF0000 + +/* + * data written into the dummy space will not put into the final mbox FIFO + */ +#define HIF_DUMMY_SPACE_MASK 0xFFFF0000 + + +#define HIF_WR_ASYNC_BYTE_FIX \ + (HIF_SDIO_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BYTE_BASIS | HIF_FIXED_ADDRESS) +#define HIF_WR_ASYNC_BYTE_INC \ + (HIF_SDIO_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS) +#define HIF_WR_ASYNC_BLOCK_INC \ + (HIF_SDIO_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS) +#define HIF_WR_SYNC_BYTE_FIX \ + (HIF_SDIO_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BYTE_BASIS | HIF_FIXED_ADDRESS) +#define HIF_WR_SYNC_BYTE_INC \ + (HIF_SDIO_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BYTE_BASIS | 
HIF_INCREMENTAL_ADDRESS) +#define HIF_WR_SYNC_BLOCK_INC \ + (HIF_SDIO_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS) +#define HIF_WR_ASYNC_BLOCK_FIX \ + (HIF_SDIO_WRITE | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS) +#define HIF_WR_SYNC_BLOCK_FIX \ + (HIF_SDIO_WRITE | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS) +#define HIF_RD_SYNC_BYTE_INC \ + (HIF_SDIO_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS) +#define HIF_RD_SYNC_BYTE_FIX \ + (HIF_SDIO_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BYTE_BASIS | HIF_FIXED_ADDRESS) +#define HIF_RD_ASYNC_BYTE_FIX \ + (HIF_SDIO_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BYTE_BASIS | HIF_FIXED_ADDRESS) +#define HIF_RD_ASYNC_BLOCK_FIX \ + (HIF_SDIO_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS) +#define HIF_RD_ASYNC_BYTE_INC \ + (HIF_SDIO_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS) +#define HIF_RD_ASYNC_BLOCK_INC \ + (HIF_SDIO_READ | HIF_ASYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS) +#define HIF_RD_SYNC_BLOCK_INC \ + (HIF_SDIO_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS) +#define HIF_RD_SYNC_BLOCK_FIX \ + (HIF_SDIO_READ | HIF_SYNCHRONOUS | HIF_EXTENDED_IO | \ + HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS) + +enum hif_sdio_device_state { + HIF_DEVICE_STATE_ON, + HIF_DEVICE_STATE_DEEPSLEEP, + HIF_DEVICE_STATE_CUTPOWER, + HIF_DEVICE_STATE_WOW +}; + +struct bus_request { + struct bus_request *next; /* link list of available requests */ + struct bus_request *inusenext; /* link list of in use requests */ + struct semaphore sem_req; + uint32_t address; /* request data */ + char *buffer; + uint32_t length; + uint32_t request; + void *context; + QDF_STATUS status; + struct HIF_SCATTER_REQ_PRIV *scatter_req; +}; + +struct 
hif_sdio_dev { + struct sdio_func *func; + qdf_spinlock_t asynclock; + struct task_struct *async_task; /* task to handle async commands */ + struct semaphore sem_async; /* wake up for async task */ + int async_shutdown; /* stop the async task */ + struct completion async_completion; /* thread completion */ + struct bus_request *asyncreq; /* request for async tasklet */ + struct bus_request *taskreq; /* async tasklet data */ + qdf_spinlock_t lock; + struct bus_request *bus_request_free_queue; /* free list */ + struct bus_request bus_request[BUS_REQUEST_MAX_NUM]; /* bus requests */ + void *claimed_ctx; + struct htc_callbacks htc_callbacks; + uint8_t *dma_buffer; + DL_LIST scatter_req_head; /* scatter request list head */ + bool scatter_enabled; /* scatter enabled flag */ + bool is_suspend; + bool is_disabled; + atomic_t irq_handling; + enum HIF_DEVICE_POWER_CHANGE_TYPE power_config; + enum hif_sdio_device_state device_state; + const struct sdio_device_id *id; + struct mmc_host *host; + void *htc_context; + bool swap_mailbox; +}; + +struct HIF_DEVICE_OS_DEVICE_INFO { + void *os_dev; +}; + +struct hif_mailbox_properties { + u_int32_t extended_address; /* extended address for larger writes */ + u_int32_t extended_size; +}; + +struct hif_device_irq_yield_params { + int recv_packet_yield_count; + /* max number of packets to force DSR to return */ +}; + +struct hif_device_mbox_info { + u_int32_t mbox_addresses[4]; + /* first element for legacy HIFs and return the address and ARRAY of + * 32bit words + */ + struct hif_mailbox_properties mbox_prop[4]; + u_int32_t gmbox_address; + u_int32_t gmbox_size; + u_int32_t flags; + /* flags to describe mbox behavior or usage */ +}; + +enum hif_device_irq_mode { + HIF_DEVICE_IRQ_SYNC_ONLY, + /* DSR to process all interrupts before returning */ + HIF_DEVICE_IRQ_ASYNC_SYNC, /* DSR to process interrupts */ +}; + +struct osdrv_callbacks { + void *context; + /* context to pass for all callbacks + * except device_removed_handler + * the 
device_removed_handler is only + * called if the device is claimed + */ + int (*device_inserted_handler)(void *context, void *hif_handle); + int (*device_removed_handler)(void *claimed_ctx, + void *hif_handle); + int (*device_suspend_handler)(void *context); + int (*device_resume_handler)(void *context); + int (*device_wakeup_handler)(void *context); + int (*device_power_change_handler)(void *context, + enum HIF_DEVICE_POWER_CHANGE_TYPE + config); +}; + +/* other interrupts are pending, host + * needs to read the to monitor + */ +#define HIF_OTHER_EVENTS (1 << 0) +/* pending recv packet */ +#define HIF_RECV_MSG_AVAIL (1 << 1) + +struct _HIF_PENDING_EVENTS_INFO { + uint32_t events; + uint32_t look_ahead; + uint32_t available_recv_bytes; +}; + +/* hif-sdio pending events handler type, some HIF modules + * use special mechanisms to detect packet available and other interrupts + */ +typedef int (*HIF_PENDING_EVENTS_FUNC)(struct hif_sdio_dev *device, + struct _HIF_PENDING_EVENTS_INFO * + events, void *async_context); + +#define HIF_MASK_RECV true +#define HIF_UNMASK_RECV false +/* hif-sdio Handler type to mask receive events */ +typedef int (*HIF_MASK_UNMASK_RECV_EVENT)(struct hif_sdio_dev *device, + bool mask, + void *async_context); + +QDF_STATUS hif_configure_device(struct hif_sdio_dev *device, + enum hif_device_config_opcode opcode, + void *config, uint32_t config_len); + +QDF_STATUS hif_init(struct osdrv_callbacks *callbacks); + +QDF_STATUS hif_attach_htc(struct hif_sdio_dev *device, + struct htc_callbacks *callbacks); + +QDF_STATUS hif_read_write(struct hif_sdio_dev *device, + uint32_t address, + char *buffer, + uint32_t length, uint32_t request, void *context); + +void hif_ack_interrupt(struct hif_sdio_dev *device); + +void hif_mask_interrupt(struct hif_sdio_dev *device); + +void hif_un_mask_interrupt(struct hif_sdio_dev *device); + +QDF_STATUS hif_wait_for_pending_recv(struct hif_sdio_dev *device); + +struct _HIF_SCATTER_ITEM { + u_int8_t *buffer; /* CPU 
accessible address of buffer */ + int length; /* length of transfer to/from this buffer */ + void *caller_contexts[2]; /* caller context */ +}; + +struct _HIF_SCATTER_REQ; + +typedef void (*HIF_SCATTER_COMP_CB)(struct _HIF_SCATTER_REQ *); + +enum HIF_SCATTER_METHOD { + HIF_SCATTER_NONE = 0, + HIF_SCATTER_DMA_REAL, /* Real SG support no restrictions */ + HIF_SCATTER_DMA_BOUNCE, /* Uses SG DMA */ +}; + +struct _HIF_SCATTER_REQ { + DL_LIST list_link; /* link management */ + u_int32_t address; /* address for the read/write operation */ + u_int32_t request; /* request flags */ + u_int32_t total_length; /* total length of entire transfer */ + u_int32_t caller_flags; /* caller specific flags */ + HIF_SCATTER_COMP_CB completion_routine; /* completion callback */ + int completion_status; /* status of completion */ + void *context; /* caller context for this request */ + int valid_scatter_entries; /* no of valid entries */ + /* scatter method handled by HIF */ + enum HIF_SCATTER_METHOD scatter_method; + void *hif_private[4]; /* HIF private area */ + u_int8_t *scatter_bounce_buffer; /* bounce buffers */ + struct _HIF_SCATTER_ITEM scatter_list[1]; /* start of scatter list */ +}; + +typedef struct _HIF_SCATTER_REQ * (*HIF_ALLOCATE_SCATTER_REQUEST)( + struct hif_sdio_dev *device); +typedef void (*HIF_FREE_SCATTER_REQUEST)(struct hif_sdio_dev *device, + struct _HIF_SCATTER_REQ *request); +typedef QDF_STATUS (*HIF_READWRITE_SCATTER)(struct hif_sdio_dev *device, + struct _HIF_SCATTER_REQ *request); + +struct HIF_DEVICE_SCATTER_SUPPORT_INFO { + /* information returned from HIF layer */ + HIF_ALLOCATE_SCATTER_REQUEST allocate_req_func; + HIF_FREE_SCATTER_REQUEST free_req_func; + HIF_READWRITE_SCATTER read_write_scatter_func; + int max_scatter_entries; + int max_tx_size_per_scatter_req; +}; + +void hif_get_target_revision(struct hif_softc *ol_sc); +struct HIF_SCATTER_REQ_PRIV; + +#define HIF_DMA_BUFFER_SIZE (4 * 1024) +#define CMD53_FIXED_ADDRESS 1 +#define CMD53_INCR_ADDRESS 2 + 
+struct bus_request *hif_allocate_bus_request(struct hif_sdio_dev *device); +void hif_free_bus_request(struct hif_sdio_dev *device, + struct bus_request *busrequest); +void add_to_async_list(struct hif_sdio_dev *device, + struct bus_request *busrequest); +void hif_dump_cccr(struct hif_sdio_dev *hif_device); + +#ifdef HIF_LINUX_MMC_SCATTER_SUPPORT + +#define MAX_SCATTER_REQUESTS 4 +#define MAX_SCATTER_ENTRIES_PER_REQ 16 +#define MAX_SCATTER_REQ_TRANSFER_SIZE (32*1024) + +struct HIF_SCATTER_REQ_PRIV { + struct _HIF_SCATTER_REQ *hif_scatter_req; + struct hif_sdio_dev *device; /* this device */ + struct bus_request *busrequest; + /* scatter list for linux */ + struct scatterlist sgentries[MAX_SCATTER_ENTRIES_PER_REQ]; +}; + +#define ATH_DEBUG_SCATTER ATH_DEBUG_MAKE_MODULE_MASK(0) + +QDF_STATUS setup_hif_scatter_support(struct hif_sdio_dev *device, + struct HIF_DEVICE_SCATTER_SUPPORT_INFO *info); +void cleanup_hif_scatter_resources(struct hif_sdio_dev *device); +QDF_STATUS do_hif_read_write_scatter(struct hif_sdio_dev *device, + struct bus_request *busrequest); + +#else /* HIF_LINUX_MMC_SCATTER_SUPPORT */ + +static inline QDF_STATUS setup_hif_scatter_support(struct hif_sdio_dev *device, + struct HIF_DEVICE_SCATTER_SUPPORT_INFO *info) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static inline QDF_STATUS do_hif_read_write_scatter(struct hif_sdio_dev *device, + struct bus_request *busrequest) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +#define cleanup_hif_scatter_resources(d) { } + +#endif /* HIF_LINUX_MMC_SCATTER_SUPPORT */ + +#endif /* _HIF_INTERNAL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/src/hif.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/src/hif.c new file mode 100644 index 0000000000000000000000000000000000000000..966101b4d0ace8b3f9ee140eab1ea792436062de --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/src/hif.c @@ -0,0 +1,2720 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. 
All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hif_sdio_dev.h" +#include "if_sdio.h" +#include "regtable_sdio.h" +#include "wma_api.h" +#include "hif_internal.h" + +/* by default setup a bounce buffer for the data packets, + * if the underlying host controller driver + * does not use DMA you may be able to skip this step + * and save the memory allocation and transfer time + */ +#define HIF_USE_DMA_BOUNCE_BUFFER 1 +#define ATH_MODULE_NAME hif +#include "a_debug.h" + +#if HIF_USE_DMA_BOUNCE_BUFFER +/* macro to check if DMA buffer is WORD-aligned and DMA-able. + * Most host controllers assume the + * buffer is DMA'able and will bug-check otherwise (i.e. buffers on the stack). + * virt_addr_valid check fails on stack memory. 
+ */ +#define BUFFER_NEEDS_BOUNCE(buffer) (((unsigned long)(buffer) & 0x3) || \ + !virt_addr_valid((buffer))) +#else +#define BUFFER_NEEDS_BOUNCE(buffer) (false) +#endif +#define MAX_HIF_DEVICES 2 +#ifdef HIF_MBOX_SLEEP_WAR +#define HIF_MIN_SLEEP_INACTIVITY_TIME_MS 50 +#define HIF_SLEEP_DISABLE_UPDATE_DELAY 1 +#define HIF_IS_WRITE_REQUEST_MBOX1_TO_3(request) \ + ((request->request & HIF_SDIO_WRITE) && \ + (request->address >= 0x1000 && \ + request->address < 0x1FFFF)) +#endif + +unsigned int mmcbuswidth; +/* PERM:S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH */ +module_param(mmcbuswidth, uint, 0644); +MODULE_PARM_DESC(mmcbuswidth, + "Set MMC driver Bus Width: 1-1Bit, 4-4Bit, 8-8Bit"); + +unsigned int mmcclock; +module_param(mmcclock, uint, 0644); +MODULE_PARM_DESC(mmcclock, "Set MMC driver Clock value"); + +unsigned int brokenirq; +module_param(brokenirq, uint, 0644); +MODULE_PARM_DESC(brokenirq, + "Set as 1 to use polling method instead of interrupt mode"); + +unsigned int forcesleepmode; +module_param(forcesleepmode, uint, 0644); +MODULE_PARM_DESC(forcesleepmode, + "Set sleep mode: 0-host capbility, 1-force WOW, 2-force DeepSleep, 3-force CutPower"); + +#ifdef CONFIG_X86 +unsigned int asyncintdelay = 2; +module_param(asyncintdelay, uint, 0644); +MODULE_PARM_DESC(asyncintdelay, + "Delay clock count for async interrupt, 2 is default, valid values are 1 and 2"); +#else +unsigned int asyncintdelay; +module_param(asyncintdelay, uint, 0644); +MODULE_PARM_DESC(asyncintdelay, + "Delay clock count for async interrupt, 0 is default, valid values are 1 and 2"); +#endif + +unsigned int forcecard; +module_param(forcecard, uint, 0644); +MODULE_PARM_DESC(forcecard, + "Ignore card capabilities information to switch bus mode"); + +unsigned int debugcccr = 1; +module_param(debugcccr, uint, 0644); +MODULE_PARM_DESC(debugcccr, "Output this cccr values"); + +unsigned int writecccr1; +module_param(writecccr1, uint, 0644); +unsigned int writecccr1value; +module_param(writecccr1value, uint, 
0644); + +unsigned int writecccr2; +module_param(writecccr2, uint, 0644); +unsigned int writecccr2value; +module_param(writecccr2value, uint, 0644); + +unsigned int writecccr3; +module_param(writecccr3, uint, 0644); +unsigned int writecccr3value; +module_param(writecccr3value, uint, 0644); + +unsigned int writecccr4; +module_param(writecccr4, uint, 0644); + +unsigned int writecccr4value; +module_param(writecccr4value, uint, 0644); + +unsigned int modstrength; +module_param(modstrength, uint, 0644); +MODULE_PARM_DESC(modstrength, "Adjust internal driver strength"); + +#define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev) +#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv) +static struct hif_sdio_dev *add_hif_device(struct sdio_func *func); +static struct hif_sdio_dev *get_hif_device(struct sdio_func *func); +static void del_hif_device(struct hif_sdio_dev *device); +static int func0_cmd52_write_byte(struct mmc_card *card, unsigned int address, + unsigned char byte); +static int func0_cmd52_read_byte(struct mmc_card *card, unsigned int address, + unsigned char *byte); + +int reset_sdio_on_unload; +module_param(reset_sdio_on_unload, int, 0644); + +uint32_t nohifscattersupport = 1; + +uint32_t forcedriverstrength = 1; /* force driver strength to type D */ + +/* ------ Static Variables ------ */ +static const struct sdio_device_id ar6k_id_table[] = { +#ifdef AR6002_HEADERS_DEF + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6002_BASE | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6002_BASE | 0x1))}, +#endif +#ifdef AR6003_HEADERS_DEF + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))}, +#endif +#ifdef AR6004_HEADERS_DEF + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))}, +#endif +#ifdef AR6320_HEADERS_DEF + {SDIO_DEVICE(MANUFACTURER_CODE, 
(MANUFACTURER_ID_AR6320_BASE | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x1))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x2))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x3))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x4))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x5))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x6))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x7))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x8))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x9))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xA))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xB))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xC))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xD))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xE))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xF))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x1))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x2))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x3))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x4))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x5))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x6))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x7))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x8))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x9))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xA))}, + {SDIO_DEVICE(MANUFACTURER_CODE, 
(MANUFACTURER_ID_QCA9377_BASE | 0xB))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xC))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xD))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xE))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xF))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x1))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x2))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x3))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x4))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x5))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x6))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x7))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x8))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x9))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xA))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xB))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xC))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xD))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xE))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xF))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (0 | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (0 | 0x1))}, +#endif + { /* null */ }, +}; + +/* make sure we only unregister when registered. 
*/ +static int registered; + +struct osdrv_callbacks osdrv_callbacks; +uint32_t onebitmode; +uint32_t busspeedlow; +uint32_t debughif; + +static struct hif_sdio_dev *hif_devices[MAX_HIF_DEVICES]; + +static void reset_all_cards(void); +static QDF_STATUS hif_disable_func(struct hif_sdio_dev *device, + struct sdio_func *func); +static QDF_STATUS hif_enable_func(struct hif_sdio_dev *device, + struct sdio_func *func); + +#if defined(WLAN_DEBUG) || defined(DEBUG) +ATH_DEBUG_INSTANTIATE_MODULE_VAR(hif, + "hif", + "(Linux MMC) Host Interconnect Framework", + ATH_DEBUG_MASK_DEFAULTS, 0, NULL); +#endif + +static int hif_sdio_init_callbacks(struct osdrv_callbacks *callbacks) +{ + int status = 0; + /* store the callback handlers */ + osdrv_callbacks = *callbacks; + + /* Register with bus driver core is done from HDD */ + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("%s: HIFInit registering\n", + __func__)); + registered = 1; + + return status; +} +static void hif_sdio_remove_callbacks(void) +{ + qdf_mem_zero(&osdrv_callbacks, sizeof(osdrv_callbacks)); +} + + +/** + * hif_init() - Initializes the driver callbacks + * @callbacks: pointer to driver callback structure + * + * Return: 0 on success, error number otherwise. 
+ */ +QDF_STATUS hif_init(struct osdrv_callbacks *callbacks) +{ + int status; + + AR_DEBUG_ASSERT(callbacks != NULL); + A_REGISTER_MODULE_DEBUG_INFO(hif); + + HIF_ENTER(); + + status = hif_sdio_init_callbacks(callbacks); + AR_DEBUG_ASSERT(status == 0); + + if (status != 0) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s sdio_register_driver failed!", __func__)); + return QDF_STATUS_E_FAILURE; + } + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s sdio_register_driver successful", __func__)); + + return QDF_STATUS_SUCCESS; + +} + +/** + * __hif_read_write() - sdio read/write wrapper + * @device: pointer to hif device structure + * @address: address to read + * @buffer: buffer to hold read/write data + * @length: length to read/write + * @request: read/write/sync/async request + * @context: pointer to hold calling context + * + * Return: 0 on success, error number otherwise. + */ +static QDF_STATUS +__hif_read_write(struct hif_sdio_dev *device, + uint32_t address, + char *buffer, + uint32_t length, uint32_t request, void *context) +{ + uint8_t opcode; + QDF_STATUS status = QDF_STATUS_SUCCESS; + int ret = A_OK; + uint8_t *tbuffer; + bool bounced = false; + + AR_DEBUG_ASSERT(device != NULL); + AR_DEBUG_ASSERT(device->func != NULL); + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("__hif_read_write, addr:0X%06X, len:%08d, %s, %s\n", + address, length, + request & HIF_SDIO_READ ? "Read " : "Write", + request & HIF_ASYNCHRONOUS ? 
"Async" : "Sync ")); + + do { + if (request & HIF_EXTENDED_IO) { + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: Command type: CMD53\n", __func__)); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: Invalid command type: 0x%08x\n", + __func__, request)); + status = QDF_STATUS_E_INVAL; + break; + } + + if (request & HIF_BLOCK_BASIS) { + /* round to whole block length size */ + length = + (length / HIF_MBOX_BLOCK_SIZE) * + HIF_MBOX_BLOCK_SIZE; + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: Block mode (BlockLen: %d)\n", + __func__, length)); + } else if (request & HIF_BYTE_BASIS) { + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: Byte mode (BlockLen: %d)\n", + __func__, length)); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: Invalid data mode: 0x%08x\n", + __func__, request)); + status = QDF_STATUS_E_INVAL; + break; + } + if (request & HIF_SDIO_WRITE) { + struct hif_device_mbox_info MailBoxInfo; + unsigned int mboxLength = 0; + + hif_configure_device(device, + HIF_DEVICE_GET_MBOX_ADDR, + &MailBoxInfo, sizeof(MailBoxInfo)); + if (address >= 0x800 && address < 0xC00) { + /* Host control register and CIS Window */ + mboxLength = 0; + } else if (address == MailBoxInfo.mbox_addresses[0] + || address == MailBoxInfo.mbox_addresses[1] + || address == MailBoxInfo.mbox_addresses[2] + || address == + MailBoxInfo.mbox_addresses[3]) { + mboxLength = HIF_MBOX_WIDTH; + } else if (address == + MailBoxInfo.mbox_prop[0].extended_address) { + mboxLength = + MailBoxInfo.mbox_prop[0].extended_size; + } else if (address == + MailBoxInfo.mbox_prop[1].extended_address) { + mboxLength = + MailBoxInfo.mbox_prop[1].extended_size; + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("Invalid written address: 0x%08x\n", + address)); + break; + } + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("address:%08X, Length:0x%08X, Dummy:0x%04X, Final:0x%08X\n", + address, length, + (request & HIF_DUMMY_SPACE_MASK) >> 16, + mboxLength == + 0 ? 
address : address + (mboxLength - + length))); + if (mboxLength != 0) { + if (length > mboxLength) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: written length(0x%08X) larger than mbox len(0x%08x)\n", + __func__, length, mboxLength)); + break; + } + address += (mboxLength - length); + /* + * plus dummy byte count + */ + address += ((request & + HIF_DUMMY_SPACE_MASK) >> 16); + } + } + + if (request & HIF_FIXED_ADDRESS) { + opcode = CMD53_FIXED_ADDRESS; + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: Address mode: Fixed 0x%X\n", + __func__, address)); + } else if (request & HIF_INCREMENTAL_ADDRESS) { + opcode = CMD53_INCR_ADDRESS; + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: Address mode: Incremental 0x%X\n", + __func__, address)); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: Invalid address mode: 0x%08x\n", + __func__, request)); + status = QDF_STATUS_E_INVAL; + break; + } + + if (request & HIF_SDIO_WRITE) { +#if HIF_USE_DMA_BOUNCE_BUFFER + if (BUFFER_NEEDS_BOUNCE(buffer)) { + AR_DEBUG_ASSERT(device->dma_buffer != NULL); + tbuffer = device->dma_buffer; + /* copy the write data to the dma buffer */ + AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE); + if (length > HIF_DMA_BUFFER_SIZE) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: Invalid write length: %d\n", + __func__, length)); + status = QDF_STATUS_E_INVAL; + break; + } + memcpy(tbuffer, buffer, length); + bounced = true; + } else { + tbuffer = buffer; + } +#else + tbuffer = buffer; +#endif + if (opcode == CMD53_FIXED_ADDRESS && tbuffer != NULL) { + ret = + sdio_writesb(device->func, address, + tbuffer, + length); + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: writesb ret=%d address: 0x%X, len: %d, 0x%X\n", + __func__, ret, address, length, + *(int *)tbuffer)); + } else if (tbuffer) { + ret = + sdio_memcpy_toio(device->func, address, + tbuffer, length); + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: writeio ret=%d address: 0x%X, len: %d, 0x%X\n", + __func__, ret, address, length, + *(int *)tbuffer)); + } + } else if 
(request & HIF_SDIO_READ) { +#if HIF_USE_DMA_BOUNCE_BUFFER + if (BUFFER_NEEDS_BOUNCE(buffer)) { + AR_DEBUG_ASSERT(device->dma_buffer != NULL); + AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE); + if (length > HIF_DMA_BUFFER_SIZE) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: Invalid read length: %d\n", + __func__, length)); + status = QDF_STATUS_E_INVAL; + break; + } + tbuffer = device->dma_buffer; + bounced = true; + } else { + tbuffer = buffer; + } +#else + tbuffer = buffer; +#endif + if (opcode == CMD53_FIXED_ADDRESS && tbuffer != NULL) { + ret = + sdio_readsb(device->func, tbuffer, + address, + length); + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: readsb ret=%d address: 0x%X, len: %d, 0x%X\n", + __func__, ret, address, length, + *(int *)tbuffer)); + } else if (tbuffer) { + ret = + sdio_memcpy_fromio(device->func, + tbuffer, + address, length); + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: readio ret=%d address: 0x%X, len: %d, 0x%X\n", + __func__, ret, address, length, + *(int *)tbuffer)); + } +#if HIF_USE_DMA_BOUNCE_BUFFER + if (bounced && tbuffer) + memcpy(buffer, tbuffer, length); +#endif + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: Invalid direction: 0x%08x\n", + __func__, request)); + status = QDF_STATUS_E_INVAL; + return status; + } + + if (ret) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: SDIO bus operation failed! MMC stack returned : %d\n", + __func__, ret)); + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("__hif_read_write, addr:0X%06X, len:%08d, %s, %s\n", + address, length, + request & HIF_SDIO_READ ? "Read " : "Write", + request & HIF_ASYNCHRONOUS ? "Async" : + "Sync ")); + status = QDF_STATUS_E_FAILURE; + } + } while (false); + + return status; +} + +/** + * add_to_async_list() - add bus reqest to async task list + * @device: pointer to hif device + * @busrequest: pointer to type of bus request + * + * Return: None. 
 */
void add_to_async_list(struct hif_sdio_dev *device,
		       struct bus_request *busrequest)
{
	struct bus_request *async;
	struct bus_request *active;

	/* append under the async lock so async_task sees a consistent list */
	qdf_spin_lock_irqsave(&device->asynclock);
	active = device->asyncreq;
	if (active == NULL) {
		/* empty list: new request becomes the head */
		device->asyncreq = busrequest;
		device->asyncreq->inusenext = NULL;
	} else {
		/* walk to the tail and append (FIFO order is preserved) */
		for (async = device->asyncreq;
		     async != NULL; async = async->inusenext) {
			active = async;
		}
		active->inusenext = busrequest;
		busrequest->inusenext = NULL;
	}
	qdf_spin_unlock_irqrestore(&device->asynclock);
}

/**
 * hif_read_write() - queue a read/write request
 * @device: pointer to hif device structure
 * @address: address to read
 * @buffer: buffer to hold read/write data
 * @length: length to read/write
 * @request: read/write/sync/async request
 * @context: pointer to hold calling context
 *
 * All requests (sync and async) are serialized through the async
 * thread: the request is queued and the thread is kicked via
 * sem_async. Sync callers then block on the per-request semaphore;
 * async callers get QDF_STATUS_E_PENDING and are completed through
 * the rwCompletionHandler callback.
 *
 * Return: 0 on success, error number otherwise.
 */
QDF_STATUS
hif_read_write(struct hif_sdio_dev *device,
	       uint32_t address,
	       char *buffer, uint32_t length,
	       uint32_t request, void *context)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct bus_request *busrequest;

	AR_DEBUG_ASSERT(device != NULL);
	AR_DEBUG_ASSERT(device->func != NULL);
	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
		("%s: device 0x%pK addr 0x%X buffer 0x%pK len %d req 0x%X context 0x%pK",
		 __func__, device, address, buffer,
		 length, request, context));

	/*sdio r/w action is not needed when suspend, so just return */
	if ((device->is_suspend == true)
	    && (device->power_config == HIF_DEVICE_POWER_CUT)) {
		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("skip io when suspending\n"));
		return QDF_STATUS_SUCCESS;
	}
	do {
		if ((request & HIF_ASYNCHRONOUS) ||
		    (request & HIF_SYNCHRONOUS)) {
			/* serialize all requests through the async thread */
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: Execution mode: %s\n", __func__,
					 (request & HIF_ASYNCHRONOUS) ? "Async"
					 : "Synch"));
			busrequest = hif_allocate_bus_request(device);
			if (busrequest == NULL) {
				AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
					("no async bus requests available (%s, addr:0x%X, len:%d)\n",
					 request & HIF_SDIO_READ ? "READ" :
					 "WRITE", address, length));
				return QDF_STATUS_E_FAILURE;
			}
			busrequest->address = address;
			busrequest->buffer = buffer;
			busrequest->length = length;
			busrequest->request = request;
			busrequest->context = context;

			add_to_async_list(device, busrequest);

			if (request & HIF_SYNCHRONOUS) {
				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: queued sync req: 0x%lX\n",
					 __func__,
					 (unsigned long)busrequest));

				/* wait for completion */
				up(&device->sem_async);
				if (down_interruptible(&busrequest->sem_req) !=
				    0) {
					/* interrupted, exit */
					return QDF_STATUS_E_FAILURE;
				} else {
					/* shadows outer 'status' on purpose:
					 * the result comes from the request,
					 * which must be read before freeing
					 */
					QDF_STATUS status = busrequest->status;

					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
						("%s: sync return freeing 0x%lX: 0x%X\n",
						 __func__,
						 (unsigned long)
						 busrequest,
						 busrequest->status));
					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
						("%s: freeing req: 0x%X\n",
						 __func__,
						 (unsigned int)
						 request));
					hif_free_bus_request(device,
							     busrequest);
					return status;
				}
			} else {
				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: queued async req: 0x%lX\n",
					 __func__,
					 (unsigned long)busrequest));
				/* kick the async thread; completion is
				 * delivered via rwCompletionHandler
				 */
				up(&device->sem_async);
				return QDF_STATUS_E_PENDING;
			}
		} else {
			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
					("%s: Invalid execution mode: 0x%08x\n",
					 __func__,
					 (unsigned int)request));
			status = QDF_STATUS_E_INVAL;
			break;
		}
	} while (0);

	return status;
}

/**
 * async_task() - thread function to serialize all bus requests
 * @param: pointer to hif device
 *
 * thread function to serialize all requests, both sync and async
 * Return: 0 on success, error number otherwise.
 */
static int async_task(void *param)
{
	struct hif_sdio_dev *device;
	struct bus_request *request;
	QDF_STATUS status;

	device = (struct hif_sdio_dev *) param;
	set_current_state(TASK_INTERRUPTIBLE);
	/* run until hif requests shutdown via async_shutdown */
	while (!device->async_shutdown) {
		/* wait for work */
		if (down_interruptible(&device->sem_async) != 0) {
			/* interrupted, exit */
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: async task interrupted\n",
					 __func__));
			break;
		}
		if (device->async_shutdown) {
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: async task stopping\n",
					 __func__));
			break;
		}
		/* we want to hold the host over multiple cmds
		 * if possible, but holding the host blocks
		 * card interrupts
		 */
		sdio_claim_host(device->func);
		qdf_spin_lock_irqsave(&device->asynclock);
		/* pull the request to work on */
		while (device->asyncreq != NULL) {
			request = device->asyncreq;
			if (request->inusenext != NULL)
				device->asyncreq = request->inusenext;
			else
				device->asyncreq = NULL;
			/* drop the lock while the (slow) bus op runs */
			qdf_spin_unlock_irqrestore(&device->asynclock);
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
				("%s: async_task processing req: 0x%lX\n",
				 __func__, (unsigned long)request));

			if (request->scatter_req != NULL) {
				A_ASSERT(device->scatter_enabled);
				/* pass the request to scatter routine which
				 * executes it synchronously, note, no need
				 * to free the request since scatter requests
				 * are maintained on a separate list
				 */
				status = do_hif_read_write_scatter(device,
								   request);
			} else {
				/* call hif_read_write in sync mode */
				status =
					__hif_read_write(device,
							 request->address,
							 request->buffer,
							 request->length,
							 request->request &
							 ~HIF_SYNCHRONOUS,
							 NULL);
				if (request->request & HIF_ASYNCHRONOUS) {
					/* context must be saved before the
					 * request is freed
					 */
					void *context = request->context;

					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
						("%s: freeing req: 0x%lX\n",
						 __func__, (unsigned long)
						 request));
					hif_free_bus_request(device, request);
					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
						("%s: async_task completion req 0x%lX\n",
						 __func__, (unsigned long)
						 request));
					device->htc_callbacks.
					rwCompletionHandler(context,
							    status);
				} else {
					AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
						("%s: async_task upping req: 0x%lX\n",
						 __func__, (unsigned long)
						 request));
					/* wake the sync caller blocked in
					 * hif_read_write(); it frees the req
					 */
					request->status = status;
					up(&request->sem_req);
				}
			}
			qdf_spin_lock_irqsave(&device->asynclock);
		}
		qdf_spin_unlock_irqrestore(&device->asynclock);
		sdio_release_host(device->func);
	}

	complete_and_exit(&device->async_completion, 0);

	return 0;
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
/**
 * sdio_card_highspeed() - check if high speed supported
 * @card: pointer to mmc card struct
 *
 * Return: non zero if card supports high speed.
 */
static inline int sdio_card_highspeed(struct mmc_card *card)
{
	return mmc_card_highspeed(card);
}
#else
/* mmc_card_highspeed() was renamed mmc_card_hs() in kernel 3.16 */
static inline int sdio_card_highspeed(struct mmc_card *card)
{
	return mmc_card_hs(card);
}
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
/**
 * sdio_card_set_highspeed() - set high speed
 * @card: pointer to mmc card struct
 *
 * Return: none.
 */
static inline void sdio_card_set_highspeed(struct mmc_card *card)
{
	mmc_card_set_highspeed(card);
}
#else
/* no-op: the highspeed state flag was removed in kernel 3.16 */
static inline void sdio_card_set_highspeed(struct mmc_card *card)
{
}
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
/**
 * sdio_card_state() - set card state
 * @card: pointer to mmc card struct
 *
 * Return: none.
 */
static inline void sdio_card_state(struct mmc_card *card)
{
	card->state &= ~MMC_STATE_HIGHSPEED;
}
#else
/* no-op: MMC_STATE_HIGHSPEED was removed in kernel 3.16 */
static inline void sdio_card_state(struct mmc_card *card)
{
}
#endif

/**
 * reinit_sdio() - re-initialize sdio bus
 * @param: pointer to hif device
 *
 * Return: 0 on success, error number otherwise.
 */
static QDF_STATUS reinit_sdio(struct hif_sdio_dev *device)
{
	int32_t err = 0;
	struct mmc_host *host;
	struct mmc_card *card;
	struct sdio_func *func;
	uint8_t cmd52_resp;
	uint32_t clock;

	func = device->func;
	card = func->card;
	host = card->host;

	sdio_claim_host(func);

	do {
		/* Enable high speed */
		if (card->host->caps & MMC_CAP_SD_HIGHSPEED) {
			AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: Set high speed mode\n",
					 __func__));
			err = func0_cmd52_read_byte(card, SDIO_CCCR_SPEED,
						    &cmd52_resp);
			if (err) {
				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("%s: CMD52 read to CCCR speed register failed : %d\n",
					 __func__, err));
				sdio_card_state(card);
				/* no need to break */
			} else {
				/* set EHS bit to switch card to high speed */
				err = func0_cmd52_write_byte(card,
						SDIO_CCCR_SPEED,
						(cmd52_resp | SDIO_SPEED_EHS));
				if (err) {
					AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
						("%s: CMD52 write to CCCR speed register failed : %d\n",
						 __func__, err));
					break;
				}
				sdio_card_set_highspeed(card);
				host->ios.timing = MMC_TIMING_SD_HS;
				host->ops->set_ios(host, &host->ios);
			}
		}

		/* Set clock */
		if (sdio_card_highspeed(card))
			clock = 50000000;
		else
			clock = card->cis.max_dtr;

		if (clock > host->f_max)
			clock = host->f_max;
		/*
		 * In fpga mode the clk should be set to 12500000,
		 * or will result in scan channel setting timeout error.
		 * So in fpga mode, please set module parameter mmcclock
		 * to 12500000.
		 */
		if (mmcclock > 0)
			clock = mmcclock;
		host->ios.clock = clock;
		host->ops->set_ios(host, &host->ios);


		if (card->host->caps & MMC_CAP_4_BIT_DATA) {
			/* CMD52: Set bus width & disable card detect resistor */
			err = func0_cmd52_write_byte(card, SDIO_CCCR_IF,
						     SDIO_BUS_CD_DISABLE |
						     SDIO_BUS_WIDTH_4BIT);
			if (err) {
				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("%s: CMD52 to set bus mode failed : %d\n",
					 __func__, err));
				break;
			}
			host->ios.bus_width = MMC_BUS_WIDTH_4;
			host->ops->set_ios(host, &host->ios);
		}
	} while (0);

	sdio_release_host(func);

	/* any err above maps to a generic failure for the caller */
	return (err) ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
}

/*
 * Setup IRQ mode for deep sleep and WoW
 * Switch back to 1 bits mode when we suspend for
 * WoW in order to detect SDIO irq without clock.
 * Re-enable async 4-bit irq mode for some host controllers
 * after resume.
 */
static int sdio_enable4bits(struct hif_sdio_dev *device, int enable)
{
	int ret = 0;
	struct sdio_func *func = device->func;
	struct mmc_card *card = func->card;
	struct mmc_host *host = card->host;

	/* nothing to do when the host can't do 4-bit data at all */
	if (!(host->caps & (MMC_CAP_4_BIT_DATA)))
		return 0;

	/* low-speed cards without wide-bus support stay in 1-bit mode */
	if (card->cccr.low_speed && !card->cccr.wide_bus)
		return 0;

	sdio_claim_host(func);
	do {
		int setAsyncIRQ = 0;
		__u16 manufacturer_id =
			device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK;

		/* Re-enable 4-bit ASYNC interrupt on AR6003x
		 * after system resume for some host controller
		 */
		if (manufacturer_id == MANUFACTURER_ID_AR6003_BASE) {
			setAsyncIRQ = 1;
			ret =
				func0_cmd52_write_byte(func->card,
					CCCR_SDIO_IRQ_MODE_REG_AR6003,
					enable ?
					SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6003
					: 0);
		} else if (manufacturer_id == MANUFACTURER_ID_AR6320_BASE ||
			   manufacturer_id == MANUFACTURER_ID_QCA9377_BASE ||
			   manufacturer_id == MANUFACTURER_ID_QCA9379_BASE) {
			unsigned char data = 0;

			setAsyncIRQ = 1;
			/* read-modify-write: only the async-4bit bit changes */
			ret =
				func0_cmd52_read_byte(func->card,
					CCCR_SDIO_IRQ_MODE_REG_AR6320,
					&data);
			if (ret) {
				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("%s: failed to read interrupt extension register %d\n",
					 __func__, ret));
				sdio_release_host(func);
				return ret;
			}
			if (enable)
				data |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6320;
			else
				data &= ~SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6320;
			ret =
				func0_cmd52_write_byte(func->card,
					CCCR_SDIO_IRQ_MODE_REG_AR6320,
					data);
		}
		if (setAsyncIRQ) {
			if (ret) {
				AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("%s: failed to setup 4-bit ASYNC IRQ mode into %d err %d\n",
					 __func__, enable, ret));
			} else {
				AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
					("%s: Setup 4-bit ASYNC IRQ mode into %d successfully\n",
					 __func__, enable));
			}
		}
	} while (0);
	sdio_release_host(func);

	return ret;
}


/**
 * power_state_change_notify() - SDIO bus power notification handler
 * @config: hif device power change type
 *
 * Return: 0 on success, error number otherwise.
 */
static QDF_STATUS
power_state_change_notify(struct hif_sdio_dev *device,
			  enum HIF_DEVICE_POWER_CHANGE_TYPE config)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct sdio_func *func = device->func;
	int old_reset_val;

	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
			("%s: config type %d\n",
			 __func__, config));
	switch (config) {
	case HIF_DEVICE_POWER_DOWN:
		/* Disable 4bits to allow SDIO bus to detect
		 * DAT1 as interrupt source
		 */
		sdio_enable4bits(device, 0);
		break;
	case HIF_DEVICE_POWER_CUT:
		/* temporarily force the reset-on-unload path while the
		 * function is disabled, then restore the module setting
		 */
		old_reset_val = reset_sdio_on_unload;
		reset_sdio_on_unload = 1;
		status = hif_disable_func(device, func);
		reset_sdio_on_unload = old_reset_val;
		if (!device->is_suspend) {
			device->power_config = config;
			mmc_detect_change(device->host, HZ / 3);
		}
		break;
	case HIF_DEVICE_POWER_UP:
		if (device->power_config == HIF_DEVICE_POWER_CUT) {
			if (device->is_suspend) {
				status = reinit_sdio(device);
				/* set power_config before EnableFunc to
				 * passthrough sdio r/w action when resuming
				 * from cut power
				 */
				device->power_config = config;
				if (status == QDF_STATUS_SUCCESS)
					status = hif_enable_func(device, func);
			} else {
				/* device->func is bad pointer at this time */
				mmc_detect_change(device->host, 0);
				return QDF_STATUS_E_PENDING;
			}
		} else if (device->power_config == HIF_DEVICE_POWER_DOWN) {
			int ret = sdio_enable4bits(device, 1);

			status = (ret == 0) ? QDF_STATUS_SUCCESS :
				 QDF_STATUS_E_FAILURE;
		}
		break;
	}
	/* record the new power state for all non-early-return paths */
	device->power_config = config;

	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
			("%s:\n", __func__));

	return status;
}

#ifdef SDIO_3_0
/**
 * set_extended_mbox_size() - set extended MBOX size
 * @pinfo: sdio mailbox info
 *
 * Return: none.
 */
static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
{
	pinfo->mbox_prop[0].extended_size =
		HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0;
	pinfo->mbox_prop[1].extended_size =
		HIF_MBOX1_EXTENDED_WIDTH_AR6320;
}

/**
 * set_extended_mbox_address() - set extended MBOX address
 * @pinfo: sdio mailbox info
 *
 * Places mbox 1 directly after mbox 0's extended window plus the
 * dummy space gap.
 *
 * Return: none.
 */
static void set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
{
	pinfo->mbox_prop[1].extended_address =
		pinfo->mbox_prop[0].extended_address +
		pinfo->mbox_prop[0].extended_size +
		HIF_MBOX_DUMMY_SPACE_SIZE_AR6320;
}
#else
/* non-SDIO3.0: only mbox 0 has an extended window */
static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
{
	pinfo->mbox_prop[0].extended_size =
		HIF_MBOX0_EXTENDED_WIDTH_AR6320;
}
static inline void
set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
{

}
#endif

/**
 * set_extended_mbox_window_info() - set extended MBOX window
 * information for SDIO interconnects
 * @manf_id: manufacturer id
 * @pinfo: sdio mailbox info
 *
 * Return: none.
+ */ +static void set_extended_mbox_window_info(uint16_t manf_id, + struct hif_device_mbox_info *pinfo) +{ + switch (manf_id & MANUFACTURER_ID_AR6K_BASE_MASK) { + case MANUFACTURER_ID_AR6002_BASE: + /* MBOX 0 has an extended range */ + + pinfo->mbox_prop[0].extended_address = + HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1; + pinfo->mbox_prop[0].extended_size = + HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1; + + pinfo->mbox_prop[0].extended_address = + HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1; + pinfo->mbox_prop[0].extended_size = + HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1; + + pinfo->mbox_prop[0].extended_address = + HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004; + pinfo->mbox_prop[0].extended_size = + HIF_MBOX0_EXTENDED_WIDTH_AR6004; + + break; + case MANUFACTURER_ID_AR6003_BASE: + /* MBOX 0 has an extended range */ + pinfo->mbox_prop[0].extended_address = + HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1; + pinfo->mbox_prop[0].extended_size = + HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1; + pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR; + pinfo->gmbox_size = HIF_GMBOX_WIDTH; + break; + case MANUFACTURER_ID_AR6004_BASE: + pinfo->mbox_prop[0].extended_address = + HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004; + pinfo->mbox_prop[0].extended_size = + HIF_MBOX0_EXTENDED_WIDTH_AR6004; + pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR; + pinfo->gmbox_size = HIF_GMBOX_WIDTH; + break; + case MANUFACTURER_ID_AR6320_BASE: { + uint16_t ManuRevID = + manf_id & MANUFACTURER_ID_AR6K_REV_MASK; + pinfo->mbox_prop[0].extended_address = + HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320; + if (ManuRevID < 4) { + pinfo->mbox_prop[0].extended_size = + HIF_MBOX0_EXTENDED_WIDTH_AR6320; + } else { + /* from rome 2.0(0x504), the width has been extended to 56K */ + set_extended_mbox_size(pinfo); + } + set_extended_mbox_address(pinfo); + pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR; + pinfo->gmbox_size = HIF_GMBOX_WIDTH; + break; + } + case MANUFACTURER_ID_QCA9377_BASE: + case MANUFACTURER_ID_QCA9379_BASE: + pinfo->mbox_prop[0].extended_address = + 
HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320; + pinfo->mbox_prop[0].extended_size = + HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0; + pinfo->mbox_prop[1].extended_address = + pinfo->mbox_prop[0].extended_address + + pinfo->mbox_prop[0].extended_size + + HIF_MBOX_DUMMY_SPACE_SIZE_AR6320; + pinfo->mbox_prop[1].extended_size = + HIF_MBOX1_EXTENDED_WIDTH_AR6320; + pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR; + pinfo->gmbox_size = HIF_GMBOX_WIDTH; + break; + default: + A_ASSERT(false); + break; + } +} + +/** + * hif_configure_device() - configure sdio device + * @device: pointer to hif device structure + * @opcode: configuration type + * @config: configuration value to set + * @configLen: configuration length + * + * Return: 0 on success, error number otherwise. + */ +QDF_STATUS +hif_configure_device(struct hif_sdio_dev *device, + enum hif_device_config_opcode opcode, + void *config, uint32_t config_len) +{ + uint32_t count; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + switch (opcode) { + case HIF_DEVICE_GET_MBOX_BLOCK_SIZE: + ((uint32_t *) config)[0] = HIF_MBOX0_BLOCK_SIZE; + ((uint32_t *) config)[1] = HIF_MBOX1_BLOCK_SIZE; + ((uint32_t *) config)[2] = HIF_MBOX2_BLOCK_SIZE; + ((uint32_t *) config)[3] = HIF_MBOX3_BLOCK_SIZE; + break; + + case HIF_DEVICE_GET_MBOX_ADDR: + for (count = 0; count < 4; count++) { + ((uint32_t *) config)[count] = + HIF_MBOX_START_ADDR(count); + } + + if (config_len >= sizeof(struct hif_device_mbox_info)) { + set_extended_mbox_window_info((uint16_t) device->func-> + device, + (struct hif_device_mbox_info *) + config); + } + + break; + case HIF_DEVICE_GET_PENDING_EVENTS_FUNC: + AR_DEBUG_PRINTF(ATH_DEBUG_WARN, + ("%s: configuration opcode %d\n", + __func__, opcode)); + status = QDF_STATUS_E_FAILURE; + break; + case HIF_DEVICE_GET_IRQ_PROC_MODE: + *((enum hif_device_irq_mode *) config) = + HIF_DEVICE_IRQ_SYNC_ONLY; + break; + case HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: + AR_DEBUG_PRINTF(ATH_DEBUG_WARN, + ("%s: configuration opcode %d\n", + __func__, 
opcode)); + status = QDF_STATUS_E_FAILURE; + break; + case HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: + if (!device->scatter_enabled) + return QDF_STATUS_E_NOSUPPORT; + status = + setup_hif_scatter_support(device, + (struct HIF_DEVICE_SCATTER_SUPPORT_INFO *) + config); + if (QDF_IS_STATUS_ERROR(status)) + device->scatter_enabled = false; + break; + case HIF_DEVICE_GET_OS_DEVICE: + /* pass back a pointer to the SDIO function's "dev" struct */ + ((struct HIF_DEVICE_OS_DEVICE_INFO *) config)->os_dev = + &device->func->dev; + break; + case HIF_DEVICE_POWER_STATE_CHANGE: + status = + power_state_change_notify(device, + *(enum HIF_DEVICE_POWER_CHANGE_TYPE *) + config); + break; + case HIF_DEVICE_GET_IRQ_YIELD_PARAMS: + AR_DEBUG_PRINTF(ATH_DEBUG_WARN, + ("%s: configuration opcode %d\n", + __func__, opcode)); + status = QDF_STATUS_E_FAILURE; + break; + case HIF_DEVICE_SET_HTC_CONTEXT: + device->htc_context = config; + break; + case HIF_DEVICE_GET_HTC_CONTEXT: + if (config == NULL) { + AR_DEBUG_PRINTF(ATH_DEBUG_WARN, + ("%s: htc context is NULL\n", + __func__)); + return QDF_STATUS_E_FAILURE; + } + *(void **)config = device->htc_context; + break; + case HIF_BMI_DONE: + { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: BMI_DONE\n", __func__)); + break; + } + default: + AR_DEBUG_PRINTF(ATH_DEBUG_WARN, + ("%s: Unsupported configuration opcode: %d\n", + __func__, opcode)); + status = QDF_STATUS_E_FAILURE; + } + + return status; +} + +/** + * hif_sdio_shutdown() - hif-sdio shutdown routine + * @hif_ctx: pointer to hif_softc structore + * + * Return: None. 
 */
void hif_sdio_shutdown(struct hif_softc *hif_ctx)
{
	struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx);
	struct hif_sdio_dev *hif_device = scn->hif_handle;

	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
			("%s: Enter\n", __func__));
	if (hif_device != NULL) {
		/* device still present: only sanity-check its state;
		 * the actual teardown happens elsewhere
		 */
		AR_DEBUG_ASSERT(hif_device->power_config == HIF_DEVICE_POWER_CUT
				|| hif_device->func != NULL);
	} else {
		int i;
		/* since we are unloading the driver anyways,
		 * reset all cards in case the SDIO card is
		 * externally powered and we are unloading the SDIO
		 * stack. This avoids the problem when the SDIO stack
		 * is reloaded and attempts are made to re-enumerate
		 * a card that is already enumerated
		 */
		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
				("%s: hif_shut_down_device, resetting\n",
				 __func__));
		reset_all_cards();

		/* Unregister with bus driver core */
		if (registered) {
			registered = 0;
			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
				("%s: Unregistering with the bus driver\n",
				 __func__));
			hif_sdio_remove_callbacks();
			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
					("%s: Unregistered!",
					 __func__));
		}

		/* drop any hif_devices whose sdio function already vanished */
		for (i = 0; i < MAX_HIF_DEVICES; ++i) {
			if (hif_devices[i] && hif_devices[i]->func == NULL) {
				AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
					("%s: Remove pending hif_device %pK\n",
					 __func__, hif_devices[i]));
				del_hif_device(hif_devices[i]);
				hif_devices[i] = NULL;
			}
		}
	}
	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
			("%s: Exit\n", __func__));
}

/**
 * hif_irq_handler() - hif-sdio interrupt handler
 * @func: pointer to sdio_func
 *
 * Return: None.
 */
static void hif_irq_handler(struct sdio_func *func)
{
	QDF_STATUS status;
	struct hif_sdio_dev *device;

	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
			("%s: Enter\n", __func__));

	device = get_hif_device(func);
	/* irq_handling flag lets other paths know the DSR is running */
	atomic_set(&device->irq_handling, 1);
	/* release the host during intr so we can use
	 * it when we process cmds
	 */
	sdio_release_host(device->func);
	status = device->htc_callbacks.dsrHandler(device->htc_callbacks
						  .context);
	/* re-claim before returning: the SDIO core expects the host
	 * to still be claimed when the irq handler returns
	 */
	sdio_claim_host(device->func);
	atomic_set(&device->irq_handling, 0);
	AR_DEBUG_ASSERT(status == QDF_STATUS_SUCCESS ||
			status == QDF_STATUS_E_CANCELED);
	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
			("%s: Exit\n", __func__));
}

/**
 * startup_task() - startup task to fill ol_softc
 * @param: pointer to struct hif_sdio_dev
 *
 * Return: 0 on success, error number otherwise.
 */
static int startup_task(void *param)
{
	struct hif_sdio_dev *device;

	device = (struct hif_sdio_dev *) param;
	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
			("%s: call HTC from startup_task\n",
			 __func__));
	/* start up inform DRV layer */
	if ((osdrv_callbacks.
	     device_inserted_handler(osdrv_callbacks.context,
				     device)) != QDF_STATUS_SUCCESS) {
		/* rejection is only logged; the task still exits cleanly */
		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
				("%s: Device rejected\n", __func__));
	}

	return 0;
}

/* enable_task() - notify the DRV layer of a power-up on resume;
 * all callback pointers are checked before use since the device
 * may not have been claimed yet
 */
static int enable_task(void *param)
{
	struct hif_sdio_dev *device;

	device = (struct hif_sdio_dev *) param;
	AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
			("%s: call from resume_task\n",
			 __func__));

	/* start up inform DRV layer */
	if (device &&
	    device->claimed_ctx &&
	    osdrv_callbacks.device_power_change_handler &&
	    osdrv_callbacks.device_power_change_handler(device->claimed_ctx,
							HIF_DEVICE_POWER_UP) !=
	    QDF_STATUS_SUCCESS) {
		AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
				("%s: Device rejected\n",
				 __func__));
	}

	return 0;
}

/**
 * foce_drive_strength() - Set sdio drive strength
 * @func: pointer to sdio_func
 *
 * Return: none.
 */
static void foce_drive_strength(struct sdio_func *func)
{
	/* NOTE: function name is a long-standing typo for
	 * "force_drive_strength"; kept because callers reference it
	 */
	unsigned int addr = SDIO_CCCR_DRIVE_STRENGTH;
	unsigned char value = 0;

	uint32_t err = func0_cmd52_read_byte(func->card,
					     addr, &value);
	if (err) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
				("%s: Read CCCR 0x%02X failed: %d\n",
				 __func__,
				 (unsigned int) addr,
				 (unsigned int) err));
	} else {
		/* set drive strength type D, preserving other bits */
		value = (value &
			 (~(SDIO_DRIVE_DTSx_MASK <<
			    SDIO_DRIVE_DTSx_SHIFT))) |
			SDIO_DTSx_SET_TYPE_D;
		err = func0_cmd52_write_byte(func->card, addr,
					     value);
		if (err) {
			AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
				("%s: Write CCCR 0x%02X to 0x%02X failed: %d\n",
				 __func__,
				 (unsigned int) addr,
				 (unsigned int) value,
				 (unsigned int) err));
		} else {
			/* then enable the vendor drive-strength knobs */
			addr = CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR;
			value = 0;
			err = func0_cmd52_read_byte(func->card,
						    addr, &value);
			if (err) {
				AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
					("Read CCCR 0x%02X failed: %d\n",
					 (unsigned int) addr,
					 (unsigned int) err));
			} else {
				value = (value &
					 (~CCCR_SDIO_DRIVER_STRENGTH_ENABLE_MASK)
					 ) |
					CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |
					CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |
					CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D;
				err = func0_cmd52_write_byte(func->card,
							     addr, value);
				if (err) {
					AR_DEBUG_PRINTF(ATH_DEBUG_ERROR,
						("Write CCCR 0x%02X to 0x%02X failed: %d\n",
						 (unsigned int) addr,
						 (unsigned int) value,
						 (unsigned int) err));
				}
			}
		}
	}
}

/**
 * write_cccr() - write CCCR
 * @func: pointer to sdio_func
 *
 * Return: none.
+ */ +static void write_cccr(struct sdio_func *func) +{ + if (writecccr1) { + uint32_t err = func0_cmd52_write_byte(func->card, + writecccr1, + writecccr1value); + if (err) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("Write CCCR 0x%02X to 0x%02X failed: %d\n", + (unsigned int)writecccr1, + (unsigned int)writecccr1value, + (unsigned int)err)); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("Write CCCR 0x%02X to 0x%02X OK\n", + (unsigned int)writecccr1, + (unsigned int)writecccr1value)); + } + } + if (writecccr2) { + uint32_t err = func0_cmd52_write_byte(func->card, + writecccr2, + writecccr2value); + if (err) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("Write CCCR 0x%02X to 0x%02X failed: %d\n", + (unsigned int)writecccr2, + (unsigned int)writecccr2value, + (unsigned int)err)); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("Write CCCR 0x%02X to 0x%02X OK\n", + (unsigned int)writecccr2, + (unsigned int)writecccr2value)); + } + } + if (writecccr3) { + uint32_t err = func0_cmd52_write_byte(func->card, + writecccr3, + writecccr3value); + if (err) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("Write CCCR 0x%02X to 0x%02X failed: %d\n", + (unsigned int)writecccr3, + (unsigned int)writecccr3value, + (unsigned int)err)); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("Write CCCR 0x%02X to 0x%02X OK\n", + (unsigned int)writecccr3, + (unsigned int)writecccr3value)); + } + } + if (writecccr4) { + uint32_t err = func0_cmd52_write_byte(func->card, + writecccr4, + writecccr4value); + if (err) + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("Write CCCR 0x%02X to 0x%02X failed: %d\n", + (unsigned int)writecccr4, + (unsigned int)writecccr4value, + (unsigned int)err)); + else + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("Write CCCR 0x%02X to 0x%02X OK\n", + (unsigned int)writecccr4, + (unsigned int)writecccr4value)); + } +} + +#ifdef SDIO_BUS_WIDTH_8BIT +static int hif_cmd52_write_byte_8bit(struct sdio_func *func) +{ + return func0_cmd52_write_byte(func->card, SDIO_CCCR_IF, + SDIO_BUS_CD_DISABLE | 
SDIO_BUS_WIDTH_8BIT); +} +#else +static int hif_cmd52_write_byte_8bit(struct sdio_func *func) +{ + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: 8BIT Bus Width not supported\n", __func__)); + return QDF_STATUS_E_FAILURE; +} +#endif + +/** + * hif_device_inserted() - hif-sdio driver probe handler + * @func: pointer to sdio_func + * @id: pointer to sdio_device_id + * + * Return: 0 on success, error number otherwise. + */ +static int hif_device_inserted(struct sdio_func *func, + const struct sdio_device_id *id) +{ + int i; + int ret; + struct hif_sdio_dev *device = NULL; + int count; + uint32_t clock, clock_set = 12500000; + + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: Function: 0x%X, Vendor ID: 0x%X, Device ID: 0x%X, block size: 0x%X/0x%X\n", + __func__, func->num, func->vendor, id->device, + func->max_blksize, func->cur_blksize)); + /* dma_mask should not be NULL, otherwise dma_map_single + * will crash. TODO: check why dma_mask is NULL here + */ + if (func->dev.dma_mask == NULL) { + static u64 dma_mask = 0xFFFFFFFF; + + func->dev.dma_mask = &dma_mask; + } + for (i = 0; i < MAX_HIF_DEVICES; ++i) { + struct hif_sdio_dev *hifdevice = hif_devices[i]; + + if (hifdevice && hifdevice->power_config == HIF_DEVICE_POWER_CUT + && hifdevice->host == func->card->host) { + hifdevice->func = func; + hifdevice->power_config = HIF_DEVICE_POWER_UP; + sdio_set_drvdata(func, hifdevice); + device = get_hif_device(func); + + if (device->is_suspend) { + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: Resume from suspend", + __func__)); + ret = reinit_sdio(device); + } + break; + } + } + + if (device == NULL) { + if (add_hif_device(func) == NULL) + return QDF_STATUS_E_FAILURE; + device = get_hif_device(func); + + for (i = 0; i < MAX_HIF_DEVICES; ++i) { + if (hif_devices[i] == NULL) { + hif_devices[i] = device; + break; + } + } + if (i == MAX_HIF_DEVICES) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: No more hif_devices[] slot for %pK", + __func__, device)); + } + + device->id = id; + device->host = 
func->card->host; + device->is_disabled = true; + /* TODO: MMC SDIO3.0 Setting should also be modified in ReInit() + * function when Power Manage work. + */ + sdio_claim_host(func); + /* force driver strength to type D */ + if (forcedriverstrength == 1) + foce_drive_strength(func); + write_cccr(func); + /* Set MMC Clock */ + if (mmcclock > 0) + clock_set = mmcclock; + if (sdio_card_highspeed(func->card)) + clock = 50000000; + else + clock = func->card->cis.max_dtr; + if (clock > device->host->f_max) + clock = device->host->f_max; + + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: Dumping clocks (%d,%d)\n", + __func__, func->card->cis.max_dtr, + device->host->f_max)); + + /* only when mmcclock module parameter is specified, + * set the clock explicitly + */ + if (mmcclock > 0) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Decrease host clock from %d to %d(%d,%d)\n", + clock, clock_set, + func->card->cis.max_dtr, + device->host->f_max)); + device->host->ios.clock = clock_set; + device->host->ops->set_ios(device->host, + &device->host->ios); + } + /* Set SDIO3.0 */ + /* Set MMC Bus Width: 1-1Bit, 4-4Bit, 8-8Bit */ + if (mmcbuswidth > 0) { + if (mmcbuswidth == 1) { + ret = + func0_cmd52_write_byte(func->card, + SDIO_CCCR_IF, + SDIO_BUS_CD_DISABLE + | + SDIO_BUS_WIDTH_1BIT); + if (ret) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: CMD52 to set bus width failed: %d\n", + __func__, ret)); + goto del_hif_dev;; + } + device->host->ios.bus_width = + MMC_BUS_WIDTH_1; + device->host->ops->set_ios(device->host, + &device-> + host->ios); + } else if (mmcbuswidth == 4 + && (device->host-> + caps & MMC_CAP_4_BIT_DATA)) { + ret = + func0_cmd52_write_byte(func->card, + SDIO_CCCR_IF, + SDIO_BUS_CD_DISABLE + | + SDIO_BUS_WIDTH_4BIT); + if (ret) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: CMD52 to bus width failed: %d\n", + __func__, + ret)); + goto del_hif_dev; + } + device->host->ios.bus_width = + MMC_BUS_WIDTH_4; + device->host->ops->set_ios(device->host, + &device-> + host->ios); + } else if 
(mmcbuswidth == 8 + && (device->host-> + caps & MMC_CAP_8_BIT_DATA)) { + ret = hif_cmd52_write_byte_8bit(func); + if (ret) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: CMD52 to bus width failed: %d\n", + __func__, + ret)); + goto del_hif_dev; + } + device->host->ios.bus_width = + MMC_BUS_WIDTH_8; + device->host->ops->set_ios(device->host, + &device-> + host->ios); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: MMC bus width %d is not supported.\n", + __func__, + mmcbuswidth)); + ret = QDF_STATUS_E_FAILURE; + goto del_hif_dev; + } + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("%s: Set MMC bus width to %dBit.\n", + __func__, mmcbuswidth)); + } + if (debugcccr) + hif_dump_cccr(device); + + sdio_release_host(func); + } + + qdf_spinlock_create(&device->lock); + + qdf_spinlock_create(&device->asynclock); + + DL_LIST_INIT(&device->scatter_req_head); + + if (!nohifscattersupport) { + /* try to allow scatter operation on all instances, + * unless globally overridden + */ + device->scatter_enabled = true; + } else + device->scatter_enabled = false; + + /* Initialize the bus requests to be used later */ + qdf_mem_zero(device->bus_request, sizeof(device->bus_request)); + for (count = 0; count < BUS_REQUEST_MAX_NUM; count++) { + sema_init(&device->bus_request[count].sem_req, 0); + hif_free_bus_request(device, &device->bus_request[count]); + } + sema_init(&device->sem_async, 0); + + ret = hif_enable_func(device, func); + if ((ret == QDF_STATUS_SUCCESS || ret == QDF_STATUS_E_PENDING)) + return 0; + ret = QDF_STATUS_E_FAILURE; +del_hif_dev: + del_hif_device(device); + for (i = 0; i < MAX_HIF_DEVICES; ++i) { + if (hif_devices[i] == device) { + hif_devices[i] = NULL; + break; + } + } + if (i == MAX_HIF_DEVICES) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: No hif_devices[] slot for %pK", + __func__, device)); + } + return ret; +} + +/** + * hif_ack_interrupt() - Acknowledge hif device irq + * @device: pointer to struct hif_sdio_dev + * + * This should translate to an acknowledgment to the 
bus driver indicating that + * the previous interrupt request has been serviced and the all the relevant + * sources have been cleared. HTC is ready to process more interrupts. + * This should prevent the bus driver from raising an interrupt unless the + * previous one has been serviced and acknowledged using the previous API. + * + * Return: None. + */ +void hif_ack_interrupt(struct hif_sdio_dev *device) +{ + AR_DEBUG_ASSERT(device != NULL); + + /* Acknowledge our function IRQ */ +} + +/** + * hif_un_mask_interrupt() - Re-enable hif device irq + * @device: pointer to struct hif_sdio_dev + * + * + * Return: None. + */ +void hif_un_mask_interrupt(struct hif_sdio_dev *device) +{ + int ret; + + AR_DEBUG_ASSERT(device != NULL); + AR_DEBUG_ASSERT(device->func != NULL); + + HIF_ENTER(); + /* + * On HP Elitebook 8460P, interrupt mode is not stable + * in high throughput, so polling method should be used + * instead of interrupt mode. + */ + if (brokenirq) { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("%s: Using broken IRQ mode\n", + __func__)); + /* disable IRQ support even the capability exists */ + device->func->card->host->caps &= ~MMC_CAP_SDIO_IRQ; + } + /* Register the IRQ Handler */ + sdio_claim_host(device->func); + ret = sdio_claim_irq(device->func, hif_irq_handler); + sdio_release_host(device->func); + AR_DEBUG_ASSERT(ret == 0); + HIF_EXIT(); +} + +/** + * hif_mask_interrupt() - Disable hif device irq + * @device: pointer to struct hif_sdio_dev + * + * + * Return: None. 
+ */ +void hif_mask_interrupt(struct hif_sdio_dev *device) +{ + int ret; + + AR_DEBUG_ASSERT(device != NULL); + AR_DEBUG_ASSERT(device->func != NULL); + + HIF_ENTER(); + + /* Mask our function IRQ */ + sdio_claim_host(device->func); + while (atomic_read(&device->irq_handling)) { + sdio_release_host(device->func); + schedule_timeout_interruptible(HZ / 10); + sdio_claim_host(device->func); + } + ret = sdio_release_irq(device->func); + sdio_release_host(device->func); + if (ret) { + if (ret == -ETIMEDOUT) { + AR_DEBUG_PRINTF(ATH_DEBUG_WARN, + ("%s: Timeout to mask interrupt\n", + __func__)); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: Unable to mask interrupt %d\n", + __func__, ret)); + AR_DEBUG_ASSERT(ret == 0); + } + } + HIF_EXIT(); +} + +/** + * hif_allocate_bus_request() - Allocate hif bus request + * @device: pointer to struct hif_sdio_dev + * + * + * Return: pointer to struct bus_request structure. + */ +struct bus_request *hif_allocate_bus_request(struct hif_sdio_dev *device) +{ + struct bus_request *busrequest; + + qdf_spin_lock_irqsave(&device->lock); + busrequest = device->bus_request_free_queue; + /* Remove first in list */ + if (busrequest != NULL) + device->bus_request_free_queue = busrequest->next; + + /* Release lock */ + qdf_spin_unlock_irqrestore(&device->lock); + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: hif_allocate_bus_request: 0x%pK\n", + __func__, busrequest)); + + return busrequest; +} + +/** + * hif_free_bus_request() - Free hif bus request + * @device: pointer to struct hif_sdio_dev + * + * + * Return: None. 
+ */ +void hif_free_bus_request(struct hif_sdio_dev *device, + struct bus_request *busrequest) +{ + AR_DEBUG_ASSERT(busrequest != NULL); + /* Acquire lock */ + qdf_spin_lock_irqsave(&device->lock); + + /* Insert first in list */ + busrequest->next = device->bus_request_free_queue; + busrequest->inusenext = NULL; + device->bus_request_free_queue = busrequest; + + /* Release lock */ + qdf_spin_unlock_irqrestore(&device->lock); +} + +static QDF_STATUS hif_disable_func(struct hif_sdio_dev *device, + struct sdio_func *func) +{ + int ret; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + HIF_ENTER(); + device = get_hif_device(func); + if (!IS_ERR(device->async_task)) { + init_completion(&device->async_completion); + device->async_shutdown = 1; + up(&device->sem_async); + wait_for_completion(&device->async_completion); + device->async_task = NULL; + sema_init(&device->sem_async, 0); + } + /* Disable the card */ + sdio_claim_host(device->func); + ret = sdio_disable_func(device->func); + if (ret) + status = QDF_STATUS_E_FAILURE; + + if (reset_sdio_on_unload && status == QDF_STATUS_SUCCESS) { + /* reset the SDIO interface. It's useful in automated testing + * where the card does not need to be removed at the end + * of the test. 
It is expected that the user will also + * un/reload the host controller driver to force the bus + * driver to re-enumerate the slot + */ + AR_DEBUG_PRINTF(ATH_DEBUG_WARN, + ("%s: resetting SDIO card", + __func__)); + + /* sdio_f0_writeb() cannot be used here, this allows access + * to undefined registers in the range of: 0xF0-0xFF + */ + + ret = + func0_cmd52_write_byte(device->func->card, + SDIO_CCCR_ABORT, + (1 << 3)); + if (ret) { + status = QDF_STATUS_E_FAILURE; + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: reset failed : %d\n", + __func__, ret)); + } + } + + sdio_release_host(device->func); + + if (status == QDF_STATUS_SUCCESS) + device->is_disabled = true; + cleanup_hif_scatter_resources(device); + + HIF_EXIT(); + + return status; +} + +static QDF_STATUS hif_enable_func(struct hif_sdio_dev *device, + struct sdio_func *func) +{ + struct task_struct *task; + const char *task_name = NULL; + int (*taskFunc)(void *) = NULL; + int ret = QDF_STATUS_SUCCESS; + + HIF_ENTER("sdio_func 0x%pK", func); + + device = get_hif_device(func); + + if (!device) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HIF device is NULL\n")); + return QDF_STATUS_E_INVAL; + } + + if (device->is_disabled) { + int setAsyncIRQ = 0; + __u16 manufacturer_id = + device->id->device & MANUFACTURER_ID_AR6K_BASE_MASK; + /* enable the SDIO function */ + sdio_claim_host(func); + /* enable 4-bit ASYNC interrupt on AR6003x or later devices */ + if (manufacturer_id == MANUFACTURER_ID_AR6003_BASE) { + setAsyncIRQ = 1; + ret = + func0_cmd52_write_byte(func->card, + CCCR_SDIO_IRQ_MODE_REG_AR6003, + SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6003); + } else if (manufacturer_id == MANUFACTURER_ID_AR6320_BASE || + manufacturer_id == MANUFACTURER_ID_QCA9377_BASE || + manufacturer_id == MANUFACTURER_ID_QCA9379_BASE) { + unsigned char data = 0; + + setAsyncIRQ = 1; + ret = + func0_cmd52_read_byte(func->card, + CCCR_SDIO_IRQ_MODE_REG_AR6320, + &data); + if (ret) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: failed to read irq reg %d\n", + 
__func__, ret)); + sdio_release_host(func); + return QDF_STATUS_E_FAILURE; + } + data |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_AR6320; + ret = + func0_cmd52_write_byte(func->card, + CCCR_SDIO_IRQ_MODE_REG_AR6320, + data); + } + if (setAsyncIRQ) { + if (ret) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: failed to enable ASYNC IRQ mode %d\n", + __func__, ret)); + sdio_release_host(func); + return QDF_STATUS_E_FAILURE; + } + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: 4-bit ASYNC IRQ mode enabled\n", + __func__)); + } + + /* set CCCR 0xF0[7:6] to increase async interrupt delay clock to + * fix interrupt missing issue on dell 8460p + */ + if (asyncintdelay != 0) { + unsigned char data = 0; + + ret = func0_cmd52_read_byte(func->card, + CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS, + &data); + if (ret) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: failed to read CCCR %d, val is %d\n", + __func__, + CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS, + ret)); + sdio_release_host(func); + return QDF_STATUS_E_FAILURE; + } + data = (data & ~CCCR_SDIO_ASYNC_INT_DELAY_MASK) | + ((asyncintdelay << + CCCR_SDIO_ASYNC_INT_DELAY_LSB) & + CCCR_SDIO_ASYNC_INT_DELAY_MASK); + ret = + func0_cmd52_write_byte(func->card, + CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS, + data); + if (ret) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: failed to write CCCR %d, val is %d\n", + __func__, + CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS, + ret)); + sdio_release_host(func); + return QDF_STATUS_E_FAILURE; + } + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: Set async interrupt delay clock as %d.\n", + __func__, + asyncintdelay)); + } + /* give us some time to enable, in ms */ + func->enable_timeout = 100; + ret = sdio_enable_func(func); + if (ret) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: Unable to enable AR6K: 0x%X\n", + __func__, ret)); + sdio_release_host(func); + return QDF_STATUS_E_FAILURE; + } + ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE); + + if (modstrength) { + unsigned int address = WINDOW_DATA_ADDRESS; + unsigned int value = 0x0FFF; + + ret = 
sdio_memcpy_toio(device->func, address, + &value, 4); + if (ret) { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("memcpy_toio 0x%x 0x%x error:%d\n", + address, value, ret)); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("memcpy_toio, 0x%x 0x%x OK\n", address, + value)); + address = WINDOW_WRITE_ADDR_ADDRESS; + value = 0x50F8; + ret = + sdio_memcpy_toio(device->func, address, + &value, 4); + if (ret) + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("memcpy_toio 0x%x 0x%x error:%d\n", + address, value, ret)); + else + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("memcpy_toio, 0x%x 0x%x OK\n", + address, value)); + } + }; + sdio_release_host(func); + if (ret) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: can't set block size 0x%x AR6K: 0x%X\n", + __func__, HIF_MBOX_BLOCK_SIZE, + ret)); + return QDF_STATUS_E_FAILURE; + } + device->is_disabled = false; + /* create async I/O thread */ + if (!device->async_task) { + device->async_shutdown = 0; + device->async_task = kthread_create(async_task, + (void *)device, + "AR6K Async"); + if (IS_ERR(device->async_task)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: to create async task\n", + __func__)); + return QDF_STATUS_E_FAILURE; + } + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: start async task\n", + __func__)); + wake_up_process(device->async_task); + } + } + + if (!device->claimed_ctx) { + taskFunc = startup_task; + task_name = "AR6K startup"; + ret = QDF_STATUS_SUCCESS; + } else { + taskFunc = enable_task; + task_name = "AR6K enable"; + ret = QDF_STATUS_E_PENDING; + } + /* create resume thread */ + task = kthread_create(taskFunc, (void *)device, task_name); + if (IS_ERR(task)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: to create enabel task\n", + __func__)); + return QDF_STATUS_E_FAILURE; + } + wake_up_process(task); + + /* task will call the enable func, indicate pending */ + HIF_EXIT(); + + return ret; +} + +int hif_device_suspend(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + QDF_STATUS status = QDF_STATUS_SUCCESS; + int ret 
= QDF_STATUS_SUCCESS; +#if defined(MMC_PM_KEEP_POWER) + mmc_pm_flag_t pm_flag = 0; + enum HIF_DEVICE_POWER_CHANGE_TYPE config; + struct mmc_host *host = NULL; +#endif + + struct hif_sdio_dev *device = get_hif_device(func); + +#if defined(MMC_PM_KEEP_POWER) + if (device && device->func) + host = device->func->card->host; +#endif + + HIF_ENTER(); + if (device && device->claimed_ctx + && osdrv_callbacks.device_suspend_handler) { + device->is_suspend = true; + status = osdrv_callbacks.device_suspend_handler( + device->claimed_ctx); +#if defined(MMC_PM_KEEP_POWER) + switch (forcesleepmode) { + case 0: /* depend on sdio host pm capbility */ + pm_flag = sdio_get_host_pm_caps(func); + break; + case 1: /* force WOW */ + pm_flag |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ; + break; + case 2: /* force DeepSleep */ + pm_flag &= ~MMC_PM_WAKE_SDIO_IRQ; + pm_flag |= MMC_PM_KEEP_POWER; + break; + case 3: /* force CutPower */ + pm_flag &= + ~(MMC_PM_WAKE_SDIO_IRQ | MMC_PM_WAKE_SDIO_IRQ); + break; + } + if (!(pm_flag & MMC_PM_KEEP_POWER)) { + /* cut power support */ + /* setting power_config before hif_configure_device to + * skip sdio r/w when suspending with cut power + */ + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("hif_device_suspend: cut power enter\n")); + config = HIF_DEVICE_POWER_CUT; + device->power_config = config; + if ((device->claimed_ctx != NULL) + && osdrv_callbacks.device_removed_handler) { + status = osdrv_callbacks. 
+ device_removed_handler(device-> + claimed_ctx, + device); + } + ret = hif_configure_device(device, + HIF_DEVICE_POWER_STATE_CHANGE, + &config, + sizeof + (enum HIF_DEVICE_POWER_CHANGE_TYPE)); + if (ret) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: hif config device failed: %d\n", + __func__, ret)); + return ret; + } + + hif_mask_interrupt(device); + device->device_state = HIF_DEVICE_STATE_CUTPOWER; + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("hif_device_suspend: cut power success\n")); + return ret; + } + ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (ret) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: set sdio pm flags failed %d\n", + __func__, ret)); + return ret; + } + + /* TODO:WOW support */ + if (pm_flag & MMC_PM_WAKE_SDIO_IRQ) { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("hif_device_suspend: wow enter\n")); + config = HIF_DEVICE_POWER_DOWN; + ret = hif_configure_device(device, + HIF_DEVICE_POWER_STATE_CHANGE, + &config, + sizeof + (enum HIF_DEVICE_POWER_CHANGE_TYPE)); + + if (ret) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: hif config dev failed: %d\n", + __func__, ret)); + return ret; + } + ret = sdio_set_host_pm_flags(func, + MMC_PM_WAKE_SDIO_IRQ); + if (ret) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: set sdio pm flags %d\n", + __func__, ret)); + return ret; + } + hif_mask_interrupt(device); + device->device_state = HIF_DEVICE_STATE_WOW; + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("hif_device_suspend: wow success\n")); + return ret; + } + /* deep sleep support */ + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: deep sleep enter\n", + __func__)); + + /* + * Wait for some async clean handler finished. + * These handlers are part of vdev disconnect. + * As handlers are async,sleep is not suggested, + * some blocking method may be a good choice. + * But before adding callback function to these + * handler, sleep wait is a simple method. 
+ */ + msleep(100); + hif_mask_interrupt(device); + device->device_state = HIF_DEVICE_STATE_DEEPSLEEP; + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: deep sleep done\n", + __func__)); + return ret; +#endif + } + + HIF_EXIT(); + + switch (status) { + case QDF_STATUS_SUCCESS: +#if defined(MMC_PM_KEEP_POWER) + if (host) { + host->pm_flags &= + ~(MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ); + } +#endif + return 0; + case QDF_STATUS_E_BUSY: +#if defined(MMC_PM_KEEP_POWER) + if (host) { + /* WAKE_SDIO_IRQ in order to wake up by DAT1 */ + host->pm_flags |= + (MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ); + host->pm_flags &= host->pm_caps; + } + return 0; +#else + return -EBUSY; /* Hack to support deep sleep and wow */ +#endif + default: + device->is_suspend = false; + + return QDF_STATUS_E_FAILURE; + } +} + +int hif_device_resume(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + QDF_STATUS status = QDF_STATUS_SUCCESS; + enum HIF_DEVICE_POWER_CHANGE_TYPE config; + struct hif_sdio_dev *device; + + device = get_hif_device(func); + if (!device) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("get hif device failed\n")); + return QDF_STATUS_E_FAILURE; + } + + if (device->device_state == HIF_DEVICE_STATE_CUTPOWER) { + config = HIF_DEVICE_POWER_UP; + status = hif_configure_device(device, + HIF_DEVICE_POWER_STATE_CHANGE, + &config, + sizeof(enum + HIF_DEVICE_POWER_CHANGE_TYPE)); + if (status) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: hif_configure_device failed\n", + __func__)); + return status; + } + } else if (device->device_state == HIF_DEVICE_STATE_DEEPSLEEP) { + hif_un_mask_interrupt(device); + } else if (device->device_state == HIF_DEVICE_STATE_WOW) { + /*TODO:WOW support */ + hif_un_mask_interrupt(device); + } + + /* + * device_resume_handler do nothing now. If some operation + * should be added to this handler in power cut + * resume flow, do make sure those operation is not + * depent on what startup_task has done,or the resume + * flow will block. 
+ */ + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: +hif_device_resume\n", + __func__)); + if (device->claimed_ctx + && osdrv_callbacks.device_suspend_handler) { + status = + osdrv_callbacks.device_resume_handler(device->claimed_ctx); + device->is_suspend = false; + } + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: -hif_device_resume\n", + __func__)); + device->device_state = HIF_DEVICE_STATE_ON; + + return QDF_IS_STATUS_SUCCESS(status) ? 0 : status; +} + +static void hif_device_removed(struct sdio_func *func) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct hif_sdio_dev *device; + int i; + + AR_DEBUG_ASSERT(func != NULL); + HIF_ENTER(); + device = get_hif_device(func); + + if (device->power_config == HIF_DEVICE_POWER_CUT) { + device->func = NULL; /* func will be free by mmc stack */ + return; /* Just return for cut-off mode */ + } + for (i = 0; i < MAX_HIF_DEVICES; ++i) { + if (hif_devices[i] == device) + hif_devices[i] = NULL; + } + + if (device->claimed_ctx != NULL) + status = + osdrv_callbacks.device_removed_handler(device->claimed_ctx, + device); + + hif_mask_interrupt(device); + + if (device->is_disabled) + device->is_disabled = false; + else + status = hif_disable_func(device, func); + + + del_hif_device(device); + if (status != QDF_STATUS_SUCCESS) + AR_DEBUG_PRINTF(ATH_DEBUG_WARN, + ("%s: Unable to disable sdio func\n", + __func__)); + + HIF_EXIT(); +} + +/* + * This should be moved to AR6K HTC layer. 
+ */ +QDF_STATUS hif_wait_for_pending_recv(struct hif_sdio_dev *device) +{ + int32_t cnt = 10; + uint8_t host_int_status; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + do { + while (atomic_read(&device->irq_handling)) { + /* wait until irq handler finished all the jobs */ + schedule_timeout_interruptible(HZ / 10); + } + /* check if there is any pending irq due to force done */ + host_int_status = 0; + status = hif_read_write(device, HOST_INT_STATUS_ADDRESS, + (uint8_t *) &host_int_status, + sizeof(host_int_status), + HIF_RD_SYNC_BYTE_INC, NULL); + host_int_status = + QDF_IS_STATUS_SUCCESS(status) ? + (host_int_status & (1 << 0)) : 0; + if (host_int_status) + /* wait until irq handler finishs its job */ + schedule_timeout_interruptible(1); + } while (host_int_status && --cnt > 0); + + if (host_int_status && cnt == 0) + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: Unable clear up pending IRQ\n", + __func__)); + + return QDF_STATUS_SUCCESS; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)) && \ + !defined(WITH_BACKPORTS) +/** + * hif_sdio_set_drvdata() - set driver data + * @func: pointer to sdio function + * @hifdevice: pointer to hif device + * + * Return: non zero for success. 
+ */ +static inline int hif_sdio_set_drvdata(struct sdio_func *func, + struct hif_sdio_dev *hifdevice) +{ + return sdio_set_drvdata(func, hifdevice); +} +#else +static inline int hif_sdio_set_drvdata(struct sdio_func *func, + struct hif_sdio_dev *hifdevice) +{ + sdio_set_drvdata(func, hifdevice); + return 0; +} +#endif + +static struct hif_sdio_dev *add_hif_device(struct sdio_func *func) +{ + struct hif_sdio_dev *hifdevice = NULL; + int ret = 0; + + HIF_ENTER(); + AR_DEBUG_ASSERT(func != NULL); + hifdevice = (struct hif_sdio_dev *) qdf_mem_malloc(sizeof( + struct hif_sdio_dev)); + AR_DEBUG_ASSERT(hifdevice != NULL); + if (hifdevice == NULL) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("Alloc hif device fail\n")); + return NULL; + } +#if HIF_USE_DMA_BOUNCE_BUFFER + hifdevice->dma_buffer = qdf_mem_malloc(HIF_DMA_BUFFER_SIZE); + AR_DEBUG_ASSERT(hifdevice->dma_buffer != NULL); + if (hifdevice->dma_buffer == NULL) { + qdf_mem_free(hifdevice); + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("Alloc dma buffer fail\n")); + return NULL; + } +#endif + hifdevice->func = func; + hifdevice->power_config = HIF_DEVICE_POWER_UP; + hifdevice->device_state = HIF_DEVICE_STATE_ON; + ret = hif_sdio_set_drvdata(func, hifdevice); + HIF_EXIT("status %d", ret); + + return hifdevice; +} + +static struct hif_sdio_dev *get_hif_device(struct sdio_func *func) +{ + AR_DEBUG_ASSERT(func != NULL); + + return (struct hif_sdio_dev *) sdio_get_drvdata(func); +} + +static void del_hif_device(struct hif_sdio_dev *device) +{ + AR_DEBUG_ASSERT(device != NULL); + AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, + ("%s: deleting hif device 0x%pK\n", + __func__, device)); + if (device->dma_buffer != NULL) + qdf_mem_free(device->dma_buffer); + + qdf_mem_free(device); +} + +static void reset_all_cards(void) +{ +} + +QDF_STATUS hif_attach_htc(struct hif_sdio_dev *device, + struct htc_callbacks *callbacks) +{ + if (device->htc_callbacks.context != NULL) + /* already in use! 
*/ + return QDF_STATUS_E_FAILURE; + device->htc_callbacks = *callbacks; + + return QDF_STATUS_SUCCESS; +} + +void hif_detach_htc(struct hif_opaque_softc *hif_ctx) +{ + struct hif_sdio_softc *scn = HIF_GET_SDIO_SOFTC(hif_ctx); + struct hif_sdio_dev *hif_device = scn->hif_handle; + + qdf_mem_zero(&hif_device->htc_callbacks, + sizeof(hif_device->htc_callbacks)); +} + +#define SDIO_SET_CMD52_ARG(arg, rw, func, raw, address, writedata) \ + ((arg) = (((rw) & 1) << 31) | \ + ((func & 0x7) << 28) | \ + (((raw) & 1) << 27) | \ + (1 << 26) | \ + (((address) & 0x1FFFF) << 9) | \ + (1 << 8) | \ + ((writedata) & 0xFF)) + +#define SDIO_SET_CMD52_READ_ARG(arg, func, address) \ + SDIO_SET_CMD52_ARG(arg, 0, (func), 0, address, 0x00) +#define SDIO_SET_CMD52_WRITE_ARG(arg, func, address, value) \ + SDIO_SET_CMD52_ARG(arg, 1, (func), 0, address, value) + +static int func0_cmd52_write_byte(struct mmc_card *card, + unsigned int address, + unsigned char byte) +{ + struct mmc_command io_cmd; + unsigned long arg; + int status = 0; + + memset(&io_cmd, 0, sizeof(io_cmd)); + SDIO_SET_CMD52_WRITE_ARG(arg, 0, address, byte); + io_cmd.opcode = SD_IO_RW_DIRECT; + io_cmd.arg = arg; + io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC; + status = mmc_wait_for_cmd(card->host, &io_cmd, 0); + + if (status) + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: mmc_wait_for_cmd returned %d\n", + __func__, status)); + + return status; +} + +static int func0_cmd52_read_byte(struct mmc_card *card, + unsigned int address, + unsigned char *byte) +{ + struct mmc_command io_cmd; + unsigned long arg; + int32_t err; + + memset(&io_cmd, 0, sizeof(io_cmd)); + SDIO_SET_CMD52_READ_ARG(arg, 0, address); + io_cmd.opcode = SD_IO_RW_DIRECT; + io_cmd.arg = arg; + io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC; + + err = mmc_wait_for_cmd(card->host, &io_cmd, 0); + + if ((!err) && (byte)) + *byte = io_cmd.resp[0] & 0xFF; + + if (err) + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%s: mmc_wait_for_cmd returned %d\n", + __func__, err)); + + return err; +} + +void 
hif_dump_cccr(struct hif_sdio_dev *hif_device) +{ + int i; + uint8_t cccr_val; + uint32_t err; + + if (!hif_device || !hif_device->func || + !hif_device->func->card) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("hif_dump_cccr incorrect input arguments\n")); + return; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("hif_dump_cccr ")); + for (i = 0; i <= 0x16; i++) { + err = func0_cmd52_read_byte(hif_device->func->card, + i, &cccr_val); + if (err) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("Reading CCCR 0x%02X failed: %d\n", + (unsigned int)i, (unsigned int)err)); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("%X(%X) ", (unsigned int)i, + (unsigned int)cccr_val)); + } + } + + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("\n")); +} + +int hif_sdio_device_inserted(struct device *dev, + const struct sdio_device_id *id) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + + return hif_device_inserted(func, id); +} + +void hif_sdio_device_removed(struct sdio_func *func) +{ + hif_device_removed(func); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/src/hif_scatter.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/src/hif_scatter.c new file mode 100644 index 0000000000000000000000000000000000000000..fb135b39288785e7a42775fa03c5071d0186168c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/native_sdio/src/hif_scatter.c @@ -0,0 +1,477 @@ +/* + * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include "hif_internal.h" +#include +#include "dl_list.h" +#define ATH_MODULE_NAME hif +#include "a_debug.h" + +#ifdef HIF_LINUX_MMC_SCATTER_SUPPORT + +#define _CMD53_ARG_READ 0 +#define _CMD53_ARG_WRITE 1 +#define _CMD53_ARG_BLOCK_BASIS 1 +#define _CMD53_ARG_FIXED_ADDRESS 0 +#define _CMD53_ARG_INCR_ADDRESS 1 + +#define SDIO_SET_CMD53_ARG(arg, rw, func, mode, opcode, address, bytes_blocks) \ + ((arg) = (((rw) & 1) << 31) | \ + ((func & 0x7) << 28) | \ + (((mode) & 1) << 27) | \ + (((opcode) & 1) << 26) | \ + (((address) & 0x1FFFF) << 9) | \ + ((bytes_blocks) & 0x1FF)) + +/** + * free_scatter_req() - free scattered request. + * @device: hif device context + * @pReq: scatter list node + * + * Return: none + */ +static void free_scatter_req(struct hif_sdio_dev *device, + struct _HIF_SCATTER_REQ *pReq) +{ + qdf_spin_lock_irqsave(&device->lock); + + dl_list_insert_tail(&device->scatter_req_head, &pReq->list_link); + + qdf_spin_unlock_irqrestore(&device->lock); +} + +/** + * alloc_scatter_req() - allocate scattered request. + * @device: hif device context + * + * + * Return: pointer to allocated scatter list node + */ +static struct _HIF_SCATTER_REQ *alloc_scatter_req(struct hif_sdio_dev *device) +{ + DL_LIST *item; + + qdf_spin_lock_irqsave(&device->lock); + + item = dl_list_remove_item_from_head(&device->scatter_req_head); + + qdf_spin_unlock_irqrestore(&device->lock); + + if (item != NULL) + return A_CONTAINING_STRUCT(item, + struct _HIF_SCATTER_REQ, list_link); + + return NULL; +} + +/** + * do_hif_read_write_scatter() - rd/wr scattered operation. 
+ * @device: hif device context + * @busrequest: rd/wr bus request + * + * called by async task to perform the operation synchronously + * using direct MMC APIs + * Return: int + */ +QDF_STATUS do_hif_read_write_scatter(struct hif_sdio_dev *device, + struct bus_request *busrequest) +{ + int i; + uint8_t rw; + uint8_t opcode; + struct mmc_request mmcreq; + struct mmc_command cmd; + struct mmc_data data; + struct HIF_SCATTER_REQ_PRIV *req_priv; + struct _HIF_SCATTER_REQ *req; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct scatterlist *sg; + + HIF_ENTER(); + + req_priv = busrequest->scatter_req; + + A_ASSERT(req_priv != NULL); + if (!req_priv) { + return QDF_STATUS_E_FAILURE; + } + + req = req_priv->hif_scatter_req; + + memset(&mmcreq, 0, sizeof(struct mmc_request)); + memset(&cmd, 0, sizeof(struct mmc_command)); + memset(&data, 0, sizeof(struct mmc_data)); + + data.blksz = HIF_MBOX_BLOCK_SIZE; + data.blocks = req->total_length / HIF_MBOX_BLOCK_SIZE; + + AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, + ("HIF-SCATTER: (%s) Address: 0x%X, (BlockLen: %d, BlockCount: %d), (tot:%d,sg:%d)\n", + (req->request & HIF_SDIO_WRITE) ? 
"WRITE" : "READ", + req->address, data.blksz, data.blocks, + req->total_length, req->valid_scatter_entries)); + + if (req->request & HIF_SDIO_WRITE) { + rw = _CMD53_ARG_WRITE; + data.flags = MMC_DATA_WRITE; + } else { + rw = _CMD53_ARG_READ; + data.flags = MMC_DATA_READ; + } + + if (req->request & HIF_FIXED_ADDRESS) + opcode = _CMD53_ARG_FIXED_ADDRESS; + else + opcode = _CMD53_ARG_INCR_ADDRESS; + + /* fill SG entries */ + sg = req_priv->sgentries; + sg_init_table(sg, req->valid_scatter_entries); + + /* assemble SG list */ + for (i = 0; i < req->valid_scatter_entries; i++, sg++) { + /* setup each sg entry */ + if ((unsigned long)req->scatter_list[i].buffer & 0x3) { + /* note some scatter engines can handle unaligned + * buffers, print this as informational only + */ + AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, + ("HIF: (%s) Scatter Buf is unaligned 0x%lx\n", + req-> + request & HIF_SDIO_WRITE ? "WRITE" : "READ", + (unsigned long)req->scatter_list[i]. + buffer)); + } + + AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, + (" %d: Addr:0x%lX, Len:%d\n", i, + (unsigned long)req->scatter_list[i].buffer, + req->scatter_list[i].length)); + + sg_set_buf(sg, req->scatter_list[i].buffer, + req->scatter_list[i].length); + } + /* set scatter-gather table for request */ + data.sg = req_priv->sgentries; + data.sg_len = req->valid_scatter_entries; + /* set command argument */ + SDIO_SET_CMD53_ARG(cmd.arg, + rw, + device->func->num, + _CMD53_ARG_BLOCK_BASIS, + opcode, req->address, data.blocks); + + cmd.opcode = SD_IO_RW_EXTENDED; + cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; + + mmcreq.cmd = &cmd; + mmcreq.data = &data; + + mmc_set_data_timeout(&data, device->func->card); + /* synchronous call to process request */ + mmc_wait_for_req(device->func->card->host, &mmcreq); + + if (cmd.error) { + status = QDF_STATUS_E_FAILURE; + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("HIF-SCATTER: cmd error: %d\n", cmd.error)); + } + + if (data.error) { + status = QDF_STATUS_E_FAILURE; + 
AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("HIF-SCATTER: data error: %d\n", data.error)); + } + + if (QDF_IS_STATUS_ERROR(status)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("HIF-SCATTER: FAILED!!! (%s) Address: 0x%X, Block mode (BlockLen: %d, BlockCount: %d)\n", + (req->request & HIF_SDIO_WRITE) ? "WRITE" : "READ", + req->address, data.blksz, data.blocks)); + } + + /* set completion status, fail or success */ + req->completion_status = status; + + if (req->request & HIF_ASYNCHRONOUS) { + AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, + ("HIF-SCATTER: async_task completion routine req: 0x%lX (%d)\n", + (unsigned long)busrequest, status)); + /* complete the request */ + A_ASSERT(req->completion_routine != NULL); + if (req->completion_routine) { + req->completion_routine(req); + } + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, + ("HIF-SCATTER async_task upping busreq : 0x%lX (%d)\n", + (unsigned long)busrequest, status)); + /* signal wait */ + up(&busrequest->sem_req); + } + HIF_EXIT(); + + return status; +} + +/** + * alloc_scatter_req() - callback to issue a read-write + * scatter request. 
+ * @device: hif device context + * @pReq: rd/wr scatter request + * + * Return: int + */ +static QDF_STATUS hif_read_write_scatter(struct hif_sdio_dev *device, + struct _HIF_SCATTER_REQ *req) +{ + QDF_STATUS status = QDF_STATUS_E_INVAL; + uint32_t request = req->request; + struct HIF_SCATTER_REQ_PRIV *req_priv = + (struct HIF_SCATTER_REQ_PRIV *) req->hif_private[0]; + + do { + + A_ASSERT(req_priv != NULL); + if (!req_priv) { + break; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, + ("HIF-SCATTER: total len: %d Scatter Entries: %d\n", + req->total_length, + req->valid_scatter_entries)); + + if (!(request & HIF_EXTENDED_IO)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("HIF-SCATTER: Invalid command type: 0x%08x\n", + request)); + break; + } + + if (!(request & (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS))) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("HIF-SCATTER: Invalid mode: 0x%08x\n", + request)); + break; + } + + if (!(request & HIF_BLOCK_BASIS)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("HIF-SCATTER: Invalid data mode: 0x%08x\n", + request)); + break; + } + + if (req->total_length > MAX_SCATTER_REQ_TRANSFER_SIZE) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("HIF-SCATTER: Invalid length: %d\n", + req->total_length)); + break; + } + + if (req->total_length == 0) { + A_ASSERT(false); + break; + } + + /* add bus request to the async list for the async + * I/O thread to process + */ + add_to_async_list(device, req_priv->busrequest); + + if (request & HIF_SYNCHRONOUS) { + AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, + ("HIF-SCATTER: queued sync req: 0x%lX\n", + (unsigned long)req_priv->busrequest)); + /* signal thread and wait */ + up(&device->sem_async); + if (down_interruptible(&req_priv->busrequest->sem_req) + != 0) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("HIF-SCATTER: interrupted!\n")); + /* interrupted, exit */ + status = QDF_STATUS_E_FAILURE; + break; + } + status = req->completion_status; + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, + ("HIF-SCATTER: queued async req: 0x%lX\n", + (unsigned 
long)req_priv->busrequest)); + /* wake thread, it will process and then take + * care of the async callback + */ + up(&device->sem_async); + status = QDF_STATUS_SUCCESS; + } + + } while (false); + + if (QDF_IS_STATUS_ERROR(status) && (request & HIF_ASYNCHRONOUS)) { + req->completion_status = status; + req->completion_routine(req); + status = QDF_STATUS_SUCCESS; + } + + return status; +} + +/** + * setup_hif_scatter_support() - setup of HIF scatter resources + * scatter request. + * @device: hif device context + * @pInfo: scatter info + * + * Return: int + */ +QDF_STATUS setup_hif_scatter_support(struct hif_sdio_dev *device, + struct HIF_DEVICE_SCATTER_SUPPORT_INFO *info) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + int i; + struct HIF_SCATTER_REQ_PRIV *req_priv; + struct bus_request *busrequest; + + if (device->func->card->host->max_segs < + MAX_SCATTER_ENTRIES_PER_REQ) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("host only supports scatter of : %d entries, need: %d\n", + device->func->card->host->max_segs, + MAX_SCATTER_ENTRIES_PER_REQ)); + status = QDF_STATUS_E_NOSUPPORT; + goto end; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("max scatter req : %d entries: %d\n", + MAX_SCATTER_REQUESTS, + MAX_SCATTER_ENTRIES_PER_REQ)); + + for (i = 0; i < MAX_SCATTER_REQUESTS; i++) { + /* allocate the private request blob */ + req_priv = + (struct HIF_SCATTER_REQ_PRIV *) + qdf_mem_malloc(sizeof( + struct HIF_SCATTER_REQ_PRIV)); + if (NULL == req_priv) + goto end; + /* save the device instance */ + req_priv->device = device; + /* allocate the scatter request */ + req_priv->hif_scatter_req = + (struct _HIF_SCATTER_REQ *) + qdf_mem_malloc(sizeof(struct _HIF_SCATTER_REQ) + + (MAX_SCATTER_ENTRIES_PER_REQ - + 1) * (sizeof(struct _HIF_SCATTER_ITEM))); + + if (NULL == req_priv->hif_scatter_req) { + qdf_mem_free(req_priv); + goto end; + } + /* back pointer to the private struct */ + req_priv->hif_scatter_req->hif_private[0] = req_priv; + /* allocate a bus request for this scatter request */ 
+ busrequest = hif_allocate_bus_request(device); + if (NULL == busrequest) { + qdf_mem_free(req_priv->hif_scatter_req); + qdf_mem_free(req_priv); + goto end; + } + /* assign the scatter request to this bus request */ + busrequest->scatter_req = req_priv; + /* point back to the request */ + req_priv->busrequest = busrequest; + /* req_priv it to the scatter pool */ + free_scatter_req(device, req_priv->hif_scatter_req); + } + + if (i != MAX_SCATTER_REQUESTS) { + status = QDF_STATUS_E_NOMEM; + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("failed to alloc scatter resources !\n")); + goto end; + } + + /* set scatter function pointers */ + info->allocate_req_func = alloc_scatter_req; + info->free_req_func = free_scatter_req; + info->read_write_scatter_func = hif_read_write_scatter; + info->max_scatter_entries = MAX_SCATTER_ENTRIES_PER_REQ; + info->max_tx_size_per_scatter_req = + MAX_SCATTER_REQ_TRANSFER_SIZE; + + status = QDF_STATUS_SUCCESS; + +end: + if (QDF_IS_STATUS_ERROR(status)) + cleanup_hif_scatter_resources(device); + + return status; +} + +/** + * cleanup_hif_scatter_resources() - cleanup HIF scatter resources + * scatter request. 
+ * @device: hif device context + * + * + * Return: none + */ +void cleanup_hif_scatter_resources(struct hif_sdio_dev *device) +{ + struct HIF_SCATTER_REQ_PRIV *req_priv; + struct _HIF_SCATTER_REQ *req; + + /* empty the free list */ + + while (true) { + req = alloc_scatter_req(device); + + if (NULL == req) + break; + + req_priv = (struct HIF_SCATTER_REQ_PRIV *)req->hif_private[0]; + A_ASSERT(req_priv != NULL); + if (!req_priv) { + continue; + } + + if (req_priv->busrequest != NULL) { + req_priv->busrequest->scatter_req = NULL; + /* free bus request */ + hif_free_bus_request(device, req_priv->busrequest); + req_priv->busrequest = NULL; + } + + if (req_priv->hif_scatter_req != NULL) { + qdf_mem_free(req_priv->hif_scatter_req); + req_priv->hif_scatter_req = NULL; + } + + qdf_mem_free(req_priv); + } +} + +#endif /* HIF_LINUX_MMC_SCATTER_SUPPORT */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/regtable_sdio.c b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/regtable_sdio.c new file mode 100644 index 0000000000000000000000000000000000000000..540c003e3cd5fc21f2ca189164a12c618fa8d92b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/regtable_sdio.c @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "bmi_msg.h" +#include "target_type.h" +#include "cepci.h" + +#define MISSING 0 +#include "regtable_sdio.h" +#include "targaddrs.h" +#include "if_sdio.h" +#include "ar9888def.h" +#include "ar6320def.h" +#include "ar6320v2def.h" + +void target_register_tbl_attach(struct hif_softc *scn, u32 target_type) +{ + switch (target_type) { + case TARGET_TYPE_AR9888: + scn->targetdef = &ar9888_targetdef; + break; + case TARGET_TYPE_AR6320: + scn->targetdef = &ar6320_targetdef; + break; + case TARGET_TYPE_AR6320V2: + scn->targetdef = &ar6320v2_targetdef; + break; + default: + break; + } +} + +void hif_register_tbl_attach(struct hif_softc *scn, u32 hif_type) +{ + if (NULL == scn) { + QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR, + "%s: sc is NULL", __func__); + return; + } + + switch (hif_type) { + case HIF_TYPE_AR9888: + scn->hostdef = &ar9888_hostdef; + break; + case HIF_TYPE_AR6320: + scn->hostdef = &ar6320_hostdef; + break; + case HIF_TYPE_AR6320V2: + scn->hostdef = &ar6320v2_hostdef; + break; + default: + break; + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/regtable_sdio.h b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/regtable_sdio.h new file mode 100644 index 0000000000000000000000000000000000000000..185d11bbf79c1ef701f0c32a61c993ecf99240f3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/sdio/regtable_sdio.h @@ -0,0 +1,877 @@ +/* + * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _REGTABLE_SDIO_H_ +#define _REGTABLE_SDIO_H_ + +#define MISSING 0 +extern struct hif_sdio_softc *scn; + +struct targetdef_s { + uint32_t d_RTC_SOC_BASE_ADDRESS; + uint32_t d_RTC_WMAC_BASE_ADDRESS; + uint32_t d_SYSTEM_SLEEP_OFFSET; + uint32_t d_WLAN_SYSTEM_SLEEP_OFFSET; + uint32_t d_WLAN_SYSTEM_SLEEP_DISABLE_LSB; + uint32_t d_WLAN_SYSTEM_SLEEP_DISABLE_MASK; + uint32_t d_CLOCK_CONTROL_OFFSET; + uint32_t d_CLOCK_CONTROL_SI0_CLK_MASK; + uint32_t d_RESET_CONTROL_OFFSET; + uint32_t d_RESET_CONTROL_MBOX_RST_MASK; + uint32_t d_RESET_CONTROL_SI0_RST_MASK; + uint32_t d_WLAN_RESET_CONTROL_OFFSET; + uint32_t d_WLAN_RESET_CONTROL_COLD_RST_MASK; + uint32_t d_WLAN_RESET_CONTROL_WARM_RST_MASK; + uint32_t d_GPIO_BASE_ADDRESS; + uint32_t d_GPIO_PIN0_OFFSET; + uint32_t d_GPIO_PIN1_OFFSET; + uint32_t d_GPIO_PIN0_CONFIG_MASK; + uint32_t d_GPIO_PIN1_CONFIG_MASK; + uint32_t d_SI_CONFIG_BIDIR_OD_DATA_LSB; + uint32_t d_SI_CONFIG_BIDIR_OD_DATA_MASK; + uint32_t d_SI_CONFIG_I2C_LSB; + uint32_t d_SI_CONFIG_I2C_MASK; + uint32_t d_SI_CONFIG_POS_SAMPLE_LSB; + uint32_t d_SI_CONFIG_POS_SAMPLE_MASK; + uint32_t d_SI_CONFIG_INACTIVE_CLK_LSB; + uint32_t d_SI_CONFIG_INACTIVE_CLK_MASK; + uint32_t d_SI_CONFIG_INACTIVE_DATA_LSB; + uint32_t 
d_SI_CONFIG_INACTIVE_DATA_MASK; + uint32_t d_SI_CONFIG_DIVIDER_LSB; + uint32_t d_SI_CONFIG_DIVIDER_MASK; + uint32_t d_SI_BASE_ADDRESS; + uint32_t d_SI_CONFIG_OFFSET; + uint32_t d_SI_TX_DATA0_OFFSET; + uint32_t d_SI_TX_DATA1_OFFSET; + uint32_t d_SI_RX_DATA0_OFFSET; + uint32_t d_SI_RX_DATA1_OFFSET; + uint32_t d_SI_CS_OFFSET; + uint32_t d_SI_CS_DONE_ERR_MASK; + uint32_t d_SI_CS_DONE_INT_MASK; + uint32_t d_SI_CS_START_LSB; + uint32_t d_SI_CS_START_MASK; + uint32_t d_SI_CS_RX_CNT_LSB; + uint32_t d_SI_CS_RX_CNT_MASK; + uint32_t d_SI_CS_TX_CNT_LSB; + uint32_t d_SI_CS_TX_CNT_MASK; + uint32_t d_BOARD_DATA_SZ; + uint32_t d_BOARD_EXT_DATA_SZ; + uint32_t d_MBOX_BASE_ADDRESS; + uint32_t d_LOCAL_SCRATCH_OFFSET; + uint32_t d_CPU_CLOCK_OFFSET; + uint32_t d_LPO_CAL_OFFSET; + uint32_t d_GPIO_PIN10_OFFSET; + uint32_t d_GPIO_PIN11_OFFSET; + uint32_t d_GPIO_PIN12_OFFSET; + uint32_t d_GPIO_PIN13_OFFSET; + uint32_t d_CLOCK_GPIO_OFFSET; + uint32_t d_CPU_CLOCK_STANDARD_LSB; + uint32_t d_CPU_CLOCK_STANDARD_MASK; + uint32_t d_LPO_CAL_ENABLE_LSB; + uint32_t d_LPO_CAL_ENABLE_MASK; + uint32_t d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB; + uint32_t d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK; + uint32_t d_ANALOG_INTF_BASE_ADDRESS; + uint32_t d_WLAN_MAC_BASE_ADDRESS; + uint32_t d_FW_INDICATOR_ADDRESS; + uint32_t d_DRAM_BASE_ADDRESS; + uint32_t d_SOC_CORE_BASE_ADDRESS; + uint32_t d_CORE_CTRL_ADDRESS; + uint32_t d_MSI_NUM_REQUEST; + uint32_t d_MSI_ASSIGN_FW; + uint32_t d_CORE_CTRL_CPU_INTR_MASK; + uint32_t d_SR_WR_INDEX_ADDRESS; + uint32_t d_DST_WATERMARK_ADDRESS; + + /* htt_rx.c */ + uint32_t d_RX_MSDU_END_4_FIRST_MSDU_MASK; + uint32_t d_RX_MSDU_END_4_FIRST_MSDU_LSB; + uint32_t d_RX_MPDU_START_0_RETRY_LSB; + uint32_t d_RX_MPDU_START_0_RETRY_MASK; + uint32_t d_RX_MPDU_START_0_SEQ_NUM_MASK; + uint32_t d_RX_MPDU_START_0_SEQ_NUM_LSB; + uint32_t d_RX_MPDU_START_2_PN_47_32_LSB; + uint32_t d_RX_MPDU_START_2_PN_47_32_MASK; + uint32_t d_RX_MPDU_START_2_TID_LSB; + uint32_t d_RX_MPDU_START_2_TID_MASK; + uint32_t 
d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK; + uint32_t d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB; + uint32_t d_RX_MSDU_END_1_KEY_ID_OCT_MASK; + uint32_t d_RX_MSDU_END_1_KEY_ID_OCT_LSB; + uint32_t d_RX_MSDU_END_4_LAST_MSDU_MASK; + uint32_t d_RX_MSDU_END_4_LAST_MSDU_LSB; + uint32_t d_RX_ATTENTION_0_MCAST_BCAST_MASK; + uint32_t d_RX_ATTENTION_0_MCAST_BCAST_LSB; + uint32_t d_RX_ATTENTION_0_FRAGMENT_MASK; + uint32_t d_RX_ATTENTION_0_FRAGMENT_LSB; + uint32_t d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK; + uint32_t d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK; + uint32_t d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB; + uint32_t d_RX_MSDU_START_0_MSDU_LENGTH_MASK; + uint32_t d_RX_MSDU_START_0_MSDU_LENGTH_LSB; + uint32_t d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET; + uint32_t d_RX_MSDU_START_2_DECAP_FORMAT_MASK; + uint32_t d_RX_MSDU_START_2_DECAP_FORMAT_LSB; + uint32_t d_RX_MPDU_START_0_ENCRYPTED_MASK; + uint32_t d_RX_MPDU_START_0_ENCRYPTED_LSB; + uint32_t d_RX_ATTENTION_0_MORE_DATA_MASK; + uint32_t d_RX_ATTENTION_0_MSDU_DONE_MASK; + uint32_t d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK; + /* end */ + + /* PLL start */ + uint32_t d_EFUSE_OFFSET; + uint32_t d_EFUSE_XTAL_SEL_MSB; + uint32_t d_EFUSE_XTAL_SEL_LSB; + uint32_t d_EFUSE_XTAL_SEL_MASK; + uint32_t d_BB_PLL_CONFIG_OFFSET; + uint32_t d_BB_PLL_CONFIG_OUTDIV_MSB; + uint32_t d_BB_PLL_CONFIG_OUTDIV_LSB; + uint32_t d_BB_PLL_CONFIG_OUTDIV_MASK; + uint32_t d_BB_PLL_CONFIG_FRAC_MSB; + uint32_t d_BB_PLL_CONFIG_FRAC_LSB; + uint32_t d_BB_PLL_CONFIG_FRAC_MASK; + uint32_t d_WLAN_PLL_SETTLE_TIME_MSB; + uint32_t d_WLAN_PLL_SETTLE_TIME_LSB; + uint32_t d_WLAN_PLL_SETTLE_TIME_MASK; + uint32_t d_WLAN_PLL_SETTLE_OFFSET; + uint32_t d_WLAN_PLL_SETTLE_SW_MASK; + uint32_t d_WLAN_PLL_SETTLE_RSTMASK; + uint32_t d_WLAN_PLL_SETTLE_RESET; + uint32_t d_WLAN_PLL_CONTROL_NOPWD_MSB; + uint32_t d_WLAN_PLL_CONTROL_NOPWD_LSB; + uint32_t d_WLAN_PLL_CONTROL_NOPWD_MASK; + uint32_t d_WLAN_PLL_CONTROL_BYPASS_MSB; + uint32_t d_WLAN_PLL_CONTROL_BYPASS_LSB; + uint32_t d_WLAN_PLL_CONTROL_BYPASS_MASK; 
+ uint32_t d_WLAN_PLL_CONTROL_BYPASS_RESET; + uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_MSB; + uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_LSB; + uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_MASK; + uint32_t d_WLAN_PLL_CONTROL_CLK_SEL_RESET; + uint32_t d_WLAN_PLL_CONTROL_REFDIV_MSB; + uint32_t d_WLAN_PLL_CONTROL_REFDIV_LSB; + uint32_t d_WLAN_PLL_CONTROL_REFDIV_MASK; + uint32_t d_WLAN_PLL_CONTROL_REFDIV_RESET; + uint32_t d_WLAN_PLL_CONTROL_DIV_MSB; + uint32_t d_WLAN_PLL_CONTROL_DIV_LSB; + uint32_t d_WLAN_PLL_CONTROL_DIV_MASK; + uint32_t d_WLAN_PLL_CONTROL_DIV_RESET; + uint32_t d_WLAN_PLL_CONTROL_OFFSET; + uint32_t d_WLAN_PLL_CONTROL_SW_MASK; + uint32_t d_WLAN_PLL_CONTROL_RSTMASK; + uint32_t d_WLAN_PLL_CONTROL_RESET; + uint32_t d_SOC_CORE_CLK_CTRL_OFFSET; + uint32_t d_SOC_CORE_CLK_CTRL_DIV_MSB; + uint32_t d_SOC_CORE_CLK_CTRL_DIV_LSB; + uint32_t d_SOC_CORE_CLK_CTRL_DIV_MASK; + uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_MSB; + uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_LSB; + uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_MASK; + uint32_t d_RTC_SYNC_STATUS_PLL_CHANGING_RESET; + uint32_t d_RTC_SYNC_STATUS_OFFSET; + uint32_t d_SOC_CPU_CLOCK_OFFSET; + uint32_t d_SOC_CPU_CLOCK_STANDARD_MSB; + uint32_t d_SOC_CPU_CLOCK_STANDARD_LSB; + uint32_t d_SOC_CPU_CLOCK_STANDARD_MASK; + /* PLL end */ + + uint32_t d_SOC_POWER_REG_OFFSET; + uint32_t d_SOC_RESET_CONTROL_ADDRESS; + uint32_t d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK; + uint32_t d_CPU_INTR_ADDRESS; + uint32_t d_SOC_LF_TIMER_CONTROL0_ADDRESS; + uint32_t d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK; + + /* chip id start */ + uint32_t d_SOC_CHIP_ID_ADDRESS; + uint32_t d_SOC_CHIP_ID_VERSION_MASK; + uint32_t d_SOC_CHIP_ID_VERSION_LSB; + uint32_t d_SOC_CHIP_ID_REVISION_MASK; + uint32_t d_SOC_CHIP_ID_REVISION_LSB; + /* chip id end */ + + uint32_t d_A_SOC_CORE_SCRATCH_0_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_1_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_2_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_3_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_4_ADDRESS; + uint32_t 
d_A_SOC_CORE_SCRATCH_5_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_6_ADDRESS; + uint32_t d_A_SOC_CORE_SCRATCH_7_ADDRESS; + uint32_t d_A_SOC_CORE_SPARE_0_REGISTER; + uint32_t d_A_SOC_CORE_SPARE_1_REGISTER; + + uint32_t d_WLAN_DEBUG_INPUT_SEL_OFFSET; + uint32_t d_WLAN_DEBUG_INPUT_SEL_SRC_MSB; + uint32_t d_WLAN_DEBUG_INPUT_SEL_SRC_LSB; + uint32_t d_WLAN_DEBUG_INPUT_SEL_SRC_MASK; + uint32_t d_WLAN_DEBUG_CONTROL_OFFSET; + uint32_t d_WLAN_DEBUG_CONTROL_ENABLE_MSB; + uint32_t d_WLAN_DEBUG_CONTROL_ENABLE_LSB; + uint32_t d_WLAN_DEBUG_CONTROL_ENABLE_MASK; + uint32_t d_WLAN_DEBUG_OUT_OFFSET; + uint32_t d_WLAN_DEBUG_OUT_DATA_MSB; + uint32_t d_WLAN_DEBUG_OUT_DATA_LSB; + uint32_t d_WLAN_DEBUG_OUT_DATA_MASK; + uint32_t d_AMBA_DEBUG_BUS_OFFSET; + uint32_t d_AMBA_DEBUG_BUS_SEL_MSB; + uint32_t d_AMBA_DEBUG_BUS_SEL_LSB; + uint32_t d_AMBA_DEBUG_BUS_SEL_MASK; + +#ifdef QCA_WIFI_3_0_ADRASTEA + uint32_t d_Q6_ENABLE_REGISTER_0; + uint32_t d_Q6_ENABLE_REGISTER_1; + uint32_t d_Q6_CAUSE_REGISTER_0; + uint32_t d_Q6_CAUSE_REGISTER_1; + uint32_t d_Q6_CLEAR_REGISTER_0; + uint32_t d_Q6_CLEAR_REGISTER_1; +#endif +}; + +#define A_SOC_CORE_SPARE_0_REGISTER \ + (scn->targetdef->d_A_SOC_CORE_SPARE_0_REGISTER) +#define A_SOC_CORE_SCRATCH_0_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_0_ADDRESS) +#define A_SOC_CORE_SCRATCH_1_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_1_ADDRESS) +#define A_SOC_CORE_SCRATCH_2_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_2_ADDRESS) +#define A_SOC_CORE_SCRATCH_3_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_3_ADDRESS) +#define A_SOC_CORE_SCRATCH_4_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_4_ADDRESS) +#define A_SOC_CORE_SCRATCH_5_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_5_ADDRESS) +#define A_SOC_CORE_SCRATCH_6_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_6_ADDRESS) +#define A_SOC_CORE_SCRATCH_7_ADDRESS \ + (scn->targetdef->d_A_SOC_CORE_SCRATCH_7_ADDRESS) +#define RTC_SOC_BASE_ADDRESS (scn->targetdef->d_RTC_SOC_BASE_ADDRESS) +#define 
RTC_WMAC_BASE_ADDRESS (scn->targetdef->d_RTC_WMAC_BASE_ADDRESS) +#define SYSTEM_SLEEP_OFFSET (scn->targetdef->d_SYSTEM_SLEEP_OFFSET) +#define WLAN_SYSTEM_SLEEP_OFFSET \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_OFFSET) +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_DISABLE_LSB) +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_DISABLE_MASK) +#define CLOCK_CONTROL_OFFSET (scn->targetdef->d_CLOCK_CONTROL_OFFSET) +#define CLOCK_CONTROL_SI0_CLK_MASK \ + (scn->targetdef->d_CLOCK_CONTROL_SI0_CLK_MASK) +#define RESET_CONTROL_OFFSET (scn->targetdef->d_RESET_CONTROL_OFFSET) +#define RESET_CONTROL_MBOX_RST_MASK \ + (scn->targetdef->d_RESET_CONTROL_MBOX_RST_MASK) +#define RESET_CONTROL_SI0_RST_MASK \ + (scn->targetdef->d_RESET_CONTROL_SI0_RST_MASK) +#define WLAN_RESET_CONTROL_OFFSET \ + (scn->targetdef->d_WLAN_RESET_CONTROL_OFFSET) +#define WLAN_RESET_CONTROL_COLD_RST_MASK \ + (scn->targetdef->d_WLAN_RESET_CONTROL_COLD_RST_MASK) +#define WLAN_RESET_CONTROL_WARM_RST_MASK \ + (scn->targetdef->d_WLAN_RESET_CONTROL_WARM_RST_MASK) +#define GPIO_BASE_ADDRESS (scn->targetdef->d_GPIO_BASE_ADDRESS) +#define GPIO_PIN0_OFFSET (scn->targetdef->d_GPIO_PIN0_OFFSET) +#define GPIO_PIN1_OFFSET (scn->targetdef->d_GPIO_PIN1_OFFSET) +#define GPIO_PIN0_CONFIG_MASK (scn->targetdef->d_GPIO_PIN0_CONFIG_MASK) +#define GPIO_PIN1_CONFIG_MASK (scn->targetdef->d_GPIO_PIN1_CONFIG_MASK) +#define A_SOC_CORE_SCRATCH_0 (scn->targetdef->d_A_SOC_CORE_SCRATCH_0) +#define SI_CONFIG_BIDIR_OD_DATA_LSB \ + (scn->targetdef->d_SI_CONFIG_BIDIR_OD_DATA_LSB) +#define SI_CONFIG_BIDIR_OD_DATA_MASK \ + (scn->targetdef->d_SI_CONFIG_BIDIR_OD_DATA_MASK) +#define SI_CONFIG_I2C_LSB (scn->targetdef->d_SI_CONFIG_I2C_LSB) +#define SI_CONFIG_I2C_MASK \ + (scn->targetdef->d_SI_CONFIG_I2C_MASK) +#define SI_CONFIG_POS_SAMPLE_LSB \ + (scn->targetdef->d_SI_CONFIG_POS_SAMPLE_LSB) +#define SI_CONFIG_POS_SAMPLE_MASK \ + (scn->targetdef->d_SI_CONFIG_POS_SAMPLE_MASK) +#define 
SI_CONFIG_INACTIVE_CLK_LSB \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_CLK_LSB) +#define SI_CONFIG_INACTIVE_CLK_MASK \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_CLK_MASK) +#define SI_CONFIG_INACTIVE_DATA_LSB \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_DATA_LSB) +#define SI_CONFIG_INACTIVE_DATA_MASK \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_DATA_MASK) +#define SI_CONFIG_DIVIDER_LSB (scn->targetdef->d_SI_CONFIG_DIVIDER_LSB) +#define SI_CONFIG_DIVIDER_MASK (scn->targetdef->d_SI_CONFIG_DIVIDER_MASK) +#define SI_BASE_ADDRESS (scn->targetdef->d_SI_BASE_ADDRESS) +#define SI_CONFIG_OFFSET (scn->targetdef->d_SI_CONFIG_OFFSET) +#define SI_TX_DATA0_OFFSET (scn->targetdef->d_SI_TX_DATA0_OFFSET) +#define SI_TX_DATA1_OFFSET (scn->targetdef->d_SI_TX_DATA1_OFFSET) +#define SI_RX_DATA0_OFFSET (scn->targetdef->d_SI_RX_DATA0_OFFSET) +#define SI_RX_DATA1_OFFSET (scn->targetdef->d_SI_RX_DATA1_OFFSET) +#define SI_CS_OFFSET (scn->targetdef->d_SI_CS_OFFSET) +#define SI_CS_DONE_ERR_MASK (scn->targetdef->d_SI_CS_DONE_ERR_MASK) +#define SI_CS_DONE_INT_MASK (scn->targetdef->d_SI_CS_DONE_INT_MASK) +#define SI_CS_START_LSB (scn->targetdef->d_SI_CS_START_LSB) +#define SI_CS_START_MASK (scn->targetdef->d_SI_CS_START_MASK) +#define SI_CS_RX_CNT_LSB (scn->targetdef->d_SI_CS_RX_CNT_LSB) +#define SI_CS_RX_CNT_MASK (scn->targetdef->d_SI_CS_RX_CNT_MASK) +#define SI_CS_TX_CNT_LSB (scn->targetdef->d_SI_CS_TX_CNT_LSB) +#define SI_CS_TX_CNT_MASK (scn->targetdef->d_SI_CS_TX_CNT_MASK) +#define EEPROM_SZ (scn->targetdef->d_BOARD_DATA_SZ) +#define EEPROM_EXT_SZ (scn->targetdef->d_BOARD_EXT_DATA_SZ) +#define MBOX_BASE_ADDRESS (scn->targetdef->d_MBOX_BASE_ADDRESS) +#define LOCAL_SCRATCH_OFFSET (scn->targetdef->d_LOCAL_SCRATCH_OFFSET) +#define CPU_CLOCK_OFFSET (scn->targetdef->d_CPU_CLOCK_OFFSET) +#define LPO_CAL_OFFSET (scn->targetdef->d_LPO_CAL_OFFSET) +#define GPIO_PIN10_OFFSET (scn->targetdef->d_GPIO_PIN10_OFFSET) +#define GPIO_PIN11_OFFSET (scn->targetdef->d_GPIO_PIN11_OFFSET) +#define GPIO_PIN12_OFFSET 
(scn->targetdef->d_GPIO_PIN12_OFFSET) +#define GPIO_PIN13_OFFSET (scn->targetdef->d_GPIO_PIN13_OFFSET) +#define CLOCK_GPIO_OFFSET (scn->targetdef->d_CLOCK_GPIO_OFFSET) +#define CPU_CLOCK_STANDARD_LSB (scn->targetdef->d_CPU_CLOCK_STANDARD_LSB) +#define CPU_CLOCK_STANDARD_MASK (scn->targetdef->d_CPU_CLOCK_STANDARD_MASK) +#define LPO_CAL_ENABLE_LSB (scn->targetdef->d_LPO_CAL_ENABLE_LSB) +#define LPO_CAL_ENABLE_MASK (scn->targetdef->d_LPO_CAL_ENABLE_MASK) +#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB \ + (scn->targetdef->d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB) +#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK \ + (scn->targetdef->d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK) +#define ANALOG_INTF_BASE_ADDRESS (scn->targetdef->d_ANALOG_INTF_BASE_ADDRESS) +#define WLAN_MAC_BASE_ADDRESS (scn->targetdef->d_WLAN_MAC_BASE_ADDRESS) +#define FW_INDICATOR_ADDRESS (scn->targetdef->d_FW_INDICATOR_ADDRESS) +#define DRAM_BASE_ADDRESS (scn->targetdef->d_DRAM_BASE_ADDRESS) +#define SOC_CORE_BASE_ADDRESS (scn->targetdef->d_SOC_CORE_BASE_ADDRESS) +#define CORE_CTRL_ADDRESS (scn->targetdef->d_CORE_CTRL_ADDRESS) +#define CORE_CTRL_CPU_INTR_MASK (scn->targetdef->d_CORE_CTRL_CPU_INTR_MASK) +#define SOC_RESET_CONTROL_ADDRESS (scn->targetdef->d_SOC_RESET_CONTROL_ADDRESS) +#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK \ + (scn->targetdef->d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK) +#define CPU_INTR_ADDRESS (scn->targetdef->d_CPU_INTR_ADDRESS) +#define SOC_LF_TIMER_CONTROL0_ADDRESS \ + (scn->targetdef->d_SOC_LF_TIMER_CONTROL0_ADDRESS) +#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK \ + (scn->targetdef->d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK) + + +#define CHIP_ID_ADDRESS (scn->targetdef->d_SOC_CHIP_ID_ADDRESS) +#define SOC_CHIP_ID_REVISION_MASK (scn->targetdef->d_SOC_CHIP_ID_REVISION_MASK) +#define SOC_CHIP_ID_REVISION_LSB (scn->targetdef->d_SOC_CHIP_ID_REVISION_LSB) +#define SOC_CHIP_ID_VERSION_MASK (scn->targetdef->d_SOC_CHIP_ID_VERSION_MASK) +#define SOC_CHIP_ID_VERSION_LSB (scn->targetdef->d_SOC_CHIP_ID_VERSION_LSB) +#define 
CHIP_ID_REVISION_GET(x) \ + (((x) & SOC_CHIP_ID_REVISION_MASK) >> SOC_CHIP_ID_REVISION_LSB) +#define CHIP_ID_VERSION_GET(x) \ + (((x) & SOC_CHIP_ID_VERSION_MASK) >> SOC_CHIP_ID_VERSION_LSB) + +/* misc */ +#define SR_WR_INDEX_ADDRESS (scn->targetdef->d_SR_WR_INDEX_ADDRESS) +#define DST_WATERMARK_ADDRESS (scn->targetdef->d_DST_WATERMARK_ADDRESS) +#define SOC_POWER_REG_OFFSET (scn->targetdef->d_SOC_POWER_REG_OFFSET) +/* end */ + +/* htt_rx.c */ +#define RX_MSDU_END_4_FIRST_MSDU_MASK \ + (pdev->targetdef->d_RX_MSDU_END_4_FIRST_MSDU_MASK) +#define RX_MSDU_END_4_FIRST_MSDU_LSB \ + (pdev->targetdef->d_RX_MSDU_END_4_FIRST_MSDU_LSB) +#define RX_MPDU_START_0_RETRY_LSB \ + (pdev->targetdef->d_RX_MPDU_START_0_RETRY_LSB) +#define RX_MPDU_START_0_RETRY_MASK \ + (pdev->targetdef->d_RX_MPDU_START_0_RETRY_MASK) +#define RX_MPDU_START_0_SEQ_NUM_MASK \ + (pdev->targetdef->d_RX_MPDU_START_0_SEQ_NUM_MASK) +#define RX_MPDU_START_0_SEQ_NUM_LSB \ + (pdev->targetdef->d_RX_MPDU_START_0_SEQ_NUM_LSB) +#define RX_MPDU_START_2_PN_47_32_LSB \ + (pdev->targetdef->d_RX_MPDU_START_2_PN_47_32_LSB) +#define RX_MPDU_START_2_PN_47_32_MASK \ + (pdev->targetdef->d_RX_MPDU_START_2_PN_47_32_MASK) +#define RX_MPDU_START_2_TID_LSB \ + (pdev->targetdef->d_RX_MPDU_START_2_TID_LSB) +#define RX_MPDU_START_2_TID_MASK \ + (pdev->targetdef->d_RX_MPDU_START_2_TID_MASK) +#define RX_MSDU_END_1_KEY_ID_OCT_MASK \ + (pdev->targetdef->d_RX_MSDU_END_1_KEY_ID_OCT_MASK) +#define RX_MSDU_END_1_KEY_ID_OCT_LSB \ + (pdev->targetdef->d_RX_MSDU_END_1_KEY_ID_OCT_LSB) +#define RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK \ + (pdev->targetdef->d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK) +#define RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB \ + (pdev->targetdef->d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB) +#define RX_MSDU_END_4_LAST_MSDU_MASK \ + (pdev->targetdef->d_RX_MSDU_END_4_LAST_MSDU_MASK) +#define RX_MSDU_END_4_LAST_MSDU_LSB \ + (pdev->targetdef->d_RX_MSDU_END_4_LAST_MSDU_LSB) +#define RX_ATTENTION_0_MCAST_BCAST_MASK \ + 
(pdev->targetdef->d_RX_ATTENTION_0_MCAST_BCAST_MASK) +#define RX_ATTENTION_0_MCAST_BCAST_LSB \ + (pdev->targetdef->d_RX_ATTENTION_0_MCAST_BCAST_LSB) +#define RX_ATTENTION_0_FRAGMENT_MASK \ + (pdev->targetdef->d_RX_ATTENTION_0_FRAGMENT_MASK) +#define RX_ATTENTION_0_FRAGMENT_LSB \ + (pdev->targetdef->d_RX_ATTENTION_0_FRAGMENT_LSB) +#define RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK \ + (pdev->targetdef->d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK) +#define RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK \ + (pdev->targetdef->d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK) +#define RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB \ + (pdev->targetdef->d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB) +#define RX_MSDU_START_0_MSDU_LENGTH_MASK \ + (pdev->targetdef->d_RX_MSDU_START_0_MSDU_LENGTH_MASK) +#define RX_MSDU_START_0_MSDU_LENGTH_LSB \ + (pdev->targetdef->d_RX_MSDU_START_0_MSDU_LENGTH_LSB) +#define RX_MSDU_START_2_DECAP_FORMAT_OFFSET \ + (pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET) +#define RX_MSDU_START_2_DECAP_FORMAT_MASK \ + (pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_MASK) +#define RX_MSDU_START_2_DECAP_FORMAT_LSB \ + (pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_LSB) +#define RX_MPDU_START_0_ENCRYPTED_MASK \ + (pdev->targetdef->d_RX_MPDU_START_0_ENCRYPTED_MASK) +#define RX_MPDU_START_0_ENCRYPTED_LSB \ + (pdev->targetdef->d_RX_MPDU_START_0_ENCRYPTED_LSB) +#define RX_ATTENTION_0_MORE_DATA_MASK \ + (pdev->targetdef->d_RX_ATTENTION_0_MORE_DATA_MASK) +#define RX_ATTENTION_0_MSDU_DONE_MASK \ + (pdev->targetdef->d_RX_ATTENTION_0_MSDU_DONE_MASK) +#define RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK \ + (pdev->targetdef->d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK) +/* end */ + +/* copy_engine.c */ +/* end */ +/* PLL start */ +#define EFUSE_OFFSET (scn->targetdef->d_EFUSE_OFFSET) +#define EFUSE_XTAL_SEL_MSB (scn->targetdef->d_EFUSE_XTAL_SEL_MSB) +#define EFUSE_XTAL_SEL_LSB (scn->targetdef->d_EFUSE_XTAL_SEL_LSB) +#define EFUSE_XTAL_SEL_MASK (scn->targetdef->d_EFUSE_XTAL_SEL_MASK) +#define BB_PLL_CONFIG_OFFSET 
(scn->targetdef->d_BB_PLL_CONFIG_OFFSET) +#define BB_PLL_CONFIG_OUTDIV_MSB (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_MSB) +#define BB_PLL_CONFIG_OUTDIV_LSB (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_LSB) +#define BB_PLL_CONFIG_OUTDIV_MASK (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_MASK) +#define BB_PLL_CONFIG_FRAC_MSB (scn->targetdef->d_BB_PLL_CONFIG_FRAC_MSB) +#define BB_PLL_CONFIG_FRAC_LSB (scn->targetdef->d_BB_PLL_CONFIG_FRAC_LSB) +#define BB_PLL_CONFIG_FRAC_MASK (scn->targetdef->d_BB_PLL_CONFIG_FRAC_MASK) +#define WLAN_PLL_SETTLE_TIME_MSB (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_MSB) +#define WLAN_PLL_SETTLE_TIME_LSB (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_LSB) +#define WLAN_PLL_SETTLE_TIME_MASK (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_MASK) +#define WLAN_PLL_SETTLE_OFFSET (scn->targetdef->d_WLAN_PLL_SETTLE_OFFSET) +#define WLAN_PLL_SETTLE_SW_MASK (scn->targetdef->d_WLAN_PLL_SETTLE_SW_MASK) +#define WLAN_PLL_SETTLE_RSTMASK (scn->targetdef->d_WLAN_PLL_SETTLE_RSTMASK) +#define WLAN_PLL_SETTLE_RESET (scn->targetdef->d_WLAN_PLL_SETTLE_RESET) +#define WLAN_PLL_CONTROL_NOPWD_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_MSB) +#define WLAN_PLL_CONTROL_NOPWD_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_LSB) +#define WLAN_PLL_CONTROL_NOPWD_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_MASK) +#define WLAN_PLL_CONTROL_BYPASS_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_MSB) +#define WLAN_PLL_CONTROL_BYPASS_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_LSB) +#define WLAN_PLL_CONTROL_BYPASS_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_MASK) +#define WLAN_PLL_CONTROL_BYPASS_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_RESET) +#define WLAN_PLL_CONTROL_CLK_SEL_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_MSB) +#define WLAN_PLL_CONTROL_CLK_SEL_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_LSB) +#define WLAN_PLL_CONTROL_CLK_SEL_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_MASK) +#define WLAN_PLL_CONTROL_CLK_SEL_RESET \ + 
(scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_RESET) +#define WLAN_PLL_CONTROL_REFDIV_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_MSB) +#define WLAN_PLL_CONTROL_REFDIV_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_LSB) +#define WLAN_PLL_CONTROL_REFDIV_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_MASK) +#define WLAN_PLL_CONTROL_REFDIV_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_RESET) +#define WLAN_PLL_CONTROL_DIV_MSB (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_MSB) +#define WLAN_PLL_CONTROL_DIV_LSB (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_LSB) +#define WLAN_PLL_CONTROL_DIV_MASK (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_MASK) +#define WLAN_PLL_CONTROL_DIV_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_RESET) +#define WLAN_PLL_CONTROL_OFFSET (scn->targetdef->d_WLAN_PLL_CONTROL_OFFSET) +#define WLAN_PLL_CONTROL_SW_MASK (scn->targetdef->d_WLAN_PLL_CONTROL_SW_MASK) +#define WLAN_PLL_CONTROL_RSTMASK (scn->targetdef->d_WLAN_PLL_CONTROL_RSTMASK) +#define WLAN_PLL_CONTROL_RESET (scn->targetdef->d_WLAN_PLL_CONTROL_RESET) +#define SOC_CORE_CLK_CTRL_OFFSET (scn->targetdef->d_SOC_CORE_CLK_CTRL_OFFSET) +#define SOC_CORE_CLK_CTRL_DIV_MSB (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_MSB) +#define SOC_CORE_CLK_CTRL_DIV_LSB (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_LSB) +#define SOC_CORE_CLK_CTRL_DIV_MASK \ + (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_MSB \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_MSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_LSB \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_LSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_MASK \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_RESET \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_RESET) +#define RTC_SYNC_STATUS_OFFSET (scn->targetdef->d_RTC_SYNC_STATUS_OFFSET) +#define SOC_CPU_CLOCK_OFFSET (scn->targetdef->d_SOC_CPU_CLOCK_OFFSET) +#define SOC_CPU_CLOCK_STANDARD_MSB \ + 
(scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_MSB) +#define SOC_CPU_CLOCK_STANDARD_LSB \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_LSB) +#define SOC_CPU_CLOCK_STANDARD_MASK \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_MASK) +/* PLL end */ + +/* SET macros */ +#define WLAN_SYSTEM_SLEEP_DISABLE_SET(x) \ + (((x) << WLAN_SYSTEM_SLEEP_DISABLE_LSB) & \ + WLAN_SYSTEM_SLEEP_DISABLE_MASK) +#define SI_CONFIG_BIDIR_OD_DATA_SET(x) \ + (((x) << SI_CONFIG_BIDIR_OD_DATA_LSB) & SI_CONFIG_BIDIR_OD_DATA_MASK) +#define SI_CONFIG_I2C_SET(x) (((x) << SI_CONFIG_I2C_LSB) & SI_CONFIG_I2C_MASK) +#define SI_CONFIG_POS_SAMPLE_SET(x) \ + (((x) << SI_CONFIG_POS_SAMPLE_LSB) & SI_CONFIG_POS_SAMPLE_MASK) +#define SI_CONFIG_INACTIVE_CLK_SET(x) \ + (((x) << SI_CONFIG_INACTIVE_CLK_LSB) & SI_CONFIG_INACTIVE_CLK_MASK) +#define SI_CONFIG_INACTIVE_DATA_SET(x) \ + (((x) << SI_CONFIG_INACTIVE_DATA_LSB) & SI_CONFIG_INACTIVE_DATA_MASK) +#define SI_CONFIG_DIVIDER_SET(x) \ + (((x) << SI_CONFIG_DIVIDER_LSB) & SI_CONFIG_DIVIDER_MASK) +#define SI_CS_START_SET(x) (((x) << SI_CS_START_LSB) & SI_CS_START_MASK) +#define SI_CS_RX_CNT_SET(x) (((x) << SI_CS_RX_CNT_LSB) & SI_CS_RX_CNT_MASK) +#define SI_CS_TX_CNT_SET(x) (((x) << SI_CS_TX_CNT_LSB) & SI_CS_TX_CNT_MASK) +#define LPO_CAL_ENABLE_SET(x) \ + (((x) << LPO_CAL_ENABLE_LSB) & LPO_CAL_ENABLE_MASK) +#define CPU_CLOCK_STANDARD_SET(x) \ + (((x) << CPU_CLOCK_STANDARD_LSB) & CPU_CLOCK_STANDARD_MASK) +#define CLOCK_GPIO_BT_CLK_OUT_EN_SET(x) \ + (((x) << CLOCK_GPIO_BT_CLK_OUT_EN_LSB) & CLOCK_GPIO_BT_CLK_OUT_EN_MASK) +/* copy_engine.c */ +/* end */ +/* PLL start */ +#define EFUSE_XTAL_SEL_GET(x) \ + (((x) & EFUSE_XTAL_SEL_MASK) >> EFUSE_XTAL_SEL_LSB) +#define EFUSE_XTAL_SEL_SET(x) \ + (((x) << EFUSE_XTAL_SEL_LSB) & EFUSE_XTAL_SEL_MASK) +#define BB_PLL_CONFIG_OUTDIV_GET(x) \ + (((x) & BB_PLL_CONFIG_OUTDIV_MASK) >> BB_PLL_CONFIG_OUTDIV_LSB) +#define BB_PLL_CONFIG_OUTDIV_SET(x) \ + (((x) << BB_PLL_CONFIG_OUTDIV_LSB) & BB_PLL_CONFIG_OUTDIV_MASK) +#define 
BB_PLL_CONFIG_FRAC_GET(x) \ + (((x) & BB_PLL_CONFIG_FRAC_MASK) >> BB_PLL_CONFIG_FRAC_LSB) +#define BB_PLL_CONFIG_FRAC_SET(x) \ + (((x) << BB_PLL_CONFIG_FRAC_LSB) & BB_PLL_CONFIG_FRAC_MASK) +#define WLAN_PLL_SETTLE_TIME_GET(x) \ + (((x) & WLAN_PLL_SETTLE_TIME_MASK) >> WLAN_PLL_SETTLE_TIME_LSB) +#define WLAN_PLL_SETTLE_TIME_SET(x) \ + (((x) << WLAN_PLL_SETTLE_TIME_LSB) & WLAN_PLL_SETTLE_TIME_MASK) +#define WLAN_PLL_CONTROL_NOPWD_GET(x) \ + (((x) & WLAN_PLL_CONTROL_NOPWD_MASK) >> WLAN_PLL_CONTROL_NOPWD_LSB) +#define WLAN_PLL_CONTROL_NOPWD_SET(x) \ + (((x) << WLAN_PLL_CONTROL_NOPWD_LSB) & WLAN_PLL_CONTROL_NOPWD_MASK) +#define WLAN_PLL_CONTROL_BYPASS_GET(x) \ + (((x) & WLAN_PLL_CONTROL_BYPASS_MASK) >> WLAN_PLL_CONTROL_BYPASS_LSB) +#define WLAN_PLL_CONTROL_BYPASS_SET(x) \ + (((x) << WLAN_PLL_CONTROL_BYPASS_LSB) & WLAN_PLL_CONTROL_BYPASS_MASK) +#define WLAN_PLL_CONTROL_CLK_SEL_GET(x) \ + (((x) & WLAN_PLL_CONTROL_CLK_SEL_MASK) >> WLAN_PLL_CONTROL_CLK_SEL_LSB) +#define WLAN_PLL_CONTROL_CLK_SEL_SET(x) \ + (((x) << WLAN_PLL_CONTROL_CLK_SEL_LSB) & WLAN_PLL_CONTROL_CLK_SEL_MASK) +#define WLAN_PLL_CONTROL_REFDIV_GET(x) \ + (((x) & WLAN_PLL_CONTROL_REFDIV_MASK) >> WLAN_PLL_CONTROL_REFDIV_LSB) +#define WLAN_PLL_CONTROL_REFDIV_SET(x) \ + (((x) << WLAN_PLL_CONTROL_REFDIV_LSB) & WLAN_PLL_CONTROL_REFDIV_MASK) +#define WLAN_PLL_CONTROL_DIV_GET(x) \ + (((x) & WLAN_PLL_CONTROL_DIV_MASK) >> WLAN_PLL_CONTROL_DIV_LSB) +#define WLAN_PLL_CONTROL_DIV_SET(x) \ + (((x) << WLAN_PLL_CONTROL_DIV_LSB) & WLAN_PLL_CONTROL_DIV_MASK) +#define SOC_CORE_CLK_CTRL_DIV_GET(x) \ + (((x) & SOC_CORE_CLK_CTRL_DIV_MASK) >> SOC_CORE_CLK_CTRL_DIV_LSB) +#define SOC_CORE_CLK_CTRL_DIV_SET(x) \ + (((x) << SOC_CORE_CLK_CTRL_DIV_LSB) & SOC_CORE_CLK_CTRL_DIV_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_GET(x) \ + (((x) & RTC_SYNC_STATUS_PLL_CHANGING_MASK) >> \ + RTC_SYNC_STATUS_PLL_CHANGING_LSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_SET(x) \ + (((x) << RTC_SYNC_STATUS_PLL_CHANGING_LSB) & \ + 
RTC_SYNC_STATUS_PLL_CHANGING_MASK) +#define SOC_CPU_CLOCK_STANDARD_GET(x) \ + (((x) & SOC_CPU_CLOCK_STANDARD_MASK) >> SOC_CPU_CLOCK_STANDARD_LSB) +#define SOC_CPU_CLOCK_STANDARD_SET(x) \ + (((x) << SOC_CPU_CLOCK_STANDARD_LSB) & SOC_CPU_CLOCK_STANDARD_MASK) +/* PLL end */ + +#ifdef QCA_WIFI_3_0_ADRASTEA +#define Q6_ENABLE_REGISTER_0 \ + (scn->targetdef->d_Q6_ENABLE_REGISTER_0) +#define Q6_ENABLE_REGISTER_1 \ + (scn->targetdef->d_Q6_ENABLE_REGISTER_1) +#define Q6_CAUSE_REGISTER_0 \ + (scn->targetdef->d_Q6_CAUSE_REGISTER_0) +#define Q6_CAUSE_REGISTER_1 \ + (scn->targetdef->d_Q6_CAUSE_REGISTER_1) +#define Q6_CLEAR_REGISTER_0 \ + (scn->targetdef->d_Q6_CLEAR_REGISTER_0) +#define Q6_CLEAR_REGISTER_1 \ + (scn->targetdef->d_Q6_CLEAR_REGISTER_1) +#endif + +struct hostdef_s { + uint32_t d_INT_STATUS_ENABLE_ERROR_LSB; + uint32_t d_INT_STATUS_ENABLE_ERROR_MASK; + uint32_t d_INT_STATUS_ENABLE_CPU_LSB; + uint32_t d_INT_STATUS_ENABLE_CPU_MASK; + uint32_t d_INT_STATUS_ENABLE_COUNTER_LSB; + uint32_t d_INT_STATUS_ENABLE_COUNTER_MASK; + uint32_t d_INT_STATUS_ENABLE_MBOX_DATA_LSB; + uint32_t d_INT_STATUS_ENABLE_MBOX_DATA_MASK; + uint32_t d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB; + uint32_t d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK; + uint32_t d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB; + uint32_t d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK; + uint32_t d_COUNTER_INT_STATUS_ENABLE_BIT_LSB; + uint32_t d_COUNTER_INT_STATUS_ENABLE_BIT_MASK; + uint32_t d_INT_STATUS_ENABLE_ADDRESS; + uint32_t d_CPU_INT_STATUS_ENABLE_BIT_LSB; + uint32_t d_CPU_INT_STATUS_ENABLE_BIT_MASK; + uint32_t d_HOST_INT_STATUS_ADDRESS; + uint32_t d_CPU_INT_STATUS_ADDRESS; + uint32_t d_ERROR_INT_STATUS_ADDRESS; + uint32_t d_ERROR_INT_STATUS_WAKEUP_MASK; + uint32_t d_ERROR_INT_STATUS_WAKEUP_LSB; + uint32_t d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK; + uint32_t d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB; + uint32_t d_ERROR_INT_STATUS_TX_OVERFLOW_MASK; + uint32_t d_ERROR_INT_STATUS_TX_OVERFLOW_LSB; + uint32_t d_COUNT_DEC_ADDRESS; + uint32_t 
d_HOST_INT_STATUS_CPU_MASK; + uint32_t d_HOST_INT_STATUS_CPU_LSB; + uint32_t d_HOST_INT_STATUS_ERROR_MASK; + uint32_t d_HOST_INT_STATUS_ERROR_LSB; + uint32_t d_HOST_INT_STATUS_COUNTER_MASK; + uint32_t d_HOST_INT_STATUS_COUNTER_LSB; + uint32_t d_RX_LOOKAHEAD_VALID_ADDRESS; + uint32_t d_WINDOW_DATA_ADDRESS; + uint32_t d_WINDOW_READ_ADDR_ADDRESS; + uint32_t d_WINDOW_WRITE_ADDR_ADDRESS; + uint32_t d_SOC_GLOBAL_RESET_ADDRESS; + uint32_t d_RTC_STATE_ADDRESS; + uint32_t d_RTC_STATE_COLD_RESET_MASK; + uint32_t d_RTC_STATE_V_MASK; + uint32_t d_RTC_STATE_V_LSB; + uint32_t d_FW_IND_EVENT_PENDING; + uint32_t d_FW_IND_INITIALIZED; + uint32_t d_FW_IND_HELPER; + uint32_t d_RTC_STATE_V_ON; +#if defined(SDIO_3_0) + uint32_t d_HOST_INT_STATUS_MBOX_DATA_MASK; + uint32_t d_HOST_INT_STATUS_MBOX_DATA_LSB; +#endif + uint32_t d_MSI_MAGIC_ADR_ADDRESS; + uint32_t d_MSI_MAGIC_ADDRESS; + uint32_t d_ENABLE_MSI; + uint32_t d_MUX_ID_MASK; + uint32_t d_TRANSACTION_ID_MASK; + uint32_t d_DESC_DATA_FLAG_MASK; +}; +#define DESC_DATA_FLAG_MASK (scn->hostdef->d_DESC_DATA_FLAG_MASK) +#define MUX_ID_MASK (scn->hostdef->d_MUX_ID_MASK) +#define TRANSACTION_ID_MASK (scn->hostdef->d_TRANSACTION_ID_MASK) +#define ENABLE_MSI (scn->hostdef->d_ENABLE_MSI) +#define INT_STATUS_ENABLE_ERROR_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_ERROR_LSB) +#define INT_STATUS_ENABLE_ERROR_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_ERROR_MASK) +#define INT_STATUS_ENABLE_CPU_LSB (scn->hostdef->d_INT_STATUS_ENABLE_CPU_LSB) +#define INT_STATUS_ENABLE_CPU_MASK (scn->hostdef->d_INT_STATUS_ENABLE_CPU_MASK) +#define INT_STATUS_ENABLE_COUNTER_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_COUNTER_LSB) +#define INT_STATUS_ENABLE_COUNTER_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_COUNTER_MASK) +#define INT_STATUS_ENABLE_MBOX_DATA_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_MBOX_DATA_LSB) +#define INT_STATUS_ENABLE_MBOX_DATA_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_MBOX_DATA_MASK) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB \ + 
(scn->hostdef->d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB \ + (scn->hostdef->d_COUNTER_INT_STATUS_ENABLE_BIT_LSB) +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK \ + (scn->hostdef->d_COUNTER_INT_STATUS_ENABLE_BIT_MASK) +#define INT_STATUS_ENABLE_ADDRESS \ + (scn->hostdef->d_INT_STATUS_ENABLE_ADDRESS) +#define CPU_INT_STATUS_ENABLE_BIT_LSB \ + (scn->hostdef->d_CPU_INT_STATUS_ENABLE_BIT_LSB) +#define CPU_INT_STATUS_ENABLE_BIT_MASK \ + (scn->hostdef->d_CPU_INT_STATUS_ENABLE_BIT_MASK) +#define HOST_INT_STATUS_ADDRESS (scn->hostdef->d_HOST_INT_STATUS_ADDRESS) +#define CPU_INT_STATUS_ADDRESS (scn->hostdef->d_CPU_INT_STATUS_ADDRESS) +#define ERROR_INT_STATUS_ADDRESS (scn->hostdef->d_ERROR_INT_STATUS_ADDRESS) +#define ERROR_INT_STATUS_WAKEUP_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_WAKEUP_MASK) +#define ERROR_INT_STATUS_WAKEUP_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_WAKEUP_LSB) +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK) +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB) +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_TX_OVERFLOW_MASK) +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_TX_OVERFLOW_LSB) +#define COUNT_DEC_ADDRESS (scn->hostdef->d_COUNT_DEC_ADDRESS) +#define HOST_INT_STATUS_CPU_MASK (scn->hostdef->d_HOST_INT_STATUS_CPU_MASK) +#define HOST_INT_STATUS_CPU_LSB (scn->hostdef->d_HOST_INT_STATUS_CPU_LSB) +#define HOST_INT_STATUS_ERROR_MASK (scn->hostdef->d_HOST_INT_STATUS_ERROR_MASK) +#define HOST_INT_STATUS_ERROR_LSB 
(scn->hostdef->d_HOST_INT_STATUS_ERROR_LSB) +#define HOST_INT_STATUS_COUNTER_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_COUNTER_MASK) +#define HOST_INT_STATUS_COUNTER_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_COUNTER_LSB) +#define RX_LOOKAHEAD_VALID_ADDRESS (scn->hostdef->d_RX_LOOKAHEAD_VALID_ADDRESS) +#define WINDOW_DATA_ADDRESS (scn->hostdef->d_WINDOW_DATA_ADDRESS) +#define WINDOW_READ_ADDR_ADDRESS (scn->hostdef->d_WINDOW_READ_ADDR_ADDRESS) +#define WINDOW_WRITE_ADDR_ADDRESS (scn->hostdef->d_WINDOW_WRITE_ADDR_ADDRESS) +#define SOC_GLOBAL_RESET_ADDRESS (scn->hostdef->d_SOC_GLOBAL_RESET_ADDRESS) +#define RTC_STATE_ADDRESS (scn->hostdef->d_RTC_STATE_ADDRESS) +#define RTC_STATE_COLD_RESET_MASK (scn->hostdef->d_RTC_STATE_COLD_RESET_MASK) +#define RTC_STATE_V_MASK (scn->hostdef->d_RTC_STATE_V_MASK) +#define RTC_STATE_V_LSB (scn->hostdef->d_RTC_STATE_V_LSB) +#define FW_IND_EVENT_PENDING (scn->hostdef->d_FW_IND_EVENT_PENDING) +#define FW_IND_INITIALIZED (scn->hostdef->d_FW_IND_INITIALIZED) +#define FW_IND_HELPER (scn->hostdef->d_FW_IND_HELPER) +#define RTC_STATE_V_ON (scn->hostdef->d_RTC_STATE_V_ON) +#if defined(SDIO_3_0) +#define HOST_INT_STATUS_MBOX_DATA_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_MBOX_DATA_MASK) +#define HOST_INT_STATUS_MBOX_DATA_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_MBOX_DATA_LSB) +#endif + +#if !defined(MSI_MAGIC_ADR_ADDRESS) +#define MSI_MAGIC_ADR_ADDRESS 0 +#define MSI_MAGIC_ADDRESS 0 +#endif + +/* SET/GET macros */ +#define INT_STATUS_ENABLE_ERROR_SET(x) \ + (((x) << INT_STATUS_ENABLE_ERROR_LSB) & INT_STATUS_ENABLE_ERROR_MASK) +#define INT_STATUS_ENABLE_CPU_SET(x) \ + (((x) << INT_STATUS_ENABLE_CPU_LSB) & INT_STATUS_ENABLE_CPU_MASK) +#define INT_STATUS_ENABLE_COUNTER_SET(x) \ + (((x) << INT_STATUS_ENABLE_COUNTER_LSB) & \ + INT_STATUS_ENABLE_COUNTER_MASK) +#define INT_STATUS_ENABLE_MBOX_DATA_SET(x) \ + (((x) << INT_STATUS_ENABLE_MBOX_DATA_LSB) & \ + INT_STATUS_ENABLE_MBOX_DATA_MASK) +#define CPU_INT_STATUS_ENABLE_BIT_SET(x) \ + (((x) << 
CPU_INT_STATUS_ENABLE_BIT_LSB) & \ + CPU_INT_STATUS_ENABLE_BIT_MASK) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(x) \ + (((x) << ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) & \ + ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(x) \ + (((x) << ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) & \ + ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) +#define COUNTER_INT_STATUS_ENABLE_BIT_SET(x) \ + (((x) << COUNTER_INT_STATUS_ENABLE_BIT_LSB) & \ + COUNTER_INT_STATUS_ENABLE_BIT_MASK) +#define ERROR_INT_STATUS_WAKEUP_GET(x) \ + (((x) & ERROR_INT_STATUS_WAKEUP_MASK) >> \ + ERROR_INT_STATUS_WAKEUP_LSB) +#define ERROR_INT_STATUS_RX_UNDERFLOW_GET(x) \ + (((x) & ERROR_INT_STATUS_RX_UNDERFLOW_MASK) >> \ + ERROR_INT_STATUS_RX_UNDERFLOW_LSB) +#define ERROR_INT_STATUS_TX_OVERFLOW_GET(x) \ + (((x) & ERROR_INT_STATUS_TX_OVERFLOW_MASK) >> \ + ERROR_INT_STATUS_TX_OVERFLOW_LSB) +#define HOST_INT_STATUS_CPU_GET(x) \ + (((x) & HOST_INT_STATUS_CPU_MASK) >> HOST_INT_STATUS_CPU_LSB) +#define HOST_INT_STATUS_ERROR_GET(x) \ + (((x) & HOST_INT_STATUS_ERROR_MASK) >> HOST_INT_STATUS_ERROR_LSB) +#define HOST_INT_STATUS_COUNTER_GET(x) \ + (((x) & HOST_INT_STATUS_COUNTER_MASK) >> HOST_INT_STATUS_COUNTER_LSB) +#define RTC_STATE_V_GET(x) \ + (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB) +#if defined(SDIO_3_0) +#define HOST_INT_STATUS_MBOX_DATA_GET(x) \ + (((x) & HOST_INT_STATUS_MBOX_DATA_MASK) >> \ + HOST_INT_STATUS_MBOX_DATA_LSB) +#endif + +#define INVALID_REG_LOC_DUMMY_DATA 0xAA + +#define AR6320_CORE_CLK_DIV_ADDR 0x403fa8 +#define AR6320_CPU_PLL_INIT_DONE_ADDR 0x403fd0 +#define AR6320_CPU_SPEED_ADDR 0x403fa4 +#define AR6320V2_CORE_CLK_DIV_ADDR 0x403fd8 +#define AR6320V2_CPU_PLL_INIT_DONE_ADDR 0x403fd0 +#define AR6320V2_CPU_SPEED_ADDR 0x403fd4 +#define AR6320V3_CORE_CLK_DIV_ADDR 0x404028 +#define AR6320V3_CPU_PLL_INIT_DONE_ADDR 0x404020 +#define AR6320V3_CPU_SPEED_ADDR 0x404024 + +enum a_refclk_speed_t { + SOC_REFCLK_UNKNOWN = -1, /* Unsupported ref clock -- use PLL Bypass */ + 
SOC_REFCLK_48_MHZ = 0, + SOC_REFCLK_19_2_MHZ = 1, + SOC_REFCLK_24_MHZ = 2, + SOC_REFCLK_26_MHZ = 3, + SOC_REFCLK_37_4_MHZ = 4, + SOC_REFCLK_38_4_MHZ = 5, + SOC_REFCLK_40_MHZ = 6, + SOC_REFCLK_52_MHZ = 7, +}; + +#define A_REFCLK_UNKNOWN SOC_REFCLK_UNKNOWN +#define A_REFCLK_48_MHZ SOC_REFCLK_48_MHZ +#define A_REFCLK_19_2_MHZ SOC_REFCLK_19_2_MHZ +#define A_REFCLK_24_MHZ SOC_REFCLK_24_MHZ +#define A_REFCLK_26_MHZ SOC_REFCLK_26_MHZ +#define A_REFCLK_37_4_MHZ SOC_REFCLK_37_4_MHZ +#define A_REFCLK_38_4_MHZ SOC_REFCLK_38_4_MHZ +#define A_REFCLK_40_MHZ SOC_REFCLK_40_MHZ +#define A_REFCLK_52_MHZ SOC_REFCLK_52_MHZ + +#define TARGET_CPU_FREQ 176000000 + +struct wlan_pll_s { + uint32_t refdiv; + uint32_t div; + uint32_t rnfrac; + uint32_t outdiv; +}; + +struct cmnos_clock_s { + enum a_refclk_speed_t refclk_speed; + uint32_t refclk_hz; + uint32_t pll_settling_time; /* 50us */ + struct wlan_pll_s wlan_pll; +}; + +struct tgt_reg_section { + uint32_t start_addr; + uint32_t end_addr; +}; + + +struct tgt_reg_table { + const struct tgt_reg_section *section; + uint32_t section_size; +}; +#endif /* _REGTABLE_SDIO_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/hif_io32_snoc.h b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/hif_io32_snoc.h new file mode 100644 index 0000000000000000000000000000000000000000..d330f5ef62cf8f8ebee21bffdb0698d10a66ab0d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/hif_io32_snoc.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: hif_io32_snoc.h + * + * snoc specific implementations and configurations + */ + +#ifndef __HIF_IO32_SNOC_H__ +#define __HIF_IO32_SNOC_H__ + +#include "hif.h" +#include "regtable.h" +#include "ce_reg.h" +#include "qdf_atomic.h" +#include "hif_main.h" +#include "hif_debug.h" + +static inline void ce_enable_irq_in_individual_register(struct hif_softc *scn, + int ce_id) +{ + uint32_t offset; + + offset = HOST_IE_ADDRESS + CE_BASE_ADDRESS(ce_id); + if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) { + hif_err_rl("%s: target access is not allowed", __func__); + return; + } + hif_write32_mb(scn->mem + offset, 1); +} + +static inline void ce_disable_irq_in_individual_register(struct hif_softc *scn, + int ce_id) +{ + uint32_t offset; + + offset = HOST_IE_ADDRESS + CE_BASE_ADDRESS(ce_id); + if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) { + hif_err_rl("%s: target access is not allowed", __func__); + return; + } + hif_write32_mb(scn->mem + offset, 0); + + if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) { + hif_err_rl("%s: target access is not allowed", __func__); + return; + } + hif_read32_mb(scn->mem + offset); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb.c b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb.c new file mode 100644 index 0000000000000000000000000000000000000000..ce1ae0fde31ef1b085032cdad335eb4a209e0e79 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb.c @@ -0,0 +1,733 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: if_ahb.c + * + * c file for ahb specific implementations. + */ + +#include "hif.h" +#include "target_type.h" +#include "hif_main.h" +#include "hif_debug.h" +#include "hif_io32.h" +#include "ce_main.h" +#include "ce_api.h" +#include "ce_tasklet.h" +#include "if_ahb.h" +#include "if_pci.h" +#include "ahb_api.h" +#include "pci_api.h" +#include "hif_napi.h" + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) +#define IRQF_DISABLED 0x00000020 +#endif + +#define HIF_IC_CE0_IRQ_OFFSET 4 +#define HIF_IC_MAX_IRQ 54 + +static uint8_t ic_irqnum[HIF_IC_MAX_IRQ]; +/* integrated chip irq names */ +const char *ic_irqname[HIF_IC_MAX_IRQ] = { +"misc-pulse1", +"misc-latch", +"sw-exception", +"watchdog", +"ce0", +"ce1", +"ce2", +"ce3", +"ce4", +"ce5", +"ce6", +"ce7", +"ce8", +"ce9", +"ce10", +"ce11", +"ce12", +"ce13", +"host2wbm-desc-feed", +"host2reo-re-injection", +"host2reo-command", +"host2rxdma-monitor-ring3", +"host2rxdma-monitor-ring2", +"host2rxdma-monitor-ring1", +"reo2ost-exception", +"wbm2host-rx-release", +"reo2host-status", +"reo2host-destination-ring4", +"reo2host-destination-ring3", +"reo2host-destination-ring2", +"reo2host-destination-ring1", +"rxdma2host-monitor-destination-mac3", 
+"rxdma2host-monitor-destination-mac2", +"rxdma2host-monitor-destination-mac1", +"ppdu-end-interrupts-mac3", +"ppdu-end-interrupts-mac2", +"ppdu-end-interrupts-mac1", +"rxdma2host-monitor-status-ring-mac3", +"rxdma2host-monitor-status-ring-mac2", +"rxdma2host-monitor-status-ring-mac1", +"host2rxdma-host-buf-ring-mac3", +"host2rxdma-host-buf-ring-mac2", +"host2rxdma-host-buf-ring-mac1", +"rxdma2host-destination-ring-mac3", +"rxdma2host-destination-ring-mac2", +"rxdma2host-destination-ring-mac1", +"host2tcl-input-ring4", +"host2tcl-input-ring3", +"host2tcl-input-ring2", +"host2tcl-input-ring1", +"wbm2host-tx-completions-ring3", +"wbm2host-tx-completions-ring2", +"wbm2host-tx-completions-ring1", +"tcl2host-status-ring", +}; + +/** + * hif_disable_isr() - disable isr + * + * This function disables isr and kills tasklets + * + * @hif_ctx: struct hif_softc + * + * Return: void + */ +void hif_ahb_disable_isr(struct hif_softc *scn) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + + hif_exec_kill(&scn->osc); + hif_nointrs(scn); + ce_tasklet_kill(scn); + tasklet_kill(&sc->intr_tq); + qdf_atomic_set(&scn->active_tasklet_cnt, 0); + qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0); +} + +/** + * hif_dump_registers() - dump bus debug registers + * @scn: struct hif_opaque_softc + * + * This function dumps hif bus debug registers + * + * Return: 0 for success or error code + */ +int hif_ahb_dump_registers(struct hif_softc *hif_ctx) +{ + int status; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + status = hif_dump_ce_registers(scn); + if (status) + HIF_ERROR("%s: Dump CE Registers Failed status %d", __func__, + status); + + return 0; +} + +/** + * hif_ahb_close() - hif_bus_close + * @scn: pointer to the hif context. + * + * This is a callback function for hif_bus_close. 
+ * + * + * Return: n/a + */ +void hif_ahb_close(struct hif_softc *scn) +{ + hif_ce_close(scn); +} + +/** + * hif_bus_open() - hif_ahb open + * @hif_ctx: hif context + * @bus_type: bus type + * + * This is a callback function for hif_bus_open. + * + * Return: n/a + */ +QDF_STATUS hif_ahb_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type) +{ + + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx); + + qdf_spinlock_create(&sc->irq_lock); + return hif_ce_open(hif_ctx); +} + +/** + * hif_bus_configure() - Configure the bus + * @scn: pointer to the hif context. + * + * This function configure the ahb bus + * + * return: 0 for success. nonzero for failure. + */ +int hif_ahb_bus_configure(struct hif_softc *scn) +{ + return hif_pci_bus_configure(scn); +} + +/** + * hif_configure_msi_ahb - Configure MSI interrupts + * @sc : pointer to the hif context + * + * return: 0 for success. nonzero for failure. + */ + +int hif_configure_msi_ahb(struct hif_pci_softc *sc) +{ + return 0; +} + +/** + * hif_ahb_configure_legacy_irq() - Configure Legacy IRQ + * @sc: pointer to the hif context. + * + * This function registers the irq handler and enables legacy interrupts + * + * return: 0 for success. nonzero for failure. 
+ */ +int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc) +{ + int ret = 0; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + struct platform_device *pdev = (struct platform_device *)sc->pdev; + int irq = 0; + + /* do not support MSI or MSI IRQ failed */ + tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc); + irq = platform_get_irq_byname(pdev, "legacy"); + if (irq < 0) { + dev_err(&pdev->dev, "Unable to get irq\n"); + ret = -1; + goto end; + } + ret = request_irq(irq, hif_pci_legacy_ce_interrupt_handler, + IRQF_DISABLED, "wlan_ahb", sc); + if (ret) { + dev_err(&pdev->dev, "ath_request_irq failed\n"); + ret = -1; + goto end; + } + sc->irq = irq; + + /* Use Legacy PCI Interrupts */ + hif_write32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS), + PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); + /* read once to flush */ + hif_read32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS) + ); + +end: + return ret; +} + +int hif_ahb_configure_irq(struct hif_pci_softc *sc) +{ + int ret = 0; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + struct platform_device *pdev = (struct platform_device *)sc->pdev; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct CE_attr *host_ce_conf = hif_state->host_ce_config; + int irq = 0; + int i; + + /* configure per CE interrupts */ + for (i = 0; i < scn->ce_count; i++) { + if (host_ce_conf[i].flags & CE_ATTR_DISABLE_INTR) + continue; + irq = platform_get_irq_byname(pdev, ic_irqname[HIF_IC_CE0_IRQ_OFFSET + i]); + ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i] = irq; + ret = request_irq(irq , + hif_ahb_interrupt_handler, + IRQF_TRIGGER_RISING, ic_irqname[HIF_IC_CE0_IRQ_OFFSET + i], + &hif_state->tasklets[i]); + if (ret) { + dev_err(&pdev->dev, "ath_request_irq failed\n"); + ret = -1; + goto end; + } + hif_ahb_irq_enable(scn, i); + } + +end: + return ret; +} + +int hif_ahb_configure_grp_irq(struct hif_softc *scn, + struct hif_exec_context *hif_ext_group) +{ + int ret = 0; + struct hif_pci_softc 
*sc = HIF_GET_PCI_SOFTC(scn); + struct platform_device *pdev = (struct platform_device *)sc->pdev; + int irq = 0; + const char *irq_name; + int j; + + /* configure external interrupts */ + hif_ext_group->irq_enable = &hif_ahb_exec_grp_irq_enable; + hif_ext_group->irq_disable = &hif_ahb_exec_grp_irq_disable; + hif_ext_group->work_complete = &hif_dummy_grp_done; + + hif_ext_group->irq_requested = true; + + for (j = 0; j < hif_ext_group->numirq; j++) { + irq_name = ic_irqname[hif_ext_group->irq[j]]; + irq = platform_get_irq_byname(pdev, irq_name); + + ic_irqnum[hif_ext_group->irq[j]] = irq; + ret = request_irq(irq, hif_ext_group_interrupt_handler, + IRQF_TRIGGER_RISING, + ic_irqname[hif_ext_group->irq[j]], + hif_ext_group); + if (ret) { + dev_err(&pdev->dev, + "ath_request_irq failed\n"); + ret = -1; + goto end; + } + hif_ext_group->os_irq[j] = irq; + } + +end: + return ret; +} + +void hif_ahb_deconfigure_grp_irq(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct hif_exec_context *hif_ext_group; + int i, j; + + /* configure external interrupts */ + for (i = 0; i < hif_state->hif_num_extgroup; i++) { + hif_ext_group = hif_state->hif_ext_group[i]; + if (hif_ext_group->irq_requested == true) { + hif_ext_group->irq_requested = false; + for (j = 0; j < hif_ext_group->numirq; j++) { + free_irq(hif_ext_group->os_irq[j], + hif_ext_group); + } + } + } +} + +irqreturn_t hif_ahb_interrupt_handler(int irq, void *context) +{ + struct ce_tasklet_entry *tasklet_entry = context; + return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry); +} + +/** + * hif_target_sync() : ensure the target is ready + * @scn: hif control structure + * + * Informs fw that we plan to use legacy interupts so that + * it can begin booting. Ensures that the fw finishes booting + * before continuing. Should be called before trying to write + * to the targets other registers for the first time. 
+ * + * Return: none + */ +int hif_target_sync_ahb(struct hif_softc *scn) +{ + hif_write32_mb(scn->mem + FW_INDICATOR_ADDRESS, FW_IND_HOST_READY); + if (HAS_FW_INDICATOR) { + int wait_limit = 500; + int fw_ind = 0; + + while (1) { + fw_ind = hif_read32_mb(scn->mem + + FW_INDICATOR_ADDRESS); + if (fw_ind & FW_IND_INITIALIZED) + break; + if (wait_limit-- < 0) + break; + hif_write32_mb(scn->mem+(SOC_CORE_BASE_ADDRESS | + PCIE_INTR_ENABLE_ADDRESS), + PCIE_INTR_FIRMWARE_MASK); + qdf_mdelay(10); + } + if (wait_limit < 0) { + HIF_TRACE("%s: FW signal timed out", __func__); + return -EIO; + } + HIF_TRACE("%s: Got FW signal, retries = %x", __func__, + 500-wait_limit); + } + + return 0; +} + +/** + * hif_disable_bus() - Disable the bus + * @scn : pointer to the hif context + * + * This function disables the bus and helds the target in reset state + * + * Return: none + */ +void hif_ahb_disable_bus(struct hif_softc *scn) +{ + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + void __iomem *mem; + struct platform_device *pdev = (struct platform_device *)sc->pdev; + struct resource *memres = NULL; + int mem_pa_size = 0; + struct hif_target_info *tgt_info = NULL; + + tgt_info = &scn->target_info; + /*Disable WIFI clock input*/ + if (sc->mem) { + memres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!memres) { + HIF_INFO("%s: Failed to get IORESOURCE_MEM\n", + __func__); + return; + } + mem_pa_size = memres->end - memres->start + 1; + + /* Should not be executed on 8074 platform */ + if (tgt_info->target_type != TARGET_TYPE_QCA8074) { + hif_ahb_clk_enable_disable(&pdev->dev, 0); + + hif_ahb_device_reset(scn); + } + mem = (void __iomem *)sc->mem; + if (mem) { + devm_iounmap(&pdev->dev, mem); + devm_release_mem_region(&pdev->dev, scn->mem_pa, + mem_pa_size); + sc->mem = NULL; + } + } + scn->mem = NULL; +} + +/** + * hif_enable_bus() - Enable the bus + * @dev: dev + * @bdev: bus dev + * @bid: bus id + * @type: bus type + * + * This function enables the radio bus by 
enabling necessary + * clocks and waits for the target to get ready to proceed futher + * + * Return: QDF_STATUS + */ +QDF_STATUS hif_ahb_enable_bus(struct hif_softc *ol_sc, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type) +{ + int ret = 0; + int hif_type; + int target_type; + const struct platform_device_id *id = (struct platform_device_id *)bid; + struct platform_device *pdev = bdev; + struct hif_target_info *tgt_info = NULL; + struct resource *memres = NULL; + void __iomem *mem = NULL; + uint32_t revision_id = 0; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc); + + sc->pdev = (struct pci_dev *)pdev; + sc->dev = &pdev->dev; + sc->devid = id->driver_data; + + ret = hif_get_device_type(id->driver_data, revision_id, + &hif_type, &target_type); + if (ret < 0) { + HIF_ERROR("%s: invalid device ret %d id %d revision_id %d", + __func__, ret, (int)id->driver_data, revision_id); + return QDF_STATUS_E_FAILURE; + } + + memres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!memres) { + HIF_INFO("%s: Failed to get IORESOURCE_MEM\n", __func__); + return -EIO; + } + + ret = dma_set_mask(dev, DMA_BIT_MASK(32)); + if (ret) { + HIF_INFO("ath: 32-bit DMA not available\n"); + goto err_cleanup1; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); +#else + ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); +#endif + if (ret) { + HIF_ERROR("%s: failed to set dma mask error = %d", + __func__, ret); + return ret; + } + + /* Arrange for access to Target SoC registers. 
*/ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) + mem = devm_ioremap_resource(&pdev->dev, memres); +#else + mem = devm_request_and_ioremap(&pdev->dev, memres); +#endif + if (IS_ERR(mem)) { + HIF_INFO("ath: ioremap error\n"); + ret = PTR_ERR(mem); + goto err_cleanup1; + } + + sc->mem = mem; + ol_sc->mem = mem; + ol_sc->mem_pa = memres->start; + + tgt_info = hif_get_target_info_handle((struct hif_opaque_softc *)ol_sc); + + tgt_info->target_type = target_type; + hif_register_tbl_attach(ol_sc, hif_type); + hif_target_register_tbl_attach(ol_sc, target_type); + + /* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */ + if (tgt_info->target_type != TARGET_TYPE_QCA8074) { + if (hif_ahb_enable_radio(sc, pdev, id) != 0) { + HIF_INFO("error in enabling soc\n"); + return -EIO; + } + + if (hif_target_sync_ahb(ol_sc) < 0) { + ret = -EIO; + goto err_target_sync; + } + } + HIF_TRACE("%s: X - hif_type = 0x%x, target_type = 0x%x", + __func__, hif_type, target_type); + + return QDF_STATUS_SUCCESS; +err_target_sync: + /* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */ + if (tgt_info->target_type != TARGET_TYPE_QCA8074) { + HIF_INFO("Error: Disabling target\n"); + hif_ahb_disable_bus(ol_sc); + } +err_cleanup1: + return ret; +} + + +/** + * hif_reset_soc() - reset soc + * + * @hif_ctx: HIF context + * + * This function resets soc and helds the + * target in reset state + * + * Return: void + */ +/* Function to reset SoC */ +void hif_ahb_reset_soc(struct hif_softc *hif_ctx) +{ + hif_ahb_device_reset(hif_ctx); +} + + +/** + * hif_nointrs() - disable IRQ + * + * @scn: struct hif_softc + * + * This function stops interrupt(s) + * + * Return: none + */ +void hif_ahb_nointrs(struct hif_softc *scn) +{ + int i; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct CE_attr *host_ce_conf = hif_state->host_ce_config; + + ce_unregister_irq(hif_state, CE_ALL_BITMAP); + + if (scn->request_irq_done 
== false) + return; + + if (sc->num_msi_intrs > 0) { + /* MSI interrupt(s) */ + for (i = 0; i < sc->num_msi_intrs; i++) { + free_irq(sc->irq + i, sc); + } + sc->num_msi_intrs = 0; + } else { + if (!scn->per_ce_irq) { + free_irq(sc->irq, sc); + } else { + for (i = 0; i < scn->ce_count; i++) { + if (host_ce_conf[i].flags + & CE_ATTR_DISABLE_INTR) + continue; + + free_irq(ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i], + &hif_state->tasklets[i]); + } + hif_ahb_deconfigure_grp_irq(scn); + } + } + scn->request_irq_done = false; + +} + +/** + * ce_irq_enable() - enable copy engine IRQ + * @scn: struct hif_softc + * @ce_id: ce_id + * + * This function enables the interrupt for the radio. + * + * Return: N/A + */ +void hif_ahb_irq_enable(struct hif_softc *scn, int ce_id) +{ + uint32_t regval; + uint32_t reg_offset = 0; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id]; + struct hif_target_info *tgt_info = &scn->target_info; + + if (scn->per_ce_irq) { + if (target_ce_conf->pipedir & PIPEDIR_OUT) { + reg_offset = HOST_IE_ADDRESS; + qdf_spin_lock_irqsave(&hif_state->irq_reg_lock); + regval = hif_read32_mb(scn->mem + reg_offset); + regval |= HOST_IE_REG1_CE_BIT(ce_id); + hif_write32_mb(scn->mem + reg_offset, regval); + qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock); + } + if (target_ce_conf->pipedir & PIPEDIR_IN) { + reg_offset = HOST_IE_ADDRESS_2; + qdf_spin_lock_irqsave(&hif_state->irq_reg_lock); + regval = hif_read32_mb(scn->mem + reg_offset); + regval |= HOST_IE_REG2_CE_BIT(ce_id); + hif_write32_mb(scn->mem + reg_offset, regval); + if (tgt_info->target_type == TARGET_TYPE_QCA8074) { + /* Enable destination ring interrupts for 8074 + * TODO: To be removed in 2.0 HW */ + regval = hif_read32_mb(scn->mem + + HOST_IE_ADDRESS_3); + regval |= HOST_IE_REG3_CE_BIT(ce_id); + } + hif_write32_mb(scn->mem + HOST_IE_ADDRESS_3, regval); + qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock); + } + } else { 
+ hif_pci_irq_enable(scn, ce_id); + } +} + +/** + * ce_irq_disable() - disable copy engine IRQ + * @scn: struct hif_softc + * @ce_id: ce_id + * + * Return: N/A + */ +void hif_ahb_irq_disable(struct hif_softc *scn, int ce_id) +{ + uint32_t regval; + uint32_t reg_offset = 0; + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id]; + struct hif_target_info *tgt_info = &scn->target_info; + + if (scn->per_ce_irq) { + if (target_ce_conf->pipedir & PIPEDIR_OUT) { + reg_offset = HOST_IE_ADDRESS; + qdf_spin_lock_irqsave(&hif_state->irq_reg_lock); + regval = hif_read32_mb(scn->mem + reg_offset); + regval &= ~HOST_IE_REG1_CE_BIT(ce_id); + hif_write32_mb(scn->mem + reg_offset, regval); + qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock); + } + if (target_ce_conf->pipedir & PIPEDIR_IN) { + reg_offset = HOST_IE_ADDRESS_2; + qdf_spin_lock_irqsave(&hif_state->irq_reg_lock); + regval = hif_read32_mb(scn->mem + reg_offset); + regval &= ~HOST_IE_REG2_CE_BIT(ce_id); + hif_write32_mb(scn->mem + reg_offset, regval); + if (tgt_info->target_type == TARGET_TYPE_QCA8074) { + /* Disable destination ring interrupts for 8074 + * TODO: To be removed in 2.0 HW */ + regval = hif_read32_mb(scn->mem + + HOST_IE_ADDRESS_3); + regval &= ~HOST_IE_REG3_CE_BIT(ce_id); + } + hif_write32_mb(scn->mem + HOST_IE_ADDRESS_3, regval); + qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock); + } + } +} + +void hif_ahb_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group) +{ + int i; + + qdf_spin_lock_irqsave(&hif_ext_group->irq_lock); + if (hif_ext_group->irq_enabled) { + for (i = 0; i < hif_ext_group->numirq; i++) { + disable_irq_nosync(hif_ext_group->os_irq[i]); + } + hif_ext_group->irq_enabled = false; + } + qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock); +} + +void hif_ahb_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group) +{ + int i; + + qdf_spin_lock_irqsave(&hif_ext_group->irq_lock); + if 
(!hif_ext_group->irq_enabled) { + for (i = 0; i < hif_ext_group->numirq; i++) { + enable_irq(hif_ext_group->os_irq[i]); + } + hif_ext_group->irq_enabled = true; + } + qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock); +} + +/** + * hif_ahb_needs_bmi() - return true if the soc needs bmi through the driver + * @scn: hif context + * + * Return: true if soc needs driver bmi otherwise false + */ +bool hif_ahb_needs_bmi(struct hif_softc *scn) +{ + return !ce_srng_based(scn); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb.h b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb.h new file mode 100644 index 0000000000000000000000000000000000000000..6ab4568856f0d26b85fb6bef340bb017ce026637 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: if_ahb.h + * + * h file for ahb specific implementations. 
+ */ + +#ifndef __IF_AHB_H +#define __IF_AHB_H + +#define GCC_BASE 0x1800000 +#define GCC_SIZE 0x60000 +#define GCC_FEPLL_PLL_DIV 0x2f020 +#define GCC_FEPLL_PLL_CLK_WIFI_0_SEL_MASK 0x00000300 +#define GCC_FEPLL_PLL_CLK_WIFI_0_SEL_SHIFT 8 +#define GCC_FEPLL_PLL_CLK_WIFI_1_SEL_MASK 0x00003000 +#define GCC_FEPLL_PLL_CLK_WIFI_1_SEL_SHIFT 12 + + +/* These registers are outsize Wifi space. */ +/* TBD: Should we add these offsets as device tree properties? */ +#define TCSR_BASE 0x1900000 +#define TCSR_SIZE 0x80000 +#define TCSR_WIFI0_GLB_CFG 0x49000 +#define TCSR_WIFI1_GLB_CFG 0x49004 +#define TCSR_WCSS0_HALTREQ 0x52000 +#define TCSR_WCSS1_HALTREQ 0x52004 +#define TCSR_WCSS0_HALTACK 0x52010 +#define TCSR_WCSS1_HALTACK 0x52014 +#define ATH_AHB_RESET_WAIT_MAX 10 /* Ms */ + +irqreturn_t hif_ahb_interrupt_handler(int irq, void *context); + +#endif + diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb_reset.c b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb_reset.c new file mode 100644 index 0000000000000000000000000000000000000000..a6f13bfdcf3750a8581a99b121cce9b823b339e3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_ahb_reset.c @@ -0,0 +1,395 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: if_ahb_reset.c + * + * c file for ahb ipq4019 specific implementations. + */ + +#include "hif.h" +#include "target_type.h" +#include "hif_main.h" +#include "hif_debug.h" +#include "hif_io32.h" +#include "ce_main.h" +#include "ce_tasklet.h" +#include "ahb_api.h" +#include "if_ahb.h" + +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) +#include +#endif + +/** + * clk_enable_disable() - Enable/disable clock + * @dev : pointer to device structure + * @str : clock name + * @enable : should be true, if the clock needs to be enabled + * should be false, if the clock needs to be enabled + * + * This is a helper function for hif_ahb_clk_enable_disable to enable + * disable clocks. + * clk_prepare_enable will enable the clock + * clk_disable_unprepare will disable the clock + * + * Return: zero on success, non-zero incase of error. 
+ */ + +static int clk_enable_disable(struct device *dev, const char *str, int enable) +{ + struct clk *clk_t = NULL; + int ret; + + clk_t = clk_get(dev, str); + if (IS_ERR(clk_t)) { + HIF_INFO("%s: Failed to get %s clk %ld\n", + __func__, str, PTR_ERR(clk_t)); + return -EFAULT; + } + if (true == enable) { + /* Prepare and Enable clk */ + ret = clk_prepare_enable(clk_t); + if (ret) { + HIF_INFO("%s: err enabling clk %s , error:%d\n", + __func__, str, ret); + return ret; + } + } else { + /* Disable and unprepare clk */ + clk_disable_unprepare(clk_t); + } + return 0; +} + + +/** + * hif_ahb_clk_enable_disable() - Enable/disable ahb clock + * @dev : pointer to device structure + * @enable : should be true, if the clock needs to be enabled + * should be false, if the clock needs to be enabled + * + * This functions helps to enable/disable all the necesasary clocks + * for bus access. + * + * Return: zero on success, non-zero incase of error + */ +int hif_ahb_clk_enable_disable(struct device *dev, int enable) +{ + int ret; + + ret = clk_enable_disable(dev, "wifi_wcss_cmd", enable); + if (ret) + return ret; + ret = clk_enable_disable(dev, "wifi_wcss_ref", enable); + if (ret) + return ret; + ret = clk_enable_disable(dev, "wifi_wcss_rtc", enable); + if (ret) + return ret; + return 0; +} + +/** + * hif_enable_radio() - Enable the target radio. + * @sc : pointer to the hif context + * + * This function helps to release the target from reset state + * + * Return : zero on success, non-zero incase of error. 
+ */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) +int hif_ahb_enable_radio(struct hif_pci_softc *sc, + struct platform_device *pdev, + const struct platform_device_id *id) +{ + struct reset_control *reset_ctl = NULL; + uint32_t msi_addr, msi_base, wifi_core_id; + struct hif_softc *scn = HIF_GET_SOFTC(sc); + struct device_node *dev_node = pdev->dev.of_node; + bool msienable = false; + int ret = 0; + + ret = of_property_read_u32(dev_node, "qca,msi_addr", &msi_addr); + if (ret) { + HIF_INFO("%s: Unable to get msi_addr - error:%d\n", + __func__, ret); + return -EIO; + } + ret = of_property_read_u32(dev_node, "qca,msi_base", &msi_base); + if (ret) { + HIF_INFO("%s: Unable to get msi_base - error:%d\n", + __func__, ret); + return -EIO; + } + ret = of_property_read_u32(dev_node, "core-id", &wifi_core_id); + if (ret) { + HIF_INFO("%s: Unable to get core-id - error:%d\n", + __func__, ret); + return -EIO; + } + + /* Program the above values into Wifi scratch regists */ + if (msienable) { + hif_write32_mb(sc->mem + FW_AXI_MSI_ADDR, msi_addr); + hif_write32_mb(sc->mem + FW_AXI_MSI_DATA, msi_base); + } + + /* TBD: Temporary changes. Frequency should be + * retrieved through clk_xxx once kernel GCC driver is available + */ + { + void __iomem *mem_gcc; + uint32_t clk_sel; + uint32_t gcc_fepll_pll_div; + uint32_t wifi_cpu_freq[4] = {266700000, 250000000, 222200000, + 200000000}; + uint32_t current_freq = 0; + + /* Enable WIFI clock input */ + if (scn->target_info.target_type == TARGET_TYPE_IPQ4019) { + ret = hif_ahb_clk_enable_disable(&pdev->dev, 1); + if (ret) { + HIF_INFO("%s:Error while enabling clock :%d\n", + __func__, ret); + return ret; + } + } + + mem_gcc = ioremap_nocache(GCC_BASE, GCC_SIZE); + if (IS_ERR(mem_gcc)) { + HIF_INFO("%s: GCC ioremap failed\n", __func__); + return PTR_ERR(mem_gcc); + } + gcc_fepll_pll_div = hif_read32_mb(mem_gcc + GCC_FEPLL_PLL_DIV); + clk_sel = (wifi_core_id == 0) ? 
((gcc_fepll_pll_div & + GCC_FEPLL_PLL_CLK_WIFI_0_SEL_MASK) >> + GCC_FEPLL_PLL_CLK_WIFI_0_SEL_SHIFT) : + ((gcc_fepll_pll_div & GCC_FEPLL_PLL_CLK_WIFI_1_SEL_MASK) + >> GCC_FEPLL_PLL_CLK_WIFI_1_SEL_SHIFT); + current_freq = wifi_cpu_freq[clk_sel]; + + HIF_INFO("Wifi%d CPU frequency %u\n", wifi_core_id, + current_freq); + hif_write32_mb(sc->mem + FW_CPU_PLL_CONFIG, gcc_fepll_pll_div); + iounmap(mem_gcc); + } + + /* De-assert radio cold reset */ + reset_ctl = reset_control_get(&pdev->dev, "wifi_radio_cold"); + if (IS_ERR(reset_ctl)) { + HIF_INFO("%s: Failed to get radio cold reset control\n", + __func__); + ret = PTR_ERR(reset_ctl); + goto err_reset; + } + reset_control_deassert(reset_ctl); + reset_control_put(reset_ctl); + + /* De-assert radio warm reset */ + reset_ctl = reset_control_get(&pdev->dev, "wifi_radio_warm"); + if (IS_ERR(reset_ctl)) { + HIF_INFO("%s: Failed to get radio warm reset control\n", + __func__); + ret = PTR_ERR(reset_ctl); + goto err_reset; + } + reset_control_deassert(reset_ctl); + reset_control_put(reset_ctl); + + /* De-assert radio srif reset */ + reset_ctl = reset_control_get(&pdev->dev, "wifi_radio_srif"); + if (IS_ERR(reset_ctl)) { + HIF_INFO("%s: Failed to get radio srif reset control\n", + __func__); + ret = PTR_ERR(reset_ctl); + goto err_reset; + } + reset_control_deassert(reset_ctl); + reset_control_put(reset_ctl); + + /* De-assert target CPU reset */ + reset_ctl = reset_control_get(&pdev->dev, "wifi_cpu_init"); + if (IS_ERR(reset_ctl)) { + HIF_INFO("%s: Failed to get cpu init reset control", __func__); + ret = PTR_ERR(reset_ctl); + goto err_reset; + } + reset_control_deassert(reset_ctl); + reset_control_put(reset_ctl); + + return 0; + +err_reset: + return -EIO; +} +#else +int hif_ahb_enable_radio(struct hif_pci_softc *sc, + struct platform_device *pdev, + const struct platform_device_id *id) +{ + qdf_print("%s:%d:Reset routines not available in kernel version.\n", + __func__, __LINE__); + return -EIO; +} +#endif + +/* "wifi_core_warm" is 
the other reset type */ +#define AHB_RESET_TYPE "wifi_core_cold" + +/** + * hif_ahb_device_reset() - Disable the radio and held the radio is reset state. + * @scn : pointer to the hif context + * + * This function will hold the target in reset state. + * Will be called while unload the driver or any graceful unload path. + * + * Return : n/a. + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) +void hif_ahb_device_reset(struct hif_softc *scn) +{ + struct reset_control *resetctl = NULL; + struct reset_control *core_resetctl = NULL; + struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); + struct platform_device *pdev = (struct platform_device *)(sc->pdev); + uint32_t glb_cfg_offset; + uint32_t haltreq_offset; + uint32_t haltack_offset; + void __iomem *mem_tcsr; + uint32_t wifi_core_id; + uint32_t reg_value; + int wait_limit = ATH_AHB_RESET_WAIT_MAX; + + + wifi_core_id = hif_read32_mb(sc->mem + WLAN_SUBSYSTEM_CORE_ID_ADDRESS); + glb_cfg_offset = (wifi_core_id == 0) ? TCSR_WIFI0_GLB_CFG : + TCSR_WIFI1_GLB_CFG; + haltreq_offset = (wifi_core_id == 0) ? TCSR_WCSS0_HALTREQ : + TCSR_WCSS1_HALTREQ; + haltack_offset = (wifi_core_id == 0) ? 
TCSR_WCSS0_HALTACK : + TCSR_WCSS1_HALTACK; + + mem_tcsr = ioremap_nocache(TCSR_BASE, TCSR_SIZE); + if (IS_ERR(mem_tcsr)) { + HIF_INFO("%s: TCSR ioremap failed\n", __func__); + return; + } + reg_value = hif_read32_mb(mem_tcsr + haltreq_offset); + hif_write32_mb(mem_tcsr + haltreq_offset, reg_value | 0x1); + /* Wait for halt ack before asserting reset */ + while (wait_limit) { + + if (hif_read32_mb(mem_tcsr + haltack_offset) & 0x1) + break; + + qdf_mdelay(1); + wait_limit--; + } + + reg_value = hif_read32_mb(mem_tcsr + glb_cfg_offset); + hif_write32_mb(mem_tcsr + glb_cfg_offset, reg_value | (1 << 25)); + + core_resetctl = reset_control_get(&pdev->dev, AHB_RESET_TYPE); + if (IS_ERR(core_resetctl)) { + HIF_INFO("Failed to get wifi core cold reset control\n"); + return; + } + + /* Reset wifi core */ + reset_control_assert(core_resetctl); + + /* TBD: Check if we should also assert other bits (radio_cold, radio_ + * warm, radio_srif, cpu_ini) + */ + qdf_mdelay(1); /* TBD: Get reqd delay from HW team */ + + /* Assert radio cold reset */ + resetctl = reset_control_get(&pdev->dev, "wifi_radio_cold"); + if (IS_ERR(resetctl)) { + HIF_INFO("%s: Failed to get radio cold reset control\n", + __func__); + return; + } + reset_control_assert(resetctl); + qdf_mdelay(1); /* TBD: Get reqd delay from HW team */ + reset_control_put(resetctl); + + /* Assert radio warm reset */ + resetctl = reset_control_get(&pdev->dev, "wifi_radio_warm"); + if (IS_ERR(resetctl)) { + HIF_INFO("%s: Failed to get radio warm reset control\n", + __func__); + return; + } + reset_control_assert(resetctl); + qdf_mdelay(1); /* TBD: Get reqd delay from HW team */ + reset_control_put(resetctl); + + /* Assert radio srif reset */ + resetctl = reset_control_get(&pdev->dev, "wifi_radio_srif"); + if (IS_ERR(resetctl)) { + HIF_INFO("%s: Failed to get radio srif reset control\n", + __func__); + return; + } + reset_control_assert(resetctl); + qdf_mdelay(1); /* TBD: Get reqd delay from HW team */ + 
reset_control_put(resetctl); + + /* Assert target CPU reset */ + resetctl = reset_control_get(&pdev->dev, "wifi_cpu_init"); + if (IS_ERR(resetctl)) { + HIF_INFO("%s: Failed to get cpu init reset control", __func__); + return; + } + reset_control_assert(resetctl); + qdf_mdelay(10); /* TBD: Get reqd delay from HW team */ + reset_control_put(resetctl); + + /* Clear gbl_cfg and haltreq before clearing Wifi core reset */ + reg_value = hif_read32_mb(mem_tcsr + haltreq_offset); + hif_write32_mb(mem_tcsr + haltreq_offset, reg_value & ~0x1); + reg_value = hif_read32_mb(mem_tcsr + glb_cfg_offset); + hif_write32_mb(mem_tcsr + glb_cfg_offset, reg_value & ~(1 << 25)); + + /* de-assert wifi core reset */ + reset_control_deassert(core_resetctl); + + qdf_mdelay(1); /* TBD: Get reqd delay from HW team */ + + /* TBD: Check if we should de-assert other bits here */ + reset_control_put(core_resetctl); + iounmap(mem_tcsr); + HIF_INFO("Reset complete for wifi core id : %d\n", wifi_core_id); +} +#else +void hif_ahb_device_reset(struct hif_softc *scn) +{ + qdf_print("%s:%d:Reset routines not available in kernel version.\n", + __func__, __LINE__); +} +#endif + + + diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_snoc.c b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_snoc.c new file mode 100644 index 0000000000000000000000000000000000000000..bbdaa7a99525905706ec49bf4e91306a884d474c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_snoc.c @@ -0,0 +1,484 @@ +/* + * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: if_snoc.c + * + * c file for snoc specif implementations. + */ + +#include "hif.h" +#include "hif_main.h" +#include "hif_debug.h" +#include "hif_io32.h" +#include "ce_main.h" +#include "ce_tasklet.h" +#include "ce_api.h" +#include "ce_internal.h" +#include "snoc_api.h" +#include "pld_common.h" +#include "qdf_util.h" +#ifdef IPA_OFFLOAD +#include +#endif +#include "target_type.h" + +/** + * hif_disable_isr(): disable isr + * + * This function disables isr and kills tasklets + * + * @hif_ctx: struct hif_softc + * + * Return: void + */ +void hif_snoc_disable_isr(struct hif_softc *scn) +{ + hif_exec_kill(&scn->osc); + hif_nointrs(scn); + ce_tasklet_kill(scn); + qdf_atomic_set(&scn->active_tasklet_cnt, 0); + qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0); +} + +/** + * hif_dump_registers(): dump bus debug registers + * @hif_ctx: struct hif_opaque_softc + * + * This function dumps hif bus debug registers + * + * Return: 0 for success or error code + */ +int hif_snoc_dump_registers(struct hif_softc *hif_ctx) +{ + int status; + struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); + + status = hif_dump_ce_registers(scn); + if (status) + HIF_ERROR("%s: Dump CE Registers Failed", __func__); + + return 0; +} + +void hif_snoc_display_stats(struct hif_softc *hif_ctx) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + + if (hif_state == NULL) { + HIF_ERROR("%s, hif_ctx null", __func__); + return; + } + hif_display_ce_stats(hif_state); +} + +void hif_snoc_clear_stats(struct hif_softc *hif_ctx) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); + + if (hif_state == NULL) { + HIF_ERROR("%s, hif_ctx 
null", __func__); + return; + } + hif_clear_ce_stats(hif_state); +} + +/** + * hif_snoc_close(): hif_bus_close + * + * Return: n/a + */ +void hif_snoc_close(struct hif_softc *scn) +{ + hif_ce_close(scn); +} + +/** + * hif_bus_open(): hif_bus_open + * @hif_ctx: hif context + * @bus_type: bus type + * + * Return: n/a + */ +QDF_STATUS hif_snoc_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type) +{ + return hif_ce_open(hif_ctx); +} + +/** + * hif_snoc_get_soc_info() - populates scn with hw info + * + * fills in the virtual and physical base address as well as + * soc version info. + * + * return 0 or QDF_STATUS_E_FAILURE + */ +static QDF_STATUS hif_snoc_get_soc_info(struct hif_softc *scn) +{ + int ret; + struct pld_soc_info soc_info; + + qdf_mem_zero(&soc_info, sizeof(soc_info)); + + ret = pld_get_soc_info(scn->qdf_dev->dev, &soc_info); + if (ret < 0) { + HIF_ERROR("%s: pld_get_soc_info error = %d", __func__, ret); + return QDF_STATUS_E_FAILURE; + } + + scn->mem = soc_info.v_addr; + scn->mem_pa = soc_info.p_addr; + + scn->target_info.soc_version = soc_info.soc_id; + scn->target_info.target_version = soc_info.soc_id; + scn->target_info.target_revision = 0; + return QDF_STATUS_SUCCESS; +} + +/** + * hif_bus_configure() - configure the snoc bus + * @scn: pointer to the hif context. + * + * return: 0 for success. nonzero for failure. 
+ */ +int hif_snoc_bus_configure(struct hif_softc *scn) +{ + int ret; + uint8_t wake_ce_id; + + ret = hif_snoc_get_soc_info(scn); + if (ret) + return ret; + + hif_ce_prepare_config(scn); + + ret = hif_wlan_enable(scn); + if (ret) { + HIF_ERROR("%s: hif_wlan_enable error = %d", + __func__, ret); + return ret; + } + + ret = hif_config_ce(scn); + if (ret) + goto wlan_disable; + + ret = hif_get_wake_ce_id(scn, &wake_ce_id); + if (ret) + goto unconfig_ce; + + scn->wake_irq = pld_get_irq(scn->qdf_dev->dev, wake_ce_id); + + HIF_INFO(FL("expecting wake from ce %d, irq %d"), + wake_ce_id, scn->wake_irq); + + return 0; + +unconfig_ce: + hif_unconfig_ce(scn); + +wlan_disable: + hif_wlan_disable(scn); + + return ret; +} + +/** + * hif_snoc_get_target_type(): Get the target type + * + * This function is used to query the target type. + * + * @ol_sc: hif_softc struct pointer + * @dev: device pointer + * @bdev: bus dev pointer + * @bid: bus id pointer + * @hif_type: HIF type such as HIF_TYPE_QCA6180 + * @target_type: target type such as TARGET_TYPE_QCA6180 + * + * Return: 0 for success + */ +static inline int hif_snoc_get_target_type(struct hif_softc *ol_sc, + struct device *dev, void *bdev, const struct hif_bus_id *bid, + uint32_t *hif_type, uint32_t *target_type) +{ + /* TODO: need to use HW version. 
Hard code for now */ +#ifdef QCA_WIFI_3_0_ADRASTEA + *hif_type = HIF_TYPE_ADRASTEA; + *target_type = TARGET_TYPE_ADRASTEA; +#else + *hif_type = 0; + *target_type = 0; +#endif + return 0; +} + +#ifdef IPA_OFFLOAD +static int hif_set_dma_coherent_mask(qdf_device_t osdev) +{ + uint8_t addr_bits; + + if (false == hif_get_ipa_present()) + return qdf_set_dma_coherent_mask(osdev->dev, + DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE); + + if (hif_get_ipa_hw_type() < IPA_HW_v3_0) + addr_bits = DMA_COHERENT_MASK_BELOW_IPA_VER_3; + else + addr_bits = DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE; + + return qdf_set_dma_coherent_mask(osdev->dev, addr_bits); +} +#else +static int hif_set_dma_coherent_mask(qdf_device_t osdev) +{ + return qdf_set_dma_coherent_mask(osdev->dev, 37); +} +#endif + +/** + * hif_enable_bus(): hif_enable_bus + * @dev: dev + * @bdev: bus dev + * @bid: bus id + * @type: bus type + * + * Return: QDF_STATUS + */ +QDF_STATUS hif_snoc_enable_bus(struct hif_softc *ol_sc, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type) +{ + int ret; + int hif_type; + int target_type; + + if (!ol_sc) { + HIF_ERROR("%s: hif_ctx is NULL", __func__); + return QDF_STATUS_E_NOMEM; + } + + ret = hif_set_dma_coherent_mask(ol_sc->qdf_dev); + if (ret) { + HIF_ERROR("%s: failed to set dma mask error = %d", + __func__, ret); + return ret; + } + + ret = qdf_device_init_wakeup(ol_sc->qdf_dev, true); + if (ret == -EEXIST) + HIF_WARN("%s: device_init_wakeup already done", + __func__); + else if (ret) { + HIF_ERROR("%s: device_init_wakeup: err= %d", + __func__, ret); + return ret; + } + + ret = hif_snoc_get_target_type(ol_sc, dev, bdev, bid, + &hif_type, &target_type); + if (ret < 0) { + HIF_ERROR("%s: invalid device id/revision_id", __func__); + return QDF_STATUS_E_FAILURE; + } + + ol_sc->target_info.target_type = target_type; + + hif_register_tbl_attach(ol_sc, hif_type); + hif_target_register_tbl_attach(ol_sc, target_type); + + /* the bus should remain on 
durring suspend for snoc */ + hif_vote_link_up(GET_HIF_OPAQUE_HDL(ol_sc)); + + HIF_DBG("%s: X - hif_type = 0x%x, target_type = 0x%x", + __func__, hif_type, target_type); + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_disable_bus(): hif_disable_bus + * + * This function disables the bus + * + * @bdev: bus dev + * + * Return: none + */ +void hif_snoc_disable_bus(struct hif_softc *scn) +{ + int ret; + + hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn)); + + ret = qdf_device_init_wakeup(scn->qdf_dev, false); + if (ret) + HIF_ERROR("%s: device_init_wakeup: err %d", __func__, ret); +} + +/** + * hif_nointrs(): disable IRQ + * + * This function stops interrupt(s) + * + * @scn: struct hif_softc + * + * Return: none + */ +void hif_snoc_nointrs(struct hif_softc *scn) +{ + struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); + + ce_unregister_irq(hif_state, CE_ALL_BITMAP); +} + +/** + * ce_irq_enable() - enable copy engine IRQ + * @scn: struct hif_softc + * @ce_id: ce_id + * + * Return: N/A + */ +void hif_snoc_irq_enable(struct hif_softc *scn, + int ce_id) +{ + ce_enable_irq_in_individual_register(scn, ce_id); +} + +/** + * ce_irq_disable() - disable copy engine IRQ + * @scn: struct hif_softc + * @ce_id: ce_id + * + * Return: N/A + */ +void hif_snoc_irq_disable(struct hif_softc *scn, int ce_id) +{ + ce_disable_irq_in_individual_register(scn, ce_id); +} + +/* + * hif_snoc_setup_wakeup_sources() - enable/disable irq wake on correct irqs + * @hif_softc: hif context + * + * Firmware will send a wakeup request to the HTC_CTRL_RSVD_SVC when waking up + * the host driver. Ensure that the copy complete interrupt from this copy + * engine can wake up the apps processor. 
+ * + * Return: 0 for success + */ +static +QDF_STATUS hif_snoc_setup_wakeup_sources(struct hif_softc *scn, bool enable) +{ + int ret; + + if (enable) + ret = enable_irq_wake(scn->wake_irq); + else + ret = disable_irq_wake(scn->wake_irq); + + if (ret) { + HIF_ERROR("%s: Fail to setup wake IRQ!", __func__); + return QDF_STATUS_E_RESOURCES; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * hif_snoc_bus_suspend() - prepare to suspend the bus + * @scn: hif context + * + * Setup wakeup interrupt configuration. + * Disable CE interrupts (wakeup interrupt will still wake apps) + * Drain tasklets. - make sure that we don't suspend while processing + * the wakeup message. + * + * Return: 0 on success. + */ +int hif_snoc_bus_suspend(struct hif_softc *scn) +{ + if (hif_snoc_setup_wakeup_sources(scn, true) != QDF_STATUS_SUCCESS) + return -EFAULT; + return 0; +} + +/** + * hif_snoc_bus_resume() - snoc bus resume function + * @scn: hif context + * + * Clear wakeup interrupt configuration. + * Reenable ce interrupts + * + * Return: 0 on success + */ +int hif_snoc_bus_resume(struct hif_softc *scn) +{ + if (hif_snoc_setup_wakeup_sources(scn, false) != QDF_STATUS_SUCCESS) + QDF_BUG(0); + + return 0; +} + +/** + * hif_snoc_bus_suspend_noirq() - ensure there are no pending transactions + * @scn: hif context + * + * Ensure that if we received the wakeup message before the irq + * was disabled that the message is pocessed before suspending. + * + * Return: -EBUSY if we fail to flush the tasklets. 
+ */ +int hif_snoc_bus_suspend_noirq(struct hif_softc *scn) +{ + if (hif_drain_tasklets(scn) != 0) + return -EBUSY; + return 0; +} + +int hif_snoc_map_ce_to_irq(struct hif_softc *scn, int ce_id) +{ + return pld_get_irq(scn->qdf_dev->dev, ce_id); +} + +/** + * hif_is_target_register_access_allowed(): Check target register access allow + * @scn: HIF Context + * + * This function help to check whether target register access is allowed or not + * + * Return: true if target access is allowed else false + */ +bool hif_is_target_register_access_allowed(struct hif_softc *scn) +{ + if (hif_is_recovery_in_progress(scn)) + return hif_is_target_ready(scn); + else + return true; +} + +/** + * hif_snoc_needs_bmi() - return true if the soc needs bmi through the driver + * @scn: hif context + * + * Return: true if soc needs driver bmi otherwise false + */ +bool hif_snoc_needs_bmi(struct hif_softc *scn) +{ + return false; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/usb/hif_usb.c b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/hif_usb.c new file mode 100644 index 0000000000000000000000000000000000000000..1e5cd21103a548531db91001c200192e7a1bf119 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/hif_usb.c @@ -0,0 +1,929 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "qdf_net_types.h" +#include +#include +#include +#define ATH_MODULE_NAME hif +#include +#include "qdf_module.h" +#include "hif_usb_internal.h" +#include "if_usb.h" +#include "usb_api.h" + +#if defined(WLAN_DEBUG) || defined(DEBUG) +static ATH_DEBUG_MASK_DESCRIPTION g_hif_debug_description[] = { + {USB_HIF_DEBUG_CTRL_TRANS, "Control Transfers"}, + {USB_HIF_DEBUG_BULK_IN, "BULK In Transfers"}, + {USB_HIF_DEBUG_BULK_OUT, "BULK Out Transfers"}, + {USB_HIF_DEBUG_DUMP_DATA, "Dump data"}, + {USB_HIF_DEBUG_ENUM, "Enumeration"}, +}; + +ATH_DEBUG_INSTANTIATE_MODULE_VAR(hif, + "hif", + "USB Host Interface", + ATH_DEBUG_MASK_DEFAULTS | ATH_DEBUG_INFO | + USB_HIF_DEBUG_ENUM, + ATH_DEBUG_DESCRIPTION_COUNT + (g_hif_debug_description), + g_hif_debug_description); + +#endif + +#ifdef USB_ISOC_SUPPORT +unsigned int hif_usb_isoch_vo = 1; +#else +unsigned int hif_usb_isoch_vo; +#endif +unsigned int hif_usb_disable_rxdata2 = 1; + +/** + * usb_hif_usb_transmit_complete() - completion routing for tx urb's + * @urb: pointer to urb for which tx completion is called + * + * Return: none + */ +static void usb_hif_usb_transmit_complete(struct urb *urb) +{ + struct HIF_URB_CONTEXT *urb_context = + (struct HIF_URB_CONTEXT *)urb->context; + qdf_nbuf_t buf; + struct HIF_USB_PIPE *pipe = urb_context->pipe; + struct hif_usb_send_context *send_context; + + HIF_DBG("+%s: pipe: %d, stat:%d, len:%d", __func__, + pipe->logical_pipe_num, urb->status, urb->actual_length); + + /* this urb is not pending anymore */ + usb_hif_remove_pending_transfer(urb_context); + + if (urb->status != 0) { 
+ HIF_ERROR("%s: pipe: %d, failed:%d", + __func__, pipe->logical_pipe_num, urb->status); + } + + buf = urb_context->buf; + send_context = urb_context->send_context; + + if (send_context->new_alloc) + qdf_mem_free(send_context); + else + qdf_nbuf_pull_head(buf, send_context->head_data_len); + + urb_context->buf = NULL; + usb_hif_cleanup_transmit_urb(urb_context); + + /* note: queue implements a lock */ + skb_queue_tail(&pipe->io_comp_queue, buf); + HIF_USB_SCHEDULE_WORK(pipe); + + HIF_DBG("-%s", __func__); +} + +/** + * hif_send_internal() - HIF internal routine to prepare and submit tx urbs + * @hif_usb_device: pointer to HIF_DEVICE_USB structure + * @pipe_id: HIF pipe on which data is to be sent + * @hdr_buf: any header buf to be prepended, currently ignored + * @buf: qdf_nbuf_t containing data to be transmitted + * @nbytes: number of bytes to be transmitted + * + * Return: QDF_STATUS_SUCCESS on success and error QDF status on failure + */ +static QDF_STATUS hif_send_internal(struct HIF_DEVICE_USB *hif_usb_device, + uint8_t pipe_id, + qdf_nbuf_t hdr_buf, + qdf_nbuf_t buf, unsigned int nbytes) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct HIF_DEVICE_USB *device = hif_usb_device; + struct HIF_USB_PIPE *pipe = &device->pipes[pipe_id]; + struct HIF_URB_CONTEXT *urb_context; + uint8_t *data; + uint32_t len; + struct urb *urb; + int usb_status; + int i; + struct hif_usb_send_context *send_context; + uint8_t frag_count; + uint32_t head_data_len, tmp_frag_count = 0; + unsigned char *data_ptr; + + HIF_DBG("+%s pipe : %d, buf:0x%pK nbytes %u", + __func__, pipe_id, buf, nbytes); + + frag_count = qdf_nbuf_get_num_frags(buf); + if (frag_count == 1) { + /* + * | hif_usb_send_context | netbuf->data + */ + head_data_len = sizeof(struct hif_usb_send_context); + } else if ((frag_count - 1) <= QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS) { + /* + * means have extra fragment buf in skb + * header data length should be total sending length subtract + * internal data length of netbuf + * | 
hif_usb_send_context | fragments except internal buffer | + * netbuf->data + */ + head_data_len = sizeof(struct hif_usb_send_context); + while (tmp_frag_count < (frag_count - 1)) { + head_data_len = + head_data_len + qdf_nbuf_get_frag_len(buf, + tmp_frag_count); + tmp_frag_count = tmp_frag_count + 1; + } + } else { + /* Extra fragments overflow */ + HIF_ERROR("%s Extra fragments count overflow : %d\n", + __func__, frag_count); + status = QDF_STATUS_E_RESOURCES; + goto err; + } + + /* Check whether head room is enough to save extra head data */ + if (head_data_len <= qdf_nbuf_headroom(buf)) { + send_context = (struct hif_usb_send_context *) + qdf_nbuf_push_head(buf, head_data_len); + send_context->new_alloc = false; + } else { + send_context = + qdf_mem_malloc(sizeof(struct hif_usb_send_context) + + head_data_len + nbytes); + if (send_context == NULL) { + HIF_ERROR("%s: qdf_mem_malloc failed", __func__); + status = QDF_STATUS_E_NOMEM; + goto err; + } + send_context->new_alloc = true; + } + send_context->netbuf = buf; + send_context->hif_usb_device = hif_usb_device; + send_context->transfer_id = pipe_id; + send_context->head_data_len = head_data_len; + /* + * Copy data to head part of netbuf or head of allocated buffer. + * if buffer is new allocated, the last buffer should be copied also. + * It assume last fragment is internal buffer of netbuf + * sometime total length of fragments larger than nbytes + */ + data_ptr = (unsigned char *)send_context + + sizeof(struct hif_usb_send_context); + for (i = 0; + i < (send_context->new_alloc ? 
frag_count : frag_count - 1); i++) { + int frag_len = qdf_nbuf_get_frag_len(buf, i); + unsigned char *frag_addr = qdf_nbuf_get_frag_vaddr(buf, i); + + qdf_mem_copy(data_ptr, frag_addr, frag_len); + data_ptr += frag_len; + } + /* Reset pData pointer and send out */ + data_ptr = (unsigned char *)send_context + + sizeof(struct hif_usb_send_context); + + urb_context = usb_hif_alloc_urb_from_pipe(pipe); + if (NULL == urb_context) { + /* TODO : note, it is possible to run out of urbs if 2 + * endpoints map to the same pipe ID + */ + HIF_ERROR("%s pipe:%d no urbs left. URB Cnt : %d", + __func__, pipe_id, pipe->urb_cnt); + status = QDF_STATUS_E_RESOURCES; + goto err; + } + urb_context->send_context = send_context; + urb = urb_context->urb; + urb_context->buf = buf; + data = data_ptr; + len = nbytes; + + usb_fill_bulk_urb(urb, + device->udev, + pipe->usb_pipe_handle, + data, + (len % pipe->max_packet_size) == + 0 ? (len + 1) : len, + usb_hif_usb_transmit_complete, urb_context); + + if ((len % pipe->max_packet_size) == 0) + /* hit a max packet boundary on this pipe */ + + HIF_DBG + ("athusb bulk send submit:%d, 0x%X (ep:0x%2.2X), %d bytes", + pipe->logical_pipe_num, pipe->usb_pipe_handle, + pipe->ep_address, nbytes); + + usb_hif_enqueue_pending_transfer(pipe, urb_context); + usb_status = usb_submit_urb(urb, GFP_ATOMIC); + if (usb_status) { + if (send_context->new_alloc) + qdf_mem_free(send_context); + else + qdf_nbuf_pull_head(buf, head_data_len); + urb_context->buf = NULL; + HIF_ERROR("athusb : usb bulk transmit failed %d", + usb_status); + usb_hif_remove_pending_transfer(urb_context); + usb_hif_cleanup_transmit_urb(urb_context); + status = QDF_STATUS_E_FAILURE; + goto err; + } + +err: + if (!QDF_IS_STATUS_SUCCESS(status) && + (status != QDF_STATUS_E_RESOURCES)) { + HIF_ERROR("athusb send failed %d", status); + } + + HIF_DBG("-%s pipe : %d", __func__, pipe_id); + + return status; +} + +/** + * hif_send_head() - HIF routine exposed to upper layers to send data + * @scn: 
pointer to hif_opaque_softc structure + * @pipe_id: HIF pipe on which data is to be sent + * @transfer_id: endpoint ID on which data is to be sent + * @nbytes: number of bytes to be transmitted + * @wbuf: qdf_nbuf_t containing data to be transmitted + * @hdr_buf: any header buf to be prepended, currently ignored + * @data_attr: data_attr field from cvg_nbuf_cb of wbuf + * + * Return: QDF_STATUS_SUCCESS on success and error QDF status on failure + */ +QDF_STATUS hif_send_head(struct hif_opaque_softc *scn, uint8_t pipe_id, + uint32_t transfer_id, uint32_t nbytes, + qdf_nbuf_t wbuf, uint32_t data_attr) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(scn); + + HIF_TRACE("+%s", __func__); + status = hif_send_internal(device, pipe_id, NULL, wbuf, nbytes); + HIF_TRACE("-%s", __func__); + return status; +} + +/** + * hif_get_free_queue_number() - get # of free TX resources in a given HIF pipe + * @scn: pointer to hif_opaque_softc structure + * @pipe_id: HIF pipe which is being polled for free resources + * + * Return: # of free resources in pipe_id + */ +uint16_t hif_get_free_queue_number(struct hif_opaque_softc *scn, + uint8_t pipe_id) +{ + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(scn); + + return device->pipes[pipe_id].urb_cnt; +} + +/** + * hif_post_init() - copy HTC callbacks to HIF + * @scn: pointer to hif_opaque_softc structure + * @target: pointer to HTC_TARGET structure + * @callbacks: htc callbacks + * + * Return: none + */ +void hif_post_init(struct hif_opaque_softc *scn, void *target, + struct hif_msg_callbacks *callbacks) +{ + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(scn); + + qdf_mem_copy(&device->htc_callbacks, callbacks, + sizeof(device->htc_callbacks)); +} + +/** + * hif_detach_htc() - remove HTC callbacks from HIF + * @scn: pointer to hif_opaque_softc structure + * + * Return: none + */ +void hif_detach_htc(struct hif_opaque_softc *scn) +{ + struct HIF_DEVICE_USB *device = 
HIF_GET_USB_DEVICE(scn); + + usb_hif_flush_all(device); + qdf_mem_zero(&device->htc_callbacks, sizeof(device->htc_callbacks)); +} + +/** + * hif_usb_device_deinit() - de- init HIF_DEVICE_USB, cleanup pipe resources + * @sc: pointer to hif_usb_softc structure + * + * Return: None + */ +void hif_usb_device_deinit(struct hif_usb_softc *sc) +{ + struct HIF_DEVICE_USB *device = &sc->hif_hdl; + + HIF_TRACE("+%s", __func__); + + usb_hif_cleanup_pipe_resources(device); + + usb_set_intfdata(device->interface, NULL); + + if (device->diag_cmd_buffer != NULL) + qdf_mem_free(device->diag_cmd_buffer); + + if (device->diag_resp_buffer != NULL) + qdf_mem_free(device->diag_resp_buffer); + + HIF_TRACE("-%s", __func__); +} + +/** + * hif_usb_device_init() - init HIF_DEVICE_USB, setup pipe resources + * @sc: pointer to hif_usb_softc structure + * + * Return: QDF_STATUS_SUCCESS on success or a QDF error + */ +QDF_STATUS hif_usb_device_init(struct hif_usb_softc *sc) +{ + int i; + struct HIF_DEVICE_USB *device = &sc->hif_hdl; + struct usb_interface *interface = sc->interface; + struct usb_device *dev = interface_to_usbdev(interface); + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct HIF_USB_PIPE *pipe; + + HIF_TRACE("+%s", __func__); + + do { + + usb_set_intfdata(interface, device); + qdf_spinlock_create(&(device->cs_lock)); + qdf_spinlock_create(&(device->rx_lock)); + qdf_spinlock_create(&(device->tx_lock)); + device->udev = dev; + device->interface = interface; + + HIF_ERROR("%s device %pK device->udev %pK device->interface %pK", + __func__, + device, + device->udev, + device->interface); + + for (i = 0; i < HIF_USB_PIPE_MAX; i++) { + pipe = &device->pipes[i]; + + HIF_USB_INIT_WORK(pipe); + skb_queue_head_init(&pipe->io_comp_queue); + } + + device->diag_cmd_buffer = + qdf_mem_malloc(USB_CTRL_MAX_DIAG_CMD_SIZE); + if (NULL == device->diag_cmd_buffer) { + status = QDF_STATUS_E_NOMEM; + break; + } + device->diag_resp_buffer = + qdf_mem_malloc(USB_CTRL_MAX_DIAG_RESP_SIZE); + if (NULL == 
device->diag_resp_buffer) { + status = QDF_STATUS_E_NOMEM; + break; + } + + status = usb_hif_setup_pipe_resources(device); + + } while (false); + + if (status != QDF_STATUS_SUCCESS) + HIF_ERROR("%s: abnormal condition", __func__); + + HIF_TRACE("+%s", __func__); + return status; +} + +/** + * hif_start() - Enable HIF TX and RX + * @scn: pointer to hif_opaque_softc structure + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +QDF_STATUS hif_start(struct hif_opaque_softc *scn) +{ + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(scn); + int i; + + HIF_TRACE("+%s", __func__); + usb_hif_prestart_recv_pipes(device); + + /* set the TX resource avail threshold for each TX pipe */ + for (i = HIF_TX_CTRL_PIPE; i <= HIF_TX_DATA_HP_PIPE; i++) { + device->pipes[i].urb_cnt_thresh = + device->pipes[i].urb_alloc / 2; + } + + HIF_TRACE("-%s", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * hif_usb_stop_device() - Stop/flush all HIF communication + * @scn: pointer to hif_opaque_softc structure + * + * Return: none + */ +void hif_usb_stop_device(struct hif_softc *hif_sc) +{ + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(hif_sc); + + HIF_TRACE("+%s", __func__); + + usb_hif_flush_all(device); + + HIF_TRACE("-%s", __func__); +} + +/** + * hif_get_default_pipe() - get default pipes for HIF TX/RX + * @scn: pointer to hif_opaque_softc structure + * @ul_pipe: pointer to TX pipe + * @ul_pipe: pointer to TX pipe + * + * Return: none + */ +void hif_get_default_pipe(struct hif_opaque_softc *scn, uint8_t *ul_pipe, + uint8_t *dl_pipe) +{ + *ul_pipe = HIF_TX_CTRL_PIPE; + *dl_pipe = HIF_RX_CTRL_PIPE; +} + +#if defined(USB_MULTI_IN_TEST) || defined(USB_ISOC_TEST) +/** + * hif_map_service_to_pipe() - maps ul/dl pipe to service id. 
+ * @scn: HIF context + * @svc_id: sevice index + * @ul_pipe: pointer to uplink pipe id + * @dl_pipe: pointer to down-linklink pipe id + * @ul_is_polled: if ul is polling based + * @ul_is_polled: if dl is polling based + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +int hif_map_service_to_pipe(struct hif_opaque_softc *scn, uint16_t svc_id, + uint8_t *ul_pipe, uint8_t *dl_pipe, + int *ul_is_polled, int *dl_is_polled) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + switch (svc_id) { + case HTC_CTRL_RSVD_SVC: + case WMI_CONTROL_SVC: + case HTC_RAW_STREAMS_SVC: + *ul_pipe = HIF_TX_CTRL_PIPE; + *dl_pipe = HIF_RX_DATA_PIPE; + break; + case WMI_DATA_BE_SVC: + *ul_pipe = HIF_TX_DATA_LP_PIPE; + *dl_pipe = HIF_RX_DATA_PIPE; + break; + case WMI_DATA_BK_SVC: + *ul_pipe = HIF_TX_DATA_MP_PIPE; + *dl_pipe = HIF_RX_DATA2_PIPE; + break; + case WMI_DATA_VI_SVC: + *ul_pipe = HIF_TX_DATA_HP_PIPE; + *dl_pipe = HIF_RX_DATA_PIPE; + break; + case WMI_DATA_VO_SVC: + *ul_pipe = HIF_TX_DATA_LP_PIPE; + *dl_pipe = HIF_RX_DATA_PIPE; + break; + default: + status = QDF_STATUS_E_FAILURE; + break; + } + + return status; +} +#else + +#ifdef QCA_TX_HTT2_SUPPORT +#define USB_TX_CHECK_HTT2_SUPPORT 1 +#else +#define USB_TX_CHECK_HTT2_SUPPORT 0 +#endif + +/** + * hif_map_service_to_pipe() - maps ul/dl pipe to service id. 
+ * @scn: HIF context + * @svc_id: sevice index + * @ul_pipe: pointer to uplink pipe id + * @dl_pipe: pointer to down-linklink pipe id + * @ul_is_polled: if ul is polling based + * @ul_is_polled: if dl is polling based + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +int hif_map_service_to_pipe(struct hif_opaque_softc *scn, uint16_t svc_id, + uint8_t *ul_pipe, uint8_t *dl_pipe, + int *ul_is_polled, int *dl_is_polled) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + switch (svc_id) { + case HTC_CTRL_RSVD_SVC: + case WMI_CONTROL_SVC: + *ul_pipe = HIF_TX_CTRL_PIPE; + *dl_pipe = HIF_RX_DATA_PIPE; + break; + case WMI_DATA_BE_SVC: + case WMI_DATA_BK_SVC: + *ul_pipe = HIF_TX_DATA_LP_PIPE; + if (hif_usb_disable_rxdata2) + *dl_pipe = HIF_RX_DATA_PIPE; + else + *dl_pipe = HIF_RX_DATA2_PIPE; + break; + case WMI_DATA_VI_SVC: + *ul_pipe = HIF_TX_DATA_MP_PIPE; + if (hif_usb_disable_rxdata2) + *dl_pipe = HIF_RX_DATA_PIPE; + else + *dl_pipe = HIF_RX_DATA2_PIPE; + break; + case WMI_DATA_VO_SVC: + *ul_pipe = HIF_TX_DATA_HP_PIPE; + if (hif_usb_disable_rxdata2) + *dl_pipe = HIF_RX_DATA_PIPE; + else + *dl_pipe = HIF_RX_DATA2_PIPE; + break; + case HTC_RAW_STREAMS_SVC: + *ul_pipe = HIF_TX_CTRL_PIPE; + *dl_pipe = HIF_RX_DATA_PIPE; + break; + case HTT_DATA_MSG_SVC: + *ul_pipe = HIF_TX_DATA_LP_PIPE; + if (hif_usb_disable_rxdata2) + *dl_pipe = HIF_RX_DATA_PIPE; + else + *dl_pipe = HIF_RX_DATA2_PIPE; + break; + case HTT_DATA2_MSG_SVC: + if (USB_TX_CHECK_HTT2_SUPPORT) { + *ul_pipe = HIF_TX_DATA_HP_PIPE; + if (hif_usb_disable_rxdata2) + *dl_pipe = HIF_RX_DATA_PIPE; + else + *dl_pipe = HIF_RX_DATA2_PIPE; + } + break; + default: + status = QDF_STATUS_E_FAILURE; + break; + } + + return status; +} +#endif + +/** + * hif_ctrl_msg_exchange() - send usb ctrl message and receive response + * @macp: pointer to HIF_DEVICE_USB + * @send_req_val: USB send message request value + * @send_msg: pointer to data to send + * @len: length in bytes of the data to send + * 
@response_req_val: USB response message request value + * @response_msg: pointer to response msg + * @response_len: length of the response message + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +static QDF_STATUS hif_ctrl_msg_exchange(struct HIF_DEVICE_USB *macp, + uint8_t send_req_val, + uint8_t *send_msg, + uint32_t len, + uint8_t response_req_val, + uint8_t *response_msg, + uint32_t *response_len) +{ + QDF_STATUS status; + + do { + + /* send command */ + status = usb_hif_submit_ctrl_out(macp, send_req_val, 0, 0, + send_msg, len); + + if (!QDF_IS_STATUS_SUCCESS(status)) + break; + + if (NULL == response_msg) { + /* no expected response */ + break; + } + + /* get response */ + status = usb_hif_submit_ctrl_in(macp, response_req_val, 0, 0, + response_msg, *response_len); + + if (!QDF_IS_STATUS_SUCCESS(status)) + break; + + } while (false); + + return status; +} + +/** + * hif_exchange_bmi_msg() - send/recev ctrl message of type BMI_CMD/BMI_RESP + * @scn: pointer to hif_opaque_softc + * @bmi_request: pointer to data to send + * @request_length: length in bytes of the data to send + * @bmi_response: pointer to response msg + * @bmi_response_length: length of the response message + * @timeout_ms: timeout to wait for response (ignored in current implementation) + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ + +QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *scn, + qdf_dma_addr_t cmd, qdf_dma_addr_t rsp, + uint8_t *bmi_request, + uint32_t request_length, + uint8_t *bmi_response, + uint32_t *bmi_response_lengthp, + uint32_t timeout_ms) +{ + struct HIF_DEVICE_USB *macp = HIF_GET_USB_DEVICE(scn); + + return hif_ctrl_msg_exchange(macp, + USB_CONTROL_REQ_SEND_BMI_CMD, + bmi_request, + request_length, + USB_CONTROL_REQ_RECV_BMI_RESP, + bmi_response, bmi_response_lengthp); +} + +/** + * hif_diag_read_access() - Read data from target memory or register + * @scn: pointer to hif_opaque_softc 
+ * @address: register address to read from + * @data: pointer to buffer to store the value read from the register + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *scn, uint32_t address, + uint32_t *data) +{ + struct HIF_DEVICE_USB *macp = HIF_GET_USB_DEVICE(scn); + QDF_STATUS status; + USB_CTRL_DIAG_CMD_READ *cmd; + uint32_t respLength; + + cmd = (USB_CTRL_DIAG_CMD_READ *) macp->diag_cmd_buffer; + + qdf_mem_zero(cmd, sizeof(*cmd)); + cmd->Cmd = USB_CTRL_DIAG_CC_READ; + cmd->Address = address; + respLength = sizeof(USB_CTRL_DIAG_RESP_READ); + + status = hif_ctrl_msg_exchange(macp, + USB_CONTROL_REQ_DIAG_CMD, + (uint8_t *) cmd, + sizeof(*cmd), + USB_CONTROL_REQ_DIAG_RESP, + macp->diag_resp_buffer, &respLength); + + if (QDF_IS_STATUS_SUCCESS(status)) { + USB_CTRL_DIAG_RESP_READ *pResp = + (USB_CTRL_DIAG_RESP_READ *) macp->diag_resp_buffer; + *data = pResp->ReadValue; + status = QDF_STATUS_SUCCESS; + } else { + status = QDF_STATUS_E_FAILURE; + } + + return status; +} + +/** + * hif_diag_write_access() - write data to target memory or register + * @scn: pointer to hif_opaque_softc + * @address: register address to write to + * @data: value to be written to the address + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *scn, + uint32_t address, + uint32_t data) +{ + struct HIF_DEVICE_USB *macp = HIF_GET_USB_DEVICE(scn); + USB_CTRL_DIAG_CMD_WRITE *cmd; + + cmd = (USB_CTRL_DIAG_CMD_WRITE *) macp->diag_cmd_buffer; + + qdf_mem_zero(cmd, sizeof(*cmd)); + cmd->Cmd = USB_CTRL_DIAG_CC_WRITE; + cmd->Address = address; + cmd->Value = data; + + return hif_ctrl_msg_exchange(macp, + USB_CONTROL_REQ_DIAG_CMD, + (uint8_t *) cmd, + sizeof(*cmd), 0, NULL, 0); +} + +/** + * hif_dump_info() - dump info about all HIF pipes and endpoints + * @scn: pointer to hif_opaque_softc + * + * Return: none + */ 
+void hif_dump_info(struct hif_opaque_softc *scn) +{ + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(scn); + struct HIF_USB_PIPE *pipe = NULL; + struct usb_host_interface *iface_desc = NULL; + struct usb_endpoint_descriptor *ep_desc; + uint8_t i = 0; + + for (i = 0; i < HIF_USB_PIPE_MAX; i++) { + pipe = &device->pipes[i]; + HIF_ERROR("PipeIndex : %d URB Cnt : %d PipeHandle : %x", + i, pipe->urb_cnt, + pipe->usb_pipe_handle); + if (usb_pipeisoc(pipe->usb_pipe_handle)) + HIF_INFO("Pipe Type ISOC"); + else if (usb_pipebulk(pipe->usb_pipe_handle)) + HIF_INFO("Pipe Type BULK"); + else if (usb_pipeint(pipe->usb_pipe_handle)) + HIF_INFO("Pipe Type INT"); + else if (usb_pipecontrol(pipe->usb_pipe_handle)) + HIF_INFO("Pipe Type control"); + } + + for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { + ep_desc = &iface_desc->endpoint[i].desc; + if (ep_desc) { + HIF_INFO( + "ep_desc : %pK Index : %d: DescType : %d Addr : %d Maxp : %d Atrrib : %d", + ep_desc, i, ep_desc->bDescriptorType, + ep_desc->bEndpointAddress, + ep_desc->wMaxPacketSize, + ep_desc->bmAttributes); + if ((ep_desc) && (usb_endpoint_type(ep_desc) == + USB_ENDPOINT_XFER_ISOC)) { + HIF_INFO("ISOC EP Detected"); + } + } + } + +} + +/** + * hif_flush_surprise_remove() - Cleanup residual buffers for device shutdown + * @scn: HIF context + * + * Not applicable to USB bus + * + * Return: none + */ +void hif_flush_surprise_remove(struct hif_opaque_softc *scn) +{ +/* TO DO... 
*/ +} + +/** + * hif_diag_read_mem() -read nbytes of data from target memory or register + * @scn: pointer to hif_opaque_softc + * @address: register address to read from + * @data: buffer to store the value read + * @nbytes: number of bytes to be read from 'address' + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *scn, + uint32_t address, uint8_t *data, + int nbytes) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + HIF_TRACE("+%s", __func__); + + if ((address & 0x3) || ((uintptr_t)data & 0x3)) + return QDF_STATUS_E_IO; + + while ((nbytes >= 4) && + QDF_IS_STATUS_SUCCESS(status = + hif_diag_read_access(scn, + address, + (uint32_t *)data))) { + + nbytes -= sizeof(uint32_t); + address += sizeof(uint32_t); + data += sizeof(uint32_t); + + } + HIF_TRACE("-%s", __func__); + return status; +} +qdf_export_symbol(hif_diag_read_mem); + +/** + * hif_diag_write_mem() -write nbytes of data to target memory or register + * @scn: pointer to hif_opaque_softc + * @address: register address to write to + * @data: buffer containing data to be written + * @nbytes: number of bytes to be written + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *scn, + uint32_t address, + uint8_t *data, int nbytes) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + HIF_TRACE("+%s", __func__); + if ((address & 0x3) || ((uintptr_t)data & 0x3)) + return QDF_STATUS_E_IO; + + while (nbytes >= 4 && + QDF_IS_STATUS_SUCCESS(status = + hif_diag_write_access(scn, + address, + *((uint32_t *)data)))) { + + nbytes -= sizeof(uint32_t); + address += sizeof(uint32_t); + data += sizeof(uint32_t); + + } + HIF_TRACE("-%s", __func__); + return status; +} + +void hif_send_complete_check(struct hif_opaque_softc *scn, + uint8_t PipeID, int force) +{ + /* NO-OP*/ +} + +/* diagnostic command defnitions */ +#define USB_CTRL_DIAG_CC_READ 0 +#define 
USB_CTRL_DIAG_CC_WRITE 1 +#define USB_CTRL_DIAG_CC_WARM_RESET 2 + +void hif_suspend_wow(struct hif_opaque_softc *scn) +{ + HIF_INFO("HIFsuspendwow - TODO"); +} + +/** + * hif_usb_set_bundle_mode() - enable bundling and set default rx bundle cnt + * @scn: pointer to hif_opaque_softc structure + * @enabled: flag to enable/disable bundling + * @rx_bundle_cnt: bundle count to be used for RX + * + * Return: none + */ +void hif_usb_set_bundle_mode(struct hif_softc *scn, + bool enabled, int rx_bundle_cnt) +{ + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(scn); + + device->is_bundle_enabled = enabled; + device->rx_bundle_cnt = rx_bundle_cnt; + if (device->is_bundle_enabled && (device->rx_bundle_cnt == 0)) + device->rx_bundle_cnt = 1; + + device->rx_bundle_buf_len = device->rx_bundle_cnt * + HIF_USB_RX_BUNDLE_ONE_PKT_SIZE; + + HIF_DBG("athusb bundle %s cnt %d", enabled ? "enabled" : "disabled", + rx_bundle_cnt); +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/usb/hif_usb_internal.h b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/hif_usb_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..bfccc528f4d5bbf6f9045777f9e10dc124792fab --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/hif_usb_internal.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HIF_USB_INTERNAL_H +#define _HIF_USB_INTERNAL_H + +#include +#include "a_types.h" +#include "athdefs.h" +#include "a_osapi.h" +#include "a_usb_defs.h" +#include +#include +#include "hif.h" +#include "if_usb.h" + +#define TX_URB_COUNT 32 +#define RX_URB_COUNT 32 + +#define HIF_USB_RX_BUFFER_SIZE (1792 + 8) +#define HIF_USB_RX_BUNDLE_ONE_PKT_SIZE (1792 + 8) + +#ifdef HIF_USB_TASKLET +#define HIF_USB_SCHEDULE_WORK(pipe)\ + tasklet_schedule(&pipe->io_complete_tasklet) + +#define HIF_USB_INIT_WORK(pipe)\ + tasklet_init(&pipe->io_complete_tasklet,\ + usb_hif_io_comp_tasklet,\ + (unsigned long)pipe) + +#define HIF_USB_FLUSH_WORK(pipe) flush_work(&pipe->io_complete_work) +#else +#define HIF_USB_SCHEDULE_WORK(pipe) schedule_work(&pipe->io_complete_work) +#define HIF_USB_INIT_WORK(pipe)\ + INIT_WORK(&pipe->io_complete_work,\ + usb_hif_io_comp_work) +#define HIF_USB_FLUSH_WORK(pipe) +#endif + +/* debug masks */ +#define USB_HIF_DEBUG_CTRL_TRANS ATH_DEBUG_MAKE_MODULE_MASK(0) +#define USB_HIF_DEBUG_BULK_IN ATH_DEBUG_MAKE_MODULE_MASK(1) +#define USB_HIF_DEBUG_BULK_OUT ATH_DEBUG_MAKE_MODULE_MASK(2) +#define USB_HIF_DEBUG_ENUM ATH_DEBUG_MAKE_MODULE_MASK(3) +#define USB_HIF_DEBUG_DUMP_DATA ATH_DEBUG_MAKE_MODULE_MASK(4) +#define USB_HIF_SUSPEND ATH_DEBUG_MAKE_MODULE_MASK(5) +#define USB_HIF_ISOC_SUPPORT ATH_DEBUG_MAKE_MODULE_MASK(6) + +struct HIF_USB_PIPE; + +struct HIF_URB_CONTEXT { + DL_LIST link; + struct HIF_USB_PIPE *pipe; + qdf_nbuf_t buf; + struct urb *urb; + struct hif_usb_send_context *send_context; +}; + +#define HIF_USB_PIPE_FLAG_TX (1 << 0) + +/* + * Data structure to record required sending context data + */ +struct 
hif_usb_send_context { + A_BOOL new_alloc; + struct HIF_DEVICE_USB *hif_usb_device; + qdf_nbuf_t netbuf; + unsigned int transfer_id; + unsigned int head_data_len; +}; + +extern unsigned int hif_usb_disable_rxdata2; + +extern QDF_STATUS usb_hif_submit_ctrl_in(struct HIF_DEVICE_USB *macp, + uint8_t req, + uint16_t value, + uint16_t index, + void *data, uint32_t size); + +extern QDF_STATUS usb_hif_submit_ctrl_out(struct HIF_DEVICE_USB *macp, + uint8_t req, + uint16_t value, + uint16_t index, + void *data, uint32_t size); + +QDF_STATUS usb_hif_setup_pipe_resources(struct HIF_DEVICE_USB *device); +void usb_hif_cleanup_pipe_resources(struct HIF_DEVICE_USB *device); +void usb_hif_prestart_recv_pipes(struct HIF_DEVICE_USB *device); +void usb_hif_start_recv_pipes(struct HIF_DEVICE_USB *device); +void usb_hif_flush_all(struct HIF_DEVICE_USB *device); +void usb_hif_cleanup_transmit_urb(struct HIF_URB_CONTEXT *urb_context); +void usb_hif_enqueue_pending_transfer(struct HIF_USB_PIPE *pipe, + struct HIF_URB_CONTEXT *urb_context); +void usb_hif_remove_pending_transfer(struct HIF_URB_CONTEXT *urb_context); +struct HIF_URB_CONTEXT *usb_hif_alloc_urb_from_pipe(struct HIF_USB_PIPE *pipe); +void hif_usb_device_deinit(struct hif_usb_softc *sc); +QDF_STATUS hif_usb_device_init(struct hif_usb_softc *sc); +#ifdef HIF_USB_TASKLET +void usb_hif_io_comp_tasklet(unsigned long context); +#else +void usb_hif_io_comp_work(struct work_struct *work); +#endif +QDF_STATUS hif_diag_write_warm_reset(struct usb_interface *interface, + uint32_t address, uint32_t data); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/usb/if_usb.c b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/if_usb.c new file mode 100644 index 0000000000000000000000000000000000000000..26f0836b9c32c22e2cd868bdcb1715b08cfb3c5e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/if_usb.c @@ -0,0 +1,727 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include "if_usb.h" +#include "hif_usb_internal.h" +#include "target_type.h" /* TARGET_TYPE_ */ +#include "regtable_usb.h" +#include "ol_fw.h" +#include "hif_debug.h" +#include "epping_main.h" +#include "hif_main.h" +#include "qwlan_version.h" +#include "usb_api.h" + +#define DELAY_FOR_TARGET_READY 200 /* 200ms */ + +/* Save memory addresses where we save FW ram dump, and then we could obtain + * them by symbol table. 
+ */ +uint32_t fw_stack_addr; +void *fw_ram_seg_addr[FW_RAM_SEG_CNT]; + + + +static int hif_usb_unload_dev_num = -1; +struct hif_usb_softc *g_usb_sc; + +/** + * hif_usb_diag_write_cold_reset() - reset SOC by sending a diag command + * @scn: pointer to ol_softc structure + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +static inline QDF_STATUS +hif_usb_diag_write_cold_reset(struct hif_softc *scn) +{ + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + + + HIF_DBG("%s: resetting SOC", __func__); + + return hif_diag_write_access(hif_hdl, + (ROME_USB_SOC_RESET_CONTROL_COLD_RST_LSB | + ROME_USB_RTC_SOC_BASE_ADDRESS), + SOC_RESET_CONTROL_COLD_RST_SET(1)); +} + +/** + * hif_usb_procfs_init() - create init procfs + * @scn: pointer to hif_usb_softc structure + * + * Return: int 0 if success else an appropriate error number + */ +static int +hif_usb_procfs_init(struct hif_softc *scn) +{ + int ret = 0; + + HIF_ENTER(); + + if (athdiag_procfs_init(scn) != 0) { + HIF_ERROR("athdiag_procfs_init failed"); + ret = A_ERROR; + } + + scn->athdiag_procfs_inited = true; + + HIF_EXIT(); + return ret; +} + +/** + * hif_nointrs(): disable IRQ + * @scn: pointer to struct hif_softc + * + * This function stops interrupt(s) + * + * Return: none + */ +void hif_usb_nointrs(struct hif_softc *scn) +{ + +} + +/** + * hif_usb_reboot() - called at reboot time to reset WLAN SOC + * @nb: pointer to notifier_block registered during register_reboot_notifier + * @val: code indicating reboot reason + * @v: unused pointer + * + * Return: int 0 if success else an appropriate error number + */ +static int hif_usb_reboot(struct notifier_block *nb, unsigned long val, + void *v) +{ + struct hif_usb_softc *sc; + + HIF_ENTER(); + sc = container_of(nb, struct hif_usb_softc, reboot_notifier); + /* do cold reset */ + hif_usb_diag_write_cold_reset(HIF_GET_SOFTC(sc)); + HIF_EXIT(); + return NOTIFY_DONE; +} + +/** + * hif_usb_disable_lpm() - Disable lpm feature of 
usb2.0 + * @udev: pointer to usb_device for which LPM is to be disabled + * + * LPM needs to be disabled to avoid usb2.0 probe timeout + * + * Return: int 0 if success else an appropriate error number + */ +static int hif_usb_disable_lpm(struct usb_device *udev) +{ + struct usb_hcd *hcd; + int ret = -EPERM; + + HIF_ENTER(); + + if (!udev || !udev->bus) { + HIF_ERROR("Invalid input parameters"); + goto exit; + } + + hcd = bus_to_hcd(udev->bus); + if (udev->usb2_hw_lpm_enabled) { + if (hcd->driver->set_usb2_hw_lpm) { + ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, false); + if (!ret) { + udev->usb2_hw_lpm_enabled = false; + udev->usb2_hw_lpm_capable = false; + HIF_TRACE("%s: LPM is disabled", __func__); + } else { + HIF_TRACE("%s: Fail to disable LPM", + __func__); + } + } else { + HIF_TRACE("%s: hcd doesn't support LPM", + __func__); + } + } else { + HIF_TRACE("%s: LPM isn't enabled", __func__); + } +exit: + HIF_EXIT(); + return ret; +} + +/** + * hif_usb_enable_bus() - enable usb bus + * @ol_sc: hif_softc struct + * @dev: device pointer + * @bdev: bus dev pointer + * @bid: bus id pointer + * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE + * + * Return: QDF_STATUS_SUCCESS on success and error QDF status on failure + */ +QDF_STATUS hif_usb_enable_bus(struct hif_softc *scn, + struct device *dev, void *bdev, + const struct hif_bus_id *bid, + enum hif_enable_type type) + +{ + struct usb_interface *interface = (struct usb_interface *)bdev; + struct usb_device_id *id = (struct usb_device_id *)bid; + int ret = 0; + struct hif_usb_softc *sc; + struct usb_device *usbdev = interface_to_usbdev(interface); + int vendor_id, product_id; + + usb_get_dev(usbdev); + + if (!scn) { + HIF_ERROR("%s: hif_ctx is NULL", __func__); + goto err_usb; + } + + sc = HIF_GET_USB_SOFTC(scn); + + HIF_INFO("%s hif_softc %pK usbdev %pK interface %pK\n", + __func__, + scn, + usbdev, + interface); + + vendor_id = qdf_le16_to_cpu(usbdev->descriptor.idVendor); + product_id = 
qdf_le16_to_cpu(usbdev->descriptor.idProduct); + + HIF_ERROR("%s: con_mode = 0x%x, vendor_id = 0x%x product_id = 0x%x", + __func__, hif_get_conparam(scn), vendor_id, product_id); + + sc->pdev = (void *)usbdev; + sc->dev = &usbdev->dev; + sc->devid = id->idProduct; + + if ((usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), + USB_REQ_SET_CONFIGURATION, 0, 1, 0, NULL, 0, + HZ)) < 0) { + HIF_ERROR("%s[%d]", __func__, __LINE__); + goto err_usb; + } + + usb_set_interface(usbdev, 0, 0); + /* disable lpm to avoid usb2.0 probe timeout */ + hif_usb_disable_lpm(usbdev); + + /* params need to be added - TODO + * scn->enableuartprint = 1; + * scn->enablefwlog = 0; + * scn->max_no_of_peers = 1; + */ + + sc->interface = interface; + sc->reboot_notifier.notifier_call = hif_usb_reboot; + register_reboot_notifier(&sc->reboot_notifier); + + if (hif_usb_device_init(sc) != QDF_STATUS_SUCCESS) { + HIF_ERROR("ath: %s: hif_usb_device_init failed", __func__); + goto err_reset; + } + + if (hif_usb_procfs_init(scn)) + goto err_reset; + + hif_usb_unload_dev_num = usbdev->devnum; + g_usb_sc = sc; + HIF_EXIT(); + return 0; + +err_reset: + hif_usb_diag_write_cold_reset(scn); + g_usb_sc = NULL; + hif_usb_unload_dev_num = -1; + unregister_reboot_notifier(&sc->reboot_notifier); +err_usb: + ret = QDF_STATUS_E_FAILURE; + usb_put_dev(usbdev); + return ret; +} + + +/** + * hif_usb_close(): close bus, delete hif_sc + * @ol_sc: soft_sc struct + * + * Return: none + */ +void hif_usb_close(struct hif_softc *scn) +{ + g_usb_sc = NULL; +} + +/** + * hif_usb_disable_bus(): This function disables usb bus + * @hif_ctx: pointer to struct hif_softc + * + * Return: none + */ +void hif_usb_disable_bus(struct hif_softc *hif_ctx) +{ + struct hif_usb_softc *sc = HIF_GET_USB_SOFTC(hif_ctx); + struct usb_interface *interface = sc->interface; + struct usb_device *udev = interface_to_usbdev(interface); + + HIF_TRACE("%s: trying to remove hif_usb!", __func__); + + /* disable lpm to avoid following cold reset will + * 
cause xHCI U1/U2 timeout + */ + usb_disable_lpm(udev); + + /* wait for disable lpm */ + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(DELAY_FOR_TARGET_READY)); + set_current_state(TASK_RUNNING); + + /* do cold reset */ + hif_usb_diag_write_cold_reset(hif_ctx); + + if (g_usb_sc->suspend_state) + hif_bus_resume(GET_HIF_OPAQUE_HDL(hif_ctx)); + + unregister_reboot_notifier(&sc->reboot_notifier); + usb_put_dev(interface_to_usbdev(interface)); + + hif_usb_device_deinit(sc); + + HIF_TRACE("%s hif_usb removed !!!!!!", __func__); +} + +/** + * hif_usb_bus_suspend() - suspend the bus + * @hif_ctx: hif_ctx + * + * This function suspends the bus, but usb doesn't need to suspend. + * Therefore just remove all the pending urb transactions + * + * Return: 0 for success and non-zero for failure + */ +int hif_usb_bus_suspend(struct hif_softc *hif_ctx) +{ + struct hif_usb_softc *sc = HIF_GET_USB_SOFTC(hif_ctx); + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(hif_ctx); + + HIF_ENTER(); + sc->suspend_state = 1; + usb_hif_flush_all(device); + HIF_EXIT(); + return 0; +} + +/** + * hif_usb_bus_resume() - hif resume API + * @hif_ctx: struct hif_opaque_softc + * + * This function resumes the bus. but usb doesn't need to resume. + * Post recv urbs for RX data pipe + * + * Return: 0 for success and non-zero for failure + */ +int hif_usb_bus_resume(struct hif_softc *hif_ctx) +{ + struct hif_usb_softc *sc = HIF_GET_USB_SOFTC(hif_ctx); + struct HIF_DEVICE_USB *device = HIF_GET_USB_DEVICE(hif_ctx); + + HIF_ENTER(); + sc->suspend_state = 0; + usb_hif_start_recv_pipes(device); + + HIF_EXIT(); + return 0; +} + +/** + * hif_usb_bus_reset_resume() - resume the bus after reset + * @scn: struct hif_opaque_softc + * + * This function is called to tell the driver that USB device has been resumed + * and it has also been reset. The driver should redo any necessary + * initialization. This function resets WLAN SOC. 
+ * + * Return: int 0 for success, non zero for failure + */ +int hif_usb_bus_reset_resume(struct hif_softc *hif_ctx) +{ + int ret = 0; + + HIF_ENTER(); + if (hif_usb_diag_write_cold_reset(hif_ctx) != QDF_STATUS_SUCCESS) + ret = 1; + + HIF_EXIT(); + return ret; +} + +/** + * hif_usb_open()- initialization routine for usb bus + * @ol_sc: ol_sc + * @bus_type: bus type + * + * Return: QDF_STATUS_SUCCESS on success and error QDF status on failure + */ +QDF_STATUS hif_usb_open(struct hif_softc *hif_ctx, + enum qdf_bus_type bus_type) +{ + hif_ctx->bus_type = bus_type; + return QDF_STATUS_SUCCESS; +} + +/** + * hif_usb_disable_isr(): disable isr + * @hif_ctx: struct hif_softc + * + * Return: void + */ +void hif_usb_disable_isr(struct hif_softc *hif_ctx) +{ + /* TODO */ +} + +/** + * hif_usb_reg_tbl_attach()- attach hif, target register tables + * @scn: pointer to ol_softc structure + * + * Attach host and target register tables based on target_type, target_version + * + * Return: none + */ +void hif_usb_reg_tbl_attach(struct hif_softc *scn) +{ + u_int32_t hif_type, target_type; + int32_t ret = 0; + uint32_t chip_id; + QDF_STATUS rv; + struct hif_target_info *tgt_info = &scn->target_info; + struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); + + if (scn->hostdef == NULL && scn->targetdef == NULL) { + switch (tgt_info->target_type) { + case TARGET_TYPE_AR6320: + switch (tgt_info->target_version) { + case AR6320_REV1_VERSION: + case AR6320_REV1_1_VERSION: + case AR6320_REV1_3_VERSION: + hif_type = HIF_TYPE_AR6320; + target_type = TARGET_TYPE_AR6320; + break; + case AR6320_REV2_1_VERSION: + case AR6320_REV3_VERSION: + case QCA9377_REV1_1_VERSION: + case QCA9379_REV1_VERSION: + hif_type = HIF_TYPE_AR6320V2; + target_type = TARGET_TYPE_AR6320V2; + break; + default: + ret = -1; + break; + } + break; + default: + ret = -1; + break; + } + + if (ret) + return; + + /* assign target register table if we find + * corresponding type + */ + hif_register_tbl_attach(scn, 
hif_type); + target_register_tbl_attach(scn, target_type); + /* read the chip revision*/ + rv = hif_diag_read_access(hif_hdl, + (CHIP_ID_ADDRESS | + RTC_SOC_BASE_ADDRESS), + &chip_id); + if (rv != QDF_STATUS_SUCCESS) { + HIF_ERROR("%s: get chip id val (%d)", __func__, + rv); + } + tgt_info->target_revision = + CHIP_ID_REVISION_GET(chip_id); + } +} + +/** + * hif_usb_get_hw_info()- attach register table for USB + * @hif_ctx: pointer to hif_softc structure + + * This function is used to attach the host and target register tables. + * Ideally, we should not attach register tables as a part of this function. + * There is scope of cleanup to move register table attach during + * initialization for USB bus. + * + * The reason we are doing register table attach for USB here is that, it relies + * on target_info->target_type and target_info->target_version, + * which get populated during bmi_firmware_download. "hif_get_fw_info" is the + * only initialization related call into HIF there after. + * + * To fix this, we can move the "get target info, functionality currently in + * bmi_firmware_download into hif initialization functions. This change will + * affect all buses. Can be taken up as a part of convergence. + * + * Return: none + */ +void hif_usb_get_hw_info(struct hif_softc *hif_ctx) +{ + hif_usb_reg_tbl_attach(hif_ctx); +} + + +/** + * hif_bus_configure() - configure the bus + * @scn: pointer to the hif context. + * + * return: 0 for success. nonzero for failure. 
+ */ +int hif_usb_bus_configure(struct hif_softc *scn) +{ + return 0; +} + +/** + * hif_usb_irq_enable() - hif_usb_irq_enable + * @scn: hif_softc + * @ce_id: ce_id + * + * Return: void + */ +void hif_usb_irq_enable(struct hif_softc *scn, int ce_id) +{ +} + +/** + * hif_usb_irq_disable() - hif_usb_irq_disable + * @scn: hif_softc + * @ce_id: ce_id + * + * Return: void + */ +void hif_usb_irq_disable(struct hif_softc *scn, int ce_id) +{ +} + +/** + * hif_usb_shutdown_bus_device() - This function shuts down the device + * @scn: hif opaque pointer + * + * Return: void + */ +void hif_usb_shutdown_bus_device(struct hif_softc *scn) +{ +} + +/** + * hif_trigger_dump() - trigger various dump cmd + * @scn: struct hif_opaque_softc + * @cmd_id: dump command id + * @start: start/stop dump + * + * Return: None + */ +void hif_trigger_dump(struct hif_opaque_softc *scn, uint8_t cmd_id, bool start) +{ +} + +/** + * hif_wlan_disable() - call the platform driver to disable wlan + * @scn: scn + * + * Return: void + */ +void hif_wlan_disable(struct hif_softc *scn) +{ +} + +/** + * hif_fw_assert_ramdump_pattern() - handle firmware assert with ramdump pattern + * @sc: pointer to hif_usb_softc structure + * + * Return: void + */ + +void hif_fw_assert_ramdump_pattern(struct hif_usb_softc *sc) +{ + uint32_t *reg, pattern, i = 0; + uint32_t len; + uint8_t *data; + uint8_t *ram_ptr = NULL; + char *fw_ram_seg_name[FW_RAM_SEG_CNT] = {"DRAM", "IRAM", "AXI"}; + size_t fw_ram_reg_size[FW_RAM_SEG_CNT] = { + FW_RAMDUMP_DRAMSIZE, + FW_RAMDUMP_IRAMSIZE, + FW_RAMDUMP_AXISIZE }; + + data = sc->fw_data; + len = sc->fw_data_len; + pattern = *((uint32_t *) data); + + qdf_assert(sc->ramdump_index < FW_RAM_SEG_CNT); + i = sc->ramdump_index; + reg = (uint32_t *) (data + 4); + if (sc->fw_ram_dumping == 0) { + sc->fw_ram_dumping = 1; + HIF_ERROR("Firmware %s dump:\n", fw_ram_seg_name[i]); + sc->ramdump[i] = + qdf_mem_malloc(sizeof(struct fw_ramdump) + + fw_ram_reg_size[i]); + if (!sc->ramdump[i]) { + pr_err("Fail 
to allocate memory for ram dump"); + QDF_BUG(0); + } + (sc->ramdump[i])->mem = (uint8_t *) (sc->ramdump[i] + 1); + fw_ram_seg_addr[i] = (sc->ramdump[i])->mem; + HIF_ERROR("FW %s start addr = %#08x\n", + fw_ram_seg_name[i], *reg); + HIF_ERROR("Memory addr for %s = %pK\n", + fw_ram_seg_name[i], + (sc->ramdump[i])->mem); + (sc->ramdump[i])->start_addr = *reg; + (sc->ramdump[i])->length = 0; + } + reg++; + ram_ptr = (sc->ramdump[i])->mem + (sc->ramdump[i])->length; + (sc->ramdump[i])->length += (len - 8); + if (sc->ramdump[i]->length <= fw_ram_reg_size[i]) { + qdf_mem_copy(ram_ptr, (uint8_t *) reg, len - 8); + } else { + HIF_ERROR("memory copy overlap\n"); + QDF_BUG(0); + } + + if (pattern == FW_RAMDUMP_END_PATTERN) { + HIF_ERROR("%s memory size = %d\n", fw_ram_seg_name[i], + (sc->ramdump[i])->length); + if (i == (FW_RAM_SEG_CNT - 1)) + QDF_BUG(0); + + sc->ramdump_index++; + sc->fw_ram_dumping = 0; + } +} + +/** + * hif_usb_ramdump_handler(): dump bus debug registers + * @scn: struct hif_opaque_softc + * + * This function is to receive information of firmware crash dump, and + * save it in host memory. It consists of 5 parts: registers, call stack, + * DRAM dump, IRAM dump, and AXI dump, and they are reported to host in order. + * + * registers: wrapped in a USB packet by starting as FW_ASSERT_PATTERN and + * 60 registers. + * call stack: wrapped in multiple USB packets, and each of them starts as + * FW_REG_PATTERN and contains multiple double-words. The tail + * of the last packet is FW_REG_END_PATTERN. + * DRAM dump: wrapped in multiple USB pakcets, and each of them start as + * FW_RAMDUMP_PATTERN and contains multiple double-wors. The tail + * of the last packet is FW_RAMDUMP_END_PATTERN; + * IRAM dump and AXI dump are with the same format as DRAM dump. 
+ * + * Return: 0 for success or error code + */ + +void hif_usb_ramdump_handler(struct hif_opaque_softc *scn) +{ + uint32_t *reg, pattern, i, start_addr = 0; + uint32_t len; + uint8_t *data; + uint8_t str_buf[128]; + uint32_t remaining; + struct hif_usb_softc *sc = HIF_GET_USB_SOFTC(scn); + struct hif_softc *hif_ctx = HIF_GET_SOFTC(scn); + struct hif_target_info *tgt_info = &hif_ctx->target_info; + + data = sc->fw_data; + len = sc->fw_data_len; + pattern = *((uint32_t *) data); + + if (pattern == FW_ASSERT_PATTERN) { + HIF_ERROR("Firmware crash detected...\n"); + HIF_ERROR("Host SW version: %s\n", QWLAN_VERSIONSTR); + HIF_ERROR("target_type: %d.target_version %d. target_revision%d.", + tgt_info->target_type, + tgt_info->target_version, + tgt_info->target_revision); + + reg = (uint32_t *) (data + 4); + print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET, 16, 4, reg, + min_t(uint32_t, len - 4, FW_REG_DUMP_CNT * 4), + false); + sc->fw_ram_dumping = 0; + + } else if (pattern == FW_REG_PATTERN) { + reg = (uint32_t *) (data + 4); + start_addr = *reg++; + if (sc->fw_ram_dumping == 0) { + pr_err("Firmware stack dump:"); + sc->fw_ram_dumping = 1; + fw_stack_addr = start_addr; + } + remaining = len - 8; + /* len is in byte, but it's printed in double-word. 
*/ + for (i = 0; i < (len - 8); i += 16) { + if ((*reg == FW_REG_END_PATTERN) && (i == len - 12)) { + sc->fw_ram_dumping = 0; + pr_err("Stack start address = %#08x\n", + fw_stack_addr); + break; + } + hex_dump_to_buffer(reg, remaining, 16, 4, str_buf, + sizeof(str_buf), false); + pr_err("%#08x: %s\n", start_addr + i, str_buf); + remaining -= 16; + reg += 4; + } + } else if ((!sc->enable_self_recovery) && + ((pattern & FW_RAMDUMP_PATTERN_MASK) == + FW_RAMDUMP_PATTERN)) { + hif_fw_assert_ramdump_pattern(sc); + } +} + +#ifndef QCA_WIFI_3_0 +/** + * hif_check_fw_reg(): hif_check_fw_reg + * @scn: scn + * @state: + * + * Return: int + */ +int hif_check_fw_reg(struct hif_opaque_softc *scn) +{ + return 0; +} +#endif + +/** + * hif_usb_needs_bmi() - return true if the soc needs bmi through the driver + * @scn: hif context + * + * Return: true if soc needs driver bmi otherwise false + */ +bool hif_usb_needs_bmi(struct hif_softc *scn) +{ + return true; +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/usb/if_usb.h b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/if_usb.h new file mode 100644 index 0000000000000000000000000000000000000000..130e08a41820365ffa00f046311631bba7070961 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/if_usb.h @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __ATH_USB_H__ +#define __ATH_USB_H__ + +#include + +/* + * There may be some pending tx frames during platform suspend. + * Suspend operation should be delayed until those tx frames are + * transferred from the host to target. This macro specifies how + * long suspend thread has to sleep before checking pending tx + * frame count. + */ +#define OL_ATH_TX_DRAIN_WAIT_DELAY 50 /* ms */ +/* + * Wait time (in unit of OL_ATH_TX_DRAIN_WAIT_DELAY) for pending + * tx frame completion before suspend. Refer: hif_pci_suspend() + */ +#define OL_ATH_TX_DRAIN_WAIT_CNT 10 + +#define CONFIG_COPY_ENGINE_SUPPORT /* TBDXXX: here for now */ +#define ATH_DBG_DEFAULT 0 +#include +#include +#include +#include "osapi_linux.h" +#include "hif_main.h" +#include "hif.h" + +#define FW_REG_DUMP_CNT 60 + +/* Magic patterns for FW to report crash information (Rome USB) */ +#define FW_ASSERT_PATTERN 0x0000c600 +#define FW_REG_PATTERN 0x0000d600 +#define FW_REG_END_PATTERN 0x0000e600 +#define FW_RAMDUMP_PATTERN 0x0000f600 +#define FW_RAMDUMP_END_PATTERN 0x0000f601 +#define FW_RAMDUMP_PATTERN_MASK 0xfffffff0 +#define FW_RAMDUMP_DRAMSIZE 0x00098000 +#define FW_RAMDUMP_IRAMSIZE 0x000C0000 +#define FW_RAMDUMP_AXISIZE 0x00020000 + +/* FW RAM segments (Rome USB) */ +enum { + FW_RAM_SEG_DRAM, + FW_RAM_SEG_IRAM, + FW_RAM_SEG_AXI, + FW_RAM_SEG_CNT +}; + +/* Allocate 384K memory to save each segment of ram dump */ +#define FW_RAMDUMP_SEG_SIZE 393216 + +/* structure to save RAM dump information */ +struct fw_ramdump { + uint32_t start_addr; + uint32_t length; + uint8_t *mem; +}; + +/* USB Endpoint definition */ +enum HIF_USB_PIPE_ID { + HIF_TX_CTRL_PIPE = 0, + 
HIF_TX_DATA_LP_PIPE, + HIF_TX_DATA_MP_PIPE, + HIF_TX_DATA_HP_PIPE, + HIF_RX_CTRL_PIPE, + HIF_RX_DATA_PIPE, + HIF_RX_DATA2_PIPE, + HIF_RX_INT_PIPE, + HIF_USB_PIPE_MAX +}; + +#define HIF_USB_PIPE_INVALID HIF_USB_PIPE_MAX + +struct HIF_USB_PIPE { + DL_LIST urb_list_head; + DL_LIST urb_pending_list; + int32_t urb_alloc; + int32_t urb_cnt; + int32_t urb_cnt_thresh; + unsigned int usb_pipe_handle; + uint32_t flags; + uint8_t ep_address; + uint8_t logical_pipe_num; + struct HIF_DEVICE_USB *device; + uint16_t max_packet_size; +#ifdef HIF_USB_TASKLET + struct tasklet_struct io_complete_tasklet; +#else + struct work_struct io_complete_work; +#endif + struct sk_buff_head io_comp_queue; + struct usb_endpoint_descriptor *ep_desc; + int32_t urb_prestart_cnt; +}; + +struct HIF_DEVICE_USB { + struct hif_softc ol_sc; + qdf_spinlock_t cs_lock; + qdf_spinlock_t tx_lock; + qdf_spinlock_t rx_lock; + struct hif_msg_callbacks htc_callbacks; + struct usb_device *udev; + struct usb_interface *interface; + struct HIF_USB_PIPE pipes[HIF_USB_PIPE_MAX]; + uint8_t *diag_cmd_buffer; + uint8_t *diag_resp_buffer; + void *claimed_context; + A_BOOL is_bundle_enabled; + uint16_t rx_bundle_cnt; + uint32_t rx_bundle_buf_len; +}; + +struct hif_usb_softc { + struct HIF_DEVICE_USB hif_hdl; + /* For efficiency, should be first in struct */ + struct device *dev; + struct usb_dev *pdev; + /* + * Guard changes to Target HW state and to software + * structures that track hardware state. 
+ */ + u16 devid; + struct usb_interface *interface; + struct notifier_block reboot_notifier; /* default mode before reboot */ + u8 suspend_state; + u8 *fw_data; + u32 fw_data_len; + /* structure to save FW RAM dump (Rome USB) */ + struct fw_ramdump *ramdump[FW_RAM_SEG_CNT]; + uint8_t ramdump_index; + bool fw_ram_dumping; + /* enable FW self-recovery for Rome USB */ + bool enable_self_recovery; +}; + +/** + * hif_dump_info() - dump info about all HIF pipes and endpoints + * @scn: pointer to hif_opaque_softc + * + * Return: none + */ +void hif_dump_info(struct hif_opaque_softc *scn); + +/** + * hif_suspend_wow() - Send wow suspend command + * @scn: pointer to hif_opaque_softc + * + * Return: none + */ +void hif_suspend_wow(struct hif_opaque_softc *scn); +#endif /* __ATH_USB_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/usb/regtable_usb.c b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/regtable_usb.c new file mode 100644 index 0000000000000000000000000000000000000000..991eea70ba824f2465fcb7f697ceaa41a152940d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/regtable_usb.c @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "target_type.h" +#include "targaddrs.h" +#include "regtable_usb.h" +#include "ar9888def.h" +#include "ar6320def.h" +#include "ar6320v2def.h" +#include "hif_debug.h" + +void target_register_tbl_attach(struct hif_softc *scn, + uint32_t target_type) +{ + switch (target_type) { + case TARGET_TYPE_AR9888: + scn->targetdef = &ar9888_targetdef; + break; + case TARGET_TYPE_AR6320: + scn->targetdef = &ar6320_targetdef; + break; + case TARGET_TYPE_AR6320V2: + scn->targetdef = &ar6320v2_targetdef; + break; + default: + HIF_ERROR("%s: unknown target_type %u", __func__, target_type); + break; + } +} +void hif_register_tbl_attach(struct hif_softc *scn, uint32_t hif_type) +{ + switch (hif_type) { + case HIF_TYPE_AR9888: + scn->hostdef = &ar9888_hostdef; + break; + case HIF_TYPE_AR6320: + scn->hostdef = &ar6320_hostdef; + break; + case HIF_TYPE_AR6320V2: + scn->hostdef = &ar6320v2_hostdef; + break; + default: + HIF_ERROR("%s: unknown hif_type %u", __func__, hif_type); + break; + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/usb/regtable_usb.h b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/regtable_usb.h new file mode 100644 index 0000000000000000000000000000000000000000..f3bf559c27e45f51f9bd83406a55b31da6386412 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/regtable_usb.h @@ -0,0 +1,1288 @@ +/* + * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _REGTABLE_USB_H_ +#define _REGTABLE_USB_H_ +#include "if_usb.h" + +#define MISSING 0 + +struct targetdef_s { + u_int32_t d_RTC_SOC_BASE_ADDRESS; + u_int32_t d_RTC_WMAC_BASE_ADDRESS; + u_int32_t d_SYSTEM_SLEEP_OFFSET; + u_int32_t d_WLAN_SYSTEM_SLEEP_OFFSET; + u_int32_t d_WLAN_SYSTEM_SLEEP_DISABLE_LSB; + u_int32_t d_WLAN_SYSTEM_SLEEP_DISABLE_MASK; + u_int32_t d_CLOCK_CONTROL_OFFSET; + u_int32_t d_CLOCK_CONTROL_SI0_CLK_MASK; + u_int32_t d_RESET_CONTROL_OFFSET; + u_int32_t d_RESET_CONTROL_MBOX_RST_MASK; + u_int32_t d_RESET_CONTROL_SI0_RST_MASK; + u_int32_t d_WLAN_RESET_CONTROL_OFFSET; + u_int32_t d_WLAN_RESET_CONTROL_COLD_RST_MASK; + u_int32_t d_WLAN_RESET_CONTROL_WARM_RST_MASK; + u_int32_t d_GPIO_BASE_ADDRESS; + u_int32_t d_GPIO_PIN0_OFFSET; + u_int32_t d_GPIO_PIN1_OFFSET; + u_int32_t d_GPIO_PIN0_CONFIG_MASK; + u_int32_t d_GPIO_PIN1_CONFIG_MASK; + u_int32_t d_SI_CONFIG_BIDIR_OD_DATA_LSB; + u_int32_t d_SI_CONFIG_BIDIR_OD_DATA_MASK; + u_int32_t d_SI_CONFIG_I2C_LSB; + u_int32_t d_SI_CONFIG_I2C_MASK; + u_int32_t d_SI_CONFIG_POS_SAMPLE_LSB; + u_int32_t d_SI_CONFIG_POS_SAMPLE_MASK; + u_int32_t d_SI_CONFIG_INACTIVE_CLK_LSB; + u_int32_t d_SI_CONFIG_INACTIVE_CLK_MASK; + u_int32_t d_SI_CONFIG_INACTIVE_DATA_LSB; + u_int32_t d_SI_CONFIG_INACTIVE_DATA_MASK; + u_int32_t d_SI_CONFIG_DIVIDER_LSB; + u_int32_t d_SI_CONFIG_DIVIDER_MASK; + u_int32_t d_SI_BASE_ADDRESS; + u_int32_t d_SI_CONFIG_OFFSET; + u_int32_t d_SI_TX_DATA0_OFFSET; + u_int32_t d_SI_TX_DATA1_OFFSET; + u_int32_t d_SI_RX_DATA0_OFFSET; + u_int32_t d_SI_RX_DATA1_OFFSET; + u_int32_t d_SI_CS_OFFSET; + u_int32_t d_SI_CS_DONE_ERR_MASK; + u_int32_t d_SI_CS_DONE_INT_MASK; + 
u_int32_t d_SI_CS_START_LSB; + u_int32_t d_SI_CS_START_MASK; + u_int32_t d_SI_CS_RX_CNT_LSB; + u_int32_t d_SI_CS_RX_CNT_MASK; + u_int32_t d_SI_CS_TX_CNT_LSB; + u_int32_t d_SI_CS_TX_CNT_MASK; + u_int32_t d_BOARD_DATA_SZ; + u_int32_t d_BOARD_EXT_DATA_SZ; + u_int32_t d_MBOX_BASE_ADDRESS; + u_int32_t d_LOCAL_SCRATCH_OFFSET; + u_int32_t d_CPU_CLOCK_OFFSET; + u_int32_t d_LPO_CAL_OFFSET; + u_int32_t d_GPIO_PIN10_OFFSET; + u_int32_t d_GPIO_PIN11_OFFSET; + u_int32_t d_GPIO_PIN12_OFFSET; + u_int32_t d_GPIO_PIN13_OFFSET; + u_int32_t d_CLOCK_GPIO_OFFSET; + u_int32_t d_CPU_CLOCK_STANDARD_LSB; + u_int32_t d_CPU_CLOCK_STANDARD_MASK; + u_int32_t d_LPO_CAL_ENABLE_LSB; + u_int32_t d_LPO_CAL_ENABLE_MASK; + u_int32_t d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB; + u_int32_t d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK; + u_int32_t d_ANALOG_INTF_BASE_ADDRESS; + u_int32_t d_WLAN_MAC_BASE_ADDRESS; + u_int32_t d_CE0_BASE_ADDRESS; + u_int32_t d_CE1_BASE_ADDRESS; + u_int32_t d_FW_INDICATOR_ADDRESS; + u_int32_t d_DRAM_BASE_ADDRESS; + u_int32_t d_SOC_CORE_BASE_ADDRESS; + u_int32_t d_CORE_CTRL_ADDRESS; + u_int32_t d_CE_COUNT; + u_int32_t d_MSI_NUM_REQUEST; + u_int32_t d_MSI_ASSIGN_FW; + u_int32_t d_MSI_ASSIGN_CE_INITIAL; + u_int32_t d_PCIE_INTR_ENABLE_ADDRESS; + u_int32_t d_PCIE_INTR_CLR_ADDRESS; + u_int32_t d_PCIE_INTR_FIRMWARE_MASK; + u_int32_t d_PCIE_INTR_CE_MASK_ALL; + u_int32_t d_CORE_CTRL_CPU_INTR_MASK; + u_int32_t d_SR_WR_INDEX_ADDRESS; + u_int32_t d_DST_WATERMARK_ADDRESS; + + /* htt_rx.c */ + u_int32_t d_RX_MSDU_END_4_FIRST_MSDU_MASK; + u_int32_t d_RX_MSDU_END_4_FIRST_MSDU_LSB; + uint32_t d_RX_MPDU_START_0_RETRY_LSB; + uint32_t d_RX_MPDU_START_0_RETRY_MASK; + u_int32_t d_RX_MPDU_START_0_SEQ_NUM_MASK; + u_int32_t d_RX_MPDU_START_0_SEQ_NUM_LSB; + u_int32_t d_RX_MPDU_START_2_PN_47_32_LSB; + u_int32_t d_RX_MPDU_START_2_PN_47_32_MASK; + uint32_t d_RX_MPDU_START_2_TID_LSB; + uint32_t d_RX_MPDU_START_2_TID_MASK; + u_int32_t d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK; + u_int32_t d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB; + 
u_int32_t d_RX_MSDU_END_1_KEY_ID_OCT_MASK; + u_int32_t d_RX_MSDU_END_1_KEY_ID_OCT_LSB; + u_int32_t d_RX_MSDU_END_4_LAST_MSDU_MASK; + u_int32_t d_RX_MSDU_END_4_LAST_MSDU_LSB; + u_int32_t d_RX_ATTENTION_0_MCAST_BCAST_MASK; + u_int32_t d_RX_ATTENTION_0_MCAST_BCAST_LSB; + u_int32_t d_RX_ATTENTION_0_FRAGMENT_MASK; + u_int32_t d_RX_ATTENTION_0_FRAGMENT_LSB; + u_int32_t d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK; + u_int32_t d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK; + u_int32_t d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB; + u_int32_t d_RX_MSDU_START_0_MSDU_LENGTH_MASK; + u_int32_t d_RX_MSDU_START_0_MSDU_LENGTH_LSB; + u_int32_t d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET; + u_int32_t d_RX_MSDU_START_2_DECAP_FORMAT_MASK; + u_int32_t d_RX_MSDU_START_2_DECAP_FORMAT_LSB; + u_int32_t d_RX_MPDU_START_0_ENCRYPTED_MASK; + u_int32_t d_RX_MPDU_START_0_ENCRYPTED_LSB; + u_int32_t d_RX_ATTENTION_0_MORE_DATA_MASK; + u_int32_t d_RX_ATTENTION_0_MSDU_DONE_MASK; + u_int32_t d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK; + /* end */ + /* copy_engine.c */ + u_int32_t d_DST_WR_INDEX_ADDRESS; + u_int32_t d_SRC_WATERMARK_ADDRESS; + u_int32_t d_SRC_WATERMARK_LOW_MASK; + u_int32_t d_SRC_WATERMARK_HIGH_MASK; + u_int32_t d_DST_WATERMARK_LOW_MASK; + u_int32_t d_DST_WATERMARK_HIGH_MASK; + u_int32_t d_CURRENT_SRRI_ADDRESS; + u_int32_t d_CURRENT_DRRI_ADDRESS; + u_int32_t d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK; + u_int32_t d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK; + u_int32_t d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK; + u_int32_t d_HOST_IS_DST_RING_LOW_WATERMARK_MASK; + u_int32_t d_HOST_IS_ADDRESS; + u_int32_t d_HOST_IS_COPY_COMPLETE_MASK; + u_int32_t d_CE_WRAPPER_BASE_ADDRESS; + u_int32_t d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS; + u_int32_t d_HOST_IE_ADDRESS; + u_int32_t d_HOST_IE_COPY_COMPLETE_MASK; + u_int32_t d_SR_BA_ADDRESS; + u_int32_t d_SR_SIZE_ADDRESS; + u_int32_t d_CE_CTRL1_ADDRESS; + u_int32_t d_CE_CTRL1_DMAX_LENGTH_MASK; + u_int32_t d_DR_BA_ADDRESS; + u_int32_t d_DR_SIZE_ADDRESS; + u_int32_t d_MISC_IE_ADDRESS; + u_int32_t 
d_MISC_IS_AXI_ERR_MASK; + u_int32_t d_MISC_IS_DST_ADDR_ERR_MASK; + u_int32_t d_MISC_IS_SRC_LEN_ERR_MASK; + u_int32_t d_MISC_IS_DST_MAX_LEN_VIO_MASK; + u_int32_t d_MISC_IS_DST_RING_OVERFLOW_MASK; + u_int32_t d_MISC_IS_SRC_RING_OVERFLOW_MASK; + u_int32_t d_SRC_WATERMARK_LOW_LSB; + u_int32_t d_SRC_WATERMARK_HIGH_LSB; + u_int32_t d_DST_WATERMARK_LOW_LSB; + u_int32_t d_DST_WATERMARK_HIGH_LSB; + u_int32_t d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK; + u_int32_t d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB; + u_int32_t d_CE_CTRL1_DMAX_LENGTH_LSB; + u_int32_t d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK; + u_int32_t d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK; + u_int32_t d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB; + u_int32_t d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB; + u_int32_t d_WLAN_DEBUG_INPUT_SEL_OFFSET; + u_int32_t d_WLAN_DEBUG_INPUT_SEL_SRC_MSB; + u_int32_t d_WLAN_DEBUG_INPUT_SEL_SRC_LSB; + u_int32_t d_WLAN_DEBUG_INPUT_SEL_SRC_MASK; + u_int32_t d_WLAN_DEBUG_CONTROL_OFFSET; + u_int32_t d_WLAN_DEBUG_CONTROL_ENABLE_MSB; + u_int32_t d_WLAN_DEBUG_CONTROL_ENABLE_LSB; + u_int32_t d_WLAN_DEBUG_CONTROL_ENABLE_MASK; + u_int32_t d_WLAN_DEBUG_OUT_OFFSET; + u_int32_t d_WLAN_DEBUG_OUT_DATA_MSB; + u_int32_t d_WLAN_DEBUG_OUT_DATA_LSB; + u_int32_t d_WLAN_DEBUG_OUT_DATA_MASK; + u_int32_t d_AMBA_DEBUG_BUS_OFFSET; + u_int32_t d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB; + u_int32_t d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB; + u_int32_t d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK; + u_int32_t d_AMBA_DEBUG_BUS_SEL_MSB; + u_int32_t d_AMBA_DEBUG_BUS_SEL_LSB; + u_int32_t d_AMBA_DEBUG_BUS_SEL_MASK; + u_int32_t d_CE_WRAPPER_DEBUG_OFFSET; + u_int32_t d_CE_WRAPPER_DEBUG_SEL_MSB; + u_int32_t d_CE_WRAPPER_DEBUG_SEL_LSB; + u_int32_t d_CE_WRAPPER_DEBUG_SEL_MASK; + u_int32_t d_CE_DEBUG_OFFSET; + u_int32_t d_CE_DEBUG_SEL_MSB; + u_int32_t d_CE_DEBUG_SEL_LSB; + u_int32_t d_CE_DEBUG_SEL_MASK; + /* end */ + /* PLL start */ + u_int32_t d_EFUSE_OFFSET; + u_int32_t d_EFUSE_XTAL_SEL_MSB; + u_int32_t d_EFUSE_XTAL_SEL_LSB; + u_int32_t 
d_EFUSE_XTAL_SEL_MASK; + u_int32_t d_BB_PLL_CONFIG_OFFSET; + u_int32_t d_BB_PLL_CONFIG_OUTDIV_MSB; + u_int32_t d_BB_PLL_CONFIG_OUTDIV_LSB; + u_int32_t d_BB_PLL_CONFIG_OUTDIV_MASK; + u_int32_t d_BB_PLL_CONFIG_FRAC_MSB; + u_int32_t d_BB_PLL_CONFIG_FRAC_LSB; + u_int32_t d_BB_PLL_CONFIG_FRAC_MASK; + u_int32_t d_WLAN_PLL_SETTLE_TIME_MSB; + u_int32_t d_WLAN_PLL_SETTLE_TIME_LSB; + u_int32_t d_WLAN_PLL_SETTLE_TIME_MASK; + u_int32_t d_WLAN_PLL_SETTLE_OFFSET; + u_int32_t d_WLAN_PLL_SETTLE_SW_MASK; + u_int32_t d_WLAN_PLL_SETTLE_RSTMASK; + u_int32_t d_WLAN_PLL_SETTLE_RESET; + u_int32_t d_WLAN_PLL_CONTROL_NOPWD_MSB; + u_int32_t d_WLAN_PLL_CONTROL_NOPWD_LSB; + u_int32_t d_WLAN_PLL_CONTROL_NOPWD_MASK; + u_int32_t d_WLAN_PLL_CONTROL_BYPASS_MSB; + u_int32_t d_WLAN_PLL_CONTROL_BYPASS_LSB; + u_int32_t d_WLAN_PLL_CONTROL_BYPASS_MASK; + u_int32_t d_WLAN_PLL_CONTROL_BYPASS_RESET; + u_int32_t d_WLAN_PLL_CONTROL_CLK_SEL_MSB; + u_int32_t d_WLAN_PLL_CONTROL_CLK_SEL_LSB; + u_int32_t d_WLAN_PLL_CONTROL_CLK_SEL_MASK; + u_int32_t d_WLAN_PLL_CONTROL_CLK_SEL_RESET; + u_int32_t d_WLAN_PLL_CONTROL_REFDIV_MSB; + u_int32_t d_WLAN_PLL_CONTROL_REFDIV_LSB; + u_int32_t d_WLAN_PLL_CONTROL_REFDIV_MASK; + u_int32_t d_WLAN_PLL_CONTROL_REFDIV_RESET; + u_int32_t d_WLAN_PLL_CONTROL_DIV_MSB; + u_int32_t d_WLAN_PLL_CONTROL_DIV_LSB; + u_int32_t d_WLAN_PLL_CONTROL_DIV_MASK; + u_int32_t d_WLAN_PLL_CONTROL_DIV_RESET; + u_int32_t d_WLAN_PLL_CONTROL_OFFSET; + u_int32_t d_WLAN_PLL_CONTROL_SW_MASK; + u_int32_t d_WLAN_PLL_CONTROL_RSTMASK; + u_int32_t d_WLAN_PLL_CONTROL_RESET; + u_int32_t d_SOC_CORE_CLK_CTRL_OFFSET; + u_int32_t d_SOC_CORE_CLK_CTRL_DIV_MSB; + u_int32_t d_SOC_CORE_CLK_CTRL_DIV_LSB; + u_int32_t d_SOC_CORE_CLK_CTRL_DIV_MASK; + u_int32_t d_RTC_SYNC_STATUS_PLL_CHANGING_MSB; + u_int32_t d_RTC_SYNC_STATUS_PLL_CHANGING_LSB; + u_int32_t d_RTC_SYNC_STATUS_PLL_CHANGING_MASK; + u_int32_t d_RTC_SYNC_STATUS_PLL_CHANGING_RESET; + u_int32_t d_RTC_SYNC_STATUS_OFFSET; + u_int32_t d_SOC_CPU_CLOCK_OFFSET; + u_int32_t 
d_SOC_CPU_CLOCK_STANDARD_MSB; + u_int32_t d_SOC_CPU_CLOCK_STANDARD_LSB; + u_int32_t d_SOC_CPU_CLOCK_STANDARD_MASK; + /* PLL end */ + u_int32_t d_SOC_POWER_REG_OFFSET; + u_int32_t d_PCIE_INTR_CAUSE_ADDRESS; + u_int32_t d_SOC_RESET_CONTROL_ADDRESS; + u_int32_t d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK; + u_int32_t d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB; + u_int32_t d_SOC_RESET_CONTROL_CE_RST_MASK; + u_int32_t d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK; + u_int32_t d_CPU_INTR_ADDRESS; + u_int32_t d_SOC_LF_TIMER_CONTROL0_ADDRESS; + u_int32_t d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK; + /* chip id start */ + u_int32_t d_SOC_CHIP_ID_ADDRESS; + u_int32_t d_SOC_CHIP_ID_VERSION_MASK; + u_int32_t d_SOC_CHIP_ID_VERSION_LSB; + u_int32_t d_SOC_CHIP_ID_REVISION_MASK; + u_int32_t d_SOC_CHIP_ID_REVISION_LSB; + /* chip id end */ +}; + +#define RTC_SOC_BASE_ADDRESS \ + (scn->targetdef->d_RTC_SOC_BASE_ADDRESS) +#define RTC_WMAC_BASE_ADDRESS \ + (scn->targetdef->d_RTC_WMAC_BASE_ADDRESS) +#define SYSTEM_SLEEP_OFFSET \ + (scn->targetdef->d_SYSTEM_SLEEP_OFFSET) +#define WLAN_SYSTEM_SLEEP_OFFSET \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_OFFSET) +#define WLAN_SYSTEM_SLEEP_DISABLE_LSB \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_DISABLE_LSB) +#define WLAN_SYSTEM_SLEEP_DISABLE_MASK \ + (scn->targetdef->d_WLAN_SYSTEM_SLEEP_DISABLE_MASK) +#define CLOCK_CONTROL_OFFSET \ + (scn->targetdef->d_CLOCK_CONTROL_OFFSET) +#define CLOCK_CONTROL_SI0_CLK_MASK \ + (scn->targetdef->d_CLOCK_CONTROL_SI0_CLK_MASK) +#define RESET_CONTROL_OFFSET \ + (scn->targetdef->d_RESET_CONTROL_OFFSET) +#define RESET_CONTROL_MBOX_RST_MASK \ + (scn->targetdef->d_RESET_CONTROL_MBOX_RST_MASK) +#define RESET_CONTROL_SI0_RST_MASK \ + (scn->targetdef->d_RESET_CONTROL_SI0_RST_MASK) +#define WLAN_RESET_CONTROL_OFFSET \ + (scn->targetdef->d_WLAN_RESET_CONTROL_OFFSET) +#define WLAN_RESET_CONTROL_COLD_RST_MASK \ + (scn->targetdef->d_WLAN_RESET_CONTROL_COLD_RST_MASK) +#define WLAN_RESET_CONTROL_WARM_RST_MASK \ + 
(scn->targetdef->d_WLAN_RESET_CONTROL_WARM_RST_MASK) +#define GPIO_BASE_ADDRESS \ + (scn->targetdef->d_GPIO_BASE_ADDRESS) +#define GPIO_PIN0_OFFSET \ + (scn->targetdef->d_GPIO_PIN0_OFFSET) +#define GPIO_PIN1_OFFSET \ + (scn->targetdef->d_GPIO_PIN1_OFFSET) +#define GPIO_PIN0_CONFIG_MASK \ + (scn->targetdef->d_GPIO_PIN0_CONFIG_MASK) +#define GPIO_PIN1_CONFIG_MASK \ + (scn->targetdef->d_GPIO_PIN1_CONFIG_MASK) +#define SI_CONFIG_BIDIR_OD_DATA_LSB \ + (scn->targetdef->d_SI_CONFIG_BIDIR_OD_DATA_LSB) +#define SI_CONFIG_BIDIR_OD_DATA_MASK \ + (scn->targetdef->d_SI_CONFIG_BIDIR_OD_DATA_MASK) +#define SI_CONFIG_I2C_LSB \ + (scn->targetdef->d_SI_CONFIG_I2C_LSB) +#define SI_CONFIG_I2C_MASK \ + (scn->targetdef->d_SI_CONFIG_I2C_MASK) +#define SI_CONFIG_POS_SAMPLE_LSB \ + (scn->targetdef->d_SI_CONFIG_POS_SAMPLE_LSB) +#define SI_CONFIG_POS_SAMPLE_MASK \ + (scn->targetdef->d_SI_CONFIG_POS_SAMPLE_MASK) +#define SI_CONFIG_INACTIVE_CLK_LSB \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_CLK_LSB) +#define SI_CONFIG_INACTIVE_CLK_MASK \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_CLK_MASK) +#define SI_CONFIG_INACTIVE_DATA_LSB \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_DATA_LSB) +#define SI_CONFIG_INACTIVE_DATA_MASK \ + (scn->targetdef->d_SI_CONFIG_INACTIVE_DATA_MASK) +#define SI_CONFIG_DIVIDER_LSB \ + (scn->targetdef->d_SI_CONFIG_DIVIDER_LSB) +#define SI_CONFIG_DIVIDER_MASK \ + (scn->targetdef->d_SI_CONFIG_DIVIDER_MASK) +#define SI_BASE_ADDRESS \ + (scn->targetdef->d_SI_BASE_ADDRESS) +#define SI_CONFIG_OFFSET \ + (scn->targetdef->d_SI_CONFIG_OFFSET) +#define SI_TX_DATA0_OFFSET \ + (scn->targetdef->d_SI_TX_DATA0_OFFSET) +#define SI_TX_DATA1_OFFSET \ + (scn->targetdef->d_SI_TX_DATA1_OFFSET) +#define SI_RX_DATA0_OFFSET \ + (scn->targetdef->d_SI_RX_DATA0_OFFSET) +#define SI_RX_DATA1_OFFSET \ + (scn->targetdef->d_SI_RX_DATA1_OFFSET) +#define SI_CS_OFFSET \ + (scn->targetdef->d_SI_CS_OFFSET) +#define SI_CS_DONE_ERR_MASK \ + (scn->targetdef->d_SI_CS_DONE_ERR_MASK) +#define SI_CS_DONE_INT_MASK \ + 
(scn->targetdef->d_SI_CS_DONE_INT_MASK) +#define SI_CS_START_LSB \ + (scn->targetdef->d_SI_CS_START_LSB) +#define SI_CS_START_MASK \ + (scn->targetdef->d_SI_CS_START_MASK) +#define SI_CS_RX_CNT_LSB \ + (scn->targetdef->d_SI_CS_RX_CNT_LSB) +#define SI_CS_RX_CNT_MASK \ + (scn->targetdef->d_SI_CS_RX_CNT_MASK) +#define SI_CS_TX_CNT_LSB \ + (scn->targetdef->d_SI_CS_TX_CNT_LSB) +#define SI_CS_TX_CNT_MASK \ + (scn->targetdef->d_SI_CS_TX_CNT_MASK) +#define EEPROM_SZ \ + (scn->targetdef->d_BOARD_DATA_SZ) +#define EEPROM_EXT_SZ \ + (scn->targetdef->d_BOARD_EXT_DATA_SZ) +#define MBOX_BASE_ADDRESS \ + (scn->targetdef->d_MBOX_BASE_ADDRESS) +#define LOCAL_SCRATCH_OFFSET \ + (scn->targetdef->d_LOCAL_SCRATCH_OFFSET) +#define CPU_CLOCK_OFFSET \ + (scn->targetdef->d_CPU_CLOCK_OFFSET) +#define LPO_CAL_OFFSET \ + (scn->targetdef->d_LPO_CAL_OFFSET) +#define GPIO_PIN10_OFFSET \ + (scn->targetdef->d_GPIO_PIN10_OFFSET) +#define GPIO_PIN11_OFFSET \ + (scn->targetdef->d_GPIO_PIN11_OFFSET) +#define GPIO_PIN12_OFFSET \ + (scn->targetdef->d_GPIO_PIN12_OFFSET) +#define GPIO_PIN13_OFFSET \ + (scn->targetdef->d_GPIO_PIN13_OFFSET) +#define CLOCK_GPIO_OFFSET \ + (scn->targetdef->d_CLOCK_GPIO_OFFSET) +#define CPU_CLOCK_STANDARD_LSB \ + (scn->targetdef->d_CPU_CLOCK_STANDARD_LSB) +#define CPU_CLOCK_STANDARD_MASK \ + (scn->targetdef->d_CPU_CLOCK_STANDARD_MASK) +#define LPO_CAL_ENABLE_LSB \ + (scn->targetdef->d_LPO_CAL_ENABLE_LSB) +#define LPO_CAL_ENABLE_MASK \ + (scn->targetdef->d_LPO_CAL_ENABLE_MASK) +#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB \ + (scn->targetdef->d_CLOCK_GPIO_BT_CLK_OUT_EN_LSB) +#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK \ + (scn->targetdef->d_CLOCK_GPIO_BT_CLK_OUT_EN_MASK) +#define ANALOG_INTF_BASE_ADDRESS \ + (scn->targetdef->d_ANALOG_INTF_BASE_ADDRESS) +#define WLAN_MAC_BASE_ADDRESS \ + (scn->targetdef->d_WLAN_MAC_BASE_ADDRESS) +#define CE0_BASE_ADDRESS \ + (scn->targetdef->d_CE0_BASE_ADDRESS) +#define CE1_BASE_ADDRESS \ + (scn->targetdef->d_CE1_BASE_ADDRESS) +#define FW_INDICATOR_ADDRESS \ 
+ (scn->targetdef->d_FW_INDICATOR_ADDRESS) +#define DRAM_BASE_ADDRESS \ + (scn->targetdef->d_DRAM_BASE_ADDRESS) +#define SOC_CORE_BASE_ADDRESS \ + (scn->targetdef->d_SOC_CORE_BASE_ADDRESS) +#define CORE_CTRL_ADDRESS \ + (scn->targetdef->d_CORE_CTRL_ADDRESS) +#define CE_COUNT \ + (scn->targetdef->d_CE_COUNT) +#define PCIE_INTR_ENABLE_ADDRESS \ + (scn->targetdef->d_PCIE_INTR_ENABLE_ADDRESS) +#define PCIE_INTR_CLR_ADDRESS \ + (scn->targetdef->d_PCIE_INTR_CLR_ADDRESS) +#define PCIE_INTR_FIRMWARE_MASK \ + (scn->targetdef->d_PCIE_INTR_FIRMWARE_MASK) +#define PCIE_INTR_CE_MASK_ALL \ + (scn->targetdef->d_PCIE_INTR_CE_MASK_ALL) +#define CORE_CTRL_CPU_INTR_MASK \ + (scn->targetdef->d_CORE_CTRL_CPU_INTR_MASK) +#define PCIE_INTR_CAUSE_ADDRESS \ + (scn->targetdef->d_PCIE_INTR_CAUSE_ADDRESS) +#define SOC_RESET_CONTROL_ADDRESS \ + (scn->targetdef->d_SOC_RESET_CONTROL_ADDRESS) +#define SOC_RESET_CONTROL_CE_RST_MASK \ + (scn->targetdef->d_SOC_RESET_CONTROL_CE_RST_MASK) +#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK\ + (scn->targetdef->d_SOC_RESET_CONTROL_CPU_WARM_RST_MASK) +#define CPU_INTR_ADDRESS \ + (scn->targetdef->d_CPU_INTR_ADDRESS) +#define SOC_LF_TIMER_CONTROL0_ADDRESS \ + (scn->targetdef->d_SOC_LF_TIMER_CONTROL0_ADDRESS) +#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK \ + (scn->targetdef->d_SOC_LF_TIMER_CONTROL0_ENABLE_MASK) +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB \ + (scn->targetdef->d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB) +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK \ + (scn->targetdef->d_SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK) + +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_GET(x) \ + (((x) & SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK) >> \ + SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB) +#define SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_SET(x) \ + (((x) << SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_LSB) & \ + SOC_RESET_CONTROL_PCIE_RST_SHORT_OVRD_MASK) + +/* hif_pci.c */ +#define CHIP_ID_ADDRESS \ + (scn->targetdef->d_SOC_CHIP_ID_ADDRESS) +#define 
SOC_CHIP_ID_REVISION_MASK \ + (scn->targetdef->d_SOC_CHIP_ID_REVISION_MASK) +#define SOC_CHIP_ID_REVISION_LSB \ + (scn->targetdef->d_SOC_CHIP_ID_REVISION_LSB) +#define SOC_CHIP_ID_VERSION_MASK \ + (scn->targetdef->d_SOC_CHIP_ID_VERSION_MASK) +#define SOC_CHIP_ID_VERSION_LSB \ + (scn->targetdef->d_SOC_CHIP_ID_VERSION_LSB) +#define CHIP_ID_REVISION_GET(x) \ + (((x) & SOC_CHIP_ID_REVISION_MASK) >> SOC_CHIP_ID_REVISION_LSB) +#define CHIP_ID_VERSION_GET(x) \ + (((x) & SOC_CHIP_ID_VERSION_MASK) >> SOC_CHIP_ID_VERSION_LSB) +/* hif_pci.c end */ + +/* misc */ +#define SR_WR_INDEX_ADDRESS \ + (scn->targetdef->d_SR_WR_INDEX_ADDRESS) +#define DST_WATERMARK_ADDRESS \ + (scn->targetdef->d_DST_WATERMARK_ADDRESS) +#define SOC_POWER_REG_OFFSET \ + (scn->targetdef->d_SOC_POWER_REG_OFFSET) +/* end */ + +/* htt_rx.c */ +#define RX_MSDU_END_4_FIRST_MSDU_MASK \ + (pdev->targetdef->d_RX_MSDU_END_4_FIRST_MSDU_MASK) +#define RX_MSDU_END_4_FIRST_MSDU_LSB \ + (pdev->targetdef->d_RX_MSDU_END_4_FIRST_MSDU_LSB) +#define RX_MPDU_START_0_RETRY_LSB \ + (pdev->targetdef->d_RX_MPDU_START_0_RETRY_LSB) +#define RX_MPDU_START_0_RETRY_MASK \ + (pdev->targetdef->d_RX_MPDU_START_0_RETRY_MASK) +#define RX_MPDU_START_0_SEQ_NUM_MASK \ + (pdev->targetdef->d_RX_MPDU_START_0_SEQ_NUM_MASK) +#define RX_MPDU_START_0_SEQ_NUM_LSB \ + (pdev->targetdef->d_RX_MPDU_START_0_SEQ_NUM_LSB) +#define RX_MPDU_START_2_PN_47_32_LSB \ + (pdev->targetdef->d_RX_MPDU_START_2_PN_47_32_LSB) +#define RX_MPDU_START_2_PN_47_32_MASK \ + (pdev->targetdef->d_RX_MPDU_START_2_PN_47_32_MASK) +#define RX_MPDU_START_2_TID_LSB \ + (pdev->targetdef->d_RX_MPDU_START_2_TID_LSB) +#define RX_MPDU_START_2_TID_MASK \ + (pdev->targetdef->d_RX_MPDU_START_2_TID_MASK) +#define RX_MSDU_END_1_KEY_ID_OCT_MASK \ + (pdev->targetdef->d_RX_MSDU_END_1_KEY_ID_OCT_MASK) +#define RX_MSDU_END_1_KEY_ID_OCT_LSB \ + (pdev->targetdef->d_RX_MSDU_END_1_KEY_ID_OCT_LSB) +#define RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK \ + (pdev->targetdef->d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK) 
+#define RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB\ + (pdev->targetdef->d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB) +#define RX_MSDU_END_4_LAST_MSDU_MASK \ + (pdev->targetdef->d_RX_MSDU_END_4_LAST_MSDU_MASK) +#define RX_MSDU_END_4_LAST_MSDU_LSB \ + (pdev->targetdef->d_RX_MSDU_END_4_LAST_MSDU_LSB) +#define RX_ATTENTION_0_MCAST_BCAST_MASK \ + (pdev->targetdef->d_RX_ATTENTION_0_MCAST_BCAST_MASK) +#define RX_ATTENTION_0_MCAST_BCAST_LSB \ + (pdev->targetdef->d_RX_ATTENTION_0_MCAST_BCAST_LSB) +#define RX_ATTENTION_0_FRAGMENT_MASK \ + (pdev->targetdef->d_RX_ATTENTION_0_FRAGMENT_MASK) +#define RX_ATTENTION_0_FRAGMENT_LSB \ + (pdev->targetdef->d_RX_ATTENTION_0_FRAGMENT_LSB) +#define RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK\ + (pdev->targetdef->d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK) +#define RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK \ + (pdev->targetdef->d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK) +#define RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB\ + (pdev->targetdef->d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB) +#define RX_MSDU_START_0_MSDU_LENGTH_MASK \ + (pdev->targetdef->d_RX_MSDU_START_0_MSDU_LENGTH_MASK) +#define RX_MSDU_START_0_MSDU_LENGTH_LSB \ + (pdev->targetdef->d_RX_MSDU_START_0_MSDU_LENGTH_LSB) +#define RX_MSDU_START_2_DECAP_FORMAT_OFFSET\ + (pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET) +#define RX_MSDU_START_2_DECAP_FORMAT_MASK \ + (pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_MASK) +#define RX_MSDU_START_2_DECAP_FORMAT_LSB \ + (pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_LSB) +#define RX_MPDU_START_0_ENCRYPTED_MASK \ + (pdev->targetdef->d_RX_MPDU_START_0_ENCRYPTED_MASK) +#define RX_MPDU_START_0_ENCRYPTED_LSB \ + (pdev->targetdef->d_RX_MPDU_START_0_ENCRYPTED_LSB) +#define RX_ATTENTION_0_MORE_DATA_MASK \ + (pdev->targetdef->d_RX_ATTENTION_0_MORE_DATA_MASK) +#define RX_ATTENTION_0_MSDU_DONE_MASK \ + (pdev->targetdef->d_RX_ATTENTION_0_MSDU_DONE_MASK) +#define RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK \ + (pdev->targetdef->d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK) +/* end */ + +/* 
copy_engine.c */ +#define DST_WR_INDEX_ADDRESS \ + (scn->targetdef->d_DST_WR_INDEX_ADDRESS) +#define SRC_WATERMARK_ADDRESS \ + (scn->targetdef->d_SRC_WATERMARK_ADDRESS) +#define SRC_WATERMARK_LOW_MASK \ + (scn->targetdef->d_SRC_WATERMARK_LOW_MASK) +#define SRC_WATERMARK_HIGH_MASK \ + (scn->targetdef->d_SRC_WATERMARK_HIGH_MASK) +#define DST_WATERMARK_LOW_MASK \ + (scn->targetdef->d_DST_WATERMARK_LOW_MASK) +#define DST_WATERMARK_HIGH_MASK \ + (scn->targetdef->d_DST_WATERMARK_HIGH_MASK) +#define CURRENT_SRRI_ADDRESS \ + (scn->targetdef->d_CURRENT_SRRI_ADDRESS) +#define CURRENT_DRRI_ADDRESS \ + (scn->targetdef->d_CURRENT_DRRI_ADDRESS) +#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK \ + (scn->targetdef->d_HOST_IS_SRC_RING_HIGH_WATERMARK_MASK) +#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK\ + (scn->targetdef->d_HOST_IS_SRC_RING_LOW_WATERMARK_MASK) +#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK \ + (scn->targetdef->d_HOST_IS_DST_RING_HIGH_WATERMARK_MASK) +#define HOST_IS_DST_RING_LOW_WATERMARK_MASK\ + (scn->targetdef->d_HOST_IS_DST_RING_LOW_WATERMARK_MASK) +#define HOST_IS_ADDRESS \ + (scn->targetdef->d_HOST_IS_ADDRESS) +#define HOST_IS_COPY_COMPLETE_MASK \ + (scn->targetdef->d_HOST_IS_COPY_COMPLETE_MASK) +#define CE_WRAPPER_BASE_ADDRESS \ + (scn->targetdef->d_CE_WRAPPER_BASE_ADDRESS) +#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS \ + (scn->targetdef->d_CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS) +#define HOST_IE_ADDRESS \ + (scn->targetdef->d_HOST_IE_ADDRESS) +#define HOST_IE_COPY_COMPLETE_MASK \ + (scn->targetdef->d_HOST_IE_COPY_COMPLETE_MASK) +#define SR_BA_ADDRESS \ + (scn->targetdef->d_SR_BA_ADDRESS) +#define SR_SIZE_ADDRESS \ + (scn->targetdef->d_SR_SIZE_ADDRESS) +#define CE_CTRL1_ADDRESS \ + (scn->targetdef->d_CE_CTRL1_ADDRESS) +#define CE_CTRL1_DMAX_LENGTH_MASK \ + (scn->targetdef->d_CE_CTRL1_DMAX_LENGTH_MASK) +#define DR_BA_ADDRESS \ + (scn->targetdef->d_DR_BA_ADDRESS) +#define DR_SIZE_ADDRESS \ + (scn->targetdef->d_DR_SIZE_ADDRESS) +#define MISC_IE_ADDRESS \ + 
(scn->targetdef->d_MISC_IE_ADDRESS) +#define MISC_IS_AXI_ERR_MASK \ + (scn->targetdef->d_MISC_IS_AXI_ERR_MASK) +#define MISC_IS_DST_ADDR_ERR_MASK \ + (scn->targetdef->d_MISC_IS_DST_ADDR_ERR_MASK) +#define MISC_IS_SRC_LEN_ERR_MASK \ + (scn->targetdef->d_MISC_IS_SRC_LEN_ERR_MASK) +#define MISC_IS_DST_MAX_LEN_VIO_MASK \ + (scn->targetdef->d_MISC_IS_DST_MAX_LEN_VIO_MASK) +#define MISC_IS_DST_RING_OVERFLOW_MASK \ + (scn->targetdef->d_MISC_IS_DST_RING_OVERFLOW_MASK) +#define MISC_IS_SRC_RING_OVERFLOW_MASK \ + (scn->targetdef->d_MISC_IS_SRC_RING_OVERFLOW_MASK) +#define SRC_WATERMARK_LOW_LSB \ + (scn->targetdef->d_SRC_WATERMARK_LOW_LSB) +#define SRC_WATERMARK_HIGH_LSB \ + (scn->targetdef->d_SRC_WATERMARK_HIGH_LSB) +#define DST_WATERMARK_LOW_LSB \ + (scn->targetdef->d_DST_WATERMARK_LOW_LSB) +#define DST_WATERMARK_HIGH_LSB \ + (scn->targetdef->d_DST_WATERMARK_HIGH_LSB) +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \ + (scn->targetdef->d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \ + (scn->targetdef->d_CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) +#define CE_CTRL1_DMAX_LENGTH_LSB \ + (scn->targetdef->d_CE_CTRL1_DMAX_LENGTH_LSB) +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK\ + (scn->targetdef->d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK\ + (scn->targetdef->d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB \ + (scn->targetdef->d_CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB \ + (scn->targetdef->d_CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) +#define WLAN_DEBUG_INPUT_SEL_OFFSET \ + (scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_OFFSET) +#define WLAN_DEBUG_INPUT_SEL_SRC_MSB \ + (scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_SRC_MSB) +#define WLAN_DEBUG_INPUT_SEL_SRC_LSB \ + (scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_SRC_LSB) +#define WLAN_DEBUG_INPUT_SEL_SRC_MASK \ + (scn->targetdef->d_WLAN_DEBUG_INPUT_SEL_SRC_MASK) +#define 
WLAN_DEBUG_CONTROL_OFFSET \ + (scn->targetdef->d_WLAN_DEBUG_CONTROL_OFFSET) +#define WLAN_DEBUG_CONTROL_ENABLE_MSB \ + (scn->targetdef->d_WLAN_DEBUG_CONTROL_ENABLE_MSB) +#define WLAN_DEBUG_CONTROL_ENABLE_LSB \ + (scn->targetdef->d_WLAN_DEBUG_CONTROL_ENABLE_LSB) +#define WLAN_DEBUG_CONTROL_ENABLE_MASK \ + (scn->targetdef->d_WLAN_DEBUG_CONTROL_ENABLE_MASK) +#define WLAN_DEBUG_OUT_OFFSET \ + (scn->targetdef->d_WLAN_DEBUG_OUT_OFFSET) +#define WLAN_DEBUG_OUT_DATA_MSB \ + (scn->targetdef->d_WLAN_DEBUG_OUT_DATA_MSB) +#define WLAN_DEBUG_OUT_DATA_LSB \ + (scn->targetdef->d_WLAN_DEBUG_OUT_DATA_LSB) +#define WLAN_DEBUG_OUT_DATA_MASK \ + (scn->targetdef->d_WLAN_DEBUG_OUT_DATA_MASK) +#define AMBA_DEBUG_BUS_OFFSET \ + (scn->targetdef->d_AMBA_DEBUG_BUS_OFFSET) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB \ + (scn->targetdef->d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MSB) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB \ + (scn->targetdef->d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK \ + (scn->targetdef->d_AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK) +#define AMBA_DEBUG_BUS_SEL_MSB \ + (scn->targetdef->d_AMBA_DEBUG_BUS_SEL_MSB) +#define AMBA_DEBUG_BUS_SEL_LSB \ + (scn->targetdef->d_AMBA_DEBUG_BUS_SEL_LSB) +#define AMBA_DEBUG_BUS_SEL_MASK \ + (scn->targetdef->d_AMBA_DEBUG_BUS_SEL_MASK) +#define CE_WRAPPER_DEBUG_OFFSET \ + (scn->targetdef->d_CE_WRAPPER_DEBUG_OFFSET) +#define CE_WRAPPER_DEBUG_SEL_MSB \ + (scn->targetdef->d_CE_WRAPPER_DEBUG_SEL_MSB) +#define CE_WRAPPER_DEBUG_SEL_LSB \ + (scn->targetdef->d_CE_WRAPPER_DEBUG_SEL_LSB) +#define CE_WRAPPER_DEBUG_SEL_MASK \ + (scn->targetdef->d_CE_WRAPPER_DEBUG_SEL_MASK) +#define CE_DEBUG_OFFSET \ + (scn->targetdef->d_CE_DEBUG_OFFSET) +#define CE_DEBUG_SEL_MSB \ + (scn->targetdef->d_CE_DEBUG_SEL_MSB) +#define CE_DEBUG_SEL_LSB \ + (scn->targetdef->d_CE_DEBUG_SEL_LSB) +#define CE_DEBUG_SEL_MASK \ + (scn->targetdef->d_CE_DEBUG_SEL_MASK) +/* end */ +/* PLL start */ +#define EFUSE_OFFSET \ + (scn->targetdef->d_EFUSE_OFFSET) +#define 
EFUSE_XTAL_SEL_MSB \ + (scn->targetdef->d_EFUSE_XTAL_SEL_MSB) +#define EFUSE_XTAL_SEL_LSB \ + (scn->targetdef->d_EFUSE_XTAL_SEL_LSB) +#define EFUSE_XTAL_SEL_MASK \ + (scn->targetdef->d_EFUSE_XTAL_SEL_MASK) +#define BB_PLL_CONFIG_OFFSET \ + (scn->targetdef->d_BB_PLL_CONFIG_OFFSET) +#define BB_PLL_CONFIG_OUTDIV_MSB \ + (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_MSB) +#define BB_PLL_CONFIG_OUTDIV_LSB \ + (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_LSB) +#define BB_PLL_CONFIG_OUTDIV_MASK \ + (scn->targetdef->d_BB_PLL_CONFIG_OUTDIV_MASK) +#define BB_PLL_CONFIG_FRAC_MSB \ + (scn->targetdef->d_BB_PLL_CONFIG_FRAC_MSB) +#define BB_PLL_CONFIG_FRAC_LSB \ + (scn->targetdef->d_BB_PLL_CONFIG_FRAC_LSB) +#define BB_PLL_CONFIG_FRAC_MASK \ + (scn->targetdef->d_BB_PLL_CONFIG_FRAC_MASK) +#define WLAN_PLL_SETTLE_TIME_MSB \ + (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_MSB) +#define WLAN_PLL_SETTLE_TIME_LSB \ + (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_LSB) +#define WLAN_PLL_SETTLE_TIME_MASK \ + (scn->targetdef->d_WLAN_PLL_SETTLE_TIME_MASK) +#define WLAN_PLL_SETTLE_OFFSET \ + (scn->targetdef->d_WLAN_PLL_SETTLE_OFFSET) +#define WLAN_PLL_SETTLE_SW_MASK \ + (scn->targetdef->d_WLAN_PLL_SETTLE_SW_MASK) +#define WLAN_PLL_SETTLE_RSTMASK \ + (scn->targetdef->d_WLAN_PLL_SETTLE_RSTMASK) +#define WLAN_PLL_SETTLE_RESET \ + (scn->targetdef->d_WLAN_PLL_SETTLE_RESET) +#define WLAN_PLL_CONTROL_NOPWD_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_MSB) +#define WLAN_PLL_CONTROL_NOPWD_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_LSB) +#define WLAN_PLL_CONTROL_NOPWD_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_NOPWD_MASK) +#define WLAN_PLL_CONTROL_BYPASS_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_MSB) +#define WLAN_PLL_CONTROL_BYPASS_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_LSB) +#define WLAN_PLL_CONTROL_BYPASS_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_MASK) +#define WLAN_PLL_CONTROL_BYPASS_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_BYPASS_RESET) +#define 
WLAN_PLL_CONTROL_CLK_SEL_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_MSB) +#define WLAN_PLL_CONTROL_CLK_SEL_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_LSB) +#define WLAN_PLL_CONTROL_CLK_SEL_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_MASK) +#define WLAN_PLL_CONTROL_CLK_SEL_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_CLK_SEL_RESET) +#define WLAN_PLL_CONTROL_REFDIV_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_MSB) +#define WLAN_PLL_CONTROL_REFDIV_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_LSB) +#define WLAN_PLL_CONTROL_REFDIV_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_MASK) +#define WLAN_PLL_CONTROL_REFDIV_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_REFDIV_RESET) +#define WLAN_PLL_CONTROL_DIV_MSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_MSB) +#define WLAN_PLL_CONTROL_DIV_LSB \ + (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_LSB) +#define WLAN_PLL_CONTROL_DIV_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_MASK) +#define WLAN_PLL_CONTROL_DIV_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_DIV_RESET) +#define WLAN_PLL_CONTROL_OFFSET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_OFFSET) +#define WLAN_PLL_CONTROL_SW_MASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_SW_MASK) +#define WLAN_PLL_CONTROL_RSTMASK \ + (scn->targetdef->d_WLAN_PLL_CONTROL_RSTMASK) +#define WLAN_PLL_CONTROL_RESET \ + (scn->targetdef->d_WLAN_PLL_CONTROL_RESET) +#define SOC_CORE_CLK_CTRL_OFFSET \ + (scn->targetdef->d_SOC_CORE_CLK_CTRL_OFFSET) +#define SOC_CORE_CLK_CTRL_DIV_MSB \ + (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_MSB) +#define SOC_CORE_CLK_CTRL_DIV_LSB \ + (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_LSB) +#define SOC_CORE_CLK_CTRL_DIV_MASK \ + (scn->targetdef->d_SOC_CORE_CLK_CTRL_DIV_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_MSB \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_MSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_LSB \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_LSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_MASK \ + 
(scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_RESET \ + (scn->targetdef->d_RTC_SYNC_STATUS_PLL_CHANGING_RESET) +#define RTC_SYNC_STATUS_OFFSET \ + (scn->targetdef->d_RTC_SYNC_STATUS_OFFSET) +#define SOC_CPU_CLOCK_OFFSET \ + (scn->targetdef->d_SOC_CPU_CLOCK_OFFSET) +#define SOC_CPU_CLOCK_STANDARD_MSB \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_MSB) +#define SOC_CPU_CLOCK_STANDARD_LSB \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_LSB) +#define SOC_CPU_CLOCK_STANDARD_MASK \ + (scn->targetdef->d_SOC_CPU_CLOCK_STANDARD_MASK) +/* PLL end */ + +/* SET macros */ +#define WLAN_SYSTEM_SLEEP_DISABLE_SET(x) \ + (((x) << WLAN_SYSTEM_SLEEP_DISABLE_LSB) & \ + WLAN_SYSTEM_SLEEP_DISABLE_MASK) +#define SI_CONFIG_BIDIR_OD_DATA_SET(x) \ + (((x) << SI_CONFIG_BIDIR_OD_DATA_LSB) & \ + SI_CONFIG_BIDIR_OD_DATA_MASK) +#define SI_CONFIG_I2C_SET(x) \ + (((x) << SI_CONFIG_I2C_LSB) & SI_CONFIG_I2C_MASK) +#define SI_CONFIG_POS_SAMPLE_SET(x) \ + (((x) << SI_CONFIG_POS_SAMPLE_LSB) & \ + SI_CONFIG_POS_SAMPLE_MASK) +#define SI_CONFIG_INACTIVE_CLK_SET(x) \ + (((x) << SI_CONFIG_INACTIVE_CLK_LSB) & \ + SI_CONFIG_INACTIVE_CLK_MASK) +#define SI_CONFIG_INACTIVE_DATA_SET(x) \ + (((x) << SI_CONFIG_INACTIVE_DATA_LSB) & \ + SI_CONFIG_INACTIVE_DATA_MASK) +#define SI_CONFIG_DIVIDER_SET(x) \ + (((x) << SI_CONFIG_DIVIDER_LSB) & SI_CONFIG_DIVIDER_MASK) +#define SI_CS_START_SET(x) \ + (((x) << SI_CS_START_LSB) & SI_CS_START_MASK) +#define SI_CS_RX_CNT_SET(x) \ + (((x) << SI_CS_RX_CNT_LSB) & SI_CS_RX_CNT_MASK) +#define SI_CS_TX_CNT_SET(x) \ + (((x) << SI_CS_TX_CNT_LSB) & SI_CS_TX_CNT_MASK) +#define LPO_CAL_ENABLE_SET(x) \ + (((x) << LPO_CAL_ENABLE_LSB) & LPO_CAL_ENABLE_MASK) +#define CPU_CLOCK_STANDARD_SET(x) \ + (((x) << CPU_CLOCK_STANDARD_LSB) & CPU_CLOCK_STANDARD_MASK) +#define CLOCK_GPIO_BT_CLK_OUT_EN_SET(x) \ + (((x) << CLOCK_GPIO_BT_CLK_OUT_EN_LSB) & \ + CLOCK_GPIO_BT_CLK_OUT_EN_MASK) +/* copy_engine.c */ +#define SRC_WATERMARK_LOW_SET(x) \ + (((x) << 
SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK) +#define SRC_WATERMARK_HIGH_SET(x) \ + (((x) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK) +#define DST_WATERMARK_LOW_SET(x) \ + (((x) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK) +#define DST_WATERMARK_HIGH_SET(x) \ + (((x) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK) +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) (((x) & \ + CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \ + CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) +#define CE_CTRL1_DMAX_LENGTH_SET(x) \ + (((x) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK) +#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \ + (((x) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \ + CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) +#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \ + (((x) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \ + CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) +#define WLAN_DEBUG_INPUT_SEL_SRC_GET(x) \ + (((x) & \ + WLAN_DEBUG_INPUT_SEL_SRC_MASK) >> \ + WLAN_DEBUG_INPUT_SEL_SRC_LSB) +#define WLAN_DEBUG_INPUT_SEL_SRC_SET(x) \ + (((x) << WLAN_DEBUG_INPUT_SEL_SRC_LSB) & \ + WLAN_DEBUG_INPUT_SEL_SRC_MASK) +#define WLAN_DEBUG_CONTROL_ENABLE_GET(x) \ + (((x) & \ + WLAN_DEBUG_CONTROL_ENABLE_MASK) >> \ + WLAN_DEBUG_CONTROL_ENABLE_LSB) +#define WLAN_DEBUG_CONTROL_ENABLE_SET(x) \ + (((x) << WLAN_DEBUG_CONTROL_ENABLE_LSB) & \ + WLAN_DEBUG_CONTROL_ENABLE_MASK) +#define WLAN_DEBUG_OUT_DATA_GET(x) \ + (((x) & WLAN_DEBUG_OUT_DATA_MASK) >> WLAN_DEBUG_OUT_DATA_LSB) +#define WLAN_DEBUG_OUT_DATA_SET(x) \ + (((x) << WLAN_DEBUG_OUT_DATA_LSB) & WLAN_DEBUG_OUT_DATA_MASK) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_GET(x) \ + (((x) & \ + AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK) >> \ + AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB) +#define AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(x) \ + (((x) << AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_LSB) & \ + AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK) +#define AMBA_DEBUG_BUS_SEL_GET(x) \ + (((x) & AMBA_DEBUG_BUS_SEL_MASK) >> AMBA_DEBUG_BUS_SEL_LSB) +#define AMBA_DEBUG_BUS_SEL_SET(x) \ 
+ (((x) << AMBA_DEBUG_BUS_SEL_LSB) & AMBA_DEBUG_BUS_SEL_MASK) +#define CE_WRAPPER_DEBUG_SEL_GET(x) \ + (((x) & CE_WRAPPER_DEBUG_SEL_MASK) >> CE_WRAPPER_DEBUG_SEL_LSB) +#define CE_WRAPPER_DEBUG_SEL_SET(x) \ + (((x) << CE_WRAPPER_DEBUG_SEL_LSB) & CE_WRAPPER_DEBUG_SEL_MASK) +#define CE_DEBUG_SEL_GET(x) \ + (((x) & CE_DEBUG_SEL_MASK) >> CE_DEBUG_SEL_LSB) +#define CE_DEBUG_SEL_SET(x) \ + (((x) << CE_DEBUG_SEL_LSB) & CE_DEBUG_SEL_MASK) +/* end */ +/* PLL start */ +#define EFUSE_XTAL_SEL_GET(x) \ + (((x) & EFUSE_XTAL_SEL_MASK) >> EFUSE_XTAL_SEL_LSB) +#define EFUSE_XTAL_SEL_SET(x) \ + (((x) << EFUSE_XTAL_SEL_LSB) & EFUSE_XTAL_SEL_MASK) +#define BB_PLL_CONFIG_OUTDIV_GET(x) \ + (((x) & BB_PLL_CONFIG_OUTDIV_MASK) >> BB_PLL_CONFIG_OUTDIV_LSB) +#define BB_PLL_CONFIG_OUTDIV_SET(x) \ + (((x) << BB_PLL_CONFIG_OUTDIV_LSB) & BB_PLL_CONFIG_OUTDIV_MASK) +#define BB_PLL_CONFIG_FRAC_GET(x) \ + (((x) & BB_PLL_CONFIG_FRAC_MASK) >> BB_PLL_CONFIG_FRAC_LSB) +#define BB_PLL_CONFIG_FRAC_SET(x) \ + (((x) << BB_PLL_CONFIG_FRAC_LSB) & BB_PLL_CONFIG_FRAC_MASK) +#define WLAN_PLL_SETTLE_TIME_GET(x) \ + (((x) & WLAN_PLL_SETTLE_TIME_MASK) >> WLAN_PLL_SETTLE_TIME_LSB) +#define WLAN_PLL_SETTLE_TIME_SET(x) \ + (((x) << WLAN_PLL_SETTLE_TIME_LSB) & WLAN_PLL_SETTLE_TIME_MASK) +#define WLAN_PLL_CONTROL_NOPWD_GET(x) \ + (((x) & \ + WLAN_PLL_CONTROL_NOPWD_MASK) >> \ + WLAN_PLL_CONTROL_NOPWD_LSB) +#define WLAN_PLL_CONTROL_NOPWD_SET(x) \ + (((x) << WLAN_PLL_CONTROL_NOPWD_LSB) & \ + WLAN_PLL_CONTROL_NOPWD_MASK) +#define WLAN_PLL_CONTROL_BYPASS_GET(x) \ + (((x) & \ + WLAN_PLL_CONTROL_BYPASS_MASK) >> \ + WLAN_PLL_CONTROL_BYPASS_LSB) +#define WLAN_PLL_CONTROL_BYPASS_SET(x) \ + (((x) << WLAN_PLL_CONTROL_BYPASS_LSB) & \ + WLAN_PLL_CONTROL_BYPASS_MASK) +#define WLAN_PLL_CONTROL_CLK_SEL_GET(x) \ + (((x) & \ + WLAN_PLL_CONTROL_CLK_SEL_MASK) >> \ + WLAN_PLL_CONTROL_CLK_SEL_LSB) +#define WLAN_PLL_CONTROL_CLK_SEL_SET(x) \ + (((x) << WLAN_PLL_CONTROL_CLK_SEL_LSB) & \ + WLAN_PLL_CONTROL_CLK_SEL_MASK) +#define 
WLAN_PLL_CONTROL_REFDIV_GET(x) \ + (((x) & \ + WLAN_PLL_CONTROL_REFDIV_MASK) >> \ + WLAN_PLL_CONTROL_REFDIV_LSB) +#define WLAN_PLL_CONTROL_REFDIV_SET(x) \ + (((x) << WLAN_PLL_CONTROL_REFDIV_LSB) & \ + WLAN_PLL_CONTROL_REFDIV_MASK) +#define WLAN_PLL_CONTROL_DIV_GET(x) \ + (((x) & \ + WLAN_PLL_CONTROL_DIV_MASK) >> \ + WLAN_PLL_CONTROL_DIV_LSB) +#define WLAN_PLL_CONTROL_DIV_SET(x) \ + (((x) << WLAN_PLL_CONTROL_DIV_LSB) & \ + WLAN_PLL_CONTROL_DIV_MASK) +#define SOC_CORE_CLK_CTRL_DIV_GET(x) \ + (((x) & \ + SOC_CORE_CLK_CTRL_DIV_MASK) >> \ + SOC_CORE_CLK_CTRL_DIV_LSB) +#define SOC_CORE_CLK_CTRL_DIV_SET(x) \ + (((x) << SOC_CORE_CLK_CTRL_DIV_LSB) & \ + SOC_CORE_CLK_CTRL_DIV_MASK) +#define RTC_SYNC_STATUS_PLL_CHANGING_GET(x) \ + (((x) & \ + RTC_SYNC_STATUS_PLL_CHANGING_MASK) >> \ + RTC_SYNC_STATUS_PLL_CHANGING_LSB) +#define RTC_SYNC_STATUS_PLL_CHANGING_SET(x) \ + (((x) << RTC_SYNC_STATUS_PLL_CHANGING_LSB) & \ + RTC_SYNC_STATUS_PLL_CHANGING_MASK) +#define SOC_CPU_CLOCK_STANDARD_GET(x) \ + (((x) & \ + SOC_CPU_CLOCK_STANDARD_MASK) >> \ + SOC_CPU_CLOCK_STANDARD_LSB) +#define SOC_CPU_CLOCK_STANDARD_SET(x) \ + (((x) << SOC_CPU_CLOCK_STANDARD_LSB) & \ + SOC_CPU_CLOCK_STANDARD_MASK) +/* PLL end */ + +struct hostdef_s { + uint32_t d_INT_STATUS_ENABLE_ERROR_LSB; + uint32_t d_INT_STATUS_ENABLE_ERROR_MASK; + uint32_t d_INT_STATUS_ENABLE_CPU_LSB; + uint32_t d_INT_STATUS_ENABLE_CPU_MASK; + uint32_t d_INT_STATUS_ENABLE_COUNTER_LSB; + uint32_t d_INT_STATUS_ENABLE_COUNTER_MASK; + uint32_t d_INT_STATUS_ENABLE_MBOX_DATA_LSB; + uint32_t d_INT_STATUS_ENABLE_MBOX_DATA_MASK; + uint32_t d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB; + uint32_t d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK; + uint32_t d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB; + uint32_t d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK; + uint32_t d_COUNTER_INT_STATUS_ENABLE_BIT_LSB; + uint32_t d_COUNTER_INT_STATUS_ENABLE_BIT_MASK; + uint32_t d_INT_STATUS_ENABLE_ADDRESS; + uint32_t d_CPU_INT_STATUS_ENABLE_BIT_LSB; + uint32_t d_CPU_INT_STATUS_ENABLE_BIT_MASK; 
+ uint32_t d_HOST_INT_STATUS_ADDRESS; + uint32_t d_CPU_INT_STATUS_ADDRESS; + uint32_t d_ERROR_INT_STATUS_ADDRESS; + uint32_t d_ERROR_INT_STATUS_WAKEUP_MASK; + uint32_t d_ERROR_INT_STATUS_WAKEUP_LSB; + uint32_t d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK; + uint32_t d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB; + uint32_t d_ERROR_INT_STATUS_TX_OVERFLOW_MASK; + uint32_t d_ERROR_INT_STATUS_TX_OVERFLOW_LSB; + uint32_t d_COUNT_DEC_ADDRESS; + uint32_t d_HOST_INT_STATUS_CPU_MASK; + uint32_t d_HOST_INT_STATUS_CPU_LSB; + uint32_t d_HOST_INT_STATUS_ERROR_MASK; + uint32_t d_HOST_INT_STATUS_ERROR_LSB; + uint32_t d_HOST_INT_STATUS_COUNTER_MASK; + uint32_t d_HOST_INT_STATUS_COUNTER_LSB; + uint32_t d_RX_LOOKAHEAD_VALID_ADDRESS; + uint32_t d_WINDOW_DATA_ADDRESS; + uint32_t d_WINDOW_READ_ADDR_ADDRESS; + uint32_t d_WINDOW_WRITE_ADDR_ADDRESS; + uint32_t d_SOC_GLOBAL_RESET_ADDRESS; + uint32_t d_RTC_STATE_ADDRESS; + uint32_t d_RTC_STATE_COLD_RESET_MASK; + uint32_t d_PCIE_LOCAL_BASE_ADDRESS; + uint32_t d_PCIE_SOC_WAKE_RESET; + uint32_t d_PCIE_SOC_WAKE_ADDRESS; + uint32_t d_PCIE_SOC_WAKE_V_MASK; + uint32_t d_RTC_STATE_V_MASK; + uint32_t d_RTC_STATE_V_LSB; + uint32_t d_FW_IND_EVENT_PENDING; + uint32_t d_FW_IND_INITIALIZED; + uint32_t d_RTC_STATE_V_ON; +#if defined(SDIO_3_0) + uint32_t d_HOST_INT_STATUS_MBOX_DATA_MASK; + uint32_t d_HOST_INT_STATUS_MBOX_DATA_LSB; +#endif + uint32_t d_PCIE_SOC_RDY_STATUS_ADDRESS; + uint32_t d_PCIE_SOC_RDY_STATUS_BAR_MASK; + uint32_t d_SOC_PCIE_BASE_ADDRESS; + uint32_t d_MSI_MAGIC_ADR_ADDRESS; + uint32_t d_MSI_MAGIC_ADDRESS; +}; + +#define INT_STATUS_ENABLE_ERROR_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_ERROR_LSB) +#define INT_STATUS_ENABLE_ERROR_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_ERROR_MASK) +#define INT_STATUS_ENABLE_CPU_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_CPU_LSB) +#define INT_STATUS_ENABLE_CPU_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_CPU_MASK) +#define INT_STATUS_ENABLE_COUNTER_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_COUNTER_LSB) +#define 
INT_STATUS_ENABLE_COUNTER_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_COUNTER_MASK) +#define INT_STATUS_ENABLE_MBOX_DATA_LSB \ + (scn->hostdef->d_INT_STATUS_ENABLE_MBOX_DATA_LSB) +#define INT_STATUS_ENABLE_MBOX_DATA_MASK \ + (scn->hostdef->d_INT_STATUS_ENABLE_MBOX_DATA_MASK) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB\ + (scn->hostdef->d_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK \ + (scn->hostdef->d_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) +#define COUNTER_INT_STATUS_ENABLE_BIT_LSB \ + (scn->hostdef->d_COUNTER_INT_STATUS_ENABLE_BIT_LSB) +#define COUNTER_INT_STATUS_ENABLE_BIT_MASK \ + (scn->hostdef->d_COUNTER_INT_STATUS_ENABLE_BIT_MASK) +#define INT_STATUS_ENABLE_ADDRESS \ + (scn->hostdef->d_INT_STATUS_ENABLE_ADDRESS) +#define CPU_INT_STATUS_ENABLE_BIT_LSB \ + (scn->hostdef->d_CPU_INT_STATUS_ENABLE_BIT_LSB) +#define CPU_INT_STATUS_ENABLE_BIT_MASK \ + (scn->hostdef->d_CPU_INT_STATUS_ENABLE_BIT_MASK) +#define HOST_INT_STATUS_ADDRESS \ + (scn->hostdef->d_HOST_INT_STATUS_ADDRESS) +#define CPU_INT_STATUS_ADDRESS \ + (scn->hostdef->d_CPU_INT_STATUS_ADDRESS) +#define ERROR_INT_STATUS_ADDRESS \ + (scn->hostdef->d_ERROR_INT_STATUS_ADDRESS) +#define ERROR_INT_STATUS_WAKEUP_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_WAKEUP_MASK) +#define ERROR_INT_STATUS_WAKEUP_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_WAKEUP_LSB) +#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_RX_UNDERFLOW_MASK) +#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_RX_UNDERFLOW_LSB) +#define ERROR_INT_STATUS_TX_OVERFLOW_MASK \ + (scn->hostdef->d_ERROR_INT_STATUS_TX_OVERFLOW_MASK) +#define ERROR_INT_STATUS_TX_OVERFLOW_LSB \ + (scn->hostdef->d_ERROR_INT_STATUS_TX_OVERFLOW_LSB) +#define COUNT_DEC_ADDRESS \ 
+ (scn->hostdef->d_COUNT_DEC_ADDRESS) +#define HOST_INT_STATUS_CPU_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_CPU_MASK) +#define HOST_INT_STATUS_CPU_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_CPU_LSB) +#define HOST_INT_STATUS_ERROR_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_ERROR_MASK) +#define HOST_INT_STATUS_ERROR_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_ERROR_LSB) +#define HOST_INT_STATUS_COUNTER_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_COUNTER_MASK) +#define HOST_INT_STATUS_COUNTER_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_COUNTER_LSB) +#define RX_LOOKAHEAD_VALID_ADDRESS \ + (scn->hostdef->d_RX_LOOKAHEAD_VALID_ADDRESS) +#define WINDOW_DATA_ADDRESS \ + (scn->hostdef->d_WINDOW_DATA_ADDRESS) +#define WINDOW_READ_ADDR_ADDRESS \ + (scn->hostdef->d_WINDOW_READ_ADDR_ADDRESS) +#define WINDOW_WRITE_ADDR_ADDRESS \ + (scn->hostdef->d_WINDOW_WRITE_ADDR_ADDRESS) +#define SOC_GLOBAL_RESET_ADDRESS \ + (scn->hostdef->d_SOC_GLOBAL_RESET_ADDRESS) +#define RTC_STATE_ADDRESS \ + (scn->hostdef->d_RTC_STATE_ADDRESS) +#define RTC_STATE_COLD_RESET_MASK \ + (scn->hostdef->d_RTC_STATE_COLD_RESET_MASK) +#define PCIE_LOCAL_BASE_ADDRESS \ + (scn->hostdef->d_PCIE_LOCAL_BASE_ADDRESS) +#define PCIE_SOC_WAKE_RESET \ + (scn->hostdef->d_PCIE_SOC_WAKE_RESET) +#define PCIE_SOC_WAKE_ADDRESS \ + (scn->hostdef->d_PCIE_SOC_WAKE_ADDRESS) +#define PCIE_SOC_WAKE_V_MASK \ + (scn->hostdef->d_PCIE_SOC_WAKE_V_MASK) +#define RTC_STATE_V_MASK \ + (scn->hostdef->d_RTC_STATE_V_MASK) +#define RTC_STATE_V_LSB \ + (scn->hostdef->d_RTC_STATE_V_LSB) +#define FW_IND_EVENT_PENDING \ + (scn->hostdef->d_FW_IND_EVENT_PENDING) +#define FW_IND_INITIALIZED \ + (scn->hostdef->d_FW_IND_INITIALIZED) +#define RTC_STATE_V_ON \ + (scn->hostdef->d_RTC_STATE_V_ON) +#if defined(SDIO_3_0) +#define HOST_INT_STATUS_MBOX_DATA_MASK \ + (scn->hostdef->d_HOST_INT_STATUS_MBOX_DATA_MASK) +#define HOST_INT_STATUS_MBOX_DATA_LSB \ + (scn->hostdef->d_HOST_INT_STATUS_MBOX_DATA_LSB) +#endif + +#if !defined(SOC_PCIE_BASE_ADDRESS) +#define 
SOC_PCIE_BASE_ADDRESS 0 +#endif + +#if !defined(PCIE_SOC_RDY_STATUS_ADDRESS) +#define PCIE_SOC_RDY_STATUS_ADDRESS 0 +#define PCIE_SOC_RDY_STATUS_BAR_MASK 0 +#endif + +#if !defined(MSI_MAGIC_ADR_ADDRESS) +#define MSI_MAGIC_ADR_ADDRESS 0 +#define MSI_MAGIC_ADDRESS 0 +#endif + +/* SET/GET macros */ +#define INT_STATUS_ENABLE_ERROR_SET(x) \ + (((x) << INT_STATUS_ENABLE_ERROR_LSB) & \ + INT_STATUS_ENABLE_ERROR_MASK) +#define INT_STATUS_ENABLE_CPU_SET(x) \ + (((x) << INT_STATUS_ENABLE_CPU_LSB) & \ + INT_STATUS_ENABLE_CPU_MASK) +#define INT_STATUS_ENABLE_COUNTER_SET(x) \ + (((x) << INT_STATUS_ENABLE_COUNTER_LSB) & \ + INT_STATUS_ENABLE_COUNTER_MASK) +#define INT_STATUS_ENABLE_MBOX_DATA_SET(x) \ + (((x) << INT_STATUS_ENABLE_MBOX_DATA_LSB) & \ + INT_STATUS_ENABLE_MBOX_DATA_MASK) +#define CPU_INT_STATUS_ENABLE_BIT_SET(x) \ + (((x) << CPU_INT_STATUS_ENABLE_BIT_LSB) & \ + CPU_INT_STATUS_ENABLE_BIT_MASK) +#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(x) \ + (((x) << ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB) & \ + ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK) +#define ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(x)\ + (((x) << ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB) & \ + ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK) +#define COUNTER_INT_STATUS_ENABLE_BIT_SET(x) \ + (((x) << COUNTER_INT_STATUS_ENABLE_BIT_LSB) & \ + COUNTER_INT_STATUS_ENABLE_BIT_MASK) +#define ERROR_INT_STATUS_WAKEUP_GET(x) \ + (((x) & ERROR_INT_STATUS_WAKEUP_MASK) >> \ + ERROR_INT_STATUS_WAKEUP_LSB) +#define ERROR_INT_STATUS_RX_UNDERFLOW_GET(x) \ + (((x) & ERROR_INT_STATUS_RX_UNDERFLOW_MASK) >> \ + ERROR_INT_STATUS_RX_UNDERFLOW_LSB) +#define ERROR_INT_STATUS_TX_OVERFLOW_GET(x) \ + (((x) & ERROR_INT_STATUS_TX_OVERFLOW_MASK) >> \ + ERROR_INT_STATUS_TX_OVERFLOW_LSB) +#define HOST_INT_STATUS_CPU_GET(x) \ + (((x) & HOST_INT_STATUS_CPU_MASK) >> \ + HOST_INT_STATUS_CPU_LSB) +#define HOST_INT_STATUS_ERROR_GET(x) \ + (((x) & HOST_INT_STATUS_ERROR_MASK) >> \ + HOST_INT_STATUS_ERROR_LSB) +#define HOST_INT_STATUS_COUNTER_GET(x) \ + (((x) & 
HOST_INT_STATUS_COUNTER_MASK) >> \ + HOST_INT_STATUS_COUNTER_LSB) +#define RTC_STATE_V_GET(x) \ + (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB) +#if defined(SDIO_3_0) +#define HOST_INT_STATUS_MBOX_DATA_GET(x) \ + (((x) & HOST_INT_STATUS_MBOX_DATA_MASK) >> \ + HOST_INT_STATUS_MBOX_DATA_LSB) +#endif + +#define INVALID_REG_LOC_DUMMY_DATA 0xAA + + + +#define ROME_USB_RTC_SOC_BASE_ADDRESS 0x00000800 +#define ROME_USB_SOC_RESET_CONTROL_COLD_RST_LSB 0x0 +#define SOC_RESET_CONTROL_COLD_RST_LSB 8 +#define SOC_RESET_CONTROL_COLD_RST_MASK 0x00000100 +#define SOC_RESET_CONTROL_COLD_RST_SET(x) \ + (((x) << SOC_RESET_CONTROL_COLD_RST_LSB) & \ + SOC_RESET_CONTROL_COLD_RST_MASK) + +#define AR6320_CORE_CLK_DIV_ADDR 0x403fa8 +#define AR6320_CPU_PLL_INIT_DONE_ADDR 0x403fd0 +#define AR6320_CPU_SPEED_ADDR 0x403fa4 +#define AR6320V2_CORE_CLK_DIV_ADDR 0x403fd8 +#define AR6320V2_CPU_PLL_INIT_DONE_ADDR 0x403fd0 +#define AR6320V2_CPU_SPEED_ADDR 0x403fd4 +#define AR6320V3_CORE_CLK_DIV_ADDR 0x404028 +#define AR6320V3_CPU_PLL_INIT_DONE_ADDR 0x404020 +#define AR6320V3_CPU_SPEED_ADDR 0x404024 + +enum a_refclk_speed_t { + /* Unsupported ref clock -- use PLL Bypass */ + SOC_REFCLK_UNKNOWN = -1, + SOC_REFCLK_48_MHZ = 0, + SOC_REFCLK_19_2_MHZ = 1, + SOC_REFCLK_24_MHZ = 2, + SOC_REFCLK_26_MHZ = 3, + SOC_REFCLK_37_4_MHZ = 4, + SOC_REFCLK_38_4_MHZ = 5, + SOC_REFCLK_40_MHZ = 6, + SOC_REFCLK_52_MHZ = 7, +}; + +#define A_REFCLK_UNKNOWN SOC_REFCLK_UNKNOWN +#define A_REFCLK_48_MHZ SOC_REFCLK_48_MHZ +#define A_REFCLK_19_2_MHZ SOC_REFCLK_19_2_MHZ +#define A_REFCLK_24_MHZ SOC_REFCLK_24_MHZ +#define A_REFCLK_26_MHZ SOC_REFCLK_26_MHZ +#define A_REFCLK_37_4_MHZ SOC_REFCLK_37_4_MHZ +#define A_REFCLK_38_4_MHZ SOC_REFCLK_38_4_MHZ +#define A_REFCLK_40_MHZ SOC_REFCLK_40_MHZ +#define A_REFCLK_52_MHZ SOC_REFCLK_52_MHZ + +#define TARGET_CPU_FREQ 176000000 + +struct wlan_pll_s { + u_int32_t refdiv; + u_int32_t div; + u_int32_t rnfrac; + u_int32_t outdiv; +}; + +struct cmnos_clock_s { + enum a_refclk_speed_t refclk_speed; 
+ u_int32_t refclk_hz; + u_int32_t pll_settling_time; /* 50us */ + struct wlan_pll_s wlan_pll; +}; + +struct tgt_reg_section { + u_int32_t start_addr; + u_int32_t end_addr; +}; + +struct tgt_reg_table { + const struct tgt_reg_section *section; + u_int32_t section_size; +}; + +void target_register_tbl_attach(struct hif_softc *scn, + uint32_t target_type); +void hif_register_tbl_attach(struct hif_softc *scn, + uint32_t target_type); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/usb/usbdrv.c b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/usbdrv.c new file mode 100644 index 0000000000000000000000000000000000000000..4fd85e0f4bc84d642fcc26dce84975e42890e387 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/hif/src/usb/usbdrv.c @@ -0,0 +1,1251 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#define ATH_MODULE_NAME hif +#include "a_debug.h" +#include "hif_usb_internal.h" +#include "if_usb.h" +#include "cds_api.h" +#include "hif_debug.h" + +#define IS_BULK_EP(attr) (((attr) & 3) == 0x02) +#define IS_INT_EP(attr) (((attr) & 3) == 0x03) +#define IS_ISOC_EP(attr) (((attr) & 3) == 0x01) +#define IS_DIR_IN(addr) ((addr) & 0x80) + +#define IS_FW_CRASH_DUMP(x)(((x == FW_ASSERT_PATTERN) || \ + (x == FW_REG_PATTERN) || \ + ((x & FW_RAMDUMP_PATTERN_MASK) == \ + FW_RAMDUMP_PATTERN)) ? 1 : 0) + +static void usb_hif_post_recv_transfers(struct HIF_USB_PIPE *recv_pipe, + int buffer_length); +static void usb_hif_post_recv_bundle_transfers + (struct HIF_USB_PIPE *recv_pipe, + int buffer_length); +static void usb_hif_cleanup_recv_urb(struct HIF_URB_CONTEXT *urb_context); + + +/** + * usb_hif_free_urb_to_pipe() - add urb back to urb list of a pipe + * @pipe: pointer to struct HIF_USB_PIPE + * @urb_context: pointer to struct HIF_URB_CONTEXT + * + * Return: none + */ +static void usb_hif_free_urb_to_pipe(struct HIF_USB_PIPE *pipe, + struct HIF_URB_CONTEXT *urb_context) +{ + qdf_spin_lock_irqsave(&pipe->device->cs_lock); + pipe->urb_cnt++; + DL_ListAdd(&pipe->urb_list_head, &urb_context->link); + qdf_spin_unlock_irqrestore(&pipe->device->cs_lock); +} + +/** + * usb_hif_alloc_urb_from_pipe() - remove urb back from urb list of a pipe + * @pipe: pointer to struct HIF_USB_PIPE + * + * Return: struct HIF_URB_CONTEXT urb context removed from the urb list + */ +struct HIF_URB_CONTEXT *usb_hif_alloc_urb_from_pipe(struct HIF_USB_PIPE *pipe) +{ + struct HIF_URB_CONTEXT *urb_context = NULL; + DL_LIST *item; + + qdf_spin_lock_irqsave(&pipe->device->cs_lock); + item = dl_list_remove_item_from_head(&pipe->urb_list_head); + if (item != NULL) { + urb_context = A_CONTAINING_STRUCT(item, struct HIF_URB_CONTEXT, + link); + pipe->urb_cnt--; + } + qdf_spin_unlock_irqrestore(&pipe->device->cs_lock); + + return urb_context; +} + +/** + * usb_hif_dequeue_pending_transfer() - remove urb from 
pending xfer list + * @pipe: pointer to struct HIF_USB_PIPE + * + * Return: struct HIF_URB_CONTEXT urb context removed from the pending xfer list + */ +static struct HIF_URB_CONTEXT *usb_hif_dequeue_pending_transfer + (struct HIF_USB_PIPE *pipe) +{ + struct HIF_URB_CONTEXT *urb_context = NULL; + DL_LIST *item; + + qdf_spin_lock_irqsave(&pipe->device->cs_lock); + item = dl_list_remove_item_from_head(&pipe->urb_pending_list); + if (item != NULL) + urb_context = A_CONTAINING_STRUCT(item, struct HIF_URB_CONTEXT, + link); + qdf_spin_unlock_irqrestore(&pipe->device->cs_lock); + + return urb_context; +} + +/** + * usb_hif_enqueue_pending_transfer() - add urb to pending xfer list + * @pipe: pointer to struct HIF_USB_PIPE + * @urb_context: pointer to struct HIF_URB_CONTEXT to be added to the xfer list + * + * Return: none + */ +void usb_hif_enqueue_pending_transfer(struct HIF_USB_PIPE *pipe, + struct HIF_URB_CONTEXT *urb_context) +{ + qdf_spin_lock_irqsave(&pipe->device->cs_lock); + dl_list_insert_tail(&pipe->urb_pending_list, &urb_context->link); + qdf_spin_unlock_irqrestore(&pipe->device->cs_lock); +} + + +/** + * usb_hif_remove_pending_transfer() - remove urb from its own list + * @urb_context: pointer to struct HIF_URB_CONTEXT to be removed + * + * Return: none + */ +void +usb_hif_remove_pending_transfer(struct HIF_URB_CONTEXT *urb_context) +{ + qdf_spin_lock_irqsave(&urb_context->pipe->device->cs_lock); + dl_list_remove(&urb_context->link); + qdf_spin_unlock_irqrestore(&urb_context->pipe->device->cs_lock); +} + +/** + * usb_hif_alloc_pipe_resources() - allocate urb_cnt urbs to a HIF pipe + * @pipe: pointer to struct HIF_USB_PIPE to which resources will be allocated + * @urb_cnt: number of urbs to be added to the HIF pipe + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +static QDF_STATUS usb_hif_alloc_pipe_resources + (struct HIF_USB_PIPE *pipe, int urb_cnt) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + int i; + struct 
HIF_URB_CONTEXT *urb_context;
+
+	DL_LIST_INIT(&pipe->urb_list_head);
+	DL_LIST_INIT(&pipe->urb_pending_list);
+
+	for (i = 0; i < urb_cnt; i++) {
+		urb_context = qdf_mem_malloc(sizeof(*urb_context));
+		if (NULL == urb_context) {
+			status = QDF_STATUS_E_NOMEM;
+			HIF_ERROR("urb_context is null");
+			break;
+		}
+		urb_context->pipe = pipe;
+		urb_context->urb = usb_alloc_urb(0, GFP_KERNEL);
+
+		if (NULL == urb_context->urb) {
+			status = QDF_STATUS_E_NOMEM;
+			qdf_mem_free(urb_context);
+			HIF_ERROR("urb_context->urb is null");
+			break;
+		}
+
+		/* note: we only allocate the urb contexts here; the actual
+		 * URB is
+		 * allocated from the kernel as needed to do a transaction
+		 */
+		pipe->urb_alloc++;
+
+		usb_hif_free_urb_to_pipe(pipe, urb_context);
+	}
+
+	HIF_DBG("athusb: alloc resources lpipe:%d hpipe:0x%X urbs:%d",
+		pipe->logical_pipe_num,
+		pipe->usb_pipe_handle,
+		pipe->urb_alloc);
+	return status;
+}
+
+/**
+ * usb_hif_free_pipe_resources() - free urb resources allocated to a HIF pipe
+ * @pipe: pointer to struct HIF_USB_PIPE
+ *
+ * Return: none
+ */
+static void usb_hif_free_pipe_resources(struct HIF_USB_PIPE *pipe)
+{
+	struct HIF_URB_CONTEXT *urb_context;
+
+	if (NULL == pipe->device) {
+		/* nothing allocated for this pipe */
+		HIF_ERROR("pipe->device is null");
+		return;
+	}
+
+	HIF_TRACE("athusb: free resources lpipe:%d hpipe:0x%X urbs:%d avail:%d",
+		pipe->logical_pipe_num,
+		pipe->usb_pipe_handle, pipe->urb_alloc,
+		pipe->urb_cnt);
+
+	if (pipe->urb_alloc != pipe->urb_cnt) {
+		HIF_ERROR("athusb: urb leak! 
lpipe:%d hpipe:0x%X urbs:%d avail:%d",
+			pipe->logical_pipe_num,
+			pipe->usb_pipe_handle, pipe->urb_alloc,
+			pipe->urb_cnt);
+	}
+
+	while (true) {
+		urb_context = usb_hif_alloc_urb_from_pipe(pipe);
+		if (NULL == urb_context)
+			break;
+
+		if (urb_context->buf) {
+			qdf_nbuf_free(urb_context->buf);
+			urb_context->buf = NULL;
+		}
+
+		usb_free_urb(urb_context->urb);
+		urb_context->urb = NULL;
+		qdf_mem_free(urb_context);
+	}
+
+}
+
+/**
+ * usb_hif_get_logical_pipe_num() - get pipe number for a particular endpoint
+ * @device: pointer to HIF_DEVICE_USB structure
+ * @ep_address: endpoint address
+ * @urb_count: number of urb resources to be allocated to the pipe
+ *
+ * Return: uint8_t pipe number corresponding to ep_address
+ */
+static uint8_t usb_hif_get_logical_pipe_num
+					(struct HIF_DEVICE_USB *device,
+					uint8_t ep_address,
+					int *urb_count)
+{
+	uint8_t pipe_num = HIF_USB_PIPE_INVALID;
+
+	switch (ep_address) {
+	case USB_EP_ADDR_APP_CTRL_IN:
+		pipe_num = HIF_RX_CTRL_PIPE;
+		*urb_count = RX_URB_COUNT;
+		break;
+	case USB_EP_ADDR_APP_DATA_IN:
+		pipe_num = HIF_RX_DATA_PIPE;
+		*urb_count = RX_URB_COUNT;
+		break;
+	case USB_EP_ADDR_APP_INT_IN:
+		pipe_num = HIF_RX_INT_PIPE;
+		*urb_count = RX_URB_COUNT;
+		break;
+	case USB_EP_ADDR_APP_DATA2_IN:
+		pipe_num = HIF_RX_DATA2_PIPE;
+		*urb_count = RX_URB_COUNT;
+		break;
+	case USB_EP_ADDR_APP_CTRL_OUT:
+		pipe_num = HIF_TX_CTRL_PIPE;
+		*urb_count = TX_URB_COUNT;
+		break;
+	case USB_EP_ADDR_APP_DATA_LP_OUT:
+		pipe_num = HIF_TX_DATA_LP_PIPE;
+		*urb_count = TX_URB_COUNT;
+		break;
+	case USB_EP_ADDR_APP_DATA_MP_OUT:
+		pipe_num = HIF_TX_DATA_MP_PIPE;
+		*urb_count = TX_URB_COUNT;
+		break;
+	case USB_EP_ADDR_APP_DATA_HP_OUT:
+		pipe_num = HIF_TX_DATA_HP_PIPE;
+		*urb_count = TX_URB_COUNT;
+		break;
+	default:
+		/* note: there may be endpoints not currently used */
+		break;
+	}
+
+	return pipe_num;
+}
+
+/**
+ * usb_hif_setup_pipe_resources() - setup urb resources for all pipes
+ * @device: pointer to HIF_DEVICE_USB structure
+ *
+ * 
Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error
+ */
+QDF_STATUS usb_hif_setup_pipe_resources(struct HIF_DEVICE_USB *device)
+{
+	struct usb_interface *interface = device->interface;
+	struct usb_host_interface *iface_desc = interface->cur_altsetting;
+	struct usb_endpoint_descriptor *endpoint;
+	int i;
+	int urbcount;
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	struct HIF_USB_PIPE *pipe;
+	uint8_t pipe_num;
+
+	/* walk descriptors and setup pipes */
+	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+		endpoint = &iface_desc->endpoint[i].desc;
+
+		if (IS_BULK_EP(endpoint->bmAttributes)) {
+			HIF_DBG("%s Bulk Ep:0x%2.2X maxpktsz:%d",
+				IS_DIR_IN(endpoint->bEndpointAddress) ?
+				"RX" : "TX",
+				endpoint->bEndpointAddress,
+				qdf_le16_to_cpu(endpoint->wMaxPacketSize));
+		} else if (IS_INT_EP(endpoint->bmAttributes)) {
+			HIF_DBG("%s Int Ep:0x%2.2X maxpktsz:%d interval:%d",
+				IS_DIR_IN(endpoint->bEndpointAddress) ?
+				"RX" : "TX",
+				endpoint->bEndpointAddress,
+				qdf_le16_to_cpu(endpoint->wMaxPacketSize),
+				endpoint->bInterval);
+		} else if (IS_ISOC_EP(endpoint->bmAttributes)) {
+			/* TODO for ISO */
+			HIF_DBG("%s ISOC Ep:0x%2.2X maxpktsz:%d interval:%d",
+				IS_DIR_IN(endpoint->bEndpointAddress) ? 
+ "RX" : "TX", + endpoint->bEndpointAddress, + qdf_le16_to_cpu(endpoint->wMaxPacketSize), + endpoint->bInterval); + } + urbcount = 0; + + pipe_num = usb_hif_get_logical_pipe_num(device, + endpoint->bEndpointAddress, + &urbcount); + if (HIF_USB_PIPE_INVALID == pipe_num) + continue; + + pipe = &device->pipes[pipe_num]; + if (pipe->device != NULL) { + /*pipe was already setup */ + continue; + } + + pipe->device = device; + pipe->logical_pipe_num = pipe_num; + pipe->ep_address = endpoint->bEndpointAddress; + pipe->max_packet_size = + qdf_le16_to_cpu(endpoint->wMaxPacketSize); + + if (IS_BULK_EP(endpoint->bmAttributes)) { + if (IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvbulkpipe(device->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndbulkpipe(device->udev, + pipe->ep_address); + } + } else if (IS_INT_EP(endpoint->bmAttributes)) { + if (IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvintpipe(device->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndintpipe(device->udev, + pipe->ep_address); + } + } else if (IS_ISOC_EP(endpoint->bmAttributes)) { + /* TODO for ISO */ + if (IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvisocpipe(device->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndisocpipe(device->udev, + pipe->ep_address); + } + } + pipe->ep_desc = endpoint; + + if (!IS_DIR_IN(pipe->ep_address)) + pipe->flags |= HIF_USB_PIPE_FLAG_TX; + + status = usb_hif_alloc_pipe_resources(pipe, urbcount); + + if (!QDF_IS_STATUS_SUCCESS(status)) + break; + + } + + return status; +} + + +/** + * usb_hif_cleanup_pipe_resources() - free urb resources for all pipes + * @device: pointer to HIF_DEVICE_USB structure + * + * Return: none + */ +void usb_hif_cleanup_pipe_resources(struct HIF_DEVICE_USB *device) +{ + int i; + + for (i = 0; i < HIF_USB_PIPE_MAX; i++) + usb_hif_free_pipe_resources(&device->pipes[i]); +} + +/** + * usb_hif_flush_pending_transfers() - 
kill pending urbs for a pipe + * @pipe: pointer to struct HIF_USB_PIPE structure + * + * Return: none + */ +static void usb_hif_flush_pending_transfers(struct HIF_USB_PIPE *pipe) +{ + struct HIF_URB_CONTEXT *urb_context; + + HIF_TRACE("+%s pipe : %d", __func__, pipe->logical_pipe_num); + + while (1) { + urb_context = usb_hif_dequeue_pending_transfer(pipe); + if (NULL == urb_context) { + HIF_WARN("urb_context is NULL"); + break; + } + HIF_TRACE(" pending urb ctxt: 0x%pK", urb_context); + if (urb_context->urb != NULL) { + HIF_TRACE(" killing urb: 0x%pK", urb_context->urb); + /* killing the URB will cause the completion routines to + * run + */ + usb_kill_urb(urb_context->urb); + } + } + HIF_TRACE("-%s", __func__); +} + +/** + * usb_hif_flush_all() - flush pending transfers for all pipes for a usb bus + * @device: pointer to HIF_DEVICE_USB structure + * + * Return: none + */ +void usb_hif_flush_all(struct HIF_DEVICE_USB *device) +{ + int i; + struct HIF_USB_PIPE *pipe; + + HIF_TRACE("+%s", __func__); + + for (i = 0; i < HIF_USB_PIPE_MAX; i++) { + if (device->pipes[i].device != NULL) { + usb_hif_flush_pending_transfers(&device->pipes[i]); + pipe = &device->pipes[i]; + + HIF_USB_FLUSH_WORK(pipe); + } + } + + HIF_TRACE("-%s", __func__); +} + +/** + * usb_hif_cleanup_recv_urb() - cleanup recv urb + * @urb_context: pointer to struct HIF_URB_CONTEXT structure + * + * Return: none + */ +static void usb_hif_cleanup_recv_urb(struct HIF_URB_CONTEXT *urb_context) +{ + HIF_TRACE("+%s", __func__); + + if (urb_context->buf != NULL) { + qdf_nbuf_free(urb_context->buf); + urb_context->buf = NULL; + } + + usb_hif_free_urb_to_pipe(urb_context->pipe, urb_context); + HIF_TRACE("-%s", __func__); +} + +/** + * usb_hif_cleanup_transmit_urb() - cleanup transmit urb + * @urb_context: pointer to struct HIF_URB_CONTEXT structure + * + * Return: none + */ +void usb_hif_cleanup_transmit_urb(struct HIF_URB_CONTEXT *urb_context) +{ + usb_hif_free_urb_to_pipe(urb_context->pipe, urb_context); +} + 
+/**
+ * usb_hif_usb_recv_prestart_complete() - completion routine for prestart rx urb
+ * @urb: urb for which the completion routine is being called
+ *
+ * Return: none
+ */
+static void usb_hif_usb_recv_prestart_complete
+							(struct urb *urb)
+{
+	struct HIF_URB_CONTEXT *urb_context =
+					(struct HIF_URB_CONTEXT *) urb->context;
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	qdf_nbuf_t buf = NULL;
+	struct HIF_USB_PIPE *pipe = urb_context->pipe;
+
+	HIF_DBG("+%s: recv pipe: %d, stat:%d,len:%d urb:0x%pK",
+		__func__,
+		pipe->logical_pipe_num,
+		urb->status, urb->actual_length,
+		urb);
+
+	/* this urb is not pending anymore */
+	usb_hif_remove_pending_transfer(urb_context);
+	do {
+		if (urb->status != 0) {
+			status = A_ECOMM;
+			switch (urb->status) {
+			case -ECONNRESET:
+			case -ENOENT:
+			case -ESHUTDOWN:
+				/* NOTE: no need to spew these errors when
+				 * device is removed
+				 * or urb is killed due to driver shutdown
+				 */
+				status = A_ECANCELED;
+				break;
+			default:
+				HIF_ERROR("%s recv pipe: %d (ep:0x%2.2X), failed:%d",
+					__func__,
+					pipe->logical_pipe_num,
+					pipe->ep_address,
+					urb->status);
+				break;
+			}
+			break;
+		}
+		if (urb->actual_length == 0)
+			break;
+		buf = urb_context->buf;
+		/* we are going to pass it up */
+		urb_context->buf = NULL;
+		qdf_nbuf_put_tail(buf, urb->actual_length);
+
+		if (AR_DEBUG_LVL_CHECK(USB_HIF_DEBUG_DUMP_DATA)) {
+			uint8_t *data;
+			uint32_t len;
+
+			qdf_nbuf_peek_header(buf, &data, &len);
+			debug_dump_bytes(data, len, "hif recv data");
+		}
+		/* note: queue implements a lock */
+		skb_queue_tail(&pipe->io_comp_queue, buf);
+
+		HIF_USB_SCHEDULE_WORK(pipe);
+	} while (false);
+
+	usb_hif_cleanup_recv_urb(urb_context);
+
+	/* Prestart URBs have run out; now start the working receive pipe. 
*/ + if (--pipe->urb_prestart_cnt == 0) + usb_hif_start_recv_pipes(pipe->device); + + HIF_DBG("-%s", __func__); +} + +/** + * usb_hif_usb_recv_complete() - completion routine for rx urb + * @urb: urb for which the completion routine is being called + * + * Return: none + */ +static void usb_hif_usb_recv_complete(struct urb *urb) +{ + struct HIF_URB_CONTEXT *urb_context = + (struct HIF_URB_CONTEXT *) urb->context; + QDF_STATUS status = QDF_STATUS_SUCCESS; + qdf_nbuf_t buf = NULL; + struct HIF_USB_PIPE *pipe = urb_context->pipe; + struct hif_usb_softc *sc = HIF_GET_USB_SOFTC(pipe->device); + + HIF_DBG("+%s: recv pipe: %d, stat:%d,len:%d urb:0x%pK", + __func__, + pipe->logical_pipe_num, + urb->status, urb->actual_length, + urb); + + /* this urb is not pending anymore */ + usb_hif_remove_pending_transfer(urb_context); + + do { + + if (urb->status != 0) { + status = A_ECOMM; + switch (urb->status) { +#ifdef RX_SG_SUPPORT + case -EOVERFLOW: + urb->actual_length = HIF_USB_RX_BUFFER_SIZE; + status = QDF_STATUS_SUCCESS; + break; +#endif + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + /* NOTE: no need to spew these errors when + * device is removed + * or urb is killed due to driver shutdown + */ + status = A_ECANCELED; + break; + default: + HIF_ERROR("%s recv pipe: %d (ep:0x%2.2X), failed:%d", + __func__, + pipe->logical_pipe_num, + pipe->ep_address, + urb->status); + break; + } + break; + } + if (urb->actual_length == 0) + break; + buf = urb_context->buf; + /* we are going to pass it up */ + urb_context->buf = NULL; + qdf_nbuf_put_tail(buf, urb->actual_length); + if (AR_DEBUG_LVL_CHECK(USB_HIF_DEBUG_DUMP_DATA)) { + uint8_t *data; + uint32_t len; + + qdf_nbuf_peek_header(buf, &data, &len); + debug_dump_bytes(data, len, "hif recv data"); + } + /* note: queue implements a lock */ + skb_queue_tail(&pipe->io_comp_queue, buf); + HIF_USB_SCHEDULE_WORK(pipe); + } while (false); + + usb_hif_cleanup_recv_urb(urb_context); + + /* Only re-submit URB when STATUS is success and 
HIF is not at the + * suspend state. + */ + if (QDF_IS_STATUS_SUCCESS(status) && !sc->suspend_state) { + if (pipe->urb_cnt >= pipe->urb_cnt_thresh) { + /* our free urbs are piling up, post more transfers */ + usb_hif_post_recv_transfers(pipe, + HIF_USB_RX_BUFFER_SIZE); + } + } else { + HIF_ERROR("%s: pipe: %d, fail to post URB: status(%d) suspend (%d)", + __func__, + pipe->logical_pipe_num, + urb->status, + sc->suspend_state); + } + + HIF_DBG("-%s", __func__); +} + +/** + * usb_hif_usb_recv_bundle_complete() - completion routine for rx bundling urb + * @urb: urb for which the completion routine is being called + * + * Return: none + */ +static void usb_hif_usb_recv_bundle_complete(struct urb *urb) +{ + struct HIF_URB_CONTEXT *urb_context = + (struct HIF_URB_CONTEXT *) urb->context; + QDF_STATUS status = QDF_STATUS_SUCCESS; + qdf_nbuf_t buf = NULL; + struct HIF_USB_PIPE *pipe = urb_context->pipe; + uint8_t *netdata, *netdata_new; + uint32_t netlen, netlen_new; + HTC_FRAME_HDR *HtcHdr; + uint16_t payloadLen; + qdf_nbuf_t new_skb = NULL; + + HIF_DBG("+%s: recv pipe: %d, stat:%d,len:%d urb:0x%pK", + __func__, + pipe->logical_pipe_num, + urb->status, urb->actual_length, + urb); + + /* this urb is not pending anymore */ + usb_hif_remove_pending_transfer(urb_context); + + do { + + if (urb->status != 0) { + status = A_ECOMM; + switch (urb->status) { + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + /* NOTE: no need to spew these errors when + * device is removed + * or urb is killed due to driver shutdown + */ + status = A_ECANCELED; + break; + default: + HIF_ERROR("%s recv pipe: %d (ep:0x%2.2X), failed:%d", + __func__, + pipe->logical_pipe_num, + pipe->ep_address, + urb->status); + break; + } + break; + } + if (urb->actual_length == 0) + break; + buf = urb_context->buf; + if (AR_DEBUG_LVL_CHECK(USB_HIF_DEBUG_DUMP_DATA)) { + uint8_t *data; + uint32_t len; + + qdf_nbuf_peek_header(buf, &data, &len); + debug_dump_bytes(data, len, "hif recv data"); + } + + 
qdf_nbuf_peek_header(buf, &netdata, &netlen); + netlen = urb->actual_length; + + do { + uint16_t frame_len; + + if (IS_FW_CRASH_DUMP(*(uint32_t *) netdata)) + frame_len = netlen; + else { + /* Hack into HTC header for bundle processing */ + HtcHdr = (HTC_FRAME_HDR *) netdata; + if (HtcHdr->EndpointID >= ENDPOINT_MAX) { + HIF_ERROR("athusb: Rx: invalid EndpointID=%d", + HtcHdr->EndpointID); + break; + } + + payloadLen = HtcHdr->PayloadLen; + payloadLen = qdf_le16_to_cpu(payloadLen); + + if (payloadLen > HIF_USB_RX_BUFFER_SIZE) { + HIF_ERROR("athusb: payloadLen too long %u", + payloadLen); + break; + } + frame_len = (HTC_HDR_LENGTH + payloadLen); + } + + if (netlen < frame_len) { + HIF_ERROR("athusb: subframe length %d not fitted into bundle packet length %d" + , netlen, frame_len); + break; + } + + /* allocate a new skb and copy */ + new_skb = + qdf_nbuf_alloc(NULL, frame_len, 0, 4, false); + if (new_skb == NULL) { + HIF_ERROR("athusb: allocate skb (len=%u) failed" + , frame_len); + break; + } + + qdf_nbuf_peek_header(new_skb, &netdata_new, + &netlen_new); + qdf_mem_copy(netdata_new, netdata, frame_len); + qdf_nbuf_put_tail(new_skb, frame_len); + skb_queue_tail(&pipe->io_comp_queue, new_skb); + new_skb = NULL; + netdata += frame_len; + netlen -= frame_len; + } while (netlen); + HIF_USB_SCHEDULE_WORK(pipe); + } while (false); + + if (urb_context->buf == NULL) + HIF_ERROR("athusb: buffer in urb_context is NULL"); + + /* reset urb_context->buf ==> seems not necessary */ + usb_hif_free_urb_to_pipe(urb_context->pipe, urb_context); + + if (QDF_IS_STATUS_SUCCESS(status)) { + if (pipe->urb_cnt >= pipe->urb_cnt_thresh) { + /* our free urbs are piling up, post more transfers */ + usb_hif_post_recv_bundle_transfers(pipe, + pipe->device->rx_bundle_buf_len); + } + } + + HIF_DBG("-%s", __func__); +} + +/** + * usb_hif_post_recv_prestart_transfers() - post prestart recv urbs for a pipe + * @recv_pipe: rx data pipe + * @prestart_urb: number of prestart recv urbs to be posted + * + 
* Return: none + */ +static void usb_hif_post_recv_prestart_transfers(struct HIF_USB_PIPE *recv_pipe, + int prestart_urb) +{ + struct HIF_URB_CONTEXT *urb_context; + uint8_t *data; + uint32_t len; + struct urb *urb; + int i, usb_status, buffer_length = HIF_USB_RX_BUFFER_SIZE; + + HIF_TRACE("+%s", __func__); + + for (i = 0; i < prestart_urb; i++) { + urb_context = usb_hif_alloc_urb_from_pipe(recv_pipe); + if (NULL == urb_context) + break; + + urb_context->buf = + qdf_nbuf_alloc(NULL, buffer_length, 0, 4, false); + if (NULL == urb_context->buf) { + usb_hif_cleanup_recv_urb(urb_context); + break; + } + + qdf_nbuf_peek_header(urb_context->buf, &data, &len); + + urb = urb_context->urb; + + usb_fill_bulk_urb(urb, + recv_pipe->device->udev, + recv_pipe->usb_pipe_handle, + data, + buffer_length, + usb_hif_usb_recv_prestart_complete, + urb_context); + + HIF_DBG("athusb bulk recv submit:%d, 0x%X (ep:0x%2.2X), %d bytes, buf:0x%pK", + recv_pipe->logical_pipe_num, + recv_pipe->usb_pipe_handle, + recv_pipe->ep_address, buffer_length, + urb_context->buf); + + usb_hif_enqueue_pending_transfer(recv_pipe, urb_context); + + usb_status = usb_submit_urb(urb, GFP_ATOMIC); + + if (usb_status) { + HIF_ERROR("athusb : usb bulk recv failed %d", + usb_status); + usb_hif_remove_pending_transfer(urb_context); + usb_hif_cleanup_recv_urb(urb_context); + break; + } + recv_pipe->urb_prestart_cnt++; + } + + HIF_TRACE("-%s", __func__); +} + +/** + * usb_hif_post_recv_transfers() - post recv urbs for a given pipe + * @recv_pipe: recv pipe for which urbs need to be posted + * @buffer_length: buffer length of the recv urbs + * + * Return: none + */ +static void usb_hif_post_recv_transfers(struct HIF_USB_PIPE *recv_pipe, + int buffer_length) +{ + struct HIF_URB_CONTEXT *urb_context; + uint8_t *data; + uint32_t len; + struct urb *urb; + int usb_status; + + HIF_TRACE("+%s", __func__); + + while (1) { + + urb_context = usb_hif_alloc_urb_from_pipe(recv_pipe); + if (NULL == urb_context) + break; + + 
urb_context->buf = qdf_nbuf_alloc(NULL, buffer_length, 0, + 4, false); + if (NULL == urb_context->buf) { + usb_hif_cleanup_recv_urb(urb_context); + break; + } + + qdf_nbuf_peek_header(urb_context->buf, &data, &len); + + urb = urb_context->urb; + + usb_fill_bulk_urb(urb, + recv_pipe->device->udev, + recv_pipe->usb_pipe_handle, + data, + buffer_length, + usb_hif_usb_recv_complete, urb_context); + + HIF_DBG("athusb bulk recv submit:%d, 0x%X (ep:0x%2.2X), %d bytes, buf:0x%pK", + recv_pipe->logical_pipe_num, + recv_pipe->usb_pipe_handle, + recv_pipe->ep_address, buffer_length, + urb_context->buf); + + usb_hif_enqueue_pending_transfer(recv_pipe, urb_context); + + usb_status = usb_submit_urb(urb, GFP_ATOMIC); + + if (usb_status) { + HIF_ERROR("athusb : usb bulk recv failed %d", + usb_status); + usb_hif_remove_pending_transfer(urb_context); + usb_hif_cleanup_recv_urb(urb_context); + break; + } + } + + HIF_TRACE("-%s", __func__); + +} + +/** + * usb_hif_post_recv_bundle_transfers() - post recv urbs for a given pipe + * @recv_pipe: recv pipe for which urbs need to be posted + * @buffer_length: maximum length of rx bundle + * + * Return: none + */ +static void usb_hif_post_recv_bundle_transfers(struct HIF_USB_PIPE *recv_pipe, + int buffer_length) +{ + struct HIF_URB_CONTEXT *urb_context; + uint8_t *data; + uint32_t len; + struct urb *urb; + int usb_status; + + HIF_TRACE("+%s", __func__); + + while (1) { + + urb_context = usb_hif_alloc_urb_from_pipe(recv_pipe); + if (NULL == urb_context) + break; + + if (NULL == urb_context->buf) { + urb_context->buf = + qdf_nbuf_alloc(NULL, buffer_length, 0, 4, false); + if (NULL == urb_context->buf) { + usb_hif_cleanup_recv_urb(urb_context); + break; + } + } + + qdf_nbuf_peek_header(urb_context->buf, &data, &len); + + urb = urb_context->urb; + usb_fill_bulk_urb(urb, + recv_pipe->device->udev, + recv_pipe->usb_pipe_handle, + data, + buffer_length, + usb_hif_usb_recv_bundle_complete, + urb_context); + + HIF_DBG("athusb bulk recv submit:%d, 
0x%X (ep:0x%2.2X), %d bytes, buf:0x%pK", + recv_pipe->logical_pipe_num, + recv_pipe->usb_pipe_handle, + recv_pipe->ep_address, buffer_length, + urb_context->buf); + + usb_hif_enqueue_pending_transfer(recv_pipe, urb_context); + + usb_status = usb_submit_urb(urb, GFP_ATOMIC); + + if (usb_status) { + HIF_ERROR("athusb : usb bulk recv failed %d", + usb_status); + usb_hif_remove_pending_transfer(urb_context); + usb_hif_free_urb_to_pipe(urb_context->pipe, + urb_context); + break; + } + + } + + HIF_TRACE("-%s", __func__); + +} + +/** + * usb_hif_prestart_recv_pipes() - post prestart recv urbs + * @device: HIF device for which prestart recv urbs need to be posted + * + * Return: none + */ +void usb_hif_prestart_recv_pipes(struct HIF_DEVICE_USB *device) +{ + struct HIF_USB_PIPE *pipe = &device->pipes[HIF_RX_DATA_PIPE]; + + /* + * USB driver learn to support bundle or not until the firmware + * download and ready. Only allocate some URBs for control message + * communication during the initial phase then start the final + * working pipe after all information understood. 
+ */ + usb_hif_post_recv_prestart_transfers(pipe, 8); +} + +/** + * usb_hif_start_recv_pipes() - start recv urbs + * @device: HIF device for which recv urbs need to be posted + * + * This function is called after all prestart recv urbs are exhausted + * + * Return: none + */ +void usb_hif_start_recv_pipes(struct HIF_DEVICE_USB *device) +{ + struct HIF_USB_PIPE *pipe; + uint32_t buf_len; + + HIF_ENTER(); + pipe = &device->pipes[HIF_RX_DATA_PIPE]; + pipe->urb_cnt_thresh = pipe->urb_alloc / 2; + + HIF_TRACE("Post URBs to RX_DATA_PIPE: %d", + device->pipes[HIF_RX_DATA_PIPE].urb_cnt); + if (device->is_bundle_enabled) { + usb_hif_post_recv_bundle_transfers(pipe, + pipe->device->rx_bundle_buf_len); + } else { + buf_len = HIF_USB_RX_BUFFER_SIZE; + usb_hif_post_recv_transfers(pipe, buf_len); + } + + HIF_DBG("athusb bulk recv len %d", buf_len); + + if (!hif_usb_disable_rxdata2) { + HIF_TRACE("Post URBs to RX_DATA2_PIPE: %d", + device->pipes[HIF_RX_DATA2_PIPE].urb_cnt); + + pipe = &device->pipes[HIF_RX_DATA2_PIPE]; + pipe->urb_cnt_thresh = pipe->urb_alloc / 2; + usb_hif_post_recv_transfers(pipe, HIF_USB_RX_BUFFER_SIZE); + } + + HIF_EXIT(); +} + +/** + * usb_hif_submit_ctrl_out() - send out a ctrl urb + * @device: HIF device for which urb needs to be posted + * @req: request value for the ctrl message + * @value: USB message value + * @index: USB message index value + * @data: pointer to data containing ctrl message to send + * @size: size of the control message to send + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +QDF_STATUS usb_hif_submit_ctrl_out(struct HIF_DEVICE_USB *device, + uint8_t req, uint16_t value, uint16_t index, + void *data, uint32_t size) +{ + int32_t result = 0; + QDF_STATUS ret = QDF_STATUS_SUCCESS; + uint8_t *buf = NULL; + + do { + + if (size > 0) { + buf = qdf_mem_malloc(size); + if (NULL == buf) { + ret = QDF_STATUS_E_NOMEM; + break; + } + qdf_mem_copy(buf, (uint8_t *) data, size); + } + + HIF_DBG("ctrl-out 
req:0x%2.2X, value:0x%4.4X index:0x%4.4X, datasize:%d", + req, value, index, size); + + result = usb_control_msg(device->udev, + usb_sndctrlpipe(device->udev, 0), + req, + USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, value, index, buf, + size, 2 * HZ); + + if (result < 0) { + HIF_ERROR("%s failed,result = %d", __func__, result); + ret = QDF_STATUS_E_FAILURE; + } + + } while (false); + + if (buf != NULL) + qdf_mem_free(buf); + + return ret; +} + +/** + * usb_hif_submit_ctrl_in() - recv a resonse to the ctrl message sent out + * @device: HIF device for which urb needs to be received + * @req: request value for the ctrl message + * @value: USB message value + * @index: USB message index value + * @data: pointer to data containing ctrl message to be received + * @size: size of the control message to be received + * + * Return: QDF_STATUS_SUCCESS if success else an appropriate QDF_STATUS error + */ +QDF_STATUS usb_hif_submit_ctrl_in(struct HIF_DEVICE_USB *device, + uint8_t req, uint16_t value, uint16_t index, + void *data, uint32_t size) +{ + int32_t result = 0; + QDF_STATUS ret = QDF_STATUS_SUCCESS; + uint8_t *buf = NULL; + + do { + + if (size > 0) { + buf = qdf_mem_malloc(size); + if (NULL == buf) { + ret = QDF_STATUS_E_NOMEM; + break; + } + } + + HIF_DBG("ctrl-in req:0x%2.2X, value:0x%4.4X index:0x%4.4X, datasize:%d", + req, value, index, size); + + result = usb_control_msg(device->udev, + usb_rcvctrlpipe(device->udev, 0), + req, + USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, value, index, buf, + size, 2 * HZ); + + if (result < 0) { + HIF_ERROR("%s failed, result = %d", __func__, result); + ret = QDF_STATUS_E_FAILURE; + break; + } + + qdf_mem_copy((uint8_t *) data, buf, size); + + } while (false); + + if (buf != NULL) + qdf_mem_free(buf); + + return ret; +} + +/** + * usb_hif_io_complete() - transmit call back for tx urb + * @pipe: pointer to struct HIF_USB_PIPE + * + * Return: none + */ +static void usb_hif_io_complete(struct HIF_USB_PIPE *pipe) +{ + 
qdf_nbuf_t buf; + struct HIF_DEVICE_USB *device; + HTC_FRAME_HDR *HtcHdr; + uint8_t *data; + uint32_t len; + struct hif_usb_softc *sc = HIF_GET_USB_SOFTC(pipe->device); + + device = pipe->device; + HIF_ENTER(); + while ((buf = skb_dequeue(&pipe->io_comp_queue))) { + if (pipe->flags & HIF_USB_PIPE_FLAG_TX) { + HIF_DBG("+athusb xmit callback buf:0x%pK", buf); + HtcHdr = (HTC_FRAME_HDR *) + qdf_nbuf_get_frag_vaddr(buf, 0); + +#ifdef ATH_11AC_TXCOMPACT +/* ATH_11AC_TXCOMPACT does not support High Latency mode */ +#else + device->htc_callbacks.txCompletionHandler(device-> + htc_callbacks. + Context, buf, + HtcHdr-> + EndpointID, 0); +#endif + HIF_DBG("-athusb xmit callback"); + } else { + HIF_DBG("+athusb recv callback buf: 0x%pK", buf); + qdf_nbuf_peek_header(buf, &data, &len); + + if (IS_FW_CRASH_DUMP(*((uint32_t *) data))) { + sc->fw_data = data; + sc->fw_data_len = len; + device->htc_callbacks.fwEventHandler( + device->htc_callbacks.Context, + QDF_STATUS_E_USB_ERROR); + qdf_nbuf_free(buf); + } else { + device->htc_callbacks.rxCompletionHandler( + device->htc_callbacks.Context, buf, + pipe->logical_pipe_num); + } + HIF_DBG("-athusb recv callback"); + } + } + + HIF_EXIT(); +} + +#ifdef HIF_USB_TASKLET +/** + * usb_hif_io_comp_tasklet() - per pipe tasklet routine + * @context: pointer to HIF USB pipe + * + * Return: none + */ +void usb_hif_io_comp_tasklet(unsigned long context) +{ + struct HIF_USB_PIPE *pipe = (struct HIF_USB_PIPE *) context; + + usb_hif_io_complete(pipe); +} + +#else +/** + * usb_hif_io_comp_work() - per pipe work queue + * @work: pointer to struct work_struct + * + * Return: none + */ +void usb_hif_io_comp_work(struct work_struct *work) +{ + struct HIF_USB_PIPE *pipe = container_of(work, struct HIF_USB_PIPE, + io_complete_work); + + usb_hif_io_complete(pipe); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/htc/dl_list.h b/drivers/staging/qca-wifi-host-cmn/htc/dl_list.h new file mode 100644 index 
0000000000000000000000000000000000000000..85b1b8542a4ebe57ad0e4fdaf2a7a8578c0a69df --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/dl_list.h @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2013-2014, 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/*=========================================================================== */ +/* Double-link list definitions (adapted from Atheros SDIO stack) */ +/* */ +/* Author(s): ="Atheros" */ +/*=========================================================================== */ + +#ifndef __DL_LIST_H___ +#define __DL_LIST_H___ + +#define A_CONTAINING_STRUCT(address, struct_type, field_name) \ + ((struct_type *)((char *)(address) - \ + (char *)(&((struct_type *)0)->field_name))) + +/* list functions */ +/* pointers for the list */ +typedef struct _DL_LIST { + struct _DL_LIST *pPrev; + struct _DL_LIST *pNext; +} DL_LIST, *PDL_LIST; +/* + * DL_LIST_INIT , initialize doubly linked list + */ +#define DL_LIST_INIT(pList) \ + {(pList)->pPrev = pList; (pList)->pNext = pList; } + +/* faster macro to init list and add a single item */ +#define DL_LIST_INIT_AND_ADD(pList, pItem) \ + { (pList)->pPrev = (pItem); \ + (pList)->pNext = (pItem); \ + (pItem)->pNext = (pList); \ + (pItem)->pPrev = (pList); \ + } + +#define DL_LIST_IS_EMPTY(pList) (((pList)->pPrev == (pList)) && \ + ((pList)->pNext == (pList))) +#define DL_LIST_GET_ITEM_AT_HEAD(pList) (pList)->pNext +#define DL_LIST_GET_ITEM_AT_TAIL(pList) (pList)->pPrev +/* + * ITERATE_OVER_LIST pStart is the list, pTemp is a temp list member + * NOT: do not use this function if the items in the list are deleted inside the + * iteration loop + */ +#define ITERATE_OVER_LIST(pStart, pTemp) \ + for ((pTemp) = (pStart)->pNext; pTemp != (pStart); \ + (pTemp) = (pTemp)->pNext) + +static inline bool dl_list_is_entry_in_list(const DL_LIST *pList, + const DL_LIST *pEntry) +{ + const DL_LIST *pTmp; + + if (pList == pEntry) + return true; + + ITERATE_OVER_LIST(pList, pTmp) { + if (pTmp == pEntry) + return true; + } + + return false; +} + +/* safe iterate macro that allows the item to be removed from the list + * the iteration continues to the next item in the list + */ +#define ITERATE_OVER_LIST_ALLOW_REMOVE(pStart, pItem, st, offset) \ + { \ + 
PDL_LIST pTemp; \ + { pTemp = (pStart)->pNext; } \ + while (pTemp != (pStart)) { \ + { (pItem) = A_CONTAINING_STRUCT(pTemp, st, offset); } \ + { pTemp = pTemp->pNext; } \ + +#define ITERATE_IS_VALID(pStart) dl_list_is_entry_in_list(pStart, pTemp) +#define ITERATE_RESET(pStart) { pTemp = (pStart)->pNext; } + +#define ITERATE_END }} + +/* + * dl_list_insert_tail - insert pAdd to the end of the list + */ +static inline PDL_LIST dl_list_insert_tail(PDL_LIST pList, PDL_LIST pAdd) +{ + /* insert at tail */ + pAdd->pPrev = pList->pPrev; + pAdd->pNext = pList; + if (pList->pPrev) + pList->pPrev->pNext = pAdd; + pList->pPrev = pAdd; + return pAdd; +} + +/* + * dl_list_insert_head - insert pAdd into the head of the list + */ +static inline PDL_LIST dl_list_insert_head(PDL_LIST pList, PDL_LIST pAdd) +{ + /* insert at head */ + pAdd->pPrev = pList; + pAdd->pNext = pList->pNext; + pList->pNext->pPrev = pAdd; + pList->pNext = pAdd; + return pAdd; +} + +#define DL_ListAdd(pList, pItem) dl_list_insert_head((pList), (pItem)) +/* + * dl_list_remove - remove pDel from list + */ +static inline PDL_LIST dl_list_remove(PDL_LIST pDel) +{ + if (pDel->pNext != NULL) + pDel->pNext->pPrev = pDel->pPrev; + if (pDel->pPrev != NULL) + pDel->pPrev->pNext = pDel->pNext; + /* point back to itself just to be safe, if remove is called again */ + pDel->pNext = pDel; + pDel->pPrev = pDel; + return pDel; +} + +/* + * dl_list_remove_item_from_head - get a list item from the head + */ +static inline PDL_LIST dl_list_remove_item_from_head(PDL_LIST pList) +{ + PDL_LIST pItem = NULL; + + if (pList->pNext != pList) { + pItem = pList->pNext; + /* remove the first item from head */ + dl_list_remove(pItem); + } + return pItem; +} + +static inline PDL_LIST dl_list_remove_item_from_tail(PDL_LIST pList) +{ + PDL_LIST pItem = NULL; + + if (pList->pPrev != pList) { + pItem = pList->pPrev; + /* remove the item from tail */ + dl_list_remove(pItem); + } + return pItem; +} + +/* transfer src list items to the tail of 
the destination list */ +static inline void dl_list_transfer_items_to_tail(PDL_LIST pDest, PDL_LIST pSrc) +{ + /* only concatenate if src is not empty */ + if (!DL_LIST_IS_EMPTY(pSrc)) { + /* cut out circular list in src and re-attach to end of dest */ + pSrc->pPrev->pNext = pDest; + pSrc->pNext->pPrev = pDest->pPrev; + pDest->pPrev->pNext = pSrc->pNext; + pDest->pPrev = pSrc->pPrev; + /* terminate src list, it is now empty */ + pSrc->pPrev = pSrc; + pSrc->pNext = pSrc; + } +} + +/* transfer src list items to the head of the destination list */ +static inline void dl_list_transfer_items_to_head(PDL_LIST pDest, PDL_LIST pSrc) +{ + /* only concatenate if src is not empty */ + if (!DL_LIST_IS_EMPTY(pSrc)) { + /* cut out circular list in src and reattach to start of dest */ + pSrc->pNext->pPrev = pDest; + pDest->pNext->pPrev = pSrc->pPrev; + pSrc->pPrev->pNext = pDest->pNext; + pDest->pNext = pSrc->pNext; + /* terminate src list, it is now empty */ + pSrc->pPrev = pSrc; + pSrc->pNext = pSrc; + } +} + +#endif /* __DL_LIST_H___ */ diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc.c b/drivers/staging/qca-wifi-host-cmn/htc/htc.c new file mode 100644 index 0000000000000000000000000000000000000000..e4d0c92e03febc339b12164c4d4e292874db8be1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc.c @@ -0,0 +1,1118 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "htc_debug.h" +#include "htc_internal.h" +#include "htc_credit_history.h" +#include +#include /* qdf_nbuf_t */ +#include /* qdf_print */ + +#define MAX_HTC_RX_BUNDLE 2 + +#if defined(WLAN_DEBUG) || defined(DEBUG) +static ATH_DEBUG_MASK_DESCRIPTION g_htc_debug_description[] = { + {ATH_DEBUG_SEND, "Send"}, + {ATH_DEBUG_RECV, "Recv"}, + {ATH_DEBUG_SYNC, "Sync"}, + {ATH_DEBUG_DUMP, "Dump Data (RX or TX)"}, + {ATH_DEBUG_SETUP, "Setup"}, +}; + +ATH_DEBUG_INSTANTIATE_MODULE_VAR(htc, + "htc", + "Host Target Communications", + ATH_DEBUG_MASK_DEFAULTS | ATH_DEBUG_INFO | + ATH_DEBUG_SETUP, + ATH_DEBUG_DESCRIPTION_COUNT + (g_htc_debug_description), + g_htc_debug_description); + +#endif + +#if (defined(CONFIG_MCL) || (QCA_WIFI_QCA8074)) +static const uint32_t svc_id[] = {WMI_CONTROL_SVC, WMI_CONTROL_SVC_WMAC1, + WMI_CONTROL_SVC_WMAC2}; +#else +static const uint32_t svc_id[] = {WMI_CONTROL_SVC}; +#endif + +extern unsigned int htc_credit_flow; + +static void reset_endpoint_states(HTC_TARGET *target); + +static void destroy_htc_tx_ctrl_packet(HTC_PACKET *pPacket) +{ + qdf_nbuf_t netbuf; + + netbuf = (qdf_nbuf_t) GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("free ctrl netbuf :0x%pK\n", netbuf)); + if (netbuf != NULL) + qdf_nbuf_free(netbuf); + qdf_mem_free(pPacket); +} + +static HTC_PACKET *build_htc_tx_ctrl_packet(qdf_device_t osdev) +{ + HTC_PACKET *pPacket = NULL; + qdf_nbuf_t netbuf; + + do { + pPacket = (HTC_PACKET *) qdf_mem_malloc(sizeof(HTC_PACKET)); + if (pPacket == NULL) + break; + netbuf = qdf_nbuf_alloc(osdev, HTC_CONTROL_BUFFER_SIZE, + 20, 4, true); + if (NULL == netbuf) { + 
qdf_mem_free(pPacket); + pPacket = NULL; + qdf_print("%s: nbuf alloc failed\n", __func__); + break; + } + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("alloc ctrl netbuf :0x%pK\n", netbuf)); + SET_HTC_PACKET_NET_BUF_CONTEXT(pPacket, netbuf); + } while (false); + + return pPacket; +} + +void htc_free_control_tx_packet(HTC_TARGET *target, HTC_PACKET *pPacket) +{ + +#ifdef TODO_FIXME + LOCK_HTC(target); + HTC_PACKET_ENQUEUE(&target->ControlBufferTXFreeList, pPacket); + UNLOCK_HTC(target); + /* TODO_FIXME netbufs cannot be RESET! */ +#else + destroy_htc_tx_ctrl_packet(pPacket); +#endif + +} + +HTC_PACKET *htc_alloc_control_tx_packet(HTC_TARGET *target) +{ +#ifdef TODO_FIXME + HTC_PACKET *pPacket; + + LOCK_HTC(target); + pPacket = htc_packet_dequeue(&target->ControlBufferTXFreeList); + UNLOCK_HTC(target); + + return pPacket; +#else + return build_htc_tx_ctrl_packet(target->osdev); +#endif +} + +/* Set the target failure handling callback */ +void htc_set_target_failure_callback(HTC_HANDLE HTCHandle, + HTC_TARGET_FAILURE Callback) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + + target->HTCInitInfo.TargetFailure = Callback; +} + +void htc_dump(HTC_HANDLE HTCHandle, uint8_t CmdId, bool start) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + + hif_dump(target->hif_dev, CmdId, start); +} + +/* cleanup the HTC instance */ +static void htc_cleanup(HTC_TARGET *target) +{ + HTC_PACKET *pPacket; + int i; + HTC_ENDPOINT *endpoint; + HTC_PACKET_QUEUE *pkt_queue; + qdf_nbuf_t netbuf; + + if (target->hif_dev != NULL) { + hif_detach_htc(target->hif_dev); + hif_mask_interrupt_call(target->hif_dev); + target->hif_dev = NULL; + } + + while (true) { + pPacket = allocate_htc_packet_container(target); + if (pPacket == NULL) + break; + qdf_mem_free(pPacket); + } + + LOCK_HTC_TX(target); + pPacket = target->pBundleFreeList; + target->pBundleFreeList = NULL; + UNLOCK_HTC_TX(target); + while (pPacket) { + HTC_PACKET *pPacketTmp = (HTC_PACKET *) 
pPacket->ListLink.pNext; + netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + if (netbuf) + qdf_nbuf_free(netbuf); + pkt_queue = pPacket->pContext; + if (pkt_queue) + qdf_mem_free(pkt_queue); + qdf_mem_free(pPacket); + pPacket = pPacketTmp; + } + +#ifdef TODO_FIXME + while (true) { + pPacket = htc_alloc_control_tx_packet(target); + if (pPacket == NULL) + break; + netbuf = (qdf_nbuf_t) GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + if (netbuf != NULL) + qdf_nbuf_free(netbuf); + qdf_mem_free(pPacket); + } +#endif + + htc_flush_endpoint_txlookupQ(target, ENDPOINT_0, true); + + qdf_spinlock_destroy(&target->HTCLock); + qdf_spinlock_destroy(&target->HTCRxLock); + qdf_spinlock_destroy(&target->HTCTxLock); + qdf_spinlock_destroy(&target->HTCCreditLock); + for (i = 0; i < ENDPOINT_MAX; i++) { + endpoint = &target->endpoint[i]; + qdf_spinlock_destroy(&endpoint->lookup_queue_lock); + } + + /* free our instance */ + qdf_mem_free(target); +} + +#ifdef FEATURE_RUNTIME_PM +/** + * htc_runtime_pm_init(): runtime pm related intialization + * + * need to initialize a work item. + */ +static void htc_runtime_pm_init(HTC_TARGET *target) +{ + qdf_create_work(0, &target->queue_kicker, htc_kick_queues, target); +} + +/** + * htc_runtime_suspend() - runtime suspend HTC + * + * @htc_ctx: HTC context pointer + * + * This is a dummy function for symmetry. + * + * Return: 0 for success + */ +int htc_runtime_suspend(HTC_HANDLE htc_ctx) +{ + return 0; +} + +/** + * htc_runtime_resume(): resume htc + * + * The htc message queue needs to be kicked off after + * a runtime resume. Otherwise messages would get stuck. 
+ * + * @htc_ctx: HTC context pointer + * + * Return: 0 for success; + */ +int htc_runtime_resume(HTC_HANDLE htc_ctx) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_ctx); + + if (target == NULL) + return 0; + + qdf_sched_work(0, &target->queue_kicker); + return 0; +} +#else +static inline void htc_runtime_pm_init(HTC_TARGET *target) { } +#endif + +/* registered target arrival callback from the HIF layer */ +HTC_HANDLE htc_create(void *ol_sc, struct htc_init_info *pInfo, + qdf_device_t osdev, uint32_t con_mode) +{ + struct hif_msg_callbacks htcCallbacks; + HTC_ENDPOINT *pEndpoint = NULL; + HTC_TARGET *target = NULL; + int i; + + if (ol_sc == NULL) { + HTC_ERROR("%s: ol_sc = NULL", __func__); + return NULL; + } + HTC_TRACE("+htc_create .. HIF :%pK", ol_sc); + + A_REGISTER_MODULE_DEBUG_INFO(htc); + + target = (HTC_TARGET *) qdf_mem_malloc(sizeof(HTC_TARGET)); + if (target == NULL) { + HTC_ERROR("%s: Unable to allocate memory", __func__); + return NULL; + } + + htc_runtime_pm_init(target); + htc_credit_history_init(); + qdf_spinlock_create(&target->HTCLock); + qdf_spinlock_create(&target->HTCRxLock); + qdf_spinlock_create(&target->HTCTxLock); + for (i = 0; i < ENDPOINT_MAX; i++) { + pEndpoint = &target->endpoint[i]; + qdf_spinlock_create(&pEndpoint->lookup_queue_lock); + } + target->is_nodrop_pkt = false; + target->htc_hdr_length_check = false; + target->wmi_ep_count = 1; + + do { + qdf_mem_copy(&target->HTCInitInfo, pInfo, + sizeof(struct htc_init_info)); + target->host_handle = pInfo->pContext; + target->osdev = osdev; + target->con_mode = con_mode; + + reset_endpoint_states(target); + + INIT_HTC_PACKET_QUEUE(&target->ControlBufferTXFreeList); + + for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) { + HTC_PACKET *pPacket = (HTC_PACKET *) + qdf_mem_malloc(sizeof(HTC_PACKET)); + if (pPacket != NULL) + free_htc_packet_container(target, pPacket); + } + +#ifdef TODO_FIXME + for (i = 0; i < NUM_CONTROL_TX_BUFFERS; i++) { + pPacket = build_htc_tx_ctrl_packet(); 
+ if (pPacket == NULL) + break; + htc_free_control_tx_packet(target, pPacket); + } +#endif + + /* setup HIF layer callbacks */ + qdf_mem_zero(&htcCallbacks, sizeof(struct hif_msg_callbacks)); + htcCallbacks.Context = target; + htcCallbacks.rxCompletionHandler = htc_rx_completion_handler; + htcCallbacks.txCompletionHandler = htc_tx_completion_handler; + htcCallbacks.txResourceAvailHandler = + htc_tx_resource_avail_handler; + htcCallbacks.fwEventHandler = htc_fw_event_handler; + target->hif_dev = ol_sc; + + /* Get HIF default pipe for HTC message exchange */ + pEndpoint = &target->endpoint[ENDPOINT_0]; + + hif_post_init(target->hif_dev, target, &htcCallbacks); + hif_get_default_pipe(target->hif_dev, &pEndpoint->UL_PipeID, + &pEndpoint->DL_PipeID); + hif_set_initial_wakeup_cb(target->hif_dev, + pInfo->target_initial_wakeup_cb, + pInfo->target_psoc); + + } while (false); + + htc_recv_init(target); + + HTC_TRACE("-htc_create: (0x%pK)", target); + + return (HTC_HANDLE) target; +} + +void htc_destroy(HTC_HANDLE HTCHandle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("+htc_destroy .. 
Destroying :0x%pK\n", target)); + hif_stop(htc_get_hif_device(HTCHandle)); + if (target) + htc_cleanup(target); + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-htc_destroy\n")); +} + +/* get the low level HIF device for the caller , the caller may wish to do low + * level HIF requests + */ +void *htc_get_hif_device(HTC_HANDLE HTCHandle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + + return target->hif_dev; +} + +static void htc_control_tx_complete(void *Context, HTC_PACKET *pPacket) +{ + HTC_TARGET *target = (HTC_TARGET *) Context; + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("+-htc_control_tx_complete 0x%pK (l:%d)\n", pPacket, + pPacket->ActualLength)); + htc_free_control_tx_packet(target, pPacket); +} + +/* TODO, this is just a temporary max packet size */ +#define MAX_MESSAGE_SIZE 1536 + +/** + * htc_setup_epping_credit_allocation() - allocate credits/HTC buffers to WMI + * @scn: pointer to hif_opaque_softc + * @pEntry: pointer to tx credit allocation entry + * @credits: number of credits + * + * Return: None + */ +static void +htc_setup_epping_credit_allocation(struct hif_opaque_softc *scn, + struct htc_service_tx_credit_allocation *pEntry, + int credits) +{ + switch (hif_get_bus_type(scn)) { + case QDF_BUS_TYPE_PCI: + pEntry++; + pEntry->service_id = WMI_DATA_BE_SVC; + pEntry->CreditAllocation = (credits >> 1); + + pEntry++; + pEntry->service_id = WMI_DATA_BK_SVC; + pEntry->CreditAllocation = (credits >> 1); + break; + case QDF_BUS_TYPE_SDIO: + pEntry++; + pEntry->service_id = WMI_DATA_BE_SVC; + pEntry->CreditAllocation = credits; + break; + default: + break; + } +} + +/** + * htc_setup_target_buffer_assignments() - setup target buffer assignments + * @target: HTC Target Pointer + * + * Return: A_STATUS + */ +static +A_STATUS htc_setup_target_buffer_assignments(HTC_TARGET *target) +{ + struct htc_service_tx_credit_allocation *pEntry; + A_STATUS status; + int credits; + int creditsPerMaxMsg; + + creditsPerMaxMsg = MAX_MESSAGE_SIZE / target->TargetCreditSize; 
+ if (MAX_MESSAGE_SIZE % target->TargetCreditSize) + creditsPerMaxMsg++; + + /* TODO, this should be configured by the caller! */ + + credits = target->TotalTransmitCredits; + pEntry = &target->ServiceTxAllocTable[0]; + + status = A_OK; + /* + * Allocate all credists/HTC buffers to WMI. + * no buffers are used/required for data. data always + * remains on host. + */ + if (HTC_IS_EPPING_ENABLED(target->con_mode)) { + pEntry++; + pEntry->service_id = WMI_CONTROL_SVC; + pEntry->CreditAllocation = credits; + /* endpoint ping is a testing tool directly on top of HTC in + * both target and host sides. + * In target side, the endppint ping fw has no wlan stack and + * FW mboxping app directly sits on HTC and it simply drops + * or loops back TX packets. For rx perf, FW mboxping app + * generates packets and passes packets to HTC to send to host. + * There is no WMI message exchanges between host and target + * in endpoint ping case. + * In host side, the endpoint ping driver is a Ethernet driver + * and it directly sits on HTC. Only HIF, HTC, QDF, ADF are + * used by the endpoint ping driver. There is no wifi stack + * at all in host side also. For tx perf use case, + * the user space mboxping app sends the raw packets to endpoint + * ping driver and it directly forwards to HTC for transmission + * to stress the bus. For the rx perf, HTC passes the received + * packets to endpoint ping driver and it is passed to the user + * space through the Ethernet interface. + * For credit allocation, in SDIO bus case, only BE service is + * used for tx/rx perf testing so that all credits are given + * to BE service. In PCIe and USB bus case, endpoint ping uses + * both BE and BK services to stress the bus so that the total + * credits are equally distributed to BE and BK services. 
+ */ + + htc_setup_epping_credit_allocation(target->hif_dev, + pEntry, credits); + } else { + int i; + uint32_t max_wmi_svc = (sizeof(svc_id) / sizeof(uint32_t)); + + if ((target->wmi_ep_count == 0) || + (target->wmi_ep_count > max_wmi_svc)) + return A_ERROR; + + /* + * Divide credit among number of endpoints for WMI + */ + credits = credits / target->wmi_ep_count; + for (i = 0; i < target->wmi_ep_count; i++) { + status = A_OK; + pEntry++; + pEntry->service_id = svc_id[i]; + pEntry->CreditAllocation = credits; + } + } + + if (A_SUCCESS(status)) { + int i; + + for (i = 0; i < HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) { + if (target->ServiceTxAllocTable[i].service_id != 0) { + AR_DEBUG_PRINTF(ATH_DEBUG_INIT, + ("SVS Index : %d TX : 0x%2.2X : alloc:%d", + i, + target->ServiceTxAllocTable[i]. + service_id, + target->ServiceTxAllocTable[i]. + CreditAllocation)); + } + } + } + + return status; +} + +uint8_t htc_get_credit_allocation(HTC_TARGET *target, uint16_t service_id) +{ + uint8_t allocation = 0; + int i; + + for (i = 0; i < HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) { + if (target->ServiceTxAllocTable[i].service_id == service_id) { + allocation = + target->ServiceTxAllocTable[i].CreditAllocation; + } + } + + if (0 == allocation) { + AR_DEBUG_PRINTF(ATH_DEBUG_RSVD1, + ("HTC Service TX : 0x%2.2X : allocation is zero!\n", + service_id)); + } + + return allocation; +} + +QDF_STATUS htc_wait_target(HTC_HANDLE HTCHandle) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_READY_EX_MSG *pReadyMsg; + struct htc_service_connect_req connect; + struct htc_service_connect_resp resp; + HTC_READY_MSG *rdy_msg; + uint16_t htc_rdy_msg_id; + uint8_t i = 0; + HTC_PACKET *rx_bundle_packet, *temp_bundle_packet; + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("htc_wait_target - Enter (target:0x%pK)\n", HTCHandle)); + AR_DEBUG_PRINTF(ATH_DEBUG_RSVD1, ("+HWT\n")); + + do { + + status = hif_start(target->hif_dev); + if (QDF_IS_STATUS_ERROR(status)) { 
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, + ("hif_start failed\n")); + break; + } + + status = htc_wait_recv_ctrl_message(target); + + if (QDF_IS_STATUS_ERROR(status)) + break; + + if (target->CtrlResponseLength < (sizeof(HTC_READY_EX_MSG))) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Invalid HTC Ready Msg Len:%d!\n", + target->CtrlResponseLength)); + status = QDF_STATUS_E_BADMSG; + break; + } + + pReadyMsg = (HTC_READY_EX_MSG *) target->CtrlResponseBuffer; + + rdy_msg = &pReadyMsg->Version2_0_Info; + htc_rdy_msg_id = + HTC_GET_FIELD(rdy_msg, HTC_READY_MSG, MESSAGEID); + if (htc_rdy_msg_id != HTC_MSG_READY_ID) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Invalid HTC Ready Msg : 0x%X!\n", + htc_rdy_msg_id)); + status = QDF_STATUS_E_BADMSG; + break; + } + + target->TotalTransmitCredits = + HTC_GET_FIELD(rdy_msg, HTC_READY_MSG, CREDITCOUNT); + target->TargetCreditSize = + (int)HTC_GET_FIELD(rdy_msg, HTC_READY_MSG, CREDITSIZE); + target->MaxMsgsPerHTCBundle = + (uint8_t) pReadyMsg->MaxMsgsPerHTCBundle; + UPDATE_ALT_CREDIT(target, pReadyMsg->AltDataCreditSize); + /* for old fw this value is set to 0. But the minimum value + * should be 1, i.e., no bundling + */ + if (target->MaxMsgsPerHTCBundle < 1) + target->MaxMsgsPerHTCBundle = 1; + + AR_DEBUG_PRINTF(ATH_DEBUG_INIT, + ("Target Ready! 
TX resource : %d size:%d, MaxMsgsPerHTCBundle = %d", + target->TotalTransmitCredits, + target->TargetCreditSize, + target->MaxMsgsPerHTCBundle)); + + if ((0 == target->TotalTransmitCredits) + || (0 == target->TargetCreditSize)) { + status = QDF_STATUS_E_ABORTED; + break; + } + + /* Allocate expected number of RX bundle buffer allocation */ + if (HTC_RX_BUNDLE_ENABLED(target)) { + temp_bundle_packet = NULL; + for (i = 0; i < MAX_HTC_RX_BUNDLE; i++) { + rx_bundle_packet = + allocate_htc_bundle_packet(target); + if (rx_bundle_packet != NULL) + rx_bundle_packet->ListLink.pNext = + (DL_LIST *)temp_bundle_packet; + else + break; + + temp_bundle_packet = rx_bundle_packet; + } + LOCK_HTC_TX(target); + target->pBundleFreeList = temp_bundle_packet; + UNLOCK_HTC_TX(target); + } + + /* done processing */ + target->CtrlResponseProcessing = false; + + htc_setup_target_buffer_assignments(target); + + /* setup our pseudo HTC control endpoint connection */ + qdf_mem_zero(&connect, sizeof(connect)); + qdf_mem_zero(&resp, sizeof(resp)); + connect.EpCallbacks.pContext = target; + connect.EpCallbacks.EpTxComplete = htc_control_tx_complete; + connect.EpCallbacks.EpRecv = htc_control_rx_complete; + connect.MaxSendQueueDepth = NUM_CONTROL_TX_BUFFERS; + connect.service_id = HTC_CTRL_RSVD_SVC; + + /* connect fake service */ + status = htc_connect_service((HTC_HANDLE) target, + &connect, &resp); + + } while (false); + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("htc_wait_target - Exit (%d)\n", + status)); + AR_DEBUG_PRINTF(ATH_DEBUG_RSVD1, ("-HWT\n")); + return status; +} + +/* start HTC, this is called after all services are connected */ +static A_STATUS htc_config_target_hif_pipe(HTC_TARGET *target) +{ + + return A_OK; +} + +static void reset_endpoint_states(HTC_TARGET *target) +{ + HTC_ENDPOINT *pEndpoint; + int i; + + for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) { + pEndpoint = &target->endpoint[i]; + pEndpoint->service_id = 0; + pEndpoint->MaxMsgLength = 0; + pEndpoint->MaxTxQueueDepth = 0; + 
pEndpoint->Id = i; + INIT_HTC_PACKET_QUEUE(&pEndpoint->TxQueue); + INIT_HTC_PACKET_QUEUE(&pEndpoint->TxLookupQueue); + INIT_HTC_PACKET_QUEUE(&pEndpoint->RxBufferHoldQueue); + pEndpoint->target = target; + pEndpoint->TxCreditFlowEnabled = (bool)htc_credit_flow; + qdf_atomic_init(&pEndpoint->TxProcessCount); + } +} + +/** + * htc_start() - Main HTC function to trigger HTC start + * @HTCHandle: pointer to HTC handle + * + * Return: QDF_STATUS_SUCCESS for success or an appropriate QDF_STATUS error + */ +QDF_STATUS htc_start(HTC_HANDLE HTCHandle) +{ + qdf_nbuf_t netbuf; + QDF_STATUS status = QDF_STATUS_SUCCESS; + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_SETUP_COMPLETE_EX_MSG *pSetupComp; + HTC_PACKET *pSendPacket; + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("htc_start Enter\n")); + + do { + + htc_config_target_hif_pipe(target); + + /* allocate a buffer to send */ + pSendPacket = htc_alloc_control_tx_packet(target); + if (NULL == pSendPacket) { + AR_DEBUG_ASSERT(false); + qdf_print("%s: allocControlTxPacket failed\n", + __func__); + status = QDF_STATUS_E_NOMEM; + break; + } + + netbuf = + (qdf_nbuf_t) GET_HTC_PACKET_NET_BUF_CONTEXT(pSendPacket); + /* assemble setup complete message */ + qdf_nbuf_put_tail(netbuf, sizeof(HTC_SETUP_COMPLETE_EX_MSG)); + pSetupComp = + (HTC_SETUP_COMPLETE_EX_MSG *) qdf_nbuf_data(netbuf); + qdf_mem_zero(pSetupComp, sizeof(HTC_SETUP_COMPLETE_EX_MSG)); + + HTC_SET_FIELD(pSetupComp, HTC_SETUP_COMPLETE_EX_MSG, + MESSAGEID, HTC_MSG_SETUP_COMPLETE_EX_ID); + + if (!htc_credit_flow) { + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("HTC will not use TX credit flow control")); + pSetupComp->SetupFlags |= + HTC_SETUP_COMPLETE_FLAGS_DISABLE_TX_CREDIT_FLOW; + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("HTC using TX credit flow control")); + } + + if ((hif_get_bus_type(target->hif_dev) == QDF_BUS_TYPE_SDIO) || + (hif_get_bus_type(target->hif_dev) == + QDF_BUS_TYPE_USB)) { + if (HTC_RX_BUNDLE_ENABLED(target)) + pSetupComp->SetupFlags |= + 
HTC_SETUP_COMPLETE_FLAGS_ENABLE_BUNDLE_RECV; + hif_set_bundle_mode(target->hif_dev, true, + HTC_MAX_MSG_PER_BUNDLE_RX); + } + + SET_HTC_PACKET_INFO_TX(pSendPacket, + NULL, + (uint8_t *) pSetupComp, + sizeof(HTC_SETUP_COMPLETE_EX_MSG), + ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG); + + status = htc_send_pkt((HTC_HANDLE) target, pSendPacket); + if (QDF_IS_STATUS_ERROR(status)) + break; + } while (false); + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("htc_start Exit\n")); + return status; +} + +/*flush all queued buffers for surpriseremove case*/ +void htc_flush_surprise_remove(HTC_HANDLE HTCHandle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + int i; + HTC_ENDPOINT *pEndpoint; +#ifdef RX_SG_SUPPORT + qdf_nbuf_t netbuf; + qdf_nbuf_queue_t *rx_sg_queue = &target->RxSgQueue; +#endif + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+htc_flush_surprise_remove\n")); + + /* cleanup endpoints */ + for (i = 0; i < ENDPOINT_MAX; i++) { + pEndpoint = &target->endpoint[i]; + htc_flush_rx_hold_queue(target, pEndpoint); + htc_flush_endpoint_tx(target, pEndpoint, HTC_TX_PACKET_TAG_ALL); + } + + hif_flush_surprise_remove(target->hif_dev); + +#ifdef RX_SG_SUPPORT + LOCK_HTC_RX(target); + while ((netbuf = qdf_nbuf_queue_remove(rx_sg_queue)) != NULL) + qdf_nbuf_free(netbuf); + RESET_RX_SG_CONFIG(target); + UNLOCK_HTC_RX(target); +#endif + + reset_endpoint_states(target); + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-htc_flush_surprise_remove\n")); +} + +/* stop HTC communications, i.e. 
stop interrupt reception, and flush all queued + * buffers + */ +void htc_stop(HTC_HANDLE HTCHandle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + int i; + HTC_ENDPOINT *pEndpoint; +#ifdef RX_SG_SUPPORT + qdf_nbuf_t netbuf; + qdf_nbuf_queue_t *rx_sg_queue = &target->RxSgQueue; +#endif + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+htc_stop\n")); + + HTC_INFO("%s: endpoints cleanup\n", __func__); + /* cleanup endpoints */ + for (i = 0; i < ENDPOINT_MAX; i++) { + pEndpoint = &target->endpoint[i]; + htc_flush_rx_hold_queue(target, pEndpoint); + htc_flush_endpoint_tx(target, pEndpoint, HTC_TX_PACKET_TAG_ALL); + if (pEndpoint->ul_is_polled) { + qdf_timer_stop(&pEndpoint->ul_poll_timer); + qdf_timer_free(&pEndpoint->ul_poll_timer); + } + } + + /* Note: htc_flush_endpoint_tx for all endpoints should be called before + * hif_stop - otherwise htc_tx_completion_handler called from + * hif_send_buffer_cleanup_on_pipe for residual tx frames in HIF layer, + * might queue the packet again to HIF Layer - which could cause tx + * buffer leak + */ + + HTC_INFO("%s: stopping hif layer\n", __func__); + hif_stop(target->hif_dev); + +#ifdef RX_SG_SUPPORT + LOCK_HTC_RX(target); + while ((netbuf = qdf_nbuf_queue_remove(rx_sg_queue)) != NULL) + qdf_nbuf_free(netbuf); + RESET_RX_SG_CONFIG(target); + UNLOCK_HTC_RX(target); +#endif + + /** + * In SSR case, HTC tx completion callback for wmi will be blocked + * by TARGET_STATUS_RESET and HTC packets will be left unfreed on + * lookup queue. 
+ */ + HTC_INFO("%s: flush endpoints Tx lookup queue\n", __func__); + for (i = 0; i < ENDPOINT_MAX; i++) { + pEndpoint = &target->endpoint[i]; + if (pEndpoint->service_id == WMI_CONTROL_SVC) + htc_flush_endpoint_txlookupQ(target, i, false); + } + HTC_INFO("%s: resetting endpoints state\n", __func__); + + reset_endpoint_states(target); + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-htc_stop\n")); +} + +void htc_dump_credit_states(HTC_HANDLE HTCHandle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_ENDPOINT *pEndpoint; + int i; + + for (i = 0; i < ENDPOINT_MAX; i++) { + pEndpoint = &target->endpoint[i]; + if (0 == pEndpoint->service_id) + continue; + + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("--- EP : %d service_id: 0x%X --------------\n", + pEndpoint->Id, pEndpoint->service_id)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + (" TxCredits : %d\n", + pEndpoint->TxCredits)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + (" TxCreditSize : %d\n", + pEndpoint->TxCreditSize)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + (" TxCreditsPerMaxMsg : %d\n", + pEndpoint->TxCreditsPerMaxMsg)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + (" TxQueueDepth : %d\n", + HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue))); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("----------------------------------------\n")); + } +} + +bool htc_get_endpoint_statistics(HTC_HANDLE HTCHandle, + HTC_ENDPOINT_ID Endpoint, + enum htc_endpoint_stat_action Action, + struct htc_endpoint_stats *pStats) +{ +#ifdef HTC_EP_STAT_PROFILING + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + bool clearStats = false; + bool sample = false; + + switch (Action) { + case HTC_EP_STAT_SAMPLE: + sample = true; + break; + case HTC_EP_STAT_SAMPLE_AND_CLEAR: + sample = true; + clearStats = true; + break; + case HTC_EP_STAT_CLEAR: + clearStats = true; + break; + default: + break; + } + + A_ASSERT(Endpoint < ENDPOINT_MAX); + + /* lock out TX and RX while we sample and/or clear */ + LOCK_HTC_TX(target); + LOCK_HTC_RX(target); + + if (sample) { + A_ASSERT(pStats 
!= NULL); + /* return the stats to the caller */ + qdf_mem_copy(pStats, &target->endpoint[Endpoint].endpoint_stats, + sizeof(struct htc_endpoint_stats)); + } + + if (clearStats) { + /* reset stats */ + qdf_mem_zero(&target->endpoint[Endpoint].endpoint_stats, + sizeof(struct htc_endpoint_stats)); + } + + UNLOCK_HTC_RX(target); + UNLOCK_HTC_TX(target); + + return true; +#else + return false; +#endif +} + +void *htc_get_targetdef(HTC_HANDLE htc_handle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + return hif_get_targetdef(target->hif_dev); +} + +#ifdef IPA_OFFLOAD +/** + * htc_ipa_get_ce_resource() - get uc resource on lower layer + * @htc_handle: htc context + * @ce_sr_base_paddr: copyengine source ring base physical address + * @ce_sr_ring_size: copyengine source ring size + * @ce_reg_paddr: copyengine register physical address + * + * Return: None + */ +void htc_ipa_get_ce_resource(HTC_HANDLE htc_handle, + qdf_shared_mem_t **ce_sr, + uint32_t *ce_sr_ring_size, + qdf_dma_addr_t *ce_reg_paddr) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + if (target->hif_dev) + hif_ipa_get_ce_resource(target->hif_dev, + ce_sr, ce_sr_ring_size, ce_reg_paddr); +} +#endif /* IPA_OFFLOAD */ + +#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT) + +void htc_dump_bundle_stats(HTC_HANDLE HTCHandle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + int total, i; + + total = 0; + for (i = 0; i < HTC_MAX_MSG_PER_BUNDLE_RX; i++) + total += target->rx_bundle_stats[i]; + + if (total) { + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("RX Bundle stats:\n")); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("Total RX packets: %d\n", + total)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ( + "Number of bundle: Number of packets\n")); + for (i = 0; i < HTC_MAX_MSG_PER_BUNDLE_RX; i++) + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("%10d:%10d(%2d%s)\n", (i+1), + target->rx_bundle_stats[i], + ((target->rx_bundle_stats[i]*100)/ + total), "%")); + } + + + total = 0; + for (i = 0; i < 
HTC_MAX_MSG_PER_BUNDLE_TX; i++) + total += target->tx_bundle_stats[i]; + + if (total) { + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("TX Bundle stats:\n")); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("Total TX packets: %d\n", + total)); + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("Number of bundle: Number of packets\n")); + for (i = 0; i < HTC_MAX_MSG_PER_BUNDLE_TX; i++) + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("%10d:%10d(%2d%s)\n", (i+1), + target->tx_bundle_stats[i], + ((target->tx_bundle_stats[i]*100)/ + total), "%")); + } +} + +void htc_clear_bundle_stats(HTC_HANDLE HTCHandle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + + qdf_mem_zero(&target->rx_bundle_stats, sizeof(target->rx_bundle_stats)); + qdf_mem_zero(&target->tx_bundle_stats, sizeof(target->tx_bundle_stats)); +} +#endif + +/** + * htc_vote_link_down - API to vote for link down + * @htc_handle: HTC handle + * + * API for upper layers to call HIF to vote for link down + * + * Return: void + */ +void htc_vote_link_down(HTC_HANDLE htc_handle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + if (!target->hif_dev) + return; + + hif_vote_link_down(target->hif_dev); +} + +/** + * htc_vote_link_up - API to vote for link up + * @htc_handle: HTC Handle + * + * API for upper layers to call HIF to vote for link up + * + * Return: void + */ +void htc_vote_link_up(HTC_HANDLE htc_handle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + if (!target->hif_dev) + return; + + hif_vote_link_up(target->hif_dev); +} + +/** + * htc_can_suspend_link - API to query HIF for link status + * @htc_handle: HTC Handle + * + * API for upper layers to call HIF to query if the link can suspend + * + * Return: void + */ +bool htc_can_suspend_link(HTC_HANDLE htc_handle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + if (!target->hif_dev) + return false; + + return hif_can_suspend_link(target->hif_dev); +} + +#ifdef FEATURE_RUNTIME_PM +int htc_pm_runtime_get(HTC_HANDLE htc_handle) +{ + 
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + return hif_pm_runtime_get(target->hif_dev); +} + +int htc_pm_runtime_put(HTC_HANDLE htc_handle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + return hif_pm_runtime_put(target->hif_dev); +} +#endif + +/** + * htc_set_wmi_endpoint_count: Set number of WMI endpoint + * @htc_handle: HTC handle + * @wmi_ep_count: WMI enpoint count + * + * return: None + */ +void htc_set_wmi_endpoint_count(HTC_HANDLE htc_handle, uint8_t wmi_ep_count) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + target->wmi_ep_count = wmi_ep_count; +} + +/** + * htc_get_wmi_endpoint_count: Get number of WMI endpoint + * @htc_handle: HTC handle + * + * return: WMI enpoint count + */ +uint8_t htc_get_wmi_endpoint_count(HTC_HANDLE htc_handle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + + return target->wmi_ep_count; +} diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_api.h b/drivers/staging/qca-wifi-host-cmn/htc/htc_api.h new file mode 100644 index 0000000000000000000000000000000000000000..0950ba5d30c9848a55260ee4bb165b5bcc2d7b77 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_api.h @@ -0,0 +1,814 @@ +/* + * Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HTC_API_H_ +#define _HTC_API_H_ + +#include +#include +#include /* qdf_device_t */ +#include "htc_packet.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* TODO.. for BMI */ +#define ENDPOINT1 0 +/* TODO -remove me, but we have to fix BMI first */ +#define HTC_MAILBOX_NUM_MAX 4 + +/* this is the amount of header room required by users of HTC */ +#define HTC_HEADER_LEN HTC_HDR_LENGTH + +#define HTC_HTT_TRANSFER_HDRSIZE 24 + +typedef void *HTC_HANDLE; + +typedef uint16_t HTC_SERVICE_ID; + +typedef void (*HTC_TARGET_FAILURE)(void *Instance, QDF_STATUS Status); + +struct htc_init_info { + void *pContext; /* context for target notifications */ + void (*TargetFailure)(void *Instance, QDF_STATUS Status); + void (*TargetSendSuspendComplete)(void *ctx, bool is_nack); + void (*target_initial_wakeup_cb)(void *cb_ctx); + void *target_psoc; +}; + +/* Struct for HTC layer packet stats*/ +struct ol_ath_htc_stats { + int htc_get_pkt_q_fail_count; + int htc_pkt_q_empty_count; + int htc_send_q_empty_count; +}; + +/* To resume HTT Tx queue during runtime resume */ +typedef void (*HTC_EP_RESUME_TX_QUEUE)(void *); + +/* per service connection send completion */ +typedef void (*HTC_EP_SEND_PKT_COMPLETE)(void *, HTC_PACKET *); +/* per service connection callback when a plurality of packets have been sent + * The HTC_PACKET_QUEUE is a temporary queue object (e.g. freed on return from + * the callback) to hold a list of completed send packets. 
+ * If the handler cannot fully traverse the packet queue before returning, it + * should transfer the items of the queue into the caller's private queue using: + * HTC_PACKET_ENQUEUE() + */ +typedef void (*HTC_EP_SEND_PKT_COMP_MULTIPLE)(void *, + HTC_PACKET_QUEUE *); +/* per service connection pkt received */ +typedef void (*HTC_EP_RECV_PKT)(void *, HTC_PACKET *); +/* per service connection callback when a plurality of packets are received + * The HTC_PACKET_QUEUE is a temporary queue object (e.g. freed on return from + * the callback) to hold a list of recv packets. + * If the handler cannot fully traverse the packet queue before returning, it + * should transfer the items of the queue into the caller's private queue using: + * HTC_PACKET_ENQUEUE() + */ +typedef void (*HTC_EP_RECV_PKT_MULTIPLE)(void *, HTC_PACKET_QUEUE *); + +/* Optional per service connection receive buffer re-fill callback, + * On some OSes (like Linux) packets are allocated from a global pool and + * indicated up to the network stack. The driver never gets the packets back + * from the OS. For these OSes a refill callback can be used to allocate and + * re-queue buffers into HTC. + * + * On other OSes, the network stack can call into the driver's OS-specific + * "return_packet" handler and the driver can re-queue these buffers into HTC. + * In this regard a refill callback is unnecessary + */ +typedef void (*HTC_EP_RECV_REFILL)(void *, HTC_ENDPOINT_ID Endpoint); + +/* Optional per service connection receive buffer allocation callback. + * On some systems packet buffers are an extremely limited resource. Rather than + * queue largest-possible-sized buffers to HTC, some systems would rather + * allocate a specific size as the packet is received. The trade off is + * slightly more processing (callback invoked for each RX packet) + * for the benefit of committing fewer buffer resources into HTC. + * + * The callback is provided the length of the pending packet to fetch. 
This + * includes the HTC header length plus the length of payload. The callback can + * return a pointer to the allocated HTC packet for immediate use. + * + * Alternatively a variant of this handler can be used to allocate large receive + * packets as needed. For example an application can use the refill mechanism + * for normal packets and the recv-alloc mechanism to handle the case where a + * large packet buffer is required. This can significantly reduce the + * amount of "committed" memory used to receive packets. + */ +typedef HTC_PACKET *(*HTC_EP_RECV_ALLOC)(void *, + HTC_ENDPOINT_ID Endpoint, + int Length); + +enum htc_send_full_action { + /* packet that overflowed should be kept in the queue */ + HTC_SEND_FULL_KEEP = 0, + /* packet that overflowed should be dropped */ + HTC_SEND_FULL_DROP = 1, +}; + +/* Optional per service connection callback when a send queue is full. This can + * occur if host continues queueing up TX packets faster than credits can arrive + * To prevent the host (on some Oses like Linux) from continuously queueing pkts + * and consuming resources, this callback is provided so that that the host + * can disable TX in the subsystem (i.e. network stack). + * This callback is invoked for each packet that "overflows" the HTC queue. The + * callback can determine whether the new packet that overflowed the queue can + * be kept (HTC_SEND_FULL_KEEP) or dropped (HTC_SEND_FULL_DROP). If a packet is + * dropped, the EpTxComplete handler will be called and the packet's status + * field will be set to A_NO_RESOURCE. + * Other OSes require a "per-packet" indication for each completed TX packet, + * this closed loop mechanism will prevent the network stack from overunning the + * NIC. The packet to keep or drop is passed for inspection to the registered + * handler the handler must ONLY inspect the packet, it may not free or reclaim + * the packet. 
+ */ +typedef enum htc_send_full_action (*HTC_EP_SEND_QUEUE_FULL)(void *, + HTC_PACKET *pPacket); + +struct htc_ep_callbacks { + /* context for each callback */ + void *pContext; + /* tx completion callback for connected endpoint */ + HTC_EP_SEND_PKT_COMPLETE EpTxComplete; + /* receive callback for connected endpoint */ + HTC_EP_RECV_PKT EpRecv; + /* OPTIONAL receive re-fill callback for connected endpoint */ + HTC_EP_RECV_REFILL EpRecvRefill; + /* OPTIONAL send full callback */ + HTC_EP_SEND_QUEUE_FULL EpSendFull; + /* OPTIONAL recv allocation callback */ + HTC_EP_RECV_ALLOC EpRecvAlloc; + /* OPTIONAL recv allocation callback based on a threshold */ + HTC_EP_RECV_ALLOC EpRecvAllocThresh; + /* OPTIONAL completion handler for multiple complete + * indications (EpTxComplete must be NULL) + */ + HTC_EP_SEND_PKT_COMP_MULTIPLE EpTxCompleteMultiple; + + HTC_EP_RESUME_TX_QUEUE ep_resume_tx_queue; + /* if EpRecvAllocThresh is non-NULL, HTC will compare the + * threshold value to the current recv packet length and invoke + * the EpRecvAllocThresh callback to acquire a packet buffer + */ + int RecvAllocThreshold; + /* if a EpRecvRefill handler is provided, this value + * can be used to set a trigger refill callback + * when the recv queue drops below this value + * if set to 0, the refill is only called when packets + * are empty + */ + int RecvRefillWaterMark; +}; + +/* service connection information */ +struct htc_service_connect_req { + /* service ID to connect to */ + HTC_SERVICE_ID service_id; + /* connection flags, see htc protocol definition */ + uint16_t ConnectionFlags; + /* ptr to optional service-specific meta-data */ + uint8_t *pMetaData; + /* optional meta data length */ + uint8_t MetaDataLength; + /* endpoint callbacks */ + struct htc_ep_callbacks EpCallbacks; + /* maximum depth of any send queue */ + int MaxSendQueueDepth; + /* HTC flags for the host-side (local) connection */ + uint32_t LocalConnectionFlags; + /* override max message size in send direction */ 
+ unsigned int MaxSendMsgSize; +}; + +/* enable send bundle padding for this endpoint */ +#define HTC_LOCAL_CONN_FLAGS_ENABLE_SEND_BUNDLE_PADDING (1 << 0) + +/* service connection response information */ +struct htc_service_connect_resp { + /* caller supplied buffer to optional meta-data */ + uint8_t *pMetaData; + /* length of caller supplied buffer */ + uint8_t BufferLength; + /* actual length of meta data */ + uint8_t ActualLength; + /* endpoint to communicate over */ + HTC_ENDPOINT_ID Endpoint; + /* max length of all messages over this endpoint */ + unsigned int MaxMsgLength; + /* connect response code from target */ + uint8_t ConnectRespCode; +}; + +/* endpoint distribution structure */ +struct htc_endpoint_credit_dist { + struct _htc_endpoint_credit_dist *pNext; + struct _htc_endpoint_credit_dist *pPrev; + /* Service ID (set by HTC) */ + HTC_SERVICE_ID service_id; + /* endpoint for this distribution struct (set by HTC) */ + HTC_ENDPOINT_ID Endpoint; + /* distribution flags, distribution function can + * set default activity using SET_EP_ACTIVE() macro + */ + uint32_t DistFlags; + /* credits for normal operation, anything above this + * indicates the endpoint is over-subscribed, this field + * is only relevant to the credit distribution function + */ + int TxCreditsNorm; + /* floor for credit distribution, this field is + * only relevant to the credit distribution function + */ + int TxCreditsMin; + /* number of credits assigned to this EP, this field + * is only relevant to the credit dist function + */ + int TxCreditsAssigned; + /* current credits available, this field is used by + * HTC to determine whether a message can be sent or + * must be queued + */ + int TxCredits; + /* pending credits to distribute on this endpoint, this + * is set by HTC when credit reports arrive. 
+ * The credit distribution functions sets this to zero + * when it distributes the credits + */ + int TxCreditsToDist; + /* this is the number of credits that the current pending TX + * packet needs to transmit. This is set by HTC when + * and endpoint needs credits in order to transmit + */ + int TxCreditsSeek; + /* size in bytes of each credit (set by HTC) */ + int TxCreditSize; + /* credits required for a maximum sized messages (set by HTC) */ + int TxCreditsPerMaxMsg; + /* reserved for HTC use */ + void *pHTCReserved; + /* current depth of TX queue , i.e. messages waiting for credits + * This field is valid only when HTC_CREDIT_DIST_ACTIVITY_CHANGE + * or HTC_CREDIT_DIST_SEND_COMPLETE is indicated on an endpoint + * that has non-zero credits to recover + */ + int TxQueueDepth; +}; + +#define HTC_EP_ACTIVE ((uint32_t) (1u << 31)) + +/* macro to check if an endpoint has gone active, useful for credit + * distributions */ +#define IS_EP_ACTIVE(epDist) ((epDist)->DistFlags & HTC_EP_ACTIVE) +#define SET_EP_ACTIVE(epDist) (epDist)->DistFlags |= HTC_EP_ACTIVE + +/* credit distibution code that is passed into the distrbution function, + * there are mandatory and optional codes that must be handled + */ +enum htc_credit_dist_reason { + /* credits available as a result of completed + * send operations (MANDATORY) resulting in credit reports + */ + HTC_CREDIT_DIST_SEND_COMPLETE = 0, + /* a change in endpoint activity occurred (OPTIONAL) */ + HTC_CREDIT_DIST_ACTIVITY_CHANGE = 1, + /* an endpoint needs to "seek" credits (OPTIONAL) */ + HTC_CREDIT_DIST_SEEK_CREDITS, + /* for debugging, dump any state information that is kept by + * the distribution function + */ + HTC_DUMP_CREDIT_STATE +}; + +typedef void (*HTC_CREDIT_DIST_CALLBACK)(void *Context, + struct htc_endpoint_credit_dist * + pEPList, + enum htc_credit_dist_reason + Reason); + +typedef void (*HTC_CREDIT_INIT_CALLBACK)(void *Context, + struct htc_endpoint_credit_dist * + pEPList, int TotalCredits); + +/* endpoint 
statistics action */ +enum htc_endpoint_stat_action { + /* only read statistics */ + HTC_EP_STAT_SAMPLE = 0, + /* sample and immediately clear statistics */ + HTC_EP_STAT_SAMPLE_AND_CLEAR = 1, + /* clear only */ + HTC_EP_STAT_CLEAR +}; + +/* endpoint statistics */ +struct htc_endpoint_stats { + /* number of TX packets posted to the endpoint */ + uint32_t TxPosted; + /* number of times the host set the credit-low flag in a send message on + * this endpoint + */ + uint32_t TxCreditLowIndications; + /* running count of total TX packets issued */ + uint32_t TxIssued; + /* running count of TX packets that were issued in bundles */ + uint32_t TxPacketsBundled; + /* running count of TX bundles that were issued */ + uint32_t TxBundles; + /* tx packets that were dropped */ + uint32_t TxDropped; + /* running count of total credit reports received for this endpoint */ + uint32_t TxCreditRpts; + /* credit reports received from this endpoint's RX packets */ + uint32_t TxCreditRptsFromRx; + /* credit reports received from RX packets of other endpoints */ + uint32_t TxCreditRptsFromOther; + /* credit reports received from endpoint 0 RX packets */ + uint32_t TxCreditRptsFromEp0; + /* count of credits received via Rx packets on this endpoint */ + uint32_t TxCreditsFromRx; + /* count of credits received via another endpoint */ + uint32_t TxCreditsFromOther; + /* count of credits received via another endpoint */ + uint32_t TxCreditsFromEp0; + /* count of consummed credits */ + uint32_t TxCreditsConsummed; + /* count of credits returned */ + uint32_t TxCreditsReturned; + /* count of RX packets received */ + uint32_t RxReceived; + /* count of lookahead records + * found in messages received on this endpoint + */ + uint32_t RxLookAheads; + /* count of recv packets received in a bundle */ + uint32_t RxPacketsBundled; + /* count of number of bundled lookaheads */ + uint32_t RxBundleLookAheads; + /* count of the number of bundle indications from the HTC header */ + uint32_t 
RxBundleIndFromHdr; + /* number of times the recv allocation threshold was hit */ + uint32_t RxAllocThreshHit; + /* total number of bytes */ + uint32_t RxAllocThreshBytes; +}; + +/* ------ Function Prototypes ------ */ +/** + * htc_create - Create an instance of HTC over the underlying HIF device + * @HifDevice: hif device handle, + * @pInfo: initialization information + * @osdev: QDF device structure + * @con_mode: driver connection mode + * + * Return: HTC_HANDLE on success, NULL on failure + */ +HTC_HANDLE htc_create(void *HifDevice, struct htc_init_info *pInfo, + qdf_device_t osdev, uint32_t con_mode); + +/** + * htc_get_hif_device - Get the underlying HIF device handle + * @HTCHandle: handle passed into the AddInstance callback + * + * Return: opaque HIF device handle usable in HIF API calls. + */ +void *htc_get_hif_device(HTC_HANDLE HTCHandle); + +/** + * htc_set_credit_distribution - Set credit distribution parameters + * @HTCHandle: HTC handle + * @pCreditDistCont: caller supplied context to pass into distribution functions + * @CreditDistFunc: Distribution function callback + * @CreditDistInit: Credit Distribution initialization callback + * @ServicePriorityOrder: Array containing list of service IDs, lowest index + * @is highestpriority: ListLength - number of elements in ServicePriorityOrder + * + * The user can set a custom credit distribution function to handle + * special requirementsfor each endpoint. A default credit distribution + * routine can be used by setting CreditInitFunc to NULL. The default + * credit distribution is only provided for simple "fair" credit distribution + * without regard to any prioritization. 
+ * Return: None + */ +void htc_set_credit_distribution(HTC_HANDLE HTCHandle, + void *pCreditDistContext, + HTC_CREDIT_DIST_CALLBACK CreditDistFunc, + HTC_CREDIT_INIT_CALLBACK CreditInitFunc, + HTC_SERVICE_ID ServicePriorityOrder[], + int ListLength); + +/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + * Wait for the target to indicate the HTC layer is ready + * htc_wait_target + * @HTCHandle - HTC handle + * + * This API blocks until the target responds with an HTC ready message. + * The caller should not connect services until the target has indicated it is + * ready. + * Return: None + */ +QDF_STATUS htc_wait_target(HTC_HANDLE HTCHandle); + +/** + * htc_start - Start target service communications + * @HTCHandle - HTC handle + * + * This API indicates to the target that the service connection phase + * is completeand the target can freely start all connected services. This + * API should only be called AFTER all service connections have been made. + * TCStart will issue a SETUP_COMPLETE message to the target to indicate that + * all service connections have been made and the target can start + * communicating over the endpoints. + * Return: None + */ +QDF_STATUS htc_start(HTC_HANDLE HTCHandle); + +/** + * htc_connect_service - Connect to an HTC service + * @HTCHandle - HTC handle + * @pReq - connection details + * @pResp - connection response + * + * Service connections must be performed before htc_start. + * User provides callback handlersfor various endpoint events. + * Return: None + */ +QDF_STATUS htc_connect_service(HTC_HANDLE HTCHandle, + struct htc_service_connect_req *pReq, + struct htc_service_connect_resp *pResp); + +/** + * htc_dump - HTC register log dump + * @HTCHandle - HTC handle + * @CmdId - Log command + * @start - start/print logs + * + * Register logs will be started/printed/ be flushed. 
+ * Return: None + */ +void htc_dump(HTC_HANDLE HTCHandle, uint8_t CmdId, bool start); + +/** + * htc_send_pkt - Send an HTC packet + * @HTCHandle - HTC handle + * @pPacket - packet to send + * + * Caller must initialize packet using SET_HTC_PACKET_INFO_TX() macro. + * This interface is fully asynchronous. On error, HTC SendPkt will + * call the registered Endpoint callback to cleanup the packet. + * Return: QDF_STATUS_SUCCESS + */ +QDF_STATUS htc_send_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket); + +/** + * htc_send_data_pkt - Send an HTC packet containing a tx descriptor and data + * @HTCHandle - HTC handle + * @pPacket - packet to send + * + * Caller must initialize packet using SET_HTC_PACKET_INFO_TX() macro. + * Caller must provide headroom in an initial fragment added to the + * network buffer to store a HTC_FRAME_HDR. + * This interface is fully asynchronous. On error, htc_send_data_pkt will + * call the registered Endpoint EpDataTxComplete callback to cleanup + * the packet. + * Return: A_OK + */ +#ifdef ATH_11AC_TXCOMPACT +QDF_STATUS htc_send_data_pkt(HTC_HANDLE HTCHandle, qdf_nbuf_t netbuf, + int Epid, int ActualLength); +#else /*ATH_11AC_TXCOMPACT */ +QDF_STATUS htc_send_data_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket, + uint8_t more_data); +#endif /*ATH_11AC_TXCOMPACT */ + +/** + * htc_flush_surprise_remove - Flush HTC when target is removed surprisely + * service communications + * @HTCHandle - HTC handle + * + * All receive and pending TX packets will be flushed. + * Return: None + */ +void htc_flush_surprise_remove(HTC_HANDLE HTCHandle); + +/** + * htc_stop - Stop HTC service communications + * @HTCHandle - HTC handle + * + * HTC communications is halted. All receive and pending TX packets + * will be flushed. + * Return: None + */ +void htc_stop(HTC_HANDLE HTCHandle); + +/** + * htc_destroy - Destroy HTC service + * @HTCHandle - HTC handle + * + * This cleans up all resources allocated by htc_create(). 
+ * Return: None + */ +void htc_destroy(HTC_HANDLE HTCHandle); + +/** + * htc_flush_endpoint - Flush pending TX packets + * @HTCHandle - HTC handle + * @Endpoint - Endpoint to flush + * @Tag - flush tag + * + * The Tag parameter is used to selectively flush packets with matching + * tags. The value of 0 forces all packets to be flush regardless of tag + * Return: None + */ +void htc_flush_endpoint(HTC_HANDLE HTCHandle, HTC_ENDPOINT_ID Endpoint, + HTC_TX_TAG Tag); +/** + * htc_dump_credit_states - Dump credit distribution state + * @HTCHandle - HTC handle + * + * This dumps all credit distribution information to the debugger + * Return: None + */ +void htc_dump_credit_states(HTC_HANDLE HTCHandle); + +/** + * htc_indicate_activity_change - Indicate a traffic activity change on an + * endpoint + * @HTCHandle - HTC handle + * @Endpoint - endpoint in which activity has changed + * @Active - true if active, false if it has become inactive + * + * This triggers the registered credit distribution function to + * re-adjust credits for active/inactive endpoints. + * Return: None + */ +void htc_indicate_activity_change(HTC_HANDLE HTCHandle, + HTC_ENDPOINT_ID Endpoint, bool Active); + +/** + * htc_get_endpoint_statistics - Get endpoint statistics + * @HTCHandle - HTC handle + * @Endpoint - Endpoint identifier + * @Action - action to take with statistics + * @pStats - statistics that were sampled (can be NULL if Action is + * HTC_EP_STAT_CLEAR) + * + * Statistics is a compile-time option and this function may return + * false if HTC is not compiled with profiling. + * The caller can specify the statistic "action" to take when sampling + * the statistics. This includes : + * HTC_EP_STAT_SAMPLE : The pStats structure is filled with the current + * values. + * HTC_EP_STAT_SAMPLE_AND_CLEAR : The structure is filled and the current + * statisticsare cleared. 
+ * HTC_EP_STAT_CLEA : the statistics are cleared, the called can pass + * a NULL value for pStats + * Return: true if statistics profiling is enabled, otherwise false. + */ +bool htc_get_endpoint_statistics(HTC_HANDLE HTCHandle, + HTC_ENDPOINT_ID Endpoint, + enum htc_endpoint_stat_action Action, + struct htc_endpoint_stats *pStats); + +/** + * htc_unblock_recv - Unblock HTC message reception + * @HTCHandle - HTC handle + * + * HTC will block the receiver if the EpRecvAlloc callback fails to provide a + * packet. The caller can use this API to indicate to HTC when resources + * (buffers) are available such that the receiver can be unblocked and HTC + * may re-attempt fetching the pending message. + * This API is not required if the user uses the EpRecvRefill callback or uses + * the HTCAddReceivePacket()API to recycle or provide receive packets to HTC. + * Return: None + */ +void htc_unblock_recv(HTC_HANDLE HTCHandle); + +/** + * htc_add_receive_pkt_multiple - Add multiple receive packets to HTC + * @HTCHandle - HTC handle + * @pPktQueue - HTC receive packet queue holding packets to add + * + * User must supply HTC packets for capturing incoming HTC frames. + * The caller mmust initialize each HTC packet using the + * SET_HTC_PACKET_INFO_RX_REFILL() macro. The queue must only contain + * recv packets for the same endpoint. Caller supplies a pointer to an + * HTC_PACKET_QUEUE structure holding the recv packet. This API will + * remove the packets from the pkt queue and place them into internal + * recv packet list. + * The caller may allocate the pkt queue on the stack to hold the pkts. 
+ * Return: A_OK on success + */ +A_STATUS htc_add_receive_pkt_multiple(HTC_HANDLE HTCHandle, + HTC_PACKET_QUEUE *pPktQueue); + +/** + * htc_is_endpoint_active - Check if an endpoint is marked active + * @HTCHandle - HTC handle + * @Endpoint - endpoint to check for active state + * + * Return: returns true if Endpoint is Active + */ +bool htc_is_endpoint_active(HTC_HANDLE HTCHandle, + HTC_ENDPOINT_ID Endpoint); + +/** + * htc_set_nodrop_pkt - Set up nodrop pkt flag for mboxping nodrop pkt + * @HTCHandle - HTC handle + * @isNodropPkt - indicates whether it is nodrop pkt + * + * Return: None + * + */ +void htc_set_nodrop_pkt(HTC_HANDLE HTCHandle, A_BOOL isNodropPkt); + +/** + * htc_enable_hdr_length_check - Set up htc_hdr_length_check flag + * @HTCHandle - HTC handle + * @htc_hdr_length_check - flag to indicate whether htc header length check is + * required + * + * Return: None + * + */ +void +htc_enable_hdr_length_check(HTC_HANDLE htc_handle, bool htc_hdr_length_check); + +/** + * htc_get_num_recv_buffers - Get the number of recv buffers currently queued + * into an HTC endpoint + * @HTCHandle - HTC handle + * @Endpoint - endpoint to check + * + * Return: returns number of buffers in queue + * + */ +int htc_get_num_recv_buffers(HTC_HANDLE HTCHandle, + HTC_ENDPOINT_ID Endpoint); + +/** + * htc_set_target_failure_callback - Set the target failure handling callback + * in HTC layer + * @HTCHandle - HTC handle + * @Callback - target failure handling callback + * + * Return: None + */ +void htc_set_target_failure_callback(HTC_HANDLE HTCHandle, + HTC_TARGET_FAILURE Callback); + +/* internally used functions for testing... 
*/ +void htc_enable_recv(HTC_HANDLE HTCHandle); +void htc_disable_recv(HTC_HANDLE HTCHandle); +A_STATUS HTCWaitForPendingRecv(HTC_HANDLE HTCHandle, + uint32_t TimeoutInMs, + bool *pbIsRecvPending); + +/* function to fetch stats from htc layer*/ +struct ol_ath_htc_stats *ieee80211_ioctl_get_htc_stats(HTC_HANDLE + HTCHandle); +/** + * htc_get_tx_queue_depth() - get the tx queue depth of an htc endpoint + * @htc_handle: htc handle + * @enpoint_id: endpoint to check + * + * Return: htc_handle tx queue depth + */ +int htc_get_tx_queue_depth(HTC_HANDLE *htc_handle, HTC_ENDPOINT_ID endpoint_id); + +#ifdef WLAN_FEATURE_FASTPATH +void htc_ctrl_msg_cmpl(HTC_HANDLE htc_pdev, HTC_ENDPOINT_ID htc_ep_id); + +#define HTC_TX_DESC_FILL(_htc_tx_desc, _download_len, _ep_id, _seq_no) \ +do { \ + HTC_WRITE32((_htc_tx_desc), \ + SM((_download_len), HTC_FRAME_HDR_PAYLOADLEN) | \ + SM((_ep_id), HTC_FRAME_HDR_ENDPOINTID)); \ + HTC_WRITE32((uint32_t *)(_htc_tx_desc) + 1, \ + SM((_seq_no), HTC_FRAME_HDR_CONTROLBYTES1)); \ +} while (0) +#endif /* WLAN_FEATURE_FASTPATH */ + +#ifdef __cplusplus +} +#endif +void htc_get_control_endpoint_tx_host_credits(HTC_HANDLE HTCHandle, + int *credit); +void htc_dump_counter_info(HTC_HANDLE HTCHandle); +void *htc_get_targetdef(HTC_HANDLE htc_handle); +#ifdef FEATURE_RUNTIME_PM +int htc_runtime_suspend(HTC_HANDLE htc_ctx); +int htc_runtime_resume(HTC_HANDLE htc_ctx); +#endif +void htc_global_credit_flow_disable(void); +void htc_global_credit_flow_enable(void); + +/* Disable ASPM : Disable PCIe low power */ +bool htc_can_suspend_link(HTC_HANDLE HTCHandle); +void htc_vote_link_down(HTC_HANDLE HTCHandle); +void htc_vote_link_up(HTC_HANDLE HTCHandle); +#ifdef IPA_OFFLOAD +void htc_ipa_get_ce_resource(HTC_HANDLE htc_handle, + qdf_shared_mem_t **ce_sr, + uint32_t *ce_sr_ring_size, + qdf_dma_addr_t *ce_reg_paddr); +#else +#define htc_ipa_get_ce_resource(htc_handle, \ + ce_sr, ce_sr_ring_size, ce_reg_paddr) /* NO-OP */ +#endif /* IPA_OFFLOAD */ + +#if 
defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT) + +/** + * htc_dump_bundle_stats() - dump tx and rx htc message bundle stats + * @HTCHandle: htc handle + * + * Return: None + */ +void htc_dump_bundle_stats(HTC_HANDLE HTCHandle); + +/** + * htc_clear_bundle_stats() - clear tx and rx htc message bundle stats + * @HTCHandle: htc handle + * + * Return: None + */ +void htc_clear_bundle_stats(HTC_HANDLE HTCHandle); +#endif + +#ifdef FEATURE_RUNTIME_PM +int htc_pm_runtime_get(HTC_HANDLE htc_handle); +int htc_pm_runtime_put(HTC_HANDLE htc_handle); +#else +static inline int htc_pm_runtime_get(HTC_HANDLE htc_handle) { return 0; } +static inline int htc_pm_runtime_put(HTC_HANDLE htc_handle) { return 0; } +#endif + +/** + * htc_set_async_ep() - set async HTC end point + * user should call this function after htc_connect_service before + * queing any packets to end point + * @HTCHandle: htc handle + * @HTC_ENDPOINT_ID: end point id + * @value: true or false + * + * Return: None + */ + +void htc_set_async_ep(HTC_HANDLE HTCHandle, + HTC_ENDPOINT_ID htc_ep_id, bool value); + +/** + * htc_set_wmi_endpoint_count: Set number of WMI endpoint + * @htc_handle: HTC handle + * @wmi_ep_count: WMI enpoint count + * + * return: None + */ +void htc_set_wmi_endpoint_count(HTC_HANDLE htc_handle, uint8_t wmi_ep_count); + +/** + * htc_get_wmi_endpoint_count: Get number of WMI endpoint + * @htc_handle: HTC handle + * + * return: WMI enpoint count + */ +uint8_t htc_get_wmi_endpoint_count(HTC_HANDLE htc_handle); + +/** + * htc_print_credit_history: print HTC credit history in buffer + * @htc: HTC handle + * @count: Number of lines to be copied + * @print: Print callback to print in the buffer + * @print_priv: any data required by the print method, e.g. 
a file handle + * + * return: None + */ +#ifdef FEATURE_HTC_CREDIT_HISTORY +void htc_print_credit_history(HTC_HANDLE htc, uint32_t count, + qdf_abstract_print * print, void *print_priv); +#else +static inline +void htc_print_credit_history(HTC_HANDLE htc, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + print(print_priv, "HTC Credit History Feature is disabled"); +} +#endif +#endif /* _HTC_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_credit_history.c b/drivers/staging/qca-wifi-host-cmn/htc/htc_credit_history.c new file mode 100644 index 0000000000000000000000000000000000000000..4e067201232714b2a4cba15a102a91d3d0afb0c5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_credit_history.c @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "htc_debug.h" +#include "htc_internal.h" +#include "htc_credit_history.h" +#include + +struct HTC_CREDIT_HISTORY { + enum htc_credit_exchange_type type; + uint64_t time; + uint32_t tx_credit; + uint32_t htc_tx_queue_depth; +}; + +static qdf_spinlock_t g_htc_credit_lock; +static uint32_t g_htc_credit_history_idx; +static uint32_t g_htc_credit_history_length; +static +struct HTC_CREDIT_HISTORY htc_credit_history_buffer[HTC_CREDIT_HISTORY_MAX]; + + +#ifdef QCA_WIFI_NAPIER_EMULATION +#define HTC_EMULATION_DELAY_IN_MS 20 +/** + * htc_add_delay(): Adds a delay in before proceeding, only for emulation + * + * Return: None + */ +static inline void htc_add_emulation_delay(void) +{ + qdf_mdelay(HTC_EMULATION_DELAY_IN_MS); +} +#else +static inline void htc_add_emulation_delay(void) +{ +} +#endif + +void htc_credit_history_init(void) +{ + qdf_spinlock_create(&g_htc_credit_lock); + g_htc_credit_history_idx = 0; + g_htc_credit_history_length = 0; +} + +/** + * htc_credit_record() - records tx que state & credit transactions + * @type: type of echange can be HTC_REQUEST_CREDIT + * or HTC_PROCESS_CREDIT_REPORT + * @tx_credits: current number of tx_credits + * @htc_tx_queue_depth: current hct tx queue depth + * + * This function records the credits and pending commands whenever a command is + * sent or credits are returned. Call this after the credits have been updated + * according to the transaction. Call this before dequeing commands. + * + * Consider making this function accept an HTC_ENDPOINT and find the current + * credits and queue depth itself. 
+ * + */ +void htc_credit_record(enum htc_credit_exchange_type type, uint32_t tx_credit, + uint32_t htc_tx_queue_depth) +{ + qdf_spin_lock_bh(&g_htc_credit_lock); + if (g_htc_credit_history_idx >= HTC_CREDIT_HISTORY_MAX) + g_htc_credit_history_idx = 0; + + htc_credit_history_buffer[g_htc_credit_history_idx].type = type; + htc_credit_history_buffer[g_htc_credit_history_idx].time = + qdf_get_log_timestamp(); + htc_credit_history_buffer[g_htc_credit_history_idx].tx_credit = + tx_credit; + htc_credit_history_buffer[g_htc_credit_history_idx].htc_tx_queue_depth = + htc_tx_queue_depth; + + g_htc_credit_history_idx++; + g_htc_credit_history_length++; + htc_add_emulation_delay(); + qdf_spin_unlock_bh(&g_htc_credit_lock); +} + +void htc_print_credit_history(HTC_HANDLE htc, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + uint32_t idx; + + print(print_priv, "HTC Credit History (count %u)", count); + qdf_spin_lock_bh(&g_htc_credit_lock); + + if (count > HTC_CREDIT_HISTORY_MAX) + count = HTC_CREDIT_HISTORY_MAX; + if (count > g_htc_credit_history_length) + count = g_htc_credit_history_length; + + /* subtract count from index, and wrap if necessary */ + idx = HTC_CREDIT_HISTORY_MAX + g_htc_credit_history_idx - count; + idx %= HTC_CREDIT_HISTORY_MAX; + + print(print_priv, + "Time (seconds) Type Credits Queue Depth"); + while (count) { + struct HTC_CREDIT_HISTORY *hist = + &htc_credit_history_buffer[idx]; + uint64_t secs, usecs; + + qdf_log_timestamp_to_secs(hist->time, &secs, &usecs); + print(print_priv, "% 8lld.%06lld %-25s %-7.d %d", + secs, + usecs, + htc_credit_exchange_type_str(hist->type), + hist->tx_credit, + hist->htc_tx_queue_depth); + + --count; + ++idx; + if (idx >= HTC_CREDIT_HISTORY_MAX) + idx = 0; + } + + qdf_spin_unlock_bh(&g_htc_credit_lock); +} diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_credit_history.h b/drivers/staging/qca-wifi-host-cmn/htc/htc_credit_history.h new file mode 100644 index 
0000000000000000000000000000000000000000..78b284eaac3e470611045c390a3f3395fc096a31 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_credit_history.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _HTC_CREDIT_HISTORY_H_ +#define _HTC_CREDIT_HISTORY_H_ + +#include "htc_internal.h" + +#ifdef FEATURE_HTC_CREDIT_HISTORY + +/** + * htc_credit_history_init(): Init helper function to initialize HTC credit + * history buffers and variable. 
+ * Return: None + */ + +void htc_credit_history_init(void); +void htc_credit_record(enum htc_credit_exchange_type type, uint32_t tx_credit, + uint32_t htc_tx_queue_depth); + +#else /* FEATURE_HTC_CREDIT_HISTORY */ + +static inline +void htc_credit_history_init(void) +{ +} + +static inline +void htc_credit_record(enum htc_credit_exchange_type type, uint32_t tx_credit, + uint32_t htc_tx_queue_depth) +{ } +#endif /* FEATURE_HTC_CREDIT_HISTORY */ +#endif /* _HTC_CREDIT_HISTORY_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_debug.h b/drivers/staging/qca-wifi-host-cmn/htc/htc_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..9ba247f7e394a5a57bc95ca21d15f775945198de --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_debug.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2013-2014 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef HTC_DEBUG_H_ +#define HTC_DEBUG_H_ + +#define ATH_MODULE_NAME htc +#include "a_debug.h" +#include "qdf_trace.h" + +/* ------- Debug related stuff ------- */ + +#define ATH_DEBUG_SEND ATH_DEBUG_MAKE_MODULE_MASK(0) +#define ATH_DEBUG_RECV ATH_DEBUG_MAKE_MODULE_MASK(1) +#define ATH_DEBUG_SYNC ATH_DEBUG_MAKE_MODULE_MASK(2) +#define ATH_DEBUG_DUMP ATH_DEBUG_MAKE_MODULE_MASK(3) +#define ATH_DEBUG_SETUP ATH_DEBUG_MAKE_MODULE_MASK(4) +#define HTC_ERROR(args ...) \ + QDF_TRACE(QDF_MODULE_ID_HTC, QDF_TRACE_LEVEL_ERROR, ## args) +#define HTC_WARN(args ...) \ + QDF_TRACE(QDF_MODULE_ID_HTC, QDF_TRACE_LEVEL_WARN, ## args) +#define HTC_INFO(args ...) \ + QDF_TRACE(QDF_MODULE_ID_HTC, QDF_TRACE_LEVEL_INFO, ## args) +#define HTC_TRACE(args ...) \ + QDF_TRACE(QDF_MODULE_ID_HTC, QDF_TRACE_LEVEL_DEBUG, ## args) +#endif /*HTC_DEBUG_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_internal.h b/drivers/staging/qca-wifi-host-cmn/htc/htc_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..0bad02f96fdc5439cc5a82529bd781933a80263b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_internal.h @@ -0,0 +1,406 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _HTC_INTERNAL_H_ +#define _HTC_INTERNAL_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "htc_api.h" +#include "htc_packet.h" +#include +#include +#include +#include +#include +#include +#include +#include + +/* HTC operational parameters */ +#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */ +#define HTC_TARGET_DEBUG_INTR_MASK 0x01 +#define HTC_TARGET_CREDIT_INTR_MASK 0xF0 +#define HTC_MIN_MSG_PER_BUNDLE 2 +#if defined(HIF_USB) +#define HTC_MAX_MSG_PER_BUNDLE_RX 11 +#define HTC_MAX_MSG_PER_BUNDLE_TX 8 +#else +#define HTC_MAX_MSG_PER_BUNDLE_RX 64 +#define HTC_MAX_MSG_PER_BUNDLE 16 +#define HTC_MAX_MSG_PER_BUNDLE_TX 32 +#endif + +#ifdef HIF_SDIO +#define UPDATE_ALT_CREDIT(tar, val) (tar->AltDataCreditSize = (uint16_t) val) +#else +#define UPDATE_ALT_CREDIT(tar, val) /* no-op */ +#endif + +/* + * HTC_MAX_TX_BUNDLE_SEND_LIMIT - + * This value is in units of tx frame fragments. + * It needs to be at least as large as the maximum number of tx frames in a + * HTC download bundle times the average number of fragments in each such frame + * (In certain operating systems, such as Linux, we expect to only have + * a single fragment per frame anyway.) 
+ */ +#define HTC_MAX_TX_BUNDLE_SEND_LIMIT 255 + +#define HTC_PACKET_CONTAINER_ALLOCATION 32 +#define NUM_CONTROL_TX_BUFFERS 2 +#define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CONTROL_MESSAGE_LENGTH + \ + HTC_HDR_LENGTH) +#define HTC_CONTROL_BUFFER_ALIGN 32 +#define HTC_TARGET_RESPONSE_POLL_MS 10 +#if !defined(A_SIMOS_DEVHOST) +#define HTC_TARGET_MAX_RESPONSE_POLL 200 /* actual HW */ +#else +#define HTC_TARGET_MAX_RESPONSE_POLL 600 /* host + target simulation */ +#endif + +#define HTC_SERVICE_TX_PACKET_TAG HTC_TX_PACKET_TAG_INTERNAL + +#ifndef HTC_CREDIT_HISTORY_MAX +#define HTC_CREDIT_HISTORY_MAX 1024 +#endif + +#define HTC_IS_EPPING_ENABLED(_x) ((_x) == QDF_GLOBAL_EPPING_MODE) + +enum htc_credit_exchange_type { + HTC_REQUEST_CREDIT, + HTC_PROCESS_CREDIT_REPORT, + HTC_SUSPEND_ACK, + HTC_SUSPEND_NACK, + HTC_INITIAL_WAKE_UP, +}; + +static inline const char* +htc_credit_exchange_type_str(enum htc_credit_exchange_type type) +{ + switch (type) { + case HTC_REQUEST_CREDIT: + return "HTC_REQUEST_CREDIT"; + case HTC_PROCESS_CREDIT_REPORT: + return "HTC_PROCESS_CREDIT_REPORT"; + case HTC_SUSPEND_ACK: + return "HTC_SUSPEND_ACK"; + case HTC_SUSPEND_NACK: + return "HTC_SUSPEND_NACK"; + case HTC_INITIAL_WAKE_UP: + return "HTC_INITIAL_WAKE_UP"; + default: + return "Unknown htc_credit_exchange_type"; + } +} + +typedef struct _HTC_ENDPOINT { + HTC_ENDPOINT_ID Id; + + /* service ID this endpoint is bound to + * non-zero value means this endpoint is in use + */ + HTC_SERVICE_ID service_id; + + /* callbacks associated with this endpoint */ + struct htc_ep_callbacks EpCallBacks; + /* HTC frame buffer TX queue */ + HTC_PACKET_QUEUE TxQueue; + /* max depth of the TX queue before calling driver's full handler */ + int MaxTxQueueDepth; + /* max length of endpoint message */ + int MaxMsgLength; + uint8_t UL_PipeID; + uint8_t DL_PipeID; + /* Need to call HIF to get tx completion callbacks? 
*/ + int ul_is_polled; + qdf_timer_t ul_poll_timer; + int ul_poll_timer_active; + int ul_outstanding_cnt; + /* Need to call HIF to fetch rx? (Not currently supported.) */ + int dl_is_polled; + /* not currently supported */ + /* qdf_timer_t dl_poll_timer; */ + + /* lookup queue to match netbufs to htc packets */ + HTC_PACKET_QUEUE TxLookupQueue; + /* temporary hold queue for back compatibility */ + HTC_PACKET_QUEUE RxBufferHoldQueue; + /* TX seq no (helpful) for debugging */ + uint8_t SeqNo; + /* serialization */ + qdf_atomic_t TxProcessCount; + struct _HTC_TARGET *target; + /* TX credits available on this endpoint */ + int TxCredits; + /* size in bytes of each credit (set by HTC) */ + int TxCreditSize; + /* credits required per max message (precalculated) */ + int TxCreditsPerMaxMsg; +#ifdef HTC_EP_STAT_PROFILING + /* endpoint statistics */ + struct htc_endpoint_stats endpoint_stats; +#endif + bool TxCreditFlowEnabled; + bool async_update; /* packets can be queued asynchronously */ + qdf_spinlock_t lookup_queue_lock; +} HTC_ENDPOINT; + +#ifdef HTC_EP_STAT_PROFILING +#define INC_HTC_EP_STAT(p, stat, count) ((p)->endpoint_stats.stat += (count)) +#else +#define INC_HTC_EP_STAT(p, stat, count) +#endif + +struct htc_service_tx_credit_allocation { + uint16_t service_id; + uint8_t CreditAllocation; +}; + +#define HTC_MAX_SERVICE_ALLOC_ENTRIES 8 + +/* Error codes for HTC layer packet stats*/ +enum ol_ath_htc_pkt_ecodes { + /* error- get packet at head of HTC_PACKET_Q */ + GET_HTC_PKT_Q_FAIL = 0, + HTC_PKT_Q_EMPTY, + HTC_SEND_Q_EMPTY +}; +/* our HTC target state */ +typedef struct _HTC_TARGET { + struct hif_opaque_softc *hif_dev; + HTC_ENDPOINT endpoint[ENDPOINT_MAX]; + qdf_spinlock_t HTCLock; + qdf_spinlock_t HTCRxLock; + qdf_spinlock_t HTCTxLock; + qdf_spinlock_t HTCCreditLock; + uint32_t HTCStateFlags; + void *host_handle; + struct htc_init_info HTCInitInfo; + HTC_PACKET *pHTCPacketStructPool; /* pool of HTC packets */ + HTC_PACKET_QUEUE ControlBufferTXFreeList; + 
uint8_t CtrlResponseBuffer[HTC_MAX_CONTROL_MESSAGE_LENGTH]; + int CtrlResponseLength; + qdf_event_t ctrl_response_valid; + bool CtrlResponseProcessing; + int TotalTransmitCredits; + struct htc_service_tx_credit_allocation + ServiceTxAllocTable[HTC_MAX_SERVICE_ALLOC_ENTRIES]; + int TargetCreditSize; +#ifdef RX_SG_SUPPORT + qdf_nbuf_queue_t RxSgQueue; + bool IsRxSgInprogress; + uint32_t CurRxSgTotalLen; /* current total length */ + uint32_t ExpRxSgTotalLen; /* expected total length */ +#endif + qdf_device_t osdev; + struct ol_ath_htc_stats htc_pkt_stats; + HTC_PACKET *pBundleFreeList; + uint32_t ce_send_cnt; + uint32_t TX_comp_cnt; + uint8_t MaxMsgsPerHTCBundle; + qdf_work_t queue_kicker; + +#ifdef HIF_SDIO + uint16_t AltDataCreditSize; +#endif + uint32_t avail_tx_credits; +#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT) + uint32_t rx_bundle_stats[HTC_MAX_MSG_PER_BUNDLE_RX]; + uint32_t tx_bundle_stats[HTC_MAX_MSG_PER_BUNDLE_TX]; +#endif + + uint32_t con_mode; + + /* + * This flag is from the mboxping tool. It indicates that we cannot + * drop it. Besides, nodrop pkts have higher priority than normal pkts. + */ + A_BOOL is_nodrop_pkt; + + /* + * Number of WMI endpoints used. + * Default value is 1. But it should be overidden after htc_create to + * reflect the actual count. 
+ */ + uint8_t wmi_ep_count; + /* Flag to indicate whether htc header length check is required */ + bool htc_hdr_length_check; +} HTC_TARGET; + + +#ifdef RX_SG_SUPPORT +#define RESET_RX_SG_CONFIG(_target) \ +do { \ + _target->ExpRxSgTotalLen = 0; \ + _target->CurRxSgTotalLen = 0; \ + _target->IsRxSgInprogress = false; \ +} while (0) +#endif + +#define HTC_STATE_STOPPING (1 << 0) +#define HTC_STOPPING(t) ((t)->HTCStateFlags & HTC_STATE_STOPPING) +#define LOCK_HTC(t) qdf_spin_lock_bh(&(t)->HTCLock) +#define UNLOCK_HTC(t) qdf_spin_unlock_bh(&(t)->HTCLock) +#define LOCK_HTC_RX(t) qdf_spin_lock_bh(&(t)->HTCRxLock) +#define UNLOCK_HTC_RX(t) qdf_spin_unlock_bh(&(t)->HTCRxLock) +#define LOCK_HTC_TX(t) qdf_spin_lock_bh(&(t)->HTCTxLock) +#define UNLOCK_HTC_TX(t) qdf_spin_unlock_bh(&(t)->HTCTxLock) +#define LOCK_HTC_EP_TX_LOOKUP(t) qdf_spin_lock_bh(&(t)->lookup_queue_lock) +#define UNLOCK_HTC_EP_TX_LOOKUP(t) qdf_spin_unlock_bh(&(t)->lookup_queue_lock) + +#define GET_HTC_TARGET_FROM_HANDLE(hnd) ((HTC_TARGET *)(hnd)) + +#define IS_TX_CREDIT_FLOW_ENABLED(ep) ((ep)->TxCreditFlowEnabled) + +#define HTC_POLL_CLEANUP_PERIOD_MS 10 /* milliseconds */ + +/* Macro to Increment the HTC_PACKET_ERRORS for Tx.*/ +#define OL_ATH_HTC_PKT_ERROR_COUNT_INCR(_target, _ecode) \ + do { \ + if (_ecode == GET_HTC_PKT_Q_FAIL) \ + (_target->htc_pkt_stats.htc_get_pkt_q_fail_count) += 1; \ + if (_ecode == HTC_PKT_Q_EMPTY) \ + (_target->htc_pkt_stats.htc_pkt_q_empty_count) += 1; \ + if (_ecode == HTC_SEND_Q_EMPTY) \ + (_target->htc_pkt_stats.htc_send_q_empty_count) += 1; \ + } while (0) +/* internal HTC functions */ + +QDF_STATUS htc_rx_completion_handler(void *Context, qdf_nbuf_t netbuf, + uint8_t pipeID); +QDF_STATUS htc_tx_completion_handler(void *Context, qdf_nbuf_t netbuf, + unsigned int transferID, + uint32_t toeplitz_hash_result); + +HTC_PACKET *allocate_htc_bundle_packet(HTC_TARGET *target); +void free_htc_bundle_packet(HTC_TARGET *target, HTC_PACKET *pPacket); + +HTC_PACKET 
*allocate_htc_packet_container(HTC_TARGET *target); +void free_htc_packet_container(HTC_TARGET *target, HTC_PACKET *pPacket); +void htc_flush_rx_hold_queue(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint); +void htc_flush_endpoint_tx(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint, + HTC_TX_TAG Tag); + +/** + * htc_flush_endpoint_txlookupQ() - Flush EP's lookup queue + * @target: HTC target + * @endpoint_id: EP ID + * @call_ep_callback: whether to call EP tx completion callback + * + * Return: void + */ +void htc_flush_endpoint_txlookupQ(HTC_TARGET *target, + HTC_ENDPOINT_ID endpoint_id, + bool call_ep_callback); + +void htc_recv_init(HTC_TARGET *target); +QDF_STATUS htc_wait_recv_ctrl_message(HTC_TARGET *target); +void htc_free_control_tx_packet(HTC_TARGET *target, HTC_PACKET *pPacket); +HTC_PACKET *htc_alloc_control_tx_packet(HTC_TARGET *target); +uint8_t htc_get_credit_allocation(HTC_TARGET *target, uint16_t service_id); +void htc_tx_resource_avail_handler(void *context, uint8_t pipeID); +void htc_control_rx_complete(void *Context, HTC_PACKET *pPacket); +void htc_process_credit_rpt(HTC_TARGET *target, + HTC_CREDIT_REPORT *pRpt, + int NumEntries, HTC_ENDPOINT_ID FromEndpoint); +void htc_fw_event_handler(void *context, QDF_STATUS status); +void htc_send_complete_check_cleanup(void *context); +#ifdef FEATURE_RUNTIME_PM +void htc_kick_queues(void *context); +#endif + +static inline void htc_send_complete_poll_timer_stop(HTC_ENDPOINT * + pEndpoint) { + LOCK_HTC_TX(pEndpoint->target); + if (pEndpoint->ul_poll_timer_active) { + /* qdf_timer_stop(&pEndpoint->ul_poll_timer); */ + pEndpoint->ul_poll_timer_active = 0; + } + UNLOCK_HTC_TX(pEndpoint->target); +} + +static inline void htc_send_complete_poll_timer_start(HTC_ENDPOINT * + pEndpoint) { + LOCK_HTC_TX(pEndpoint->target); + if (pEndpoint->ul_outstanding_cnt + && !pEndpoint->ul_poll_timer_active) { + /* qdf_timer_start( + * &pEndpoint->ul_poll_timer, HTC_POLL_CLEANUP_PERIOD_MS); + */ + pEndpoint->ul_poll_timer_active = 1; + 
} + UNLOCK_HTC_TX(pEndpoint->target); +} + +static inline void +htc_send_complete_check(HTC_ENDPOINT *pEndpoint, int force) { + /* + * Stop the polling-cleanup timer that will result in a later call to + * this function. It may get started again below, if there are still + * outsending sends. + */ + htc_send_complete_poll_timer_stop(pEndpoint); + /* + * Check whether HIF has any prior sends that have finished, + * have not had the post-processing done. + */ + hif_send_complete_check(pEndpoint->target->hif_dev, + pEndpoint->UL_PipeID, force); + /* + * If there are still outstanding sends after polling, start a timer + * to check again a little later. + */ + htc_send_complete_poll_timer_start(pEndpoint); +} + +#ifdef __cplusplus +} +#endif + +#ifndef DEBUG_BUNDLE +#define DEBUG_BUNDLE 0 +#endif + +#if defined(HIF_SDIO) || defined(HIF_USB) +#ifndef ENABLE_BUNDLE_TX +#define ENABLE_BUNDLE_TX 1 +#endif + +#ifndef ENABLE_BUNDLE_RX +#define ENABLE_BUNDLE_RX 1 +#endif +#endif /*defined(HIF_SDIO) || defined(HIF_USB)*/ + +#if defined ENABLE_BUNDLE_TX +#define HTC_TX_BUNDLE_ENABLED(target) (target->MaxMsgsPerHTCBundle > 1) +#else +#define HTC_TX_BUNDLE_ENABLED(target) 0 +#endif + +#if defined ENABLE_BUNDLE_RX +#define HTC_RX_BUNDLE_ENABLED(target) (target->MaxMsgsPerHTCBundle > 1) +#else +#define HTC_RX_BUNDLE_ENABLED(target) 0 +#endif + +#define HTC_ENABLE_BUNDLE(target) (target->MaxMsgsPerHTCBundle > 1) + +#endif /* !_HTC_HOST_INTERNAL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_packet.h b/drivers/staging/qca-wifi-host-cmn/htc/htc_packet.h new file mode 100644 index 0000000000000000000000000000000000000000..a205ae454c3628345b9bf3776acd366f09a1fa85 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_packet.h @@ -0,0 +1,336 @@ +/* + * Copyright (c) 2013-2014, 2016-2017 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef HTC_PACKET_H_ +#define HTC_PACKET_H_ + +#include +#include "dl_list.h" + +/* ------ Endpoint IDS ------ */ +typedef enum { + ENDPOINT_UNUSED = -1, + ENDPOINT_0 = 0, + ENDPOINT_1 = 1, + ENDPOINT_2 = 2, + ENDPOINT_3, + ENDPOINT_4, + ENDPOINT_5, + ENDPOINT_6, + ENDPOINT_7, + ENDPOINT_8, + ENDPOINT_MAX, +} HTC_ENDPOINT_ID; + +struct _HTC_PACKET; + +typedef void (*HTC_PACKET_COMPLETION)(void *, struct _HTC_PACKET *); + +typedef uint16_t HTC_TX_TAG; + +/** + * struct htc_tx_packet_info - HTC TX packet information + * @Tag: tag used to selective flush packets + * @CreditsUsed: number of credits used for this TX packet (HTC internal) + * @SendFlags: send flags (HTC internal) + * @SeqNo: internal seq no for debugging (HTC internal) + * @Flags: Internal use + */ +struct htc_tx_packet_info { + HTC_TX_TAG Tag; + int CreditsUsed; + uint8_t SendFlags; + int SeqNo; + uint32_t Flags; +}; + +/** + * HTC_TX_PACKET_TAG_XXX - #defines for tagging packets for special handling + * HTC_TX_PACKET_TAG_ALL: zero is reserved and used to flush ALL packets + * HTC_TX_PACKET_TAG_INTERNAL: internal tags start here + * HTC_TX_PACKET_TAG_USER_DEFINED: user-defined tags start here + * HTC_TX_PACKET_TAG_BUNDLED: indicate this is a bundled tx 
packet + * HTC_TX_PACKET_TAG_AUTO_PM: indicate a power management wmi command + */ +#define HTC_TX_PACKET_TAG_ALL 0 +#define HTC_TX_PACKET_TAG_INTERNAL 1 +#define HTC_TX_PACKET_TAG_USER_DEFINED (HTC_TX_PACKET_TAG_INTERNAL + 9) +#define HTC_TX_PACKET_TAG_BUNDLED (HTC_TX_PACKET_TAG_USER_DEFINED + 1) +#define HTC_TX_PACKET_TAG_AUTO_PM (HTC_TX_PACKET_TAG_USER_DEFINED + 2) + +/* Tag packet for runtime put after sending */ +#define HTC_TX_PACKET_TAG_RUNTIME_PUT (HTC_TX_PACKET_TAG_USER_DEFINED + 3) + + +#define HTC_TX_PACKET_FLAG_FIXUP_NETBUF (1 << 0) + +/** + * struct htc_rx_packet_info - HTC RX Packet information + * @ExpectedHdr: HTC Internal use + * @HTCRxFlags: HTC Internal use + * @IndicationFlags: indication flags set on each RX packet indication + */ +struct htc_rx_packet_info { + uint32_t ExpectedHdr; + uint32_t HTCRxFlags; + uint32_t IndicationFlags; +}; + +/* more packets on this endpoint are being fetched */ +#define HTC_RX_FLAGS_INDICATE_MORE_PKTS (1 << 0) +#define HTC_PACKET_MAGIC_COOKIE 0xdeadbeef + +/* wrapper around endpoint-specific packets */ +/** + * struct _HTC_PACKET - HTC Packet data structure + * @ListLink: double link + * @pPktContext: caller's per packet specific context + * @pBufferStart: The true buffer start, the caller can store the real buffer + * start here. In receive callbacks, the HTC layer sets pBuffer + * to the start of the payload past the header. 
This field allows + * the caller to reset pBuffer when it recycles receive packets + * back to HTC + * @pBuffer: payload start (RX/TX) + * @BufferLength: length of buffer + * @ActualLength: actual length of payload + * @Endpoint: endpoint that this packet was sent/recv'd from + * @Status: completion status + * @PktInfo: Packet specific info + * @netbufOrigHeadRoom: Original head room of skb + * @Completion: completion + * @pContext: HTC private completion context + * @pNetBufContext: optimization for network-oriented data, the HTC packet can + * pass the network buffer corresponding to the HTC packet + * lower layers may optimized the transfer knowing this is a + * network buffer + * @magic_cookie: HTC Magic cookie + */ +typedef struct _HTC_PACKET { + DL_LIST ListLink; + void *pPktContext; + uint8_t *pBufferStart; + /* + * Pointer to the start of the buffer. In the transmit + * direction this points to the start of the payload. In the + * receive direction, however, the buffer when queued up + * points to the start of the HTC header but when returned + * to the caller points to the start of the payload + */ + uint8_t *pBuffer; + uint32_t BufferLength; + uint32_t ActualLength; + HTC_ENDPOINT_ID Endpoint; + QDF_STATUS Status; + union { + struct htc_tx_packet_info AsTx; + struct htc_rx_packet_info AsRx; + } PktInfo; + /* the following fields are for internal HTC use */ + uint32_t netbufOrigHeadRoom; + HTC_PACKET_COMPLETION Completion; + void *pContext; + void *pNetBufContext; + uint32_t magic_cookie; +} HTC_PACKET; + +#define COMPLETE_HTC_PACKET(p, status) \ + { \ + (p)->Status = (status); \ + (p)->Completion((p)->pContext, (p)); \ + } + +#define INIT_HTC_PACKET_INFO(p, b, len) \ + { \ + (p)->pBufferStart = (b); \ + (p)->BufferLength = (len); \ + } + +/* macro to set an initial RX packet for refilling HTC */ +#define SET_HTC_PACKET_INFO_RX_REFILL(p, c, b, len, ep) \ + do { \ + (p)->pPktContext = (c); \ + (p)->pBuffer = (b); \ + (p)->pBufferStart = (b); \ + 
(p)->BufferLength = (len); \ + (p)->Endpoint = (ep); \ + } while (0) + +/* fast macro to recycle an RX packet that will be re-queued to HTC */ +#define HTC_PACKET_RESET_RX(p) \ + { (p)->pBuffer = (p)->pBufferStart; (p)->ActualLength = 0; } + +/* macro to set packet parameters for TX */ +#define SET_HTC_PACKET_INFO_TX(p, c, b, len, ep, tag) \ + do { \ + (p)->pPktContext = (c); \ + (p)->pBuffer = (b); \ + (p)->ActualLength = (len); \ + (p)->Endpoint = (ep); \ + (p)->PktInfo.AsTx.Tag = (tag); \ + (p)->PktInfo.AsTx.Flags = 0; \ + (p)->PktInfo.AsTx.SendFlags = 0; \ + } while (0) + +#define SET_HTC_PACKET_NET_BUF_CONTEXT(p, nb) \ + { \ + (p)->pNetBufContext = (nb); \ + } + +#define GET_HTC_PACKET_NET_BUF_CONTEXT(p) (p)->pNetBufContext + +/* HTC Packet Queueing Macros */ +typedef struct _HTC_PACKET_QUEUE { + DL_LIST QueueHead; + int Depth; +} HTC_PACKET_QUEUE; + +/* initialize queue */ +#define INIT_HTC_PACKET_QUEUE(pQ) \ + { \ + DL_LIST_INIT(&(pQ)->QueueHead); \ + (pQ)->Depth = 0; \ + } + +/* enqueue HTC packet to the tail of the queue */ +#define HTC_PACKET_ENQUEUE(pQ, p) \ + { dl_list_insert_tail(&(pQ)->QueueHead, &(p)->ListLink); \ + (pQ)->Depth++; \ + } + +/* enqueue HTC packet to the tail of the queue */ +#define HTC_PACKET_ENQUEUE_TO_HEAD(pQ, p) \ + { dl_list_insert_head(&(pQ)->QueueHead, &(p)->ListLink); \ + (pQ)->Depth++; \ + } +/* test if a queue is empty */ +#define HTC_QUEUE_EMPTY(pQ) ((pQ)->Depth == 0) +/* get packet at head without removing it */ +static inline HTC_PACKET *htc_get_pkt_at_head(HTC_PACKET_QUEUE *queue) +{ + if (queue->Depth == 0) + return NULL; + + return A_CONTAINING_STRUCT((DL_LIST_GET_ITEM_AT_HEAD( + &queue->QueueHead)), + HTC_PACKET, ListLink); +} + +/* remove a packet from a queue, where-ever it is in the queue */ +#define HTC_PACKET_REMOVE(pQ, p) \ + { \ + dl_list_remove(&(p)->ListLink); \ + (pQ)->Depth--; \ + } + +/* dequeue an HTC packet from the head of the queue */ +static inline HTC_PACKET *htc_packet_dequeue(HTC_PACKET_QUEUE 
*queue) +{ + DL_LIST *pItem = dl_list_remove_item_from_head(&queue->QueueHead); + + if (pItem != NULL) { + queue->Depth--; + return A_CONTAINING_STRUCT(pItem, HTC_PACKET, ListLink); + } + return NULL; +} + +/* dequeue an HTC packet from the tail of the queue */ +static inline HTC_PACKET *htc_packet_dequeue_tail(HTC_PACKET_QUEUE *queue) +{ + DL_LIST *pItem = dl_list_remove_item_from_tail(&queue->QueueHead); + + if (pItem != NULL) { + queue->Depth--; + return A_CONTAINING_STRUCT(pItem, HTC_PACKET, ListLink); + } + return NULL; +} + +#define HTC_PACKET_QUEUE_DEPTH(pQ) (pQ)->Depth + +#define HTC_GET_ENDPOINT_FROM_PKT(p) (p)->Endpoint +#define HTC_GET_TAG_FROM_PKT(p) (p)->PktInfo.AsTx.Tag + +/* transfer the packets from one queue to the tail of another queue */ +#define HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(pQDest, pQSrc) \ + { \ + dl_list_transfer_items_to_tail(&(pQDest)->QueueHead, \ + &(pQSrc)->QueueHead); \ + (pQDest)->Depth += (pQSrc)->Depth; \ + (pQSrc)->Depth = 0; \ + } + +/* + * Transfer the packets from one queue to the head of another queue. + * This xfer_to_head(q1,q2) is basically equivalent to xfer_to_tail(q2,q1), + * but it updates the queue descriptor object for the initial queue to refer + * to the concatenated queue. 
/**
 * htc_packet_set_magic_cookie() - set magic cookie in htc packet
 * @htc_pkt: pointer to htc packet
 * @value: value to set in magic cookie
 *
 * This API sets the magic cookie passed in htc packet.
 *
 * Return: None
 */
static inline void htc_packet_set_magic_cookie(HTC_PACKET *htc_pkt,
					uint32_t value)
{
	htc_pkt->magic_cookie = value;
}

/**
 * htc_packet_get_magic_cookie() - get magic cookie in htc packet
 * @htc_pkt: pointer to htc packet
 *
 * This API returns the magic cookie in htc packet.
 *
 * Return: magic cookie
 */
static inline uint32_t htc_packet_get_magic_cookie(HTC_PACKET *htc_pkt)
{
	return htc_pkt->magic_cookie;
}
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "htc_debug.h" +#include "htc_internal.h" +#include "htc_credit_history.h" +#include /* qdf_nbuf_t */ + +/* HTC Control message receive timeout msec */ +#define HTC_CONTROL_RX_TIMEOUT 3000 + +#if defined(WLAN_DEBUG) || defined(DEBUG) +void debug_dump_bytes(uint8_t *buffer, uint16_t length, char *pDescription) +{ + int8_t stream[60]; + int8_t byteOffsetStr[10]; + uint32_t i; + uint16_t offset, count, byteOffset; + + A_PRINTF("<---------Dumping %d Bytes : %s ------>\n", length, + pDescription); + + count = 0; + offset = 0; + byteOffset = 0; + for (i = 0; i < length; i++) { + A_SNPRINTF(stream + offset, (sizeof(stream) - offset), + "%02X ", buffer[i]); + count++; + offset += 3; + + if (count == 16) { + count = 0; + offset = 0; + A_SNPRINTF(byteOffsetStr, sizeof(byteOffset), "%4.4X", + byteOffset); + A_PRINTF("[%s]: %s\n", byteOffsetStr, stream); + qdf_mem_zero(stream, 60); + byteOffset += 16; + } + } + + if (offset != 0) { + A_SNPRINTF(byteOffsetStr, sizeof(byteOffset), "%4.4X", + byteOffset); + A_PRINTF("[%s]: %s\n", byteOffsetStr, stream); + } + + A_PRINTF("<------------------------------------------------->\n"); +} +#else +void debug_dump_bytes(uint8_t *buffer, uint16_t length, char *pDescription) +{ +} +#endif 
+ +static A_STATUS htc_process_trailer(HTC_TARGET *target, + uint8_t *pBuffer, + int Length, HTC_ENDPOINT_ID FromEndpoint); + +static void do_recv_completion_pkt(HTC_ENDPOINT *pEndpoint, + HTC_PACKET *pPacket) +{ + if (pEndpoint->EpCallBacks.EpRecv == NULL) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("HTC ep %d has NULL recv callback on packet %pK\n", + pEndpoint->Id, + pPacket)); + if (pPacket) + qdf_nbuf_free(pPacket->pPktContext); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, + ("HTC calling ep %d recv callback on packet %pK\n", + pEndpoint->Id, pPacket)); + pEndpoint->EpCallBacks.EpRecv(pEndpoint->EpCallBacks.pContext, + pPacket); + } +} + +static void do_recv_completion(HTC_ENDPOINT *pEndpoint, + HTC_PACKET_QUEUE *pQueueToIndicate) +{ + HTC_PACKET *pPacket; + + if (HTC_QUEUE_EMPTY(pQueueToIndicate)) { + /* nothing to indicate */ + return; + } + + while (!HTC_QUEUE_EMPTY(pQueueToIndicate)) { + pPacket = htc_packet_dequeue(pQueueToIndicate); + do_recv_completion_pkt(pEndpoint, pPacket); + } +} + +void htc_control_rx_complete(void *Context, HTC_PACKET *pPacket) +{ + /* TODO, can't really receive HTC control messages yet.... 
*/ + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Invalid call to htc_control_rx_complete\n")); +} + +void htc_unblock_recv(HTC_HANDLE HTCHandle) +{ + /* TODO find the Need in new model */ +} + +void htc_enable_recv(HTC_HANDLE HTCHandle) +{ + + /* TODO find the Need in new model */ +} + +void htc_disable_recv(HTC_HANDLE HTCHandle) +{ + + /* TODO find the Need in new model */ +} + +int htc_get_num_recv_buffers(HTC_HANDLE HTCHandle, HTC_ENDPOINT_ID Endpoint) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + + HTC_ENDPOINT *pEndpoint = &target->endpoint[Endpoint]; + return HTC_PACKET_QUEUE_DEPTH(&pEndpoint->RxBufferHoldQueue); +} + +HTC_PACKET *allocate_htc_packet_container(HTC_TARGET *target) +{ + HTC_PACKET *pPacket; + + LOCK_HTC_RX(target); + + if (NULL == target->pHTCPacketStructPool) { + UNLOCK_HTC_RX(target); + return NULL; + } + + pPacket = target->pHTCPacketStructPool; + target->pHTCPacketStructPool = (HTC_PACKET *) pPacket->ListLink.pNext; + + UNLOCK_HTC_RX(target); + + pPacket->ListLink.pNext = NULL; + return pPacket; +} + +void free_htc_packet_container(HTC_TARGET *target, HTC_PACKET *pPacket) +{ + pPacket->ListLink.pPrev = NULL; + + LOCK_HTC_RX(target); + if (NULL == target->pHTCPacketStructPool) { + target->pHTCPacketStructPool = pPacket; + pPacket->ListLink.pNext = NULL; + } else { + pPacket->ListLink.pNext = + (DL_LIST *) target->pHTCPacketStructPool; + target->pHTCPacketStructPool = pPacket; + } + + UNLOCK_HTC_RX(target); +} + +#ifdef RX_SG_SUPPORT +qdf_nbuf_t rx_sg_to_single_netbuf(HTC_TARGET *target) +{ + qdf_nbuf_t skb; + uint8_t *anbdata; + uint8_t *anbdata_new; + uint32_t anblen; + qdf_nbuf_t new_skb = NULL; + uint32_t sg_queue_len; + qdf_nbuf_queue_t *rx_sg_queue = &target->RxSgQueue; + + sg_queue_len = qdf_nbuf_queue_len(rx_sg_queue); + + if (sg_queue_len <= 1) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("rx_sg_to_single_netbuf: invalid sg queue len %u\n")); + goto _failed; + } + + new_skb = qdf_nbuf_alloc(target->ExpRxSgTotalLen, 0, 4, false); 
+ if (new_skb == NULL) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("rx_sg_to_single_netbuf: can't allocate %u size netbuf\n", + target->ExpRxSgTotalLen)); + goto _failed; + } + + qdf_nbuf_peek_header(new_skb, &anbdata_new, &anblen); + + skb = qdf_nbuf_queue_remove(rx_sg_queue); + do { + qdf_nbuf_peek_header(skb, &anbdata, &anblen); + qdf_mem_copy(anbdata_new, anbdata, qdf_nbuf_len(skb)); + qdf_nbuf_put_tail(new_skb, qdf_nbuf_len(skb)); + anbdata_new += qdf_nbuf_len(skb); + qdf_nbuf_free(skb); + skb = qdf_nbuf_queue_remove(rx_sg_queue); + } while (skb != NULL); + + RESET_RX_SG_CONFIG(target); + return new_skb; + +_failed: + + while ((skb = qdf_nbuf_queue_remove(rx_sg_queue)) != NULL) + qdf_nbuf_free(skb); + + RESET_RX_SG_CONFIG(target); + return NULL; +} +#endif + +#ifdef CONFIG_WIN +#define HTC_MSG_NACK_SUSPEND 7 +#endif + +QDF_STATUS htc_rx_completion_handler(void *Context, qdf_nbuf_t netbuf, + uint8_t pipeID) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + HTC_FRAME_HDR *HtcHdr; + HTC_TARGET *target = (HTC_TARGET *) Context; + uint8_t *netdata; + uint32_t netlen; + HTC_ENDPOINT *pEndpoint; + HTC_PACKET *pPacket; + uint16_t payloadLen; + uint32_t trailerlen = 0; + uint8_t htc_ep_id; +#ifdef HTC_MSG_WAKEUP_FROM_SUSPEND_ID + struct htc_init_info *info; +#endif + +#ifdef RX_SG_SUPPORT + LOCK_HTC_RX(target); + if (target->IsRxSgInprogress) { + target->CurRxSgTotalLen += qdf_nbuf_len(netbuf); + qdf_nbuf_queue_add(&target->RxSgQueue, netbuf); + if (target->CurRxSgTotalLen == target->ExpRxSgTotalLen) { + netbuf = rx_sg_to_single_netbuf(target); + if (netbuf == NULL) { + UNLOCK_HTC_RX(target); + goto _out; + } + } else { + netbuf = NULL; + UNLOCK_HTC_RX(target); + goto _out; + } + } + UNLOCK_HTC_RX(target); +#endif + + netdata = qdf_nbuf_data(netbuf); + netlen = qdf_nbuf_len(netbuf); + + HtcHdr = (HTC_FRAME_HDR *) netdata; + + do { + + htc_ep_id = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, ENDPOINTID); + + if (htc_ep_id >= ENDPOINT_MAX) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("HTC Rx: 
invalid EndpointID=%d\n", + htc_ep_id)); + debug_dump_bytes((uint8_t *) HtcHdr, + sizeof(HTC_FRAME_HDR), + "BAD HTC Header"); + status = QDF_STATUS_E_FAILURE; + QDF_BUG(0); + break; + } + + pEndpoint = &target->endpoint[htc_ep_id]; + + /* + * If this endpoint that received a message from the target has + * a to-target HIF pipe whose send completions are polled rather + * than interrupt driven, this is a good point to ask HIF to + * check whether it has any completed sends to handle. + */ + if (pEndpoint->ul_is_polled) + htc_send_complete_check(pEndpoint, 1); + + payloadLen = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, PAYLOADLEN); + + if (netlen < (payloadLen + HTC_HDR_LENGTH)) { +#ifdef RX_SG_SUPPORT + LOCK_HTC_RX(target); + target->IsRxSgInprogress = true; + qdf_nbuf_queue_init(&target->RxSgQueue); + qdf_nbuf_queue_add(&target->RxSgQueue, netbuf); + target->ExpRxSgTotalLen = (payloadLen + HTC_HDR_LENGTH); + target->CurRxSgTotalLen += netlen; + UNLOCK_HTC_RX(target); + netbuf = NULL; + break; +#else + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("HTC Rx: insufficient length, got:%d expected =%zu\n", + netlen, payloadLen + HTC_HDR_LENGTH)); + debug_dump_bytes((uint8_t *) HtcHdr, + sizeof(HTC_FRAME_HDR), + "BAD RX packet length"); + status = QDF_STATUS_E_FAILURE; + QDF_BUG(0); + break; +#endif + } +#ifdef HTC_EP_STAT_PROFILING + LOCK_HTC_RX(target); + INC_HTC_EP_STAT(pEndpoint, RxReceived, 1); + UNLOCK_HTC_RX(target); +#endif + + /* if (IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) { */ + { + uint8_t temp; + A_STATUS temp_status; + /* get flags to check for trailer */ + temp = HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, FLAGS); + if (temp & HTC_FLAGS_RECV_TRAILER) { + /* extract the trailer length */ + temp = + HTC_GET_FIELD(HtcHdr, HTC_FRAME_HDR, + CONTROLBYTES0); + if ((temp < sizeof(HTC_RECORD_HDR)) + || (temp > payloadLen)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("htc_rx_completion_handler, invalid header (payloadlength should be :%d, CB[0] is:%d)\n", + payloadLen, temp)); + status = 
QDF_STATUS_E_INVAL; + break; + } + + trailerlen = temp; + /* process trailer data that follows HDR + + * application payload + */ + temp_status = htc_process_trailer(target, + ((uint8_t *) HtcHdr + + HTC_HDR_LENGTH + + payloadLen - temp), + temp, htc_ep_id); + if (A_FAILED(temp_status)) { + status = QDF_STATUS_E_FAILURE; + break; + } + + } + } + + if (((int)payloadLen - (int)trailerlen) <= 0) { + /* 0 length packet with trailer data, just drop these */ + break; + } + + if (htc_ep_id == ENDPOINT_0) { + uint16_t message_id; + HTC_UNKNOWN_MSG *htc_msg; + bool wow_nack; + + /* remove HTC header */ + qdf_nbuf_pull_head(netbuf, HTC_HDR_LENGTH); + netdata = qdf_nbuf_data(netbuf); + netlen = qdf_nbuf_len(netbuf); + + htc_msg = (HTC_UNKNOWN_MSG *) netdata; + message_id = HTC_GET_FIELD(htc_msg, HTC_UNKNOWN_MSG, + MESSAGEID); + + switch (message_id) { + default: + /* handle HTC control message */ + if (target->CtrlResponseProcessing) { + /* this is a fatal error, target should + * not be sending unsolicited messages + * on the endpoint 0 + */ + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("HTC Rx Ctrl still processing\n")); + status = QDF_STATUS_E_FAILURE; + QDF_BUG(false); + break; + } + + LOCK_HTC_RX(target); + target->CtrlResponseLength = + min((int)netlen, + HTC_MAX_CONTROL_MESSAGE_LENGTH); + qdf_mem_copy(target->CtrlResponseBuffer, + netdata, + target->CtrlResponseLength); + + /* Requester will clear this flag */ + target->CtrlResponseProcessing = true; + UNLOCK_HTC_RX(target); + + qdf_event_set(&target->ctrl_response_valid); + break; +#ifdef HTC_MSG_WAKEUP_FROM_SUSPEND_ID + case HTC_MSG_WAKEUP_FROM_SUSPEND_ID: + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("Received initial wake up")); + htc_credit_record(HTC_INITIAL_WAKE_UP, + pEndpoint->TxCredits, + HTC_PACKET_QUEUE_DEPTH( + &pEndpoint->TxQueue)); + info = &target->HTCInitInfo; + if (info && info->target_initial_wakeup_cb) + info->target_initial_wakeup_cb( + info->target_psoc); + else + AR_DEBUG_PRINTF(ATH_DEBUG_ANY, + ("No initial wake up 
cb")); + break; +#endif + case HTC_MSG_SEND_SUSPEND_COMPLETE: + wow_nack = false; + htc_credit_record(HTC_SUSPEND_ACK, + pEndpoint->TxCredits, + HTC_PACKET_QUEUE_DEPTH( + &pEndpoint->TxQueue)); + target->HTCInitInfo.TargetSendSuspendComplete( + target->HTCInitInfo.target_psoc, + wow_nack); + + break; + case HTC_MSG_NACK_SUSPEND: + wow_nack = true; + htc_credit_record(HTC_SUSPEND_ACK, + pEndpoint->TxCredits, + HTC_PACKET_QUEUE_DEPTH( + &pEndpoint->TxQueue)); + target->HTCInitInfo.TargetSendSuspendComplete( + target->HTCInitInfo.target_psoc, + wow_nack); + break; + } + + qdf_nbuf_free(netbuf); + netbuf = NULL; + break; + } + + /* the current message based HIF architecture allocates net bufs + * for recv packets since this layer bridges that HIF to upper + * layers , which expects HTC packets, we form the packets here + * TODO_FIXME + */ + pPacket = allocate_htc_packet_container(target); + if (NULL == pPacket) { + status = QDF_STATUS_E_RESOURCES; + break; + } + pPacket->Status = QDF_STATUS_SUCCESS; + pPacket->Endpoint = htc_ep_id; + pPacket->pPktContext = netbuf; + pPacket->pBuffer = qdf_nbuf_data(netbuf) + HTC_HDR_LENGTH; + pPacket->ActualLength = netlen - HTC_HEADER_LEN - trailerlen; + + qdf_nbuf_pull_head(netbuf, HTC_HEADER_LEN); + qdf_nbuf_set_pktlen(netbuf, pPacket->ActualLength); + + do_recv_completion_pkt(pEndpoint, pPacket); + + /* recover the packet container */ + free_htc_packet_container(target, pPacket); + + netbuf = NULL; + + } while (false); + +#ifdef RX_SG_SUPPORT +_out: +#endif + + if (netbuf != NULL) + qdf_nbuf_free(netbuf); + + return status; + +} + +A_STATUS htc_add_receive_pkt_multiple(HTC_HANDLE HTCHandle, + HTC_PACKET_QUEUE *pPktQueue) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_ENDPOINT *pEndpoint; + HTC_PACKET *pFirstPacket; + A_STATUS status = A_OK; + HTC_PACKET *pPacket; + + pFirstPacket = htc_get_pkt_at_head(pPktQueue); + + if (NULL == pFirstPacket) { + A_ASSERT(false); + return A_EINVAL; + } + + if 
/**
 * htc_flush_rx_hold_queue() - cancel and return all held RX packets
 * @target: HTC target instance
 * @pEndpoint: endpoint whose RxBufferHoldQueue is to be flushed
 *
 * Dequeues every receive packet held for @pEndpoint, marks it
 * QDF_STATUS_E_CANCELED with zero length, and hands it back to the
 * endpoint's receive-completion callback.
 *
 * Return: None
 */
void htc_flush_rx_hold_queue(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint)
{
	HTC_PACKET *pPacket;

	LOCK_HTC_RX(target);

	while (1) {
		pPacket = htc_packet_dequeue(&pEndpoint->RxBufferHoldQueue);
		if (pPacket == NULL)
			break;
		/* drop the lock while indicating the packet: the callback
		 * may re-enter HTC and must not run with HTC_RX held
		 */
		UNLOCK_HTC_RX(target);
		pPacket->Status = QDF_STATUS_E_CANCELED;
		pPacket->ActualLength = 0;
		AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
				("Flushing RX packet:%pK, length:%d, ep:%d\n",
				 pPacket, pPacket->BufferLength,
				 pPacket->Endpoint));
		/* give the packet back */
		do_recv_completion_pkt(pEndpoint, pPacket);
		LOCK_HTC_RX(target);
	}

	UNLOCK_HTC_RX(target);
}

/**
 * htc_recv_init() - initialize the HTC receive side
 * @target: HTC target instance
 *
 * Return: None
 */
void htc_recv_init(HTC_TARGET *target)
{
	/* Initialize ctrl_response_valid to block */
	qdf_event_create(&target->ctrl_response_valid);
}
(qdf_wait_single_event(&target->ctrl_response_valid, + HTC_CONTROL_RX_TIMEOUT)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Failed to receive control message\n")); + return QDF_STATUS_E_FAILURE; + } + + LOCK_HTC_RX(target); + /* caller will clear this flag */ + target->CtrlResponseProcessing = true; + + UNLOCK_HTC_RX(target); + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-HTCWaitCtrlMessageRecv success\n")); + return QDF_STATUS_SUCCESS; +} + +static A_STATUS htc_process_trailer(HTC_TARGET *target, + uint8_t *pBuffer, + int Length, HTC_ENDPOINT_ID FromEndpoint) +{ + HTC_RECORD_HDR *pRecord; + uint8_t htc_rec_id; + uint8_t htc_rec_len; + uint8_t *pRecordBuf; + uint8_t *pOrigBuffer; + int origLength; + A_STATUS status; + + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, + ("+htc_process_trailer (length:%d)\n", Length)); + + if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_RECV)) + AR_DEBUG_PRINTBUF(pBuffer, Length, "Recv Trailer"); + + pOrigBuffer = pBuffer; + origLength = Length; + status = A_OK; + + while (Length > 0) { + + if (Length < sizeof(HTC_RECORD_HDR)) { + status = A_EPROTO; + break; + } + /* these are byte aligned structs */ + pRecord = (HTC_RECORD_HDR *) pBuffer; + Length -= sizeof(HTC_RECORD_HDR); + pBuffer += sizeof(HTC_RECORD_HDR); + + htc_rec_len = HTC_GET_FIELD(pRecord, HTC_RECORD_HDR, LENGTH); + htc_rec_id = HTC_GET_FIELD(pRecord, HTC_RECORD_HDR, RECORDID); + + if (htc_rec_len > Length) { + /* no room left in buffer for record */ + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("invalid record length: %d (id:%d) buffer has: %d bytes left\n", + htc_rec_len, htc_rec_id, Length)); + status = A_EPROTO; + break; + } + /* start of record follows the header */ + pRecordBuf = pBuffer; + + switch (htc_rec_id) { + case HTC_RECORD_CREDITS: + AR_DEBUG_ASSERT(htc_rec_len >= + sizeof(HTC_CREDIT_REPORT)); + htc_process_credit_rpt(target, + (HTC_CREDIT_REPORT *) pRecordBuf, + htc_rec_len / + (sizeof(HTC_CREDIT_REPORT)), + FromEndpoint); + break; + +#ifdef HIF_SDIO + case HTC_RECORD_LOOKAHEAD: + /* Process in HIF layer */ + 
break; + + case HTC_RECORD_LOOKAHEAD_BUNDLE: + /* Process in HIF layer */ + break; +#endif /* HIF_SDIO */ + + default: + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("HTC unhandled record: id:%d length:%d\n", + htc_rec_id, htc_rec_len)); + break; + } + + if (A_FAILED(status)) { + break; + } + + /* advance buffer past this record for next time around */ + pBuffer += htc_rec_len; + Length -= htc_rec_len; + } + + if (A_FAILED(status)) + debug_dump_bytes(pOrigBuffer, origLength, "BAD Recv Trailer"); + + AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-htc_process_trailer\n")); + return status; + +} diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_send.c b/drivers/staging/qca-wifi-host-cmn/htc/htc_send.c new file mode 100644 index 0000000000000000000000000000000000000000..85d5dd86d2fc542939f3f4d628b344869b08948e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_send.c @@ -0,0 +1,2259 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "htc_debug.h" +#include "htc_internal.h" +#include "htc_credit_history.h" +#include /* qdf_mem_malloc */ +#include /* qdf_nbuf_t */ +#include "qdf_module.h" + +/* #define USB_HIF_SINGLE_PIPE_DATA_SCHED */ +/* #ifdef USB_HIF_SINGLE_PIPE_DATA_SCHED */ +#define DATA_EP_SIZE 4 +/* #endif */ +#define HTC_DATA_RESOURCE_THRS 256 +#define HTC_DATA_MINDESC_PERPACKET 2 + +enum HTC_SEND_QUEUE_RESULT { + HTC_SEND_QUEUE_OK = 0, /* packet was queued */ + HTC_SEND_QUEUE_DROP = 1, /* this packet should be dropped */ +}; + +#ifndef DEBUG_CREDIT +#define DEBUG_CREDIT 0 +#endif + +#if DEBUG_CREDIT +/* bit mask to enable debug certain endpoint */ +static unsigned int ep_debug_mask = + (1 << ENDPOINT_0) | (1 << ENDPOINT_1) | (1 << ENDPOINT_2); +#endif + +#ifdef QCA_WIFI_NAPIER_EMULATION +#define HTC_EMULATION_DELAY_IN_MS 20 +/** + * htc_add_delay(): Adds a delay in before proceeding, only for emulation + * + * Return: None + */ +static inline void htc_add_emulation_delay(void) +{ + qdf_mdelay(HTC_EMULATION_DELAY_IN_MS); +} +#else +static inline void htc_add_emulation_delay(void) +{ +} +#endif + +void htc_dump_counter_info(HTC_HANDLE HTCHandle) +{ +#ifdef WLAN_DEBUG + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); +#endif + + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("\n%s: ce_send_cnt = %d, TX_comp_cnt = %d\n", + __func__, target->ce_send_cnt, target->TX_comp_cnt)); +} + +int htc_get_tx_queue_depth(HTC_HANDLE *htc_handle, HTC_ENDPOINT_ID endpoint_id) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle); + HTC_ENDPOINT *endpoint = &target->endpoint[endpoint_id]; + + return HTC_PACKET_QUEUE_DEPTH(&endpoint->TxQueue); +} +qdf_export_symbol(htc_get_tx_queue_depth); + +void htc_get_control_endpoint_tx_host_credits(HTC_HANDLE HTCHandle, + int *credits) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_ENDPOINT *pEndpoint; + int i; + + if (!credits || !target) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: invalid args", __func__)); + return; + 
} + + *credits = 0; + LOCK_HTC_TX(target); + for (i = 0; i < ENDPOINT_MAX; i++) { + pEndpoint = &target->endpoint[i]; + if (pEndpoint->service_id == WMI_CONTROL_SVC) { + *credits = pEndpoint->TxCredits; + break; + } + } + UNLOCK_HTC_TX(target); +} + +static inline void restore_tx_packet(HTC_TARGET *target, HTC_PACKET *pPacket) +{ + qdf_nbuf_t netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + + if (pPacket->PktInfo.AsTx.Flags & HTC_TX_PACKET_FLAG_FIXUP_NETBUF) { + qdf_nbuf_unmap(target->osdev, netbuf, QDF_DMA_TO_DEVICE); + pPacket->PktInfo.AsTx.Flags &= ~HTC_TX_PACKET_FLAG_FIXUP_NETBUF; + } + + qdf_nbuf_pull_head(netbuf, sizeof(HTC_FRAME_HDR)); +} + +static void send_packet_completion(HTC_TARGET *target, HTC_PACKET *pPacket) +{ + HTC_ENDPOINT *pEndpoint = &target->endpoint[pPacket->Endpoint]; + HTC_EP_SEND_PKT_COMPLETE EpTxComplete; + + restore_tx_packet(target, pPacket); + + /* do completion */ + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("HTC calling ep %d send complete callback on packet %pK\n", + pEndpoint->Id, pPacket)); + + EpTxComplete = pEndpoint->EpCallBacks.EpTxComplete; + if (EpTxComplete != NULL) + EpTxComplete(pEndpoint->EpCallBacks.pContext, pPacket); + else + qdf_nbuf_free(pPacket->pPktContext); + + +} + +void htc_send_complete_check_cleanup(void *context) +{ + HTC_ENDPOINT *pEndpoint = (HTC_ENDPOINT *) context; + + htc_send_complete_check(pEndpoint, 1); +} + +HTC_PACKET *allocate_htc_bundle_packet(HTC_TARGET *target) +{ + HTC_PACKET *pPacket; + HTC_PACKET_QUEUE *pQueueSave; + qdf_nbuf_t netbuf; + + LOCK_HTC_TX(target); + if (NULL == target->pBundleFreeList) { + UNLOCK_HTC_TX(target); + netbuf = qdf_nbuf_alloc(NULL, + target->MaxMsgsPerHTCBundle * + target->TargetCreditSize, 0, 4, false); + AR_DEBUG_ASSERT(netbuf); + if (!netbuf) + return NULL; + pPacket = qdf_mem_malloc(sizeof(HTC_PACKET)); + AR_DEBUG_ASSERT(pPacket); + if (!pPacket) { + qdf_nbuf_free(netbuf); + return NULL; + } + pQueueSave = qdf_mem_malloc(sizeof(HTC_PACKET_QUEUE)); + 
AR_DEBUG_ASSERT(pQueueSave); + if (!pQueueSave) { + qdf_nbuf_free(netbuf); + qdf_mem_free(pPacket); + return NULL; + } + INIT_HTC_PACKET_QUEUE(pQueueSave); + pPacket->pContext = pQueueSave; + SET_HTC_PACKET_NET_BUF_CONTEXT(pPacket, netbuf); + pPacket->pBuffer = qdf_nbuf_data(netbuf); + pPacket->BufferLength = qdf_nbuf_len(netbuf); + + /* store the original head room so that we can restore this + * when we "free" the packet. + * free packet puts the packet back on the free list + */ + pPacket->netbufOrigHeadRoom = qdf_nbuf_headroom(netbuf); + return pPacket; + } + /* already done malloc - restore from free list */ + pPacket = target->pBundleFreeList; + AR_DEBUG_ASSERT(pPacket); + if (!pPacket) { + UNLOCK_HTC_TX(target); + return NULL; + } + target->pBundleFreeList = (HTC_PACKET *) pPacket->ListLink.pNext; + UNLOCK_HTC_TX(target); + pPacket->ListLink.pNext = NULL; + + return pPacket; +} + +void free_htc_bundle_packet(HTC_TARGET *target, HTC_PACKET *pPacket) +{ + uint32_t curentHeadRoom; + qdf_nbuf_t netbuf; + HTC_PACKET_QUEUE *pQueueSave; + + netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + AR_DEBUG_ASSERT(netbuf); + if (!netbuf) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("\n%s: Invalid netbuf in HTC Packet\n", + __func__)); + return; + } + /* HIF adds data to the headroom section of the nbuf, restore thei + * original size. If this is not done, headroom keeps shrinking with + * every HIF send and eventually HIF ends up doing another malloc big + * enough to store the data + its header + */ + + curentHeadRoom = qdf_nbuf_headroom(netbuf); + qdf_nbuf_pull_head(netbuf, + pPacket->netbufOrigHeadRoom - curentHeadRoom); + qdf_nbuf_trim_tail(netbuf, qdf_nbuf_len(netbuf)); + + /* restore the pBuffer pointer. 
HIF changes this */ + pPacket->pBuffer = qdf_nbuf_data(netbuf); + pPacket->BufferLength = qdf_nbuf_len(netbuf); + + /* restore queue */ + pQueueSave = (HTC_PACKET_QUEUE *) pPacket->pContext; + if (qdf_unlikely(!pQueueSave)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("\n%s: Invalid pQueueSave in HTC Packet\n", + __func__)); + AR_DEBUG_ASSERT(pQueueSave); + } else + INIT_HTC_PACKET_QUEUE(pQueueSave); + + LOCK_HTC_TX(target); + if (target->pBundleFreeList == NULL) { + target->pBundleFreeList = pPacket; + pPacket->ListLink.pNext = NULL; + } else { + pPacket->ListLink.pNext = (DL_LIST *) target->pBundleFreeList; + target->pBundleFreeList = pPacket; + } + UNLOCK_HTC_TX(target); +} + +#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT) + +/** + * htc_send_update_tx_bundle_stats() - update tx bundle stats depends + * on max bundle size + * @target: hif context + * @data_len: tx data len + * @TxCreditSize: endpoint tx credit size + * + * Return: None + */ +static inline void +htc_send_update_tx_bundle_stats(HTC_TARGET *target, + qdf_size_t data_len, + int TxCreditSize) +{ + if ((data_len / TxCreditSize) <= HTC_MAX_MSG_PER_BUNDLE_TX) + target->tx_bundle_stats[(data_len / TxCreditSize) - 1]++; +} + +/** + * htc_issue_tx_bundle_stats_inc() - increment in tx bundle stats + * on max bundle size + * @target: hif context + * + * Return: None + */ +static inline void +htc_issue_tx_bundle_stats_inc(HTC_TARGET *target) +{ + target->tx_bundle_stats[0]++; +} +#else + +static inline void +htc_send_update_tx_bundle_stats(HTC_TARGET *target, + qdf_size_t data_len, + int TxCreditSize) +{ +} + +static inline void +htc_issue_tx_bundle_stats_inc(HTC_TARGET *target) +{ +} +#endif + +#if defined(HIF_USB) || defined(HIF_SDIO) +#ifdef ENABLE_BUNDLE_TX +static QDF_STATUS htc_send_bundled_netbuf(HTC_TARGET *target, + HTC_ENDPOINT *pEndpoint, + unsigned char *pBundleBuffer, + HTC_PACKET *pPacketTx) +{ + qdf_size_t data_len; + QDF_STATUS status; + qdf_nbuf_t bundleBuf; + uint32_t data_attr = 0; + 
/**
 * htc_issue_packets_bundle() - HTC function to send bundle packets from a queue
 * @target: HTC target on which packets need to be sent
 * @pEndpoint: logical endpoint on which packets needs to be sent
 * @pPktQueue: HTC packet queue containing the list of packets to be sent
 *
 * Coalesces packets from @pPktQueue into credit-size-aligned bundle buffers
 * and hands each filled buffer to htc_send_bundled_netbuf().  A packet that
 * does not fit in the current bundle triggers a flush; if too few packets
 * remain to justify another bundle, the packet is pushed back onto
 * @pPktQueue for the caller to send individually.
 *
 * Return: void
 */
static void htc_issue_packets_bundle(HTC_TARGET *target,
                                     HTC_ENDPOINT *pEndpoint,
                                     HTC_PACKET_QUEUE *pPktQueue)
{
        int i, frag_count, nbytes;
        qdf_nbuf_t netbuf, bundleBuf;
        unsigned char *pBundleBuffer = NULL;
        HTC_PACKET *pPacket = NULL, *pPacketTx = NULL;
        HTC_FRAME_HDR *pHtcHdr;
        int last_credit_pad = 0;
        int creditPad, creditRemainder, transferLength, bundlesSpaceRemaining =
                0;
        HTC_PACKET_QUEUE *pQueueSave = NULL;

        /* a bundle buffer holds at most MaxMsgsPerHTCBundle credit units */
        bundlesSpaceRemaining =
                target->MaxMsgsPerHTCBundle * pEndpoint->TxCreditSize;
        pPacketTx = allocate_htc_bundle_packet(target);
        if (!pPacketTx) {
                /* good time to panic */
                AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                                ("allocate_htc_bundle_packet failed\n"));
                AR_DEBUG_ASSERT(false);
                return;
        }
        bundleBuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacketTx);
        pBundleBuffer = qdf_nbuf_data(bundleBuf);
        /* pContext of a bundle packet holds the queue of member packets */
        pQueueSave = (HTC_PACKET_QUEUE *) pPacketTx->pContext;
        while (1) {
                pPacket = htc_packet_dequeue(pPktQueue);
                if (pPacket == NULL)
                        break;
                creditPad = 0;
                transferLength = pPacket->ActualLength + HTC_HDR_LENGTH;
                /* round each transfer up to a whole number of credit units */
                creditRemainder = transferLength % pEndpoint->TxCreditSize;
                if (creditRemainder != 0) {
                        if (transferLength < pEndpoint->TxCreditSize) {
                                creditPad = pEndpoint->TxCreditSize -
                                            transferLength;
                        } else {
                                creditPad = creditRemainder;
                        }
                        transferLength += creditPad;
                }

                if (bundlesSpaceRemaining < transferLength) {
                        /* send out previous buffer */
                        htc_send_bundled_netbuf(target, pEndpoint,
                                                pBundleBuffer - last_credit_pad,
                                                pPacketTx);
                        /* One packet has been dequeued from sending queue when enter
                         * this loop, so need to add 1 back for this checking.
                         */
                        if ((HTC_PACKET_QUEUE_DEPTH(pPktQueue) + 1) <
                            HTC_MIN_MSG_PER_BUNDLE) {
                                /* caller sends the remainder unbundled */
                                HTC_PACKET_ENQUEUE_TO_HEAD(pPktQueue, pPacket);
                                return;
                        }
                        bundlesSpaceRemaining =
                                target->MaxMsgsPerHTCBundle *
                                pEndpoint->TxCreditSize;
                        pPacketTx = allocate_htc_bundle_packet(target);
                        if (!pPacketTx) {
                                HTC_PACKET_ENQUEUE_TO_HEAD(pPktQueue, pPacket);
                                /* good time to panic */
                                AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                                                ("allocate_htc_bundle_packet failed\n"));
                                AR_DEBUG_ASSERT(false);
                                return;
                        }
                        bundleBuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacketTx);
                        pBundleBuffer = qdf_nbuf_data(bundleBuf);
                        pQueueSave = (HTC_PACKET_QUEUE *) pPacketTx->pContext;
                }

                bundlesSpaceRemaining -= transferLength;
                netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket);

                if (hif_get_bus_type(target->hif_dev) != QDF_BUS_TYPE_USB) {
                        /* rewrite the member's HTC header with the bundle
                         * flag and the credit padding it consumed
                         */
                        pHtcHdr = (HTC_FRAME_HDR *)qdf_nbuf_get_frag_vaddr(
                                                                netbuf, 0);
                        HTC_WRITE32(pHtcHdr,
                                    SM(pPacket->ActualLength,
                                       HTC_FRAME_HDR_PAYLOADLEN) |
                                    SM(pPacket->PktInfo.AsTx.SendFlags |
                                       HTC_FLAGS_SEND_BUNDLE,
                                       HTC_FRAME_HDR_FLAGS) |
                                    SM(pPacket->Endpoint,
                                       HTC_FRAME_HDR_ENDPOINTID));
                        HTC_WRITE32((uint32_t *) pHtcHdr + 1,
                                    SM(pPacket->PktInfo.AsTx.SeqNo,
                                       HTC_FRAME_HDR_CONTROLBYTES1) | SM(creditPad,
                                       HTC_FRAME_HDR_RESERVED));
                        pHtcHdr->reserved = creditPad;
                }
                /* flatten all fragments of the packet into the bundle */
                frag_count = qdf_nbuf_get_num_frags(netbuf);
                nbytes = pPacket->ActualLength + HTC_HDR_LENGTH;
                for (i = 0; i < frag_count && nbytes > 0; i++) {
                        int frag_len = qdf_nbuf_get_frag_len(netbuf, i);
                        unsigned char *frag_addr =
                                qdf_nbuf_get_frag_vaddr(netbuf, i);
                        if (frag_len > nbytes)
                                frag_len = nbytes;
                        qdf_mem_copy(pBundleBuffer, frag_addr, frag_len);
                        nbytes -= frag_len;
                        pBundleBuffer += frag_len;
                }
                HTC_PACKET_ENQUEUE(pQueueSave, pPacket);
                pBundleBuffer += creditPad;

                /* last one can't be packed. */
                if (hif_get_bus_type(target->hif_dev) == QDF_BUS_TYPE_USB)
                        last_credit_pad = creditPad;
        }
        /* send out remaining buffer */
        if (pBundleBuffer != qdf_nbuf_data(bundleBuf))
                htc_send_bundled_netbuf(target, pEndpoint,
                                        pBundleBuffer - last_credit_pad,
                                        pPacketTx);
        else
                free_htc_bundle_packet(target, pPacketTx);
}
/**
 * htc_issue_packets() - HTC function to send packets from a queue
 * @target: HTC target on which packets need to be sent
 * @pEndpoint: logical endpoint on which packets needs to be sent
 * @pPktQueue: HTC packet queue containing the list of packets to be sent
 *
 * Attempts TX bundling first (SDIO with credit flow, or USB) and then
 * drains the remaining packets one at a time through hif_send_head().
 * On a send failure the packet's credits are reclaimed and the packet is
 * pushed back to the head of @pPktQueue for the caller.
 *
 * Return: QDF_STATUS_SUCCESS on success and error QDF status on failure
 */
static QDF_STATUS htc_issue_packets(HTC_TARGET *target,
                                    HTC_ENDPOINT *pEndpoint,
                                    HTC_PACKET_QUEUE *pPktQueue)
{
        QDF_STATUS status = QDF_STATUS_SUCCESS;
        qdf_nbuf_t netbuf;
        HTC_PACKET *pPacket = NULL;
        uint16_t payloadLen;
        HTC_FRAME_HDR *pHtcHdr;
        uint32_t data_attr = 0;
        enum qdf_bus_type bus_type;
        QDF_STATUS ret;
        bool rt_put = false;

        bus_type = hif_get_bus_type(target->hif_dev);

        AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
                        ("+htc_issue_packets: Queue: %pK, Pkts %d\n", pPktQueue,
                         HTC_PACKET_QUEUE_DEPTH(pPktQueue)));
        while (true) {
                if (HTC_TX_BUNDLE_ENABLED(target) &&
                    HTC_PACKET_QUEUE_DEPTH(pPktQueue) >=
                    HTC_MIN_MSG_PER_BUNDLE) {
                        switch (bus_type) {
                        case QDF_BUS_TYPE_SDIO:
                                if (!IS_TX_CREDIT_FLOW_ENABLED(pEndpoint))
                                        break;
                                /* fall through - SDIO with credit flow
                                 * bundles just like USB
                                 */
                        case QDF_BUS_TYPE_USB:
                                htc_issue_packets_bundle(target,
                                                        pEndpoint,
                                                        pPktQueue);
                                break;
                        default:
                                break;
                        }
                }
                /* if not bundling or there was a packet that could not be
                 * placed in a bundle, and send it by normal way
                 */
                pPacket = htc_packet_dequeue(pPktQueue);
                if (NULL == pPacket) {
                        /* local queue is fully drained */
                        break;
                }

                netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket);
                AR_DEBUG_ASSERT(netbuf);
                /* Non-credit enabled endpoints have been mapped and setup by
                 * now, so no need to revisit the HTC headers
                 */
                if (IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) {

                        payloadLen = pPacket->ActualLength;
                        /* setup HTC frame header */

                        pHtcHdr = (HTC_FRAME_HDR *)
                                qdf_nbuf_get_frag_vaddr(netbuf, 0);
                        if (qdf_unlikely(!pHtcHdr)) {
                                AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                                                ("%s Invalid pHtcHdr\n",
                                                 __func__));
                                AR_DEBUG_ASSERT(pHtcHdr);
                                status = QDF_STATUS_E_FAILURE;
                                break;
                        }

                        HTC_WRITE32(pHtcHdr,
                                    SM(payloadLen,
                                       HTC_FRAME_HDR_PAYLOADLEN) |
                                    SM(pPacket->PktInfo.AsTx.SendFlags,
                                       HTC_FRAME_HDR_FLAGS) |
                                    SM(pPacket->Endpoint,
                                       HTC_FRAME_HDR_ENDPOINTID));
                        HTC_WRITE32(((uint32_t *) pHtcHdr) + 1,
                                    SM(pPacket->PktInfo.AsTx.SeqNo,
                                       HTC_FRAME_HDR_CONTROLBYTES1));

                        /*
                         * Now that the HTC frame header has been added, the
                         * netbuf can be mapped.  This only applies to non-data
                         * frames, since data frames were already mapped as they
                         * entered into the driver.
                         */
                        pPacket->PktInfo.AsTx.Flags |=
                                HTC_TX_PACKET_FLAG_FIXUP_NETBUF;

                        ret = qdf_nbuf_map(target->osdev,
                                GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket),
                                QDF_DMA_TO_DEVICE);
                        if (ret != QDF_STATUS_SUCCESS) {
                                AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                                        ("%s nbuf Map Fail Endpnt %pK\n",
                                         __func__, pEndpoint));
                                HTC_PACKET_ENQUEUE_TO_HEAD(pPktQueue, pPacket);
                                status = QDF_STATUS_E_FAILURE;
                                break;
                        }
                }

                /* async_update callers already hold the TX lock */
                if (!pEndpoint->async_update) {
                        LOCK_HTC_TX(target);
                }
                /* store in look up queue to match completions */
                HTC_PACKET_ENQUEUE(&pEndpoint->TxLookupQueue, pPacket);
                INC_HTC_EP_STAT(pEndpoint, TxIssued, 1);
                pEndpoint->ul_outstanding_cnt++;
                if (!pEndpoint->async_update) {
                        UNLOCK_HTC_TX(target);
                        hif_send_complete_check(target->hif_dev,
                                                pEndpoint->UL_PipeID, false);
                }

                htc_packet_set_magic_cookie(pPacket, HTC_PACKET_MAGIC_COOKIE);
                /*
                 * For HTT messages without a response from fw,
                 * do the runtime put here.
                 * otherwise runtime put will be done when the fw response comes
                 */
                if (pPacket->PktInfo.AsTx.Tag == HTC_TX_PACKET_TAG_RUNTIME_PUT)
                        rt_put = true;
#if DEBUG_BUNDLE
                qdf_print(" Send single EP%d buffer size:0x%x, total:0x%x.\n",
                          pEndpoint->Id,
                          pEndpoint->TxCreditSize,
                          HTC_HDR_LENGTH + pPacket->ActualLength);
#endif
                status = hif_send_head(target->hif_dev,
                                       pEndpoint->UL_PipeID, pEndpoint->Id,
                                       HTC_HDR_LENGTH + pPacket->ActualLength,
                                       netbuf, data_attr);

                htc_issue_tx_bundle_stats_inc(target);

                target->ce_send_cnt++;

                if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
                        if (status != QDF_STATUS_E_RESOURCES) {
                                /* TODO : if more than 1 endpoint maps to the
                                 * same PipeID it is possible to run out of
                                 * resources in the HIF layer. Don't emit the
                                 * error
                                 */
                                AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                                                ("hif_send Failed status:%d\n",
                                                 status));
                        }

                        /* only unmap if we mapped in this function */
                        if (IS_TX_CREDIT_FLOW_ENABLED(pEndpoint))
                                qdf_nbuf_unmap(target->osdev,
                                        GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket),
                                        QDF_DMA_TO_DEVICE);

                        if (!pEndpoint->async_update) {
                                LOCK_HTC_TX(target);
                        }
                        /* undo the bookkeeping done above for this packet */
                        target->ce_send_cnt--;
                        pEndpoint->ul_outstanding_cnt--;
                        HTC_PACKET_REMOVE(&pEndpoint->TxLookupQueue, pPacket);
                        /* reclaim credits */
                        pEndpoint->TxCredits +=
                                pPacket->PktInfo.AsTx.CreditsUsed;
                        htc_packet_set_magic_cookie(pPacket, 0);
                        /* put it back into the callers queue */
                        HTC_PACKET_ENQUEUE_TO_HEAD(pPktQueue, pPacket);
                        if (!pEndpoint->async_update) {
                                UNLOCK_HTC_TX(target);
                        }
                        break;
                }
                if (rt_put) {
                        hif_pm_runtime_put(target->hif_dev);
                        rt_put = false;
                }
        }
        if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
                AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                        ("htc_issue_packets, failed pkt:0x%pK status:%d",
                         pPacket, status));
        }

        AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-htc_issue_packets\n"));

        return status;
}
/**
 * extract_htc_pm_packets(): move pm packets from endpoint into queue
 * @endpoint: which endpoint to extract packets from
 * @queue: a queue to store extracted packets in.
 *
 * remove pm packets from the endpoint's tx queue and
 * queue them into @queue.
 */
static void extract_htc_pm_packets(HTC_ENDPOINT *endpoint,
                                   HTC_PACKET_QUEUE *queue)
{
        HTC_PACKET *packet;

        /* only WMI endpoint has power management packets */
        if (endpoint->service_id != WMI_CONTROL_SVC)
                return;

        /* ALLOW_REMOVE variant permits HTC_PACKET_REMOVE mid-iteration */
        ITERATE_OVER_LIST_ALLOW_REMOVE(&endpoint->TxQueue.QueueHead, packet,
                                       HTC_PACKET, ListLink) {
                if (packet->PktInfo.AsTx.Tag == HTC_TX_PACKET_TAG_AUTO_PM) {
                        HTC_PACKET_REMOVE(&endpoint->TxQueue, packet);
                        HTC_PACKET_ENQUEUE(queue, packet);
                }
        } ITERATE_END
}

/**
 * queue_htc_pm_packets(): queue pm packets with priority
 * @endpoint: endpoint to queue packets to
 * @queue: queue of pm packets to enque
 *
 * suspend resume packets get special treatment & priority.
 * need to queue them at the front of the queue.
 */
static void queue_htc_pm_packets(HTC_ENDPOINT *endpoint,
                                 HTC_PACKET_QUEUE *queue)
{
        if (endpoint->service_id != WMI_CONTROL_SVC)
                return;

        HTC_PACKET_QUEUE_TRANSFER_TO_HEAD(&endpoint->TxQueue, queue);
}
#else
/* stubs: without FEATURE_RUNTIME_PM there are no PM packets to segregate */
static void extract_htc_pm_packets(HTC_ENDPOINT *endpoint,
                                   HTC_PACKET_QUEUE *queue)
{}

static void queue_htc_pm_packets(HTC_ENDPOINT *endpoint,
                                 HTC_PACKET_QUEUE *queue)
{}
#endif
/**
 * get_htc_send_packets_credit_based() - get packets based on available credits
 * @target: HTC target on which packets need to be sent
 * @pEndpoint: logical endpoint on which packets needs to be sent
 * @pQueue: HTC packet queue containing the list of packets to be sent
 *
 * Get HTC send packets from TX queue on an endpoint based on available credits.
 * The function moves the packets from TX queue of the endpoint to pQueue.
 * Power-management packets (if any) are drained first, from a private queue.
 *
 * Return: None
 */
static void get_htc_send_packets_credit_based(HTC_TARGET *target,
                                              HTC_ENDPOINT *pEndpoint,
                                              HTC_PACKET_QUEUE *pQueue)
{
        int creditsRequired;
        int remainder;
        uint8_t sendFlags;
        HTC_PACKET *pPacket;
        unsigned int transferLength;
        HTC_PACKET_QUEUE *tx_queue;
        HTC_PACKET_QUEUE pm_queue;
        bool do_pm_get = false;

        /*** NOTE : the TX lock is held when this function is called ***/
        AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
                        ("+get_htc_send_packets_credit_based\n"));

        /* PM packets bypass the runtime-PM get/put below (do_pm_get stays
         * false for them) so that suspend/resume traffic can always flow
         */
        INIT_HTC_PACKET_QUEUE(&pm_queue);
        extract_htc_pm_packets(pEndpoint, &pm_queue);
        if (HTC_QUEUE_EMPTY(&pm_queue)) {
                tx_queue = &pEndpoint->TxQueue;
                do_pm_get = true;
        } else {
                tx_queue = &pm_queue;
        }

        /* loop until we can grab as many packets out of the queue as we can */
        while (true) {
                if (do_pm_get && hif_pm_runtime_get(target->hif_dev)) {
                        /* bus suspended, runtime resume issued */
                        QDF_ASSERT(HTC_PACKET_QUEUE_DEPTH(pQueue) == 0);
                        break;
                }

                sendFlags = 0;
                /* get packet at head, but don't remove it */
                pPacket = htc_get_pkt_at_head(tx_queue);
                if (pPacket == NULL) {
                        if (do_pm_get)
                                hif_pm_runtime_put(target->hif_dev);
                        break;
                }

                AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
                                (" Got head packet:%pK , Queue Depth: %d\n",
                                 pPacket,
                                 HTC_PACKET_QUEUE_DEPTH(tx_queue)));

                transferLength = pPacket->ActualLength + HTC_HDR_LENGTH;

                if (transferLength <= pEndpoint->TxCreditSize) {
                        creditsRequired = 1;
                } else {
                        /* figure out how many credits this message requires */
                        creditsRequired =
                                transferLength / pEndpoint->TxCreditSize;
                        remainder = transferLength % pEndpoint->TxCreditSize;

                        if (remainder)
                                creditsRequired++;
                }

                AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
                                (" Credits Required:%d Got:%d\n",
                                 creditsRequired, pEndpoint->TxCredits));

                if (pEndpoint->Id == ENDPOINT_0) {
                        /*
                         * endpoint 0 is special, it always has a credit and
                         * does not require credit based flow control
                         */
                        creditsRequired = 0;
                } else {

                        if (pEndpoint->TxCredits < creditsRequired) {
#if DEBUG_CREDIT
                                AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                                                ("EP%d,No Credit now.%d < %d\n",
                                                 pEndpoint->Id,
                                                 pEndpoint->TxCredits,
                                                 creditsRequired));
#endif
                                /* drop the reference taken at loop top */
                                if (do_pm_get)
                                        hif_pm_runtime_put(target->hif_dev);
                                break;
                        }

                        pEndpoint->TxCredits -= creditsRequired;
                        INC_HTC_EP_STAT(pEndpoint, TxCreditsConsummed,
                                        creditsRequired);

                        /* check if we need credits back from the target */
                        if (pEndpoint->TxCredits <=
                            pEndpoint->TxCreditsPerMaxMsg) {
                                /* tell the target we need credits ASAP! */
                                sendFlags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
                                if (pEndpoint->service_id == WMI_CONTROL_SVC) {
                                        htc_credit_record(HTC_REQUEST_CREDIT,
                                                          pEndpoint->TxCredits,
                                                          HTC_PACKET_QUEUE_DEPTH
                                                          (tx_queue));
                                }
                                INC_HTC_EP_STAT(pEndpoint,
                                                TxCreditLowIndications, 1);
#if DEBUG_CREDIT
                                AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                                                (" EP%d Needs Credits\n",
                                                 pEndpoint->Id));
#endif
                        }
                }

                /* now we can fully dequeue */
                pPacket = htc_packet_dequeue(tx_queue);
                if (pPacket) {
                        /* save the number of credits this packet consumed */
                        pPacket->PktInfo.AsTx.CreditsUsed = creditsRequired;
                        /* save send flags */
                        pPacket->PktInfo.AsTx.SendFlags = sendFlags;

                        /* queue this packet into the caller's queue */
                        HTC_PACKET_ENQUEUE(pQueue, pPacket);
                }
        }

        /* any PM packets we could not send go back to the head of TxQueue */
        if (!HTC_QUEUE_EMPTY(&pm_queue))
                queue_htc_pm_packets(pEndpoint, &pm_queue);

        AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
                        ("-get_htc_send_packets_credit_based\n"));

}
/**
 * get_htc_send_packets() - get packets based on HIF resource availability
 * @target: HTC target on which packets need to be sent
 * @pEndpoint: logical endpoint on which packets needs to be sent
 * @pQueue: HTC packet queue that receives the dequeued packets
 * @Resources: number of free HIF TX resources (fragments) available
 *
 * Non-credit-flow counterpart of get_htc_send_packets_credit_based():
 * moves packets from the endpoint TX queue to @pQueue until @Resources
 * (counted in netbuf fragments) is exhausted.
 *
 * Return: None
 */
static void get_htc_send_packets(HTC_TARGET *target,
                                 HTC_ENDPOINT *pEndpoint,
                                 HTC_PACKET_QUEUE *pQueue, int Resources)
{

        HTC_PACKET *pPacket;
        HTC_PACKET_QUEUE *tx_queue;
        HTC_PACKET_QUEUE pm_queue;
        bool do_pm_get = false;

        /*** NOTE : the TX lock is held when this function is called ***/
        AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
                        ("+get_htc_send_packets %d resources\n", Resources));

        /* PM packets are drained first and bypass runtime-PM gating */
        INIT_HTC_PACKET_QUEUE(&pm_queue);
        extract_htc_pm_packets(pEndpoint, &pm_queue);
        if (HTC_QUEUE_EMPTY(&pm_queue)) {
                tx_queue = &pEndpoint->TxQueue;
                do_pm_get = true;
        } else {
                tx_queue = &pm_queue;
        }

        /* loop until we can grab as many packets out of the queue as we can */
        while (Resources > 0) {
                int num_frags;

                if (do_pm_get && hif_pm_runtime_get(target->hif_dev)) {
                        /* bus suspended, runtime resume issued */
                        QDF_ASSERT(HTC_PACKET_QUEUE_DEPTH(pQueue) == 0);
                        break;
                }

                pPacket = htc_packet_dequeue(tx_queue);
                if (pPacket == NULL) {
                        if (do_pm_get)
                                hif_pm_runtime_put(target->hif_dev);
                        break;
                }
                AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
                                (" Got packet:%pK , New Queue Depth: %d\n",
                                 pPacket,
                                 HTC_PACKET_QUEUE_DEPTH(tx_queue)));
                /* For non-credit path the sequence number is already embedded
                 * in the constructed HTC header
                 */
                pPacket->PktInfo.AsTx.SendFlags = 0;
                pPacket->PktInfo.AsTx.CreditsUsed = 0;
                /* queue this packet into the caller's queue */
                HTC_PACKET_ENQUEUE(pQueue, pPacket);

                /*
                 * FIX THIS:
                 * For now, avoid calling qdf_nbuf_get_num_frags before calling
                 * qdf_nbuf_map, because the MacOS version of qdf_nbuf_t doesn't
                 * support qdf_nbuf_get_num_frags until after qdf_nbuf_map has
                 * been done.
                 * Assume that the non-data netbufs, i.e. WMI message netbufs,
                 * consist of a single fragment.
                 */
                /* WMI messages are in a single-fragment network buf */
                num_frags =
                        (pPacket->PktInfo.AsTx.
                         Flags & HTC_TX_PACKET_FLAG_FIXUP_NETBUF) ? 1 :
                        qdf_nbuf_get_num_frags(GET_HTC_PACKET_NET_BUF_CONTEXT
                                               (pPacket));
                Resources -= num_frags;
        }

        if (!HTC_QUEUE_EMPTY(&pm_queue))
                queue_htc_pm_packets(pEndpoint, &pm_queue);

        AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-get_htc_send_packets\n"));

}
/**
 * htc_try_send() - Send packets in a queue on an endpoint
 * @target: HTC target on which packets need to be sent
 * @pEndpoint: logical endpoint on which packets needs to be sent
 * @pCallersSendQueue: packet queue containing the list of packets to be sent
 *
 * Queues the caller's packets onto the endpoint TX queue (honouring the
 * endpoint depth limit and the EpSendFull overflow callback), then drains
 * the endpoint while credits/HIF resources last.  TxProcessCount ensures
 * only one context drains a given endpoint at a time.
 *
 * Return: enum HTC_SEND_QUEUE_RESULT indicates whether the packet was queued to
 *         be sent or the packet should be dropped by the upper layer
 */
static enum HTC_SEND_QUEUE_RESULT htc_try_send(HTC_TARGET *target,
                                        HTC_ENDPOINT *pEndpoint,
                                        HTC_PACKET_QUEUE *pCallersSendQueue)
{
        /* temp queue to hold packets at various stages */
        HTC_PACKET_QUEUE sendQueue;
        HTC_PACKET *pPacket;
        int tx_resources;
        int overflow;
        enum HTC_SEND_QUEUE_RESULT result = HTC_SEND_QUEUE_OK;

        AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("+htc_try_send (Queue:%pK Depth:%d)\n",
                                         pCallersSendQueue,
                                         (pCallersSendQueue ==
                                          NULL) ? 0 :
                                         HTC_PACKET_QUEUE_DEPTH
                                                 (pCallersSendQueue)));

        /* init the local send queue */
        INIT_HTC_PACKET_QUEUE(&sendQueue);

        do {

                /* caller didn't provide a queue, just wants us to check
                 * queues and send
                 */
                if (pCallersSendQueue == NULL)
                        break;

                if (HTC_QUEUE_EMPTY(pCallersSendQueue)) {
                        /* empty queue */
                        OL_ATH_HTC_PKT_ERROR_COUNT_INCR(target,
                                                        HTC_PKT_Q_EMPTY);
                        result = HTC_SEND_QUEUE_DROP;
                        break;
                }

                if (HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue) >=
                    pEndpoint->MaxTxQueueDepth) {
                        /* we've already overflowed */
                        overflow = HTC_PACKET_QUEUE_DEPTH(pCallersSendQueue);
                } else {
                        /* figure out how much we will overflow by */
                        overflow = HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue);
                        overflow += HTC_PACKET_QUEUE_DEPTH(pCallersSendQueue);
                        /* get how much we will overflow the TX queue by */
                        overflow -= pEndpoint->MaxTxQueueDepth;
                }

                /* if overflow is negative or zero, we are okay */
                if (overflow > 0) {
                        AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
                                        ("Endpoint %d, TX queue will overflow :%d , Tx Depth:%d, Max:%d\n",
                                         pEndpoint->Id, overflow,
                                         HTC_PACKET_QUEUE_DEPTH(&pEndpoint->
                                                                TxQueue),
                                         pEndpoint->MaxTxQueueDepth));
                }
                if ((overflow <= 0)
                    || (pEndpoint->EpCallBacks.EpSendFull == NULL)) {
                        /* all packets will fit or caller did not provide send
                         * full indication handler
                         * just move all of them to local sendQueue object
                         */
                        HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&sendQueue,
                                                          pCallersSendQueue);
                } else {
                        int i;
                        int goodPkts =
                                HTC_PACKET_QUEUE_DEPTH(pCallersSendQueue) -
                                overflow;

                        A_ASSERT(goodPkts >= 0);
                        /* we have overflowed and callback is provided. Dequeue
                         * all non-overflow packets into the sendqueue
                         */
                        for (i = 0; i < goodPkts; i++) {
                                /* pop off caller's queue */
                                pPacket = htc_packet_dequeue(pCallersSendQueue);
                                A_ASSERT(pPacket != NULL);
                                if (pPacket)
                                        /* insert into local queue */
                                        HTC_PACKET_ENQUEUE(&sendQueue,
                                                           pPacket);
                        }

                        /* the caller's queue has all the packets that won't fit
                         * walk through the caller's queue and indicate each one
                         * to the send full handler
                         */
                        ITERATE_OVER_LIST_ALLOW_REMOVE(&pCallersSendQueue->
                                                       QueueHead, pPacket,
                                                       HTC_PACKET, ListLink) {

                                AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
                                                ("Indicating overflowed TX packet: %pK\n",
                                                 pPacket));
                                /*
                                 * Remove headroom reserved for HTC_FRAME_HDR
                                 * before giving the packet back to the user via
                                 * the EpSendFull callback.
                                 */
                                restore_tx_packet(target, pPacket);

                                if (pEndpoint->EpCallBacks.
                                    EpSendFull(pEndpoint->EpCallBacks.pContext,
                                               pPacket) == HTC_SEND_FULL_DROP) {
                                        /* callback wants the packet dropped */
                                        INC_HTC_EP_STAT(pEndpoint, TxDropped,
                                                        1);
                                        /* leave this one in the caller's queue
                                         * for cleanup
                                         */
                                } else {
                                        /* callback wants to keep this packet,
                                         * remove from caller's queue
                                         */
                                        HTC_PACKET_REMOVE(pCallersSendQueue,
                                                          pPacket);
                                        /* put it in the send queue
                                         * add HTC_FRAME_HDR space reservation
                                         * again
                                         */
                                        qdf_nbuf_push_head
                                                (GET_HTC_PACKET_NET_BUF_CONTEXT
                                                (pPacket),
                                                sizeof(HTC_FRAME_HDR));

                                        HTC_PACKET_ENQUEUE(&sendQueue, pPacket);
                                }

                        }
                        ITERATE_END;

                        if (HTC_QUEUE_EMPTY(&sendQueue)) {
                                /* no packets made it in, caller will cleanup */
                                OL_ATH_HTC_PKT_ERROR_COUNT_INCR(target,
                                                        HTC_SEND_Q_EMPTY);
                                result = HTC_SEND_QUEUE_DROP;
                                break;
                        }
                }

        } while (false);

        if (result != HTC_SEND_QUEUE_OK) {
                AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-htc_try_send: %d\n",
                                                 result));
                return result;
        }

        if (!IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) {
                tx_resources =
                        hif_get_free_queue_number(target->hif_dev,
                                                  pEndpoint->UL_PipeID);
        } else {
                tx_resources = 0;
        }

        LOCK_HTC_TX(target);

        if (!HTC_QUEUE_EMPTY(&sendQueue)) {
                if (target->is_nodrop_pkt) {
                        /*
                         * nodrop pkts have higher priority than normal pkts,
                         * insert nodrop pkt to head for proper
                         * start/termination of test.
                         */
                        HTC_PACKET_QUEUE_TRANSFER_TO_HEAD(&pEndpoint->TxQueue,
                                                          &sendQueue);
                        target->is_nodrop_pkt = false;
                } else {
                        /* transfer packets to tail */
                        HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&pEndpoint->TxQueue,
                                                          &sendQueue);
                        A_ASSERT(HTC_QUEUE_EMPTY(&sendQueue));
                        INIT_HTC_PACKET_QUEUE(&sendQueue);
                }
        }

        /* increment tx processing count on entry */
        if (qdf_atomic_inc_return(&pEndpoint->TxProcessCount) > 1) {
                /* another thread or task is draining the TX queues on this
                 * endpoint that thread will reset the tx processing count when
                 * the queue is drained
                 */
                qdf_atomic_dec(&pEndpoint->TxProcessCount);
                UNLOCK_HTC_TX(target);
                AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-htc_try_send (busy)\n"));
                return HTC_SEND_QUEUE_OK;
        }

        /***** beyond this point only 1 thread may enter ******/

        /* now drain the endpoint TX queue for transmission as long as we have
         * enough transmit resources
         */
        while (true) {

                if (HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue) == 0)
                        break;

                if (pEndpoint->async_update &&
                    (!IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) &&
                    (!tx_resources))
                        break;

                if (IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) {
#if DEBUG_CREDIT
                        int cred = pEndpoint->TxCredits;
#endif
                        /* credit based mechanism provides flow control based on
                         * target transmit resource availability, we assume that
                         * the HIF layer will always have bus resources greater
                         * than target transmit resources
                         */
                        get_htc_send_packets_credit_based(target, pEndpoint,
                                                          &sendQueue);
#if DEBUG_CREDIT
                        if (ep_debug_mask & (1 << pEndpoint->Id)) {
                                if (cred - pEndpoint->TxCredits > 0) {
                                        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                                                        (" Decrease EP%d %d - %d = %d credits.\n",
                                                         pEndpoint->Id, cred,
                                                         cred -
                                                         pEndpoint->TxCredits,
                                                         pEndpoint->TxCredits));
                                }
                        }
#endif
                } else {

                        /*
                         * Header and payload belong to different fragments and
                         * consume 2 resources for one HTC packet, but USB
                         * combines them into one transfer. And one WMI message
                         * only consumes one single resource.
                         */
                        if (HTC_TX_BUNDLE_ENABLED(target) && tx_resources &&
                            hif_get_bus_type(target->hif_dev) ==
                            QDF_BUS_TYPE_USB) {
                                if (pEndpoint->service_id ==
                                    WMI_CONTROL_SVC)
                                        tx_resources =
                                                HTC_MAX_MSG_PER_BUNDLE_TX;
                                else
                                        tx_resources =
                                                (HTC_MAX_MSG_PER_BUNDLE_TX * 2);
                        }
                        /* get all the packets for this endpoint that we can for
                         * this pass
                         */
                        get_htc_send_packets(target, pEndpoint, &sendQueue,
                                             tx_resources);
                }

                if (HTC_PACKET_QUEUE_DEPTH(&sendQueue) == 0) {
                        /* didn't get any packets due to a lack of resources or
                         * TX queue was drained
                         */
                        break;
                }

                if (!pEndpoint->async_update)
                        UNLOCK_HTC_TX(target);

                /* send what we can */
                if (htc_issue_packets(target, pEndpoint, &sendQueue)) {
                        int i;

                        result = HTC_SEND_QUEUE_DROP;
                        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                                        ("htc_issue_packets, failed status:%d put it back to head of callersSendQueue",
                                         result));

                        /* release the runtime-PM references taken when these
                         * packets were pulled off the TX queue
                         */
                        for (i = HTC_PACKET_QUEUE_DEPTH(&sendQueue); i > 0; i--)
                                hif_pm_runtime_put(target->hif_dev);
                        if (!pEndpoint->async_update) {
                                LOCK_HTC_TX(target);
                        }
                        HTC_PACKET_QUEUE_TRANSFER_TO_HEAD(&pEndpoint->TxQueue,
                                                          &sendQueue);
                        break;
                }

                if (!IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) {
                        tx_resources =
                                hif_get_free_queue_number(target->hif_dev,
                                                          pEndpoint->UL_PipeID);
                }

                if (!pEndpoint->async_update) {
                        LOCK_HTC_TX(target);
                }

        }

        /* done with this endpoint, we can clear the count */
        qdf_atomic_init(&pEndpoint->TxProcessCount);

        UNLOCK_HTC_TX(target);

        AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-htc_try_send:\n"));

        return HTC_SEND_QUEUE_OK;
}
GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_ENDPOINT *pEndpoint; + HTC_ENDPOINT_ID eid; + HTC_PACKET_QUEUE *pTxQueue; + uint16_t resources; + uint16_t acQueueStatus[DATA_EP_SIZE] = { 0, 0, 0, 0 }; + + if (id < ENDPOINT_2 || id > ENDPOINT_5) + return 1; + + for (eid = ENDPOINT_2; eid <= ENDPOINT_5; eid++) { + pEndpoint = &target->endpoint[eid]; + pTxQueue = &pEndpoint->TxQueue; + + if (HTC_QUEUE_EMPTY(pTxQueue)) + acQueueStatus[eid - 2] = 1; + } + + switch (id) { + case ENDPOINT_2: /* BE */ + return acQueueStatus[0] && acQueueStatus[2] + && acQueueStatus[3]; + case ENDPOINT_3: /* BK */ + return acQueueStatus[0] && acQueueStatus[1] && acQueueStatus[2] + && acQueueStatus[3]; + case ENDPOINT_4: /* VI */ + return acQueueStatus[2] && acQueueStatus[3]; + case ENDPOINT_5: /* VO */ + return acQueueStatus[3]; + default: + return 0; + } + +} + +static A_STATUS htc_send_pkts_sched_queue(HTC_TARGET *target, + HTC_PACKET_QUEUE *pPktQueue, + HTC_ENDPOINT_ID eid) +{ + HTC_ENDPOINT *pEndpoint; + HTC_PACKET_QUEUE *pTxQueue; + HTC_PACKET *pPacket; + int goodPkts; + + pEndpoint = &target->endpoint[eid]; + pTxQueue = &pEndpoint->TxQueue; + + LOCK_HTC_TX(target); + + goodPkts = + pEndpoint->MaxTxQueueDepth - + HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue); + + if (goodPkts > 0) { + while (!HTC_QUEUE_EMPTY(pPktQueue)) { + pPacket = htc_packet_dequeue(pPktQueue); + HTC_PACKET_ENQUEUE(pTxQueue, pPacket); + goodPkts--; + + if (goodPkts <= 0) + break; + } + } + + if (HTC_PACKET_QUEUE_DEPTH(pPktQueue)) { + ITERATE_OVER_LIST_ALLOW_REMOVE(&pPktQueue->QueueHead, pPacket, + HTC_PACKET, ListLink) { + + if (pEndpoint->EpCallBacks. 
+ EpSendFull(pEndpoint->EpCallBacks.pContext, + pPacket) == HTC_SEND_FULL_DROP) { + INC_HTC_EP_STAT(pEndpoint, TxDropped, 1); + } else { + HTC_PACKET_REMOVE(pPktQueue, pPacket); + HTC_PACKET_ENQUEUE(pTxQueue, pPacket); + } + } + ITERATE_END; + } + + UNLOCK_HTC_TX(target); + + return A_OK; +} + +#endif + +static inline QDF_STATUS __htc_send_pkt(HTC_HANDLE HTCHandle, + HTC_PACKET *pPacket) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_ENDPOINT *pEndpoint; + HTC_PACKET_QUEUE pPktQueue; + qdf_nbuf_t netbuf; + HTC_FRAME_HDR *htc_hdr; + QDF_STATUS status; + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("+__htc_send_pkt\n")); + + /* get packet at head to figure out which endpoint these packets will + * go into + */ + if (NULL == pPacket) { + OL_ATH_HTC_PKT_ERROR_COUNT_INCR(target, GET_HTC_PKT_Q_FAIL); + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-__htc_send_pkt\n")); + return QDF_STATUS_E_INVAL; + } + + if ((pPacket->Endpoint >= ENDPOINT_MAX) || + (pPacket->Endpoint <= ENDPOINT_UNUSED)) { + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("%s endpoint is invalid\n", __func__)); + AR_DEBUG_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + pEndpoint = &target->endpoint[pPacket->Endpoint]; + + if (!pEndpoint->service_id) { + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("%s service_id is invalid\n", + __func__)); + return QDF_STATUS_E_INVAL; + } + +#ifdef HTC_EP_STAT_PROFILING + LOCK_HTC_TX(target); + INC_HTC_EP_STAT(pEndpoint, TxPosted, 1); + UNLOCK_HTC_TX(target); +#endif + + /* provide room in each packet's netbuf for the HTC frame header */ + netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + AR_DEBUG_ASSERT(netbuf); + if (!netbuf) + return QDF_STATUS_E_INVAL; + + qdf_nbuf_push_head(netbuf, sizeof(HTC_FRAME_HDR)); + /* setup HTC frame header */ + htc_hdr = (HTC_FRAME_HDR *)qdf_nbuf_get_frag_vaddr(netbuf, 0); + AR_DEBUG_ASSERT(htc_hdr); + if (!htc_hdr) + return QDF_STATUS_E_INVAL; + + HTC_WRITE32(htc_hdr, + SM(pPacket->ActualLength, + HTC_FRAME_HDR_PAYLOADLEN) | + SM(pPacket->Endpoint, + 
/**
 * __htc_send_pkt() - validate a packet, stamp its HTC header and send it
 * @HTCHandle: HTC handle
 * @pPacket: packet to transmit; completed with an error status if it
 *           cannot be queued
 *
 * Pushes HTC_FRAME_HDR headroom onto the packet's netbuf, writes the
 * header (sequence number assigned under the TX lock), maps the netbuf
 * for non-credit-flow endpoints, then hands the packet to htc_try_send().
 * Packets left over in the local queue are completed with E_CANCELED /
 * E_RESOURCES.
 *
 * Return: QDF_STATUS_SUCCESS once the packet has been accepted for
 *         processing, or a QDF error for invalid arguments / map failure
 */
static inline QDF_STATUS __htc_send_pkt(HTC_HANDLE HTCHandle,
                                        HTC_PACKET *pPacket)
{
        HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
        HTC_ENDPOINT *pEndpoint;
        HTC_PACKET_QUEUE pPktQueue;
        qdf_nbuf_t netbuf;
        HTC_FRAME_HDR *htc_hdr;
        QDF_STATUS status;

        AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
                        ("+__htc_send_pkt\n"));

        /* get packet at head to figure out which endpoint these packets will
         * go into
         */
        if (NULL == pPacket) {
                OL_ATH_HTC_PKT_ERROR_COUNT_INCR(target, GET_HTC_PKT_Q_FAIL);
                AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-__htc_send_pkt\n"));
                return QDF_STATUS_E_INVAL;
        }

        if ((pPacket->Endpoint >= ENDPOINT_MAX) ||
            (pPacket->Endpoint <= ENDPOINT_UNUSED)) {
                AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
                                ("%s endpoint is invalid\n", __func__));
                AR_DEBUG_ASSERT(0);
                return QDF_STATUS_E_INVAL;
        }
        pEndpoint = &target->endpoint[pPacket->Endpoint];

        if (!pEndpoint->service_id) {
                AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("%s service_id is invalid\n",
                                                 __func__));
                return QDF_STATUS_E_INVAL;
        }

#ifdef HTC_EP_STAT_PROFILING
        LOCK_HTC_TX(target);
        INC_HTC_EP_STAT(pEndpoint, TxPosted, 1);
        UNLOCK_HTC_TX(target);
#endif

        /* provide room in each packet's netbuf for the HTC frame header */
        netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket);
        AR_DEBUG_ASSERT(netbuf);
        if (!netbuf)
                return QDF_STATUS_E_INVAL;

        qdf_nbuf_push_head(netbuf, sizeof(HTC_FRAME_HDR));
        /* setup HTC frame header */
        htc_hdr = (HTC_FRAME_HDR *)qdf_nbuf_get_frag_vaddr(netbuf, 0);
        AR_DEBUG_ASSERT(htc_hdr);
        if (!htc_hdr)
                return QDF_STATUS_E_INVAL;

        HTC_WRITE32(htc_hdr,
                    SM(pPacket->ActualLength,
                       HTC_FRAME_HDR_PAYLOADLEN) |
                    SM(pPacket->Endpoint,
                       HTC_FRAME_HDR_ENDPOINTID));
        /* TX lock serializes SeqNo assignment across senders */
        LOCK_HTC_TX(target);

        pPacket->PktInfo.AsTx.SeqNo = pEndpoint->SeqNo;
        pEndpoint->SeqNo++;

        HTC_WRITE32(((uint32_t *)htc_hdr) + 1,
                    SM(pPacket->PktInfo.AsTx.SeqNo,
                       HTC_FRAME_HDR_CONTROLBYTES1));

        UNLOCK_HTC_TX(target);

        /*
         * For flow control enabled endpoints mapping is done in
         * htc_issue_packets and for non flow control enabled endpoints
         * its done here.
         */
        if (!IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) {
                pPacket->PktInfo.AsTx.Flags |= HTC_TX_PACKET_FLAG_FIXUP_NETBUF;
                status = qdf_nbuf_map(target->osdev,
                                      GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket),
                                      QDF_DMA_TO_DEVICE);
                if (status != QDF_STATUS_SUCCESS) {
                        /* NOTE(review): this error path returns without
                         * popping the HTC header pushed above — presumably
                         * the caller frees the netbuf; confirm no caller
                         * retries with the same buffer
                         */
                        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                                        ("%s: nbuf map failed, endpoint %pK, seq_no. %d\n",
                                         __func__, pEndpoint, pEndpoint->SeqNo));
                        return status;
                }
        }

        INIT_HTC_PACKET_QUEUE_AND_ADD(&pPktQueue, pPacket);
#ifdef USB_HIF_SINGLE_PIPE_DATA_SCHED
        if (!htc_send_pkts_sched_check(HTCHandle, pEndpoint->Id))
                htc_send_pkts_sched_queue(HTCHandle, &pPktQueue, pEndpoint->Id);
        else
                htc_try_send(target, pEndpoint, &pPktQueue);
#else
        htc_try_send(target, pEndpoint, &pPktQueue);
#endif

        /* do completion on any packets that couldn't get in */
        while (!HTC_QUEUE_EMPTY(&pPktQueue)) {
                pPacket = htc_packet_dequeue(&pPktQueue);

                if (HTC_STOPPING(target))
                        pPacket->Status = QDF_STATUS_E_CANCELED;
                else
                        pPacket->Status = QDF_STATUS_E_RESOURCES;

                send_packet_completion(target, pPacket);
        }

        AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-__htc_send_pkt\n"));

        return QDF_STATUS_SUCCESS;
}
("+-htc_send_pkt: Enter endPointId: %d, buffer: %pK, length: %d\n", + htc_packet->Endpoint, htc_packet->pBuffer, + htc_packet->ActualLength)); + return __htc_send_pkt(htc_handle, htc_packet); +} +qdf_export_symbol(htc_send_pkt); + +#ifdef ATH_11AC_TXCOMPACT +/** + * htc_send_data_pkt() - send single data packet on an endpoint + * @HTCHandle: pointer to HTC handle + * @netbuf: network buffer containing the data to be sent + * @ActualLength: length of data that needs to be transmitted + * + * Return: QDF_STATUS_SUCCESS for success or an appropriate QDF_STATUS error + */ +QDF_STATUS htc_send_data_pkt(HTC_HANDLE htc_hdl, qdf_nbuf_t netbuf, int ep_id, + int actual_length) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_hdl); + HTC_ENDPOINT *pEndpoint; + HTC_FRAME_HDR *p_htc_hdr; + QDF_STATUS status = QDF_STATUS_SUCCESS; + int tx_resources; + uint32_t data_attr = 0; + int htc_payload_len = actual_length; + + pEndpoint = &target->endpoint[ep_id]; + + tx_resources = hif_get_free_queue_number(target->hif_dev, + pEndpoint->UL_PipeID); + + if (tx_resources < HTC_DATA_RESOURCE_THRS) { + if (pEndpoint->ul_is_polled) { + hif_send_complete_check(pEndpoint->target->hif_dev, + pEndpoint->UL_PipeID, 1); + tx_resources = + hif_get_free_queue_number(target->hif_dev, + pEndpoint->UL_PipeID); + } + if (tx_resources < HTC_DATA_MINDESC_PERPACKET) + return QDF_STATUS_E_FAILURE; + } + + if (hif_pm_runtime_get(target->hif_dev)) + return QDF_STATUS_E_FAILURE; + + p_htc_hdr = (HTC_FRAME_HDR *)qdf_nbuf_get_frag_vaddr(netbuf, 0); + AR_DEBUG_ASSERT(p_htc_hdr); + + data_attr = qdf_nbuf_data_attr_get(netbuf); + + if (target->htc_hdr_length_check) + htc_payload_len = actual_length - HTC_HEADER_LEN; + + HTC_WRITE32(p_htc_hdr, SM(htc_payload_len, HTC_FRAME_HDR_PAYLOADLEN) + | SM(ep_id, HTC_FRAME_HDR_ENDPOINTID)); + /* + * If the HIF pipe for the data endpoint is polled rather than + * interrupt-driven, this is a good point to check whether any + * data previously sent through the HIF pipe 
have finished being + * sent. + * Since this may result in callbacks to htc_tx_completion_handler, + * which can take the HTC tx lock, make the hif_send_complete_check + * call before acquiring the HTC tx lock. + * Call hif_send_complete_check directly, rather than calling + * htc_send_complete_check, and call the PollTimerStart separately + * after calling hif_send_head, so the timer will be started to + * check for completion of the new outstanding download (in the + * unexpected event that other polling calls don't catch it). + */ + + LOCK_HTC_TX(target); + + HTC_WRITE32(((uint32_t *)p_htc_hdr) + 1, + SM(pEndpoint->SeqNo, HTC_FRAME_HDR_CONTROLBYTES1)); + + pEndpoint->SeqNo++; + + QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_HTC); + DPTRACE(qdf_dp_trace(netbuf, QDF_DP_TRACE_HTC_PACKET_PTR_RECORD, + QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(netbuf), + sizeof(qdf_nbuf_data(netbuf)), QDF_TX)); + status = hif_send_head(target->hif_dev, + pEndpoint->UL_PipeID, + pEndpoint->Id, actual_length, netbuf, data_attr); + + UNLOCK_HTC_TX(target); + return status; +} +#else /*ATH_11AC_TXCOMPACT */ + +/** + * htc_send_data_pkt() - htc_send_data_pkt + * @HTCHandle: pointer to HTC handle + * @pPacket: pointer to HTC_PACKET + * @more_data: indicates whether more data is to follow + * + * Return: QDF_STATUS_SUCCESS for success or an appropriate QDF_STATUS error + */ +QDF_STATUS htc_send_data_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket, + uint8_t more_data) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_ENDPOINT *pEndpoint; + HTC_FRAME_HDR *pHtcHdr; + HTC_PACKET_QUEUE sendQueue; + qdf_nbuf_t netbuf = NULL; + int tx_resources; + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint32_t data_attr = 0; + + if (pPacket) { + if ((pPacket->Endpoint >= ENDPOINT_MAX) || + (pPacket->Endpoint <= ENDPOINT_UNUSED)) { + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("%s endpoint is invalid\n", __func__)); + AR_DEBUG_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + pEndpoint = 
&target->endpoint[pPacket->Endpoint]; + + /* add HTC_FRAME_HDR in the initial fragment */ + netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + pHtcHdr = (HTC_FRAME_HDR *) qdf_nbuf_get_frag_vaddr(netbuf, 0); + AR_DEBUG_ASSERT(pHtcHdr); + + HTC_WRITE32(pHtcHdr, + SM(pPacket->ActualLength, + HTC_FRAME_HDR_PAYLOADLEN) | + SM(pPacket->PktInfo.AsTx.SendFlags, + HTC_FRAME_HDR_FLAGS) | + SM(pPacket->Endpoint, + HTC_FRAME_HDR_ENDPOINTID)); + /* + * If the HIF pipe for the data endpoint is polled rather than + * interrupt-driven, this is a good point to check whether any + * data previously sent through the HIF pipe have finished being + * sent. Since this may result in callbacks to + * htc_tx_completion_handler, which can take the HTC tx lock, + * make the hif_send_complete_check call before acquiring the + * HTC tx lock. + * Call hif_send_complete_check directly, rather than calling + * htc_send_complete_check, and call the PollTimerStart + * separately after calling hif_send_head, so the timer will be + * started to check for completion of the new outstanding + * download (in the unexpected event that other polling calls + * don't catch it). 
+ */ + if (pEndpoint->ul_is_polled) { + htc_send_complete_poll_timer_stop(pEndpoint); + hif_send_complete_check(pEndpoint->target->hif_dev, + pEndpoint->UL_PipeID, 0); + } + + LOCK_HTC_TX(target); + + pPacket->PktInfo.AsTx.SeqNo = pEndpoint->SeqNo; + pEndpoint->SeqNo++; + + HTC_WRITE32(((uint32_t *) pHtcHdr) + 1, + SM(pPacket->PktInfo.AsTx.SeqNo, + HTC_FRAME_HDR_CONTROLBYTES1)); + + /* append new packet to pEndpoint->TxQueue */ + HTC_PACKET_ENQUEUE(&pEndpoint->TxQueue, pPacket); + if (HTC_TX_BUNDLE_ENABLED(target) && (more_data)) { + UNLOCK_HTC_TX(target); + return QDF_STATUS_SUCCESS; + } + + QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_HTC); + DPTRACE(qdf_dp_trace(netbuf, QDF_DP_TRACE_HTC_PACKET_PTR_RECORD, + QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(netbuf), + sizeof(qdf_nbuf_data(netbuf)), QDF_TX)); + } else { + LOCK_HTC_TX(target); + pEndpoint = &target->endpoint[1]; + } + + /* increment tx processing count on entry */ + qdf_atomic_inc(&pEndpoint->TxProcessCount); + if (qdf_atomic_read(&pEndpoint->TxProcessCount) > 1) { + /* + * Another thread or task is draining the TX queues on this + * endpoint. That thread will reset the tx processing count when + * the queue is drained. 
+ */ + qdf_atomic_dec(&pEndpoint->TxProcessCount); + UNLOCK_HTC_TX(target); + return QDF_STATUS_SUCCESS; + } + + /***** beyond this point only 1 thread may enter ******/ + + INIT_HTC_PACKET_QUEUE(&sendQueue); + if (IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) { +#if DEBUG_CREDIT + int cred = pEndpoint->TxCredits; +#endif + get_htc_send_packets_credit_based(target, pEndpoint, + &sendQueue); +#if DEBUG_CREDIT + if (ep_debug_mask & (1 << pEndpoint->Id)) { + if (cred - pEndpoint->TxCredits > 0) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + (" Decrease EP%d %d - %d = %d credits.\n", + pEndpoint->Id, cred, + cred - pEndpoint->TxCredits, + pEndpoint->TxCredits)); + } + } +#endif + UNLOCK_HTC_TX(target); + } + + else if (HTC_TX_BUNDLE_ENABLED(target)) { + + if ((hif_get_bus_type(target->hif_dev) == QDF_BUS_TYPE_USB) && + hif_get_free_queue_number(target->hif_dev, + pEndpoint->UL_PipeID)) { + /* + * Header and payload belongs to the different fragments + * and consume 2 resource for one HTC package but USB + * combine into one transfer. 
+ */ + get_htc_send_packets(target, pEndpoint, &sendQueue, + (HTC_MAX_MSG_PER_BUNDLE_TX * 2)); + } else { + /* Dequeue max packets from endpoint tx queue */ + get_htc_send_packets(target, pEndpoint, &sendQueue, + HTC_MAX_TX_BUNDLE_SEND_LIMIT); + } + + UNLOCK_HTC_TX(target); + } else { + /* + * Now drain the endpoint TX queue for transmission as long as + * we have enough transmit resources + */ + tx_resources = + hif_get_free_queue_number(target->hif_dev, + pEndpoint->UL_PipeID); + get_htc_send_packets(target, pEndpoint, &sendQueue, + tx_resources); + UNLOCK_HTC_TX(target); + } + + /* send what we can */ + while (true) { + if (HTC_TX_BUNDLE_ENABLED(target) && + (HTC_PACKET_QUEUE_DEPTH(&sendQueue) >= + HTC_MIN_MSG_PER_BUNDLE) && + (hif_get_bus_type(target->hif_dev) == QDF_BUS_TYPE_SDIO || + hif_get_bus_type(target->hif_dev) == QDF_BUS_TYPE_USB)) { + htc_issue_packets_bundle(target, pEndpoint, &sendQueue); + } + pPacket = htc_packet_dequeue(&sendQueue); + if (pPacket == NULL) + break; + netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket); + + LOCK_HTC_TX(target); + /* store in look up queue to match completions */ + HTC_PACKET_ENQUEUE(&pEndpoint->TxLookupQueue, pPacket); + INC_HTC_EP_STAT(pEndpoint, TxIssued, 1); + pEndpoint->ul_outstanding_cnt++; + UNLOCK_HTC_TX(target); + + status = hif_send_head(target->hif_dev, + pEndpoint->UL_PipeID, + pEndpoint->Id, + HTC_HDR_LENGTH + pPacket->ActualLength, + netbuf, data_attr); +#if DEBUG_BUNDLE + qdf_print(" Send single EP%d buffer size:0x%x, total:0x%x.\n", + pEndpoint->Id, + pEndpoint->TxCreditSize, + HTC_HDR_LENGTH + pPacket->ActualLength); +#endif + + htc_issue_tx_bundle_stats_inc(target); + + if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { + LOCK_HTC_TX(target); + pEndpoint->ul_outstanding_cnt--; + /* remove this packet from the tx completion queue */ + HTC_PACKET_REMOVE(&pEndpoint->TxLookupQueue, pPacket); + + /* + * Don't bother reclaiming credits - HTC flow control + * is not applicable to tx data. 
+ * In LL systems, there is no download flow control, + * since there's virtually no download delay. + * In HL systems, the txrx SW explicitly performs the + * tx flow control. + */ + /* pEndpoint->TxCredits += + * pPacket->PktInfo.AsTx.CreditsUsed; + */ + + /* put this frame back at the front of the sendQueue */ + HTC_PACKET_ENQUEUE_TO_HEAD(&sendQueue, pPacket); + + /* put the sendQueue back at the front of + * pEndpoint->TxQueue + */ + HTC_PACKET_QUEUE_TRANSFER_TO_HEAD(&pEndpoint->TxQueue, + &sendQueue); + UNLOCK_HTC_TX(target); + break; /* still need to reset TxProcessCount */ + } + } + /* done with this endpoint, we can clear the count */ + qdf_atomic_init(&pEndpoint->TxProcessCount); + + if (pEndpoint->ul_is_polled) { + /* + * Start a cleanup timer to poll for download completion. + * The download completion should be noticed promptly from + * other polling calls, but the timer provides a safety net + * in case other polling calls don't occur as expected. + */ + htc_send_complete_poll_timer_start(pEndpoint); + } + + return status; +} +#endif /*ATH_11AC_TXCOMPACT */ +qdf_export_symbol(htc_send_data_pkt); + +/* + * In the adapted HIF layer, qdf_nbuf_t are passed between HIF and HTC, + * since upper layers expects HTC_PACKET containers we use the completed netbuf + * and lookup its corresponding HTC packet buffer from a lookup list. + * This is extra overhead that can be fixed by re-aligning HIF interfaces + * with HTC. 
+ * + */ +static HTC_PACKET *htc_lookup_tx_packet(HTC_TARGET *target, + HTC_ENDPOINT *pEndpoint, + qdf_nbuf_t netbuf) +{ + HTC_PACKET *pPacket = NULL; + HTC_PACKET *pFoundPacket = NULL; + HTC_PACKET_QUEUE lookupQueue; + + INIT_HTC_PACKET_QUEUE(&lookupQueue); + LOCK_HTC_EP_TX_LOOKUP(pEndpoint); + + LOCK_HTC_TX(target); + + /* mark that HIF has indicated the send complete for another packet */ + pEndpoint->ul_outstanding_cnt--; + + /* Dequeue first packet directly because of in-order completion */ + pPacket = htc_packet_dequeue(&pEndpoint->TxLookupQueue); + if (qdf_unlikely(!pPacket)) { + UNLOCK_HTC_TX(target); + UNLOCK_HTC_EP_TX_LOOKUP(pEndpoint); + return NULL; + } + if (netbuf == (qdf_nbuf_t) GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket)) { + UNLOCK_HTC_TX(target); + UNLOCK_HTC_EP_TX_LOOKUP(pEndpoint); + return pPacket; + } + HTC_PACKET_ENQUEUE(&lookupQueue, pPacket); + + /* + * Move TX lookup queue to temp queue because most of packets that are + * not index 0 are not top 10 packets. + */ + HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(&lookupQueue, + &pEndpoint->TxLookupQueue); + UNLOCK_HTC_TX(target); + + ITERATE_OVER_LIST_ALLOW_REMOVE(&lookupQueue.QueueHead, pPacket, + HTC_PACKET, ListLink) { + + if (NULL == pPacket) { + pFoundPacket = pPacket; + break; + } + /* check for removal */ + if (netbuf == + (qdf_nbuf_t) GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket)) { + /* found it */ + HTC_PACKET_REMOVE(&lookupQueue, pPacket); + pFoundPacket = pPacket; + break; + } + + } + ITERATE_END; + + LOCK_HTC_TX(target); + HTC_PACKET_QUEUE_TRANSFER_TO_HEAD(&pEndpoint->TxLookupQueue, + &lookupQueue); + UNLOCK_HTC_TX(target); + UNLOCK_HTC_EP_TX_LOOKUP(pEndpoint); + + return pFoundPacket; +} + +/** + * htc_tx_completion_handler() - htc tx completion handler + * @Context: pointer to HTC_TARGET structure + * @netbuf: pointer to netbuf for which completion handler is being called + * @EpID: end point Id on which the packet was sent + * @toeplitz_hash_result: toeplitz hash result + * + * Return: 
QDF_STATUS_SUCCESS for success or an appropriate QDF_STATUS error + */ +QDF_STATUS htc_tx_completion_handler(void *Context, + qdf_nbuf_t netbuf, unsigned int EpID, + uint32_t toeplitz_hash_result) +{ + HTC_TARGET *target = (HTC_TARGET *) Context; + HTC_ENDPOINT *pEndpoint; + HTC_PACKET *pPacket; +#ifdef USB_HIF_SINGLE_PIPE_DATA_SCHED + HTC_ENDPOINT_ID eid[DATA_EP_SIZE] = { ENDPOINT_5, ENDPOINT_4, + ENDPOINT_2, ENDPOINT_3 }; + int epidIdx; + uint16_t resourcesThresh[DATA_EP_SIZE]; /* urb resources */ + uint16_t resources; + uint16_t resourcesMax; +#endif + + pEndpoint = &target->endpoint[EpID]; + target->TX_comp_cnt++; + + do { + pPacket = htc_lookup_tx_packet(target, pEndpoint, netbuf); + if (NULL == pPacket) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("HTC TX lookup failed!\n")); + /* may have already been flushed and freed */ + netbuf = NULL; + break; + } + if (pPacket->PktInfo.AsTx.Tag != HTC_TX_PACKET_TAG_AUTO_PM) + hif_pm_runtime_put(target->hif_dev); + + if (pPacket->PktInfo.AsTx.Tag == HTC_TX_PACKET_TAG_BUNDLED) { + HTC_PACKET *pPacketTemp; + HTC_PACKET_QUEUE *pQueueSave = + (HTC_PACKET_QUEUE *) pPacket->pContext; + HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(pQueueSave, + pPacketTemp) { + pPacket->Status = QDF_STATUS_SUCCESS; + send_packet_completion(target, pPacketTemp); + } + HTC_PACKET_QUEUE_ITERATE_END; + free_htc_bundle_packet(target, pPacket); + + if (hif_get_bus_type(target->hif_dev) == + QDF_BUS_TYPE_USB) { + if (!IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) + htc_try_send(target, pEndpoint, NULL); + } + + return QDF_STATUS_SUCCESS; + } + /* will be giving this buffer back to upper layers */ + netbuf = NULL; + pPacket->Status = QDF_STATUS_SUCCESS; + send_packet_completion(target, pPacket); + + } while (false); + + if (!IS_TX_CREDIT_FLOW_ENABLED(pEndpoint)) { + /* note: when using TX credit flow, the re-checking of queues + * happens when credits flow back from the target. 
In the non-TX + * credit case, we recheck after the packet completes + */ + if ((qdf_atomic_read(&pEndpoint->TxProcessCount) == 0) || + (!pEndpoint->async_update)) { + htc_try_send(target, pEndpoint, NULL); + } + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_FEATURE_FASTPATH +/** + * htc_ctrl_msg_cmpl(): checks for tx completion for the endpoint specified + * @HTC_HANDLE : pointer to the htc target context + * @htc_ep_id : end point id + * + * checks HTC tx completion + * + * Return: none + */ +void htc_ctrl_msg_cmpl(HTC_HANDLE htc_pdev, HTC_ENDPOINT_ID htc_ep_id) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_pdev); + HTC_ENDPOINT *pendpoint = &target->endpoint[htc_ep_id]; + + htc_send_complete_check(pendpoint, 1); +} +qdf_export_symbol(htc_ctrl_msg_cmpl); +#endif + +/* callback when TX resources become available */ +void htc_tx_resource_avail_handler(void *context, uint8_t pipeID) +{ + int i; + HTC_TARGET *target = (HTC_TARGET *) context; + HTC_ENDPOINT *pEndpoint = NULL; + + for (i = 0; i < ENDPOINT_MAX; i++) { + pEndpoint = &target->endpoint[i]; + if (pEndpoint->service_id != 0) { + if (pEndpoint->UL_PipeID == pipeID) + break; + } + } + + if (i >= ENDPOINT_MAX) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Invalid pipe indicated for TX resource avail : %d!\n", + pipeID)); + return; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("HIF indicated more resources for pipe:%d\n", + pipeID)); + + htc_try_send(target, pEndpoint, NULL); +} + +#ifdef FEATURE_RUNTIME_PM +/** + * htc_kick_queues(): resumes tx transactions of suspended endpoints + * @context: pointer to the htc target context + * + * Iterates through the enpoints and provides a context to empty queues + * int the hif layer when they are stalled due to runtime suspend. 
+ * + * Return: none + */ +void htc_kick_queues(void *context) +{ + int i; + HTC_TARGET *target = (HTC_TARGET *)context; + HTC_ENDPOINT *endpoint = NULL; + + for (i = 0; i < ENDPOINT_MAX; i++) { + endpoint = &target->endpoint[i]; + + if (endpoint->service_id == 0) + continue; + + if (endpoint->EpCallBacks.ep_resume_tx_queue) + endpoint->EpCallBacks.ep_resume_tx_queue( + endpoint->EpCallBacks.pContext); + + htc_try_send(target, endpoint, NULL); + } + + hif_fastpath_resume(target->hif_dev); +} +#endif + +/* flush endpoint TX queue */ +void htc_flush_endpoint_tx(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint, + HTC_TX_TAG Tag) +{ + HTC_PACKET *pPacket; + + LOCK_HTC_TX(target); + while (HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue)) { + pPacket = htc_packet_dequeue(&pEndpoint->TxQueue); + + if (pPacket) { + /* let the sender know the packet was not delivered */ + pPacket->Status = QDF_STATUS_E_CANCELED; + send_packet_completion(target, pPacket); + } + } + UNLOCK_HTC_TX(target); +} + +/* flush pending entries in endpoint TX Lookup queue */ +void htc_flush_endpoint_txlookupQ(HTC_TARGET *target, + HTC_ENDPOINT_ID endpoint_id, + bool call_ep_callback) +{ + HTC_PACKET *packet; + HTC_ENDPOINT *endpoint; + + endpoint = &target->endpoint[endpoint_id]; + + if (!endpoint && endpoint->service_id == 0) + return; + + while (HTC_PACKET_QUEUE_DEPTH(&endpoint->TxLookupQueue)) { + packet = htc_packet_dequeue(&endpoint->TxLookupQueue); + + if (packet) { + if (call_ep_callback == true) { + packet->Status = QDF_STATUS_E_CANCELED; + send_packet_completion(target, packet); + } else { + qdf_mem_free(packet); + } + } + } +} + +/* HTC API to flush an endpoint's TX queue*/ +void htc_flush_endpoint(HTC_HANDLE HTCHandle, HTC_ENDPOINT_ID Endpoint, + HTC_TX_TAG Tag) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_ENDPOINT *pEndpoint = &target->endpoint[Endpoint]; + + if (pEndpoint->service_id == 0) { + AR_DEBUG_ASSERT(false); + /* not in use.. 
*/ + return; + } + + htc_flush_endpoint_tx(target, pEndpoint, Tag); +} + +/* HTC API to indicate activity to the credit distribution function */ +void htc_indicate_activity_change(HTC_HANDLE HTCHandle, + HTC_ENDPOINT_ID Endpoint, bool Active) +{ + /* TODO */ +} + +bool htc_is_endpoint_active(HTC_HANDLE HTCHandle, HTC_ENDPOINT_ID Endpoint) +{ + return true; +} + +void htc_set_nodrop_pkt(HTC_HANDLE HTCHandle, A_BOOL isNodropPkt) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + + target->is_nodrop_pkt = isNodropPkt; +} + +void htc_enable_hdr_length_check(HTC_HANDLE htc_hdl, bool htc_hdr_length_check) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_hdl); + + target->htc_hdr_length_check = htc_hdr_length_check; +} + +/** + * htc_process_credit_rpt() - process credit report, call distribution function + * @target: pointer to HTC_TARGET + * @pRpt: pointer to HTC_CREDIT_REPORT + * @NumEntries: number of entries in credit report + * @FromEndpoint: endpoint for which credit report is received + * + * Return: A_OK for success or an appropriate A_STATUS error + */ +void htc_process_credit_rpt(HTC_TARGET *target, HTC_CREDIT_REPORT *pRpt, + int NumEntries, HTC_ENDPOINT_ID FromEndpoint) +{ + int i; + HTC_ENDPOINT *pEndpoint; + int totalCredits = 0; + uint8_t rpt_credits, rpt_ep_id; + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + ("+htc_process_credit_rpt, Credit Report Entries:%d\n", + NumEntries)); + + /* lock out TX while we update credits */ + LOCK_HTC_TX(target); + + for (i = 0; i < NumEntries; i++, pRpt++) { + + rpt_ep_id = HTC_GET_FIELD(pRpt, HTC_CREDIT_REPORT, ENDPOINTID); + + if (rpt_ep_id >= ENDPOINT_MAX) { + AR_DEBUG_ASSERT(false); + break; + } + + rpt_credits = HTC_GET_FIELD(pRpt, HTC_CREDIT_REPORT, CREDITS); + + pEndpoint = &target->endpoint[rpt_ep_id]; +#if DEBUG_CREDIT + if (ep_debug_mask & (1 << pEndpoint->Id)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + (" Increase EP%d %d + %d = %d credits\n", + rpt_ep_id, pEndpoint->TxCredits, + rpt_credits, + 
pEndpoint->TxCredits + rpt_credits)); + } +#endif + +#ifdef HTC_EP_STAT_PROFILING + + INC_HTC_EP_STAT(pEndpoint, TxCreditRpts, 1); + INC_HTC_EP_STAT(pEndpoint, TxCreditsReturned, rpt_credits); + + if (FromEndpoint == rpt_ep_id) { + /* this credit report arrived on the same endpoint + * indicating it arrived in an RX packet + */ + INC_HTC_EP_STAT(pEndpoint, TxCreditsFromRx, + rpt_credits); + INC_HTC_EP_STAT(pEndpoint, TxCreditRptsFromRx, 1); + } else if (FromEndpoint == ENDPOINT_0) { + /* this credit arrived on endpoint 0 as a NULL msg */ + INC_HTC_EP_STAT(pEndpoint, TxCreditsFromEp0, + rpt_credits); + INC_HTC_EP_STAT(pEndpoint, TxCreditRptsFromEp0, 1); + } else { + /* arrived on another endpoint */ + INC_HTC_EP_STAT(pEndpoint, TxCreditsFromOther, + rpt_credits); + INC_HTC_EP_STAT(pEndpoint, TxCreditRptsFromOther, 1); + } + +#endif + + if (pEndpoint->service_id == WMI_CONTROL_SVC) { + htc_credit_record(HTC_PROCESS_CREDIT_REPORT, + pEndpoint->TxCredits + rpt_credits, + HTC_PACKET_QUEUE_DEPTH(&pEndpoint-> + TxQueue)); + } + + pEndpoint->TxCredits += rpt_credits; + + if (pEndpoint->TxCredits + && HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue)) { + UNLOCK_HTC_TX(target); +#ifdef ATH_11AC_TXCOMPACT + htc_try_send(target, pEndpoint, NULL); +#else + if (pEndpoint->service_id == HTT_DATA_MSG_SVC) + htc_send_data_pkt(target, NULL, 0); + else + htc_try_send(target, pEndpoint, NULL); +#endif + LOCK_HTC_TX(target); + } + totalCredits += rpt_credits; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, + (" Report indicated %d credits to distribute\n", + totalCredits)); + + UNLOCK_HTC_TX(target); + + AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-htc_process_credit_rpt\n")); +} + +/* function to fetch stats from htc layer*/ +struct ol_ath_htc_stats *ieee80211_ioctl_get_htc_stats(HTC_HANDLE HTCHandle) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + + return &(target->htc_pkt_stats); +} diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_services.c 
b/drivers/staging/qca-wifi-host-cmn/htc/htc_services.c new file mode 100644 index 0000000000000000000000000000000000000000..3ec50d4cd0edb662dc2a926eb44114891d75a0b4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_services.c @@ -0,0 +1,430 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "htc_debug.h" +#include "htc_internal.h" +#include +#include /* qdf_nbuf_t */ +#include "qdf_module.h" + +/* use credit flow control over HTC */ +unsigned int htc_credit_flow = 1; +#ifndef DEBUG_CREDIT +#define DEBUG_CREDIT 0 +#endif + +/* HTC credit flow global disable */ +void htc_global_credit_flow_disable(void) +{ + htc_credit_flow = 0; +} + +/* HTC credit flow global enable */ +void htc_global_credit_flow_enable(void) +{ + htc_credit_flow = 1; +} + +#ifdef HIF_SDIO + +/** + * htc_alt_data_credit_size_update() - update tx credit size info + * on max bundle size + * @target: hif context + * @ul_pipe: endpoint ul pipe id + * @dl_pipe: endpoint dl pipe id + * @txCreditSize: endpoint tx credit size + * + * + * When AltDataCreditSize is non zero, it indicates the credit size for + * HTT and all other services on Mbox0. Mbox1 has WMI_CONTROL_SVC which + * uses the default credit size. 
Use AltDataCreditSize only when + * mailbox is swapped. Mailbox swap bit is set by bmi_target_ready at + * the end of BMI phase. + * + * The Credit Size is a parameter associated with the mbox rather than + * a service. Multiple services can run on this mbox. + * + * If AltDataCreditSize is 0, that means the firmware doesn't support + * this feature. Default to the TargetCreditSize + * + * Return: None + */ +static inline void +htc_alt_data_credit_size_update(HTC_TARGET *target, + uint8_t *ul_pipe, + uint8_t *dl_pipe, + int *txCreditSize) +{ + if ((target->AltDataCreditSize) && + (*ul_pipe == 1) && (*dl_pipe == 0)) + *txCreditSize = target->AltDataCreditSize; + +} +#else + +static inline void +htc_alt_data_credit_size_update(HTC_TARGET *target, + uint8_t *ul_pipe, + uint8_t *dl_pipe, + int *txCreditSize) +{ +} +#endif + +QDF_STATUS htc_connect_service(HTC_HANDLE HTCHandle, + struct htc_service_connect_req *pConnectReq, + struct htc_service_connect_resp *pConnectResp) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + QDF_STATUS status = QDF_STATUS_SUCCESS; + HTC_PACKET *pSendPacket = NULL; + HTC_CONNECT_SERVICE_RESPONSE_MSG *pResponseMsg; + HTC_CONNECT_SERVICE_MSG *pConnectMsg; + HTC_ENDPOINT_ID assignedEndpoint = ENDPOINT_MAX; + HTC_ENDPOINT *pEndpoint; + unsigned int maxMsgSize = 0; + qdf_nbuf_t netbuf; + uint8_t txAlloc; + int length; + bool disableCreditFlowCtrl = false; + uint16_t conn_flags; + uint16_t rsp_msg_id, rsp_msg_serv_id, rsp_msg_max_msg_size; + uint8_t rsp_msg_status, rsp_msg_end_id, rsp_msg_serv_meta_len; + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("+htc_connect_service, target:%pK SvcID:0x%X\n", target, + pConnectReq->service_id)); + + do { + + AR_DEBUG_ASSERT(pConnectReq->service_id != 0); + + if (HTC_CTRL_RSVD_SVC == pConnectReq->service_id) { + /* special case for pseudo control service */ + assignedEndpoint = ENDPOINT_0; + maxMsgSize = HTC_MAX_CONTROL_MESSAGE_LENGTH; + txAlloc = 0; + + } else { + + txAlloc = 
htc_get_credit_allocation(target, + pConnectReq->service_id); + + if (!txAlloc) { + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("Service %d does not allocate target credits!\n", + pConnectReq->service_id)); + } + + /* allocate a packet to send to the target */ + pSendPacket = htc_alloc_control_tx_packet(target); + + if (NULL == pSendPacket) { + AR_DEBUG_ASSERT(false); + status = QDF_STATUS_E_NOMEM; + break; + } + + netbuf = + (qdf_nbuf_t) + GET_HTC_PACKET_NET_BUF_CONTEXT(pSendPacket); + length = + sizeof(HTC_CONNECT_SERVICE_MSG) + + pConnectReq->MetaDataLength; + + /* assemble connect service message */ + qdf_nbuf_put_tail(netbuf, length); + pConnectMsg = + (HTC_CONNECT_SERVICE_MSG *) qdf_nbuf_data(netbuf); + + if (NULL == pConnectMsg) { + AR_DEBUG_ASSERT(0); + status = QDF_STATUS_E_FAULT; + break; + } + + qdf_mem_zero(pConnectMsg, + sizeof(HTC_CONNECT_SERVICE_MSG)); + + conn_flags = + (pConnectReq-> + ConnectionFlags & ~HTC_SET_RECV_ALLOC_MASK) | + HTC_CONNECT_FLAGS_SET_RECV_ALLOCATION(txAlloc); + HTC_SET_FIELD(pConnectMsg, HTC_CONNECT_SERVICE_MSG, + MESSAGEID, HTC_MSG_CONNECT_SERVICE_ID); + HTC_SET_FIELD(pConnectMsg, HTC_CONNECT_SERVICE_MSG, + SERVICE_ID, pConnectReq->service_id); + HTC_SET_FIELD(pConnectMsg, HTC_CONNECT_SERVICE_MSG, + CONNECTIONFLAGS, conn_flags); + + if (pConnectReq-> + ConnectionFlags & + HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL) { + disableCreditFlowCtrl = true; + } + + if (!htc_credit_flow) + disableCreditFlowCtrl = true; + + /* check caller if it wants to transfer meta data */ + if ((pConnectReq->pMetaData != NULL) && + (pConnectReq->MetaDataLength <= + HTC_SERVICE_META_DATA_MAX_LENGTH)) { + /* copy meta data into msg buffer (after hdr) */ + qdf_mem_copy((uint8_t *) pConnectMsg + + sizeof(HTC_CONNECT_SERVICE_MSG), + pConnectReq->pMetaData, + pConnectReq->MetaDataLength); + + HTC_SET_FIELD(pConnectMsg, + HTC_CONNECT_SERVICE_MSG, + SERVICEMETALENGTH, + pConnectReq->MetaDataLength); + } + + SET_HTC_PACKET_INFO_TX(pSendPacket, + NULL, + (uint8_t *) 
pConnectMsg, + length, + ENDPOINT_0, + HTC_SERVICE_TX_PACKET_TAG); + + status = htc_send_pkt((HTC_HANDLE) target, pSendPacket); + /* we don't own it anymore */ + pSendPacket = NULL; + if (QDF_IS_STATUS_ERROR(status)) + break; + + /* wait for response */ + status = htc_wait_recv_ctrl_message(target); + if (QDF_IS_STATUS_ERROR(status)) + break; + /* we controlled the buffer creation so it has to be + * properly aligned + */ + pResponseMsg = + (HTC_CONNECT_SERVICE_RESPONSE_MSG *) target-> + CtrlResponseBuffer; + + rsp_msg_id = HTC_GET_FIELD(pResponseMsg, + HTC_CONNECT_SERVICE_RESPONSE_MSG, + MESSAGEID); + rsp_msg_serv_id = + HTC_GET_FIELD(pResponseMsg, + HTC_CONNECT_SERVICE_RESPONSE_MSG, + SERVICEID); + rsp_msg_status = + HTC_GET_FIELD(pResponseMsg, + HTC_CONNECT_SERVICE_RESPONSE_MSG, + STATUS); + rsp_msg_end_id = + HTC_GET_FIELD(pResponseMsg, + HTC_CONNECT_SERVICE_RESPONSE_MSG, + ENDPOINTID); + rsp_msg_max_msg_size = + HTC_GET_FIELD(pResponseMsg, + HTC_CONNECT_SERVICE_RESPONSE_MSG, + MAXMSGSIZE); + rsp_msg_serv_meta_len = + HTC_GET_FIELD(pResponseMsg, + HTC_CONNECT_SERVICE_RESPONSE_MSG, + SERVICEMETALENGTH); + + if ((rsp_msg_id != HTC_MSG_CONNECT_SERVICE_RESPONSE_ID) + || (target->CtrlResponseLength < + sizeof(HTC_CONNECT_SERVICE_RESPONSE_MSG))) { + /* this message is not valid */ + AR_DEBUG_ASSERT(false); + status = QDF_STATUS_E_PROTO; + break; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("htc_connect_service, service 0x%X connect response from target status:%d, assigned ep: %d\n", + rsp_msg_serv_id, rsp_msg_status, + rsp_msg_end_id)); + + pConnectResp->ConnectRespCode = rsp_msg_status; + + /* check response status */ + if (rsp_msg_status != HTC_SERVICE_SUCCESS) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + (" Target failed service 0x%X connect request (status:%d)\n", + rsp_msg_serv_id, + rsp_msg_status)); + status = QDF_STATUS_E_PROTO; +/* TODO: restore the ifdef when FW supports services 301 and 302 + * (HTT_MSG_DATA[23]_MSG_SVC) + */ +/* #ifdef QCA_TX_HTT2_SUPPORT */ + /* 
Keep work and not to block the control msg */ + target->CtrlResponseProcessing = false; +/* #endif */ /* QCA_TX_HTT2_SUPPORT */ + break; + } + + assignedEndpoint = (HTC_ENDPOINT_ID) rsp_msg_end_id; + maxMsgSize = rsp_msg_max_msg_size; + + if ((pConnectResp->pMetaData != NULL) && + (rsp_msg_serv_meta_len > 0) && + (rsp_msg_serv_meta_len <= + HTC_SERVICE_META_DATA_MAX_LENGTH)) { + /* caller supplied a buffer and the target + * responded with data + */ + int copyLength = + min((int)pConnectResp->BufferLength, + (int)rsp_msg_serv_meta_len); + /* copy the meta data */ + qdf_mem_copy(pConnectResp->pMetaData, + ((uint8_t *) pResponseMsg) + + sizeof + (HTC_CONNECT_SERVICE_RESPONSE_MSG), + copyLength); + pConnectResp->ActualLength = copyLength; + } + /* done processing response buffer */ + target->CtrlResponseProcessing = false; + } + + /* rest of these are parameter checks so set the error status */ + status = QDF_STATUS_E_PROTO; + + if (assignedEndpoint >= ENDPOINT_MAX) { + AR_DEBUG_ASSERT(false); + break; + } + + if (0 == maxMsgSize) { + AR_DEBUG_ASSERT(false); + break; + } + + pEndpoint = &target->endpoint[assignedEndpoint]; + pEndpoint->Id = assignedEndpoint; + if (pEndpoint->service_id != 0) { + /* endpoint already in use! 
*/ + AR_DEBUG_ASSERT(false); + break; + } + + /* return assigned endpoint to caller */ + pConnectResp->Endpoint = assignedEndpoint; + pConnectResp->MaxMsgLength = maxMsgSize; + + /* setup the endpoint */ + /* service_id marks the endpoint in use */ + pEndpoint->service_id = pConnectReq->service_id; + pEndpoint->MaxTxQueueDepth = pConnectReq->MaxSendQueueDepth; + pEndpoint->MaxMsgLength = maxMsgSize; + pEndpoint->TxCredits = txAlloc; + pEndpoint->TxCreditSize = target->TargetCreditSize; + pEndpoint->TxCreditsPerMaxMsg = + maxMsgSize / target->TargetCreditSize; + if (maxMsgSize % target->TargetCreditSize) + pEndpoint->TxCreditsPerMaxMsg++; +#if DEBUG_CREDIT + qdf_print(" Endpoint%d initial credit:%d, size:%d.\n", + pEndpoint->Id, pEndpoint->TxCredits, + pEndpoint->TxCreditSize); +#endif + + /* copy all the callbacks */ + pEndpoint->EpCallBacks = pConnectReq->EpCallbacks; + pEndpoint->async_update = 0; + + status = hif_map_service_to_pipe(target->hif_dev, + pEndpoint->service_id, + &pEndpoint->UL_PipeID, + &pEndpoint->DL_PipeID, + &pEndpoint->ul_is_polled, + &pEndpoint->dl_is_polled); + if (QDF_IS_STATUS_ERROR(status)) + break; + + htc_alt_data_credit_size_update(target, + &pEndpoint->UL_PipeID, + &pEndpoint->DL_PipeID, + &pEndpoint->TxCreditSize); + + /* not currently supported */ + qdf_assert(!pEndpoint->dl_is_polled); + + if (pEndpoint->ul_is_polled) { + qdf_timer_init(target->osdev, + &pEndpoint->ul_poll_timer, + htc_send_complete_check_cleanup, + pEndpoint, + QDF_TIMER_TYPE_SW); + } + + HTC_TRACE("SVC:0x%4.4X, ULpipe:%d DLpipe:%d id:%d Ready", + pEndpoint->service_id, pEndpoint->UL_PipeID, + pEndpoint->DL_PipeID, pEndpoint->Id); + + if (disableCreditFlowCtrl && pEndpoint->TxCreditFlowEnabled) { + pEndpoint->TxCreditFlowEnabled = false; + HTC_TRACE("SVC:0x%4.4X ep:%d TX flow control disabled", + pEndpoint->service_id, assignedEndpoint); + } + + } while (false); + + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("-htc_connect_service\n")); + + return status; +} 
+qdf_export_symbol(htc_connect_service); + +void htc_set_credit_distribution(HTC_HANDLE HTCHandle, + void *pCreditDistContext, + HTC_CREDIT_DIST_CALLBACK CreditDistFunc, + HTC_CREDIT_INIT_CALLBACK CreditInitFunc, + HTC_SERVICE_ID ServicePriorityOrder[], + int ListLength) +{ + /* NOT Supported, this transport does not use a credit based flow + * control mechanism + */ + +} + +void htc_fw_event_handler(void *context, QDF_STATUS status) +{ + HTC_TARGET *target = (HTC_TARGET *) context; + struct htc_init_info *initInfo = &target->HTCInitInfo; + + /* check if target failure handler exists and pass error code to it. */ + if (target->HTCInitInfo.TargetFailure != NULL) + initInfo->TargetFailure(initInfo->pContext, status); +} + + +void htc_set_async_ep(HTC_HANDLE HTCHandle, + HTC_ENDPOINT_ID htc_ep_id, bool value) +{ + HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle); + HTC_ENDPOINT *pEndpoint = &target->endpoint[htc_ep_id]; + + pEndpoint->async_update = value; + qdf_print("%s: htc_handle %pK, ep %d, value %d\n", __func__, + HTCHandle, htc_ep_id, value); +} + diff --git a/drivers/staging/qca-wifi-host-cmn/init_deinit/dispatcher/inc/dispatcher_init_deinit.h b/drivers/staging/qca-wifi-host-cmn/init_deinit/dispatcher/inc/dispatcher_init_deinit.h new file mode 100644 index 0000000000000000000000000000000000000000..e0f23b132ada759b4a53c3860a99b0a36a6a12ee --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/init_deinit/dispatcher/inc/dispatcher_init_deinit.h @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file provides various init/deinit trigger point for new + * components. + */ + +#if !defined(__DISPATCHER_INIT_H) +#define __DISPATCHER_INIT_H + +#include +#include +#include +#include + +/* Function pointer for spectral pdev open handler */ +typedef QDF_STATUS (*spectral_pdev_open_handler)( + struct wlan_objmgr_pdev *pdev); + +/** + * dispatcher_init(): API to init all new components + * + * This API calls all new components init APIs. This is invoked + * from HDD/OS_If layer during: + * 1) Driver load sequence + * 2) before probing the attached device. + * 3) FW is not ready + * 4) WMI channel is not established + * + * A component can't communicate with FW during init stage. + * + * Return: none + */ +QDF_STATUS dispatcher_init(void); + +/** + * dispatcher_deinit(): API to de-init all new components + * + * This API calls all new components de-init APIs. This is invoked + * from HDD/OS_If layer during: + * 1) Driver unload sequence + * 2) FW is dead + * 3) WMI channel is destroyed + * 4) all PDEV and PSOC objects are destroyed + * + * A component can't communicate with FW during de-init stage. + * + * Return: none + */ +QDF_STATUS dispatcher_deinit(void); + +/** + * dispatcher_enable(): global (above psoc) level component start + * + * Prepare components to service requests. Must only be called after + * dispatcher_init(). + * + * Return: QDF_STATUS + */ +QDF_STATUS dispatcher_enable(void); + +/** + * dispatcher_disable(): global (above psoc) level component stop + * + * Stop components from servicing requests. Must be called before + * scheduler_deinit(). 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS dispatcher_disable(void); + +/** + * dispatcher_psoc_open(): API to trigger PSOC open for all new components + * @psoc: psoc context + * + * This API calls all new components PSOC OPEN APIs. This is invoked from + * HDD/OS_If layer during: + * 1) Driver load sequence + * 2) PSOC object is created + * 3) FW is not yet ready + * 4) WMI channel is not yet established with FW + * + * PSOC open happens before FW WMI ready and hence a component can't + * communicate with FW during PSOC open sequence. + * + * Return: none + */ +QDF_STATUS dispatcher_psoc_open(struct wlan_objmgr_psoc *psoc); + +/** + * dispatcher_psoc_close(): API to trigger PSOC close for all new components + * @psoc: psoc context + * + * This API calls all new components PSOC CLOSE APIs. This is invoked from + * HDD/OS_If layer during: + * 1) Driver unload sequence + * 2) PSOC object is destroyed + * 3) FW is already dead(PDEV suspended) + * 4) WMI channel is destroyed with FW + * + * A component can't communicate with FW during PSOC close. + * + * Return: none + */ +QDF_STATUS dispatcher_psoc_close(struct wlan_objmgr_psoc *psoc); + +/** + * dispatcher_psoc_enable(): API to trigger PSOC enable(start) for all new + * components + * @psoc: psoc context + * + * This API calls all new components PSOC enable(start) APIs. This is invoked + * from HDD/OS_If layer during: + * 1) Driver load sequence + * 2) PSOC object is created + * 3) WMI endpoint and WMI channel is ready with FW + * 4) WMI FW ready event is also received from FW. + * + * FW is already ready and WMI channel is established by this time so a + * component can communicate with FW during PSOC enable sequence. + * + * Return: none + */ +QDF_STATUS dispatcher_psoc_enable(struct wlan_objmgr_psoc *psoc); + +/** + * dispatcher_psoc_disable(): API to trigger PSOC disable(stop) for all new + * components + * @psoc: psoc context + * + * This API calls all new components PSOC disable(stop) APIs. 
This is invoked + * from HDD/OS_If layer during: + * 1) Driver unload sequence + * 2) WMI channel is still available + * 3) FW is still running and up + * 4) PSOC object is not destroyed + * + * A component should abort all its ongign transaction with FW at this stage + * for example scan component needs to abort all its ongoing scan in FW because + * is goign to be stopped very soon. + * + * Return: none + */ +QDF_STATUS dispatcher_psoc_disable(struct wlan_objmgr_psoc *psoc); + +/** + * dispatcher_pdev_open(): API to trigger PDEV open for all new components + * @pdev: pdev context + * + * This API calls all new components PDEV OPEN APIs. This is invoked from + * during PDEV object is created. + * + * Return: none + */ +QDF_STATUS dispatcher_pdev_open(struct wlan_objmgr_pdev *pdev); + +/** + * dispatcher_pdev_close(): API to trigger PDEV close for all new components + * @pdev: pdev context + * + * This API calls all new components PDEV CLOSE APIs. This is invoked from + * during driver unload sequence. + * + * Return: none + */ +QDF_STATUS dispatcher_pdev_close(struct wlan_objmgr_pdev *pdev); + +/** + * dispatcher_register_spectral_pdev_open_handler(): + * API to register spectral pdev open handler + * @handler: pdev open handler + * + * This API registers spectral pdev open handler. + * + * Return: none + */ +QDF_STATUS dispatcher_register_spectral_pdev_open_handler(QDF_STATUS (*handler) + (struct wlan_objmgr_pdev *pdev)); + +#endif /* End of !defined(__DISPATCHER_INIT_H) */ diff --git a/drivers/staging/qca-wifi-host-cmn/init_deinit/dispatcher/src/dispatcher_init_deinit.c b/drivers/staging/qca-wifi-host-cmn/init_deinit/dispatcher/src/dispatcher_init_deinit.c new file mode 100644 index 0000000000000000000000000000000000000000..8a4a4b6bddc877c28c7b8ea8055f2a724a3030f2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/init_deinit/dispatcher/src/dispatcher_init_deinit.c @@ -0,0 +1,1324 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef WLAN_POLICY_MGR_ENABLE +#include "wlan_policy_mgr_api.h" +#endif +#ifdef WLAN_ATF_ENABLE +#include +#endif +#ifdef QCA_SUPPORT_SON +#include +#endif +#ifdef WLAN_SA_API_ENABLE +#include +#endif +#ifdef WIFI_POS_CONVERGED +#include "wifi_pos_api.h" +#endif /* WIFI_POS_CONVERGED */ +#ifdef WLAN_FEATURE_NAN_CONVERGENCE +#include "wlan_nan_api.h" +#endif /* WLAN_FEATURE_NAN_CONVERGENCE */ +#ifdef CONVERGED_P2P_ENABLE +#include +#include +#endif +#include +#ifdef WLAN_CONV_CRYPTO_SUPPORTED +#include "wlan_crypto_main.h" +#endif +#ifdef DFS_COMPONENT_ENABLE +#include +#endif + +#ifdef WLAN_OFFCHAN_TXRX_ENABLE +#include +#endif + +#ifdef CONVERGED_TDLS_ENABLE +#include "wlan_tdls_ucfg_api.h" +#endif + +#ifdef WLAN_SUPPORT_SPLITMAC +#include +#endif +#ifdef WLAN_CONV_SPECTRAL_ENABLE +#include +#endif +#ifdef WLAN_SUPPORT_FILS +#include +#endif + +#ifdef WLAN_SUPPORT_GREEN_AP +#include +#endif + +#ifdef QCA_SUPPORT_CP_STATS +#include +#endif + +/** + * DOC: This file provides various init/deinit trigger point for new + * components. 
+ */ + +/* All new components needs to replace their dummy init/deinit + * psoc_open, psco_close, psoc_enable and psoc_disable APIs once + * their actual handlers are ready + */ + +spectral_pdev_open_handler dispatcher_spectral_pdev_open_handler_cb; + +#ifdef QCA_SUPPORT_CP_STATS +static QDF_STATUS dispatcher_init_cp_stats(void) +{ + return wlan_cp_stats_init(); +} + +static QDF_STATUS dispatcher_deinit_cp_stats(void) +{ + return wlan_cp_stats_deinit(); +} + +static QDF_STATUS cp_stats_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return wlan_cp_stats_open(psoc); +} + +static QDF_STATUS cp_stats_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return wlan_cp_stats_close(psoc); +} + +static QDF_STATUS cp_stats_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_cp_stats_enable(psoc); +} + +static QDF_STATUS cp_stats_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_cp_stats_disable(psoc); +} +#else +static QDF_STATUS dispatcher_init_cp_stats(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_deinit_cp_stats(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS cp_stats_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS cp_stats_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS cp_stats_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS cp_stats_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef CONVERGED_P2P_ENABLE +static QDF_STATUS p2p_init(void) +{ + return ucfg_p2p_init(); +} + +static QDF_STATUS p2p_deinit(void) +{ + return ucfg_p2p_deinit(); +} + +static QDF_STATUS p2p_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return ucfg_p2p_psoc_open(psoc); +} + +static QDF_STATUS p2p_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return ucfg_p2p_psoc_close(psoc); +} + +static QDF_STATUS p2p_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + 
return wlan_p2p_start(psoc); +} + +static QDF_STATUS p2p_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_p2p_stop(psoc); +} +#else +static QDF_STATUS p2p_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS p2p_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS p2p_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS p2p_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS p2p_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS p2p_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* END of CONVERGED_P2P_ENABLE */ + +#ifdef CONVERGED_TDLS_ENABLE +static QDF_STATUS tdls_init(void) +{ + return ucfg_tdls_init(); +} + +static QDF_STATUS tdls_deinit(void) +{ + return ucfg_tdls_deinit(); +} + +static QDF_STATUS tdls_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return ucfg_tdls_psoc_open(psoc); +} + +static QDF_STATUS tdls_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return ucfg_tdls_psoc_close(psoc); +} + +static QDF_STATUS tdls_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return ucfg_tdls_psoc_enable(psoc); +} + +static QDF_STATUS tdls_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return ucfg_tdls_psoc_disable(psoc); +} +#else +static QDF_STATUS tdls_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS tdls_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS tdls_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS tdls_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS tdls_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + + +static QDF_STATUS tdls_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#if defined QCA_SUPPORT_SON && QCA_SUPPORT_SON >= 1 +static 
QDF_STATUS dispatcher_init_son(void) +{ + return wlan_son_init(); +} +static QDF_STATUS son_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return wlan_son_psoc_open(psoc); +} +static QDF_STATUS dispatcher_deinit_son(void) +{ + return wlan_son_deinit(); +} + +static QDF_STATUS son_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return wlan_son_psoc_close(psoc); +} +#else +static QDF_STATUS dispatcher_init_son(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_deinit_son(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS son_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS son_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +#endif /* END of QCA_SUPPORT_SON */ + +static QDF_STATUS dispatcher_regulatory_init(void) +{ + return wlan_regulatory_init(); +} + +static QDF_STATUS dispatcher_regulatory_deinit(void) +{ + return wlan_regulatory_deinit(); +} + +static QDF_STATUS dispatcher_regulatory_psoc_open(struct wlan_objmgr_psoc + *psoc) +{ + return regulatory_psoc_open(psoc); +} + +static QDF_STATUS dispatcher_regulatory_psoc_close(struct wlan_objmgr_psoc + *psoc) +{ + return regulatory_psoc_close(psoc); +} + +static QDF_STATUS dispatcher_regulatory_pdev_open(struct wlan_objmgr_pdev + *pdev) +{ + return regulatory_pdev_open(pdev); +} + +#ifdef WLAN_CONV_SPECTRAL_ENABLE +#ifdef CONFIG_WIN +QDF_STATUS dispatcher_register_spectral_pdev_open_handler( + spectral_pdev_open_handler handler) +{ + dispatcher_spectral_pdev_open_handler_cb = handler; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(dispatcher_register_spectral_pdev_open_handler); + +static QDF_STATUS dispatcher_spectral_pdev_open(struct wlan_objmgr_pdev + *pdev) +{ + return dispatcher_spectral_pdev_open_handler_cb(pdev); +} + +static QDF_STATUS dispatcher_spectral_pdev_close(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS dispatcher_spectral_pdev_open(struct 
wlan_objmgr_pdev + *pdev) +{ + return spectral_pdev_open(pdev); +} + +static QDF_STATUS dispatcher_spectral_pdev_close(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#else +static QDF_STATUS dispatcher_spectral_pdev_open(struct wlan_objmgr_pdev + *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_spectral_pdev_close(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +static QDF_STATUS dispatcher_regulatory_pdev_close(struct wlan_objmgr_pdev + *pdev) +{ + return regulatory_pdev_close(pdev); +} + +#ifdef WLAN_POLICY_MGR_ENABLE +static QDF_STATUS dispatcher_policy_mgr_init(void) +{ + return policy_mgr_init(); +} + +static QDF_STATUS dispatcher_policy_mgr_deinit(void) +{ + return policy_mgr_deinit(); +} + +static QDF_STATUS dispatcher_policy_mgr_psoc_open( + struct wlan_objmgr_psoc *psoc) +{ + return policy_mgr_psoc_open(psoc); +} + +static QDF_STATUS dispatcher_policy_mgr_psoc_close( + struct wlan_objmgr_psoc *psoc) +{ + return policy_mgr_psoc_close(psoc); +} + +static QDF_STATUS dispatcher_policy_mgr_psoc_enable( + struct wlan_objmgr_psoc *psoc) +{ + return policy_mgr_psoc_enable(psoc); +} + +static QDF_STATUS dispatcher_policy_mgr_psoc_disable( + struct wlan_objmgr_psoc *psoc) +{ + return policy_mgr_psoc_disable(psoc); +} +#else +static QDF_STATUS dispatcher_policy_mgr_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_policy_mgr_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_policy_mgr_psoc_open( + struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_policy_mgr_psoc_close( + struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_policy_mgr_psoc_enable( + struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_policy_mgr_psoc_disable( + struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} 
+#endif /* END of WLAN_POLICY_MGR_ENABLE */ + +#ifdef WLAN_SA_API_ENABLE +static QDF_STATUS dispatcher_init_sa_api(void) +{ + return wlan_sa_api_init(); +} + +static QDF_STATUS dispatcher_deinit_sa_api(void) +{ + return wlan_sa_api_deinit(); +} + +static QDF_STATUS sa_api_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_sa_api_enable(psoc); +} + +static QDF_STATUS sa_api_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_sa_api_disable(psoc); +} +#else +static QDF_STATUS dispatcher_init_sa_api(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_deinit_sa_api(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS sa_api_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS sa_api_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* END of WLAN_SA_API_ENABLE */ + + +#ifdef WLAN_ATF_ENABLE +static QDF_STATUS dispatcher_init_atf(void) +{ + return wlan_atf_init(); +} + +static QDF_STATUS dispatcher_deinit_atf(void) +{ + return wlan_atf_deinit(); +} + +static QDF_STATUS atf_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return wlan_atf_open(psoc); +} + +static QDF_STATUS atf_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return wlan_atf_close(psoc); +} + +static QDF_STATUS atf_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_atf_enable(psoc); +} + +static QDF_STATUS atf_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_atf_disable(psoc); +} +#else +static QDF_STATUS dispatcher_init_atf(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_deinit_atf(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS atf_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS atf_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS atf_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static 
QDF_STATUS atf_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* END of WLAN_ATF_ENABLE */ + +#ifdef WLAN_CONV_CRYPTO_SUPPORTED +static QDF_STATUS dispatcher_init_crypto(void) +{ + return wlan_crypto_init(); +} + +static QDF_STATUS dispatcher_deinit_crypto(void) +{ + return wlan_crypto_deinit(); +} +#else +static QDF_STATUS dispatcher_init_crypto(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_deinit_crypto(void) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* END of WLAN_CONV_CRYPTO_SUPPORTED */ + +#ifdef WIFI_POS_CONVERGED +static QDF_STATUS dispatcher_init_wifi_pos(void) +{ + return wifi_pos_init(); +} + +static QDF_STATUS dispatcher_deinit_wifi_pos(void) +{ + return wifi_pos_deinit(); +} + +static QDF_STATUS dispatcher_wifi_pos_enable(struct wlan_objmgr_psoc *psoc) +{ + return wifi_pos_psoc_enable(psoc); +} + +static QDF_STATUS dispatcher_wifi_pos_disable(struct wlan_objmgr_psoc *psoc) +{ + return wifi_pos_psoc_disable(psoc); +} +#else +static QDF_STATUS dispatcher_init_wifi_pos(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_deinit_wifi_pos(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_wifi_pos_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_wifi_pos_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef WLAN_FEATURE_NAN_CONVERGENCE +static QDF_STATUS dispatcher_init_nan(void) +{ + return nan_init(); +} + +static QDF_STATUS dispatcher_deinit_nan(void) +{ + return nan_deinit(); +} + +static QDF_STATUS dispatcher_nan_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return nan_psoc_enable(psoc); +} + +static QDF_STATUS dispatcher_nan_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return nan_psoc_disable(psoc); +} +#else +static QDF_STATUS dispatcher_init_nan(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_deinit_nan(void) +{ + 
return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_nan_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_nan_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef DFS_COMPONENT_ENABLE +static QDF_STATUS dispatcher_init_dfs(void) +{ + return dfs_init(); +} + +static QDF_STATUS dispatcher_deinit_dfs(void) +{ + return dfs_deinit(); +} + +static QDF_STATUS dispatcher_dfs_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return wifi_dfs_psoc_enable(psoc); +} + +static QDF_STATUS dispatcher_dfs_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return wifi_dfs_psoc_disable(psoc); +} +#else +static QDF_STATUS dispatcher_init_dfs(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_deinit_dfs(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_dfs_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_dfs_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef WLAN_OFFCHAN_TXRX_ENABLE +static QDF_STATUS dispatcher_offchan_txrx_init(void) +{ + return wlan_offchan_txrx_init(); +} + +static QDF_STATUS dispatcher_offchan_txrx_deinit(void) +{ + return wlan_offchan_txrx_deinit(); +} +#else +static QDF_STATUS dispatcher_offchan_txrx_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_offchan_txrx_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} +#endif /*WLAN_OFFCHAN_TXRX_ENABLE*/ + +#ifdef WLAN_SUPPORT_SPLITMAC +static QDF_STATUS dispatcher_splitmac_init(void) +{ + return wlan_splitmac_init(); +} + +static QDF_STATUS dispatcher_splitmac_deinit(void) +{ + return wlan_splitmac_deinit(); +} +#else +static QDF_STATUS dispatcher_splitmac_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_splitmac_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_SUPPORT_SPLITMAC */ + +#ifdef 
WLAN_CONV_SPECTRAL_ENABLE +#ifdef CONFIG_MCL +static QDF_STATUS dispatcher_spectral_init(void) +{ + return wlan_spectral_init(); +} + +static QDF_STATUS dispatcher_spectral_deinit(void) +{ + return wlan_spectral_deinit(); +} +#else +static QDF_STATUS dispatcher_spectral_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_spectral_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#else +static QDF_STATUS dispatcher_spectral_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_spectral_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef DIRECT_BUF_RX_ENABLE +static QDF_STATUS dispatcher_dbr_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_tx_ops *tx_ops; + + tx_ops = wlan_psoc_get_lmac_if_txops(psoc); + if (tx_ops->dbr_tx_ops.direct_buf_rx_register_events) + return tx_ops->dbr_tx_ops.direct_buf_rx_register_events(psoc); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_dbr_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_tx_ops *tx_ops; + + tx_ops = wlan_psoc_get_lmac_if_txops(psoc); + if (tx_ops->dbr_tx_ops.direct_buf_rx_unregister_events) + return tx_ops->dbr_tx_ops.direct_buf_rx_unregister_events(psoc); + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS dispatcher_dbr_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_dbr_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* DIRECT_BUF_RX_ENABLE */ + +#ifdef WLAN_SUPPORT_GREEN_AP +static QDF_STATUS dispatcher_green_ap_init(void) +{ + return wlan_green_ap_init(); +} + +static QDF_STATUS dispatcher_green_ap_deinit(void) +{ + return wlan_green_ap_deinit(); +} +#else +static QDF_STATUS dispatcher_green_ap_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_green_ap_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef WLAN_SUPPORT_FILS +static QDF_STATUS 
dispatcher_fd_init(void) +{ + return wlan_fd_init(); +} + +static QDF_STATUS dispatcher_fd_deinit(void) +{ + return wlan_fd_deinit(); +} + +static QDF_STATUS fd_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_fd_enable(psoc); +} + +static QDF_STATUS fd_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return wlan_fd_disable(psoc); +} +#else +static QDF_STATUS dispatcher_fd_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dispatcher_fd_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS fd_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS fd_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_SUPPORT_FILS */ + +QDF_STATUS dispatcher_init(void) +{ + if (QDF_STATUS_SUCCESS != wlan_objmgr_global_obj_init()) + goto out; + + if (QDF_STATUS_SUCCESS != wlan_mgmt_txrx_init()) + goto mgmt_txrx_init_fail; + + if (QDF_STATUS_SUCCESS != ucfg_scan_init()) + goto ucfg_scan_init_fail; + + if (QDF_STATUS_SUCCESS != p2p_init()) + goto p2p_init_fail; + + if (QDF_STATUS_SUCCESS != tdls_init()) + goto tdls_init_fail; + + if (QDF_STATUS_SUCCESS != wlan_serialization_init()) + goto serialization_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_init_crypto()) + goto crypto_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_policy_mgr_init()) + goto policy_mgr_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_init_cp_stats()) + goto cp_stats_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_init_atf()) + goto atf_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_init_sa_api()) + goto sa_api_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_init_wifi_pos()) + goto wifi_pos_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_init_nan()) + goto nan_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_init_dfs()) + goto dfs_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_regulatory_init()) + goto regulatory_init_fail; + + if 
(QDF_STATUS_SUCCESS != dispatcher_offchan_txrx_init()) + goto offchan_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_init_son()) + goto son_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_splitmac_init()) + goto splitmac_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_spectral_init()) + goto spectral_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_fd_init()) + goto fd_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_green_ap_init()) + goto green_ap_init_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_ftm_init()) + goto ftm_init_fail; + + /* + * scheduler INIT has to be the last as each component's + * initialization has to happen first and then at the end + * scheduler needs to start accepting the service. + */ + if (QDF_STATUS_SUCCESS != scheduler_init()) + goto scheduler_init_fail; + + return QDF_STATUS_SUCCESS; + +scheduler_init_fail: + dispatcher_ftm_deinit(); +ftm_init_fail: + dispatcher_green_ap_deinit(); +green_ap_init_fail: + dispatcher_fd_deinit(); +fd_init_fail: + dispatcher_spectral_deinit(); +spectral_init_fail: + dispatcher_splitmac_deinit(); +splitmac_init_fail: + dispatcher_deinit_son(); +son_init_fail: + dispatcher_offchan_txrx_deinit(); +offchan_init_fail: + dispatcher_regulatory_deinit(); +regulatory_init_fail: + dispatcher_deinit_dfs(); +dfs_init_fail: + dispatcher_deinit_nan(); +nan_init_fail: + dispatcher_deinit_wifi_pos(); +wifi_pos_init_fail: + dispatcher_deinit_sa_api(); +sa_api_init_fail: + dispatcher_deinit_atf(); +atf_init_fail: + dispatcher_deinit_cp_stats(); +cp_stats_init_fail: + dispatcher_policy_mgr_deinit(); +policy_mgr_init_fail: + dispatcher_deinit_crypto(); +crypto_init_fail: + wlan_serialization_deinit(); +serialization_init_fail: + tdls_deinit(); +tdls_init_fail: + p2p_deinit(); +p2p_init_fail: + ucfg_scan_deinit(); +ucfg_scan_init_fail: + wlan_mgmt_txrx_deinit(); +mgmt_txrx_init_fail: + wlan_objmgr_global_obj_deinit(); + +out: + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(dispatcher_init); + 
+QDF_STATUS dispatcher_deinit(void) +{ + QDF_BUG(QDF_STATUS_SUCCESS == scheduler_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_ftm_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_green_ap_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_fd_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_spectral_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_splitmac_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_deinit_son()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_offchan_txrx_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_regulatory_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_deinit_dfs()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_deinit_nan()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_deinit_wifi_pos()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_deinit_sa_api()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_deinit_atf()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_deinit_cp_stats()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_policy_mgr_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_deinit_crypto()); + + QDF_BUG(QDF_STATUS_SUCCESS == wlan_serialization_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == tdls_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == p2p_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == ucfg_scan_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == wlan_mgmt_txrx_deinit()); + + QDF_BUG(QDF_STATUS_SUCCESS == wlan_objmgr_global_obj_deinit()); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(dispatcher_deinit); + +QDF_STATUS dispatcher_enable(void) +{ + QDF_STATUS status; + + status = scheduler_enable(); + + return status; +} +qdf_export_symbol(dispatcher_enable); + +QDF_STATUS dispatcher_disable(void) +{ + QDF_BUG(QDF_IS_STATUS_SUCCESS(scheduler_disable())); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(dispatcher_disable); + +QDF_STATUS dispatcher_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + if (QDF_STATUS_SUCCESS != wlan_mgmt_txrx_psoc_open(psoc)) + goto out; + + if 
(QDF_STATUS_SUCCESS != ucfg_scan_psoc_open(psoc)) + goto scan_psoc_open_fail; + + if (QDF_STATUS_SUCCESS != p2p_psoc_open(psoc)) + goto p2p_psoc_open_fail; + + if (QDF_STATUS_SUCCESS != tdls_psoc_open(psoc)) + goto tdls_psoc_open_fail; + + if (QDF_STATUS_SUCCESS != cp_stats_psoc_open(psoc)) + goto cp_stats_psoc_open_fail; + + if (QDF_STATUS_SUCCESS != atf_psoc_open(psoc)) + goto atf_psoc_open_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_policy_mgr_psoc_open(psoc)) + goto policy_mgr_psoc_open_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_regulatory_psoc_open(psoc)) + goto regulatory_psoc_open_fail; + + if (QDF_STATUS_SUCCESS != son_psoc_open(psoc)) + goto psoc_son_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_ftm_psoc_open(psoc)) + goto ftm_psoc_open_fail; + + return QDF_STATUS_SUCCESS; + +ftm_psoc_open_fail: + son_psoc_close(psoc); +psoc_son_fail: + regulatory_psoc_close(psoc); +regulatory_psoc_open_fail: + dispatcher_policy_mgr_psoc_close(psoc); +policy_mgr_psoc_open_fail: + atf_psoc_close(psoc); +atf_psoc_open_fail: + cp_stats_psoc_close(psoc); +cp_stats_psoc_open_fail: + tdls_psoc_close(psoc); +tdls_psoc_open_fail: + p2p_psoc_close(psoc); +p2p_psoc_open_fail: + ucfg_scan_psoc_close(psoc); +scan_psoc_open_fail: + wlan_mgmt_txrx_psoc_close(psoc); + +out: + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(dispatcher_psoc_open); + +QDF_STATUS dispatcher_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_ftm_psoc_close(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == son_psoc_close(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_regulatory_psoc_close(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_policy_mgr_psoc_close(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == atf_psoc_close(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == cp_stats_psoc_close(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == tdls_psoc_close(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == p2p_psoc_close(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == ucfg_scan_psoc_close(psoc)); + + 
QDF_BUG(QDF_STATUS_SUCCESS == wlan_mgmt_txrx_psoc_close(psoc)); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(dispatcher_psoc_close); + +QDF_STATUS dispatcher_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + if (QDF_STATUS_SUCCESS != wlan_serialization_psoc_enable(psoc)) + goto out; + + if (QDF_STATUS_SUCCESS != ucfg_scan_psoc_enable(psoc)) + goto serialization_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != p2p_psoc_enable(psoc)) + goto p2p_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != tdls_psoc_enable(psoc)) + goto tdls_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_policy_mgr_psoc_enable(psoc)) + goto policy_mgr_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != sa_api_psoc_enable(psoc)) + goto sa_api_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != cp_stats_psoc_enable(psoc)) + goto cp_stats_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != atf_psoc_enable(psoc)) + goto atf_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_wifi_pos_enable(psoc)) + goto wifi_pos_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_nan_psoc_enable(psoc)) + goto nan_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_dfs_psoc_enable(psoc)) + goto wifi_dfs_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != fd_psoc_enable(psoc)) + goto fd_psoc_enable_fail; + + if (QDF_STATUS_SUCCESS != dispatcher_dbr_psoc_enable(psoc)) + goto dbr_psoc_enable_fail; + + return QDF_STATUS_SUCCESS; + +dbr_psoc_enable_fail: + fd_psoc_disable(psoc); +fd_psoc_enable_fail: + dispatcher_dfs_psoc_disable(psoc); +wifi_dfs_psoc_enable_fail: + dispatcher_nan_psoc_disable(psoc); +nan_psoc_enable_fail: + dispatcher_wifi_pos_disable(psoc); +wifi_pos_psoc_enable_fail: + atf_psoc_disable(psoc); +atf_psoc_enable_fail: + cp_stats_psoc_disable(psoc); +cp_stats_psoc_enable_fail: + sa_api_psoc_disable(psoc); +sa_api_psoc_enable_fail: + dispatcher_policy_mgr_psoc_disable(psoc); +policy_mgr_psoc_enable_fail: + tdls_psoc_disable(psoc); +tdls_psoc_enable_fail: + p2p_psoc_disable(psoc); 
+p2p_psoc_enable_fail: + ucfg_scan_psoc_disable(psoc); +serialization_psoc_enable_fail: + wlan_serialization_psoc_disable(psoc); + + +out: + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(dispatcher_psoc_enable); + +QDF_STATUS dispatcher_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_dbr_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == fd_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_dfs_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_nan_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_wifi_pos_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == atf_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == cp_stats_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == sa_api_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == + dispatcher_policy_mgr_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == tdls_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == p2p_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == ucfg_scan_psoc_disable(psoc)); + + QDF_BUG(QDF_STATUS_SUCCESS == wlan_serialization_psoc_disable(psoc)); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(dispatcher_psoc_disable); + +QDF_STATUS dispatcher_pdev_open(struct wlan_objmgr_pdev *pdev) +{ + if (QDF_STATUS_SUCCESS != dispatcher_regulatory_pdev_open(pdev)) + goto out; + + if (QDF_STATUS_SUCCESS != dispatcher_spectral_pdev_open(pdev)) + goto spectral_pdev_open_fail; + + if (QDF_STATUS_SUCCESS != wlan_mgmt_txrx_pdev_open(pdev)) + goto out; + + return QDF_STATUS_SUCCESS; + +spectral_pdev_open_fail: + dispatcher_regulatory_pdev_close(pdev); + +out: + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(dispatcher_pdev_open); + +QDF_STATUS dispatcher_pdev_close(struct wlan_objmgr_pdev *pdev) +{ + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_regulatory_pdev_close(pdev)); + + QDF_BUG(QDF_STATUS_SUCCESS == dispatcher_spectral_pdev_close(pdev)); + + QDF_BUG(QDF_STATUS_SUCCESS == 
wlan_mgmt_txrx_pdev_close(pdev)); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(dispatcher_pdev_close); diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/inc/wlan_cfg80211_ic_cp_stats.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/inc/wlan_cfg80211_ic_cp_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..66ab78badd06806048dc5d4fbca8888e05286508 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/inc/wlan_cfg80211_ic_cp_stats.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cfg80211_ic_cp_stats.h + * + * This Header file provide declaration for cfg80211 command handler API + * registered cp stats and specific with ic + */ + +#ifndef __WLAN_CFG80211_IC_CP_STATS_H__ +#define __WLAN_CFG80211_IC_CP_STATS_H__ + +#ifdef QCA_SUPPORT_CP_STATS +#include +#include +#ifdef WLAN_ATF_ENABLE +#include +#endif +#include + +/** + * wlan_cfg80211_get_peer_cp_stats() - API to get peer stats object + * @peer_obj: peer object as input + * @peer_cp_stats: peer stats object to populate + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_get_peer_cp_stats(struct wlan_objmgr_peer *peer_obj, + struct peer_ic_cp_stats *peer_cp_stats); + +/** + * wlan_cfg80211_get_vdev_cp_stats() - API to get vdev stats object + * @vdev_obj: vdev object as input + * @vdev_cp_stats: vdev stats object to populate + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_get_vdev_cp_stats(struct wlan_objmgr_vdev *vdev_obj, + struct vdev_ic_cp_stats *vdev_cp_stats); + +/** + * wlan_cfg80211_get_pdev_cp_stats() - API to get pdev cp stats object + * @pdev_obj: pdev object as input + * @pdev_cp_stats: pdev cp stats object to populate + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_get_pdev_cp_stats(struct wlan_objmgr_pdev *pdev_obj, + struct pdev_ic_cp_stats *pdev_cp_stats); + +#ifdef WLAN_ATF_ENABLE +/** + * wlan_cfg80211_get_peer_atf_cp_stats() - API to get ATF peer stats object + * @peer_obj: peer object as input + * @atf_cp_stats: atf peer cp stats object to populate + * + * Return: 0 on success, negative value on failure + */ +int +wlan_cfg80211_get_atf_peer_cp_stats(struct wlan_objmgr_peer *peer_obj, + struct atf_peer_cp_stats *atf_cp_stats); + +/** + * wlan_cfg80211_get_peer_atf_cp_stats_from_mac() - API to get ATF peer + * stats object from peer mac address + * @vdev_obj: vdev object as input + * @mac: peer mac address as input + * @atf_cp_stats: atf peer cp stats object 
to populate + * + * API used from ucfg layer to get ATF peer cp stats object when only peer + * mac address is available + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_get_atf_peer_cp_stats_from_mac( + struct wlan_objmgr_vdev *vdev_obj, + uint8_t *mac, + struct atf_peer_cp_stats *atf_cp_stats); +#endif /* WLAN_ATF_ENABLE */ + +/** + * wlan_cfg80211_get_dcs_pdev_cp_stats() - API to get DCS chan stats + * @pdev_obj: pdev object as input + * @dcs_chan_stats: DCS pdev stats object to populate + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_get_dcs_pdev_cp_stats( + struct wlan_objmgr_pdev *pdev_obj, + struct pdev_dcs_chan_stats *dcs_chan_stats); + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CFG80211_IC_CP_STATS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/inc/wlan_cfg80211_mc_cp_stats.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/inc/wlan_cfg80211_mc_cp_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..40fcd481eeee7736ef1c7e5e2858fc1879956a5d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/inc/wlan_cfg80211_mc_cp_stats.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cfg80211_mc_cp_stats.h + * + * This Header file provide declaration for cfg80211 command handler API + * registered cp stats and specific with ic + */ + +#ifndef __WLAN_CFG80211_MC_CP_STATS_H__ +#define __WLAN_CFG80211_MC_CP_STATS_H__ + +#ifdef QCA_SUPPORT_CP_STATS + +/* forward declaration */ +struct wiphy; +struct wlan_objmgr_psoc; + +/** + * wlan_cfg80211_mc_cp_stats_get_wakelock_stats() - API to request wake lock + * stats. Stats are returned to user space via vender event + * @psoc: Pointer to psoc + * @wiphy: wiphy pointer + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_mc_cp_stats_get_wakelock_stats(struct wlan_objmgr_psoc *psoc, + struct wiphy *wiphy); + +/** + * wlan_cfg80211_mc_cp_stats_get_tx_power() - API to fetch tx power + * @vdev: Pointer to vdev + * @dbm: Pointer to TX power in dbm + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_mc_cp_stats_get_tx_power(struct wlan_objmgr_vdev *vdev, + int *dbm); + +/** + * wlan_cfg80211_mc_cp_stats_get_station_stats() - API to get station + * statistics to firmware + * @vdev: Pointer to vdev + * @errno: error type in case of failure + * + * Call of this API must call wlan_cfg80211_mc_cp_stats_free_stats_event + * API when done with information provided by info. 
+ * Return: stats buffer on success, Null on failure + */ +struct stats_event * +wlan_cfg80211_mc_cp_stats_get_station_stats(struct wlan_objmgr_vdev *vdev, + int *errno); + +/** + * wlan_cfg80211_mc_cp_stats_free_stats_event() - API to release station + * statistics buffer + * @vdev: Pointer to vdev + * @info: pointer to object to populate with station stats + * + * Return: None + */ +void wlan_cfg80211_mc_cp_stats_free_stats_event(struct stats_event *info); + +/** + * wlan_cfg80211_mc_cp_stats_get_peer_rssi() - API to fetch peer rssi + * @vdev: Pointer to vdev + * @macaddress: mac address + * @errno: error type in case of failure + * + * Call of this API must call wlan_cfg80211_mc_cp_stats_free_stats_event + * API when done with information provided by rssi_info. + * Return: stats buffer on success, Null on failure + */ +struct stats_event * +wlan_cfg80211_mc_cp_stats_get_peer_rssi(struct wlan_objmgr_vdev *vdev, + uint8_t *macaddress, int *errno); + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CFG80211_MC_CP_STATS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/src/wlan_cfg80211_ic_cp_stats.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/src/wlan_cfg80211_ic_cp_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..4dcc0d5c8cdcb65189691e5617dc3c2959e27f92 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/src/wlan_cfg80211_ic_cp_stats.c @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cfg80211_ic_cp_stats.c + * + * This file provide definitions to os_if cp_stats APIs + */ +#include +#include +#include +#include + +int wlan_cfg80211_get_peer_cp_stats(struct wlan_objmgr_peer *peer_obj, + struct peer_ic_cp_stats *peer_cp_stats) +{ + QDF_STATUS status; + + if (!peer_obj) { + cfg80211_err("Invalid input, peer obj NULL"); + return -EINVAL; + } + + if (!peer_cp_stats) { + cfg80211_err("Invalid input, peer cp obj is NULL"); + return -EINVAL; + } + + status = wlan_ucfg_get_peer_cp_stats(peer_obj, peer_cp_stats); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("wlan_cfg80211_get_peer_cp_stats status: %d", + status); + } + + return qdf_status_to_os_return(status); +} + +int wlan_cfg80211_get_vdev_cp_stats(struct wlan_objmgr_vdev *vdev_obj, + struct vdev_ic_cp_stats *vdev_cp_stats) +{ + QDF_STATUS status; + + if (!vdev_obj) { + cfg80211_err("Invalid input, vdev obj is NULL"); + return -EINVAL; + } + + if (!vdev_cp_stats) { + cfg80211_err("Invalid input, vdev cp obj is NULL"); + return -EINVAL; + } + + status = wlan_ucfg_get_vdev_cp_stats(vdev_obj, vdev_cp_stats); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("wlan_cfg80211_get_vdev_cp_stats status: %d", + status); + } + + return qdf_status_to_os_return(status); +} + +int wlan_cfg80211_get_pdev_cp_stats(struct wlan_objmgr_pdev *pdev_obj, + struct pdev_ic_cp_stats *pdev_cp_stats) +{ + QDF_STATUS status; + + if (!pdev_obj) { + cfg80211_err("Invalid input, pdev obj is NULL"); + return -EINVAL; + } + + if (!pdev_cp_stats) { + cfg80211_err("Invalid input, pdev cp obj is NULL"); + return -EINVAL; + } + + status = 
wlan_ucfg_get_pdev_cp_stats(pdev_obj, pdev_cp_stats); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("wlan_cfg80211_get_pdev_cp_stats status: %d", + status); + } + + return qdf_status_to_os_return(status); +} + +qdf_export_symbol(wlan_cfg80211_get_pdev_cp_stats); + +#ifdef WLAN_ATF_ENABLE +int +wlan_cfg80211_get_atf_peer_cp_stats(struct wlan_objmgr_peer *peer_obj, + struct atf_peer_cp_stats *atf_cp_stats) +{ + QDF_STATUS status; + + if (!peer_obj) { + cfg80211_err("Invalid input, peer obj is NULL"); + return -EINVAL; + } + + if (!atf_cp_stats) { + cfg80211_err("Invalid input, ATF peer cp obj is NULL!"); + return -EINVAL; + } + + status = wlan_ucfg_get_atf_peer_cp_stats(peer_obj, atf_cp_stats); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("wlan_cfg80211_get_atf_peer_cp_stats status: %d", + status); + } + + return qdf_status_to_os_return(status); +} + +int wlan_cfg80211_get_atf_peer_cp_stats_from_mac( + struct wlan_objmgr_vdev *vdev_obj, + uint8_t *mac, + struct atf_peer_cp_stats *atf_cp_stats) +{ + QDF_STATUS status; + + if (!vdev_obj) { + cfg80211_err("Invalid input, vdev obj is NULL"); + return -EINVAL; + } + + if (!mac) { + cfg80211_err("Invalid input, peer mac is NULL"); + return -EINVAL; + } + + if (!atf_cp_stats) { + cfg80211_err("Invalid input, ATF peer cp stats obj is NULL"); + return -EINVAL; + } + + status = wlan_ucfg_get_atf_peer_cp_stats_from_mac(vdev_obj, mac, + atf_cp_stats); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("wlan_cfg80211_get_cp_stats_from_mac status: %d", + status); + } + + return qdf_status_to_os_return(status); +} +#endif + +int +wlan_cfg80211_get_dcs_pdev_cp_stats(struct wlan_objmgr_pdev *pdev_obj, + struct pdev_dcs_chan_stats *dcs_chan_stats) +{ + QDF_STATUS status; + + if (!pdev_obj) { + cfg80211_err("Invalid input, pdev obj is NULL"); + return -EINVAL; + } + + if (!dcs_chan_stats) { + cfg80211_err("Invalid input, dcs chan stats is NULL"); + return -EINVAL; + } + + status = wlan_ucfg_get_dcs_chan_stats(pdev_obj, 
dcs_chan_stats); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("wlan_cfg80211_get_dcs_pdev_cp_stats status: %d", + status); + } + + return qdf_status_to_os_return(status); +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/src/wlan_cfg80211_mc_cp_stats.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/src/wlan_cfg80211_mc_cp_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..1b5c828af8487ea5f6f43c183e9e285e373744c6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/cp_stats/src/wlan_cfg80211_mc_cp_stats.c @@ -0,0 +1,586 @@ +/* + * Copyright (c) 2011-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cfg80211_mc_cp_stats.c + * + * This file provide definitions to cp stats supported cfg80211 cmd handlers + */ + +#include +#include +#include +#include +#include "wlan_osif_request_manager.h" +#include "wlan_objmgr_peer_obj.h" + +/* max time in ms, caller may wait for stats request get serviced */ +#define CP_STATS_WAIT_TIME_STAT 800 + +/** + * wlan_cfg80211_mc_cp_stats_dealloc() - callback to free priv + * allocations for stats + * @priv: Pointer to priv data statucture + * + * Return: None + */ +static void wlan_cfg80211_mc_cp_stats_dealloc(void *priv) +{ + struct stats_event *stats = priv; + + if (!stats) + return; + + qdf_mem_free(stats->pdev_stats); + qdf_mem_free(stats->peer_stats); + qdf_mem_free(stats->cca_stats); + qdf_mem_free(stats->vdev_summary_stats); + qdf_mem_free(stats->vdev_chain_rssi); + qdf_mem_free(stats->peer_adv_stats); +} + +/** + * wlan_cfg80211_mc_cp_stats_send_wake_lock_stats() - API to send wakelock stats + * @wiphy: wiphy pointer + * @stats: stats data to be sent + * + * Return: 0 on success, error number otherwise. 
+ */ +static int wlan_cfg80211_mc_cp_stats_send_wake_lock_stats(struct wiphy *wiphy, + struct wake_lock_stats *stats) +{ + struct sk_buff *skb; + uint32_t nl_buf_len; + uint32_t icmpv6_cnt; + uint32_t ipv6_rx_multicast_addr_cnt; + uint32_t total_rx_data_wake, rx_multicast_cnt; + + nl_buf_len = NLMSG_HDRLEN; + nl_buf_len += QCA_WLAN_VENDOR_GET_WAKE_STATS_MAX * + (NLMSG_HDRLEN + sizeof(uint32_t)); + + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, nl_buf_len); + + if (!skb) { + cfg80211_err("cfg80211_vendor_cmd_alloc_reply_skb failed"); + return -ENOMEM; + } + + cfg80211_debug("wow_ucast_wake_up_count %d", + stats->ucast_wake_up_count); + cfg80211_debug("wow_bcast_wake_up_count %d", + stats->bcast_wake_up_count); + cfg80211_debug("wow_ipv4_mcast_wake_up_count %d", + stats->ipv4_mcast_wake_up_count); + cfg80211_debug("wow_ipv6_mcast_wake_up_count %d", + stats->ipv6_mcast_wake_up_count); + cfg80211_debug("wow_ipv6_mcast_ra_stats %d", + stats->ipv6_mcast_ra_stats); + cfg80211_debug("wow_ipv6_mcast_ns_stats %d", + stats->ipv6_mcast_ns_stats); + cfg80211_debug("wow_ipv6_mcast_na_stats %d", + stats->ipv6_mcast_na_stats); + cfg80211_debug("wow_icmpv4_count %d", + stats->icmpv4_count); + cfg80211_debug("wow_icmpv6_count %d", + stats->icmpv6_count); + cfg80211_debug("wow_rssi_breach_wake_up_count %d", + stats->rssi_breach_wake_up_count); + cfg80211_debug("wow_low_rssi_wake_up_count %d", + stats->low_rssi_wake_up_count); + cfg80211_debug("wow_gscan_wake_up_count %d", + stats->gscan_wake_up_count); + cfg80211_debug("wow_pno_complete_wake_up_count %d", + stats->pno_complete_wake_up_count); + cfg80211_debug("wow_pno_match_wake_up_count %d", + stats->pno_match_wake_up_count); + + ipv6_rx_multicast_addr_cnt = stats->ipv6_mcast_wake_up_count; + icmpv6_cnt = stats->icmpv6_count; + rx_multicast_cnt = stats->ipv4_mcast_wake_up_count + + ipv6_rx_multicast_addr_cnt; + total_rx_data_wake = stats->ucast_wake_up_count + + stats->bcast_wake_up_count + rx_multicast_cnt; + + if 
(nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TOTAL_CMD_EVENT_WAKE, 0) || + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_CMD_EVENT_WAKE_CNT_PTR, 0) || + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_CMD_EVENT_WAKE_CNT_SZ, 0) || + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TOTAL_DRIVER_FW_LOCAL_WAKE, + 0) || + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_DRIVER_FW_LOCAL_WAKE_CNT_PTR, + 0) || + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_DRIVER_FW_LOCAL_WAKE_CNT_SZ, + 0) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_TOTAL_RX_DATA_WAKE, + total_rx_data_wake) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_RX_UNICAST_CNT, + stats->ucast_wake_up_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_RX_MULTICAST_CNT, + rx_multicast_cnt) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_RX_BROADCAST_CNT, + stats->bcast_wake_up_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_ICMP_PKT, + stats->icmpv4_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_ICMP6_PKT, + icmpv6_cnt) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_ICMP6_RA, + stats->ipv6_mcast_ra_stats) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_ICMP6_NA, + stats->ipv6_mcast_na_stats) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_ICMP6_NS, + stats->ipv6_mcast_ns_stats) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_ICMP4_RX_MULTICAST_CNT, + stats->ipv4_mcast_wake_up_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_ICMP6_RX_MULTICAST_CNT, + ipv6_rx_multicast_addr_cnt) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_RSSI_BREACH_CNT, + stats->rssi_breach_wake_up_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_LOW_RSSI_CNT, + stats->low_rssi_wake_up_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_GSCAN_CNT, + stats->gscan_wake_up_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_PNO_COMPLETE_CNT, + stats->pno_complete_wake_up_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_PNO_MATCH_CNT, + stats->pno_match_wake_up_count)) { + cfg80211_err("nla put fail"); + goto nla_put_failure; + } + + cfg80211_vendor_cmd_reply(skb); + return 0; + +nla_put_failure: + 
kfree_skb(skb); + return -EINVAL; +} + +int wlan_cfg80211_mc_cp_stats_get_wakelock_stats(struct wlan_objmgr_psoc *psoc, + struct wiphy *wiphy) +{ + /* refer __wlan_hdd_cfg80211_get_wakelock_stats */ + QDF_STATUS status; + struct wake_lock_stats stats = {0}; + + status = ucfg_mc_cp_stats_get_psoc_wake_lock_stats(psoc, &stats); + if (QDF_IS_STATUS_ERROR(status)) + return qdf_status_to_os_return(status); + + return wlan_cfg80211_mc_cp_stats_send_wake_lock_stats(wiphy, &stats); +} + +struct tx_power_priv { + int dbm; +}; + +/** + * get_tx_power_cb() - "Get tx power" callback function + * @tx_power: tx_power + * @cookie: a cookie for the request context + * + * Return: None + */ +static void get_tx_power_cb(int tx_power, void *cookie) +{ + struct osif_request *request; + struct tx_power_priv *priv; + + request = osif_request_get(cookie); + if (!request) { + cfg80211_err("Obsolete request"); + return; + } + + priv = osif_request_priv(request); + priv->dbm = tx_power; + osif_request_complete(request); + osif_request_put(request); +} + +int wlan_cfg80211_mc_cp_stats_get_tx_power(struct wlan_objmgr_vdev *vdev, + int *dbm) +{ + int ret = 0; + void *cookie; + QDF_STATUS status; + struct request_info info = {0}; + struct wlan_objmgr_peer *peer; + struct tx_power_priv *priv = NULL; + struct osif_request *request = NULL; + static const struct osif_request_params params = { + .priv_size = sizeof(*priv), + .timeout_ms = CP_STATS_WAIT_TIME_STAT, + }; + + request = osif_request_alloc(¶ms); + if (!request) { + cfg80211_err("Request allocation failure, return cached value"); + goto fetch_tx_power; + } + + cookie = osif_request_cookie(request); + info.cookie = cookie; + info.u.get_tx_power_cb = get_tx_power_cb; + info.vdev_id = wlan_vdev_get_id(vdev); + info.pdev_id = wlan_objmgr_pdev_get_pdev_id(wlan_vdev_get_pdev(vdev)); + peer = wlan_vdev_get_bsspeer(vdev); + if (!peer) { + ret = -EINVAL; + goto peer_is_null; + } + qdf_mem_copy(info.peer_mac_addr, peer->macaddr, WLAN_MACADDR_LEN); + 
+ status = ucfg_mc_cp_stats_send_stats_request(vdev, + TYPE_CONNECTION_TX_POWER, + &info); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("wlan_mc_cp_stats_request_tx_power status: %d", + status); + ret = qdf_status_to_os_return(status); + } else { + ret = osif_request_wait_for_response(request); + if (ret) + cfg80211_err("wait failed or timed out ret: %d", ret); + else + priv = osif_request_priv(request); + } + +fetch_tx_power: + if (priv) { + *dbm = priv->dbm; + } else { + status = ucfg_mc_cp_stats_get_tx_power(vdev, dbm); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("ucfg_mc_cp_stats_get_tx_power status: %d", + status); + ret = qdf_status_to_os_return(status); + } + } + +peer_is_null: + /* + * either we never sent a request, we sent a request and + * received a response or we sent a request and timed out. + * regardless we are done with the request. + */ + if (request) + osif_request_put(request); + + return ret; +} + +/** + * get_peer_rssi_cb() - get_peer_rssi_cb callback function + * @ev: peer stats buffer + * @cookie: a cookie for the request context + * + * Return: None + */ +static void get_peer_rssi_cb(struct stats_event *ev, void *cookie) +{ + struct stats_event *priv; + struct osif_request *request; + uint32_t rssi_size; + + request = osif_request_get(cookie); + if (!request) { + cfg80211_err("Obsolete request"); + return; + } + + priv = osif_request_priv(request); + rssi_size = sizeof(*ev->peer_stats) * ev->num_peer_stats; + if (rssi_size == 0) { + cfg80211_err("Invalid rssi stats"); + goto get_peer_rssi_cb_fail; + } + + priv->peer_stats = qdf_mem_malloc(rssi_size); + if (!priv->peer_stats) { + cfg80211_err("allocation failed"); + goto get_peer_rssi_cb_fail; + } + + priv->num_peer_stats = ev->num_peer_stats; + qdf_mem_copy(priv->peer_stats, ev->peer_stats, rssi_size); + +get_peer_rssi_cb_fail: + osif_request_complete(request); + osif_request_put(request); +} + +struct stats_event * +wlan_cfg80211_mc_cp_stats_get_peer_rssi(struct 
wlan_objmgr_vdev *vdev, + uint8_t *mac_addr, + int *errno) +{ + void *cookie; + QDF_STATUS status; + struct stats_event *priv, *out; + struct request_info info = {0}; + struct osif_request *request = NULL; + static const struct osif_request_params params = { + .priv_size = sizeof(*priv), + .timeout_ms = CP_STATS_WAIT_TIME_STAT, + .dealloc = wlan_cfg80211_mc_cp_stats_dealloc, + }; + + out = qdf_mem_malloc(sizeof(*out)); + if (!out) { + cfg80211_err("allocation failed"); + *errno = -ENOMEM; + return NULL; + } + + request = osif_request_alloc(¶ms); + if (!request) { + cfg80211_err("Request allocation failure, return cached value"); + *errno = -ENOMEM; + qdf_mem_free(out); + return NULL; + } + + cookie = osif_request_cookie(request); + priv = osif_request_priv(request); + info.cookie = cookie; + info.u.get_peer_rssi_cb = get_peer_rssi_cb; + info.vdev_id = wlan_vdev_get_id(vdev); + info.pdev_id = wlan_objmgr_pdev_get_pdev_id(wlan_vdev_get_pdev(vdev)); + qdf_mem_copy(info.peer_mac_addr, mac_addr, WLAN_MACADDR_LEN); + status = ucfg_mc_cp_stats_send_stats_request(vdev, TYPE_PEER_STATS, + &info); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("stats req failed: %d", status); + *errno = qdf_status_to_os_return(status); + goto get_peer_rssi_fail; + } + + *errno = osif_request_wait_for_response(request); + if (*errno) { + cfg80211_err("wait failed or timed out ret: %d", *errno); + goto get_peer_rssi_fail; + } + + if (!priv->peer_stats || priv->num_peer_stats == 0) { + cfg80211_err("Invalid peer stats, count %d, data %pK", + priv->num_peer_stats, priv->peer_stats); + *errno = -EINVAL; + goto get_peer_rssi_fail; + } + out->num_peer_stats = priv->num_peer_stats; + out->peer_stats = priv->peer_stats; + priv->peer_stats = NULL; + osif_request_put(request); + + return out; + +get_peer_rssi_fail: + osif_request_put(request); + wlan_cfg80211_mc_cp_stats_free_stats_event(out); + + return NULL; +} + +/** + * get_station_stats_cb() - get_station_stats_cb callback function + * @ev: 
station stats buffer + * @cookie: a cookie for the request context + * + * Return: None + */ +static void get_station_stats_cb(struct stats_event *ev, void *cookie) +{ + struct stats_event *priv; + struct osif_request *request; + uint32_t summary_size, rssi_size, peer_adv_size; + + request = osif_request_get(cookie); + if (!request) { + cfg80211_err("Obsolete request"); + return; + } + + priv = osif_request_priv(request); + summary_size = sizeof(*ev->vdev_summary_stats) * ev->num_summary_stats; + rssi_size = sizeof(*ev->vdev_chain_rssi) * ev->num_chain_rssi_stats; + peer_adv_size = sizeof(*ev->peer_adv_stats) * ev->num_peer_adv_stats; + + if (summary_size == 0 || rssi_size == 0) { + cfg80211_err("Invalid stats, summary %d rssi %d", + summary_size, rssi_size); + goto station_stats_cb_fail; + } + + priv->vdev_summary_stats = qdf_mem_malloc(summary_size); + if (!priv->vdev_summary_stats) { + cfg80211_err("memory allocation failed"); + goto station_stats_cb_fail; + } + + priv->vdev_chain_rssi = qdf_mem_malloc(rssi_size); + if (!priv->vdev_chain_rssi) { + cfg80211_err("memory allocation failed"); + goto station_stats_cb_fail; + } + + if (peer_adv_size) { + priv->peer_adv_stats = qdf_mem_malloc(peer_adv_size); + if (!priv->peer_adv_stats) + goto station_stats_cb_fail; + + qdf_mem_copy(priv->peer_adv_stats, ev->peer_adv_stats, + peer_adv_size); + } + + priv->num_summary_stats = ev->num_summary_stats; + priv->num_chain_rssi_stats = ev->num_chain_rssi_stats; + priv->tx_rate = ev->tx_rate; + priv->rx_rate = ev->rx_rate; + priv->tx_rate_flags = ev->tx_rate_flags; + priv->num_peer_adv_stats = ev->num_peer_adv_stats; + qdf_mem_copy(priv->vdev_chain_rssi, ev->vdev_chain_rssi, rssi_size); + qdf_mem_copy(priv->vdev_summary_stats, ev->vdev_summary_stats, + summary_size); + +station_stats_cb_fail: + osif_request_complete(request); + osif_request_put(request); +} + +struct stats_event * +wlan_cfg80211_mc_cp_stats_get_station_stats(struct wlan_objmgr_vdev *vdev, + int *errno) +{ + 
void *cookie; + QDF_STATUS status; + struct stats_event *priv, *out; + struct wlan_objmgr_peer *peer; + struct osif_request *request; + struct request_info info = {0}; + static const struct osif_request_params params = { + .priv_size = sizeof(*priv), + .timeout_ms = 2 * CP_STATS_WAIT_TIME_STAT, + .dealloc = wlan_cfg80211_mc_cp_stats_dealloc, + }; + + out = qdf_mem_malloc(sizeof(*out)); + if (!out) { + cfg80211_err("allocation failed"); + *errno = -ENOMEM; + return NULL; + } + + request = osif_request_alloc(¶ms); + if (!request) { + cfg80211_err("Request allocation failure, return cached value"); + qdf_mem_free(out); + *errno = -ENOMEM; + return NULL; + } + + cookie = osif_request_cookie(request); + priv = osif_request_priv(request); + info.cookie = cookie; + info.u.get_station_stats_cb = get_station_stats_cb; + info.vdev_id = wlan_vdev_get_id(vdev); + info.pdev_id = wlan_objmgr_pdev_get_pdev_id(wlan_vdev_get_pdev(vdev)); + peer = wlan_vdev_get_bsspeer(vdev); + if (!peer) { + cfg80211_err("peer is null"); + *errno = -EINVAL; + goto get_station_stats_fail; + } + qdf_mem_copy(info.peer_mac_addr, peer->macaddr, WLAN_MACADDR_LEN); + + status = ucfg_mc_cp_stats_send_stats_request(vdev, TYPE_STATION_STATS, + &info); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("Failed to send stats request status: %d", status); + *errno = qdf_status_to_os_return(status); + goto get_station_stats_fail; + } + + *errno = osif_request_wait_for_response(request); + if (*errno) { + cfg80211_err("wait failed or timed out ret: %d", *errno); + goto get_station_stats_fail; + } + + if (!priv->vdev_summary_stats || !priv->vdev_chain_rssi || + priv->num_summary_stats == 0 || priv->num_chain_rssi_stats == 0) { + cfg80211_err("Invalid stats"); + cfg80211_err("summary %d:%pK, rssi %d:%pK", + priv->num_summary_stats, priv->vdev_summary_stats, + priv->num_chain_rssi_stats, priv->vdev_chain_rssi); + *errno = -EINVAL; + goto get_station_stats_fail; + } + + out->tx_rate = priv->tx_rate; + out->rx_rate 
= priv->rx_rate; + out->tx_rate_flags = priv->tx_rate_flags; + out->num_summary_stats = priv->num_summary_stats; + out->num_chain_rssi_stats = priv->num_chain_rssi_stats; + out->vdev_summary_stats = priv->vdev_summary_stats; + priv->vdev_summary_stats = NULL; + out->vdev_chain_rssi = priv->vdev_chain_rssi; + priv->vdev_chain_rssi = NULL; + out->num_peer_adv_stats = priv->num_peer_adv_stats; + if (priv->peer_adv_stats) + out->peer_adv_stats = priv->peer_adv_stats; + priv->peer_adv_stats = NULL; + osif_request_put(request); + + return out; + +get_station_stats_fail: + osif_request_put(request); + wlan_cfg80211_mc_cp_stats_free_stats_event(out); + + return NULL; +} + +void wlan_cfg80211_mc_cp_stats_free_stats_event(struct stats_event *stats) +{ + if (!stats) + return; + + qdf_mem_free(stats->pdev_stats); + qdf_mem_free(stats->peer_stats); + qdf_mem_free(stats->cca_stats); + qdf_mem_free(stats->vdev_summary_stats); + qdf_mem_free(stats->vdev_chain_rssi); + qdf_mem_free(stats->peer_adv_stats); + qdf_mem_free(stats); +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/inc/wlan_cfg80211_ftm.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/inc/wlan_cfg80211_ftm.h new file mode 100644 index 0000000000000000000000000000000000000000..3bb497eb38de6466c9412f407e4eed67ead4155c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/inc/wlan_cfg80211_ftm.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declares driver FTM functions interfacing with linux kernel + */ + +#ifndef _WLAN_CFG80211_FTM_H_ +#define _WLAN_CFG80211_FTM_H_ + +/** + * enum wlan_cfg80211_ftm_attr - FTM Netlink attributes + * @WLAN_CFG80211_FTM_ATTR_INVALID: attribute is invalid + * @WLAN_CFG80211_FTM_ATTR_CMD: attribute type is FTM command + * @WLAN_CFG80211_FTM_ATTR_DATA: attribute type is data + * + * @WLAN_CFG80211_FTM_ATTR_MAX: Max number of attributes + */ +enum wlan_cfg80211_ftm_attr { + WLAN_CFG80211_FTM_ATTR_INVALID = 0, + WLAN_CFG80211_FTM_ATTR_CMD = 1, + WLAN_CFG80211_FTM_ATTR_DATA = 2, + + /* keep last */ + WLAN_CFG80211_FTM_ATTR_MAX, +}; + +/** + * enum wlan_cfg80211_ftm_cmd - FTM command types + * @WLAN_CFG80211_FTM_CMD_WLAN_FTM: command is of type FTM + */ +enum wlan_cfg80211_ftm_cmd { + WLAN_CFG80211_FTM_CMD_WLAN_FTM = 0, +}; + +#define WLAN_FTM_DATA_MAX_LEN 2048 + +/** + * wlan_cfg80211_ftm_testmode_cmd() - process cfg80211 testmode command + * @pdev: pdev object + * @data: ftm testmode command data of type void + * @len: length of the data + * + * Return: 0 on success or -Eerrno otherwise + */ +int wlan_cfg80211_ftm_testmode_cmd(struct wlan_objmgr_pdev *pdev, + void *data, uint32_t len); + +/** + * wlan_cfg80211_ftm_rx_event() - handle the received ftm event + * @pdev: pdev object + * @data: ftm event data + * @len: length of the data + * + * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E errno otherwise + */ +QDF_STATUS wlan_cfg80211_ftm_rx_event(struct wlan_objmgr_pdev *pdev, + uint8_t *data, uint32_t len); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/inc/wlan_ioctl_ftm.h 
b/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/inc/wlan_ioctl_ftm.h new file mode 100644 index 0000000000000000000000000000000000000000..7a94c3fa2c1a902882e6a4223e2448a187cc8568 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/inc/wlan_ioctl_ftm.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declares driver FTM functions interfacing with linux kernel + */ + +#ifndef _WLAN_IOCTL_FTM_H_ +#define _WLAN_IOCTL_FTM_H_ + +/** + * wlan_ioctl_ftm_testmode_cmd() - handle the ftm ioctl command + * @pdev: pdev object + * @cmd: ftm command + * @userdata: the content of the command + * @length: the length of the userdata + * + * Return: 0 on success, otherwise the error code. 
+ */ +int wlan_ioctl_ftm_testmode_cmd(struct wlan_objmgr_pdev *pdev, int cmd, + uint8_t *userdata, uint32_t length); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/src/wlan_cfg80211_ftm.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/src/wlan_cfg80211_ftm.c new file mode 100644 index 0000000000000000000000000000000000000000..bcba198cec1b4f35b5c7c4a7857baf9ee0118d7f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/src/wlan_cfg80211_ftm.c @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: implementation of the driver FTM functions interfacing with linux kernel + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const struct nla_policy +wlan_cfg80211_ftm_policy[WLAN_CFG80211_FTM_ATTR_MAX + 1] = { + [WLAN_CFG80211_FTM_ATTR_CMD] = {.type = NLA_U32}, + [WLAN_CFG80211_FTM_ATTR_DATA] = {.type = NLA_BINARY, + .len = WLAN_FTM_DATA_MAX_LEN}, +}; + +static int +wlan_cfg80211_process_ftm_cmd(struct wlan_objmgr_pdev *pdev, + struct nlattr *tb[]) +{ + int buf_len; + void *buf; + QDF_STATUS status; + + if (!tb[WLAN_CFG80211_FTM_ATTR_DATA]) { + ftm_err("WLAN_CFG80211_FTM_ATTR_DATA attribute is invalid"); + return -EINVAL; + } + + buf = nla_data(tb[WLAN_CFG80211_FTM_ATTR_DATA]); + buf_len = nla_len(tb[WLAN_CFG80211_FTM_ATTR_DATA]); + + if (buf_len > WLAN_FTM_DATA_MAX_LEN) + return -EINVAL; + + ftm_debug("****FTM Tx cmd len = %d*****", buf_len); + + status = ucfg_wlan_ftm_testmode_cmd(pdev, buf, buf_len); + + if (QDF_IS_STATUS_ERROR(status)) + status = QDF_STATUS_E_BUSY; + + return qdf_status_to_os_return(status); +} + +int +wlan_cfg80211_ftm_testmode_cmd(struct wlan_objmgr_pdev *pdev, + void *data, uint32_t len) +{ + struct nlattr *tb[WLAN_CFG80211_FTM_ATTR_MAX + 1]; + int err = 0, cmd; + struct wifi_ftm_pdev_priv_obj *ftm_pdev_obj; + + ftm_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_FTM); + if (!ftm_pdev_obj) { + ftm_err("Failed to get ftm pdev component"); + return -EINVAL; + } + + ftm_pdev_obj->cmd_type = WIFI_FTM_CMD_NL80211; + + err = wlan_cfg80211_nla_parse(tb, WLAN_CFG80211_FTM_ATTR_MAX - 1, data, + len, wlan_cfg80211_ftm_policy); + if (err) { + ftm_err("Testmode INV ATTR"); + return err; + } + + if (!tb[WLAN_CFG80211_FTM_ATTR_CMD]) { + ftm_err("Testmode INV CMD"); + return -EINVAL; + } + cmd = nla_get_u32(tb[WLAN_CFG80211_FTM_ATTR_CMD]); + + switch (cmd) { + case WLAN_CFG80211_FTM_CMD_WLAN_FTM: + err = wlan_cfg80211_process_ftm_cmd(pdev, tb); + break; + + 
default: + ftm_err("unknown command: %d", cmd); + return -ENOENT; + } + + return err; +} + +qdf_export_symbol(wlan_cfg80211_ftm_testmode_cmd); + +QDF_STATUS +wlan_cfg80211_ftm_rx_event(struct wlan_objmgr_pdev *pdev, + uint8_t *data, uint32_t len) +{ + struct pdev_osif_priv *pdev_ospriv; + qdf_nbuf_t skb = NULL; + + if (!data || !len) { + ftm_err("Null data or invalid length"); + return QDF_STATUS_E_INVAL; + } + + pdev_ospriv = wlan_pdev_get_ospriv(pdev); + if (!pdev_ospriv) { + ftm_err("pdev_ospriv is NULL"); + return QDF_STATUS_E_INVAL; + } + + ftm_debug("Testmode response event generated"); +#ifdef CONFIG_NL80211_TESTMODE + skb = cfg80211_testmode_alloc_event_skb(pdev_ospriv->wiphy, + len, GFP_ATOMIC); +#else + return QDF_STATUS_E_INVAL; +#endif + if (!skb) + return QDF_STATUS_E_NOMEM; + + if (nla_put_u32(skb, WLAN_CFG80211_FTM_ATTR_CMD, + WLAN_CFG80211_FTM_CMD_WLAN_FTM) || + nla_put(skb, WLAN_CFG80211_FTM_ATTR_DATA, len, data)) { + goto nla_put_failure; + } +#ifdef CONFIG_NL80211_TESTMODE + cfg80211_testmode_event(skb, GFP_ATOMIC); +#endif + return QDF_STATUS_SUCCESS; + +nla_put_failure: + qdf_nbuf_free(skb); + ftm_err("nla_put failed on testmode rx skb!"); + + return QDF_STATUS_E_INVAL; +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/src/wlan_ioctl_ftm.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/src/wlan_ioctl_ftm.c new file mode 100644 index 0000000000000000000000000000000000000000..952b9f7d5065a7c2f1cddfab3afc96185f161584 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/ftm/src/wlan_ioctl_ftm.c @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: implementation of the driver FTM functions interfacing with linux kernel
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/**
+ * wlan_process_ftm_ioctl_cmd() - process an FTM command ioctl
+ * @pdev: pdev object
+ * @userdata: user-space buffer; the leading u32 carries the payload length,
+ *            followed by the FTM command payload itself
+ * @length: ioctl length argument (replaced by the embedded length read
+ *          from @userdata before use)
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_* on failure
+ */
+static QDF_STATUS
+wlan_process_ftm_ioctl_cmd(struct wlan_objmgr_pdev *pdev,
+			   uint8_t *userdata, uint32_t length)
+{
+	uint8_t *buffer;
+	QDF_STATUS error;
+
+	if (get_user(length, (uint32_t *)userdata) != 0)
+		return QDF_STATUS_E_FAILURE;
+
+	/* bound the copy by the maximum FTM payload size */
+	if (length > WLAN_FTM_DATA_MAX_LEN)
+		return QDF_STATUS_E_FAILURE;
+
+	buffer = qdf_mem_malloc(length);
+	if (!buffer)
+		return QDF_STATUS_E_NOMEM;
+
+	if (copy_from_user(buffer, &userdata[sizeof(length)], length))
+		error = QDF_STATUS_E_FAILURE;
+	else
+		error = ucfg_wlan_ftm_testmode_cmd(pdev, buffer, length);
+
+	qdf_mem_free(buffer);
+
+	return error;
+}
+
+/**
+ * wlan_process_ftm_ioctl_rsp() - copy the pending FTM response to user space
+ * @pdev: pdev object
+ * @userdata: user-space destination buffer for the FTM response
+ * @length: ignored on input; reset to the maximum response size
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_* on failure
+ */
+static QDF_STATUS
+wlan_process_ftm_ioctl_rsp(struct wlan_objmgr_pdev *pdev,
+			   uint8_t *userdata, uint32_t length)
+{
+	uint8_t *buffer;
+	QDF_STATUS error;
+
+	length = WLAN_FTM_DATA_MAX_LEN + sizeof(u_int32_t);
+
+	buffer = qdf_mem_malloc(length);
+	if (!buffer)
+		return QDF_STATUS_E_NOMEM;
+
+	error = ucfg_wlan_ftm_testmode_rsp(pdev, buffer);
+	if (!error) {
+		/*
+		 * copy_to_user() returns the number of bytes NOT copied, not
+		 * a QDF_STATUS. Map the result explicitly so callers that
+		 * convert this status with qdf_status_to_os_return() always
+		 * see a valid QDF_STATUS value.
+		 */
+		if (copy_to_user((userdata - sizeof(int)), buffer, length))
+			error = QDF_STATUS_E_FAILURE;
+	} else {
+		error = QDF_STATUS_E_AGAIN;
+	}
+
+	qdf_mem_free(buffer);
+
+	return error;
+}
+
+int
+wlan_ioctl_ftm_testmode_cmd(struct wlan_objmgr_pdev *pdev, int cmd,
+			    uint8_t *userdata, uint32_t length)
+{
+	QDF_STATUS error;
+	struct wifi_ftm_pdev_priv_obj
*ftm_pdev_obj; + + ftm_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_FTM); + if (!ftm_pdev_obj) { + ftm_err("Failed to get ftm pdev component"); + return QDF_STATUS_E_FAILURE; + } + + ftm_pdev_obj->cmd_type = WIFI_FTM_CMD_IOCTL; + + switch (cmd) { + case FTM_IOCTL_UNIFIED_UTF_CMD: + error = wlan_process_ftm_ioctl_cmd(pdev, + userdata, length); + break; + case FTM_IOCTL_UNIFIED_UTF_RSP: + error = wlan_process_ftm_ioctl_rsp(pdev, + userdata, length); + break; + default: + ftm_err("FTM Unknown cmd - not supported"); + error = QDF_STATUS_E_NOSUPPORT; + } + + return qdf_status_to_os_return(error); +} + +qdf_export_symbol(wlan_ioctl_ftm_testmode_cmd); diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/nan/inc/os_if_nan.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/nan/inc/os_if_nan.h new file mode 100644 index 0000000000000000000000000000000000000000..338ea7462d450dc24badc26ece797e8025f0967f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/nan/inc/os_if_nan.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: declares nan component os interface APIs + */ + +#ifndef _OS_IF_NAN_H_ +#define _OS_IF_NAN_H_ + +#include "qdf_types.h" +#include "nan_public_structs.h" +#include "nan_ucfg_api.h" + +struct wlan_objmgr_psoc; +struct wlan_objmgr_vdev; + +#ifdef WLAN_FEATURE_NAN_CONVERGENCE + +/** + * os_if_nan_process_ndp_cmd: os_if api to handle nan request message + * @psoc: pointer to psoc object + * @data: request data. contains vendor cmd tlvs + * @data_len: length of data + * + * Return: status of operation + */ +int os_if_nan_process_ndp_cmd(struct wlan_objmgr_psoc *psoc, + const void *data, int data_len); + +/** + * os_if_nan_event_handler: os_if handler api for nan response messages + * @psoc: pointer to psoc object + * @vdev: pointer to vdev object + * @type: message type + * @msg: msg buffer + * + * Return: None + */ +void os_if_nan_event_handler(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev, + uint32_t type, void *msg); + +/** + * os_if_nan_register_hdd_callbacks: os_if api to register hdd callbacks + * @psoc: pointer to psoc object + * @cb_obj: struct pointer containing callbacks + * + * Return: status of operation + */ +int os_if_nan_register_hdd_callbacks(struct wlan_objmgr_psoc *psoc, + struct nan_callbacks *cb_obj); + +/** + * os_if_nan_register_lim_callbacks: os_if api to register lim callbacks + * @psoc: pointer to psoc object + * @cb_obj: struct pointer containing callbacks + * + * Return: status of operation + */ +int os_if_nan_register_lim_callbacks(struct wlan_objmgr_psoc *psoc, + struct nan_callbacks *cb_obj); + +/** + * os_if_nan_post_ndi_create_rsp: os_if api to pos ndi create rsp to umac nan + * component + * @psoc: pointer to psoc object + * @vdev_id: vdev id of ndi + * @success: if create was success or failure + * + * Return: None + */ +void os_if_nan_post_ndi_create_rsp(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, bool success); + +/** + * os_if_nan_post_ndi_delete_rsp: os_if api to pos ndi delete rsp to umac 
nan + * component + * @psoc: pointer to psoc object + * @vdev_id: vdev id of ndi + * @success: if delete was success or failure + * + * Return: None + */ +void os_if_nan_post_ndi_delete_rsp(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, bool success); + +/** + * os_if_nan_ndi_session_end: os_if api to process ndi session end + * component + * @vdev: pointer to vdev deleted + * + * Return: None + */ +void os_if_nan_ndi_session_end(struct wlan_objmgr_vdev *vdev); + +/** + * os_if_nan_set_ndi_state: os_if api set NDI state + * @vdev: pointer to vdev deleted + * @state: value to set + * + * Return: status of operation + */ +static inline QDF_STATUS os_if_nan_set_ndi_state(struct wlan_objmgr_vdev *vdev, + uint32_t state) +{ + return ucfg_nan_set_ndi_state(vdev, state); +} + +/** + * os_if_nan_set_ndp_create_transaction_id: set ndp create transaction id + * @vdev: pointer to vdev object + * @val: value to set + * + * Return: status of operation + */ +static inline QDF_STATUS os_if_nan_set_ndp_create_transaction_id( + struct wlan_objmgr_vdev *vdev, + uint16_t val) +{ + return ucfg_nan_set_ndp_create_transaction_id(vdev, val); +} + +/** + * os_if_nan_set_ndp_delete_transaction_id: set ndp delete transaction id + * @vdev: pointer to vdev object + * @val: value to set + * + * Return: status of operation + */ +static inline QDF_STATUS os_if_nan_set_ndp_delete_transaction_id( + struct wlan_objmgr_vdev *vdev, + uint16_t val) +{ + return ucfg_nan_set_ndp_delete_transaction_id(vdev, val); +} + +#else + +static inline void os_if_nan_post_ndi_create_rsp(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, bool success) +{ +} + +static inline void os_if_nan_post_ndi_delete_rsp(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, bool success) +{ +} + +#endif /* WLAN_FEATURE_NAN_CONVERGENCE */ + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/nan/src/os_if_nan.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/nan/src/os_if_nan.c new file mode 100644 index 
0000000000000000000000000000000000000000..d5a11d11ad8aa037f69e80fd3f4b07a13a74cf9f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/nan/src/os_if_nan.c @@ -0,0 +1,2145 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: defines nan component os interface APIs + */ + +#include "qdf_str.h" +#include "qdf_trace.h" +#include "qdf_types.h" +#include "os_if_nan.h" +#include "wlan_nan_api.h" +#include "nan_ucfg_api.h" +#include "nan_public_structs.h" +#include "wlan_osif_priv.h" +#include +#include "wlan_cfg80211.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_objmgr_pdev_obj.h" +#include "wlan_objmgr_vdev_obj.h" +#include "wlan_utility.h" + +/* NLA policy */ +static const struct nla_policy +vendor_attr_policy[QCA_WLAN_VENDOR_ATTR_NDP_PARAMS_MAX + 1] = { + [QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD] = { + .type = NLA_U32, + .len = sizeof(uint32_t) + }, + [QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID] = { + .type = NLA_U16, + .len = sizeof(uint16_t) + }, + [QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR] = { + .type = NLA_NUL_STRING, + .len = IFNAMSIZ - 1 + }, + [QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_INSTANCE_ID] = { + .type = NLA_U32, + .len = sizeof(uint32_t) + }, + [QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL] = { + .type = NLA_U32, + .len = sizeof(uint32_t) + }, + [QCA_WLAN_VENDOR_ATTR_NDP_PEER_DISCOVERY_MAC_ADDR] = { + .type = NLA_UNSPEC, + .len = QDF_MAC_ADDR_SIZE + }, + [QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_SECURITY] = { + .type = NLA_U16, + .len = sizeof(uint16_t) + }, + [QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS] = { + .type = NLA_U32, + .len = sizeof(uint32_t) + }, + [QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO] = { + .type = NLA_BINARY, + .len = NDP_APP_INFO_LEN + }, + [QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID] = { + .type = NLA_U32, + .len = sizeof(uint32_t) + }, + [QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE] = { + .type = NLA_U32, + .len = sizeof(uint32_t) + }, + [QCA_WLAN_VENDOR_ATTR_NDP_NDI_MAC_ADDR] = { + .type = NLA_BINARY, + .len = QDF_MAC_ADDR_SIZE + }, + [QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID_ARRAY] = { + .type = NLA_BINARY, + .len = NDP_NUM_INSTANCE_ID + }, + [QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_CONFIG] = { + .type = NLA_U32, + .len = sizeof(uint32_t) + }, + [QCA_WLAN_VENDOR_ATTR_NDP_CSID] = { + .type = NLA_U32, + 
.len = sizeof(uint32_t) + }, + [QCA_WLAN_VENDOR_ATTR_NDP_PMK] = { + .type = NLA_BINARY, + .len = NDP_PMK_LEN + }, + [QCA_WLAN_VENDOR_ATTR_NDP_SCID] = { + .type = NLA_BINARY, + .len = NDP_SCID_BUF_LEN + }, + [QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE] = { + .type = NLA_U32, + .len = sizeof(uint32_t) + }, + [QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE] = { + .type = NLA_U32, + .len = sizeof(uint32_t) + }, + [QCA_WLAN_VENDOR_ATTR_NDP_PASSPHRASE] = { + .type = NLA_BINARY, + .len = NAN_PASSPHRASE_MAX_LEN + }, + [QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_NAME] = { + .type = NLA_BINARY, + .len = NAN_MAX_SERVICE_NAME_LEN + }, + [QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_INFO] = { + .type = NLA_BINARY, + .len = NAN_CH_INFO_MAX_LEN + }, + [QCA_WLAN_VENDOR_ATTR_NDP_NSS] = { + .type = NLA_U32, + .len = sizeof(uint32_t) + }, + [QCA_WLAN_VENDOR_ATTR_NDP_IPV6_ADDR] = { + .type = NLA_UNSPEC, + .len = QDF_IPV6_ADDR_SIZE + }, + [QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PORT] = { + .type = NLA_U16, + .len = sizeof(uint16_t) + }, + [QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PROTOCOL] = { + .type = NLA_U8, + .len = sizeof(uint8_t) + }, +}; + +static int os_if_nan_process_ndi_create(struct wlan_objmgr_psoc *psoc, + struct nlattr **tb) +{ + int ret; + char *iface_name; + QDF_STATUS status; + uint16_t transaction_id; + struct wlan_objmgr_vdev *nan_vdev; + struct nan_callbacks cb_obj; + + cfg80211_debug("enter"); + if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]) { + cfg80211_err("Interface name string is unavailable"); + return -EINVAL; + } + iface_name = nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]); + + nan_vdev = wlan_util_get_vdev_by_ifname(psoc, iface_name, WLAN_NAN_ID); + if (nan_vdev) { + cfg80211_err("NAN data interface %s is already present", + iface_name); + wlan_objmgr_vdev_release_ref(nan_vdev, WLAN_NAN_ID); + return -EEXIST; + } + + if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID]) { + cfg80211_err("transaction id is unavailable"); + return -EINVAL; + } + transaction_id = + 
nla_get_u16(tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID]); + + status = ucfg_nan_get_callbacks(psoc, &cb_obj); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("Couldn't get ballback object"); + return -EINVAL; + } + + ret = cb_obj.ndi_open(iface_name); + if (ret) { + cfg80211_err("ndi_open failed"); + return ret; + } + + return cb_obj.ndi_start(iface_name, transaction_id); +} + +static int os_if_nan_process_ndi_delete(struct wlan_objmgr_psoc *psoc, + struct nlattr **tb) +{ + uint8_t vdev_id; + char *iface_name; + QDF_STATUS status; + uint32_t num_peers; + uint16_t transaction_id; + struct nan_callbacks cb_obj; + struct wlan_objmgr_vdev *nan_vdev = NULL; + + if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]) { + cfg80211_err("Interface name string is unavailable"); + return -EINVAL; + } + + iface_name = nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]); + + if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID]) { + cfg80211_err("Transaction id is unavailable"); + return -EINVAL; + } + + nan_vdev = wlan_util_get_vdev_by_ifname(psoc, iface_name, WLAN_NAN_ID); + if (!nan_vdev) { + cfg80211_err("Nan datapath interface is not present"); + return -EINVAL; + } + + transaction_id = + nla_get_u16(tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID]); + vdev_id = wlan_vdev_get_id(nan_vdev); + num_peers = ucfg_nan_get_active_peers(nan_vdev); + /* + * wlan_util_get_vdev_by_ifname increments ref count + * decrement here since vdev returned by that api is not used any more + */ + wlan_objmgr_vdev_release_ref(nan_vdev, WLAN_NAN_ID); + + /* check if there are active peers on the adapter */ + if (num_peers) + cfg80211_err("NDP peers active: %d, active NDPs may not be terminated", + num_peers); + + status = ucfg_nan_get_callbacks(psoc, &cb_obj); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("Couldn't get ballback object"); + return -EINVAL; + } + + return cb_obj.ndi_delete(vdev_id, iface_name, transaction_id); +} + +/** + * os_if_nan_parse_security_params() - parse vendor attributes for security 
+ * params. + * @tb: parsed NL attribute list + * @ncs_sk_type: out parameter to populate ncs_sk_type + * @pmk: out parameter to populate pmk + * @passphrase: out parameter to populate passphrase + * @service_name: out parameter to populate service_name + * + * Return: 0 on success or error code on failure + */ +static int os_if_nan_parse_security_params(struct nlattr **tb, + uint32_t *ncs_sk_type, struct nan_datapath_pmk *pmk, + struct ndp_passphrase *passphrase, + struct ndp_service_name *service_name) +{ + if (!ncs_sk_type || !pmk || !passphrase || !service_name) { + cfg80211_err("out buffers for one ore more parameters is null"); + return -EINVAL; + } + + if (tb[QCA_WLAN_VENDOR_ATTR_NDP_CSID]) { + *ncs_sk_type = + nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_NDP_CSID]); + } + + if (tb[QCA_WLAN_VENDOR_ATTR_NDP_PMK]) { + pmk->pmk_len = nla_len(tb[QCA_WLAN_VENDOR_ATTR_NDP_PMK]); + qdf_mem_copy(pmk->pmk, + nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_PMK]), + pmk->pmk_len); + cfg80211_err("pmk len: %d", pmk->pmk_len); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + pmk->pmk, pmk->pmk_len); + } + + if (tb[QCA_WLAN_VENDOR_ATTR_NDP_PASSPHRASE]) { + passphrase->passphrase_len = + nla_len(tb[QCA_WLAN_VENDOR_ATTR_NDP_PASSPHRASE]); + qdf_mem_copy(passphrase->passphrase, + nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_PASSPHRASE]), + passphrase->passphrase_len); + cfg80211_err("passphrase len: %d", passphrase->passphrase_len); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + passphrase->passphrase, passphrase->passphrase_len); + } + + if (tb[QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_NAME]) { + service_name->service_name_len = + nla_len(tb[QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_NAME]); + qdf_mem_copy(service_name->service_name, + nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_NAME]), + service_name->service_name_len); + cfg80211_err("service_name len: %d", + service_name->service_name_len); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + service_name->service_name, + 
service_name->service_name_len); + } + + return 0; +} + +/** + * os_if_nan_process_ndp_initiator_req() - NDP initiator request handler + * @ctx: hdd context + * @tb: parsed NL attribute list + * + * tb will contain following vendor attributes: + * QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR + * QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID + * QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL - optional + * QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_CONFIG + * QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_INSTANCE_ID + * QCA_WLAN_VENDOR_ATTR_NDP_PEER_DISCOVERY_MAC_ADDR + * QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO - optional + * QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS - optional + * QCA_WLAN_VENDOR_ATTR_NDP_PMK - optional + * QCA_WLAN_VENDOR_ATTR_NDP_CSID - optional + * QCA_WLAN_VENDOR_ATTR_NDP_PASSPHRASE - optional + * QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_NAME - optional + * + * Return: 0 on success or error code on failure + */ +static int os_if_nan_process_ndp_initiator_req(struct wlan_objmgr_psoc *psoc, + struct nlattr **tb) +{ + int ret = 0; + char *iface_name; + QDF_STATUS status; + enum nan_datapath_state state; + struct wlan_objmgr_vdev *nan_vdev; + struct nan_datapath_initiator_req req = {0}; + + if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]) { + cfg80211_err("Interface name string is unavailable"); + return -EINVAL; + } + + iface_name = nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]); + nan_vdev = wlan_util_get_vdev_by_ifname(psoc, iface_name, WLAN_NAN_ID); + if (!nan_vdev) { + cfg80211_err("NAN data interface %s not available", iface_name); + return -EINVAL; + } + + if (nan_vdev->vdev_mlme.vdev_opmode != QDF_NDI_MODE) { + cfg80211_err("Interface found is not NDI"); + ret = -EINVAL; + goto initiator_req_failed; + } + + state = ucfg_nan_get_ndi_state(nan_vdev); + if (state == NAN_DATA_NDI_DELETED_STATE || + state == NAN_DATA_NDI_DELETING_STATE || + state == NAN_DATA_NDI_CREATING_STATE) { + cfg80211_err("Data request not allowed in NDI current state: %d", + state); + ret = -EINVAL; + goto initiator_req_failed; + } + + if 
(!tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID]) { + cfg80211_err("Transaction ID is unavailable"); + ret = -EINVAL; + goto initiator_req_failed; + } + req.transaction_id = + nla_get_u16(tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID]); + + if (tb[QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL]) { + req.channel = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL]); + + if (tb[QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_CONFIG]) { + req.channel_cfg = nla_get_u32( + tb[QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_CONFIG]); + } else { + cfg80211_err("Channel config is unavailable"); + ret = -EINVAL; + goto initiator_req_failed; + } + } + + if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_INSTANCE_ID]) { + cfg80211_err("NDP service instance ID is unavailable"); + ret = -EINVAL; + goto initiator_req_failed; + } + req.service_instance_id = + nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_INSTANCE_ID]); + + qdf_mem_copy(req.self_ndi_mac_addr.bytes, + wlan_vdev_mlme_get_macaddr(nan_vdev), QDF_MAC_ADDR_SIZE); + + if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_PEER_DISCOVERY_MAC_ADDR]) { + cfg80211_err("NDI peer discovery mac addr is unavailable"); + ret = -EINVAL; + goto initiator_req_failed; + } + qdf_mem_copy(req.peer_discovery_mac_addr.bytes, + nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_PEER_DISCOVERY_MAC_ADDR]), + QDF_MAC_ADDR_SIZE); + + if (tb[QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO]) { + req.ndp_info.ndp_app_info_len = + nla_len(tb[QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO]); + qdf_mem_copy(req.ndp_info.ndp_app_info, + nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO]), + req.ndp_info.ndp_app_info_len); + } + + if (tb[QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS]) { + /* at present ndp config stores 4 bytes QOS info only */ + req.ndp_config.ndp_cfg_len = 4; + *((uint32_t *)req.ndp_config.ndp_cfg) = + nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS]); + } + + if (tb[QCA_WLAN_VENDOR_ATTR_NDP_IPV6_ADDR]) { + req.is_ipv6_addr_present = true; + qdf_mem_copy(req.ipv6_addr, + nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_IPV6_ADDR]), + QDF_IPV6_ADDR_SIZE); + } + 
cfg80211_debug("ipv6 addr present: %d, addr: %pI6", + req.is_ipv6_addr_present, req.ipv6_addr); + + if (os_if_nan_parse_security_params(tb, &req.ncs_sk_type, &req.pmk, + &req.passphrase, &req.service_name)) { + cfg80211_err("inconsistent security params in request."); + ret = -EINVAL; + goto initiator_req_failed; + } + + cfg80211_debug("vdev_id: %d, transaction_id: %d, channel: %d, service_instance_id: %d, ndp_app_info_len: %d, csid: %d, peer_discovery_mac_addr: %pM", + wlan_vdev_get_id(nan_vdev), req.transaction_id, req.channel, + req.service_instance_id, req.ndp_info.ndp_app_info_len, + req.ncs_sk_type, req.peer_discovery_mac_addr.bytes); + + req.vdev = nan_vdev; + status = ucfg_nan_req_processor(nan_vdev, &req, NDP_INITIATOR_REQ); + ret = qdf_status_to_os_return(status); +initiator_req_failed: + if (ret) + wlan_objmgr_vdev_release_ref(nan_vdev, WLAN_NAN_ID); + + return ret; +} + +/** + * os_if_nan_process_ndp_responder_req() - NDP responder request handler + * @nan_ctx: hdd context + * @tb: parsed NL attribute list + * + * tb includes following vendor attributes: + * QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR + * QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID + * QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID + * QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE + * QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO - optional + * QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS - optional + * QCA_WLAN_VENDOR_ATTR_NDP_PMK - optional + * QCA_WLAN_VENDOR_ATTR_NDP_CSID - optional + * QCA_WLAN_VENDOR_ATTR_NDP_PASSPHRASE - optional + * QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_NAME - optional + * + * Return: 0 on success or error code on failure + */ +static int os_if_nan_process_ndp_responder_req(struct wlan_objmgr_psoc *psoc, + struct nlattr **tb) +{ + int ret = 0; + char *iface_name; + QDF_STATUS status; + enum nan_datapath_state state; + struct wlan_objmgr_vdev *nan_vdev = NULL; + struct nan_datapath_responder_req req = {0}; + + if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE]) { + cfg80211_err("ndp_rsp is unavailable"); + return 
-EINVAL; + } + req.ndp_rsp = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE]); + + if (req.ndp_rsp == NAN_DATAPATH_RESPONSE_ACCEPT) { + if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]) { + cfg80211_err("Interface not provided"); + return -ENODEV; + } + iface_name = nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]); + + /* Check for an existing NAN interface */ + nan_vdev = wlan_util_get_vdev_by_ifname(psoc, iface_name, + WLAN_NAN_ID); + if (!nan_vdev) { + cfg80211_err("NAN data iface %s not available", + iface_name); + return -ENODEV; + } + + if (nan_vdev->vdev_mlme.vdev_opmode != QDF_NDI_MODE) { + cfg80211_err("Interface found is not NDI"); + ret = -ENODEV; + goto responder_req_failed; + } + } else { + /* + * If the data indication is rejected, the userspace + * may not send the iface name. Use the first NDI + * in that case + */ + cfg80211_debug("ndp rsp rejected, using first NDI"); + + nan_vdev = wlan_objmgr_get_vdev_by_opmode_from_psoc( + psoc, QDF_NDI_MODE, WLAN_NAN_ID); + if (!nan_vdev) { + cfg80211_err("NAN data iface is not available"); + return -ENODEV; + } + } + + state = ucfg_nan_get_ndi_state(nan_vdev); + if (state == NAN_DATA_NDI_DELETED_STATE || + state == NAN_DATA_NDI_DELETING_STATE || + state == NAN_DATA_NDI_CREATING_STATE) { + cfg80211_err("Data request not allowed in current NDI state:%d", + state); + ret = -EAGAIN; + goto responder_req_failed; + } + + if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID]) { + cfg80211_err("Transaction ID is unavailable"); + ret = -EINVAL; + goto responder_req_failed; + } + req.transaction_id = + nla_get_u16(tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID]); + + if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID]) { + cfg80211_err("Instance ID is unavailable"); + ret = -EINVAL; + goto responder_req_failed; + } + req.ndp_instance_id = + nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID]); + + if (tb[QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO]) { + req.ndp_info.ndp_app_info_len = + nla_len(tb[QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO]); + 
qdf_mem_copy(req.ndp_info.ndp_app_info, + nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO]), + req.ndp_info.ndp_app_info_len); + } else { + cfg80211_debug("NDP app info is unavailable"); + } + + if (tb[QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS]) { + /* at present ndp config stores 4 bytes QOS info only */ + req.ndp_config.ndp_cfg_len = 4; + *((uint32_t *)req.ndp_config.ndp_cfg) = + nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS]); + } else { + cfg80211_debug("NDP config data is unavailable"); + } + + if (tb[QCA_WLAN_VENDOR_ATTR_NDP_IPV6_ADDR]) { + req.is_ipv6_addr_present = true; + qdf_mem_copy(req.ipv6_addr, + nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_IPV6_ADDR]), + QDF_IPV6_ADDR_SIZE); + } + if (tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PORT]) { + req.is_port_present = true; + req.port = nla_get_u16( + tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PORT]); + } + if (tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PROTOCOL]) { + req.is_protocol_present = true; + req.protocol = nla_get_u8( + tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PROTOCOL]); + } + cfg80211_debug("ipv6 addr present: %d, addr: %pI6", + req.is_ipv6_addr_present, req.ipv6_addr); + cfg80211_debug("port %d, present: %d", req.port, req.is_port_present); + cfg80211_debug("protocol %d, present: %d", + req.protocol, req.is_protocol_present); + + if (os_if_nan_parse_security_params(tb, &req.ncs_sk_type, &req.pmk, + &req.passphrase, &req.service_name)) { + cfg80211_err("inconsistent security params in request."); + ret = -EINVAL; + goto responder_req_failed; + } + + cfg80211_debug("vdev_id: %d, transaction_id: %d, ndp_rsp %d, ndp_instance_id: %d, ndp_app_info_len: %d, csid: %d", + wlan_vdev_get_id(nan_vdev), req.transaction_id, req.ndp_rsp, + req.ndp_instance_id, req.ndp_info.ndp_app_info_len, + req.ncs_sk_type); + + req.vdev = nan_vdev; + status = ucfg_nan_req_processor(nan_vdev, &req, NDP_RESPONDER_REQ); + ret = qdf_status_to_os_return(status); + +responder_req_failed: + if (ret) + wlan_objmgr_vdev_release_ref(nan_vdev, WLAN_NAN_ID); + + 
return ret; + +} + +/** + * os_if_nan_process_ndp_end_req() - NDP end request handler + * @psoc: pointer to psoc object + * + * @tb: parsed NL attribute list + * tb includes following vendor attributes: + * QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID + * + * Return: 0 on success or error code on failure + */ +static int os_if_nan_process_ndp_end_req(struct wlan_objmgr_psoc *psoc, + struct nlattr **tb) +{ + int ret = 0; + QDF_STATUS status; + struct wlan_objmgr_vdev *nan_vdev; + struct nan_datapath_end_req req = {0}; + + if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID]) { + cfg80211_err("Transaction ID is unavailable"); + return -EINVAL; + } + req.transaction_id = + nla_get_u16(tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID]); + + if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID_ARRAY]) { + cfg80211_err("NDP instance ID array is unavailable"); + return -EINVAL; + } + + req.num_ndp_instances = + nla_len(tb[QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID_ARRAY]) / + sizeof(uint32_t); + if (0 >= req.num_ndp_instances) { + cfg80211_err("Num NDP instances is 0"); + return -EINVAL; + } + qdf_mem_copy(req.ndp_ids, + nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID_ARRAY]), + req.num_ndp_instances * sizeof(uint32_t)); + + cfg80211_debug("sending ndp_end_req to SME, transaction_id: %d", + req.transaction_id); + + nan_vdev = wlan_objmgr_get_vdev_by_opmode_from_psoc(psoc, QDF_NDI_MODE, + WLAN_NAN_ID); + if (!nan_vdev) { + cfg80211_err("NAN data interface is not available"); + return -EINVAL; + } + + req.vdev = nan_vdev; + status = ucfg_nan_req_processor(nan_vdev, &req, NDP_END_REQ); + ret = qdf_status_to_os_return(status); + if (ret) + wlan_objmgr_vdev_release_ref(nan_vdev, WLAN_NAN_ID); + + return ret; +} + +int os_if_nan_process_ndp_cmd(struct wlan_objmgr_psoc *psoc, + const void *data, int data_len) +{ + uint32_t ndp_cmd_type; + uint16_t transaction_id; + struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_NDP_PARAMS_MAX + 1]; + char *iface_name; + + if (wlan_cfg80211_nla_parse(tb, 
QCA_WLAN_VENDOR_ATTR_NDP_PARAMS_MAX, + data, data_len, vendor_attr_policy)) { + cfg80211_err("Invalid NDP vendor command attributes"); + return -EINVAL; + } + + /* Parse and fetch NDP Command Type*/ + if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD]) { + cfg80211_err("NAN datapath cmd type failed"); + return -EINVAL; + } + ndp_cmd_type = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD]); + + if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID]) { + cfg80211_err("attr transaction id failed"); + return -EINVAL; + } + transaction_id = nla_get_u16( + tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID]); + + if (tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]) { + iface_name = nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]); + cfg80211_err("Transaction Id: %d NDPCmd: %d iface_name: %s", + transaction_id, ndp_cmd_type, iface_name); + } else { + cfg80211_err("Transaction Id: %d NDPCmd: %d iface_name: unspecified", + transaction_id, ndp_cmd_type); + } + + cfg80211_debug("Received NDP cmd: %d", ndp_cmd_type); + switch (ndp_cmd_type) { + case QCA_WLAN_VENDOR_ATTR_NDP_INTERFACE_CREATE: + return os_if_nan_process_ndi_create(psoc, tb); + case QCA_WLAN_VENDOR_ATTR_NDP_INTERFACE_DELETE: + return os_if_nan_process_ndi_delete(psoc, tb); + case QCA_WLAN_VENDOR_ATTR_NDP_INITIATOR_REQUEST: + return os_if_nan_process_ndp_initiator_req(psoc, tb); + case QCA_WLAN_VENDOR_ATTR_NDP_RESPONDER_REQUEST: + return os_if_nan_process_ndp_responder_req(psoc, tb); + case QCA_WLAN_VENDOR_ATTR_NDP_END_REQUEST: + return os_if_nan_process_ndp_end_req(psoc, tb); + default: + cfg80211_err("Unrecognized NDP vendor cmd %d", ndp_cmd_type); + return -EINVAL; + } + + return -EINVAL; +} + +static inline uint32_t osif_ndp_get_ndp_initiator_rsp_len(void) +{ + uint32_t data_len = NLMSG_HDRLEN; + + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID].len); + data_len += nla_total_size(vendor_attr_policy[ + 
QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE].len); + + return data_len; +} + +/** + * os_if_ndp_initiator_rsp_handler() - NDP initiator response handler + * @vdev: pointer to vdev object + * @rsp_params: response parameters + * + * Following vendor event is sent to cfg80211: + * QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD = + * QCA_WLAN_VENDOR_ATTR_NDP_INITIATOR_RESPONSE (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID (2 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE (4 bytes) + * + * Return: none + */ +static void os_if_ndp_initiator_rsp_handler(struct wlan_objmgr_vdev *vdev, + struct nan_datapath_initiator_rsp *rsp) +{ + uint32_t data_len; + struct sk_buff *vendor_event; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + struct pdev_osif_priv *os_priv = wlan_pdev_get_ospriv(pdev); + + if (!rsp) { + cfg80211_err("Invalid NDP Initator response"); + return; + } + + data_len = osif_ndp_get_ndp_initiator_rsp_len(); + vendor_event = cfg80211_vendor_event_alloc(os_priv->wiphy, NULL, + data_len, QCA_NL80211_VENDOR_SUBCMD_NDP_INDEX, + GFP_ATOMIC); + if (!vendor_event) { + cfg80211_err("cfg80211_vendor_event_alloc failed"); + return; + } + + if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD, + QCA_WLAN_VENDOR_ATTR_NDP_INITIATOR_RESPONSE)) + goto ndp_initiator_rsp_nla_failed; + + if (nla_put_u16(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID, + rsp->transaction_id)) + goto ndp_initiator_rsp_nla_failed; + + if (nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID, + rsp->ndp_instance_id)) + goto ndp_initiator_rsp_nla_failed; + + if (nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE, + 
rsp->status)) + goto ndp_initiator_rsp_nla_failed; + + if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE, + rsp->reason)) + goto ndp_initiator_rsp_nla_failed; + + cfg80211_debug("NDP Initiator rsp sent, tid:%d, instance id:%d, status:%d, reason: %d", + rsp->transaction_id, rsp->ndp_instance_id, rsp->status, + rsp->reason); + cfg80211_vendor_event(vendor_event, GFP_ATOMIC); + return; +ndp_initiator_rsp_nla_failed: + cfg80211_err("nla_put api failed"); + kfree_skb(vendor_event); +} + +static inline uint32_t osif_ndp_get_ndp_responder_rsp_len(void) +{ + uint32_t data_len = NLMSG_HDRLEN; + + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE].len); + + return data_len; +} + +/* + * os_if_ndp_responder_rsp_handler() - NDP responder response handler + * @vdev: pointer to vdev object + * @rsp: response parameters + * + * Following vendor event is sent to cfg80211: + * QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD = + * QCA_WLAN_VENDOR_ATTR_NDP_RESPONDER_RESPONSE (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID (2 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE (4 bytes) + * + * Return: none + */ +static void os_if_ndp_responder_rsp_handler(struct wlan_objmgr_vdev *vdev, + struct nan_datapath_responder_rsp *rsp) +{ + uint16_t data_len; + struct sk_buff *vendor_event; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + struct pdev_osif_priv *os_priv = wlan_pdev_get_ospriv(pdev); + + if (!rsp) { + cfg80211_err("Invalid NDP Responder response"); + return; + } + + cfg80211_debug("NDP Responder,vdev id %d transaction_id %d status code: %d reason %d", + 
wlan_vdev_get_id(rsp->vdev), rsp->transaction_id, + rsp->status, rsp->reason); + data_len = osif_ndp_get_ndp_responder_rsp_len(); + vendor_event = cfg80211_vendor_event_alloc(os_priv->wiphy, NULL, + data_len, QCA_NL80211_VENDOR_SUBCMD_NDP_INDEX, + GFP_ATOMIC); + if (!vendor_event) { + cfg80211_err("cfg80211_vendor_event_alloc failed"); + return; + } + + if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD, + QCA_WLAN_VENDOR_ATTR_NDP_RESPONDER_RESPONSE)) + goto ndp_responder_rsp_nla_failed; + + if (nla_put_u16(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID, + rsp->transaction_id)) + goto ndp_responder_rsp_nla_failed; + + if (nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE, + rsp->status)) + goto ndp_responder_rsp_nla_failed; + + if (nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE, + rsp->reason)) + goto ndp_responder_rsp_nla_failed; + + cfg80211_vendor_event(vendor_event, GFP_ATOMIC); + return; +ndp_responder_rsp_nla_failed: + cfg80211_err("nla_put api failed"); + kfree_skb(vendor_event); +} + +static inline uint32_t osif_ndp_get_ndp_req_ind_len( + struct nan_datapath_indication_event *event) +{ + uint32_t data_len = NLMSG_HDRLEN; + + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_INSTANCE_ID].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_CSID].len); + /* allocate space including NULL terminator */ + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR].len + 1); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_NDI_MAC_ADDR].len); + data_len += nla_total_size(vendor_attr_policy[ + 
QCA_WLAN_VENDOR_ATTR_NDP_PEER_DISCOVERY_MAC_ADDR].len); + if (event->is_ipv6_addr_present) + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_IPV6_ADDR].len); + if (event->scid.scid_len) + data_len += nla_total_size(event->scid.scid_len); + if (event->ndp_info.ndp_app_info_len) + data_len += nla_total_size(event->ndp_info.ndp_app_info_len); + + return data_len; +} + +/** + * os_if_ndp_indication_handler() - NDP indication handler + * @vdev: pointer to vdev object + * @ind_params: indication parameters + * + * Following vendor event is sent to cfg80211: + * QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD = + * QCA_WLAN_VENDOR_ATTR_NDP_REQUEST_IND (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR (IFNAMSIZ) + * QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_INSTANCE_ID (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_NDI_MAC_ADDR (6 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_PEER_DISCOVERY_MAC_ADDR (6 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO (ndp_app_info_len size) + * QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_CSID(4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_SCID(scid_len in size) + * QCA_WLAN_VENDOR_ATTR_NDP_IPV6_ADDR (16 bytes) + * + * Return: none + */ +static void os_if_ndp_indication_handler(struct wlan_objmgr_vdev *vdev, + struct nan_datapath_indication_event *event) +{ + uint8_t *ifname; + uint16_t data_len; + qdf_size_t ifname_len; + uint32_t ndp_qos_config; + struct sk_buff *vendor_event; + enum nan_datapath_state state; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + struct pdev_osif_priv *os_priv = wlan_pdev_get_ospriv(pdev); + + if (!event) { + cfg80211_err("Invalid NDP Indication"); + return; + } + + cfg80211_debug("NDP Indication, policy: %d", event->policy); + state = ucfg_nan_get_ndi_state(vdev); + /* check if we are in middle of deleting/creating the interface */ + + if (state == NAN_DATA_NDI_DELETED_STATE || + state == NAN_DATA_NDI_DELETING_STATE || + state == 
NAN_DATA_NDI_CREATING_STATE) { + cfg80211_err("Data request not allowed in current NDI state: %d", + state); + return; + } + + ifname = wlan_util_vdev_get_if_name(vdev); + if (!ifname) { + cfg80211_err("ifname is null"); + return; + } + ifname_len = qdf_str_len(ifname); + if (ifname_len > IFNAMSIZ) { + cfg80211_err("ifname(%zu) too long", ifname_len); + return; + } + + data_len = osif_ndp_get_ndp_req_ind_len(event); + /* notify response to the upper layer */ + vendor_event = cfg80211_vendor_event_alloc(os_priv->wiphy, + NULL, data_len, + QCA_NL80211_VENDOR_SUBCMD_NDP_INDEX, + GFP_ATOMIC); + if (!vendor_event) { + cfg80211_err("cfg80211_vendor_event_alloc failed"); + return; + } + + if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD, + QCA_WLAN_VENDOR_ATTR_NDP_REQUEST_IND)) + goto ndp_indication_nla_failed; + + if (nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR, + ifname_len, ifname)) + goto ndp_indication_nla_failed; + + if (nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_INSTANCE_ID, + event->service_instance_id)) + goto ndp_indication_nla_failed; + + if (nla_put(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_NDI_MAC_ADDR, + QDF_MAC_ADDR_SIZE, event->peer_mac_addr.bytes)) + goto ndp_indication_nla_failed; + + if (nla_put(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_PEER_DISCOVERY_MAC_ADDR, + QDF_MAC_ADDR_SIZE, event->peer_discovery_mac_addr.bytes)) + goto ndp_indication_nla_failed; + + if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID, + event->ndp_instance_id)) + goto ndp_indication_nla_failed; + + if (event->ndp_info.ndp_app_info_len) + if (nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO, + event->ndp_info.ndp_app_info_len, + event->ndp_info.ndp_app_info)) + goto ndp_indication_nla_failed; + + if (event->ndp_config.ndp_cfg_len) { + ndp_qos_config = *((uint32_t *)event->ndp_config.ndp_cfg); + /* at present ndp config stores 4 bytes QOS info only */ + if (nla_put_u32(vendor_event, + 
QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS, + ndp_qos_config)) + goto ndp_indication_nla_failed; + } + + if (event->scid.scid_len) { + if (nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_CSID, + event->ncs_sk_type)) + goto ndp_indication_nla_failed; + + if (nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_SCID, + event->scid.scid_len, + event->scid.scid)) + goto ndp_indication_nla_failed; + + cfg80211_debug("csid: %d, scid_len: %d", + event->ncs_sk_type, event->scid.scid_len); + + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_DEBUG, + event->scid.scid, event->scid.scid_len); + } + + if (event->is_ipv6_addr_present) { + if (nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_IPV6_ADDR, + QDF_IPV6_ADDR_SIZE, event->ipv6_addr)) + goto ndp_indication_nla_failed; + } + cfg80211_debug("ipv6 addr present: %d, addr: %pI6", + event->is_ipv6_addr_present, event->ipv6_addr); + + cfg80211_vendor_event(vendor_event, GFP_ATOMIC); + return; +ndp_indication_nla_failed: + cfg80211_err("nla_put api failed"); + kfree_skb(vendor_event); +} + +static inline uint32_t osif_ndp_get_ndp_confirm_ind_len( + struct nan_datapath_confirm_event *ndp_confirm) +{ + uint32_t ch_info_len = 0; + uint32_t data_len = NLMSG_HDRLEN; + + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_NDI_MAC_ADDR].len); + /* allocate space including NULL terminator */ + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR].len + 1); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE].len); + if (ndp_confirm->ndp_info.ndp_app_info_len) + data_len += + nla_total_size(ndp_confirm->ndp_info.ndp_app_info_len); + + if (ndp_confirm->is_ipv6_addr_present) 
+ data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_IPV6_ADDR].len); + if (ndp_confirm->is_port_present) + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PORT].len); + if (ndp_confirm->is_protocol_present) + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PROTOCOL].len); + + /* ch_info is a nested array of following attributes */ + ch_info_len += nla_total_size( + vendor_attr_policy[QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL].len); + ch_info_len += nla_total_size( + vendor_attr_policy[QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_WIDTH].len); + ch_info_len += nla_total_size( + vendor_attr_policy[QCA_WLAN_VENDOR_ATTR_NDP_NSS].len); + + if (ndp_confirm->num_channels) + data_len += ndp_confirm->num_channels * + nla_total_size(ch_info_len); + + return data_len; +} + +static QDF_STATUS os_if_ndp_confirm_pack_ch_info(struct sk_buff *event, + struct nan_datapath_confirm_event *ndp_confirm) +{ + int idx = 0; + struct nlattr *ch_array, *ch_element; + + cfg80211_debug("num_ch: %d", ndp_confirm->num_channels); + if (!ndp_confirm->num_channels) + return QDF_STATUS_SUCCESS; + + ch_array = nla_nest_start(event, QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_INFO); + if (!ch_array) + return QDF_STATUS_E_FAULT; + + for (idx = 0; idx < ndp_confirm->num_channels; idx++) { + cfg80211_debug("ch[%d]: freq: %d, width: %d, nss: %d", + idx, ndp_confirm->ch[idx].channel, + ndp_confirm->ch[idx].ch_width, + ndp_confirm->ch[idx].nss); + ch_element = nla_nest_start(event, idx); + if (!ch_element) + return QDF_STATUS_E_FAULT; + + if (nla_put_u32(event, QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL, + ndp_confirm->ch[idx].channel)) + return QDF_STATUS_E_FAULT; + + if (nla_put_u32(event, QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_WIDTH, + ndp_confirm->ch[idx].ch_width)) + return QDF_STATUS_E_FAULT; + + if (nla_put_u32(event, QCA_WLAN_VENDOR_ATTR_NDP_NSS, + ndp_confirm->ch[idx].nss)) + return QDF_STATUS_E_FAULT; + nla_nest_end(event, ch_element); + } + 
nla_nest_end(event, ch_array); + + return QDF_STATUS_SUCCESS; +} + +/** + * os_if_ndp_confirm_ind_handler() - NDP confirm indication handler + * @vdev: pointer to vdev object + * @ind_params: indication parameters + * + * Following vendor event is sent to cfg80211: + * QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD = + * QCA_WLAN_VENDOR_ATTR_NDP_CONFIRM_IND (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_NDI_MAC_ADDR (6 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR (IFNAMSIZ) + * QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO (ndp_app_info_len size) + * QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_IPV6_ADDR (16 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PORT (2 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PROTOCOL (1 byte) + * + * Return: none + */ +static void +os_if_ndp_confirm_ind_handler(struct wlan_objmgr_vdev *vdev, + struct nan_datapath_confirm_event *ndp_confirm) +{ + int idx = 0; + uint8_t *ifname; + uint32_t data_len; + QDF_STATUS status; + qdf_size_t ifname_len; + struct nan_callbacks cb_obj; + struct sk_buff *vendor_event; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + struct wlan_objmgr_psoc *psoc = wlan_vdev_get_psoc(vdev); + struct pdev_osif_priv *os_priv = wlan_pdev_get_ospriv(pdev); + + if (!ndp_confirm) { + cfg80211_err("Invalid NDP Initator response"); + return; + } + + status = ucfg_nan_get_callbacks(psoc, &cb_obj); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("couldn't get callbacks"); + return; + } + + /* ndp_confirm is called each time user generated ndp req succeeds */ + idx = cb_obj.get_peer_idx(wlan_vdev_get_id(vdev), + &ndp_confirm->peer_ndi_mac_addr); + + if (idx < 0) + cfg80211_err("can't find addr: %pM in vdev_id: %d, peer table.", + &ndp_confirm->peer_ndi_mac_addr, + wlan_vdev_get_id(vdev)); + else if (ndp_confirm->rsp_code == NAN_DATAPATH_RESPONSE_ACCEPT) { + uint32_t active_sessions = + 
ucfg_nan_get_active_ndp_sessions(vdev, idx); + ucfg_nan_set_active_ndp_sessions(vdev, active_sessions + 1, + idx); + } + + ifname = wlan_util_vdev_get_if_name(vdev); + if (!ifname) { + cfg80211_err("ifname is null"); + return; + } + ifname_len = qdf_str_len(ifname); + if (ifname_len > IFNAMSIZ) { + cfg80211_err("ifname(%zu) too long", ifname_len); + return; + } + + data_len = osif_ndp_get_ndp_confirm_ind_len(ndp_confirm); + vendor_event = cfg80211_vendor_event_alloc(os_priv->wiphy, NULL, + data_len, QCA_NL80211_VENDOR_SUBCMD_NDP_INDEX, + GFP_ATOMIC); + if (!vendor_event) { + cfg80211_err("cfg80211_vendor_event_alloc failed"); + return; + } + + if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD, + QCA_WLAN_VENDOR_ATTR_NDP_CONFIRM_IND)) + goto ndp_confirm_nla_failed; + + if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID, + ndp_confirm->ndp_instance_id)) + goto ndp_confirm_nla_failed; + + if (nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_NDI_MAC_ADDR, + QDF_MAC_ADDR_SIZE, ndp_confirm->peer_ndi_mac_addr.bytes)) + goto ndp_confirm_nla_failed; + + if (nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR, + ifname_len, ifname)) + goto ndp_confirm_nla_failed; + + if (ndp_confirm->ndp_info.ndp_app_info_len && + nla_put(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO, + ndp_confirm->ndp_info.ndp_app_info_len, + ndp_confirm->ndp_info.ndp_app_info)) + goto ndp_confirm_nla_failed; + + if (nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE, + ndp_confirm->rsp_code)) + goto ndp_confirm_nla_failed; + + if (nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE, + ndp_confirm->reason_code)) + goto ndp_confirm_nla_failed; + + if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_NUM_CHANNELS, + ndp_confirm->num_channels)) + goto ndp_confirm_nla_failed; + + status = os_if_ndp_confirm_pack_ch_info(vendor_event, ndp_confirm); + if (QDF_IS_STATUS_ERROR(status)) + goto ndp_confirm_nla_failed; + + if 
(ndp_confirm->is_ipv6_addr_present) { + if (nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_IPV6_ADDR, + QDF_IPV6_ADDR_SIZE, ndp_confirm->ipv6_addr)) + goto ndp_confirm_nla_failed; + } + if (ndp_confirm->is_port_present) + if (nla_put_u16(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PORT, + ndp_confirm->port)) + goto ndp_confirm_nla_failed; + if (ndp_confirm->is_protocol_present) + if (nla_put_u8(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PROTOCOL, + ndp_confirm->protocol)) + goto ndp_confirm_nla_failed; + cfg80211_debug("ipv6 addr present: %d, addr: %pI6", + ndp_confirm->is_ipv6_addr_present, + ndp_confirm->ipv6_addr); + cfg80211_debug("port %d, present: %d", + ndp_confirm->port, ndp_confirm->is_port_present); + cfg80211_debug("protocol %d, present: %d", + ndp_confirm->protocol, ndp_confirm->is_protocol_present); + + cfg80211_vendor_event(vendor_event, GFP_ATOMIC); + cfg80211_debug("NDP confim sent, ndp instance id: %d, peer addr: %pM rsp_code: %d, reason_code: %d", + ndp_confirm->ndp_instance_id, + ndp_confirm->peer_ndi_mac_addr.bytes, + ndp_confirm->rsp_code, ndp_confirm->reason_code); + + cfg80211_debug("NDP confim, ndp app info dump"); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_DEBUG, + ndp_confirm->ndp_info.ndp_app_info, + ndp_confirm->ndp_info.ndp_app_info_len); + return; +ndp_confirm_nla_failed: + cfg80211_err("nla_put api failed"); + kfree_skb(vendor_event); +} + +static inline uint32_t osif_ndp_get_ndp_end_rsp_len(void) +{ + uint32_t data_len = NLMSG_HDRLEN; + + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID].len); + + return data_len; +} + +/** + * os_if_ndp_end_rsp_handler() - NDP end response 
handler + * @vdev: pointer to vdev object + * @rsp_params: response parameters + * + * Following vendor event is sent to cfg80211: + * QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD = + * QCA_WLAN_VENDOR_ATTR_NDP_END_RESPONSE(4 bytest) + * QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID (2 bytes) + * + * Return: none + */ +static void os_if_ndp_end_rsp_handler(struct wlan_objmgr_vdev *vdev, + struct nan_datapath_end_rsp_event *rsp) +{ + uint32_t data_len; + struct sk_buff *vendor_event; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + struct pdev_osif_priv *os_priv = wlan_pdev_get_ospriv(pdev); + + if (!rsp) { + cfg80211_err("Invalid ndp end response"); + return; + } + + data_len = osif_ndp_get_ndp_end_rsp_len(); + vendor_event = cfg80211_vendor_event_alloc(os_priv->wiphy, NULL, + data_len, QCA_NL80211_VENDOR_SUBCMD_NDP_INDEX, + GFP_ATOMIC); + if (!vendor_event) { + cfg80211_err("cfg80211_vendor_event_alloc failed"); + return; + } + + if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD, + QCA_WLAN_VENDOR_ATTR_NDP_END_RESPONSE)) + goto ndp_end_rsp_nla_failed; + + if (nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE, + rsp->status)) + goto ndp_end_rsp_nla_failed; + + if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE, + rsp->reason)) + goto ndp_end_rsp_nla_failed; + + if (nla_put_u16(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID, + rsp->transaction_id)) + goto ndp_end_rsp_nla_failed; + + cfg80211_debug("NDP End rsp sent, transaction id: %d, status: %d, reason: %d", + rsp->transaction_id, rsp->status, rsp->reason); + cfg80211_vendor_event(vendor_event, GFP_ATOMIC); + return; + +ndp_end_rsp_nla_failed: + cfg80211_err("nla_put api failed"); + kfree_skb(vendor_event); +} + +static inline uint32_t osif_ndp_get_ndp_end_ind_len( + struct nan_datapath_end_indication_event *end_ind) +{ + uint32_t 
data_len = NLMSG_HDRLEN; + + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD].len); + if (end_ind->num_ndp_ids) + data_len += nla_total_size(end_ind->num_ndp_ids * + sizeof(uint32_t)); + + return data_len; +} + +/** + * os_if_ndp_end_ind_handler() - NDP end indication handler + * @vdev: pointer to vdev object + * @ind_params: indication parameters + * + * Following vendor event is sent to cfg80211: + * QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD = + * QCA_WLAN_VENDOR_ATTR_NDP_END_IND (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID_ARRAY (4 * num of NDP Instances) + * + * Return: none + */ +static void os_if_ndp_end_ind_handler(struct wlan_objmgr_vdev *vdev, + struct nan_datapath_end_indication_event *end_ind) +{ + QDF_STATUS status; + uint32_t data_len, i; + struct nan_callbacks cb_obj; + uint32_t *ndp_instance_array; + struct sk_buff *vendor_event; + struct wlan_objmgr_vdev *vdev_itr; + struct wlan_objmgr_psoc *psoc = wlan_vdev_get_psoc(vdev); + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + struct pdev_osif_priv *os_priv = wlan_pdev_get_ospriv(pdev); + + status = ucfg_nan_get_callbacks(psoc, &cb_obj); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("failed to get callbacks"); + return; + } + + if (!end_ind) { + cfg80211_err("Invalid ndp end indication"); + return; + } + + ndp_instance_array = qdf_mem_malloc(end_ind->num_ndp_ids * + sizeof(*ndp_instance_array)); + if (!ndp_instance_array) { + cfg80211_err("Failed to allocate ndp_instance_array"); + return; + } + for (i = 0; i < end_ind->num_ndp_ids; i++) { + int idx = 0; + + ndp_instance_array[i] = end_ind->ndp_map[i].ndp_instance_id; + vdev_itr = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + end_ind->ndp_map[i].vdev_id, WLAN_NAN_ID); + + if (vdev_itr == NULL) { + cfg80211_err("vdev not found for vdev_id: %d", + end_ind->ndp_map[i].vdev_id); + continue; + } + + idx = cb_obj.get_peer_idx(wlan_vdev_get_id(vdev_itr), + &end_ind->ndp_map[i].peer_ndi_mac_addr); + if (idx < 0) { + 
cfg80211_err("can't find addr: %pM in sta_ctx.", + &end_ind->ndp_map[i].peer_ndi_mac_addr); + wlan_objmgr_vdev_release_ref(vdev_itr, WLAN_NAN_ID); + continue; + } + /* save the value of active sessions on each peer */ + ucfg_nan_set_active_ndp_sessions(vdev_itr, + end_ind->ndp_map[i].num_active_ndp_sessions, + idx); + wlan_objmgr_vdev_release_ref(vdev_itr, WLAN_NAN_ID); + } + + data_len = osif_ndp_get_ndp_end_ind_len(end_ind); + vendor_event = cfg80211_vendor_event_alloc(os_priv->wiphy, NULL, + data_len, QCA_NL80211_VENDOR_SUBCMD_NDP_INDEX, + GFP_ATOMIC); + if (!vendor_event) { + cfg80211_err("cfg80211_vendor_event_alloc failed"); + return; + } + + if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD, + QCA_WLAN_VENDOR_ATTR_NDP_END_IND)) + goto ndp_end_ind_nla_failed; + + if (nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID_ARRAY, + end_ind->num_ndp_ids * sizeof(*ndp_instance_array), + ndp_instance_array)) + goto ndp_end_ind_nla_failed; + + cfg80211_vendor_event(vendor_event, GFP_ATOMIC); + qdf_mem_free(ndp_instance_array); + return; + +ndp_end_ind_nla_failed: + cfg80211_err("nla_put api failed"); + kfree_skb(vendor_event); + qdf_mem_free(ndp_instance_array); +} + +/** + * os_if_new_peer_ind_handler() - NDP new peer indication handler + * @adapter: pointer to adapter context + * @ind_params: indication parameters + * + * Return: none + */ +static void os_if_new_peer_ind_handler(struct wlan_objmgr_vdev *vdev, + struct nan_datapath_peer_ind *peer_ind) +{ + int ret; + QDF_STATUS status; + uint8_t vdev_id = wlan_vdev_get_id(vdev); + struct wlan_objmgr_psoc *psoc = wlan_vdev_get_psoc(vdev); + uint32_t active_peers = ucfg_nan_get_active_peers(vdev); + struct nan_callbacks cb_obj; + + if (NULL == peer_ind) { + cfg80211_err("Invalid new NDP peer params"); + return; + } + + status = ucfg_nan_get_callbacks(psoc, &cb_obj); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("failed to get callbacks"); + return; + } + + cfg80211_debug("vdev_id: %d, peer_mac: 
%pM, sta_id: %d", + vdev_id, peer_ind->peer_mac_addr.bytes, + peer_ind->sta_id); + ret = cb_obj.new_peer_ind(vdev_id, peer_ind->sta_id, + &peer_ind->peer_mac_addr, + (active_peers == 0 ? true : false)); + if (ret) { + cfg80211_err("new peer handling at HDD failed %d", ret); + return; + } + + active_peers++; + ucfg_nan_set_active_peers(vdev, active_peers); + cfg80211_debug("vdev_id: %d, num_peers: %d", vdev_id, active_peers); +} + +/** + * os_if_peer_departed_ind_handler() - Handle NDP peer departed indication + * @adapter: pointer to adapter context + * @ind_params: indication parameters + * + * Return: none + */ +static void os_if_peer_departed_ind_handler(struct wlan_objmgr_vdev *vdev, + struct nan_datapath_peer_ind *peer_ind) +{ + QDF_STATUS status; + struct nan_callbacks cb_obj; + uint8_t vdev_id = wlan_vdev_get_id(vdev); + struct wlan_objmgr_psoc *psoc = wlan_vdev_get_psoc(vdev); + uint32_t active_peers = ucfg_nan_get_active_peers(vdev); + + status = ucfg_nan_get_callbacks(psoc, &cb_obj); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("failed to get callbacks"); + return; + } + + if (NULL == peer_ind) { + cfg80211_err("Invalid new NDP peer params"); + return; + } + cfg80211_debug("vdev_id: %d, peer_mac: %pM, sta_id: %d", + vdev_id, peer_ind->peer_mac_addr.bytes, + peer_ind->sta_id); + active_peers--; + ucfg_nan_set_active_peers(vdev, active_peers); + cb_obj.peer_departed_ind(vdev_id, peer_ind->sta_id, + &peer_ind->peer_mac_addr, + (active_peers == 0 ? 
true : false)); +} + +static inline uint32_t osif_ndp_get_ndi_create_rsp_len(void) +{ + uint32_t data_len = NLMSG_HDRLEN; + + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE].len); + + return data_len; +} + +/** + * os_if_ndp_iface_create_rsp_handler() - NDP iface create response handler + * @adapter: pointer to adapter context + * @rsp_params: response parameters + * + * The function is expected to send a response back to the user space + * even if the creation of BSS has failed + * + * Following vendor event is sent to cfg80211: + * QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD = + * QCA_WLAN_VENDOR_ATTR_NDP_INTERFACE_CREATE (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID (2 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE (4 bytes) + * QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE + * + * Return: none + */ +static void os_if_ndp_iface_create_rsp_handler(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev, + void *rsp_params) +{ + uint32_t data_len; + QDF_STATUS status; + bool create_fail = false; + struct nan_callbacks cb_obj; + struct sk_buff *vendor_event; + uint16_t create_transaction_id; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + struct pdev_osif_priv *os_priv = wlan_pdev_get_ospriv(pdev); + uint32_t create_status = NAN_DATAPATH_RSP_STATUS_ERROR; + uint32_t create_reason = NAN_DATAPATH_NAN_DATA_IFACE_CREATE_FAILED; + struct nan_datapath_inf_create_rsp *ndi_rsp = + (struct nan_datapath_inf_create_rsp *)rsp_params; + + status = ucfg_nan_get_callbacks(psoc, &cb_obj); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("Couldn't get ballback object"); + return; + } + + if (ndi_rsp) { + create_status 
= ndi_rsp->status; + create_reason = ndi_rsp->reason; + } else { + cfg80211_err("Invalid ndi create response"); + create_fail = true; + } + + create_transaction_id = ucfg_nan_get_ndp_create_transaction_id(vdev); + data_len = osif_ndp_get_ndi_create_rsp_len(); + /* notify response to the upper layer */ + vendor_event = cfg80211_vendor_event_alloc(os_priv->wiphy, + NULL, + data_len, + QCA_NL80211_VENDOR_SUBCMD_NDP_INDEX, + GFP_KERNEL); + if (!vendor_event) { + cfg80211_err("cfg80211_vendor_event_alloc failed"); + create_fail = true; + goto close_ndi; + } + + /* Sub vendor command */ + if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD, + QCA_WLAN_VENDOR_ATTR_NDP_INTERFACE_CREATE)) { + cfg80211_err("QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD put fail"); + goto nla_put_failure; + } + + /* Transaction id */ + if (nla_put_u16(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID, + create_transaction_id)) { + cfg80211_err("VENDOR_ATTR_NDP_TRANSACTION_ID put fail"); + goto nla_put_failure; + } + + /* Status code */ + if (nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE, + create_status)) { + cfg80211_err("VENDOR_ATTR_NDP_DRV_RETURN_TYPE put fail"); + goto nla_put_failure; + } + + /* Status return value */ + if (nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE, + create_reason)) { + cfg80211_err("VENDOR_ATTR_NDP_DRV_RETURN_VALUE put fail"); + goto nla_put_failure; + } + + cfg80211_debug("sub command: %d, value: %d", + QCA_NL80211_VENDOR_SUBCMD_NDP, + QCA_WLAN_VENDOR_ATTR_NDP_INTERFACE_CREATE); + cfg80211_debug("create transaction id: %d, value: %d", + QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID, create_transaction_id); + cfg80211_debug("status code: %d, value: %d", + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE, + create_status); + cfg80211_debug("Return value: %d, value: %d", + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE, create_reason); + + cfg80211_vendor_event(vendor_event, GFP_KERNEL); + + if (!create_fail) { + /* 
update txrx queues and register self sta */ + cb_obj.drv_ndi_create_rsp_handler(wlan_vdev_get_id(vdev), + ndi_rsp); + } else { + cfg80211_err("NDI interface creation failed with reason %d", + create_reason); + goto close_ndi; + } + + return; + +nla_put_failure: + kfree_skb(vendor_event); +close_ndi: + cb_obj.ndi_close(wlan_vdev_get_id(vdev)); + return; +} + +/** + * os_if_ndp_iface_delete_rsp_handler() - NDP iface delete response handler + * @adapter: pointer to adapter context + * @rsp_params: response parameters + * + * Return: none + */ +static void os_if_ndp_iface_delete_rsp_handler(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev, + void *rsp_params) +{ + QDF_STATUS status; + uint8_t vdev_id = wlan_vdev_get_id(vdev); + struct nan_datapath_inf_delete_rsp *ndi_rsp = rsp_params; + struct nan_callbacks cb_obj; + + if (!ndi_rsp) { + cfg80211_err("Invalid ndi delete response"); + return; + } + + status = ucfg_nan_get_callbacks(psoc, &cb_obj); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("Couldn't get ballback object"); + return; + } + + if (ndi_rsp->status == NAN_DATAPATH_RSP_STATUS_SUCCESS) + cfg80211_debug("NDI BSS successfully stopped"); + else + cfg80211_debug("NDI BSS stop failed with reason %d", + ndi_rsp->reason); + + ucfg_nan_set_ndi_delete_rsp_reason(vdev, ndi_rsp->reason); + ucfg_nan_set_ndi_delete_rsp_status(vdev, ndi_rsp->status); + cb_obj.drv_ndi_delete_rsp_handler(vdev_id); +} + +static inline uint32_t osif_ndp_get_ndp_sch_update_ind_len( + struct nan_datapath_sch_update_event *sch_update) +{ + uint32_t ch_info_len = 0; + uint32_t data_len = NLMSG_HDRLEN; + + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_PEER_DISCOVERY_MAC_ADDR].len); + if (sch_update->num_ndp_instances) + data_len += nla_total_size(sch_update->num_ndp_instances * + sizeof(uint32_t)); + data_len += nla_total_size(vendor_attr_policy[ + 
QCA_WLAN_VENDOR_ATTR_NDP_SCHEDULE_UPDATE_REASON].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_NUM_CHANNELS].len); + /* ch_info is a nested array of following attributes */ + ch_info_len += nla_total_size( + vendor_attr_policy[QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL].len); + ch_info_len += nla_total_size( + vendor_attr_policy[QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_WIDTH].len); + ch_info_len += nla_total_size( + vendor_attr_policy[QCA_WLAN_VENDOR_ATTR_NDP_NSS].len); + + if (sch_update->num_ndp_instances) + data_len += sch_update->num_ndp_instances * + nla_total_size(ch_info_len); + + return data_len; +} + +static QDF_STATUS os_if_ndp_sch_update_pack_ch_info(struct sk_buff *event, + struct nan_datapath_sch_update_event *sch_update) +{ + int idx = 0; + struct nlattr *ch_array, *ch_element; + + cfg80211_debug("num_ch: %d", sch_update->num_channels); + if (!sch_update->num_channels) + return QDF_STATUS_SUCCESS; + + ch_array = nla_nest_start(event, QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_INFO); + if (!ch_array) + return QDF_STATUS_E_FAULT; + + for (idx = 0; idx < sch_update->num_channels; idx++) { + cfg80211_debug("ch[%d]: freq: %d, width: %d, nss: %d", + idx, sch_update->ch[idx].channel, + sch_update->ch[idx].ch_width, + sch_update->ch[idx].nss); + ch_element = nla_nest_start(event, idx); + if (!ch_element) + return QDF_STATUS_E_FAULT; + + if (nla_put_u32(event, QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL, + sch_update->ch[idx].channel)) + return QDF_STATUS_E_FAULT; + + if (nla_put_u32(event, QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_WIDTH, + sch_update->ch[idx].ch_width)) + return QDF_STATUS_E_FAULT; + + if (nla_put_u32(event, QCA_WLAN_VENDOR_ATTR_NDP_NSS, + sch_update->ch[idx].nss)) + return QDF_STATUS_E_FAULT; + nla_nest_end(event, ch_element); + } + nla_nest_end(event, ch_array); + + return QDF_STATUS_SUCCESS; +} + +/** + * os_if_ndp_sch_update_ind_handler() - NDP schedule update handler + * @vdev: vdev object pointer + * @ind: sch update pointer + * + * Following 
vendor event is sent to cfg80211: + * + * Return: none + */ +static void os_if_ndp_sch_update_ind_handler(struct wlan_objmgr_vdev *vdev, + void *ind) +{ + int idx = 0; + uint8_t *ifname; + QDF_STATUS status; + uint32_t data_len; + uint8_t ifname_len; + struct sk_buff *vendor_event; + struct nan_datapath_sch_update_event *sch_update = ind; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + struct pdev_osif_priv *os_priv = wlan_pdev_get_ospriv(pdev); + + if (!sch_update) { + cfg80211_err("Invalid sch update params"); + return; + } + + ifname = wlan_util_vdev_get_if_name(vdev); + if (!ifname) { + cfg80211_err("ifname is null"); + return; + } + ifname_len = qdf_str_len(ifname); + if (ifname_len > IFNAMSIZ) { + cfg80211_err("ifname(%d) too long", ifname_len); + return; + } + + data_len = osif_ndp_get_ndp_sch_update_ind_len(sch_update); + vendor_event = cfg80211_vendor_event_alloc(os_priv->wiphy, NULL, + data_len, QCA_NL80211_VENDOR_SUBCMD_NDP_INDEX, + GFP_ATOMIC); + if (!vendor_event) { + cfg80211_err("cfg80211_vendor_event_alloc failed"); + return; + } + + if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD, + QCA_WLAN_VENDOR_ATTR_NDP_SCHEDULE_UPDATE_IND)) + goto ndp_sch_ind_nla_failed; + + if (nla_put(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_PEER_DISCOVERY_MAC_ADDR, + QDF_MAC_ADDR_SIZE, sch_update->peer_addr.bytes)) + goto ndp_sch_ind_nla_failed; + + if (nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID_ARRAY, + sch_update->num_ndp_instances * sizeof(uint32_t), + sch_update->ndp_instances)) + goto ndp_sch_ind_nla_failed; + + if (nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_SCHEDULE_UPDATE_REASON, + sch_update->flags)) + goto ndp_sch_ind_nla_failed; + + if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_NUM_CHANNELS, + sch_update->num_channels)) + goto ndp_sch_ind_nla_failed; + + status = os_if_ndp_sch_update_pack_ch_info(vendor_event, sch_update); + if (QDF_IS_STATUS_ERROR(status)) + goto ndp_sch_ind_nla_failed; + + 
cfg80211_debug("Flags: %d, num_instance_id: %d", sch_update->flags, + sch_update->num_ndp_instances); + + for (idx = 0; idx < sch_update->num_ndp_instances; idx++) + cfg80211_debug("ndp_instance[%d]: %d", idx, + sch_update->ndp_instances[idx]); + + cfg80211_vendor_event(vendor_event, GFP_ATOMIC); + return; + +ndp_sch_ind_nla_failed: + cfg80211_err("nla_put api failed"); + kfree_skb(vendor_event); +} + +void os_if_nan_event_handler(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev, + uint32_t type, void *msg) +{ + switch (type) { + case NAN_DATAPATH_INF_CREATE_RSP: + os_if_ndp_iface_create_rsp_handler(psoc, vdev, msg); + break; + case NAN_DATAPATH_INF_DELETE_RSP: + os_if_ndp_iface_delete_rsp_handler(psoc, vdev, msg); + break; + case NDP_CONFIRM: + os_if_ndp_confirm_ind_handler(vdev, msg); + break; + case NDP_INITIATOR_RSP: + os_if_ndp_initiator_rsp_handler(vdev, msg); + break; + case NDP_INDICATION: + os_if_ndp_indication_handler(vdev, msg); + break; + case NDP_NEW_PEER: + os_if_new_peer_ind_handler(vdev, msg); + break; + case NDP_RESPONDER_RSP: + os_if_ndp_responder_rsp_handler(vdev, msg); + break; + case NDP_END_RSP: + os_if_ndp_end_rsp_handler(vdev, msg); + break; + case NDP_END_IND: + os_if_ndp_end_ind_handler(vdev, msg); + break; + case NDP_PEER_DEPARTED: + os_if_peer_departed_ind_handler(vdev, msg); + break; + case NDP_SCHEDULE_UPDATE: + os_if_ndp_sch_update_ind_handler(vdev, msg); + break; + default: + break; + } +} + +int os_if_nan_register_hdd_callbacks(struct wlan_objmgr_psoc *psoc, + struct nan_callbacks *cb_obj) +{ + return ucfg_nan_register_hdd_callbacks(psoc, cb_obj, + os_if_nan_event_handler); +} + +int os_if_nan_register_lim_callbacks(struct wlan_objmgr_psoc *psoc, + struct nan_callbacks *cb_obj) +{ + return ucfg_nan_register_lim_callbacks(psoc, cb_obj); +} + +void os_if_nan_post_ndi_create_rsp(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, bool success) +{ + struct nan_datapath_inf_create_rsp rsp = {0}; + struct wlan_objmgr_vdev 
*vdev = wlan_objmgr_get_vdev_by_id_from_psoc( + psoc, vdev_id, WLAN_NAN_ID); + + if (!vdev) { + cfg80211_err("vdev is null"); + return; + } + + if (success) { + rsp.status = NAN_DATAPATH_RSP_STATUS_SUCCESS; + rsp.reason = 0; + os_if_nan_event_handler(psoc, vdev, + NAN_DATAPATH_INF_CREATE_RSP, &rsp); + } else { + rsp.status = NAN_DATAPATH_RSP_STATUS_ERROR; + rsp.reason = NAN_DATAPATH_NAN_DATA_IFACE_CREATE_FAILED; + os_if_nan_event_handler(psoc, vdev, + NAN_DATAPATH_INF_CREATE_RSP, &rsp); + } + wlan_objmgr_vdev_release_ref(vdev, WLAN_NAN_ID); +} + +void os_if_nan_post_ndi_delete_rsp(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, bool success) +{ + struct nan_datapath_inf_delete_rsp rsp = {0}; + struct wlan_objmgr_vdev *vdev = wlan_objmgr_get_vdev_by_id_from_psoc( + psoc, vdev_id, WLAN_NAN_ID); + if (!vdev) { + cfg80211_err("vdev is null"); + return; + } + + if (success) { + rsp.status = NAN_DATAPATH_RSP_STATUS_SUCCESS; + rsp.reason = 0; + os_if_nan_event_handler(psoc, vdev, + NAN_DATAPATH_INF_DELETE_RSP, &rsp); + } else { + rsp.status = NAN_DATAPATH_RSP_STATUS_ERROR; + rsp.reason = NAN_DATAPATH_NAN_DATA_IFACE_DELETE_FAILED; + os_if_nan_event_handler(psoc, vdev, + NAN_DATAPATH_INF_DELETE_RSP, &rsp); + } + wlan_objmgr_vdev_release_ref(vdev, WLAN_NAN_ID); +} + +static inline uint32_t osif_ndp_get_ndi_delete_rsp_len(void) +{ + uint32_t data_len = NLMSG_HDRLEN; + + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE].len); + data_len += nla_total_size(vendor_attr_policy[ + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE].len); + + return data_len; +} + +void os_if_nan_ndi_session_end(struct wlan_objmgr_vdev *vdev) +{ + uint32_t data_len; + struct sk_buff *vendor_event; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + struct 
pdev_osif_priv *os_priv = wlan_pdev_get_ospriv(pdev); + + /* + * The virtual adapters are stopped and closed even during + * driver unload or stop, the service layer is not required + * to be informed in that case (response is not expected) + */ + if (NAN_DATA_NDI_DELETING_STATE != ucfg_nan_get_ndi_state(vdev)) { + cfg80211_err("NDI interface deleted"); + return; + } + + data_len = osif_ndp_get_ndi_delete_rsp_len(); + /* notify response to the upper layer */ + vendor_event = cfg80211_vendor_event_alloc(os_priv->wiphy, NULL, + data_len, QCA_NL80211_VENDOR_SUBCMD_NDP_INDEX, + GFP_KERNEL); + + if (!vendor_event) { + cfg80211_err("cfg80211_vendor_event_alloc failed"); + return; + } + + /* Sub vendor command goes first */ + if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD, + QCA_WLAN_VENDOR_ATTR_NDP_INTERFACE_DELETE)) { + cfg80211_err("VENDOR_ATTR_NDP_SUBCMD put fail"); + goto failure; + } + + /* Transaction id */ + if (nla_put_u16(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID, + ucfg_nan_get_ndp_delete_transaction_id(vdev))) { + cfg80211_err("VENDOR_ATTR_NDP_TRANSACTION_ID put fail"); + goto failure; + } + + /* Status code */ + if (nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE, + ucfg_nan_get_ndi_delete_rsp_status(vdev))) { + cfg80211_err("VENDOR_ATTR_NDP_DRV_RETURN_TYPE put fail"); + goto failure; + } + + /* Status return value */ + if (nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE, + ucfg_nan_get_ndi_delete_rsp_reason(vdev))) { + cfg80211_err("VENDOR_ATTR_NDP_DRV_RETURN_VALUE put fail"); + goto failure; + } + + cfg80211_debug("sub command: %d, value: %d", + QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD, + QCA_WLAN_VENDOR_ATTR_NDP_INTERFACE_DELETE); + cfg80211_debug("delete transaction id: %d, value: %d", + QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID, + ucfg_nan_get_ndp_delete_transaction_id(vdev)); + cfg80211_debug("status code: %d, value: %d", + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE, + 
ucfg_nan_get_ndi_delete_rsp_status(vdev)); + cfg80211_debug("Return value: %d, value: %d", + QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE, + ucfg_nan_get_ndi_delete_rsp_reason(vdev)); + + ucfg_nan_set_ndp_delete_transaction_id(vdev, 0); + ucfg_nan_set_ndi_state(vdev, NAN_DATA_NDI_DELETED_STATE); + cfg80211_vendor_event(vendor_event, GFP_KERNEL); + + return; +failure: + kfree_skb(vendor_event); +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/p2p/inc/wlan_cfg80211_p2p.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/p2p/inc/wlan_cfg80211_p2p.h new file mode 100644 index 0000000000000000000000000000000000000000..1c8484b8de4b9f199a6fe15dadbb0fc8b826592e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/p2p/inc/wlan_cfg80211_p2p.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declares P2P functions interfacing with linux kernel + */ + +#ifndef _WLAN_CFG80211_P2P_H_ +#define _WLAN_CFG80211_P2P_H_ + +#include + +struct wlan_objmgr_psoc; +struct wlan_objmgr_vdev; +struct ieee80211_channel; + +/** + * wlan_p2p_start() - start p2p component + * @psoc: soc object + * + * This function used to start P2P component and register events. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_p2p_start(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_p2p_stop() - stop p2p component + * @psoc: soc object + * + * This function used to stop P2P component and unregister events. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_p2p_stop(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_cfg80211_roc() - API to process cfg80211 roc request + * @vdev: Pointer to vdev object + * @chan: Pointer to channel + * @duration: Duration for this roc request + * @cookie: Pointer to return cookie to up layer + * + * API to trigger remain on channel request. It returns cookie + * as the identifier of roc. + * + * Return: 0 for success, non zero for failure + */ +int wlan_cfg80211_roc(struct wlan_objmgr_vdev *vdev, + struct ieee80211_channel *chan, uint32_t duration, + uint64_t *cookie); + +/** + * wlan_cfg80211_cancel_roc() - API to process cfg80211 cancel remain + * on channel request + * @vdev: Pointer to vdev object + * @cookie: Find out the roc request by cookie + * + * API to trigger cancel remain on channel request. + * + * Return: 0 for success, non zero for failure + */ +int wlan_cfg80211_cancel_roc(struct wlan_objmgr_vdev *vdev, + uint64_t cookie); + +/** + * wlan_cfg80211_mgmt_tx() - API to process cfg80211 mgmt tx request + * @vdev: Pointer to vdev object + * @chan: Pointer to channel + * @wait: wait time for this mgmt tx request + * @buf: TX buffer + * @len: Length of tx buffer + * @no_cck: Required cck or not + * @dont_wait_for_ack: Wait for ack or not + * @cookie: Return the cookie to caller + * + * API to trigger mgmt frame tx request. It returns cookie as the + * identifier of this tx. 
+ * + * Return: 0 for success, non zero for failure + */ +int wlan_cfg80211_mgmt_tx(struct wlan_objmgr_vdev *vdev, + struct ieee80211_channel *chan, bool offchan, uint32_t wait, + const uint8_t *buf, uint32_t len, bool no_cck, + bool dont_wait_for_ack, uint64_t *cookie); + +/** + * wlan_cfg80211_mgmt_tx_cancel() - API to process cfg80211 cancel to + * wait mgmt tx + * @vdev: Pointer to vdev object + * @cookie: Find out the mgmt tx request by cookie + * + * API to trigger cancel mgmt frame tx request. + * + * Return: 0 for success, non zero for failure + */ +int wlan_cfg80211_mgmt_tx_cancel(struct wlan_objmgr_vdev *vdev, + uint64_t cookie); + +#endif /* _WLAN_CFG80211_P2P_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/p2p/src/wlan_cfg80211_p2p.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/p2p/src/wlan_cfg80211_p2p.c new file mode 100644 index 0000000000000000000000000000000000000000..2694d8a0a7f8093fe301b870ae8d186be7321ad2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/p2p/src/wlan_cfg80211_p2p.c @@ -0,0 +1,487 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: defines driver functions interfacing with linux kernel + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "wlan_cfg80211.h" +#include "wlan_cfg80211_p2p.h" + +#define MAX_NO_OF_2_4_CHANNELS 14 +#define MAX_OFFCHAN_TIME_FOR_DNBS 150 + +/** + * wlan_p2p_rx_callback() - Callback for rx mgmt frame + * @user_data: pointer to soc object + * @rx_frame: RX mgmt frame information + * + * This callback will be used to rx frames in os interface. + * + * Return: None + */ +static void wlan_p2p_rx_callback(void *user_data, + struct p2p_rx_mgmt_frame *rx_frame) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_vdev *vdev; + struct vdev_osif_priv *osif_priv; + struct wireless_dev *wdev; + uint16_t freq; + + cfg80211_debug("user data:%pK, vdev id:%d, rssi:%d, buf:%pK, len:%d", + user_data, rx_frame->vdev_id, rx_frame->rx_rssi, + rx_frame->buf, rx_frame->frame_len); + + psoc = user_data; + if (!psoc) { + cfg80211_err("psoc is null"); + return; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + rx_frame->vdev_id, WLAN_P2P_ID); + if (!vdev) { + cfg80211_err("vdev is null"); + return; + } + + osif_priv = wlan_vdev_get_ospriv(vdev); + if (!osif_priv) { + cfg80211_err("osif_priv is null"); + goto fail; + } + + wdev = osif_priv->wdev; + if (!wdev) { + cfg80211_err("wdev is null"); + goto fail; + } + + if (rx_frame->rx_chan <= MAX_NO_OF_2_4_CHANNELS) + freq = ieee80211_channel_to_frequency( + rx_frame->rx_chan, NL80211_BAND_2GHZ); + else + freq = ieee80211_channel_to_frequency( + rx_frame->rx_chan, NL80211_BAND_5GHZ); + + cfg80211_debug("Indicate frame over nl80211, vdev id:%d, idx:%d", + rx_frame->vdev_id, wdev->netdev->ifindex); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + cfg80211_rx_mgmt(wdev, freq, rx_frame->rx_rssi * 100, + rx_frame->buf, rx_frame->frame_len, + NL80211_RXMGMT_FLAG_ANSWERED); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) + 
cfg80211_rx_mgmt(wdev, freq, rx_frame->rx_rssi * 100, + rx_frame->buf, rx_frame->frame_len, + NL80211_RXMGMT_FLAG_ANSWERED, GFP_ATOMIC); +#else + cfg80211_rx_mgmt(wdev, freq, rx_frame->rx_rssi * 100, + rx_frame->buf, rx_frame->frame_len, GFP_ATOMIC); +#endif /* LINUX_VERSION_CODE */ +fail: + wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID); +} + +/** + * wlan_p2p_action_tx_cnf_callback() - Callback for tx confirmation + * @user_data: pointer to soc object + * @tx_cnf: tx confirmation information + * + * This callback will be used to give tx mgmt frame confirmation to + * os interface. + * + * Return: None + */ +static void wlan_p2p_action_tx_cnf_callback(void *user_data, + struct p2p_tx_cnf *tx_cnf) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_vdev *vdev; + struct vdev_osif_priv *osif_priv; + struct wireless_dev *wdev; + bool is_success; + + cfg80211_debug("user data:%pK, action cookie:%llx, buf:%pK, len:%d, tx status:%d", + user_data, tx_cnf->action_cookie, tx_cnf->buf, + tx_cnf->buf_len, tx_cnf->status); + + psoc = user_data; + if (!psoc) { + cfg80211_err("psoc is null"); + return; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + tx_cnf->vdev_id, WLAN_P2P_ID); + if (!vdev) { + cfg80211_err("vdev is null"); + return; + } + + osif_priv = wlan_vdev_get_ospriv(vdev); + if (!osif_priv) { + cfg80211_err("osif_priv is null"); + goto fail; + } + + wdev = osif_priv->wdev; + if (!wdev) { + cfg80211_err("wireless dev is null"); + goto fail; + } + + is_success = tx_cnf->status ? false : true; + cfg80211_mgmt_tx_status( + wdev, + tx_cnf->action_cookie, + tx_cnf->buf, tx_cnf->buf_len, + is_success, GFP_KERNEL); +fail: + wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID); +} + +/** + * wlan_p2p_lo_event_callback() - Callback for listen offload event + * @user_data: pointer to soc object + * @p2p_lo_event: listen offload event information + * + * This callback will be used to give listen offload event to os interface. 
+ * + * Return: None + */ +static void wlan_p2p_lo_event_callback(void *user_data, + struct p2p_lo_event *p2p_lo_event) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_vdev *vdev; + struct vdev_osif_priv *osif_priv; + struct wireless_dev *wdev; + struct sk_buff *vendor_event; + + cfg80211_debug("user data:%pK, vdev id:%d, reason code:%d", + user_data, p2p_lo_event->vdev_id, + p2p_lo_event->reason_code); + + psoc = user_data; + if (!psoc) { + cfg80211_err("psoc is null"); + return; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + p2p_lo_event->vdev_id, WLAN_P2P_ID); + if (!vdev) { + cfg80211_err("vdev is null"); + return; + } + + osif_priv = wlan_vdev_get_ospriv(vdev); + if (!osif_priv) { + cfg80211_err("osif_priv is null"); + goto fail; + } + + wdev = osif_priv->wdev; + if (!wdev) { + cfg80211_err("wireless dev is null"); + goto fail; + } + + vendor_event = cfg80211_vendor_event_alloc(wdev->wiphy, NULL, + sizeof(uint32_t) + NLMSG_HDRLEN, + QCA_NL80211_VENDOR_SUBCMD_P2P_LO_EVENT_INDEX, + GFP_KERNEL); + if (!vendor_event) { + cfg80211_err("cfg80211_vendor_event_alloc failed"); + goto fail; + } + + if (nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_STOP_REASON, + p2p_lo_event->reason_code)) { + cfg80211_err("nla put failed"); + kfree_skb(vendor_event); + goto fail; + } + + cfg80211_vendor_event(vendor_event, GFP_KERNEL); + +fail: + wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID); +} + +/** + * wlan_p2p_event_callback() - Callback for P2P event + * @user_data: pointer to soc object + * @p2p_event: p2p event information + * + * This callback will be used to give p2p event to os interface. 
+ * + * Return: None + */ +static void wlan_p2p_event_callback(void *user_data, + struct p2p_event *p2p_event) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_vdev *vdev; + struct ieee80211_channel *chan; + struct vdev_osif_priv *osif_priv; + struct wireless_dev *wdev; + + cfg80211_debug("user data:%pK, vdev id:%d, event type:%d", + user_data, p2p_event->vdev_id, p2p_event->roc_event); + + psoc = user_data; + if (!psoc) { + cfg80211_err("psoc is null"); + return; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + p2p_event->vdev_id, WLAN_P2P_ID); + if (!vdev) { + cfg80211_err("vdev is null"); + return; + } + + osif_priv = wlan_vdev_get_ospriv(vdev); + if (!osif_priv) { + cfg80211_err("osif_priv is null"); + goto fail; + } + + wdev = osif_priv->wdev; + if (!wdev) { + cfg80211_err("wireless dev is null"); + goto fail; + } + + chan = ieee80211_get_channel(wdev->wiphy, + wlan_chan_to_freq(p2p_event->chan)); + if (!chan) { + cfg80211_err("channel conversion failed"); + goto fail; + } + + if (p2p_event->roc_event == ROC_EVENT_READY_ON_CHAN) { + cfg80211_ready_on_channel(wdev, + p2p_event->cookie, chan, + p2p_event->duration, GFP_KERNEL); + } else if (p2p_event->roc_event == ROC_EVENT_COMPLETED) { + cfg80211_remain_on_channel_expired(wdev, + p2p_event->cookie, chan, GFP_KERNEL); + } else { + cfg80211_err("Invalid p2p event"); + } + +fail: + wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID); +} + +QDF_STATUS wlan_p2p_start(struct wlan_objmgr_psoc *psoc) +{ + struct p2p_start_param start_param; + + if (!psoc) { + cfg80211_err("psoc null"); + return QDF_STATUS_E_INVAL; + } + + start_param.rx_cb = wlan_p2p_rx_callback; + start_param.rx_cb_data = psoc; + start_param.event_cb = wlan_p2p_event_callback; + start_param.event_cb_data = psoc; + start_param.tx_cnf_cb = wlan_p2p_action_tx_cnf_callback; + start_param.tx_cnf_cb_data = psoc; + start_param.lo_event_cb = wlan_p2p_lo_event_callback; + start_param.lo_event_cb_data = psoc; + + return ucfg_p2p_psoc_start(psoc, 
&start_param); +} + +QDF_STATUS wlan_p2p_stop(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) { + cfg80211_err("psoc null"); + return QDF_STATUS_E_INVAL; + } + + return ucfg_p2p_psoc_stop(psoc); +} + +int wlan_cfg80211_roc(struct wlan_objmgr_vdev *vdev, + struct ieee80211_channel *chan, uint32_t duration, + uint64_t *cookie) +{ + struct p2p_roc_req roc_req = {0}; + struct wlan_objmgr_psoc *psoc; + uint8_t vdev_id; + bool ok; + int ret; + + if (!vdev) { + cfg80211_err("invalid vdev object"); + return -EINVAL; + } + + if (!chan) { + cfg80211_err("invalid channel"); + return -EINVAL; + } + + psoc = wlan_vdev_get_psoc(vdev); + vdev_id = wlan_vdev_get_id(vdev); + if (!psoc) { + cfg80211_err("psoc handle is NULL"); + return -EINVAL; + } + + roc_req.chan = (uint32_t)wlan_freq_to_chan(chan->center_freq); + roc_req.duration = duration; + roc_req.vdev_id = (uint32_t)vdev_id; + + ret = policy_mgr_is_chan_ok_for_dnbs(psoc, roc_req.chan, &ok); + if (QDF_IS_STATUS_ERROR(ret)) { + cfg80211_err("policy_mgr_is_chan_ok_for_dnbs():ret:%d", + ret); + return -EINVAL; + } + + if (!ok) { + cfg80211_err("channel%d not OK for DNBS", roc_req.chan); + return -EINVAL; + } + + return qdf_status_to_os_return( + ucfg_p2p_roc_req(psoc, &roc_req, cookie)); +} + +int wlan_cfg80211_cancel_roc(struct wlan_objmgr_vdev *vdev, + uint64_t cookie) +{ + struct wlan_objmgr_psoc *psoc; + + if (!vdev) { + cfg80211_err("invalid vdev object"); + return -EINVAL; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + cfg80211_err("psoc handle is NULL"); + return -EINVAL; + } + + return qdf_status_to_os_return( + ucfg_p2p_roc_cancel_req(psoc, cookie)); +} + +int wlan_cfg80211_mgmt_tx(struct wlan_objmgr_vdev *vdev, + struct ieee80211_channel *chan, bool offchan, + unsigned int wait, + const uint8_t *buf, uint32_t len, bool no_cck, + bool dont_wait_for_ack, uint64_t *cookie) +{ + struct p2p_mgmt_tx mgmt_tx = {0}; + struct wlan_objmgr_psoc *psoc; + uint8_t vdev_id; + uint32_t channel = 0; + + if (!vdev) { + 
cfg80211_err("invalid vdev object"); + return -EINVAL; + } + + if (chan) + channel = (uint32_t)wlan_freq_to_chan(chan->center_freq); + else + cfg80211_debug("NULL chan, set channel to 0"); + + psoc = wlan_vdev_get_psoc(vdev); + vdev_id = wlan_vdev_get_id(vdev); + if (!psoc) { + cfg80211_err("psoc handle is NULL"); + return -EINVAL; + } + + /** + * When offchannel time is more than MAX_OFFCHAN_TIME_FOR_DNBS, + * allow offchannel only if Do_Not_Switch_Channel is not set. + */ + if (wait > MAX_OFFCHAN_TIME_FOR_DNBS) { + int ret; + bool ok; + + ret = policy_mgr_is_chan_ok_for_dnbs(psoc, channel, &ok); + if (QDF_IS_STATUS_ERROR(ret)) { + cfg80211_err("policy_mgr_is_chan_ok_for_dnbs():ret:%d", + ret); + return -EINVAL; + } + if (!ok) { + cfg80211_err("Rejecting mgmt_tx for channel:%d as DNSC is set", + channel); + return -EINVAL; + } + } + + mgmt_tx.vdev_id = (uint32_t)vdev_id; + mgmt_tx.chan = channel; + mgmt_tx.wait = wait; + mgmt_tx.len = len; + mgmt_tx.no_cck = (uint32_t)no_cck; + mgmt_tx.dont_wait_for_ack = (uint32_t)dont_wait_for_ack; + mgmt_tx.off_chan = (uint32_t)offchan; + mgmt_tx.buf = buf; + + return qdf_status_to_os_return( + ucfg_p2p_mgmt_tx(psoc, &mgmt_tx, cookie)); +} + +int wlan_cfg80211_mgmt_tx_cancel(struct wlan_objmgr_vdev *vdev, + uint64_t cookie) +{ + struct wlan_objmgr_psoc *psoc; + + if (!vdev) { + cfg80211_err("invalid vdev object"); + return -EINVAL; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + cfg80211_err("psoc handle is NULL"); + return -EINVAL; + } + + return qdf_status_to_os_return( + ucfg_p2p_mgmt_tx_cancel(psoc, vdev, cookie)); +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/qca_vendor.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/qca_vendor.h new file mode 100644 index 0000000000000000000000000000000000000000..5b942bf0edb66da13fa0ef49ec7f1c47a0369a50 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/qca_vendor.h @@ -0,0 +1,6543 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. 
All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declares vendor commands interfacing with linux kernel + */ + + +#ifndef _WLAN_QCA_VENDOR_H_ +#define _WLAN_QCA_VENDOR_H_ + +/* Vendor id to be used in vendor specific command and events + * to user space. + * NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID, + * vendor subcmd definitions prefixed with QCA_NL80211_VENDOR_SUBCMD, and + * qca_wlan_vendor_attr is open source file src/common/qca-vendor.h in + * git://w1.fi/srv/git/hostap.git; the values here are just a copy of that + */ + +#define QCA_NL80211_VENDOR_ID 0x001374 + +/** + * enum qca_nl80211_vendor_subcmds: NL 80211 vendor sub command + * + * @QCA_NL80211_VENDOR_SUBCMD_UNSPEC: Unspecified + * @QCA_NL80211_VENDOR_SUBCMD_TEST: Test + * Sub commands 2 to 8 are not used + * @QCA_NL80211_VENDOR_SUBCMD_ROAMING: Roaming + * @QCA_NL80211_VENDOR_SUBCMD_AVOID_FREQUENCY: Avoid frequency. 
+ * @QCA_NL80211_VENDOR_SUBCMD_DFS_CAPABILITY: DFS capability + * @QCA_NL80211_VENDOR_SUBCMD_NAN: Nan + * @QCA_NL80211_VENDOR_SUBCMD_STATS_EXT: Ext stats + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_SET: Link layer stats set + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_GET: Link layer stats get + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_CLR: Link layer stats clear + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_RADIO_RESULTS: Link layer stats radio + * results + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_IFACE_RESULTS: Link layer stats interface + * results + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_PEERS_RESULTS: Link layer stats peer + * results + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_START: Ext scan start + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_STOP: Ext scan stop + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_VALID_CHANNELS: Ext scan get valid + * channels + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CAPABILITIES: Ext scan get capability + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CACHED_RESULTS: Ext scan get cached + * results + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_RESULTS_AVAILABLE: Ext scan results + * available. Used when report_threshold is reached in scan cache. + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_FULL_SCAN_RESULT: Ext scan full scan + * result. Used to report scan results when each probe rsp. is received, + * if report_events enabled in wifi_scan_cmd_params. + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_EVENT: Ext scan event from target. + * Indicates progress of scanning state-machine. 
+ * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_AP_FOUND: Ext scan hotlist + * ap found + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_BSSID_HOTLIST: Ext scan set hotlist + * bssid + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_BSSID_HOTLIST: Ext scan reset + * hotlist bssid + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SIGNIFICANT_CHANGE: Ext scan significant + * change + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SIGNIFICANT_CHANGE: Ext scan set + * significant change + * ap found + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_SIGNIFICANT_CHANGE: Ext scan reset + * significant change + * @QCA_NL80211_VENDOR_SUBCMD_TDLS_ENABLE: Ext tdls enable + * @QCA_NL80211_VENDOR_SUBCMD_TDLS_DISABLE: Ext tdls disable + * @QCA_NL80211_VENDOR_SUBCMD_TDLS_GET_STATUS: Ext tdls get status + * @QCA_NL80211_VENDOR_SUBCMD_TDLS_STATE: Ext tdls state + * @QCA_NL80211_VENDOR_SUBCMD_GET_SUPPORTED_FEATURES: Get supported features + * @QCA_NL80211_VENDOR_SUBCMD_SCANNING_MAC_OUI: Set scanning_mac_oui + * @QCA_NL80211_VENDOR_SUBCMD_NO_DFS_FLAG: No DFS flag + * @QCA_NL80211_VENDOR_SUBCMD_GET_CONCURRENCY_MATRIX: Get Concurrency Matrix + * @QCA_NL80211_VENDOR_SUBCMD_KEY_MGMT_SET_KEY: Get the key mgmt offload keys + * @QCA_NL80211_VENDOR_SUBCMD_KEY_MGMT_ROAM_AUTH: After roaming, send the + * roaming and auth information. + * @QCA_NL80211_VENDOR_SUBCMD_OCB_SET_SCHED: Set OCB schedule + * @QCA_NL80211_VENDOR_SUBCMD_DO_ACS: ACS offload flag + * @QCA_NL80211_VENDOR_SUBCMD_GET_FEATURES: Get the supported features by the + * driver. 
+ * @QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_STARTED: Indicate that driver + * started CAC on DFS channel + * @QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_FINISHED: Indicate that driver + * completed the CAC check on DFS channel + * @QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_ABORTED: Indicate that the CAC + * check was aborted by the driver + * @QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_NOP_FINISHED: Indicate that the + * driver completed NOP + * @QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_RADAR_DETECTED: Indicate that the + * driver detected radar signal on the current operating channel + * @QCA_NL80211_VENDOR_SUBCMD_GET_WIFI_INFO: get wlan driver information + * @QCA_NL80211_VENDOR_SUBCMD_WIFI_LOGGER_START: start wifi logger + * @QCA_NL80211_VENDOR_SUBCMD_WIFI_LOGGER_MEMORY_DUMP: memory dump request + * @QCA_NL80211_VENDOR_SUBCMD_GET_LOGGER_FEATURE_SET: get logger feature set + * @QCA_NL80211_VENDOR_SUBCMD_ROAM: roam + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SSID_HOTLIST: extscan set ssid hotlist + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_SSID_HOTLIST: + * extscan reset ssid hotlist + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_SSID_FOUND: hotlist ssid found + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_SSID_LOST: hotlist ssid lost + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_SET_LIST: set pno list + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_SET_PASSPOINT_LIST: set passpoint list + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_RESET_PASSPOINT_LIST: + * reset passpoint list + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_NETWORK_FOUND: pno network found + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_PASSPOINT_NETWORK_FOUND: + * passpoint network found + * @QCA_NL80211_VENDOR_SUBCMD_SET_WIFI_CONFIGURATION: set wifi config + * @QCA_NL80211_VENDOR_SUBCMD_GET_WIFI_CONFIGURATION: get wifi config + * @QCA_NL80211_VENDOR_SUBCMD_GET_LOGGER_FEATURE_SET: get logging features + * @QCA_NL80211_VENDOR_SUBCMD_LINK_PROPERTIES: get link properties + * @QCA_NL80211_VENDOR_SUBCMD_GW_PARAM_CONFIG: 
set gateway parameters + * @QCA_NL80211_VENDOR_SUBCMD_GET_PREFERRED_FREQ_LIST: get preferred channel + list + * @QCA_NL80211_VENDOR_SUBCMD_SET_PROBABLE_OPER_CHANNEL: channel hint + * @QCA_NL80211_VENDOR_SUBCMD_SETBAND: vendor setband command + * @QCA_NL80211_VENDOR_SUBCMD_TRIGGER_SCAN: vendor scan command + * @QCA_NL80211_VENDOR_SUBCMD_SCAN_DONE: vendor scan complete + * @QCA_NL80211_VENDOR_SUBCMD_ABORT_SCAN: vendor abort scan + * @QCA_NL80211_VENDOR_SUBCMD_OTA_TEST: enable OTA test + * @QCA_NL80211_VENDOR_SUBCMD_SET_TXPOWER_SCALE: set tx power by percentage + * @QCA_NL80211_VENDOR_SUBCMD_SET_TXPOWER_SCALE_DECR_DB: reduce tx power by DB + * @QCA_NL80211_VENDOR_SUBCMD_SET_SAP_CONFIG: SAP configuration + * @QCA_NL80211_VENDOR_SUBCMD_TSF: TSF operations command + * @QCA_NL80211_VENDOR_SUBCMD_WISA: WISA mode configuration + * @QCA_NL80211_VENDOR_SUBCMD_P2P_LISTEN_OFFLOAD_START: Command used to + * start the P2P Listen Offload function in device and pass the listen + * channel, period, interval, count, number of device types, device + * types and vendor information elements to device driver and firmware. + * @QCA_NL80211_VENDOR_SUBCMD_P2P_LISTEN_OFFLOAD_STOP: Command/event used to + * indicate stop request/response of the P2P Listen Offload function in + * device. As an event, it indicates either the feature stopped after it + * was already running or feature has actually failed to start. + * @QCA_NL80211_VENDOR_SUBCMD_GET_STATION: send BSS Information + * @QCA_NL80211_VENDOR_SUBCMD_SAP_CONDITIONAL_CHAN_SWITCH: After SAP starts + * beaconing, this sub command provides the driver, the frequencies on the + * 5 GHz to check for any radar activity. Driver selects one channel from + * this priority list provided through + * @QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_FREQ_LIST and starts + * to check for radar activity on it. 
If no radar activity is detected + * during the channel availability check period, driver internally switches + * to the selected frequency of operation. If the frequency is zero, driver + * internally selects a channel. The status of this conditional switch is + * indicated through an event using the same sub command through + * @QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_STATUS. Attributes are + * listed in qca_wlan_vendor_attr_sap_conditional_chan_switch + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_EXT: Command/event used to config + * indication period and threshold for MAC layer counters. + * @QCA_NL80211_VENDOR_SUBCMD_CONFIGURE_TDLS: Configure the TDLS behavior + * in the host driver. The different TDLS configurations are defined + * by the attributes in enum qca_wlan_vendor_attr_tdls_configuration. + * @QCA_NL80211_VENDOR_SUBCMD_GET_HE_CAPABILITIES: Get HE related capabilities + * @QCA_NL80211_VENDOR_SUBCMD_SET_SAR_LIMITS:Set the Specific Absorption Rate + * (SAR) power limits. A critical regulation for FCC compliance, OEMs + * require methods to set SAR limits on TX power of WLAN/WWAN. + * enum qca_vendor_attr_sar_limits attributes are used with this command. + * @QCA_NL80211_VENDOR_SUBCMD_EXTERNAL_ACS: Vendor command used to get/set + * configuration of vendor ACS. + * @QCA_NL80211_VENDOR_SUBCMD_CHIP_PWRSAVE_FAILURE: Vendor event carrying the + * requisite information leading to a power save failure. The information + * carried as part of this event is represented by the + * enum qca_attr_chip_power_save_failure attributes. + * @QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_SET: Start/Stop the NUD statistics + * collection. Uses attributes defined in enum qca_attr_nud_stats_set. + * @QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_GET: Get the NUD statistics. These + * statistics are represented by the enum qca_attr_nud_stats_get + * attributes. 
+ * @QCA_NL80211_VENDOR_SUBCMD_FETCH_BSS_TRANSITION_STATUS: Sub-command to fetch + * the BSS transition status, whether accept or reject, for a list of + * candidate BSSIDs provided by the userspace. This uses the vendor + * attributes QCA_WLAN_VENDOR_ATTR_BTM_MBO_TRANSITION_REASON and + * QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO. The userspace shall specify + * the attributes QCA_WLAN_VENDOR_ATTR_BTM_MBO_TRANSITION_REASON and an + * array of QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_BSSID nested in + * QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO in the request. In the response + * the driver shall specify array of + * QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_BSSID and + * QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_STATUS pairs nested in + * QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO. + * @QCA_NL80211_VENDOR_SUBCMD_SET_TRACE_LEVEL: Set the trace level for a + * specific QCA module. The trace levels are represented by + * enum qca_attr_trace_level attributes. + * @QCA_NL80211_VENDOR_SUBCMD_BRP_SET_ANT_LIMIT: Set the Beam Refinement + * Protocol antenna limit in different modes. See enum + * qca_wlan_vendor_attr_brp_ant_limit_mode. + * @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_START: Start spectral scan. The scan + * parameters are specified by enum qca_wlan_vendor_attr_spectral_scan. + * This returns a cookie (%QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_COOKIE) + * identifying the operation in success case. + * @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_STOP: Stop spectral scan. This uses + * a cookie (%QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_COOKIE) from + * @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_START to identify the scan to + * be stopped. + * @QCA_NL80211_VENDOR_SUBCMD_ACTIVE_TOS: Set the active Type Of Service on the + * specific interface. This can be used to modify some of the low level + * scan parameters (off channel dwell time, home channel time) in the + * driver/firmware. These parameters are maintained within the host + * driver. 
+ * This command is valid only when the interface is in the connected + * state. + * These scan parameters shall be reset by the driver/firmware once + * disconnected. The attributes used with this command are defined in + * enum qca_wlan_vendor_attr_active_tos. + * @QCA_NL80211_VENDOR_SUBCMD_HANG: Event indicating to the user space that the + * driver has detected an internal failure. This event carries the + * information indicating the reason that triggered this detection. The + * attributes for this command are defined in + * enum qca_wlan_vendor_attr_hang. + * @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_CONFIG: Get the current values + * of spectral parameters used. The spectral scan parameters are specified + * by enum qca_wlan_vendor_attr_spectral_scan. + * @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_DIAG_STATS: Get the debug stats + * for spectral scan functionality. The debug stats are specified by + * enum qca_wlan_vendor_attr_spectral_diag_stats. + * @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_CAP_INFO: Get spectral + * scan system capabilities. The capabilities are specified + * by enum qca_wlan_vendor_attr_spectral_cap. + * @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_STATUS: Get the current + * status of spectral scan. The status values are specified + * by enum qca_wlan_vendor_attr_spectral_scan_status. + * @QCA_NL80211_VENDOR_SUBCMD_HTT_STATS: Request the firmware + * DP stats for a particular stats type for response event + * it carries the stats data sent from the FW + * @QCA_NL80211_VENDOR_SUBCMD_GET_RROP_INFO: Get vendor specific Representative + * RF Operating Parameter (RROP) information. The attributes for this + * information are defined in enum qca_wlan_vendor_attr_rrop_info. This is + * intended for use by external Auto Channel Selection applications. + * @QCA_NL80211_VENDOR_SUBCMD_GET_SAR_LIMITS: Get the Specific Absorption Rate + * (SAR) power limits. 
This is a companion to the command + * @QCA_NL80211_VENDOR_SUBCMD_SET_SAR_LIMITS and is used to retrieve the + * settings currently in use. The attributes returned by this command are + * defined by enum qca_vendor_attr_sar_limits. + * @QCA_NL80211_VENDOR_SUBCMD_WLAN_MAC_INFO: Provides the current behaviour of + * the WLAN hardware MAC's associated with each WLAN netdev interface. + * This works both as a query (user space asks the current mode) or event + * interface (driver advertising the current mode to the user space). + * Driver does not trigger this event for temporary hardware mode changes. + * Mode changes w.r.t Wi-Fi connection updation ( VIZ creation / deletion, + * channel change etc ) are updated with this event. Attributes for this + * interface are defined in enum qca_wlan_vendor_attr_mac. + * @QCA_NL80211_VENDOR_SUBCMD_SET_QDEPTH_THRESH: Set MSDU queue depth threshold + * per peer per TID. Attributes for this command are defined in + * enum qca_wlan_set_qdepth_thresh_attr + * @QCA_NL80211_VENDOR_SUBCMD_WIFI_TEST_CONFIGURATION: Sub command to set WiFi + * test configuration. Attributes for this command are defined in + * enum qca_wlan_vendor_attr_wifi_test_config. + * + * @QCA_NL80211_VENDOR_SUBCMD_NAN_EXT: An extendable version of NAN vendor + * command. The earlier command for NAN, QCA_NL80211_VENDOR_SUBCMD_NAN, + * carried a payload which was a binary blob of data. The command was not + * extendable to send more information. The newer version carries the + * legacy blob encapsulated within an attribute and can be extended with + * additional vendor attributes that can enhance the NAN command + * interface. + * @QCA_NL80211_VENDOR_SUBCMD_GET_FW_STATE: Sub command to get firmware state. + * The returned firmware state is specified in the attribute + * QCA_WLAN_VENDOR_ATTR_FW_STATE. + * @QCA_NL80211_VENDOR_SUBCMD_PEER_STATS_CACHE_FLUSH: This vendor subcommand + * is used by host driver to flush per-peer cached statistics to user space + * application. 
This interface is used as an event from host driver to + * user space application. Attributes for this event are specified in + * enum qca_wlan_vendor_attr_peer_stats_cache_params. + * QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_DATA attribute is expected to be + * sent as event from host driver. + * @QCA_NL80211_VENDOR_SUBCMD_MPTA_HELPER_CONFIG: This sub command is used to + * improve the success rate of Zigbee joining network. + * Due to PTA master limitation, zigbee joining network success rate is + * low while wlan is working. Wlan host driver need to configure some + * parameters including Zigbee state and specific WLAN periods to enhance + * PTA master. All this parameters are delivered by the NetLink attributes + * defined in "enum qca_mpta_helper_vendor_attr". + */ + +enum qca_nl80211_vendor_subcmds { + QCA_NL80211_VENDOR_SUBCMD_UNSPEC = 0, + QCA_NL80211_VENDOR_SUBCMD_TEST = 1, + QCA_NL80211_VENDOR_SUBCMD_ROAMING = 9, + QCA_NL80211_VENDOR_SUBCMD_AVOID_FREQUENCY = 10, + QCA_NL80211_VENDOR_SUBCMD_DFS_CAPABILITY = 11, + QCA_NL80211_VENDOR_SUBCMD_NAN = 12, + QCA_NL80211_VENDOR_SUBCMD_STATS_EXT = 13, + + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_SET = 14, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_GET = 15, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_CLR = 16, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_RADIO_RESULTS = 17, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_IFACE_RESULTS = 18, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_PEERS_RESULTS = 19, + + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_START = 20, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_STOP = 21, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_VALID_CHANNELS = 22, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CAPABILITIES = 23, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CACHED_RESULTS = 24, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_RESULTS_AVAILABLE = 25, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_FULL_SCAN_RESULT = 26, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_EVENT = 27, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_AP_FOUND = 28, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_BSSID_HOTLIST = 
29, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_BSSID_HOTLIST = 30, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SIGNIFICANT_CHANGE = 31, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SIGNIFICANT_CHANGE = 32, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_SIGNIFICANT_CHANGE = 33, + + QCA_NL80211_VENDOR_SUBCMD_TDLS_ENABLE = 34, + QCA_NL80211_VENDOR_SUBCMD_TDLS_DISABLE = 35, + QCA_NL80211_VENDOR_SUBCMD_TDLS_GET_STATUS = 36, + QCA_NL80211_VENDOR_SUBCMD_TDLS_STATE = 37, + + QCA_NL80211_VENDOR_SUBCMD_GET_SUPPORTED_FEATURES = 38, + + QCA_NL80211_VENDOR_SUBCMD_SCANNING_MAC_OUI = 39, + QCA_NL80211_VENDOR_SUBCMD_NO_DFS_FLAG = 40, + + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_AP_LOST = 41, + + /* Get Concurrency Matrix */ + QCA_NL80211_VENDOR_SUBCMD_GET_CONCURRENCY_MATRIX = 42, + + QCA_NL80211_VENDOR_SUBCMD_KEY_MGMT_SET_KEY = 50, + QCA_NL80211_VENDOR_SUBCMD_KEY_MGMT_ROAM_AUTH = 51, + QCA_NL80211_VENDOR_SUBCMD_APFIND = 52, + + /* Deprecated */ + QCA_NL80211_VENDOR_SUBCMD_OCB_SET_SCHED = 53, + + QCA_NL80211_VENDOR_SUBCMD_DO_ACS = 54, + + QCA_NL80211_VENDOR_SUBCMD_GET_FEATURES = 55, + + /* Off loaded DFS events */ + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_STARTED = 56, + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_FINISHED = 57, + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_ABORTED = 58, + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_NOP_FINISHED = 59, + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_RADAR_DETECTED = 60, + + QCA_NL80211_VENDOR_SUBCMD_GET_WIFI_INFO = 61, + QCA_NL80211_VENDOR_SUBCMD_WIFI_LOGGER_START = 62, + QCA_NL80211_VENDOR_SUBCMD_WIFI_LOGGER_MEMORY_DUMP = 63, + QCA_NL80211_VENDOR_SUBCMD_ROAM = 64, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SSID_HOTLIST = 65, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_SSID_HOTLIST = 66, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_SSID_FOUND = 67, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_SSID_LOST = 68, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_SET_LIST = 69, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_SET_PASSPOINT_LIST = 70, + 
QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_RESET_PASSPOINT_LIST = 71, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_NETWORK_FOUND = 72, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_PASSPOINT_NETWORK_FOUND = 73, + + /* Wi-Fi Configuration subcommands */ + QCA_NL80211_VENDOR_SUBCMD_SET_WIFI_CONFIGURATION = 74, + QCA_NL80211_VENDOR_SUBCMD_GET_WIFI_CONFIGURATION = 75, + QCA_NL80211_VENDOR_SUBCMD_GET_LOGGER_FEATURE_SET = 76, + QCA_NL80211_VENDOR_SUBCMD_GET_RING_DATA = 77, + + QCA_NL80211_VENDOR_SUBCMD_TDLS_GET_CAPABILITIES = 78, + QCA_NL80211_VENDOR_SUBCMD_OFFLOADED_PACKETS = 79, + QCA_NL80211_VENDOR_SUBCMD_MONITOR_RSSI = 80, + QCA_NL80211_VENDOR_SUBCMD_NDP = 81, + + /* NS Offload enable/disable cmd */ + QCA_NL80211_VENDOR_SUBCMD_ND_OFFLOAD = 82, + + QCA_NL80211_VENDOR_SUBCMD_PACKET_FILTER = 83, + QCA_NL80211_VENDOR_SUBCMD_GET_BUS_SIZE = 84, + + QCA_NL80211_VENDOR_SUBCMD_GET_WAKE_REASON_STATS = 85, + + QCA_NL80211_VENDOR_SUBCMD_DATA_OFFLOAD = 91, + /* OCB commands */ + QCA_NL80211_VENDOR_SUBCMD_OCB_SET_CONFIG = 92, + QCA_NL80211_VENDOR_SUBCMD_OCB_SET_UTC_TIME = 93, + QCA_NL80211_VENDOR_SUBCMD_OCB_START_TIMING_ADVERT = 94, + QCA_NL80211_VENDOR_SUBCMD_OCB_STOP_TIMING_ADVERT = 95, + QCA_NL80211_VENDOR_SUBCMD_OCB_GET_TSF_TIMER = 96, + QCA_NL80211_VENDOR_SUBCMD_DCC_GET_STATS = 97, + QCA_NL80211_VENDOR_SUBCMD_DCC_CLEAR_STATS = 98, + QCA_NL80211_VENDOR_SUBCMD_DCC_UPDATE_NDL = 99, + QCA_NL80211_VENDOR_SUBCMD_DCC_STATS_EVENT = 100, + + /* subcommand to get link properties */ + QCA_NL80211_VENDOR_SUBCMD_LINK_PROPERTIES = 101, + /* LFR Subnet Detection */ + QCA_NL80211_VENDOR_SUBCMD_GW_PARAM_CONFIG = 102, + + /* DBS subcommands */ + QCA_NL80211_VENDOR_SUBCMD_GET_PREFERRED_FREQ_LIST = 103, + QCA_NL80211_VENDOR_SUBCMD_SET_PROBABLE_OPER_CHANNEL = 104, + + /* Vendor setband command */ + QCA_NL80211_VENDOR_SUBCMD_SETBAND = 105, + + /* Vendor scan commands */ + QCA_NL80211_VENDOR_SUBCMD_TRIGGER_SCAN = 106, + QCA_NL80211_VENDOR_SUBCMD_SCAN_DONE = 107, + + /* OTA test subcommand */ + 
QCA_NL80211_VENDOR_SUBCMD_OTA_TEST = 108, + /* Tx power scaling subcommands */ + QCA_NL80211_VENDOR_SUBCMD_SET_TXPOWER_SCALE = 109, + /* Tx power scaling in db subcommands */ + QCA_NL80211_VENDOR_SUBCMD_SET_TXPOWER_SCALE_DECR_DB = 115, + QCA_NL80211_VENDOR_SUBCMD_ACS_POLICY = 116, + QCA_NL80211_VENDOR_SUBCMD_STA_CONNECT_ROAM_POLICY = 117, + QCA_NL80211_VENDOR_SUBCMD_SET_SAP_CONFIG = 118, + QCA_NL80211_VENDOR_SUBCMD_TSF = 119, + QCA_NL80211_VENDOR_SUBCMD_WISA = 120, + QCA_NL80211_VENDOR_SUBCMD_GET_STATION = 121, + QCA_NL80211_VENDOR_SUBCMD_P2P_LISTEN_OFFLOAD_START = 122, + QCA_NL80211_VENDOR_SUBCMD_P2P_LISTEN_OFFLOAD_STOP = 123, + QCA_NL80211_VENDOR_SUBCMD_SAP_CONDITIONAL_CHAN_SWITCH = 124, + QCA_NL80211_VENDOR_SUBCMD_GPIO_CONFIG_COMMAND = 125, + + QCA_NL80211_VENDOR_SUBCMD_GET_HW_CAPABILITY = 126, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_EXT = 127, + /* FTM/indoor location subcommands */ + QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA = 128, + QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION = 129, + QCA_NL80211_VENDOR_SUBCMD_FTM_ABORT_SESSION = 130, + QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT = 131, + QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE = 132, + QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER = 133, + QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS = 134, + QCA_NL80211_VENDOR_SUBCMD_AOA_ABORT_MEAS = 135, + QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT = 136, + + /* Encrypt/Decrypt command */ + QCA_NL80211_VENDOR_SUBCMD_ENCRYPTION_TEST = 137, + + QCA_NL80211_VENDOR_SUBCMD_GET_CHAIN_RSSI = 138, + /* DMG low level RF sector operations */ + QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG = 139, + QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG = 140, + QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR = 141, + QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR = 142, + + /* Configure the TDLS mode from user space */ + QCA_NL80211_VENDOR_SUBCMD_CONFIGURE_TDLS = 143, + + QCA_NL80211_VENDOR_SUBCMD_GET_HE_CAPABILITIES = 144, + + /* Vendor abort scan command */ + 
QCA_NL80211_VENDOR_SUBCMD_ABORT_SCAN = 145, + + /* Set Specific Absorption Rate(SAR) Power Limits */ + QCA_NL80211_VENDOR_SUBCMD_SET_SAR_LIMITS = 146, + + /* External Auto channel configuration setting */ + QCA_NL80211_VENDOR_SUBCMD_EXTERNAL_ACS = 147, + + QCA_NL80211_VENDOR_SUBCMD_CHIP_PWRSAVE_FAILURE = 148, + QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_SET = 149, + QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_GET = 150, + QCA_NL80211_VENDOR_SUBCMD_FETCH_BSS_TRANSITION_STATUS = 151, + + /* Set the trace level for QDF */ + QCA_NL80211_VENDOR_SUBCMD_SET_TRACE_LEVEL = 152, + + QCA_NL80211_VENDOR_SUBCMD_BRP_SET_ANT_LIMIT = 153, + + QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_START = 154, + QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_STOP = 155, + QCA_NL80211_VENDOR_SUBCMD_ACTIVE_TOS = 156, + QCA_NL80211_VENDOR_SUBCMD_HANG = 157, + QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_CONFIG = 158, + QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_DIAG_STATS = 159, + QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_CAP_INFO = 160, + QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_STATUS = 161, + QCA_NL80211_VENDOR_SUBCMD_HTT_STATS = 162, + QCA_NL80211_VENDOR_SUBCMD_GET_RROP_INFO = 163, + QCA_NL80211_VENDOR_SUBCMD_GET_SAR_LIMITS = 164, + QCA_NL80211_VENDOR_SUBCMD_WLAN_MAC_INFO = 165, + QCA_NL80211_VENDOR_SUBCMD_SET_QDEPTH_THRESH = 166, + /* Wi-Fi test configuration subcommand */ + QCA_NL80211_VENDOR_SUBCMD_WIFI_TEST_CONFIGURATION = 169, + QCA_NL80211_VENDOR_SUBCMD_THROUGHPUT_CHANGE_EVENT = 174, + QCA_NL80211_VENDOR_SUBCMD_COEX_CONFIG = 175, + QCA_NL80211_VENDOR_SUBCMD_GET_FW_STATE = 177, + QCA_NL80211_VENDOR_SUBCMD_PEER_STATS_CACHE_FLUSH = 178, + QCA_NL80211_VENDOR_SUBCMD_MPTA_HELPER_CONFIG = 179, +}; + +enum qca_wlan_vendor_tos { + QCA_WLAN_VENDOR_TOS_BK = 0, + QCA_WLAN_VENDOR_TOS_BE = 1, + QCA_WLAN_VENDOR_TOS_VI = 2, + QCA_WLAN_VENDOR_TOS_VO = 3, +}; + +/** + * enum qca_wlan_vendor_attr_active_tos - Used by the vendor command + * QCA_NL80211_VENDOR_SUBCMD_ACTIVE_TOS. 
+ */ +enum qca_wlan_vendor_attr_active_tos { + QCA_WLAN_VENDOR_ATTR_ACTIVE_TOS_INVALID = 0, + /* Type Of Service - Represented by qca_wlan_vendor_tos */ + QCA_WLAN_VENDOR_ATTR_ACTIVE_TOS = 1, + /* Flag attribute representing the start (attribute included) or stop + * (attribute not included) of the respective TOS. + */ + QCA_WLAN_VENDOR_ATTR_ACTIVE_TOS_START = 2, + QCA_WLAN_VENDOR_ATTR_ACTIVE_TOS_MAX = 3, +}; + +enum qca_wlan_vendor_hang_reason { + /* Unspecified reason */ + QCA_WLAN_HANG_REASON_UNSPECIFIED = 0, + /* No Map for the MAC entry for the received frame */ + QCA_WLAN_HANG_RX_HASH_NO_ENTRY_FOUND = 1, + /* peer deletion timeout happened */ + QCA_WLAN_HANG_PEER_DELETION_TIMEDOUT = 2, + /* peer unmap timeout */ + QCA_WLAN_HANG_PEER_UNMAP_TIMEDOUT = 3, + /* Scan request timed out */ + QCA_WLAN_HANG_SCAN_REQ_EXPIRED = 4, + /* Consecutive Scan attempt failures */ + QCA_WLAN_HANG_SCAN_ATTEMPT_FAILURES = 5, + /* Unable to get the message buffer */ + QCA_WLAN_HANG_GET_MSG_BUFF_FAILURE = 6, + /* Current command processing is timedout */ + QCA_WLAN_HANG_ACTIVE_LIST_TIMEOUT = 7, + /* Timeout for an ACK from FW for suspend request */ + QCA_WLAN_HANG_SUSPEND_TIMEOUT = 8, + /* Timeout for an ACK from FW for resume request */ + QCA_WLAN_HANG_RESUME_TIMEOUT = 9, + /* Transmission timeout for consecutive data frames */ + QCA_WLAN_HANG_TRANSMISSIONS_TIMEOUT = 10, + /* Timeout for the TX completion status of data frame */ + QCA_WLAN_HANG_TX_COMPLETE_TIMEOUT = 11, + /* DXE failure for tx/Rx, DXE resource unavailability */ + QCA_WLAN_HANG_DXE_FAILURE = 12, + /* WMI pending commands exceed the maximum count */ + QCA_WLAN_HANG_WMI_EXCEED_MAX_PENDING_CMDS = 13, +}; + +/** + * enum qca_wlan_vendor_attr_hang - Used by the vendor command + * QCA_NL80211_VENDOR_SUBCMD_HANG. + */ +enum qca_wlan_vendor_attr_hang { + QCA_WLAN_VENDOR_ATTR_HANG_INVALID = 0, + /* + * Reason for the Hang - Represented by enum + * qca_wlan_vendor_hang_reason. 
+ */ + QCA_WLAN_VENDOR_ATTR_HANG_REASON = 1, + + QCA_WLAN_VENDOR_ATTR_HANG_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_HANG_MAX = + QCA_WLAN_VENDOR_ATTR_HANG_AFTER_LAST - 1, +}; + +/** + * enum qca_vendor_attr_set_trace_level - Config params for QDF set trace level + * @QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_INVALID: Invalid trace level + * @QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_PARAM : Trace level parameters + * @QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_MODULE_ID : Module of which trace + level needs to be updated. + * @QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_TRACE_MASK : verbose mask, which need + * to be set. + * @QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_AFTER_LAST : after last. + * @QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_MAX : Max attribute. + */ +enum qca_vendor_attr_set_trace_level { + QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_INVALID = 0, + /* + * Array of QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_PARAM + * attributes. + */ + QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_PARAM = 1, + QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_MODULE_ID = 2, + QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_TRACE_MASK = 3, + QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_MAX = + QCA_WLAN_VENDOR_ATTR_SET_TRACE_LEVEL_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_get_station - Sub commands used by + * QCA_NL80211_VENDOR_SUBCMD_GET_STATION to get the corresponding + * station information. The information obtained through these + * commands signify the current info in connected state and + * latest cached information during the connected state , if queried + * when in disconnected state. 
+ * + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INVALID: Invalid attribute + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO: bss info + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_ASSOC_FAIL_REASON: assoc fail reason + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_REMOTE: remote station info + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_AFTER_LAST: After last + */ +enum qca_wlan_vendor_attr_get_station { + QCA_WLAN_VENDOR_ATTR_GET_STATION_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO, + QCA_WLAN_VENDOR_ATTR_GET_STATION_ASSOC_FAIL_REASON, + QCA_WLAN_VENDOR_ATTR_GET_STATION_REMOTE, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_GET_STATION_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_GET_STATION_MAX = + QCA_WLAN_VENDOR_ATTR_GET_STATION_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_802_11_mode - dot11 mode + * @QCA_WLAN_802_11_MODE_11B: mode B + * @QCA_WLAN_802_11_MODE_11G: mode G + * @QCA_WLAN_802_11_MODE_11N: mode N + * @QCA_WLAN_802_11_MODE_11A: mode A + * @QCA_WLAN_802_11_MODE_11AC: mode AC + * @QCA_WLAN_802_11_MODE_INVALID: Invalid dot11 mode + */ +enum qca_wlan_802_11_mode { + QCA_WLAN_802_11_MODE_11B, + QCA_WLAN_802_11_MODE_11G, + QCA_WLAN_802_11_MODE_11N, + QCA_WLAN_802_11_MODE_11A, + QCA_WLAN_802_11_MODE_11AC, + QCA_WLAN_802_11_MODE_INVALID, +}; + +/** + * enum qca_wlan_auth_type - Authentication key management type + * @QCA_WLAN_AUTH_TYPE_INVALID: Invalid key management type + * @QCA_WLAN_AUTH_TYPE_OPEN: Open key + * @QCA_WLAN_AUTH_TYPE_SHARED: shared key + * @QCA_WLAN_AUTH_TYPE_WPA: wpa key + * @QCA_WLAN_AUTH_TYPE_WPA_PSK: wpa psk key + * @QCA_WLAN_AUTH_TYPE_WPA_NONE: wpa none key + * @QCA_WLAN_AUTH_TYPE_RSN: rsn key + * @QCA_WLAN_AUTH_TYPE_RSN_PSK: rsn psk key + * @QCA_WLAN_AUTH_TYPE_FT: ft key + * @QCA_WLAN_AUTH_TYPE_FT_PSK: ft psk key + * @QCA_WLAN_AUTH_TYPE_SHA256: shared 256 key + * @QCA_WLAN_AUTH_TYPE_SHA256_PSK: shared 256 psk + * @QCA_WLAN_AUTH_TYPE_WAI: wai key + * @QCA_WLAN_AUTH_TYPE_WAI_PSK wai psk key + * @QCA_WLAN_AUTH_TYPE_CCKM_WPA: cckm wpa key + * @QCA_WLAN_AUTH_TYPE_CCKM_RSN: cckm rsn 
key + */ +enum qca_wlan_auth_type { + QCA_WLAN_AUTH_TYPE_INVALID, + QCA_WLAN_AUTH_TYPE_OPEN, + QCA_WLAN_AUTH_TYPE_SHARED, + QCA_WLAN_AUTH_TYPE_WPA, + QCA_WLAN_AUTH_TYPE_WPA_PSK, + QCA_WLAN_AUTH_TYPE_WPA_NONE, + QCA_WLAN_AUTH_TYPE_RSN, + QCA_WLAN_AUTH_TYPE_RSN_PSK, + QCA_WLAN_AUTH_TYPE_FT, + QCA_WLAN_AUTH_TYPE_FT_PSK, + QCA_WLAN_AUTH_TYPE_SHA256, + QCA_WLAN_AUTH_TYPE_SHA256_PSK, + QCA_WLAN_AUTH_TYPE_WAI, + QCA_WLAN_AUTH_TYPE_WAI_PSK, + QCA_WLAN_AUTH_TYPE_CCKM_WPA, + QCA_WLAN_AUTH_TYPE_CCKM_RSN, + QCA_WLAN_AUTH_TYPE_AUTOSWITCH, +}; + +/** + * enum qca_wlan_vendor_attr_get_station_info - Station Info queried + * through QCA_NL80211_VENDOR_SUBCMD_GET_STATION. + * + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_INVALID: Invalid Attribute + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_LINK_STANDARD_NL80211_ATTR: + * Get the standard NL attributes Nested with this attribute. + * Ex : Query BW , BITRATE32 , NSS , Signal , Noise of the Link - + * NL80211_ATTR_SSID / NL80211_ATTR_SURVEY_INFO (Connected Channel) / + * NL80211_ATTR_STA_INFO + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AP_STANDARD_NL80211_ATTR: + * Get the standard NL attributes Nested with this attribute. + * Ex : Query HT/VHT Capability advertized by the AP. + * NL80211_ATTR_VHT_CAPABILITY / NL80211_ATTR_HT_CAPABILITY + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_ROAM_COUNT: + * Number of successful Roam attempts before a + * disconnect, Unsigned 32 bit value + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AKM: + * Authentication Key Management Type used for the connected session. + * Signified by enum qca_wlan_auth_type + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_802_11_MODE: 802.11 Mode of the + * connected Session, signified by enum qca_wlan_802_11_mode + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AP_HS20_INDICATION: + * HS20 Indication Element + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_ASSOC_FAIL_REASON: + * Status Code Corresponding to the Association Failure. + * Unsigned 32 bit value. 
+ * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_MAX_PHY_RATE: + * Max phy rate of remote station + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_PACKETS: + * TX packets to remote station + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_BYTES: + * TX bytes to remote station + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_RX_PACKETS: + * RX packets from remote station + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_RX_BYTES: + * RX bytes from remote station + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_LAST_TX_RATE: + * Last TX rate with remote station + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_LAST_RX_RATE: + * Last RX rate with remote station + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_WMM: + * Remote station enable/disable WMM + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_SUPPORTED_MODE: + * Remote station connection mode + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_AMPDU: + * Remote station AMPDU enable/disable + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_STBC: + * Remote station TX Space-time block coding enable/disable + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_RX_STBC: + * Remote station RX Space-time block coding enable/disable + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_CH_WIDTH: + * Remote station channel width + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_SGI_ENABLE: + * Remote station short GI enable/disable + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_PAD: Attribute type for padding + * @QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AFTER_LAST: After last + */ +enum qca_wlan_vendor_attr_get_station_info { + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_LINK_STANDARD_NL80211_ATTR, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AP_STANDARD_NL80211_ATTR, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_ROAM_COUNT, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AKM, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_802_11_MODE, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AP_HS20_INDICATION, + 
QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_HT_OPERATION, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_VHT_OPERATION, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_ASSOC_FAIL_REASON, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_MAX_PHY_RATE, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_PACKETS, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_BYTES, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_RX_PACKETS, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_RX_BYTES, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_LAST_TX_RATE, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_LAST_RX_RATE, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_WMM, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_SUPPORTED_MODE, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_AMPDU, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_TX_STBC, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_RX_STBC, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_CH_WIDTH, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_REMOTE_SGI_ENABLE, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)) + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_PAD, +#endif + /* keep last */ + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_MAX = + QCA_WLAN_VENDOR_ATTR_GET_STATION_INFO_AFTER_LAST - 1, +}; + +/** + * enum qca_nl80211_vendor_subcmds_index - vendor sub commands index + * + * @QCA_NL80211_VENDOR_SUBCMD_AVOID_FREQUENCY_INDEX: Avoid frequency + * @QCA_NL80211_VENDOR_SUBCMD_NAN_INDEX: Nan + * @QCA_NL80211_VENDOR_SUBCMD_STATS_EXT_INDEX: Ext stats + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_START_INDEX: Ext scan start + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_STOP_INDEX: Ext scan stop + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CAPABILITIES_INDEX: Ext scan get + * capability + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CACHED_RESULTS_INDEX: Ext scan get + * cached results + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_RESULTS_AVAILABLE_INDEX: Ext scan + * results available + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_FULL_SCAN_RESULT_INDEX: 
Ext scan full + * scan result + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_EVENT_INDEX: Ext scan event + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_AP_FOUND_INDEX: Ext scan hot list + * AP found + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_BSSID_HOTLIST_INDEX: Ext scan set + * bssid hotlist + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_BSSID_HOTLIST_INDEX: Ext scan reset + * bssid hotlist + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SIGNIFICANT_CHANGE_INDEX: Ext scan + * significant change + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SIGNIFICANT_CHANGE_INDEX: Ext scan + * set significant change + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_SIGNIFICANT_CHANGE_INDEX: Ext scan + * reset significant change + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_SET_INDEX: Set stats + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_GET_INDEX: Get stats + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_CLR_INDEX: Clear stats + * @QCA_NL80211_VENDOR_SUBCMD_LL_RADIO_STATS_INDEX: Radio stats + * @QCA_NL80211_VENDOR_SUBCMD_LL_IFACE_STATS_INDEX: Iface stats + * @QCA_NL80211_VENDOR_SUBCMD_LL_PEER_INFO_STATS_INDEX: Peer info stats + * @QCA_NL80211_VENDOR_SUBCMD_LL_STATS_EXT_INDEX: MAC layer counters + * @QCA_NL80211_VENDOR_SUBCMD_TDLS_STATE_CHANGE_INDEX: Ext tdls state change + * @QCA_NL80211_VENDOR_SUBCMD_DO_ACS_INDEX: ACS command + * @QCA_NL80211_VENDOR_SUBCMD_KEY_MGMT_ROAM_AUTH_INDEX: Pass Roam and Auth info + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_AP_LOST_INDEX: hotlist ap lost + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_NETWORK_FOUND_INDEX: + * pno network found index + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_PASSPOINT_NETWORK_FOUND_INDEX: + * passpoint match found index + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SSID_HOTLIST_INDEX: + * set ssid hotlist index + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_SSID_HOTLIST_INDEX: + * reset ssid hotlist index + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_SSID_FOUND_INDEX: + * hotlist ssid found index + * @QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_SSID_LOST_INDEX: + * 
hotlist ssid lost index + * @QCA_NL80211_VENDOR_SUBCMD_DCC_STATS_EVENT_INDEX + * dcc stats event index + * @QCA_NL80211_VENDOR_SUBCMD_SCAN_INDEX: vendor scan index + * @QCA_NL80211_VENDOR_SUBCMD_SCAN_DONE_INDEX: + * vendor scan complete event index + * @QCA_NL80211_VENDOR_SUBCMD_GW_PARAM_CONFIG_INDEX: + * update gateway parameters index + * @QCA_NL80211_VENDOR_SUBCMD_TSF_INDEX: TSF response events index + * @QCA_NL80211_VENDOR_SUBCMD_P2P_LO_EVENT_INDEX: + * P2P listen offload index + * @QCA_NL80211_VENDOR_SUBCMD_SAP_CONDITIONAL_CHAN_SWITCH_INDEX: SAP + * conditional channel switch index + * @QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_GET_INDEX: NUD DEBUG Stats index + * @QCA_NL80211_VENDOR_SUBCMD_HANG_REASON_INDEX: hang event reason index + * @QCA_NL80211_VENDOR_SUBCMD_WLAN_MAC_INFO_INDEX: MAC mode info index + */ + +enum qca_nl80211_vendor_subcmds_index { + QCA_NL80211_VENDOR_SUBCMD_AVOID_FREQUENCY_INDEX = 0, + +#ifdef WLAN_FEATURE_NAN + QCA_NL80211_VENDOR_SUBCMD_NAN_INDEX, +#endif /* WLAN_FEATURE_NAN */ + +#ifdef WLAN_FEATURE_STATS_EXT + QCA_NL80211_VENDOR_SUBCMD_STATS_EXT_INDEX, +#endif /* WLAN_FEATURE_STATS_EXT */ + +#ifdef FEATURE_WLAN_EXTSCAN + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_START_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_STOP_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CAPABILITIES_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CACHED_RESULTS_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_RESULTS_AVAILABLE_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_FULL_SCAN_RESULT_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_EVENT_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_AP_FOUND_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_BSSID_HOTLIST_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_BSSID_HOTLIST_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SIGNIFICANT_CHANGE_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SIGNIFICANT_CHANGE_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_SIGNIFICANT_CHANGE_INDEX, +#endif /* FEATURE_WLAN_EXTSCAN */ + +#ifdef 
WLAN_FEATURE_LINK_LAYER_STATS + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_SET_INDEX, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_GET_INDEX, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_CLR_INDEX, + QCA_NL80211_VENDOR_SUBCMD_LL_RADIO_STATS_INDEX, + QCA_NL80211_VENDOR_SUBCMD_LL_IFACE_STATS_INDEX, + QCA_NL80211_VENDOR_SUBCMD_LL_PEER_INFO_STATS_INDEX, + QCA_NL80211_VENDOR_SUBCMD_LL_STATS_EXT_INDEX, +#endif /* WLAN_FEATURE_LINK_LAYER_STATS */ + + QCA_NL80211_VENDOR_SUBCMD_TDLS_STATE_CHANGE_INDEX, + QCA_NL80211_VENDOR_SUBCMD_DO_ACS_INDEX, +#ifdef WLAN_FEATURE_ROAM_OFFLOAD + QCA_NL80211_VENDOR_SUBCMD_KEY_MGMT_ROAM_AUTH_INDEX, +#endif + /* DFS */ + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_STARTED_INDEX, + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_FINISHED_INDEX, + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_ABORTED_INDEX, + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_CAC_NOP_FINISHED_INDEX, + QCA_NL80211_VENDOR_SUBCMD_DFS_OFFLOAD_RADAR_DETECTED_INDEX, +#ifdef FEATURE_WLAN_EXTSCAN + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_AP_LOST_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_NETWORK_FOUND_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_PNO_PASSPOINT_NETWORK_FOUND_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SSID_HOTLIST_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_RESET_SSID_HOTLIST_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_SSID_FOUND_INDEX, + QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_SSID_LOST_INDEX, +#endif /* FEATURE_WLAN_EXTSCAN */ + QCA_NL80211_VENDOR_SUBCMD_GET_WIFI_CONFIGURATION_INDEX, + QCA_NL80211_VENDOR_SUBCMD_MONITOR_RSSI_INDEX, +#ifdef WLAN_FEATURE_MEMDUMP + QCA_NL80211_VENDOR_SUBCMD_WIFI_LOGGER_MEMORY_DUMP_INDEX, +#endif /* WLAN_FEATURE_MEMDUMP */ + /* OCB events */ + QCA_NL80211_VENDOR_SUBCMD_DCC_STATS_EVENT_INDEX, + QCA_NL80211_VENDOR_SUBCMD_SCAN_INDEX, + QCA_NL80211_VENDOR_SUBCMD_SCAN_DONE_INDEX, + QCA_NL80211_VENDOR_SUBCMD_GW_PARAM_CONFIG_INDEX, +#ifdef WLAN_FEATURE_TSF + QCA_NL80211_VENDOR_SUBCMD_TSF_INDEX, +#endif +#ifdef WLAN_FEATURE_NAN_DATAPATH + 
QCA_NL80211_VENDOR_SUBCMD_NDP_INDEX, +#endif /* WLAN_FEATURE_NAN_DATAPATH */ + QCA_NL80211_VENDOR_SUBCMD_P2P_LO_EVENT_INDEX, + QCA_NL80211_VENDOR_SUBCMD_SAP_CONDITIONAL_CHAN_SWITCH_INDEX, + QCA_NL80211_VENDOR_SUBCMD_UPDATE_EXTERNAL_ACS_CONFIG, + QCA_NL80211_VENDOR_SUBCMD_PWR_SAVE_FAIL_DETECTED_INDEX, + QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_GET_INDEX, + QCA_NL80211_VENDOR_SUBCMD_HANG_REASON_INDEX, + QCA_NL80211_VENDOR_SUBCMD_HTT_STATS_INDEX, + QCA_NL80211_VENDOR_SUBCMD_WLAN_MAC_INFO_INDEX, + QCA_NL80211_VENDOR_SUBCMD_THROUGHPUT_CHANGE_EVENT_INDEX, +}; + +/** + * enum qca_wlan_vendor_attr_tdls_enable - TDLS enable attribute + * + * @QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_MAC_ADDR: An array of 6 x Unsigned 8-bit + * value + * @QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_CHANNEL: Signed 32-bit value, but lets + * keep as unsigned for now + * @QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_GLOBAL_OPERATING_CLASS: operating class + * @QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_MAX_LATENCY_MS: Enable max latency in ms + * @QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_MIN_BANDWIDTH_KBPS: Enable min bandwidth + * in KBPS + * @QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_MAX: Max value + */ +enum qca_wlan_vendor_attr_tdls_enable { + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_MAC_ADDR, + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_CHANNEL, + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_GLOBAL_OPERATING_CLASS, + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_MAX_LATENCY_MS, + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_MIN_BANDWIDTH_KBPS, + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_MAX = + QCA_WLAN_VENDOR_ATTR_TDLS_ENABLE_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_tdls_disable: tdls disable attribute + * + * @QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_MAC_ADDR: An array of 6 x Unsigned + * 8-bit value + * 
@QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_MAX: Max value + */ +enum qca_wlan_vendor_attr_tdls_disable { + QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_MAC_ADDR, + QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_MAX = + QCA_WLAN_VENDOR_ATTR_TDLS_DISABLE_AFTER_LAST - 1, +}; + +/** + * qca_chip_power_save_failure_reason: Power save failure reason + * @QCA_CHIP_POWER_SAVE_FAILURE_REASON_PROTOCOL: Indicates power save failure + * due to protocol/module. + * @QCA_CHIP_POWER_SAVE_FAILURE_REASON_HARDWARE: power save failure + * due to hardware + */ +enum qca_chip_power_save_failure_reason { + QCA_CHIP_POWER_SAVE_FAILURE_REASON_PROTOCOL = 0, + QCA_CHIP_POWER_SAVE_FAILURE_REASON_HARDWARE = 1, +}; + +/** + * qca_attr_chip_power_save_failure: attributes to vendor subcmd + * @QCA_NL80211_VENDOR_SUBCMD_CHIP_PWRSAVE_FAILURE. This carry the requisite + * information leading to the power save failure. 
+ * @QCA_ATTR_CHIP_POWER_SAVE_FAILURE_INVALID : invalid + * @QCA_ATTR_CHIP_POWER_SAVE_FAILURE_REASON : power save failure reason + * represented by enum qca_chip_power_save_failure_reason + * @QCA_ATTR_CHIP_POWER_SAVE_FAILURE_LAST : Last + * @QCA_ATTR_CHIP_POWER_SAVE_FAILURE_MAX : Max value + */ +enum qca_attr_chip_power_save_failure { + QCA_ATTR_CHIP_POWER_SAVE_FAILURE_INVALID = 0, + + QCA_ATTR_CHIP_POWER_SAVE_FAILURE_REASON = 1, + + /* keep last */ + QCA_ATTR_CHIP_POWER_SAVE_FAILURE_LAST, + QCA_ATTR_CHIP_POWER_SAVE_FAILURE_MAX = + QCA_ATTR_CHIP_POWER_SAVE_FAILURE_LAST - 1, +}; + + +/** + * enum qca_wlan_vendor_attr_tdls_get_status - tdls get status attribute + * + * @QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_MAC_ADDR: An array of 6 x Unsigned + * 8-bit value + * @QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_STATE: get status state, + * unsigned 32-bit value + * @QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_REASON: get status reason + * @QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_CHANNEL: get status channel, + * unsigned 32-bit value + * @QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_GLOBAL_OPERATING_CLASS: get operating + * class, unsigned 32-bit value + * @QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_MAX: Max value + */ +enum qca_wlan_vendor_attr_tdls_get_status { + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_MAC_ADDR, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_STATE, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_REASON, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_CHANNEL, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_GLOBAL_OPERATING_CLASS, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_MAX = + QCA_WLAN_VENDOR_ATTR_TDLS_GET_STATUS_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_tdls_state - tdls state attribute + * + * @QCA_WLAN_VENDOR_ATTR_TDLS_STATE_INVALID: Initial invalid value + * 
@QCA_WLAN_VENDOR_ATTR_TDLS_STATE_MAC_ADDR: An array of 6 x Unsigned + * 8-bit value + * @QCA_WLAN_VENDOR_ATTR_TDLS_NEW_STATE: TDLS new state, + * unsigned 32-bit value + * @QCA_WLAN_VENDOR_ATTR_TDLS_STATE_REASON: TDLS state reason + * @QCA_WLAN_VENDOR_ATTR_TDLS_STATE_CHANNEL: TDLS state channel, + * unsigned 32-bit value + * @QCA_WLAN_VENDOR_ATTR_TDLS_STATE_GLOBAL_OPERATING_CLASS: TDLS state + * operating class, unsigned 32-bit value + * @QCA_WLAN_VENDOR_ATTR_TDLS_STATE_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_TDLS_STATE_MAX: Max value + */ +enum qca_wlan_vendor_attr_tdls_state { + QCA_WLAN_VENDOR_ATTR_TDLS_STATE_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_TDLS_STATE_MAC_ADDR, + QCA_WLAN_VENDOR_ATTR_TDLS_NEW_STATE, + QCA_WLAN_VENDOR_ATTR_TDLS_STATE_REASON, + QCA_WLAN_VENDOR_ATTR_TDLS_STATE_CHANNEL, + QCA_WLAN_VENDOR_ATTR_TDLS_STATE_GLOBAL_OPERATING_CLASS, + QCA_WLAN_VENDOR_ATTR_TDLS_STATE_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TDLS_STATE_MAX = + QCA_WLAN_VENDOR_ATTR_TDLS_STATE_AFTER_LAST - 1, +}; + +/* enum's to provide TDLS capabilities */ +enum qca_wlan_vendor_attr_get_tdls_capabilities { + QCA_WLAN_VENDOR_ATTR_TDLS_GET_CAPS_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_CAPS_MAX_CONC_SESSIONS = 1, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_CAPS_FEATURES_SUPPORTED = 2, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_TDLS_GET_CAPS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TDLS_GET_CAPS_MAX = + QCA_WLAN_VENDOR_ATTR_TDLS_GET_CAPS_AFTER_LAST - 1, +}; + +enum qca_wlan_vendor_attr { + QCA_WLAN_VENDOR_ATTR_INVALID = 0, + /* used by QCA_NL80211_VENDOR_SUBCMD_DFS_CAPABILITY */ + QCA_WLAN_VENDOR_ATTR_DFS = 1, + /* used by QCA_NL80211_VENDOR_SUBCMD_NAN */ + QCA_WLAN_VENDOR_ATTR_NAN = 2, + /* used by QCA_NL80211_VENDOR_SUBCMD_STATS_EXT */ + QCA_WLAN_VENDOR_ATTR_STATS_EXT = 3, + /* used by QCA_NL80211_VENDOR_SUBCMD_STATS_EXT */ + QCA_WLAN_VENDOR_ATTR_IFINDEX = 4, + /* + * used by QCA_NL80211_VENDOR_SUBCMD_ROAMING, u32 with values defined + * by enum qca_roaming_policy. 
+ */ + QCA_WLAN_VENDOR_ATTR_ROAMING_POLICY = 5, + QCA_WLAN_VENDOR_ATTR_MAC_ADDR = 6, + /* used by QCA_NL80211_VENDOR_SUBCMD_GET_FEATURES */ + QCA_WLAN_VENDOR_ATTR_FEATURE_FLAGS = 7, + QCA_WLAN_VENDOR_ATTR_TEST = 8, + /* + * used by QCA_NL80211_VENDOR_SUBCMD_GET_FEATURES + * Unsigned 32-bit value. + */ + QCA_WLAN_VENDOR_ATTR_CONCURRENCY_CAPA = 9, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_MAX_CONCURRENT_CHANNELS_2_4_BAND = 10, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_MAX_CONCURRENT_CHANNELS_5_0_BAND = 11, + /* Unsigned 32-bit value from enum qca_set_band. */ + QCA_WLAN_VENDOR_ATTR_SETBAND_VALUE = 12, + /* Dummy (NOP) attribute for 64 bit padding */ + QCA_WLAN_VENDOR_ATTR_PAD = 13, + /* + * Unique FTM session cookie (Unsigned 64 bit). Specified in + * QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION. Reported in + * the session in QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT and + * QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE. + */ + QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE = 14, + /* + * Indoor location capabilities, returned by + * QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA. + * see enum qca_wlan_vendor_attr_loc_capa. + */ + QCA_WLAN_VENDOR_ATTR_LOC_CAPA = 15, + /* + * Array of nested attributes containing information about each peer + * in FTM measurement session. See enum qca_wlan_vendor_attr_peer_info + * for supported attributes for each peer. + */ + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS = 16, + /* + * Array of nested attributes containing measurement results for + * one or more peers, reported by the + * QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT event. + * See enum qca_wlan_vendor_attr_peer_result for list of supported + * attributes. + */ + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEER_RESULTS = 17, + /* Flag attribute for enabling or disabling responder functionality. 
*/ + QCA_WLAN_VENDOR_ATTR_FTM_RESPONDER_ENABLE = 18, + /* + * Used in the QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER + * command to specify the LCI report that will be sent by + * the responder during a measurement exchange. The format is + * defined in IEEE P802.11-REVmc/D7.0, 9.4.2.22.10. + */ + QCA_WLAN_VENDOR_ATTR_FTM_LCI = 19, + /* + * Used in the QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER + * command to specify the location civic report that will + * be sent by the responder during a measurement exchange. + * The format is defined in IEEE P802.11-REVmc/D7.0, 9.4.2.22.13. + */ + QCA_WLAN_VENDOR_ATTR_FTM_LCR = 20, + /* + * Session/measurement completion status code, + * reported in QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE and + * QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT + * see enum qca_vendor_attr_loc_session_status. + */ + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS = 21, + /* + * Initial dialog token used by responder (0 if not specified), + * unsigned 8 bit value. + */ + QCA_WLAN_VENDOR_ATTR_FTM_INITIAL_TOKEN = 22, + /* + * AOA measurement type. Requested in QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS + * and optionally in QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION if + * AOA measurements are needed as part of an FTM session. + * Reported by QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT. See + * enum qca_wlan_vendor_attr_aoa_type. + */ + QCA_WLAN_VENDOR_ATTR_AOA_TYPE = 23, + /* + * A bit mask (unsigned 32 bit value) of antenna arrays used + * by indoor location measurements. Refers to the antenna + * arrays described by QCA_VENDOR_ATTR_LOC_CAPA_ANTENNA_ARRAYS. + */ + QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK = 24, + /* + * AOA measurement data. Its contents depends on the AOA measurement + * type and antenna array mask: + * QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE: array of U16 values, + * phase of the strongest CIR path for each antenna in the measured + * array(s). 
+ * QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP: array of 2 U16 + * values, phase and amplitude of the strongest CIR path for each + * antenna in the measured array(s). + */ + QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT = 25, + /* + * Used in QCA_NL80211_VENDOR_SUBCMD_GET_CHAIN_RSSI command + * to specify the chain number (unsigned 32 bit value) to inquire + * the corresponding antenna RSSI value */ + QCA_WLAN_VENDOR_ATTR_CHAIN_INDEX = 26, + /* + * Used in QCA_NL80211_VENDOR_SUBCMD_GET_CHAIN_RSSI command + * to report the specific antenna RSSI value (unsigned 32 bit value) */ + QCA_WLAN_VENDOR_ATTR_CHAIN_RSSI = 27, + /* Frequency in MHz, various uses. Unsigned 32 bit value */ + QCA_WLAN_VENDOR_ATTR_FREQ = 28, + /* + * TSF timer value, unsigned 64 bit value. + * May be returned by various commands. + */ + QCA_WLAN_VENDOR_ATTR_TSF = 29, + /* + * DMG RF sector index, unsigned 16 bit number. Valid values are + * 0..127 for sector indices or 65535 as special value used to + * unlock sector selection in + * QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR. + */ + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_INDEX = 30, + /* + * DMG RF sector type, unsigned 8 bit value. One of the values + * in enum qca_wlan_vendor_attr_dmg_rf_sector_type. + */ + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_TYPE = 31, + /* + * Bitmask of DMG RF modules for which information is requested. Each + * bit corresponds to an RF module with the same index as the bit + * number. Unsigned 32 bit number but only low 8 bits can be set since + * all DMG chips currently have up to 8 RF modules. + */ + QCA_WLAN_VENDOR_ATTR_DMG_RF_MODULE_MASK = 32, + /* + * Array of nested attributes where each entry is DMG RF sector + * configuration for a single RF module. + * Attributes for each entry are taken from enum + * qca_wlan_vendor_attr_dmg_rf_sector_cfg. + * Specified in QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG + * and returned by QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG. 
+ */ + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG = 33, + /* + * Used in QCA_NL80211_VENDOR_SUBCMD_STATS_EXT command + * to report frame aggregation statistics to userspace. + */ + QCA_WLAN_VENDOR_ATTR_RX_AGGREGATION_STATS_HOLES_NUM = 34, + QCA_WLAN_VENDOR_ATTR_RX_AGGREGATION_STATS_HOLES_INFO = 35, + /* + * Unsigned 8-bit value representing MBO transition reason code as + * provided by the AP used by subcommand + * QCA_NL80211_VENDOR_SUBCMD_FETCH_BSS_TRANSITION_STATUS. This is + * specified by the userspace in the request to the driver. + */ + QCA_WLAN_VENDOR_ATTR_BTM_MBO_TRANSITION_REASON = 36, + /* + * Array of nested attributes, BSSID and status code, used by subcommand + * QCA_NL80211_VENDOR_SUBCMD_FETCH_BSS_TRANSITION_STATUS, where each + * entry is taken from enum qca_wlan_vendor_attr_btm_candidate_info. + * The userspace space specifies the list/array of candidate BSSIDs in + * the order of preference in the request. The driver specifies the + * status code, for each BSSID in the list, in the response. The + * acceptable candidates are listed in the order preferred by the + * driver. + */ + QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO = 37, + /* + * Used in QCA_NL80211_VENDOR_SUBCMD_BRP_SET_ANT_LIMIT command + * See enum qca_wlan_vendor_attr_brp_ant_limit_mode. + */ + QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE = 38, + /* + * Used in QCA_NL80211_VENDOR_SUBCMD_BRP_SET_ANT_LIMIT command + * to define the number of antennas to use for BRP. 
+ * different purpose in each ANT_LIMIT_MODE: + * DISABLE - ignored + * EFFECTIVE - upper limit to number of antennas to be used + * FORCE - exact number of antennas to be used + * unsigned 8 bit value + */ + QCA_WLAN_VENDOR_ATTR_BRP_ANT_NUM_LIMIT = 39, + /* + * Used in QCA_NL80211_VENDOR_SUBCMD_GET_CHAIN_RSSI command + * to report the corresponding antenna index to the chain RSSI value + */ + QCA_WLAN_VENDOR_ATTR_ANTENNA_INFO = 40, + /* + * Used in QCA_NL80211_VENDOR_SUBCMD_GET_CHAIN_RSSI command + * to report the specific antenna EVM value (unsigned 32 bit value). + * With a determinate group of antennas, the driver specifies the + * EVM value for each antenna ID, and application extract them + * in user space. + */ + QCA_WLAN_VENDOR_ATTR_CHAIN_EVM = 41, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_MAX = QCA_WLAN_VENDOR_ATTR_AFTER_LAST - 1 +}; + +#ifdef FEATURE_WLAN_EXTSCAN +enum qca_wlan_vendor_attr_extscan_config_params { + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SUBCMD_CONFIG_PARAM_INVALID = 0, + + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SUBCMD_CONFIG_PARAM_REQUEST_ID = 1, + + /* + * Attributes for data used by + * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_VALID_CHANNELS sub command. + */ + + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_GET_VALID_CHANNELS_CONFIG_PARAM_WIFI_BAND + = 2, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_GET_VALID_CHANNELS_CONFIG_PARAM_MAX_CHANNELS + = 3, + + /* + * Attributes for input params used by + * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_START sub command. + */ + + /* Unsigned 32-bit value; channel frequency */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CHANNEL_SPEC_CHANNEL = 4, + /* Unsigned 32-bit value; dwell time in ms. 
*/ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CHANNEL_SPEC_DWELL_TIME = 5, + /* Unsigned 8-bit value; 0: active; 1: passive; N/A for DFS */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CHANNEL_SPEC_PASSIVE = 6, + /* Unsigned 8-bit value; channel class */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CHANNEL_SPEC_CLASS = 7, + + /* Unsigned 8-bit value; bucket index, 0 based */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_INDEX = 8, + /* Unsigned 8-bit value; band. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_BAND = 9, + /* Unsigned 32-bit value; desired period, in ms. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_PERIOD = 10, + /* Unsigned 8-bit value; report events semantics. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_REPORT_EVENTS = 11, + /* + * Unsigned 32-bit value. Followed by a nested array of + * EXTSCAN_CHANNEL_SPEC_* attributes. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_NUM_CHANNEL_SPECS = 12, + + /* + * Array of QCA_WLAN_VENDOR_ATTR_EXTSCAN_CHANNEL_SPEC_* attributes. + * Array size: QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_NUM_CHANNEL_SPECS + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CHANNEL_SPEC = 13, + + /* Unsigned 32-bit value; base timer period in ms. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SCAN_CMD_PARAMS_BASE_PERIOD = 14, + /* + * Unsigned 32-bit value; number of APs to store in each scan in the + * BSSID/RSSI history buffer (keep the highest RSSI APs). + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SCAN_CMD_PARAMS_MAX_AP_PER_SCAN = 15, + /* + * Unsigned 8-bit value; in %, when scan buffer is this much full, wake + * up AP. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SCAN_CMD_PARAMS_REPORT_THRESHOLD_PERCENT + = 16, + + /* + * Unsigned 8-bit value; number of scan bucket specs; followed by a + * nested array of_EXTSCAN_BUCKET_SPEC_* attributes and values. The size + * of the array is determined by NUM_BUCKETS. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SCAN_CMD_PARAMS_NUM_BUCKETS = 17, + + /* + * Array of QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_* attributes. 
+ * Array size: QCA_WLAN_VENDOR_ATTR_EXTSCAN_SCAN_CMD_PARAMS_NUM_BUCKETS + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC = 18, + + /* Unsigned 8-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_GET_CACHED_SCAN_RESULTS_CONFIG_PARAM_FLUSH + = 19, + /* Unsigned 32-bit value; maximum number of results to be returned. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_GET_CACHED_SCAN_RESULTS_CONFIG_PARAM_MAX + = 20, + + /* An array of 6 x unsigned 8-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_AP_THRESHOLD_PARAM_BSSID = 21, + /* Signed 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_AP_THRESHOLD_PARAM_RSSI_LOW = 22, + /* Signed 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_AP_THRESHOLD_PARAM_RSSI_HIGH = 23, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_AP_THRESHOLD_PARAM_CHANNEL = 24, + + /* + * Number of hotlist APs as unsigned 32-bit value, followed by a nested + * array of AP_THRESHOLD_PARAM attributes and values. The size of the + * array is determined by NUM_AP. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_BSSID_HOTLIST_PARAMS_NUM_AP = 25, + + /* + * Array of QCA_WLAN_VENDOR_ATTR_EXTSCAN_AP_THRESHOLD_PARAM_* attributes. + * Array size: QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_NUM_CHANNEL_SPECS + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_AP_THRESHOLD_PARAM = 26, + + /* Unsigned 32-bit value; number of samples for averaging RSSI. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SIGNIFICANT_CHANGE_PARAMS_RSSI_SAMPLE_SIZE + = 27, + /* Unsigned 32-bit value; number of samples to confirm AP loss. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SIGNIFICANT_CHANGE_PARAMS_LOST_AP_SAMPLE_SIZE + = 28, + /* Unsigned 32-bit value; number of APs breaching threshold. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SIGNIFICANT_CHANGE_PARAMS_MIN_BREACHING = 29, + /* + * Unsigned 32-bit value; number of APs. Followed by an array of + * AP_THRESHOLD_PARAM attributes. Size of the array is NUM_AP. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SIGNIFICANT_CHANGE_PARAMS_NUM_AP = 30, + /* Unsigned 32-bit value; number of samples to confirm AP loss. 
*/
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_BSSID_HOTLIST_PARAMS_LOST_AP_SAMPLE_SIZE
+ = 31,
+ /*
+ * Unsigned 32-bit value. If max_period is non zero or different than
+ * period, then this bucket is an exponential backoff bucket.
+ */
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_MAX_PERIOD = 32,
+ /* Unsigned 32-bit value. */
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_BASE = 33,
+ /*
+ * Unsigned 32-bit value. For exponential back off bucket, number of
+ * scans to perform for a given period.
+ */
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_BUCKET_SPEC_STEP_COUNT = 34,
+ /*
+ * Unsigned 8-bit value; in number of scans, wake up AP after these
+ * many scans.
+ */
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_SCAN_CMD_PARAMS_REPORT_THRESHOLD_NUM_SCANS
+ = 35,
+
+ /*
+ * Attributes for data used by
+ * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SET_SSID_HOTLIST sub command.
+ */
+
+ /* Unsigned 32-bit value; number of samples to confirm SSID loss. */
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_HOTLIST_PARAMS_LOST_SSID_SAMPLE_SIZE
+ = 36,
+ /*
+ * Number of hotlist SSIDs as unsigned 32-bit value, followed by a
+ * nested array of SSID_THRESHOLD_PARAM_* attributes and values. The
+ * size of the array is determined by NUM_SSID.
+ */
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_HOTLIST_PARAMS_NUM_SSID = 37,
+ /*
+ * Array of QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_THRESHOLD_PARAM_*
+ * attributes.
+ * Array size: QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_HOTLIST_PARAMS_NUM_SSID
+ */
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_THRESHOLD_PARAM = 38,
+
+ /* An array of 33 x unsigned 8-bit value; NULL terminated SSID */
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_THRESHOLD_PARAM_SSID = 39,
+ /* Unsigned 8-bit value */
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_THRESHOLD_PARAM_BAND = 40,
+ /* Signed 32-bit value */
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_THRESHOLD_PARAM_RSSI_LOW = 41,
+ /* Signed 32-bit value */
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_SSID_THRESHOLD_PARAM_RSSI_HIGH = 42,
+ /* Unsigned 32-bit value; a bitmask with additional extscan config flag.
+ */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CONFIGURATION_FLAGS = 43, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SUBCMD_CONFIG_PARAM_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SUBCMD_CONFIG_PARAM_MAX = + QCA_WLAN_VENDOR_ATTR_EXTSCAN_SUBCMD_CONFIG_PARAM_AFTER_LAST - 1, +}; + +enum qca_wlan_vendor_attr_extscan_results { + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_INVALID = 0, + + /* + * Unsigned 32-bit value; must match the request Id supplied by + * Wi-Fi HAL in the corresponding subcmd NL msg. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_REQUEST_ID = 1, + + /* + * Unsigned 32-bit value; used to indicate the status response from + * firmware/driver for the vendor sub-command. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_STATUS = 2, + + /* + * EXTSCAN Valid Channels attributes */ + /* Unsigned 32bit value; followed by a nested array of CHANNELS. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_NUM_CHANNELS = 3, + /* + * An array of NUM_CHANNELS x unsigned 32-bit value integers + * representing channel numbers. 
+ */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CHANNELS = 4, + + /* EXTSCAN Capabilities attributes */ + + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_SCAN_CACHE_SIZE = 5, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_SCAN_BUCKETS = 6, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_AP_CACHE_PER_SCAN + = 7, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_RSSI_SAMPLE_SIZE + = 8, + /* Signed 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_SCAN_REPORTING_THRESHOLD + = 9, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_HOTLIST_BSSIDS = 10, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_SIGNIFICANT_WIFI_CHANGE_APS + = 11, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_BSSID_HISTORY_ENTRIES + = 12, + + /* + * EXTSCAN Attributes used with + * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_RESULTS_AVAILABLE sub-command. + */ + + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_NUM_RESULTS_AVAILABLE = 13, + + /* + * EXTSCAN attributes used with + * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_FULL_SCAN_RESULT sub-command. 
+ */ + + /* + * An array of NUM_RESULTS_AVAILABLE x + * QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_* + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_LIST = 14, + + /* Unsigned 64-bit value; age of sample at the time of retrieval */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_TIME_STAMP = 15, + /* 33 x unsigned 8-bit value; NULL terminated SSID */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_SSID = 16, + /* An array of 6 x unsigned 8-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_BSSID = 17, + /* Unsigned 32-bit value; channel frequency in MHz */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_CHANNEL = 18, + /* Signed 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_RSSI = 19, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_RTT = 20, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_RTT_SD = 21, + /* Unsigned 16-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_BEACON_PERIOD = 22, + /* Unsigned 16-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_CAPABILITY = 23, + /* Unsigned 32-bit value; size of the IE DATA blob */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_IE_LENGTH = 24, + /* + * An array of IE_LENGTH x unsigned 8-bit value; blob of all the + * information elements found in the beacon; this data should be a + * packed list of wifi_information_element objects, one after the + * other. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_IE_DATA = 25, + + /* + * Unsigned 8-bit value; set by driver to indicate more scan results are + * available. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_RESULT_MORE_DATA = 26, + + /* + * EXTSCAN attributes for + * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SCAN_EVENT sub-command. 
+ */ + /* Unsigned 8-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_EVENT_TYPE = 27, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SCAN_EVENT_STATUS = 28, + + /* + * EXTSCAN attributes for + * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_HOTLIST_AP_FOUND sub-command. + */ + /* + * Use attr QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_NUM_RESULTS_AVAILABLE + * to indicate number of results. + * Also, use QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_LIST to indicate the + * list of results. + */ + + /* + * EXTSCAN attributes for + * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_SIGNIFICANT_CHANGE sub-command. + */ + /* An array of 6 x unsigned 8-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SIGNIFICANT_CHANGE_RESULT_BSSID = 29, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SIGNIFICANT_CHANGE_RESULT_CHANNEL + = 30, + /* Unsigned 32-bit value. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SIGNIFICANT_CHANGE_RESULT_NUM_RSSI + = 31, + /* + * A nested array of signed 32-bit RSSI values. Size of the array is + * determined by (NUM_RSSI of SIGNIFICANT_CHANGE_RESULT_NUM_RSSI. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_SIGNIFICANT_CHANGE_RESULT_RSSI_LIST + = 32, + + /* + * EXTSCAN attributes used with + * QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CACHED_RESULTS sub-command. + */ + /* + * Use attr QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_NUM_RESULTS_AVAILABLE + * to indicate number of extscan cached results returned. + * Also, use QCA_WLAN_VENDOR_ATTR_EXTSCAN_CACHED_RESULTS_LIST to indicate + * the list of extscan cached results. + */ + + /* + * An array of NUM_RESULTS_AVAILABLE x + * QCA_NL80211_VENDOR_ATTR_EXTSCAN_CACHED_RESULTS_* + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CACHED_RESULTS_LIST = 33, + /* Unsigned 32-bit value; a unique identifier for the scan unit. */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CACHED_RESULTS_SCAN_ID = 34, + /* + * Unsigned 32-bit value; a bitmask w/additional information about scan. 
+ */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_CACHED_RESULTS_FLAGS = 35, + /* + * Use attr QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_NUM_RESULTS_AVAILABLE + * to indicate number of wifi scan results/bssids retrieved by the scan. + * Also, use QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_LIST to indicate the + * list of wifi scan results returned for each cached result block. + */ + + /* + * EXTSCAN attributes for + * QCA_NL80211_VENDOR_SUBCMD_PNO_NETWORK_FOUND sub-command. + */ + /* + * Use QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_NUM_RESULTS_AVAILABLE for + * number of results. + * Use QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_LIST to indicate the nested + * list of wifi scan results returned for each + * wifi_passpoint_match_result block. + * Array size: QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_NUM_RESULTS_AVAILABLE. + */ + + /* + * EXTSCAN attributes for + * QCA_NL80211_VENDOR_SUBCMD_PNO_PASSPOINT_NETWORK_FOUND sub-command. + */ + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_PNO_RESULTS_PASSPOINT_NETWORK_FOUND_NUM_MATCHES + = 36, + /* + * A nested array of + * QCA_WLAN_VENDOR_ATTR_EXTSCAN_PNO_RESULTS_PASSPOINT_MATCH_* + * attributes. Array size = + * *_ATTR_EXTSCAN_PNO_RESULTS_PASSPOINT_NETWORK_FOUND_NUM_MATCHES. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_PNO_RESULTS_PASSPOINT_MATCH_RESULT_LIST = 37, + + /* Unsigned 32-bit value; network block id for the matched network */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_PNO_RESULTS_PASSPOINT_MATCH_ID = 38, + /* + * Use QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_LIST to indicate the nested + * list of wifi scan results returned for each + * wifi_passpoint_match_result block. + */ + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_PNO_RESULTS_PASSPOINT_MATCH_ANQP_LEN = 39, + /* + * An array size of PASSPOINT_MATCH_ANQP_LEN of unsigned 8-bit values; + * ANQP data in the information_element format. + */ + QCA_WLAN_VENDOR_ATTR_EXTSCAN_PNO_RESULTS_PASSPOINT_MATCH_ANQP = 40, + + /* Unsigned 32-bit value; a EXTSCAN Capabilities attribute. 
*/
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_HOTLIST_SSIDS = 41,
+ /* Unsigned 32-bit value; a EXTSCAN Capabilities attribute. */
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_NUM_EPNO_NETS = 42,
+ /* Unsigned 32-bit value; a EXTSCAN Capabilities attribute. */
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_NUM_EPNO_NETS_BY_SSID
+ = 43,
+ /* Unsigned 32-bit value; a EXTSCAN Capabilities attribute. */
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_CAPABILITIES_MAX_NUM_WHITELISTED_SSID
+ = 44,
+
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_BUCKETS_SCANNED = 45,
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_MAX_NUM_BLACKLISTED_BSSID = 46,
+
+ /* keep last */
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_AFTER_LAST,
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_MAX =
+ QCA_WLAN_VENDOR_ATTR_EXTSCAN_RESULTS_AFTER_LAST - 1,
+};
+#endif
+
+#ifdef WLAN_FEATURE_LINK_LAYER_STATS
+
+/**
+ * enum qca_wlan_vendor_attr_ll_stats_set - vendor attribute set stats
+ *
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_INVALID: Invalid initial value
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_CONFIG_MPDU_SIZE_THRESHOLD: Size threshold
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_CONFIG_AGGRESSIVE_STATS_GATHERING:
+ * Aggressive stats gathering
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_AFTER_LAST: After last
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_MAX: Max value
+ */
+enum qca_wlan_vendor_attr_ll_stats_set {
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_INVALID = 0,
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_CONFIG_MPDU_SIZE_THRESHOLD = 1,
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_CONFIG_AGGRESSIVE_STATS_GATHERING,
+ /* keep last */
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_AFTER_LAST,
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_MAX =
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_SET_AFTER_LAST - 1
+};
+
+/**
+ * enum qca_wlan_vendor_attr_ll_stats_get - vendor attribute get stats
+ *
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_INVALID: Invalid initial value
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_CONFIG_REQ_ID: Unsigned 32bit value
+ * provided by the caller issuing the GET stats
command. When reporting + * the stats results, the driver uses the same value to indicate which + * GET request the results correspond to. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_CONFIG_REQ_MASK: Get config request mask + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_RSP_MASK: Config response mask + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_STOP_RSP: Config stop response + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_MAX: Max value + */ +enum qca_wlan_vendor_attr_ll_stats_get { + QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_CONFIG_REQ_ID, + QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_CONFIG_REQ_MASK, + QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_MAX = + QCA_WLAN_VENDOR_ATTR_LL_STATS_GET_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_ll_stats_clr - vendor attribute clear stats + * + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_REQ_MASK: Config request mask + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_STOP_REQ: Config stop mask + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_RSP_MASK: Config response mask + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_STOP_RSP: Config stop response + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_MAX: Max value + */ +enum qca_wlan_vendor_attr_ll_stats_clr { + QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_REQ_MASK, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_STOP_REQ, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_RSP_MASK, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_CONFIG_STOP_RSP, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_MAX = + QCA_WLAN_VENDOR_ATTR_LL_STATS_CLR_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_ll_stats_results_type - ll stats result type + * + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_INVALID: 
Initial invalid value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_RADIO: Link layer stats type radio + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_IFACE: Link layer stats type interface + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_PEER: Link layer stats type peer + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_AFTER_LAST: Last value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_MAX: Max value + */ +enum qca_wlan_vendor_attr_ll_stats_results_type { + QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_INVALID = 0, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_RADIO = 1, + QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_IFACE, + QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_PEER, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_MAX = + QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_ll_stats_results - vendor attribute stats results + * + * Attributes of type QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_* are nested + * within the interface stats. + * + * Attributes of type QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_* could be nested + * within the interface stats. + * + * Attributes of type QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_* are nested + * within the interface stats. + * + * Attributes of type QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_* could be nested + * within the peer info stats. + * + * Attributes of type QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_* could be + * nested within the channel stats. + * + * Attributes of type QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_ could be nested + * within the radio stats. 
+ * + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RESULTS_REQ_ID: Unsigned 32bit value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_BEACON_RX: Unsigned 32bit value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_MGMT_RX: Unsigned 32bit value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_MGMT_ACTION_RX: Unsigned 32bit value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_MGMT_ACTION_TX: Unsigned 32bit value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RSSI_MGMT: Unsigned 32bit value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RSSI_DATA: Unsigned 32bit value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RSSI_ACK: Unsigned 32bit value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_MODE: Interface mode, e.g., STA, + * SOFTAP, IBSS, etc. Type = enum wifi_interface_mode + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_MAC_ADDR: Interface MAC address. + * An array of 6 Unsigned int8_t + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_STATE: + * Type = enum wifi_connection_state, e.g., DISCONNECTED, AUTHENTICATING, + * etc. Valid for STA, CLI only + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_ROAMING: + * Type = enum wifi_roam_state. Roaming state, e.g., IDLE or ACTIVE + * (is that valid for STA only?) + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_CAPABILITIES: Unsigned 32bit value. + * WIFI_CAPABILITY_XXX + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_SSID: NULL terminated SSID. An + * array of 33 Unsigned 8bit values + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_BSSID: BSSID. An array of 6 + * Unsigned 8bit values + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_AP_COUNTRY_STR: Country string + * advertised by AP. An array of 3 Unsigned 8bit values + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_COUNTRY_STR: Country string for + * this association. An array of 3 Unsigned 8bit values + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_AC: Type = enum wifi_traffic_ac e.g. 
+ * VO, VI, BE and BK
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_TX_MPDU: Unsigned int 32 value
+ * corresponding to respective AC
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RX_MPDU: Unsigned int 32 value
+ * corresponding to respective AC
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_TX_MCAST: Unsigned int 32 value
+ * corresponding to respective AC
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RX_MCAST: Unsigned int 32 value
+ * corresponding to respective AC
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RX_AMPDU: Unsigned int 32 value
+ * corresponding to respective AC
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_TX_AMPDU: Unsigned int 32 value
+ * corresponding to respective AC
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_MPDU_LOST: Unsigned int 32 value
+ * corresponding to respective AC
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RETRIES: Unsigned int 32 value
+ * corresponding to respective AC
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RETRIES_SHORT: Unsigned int 32 value
+ * corresponding to respective AC
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_CONTENTION_TIME_MIN: Unsigned int 32
+ * value corresponding to respective AC
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_CONTENTION_TIME_MAX: Unsigned int 32
+ * value corresponding to respective AC
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_CONTENTION_TIME_AVG: Unsigned int 32
+ * value corresponding to respective AC
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_CONTENTION_NUM_SAMPLES: Unsigned int 32
+ * value corresponding to respective AC
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_NUM_PEERS: Unsigned int 32
+ * value corresponding to respective AC
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_TYPE: Type = enum wifi_peer_type
+ * Peer type, e.g., STA, AP, P2P GO etc
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_MAC_ADDRESS: MAC addr corresponding
+ * to respective peer.
An array of 6 Unsigned 8bit values + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_CAPABILITIES: Unsigned int 32bit + * value representing capabilities corresponding to respective peer. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_NUM_RATES: Unsigned 32bit value. + * Number of rates + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_PREAMBLE: Unsigned int 8bit value: + * 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_NSS: Unsigned int 8bit value: + * 0:1x1, 1:2x2, 3:3x3, 4:4x4 + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_BW: Unsigned int 8bit value: + * 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_MCS_INDEX: Unsigned int 8bit value: + * OFDM/CCK rate code would be as per IEEE Std in the units of 0.5mbps + * HT/VHT it would be mcs index + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_BIT_RATE: Unsigned 32bit value. + * Bit rate in units of 100Kbps + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_TX_MPDU: Unsigned int 32bit value. + * Number of successfully transmitted data pkts i.e., with ACK received + * corresponding to the respective rate + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_RX_MPDU: Unsigned int 32bit value. + * Number of received data pkts corresponding to the respective rate + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_MPDU_LOST: Unsigned int 32bit value. + * Number of data pkts losses, i.e., no ACK received corresponding to + * the respective rate + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_RETRIES: Unsigned int 32bit value. + * Total number of data pkt retries for the respective rate + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_RETRIES_SHORT: Unsigned int 32bit value. + * Total number of short data pkt retries for the respective rate + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_RETRIES_LONG: Unsigned int 32bit value. + * Total number of long data pkt retries for the respective rate + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ID: Radio id + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME: Unsigned 32bit value. 
+ * Total number of msecs the radio is awake accruing over time + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_TX_TIME: Unsigned 32bit value. + * Total number of msecs the radio is transmitting accruing over time + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_RX_TIME: Unsigned 32bit value. + * Total number of msecs the radio is in active receive accruing over time + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_SCAN: Unsigned 32bit value. + * Total number of msecs the radio is awake due to all scan accruing + * over time + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_NBD: Unsigned 32bit value. + * Total number of msecs the radio is awake due to NAN accruing over time. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_GSCAN: Unsigned 32bit value. + * Total number of msecs the radio is awake due to GSCAN accruing over time + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_ROAM_SCAN: Unsigned 32bit value. + * Total number of msecs the radio is awake due to roam scan accruing over + * time. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_PNO_SCAN: Unsigned 32bit value. + * Total number of msecs the radio is awake due to PNO scan accruing over + * time. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_HS20: Unsigned 32bit value. + * Total number of msecs the radio is awake due to HS2.0 scans and GAS + * exchange accruing over time. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_NUM_CHANNELS: Unsigned 32bit value. + * Number of channels + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_WIDTH: + * Type = enum wifi_channel_width. Channel width, e.g., 20, 40, 80, etc. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_CENTER_FREQ: + * Unsigned 32bit value. Primary 20MHz channel. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_CENTER_FREQ0: + * Unsigned 32bit value. Center frequency (MHz) first segment. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_CENTER_FREQ1: + * Unsigned 32bit value. Center frequency (MHz) second segment. 
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_ON_TIME: Unsigned int 32bit value + * representing total number of msecs the radio is awake on that channel + * accruing over time, corresponding to the respective channel. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_CCA_BUSY_TIME: Unsigned int 32bit + * value representing total number of msecs the CCA register is busy + * accruing over time corresponding to the respective channel. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_NUM_RADIOS: Number of radios + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CH_INFO: Channel info + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO: Peer info + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_RATE_INFO: Peer rate info + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_INFO: WMM info + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RESULTS_MORE_DATA: Unsigned 8bit value. + * Used by the driver; if set to 1, it indicates that more stats, e.g., + * peers or radio, are to follow in the next + * QCA_NL80211_VENDOR_SUBCMD_LL_STATS_*_RESULTS event. Otherwise, it + * is set to 0. 
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_AVERAGE_TSF_OFFSET: tsf offset
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_LEAKY_AP_DETECTED: leaky ap detected
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_LEAKY_AP_AVG_NUM_FRAMES_LEAKED:
+ * average number of frames leaked
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_LEAKY_AP_GUARD_TIME: guard time
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE: Link Layer stats type
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_NUM_TX_LEVELS: LL Radio Number of
+ * Tx Levels
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_TX_TIME_PER_LEVEL: Number of msecs the
+ * radio spent in transmitting for each power level
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RTS_SUCC_CNT: RTS successful count
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RTS_FAIL_CNT: RTS fail count
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_PPDU_SUCC_CNT: PPDU successful count
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_PPDU_FAIL_CNT: PPDU fail count
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_AFTER_LAST: After last
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_MAX: Max value
+ */
+
+enum qca_wlan_vendor_attr_ll_stats_results {
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_INVALID = 0,
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_RESULTS_REQ_ID = 1,
+
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_BEACON_RX = 2,
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_MGMT_RX = 3,
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_MGMT_ACTION_RX = 4,
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_MGMT_ACTION_TX = 5,
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RSSI_MGMT = 6,
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RSSI_DATA = 7,
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RSSI_ACK = 8,
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_MODE = 9,
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_MAC_ADDR = 10,
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_STATE = 11,
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_ROAMING = 12,
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_CAPABILITIES = 13,
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_SSID = 14,
+ QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_BSSID = 15,
+
QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_AP_COUNTRY_STR = 16, + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_INFO_COUNTRY_STR = 17, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_AC = 18, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_TX_MPDU = 19, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RX_MPDU = 20, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_TX_MCAST = 21, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RX_MCAST = 22, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RX_AMPDU = 23, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_TX_AMPDU = 24, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_MPDU_LOST = 25, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RETRIES = 26, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RETRIES_SHORT = 27, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_RETRIES_LONG = 28, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_CONTENTION_TIME_MIN = 29, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_CONTENTION_TIME_MAX = 30, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_CONTENTION_TIME_AVG = 31, + QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_AC_CONTENTION_NUM_SAMPLES = 32, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_NUM_PEERS = 33, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_TYPE = 34, + QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_MAC_ADDRESS = 35, + QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_CAPABILITIES = 36, + QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_NUM_RATES = 37, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_PREAMBLE = 38, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_NSS = 39, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_BW = 40, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_MCS_INDEX = 41, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_BIT_RATE = 42, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_TX_MPDU = 43, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_RX_MPDU = 44, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_MPDU_LOST = 45, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_RETRIES = 46, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_RETRIES_SHORT = 47, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RATE_RETRIES_LONG = 48, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ID = 49, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME = 50, + QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_TX_TIME = 51, 
	QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_RX_TIME = 52,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_SCAN = 53,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_NBD = 54,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_GSCAN = 55,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_ROAM_SCAN = 56,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_PNO_SCAN = 57,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_ON_TIME_HS20 = 58,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_NUM_CHANNELS = 59,

	QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_WIDTH = 60,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_CENTER_FREQ = 61,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_CENTER_FREQ0 = 62,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_INFO_CENTER_FREQ1 = 63,

	QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_ON_TIME = 64,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_CHANNEL_CCA_BUSY_TIME = 65,

	QCA_WLAN_VENDOR_ATTR_LL_STATS_NUM_RADIOS = 66,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_CH_INFO = 67,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO = 68,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_PEER_INFO_RATE_INFO = 69,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_WMM_INFO = 70,

	QCA_WLAN_VENDOR_ATTR_LL_STATS_RESULTS_MORE_DATA = 71,

	QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_AVERAGE_TSF_OFFSET = 72,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_LEAKY_AP_DETECTED = 73,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_LEAKY_AP_AVG_NUM_FRAMES_LEAKED = 74,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_LEAKY_AP_GUARD_TIME = 75,

	QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE = 76,

	QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_NUM_TX_LEVELS = 77,

	QCA_WLAN_VENDOR_ATTR_LL_STATS_RADIO_TX_TIME_PER_LEVEL = 78,

	QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RTS_SUCC_CNT = 79,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_RTS_FAIL_CNT = 80,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_PPDU_SUCC_CNT = 81,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_IFACE_PPDU_FAIL_CNT = 82,

	/* keep last */
	QCA_WLAN_VENDOR_ATTR_LL_STATS_AFTER_LAST,
	QCA_WLAN_VENDOR_ATTR_LL_STATS_MAX =
		QCA_WLAN_VENDOR_ATTR_LL_STATS_AFTER_LAST - 1
};

/**
 * enum qca_wlan_vendor_attr_ll_stats_type - Link-layer stats result type.
 *
 * Selects which class of link-layer statistics a result carries:
 * radio, interface, or peers.
 * NOTE(review): presumably the value carried in
 * QCA_WLAN_VENDOR_ATTR_LL_STATS_TYPE above - confirm against the driver.
 */
enum qca_wlan_vendor_attr_ll_stats_type {
	QCA_NL80211_VENDOR_SUBCMD_LL_STATS_TYPE_INVALID = 0,
	QCA_NL80211_VENDOR_SUBCMD_LL_STATS_TYPE_RADIO = 1,
	QCA_NL80211_VENDOR_SUBCMD_LL_STATS_TYPE_IFACE = 2,
	QCA_NL80211_VENDOR_SUBCMD_LL_STATS_TYPE_PEERS = 3,

	/* keep last */
	QCA_NL80211_VENDOR_SUBCMD_LL_STATS_TYPE_AFTER_LAST,
	QCA_NL80211_VENDOR_SUBCMD_LL_STATS_TYPE_MAX =
		QCA_NL80211_VENDOR_SUBCMD_LL_STATS_TYPE_AFTER_LAST - 1,
};

#endif /* WLAN_FEATURE_LINK_LAYER_STATS */

/**
 * enum qca_wlan_vendor_attr_get_supported_features - get supported feature
 *
 * @QCA_WLAN_VENDOR_ATTR_FEATURE_SET_INVALID: Invalid initial value
 * @QCA_WLAN_VENDOR_ATTR_FEATURE_SET: Unsigned 32bit value
 * @QCA_WLAN_VENDOR_ATTR_FEATURE_SET_AFTER_LAST: After last
 * @QCA_WLAN_VENDOR_ATTR_FEATURE_SET_MAX: Max value
 */
enum qca_wlan_vendor_attr_get_supported_features {
	QCA_WLAN_VENDOR_ATTR_FEATURE_SET_INVALID = 0,
	QCA_WLAN_VENDOR_ATTR_FEATURE_SET = 1,
	/* keep last */
	QCA_WLAN_VENDOR_ATTR_FEATURE_SET_AFTER_LAST,
	QCA_WLAN_VENDOR_ATTR_FEATURE_SET_MAX =
		QCA_WLAN_VENDOR_ATTR_FEATURE_SET_AFTER_LAST - 1,
};

/**
 * enum qca_wlan_vendor_attr_set_scanning_mac_oui - set scanning mac oui
 *
 * @QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI_INVALID: Invalid initial value
 * @QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI: An array of 3 x Unsigned 8-bit
 *	value
 * @QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI_AFTER_LAST: After last
 * @QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI_MAX: Max value
 */
enum qca_wlan_vendor_attr_set_scanning_mac_oui {
	QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI_INVALID = 0,
	QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI = 1,
	/* keep last */
	QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI_AFTER_LAST,
	QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI_MAX =
		QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI_AFTER_LAST - 1,
};

/**
 * enum qca_wlan_vendor_attr_scan - Specifies vendor scan attributes
 *
 * @QCA_WLAN_VENDOR_ATTR_SCAN_IE: IEs that should be included as part of scan
 * @QCA_WLAN_VENDOR_ATTR_SCAN_FREQUENCIES: Nested unsigned
 * 32-bit attributes
 * with frequencies to be scanned (in MHz)
 * @QCA_WLAN_VENDOR_ATTR_SCAN_SSIDS: Nested attribute with SSIDs to be scanned
 * @QCA_WLAN_VENDOR_ATTR_SCAN_SUPP_RATES: Nested array attribute of supported
 * rates to be included
 * @QCA_WLAN_VENDOR_ATTR_SCAN_TX_NO_CCK_RATE: flag used to send probe requests
 * at non CCK rate in 2GHz band
 * @QCA_WLAN_VENDOR_ATTR_SCAN_FLAGS: Unsigned 32-bit scan flags
 * @QCA_WLAN_VENDOR_ATTR_SCAN_COOKIE: Unsigned 64-bit cookie provided by the
 * driver for the specific scan request
 * @QCA_WLAN_VENDOR_ATTR_SCAN_STATUS: Unsigned 8-bit status of the scan
 * request decoded as in enum scan_status
 * @QCA_WLAN_VENDOR_ATTR_SCAN_MAC: 6-byte MAC address to use when randomisation
 * scan flag is set
 * @QCA_WLAN_VENDOR_ATTR_SCAN_MAC_MASK: 6-byte MAC address mask to be used with
 * randomisation
 * @QCA_WLAN_VENDOR_ATTR_SCAN_BSSID: BSSID provided to do scan for specific BSS
 */
enum qca_wlan_vendor_attr_scan {
	QCA_WLAN_VENDOR_ATTR_SCAN_INVALID_PARAM = 0,
	QCA_WLAN_VENDOR_ATTR_SCAN_IE,
	QCA_WLAN_VENDOR_ATTR_SCAN_FREQUENCIES,
	QCA_WLAN_VENDOR_ATTR_SCAN_SSIDS,
	QCA_WLAN_VENDOR_ATTR_SCAN_SUPP_RATES,
	QCA_WLAN_VENDOR_ATTR_SCAN_TX_NO_CCK_RATE,
	QCA_WLAN_VENDOR_ATTR_SCAN_FLAGS,
	QCA_WLAN_VENDOR_ATTR_SCAN_COOKIE,
	QCA_WLAN_VENDOR_ATTR_SCAN_STATUS,
	QCA_WLAN_VENDOR_ATTR_SCAN_MAC,
	QCA_WLAN_VENDOR_ATTR_SCAN_MAC_MASK,
	QCA_WLAN_VENDOR_ATTR_SCAN_BSSID,
	/* keep last */
	QCA_WLAN_VENDOR_ATTR_SCAN_AFTER_LAST,
	QCA_WLAN_VENDOR_ATTR_SCAN_MAX =
		QCA_WLAN_VENDOR_ATTR_SCAN_AFTER_LAST - 1
};

/**
 * enum scan_status - Specifies the valid values the vendor scan attribute
 * QCA_WLAN_VENDOR_ATTR_SCAN_STATUS can take
 * @VENDOR_SCAN_STATUS_NEW_RESULTS: implies the vendor scan is successful with
 * new scan results
 * @VENDOR_SCAN_STATUS_ABORTED: implies the vendor scan was aborted in-between
 */
enum scan_status {
	VENDOR_SCAN_STATUS_NEW_RESULTS,
	VENDOR_SCAN_STATUS_ABORTED,
	VENDOR_SCAN_STATUS_MAX,
};

/**
 * enum qca_wlan_vendor_attr_get_concurrency_matrix - get concurrency matrix
 *
 * NL attributes for data used by
 * QCA_NL80211_VENDOR_SUBCMD_GET_CONCURRENCY_MATRIX sub command.
 *
 * @QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_INVALID: Invalid initial value
 * @QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_CONFIG_PARAM_SET_SIZE_MAX:
 * Unsigned 32-bit value
 * @QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_RESULTS_SET_SIZE:
 * Unsigned 32-bit value
 * @QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_RESULTS_SET: Set results. An
 * array of SET_SIZE x Unsigned 32bit values representing concurrency
 * combinations
 * @QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_AFTER_LAST: After last
 * @QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_MAX: Max value
 */
enum qca_wlan_vendor_attr_get_concurrency_matrix {
	QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_INVALID = 0,
	QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_CONFIG_PARAM_SET_SIZE_MAX
		= 1,
	QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_RESULTS_SET_SIZE = 2,
	QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_RESULTS_SET = 3,
	/* keep last */
	QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_AFTER_LAST,
	QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_MAX =
		QCA_WLAN_VENDOR_ATTR_GET_CONCURRENCY_MATRIX_AFTER_LAST - 1,
};

/**
 * enum qca_wlan_vendor_attr_set_no_dfs_flag - vendor attribute set no dfs flag
 *
 * @QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG_INVALID: Invalid initial value
 * @QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG: Unsigned 32-bit value
 * @QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG_AFTER_LAST: After last
 * @QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG_MAX: Max value
 */
enum qca_wlan_vendor_attr_set_no_dfs_flag {
	QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG_INVALID = 0,
	QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG = 1,
	/* keep last */
	QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG_AFTER_LAST,
	QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG_MAX =
		QCA_WLAN_VENDOR_ATTR_SET_NO_DFS_FLAG_AFTER_LAST - 1,
};

/**
 * enum qca_vendor_attr_wisa_cmd
 * @QCA_WLAN_VENDOR_ATTR_WISA_INVALID: Invalid attr
 * @QCA_WLAN_VENDOR_ATTR_WISA_MODE: WISA mode value attr (u32)
 * @QCA_WLAN_VENDOR_ATTR_WISA_AFTER_LAST: After last
 * @QCA_WLAN_VENDOR_ATTR_WISA_MAX: Max Value
 * WISA setup vendor commands
 */
enum qca_vendor_attr_wisa_cmd {
	QCA_WLAN_VENDOR_ATTR_WISA_INVALID = 0,
	QCA_WLAN_VENDOR_ATTR_WISA_MODE,
	/* keep last */
	QCA_WLAN_VENDOR_ATTR_WISA_AFTER_LAST,
	QCA_WLAN_VENDOR_ATTR_WISA_MAX =
		QCA_WLAN_VENDOR_ATTR_WISA_AFTER_LAST - 1
};

/* Whether the driver is allowed to roam on its own (within the ESS) */
enum qca_roaming_policy {
	QCA_ROAMING_NOT_ALLOWED,
	QCA_ROAMING_ALLOWED_WITHIN_ESS,
};

/**
 * enum qca_roam_reason - Represents the reason codes for roaming. Used by
 * QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_REASON.
 *
 * @QCA_ROAM_REASON_UNKNOWN: Any reason that does not classify under the below
 * reasons.
 *
 * @QCA_ROAM_REASON_PER: Roam triggered when packet error rates (PER) breached
 * the configured threshold.
 *
 * @QCA_ROAM_REASON_BEACON_MISS: Roam triggered due to the continuous configured
 * beacon misses from the then connected AP.
 *
 * @QCA_ROAM_REASON_POOR_RSSI: Roam triggered due to the poor RSSI reported
 * by the connected AP.
 *
 * @QCA_ROAM_REASON_BETTER_RSSI: Roam triggered for finding a BSSID with a
 * better RSSI than the connected BSSID. Here the RSSI of the current BSSID is
 * not poor.
 *
 * @QCA_ROAM_REASON_CONGESTION: Roam triggered considering the connected channel
 * or environment being very noisy / congested.
 *
 * @QCA_ROAM_REASON_USER_TRIGGER: Roam triggered due to an explicit request
 * from the user (user space).
 *
 * @QCA_ROAM_REASON_BTM: Roam triggered due to BTM request frame received from
 * connected AP.
 *
 * @QCA_ROAM_REASON_BSS_LOAD: Roam triggered due to the channel utilization
 * breaching out the configured threshold.
 *
 */
enum qca_roam_reason {
	QCA_ROAM_REASON_UNKNOWN,
	QCA_ROAM_REASON_PER,
	QCA_ROAM_REASON_BEACON_MISS,
	QCA_ROAM_REASON_POOR_RSSI,
	QCA_ROAM_REASON_BETTER_RSSI,
	QCA_ROAM_REASON_CONGESTION,
	QCA_ROAM_REASON_USER_TRIGGER,
	QCA_ROAM_REASON_BTM,
	QCA_ROAM_REASON_BSS_LOAD,
};

/**
 * enum qca_wlan_vendor_attr_roam_auth - vendor event for roaming
 * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_BSSID: BSSID of the roamed AP
 * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_REQ_IE: Request IE
 * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_RESP_IE: Response IE
 * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_AUTHORIZED: Authorization Status
 * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_KEY_REPLAY_CTR: Replay Counter
 * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_PTK_KCK: KCK of the PTK
 * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_PTK_KEK: KEK of the PTK
 * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_SUBNET_STATUS: subnet change status
 * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_STATUS:
 * Indicates the status of re-association requested by user space for
 * the BSSID specified by QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_BSSID.
 * Type u16.
 * Represents the status code from AP. Use
 * %WLAN_STATUS_UNSPECIFIED_FAILURE if the device cannot give you the
 * real status code for failures.
 * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_RETAIN_CONNECTION:
 * This attribute indicates that the old association was maintained when
 * a re-association is requested by user space and that re-association
 * attempt fails (i.e., cannot connect to the requested BSS, but can
 * remain associated with the BSS with which the association was in
 * place when being requested to roam). Used along with
 * QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_STATUS to indicate the current
 * re-association status. Type flag.
 * This attribute is applicable only for re-association failure cases.
 * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_PMK: AUTH PMK
 * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_PMKID: AUTH PMKID
 * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_FILS_ERP_NEXT_SEQ_NUM: FILS erp next
 * seq number
 * @QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_REASON: A 16-bit unsigned value
 * representing the reasons for the roaming. Defined by enum
 * qca_roam_reason.
 */
enum qca_wlan_vendor_attr_roam_auth {
	QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_INVALID = 0,
	QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_BSSID,
	QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_REQ_IE,
	QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_RESP_IE,
	QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_AUTHORIZED,
	QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_KEY_REPLAY_CTR,
	QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_PTK_KCK,
	QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_PTK_KEK,
	QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_SUBNET_STATUS,
	QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_STATUS,
	QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_RETAIN_CONNECTION,
	QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_PMK,
	QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_PMKID,
	QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_FILS_ERP_NEXT_SEQ_NUM,
	QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_REASON,
	/* keep last */
	QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_AFTER_LAST,
	QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_MAX =
		QCA_WLAN_VENDOR_ATTR_ROAM_AUTH_AFTER_LAST - 1
};

/**
 * enum qca_wlan_vendor_attr_wifi_config - wifi config
 *
 * @QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_INVALID: Invalid initial value
 * @QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_DYNAMIC_DTIM: dynamic DTIM
 * @QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_STATS_AVG_FACTOR: avg factor
 * @QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_GUARD_TIME: guard time
 * @QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_AFTER_LAST: after last
 * @QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_MAX: max value
 */
enum qca_wlan_vendor_attr_wifi_config {
	QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_INVALID = 0,
	QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_DYNAMIC_DTIM = 1,
	QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_STATS_AVG_FACTOR = 2,
	QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_GUARD_TIME = 3,

	/* keep last */
	QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_AFTER_LAST,
	QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_MAX =
		QCA_WLAN_VENDOR_ATTR_WIFI_CONFIG_AFTER_LAST - 1,
};

/**
 * enum qca_wlan_epno_type - the type of request to the EPNO command
 * @QCA_WLAN_EPNO: epno type request
 * @QCA_WLAN_PNO: pno type request
 */
enum qca_wlan_epno_type {
	QCA_WLAN_EPNO,
	QCA_WLAN_PNO
};

/**
 * enum qca_wlan_vendor_attr_pno_config_params - pno config params
 *
 * @QCA_WLAN_VENDOR_ATTR_PNO_INVALID: Invalid initial value
 *
 * NL attributes for data used by
 * QCA_NL80211_VENDOR_SUBCMD_PNO_SET_PASSPOINT_LIST sub command.
 * @QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_LIST_PARAM_NUM:
 * Unsigned 32-bit value; pno passpoint number of networks
 * @QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_LIST_PARAM_NETWORK_ARRAY:
 * Array of nested QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_*
 * attributes. Array size =
 * QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_LIST_PARAM_NUM.
 * @QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ID:
 * Unsigned 32-bit value; network id
 * @QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_REALM:
 * An array of 256 x Unsigned 8-bit value; NULL terminated UTF8 encoded
 * realm, 0 if unspecified.
 * @QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_CNSRTM_ID:
 * An array of 16 x Unsigned 32-bit value; roaming consortium ids
 * to match, 0 if unspecified.
 * @QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_PLMN:
 * An array of 6 x Unsigned 8-bit value; mcc/mnc combination, 0s if
 * unspecified.
 *
 * NL attributes for data used by
 * QCA_NL80211_VENDOR_SUBCMD_PNO_SET_LIST sub command.
 * @QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_NUM_NETWORKS:
 * Unsigned 32-bit value; set pno number of networks
 * @QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORKS_LIST:
 * Array of nested
 * QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_*
 * attributes.
 * Array size =
 * QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_NUM_NETWORKS
 * @QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_SSID:
 * An array of 33 x Unsigned 8-bit value; NULL terminated SSID
 * @QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_RSSI_THRESHOLD:
 * Signed 8-bit value; threshold for considering this SSID as found,
 * required granularity for this threshold is 4dBm to 8dBm
 * @QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_FLAGS:
 * Unsigned 8-bit value; WIFI_PNO_FLAG_XXX
 * @QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_AUTH_BIT:
 * Unsigned 8-bit value; auth bit field for matching WPA IE
 * @QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_TYPE:
 * Unsigned 8-bit to indicate ePNO type; values from qca_wlan_epno_type
 * @QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_CHANNEL_LIST:
 * Nested attribute to send the channel list
 * @QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_SCAN_INTERVAL:
 * Unsigned 32-bit value; indicates the Interval between PNO scan
 * cycles in msec
 * @QCA_WLAN_VENDOR_ATTR_EPNO_MIN5GHZ_RSSI:
 * Signed 32-bit value; minimum 5GHz RSSI for a BSSID to be considered
 * @QCA_WLAN_VENDOR_ATTR_EPNO_MIN24GHZ_RSSI:
 * Signed 32-bit value; minimum 2.4GHz RSSI for a BSSID to be considered
 * This attribute is obsolete now.
 * @QCA_WLAN_VENDOR_ATTR_EPNO_INITIAL_SCORE_MAX:
 * Signed 32-bit value; the maximum score that a network
 * can have before bonuses
 * @QCA_WLAN_VENDOR_ATTR_EPNO_CURRENT_CONNECTION_BONUS:
 * Signed 32-bit value; only report when there is a network's
 * score this much higher than the current connection
 * @QCA_WLAN_VENDOR_ATTR_EPNO_SAME_NETWORK_BONUS:
 * Signed 32-bit value; score bonus for all networks with
 * the same network flag
 * @QCA_WLAN_VENDOR_ATTR_EPNO_SECURE_BONUS:
 * Signed 32-bit value; score bonus for networks that are not open
 * @QCA_WLAN_VENDOR_ATTR_EPNO_BAND5GHZ_BONUS:
 * Signed 32-bit value; 5GHz RSSI score bonus applied to all
 * 5GHz networks
 * @QCA_WLAN_VENDOR_ATTR_PNO_CONFIG_REQUEST_ID:
 * Unsigned 32-bit value, representing the PNO Request ID
 * @QCA_WLAN_VENDOR_ATTR_PNO_AFTER_LAST: After last
 * @QCA_WLAN_VENDOR_ATTR_PNO_MAX: max
 */
enum qca_wlan_vendor_attr_pno_config_params {
	QCA_WLAN_VENDOR_ATTR_PNO_INVALID = 0,

	QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_LIST_PARAM_NUM = 1,
	QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_LIST_PARAM_NETWORK_ARRAY = 2,
	QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ID = 3,
	QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_REALM = 4,
	QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_CNSRTM_ID = 5,
	QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_PLMN = 6,

	QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_NUM_NETWORKS = 7,
	QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORKS_LIST = 8,
	QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_SSID = 9,
	QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_RSSI_THRESHOLD = 10,
	QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_FLAGS = 11,
	QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_NETWORK_AUTH_BIT = 12,
	QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_TYPE = 13,
	QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_CHANNEL_LIST = 14,
	QCA_WLAN_VENDOR_ATTR_PNO_SET_LIST_PARAM_EPNO_SCAN_INTERVAL = 15,
	QCA_WLAN_VENDOR_ATTR_EPNO_MIN5GHZ_RSSI = 16,
	QCA_WLAN_VENDOR_ATTR_EPNO_MIN24GHZ_RSSI = 17,
	QCA_WLAN_VENDOR_ATTR_EPNO_INITIAL_SCORE_MAX = 18,
	QCA_WLAN_VENDOR_ATTR_EPNO_CURRENT_CONNECTION_BONUS = 19,
	QCA_WLAN_VENDOR_ATTR_EPNO_SAME_NETWORK_BONUS = 20,
	QCA_WLAN_VENDOR_ATTR_EPNO_SECURE_BONUS = 21,
	QCA_WLAN_VENDOR_ATTR_EPNO_BAND5GHZ_BONUS = 22,

	QCA_WLAN_VENDOR_ATTR_PNO_CONFIG_REQUEST_ID = 23,
	/* keep last */
	QCA_WLAN_VENDOR_ATTR_PNO_AFTER_LAST,
	QCA_WLAN_VENDOR_ATTR_PNO_MAX =
		QCA_WLAN_VENDOR_ATTR_PNO_AFTER_LAST - 1,
};

/**
 * enum qca_wlan_vendor_attr_roaming_config_params - roaming config params
 *
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_INVALID: Invalid initial value
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_SUBCMD: roaming sub command
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_REQ_ID: Request id
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_WHITE_LIST_SSID_NUM_NETWORKS:
 * number of whitelist networks
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_WHITE_LIST_SSID_LIST:
 * Whitelist ssid list
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_WHITE_LIST_SSID:
 * white list ssid
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_BOOST_THRESHOLD:
 * 'a' band boost threshold
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_PENALTY_THRESHOLD:
 * 'a' band penalty threshold
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_BOOST_FACTOR:
 * 'a' band boost factor
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_PENALTY_FACTOR:
 * 'a' band penalty factor
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_MAX_BOOST:
 * 'a' band max boost
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_LAZY_ROAM_HISTERESYS:
 * lazy roam hysteresis
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_ALERT_ROAM_RSSI_TRIGGER:
 * alert roam rssi trigger
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_LAZY_ROAM_ENABLE:
 * set lazy roam enable
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PREFS:
 * set bssid preference
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_LAZY_ROAM_NUM_BSSID:
 * set lazy roam number of bssid
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_LAZY_ROAM_BSSID:
 * set lazy roam bssid
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_LAZY_ROAM_RSSI_MODIFIER:
 * set lazy roam rssi modifier
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PARAMS:
 * set bssid params
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PARAMS_NUM_BSSID:
 * set bssid params num bssid
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PARAMS_BSSID:
 * set bssid params bssid
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_AFTER_LAST: After last
 * @QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_MAX: Max
 */
enum qca_wlan_vendor_attr_roaming_config_params {
	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_INVALID = 0,

	QCA_WLAN_VENDOR_ATTR_ROAMING_SUBCMD = 1,
	QCA_WLAN_VENDOR_ATTR_ROAMING_REQ_ID = 2,

	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_WHITE_LIST_SSID_NUM_NETWORKS = 3,
	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_WHITE_LIST_SSID_LIST = 4,
	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_WHITE_LIST_SSID = 5,

	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_BOOST_THRESHOLD = 6,
	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_PENALTY_THRESHOLD = 7,
	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_BOOST_FACTOR = 8,
	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_PENALTY_FACTOR = 9,
	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_A_BAND_MAX_BOOST = 10,
	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_LAZY_ROAM_HISTERESYS = 11,
	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_ALERT_ROAM_RSSI_TRIGGER = 12,

	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_LAZY_ROAM_ENABLE = 13,

	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PREFS = 14,
	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_LAZY_ROAM_NUM_BSSID = 15,
	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_LAZY_ROAM_BSSID = 16,
	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_LAZY_ROAM_RSSI_MODIFIER = 17,

	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PARAMS = 18,
	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PARAMS_NUM_BSSID = 19,
	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_SET_BSSID_PARAMS_BSSID = 20,

	/* keep last */
	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_AFTER_LAST,
	QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_MAX =
		QCA_WLAN_VENDOR_ATTR_ROAMING_PARAM_AFTER_LAST - 1,
};

/**
 * enum qca_wlan_vendor_attr_roam_subcmd - roam sub commands
 *
 * @QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_INVALID: Invalid initial value
 * @QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_SSID_WHITE_LIST: ssid white list
 * @QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_SET_EXTSCAN_ROAM_PARAMS: roam params
 * @QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_SET_LAZY_ROAM: set lazy roam
 * @QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_SET_BSSID_PREFS: set bssid prefs
 * @QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_SET_BSSID_PARAMS: set bssid params
 * @QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_SET_BLACKLIST_BSSID: set blacklist bssid
 * @QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_AFTER_LAST: after last
 * @QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_MAX: subcmd max
 */
enum qca_wlan_vendor_attr_roam_subcmd {
	QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_INVALID = 0,
	QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_SSID_WHITE_LIST = 1,
	QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_SET_EXTSCAN_ROAM_PARAMS = 2,
	QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_SET_LAZY_ROAM = 3,
	QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_SET_BSSID_PREFS = 4,
	QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_SET_BSSID_PARAMS = 5,
	QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_SET_BLACKLIST_BSSID = 6,

	/* KEEP LAST */
	QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_AFTER_LAST,
	QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_MAX =
		QCA_WLAN_VENDOR_ATTR_ROAM_SUBCMD_AFTER_LAST - 1,
};

/**
 * enum qca_wlan_vendor_attr_get_wifi_info - wifi driver information
 *
 * @QCA_WLAN_VENDOR_ATTR_WIFI_INFO_GET_INVALID: Invalid initial value
 * @QCA_WLAN_VENDOR_ATTR_WIFI_INFO_DRIVER_VERSION: get host driver version
 * @QCA_WLAN_VENDOR_ATTR_WIFI_INFO_FIRMWARE_VERSION: get firmware version
 * @QCA_WLAN_VENDOR_ATTR_WIFI_INFO_RADIO_INDEX: get radio index
 * @QCA_WLAN_VENDOR_ATTR_WIFI_INFO_GET_AFTER_LAST: after last
 * @QCA_WLAN_VENDOR_ATTR_WIFI_INFO_GET_MAX: subcmd max
 */
enum qca_wlan_vendor_attr_get_wifi_info {
	QCA_WLAN_VENDOR_ATTR_WIFI_INFO_GET_INVALID = 0,
	QCA_WLAN_VENDOR_ATTR_WIFI_INFO_DRIVER_VERSION = 1,
	QCA_WLAN_VENDOR_ATTR_WIFI_INFO_FIRMWARE_VERSION = 2,
	QCA_WLAN_VENDOR_ATTR_WIFI_INFO_RADIO_INDEX = 3,

	/* KEEP LAST */
	QCA_WLAN_VENDOR_ATTR_WIFI_INFO_GET_AFTER_LAST,
	QCA_WLAN_VENDOR_ATTR_WIFI_INFO_GET_MAX =
		QCA_WLAN_VENDOR_ATTR_WIFI_INFO_GET_AFTER_LAST - 1,
};

/* Attributes carried with firmware memory-dump / logger results */
enum qca_wlan_vendor_attr_logger_results {
	QCA_WLAN_VENDOR_ATTR_LOGGER_RESULTS_INVALID = 0,

	/*
	 * Unsigned 32-bit value; must match the request Id supplied by
	 * Wi-Fi HAL in the corresponding subcmd NL msg.
	 */
	QCA_WLAN_VENDOR_ATTR_LOGGER_RESULTS_REQUEST_ID = 1,

	/*
	 * Unsigned 32-bit value; used to indicate the size of memory
	 * dump to be allocated.
	 */
	QCA_WLAN_VENDOR_ATTR_LOGGER_RESULTS_MEMDUMP_SIZE = 2,

	/* keep last */
	QCA_WLAN_VENDOR_ATTR_LOGGER_RESULTS_AFTER_LAST,
	QCA_WLAN_VENDOR_ATTR_LOGGER_RESULTS_MAX =
		QCA_WLAN_VENDOR_ATTR_LOGGER_RESULTS_AFTER_LAST - 1,
};

/**
 * qca_wlan_vendor_channel_prop_flags: This represents the flags for a channel.
 * This is used by QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FLAGS.
 */
enum qca_wlan_vendor_channel_prop_flags {
	/* Bits 0, 1, 2, and 3 are reserved */

	/* Turbo channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_TURBO = 1 << 4,
	/* CCK channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_CCK = 1 << 5,
	/* OFDM channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_OFDM = 1 << 6,
	/* 2.4 GHz spectrum channel.
	 */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_2GHZ = 1 << 7,
	/* 5 GHz spectrum channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_5GHZ = 1 << 8,
	/* Only passive scan allowed */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_PASSIVE = 1 << 9,
	/* Dynamic CCK-OFDM channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_DYN = 1 << 10,
	/* GFSK channel (FHSS PHY) */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_GFSK = 1 << 11,
	/* Radar found on channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_RADAR = 1 << 12,
	/* 11a static turbo channel only */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_STURBO = 1 << 13,
	/* Half rate channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HALF = 1 << 14,
	/* Quarter rate channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_QUARTER = 1 << 15,
	/* HT 20 channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HT20 = 1 << 16,
	/* HT 40 with extension channel above */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HT40PLUS = 1 << 17,
	/* HT 40 with extension channel below */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HT40MINUS = 1 << 18,
	/* HT 40 intolerant */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HT40INTOL = 1 << 19,
	/* VHT 20 channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_VHT20 = 1 << 20,
	/* VHT 40 with extension channel above */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_VHT40PLUS = 1 << 21,
	/* VHT 40 with extension channel below */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_VHT40MINUS = 1 << 22,
	/* VHT 80 channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_VHT80 = 1 << 23,
	/* HT 40 intolerant mark bit for ACS use */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HT40INTOLMARK = 1 << 24,
	/* Channel temporarily blocked due to noise */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_BLOCKED = 1 << 25,
	/* VHT 160 channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_VHT160 = 1 << 26,
	/* VHT 80+80 channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_VHT80_80 = 1 << 27,
	/* HE 20 channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HE20 = 1 << 28,
	/* HE 40 with extension channel above */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HE40PLUS = 1 << 29,
	/* HE 40 with extension channel below */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HE40MINUS = 1 << 30,
	/* HE 40 intolerant.
	 * NOTE(review): 1 << 31 left-shifts into the sign bit of int, which
	 * is undefined behavior in standard C; consider 1U << 31 if the ABI
	 * allows - confirm against the driver build settings.
	 */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HE40INTOL = 1 << 31,
};

/**
 * qca_wlan_vendor_channel_prop_flags_2: This represents the flags for a
 * channel, and is a continuation of qca_wlan_vendor_channel_prop_flags. This is
 * used by QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FLAGS_2.
 */
enum qca_wlan_vendor_channel_prop_flags_2 {
	/* HE 40 intolerant mark bit for ACS use */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HE40INTOLMARK = 1 << 0,
	/* HE 80 channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HE80 = 1 << 1,
	/* HE 160 channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HE160 = 1 << 2,
	/* HE 80+80 channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_HE80_80 = 1 << 3,
};

/**
 * qca_wlan_vendor_channel_prop_flags_ext: This represents the extended flags
 * for each channel. This is used by
 * QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FLAG_EXT.
 */
enum qca_wlan_vendor_channel_prop_flags_ext {
	/* Radar found on channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_RADAR_FOUND = 1 << 0,
	/* DFS required on channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_DFS = 1 << 1,
	/* DFS required on channel for 2nd band of 80+80 */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_DFS_CFREQ2 = 1 << 2,
	/* If channel has been checked for DFS */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_DFS_CLEAR = 1 << 3,
	/* Excluded in 802.11d */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_11D_EXCLUDED = 1 << 4,
	/* Channel Switch Announcement received on this channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_CSA_RECEIVED = 1 << 5,
	/* Ad-hoc is not allowed */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_DISALLOW_ADHOC = 1 << 6,
	/* Station only channel */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_DISALLOW_HOSTAP = 1 << 7,
	/* DFS radar history for slave device (STA mode) */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_HISTORY_RADAR = 1 << 8,
	/* DFS CAC valid for slave device (STA mode) */
	QCA_WLAN_VENDOR_CHANNEL_PROP_FLAG_EXT_CAC_VALID = 1 << 9,
};

/**
 * qca_attr_nud_stats_set: Attributes to vendor subcmd
 * QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_SET. This carries the requisite
 * information to start/stop the NUD statistics collection.
 */
enum qca_attr_nud_stats_set {
	QCA_ATTR_NUD_STATS_SET_INVALID = 0,

	/*
	 * Flag to start/stop the NUD statistics collection.
	 * Start - If included, Stop - If not included
	 */
	QCA_ATTR_NUD_STATS_SET_START = 1,
	/* IPv4 address of the default gateway (in network byte order) */
	QCA_ATTR_NUD_STATS_GW_IPV4 = 2,
	/*
	 * Represents the data packet type to be monitored.
	 * Host driver tracks the stats corresponding to each data frame
	 * represented by these flags.
	 * These data packets are represented by
	 * enum qca_wlan_vendor_nud_stats_set_data_pkt_info.
	 */
	QCA_ATTR_NUD_STATS_SET_DATA_PKT_INFO = 3,
	/* keep last */
	QCA_ATTR_NUD_STATS_SET_LAST,
	QCA_ATTR_NUD_STATS_SET_MAX =
		QCA_ATTR_NUD_STATS_SET_LAST - 1,
};

/**
 * enum qca_attr_connectivity_check_stats_set - attribute to vendor subcmd
 * QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_SET. This carries the requisite
 * information to start / stop the connectivity check stats collection.
 *
 * NOTE(review): the original per-attribute descriptions below were
 * copy-pasted from the NUD gateway attributes; they are reworded here from
 * the matching attributes of enum qca_attr_connectivity_check_stats -
 * verify against the driver implementation.
 *
 * @QCA_ATTR_CONNECTIVITY_CHECK_STATS_STATS_PKT_INFO_TYPE: Bitmap of data
 * packet types to monitor (values from
 * enum qca_wlan_vendor_connectivity_check_pkt_flags)
 * @QCA_ATTR_CONNECTIVITY_CHECK_STATS_DNS_DOMAIN_NAME: DNS domain name for
 * which the DNS stats are to be collected
+ * @QCA_ATTR_CONNECTIVITY_CHECK_STATS_SRC_PORT: set nud debug stats + * Flag to Start / Stop the NUD stats collection + * Start - If included , Stop - If not included + * @QCA_ATTR_CONNECTIVITY_CHECK_STATS_DEST_PORT: set gatway ipv4 address + * IPv4 address of Default Gateway (in network byte order) + * QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_SET. This carry the requisite + * information to start / stop the NUD stats collection. + * @QCA_ATTR_CONNECTIVITY_CHECK_STATS_DEST_IPV4: set nud debug stats + * Flag to Start / Stop the NUD stats collection + * Start - If included , Stop - If not included + * @QCA_ATTR_CONNECTIVITY_CHECK_STATS_DEST_IPV6: set gatway ipv4 address + * IPv4 address of Default Gateway (in network byte order) + */ +enum qca_attr_connectivity_check_stats_set { + QCA_ATTR_CONNECTIVITY_CHECK_STATS_SET_INVALID = 0, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_STATS_PKT_INFO_TYPE = 1, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_DNS_DOMAIN_NAME = 2, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_SRC_PORT = 3, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_DEST_PORT = 4, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_DEST_IPV4 = 5, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_DEST_IPV6 = 6, + /* keep last */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_SET_LAST, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_SET_MAX = + QCA_ATTR_CONNECTIVITY_CHECK_STATS_SET_LAST - 1, +}; + +/** + * qca_wlan_vendor_nud_stats_data_pkt_flags: Flag representing the various + * data types for which the stats have to get collected. + */ +enum qca_wlan_vendor_connectivity_check_pkt_flags { + QCA_WLAN_VENDOR_CONNECTIVITY_CHECK_SET_ARP = 1 << 0, + QCA_WLAN_VENDOR_CONNECTIVITY_CHECK_SET_DNS = 1 << 1, + QCA_WLAN_VENDOR_CONNECTIVITY_CHECK_SET_TCP_HANDSHAKE = 1 << 2, + QCA_WLAN_VENDOR_CONNECTIVITY_CHECK_SET_ICMPV4 = 1 << 3, + QCA_WLAN_VENDOR_CONNECTIVITY_CHECK_SET_ICMPV6 = 1 << 4, + /* Used by QCA_ATTR_NUD_STATS_PKT_TYPE only in nud stats get + * to represent the stats of respective data type. 
+ */ + QCA_WLAN_VENDOR_CONNECTIVITY_CHECK_SET_TCP_SYN = 1 << 5, + QCA_WLAN_VENDOR_CONNECTIVITY_CHECK_SET_TCP_SYN_ACK = 1 << 6, + QCA_WLAN_VENDOR_CONNECTIVITY_CHECK_SET_TCP_ACK = 1 << 7, +}; + +enum qca_attr_connectivity_check_stats { + QCA_ATTR_CONNECTIVITY_CHECK_STATS_INVALID = 0, + /* Data packet type for which the stats are collected. + * Represented by enum qca_wlan_vendor_nud_stats_data_pkt_flags + */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_TYPE = 1, + /* ID corresponding to the DNS frame for which the respective DNS stats + * are monitored (u32). + */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_DNS_DOMAIN_NAME = 2, + /* source / destination port on which the respective proto stats are + * collected (u32). + */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_SRC_PORT = 3, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_DEST_PORT = 4, + /* IPv4/IPv6 address for which the destined data packets are + * monitored. (in network byte order) + */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_DEST_IPV4 = 5, + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_DEST_IPV6 = 6, + /* Data packet Request count received from netdev */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_REQ_COUNT_FROM_NETDEV = 7, + /* Data packet Request count sent to lower MAC from upper MAC */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_REQ_COUNT_TO_LOWER_MAC = 8, + /* Data packet Request count received by lower MAC from upper MAC */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_REQ_RX_COUNT_BY_LOWER_MAC = 9, + /* Data packet Request count successfully transmitted by the device */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_REQ_COUNT_TX_SUCCESS = 10, + /* Data packet Response count received by lower MAC */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_RSP_RX_COUNT_BY_LOWER_MAC = 11, + /* Data packet Response count received by upper MAC */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_RSP_RX_COUNT_BY_UPPER_MAC = 12, + /* Data packet Response count delivered to netdev */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_RSP_COUNT_TO_NETDEV = 13, + /* Data Packet Response count 
that are dropped out of order */ + QCA_ATTR_CONNECTIVITY_CHECK_STATS_PKT_RSP_COUNT_OUT_OF_ORDER_DROP = 14, + + /* keep last */ + QCA_ATTR_CONNECTIVITY_CHECK_DATA_STATS_LAST, + QCA_ATTR_CONNECTIVITY_CHECK_DATA_STATS_MAX = + QCA_ATTR_CONNECTIVITY_CHECK_DATA_STATS_LAST - 1, +}; + +/** + * qca_attr_nud_stats_get: Attributes to vendor subcmd + * QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_GET. This carries the requisite + * NUD statistics collected when queried. + */ +enum qca_attr_nud_stats_get { + QCA_ATTR_NUD_STATS_GET_INVALID = 0, + /* ARP Request count from netdev */ + QCA_ATTR_NUD_STATS_ARP_REQ_COUNT_FROM_NETDEV = 1, + /* ARP Request count sent to lower MAC from upper MAC */ + QCA_ATTR_NUD_STATS_ARP_REQ_COUNT_TO_LOWER_MAC = 2, + /* ARP Request count received by lower MAC from upper MAC */ + QCA_ATTR_NUD_STATS_ARP_REQ_RX_COUNT_BY_LOWER_MAC = 3, + /* ARP Request count successfully transmitted by the device */ + QCA_ATTR_NUD_STATS_ARP_REQ_COUNT_TX_SUCCESS = 4, + /* ARP Response count received by lower MAC */ + QCA_ATTR_NUD_STATS_ARP_RSP_RX_COUNT_BY_LOWER_MAC = 5, + /* ARP Response count received by upper MAC */ + QCA_ATTR_NUD_STATS_ARP_RSP_RX_COUNT_BY_UPPER_MAC = 6, + /* ARP Response count delivered to netdev */ + QCA_ATTR_NUD_STATS_ARP_RSP_COUNT_TO_NETDEV = 7, + /* ARP Response count delivered to netdev */ + QCA_ATTR_NUD_STATS_ARP_RSP_COUNT_OUT_OF_ORDER_DROP = 8, + /* + * Flag indicating if the station's link to the AP is active. + * Active Link - If included, Inactive link - If not included + */ + QCA_ATTR_NUD_STATS_AP_LINK_ACTIVE = 9, + /* + * Flag indicating if there is any duplicate address detected (DAD). + * Yes - If detected, No - If not detected. + */ + QCA_ATTR_NUD_STATS_IS_DAD = 10, + /* + * List of Data types for which the stats are requested. + * This list does not carry ARP stats as they are done by the + * above attributes. Represented by enum qca_attr_nud_data_stats. 
+ */ + QCA_ATTR_NUD_STATS_DATA_PKT_STATS = 11, + /* keep last */ + QCA_ATTR_NUD_STATS_GET_LAST, + QCA_ATTR_NUD_STATS_GET_MAX = + QCA_ATTR_NUD_STATS_GET_LAST - 1, +}; + +enum qca_wlan_btm_candidate_status { + QCA_STATUS_ACCEPT = 0, + QCA_STATUS_REJECT_EXCESSIVE_FRAME_LOSS_EXPECTED = 1, + QCA_STATUS_REJECT_EXCESSIVE_DELAY_EXPECTED = 2, + QCA_STATUS_REJECT_INSUFFICIENT_QOS_CAPACITY = 3, + QCA_STATUS_REJECT_LOW_RSSI = 4, + QCA_STATUS_REJECT_HIGH_INTERFERENCE = 5, + QCA_STATUS_REJECT_UNKNOWN = 6, +}; + +enum qca_wlan_vendor_attr_btm_candidate_info { + QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_INVALID = 0, + + /* 6-byte MAC address representing the BSSID of transition candidate */ + QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_BSSID = 1, + /* + * Unsigned 32-bit value from enum qca_wlan_btm_candidate_status + * returned by the driver. It says whether the BSSID provided in + * QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_BSSID is acceptable by + * the driver, if not it specifies the reason for rejection. + * Note that the user-space can overwrite the transition reject reason + * codes provided by driver based on more information. + */ + QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_STATUS = 2, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_MAX = + QCA_WLAN_VENDOR_ATTR_BTM_CANDIDATE_INFO_AFTER_LAST - 1, +}; + +enum qca_attr_trace_level { + QCA_ATTR_TRACE_LEVEL_INVALID = 0, + /* + * Nested array of the following attributes: + * QCA_ATTR_TRACE_LEVEL_MODULE, + * QCA_ATTR_TRACE_LEVEL_MASK. + */ + QCA_ATTR_TRACE_LEVEL_PARAM = 1, + /* + * Specific QCA host driver module. Please refer to the QCA host + * driver implementation to get the specific module ID. + */ + QCA_ATTR_TRACE_LEVEL_MODULE = 2, + /* Different trace level masks represented in the QCA host driver. 
*/ + QCA_ATTR_TRACE_LEVEL_MASK = 3, + + /* keep last */ + QCA_ATTR_TRACE_LEVEL_AFTER_LAST, + QCA_ATTR_TRACE_LEVEL_MAX = + QCA_ATTR_TRACE_LEVEL_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_get_logger_features - value for logger + * supported features + * @QCA_WLAN_VENDOR_ATTR_LOGGER_INVALID - Invalid + * @QCA_WLAN_VENDOR_ATTR_LOGGER_SUPPORTED - Indicate the supported features + * @QCA_WLAN_VENDOR_ATTR_LOGGER_AFTER_LAST - To keep track of the last enum + * @QCA_WLAN_VENDOR_ATTR_LOGGER_MAX - max value possible for this type + * + * enum values are used for NL attributes for data used by + * QCA_NL80211_VENDOR_SUBCMD_GET_LOGGER_FEATURE_SET sub command. + */ +enum qca_wlan_vendor_attr_get_logger_features { + QCA_WLAN_VENDOR_ATTR_LOGGER_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_LOGGER_SUPPORTED = 1, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_LOGGER_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LOGGER_MAX = + QCA_WLAN_VENDOR_ATTR_LOGGER_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_link_properties - link properties + * + * @QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_INVALID: Invalid initial value + * @QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_NSS: Unsigned 8-bit value to + * specify the number of spatial streams negotiated + * @QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_RATE_FLAGS: Unsigned 8-bit value + * to specify negotiated rate flags i.e. 
ht, vht and channel width + * @QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_FREQ: Unsigned 32bit value to + * specify the operating frequency + * @QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_AFTER_LAST: after last + * @QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_MAX: max value + */ +enum qca_wlan_vendor_attr_link_properties { + QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_NSS = 1, + QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_RATE_FLAGS = 2, + QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_FREQ = 3, + + /* KEEP LAST */ + QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_MAX = + QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_nd_offload - vendor NS offload support + * + * @QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_INVALID - Invalid + * @QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_FLAG - Flag to set NS offload + * @QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_AFTER_LAST - To keep track of the last enum + * @QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_MAX - max value possible for this type + * + * enum values are used for NL attributes for data used by + * QCA_NL80211_VENDOR_SUBCMD_ND_OFFLOAD sub command. + */ +enum qca_wlan_vendor_attr_nd_offload { + QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_FLAG, + + /* Keep last */ + QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_MAX = + QCA_WLAN_VENDOR_ATTR_ND_OFFLOAD_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_features - vendor device/driver features + * @QCA_WLAN_VENDOR_FEATURE_KEY_MGMT_OFFLOAD: Device supports key + * management offload, a mechanism where the station's firmware + * does the exchange with the AP to establish the temporal keys + * after roaming, rather than having the supplicant do it. + * @QCA_WLAN_VENDOR_FEATURE_OFFCHANNEL_SIMULTANEOUS: Device supports + * simultaneous off-channel operations. 
+ * @QCA_WLAN_VENDOR_FEATURE_P2P_LISTEN_OFFLOAD: Device supports P2P
+ */ +enum qca_wlan_vendor_features { + QCA_WLAN_VENDOR_FEATURE_KEY_MGMT_OFFLOAD = 0, + QCA_WLAN_VENDOR_FEATURE_SUPPORT_HW_MODE_ANY = 1, + QCA_WLAN_VENDOR_FEATURE_OFFCHANNEL_SIMULTANEOUS = 2, + QCA_WLAN_VENDOR_FEATURE_P2P_LISTEN_OFFLOAD = 3, + QCA_WLAN_VENDOR_FEATURE_OCE_STA = 4, + QCA_WLAN_VENDOR_FEATURE_OCE_AP = 5, + QCA_WLAN_VENDOR_FEATURE_OCE_STA_CFON = 6, + QCA_WLAN_VENDOR_FEATURE_SELF_MANAGED_REGULATORY = 7, + QCA_WLAN_VENDOR_FEATURE_TWT = 8, + /* Additional features need to be added above this */ + NUM_QCA_WLAN_VENDOR_FEATURES +}; + +/** + * enum qca_wlan_vendor_attr_sap_conditional_chan_switch - Parameters for SAP + * conditional channel switch + * @QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_INVALID: Invalid initial + * value + * @QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_FREQ_LIST: Priority based + * frequency list (an array of u32 values in host byte order) + * @QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_STATUS: Status of the + * conditional switch (u32)- 0: Success, Non-zero: Failure + * @QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_AFTER_LAST: After last + * @QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_MAX: Subcommand max + */ +enum qca_wlan_vendor_attr_sap_conditional_chan_switch { + QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_FREQ_LIST = 1, + QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_STATUS = 2, + + /* Keep Last */ + QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_MAX = + QCA_WLAN_VENDOR_ATTR_SAP_CONDITIONAL_CHAN_SWITCH_AFTER_LAST - 1, +}; + +/** + * enum wifi_logger_supported_features - values for supported logger features + * @WIFI_LOGGER_MEMORY_DUMP_SUPPORTED: Memory dump of FW + * @WIFI_LOGGER_PER_PACKET_TX_RX_STATUS_SUPPORTED: Per packet statistics + * @WIFI_LOGGER_CONNECT_EVENT_SUPPORTED: Logging of Connectivity events + * @WIFI_LOGGER_POWER_EVENT_SUPPORTED: Power of driver + * 
@WIFI_LOGGER_WAKE_LOCK_SUPPORTED: Wakelock of driver
+ * @WIFI_LOGGER_VERBOSE_SUPPORTED: verbose logging supported
+ * @WIFI_LOGGER_WATCHDOG_TIMER_SUPPORTED: monitor FW health
@QCA_ACS_MODE_IEEE80211A: 11a mode + * @QCA_ACS_MODE_IEEE80211AD: 11ad mode + */ +enum qca_wlan_vendor_acs_hw_mode { + QCA_ACS_MODE_IEEE80211B, + QCA_ACS_MODE_IEEE80211G, + QCA_ACS_MODE_IEEE80211A, + QCA_ACS_MODE_IEEE80211AD, + QCA_ACS_MODE_IEEE80211ANY, +}; + +/** + * enum qca_access_policy - access control policy + * + * Access control policy is applied on the configured IE + * (QCA_WLAN_VENDOR_ATTR_CONFIG_ACCESS_POLICY_IE). + * To be set with QCA_WLAN_VENDOR_ATTR_CONFIG_ACCESS_POLICY. + * + * @QCA_ACCESS_POLICY_ACCEPT_UNLESS_LISTED: Deny Wi-Fi Connections which match + *» with the specific configuration (IE) set, i.e. allow all the + *» connections which do not match the configuration. + * @QCA_ACCESS_POLICY_DENY_UNLESS_LISTED: Accept Wi-Fi Connections which match + *» with the specific configuration (IE) set, i.e. deny all the + *» connections which do not match the configuration. + */ +enum qca_access_policy { + QCA_ACCESS_POLICY_ACCEPT_UNLESS_LISTED, + QCA_ACCESS_POLICY_DENY_UNLESS_LISTED, +}; + +/** + * enum qca_ignore_assoc_disallowed - Ignore assoc disallowed values + * + * The valid values for the ignore assoc disallowed + * + * @QCA_IGNORE_ASSOC_DISALLOWED_DISABLE: Disable ignore assoc disallowed + * @QCA_IGNORE_ASSOC_DISALLOWED_ENABLE: Enable ignore assoc disallowed + * + */ +enum qca_ignore_assoc_disallowed { + QCA_IGNORE_ASSOC_DISALLOWED_DISABLE, + QCA_IGNORE_ASSOC_DISALLOWED_ENABLE +}; + +/* Attributes for data used by + * QCA_NL80211_VENDOR_SUBCMD_SET_WIFI_CONFIGURATION and + * QCA_NL80211_VENDOR_SUBCMD_GET_WIFI_CONFIGURATION subcommands. + */ +enum qca_wlan_vendor_attr_config { + QCA_WLAN_VENDOR_ATTR_CONFIG_INVALID = 0, + /* + * Unsigned 32-bit value to set the DTIM period. + * Whether the wifi chipset wakes at every dtim beacon or a multiple of + * the DTIM period. If DTIM is set to 3, the STA shall wake up every 3 + * DTIM beacons. 
+ */ + QCA_WLAN_VENDOR_ATTR_CONFIG_MODULATED_DTIM = 1, + /* + * Unsigned 32-bit value to set the wifi_iface stats averaging factor + * used to calculate statistics like average the TSF offset or average + * number of frame leaked. + * For instance, upon Beacon frame reception: + * current_avg = ((beacon_TSF - TBTT) * factor + previous_avg * (0x10000 - factor) ) / 0x10000 + * For instance, when evaluating leaky APs: + * current_avg = ((num frame received within guard time) * factor + previous_avg * (0x10000 - factor)) / 0x10000 + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_STATS_AVG_FACTOR = 2, + /* + * Unsigned 32-bit value to configure guard time, i.e., when + * implementing IEEE power management based on frame control PM bit, how + * long the driver waits before shutting down the radio and after + * receiving an ACK frame for a Data frame with PM bit set. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_GUARD_TIME = 3, + /* Unsigned 32-bit value to change the FTM capability dynamically */ + QCA_WLAN_VENDOR_ATTR_CONFIG_FINE_TIME_MEASUREMENT = 4, + /* Unsigned 16-bit value to configure maximum TX rate dynamically */ + QCA_WLAN_VENDOR_ATTR_CONF_TX_RATE = 5, + /* + * Unsigned 32-bit value to configure the number of continuous + * Beacon Miss which shall be used by the firmware to penalize + * the RSSI. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_PENALIZE_AFTER_NCONS_BEACON_MISS = 6, + /* + * Unsigned 8-bit value to configure the channel avoidance indication + * behavior. Firmware to send only one indication and ignore duplicate + * indications when set to avoid multiple Apps wakeups. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_CHANNEL_AVOIDANCE_IND = 7, + /* + * 8-bit unsigned value to configure the maximum TX MPDU for + * aggregation. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_TX_MPDU_AGGREGATION = 8, + /* + * 8-bit unsigned value to configure the maximum RX MPDU for + * aggregation. 
+ */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RX_MPDU_AGGREGATION = 9, + /* + * 8-bit unsigned value to configure the Non aggregrate/11g sw + * retry threshold (0 disable, 31 max). + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_NON_AGG_RETRY = 10, + /* + * 8-bit unsigned value to configure the aggregrate sw + * retry threshold (0 disable, 31 max). + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_AGG_RETRY = 11, + /* + * 8-bit unsigned value to configure the MGMT frame + * retry threshold (0 disable, 31 max). + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_MGMT_RETRY = 12, + /* + * 8-bit unsigned value to configure the CTRL frame + * retry threshold (0 disable, 31 max). + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_CTRL_RETRY = 13, + /* + * 8-bit unsigned value to configure the propagation delay for + * 2G/5G band (0~63, units in us) + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_PROPAGATION_DELAY = 14, + /* + * Unsigned 32-bit value to configure the number of unicast TX fail + * packet count. The peer is disconnected once this threshold is + * reached. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_TX_FAIL_COUNT = 15, + /* + * Attribute used to set scan default IEs to the driver. + * + * These IEs can be used by scan operations that will be initiated by + * the driver/firmware. + * + * For further scan requests coming to the driver, these IEs should be + * merged with the IEs received along with scan request coming to the + * driver. If a particular IE is present in the scan default IEs but not + * present in the scan request, then that IE should be added to the IEs + * sent in the Probe Request frames for that scan request. 
+ */ + QCA_WLAN_VENDOR_ATTR_CONFIG_SCAN_DEFAULT_IES = 16, + /* Unsigned 32-bit attribute for generic commands */ + QCA_WLAN_VENDOR_ATTR_CONFIG_GENERIC_COMMAND = 17, + /* Unsigned 32-bit value attribute for generic commands */ + QCA_WLAN_VENDOR_ATTR_CONFIG_GENERIC_VALUE = 18, + /* Unsigned 32-bit data attribute for generic command response */ + QCA_WLAN_VENDOR_ATTR_CONFIG_GENERIC_DATA = 19, + /* + * Unsigned 32-bit length attribute for + * QCA_WLAN_VENDOR_ATTR_CONFIG_GENERIC_DATA + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_GENERIC_LENGTH = 20, + /* + * Unsigned 32-bit flags attribute for + * QCA_WLAN_VENDOR_ATTR_CONFIG_GENERIC_DATA + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_GENERIC_FLAGS = 21, + /* + * Unsigned 32-bit, defining the access policy. + * See enum qca_access_policy. Used with + * QCA_WLAN_VENDOR_ATTR_CONFIG_ACCESS_POLICY_IE_LIST. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ACCESS_POLICY = 22, + /* + * Sets the list of full set of IEs for which a specific access policy + * has to be applied. Used along with + * QCA_WLAN_VENDOR_ATTR_CONFIG_ACCESS_POLICY to control the access. + * Zero length payload can be used to clear this access constraint. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ACCESS_POLICY_IE_LIST = 23, + /* + * Unsigned 32-bit, specifies the interface index (netdev) for which the + * corresponding configurations are applied. If the interface index is + * not specified, the configurations are attributed to the respective + * wiphy. 
+ */ + QCA_WLAN_VENDOR_ATTR_CONFIG_IFINDEX = 24, + /* 8-bit unsigned value to trigger QPower: 1-Enable, 0-Disable */ + QCA_WLAN_VENDOR_ATTR_CONFIG_QPOWER = 25, + /* + * 8-bit unsigned value to configure the driver and below layers to + * ignore the assoc disallowed set by APs while connecting + * 1-Ignore, 0-Don't ignore + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_IGNORE_ASSOC_DISALLOWED = 26, + /* + * 32-bit unsigned value to trigger antenna diversity features: + * 1-Enable, 0-Disable + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_ENA = 27, + /* 32-bit unsigned value to configure specific chain antenna */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_CHAIN = 28, + /* + * 32-bit unsigned value to trigger cycle selftest + * 1-Enable, 0-Disable + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_SELFTEST = 29, + /* + * 32-bit unsigned to configure the cycle time of selftest + * the unit is micro-second + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_SELFTEST_INTVL = 30, + /* 32-bit unsigned value to set reorder timeout for AC_VO */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RX_REORDER_TIMEOUT_VOICE = 31, + /* 32-bit unsigned value to set reorder timeout for AC_VI */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RX_REORDER_TIMEOUT_VIDEO = 32, + /* 32-bit unsigned value to set reorder timeout for AC_BE */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RX_REORDER_TIMEOUT_BESTEFFORT = 33, + /* 32-bit unsigned value to set reorder timeout for AC_BK */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RX_REORDER_TIMEOUT_BACKGROUND = 34, + /* 6-byte MAC address to point out the specific peer */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RX_BLOCKSIZE_PEER_MAC = 35, + /* 32-bit unsigned value to set window size for specific peer */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RX_BLOCKSIZE_WINLIMIT = 36, + /* 8-bit unsigned value to set the beacon miss threshold in 2.4 GHz */ + QCA_WLAN_VENDOR_ATTR_CONFIG_BEACON_MISS_THRESHOLD_24 = 37, + /* 8-bit unsigned value to set the beacon miss threshold in 5 GHz */ + QCA_WLAN_VENDOR_ATTR_CONFIG_BEACON_MISS_THRESHOLD_5 = 38, + /* + * 32-bit unsigned value to 
configure 5 or 10 MHz channel width for + * station device while in disconnect state. The attribute use the + * value of enum nl80211_chan_width: NL80211_CHAN_WIDTH_5 means 5 MHz, + * NL80211_CHAN_WIDTH_10 means 10 MHz. If set, the device work in 5 or + * 10 MHz channel width, the station will not connect to a BSS using 20 + * MHz or higher bandwidth. Set to NL80211_CHAN_WIDTH_20_NOHT to + * clear this constraint. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_SUB20_CHAN_WIDTH = 39, + /* + * 32-bit unsigned value to configure the propagation absolute delay + * for 2G/5G band (units in us) + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_PROPAGATION_ABS_DELAY = 40, + /* 32-bit unsigned value to set probe period */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_PROBE_PERIOD = 41, + /* 32-bit unsigned value to set stay period */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_STAY_PERIOD = 42, + /* 32-bit unsigned value to set snr diff */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_SNR_DIFF = 43, + /* 32-bit unsigned value to set probe dwell time */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_PROBE_DWELL_TIME = 44, + /* 32-bit unsigned value to set mgmt snr weight */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_MGMT_SNR_WEIGHT = 45, + /* 32-bit unsigned value to set data snr weight */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_DATA_SNR_WEIGHT = 46, + /* 32-bit unsigned value to set ack snr weight */ + QCA_WLAN_VENDOR_ATTR_CONFIG_ANT_DIV_ACK_SNR_WEIGHT = 47, + /* + * 32-bit unsigned value to configure the listen interval. + * This is in units of beacon intervals. This configuration alters + * the negotiated listen interval with the AP during the connection. + * It is highly recommended to configure a value less than or equal to + * the one negotiated during the association. Configuring any greater + * value can have adverse effects (frame loss, AP disassociating STA, + * etc.). 
+ */ + QCA_WLAN_VENDOR_ATTR_CONFIG_LISTEN_INTERVAL = 48, + /* + * 8 bit unsigned value that is set on an AP/GO virtual interface to + * disable operations that would cause the AP/GO to leave its operating + * channel. + * + * This will restrict the scans to the AP/GO operating channel and the + * channels of the other band, if DBS is supported.A STA/CLI interface + * brought up after this setting is enabled, will be restricted to + * connecting to devices only on the AP/GO interface's operating channel + * or on the other band in DBS case. P2P supported channel list is + * modified, to only include AP interface's operating-channel and the + * channels of the other band if DBS is supported. + * + * These restrictions are only applicable as long as the AP/GO interface + * is alive. If the AP/GO interface is brought down then this + * setting/restriction is forgotten. + * + * If this variable is set on an AP/GO interface while a multi-channel + * concurrent session is active, it has no effect on the operation of + * the current interfaces, other than restricting the scan to the AP/GO + * operating channel and the other band channels if DBS is supported. + * However, if the STA is brought down and restarted then the new STA + * connection will either be formed on the AP/GO channel or on the + * other band in a DBS case. This is because of the scan being + * restricted on these channels as mentioned above. + * + * 1-Disable offchannel operations, 0-Enable offchannel operations. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RESTRICT_OFFCHANNEL = 49, + + /* + * 8 bit unsigned value to enable/disable LRO (Large Receive Offload) + * on an interface. + * 1 - Enable , 0 - Disable. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_LRO = 50, + + /* + * 8 bit unsigned value to globally enable/disable scan + * 1 - Enable, 0 - Disable. 
+ */ + QCA_WLAN_VENDOR_ATTR_CONFIG_SCAN_ENABLE = 51, + + /* 8-bit unsigned value to set the total beacon miss count */ + QCA_WLAN_VENDOR_ATTR_CONFIG_TOTAL_BEACON_MISS_COUNT = 52, + + /* + * Unsigned 32-bit value to configure the number of continuous + * Beacon Miss which shall be used by the firmware to penalize + * the RSSI for BTC. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_PENALIZE_AFTER_NCONS_BEACON_MISS_BTC = 53, + + /* + * 8-bit unsigned value to configure the driver and below layers to + * enable/disable all fils features. + * 0-enable, 1-disable + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_DISABLE_FILS = 54, + + /* 16-bit unsigned value to configure the level of WLAN latency + * module. See enum qca_wlan_vendor_attr_config_latency_level. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL = 55, + + /* + * 8-bit unsigned value indicating the driver to use the RSNE as-is from + * the connect interface. Exclusively used for the scenarios where the + * device is used as a test bed device with special functionality and + * not recommended for production. This helps driver to not validate the + * RSNE passed from user space and thus allow arbitrary IE data to be + * used for testing purposes. + * 1-enable, 0-disable. + * Applications set/reset this configuration. If not reset, this + * parameter remains in use until the driver is unloaded. + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_RSN_IE = 56, + + /* + * 8-bit unsigned value to trigger green Tx power saving. 
+ * 1-Enable, 0-Disable + */ + QCA_WLAN_VENDOR_ATTR_CONFIG_GTX = 57, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_CONFIG_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_CONFIG_MAX = + QCA_WLAN_VENDOR_ATTR_CONFIG_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_wifi_logger_start - Enum for wifi logger starting + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_START_INVALID: Invalid attribute + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_RING_ID: Ring ID + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_VERBOSE_LEVEL: Verbose level + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_FLAGS: Flag + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_START_AFTER_LAST: Last value + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_START_MAX: Max value + */ +enum qca_wlan_vendor_attr_wifi_logger_start { + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_START_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_RING_ID = 1, + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_VERBOSE_LEVEL = 2, + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_FLAGS = 3, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_START_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_START_MAX = + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_START_AFTER_LAST - 1, +}; + +/* + * enum qca_wlan_vendor_attr_wifi_logger_get_ring_data - Get ring data + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_INVALID: Invalid attribute + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_ID: Ring ID + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_AFTER_LAST: Last value + * @QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_MAX: Max value + */ +enum qca_wlan_vendor_attr_wifi_logger_get_ring_data { + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_ID = 1, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_MAX = + QCA_WLAN_VENDOR_ATTR_WIFI_LOGGER_GET_RING_DATA_AFTER_LAST - 1, +}; + +#ifdef WLAN_FEATURE_OFFLOAD_PACKETS +/** + * enum wlan_offloaded_packets_control - control commands + * @WLAN_START_OFFLOADED_PACKETS: start 
offloaded packets + * @WLAN_STOP_OFFLOADED_PACKETS: stop offloaded packets + * + */ +enum wlan_offloaded_packets_control { + WLAN_START_OFFLOADED_PACKETS = 1, + WLAN_STOP_OFFLOADED_PACKETS = 2 +}; + +/** + * enum qca_wlan_vendor_attr_data_offload_ind - Vendor Data Offload Indication + * + * @QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_SESSION: Session corresponding to + * the offloaded data. + * @QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_PROTOCOL: Protocol of the offloaded + * data. + * @QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_EVENT: Event type for the data offload + * indication. + */ +enum qca_wlan_vendor_attr_data_offload_ind { + QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_SESSION, + QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_PROTOCOL, + QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_EVENT, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_MAX = + QCA_WLAN_VENDOR_ATTR_DATA_OFFLOAD_IND_AFTER_LAST - 1 +}; + + +/** + * enum qca_wlan_vendor_attr_offloaded_packets - Used by the vendor command + * QCA_NL80211_VENDOR_SUBCMD_OFFLOADED_PACKETS. 
+ */ +enum qca_wlan_vendor_attr_offloaded_packets { + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_INVALID = 0, + /* + * Takes valid value from the enum + * qca_wlan_offloaded_packets_sending_control + * Unsigned 32-bit value + **/ + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_SENDING_CONTROL, + /* Unsigned 32-bit value */ + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_REQUEST_ID, + /* array of u8 len: Max packet size */ + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_IP_PACKET_DATA, + /* 6-byte MAC address used to represent source MAC address */ + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_SRC_MAC_ADDR, + /* 6-byte MAC address used to represent destination MAC address */ + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_DST_MAC_ADDR, + /* Unsigned 32-bit value, in milli seconds */ + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_PERIOD, + /* + * This attribute is used and optional for specifying + * ethernet protocol type, if not specified it will default to ipv4 + * Unsigned 16-bit value + **/ + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_ETHER_PROTO_TYPE, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_MAX = + QCA_WLAN_VENDOR_ATTR_OFFLOADED_PACKETS_AFTER_LAST - 1, +}; + +#endif + +/** + * enum qca_wlan_rssi_monitoring_control - rssi control commands + * @QCA_WLAN_RSSI_MONITORING_CONTROL_INVALID: invalid + * @QCA_WLAN_RSSI_MONITORING_START: rssi monitoring start + * @QCA_WLAN_RSSI_MONITORING_STOP: rssi monitoring stop + */ +enum qca_wlan_rssi_monitoring_control { + QCA_WLAN_RSSI_MONITORING_CONTROL_INVALID = 0, + QCA_WLAN_RSSI_MONITORING_START, + QCA_WLAN_RSSI_MONITORING_STOP, +}; + +/** + * enum qca_wlan_vendor_attr_rssi_monitoring - rssi monitoring + * @QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_INVALID: Invalid + * @QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_CONTROL: control + * @QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_MAX_RSSI: max rssi + * @QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_MIN_RSSI: min rssi + * @QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_CUR_BSSID: current 
bssid + * @QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_CUR_RSSI: current rssi + * @QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_AFTER_LAST: after last + * @QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_MAX: max + */ +enum qca_wlan_vendor_attr_rssi_monitoring { + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_INVALID = 0, + + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_CONTROL, + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_REQUEST_ID, + + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_MAX_RSSI, + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_MIN_RSSI, + + /* attributes to be used/received in callback */ + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_CUR_BSSID, + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_CUR_RSSI, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_MAX = + QCA_WLAN_VENDOR_ATTR_RSSI_MONITORING_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_ndp_params - Used by the vendor command + * QCA_NL80211_VENDOR_SUBCMD_NDP. + * @QCA_WLAN_VENDOR_ATTR_NDP_PARAM_INVALID + * @QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD: sub commands values in qca_wlan_ndp_sub_cmd + * @QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID: + * @QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_INSTANCE_ID: indicats a service info + * @QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL: channel frequency in MHz + * @QCA_WLAN_VENDOR_ATTR_NDP_PEER_DISCOVERY_MAC_ADDR: Interface Discovery MAC + * address + * @QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR: Interface name on which NDP is being + * created + * @QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_SECURITY: CONFIG_SECURITY is deprecated, use + * NCS_SK_TYPE/PMK/SCID instead + * @QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS: value for QoS + * @QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO: app info + * @QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID: NDP instance Id + * @QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID_ARRAY: Array of instance Ids + * @QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE: initiator/responder NDP response + * code: accept/reject + * @QCA_WLAN_VENDOR_ATTR_NDP_NDI_MAC_ADDR: NDI MAC address + * @QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE: errors 
types returned by + * driver + * @QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE: value error values returned by + * driver + * @QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_CONFIG: Channel setup configuration + * @QCA_WLAN_VENDOR_ATTR_NDP_CSID: Cipher Suite Shared Key Type + * @QCA_WLAN_VENDOR_ATTR_NDP_PMK: PMK_INFO + * @QCA_WLAN_VENDOR_ATTR_NDP_SCID: Security Context Identifier that contains the + * PMKID + * @QCA_WLAN_VENDOR_ATTR_NDP_PASSPHRASE: passphrase + * @QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_NAME: service name + * @QCA_WLAN_VENDOR_ATTR_NDP_SCHEDULE_UPDATE_REASON: bitmap indicating schedule + * update: + * BIT_0: NSS Update + * BIT_1: Channel list update + * @QCA_WLAN_VENDOR_ATTR_NDP_NSS: nss + * @QCA_WLAN_VENDOR_ATTR_NDP_NUM_CHANNELS: NUMBER NDP CHANNEL + * @QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_WIDTH: CHANNEL BANDWIDTH: + * 0:20 MHz, + * 1:40 MHz, + * 2:80 MHz, + * 3:160 MHz + * @QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_INFO: Array of channel/band width + * @QCA_WLAN_VENDOR_ATTR_NDP_PARAMS_AFTER_LAST: id after last valid attribute + * @QCA_WLAN_VENDOR_ATTR_NDP_PARAMS_MAX: max value of this enum type + * @QCA_WLAN_VENDOR_ATTR_NDP_IPV6_ADDR: IPv6 address used by NDP, 16 bytes array + * @QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PORT: Unsigned 16-bit value indicating + * transport port used by NDP. 
+ * @QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PROTOCOL: Unsigned 8-bit value indicating
+ * protocol used by NDP and assigned by the Internet Assigned Numbers Authority
+ * as per: www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
+ */
+enum qca_wlan_vendor_attr_ndp_params {
+ QCA_WLAN_VENDOR_ATTR_NDP_PARAM_INVALID = 0,
+ QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD = 1,
+ QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID = 2,
+ QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_INSTANCE_ID = 3,
+ QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL = 4,
+ QCA_WLAN_VENDOR_ATTR_NDP_PEER_DISCOVERY_MAC_ADDR = 5,
+ QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR = 6,
+ QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_SECURITY = 7,
+ QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS = 8,
+ QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO = 9,
+ QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID = 10,
+ QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID_ARRAY = 11,
+ QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE = 12,
+ QCA_WLAN_VENDOR_ATTR_NDP_NDI_MAC_ADDR = 13,
+ QCA_WLAN_VENDOR_ATTR_NDP_DRV_RESPONSE_STATUS_TYPE = 14,
+ QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE = 15,
+ QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_CONFIG = 16,
+ QCA_WLAN_VENDOR_ATTR_NDP_CSID = 17,
+ QCA_WLAN_VENDOR_ATTR_NDP_PMK = 18,
+ QCA_WLAN_VENDOR_ATTR_NDP_SCID = 19,
+ QCA_WLAN_VENDOR_ATTR_NDP_PASSPHRASE = 20,
+ QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_NAME = 21,
+ QCA_WLAN_VENDOR_ATTR_NDP_SCHEDULE_UPDATE_REASON = 22,
+ QCA_WLAN_VENDOR_ATTR_NDP_NSS = 23,
+ QCA_WLAN_VENDOR_ATTR_NDP_NUM_CHANNELS = 24,
+ QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_WIDTH = 25,
+ QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL_INFO = 26,
+ QCA_WLAN_VENDOR_ATTR_NDP_IPV6_ADDR = 27,
+ QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PORT = 28,
+ QCA_WLAN_VENDOR_ATTR_NDP_TRANSPORT_PROTOCOL = 29,
+
+ /* keep last */
+ QCA_WLAN_VENDOR_ATTR_NDP_PARAMS_AFTER_LAST,
+ QCA_WLAN_VENDOR_ATTR_NDP_PARAMS_MAX =
+ QCA_WLAN_VENDOR_ATTR_NDP_PARAMS_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_ndp_sub_cmd - NDP sub commands types for
+ * QCA_NL80211_VENDOR_SUBCMD_NDP. 
+ * @QCA_WLAN_VENDOR_ATTR_NDP_INVALID: invalid value + * @QCA_WLAN_VENDOR_ATTR_NDP_INTERFACE_CREATE: create a ndi + * @QCA_WLAN_VENDOR_ATTR_NDP_INTERFACE_DELETE: delete a ndi + * @QCA_WLAN_VENDOR_ATTR_NDP_INITIATOR_REQUEST: initiate a ndp session + * @QCA_WLAN_VENDOR_ATTR_NDP_INITIATOR_RESPONSE: response for above + * @QCA_WLAN_VENDOR_ATTR_NDP_RESPONDER_REQUEST: respond to ndp session + * @QCA_WLAN_VENDOR_ATTR_NDP_RESPONDER_RESPONSE: response for above + * @QCA_WLAN_VENDOR_ATTR_NDP_END_REQUEST: initiate a ndp end + * @QCA_WLAN_VENDOR_ATTR_NDP_END_RESPONSE: response for above + * @QCA_WLAN_VENDOR_ATTR_NDP_REQUEST_IND: notify the peer about the end request + * @QCA_WLAN_VENDOR_ATTR_NDP_CONFIRM_IND: confirm the ndp session is complete + * @QCA_WLAN_VENDOR_ATTR_NDP_END_IND: indicate the peer about the end request + * being received + * @QCA_WLAN_VENDOR_ATTR_NDP_SCHEDULE_UPDATE_IND: indicate the peer of schedule + * update + */ +enum qca_wlan_ndp_sub_cmd { + QCA_WLAN_VENDOR_ATTR_NDP_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_NDP_INTERFACE_CREATE = 1, + QCA_WLAN_VENDOR_ATTR_NDP_INTERFACE_DELETE = 2, + QCA_WLAN_VENDOR_ATTR_NDP_INITIATOR_REQUEST = 3, + QCA_WLAN_VENDOR_ATTR_NDP_INITIATOR_RESPONSE = 4, + QCA_WLAN_VENDOR_ATTR_NDP_RESPONDER_REQUEST = 5, + QCA_WLAN_VENDOR_ATTR_NDP_RESPONDER_RESPONSE = 6, + QCA_WLAN_VENDOR_ATTR_NDP_END_REQUEST = 7, + QCA_WLAN_VENDOR_ATTR_NDP_END_RESPONSE = 8, + QCA_WLAN_VENDOR_ATTR_NDP_REQUEST_IND = 9, + QCA_WLAN_VENDOR_ATTR_NDP_CONFIRM_IND = 10, + QCA_WLAN_VENDOR_ATTR_NDP_END_IND = 11, + QCA_WLAN_VENDOR_ATTR_NDP_SCHEDULE_UPDATE_IND = 12 +}; + +/** + * qca_wlan_vendor_external_acs_event_chan_info_attr: Represents per channel + * information. These attributes are sent as part of + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_CHAN_INFO. Each set of the following + * attributes correspond to a single channel. + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FLAGS: A bitmask (u16) + * with flags specified in qca_wlan_vendor_channel_prop_flags_ext. 
+ * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FLAG_EXT: A bitmask (u16) + * with flags specified in qca_wlan_vendor_channel_prop_flags_ext. + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FREQ: frequency + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_MAX_REG_POWER: maximum + * regulatory transmission power + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_MAX_POWER: maximum + * transmission power + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_MIN_POWER: minimum + * transmission power + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_REG_CLASS_ID: regulatory + * class id + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_ANTENNA_GAIN: maximum + * antenna gain in dbm + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_VHT_SEG_0: vht segment 0 + * @QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_VHT_SEG_1: vht segment 1 + * + */ +enum qca_wlan_vendor_external_acs_event_chan_info_attr { + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_INVALID = 0, + + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FLAGS = 1, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FLAG_EXT = 2, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FREQ = 3, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_MAX_REG_POWER = 4, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_MAX_POWER = 5, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_MIN_POWER = 6, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_REG_CLASS_ID = 7, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_ANTENNA_GAIN = 8, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_VHT_SEG_0 = 9, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_VHT_SEG_1 = 10, + /* + * A bitmask (u32) with flags specified in + * enum qca_wlan_vendor_channel_prop_flags_2. 
+ */ + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_FLAGS_2 = 11, + + /* keep last */ + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_LAST, + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_MAX = + QCA_WLAN_VENDOR_EXTERNAL_ACS_EVENT_CHAN_INFO_ATTR_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_start_acs_config: attribute to vendor sub-command + * QCA_NL80211_VENDOR_SUBCMD_START_ACS. This will be triggered by host + * driver. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_REASON: This reason refers to + * qca_wlan_vendor_acs_select_reason. This helps acs module to understand why + * ACS need to be started + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_IS_SPECTRAL_SUPPORTED: Does + * driver supports spectral scanning or not + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_IS_OFFLOAD_ENABLED: Is 11ac is + * offloaded to firmware. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_ADD_CHAN_STATS_SUPPORT: Does driver + * provides additional channel capability as part of scan operation. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_AP_UP:Flag attribute to indicate + * interface status is UP + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_SAP_MODE: Operating mode of + * interface. It takes one of nl80211_iftype values. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_CHAN_WIDTH: This is the upper bound + * of chan width. ACS logic should try to get a channel with specified width + * if not found then look for lower values. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_BAND: nl80211_bands + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_PHY_MODE: PHY/HW mode such as + * a/b/g/n/ac. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_FREQ_LIST: Supported frequency list + * among which ACS should choose best frequency. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_PCL:Preferred Chan List by the + * driver which will have format as array of + * nested values. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_CHAN_INFO: Array of nested attribute + * for each channel. 
It takes attr as defined in + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_POLICY:External ACS policy such as + * PCL mandatory, PCL preferred, etc.It uses values defined in enum + * qca_wlan_vendor_attr_external_acs_policy. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_RROPAVAIL_INFO: Reference RF + * Operating Parameter (RROP) availability information (u16). It uses values + * defined in enum qca_wlan_vendor_attr_rropavail_info. + */ +enum qca_wlan_vendor_attr_external_acs_event { + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_REASON = 1, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_IS_SPECTRAL_SUPPORTED = 2, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_IS_OFFLOAD_ENABLED = 3, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_ADD_CHAN_STATS_SUPPORT = 4, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_AP_UP = 5, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_SAP_MODE = 6, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_CHAN_WIDTH = 7, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_BAND = 8, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_PHY_MODE = 9, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_FREQ_LIST = 10, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_PCL = 11, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_CHAN_INFO = 12, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_POLICY = 13, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_RROPAVAIL_INFO = 14, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_LAST, + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_MAX = + QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_LAST - 1, +}; + +enum qca_iface_type { + QCA_IFACE_TYPE_STA, + QCA_IFACE_TYPE_AP, + QCA_IFACE_TYPE_P2P_CLIENT, + QCA_IFACE_TYPE_P2P_GO, + QCA_IFACE_TYPE_IBSS, + QCA_IFACE_TYPE_TDLS, +}; + +/** + * enum qca_wlan_vendor_attr_pcl_config: attribute to vendor sub-command + * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_PCL. 
+ * @QCA_WLAN_VENDOR_ATTR_PCL_INVALID: invalid value
+ * @QCA_WLAN_VENDOR_ATTR_PCL_CHANNEL: pcl channel number
+ * @QCA_WLAN_VENDOR_ATTR_PCL_WEIGHT: pcl channel weight
+ */
+enum qca_wlan_vendor_attr_pcl_config {
+ QCA_WLAN_VENDOR_ATTR_PCL_INVALID = 0,
+ QCA_WLAN_VENDOR_ATTR_PCL_CHANNEL = 1,
+ QCA_WLAN_VENDOR_ATTR_PCL_WEIGHT = 2,
+};
+
+enum qca_set_band {
+ QCA_SETBAND_AUTO,
+ QCA_SETBAND_5G,
+ QCA_SETBAND_2G,
+};
+
+/**
+ * enum set_reset_packet_filter - set packet filter control commands
+ * @QCA_WLAN_SET_PACKET_FILTER: Set Packet Filter
+ * @QCA_WLAN_GET_PACKET_FILTER: Get Packet filter
+ * @QCA_WLAN_WRITE_PACKET_FILTER: Write packet filter program/data
+ * @QCA_WLAN_READ_PACKET_FILTER: Read packet filter program/data
+ * @QCA_WLAN_ENABLE_PACKET_FILTER: Enable APF interpreter
+ * @QCA_WLAN_DISABLE_PACKET_FILTER: Disable APF interpreter
+ */
+enum set_reset_packet_filter {
+ QCA_WLAN_SET_PACKET_FILTER = 1,
+ QCA_WLAN_GET_PACKET_FILTER = 2,
+ QCA_WLAN_WRITE_PACKET_FILTER = 3,
+ QCA_WLAN_READ_PACKET_FILTER = 4,
+ QCA_WLAN_ENABLE_PACKET_FILTER = 5,
+ QCA_WLAN_DISABLE_PACKET_FILTER = 6,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_packet_filter - APF control commands
+ * @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_INVALID: Invalid
+ * @QCA_WLAN_VENDOR_ATTR_SET_RESET_PACKET_FILTER: Packet filter control
+ * command from enum set_reset_packet_filter
+ * @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_VERSION: Filter Version
+ * @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_SIZE: Total Length
+ * @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_CURRENT_OFFSET: Current offset
+ * @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_PROGRAM: length of APF instructions
+ * @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_PROG_LENGTH: length of the program
+ * section in packet filter buffer
+ */
+enum qca_wlan_vendor_attr_packet_filter {
+ QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_INVALID = 0,
+ QCA_WLAN_VENDOR_ATTR_SET_RESET_PACKET_FILTER,
+ QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_VERSION,
+ QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_ID,
+ QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_SIZE,
+ 
QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_CURRENT_OFFSET, + QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_PROGRAM, + QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_PROG_LENGTH, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_MAX = + QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_wake_stats - wake lock stats + * @QCA_WLAN_VENDOR_ATTR_GET_WAKE_STATS_INVALID: invalid + * @QCA_WLAN_VENDOR_ATTR_TOTAL_CMD_EVENT_WAKE: + * @QCA_WLAN_VENDOR_ATTR_CMD_EVENT_WAKE_CNT_PTR: + * @QCA_WLAN_VENDOR_ATTR_CMD_EVENT_WAKE_CNT_SZ: + * @QCA_WLAN_VENDOR_ATTR_TOTAL_DRIVER_FW_LOCAL_WAKE: + * @QCA_WLAN_VENDOR_ATTR_DRIVER_FW_LOCAL_WAKE_CNT_PTR: + * @QCA_WLAN_VENDOR_ATTR_DRIVER_FW_LOCAL_WAKE_CNT_SZ: + * @QCA_WLAN_VENDOR_ATTR_TOTAL_RX_DATA_WAKE: + * total rx wakeup count + * @QCA_WLAN_VENDOR_ATTR_RX_UNICAST_CNT: + * Total rx unicast packet which woke up host + * @QCA_WLAN_VENDOR_ATTR_RX_MULTICAST_CNT: + * Total rx multicast packet which woke up host + * @QCA_WLAN_VENDOR_ATTR_RX_BROADCAST_CNT: + * Total rx broadcast packet which woke up host + * @QCA_WLAN_VENDOR_ATTR_ICMP_PKT: + * wake icmp packet count + * @QCA_WLAN_VENDOR_ATTR_ICMP6_PKT: + * wake icmp6 packet count + * @QCA_WLAN_VENDOR_ATTR_ICMP6_RA: + * wake icmp6 RA packet count + * @QCA_WLAN_VENDOR_ATTR_ICMP6_NA: + * wake icmp6 NA packet count + * @QCA_WLAN_VENDOR_ATTR_ICMP6_NS: + * wake icmp6 NS packet count + * @QCA_WLAN_VENDOR_ATTR_ICMP4_RX_MULTICAST_CNT: + * Rx wake packet count due to ipv4 multicast + * @QCA_WLAN_VENDOR_ATTR_ICMP6_RX_MULTICAST_CNT: + * Rx wake packet count due to ipv6 multicast + * @QCA_WLAN_VENDOR_ATTR_OTHER_RX_MULTICAST_CNT: + * Rx wake packet count due to non-ipv4 and non-ipv6 packets + * @QCA_WLAN_VENDOR_ATTR_RSSI_BREACH_CNT: + * wake rssi breach packet count + * @QCA_WLAN_VENDOR_ATTR_LOW_RSSI_CNT: + * wake low rssi packet count + * @QCA_WLAN_VENDOR_ATTR_GSCAN_CNT: + * wake gscan packet count + * @QCA_WLAN_VENDOR_ATTR_PNO_COMPLETE_CNT: + * wake 
pno complete packet count + * @QCA_WLAN_VENDOR_ATTR_PNO_MATCH_CNT: + * wake pno match packet count + */ +enum qca_wlan_vendor_attr_wake_stats { + QCA_WLAN_VENDOR_ATTR_GET_WAKE_STATS_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_TOTAL_CMD_EVENT_WAKE, + QCA_WLAN_VENDOR_ATTR_CMD_EVENT_WAKE_CNT_PTR, + QCA_WLAN_VENDOR_ATTR_CMD_EVENT_WAKE_CNT_SZ, + QCA_WLAN_VENDOR_ATTR_TOTAL_DRIVER_FW_LOCAL_WAKE, + QCA_WLAN_VENDOR_ATTR_DRIVER_FW_LOCAL_WAKE_CNT_PTR, + QCA_WLAN_VENDOR_ATTR_DRIVER_FW_LOCAL_WAKE_CNT_SZ, + QCA_WLAN_VENDOR_ATTR_TOTAL_RX_DATA_WAKE, + QCA_WLAN_VENDOR_ATTR_RX_UNICAST_CNT, + QCA_WLAN_VENDOR_ATTR_RX_MULTICAST_CNT, + QCA_WLAN_VENDOR_ATTR_RX_BROADCAST_CNT, + QCA_WLAN_VENDOR_ATTR_ICMP_PKT, + QCA_WLAN_VENDOR_ATTR_ICMP6_PKT, + QCA_WLAN_VENDOR_ATTR_ICMP6_RA, + QCA_WLAN_VENDOR_ATTR_ICMP6_NA, + QCA_WLAN_VENDOR_ATTR_ICMP6_NS, + QCA_WLAN_VENDOR_ATTR_ICMP4_RX_MULTICAST_CNT, + QCA_WLAN_VENDOR_ATTR_ICMP6_RX_MULTICAST_CNT, + QCA_WLAN_VENDOR_ATTR_OTHER_RX_MULTICAST_CNT, + QCA_WLAN_VENDOR_ATTR_RSSI_BREACH_CNT, + QCA_WLAN_VENDOR_ATTR_LOW_RSSI_CNT, + QCA_WLAN_VENDOR_ATTR_GSCAN_CNT, + QCA_WLAN_VENDOR_ATTR_PNO_COMPLETE_CNT, + QCA_WLAN_VENDOR_ATTR_PNO_MATCH_CNT, + /* keep last */ + QCA_WLAN_VENDOR_GET_WAKE_STATS_AFTER_LAST, + QCA_WLAN_VENDOR_GET_WAKE_STATS_MAX = + QCA_WLAN_VENDOR_GET_WAKE_STATS_AFTER_LAST - 1, +}; + +/** + * enum qca_vendor_element_id - QCA Vendor Specific element types + * + * These values are used to identify QCA Vendor Specific elements. The + * payload of the element starts with the three octet OUI (OUI_QCA) and + * is followed by a single octet type which is defined by this enum. + * + * @QCA_VENDOR_ELEM_P2P_PREF_CHAN_LIST: P2P preferred channel list. + * This element can be used to specify preference order for supported + * channels. The channels in this list are in preference order (the first + * one has the highest preference) and are described as a pair of + * (global) Operating Class and Channel Number (each one octet) fields. 
+ * + * This extends the standard P2P functionality by providing option to have + * more than one preferred operating channel. When this element is present, + * it replaces the preference indicated in the Operating Channel attribute. + * For supporting other implementations, the Operating Channel attribute is + * expected to be used with the highest preference channel. Similarly, all + * the channels included in this Preferred channel list element are + * expected to be included in the Channel List attribute. + * + * This vendor element may be included in GO Negotiation Request, P2P + * Invitation Request, and Provision Discovery Request frames. + * + * @QCA_VENDOR_ELEM_HE_CAPAB: HE Capabilities element. + * This element can be used for pre-standard publication testing of HE + * before P802.11ax draft assigns the element ID. The payload of this + * vendor specific element is defined by the latest P802.11ax draft. + * Please note that the draft is still work in progress and this element + * payload is subject to change. + * + * @QCA_VENDOR_ELEM_HE_OPER: HE Operation element. + * This element can be used for pre-standard publication testing of HE + * before P802.11ax draft assigns the element ID. The payload of this + * vendor specific element is defined by the latest P802.11ax draft. + * Please note that the draft is still work in progress and this element + * payload is subject to change. + * + * @QCA_VENDOR_ELEM_RAPS: RAPS element (OFDMA-based Random Access Parameter Set + * element). + * This element can be used for pre-standard publication testing of HE + * before P802.11ax draft assigns the element ID extension. The payload of + * this vendor specific element is defined by the latest P802.11ax draft + * (not including the Element ID Extension field). Please note that the + * draft is still work in progress and this element payload is subject to + * change. + * + * @QCA_VENDOR_ELEM_MU_EDCA_PARAMS: MU EDCA Parameter Set element. 
+ * This element can be used for pre-standard publication testing of HE
+ * before P802.11ax draft assigns the element ID extension. The payload of
+ * this vendor specific element is defined by the latest P802.11ax draft
+ * (not including the Element ID Extension field). Please note that the
+ * draft is still work in progress and this element payload is subject to
+ * change.
+ *
+ * @QCA_VENDOR_ELEM_BSS_COLOR_CHANGE: BSS Color Change Announcement element.
+ * This element can be used for pre-standard publication testing of HE
+ * before P802.11ax draft assigns the element ID extension. The payload of
+ * this vendor specific element is defined by the latest P802.11ax draft
+ * (not including the Element ID Extension field). Please note that the
+ * draft is still work in progress and this element payload is subject to
+ * change.
+ */
+enum qca_vendor_element_id {
+ QCA_VENDOR_ELEM_P2P_PREF_CHAN_LIST = 0,
+ QCA_VENDOR_ELEM_HE_CAPAB = 1,
+ QCA_VENDOR_ELEM_HE_OPER = 2,
+ QCA_VENDOR_ELEM_RAPS = 3,
+ QCA_VENDOR_ELEM_MU_EDCA_PARAMS = 4,
+ QCA_VENDOR_ELEM_BSS_COLOR_CHANGE = 5,
+};
+
+/**
+ * enum qca_vendor_attr_tsf_cmd: Vendor attributes for TSF capture
+ * @QCA_WLAN_VENDOR_ATTR_TSF_INVALID: Invalid attribute value
+ * @QCA_WLAN_VENDOR_ATTR_TSF_CMD: enum qca_tsf_cmd (u32)
+ * @QCA_WLAN_VENDOR_ATTR_TSF_TIMER_VALUE: Unsigned 64 bit TSF timer value
+ * @QCA_WLAN_VENDOR_ATTR_TSF_SOC_TIMER_VALUE: Unsigned 64 bit Synchronized
+ * SOC timer value at TSF capture
+ * @QCA_WLAN_VENDOR_ATTR_TSF_AFTER_LAST: after last
+ * @QCA_WLAN_VENDOR_ATTR_TSF_MAX: Max value
+ */
+enum qca_vendor_attr_tsf_cmd {
+ QCA_WLAN_VENDOR_ATTR_TSF_INVALID = 0,
+ QCA_WLAN_VENDOR_ATTR_TSF_CMD,
+ QCA_WLAN_VENDOR_ATTR_TSF_TIMER_VALUE,
+ QCA_WLAN_VENDOR_ATTR_TSF_SOC_TIMER_VALUE,
+ QCA_WLAN_VENDOR_ATTR_TSF_AFTER_LAST,
+ QCA_WLAN_VENDOR_ATTR_TSF_MAX =
+ QCA_WLAN_VENDOR_ATTR_TSF_AFTER_LAST - 1
+};
+
+/**
+ * enum qca_tsf_cmd: TSF driver commands
+ * @QCA_TSF_CAPTURE: Initiate TSF Capture
+ * 
@QCA_TSF_GET: Get TSF capture value + * @QCA_TSF_SYNC_GET: Initiate TSF capture and return with captured value + */ +enum qca_tsf_cmd { + QCA_TSF_CAPTURE, + QCA_TSF_GET, + QCA_TSF_SYNC_GET, +}; + +/** + * enum qca_vendor_attr_get_preferred_freq_list - get preferred channel list + * @QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_INVALID: invalid value + * @QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_IFACE_TYPE: interface type + * @QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST: preferred frequency list + * @QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_AFTER_LAST: after last + * @QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_MAX: max + */ +enum qca_vendor_attr_get_preferred_freq_list { + QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_INVALID, + /* A 32-unsigned value; the interface type/mode for which the preferred + * frequency list is requested (see enum qca_iface_type for possible + * values); used in both south- and north-bound. + */ + QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_IFACE_TYPE, + /* An array of 32-unsigned values; values are frequency (MHz); used + * in north-bound only. + */ + QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_MAX = + QCA_WLAN_VENDOR_ATTR_GET_PREFERRED_FREQ_LIST_AFTER_LAST - 1 +}; + +/** + * enum qca_vendor_attr_probable_oper_channel - channel hint + * @QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_INVALID: invalid value + * @QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_IFACE_TYPE: interface type + * @QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_FREQ: frequency hint value + * @QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_AFTER_LAST: last + * @QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_MAX: max + */ +enum qca_vendor_attr_probable_oper_channel { + QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_INVALID, + /* 32-bit unsigned value; indicates the connection/iface type likely to + * come on this channel (see enum qca_iface_type). 
+ */ + QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_IFACE_TYPE, + /* 32-bit unsigned value; the frequency (MHz) of the probable channel */ + QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_FREQ, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_MAX = + QCA_WLAN_VENDOR_ATTR_PROBABLE_OPER_CHANNEL_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_gw_param_config - gateway param config + * @QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_INVALID: Invalid + * @QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_GW_MAC_ADDR: gateway mac addr + * @QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_IPV4_ADDR: ipv4 addr + * @QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_IPV6_ADDR: ipv6 addr + */ +enum qca_wlan_vendor_attr_gw_param_config { + QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_GW_MAC_ADDR, + QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_IPV4_ADDR, + QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_IPV6_ADDR, + QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_MAX = + QCA_WLAN_VENDOR_ATTR_GW_PARAM_CONFIG_AFTER_LAST - 1, +}; + +/** + * enum drv_dbs_capability - DBS capability + * @DRV_DBS_CAPABILITY_DISABLED: DBS disabled + * @DRV_DBS_CAPABILITY_1X1: 1x1 + * @DRV_DBS_CAPABILITY_2X2: 2x2 + */ +enum drv_dbs_capability { + DRV_DBS_CAPABILITY_DISABLED, /* not supported or disabled */ + DRV_DBS_CAPABILITY_1X1, + DRV_DBS_CAPABILITY_2X2, +}; + +/** + * enum qca_vendor_attr_txpower_decr_db - Attributes for TX power decrease + * + * These attributes are used with QCA_NL80211_VENDOR_SUBCMD_SET_TXPOWER_DECR_DB. + */ +enum qca_vendor_attr_txpower_decr_db { + QCA_WLAN_VENDOR_ATTR_TXPOWER_DECR_DB_INVALID, + /* + * 8-bit unsigned value to indicate the reduction of TX power in dB for + * a virtual interface. 
+ */ + QCA_WLAN_VENDOR_ATTR_TXPOWER_DECR_DB, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_TXPOWER_DECR_DB_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TXPOWER_DECR_DB_MAX = + QCA_WLAN_VENDOR_ATTR_TXPOWER_DECR_DB_AFTER_LAST - 1 +}; + +/** + * enum qca_vendor_attr_ota_test - Enable OTA test + * @QCA_WLAN_VENDOR_ATTR_OTA_TEST_INVALID: invalid value + * @QCA_WLAN_VENDOR_ATTR_OTA_TEST_ENABLE: enable OTA test + * @QCA_WLAN_VENDOR_ATTR_OTA_TEST_AFTER_LAST: after last + * @QCA_WLAN_VENDOR_ATTR_OTA_TEST_MAX: max + */ +enum qca_vendor_attr_ota_test { + QCA_WLAN_VENDOR_ATTR_OTA_TEST_INVALID, + /* 8-bit unsigned value to indicate if OTA test is enabled */ + QCA_WLAN_VENDOR_ATTR_OTA_TEST_ENABLE, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_OTA_TEST_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_OTA_TEST_MAX = + QCA_WLAN_VENDOR_ATTR_OTA_TEST_AFTER_LAST - 1 +}; + +/** enum qca_vendor_attr_txpower_scale - vendor sub commands index + * @QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_INVALID: invalid value + * @QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE: scaling value + * @QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_AFTER_LAST: last value + * @QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_MAX: max value + */ +enum qca_vendor_attr_txpower_scale { + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_INVALID, + /* 8-bit unsigned value to indicate the scaling of tx power */ + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_MAX = + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_AFTER_LAST - 1 +}; + +/** + * enum qca_vendor_attr_txpower_scale_decr_db - vendor sub commands index + * @QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB_INVALID: invalid value + * @QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB: scaling value + * @QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB_AFTER_LAST: last value + * @QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB_MAX: max value + */ +enum qca_vendor_attr_txpower_scale_decr_db { + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB_INVALID, + /* 8-bit unsigned value to indicate the scaling 
of tx power */ + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB_MAX = + QCA_WLAN_VENDOR_ATTR_TXPOWER_SCALE_DECR_DB_AFTER_LAST - 1 +}; + +/** + * enum dfs_mode - state of DFS mode + * @DFS_MODE_NONE: DFS mode attribute is none + * @DFS_MODE_ENABLE: DFS mode is enabled + * @DFS_MODE_DISABLE: DFS mode is disabled + * @DFS_MODE_DEPRIORITIZE: Deprioritize DFS channels in scanning + */ +enum dfs_mode { + DFS_MODE_NONE, + DFS_MODE_ENABLE, + DFS_MODE_DISABLE, + DFS_MODE_DEPRIORITIZE +}; + +/** + * enum qca_wlan_vendor_attr_acs_config - Config params for ACS + * @QCA_WLAN_VENDOR_ATTR_ACS_MODE_INVALID: Invalid + * @QCA_WLAN_VENDOR_ATTR_ACS_DFS_MODE: Dfs mode for ACS + * QCA_WLAN_VENDOR_ATTR_ACS_CHANNEL_HINT: channel_hint for ACS + * QCA_WLAN_VENDOR_ATTR_ACS_DFS_AFTER_LAST: after_last + * QCA_WLAN_VENDOR_ATTR_ACS_DFS_MAX: max attribute + */ +enum qca_wlan_vendor_attr_acs_config { + QCA_WLAN_VENDOR_ATTR_ACS_MODE_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_ACS_DFS_MODE, + QCA_WLAN_VENDOR_ATTR_ACS_CHANNEL_HINT, + + QCA_WLAN_VENDOR_ATTR_ACS_DFS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_ACS_DFS_MAX = + QCA_WLAN_VENDOR_ATTR_ACS_DFS_AFTER_LAST - 1, + +}; + +/** + * enum qca_wlan_vendor_attr_get_hw_capability - Wi-Fi hardware capability + */ +enum qca_wlan_vendor_attr_get_hw_capability { + QCA_WLAN_VENDOR_ATTR_HW_CAPABILITY_INVALID, + /* + * Antenna isolation + * An attribute used in the response. + * The content of this attribute is encoded in a byte array. Each byte + * value is an antenna isolation value. The array length is the number + * of antennas. + */ + QCA_WLAN_VENDOR_ATTR_ANTENNA_ISOLATION, + /* + * Request HW capability + * An attribute used in the request. + * The content of this attribute is a u32 array for one or more of + * hardware capabilities (attribute IDs) that are being requested. 
Each + * u32 value has a value from this + * enum qca_wlan_vendor_attr_get_hw_capability + * identifying which capabilities are requested. + */ + QCA_WLAN_VENDOR_ATTR_GET_HW_CAPABILITY, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_HW_CAPABILITY_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_HW_CAPABILITY_MAX = + QCA_WLAN_VENDOR_ATTR_HW_CAPABILITY_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_sta_connect_roam_policy_config - + * config params for sta roam policy + * @QCA_WLAN_VENDOR_ATTR_STA_CONNECT_ROAM_POLICY_INVALID: Invalid + * @QCA_WLAN_VENDOR_ATTR_STA_DFS_MODE: If sta should skip Dfs channels + * @QCA_WLAN_VENDOR_ATTR_STA_SKIP_UNSAFE_CHANNEL: + * If sta should skip unsafe channels or not in scanning + * @QCA_WLAN_VENDOR_ATTR_STA_CONNECT_ROAM_POLICY_LAST: + * @QCA_WLAN_VENDOR_ATTR_STA_CONNECT_ROAM_POLICY_MAX: max attribute + */ +enum qca_wlan_vendor_attr_sta_connect_roam_policy_config { + QCA_WLAN_VENDOR_ATTR_STA_CONNECT_ROAM_POLICY_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_STA_DFS_MODE, + QCA_WLAN_VENDOR_ATTR_STA_SKIP_UNSAFE_CHANNEL, + + QCA_WLAN_VENDOR_ATTR_STA_CONNECT_ROAM_POLICY_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_STA_CONNECT_ROAM_POLICY_MAX = + QCA_WLAN_VENDOR_ATTR_STA_CONNECT_ROAM_POLICY_AFTER_LAST - 1, +}; + +/* Attributes for FTM commands and events */ + +/** + * enum qca_wlan_vendor_attr_loc_capa - Indoor location capabilities + * + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS: Various flags. See + * enum qca_wlan_vendor_attr_loc_capa_flags. + * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_SESSIONS: Maximum number + * of measurement sessions that can run concurrently. + * Default is one session (no session concurrency). + * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_PEERS: The total number of unique + * peers that are supported in running sessions. For example, + * if the value is 8 and maximum number of sessions is 2, you can + * have one session with 8 unique peers, or 2 sessions with 4 unique + * peers each, and so on. 
+ * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_BURSTS_EXP: Maximum number
+ * of bursts per peer, as an exponent (2^value). Default is 0,
+ * meaning no multi-burst support.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_MEAS_PER_BURST: Maximum number
+ * of measurement exchanges allowed in a single burst.
+ * @QCA_WLAN_VENDOR_ATTR_AOA_CAPA_SUPPORTED_TYPES: Supported AOA measurement
+ * types. A bit mask (unsigned 32 bit value), each bit corresponds
+ * to an AOA type as defined by enum qca_vendor_attr_aoa_type.
+ */
+enum qca_wlan_vendor_attr_loc_capa {
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_INVALID,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS,
+ QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_SESSIONS,
+ QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_PEERS,
+ QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_BURSTS_EXP,
+ QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_MEAS_PER_BURST,
+ QCA_WLAN_VENDOR_ATTR_AOA_CAPA_SUPPORTED_TYPES,
+ /* keep last */
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_AFTER_LAST,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_MAX =
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_loc_capa_flags: Indoor location capability flags
+ *
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_RESPONDER: Set if driver
+ * can be configured as an FTM responder (for example, an AP that
+ * services FTM requests). QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER
+ * will be supported if set.
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_INITIATOR: Set if driver
+ * can run FTM sessions. QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION
+ * will be supported if set.
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_ASAP: Set if FTM responder
+ * supports immediate (ASAP) response.
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA: Set if driver supports standalone
+ * AOA measurement using QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS.
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA_IN_FTM: Set if driver supports
+ * requesting AOA measurements as part of an FTM session. 
+ */ +enum qca_wlan_vendor_attr_loc_capa_flags { + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_RESPONDER = 1 << 0, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_INITIATOR = 1 << 1, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_ASAP = 1 << 2, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA = 1 << 3, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA_IN_FTM = 1 << 4, +}; + +/** + * enum qca_wlan_vendor_attr_sap_config - config params for sap configuration + * @QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_INVALID: invalid + * @QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_CHANNEL: Channel on which SAP should start + * @QCA_WLAN_VENDOR_ATTR_SAP_MANDATORY_FREQUENCY_LIST: List of frequencies on + * which AP is expected to operate. This is irrespective of ACS configuration. + * This list is a priority based one and is looked for before the AP is created + * to ensure the best concurrency sessions (avoid MCC and use DBS/SCC) co-exist + * in the system. + * @QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_AFTER_LAST: after last + * @QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_MAX: max attribute + */ +enum qca_wlan_vendor_attr_sap_config { + QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_CHANNEL, + QCA_WLAN_VENDOR_ATTR_SAP_MANDATORY_FREQUENCY_LIST = 2, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_MAX = + QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_peer_info: Information about + * a single peer in a measurement session. + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR: The MAC address of the peer. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS: Various flags related + * to measurement. See enum qca_wlan_vendor_attr_ftm_peer_meas_flags. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS: Nested attribute of + * FTM measurement parameters, as specified by IEEE P802.11-REVmc/D7.0 + * 9.4.2.167. See enum qca_wlan_vendor_attr_ftm_meas_param for + * list of supported attributes. 
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID: Initial token ID for + * secure measurement. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD: Request AOA + * measurement every bursts. If 0 or not specified, + * AOA measurements will be disabled for this peer. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_FREQ: Frequency in MHz where + * the measurement frames are exchanged. Optional; if not + * specified, try to locate the peer in the kernel scan + * results cache and use frequency from there. + */ +enum qca_wlan_vendor_attr_ftm_peer_info { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_INVALID, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_FREQ, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_FTM_PEER_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX = + QCA_WLAN_VENDOR_ATTR_FTM_PEER_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_peer_meas_flags: Measurement request flags, + * per-peer + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_ASAP: If set, request + * immediate (ASAP) response from peer. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI: If set, request + * LCI report from peer. The LCI report includes the absolute + * location of the peer in "official" coordinates (similar to GPS). + * See IEEE P802.11-REVmc/D7.0, 11.24.6.7 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR: If set, request + * Location civic report from peer. The LCR includes the location + * of the peer in free-form format. See IEEE P802.11-REVmc/D7.0, + * 11.24.6.7 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_SECURE: If set, + * request a secure measurement. + * QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID must also be provided. 
+ */ +enum qca_wlan_vendor_attr_ftm_peer_meas_flags { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_ASAP = 1 << 0, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI = 1 << 1, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR = 1 << 2, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_SECURE = 1 << 3, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_meas_param: Measurement parameters + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST: Number of measurements + * to perform in a single burst. + * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP: Number of bursts to + * perform, specified as an exponent (2^value). + * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION: Duration of burst + * instance, as specified in IEEE P802.11-REVmc/D7.0, 9.4.2.167. + * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD: Time between bursts, + * as specified in IEEE P802.11-REVmc/D7.0, 9.4.2.167. Must + * be larger than QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION. + */ +enum qca_wlan_vendor_attr_ftm_meas_param { + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_INVALID, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX = + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_peer_result: Per-peer results + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAC_ADDR: MAC address of the reported + * peer. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS: Status of measurement + * request for this peer. + * See enum qca_wlan_vendor_attr_ftm_peer_result_status. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAGS: Various flags related + * to measurement results for this peer. + * See enum qca_wlan_vendor_attr_ftm_peer_result_flags. 
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS: Specified when
+ * request failed and peer requested not to send an additional request
+ * for this number of seconds.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCI: LCI report when received
+ * from peer. In the format specified by IEEE P802.11-REVmc/D7.0,
+ * 9.4.2.22.10.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCR: Location civic report when
+ * received from peer. In the format specified by IEEE P802.11-REVmc/D7.0,
+ * 9.4.2.22.13.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS: Reported when peer
+ * has overridden some measurement request parameters. See
+ * enum qca_wlan_vendor_attr_ftm_meas_param.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AOA_MEAS: AOA measurement
+ * for this peer. Same contents as @QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS: Array of measurement
+ * results. Each entry is a nested attribute defined
+ * by enum qca_wlan_vendor_attr_ftm_meas.
+ */
+enum qca_wlan_vendor_attr_ftm_peer_result {
+	QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_INVALID,
+	QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAC_ADDR,
+	QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS,
+	QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAGS,
+	QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS,
+	QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCI,
+	QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCR,
+	QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS,
+	QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AOA_MEAS,
+	QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS,
+	/* keep last */
+	QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAX =
+		QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_ftm_peer_result_status
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK: Request sent ok and results
+ * will be provided. Peer may have overridden some measurement parameters,
+ * in which case overridden parameters will be reported by
+ * QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS attribute.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INCAPABLE: Peer is incapable + * of performing the measurement request. No more results will be sent + * for this peer in this session. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_FAILED: Peer reported request + * failed, and requested not to send an additional request for number + * of seconds specified by QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS + * attribute. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID: Request validation + * failed. Request was not sent over the air. + */ +enum qca_wlan_vendor_attr_ftm_peer_result_status { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INCAPABLE, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_FAILED, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_peer_result_flags: Various flags + * for measurement result, per-peer + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAG_DONE: If set, + * measurement completed for this peer. No more results will be reported + * for this peer in this session. + */ +enum qca_wlan_vendor_attr_ftm_peer_result_flags { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAG_DONE = 1 << 0, +}; + +/** + * enum qca_vendor_attr_loc_session_status: Session completion status code + * + * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_OK: Session completed + * successfully. + * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED: Session aborted + * by request. + * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_INVALID: Session request + * was invalid and was not started. + * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED: Session had an error + * and did not complete normally (for example out of resources). 
+ */ +enum qca_vendor_attr_loc_session_status { + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_OK, + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED, + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_INVALID, + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_meas: Single measurement data + * + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T1: Time of departure (TOD) of FTM packet as + * recorded by responder, in picoseconds. + * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T2: Time of arrival (TOA) of FTM packet at + * initiator, in picoseconds. + * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T3: TOD of ACK packet as recorded by + * initiator, in picoseconds. + * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T4: TOA of ACK packet at + * responder, in picoseconds. + * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_RSSI: RSSI (signal level) as recorded + * during this measurement exchange. Optional and will be provided if + * the hardware can measure it. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOD_ERR: TOD error reported by + * responder. Not always provided. + * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOA_ERR: TOA error reported by + * responder. Not always provided. + * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOD_ERR: TOD error measured by + * initiator. Not always provided. + * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOA_ERR: TOA error measured by + * initiator. Not always provided. + * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PAD: Dummy attribute for padding. 
+ */ +enum qca_wlan_vendor_attr_ftm_meas { + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INVALID, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T1, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T2, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T3, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T4, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_RSSI, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOD_ERR, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOA_ERR, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOD_ERR, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOA_ERR, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PAD, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_MAX = + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_p2p_listen_offload - vendor sub commands index + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_INVALID: invalid value + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_CHANNEL: + * A 32-bit unsigned value; the P2P listen frequency (MHz); must be one + * of the social channels. + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_PERIOD: listen offload period + * A 32-bit unsigned value; the P2P listen offload period (ms). + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_INTERVAL: + * A 32-bit unsigned value; the P2P listen interval duration (ms). + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_COUNT: + * A 32-bit unsigned value; number of interval times the Firmware needs + * to run the offloaded P2P listen operation before it stops. + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_DEVICE_TYPES: device types + * An array of unsigned 8-bit characters; vendor information elements. + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_VENDOR_IE: vendor IEs + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_CTRL_FLAG: control flag for FW + * A 32-bit unsigned value; a control flag to indicate whether listen + * results need to be flushed to wpa_supplicant. + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_STOP_REASON: offload stop reason + * A 8-bit unsigned value; reason code for P2P listen offload stop + * event. 
+ * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_AFTER_LAST: last value + * @QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_MAX: max value + */ +enum qca_wlan_vendor_attr_p2p_listen_offload { + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_CHANNEL, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_PERIOD, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_INTERVAL, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_COUNT, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_DEVICE_TYPES, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_VENDOR_IE, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_CTRL_FLAG, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_STOP_REASON, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_MAX = + QCA_WLAN_VENDOR_ATTR_P2P_LISTEN_OFFLOAD_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_drv_info - WLAN driver info + * @QCA_WLAN_VENDOR_ATTR_DRV_INFO_INVALID: Invalid + * @QCA_WLAN_VENDOR_ATTR_DRV_INFO_BUS_SIZE: Maximum Message size info + * between Firmware & Host. + */ +enum qca_wlan_vendor_drv_info { + QCA_WLAN_VENDOR_ATTR_DRV_INFO_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_DRV_INFO_BUS_SIZE, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_DRV_INFO_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_DRV_INFO_MAX = + QCA_WLAN_VENDOR_ATTR_DRV_INFO_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_aoa_type - AOA measurement type + * + * @QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE: Phase of the strongest + * CIR (channel impulse response) path for each antenna. + * @QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP: Phase and amplitude + * of the strongest CIR path for each antenna. 
+ */ +enum qca_wlan_vendor_attr_aoa_type { + QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE, + QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP, + QCA_WLAN_VENDOR_ATTR_AOA_TYPE_MAX +}; + +/** + * enum qca_wlan_vendor_attr_encryption_test - Attributes to + * validate encryption engine + * + * @QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_NEEDS_DECRYPTION: Flag attribute. + * This will be included if the request is for decryption; if not included, + * the request is treated as a request for encryption by default. + * @QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_CIPHER: Unsigned 32-bit value + * indicating the key cipher suite. Takes same values as + * NL80211_ATTR_KEY_CIPHER. + * @QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_KEYID: Unsigned 8-bit value + * Key Id to be used for encryption + * @QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_TK: Array of 8-bit values. + * Key (TK) to be used for encryption/decryption + * @QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_PN: Array of 8-bit values. + * Packet number to be specified for encryption/decryption + * 6 bytes for TKIP/CCMP/GCMP. + * @QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_DATA: Array of 8-bit values + * representing the 802.11 packet (header + payload + FCS) that + * needs to be encrypted/decrypted. + * Encrypted/decrypted response from the driver will also be sent + * to userspace with the same attribute. + */ +enum qca_wlan_vendor_attr_encryption_test { + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_NEEDS_DECRYPTION, + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_CIPHER, + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_KEYID, + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_TK, + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_PN, + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_DATA, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_MAX = + QCA_WLAN_VENDOR_ATTR_ENCRYPTION_TEST_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_dmg_rf_sector_type - Type of + * sector for DMG RF sector operations. 
+ * + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_TYPE_RX: RX sector + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_TYPE_TX: TX sector + */ +enum qca_wlan_vendor_attr_dmg_rf_sector_type { + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_TYPE_RX, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_TYPE_TX, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_TYPE_MAX +}; + +/** + * BRP antenna limit mode + * + * @QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_DISABLE: Disable BRP force + * antenna limit, BRP will be performed as usual. + * @QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_EFFECTIVE: Define maximal + * antennas limit. the hardware may use less antennas than the + * maximum limit. + * @QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_FORCE: The hardware will + * use exactly the specified number of antennas for BRP. + */ +enum qca_wlan_vendor_attr_brp_ant_limit_mode { + QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_DISABLE, + QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_EFFECTIVE, + QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_FORCE, + QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_MAX +}; + +/** + * enum qca_wlan_vendor_attr_dmg_rf_sector_cfg - Attributes for + * DMG RF sector configuration for a single RF module. + * The values are defined in a compact way which closely matches + * the way it is stored in HW registers. + * The configuration provides values for 32 antennas and 8 distribution + * amplifiers, and together describes the characteristics of the RF + * sector - such as a beam in some direction with some gain. + * + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX: Index + * of RF module for this configuration. + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_ETYPE0: Bit 0 of edge + * amplifier gain index. Unsigned 32 bit number containing + * bits for all 32 antennas. + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_ETYPE1: Bit 1 of edge + * amplifier gain index. Unsigned 32 bit number containing + * bits for all 32 antennas. + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_ETYPE2: Bit 2 of edge + * amplifier gain index. 
Unsigned 32 bit number containing + * bits for all 32 antennas. + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_PSH_HI: Phase values + * for first 16 antennas, 2 bits per antenna. + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_PSH_LO: Phase values + * for last 16 antennas, 2 bits per antenna. + * @QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16: Contains + * DTYPE values (3 bits) for each distribution amplifier, followed + * by X16 switch bits for each distribution amplifier. There are + * total of 8 distribution amplifiers. + */ +enum qca_wlan_vendor_attr_dmg_rf_sector_cfg { + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX = 1, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_ETYPE0 = 2, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_ETYPE1 = 3, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_ETYPE2 = 4, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_PSH_HI = 5, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_PSH_LO = 6, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16 = 7, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_MAX = + QCA_WLAN_VENDOR_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_tdls_configuration - Attributes for + * @QCA_NL80211_VENDOR_SUBCMD_CONFIGURE_TDLS configuration to the host driver. + * + * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TRIGGER_MODE: Configure the TDLS trigger + * mode in the host driver. enum qca_wlan_vendor_tdls_trigger_mode + * represents the different TDLS trigger modes. + * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TX_STATS_PERIOD: Duration (u32) within + * which QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TX_THRESHOLD number + * of packets shall meet the criteria for implicit TDLS setup. + * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TX_THRESHOLD: Number (u32) of Tx/Rx + * packets within a duration. + * QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TX_STATS_PERIOD to initiate + * a TDLS setup. 
+ * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_DISCOVERY_PERIOD: Time (u32) to initiate
+ * a TDLS Discovery to the Peer.
+ * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_MAX_DISCOVERY_ATTEMPT: Max number (u32) of
+ * discovery attempts to know the TDLS capability of the peer. A peer is
+ * marked as TDLS not capable if there is no response for all the attempts.
+ * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_IDLE_TIMEOUT: Represents a duration (u32)
+ * within which QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_IDLE_PACKET_THRESHOLD
+ * number of TX / RX frames meet the criteria for TDLS teardown.
+ * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_IDLE_PACKET_THRESHOLD: Minimum number
+ * (u32) of Tx/Rx packets within a duration
+ * QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_IDLE_TIMEOUT to tear down a TDLS link
+ * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_SETUP_RSSI_THRESHOLD: Threshold
+ * corresponding to the RSSI of the peer below which a TDLS
+ * setup is triggered.
+ * @QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TEARDOWN_RSSI_THRESHOLD: Threshold
+ * corresponding to the RSSI of the peer above which
+ * a TDLS teardown is triggered.
+ */ +enum qca_wlan_vendor_attr_tdls_configuration { + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TRIGGER_MODE = 1, + + /* Attributes configuring the TDLS Implicit Trigger */ + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TX_STATS_PERIOD = 2, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TX_THRESHOLD = 3, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_DISCOVERY_PERIOD = 4, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_MAX_DISCOVERY_ATTEMPT = 5, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_IDLE_TIMEOUT = 6, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_IDLE_PACKET_THRESHOLD = 7, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_SETUP_RSSI_THRESHOLD = 8, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TEARDOWN_RSSI_THRESHOLD = 9, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_MAX = + QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_tdls_trigger_mode: Represents the TDLS trigger mode in + * the driver. + * + * The following are the different values for + * QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_TRIGGER_MODE. + * + * @QCA_WLAN_VENDOR_TDLS_TRIGGER_MODE_EXPLICIT: The trigger to + * initiate/teardown the TDLS connection to a respective peer comes + * from the user space. wpa_supplicant provides the commands + * TDLS_SETUP, TDLS_TEARDOWN, TDLS_DISCOVER to do this. + * @QCA_WLAN_VENDOR_TDLS_TRIGGER_MODE_IMPLICIT: Host driver triggers this TDLS + * setup/teardown to the eligible peer once the configured criteria + * (such as TX/RX threshold, RSSI) is met. The attributes + * in QCA_WLAN_VENDOR_ATTR_TDLS_CONFIG_IMPLICIT_PARAMS correspond to + * the different configuration criteria for the TDLS trigger from the + * host driver. + * @QCA_WLAN_VENDOR_TDLS_TRIGGER_MODE_EXTERNAL: Enables the driver to trigger + * the TDLS setup / teardown through the implicit mode, only to the + * configured MAC addresses(wpa_supplicant, with tdls_external_control = 1, + * configures the MAC address through TDLS_SETUP/TDLS_TEARDOWN commands). 
+ * External mode works on top of the implicit mode, thus the host Driver + * is expected to be configured in TDLS Implicit mode too to operate in + * External mode. Configuring External mode alone without Implicit + * mode is invalid. + * + * All the above implementations work as expected only when the host driver + * advertises the capability WPA_DRIVER_FLAGS_TDLS_EXTERNAL_SETUP - + * representing that the TDLS message exchange is not internal to the host + * driver, but depends on wpa_supplicant to do the message exchange. + */ +enum qca_wlan_vendor_tdls_trigger_mode { + QCA_WLAN_VENDOR_TDLS_TRIGGER_MODE_EXPLICIT = 1 << 0, + QCA_WLAN_VENDOR_TDLS_TRIGGER_MODE_IMPLICIT = 1 << 1, + QCA_WLAN_VENDOR_TDLS_TRIGGER_MODE_EXTERNAL = 1 << 2, +}; + +/** + * enum qca_vendor_attr_sar_limits_selections - Source of SAR power limits + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF0: Select SAR profile #0 + * that is hard-coded in the Board Data File (BDF). + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF1: Select SAR profile #1 + * that is hard-coded in the Board Data File (BDF). + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF2: Select SAR profile #2 + * that is hard-coded in the Board Data File (BDF). + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF3: Select SAR profile #3 + * that is hard-coded in the Board Data File (BDF). + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF4: Select SAR profile #4 + * that is hard-coded in the Board Data File (BDF). + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_NONE: Do not select any + * source of SAR power limits, thereby disabling the SAR power + * limit feature. + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_USER: Select the SAR power + * limits configured by %QCA_NL80211_VENDOR_SUBCMD_SET_SAR. + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_V2_0: Select the SAR power + * limits version 2.0 configured by %QCA_NL80211_VENDOR_SUBCMD_SET_SAR. 
+ * + * This enumerates the valid set of values that may be supplied for + * attribute %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT in an instance of + * the %QCA_NL80211_VENDOR_SUBCMD_SET_SAR_LIMITS vendor command or in + * the response to an instance of the + * %QCA_NL80211_VENDOR_SUBCMD_GET_SAR_LIMITS vendor command. + */ +enum qca_vendor_attr_sar_limits_selections { + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF0 = 0, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF1 = 1, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF2 = 2, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF3 = 3, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_BDF4 = 4, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_NONE = 5, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_USER = 6, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_V2_0 = 7, +}; + +/** + * enum qca_vendor_attr_sar_limits_spec_modulations - + * SAR limits specification modulation + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_MODULATION_CCK - + * CCK modulation + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_MODULATION_OFDM - + * OFDM modulation + * + * This enumerates the valid set of values that may be supplied for + * attribute %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_MODULATION in an + * instance of attribute %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC in an + * instance of the %QCA_NL80211_VENDOR_SUBCMD_SET_SAR_LIMITS vendor + * command or in the response to an instance of the + * %QCA_NL80211_VENDOR_SUBCMD_GET_SAR_LIMITS vendor command. + */ +enum qca_vendor_attr_sar_limits_spec_modulations { + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_MODULATION_CCK = 0, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_MODULATION_OFDM = 1, +}; + +/** + * enum qca_vendor_attr_sar_limits - Attributes for SAR power limits + * + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SAR_ENABLE: Optional (u32) value to + * select which SAR power limit table should be used. Valid + * values are enumerated in enum + * %qca_vendor_attr_sar_limits_selections. 
The existing SAR + * power limit selection is unchanged if this attribute is not + * present. + * + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_NUM_SPECS: Optional (u32) value + * which specifies the number of SAR power limit specifications + * which will follow. + * + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC: Nested array of SAR power + * limit specifications. The number of specifications is + * specified by @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_NUM_SPECS. Each + * specification contains a set of + * QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_* attributes. A + * specification is uniquely identified by the attributes + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_BAND, + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_CHAIN, and + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_MODULATION and always + * contains as a payload the attribute + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_POWER_LIMIT. + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_POWER_LIMIT_INDEX. + * Either %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_POWER_LIMIT or + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_POWER_LIMIT_INDEX is + * needed based upon the value of + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SAR_ENABLE. + * + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_BAND: Optional (u32) value to + * indicate for which band this specification applies. Valid + * values are enumerated in enum %nl80211_band (although not all + * bands may be supported by a given device). If the attribute is + * not supplied then the specification will be applied to all + * supported bands. + * + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_CHAIN: Optional (u32) value + * to indicate for which antenna chain this specification + * applies, i.e. 1 for chain 1, 2 for chain 2, etc. If the + * attribute is not supplied then the specification will be + * applied to all chains. + * + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_MODULATION: Optional (u32) + * value to indicate for which modulation scheme this + * specification applies. 
Valid values are enumerated in enum + * %qca_vendor_attr_sar_limits_spec_modulations. If the attribute + * is not supplied then the specification will be applied to all + * modulation schemes. + * + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_POWER_LIMIT: Required (u32) + * value to specify the actual power limit value in units of 0.5 + * dBm (i.e., a value of 11 represents 5.5 dBm). + * This is required, when %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT is + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_USER. + * + * @QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_POWER_LIMIT_INDEX: Required (u32) + * value to indicate SAR V2 indices (0 - 11) to select SAR V2 profiles. + * This is required, when %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT is + * %QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SELECT_V2_0. + * + * These attributes are used with %QCA_NL80211_VENDOR_SUBCMD_SET_SAR_LIMITS + * and %QCA_NL80211_VENDOR_SUBCMD_GET_SAR_LIMITS. + */ +enum qca_vendor_attr_sar_limits { + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_INVALID = 0, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SAR_ENABLE = 1, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_NUM_SPECS = 2, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC = 3, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_BAND = 4, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_CHAIN = 5, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_MODULATION = 6, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_POWER_LIMIT = 7, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_SPEC_POWER_LIMIT_INDEX = 8, + + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_MAX = + QCA_WLAN_VENDOR_ATTR_SAR_LIMITS_AFTER_LAST - 1 +}; + +/** + * enum qca_wlan_vendor_attr_ll_stats_ext - Attributes for MAC layer monitoring + * offload which is an extension for LL_STATS. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CFG_PERIOD: Monitoring period. Unit in ms. + * If MAC counters do not exceed the threshold, FW will report monitored + * link layer counters periodically as this setting. The first report is + * always triggered by this timer. 
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_CFG_THRESHOLD: It is a percentage (1-99).
+ * For each MAC layer counter, FW holds two copies. One is the current value.
+ * The other is the last report. Once a current counter's increment is larger
+ * than the threshold, FW will indicate that counter to host even if the
+ * monitoring timer does not expire.
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_PS_CHG: Peer STA power state change
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TID: TID of MSDU
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_NUM_MSDU: Count of MSDU with the same
+ * failure code.
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_STATUS: TX failure code
+ * 1: TX packet discarded
+ * 2: No ACK
+ * 3: Postpone
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_MAC_ADDRESS: peer MAC address
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_PS_STATE: Peer STA current state
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_GLOBAL: Global threshold.
+ * Threshold for all monitored parameters. If per counter dedicated threshold
+ * is not enabled, this threshold will take effect.
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_EVENT_MODE: Indicate what triggers this
+ * event, PERIOD_TIMEOUT == 1, THRESH_EXCEED == 0.
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IFACE_ID: interface ID + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_ID: peer ID + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_BITMAP: bitmap for TX counters + * Bit0: TX counter unit in MSDU + * Bit1: TX counter unit in MPDU + * Bit2: TX counter unit in PPDU + * Bit3: TX counter unit in byte + * Bit4: Dropped MSDUs + * Bit5: Dropped Bytes + * Bit6: MPDU retry counter + * Bit7: MPDU failure counter + * Bit8: PPDU failure counter + * Bit9: MPDU aggregation counter + * Bit10: MCS counter for ACKed MPDUs + * Bit11: MCS counter for Failed MPDUs + * Bit12: TX Delay counter + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_BITMAP: bitmap for RX counters + * Bit0: MAC RX counter unit in MPDU + * Bit1: MAC RX counter unit in byte + * Bit2: PHY RX counter unit in PPDU + * Bit3: PHY RX counter unit in byte + * Bit4: Disorder counter + * Bit5: Retry counter + * Bit6: Duplication counter + * Bit7: Discard counter + * Bit8: MPDU aggregation size counter + * Bit9: MCS counter + * Bit10: Peer STA power state change (wake to sleep) counter + * Bit11: Peer STA power save counter, total time in PS mode + * Bit12: Probe request counter + * Bit13: Other management frames counter + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_CCA_BSS_BITMAP: bitmap for CCA + * Bit0: Idle time + * Bit1: TX time + * Bit2: time RX in current bss + * Bit3: Out of current bss time + * Bit4: Wireless medium busy time + * Bit5: RX in bad condition time + * Bit6: TX in bad condition time + * Bit7: time wlan card not available + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_SIGNAL_BITMAP: bitmap for signal + * Bit0: Per channel SNR counter + * Bit1: Per channel noise floor counter + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_NUM: number of peers + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_CHANNEL_NUM: number of channels + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_AC_RX_NUM: number of RX stats + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_CCA_BSS: per channel BSS CCA stats + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER: 
container for per PEER stats + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_MSDU: Number of total TX MSDUs + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_MPDU: Number of total TX MPDUs + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_PPDU: Number of total TX PPDUs + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_BYTES: bytes of TX data + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_DROP: Number of dropped TX packets + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_DROP_BYTES: Bytes dropped + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_RETRY: waiting time without an ACK + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_NO_ACK: number of MPDU not-ACKed + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_NO_BACK: number of PPDU not-ACKed + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_AGGR_NUM: + * aggregation stats buffer length + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_SUCC_MCS_NUM: length of mcs stats + * buffer for ACKed MPDUs. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_FAIL_MCS_NUM: length of mcs stats + * buffer for failed MPDUs. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_DELAY_ARRAY_SIZE: + * length of delay stats array. 
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_AGGR: TX aggregation stats + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_SUCC_MCS: MCS stats for ACKed MPDUs + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_FAIL_MCS: MCS stats for failed MPDUs + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_DELAY: tx delay stats + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU: MPDUs received + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_BYTES: bytes received + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_PPDU: PPDU received + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_PPDU_BYTES: PPDU bytes received + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_LOST: packets lost + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_RETRY: number of RX packets + * flagged as retransmissions + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_DUP: number of RX packets + * flagged as duplicated + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_DISCARD: number of RX + * packets discarded + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_AGGR_NUM: length of RX aggregation + * stats buffer. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MCS_NUM: length of RX mcs + * stats buffer. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MCS: RX mcs stats buffer + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_AGGR: aggregation stats buffer + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_PS_TIMES: times STAs go to sleep + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_PS_DURATION: STAs' total sleep time + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_PROBE_REQ: number of probe + * requests received + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MGMT: number of other mgmt + * frames received + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IDLE_TIME: Percentage of idle time + * there is no TX, nor RX, nor interference. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_TIME: percentage of time + * transmitting packets. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_TIME: percentage of time + * for receiving. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_BUSY: percentage of time + * interference detected. 
+ * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_BAD: percentage of time + * receiving packets with errors. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_BAD: percentage of time + * TX no-ACK. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_NO_AVAIL: percentage of time + * the chip is unable to work in normal conditions. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IN_BSS_TIME: percentage of time + * receiving packets in current BSS. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_OUT_BSS_TIME: percentage of time + * receiving packets not in current BSS. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_ANT_NUM: number of antennas + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_SIGNAL: + * This is a container for per antenna signal stats. + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_ANT_SNR: per antenna SNR value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_ANT_NF: per antenna NF value + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IFACE_RSSI_BEACON: RSSI of beacon + * @QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IFACE_SNR_BEACON: SNR of beacon + */ +enum qca_wlan_vendor_attr_ll_stats_ext { + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_INVALID = 0, + + /* Attributes for configurations */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_CFG_PERIOD, + QCA_WLAN_VENDOR_ATTR_LL_STATS_CFG_THRESHOLD, + + /* Peer STA power state change */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_PS_CHG, + + /* TX failure event */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TID, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_NUM_MSDU, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_STATUS, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_PS_STATE, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_MAC_ADDRESS, + + /* MAC counters */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_GLOBAL, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_EVENT_MODE, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IFACE_ID, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_ID, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_BITMAP, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_BITMAP, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_CCA_BSS_BITMAP, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_SIGNAL_BITMAP, + 
QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_NUM, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_CHANNEL_NUM, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_CCA_BSS, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER, + + /* Sub-attributes for PEER_AC_TX */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_MSDU, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_MPDU, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_PPDU, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_BYTES, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_DROP, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_DROP_BYTES, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_RETRY, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_NO_ACK, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_NO_BACK, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_AGGR_NUM, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_SUCC_MCS_NUM, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_FAIL_MCS_NUM, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_AGGR, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_SUCC_MCS, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_FAIL_MCS, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_DELAY_ARRAY_SIZE, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_DELAY, + + /* Sub-attributes for PEER_AC_RX */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_BYTES, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_PPDU, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_PPDU_BYTES, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_LOST, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_RETRY, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_DUP, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MPDU_DISCARD, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_AGGR_NUM, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MCS_NUM, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MCS, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_AGGR, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_PS_TIMES, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_PS_DURATION, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_PROBE_REQ, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_MGMT, + + /* Sub-attributes for CCA_BSS */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IDLE_TIME, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_TIME, + 
QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_TIME, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_BUSY, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_RX_BAD, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_TX_BAD, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_NO_AVAIL, + + /* sub-attribute for BSS_RX_TIME */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IN_BSS_TIME, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_OUT_BSS_TIME, + + /* Sub-attributes for PEER_SIGNAL */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_ANT_NUM, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_PEER_SIGNAL, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_ANT_SNR, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_ANT_NF, + + /* Sub-attributes for IFACE_BSS */ + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IFACE_RSSI_BEACON, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_IFACE_SNR_BEACON, + + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_LAST, + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_MAX = + QCA_WLAN_VENDOR_ATTR_LL_STATS_EXT_LAST - 1 +}; + +/** + * qca_wlan_vendor_attr_external_acs_channels: attribute to vendor subcmd + * QCA_NL80211_VENDOR_SUBCMD_EXTERNAL_ACS. This carry a list of channels + * in priority order as decided after acs operation in userspace. + * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_REASON: One of reason code from + * qca_wlan_vendor_acs_select_reason. 
+ * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_COUNT: Number of channels in
+ * this list
+ * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_LIST: Array of nested values
+ * for each channel with following attributes:
+ * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_PRIMARY,
+ * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_SECONDARY,
+ * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG0,
+ * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG1,
+ * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_WIDTH
+ * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_PRIMARY: Primary channel (u8)
+ * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_SECONDARY: Secondary channel (u8)
+ * required only for 160 / 80 + 80
+ * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG0: VHT seg0 channel (u8)
+ * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG1: VHT seg1 channel (u8)
+ * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_WIDTH: channel width (u8)
+ */
+enum qca_wlan_vendor_attr_external_acs_channels {
+	QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_INVALID = 0,
+
+	QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_REASON = 1,
+	QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_LIST = 2,
+	QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_BAND = 3,
+	QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_PRIMARY = 4,
+	QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_SECONDARY = 5,
+	QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG0 = 6,
+	QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_CENTER_SEG1 = 7,
+	QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_WIDTH = 8,
+
+	/* keep last */
+	QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_LAST,
+	QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_MAX =
+		QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_CHANNEL_LAST - 1
+};
+
+/**
+ * qca_wlan_vendor_acs_select_reason: This represents the different reasons why
+ * the ACS has to be triggered. These parameters are used by
+ * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_REASON and
+ * QCA_NL80211_VENDOR_SUBCMD_ACS_SET_CHANNELS
+ * @QCA_WLAN_VENDOR_ACS_SELECT_REASON_INIT: Represents the reason that the
+ * ACS triggered during the AP start
+ * @QCA_WLAN_VENDOR_ACS_SELECT_REASON_DFS: Represents the reason that
+ * DFS found with current channel
+ * @QCA_WLAN_VENDOR_ACS_SELECT_REASON_LTE_COEX: Represents the reason that
+ * LTE CO-Exist in current band
+ */
+enum qca_wlan_vendor_acs_select_reason {
+	QCA_WLAN_VENDOR_ACS_SELECT_REASON_INIT,
+	QCA_WLAN_VENDOR_ACS_SELECT_REASON_DFS,
+	QCA_WLAN_VENDOR_ACS_SELECT_REASON_LTE_COEX,
+};
+
+/**
+ * enum qca_wlan_gpio_attr - Parameters for GPIO configuration
+ */
+enum qca_wlan_gpio_attr {
+	QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_INVALID = 0,
+	/* Unsigned 32-bit attribute for GPIO command */
+	QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_COMMAND,
+	/* Unsigned 32-bit attribute for GPIO PIN number to configure */
+	QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_PINNUM,
+	/* Unsigned 32-bit attribute for GPIO value to configure */
+	QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_VALUE,
+	/* Unsigned 32-bit attribute for GPIO pull type */
+	QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_PULL_TYPE,
+	/* Unsigned 32-bit attribute for GPIO interrupt mode */
+	QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_INTR_MODE,
+
+	/* keep last */
+	QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_LAST,
+	QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_MAX =
+		QCA_WLAN_VENDOR_ATTR_GPIO_PARAM_LAST - 1,
+};
+
+/**
+ * qca_wlan_set_qdepth_thresh_attr - Parameters for setting
+ * MSDUQ depth threshold per peer per tid in the target
+ *
+ * Associated Vendor Command:
+ * QCA_NL80211_VENDOR_SUBCMD_SET_QDEPTH_THRESH
+ */
+enum qca_wlan_set_qdepth_thresh_attr {
+	QCA_WLAN_VENDOR_ATTR_QDEPTH_THRESH_INVALID = 0,
+	/* 6-byte MAC address */
+	QCA_WLAN_VENDOR_ATTR_QDEPTH_THRESH_MAC_ADDR,
+	/* Unsigned 32-bit attribute for holding the TID */
+	QCA_WLAN_VENDOR_ATTR_QDEPTH_THRESH_TID,
+	/* Unsigned 32-bit attribute for holding the update mask
+	 * bit 0 - Update
high priority msdu qdepth threshold
+	 * bit 1 - Update low priority msdu qdepth threshold
+	 * bit 2 - Update UDP msdu qdepth threshold
+	 * bit 3 - Update Non UDP msdu qdepth threshold
+	 * rest of bits are reserved
+	 */
+	QCA_WLAN_VENDOR_ATTR_QDEPTH_THRESH_UPDATE_MASK,
+	/* Unsigned 32-bit attribute for holding the threshold value */
+	QCA_WLAN_VENDOR_ATTR_QDEPTH_THRESH_VALUE,
+
+	/* keep last */
+	QCA_WLAN_VENDOR_ATTR_QDEPTH_THRESH_LAST,
+	QCA_WLAN_VENDOR_ATTR_QDEPTH_THRESH_MAX =
+		QCA_WLAN_VENDOR_ATTR_QDEPTH_THRESH_LAST - 1,
+};
+
+/**
+ * qca_wlan_vendor_attr_external_acs_policy: Attribute values for
+ * QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_POLICY to the vendor subcmd
+ * QCA_NL80211_VENDOR_SUBCMD_EXTERNAL_ACS. This represents the
+ * external ACS policies to select the channels w.r.t. the PCL weights.
+ * (QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_EVENT_PCL represents the channels and
+ * their PCL weights.)
+ * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_POLICY_PCL_MANDATORY: Mandatory to
+ * select a channel with non-zero PCL weight.
+ * @QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_POLICY_PCL_PREFERRED: Prefer a
+ * channel with non-zero PCL weight.
+ *
+ */
+enum qca_wlan_vendor_attr_external_acs_policy {
+	QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_POLICY_PCL_PREFERRED,
+	QCA_WLAN_VENDOR_ATTR_EXTERNAL_ACS_POLICY_PCL_MANDATORY,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_spectral_scan - Spectral scan config parameters
+ */
+enum qca_wlan_vendor_attr_spectral_scan {
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_INVALID = 0,
+	/*
+	 * Number of times the chip enters spectral scan mode before
+	 * deactivating spectral scans. When set to 0, chip will enter spectral
+	 * scan mode continuously. u32 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_COUNT = 1,
+	/*
+	 * Spectral scan period. Period increment resolution is 256*Tclk,
+	 * where Tclk = 1/44 MHz (Gmode), 1/40 MHz (Amode). u32 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_PERIOD = 2,
+	/* Spectral scan priority.
u32 attribute. */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PRIORITY = 3,
+	/* Number of FFT data points to compute. u32 attribute. */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_SIZE = 4,
+	/*
+	 * Enable targeted gain change before starting the spectral scan FFT.
+	 * u32 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_GC_ENA = 5,
+	/* Restart a queued spectral scan. u32 attribute. */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RESTART_ENA = 6,
+	/*
+	 * Noise floor reference number for the calculation of bin power.
+	 * u32 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NOISE_FLOOR_REF = 7,
+	/*
+	 * Disallow spectral scan triggers after TX/RX packets by setting
+	 * this delay value to roughly SIFS time period or greater.
+	 * u32 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_INIT_DELAY = 8,
+	/*
+	 * Number of strong bins (inclusive) per sub-channel, below
+	 * which a signal is declared a narrow band tone. u32 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NB_TONE_THR = 9,
+	/*
+	 * Specify the threshold over which a bin is declared strong (for
+	 * scan bandwidth analysis). u32 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_STR_BIN_THR = 10,
+	/* Spectral scan report mode. u32 attribute. */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_WB_RPT_MODE = 11,
+	/*
+	 * RSSI report mode, if the ADC RSSI is below
+	 * QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_THR,
+	 * then FFTs will not trigger, but timestamps and summaries get
+	 * reported. u32 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_RPT_MODE = 12,
+	/*
+	 * ADC RSSI must be greater than or equal to this threshold (signed dB)
+	 * to ensure spectral scan reporting with normal error code.
+	 * u32 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_THR = 13,
+	/*
+	 * Format of frequency bin magnitude for spectral scan triggered FFTs:
+	 * 0: linear magnitude, 1: log magnitude (20*log10(lin_mag)).
+	 * u32 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PWR_FORMAT = 14,
+	/*
+	 * Format of FFT report to software for spectral scan triggered FFTs.
+	 * 0: No FFT report (only spectral scan summary report)
+	 * 1: 2-dword summary of metrics for each completed FFT + spectral scan
+	 * report
+	 * 2: 2-dword summary of metrics for each completed FFT + 1x-oversampled
+	 * bins (in-band) per FFT + spectral scan summary report
+	 * 3: 2-dword summary of metrics for each completed FFT + 2x-oversampled
+	 * bins (all) per FFT + spectral scan summary report
+	 * u32 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RPT_MODE = 15,
+	/*
+	 * Number of LSBs to shift out in order to scale the FFT bins.
+	 * u32 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_BIN_SCALE = 16,
+	/*
+	 * Set to 1 (with spectral_scan_pwr_format=1), to report bin magnitudes
+	 * in dBm power. u32 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DBM_ADJ = 17,
+	/*
+	 * Per chain enable mask to select input ADC for search FFT.
+	 * u32 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_CHN_MASK = 18,
+	/*
+	 * An unsigned 64-bit integer provided by host driver to identify the
+	 * spectral scan request. This attribute is included in the scan
+	 * response message for @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_START
+	 * and used as an attribute in
+	 * @QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_STOP to identify the
+	 * specific scan to be stopped.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_COOKIE = 19,
+	/* Skip interval for FFT reports. u32 attribute */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_PERIOD = 20,
+	/* Set to report only one set of FFT results.
+	 * u32 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SHORT_REPORT = 21,
+	/* Debug level for spectral module in driver.
+	 * 0 : Verbosity level 0
+	 * 1 : Verbosity level 1
+	 * 2 : Verbosity level 2
+	 * 3 : Matched filterID display
+	 * 4 : One time dump of FFT report
+	 * u32 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DEBUG_LEVEL = 22,
+	/* Type of spectral scan request. u32 attribute.
+	 * It uses values defined in enum
+	 * qca_wlan_vendor_attr_spectral_scan_request_type.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE = 23,
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_MAX =
+		QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_spectral_diag_stats - Used by the vendor command
+ * QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_DIAG_STATS.
+ */
+enum qca_wlan_vendor_attr_spectral_diag_stats {
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_INVALID = 0,
+	/* Number of spectral TLV signature mismatches.
+	 * u64 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_SIG_MISMATCH = 1,
+	/* Number of spectral phyerror events with insufficient length when
+	 * parsing for secondary 80 search FFT report. u64 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_SEC80_SFFT_INSUFFLEN = 2,
+	/* Number of spectral phyerror events without secondary 80
+	 * search FFT report. u64 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_NOSEC80_SFFT = 3,
+	/* Number of spectral phyerror events with vht operation segment 1 id
+	 * mismatches in search fft report. u64 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_VHTSEG1ID_MISMATCH = 4,
+	/* Number of spectral phyerror events with vht operation segment 2 id
+	 * mismatches in search fft report. u64 attribute.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_VHTSEG2ID_MISMATCH = 5,
+
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_MAX =
+		QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_spectral_cap - Used by the vendor command
+ * QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_CAP_INFO.
+ */
+enum qca_wlan_vendor_attr_spectral_cap {
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_INVALID = 0,
+	/* Flag attribute to indicate phydiag capability */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_PHYDIAG = 1,
+	/* Flag attribute to indicate radar detection capability */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_RADAR = 2,
+	/* Flag attribute to indicate spectral capability */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_SPECTRAL = 3,
+	/* Flag attribute to indicate advanced spectral capability */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_ADVANCED_SPECTRAL = 4,
+	/* Spectral hardware generation. u32 attribute.
+	 * It uses values defined in enum
+	 * qca_wlan_vendor_spectral_scan_cap_hw_gen.
+	 */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_HW_GEN = 5,
+
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_MAX =
+		QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_spectral_scan_status - used by the vendor command
+ * QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_STATUS.
+ */
+enum qca_wlan_vendor_attr_spectral_scan_status {
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_INVALID = 0,
+	/* Flag attribute to indicate whether spectral scan is enabled */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_IS_ENABLED = 1,
+	/* Flag attribute to indicate whether spectral scan is in progress */
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_IS_ACTIVE = 2,
+
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_MAX =
+		QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_config_latency_level - Level for
+ * wlan latency module.
+ *
+ * Wi-Fi functionality such as scan/roaming/adaptive power saving can take
+ * data exchange out of service, which has a big impact on latency.
+ * Latency-sensitive applications over Wi-Fi are intolerant of such
+ * operations and thus can configure them to meet their respective needs.
+ * It is well understood by such applications
+ * that altering the default behavior would degrade the Wi-Fi functionality
+ * w.r.t the above pointed WLAN operations.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_NORMAL:
+ * Default WLAN operation level, which is throughput oriented.
+ * @QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_MODERATE:
+ * Use moderate level to improve latency by limiting scan duration.
+ * @QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_LOW:
+ * Use low latency level to benefit applications like concurrent
+ * downloading or video streaming via constrained scan/adaptive PS.
+ * @QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_ULTRALOW:
+ * Use ultra low latency level to benefit gaming/voice
+ * applications via constrained scan/roaming/adaptive PS.
+ */
+enum qca_wlan_vendor_attr_config_latency_level {
+	QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_INVALID = 0,
+	QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_NORMAL = 1,
+	QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_MODERATE = 2,
+	QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_LOW = 3,
+	QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_ULTRALOW = 4,
+
+	/* keep last */
+	QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_MAX =
+		QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_AFTER_LAST - 1,
+};
+
+/**
+ * qca_wlan_vendor_attr_spectral_scan_request_type: Attribute values for
+ * QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE to the vendor subcmd
+ * QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_START. This represents the
+ * spectral scan request types.
+ * @QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_SCAN_AND_CONFIG: Request to
+ * set the spectral parameters and start scan.
+ * @QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_SCAN: Request to
+ * only start the spectral scan.
+ * @QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_CONFIG: Request to
+ * only set the spectral parameters.
+ */
+enum qca_wlan_vendor_attr_spectral_scan_request_type {
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_SCAN_AND_CONFIG,
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_SCAN,
+	QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_CONFIG,
+};
+
+/**
+ * qca_wlan_vendor_spectral_scan_cap_hw_gen: Attribute values for
+ * QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_HW_GEN to the vendor subcmd
+ * QCA_NL80211_VENDOR_SUBCMD_SPECTRAL_SCAN_GET_CAP_INFO. This represents the
+ * spectral hardware generation.
+ * @QCA_WLAN_VENDOR_SPECTRAL_SCAN_CAP_HW_GEN_1: generation 1
+ * @QCA_WLAN_VENDOR_SPECTRAL_SCAN_CAP_HW_GEN_2: generation 2
+ * @QCA_WLAN_VENDOR_SPECTRAL_SCAN_CAP_HW_GEN_3: generation 3
+ */
+enum qca_wlan_vendor_spectral_scan_cap_hw_gen {
+	QCA_WLAN_VENDOR_SPECTRAL_SCAN_CAP_HW_GEN_1 = 0,
+	QCA_WLAN_VENDOR_SPECTRAL_SCAN_CAP_HW_GEN_2 = 1,
+	QCA_WLAN_VENDOR_SPECTRAL_SCAN_CAP_HW_GEN_3 = 2,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_rropavail_info - Specifies whether Representative
+ * RF Operating Parameter (RROP) information is available, and if so, at which
+ * point in the application-driver interaction sequence it can be retrieved by
+ * the application from the driver. This point may vary by architecture and
+ * other factors. This is a u16 value.
+ */
+enum qca_wlan_vendor_attr_rropavail_info {
+	/* RROP information is unavailable. */
+	QCA_WLAN_VENDOR_ATTR_RROPAVAIL_INFO_UNAVAILABLE,
+	/* RROP information is available and the application can retrieve the
+	 * information after receiving an QCA_NL80211_VENDOR_SUBCMD_EXTERNAL_ACS
+	 * event from the driver.
+	 */
+	QCA_WLAN_VENDOR_ATTR_RROPAVAIL_INFO_EXTERNAL_ACS_START,
+	/* RROP information is available only after a vendor specific scan
+	 * (requested using QCA_NL80211_VENDOR_SUBCMD_TRIGGER_SCAN) has
+	 * successfully completed.
The application can retrieve the information
+	 * after receiving the QCA_NL80211_VENDOR_SUBCMD_SCAN_DONE event from
+	 * the driver.
+	 */
+	QCA_WLAN_VENDOR_ATTR_RROPAVAIL_INFO_VSCAN_END,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_rrop_info - Specifies vendor specific
+ * Representative RF Operating Parameter (RROP) information. It is sent for the
+ * vendor command QCA_NL80211_VENDOR_SUBCMD_GET_RROP_INFO. This information is
+ * intended for use by external Auto Channel Selection applications. It provides
+ * guidance values for some RF parameters that are used by the system during
+ * operation. These values could vary by channel, band, radio, and so on.
+ */
+enum qca_wlan_vendor_attr_rrop_info {
+	QCA_WLAN_VENDOR_ATTR_RROP_INFO_INVALID = 0,
+
+	/* Representative Tx Power List (RTPL) which has an array of nested
+	 * values as per attributes in enum qca_wlan_vendor_attr_rtplinst.
+	 */
+	QCA_WLAN_VENDOR_ATTR_RROP_INFO_RTPL = 1,
+
+	QCA_WLAN_VENDOR_ATTR_RROP_INFO_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_RROP_INFO_MAX =
+		QCA_WLAN_VENDOR_ATTR_RROP_INFO_AFTER_LAST - 1
+};
+
+/**
+ * enum qca_wlan_vendor_attr_rtplinst - Specifies attributes for individual list
+ * entry instances in the Representative Tx Power List (RTPL). It provides
+ * simplified power values intended for helping external Auto channel Selection
+ * applications compare potential Tx power performance between channels, other
+ * operating conditions remaining identical. These values are not necessarily
+ * the actual Tx power values that will be used by the system. They are also not
+ * necessarily the max or average values that will be used. Instead, they are
+ * relative, summarized keys for algorithmic use computed by the driver or
+ * underlying firmware considering a number of vendor specific factors.
+ */
+enum qca_wlan_vendor_attr_rtplinst {
+	QCA_WLAN_VENDOR_ATTR_RTPLINST_INVALID = 0,
+
+	/* Primary channel number (u8) */
+	QCA_WLAN_VENDOR_ATTR_RTPLINST_PRIMARY = 1,
+	/* Representative Tx power in dBm (s32) with emphasis on throughput. */
+	QCA_WLAN_VENDOR_ATTR_RTPLINST_TXPOWER_THROUGHPUT = 2,
+	/* Representative Tx power in dBm (s32) with emphasis on range. */
+	QCA_WLAN_VENDOR_ATTR_RTPLINST_TXPOWER_RANGE = 3,
+
+	QCA_WLAN_VENDOR_ATTR_RTPLINST_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_RTPLINST_MAX =
+		QCA_WLAN_VENDOR_ATTR_RTPLINST_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_mac - Used by the vendor command
+ * QCA_NL80211_VENDOR_SUBCMD_WLAN_MAC_INFO.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_MAC_INFO: MAC mode info list which has an
+ * array of nested values as per attributes in
+ * enum qca_wlan_vendor_attr_mac_mode_info.
+ */
+enum qca_wlan_vendor_attr_mac {
+	QCA_WLAN_VENDOR_ATTR_MAC_INVALID = 0,
+	QCA_WLAN_VENDOR_ATTR_MAC_INFO = 1,
+
+	/* keep last */
+	QCA_WLAN_VENDOR_ATTR_MAC_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_MAC_MAX =
+		QCA_WLAN_VENDOR_ATTR_MAC_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_mac_iface_info - Information of the connected
+ * WiFi netdev interface on a respective MAC. Used by the attribute
+ * QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO_ID: Wi-Fi Netdev's interface id(u32).
+ * @QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO_FREQ: Associated frequency in MHz of
+ * the connected Wi-Fi interface(u32).
+ */
+enum qca_wlan_vendor_attr_mac_iface_info {
+	QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO_INVALID = 0,
+	QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO_IFINDEX = 1,
+	QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO_FREQ = 2,
+
+	/* keep last */
+	QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO_MAX =
+		QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_mac_info - Points to the MAC information.
+ * Used by the attribute QCA_WLAN_VENDOR_ATTR_MAC_INFO of the
+ * vendor command QCA_NL80211_VENDOR_SUBCMD_WLAN_MAC_INFO.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_MAC_INFO_MAC_ID: Hardware MAC ID associated for the
+ * MAC (u32)
+ * @QCA_WLAN_VENDOR_ATTR_MAC_INFO_BAND: Band supported by the respective MAC
+ * at a given point. This is a u32 bitmask of BIT(NL80211_BAND_*) as described
+ * in enum nl80211_band.
+ * @QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO: Refers to list of WLAN net dev
+ * interfaces associated with this MAC. Represented by enum
+ * qca_wlan_vendor_attr_mac_iface_info.
+ */
+enum qca_wlan_vendor_attr_mac_info {
+	QCA_WLAN_VENDOR_ATTR_MAC_INFO_INVALID = 0,
+	QCA_WLAN_VENDOR_ATTR_MAC_INFO_MAC_ID = 1,
+	QCA_WLAN_VENDOR_ATTR_MAC_INFO_BAND = 2,
+	QCA_WLAN_VENDOR_ATTR_MAC_IFACE_INFO = 3,
+
+	/* keep last */
+	QCA_WLAN_VENDOR_ATTR_MAC_INFO_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_MAC_INFO_MAX =
+		QCA_WLAN_VENDOR_ATTR_MAC_INFO_AFTER_LAST - 1,
+
+};
+
+#if !(defined (SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) && \
+	(LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) && \
+	!(defined(WITH_BACKPORTS))
+
+/* Backport shim for kernels whose cfg80211_vendor_event_alloc() does not
+ * take a wireless_dev: allocate with the wiphy-only variant, then splice an
+ * NL80211_ATTR_IFINDEX attribute in ahead of the vendor-data nest.
+ */
+static inline struct sk_buff *
+backported_cfg80211_vendor_event_alloc(struct wiphy *wiphy,
+				       struct wireless_dev *wdev,
+				       int approxlen,
+				       int event_idx, gfp_t gfp)
+{
+	struct sk_buff *skb;
+
+	skb = cfg80211_vendor_event_alloc(wiphy, approxlen, event_idx, gfp);
+
+	if (skb && wdev) {
+		struct nlattr *attr;
+		/* NOTE(review): assumes wdev->netdev is non-NULL; a
+		 * netdev-less wdev (e.g. P2P-Device) would dereference NULL
+		 * here -- confirm callers only pass netdev-backed wdevs.
+		 */
+		u32 ifindex = wdev->netdev->ifindex;
+
+		/* skb->cb[2] is treated as the pointer to the vendor-data
+		 * nest (presumably started by the allocator -- verify against
+		 * the kernel version in use): cancel that nest, emit IFINDEX
+		 * first, then restart the nest and store the new nest pointer
+		 * back into cb[2] so later attributes land inside it.
+		 */
+		nla_nest_cancel(skb, ((void **)skb->cb)[2]);
+		if (nla_put_u32(skb, NL80211_ATTR_IFINDEX, ifindex))
+			goto nla_fail;
+
+		attr = nla_nest_start(skb, NL80211_ATTR_VENDOR_DATA);
+		((void **)skb->cb)[2] = attr;
+	}
+
+	return skb;
+
+nla_fail:
+	/* On nla_put failure the half-built event is unusable; drop it. */
+	kfree_skb(skb);
+
+	return NULL;
+}
+#define cfg80211_vendor_event_alloc backported_cfg80211_vendor_event_alloc
+#endif
+
+/**
+ * enum he_fragmentation_val - HE fragmentation support values
+ * Indicates level of dynamic fragmentation that is supported by
+ * a STA as a recipient.
+ * HE fragmentation values are defined as per 11ax spec and are used in + * HE capability IE to advertise the support. These values are validated + * in the driver to check the device capability and advertised in the HE + * capability element. + * + * @HE_FRAG_DISABLE: no support for dynamic fragmentation + * @HE_FRAG_LEVEL1: support for dynamic fragments that are + * contained within an MPDU or S-MPDU, no support for dynamic fragments + * within an A-MPDU that is not an S-MPDU. + * @HE_FRAG_LEVEL2: support for dynamic fragments that are + * contained within an MPDU or S-MPDU and support for up to one dynamic + * fragment for each MSDU, each A-MSDU if supported by the recipient, and + * each MMPDU within an A-MPDU or multi-TID A-MPDU that is not an + * MPDU or S-MPDU. + * @HE_FRAG_LEVEL3: support for dynamic fragments that are + * contained within an MPDU or S-MPDU and support for multiple dynamic + * fragments for each MSDU and for each A-MSDU if supported by the + * recipient within an A-MPDU or multi-TID AMPDU and up to one dynamic + * fragment for each MMPDU in a multi-TID A-MPDU that is not an S-MPDU. + */ +enum he_fragmentation_val { + HE_FRAG_DISABLE, + HE_FRAG_LEVEL1, + HE_FRAG_LEVEL2, + HE_FRAG_LEVEL3, +}; + +/** + * enum he_mcs_config - HE MCS support configuration + * + * Configures the HE Tx/Rx MCS map in HE capability IE for given bandwidth. + * These values are used in driver to configure the HE MCS map to advertise + * Tx/Rx MCS map in HE capability and these values are applied for all the + * streams supported by the device. To configure MCS for different bandwidths, + * vendor command needs to be sent using this attribute with appropriate value. + * For example, to configure HE_80_MCS_0_7, send vendor command using HE MCS + * attribute with QCA_WLAN_VENDOR_ATTR_HE_80_MCS0_7. 
And to configure HE MCS + * for HE_160_MCS0_11 send this command using HE MCS config attribute with + * value QCA_WLAN_VENDOR_ATTR_HE_160_MCS0_11; + * + * @HE_80_MCS0_7: support for HE 80/40/20MHz MCS 0 to 7 + * @HE_80_MCS0_9: support for HE 80/40/20MHz MCS 0 to 9 + * @HE_80_MCS0_11: support for HE 80/40/20MHz MCS 0 to 11 + * @HE_160_MCS0_7: support for HE 160MHz MCS 0 to 7 + * @HE_160_MCS0_9: support for HE 160MHz MCS 0 to 9 + * @HE_160_MCS0_11: support for HE 160MHz MCS 0 to 11 + * @HE_80p80_MCS0_7: support for HE 80p80MHz MCS 0 to 7 + * @HE_80p80_MCS0_9: support for HE 80p80MHz MCS 0 to 9 + * @HE_80p80_MCS0_11: support for HE 80p80MHz MCS 0 to 11 + */ +enum he_mcs_config { + HE_80_MCS0_7 = 0, + HE_80_MCS0_9 = 1, + HE_80_MCS0_11 = 2, + HE_160_MCS0_7 = 4, + HE_160_MCS0_9 = 5, + HE_160_MCS0_11 = 6, + HE_80p80_MCS0_7 = 8, + HE_80p80_MCS0_9 = 9, + HE_80p80_MCS0_11 = 10, +}; + +/** + * enum qca_wlan_ba_session_config - BA session configuration + * + * Indicates the configuration values for BA session configuration attribute. + * + * @QCA_WLAN_ADD_BA: Establish a new BA session with given configuration. + * @QCA_WLAN_DELETE_BA: Delete the existing BA session for given TID. + */ +enum qca_wlan_ba_session_config { + QCA_WLAN_ADD_BA = 1, + QCA_WLAN_DELETE_BA = 2, +}; + +/** + * enum qca_wlan_ac_type - access category type + * + * Indicates the access category type value. + * + * @QCA_WLAN_AC_BE: BE access category + * @QCA_WLAN_AC_BK: BK access category + * @QCA_WLAN_AC_VI: VI access category + * @QCA_WLAN_AC_VO: VO access category + * @QCA_WLAN_AC_ALL: All ACs + */ +enum qca_wlan_ac_type { + QCA_WLAN_AC_BE = 0, + QCA_WLAN_AC_BK = 1, + QCA_WLAN_AC_VI = 2, + QCA_WLAN_AC_VO = 3, + QCA_WLAN_AC_ALL = 4, +}; + +/** + * enum qca_wlan_he_ltf_cfg - HE LTF configuration + * + * Indicates the HE LTF configuration value. 
+ * + * @QCA_WLAN_HE_LTF_AUTO: HE-LTF is automatically set to + * the mandatory HE-LTF based on the GI setting + * @QCA_WLAN_HE_LTF_1X: 1X HE LTF is 3.2us LTF + * @QCA_WLAN_HE_LTF_2X: 2X HE LTF is 6.4us LTF + * @QCA_WLAN_HE_LTF_4X: 4X HE LTF is 12.8us LTF + */ +enum qca_wlan_he_ltf_cfg { + QCA_WLAN_HE_LTF_AUTO = 0, + QCA_WLAN_HE_LTF_1X = 1, + QCA_WLAN_HE_LTF_2X = 2, + QCA_WLAN_HE_LTF_4X = 3, +}; + +/* Attributes for data used by + * QCA_NL80211_VENDOR_SUBCMD_WIFI_TEST_CONFIGURATION + */ +enum qca_wlan_vendor_attr_wifi_test_config { + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_INVALID = 0, + /* 8-bit unsigned value to configure the driver to enable/disable + * WMM feature. This attribute is used to configure testbed device. + * 1-enable, 0-disable + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_WMM_ENABLE = 1, + + /* 8-bit unsigned value to configure the driver to accept/reject + * the addba request from peer. This attribute is used to configure + * the testbed device. + * 1-accept addba, 0-reject addba + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_ACCEPT_ADDBA_REQ = 2, + + /* 8-bit unsigned value to configure the driver to send or not to + * send the addba request to peer. + * This attribute is used to configure the testbed device. + * 1-send addba, 0-do not send addba + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_SEND_ADDBA_REQ = 3, + + /* 8-bit unsigned value to indicate the HE fragmentation support. + * Uses enum he_fragmentation_val values. + * This attribute is used to configure the testbed device to + * allow the advertised hardware capabilities to be downgraded + * for testing purposes. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_FRAGMENTATION = 4, + + /* 8-bit unsigned value to indicate the HE MCS support. + * Uses enum he_mcs_config values. + * This attribute is used to configure the testbed device to + * allow the advertised hardware capabilities to be downgraded + * for testing purposes. 
+ */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_MCS = 5, + + /* 8-bit unsigned value to configure the driver to allow or not to + * allow the connection with WEP/TKIP in HT/VHT/HE modes. + * This attribute is used to configure the testbed device. + * 1-allow wep/tkip in HT/VHT/HE, 0-do not allow wep/tkip + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_WEP_TKIP_IN_HE = 6, + + /* 8-bit unsigned value to configure the driver to add a + * new BA session or delete the existing BA session for + * given TID. ADDBA command uses the buffer size and tid + * configuration if user specifies the values else default + * value for buffer size is used for all tids if the tid + * also not specified. For DEL_BA command TID value is + * required to process the command. + * Uses enum qca_wlan_ba_session_config values. + * This attribute is used to configure the testbed device. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_ADD_DEL_BA_SESSION = 7, + + /* 8-bit unsigned value to configure the buffer size in addba + * request and response frames. + * This attribute is used to configure the testbed device. + * The range of the value is 0 to 255 + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_ADDBA_BUFF_SIZE = 8, + + /* 8-bit unsigned value to configure the buffer size in addba + * request and response frames. + * This attribute is used to configure the testbed device. + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_BA_TID = 9, + + /* 8-bit unsigned value to configure the no ack policy. + * To configure no ack policy, access category value + * is required to process the command. + * This attribute is used to configure the testbed device. + * 1 - enable no ack, 0 - disable no ack + */ + QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_ENABLE_NO_ACK = 10, + + /* 8-bit unsigned value to configure the AC for no ack policy + * This attribute is used to configure the testbed device. 
+	 * uses the enum qca_wlan_ac_type values
+	 */
+	QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_NO_ACK_AC = 11,
+
+	/* 8-bit unsigned value to configure the HE LTF
+	 * This attribute is used to configure the testbed device.
+	 * Uses the enum qca_wlan_he_ltf_cfg values.
+	 */
+	QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_LTF = 12,
+
+	/* 8-bit unsigned value to configure the tx beamformee.
+	 * This attribute is used to configure the testbed device.
+	 * 1 - enable, 0 - disable.
+	 */
+	QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_ENABLE_TX_BEAMFORMEE = 13,
+
+	/* 8-bit unsigned value to configure twt request support.
+	 * This attribute is used to configure the testbed device.
+	 * 1-enable, 0-disable.
+	 */
+	QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_HE_TWT_REQ_SUPPORT = 37,
+
+	/* keep last */
+	QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_MAX =
+		QCA_WLAN_VENDOR_ATTR_WIFI_TEST_CONFIG_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_throughput_level - Current throughput level
+ *
+ * Indicates the current level of throughput calculated by driver. The driver
+ * may choose different thresholds to decide whether the throughput level is
+ * low or medium or high based on variety of parameters like physical link
+ * capacity of current connection, number of packets being dispatched per
+ * second etc. The throughput level events might not be consistent with the
+ * actual current throughput value being observed.
+ *
+ * @QCA_WLAN_THROUGHPUT_LEVEL_LOW: Low level of throughput
+ * @QCA_WLAN_THROUGHPUT_LEVEL_MEDIUM: Medium level of throughput
+ * @QCA_WLAN_THROUGHPUT_LEVEL_HIGH: High level of throughput
+ */
+enum qca_wlan_throughput_level {
+	QCA_WLAN_THROUGHPUT_LEVEL_LOW = 0,
+	QCA_WLAN_THROUGHPUT_LEVEL_MEDIUM = 1,
+	QCA_WLAN_THROUGHPUT_LEVEL_HIGH = 2,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_throughput_change - Vendor subcmd attributes to
+ * report throughput changes from driver to user space.
enum values are used + * for NL attributes sent with + * %QCA_NL80211_VENDOR_SUBCMD_THROUGHPUT_CHANGE_EVENT sub command. + */ +enum qca_wlan_vendor_attr_throughput_change { + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_INVALID = 0, + /* + * Indicates the direction of throughput in which the change is being + * reported. u8 attribute. Value is 0 for TX and 1 for RX. + */ + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_DIRECTION = 1, + + /* + * Indicates the newly observed throughput level. + * qca_wlan_throughput_level describes the possible range of values. + * u8 attribute. + */ + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_THROUGHPUT_LEVEL = 2, + + /* + * Indicates the driver's guidance on the new value to be set to + * kernel's tcp parameter tcp_limit_output_bytes. u32 attribute. Driver + * may optionally include this attribute. + */ + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_TCP_LIMIT_OUTPUT_BYTES = 3, + + /* + * Indicates the driver's guidance on the new value to be set to + * kernel's tcp parameter tcp_adv_win_scale. s8 attribute. Possible + * values are from -31 to 31. Driver may optionally include this + * attribute. + */ + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_TCP_ADV_WIN_SCALE = 4, + + /* + * Indicates the driver's guidance on the new value to be set to + * kernel's tcp parameter tcp_delack_seg. u32 attribute. Driver may + * optionally include this attribute. + */ + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_TCP_DELACK_SEG = 5, + + /* keep last */ + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_MAX = + QCA_WLAN_VENDOR_ATTR_THROUGHPUT_CHANGE_AFTER_LAST - 1, +}; + +/* enum qca_wlan_nan_subcmd_type - Type of NAN command used by attribute + * QCA_WLAN_VENDOR_ATTR_NAN_SUBCMD_TYPE as a part of vendor command + * QCA_NL80211_VENDOR_SUBCMD_NAN_EXT. 
+ */ +enum qca_wlan_nan_ext_subcmd_type { + /* Subcmd of type NAN Enable Request */ + QCA_WLAN_NAN_EXT_SUBCMD_TYPE_ENABLE_REQ = 1, + /* Subcmd of type NAN Disable Request */ + QCA_WLAN_NAN_EXT_SUBCMD_TYPE_DISABLE_REQ = 2, +}; + +/** + * enum qca_wlan_vendor_attr_nan_params - Used by the vendor command + * QCA_NL80211_VENDOR_SUBCMD_NAN_EXT. + */ +enum qca_wlan_vendor_attr_nan_params { + QCA_WLAN_VENDOR_ATTR_NAN_INVALID = 0, + /* Carries NAN command for firmware component. Every vendor command + * QCA_NL80211_VENDOR_SUBCMD_NAN_EXT must contain this attribute with a + * payload containing the NAN command. NLA_BINARY attribute. + */ + QCA_WLAN_VENDOR_ATTR_NAN_CMD_DATA = 1, + /* Indicates the type of NAN command sent with + * QCA_NL80211_VENDOR_SUBCMD_NAN_EXT. enum qca_wlan_nan_ext_subcmd_type + * describes the possible range of values. This attribute is mandatory + * if the command being issued is either + * QCA_WLAN_NAN_EXT_SUBCMD_TYPE_ENABLE_REQ or + * QCA_WLAN_NAN_EXT_SUBCMD_TYPE_DISABLE_REQ. NLA_U32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_NAN_SUBCMD_TYPE = 2, + /* Frequency (in MHz) of primary NAN discovery social channel in 2.4 GHz + * band. This attribute is mandatory when command type is + * QCA_WLAN_NAN_EXT_SUBCMD_TYPE_ENABLE_REQ. NLA_U32 attribute. + */ + QCA_WLAN_VENDOR_ATTR_NAN_DISC_24GHZ_BAND_FREQ = 3, + /* Frequency (in MHz) of secondary NAN discovery social channel in 5 GHz + * band. This attribute is optional and should be included when command + * type is QCA_WLAN_NAN_EXT_SUBCMD_TYPE_ENABLE_REQ and NAN discovery + * has to be started on 5GHz along with 2.4GHz. NLA_U32 attribute. 
+	 */
+	QCA_WLAN_VENDOR_ATTR_NAN_DISC_5GHZ_BAND_FREQ = 4,
+
+	/* keep last */
+	QCA_WLAN_VENDOR_ATTR_NAN_PARAMS_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_NAN_PARAMS_MAX =
+		QCA_WLAN_VENDOR_ATTR_NAN_PARAMS_AFTER_LAST - 1
+};
+
+/**
+ * enum qca_coex_config_profiles - This enum defines different types of
+ * traffic streams that can be prioritized one over the other during coex
+ * scenarios.
+ * The types defined in this enum are categorized in the below manner.
+ * 0 - 31 values corresponds to WLAN
+ * 32 - 63 values corresponds to BT
+ * 64 - 95 values corresponds to Zigbee
+ * @QCA_WIFI_STA_DISCOVERY: Prioritize discovery frames for WLAN STA
+ * @QCA_WIFI_STA_CONNECTION: Prioritize connection frames for WLAN STA
+ * @QCA_WIFI_STA_CLASS_3_MGMT: Prioritize class 3 mgmt frames for WLAN STA
+ * @QCA_WIFI_STA_DATA: Prioritize data frames for WLAN STA
+ * @QCA_WIFI_STA_ALL: Prioritize all frames for WLAN STA
+ * @QCA_WIFI_SAP_DISCOVERY: Prioritize discovery frames for WLAN SAP
+ * @QCA_WIFI_SAP_CONNECTION: Prioritize connection frames for WLAN SAP
+ * @QCA_WIFI_SAP_CLASS_3_MGMT: Prioritize class 3 mgmt frames for WLAN SAP
+ * @QCA_WIFI_SAP_DATA: Prioritize data frames for WLAN SAP
+ * @QCA_WIFI_SAP_ALL: Prioritize all frames for WLAN SAP
+ * @QCA_BT_A2DP: Prioritize BT A2DP
+ * @QCA_BT_BLE: Prioritize BT BLE
+ * @QCA_BT_SCO: Prioritize BT SCO
+ * @QCA_ZB_LOW: Prioritize Zigbee Low
+ * @QCA_ZB_HIGH: Prioritize Zigbee High
+ */
+enum qca_coex_config_profiles {
+	/* 0 - 31 corresponds to WLAN */
+	QCA_WIFI_STA_DISCOVERY = 0,
+	QCA_WIFI_STA_CONNECTION = 1,
+	QCA_WIFI_STA_CLASS_3_MGMT = 2,
+	QCA_WIFI_STA_DATA = 3,
+	QCA_WIFI_STA_ALL = 4,
+	QCA_WIFI_SAP_DISCOVERY = 5,
+	QCA_WIFI_SAP_CONNECTION = 6,
+	QCA_WIFI_SAP_CLASS_3_MGMT = 7,
+	QCA_WIFI_SAP_DATA = 8,
+	QCA_WIFI_SAP_ALL = 9,
+	QCA_WIFI_CASE_MAX = 31,
+	/* 32 - 63 corresponds to BT */
+	QCA_BT_A2DP = 32,
+	QCA_BT_BLE = 33,
+	QCA_BT_SCO = 34,
+	QCA_BT_CASE_MAX = 63,
+	/* 64 - 95 corresponds to Zigbee */
+	QCA_ZB_LOW = 64,
+	
QCA_ZB_HIGH = 65,
+	QCA_ZB_CASE_MAX = 95,
+	/* 0xff is default value if the u8 profile value is not set. */
+	QCA_PROFILE_DEFAULT_VALUE = 255
+};
+
+/**
+ * enum qca_vendor_attr_coex_config_types - Coex configurations types.
+ * This enum defines the valid set of values of coex configuration types. These
+ * values may be used by attribute
+ * %QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_CONFIG_TYPE.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_COEX_RESET: Reset all the
+ * weights to default values.
+ * @QCA_WLAN_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_COEX_START: Start to config
+ * weights with configurability value.
+ */
+enum qca_vendor_attr_coex_config_types {
+	QCA_WLAN_VENDOR_ATTR_COEX_CONFIG_INVALID = 0,
+	QCA_WLAN_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_COEX_RESET = 1,
+	QCA_WLAN_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_COEX_START = 2,
+	QCA_WLAN_VENDOR_ATTR_COEX_CONFIG_TYPE_MAX
+};
+
+/**
+ * enum qca_vendor_attr_coex_config_three_way - Specifies vendor coex config
+ * attributes
+ * Attributes for data used by
+ * QCA_NL80211_VENDOR_SUBCMD_COEX_CONFIG
+ *
+ * @QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_CONFIG_TYPE: u32 attribute.
+ * Indicate config type.
+ * the config types are 32-bit values from qca_vendor_attr_coex_config_types
+ *
+ * @QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_1: u32 attribute.
+ * Indicate the Priority 1 profiles.
+ * the profiles are 8-bit values from enum qca_coex_config_profiles
+ * In same priority level, maximum to 4 profiles can be set here.
+ * @QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_2: u32 attribute.
+ * Indicate the Priority 2 profiles.
+ * the profiles are 8-bit values from enum qca_coex_config_profiles
+ * In same priority level, maximum to 4 profiles can be set here.
+ * @QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_3: u32 attribute.
+ * Indicate the Priority 3 profiles.
+ * the profiles are 8-bit values from enum qca_coex_config_profiles
+ * In same priority level, maximum to 4 profiles can be set here.
+ * @QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_4: u32 attribute. + * Indicate the Priority 4 profiles. + * the profiles are 8-bit values from enum qca_coex_config_profiles + * In same priority level, maximum to 4 profiles can be set here. + * NOTE: + * limitations for QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_x priority + * arrangement: + * 1: In the same u32 attribute(priority x), the profiles enum values own + * same priority level. + * 2: 0xff is default value if the u8 profile value is not set. + * 3: max to 4 rules/profiles in same priority level. + * 4: max to 4 priority level (priority 1 - priority 4) + * 5: one priority level only supports one scenario from WLAN/BT/ZB, + * hybrid rules not support. + * 6: if WMI_COEX_CONFIG_THREE_WAY_COEX_RESET called, priority x will + * remain blank to reset all parameters. + * For example: + * + * If the attributes as follow: + * priority 1: + * ------------------------------------ + * | 0xff | 0 | 1 | 2 | + * ------------------------------------ + * priority 2: + * ------------------------------------- + * | 0xff | 0xff | 0xff | 32 | + * ------------------------------------- + * priority 3: + * ------------------------------------- + * | 0xff | 0xff | 0xff | 65 | + * ------------------------------------- + * then it means: + * 1: WIFI_STA_DISCOVERY, WIFI_STA_CLASS_3_MGMT and WIFI_STA_CONNECTION + * owns same priority level. + * 2: WIFI_STA_DISCOVERY, WIFI_STA_CLASS_3_MGMT and WIFI_STA_CONNECTION + * has priority over BT_A2DP and ZB_HIGH. + * 3: BT_A2DP has priority over ZB_HIGH. 
+ */ + +enum qca_vendor_attr_coex_config_three_way { + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_INVALID = 0, + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_CONFIG_TYPE = 1, + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_1 = 2, + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_2 = 3, + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_3 = 4, + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_PRIORITY_4 = 5, + + /* Keep last */ + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_AFTER_LAST, + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_MAX = + QCA_VENDOR_ATTR_COEX_CONFIG_THREE_WAY_AFTER_LAST - 1, +}; + +/** + * enum qca_vendor_attr_peer_stats_cache_type - Represents peer stats cache type + * This enum defines the valid set of values of peer stats cache types. These + * values are used by attribute + * %QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_TYPE_INVALID. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_TX_RATE_STATS: Represents peer tx rate statistics. + * @QCA_WLAN_VENDOR_ATTR_PEER_RX_RATE_STATS: Represents peer rx rate statistics. + * @QCA_WLAN_VENDOR_ATTR_PEER_TX_SOJOURN_STATS: Represents peer tx sojourn + * statistics + */ +enum qca_vendor_attr_peer_stats_cache_type { + QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_TYPE_INVALID = 0, + + QCA_WLAN_VENDOR_ATTR_PEER_TX_RATE_STATS, + QCA_WLAN_VENDOR_ATTR_PEER_RX_RATE_STATS, + QCA_WLAN_VENDOR_ATTR_PEER_TX_SOJOURN_STATS, +}; + +/** + * enum qca_wlan_vendor_attr_peer_stats_cache_params - This enum defines + * attributes required for QCA_NL80211_VENDOR_SUBCMD_PEER_STATS_CACHE_FLUSH + * Attributes are required to flush peer rate statistics from driver to + * user application. + * + * @QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_TYPE: Unsigned 32-bit attribute + * Indicate peer stats cache type. + * The statistics types are 32-bit values from + * qca_vendor_attr_peer_stats_cache_type + * @QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_PEER_MAC: Unsigned 8-bit array + * of size 6, representing peer mac address. 
+ * @QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_DATA: Opaque data attribute
+ * containing buffer of statistics to send event to application layer entity.
+ * @QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_PEER_COOKIE: Unsigned 64-bit attribute
+ * representing cookie for peer unique session.
+ */
+enum qca_wlan_vendor_attr_peer_stats_cache_params {
+	QCA_WLAN_VENDOR_ATTR_PEER_STATS_INVALID = 0,
+
+	QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_TYPE = 1,
+	QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_PEER_MAC = 2,
+	QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_DATA = 3,
+	QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_PEER_COOKIE = 4,
+
+	/* Keep last */
+	QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_LAST,
+	QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_MAX =
+		QCA_WLAN_VENDOR_ATTR_PEER_STATS_CACHE_LAST - 1
+};
+
+/**
+ * enum qca_mpta_helper_attr_zigbee_state - current states of zigbee.
+ * This enum defines all the possible states of zigbee, which can be
+ * delivered by NetLink attribute QCA_MPTA_HELPER_VENDOR_ATTR_ZIGBEE_STATE.
+ *
+ * @ZIGBEE_IDLE: zigbee in idle state
+ * @ZIGBEE_FORM_NETWORK: zigbee forming network
+ * @ZIGBEE_WAIT_JOIN: zigbee waiting for joining network
+ * @ZIGBEE_JOIN: zigbee joining network
+ * @ZIGBEE_NETWORK_UP: zigbee network is up
+ * @ZIGBEE_HMI: zigbee in HMI mode
+ */
+enum qca_mpta_helper_attr_zigbee_state {
+	ZIGBEE_IDLE = 0,
+	ZIGBEE_FORM_NETWORK = 1,
+	ZIGBEE_WAIT_JOIN = 2,
+	ZIGBEE_JOIN = 3,
+	ZIGBEE_NETWORK_UP = 4,
+	ZIGBEE_HMI = 5,
+};
+
+/**
+ * enum qca_mpta_helper_vendor_attr - used for NL attributes sent by
+ * vendor sub-command QCA_NL80211_VENDOR_SUBCMD_MPTA_HELPER_CONFIG.
+ */
+enum qca_mpta_helper_vendor_attr {
+	QCA_MPTA_HELPER_VENDOR_ATTR_INVALID = 0,
+	/* Optional attribute used to update zigbee state.
+	 * enum qca_mpta_helper_attr_zigbee_state.
+	 * NLA_U32 attribute.
+	 */
+	QCA_MPTA_HELPER_VENDOR_ATTR_ZIGBEE_STATE = 1,
+	/* Optional attribute used to configure wlan duration for Shape-OCS
+	 * during interrupt.
+ * Set in pair with QCA_MPTA_HELPER_VENDOR_ATTR_INT_NON_WLAN_DURATION. + * Value range 0 ~ 300 (ms). + * NLA_U32 attribute. + */ + QCA_MPTA_HELPER_VENDOR_ATTR_INT_WLAN_DURATION = 2, + /* Optional attribute used to configure non wlan duration for Shape-OCS + * during interrupt. + * Set in pair with QCA_MPTA_HELPER_VENDOR_ATTR_INT_WLAN_DURATION. + * Value range 0 ~ 300 (ms). + * NLA_U32 attribute. + */ + QCA_MPTA_HELPER_VENDOR_ATTR_INT_NON_WLAN_DURATION = 3, + /* Optional attribute used to configure wlan duration for Shape-OCS + * monitor period. + * Set in pair with QCA_MPTA_HELPER_VENDOR_ATTR_MON_NON_WLAN_DURATION. + * Value range 0 ~ 300 (ms) + * NLA_U32 attribute + */ + QCA_MPTA_HELPER_VENDOR_ATTR_MON_WLAN_DURATION = 4, + /* Optional attribute used to configure non wlan duration for Shape-OCS + * monitor period. + * Set in pair with QCA_MPTA_HELPER_VENDOR_ATTR_MON_WLAN_DURATION. + * Value range 0 ~ 300 (ms) + * NLA_U32 attribute + */ + QCA_MPTA_HELPER_VENDOR_ATTR_MON_NON_WLAN_DURATION = 5, + /* Optional attribute used to configure ocs interrupt duration. + * Set in pair with QCA_MPTA_HELPER_VENDOR_ATTR_MON_OCS_DURATION. + * Value range 1000 ~ 20000 (ms) + * NLA_U32 attribute + */ + QCA_MPTA_HELPER_VENDOR_ATTR_INT_OCS_DURATION = 6, + /* Optional attribute used to configure ocs monitor duration. + * Set in pair with QCA_MPTA_HELPER_VENDOR_ATTR_INT_OCS_DURATION. + * Value range 1000 ~ 20000 (ms) + * NLA_U32 attribute + */ + QCA_MPTA_HELPER_VENDOR_ATTR_MON_OCS_DURATION = 7, + /* Optional attribute used to notify wlan FW current zigbee channel. + * Value range 11 ~ 26 + * NLA_U32 attribute + */ + QCA_MPTA_HELPER_VENDOR_ATTR_ZIGBEE_CHAN = 8, + /* Optional attribute used to configure wlan mute duration. 
+ * Value range 0 ~ 400 (ms) + * NLA_U32 attribute + */ + QCA_MPTA_HELPER_VENDOR_ATTR_WLAN_MUTE_DURATION = 9, + + /* keep last */ + QCA_MPTA_HELPER_VENDOR_ATTR_AFTER_LAST, + QCA_MPTA_HELPER_VENDOR_ATTR_MAX = + QCA_MPTA_HELPER_VENDOR_ATTR_AFTER_LAST - 1 +}; +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/scan/inc/wlan_cfg80211_scan.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/scan/inc/wlan_cfg80211_scan.h new file mode 100644 index 0000000000000000000000000000000000000000..d3ed6bdd437dd4463d426be389c5468608c8c747 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/scan/inc/wlan_cfg80211_scan.h @@ -0,0 +1,358 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: declares driver functions interfacing with linux kernel + */ + + +#ifndef _WLAN_CFG80211_SCAN_H_ +#define _WLAN_CFG80211_SCAN_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Max number of scans allowed from userspace */ +#define WLAN_MAX_SCAN_COUNT 8 + +/* GPS application requirement */ +#define QCOM_VENDOR_IE_ID 221 +#define QCOM_OUI1 0x00 +#define QCOM_OUI2 0xA0 +#define QCOM_OUI3 0xC6 +#define QCOM_VENDOR_IE_AGE_TYPE 0x100 +#define QCOM_VENDOR_IE_AGE_LEN (sizeof(qcom_ie_age) - 2) +#define SCAN_DONE_EVENT_BUF_SIZE 4096 +#define SCAN_WAKE_LOCK_CONNECT_DURATION (1 * 1000) /* in msec */ +#define SCAN_WAKE_LOCK_SCAN_DURATION (5 * 1000) /* in msec */ + +/** + * typedef struct qcom_ie_age - age ie + * + * @element_id: Element id + * @len: Length + * @oui_1: OUI 1 + * @oui_2: OUI 2 + * @oui_3: OUI 3 + * @type: Type + * @age: Age + * @tsf_delta: tsf delta from FW + * @beacon_tsf: original beacon TSF + * @seq_ctrl: sequence control field + */ +typedef struct { + u8 element_id; + u8 len; + u8 oui_1; + u8 oui_2; + u8 oui_3; + u32 type; + u32 age; + u32 tsf_delta; + u64 beacon_tsf; + u16 seq_ctrl; +} __attribute__ ((packed)) qcom_ie_age; + +/** + * struct osif_scan_pdev - OS scan private strcutre + * scan_req_q: Scan request queue + * scan_req_q_lock: Protect scan request queue + * req_id: Scan request Id + * runtime_pm_lock: Runtime suspend lock + * scan_wake_lock: Scan wake lock + */ +struct osif_scan_pdev{ + qdf_list_t scan_req_q; + qdf_mutex_t scan_req_q_lock; + wlan_scan_requester req_id; + qdf_runtime_lock_t runtime_pm_lock; + qdf_wake_lock_t scan_wake_lock; +}; + +/* + * enum scan_source - scan request source + * @NL_SCAN: Scan initiated from NL + * @VENDOR_SCAN: Scan intiated from vendor command + */ +enum scan_source { + NL_SCAN, + VENDOR_SCAN, +}; + +/** + * struct scan_req - Scan Request entry + * @node : List entry element + * @scan_request: scan request holder + * @scan_id: scan identifier 
used across host layers which is generated at WMI + * @source: scan request originator (NL/Vendor scan) + * @dev: net device (same as what is in scan_request) + * + * Scan request linked list element + */ +struct scan_req { + qdf_list_node_t node; + struct cfg80211_scan_request *scan_request; + uint32_t scan_id; + uint8_t source; + struct net_device *dev; +}; + +/** + * struct scan_params - Scan params + * @source: scan request source + * @default_ie: default scan ie + * @half_rate: Half rate flag + * @quarter_rate: Quarter rate flag + */ +struct scan_params { + uint8_t source; + struct element_info default_ie; + bool half_rate; + bool quarter_rate; +}; + +/** + * struct wlan_cfg80211_inform_bss - BSS inform data + * @chan: channel the frame was received on + * @mgmt: beacon/probe resp frame + * @frame_len: frame length + * @rssi: signal strength in mBm (100*dBm) + * @boottime_ns: timestamp (CLOCK_BOOTTIME) when the information was received. + * @per_chain_snr: per chain snr received + */ +struct wlan_cfg80211_inform_bss { + struct ieee80211_channel *chan; + struct ieee80211_mgmt *mgmt; + size_t frame_len; + int rssi; + uint64_t boottime_ns; + uint8_t per_chain_snr[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA]; +}; + + +#ifdef FEATURE_WLAN_SCAN_PNO +/** + * wlan_cfg80211_sched_scan_start() - cfg80211 scheduled scan(pno) start + * @vdev: vdev pointer + * @request: Pointer to cfg80211 scheduled scan start request + * @scan_backoff_multiplier: multiply scan period by this after max cycles + * + * Return: 0 for success, non zero for failure + */ +int wlan_cfg80211_sched_scan_start(struct wlan_objmgr_vdev *vdev, + struct cfg80211_sched_scan_request *request, + uint8_t scan_backoff_multiplier); + +/** + * wlan_cfg80211_sched_scan_stop() - cfg80211 scheduled scan(pno) stop + * @vdev: vdev pointer + * + * Return: 0 for success, non zero for failure + */ +int wlan_cfg80211_sched_scan_stop(struct wlan_objmgr_vdev *vdev); +#endif + +/** + * wlan_scan_runtime_pm_init() - API to initialize 
runtime pm context for scan + * @pdev: Pointer to pdev + * + * This will help to initialize scan runtime pm context separately. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_scan_runtime_pm_init(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_scan_runtime_pm_deinit() - API to deinitialize runtime pm + * for scan. + * @pdev: Pointer to pdev + * + * This will help to deinitialize scan runtime pm before deinitialize + * HIF + * + * Return: void + */ +void wlan_scan_runtime_pm_deinit(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_cfg80211_scan_priv_init() - API to initialize cfg80211 scan + * @pdev: Pointer to net device + * + * API to initialize cfg80211 scan module. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_cfg80211_scan_priv_init(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_cfg80211_scan_priv_deinit() - API to deinitialize cfg80211 scan + * @pdev: Pointer to net device + * + * API to deinitialize cfg80211 scan module. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_cfg80211_scan_priv_deinit( + struct wlan_objmgr_pdev *pdev); + +/** + * wlan_cfg80211_scan() - API to process cfg80211 scan request + * @vdev: Pointer to vdev + * @request: Pointer to scan request + * @params: scan params + * + * API to trigger scan and update cfg80211 scan database. + * scan dump command can be used to fetch scan results + * on receipt of scan complete event. 
+ * + * Return: 0 for success, non zero for failure + */ +int wlan_cfg80211_scan(struct wlan_objmgr_vdev *vdev, + struct cfg80211_scan_request *request, + struct scan_params *params); + +/** + * wlan_cfg80211_inform_bss_frame_data() - API to inform beacon to cfg80211 + * @wiphy: wiphy + * @bss_data: bss data + * + * API to inform beacon to cfg80211 + * + * Return: pointer to bss entry + */ +struct cfg80211_bss * +wlan_cfg80211_inform_bss_frame_data(struct wiphy *wiphy, + struct wlan_cfg80211_inform_bss *bss); + +/** + * wlan_cfg80211_inform_bss_frame() - API to inform beacon to cfg80211 + * @pdev: Pointer to pdev + * @scan_params: scan entry + * @request: Pointer to scan request + * + * API to inform beacon to cfg80211 + * + * Return: void + */ +void wlan_cfg80211_inform_bss_frame(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_params); + +/** + * __wlan_cfg80211_unlink_bss_list() - flush bss from the kernel cache + * @wiphy: wiphy + * @bssid: bssid of the BSS to find + * @ssid: ssid of the BSS to find + * @ssid_len: ssid len of of the BSS to find + * + * Return: None + */ +void __wlan_cfg80211_unlink_bss_list(struct wiphy *wiphy, uint8_t *bssid, + uint8_t *ssid, uint8_t ssid_len); + +/** + * wlan_cfg80211_get_bss() - Get the bss entry matching the chan, bssid and ssid + * @wiphy: wiphy + * @channel: channel of the BSS to find + * @bssid: bssid of the BSS to find + * @ssid: ssid of the BSS to find + * @ssid_len: ssid len of of the BSS to find + * + * The API is a wrapper to get bss from kernel matching the chan, + * bssid and ssid + * + * Return: bss structure if found else NULL + */ +struct cfg80211_bss *wlan_cfg80211_get_bss(struct wiphy *wiphy, + struct ieee80211_channel *channel, + const u8 *bssid, + const u8 *ssid, size_t ssid_len); + +/* + * wlan_cfg80211_unlink_bss_list : flush bss from the kernel cache + * @pdev: Pointer to pdev + * @scan_entry: scan entry + * + * Return: bss which is unlinked from kernel cache + */ +void 
wlan_cfg80211_unlink_bss_list(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_entry); + +/** + * wlan_vendor_abort_scan() - API to vendor abort scan + * @pdev: Pointer to pdev + * @data: pointer to data + * @data_len: Data length + * + * API to abort scan through vendor command + * + * Return: 0 for success, non zero for failure + */ +int wlan_vendor_abort_scan(struct wlan_objmgr_pdev *pdev, + const void *data, int data_len); + +/** + * wlan_cfg80211_abort_scan() - API to abort scan through cfg80211 + * @pdev: Pointer to pdev + * + * API to abort scan through cfg80211 request + * + * Return: 0 for success, non zero for failure + */ +int wlan_cfg80211_abort_scan(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_abort_scan() - Generic API to abort scan request + * @pdev: Pointer to pdev + * @pdev_id: pdev id + * @vdev_id: vdev id + * @scan_id: scan id + * @sync: if wait for scan complete is required + * + * Generic API to abort scans + * + * Return: 0 for success, non zero for failure + */ +QDF_STATUS wlan_abort_scan(struct wlan_objmgr_pdev *pdev, + uint32_t pdev_id, + uint32_t vdev_id, + wlan_scan_id scan_id, + bool sync); + +/** + * wlan_cfg80211_cleanup_scan_queue() - remove entries in scan queue + * @pdev: pdev pointer + * @dev: net device pointer + * + * Removes entries in scan queue depending on dev provided and sends scan + * complete event to NL. 
+ * Removes all entries in scan queue, if dev provided is NULL + * + * Return: None + */ +void wlan_cfg80211_cleanup_scan_queue(struct wlan_objmgr_pdev *pdev, + struct net_device *dev); + + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/scan/src/wlan_cfg80211_scan.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/scan/src/wlan_cfg80211_scan.c new file mode 100644 index 0000000000000000000000000000000000000000..d3b73e165e860acc3b8649774fe9f9cb939c95da --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/scan/src/wlan_cfg80211_scan.c @@ -0,0 +1,2013 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: defines driver functions interfacing with linux kernel + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef WLAN_POLICY_MGR_ENABLE +#include +#endif +#include +#ifdef FEATURE_WLAN_DIAG_SUPPORT +#include "host_diag_core_event.h" +#endif + +static const +struct nla_policy scan_policy[QCA_WLAN_VENDOR_ATTR_SCAN_MAX + 1] = { + [QCA_WLAN_VENDOR_ATTR_SCAN_FLAGS] = {.type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SCAN_TX_NO_CCK_RATE] = {.type = NLA_FLAG}, + [QCA_WLAN_VENDOR_ATTR_SCAN_COOKIE] = {.type = NLA_U64}, +}; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) +static uint32_t hdd_config_sched_scan_start_delay( + struct cfg80211_sched_scan_request *request) +{ + return request->delay; +} +#else +static uint32_t hdd_config_sched_scan_start_delay( + struct cfg80211_sched_scan_request *request) +{ + return 0; +} +#endif + +#if defined(CFG80211_SCAN_RANDOM_MAC_ADDR) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) +/** + * wlan_fill_scan_rand_attrs() - Populate the scan randomization attrs + * @vdev: pointer to objmgr vdev + * @flags: cfg80211 scan flags + * @mac_addr: random mac addr from cfg80211 + * @mac_addr_mask: mac addr mask from cfg80211 + * @randomize: output variable to check scan randomization status + * @addr: output variable to hold random addr + * @mask: output variable to hold mac mask + * + * Return: None + */ +static void wlan_fill_scan_rand_attrs(struct wlan_objmgr_vdev *vdev, + uint32_t flags, + uint8_t *mac_addr, + uint8_t *mac_addr_mask, + bool *randomize, + uint8_t *addr, + uint8_t *mask) +{ + *randomize = false; + if (!(flags & NL80211_SCAN_FLAG_RANDOM_ADDR)) + return; + + if (wlan_vdev_mlme_get_opmode(vdev) != QDF_STA_MODE) + return; + + if (wlan_vdev_is_up(vdev)) + return; + + *randomize = true; + memcpy(addr, mac_addr, QDF_MAC_ADDR_SIZE); + memcpy(mask, mac_addr_mask, QDF_MAC_ADDR_SIZE); + cfg80211_debug("Random mac 
addr: %pM and Random mac mask: %pM", + addr, mask); +} + +/** + * wlan_scan_rand_attrs() - Wrapper function to fill scan random attrs + * @vdev: pointer to objmgr vdev + * @request: pointer to cfg80211 scan request + * @req: pointer to cmn module scan request + * + * This is a wrapper function which invokes wlan_fill_scan_rand_attrs() + * to fill random attributes of internal scan request with cfg80211_scan_request + * + * Return: None + */ +static void wlan_scan_rand_attrs(struct wlan_objmgr_vdev *vdev, + struct cfg80211_scan_request *request, + struct scan_start_request *req) +{ + bool *randomize = &req->scan_req.scan_random.randomize; + uint8_t *mac_addr = req->scan_req.scan_random.mac_addr; + uint8_t *mac_mask = req->scan_req.scan_random.mac_mask; + + wlan_fill_scan_rand_attrs(vdev, request->flags, request->mac_addr, + request->mac_addr_mask, randomize, mac_addr, + mac_mask); + if (!*randomize) + return; + + req->scan_req.scan_f_add_spoofed_mac_in_probe = true; + req->scan_req.scan_f_add_rand_seq_in_probe = true; +} +#else +/** + * wlan_scan_rand_attrs() - Wrapper function to fill scan random attrs + * @vdev: pointer to objmgr vdev + * @request: pointer to cfg80211 scan request + * @req: pointer to cmn module scan request + * + * This is a wrapper function which invokes wlan_fill_scan_rand_attrs() + * to fill random attributes of internal scan request with cfg80211_scan_request + * + * Return: None + */ +static void wlan_scan_rand_attrs(struct wlan_objmgr_vdev *vdev, + struct cfg80211_scan_request *request, + struct scan_start_request *req) +{ +} +#endif + +#ifdef FEATURE_WLAN_SCAN_PNO +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || \ + defined(CFG80211_MULTI_SCAN_PLAN_BACKPORT)) + +/** + * wlan_config_sched_scan_plan() - configures the sched scan plans + * from the framework. 
+ * @psoc: Psoc pointer + * @pno_req: pointer to PNO scan request + * @request: pointer to scan request from framework + * + * Return: None + */ +static void wlan_config_sched_scan_plan(struct wlan_objmgr_psoc *psoc, + struct pno_scan_req_params *pno_req, + struct cfg80211_sched_scan_request *request) +{ + /* + * As of now max 2 scan plans were supported by firmware + * if number of scan plan supported by firmware increased below logic + * must change. + */ + if (request->n_scan_plans == SCAN_PNO_MAX_PLAN_REQUEST) { + pno_req->fast_scan_period = + request->scan_plans[0].interval * MSEC_PER_SEC; + pno_req->fast_scan_max_cycles = + request->scan_plans[0].iterations; + pno_req->slow_scan_period = + request->scan_plans[1].interval * MSEC_PER_SEC; + } else if (request->n_scan_plans == 1) { + pno_req->fast_scan_period = + request->scan_plans[0].interval * MSEC_PER_SEC; + /* + * if only one scan plan is configured from framework + * then both fast and slow scan should be configured with the + * same value that is why fast scan cycles are hardcoded to one + */ + pno_req->fast_scan_max_cycles = 1; + pno_req->slow_scan_period = + request->scan_plans[0].interval * MSEC_PER_SEC; + } else { + cfg80211_err("Invalid number of scan plans %d !!", + request->n_scan_plans); + } +} +#else +static void wlan_config_sched_scan_plan(struct wlan_objmgr_psoc *psoc, + struct pno_scan_req_params *pno_req, + struct cfg80211_sched_scan_request *request) +{ + uint32_t scan_timer_repeat_value, slow_scan_multiplier; + + scan_timer_repeat_value = ucfg_scan_get_scan_timer_repeat_value(psoc); + slow_scan_multiplier = ucfg_scan_get_slow_scan_multiplier(psoc); + pno_req->fast_scan_period = request->interval; + pno_req->fast_scan_max_cycles = scan_timer_repeat_value; + pno_req->slow_scan_period = slow_scan_multiplier * + pno_req->fast_scan_period; + cfg80211_debug("Base scan interval: %d sec PNO Scan Timer Repeat Value: %d", + (request->interval / 1000), scan_timer_repeat_value); +} +#endif + +#if 
LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +static inline void +wlan_cfg80211_sched_scan_results(struct wiphy *wiphy, uint64_t reqid) +{ + cfg80211_sched_scan_results(wiphy); +} +#else +static inline void +wlan_cfg80211_sched_scan_results(struct wiphy *wiphy, uint64_t reqid) +{ + cfg80211_sched_scan_results(wiphy, reqid); +} +#endif + +/** + * wlan_cfg80211_pno_callback() - pno callback function to handle + * pno events. + * @vdev: vdev ptr + * @event: scan events + * @args: argument + * + * Return: void + */ +static void wlan_cfg80211_pno_callback(struct wlan_objmgr_vdev *vdev, + struct scan_event *event, + void *args) +{ + struct wlan_objmgr_pdev *pdev; + struct pdev_osif_priv *pdev_ospriv; + + if (event->type != SCAN_EVENT_TYPE_NLO_COMPLETE) + return; + + cfg80211_debug("vdev id = %d", event->vdev_id); + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + cfg80211_err("pdev is NULL"); + return; + } + + pdev_ospriv = wlan_pdev_get_ospriv(pdev); + if (!pdev_ospriv) { + cfg80211_err("pdev_ospriv is NULL"); + return; + } + wlan_cfg80211_sched_scan_results(pdev_ospriv->wiphy, 0); +} + +#ifdef WLAN_POLICY_MGR_ENABLE +static bool wlan_cfg80211_is_ap_go_present(struct wlan_objmgr_psoc *psoc) +{ + return policy_mgr_mode_specific_connection_count(psoc, + PM_SAP_MODE, + NULL) || + policy_mgr_mode_specific_connection_count(psoc, + PM_P2P_GO_MODE, + NULL); +} + +static QDF_STATUS wlan_cfg80211_is_chan_ok_for_dnbs( + struct wlan_objmgr_psoc *psoc, + u8 channel, bool *ok) +{ + QDF_STATUS status = policy_mgr_is_chan_ok_for_dnbs(psoc, channel, ok); + + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("DNBS check failed"); + return status; + } + + return QDF_STATUS_SUCCESS; +} +#else +static bool wlan_cfg80211_is_ap_go_present(struct wlan_objmgr_psoc *psoc) +{ + return false; +} + +static QDF_STATUS wlan_cfg80211_is_chan_ok_for_dnbs( + struct wlan_objmgr_psoc *psoc, + u8 channel, + bool *ok) +{ + if (!ok) + return QDF_STATUS_E_INVAL; + + *ok = true; + return 
QDF_STATUS_SUCCESS; +} +#endif + +#if defined(CFG80211_SCAN_RANDOM_MAC_ADDR) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) +/** + * wlan_pno_scan_rand_attr() - Wrapper function to fill sched scan random attrs + * @vdev: pointer to objmgr vdev + * @request: pointer to cfg80211 sched scan request + * @req: pointer to cmn module pno scan request + * + * This is a wrapper function which invokes wlan_fill_scan_rand_attrs() + * to fill random attributes of internal pno scan + * with cfg80211_sched_scan_request + * + * Return: None + */ +static void wlan_pno_scan_rand_attr(struct wlan_objmgr_vdev *vdev, + struct cfg80211_sched_scan_request *request, + struct pno_scan_req_params *req) +{ + bool *randomize = &req->scan_random.randomize; + uint8_t *mac_addr = req->scan_random.mac_addr; + uint8_t *mac_mask = req->scan_random.mac_mask; + + wlan_fill_scan_rand_attrs(vdev, request->flags, request->mac_addr, + request->mac_addr_mask, randomize, mac_addr, + mac_mask); +} +#else +/** + * wlan_pno_scan_rand_attr() - Wrapper function to fill sched scan random attrs + * @vdev: pointer to objmgr vdev + * @request: pointer to cfg80211 sched scan request + * @req: pointer to cmn module pno scan request + * + * This is a wrapper function which invokes wlan_fill_scan_rand_attrs() + * to fill random attributes of internal pno scan + * with cfg80211_sched_scan_request + * + * Return: None + */ +static void wlan_pno_scan_rand_attr(struct wlan_objmgr_vdev *vdev, + struct cfg80211_sched_scan_request *request, + struct pno_scan_req_params *req) +{ +} +#endif + +/** + * wlan_hdd_sched_scan_update_relative_rssi() - update CPNO params + * @pno_request: pointer to PNO scan request + * @request: Pointer to cfg80211 scheduled scan start request + * + * This function is used to update Connected PNO params sent by kernel + * + * Return: None + */ +#if defined(CFG80211_REPORT_BETTER_BSS_IN_SCHED_SCAN) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) +static inline void 
wlan_hdd_sched_scan_update_relative_rssi( + struct pno_scan_req_params *pno_request, + struct cfg80211_sched_scan_request *request) +{ + pno_request->relative_rssi_set = request->relative_rssi_set; + pno_request->relative_rssi = request->relative_rssi; + if (NL80211_BAND_2GHZ == request->rssi_adjust.band) + pno_request->band_rssi_pref.band = WLAN_BAND_2_4_GHZ; + else if (NL80211_BAND_5GHZ == request->rssi_adjust.band) + pno_request->band_rssi_pref.band = WLAN_BAND_5_GHZ; + pno_request->band_rssi_pref.rssi = request->rssi_adjust.delta; +} +#else +static inline void wlan_hdd_sched_scan_update_relative_rssi( + struct pno_scan_req_params *pno_request, + struct cfg80211_sched_scan_request *request) +{ +} +#endif + +int wlan_cfg80211_sched_scan_start(struct wlan_objmgr_vdev *vdev, + struct cfg80211_sched_scan_request *request, + uint8_t scan_backoff_multiplier) +{ + struct pno_scan_req_params *req; + int i, j, ret = 0; + QDF_STATUS status; + uint8_t num_chan = 0, channel; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + struct wlan_objmgr_psoc *psoc; + uint32_t valid_ch[SCAN_PNO_MAX_NETW_CHANNELS_EX] = {0}; + bool enable_dfs_pno_chnl_scan; + + if (ucfg_scan_get_pno_in_progress(vdev)) { + cfg80211_debug("pno is already in progress"); + return -EBUSY; + } + + if (ucfg_scan_get_pdev_status(pdev) != + SCAN_NOT_IN_PROGRESS) { + status = wlan_abort_scan(pdev, + wlan_objmgr_pdev_get_pdev_id(pdev), + INVAL_VDEV_ID, INVAL_SCAN_ID, true); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("aborting the existing scan is unsuccessful"); + return -EBUSY; + } + } + + req = qdf_mem_malloc(sizeof(*req)); + if (!req) { + cfg80211_err("req malloc failed"); + return -ENOMEM; + } + + wlan_pdev_obj_lock(pdev); + psoc = wlan_pdev_get_psoc(pdev); + wlan_pdev_obj_unlock(pdev); + + req->networks_cnt = request->n_match_sets; + req->vdev_id = wlan_vdev_get_id(vdev); + + if ((!req->networks_cnt) || + (req->networks_cnt > SCAN_PNO_MAX_SUPP_NETWORKS)) { + cfg80211_err("Network input is 
not correct %d", + req->networks_cnt); + ret = -EINVAL; + goto error; + } + + if (request->n_channels > SCAN_PNO_MAX_NETW_CHANNELS_EX) { + cfg80211_err("Incorrect number of channels %d", + request->n_channels); + ret = -EINVAL; + goto error; + } + + enable_dfs_pno_chnl_scan = ucfg_scan_is_dfs_chnl_scan_enabled(psoc); + if (request->n_channels) { + char *chl = qdf_mem_malloc((request->n_channels * 5) + 1); + int len = 0; + bool ap_or_go_present = wlan_cfg80211_is_ap_go_present(psoc); + + if (!chl) { + ret = -ENOMEM; + goto error; + } + for (i = 0; i < request->n_channels; i++) { + channel = request->channels[i]->hw_value; + if (wlan_reg_is_dsrc_chan(pdev, channel)) + continue; + if ((!enable_dfs_pno_chnl_scan) && + (wlan_reg_is_dfs_ch(pdev, channel))) { + cfg80211_debug("Dropping DFS channel :%d", + channel); + continue; + } + + if (ap_or_go_present) { + bool ok; + + status = + wlan_cfg80211_is_chan_ok_for_dnbs(psoc, + channel, + &ok); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("DNBS check failed"); + qdf_mem_free(req); + qdf_mem_free(chl); + chl = NULL; + ret = -EINVAL; + goto error; + } + if (!ok) + continue; + } + len += snprintf(chl + len, 5, "%d ", channel); + valid_ch[num_chan++] = wlan_chan_to_freq(channel); + } + cfg80211_notice("No. 
of Scan Channels: %d", num_chan); + cfg80211_notice("Channel-List: %s", chl); + qdf_mem_free(chl); + chl = NULL; + /* If all channels are DFS and dropped, + * then ignore the PNO request + */ + if (!num_chan) { + cfg80211_notice("Channel list empty due to filtering of DSRC"); + ret = -EINVAL; + goto error; + } + } + + /* Filling per profile params */ + for (i = 0; i < req->networks_cnt; i++) { + req->networks_list[i].ssid.length = + request->match_sets[i].ssid.ssid_len; + + if ((!req->networks_list[i].ssid.length) || + (req->networks_list[i].ssid.length > WLAN_SSID_MAX_LEN)) { + cfg80211_err(" SSID Len %d is not correct for network %d", + req->networks_list[i].ssid.length, i); + ret = -EINVAL; + goto error; + } + + qdf_mem_copy(req->networks_list[i].ssid.ssid, + request->match_sets[i].ssid.ssid, + req->networks_list[i].ssid.length); + req->networks_list[i].authentication = 0; /*eAUTH_TYPE_ANY */ + req->networks_list[i].encryption = 0; /*eED_ANY */ + req->networks_list[i].bc_new_type = 0; /*eBCAST_UNKNOWN */ + + cfg80211_notice("Received ssid:%.*s", + req->networks_list[i].ssid.length, + req->networks_list[i].ssid.ssid); + + /*Copying list of valid channel into request */ + qdf_mem_copy(req->networks_list[i].channels, valid_ch, + num_chan * sizeof(uint32_t)); + req->networks_list[i].channel_cnt = num_chan; + req->networks_list[i].rssi_thresh = + request->match_sets[i].rssi_thold; + } + + /* set scan to passive if no SSIDs are specified in the request */ + if (0 == request->n_ssids) + req->do_passive_scan = true; + else + req->do_passive_scan = false; + + for (i = 0; i < request->n_ssids; i++) { + j = 0; + while (j < req->networks_cnt) { + if ((req->networks_list[j].ssid.length == + request->ssids[i].ssid_len) && + (!qdf_mem_cmp(req->networks_list[j].ssid.ssid, + request->ssids[i].ssid, + req->networks_list[j].ssid.length))) { + req->networks_list[j].bc_new_type = + SSID_BC_TYPE_HIDDEN; + break; + } + j++; + } + } + cfg80211_notice("Number of hidden networks being 
Configured = %d", + request->n_ssids); + + /* + * Before Kernel 4.4 + * Driver gets only one time interval which is hard coded in + * supplicant for 10000ms. + * + * After Kernel 4.4 + * User can configure multiple scan_plans, each scan would have + * separate scan cycle and interval. (interval is in unit of second.) + * For our use case, we would only have supplicant set one scan_plan, + * and firmware also support only one as well, so pick up the first + * index. + * + * Taking power consumption into account + * firmware after gPNOScanTimerRepeatValue times fast_scan_period + * switches slow_scan_period. This is less frequent scans and firmware + * shall be in slow_scan_period mode until next PNO Start. + */ + wlan_config_sched_scan_plan(psoc, req, request); + req->delay_start_time = hdd_config_sched_scan_start_delay(request); + req->scan_backoff_multiplier = scan_backoff_multiplier; + cfg80211_notice("Base scan interval: %d sec, scan cycles: %d, slow scan interval %d", + req->fast_scan_period, req->fast_scan_max_cycles, + req->slow_scan_period); + wlan_hdd_sched_scan_update_relative_rssi(req, request); + + psoc = wlan_pdev_get_psoc(pdev); + ucfg_scan_register_pno_cb(psoc, + wlan_cfg80211_pno_callback, NULL); + ucfg_scan_get_pno_def_params(vdev, req); + + if (req->scan_random.randomize) + wlan_pno_scan_rand_attr(vdev, request, req); + + if (ucfg_ie_whitelist_enabled(psoc, vdev)) + ucfg_copy_ie_whitelist_attrs(psoc, &req->ie_whitelist); + status = ucfg_scan_pno_start(vdev, req); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("Failed to enable PNO"); + ret = -EINVAL; + goto error; + } + + cfg80211_info("PNO scan request offloaded"); + +error: + qdf_mem_free(req); + return ret; +} + +int wlan_cfg80211_sched_scan_stop(struct wlan_objmgr_vdev *vdev) +{ + QDF_STATUS status; + + status = ucfg_scan_pno_stop(vdev); + if (QDF_IS_STATUS_ERROR(status)) + cfg80211_err("Failed to disabled PNO"); + else + cfg80211_info("PNO scan disabled"); + + return 0; +} +#endif 
/*FEATURE_WLAN_SCAN_PNO */

/**
 * wlan_copy_bssid_scan_request() - API to copy the bssid to Scan request
 * @scan_req: Pointer to scan_start_request
 * @request: scan request from Supplicant
 *
 * This API copies the BSSID in scan request from Supplicant and copies it to
 * the scan_start_request.  Only kernels >= 4.7 (or backports defining
 * CFG80211_SCAN_BSSID) carry a bssid field in cfg80211_scan_request; the
 * fallback variant is a no-op.
 *
 * Return: None
 */
#if defined(CFG80211_SCAN_BSSID) || \
	(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
static inline void
wlan_copy_bssid_scan_request(struct scan_start_request *scan_req,
			     struct cfg80211_scan_request *request)
{
	qdf_mem_copy(scan_req->scan_req.bssid_list[0].bytes,
		     request->bssid, QDF_MAC_ADDR_SIZE);
}
#else
static inline void
wlan_copy_bssid_scan_request(struct scan_start_request *scan_req,
			     struct cfg80211_scan_request *request)
{

}
#endif

/**
 * wlan_scan_request_enqueue() - enqueue Scan Request
 * @pdev: pointer to pdev object
 * @req: Pointer to the scan request
 * @source: source of the scan request
 * @scan_id: scan identifier
 *
 * Enqueue scan request in the global scan list. This list
 * stores the active scan request information.
+ * + * Return: 0 on success, error number otherwise + */ +static int wlan_scan_request_enqueue(struct wlan_objmgr_pdev *pdev, + struct cfg80211_scan_request *req, + uint8_t source, uint32_t scan_id) +{ + struct scan_req *scan_req; + QDF_STATUS status; + struct pdev_osif_priv *osif_ctx; + struct osif_scan_pdev *osif_scan; + + scan_req = qdf_mem_malloc(sizeof(*scan_req)); + if (NULL == scan_req) { + cfg80211_alert("malloc failed for Scan req"); + return -ENOMEM; + } + + /* Get NL global context from objmgr*/ + osif_ctx = wlan_pdev_get_ospriv(pdev); + osif_scan = osif_ctx->osif_scan; + scan_req->scan_request = req; + scan_req->source = source; + scan_req->scan_id = scan_id; + scan_req->dev = req->wdev->netdev; + + qdf_mutex_acquire(&osif_scan->scan_req_q_lock); + status = qdf_list_insert_back(&osif_scan->scan_req_q, + &scan_req->node); + qdf_mutex_release(&osif_scan->scan_req_q_lock); + if (QDF_STATUS_SUCCESS != status) { + cfg80211_err("Failed to enqueue Scan Req"); + qdf_mem_free(scan_req); + return -EINVAL; + } + + return 0; +} + +/** + * wlan_scan_request_dequeue() - dequeue scan request + * @nl_ctx: Global HDD context + * @scan_id: scan id + * @req: scan request + * @dev: net device + * @source : returns source of the scan request + * + * Return: QDF_STATUS + */ +static QDF_STATUS wlan_scan_request_dequeue( + struct wlan_objmgr_pdev *pdev, + uint32_t scan_id, struct cfg80211_scan_request **req, + uint8_t *source, struct net_device **dev) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct scan_req *scan_req; + qdf_list_node_t *node = NULL, *next_node = NULL; + struct pdev_osif_priv *osif_ctx; + struct osif_scan_pdev *scan_priv; + + cfg80211_debug("Dequeue Scan id: %d", scan_id); + + if ((source == NULL) || (req == NULL)) { + cfg80211_err("source or request is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + /* Get NL global context from objmgr*/ + osif_ctx = wlan_pdev_get_ospriv(pdev); + if (!osif_ctx) { + cfg80211_err("Failed to retrieve osif context"); + 
return status; + } + scan_priv = osif_ctx->osif_scan; + + if (qdf_list_empty(&scan_priv->scan_req_q)) { + cfg80211_info("Scan List is empty"); + return QDF_STATUS_E_FAILURE; + } + + qdf_mutex_acquire(&scan_priv->scan_req_q_lock); + if (QDF_STATUS_SUCCESS != + qdf_list_peek_front(&scan_priv->scan_req_q, &next_node)) { + qdf_mutex_release(&scan_priv->scan_req_q_lock); + cfg80211_err("Failed to remove Scan Req from queue"); + return QDF_STATUS_E_FAILURE; + } + + do { + node = next_node; + scan_req = qdf_container_of(node, struct scan_req, + node); + if (scan_req->scan_id == scan_id) { + status = qdf_list_remove_node(&scan_priv->scan_req_q, + node); + if (status == QDF_STATUS_SUCCESS) { + *req = scan_req->scan_request; + *source = scan_req->source; + *dev = scan_req->dev; + qdf_mem_free(scan_req); + qdf_mutex_release(&scan_priv->scan_req_q_lock); + cfg80211_debug("removed Scan id: %d, req = %pK, pending scans %d", + scan_id, req, + qdf_list_size(&scan_priv-> + scan_req_q)); + return QDF_STATUS_SUCCESS; + } else { + qdf_mutex_release(&scan_priv->scan_req_q_lock); + cfg80211_err("Failed to remove node scan id %d, pending scans %d", + scan_id, + qdf_list_size(&scan_priv->scan_req_q)); + return status; + } + } + } while (QDF_STATUS_SUCCESS == + qdf_list_peek_next(&scan_priv->scan_req_q, node, &next_node)); + qdf_mutex_release(&scan_priv->scan_req_q_lock); + cfg80211_err("Failed to find scan id %d", scan_id); + + return status; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)) +/** + * wlan_cfg80211_scan_done() - Scan completed callback to cfg80211 + * @netdev: Net device + * @req : Scan request + * @aborted : true scan aborted false scan success + * + * This function notifies scan done to cfg80211 + * + * Return: none + */ +static void wlan_cfg80211_scan_done(struct net_device *netdev, + struct cfg80211_scan_request *req, + bool aborted) +{ + struct cfg80211_scan_info info = { + .aborted = aborted + }; + + if (netdev->flags & IFF_UP) + cfg80211_scan_done(req, 
&info); +} +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) +/** + * wlan_cfg80211_scan_done() - Scan completed callback to cfg80211 + * @netdev: Net device + * @req : Scan request + * @aborted : true scan aborted false scan success + * + * This function notifies scan done to cfg80211 + * + * Return: none + */ +static void wlan_cfg80211_scan_done(struct net_device *netdev, + struct cfg80211_scan_request *req, + bool aborted) +{ + if (netdev->flags & IFF_UP) + cfg80211_scan_done(req, aborted); +} +#endif + +/** + * wlan_vendor_scan_callback() - Scan completed callback event + * + * @req : Scan request + * @aborted : true scan aborted false scan success + * + * This function sends scan completed callback event to NL. + * + * Return: none + */ +static void wlan_vendor_scan_callback(struct cfg80211_scan_request *req, + bool aborted) +{ + struct sk_buff *skb; + struct nlattr *attr; + int i; + uint8_t scan_status; + uint64_t cookie; + + skb = cfg80211_vendor_event_alloc(req->wdev->wiphy, req->wdev, + SCAN_DONE_EVENT_BUF_SIZE + 4 + NLMSG_HDRLEN, + QCA_NL80211_VENDOR_SUBCMD_SCAN_DONE_INDEX, + GFP_ATOMIC); + + if (!skb) { + cfg80211_err("skb alloc failed"); + qdf_mem_free(req); + return; + } + + cookie = (uintptr_t)req; + + attr = nla_nest_start(skb, QCA_WLAN_VENDOR_ATTR_SCAN_SSIDS); + if (!attr) + goto nla_put_failure; + for (i = 0; i < req->n_ssids; i++) { + if (nla_put(skb, i, req->ssids[i].ssid_len, req->ssids[i].ssid)) + goto nla_put_failure; + } + nla_nest_end(skb, attr); + + attr = nla_nest_start(skb, QCA_WLAN_VENDOR_ATTR_SCAN_FREQUENCIES); + if (!attr) + goto nla_put_failure; + for (i = 0; i < req->n_channels; i++) { + if (nla_put_u32(skb, i, req->channels[i]->center_freq)) + goto nla_put_failure; + } + nla_nest_end(skb, attr); + + if (req->ie && + nla_put(skb, QCA_WLAN_VENDOR_ATTR_SCAN_IE, req->ie_len, + req->ie)) + goto nla_put_failure; + + if (req->flags && + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_SCAN_FLAGS, req->flags)) + goto nla_put_failure; + + if 
(wlan_cfg80211_nla_put_u64(skb, QCA_WLAN_VENDOR_ATTR_SCAN_COOKIE, + cookie)) + goto nla_put_failure; + + scan_status = (aborted == true) ? VENDOR_SCAN_STATUS_ABORTED : + VENDOR_SCAN_STATUS_NEW_RESULTS; + if (nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_SCAN_STATUS, scan_status)) + goto nla_put_failure; + + cfg80211_vendor_event(skb, GFP_ATOMIC); + qdf_mem_free(req); + + return; + +nla_put_failure: + kfree_skb(skb); + qdf_mem_free(req); +} + +/** + * wlan_scan_acquire_wake_lock_timeout() - acquire scan wake lock + * @psoc: psoc ptr + * @scan_wake_lock: Scan wake lock + * @timeout: timeout in ms + * + * Return: void + */ +static inline +void wlan_scan_acquire_wake_lock_timeout(struct wlan_objmgr_psoc *psoc, + qdf_wake_lock_t *scan_wake_lock, + uint32_t timeout) +{ + if (!psoc || !scan_wake_lock) + return; + + if (ucfg_scan_wake_lock_in_user_scan(psoc)) + qdf_wake_lock_timeout_acquire(scan_wake_lock, timeout); +} + +/** + * wlan_scan_release_wake_lock() - release scan wake lock + * @psoc: psoc ptr + * @scan_wake_lock: Scan wake lock + * + * Return: void + */ +#ifdef FEATURE_WLAN_DIAG_SUPPORT +static inline +void wlan_scan_release_wake_lock(struct wlan_objmgr_psoc *psoc, + qdf_wake_lock_t *scan_wake_lock) +{ + if (!psoc || !scan_wake_lock) + return; + + if (ucfg_scan_wake_lock_in_user_scan(psoc)) + qdf_wake_lock_release(scan_wake_lock, + WIFI_POWER_EVENT_WAKELOCK_SCAN); +} +#else +static inline +void wlan_scan_release_wake_lock(struct wlan_objmgr_psoc *psoc, + qdf_wake_lock_t *scan_wake_lock) +{ + if (!psoc || !scan_wake_lock) + return; + + if (ucfg_scan_wake_lock_in_user_scan(psoc)) + qdf_wake_lock_release(scan_wake_lock, 0); +} +#endif + +/** + * wlan_cfg80211_scan_done_callback() - scan done callback function called after + * scan is finished + * @vdev: vdev ptr + * @event: Scan event + * @args: Scan cb arg + * + * Return: void + */ +static void wlan_cfg80211_scan_done_callback( + struct wlan_objmgr_vdev *vdev, + struct scan_event *event, + void *args) +{ + struct 
cfg80211_scan_request *req = NULL; + bool success = false; + uint32_t scan_id = event->scan_id; + uint8_t source = NL_SCAN; + struct wlan_objmgr_pdev *pdev; + struct pdev_osif_priv *osif_priv; + struct net_device *netdev = NULL; + QDF_STATUS status; + + qdf_mtrace(QDF_MODULE_ID_SCAN, QDF_MODULE_ID_OS_IF, event->type, + event->vdev_id, event->scan_id); + + if (!util_is_scan_completed(event, &success)) + return; + + cfg80211_debug("scan ID = %d vdev id = %d, event type %s(%d) reason = %s(%d)", + scan_id, event->vdev_id, + util_scan_get_ev_type_name(event->type), event->type, + util_scan_get_ev_reason_name(event->reason), + event->reason); + + pdev = wlan_vdev_get_pdev(vdev); + status = wlan_scan_request_dequeue( + pdev, scan_id, &req, &source, &netdev); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("Dequeue of scan request failed ID: %d", scan_id); + goto allow_suspend; + } + + if (!netdev) { + cfg80211_err("net dev is NULL,Drop scan event Id: %d", + scan_id); + goto allow_suspend; + } + + /* Make sure vdev is active */ + status = wlan_objmgr_vdev_try_get_ref(vdev, WLAN_OSIF_ID); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("Failed to get vdev reference: scan Id: %d", + scan_id); + goto allow_suspend; + } + + /* + * Scan can be triggred from NL or vendor scan + * - If scan is triggered from NL then cfg80211 scan done should be + * called to updated scan completion to NL. 
+ * - If scan is triggred through vendor command then + * scan done event will be posted + */ + if (NL_SCAN == source) + wlan_cfg80211_scan_done(netdev, req, !success); + else + wlan_vendor_scan_callback(req, !success); + + wlan_objmgr_vdev_release_ref(vdev, WLAN_OSIF_ID); +allow_suspend: + osif_priv = wlan_pdev_get_ospriv(pdev); + if (qdf_list_empty(&osif_priv->osif_scan->scan_req_q)) { + struct wlan_objmgr_psoc *psoc; + + qdf_runtime_pm_allow_suspend( + &osif_priv->osif_scan->runtime_pm_lock); + + psoc = wlan_pdev_get_psoc(pdev); + wlan_scan_release_wake_lock(psoc, + &osif_priv->osif_scan->scan_wake_lock); + /* + * Acquire wakelock to handle the case where APP's tries + * to suspend immediately after the driver gets connect + * request(i.e after scan) from supplicant, this result in + * app's is suspending and not able to process the connect + * request to AP + */ + wlan_scan_acquire_wake_lock_timeout(psoc, + &osif_priv->osif_scan->scan_wake_lock, + SCAN_WAKE_LOCK_CONNECT_DURATION); + } +} + +QDF_STATUS wlan_scan_runtime_pm_init(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_osif_priv *osif_priv; + struct osif_scan_pdev *scan_priv; + + wlan_pdev_obj_lock(pdev); + osif_priv = wlan_pdev_get_ospriv(pdev); + wlan_pdev_obj_unlock(pdev); + + scan_priv = osif_priv->osif_scan; + + return qdf_runtime_lock_init(&scan_priv->runtime_pm_lock); +} + +void wlan_scan_runtime_pm_deinit(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_osif_priv *osif_priv; + struct osif_scan_pdev *scan_priv; + + wlan_pdev_obj_lock(pdev); + osif_priv = wlan_pdev_get_ospriv(pdev); + wlan_pdev_obj_unlock(pdev); + + scan_priv = osif_priv->osif_scan; + qdf_runtime_lock_deinit(&scan_priv->runtime_pm_lock); +} + +QDF_STATUS wlan_cfg80211_scan_priv_init(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_osif_priv *osif_priv; + struct osif_scan_pdev *scan_priv; + struct wlan_objmgr_psoc *psoc; + wlan_scan_requester req_id; + + psoc = wlan_pdev_get_psoc(pdev); + + req_id = ucfg_scan_register_requester(psoc, 
"CFG", + wlan_cfg80211_scan_done_callback, NULL); + + osif_priv = wlan_pdev_get_ospriv(pdev); + scan_priv = qdf_mem_malloc(sizeof(*scan_priv)); + if (!scan_priv) { + cfg80211_err("failed to allocate memory"); + return QDF_STATUS_E_NOMEM; + } + + osif_priv->osif_scan = scan_priv; + scan_priv->req_id = req_id; + /* Initialize the scan request queue */ + qdf_list_create(&scan_priv->scan_req_q, WLAN_MAX_SCAN_COUNT); + qdf_mutex_create(&scan_priv->scan_req_q_lock); + qdf_wake_lock_create(&scan_priv->scan_wake_lock, "scan_wake_lock"); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cfg80211_scan_priv_deinit(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_osif_priv *osif_priv; + struct osif_scan_pdev *scan_priv; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + osif_priv = wlan_pdev_get_ospriv(pdev); + + wlan_cfg80211_cleanup_scan_queue(pdev, NULL); + scan_priv = osif_priv->osif_scan; + qdf_wake_lock_destroy(&scan_priv->scan_wake_lock); + qdf_mutex_destroy(&scan_priv->scan_req_q_lock); + qdf_list_destroy(&scan_priv->scan_req_q); + ucfg_scan_unregister_requester(psoc, scan_priv->req_id); + osif_priv->osif_scan = NULL; + qdf_mem_free(scan_priv); + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_cfg80211_enqueue_for_cleanup() - Function to populate scan cleanup queue + * @scan_cleanup_q: Scan cleanup queue to be populated + * @scan_priv: Pointer to scan related data used by cfg80211 scan + * @dev: Netdevice pointer + * + * The function synchrounously iterates through the global scan queue to + * identify entries that have to be cleaned up, copies identified entries + * to another queue(to send scan complete event to NL later) and removes the + * entry from the global scan queue. 
+ * + * Return: None + */ +static void +wlan_cfg80211_enqueue_for_cleanup(qdf_list_t *scan_cleanup_q, + struct osif_scan_pdev *scan_priv, + struct net_device *dev) +{ + struct scan_req *scan_req, *scan_cleanup; + qdf_list_node_t *node = NULL, *next_node = NULL; + + qdf_mutex_acquire(&scan_priv->scan_req_q_lock); + if (QDF_STATUS_SUCCESS != + qdf_list_peek_front(&scan_priv->scan_req_q, + &node)) { + qdf_mutex_release(&scan_priv->scan_req_q_lock); + return; + } + + while (node) { + /* + * Keep track of the next node, to traverse through the list + * in the event of the current node being deleted. + */ + qdf_list_peek_next(&scan_priv->scan_req_q, + node, &next_node); + scan_req = qdf_container_of(node, struct scan_req, node); + if (!dev || (dev == scan_req->dev)) { + scan_cleanup = qdf_mem_malloc(sizeof(struct scan_req)); + if (!scan_cleanup) { + qdf_mutex_release(&scan_priv->scan_req_q_lock); + cfg80211_err("Failed to allocate memory"); + return; + } + scan_cleanup->scan_request = scan_req->scan_request; + scan_cleanup->scan_id = scan_req->scan_id; + scan_cleanup->source = scan_req->source; + scan_cleanup->dev = scan_req->dev; + qdf_list_insert_back(scan_cleanup_q, + &scan_cleanup->node); + if (QDF_STATUS_SUCCESS != + qdf_list_remove_node(&scan_priv->scan_req_q, + node)) { + qdf_mutex_release(&scan_priv->scan_req_q_lock); + cfg80211_err("Failed to remove scan request"); + return; + } + qdf_mem_free(scan_req); + } + node = next_node; + next_node = NULL; + } + qdf_mutex_release(&scan_priv->scan_req_q_lock); +} + +void wlan_cfg80211_cleanup_scan_queue(struct wlan_objmgr_pdev *pdev, + struct net_device *dev) +{ + struct scan_req *scan_req; + struct cfg80211_scan_request *req; + uint8_t source; + bool aborted = true; + struct pdev_osif_priv *osif_priv; + qdf_list_t scan_cleanup_q; + qdf_list_node_t *node = NULL; + + if (!pdev) { + cfg80211_err("pdev is Null"); + return; + } + + osif_priv = wlan_pdev_get_ospriv(pdev); + + /* + * To avoid any race conditions, create a local 
list to copy all the + * scan entries to be removed and then send scan complete for each of + * the identified entries to NL. + */ + qdf_list_create(&scan_cleanup_q, WLAN_MAX_SCAN_COUNT); + wlan_cfg80211_enqueue_for_cleanup(&scan_cleanup_q, + osif_priv->osif_scan, dev); + + while (!qdf_list_empty(&scan_cleanup_q)) { + if (QDF_STATUS_SUCCESS != qdf_list_remove_front(&scan_cleanup_q, + &node)) { + cfg80211_err("Failed to remove scan request"); + return; + } + scan_req = container_of(node, struct scan_req, node); + req = scan_req->scan_request; + source = scan_req->source; + if (NL_SCAN == source) + wlan_cfg80211_scan_done(scan_req->dev, req, + aborted); + else + wlan_vendor_scan_callback(req, aborted); + + qdf_mem_free(scan_req); + } + qdf_list_destroy(&scan_cleanup_q); + + return; +} + +/** + * wlan_cfg80211_update_scan_policy_type_flags() - Set scan flags according to + * scan request + * @scan_req: Pointer to csr scan req + * + * Return: None + */ +#if defined(CFG80211_SCAN_DBS_CONTROL_SUPPORT) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)) +static void wlan_cfg80211_update_scan_policy_type_flags( + struct cfg80211_scan_request *req, + struct scan_req_params *scan_req) +{ + if (req->flags & NL80211_SCAN_FLAG_HIGH_ACCURACY) + scan_req->scan_policy_high_accuracy = true; + if (req->flags & NL80211_SCAN_FLAG_LOW_SPAN) + scan_req->scan_policy_low_span = true; + if (req->flags & NL80211_SCAN_FLAG_LOW_POWER) + scan_req->scan_policy_low_power = true; +} +#else +static inline void wlan_cfg80211_update_scan_policy_type_flags( + struct cfg80211_scan_request *req, + struct scan_req_params *scan_req) +{ +} +#endif + +#ifdef WLAN_POLICY_MGR_ENABLE +static bool +wlan_cfg80211_allow_simultaneous_scan(struct wlan_objmgr_psoc *psoc) +{ + return policy_mgr_is_scan_simultaneous_capable(psoc); +} +#else +static bool +wlan_cfg80211_allow_simultaneous_scan(struct wlan_objmgr_psoc *psoc) +{ + return true; +} +#endif + +int wlan_cfg80211_scan(struct wlan_objmgr_vdev *vdev, + 
struct cfg80211_scan_request *request, + struct scan_params *params) +{ + struct scan_start_request *req; + struct wlan_ssid *pssid; + uint8_t i; + int ret = 0; + uint8_t num_chan = 0, channel; + uint32_t c_freq; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + wlan_scan_requester req_id; + struct pdev_osif_priv *osif_priv; + struct wlan_objmgr_psoc *psoc; + wlan_scan_id scan_id; + bool is_p2p_scan = false; + enum wlan_band band; + struct net_device *netdev = NULL; + QDF_STATUS qdf_status; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + cfg80211_err("Invalid psoc object"); + return -EINVAL; + } + /* Get NL global context from objmgr*/ + osif_priv = wlan_pdev_get_ospriv(pdev); + if (!osif_priv) { + cfg80211_err("Invalid osif priv object"); + return -EINVAL; + } + + /* + * If a scan is already going on i.e the qdf_list ( scan que) is not + * empty, and the simultaneous scan is disabled, dont allow 2nd scan + */ + if (!wlan_cfg80211_allow_simultaneous_scan(psoc) && + !qdf_list_empty(&osif_priv->osif_scan->scan_req_q) && + wlan_vdev_mlme_get_opmode(vdev) != QDF_SAP_MODE) { + cfg80211_err("Simultaneous scan disabled, reject scan"); + return -EBUSY; + } + req = qdf_mem_malloc(sizeof(*req)); + if (!req) { + cfg80211_err("Failed to allocate scan request memory"); + return -EINVAL; + } + /* Initialize the scan global params */ + ucfg_scan_init_default_params(vdev, req); + + req_id = osif_priv->osif_scan->req_id; + scan_id = ucfg_scan_get_scan_id(psoc); + if (!scan_id) { + cfg80211_err("Invalid scan id"); + qdf_mem_free(req); + return -EINVAL; + } + /* fill the scan request structure */ + req->vdev = vdev; + req->scan_req.vdev_id = wlan_vdev_get_id(vdev); + req->scan_req.scan_id = scan_id; + req->scan_req.scan_req_id = req_id; + + /* Update scan policy type flags according to cfg scan request */ + wlan_cfg80211_update_scan_policy_type_flags(request, + &req->scan_req); + /* + * Even though supplicant doesn't provide any SSIDs, n_ssids is + * set to 1. 
Because of this, driver is assuming that this is not + * wildcard scan and so is not aging out the scan results. + */ + if ((request->ssids) && (request->n_ssids == 1) && + ('\0' == request->ssids->ssid[0])) { + request->n_ssids = 0; + } + + if ((request->ssids) && (0 < request->n_ssids)) { + int j; + req->scan_req.num_ssids = request->n_ssids; + + if (req->scan_req.num_ssids > WLAN_SCAN_MAX_NUM_SSID) { + cfg80211_info("number of ssid received %d is greater than MAX %d so copy only MAX nuber of SSIDs", + req->scan_req.num_ssids, + WLAN_SCAN_MAX_NUM_SSID); + req->scan_req.num_ssids = WLAN_SCAN_MAX_NUM_SSID; + } + /* copy all the ssid's and their length */ + for (j = 0; j < req->scan_req.num_ssids; j++) { + pssid = &req->scan_req.ssid[j]; + /* get the ssid length */ + pssid->length = request->ssids[j].ssid_len; + if (pssid->length > WLAN_SSID_MAX_LEN) + pssid->length = WLAN_SSID_MAX_LEN; + qdf_mem_copy(pssid->ssid, + &request->ssids[j].ssid[0], + pssid->length); + cfg80211_info("SSID number %d: %.*s", j, pssid->length, + pssid->ssid); + } + } + if (request->ssids || + (wlan_vdev_mlme_get_opmode(vdev) == QDF_P2P_GO_MODE)) + req->scan_req.scan_f_passive = false; + + if (params->half_rate) + req->scan_req.scan_f_half_rate = true; + else if (params->quarter_rate) + req->scan_req.scan_f_quarter_rate = true; + + if ((request->n_ssids == 1) && request->ssids && + !qdf_mem_cmp(&request->ssids[0], "DIRECT-", 7)) + is_p2p_scan = true; + + if (is_p2p_scan && request->no_cck) + req->scan_req.p2p_scan_type = SCAN_P2P_SEARCH; + + /* Set dwell time mode according to scan policy type flags */ + if (ucfg_scan_cfg_honour_nl_scan_policy_flags(psoc)) { + if (req->scan_req.scan_policy_high_accuracy) + req->scan_req.adaptive_dwell_time_mode = + SCAN_DWELL_MODE_STATIC; + if (req->scan_req.scan_policy_low_power || + req->scan_req.scan_policy_low_span) + req->scan_req.adaptive_dwell_time_mode = + SCAN_DWELL_MODE_AGGRESSIVE; + } + + /* + * FW require at least 1 MAC to send probe request. 
+ * If MAC is all 0 set it to BC addr as this is the address on + * which fw will send probe req. + */ + req->scan_req.num_bssid = 1; + wlan_copy_bssid_scan_request(req, request); + if (qdf_is_macaddr_zero(&req->scan_req.bssid_list[0])) + qdf_set_macaddr_broadcast(&req->scan_req.bssid_list[0]); + + if (request->n_channels) { + char *chl = qdf_mem_malloc((request->n_channels * 5) + 1); + int len = 0; +#ifdef WLAN_POLICY_MGR_ENABLE + bool ap_or_go_present = + policy_mgr_mode_specific_connection_count( + psoc, PM_SAP_MODE, NULL) || + policy_mgr_mode_specific_connection_count( + psoc, PM_P2P_GO_MODE, NULL); +#endif + if (!chl) { + ret = -ENOMEM; + goto end; + } + for (i = 0; i < request->n_channels; i++) { + channel = request->channels[i]->hw_value; + c_freq = wlan_reg_chan_to_freq(pdev, channel); + if (wlan_reg_is_dsrc_chan(pdev, channel)) + continue; +#ifdef WLAN_POLICY_MGR_ENABLE + if (ap_or_go_present) { + bool ok; + + qdf_status = + policy_mgr_is_chan_ok_for_dnbs(psoc, + channel, + &ok); + + if (QDF_IS_STATUS_ERROR(qdf_status)) { + cfg80211_err("DNBS check failed"); + qdf_mem_free(req); + qdf_mem_free(chl); + chl = NULL; + ret = -EINVAL; + goto end; + } + if (!ok) + continue; + } +#endif + len += snprintf(chl + len, 5, "%d ", channel); + req->scan_req.chan_list.chan[num_chan].freq = c_freq; + band = util_scan_scm_freq_to_band(c_freq); + if (band == WLAN_BAND_2_4_GHZ) + req->scan_req.chan_list.chan[num_chan].phymode = + SCAN_PHY_MODE_11G; + else + req->scan_req.chan_list.chan[num_chan].phymode = + SCAN_PHY_MODE_11A; + num_chan++; + if (num_chan >= WLAN_SCAN_MAX_NUM_CHANNELS) + break; + } + cfg80211_info("Channel-List: %s", chl); + qdf_mem_free(chl); + chl = NULL; + cfg80211_info("No. 
of Scan Channels: %d", num_chan); + } + if (!num_chan) { + cfg80211_err("Received zero non-dsrc channels"); + qdf_mem_free(req); + ret = -EINVAL; + goto end; + } + req->scan_req.chan_list.num_chan = num_chan; + + /* P2P increase the scan priority */ + if (is_p2p_scan || wlan_vdev_mlme_get_opmode(vdev) == QDF_SAP_MODE) + req->scan_req.scan_priority = SCAN_PRIORITY_HIGH; + if (request->ie_len) { + req->scan_req.extraie.ptr = qdf_mem_malloc(request->ie_len); + if (!req->scan_req.extraie.ptr) { + cfg80211_err("Failed to allocate memory"); + ret = -ENOMEM; + qdf_mem_free(req); + goto end; + } + req->scan_req.extraie.len = request->ie_len; + qdf_mem_copy(req->scan_req.extraie.ptr, request->ie, + request->ie_len); + } else if (params->default_ie.ptr && params->default_ie.len) { + req->scan_req.extraie.ptr = + qdf_mem_malloc(params->default_ie.len); + if (!req->scan_req.extraie.ptr) { + cfg80211_err("Failed to allocate memory"); + ret = -ENOMEM; + qdf_mem_free(req); + goto end; + } + req->scan_req.extraie.len = params->default_ie.len; + qdf_mem_copy(req->scan_req.extraie.ptr, params->default_ie.ptr, + params->default_ie.len); + } + + if (!is_p2p_scan) { + if (req->scan_req.scan_random.randomize) + wlan_scan_rand_attrs(vdev, request, req); + if (ucfg_ie_whitelist_enabled(psoc, vdev) && + ucfg_copy_ie_whitelist_attrs(psoc, + &req->scan_req.ie_whitelist)) + req->scan_req.scan_f_en_ie_whitelist_in_probe = true; + } + + if (request->flags & NL80211_SCAN_FLAG_FLUSH) + ucfg_scan_flush_results(pdev, NULL); + + /* Enqueue the scan request */ + wlan_scan_request_enqueue(pdev, request, params->source, + req->scan_req.scan_id); + + /* + * Acquire wakelock to handle the case where APP's send scan to connect. + * If suspend is received during scan scan will be aborted and APP will + * not get scan result and not connect. eg if PNO is implemented in + * framework. 
+ */ + wlan_scan_acquire_wake_lock_timeout(psoc, + &osif_priv->osif_scan->scan_wake_lock, + SCAN_WAKE_LOCK_SCAN_DURATION); + + qdf_runtime_pm_prevent_suspend( + &osif_priv->osif_scan->runtime_pm_lock); + + qdf_status = ucfg_scan_start(req); + if (QDF_IS_STATUS_ERROR(qdf_status)) { + cfg80211_err("ucfg_scan_start returned error %d", qdf_status); + if (qdf_status == QDF_STATUS_E_RESOURCES) + cfg80211_err("HO is in progress.So defer the scan by informing busy"); + wlan_scan_request_dequeue(pdev, scan_id, &request, + ¶ms->source, &netdev); + if (qdf_list_empty(&osif_priv->osif_scan->scan_req_q)) { + qdf_runtime_pm_allow_suspend( + &osif_priv->osif_scan->runtime_pm_lock); + wlan_scan_release_wake_lock(psoc, + &osif_priv->osif_scan->scan_wake_lock); + } + } + ret = qdf_status_to_os_return(qdf_status); + +end: + return ret; +} + +/** + * wlan_get_scanid() - API to get the scan id + * from the scan cookie attribute. + * @pdev: Pointer to pdev object + * @scan_id: Pointer to scan id + * @cookie : Scan cookie attribute + * + * API to get the scan id from the scan cookie attribute + * sent from supplicant by matching scan request. 
+ * + * Return: 0 for success, non zero for failure + */ +static int wlan_get_scanid(struct wlan_objmgr_pdev *pdev, + uint32_t *scan_id, uint64_t cookie) +{ + struct scan_req *scan_req; + qdf_list_node_t *node = NULL; + qdf_list_node_t *ptr_node = NULL; + int ret = -EINVAL; + struct pdev_osif_priv *osif_ctx; + struct osif_scan_pdev *scan_priv; + + /* Get NL global context from objmgr*/ + osif_ctx = wlan_pdev_get_ospriv(pdev); + if (!osif_ctx) { + cfg80211_err("Failed to retrieve osif context"); + return ret; + } + scan_priv = osif_ctx->osif_scan; + qdf_mutex_acquire(&scan_priv->scan_req_q_lock); + if (qdf_list_empty(&scan_priv->scan_req_q)) { + qdf_mutex_release(&scan_priv->scan_req_q_lock); + cfg80211_err("Failed to retrieve scan id"); + return ret; + } + + if (QDF_STATUS_SUCCESS != + qdf_list_peek_front(&scan_priv->scan_req_q, + &ptr_node)) { + qdf_mutex_release(&scan_priv->scan_req_q_lock); + return ret; + } + + do { + node = ptr_node; + scan_req = qdf_container_of(node, struct scan_req, node); + if (cookie == + (uintptr_t)(scan_req->scan_request)) { + *scan_id = scan_req->scan_id; + ret = 0; + break; + } + } while (QDF_STATUS_SUCCESS == + qdf_list_peek_next(&scan_priv->scan_req_q, + node, &ptr_node)); + + qdf_mutex_release(&scan_priv->scan_req_q_lock); + + return ret; +} + +QDF_STATUS wlan_abort_scan(struct wlan_objmgr_pdev *pdev, + uint32_t pdev_id, uint32_t vdev_id, + wlan_scan_id scan_id, bool sync) +{ + struct scan_cancel_request *req; + struct pdev_osif_priv *osif_ctx; + struct osif_scan_pdev *scan_priv; + QDF_STATUS status; + struct wlan_objmgr_vdev *vdev; + + req = qdf_mem_malloc(sizeof(*req)); + if (!req) { + cfg80211_err("Failed to allocate memory"); + return QDF_STATUS_E_NOMEM; + } + + /* Get NL global context from objmgr*/ + osif_ctx = wlan_pdev_get_ospriv(pdev); + if (!osif_ctx) { + cfg80211_err("Failed to retrieve osif context"); + qdf_mem_free(req); + return QDF_STATUS_E_FAILURE; + } + if (vdev_id == INVAL_VDEV_ID) + vdev = 
wlan_objmgr_pdev_get_first_vdev(pdev, WLAN_OSIF_ID); + else + vdev = wlan_objmgr_get_vdev_by_id_from_pdev(pdev, + vdev_id, WLAN_OSIF_ID); + + if (!vdev) { + cfg80211_err("Failed get vdev"); + qdf_mem_free(req); + return QDF_STATUS_E_INVAL; + } + scan_priv = osif_ctx->osif_scan; + req->cancel_req.requester = scan_priv->req_id; + req->vdev = vdev; + req->cancel_req.scan_id = scan_id; + req->cancel_req.pdev_id = pdev_id; + req->cancel_req.vdev_id = vdev_id; + if (scan_id != INVAL_SCAN_ID) + req->cancel_req.req_type = WLAN_SCAN_CANCEL_SINGLE; + else if (vdev_id == INVAL_VDEV_ID) + req->cancel_req.req_type = WLAN_SCAN_CANCEL_PDEV_ALL; + else + req->cancel_req.req_type = WLAN_SCAN_CANCEL_VDEV_ALL; + + if (sync) + status = ucfg_scan_cancel_sync(req); + else + status = ucfg_scan_cancel(req); + if (QDF_IS_STATUS_ERROR(status)) + cfg80211_err("Cancel scan request failed"); + + wlan_objmgr_vdev_release_ref(vdev, WLAN_OSIF_ID); + + return status; +} + +int wlan_cfg80211_abort_scan(struct wlan_objmgr_pdev *pdev) +{ + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + if (ucfg_scan_get_pdev_status(pdev) != + SCAN_NOT_IN_PROGRESS) + wlan_abort_scan(pdev, pdev_id, + INVAL_VDEV_ID, INVAL_SCAN_ID, true); + + return 0; +} + +int wlan_vendor_abort_scan(struct wlan_objmgr_pdev *pdev, + const void *data, int data_len) +{ + struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SCAN_MAX + 1]; + int ret = -EINVAL; + wlan_scan_id scan_id; + uint64_t cookie; + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + if (wlan_cfg80211_nla_parse(tb, QCA_WLAN_VENDOR_ATTR_SCAN_MAX, data, + data_len, scan_policy)) { + cfg80211_err("Invalid ATTR"); + return ret; + } + + if (tb[QCA_WLAN_VENDOR_ATTR_SCAN_COOKIE]) { + cookie = nla_get_u64( + tb[QCA_WLAN_VENDOR_ATTR_SCAN_COOKIE]); + ret = wlan_get_scanid(pdev, &scan_id, cookie); + if (ret != 0) + return ret; + if (ucfg_scan_get_pdev_status(pdev) != + SCAN_NOT_IN_PROGRESS) + wlan_abort_scan(pdev, INVAL_PDEV_ID, + INVAL_VDEV_ID, 
scan_id, true); + } + return 0; +} + +static inline struct ieee80211_channel * +wlan_get_ieee80211_channel(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + int chan_no) +{ + unsigned int freq; + struct ieee80211_channel *chan; + + freq = wlan_reg_chan_to_freq(pdev, chan_no); + chan = ieee80211_get_channel(wiphy, freq); + if (!chan) + cfg80211_err("chan is NULL, chan_no: %d freq: %d", + chan_no, freq); + + return chan; +} + +#ifdef WLAN_ENABLE_AGEIE_ON_SCAN_RESULTS +static inline int wlan_get_frame_len(struct scan_cache_entry *scan_params) +{ + return util_scan_entry_frame_len(scan_params) + sizeof(qcom_ie_age); +} + +static inline void wlan_add_age_ie(uint8_t *mgmt_frame, + struct scan_cache_entry *scan_params) +{ + qcom_ie_age *qie_age = NULL; + + /* GPS Requirement: need age ie per entry. Using vendor specific. */ + /* Assuming this is the last IE, copy at the end */ + qie_age = (qcom_ie_age *) (mgmt_frame + + util_scan_entry_frame_len(scan_params)); + qie_age->element_id = QCOM_VENDOR_IE_ID; + qie_age->len = QCOM_VENDOR_IE_AGE_LEN; + qie_age->oui_1 = QCOM_OUI1; + qie_age->oui_2 = QCOM_OUI2; + qie_age->oui_3 = QCOM_OUI3; + qie_age->type = QCOM_VENDOR_IE_AGE_TYPE; + /* + * Lowi expects the timestamp of bss in units of 1/10 ms. In driver + * all bss related timestamp is in units of ms. Due to this when scan + * results are sent to lowi the scan age is high.To address this, + * send age in units of 1/10 ms. 
+ */ + qie_age->age = + (uint32_t)(qdf_mc_timer_get_system_time() - + scan_params->scan_entry_time)/10; + qie_age->tsf_delta = scan_params->tsf_delta; + memcpy(&qie_age->beacon_tsf, scan_params->tsf_info.data, + sizeof(qie_age->beacon_tsf)); + memcpy(&qie_age->seq_ctrl, &scan_params->seq_num, + sizeof(qie_age->seq_ctrl)); +} +#else +static inline int wlan_get_frame_len(struct scan_cache_entry *scan_params) +{ + return util_scan_entry_frame_len(scan_params); +} + +static inline void wlan_add_age_ie(uint8_t *mgmt_frame, + struct scan_cache_entry *scan_params) +{ +} +#endif /* WLAN_ENABLE_AGEIE_ON_SCAN_RESULTS */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || \ + defined(CFG80211_INFORM_BSS_FRAME_DATA) +/** + * wlan_fill_per_chain_rssi() - fill per chain RSSI in inform bss + * @data: bss data + * @per_chain_snr: per chain RSSI + * + * Return: void + */ +#if defined(CFG80211_SCAN_PER_CHAIN_RSSI_SUPPORT) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)) +static void wlan_fill_per_chain_rssi(struct cfg80211_inform_bss *data, + struct wlan_cfg80211_inform_bss *bss) +{ + + uint32_t i; + + if (!bss || !data) { + cfg80211_err("Received bss is NULL"); + return; + } + for (i = 0; i < WLAN_MGMT_TXRX_HOST_MAX_ANTENNA; i++) { + if (!bss->per_chain_snr[i] || + (bss->per_chain_snr[i] == WLAN_INVALID_PER_CHAIN_RSSI)) + continue; + /* Add noise margin to SNR to convert it to RSSI */ + data->chain_signal[i] = bss->per_chain_snr[i] + + WLAN_NOISE_FLOOR_DBM_DEFAULT; + data->chains |= BIT(i); + } +} +#else +static inline void +wlan_fill_per_chain_rssi(struct cfg80211_inform_bss *data, + struct wlan_cfg80211_inform_bss *bss) +{ +} +#endif + +struct cfg80211_bss * +wlan_cfg80211_inform_bss_frame_data(struct wiphy *wiphy, + struct wlan_cfg80211_inform_bss *bss) +{ + struct cfg80211_inform_bss data = {0}; + + if (!bss) { + cfg80211_err("bss is null"); + return NULL; + } + wlan_fill_per_chain_rssi(&data, bss); + + data.chan = bss->chan; + data.boottime_ns = bss->boottime_ns; 
+ data.signal = bss->rssi; + return cfg80211_inform_bss_frame_data(wiphy, &data, bss->mgmt, + bss->frame_len, GFP_ATOMIC); +} +#else +struct cfg80211_bss * +wlan_cfg80211_inform_bss_frame_data(struct wiphy *wiphy, + struct wlan_cfg80211_inform_bss *bss) + +{ + return cfg80211_inform_bss_frame(wiphy, bss->chan, bss->mgmt, + bss->frame_len, + bss->rssi, GFP_ATOMIC); +} +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) +static inline void wlan_cfg80211_put_bss(struct wiphy *wiphy, + struct cfg80211_bss *bss) +{ + cfg80211_put_bss(wiphy, bss); +} +#else +static inline void wlan_cfg80211_put_bss(struct wiphy *wiphy, + struct cfg80211_bss *bss) +{ + cfg80211_put_bss(bss); +} +#endif + +void wlan_cfg80211_inform_bss_frame(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_params) +{ + struct pdev_osif_priv *pdev_ospriv = wlan_pdev_get_ospriv(pdev); + struct wiphy *wiphy; + struct cfg80211_bss *bss = NULL; + struct wlan_cfg80211_inform_bss bss_data = {0}; + + if (!pdev_ospriv) { + cfg80211_err("os_priv is NULL"); + return; + } + + wiphy = pdev_ospriv->wiphy; + + bss_data.frame_len = wlan_get_frame_len(scan_params); + bss_data.mgmt = qdf_mem_malloc_atomic(bss_data.frame_len); + if (!bss_data.mgmt) { + cfg80211_err("mem alloc failed for bss %pM seq %d", + bss_data.mgmt->bssid, scan_params->seq_num); + return; + } + qdf_mem_copy(bss_data.mgmt, + util_scan_entry_frame_ptr(scan_params), + util_scan_entry_frame_len(scan_params)); + /* + * Android does not want the timestamp from the frame. + * Instead it wants a monotonic increasing value + */ + bss_data.mgmt->u.probe_resp.timestamp = qdf_get_monotonic_boottime(); + wlan_add_age_ie((uint8_t *)bss_data.mgmt, scan_params); + /* + * Based on .ini configuration, raw rssi can be reported for bss. + * Raw rssi is typically used for estimating power. 
+ */ + bss_data.rssi = scan_params->rssi_raw; + + bss_data.chan = wlan_get_ieee80211_channel(wiphy, pdev, + scan_params->channel.chan_idx); + if (!bss_data.chan) { + cfg80211_err("Channel not found for bss %pM seq %d chan %d", + bss_data.mgmt->bssid, scan_params->seq_num, + scan_params->channel.chan_idx); + qdf_mem_free(bss_data.mgmt); + return; + } + + /* + * Supplicant takes the signal strength in terms of + * mBm (1 dBm = 100 mBm). + */ + bss_data.rssi = QDF_MIN(bss_data.rssi, 0) * 100; + + bss_data.boottime_ns = scan_params->boottime_ns; + + qdf_mem_copy(bss_data.per_chain_snr, scan_params->per_chain_snr, + WLAN_MGMT_TXRX_HOST_MAX_ANTENNA); + + bss = wlan_cfg80211_inform_bss_frame_data(wiphy, &bss_data); + if (!bss) + cfg80211_err("failed to inform bss %pM seq %d", + bss_data.mgmt->bssid, scan_params->seq_num); + else + wlan_cfg80211_put_bss(wiphy, bss); + + qdf_mem_free(bss_data.mgmt); +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) && \ + !defined(WITH_BACKPORTS) && !defined(IEEE80211_PRIVACY) +struct cfg80211_bss *wlan_cfg80211_get_bss(struct wiphy *wiphy, + struct ieee80211_channel *channel, + const u8 *bssid, const u8 *ssid, + size_t ssid_len) +{ + return cfg80211_get_bss(wiphy, channel, bssid, + ssid, ssid_len, + WLAN_CAPABILITY_ESS, + WLAN_CAPABILITY_ESS); +} +#else +struct cfg80211_bss *wlan_cfg80211_get_bss(struct wiphy *wiphy, + struct ieee80211_channel *channel, + const u8 *bssid, const u8 *ssid, + size_t ssid_len) +{ + return cfg80211_get_bss(wiphy, channel, bssid, + ssid, ssid_len, + IEEE80211_BSS_TYPE_ESS, + IEEE80211_PRIVACY_ANY); +} +#endif + +void __wlan_cfg80211_unlink_bss_list(struct wiphy *wiphy, uint8_t *bssid, + uint8_t *ssid, uint8_t ssid_len) +{ + struct cfg80211_bss *bss = NULL; + + bss = wlan_cfg80211_get_bss(wiphy, NULL, bssid, + ssid, ssid_len); + if (!bss) { + cfg80211_info("BSS %pM not found", bssid); + } else { + cfg80211_debug("unlink entry for ssid:%.*s and BSSID %pM", + ssid_len, ssid, bssid); + 
cfg80211_unlink_bss(wiphy, bss); + wlan_cfg80211_put_bss(wiphy, bss); + } + + /* + * Kernel creates separate entries into it's bss list for probe resp + * and beacon for hidden AP. Both have separate ref count and thus + * deleting one will not delete other entry. + * If beacon entry of the hidden AP is not deleted and AP switch to + * broadcasting SSID from Hiding SSID, kernel will reject the beacon + * entry. So unlink the hidden beacon entry (if present) as well from + * kernel, to avoid such issue. + */ + bss = wlan_cfg80211_get_bss(wiphy, NULL, bssid, NULL, 0); + if (!bss) { + cfg80211_debug("Hidden bss not found for Ssid:%.*s BSSID: %pM sid_len %d", + ssid_len, ssid, bssid, ssid_len); + } else { + cfg80211_debug("unlink entry for Hidden ssid:%.*s and BSSID %pM", + ssid_len, ssid, bssid); + + cfg80211_unlink_bss(wiphy, bss); + /* cfg80211_get_bss get bss with ref count so release it */ + wlan_cfg80211_put_bss(wiphy, bss); + } +} +void wlan_cfg80211_unlink_bss_list(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_entry) +{ + struct pdev_osif_priv *pdev_ospriv = wlan_pdev_get_ospriv(pdev); + struct wiphy *wiphy; + + if (!pdev_ospriv) { + cfg80211_err("os_priv is NULL"); + return; + } + + wiphy = pdev_ospriv->wiphy; + + __wlan_cfg80211_unlink_bss_list(wiphy, scan_entry->bssid.bytes, + scan_entry->ssid.ssid, + scan_entry->ssid.length); +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/inc/os_if_spectral_netlink.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/inc/os_if_spectral_netlink.h new file mode 100644 index 0000000000000000000000000000000000000000..b94815c74686ee31a2c2c2faef8d3edc39fa5ab6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/inc/os_if_spectral_netlink.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2011, 2017-2018 The Linux Foundation. All rights reserved. 
+ * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _OS_IF_SPECTRAL_NETLINK_H +#define _OS_IF_SPECTRAL_NETLINK_H + +#include +#include +#include +#include +#include + +/* NETLINK related declarations */ +#if (KERNEL_VERSION(2, 6, 31) > LINUX_VERSION_CODE) +void os_if_spectral_nl_data_ready(struct sock *sk, int len); +#else +void os_if_spectral_nl_data_ready(struct sk_buff *skb); +#endif /* VERSION CHECK */ + +#ifndef SPECTRAL_NETLINK +#define SPECTRAL_NETLINK (NETLINK_GENERIC + 1) +#endif +#define MAX_SPECTRAL_PAYLOAD 1500 + +/* Init's network namespace */ +extern struct net init_net; + +/** + * os_if_spectral_netlink_init() - Initialize Spectral Netlink data structures + * and register the NL handlers with Spectral target_if + * @pdev: Pointer to pdev + * + * Preparing socket buffer and sending Netlink messages to application layer are + * defined in os_if layer, they need to be registered with Spectral target_if + * + * Return: None + */ +#ifdef WLAN_CONV_SPECTRAL_ENABLE +void os_if_spectral_netlink_init(struct wlan_objmgr_pdev *pdev); +/** + * os_if_spectral_prep_skb() - Prepare socket buffer + * @pdev : Pointer to pdev + * + * Prepare socket buffer to send the data to application layer + * + * Return: NLMSG_DATA of the created skb or NULL if no memory + 
*/ +void *os_if_spectral_prep_skb(struct wlan_objmgr_pdev *pdev); + +/** + * os_if_spectral_netlink_deinit() - De-initialize Spectral Netlink data + * structures and de-register the NL handlers from Spectral target_if + * @pdev: Pointer to pdev + * + * Return: None + */ +void os_if_spectral_netlink_deinit(struct wlan_objmgr_pdev *pdev); +#else + +static inline void os_if_spectral_netlink_init(struct wlan_objmgr_pdev *pdev) +{ +} + +static inline void os_if_spectral_netlink_deinit(struct wlan_objmgr_pdev *pdev) +{ +} + +#endif +#endif /* _OS_IF_SPECTRAL_NETLINK_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/inc/wlan_cfg80211_spectral.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/inc/wlan_cfg80211_spectral.h new file mode 100644 index 0000000000000000000000000000000000000000..39bae6fae644ec20eec95e9dadb0929431bf23b0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/inc/wlan_cfg80211_spectral.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: declares driver functions interfacing with linux kernel + */ + +#ifndef _WLAN_CFG80211_SPECTRAL_H_ +#define _WLAN_CFG80211_SPECTRAL_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define CONFIG_REQUESTED(type) ((type == \ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_SCAN_AND_CONFIG) || \ + (type == QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_CONFIG)) + +#define SCAN_REQUESTED(type) ((type == \ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_SCAN_AND_CONFIG) || \ + (type == QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE_SCAN)) + +/** + * wlan_cfg80211_register_spectral_cmd_handler() - Registration api for spectral + * @pdev: Pointer to pdev + * @idx: Index in function table + * @handler: Pointer to handler + * + * Return: 0 on success, negative value on failure + */ +void wlan_cfg80211_register_spectral_cmd_handler(struct wlan_objmgr_pdev *pdev, + int idx, + void *handler); + +/** + * wlan_cfg80211_spectral_scan_config_and_start() - Start spectral scan + * @wiphy: Pointer to wiphy + * @pdev: Pointer to pdev + * @data: Reference to data + * @data_len: Length of @data + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_spectral_scan_config_and_start(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len); + +/** + * wlan_cfg80211_spectral_scan_stop() - Stop spectral scan + * @wiphy: Pointer to wiphy + * @pdev: Pointer to pdev + * @data: Reference to data + * @data_len: Length of @data + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_spectral_scan_stop(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len); + +/** + * wlan_cfg80211_spectral_scan_get_config() - Get spectral scan config + * @wiphy: Pointer to wiphy + * @pdev: Pointer to pdev + * @data: Reference to data + * @data_len: Length of @data + * + * Return: 0 on success, negative value on failure + */ +int 
wlan_cfg80211_spectral_scan_get_config(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len); + +/** + * wlan_cfg80211_spectral_scan_get_cap() - Get spectral system capabilities + * @wiphy: Pointer to wiphy + * @pdev: Pointer to pdev + * @data: Reference to data + * @data_len: Length of @data + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_spectral_scan_get_cap(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len); + +/** + * wlan_cfg80211_spectral_scan_get_diag_stats() - Get spectral diag stats + * @wiphy: Pointer to wiphy + * @pdev: Pointer to pdev + * @data: Reference to data + * @data_len: Length of @data + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_spectral_scan_get_diag_stats(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len); + +/** + * wlan_cfg80211_spectral_scan_get_status() - Get spectral scan status + * @wiphy: Pointer to wiphy + * @pdev: Pointer to pdev + * @data: Reference to data + * @data_len: Length of @data + * + * Return: 0 on success, negative value on failure + */ +int wlan_cfg80211_spectral_scan_get_status(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/src/os_if_spectral_netlink.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/src/os_if_spectral_netlink.c new file mode 100644 index 0000000000000000000000000000000000000000..8e56141a18354ca4e4f26d591f582a7d5694734f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/src/os_if_spectral_netlink.c @@ -0,0 +1,489 @@ +/* + * Copyright (c) 2011, 2017-2018 The Linux Foundation. All rights reserved. 
+ * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#ifdef CNSS_GENL +#include +#endif + +#ifndef CNSS_GENL +static struct sock *os_if_spectral_nl_sock; +static atomic_t spectral_nl_users = ATOMIC_INIT(0); +#endif + +#if (KERNEL_VERSION(2, 6, 31) > LINUX_VERSION_CODE) +void +os_if_spectral_nl_data_ready(struct sock *sk, int len) +{ + spectral_debug("%d", __LINE__); +} + +#else +void +os_if_spectral_nl_data_ready(struct sk_buff *skb) +{ + spectral_debug("%d", __LINE__); +} +#endif /* VERSION */ + +#ifndef CNSS_GENL +/** + * os_if_spectral_init_nl_cfg() - Initialize netlink kernel + * configuration parameters + * @cfg : Pointer to netlink_kernel_cfg + * + * Initialize netlink kernel configuration parameters required + * for spectral module + * + * Return: None + */ +#if KERNEL_VERSION(3, 6, 0) <= LINUX_VERSION_CODE +static void +os_if_spectral_init_nl_cfg(struct netlink_kernel_cfg *cfg) +{ + cfg->groups = 1; + cfg->input = os_if_spectral_nl_data_ready; +} +#else +static void +os_if_spectral_init_nl_cfg(struct netlink_kernel_cfg *cfg) +{ +} +#endif +/** + * os_if_spectral_create_nl_sock() - Create Netlink socket + * @cfg : Pointer to netlink_kernel_cfg + * + * Create Netlink socket required for spectral module 
+ * + * Return: None + */ +#if KERNEL_VERSION(3, 7, 0) <= LINUX_VERSION_CODE +static void +os_if_spectral_create_nl_sock(struct netlink_kernel_cfg *cfg) +{ + os_if_spectral_nl_sock = + (struct sock *)netlink_kernel_create(&init_net, + SPECTRAL_NETLINK, cfg); +} +#elif KERNEL_VERSION(3, 6, 0) <= LINUX_VERSION_CODE +static void +os_if_spectral_create_nl_sock(struct netlink_kernel_cfg *cfg) +{ + os_if_spectral_nl_sock = + (struct sock *)netlink_kernel_create(&init_net, + SPECTRAL_NETLINK, + THIS_MODULE, cfg); +} +#elif (KERNEL_VERSION(2, 6, 31) > LINUX_VERSION_CODE) +static void +os_if_spectral_create_nl_sock(struct netlink_kernel_cfg *cfg) +{ + os_if_spectral_nl_sock = + (struct sock *)netlink_kernel_create( + SPECTRAL_NETLINK, 1, + &os_if_spectral_nl_data_ready, + THIS_MODULE); +} +#else +#if (KERNEL_VERSION(3, 10, 0) <= LINUX_VERSION_CODE) +static void +os_if_spectral_create_nl_sock(struct netlink_kernel_cfg *cfg) +{ + memset(cfg, 0, sizeof(*cfg)); + cfg->groups = 1; + cfg->input = &os_if_spectral_nl_data_ready; + os_if_spectral_nl_sock = + (struct sock *)netlink_kernel_create(&init_net, + SPECTRAL_NETLINK, cfg); +} +#else +static void +os_if_spectral_create_nl_sock(struct netlink_kernel_cfg *cfg) +{ + os_if_spectral_nl_sock = + (struct sock *)netlink_kernel_create( + &init_net, + SPECTRAL_NETLINK, 1, + &os_if_spectral_nl_data_ready, + NULL, THIS_MODULE); +} +#endif +#endif + +/** + * os_if_spectral_init_nl() - Initialize netlink data structures for + * spectral module + * @pdev : Pointer to pdev + * + * Return: 0 on success else failure + */ +static int +os_if_spectral_init_nl(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_spectral *ps = NULL; + struct netlink_kernel_cfg cfg; + + memset(&cfg, 0, sizeof(cfg)); + if (!pdev) { + spectral_err("PDEV is NULL!"); + return -EINVAL; + } + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + + if (!ps) { + spectral_err("PDEV SPECTRAL object is NULL!"); + return -EINVAL; + } + 
os_if_spectral_init_nl_cfg(&cfg); + + if (!os_if_spectral_nl_sock) { + os_if_spectral_create_nl_sock(&cfg); + + if (!os_if_spectral_nl_sock) { + spectral_err("NETLINK_KERNEL_CREATE FAILED"); + return -ENODEV; + } + } + ps->spectral_sock = os_if_spectral_nl_sock; + + if (!ps->spectral_sock) { + spectral_err("ps->spectral_sock is NULL"); + return -ENODEV; + } + atomic_inc(&spectral_nl_users); + + return 0; +} + +/** + * os_if_spectral_destroy_netlink() - De-initialize netlink data structures for + * spectral module + * @pdev : Pointer to pdev + * + * Return: Success/Failure + */ +static int +os_if_spectral_destroy_netlink(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_spectral *ps = NULL; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return -EINVAL; + } + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + + if (!ps) { + spectral_err("PDEV SPECTRAL object is NULL!"); + return -EINVAL; + } + ps->spectral_sock = NULL; + if (atomic_dec_and_test(&spectral_nl_users)) { + sock_release(os_if_spectral_nl_sock->sk_socket); + os_if_spectral_nl_sock = NULL; + } + return 0; +} +#else + +static int +os_if_spectral_init_nl(struct wlan_objmgr_pdev *pdev) +{ + return 0; +} + +static int +os_if_spectral_destroy_netlink(struct wlan_objmgr_pdev *pdev) +{ + return 0; +} +#endif + +void * +os_if_spectral_prep_skb(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_spectral *ps = NULL; + struct nlmsghdr *spectral_nlh = NULL; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return NULL; + } + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + + if (!ps) { + spectral_err("PDEV SPECTRAL object is NULL!"); + return NULL; + } + ps->skb = qdf_nbuf_alloc(NULL, MAX_SPECTRAL_PAYLOAD, 0, 0, false); + + if (!ps->skb) { + spectral_err("allocate skb (len=%u) failed", + MAX_SPECTRAL_PAYLOAD); + return NULL; + } + + qdf_nbuf_put_tail(ps->skb, MAX_SPECTRAL_PAYLOAD); + spectral_nlh = (struct nlmsghdr *)ps->skb->data; + + OS_MEMZERO(spectral_nlh, 
sizeof(*spectral_nlh)); + + /* + * Possible bug that size of struct spectral_samp_msg and + * SPECTRAL_MSG differ by 3 bytes so we miss 3 bytes + */ + + spectral_nlh->nlmsg_len = NLMSG_SPACE(sizeof(struct spectral_samp_msg)); + spectral_nlh->nlmsg_pid = 0; + spectral_nlh->nlmsg_flags = 0; + spectral_nlh->nlmsg_type = WLAN_NL_MSG_SPECTRAL_SCAN; + + return NLMSG_DATA(spectral_nlh); +} + +#if (KERNEL_VERSION(2, 6, 31) > LINUX_VERSION_CODE) +static inline void +os_if_init_spectral_skb_dst_pid( + struct sk_buff *skb, + struct pdev_spectral *ps) +{ + NETLINK_CB(skb).dst_pid = + ps->spectral_pid; +} +#else +static inline void +os_if_init_spectral_skb_dst_pid( + struct sk_buff *skb, + struct pdev_spectral *ps) +{ +} +#endif /* VERSION - field deprecated by newer kernels */ + +#if KERNEL_VERSION(3, 7, 0) > LINUX_VERSION_CODE +static inline void +os_if_init_spectral_skb_pid_portid(struct sk_buff *skb) +{ + NETLINK_CB(skb).pid = 0; /* from kernel */ +} +#else +static inline void +os_if_init_spectral_skb_pid_portid(struct sk_buff *skb) +{ + NETLINK_CB(skb).portid = 0; /* from kernel */ +} +#endif + + +/** + * os_if_spectral_nl_unicast_msg() - Sends unicast Spectral message to user + * space + * @pdev : Pointer to pdev + * + * Return: void + */ +#ifndef CNSS_GENL +static int +os_if_spectral_nl_unicast_msg(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_spectral *ps = NULL; + int status; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return -EINVAL; + } + + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + if (!ps) { + spectral_err("PDEV SPECTRAL object is NULL!"); + return -EINVAL; + } + + if (!ps->skb) { + spectral_err("Socket buffer is null"); + return -EINVAL; + } + + if (!ps->spectral_sock) { + spectral_err("Spectral Socket is invalid"); + dev_kfree_skb(ps->skb); + return -EINVAL; + } + + os_if_init_spectral_skb_dst_pid(ps->skb, ps); + + os_if_init_spectral_skb_pid_portid(ps->skb); + + /* to mcast group 1<<0 */ + 
NETLINK_CB(ps->skb).dst_group = 0; + + status = netlink_unicast(ps->spectral_sock, + ps->skb, + ps->spectral_pid, MSG_DONTWAIT); + + return status; +} +#else + +static int +os_if_spectral_nl_unicast_msg(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_spectral *ps = NULL; + int status; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return -EINVAL; + } + + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + if (!ps) { + spectral_err("PDEV SPECTRAL object is NULL!"); + return -EINVAL; + } + + if (!ps->skb) { + spectral_err("Socket buffer is null"); + return -EINVAL; + } + + spectral_debug("spectral unicast message"); + os_if_init_spectral_skb_pid_portid(ps->skb); + + status = nl_srv_ucast(ps->skb, ps->spectral_pid, MSG_DONTWAIT, + WLAN_NL_MSG_SPECTRAL_SCAN, CLD80211_MCGRP_OEM_MSGS); + if (status < 0) + spectral_err("failed to send to spectral scan app"); + + return status; +} + +#endif +/** + * os_if_spectral_nl_bcast_msg() - Sends broadcast Spectral message to user + * space + * @pdev : Pointer to pdev + * + * Return: void + */ +static int +os_if_spectral_nl_bcast_msg(struct wlan_objmgr_pdev *pdev) +{ +#if (KERNEL_VERSION(2, 6, 31) >= LINUX_VERSION_CODE) + fd_set write_set; +#endif + int status; + struct pdev_spectral *ps = NULL; + +#if (KERNEL_VERSION(2, 6, 31) >= LINUX_VERSION_CODE) + FD_ZERO(&write_set); +#endif + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return -EINVAL; + } + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + + if (!ps) { + spectral_err("PDEV SPECTRAL object is NULL!"); + return -EINVAL; + } + + if (!ps->skb) { + spectral_err("Socket buffer is null"); + return -EINVAL; + } + + if (!ps->spectral_sock) { + dev_kfree_skb(ps->skb); + return -EINVAL; + } + + status = netlink_broadcast(ps->spectral_sock, + ps->skb, + 0, 1, GFP_ATOMIC); + + return status; +} + +void +os_if_spectral_netlink_init(struct wlan_objmgr_pdev *pdev) +{ + struct spectral_nl_cb nl_cb = {0}; + struct 
spectral_context *sptrl_ctx; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return; + } + + sptrl_ctx = spectral_get_spectral_ctx_from_pdev(pdev); + + if (!sptrl_ctx) { + spectral_err("Spectral context is NULL!"); + return; + } + + os_if_spectral_init_nl(pdev); + + /* Register Netlink handlers */ + nl_cb.get_nbuff = os_if_spectral_prep_skb; + nl_cb.send_nl_bcast = os_if_spectral_nl_bcast_msg; + nl_cb.send_nl_unicast = os_if_spectral_nl_unicast_msg; + + if (sptrl_ctx->sptrlc_register_netlink_cb) + sptrl_ctx->sptrlc_register_netlink_cb(pdev, &nl_cb); +} +qdf_export_symbol(os_if_spectral_netlink_init); + +void os_if_spectral_netlink_deinit(struct wlan_objmgr_pdev *pdev) +{ + struct spectral_context *sptrl_ctx; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return; + } + + sptrl_ctx = spectral_get_spectral_ctx_from_pdev(pdev); + + if (!sptrl_ctx) { + spectral_err("Spectral context is NULL!"); + return; + } + + if (sptrl_ctx->sptrlc_deregister_netlink_cb) + sptrl_ctx->sptrlc_deregister_netlink_cb(pdev); + + os_if_spectral_destroy_netlink(pdev); +} +qdf_export_symbol(os_if_spectral_netlink_deinit); diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/src/wlan_cfg80211_spectral.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/src/wlan_cfg80211_spectral.c new file mode 100644 index 0000000000000000000000000000000000000000..4c7404b9f967496b57a3da1259cc53133bee7bdc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/spectral/src/wlan_cfg80211_spectral.c @@ -0,0 +1,675 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: defines driver functions interfacing with linux kernel + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const struct nla_policy spectral_scan_policy[ + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_MAX + 1] = { + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_COUNT] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_PERIOD] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PRIORITY] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_SIZE] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_GC_ENA] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RESTART_ENA] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NOISE_FLOOR_REF] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_INIT_DELAY] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NB_TONE_THR] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_STR_BIN_THR] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_WB_RPT_MODE] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_RPT_MODE] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_THR] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PWR_FORMAT] = { + .type = NLA_U32}, + 
[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RPT_MODE] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_BIN_SCALE] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DBM_ADJ] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_CHN_MASK] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_COOKIE] = { + .type = NLA_U64}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_PERIOD] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SHORT_REPORT] = { + .type = NLA_U32}, + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DEBUG_LEVEL] = { + .type = NLA_U32}, +}; + +static void wlan_spectral_intit_config(struct spectral_config *config_req) +{ + config_req->ss_period = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_count = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_fft_period = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_short_report = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_spectral_pri = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_fft_size = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_gc_ena = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_restart_ena = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_noise_floor_ref = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_init_delay = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_nb_tone_thr = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_str_bin_thr = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_wb_rpt_mode = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_rssi_rpt_mode = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_rssi_thr = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_pwr_format = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_rpt_mode = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_bin_scale = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_dbm_adj = SPECTRAL_PHYERR_PARAM_NOVAL; + config_req->ss_chn_mask = SPECTRAL_PHYERR_PARAM_NOVAL; +} + +static int 
wlan_spectral_set_config(struct wlan_objmgr_pdev *pdev, + struct spectral_config *config_req) +{ + int status; + + status = ucfg_spectral_control(pdev, + SPECTRAL_SET_CONFIG, + config_req, + sizeof(struct spectral_config), + NULL, + NULL); + if (status < 0) + return -EINVAL; + + return 0; +} + +static int wlan_spectral_set_debug_level(struct wlan_objmgr_pdev *pdev, + uint32_t spectral_dbg_level) +{ + int status; + + status = ucfg_spectral_control(pdev, + SPECTRAL_SET_DEBUG_LEVEL, + &spectral_dbg_level, + sizeof(uint32_t), + NULL, + NULL); + if (status < 0) + return -EINVAL; + + return 0; +} + +static int wlan_spectral_get_debug_level(struct wlan_objmgr_pdev *pdev, + uint32_t *spectral_dbg_level) +{ + int status; + uint32_t outsize; + + outsize = sizeof(uint32_t); + status = ucfg_spectral_control(pdev, + SPECTRAL_GET_DEBUG_LEVEL, + NULL, + 0, + spectral_dbg_level, + &outsize); + if (status < 0) + return -EINVAL; + + return 0; +} + +static int wlan_spectral_get_config(struct wlan_objmgr_pdev *pdev, + struct spectral_config *config_req) +{ + int status; + uint32_t outsize; + + outsize = sizeof(struct spectral_config); + status = ucfg_spectral_control(pdev, + SPECTRAL_GET_CONFIG, + NULL, + 0, + config_req, + &outsize); + if (status < 0) + return -EINVAL; + + return 0; +} + +static int wlan_spectral_get_cap(struct wlan_objmgr_pdev *pdev, + struct spectral_caps *spectral_cap) +{ + int status; + uint32_t outsize; + + outsize = sizeof(struct spectral_caps); + status = ucfg_spectral_control(pdev, + SPECTRAL_GET_CAPABILITY_INFO, + NULL, + 0, + spectral_cap, + &outsize); + if (status < 0) + return -EINVAL; + + return 0; +} + +static int wlan_spectral_get_diag_stats( + struct wlan_objmgr_pdev *pdev, + struct spectral_diag_stats *spectral_diag) +{ + int status; + uint32_t outsize; + + outsize = sizeof(struct spectral_diag_stats); + status = ucfg_spectral_control(pdev, + SPECTRAL_GET_DIAG_STATS, + NULL, + 0, + spectral_diag, + &outsize); + if (status < 0) + return -EINVAL; + + 
return 0; +} + +static int wlan_spectral_scan_get_status( + struct wlan_objmgr_pdev *pdev, + struct spectral_scan_state *sscan_state) +{ + uint32_t is_active; + uint32_t is_enabled; + int status; + uint32_t outsize; + + outsize = sizeof(uint32_t); + status = ucfg_spectral_control(pdev, + SPECTRAL_IS_ACTIVE, + NULL, + 0, + &is_active, + &outsize); + if (status < 0) + return -EINVAL; + + sscan_state->is_active = is_active; + + outsize = sizeof(uint32_t); + status = ucfg_spectral_control(pdev, + SPECTRAL_IS_ENABLED, + NULL, + 0, + &is_enabled, + &outsize); + if (status < 0) + return -EINVAL; + + sscan_state->is_enabled = is_enabled; + + return 0; +} + +static int wlan_start_spectral_scan(struct wlan_objmgr_pdev *pdev) +{ + int status; + + status = ucfg_spectral_control(pdev, + SPECTRAL_ACTIVATE_SCAN, + NULL, + 0, + NULL, + NULL); + if (status < 0) + return -EINVAL; + + return 0; +} + +static int wlan_stop_spectral_scan(struct wlan_objmgr_pdev *pdev) +{ + int status; + + status = ucfg_spectral_control(pdev, + SPECTRAL_STOP_SCAN, + NULL, + 0, + NULL, + NULL); + if (status < 0) + return -EINVAL; + + return 0; +} + +int wlan_cfg80211_spectral_scan_config_and_start(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len) +{ + struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_MAX + 1]; + struct spectral_config config_req; + QDF_STATUS status; + uint64_t cookie; + struct sk_buff *skb; + uint32_t spectral_dbg_level; + uint32_t scan_req_type = 0; + + if (wlan_cfg80211_nla_parse( + tb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_MAX, + data, + data_len, + spectral_scan_policy)) { + qdf_print("Invalid Spectral Scan config ATTR"); + return -EINVAL; + } + + wlan_spectral_intit_config(&config_req); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_COUNT]) + config_req.ss_count = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_COUNT]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_PERIOD]) + 
config_req.ss_period = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_PERIOD]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PRIORITY]) + config_req.ss_spectral_pri = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PRIORITY]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_SIZE]) + config_req.ss_fft_size = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_SIZE]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_GC_ENA]) + config_req.ss_gc_ena = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_GC_ENA]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RESTART_ENA]) + config_req.ss_restart_ena = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RESTART_ENA]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NOISE_FLOOR_REF]) + config_req.ss_noise_floor_ref = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NOISE_FLOOR_REF]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_INIT_DELAY]) + config_req.ss_init_delay = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_INIT_DELAY]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NB_TONE_THR]) + config_req.ss_nb_tone_thr = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NB_TONE_THR]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_STR_BIN_THR]) + config_req.ss_str_bin_thr = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_STR_BIN_THR]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_WB_RPT_MODE]) + config_req.ss_wb_rpt_mode = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_WB_RPT_MODE]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_RPT_MODE]) + config_req.ss_rssi_rpt_mode = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_RPT_MODE]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_THR]) + config_req.ss_rssi_thr = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_THR]); + + if 
(tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PWR_FORMAT]) + config_req.ss_pwr_format = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PWR_FORMAT]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RPT_MODE]) + config_req.ss_rpt_mode = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RPT_MODE]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_BIN_SCALE]) + config_req.ss_bin_scale = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_BIN_SCALE]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DBM_ADJ]) + config_req.ss_dbm_adj = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DBM_ADJ]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_CHN_MASK]) + config_req.ss_chn_mask = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_CHN_MASK]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_PERIOD]) + config_req.ss_fft_period = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_PERIOD]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SHORT_REPORT]) + config_req.ss_short_report = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SHORT_REPORT]); + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DEBUG_LEVEL]) { + spectral_dbg_level = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DEBUG_LEVEL]); + status = wlan_spectral_set_debug_level(pdev, + spectral_dbg_level); + if (QDF_STATUS_SUCCESS != status) + return -EINVAL; + } + + if (tb[QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE]) + scan_req_type = nla_get_u32(tb + [QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_REQUEST_TYPE]); + + if (CONFIG_REQUESTED(scan_req_type)) { + status = wlan_spectral_set_config(pdev, &config_req); + if (QDF_STATUS_SUCCESS != status) + return -EINVAL; + } + + if (SCAN_REQUESTED(scan_req_type)) { + status = wlan_start_spectral_scan(pdev); + if (QDF_STATUS_SUCCESS != status) + return -EINVAL; + } + + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, sizeof(u64) + + NLA_HDRLEN + NLMSG_HDRLEN); + 
if (!skb) { + qdf_print(" reply skb alloc failed"); + return -ENOMEM; + } + + cookie = 0; + if (wlan_cfg80211_nla_put_u64(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_COOKIE, + cookie)) { + kfree_skb(skb); + return -EINVAL; + } + + cfg80211_vendor_cmd_reply(skb); + + return 0; +} + +int wlan_cfg80211_spectral_scan_stop(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len) +{ + QDF_STATUS status; + + status = wlan_stop_spectral_scan(pdev); + if (QDF_STATUS_SUCCESS != status) + return -EINVAL; + return 0; +} + +int wlan_cfg80211_spectral_scan_get_config(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len) +{ + struct spectral_config config_buf; + uint32_t spectral_dbg_level; + struct sk_buff *skb; + + wlan_spectral_get_config(pdev, &config_buf); + wlan_spectral_get_debug_level(pdev, &spectral_dbg_level); + + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, (21 * sizeof(u32)) + + NLA_HDRLEN + NLMSG_HDRLEN); + if (!skb) { + qdf_print(" reply skb alloc failed"); + return -ENOMEM; + } + + if (nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_COUNT, + config_buf.ss_count) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SCAN_PERIOD, + config_buf.ss_period) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PRIORITY, + config_buf.ss_spectral_pri) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_SIZE, + config_buf.ss_fft_size) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_GC_ENA, + config_buf.ss_gc_ena) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RESTART_ENA, + config_buf.ss_restart_ena) || + nla_put_u32( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NOISE_FLOOR_REF, + config_buf.ss_noise_floor_ref) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_INIT_DELAY, + config_buf.ss_init_delay) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_NB_TONE_THR, + 
config_buf.ss_nb_tone_thr) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_STR_BIN_THR, + config_buf.ss_str_bin_thr) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_WB_RPT_MODE, + config_buf.ss_wb_rpt_mode) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_RPT_MODE, + config_buf.ss_rssi_rpt_mode) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RSSI_THR, + config_buf.ss_rssi_thr) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_PWR_FORMAT, + config_buf.ss_pwr_format) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_RPT_MODE, + config_buf.ss_rpt_mode) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_BIN_SCALE, + config_buf.ss_bin_scale) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DBM_ADJ, + config_buf.ss_dbm_adj) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_CHN_MASK, + config_buf.ss_chn_mask) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_FFT_PERIOD, + config_buf.ss_fft_period) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_SHORT_REPORT, + config_buf.ss_short_report) || + nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CONFIG_DEBUG_LEVEL, + spectral_dbg_level)) { + kfree_skb(skb); + return -EINVAL; + } + cfg80211_vendor_cmd_reply(skb); + + return 0; +} + +int wlan_cfg80211_spectral_scan_get_cap(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len) +{ + struct spectral_caps spectral_cap; + struct sk_buff *skb; + + wlan_spectral_get_cap(pdev, &spectral_cap); + + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 5 * sizeof(u32) + + NLA_HDRLEN + NLMSG_HDRLEN); + if (!skb) { + qdf_print(" reply skb alloc failed"); + return -ENOMEM; + } + + if (spectral_cap.phydiag_cap) + if (nla_put_flag( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_PHYDIAG)) + goto fail; + + if (spectral_cap.radar_cap) + if (nla_put_flag(skb, + 
QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_RADAR)) + goto fail; + + if (spectral_cap.spectral_cap) + if (nla_put_flag( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_SPECTRAL)) + goto fail; + + if (spectral_cap.advncd_spectral_cap) + if (nla_put_flag( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_ADVANCED_SPECTRAL)) + goto fail; + + if (nla_put_u32(skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_CAP_HW_GEN, + spectral_cap.hw_gen)) + goto fail; + + cfg80211_vendor_cmd_reply(skb); + + return 0; + +fail: + kfree_skb(skb); + return -EINVAL; +} + +int wlan_cfg80211_spectral_scan_get_diag_stats(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len) +{ + struct spectral_diag_stats spetcral_diag; + struct sk_buff *skb; + + wlan_spectral_get_diag_stats(pdev, &spetcral_diag); + + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 5 * sizeof(u64) + + NLA_HDRLEN + NLMSG_HDRLEN); + if (!skb) { + qdf_print(" reply skb alloc failed"); + return -ENOMEM; + } + + if (wlan_cfg80211_nla_put_u64( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_SIG_MISMATCH, + spetcral_diag.spectral_mismatch) || + wlan_cfg80211_nla_put_u64( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_SEC80_SFFT_INSUFFLEN, + spetcral_diag.spectral_sec80_sfft_insufflen) || + wlan_cfg80211_nla_put_u64( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_NOSEC80_SFFT, + spetcral_diag.spectral_no_sec80_sfft) || + wlan_cfg80211_nla_put_u64( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_VHTSEG1ID_MISMATCH, + spetcral_diag.spectral_vhtseg1id_mismatch) || + wlan_cfg80211_nla_put_u64( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_DIAG_VHTSEG2ID_MISMATCH, + spetcral_diag.spectral_vhtseg2id_mismatch)) { + kfree_skb(skb); + return -EINVAL; + } + cfg80211_vendor_cmd_reply(skb); + + return 0; +} + +int wlan_cfg80211_spectral_scan_get_status(struct wiphy *wiphy, + struct wlan_objmgr_pdev *pdev, + const void *data, + int data_len) +{ + struct spectral_scan_state sscan_state = { 0 }; + struct sk_buff *skb; + 
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 2 * sizeof(u32) + + NLA_HDRLEN + NLMSG_HDRLEN); + if (!skb) { + qdf_print(" reply skb alloc failed"); + return -ENOMEM; + } + + wlan_spectral_scan_get_status(pdev, &sscan_state); + + if (sscan_state.is_enabled) + if (nla_put_flag( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_IS_ENABLED)) + goto fail; + + if (sscan_state.is_active) + if (nla_put_flag( + skb, + QCA_WLAN_VENDOR_ATTR_SPECTRAL_SCAN_STATUS_IS_ACTIVE)) + goto fail; + cfg80211_vendor_cmd_reply(skb); + return 0; + +fail: + kfree_skb(skb); + return -EINVAL; +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/tdls/inc/wlan_cfg80211_tdls.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/tdls/inc/wlan_cfg80211_tdls.h new file mode 100644 index 0000000000000000000000000000000000000000..5168cfc59280f25af3a1e914fca613ef04045b2e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/tdls/inc/wlan_cfg80211_tdls.h @@ -0,0 +1,309 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: declares driver functions interfacing with linux kernel + */ + +#ifndef _WLAN_CFG80211_TDLS_H_ +#define _WLAN_CFG80211_TDLS_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONVERGED_TDLS_ENABLE + +#define TDLS_VDEV_MAGIC 0x54444c53 /* "TDLS" */ + +/** + * struct osif_tdls_vdev - OS tdls vdev private structure + * @tdls_add_peer_comp: Completion to add tdls peer + * @tdls_del_peer_comp: Completion to delete tdls peer + * @tdls_mgmt_comp: Completion to send tdls mgmt packets + * @tdls_link_establish_req_comp: Completion to establish link, sync to + * send establish params to firmware, not used today. + * @tdls_teardown_comp: Completion to teardown tdls peer + * @tdls_user_cmd_comp: tdls user command completion event + * @tdls_antenna_switch_comp: Completion to switch antenna + * @tdls_add_peer_status: Peer status after add peer + * @mgmt_tx_completion_status: Tdls mgmt frames TX completion status code + * @tdls_user_cmd_len: tdls user command written buffer length + * @tdls_antenna_switch_status: return status after antenna switch + */ +struct osif_tdls_vdev { + struct completion tdls_add_peer_comp; + struct completion tdls_del_peer_comp; + struct completion tdls_mgmt_comp; + struct completion tdls_link_establish_req_comp; + struct completion tdls_teardown_comp; + struct completion tdls_user_cmd_comp; + struct completion tdls_antenna_switch_comp; + QDF_STATUS tdls_add_peer_status; + uint32_t mgmt_tx_completion_status; + uint32_t tdls_user_cmd_len; + int tdls_antenna_switch_status; +}; + +/** + * enum qca_wlan_vendor_tdls_trigger_mode_vdev_map: Maps the user space TDLS + * trigger mode in the host driver. + * @WLAN_VENDOR_TDLS_TRIGGER_MODE_EXPLICIT: TDLS Connection and + * disconnection handled by user space. + * @WLAN_VENDOR_TDLS_TRIGGER_MODE_IMPLICIT: TDLS connection and + * disconnection controlled by host driver based on data traffic. 
+ * @WLAN_VENDOR_TDLS_TRIGGER_MODE_EXTERNAL: TDLS connection and + * disconnection jointly controlled by user space and host driver. + */ +enum qca_wlan_vendor_tdls_trigger_mode_vdev_map { + WLAN_VENDOR_TDLS_TRIGGER_MODE_EXPLICIT = + QCA_WLAN_VENDOR_TDLS_TRIGGER_MODE_EXPLICIT, + WLAN_VENDOR_TDLS_TRIGGER_MODE_IMPLICIT = + QCA_WLAN_VENDOR_TDLS_TRIGGER_MODE_IMPLICIT, + WLAN_VENDOR_TDLS_TRIGGER_MODE_EXTERNAL = + ((QCA_WLAN_VENDOR_TDLS_TRIGGER_MODE_EXPLICIT | + QCA_WLAN_VENDOR_TDLS_TRIGGER_MODE_IMPLICIT) << 1), +}; + +/** + * wlan_cfg80211_tdls_osif_priv_init() - API to initialize tdls os private + * @vdev: vdev object + * + * API to initialize tdls os private + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_cfg80211_tdls_osif_priv_init(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_cfg80211_tdls_osif_priv_deinit() - API to deinitialize tdls os private + * @vdev: vdev object + * + * API to deinitialize tdls os private + * + * Return: None + */ +void wlan_cfg80211_tdls_osif_priv_deinit(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_cfg80211_tdls_add_peer() - process cfg80211 add TDLS peer request + * @vdev: vdev object + * @mac: MAC address for TDLS peer + * + * Return: 0 for success; negative errno otherwise + */ +int wlan_cfg80211_tdls_add_peer(struct wlan_objmgr_vdev *vdev, + const uint8_t *mac); + +/** + * wlan_cfg80211_tdls_update_peer() - process cfg80211 update TDLS peer request + * @vdev: vdev object + * @mac: MAC address for TDLS peer + * @params: Pointer to station parameters + * + * Return: 0 for success; negative errno otherwise + */ +int wlan_cfg80211_tdls_update_peer(struct wlan_objmgr_vdev *vdev, + const uint8_t *mac, + struct station_parameters *params); + +/** + * wlan_cfg80211_tdls_configure_mode() - configure tdls mode + * @vdev: vdev obj manager + * @trigger_mode: tdls trgger mode + * + * Return: 0 for success; negative errno otherwise + */ +int wlan_cfg80211_tdls_configure_mode(struct wlan_objmgr_vdev *vdev, + uint32_t trigger_mode); + +/** + * 
wlan_cfg80211_tdls_oper() - process cfg80211 operation on an TDLS peer + * @vdev: vdev object + * @peer: MAC address of the TDLS peer + * @oper: cfg80211 TDLS operation + * + * Return: 0 on success; negative errno otherwise + */ +int wlan_cfg80211_tdls_oper(struct wlan_objmgr_vdev *vdev, + const uint8_t *peer, + enum nl80211_tdls_operation oper); + +/** + * wlan_cfg80211_tdls_get_all_peers() - get all the TDLS peers from the list + * @vdev: vdev object + * @buf: output buffer + * @buflen: valid length of the output error + * + * Return: length of the output buffer + */ +int wlan_cfg80211_tdls_get_all_peers(struct wlan_objmgr_vdev *vdev, + char *buf, int buflen); + +/** + * wlan_cfg80211_tdls_mgmt() - process tdls management frames from the supplicant + * @vdev: vdev object + * @peer: MAC address of the TDLS peer + * @action_code: type of TDLS mgmt frame to be sent + * @dialog_token: dialog token used in the frame + * @status_code: status to be incuded in the frame + * @peer_capability: peer capability information + * @buf: additional IEs to be included + * @len: length of additional Ies + * @oper: cfg80211 TDLS operation + * + * Return: 0 on success; negative errno otherwise + */ +int wlan_cfg80211_tdls_mgmt(struct wlan_objmgr_vdev *vdev, + const uint8_t *peer, + uint8_t action_code, uint8_t dialog_token, + uint16_t status_code, uint32_t peer_capability, + const uint8_t *buf, size_t len); + +/** + * wlan_tdls_antenna_switch() - process tdls antenna switch + * @vdev: vdev object + * @mode: antenna mode + * + * Return: 0 on success; -EAGAIN to retry + */ +int wlan_tdls_antenna_switch(struct wlan_objmgr_vdev *vdev, uint32_t mode); + +/** + * wlan_cfg80211_tdls_event_callback() - callback for tdls module + * @userdata: user data + * @type: request callback type + * @param: passed parameter + * + * This is used by TDLS to sync with os interface + * + * Return: None + */ +void wlan_cfg80211_tdls_event_callback(void *userdata, + enum tdls_event_type type, + struct 
tdls_osif_indication *param); + +/** + * wlan_cfg80211_tdls_rx_callback() - Callback for rx mgmt frame + * @user_data: pointer to soc object + * @rx_frame: RX mgmt frame information + * + * This callback will be used to rx frames in os interface. + * + * Return: None + */ +void wlan_cfg80211_tdls_rx_callback(void *user_data, + struct tdls_rx_mgmt_frame *rx_frame); + +/** + * hdd_notify_tdls_reset_adapter() - notify reset adapter to TDLS + * @vdev: vdev object manager + * + * Notify hdd reset adapter to TDLS component + * + * Return: None + */ +void hdd_notify_tdls_reset_adapter(struct wlan_objmgr_vdev *vdev); + +/** + * hdd_notify_sta_connect() - notify sta connect to TDLS + * @session_id: pointer to soc object + * @tdls_chan_swit_prohibited: indicates channel switch capability + * @tdls_prohibited: indicates tdls allowed or not + * @vdev: vdev object manager + * + * Notify sta connect event to TDLS component + * + * Return: None + */ +void +hdd_notify_sta_connect(uint8_t session_id, + bool tdls_chan_swit_prohibited, + bool tdls_prohibited, + struct wlan_objmgr_vdev *vdev); + +/** + * hdd_notify_sta_disconnect() - notify sta disconnect to TDLS + * @session_id: pointer to soc object + * @lfr_roam: indicate, whether disconnect due to lfr roam + * @bool user_disconnect: disconnect from user space + * @vdev: vdev object manager + * + * Notify sta disconnect event to TDLS component + * + * Return: None + */ +void hdd_notify_sta_disconnect(uint8_t session_id, + bool lfr_roam, + bool user_disconnect, + struct wlan_objmgr_vdev *vdev); + +/** + * hdd_notify_teardown_tdls_links() - notify TDLS to teardown links + * @psoc: psoc object + * + * Notify tdls to teardown all the links, due to certain events + * in the system + * + * Return: None + */ +void hdd_notify_teardown_tdls_links(struct wlan_objmgr_psoc *psoc); + +#else +static inline void +hdd_notify_tdls_reset_adapter(struct wlan_objmgr_vdev *vdev) +{ +} + +static inline void +hdd_notify_sta_connect(uint8_t session_id, + 
bool tdls_chan_swit_prohibited, + bool tdls_prohibited, + struct wlan_objmgr_vdev *vdev) +{ +} + +static inline +void hdd_notify_sta_disconnect(uint8_t session_id, + bool lfr_roam, + bool user_disconnect, + struct wlan_objmgr_vdev *vdev) +{ + +} + +static inline +int wlan_cfg80211_tdls_configure_mode(struct wlan_objmgr_vdev *vdev, + uint32_t trigger_mode) +{ + return 0; +} + +static inline +void hdd_notify_teardown_tdls_links(struct wlan_objmgr_psoc *psoc) +{ + +} +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/tdls/src/wlan_cfg80211_tdls.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/tdls/src/wlan_cfg80211_tdls.c new file mode 100644 index 0000000000000000000000000000000000000000..161ba2c508a6df0135b344ad5f7e3d72aaaa1241 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/tdls/src/wlan_cfg80211_tdls.c @@ -0,0 +1,982 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: defines driver functions interfacing with linux kernel + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_CHANNEL (NUM_24GHZ_CHANNELS + NUM_5GHZ_CHANNELS) + + +#define TDLS_MAX_NO_OF_2_4_CHANNELS 14 + +static int wlan_cfg80211_tdls_validate_mac_addr(const uint8_t *mac) +{ + static const uint8_t temp_mac[QDF_MAC_ADDR_SIZE] = {0}; + + if (!qdf_mem_cmp(mac, temp_mac, QDF_MAC_ADDR_SIZE)) { + cfg80211_debug("Invalid Mac address " QDF_MAC_ADDR_STR " cmd declined.", + QDF_MAC_ADDR_ARRAY(mac)); + return -EINVAL; + } + + return 0; +} + +QDF_STATUS wlan_cfg80211_tdls_osif_priv_init(struct wlan_objmgr_vdev *vdev) +{ + struct osif_tdls_vdev *tdls_priv; + struct vdev_osif_priv *osif_priv; + + osif_priv = wlan_vdev_get_ospriv(vdev); + if (!osif_priv) { + cfg80211_err("osif_priv is NULL!"); + return QDF_STATUS_E_FAULT; + } + + cfg80211_debug("initialize tdls os if layer private structure"); + tdls_priv = qdf_mem_malloc(sizeof(*tdls_priv)); + if (!tdls_priv) { + cfg80211_err("failed to allocate memory for tdls_priv"); + return QDF_STATUS_E_NOMEM; + } + init_completion(&tdls_priv->tdls_add_peer_comp); + init_completion(&tdls_priv->tdls_del_peer_comp); + init_completion(&tdls_priv->tdls_mgmt_comp); + init_completion(&tdls_priv->tdls_link_establish_req_comp); + init_completion(&tdls_priv->tdls_teardown_comp); + init_completion(&tdls_priv->tdls_user_cmd_comp); + init_completion(&tdls_priv->tdls_antenna_switch_comp); + + osif_priv->osif_tdls = tdls_priv; + + return QDF_STATUS_SUCCESS; +} + +void wlan_cfg80211_tdls_osif_priv_deinit(struct wlan_objmgr_vdev *vdev) +{ + struct vdev_osif_priv *osif_priv; + + osif_priv = wlan_vdev_get_ospriv(vdev); + if (!osif_priv) { + cfg80211_err("osif_priv is NULL!"); + return; + } + + cfg80211_debug("deinitialize tdls os if layer private structure"); + if (osif_priv->osif_tdls) + qdf_mem_free(osif_priv->osif_tdls); + 
osif_priv->osif_tdls = NULL; +} + +void hdd_notify_teardown_tdls_links(struct wlan_objmgr_psoc *psoc) +{ + struct vdev_osif_priv *osif_priv; + struct osif_tdls_vdev *tdls_priv; + QDF_STATUS status; + unsigned long rc; + struct wlan_objmgr_vdev *vdev; + + vdev = ucfg_get_tdls_vdev(psoc, WLAN_OSIF_ID); + if (!vdev) { + cfg80211_err("Unable to get the vdev"); + return; + } + + osif_priv = wlan_vdev_get_ospriv(vdev); + + if (!osif_priv || !osif_priv->osif_tdls) { + cfg80211_err("osif priv or tdls priv is NULL"); + goto release_ref; + } + tdls_priv = osif_priv->osif_tdls; + + reinit_completion(&tdls_priv->tdls_teardown_comp); + status = ucfg_tdls_teardown_links(psoc); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("ucfg_tdls_teardown_links failed err %d", status); + goto release_ref; + } + + cfg80211_debug("Wait for tdls teardown completion. Timeout %u ms", + WAIT_TIME_FOR_TDLS_TEARDOWN_LINKS); + + rc = wait_for_completion_timeout( + &tdls_priv->tdls_teardown_comp, + msecs_to_jiffies(WAIT_TIME_FOR_TDLS_TEARDOWN_LINKS)); + + if (0 == rc) { + cfg80211_err(" Teardown Completion timed out rc: %ld", rc); + goto release_ref; + } + + cfg80211_debug("TDLS teardown completion status %ld ", rc); +release_ref: + wlan_objmgr_vdev_release_ref(vdev, + WLAN_OSIF_ID); +} + +void hdd_notify_tdls_reset_adapter(struct wlan_objmgr_vdev *vdev) +{ + ucfg_tdls_notify_reset_adapter(vdev); +} + +void +hdd_notify_sta_connect(uint8_t session_id, + bool tdls_chan_swit_prohibited, + bool tdls_prohibited, + struct wlan_objmgr_vdev *vdev) +{ + struct tdls_sta_notify_params notify_info = {0}; + QDF_STATUS status; + + if (!vdev) { + cfg80211_err("vdev is NULL"); + return; + } + status = wlan_objmgr_vdev_try_get_ref(vdev, WLAN_TDLS_NB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("can't get vdev"); + return; + } + + notify_info.session_id = session_id; + notify_info.vdev = vdev; + notify_info.tdls_chan_swit_prohibited = tdls_chan_swit_prohibited; + notify_info.tdls_prohibited = 
tdls_prohibited; + ucfg_tdls_notify_sta_connect(¬ify_info); +} + +void hdd_notify_sta_disconnect(uint8_t session_id, + bool lfr_roam, + bool user_disconnect, + struct wlan_objmgr_vdev *vdev) +{ + struct tdls_sta_notify_params notify_info = {0}; + QDF_STATUS status; + + if (!vdev) { + cfg80211_err("vdev is NULL"); + return; + } + + status = wlan_objmgr_vdev_try_get_ref(vdev, WLAN_TDLS_NB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("can't get vdev"); + return; + } + + notify_info.session_id = session_id; + notify_info.lfr_roam = lfr_roam; + notify_info.tdls_chan_swit_prohibited = false; + notify_info.tdls_prohibited = false; + notify_info.vdev = vdev; + notify_info.user_disconnect = user_disconnect; + ucfg_tdls_notify_sta_disconnect(¬ify_info); +} + +int wlan_cfg80211_tdls_add_peer(struct wlan_objmgr_vdev *vdev, + const uint8_t *mac) +{ + struct tdls_add_peer_params *add_peer_req; + int status; + struct vdev_osif_priv *osif_priv; + struct osif_tdls_vdev *tdls_priv; + unsigned long rc; + + status = wlan_cfg80211_tdls_validate_mac_addr(mac); + + if (status) + return status; + + cfg80211_debug("Add TDLS peer " QDF_MAC_ADDR_STR, + QDF_MAC_ADDR_ARRAY(mac)); + + add_peer_req = qdf_mem_malloc(sizeof(*add_peer_req)); + if (!add_peer_req) { + cfg80211_err("Failed to allocate tdls add peer request mem"); + return -EINVAL; + } + + osif_priv = wlan_vdev_get_ospriv(vdev); + if (!osif_priv || !osif_priv->osif_tdls) { + cfg80211_err("osif_tdls_vdev or osif_priv is NULL for the current vdev"); + status = -EINVAL; + goto error; + } + tdls_priv = osif_priv->osif_tdls; + add_peer_req->vdev_id = wlan_vdev_get_id(vdev); + + qdf_mem_copy(add_peer_req->peer_addr, mac, QDF_MAC_ADDR_SIZE); + + reinit_completion(&tdls_priv->tdls_add_peer_comp); + status = ucfg_tdls_add_peer(vdev, add_peer_req); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("ucfg_tdls_add_peer returned err %d", status); + status = -EIO; + goto error; + } + + rc = wait_for_completion_timeout( + 
&tdls_priv->tdls_add_peer_comp, + msecs_to_jiffies(WAIT_TIME_TDLS_ADD_STA)); + if (!rc) { + cfg80211_err("timeout for tdls add peer indication %ld", rc); + status = -EPERM; + goto error; + } + + if (QDF_IS_STATUS_ERROR(tdls_priv->tdls_add_peer_status)) { + cfg80211_err("tdls add peer failed, status:%d", + tdls_priv->tdls_add_peer_status); + status = -EPERM; + } +error: + qdf_mem_free(add_peer_req); + return status; +} + +static bool +is_duplicate_channel(uint8_t *arr, int index, uint8_t match) +{ + int i; + + for (i = 0; i < index; i++) { + if (arr[i] == match) + return true; + } + return false; +} + +static void +tdls_calc_channels_from_staparams(struct tdls_update_peer_params *req_info, + struct station_parameters *params) +{ + int i = 0, j = 0, k = 0, no_of_channels = 0; + int num_unique_channels; + int next; + uint8_t *dest_chans; + const uint8_t *src_chans; + + dest_chans = req_info->supported_channels; + src_chans = params->supported_channels; + + /* Convert (first channel , number of channels) tuple to + * the total list of channels. This goes with the assumption + * that if the first channel is < 14, then the next channels + * are an incremental of 1 else an incremental of 4 till the number + * of channels. + */ + for (i = 0; i < params->supported_channels_len && + j < WLAN_MAC_MAX_SUPP_CHANNELS; i += 2) { + int wifi_chan_index; + + if (!is_duplicate_channel(dest_chans, j, src_chans[i])) + dest_chans[j] = src_chans[i]; + else + continue; + + wifi_chan_index = ((dest_chans[j] <= WLAN_CHANNEL_14) ? 
1 : 4); + no_of_channels = src_chans[i + 1]; + + cfg80211_debug("i:%d,j:%d,k:%d,[%d]:%d,index:%d,chans_num: %d", + i, j, k, j, + dest_chans[j], + wifi_chan_index, + no_of_channels); + + for (k = 1; k <= no_of_channels && + j < WLAN_MAC_MAX_SUPP_CHANNELS - 1; k++) { + next = dest_chans[j] + wifi_chan_index; + + if (!is_duplicate_channel(dest_chans, j + 1, next)) + dest_chans[j + 1] = next; + else + continue; + + cfg80211_debug("i: %d, j: %d, k: %d, [%d]: %d", + i, j, k, j + 1, dest_chans[j + 1]); + j += 1; + } + } + num_unique_channels = j + 1; + cfg80211_debug("Unique Channel List: supported_channels "); + for (i = 0; i < num_unique_channels; i++) + cfg80211_debug("[%d]: %d,", i, dest_chans[i]); + + if (MAX_CHANNEL < num_unique_channels) + num_unique_channels = MAX_CHANNEL; + req_info->supported_channels_len = num_unique_channels; + cfg80211_debug("After removing duplcates supported_channels_len: %d", + req_info->supported_channels_len); +} + +static void +wlan_cfg80211_tdls_extract_params(struct tdls_update_peer_params *req_info, + struct station_parameters *params) +{ + int i; + + cfg80211_debug("sta cap %d, uapsd_queue %d, max_sp %d", + params->capability, + params->uapsd_queues, params->max_sp); + + if (!req_info) { + cfg80211_err("reg_info is NULL"); + return; + } + req_info->capability = params->capability; + req_info->uapsd_queues = params->uapsd_queues; + req_info->max_sp = params->max_sp; + + if (params->supported_channels_len) + tdls_calc_channels_from_staparams(req_info, params); + + if (params->supported_oper_classes_len > WLAN_MAX_SUPP_OPER_CLASSES) { + cfg80211_debug("received oper classes:%d, resetting it to max supported: %d", + params->supported_oper_classes_len, + WLAN_MAX_SUPP_OPER_CLASSES); + params->supported_oper_classes_len = WLAN_MAX_SUPP_OPER_CLASSES; + } + + qdf_mem_copy(req_info->supported_oper_classes, + params->supported_oper_classes, + params->supported_oper_classes_len); + req_info->supported_oper_classes_len = + 
params->supported_oper_classes_len; + + if (params->ext_capab_len) + qdf_mem_copy(req_info->extn_capability, params->ext_capab, + sizeof(req_info->extn_capability)); + + if (params->ht_capa) { + req_info->htcap_present = 1; + qdf_mem_copy(&req_info->ht_cap, params->ht_capa, + sizeof(struct htcap_cmn_ie)); + } + + req_info->supported_rates_len = params->supported_rates_len; + + /* Note : The Maximum sizeof supported_rates sent by the Supplicant is + * 32. The supported_rates array , for all the structures propogating + * till Add Sta to the firmware has to be modified , if the supplicant + * (ieee80211) is modified to send more rates. + */ + + /* To avoid Data Currption , set to max length to SIR_MAC_MAX_SUPP_RATES + */ + if (req_info->supported_rates_len > WLAN_MAC_MAX_SUPP_RATES) + req_info->supported_rates_len = WLAN_MAC_MAX_SUPP_RATES; + + if (req_info->supported_rates_len) { + qdf_mem_copy(req_info->supported_rates, + params->supported_rates, + req_info->supported_rates_len); + cfg80211_debug("Supported Rates with Length %d", + req_info->supported_rates_len); + + for (i = 0; i < req_info->supported_rates_len; i++) + cfg80211_debug("[%d]: %0x", i, + req_info->supported_rates[i]); + } + + if (params->vht_capa) { + req_info->vhtcap_present = 1; + qdf_mem_copy(&req_info->vht_cap, params->vht_capa, + sizeof(struct vhtcap)); + } + + if (params->ht_capa || params->vht_capa || + (params->sta_flags_set & BIT(NL80211_STA_FLAG_WME))) + req_info->is_qos_wmm_sta = true; +} + +int wlan_cfg80211_tdls_update_peer(struct wlan_objmgr_vdev *vdev, + const uint8_t *mac, + struct station_parameters *params) +{ + struct tdls_update_peer_params *req_info; + int status; + struct vdev_osif_priv *osif_priv; + struct osif_tdls_vdev *tdls_priv; + unsigned long rc; + + status = wlan_cfg80211_tdls_validate_mac_addr(mac); + + if (status) + return status; + + cfg80211_debug("Update TDLS peer " QDF_MAC_ADDR_STR, + QDF_MAC_ADDR_ARRAY(mac)); + + req_info = qdf_mem_malloc(sizeof(*req_info)); + if 
(!req_info) { + cfg80211_err("Failed to allocate tdls add peer request mem"); + return -EINVAL; + } + wlan_cfg80211_tdls_extract_params(req_info, params); + + osif_priv = wlan_vdev_get_ospriv(vdev); + if (!osif_priv || !osif_priv->osif_tdls) { + cfg80211_err("osif priv or tdls priv is NULL"); + status = -EINVAL; + goto error; + } + + tdls_priv = osif_priv->osif_tdls; + req_info->vdev_id = wlan_vdev_get_id(vdev); + qdf_mem_copy(req_info->peer_addr, mac, QDF_MAC_ADDR_SIZE); + + reinit_completion(&tdls_priv->tdls_add_peer_comp); + status = ucfg_tdls_update_peer(vdev, req_info); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("ucfg_tdls_update_peer returned err %d", status); + status = -EIO; + goto error; + } + + rc = wait_for_completion_timeout( + &tdls_priv->tdls_add_peer_comp, + msecs_to_jiffies(WAIT_TIME_TDLS_ADD_STA)); + if (!rc) { + cfg80211_err("timeout for tdls update peer indication %ld", rc); + status = -EPERM; + goto error; + } + + if (QDF_IS_STATUS_ERROR(tdls_priv->tdls_add_peer_status)) { + cfg80211_err("tdls update peer failed, status:%d", + tdls_priv->tdls_add_peer_status); + status = -EPERM; + } +error: + qdf_mem_free(req_info); + return status; +} + +#ifdef WLAN_DEBUG +static char *tdls_oper_to_str(enum nl80211_tdls_operation oper) +{ + switch (oper) { + case NL80211_TDLS_ENABLE_LINK: + return "TDLS_ENABLE_LINK"; + case NL80211_TDLS_DISABLE_LINK: + return "TDLS_DISABLE_LINK"; + case NL80211_TDLS_TEARDOWN: + return "TDLS_TEARDOWN"; + case NL80211_TDLS_SETUP: + return "TDLS_SETUP"; + default: + return "UNKNOWN:ERR"; + } +} +#endif + +static enum tdls_command_type tdls_oper_to_cmd(enum nl80211_tdls_operation oper) +{ + if (oper == NL80211_TDLS_ENABLE_LINK) + return TDLS_CMD_ENABLE_LINK; + else if (oper == NL80211_TDLS_DISABLE_LINK) + return TDLS_CMD_DISABLE_LINK; + else if (oper == NL80211_TDLS_TEARDOWN) + return TDLS_CMD_REMOVE_FORCE_PEER; + else if (oper == NL80211_TDLS_SETUP) + return TDLS_CMD_CONFIG_FORCE_PEER; + else + return 0; +} + +int 
wlan_cfg80211_tdls_configure_mode(struct wlan_objmgr_vdev *vdev, + uint32_t trigger_mode) +{ + enum tdls_feature_mode tdls_mode; + struct tdls_set_mode_params set_mode_params; + int status; + + if (!vdev) + return -EINVAL; + + switch (trigger_mode) { + case WLAN_VENDOR_TDLS_TRIGGER_MODE_EXPLICIT: + tdls_mode = TDLS_SUPPORT_EXP_TRIG_ONLY; + return 0; + case WLAN_VENDOR_TDLS_TRIGGER_MODE_EXTERNAL: + tdls_mode = TDLS_SUPPORT_EXT_CONTROL; + break; + case WLAN_VENDOR_TDLS_TRIGGER_MODE_IMPLICIT: + tdls_mode = TDLS_SUPPORT_IMP_MODE; + return 0; + default: + cfg80211_err("Invalid TDLS trigger mode"); + return -EINVAL; + } + + cfg80211_notice("cfg80211 tdls trigger mode %d", trigger_mode); + set_mode_params.source = TDLS_SET_MODE_SOURCE_USER; + set_mode_params.tdls_mode = tdls_mode; + set_mode_params.update_last = false; + set_mode_params.vdev = vdev; + + status = ucfg_tdls_set_operating_mode(&set_mode_params); + return status; +} + +int wlan_cfg80211_tdls_oper(struct wlan_objmgr_vdev *vdev, + const uint8_t *peer, + enum nl80211_tdls_operation oper) +{ + struct vdev_osif_priv *osif_priv; + struct osif_tdls_vdev *tdls_priv; + int status; + unsigned long rc; + enum tdls_command_type cmd; + + status = wlan_cfg80211_tdls_validate_mac_addr(peer); + + if (status) + return status; + + if (NL80211_TDLS_DISCOVERY_REQ == oper) { + cfg80211_warn( + "We don't support in-driver setup/teardown/discovery"); + return -ENOTSUPP; + } + + cfg80211_debug("%s start", tdls_oper_to_str(oper)); + cmd = tdls_oper_to_cmd(oper); + switch (oper) { + case NL80211_TDLS_ENABLE_LINK: + case NL80211_TDLS_TEARDOWN: + case NL80211_TDLS_SETUP: + status = ucfg_tdls_oper(vdev, peer, cmd); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("%s fail %d", + tdls_oper_to_str(oper), status); + status = -EIO; + goto error; + } + break; + case NL80211_TDLS_DISABLE_LINK: + osif_priv = wlan_vdev_get_ospriv(vdev); + if (!osif_priv || !osif_priv->osif_tdls) { + cfg80211_err("osif priv or tdls priv is NULL"); + status = 
-EINVAL; + goto error; + } + tdls_priv = osif_priv->osif_tdls; + reinit_completion(&tdls_priv->tdls_del_peer_comp); + status = ucfg_tdls_oper(vdev, peer, cmd); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("ucfg_tdls_disable_link fail %d", status); + status = -EIO; + goto error; + } + + rc = wait_for_completion_timeout( + &tdls_priv->tdls_del_peer_comp, + msecs_to_jiffies(WAIT_TIME_TDLS_DEL_STA)); + if (!rc) { + cfg80211_err("timeout for tdls disable link %ld", rc); + status = -EPERM; + } + break; + default: + cfg80211_err("unsupported event %d", oper); + status = -ENOTSUPP; + } + +error: + return status; +} + +void wlan_cfg80211_tdls_rx_callback(void *user_data, + struct tdls_rx_mgmt_frame *rx_frame) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_vdev *vdev; + struct vdev_osif_priv *osif_priv; + struct wireless_dev *wdev; + uint16_t freq; + + cfg80211_debug("user data:%pK, vdev id:%d, rssi:%d, buf:%pK, len:%d", + user_data, rx_frame->vdev_id, rx_frame->rx_rssi, + rx_frame->buf, rx_frame->frame_len); + + psoc = user_data; + if (!psoc) { + cfg80211_err("psoc is null"); + return; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + rx_frame->vdev_id, WLAN_TDLS_NB_ID); + if (!vdev) { + cfg80211_err("vdev is null"); + return; + } + + osif_priv = wlan_vdev_get_ospriv(vdev); + if (!osif_priv) { + cfg80211_err("osif_priv is null"); + goto fail; + } + + wdev = osif_priv->wdev; + if (!wdev) { + cfg80211_err("wdev is null"); + goto fail; + } + + if (rx_frame->rx_chan <= TDLS_MAX_NO_OF_2_4_CHANNELS) + freq = ieee80211_channel_to_frequency( + rx_frame->rx_chan, NL80211_BAND_2GHZ); + else + freq = ieee80211_channel_to_frequency( + rx_frame->rx_chan, NL80211_BAND_5GHZ); + + cfg80211_notice("Indicate frame over nl80211, vdev id:%d, idx:%d", + rx_frame->vdev_id, wdev->netdev->ifindex); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + cfg80211_rx_mgmt(wdev, freq, rx_frame->rx_rssi * 100, + rx_frame->buf, rx_frame->frame_len, + 
NL80211_RXMGMT_FLAG_ANSWERED); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) + cfg80211_rx_mgmt(wdev, freq, rx_frame->rx_rssi * 100, + rx_frame->buf, rx_frame->frame_len, + NL80211_RXMGMT_FLAG_ANSWERED, GFP_ATOMIC); +#else + cfg80211_rx_mgmt(wdev, freq, rx_frame->rx_rssi * 100, + rx_frame->buf, rx_frame->frame_len, GFP_ATOMIC); +#endif /* LINUX_VERSION_CODE */ +fail: + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID); +} + +int wlan_cfg80211_tdls_get_all_peers(struct wlan_objmgr_vdev *vdev, + char *buf, int buflen) +{ + struct vdev_osif_priv *osif_priv; + struct osif_tdls_vdev *tdls_priv; + int32_t len; + QDF_STATUS status; + unsigned long rc; + + osif_priv = wlan_vdev_get_ospriv(vdev); + if (!osif_priv || !osif_priv->osif_tdls) { + cfg80211_err("osif_tdls_vdev or osif_priv is NULL for the current vdev"); + len = scnprintf(buf, buflen, + "\n tdls_priv is null\n"); + goto error_get_tdls_peers; + } + + tdls_priv = osif_priv->osif_tdls; + reinit_completion(&tdls_priv->tdls_user_cmd_comp); + status = ucfg_tdls_get_all_peers(vdev, buf, buflen); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("ucfg_tdls_get_all_peers failed err %d", status); + len = scnprintf(buf, buflen, + "\nucfg_tdls_send_mgmt failed\n"); + goto error_get_tdls_peers; + } + + cfg80211_debug("Wait for tdls_user_cmd_comp. 
Timeout %u ms", + WAIT_TIME_FOR_TDLS_USER_CMD); + + rc = wait_for_completion_timeout( + &tdls_priv->tdls_user_cmd_comp, + msecs_to_jiffies(WAIT_TIME_FOR_TDLS_USER_CMD)); + + if (0 == rc) { + cfg80211_err("TDLS user cmd get all peers timed out rc %ld", + rc); + len = scnprintf(buf, buflen, + "\nTDLS user cmd get all peers timed out\n"); + goto error_get_tdls_peers; + } + + len = tdls_priv->tdls_user_cmd_len; + +error_get_tdls_peers: + return len; +} + +int wlan_cfg80211_tdls_mgmt(struct wlan_objmgr_vdev *vdev, + const uint8_t *peer_mac, + uint8_t action_code, uint8_t dialog_token, + uint16_t status_code, uint32_t peer_capability, + const uint8_t *buf, size_t len) +{ + struct tdls_action_frame_request mgmt_req; + struct vdev_osif_priv *osif_priv; + struct osif_tdls_vdev *tdls_priv; + int status; + unsigned long rc; + struct tdls_set_responder_req set_responder; + + status = wlan_cfg80211_tdls_validate_mac_addr(peer_mac); + + if (status) + return status; + + osif_priv = wlan_vdev_get_ospriv(vdev); + + if (!osif_priv || !osif_priv->osif_tdls) { + cfg80211_err("osif priv or tdls priv is NULL"); + status = -EINVAL; + goto error_mgmt_req; + } + + tdls_priv = osif_priv->osif_tdls; + + /* make sure doesn't call send_mgmt() while it is pending */ + if (TDLS_VDEV_MAGIC == tdls_priv->mgmt_tx_completion_status) { + cfg80211_err(QDF_MAC_ADDR_STR " action %d couldn't sent, as one is pending. 
return EBUSY", + QDF_MAC_ADDR_ARRAY(peer_mac), action_code); + return -EBUSY; + } + + /* Reset TDLS VDEV magic */ + tdls_priv->mgmt_tx_completion_status = TDLS_VDEV_MAGIC; + + + /*prepare the request */ + + /* Validate the management Request */ + mgmt_req.chk_frame.action_code = action_code; + qdf_mem_copy(mgmt_req.chk_frame.peer_mac, peer_mac, QDF_MAC_ADDR_SIZE); + mgmt_req.chk_frame.dialog_token = dialog_token; + mgmt_req.chk_frame.action_code = action_code; + mgmt_req.chk_frame.status_code = status_code; + mgmt_req.chk_frame.len = len; + + + mgmt_req.vdev = vdev; + mgmt_req.vdev_id = wlan_vdev_get_id(vdev); + mgmt_req.session_id = mgmt_req.vdev_id; + /* populate management req params */ + qdf_mem_copy(mgmt_req.tdls_mgmt.peer_mac.bytes, + peer_mac, QDF_MAC_ADDR_SIZE); + mgmt_req.tdls_mgmt.dialog = dialog_token; + mgmt_req.tdls_mgmt.frame_type = action_code; + mgmt_req.tdls_mgmt.len = len; + mgmt_req.tdls_mgmt.peer_capability = peer_capability; + mgmt_req.tdls_mgmt.status_code = mgmt_req.chk_frame.status_code; + + /*populate the additional IE's */ + mgmt_req.cmd_buf = buf; + mgmt_req.len = len; + + reinit_completion(&tdls_priv->tdls_mgmt_comp); + status = ucfg_tdls_send_mgmt_frame(&mgmt_req); + if (QDF_IS_STATUS_ERROR(status)) { + cfg80211_err("ucfg_tdls_send_mgmt failed err %d", status); + status = -EIO; + tdls_priv->mgmt_tx_completion_status = false; + goto error_mgmt_req; + } + + cfg80211_debug("Wait for tdls_mgmt_comp. Timeout %u ms", + WAIT_TIME_FOR_TDLS_MGMT); + + rc = wait_for_completion_timeout( + &tdls_priv->tdls_mgmt_comp, + msecs_to_jiffies(WAIT_TIME_FOR_TDLS_MGMT)); + + if ((0 == rc) || (QDF_STATUS_SUCCESS != + tdls_priv->mgmt_tx_completion_status)) { + cfg80211_err("%s rc %ld mgmtTxCompletionStatus %u", + !rc ? 
"Mgmt Tx Completion timed out" : + "Mgmt Tx Completion failed", + rc, tdls_priv->mgmt_tx_completion_status); + + tdls_priv->mgmt_tx_completion_status = false; + status = -EINVAL; + goto error_mgmt_req; + } + + cfg80211_debug("Mgmt Tx Completion status %ld TxCompletion %u", + rc, tdls_priv->mgmt_tx_completion_status); + + if (TDLS_SETUP_RESPONSE == action_code || + TDLS_SETUP_CONFIRM == action_code) { + qdf_mem_copy(set_responder.peer_mac, peer_mac, + QDF_MAC_ADDR_SIZE); + set_responder.vdev = vdev; + if (TDLS_SETUP_RESPONSE == action_code) + set_responder.responder = false; + if (TDLS_SETUP_CONFIRM == action_code) + set_responder.responder = true; + ucfg_tdls_responder(&set_responder); + } + +error_mgmt_req: + return status; +} + +int wlan_tdls_antenna_switch(struct wlan_objmgr_vdev *vdev, uint32_t mode) +{ + struct vdev_osif_priv *osif_priv; + struct osif_tdls_vdev *tdls_priv; + int ret; + unsigned long rc; + + if (!vdev) { + cfg80211_err("vdev is NULL"); + return -EAGAIN; + } + + osif_priv = wlan_vdev_get_ospriv(vdev); + if (!osif_priv || !osif_priv->osif_tdls) { + cfg80211_err("osif priv or tdls priv is NULL"); + ret = -EINVAL; + goto error; + } + tdls_priv = osif_priv->osif_tdls; + + reinit_completion(&tdls_priv->tdls_antenna_switch_comp); + ret = ucfg_tdls_antenna_switch(vdev, mode); + if (QDF_IS_STATUS_ERROR(ret)) { + cfg80211_err("ucfg_tdls_antenna_switch failed err %d", ret); + ret = -EAGAIN; + goto error; + } + + rc = wait_for_completion_timeout( + &tdls_priv->tdls_antenna_switch_comp, + msecs_to_jiffies(WAIT_TIME_FOR_TDLS_ANTENNA_SWITCH)); + if (!rc) { + cfg80211_err("timeout for tdls antenna switch %ld", rc); + ret = -EAGAIN; + goto error; + } + + ret = tdls_priv->tdls_antenna_switch_status; + cfg80211_debug("tdls antenna switch status:%d", ret); +error: + return ret; +} + +static void +wlan_cfg80211_tdls_indicate_discovery(struct tdls_osif_indication *ind) +{ + struct vdev_osif_priv *osif_vdev; + + osif_vdev = wlan_vdev_get_ospriv(ind->vdev); + + 
cfg80211_debug("Implicit TDLS, request Send Discovery request"); + cfg80211_tdls_oper_request(osif_vdev->wdev->netdev, + ind->peer_mac, NL80211_TDLS_DISCOVERY_REQ, + false, GFP_KERNEL); +} + +static void +wlan_cfg80211_tdls_indicate_setup(struct tdls_osif_indication *ind) +{ + struct vdev_osif_priv *osif_vdev; + + osif_vdev = wlan_vdev_get_ospriv(ind->vdev); + + cfg80211_debug("Indication to request TDLS setup"); + cfg80211_tdls_oper_request(osif_vdev->wdev->netdev, + ind->peer_mac, NL80211_TDLS_SETUP, false, + GFP_KERNEL); +} + +static void +wlan_cfg80211_tdls_indicate_teardown(struct tdls_osif_indication *ind) +{ + struct vdev_osif_priv *osif_vdev; + + osif_vdev = wlan_vdev_get_ospriv(ind->vdev); + + cfg80211_debug("Teardown reason %d", ind->reason); + cfg80211_tdls_oper_request(osif_vdev->wdev->netdev, + ind->peer_mac, NL80211_TDLS_TEARDOWN, + ind->reason, GFP_KERNEL); +} + +void wlan_cfg80211_tdls_event_callback(void *user_data, + enum tdls_event_type type, + struct tdls_osif_indication *ind) +{ + struct vdev_osif_priv *osif_vdev; + struct osif_tdls_vdev *tdls_priv; + + if (!ind || !ind->vdev) { + cfg80211_err("ind: %pK", ind); + return; + } + osif_vdev = wlan_vdev_get_ospriv(ind->vdev); + + if (!osif_vdev || !osif_vdev->osif_tdls) { + cfg80211_err("osif priv or tdls priv is NULL"); + return; + } + tdls_priv = osif_vdev->osif_tdls; + + switch (type) { + case TDLS_EVENT_MGMT_TX_ACK_CNF: + tdls_priv->mgmt_tx_completion_status = ind->status; + complete(&tdls_priv->tdls_mgmt_comp); + break; + case TDLS_EVENT_ADD_PEER: + tdls_priv->tdls_add_peer_status = ind->status; + complete(&tdls_priv->tdls_add_peer_comp); + break; + case TDLS_EVENT_DEL_PEER: + complete(&tdls_priv->tdls_del_peer_comp); + break; + case TDLS_EVENT_DISCOVERY_REQ: + wlan_cfg80211_tdls_indicate_discovery(ind); + break; + case TDLS_EVENT_TEARDOWN_REQ: + wlan_cfg80211_tdls_indicate_teardown(ind); + break; + case TDLS_EVENT_SETUP_REQ: + wlan_cfg80211_tdls_indicate_setup(ind); + break; + case 
TDLS_EVENT_TEARDOWN_LINKS_DONE: + complete(&tdls_priv->tdls_teardown_comp); + break; + case TDLS_EVENT_USER_CMD: + tdls_priv->tdls_user_cmd_len = ind->status; + complete(&tdls_priv->tdls_user_cmd_comp); + break; + + case TDLS_EVENT_ANTENNA_SWITCH: + tdls_priv->tdls_antenna_switch_status = ind->status; + complete(&tdls_priv->tdls_antenna_switch_comp); + default: + break; + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/wifi_pos/inc/os_if_wifi_pos.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wifi_pos/inc/os_if_wifi_pos.h new file mode 100644 index 0000000000000000000000000000000000000000..26db59415482c9632697510652054b3ed809465e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wifi_pos/inc/os_if_wifi_pos.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: os_if_wifi_pos.h + * This file provides declarations of wifi_pos's os_if APIs + */ +#ifndef _OS_IF_WIFI_POS_H_ +#define _OS_IF_WIFI_POS_H_ + +#include "qdf_types.h" +#include "qdf_status.h" + + +/* forward declaration */ +struct wifi_pos_ch_info; +struct wlan_objmgr_psoc; +struct wifi_pos_driver_caps; + +#ifdef WIFI_POS_CONVERGED +/** + * os_if_wifi_pos_register_nl() - abstraction API to register callback with GENL + * socket. + * + * Return: status of operation + */ +int os_if_wifi_pos_register_nl(void); + +/** + * os_if_wifi_pos_deregister_nl() - abstraction API to deregister callback with + * GENL socket. + * + * Return: status of operation + */ +int os_if_wifi_pos_deregister_nl(void); + +/** + * os_if_wifi_pos_send_peer_status() - Function to send peer status to a + * registered application + * @peer_mac: MAC address of peer + * @peer_status: ePeerConnected or ePeerDisconnected + * @peer_timing_meas_cap: 0: RTT/RTT2, 1: RTT3. Default is 0 + * @session_id: SME session id, i.e. 
vdev_id + * @chan_info: operating channel information + * @dev_mode: dev mode for which indication is sent + * + * Return: none + */ +void os_if_wifi_pos_send_peer_status(struct qdf_mac_addr *peer_mac, + uint8_t peer_status, + uint8_t peer_timing_meas_cap, + uint8_t session_id, + struct wifi_pos_ch_info *chan_info, + enum QDF_OPMODE dev_mode); + +/** + * os_if_wifi_pos_populate_caps() - populate oem capabilities + * @psoc: psoc object + * @caps: pointer to populate the capabilities + * + * Return: error code + */ +int os_if_wifi_pos_populate_caps(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_driver_caps *caps); +#else +static inline int os_if_wifi_pos_register_nl(void) +{ + return 0; +} + +static inline int os_if_wifi_pos_deregister_nl(void) +{ + return 0; +} + +static inline void os_if_wifi_pos_send_peer_status( + struct qdf_mac_addr *peer_mac, + uint8_t peer_status, + uint8_t peer_timing_meas_cap, + uint8_t session_id, + struct wifi_pos_ch_info *chan_info, + enum QDF_OPMODE dev_mode) +{ +} + +static inline int os_if_wifi_pos_populate_caps(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_driver_caps *caps) +{ + return 0; +} +#endif + +#endif /* _OS_IF_WIFI_POS_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/wifi_pos/src/os_if_wifi_pos.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wifi_pos/src/os_if_wifi_pos.c new file mode 100644 index 0000000000000000000000000000000000000000..acddc176ed93b324ea255efa2689a953cdcc1c16 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wifi_pos/src/os_if_wifi_pos.c @@ -0,0 +1,361 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_hdd_wifi_pos.c + * This file defines the important functions pertinent to wifi positioning + * component's os_if layer. + */ + +#include "wlan_nlink_srv.h" +#include "wlan_ptt_sock_svc.h" +#include "wlan_nlink_common.h" +#include "os_if_wifi_pos.h" +#include "wifi_pos_api.h" +#include "wlan_cfg80211.h" +#include "wlan_objmgr_psoc_obj.h" +#ifdef CNSS_GENL +#include +#endif + +/** + * os_if_wifi_pos_send_rsp() - send oem registration response + * + * This function sends oem message to registered application process + * + * Return: none + */ +static void os_if_wifi_pos_send_rsp(uint32_t pid, uint32_t rsp_msg_type, + uint32_t buf_len, uint8_t *buf) +{ + tAniMsgHdr *aniHdr; + struct sk_buff *skb; + struct nlmsghdr *nlh; + + /* OEM msg is always to a specific process and cannot be a broadcast */ + if (pid == 0) { + cfg80211_err("invalid dest pid"); + return; + } + + skb = alloc_skb(NLMSG_SPACE(sizeof(tAniMsgHdr) + buf_len), GFP_ATOMIC); + if (skb == NULL) { + cfg80211_alert("alloc_skb failed"); + return; + } + + nlh = (struct nlmsghdr *)skb->data; + nlh->nlmsg_pid = 0; /* from kernel */ + nlh->nlmsg_flags = 0; + nlh->nlmsg_seq = 0; + nlh->nlmsg_type = WLAN_NL_MSG_OEM; + nlh->nlmsg_len = NLMSG_LENGTH(sizeof(tAniMsgHdr) + buf_len); + + aniHdr = NLMSG_DATA(nlh); + aniHdr->type = rsp_msg_type; + qdf_mem_copy(&aniHdr[1], buf, buf_len); + aniHdr->length = buf_len; + + skb_put(skb, NLMSG_SPACE(sizeof(tAniMsgHdr) + buf_len)); + 
cfg80211_debug("sending oem rsp: type: %d len(%d) to pid (%d)", + rsp_msg_type, buf_len, pid); + nl_srv_ucast_oem(skb, pid, MSG_DONTWAIT); +} + +#ifdef CNSS_GENL +static int wifi_pos_parse_req(const void *data, int len, int pid, + struct wifi_pos_req_msg *req) +{ + tAniMsgHdr *msg_hdr; + struct nlattr *tb[CLD80211_ATTR_MAX + 1]; + uint32_t msg_len, id, nl_field_info_size, expected_field_info_size; + struct wifi_pos_field_info *field_info; + + if (wlan_cfg80211_nla_parse(tb, CLD80211_ATTR_MAX, data, len, NULL)) { + cfg80211_err("invalid data in request"); + return OEM_ERR_INVALID_MESSAGE_TYPE; + } + + if (!tb[CLD80211_ATTR_DATA]) { + cfg80211_err("CLD80211_ATTR_DATA not present"); + return OEM_ERR_INVALID_MESSAGE_TYPE; + } + + msg_len = nla_len(tb[CLD80211_ATTR_DATA]); + if (msg_len < sizeof(*msg_hdr)) { + cfg80211_err("Insufficient length for msg_hdr: %u", msg_len); + return OEM_ERR_INVALID_MESSAGE_LENGTH; + } + + msg_hdr = nla_data(tb[CLD80211_ATTR_DATA]); + req->msg_type = msg_hdr->type; + + if (msg_len < sizeof(*msg_hdr) + msg_hdr->length) { + cfg80211_err("Insufficient length for msg_hdr buffer: %u", + msg_len); + return OEM_ERR_INVALID_MESSAGE_LENGTH; + } + + req->buf_len = msg_hdr->length; + req->buf = (uint8_t *)&msg_hdr[1]; + req->pid = pid; + + id = CLD80211_ATTR_META_DATA; + if (!tb[id]) + return 0; + + nl_field_info_size = nla_len(tb[id]); + if (nl_field_info_size < sizeof(*field_info)) { + cfg80211_err("Insufficient length for field_info_buf: %u", + nl_field_info_size); + return OEM_ERR_INVALID_MESSAGE_LENGTH; + } + + field_info = nla_data(tb[id]); + if (!field_info->count) { + cfg80211_debug("field_info->count is zero, ignoring META_DATA"); + return 0; + } + + expected_field_info_size = sizeof(*field_info) + + (field_info->count - 1) * sizeof(struct wifi_pos_field); + + if (nl_field_info_size < expected_field_info_size) { + cfg80211_err("Insufficient len for total no.of %u fields", + field_info->count); + return OEM_ERR_INVALID_MESSAGE_LENGTH; + } + + 
req->field_info_buf = field_info; + req->field_info_buf_len = nl_field_info_size; + + return 0; +} +#else +static int wifi_pos_parse_req(struct sk_buff *skb, struct wifi_pos_req_msg *req) +{ + /* SKB->data contains NL msg */ + /* NLMSG_DATA(nlh) contains ANI msg */ + struct nlmsghdr *nlh; + tAniMsgHdr *msg_hdr; + + nlh = (struct nlmsghdr *)skb->data; + if (!nlh) { + cfg80211_err("Netlink header null"); + return OEM_ERR_NULL_MESSAGE_HEADER; + } + + if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*msg_hdr))) { + cfg80211_err("nlmsg_len(%d) and msg_hdr_size(%zu) mis-match", + nlh->nlmsg_len, sizeof(*msg_hdr)); + return OEM_ERR_INVALID_MESSAGE_LENGTH; + } + + msg_hdr = NLMSG_DATA(nlh); + if (!msg_hdr) { + cfg80211_err("Message header null"); + return OEM_ERR_NULL_MESSAGE_HEADER; + } + + if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*msg_hdr) + msg_hdr->length)) { + cfg80211_err("nlmsg_len(%d) and animsg_len(%d) mis-match", + nlh->nlmsg_len, msg_hdr->length); + return OEM_ERR_INVALID_MESSAGE_LENGTH; + } + + req->msg_type = msg_hdr->type; + req->buf_len = msg_hdr->length; + req->buf = (uint8_t *)&msg_hdr[1]; + req->pid = nlh->nlmsg_pid; + + return 0; +} +#endif + +/** + * os_if_wifi_pos_callback() - callback registered with NL service socket to + * process wifi pos request + * @skb: request message sk_buff + * + * Return: status of operation + */ +#ifdef CNSS_GENL +static void os_if_wifi_pos_callback(const void *data, int data_len, + void *ctx, int pid) +{ + uint8_t err; + QDF_STATUS status; + struct wifi_pos_req_msg req = {0}; + struct wlan_objmgr_psoc *psoc = wifi_pos_get_psoc(); + + cfg80211_debug("enter: pid %d", pid); + if (!psoc) { + cfg80211_err("global psoc object not registered yet."); + return; + } + + wlan_objmgr_psoc_get_ref(psoc, WLAN_WIFI_POS_OSIF_ID); + err = wifi_pos_parse_req(data, data_len, pid, &req); + if (err) { + os_if_wifi_pos_send_rsp(wifi_pos_get_app_pid(psoc), + ANI_MSG_OEM_ERROR, sizeof(err), &err); + status = QDF_STATUS_E_INVAL; + goto release_psoc_ref; + 
} + + status = ucfg_wifi_pos_process_req(psoc, &req, os_if_wifi_pos_send_rsp); + if (QDF_IS_STATUS_ERROR(status)) + cfg80211_err("ucfg_wifi_pos_process_req failed. status: %d", + status); + +release_psoc_ref: + wlan_objmgr_psoc_release_ref(psoc, WLAN_WIFI_POS_OSIF_ID); +} +#else +static int os_if_wifi_pos_callback(struct sk_buff *skb) +{ + uint8_t err; + QDF_STATUS status; + struct wifi_pos_req_msg req = {0}; + struct wlan_objmgr_psoc *psoc = wifi_pos_get_psoc(); + + cfg80211_debug("enter"); + if (!psoc) { + cfg80211_err("global psoc object not registered yet."); + return -EINVAL; + } + + wlan_objmgr_psoc_get_ref(psoc, WLAN_WIFI_POS_OSIF_ID); + err = wifi_pos_parse_req(skb, &req); + if (err) { + os_if_wifi_pos_send_rsp(wifi_pos_get_app_pid(psoc), + ANI_MSG_OEM_ERROR, sizeof(err), &err); + status = QDF_STATUS_E_INVAL; + goto release_psoc_ref; + } + + status = ucfg_wifi_pos_process_req(psoc, &req, os_if_wifi_pos_send_rsp); + if (QDF_IS_STATUS_ERROR(status)) + cfg80211_err("ucfg_wifi_pos_process_req failed. 
status: %d", + status); + +release_psoc_ref: + wlan_objmgr_psoc_release_ref(psoc, WLAN_WIFI_POS_OSIF_ID); + + return qdf_status_to_os_return(status); +} +#endif + +#ifdef CNSS_GENL +int os_if_wifi_pos_register_nl(void) +{ + int ret = register_cld_cmd_cb(WLAN_NL_MSG_OEM, + os_if_wifi_pos_callback, NULL); + if (ret) + cfg80211_err("register_cld_cmd_cb failed"); + + return ret; +} +#else +int os_if_wifi_pos_register_nl(void) +{ + return nl_srv_register(WLAN_NL_MSG_OEM, os_if_wifi_pos_callback); +} +#endif /* CNSS_GENL */ + +#ifdef CNSS_GENL +int os_if_wifi_pos_deregister_nl(void) +{ + int ret = deregister_cld_cmd_cb(WLAN_NL_MSG_OEM); + if (ret) + cfg80211_err("deregister_cld_cmd_cb failed"); + + return ret; +} +#else +int os_if_wifi_pos_deregister_nl(void) +{ + return 0; +} +#endif /* CNSS_GENL */ + +void os_if_wifi_pos_send_peer_status(struct qdf_mac_addr *peer_mac, + uint8_t peer_status, + uint8_t peer_timing_meas_cap, + uint8_t session_id, + struct wifi_pos_ch_info *chan_info, + enum QDF_OPMODE dev_mode) +{ + struct wlan_objmgr_psoc *psoc = wifi_pos_get_psoc(); + struct wmi_pos_peer_status_info *peer_info; + + if (!psoc) { + cfg80211_err("global wifi_pos psoc object not registered"); + return; + } + + if (!wifi_pos_is_app_registered(psoc) || + wifi_pos_get_app_pid(psoc) == 0) { + cfg80211_err("app is not registered or pid is invalid"); + return; + } + + peer_info = qdf_mem_malloc(sizeof(*peer_info)); + if (!peer_info) { + cfg80211_alert("malloc failed"); + return; + } + qdf_mem_copy(peer_info->peer_mac_addr, peer_mac->bytes, + sizeof(peer_mac->bytes)); + peer_info->peer_status = peer_status; + peer_info->vdev_id = session_id; + peer_info->peer_capability = peer_timing_meas_cap; + peer_info->reserved0 = 0; + /* Set 0th bit of reserved0 for STA mode */ + if (QDF_STA_MODE == dev_mode) + peer_info->reserved0 |= 0x01; + + if (chan_info) { + peer_info->peer_chan_info.chan_id = chan_info->chan_id; + peer_info->peer_chan_info.reserved0 = 0; + peer_info->peer_chan_info.mhz 
= chan_info->mhz; + peer_info->peer_chan_info.band_center_freq1 = + chan_info->band_center_freq1; + peer_info->peer_chan_info.band_center_freq2 = + chan_info->band_center_freq2; + peer_info->peer_chan_info.info = chan_info->info; + peer_info->peer_chan_info.reg_info_1 = chan_info->reg_info_1; + peer_info->peer_chan_info.reg_info_2 = chan_info->reg_info_2; + } + + os_if_wifi_pos_send_rsp(wifi_pos_get_app_pid(psoc), + ANI_MSG_PEER_STATUS_IND, + sizeof(*peer_info), (uint8_t *)peer_info); + qdf_mem_free(peer_info); +} + +int os_if_wifi_pos_populate_caps(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_driver_caps *caps) +{ + if (!psoc || !caps) { + cfg80211_err("psoc or caps buffer is null"); + return -EINVAL; + } + + return qdf_status_to_os_return(wifi_pos_populate_caps(psoc, caps)); +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_cfg80211.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_cfg80211.c new file mode 100644 index 0000000000000000000000000000000000000000..37e0a993888cd36f22d69d2858cf4cc3712f75b0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_cfg80211.c @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: defines driver functions interfacing with linux kernel + */ +#include "wlan_cfg80211.h" diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_cfg80211.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_cfg80211.h new file mode 100644 index 0000000000000000000000000000000000000000..241fb0e4f06c3b2a939b9594c2d8d1e89fb17ef5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_cfg80211.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: declares driver functions interfacing with linux kernel + */ + + +#ifndef _WLAN_CFG80211_H_ +#define _WLAN_CFG80211_H_ + +#include +#include +#include +#include +#include + +#define cfg80211_log(level, args...) \ + QDF_TRACE(QDF_MODULE_ID_OS_IF, level, ## args) +#define cfg80211_logfl(level, format, args...) \ + cfg80211_log(level, FL(format), ## args) + +#define cfg80211_alert(format, args...) \ + cfg80211_logfl(QDF_TRACE_LEVEL_FATAL, format, ## args) +#define cfg80211_err(format, args...) \ + cfg80211_logfl(QDF_TRACE_LEVEL_ERROR, format, ## args) +#define cfg80211_warn(format, args...) 
\ + cfg80211_logfl(QDF_TRACE_LEVEL_WARN, format, ## args) +#define cfg80211_notice(format, args...) \ + cfg80211_logfl(QDF_TRACE_LEVEL_INFO, format, ## args) +#define cfg80211_info(format, args...) \ + cfg80211_logfl(QDF_TRACE_LEVEL_INFO_HIGH, format, ## args) +#define cfg80211_debug(format, args...) \ + cfg80211_logfl(QDF_TRACE_LEVEL_DEBUG, format, ## args) + +#define COMMON_VENDOR_COMMANDS \ +{ \ + .info.vendor_id = OUI_QCA, \ + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_SET_WIFI_CONFIGURATION,\ + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | \ + WIPHY_VENDOR_CMD_NEED_NETDEV, \ + .doit = NULL \ +}, \ +{ \ + .info.vendor_id = OUI_QCA, \ + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_GET_WIFI_CONFIGURATION,\ + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | \ + WIPHY_VENDOR_CMD_NEED_NETDEV, \ + .doit = NULL \ +}, + +#undef nla_parse +#undef nla_parse_nested +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +static inline int wlan_cfg80211_nla_parse(struct nlattr **tb, + int maxtype, + const struct nlattr *head, + int len, + const struct nla_policy *policy) +{ + return nla_parse(tb, maxtype, head, len, policy); +} + +static inline int +wlan_cfg80211_nla_parse_nested(struct nlattr *tb[], + int maxtype, + const struct nlattr *nla, + const struct nla_policy *policy) +{ + return nla_parse_nested(tb, maxtype, nla, policy); +} +#else +static inline int wlan_cfg80211_nla_parse(struct nlattr **tb, + int maxtype, + const struct nlattr *head, + int len, + const struct nla_policy *policy) +{ + return nla_parse(tb, maxtype, head, len, policy, NULL); +} + +static inline int +wlan_cfg80211_nla_parse_nested(struct nlattr *tb[], + int maxtype, + const struct nlattr *nla, + const struct nla_policy *policy) +{ + return nla_parse_nested(tb, maxtype, nla, policy, NULL); +} +#endif +#define nla_parse(...) (obsolete, use wlan_cfg80211_nla_parse) +#define nla_parse_nested(...) 
(obsolete, use wlan_cfg80211_nla_parse_nested) + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) +static inline int +wlan_cfg80211_nla_put_u64(struct sk_buff *skb, int attrtype, u64 value) +{ + return nla_put_u64(skb, attrtype, value); +} +#else +static inline int +wlan_cfg80211_nla_put_u64(struct sk_buff *skb, int attrtype, u64 value) +{ + return nla_put_u64_64bit(skb, attrtype, value, NL80211_ATTR_PAD); +} +#endif + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_priv.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_priv.h new file mode 100644 index 0000000000000000000000000000000000000000..f3eb7603d68af06c14383646f96029e12bea8ffa --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_priv.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + + +#ifndef _WLAN_OSIF_PRIV_H_ +#define _WLAN_OSIF_PRIV_H_ + +struct osif_scan_pdev; +struct osif_tdls_vdev; + +/** + * struct pdev_osif_priv - OS private structure + * @wiphy: wiphy handle + * @legacy_osif_priv: legacy osif private handle + * @osif_scan: Scan related data used by cfg80211 scan + */ +struct pdev_osif_priv { + struct wiphy *wiphy; + void *legacy_osif_priv; + struct osif_scan_pdev *osif_scan; +}; + +/** + * struct vdev_osif_priv - OS private structure of vdev + * @wdev: wireless device handle + * @legacy_osif_priv: legacy osif private handle + */ +struct vdev_osif_priv { + struct wireless_dev *wdev; + void *legacy_osif_priv; + struct osif_tdls_vdev *osif_tdls; +}; + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_request_manager.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_request_manager.c new file mode 100644 index 0000000000000000000000000000000000000000..6943fae1a24f19618e0583d79c3fc7d967851628 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_request_manager.c @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include "qdf_mem.h" +#include "qdf_list.h" +#include "qdf_event.h" +#include "wlan_cfg80211.h" +#include "wlan_osif_request_manager.h" + +/* arbitrary value */ +#define MAX_NUM_REQUESTS 20 + +static bool is_initialized; +static qdf_list_t requests; +static qdf_spinlock_t spinlock; +static void *cookie; + +struct osif_request { + qdf_list_node_t node; + void *cookie; + uint32_t reference_count; + struct osif_request_params params; + qdf_event_t completed; +}; + +/* must be called with spinlock held */ +static void osif_request_unlink(struct osif_request *request) +{ + qdf_list_remove_node(&requests, &request->node); +} + +static void osif_request_destroy(struct osif_request *request) +{ + struct osif_request_params *params; + + params = &request->params; + if (params->dealloc) { + void *priv = osif_request_priv(request); + + params->dealloc(priv); + } + qdf_event_destroy(&request->completed); + qdf_mem_free(request); +} + +/* must be called with spinlock held */ +static struct osif_request *osif_request_find(void *cookie) +{ + QDF_STATUS status; + struct osif_request *request; + qdf_list_node_t *node; + + status = qdf_list_peek_front(&requests, &node); + while (QDF_IS_STATUS_SUCCESS(status)) { + request = qdf_container_of(node, struct osif_request, node); + if (request->cookie == cookie) + return request; + status = qdf_list_peek_next(&requests, node, &node); + } + + return NULL; +} + +struct osif_request *osif_request_alloc(const struct osif_request_params *params) +{ + size_t length; + struct osif_request *request; + + if (!is_initialized) { + cfg80211_err("invoked when not initialized from %pS", + (void *)_RET_IP_); + return NULL; + } + + length = sizeof(*request) + params->priv_size; + request = qdf_mem_malloc(length); + if (!request) { + cfg80211_err("allocation failed for %pS", (void *)_RET_IP_); + return NULL; + } + request->reference_count = 1; + request->params = *params; + qdf_event_create(&request->completed); + 
qdf_spin_lock_bh(&spinlock); + request->cookie = cookie++; + qdf_list_insert_back(&requests, &request->node); + qdf_spin_unlock_bh(&spinlock); + cfg80211_debug("request %pK, cookie %pK, caller %pS", + request, request->cookie, (void *)_RET_IP_); + + return request; +} + +void *osif_request_priv(struct osif_request *request) +{ + /* private data area immediately follows the struct osif_request */ + return request + 1; +} + +void *osif_request_cookie(struct osif_request *request) +{ + return request->cookie; +} + +struct osif_request *osif_request_get(void *cookie) +{ + struct osif_request *request; + + if (!is_initialized) { + cfg80211_err("invoked when not initialized from %pS", + (void *)_RET_IP_); + return NULL; + } + qdf_spin_lock_bh(&spinlock); + request = osif_request_find(cookie); + if (request) + request->reference_count++; + qdf_spin_unlock_bh(&spinlock); + cfg80211_debug("cookie %pK, request %pK, caller %pS", + cookie, request, (void *)_RET_IP_); + + return request; +} + +void osif_request_put(struct osif_request *request) +{ + bool unlinked = false; + + cfg80211_debug("request %pK, cookie %pK, caller %pS", + request, request->cookie, (void *)_RET_IP_); + qdf_spin_lock_bh(&spinlock); + request->reference_count--; + if (0 == request->reference_count) { + osif_request_unlink(request); + unlinked = true; + } + qdf_spin_unlock_bh(&spinlock); + if (unlinked) + osif_request_destroy(request); +} + +int osif_request_wait_for_response(struct osif_request *request) +{ + QDF_STATUS status; + + status = qdf_wait_for_event_completion(&request->completed, + request->params.timeout_ms); + + return qdf_status_to_os_return(status); +} + +void osif_request_complete(struct osif_request *request) +{ + (void) qdf_event_set(&request->completed); +} + +void osif_request_manager_init(void) +{ + cfg80211_debug("%pS", (void *)_RET_IP_); + if (is_initialized) + return; + + qdf_list_create(&requests, MAX_NUM_REQUESTS); + qdf_spinlock_create(&spinlock); + is_initialized = true; +} + 
+/* + * osif_request_manager_deinit implementation note: + * It is intentional that we do not destroy the list or the spinlock. + * This allows threads to still access the infrastructure even when it + * has been deinitialized. Since neither lists nor spinlocks consume + * resources this does not result in a resource leak. + */ +void osif_request_manager_deinit(void) +{ + cfg80211_debug("%pS", (void *)_RET_IP_); + is_initialized = false; +} diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_request_manager.h b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_request_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..c32bdd92595215f69e997a312c4d659d65518858 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/wlan_osif_request_manager.h @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __WLAN_OSIF_REQUEST_MANAGER_H__ +#define __WLAN_OSIF_REQUEST_MANAGER_H__ + +/** + * DOC: WLAN OSIF REQUEST MANAGER + * + * Many operations within the wlan driver occur in an asynchronous + * manner. Requests are received by OSIF via one of the kernel + * interfaces (ioctl, nl80211, virtual file system, etc.). 
The + * requests are translated to an internal format and are then passed + * to lower layers, usually via SME, for processing. For requests + * which require a response, that response comes up from the lower + * layers in a separate thread of execution, ultimately resulting in a + * call to a callback function that was provided by OSIF as part of the + * initial request. So a mechanism is needed to synchronize the + * request and response. This framework provides that mechanism. + * + * Once the framework has been initialized, the typical sequence of + * events is as follows: + * + * Request Thread: + * 1. Create a &struct osif_request_params which describes the request. + * 2. Call osif_request_alloc() to allocate a &struct osif_request. + * 3. Call osif_request_priv() to get a pointer to the private data. + * 4. Place any information which must be shared with the Response + * Callback in the private data area. + * 5. Call osif_request_cookie() to get the unique cookie assigned + * to the request. + * 6. Call the underlying request handling API, passing the cookie + * as the callback's private context. + * 7. Call osif_request_wait_for_response() to wait for the response + * (or for the request to time out). + * 8. Use the return status to see if the request was successful. If + * it was, retrieve any response information from the private + * structure and prepare a response for userspace. + * 9. Call osif_request_put() to relinquish access to the request. + * 10. Return status to the caller. + * + * Response Callback: + * 1. Call osif_request_get() with the provided cookie to see if the + * request structure is still valid. If it returns %NULL then + * return since this means the request thread has already timed + * out. + * 2. Call osif_request_priv() to get access to the private data area. + * 3. Write response data into the private data area. + * 4. Call osif_request_complete() to indicate that the response is + * ready to be processed by the request thread. 
+ * 5. Call osif_request_put() to relinquish the callback function's + * reference to the request. + */ + +/* this is opaque to clients */ +struct osif_request; + +/** + * typedef osif_request_dealloc - Private data deallocation function + */ +typedef void (*osif_request_dealloc)(void *priv); + +/** + * struct osif_request_params - OSIF request parameters + * @priv_size: Size of the private data area required to pass + * information between the request thread and the response callback. + * @timeout_ms: The amount of time to wait for a response in milliseconds. + * @dealloc: Function to be called when the request is destroyed to + * deallocate any allocations made in the private area of the + * request struct. Can be %NULL if no private allocations are + * made. + */ +struct osif_request_params { + uint32_t priv_size; + uint32_t timeout_ms; + osif_request_dealloc dealloc; +}; + +/** + * osif_request_alloc() - Allocate a request struct + * @params: parameter block that specifies the attributes of the + * request + * + * This function will attempt to allocate a &struct osif_request with + * the specified @params. If successful, the caller can then use + * request struct to make an asynchronous request. Once the request is + * no longer needed, the reference should be relinquished via a call + * to osif_request_put(). + * + * Return: A pointer to an allocated &struct osif_request (which also + * contains room for the private buffer) if the allocation is + * successful, %NULL if the allocation fails. + */ +struct osif_request *osif_request_alloc(const struct osif_request_params *params); + +/** + * osif_request_priv() - Get pointer to request private data + * @request: The request struct that contains the private data + * + * This function will return a pointer to the private data area that + * is part of the request struct. The caller must already have a valid + * reference to @request from either osif_request_alloc() or + * osif_request_get(). 
+ * + * Returns: pointer to the private data area. Note that this pointer + * will always be an offset from the input @request pointer and hence + * this function will never return %NULL. + */ +void *osif_request_priv(struct osif_request *request); + +/** + * osif_request_cookie() - Get cookie of a request + * @request: The request struct associated with the request + * + * This function will return the unique cookie that has been assigned + * to the request. This cookie can subsequently be passed to + * osif_request_get() to retrieve the request. + * + * Note that the cookie is defined as a void pointer as it is intended + * to be passed as an opaque context pointer from OSIF to underlying + * layers when making a request, and subsequently passed back to OSIF + * as an opaque pointer in an asynchronous callback. + * + * Returns: The cookie assigned to the request. + */ +void *osif_request_cookie(struct osif_request *request); + +/** + * osif_request_get() - Get a reference to a request struct + * @cookie: The cookie of the request struct that needs to be + * referenced + * + * This function will use the cookie to determine if the associated + * request struct is valid, and if so, will increment the reference + * count of the struct. This means the caller is guaranteed that the + * request struct is valid and the underlying private data can be + * dereferenced. + * + * Returns: The pointer to the request struct associated with @cookie + * if the request is still valid, %NULL if the underlying request + * struct is no longer valid. + */ +struct osif_request *osif_request_get(void *cookie); + +/** + * osif_request_put() - Release a reference to a request struct + * @request: The request struct that no longer needs to be referenced + * + * This function will decrement the reference count of the struct, and + * will clean up the request if this is the last reference. 
The caller + * must already have a valid reference to @request, either from + * osif_request_alloc() or osif_request_get(). + * + * Returns: Nothing + */ +void osif_request_put(struct osif_request *request); + +/** + * osif_request_wait_for_response() - Wait for a response + * @request: The request struct associated with the request + * + * This function will wait until either a response is received and + * communicated via osif_request_complete(), or until the request + * timeout period expires. + * + * Returns: 0 if a response was received, -ETIMEDOUT if the response + * timed out. + */ +int osif_request_wait_for_response(struct osif_request *request); + +/** + * osif_request_complete() - Complete a request + * @request: The request struct associated with the request + * + * This function is used to indicate that a response has been received + * and that any information required by the request thread has been + * copied into the private data area of the request struct. This will + * unblock any osif_request_wait_for_response() that is pending on this + * @request. + * + * Returns: Nothing + */ +void osif_request_complete(struct osif_request *request); + +/** + * osif_request_manager_init() - Initialize the OSIF Request Manager + * + * This function must be called during system initialization to + * initialize the OSIF Request Manager. + * + * Returns: Nothing + */ +void osif_request_manager_init(void); + +/** + * osif_request_manager_deinit() - Deinitialize the OSIF Request Manager + * + * This function must be called during system shutdown to deinitialize + * the OSIF Request Manager. 
+ * + * Returns: Nothing + */ +void osif_request_manager_deinit(void); + +#endif /* __WLAN_OSIF_REQUEST_MANAGER_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/Kbuild b/drivers/staging/qca-wifi-host-cmn/qdf/Kbuild new file mode 100644 index 0000000000000000000000000000000000000000..3463afda20e5e8d0db30e5b598f1f85a49df78b0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/Kbuild @@ -0,0 +1,114 @@ +ifeq ($(obj),) +obj := . +endif + +DEPTH := ../.. + +HOST_CMN_CONVG_SRC := $(DEPTH)/cmn_dev +HOST_CMN_CONVG_NLINK := $(DEPTH)/cmn_dev/utils/nlink +HOST_CMN_CONVG_LOGGING := $(DEPTH)/cmn_dev/utils/logging +HOST_CMN_CONVG_PTT := $(DEPTH)/cmn_dev/utils/ptt + +include $(obj)/$(DEPTH)/os/linux/Makefile-linux.common + +INCS += -Iinclude/nbuf -Iinclude/net -Iinclude/os +INCS += -Inbuf/linux -Inet/linux -Ios/linux +INCS += -I$(WLAN_TOP)/../../include +INCS += -I$(WLAN_TOP)/cmn_dev/qdf/inc +INCS += -I$(WLAN_TOP)/cmn_dev/qdf/linux/src +INCS += -I$(obj)/$(HOST_CMN_CONVG_PTT)/inc \ + -I$(obj)/$(HOST_CMN_CONVG_NLINK)/inc \ + -I$(obj)/$(HOST_CMN_CONVG_LOGGING)/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/utils/host_diag_log/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/utils/host_diag_log/src \ + -I$(obj)/$(DEPTH)/cmn_dev/utils/ptt/inc \ + -I$(WLAN_TOP)/pld/inc + +obj-m += qdf.o + +EXTRA_CFLAGS+= $(INCS) $(COPTS) -Wno-unused-function + +ifeq ($(strip ${QCA_PARTNER_MAKE_F_SUPPORT}),1) +MOD_CFLAGS = -D"KBUILD_STR(s)=\#s" -D"KBUILD_BASENAME=KBUILD_STR(qdf.mod)" -D"KBUILD_MODNAME=KBUILD_STR(qdf)" +endif + +qdf-objs := \ +linux/src/qdf_defer.o \ +linux/src/qdf_event.o \ +linux/src/qdf_list.o \ +linux/src/qdf_lock.o \ +linux/src/qdf_mc_timer.o \ +linux/src/qdf_mem.o \ +linux/src/qdf_module.o \ +linux/src/qdf_nbuf.o \ +linux/src/qdf_perf.o \ +linux/src/qdf_threads.o \ +linux/src/qdf_trace.o \ +linux/src/qdf_file.o \ +src/qdf_flex_mem.o \ +src/qdf_parse.o \ +src/qdf_str.o \ +src/qdf_types.o +#linux/src/qdf_net.o \ +#linux/src/qdf_net_event.o \ +#linux/src/qdf_net_ioctl.o +#linux/src/qdf_net_wext.o + 
+ifeq ($(MEMORY_DEBUG),1) +qdf-objs += src/qdf_debug_domain.o +endif + +ifeq ($(LOGGING_UTILS_SUPPORT),1) +qdf-objs += \ +$(HOST_CMN_CONVG_NLINK)/src/wlan_nlink_srv.o \ +$(HOST_CMN_CONVG_LOGGING)/src/wlan_logging_sock_svc.o +endif + +ifeq ($(WLAN_DEBUGFS),1) +qdf-objs += linux/src/qdf_debugfs.o +endif + +ifeq ($(BUILD_ADF_NET_IOCTL),1) +EXTRA_CFLAGS+= -DADF_NET_IOCTL_SUPPORT +#adf-objs += os/linux/adf_os_netlink_pvt.o \ +# net/linux/adf_net_event.o \ +# net/linux/adf_net_wext.o \ +# net/linux/adf_net_ioctl.o +endif + +ifeq ($(BUILD_ADF_DEFER_PVT),1) +#adf-objs += os/linux/adf_os_defer_pvt.o +endif +ifeq ($(BUILD_ADF_IRQ_PVT),1) +#adf-objs += os/linux/adf_os_irq_pvt.o +endif + +ifeq ($(BUILD_ADF_PERF_PROFILING),1) +#adf-objs += os/linux/adf_os_perf_pvt.o +ifeq ($(BUILD_ADF_MIPS_PERF_PROFILING),1) +#adf-objs += os/linux/adf_os_mips_perf_pvt.o +endif +endif + +# os/linux/adf_os_pci_pvt.o \ +# net/linux/adf_net_ioctl.o \ +# net/linux/adf_net_pseudo.o \ + +clean-files := modules.order + +ifeq ($(strip ${QCA_PARTNER_MAKE_F_SUPPORT}),1) +all: qdf.ko + +qdf.mod.o: qdf.mod.c + ${CC} -c -o $@ ${EXTRA_CFLAGS} ${MOD_CFLAGS} $< + +adf.o: ${adf-objs} + $(LD) -m elf32btsmip -r -o adf.o $(adf-objs) + $(KERNELPATH)/scripts/mod/modpost qdf.o + +qdf.ko: qdf.o qdf.mod.o + $(LD) $(LDOPTS) -o qdf.ko qdf.o qdf.mod.o + +%.o: %.c + ${CC} -c -o $@ ${EXTRA_CFLAGS} $< +endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/i_qdf_nbuf_api_m.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/i_qdf_nbuf_api_m.h new file mode 100644 index 0000000000000000000000000000000000000000..b3f622908b23f1c71d8fce5e471d2404ee3348c7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/i_qdf_nbuf_api_m.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_nbuf_api_m.h + * + * Platform specific qdf_nbuf_public network buffer API + * This file defines the network buffer abstraction. + * Included by qdf_nbuf.h and should not be included + * directly from other files. + */ + +#ifndef _QDF_NBUF_M_H +#define _QDF_NBUF_M_H + +static inline int qdf_nbuf_ipa_owned_get(qdf_nbuf_t buf) +{ + return __qdf_nbuf_ipa_owned_get(buf); +} + +static inline void qdf_nbuf_ipa_owned_set(qdf_nbuf_t buf) +{ + __qdf_nbuf_ipa_owned_set(buf); +} + +static inline void qdf_nbuf_ipa_owned_clear(qdf_nbuf_t buf) +{ + __qdf_nbuf_ipa_owned_clear(buf); +} + +static inline int qdf_nbuf_ipa_priv_get(qdf_nbuf_t buf) +{ + return __qdf_nbuf_ipa_priv_get(buf); +} + +static inline void qdf_nbuf_ipa_priv_set(qdf_nbuf_t buf, uint32_t priv) +{ + + QDF_BUG(!(priv & QDF_NBUF_IPA_CHECK_MASK)); + __qdf_nbuf_ipa_priv_set(buf, priv); +} + +#endif /* _QDF_NBUF_M_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/i_qdf_nbuf_api_w.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/i_qdf_nbuf_api_w.h new file mode 100644 index 0000000000000000000000000000000000000000..f1197c9a1079c8c883e64348803cae59c6c90855 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/i_qdf_nbuf_api_w.h @@ -0,0 +1,67 
@@ +/* + * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_nbuf_api_w.h + * + * Platform specific qdf_nbuf_public network buffer API + * This file defines the network buffer abstraction. + * Included by qdf_nbuf.h and should not be included + * directly from other files. 
+ */ + +#ifndef _QDF_NBUF_W_H +#define _QDF_NBUF_W_H + +static inline void *qdf_nbuf_get_tx_fctx(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_tx_fctx(buf); +} + +static inline void *qdf_nbuf_get_rx_fctx(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_rx_fctx(buf); +} + + +static inline void +qdf_nbuf_set_tx_fctx_type(qdf_nbuf_t buf, void *ctx, uint8_t type) +{ + __qdf_nbuf_set_tx_fctx_type(buf, ctx, type); +} + +static inline void +qdf_nbuf_set_rx_fctx_type(qdf_nbuf_t buf, void *ctx, uint8_t type) +{ + __qdf_nbuf_set_rx_fctx_type(buf, ctx, type); +} + + +static inline void * +qdf_nbuf_get_ext_cb(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_ext_cb(buf); +} + +static inline void +qdf_nbuf_set_ext_cb(qdf_nbuf_t buf, void *ref) +{ + __qdf_nbuf_set_ext_cb(buf, ref); +} + +#endif /* _QDF_NBUF_W_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/osdep.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/osdep.h new file mode 100644 index 0000000000000000000000000000000000000000..b8456e687b5411107f6b4c4af163ff4e407570d6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/osdep.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: osdep + * This file provides OS abstraction for osdependent APIs. + */ + +#ifndef _OSDEP_H +#define _OSDEP_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * ATH_DEBUG - + * Control whether debug features (printouts, assertions) are compiled + * into the driver. + */ +#ifndef ATH_DEBUG +#define ATH_DEBUG 1 /* default: include debug code */ +#endif + +#if ATH_DEBUG +#ifndef ASSERT +#define ASSERT(expr) qdf_assert(expr) +#endif +#else +#define ASSERT(expr) +#endif /* ATH_DEBUG */ + +/* + * Need to define byte order based on the CPU configuration. + */ +#ifndef _LITTLE_ENDIAN +#define _LITTLE_ENDIAN 1234 +#endif +#ifndef _BIG_ENDIAN +#define _BIG_ENDIAN 4321 +#endif +#ifdef __BIG_ENDIAN +#define _BYTE_ORDER _BIG_ENDIAN +#else +#define _BYTE_ORDER _LITTLE_ENDIAN +#endif + +/* + * Deduce if tasklets are available. If not then + * fall back to using the immediate work queue. + */ +#define qdf_sysctl_decl(f, ctl, write, filp, buffer, lenp, ppos) \ + f(struct ctl_table *ctl, int32_t write, void *buffer, \ + size_t *lenp, loff_t *ppos) + +#define QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) \ + __QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) + +#define EOK (0) + +#ifndef ARPHRD_IEEE80211 +#define ARPHRD_IEEE80211 801 /* IEEE 802.11. */ +#endif + +/* + * Normal Delay functions. Time specified in microseconds. + */ +#define OS_DELAY(_us) qdf_udelay(_us) + +/* + * memory data manipulation functions. 
+ */ +#define OS_MEMCPY(_dst, _src, _len) qdf_mem_copy(_dst, _src, _len) +#define OS_MEMMOVE(_dst, _src, _len) qdf_mem_move(_dst, _src, _len) +#define OS_MEMZERO(_buf, _len) qdf_mem_zero(_buf, _len) +#define OS_MEMSET(_buf, _ch, _len) qdf_mem_set(_buf, _len, _ch) +#define OS_MEMCMP(_mem1, _mem2, _len) qdf_mem_cmp(_mem1, _mem2, _len) + + +/* + * System time interface + */ +typedef qdf_time_t systime_t; + +/** + * os_get_timestamp() - gives the timestamp in ticks + * Return: unsigned long + */ +static inline qdf_time_t os_get_timestamp(void) +{ + /* Fix double conversion from jiffies to ms */ + return qdf_system_ticks(); +} + +struct _NIC_DEV; + +#define OS_FREE(_p) qdf_mem_free(_p) + +#define OS_DMA_MEM_CONTEXT(context) \ + dma_addr_t context + +#define OS_GET_DMA_MEM_CONTEXT(var, field) \ + &(var->field) + +/* + * Timer Interfaces. Use these macros to declare timer + * and retrieve timer argument. This is mainly for resolving + * different argument types for timer function in different OS. + */ +#define os_timer_func(_fn) \ + void _fn(void *timer_arg) + +#define OS_GET_TIMER_ARG(_arg, _type) \ + ((_arg) = (_type)(timer_arg)) + +#define OS_SET_TIMER(_timer, _ms) qdf_timer_mod(_timer, _ms) + +/* + * These are required for network manager support + */ +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(ndev, pdev) +#endif + +#endif /* end of _OSDEP_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_atomic.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_atomic.h new file mode 100644 index 0000000000000000000000000000000000000000..76285675a5a85b1ffee1e37fc80ccafedd00fcf8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_atomic.h @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_atomic.h + * This file provides OS abstraction for atomic APIs. + */ + +#ifndef _QDF_ATOMIC_H +#define _QDF_ATOMIC_H + +#include + +/** + * qdf_atomic_t - atomic type of variable + * + * Use this when you want a simple resource counter etc. which is atomic + * across multiple CPU's. These maybe slower than usual counters on some + * platforms/OS'es, so use them with caution. 
+ */ + +typedef __qdf_atomic_t qdf_atomic_t; + +/** + * qdf_atomic_init() - initialize an atomic type variable + * @v: A pointer to an opaque atomic variable + * + * Return: None + */ +static inline QDF_STATUS qdf_atomic_init(qdf_atomic_t *v) +{ + return __qdf_atomic_init(v); +} + +/** + * qdf_atomic_read() - read the value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: The current value of the variable + */ +static inline int32_t qdf_atomic_read(qdf_atomic_t *v) +{ + return __qdf_atomic_read(v); +} + +/** + * qdf_atomic_inc() - increment the value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: None + */ +static inline void qdf_atomic_inc(qdf_atomic_t *v) +{ + __qdf_atomic_inc(v); +} + +/** + * qdf_atomic_dec() - decrement the value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: None + */ +static inline void qdf_atomic_dec(qdf_atomic_t *v) +{ + __qdf_atomic_dec(v); +} + +/** + * qdf_atomic_add() - add a value to the value of an atomic variable + * @i: The amount by which to increase the atomic counter + * @v: A pointer to an opaque atomic variable + * + * Return: None + */ +static inline void qdf_atomic_add(int i, qdf_atomic_t *v) +{ + __qdf_atomic_add(i, v); +} + +/** + * qdf_atomic_sub() - Subtract a value from an atomic variable + * @i: the amount by which to decrease the atomic counter + * @v: a pointer to an opaque atomic variable + * + * Return: none + */ +static inline void qdf_atomic_sub(int i, qdf_atomic_t *v) +{ + __qdf_atomic_sub(i, v); +} + +/** + * qdf_atomic_dec_and_test() - decrement an atomic variable and check if the + * new value is zero + * @v: A pointer to an opaque atomic variable + * + * Return: + * true (non-zero) if the new value is zero, + * false (0) if the new value is non-zero + */ +static inline int32_t qdf_atomic_dec_and_test(qdf_atomic_t *v) +{ + return __qdf_atomic_dec_and_test(v); +} + +/** + * qdf_atomic_set() - set a 
value to the value of an atomic variable + * @v: A pointer to an opaque atomic variable + * @i: required value to set + * + * Atomically sets the value of v to i + * Return: None + */ +static inline void qdf_atomic_set(qdf_atomic_t *v, int i) +{ + __qdf_atomic_set(v, i); +} + +/** + * qdf_atomic_inc_return() - return the incremented value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: The current value of the variable + */ +static inline int32_t qdf_atomic_inc_return(qdf_atomic_t *v) +{ + return __qdf_atomic_inc_return(v); +} + +/** + * qdf_atomic_set_bit - Atomically set a bit in memory + * @nr: bit to set + * @addr: the address to start counting from + * + * Return: none + */ +static inline void qdf_atomic_set_bit(int nr, volatile unsigned long *addr) +{ + __qdf_atomic_set_bit(nr, addr); +} + +/** + * qdf_atomic_clear_bit - Atomically clear a bit in memory + * @nr: bit to clear + * @addr: the address to start counting from + * + * Return: none + */ +static inline void qdf_atomic_clear_bit(int nr, volatile unsigned long *addr) +{ + __qdf_atomic_clear_bit(nr, addr); +} + +/** + * qdf_atomic_change_bit - Atomically toggle a bit in memory + * from addr + * @nr: bit to change + * @addr: the address to start counting from + * + * Return: none + */ +static inline void qdf_atomic_change_bit(int nr, volatile unsigned long *addr) +{ + __qdf_atomic_change_bit(nr, addr); +} + +/** + * qdf_atomic_test_and_set_bit - Atomically set a bit and return its old value + * @nr: Bit to set + * @addr: the address to start counting from + * + * Return: return nr bit old value + */ +static inline int qdf_atomic_test_and_set_bit(int nr, + volatile unsigned long *addr) +{ + return __qdf_atomic_test_and_set_bit(nr, addr); +} + +/** + * qdf_atomic_test_and_clear_bit - Atomically clear a bit and return its old + * value + * @nr: bit to clear + * @addr: the address to start counting from + * + * Return: return nr bit old value + */ +static inline int 
qdf_atomic_test_and_clear_bit(int nr, + volatile unsigned long *addr) +{ + return __qdf_atomic_test_and_clear_bit(nr, addr); +} + +/** + * qdf_atomic_test_and_change_bit - Atomically toggle a bit and return its old + * value + * @nr: bit to change + * @addr: the address to start counting from + * + * Return: return nr bit old value + */ +static inline int qdf_atomic_test_and_change_bit(int nr, + volatile unsigned long *addr) +{ + return __qdf_atomic_test_and_change_bit(nr, addr); +} + +/** + * qdf_atomic_test_bit - Atomically get the nr-th bit value starting from addr + * @nr: bit to get + * @addr: the address to start counting from + * + * Return: return nr bit value + */ +static inline int qdf_atomic_test_bit(int nr, volatile unsigned long *addr) +{ + return __qdf_atomic_test_bit(nr, addr); +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_cpuhp.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_cpuhp.h new file mode 100644 index 0000000000000000000000000000000000000000..c3e99bad9094a6673851f6232a5f1257398e10f1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_cpuhp.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: qdf_cpuhp (CPU hotplug) + * QCA driver framework (QDF) CPU hotplug APIs + */ + +#ifndef __QDF_CPUHP_H +#define __QDF_CPUHP_H + +#include "qdf_status.h" +#include "qdf_types.h" + +/** + * struct qdf_cpuhp_handler - an opaque hotplug event registration handle + */ +struct qdf_cpuhp_handler; + +typedef void (*qdf_cpuhp_callback)(void *context, uint32_t cpu); + +#ifdef QCA_CONFIG_SMP +/** + * qdf_cpuhp_init() - Initialize the CPU hotplug event infrastructure + * + * To be called once, globally. + * + * Return: None + */ +QDF_STATUS qdf_cpuhp_init(void); + +/** + * qdf_cpuhp_deinit() - De-initialize the CPU hotplug event infrastructure + * + * To be called once, globally. + * + * Return: None + */ +QDF_STATUS qdf_cpuhp_deinit(void); + +/** + * qdf_cpuhp_register() - Register for CPU up/down event notifications + * @handler: a double pointer to the event registration handle to allocate + * @context: an opaque context to pass back to event listeners + * @up_callback: the function pointer to invoke for CPU up events + * @down_callback: the function pointer to invoke for CPU down events + * + * "Up" happens just after the CPU is up. Inversely, "down" happens just before + * the CPU goes down. + * + * @handler will point to a valid memory address on success, or NULL on failure. 
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS qdf_cpuhp_register(struct qdf_cpuhp_handler **handler,
+			      void *context,
+			      qdf_cpuhp_callback up_callback,
+			      qdf_cpuhp_callback down_callback);
+
+/**
+ * qdf_cpuhp_unregister() - Un-register for CPU up/down event notifications
+ * @handler: a double pointer to the event registration handle to de-allocate
+ *
+ * @handler will point to NULL upon completion
+ *
+ * Return: None
+ */
+void qdf_cpuhp_unregister(struct qdf_cpuhp_handler **handler);
+#else
+static inline QDF_STATUS qdf_cpuhp_init(void)
+{
+	return QDF_STATUS_SUCCESS;
+}
+
+static inline QDF_STATUS qdf_cpuhp_deinit(void)
+{
+	return QDF_STATUS_SUCCESS;
+}
+
+static inline QDF_STATUS qdf_cpuhp_register(struct qdf_cpuhp_handler **handler,
+					    void *context,
+					    qdf_cpuhp_callback up_callback,
+					    qdf_cpuhp_callback down_callback)
+{
+	return QDF_STATUS_SUCCESS;
+}
+
+static inline void qdf_cpuhp_unregister(struct qdf_cpuhp_handler **handler) {}
+#endif /* QCA_CONFIG_SMP */
+
+#endif /* __QDF_CPUHP_H */
diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_crypto.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_crypto.h
new file mode 100644
index 0000000000000000000000000000000000000000..8d592fdd44bf5e5379a8b85797a4f67a3e44a6ff
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_crypto.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: qdf_crypto.h
+ * This file provides OS abstraction for crypto APIs.
+ */
+
+#if !defined(__QDF_CRYPTO_H)
+#define __QDF_CRYPTO_H
+
+/* Include Files */
+#include "qdf_status.h"
+#include
+#include
+
+/* Preprocessor definitions and constants */
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define AES_BLOCK_SIZE 16
+#define HMAC_SHA256_CRYPTO_TYPE "hmac(sha256)"
+#define HMAC_SHA386_CRYPTO_TYPE "hmac(sha384)"
+
+#define SHA256_CRYPTO_TYPE "sha256"
+#define SHA386_CRYPTO_TYPE "sha384"
+
+#define SHA256_DIGEST_SIZE 32
+#define SHA384_DIGEST_SIZE 48
+
+#define FIXED_PARAM_OFFSET_ASSOC_REQ 4
+#define FIXED_PARAM_OFFSET_ASSOC_RSP 6
+
+#define AAD_LEN 20
+#define IEEE80211_MMIE_GMAC_MICLEN 16
+
+#define IS_VALID_CTR_KEY_LEN(len) ((((len) == 16) || ((len) == 32) || \
+	((len) == 48)) ? 1 : 0)
+
+/* Function declarations and documentation */
+
+/**
+ * qdf_get_hash: API to get hash using specific crypto and scatterlist
+ * @type: crypto type
+ * @element_cnt: scatterlist element count
+ * @addr: scatterlist element array
+ * @addr_len: element length array
+ * @hash: new hash
+ *
+ * Return: 0 if success else error code
+ */
+int qdf_get_hash(uint8_t *type, uint8_t element_cnt,
+		 uint8_t *addr[], uint32_t *addr_len,
+		 int8_t *hash);
+
+/**
+ * qdf_get_hmac_hash: API to get hmac hash using specific crypto and
+ * scatterlist elements.
+ * @type: crypto type + * @key: key needs to be used for hmac api + * @keylen: length of key + * @element_cnt: scatterlist element count + * @addr: scatterlist element array + * @addr_len: element length array + * @hash: new hash + * + * Return: 0 if success else error code + */ +int qdf_get_hmac_hash(uint8_t *type, uint8_t *key, + uint32_t keylen, uint8_t element_cnt, + uint8_t *addr[], uint32_t *addr_len, int8_t *hash); + +/** + * qdf_get_keyed_hash: API to get hash using specific crypto and + * scatterlist elements. + * @type: crypto type + * @key: key needs to be used for hmac api + * @keylen: length of key + * @element_cnt: scatterlist element count + * @addr: scatterlist element array + * @addr_len: element length array + * @hash: new hash + * + * Return: 0 if success else error code + */ +int qdf_get_keyed_hash(const char *alg, const uint8_t *key, + unsigned int key_len, const uint8_t *src[], + size_t *src_len, size_t num_elements, uint8_t *out); +/** + * qdf_update_dbl: This API does the doubling operation as defined in RFC5297 + * @d: input for doubling + * + * Return: None + */ +void qdf_update_dbl(uint8_t *d); + +/** + * qdf_aes_s2v: This API gets vector from AES string as defined in RFC5297 + * output length will be AES_BLOCK_SIZE. 
+ * @key: key used for operation + * @key_len: key len + * @s: addresses of elements to be used + * @s_len: array of element length + * @num_s: number of elements + * @out: pointer to output vector + * + * Return: 0 if success else Error number + */ +int qdf_aes_s2v(const uint8_t *key, unsigned int key_len, const uint8_t *s[], + size_t s_len[], size_t num_s, uint8_t *out); + +/** + * qdf_aes_ctr: This API defines AES Counter Mode + * @key: key used for operation + * @key_len: key len + * @siv: Initialization vector + * @src: input + * @src_len: input len + * @dest: output + * @enc: if encryption needs to be done or decryption + * + * Return: 0 if success else Error number + */ +int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv, + const uint8_t *src, size_t src_len, uint8_t *dest, bool enc); + +/** + * qdf_crypto_aes_gmac: This API calculates MIC for GMAC + * @key: key used for operation + * @key_length: key length + * @iv: Initialization vector + * @aad: Additional authentication data + * @data: Pointer to data + * @data_len: Length of data + * @mic: Pointer to MIC + * + * Return: 0 if success else Error number + */ +int qdf_crypto_aes_gmac(uint8_t *key, uint16_t key_length, + uint8_t *iv, uint8_t *aad, uint8_t *data, + uint16_t data_len, uint8_t *mic); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* __QDF_CRYPTO_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_debug_domain.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_debug_domain.h new file mode 100644 index 0000000000000000000000000000000000000000..39210d77bc6c026327ae6e1e3914d99fe041a728 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_debug_domain.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. 
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: qdf_debug_domain
+ * QCA driver framework (QDF) debug domain APIs. Debug domains are used to track
+ * resource allocations across different driver states, particularly for runtime
+ * leak detection.
+ */
+
+#ifndef __QDF_DEBUG_DOMAIN_H
+#define __QDF_DEBUG_DOMAIN_H
+
+#include "qdf_types.h"
+
+/**
+ * enum qdf_debug_domain - debug domains for tracking resource allocations
+ * @QDF_DEBUG_DOMAIN_INIT: The default debug domain, tied to driver load
+ * @QDF_DEBUG_DOMAIN_ACTIVE: The active debug domain, tied to some "running" state
+ * @QDF_DEBUG_DOMAIN_COUNT: The number of debug domains for iterating, etc.
+ */ +enum qdf_debug_domain { + QDF_DEBUG_DOMAIN_INIT, + QDF_DEBUG_DOMAIN_ACTIVE, + + /* keep last */ + QDF_DEBUG_DOMAIN_COUNT, +}; + +/** + * qdf_debug_domain_get() - Get the current debug domain + * + * Return: the current debug domain + */ +enum qdf_debug_domain qdf_debug_domain_get(void); + +/** + * qdf_debug_domain_set() - Set the current debug domain + * @domain: the domain to change to + * + * Return: None + */ +void qdf_debug_domain_set(enum qdf_debug_domain domain); + +/** + * qdf_debug_domain_name() - Get the human readable name of a debug domain + * @domain: The domain to return the name of + * + * Return: name of the given domain + */ +const char *qdf_debug_domain_name(enum qdf_debug_domain domain); + +/** + * qdf_debug_domain_valid() - bounds checks the given domain + * @domain: the domain to validate + * + * Return: true is the given domain is a valid debug domain + */ +bool qdf_debug_domain_valid(enum qdf_debug_domain domain); + +#endif /* __QDF_DEBUG_DOMAIN_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_debugfs.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..99017e1b38aa1375b68dadf49bb4ccaf9cb747c0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_debugfs.h @@ -0,0 +1,323 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_debugfs.h + * This file provides OS abstraction for debug filesystem APIs. + */ + +#ifndef _QDF_DEBUGFS_H +#define _QDF_DEBUGFS_H + +#include +#include +#include +#include + +/* representation of qdf dentry */ +typedef __qdf_dentry_t qdf_dentry_t; +typedef __qdf_debugfs_file_t qdf_debugfs_file_t; + +/* qdf file modes */ +#define QDF_FILE_USR_READ 00400 +#define QDF_FILE_USR_WRITE 00200 + +#define QDF_FILE_GRP_READ 00040 +#define QDF_FILE_GRP_WRITE 00020 + +#define QDF_FILE_OTH_READ 00004 +#define QDF_FILE_OTH_WRITE 00002 + +/** + * struct qdf_debugfs_fops - qdf debugfs operations + * @show: Callback for show operation. + * Following functions can be used to print data in the show function, + * qdf_debugfs_print() + * qdf_debugfs_hexdump() + * qdf_debugfs_write() + * @write: Callback for write operation. + * @priv: Private pointer which will be passed in the registered callbacks. + */ +struct qdf_debugfs_fops { + QDF_STATUS(*show)(qdf_debugfs_file_t file, void *arg); + QDF_STATUS(*write)(void *priv, const char *buf, qdf_size_t len); + void *priv; +}; + +#ifdef WLAN_DEBUGFS +/** + * qdf_debugfs_init() - initialize debugfs + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_debugfs_init(void); + +/** + * qdf_debugfs_exit() - cleanup debugfs + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_debugfs_exit(void); + +/** + * qdf_debugfs_create_dir() - create a debugfs directory + * @name: name of the new directory + * @parent: parent node. If NULL, defaults to base qdf_debugfs_root + * + * Return: dentry structure pointer in case of success, otherwise NULL. 
+ * + */ +qdf_dentry_t qdf_debugfs_create_dir(const char *name, qdf_dentry_t parent); + +/** + * qdf_debugfs_create_file() - create a debugfs file + * @name: name of the file + * @mode: qdf file mode + * @parent: parent node. If NULL, defaults to base qdf_debugfs_root + * @fops: file operations { .read, .write ... } + * + * Return: dentry structure pointer in case of success, otherwise NULL. + * + */ +qdf_dentry_t qdf_debugfs_create_file(const char *name, uint16_t mode, + qdf_dentry_t parent, + struct qdf_debugfs_fops *fops); + +/** + * qdf_debugfs_printf() - print formated string into debugfs file + * @file: debugfs file handle passed in fops->show() function + * @f: the format string to use + * @...: arguments for the format string + */ +void qdf_debugfs_printf(qdf_debugfs_file_t file, const char *f, ...); + +/** + * qdf_debugfs_hexdump() - print hexdump into debugfs file + * @file: debugfs file handle passed in fops->show() function. + * @buf: data + * @len: data length + * + */ +void qdf_debugfs_hexdump(qdf_debugfs_file_t file, const uint8_t *buf, + qdf_size_t len); + +/** + * qdf_debugfs_write() - write data into debugfs file + * @file: debugfs file handle passed in fops->show() function. + * @buf: data + * @len: data length + * + */ +void qdf_debugfs_write(qdf_debugfs_file_t file, const uint8_t *buf, + qdf_size_t len); + +/** + * qdf_debugfs_create_u8() - create a debugfs file for a u8 variable + * @name: name of the file + * @mode: qdf file mode + * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root' + * @value: pointer to a u8 variable (global/static) + * + * Return: dentry for the file; NULL in case of failure. + * + */ +qdf_dentry_t qdf_debugfs_create_u8(const char *name, uint16_t mode, + qdf_dentry_t parent, u8 *value); + +/** + * qdf_debugfs_create_u16() - create a debugfs file for a u16 variable + * @name: name of the file + * @mode: qdf file mode + * @parent: parent node. 
If NULL, defaults to base 'qdf_debugfs_root' + * @value: pointer to a u16 variable (global/static) + * + * Return: dentry for the file; NULL in case of failure. + * + */ +qdf_dentry_t qdf_debugfs_create_u16(const char *name, uint16_t mode, + qdf_dentry_t parent, u16 *value); + +/** + * qdf_debugfs_create_u32() - create a debugfs file for a u32 variable + * @name: name of the file + * @mode: qdf file mode + * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root' + * @value: pointer to a u32 variable (global/static) + * + * Return: dentry for the file; NULL in case of failure. + * + */ +qdf_dentry_t qdf_debugfs_create_u32(const char *name, uint16_t mode, + qdf_dentry_t parent, u32 *value); + +/** + * qdf_debugfs_create_u64() - create a debugfs file for a u64 variable + * @name: name of the file + * @mode: qdf file mode + * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root' + * @value: pointer to a u64 variable (global/static) + * + * Return: dentry for the file; NULL in case of failure. + * + */ +qdf_dentry_t qdf_debugfs_create_u64(const char *name, uint16_t mode, + qdf_dentry_t parent, u64 *value); + +/** + * qdf_debugfs_create_atomic() - create a debugfs file for an atomic variable + * @name: name of the file + * @mode: qdf file mode + * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root' + * @value: pointer to an atomic variable (global/static) + * + * Return: dentry for the file; NULL in case of failure. + * + */ +qdf_dentry_t qdf_debugfs_create_atomic(const char *name, uint16_t mode, + qdf_dentry_t parent, + qdf_atomic_t *value); + +/** + * qdf_debugfs_create_string() - create a debugfs file for a string + * @name: name of the file + * @mode: qdf file mode + * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root' + * @str: a pointer to NULL terminated string (global/static). + * + * Return: dentry for the file; NULL in case of failure. 
+ *
+ */
+qdf_dentry_t qdf_debugfs_create_string(const char *name, uint16_t mode,
+				       qdf_dentry_t parent, char *str);
+
+/**
+ * qdf_debugfs_remove_dir_recursive() - remove directory recursively
+ * @d: debugfs node
+ *
+ * This function will recursively remove a directory in debugfs that was
+ * previously created with a call to qdf_debugfs_create_file() or its
+ * variant functions.
+ */
+void qdf_debugfs_remove_dir_recursive(qdf_dentry_t d);
+
+/**
+ * qdf_debugfs_remove_dir() - remove debugfs directory
+ * @d: debugfs node
+ *
+ */
+void qdf_debugfs_remove_dir(qdf_dentry_t d);
+
+/**
+ * qdf_debugfs_remove_file() - remove debugfs file
+ * @d: debugfs node
+ *
+ */
+void qdf_debugfs_remove_file(qdf_dentry_t d);
+
+#else /* WLAN_DEBUGFS */
+
+static inline QDF_STATUS qdf_debugfs_init(void)
+{
+	return QDF_STATUS_E_NOSUPPORT;
+}
+
+static inline QDF_STATUS qdf_debugfs_exit(void)
+{
+	return QDF_STATUS_E_NOSUPPORT;
+}
+
+static inline qdf_dentry_t qdf_debugfs_create_dir(const char *name,
+						  qdf_dentry_t parent)
+{
+	return NULL;
+}
+
+static inline qdf_dentry_t
+qdf_debugfs_create_file(const char *name, uint16_t mode, qdf_dentry_t parent,
+			struct qdf_debugfs_fops *fops)
+{
+	return NULL;
+}
+
+static inline void qdf_debugfs_printf(qdf_debugfs_file_t file, const char *f,
+				      ...)
+{ +} + +static inline void qdf_debugfs_hexdump(qdf_debugfs_file_t file, + const uint8_t *buf, qdf_size_t len) +{ +} + +static inline void qdf_debugfs_write(qdf_debugfs_file_t file, + const uint8_t *buf, qdf_size_t len) +{ +} + +static inline qdf_dentry_t qdf_debugfs_create_u8(const char *name, + uint16_t mode, + qdf_dentry_t parent, u8 *value) +{ + return NULL; +} + +static inline qdf_dentry_t qdf_debugfs_create_u16(const char *name, + uint16_t mode, + qdf_dentry_t parent, + u16 *value) +{ + return NULL; +} + +static inline qdf_dentry_t qdf_debugfs_create_u32(const char *name, + uint16_t mode, + qdf_dentry_t parent, + u32 *value) +{ + return NULL; +} + +static inline qdf_dentry_t qdf_debugfs_create_u64(const char *name, + uint16_t mode, + qdf_dentry_t parent, + u64 *value) +{ + return NULL; +} + +static inline qdf_dentry_t qdf_debugfs_create_atomic(const char *name, + uint16_t mode, + qdf_dentry_t parent, + qdf_atomic_t *value) +{ + return NULL; +} + +static inline qdf_dentry_t debugfs_create_string(const char *name, + uint16_t mode, + qdf_dentry_t parent, char *str) +{ + return NULL; +} + +static inline void qdf_debugfs_remove_dir_recursive(qdf_dentry_t d) {} +static inline void qdf_debugfs_remove_dir(qdf_dentry_t d) {} +static inline void qdf_debugfs_remove_file(qdf_dentry_t d) {} + +#endif /* WLAN_DEBUGFS */ +#endif /* _QDF_DEBUGFS_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_defer.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_defer.h new file mode 100644 index 0000000000000000000000000000000000000000..2b0f418584a5c16586bea29db7b6d4c97d35d61c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_defer.h @@ -0,0 +1,310 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_defer.h + * This file abstracts deferred execution API's. + */ + +#ifndef __QDF_DEFER_H +#define __QDF_DEFER_H + +#include +#include + +/** + * TODO This implements work queues (worker threads, kernel threads etc.). + * Note that there is no cancel on a scheduled work. You cannot free a work + * item if its queued. You cannot know if a work item is queued or not unless + * its running, hence you know its not queued. + * + * so if, say, a module is asked to unload itself, how exactly will it make + * sure that the work's not queued, for OS'es that dont provide such a + * mechanism?? + */ + +/* + * Representation of a work queue. + */ +typedef __qdf_work_t qdf_work_t; +typedef __qdf_delayed_work_t qdf_delayed_work_t; +typedef __qdf_workqueue_t qdf_workqueue_t; + +/* + * Representation of a bottom half. + */ +typedef __qdf_bh_t qdf_bh_t; + +/** + * qdf_create_bh - creates the bottom half deferred handler + * @bh: pointer to bottom + * @func: deferred function to run at bottom half interrupt context. 
+ * @arg: argument for the deferred function
+ * Return: none
+ */
+static inline void
+qdf_create_bh(qdf_bh_t *bh, qdf_defer_fn_t func, void *arg)
+{
+	__qdf_init_bh(bh, func, arg);
+}
+
+/**
+ * qdf_sched_bh - schedule a bottom half (DPC)
+ * @bh: pointer to bottom half
+ * Return: none
+ */
+static inline void qdf_sched_bh(qdf_bh_t *bh)
+{
+	__qdf_sched_bh(bh);
+}
+
+/**
+ * qdf_destroy_bh - destroy the bh (synchronous)
+ * @bh: pointer to bottom half
+ * Return: none
+ */
+static inline void qdf_destroy_bh(qdf_bh_t *bh)
+{
+	__qdf_disable_bh(bh);
+}
+
+/*********************Non-Interrupt Context deferred Execution***************/
+
+/**
+ * qdf_create_work - create a work/task queue, This runs in non-interrupt
+ * context, so can be preempted by H/W & S/W intr
+ * @hdl: OS handle
+ * @work: pointer to work
+ * @func: deferred function to run at bottom half non-interrupt context.
+ * @arg: argument for the deferred function
+ *
+ * Return: QDF status
+ */
+static inline QDF_STATUS qdf_create_work(qdf_handle_t hdl, qdf_work_t *work,
+					 qdf_defer_fn_t func, void *arg)
+{
+	return __qdf_init_work(work, func, arg);
+}
+
+/**
+ * qdf_create_delayed_work - create a delayed work/task, This runs in
+ * non-interrupt context, so can be preempted by H/W & S/W intr
+ * @work: pointer to work
+ * @func: deferred function to run at bottom half non-interrupt context.
+ * @arg: argument for the deferred function + * Return: none + */ +static inline void qdf_create_delayed_work(qdf_delayed_work_t *work, + qdf_defer_fn_t func, + void *arg) +{ + __qdf_init_delayed_work(work, func, arg); +} + +/** + * qdf_create_workqueue - create a workqueue, This runs in non-interrupt + * context, so can be preempted by H/W & S/W intr + * @name: string + * Return: pointer of type qdf_workqueue_t + */ +static inline qdf_workqueue_t *qdf_create_workqueue(char *name) +{ + return __qdf_create_workqueue(name); +} + +/** + * qdf_create_singlethread_workqueue() - create a single threaded workqueue + * @name: string + * + * This API creates a dedicated work queue with a single worker thread to avoid + * wasting unnecessary resources when works which needs to be submitted in this + * queue are not very critical and frequent. + * + * Return: pointer of type qdf_workqueue_t + */ +static inline qdf_workqueue_t *qdf_create_singlethread_workqueue(char *name) +{ + return __qdf_create_singlethread_workqueue(name); +} + +/** + * qdf_alloc_unbound_workqueue - allocate an unbound workqueue + * @name: string + * + * Return: pointer of type qdf_workqueue_t + */ +static inline qdf_workqueue_t *qdf_alloc_unbound_workqueue(char *name) +{ + return __qdf_alloc_unbound_workqueue(name); +} + +/** + * qdf_queue_work - Queue the work/task + * @hdl: OS handle + * @wqueue: pointer to workqueue + * @work: pointer to work + * Return: none + */ +static inline void +qdf_queue_work(qdf_handle_t hdl, qdf_workqueue_t *wqueue, qdf_work_t *work) +{ + return __qdf_queue_work(wqueue, work); +} + +/** + * qdf_queue_delayed_work - Queue the delayed work/task + * @wqueue: pointer to workqueue + * @work: pointer to work + * @delay: delay interval in milliseconds + * Return: none + */ +static inline void qdf_queue_delayed_work(qdf_workqueue_t *wqueue, + qdf_delayed_work_t *work, + uint32_t delay) +{ + return __qdf_queue_delayed_work(wqueue, work, delay); +} + +/** + * qdf_flush_workqueue - 
flush the workqueue
+ * @hdl: OS handle
+ * @wqueue: pointer to workqueue
+ * Return: none
+ */
+static inline void qdf_flush_workqueue(qdf_handle_t hdl,
+				       qdf_workqueue_t *wqueue)
+{
+	return __qdf_flush_workqueue(wqueue);
+}
+
+/**
+ * qdf_destroy_workqueue - Destroy the workqueue
+ * @hdl: OS handle
+ * @wqueue: pointer to workqueue
+ * Return: none
+ */
+static inline void qdf_destroy_workqueue(qdf_handle_t hdl,
+					 qdf_workqueue_t *wqueue)
+{
+	return __qdf_destroy_workqueue(wqueue);
+}
+
+/**
+ * qdf_sched_work - Schedule a deferred task on non-interrupt context
+ * @hdl: OS handle
+ * @work: pointer to work
+ * Return: none
+ */
+static inline void qdf_sched_work(qdf_handle_t hdl, qdf_work_t *work)
+{
+	__qdf_sched_work(work);
+}
+
+/**
+ * qdf_sched_delayed_work() - Schedule a delayed task
+ * @work: pointer to delayed work
+ * @delay: delay interval in milliseconds
+ * Return: none
+ */
+static inline void
+qdf_sched_delayed_work(qdf_delayed_work_t *work, uint32_t delay)
+{
+	__qdf_sched_delayed_work(work, delay);
+}
+
+/**
+ * qdf_cancel_work() - Cancel a work
+ * @work: pointer to work
+ *
+ * Cancel work and wait for its execution to finish.
+ * This function can be used even if the work re-queues
+ * itself or migrates to another workqueue. On return
+ * from this function, work is guaranteed to be not
+ * pending or executing on any CPU. The caller must
+ * ensure that the workqueue on which work was last
+ * queued can't be destroyed before this function returns.
+ *
+ * Return: true if work was pending, false otherwise
+ */
+static inline bool qdf_cancel_work(qdf_work_t *work)
+{
+	return __qdf_cancel_work(work);
+}
+
+/**
+ * qdf_cancel_delayed_work() - Cancel a delayed work
+ * @work: pointer to delayed work
+ *
+ * This is qdf_cancel_work for delayed works.
+ * + * Return: true if work was pending, false otherwise + */ +static inline bool qdf_cancel_delayed_work(qdf_delayed_work_t *work) +{ + return __qdf_cancel_delayed_work(work); +} + +/** + * qdf_flush_work - Flush a deferred task on non-interrupt context + * @work: pointer to work + * + * Wait until work has finished execution. work is guaranteed to be + * idle on return if it hasn't been requeued since flush started. + * + * Return: none + */ +static inline void qdf_flush_work(qdf_work_t *work) +{ + __qdf_flush_work(work); +} + +/** + * qdf_flush_delayed_work() - Flush a delayed work + * @work: pointer to delayed work + * + * This is qdf_flush_work for delayed works. + * + * Return: none + */ +static inline void qdf_flush_delayed_work(qdf_delayed_work_t *work) +{ + __qdf_flush_delayed_work(work); +} + +/** + * qdf_disable_work - disable the deferred task (synchronous) + * @work: pointer to work + * Return: unsigned int + */ +static inline uint32_t qdf_disable_work(qdf_work_t *work) +{ + return __qdf_disable_work(work); +} + +/** + * qdf_destroy_work - destroy the deferred task (synchronous) + * @hdl: OS handle + * @work: pointer to work + * Return: none + */ +static inline void qdf_destroy_work(qdf_handle_t hdl, qdf_work_t *work) +{ + __qdf_disable_work(work); +} + +#endif /*_QDF_DEFER_H*/ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_event.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_event.h new file mode 100644 index 0000000000000000000000000000000000000000..27de4c79369c114c82fd985dace2beaa3c157d96 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_event.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_event.h + * This file provides OS abstraction for event APIs. + */ + +#if !defined(__QDF_EVENT_H) +#define __QDF_EVENT_H + +/* Include Files */ +#include "qdf_status.h" +#include +#include +#include +#include + +/* Preprocessor definitions and constants */ +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +typedef __qdf_event_t qdf_event_t; +/* Function declarations and documenation */ + +QDF_STATUS qdf_event_create(qdf_event_t *event); + +QDF_STATUS qdf_event_set(qdf_event_t *event); + +QDF_STATUS qdf_event_reset(qdf_event_t *event); + +QDF_STATUS qdf_event_destroy(qdf_event_t *event); + +QDF_STATUS qdf_wait_single_event(qdf_event_t *event, + uint32_t timeout); + +/** + * qdf_complete_wait_events() - Sets all the events which are in the list. + * + * This function traverses the list of events and sets all of them. It + * sets the flag force_set as TRUE to indicate that these events have + * been forcefully set. + * + * Return: None + */ +void qdf_complete_wait_events(void); + +/** + * qdf_wait_for_event_completion() - Waits for an event to be set. + * @event: Pointer to an event to wait on. + * @timeout: Timeout value (in milliseconds). + * + * This function adds the event in a list and waits on it until it + * is set or the timeout duration elapses. The purpose of waiting + * is considered complete only if the event is set and the flag + * force_set is FALSE, it returns success in this case. 
In other + * cases it returns appropriate error status. + * + * Return: QDF status + */ +QDF_STATUS qdf_wait_for_event_completion(qdf_event_t *event, + uint32_t timeout); + +/** + * qdf_event_list_init() - Creates a list and spinlock for events. + * + * This function creates a list for maintaining events on which threads + * wait for completion. A spinlock is also created to protect related + * operations. + * + * Return: None + */ +void qdf_event_list_init(void); + +/** + * qdf_event_list_destroy() - Destroys list and spinlock created for events. + * + * This function destroys the list and spinlock created for events on which + * threads wait for completion. + * + * Return: None + */ +void qdf_event_list_destroy(void); + +/** + * qdf_exit_thread() - exit thread execution + * @status: QDF status + * + * Return: QDF status + */ +QDF_STATUS qdf_exit_thread(QDF_STATUS status); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* __QDF_EVENT_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_file.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_file.h new file mode 100644 index 0000000000000000000000000000000000000000..a90b921400101e3d93114a0807332ca48ea91d5e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_file.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Thin filesystem API abstractions + */ + +#ifndef __QDF_FILE_H +#define __QDF_FILE_H + +#include "qdf_status.h" + +/** + * qdf_file_read() - read the entire contents of a file + * @path: the full path of the file to read + * @out_buf: double pointer for referring to the file contents buffer + * + * This API allocates a new, null-terminated buffer containing the contents of + * the file at @path. On success, @out_buf points to this new buffer, otherwise + * @out_buf is set to NULL. + * + * Consumers must free the allocated buffer by calling qdf_file_buf_free(). + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_file_read(const char *path, char **out_buf); + +/** + * qdf_file_buf_free() - free a previously allocated file buffer + * @file_buf: pointer to the file buffer to free + * + * This API is used in conjunction with qdf_file_read(). + * + * Return: None + */ +void qdf_file_buf_free(char *file_buf); + +#endif /* __QDF_FILE_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_flex_mem.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_flex_mem.h new file mode 100644 index 0000000000000000000000000000000000000000..80842e9c58d8ca62e9646269c10ca34ed1122731 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_flex_mem.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_flex_mem (flexibly sized memory allocator) + * QCA driver framework (QDF) flex mem APIs + * + * A flex memory allocator is a memory pool which not only dynamically expands, + * but also dynamically reduces as well. Benefits over full dynamic memory + * allocation are amoritized allocation cost, and reduced memory fragmentation. + * + * The allocator consists of 3 parts: the pool, segments, and items. Items are + * the smallest chuncks of memory that are handed out via the alloc call, and + * are all of a uniform size. Segments are groups of items, representing the + * smallest amount of memory that can be dynamically allocated or freed. A pool + * is simply a collection of segments. 
+ */ + +#ifndef __QDF_FLEX_MEM_H +#define __QDF_FLEX_MEM_H + +#include "qdf_list.h" +#include "qdf_lock.h" + +#define QDF_FM_BITMAP uint32_t +#define QDF_FM_BITMAP_BITS (sizeof(QDF_FM_BITMAP) * 8) + +/** + * qdf_flex_mem_pool - a pool of memory segments + * @seg_list: the list containing the memory segments + * @lock: spinlock for protecting internal data structures + * @reduction_limit: the minimum number of segments to keep during reduction + * @item_size: the size of the items the pool will allocate + */ +struct qdf_flex_mem_pool { + qdf_list_t seg_list; + struct qdf_spinlock lock; + uint16_t reduction_limit; + uint16_t item_size; +}; + +/** + * qdf_flex_mem_segment - a memory pool segment + * @node: the list node for membership in the memory pool + * @dynamic: true if this segment was dynamically allocated + * @used_bitmap: bitmap for tracking which items in the segment are in use + * @bytes: raw memory for allocating items from + */ +struct qdf_flex_mem_segment { + qdf_list_node_t node; + bool dynamic; + QDF_FM_BITMAP used_bitmap; + uint8_t *bytes; +}; + +/** + * DEFINE_QDF_FLEX_MEM_POOL() - define a new flex mem pool with one segment + * @name: the name of the pool variable + * @size_of_item: size of the items the pool will allocate + * @rm_limit: min number of segments to keep during reduction + */ +#define DEFINE_QDF_FLEX_MEM_POOL(name, size_of_item, rm_limit) \ + struct qdf_flex_mem_pool name; \ + uint8_t __ ## name ## _head_bytes[QDF_FM_BITMAP_BITS * (size_of_item)];\ + struct qdf_flex_mem_segment __ ## name ## _head = { \ + .node = QDF_LIST_NODE_INIT_SINGLE( \ + QDF_LIST_ANCHOR(name.seg_list)), \ + .bytes = __ ## name ## _head_bytes, \ + }; \ + struct qdf_flex_mem_pool name = { \ + .seg_list = QDF_LIST_INIT_SINGLE(__ ## name ## _head.node), \ + .reduction_limit = (rm_limit), \ + .item_size = (size_of_item), \ + } + +/** + * qdf_flex_mem_init() - initialize a qdf_flex_mem_pool + * @pool: the pool to initialize + * + * Return: None + */ +void 
qdf_flex_mem_init(struct qdf_flex_mem_pool *pool); + +/** + * qdf_flex_mem_deinit() - deinitialize a qdf_flex_mem_pool + * @pool: the pool to deinitialize + * + * Return: None + */ +void qdf_flex_mem_deinit(struct qdf_flex_mem_pool *pool); + +/** + * qdf_flex_mem_alloc() - logically allocate memory from the pool + * @pool: the pool to allocate from + * + * This function returns any unused item from any existing segment in the pool. + * If there are no unused items in the pool, a new segment is dynamically + * allocated to service the request. The size of the allocated memory is the + * size originally used to create the pool. + * + * Return: Point to newly allocated memory, NULL on failure + */ +void *qdf_flex_mem_alloc(struct qdf_flex_mem_pool *pool); + +/** + * qdf_flex_mem_free() - logically frees @ptr from the pool + * @pool: the pool to return the memory to + * @ptr: a pointer received via a call to qdf_flex_mem_alloc() + * + * This function marks the item corresponding to @ptr as unused. If that item + * was the last used item in the segment it belongs to, and the segment was + * dynamically allocated, the segment will be freed. + * + * Return: None + */ +void qdf_flex_mem_free(struct qdf_flex_mem_pool *pool, void *ptr); + +/** + * qdf_flex_mem_release() - release unused segments + * @pool: the pool to operate against + * + * This function physically releases as much unused pool memory as possible. + * + * Return: None + */ +void qdf_flex_mem_release(struct qdf_flex_mem_pool *pool); + +#endif /* __QDF_FLEX_MEM_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_hrtimer.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_hrtimer.h new file mode 100644 index 0000000000000000000000000000000000000000..21f584a130b6e5e842b7d01c5d2bae444eef6e85 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_hrtimer.h @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_hrtimer + * This file abstracts high resolution timers running in hardware context. + */ + +#ifndef _QDF_HRTIMER_H +#define _QDF_HRTIMER_H + +#include +#include +#include + +/* Context independent hrtimer object */ +typedef __qdf_hrtimer_data_t qdf_hrtimer_data_t; + +/* Platform independent timer callback function */ +typedef enum qdf_hrtimer_restart_status(*qdf_hrtimer_func_t) + (qdf_hrtimer_data_t *timer); + +/** + * qdf_hrtimer_start() - Starts hrtimer in given context + * @timer: pointer to the qdf_hrtimer_data_t object + * @interval: interval to forward as qdf_ktime_t object + * @mode: mode of qdf_hrtimer_data_t + * + * Starts hrtimer in given context + * + * Return: void + */ +static inline +void qdf_hrtimer_start(qdf_hrtimer_data_t *timer, qdf_ktime_t interval, + enum qdf_hrtimer_mode mode) +{ + __qdf_hrtimer_start(timer, interval, mode); +} + +/** + * qdf_hrtimer_cancel() - Cancels hrtimer in given context + * @timer: pointer to the qdf_hrtimer_data_t object + * + * Cancels hrtimer in given context + * + * Return: void + */ +static inline +void qdf_hrtimer_cancel(qdf_hrtimer_data_t *timer) +{ + __qdf_hrtimer_cancel(timer); +} + +/** + * qdf_hrtimer_init() - init hrtimer based on context + * 
@timer: pointer to the qdf_hrtimer_data_t object + * @callback: callback function to be fired + * @qdf_clock_id: clock type + * @qdf_hrtimer_mode: mode of qdf_hrtimer_data_t + * @qdf_context_mode: interrupt context mode + * + * starts hrtimer in a context passed as per qdf_context_mode + * + * Return: void + */ +static inline void qdf_hrtimer_init(qdf_hrtimer_data_t *timer, + qdf_hrtimer_func_t callback, + enum qdf_clock_id clock, + enum qdf_hrtimer_mode mode, + enum qdf_context_mode ctx) +{ + __qdf_hrtimer_init(timer, callback, clock, mode, ctx); +} + +/** + * qdf_hrtimer_kill() - kills hrtimer in given context + * @timer: pointer to the hrtimer object + * + * kills hrtimer in given context + * + * Return: void + */ +static inline +void qdf_hrtimer_kill(__qdf_hrtimer_data_t *timer) +{ + __qdf_hrtimer_kill(timer); +} + +/** + * qdf_hrtimer_get_remaining() - check remaining time in the timer + * @timer: pointer to the qdf_hrtimer_data_t object + * + * check whether the timer is on one of the queues + * + * Return: remaining time as qdf_ktime_t object + */ +static inline qdf_ktime_t qdf_hrtimer_get_remaining(qdf_hrtimer_data_t *timer) +{ + return __qdf_hrtimer_get_remaining(timer); +} + +/** + * qdf_hrtimer_is_queued() - check whether the timer is on one of the queues + * @timer: pointer to the qdf_hrtimer_data_t object + * + * check whether the timer is on one of the queues + * + * Return: false when the timer was not in queue + * true when the timer was in queue + */ +static inline bool qdf_hrtimer_is_queued(qdf_hrtimer_data_t *timer) +{ + return __qdf_hrtimer_is_queued(timer); +} + +/** + * qdf_hrtimer_callback_running() - check if callback is running + * @timer: pointer to the qdf_hrtimer_data_t object + * + * check whether the timer is running the callback function + * + * Return: false when callback is not running + * true when callback is running + */ +static inline bool qdf_hrtimer_callback_running(qdf_hrtimer_data_t *timer) +{ + return 
__qdf_hrtimer_callback_running(timer); +} + +/** + * qdf_hrtimer_active() - check if timer is active + * @timer: pointer to the qdf_hrtimer_data_t object + * + * Check if timer is active. A timer is active, when it is enqueued into + * the rbtree or the callback function is running. + * + * Return: false if timer is not active + * true if timer is active + */ +static inline bool qdf_hrtimer_active(qdf_hrtimer_data_t *timer) +{ + return __qdf_hrtimer_active(timer); +} + +/** + * qdf_hrtimer_cb_get_time() - get remaining time in callback + * @timer: pointer to the qdf_hrtimer_data_t object + * + * Get remaining time in the hrtimer callback + * + * Return: time remaining as qdf_ktime_t object + */ +static inline qdf_ktime_t qdf_hrtimer_cb_get_time(qdf_hrtimer_data_t *timer) +{ + return __qdf_hrtimer_cb_get_time(timer); +} + +/** + * qdf_hrtimer_forward() - forward the hrtimer + * @timer: pointer to the qdf_hrtimer_data_t object + * @now: current time as qdf_ktime_t object + * @interval: interval to forward as qdf_ktime_t object + * + * Forward the timer expiry so it will expire in the future + * + * Return: the number of overruns + */ +static inline uint64_t qdf_hrtimer_forward(qdf_hrtimer_data_t *timer, + qdf_ktime_t now, + qdf_ktime_t interval) +{ + return __qdf_hrtimer_forward(timer, now, interval); +} + +#endif /* _QDF_HRTIMER_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_idr.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_idr.h new file mode 100644 index 0000000000000000000000000000000000000000..7ad63aca898e3c579d2740cefc1d45a95cc2282e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_idr.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_idr(ID Allocation) + * QCA driver framework (QDF) ID allocation APIs + */ + +#if !defined(__QDF_IDR_H) +#define __QDF_IDR_H + +/* Include Files */ +#include +#include +#include + +/** + * qdf_idr - platform idr object + */ +typedef __qdf_idr qdf_idr; + +/** + * qdf_idr_create() - idr initialization function + * @idp: pointer to qdf idr + * + * Return: QDF status + */ +QDF_STATUS qdf_idr_create(qdf_idr *idp); + +/** + * qdf_idr_destroy() - idr deinitialization function + * @idp: pointer to qdf idr + * + * Return: QDF status + */ +QDF_STATUS qdf_idr_destroy(qdf_idr *idp); + +/** + * qdf_idr_alloc() - Allocates an unused ID + * @idp: pointer to qdf idr + * @ptr: pointer to be associated with the new ID + * @id: pointer to return new ID + * + * Return: QDF status + */ +QDF_STATUS qdf_idr_alloc(qdf_idr *idp, void *ptr, int32_t *id); + +/** + * qdf_idr_remove() - Removes this ID from the IDR. + * @idp: pointer to qdf idr + * @id: ID to be remove + * + * Return: QDF status + */ +QDF_STATUS qdf_idr_remove(qdf_idr *idp, int32_t id); + +/** + * qdf_idr_find() - find the user pointer from the IDR by id. 
+ * @idp: pointer to qdf idr + * @id: ID to be remove + * @ptr: pointer to return user pointer for given ID + * + * Return: QDF status + */ +QDF_STATUS qdf_idr_find(qdf_idr *idp, int32_t id, void **ptr); + +#endif /* __QDF_IDR_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_ipa.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_ipa.h new file mode 100644 index 0000000000000000000000000000000000000000..3dd9b601b72256c928006804244397124ff1b10e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_ipa.h @@ -0,0 +1,647 @@ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _QDF_IPA_H +#define _QDF_IPA_H + +#ifdef IPA_OFFLOAD + +#include + +/** + * enum qdf_ipa_wlan_event - QDF IPA events + * @QDF_IPA_CLIENT_CONNECT: Client Connects + * @QDF_IPA_CLIENT_DISCONNECT: Client Disconnects + * @QDF_IPA_AP_CONNECT: SoftAP is started + * @QDF_IPA_AP_DISCONNECT: SoftAP is stopped + * @QDF_IPA_STA_CONNECT: STA associates to AP + * @QDF_IPA_STA_DISCONNECT: STA dissociates from AP + * @QDF_IPA_CLIENT_CONNECT_EX: Peer associates/re-associates to softap + * @QDF_SWITCH_TO_SCC: WLAN interfaces in scc mode + * @QDF_SWITCH_TO_MCC: WLAN interfaces in mcc mode + * @QDF_WDI_ENABLE: WDI enable complete + * @QDF_WDI_DISABLE: WDI teardown + * @QDF_FWR_SSR_BEFORE_SHUTDOWN: WLAN FW recovery + * @QDF_IPA_WLAN_EVENT_MAX: Max value for the enum + */ +typedef enum { + QDF_IPA_CLIENT_CONNECT, + QDF_IPA_CLIENT_DISCONNECT, + QDF_IPA_AP_CONNECT, + QDF_IPA_AP_DISCONNECT, + QDF_IPA_STA_CONNECT, + QDF_IPA_STA_DISCONNECT, + QDF_IPA_CLIENT_CONNECT_EX, + QDF_SWITCH_TO_SCC, + QDF_SWITCH_TO_MCC, + QDF_WDI_ENABLE, + QDF_WDI_DISABLE, + QDF_FWR_SSR_BEFORE_SHUTDOWN, + QDF_IPA_WLAN_EVENT_MAX +} qdf_ipa_wlan_event; + +/** + * qdf_ipa_wdi_meter_evt_type_t - type of event client callback is + * for AP+STA mode metering + * @IPA_GET_WDI_SAP_STATS: get IPA_stats betwen SAP and STA - + * use ipa_get_wdi_sap_stats structure + * @IPA_SET_WIFI_QUOTA: set quota limit on STA - + * use ipa_set_wifi_quota structure + */ +typedef __qdf_ipa_wdi_meter_evt_type_t qdf_ipa_wdi_meter_evt_type_t; + +typedef __qdf_ipa_get_wdi_sap_stats_t qdf_ipa_get_wdi_sap_stats_t; + +/** + * qdf_ipa_set_wifi_quota_t - structure used for + * IPA_SET_WIFI_QUOTA. + */ +typedef __qdf_ipa_set_wifi_quota_t qdf_ipa_set_wifi_quota_t; + +/** + * qdf_ipa_connect_params_t - low-level client connect input parameters. Either + * client allocates the data and desc FIFO and specifies that in data+desc OR + * specifies sizes and pipe_mem pref and IPA does the allocation. 
+ */ +typedef __qdf_ipa_connect_params_t qdf_ipa_connect_params_t; + +/** + * qdf_ipa_tx_meta_t - meta-data for the TX packet + */ +typedef __qdf_ipa_tx_meta_t qdf_ipa_tx_meta_t; + +/** + * __qdf_ipa_sps_params_t - SPS related output parameters resulting from + */ +typedef __qdf_ipa_sps_params_t qdf_ipa_sps_params_t; + +/** + * qdf_ipa_tx_intf_t - interface tx properties + */ +typedef __qdf_ipa_tx_intf_t qdf_ipa_tx_intf_t; + +/** + * qdf_ipa_rx_intf_t - interface rx properties + */ +typedef __qdf_ipa_rx_intf_t qdf_ipa_rx_intf_t; + +/** + * qdf_ipa_ext_intf_t - interface ext properties + */ +typedef __qdf_ipa_ext_intf_t qdf_ipa_ext_intf_t; + +/** + * qdf_ipa_sys_connect_params_t - information needed to setup an IPA end-point + * in system-BAM mode + */ +typedef __qdf_ipa_sys_connect_params_t qdf_ipa_sys_connect_params_t; + +/** + * __qdf_pa_rm_event_t - IPA RM events + * + * Indicate the resource state change + */ +typedef __qdf_ipa_rm_event_t qdf_ipa_rm_event_t; + +/** + * struct qdf_ipa_rm_register_params_t - information needed to + * register IPA RM client with IPA RM + */ +typedef __qdf_ipa_rm_register_params_t qdf_ipa_rm_register_params_t; + +/** + * struct qdf_ipa_rm_create_params_t - information needed to initialize + * the resource + * + * IPA RM client is expected to perform non blocking operations only + * in request_resource and release_resource functions and + * release notification context as soon as possible. 
+ */ +typedef __qdf_ipa_rm_create_params_t qdf_ipa_rm_create_params_t; + +/** + * qdf_ipa_rm_perf_profile_t - information regarding IPA RM client performance + * profile + */ +typedef __qdf_ipa_rm_perf_profile_t qdf_ipa_rm_perf_profile_t; + +/** + * qdf_ipa_tx_data_desc_t - information needed + * to send data packet to HW link: link to data descriptors + * priv: client specific private data + */ +typedef __qdf_ipa_tx_data_desc_t qdf_ipa_tx_data_desc_t; + +/** + * qdf_ipa_rx_data_t - information needed + * to send to wlan driver on receiving data from ipa hw + */ +typedef __qdf_ipa_rx_data_t qdf_ipa_rx_data_t; + +/** + * qdf_ipa_wdi_ul_params_t - WDI_RX configuration + */ +typedef __qdf_ipa_wdi_ul_params_t qdf_ipa_wdi_ul_params_t; + +/** + * qdf_ipa_wdi_ul_params_smmu_t - WDI_RX configuration (with WLAN SMMU) + */ +typedef __qdf_ipa_wdi_ul_params_smmu_t qdf_ipa_wdi_ul_params_smmu_t; + +/** + * qdf_ipa_wdi_dl_params_t - WDI_TX configuration + */ +typedef __qdf_ipa_wdi_dl_params_t qdf_ipa_wdi_dl_params_t; + +/** + * qdf_ipa_wdi_dl_params_smmu_t - WDI_TX configuration (with WLAN SMMU) + */ +typedef __qdf_ipa_wdi_dl_params_smmu_t qdf_ipa_wdi_dl_params_smmu_t; + +/** + * qdf_ipa_wdi_in_params_t - information provided by WDI client + */ +typedef __qdf_ipa_wdi_in_params_t qdf_ipa_wdi_in_params_t; + +/** + * qdf_ipa_wdi_out_params_t - information provided to WDI client + */ +typedef __qdf_ipa_wdi_out_params_t qdf_ipa_wdi_out_params_t; + +/** + * qdf_ipa_wdi_db_params_t - information provided to retrieve + * physical address of uC doorbell + */ +typedef __qdf_ipa_wdi_db_params_t qdf_ipa_wdi_db_params_t; + +/** + * qdf_ipa_wdi_uc_ready_params_t - uC ready CB parameters + */ +typedef void (*qdf_ipa_uc_ready_cb)(void *priv); +typedef __qdf_ipa_wdi_uc_ready_params_t qdf_ipa_wdi_uc_ready_params_t; + +/** + * qdf_ipa_wdi_buffer_info_t - address info of a WLAN allocated buffer + * + * IPA driver will create/release IOMMU mapping in IPA SMMU from iova->pa + */ +typedef 
__qdf_ipa_wdi_buffer_info_t qdf_ipa_wdi_buffer_info_t; + +/** + * qdf_ipa_gsi_ep_config_t - IPA GSI endpoint configurations + */ +typedef __qdf_ipa_gsi_ep_config_t qdf_ipa_gsi_ep_config_t; + +/** + * qdf_ipa_dp_evt_type_t - type of event client callback is + * invoked for on data path + * @IPA_RECEIVE: data is struct sk_buff + * @IPA_WRITE_DONE: data is struct sk_buff + */ +typedef __qdf_ipa_dp_evt_type_t qdf_ipa_dp_evt_type_t; + +typedef __qdf_ipa_hdr_add_t qdf_ipa_hdr_add_t; +typedef __qdf_ipa_hdr_del_t qdf_ipa_hdr_del_t; +typedef __qdf_ipa_ioc_add_hdr_t qdf_ipa_ioc_add_hdr_t; +typedef __qdf_ipa_ioc_del_hdr_t qdf_ipa_ioc_del_hdr_t; +typedef __qdf_ipa_ioc_get_hdr_t qdf_ipa_ioc_get_hdr_t; +typedef __qdf_ipa_ioc_copy_hdr_t qdf_ipa_ioc_copy_hdr_t; +typedef __qdf_ipa_ioc_add_hdr_proc_ctx_t qdf_ipa_ioc_add_hdr_proc_ctx_t; +typedef __qdf_ipa_ioc_del_hdr_proc_ctx_t qdf_ipa_ioc_del_hdr_proc_ctx_t; +typedef __qdf_ipa_msg_meta_t qdf_ipa_msg_meta_t; +typedef __qdf_ipa_client_type_t qdf_ipa_client_type_t; +typedef __qdf_ipa_hw_stats_wdi_info_data_t qdf_ipa_hw_stats_wdi_info_data_t; +typedef __qdf_ipa_rm_resource_name_t qdf_ipa_rm_resource_name_t; +typedef __qdf_ipa_wlan_event_t qdf_ipa_wlan_event_t; +typedef __qdf_ipa_wlan_msg_t qdf_ipa_wlan_msg_t; +typedef __qdf_ipa_wlan_msg_ex_t qdf_ipa_wlan_msg_ex_t; +typedef __qdf_ipa_ioc_tx_intf_prop_t qdf_ipa_ioc_tx_intf_prop_t; +typedef __qdf_ipa_ioc_rx_intf_prop_t qdf_ipa_ioc_rx_intf_prop_t; +typedef __qdf_ipa_wlan_hdr_attrib_val_t qdf_ipa_wlan_hdr_attrib_val_t; +typedef int (*qdf_ipa_msg_pull_fn)(void *buff, u32 len, u32 type); +typedef void (*qdf_ipa_ready_cb)(void *user_data); + +#define QDF_IPA_SET_META_MSG_TYPE(meta, msg_type) \ + __QDF_IPA_SET_META_MSG_TYPE(meta, msg_type) + +#define QDF_IPA_RM_RESOURCE_GRANTED __QDF_IPA_RM_RESOURCE_GRANTED +#define QDF_IPA_RM_RESOURCE_RELEASED __QDF_IPA_RM_RESOURCE_RELEASED + +#define QDF_IPA_VOLTAGE_LEVEL __QDF_IPA_VOLTAGE_LEVEL + +#define QDF_IPA_RM_RESOURCE_WLAN_PROD 
__QDF_IPA_RM_RESOURCE_WLAN_PROD +#define QDF_IPA_RM_RESOURCE_WLAN_CONS __QDF_IPA_RM_RESOURCE_WLAN_CONS +#define QDF_IPA_RM_RESOURCE_APPS_CONS __QDF_IPA_RM_RESOURCE_APPS_CONS + +#define QDF_IPA_CLIENT_WLAN1_PROD __QDF_IPA_CLIENT_WLAN1_PROD +#define QDF_IPA_CLIENT_WLAN1_CONS __QDF_IPA_CLIENT_WLAN1_CONS +#define QDF_IPA_CLIENT_WLAN2_CONS __QDF_IPA_CLIENT_WLAN2_CONS +#define QDF_IPA_CLIENT_WLAN3_CONS __QDF_IPA_CLIENT_WLAN3_CONS +#define QDF_IPA_CLIENT_WLAN4_CONS __QDF_IPA_CLIENT_WLAN4_CONS + +/* + * Resume / Suspend + */ +static inline int qdf_ipa_reset_endpoint(u32 clnt_hdl) +{ + return __qdf_ipa_reset_endpoint(clnt_hdl); +} + +/* + * Remove ep delay + */ +static inline int qdf_ipa_clear_endpoint_delay(u32 clnt_hdl) +{ + return __qdf_ipa_clear_endpoint_delay(clnt_hdl); +} + +/* + * Header removal / addition + */ +static inline int qdf_ipa_add_hdr(qdf_ipa_ioc_add_hdr_t *hdrs) +{ + return __qdf_ipa_add_hdr(hdrs); +} + +static inline int qdf_ipa_del_hdr(qdf_ipa_ioc_del_hdr_t *hdls) +{ + return __qdf_ipa_del_hdr(hdls); +} + +static inline int qdf_ipa_commit_hdr(void) +{ + return __qdf_ipa_commit_hdr(); +} + +static inline int qdf_ipa_get_hdr(qdf_ipa_ioc_get_hdr_t *lookup) +{ + return __qdf_ipa_get_hdr(lookup); +} + +static inline int qdf_ipa_put_hdr(u32 hdr_hdl) +{ + return __qdf_ipa_put_hdr(hdr_hdl); +} + +static inline int qdf_ipa_copy_hdr(qdf_ipa_ioc_copy_hdr_t *copy) +{ + return __qdf_ipa_copy_hdr(copy); +} + +/* + * Messaging + */ +static inline int qdf_ipa_send_msg(qdf_ipa_msg_meta_t *meta, void *buff, + ipa_msg_free_fn callback) +{ + return __qdf_ipa_send_msg(meta, buff, callback); +} + +static inline int qdf_ipa_register_pull_msg(qdf_ipa_msg_meta_t *meta, + qdf_ipa_msg_pull_fn callback) +{ + return __qdf_ipa_register_pull_msg(meta, callback); +} + +static inline int qdf_ipa_deregister_pull_msg(qdf_ipa_msg_meta_t *meta) +{ + return __qdf_ipa_deregister_pull_msg(meta); +} + +/* + * Interface + */ +static inline int qdf_ipa_register_intf(const char *name, + const 
qdf_ipa_tx_intf_t *tx, + const qdf_ipa_rx_intf_t *rx) +{ + return __qdf_ipa_register_intf(name, tx, rx); +} + +static inline int qdf_ipa_register_intf_ext(const char *name, + const qdf_ipa_tx_intf_t *tx, + const qdf_ipa_rx_intf_t *rx, + const qdf_ipa_ext_intf_t *ext) +{ + return __qdf_ipa_register_intf_ext(name, tx, rx, ext); +} + +static inline int qdf_ipa_deregister_intf(const char *name) +{ + return __qdf_ipa_deregister_intf(name); +} + +/* + * Data path + */ +static inline int qdf_ipa_tx_dp(qdf_ipa_client_type_t dst, struct sk_buff *skb, + qdf_ipa_tx_meta_t *metadata) +{ + return __qdf_ipa_tx_dp(dst, skb, metadata); +} + +/* + * To transfer multiple data packets + */ +static inline int qdf_ipa_tx_dp_mul( + qdf_ipa_client_type_t dst, + qdf_ipa_tx_data_desc_t *data_desc) +{ + return __qdf_ipa_tx_dp_mul(dst, data_desc); +} + +static inline void qdf_ipa_free_skb(qdf_ipa_rx_data_t *rx_in) +{ + return __qdf_ipa_free_skb(rx_in);; +} + +/* + * System pipes + */ +static inline u16 qdf_ipa_get_smem_restr_bytes(void) +{ + return __qdf_ipa_get_smem_restr_bytes(); +} + +static inline int qdf_ipa_setup_sys_pipe(qdf_ipa_sys_connect_params_t *sys_in, + u32 *clnt_hdl) +{ + return __qdf_ipa_setup_sys_pipe(sys_in, clnt_hdl); +} + +static inline int qdf_ipa_teardown_sys_pipe(u32 clnt_hdl) +{ + return __qdf_ipa_teardown_sys_pipe(clnt_hdl); +} + +static inline int qdf_ipa_connect_wdi_pipe(qdf_ipa_wdi_in_params_t *in, + qdf_ipa_wdi_out_params_t *out) +{ + return __qdf_ipa_connect_wdi_pipe(in, out); +} + +static inline int qdf_ipa_disconnect_wdi_pipe(u32 clnt_hdl) +{ + return __qdf_ipa_disconnect_wdi_pipe(clnt_hdl); +} + +static inline int qdf_ipa_enable_wdi_pipe(u32 clnt_hdl) +{ + return __qdf_ipa_enable_wdi_pipe(clnt_hdl); +} + +static inline int qdf_ipa_disable_wdi_pipe(u32 clnt_hdl) +{ + return __qdf_ipa_disable_wdi_pipe(clnt_hdl); +} + +static inline int qdf_ipa_resume_wdi_pipe(u32 clnt_hdl) +{ + return __qdf_ipa_resume_wdi_pipe(clnt_hdl); +} + +static inline int 
qdf_ipa_suspend_wdi_pipe(u32 clnt_hdl) +{ + return __qdf_ipa_suspend_wdi_pipe(clnt_hdl); +} + +static inline int qdf_ipa_uc_wdi_get_dbpa( + qdf_ipa_wdi_db_params_t *out) +{ + return __qdf_ipa_uc_wdi_get_dbpa(out); +} + +static inline int qdf_ipa_uc_reg_rdyCB( + qdf_ipa_wdi_uc_ready_params_t *param) +{ + return __qdf_ipa_uc_reg_rdyCB(param); +} + +static inline int qdf_ipa_uc_dereg_rdyCB(void) +{ + return __qdf_ipa_uc_dereg_rdyCB(); +} + + +/* + * Resource manager + */ +static inline int qdf_ipa_rm_create_resource( + qdf_ipa_rm_create_params_t *create_params) +{ + return __qdf_ipa_rm_create_resource(create_params); +} + +static inline int qdf_ipa_rm_delete_resource( + qdf_ipa_rm_resource_name_t resource_name) +{ + return __qdf_ipa_rm_delete_resource(resource_name); +} + +static inline int qdf_ipa_rm_register(qdf_ipa_rm_resource_name_t resource_name, + qdf_ipa_rm_register_params_t *reg_params) +{ + return __qdf_ipa_rm_register(resource_name, reg_params); +} + +static inline int qdf_ipa_rm_set_perf_profile( + qdf_ipa_rm_resource_name_t resource_name, + qdf_ipa_rm_perf_profile_t *profile) +{ + return __qdf_ipa_rm_set_perf_profile(resource_name, profile); +} + +static inline int qdf_ipa_rm_deregister(qdf_ipa_rm_resource_name_t resource_name, + qdf_ipa_rm_register_params_t *reg_params) +{ + return __qdf_ipa_rm_deregister(resource_name, reg_params); +} + +static inline int qdf_ipa_rm_add_dependency( + qdf_ipa_rm_resource_name_t resource_name, + qdf_ipa_rm_resource_name_t depends_on_name) +{ + return __qdf_ipa_rm_add_dependency(resource_name, depends_on_name); +} + +static inline int qdf_ipa_rm_add_dependency_sync( + qdf_ipa_rm_resource_name_t resource_name, + qdf_ipa_rm_resource_name_t depends_on_name) +{ + return __qdf_ipa_rm_add_dependency_sync(resource_name, depends_on_name); +} + +static inline int qdf_ipa_rm_delete_dependency( + qdf_ipa_rm_resource_name_t resource_name, + qdf_ipa_rm_resource_name_t depends_on_name) +{ + return 
__qdf_ipa_rm_delete_dependency(resource_name, depends_on_name); +} + +static inline int qdf_ipa_rm_request_resource( + qdf_ipa_rm_resource_name_t resource_name) +{ + return __qdf_ipa_rm_request_resource(resource_name); +} + +static inline int qdf_ipa_rm_release_resource( + qdf_ipa_rm_resource_name_t resource_name) +{ + return __qdf_ipa_rm_release_resource(resource_name); +} + +static inline int qdf_ipa_rm_notify_completion(qdf_ipa_rm_event_t event, + qdf_ipa_rm_resource_name_t resource_name) +{ + return __qdf_ipa_rm_notify_completion(event, resource_name); +} + +static inline int qdf_ipa_rm_inactivity_timer_init( + qdf_ipa_rm_resource_name_t resource_name, + unsigned long msecs) +{ + return __qdf_ipa_rm_inactivity_timer_init(resource_name, msecs); +} + +static inline int qdf_ipa_rm_inactivity_timer_destroy( + qdf_ipa_rm_resource_name_t resource_name) +{ + return __qdf_ipa_rm_inactivity_timer_destroy(resource_name); +} + +static inline int qdf_ipa_rm_inactivity_timer_request_resource( + qdf_ipa_rm_resource_name_t resource_name) +{ + return __qdf_ipa_rm_inactivity_timer_request_resource(resource_name); +} + +static inline int qdf_ipa_rm_inactivity_timer_release_resource( + qdf_ipa_rm_resource_name_t resource_name) +{ + return __qdf_ipa_rm_inactivity_timer_release_resource(resource_name); +} + +/* + * Miscellaneous + */ +static inline void qdf_ipa_bam_reg_dump(void) +{ + return __qdf_ipa_bam_reg_dump(); +} + +static inline int qdf_ipa_get_wdi_stats(qdf_ipa_hw_stats_wdi_info_data_t *stats) +{ + return __qdf_ipa_get_wdi_stats(stats); +} + +static inline int qdf_ipa_get_ep_mapping(qdf_ipa_client_type_t client) +{ + return __qdf_ipa_get_ep_mapping(client); +} + +static inline bool qdf_ipa_is_ready(void) +{ + return __qdf_ipa_is_ready(); +} + +static inline void qdf_ipa_proxy_clk_vote(void) +{ + return __qdf_ipa_proxy_clk_vote(); +} + +static inline void qdf_ipa_proxy_clk_unvote(void) +{ + return __qdf_ipa_proxy_clk_unvote(); +} + +static inline bool 
qdf_ipa_is_client_handle_valid(u32 clnt_hdl) +{ + return __qdf_ipa_is_client_handle_valid(clnt_hdl); +} + +static inline qdf_ipa_client_type_t qdf_ipa_get_client_mapping(int pipe_idx) +{ + return __qdf_ipa_get_client_mapping(pipe_idx); +} + +static inline qdf_ipa_rm_resource_name_t qdf_ipa_get_rm_resource_from_ep( + int pipe_idx) +{ + return __qdf_ipa_get_rm_resource_from_ep(pipe_idx); +} + +static inline bool qdf_ipa_get_modem_cfg_emb_pipe_flt(void) +{ + return __qdf_ipa_get_modem_cfg_emb_pipe_flt(); +} + +static inline int qdf_ipa_create_wdi_mapping(u32 num_buffers, + __qdf_ipa_wdi_buffer_info_t *info) +{ + return __qdf_ipa_create_wdi_mapping(num_buffers, info); +} + +static inline int qdf_ipa_release_wdi_mapping(u32 num_buffers, + qdf_ipa_wdi_buffer_info_t *info) +{ + return __qdf_ipa_release_wdi_mapping(num_buffers, info); +} + +static inline int qdf_ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, + uint32_t agg_count) +{ + return __qdf_ipa_disable_apps_wan_cons_deaggr(agg_size, agg_count); +} + +static inline const qdf_ipa_gsi_ep_config_t *qdf_ipa_get_gsi_ep_info(qdf_ipa_client_type_t client) +{ + return __qdf_ipa_get_gsi_ep_info(client); +} + +static inline int qdf_ipa_stop_gsi_channel(u32 clnt_hdl) +{ + return __qdf_ipa_stop_gsi_channel(clnt_hdl); +} + +static inline int qdf_ipa_register_ipa_ready_cb( + void (*qdf_ipa_ready_cb)(void *user_data), + void *user_data) +{ + return __qdf_ipa_register_ipa_ready_cb(qdf_ipa_ready_cb, user_data); +} + +#ifdef FEATURE_METERING +static inline int qdf_ipa_broadcast_wdi_quota_reach_ind(uint32_t index, + uint64_t quota_bytes) +{ + return __qdf_ipa_broadcast_wdi_quota_reach_ind(index, quota_bytes); +} +#endif + +#ifdef ENABLE_SMMU_S1_TRANSLATION +/** + * qdf_get_ipa_smmu_enabled() - to get IPA SMMU enable status + * + * Return: true when IPA SMMU enabled, otherwise false + */ +static inline bool qdf_get_ipa_smmu_enabled(void) +{ + return __qdf_get_ipa_smmu_enabled(); +} +#endif + +#endif /* IPA_OFFLOAD */ +#endif /* 
_QDF_IPA_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_ipa_wdi3.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_ipa_wdi3.h new file mode 100644 index 0000000000000000000000000000000000000000..a9e0493ff18b2ae3a4a831307a306955ce193c61 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_ipa_wdi3.h @@ -0,0 +1,377 @@ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_ipa_wdi3.h + * This file provides OS abstraction for IPA WDI APIs. 
+ */ + +#ifndef _QDF_IPA_WDI3_H +#define _QDF_IPA_WDI3_H + +#ifdef IPA_OFFLOAD + +#ifdef CONFIG_IPA_WDI_UNIFIED_API + +#include +#include + +/** + * qdf_ipa_wdi_version_t - IPA WDI version + */ +typedef __qdf_ipa_wdi_version_t qdf_ipa_wdi_version_t; + +/** + * qdf_ipa_wdi_init_in_params_t - wdi init input parameters + */ +typedef __qdf_ipa_wdi_init_in_params_t qdf_ipa_wdi_init_in_params_t; + +/** + * qdf_ipa_wdi_init_out_params_t - wdi init output parameters + */ +typedef __qdf_ipa_wdi_init_out_params_t qdf_ipa_wdi_init_out_params_t; + +/** + * qdf_ipa_wdi_pipe_setup_info_smmu_t - WDI TX/Rx configuration + */ +typedef __qdf_ipa_wdi_pipe_setup_info_smmu_t qdf_ipa_wdi_pipe_setup_info_smmu_t; + +typedef __qdf_ipa_ep_cfg_t qdf_ipa_ep_cfg_t; + +/** + * qdf_ipa_wdi_init - Client should call this function to + * init WDI IPA offload data path + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_init(qdf_ipa_wdi_init_in_params_t *in, + qdf_ipa_wdi_init_out_params_t *out) +{ + return __qdf_ipa_wdi_init(in, out); +} + +/** + * qdf_ipa_wdi_cleanup - Client should call this function to + * clean up WDI IPA offload data path + * + * @Return 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_cleanup(void) +{ + return __qdf_ipa_wdi_cleanup(); +} + +/** + * qdf_ipa_wdi_hdr_info_t - Header to install on IPA HW + */ +typedef __qdf_ipa_wdi_hdr_info_t qdf_ipa_wdi_hdr_info_t; + +/** + * qdf_ipa_wdi_reg_intf_in_params_t - parameters for uC offload + * interface registration + */ +typedef __qdf_ipa_wdi_reg_intf_in_params_t qdf_ipa_wdi_reg_intf_in_params_t; + +/** + * qdf_ipa_wdi_pipe_setup_info_t - WDI TX/Rx configuration + */ +typedef __qdf_ipa_wdi_pipe_setup_info_t qdf_ipa_wdi_pipe_setup_info_t; + +/** + * qdf_ipa_wdi_conn_in_params_t - information provided by + * uC offload client + */ +typedef 
__qdf_ipa_wdi_conn_in_params_t qdf_ipa_wdi_conn_in_params_t; + +/** + * qdf_ipa_wdi_conn_out_params_t - information provided + * to WLAN druver + */ +typedef __qdf_ipa_wdi_conn_out_params_t qdf_ipa_wdi_conn_out_params_t; + +/** + * qdf_ipa_wdi_perf_profile_t - To set BandWidth profile + */ +typedef __qdf_ipa_wdi_perf_profile_t qdf_ipa_wdi_perf_profile_t; + +/** + * qdf_ipa_wdi_reg_intf - Client should call this function to + * init WDI IPA offload data path + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_reg_intf( + qdf_ipa_wdi_reg_intf_in_params_t *in) +{ + return __qdf_ipa_wdi_reg_intf(in); +} + +/** + * qdf_ipa_wdi_dereg_intf - Client Driver should call this + * function to deregister before unload and after disconnect + * + * @Return 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_dereg_intf(const char *netdev_name) +{ + return __qdf_ipa_wdi_dereg_intf(netdev_name); +} + +/** + * qdf_ipa_wdi_conn_pipes - Client should call this + * function to connect pipes + * + * @in: [in] input parameters from client + * @out: [out] output params to client + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_conn_pipes(qdf_ipa_wdi_conn_in_params_t *in, + qdf_ipa_wdi_conn_out_params_t *out) +{ + return __qdf_ipa_wdi_conn_pipes(in, out); +} + +/** + * qdf_ipa_wdi_disconn_pipes() - Client should call this + * function to disconnect pipes + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_disconn_pipes(void) +{ + return __qdf_ipa_wdi_disconn_pipes(); +} + +/** + * qdf_ipa_wdi_enable_pipes() - Client should call this + * function to enable 
IPA offload data path + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_enable_pipes(void) +{ + return __qdf_ipa_wdi_enable_pipes(); +} + +/** + * qdf_ipa_wdi_disable_pipes() - Client should call this + * function to disable IPA offload data path + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_disable_pipes(void) +{ + return __qdf_ipa_wdi_disable_pipes(); +} + +/** + * qdf_ipa_wdi_set_perf_profile() - Client should call this function to + * set IPA clock bandwidth based on data rates + * + * @profile: [in] BandWidth profile to use + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_set_perf_profile( + qdf_ipa_wdi_perf_profile_t *profile) +{ + return __qdf_ipa_wdi_set_perf_profile(profile); +} + +/** + * qdf_ipa_wdi_create_smmu_mapping() - Client should call this function to + * create smmu mapping + * + * @num_buffers: [in] number of buffers + * @info: [in] wdi buffer info + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_create_smmu_mapping(uint32_t num_buffers, + qdf_ipa_wdi_buffer_info_t *info) +{ + return __qdf_ipa_wdi_create_smmu_mapping(num_buffers, info); +} + +/** + * qdf_ipa_wdi_release_smmu_mapping() - Client should call this function to + * release smmu mapping + * + * @num_buffers: [in] number of buffers + * @info: [in] wdi buffer info + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi_release_smmu_mapping(uint32_t num_buffers, + qdf_ipa_wdi_buffer_info_t *info) +{ + return __qdf_ipa_wdi_release_smmu_mapping(num_buffers, info); +} + +#else /* CONFIG_IPA_WDI_UNIFIED_API */ + +#include + +/** + * qdf_ipa_wdi3_hdr_info_t - Header to install on IPA HW + */ +typedef __qdf_ipa_wdi3_hdr_info_t qdf_ipa_wdi3_hdr_info_t; + +/** + * qdf_ipa_wdi3_reg_intf_in_params_t - 
parameters for uC offload + * interface registration + */ +typedef __qdf_ipa_wdi3_reg_intf_in_params_t qdf_ipa_wdi3_reg_intf_in_params_t; + +/** + * qdf_ipa_wdi3_setup_info_t - WDI3 TX/Rx configuration + */ +typedef __qdf_ipa_wdi3_setup_info_t qdf_ipa_wdi3_setup_info_t; + +/** + * qdf_ipa_wdi3_conn_in_params_t - information provided by + * uC offload client + */ +typedef __qdf_ipa_wdi3_conn_in_params_t qdf_ipa_wdi3_conn_in_params_t; + +/** + * qdf_ipa_wdi3_conn_out_params_t - information provided + * to WLAN druver + */ +typedef __qdf_ipa_wdi3_conn_out_params_t qdf_ipa_wdi3_conn_out_params_t; + +/** + * qdf_ipa_wdi3_perf_profile_t - To set BandWidth profile + */ +typedef __qdf_ipa_wdi3_perf_profile_t qdf_ipa_wdi3_perf_profile_t; + +/** + * qdf_ipa_wdi3_reg_intf - Client should call this function to + * init WDI3 IPA offload data path + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi3_reg_intf( + struct ipa_wdi3_reg_intf_in_params *in) +{ + return __qdf_ipa_wdi3_reg_intf(in); +} + +/** + * qdf_ipa_wdi3_dereg_intf - Client Driver should call this + * function to deregister before unload and after disconnect + * + * @Return 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi3_dereg_intf(const char *netdev_name) +{ + return __qdf_ipa_wdi3_dereg_intf(netdev_name); +} + +/** + * qdf_ipa_wdi3_conn_pipes - Client should call this + * function to connect pipes + * + * @in: [in] input parameters from client + * @out: [out] output params to client + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi3_conn_pipes(struct ipa_wdi3_conn_in_params *in, + struct ipa_wdi3_conn_out_params *out) +{ + return __qdf_ipa_wdi3_conn_pipes(in, out); 
+} + +/** + * qdf_ipa_wdi3_disconn_pipes() - Client should call this + * function to disconnect pipes + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi3_disconn_pipes(void) +{ + return __qdf_ipa_wdi3_disconn_pipes(); +} + +/** + * qdf_ipa_wdi3_enable_pipes() - Client should call this + * function to enable IPA offload data path + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi3_enable_pipes(void) +{ + return __qdf_ipa_wdi3_enable_pipes(); +} + +/** + * qdf_ipa_wdi3_disable_pipes() - Client should call this + * function to disable IPA offload data path + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi3_disable_pipes(void) +{ + return __qdf_ipa_wdi3_disable_pipes(); +} + +/** + * qdf_ipa_wdi3_set_perf_profile() - Client should call this function to + * set IPA clock bandwidth based on data rates + * + * @profile: [in] BandWidth profile to use + * + * Returns: 0 on success, negative on failure + */ +static inline int qdf_ipa_wdi3_set_perf_profile( + struct ipa_wdi3_perf_profile *profile) +{ + return __qdf_ipa_wdi3_set_perf_profile(profile); +} + +#endif /* CONFIG_IPA_WDI_UNIFIED_API */ + +#endif /* IPA_OFFLOAD */ +#endif /* _QDF_IPA_WDI3_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_list.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_list.h new file mode 100644 index 0000000000000000000000000000000000000000..a57d423ae67af0ecd9be2d6a3b182de3a9fecb15 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_list.h @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_list.h + * QCA driver framework (QDF) list APIs + * Definitions for QDF Linked Lists API + * + * Lists are implemented as a doubly linked list. An item in a list can + * be of any type as long as the datatype contains a field of type + * qdf_link_t. + * + * In general, a list is a doubly linked list of items with a pointer + * to the front of the list and a pointer to the end of the list. The + * list items contain a forward and back link. + * + * QDF linked list APIs are NOT thread safe so make sure to use appropriate + * locking mechanisms to assure operations on the list are thread safe. + */ + +#if !defined(__QDF_LIST_H) +#define __QDF_LIST_H + +/* Include Files */ +#include +#include +#include +#include + +typedef __qdf_list_node_t qdf_list_node_t; +typedef __qdf_list_t qdf_list_t; + +/* Function declarations */ + +/** + * qdf_list_insert_before() - insert new node before the node + * @list: Pointer to list + * @new_node: Pointer to input node + * @node: node before which new node should be added. 
+ * + * Return: QDF status + */ +QDF_STATUS qdf_list_insert_before(qdf_list_t *list, + qdf_list_node_t *new_node, qdf_list_node_t *node); +/** + * qdf_list_insert_after() - insert new node after the node + * @list: Pointer to list + * @new_node: Pointer to input node + * @node: node after which new node should be added. + * + * Return: QDF status + */ +QDF_STATUS qdf_list_insert_after(qdf_list_t *list, + qdf_list_node_t *new_node, qdf_list_node_t *node); +QDF_STATUS qdf_list_insert_front(qdf_list_t *list, qdf_list_node_t *node); + +QDF_STATUS qdf_list_insert_back_size(qdf_list_t *list, qdf_list_node_t *node, + uint32_t *size); + +QDF_STATUS qdf_list_remove_front(qdf_list_t *list, qdf_list_node_t **node1); + +QDF_STATUS qdf_list_peek_next(qdf_list_t *list, qdf_list_node_t *node, + qdf_list_node_t **node1); + +/** + * qdf_list_create() - Create qdf list and initialize list head + * @list: object of list + * @max_size: max size of the list + * + * Return: none + */ +static inline void qdf_list_create(__qdf_list_t *list, uint32_t max_size) +{ + __qdf_list_create(list, max_size); +} + +#define QDF_LIST_ANCHOR(list) __QDF_LIST_ANCHOR(list) + +#define QDF_LIST_NODE_INIT(prev, next) __QDF_LIST_NODE_INIT(prev, next) +#define QDF_LIST_NODE_INIT_SINGLE(node) __QDF_LIST_NODE_INIT_SINGLE(node) + +#define QDF_LIST_INIT(tail, head) __QDF_LIST_INIT(tail, head) +#define QDF_LIST_INIT_SINGLE(node) __QDF_LIST_INIT_SINGLE(node) +#define QDF_LIST_INIT_EMPTY(list) __QDF_LIST_INIT_EMPTY(list) + +#define qdf_list_for_each(list_ptr, cursor, node_field) \ + __qdf_list_for_each(list_ptr, cursor, node_field) + +#define qdf_list_for_each_del(list_ptr, cursor, next, node_field) \ + __qdf_list_for_each_del(list_ptr, cursor, next, node_field) + +/** + * qdf_init_list_head() - initialize list head + * @list_head: pointer to list head + * + * Return: none + */ +static inline void qdf_init_list_head(__qdf_list_node_t *list_head) +{ + __qdf_init_list_head(list_head); +} + +/** + * qdf_list_destroy() 
- Destroy the list + * @list: object of list + * Return: none + */ +static inline void qdf_list_destroy(qdf_list_t *list) +{ + if (list->count != 0) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "%s: list length not equal to zero", __func__); + QDF_ASSERT(0); + } +} + +/** + * qdf_list_size() - gives the size of the list + * @list: object of list + * @size: size of the list + * Return: uint32_t + */ +static inline uint32_t qdf_list_size(qdf_list_t *list) +{ + return list->count; +} + +QDF_STATUS qdf_list_insert_back(qdf_list_t *list, qdf_list_node_t *node); + +QDF_STATUS qdf_list_remove_back(qdf_list_t *list, qdf_list_node_t **node1); + +QDF_STATUS qdf_list_peek_front(qdf_list_t *list, qdf_list_node_t **node1); + +QDF_STATUS qdf_list_remove_node(qdf_list_t *list, + qdf_list_node_t *node_to_remove); + +bool qdf_list_empty(qdf_list_t *list); + +#endif /* __QDF_LIST_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_lock.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_lock.h new file mode 100644 index 0000000000000000000000000000000000000000..a5a623d139a206aeadbf70ecaa1db92689b99b7f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_lock.h @@ -0,0 +1,497 @@ +/* + * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file qdf_lock.h + * This file abstracts locking operations. + */ + +#ifndef _QDF_LOCK_H +#define _QDF_LOCK_H + +#include +#include +#include +#include + +#ifndef QDF_LOCK_STATS +#define QDF_LOCK_STATS 0 +#endif +#ifndef QDF_LOCK_STATS_DESTROY_PRINT +#define QDF_LOCK_STATS_DESTROY_PRINT 0 +#endif +#ifndef QDF_LOCK_STATS_BUG_ON +#define QDF_LOCK_STATS_BUG_ON 0 +#endif +#ifndef QDF_LOCK_STATS_LIST +#define QDF_LOCK_STATS_LIST 0 +#endif + +/* Max hold time in micro seconds, 0 to disable detection*/ +#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ 10000 +#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH 1000000 +#define QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK 0 + +#if !QDF_LOCK_STATS +struct lock_stats {}; +#define BEFORE_LOCK(x...) do {} while (0) +#define AFTER_LOCK(x...) do {} while (0) +#define BEFORE_TRYLOCK(x...) do {} while (0) +#define AFTER_TRYLOCK(x...) do {} while (0) +#define BEFORE_UNLOCK(x...) do {} while (0) +#define qdf_lock_stats_create(x...) do {} while (0) +#define qdf_lock_stats_destroy(x...) do {} while (0) +#define qdf_lock_stats_init(x...) do {} while (0) +#define qdf_lock_stats_deinit(x...) 
do {} while (0) +#else +void qdf_lock_stats_init(void); +void qdf_lock_stats_deinit(void); +struct qdf_lock_cookie; +struct lock_stats { + const char *initialization_fn; + const char *acquired_by; + int line; + int acquired; + int contended; + uint64_t contention_time; + uint64_t non_contention_time; + uint64_t held_time; + uint64_t last_acquired; + uint64_t max_contention_wait; + uint64_t max_held_time; + int num_large_contentions; + int num_large_holds; + struct qdf_lock_cookie *cookie; +}; +#define LARGE_CONTENTION QDF_LOG_TIMESTAMP_CYCLES_PER_10_US + +#define BEFORE_LOCK(lock, was_locked) \ +do { \ + uint64_t BEFORE_LOCK_time; \ + uint64_t AFTER_LOCK_time; \ + bool BEFORE_LOCK_is_locked = was_locked; \ + BEFORE_LOCK_time = qdf_get_log_timestamp(); \ + do {} while (0) + + +#define AFTER_LOCK(lock, func) \ + lock->stats.acquired_by = func; \ + AFTER_LOCK_time = qdf_get_log_timestamp(); \ + lock->stats.acquired++; \ + lock->stats.last_acquired = AFTER_LOCK_time; \ + if (BEFORE_LOCK_is_locked) { \ + lock->stats.contended++; \ + lock->stats.contention_time += \ + (AFTER_LOCK_time - BEFORE_LOCK_time); \ + } else { \ + lock->stats.non_contention_time += \ + (AFTER_LOCK_time - BEFORE_LOCK_time); \ + } \ +\ + if (AFTER_LOCK_time - BEFORE_LOCK_time > LARGE_CONTENTION) \ + lock->stats.num_large_contentions++; \ +\ + if (AFTER_LOCK_time - BEFORE_LOCK_time > \ + lock->stats.max_contention_wait) \ + lock->stats.max_contention_wait = \ + AFTER_LOCK_time - BEFORE_LOCK_time; \ +} while (0) + +#define BEFORE_TRYLOCK(lock) \ +do { \ + uint64_t BEFORE_LOCK_time; \ + uint64_t AFTER_LOCK_time; \ + BEFORE_LOCK_time = qdf_get_log_timestamp(); \ + do {} while (0) + +#define AFTER_TRYLOCK(lock, trylock_return, func) \ + AFTER_LOCK_time = qdf_get_log_timestamp(); \ + if (trylock_return) { \ + lock->stats.acquired++; \ + lock->stats.last_acquired = AFTER_LOCK_time; \ + lock->stats.non_contention_time += \ + (AFTER_LOCK_time - BEFORE_LOCK_time); \ + lock->stats.acquired_by = func; \ + } \ 
+} while (0) + +/* max_hold_time in US */ +#define BEFORE_UNLOCK(lock, max_hold_time) \ +do {\ + uint64_t held_time = qdf_get_log_timestamp() - \ + lock->stats.last_acquired; \ + lock->stats.held_time += held_time; \ +\ + if (held_time > lock->stats.max_held_time) \ + lock->stats.max_held_time = held_time; \ +\ + if (held_time > LARGE_CONTENTION) \ + lock->stats.num_large_holds++; \ + if (QDF_LOCK_STATS_BUG_ON && max_hold_time && \ + held_time > qdf_usecs_to_log_timestamp(max_hold_time)) { \ + qdf_print("BEFORE_UNLOCK: lock held too long (%lluus)\n", \ + qdf_log_timestamp_to_usecs(held_time)); \ + QDF_BUG(0); \ + } \ + lock->stats.acquired_by = NULL; \ +} while (0) + +void qdf_lock_stats_cookie_destroy(struct lock_stats *stats); +void qdf_lock_stats_cookie_create(struct lock_stats *stats, + const char *func, int line); + +static inline void qdf_lock_stats_destroy(struct lock_stats *stats) +{ + if (QDF_LOCK_STATS_DESTROY_PRINT) { + qdf_print("%s: lock: %s %d \t" + "acquired:\t%d\tcontended:\t%d\t" + "contention_time\t%llu\tmax_contention_wait:\t%llu\t" + "non_contention_time\t%llu\t" + "held_time\t%llu\tmax_held:\t%llu\t\n" + , __func__, stats->initialization_fn, stats->line, + stats->acquired, stats->contended, + qdf_log_timestamp_to_usecs(stats->contention_time), + qdf_log_timestamp_to_usecs(stats->max_contention_wait), + qdf_log_timestamp_to_usecs(stats->non_contention_time), + qdf_log_timestamp_to_usecs(stats->held_time), + qdf_log_timestamp_to_usecs(stats->max_held_time)); + } + + if (QDF_LOCK_STATS_LIST) + qdf_lock_stats_cookie_destroy(stats); +} + +#ifndef MEMORY_DEBUG +#define qdf_mem_malloc_debug(x, y, z) qdf_mem_malloc(x) +#endif + +/* qdf_lock_stats_create() - initialize the lock stats structure + * + */ +static inline void qdf_lock_stats_create(struct lock_stats *stats, + const char *func, int line) +{ + qdf_mem_zero(stats, sizeof(*stats)); + stats->initialization_fn = func; + stats->line = line; + + if (QDF_LOCK_STATS_LIST) + 
qdf_lock_stats_cookie_create(stats, func, line); +} +#endif + +#include + +#define WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT 0 +#define WIFI_POWER_EVENT_WAKELOCK_TAKEN 0 +#define WIFI_POWER_EVENT_WAKELOCK_RELEASED 1 + +/** + * qdf_semaphore_acquire_timeout() - Take the semaphore before timeout + * @m: semaphore to take + * @timeout: maximum time to try to take the semaphore + * Return: int + */ +static inline int qdf_semaphore_acquire_timeout(struct semaphore *m, + unsigned long timeout) +{ + return __qdf_semaphore_acquire_timeout(m, timeout); +} + +struct qdf_spinlock { + __qdf_spinlock_t lock; + struct lock_stats stats; +}; + +/** + * @brief Platform spinlock object + */ +typedef struct qdf_spinlock qdf_spinlock_t; + + +/** + * @brief Platform mutex object + */ +typedef __qdf_semaphore_t qdf_semaphore_t; +typedef __qdf_mutex_t qdf_mutex_t; + +/* function Declaration */ +QDF_STATUS qdf_mutex_create(qdf_mutex_t *m, const char *func, int line); +#define qdf_mutex_create(m) qdf_mutex_create(m, __func__, __LINE__) + +QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *m); + +QDF_STATUS qdf_mutex_release(qdf_mutex_t *m); + +QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock); + +/** + * qdf_spinlock_create - Initialize a spinlock + * @lock: spinlock object pointer + * Return: none + */ +static inline void qdf_spinlock_create(qdf_spinlock_t *lock, const char *func, + int line) +{ + __qdf_spinlock_create(&lock->lock); + + /* spinlock stats create relies on the spinlock working allread */ + qdf_lock_stats_create(&lock->stats, func, line); +} + +#define qdf_spinlock_create(x) qdf_spinlock_create(x, __func__, __LINE__) + +/** + * qdf_spinlock_destroy - Delete a spinlock + * @lock: spinlock object pointer + * Return: none + */ +static inline void qdf_spinlock_destroy(qdf_spinlock_t *lock) +{ + qdf_lock_stats_destroy(&lock->stats); + __qdf_spinlock_destroy(&lock->lock); +} + +/** + * qdf_spin_is_locked() - check if the spinlock is locked + * @lock: spinlock object + * + * Return: nonzero 
if lock is held. + */ +static inline int qdf_spin_is_locked(qdf_spinlock_t *lock) +{ + return __qdf_spin_is_locked(&lock->lock); +} + +/** + * qdf_spin_trylock_bh() - spin trylock bottomhalf + * @lock: spinlock object + * + * Return: nonzero if lock is acquired + */ +static inline int qdf_spin_trylock_bh(qdf_spinlock_t *lock, const char *func) +{ + int trylock_return; + + BEFORE_TRYLOCK(lock); + trylock_return = __qdf_spin_trylock_bh(&lock->lock); + AFTER_TRYLOCK(lock, trylock_return, func); + + return trylock_return; +} +#define qdf_spin_trylock_bh(lock) qdf_spin_trylock_bh(lock, __func__) + +int qdf_spin_trylock_bh_outline(qdf_spinlock_t *lock); + +/** + * qdf_spin_lock_bh() - locks the spinlock mutex in soft irq context + * @lock: spinlock object pointer + * Return: none + */ +static inline void qdf_spin_lock_bh(qdf_spinlock_t *lock, const char *func) +{ + BEFORE_LOCK(lock, qdf_spin_is_locked(lock)); + __qdf_spin_lock_bh(&lock->lock); + AFTER_LOCK(lock, func); +} + +#define qdf_spin_lock_bh(lock) qdf_spin_lock_bh(lock, __func__) + +void qdf_spin_lock_bh_outline(qdf_spinlock_t *lock); + +/** + * qdf_spin_unlock_bh() - unlocks the spinlock mutex in soft irq context + * @lock: spinlock object pointer + * Return: none + */ +static inline void qdf_spin_unlock_bh(qdf_spinlock_t *lock) +{ + BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_BH); + __qdf_spin_unlock_bh(&lock->lock); +} + +void qdf_spin_unlock_bh_outline(qdf_spinlock_t *lock); + +/** + * qdf_spinlock_irq_exec - Execute the input function with spinlock held + * and interrupt disabled. 
+ * @hdl: OS handle + * @lock: spinlock to be held for the critical region + * @func: critical region function that to be executed + * @context: context of the critical region function + * Return: Boolean status returned by the critical region function + */ +static inline bool qdf_spinlock_irq_exec(qdf_handle_t hdl, + qdf_spinlock_t *lock, + qdf_irqlocked_func_t func, void *arg) +{ + return __qdf_spinlock_irq_exec(hdl, &lock->lock, func, arg); +} + +/** + * qdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive) + * @lock: Lock object + * + * Return: none + */ +static inline void qdf_spin_lock(qdf_spinlock_t *lock, const char *func) +{ + BEFORE_LOCK(lock, qdf_spin_is_locked(lock)); + __qdf_spin_lock(&lock->lock); + AFTER_LOCK(lock, func); +} +#define qdf_spin_lock(lock) qdf_spin_lock(lock, __func__) + +/** + * qdf_spin_unlock() - Unlock the spinlock and enables the Preemption + * @lock: Lock object + * + * Return: none + */ +static inline void qdf_spin_unlock(qdf_spinlock_t *lock) +{ + BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK); + __qdf_spin_unlock(&lock->lock); +} + +/** + * qdf_spin_lock_irq() - Acquire a Spinlock(SMP) & save the irq state + * @lock: Lock object + * @flags: flags + * + * Return: none + */ +static inline void qdf_spin_lock_irq(qdf_spinlock_t *lock, unsigned long flags, + const char *func) +{ + BEFORE_LOCK(lock, qdf_spin_is_locked(lock)); + __qdf_spin_lock_irq(&lock->lock.spinlock, flags); + AFTER_LOCK(lock, func); +} +#define qdf_spin_lock_irq(lock, flags) qdf_spin_lock_irq(lock, flags, __func__) + +/** + * qdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption + * (Preemptive) and disable IRQs + * @lock: Lock object + * + * Return: none + */ +static inline void qdf_spin_lock_irqsave(qdf_spinlock_t *lock, const char *func) +{ + BEFORE_LOCK(lock, qdf_spin_is_locked(lock)); + __qdf_spin_lock_irqsave(&lock->lock); + AFTER_LOCK(lock, func); +} +#define qdf_spin_lock_irqsave(lock) 
qdf_spin_lock_irqsave(lock, __func__) + +/** + * qdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the + * Preemption and enable IRQ + * @lock: Lock object + * + * Return: none + */ +static inline void qdf_spin_unlock_irqrestore(qdf_spinlock_t *lock) +{ + BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ); + __qdf_spin_unlock_irqrestore(&lock->lock); +} + +/** + * qdf_spin_unlock_irq() - Unlock a Spinlock(SMP) & save the restore state + * @lock: Lock object + * @flags: flags + * + * Return: none + */ +static inline void qdf_spin_unlock_irq(qdf_spinlock_t *lock, + unsigned long flags) +{ + BEFORE_UNLOCK(lock, QDF_MAX_HOLD_TIME_ALOWED_SPINLOCK_IRQ); + __qdf_spin_unlock_irq(&lock->lock.spinlock, flags); +} + +/** + * qdf_semaphore_init() - initialize a semaphore + * @m: Semaphore to initialize + * Return: None + */ +static inline void qdf_semaphore_init(qdf_semaphore_t *m) +{ + __qdf_semaphore_init(m); +} + +/** + * qdf_semaphore_acquire() - take the semaphore + * @m: Semaphore to take + * Return: int + */ +static inline int qdf_semaphore_acquire(qdf_semaphore_t *m) +{ + return __qdf_semaphore_acquire(m); +} + +/** + * qdf_semaphore_release() - give the semaphore + * @m: Semaphore to give + * Return: None + */ +static inline void qdf_semaphore_release(qdf_semaphore_t *m) +{ + __qdf_semaphore_release(m); +} + +/** + * qdf_semaphore_acquire_intr - Take the semaphore, interruptible version + * @osdev: OS Device + * @m: mutex to take + * Return: int + */ +static inline int qdf_semaphore_acquire_intr(qdf_semaphore_t *m) +{ + return __qdf_semaphore_acquire_intr(m); +} + +QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name); + +QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason); + +const char *qdf_wake_lock_name(qdf_wake_lock_t *lock); +QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock, + uint32_t msec); + +QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason); + +QDF_STATUS 
qdf_wake_lock_destroy(qdf_wake_lock_t *lock); + +QDF_STATUS qdf_runtime_pm_get(void); +QDF_STATUS qdf_runtime_pm_put(void); +QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock); +QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock); + +QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name); + +#define qdf_runtime_lock_init(lock) __qdf_runtime_lock_init(lock, #lock) + +void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock); + +QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock); + +QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock); +#endif /* _QDF_LOCK_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_lro.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_lro.h new file mode 100644 index 0000000000000000000000000000000000000000..8049cf1e8575448bec91a4ebfa1f3425c88d1849 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_lro.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Large Receive Offload API + * This file defines the Large receive offload API. 
+ */ +#ifndef _QDF_LRO_H +#define _QDF_LRO_H + +#include +#include + +/** + * @qdf_nbuf_t - Platform indepedent LRO context abstraction + */ +typedef __qdf_lro_ctx_t qdf_lro_ctx_t; + +/** + * qdf_lro_info_s - LRO information + * @iph: IP header + * @tcph: TCP header + */ +struct qdf_lro_info { + uint8_t *iph; + uint8_t *tcph; +}; + +#if defined(FEATURE_LRO) + +/** + * qdf_lro_init() - LRO initialization function + * + * Return: LRO context + */ +qdf_lro_ctx_t qdf_lro_init(void); + +/** + * qdf_lro_deinit() - LRO deinitialization function + * @lro_ctx: LRO context + * + * Return: nothing + */ +void qdf_lro_deinit(qdf_lro_ctx_t lro_ctx); + +/** + * qdf_lro_get_info() - Update the LRO information + * + * @lro_ctx: LRO context + * @nbuf: network buffer + * @info: LRO related information passed in by the caller + * @plro_desc: lro information returned as output + * + * Look-up the LRO descriptor based on the LRO information and + * the network buffer provided. Update the skb cb with the + * descriptor found + * + * Return: true: LRO eligible false: LRO ineligible + */ +bool qdf_lro_get_info(qdf_lro_ctx_t lro_ctx, qdf_nbuf_t nbuf, + struct qdf_lro_info *info, + void **plro_desc); + +/** + * qdf_lro_flush_pkt() - function to flush the LRO flow + * @info: LRO related information passed by the caller + * @lro_ctx: LRO context + * + * Flush all the packets aggregated in the LRO manager for the + * flow indicated by the TCP and IP header + * + * Return: none + */ +void qdf_lro_flush_pkt(qdf_lro_ctx_t lro_ctx, + struct qdf_lro_info *info); + +/** + * qdf_lro_flush() - LRO flush API + * @lro_ctx: LRO context + * + * Flush all the packets aggregated in the LRO manager for all + * the flows + * + * Return: none + */ +void qdf_lro_flush(qdf_lro_ctx_t lro_ctx); + +/** + * qdf_lro_desc_free() - Free the LRO descriptor + * @desc: LRO descriptor + * @lro_ctx: LRO context + * + * Return the LRO descriptor to the free pool + * + * Return: none + */ +void qdf_lro_desc_free(qdf_lro_ctx_t 
lro_ctx, void *desc); + +#else + +static inline qdf_lro_ctx_t qdf_lro_init(void) +{ + return NULL; +} + +static inline void qdf_lro_deinit(qdf_lro_ctx_t lro_ctx) +{ +} + +static inline void qdf_lro_flush(qdf_lro_ctx_t lro_ctx) +{ +} +#endif /* FEATURE_LRO */ +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_mc_timer.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_mc_timer.h new file mode 100644 index 0000000000000000000000000000000000000000..5d94df6d98352f26384ecce718adb9cb4e873e19 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_mc_timer.h @@ -0,0 +1,330 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: qdf_mc_timer + * QCA driver framework timer APIs serialized to MC thread + */ + +#if !defined(__QDF_MC_TIMER_H) +#define __QDF_MC_TIMER_H + +/* Include Files */ +#include +#include +#include +#include + +#ifdef TIMER_MANAGER +#include +#endif + +/* Preprocessor definitions and constants */ +#define QDF_TIMER_STATE_COOKIE (0x12) +#define QDF_MC_TIMER_TO_MS_UNIT (1000) +#define QDF_MC_TIMER_TO_SEC_UNIT (1000000) + +/* Type declarations */ +/* qdf Timer callback function prototype (well, actually a prototype for + * a pointer to this callback function) + */ +typedef void (*qdf_mc_timer_callback_t)(void *user_data); + +typedef enum { + QDF_TIMER_STATE_UNUSED = QDF_TIMER_STATE_COOKIE, + QDF_TIMER_STATE_STOPPED, + QDF_TIMER_STATE_STARTING, + QDF_TIMER_STATE_RUNNING, +} QDF_TIMER_STATE; + +#ifdef TIMER_MANAGER +struct qdf_mc_timer_s; +typedef struct qdf_mc_timer_node_s { + qdf_list_node_t node; + char *file_name; + uint32_t line_num; + struct qdf_mc_timer_s *qdf_timer; +} qdf_mc_timer_node_t; +#endif + +typedef struct qdf_mc_timer_s { +#ifdef TIMER_MANAGER + qdf_mc_timer_node_t *timer_node; +#endif + qdf_mc_timer_platform_t platform_info; + qdf_mc_timer_callback_t callback; + void *user_data; + qdf_mutex_t lock; + QDF_TIMER_TYPE type; + QDF_TIMER_STATE state; +} qdf_mc_timer_t; + + +void qdf_try_allowing_sleep(QDF_TIMER_TYPE type); + +/* Function declarations and documenation */ +#ifdef TIMER_MANAGER +void qdf_mc_timer_manager_init(void); +void qdf_mc_timer_manager_exit(void); +void qdf_mc_timer_check_for_leaks(void); +#else +/** + * qdf_mc_timer_manager_init() - initialize QDF debug timer manager + * This API initializes QDF timer debug functionality. 
+ * + * Return: none + */ +static inline void qdf_mc_timer_manager_init(void) +{ +} + +/** + * qdf_mc_timer_manager_exit() - exit QDF timer debug functionality + * This API exists QDF timer debug functionality + * + * Return: none + */ +static inline void qdf_mc_timer_manager_exit(void) +{ +} + +/** + * qdf_mc_timer_check_for_leaks() - Assert there are no active mc timers + * + * If there are active timers, this API prints them and panics. + * + * Return: None + */ +static inline void qdf_mc_timer_check_for_leaks(void) { } +#endif +/** + * qdf_mc_timer_get_current_state() - get the current state of the timer + * @timer: Pointer to timer object + * + * Return: + * QDF_TIMER_STATE - qdf timer state + */ + +QDF_TIMER_STATE qdf_mc_timer_get_current_state(qdf_mc_timer_t *timer); + +/** + * qdf_mc_timer_init() - initialize a QDF timer + * @timer: Pointer to timer object + * @timer_type: Type of timer + * @callback: Callback to be called after timer expiry + * @ser_data: User data which will be passed to callback function + * + * This API initializes a QDF Timer object. + * + * qdf_mc_timer_init() initializes a QDF Timer object. A timer must be + * initialized by calling qdf_mc_timer_initialize() before it may be used in + * any other timer functions. + * + * Attempting to initialize timer that is already initialized results in + * a failure. A destroyed timer object can be re-initialized with a call to + * qdf_mc_timer_init(). The results of otherwise referencing the object + * after it has been destroyed are undefined. + * + * Calls to QDF timer functions to manipulate the timer such + * as qdf_mc_timer_set() will fail if the timer is not initialized or has + * been destroyed. Therefore, don't use the timer after it has been + * destroyed until it has been re-initialized. + * + * All callback will be executed within the CDS main thread unless it is + * initialized from the Tx thread flow, in which case it will be executed + * within the tx thread flow. 
+ * + * Return: + * QDF_STATUS_SUCCESS - Timer is initialized successfully + * QDF failure status - Timer initialization failed + */ +#ifdef TIMER_MANAGER +#define qdf_mc_timer_init(timer, timer_type, callback, userdata) \ + qdf_mc_timer_init_debug(timer, timer_type, callback, userdata, \ + __FILE__, __LINE__) + +QDF_STATUS qdf_mc_timer_init_debug(qdf_mc_timer_t *timer, + QDF_TIMER_TYPE timer_type, + qdf_mc_timer_callback_t callback, + void *user_data, char *file_name, + uint32_t line_num); +#else +QDF_STATUS qdf_mc_timer_init(qdf_mc_timer_t *timer, QDF_TIMER_TYPE timer_type, + qdf_mc_timer_callback_t callback, + void *user_data); +#endif + +/** + * qdf_mc_timer_destroy() - destroy QDF timer + * @timer: Pointer to timer object + * + * qdf_mc_timer_destroy() function shall destroy the timer object. + * After a successful return from \a qdf_mc_timer_destroy() the timer + * object becomes, in effect, uninitialized. + * + * A destroyed timer object can be re-initialized by calling + * qdf_mc_timer_init(). The results of otherwise referencing the object + * after it has been destroyed are undefined. + * + * Calls to QDF timer functions to manipulate the timer, such + * as qdf_mc_timer_set() will fail if the lock is destroyed. Therefore, + * don't use the timer after it has been destroyed until it has + * been re-initialized. + * + * Return: + * QDF_STATUS_SUCCESS - Timer is initialized successfully + * QDF failure status - Timer initialization failed + */ +QDF_STATUS qdf_mc_timer_destroy(qdf_mc_timer_t *timer); + +/** + * qdf_mc_timer_start() - start a QDF Timer object + * @timer: Pointer to timer object + * @expiration_time: Time to expire + * + * qdf_mc_timer_start() function starts a timer to expire after the + * specified interval, thus running the timer callback function when + * the interval expires. + * + * A timer only runs once (a one-shot timer). To re-start the + * timer, qdf_mc_timer_start() has to be called after the timer runs + * or has been cancelled. 
+ * + * Return: + * QDF_STATUS_SUCCESS - Timer is initialized successfully + * QDF failure status - Timer initialization failed + */ +QDF_STATUS qdf_mc_timer_start(qdf_mc_timer_t *timer, uint32_t expiration_time); + +/** + * qdf_mc_timer_stop() - stop a QDF Timer + * @timer: Pointer to timer object + * qdf_mc_timer_stop() function stops a timer that has been started but + * has not expired, essentially cancelling the 'start' request. + * + * After a timer is stopped, it goes back to the state it was in after it + * was created and can be started again via a call to qdf_mc_timer_start(). + * + * Return: + * QDF_STATUS_SUCCESS - Timer is initialized successfully + * QDF failure status - Timer initialization failed + */ +QDF_STATUS qdf_mc_timer_stop(qdf_mc_timer_t *timer); + +/** + * qdf_mc_timer_stop_sync() - stop a QDF Timer + * @timer: Pointer to timer object + * qdf_mc_timer_stop_sync() function stops a timer synchronously + * that has been started but has not expired, essentially + * cancelling the 'start' request. + * + * After a timer is stopped, it goes back to the state it was in after it + * was created and can be started again via a call to qdf_mc_timer_start(). + * + * Return: + * QDF_STATUS_SUCCESS - Timer is initialized successfully + * QDF failure status - Timer initialization failed + */ +QDF_STATUS qdf_mc_timer_stop_sync(qdf_mc_timer_t *timer); + +/** + * qdf_mc_timer_get_system_ticks() - get the system time in 10ms ticks + * + * qdf_mc_timer_get_system_ticks() function returns the current number + * of timer ticks in 10msec intervals. This function is suitable timestamping + * and calculating time intervals by calculating the difference between two + * timestamps. + * + * Return: + * The current system tick count (in 10msec intervals). This + * function cannot fail. 
+ */ +unsigned long qdf_mc_timer_get_system_ticks(void); + +/** + * qdf_mc_timer_get_system_time() - Get the system time in milliseconds + * + * qdf_mc_timer_get_system_time() function returns the number of milliseconds + * that have elapsed since the system was started + * + * Return: + * The current system time in milliseconds + */ +unsigned long qdf_mc_timer_get_system_time(void); + +/** + * qdf_get_monotonic_boottime_ns() - Get kernel boottime in ns + * + * Return: kernel boottime in nano sec (includes time spent in suspend) + */ +s64 qdf_get_monotonic_boottime_ns(void); + +/** + * qdf_timer_module_init() - initializes a QDF timer module. + * + * This API initializes the QDF timer module. This needs to be called + * exactly once prior to using any QDF timers. + * + * Return: none + */ +void qdf_timer_module_init(void); + +/** + * qdf_get_time_of_the_day_ms() - get time of the day in millisec + * + * Return: time of the day in ms + */ +qdf_time_t qdf_get_time_of_the_day_ms(void); + +/** + * qdf_timer_module_deinit() - Deinitializes a QDF timer module. + * + * This API deinitializes the QDF timer module. 
+ * Return: none + */ +void qdf_timer_module_deinit(void); + +/** + * qdf_get_time_of_the_day_in_hr_min_sec_usec() - Get system time + * @tbuf: Pointer to time stamp buffer + * @len: Time buffer size + * + * This function updates the 'tbuf' with system time in hr:min:sec:msec format + * + * Return: None + */ +void qdf_get_time_of_the_day_in_hr_min_sec_usec(char *tbuf, int len); + +void qdf_register_mc_timer_callback(void (*callback) (qdf_mc_timer_t *data)); + +/** + * qdf_timer_set_multiplier() - set the global QDF timer scalar value + * @multiplier: the scalar value to apply + * + * Return: None + */ +void qdf_timer_set_multiplier(uint32_t multiplier); + +/** + * qdf_timer_get_multiplier() - get the global QDF timer scalar value + * + * Return: the global QDF timer scalar value + */ +uint32_t qdf_timer_get_multiplier(void); + +#endif /* __QDF_MC_TIMER_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_mem.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_mem.h new file mode 100644 index 0000000000000000000000000000000000000000..f3ff81606b9b36919556219e09d79a9521461c27 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_mem.h @@ -0,0 +1,641 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_mem + * QCA driver framework (QDF) memory management APIs + */ + +#if !defined(__QDF_MEMORY_H) +#define __QDF_MEMORY_H + +/* Include Files */ +#include +#include + +#define QDF_CACHE_LINE_SZ __qdf_cache_line_sz + +/** + * qdf_align() - align to the given size. + * @a: input that needs to be aligned. + * @align_size: boundary on which 'a' has to be alinged. + * + * Return: aligned value. + */ +#define qdf_align(a, align_size) __qdf_align(a, align_size) + +/** + * struct qdf_mem_dma_page_t - Allocated dmaable page + * @page_v_addr_start: Page start virtual address + * @page_v_addr_end: Page end virtual address + * @page_p_addr: Page start physical address + */ +struct qdf_mem_dma_page_t { + char *page_v_addr_start; + char *page_v_addr_end; + qdf_dma_addr_t page_p_addr; +}; + +/** + * struct qdf_mem_multi_page_t - multiple page allocation information storage + * @num_element_per_page: Number of element in single page + * @num_pages: Number of allocation needed pages + * @dma_pages: page information storage in case of coherent memory + * @cacheable_pages: page information storage in case of cacheable memory + */ +struct qdf_mem_multi_page_t { + uint16_t num_element_per_page; + uint16_t num_pages; + struct qdf_mem_dma_page_t *dma_pages; + void **cacheable_pages; +}; + + +/* Preprocessor definitions and constants */ + +typedef __qdf_mempool_t qdf_mempool_t; + +/** + * qdf_mem_init() - Initialize QDF memory module + * + * Return: None + * + */ +void qdf_mem_init(void); + +/** + * qdf_mem_exit() - Exit QDF memory module + * + * Return: None + * + */ +void qdf_mem_exit(void); + +#define QDF_MEM_FILE_NAME_SIZE 48 + 
+#ifdef MEMORY_DEBUG
+/**
+ * qdf_mem_malloc_debug() - debug version of QDF memory allocation API
+ * @size: Number of bytes of memory to allocate.
+ * @file: File name of the call site
+ * @line: Line number of the call site
+ * @caller: Address of the caller function
+ * @flag: GFP flag
+ *
+ * This function will dynamically allocate the specified number of bytes of
+ * memory and add it to the qdf tracking list to check for memory leaks and
+ * corruptions
+ *
+ * Return: A valid memory location on success, or NULL on failure
+ */
+void *qdf_mem_malloc_debug(size_t size, const char *file, uint32_t line,
+ void *caller, uint32_t flag);
+
+#define qdf_mem_malloc(size) \
+ qdf_mem_malloc_debug(size, __FILE__, __LINE__, QDF_RET_IP, 0)
+
+#define qdf_mem_malloc_atomic(size) \
+ qdf_mem_malloc_debug(size, __FILE__, __LINE__, QDF_RET_IP, GFP_ATOMIC)
+/**
+ * qdf_mem_free_debug() - debug version of qdf_mem_free
+ * @ptr: Pointer to the starting address of the memory to be freed.
+ *
+ * This function will free the memory pointed to by 'ptr'. It also checks for
+ * memory corruption, underrun, overrun, double free, domain mismatch, etc.
+ *
+ * Return: none
+ */
+void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line);
+
+#define qdf_mem_free(ptr) \
+ qdf_mem_free_debug(ptr, __FILE__, __LINE__)
+
+/**
+ * qdf_mem_check_for_leaks() - Assert that the current memory domain is empty
+ *
+ * Call this to ensure there are no active memory allocations being tracked
+ * against the current debug domain. For example, one should call this function
+ * immediately before a call to qdf_debug_domain_set() as a memory leak
+ * detection mechanism.
+ *
+ * e.g.
+ * qdf_debug_domain_set(QDF_DEBUG_DOMAIN_ACTIVE);
+ *
+ * ...
+ *
+ * // memory is allocated and freed
+ *
+ * ...
+ *
+ * // before transitioning back to inactive state,
+ * // make sure all active memory has been freed
+ * qdf_mem_check_for_leaks();
+ * qdf_debug_domain_set(QDF_DEBUG_DOMAIN_INIT);
+ *
+ * ... 
+ *
+ * // also, before program exit, make sure init time memory is freed
+ * qdf_mem_check_for_leaks();
+ * exit();
+ *
+ * Return: None
+ */
+void qdf_mem_check_for_leaks(void);
+
+/**
+ * qdf_mem_alloc_consistent_debug() - allocates consistent qdf memory
+ * @osdev: OS device handle
+ * @dev: Pointer to device handle
+ * @size: Size to be allocated
+ * @paddr: Physical address
+ * @file: file name of the call site
+ * @line: line number of the call site
+ * @caller: Address of the caller function
+ * @flag: GFP flag
+ *
+ * Return: pointer of allocated memory or null if memory alloc fails
+ */
+void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
+ qdf_size_t size, qdf_dma_addr_t *paddr,
+ const char *file, uint32_t line,
+ void *caller);
+
+#define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
+ qdf_mem_alloc_consistent_debug(osdev, dev, size, paddr, \
+ __FILE__, __LINE__, QDF_RET_IP)
+
+/**
+ * qdf_mem_free_consistent_debug() - free consistent qdf memory
+ * @osdev: OS device handle
+ * @size: Size to be allocated
+ * @vaddr: virtual address
+ * @paddr: Physical address
+ * @memctx: Pointer to DMA context
+ * @file: file name of the call site
+ * @line: line number of the call site
+ *
+ * Return: none
+ */
+void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
+ qdf_size_t size, void *vaddr,
+ qdf_dma_addr_t paddr,
+ qdf_dma_context_t memctx,
+ const char *file, uint32_t line);
+
+#define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
+ qdf_mem_free_consistent_debug(osdev, dev, size, vaddr, paddr, memctx, \
+ __FILE__, __LINE__)
+#else
+void *qdf_mem_malloc(qdf_size_t size);
+void *qdf_mem_malloc_atomic(qdf_size_t size);
+
+/**
+ * qdf_mem_free() - free QDF memory
+ * @ptr: Pointer to the starting address of the memory to be freed. 
+ * + * Return: None + */ +void qdf_mem_free(void *ptr); + +static inline void qdf_mem_check_for_leaks(void) { } + +void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev, + qdf_size_t size, qdf_dma_addr_t *paddr); + +void qdf_mem_free_consistent(qdf_device_t osdev, void *dev, + qdf_size_t size, void *vaddr, + qdf_dma_addr_t paddr, qdf_dma_context_t memctx); + +#endif /* MEMORY_DEBUG */ + +void *qdf_mem_alloc_outline(qdf_device_t osdev, qdf_size_t size); + +void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value); + +void qdf_mem_zero(void *ptr, uint32_t num_bytes); + +void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes); + +void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes); + +void qdf_mem_free_outline(void *buf); + +void qdf_mem_zero_outline(void *buf, qdf_size_t size); + +void qdf_ether_addr_copy(void *dst_addr, const void *src_addr); + +/** + * qdf_mem_cmp() - memory compare + * @memory1: pointer to one location in memory to compare. + * @memory2: pointer to second location in memory to compare. + * @num_bytes: the number of bytes to compare. + * + * Function to compare two pieces of memory, similar to memcmp function + * in standard C. + * Return: + * int32_t - returns an int value that tells if the memory + * locations are equal or not equal. + * 0 -- equal + * < 0 -- *memory1 is less than *memory2 + * > 0 -- *memory1 is bigger than *memory2 + */ +static inline int32_t qdf_mem_cmp(const void *memory1, const void *memory2, + uint32_t num_bytes) +{ + return __qdf_mem_cmp(memory1, memory2, num_bytes); +} + +/** + * qdf_mem_map_nbytes_single - Map memory for DMA + * @osdev: pomter OS device context + * @buf: pointer to memory to be dma mapped + * @dir: DMA map direction + * @nbytes: number of bytes to be mapped. + * @phy_addr: ponter to recive physical address. 
+ *
+ * Return: success/failure
+ */
+static inline uint32_t qdf_mem_map_nbytes_single(qdf_device_t osdev, void *buf,
+ qdf_dma_dir_t dir, int nbytes,
+ qdf_dma_addr_t *phy_addr)
+{
+#if defined(HIF_PCI)
+ return __qdf_mem_map_nbytes_single(osdev, buf, dir, nbytes, phy_addr);
+#else
+ return 0;
+#endif
+}
+
+/**
+ * qdf_mem_unmap_nbytes_single() - unmap memory for DMA
+ * @osdev: pointer to OS device context
+ * @phy_addr: physical address of memory to be dma unmapped
+ * @dir: DMA unmap direction
+ * @nbytes: number of bytes to be unmapped.
+ *
+ * Return: none
+ */
+static inline void qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
+ qdf_dma_addr_t phy_addr,
+ qdf_dma_dir_t dir,
+ int nbytes)
+{
+#if defined(HIF_PCI)
+ __qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
+#endif
+}
+
+/**
+ * qdf_mempool_init - Create and initialize memory pool
+ * @osdev: platform device object
+ * @pool_addr: address of the pool created
+ * @elem_cnt: no. of elements in pool
+ * @elem_size: size of each pool element in bytes
+ * @flags: flags
+ * Return: Handle to memory pool or NULL if allocation failed
+ */
+static inline int qdf_mempool_init(qdf_device_t osdev,
+ qdf_mempool_t *pool_addr, int elem_cnt,
+ size_t elem_size, uint32_t flags)
+{
+ return __qdf_mempool_init(osdev, pool_addr, elem_cnt, elem_size,
+ flags);
+}
+
+/**
+ * qdf_mempool_destroy - Destroy memory pool
+ * @osdev: platform device object
+ * @pool: Handle to memory pool
+ * Return: none
+ */
+static inline void qdf_mempool_destroy(qdf_device_t osdev, qdf_mempool_t pool)
+{
+ __qdf_mempool_destroy(osdev, pool);
+}
+
+/**
+ * qdf_mempool_alloc - Allocate an element from the memory pool
+ * @osdev: platform device object
+ * @pool: Handle to memory pool
+ * Return: Pointer to the allocated element or NULL if the pool is empty
+ */
+static inline void *qdf_mempool_alloc(qdf_device_t osdev, qdf_mempool_t pool)
+{
+ return (void *)__qdf_mempool_alloc(osdev, pool);
+}
+
+/**
+ * qdf_mempool_free - Free a memory pool element
+ * 
@osdev: Platform device object + * @pool: Handle to memory pool + * @buf: Element to be freed + * Return: none + */ +static inline void qdf_mempool_free(qdf_device_t osdev, qdf_mempool_t pool, + void *buf) +{ + __qdf_mempool_free(osdev, pool, buf); +} + +void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev, + qdf_dma_addr_t bus_addr, + qdf_size_t size, + __dma_data_direction direction); + +void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev, + qdf_dma_addr_t bus_addr, + qdf_size_t size, + __dma_data_direction direction); + +void qdf_mem_multi_pages_alloc(qdf_device_t osdev, + struct qdf_mem_multi_page_t *pages, + size_t element_size, uint16_t element_num, + qdf_dma_context_t memctxt, bool cacheable); +void qdf_mem_multi_pages_free(qdf_device_t osdev, + struct qdf_mem_multi_page_t *pages, + qdf_dma_context_t memctxt, bool cacheable); +int qdf_mem_multi_page_link(qdf_device_t osdev, + struct qdf_mem_multi_page_t *pages, + uint32_t elem_size, uint32_t elem_count, uint8_t cacheable); +/** + * qdf_mem_skb_inc() - increment total skb allocation size + * @size: size to be added + * + * Return: none + */ +void qdf_mem_skb_inc(qdf_size_t size); + +/** + * qdf_mem_skb_dec() - decrement total skb allocation size + * @size: size to be decremented + * + * Return: none + */ +void qdf_mem_skb_dec(qdf_size_t size); + +/** + * qdf_mem_map_table_alloc() - Allocate shared memory info structure + * @num: number of required storage + * + * Allocate mapping table for DMA memory allocation. This is needed for + * IPA-WLAN buffer sharing when SMMU Stage1 Translation is enabled. 
+ * + * Return: shared memory info storage table pointer + */ +static inline qdf_mem_info_t *qdf_mem_map_table_alloc(uint32_t num) +{ + qdf_mem_info_t *mem_info_arr; + + mem_info_arr = qdf_mem_malloc(num * sizeof(mem_info_arr[0])); + return mem_info_arr; +} + +/** + * qdf_update_mem_map_table() - Update DMA memory map info + * @osdev: Parent device instance + * @mem_info: Pointer to shared memory information + * @dma_addr: dma address + * @mem_size: memory size allocated + * + * Store DMA shared memory information + * + * Return: none + */ +static inline void qdf_update_mem_map_table(qdf_device_t osdev, + qdf_mem_info_t *mem_info, + qdf_dma_addr_t dma_addr, + uint32_t mem_size) +{ + if (!mem_info) { + __qdf_print("%s: NULL mem_info\n", __func__); + return; + } + + __qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size); +} + +/** + * qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status + * @osdev parent device instance + * + * Return: true if smmu s1 enabled, false if smmu s1 is bypassed + */ +static inline bool qdf_mem_smmu_s1_enabled(qdf_device_t osdev) +{ + return __qdf_mem_smmu_s1_enabled(osdev); +} + +/** + * qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma address + * @osdev: Parent device instance + * @dma_addr: DMA/IOVA address + * + * Get actual physical address from dma_addr based on SMMU enablement status. + * IF SMMU Stage 1 tranlation is enabled, DMA APIs return IO virtual address + * (IOVA) otherwise returns physical address. So get SMMU physical address + * mapping from IOVA. 
+ * + * Return: dmaable physical address + */ +static inline qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev, + qdf_dma_addr_t dma_addr) +{ + return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr); +} + +/** + * qdf_mem_dma_get_sgtable() - Returns DMA memory scatter gather table + * @dev: device instace + * @sgt: scatter gather table pointer + * @cpu_addr: HLOS virtual address + * @dma_addr: dma address + * @size: allocated memory size + * + * Return: physical address + */ +static inline int +qdf_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr, + qdf_dma_addr_t dma_addr, size_t size) +{ + return __qdf_os_mem_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); +} + +/** + * qdf_mem_free_sgtable() - Free a previously allocated sg table + * @sgt: the mapped sg table header + * + * Return: None + */ +static inline void +qdf_mem_free_sgtable(struct sg_table *sgt) +{ + __qdf_os_mem_free_sgtable(sgt); +} + +/** + * qdf_dma_get_sgtable_dma_addr() - Assigns DMA address to scatterlist elements + * @sgt: scatter gather table pointer + * + * Return: None + */ +static inline void +qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt) +{ + __qdf_dma_get_sgtable_dma_addr(sgt); +} + +/** + * qdf_mem_get_dma_addr() - Return dma address based on SMMU translation status. + * @osdev: Parent device instance + * @mem_info: Pointer to allocated memory information + * + * Get dma address based on SMMU enablement status. If SMMU Stage 1 + * tranlation is enabled, DMA APIs return IO virtual address otherwise + * returns physical address. 
+ *
+ * Return: dma address
+ */
+static inline qdf_dma_addr_t qdf_mem_get_dma_addr(qdf_device_t osdev,
+ qdf_mem_info_t *mem_info)
+{
+ return __qdf_mem_get_dma_addr(osdev, mem_info);
+}
+
+/**
+ * qdf_mem_get_dma_addr_ptr() - Return DMA address pointer from mem info struct
+ * @osdev: Parent device instance
+ * @mem_info: Pointer to allocated memory information
+ *
+ * Based on smmu stage 1 translation enablement, return corresponding dma
+ * address storage pointer.
+ *
+ * Return: dma address storage pointer
+ */
+static inline qdf_dma_addr_t *qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
+ qdf_mem_info_t *mem_info)
+{
+ return __qdf_mem_get_dma_addr_ptr(osdev, mem_info);
+}
+
+
+/**
+ * qdf_mem_get_dma_size() - Return DMA memory size
+ * @osdev: parent device instance
+ * @mem_info: Pointer to allocated memory information
+ *
+ * Return: DMA memory size
+ */
+static inline uint32_t
+qdf_mem_get_dma_size(qdf_device_t osdev,
+ qdf_mem_info_t *mem_info)
+{
+ return __qdf_mem_get_dma_size(osdev, mem_info);
+}
+
+/**
+ * qdf_mem_set_dma_size() - Set DMA memory size
+ * @osdev: parent device instance
+ * @mem_info: Pointer to allocated memory information
+ * @mem_size: memory size allocated
+ *
+ * Return: none
+ */
+static inline void
+qdf_mem_set_dma_size(qdf_device_t osdev,
+ qdf_mem_info_t *mem_info,
+ uint32_t mem_size)
+{
+ __qdf_mem_set_dma_size(osdev, mem_info, mem_size);
+}
+
+/**
+ * qdf_mem_get_dma_pa() - Return DMA physical address
+ * @osdev: parent device instance
+ * @mem_info: Pointer to allocated memory information
+ *
+ * Return: DMA physical address
+ */
+static inline qdf_dma_addr_t
+qdf_mem_get_dma_pa(qdf_device_t osdev,
+ qdf_mem_info_t *mem_info)
+{
+ return __qdf_mem_get_dma_pa(osdev, mem_info);
+}
+
+/**
+ * qdf_mem_set_dma_pa() - Set DMA physical address
+ * @osdev: parent device instance
+ * @mem_info: Pointer to allocated memory information
+ * @dma_pa: DMA physical address
+ *
+ * Return: none
+ */
+static inline void 
+qdf_mem_set_dma_pa(qdf_device_t osdev, + qdf_mem_info_t *mem_info, + qdf_dma_addr_t dma_pa) +{ + __qdf_mem_set_dma_pa(osdev, mem_info, dma_pa); +} + +/** + * qdf_mem_shared_mem_alloc() - Allocate DMA memory for shared resource + * @osdev: parent device instance + * @mem_info: Pointer to allocated memory information + * @size: size to be allocated + * + * Allocate DMA memory which will be shared with external kernel module. This + * information is needed for SMMU mapping. + * + * Return: 0 success + */ +qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size); + +/** + * qdf_mem_shared_mem_free() - Free shared memory + * @osdev: parent device instance + * @shared_mem: shared memory information storage + * + * Free DMA shared memory resource + * + * Return: None + */ +static inline void qdf_mem_shared_mem_free(qdf_device_t osdev, + qdf_shared_mem_t *shared_mem) +{ + if (!shared_mem) { + __qdf_print("%s: NULL shared mem struct passed\n", + __func__); + return; + } + + if (shared_mem->vaddr) { + qdf_mem_free_consistent(osdev, osdev->dev, + qdf_mem_get_dma_size(osdev, + &shared_mem->mem_info), + shared_mem->vaddr, + qdf_mem_get_dma_addr(osdev, + &shared_mem->mem_info), + qdf_get_dma_mem_context(shared_mem, + memctx)); + } + qdf_mem_free_sgtable(&shared_mem->sgtable); + qdf_mem_free(shared_mem); +} + +#endif /* __QDF_MEMORY_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_module.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_module.h new file mode 100644 index 0000000000000000000000000000000000000000..4ff235bb6a6315be7a5963558889bdec11cf4df9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_module.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * @file qdf_module.h + * This file abstracts "kernel module" semantics. + */ + +#ifndef _QDF_MODULE_H +#define _QDF_MODULE_H + +#include + +typedef uint32_t (*module_init_func_t)(void); + +/** + * qdf_virt_module_init - Specify the module's entry point. + */ +#define qdf_virt_module_init(_mod_init_func) \ + __qdf_virt_module_init(_mod_init_func) + +/** + * qdf_virt_module_exit - Specify the module's exit point. + */ +#define qdf_virt_module_exit(_mod_exit_func) \ + __qdf_virt_module_exit(_mod_exit_func) + +/** + * qdf_virt_module_name - Specify the module's name. + */ +#define qdf_virt_module_name(_name) __qdf_virt_module_name(_name) + + +/** + * qdf_export_symbol - Export a symbol from a module. + */ +#define qdf_export_symbol(_sym) __qdf_export_symbol(_sym) + +/** + * qdf_declare_param - Declare a module parameter. + */ +#define qdf_declare_param(name, _type) __qdf_declare_param(name, _type) + +/** + * qdf_declare_param_array - Declare a module parameter. 
+ */ +#define qdf_declare_param_array(name, _type, _num) \ + __qdf_declare_param_array(name, _type, _num) + +#endif /*_QDF_MODULE_H*/ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_nbuf.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_nbuf.h new file mode 100644 index 0000000000000000000000000000000000000000..f855d7d12b9759937972a317fbf2e0a77ce18e10 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_nbuf.h @@ -0,0 +1,3058 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_nbuf_public network buffer API + * This file defines the network buffer abstraction. 
+ */ + +#ifndef _QDF_NBUF_H +#define _QDF_NBUF_H + +#include +#include +#include +#include +#include +#include + +#define IPA_NBUF_OWNER_ID 0xaa55aa55 +#define QDF_NBUF_PKT_TRAC_TYPE_EAPOL 0x02 +#define QDF_NBUF_PKT_TRAC_TYPE_DHCP 0x04 +#define QDF_NBUF_PKT_TRAC_TYPE_MGMT_ACTION 0x08 +#define QDF_NBUF_PKT_TRAC_TYPE_ARP 0x10 +#define QDF_NBUF_PKT_TRAC_TYPE_ICMP 0x20 +#define QDF_NBUF_PKT_TRAC_TYPE_ICMPv6 0x40 + +#define QDF_NBUF_PKT_TRAC_MAX_STRING 12 +#define QDF_NBUF_PKT_TRAC_PROTO_STRING 4 +#define QDF_NBUF_PKT_ERROR 1 + +#define QDF_NBUF_TRAC_IPV4_OFFSET 14 +#define QDF_NBUF_TRAC_IPV4_HEADER_SIZE 20 +#define QDF_NBUF_TRAC_DHCP_SRV_PORT 67 +#define QDF_NBUF_TRAC_DHCP_CLI_PORT 68 +#define QDF_NBUF_TRAC_ETH_TYPE_OFFSET 12 +#define QDF_NBUF_TRAC_EAPOL_ETH_TYPE 0x888E +#define QDF_NBUF_TRAC_WAPI_ETH_TYPE 0x88b4 +#define QDF_NBUF_TRAC_ARP_ETH_TYPE 0x0806 +#define QDF_NBUF_TRAC_TDLS_ETH_TYPE 0x890D +#define QDF_NBUF_TRAC_IPV4_ETH_TYPE 0x0800 +#define QDF_NBUF_TRAC_IPV6_ETH_TYPE 0x86dd +#define QDF_NBUF_DEST_MAC_OFFSET 0 +#define QDF_NBUF_SRC_MAC_OFFSET 6 +#define QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET 23 +#define QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET 30 +#define QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET 20 +#define QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK 0xE0000000 +#define QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK 0xF0000000 +#define QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET 38 +#define QDF_NBUF_TRAC_IPV6_DEST_ADDR 0xFF00 +#define QDF_NBUF_TRAC_IPV6_OFFSET 14 +#define QDF_NBUF_TRAC_IPV6_HEADER_SIZE 40 +#define QDF_NBUF_TRAC_ICMP_TYPE 1 +#define QDF_NBUF_TRAC_TCP_TYPE 6 +#define QDF_NBUF_TRAC_UDP_TYPE 17 +#define QDF_NBUF_TRAC_ICMPV6_TYPE 0x3a +#define QDF_NBUF_TRAC_DHCP6_SRV_PORT 547 +#define QDF_NBUF_TRAC_DHCP6_CLI_PORT 546 + +/* EAPOL Related MASK */ +#define EAPOL_PACKET_TYPE_OFFSET 15 +#define EAPOL_KEY_INFO_OFFSET 19 +#define EAPOL_PKT_LEN_OFFSET 16 +#define EAPOL_KEY_LEN_OFFSET 21 +#define EAPOL_MASK 0x8013 +#define EAPOL_M1_BIT_MASK 0x8000 +#define EAPOL_M2_BIT_MASK 0x0001 +#define 
EAPOL_M3_BIT_MASK 0x8013 +#define EAPOL_M4_BIT_MASK 0x0003 + +/* ARP Related MASK */ +#define QDF_NBUF_PKT_ARP_OPCODE_OFFSET 20 +#define QDF_NBUF_PKT_ARPOP_REQ 1 +#define QDF_NBUF_PKT_ARPOP_REPLY 2 +#define QDF_NBUF_PKT_ARP_SRC_IP_OFFSET 28 +#define QDF_NBUF_PKT_ARP_TGT_IP_OFFSET 38 + +/* ICMPv4 Related MASK */ +#define QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET 34 +#define QDF_NBUF_PKT_ICMPv4OP_REQ 0x08 +#define QDF_NBUF_PKT_ICMPv4OP_REPLY 0x00 +#define QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET 26 +#define QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET 30 + +/* TCP Related MASK */ +#define QDF_NBUF_PKT_TCP_OPCODE_OFFSET 47 +#define QDF_NBUF_PKT_TCPOP_SYN 0x02 +#define QDF_NBUF_PKT_TCPOP_SYN_ACK 0x12 +#define QDF_NBUF_PKT_TCPOP_ACK 0x10 +#define QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET 34 +#define QDF_NBUF_PKT_TCP_DST_PORT_OFFSET 36 + +/* DNS Related MASK */ +#define QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET 44 +#define QDF_NBUF_PKT_DNSOP_BITMAP 0xF800 +#define QDF_NBUF_PKT_DNSOP_STANDARD_QUERY 0x0000 +#define QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE 0x8000 +#define QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET 34 +#define QDF_NBUF_PKT_DNS_DST_PORT_OFFSET 36 +#define QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET 54 +#define QDF_NBUF_PKT_DNS_STANDARD_PORT 53 + +/* Tracked Packet types */ +#define QDF_NBUF_TX_PKT_INVALID 0 +#define QDF_NBUF_TX_PKT_DATA_TRACK 1 +#define QDF_NBUF_TX_PKT_MGMT_TRACK 2 +#define QDF_NBUF_RX_PKT_DATA_TRACK 3 + +/* Different Packet states */ +#define QDF_NBUF_TX_PKT_HDD 1 +#define QDF_NBUF_TX_PKT_TXRX_ENQUEUE 2 +#define QDF_NBUF_TX_PKT_TXRX_DEQUEUE 3 +#define QDF_NBUF_TX_PKT_TXRX 4 +#define QDF_NBUF_TX_PKT_HTT 5 +#define QDF_NBUF_TX_PKT_HTC 6 +#define QDF_NBUF_TX_PKT_HIF 7 +#define QDF_NBUF_TX_PKT_CE 8 +#define QDF_NBUF_TX_PKT_FREE 9 +#define QDF_NBUF_TX_PKT_STATE_MAX 10 +#define QDF_NBUF_TX_PKT_LI_DP 11 + +/* qdf_nbuf allocate and map max retry threshold when failed */ +#define QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD 20 + +/* Enable flag to print TSO specific prints in datapath */ +#ifdef TSO_DEBUG_LOG_ENABLE 
+#define TSO_DEBUG(fmt, args ...) \ + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_NONE, \ + fmt, ## args) +#else +#define TSO_DEBUG(fmt, args ...) +#endif + +#define IEEE80211_AMPDU_FLAG 0x01 + +#ifdef GET_MSDU_AGGREGATION +#define IEEE80211_AMSDU_FLAG 0x02 +#endif + +/** + * struct mon_rx_status - This will have monitor mode rx_status extracted from + * htt_rx_desc used later to update radiotap information. + * @tsft: Time Synchronization Function timer + * @preamble_type: Preamble type in radio header + * @chan_freq: Capture channel frequency + * @chan_num: Capture channel number + * @chan_flags: Bitmap of Channel flags, IEEE80211_CHAN_TURBO, + * IEEE80211_CHAN_CCK... + * @ht_flags: HT flags, only present for HT frames. + * @vht_flags: VHT flags, only present for VHT frames. + * @vht_flag_values1-5: Contains corresponding data for flags field + * @he_flags: HE (11ax) flags, only present in HE frames + * @he_mu_flags: HE-MU (11ax) flags, only present in HE frames + * @he_mu_other_flags: HE-MU-OTHER (11ax) flags, only present in HE frames + * @he_sig_A1_known: HE (11ax) sig A1 known field + * @he_sig_A2_known: HE (11ax) sig A2 known field + * @he_sig_b_common: HE (11ax) sig B common field + * @he_sig_b_common_known: HE (11ax) sig B common known field + * @rate: Rate in terms 500Kbps + * @rtap_flags: Bit map of available fields in the radiotap + * @ant_signal_db: Rx packet RSSI + * @nr_ant: Number of Antennas used for streaming + * @mcs: MCS index of Rx frame + * @nss: Number of spatial streams + * @bw: bandwidth of rx frame + * @is_stbc: Is STBC enabled + * @sgi: Rx frame short guard interval + * @he_re: HE range extension + * @ldpc: ldpc enabled + * @beamformed: Is frame beamformed. 
+ * @he_sig_b_common_RU[4]: HE (11ax) common RU assignment index + * @rssi_comb: Combined RSSI + * @duration: 802.11 Duration + * @first_data_seq_ctrl: Sequence ctrl field of first data frame + * @ast_index: AST table hash index + * @tid: QoS traffic tid number + * @rs_fcs_err: FCS error flag + * @rs_flags: Flags to indicate AMPDU or AMSDU aggregation + * @cck_flag: Flag to indicate CCK modulation + * @ofdm_flag: Flag to indicate OFDM modulation + * @he_per_user_1: HE per user info1 + * @he_per_user_2: HE per user info2 + * @he_per_user_position: HE per user position info + * @he_per_user_known: HE per user known info + * @he_flags1: HE flags + * @he_flags2: HE flags + * @he_RU[4]: HE RU assignment index + * @he_data1: HE property of received frame + * @he_data2: HE property of received frame + * @he_data3: HE property of received frame + * @he_data4: HE property of received frame + * @he_data5: HE property of received frame + * @prev_ppdu_id: ppdu_id in previously received message + * @ppdu_id: Id of the PLCP protocol data unit + */ +struct mon_rx_status { + uint64_t tsft; + uint32_t preamble_type; + uint16_t chan_freq; + uint16_t chan_num; + uint16_t chan_flags; + uint16_t ht_flags; + uint16_t vht_flags; + uint16_t vht_flag_values6; + uint16_t he_flags; + uint16_t he_mu_flags; + uint16_t he_mu_other_flags; + uint16_t he_sig_A1_known; + uint16_t he_sig_A2_known; + uint16_t he_sig_b_common; + uint16_t he_sig_b_common_known; + uint8_t rate; + uint8_t rtap_flags; + uint8_t ant_signal_db; + uint8_t nr_ant; + uint8_t mcs; + uint8_t nss; + uint16_t tcp_msdu_count; + uint16_t udp_msdu_count; + uint16_t other_msdu_count; + uint8_t bw; + uint8_t vht_flag_values1; + uint8_t vht_flag_values2; + uint8_t vht_flag_values3[4]; + uint8_t vht_flag_values4; + uint8_t vht_flag_values5; + uint8_t is_stbc; + uint8_t sgi; + uint8_t he_re; + uint8_t ldpc; + uint8_t beamformed; + uint8_t he_sig_b_common_RU[4]; + int8_t rssi_comb; + uint8_t reception_type; + uint16_t duration; + uint8_t 
frame_control_info_valid; + int16_t first_data_seq_ctrl; + uint32_t ast_index; + uint32_t tid; + uint8_t rs_fcs_err; + uint8_t rs_flags; + uint8_t cck_flag; + uint8_t ofdm_flag; + /* New HE radiotap fields */ + uint16_t he_per_user_1; + uint16_t he_per_user_2; + uint8_t he_per_user_position; + uint8_t he_per_user_known; + uint16_t he_flags1; + uint16_t he_flags2; + uint8_t he_RU[4]; + uint16_t he_data1; + uint16_t he_data2; + uint16_t he_data3; + uint16_t he_data4; + uint16_t he_data5; + uint16_t he_data6; + uint32_t ppdu_len; + uint32_t prev_ppdu_id; + uint32_t ppdu_id; +}; + +/* Masks for HE SIG known fields in mon_rx_status structure */ +#define QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU0 0x00000001 +#define QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU1 0x00000002 +#define QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU2 0x00000004 +#define QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU3 0x00000008 +#define QDF_MON_STATUS_HE_SIG_B_USER_KNOWN_SIG_B_ALL 0x00fe0000 +#define QDF_MON_STATUS_HE_SIG_A1_HE_FORMAT_SU 0x00000000 +#define QDF_MON_STATUS_HE_SIG_A1_HE_FORMAT_EXT_SU 0x40000000 +#define QDF_MON_STATUS_HE_SIG_A1_HE_FORMAT_TRIG 0xc0000000 + +/* DHCP Related Mask */ +#define QDF_DHCP_OPTION53 (0x35) +#define QDF_DHCP_OPTION53_LENGTH (1) +#define QDF_DHCP_OPTION53_OFFSET (0x11A) +#define QDF_DHCP_OPTION53_LENGTH_OFFSET (0x11B) +#define QDF_DHCP_OPTION53_STATUS_OFFSET (0x11C) +#define DHCP_PKT_LEN_OFFSET 16 +#define DHCP_TRANSACTION_ID_OFFSET 46 +#define QDF_DHCP_DISCOVER (1) +#define QDF_DHCP_OFFER (2) +#define QDF_DHCP_REQUEST (3) +#define QDF_DHCP_DECLINE (4) +#define QDF_DHCP_ACK (5) +#define QDF_DHCP_NAK (6) +#define QDF_DHCP_RELEASE (7) +#define QDF_DHCP_INFORM (8) + +/* ARP Related Mask */ +#define ARP_SUB_TYPE_OFFSET 20 +#define ARP_REQUEST (1) +#define ARP_RESPONSE (2) + +/* IPV4 header fields offset values */ +#define IPV4_PKT_LEN_OFFSET 16 +#define IPV4_TCP_SEQ_NUM_OFFSET 38 +#define IPV4_SRC_ADDR_OFFSET 26 +#define IPV4_DST_ADDR_OFFSET 30 +#define IPV4_SRC_PORT_OFFSET 34 
+#define IPV4_DST_PORT_OFFSET 36 + +/* IPV4 ICMP Related Mask */ +#define ICMP_SEQ_NUM_OFFSET 40 +#define ICMP_SUBTYPE_OFFSET 34 +#define ICMP_REQUEST 0x08 +#define ICMP_RESPONSE 0x00 + +#define IPV6_ADDR_STR "%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\ + "%02x%02x:%02x%02x" + +/* IPV6 header fields offset values */ +#define IPV6_PKT_LEN_OFFSET 18 +#define IPV6_TCP_SEQ_NUM_OFFSET 58 +#define IPV6_SRC_ADDR_OFFSET 22 +#define IPV6_DST_ADDR_OFFSET 38 +#define IPV6_SRC_PORT_OFFSET 54 +#define IPV6_DST_PORT_OFFSET 56 + +/* IPV6 ICMPV6 Related Mask */ +#define ICMPV6_SEQ_NUM_OFFSET 60 +#define ICMPV6_SUBTYPE_OFFSET 54 +#define ICMPV6_REQUEST 0x80 +#define ICMPV6_RESPONSE 0x81 +#define ICMPV6_RS 0x85 +#define ICMPV6_RA 0x86 +#define ICMPV6_NS 0x87 +#define ICMPV6_NA 0x88 + +#define QDF_NBUF_IPA_CHECK_MASK 0x80000000 + +/* HE Radiotap data1 Mask */ +#define QDF_MON_STATUS_HE_SU_FORMAT_TYPE 0x0000 +#define QDF_MON_STATUS_HE_EXT_SU_FORMAT_TYPE 0x0001 +#define QDF_MON_STATUS_HE_MU_FORMAT_TYPE 0x0002 +#define QDF_MON_STATUS_HE_TRIG_FORMAT_TYPE 0x0003 + + +#define QDF_MON_STATUS_HE_BEAM_CHANGE_KNOWN 0x0008 +#define QDF_MON_STATUS_HE_DL_UL_KNOWN 0x0010 +#define QDF_MON_STATUS_HE_MCS_KNOWN 0x0020 +#define QDF_MON_STATUS_HE_DCM_KNOWN 0x0040 +#define QDF_MON_STATUS_HE_CODING_KNOWN 0x0080 +#define QDF_MON_STATUS_HE_LDPC_EXTRA_SYMBOL_KNOWN 0x0100 +#define QDF_MON_STATUS_HE_STBC_KNOWN 0x0200 +#define QDF_MON_STATUS_HE_DATA_BW_RU_KNOWN 0x4000 +#define QDF_MON_STATUS_HE_DOPPLER_KNOWN 0x8000 +#define QDF_MON_STATUS_HE_BSS_COLOR_KNOWN 0x0004 + +/* HE Radiotap data2 Mask */ +#define QDF_MON_STATUS_HE_GI_KNOWN 0x0002 +#define QDF_MON_STATUS_TXBF_KNOWN 0x0010 +#define QDF_MON_STATUS_PE_DISAMBIGUITY_KNOWN 0x0020 +#define QDF_MON_STATUS_TXOP_KNOWN 0x0040 +#define QDF_MON_STATUS_LTF_SYMBOLS_KNOWN 0x0004 +#define QDF_MON_STATUS_PRE_FEC_PADDING_KNOWN 0x0008 +#define QDF_MON_STATUS_MIDABLE_PERIODICITY_KNOWN 0x0080 + +/* HE radiotap data3 shift values */ +#define 
QDF_MON_STATUS_BEAM_CHANGE_SHIFT 6 +#define QDF_MON_STATUS_DL_UL_SHIFT 7 +#define QDF_MON_STATUS_TRANSMIT_MCS_SHIFT 8 +#define QDF_MON_STATUS_DCM_SHIFT 12 +#define QDF_MON_STATUS_CODING_SHIFT 13 +#define QDF_MON_STATUS_LDPC_EXTRA_SYMBOL_SHIFT 14 +#define QDF_MON_STATUS_STBC_SHIFT 15 + +/* HE radiotap data4 shift values */ +#define QDF_MON_STATUS_STA_ID_SHIFT 4 + +/* HE radiotap data5 */ +#define QDF_MON_STATUS_GI_SHIFT 4 +#define QDF_MON_STATUS_HE_LTF_SHIFT 8 +#define QDF_MON_STATUS_TXBF_SHIFT 14 +#define QDF_MON_STATUS_PE_DISAMBIGUITY_SHIFT 15 +#define QDF_MON_STATUS_PRE_FEC_PAD_SHIFT 12 + +/* HE radiotap data6 */ +#define QDF_MON_STATUS_DOPPLER_SHIFT 4 +#define QDF_MON_STATUS_TXOP_SHIFT 8 + +/* HE radiotap HE-MU flags1 */ +#define QDF_MON_STATUS_SIG_B_MCS_KNOWN 0x0010 +#define QDF_MON_STATUS_SIG_B_DCM_KNOWN 0x0040 +#define QDF_MON_STATUS_SIG_B_SYM_NUM_KNOWN 0x8000 +#define QDF_MON_STATUS_RU_0_KNOWN 0x0100 +#define QDF_MON_STATUS_RU_1_KNOWN 0x0200 +#define QDF_MON_STATUS_RU_2_KNOWN 0x0400 +#define QDF_MON_STATUS_RU_3_KNOWN 0x0800 +#define QDF_MON_STATUS_DCM_FLAG_1_SHIFT 5 +#define QDF_MON_STATUS_SPATIAL_REUSE_MU_KNOWN 0x0100 +#define QDF_MON_STATUS_SIG_B_COMPRESSION_FLAG_1_KNOWN 0x4000 + +/* HE radiotap HE-MU flags2 */ +#define QDF_MON_STATUS_SIG_B_COMPRESSION_FLAG_2_SHIFT 3 +#define QDF_MON_STATUS_BW_KNOWN 0x0004 +#define QDF_MON_STATUS_NUM_SIG_B_SYMBOLS_SHIFT 4 +#define QDF_MON_STATUS_SIG_B_COMPRESSION_FLAG_2_KNOWN 0x0100 +#define QDF_MON_STATUS_NUM_SIG_B_FLAG_2_SHIFT 9 +#define QDF_MON_STATUS_LTF_FLAG_2_SYMBOLS_SHIFT 12 +#define QDF_MON_STATUS_LTF_KNOWN 0x8000 + +/* HE radiotap per_user_1 */ +#define QDF_MON_STATUS_STA_SPATIAL_SHIFT 11 +#define QDF_MON_STATUS_TXBF_SHIFT 14 +#define QDF_MON_STATUS_RESERVED_SET_TO_1_SHIFT 19 +#define QDF_MON_STATUS_STA_CODING_SHIFT 20 + +/* HE radiotap per_user_2 */ +#define QDF_MON_STATUS_STA_MCS_SHIFT 4 +#define QDF_MON_STATUS_STA_DCM_SHIFT 5 + +/* HE radiotap per user known */ +#define QDF_MON_STATUS_USER_FIELD_POSITION_KNOWN 
0x01 +#define QDF_MON_STATUS_STA_ID_PER_USER_KNOWN 0x02 +#define QDF_MON_STATUS_STA_NSTS_KNOWN 0x04 +#define QDF_MON_STATUS_STA_TX_BF_KNOWN 0x08 +#define QDF_MON_STATUS_STA_SPATIAL_CONFIG_KNOWN 0x10 +#define QDF_MON_STATUS_STA_MCS_KNOWN 0x20 +#define QDF_MON_STATUS_STA_DCM_KNOWN 0x40 +#define QDF_MON_STATUS_STA_CODING_KNOWN 0x80 + +/** + * qdf_proto_type - protocol type + * @QDF_PROTO_TYPE_DHCP - DHCP + * @QDF_PROTO_TYPE_EAPOL - EAPOL + * @QDF_PROTO_TYPE_ARP - ARP + * @QDF_PROTO_TYPE_MGMT - MGMT + * @QDF_PROTO_TYPE_ICMP - ICMP + * @QDF_PROTO_TYPE_ICMPv6 - ICMPv6 + * QDF_PROTO_TYPE_EVENT - EVENT + */ +enum qdf_proto_type { + QDF_PROTO_TYPE_DHCP, + QDF_PROTO_TYPE_EAPOL, + QDF_PROTO_TYPE_ARP, + QDF_PROTO_TYPE_MGMT, + QDF_PROTO_TYPE_ICMP, + QDF_PROTO_TYPE_ICMPv6, + QDF_PROTO_TYPE_EVENT, + QDF_PROTO_TYPE_MAX +}; + +/** + * qdf_proto_subtype - subtype of packet + * @QDF_PROTO_EAPOL_M1 - EAPOL 1/4 + * @QDF_PROTO_EAPOL_M2 - EAPOL 2/4 + * @QDF_PROTO_EAPOL_M3 - EAPOL 3/4 + * @QDF_PROTO_EAPOL_M4 - EAPOL 4/4 + * @QDF_PROTO_DHCP_DISCOVER - discover + * @QDF_PROTO_DHCP_REQUEST - request + * @QDF_PROTO_DHCP_OFFER - offer + * @QDF_PROTO_DHCP_ACK - ACK + * @QDF_PROTO_DHCP_NACK - NACK + * @QDF_PROTO_DHCP_RELEASE - release + * @QDF_PROTO_DHCP_INFORM - inform + * @QDF_PROTO_DHCP_DECLINE - decline + * @QDF_PROTO_ARP_REQ - arp request + * @QDF_PROTO_ARP_RES - arp response + * @QDF_PROTO_ICMP_REQ - icmp request + * @QDF_PROTO_ICMP_RES - icmp response + * @QDF_PROTO_ICMPV6_REQ - icmpv6 request + * @QDF_PROTO_ICMPV6_RES - icmpv6 response + * @QDF_PROTO_ICMPV6_RS - icmpv6 rs packet + * @QDF_PROTO_ICMPV6_RA - icmpv6 ra packet + * @QDF_PROTO_ICMPV6_NS - icmpv6 ns packet + * @QDF_PROTO_ICMPV6_NA - icmpv6 na packet + * @QDF_PROTO_IPV4_UDP - ipv4 udp + * @QDF_PROTO_IPV4_TCP - ipv4 tcp + * @QDF_PROTO_IPV6_UDP - ipv6 udp + * @QDF_PROTO_IPV6_TCP - ipv6 tcp + * @QDF_PROTO_MGMT_ASSOC -assoc + * @QDF_PROTO_MGMT_DISASSOC - disassoc + * @QDF_PROTO_MGMT_AUTH - auth + * @QDF_PROTO_MGMT_DEAUTH - deauth + * 
QDF_ROAM_SYNCH - roam synch indication from fw + * QDF_ROAM_COMPLETE - roam complete cmd to fw + * QDF_ROAM_EVENTID - roam eventid from fw + */ +enum qdf_proto_subtype { + QDF_PROTO_INVALID, + QDF_PROTO_EAPOL_M1, + QDF_PROTO_EAPOL_M2, + QDF_PROTO_EAPOL_M3, + QDF_PROTO_EAPOL_M4, + QDF_PROTO_DHCP_DISCOVER, + QDF_PROTO_DHCP_REQUEST, + QDF_PROTO_DHCP_OFFER, + QDF_PROTO_DHCP_ACK, + QDF_PROTO_DHCP_NACK, + QDF_PROTO_DHCP_RELEASE, + QDF_PROTO_DHCP_INFORM, + QDF_PROTO_DHCP_DECLINE, + QDF_PROTO_ARP_REQ, + QDF_PROTO_ARP_RES, + QDF_PROTO_ICMP_REQ, + QDF_PROTO_ICMP_RES, + QDF_PROTO_ICMPV6_REQ, + QDF_PROTO_ICMPV6_RES, + QDF_PROTO_ICMPV6_RS, + QDF_PROTO_ICMPV6_RA, + QDF_PROTO_ICMPV6_NS, + QDF_PROTO_ICMPV6_NA, + QDF_PROTO_IPV4_UDP, + QDF_PROTO_IPV4_TCP, + QDF_PROTO_IPV6_UDP, + QDF_PROTO_IPV6_TCP, + QDF_PROTO_MGMT_ASSOC, + QDF_PROTO_MGMT_DISASSOC, + QDF_PROTO_MGMT_AUTH, + QDF_PROTO_MGMT_DEAUTH, + QDF_ROAM_SYNCH, + QDF_ROAM_COMPLETE, + QDF_ROAM_EVENTID, + QDF_PROTO_SUBTYPE_MAX +}; + +/** + * @qdf_nbuf_t - Platform indepedent packet abstraction + */ +typedef __qdf_nbuf_t qdf_nbuf_t; + +/** + * @qdf_dma_map_cb_t - Dma map callback prototype + */ +typedef void (*qdf_dma_map_cb_t)(void *arg, qdf_nbuf_t buf, + qdf_dma_map_t dmap); + +/** + * @qdf_nbuf_queue_t - Platform independent packet queue abstraction + */ +typedef __qdf_nbuf_queue_t qdf_nbuf_queue_t; + +/* BUS/DMA mapping routines */ + +static inline QDF_STATUS +qdf_nbuf_dmamap_create(qdf_device_t osdev, qdf_dma_map_t *dmap) +{ + return __qdf_nbuf_dmamap_create(osdev, dmap); +} + +static inline void +qdf_nbuf_dmamap_destroy(qdf_device_t osdev, qdf_dma_map_t dmap) +{ + __qdf_nbuf_dmamap_destroy(osdev, dmap); +} + +static inline void +qdf_nbuf_dmamap_set_cb(qdf_dma_map_t dmap, qdf_dma_map_cb_t cb, void *arg) +{ + __qdf_nbuf_dmamap_set_cb(dmap, cb, arg); +} + +static inline void +qdf_nbuf_set_send_complete_flag(qdf_nbuf_t buf, bool flag) +{ + __qdf_nbuf_set_send_complete_flag(buf, flag); +} + +#ifdef NBUF_MAP_UNMAP_DEBUG +/** + * 
qdf_nbuf_map_check_for_leaks() - check for nbut map leaks + * + * Check for net buffers that have been mapped, but never unmapped. + * + * Returns: None + */ +void qdf_nbuf_map_check_for_leaks(void); + +QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + const char *file, + uint32_t line); + +#define qdf_nbuf_map(osdev, buf, dir) \ + qdf_nbuf_map_debug(osdev, buf, dir, __FILE__, __LINE__) + +void qdf_nbuf_unmap_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + const char *file, + uint32_t line); + +#define qdf_nbuf_unmap(osdev, buf, dir) \ + qdf_nbuf_unmap_debug(osdev, buf, dir, __FILE__, __LINE__) + +QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + const char *file, + uint32_t line); + +#define qdf_nbuf_map_single(osdev, buf, dir) \ + qdf_nbuf_map_single_debug(osdev, buf, dir, __FILE__, __LINE__) + +void qdf_nbuf_unmap_single_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + const char *file, + uint32_t line); + +#define qdf_nbuf_unmap_single(osdev, buf, dir) \ + qdf_nbuf_unmap_single_debug(osdev, buf, dir, __FILE__, __LINE__) + +QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + int nbytes, + const char *file, + uint32_t line); + +#define qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes) \ + qdf_nbuf_map_nbytes_debug(osdev, buf, dir, nbytes, __FILE__, __LINE__) + +void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + int nbytes, + const char *file, + uint32_t line); + +#define qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes) \ + qdf_nbuf_unmap_nbytes_debug(osdev, buf, dir, nbytes, __FILE__, __LINE__) + +QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + int nbytes, + const char *file, + uint32_t line); + +#define qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes) \ + qdf_nbuf_map_nbytes_single_debug(osdev, buf, 
dir, nbytes, \ + __FILE__, __LINE__) + +void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + int nbytes, + const char *file, + uint32_t line); + +#define qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes) \ + qdf_nbuf_unmap_nbytes_single_debug(osdev, buf, dir, nbytes, \ + __FILE__, __LINE__) + +#else /* NBUF_MAP_UNMAP_DEBUG */ + +static inline void qdf_nbuf_map_check_for_leaks(void) {} + +static inline QDF_STATUS +qdf_nbuf_map(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + return __qdf_nbuf_map(osdev, buf, dir); +} + +static inline void +qdf_nbuf_unmap(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + __qdf_nbuf_unmap(osdev, buf, dir); +} + +static inline QDF_STATUS +qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + return __qdf_nbuf_map_single(osdev, buf, dir); +} + +static inline void +qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + __qdf_nbuf_unmap_single(osdev, buf, dir); +} + +static inline QDF_STATUS +qdf_nbuf_map_nbytes(qdf_device_t osdev, qdf_nbuf_t buf, + qdf_dma_dir_t dir, int nbytes) +{ + return __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes); +} + +static inline void +qdf_nbuf_unmap_nbytes(qdf_device_t osdev, + qdf_nbuf_t buf, qdf_dma_dir_t dir, int nbytes) +{ + __qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes); +} + +static inline QDF_STATUS +qdf_nbuf_map_nbytes_single( + qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir, int nbytes) +{ + return __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes); +} + +static inline void +qdf_nbuf_unmap_nbytes_single( + qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir, int nbytes) +{ + return __qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes); +} +#endif /* NBUF_MAP_UNMAP_DEBUG */ + +static inline void +qdf_nbuf_sync_for_cpu(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + __qdf_nbuf_sync_for_cpu(osdev, buf, dir); +} + +static inline int 
qdf_nbuf_get_num_frags(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_num_frags(buf); +} + +/** + * qdf_nbuf_get_frag_len() - get fragment length + * @buf: Network buffer + * @frag_num: Fragment number + * + * Return: Fragment length + */ +static inline int qdf_nbuf_get_frag_len(qdf_nbuf_t buf, int frag_num) +{ + QDF_BUG(!(frag_num >= QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)); + return __qdf_nbuf_get_frag_len(buf, frag_num); +} + +/** + * qdf_nbuf_get_frag_vaddr() - get fragment virtual address + * @buf: Network buffer + * @frag_num: Fragment number + * + * Return: Fragment virtual address + */ +static inline unsigned char *qdf_nbuf_get_frag_vaddr(qdf_nbuf_t buf, + int frag_num) +{ + QDF_BUG(!(frag_num >= QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)); + return __qdf_nbuf_get_frag_vaddr(buf, frag_num); +} + +/** + * qdf_nbuf_get_frag_vaddr_always() - get fragment virtual address + * @buf: Network buffer + * + * Return: Fragment virtual address + */ +static inline unsigned char * +qdf_nbuf_get_frag_vaddr_always(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_frag_vaddr_always(buf); +} + +/** + * qdf_nbuf_get_frag_paddr() - get physical address for skb linear buffer + * or skb fragment, based on frag_num passed + * @buf: Network buffer + * @frag_num: Fragment number + * + * Return: Fragment physical address + */ +static inline qdf_dma_addr_t qdf_nbuf_get_frag_paddr(qdf_nbuf_t buf, + unsigned int frag_num) +{ + QDF_BUG(!(frag_num >= QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)); + return __qdf_nbuf_get_frag_paddr(buf, frag_num); +} + +/** + * qdf_nbuf_get_tx_frag_paddr() - get physical address for skb fragments only + * @buf: Network buffer + * + * Return: Fragment physical address + * Usage guideline: Use “qdf_nbuf_frag_map()†to dma map the specific + * skb fragment , followed by “qdf_nbuf_get_tx_frag_paddr†+ */ +static inline qdf_dma_addr_t qdf_nbuf_get_tx_frag_paddr(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_tx_frag_paddr(buf); +} + +/** + * qdf_nbuf_get_frag_is_wordstream() - is fragment wordstream + * @buf: 
Network buffer + * @frag_num: Fragment number + * + * Return: Fragment wordstream or not + */ +static inline int qdf_nbuf_get_frag_is_wordstream(qdf_nbuf_t buf, int frag_num) +{ + QDF_BUG(!(frag_num >= QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)); + return __qdf_nbuf_get_frag_is_wordstream(buf, frag_num); +} + +/** + * qdf_nbuf_set_frag_is_wordstream() - set fragment wordstream + * @buf: Network buffer + * @frag_num: Fragment number + * @is_wordstream: Wordstream + * + * Return: none + */ +static inline void +qdf_nbuf_set_frag_is_wordstream(qdf_nbuf_t buf, + int frag_num, int is_wordstream) +{ + QDF_BUG(!(frag_num >= QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS)); + __qdf_nbuf_set_frag_is_wordstream(buf, frag_num, is_wordstream); +} + +static inline void +qdf_nbuf_set_vdev_ctx(qdf_nbuf_t buf, uint8_t vdev_id) +{ + __qdf_nbuf_set_vdev_ctx(buf, vdev_id); +} + +static inline void +qdf_nbuf_set_tx_ftype(qdf_nbuf_t buf, uint8_t type) +{ + __qdf_nbuf_set_tx_ftype(buf, type); +} + +static inline void +qdf_nbuf_set_rx_ftype(qdf_nbuf_t buf, uint8_t type) +{ + __qdf_nbuf_set_rx_ftype(buf, type); +} + + + +static inline uint8_t +qdf_nbuf_get_vdev_ctx(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_vdev_ctx(buf); +} + +static inline uint8_t qdf_nbuf_get_tx_ftype(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_tx_ftype(buf); +} + +static inline uint8_t qdf_nbuf_get_rx_ftype(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_rx_ftype(buf); +} + + +static inline qdf_dma_addr_t +qdf_nbuf_mapped_paddr_get(qdf_nbuf_t buf) +{ + return __qdf_nbuf_mapped_paddr_get(buf); +} + +static inline void +qdf_nbuf_mapped_paddr_set(qdf_nbuf_t buf, qdf_dma_addr_t paddr) +{ + __qdf_nbuf_mapped_paddr_set(buf, paddr); +} + +static inline void +qdf_nbuf_frag_push_head(qdf_nbuf_t buf, + int frag_len, char *frag_vaddr, + qdf_dma_addr_t frag_paddr) +{ + __qdf_nbuf_frag_push_head(buf, frag_len, frag_vaddr, frag_paddr); +} + +#define qdf_nbuf_num_frags_init(_nbuf) __qdf_nbuf_num_frags_init((_nbuf)) + +/** + * qdf_nbuf_set_rx_chfrag_start() - set 
msdu start bit + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void +qdf_nbuf_set_rx_chfrag_start(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_rx_chfrag_start(buf, val); +} + +/** + * qdf_nbuf_is_rx_chfrag_start() - get msdu start bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_rx_chfrag_start(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_rx_chfrag_start(buf); +} + +/** + * qdf_nbuf_set_rx_chfrag_cont() - set msdu continuation bit + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void +qdf_nbuf_set_rx_chfrag_cont(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_rx_chfrag_cont(buf, val); +} + +/** + * qdf_nbuf_is_rx_chfrag_cont() - get msdu continuation bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_rx_chfrag_cont(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_rx_chfrag_cont(buf); +} + +/** + * qdf_nbuf_set_rx_chfrag_end() - set msdu end bit + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void qdf_nbuf_set_rx_chfrag_end(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_rx_chfrag_end(buf, val); +} + +/** + * qdf_nbuf_is_rx_chfrag_end() - set msdu end bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_rx_chfrag_end(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_rx_chfrag_end(buf); +} + +/** + * qdf_nbuf_set_tx_chfrag_start() - set msdu start bit + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void +qdf_nbuf_set_tx_chfrag_start(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_tx_chfrag_start(buf, val); +} + +/** + * qdf_nbuf_is_tx_chfrag_start() - get msdu start bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_tx_chfrag_start(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_tx_chfrag_start(buf); +} + +/** + * qdf_nbuf_set_tx_chfrag_cont() - set msdu 
continuation bit + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void +qdf_nbuf_set_tx_chfrag_cont(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_tx_chfrag_cont(buf, val); +} + +/** + * qdf_nbuf_is_tx_chfrag_cont() - get msdu continuation bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_tx_chfrag_cont(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_tx_chfrag_cont(buf); +} + +/** + * qdf_nbuf_set_tx_chfrag_end() - set msdu end bit + * @buf: Network buffer + * @val: 0/1 + * + * Return: void + */ +static inline void qdf_nbuf_set_tx_chfrag_end(qdf_nbuf_t buf, uint8_t val) +{ + __qdf_nbuf_set_tx_chfrag_end(buf, val); +} + +/** + * qdf_nbuf_is_tx_chfrag_end() - set msdu end bit + * @buf: Network buffer + * + * Return: integer value - 0/1 + */ +static inline int qdf_nbuf_is_tx_chfrag_end(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_tx_chfrag_end(buf); +} + +static inline void +qdf_nbuf_dma_map_info(qdf_dma_map_t bmap, qdf_dmamap_info_t *sg) +{ + __qdf_nbuf_dma_map_info(bmap, sg); +} + +/** + * qdf_nbuf_is_tso() - is the network buffer a jumbo packet? + * @buf: Network buffer + * + * Return: 1 - this is a jumbo packet 0 - not a jumbo packet + */ +static inline uint8_t qdf_nbuf_is_tso(qdf_nbuf_t nbuf) +{ + return __qdf_nbuf_is_tso(nbuf); +} + +/** + * qdf_nbuf_get_users() - function to get the number of users referencing this + * network buffer + * + * @nbuf: network buffer + * + * Return: number of user references to nbuf. + */ +static inline int qdf_nbuf_get_users(qdf_nbuf_t nbuf) +{ + return __qdf_nbuf_get_users(nbuf); +} + +/** + * qdf_nbuf_next() - get the next packet in the linked list + * @buf: Network buffer + * + * This function can be used when nbufs are directly linked into a list, + * rather than using a separate network buffer queue object. 
+ * + * Return: next network buffer in the linked list + */ +static inline qdf_nbuf_t qdf_nbuf_next(qdf_nbuf_t buf) +{ + return __qdf_nbuf_next(buf); +} + +#ifdef NBUF_MEMORY_DEBUG +void qdf_net_buf_debug_init(void); +void qdf_net_buf_debug_exit(void); +void qdf_net_buf_debug_clean(void); +void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size, + uint8_t *file_name, uint32_t line_num); +/** + * qdf_net_buf_debug_update_node() - update nbuf in debug hash table + * + * Return: none + */ +void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, uint8_t *file_name, + uint32_t line_num); +void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf); + +/** + * qdf_net_buf_debug_acquire_skb() - acquire skb to avoid memory leak + * @net_buf: Network buf holding head segment (single) + * @file_name: pointer to file name + * @line_num: line number + * + * WLAN driver module's SKB which are allocated by network stack are + * suppose to call this API before freeing it such that the SKB + * is not reported as memory leak. + * + * Return: none + */ +void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf, + uint8_t *file_name, uint32_t line_num); +void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf); + +/* nbuf allocation rouines */ + +#define qdf_nbuf_alloc(d, s, r, a, p) \ + qdf_nbuf_alloc_debug(d, s, r, a, p, __FILE__, __LINE__) + +qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size, + int reserve, int align, int prio, + uint8_t *file, uint32_t line); + +#define qdf_nbuf_free(d) \ + qdf_nbuf_free_debug(d, __FILE__, __LINE__) + +void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, uint8_t *file, uint32_t line); + +#define qdf_nbuf_clone(buf) \ + qdf_nbuf_clone_debug(buf, __FILE__, __LINE__) + +/** + * qdf_nbuf_clone_debug() - clone the nbuf (copy is readonly) + * @buf: nbuf to clone from + * @file: pointer to file name + * @line: line number + * + * This function clones the nbuf and creates a memory tracking + * node corresponding to that cloned skbuff structure. 
+ * + * Return: cloned buffer + */ +qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, uint8_t *file, uint32_t line); + +#define qdf_nbuf_copy(buf) \ + qdf_nbuf_copy_debug(buf, __FILE__, __LINE__) + +/** + * qdf_nbuf_copy_debug() - returns a private copy of the buf + * @buf: nbuf to copy from + * @file: pointer to file name + * @line: line number + * + * This API returns a private copy of the buf, the buf returned is completely + * modifiable by callers. It also creates a memory tracking node corresponding + * to that new skbuff structure. + * + * Return: copied buffer + */ +qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, uint8_t *file, uint32_t line); + +#else /* NBUF_MEMORY_DEBUG */ + +static inline void qdf_net_buf_debug_init(void) {} +static inline void qdf_net_buf_debug_exit(void) {} + +static inline void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf, + uint8_t *file_name, uint32_t line_num) +{ +} + +static inline void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf) +{ +} + +static inline void +qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, uint8_t *file_name, + uint32_t line_num) +{ +} + +/* Nbuf allocation rouines */ + +#define qdf_nbuf_alloc(osdev, size, reserve, align, prio) \ + qdf_nbuf_alloc_fl(osdev, size, reserve, align, prio, \ + __func__, __LINE__) +static inline qdf_nbuf_t +qdf_nbuf_alloc_fl(qdf_device_t osdev, qdf_size_t size, int reserve, int align, + int prio, const char *func, uint32_t line) +{ + return __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line); +} + +static inline void qdf_nbuf_free(qdf_nbuf_t buf) +{ + if (qdf_likely(buf)) + __qdf_nbuf_free(buf); +} + +/** + * qdf_nbuf_clone() - clone the nbuf (copy is readonly) + * @buf: Pointer to network buffer + * + * This function clones the nbuf and returns new sk_buff + * structure. 
+ * + * Return: cloned skb + */ +static inline qdf_nbuf_t qdf_nbuf_clone(qdf_nbuf_t buf) +{ + return __qdf_nbuf_clone(buf); +} + +/** + * qdf_nbuf_copy() - returns a private copy of the buf + * @buf: Pointer to network buffer + * + * This API returns a private copy of the buf, the buf returned is completely + * modifiable by callers + * + * Return: skb or NULL + */ +static inline qdf_nbuf_t qdf_nbuf_copy(qdf_nbuf_t buf) +{ + return __qdf_nbuf_copy(buf); +} + +#endif /* NBUF_MEMORY_DEBUG */ + +#ifdef WLAN_FEATURE_FASTPATH +/** + * qdf_nbuf_init_fast() - before put buf into pool,turn it to init state + * + * @buf: buf instance + * Return: data pointer of this buf where new data has to be + * put, or NULL if there is not enough room in this buf. + */ +void qdf_nbuf_init_fast(qdf_nbuf_t nbuf); +#endif /* WLAN_FEATURE_FASTPATH */ + +static inline void qdf_nbuf_tx_free(qdf_nbuf_t buf_list, int tx_err) +{ + while (buf_list) { + qdf_nbuf_t next = qdf_nbuf_next(buf_list); + + qdf_nbuf_free(buf_list); + buf_list = next; + } +} + +static inline void qdf_nbuf_ref(qdf_nbuf_t buf) +{ + __qdf_nbuf_ref(buf); +} + +static inline int qdf_nbuf_shared(qdf_nbuf_t buf) +{ + return __qdf_nbuf_shared(buf); +} + +static inline QDF_STATUS qdf_nbuf_cat(qdf_nbuf_t dst, qdf_nbuf_t src) +{ + return __qdf_nbuf_cat(dst, src); +} + +/** + * @qdf_nbuf_copy_bits() - return the length of the copy bits for skb + * @skb: SKB pointer + * @offset: offset + * @len: Length + * @to: To + * + * Return: int32_t + */ +static inline int32_t +qdf_nbuf_copy_bits(qdf_nbuf_t nbuf, uint32_t offset, uint32_t len, void *to) +{ + return __qdf_nbuf_copy_bits(nbuf, offset, len, to); +} + + +/* nbuf manipulation routines */ + +/** + * @qdf_nbuf_head() - return the address of an nbuf's buffer + * @buf: netbuf + * + * Return: head address + */ +static inline uint8_t *qdf_nbuf_head(qdf_nbuf_t buf) +{ + return __qdf_nbuf_head(buf); +} + +/** + * qdf_nbuf_data() - Return the address of the start of data within an nbuf + * 
@buf: Network buffer + * + * Return: Data address + */ +static inline uint8_t *qdf_nbuf_data(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data(buf); +} + +/** + * qdf_nbuf_data_addr() - Return the address of skb->data + * @buf: Network buffer + * + * Return: Data address + */ +static inline uint8_t *qdf_nbuf_data_addr(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_addr(buf); +} + +/** + * qdf_nbuf_headroom() - amount of headroom int the current nbuf + * @buf: Network buffer + * + * Return: Amount of head room + */ +static inline uint32_t qdf_nbuf_headroom(qdf_nbuf_t buf) +{ + return __qdf_nbuf_headroom(buf); +} + +/** + * qdf_nbuf_tailroom() - amount of tail space available + * @buf: Network buffer + * + * Return: amount of tail room + */ +static inline uint32_t qdf_nbuf_tailroom(qdf_nbuf_t buf) +{ + return __qdf_nbuf_tailroom(buf); +} + +/** + * qdf_nbuf_push_head() - push data in the front + * @buf: Network buf instance + * @size: Size to be pushed + * + * Return: New data pointer of this buf after data has been pushed, + * or NULL if there is not enough room in this buf. + */ +static inline uint8_t *qdf_nbuf_push_head(qdf_nbuf_t buf, qdf_size_t size) +{ + return __qdf_nbuf_push_head(buf, size); +} + +/** + * qdf_nbuf_put_tail() - puts data in the end + * @buf: Network buf instance + * @size: Size to be pushed + * + * Return: Data pointer of this buf where new data has to be + * put, or NULL if there is not enough room in this buf. + */ +static inline uint8_t *qdf_nbuf_put_tail(qdf_nbuf_t buf, qdf_size_t size) +{ + return __qdf_nbuf_put_tail(buf, size); +} + +/** + * qdf_nbuf_pull_head() - pull data out from the front + * @buf: Network buf instance + * @size: Size to be popped + * + * Return: New data pointer of this buf after data has been popped, + * or NULL if there is not sufficient data to pull. 
+ */ +static inline uint8_t *qdf_nbuf_pull_head(qdf_nbuf_t buf, qdf_size_t size) +{ + return __qdf_nbuf_pull_head(buf, size); +} + +/** + * qdf_nbuf_trim_tail() - trim data out from the end + * @buf: Network buf instance + * @size: Size to be popped + * + * Return: none + */ +static inline void qdf_nbuf_trim_tail(qdf_nbuf_t buf, qdf_size_t size) +{ + __qdf_nbuf_trim_tail(buf, size); +} + +/** + * qdf_nbuf_len() - get the length of the buf + * @buf: Network buf instance + * + * Return: total length of this buf. + */ +static inline qdf_size_t qdf_nbuf_len(qdf_nbuf_t buf) +{ + return __qdf_nbuf_len(buf); +} + +/** + * qdf_nbuf_set_pktlen() - set the length of the buf + * @buf: Network buf instance + * @size: Size to be set + * + * Return: none + */ +static inline void qdf_nbuf_set_pktlen(qdf_nbuf_t buf, uint32_t len) +{ + __qdf_nbuf_set_pktlen(buf, len); +} + +/** + * qdf_nbuf_reserve() - trim data out from the end + * @buf: Network buf instance + * @size: Size to be popped + * + * Return: none + */ +static inline void qdf_nbuf_reserve(qdf_nbuf_t buf, qdf_size_t size) +{ + __qdf_nbuf_reserve(buf, size); +} + +/** + * qdf_nbuf_reset() - reset the buffer data and pointer + * @buf: Network buf instance + * @reserve: reserve + * @align: align + * + * Return: none + */ +static inline void qdf_nbuf_reset(qdf_nbuf_t buf, int reserve, int align) +{ + __qdf_nbuf_reset(buf, reserve, align); +} + +/** + * qdf_nbuf_dev_scratch_is_supported() - dev_scratch support for network buffer + * in kernel + * + * Return: true if dev_scratch is supported + * false if dev_scratch is not supported + */ +static inline bool qdf_nbuf_is_dev_scratch_supported(void) +{ + return __qdf_nbuf_is_dev_scratch_supported(); +} + +/** + * qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer + * @buf: Pointer to network buffer + * + * Return: dev_scratch if dev_scratch supported + * 0 if dev_scratch not supported + */ +static inline unsigned long qdf_nbuf_get_dev_scratch(qdf_nbuf_t buf) +{ + 
return __qdf_nbuf_get_dev_scratch(buf); +} + +/** + * qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer + * @buf: Pointer to network buffer + * @value: value to be set in dev_scratch of network buffer + * + * Return: void + */ +static inline void qdf_nbuf_set_dev_scratch(qdf_nbuf_t buf, unsigned long value) +{ + __qdf_nbuf_set_dev_scratch(buf, value); +} + +/** + * qdf_nbuf_peek_header() - return the data pointer & length of the header + * @buf: Network nbuf + * @addr: Data pointer + * @len: Length of the data + * + * Return: none + */ +static inline void +qdf_nbuf_peek_header(qdf_nbuf_t buf, uint8_t **addr, uint32_t *len) +{ + __qdf_nbuf_peek_header(buf, addr, len); +} + +/* nbuf queue routines */ + +/** + * qdf_nbuf_queue_init() - initialize buf queue + * @head: Network buf queue head + * + * Return: none + */ +static inline void qdf_nbuf_queue_init(qdf_nbuf_queue_t *head) +{ + __qdf_nbuf_queue_init(head); +} + +/** + * qdf_nbuf_queue_add() - append a nbuf to the tail of the buf queue + * @head: Network buf queue head + * @buf: Network buf + * + * Return: none + */ +static inline void qdf_nbuf_queue_add(qdf_nbuf_queue_t *head, qdf_nbuf_t buf) +{ + __qdf_nbuf_queue_add(head, buf); +} + +/** + * qdf_nbuf_queue_insert_head() - insert nbuf at the head of queue + * @head: Network buf queue head + * @buf: Network buf + * + * Return: none + */ +static inline void +qdf_nbuf_queue_insert_head(qdf_nbuf_queue_t *head, qdf_nbuf_t buf) +{ + __qdf_nbuf_queue_insert_head(head, buf); +} + +/** + * qdf_nbuf_queue_remove() - retrieve a buf from the head of the buf queue + * @head: Network buf queue head + * + * Return: The head buf in the buf queue. 
+ */ +static inline qdf_nbuf_t qdf_nbuf_queue_remove(qdf_nbuf_queue_t *head) +{ + return __qdf_nbuf_queue_remove(head); +} + +/** + * qdf_nbuf_queue_len() - get the length of the queue + * @head: Network buf queue head + * + * Return: length of the queue + */ +static inline uint32_t qdf_nbuf_queue_len(qdf_nbuf_queue_t *head) +{ + return __qdf_nbuf_queue_len(head); +} + +/** + * qdf_nbuf_queue_next() - get the next guy/packet of the given buffer + * @buf: Network buffer + * + * Return: next buffer/packet + */ +static inline qdf_nbuf_t qdf_nbuf_queue_next(qdf_nbuf_t buf) +{ + return __qdf_nbuf_queue_next(buf); +} + +/** + * @qdf_nbuf_is_queue_empty() - check if the buf queue is empty + * @nbq: Network buf queue handle + * + * Return: true if queue is empty + * false if queue is not emty + */ +static inline bool qdf_nbuf_is_queue_empty(qdf_nbuf_queue_t *nbq) +{ + return __qdf_nbuf_is_queue_empty(nbq); +} + +static inline qdf_nbuf_queue_t * +qdf_nbuf_queue_append(qdf_nbuf_queue_t *dest, qdf_nbuf_queue_t *src) +{ + return __qdf_nbuf_queue_append(dest, src); +} + +static inline void +qdf_nbuf_queue_free(qdf_nbuf_queue_t *head) +{ + __qdf_nbuf_queue_free(head); +} + +static inline qdf_nbuf_t +qdf_nbuf_queue_first(qdf_nbuf_queue_t *head) +{ + return __qdf_nbuf_queue_first(head); +} + +/** + * qdf_nbuf_get_protocol() - return the protocol value of the skb + * @skb: Pointer to network buffer + * + * Return: skb protocol + */ +static inline uint16_t qdf_nbuf_get_protocol(struct sk_buff *skb) +{ + return __qdf_nbuf_get_protocol(skb); +} + +/** + * qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb + * @skb: Pointer to network buffer + * + * Return: skb ip_summed + */ +static inline uint8_t qdf_nbuf_get_ip_summed(struct sk_buff *skb) +{ + return __qdf_nbuf_get_ip_summed(skb); +} + +/** + * qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb + * @skb: Pointer to network buffer + * @ip_summed: ip checksum + * + * Return: none + */ +static inline void 
qdf_nbuf_set_ip_summed(struct sk_buff *skb, + uint8_t ip_summed) +{ + __qdf_nbuf_set_ip_summed(skb, ip_summed); +} + +/** + * qdf_nbuf_set_next() - add a packet to a linked list + * @this_buf: Predecessor buffer + * @next_buf: Successor buffer + * + * This function can be used to directly link nbufs, rather than using + * a separate network buffer queue object. + * + * Return: none + */ +static inline void qdf_nbuf_set_next(qdf_nbuf_t this_buf, qdf_nbuf_t next_buf) +{ + __qdf_nbuf_set_next(this_buf, next_buf); +} + +/* nbuf extension routines */ + +/** + * qdf_nbuf_set_next_ext() - link extension of this packet contained in a new + * nbuf + * @this_buf: predecessor buffer + * @next_buf: successor buffer + * + * This function is used to link up many nbufs containing a single logical + * packet - not a collection of packets. Do not use for linking the first + * extension to the head + * + * Return: none + */ +static inline void +qdf_nbuf_set_next_ext(qdf_nbuf_t this_buf, qdf_nbuf_t next_buf) +{ + __qdf_nbuf_set_next_ext(this_buf, next_buf); +} + +/** + * qdf_nbuf_next_ext() - get the next packet extension in the linked list + * @buf: Network buffer + * + * Return: Next network buffer in the linked list + */ +static inline qdf_nbuf_t qdf_nbuf_next_ext(qdf_nbuf_t buf) +{ + return __qdf_nbuf_next_ext(buf); +} + +/** + * qdf_nbuf_append_ext_list() - link list of packet extensions to the head + * segment + * @head_buf: Network buf holding head segment (single) + * @ext_list: Network buf list holding linked extensions to the head + * @ext_len: Total length of all buffers in the extension list + * + * This function is used to link up a list of packet extensions (seg1, 2, + * ...) 
to the nbuf holding the head segment (seg0) + * + * Return: none + */ +static inline void +qdf_nbuf_append_ext_list(qdf_nbuf_t head_buf, qdf_nbuf_t ext_list, + qdf_size_t ext_len) +{ + __qdf_nbuf_append_ext_list(head_buf, ext_list, ext_len); +} + +/** + * qdf_nbuf_get_ext_list() - Get the link to extended nbuf list. + * @head_buf: Network buf holding head segment (single) + * + * This ext_list is populated when we have Jumbo packet, for example in case of + * monitor mode amsdu packet reception, and are stiched using frags_list. + * + * Return: Network buf list holding linked extensions from head buf. + */ +static inline qdf_nbuf_t qdf_nbuf_get_ext_list(qdf_nbuf_t head_buf) +{ + return (qdf_nbuf_t)__qdf_nbuf_get_ext_list(head_buf); +} + +/** + * qdf_nbuf_get_tx_cksum() - gets the tx checksum offload demand + * @buf: Network buffer + * + * Return: qdf_nbuf_tx_cksum_t checksum offload demand for the frame + */ +static inline qdf_nbuf_tx_cksum_t qdf_nbuf_get_tx_cksum(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_tx_cksum(buf); +} + +/** + * qdf_nbuf_set_rx_cksum() - drivers that support hw checksumming use this to + * indicate checksum info to the stack. 
+ * @buf: Network buffer + * @cksum: Checksum + * + * Return: none + */ +static inline void +qdf_nbuf_set_rx_cksum(qdf_nbuf_t buf, qdf_nbuf_rx_cksum_t *cksum) +{ + __qdf_nbuf_set_rx_cksum(buf, cksum); +} + +/** + * qdf_nbuf_get_tid() - this function extracts the TID value from nbuf + * @buf: Network buffer + * + * Return: TID value + */ +static inline uint8_t qdf_nbuf_get_tid(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_tid(buf); +} + +/** + * qdf_nbuf_set_tid() - this function sets the TID value in nbuf + * @buf: Network buffer + * @tid: TID value + * + * Return: none + */ +static inline void qdf_nbuf_set_tid(qdf_nbuf_t buf, uint8_t tid) +{ + __qdf_nbuf_set_tid(buf, tid); +} + +/** + * qdf_nbuf_get_exemption_type() - this function extracts the exemption type + * from nbuf + * @buf: Network buffer + * + * Return: Exemption type + */ +static inline uint8_t qdf_nbuf_get_exemption_type(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_exemption_type(buf); +} + +/** + * qdf_nbuf_set_protocol() - this function peeks data into the buffer at given + * offset + * @buf: Network buffer + * @proto: Protocol + * + * Return: none + */ +static inline void qdf_nbuf_set_protocol(qdf_nbuf_t buf, uint16_t proto) +{ + __qdf_nbuf_set_protocol(buf, proto); +} + +/** + * qdf_nbuf_trace_get_proto_type() - this function return packet proto type + * @buf: Network buffer + * + * Return: Packet protocol type + */ +static inline uint8_t qdf_nbuf_trace_get_proto_type(qdf_nbuf_t buf) +{ + return __qdf_nbuf_trace_get_proto_type(buf); +} + +/** + * qdf_nbuf_reg_trace_cb() - this function registers protocol trace callback + * @cb_func_ptr: Callback pointer + * + * Return: none + */ +static inline void qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr) +{ + __qdf_nbuf_reg_trace_cb(cb_func_ptr); +} + + +/** + * qdf_nbuf_set_tx_parallel_dnload_frm() - set tx parallel download + * @buf: Network buffer + * @candi: Candidate of parallel download frame + * + * This function stores a flag specifying this 
TX frame is suitable for + * downloading though a 2nd TX data pipe that is used for short frames for + * protocols that can accept out-of-order delivery. + * + * Return: none + */ +static inline void +qdf_nbuf_set_tx_parallel_dnload_frm(qdf_nbuf_t buf, uint8_t candi) +{ + __qdf_nbuf_set_tx_htt2_frm(buf, candi); +} + +/** + * qdf_nbuf_get_tx_parallel_dnload_frm() - get tx parallel download + * @buf: Network buffer + * + * This function return whether this TX frame is allow to download though a 2nd + * TX data pipe or not. + * + * Return: none + */ +static inline uint8_t qdf_nbuf_get_tx_parallel_dnload_frm(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_tx_htt2_frm(buf); +} + +/** + * qdf_nbuf_get_dhcp_subtype() - get the subtype + * of DHCP packet. + * @buf: Pointer to DHCP packet buffer + * + * This func. returns the subtype of DHCP packet. + * + * Return: subtype of the DHCP packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_get_dhcp_subtype(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_get_dhcp_subtype(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_dhcp_subtype() - get the subtype + * of DHCP packet. + * @buf: Pointer to DHCP packet data buffer + * + * This func. returns the subtype of DHCP packet. + * + * Return: subtype of the DHCP packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_data_get_dhcp_subtype(uint8_t *data) +{ + return __qdf_nbuf_data_get_dhcp_subtype(data); +} + +/** + * qdf_nbuf_get_eapol_subtype() - get the subtype + * of EAPOL packet. + * @buf: Pointer to EAPOL packet buffer + * + * This func. returns the subtype of EAPOL packet. + * + * Return: subtype of the EAPOL packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_get_eapol_subtype(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_get_eapol_subtype(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_eapol_subtype() - get the subtype + * of EAPOL packet. + * @data: Pointer to EAPOL packet data buffer + * + * This func. returns the subtype of EAPOL packet. 
+ * + * Return: subtype of the EAPOL packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_data_get_eapol_subtype(uint8_t *data) +{ + return __qdf_nbuf_data_get_eapol_subtype(data); +} + +/** + * qdf_nbuf_get_arp_subtype() - get the subtype + * of ARP packet. + * @buf: Pointer to ARP packet buffer + * + * This func. returns the subtype of ARP packet. + * + * Return: subtype of the ARP packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_get_arp_subtype(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_get_arp_subtype(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_arp_subtype() - get the subtype + * of ARP packet. + * @data: Pointer to ARP packet data buffer + * + * This func. returns the subtype of ARP packet. + * + * Return: subtype of the ARP packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_data_get_arp_subtype(uint8_t *data) +{ + return __qdf_nbuf_data_get_arp_subtype(data); +} + +/** + * qdf_nbuf_get_icmp_subtype() - get the subtype + * of IPV4 ICMP packet. + * @buf: Pointer to IPV4 ICMP packet buffer + * + * This func. returns the subtype of ICMP packet. + * + * Return: subtype of the ICMP packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_get_icmp_subtype(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_get_icmp_subtype(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_icmp_subtype() - get the subtype + * of IPV4 ICMP packet. + * @data: Pointer to IPV4 ICMP packet data buffer + * + * This func. returns the subtype of ICMP packet. + * + * Return: subtype of the ICMP packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_data_get_icmp_subtype(uint8_t *data) +{ + return __qdf_nbuf_data_get_icmp_subtype(data); +} + +/** + * qdf_nbuf_get_icmpv6_subtype() - get the subtype + * of IPV6 ICMPV6 packet. + * @buf: Pointer to IPV6 ICMPV6 packet buffer + * + * This func. returns the subtype of ICMPV6 packet. + * + * Return: subtype of the ICMPV6 packet. 
+ */ +static inline enum qdf_proto_subtype +qdf_nbuf_get_icmpv6_subtype(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_get_icmpv6_subtype(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_icmpv6_subtype() - get the subtype + * of IPV6 ICMPV6 packet. + * @data: Pointer to IPV6 ICMPV6 packet data buffer + * + * This func. returns the subtype of ICMPV6 packet. + * + * Return: subtype of the ICMPV6 packet. + */ +static inline enum qdf_proto_subtype +qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data) +{ + return __qdf_nbuf_data_get_icmpv6_subtype(data); +} + +/** + * qdf_nbuf_data_get_ipv4_proto() - get the proto type + * of IPV4 packet. + * @data: Pointer to IPV4 packet data buffer + * + * This func. returns the proto type of IPV4 packet. + * + * Return: proto type of IPV4 packet. + */ +static inline uint8_t +qdf_nbuf_data_get_ipv4_proto(uint8_t *data) +{ + return __qdf_nbuf_data_get_ipv4_proto(data); +} + +/** + * qdf_nbuf_data_get_ipv6_proto() - get the proto type + * of IPV6 packet. + * @data: Pointer to IPV6 packet data buffer + * + * This func. returns the proto type of IPV6 packet. + * + * Return: proto type of IPV6 packet. + */ +static inline uint8_t +qdf_nbuf_data_get_ipv6_proto(uint8_t *data) +{ + return __qdf_nbuf_data_get_ipv6_proto(data); +} + +/** + * qdf_nbuf_is_ipv4_pkt() - check if packet is a ipv4 packet or not + * @buf: buffer + * + * This api is for Tx packets. + * + * Return: true if packet is ipv4 packet + */ +static inline +bool qdf_nbuf_is_ipv4_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv4_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet or not + * @data: data + * + * This api is for Tx packets. + * + * Return: true if packet is ipv4 packet + */ +static inline +bool qdf_nbuf_data_is_ipv4_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv4_pkt(data); +} + +/** + * qdf_nbuf_is_ipv4_dhcp_pkt() - check if packet is a dhcp packet or not + * @buf: buffer + * + * This api is for ipv4 packet. 
+ * + * Return: true if packet is DHCP packet + */ +static inline +bool qdf_nbuf_is_ipv4_dhcp_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv4_dhcp_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if it is DHCP packet. + * @data: Pointer to DHCP packet data buffer + * + * This func. checks whether it is a DHCP packet or not. + * + * Return: true if it is a DHCP packet + * false if not + */ +static inline +bool qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv4_dhcp_pkt(data); +} + +/** + * qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if it is DHCP packet. + * @data: Pointer to DHCP packet data buffer + * + * This func. checks whether it is a DHCP packet or not. + * + * Return: true if it is a DHCP packet + * false if not + */ +static inline +bool qdf_nbuf_is_ipv6_dhcp_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv6_dhcp_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_is_ipv4_eapol_pkt() - check if packet is a eapol packet or not + * @buf: buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is EAPOL packet + */ +static inline +bool qdf_nbuf_is_ipv4_eapol_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv4_eapol_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv4_eapol_pkt() - check if it is EAPOL packet. + * @data: Pointer to EAPOL packet data buffer + * + * This func. checks whether it is a EAPOL packet or not. + * + * Return: true if it is a EAPOL packet + * false if not + */ +static inline +bool qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv4_eapol_pkt(data); +} + +/** + * qdf_nbuf_is_ipv4_wapi_pkt() - check if packet is a wapi packet or not + * @buf: buffer + * + * This api is for ipv4 packet. 
+ * + * Return: true if packet is WAPI packet + */ +static inline +bool qdf_nbuf_is_ipv4_wapi_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_ipv4_wapi_pkt(buf); +} + +/** + * qdf_nbuf_is_ipv4_tdls_pkt() - check if packet is a tdls packet or not + * @buf: buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is TDLS packet + */ +static inline +bool qdf_nbuf_is_ipv4_tdls_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_ipv4_tdls_pkt(buf); +} + +/** + * qdf_nbuf_is_ipv4_arp_pkt() - check if packet is a arp packet or not + * @buf: buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is ARP packet + */ +static inline +bool qdf_nbuf_is_ipv4_arp_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv4_arp_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv4_arp_pkt() - check if it is ARP packet. + * @data: Pointer to ARP packet data buffer + * + * This func. checks whether it is a ARP packet or not. + * + * Return: TRUE if it is a ARP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv4_arp_pkt(data); +} + +/** + * qdf_nbuf_data_is_arp_req() - check if ARP packet is request. + * @buf: buffer + * + * This func. checks whether it is a ARP request or not. + * + * Return: true if it is a ARP request or FALSE if not + */ +static inline +bool qdf_nbuf_data_is_arp_req(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_arp_req(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_arp_rsp() - check if ARP packet is response. + * @buf: buffer + * + * This func. checks whether it is a ARP response or not. + * + * Return: true if it is a ARP response or FALSE if not + */ +static inline +bool qdf_nbuf_data_is_arp_rsp(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_arp_rsp(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_arp_src_ip() - get ARP packet source IP gateway. + * @buf: buffer + * + * Return: ARP packet source IP value. 
+ */ +static inline +uint32_t qdf_nbuf_get_arp_src_ip(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_arp_src_ip(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_arp_tgt_ip() - get ARP packet target IP gateway. + * @buf: buffer + * + * Return: ARP packet target IP value. + */ +static inline +uint32_t qdf_nbuf_get_arp_tgt_ip(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_arp_tgt_ip(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_get_dns_domain_name() - get dns domain name of required length + * @buf: buffer + * @len: length to copy + * + * Return: dns domain name + */ +static inline +uint8_t *qdf_nbuf_get_dns_domain_name(qdf_nbuf_t buf, uint32_t len) +{ + return __qdf_nbuf_get_dns_domain_name(qdf_nbuf_data(buf), len); +} + +/** + * qdf_nbuf_data_is_dns_query() - check if skb data is a dns query + * @buf: buffer + * + * Return: true if packet is dns query packet. + * false otherwise. + */ +static inline +bool qdf_nbuf_data_is_dns_query(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_dns_query(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_dns_response() - check if skb data is a dns response + * @buf: buffer + * + * Return: true if packet is dns response packet. + * false otherwise. + */ +static inline +bool qdf_nbuf_data_is_dns_response(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_dns_response(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn + * @buf: buffer + * + * Return: true if packet is tcp syn packet. + * false otherwise. + */ +static inline +bool qdf_nbuf_data_is_tcp_syn(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_tcp_syn(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack + * @buf: buffer + * + * Return: true if packet is tcp syn ack packet. + * false otherwise. 
+ */ +static inline +bool qdf_nbuf_data_is_tcp_syn_ack(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_tcp_syn_ack(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack + * @buf: buffer + * + * Return: true if packet is tcp ack packet. + * false otherwise. + */ +static inline +bool qdf_nbuf_data_is_tcp_ack(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_tcp_ack(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_tcp_src_port() - get tcp src port + * @buf: buffer + * + * Return: tcp source port value. + */ +static inline +uint16_t qdf_nbuf_data_get_tcp_src_port(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_get_tcp_src_port(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port + * @buf: buffer + * + * Return: tcp destination port value. + */ +static inline +uint16_t qdf_nbuf_data_get_tcp_dst_port(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_get_tcp_dst_port(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_icmpv4_req() - check if ICMPv4 packet is request. + * @buf: buffer + * + * This func. checks whether it is a ICMPv4 request or not. + * + * Return: true if it is a ICMPv4 request or fALSE if not + */ +static inline +bool qdf_nbuf_data_is_icmpv4_req(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_icmpv4_req(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_icmpv4_rsp() - check if ICMPv4 packet is res + * @buf: buffer + * + * Return: true if packet is icmpv4 response + * false otherwise. + */ +static inline +bool qdf_nbuf_data_is_icmpv4_rsp(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_icmpv4_rsp(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP + * @buf: buffer + * + * Return: icmpv4 packet source IP value. 
+ */ +static inline +uint32_t qdf_nbuf_get_icmpv4_src_ip(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_icmpv4_src_ip(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_get_icmpv4_tgt_ip() - get icmpv4 target IP + * @buf: buffer + * + * Return: icmpv4 packet target IP value. + */ +static inline +uint32_t qdf_nbuf_get_icmpv4_tgt_ip(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_icmpv4_tgt_ip(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_is_ipv6_pkt() - check if it is IPV6 packet. + * @buf: Pointer to IPV6 packet buffer + * + * This func. checks whether it is a IPV6 packet or not. + * + * Return: TRUE if it is a IPV6 packet + * FALSE if not + */ +static inline +bool qdf_nbuf_is_ipv6_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv6_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet. + * @data: Pointer to IPV6 packet data buffer + * + * This func. checks whether it is a IPV6 packet or not. + * + * Return: TRUE if it is a IPV6 packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_ipv6_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv6_pkt(data); +} + +/** + * qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet. + * @data: Pointer to IPV4 packet data buffer + * + * This func. checks whether it is a IPV4 multicast packet or not. + * + * Return: TRUE if it is a IPV4 multicast packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv4_mcast_pkt(data); +} + +/** + * qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet. + * @data: Pointer to IPV6 packet data buffer + * + * This func. checks whether it is a IPV6 multicast packet or not. + * + * Return: TRUE if it is a IPV6 multicast packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv6_mcast_pkt(data); +} + +/** + * qdf_nbuf_is_icmp_pkt() - check if it is IPV4 ICMP packet. 
+ * @buf: Pointer to IPV4 ICMP packet buffer + * + * This func. checks whether it is a ICMP packet or not. + * + * Return: TRUE if it is a ICMP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_is_icmp_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_icmp_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet. + * @data: Pointer to IPV4 ICMP packet data buffer + * + * This func. checks whether it is a ICMP packet or not. + * + * Return: TRUE if it is a ICMP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_icmp_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_icmp_pkt(data); +} + +/** + * qdf_nbuf_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet. + * @buf: Pointer to IPV6 ICMPV6 packet buffer + * + * This func. checks whether it is a ICMPV6 packet or not. + * + * Return: TRUE if it is a ICMPV6 packet + * FALSE if not + */ +static inline +bool qdf_nbuf_is_icmpv6_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_icmpv6_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet. + * @data: Pointer to IPV6 ICMPV6 packet data buffer + * + * This func. checks whether it is a ICMPV6 packet or not. + * + * Return: TRUE if it is a ICMPV6 packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_icmpv6_pkt(data); +} + +/** + * qdf_nbuf_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet. + * @buf: Pointer to IPV4 UDP packet buffer + * + * This func. checks whether it is a IPV4 UDP packet or not. + * + * Return: TRUE if it is a IPV4 UDP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_is_ipv4_udp_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv4_udp_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet. + * @data: Pointer to IPV4 UDP packet data buffer + * + * This func. checks whether it is a IPV4 UDP packet or not. 
+ * + * Return: TRUE if it is a IPV4 UDP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv4_udp_pkt(data); +} + +/** + * qdf_nbuf_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet. + * @buf: Pointer to IPV4 TCP packet buffer + * + * This func. checks whether it is a IPV4 TCP packet or not. + * + * Return: TRUE if it is a IPV4 TCP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_is_ipv4_tcp_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv4_tcp_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet. + * @data: Pointer to IPV4 TCP packet data buffer + * + * This func. checks whether it is a IPV4 TCP packet or not. + * + * Return: TRUE if it is a IPV4 TCP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv4_tcp_pkt(data); +} + +/** + * qdf_nbuf_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet. + * @buf: Pointer to IPV6 UDP packet buffer + * + * This func. checks whether it is a IPV6 UDP packet or not. + * + * Return: TRUE if it is a IPV6 UDP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_is_ipv6_udp_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv6_udp_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet. + * @data: Pointer to IPV6 UDP packet data buffer + * + * This func. checks whether it is a IPV6 UDP packet or not. + * + * Return: TRUE if it is a IPV6 UDP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv6_udp_pkt(data); +} + +/** + * qdf_nbuf_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet. + * @buf: Pointer to IPV6 TCP packet buffer + * + * This func. checks whether it is a IPV6 TCP packet or not. 
+ * + * Return: TRUE if it is a IPV6 TCP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_is_ipv6_tcp_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_is_ipv6_tcp_pkt(qdf_nbuf_data(buf)); +} + +/** + * qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet. + * @data: Pointer to IPV6 TCP packet data buffer + * + * This func. checks whether it is a IPV6 TCP packet or not. + * + * Return: TRUE if it is a IPV6 TCP packet + * FALSE if not + */ +static inline +bool qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data) +{ + return __qdf_nbuf_data_is_ipv6_tcp_pkt(data); +} + +/** + * qdf_nbuf_is_bcast_pkt() - check if it is broadcast packet. + * @buf: Network buffer + * + * This func. checks whether packet is broadcast or not. + * + * Return: TRUE if it is broadcast packet + * FALSE if not + */ +static inline +bool qdf_nbuf_is_bcast_pkt(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_bcast_pkt(buf); +} + +/** + * qdf_nbuf_reset_num_frags() - decrement the number of fragments + * @buf: Network buffer + * + * Return: Number of fragments + */ +static inline void qdf_nbuf_reset_num_frags(qdf_nbuf_t buf) +{ + __qdf_nbuf_reset_num_frags(buf); +} + +/** + * qdf_dmaaddr_to_32s - return high and low parts of dma_addr + * + * Returns the high and low 32-bits of the DMA addr in the provided ptrs + * + * Return: N/A + */ +static inline void qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr, + uint32_t *lo, uint32_t *hi) +{ + return __qdf_dmaaddr_to_32s(dmaaddr, lo, hi); +} + +/** + * qdf_nbuf_get_tso_info() - function to divide a jumbo TSO + * network buffer into segments + * @nbuf: network buffer to be segmented + * @tso_info: This is the output. The information about the + * TSO segments will be populated within this. + * + * This function fragments a TCP jumbo packet into smaller + * segments to be transmitted by the driver. It chains the TSO + * segments created into a list. 
+ * + * Return: number of TSO segments + */ +static inline uint32_t qdf_nbuf_get_tso_info(qdf_device_t osdev, + qdf_nbuf_t nbuf, struct qdf_tso_info_t *tso_info) +{ + return __qdf_nbuf_get_tso_info(osdev, nbuf, tso_info); +} + +/** + * qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element + * + * @osdev: qdf device handle + * @tso_seg: TSO segment element to be unmapped + * @is_last_seg: whether this is last tso seg or not + * + * Return: none + */ +static inline void qdf_nbuf_unmap_tso_segment(qdf_device_t osdev, + struct qdf_tso_seg_elem_t *tso_seg, + bool is_last_seg) +{ + return __qdf_nbuf_unmap_tso_segment(osdev, tso_seg, is_last_seg); +} + +/** + * qdf_nbuf_get_tso_num_seg() - function to calculate the number + * of TCP segments within the TSO jumbo packet + * @nbuf: TSO jumbo network buffer to be segmented + * + * This function calculates the number of TCP segments that the + network buffer can be divided into. + * + * Return: number of TCP segments + */ +static inline uint32_t qdf_nbuf_get_tso_num_seg(qdf_nbuf_t nbuf) +{ + return __qdf_nbuf_get_tso_num_seg(nbuf); +} + +/** + * qdf_nbuf_inc_users() - function to increment the number of + * users referencing this network buffer + * + * @nbuf: network buffer + * + * This function increments the number of users referencing this + * network buffer + * + * Return: the network buffer + */ +static inline qdf_nbuf_t qdf_nbuf_inc_users(qdf_nbuf_t nbuf) +{ + return __qdf_nbuf_inc_users(nbuf); +} + +/** + * qdf_nbuf_data_attr_get() - Get data_attr field from cvg_nbuf_cb + * + * @nbuf: Network buffer (skb on linux) + * + * This function returns the values of data_attr field + * in struct cvg_nbuf_cb{}, to which skb->cb is typecast. + * This value is actually the value programmed in CE descriptor. 
+ * + * Return: Value of data_attr + */ +static inline uint32_t qdf_nbuf_data_attr_get(qdf_nbuf_t buf) +{ + return __qdf_nbuf_data_attr_get(buf); +} + +/** + * qdf_nbuf_data_attr_set() - Sets data_attr field in cvg_nbuf_cb + * + * @nbuf: Network buffer (skb on linux) + * @data_attr: Value to be stored cvg_nbuf_cb->data_attr + * + * This function stores the value to be programmed in CE + * descriptor as part skb->cb which is typecast to struct cvg_nbuf_cb{} + * + * Return: void + */ +static inline +void qdf_nbuf_data_attr_set(qdf_nbuf_t buf, uint32_t data_attr) +{ + __qdf_nbuf_data_attr_set(buf, data_attr); +} + +/** + * qdf_nbuf_tx_info_get() - Parse skb and get Tx metadata + * + * @nbuf: Network buffer (skb on linux) + * + * This function parses the payload to figure out relevant + * Tx meta-data e.g. whether to enable tx_classify bit + * in CE. + * + * Return: void + */ +#define qdf_nbuf_tx_info_get __qdf_nbuf_tx_info_get + +void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state); +void qdf_nbuf_tx_desc_count_display(void); +void qdf_nbuf_tx_desc_count_clear(void); + +static inline qdf_nbuf_t +qdf_nbuf_realloc_headroom(qdf_nbuf_t buf, uint32_t headroom) +{ + return __qdf_nbuf_realloc_headroom(buf, headroom); +} + +static inline qdf_nbuf_t +qdf_nbuf_realloc_tailroom(qdf_nbuf_t buf, uint32_t tailroom) +{ + return __qdf_nbuf_realloc_tailroom(buf, tailroom); +} + +static inline qdf_nbuf_t +qdf_nbuf_expand(qdf_nbuf_t buf, uint32_t headroom, uint32_t tailroom) +{ + return __qdf_nbuf_expand(buf, headroom, tailroom); +} + +static inline int +qdf_nbuf_linearize(qdf_nbuf_t buf) +{ + return __qdf_nbuf_linearize(buf); +} + +static inline qdf_nbuf_t +qdf_nbuf_unshare(qdf_nbuf_t buf) +{ + return __qdf_nbuf_unshare(buf); +} + +static inline bool +qdf_nbuf_is_cloned(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_cloned(buf); +} + +static inline void +qdf_nbuf_frag_info(qdf_nbuf_t buf, qdf_sglist_t *sg) +{ + __qdf_nbuf_frag_info(buf, sg); +} + +static inline 
qdf_nbuf_tx_cksum_t +qdf_nbuf_tx_cksum_info(qdf_nbuf_t buf, uint8_t **hdr_off, uint8_t **where) +{ + return __qdf_nbuf_tx_cksum_info(buf, hdr_off, where); +} + +static inline void qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf) +{ + __qdf_nbuf_reset_ctxt(nbuf); +} + +static inline void qdf_nbuf_init(qdf_nbuf_t buf) +{ + __qdf_nbuf_init(buf); +} + +static inline void *qdf_nbuf_network_header(qdf_nbuf_t buf) +{ + return __qdf_nbuf_network_header(buf); +} + +static inline void *qdf_nbuf_transport_header(qdf_nbuf_t buf) +{ + return __qdf_nbuf_transport_header(buf); +} + +static inline qdf_size_t qdf_nbuf_tcp_tso_size(qdf_nbuf_t buf) +{ + return __qdf_nbuf_tcp_tso_size(buf); +} + +static inline void *qdf_nbuf_get_cb(qdf_nbuf_t nbuf) +{ + return __qdf_nbuf_get_cb(nbuf); +} + +static inline uint32_t qdf_nbuf_get_nr_frags(qdf_nbuf_t nbuf) +{ + return __qdf_nbuf_get_nr_frags(nbuf); +} + +static inline qdf_size_t qdf_nbuf_headlen(qdf_nbuf_t buf) +{ + return __qdf_nbuf_headlen(buf); +} + +static inline QDF_STATUS qdf_nbuf_frag_map(qdf_device_t osdev, + qdf_nbuf_t buf, int offset, + qdf_dma_dir_t dir, int cur_frag) +{ + return __qdf_nbuf_frag_map(osdev, buf, offset, dir, cur_frag); +} + +static inline bool qdf_nbuf_tso_tcp_v4(qdf_nbuf_t buf) +{ + return __qdf_nbuf_tso_tcp_v4(buf); +} + +static inline bool qdf_nbuf_tso_tcp_v6(qdf_nbuf_t buf) +{ + return __qdf_nbuf_tso_tcp_v6(buf); +} + +static inline uint32_t qdf_nbuf_tcp_seq(qdf_nbuf_t buf) +{ + return __qdf_nbuf_tcp_seq(buf); +} + +static inline qdf_size_t qdf_nbuf_l2l3l4_hdr_len(qdf_nbuf_t buf) +{ + return __qdf_nbuf_l2l3l4_hdr_len(buf); +} + +static inline bool qdf_nbuf_is_nonlinear(qdf_nbuf_t buf) +{ + return __qdf_nbuf_is_nonlinear(buf); +} + +static inline uint32_t +qdf_nbuf_get_frag_size(qdf_nbuf_t buf, uint32_t frag_num) +{ + return __qdf_nbuf_get_frag_size(buf, frag_num); +} + +static inline uint32_t qdf_nbuf_get_priority(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_priority(buf); +} + +static inline void 
qdf_nbuf_set_priority(qdf_nbuf_t buf, uint32_t p) +{ + __qdf_nbuf_set_priority(buf, p); +} + +static inline void qdf_nbuf_record_rx_queue(qdf_nbuf_t buf, uint32_t queue_id) +{ + __qdf_nbuf_record_rx_queue(buf, queue_id); +} + +static inline uint16_t +qdf_nbuf_get_queue_mapping(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_queue_mapping(buf); +} + +static inline uint8_t * +qdf_nbuf_get_priv_ptr(qdf_nbuf_t buf) +{ + return __qdf_nbuf_get_priv_ptr(buf); +} + +/** + * qdf_nbuf_update_radiotap() - update radiotap at head of nbuf. + * @rx_status: rx_status containing required info to update radiotap + * @nbuf: Pointer to nbuf + * @headroom_sz: Available headroom size + * + * Return: radiotap length. + */ +unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status, + qdf_nbuf_t nbuf, uint32_t headroom_sz); + +/** + * qdf_nbuf_mark_wakeup_frame() - mark wakeup frame. + * @buf: Pointer to nbuf + * + * Return: None + */ +static inline void +qdf_nbuf_mark_wakeup_frame(qdf_nbuf_t buf) +{ + __qdf_nbuf_mark_wakeup_frame(buf); +} + +/** + * qdf_nbuf_reg_free_cb - Registers nbuf free callback + * @cb_func_ptr: Callback pointer + * + * This function registers nbuf free callback + * + * Return: void + */ +static inline void +qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr) +{ + __qdf_nbuf_reg_free_cb(cb_func_ptr); +} + +/** + * qdf_nbuf_set_timestamp() - set the timestamp for frame + * + * @buf: sk buff + * + * Return: void + */ +static inline void +qdf_nbuf_set_timestamp(struct sk_buff *skb) +{ + __qdf_nbuf_set_timestamp(skb); +} + +/** + * qdf_nbuf_get_timedelta_ms() - get time difference in ms + * + * @buf: sk buff + * + * Return: time difference ms + */ +static inline uint64_t +qdf_nbuf_get_timedelta_ms(struct sk_buff *skb) +{ + return __qdf_nbuf_get_timedelta_ms(skb); +} + +/** + * qdf_nbuf_get_timedelta_us() - get time difference in micro seconds + * + * @buf: sk buff + * + * Return: time difference in micro seconds + */ +static inline uint64_t 
+qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
+{
+	return __qdf_nbuf_get_timedelta_us(skb);
+}
+
+/**
+ * qdf_nbuf_count_get() - get global nbuf gauge
+ *
+ * Return: global nbuf gauge
+ */
+static inline int qdf_nbuf_count_get(void)
+{
+	return __qdf_nbuf_count_get();
+}
+
+/**
+ * qdf_nbuf_count_inc() - increment nbuf global count
+ *
+ * @buf: sk buff
+ *
+ * Return: void
+ */
+static inline void qdf_nbuf_count_inc(qdf_nbuf_t buf)
+{
+	return __qdf_nbuf_count_inc(buf);
+}
+
+/**
+ * qdf_nbuf_count_dec() - decrement nbuf global count
+ *
+ * @buf: sk buff
+ *
+ * Return: void
+ */
+static inline void qdf_nbuf_count_dec(qdf_nbuf_t buf)
+{
+	return __qdf_nbuf_count_dec(buf);
+}
+
+/**
+ * qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
+ *
+ * Return: void
+ */
+static inline void qdf_nbuf_mod_init(void)
+{
+	return __qdf_nbuf_mod_init();
+}
+
+/**
+ * qdf_nbuf_mod_exit() - Uninitialization routine for qdf_nbuf
+ *
+ * Return: void
+ */
+static inline void qdf_nbuf_mod_exit(void)
+{
+	return __qdf_nbuf_mod_exit();
+}
+
+/**
+ * qdf_nbuf_orphan() - orphan a nbuf
+ * @buf: Pointer to network buffer
+ *
+ * If a buffer currently has an owner then we call the
+ * owner's destructor function
+ *
+ * Return: void
+ */
+static inline void qdf_nbuf_orphan(qdf_nbuf_t buf)
+{
+	return __qdf_nbuf_orphan(buf);
+}
+
+/* NOTE(review): the #include targets below appear to have been stripped from
+ * this patch (likely the OS-specific i_qdf_nbuf headers) - restore before use.
+ */
+#ifdef CONFIG_WIN
+#include
+#else
+#include
+#endif
+#endif /* _QDF_NBUF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_net_types.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_net_types.h new file mode 100644 index 0000000000000000000000000000000000000000..7acde8aa2197986d907ddf3733344cb0706bd1b0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_net_types.h @@ -0,0 +1,574 @@
+/*
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_net_types + * This file defines types used in the networking stack abstraction. + */ + +#ifndef _QDF_NET_TYPES_H +#define _QDF_NET_TYPES_H + +#include /* uint8_t, etc. */ +#include + +/* Extended Traffic ID passed to target if the TID is unknown */ +#define QDF_NBUF_TX_EXT_TID_INVALID 0x1f + +/** + * qdf_nbuf_exemption_type - QDF net buf exemption types for encryption + * @QDF_NBUF_EXEMPT_NO_EXEMPTION: No exemption + * @QDF_NBUF_EXEMPT_ALWAYS: Exempt always + * @QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE: Exempt on key mapping + */ +enum qdf_nbuf_exemption_type { + QDF_NBUF_EXEMPT_NO_EXEMPTION = 0, + QDF_NBUF_EXEMPT_ALWAYS, + QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE +}; + +typedef __wsum_t wsum_t; +typedef __in6_addr_t in6_addr_t; + + +#define QDF_NET_MAC_ADDR_MAX_LEN 6 +#define QDF_NET_IF_NAME_SIZE 64 +#define QDF_NET_ETH_LEN QDF_NET_MAC_ADDR_MAX_LEN +#define QDF_NET_MAX_MCAST_ADDR 64 +#define QDF_NET_IPV4_LEN 4 + +/* Extended Traffic ID passed to target if the TID is unknown */ +#define QDF_NBUF_TX_EXT_TID_INVALID 0x1f + +#define QDF_ETH_TYPE_IPV4 0x0800 /* IPV4 */ +#define QDF_ETH_TYPE_IPV6 0x86dd /* IPV6 */ +#define QDF_IEEE80211_4ADDR_HDR_LEN 30 +#define QDF_IEEE80211_3ADDR_HDR_LEN 24 
+#define QDF_IEEE80211_FC0_SUBTYPE_QOS 0x80 +#define QDF_IEEE80211_FC1_TODS 0x01 +#define QDF_IEEE80211_FC1_FROMDS 0x02 + +#define QDF_NET_IS_MAC_MULTICAST(_a) (*(_a) & 0x01) + +typedef struct qdf_net_ethaddr { + uint8_t addr[QDF_NET_ETH_LEN]; +} qdf_net_ethaddr_t; + +/** + * typedef qdf_net_arphdr_t - ARP header info + * @ar_hrd: hardware type + * @ar_pro: protocol type + * @ar_hln: hardware address length + * @ar_pln: protocol length + * @ar_op: arp operation code + * @ar_sha: sender hardware address + * @ar_sip: sender IP address + * @ar_tha: target hardware address + * @ar_tip: target IP address + */ +typedef struct qdf_net_arphdr { + uint16_t ar_hrd; + uint16_t ar_pro; + uint8_t ar_hln; + uint8_t ar_pln; + uint16_t ar_op; + uint8_t ar_sha[QDF_NET_ETH_LEN]; + uint8_t ar_sip[QDF_NET_IPV4_LEN]; + uint8_t ar_tha[QDF_NET_ETH_LEN]; + uint8_t ar_tip[QDF_NET_IPV4_LEN]; +} qdf_net_arphdr_t; + +/** + * typedef qdf_net_icmp6_11addr_t - ICMP6 header info + * @type: hardware type + * @len: hardware address length + * @addr: hardware address + */ +typedef struct qdf_net_icmp6_11addr { + uint8_t type; + uint8_t len; + uint8_t addr[QDF_NET_ETH_LEN]; +} qdf_net_icmp6_11addr_t; + +#define QDF_TCPHDR_FIN __QDF_TCPHDR_FIN +#define QDF_TCPHDR_SYN __QDF_TCPHDR_SYN +#define QDF_TCPHDR_RST __QDF_TCPHDR_RST +#define QDF_TCPHDR_PSH __QDF_TCPHDR_PSH +#define QDF_TCPHDR_ACK __QDF_TCPHDR_ACK +#define QDF_TCPHDR_URG __QDF_TCPHDR_URG +#define QDF_TCPHDR_ECE __QDF_TCPHDR_ECE +#define QDF_TCPHDR_CWR __QDF_TCPHDR_CWR + +typedef struct { + uint16_t source; + uint16_t dest; + uint32_t seq; + uint32_t ack_seq; +#if defined(QDF_LITTLE_ENDIAN_MACHINE) + uint16_t res1:4, + doff:4, + fin:1, + syn:1, + rst:1, + psh:1, + ack:1, + urg:1, + ece:1, + cwr:1; +#elif defined(QDF_BIG_ENDIAN_MACHINE) + uint16_t doff:4, + res1:4, + cwr:1, + ece:1, + urg:1, + ack:1, + psh:1, + rst:1, + syn:1, + fin:1; +#else +#error "Adjust your byte order" +#endif + uint16_t window; + uint16_t check; + uint16_t urg_ptr; +} 
qdf_net_tcphdr_t; + +typedef struct { +#if defined(QDF_LITTLE_ENDIAN_MACHINE) + uint8_t ip_hl:4, + ip_version:4; +#elif defined(QDF_BIG_ENDIAN_MACHINE) + uint8_t ip_version:4, + ip_hl:4; +#else +#error "Please fix" +#endif + uint8_t ip_tos; + uint16_t ip_len; + uint16_t ip_id; + uint16_t ip_frag_off; + uint8_t ip_ttl; + uint8_t ip_proto; + uint16_t ip_check; + uint32_t ip_saddr; + uint32_t ip_daddr; + /*The options start here. */ +} qdf_net_iphdr_t; + +/* V3 group record types [grec_type] */ +#define IGMPV3_MODE_IS_INCLUDE 1 +#define IGMPV3_MODE_IS_EXCLUDE 2 +#define IGMPV3_CHANGE_TO_INCLUDE 3 +#define IGMPV3_CHANGE_TO_EXCLUDE 4 +#define IGMPV3_ALLOW_NEW_SOURCES 5 +#define IGMPV3_BLOCK_OLD_SOURCES 6 + +/** + * qdf_net_cmd_vid_t - Command for set/unset vid + */ +typedef uint16_t qdf_net_cmd_vid_t ; /*get/set vlan id*/ + +/** + * typedef qdf_nbuf_tx_cksum_t - transmit checksum offload types + * @QDF_NBUF_TX_CKSUM_NONE: No checksum offload + * @QDF_NBUF_TX_CKSUM_IP: IP header checksum offload + * @QDF_NBUF_TX_CKSUM_TCP_UDP: TCP/UDP checksum offload + * @QDF_NBUF_TX_CKSUM_TCP_UDP_IP: TCP/UDP and IP header checksum offload + */ + +typedef enum { + QDF_NBUF_TX_CKSUM_NONE, + QDF_NBUF_TX_CKSUM_IP, + QDF_NBUF_TX_CKSUM_TCP_UDP, + QDF_NBUF_TX_CKSUM_TCP_UDP_IP, + +} qdf_nbuf_tx_cksum_t; + +/** + * typedef qdf_nbuf_l4_rx_cksum_type_t - receive checksum API types + * @QDF_NBUF_RX_CKSUM_ZERO: Rx checksum zero + * @QDF_NBUF_RX_CKSUM_TCP: Rx checksum TCP + * @QDF_NBUF_RX_CKSUM_UDP: Rx checksum UDP + * @QDF_NBUF_RX_CKSUM_TCPIPV6: Rx checksum TCP IPV6 + * @QDF_NBUF_RX_CKSUM_UDPIPV6: Rx checksum UDP IPV6 + * @QDF_NBUF_RX_CKSUM_TCP_NOPSEUDOHEADER: Rx checksum TCP no pseudo header + * @QDF_NBUF_RX_CKSUM_UDP_NOPSEUDOHEADER: Rx checksum UDP no pseudo header + * @QDF_NBUF_RX_CKSUM_TCPSUM16: Rx checksum TCP SUM16 + */ +typedef enum { + QDF_NBUF_RX_CKSUM_ZERO = 0x0000, + QDF_NBUF_RX_CKSUM_TCP = 0x0001, + QDF_NBUF_RX_CKSUM_UDP = 0x0002, + QDF_NBUF_RX_CKSUM_TCPIPV6 = 0x0010, + 
QDF_NBUF_RX_CKSUM_UDPIPV6 = 0x0020, + QDF_NBUF_RX_CKSUM_TCP_NOPSEUDOHEADER = 0x0100, + QDF_NBUF_RX_CKSUM_UDP_NOPSEUDOHEADER = 0x0200, + QDF_NBUF_RX_CKSUM_TCPSUM16 = 0x1000, +} qdf_nbuf_l4_rx_cksum_type_t; + +/** + * typedef qdf_nbuf_l4_rx_cksum_result_t - receive checksum status types + * @QDF_NBUF_RX_CKSUM_NONE: Device failed to checksum + * @QDF_NBUF_RX_CKSUM_TCP_UDP_HW: TCP/UDP cksum successful and value returned + * @QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY: TCP/UDP cksum successful, no value + */ +typedef enum { + QDF_NBUF_RX_CKSUM_NONE = 0x0000, + QDF_NBUF_RX_CKSUM_TCP_UDP_HW = 0x0010, + QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY = 0x0020, +} qdf_nbuf_l4_rx_cksum_result_t; + +/** + * typedef qdf_nbuf_rx_cksum_t - receive checksum type + * @l4_type: L4 type + * @l4_result: L4 result + */ +typedef struct { + qdf_nbuf_l4_rx_cksum_type_t l4_type; + qdf_nbuf_l4_rx_cksum_result_t l4_result; + uint32_t val; +} qdf_nbuf_rx_cksum_t; + +#define QDF_ARP_REQ 1 /* ARP request */ +#define QDF_ARP_RSP 2 /* ARP response */ +#define QDF_ARP_RREQ 3 /* RARP request */ +#define QDF_ARP_RRSP 4 /* RARP response */ + +#define QDF_NEXTHDR_ICMP 58 /* ICMP for IPv6. 
*/ + +/* Neighbor Discovery */ +#define QDF_ND_RSOL 133 /* Router Solicitation */ +#define QDF_ND_RADVT 134 /* Router Advertisement */ +#define QDF_ND_NSOL 135 /* Neighbor Solicitation */ +#define QDF_ND_NADVT 136 /* Neighbor Advertisement */ + +/** + * typedef qdf_net_udphdr_t - UDP header info + * @src_port: source port + * @dst_port: destination port + * @udp_len: length + * @udp_cksum: checksum + */ +typedef struct { + uint16_t src_port; + uint16_t dst_port; + uint16_t udp_len; + uint16_t udp_cksum; +} qdf_net_udphdr_t; + +/** + * typedef qdf_net_dhcphdr_t - DHCP header info + * @dhcp_msg_type: message type + * @dhcp_hw_type: hardware type + * @dhcp_hw_addr_len: hardware address length + * @dhcp_num_hops: number of hops + * @dhcp_transc_id: transaction id + * @dhcp_secs_elapsed: time elapsed + * @dhcp_flags: flags + * @dhcp_ciaddr: client IP + * @dhcp_yiaddr: device IP + * @dhcp_siaddr_nip: Server IP + * @dhcp_gateway_nip: relay agent IP + * @dhcp_chaddr: LLC hardware address + * @dhcp_sname: server host name + * @dhcp_file: boot file name + * @dhcp_cookie: cookie + */ +typedef struct { + uint8_t dhcp_msg_type; + uint8_t dhcp_hw_type; + uint8_t dhcp_hw_addr_len; + uint8_t dhcp_num_hops; + uint32_t dhcp_transc_id; + uint16_t dhcp_secs_elapsed; + uint16_t dhcp_flags; + uint32_t dhcp_ciaddr; + uint32_t dhcp_yiaddr; + uint32_t dhcp_siaddr_nip; + uint32_t dhcp_gateway_nip; + uint8_t dhcp_chaddr[16]; + uint8_t dhcp_sname[64]; + uint8_t dhcp_file[128]; + uint8_t dhcp_cookie[4]; +} qdf_net_dhcphdr_t; + + +/** + * qdf_net_vlanhdr_t - Vlan header + */ +typedef struct qdf_net_vlanhdr { + uint16_t tpid; +#if defined(QDF_LITTLE_ENDIAN_MACHINE) + uint16_t vid:12; /* Vlan id*/ + uint8_t cfi:1; /* reserved for CFI, don't use*/ + uint8_t prio:3; /* Priority*/ +#elif defined(QDF_BIG_ENDIAN_MACHINE) + uint8_t prio:3; /* Priority*/ + uint8_t cfi:1; /* reserved for CFI, don't use*/ + uint16_t vid:12; /* Vlan id*/ +#else +#error "Please fix" +#endif +} qdf_net_vlanhdr_t; + +typedef 
struct qdf_net_vid {
+#if defined(QDF_LITTLE_ENDIAN_MACHINE)
+	uint16_t val:12;
+	uint8_t  res:4;
+#elif defined(QDF_BIG_ENDIAN_MACHINE)
+	uint8_t  res:4;
+	uint16_t val:12;
+#else
+#error "Please fix"
+#endif
+} qdf_net_vid_t;
+
+typedef enum {
+	QDF_NET_TSO_NONE,
+	QDF_NET_TSO_IPV4,   /**< for tso ipv4 only*/
+	QDF_NET_TSO_ALL,    /**< ipv4 & ipv6*/
+} qdf_net_tso_type_t;
+
+/**
+ * qdf_net_dev_info_t - Basic device info
+ */
+typedef struct {
+	uint8_t  if_name[QDF_NET_IF_NAME_SIZE];
+	uint8_t  dev_addr[QDF_NET_MAC_ADDR_MAX_LEN];
+	uint16_t header_len;
+	uint16_t mtu_size;
+	uint32_t unit;
+} qdf_net_dev_info_t;
+
+/**
+ * qdf_nbuf_tso_t - For TCP large Segment Offload
+ */
+typedef struct {
+	qdf_net_tso_type_t type;
+	uint16_t mss;
+	uint8_t  hdr_off;
+} qdf_nbuf_tso_t;
+
+/**
+ * qdf_net_wireless_event_t - Wireless events
+ * QDF_IEEE80211_ASSOC = station associate (bss mode)
+ * QDF_IEEE80211_REASSOC = station re-associate (bss mode)
+ * QDF_IEEE80211_DISASSOC = station disassociate (bss mode)
+ * QDF_IEEE80211_JOIN = station join (ap mode)
+ * QDF_IEEE80211_LEAVE = station leave (ap mode)
+ * QDF_IEEE80211_SCAN = scan complete, results available
+ * QDF_IEEE80211_REPLAY = sequence counter replay detected
+ * QDF_IEEE80211_MICHAEL = Michael MIC failure detected
+ * QDF_IEEE80211_REJOIN = station re-associate (ap mode)
+ * QDF_CUSTOM_PUSH_BUTTON = WPS push button
+ */
+typedef enum qdf_net_wireless_events {
+	QDF_IEEE80211_ASSOC = __QDF_IEEE80211_ASSOC,
+	QDF_IEEE80211_REASSOC = __QDF_IEEE80211_REASSOC,
+	QDF_IEEE80211_DISASSOC = __QDF_IEEE80211_DISASSOC,
+	QDF_IEEE80211_JOIN = __QDF_IEEE80211_JOIN,
+	QDF_IEEE80211_LEAVE = __QDF_IEEE80211_LEAVE,
+	QDF_IEEE80211_SCAN = __QDF_IEEE80211_SCAN,
+	QDF_IEEE80211_REPLAY = __QDF_IEEE80211_REPLAY,
+	QDF_IEEE80211_MICHAEL = __QDF_IEEE80211_MICHAEL,
+	QDF_IEEE80211_REJOIN = __QDF_IEEE80211_REJOIN,
+	QDF_CUSTOM_PUSH_BUTTON = __QDF_CUSTOM_PUSH_BUTTON
+} qdf_net_wireless_event_t;
+
+/**
+ * qdf_net_ipv6_addr_t - IPv6 Address
+ */
+typedef struct { + union { + uint8_t u6_addr8[16]; + uint16_t u6_addr16[8]; + uint32_t u6_addr32[4]; + } in6_u; +#define s6_addr32 in6_u.u6_addr32 +} qdf_net_ipv6_addr_t; + +/** + * qdf_net_ipv6hdr_t - IPv6 Header + */ +typedef struct { +#if defined(QDF_LITTLE_ENDIAN_MACHINE) + uint8_t ipv6_priority:4, + ipv6_version:4; +#elif defined(QDF_BIG_ENDIAN_MACHINE) + uint8_t ipv6_version:4, + ipv6_priority:4; +#else +#error "Please fix" +#endif + uint8_t ipv6_flow_lbl[3]; + + uint16_t ipv6_payload_len; + uint8_t ipv6_nexthdr, + ipv6_hop_limit; + + qdf_net_ipv6_addr_t ipv6_saddr, + ipv6_daddr; +} qdf_net_ipv6hdr_t; + +/** + * qdf_net_icmpv6hdr_t - ICMPv6 Header + */ +typedef struct { + uint8_t icmp6_type; + uint8_t icmp6_code; + uint16_t icmp6_cksum; + + union { + uint32_t un_data32[1]; + uint16_t un_data16[2]; + uint8_t un_data8[4]; + + struct { + uint16_t identifier; + uint16_t sequence; + } u_echo; + + struct { +#if defined(QDF_LITTLE_ENDIAN_MACHINE) + uint32_t reserved:5, + override:1, + solicited:1, + router:1, + reserved2:24; +#elif defined(QDF_BIG_ENDIAN_MACHINE) + uint32_t router:1, + solicited:1, + override:1, + reserved:29; +#else +#error "Please fix" +#endif + } u_nd_advt; + + struct { + uint8_t hop_limit; +#if defined(QDF_LITTLE_ENDIAN_MACHINE) + uint8_t reserved:6, + other:1, + managed:1; + +#elif defined(QDF_BIG_ENDIAN_MACHINE) + uint8_t managed:1, + other:1, + reserved:6; +#else +#error "Please fix" +#endif + uint16_t rt_lifetime; + } u_nd_ra; + + } icmp6_dataun; + +} qdf_net_icmpv6hdr_t; + +/** + * qdf_net_nd_msg_t - Neighbor Discovery Message + */ +typedef struct { + qdf_net_icmpv6hdr_t nd_icmph; + qdf_net_ipv6_addr_t nd_target; + uint8_t nd_opt[0]; +} qdf_net_nd_msg_t; + + +static inline int32_t qdf_csum_ipv6(const in6_addr_t *saddr, + const in6_addr_t *daddr, + __u32 len, unsigned short proto, + wsum_t sum) +{ + return (int32_t)__qdf_csum_ipv6(saddr, daddr, len, proto, sum); +} + +typedef struct { + uint8_t i_fc[2]; + uint8_t i_dur[2]; + uint8_t 
i_addr1[QDF_NET_MAC_ADDR_MAX_LEN]; + uint8_t i_addr2[QDF_NET_MAC_ADDR_MAX_LEN]; + uint8_t i_addr3[QDF_NET_MAC_ADDR_MAX_LEN]; + uint8_t i_seq[2]; + uint8_t i_qos[2]; +} qdf_dot3_qosframe_t; + +typedef struct { + uint8_t ether_dhost[QDF_NET_MAC_ADDR_MAX_LEN]; + uint8_t ether_shost[QDF_NET_MAC_ADDR_MAX_LEN]; + uint16_t vlan_TCI; + uint16_t vlan_encapsulated_proto; + uint16_t ether_type; +} qdf_ethervlan_header_t; + +/** + * typedef qdf_ether_header_t - ethernet header info + * @ether_dhost: destination hardware address + * @ether_shost: source hardware address + * @ether_type: ethernet type + */ +typedef struct { + uint8_t ether_dhost[QDF_NET_ETH_LEN]; + uint8_t ether_shost[QDF_NET_ETH_LEN]; + uint16_t ether_type; +} qdf_ether_header_t; + +typedef struct { + uint8_t llc_dsap; + uint8_t llc_ssap; + union { + struct { + uint8_t control; + uint8_t format_id; + uint8_t class; + uint8_t window_x2; + } __packed type_u; + struct { + uint8_t num_snd_x2; + uint8_t num_rcv_x2; + } __packed type_i; + struct { + uint8_t control; + uint8_t num_rcv_x2; + } __packed type_s; + struct { + uint8_t control; + /* + * We cannot put the following fields in a structure + * because the structure rounding might cause padding. + */ + uint8_t frmr_rej_pdu0; + uint8_t frmr_rej_pdu1; + uint8_t frmr_control; + uint8_t frmr_control_ext; + uint8_t frmr_cause; + } __packed type_frmr; + struct { + uint8_t control; + uint8_t org_code[3]; + uint16_t ether_type; + } __packed type_snap; + struct { + uint8_t control; + uint8_t control_ext; + } __packed type_raw; + } llc_un /* XXX __packed ??? */; +} qdf_llc_t; +#endif /*_QDF_NET_TYPES_H*/ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_parse.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_parse.h new file mode 100644 index 0000000000000000000000000000000000000000..781e6542b027f8de67d40c77621a0313ba4c5aec --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_parse.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2018 The Linux Foundation. 
All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Text parsing related abstractions, not related to a specific type + */ + +#ifndef __QDF_PARSE_H +#define __QDF_PARSE_H + +#include "qdf_status.h" + +typedef QDF_STATUS (*qdf_ini_section_cb)(void *context, const char *name); +typedef QDF_STATUS (*qdf_ini_item_cb)(void *context, + const char *key, + const char *value); + +/** + * qdf_ini_parse() - parse an ini file + * @ini_path: The full file path of the ini file to parse + * @context: The caller supplied context to pass into callbacks + * @item_cb: Ini item (key/value pair) handler callback function + * Return QDF_STATUS_SUCCESS to continue parsing, else to abort + * @section_cb: Ini section header handler callback function + * Return QDF_STATUS_SUCCESS to continue parsing, else to abort + * + * The *.ini file format is a simple format consisting of a list of key/value + * pairs (items), separated by an '=' character. Comments are initiated with + * a '#' character. Sections are also supported, using '[' and ']' around the + * section name. e.g. 
+ * + * # comments are started with a '#' character + * # items are key/value string pairs, separated by the '=' character + * someKey1=someValue1 + * someKey2=someValue2 # this is also a comment + * + * # section headers are enclosed in square brackets + * [some section header] # new section begins + * someKey3=someValue3 + * + * Return: QDF_STATUS + */ +QDF_STATUS +qdf_ini_parse(const char *ini_path, void *context, + qdf_ini_item_cb item_cb, qdf_ini_section_cb section_cb); + +#endif /* __QDF_PARSE_H */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_perf.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_perf.h new file mode 100644 index 0000000000000000000000000000000000000000..aad7982c77f2e8c0885205917f7db501a8062aea --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_perf.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_perf + * This file provides OS abstraction perf API's. 
+ */ + +#ifndef _QDF_PERF_H +#define _QDF_PERF_H + +/* headers */ +#include + +#ifdef QCA_PERF_PROFILING + +/* Typedefs */ +typedef __qdf_perf_id_t qdf_perf_id_t; + +typedef int (*proc_read_t)(char *page, char **start, off_t off, int count, + int *eof, void *data); +typedef int (*proc_write_t)(struct file *file, const char *buf, + unsigned long count, void *data); +typedef void (*perf_sample_t)(struct qdf_perf_entry *entry, + uint8_t done); + +typedef void (*perf_init_t)(struct qdf_perf_entry *entry, uint32_t def_val); + +/** + * typedef proc_api_tbl_t - contains functions to read, write to proc FS + * @proc_read: function pointer to read function + * @proc_write: function pointer to write function + * @sample: function pointer to sample function + * @init: function pointer to init function + * @def_val: int contains default value + */ +typedef struct proc_api_tbl { + proc_read_t proc_read; + proc_write_t proc_write; + perf_sample_t sample; + perf_init_t init; + uint32_t def_val; +} proc_api_tbl_t; + +proc_api_tbl_t api_tbl[]; + +/* Macros */ +#define INIT_API(name, val) { \ + .proc_read = read_##name, \ + .proc_write = write_##name, \ + .sample = sample_event, \ + .init = init_##name, \ + .def_val = val, \ +} + +#define PERF_ENTRY(hdl) ((qdf_perf_entry_t *)hdl) + +#define qdf_perf_init(_parent, _id, _ctr_type) \ + __qdf_perf_init((_parent), (_id), (_ctr_type)) + +#define qdf_perf_destroy(_id) __qdf_perf_destroy((_id)) + +#define qdf_perf_start(_id) __qdf_perf_start((_id)) + +#define qdf_perf_end(_id) __qdf_perf_end((_id)) + +/* Extern declarations */ +extern __qdf_perf_id_t + __qdf_perf_init(qdf_perf_id_t parent, + uint8_t *id_name, + qdf_perf_cntr_t type)(__qdf_perf_id_t parent, + uint8_t *id_name, + uint32_t type); + +extern bool __qdf_perf_destroy(qdf_perf_id_t id)(__qdf_perf_id_t id); + +extern void __qdf_perf_start(qdf_perf_id_t id)(__qdf_perf_id_t id); +extern void __qdf_perf_end(qdf_perf_id_t id)(__qdf_perf_id_t id); + +extern int +qdf_perfmod_init(void); 
+extern void +qdf_perfmod_exit(void); + +#else /* !QCA_PERF_PROFILING */ + +#define qdf_perfmod_init() +#define qdf_perfmod_exit() +#define DECLARE_N_EXPORT_PERF_CNTR(id) +#define START_PERF_CNTR(_id, _name) +#define END_PERF_CNTR(_id) + +#endif /* QCA_PERF_PROFILING */ + +#endif /* end of _QDF_PERF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_platform.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_platform.h new file mode 100644 index 0000000000000000000000000000000000000000..417b661c7e957fb8761182eff764899fe8ffb1a1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_platform.h @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_platform.h + * This file defines platform API abstractions. 
+ */
+
+#ifndef _QDF_PLATFORM_H
+#define _QDF_PLATFORM_H
+
+/**
+ * qdf_self_recovery_callback() - callback for self recovery
+ * @reason: the reason for the recovery request
+ * @func: the caller's function name
+ * @line: the line number of the callsite
+ *
+ * Return: none
+ */
+typedef void (*qdf_self_recovery_callback)(enum qdf_hang_reason reason,
+					   const char *func,
+					   const uint32_t line);
+
+/**
+ * qdf_ssr_callback() - callback invoked on SSR [un]protect,
+ * passed the caller's name
+ *
+ * Return: none
+ */
+typedef void (*qdf_ssr_callback)(const char *);
+
+/**
+ * qdf_is_module_state_transitioning_cb() - callback to check module state
+ *
+ * Return: true if module is in transition, else false
+ */
+typedef int (*qdf_is_module_state_transitioning_cb)(void);
+
+/**
+ * qdf_is_fw_down_callback() - callback to query if fw is down
+ *
+ * Return: true if fw is down and false if fw is not down
+ */
+typedef bool (*qdf_is_fw_down_callback)(void);
+
+/**
+ * qdf_register_fw_down_callback() - API to register fw down callback
+ * @is_fw_down: callback to query if fw is down or not
+ *
+ * Return: none
+ */
+void qdf_register_fw_down_callback(qdf_is_fw_down_callback is_fw_down);
+
+/**
+ * qdf_is_fw_down() - API to check if fw is down or not
+ *
+ * Return: true: if fw is down
+ *	   false: if fw is not down
+ */
+bool qdf_is_fw_down(void);
+
+/**
+ * qdf_register_self_recovery_callback() - register self recovery callback
+ * @callback: self recovery callback
+ *
+ * Return: None
+ */
+void qdf_register_self_recovery_callback(qdf_self_recovery_callback callback);
+
+/**
+ * qdf_trigger_self_recovery() - trigger self recovery
+ *
+ * Return: None
+ */
+#define qdf_trigger_self_recovery() \
+	__qdf_trigger_self_recovery(__func__, __LINE__)
+void __qdf_trigger_self_recovery(const char *func, const uint32_t line);
+
+/**
+ * qdf_register_ssr_protect_callbacks() - register [un]protect callbacks
+ *
+ * Return: None
+ */
+void qdf_register_ssr_protect_callbacks(qdf_ssr_callback
protect, + qdf_ssr_callback unprotect); + +/** + * qdf_ssr_protect() - start SSR protection + * + * Return: None + */ +void qdf_ssr_protect(const char *caller); + +/** + * qdf_ssr_unprotect() - remove SSR protection + * + * Return: None + */ +void qdf_ssr_unprotect(const char *caller); + +/** + * qdf_register_module_state_query_callback() - register module state query + * + * Return: None + */ +void qdf_register_module_state_query_callback( + qdf_is_module_state_transitioning_cb query); + +/** + * qdf_is_module_state_transitioning() - query module state transition + * + * Return: true if in transition else false + */ +bool qdf_is_module_state_transitioning(void); + +/** + * qdf_is_recovering_callback() - callback to get driver recovering in progress + * or not + * + * Return: true if driver is doing recovering else false + */ +typedef bool (*qdf_is_recovering_callback)(void); + +/** + * qdf_register_recovering_state_query_callback() - register recover status + * query callback + * + * Return: none + */ +void qdf_register_recovering_state_query_callback( + qdf_is_recovering_callback is_recovering); + +/** + * qdf_is_recovering() - get driver recovering in progress status + * or not + * + * Return: true if driver is doing recovering else false + */ +bool qdf_is_recovering(void); +#endif /*_QDF_PLATFORM_H*/ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_status.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_status.h new file mode 100644 index 0000000000000000000000000000000000000000..00fe806028ec23d34ab2fe66e1ab609d8ef46f23 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_status.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: qdf_status
+ * QCA driver framework (QDF) status codes
+ * Basic status codes/definitions used by QDF
+ */
+
+#if !defined(__QDF_STATUS_H)
+#define __QDF_STATUS_H
+
+/**
+ * typedef QDF_STATUS - QDF error codes
+ * @QDF_STATUS_SUCCESS: success
+ * @QDF_STATUS_E_RESOURCES: system resource(other than memory) not available
+ * @QDF_STATUS_E_NOMEM: not enough memory
+ * @QDF_STATUS_E_AGAIN: try again
+ * @QDF_STATUS_E_INVAL: invalid request
+ * @QDF_STATUS_E_FAULT: system fault
+ * @QDF_STATUS_E_ALREADY: another request already in progress
+ * @QDF_STATUS_E_BADMSG: bad message
+ * @QDF_STATUS_E_BUSY: device or resource busy
+ * @QDF_STATUS_E_CANCELED: request cancelled
+ * @QDF_STATUS_E_ABORTED: request aborted
+ * @QDF_STATUS_E_NOSUPPORT: request not supported
+ * @QDF_STATUS_E_PERM: operation not permitted
+ * @QDF_STATUS_E_EMPTY: empty condition
+ * @QDF_STATUS_E_EXISTS: existence failure
+ * @QDF_STATUS_E_TIMEOUT: operation timeout
+ * @QDF_STATUS_E_FAILURE: unknown reason do not use unless nothing else applies
+ * @QDF_STATUS_E_NOENT: No such file or directory
+ * @QDF_STATUS_E_E2BIG: Arg list too long
+ * @QDF_STATUS_E_NOSPC: no space left on device
+ * @QDF_STATUS_E_ADDRNOTAVAIL: Cannot assign requested address
+ * @QDF_STATUS_E_ENXIO: No such device or address
+ * @QDF_STATUS_E_NETDOWN: network is down
+ * @QDF_STATUS_E_IO: I/O Error
+ * @QDF_STATUS_E_PENDING: pending status
+ * @QDF_STATUS_E_NETRESET: Network dropped connection
because of reset
+ * @QDF_STATUS_E_SIG: Exit due to received SIGINT
+ * @QDF_STATUS_E_PROTO: protocol error
+ * @QDF_STATUS_NOT_INITIALIZED: resource not initialized
+ * @QDF_STATUS_E_NULL_VALUE: request is null
+ * @QDF_STATUS_PMC_PENDING: request pending in pmc
+ * @QDF_STATUS_PMC_DISABLED: pmc is disabled
+ * @QDF_STATUS_PMC_NOT_NOW: pmc not ready now
+ * @QDF_STATUS_PMC_AC_POWER: pmc ac power
+ * @QDF_STATUS_PMC_SYS_ERROR: pmc system error
+ * @QDF_STATUS_HEARTBEAT_TMOUT: heartbeat timeout error
+ * @QDF_STATUS_NTH_BEACON_DELIVERY: Nth beacon delivery
+ * @QDF_STATUS_CSR_WRONG_STATE: csr in wrong state
+ * @QDF_STATUS_FT_PREAUTH_KEY_SUCCESS: ft preauth key success
+ * @QDF_STATUS_FT_PREAUTH_KEY_FAILED: ft preauth key failed
+ * @QDF_STATUS_CMD_NOT_QUEUED: command not queued
+ * @QDF_STATUS_FW_MSG_TIMEDOUT: target message timeout
+ * @QDF_STATUS_E_USB_ERROR: USB transaction error
+ * @QDF_STATUS_MAXCOMP_FAIL: Component id is more than MAX UMAC components
+ * @QDF_STATUS_COMP_DISABLED: UMAC Component is disabled
+ * @QDF_STATUS_COMP_ASYNC: UMAC component runs in asynchronous communication
+ * @QDF_STATUS_CRYPTO_PN_ERROR: PN ERROR in received frame
+ * @QDF_STATUS_CRYPTO_MIC_FAILURE: MIC failure in received frame
+ * @QDF_STATUS_CRYPTO_ENCRYPT_FAILED: encryption failed
+ * @QDF_STATUS_CRYPTO_DECRYPT_FAILED: decryption failed
+ * @QDF_STATUS_E_DEFRAG_ERROR: frame defragmentation error
+ * @QDF_STATUS_E_RANGE: result/parameter/operation was out of range
+ * @QDF_STATUS_MAX: not a real value, just a placeholder for max
+ */
+typedef enum {
+	QDF_STATUS_SUCCESS,
+	QDF_STATUS_E_RESOURCES,
+	QDF_STATUS_E_NOMEM,
+	QDF_STATUS_E_AGAIN,
+	QDF_STATUS_E_INVAL,
+	QDF_STATUS_E_FAULT,
+	QDF_STATUS_E_ALREADY,
+	QDF_STATUS_E_BADMSG,
+	QDF_STATUS_E_BUSY,
+	QDF_STATUS_E_CANCELED,
+	QDF_STATUS_E_ABORTED,
+	QDF_STATUS_E_NOSUPPORT,
+	QDF_STATUS_E_PERM,
+	QDF_STATUS_E_EMPTY,
+	QDF_STATUS_E_EXISTS,
+	QDF_STATUS_E_TIMEOUT,
+	QDF_STATUS_E_FAILURE,
+	QDF_STATUS_E_NOENT,
+	QDF_STATUS_E_E2BIG,
+	QDF_STATUS_E_NOSPC,
+	QDF_STATUS_E_ADDRNOTAVAIL,
+
QDF_STATUS_E_ENXIO, + QDF_STATUS_E_NETDOWN, + QDF_STATUS_E_IO, + QDF_STATUS_E_PENDING, + QDF_STATUS_E_NETRESET, + QDF_STATUS_E_SIG, + QDF_STATUS_E_PROTO, + QDF_STATUS_NOT_INITIALIZED, + QDF_STATUS_E_NULL_VALUE, + QDF_STATUS_PMC_PENDING, + QDF_STATUS_PMC_DISABLED, + QDF_STATUS_PMC_NOT_NOW, + QDF_STATUS_PMC_AC_POWER, + QDF_STATUS_PMC_SYS_ERROR, + QDF_STATUS_HEARTBEAT_TMOUT, + QDF_STATUS_NTH_BEACON_DELIVERY, + QDF_STATUS_CSR_WRONG_STATE, + QDF_STATUS_FT_PREAUTH_KEY_SUCCESS, + QDF_STATUS_FT_PREAUTH_KEY_FAILED, + QDF_STATUS_CMD_NOT_QUEUED, + QDF_STATUS_FW_MSG_TIMEDOUT, + QDF_STATUS_E_USB_ERROR, + QDF_STATUS_MAXCOMP_FAIL, + QDF_STATUS_COMP_DISABLED, + QDF_STATUS_COMP_ASYNC, + QDF_STATUS_CRYPTO_PN_ERROR, + QDF_STATUS_CRYPTO_MIC_FAILURE, + QDF_STATUS_CRYPTO_ENCRYPT_FAILED, + QDF_STATUS_CRYPTO_DECRYPT_FAILED, + QDF_STATUS_E_DEFRAG_ERROR, + QDF_STATUS_E_RANGE, + QDF_STATUS_MAX +} QDF_STATUS; + +#define QDF_IS_STATUS_SUCCESS(status) (QDF_STATUS_SUCCESS == (status)) +#define QDF_IS_STATUS_ERROR(status) (QDF_STATUS_SUCCESS != (status)) + +#endif /* if !defined __QDF_STATUS_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_str.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_str.h new file mode 100644 index 0000000000000000000000000000000000000000..7864a4412e3a6a73ffed019e6fb181d2d40cda9c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_str.h @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_str + * QCA driver framework (QDF) string APIs. + */ + +#ifndef __QDF_STR_H +#define __QDF_STR_H + +#include "i_qdf_str.h" +#include "qdf_types.h" + +/** + * qdf_is_space() - check if @c is a whitespace character + * @c: the character to check + * + * Whitespace characters include HT, LF, VT, FF, CR, space, and nbsp + * + * Return: true if @c is a whitespace character + */ +static inline bool qdf_is_space(char c) +{ + return __qdf_is_space(c); +} + +/** + * qdf_str_cmp - Compare two strings + * @str1: First string + * @str2: Second string + * Return: + * 0 - strings are equal + * <0 - str1 sorts lexicographically before str2 + * >0 - str1 sorts lexicographically after str2 + */ +static inline int32_t qdf_str_cmp(const char *str1, const char *str2) +{ + return __qdf_str_cmp(str1, str2); +} + +/** + * qdf_str_dup() - duplicate null-terminated string @src + * @dest: double pointer to be populated + * @src: the null-terminated string to be duplicated + * + * @dest must be freed using qdf_mem_free() to avoid memory leaks. + * + * Return: QDF_STATUS; @dest set to NULL on failure, a valid address on success + */ +QDF_STATUS qdf_str_dup(char **dest, const char *src); + +/** + * qdf_str_eq - compare two null-terminated strings for equality + * @left: the string left of the equality + * @right: the string right of the equality + * + * This is a thin wrapper over `if (strcmp(left, right) == 0)` for clarity. 
+ * + * Return: true if strings are equal + */ +static inline bool qdf_str_eq(const char *left, const char *right) +{ + return qdf_str_cmp(left, right) == 0; +} + +/** + * qdf_str_lcopy - Bounded copy from one string to another + * @dest: destination string + * @src: source string + * @dest_size: max number of bytes to copy (incl. null terminator) + * + * If the return value is >= @dest_size, @dest has been truncated. + * + * Return: length of @src + */ +static inline qdf_size_t +qdf_str_lcopy(char *dest, const char *src, uint32_t dest_size) +{ + return __qdf_str_lcopy(dest, src, dest_size); +} + +/** + * qdf_str_left_trim() - Trim any leading whitespace from @str + * @str: the string to trim + * + * Return: A pointer to the first non-space character in @str + */ +static inline const char *qdf_str_left_trim(const char *str) +{ + return __qdf_str_left_trim(str); +} + +/** + * qdf_str_len() - returns the length of a null-terminated string + * @str: input string + * + * Return: length of @str (without null terminator) + */ +static inline qdf_size_t qdf_str_len(const char *str) +{ + return __qdf_str_len(str); +} + +/** + * qdf_str_right_trim() - Trim any trailing whitespace from @str + * @str: the string to trim + * + * Note: The first trailing whitespace character is replaced with a + * null-terminator + * + * Return: None + */ +void qdf_str_right_trim(char *str); + +/** + * qdf_str_trim() - Trim any leading/trailing whitespace from @str + * @str: the string to trim + * + * Note: The first trailing whitespace character is replaced with a + * null-terminator + * + * Return: A pointer to the first non-space character in @str + */ +static inline char *qdf_str_trim(char *str) +{ + return __qdf_str_trim(str); +} + +/** + * qdf_str_nlen() - Get string length up to @limit characters + * @str: the string to get the length of + * @limit: the maximum number of characters to check + * + * Return: the less of @limit or the length of @str (without null terminator) + */ +static 
inline qdf_size_t qdf_str_nlen(const char *str, qdf_size_t limit) +{ + return __qdf_str_nlen(str, limit); +} + +#endif /* __QDF_STR_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_threads.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_threads.h new file mode 100644 index 0000000000000000000000000000000000000000..82c0fcccfb84bfc1feb6a3ff0de235ce1876947c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_threads.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: qdf_threads + * QCA driver framework (QDF) thread related APIs + */ + +#if !defined(__QDF_THREADS_H) +#define __QDF_THREADS_H + +#include +#include "i_qdf_threads.h" + +typedef __qdf_thread_t qdf_thread_t; +typedef QDF_STATUS (*qdf_thread_func)(void *context); + +/* Function declarations and documentation */ + +void qdf_sleep(uint32_t ms_interval); + +void qdf_sleep_us(uint32_t us_interval); + +void qdf_busy_wait(uint32_t us_interval); + +/** + * qdf_set_user_nice() - set thread's nice value + * @thread: pointer to thread + * @nice: nice value + * + * Return: none + */ +void qdf_set_user_nice(qdf_thread_t *thread, long nice); + +/** + * qdf_create_thread() - create a kernel thread + * @thread: pointer to thread + * @nice: nice value + * + * Return: pointer to created kernel thread + */ +qdf_thread_t *qdf_create_thread(int (*thread_handler)(void *data), void *data, + const char thread_name[]); + +/** + * qdf_thread_run() - run the given function in a new thread + * + * You must call qdf_thread_join() to avoid a resource leak! + * + * For more flexibility, use qdf_create_thread() instead. + * + * Return: a new qdf_thread pointer + */ +qdf_thread_t *qdf_thread_run(qdf_thread_func callback, void *context); + +/** + * qdf_thread_join() - signal and wait for a thread to stop + * + * This sets a flag that the given thread can check to see if it should exit. + * The thread can check to see if this flag has been set by calling + * qdf_thread_should_stop(). + * + * Return: QDF_STATUS - the return value from the thread function + */ +QDF_STATUS qdf_thread_join(qdf_thread_t *thread); + +/** + * qdf_thread_should_stop() - true if the current thread was signalled to stop + * + * If qdf_thread_join() has been called on the current thread, this API returns + * true. Otherwise, this returns false. 
+ * + * Return: true if the current thread should stop + */ +bool qdf_thread_should_stop(void); + +/** + * qdf_wake_up_process() - wake up given thread + * @thread: pointer to thread which needs to be woken up + * + * Return: none + */ +int qdf_wake_up_process(qdf_thread_t *thread); + +/** + * qdf_print_thread_trace() - prints the stack trace of the given thread + * @thread: the thread for which the stack trace will be printed + * + * Return: None + */ +void qdf_print_thread_trace(qdf_thread_t *thread); + +/** + * qdf_get_current_task() - get current task struct + * + * Return: pointer to task struct + */ +qdf_thread_t *qdf_get_current_task(void); +#endif /* __QDF_THREADS_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_time.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_time.h new file mode 100644 index 0000000000000000000000000000000000000000..fd151dabe38d3b0523e7d6289ce5c48261489be7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_time.h @@ -0,0 +1,331 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_time + * This file abstracts time related functionality. 
+ */ + +#ifndef _QDF_OS_TIME_H +#define _QDF_OS_TIME_H + +#include + +typedef __qdf_time_t qdf_time_t; +typedef __qdf_ktime_t qdf_ktime_t; + +/** + * qdf_ns_to_ktime - Converts nanoseconds to a qdf_ktime_t object + * @ns: time in nanoseconds + * + * Return: nanoseconds as qdf_ktime_t object + */ + +static inline qdf_ktime_t qdf_ns_to_ktime(uint64_t ns) +{ + return __qdf_ns_to_ktime(ns); +} + +/** + * qdf_ktime_add - Adds two qdf_ktime_t objects and returns + * a qdf_ktime_t object + * @ktime1: time as qdf_ktime_t object + * @ktime2: time as qdf_ktime_t object + * + * Return: sum of both qdf_ktime_t as qdf_ktime_t object + */ + +static inline qdf_ktime_t qdf_ktime_add(qdf_ktime_t ktime1, qdf_ktime_t ktime2) +{ + return __qdf_ktime_add(ktime1, ktime2); +} + +/** + * qdf_ktime_get - Gets the current time as qdf_ktime_t object + * + * Return: current time as qdf_ktime_t object + */ + +static inline qdf_ktime_t qdf_ktime_get(void) +{ + return __qdf_ktime_get(); +} + +/** + * qdf_ktime_add_ns - Adds qdf_ktime_t object and nanoseconds value and + * returns the qdf_ktime_t object + * @ktime: time as qdf_ktime_t object + * @ns: time in nanoseconds + * + * Return: qdf_ktime_t object + */ + +static inline qdf_ktime_t qdf_ktime_add_ns(qdf_ktime_t ktime, int64_t ns) +{ + return __qdf_ktime_add_ns(ktime, ns); +} + +/** + * qdf_ktime_to_ms - Convert the qdf_ktime_t object into milliseconds + * @ktime: time as qdf_ktime_t object + * + * Return: qdf_ktime_t in milliseconds + */ + +static inline int64_t qdf_ktime_to_ms(qdf_ktime_t ktime) +{ + return __qdf_ktime_to_ms(ktime); +} + +/** + * qdf_ktime_to_ns - Convert the qdf_ktime_t object into nanoseconds + * @ktime: time as qdf_ktime_t object + * + * Return: qdf_ktime_t in nanoseconds + */ + +static inline int64_t qdf_ktime_to_ns(qdf_ktime_t ktime) +{ + return __qdf_ktime_to_ns(ktime); +} + +/** + * qdf_system_ticks - Count the number of ticks elapsed from the time when + * the system booted + * + * Return: ticks + */ +static inline 
qdf_time_t qdf_system_ticks(void) +{ + return __qdf_system_ticks(); +} + +/** + * qdf_system_ticks_to_msecs - convert ticks to milliseconds + * @clock_ticks: Number of ticks + * + * Return: unsigned int Time in milliseconds + */ +static inline uint32_t qdf_system_ticks_to_msecs(unsigned long clock_ticks) +{ + return __qdf_system_ticks_to_msecs(clock_ticks); +} + +/** + * qdf_system_msecs_to_ticks - convert milliseconds to ticks + * @msec: Time in milliseconds + * + * Return: unsigned long number of ticks + */ +static inline qdf_time_t qdf_system_msecs_to_ticks(uint32_t msecs) +{ + return __qdf_system_msecs_to_ticks(msecs); +} + +/** + * qdf_get_system_uptime - Return a monotonically increasing time + * This increments once per HZ ticks + * + * Return: qdf_time_t system up time in ticks + */ +static inline qdf_time_t qdf_get_system_uptime(void) +{ + return __qdf_get_system_uptime(); +} + +/** + * qdf_get_bootbased_boottime_ns() - Get the bootbased time in nanoseconds + * + * qdf_get_bootbased_boottime_ns() function returns the number of nanoseconds + * that have elapsed since the system was booted. It also includes the time when + * system was suspended. + * + * Return: + * The time since system booted in nanoseconds + */ + +static inline uint64_t qdf_get_bootbased_boottime_ns(void) +{ + return __qdf_get_bootbased_boottime_ns(); +} + +/** + * qdf_get_system_timestamp - Return current timestamp + * + * Return: unsigned long timestamp in ms. + */ +static inline unsigned long qdf_get_system_timestamp(void) +{ + return __qdf_get_system_timestamp(); +} + +/** + * qdf_udelay - delay in microseconds + * @usecs: Number of microseconds to delay + * + * Return: none + */ +static inline void qdf_udelay(int usecs) +{ + __qdf_udelay(usecs); +} + +/** + * qdf_mdelay - Delay in milliseconds. 
+ * @msec: Number of milliseconds to delay + * + * Return: none + */ +static inline void qdf_mdelay(int msecs) +{ + __qdf_mdelay(msecs); +} + +/** + * qdf_system_time_after() - Check if a is later than b + * @a: Time stamp value a + * @b: Time stamp value b + * + * Return: + * true if a < b else false + */ +static inline bool qdf_system_time_after(qdf_time_t a, qdf_time_t b) +{ + return __qdf_system_time_after(a, b); +} + +/** + * qdf_system_time_before() - Check if a is before b + * @a: Time stamp value a + * @b: Time stamp value b + * + * Return: + * true if a is before b else false + */ +static inline bool qdf_system_time_before(qdf_time_t a, qdf_time_t b) +{ + return __qdf_system_time_before(a, b); +} + +/** + * qdf_system_time_after_eq() - Check if a atleast as recent as b, if not + * later + * @a: Time stamp value a + * @b: Time stamp value b + * + * Return: + * true if a >= b else false + */ +static inline bool qdf_system_time_after_eq(qdf_time_t a, qdf_time_t b) +{ + return __qdf_system_time_after_eq(a, b); +} + +/** + * enum qdf_timestamp_unit - what unit the qdf timestamp is in + * @KERNEL_LOG: boottime time in uS (micro seconds) + * @QTIMER: QTIME in (1/19200)S + * + * This enum is used to distinguish which timer source is used. + */ +enum qdf_timestamp_unit { + KERNEL_LOG, + QTIMER, +}; + +#ifdef QCA_WIFI_3_0_ADRASTEA +#define QDF_LOG_TIMESTAMP_UNIT QTIMER +#define QDF_LOG_TIMESTAMP_CYCLES_PER_10_US 192 + +static inline uint64_t qdf_log_timestamp_to_usecs(uint64_t time) +{ + /* + * Try to preserve precision by multiplying by 10 first. + * If that would cause a wrap around, divide first instead. 
+ */ + if (time * 10 < time) { + do_div(time, QDF_LOG_TIMESTAMP_CYCLES_PER_10_US); + return time * 10; + } + + time = time * 10; + do_div(time, QDF_LOG_TIMESTAMP_CYCLES_PER_10_US); + + return time; +} +#else +#define QDF_LOG_TIMESTAMP_UNIT KERNEL_LOG +#define QDF_LOG_TIMESTAMP_CYCLES_PER_10_US 10 + +static inline uint64_t qdf_log_timestamp_to_usecs(uint64_t time) +{ + /* timestamps are already in micro seconds */ + return time; +} +#endif + +static inline void qdf_log_timestamp_to_secs(uint64_t time, uint64_t *secs, + uint64_t *usecs) +{ + *secs = qdf_log_timestamp_to_usecs(time); + *usecs = do_div(*secs, 1000000ul); +} + +static inline uint64_t qdf_usecs_to_log_timestamp(uint64_t usecs) +{ + return (usecs * QDF_LOG_TIMESTAMP_CYCLES_PER_10_US) / 10; +} + +/** + * qdf_get_log_timestamp - get time stamp for logging + * For adrastea this API returns QTIMER tick which is needed to synchronize + * host and fw log timestamps + * For ROME and other discrete solution this API returns system boot time stamp + * + * Return: + * QTIMER ticks(19.2MHz) for adrastea + * System tick for rome and other future discrete solutions + */ +static inline uint64_t qdf_get_log_timestamp(void) +{ + return __qdf_get_log_timestamp(); +} + +/** + * qdf_get_log_timestamp_usecs() - get time stamp for logging in microseconds + * + * Return: The current logging timestamp normalized to microsecond precision + */ +static inline uint64_t qdf_get_log_timestamp_usecs(void) +{ + return qdf_log_timestamp_to_usecs(qdf_get_log_timestamp()); +} + +/** + * qdf_get_monotonic_boottime - get monotonic kernel boot time + * This API is similar to qdf_get_system_boottime but it includes + * time spent in suspend. 
+ * + * Return: Time in microseconds + */ +static inline uint64_t qdf_get_monotonic_boottime(void) +{ + return __qdf_get_monotonic_boottime(); +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_timer.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_timer.h new file mode 100644 index 0000000000000000000000000000000000000000..f7ac35e41365e98ccbf767f4e39de367dc7639dc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_timer.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2014-2016, 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_timer + * This file abstracts OS timers running in soft IRQ context. 
+ */ + +#ifndef _QDF_TIMER_H +#define _QDF_TIMER_H + +#include +#include + +typedef struct __qdf_timer_t qdf_timer_t; + +/** + * qdf_timer_init() - initialize a timer + * @hdl: OS handle + * @timer: Timer object pointer + * @func: Timer function + * @arg: Argument of timer function + * @type: deferrable or non deferrable timer type + * + * Timer type QDF_TIMER_TYPE_SW means its a deferrable sw timer which will + * not cause CPU wake upon expiry + * Timer type QDF_TIMER_TYPE_WAKE_APPS means its a non-deferrable timer which + * will cause CPU wake up on expiry + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +qdf_timer_init(qdf_handle_t hdl, qdf_timer_t *timer, qdf_timer_func_t func, + void *arg, QDF_TIMER_TYPE type) +{ + return __qdf_timer_init(timer, func, arg, type); +} + +/** + * qdf_timer_start() - start a timer + * @timer: timer to start + * @msec: Expiration period in milliseconds + * + * Return: none + */ +static inline void qdf_timer_start(qdf_timer_t *timer, int msec) +{ + __qdf_timer_start(timer, msec); +} + +/** + * qdf_timer_mod() - modify the timeout on a timer + * @timer: timer to modify + * @msec: Expiration period in milliseconds + * + * If @timer is not active, it will be activated. + * + * Return: none + */ +static inline void qdf_timer_mod(qdf_timer_t *timer, int msec) +{ + __qdf_timer_mod(timer, msec); +} + +/** + * qdf_timer_stop() - cancel a timer + * @timer: timer to cancel + * + * Note! The timer callback may be executing when this function call returns. + * If you want to ensure that it is not, use qdf_timer_sync_cancel() instead. + * + * Return: true if @timer was deactivated, false if @timer was not active + */ +static inline bool qdf_timer_stop(qdf_timer_t *timer) +{ + return __qdf_timer_stop(timer); +} + +/** + * qdf_timer_sync_cancel - Cancel a timer synchronously + * @timer: timer to cancel + * + * If the timer callback is already running, this function blocks until it + * completes. 
+ * + * Return: true if @timer was deactivated, false if @timer was not active + */ +static inline bool qdf_timer_sync_cancel(qdf_timer_t *timer) +{ + return __qdf_timer_sync_cancel(timer); +} + +/** + * qdf_timer_free() - free a timer + * @timer: timer to free + * + * If the timer callback is already running, this function blocks until it + * completes. + * + * Return: none + */ +static inline void qdf_timer_free(qdf_timer_t *timer) +{ + __qdf_timer_free(timer); +} + +#endif /* _QDF_TIMER_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_trace.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..03c69da15b39f2bcc2b90bd5f181fdca703327ba --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_trace.h @@ -0,0 +1,1332 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if !defined(__QDF_TRACE_H) +#define __QDF_TRACE_H + +/** + * DOC: qdf_trace + * QCA driver framework trace APIs + * Trace, logging, and debugging definitions and APIs + */ + +/* Include Files */ +#include /* For QDF_MODULE_ID... */ +#include /* For va_list... 
*/ +#include +#include +#include +#include + + +/* Type declarations */ + +#define FL(x) "%s: %d: " x, __func__, __LINE__ + +#define QDF_TRACE_BUFFER_SIZE (512) + +/* + * Extracts the 8-bit group id from the wmi command id by performing the + * reverse operation of WMI_CMD_GRP_START_ID + */ +#define QDF_WMI_MTRACE_GRP_ID(message_id) (((message_id) >> 12) & 0xFF) +/* + * Number of bits reserved for WMI mtrace command id + */ + #define QDF_WMI_MTRACE_CMD_NUM_BITS 7 +/* + * Extracts the 7-bit group specific command id from the wmi command id + */ +#define QDF_WMI_MTRACE_CMD_ID(message_id) ((message_id) & 0x7F) + +#ifdef CONFIG_MCL +#define QDF_DEFAULT_TRACE_LEVEL \ + ((1 << QDF_TRACE_LEVEL_FATAL) | (1 << QDF_TRACE_LEVEL_ERROR)) +#else +#define QDF_DEFAULT_TRACE_LEVEL (1 << QDF_TRACE_LEVEL_INFO) +#endif + +#define QDF_CATEGORY_INFO_U16(val) (((val >> 16) & 0x0000FFFF)) +#define QDF_TRACE_LEVEL_INFO_L16(val) (val & 0x0000FFFF) + +typedef int (qdf_abstract_print)(void *priv, const char *fmt, ...); + +/* + * Log levels + */ +#define QDF_DEBUG_FUNCTRACE 0x01 +#define QDF_DEBUG_LEVEL0 0x02 +#define QDF_DEBUG_LEVEL1 0x04 +#define QDF_DEBUG_LEVEL2 0x08 +#define QDF_DEBUG_LEVEL3 0x10 +#define QDF_DEBUG_ERROR 0x20 +#define QDF_DEBUG_CFG 0x40 + + +/* DP Trace Implementation */ +#ifdef CONFIG_DP_TRACE +#define DPTRACE(p) p +#define DPTRACE_PRINT(args...) \ + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG, args) +#else +#define DPTRACE(p) +#define DPTRACE_PRINT(args...) +#endif + +/* By default Data Path module will have all log levels enabled, except debug + * log level. 
Debug level will be left up to the framework or user space modules + * to be enabled when issue is detected + */ +#define QDF_DATA_PATH_TRACE_LEVEL \ + ((1 << QDF_TRACE_LEVEL_FATAL) | (1 << QDF_TRACE_LEVEL_ERROR) | \ + (1 << QDF_TRACE_LEVEL_WARN) | (1 << QDF_TRACE_LEVEL_INFO) | \ + (1 << QDF_TRACE_LEVEL_INFO_HIGH) | (1 << QDF_TRACE_LEVEL_INFO_MED) | \ + (1 << QDF_TRACE_LEVEL_INFO_LOW)) + +/* Preprocessor definitions and constants */ +#define ASSERT_BUFFER_SIZE (512) + +#define QDF_TRACE_DEFAULT_PDEV_ID 0xff +#define MAX_QDF_TRACE_RECORDS 4000 +#define INVALID_QDF_TRACE_ADDR 0xffffffff +#define DEFAULT_QDF_TRACE_DUMP_COUNT 0 + +/* + * first parameter to iwpriv command - dump_dp_trace + * iwpriv wlan0 dump_dp_trace 0 0 -> dump full buffer + * iwpriv wlan0 dump_dp_trace 1 0 -> enable live view mode + * iwpriv wlan0 dump_dp_trace 2 0 -> clear dp trace buffer + * iwpriv wlan0 dump_dp_trace 3 0 -> disable live view mode + */ +#define DUMP_DP_TRACE 0 +#define ENABLE_DP_TRACE_LIVE_MODE 1 +#define CLEAR_DP_TRACE_BUFFER 2 +#define DISABLE_DP_TRACE_LIVE_MODE 3 + + +#ifdef TRACE_RECORD + +#define MTRACE(p) p + +#else +#define MTRACE(p) do { } while (0) + +#endif +#define NO_SESSION 0xFF + +/** + * typedef struct qdf_trace_record_s - keep trace record + * @qtime: qtimer ticks + * @time: user timestamp + * @module: module name + * @code: hold record of code + * @session: hold record of session + * @data: hold data + * @pid: hold pid of the process + */ +typedef struct qdf_trace_record_s { + uint64_t qtime; + char time[18]; + uint8_t module; + uint8_t code; + uint16_t session; + uint32_t data; + uint32_t pid; +} qdf_trace_record_t, *tp_qdf_trace_record; + +/** + * typedef struct s_qdf_trace_data - MTRACE logs are stored in ring buffer + * @head: position of first record + * @tail: position of last record + * @num: count of total record + * @num_since_last_dump: count from last dump + * @enable: config for controlling the trace + * @dump_count: Dump after number of records reach 
this number + */ +typedef struct s_qdf_trace_data { + uint32_t head; + uint32_t tail; + uint32_t num; + uint16_t num_since_last_dump; + uint8_t enable; + uint16_t dump_count; +} t_qdf_trace_data; + +#define CASE_RETURN_STRING(str) case ((str)): return (uint8_t *)(# str); + +#ifndef MAX_QDF_DP_TRACE_RECORDS +#define MAX_QDF_DP_TRACE_RECORDS 2000 +#endif +#define QDF_DP_TRACE_RECORD_SIZE 40 +#define INVALID_QDF_DP_TRACE_ADDR 0xffffffff +#define QDF_DP_TRACE_VERBOSITY_HIGH 4 +#define QDF_DP_TRACE_VERBOSITY_MEDIUM 3 +#define QDF_DP_TRACE_VERBOSITY_LOW 2 +#define QDF_DP_TRACE_VERBOSITY_ULTRA_LOW 1 +#define QDF_DP_TRACE_VERBOSITY_BASE 0 + +/** + * enum QDF_DP_TRACE_ID - Generic ID to identify various events in data path + * @QDF_DP_TRACE_INVALID - invalid + * @QDF_DP_TRACE_DROP_PACKET_RECORD - record drop packet + * @QDF_DP_TRACE_EAPOL_PACKET_RECORD - record EAPOL packet + * @QDF_DP_TRACE_DHCP_PACKET_RECORD - record DHCP packet + * @QDF_DP_TRACE_ARP_PACKET_RECORD - record ARP packet + * @QDF_DP_TRACE_MGMT_PACKET_RECORD - record MGMT pacekt + * @QDF_DP_TRACE_EVENT_RECORD - record events + * @QDF_DP_TRACE_BASE_VERBOSITY - below this are part of base verbosity + * @QDF_DP_TRACE_ICMP_PACKET_RECORD - record ICMP packet + * @QDF_DP_TRACE_ICMPv6_PACKET_RECORD - record ICMPv6 packet + * @QDF_DP_TRACE_HDD_TX_TIMEOUT - HDD tx timeout + * @QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT- SOFTAP HDD tx timeout + * @QDF_DP_TRACE_ULTRA_LOW_VERBOSITY - Below this is not logged for >4PPS + * @QDF_DP_TRACE_TX_PACKET_RECORD - record 32 bytes of tx pkt at any layer + * @QDF_DP_TRACE_RX_PACKET_RECORD - record 32 bytes of rx pkt at any layer + * @QDF_DP_TRACE_HDD_TX_PACKET_RECORD - record 32 bytes of tx pkt at HDD + * @QDF_DP_TRACE_HDD_RX_PACKET_RECORD - record 32 bytes of rx pkt at HDD + * @QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD - record data bytes of tx pkt at LI_DP + * @QDF_DP_TRACE_LI_DP_RX_PACKET_RECORD - record data bytes of rx pkt at LI_DP + * @QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD - tx 
completion ptr record for + * lithium + * @QDF_DP_TRACE_FREE_PACKET_PTR_RECORD - tx completion ptr record + * @QDF_DP_TRACE_LOW_VERBOSITY - below this are part of low verbosity + * @QDF_DP_TRACE_HDD_TX_PACKET_PTR_RECORD - HDD layer ptr record + * @QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD - Lithium DP layer ptr record + * @QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD - HDD RX record + * @QDF_DP_TRACE_CE_PACKET_PTR_RECORD - CE layer ptr record + * @QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD- CE fastpath ptr record + * @QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD- CE fastpath error record + * @QDF_DP_TRACE_RX_HTT_PACKET_PTR_RECORD - HTT RX record + * @QDF_DP_TRACE_RX_OFFLOAD_HTT_PACKET_PTR_RECORD- HTT RX offload record + * @QDF_DP_TRACE_RX_LI_DP_PACKET_PTR_RECORD - Lithium DP RX record + * @QDF_DP_TRACE_MED_VERBOSITY - below this are part of med verbosity + * @QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD -tx queue ptr record + * @QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD - txrx packet ptr record + * @QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD - txrx fast path record + * @QDF_DP_TRACE_HTT_PACKET_PTR_RECORD - htt packet ptr record + * @QDF_DP_TRACE_HTC_PACKET_PTR_RECORD - htc packet ptr record + * @QDF_DP_TRACE_HIF_PACKET_PTR_RECORD - hif packet ptr record + * @QDF_DP_TRACE_RX_TXRX_PACKET_PTR_RECORD - txrx packet ptr record + * @QDF_DP_TRACE_LI_DP_NULL_RX_PACKET_RECORD + * - record data bytes of rx null_queue pkt at LI_DP + * @QDF_DP_TRACE_HIGH_VERBOSITY - below this are part of high verbosity + */ + +enum QDF_DP_TRACE_ID { + QDF_DP_TRACE_INVALID, + QDF_DP_TRACE_DROP_PACKET_RECORD, + QDF_DP_TRACE_EAPOL_PACKET_RECORD, + QDF_DP_TRACE_DHCP_PACKET_RECORD, + QDF_DP_TRACE_ARP_PACKET_RECORD, + QDF_DP_TRACE_MGMT_PACKET_RECORD, + QDF_DP_TRACE_EVENT_RECORD, + QDF_DP_TRACE_BASE_VERBOSITY, + QDF_DP_TRACE_ICMP_PACKET_RECORD, + QDF_DP_TRACE_ICMPv6_PACKET_RECORD, + QDF_DP_TRACE_HDD_TX_TIMEOUT, + QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT, + QDF_DP_TRACE_ULTRA_LOW_VERBOSITY, + QDF_DP_TRACE_TX_PACKET_RECORD, + 
QDF_DP_TRACE_RX_PACKET_RECORD, + QDF_DP_TRACE_HDD_TX_PACKET_RECORD, + QDF_DP_TRACE_HDD_RX_PACKET_RECORD, + QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD, + QDF_DP_TRACE_LI_DP_RX_PACKET_RECORD, + QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD, + QDF_DP_TRACE_FREE_PACKET_PTR_RECORD, + QDF_DP_TRACE_LOW_VERBOSITY, + QDF_DP_TRACE_HDD_TX_PACKET_PTR_RECORD, + QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD, + QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD, + QDF_DP_TRACE_CE_PACKET_PTR_RECORD, + QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD, + QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD, + QDF_DP_TRACE_RX_HTT_PACKET_PTR_RECORD, + QDF_DP_TRACE_RX_OFFLOAD_HTT_PACKET_PTR_RECORD, + QDF_DP_TRACE_RX_LI_DP_PACKET_PTR_RECORD, + QDF_DP_TRACE_MED_VERBOSITY, + QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD, + QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD, + QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD, + QDF_DP_TRACE_HTT_PACKET_PTR_RECORD, + QDF_DP_TRACE_HTC_PACKET_PTR_RECORD, + QDF_DP_TRACE_HIF_PACKET_PTR_RECORD, + QDF_DP_TRACE_RX_TXRX_PACKET_PTR_RECORD, + QDF_DP_TRACE_LI_DP_NULL_RX_PACKET_RECORD, + QDF_DP_TRACE_HIGH_VERBOSITY, + QDF_DP_TRACE_MAX +}; + +/** + * qdf_proto_dir - direction + * @QDF_TX: TX direction + * @QDF_RX: RX direction + * @QDF_NA: not applicable + */ +enum qdf_proto_dir { + QDF_TX, + QDF_RX, + QDF_NA +}; + +/** + * struct qdf_dp_trace_ptr_buf - pointer record buffer + * @cookie: cookie value + * @msdu_id: msdu_id + * @status: completion status + */ +struct qdf_dp_trace_ptr_buf { + uint64_t cookie; + uint16_t msdu_id; + uint16_t status; +}; + +/** + * struct qdf_dp_trace_proto_buf - proto packet buffer + * @sa: source address + * @da: destination address + * @vdev_id : vdev id + * @type: packet type + * @subtype: packet subtype + * @dir: direction + */ +struct qdf_dp_trace_proto_buf { + struct qdf_mac_addr sa; + struct qdf_mac_addr da; + uint8_t vdev_id; + uint8_t type; + uint8_t subtype; + uint8_t dir; +}; + +/** + * struct qdf_dp_trace_mgmt_buf - mgmt packet buffer + * @vdev_id : vdev id + * @type: packet type + * @subtype: 
@pid : process id which stored the data in this record + * @pdev_id: pdev id associated with this record + */
+ * @dynamic_verbosity_modify: Dynamic user configured verbosity overrides all + * @print_pkt_cnt: count of number of packets printed in live mode + * @high_tput_thresh: thresh beyond which live mode is turned off + * @thresh_time_limit: max time, in terms of BW timer intervals to wait, + * for determining if high_tput_thresh has been crossed. ~1s + * @arp_req: stats for arp reqs + * @arp_resp: stats for arp resps + * @icmp_req: stats for icmp reqs + * @icmp_resp: stats for icmp resps + * @dhcp_disc: stats for dhcp discover msgs + * @dhcp_req: stats for dhcp req msgs + * @dhcp_off: stats for dhcp offer msgs + * @dhcp_ack: stats for dhcp ack msgs + * @dhcp_nack: stats for dhcp nack msgs + * @dhcp_others: stats for other dhcp pkts types + * @eapol_m1: stats for eapol m1 + * @eapol_m2: stats for eapol m2 + * @eapol_m3: stats for eapol m3 + * @eapol_m4: stats for eapol m4 + * @eapol_others: stats for other eapol pkt types + * @icmpv6_req: stats for icmpv6 reqs + * @icmpv6_resp: stats for icmpv6 resps + * @icmpv6_ns: stats for icmpv6 nss + * @icmpv6_na: stats for icmpv6 nas + * @icmpv6_rs: stats for icmpv6 rss + * @icmpv6_ra: stats for icmpv6 ras + */ +struct s_qdf_dp_trace_data { + uint32_t head; + uint32_t tail; + uint32_t num; + uint8_t proto_bitmap; + uint8_t no_of_record; + uint16_t num_records_to_dump; + uint16_t dump_counter; + uint8_t verbosity; + uint8_t ini_conf_verbosity; + bool enable; + bool live_mode_config; + bool live_mode; + uint32_t curr_pos; + uint32_t saved_tail; + bool force_live_mode; + bool dynamic_verbosity_modify; + uint8_t print_pkt_cnt; + uint8_t high_tput_thresh; + uint16_t thresh_time_limit; + /* Stats */ + uint32_t tx_count; + uint32_t rx_count; + u16 arp_req; + u16 arp_resp; + u16 dhcp_disc; + u16 dhcp_req; + u16 dhcp_off; + u16 dhcp_ack; + u16 dhcp_nack; + u16 dhcp_others; + u16 eapol_m1; + u16 eapol_m2; + u16 eapol_m3; + u16 eapol_m4; + u16 eapol_others; + u16 icmp_req; + u16 icmp_resp; + u16 icmpv6_req; + u16 icmpv6_resp; + u16 
+/* Function declarations and documentation */
QDF_TRACE_LEVEL_NONE and QDF_TRACE_LEVEL_ALL + * are not valid input and will return false + * + * Return: + * false - the specified trace level for the specified module is OFF + * true - the specified trace level for the specified module is ON + */ +bool qdf_trace_get_level(QDF_MODULE_ID module, QDF_TRACE_LEVEL level); + +typedef void (*tp_qdf_trace_cb)(void *p_mac, tp_qdf_trace_record, uint16_t); +typedef void (*tp_qdf_state_info_cb) (char **buf, uint16_t *size); +#ifdef WLAN_FEATURE_MEMDUMP_ENABLE +void qdf_register_debugcb_init(void); +void qdf_register_debug_callback(QDF_MODULE_ID module_id, + tp_qdf_state_info_cb qdf_state_infocb); +QDF_STATUS qdf_state_info_dump_all(char *buf, uint16_t size, + uint16_t *driver_dump_size); +#else /* WLAN_FEATURE_MEMDUMP_ENABLE */ +static inline void qdf_register_debugcb_init(void) +{ +} +#endif /* WLAN_FEATURE_MEMDUMP_ENABLE */ + +#ifdef TRACE_RECORD +void qdf_trace_register(QDF_MODULE_ID, tp_qdf_trace_cb); +void qdf_trace_init(void); +void qdf_trace(uint8_t module, uint8_t code, uint16_t session, uint32_t data); +void qdf_trace_enable(uint32_t, uint8_t enable); +void qdf_trace_dump_all(void *, uint8_t, uint8_t, uint32_t, uint32_t); +QDF_STATUS qdf_trace_spin_lock_init(void); +#else +#ifdef CONFIG_MCL +static inline +void qdf_trace_init(void) +{ +} + +static inline +void qdf_trace_enable(uint32_t bitmask_of_module_id, uint8_t enable) +{ +} + +static inline +void qdf_trace(uint8_t module, uint8_t code, uint16_t session, uint32_t data) +{ +} + +static inline +void qdf_trace_dump_all(void *p_mac, uint8_t code, uint8_t session, + uint32_t count, uint32_t bitmask_of_module) +{ +} + +static inline +QDF_STATUS qdf_trace_spin_lock_init(void) +{ + return QDF_STATUS_E_INVAL; +} +#endif +#endif + +#ifdef ENABLE_MTRACE_LOG +/** + * qdf_mtrace_log() - Logs a message tracepoint to DIAG + * Infrastructure. + * @src_module: Enum of source module (basically module id) + * from where the message with message_id is posted. 
+ * @dst_module: Enum of destination module (basically module id) + * to which the message with message_id is posted. + * @message_id: Id of the message to be posted + * @vdev_id: Vdev Id + * + * This function logs to the DIAG Infrastructure a tracepoint for a + * message being sent from a source module to a destination module + * with a specific ID for the benefit of a specific vdev. + * For non-vdev messages vdev_id will be NO_SESSION + * Return: None + */ +void qdf_mtrace_log(QDF_MODULE_ID src_module, QDF_MODULE_ID dst_module, + uint16_t message_id, uint8_t vdev_id); +#else +static inline +void qdf_mtrace_log(QDF_MODULE_ID src_module, QDF_MODULE_ID dst_module, + uint16_t message_id, uint8_t vdev_id) +{ +} +#endif + +#ifdef TRACE_RECORD +/** + * qdf_mtrace() - puts the messages in to ring-buffer + * and logs a message tracepoint to DIAG Infrastructure. + * @src_module: Enum of source module (basically module id) + * from where the message with message_id is posted. + * @dst_module: Enum of destination module (basically module id) + * to which the message with message_id is posted. + * @message_id: Id of the message to be posted + * @vdev_id: Vdev Id + * @data: Actual message contents + * + * This function will be called from each module which wants to record the + * messages in circular queue. Before calling this function make sure you + * have registered your module with qdf through qdf_trace_register function. + * In addition of the recording the messages in circular queue this function + * will log the message tracepoint to the DIAG infrastructure. + * these logs will be later used by post processing script. 
+ * + * Return: None + */ +void qdf_mtrace(QDF_MODULE_ID src_module, QDF_MODULE_ID dst_module, + uint16_t message_id, uint8_t vdev_id, uint32_t data); +#else +static inline +void qdf_mtrace(QDF_MODULE_ID src_module, QDF_MODULE_ID dst_module, + uint16_t message_id, uint8_t vdev_id, uint32_t data) +{ +} +#endif + +#ifdef CONFIG_DP_TRACE +void qdf_dp_set_proto_bitmap(uint32_t val); +void qdf_dp_trace_set_verbosity(uint32_t val); +void qdf_dp_set_no_of_record(uint32_t val); +#define QDF_DP_TRACE_RECORD_INFO_LIVE (0x1) +#define QDF_DP_TRACE_RECORD_INFO_THROTTLED (0x1 << 1) + +bool qdf_dp_trace_log_pkt(uint8_t session_id, struct sk_buff *skb, + enum qdf_proto_dir dir, uint8_t pdev_id); +void qdf_dp_trace_init(bool live_mode_config, uint8_t thresh, + uint16_t time_limit, uint8_t verbosity, + uint8_t proto_bitmap); +void qdf_dp_trace_spin_lock_init(void); +void qdf_dp_trace_set_value(uint8_t proto_bitmap, uint8_t no_of_records, + uint8_t verbosity); +void qdf_dp_trace_set_track(qdf_nbuf_t nbuf, enum qdf_proto_dir dir); +void qdf_dp_trace(qdf_nbuf_t nbuf, enum QDF_DP_TRACE_ID code, uint8_t pdev_id, + uint8_t *data, uint8_t size, enum qdf_proto_dir dir); +void qdf_dp_trace_dump_all(uint32_t count, uint8_t pdev_id); + +/** + * qdf_dpt_get_curr_pos_debugfs() - get curr position to start read + * @file: debugfs file to read + * @state: state to control read to debugfs file + * + * Return: curr pos + */ +uint32_t qdf_dpt_get_curr_pos_debugfs(qdf_debugfs_file_t file, + enum qdf_dpt_debugfs_state state); +/** + * qdf_dpt_dump_stats_debugfs() - dump DP Trace stats to debugfs file + * @file: debugfs file to read + * @curr_pos: curr position to start read + * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_dpt_dump_stats_debugfs(qdf_debugfs_file_t file, + uint32_t curr_pos); + +/** + * qdf_dpt_set_value_debugfs() - set value of DP Trace debugfs params + * @proto_bitmap: defines which protocol to be traced + * @no_of_record: defines every nth packet to be traced + * @verbosity : defines 
verbosity level + * @num_records_to_dump: defines number of records to be dumped + * + * Return: none + */ +void qdf_dpt_set_value_debugfs(uint8_t proto_bitmap, uint8_t no_of_record, + uint8_t verbosity, uint16_t num_records_to_dump); + + +/** + * qdf_dp_trace_dump_stats() - dump DP Trace stats + * + * Return: none + */ +void qdf_dp_trace_dump_stats(void); +typedef void (*tp_qdf_dp_trace_cb)(struct qdf_dp_trace_record_s*, + uint16_t, uint8_t, uint8_t info); +/** + * qdf_dp_display_record() - Displays a record in DP trace + * @record: pointer to a record in DP trace + * @index: record index + * @pdev_id: pdev id for the mgmt pkt + * @info: info used to display pkt (live mode, throttling) + * + * Return: None + */ +void qdf_dp_display_record(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, + uint8_t info); + +/** + * qdf_dp_display_ptr_record() - display record + * @record: dptrace record + * @rec_index: index + * @pdev_id: pdev id for the mgmt pkt + * @info: info used to display pkt (live mode, throttling) + * + * Return: none + */ +void qdf_dp_display_ptr_record(struct qdf_dp_trace_record_s *record, + uint16_t rec_index, uint8_t pdev_id, + uint8_t info); + +/** + * qdf_dp_display_proto_pkt_debug() - display proto packet only + * for debug. + * @record: dptrace record + * @index: index + * @pdev_id: pdev id for the mgmt pkt + * @info: info used to display pkt (live mode, throttling) + * + * Return: none + */ +void qdf_dp_display_proto_pkt_debug(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, + uint8_t info); + +/** + * qdf_dp_display_proto_pkt_always() - display proto packets all + * the time. 
qdf_dp_trace_apply_tput_policy() - Change verbosity based on the TPUT + * @is_data_traffic: Is traffic more than low TPUT threshold
qdf_dp_display_mgmt_pkt() - display mgmt packet
structures and are allocated only if TSOSEG_DEBUG is defined.
+ * When allocated, at the time of the tso_seg_pool initialization, + * which goes with tx_desc initialization (1:1), each structure holds + * a number of (currently 16) history entries, basically describing + * what operation has been performed on this particular tso_seg_elem. + * This history buffer is a circular buffer and the current index is + * held in an atomic variable called cur. It is incremented every + * operation. Each of these operations are added with the function + * qdf_tso_seg_dbg_record. + * For each segment, this initialization function MUST be called PRIOR + * TO any _dbg_record() function calls. + * On free, qdf_tso_seg_elem structure is cleared (using qdf_tso_seg_dbg_zero) + * which clears the tso_desc, BUT DOES NOT CLEAR THE HISTORY element. + * + * Return: + * None + */ +static inline +void qdf_tso_seg_dbg_init(struct qdf_tso_seg_elem_t *tsoseg) +{ + tsoseg->dbg.txdesc = NULL; + qdf_atomic_init(&tsoseg->dbg.cur); /* history empty */ +} + +/** + * qdf_tso_seg_dbg_record - add a history entry to TSO debug structure + * @tsoseg : structure to initialize + * @id : operation ID (identifies the caller) + * + * Adds a history entry to the history circular buffer. Each entry + * contains an operation id (caller, as currently each ID is used only + * once in the source, so it directly identifies the src line that invoked + * the recording. + * + * qdf_tso_seg_dbg_record CAN ONLY BE CALLED AFTER the entry is initialized + * by qdf_tso_seg_dbg_init. + * + * The entry to be added is written at the location pointed by the atomic + * variable called cur. Cur is an ever increasing atomic variable. It is + * masked so that only the lower 4 bits are used (16 history entries). 
+ * + * Return: + * int: the entry this record was recorded at + */ +static inline +int qdf_tso_seg_dbg_record(struct qdf_tso_seg_elem_t *tsoseg, short id) +{ + int rc = -1; + unsigned int c; + + qdf_assert(tsoseg); + + if (id == TSOSEG_LOC_ALLOC) { + c = qdf_atomic_read(&tsoseg->dbg.cur); + /* dont crash on the very first alloc on the segment */ + c &= 0x0f; + /* allow only INIT and FREE ops before ALLOC */ + if (tsoseg->dbg.h[c].id >= id) + qdf_tso_seg_dbg_bug("Rogue TSO seg alloc"); + } + c = qdf_atomic_inc_return(&tsoseg->dbg.cur); + + c &= 0x0f; + tsoseg->dbg.h[c].ts = qdf_get_log_timestamp(); + tsoseg->dbg.h[c].id = id; + rc = c; + + return rc; +}; + +static inline void +qdf_tso_seg_dbg_setowner(struct qdf_tso_seg_elem_t *tsoseg, void *owner) +{ + if (tsoseg) + tsoseg->dbg.txdesc = owner; +}; + +static inline void +qdf_tso_seg_dbg_zero(struct qdf_tso_seg_elem_t *tsoseg) +{ + memset(tsoseg, 0, offsetof(struct qdf_tso_seg_elem_t, dbg)); + return; +}; + +#else +static inline +void qdf_tso_seg_dbg_init(struct qdf_tso_seg_elem_t *tsoseg) +{ +}; +static inline +int qdf_tso_seg_dbg_record(struct qdf_tso_seg_elem_t *tsoseg, short id) +{ + return 0; +}; +static inline void qdf_tso_seg_dbg_bug(char *msg) +{ +}; +static inline void +qdf_tso_seg_dbg_setowner(struct qdf_tso_seg_elem_t *tsoseg, void *owner) +{ +}; +static inline int +qdf_tso_seg_dbg_zero(struct qdf_tso_seg_elem_t *tsoseg) +{ + memset(tsoseg, 0, sizeof(struct qdf_tso_seg_elem_t)); + return 0; +}; + +#endif /* TSOSEG_DEBUG */ + +void qdf_trace_hex_dump(QDF_MODULE_ID module, QDF_TRACE_LEVEL level, + void *data, int buf_len); + +#define ERROR_CODE -1 +#define QDF_MAX_NAME_SIZE 32 +#define MAX_PRINT_CONFIG_SUPPORTED 32 + +#define MAX_SUPPORTED_CATEGORY QDF_MODULE_ID_MAX + +/** + * qdf_set_pidx() - Sets the global qdf_pidx. + * @pidx : Index of print control object assigned to the module + * + */ +void qdf_set_pidx(int pidx); + +/** + * qdf_get_pidx() - Returns the global qdf_pidx. 
+ * + * Return : Current qdf print index. + */ +int qdf_get_pidx(void); +/* + * Shared print control index + * for converged debug framework + */ + +#define QDF_PRINT_IDX_SHARED -1 + +/** + * QDF_PRINT_INFO() - Generic wrapper API for logging + * @idx : Index of print control object + * @module : Module identifier. A member of QDF_MODULE_ID enumeration that + * identifies the module issuing the trace message + * @level : Trace level. A member of QDF_TRACE_LEVEL enumeration indicating + * the severity of the condition causing the trace message to be + * issued. + * @str_format : Format string that contains the message to be logged. + * + * + * This wrapper will be used for any generic logging messages. Wrapper will + * compile a call to converged QDF trace message API. + * + * Return : Nothing + * + */ +void QDF_PRINT_INFO(unsigned int idx, QDF_MODULE_ID module, + QDF_TRACE_LEVEL level, + char *str_format, ...); + +/** + * struct category_info : Category information structure + * @category_verbose_mask: Embeds information about category's verbose level + */ +struct category_info { + uint16_t category_verbose_mask; +}; + +/** + * struct category_name_info : Category name information structure + * @category_name_str: Embeds information about category name + */ +struct category_name_info { + unsigned char category_name_str[QDF_MAX_NAME_SIZE]; +}; + +/** + * qdf_trace_msg_cmn()- Converged logging API + * @idx: Index of print control object assigned to the module + * @category: Category identifier. A member of the QDF_MODULE_ID enumeration + * that identifies the category issuing the trace message. + * @verbose: Verbose level. A member of the QDF_TRACE_LEVEL enumeration + * indicating the severity of the condition causing the trace + * message to be issued. More severe conditions are more likely + * to be logged. + * @str_format: Format string. The message to be logged. 
This format string + * contains printf-like replacement parameters, which follow this + * parameter in the variable argument list. + * @val: Variable argument list part of the log message + * + * Return: nothing + * + */ +void qdf_trace_msg_cmn(unsigned int idx, + QDF_MODULE_ID category, + QDF_TRACE_LEVEL verbose, + const char *str_format, + va_list val); + +/** + * struct qdf_print_ctrl: QDF Print Control structure + * Statically allocated objects of print control + * structure are declared that will support maximum of + * 32 print control objects. Any module that needs to + * register to the print control framework needs to + * obtain a print control object using + * qdf_print_ctrl_register API. It will have to pass + * pointer to category info structure, name and + * custom print function to be used if required. + * @name : Optional name for the control object + * @cat_info : Array of category_info struct + * @custom_print : Custom print handler + * @custom_ctxt : Custom print context + * @dbglvlmac_on : Flag to enable/disable MAC level filtering + * @in_use : Boolean to indicate if control object is in use + */ +struct qdf_print_ctrl { + char name[QDF_MAX_NAME_SIZE]; + struct category_info cat_info[MAX_SUPPORTED_CATEGORY]; + void (*custom_print)(void *ctxt, const char *fmt, va_list args); + void *custom_ctxt; +#ifdef DBG_LVL_MAC_FILTERING + unsigned char dbglvlmac_on; +#endif + bool in_use; +}; + +/** + * qdf_print_ctrl_register() - Allocate QDF print control object, assign + * pointer to category info or print control + * structure and return the index to the callee + * @cinfo : Pointer to array of category info structure + * @custom_print_handler : Pointer to custom print handler + * @custom_ctx : Pointer to custom context + * @pctrl_name : Pointer to print control object name + * + * Return : Index of qdf_print_ctrl structure + * + */ +int qdf_print_ctrl_register(const struct category_info *cinfo, + void *custom_print_handler, + void *custom_ctx, + const 
qdf_shared_print_ctrl_cleanup() - Clean up the shared object
Return : Flag that indicates enabled (true) or disabled (false)
0000000000000000000000000000000000000000..a9af1d9d5f958df459041bdd4d2cd1831322ad91 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_types.h @@ -0,0 +1,1194 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: qdf_types.h + * QCA driver framework (QDF) basic type definitions + */ + +#if !defined(__QDF_TYPES_H) +#define __QDF_TYPES_H + +#ifndef CONFIG_MCL +#if !defined(__printf) +#define __printf(a, b) +#endif +#endif + + +/* Include Files */ +#include +#include +#ifdef TSOSEG_DEBUG +#include +#endif + +/* Preprocessor definitions and constants */ +#define QDF_MAX_SGLIST 4 + +/** + * struct qdf_sglist - scatter-gather list + * @nsegs: total number of segments + * struct __sg_segs - scatter-gather segment list + * @vaddr: Virtual address of the segment + * @len: Length of the segment + */ +typedef struct qdf_sglist { + uint32_t nsegs; + struct __sg_segs { + uint8_t *vaddr; + uint32_t len; + } sg_segs[QDF_MAX_SGLIST]; +} qdf_sglist_t; + +#define QDF_MAX_SCATTER __QDF_MAX_SCATTER +#define QDF_NSEC_PER_MSEC __QDF_NSEC_PER_MSEC + +/** + * QDF_SWAP_U16 - swap input u16 value + * @_x: variable to swap + */ +#define QDF_SWAP_U16(_x) \ + ((((_x) << 8) & 0xFF00) | (((_x) >> 8) & 0x00FF)) + +/** + * QDF_SWAP_U32 - swap input u32 value + * @_x: variable to swap + */ +#define QDF_SWAP_U32(_x) \ + (((((_x) << 24) & 0xFF000000) | (((_x) >> 24) & 0x000000FF)) | \ + ((((_x) << 8) & 0x00FF0000) | (((_x) >> 8) & 0x0000FF00))) + +/* ticks per second */ +#define QDF_TICKS_PER_SECOND (1000) + +/** + * QDF_ARRAY_SIZE - get array size + * @_arr: array variable name + */ +#define QDF_ARRAY_SIZE(_arr) (sizeof(_arr) / sizeof((_arr)[0])) + +#define QDF_MAX_SCATTER __QDF_MAX_SCATTER + +/** + * qdf_packed - denotes structure is packed. + */ +#define qdf_packed __qdf_packed + +/** + * qdf_toupper - char lower to upper. + */ +#define qdf_toupper __qdf_toupper + +typedef void *qdf_net_handle_t; + +typedef void *qdf_netlink_handle_t; +typedef void *qdf_drv_handle_t; +typedef void *qdf_os_handle_t; +typedef void *qdf_pm_t; + + +/** + * typedef qdf_handle_t - handles opaque to each other + */ +typedef void *qdf_handle_t; + +/** + * typedef qdf_device_t - Platform/bus generic handle. 
+ * Used for bus specific functions. + */ +typedef __qdf_device_t qdf_device_t; + +/* Byte order identifiers */ +typedef __qdf_le16_t qdf_le16_t; +typedef __qdf_le32_t qdf_le32_t; +typedef __qdf_le64_t qdf_le64_t; +typedef __qdf_be16_t qdf_be16_t; +typedef __qdf_be32_t qdf_be32_t; +typedef __qdf_be64_t qdf_be64_t; + +/** + * typedef qdf_size_t - size of an object + */ +typedef __qdf_size_t qdf_size_t; + +/** + * typedef __qdf_off_t - offset for API's that need them. + */ +typedef __qdf_off_t qdf_off_t; + +/** + * typedef qdf_dma_map_t - DMA mapping object. + */ +typedef __qdf_dma_map_t qdf_dma_map_t; + +/** + * tyepdef qdf_dma_addr_t - DMA address. + */ +typedef __qdf_dma_addr_t qdf_dma_addr_t; + +/** + * typedef __qdf_dma_size_t - DMA size. + */ +typedef __qdf_dma_size_t qdf_dma_size_t; + +/** + * tyepdef qdf_dma_context_t - DMA context. + */ +typedef __qdf_dma_context_t qdf_dma_context_t; + +typedef __qdf_mem_info_t qdf_mem_info_t; +typedef __sgtable_t sgtable_t; + +/** + * pointer to net device + */ +typedef __qdf_netdev_t qdf_netdev_t; + +/** + * struct qdf_dma_map_info - Information inside a DMA map. + * @nsegs: total number mapped segments + * struct __dma_segs - Information of physical address. 
+ * @paddr: physical(dam'able) address of the segment + * @len: length of the segment + */ +typedef struct qdf_dma_map_info { + uint32_t nsegs; + struct __dma_segs { + qdf_dma_addr_t paddr; + qdf_dma_size_t len; + } dma_segs[QDF_MAX_SCATTER]; +} qdf_dmamap_info_t; + +/** + * struct qdf_shared_mem - Shared memory resource + * @mem_info: memory info struct + * @vaddr: virtual address + * @sgtable: scatter-gather table + * @memctx: dma address + */ +typedef struct qdf_shared_mem { + qdf_mem_info_t mem_info; + void *vaddr; + sgtable_t sgtable; + qdf_dma_mem_context(memctx); +} qdf_shared_mem_t; + +#define qdf_iomem_t __qdf_iomem_t; + +/** + * typedef enum QDF_TIMER_TYPE - QDF timer type + * @QDF_TIMER_TYPE_SW: Deferrable SW timer it will not cause CPU to wake up + * on expiry + * @QDF_TIMER_TYPE_WAKE_APPS: Non deferrable timer which will cause CPU to + * wake up on expiry + */ +typedef enum { + QDF_TIMER_TYPE_SW, + QDF_TIMER_TYPE_WAKE_APPS +} QDF_TIMER_TYPE; + +/** + * tyepdef qdf_resource_type_t - hw resources + * @QDF_RESOURCE_TYPE_MEM: memory resource + * @QDF_RESOURCE_TYPE_IO: io resource + * Define the hw resources the OS has allocated for the device + * Note that start defines a mapped area. + */ +typedef enum { + QDF_RESOURCE_TYPE_MEM, + QDF_RESOURCE_TYPE_IO, +} qdf_resource_type_t; + +/** + * tyepdef qdf_resource_t - representation of a h/w resource. 
+ * @start: start + * @end: end + * @type: resource type + */ +typedef struct { + uint64_t start; + uint64_t end; + qdf_resource_type_t type; +} qdf_resource_t; + +/** + * typedef qdf_dma_dir_t - DMA directions + * @QDF_DMA_BIDIRECTIONAL: bidirectional data + * @QDF_DMA_TO_DEVICE: data going from device to memory + * @QDF_DMA_FROM_DEVICE: data going from memory to device + */ +typedef enum { + QDF_DMA_BIDIRECTIONAL = __QDF_DMA_BIDIRECTIONAL, + QDF_DMA_TO_DEVICE = __QDF_DMA_TO_DEVICE, + QDF_DMA_FROM_DEVICE = __QDF_DMA_FROM_DEVICE, +} qdf_dma_dir_t; + +/** + * enum qdf_driver_type - Indicate the driver type and based on this + * do appropriate initialization. + * + * @QDF_DRIVER_TYPE_PRODUCTION: Driver used in the production + * @QDF_DRIVER_TYPE_MFG: Driver used in the Factory + * @QDF_DRIVER_TYPE_INVALID: Invalid and unrecognized type + * + */ +enum qdf_driver_type { + QDF_DRIVER_TYPE_PRODUCTION = 0, + QDF_DRIVER_TYPE_MFG = 1, + QDF_DRIVER_TYPE_INVALID = 0x7FFFFFFF +}; + +/* work queue(kernel thread)/DPC function callback */ +typedef void (*qdf_defer_fn_t)(void *); + +/* + * Prototype of the critical region function that is to be + * executed with spinlock held and interrupt disalbed + */ +typedef bool (*qdf_irqlocked_func_t)(void *); + +#define qdf_offsetof(type, field) offsetof(type, field) + +/** + * typedef enum QDF_MODULE_ID - Debug category level + * @QDF_MODULE_ID_TDLS: TDLS + * @QDF_MODULE_ID_ACS: auto channel selection + * @QDF_MODULE_ID_SCAN_SM: scan state machine + * @QDF_MODULE_ID_SCANENTRY: scan entry + * @QDF_MODULE_ID_WDS: WDS handling + * @QDF_MODULE_ID_ACTION: action management frames + * @QDF_MODULE_ID_ROAM: sta mode roaming + * @QDF_MODULE_ID_INACT: inactivity handling + * @QDF_MODULE_ID_DOTH: 11.h + * @QDF_MODULE_ID_IQUE: IQUE features + * @QDF_MODULE_ID_WME: WME protocol + * @QDF_MODULE_ID_ACL: ACL handling + * @QDF_MODULE_ID_WPA: WPA/RSN protocol + * @QDF_MODULE_ID_RADKEYS: dump 802.1x keys + * @QDF_MODULE_ID_RADDUMP: dump 802.1x radius packets 
 * @QDF_MODULE_ID_RADIUS: 802.1x radius client
 * @QDF_MODULE_ID_DOT1XSM: 802.1x state machine
 * @QDF_MODULE_ID_DOT1X: 802.1x authenticator
 * @QDF_MODULE_ID_POWER: power save handling
 * @QDF_MODULE_ID_STATE: state machine
 * @QDF_MODULE_ID_OUTPUT: output handling
 * @QDF_MODULE_ID_SCAN: scanning
 * @QDF_MODULE_ID_AUTH: authentication handling
 * @QDF_MODULE_ID_ASSOC: association handling
 * @QDF_MODULE_ID_NODE: node handling
 * @QDF_MODULE_ID_ELEMID: element id parsing
 * @QDF_MODULE_ID_XRATE: rate set handling
 * @QDF_MODULE_ID_INPUT: input handling
 * @QDF_MODULE_ID_CRYPTO: crypto work
 * @QDF_MODULE_ID_DUMPPKTS: IFF_LINK2 equivalent
 * @QDF_MODULE_ID_DEBUG: IFF_DEBUG equivalent
 * @QDF_MODULE_ID_MLME: MLME
 * @QDF_MODULE_ID_RRM: Radio resource measurement
 * @QDF_MODULE_ID_WNM: Wireless Network Management
 * @QDF_MODULE_ID_P2P_PROT: P2P Protocol driver
 * @QDF_MODULE_ID_PROXYARP: 11v Proxy ARP
 * @QDF_MODULE_ID_L2TIF: Hotspot 2.0 L2 TIF
 * @QDF_MODULE_ID_WIFIPOS: WifiPositioning Feature
 * @QDF_MODULE_ID_WRAP: WRAP or Wireless ProxySTA
 * @QDF_MODULE_ID_DFS: DFS debug mesg
 * @QDF_MODULE_ID_ATF: Air Time Fairness module ID
 * @QDF_MODULE_ID_SPLITMAC: SplitMAC module ID
 * @QDF_MODULE_ID_IOCTL: ioctl handling module ID
 * @QDF_MODULE_ID_NAC: NAC module ID
 * @QDF_MODULE_ID_MESH: mesh module ID
 * @QDF_MODULE_ID_MBO: MBO module ID
 * @QDF_MODULE_ID_EXTIOCTL_CHANSWITCH: extended ioctl channel switch module ID
 * @QDF_MODULE_ID_EXTIOCTL_CHANSSCAN: extended ioctl channel scan module ID
 * @QDF_MODULE_ID_TLSHIM: TLSHIM module ID
 * @QDF_MODULE_ID_WMI: WMI module ID
 * @QDF_MODULE_ID_HTT: HTT module ID
 * @QDF_MODULE_ID_HDD: HDD module ID
 * @QDF_MODULE_ID_SME: SME module ID
 * @QDF_MODULE_ID_PE: PE module ID
 * @QDF_MODULE_ID_WMA: WMA module ID
 * @QDF_MODULE_ID_SYS: SYS module ID
 * @QDF_MODULE_ID_QDF: QDF module ID
 * @QDF_MODULE_ID_SAP: SAP module ID
 * @QDF_MODULE_ID_HDD_SOFTAP: HDD SAP module ID
 * @QDF_MODULE_ID_HDD_DATA: HDD DATA module ID
 * @QDF_MODULE_ID_HDD_SAP_DATA: HDD SAP DATA module ID
 * @QDF_MODULE_ID_HIF: HIF module ID
 * @QDF_MODULE_ID_HTC: HTC module ID
 * @QDF_MODULE_ID_TXRX: TXRX module ID
 * @QDF_MODULE_ID_QDF_DEVICE: QDF DEVICE module ID
 * @QDF_MODULE_ID_CFG: CFG module ID
 * @QDF_MODULE_ID_BMI: BMI module ID
 * @QDF_MODULE_ID_EPPING: EPPING module ID
 * @QDF_MODULE_ID_QVIT: QVIT module ID
 * @QDF_MODULE_ID_DP: Data-path module ID
 * @QDF_MODULE_ID_SOC: SOC module ID
 * @QDF_MODULE_ID_OS_IF: OS-interface module ID
 * @QDF_MODULE_ID_TARGET_IF: target interface module ID
 * @QDF_MODULE_ID_SCHEDULER: scheduler module ID
 * @QDF_MODULE_ID_MGMT_TXRX: management TX/RX module ID
 * @QDF_MODULE_ID_SERIALIZATION: serialization module ID
 * @QDF_MODULE_ID_PMO: PMO (power manager and offloads) Module ID
 * @QDF_MODULE_ID_P2P: P2P module ID
 * @QDF_MODULE_ID_POLICY_MGR: Policy Manager module ID
 * @QDF_MODULE_ID_CONFIG: CFG (configuration) component ID
 * @QDF_MODULE_ID_REGULATORY: REGULATORY module ID
 * @QDF_MODULE_ID_SA_API: SA API module ID
 * @QDF_MODULE_ID_NAN: NAN module ID
 * @QDF_MODULE_ID_OFFCHAN_TXRX: off-channel TX/RX module ID
 * @QDF_MODULE_ID_SON: SON module ID
 * @QDF_MODULE_ID_SPECTRAL: Spectral module ID
 * @QDF_MODULE_ID_OBJ_MGR: object manager module ID
 * @QDF_MODULE_ID_NSS: NSS offload module ID
 * @QDF_MODULE_ID_ROAM_DEBUG: Roam Debug logging
 * @QDF_MODULE_ID_CDP: Converged Data Path module ID
 * @QDF_MODULE_ID_DIRECT_BUF_RX: Direct Buffer Receive module ID
 * @QDF_MODULE_ID_DISA: DISA (encryption test) module ID
 * @QDF_MODULE_ID_GREEN_AP: Green AP related logging
 * @QDF_MODULE_ID_FTM: FTM module ID
 * @QDF_MODULE_ID_EXTAP: Extender AP module ID
 * @QDF_MODULE_ID_FD: FILS discovery logging
 * @QDF_MODULE_ID_OCB: OCB module ID
 * @QDF_MODULE_ID_IPA: IPA module ID
 * @QDF_MODULE_ID_CP_STATS: Control Plane Statistics ID
 * @QDF_MODULE_ID_ACTION_OUI: ACTION OUI module ID
 * @QDF_MODULE_ID_TARGET: Target module ID
 * @QDF_MODULE_ID_ANY: anything
 * @QDF_MODULE_ID_MAX: Max place holder module ID
 */
typedef enum {
	QDF_MODULE_ID_TDLS = 0,
	QDF_MODULE_ID_ACS,
	QDF_MODULE_ID_SCAN_SM,
	QDF_MODULE_ID_SCANENTRY,
	QDF_MODULE_ID_WDS,
	QDF_MODULE_ID_ACTION,
	QDF_MODULE_ID_ROAM,
	QDF_MODULE_ID_INACT,
	QDF_MODULE_ID_DOTH = 8,
	QDF_MODULE_ID_IQUE,
	QDF_MODULE_ID_WME,
	QDF_MODULE_ID_ACL,
	QDF_MODULE_ID_WPA,
	QDF_MODULE_ID_RADKEYS,
	QDF_MODULE_ID_RADDUMP,
	QDF_MODULE_ID_RADIUS,
	QDF_MODULE_ID_DOT1XSM = 16,
	QDF_MODULE_ID_DOT1X,
	QDF_MODULE_ID_POWER,
	QDF_MODULE_ID_STATE,
	QDF_MODULE_ID_OUTPUT,
	QDF_MODULE_ID_SCAN,
	QDF_MODULE_ID_AUTH,
	QDF_MODULE_ID_ASSOC,
	QDF_MODULE_ID_NODE = 24,
	QDF_MODULE_ID_ELEMID,
	QDF_MODULE_ID_XRATE,
	QDF_MODULE_ID_INPUT,
	QDF_MODULE_ID_CRYPTO,
	QDF_MODULE_ID_DUMPPKTS,
	QDF_MODULE_ID_DEBUG,
	QDF_MODULE_ID_MLME,
	QDF_MODULE_ID_RRM = 32,
	QDF_MODULE_ID_WNM,
	QDF_MODULE_ID_P2P_PROT,
	QDF_MODULE_ID_PROXYARP,
	QDF_MODULE_ID_L2TIF,
	QDF_MODULE_ID_WIFIPOS,
	QDF_MODULE_ID_WRAP,
	QDF_MODULE_ID_DFS,
	QDF_MODULE_ID_ATF = 40,
	QDF_MODULE_ID_SPLITMAC,
	QDF_MODULE_ID_IOCTL,
	QDF_MODULE_ID_NAC,
	QDF_MODULE_ID_MESH,
	QDF_MODULE_ID_MBO,
	QDF_MODULE_ID_EXTIOCTL_CHANSWITCH,
	QDF_MODULE_ID_EXTIOCTL_CHANSSCAN,
	QDF_MODULE_ID_TLSHIM = 48,
	QDF_MODULE_ID_WMI,
	QDF_MODULE_ID_HTT,
	QDF_MODULE_ID_HDD,
	QDF_MODULE_ID_SME,
	QDF_MODULE_ID_PE,
	QDF_MODULE_ID_WMA,
	QDF_MODULE_ID_SYS,
	QDF_MODULE_ID_QDF = 56,
	QDF_MODULE_ID_SAP,
	QDF_MODULE_ID_HDD_SOFTAP,
	QDF_MODULE_ID_HDD_DATA,
	QDF_MODULE_ID_HDD_SAP_DATA,
	QDF_MODULE_ID_HIF,
	QDF_MODULE_ID_HTC,
	QDF_MODULE_ID_TXRX,
	QDF_MODULE_ID_QDF_DEVICE = 64,
	QDF_MODULE_ID_CFG,
	QDF_MODULE_ID_BMI,
	QDF_MODULE_ID_EPPING,
	QDF_MODULE_ID_QVIT,
	QDF_MODULE_ID_DP,
	QDF_MODULE_ID_SOC,
	QDF_MODULE_ID_OS_IF,
	QDF_MODULE_ID_TARGET_IF,
	QDF_MODULE_ID_SCHEDULER,
	QDF_MODULE_ID_MGMT_TXRX,
	QDF_MODULE_ID_SERIALIZATION,
	QDF_MODULE_ID_PMO,
	QDF_MODULE_ID_P2P,
	QDF_MODULE_ID_POLICY_MGR,
	QDF_MODULE_ID_CONFIG,
	QDF_MODULE_ID_REGULATORY,
	QDF_MODULE_ID_SA_API,
	QDF_MODULE_ID_NAN,
	QDF_MODULE_ID_OFFCHAN_TXRX,
	QDF_MODULE_ID_SON,
	QDF_MODULE_ID_SPECTRAL,
	QDF_MODULE_ID_OBJ_MGR,
	QDF_MODULE_ID_NSS,
	QDF_MODULE_ID_ROAM_DEBUG,
	QDF_MODULE_ID_CDP,
	QDF_MODULE_ID_DIRECT_BUF_RX,
	QDF_MODULE_ID_DISA,
	QDF_MODULE_ID_GREEN_AP,
	QDF_MODULE_ID_FTM,
	QDF_MODULE_ID_EXTAP,
	QDF_MODULE_ID_FD,
	QDF_MODULE_ID_OCB,
	QDF_MODULE_ID_IPA,
	QDF_MODULE_ID_CP_STATS,
	QDF_MODULE_ID_ACTION_OUI,
	QDF_MODULE_ID_TARGET,
	QDF_MODULE_ID_ANY,
	QDF_MODULE_ID_MAX,
} QDF_MODULE_ID;

/**
 * typedef enum QDF_TRACE_LEVEL - Debug verbose
level
 * @QDF_TRACE_LEVEL_NONE: no trace will be logged. This value is in place
 *			  for the qdf_trace_setlevel() to allow the user
 *			  to turn off all traces
 * @QDF_TRACE_LEVEL_FATAL: Indicates fatal error conditions
 * @QDF_TRACE_LEVEL_ERROR: Indicates error conditions
 * @QDF_TRACE_LEVEL_WARN: May indicate that an error will occur if action
 *			  is not taken
 * @QDF_TRACE_LEVEL_INFO: Normal operational messages that require no action
 * @QDF_TRACE_LEVEL_INFO_HIGH: High level operational messages that require
 *			       no action
 * @QDF_TRACE_LEVEL_INFO_MED: Middle level operational messages that require
 *			      no action
 * @QDF_TRACE_LEVEL_INFO_LOW: Low level operational messages that require
 *			      no action
 * @QDF_TRACE_LEVEL_DEBUG: Information useful to developers for debugging
 * @QDF_TRACE_LEVEL_TRACE: Indicates trace level for automation scripts,
 *			   whenever there is a context switch in driver, one
 *			   print using this trace level will be added with
 *			   the help of qdf_trace api.
 * @QDF_TRACE_LEVEL_ALL: All trace levels
 * @QDF_TRACE_LEVEL_MAX: Max trace level
 */
typedef enum {
	QDF_TRACE_LEVEL_NONE,
	QDF_TRACE_LEVEL_FATAL,
	QDF_TRACE_LEVEL_ERROR,
	QDF_TRACE_LEVEL_WARN,
	QDF_TRACE_LEVEL_INFO,
	QDF_TRACE_LEVEL_INFO_HIGH,
	QDF_TRACE_LEVEL_INFO_MED,
	QDF_TRACE_LEVEL_INFO_LOW,
	QDF_TRACE_LEVEL_DEBUG,
	QDF_TRACE_LEVEL_TRACE,
	QDF_TRACE_LEVEL_ALL,
	QDF_TRACE_LEVEL_MAX
} QDF_TRACE_LEVEL;

/**
 * enum QDF_OPMODE - vdev operating mode
 * @QDF_STA_MODE: STA mode
 * @QDF_SAP_MODE: SAP mode
 * @QDF_P2P_CLIENT_MODE: P2P client mode
 * @QDF_P2P_GO_MODE: P2P GO mode
 * @QDF_FTM_MODE: FTM mode
 * @QDF_IBSS_MODE: IBSS mode
 * @QDF_MONITOR_MODE: Monitor mode
 * @QDF_P2P_DEVICE_MODE: P2P device mode
 * @QDF_OCB_MODE: OCB device mode
 * @QDF_EPPING_MODE: EPPING device mode
 * @QDF_QVIT_MODE: QVIT device mode
 * @QDF_NDI_MODE: NAN datapath mode
 * @QDF_WDS_MODE: WDS mode
 * @QDF_BTAMP_MODE: BTAMP mode
 * @QDF_AHDEMO_MODE: AHDEMO mode
 * @QDF_TDLS_MODE: TDLS MODE
 * @QDF_MAX_NO_OF_MODE: Max place holder
 *
 * These are generic IDs that identify the various roles
 * in the software system
 */
enum QDF_OPMODE {
	QDF_STA_MODE,
	QDF_SAP_MODE,
	QDF_P2P_CLIENT_MODE,
	QDF_P2P_GO_MODE,
	QDF_FTM_MODE,
	QDF_IBSS_MODE,
	QDF_MONITOR_MODE,
	QDF_P2P_DEVICE_MODE,
	QDF_OCB_MODE,
	QDF_EPPING_MODE,
	QDF_QVIT_MODE,
	QDF_NDI_MODE,
	QDF_WDS_MODE,
	QDF_BTAMP_MODE,
	QDF_AHDEMO_MODE,
	QDF_MAX_NO_OF_MODE,
	/*
	 * NOTE(review): QDF_TDLS_MODE is enumerated AFTER QDF_MAX_NO_OF_MODE,
	 * so QDF_TDLS_MODE >= QDF_MAX_NO_OF_MODE; arrays sized by
	 * QDF_MAX_NO_OF_MODE must not be indexed by QDF_TDLS_MODE. Kept
	 * as-is (likely to preserve existing enum values) — confirm intent.
	 */
	QDF_TDLS_MODE,
};

/**
 * enum QDF_GLOBAL_MODE - global mode when driver is loaded.
 *
 * @QDF_GLOBAL_MISSION_MODE: mission mode (STA, SAP...)
 * @QDF_GLOBAL_MONITOR_MODE: Monitor Mode
 * @QDF_GLOBAL_FTM_MODE: FTM mode
 * @QDF_GLOBAL_IBSS_MODE: IBSS mode
 * @QDF_GLOBAL_COLDBOOT_CALIB_MODE: cold-boot calibration mode
 * @QDF_GLOBAL_EPPING_MODE: EPPING mode
 * @QDF_GLOBAL_QVIT_MODE: QVIT global mode
 * @QDF_GLOBAL_MAX_MODE: Max place holder
 */
enum QDF_GLOBAL_MODE {
	QDF_GLOBAL_MISSION_MODE,
	QDF_GLOBAL_MONITOR_MODE = 4,
	QDF_GLOBAL_FTM_MODE = 5,
	QDF_GLOBAL_IBSS_MODE = 6,
	QDF_GLOBAL_COLDBOOT_CALIB_MODE = 7,
	QDF_GLOBAL_EPPING_MODE = 8,
	QDF_GLOBAL_QVIT_MODE = 9,
	QDF_GLOBAL_MAX_MODE
};

/* NOTE(review): 'mode' is not parenthesized in the expansion below; safe for
 * simple arguments but a hygiene hazard for expressions — confirm callers.
 */
#define QDF_IS_EPPING_ENABLED(mode) (mode == QDF_GLOBAL_EPPING_MODE)

/**
 * qdf_trace_msg() - logging API
 * @module: Module identifier. A member of the QDF_MODULE_ID enumeration that
 * identifies the module issuing the trace message.
 * @level: Trace level. A member of the QDF_TRACE_LEVEL enumeration indicating
 * the severity of the condition causing the trace message to be issued.
 * More severe conditions are more likely to be logged.
 * @str_format: Format string. The message to be logged. This format string
 * contains printf-like replacement parameters, which follow this
 * parameter in the variable argument list.
 *
 * Users wishing to add tracing information to their code should use
 * QDF_TRACE. QDF_TRACE() will compile into a call to qdf_trace_msg() when
 * tracing is enabled.
 *
 * Return: nothing
 *
 * implemented in qdf_trace.c
 */
void __printf(3, 4) qdf_trace_msg(QDF_MODULE_ID module, QDF_TRACE_LEVEL level,
				  char *str_format, ...);
/**
 * qdf_vtrace_msg() - the va_list version of qdf_trace_msg
 * @module: the calling module's Id
 * @level: the logging level to log using
 * @str_format: the log format string
 * @val: the va_list containing the values to format according to str_format
 *
 * Return: None
 */
void qdf_vtrace_msg(QDF_MODULE_ID module, QDF_TRACE_LEVEL level,
		    char *str_format, va_list val);

#ifdef CONFIG_MCL
#define qdf_print(args...) \
	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, ## args)

#define qdf_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_QDF, ## params)
#define qdf_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_QDF, ## params)
#define qdf_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_QDF, ## params)
#define qdf_info(params...) QDF_TRACE_INFO(QDF_MODULE_ID_QDF, ## params)
#define qdf_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_QDF, ## params)

#else /* CONFIG_MCL */

#define qdf_print printk
#define qdf_alert printk
#define qdf_err printk
#define qdf_warn printk
#define qdf_info printk
#define qdf_debug printk

#endif /* CONFIG_MCL */

#define qdf_nofl_alert(params...) \
	QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_QDF, ## params)
#define qdf_nofl_err(params...) \
	QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_QDF, ## params)
#define qdf_nofl_warn(params...) \
	QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_QDF, ## params)
#define qdf_nofl_info(params...) \
	QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_QDF, ## params)
#define qdf_nofl_debug(params...) \
	QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_QDF, ## params)

#define qdf_rl_alert(params...) QDF_TRACE_FATAL_RL(QDF_MODULE_ID_QDF, ## params)
#define qdf_rl_err(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_QDF, ## params)
#define qdf_rl_warn(params...) QDF_TRACE_WARN_RL(QDF_MODULE_ID_QDF, ## params)
#define qdf_rl_info(params...) QDF_TRACE_INFO_RL(QDF_MODULE_ID_QDF, ## params)
#define qdf_rl_debug(params...) QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_QDF, ## params)

#define qdf_rl_nofl_alert(params...) \
	QDF_TRACE_FATAL_RL_NO_FL(QDF_MODULE_ID_QDF, ## params)
#define qdf_rl_nofl_err(params...) \
	QDF_TRACE_ERROR_RL_NO_FL(QDF_MODULE_ID_QDF, ## params)
#define qdf_rl_nofl_warn(params...) \
	QDF_TRACE_WARN_RL_NO_FL(QDF_MODULE_ID_QDF, ## params)
#define qdf_rl_nofl_info(params...) \
	QDF_TRACE_INFO_RL_NO_FL(QDF_MODULE_ID_QDF, ## params)
#define qdf_rl_nofl_debug(params...) \
	QDF_TRACE_DEBUG_RL_NO_FL(QDF_MODULE_ID_QDF, ## params)

#define qdf_vprint __qdf_vprint
#define qdf_snprint __qdf_snprint

#define qdf_kstrtoint __qdf_kstrtoint

#ifdef WLAN_OPEN_P2P_INTERFACE
/* This should match with WLAN_MAX_INTERFACES */
#define QDF_MAX_CONCURRENCY_PERSONA (4)
#else
#define QDF_MAX_CONCURRENCY_PERSONA (3)
#endif

#define QDF_STA_MASK (1 << QDF_STA_MODE)
#define QDF_SAP_MASK (1 << QDF_SAP_MODE)
#define QDF_P2P_CLIENT_MASK (1 << QDF_P2P_CLIENT_MODE)
#define QDF_P2P_GO_MASK (1 << QDF_P2P_GO_MODE)

#ifdef FEATURE_WLAN_MCC_TO_SCC_SWITCH

/**
 * typedef tQDF_MCC_TO_SCC_SWITCH_MODE - MCC to SCC switch mode.
 * @QDF_MCC_TO_SCC_SWITCH_DISABLE: Disable switch
 * @QDF_MCC_TO_SCC_SWITCH_ENABLE: Enable switch
 * @QDF_MCC_TO_SCC_SWITCH_FORCE: Force switch with SAP restart
 * @QDF_MCC_TO_SCC_SWITCH_FORCE_WITHOUT_DISCONNECTION: Force switch without
 * restart of SAP
 * @QDF_MCC_TO_SCC_SWITCH_WITH_FAVORITE_CHANNEL: Switch using fav channel(s)
 * without SAP restart
 * @QDF_MCC_TO_SCC_SWITCH_FORCE_PREFERRED_WITHOUT_DISCONNECTION: Force switch
 * without SAP restart. MCC is allowed only in below exception cases:
 * Exception Case-1: When STA is operating on DFS channel.
 * Exception Case-2: When STA is operating on LTE-CoEx channel.
 * Exception Case-3: When STA is operating on AP disabled channel.
 * @QDF_MCC_TO_SCC_WITH_PREFERRED_BAND: Force SCC only in user preferred band.
 * Allow MCC if STA is operating or comes up on other than user preferred band.
 *
 * @QDF_MCC_TO_SCC_SWITCH_MAX: max switch
 */
typedef enum {
	QDF_MCC_TO_SCC_SWITCH_DISABLE = 0,
	QDF_MCC_TO_SCC_SWITCH_ENABLE,
	QDF_MCC_TO_SCC_SWITCH_FORCE,
	QDF_MCC_TO_SCC_SWITCH_FORCE_WITHOUT_DISCONNECTION,
	QDF_MCC_TO_SCC_SWITCH_WITH_FAVORITE_CHANNEL,
	QDF_MCC_TO_SCC_SWITCH_FORCE_PREFERRED_WITHOUT_DISCONNECTION,
	QDF_MCC_TO_SCC_WITH_PREFERRED_BAND,
	QDF_MCC_TO_SCC_SWITCH_MAX
} tQDF_MCC_TO_SCC_SWITCH_MODE;
#endif

#if !defined(NULL)
#ifdef __cplusplus
#define NULL 0
#else
#define NULL ((void *)0)
#endif
#endif

/**
 * qdf_bool_parse() - parse the given string as a boolean value
 * @bool_str: the input boolean string to parse
 * @out_bool: the output boolean value, populated on success
 *
 * 1, y, Y are mapped to true, 0, n, N are mapped to false.
 * Leading/trailing whitespace is ignored.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS qdf_bool_parse(const char *bool_str, bool *out_bool);

/**
 * qdf_int32_parse() - parse the given string as a 32-bit signed integer
 * @int_str: the input integer string to parse
 * @out_int: the output integer value, populated on success
 *
 * Supports binary (0b), octal (0o), decimal (no prefix), and hexadecimal (0x)
 * encodings via typical prefix notation. Leading/trailing whitespace is
 * ignored.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS qdf_int32_parse(const char *int_str, int32_t *out_int);

/**
 * qdf_uint32_parse() - parse the given string as a 32-bit unsigned integer
 * @int_str: the input integer string to parse
 * @out_int: the output integer value, populated on success
 *
 * Supports binary (0b), octal (0o), decimal (no prefix), and hexadecimal (0x)
 * encodings via typical prefix notation. Leading/trailing whitespace is
 * ignored.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS qdf_uint32_parse(const char *int_str, uint32_t *out_int);

/**
 * qdf_int64_parse() - parse the given string as a 64-bit signed integer
 * @int_str: the input integer string to parse
 * @out_int: the output integer value, populated on success
 *
 * Supports binary (0b), octal (0o), decimal (no prefix), and hexadecimal (0x)
 * encodings via typical prefix notation. Leading/trailing whitespace is
 * ignored.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS qdf_int64_parse(const char *int_str, int64_t *out_int);

/**
 * qdf_uint64_parse() - parse the given string as a 64-bit unsigned integer
 * @int_str: the input integer string to parse
 * @out_int: the output integer value, populated on success
 *
 * Supports binary (0b), octal (0o), decimal (no prefix), and hexadecimal (0x)
 * encodings via typical prefix notation. Leading/trailing whitespace is
 * ignored.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS qdf_uint64_parse(const char *int_str, uint64_t *out_int);

#define QDF_MAC_ADDR_SIZE 6
#define QDF_MAC_ADDR_STR "%02x:%02x:%02x:%02x:%02x:%02x"
#define QDF_MAC_ADDR_ARRAY(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
#define QDF_MAC_ADDR_BCAST_INIT { { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } }
#define QDF_MAC_ADDR_ZERO_INIT { { 0, 0, 0, 0, 0, 0 } }

/**
 * struct qdf_mac_addr - A MAC address
 * @bytes: the raw address bytes array
 */
struct qdf_mac_addr {
	uint8_t bytes[QDF_MAC_ADDR_SIZE];
};

/**
 * qdf_mac_parse() - parse the given string as a MAC address
 * @mac_str: the input MAC address string to parse
 * @out_addr: the output MAC address value, populated on success
 *
 * A MAC address is a set of 6, colon-delimited, hexadecimal encoded octets.
 *
 * E.g.
 *	00:00:00:00:00:00 (zero address)
 *	ff:ff:ff:ff:ff:ff (broadcast address)
 *	12:34:56:78:90:ab (an arbitrary address)
 *
 * This implementation also accepts MAC addresses without colons. Historically,
 * other delimiters and groupings have been used to represent MAC addresses, but
 * these are not supported here. Hexadecimal digits may be in either upper or
 * lower case.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS qdf_mac_parse(const char *mac_str, struct qdf_mac_addr *out_addr);

#define QDF_IPV4_ADDR_SIZE 4
#define QDF_IPV4_ADDR_STR "%d.%d.%d.%d"
#define QDF_IPV4_ADDR_ARRAY(a) (a)[0], (a)[1], (a)[2], (a)[3]
#define QDF_IPV4_ADDR_ZERO_INIT { { 0, 0, 0, 0 } }

/**
 * struct qdf_ipv4_addr - An IPV4 address
 * @bytes: the raw address bytes array
 */
struct qdf_ipv4_addr {
	uint8_t bytes[QDF_IPV4_ADDR_SIZE];
};

/**
 * qdf_ipv4_parse() - parse the given string as an IPV4 address
 * @ipv4_str: the input IPV4 address string to parse
 * @out_addr: the output IPV4 address value, populated on success
 *
 * An IPV4 address is a set of 4, dot-delimited, decimal encoded octets.
 *
 * E.g.
 *	0.0.0.0 (wildcard address)
 *	127.0.0.1 (loopback address)
 *	255.255.255.255 (broadcast address)
 *	192.168.0.1 (an arbitrary address)
 *
 * Historically, non-decimal encodings have also been used to represent IPV4
 * addresses, but these are not supported here.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS qdf_ipv4_parse(const char *ipv4_str, struct qdf_ipv4_addr *out_addr);

#define QDF_IPV6_ADDR_SIZE 16
#define QDF_IPV6_ADDR_HEXTET_COUNT 8
#define QDF_IPV6_ADDR_STR "%x:%x:%x:%x:%x:%x:%x:%x"
#define QDF_IPV6_ADDR_ARRAY(a) \
	((a)[0] << 8) + (a)[1], ((a)[2] << 8) + (a)[3], \
	((a)[4] << 8) + (a)[5], ((a)[6] << 8) + (a)[7], \
	((a)[8] << 8) + (a)[9], ((a)[10] << 8) + (a)[11], \
	((a)[12] << 8) + (a)[13], ((a)[14] << 8) + (a)[15]
#define QDF_IPV6_ADDR_ZERO_INIT \
	{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }

/**
 * struct qdf_ipv6_addr - An IPV6 address
 * @bytes: the raw address bytes array
 */
struct qdf_ipv6_addr {
	uint8_t bytes[QDF_IPV6_ADDR_SIZE];
};

/**
 * qdf_ipv6_parse() - parse the given string as an IPV6 address
 * @ipv6_str: the input IPV6 address string to parse
 * @out_addr: the output IPV6 address value, populated on success
 *
 * A hextet is a pair of octets. An IPV6 address is a set of 8, colon-delimited,
 * hexadecimal encoded hextets. Each hextet may omit leading zeros. One or more
 * zero-hextets may be "compressed" using a pair of colons ("::"). Up to one
 * such zero-compression is allowed per address.
 *
 * E.g.
 *	0:0:0:0:0:0:0:0 (unspecified address)
 *	:: (also the unspecified address)
 *	0:0:0:0:0:0:0:1 (loopback address)
 *	::1 (also the loopback address)
 *	900a:ae7::6 (an arbitrary address)
 *	900a:ae7:0:0:0:0:0:6 (the same arbitrary address)
 *
 * Hexadecimal digits may be in either upper or lower case.
+ * + * Return: QDF_STATUS + */ +QDF_STATUS qdf_ipv6_parse(const char *ipv6_str, struct qdf_ipv6_addr *out_addr); + +#define QDF_MAX_NUM_CHAN (128) + +#define QDF_BCAST_MAC_ADDR (0xFF) +#define QDF_MCAST_IPV4_MAC_ADDR (0x01) +#define QDF_MCAST_IPV6_MAC_ADDR (0x33) + +/** + * struct qdf_tso_frag_t - fragments of a single TCP segment + * @paddr_low_32: Lower 32 bits of the buffer pointer + * @paddr_upper_16: upper 16 bits of the buffer pointer + * @length: length of the buffer + * @vaddr: virtual address + * + * This structure holds the fragments of a single TCP segment of a + * given jumbo TSO network buffer + */ +struct qdf_tso_frag_t { + uint16_t length; + unsigned char *vaddr; + qdf_dma_addr_t paddr; +}; + +#define FRAG_NUM_MAX 6 +#define TSO_SEG_MAGIC_COOKIE 0x1EED + +/** + * struct qdf_tso_flags_t - TSO specific flags + * @tso_enable: Enable transmit segmentation offload + * @tcp_flags_mask: Tcp_flag is inserted into the header based + * on the mask + * @l2_len: L2 length for the msdu + * @ip_len: IP length for the msdu + * @tcp_seq_num: TCP sequence number + * @ip_id: IP identification number + * + * This structure holds the TSO specific flags extracted from the TSO network + * buffer for a given TCP segment + */ +struct qdf_tso_flags_t { + uint32_t tso_enable:1, + reserved_0a:6, + fin:1, + syn:1, + rst:1, + psh:1, + ack:1, + urg:1, + ece:1, + cwr:1, + ns:1, + tcp_flags_mask:9, + reserved_0b:7; + + uint32_t l2_len:16, + ip_len:16; + + uint32_t tcp_seq_num; + + uint32_t ip_id:16, + ipv4_checksum_en:1, + udp_ipv4_checksum_en:1, + udp_ipv6_checksum_en:1, + tcp_ipv4_checksum_en:1, + tcp_ipv6_checksum_en:1, + partial_checksum_en:1, + reserved_3a:10; + + uint32_t checksum_offset:14, + reserved_4a:2, + payload_start_offset:14, + reserved_4b:2; + + uint32_t payload_end_offset:14, + reserved_5:18; +}; + +/** + * struct qdf_tso_seg_t - single TSO segment + * @tso_flags: TSO flags + * @num_frags: number of fragments + * @total_len: total length of the packet + * 
@tso_frags: array holding the fragments + * + * This structure holds the information of a single TSO segment of a jumbo + * TSO network buffer + */ +struct qdf_tso_seg_t { + struct qdf_tso_flags_t tso_flags; + uint32_t num_frags; + uint32_t total_len; + struct qdf_tso_frag_t tso_frags[FRAG_NUM_MAX]; +}; + +/** + * TSO seg elem action caller locations: goes into dbg.history below. + * Needed to be defined outside of the feature so that + * callers can be coded without ifdefs (even if they get + * resolved to nothing) + */ +enum tsoseg_dbg_caller_e { + TSOSEG_LOC_UNDEFINED, + TSOSEG_LOC_INIT1, + TSOSEG_LOC_INIT2, + TSOSEG_LOC_FREE, + TSOSEG_LOC_ALLOC, + TSOSEG_LOC_DEINIT, + TSOSEG_LOC_GETINFO, + TSOSEG_LOC_FILLHTTSEG, + TSOSEG_LOC_FILLCMNSEG, + TSOSEG_LOC_PREPARETSO, + TSOSEG_LOC_TXPREPLLFAST, + TSOSEG_LOC_UNMAPTSO, + TSOSEG_LOC_UNMAPLAST, + TSOSEG_LOC_FORCE_FREE, +}; +#ifdef TSOSEG_DEBUG + +/** + * WARNING: Don't change the history size without changing the wrap + * code in qdf_tso_seg_dbg_record function + */ +#define MAX_TSO_SEG_ACT_HISTORY 16 +struct qdf_tso_seg_dbg_history_t { + uint64_t ts; + short id; +}; +struct qdf_tso_seg_dbg_t { + void *txdesc; /* owner - (ol_txrx_tx_desc_t *) */ + qdf_atomic_t cur; /* index of last valid entry */ + struct qdf_tso_seg_dbg_history_t h[MAX_TSO_SEG_ACT_HISTORY]; +}; +#endif /* TSOSEG_DEBUG */ + +/** + * qdf_tso_seg_elem_t - tso segment element + * @seg: instance of segment + * @next: pointer to the next segment + */ +struct qdf_tso_seg_elem_t { + struct qdf_tso_seg_t seg; + uint32_t cookie:13, + on_freelist:1, + sent_to_target:1, + force_free:1; + struct qdf_tso_seg_elem_t *next; +#ifdef TSOSEG_DEBUG + struct qdf_tso_seg_dbg_t dbg; +#endif /* TSOSEG_DEBUG */ +}; + +/** + * struct qdf_tso_num_seg_t - single element to count for num of seg + * @tso_cmn_num_seg: num of seg in a jumbo skb + * + * This structure holds the information of num of segments of a jumbo + * TSO network buffer. 
+ */ +struct qdf_tso_num_seg_t { + uint32_t tso_cmn_num_seg; +}; + +/** + * qdf_tso_num_seg_elem_t - num of tso segment element for jumbo skb + * @num_seg: instance of num of seg + * @next: pointer to the next segment + */ +struct qdf_tso_num_seg_elem_t { + struct qdf_tso_num_seg_t num_seg; + struct qdf_tso_num_seg_elem_t *next; +}; + +/** + * struct qdf_tso_info_t - TSO information extracted + * @is_tso: is this is a TSO frame + * @num_segs: number of segments + * @tso_seg_list: list of TSO segments for this jumbo packet + * @curr_seg: segment that is currently being processed + * @tso_num_seg_list: num of tso seg for this jumbo packet + * @msdu_stats_idx: msdu index for tso stats + * + * This structure holds the TSO information extracted after parsing the TSO + * jumbo network buffer. It contains a chain of the TSO segments belonging to + * the jumbo packet + */ +struct qdf_tso_info_t { + uint8_t is_tso; + uint32_t num_segs; + struct qdf_tso_seg_elem_t *tso_seg_list; + struct qdf_tso_seg_elem_t *curr_seg; + struct qdf_tso_num_seg_elem_t *tso_num_seg_list; + uint32_t msdu_stats_idx; +}; + +/** + * Used to set classify bit in CE desc. + */ +#define QDF_CE_TX_CLASSIFY_BIT_S 5 + +/** + * QDF_CE_TX_PKT_TYPE_BIT_S - 2 bits starting at bit 6 in CE desc. + */ +#define QDF_CE_TX_PKT_TYPE_BIT_S 6 + +/** + * QDF_CE_TX_PKT_OFFSET_BIT_S - 12 bits --> 16-27, in the CE desciptor + * the length of HTT/HTC descriptor + */ +#define QDF_CE_TX_PKT_OFFSET_BIT_S 16 + +/** + * QDF_CE_TX_PKT_OFFSET_BIT_M - Mask for packet offset in the CE descriptor. 
+ */ +#define QDF_CE_TX_PKT_OFFSET_BIT_M 0x0fff0000 + +/** + * enum qdf_suspend_type - type of suspend + * @QDF_SYSTEM_SUSPEND: System suspend triggered wlan suspend + * @QDF_RUNTIME_SUSPEND: Runtime pm inactivity timer triggered wlan suspend + */ +enum qdf_suspend_type { + QDF_SYSTEM_SUSPEND, + QDF_RUNTIME_SUSPEND +}; + +/** + * enum qdf_hang_reason - host hang/ssr reason + * @CDS_REASON_UNSPECIFIED: Unspecified reason + * @CDS_RX_HASH_NO_ENTRY_FOUND: No Map for the MAC entry for the received frame + * @CDS_PEER_DELETION_TIMEDOUT: peer deletion timeout happened + * @CDS_PEER_UNMAP_TIMEDOUT: peer unmap timeout + * @CDS_SCAN_REQ_EXPIRED: Scan request timed out + * @CDS_SCAN_ATTEMPT_FAILURES: Consecutive Scan attempt failures + * @CDS_GET_MSG_BUFF_FAILURE: Unable to get the message buffer + * @CDS_ACTIVE_LIST_TIMEOUT: Current command processing is timedout + * @CDS_SUSPEND_TIMEOUT: Timeout for an ACK from FW for suspend request + * @CDS_RESUME_TIMEOUT: Timeout for an ACK from FW for resume request + */ +enum qdf_hang_reason { + QDF_REASON_UNSPECIFIED = 0, + QDF_RX_HASH_NO_ENTRY_FOUND = 1, + QDF_PEER_DELETION_TIMEDOUT = 2, + QDF_PEER_UNMAP_TIMEDOUT = 3, + QDF_SCAN_REQ_EXPIRED = 4, + QDF_SCAN_ATTEMPT_FAILURES = 5, + QDF_GET_MSG_BUFF_FAILURE = 6, + QDF_ACTIVE_LIST_TIMEOUT = 7, + QDF_SUSPEND_TIMEOUT = 8, + QDF_RESUME_TIMEOUT = 9, +}; + +/** + * enum qdf_stats_verbosity_level - Verbosity levels for stats + * for which want to have different levels + * @QDF_STATS_VERBOSITY_LEVEL_LOW: Stats verbosity level low + * @QDF_STATS_VERBOSITY_LEVEL_HIGH: Stats verbosity level high + */ +enum qdf_stats_verbosity_level { + QDF_STATS_VERBOSITY_LEVEL_LOW, + QDF_STATS_VERBOSITY_LEVEL_HIGH +}; + +/** + * enum qdf_clock_id - The clock IDs of the various system clocks + * @QDF_CLOCK_REALTIME: Clock is close to current time of day + * @QDF_CLOCK_MONOTONIC: Clock is absolute elapsed time + */ +enum qdf_clock_id { + QDF_CLOCK_REALTIME = __QDF_CLOCK_REALTIME, + QDF_CLOCK_MONOTONIC = 
__QDF_CLOCK_MONOTONIC +}; + +/** + * enum qdf_hrtimer_mode - Mode arguments of qdf_hrtimer_data_t + * related functions + * @QDF_HRTIMER_MODE_ABS: Time value is absolute + * @QDF_HRTIMER_MODE_REL: Time value is relative to now + * @QDF_HRTIMER_MODE_PINNED: Timer is bound to CPU + */ +enum qdf_hrtimer_mode { + QDF_HRTIMER_MODE_ABS = __QDF_HRTIMER_MODE_ABS, + QDF_HRTIMER_MODE_REL = __QDF_HRTIMER_MODE_REL, + QDF_HRTIMER_MODE_PINNED = __QDF_HRTIMER_MODE_PINNED, +}; + +/** + * enum qdf_hrtimer_restart_status - Return values for the + * qdf_hrtimer_data_t callback function + * @QDF_HRTIMER_NORESTART: Timer is not restarted + * @QDF_HRTIMER_RESTART: Timer must be restarted + */ +enum qdf_hrtimer_restart_status { + QDF_HRTIMER_NORESTART = __QDF_HRTIMER_NORESTART, + QDF_HRTIMER_RESTART = __QDF_HRTIMER_RESTART, +}; + +/** + * enum qdf_context_mode - Values for the + * hrtimer context + * @QDF_CONTEXT_HARDWARE: Runs in hw interrupt context + * @QDF_CONTEXT_TASKLET: Runs in tasklet context + */ +enum qdf_context_mode { + QDF_CONTEXT_HARDWARE = 0, + QDF_CONTEXT_TASKLET = 1, +}; + +#endif /* __QDF_TYPES_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_util.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_util.h new file mode 100644 index 0000000000000000000000000000000000000000..ccd47b9cd6a498f40766e893dbaa0893dd9e6ebf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_util.h @@ -0,0 +1,755 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_util.h + * This file defines utility functions. + */ + +#ifndef _QDF_UTIL_H +#define _QDF_UTIL_H + +#include + +#ifdef QCA_CONFIG_SMP +#define QDF_MAX_AVAILABLE_CPU 8 +#else +#define QDF_MAX_AVAILABLE_CPU 1 +#endif + +typedef __qdf_wait_queue_head_t qdf_wait_queue_head_t; + +/** + * qdf_unlikely - Compiler-dependent macro denoting code likely to execute + * @_expr: expression to be checked + */ +#define qdf_unlikely(_expr) __qdf_unlikely(_expr) + +/** + * qdf_likely - Compiler-dependent macro denoting code unlikely to execute + * @_expr: expression to be checked + */ +#define qdf_likely(_expr) __qdf_likely(_expr) + +/** + * qdf_mb - read + write memory barrier. + */ +#define qdf_mb() __qdf_mb() + +/** + * qdf_ioread32 - read a register + * @offset: register address + */ +#define qdf_ioread32(offset) __qdf_ioread32(offset) +/** + * qdf_iowrite32 - write a register + * @offset: register address + * @value: value to write (32bit value) + */ +#define qdf_iowrite32(offset, value) __qdf_iowrite32(offset, value) + +/** + * qdf_assert - assert "expr" evaluates to false. + */ +#ifdef QDF_DEBUG +#define qdf_assert(expr) __qdf_assert(expr) +#else +#define qdf_assert(expr) +#endif /* QDF_DEBUG */ + +/** + * qdf_assert_always - alway assert "expr" evaluates to false. + */ +#define qdf_assert_always(expr) __qdf_assert(expr) + +/** + * qdf_target_assert_always - alway target assert "expr" evaluates to false. + */ +#define qdf_target_assert_always(expr) __qdf_target_assert(expr) + +/** + * QDF_MAX - get maximum of two values + * @_x: 1st argument + * @_y: 2nd argument + */ +#define QDF_MAX(_x, _y) (((_x) > (_y)) ? 
(_x) : (_y)) + +/** + * QDF_MIN - get minimum of two values + * @_x: 1st argument + * @_y: 2nd argument + */ +#define QDF_MIN(_x, _y) (((_x) < (_y)) ? (_x) : (_y)) + +/** + * QDF_IS_ADDR_BROADCAST - is mac address broadcast mac address + * @_a: pointer to mac address + */ +#define QDF_IS_ADDR_BROADCAST(_a) \ + ((_a)[0] == 0xff && \ + (_a)[1] == 0xff && \ + (_a)[2] == 0xff && \ + (_a)[3] == 0xff && \ + (_a)[4] == 0xff && \ + (_a)[5] == 0xff) + +/** + * qdf_status_to_os_return - returns the status to OS. + * @status: enum QDF_STATUS + * + * returns: int status success/failure + */ +static inline int qdf_status_to_os_return(QDF_STATUS status) +{ + return __qdf_status_to_os_return(status); +} + +/** + * qdf_status_from_os_return() - map OS specific return code to a QDF_STATUS + * @rc: the input return code to map + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS qdf_status_from_os_return(int rc) +{ + return __qdf_status_from_os_return(rc); +} + +/** + * qdf_set_bit() - set bit in address + * @nr: bit number to be set + * @addr: address buffer pointer + * + * Return: none + */ +#define qdf_set_bit(nr, addr) __qdf_set_bit(nr, addr) + +/** + * qdf_clear_bit() - clear bit in address + * @nr: bit number to be clear + * @addr: address buffer pointer + * + * Return: none + */ +#define qdf_clear_bit(nr, addr) __qdf_clear_bit(nr, addr) + +/** + * qdf_test_bit() - test bit position in address + * @nr: bit number to be tested + * @addr: address buffer pointer + * + * Return: none + */ +#define qdf_test_bit(nr, addr) __qdf_test_bit(nr, addr) + +/** + * qdf_test_and_clear_bit() - test and clear bit position in address + * @nr: bit number to be tested + * @addr: address buffer pointer + * + * Return: none + */ +#define qdf_test_and_clear_bit(nr, addr) __qdf_test_and_clear_bit(nr, addr) + +/** + * qdf_find_first_bit() - find first bit position in address + * @addr: address buffer pointer + * @nbits: number of bits + * + * Return: position first set bit in addr + */ +#define 
qdf_find_first_bit(addr, nbits) __qdf_find_first_bit(addr, nbits) + +#define qdf_wait_queue_interruptible(wait_queue, condition) \ + __qdf_wait_queue_interruptible(wait_queue, condition) + +/** + * qdf_wait_queue_timeout() - wait for specified time on given condition + * @wait_queue: wait queue to wait on + * @condition: condition to wait on + * @timeout: timeout value in jiffies + * + * Return: 0 if condition becomes false after timeout + * 1 or remaining jiffies, if condition becomes true during timeout + */ +#define qdf_wait_queue_timeout(wait_queue, condition, timeout) \ + __qdf_wait_queue_timeout(wait_queue, \ + condition, timeout) + + +#define qdf_init_waitqueue_head(_q) __qdf_init_waitqueue_head(_q) + +#define qdf_wake_up_interruptible(_q) __qdf_wake_up_interruptible(_q) + +/** + * qdf_wake_up() - wakes up sleeping waitqueue + * @wait_queue: wait queue, which needs wake up + * + * Return: none + */ +#define qdf_wake_up(_q) __qdf_wake_up(_q) + +#define qdf_wake_up_completion(_q) __qdf_wake_up_completion(_q) + +/** + * qdf_container_of - cast a member of a structure out to the containing + * structure + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. 
+ */ +#define qdf_container_of(ptr, type, member) \ + __qdf_container_of(ptr, type, member) + +/** + * qdf_is_pwr2 - test input value is power of 2 integer + * @value: input integer + */ +#define QDF_IS_PWR2(value) (((value) ^ ((value)-1)) == ((value) << 1) - 1) + +/** + * qdf_roundup() - roundup the input value + * @x: value to roundup + * @y: input value rounded to multiple of this + * + * Return: rounded value + */ +#define qdf_roundup(x, y) __qdf_roundup(x, y) + +/** + * qdf_is_macaddr_equal() - compare two QDF MacAddress + * @mac_addr1: Pointer to one qdf MacAddress to compare + * @mac_addr2: Pointer to the other qdf MacAddress to compare + * + * This function returns a bool that tells if a two QDF MacAddress' + * are equivalent. + * + * Return: true if the MacAddress's are equal + * not true if the MacAddress's are not equal + */ +static inline bool qdf_is_macaddr_equal(struct qdf_mac_addr *mac_addr1, + struct qdf_mac_addr *mac_addr2) +{ + return __qdf_is_macaddr_equal(mac_addr1, mac_addr2); +} + + +/** + * qdf_is_macaddr_zero() - check for a MacAddress of all zeros. + * @mac_addr: pointer to the struct qdf_mac_addr to check. + * + * This function returns a bool that tells if a MacAddress is made up of + * all zeros. + * + * Return: true if the MacAddress is all Zeros + * false if the MacAddress is not all Zeros. + */ +static inline bool qdf_is_macaddr_zero(struct qdf_mac_addr *mac_addr) +{ + struct qdf_mac_addr zero_mac_addr = QDF_MAC_ADDR_ZERO_INIT; + + return qdf_is_macaddr_equal(mac_addr, &zero_mac_addr); +} + +/** + * qdf_zero_macaddr() - zero out a MacAddress + * @mac_addr: pointer to the struct qdf_mac_addr to zero. + * + * This function zeros out a QDF MacAddress type. 
+ * + * Return: none + */ +static inline void qdf_zero_macaddr(struct qdf_mac_addr *mac_addr) +{ + __qdf_zero_macaddr(mac_addr); +} + + +/** + * qdf_is_macaddr_group() - check for a MacAddress is a 'group' address + * @mac_addr1: pointer to the qdf MacAddress to check + * + * This function returns a bool that tells if a the input QDF MacAddress + * is a "group" address. Group addresses have the 'group address bit' turned + * on in the MacAddress. Group addresses are made up of Broadcast and + * Multicast addresses. + * + * Return: true if the input MacAddress is a Group address + * false if the input MacAddress is not a Group address + */ +static inline bool qdf_is_macaddr_group(struct qdf_mac_addr *mac_addr) +{ + return mac_addr->bytes[0] & 0x01; +} + + +/** + * qdf_is_macaddr_broadcast() - check for a MacAddress is a broadcast address + * @mac_addr: Pointer to the qdf MacAddress to check + * + * This function returns a bool that tells if a the input QDF MacAddress + * is a "broadcast" address. + * + * Return: true if the input MacAddress is a broadcast address + * flase if the input MacAddress is not a broadcast address + */ +static inline bool qdf_is_macaddr_broadcast(struct qdf_mac_addr *mac_addr) +{ + struct qdf_mac_addr broadcast_mac_addr = QDF_MAC_ADDR_BCAST_INIT; + return qdf_is_macaddr_equal(mac_addr, &broadcast_mac_addr); +} + +/** + * qdf_copy_macaddr() - copy a QDF MacAddress + * @dst_addr: pointer to the qdf MacAddress to copy TO (the destination) + * @src_addr: pointer to the qdf MacAddress to copy FROM (the source) + * + * This function copies a QDF MacAddress into another QDF MacAddress. 
+ * + * Return: none + */ +static inline void qdf_copy_macaddr(struct qdf_mac_addr *dst_addr, + struct qdf_mac_addr *src_addr) +{ + *dst_addr = *src_addr; +} + +/** + * qdf_set_macaddr_broadcast() - set a QDF MacAddress to the 'broadcast' + * @mac_addr: pointer to the qdf MacAddress to set to broadcast + * + * This function sets a QDF MacAddress to the 'broadcast' MacAddress. Broadcast + * MacAddress contains all 0xFF bytes. + * + * Return: none + */ +static inline void qdf_set_macaddr_broadcast(struct qdf_mac_addr *mac_addr) +{ + __qdf_set_macaddr_broadcast(mac_addr); +} + +/** + * qdf_set_u16() - Assign 16-bit unsigned value to a byte array base on CPU's + * endianness. + * @ptr: Starting address of a byte array + * @value: The value to assign to the byte array + * + * Caller must validate the byte array has enough space to hold the vlaue + * + * Return: The address to the byte after the assignment. This may or may not + * be valid. Caller to verify. + */ +static inline uint8_t *qdf_set_u16(uint8_t *ptr, uint16_t value) +{ +#if defined(ANI_BIG_BYTE_ENDIAN) + *(ptr) = (uint8_t) (value >> 8); + *(ptr + 1) = (uint8_t) (value); +#else + *(ptr + 1) = (uint8_t) (value >> 8); + *(ptr) = (uint8_t) (value); +#endif + return ptr + 2; +} + +/** + * qdf_get_u16() - Retrieve a 16-bit unsigned value from a byte array base on + * CPU's endianness. + * @ptr: Starting address of a byte array + * @value: Pointer to a caller allocated buffer for 16 bit value. Value is to + * assign to this location. + * + * Caller must validate the byte array has enough space to hold the vlaue + * + * Return: The address to the byte after the assignment. This may or may not + * be valid. Caller to verify. 
+ */ +static inline uint8_t *qdf_get_u16(uint8_t *ptr, uint16_t *value) +{ +#if defined(ANI_BIG_BYTE_ENDIAN) + *value = (((uint16_t) (*ptr << 8)) | ((uint16_t) (*(ptr + 1)))); +#else + *value = (((uint16_t) (*(ptr + 1) << 8)) | ((uint16_t) (*ptr))); +#endif + return ptr + 2; +} + +/** + * qdf_get_u32() - retrieve a 32-bit unsigned value from a byte array base on + * CPU's endianness. + * @ptr: Starting address of a byte array + * @value: Pointer to a caller allocated buffer for 32 bit value. Value is to + * assign to this location. + * + * Caller must validate the byte array has enough space to hold the vlaue + * + * Return: The address to the byte after the assignment. This may or may not + * be valid. Caller to verify. + */ +static inline uint8_t *qdf_get_u32(uint8_t *ptr, uint32_t *value) +{ +#if defined(ANI_BIG_BYTE_ENDIAN) + *value = ((uint32_t) (*(ptr) << 24) | + (uint32_t) (*(ptr + 1) << 16) | + (uint32_t) (*(ptr + 2) << 8) | (uint32_t) (*(ptr + 3))); +#else + *value = ((uint32_t) (*(ptr + 3) << 24) | + (uint32_t) (*(ptr + 2) << 16) | + (uint32_t) (*(ptr + 1) << 8) | (uint32_t) (*(ptr))); +#endif + return ptr + 4; +} + +/** + * qdf_ntohs - Convert a 16-bit value from network byte order to host byte order + */ +#define qdf_ntohs(x) __qdf_ntohs(x) + +/** + * qdf_ntohl - Convert a 32-bit value from network byte order to host byte order + */ +#define qdf_ntohl(x) __qdf_ntohl(x) + +/** + * qdf_htons - Convert a 16-bit value from host byte order to network byte order + */ +#define qdf_htons(x) __qdf_htons(x) + +/** + * qdf_htonl - Convert a 32-bit value from host byte order to network byte order + */ +#define qdf_htonl(x) __qdf_htonl(x) + +/** + * qdf_cpu_to_le16 - Convert a 16-bit value from CPU byte order to + * little-endian byte order + * + * @x: value to be converted + */ +#define qdf_cpu_to_le16(x) __qdf_cpu_to_le16(x) + +/** + * qdf_cpu_to_le32 - Convert a 32-bit value from CPU byte order to + * little-endian byte order + * + * @x: value to be converted + 
*/ +#define qdf_cpu_to_le32(x) __qdf_cpu_to_le32(x) + +/** + * qdf_cpu_to_le64 - Convert a 64-bit value from CPU byte order to + * little-endian byte order + * + * @x: value to be converted + */ +#define qdf_cpu_to_le64(x) __qdf_cpu_to_le64(x) + +/** + * qdf_le16_to_cpu - Convert a 16-bit value from little-endian byte order + * to CPU byte order + * + * @x: value to be converted + */ +#define qdf_le16_to_cpu(x) __qdf_le16_to_cpu(x) + +/** + * qdf_le32_to_cpu - Convert a 32-bit value from little-endian byte + * order to CPU byte order + * + * @x: value to be converted + */ +#define qdf_le32_to_cpu(x) __qdf_le32_to_cpu(x) + +/** + * qdf_le64_to_cpu - Convert a 64-bit value from little-endian byte + * order to CPU byte order + * + * @x: value to be converted + */ +#define qdf_le64_to_cpu(x) __qdf_le64_to_cpu(x) + +/** + * qdf_cpu_to_be16 - Convert a 16-bit value from CPU byte order to + * big-endian byte order + * + * @x: value to be converted + */ +#define qdf_cpu_to_be16(x) __qdf_cpu_to_be16(x) + +/** + * qdf_cpu_to_be32 - Convert a 32-bit value from CPU byte order to + * big-endian byte order + * + * @x: value to be converted + */ +#define qdf_cpu_to_be32(x) __qdf_cpu_to_be32(x) + +/** + * qdf_cpu_to_be64 - Convert a 64-bit value from CPU byte order to + * big-endian byte order + * + * @x: value to be converted + */ +#define qdf_cpu_to_be64(x) __qdf_cpu_to_be64(x) + + +/** + * qdf_be16_to_cpu - Convert a 16-bit value from big-endian byte order + * to CPU byte order + * + * @x: value to be converted + */ +#define qdf_be16_to_cpu(x) __qdf_be16_to_cpu(x) + +/** + * qdf_be32_to_cpu - Convert a 32-bit value from big-endian byte order + * to CPU byte order + * + * @x: value to be converted + */ +#define qdf_be32_to_cpu(x) __qdf_be32_to_cpu(x) + +/** + * qdf_be64_to_cpu - Convert a 64-bit value from big-endian byte order + * to CPU byte order + * + * @x: value to be converted + */ +#define qdf_be64_to_cpu(x) __qdf_be64_to_cpu(x) + +/** + * qdf_function - replace with the 
name of the current function + */ +#define qdf_function __qdf_function + +/** + * qdf_min - minimum of two numbers + */ +#define qdf_min(a, b) __qdf_min(a, b) + +/** + * qdf_ffz() - find first (least significant) zero bit + * @mask: the bitmask to check + * + * Return: The zero-based index of the first zero bit, or -1 if none are found + */ +#define qdf_ffz(mask) __qdf_ffz(mask) + +/** + * qdf_get_pwr2() - get next power of 2 integer from input value + * @value: input value to find next power of 2 integer + * + * Get next power of 2 integer from input value + * + * Return: Power of 2 integer + */ +static inline int qdf_get_pwr2(int value) +{ + int log2; + + if (QDF_IS_PWR2(value)) + return value; + + log2 = 0; + while (value) { + value >>= 1; + log2++; + } + return 1 << log2; +} + +static inline +int qdf_get_cpu(void) +{ + return __qdf_get_cpu(); +} + +/** + * qdf_get_hweight8() - count num of 1's in bitmap + * @value: input bitmap + * + * Count num of 1's set in the bitmap + * + * Return: num of 1's + */ +static inline +unsigned int qdf_get_hweight8(unsigned int w) +{ + unsigned int res = w - ((w >> 1) & 0x55); + res = (res & 0x33) + ((res >> 2) & 0x33); + return (res + (res >> 4)) & 0x0F; +} + +/** + * qdf_device_init_wakeup() - allow a device to wake up the aps system + * @qdf_dev: the qdf device context + * @enable: enable/disable the device as a wakup source + * + * Return: 0 or errno + */ +static inline int qdf_device_init_wakeup(qdf_device_t qdf_dev, bool enable) +{ + return __qdf_device_init_wakeup(qdf_dev, enable); +} + +static inline +uint64_t qdf_get_totalramsize(void) +{ + return __qdf_get_totalramsize(); +} + +/** + * qdf_get_lower_32_bits() - get lower 32 bits from an address. + * @addr: address + * + * This api returns the lower 32 bits of an address. + * + * Return: lower 32 bits. 
+ */ +static inline +uint32_t qdf_get_lower_32_bits(qdf_dma_addr_t addr) +{ + return __qdf_get_lower_32_bits(addr); +} + +/** + * qdf_get_upper_32_bits() - get upper 32 bits from an address. + * @addr: address + * + * This api returns the upper 32 bits of an address. + * + * Return: upper 32 bits. + */ +static inline +uint32_t qdf_get_upper_32_bits(qdf_dma_addr_t addr) +{ + return __qdf_get_upper_32_bits(addr); +} + +/** + * qdf_rounddown_pow_of_two() - Round down to nearest power of two + * @n: number to be tested + * + * Test if the input number is power of two, and return the nearest power of two + * + * Return: number rounded down to the nearest power of two + */ +static inline +unsigned long qdf_rounddown_pow_of_two(unsigned long n) +{ + return __qdf_rounddown_pow_of_two(n); +} + +/** + * qdf_set_dma_coherent_mask() - set max number of bits allowed in dma addr + * @dev: device pointer + * @addr_bits: max number of bits allowed in dma address + * + * This API sets the maximum allowed number of bits in the dma address. + * + * Return: 0 - success, non zero - failure + */ +static inline +int qdf_set_dma_coherent_mask(struct device *dev, uint8_t addr_bits) +{ + return __qdf_set_dma_coherent_mask(dev, addr_bits); +} + +/** + * qdf_do_div() - wrapper function for kernel macro(do_div). + * @dividend: Dividend value + * @divisor : Divisor value + * + * Return: Quotient + */ +static inline +uint64_t qdf_do_div(uint64_t dividend, uint32_t divisor) +{ + return __qdf_do_div(dividend, divisor); +} + +/** + * qdf_do_div_rem() - wrapper function for kernel macro(do_div) + * to get remainder. 
+ * @dividend: Dividend value + * @divisor : Divisor value + * + * Return: remainder + */ +static inline +uint64_t qdf_do_div_rem(uint64_t dividend, uint32_t divisor) +{ + return __qdf_do_div_rem(dividend, divisor); +} + +/** + * qdf_get_random_bytes() - returns nbytes bytes of random + * data + * + * Return: random bytes of data + */ +static inline +void qdf_get_random_bytes(void *buf, int nbytes) +{ + return __qdf_get_random_bytes(buf, nbytes); +} + +/** + * qdf_hex_to_bin() - QDF API to Convert hexa decimal ASCII character to + * unsigned integer value. + * @ch: hexa decimal ASCII character + * + * Return: For hexa decimal ASCII char return actual decimal value + * else -1 for bad input. + */ +static inline +int qdf_hex_to_bin(char ch) +{ + return __qdf_hex_to_bin(ch); +} + +/** + * qdf_hex_str_to_binary() - QDF API to Convert string of hexa decimal + * ASCII characters to array of unsigned integers. + * @dst: output array to hold converted values + * @src: input string of hexa decimal ASCII characters + * @count: size of dst string + * + * This function is used to convert string of hexa decimal characters to + * array of unsigned integers and caller should ensure: + * a) @dst, @src are not NULL, + * b) size of @dst should be (size of src / 2) + * + * Example 1: + * src = 11aa, means, src[0] = '1', src[1] = '2', src[2] = 'a', src[3] = 'a' + * count = (size of src / 2) = 2 + * after conversion, dst[0] = 0x11, dst[1] = oxAA and return (0). + * + * Example 2: + * src = 11az, means, src[0] = '1', src[1] = '2', src[2] = 'a', src[3] = 'z' + * src[3] is not ASCII hexa decimal character, return negative value (-1). + * + * Return: For a string of hexa decimal ASCII characters return 0 + * else -1 for bad input. 
+ */ +static inline +int qdf_hex_str_to_binary(u8 *dst, const char *src, size_t count) +{ + return __qdf_hex_str_to_binary(dst, src, count); +} + +#endif /*_QDF_UTIL_H*/ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/libc/inc/i_qdf_str.h b/drivers/staging/qca-wifi-host-cmn/qdf/libc/inc/i_qdf_str.h new file mode 100644 index 0000000000000000000000000000000000000000..22b93c2825973ef5bc710c4f1f5b556bf13f94f9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/libc/inc/i_qdf_str.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: i_qdf_str.h + * Libc-specific implementations for qdf_str + */ + +#ifndef __I_QDF_STR_H +#define __I_QDF_STR_H + +#include "string.h" + +#define __qdf_is_space(c) isspace(c) +#define __qdf_str_cmp(left, right) strcmp(left, right) +#define __qdf_str_lcopy(dest, src, dest_size) strlcpy(dest, src, dest_size) +const char *__qdf_str_left_trim(const char *str); +#define __qdf_str_len(str) strlen(str) +char *__qdf_str_trim(char *str); +#define __qdf_str_nlen(str, limit) strnlen(str, limit) + +#endif /* __I_QDF_STR_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/libc/inc/qdf_str.c b/drivers/staging/qca-wifi-host-cmn/qdf/libc/inc/qdf_str.c new file mode 100644 index 0000000000000000000000000000000000000000..f44a74a9435e190e05d5c9927e3797d8220c25f1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/libc/inc/qdf_str.c @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "i_qdf_str.h" + +const char *__qdf_str_left_trim(const char *str) +{ + while (qdf_str_is_space(str)) + str++; + + return str; +} + +char *__qdf_str_trim(char *str) +{ + char *trimmed = (char *)qdf_str_left_trim(str); + + qdf_str_right_trim(str); + + return trimmed; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_osdep.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_osdep.h new file mode 100644 index 0000000000000000000000000000000000000000..7abb45abb4c9b89ccc6351a3fec2aacd08d60097 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_osdep.h @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: i_osdep + * QCA driver framework OS dependent types + */ + +#ifndef _I_OSDEP_H +#define _I_OSDEP_H + +#ifdef CONFIG_MCL +#include +#include +#else +#include +#endif + +/* + * Byte Order stuff + */ +#define le16toh(_x) le16_to_cpu(_x) +#define htole16(_x) cpu_to_le16(_x) +#define htobe16(_x) cpu_to_be16(_x) +#define le32toh(_x) le32_to_cpu(_x) +#define htole32(_x) cpu_to_le32(_x) +#define be16toh(_x) be16_to_cpu(_x) +#define be32toh(_x) be32_to_cpu(_x) +#define htobe32(_x) cpu_to_be32(_x) + +#ifdef CONFIG_SMP +/* Undo the one provided by the kernel to debug spin locks */ +#undef spin_lock +#undef spin_unlock +#undef spin_trylock + +#define spin_lock(x) spin_lock_bh(x) + +#define spin_unlock(x) \ + do { \ + if (!spin_is_locked(x)) { \ + WARN_ON(1); \ + printk(KERN_EMERG " %s:%d unlock addr=%pK, %s \n", __func__, __LINE__, x, \ + !spin_is_locked(x) ? "Not locked" : ""); \ + } \ + spin_unlock_bh(x); \ + } while (0) +#define spin_trylock(x) spin_trylock_bh(x) +#define OS_SUPPORT_ASYNC_Q 1 /* support for handling asyn function calls */ + +#else +#define OS_SUPPORT_ASYNC_Q 0 +#endif /* ifdef CONFIG_SMP */ + +/** + * struct os_mest_t - maintain attributes of message + * @mesg_next: pointer to the nexgt message + * @mest_type: type of message + * @mesg_len: length of the message + */ +typedef struct _os_mesg_t { + STAILQ_ENTRY(_os_mesg_t) mesg_next; + uint16_t mesg_type; + uint16_t mesg_len; +} os_mesg_t; + +/** + * struct qdf_bus_context - Bus to hal context handoff + * @bc_tag: bus context tag + * @cal_in_flash: calibration data stored in flash + * @bc_handle: bus context handle + * @bc_bustype: bus type + */ +typedef struct qdf_bus_context { + void *bc_tag; + int cal_in_flash; + char *bc_handle; + enum qdf_bus_type bc_bustype; +} QDF_BUS_CONTEXT; + +typedef struct _NIC_DEV *osdev_t; + +typedef void (*os_mesg_handler_t)(void *ctx, uint16_t mesg_type, + uint16_t mesg_len, + void *mesg); + + +/** + * typedef os_mesg_queue_t - Object to maintain message 
queue + * @dev_handle: OS handle + * @num_queued: number of queued messages + * @mesg_len: message length + * @mesg_queue_buf: pointer to message queue buffer + * @mesg_head: queued mesg buffers + * @mesg_free_head: free mesg buffers + * @lock: spinlock object + * @ev_handler_lock: spinlock object to event handler + * @task: pointer to task + * @_timer: instance of timer + * @handler: message handler + * @ctx: pointer to context + * @is_synchronous: bit to save synchronous status + * @del_progress: delete in progress + */ +typedef struct { + osdev_t dev_handle; + int32_t num_queued; + int32_t mesg_len; + uint8_t *mesg_queue_buf; + + STAILQ_HEAD(, _os_mesg_t) mesg_head; + STAILQ_HEAD(, _os_mesg_t) mesg_free_head; + spinlock_t lock; + spinlock_t ev_handler_lock; +#ifdef USE_SOFTINTR + void *_task; +#else + qdf_timer_t _timer; +#endif + os_mesg_handler_t handler; + void *ctx; + uint8_t is_synchronous:1; + uint8_t del_progress; +} os_mesg_queue_t; + +/** + * struct _NIC_DEV - Definition of OS-dependent device structure. + * It'll be opaque to the actual ATH layer. 
+ * @qdf_dev: qdf device + * @bdev: bus device handle + * @netdev: net device handle (wifi%d) + * @intr_tq: tasklet + * @devstats: net device statistics + * @bc: hal bus context + * @device: generic device + * @event_queue: instance to wait queue + * @is_device_asleep: keep device status, sleep or awakei + * @acfg_event_list: event list + * @acfg_event_queue_lock: queue lock + * @acfg_event_os_work: schedule or create work + * @acfg_netlink_wq_init_done: Work queue ready + * @osdev_acfg_handle: acfg handle + * @vap_hardstart: Tx function specific to the radio + * initiailzed during VAP create + */ +struct _NIC_DEV { + qdf_device_t qdf_dev; + void *bdev; + struct net_device *netdev; + qdf_bh_t intr_tq; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) + struct rtnl_link_stats64 devstats; +#else + struct net_device_stats devstats; +#endif + QDF_BUS_CONTEXT bc; +#ifdef ATH_PERF_PWR_OFFLOAD + struct device *device; + wait_queue_head_t event_queue; +#endif /* PERF_PWR_OFFLOAD */ +#if OS_SUPPORT_ASYNC_Q + os_mesg_queue_t async_q; +#endif +#ifdef ATH_BUS_PM + uint8_t is_device_asleep; +#endif /* ATH_BUS_PM */ + qdf_nbuf_queue_t acfg_event_list; + qdf_spinlock_t acfg_event_queue_lock; + qdf_work_t acfg_event_os_work; + uint8_t acfg_netlink_wq_init_done; + +#ifdef UMAC_SUPPORT_ACFG +#ifdef ACFG_NETLINK_TX + void *osdev_acfg_handle; +#endif /* ACFG_NETLINK_TX */ +#endif /* UMAC_SUPPORT_ACFG */ + int (*vap_hardstart)(struct sk_buff *skb, struct net_device *dev); +}; + +#define __QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) \ + proc_dointvec(ctl, write, buffer, lenp, ppos) + +#endif /* _I_OSDEP_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_atomic.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_atomic.h new file mode 100644 index 0000000000000000000000000000000000000000..8e7110b52e64eb98bf345f3fb85ed2b6dd0d41d2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_atomic.h @@ -0,0 +1,228 @@ +/* + * 
Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_atomic.h + * This file provides OS dependent atomic APIs. + */ + +#ifndef I_QDF_ATOMIC_H +#define I_QDF_ATOMIC_H + +#include /* QDF_STATUS */ +#include +#include + +typedef atomic_t __qdf_atomic_t; + +/** + * __qdf_atomic_init() - initialize an atomic type variable + * @v: A pointer to an opaque atomic variable + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS __qdf_atomic_init(__qdf_atomic_t *v) +{ + atomic_set(v, 0); + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_atomic_read() - read the value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: The current value of the variable + */ +static inline int32_t __qdf_atomic_read(__qdf_atomic_t *v) +{ + return atomic_read(v); +} + +/** + * __qdf_atomic_inc() - increment the value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: None + */ +static inline void __qdf_atomic_inc(__qdf_atomic_t *v) +{ + atomic_inc(v); +} + +/** + * __qdf_atomic_dec() - decrement the value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: None + */ +static inline void __qdf_atomic_dec(__qdf_atomic_t 
*v) +{ + atomic_dec(v); +} + +/** + * __qdf_atomic_add() - add a value to the value of an atomic variable + * @i: The amount by which to increase the atomic counter + * @v: A pointer to an opaque atomic variable + * + * Return: None + */ +static inline void __qdf_atomic_add(int i, __qdf_atomic_t *v) +{ + atomic_add(i, v); +} + +/** + * __qdf_atomic_sub() - Subtract a value from an atomic variable + * @i: the amount by which to decrease the atomic counter + * @v: a pointer to an opaque atomic variable + * + * Return: none + */ +static inline void __qdf_atomic_sub(int i, __qdf_atomic_t *v) +{ + atomic_sub(i, v); +} + +/** + * __qdf_atomic_dec_and_test() - decrement an atomic variable and check if the + * new value is zero + * @v: A pointer to an opaque atomic variable + * + * Return: + * true (non-zero) if the new value is zero, + * false (0) if the new value is non-zero + */ +static inline int32_t __qdf_atomic_dec_and_test(__qdf_atomic_t *v) +{ + return atomic_dec_and_test(v); +} + +/** + * __qdf_atomic_set() - set a value to the value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: None + */ +static inline void __qdf_atomic_set(__qdf_atomic_t *v, int i) +{ + atomic_set(v, i); +} + +/** + * __qdf_atomic_inc_return() - return the incremented value of an atomic variable + * @v: A pointer to an opaque atomic variable + * + * Return: The current value of the variable + */ +static inline int32_t __qdf_atomic_inc_return(__qdf_atomic_t *v) +{ + return atomic_inc_return(v); +} + +/** + * __qdf_atomic_set_bit - Atomically set a bit in memory + * @nr: bit to set + * @addr: the address to start counting from + * + * Return: none + */ +static inline void __qdf_atomic_set_bit(int nr, volatile unsigned long *addr) +{ + set_bit(nr, addr); +} + +/** + * __qdf_atomic_clear_bit - Atomically clear a bit in memory + * @nr: bit to clear + * @addr: the address to start counting from + * + * Return: none + */ +static inline void 
__qdf_atomic_clear_bit(int nr, volatile unsigned long *addr) +{ + clear_bit(nr, addr); +} + +/** + * __qdf_atomic_change_bit - Atomically toggle a bit in memory + * from addr + * @nr: bit to change + * @addr: the address to start counting from + * + * Return: none + */ +static inline void __qdf_atomic_change_bit(int nr, volatile unsigned long *addr) +{ + change_bit(nr, addr); +} + +/** + * __qdf_atomic_test_and_set_bit - Atomically set a bit and return its old value + * @nr: Bit to set + * @addr: the address to start counting from + * + * Return: return nr bit old value + */ +static inline int __qdf_atomic_test_and_set_bit(int nr, + volatile unsigned long *addr) +{ + return test_and_set_bit(nr, addr); +} + +/** + * __qdf_atomic_test_and_clear_bit - Atomically clear a bit and return its old + * value + * @nr: bit to clear + * @addr: the address to start counting from + * + * Return: return nr bit old value + */ +static inline int __qdf_atomic_test_and_clear_bit(int nr, + volatile unsigned long *addr) +{ + return test_and_clear_bit(nr, addr); +} + +/** + * __qdf_atomic_test_and_change_bit - Atomically toggle a bit and return its old + * value + * @nr: bit to change + * @addr: the address to start counting from + * + * Return: return nr bit old value + */ +static inline int __qdf_atomic_test_and_change_bit(int nr, + volatile unsigned long *addr) +{ + return test_and_change_bit(nr, addr); +} + +/** + * __qdf_atomic_test_bit - Atomically get the nr-th bit value starting from addr + * @nr: bit to get + * @addr: the address to start counting from + * + * Return: return nr bit value + */ +static inline int __qdf_atomic_test_bit(int nr, volatile unsigned long *addr) +{ + return test_bit(nr, addr); +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_cpuhp.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_cpuhp.h new file mode 100644 index 0000000000000000000000000000000000000000..7cee453c6502eaa5c1f87b94b564b54dbfe16b15 --- /dev/null +++ 
b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_cpuhp.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_cpuhp.h (CPU hotplug) + * Linux-specific definitions for QDF CPU hotplug API's + */ + +#ifndef __I_QDF_CPUHP_H +#define __I_QDF_CPUHP_H + +#include "linux/types.h" + +typedef void (*__qdf_cpuhp_emit)(uint32_t cpu); + +void __qdf_cpuhp_os_init(__qdf_cpuhp_emit on_up, __qdf_cpuhp_emit on_down); +void __qdf_cpuhp_os_deinit(void); + +#endif /* __I_QDF_CPUHP_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_debugfs.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..506b6f04add108e8739562b427acf8212502779f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_debugfs.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_debugfs.h + * Linux specific implementation for debug filesystem APIs. + */ + + +#ifndef _I_QDF_DEBUGFS_H +#define _I_QDF_DEBUGFS_H + +#include +#include + +typedef struct dentry *__qdf_dentry_t; +typedef struct seq_file *__qdf_debugfs_file_t; + +#ifdef WLAN_DEBUGFS + +/** + * qdf_debugfs_get_root() - get debugfs root + * + * Return: dentry * or NULL in case of failure + */ +struct dentry *qdf_debugfs_get_root(void); + +/** + * qdf_debugfs_get_filemode() - get Linux specific file mode + * @mode: This is a bitmap of file modes, + * QDF_FILE_USR_READ + * QDF_FILE_USR_WRITE + * QDF_FILE_OTH_READ + * QDF_FILE_OTH_WRITE + * QDF_FILE_GRP_READ + * QDF_FILE_GRP_WRITE + * + * Return: Linux specific file mode + */ +umode_t qdf_debugfs_get_filemode(uint16_t mode); + +#endif /* WLAN_DEBUGFS */ +#endif /* _I_QDF_DEBUGFS_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_defer.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_defer.h new file mode 100644 index 0000000000000000000000000000000000000000..28209f5ca9355a8d0c23f35adb320dd1e89d1f14 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_defer.h @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_defer.h + * This file provides OS dependent deferred API's. + */ + +#ifndef _I_QDF_DEFER_H +#define _I_QDF_DEFER_H + +#include +#include +#include +#include +#include + +typedef struct tasklet_struct __qdf_bh_t; +typedef struct workqueue_struct __qdf_workqueue_t; + +/** + * __qdf_work_t - wrapper around the real task func + * @work: Instance of work + * @fn: function pointer to the handler + * @arg: pointer to argument + */ +typedef struct { + struct work_struct work; + qdf_defer_fn_t fn; + void *arg; +} __qdf_work_t; + +/** + * __qdf_delayed_work_t - wrapper around the real work func + * @dwork: Instance of delayed work + * @fn: function pointer to the handler + * @arg: pointer to argument + */ +typedef struct { + struct delayed_work dwork; + qdf_defer_fn_t fn; + void *arg; +} __qdf_delayed_work_t; + +extern void __qdf_defer_func(struct work_struct *work); +extern void __qdf_defer_delayed_func(struct work_struct *work); + +typedef void (*__qdf_bh_fn_t)(unsigned long arg); + +/** + * __qdf_init_work - Initialize a work/task queue, This runs in non-interrupt + * context, so can be preempted by H/W & S/W intr + * @work: pointer to work + * @func: deferred function to run at bottom half non-interrupt 
context. + * @arg: argument for the deferred function + * Return: none + */ +static inline QDF_STATUS +__qdf_init_work(__qdf_work_t *work, qdf_defer_fn_t func, void *arg) +{ + work->fn = func; + work->arg = arg; + INIT_WORK(&work->work, __qdf_defer_func); + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_init_delayed_work - create a work/task, This runs in non-interrupt + * context, so can be preempted by H/W & S/W intr + * @work: pointer to work + * @func: deferred function to run at bottom half non-interrupt context. + * @arg: argument for the deferred function + * Return: none + */ +static inline uint32_t __qdf_init_delayed_work(__qdf_delayed_work_t *work, + qdf_defer_fn_t func, + void *arg) +{ + /*Initialize func and argument in work struct */ + work->fn = func; + work->arg = arg; + INIT_DELAYED_WORK(&work->dwork, __qdf_defer_delayed_func); + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_queue_work - Queue the work/task + * @wqueue: pointer to workqueue + * @work: pointer to work + * Return: none + */ +static inline void +__qdf_queue_work(__qdf_workqueue_t *wqueue, __qdf_work_t *work) +{ + queue_work(wqueue, &work->work); +} + +/** + * __qdf_queue_delayed_work - Queue the delayed work/task + * @wqueue: pointer to workqueue + * @work: pointer to work + * @delay: delay interval + * Return: none + */ +static inline void __qdf_queue_delayed_work(__qdf_workqueue_t *wqueue, + __qdf_delayed_work_t *work, + uint32_t delay) +{ + queue_delayed_work(wqueue, &work->dwork, msecs_to_jiffies(delay)); +} + +/** + * __qdf_sched_work - Schedule a deferred task on non-interrupt context + * @work: pointer to work + * Retrun: none + */ +static inline QDF_STATUS __qdf_sched_work(__qdf_work_t *work) +{ + schedule_work(&work->work); + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_sched_delayed_work() - Schedule a delayed work + * @work: pointer to delayed work + * @delay: delay interval + * Return: none + */ +static inline QDF_STATUS +__qdf_sched_delayed_work(__qdf_delayed_work_t 
*work, uint32_t delay) +{ + schedule_delayed_work(&work->dwork, msecs_to_jiffies(delay)); + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_cancel_work() - Cancel a work + * @work: pointer to work + * Return: true if work was pending, false otherwise + */ +static inline bool __qdf_cancel_work(__qdf_work_t *work) +{ + return cancel_work_sync(&work->work); +} + +/** + * __qdf_cancel_delayed_work() - Cancel a delayed work + * @work: pointer to delayed work + * Return: true if work was pending, false otherwise + */ +static inline bool __qdf_cancel_delayed_work(__qdf_delayed_work_t *work) +{ + return cancel_delayed_work_sync(&work->dwork); +} + +/** + * __qdf_flush_work - Flush a deferred task on non-interrupt context + * @work: pointer to work + * Return: none + */ +static inline uint32_t __qdf_flush_work(__qdf_work_t *work) +{ + flush_work(&work->work); + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_flush_delayed_work() - Flush a delayed work + * @work: pointer to delayed work + * Return: none + */ +static inline uint32_t __qdf_flush_delayed_work(__qdf_delayed_work_t *work) +{ + flush_delayed_work(&work->dwork); + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_create_workqueue - create a workqueue, This runs in non-interrupt + * context, so can be preempted by H/W & S/W intr + * @name: string + * Return: pointer of type qdf_workqueue_t + */ +static inline __qdf_workqueue_t *__qdf_create_workqueue(char *name) +{ + return create_workqueue(name); +} + +/** + * __qdf_create_singlethread_workqueue() - create a single threaded workqueue + * @name: string + * + * This API creates a dedicated work queue with a single worker thread to avoid + * wasting unnecessary resources when works which needs to be submitted in this + * queue are not very critical and frequent. 
+ * + * Return: pointer of type qdf_workqueue_t + */ +static inline __qdf_workqueue_t *__qdf_create_singlethread_workqueue(char *name) +{ + return create_singlethread_workqueue(name); +} + +/** + * __qdf_alloc_unbound_workqueue - alloc an unbound workqueue + * @name: string + * + * Return: pointer of type qdf_workqueue_t + */ +static inline __qdf_workqueue_t *__qdf_alloc_unbound_workqueue(char *name) +{ + return alloc_workqueue(name, WQ_UNBOUND, 0); +} + +/** + * __qdf_flush_workqueue - flush the workqueue + * @wqueue: pointer to workqueue + * Return: none + */ +static inline void __qdf_flush_workqueue(__qdf_workqueue_t *wqueue) +{ + flush_workqueue(wqueue); +} + +/** + * __qdf_destroy_workqueue - Destroy the workqueue + * @wqueue: pointer to workqueue + * Return: none + */ +static inline void __qdf_destroy_workqueue(__qdf_workqueue_t *wqueue) +{ + destroy_workqueue(wqueue); +} + +/** + * __qdf_init_bh - creates the Bottom half deferred handler + * @bh: pointer to bottom + * @func: deferred function to run at bottom half interrupt context. 
+ * @arg: argument for the deferred function + * Return: none + */ +static inline QDF_STATUS +__qdf_init_bh(struct tasklet_struct *bh, qdf_defer_fn_t func, void *arg) +{ + tasklet_init(bh, (__qdf_bh_fn_t) func, (unsigned long)arg); + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_sched_bh - schedule a bottom half (DPC) + * @bh: pointer to bottom + * Return: none + */ +static inline QDF_STATUS __qdf_sched_bh(struct tasklet_struct *bh) +{ + tasklet_schedule(bh); + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_disable_work - disable the deferred task (synchronous) + * @work: pointer to work + * Return: unsigned int + */ +static inline QDF_STATUS __qdf_disable_work(__qdf_work_t *work) +{ + if (cancel_work_sync(&work->work)) + return QDF_STATUS_E_ALREADY; + + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_disable_bh - destroy the bh (synchronous) + * @bh: pointer to bottom + * Return: none + */ +static inline QDF_STATUS __qdf_disable_bh(struct tasklet_struct *bh) +{ + tasklet_kill(bh); + return QDF_STATUS_SUCCESS; +} + +#endif /*_I_QDF_DEFER_H*/ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_event.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_event.h new file mode 100644 index 0000000000000000000000000000000000000000..41726f934dc1f96f48b1bf52286e7e90388cee55 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_event.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_event.h + * This file provides OS dependent event API's. + */ + +#if !defined(__I_QDF_EVENT_H) +#define __I_QDF_EVENT_H + +#include + +/** + * qdf_event_t - manages events + * @complete: instance to completion + * @cookie: unsigned int + * @force_set: indicate forceful completion + */ +typedef struct qdf_evt { + struct completion complete; + uint32_t cookie; + bool force_set; +} __qdf_event_t; + +/* Preprocessor definitions and constants */ +#define LINUX_EVENT_COOKIE 0x12341234 + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) +#define INIT_COMPLETION(event) reinit_completion(&event) +#endif + +#endif /*__I_QDF_EVENT_H*/ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_hrtimer.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_hrtimer.h new file mode 100644 index 0000000000000000000000000000000000000000..0998ecb8b7dab767d2752554ccbde75caec5059a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_hrtimer.h @@ -0,0 +1,249 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_hrtimer + * This file provides OS dependent timer API's. + */ + +#ifndef _I_QDF_HRTIMER_H +#define _I_QDF_HRTIMER_H + +#include +#include +#include +#include +#include + +/* hrtimer data type */ +typedef struct { + union { + struct hrtimer hrtimer; + struct tasklet_hrtimer tasklet_hrtimer; + } u; + enum qdf_context_mode ctx; +} __qdf_hrtimer_data_t; + +/** + * __qdf_hrtimer_start() - Starts hrtimer in given context + * @timer: pointer to the hrtimer object + * @interval: interval to forward as qdf_ktime_t object + * @mode: mode of hrtimer + * + * Starts hrtimer in given context + * + * Return: void + */ +static inline +void __qdf_hrtimer_start(__qdf_hrtimer_data_t *timer, ktime_t interval, + enum qdf_hrtimer_mode mode) +{ + if (timer->ctx == QDF_CONTEXT_HARDWARE) + hrtimer_start(&timer->u.hrtimer, interval, mode); + else if (timer->ctx == QDF_CONTEXT_TASKLET) + tasklet_hrtimer_start(&timer->u.tasklet_hrtimer, + interval, mode); +} + +/** + * __qdf_hrtimer_cancel() - cancels hrtimer in given context + * @timer: pointer to the hrtimer object + * + * cancels hrtimer in given context + * + * Return: void + */ +static inline +void __qdf_hrtimer_cancel(__qdf_hrtimer_data_t *timer) +{ + if (timer->ctx == QDF_CONTEXT_HARDWARE) + hrtimer_cancel(&timer->u.hrtimer); + else if (timer->ctx == QDF_CONTEXT_TASKLET) + hrtimer_cancel(&timer->u.tasklet_hrtimer.timer); +} + +/** + * __qdf_hrtimer_init() - init hrtimer in a given context + * @timer: pointer to the hrtimer object + * @cback: callback function to be fired + * @clock: clock id + * @hrtimer_mode: mode of hrtimer + * + * starts hrtimer in a context passed as per 
the context + * + * Return: void + */ +static inline void __qdf_hrtimer_init(__qdf_hrtimer_data_t *timer, + void *cback, + enum qdf_clock_id clock, + enum qdf_hrtimer_mode mode, + enum qdf_context_mode ctx) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + struct tasklet_hrtimer *tasklet_hrtimer = &timer->u.tasklet_hrtimer; + + timer->ctx = ctx; + + if (timer->ctx == QDF_CONTEXT_HARDWARE) { + hrtimer_init(hrtimer, clock, mode); + hrtimer->function = cback; + } else if (timer->ctx == QDF_CONTEXT_TASKLET) { + tasklet_hrtimer_init(tasklet_hrtimer, cback, clock, mode); + } +} + +/** + * __qdf_hrtimer_kill() - kills hrtimer in given context + * @timer: pointer to the hrtimer object + * + * kills hrtimer in given context + * + * Return: void + */ +static inline +void __qdf_hrtimer_kill(__qdf_hrtimer_data_t *timer) +{ + if (timer->ctx == QDF_CONTEXT_HARDWARE) + hrtimer_cancel(&timer->u.hrtimer); + else if (timer->ctx == QDF_CONTEXT_TASKLET) + tasklet_hrtimer_cancel(&timer->u.tasklet_hrtimer); +} + +/** + * __qdf_hrtimer_get_remaining() - check remaining time in the timer + * @timer: pointer to the hrtimer object + * + * check whether the timer is on one of the queues + * + * Return: remaining time as ktime object + */ +static inline ktime_t __qdf_hrtimer_get_remaining(__qdf_hrtimer_data_t *timer) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + struct tasklet_hrtimer *tasklet_hrtimer = &timer->u.tasklet_hrtimer; + + if (timer->ctx == QDF_CONTEXT_HARDWARE) + return hrtimer_get_remaining(hrtimer); + else + return hrtimer_get_remaining(&tasklet_hrtimer->timer); +} + +/** + * __qdf_hrtimer_is_queued() - check whether the timer is on one of the queues + * @timer: pointer to the hrtimer object + * + * check whether the timer is on one of the queues + * + * Return: false when the timer was not in queue + * true when the timer was in queue + */ +static inline bool __qdf_hrtimer_is_queued(__qdf_hrtimer_data_t *timer) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + struct 
tasklet_hrtimer *tasklet_hrtimer = &timer->u.tasklet_hrtimer; + + if (timer->ctx == QDF_CONTEXT_HARDWARE) + return hrtimer_is_queued(hrtimer); + else + return hrtimer_is_queued(&tasklet_hrtimer->timer); +} + +/** + * __qdf_hrtimer_callback_running() - check if callback is running + * @timer: pointer to the hrtimer object + * + * check whether the timer is running the callback function + * + * Return: false when callback is not running + * true when callback is running + */ +static inline bool __qdf_hrtimer_callback_running(__qdf_hrtimer_data_t *timer) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + struct tasklet_hrtimer *tasklet_hrtimer = &timer->u.tasklet_hrtimer; + + if (timer->ctx == QDF_CONTEXT_HARDWARE) + return hrtimer_callback_running(hrtimer); + else + return hrtimer_callback_running(&tasklet_hrtimer->timer); +} + +/** + * __qdf_hrtimer_active() - check if timer is active + * @timer: pointer to the hrtimer object + * + * Check if timer is active. A timer is active, when it is enqueued into + * the rbtree or the callback function is running. 
+ * + * Return: false if timer is not active + * true if timer is active + */ +static inline bool __qdf_hrtimer_active(__qdf_hrtimer_data_t *timer) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + struct tasklet_hrtimer *tasklet_hrtimer = &timer->u.tasklet_hrtimer; + + if (timer->ctx == QDF_CONTEXT_HARDWARE) + return hrtimer_active(hrtimer); + else + return hrtimer_active(&tasklet_hrtimer->timer); +} + +/** + * __qdf_hrtimer_cb_get_time() - get remaining time in callback + * @timer: pointer to the hrtimer object + * + * Get remaining time in the hrtimer callback + * + * Return: time remaining as ktime object + */ +static inline ktime_t __qdf_hrtimer_cb_get_time(__qdf_hrtimer_data_t *timer) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + struct tasklet_hrtimer *tasklet_hrtimer = &timer->u.tasklet_hrtimer; + + if (timer->ctx == QDF_CONTEXT_HARDWARE) + return hrtimer_cb_get_time(hrtimer); + else + return hrtimer_cb_get_time(&tasklet_hrtimer->timer); +} + +/** + * __qdf_hrtimer_forward() - forward the hrtimer + * @timer: pointer to the hrtimer object + * @now: current ktime + * @interval: interval to forward as ktime object + * + * Forward the timer expiry so it will expire in the future + * + * Return:the number of overruns + */ +static inline uint64_t __qdf_hrtimer_forward(__qdf_hrtimer_data_t *timer, + ktime_t now, + ktime_t interval) +{ + struct hrtimer *hrtimer = &timer->u.hrtimer; + struct tasklet_hrtimer *tasklet_hrtimer = &timer->u.tasklet_hrtimer; + + if (timer->ctx == QDF_CONTEXT_HARDWARE) + return hrtimer_forward(hrtimer, now, interval); + else + return hrtimer_forward(&tasklet_hrtimer->timer, now, interval); +} + +#endif /* _I_QDF_HRTIMER_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_idr.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_idr.h new file mode 100644 index 0000000000000000000000000000000000000000..57b56c031104db90637f3858cc93872ebf2f14cb --- /dev/null +++ 
b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_idr.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_idr.h (ID Allocation) + * Linux-specific definitions for QDF ID Allocation API's + */ + +#if !defined(__I_QDF_IDR_H) +#define __I_QDF_IDR_H + +#include +#include + +/** + * struct __qdf_idr_s + * @lock: qdf spinlock + * @idr: idr handler + */ +struct __qdf_idr_s { + qdf_spinlock_t lock; + struct idr idr; +}; + +typedef struct __qdf_idr_s __qdf_idr; + +#endif /* __I_QDF_IDR_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_ipa.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_ipa.h new file mode 100644 index 0000000000000000000000000000000000000000..09be8ff9b409dc1f5635d54f3cbd7dfbba6bb338 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_ipa.h @@ -0,0 +1,935 @@ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _I_QDF_IPA_H +#define _I_QDF_IPA_H + +#ifdef IPA_OFFLOAD + +#include <linux/ipa.h> + +/** + * __qdf_ipa_wdi_meter_evt_type_t - type of event client callback is + * for AP+STA mode metering + * @IPA_GET_WDI_SAP_STATS: get IPA_stats between SAP and STA - + * use ipa_get_wdi_sap_stats structure + * @IPA_SET_WIFI_QUOTA: set quota limit on STA - + * use ipa_set_wifi_quota structure + */ +typedef enum ipa_wdi_meter_evt_type __qdf_ipa_wdi_meter_evt_type_t; + +typedef struct ipa_get_wdi_sap_stats __qdf_ipa_get_wdi_sap_stats_t; + +#define QDF_IPA_GET_WDI_SAP_STATS_RESET_STATS(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->reset_stats) +#define QDF_IPA_GET_WDI_SAP_STATS_STATS_VALID(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->stats_valid) +#define QDF_IPA_GET_WDI_SAP_STATS_IPV4_TX_PACKETS(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->ipv4_tx_packets) +#define QDF_IPA_GET_WDI_SAP_STATS_IPV4_TX_BYTES(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->ipv4_tx_bytes) +#define QDF_IPA_GET_WDI_SAP_STATS_IPV4_RX_PACKETS(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->ipv4_rx_packets) +#define QDF_IPA_GET_WDI_SAP_STATS_IPV4_RX_BYTES(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->ipv4_rx_bytes) +#define QDF_IPA_GET_WDI_SAP_STATS_IPV6_TX_PACKETS(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->ipv6_tx_packets)
+#define QDF_IPA_GET_WDI_SAP_STATS_IPV6_TX_BYTES(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->ipv6_tx_bytes) +#define QDF_IPA_GET_WDI_SAP_STATS_IPV6_RX_PACKETS(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->ipv6_rx_packets) +#define QDF_IPA_GET_WDI_SAP_STATS_IPV6_RX_BYTES(wdi_sap_stats) \ + (((struct ipa_get_wdi_sap_stats *)(wdi_sap_stats))->ipv6_rx_bytes) + +/** + * __qdf_ipa_set_wifi_quota_t - structure used for + * IPA_SET_WIFI_QUOTA. + */ +typedef struct ipa_set_wifi_quota __qdf_ipa_set_wifi_quota_t; + +#define QDF_IPA_SET_WIFI_QUOTA_BYTES(ipa_set_quota) \ + (((struct ipa_set_wifi_quota *)(ipa_set_quota))->quota_bytes) +#define QDF_IPA_SET_WIFI_QUOTA_SET_QUOTA(ipa_set_quota) \ + (((struct ipa_set_wifi_quota *)(ipa_set_quota))->set_quota) +#define QDF_IPA_SET_WIFI_QUOTA_SET_VALID(ipa_set_quota) \ + (((struct ipa_set_wifi_quota *)(ipa_set_quota))->set_valid) + +/** + * __qdf_ipa_connect_params_t - low-level client connect input parameters. Either + * client allocates the data and desc FIFO and specifies that in data+desc OR + * specifies sizes and pipe_mem pref and IPA does the allocation. 
+ */ +typedef struct ipa_connect_params __qdf_ipa_connect_params_t; + +/** + * __qdf_ipa_tx_meta_t - meta-data for the TX packet + */ +typedef struct ipa_tx_meta __qdf_ipa_tx_meta_t; + +/** + * __qdf_ipa_msg_free_fn_t - callback function + * + * Message callback registered by kernel client with IPA driver to + * free message payload after IPA driver processing is complete + */ +typedef void (*__qdf_ipa_msg_free_fn_t)(void *buff, u32 len, u32 type); + +/** + * __qdf_ipa_sps_params_t - SPS related output parameters resulting from + */ +typedef struct ipa_sps_params __qdf_ipa_sps_params_t; + +/** + * __qdf_ipa_tx_intf_t - interface tx properties + */ +typedef struct ipa_tx_intf __qdf_ipa_tx_intf_t; + +#define QDF_IPA_TX_INTF_PROP(tx_intf) \ + (((struct ipa_tx_intf *)(tx_intf))->prop) + +/** + * __qdf_ipa_rx_intf_t - interface rx properties + */ +typedef struct ipa_rx_intf __qdf_ipa_rx_intf_t; + +#define QDF_IPA_RX_INTF_PROP(rx_intf) \ + (((struct ipa_rx_intf *)(rx_intf))->prop) + +/** + * __qdf_ipa_ext_intf_t - interface ext properties + */ +typedef struct ipa_ext_intf __qdf_ipa_ext_intf_t; + +/** + * __qdf_ipa_sys_connect_params_t - information needed to setup an IPA end-point + * in system-BAM mode + */ +typedef struct ipa_sys_connect_params __qdf_ipa_sys_connect_params_t; + +#define QDF_IPA_SYS_PARAMS_NAT_EN(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->ipa_ep_cfg.nat.nat_en) +#define QDF_IPA_SYS_PARAMS_HDR_LEN(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->ipa_ep_cfg.hdr.hdr_len) +#define QDF_IPA_SYS_PARAMS_HDR_ADDITIONAL_CONST_LEN(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->ipa_ep_cfg.hdr.hdr_additional_const_len) +#define QDF_IPA_SYS_PARAMS_HDR_OFST_PKT_SIZE_VALID(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid) +#define QDF_IPA_SYS_PARAMS_HDR_OFST_PKT_SIZE(ipa_sys_params) \ + (((struct ipa_sys_connect_params 
*)(ipa_sys_params))->ipa_ep_cfg.hdr.hdr_ofst_pkt_size) +#define QDF_IPA_SYS_PARAMS_HDR_LITTLE_ENDIAN(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->ipa_ep_cfg.hdr_ext.hdr_little_endian) +#define QDF_IPA_SYS_PARAMS_MODE(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->ipa_ep_cfg.mode.mode) +#define QDF_IPA_SYS_PARAMS_CLIENT(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->client) +#define QDF_IPA_SYS_PARAMS_DESC_FIFO_SZ(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->desc_fifo_sz) +#define QDF_IPA_SYS_PARAMS_PRIV(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->priv) +#define QDF_IPA_SYS_PARAMS_NOTIFY(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->notify) +#define QDF_IPA_SYS_PARAMS_SKIP_EP_CFG(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->skip_ep_cfg) +#define QDF_IPA_SYS_PARAMS_KEEP_IPA_AWAKE(ipa_sys_params) \ + (((struct ipa_sys_connect_params *)(ipa_sys_params))->keep_ipa_awake) + +/** + * __qdf_ipa_rm_event_t - IPA RM events + * + * Indicate the resource state change + */ +typedef enum ipa_rm_event __qdf_ipa_rm_event_t; + +/** + * struct __qdf_ipa_rm_register_params_t - information needed to + * register IPA RM client with IPA RM + */ +typedef struct ipa_rm_register_params __qdf_ipa_rm_register_params_t; + +/** + * struct __qdf_ipa_rm_create_params_t - information needed to initialize + * the resource + * + * IPA RM client is expected to perform non blocking operations only + * in request_resource and release_resource functions and + * release notification context as soon as possible.
+ */ +typedef struct ipa_rm_create_params __qdf_ipa_rm_create_params_t; + +#define QDF_IPA_RM_CREATE_PARAMS_NAME(create_params) \ + (((struct ipa_rm_create_params *)(create_params))->name) +#define QDF_IPA_RM_CREATE_PARAMS_USER_DATA(create_params) \ + (((struct ipa_rm_create_params *)(create_params))->reg_params.user_data) +#define QDF_IPA_RM_CREATE_PARAMS_NOTIFY_CB(create_params) \ + (((struct ipa_rm_create_params *)(create_params))->reg_params.notify_cb) +#define QDF_IPA_RM_CREATE_PARAMS_REQUEST_RESOURCE(create_params) \ + (((struct ipa_rm_create_params *)(create_params))->request_resource) +#define QDF_IPA_RM_CREATE_PARAMS_RELEASE_RESOURCE(create_params) \ + (((struct ipa_rm_create_params *)(create_params))->release_resource) +#define QDF_IPA_RM_CREATE_PARAMS_FLOOR_VOLTAGE(create_params) \ + (((struct ipa_rm_create_params *)(create_params))->floor_voltage) + +/** + * __qdf_ipa_rm_perf_profile_t - information regarding IPA RM client performance + * profile + */ +typedef struct ipa_rm_perf_profile __qdf_ipa_rm_perf_profile_t; + +#define QDF_IPA_RM_PERF_PROFILE_MAX_SUPPORTED_BANDWIDTH_MBPS(profile) \ + (((struct ipa_rm_perf_profile *)(profile))->max_supported_bandwidth_mbps) + +/** + * __qdf_ipa_tx_data_desc_t - information needed + * to send data packet to HW link: link to data descriptors + * priv: client specific private data + */ +typedef struct ipa_tx_data_desc __qdf_ipa_tx_data_desc_t; + +/** + * __qdf_ipa_rx_data_t - information needed + * to send to wlan driver on receiving data from ipa hw + */ +typedef struct ipa_rx_data __qdf_ipa_rx_data_t; + +#define QDF_IPA_RX_DATA_SKB(desc) \ + (((struct ipa_rx_data *)(desc))->skb) +#define QDF_IPA_RX_DATA_SKB_LEN(desc) \ + (((struct ipa_rx_data *)(desc))->skb->len) +#define QDF_IPA_RX_DATA_DMA_ADDR(desc) \ + (((struct ipa_rx_data *)(desc))->dma_addr) + +/** + * __qdf_ipa_wdi_ul_params_t - WDI_RX configuration + */ +typedef struct ipa_wdi_ul_params __qdf_ipa_wdi_ul_params_t; + +/** + * __qdf_ipa_wdi_ul_params_smmu_t - 
WDI_RX configuration (with WLAN SMMU) + */ +typedef struct ipa_wdi_ul_params_smmu __qdf_ipa_wdi_ul_params_smmu_t; + +/** + * __qdf_ipa_wdi_dl_params_t - WDI_TX configuration + */ +typedef struct ipa_wdi_dl_params __qdf_ipa_wdi_dl_params_t; + +/** + * __qdf_ipa_wdi_dl_params_smmu_t - WDI_TX configuration (with WLAN SMMU) + */ +typedef struct ipa_wdi_dl_params_smmu __qdf_ipa_wdi_dl_params_smmu_t; + +/** + * __qdf_ipa_wdi_in_params_t - information provided by WDI client + */ +typedef struct ipa_wdi_in_params __qdf_ipa_wdi_in_params_t; + +#define QDF_IPA_PIPE_IN_NAT_EN(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.ipa_ep_cfg.nat.nat_en) +#define QDF_IPA_PIPE_IN_HDR_LEN(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.ipa_ep_cfg.hdr.hdr_len) +#define QDF_IPA_PIPE_IN_HDR_OFST_METADATA_VALID(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid) +#define QDF_IPA_PIPE_IN_HDR_METADATA_REG_VALID(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.ipa_ep_cfg.hdr.hdr_metadata_reg_valid) +#define QDF_IPA_PIPE_IN_HDR_OFST_PKT_SIZE_VALID(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid) +#define QDF_IPA_PIPE_IN_HDR_OFST_PKT_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.ipa_ep_cfg.hdr.hdr_ofst_pkt_size) +#define QDF_IPA_PIPE_IN_HDR_ADDITIONAL_CONST_LEN(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.ipa_ep_cfg.hdr.hdr_additional_const_len) +#define QDF_IPA_PIPE_IN_MODE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.ipa_ep_cfg.mode.mode) +#define QDF_IPA_PIPE_IN_CLIENT(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.client) +#define QDF_IPA_PIPE_IN_DESC_FIFO_SZ(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.desc_fifo_sz) +#define QDF_IPA_PIPE_IN_PRIV(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.priv) +#define QDF_IPA_PIPE_IN_HDR_LITTLE_ENDIAN(pipe_in) \ + (((struct ipa_wdi_in_params 
*)(pipe_in))->sys.ipa_ep_cfg.hdr_ext.hdr_little_endian) +#define QDF_IPA_PIPE_IN_NOTIFY(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.notify) +#define QDF_IPA_PIPE_IN_KEEP_IPA_AWAKE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.keep_ipa_awake) +#define QDF_IPA_PIPE_IN_KEEP_IPA_AWAKE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->sys.keep_ipa_awake) +#ifdef FEATURE_METERING +#define QDF_IPA_PIPE_IN_WDI_NOTIFY(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->wdi_notify) +#endif + +#ifdef ENABLE_SMMU_S1_TRANSLATION +#define QDF_IPA_PIPE_IN_SMMU_ENABLED(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->smmu_enabled) + +#define QDF_IPA_PIPE_IN_DL_SMMU_COMP_RING(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl_smmu.comp_ring) +#define QDF_IPA_PIPE_IN_DL_SMMU_CE_RING(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl_smmu.ce_ring) +#define QDF_IPA_PIPE_IN_DL_SMMU_COMP_RING_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl_smmu.comp_ring_size) +#define QDF_IPA_PIPE_IN_DL_SMMU_CE_RING_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl_smmu.ce_ring_size) +#define QDF_IPA_PIPE_IN_DL_SMMU_CE_DOOR_BELL_PA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl_smmu.ce_door_bell_pa) +#define QDF_IPA_PIPE_IN_DL_SMMU_NUM_TX_BUFFERS(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl_smmu.num_tx_buffers) + +#define QDF_IPA_PIPE_IN_UL_SMMU_RDY_RING(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul_smmu.rdy_ring) +#define QDF_IPA_PIPE_IN_UL_SMMU_RDY_RING_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul_smmu.rdy_ring_size) +#define QDF_IPA_PIPE_IN_UL_SMMU_RDY_RING_RP_PA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul_smmu.rdy_ring_rp_pa) +#define QDF_IPA_PIPE_IN_UL_SMMU_RDY_RING_RP_VA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul_smmu.rdy_ring_rp_va) +#define QDF_IPA_PIPE_IN_UL_SMMU_RDY_COMP_RING(pipe_in) \ + (((struct 
ipa_wdi_in_params *)(pipe_in))->u.ul_smmu.rdy_comp_ring) +#define QDF_IPA_PIPE_IN_UL_SMMU_RDY_COMP_RING_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul_smmu.rdy_comp_ring_size) +#define QDF_IPA_PIPE_IN_UL_SMMU_RDY_COMP_RING_WP_PA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul_smmu.rdy_comp_ring_wp_pa) +#define QDF_IPA_PIPE_IN_UL_SMMU_RDY_COMP_RING_WP_VA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul_smmu.rdy_comp_ring_wp_va) +#endif + +#define QDF_IPA_PIPE_IN_DL_COMP_RING_BASE_PA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl.comp_ring_base_pa) +#define QDF_IPA_PIPE_IN_DL_COMP_RING_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl.comp_ring_size) +#define QDF_IPA_PIPE_IN_DL_CE_RING_BASE_PA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl.ce_ring_base_pa) +#define QDF_IPA_PIPE_IN_DL_CE_RING_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl.ce_ring_size) +#define QDF_IPA_PIPE_IN_DL_CE_DOOR_BELL_PA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl.ce_door_bell_pa) +#define QDF_IPA_PIPE_IN_DL_NUM_TX_BUFFERS(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.dl.num_tx_buffers) + +#define QDF_IPA_PIPE_IN_UL_RDY_RING_BASE_PA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul.rdy_ring_base_pa) +#define QDF_IPA_PIPE_IN_UL_RDY_RING_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul.rdy_ring_size) +#define QDF_IPA_PIPE_IN_UL_RDY_RING_RP_PA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul.rdy_ring_rp_pa) +#define QDF_IPA_PIPE_IN_UL_RDY_RING_RP_VA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul.rdy_ring_rp_va) +#define QDF_IPA_PIPE_IN_UL_RDY_COMP_RING(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul.rdy_comp_ring_base_pa) +#define QDF_IPA_PIPE_IN_UL_RDY_COMP_RING_SIZE(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul.rdy_comp_ring_size) +#define QDF_IPA_PIPE_IN_UL_RDY_COMP_RING_WP_PA(pipe_in) \ + 
(((struct ipa_wdi_in_params *)(pipe_in))->u.ul.rdy_comp_ring_wp_pa) +#define QDF_IPA_PIPE_IN_UL_RDY_COMP_RING_WP_VA(pipe_in) \ + (((struct ipa_wdi_in_params *)(pipe_in))->u.ul.rdy_comp_ring_wp_va) + +/** + * __qdf_ipa_wdi_out_params_t - information provided to WDI client + */ +typedef struct ipa_wdi_out_params __qdf_ipa_wdi_out_params_t; + +#define QDF_IPA_PIPE_OUT_UC_DOOR_BELL_PA(pipe_out) \ + (((struct ipa_wdi_out_params *)(pipe_out))->uc_door_bell_pa) +#define QDF_IPA_PIPE_OUT_CLNT_HDL(pipe_out) \ + (((struct ipa_wdi_out_params *)(pipe_out))->clnt_hdl) + +/** + * __qdf_ipa_wdi_db_params_t - information provided to retrieve + * physical address of uC doorbell + */ +typedef struct ipa_wdi_db_params __qdf_ipa_wdi_db_params_t; + +/** + * __qdf_ipa_wdi_uc_ready_params_t - uC ready CB parameters + */ +typedef void (*__qdf_ipa_uc_ready_cb)(void *priv); +typedef struct ipa_wdi_uc_ready_params __qdf_ipa_wdi_uc_ready_params_t; + +#define QDF_IPA_UC_READY_PARAMS_IS_UC_READY(uc_ready_param) \ + (((struct ipa_wdi_uc_ready_params *)(uc_ready_param))->is_uC_ready) +#define QDF_IPA_UC_READY_PARAMS_PRIV(uc_ready_param) \ + (((struct ipa_wdi_uc_ready_params *)(uc_ready_param))->priv) +#define QDF_IPA_UC_READY_PARAMS_NOTIFY(uc_ready_param) \ + (((struct ipa_wdi_uc_ready_params *)(uc_ready_param))->notify) + +/** + * __qdf_ipa_wdi_buffer_info_t - address info of a WLAN allocated buffer + * + * IPA driver will create/release IOMMU mapping in IPA SMMU from iova->pa + */ +typedef struct ipa_wdi_buffer_info __qdf_ipa_wdi_buffer_info_t; + +/** + * __qdf_ipa_gsi_ep_config_t - IPA GSI endpoint configurations + */ +typedef struct ipa_gsi_ep_config __qdf_ipa_gsi_ep_config_t; + +/** + * __qdf_ipa_dp_evt_type_t - type of event client callback is + * invoked for on data path + * @IPA_RECEIVE: data is struct sk_buff + * @IPA_WRITE_DONE: data is struct sk_buff + */ +typedef enum ipa_dp_evt_type __qdf_ipa_dp_evt_type_t; + +typedef struct ipa_hdr_add __qdf_ipa_hdr_add_t; +typedef struct 
ipa_hdr_del __qdf_ipa_hdr_del_t; +typedef struct ipa_ioc_add_hdr __qdf_ipa_ioc_add_hdr_t; + +#define QDF_IPA_IOC_ADD_HDR_COMMIT(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->commit) +#define QDF_IPA_IOC_ADD_HDR_NUM_HDRS(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->num_hdrs) +#define QDF_IPA_IOC_ADD_HDR_NAME(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].name) +#define QDF_IPA_IOC_ADD_HDR_HDR(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].hdr) +#define QDF_IPA_IOC_ADD_HDR_HDR_LEN(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].hdr_len) +#define QDF_IPA_IOC_ADD_HDR_TYPE(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].type) +#define QDF_IPA_IOC_ADD_HDR_IS_PARTIAL(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].is_partial) +#define QDF_IPA_IOC_ADD_HDR_HDR_HDL(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].hdr_hdl) +#define QDF_IPA_IOC_ADD_HDR_STATUS(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].status) +#define QDF_IPA_IOC_ADD_HDR_IS_ETH2_OFST_VALID(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].is_eth2_ofst_valid) +#define QDF_IPA_IOC_ADD_HDR_ETH2_OFST(ipa_hdr) \ + (((struct ipa_ioc_add_hdr *)(ipa_hdr))->hdr[0].eth2_ofst) + +typedef struct ipa_ioc_del_hdr __qdf_ipa_ioc_del_hdr_t; + +#define QDF_IPA_IOC_DEL_HDR_COMMIT(ipa_hdr) \ + (((struct ipa_ioc_del_hdr *)(ipa_hdr))->commit) +#define QDF_IPA_IOC_DEL_HDR_NUM_HDRS(ipa_hdr) \ + (((struct ipa_ioc_del_hdr *)(ipa_hdr))->num_hdls) +#define QDF_IPA_IOC_DEL_HDR_HDL(ipa_hdr) \ + (((struct ipa_ioc_del_hdr *)(ipa_hdr))->hdl[0].hdl) +#define QDF_IPA_IOC_DEL_HDR_STATUS(ipa_hdr) \ + (((struct ipa_ioc_del_hdr *)(ipa_hdr))->hdl[0].status) + +typedef struct ipa_ioc_get_hdr __qdf_ipa_ioc_get_hdr_t; + +#define QDF_IPA_IOC_GET_HDR_NAME(ipa_hdr) \ + (((struct ipa_ioc_get_hdr *)(ipa_hdr))->name) +#define QDF_IPA_IOC_GET_HDR_HDL(ipa_hdr) \ + (((struct ipa_ioc_get_hdr *)(ipa_hdr))->hdl) + +typedef struct ipa_ioc_copy_hdr 
__qdf_ipa_ioc_copy_hdr_t; +typedef struct ipa_ioc_add_hdr_proc_ctx __qdf_ipa_ioc_add_hdr_proc_ctx_t; +typedef struct ipa_ioc_del_hdr_proc_ctx __qdf_ipa_ioc_del_hdr_proc_ctx_t; +typedef struct ipa_msg_meta __qdf_ipa_msg_meta_t; + +#define QDF_IPA_MSG_META_MSG_TYPE(meta) \ + (((struct ipa_msg_meta *)(meta))->msg_type) +#define QDF_IPA_MSG_META_MSG_LEN(meta) \ + (((struct ipa_msg_meta *)(meta))->msg_len) + +typedef enum ipa_client_type __qdf_ipa_client_type_t; +typedef struct IpaHwStatsWDIInfoData_t __qdf_ipa_hw_stats_wdi_info_data_t; +typedef enum ipa_rm_resource_name __qdf_ipa_rm_resource_name_t; +typedef enum ipa_wlan_event __qdf_ipa_wlan_event_t; +typedef struct ipa_wlan_msg __qdf_ipa_wlan_msg_t; + +#define QDF_IPA_WLAN_MSG_NAME(ipa_msg) \ + (((struct ipa_wlan_msg *)(ipa_msg))->name) +#define QDF_IPA_WLAN_MSG_MAC_ADDR(ipa_msg) \ + (((struct ipa_wlan_msg *)(ipa_msg))->mac_addr) + +typedef struct ipa_wlan_msg_ex __qdf_ipa_wlan_msg_ex_t; + +#define QDF_IPA_WLAN_MSG_EX_NAME(ipa_msg) \ + (((struct ipa_wlan_msg_ex *)(ipa_msg))->name) +#define QDF_IPA_WLAN_MSG_EX_EXNUM_OF_ATTRIBS(ipa_msg) \ + (((struct ipa_wlan_msg_ex *)(ipa_msg))->num_of_attribs) +#define QDF_IPA_WLAN_MSG_EX_ATTRIB_TYPE(ipa_msg) \ + (((struct ipa_wlan_msg_ex *)(ipa_msg))->attribs.attrib_type) +#define QDF_IPA_WLAN_MSG_EX_OFFSET(ipa_msg) \ + (((struct ipa_wlan_msg_ex *)(ipa_msg))->attribs.offset) +#define QDF_IPA_WLAN_MSG_EX_MAC_ADDR(ipa_msg) \ + (((struct ipa_wlan_msg_ex *)(ipa_msg))->attribs.u.mac_addr) + +typedef struct ipa_ioc_tx_intf_prop __qdf_ipa_ioc_tx_intf_prop_t; + +#define QDF_IPA_IOC_TX_INTF_PROP_IP(tx_prop) \ + (((struct ipa_ioc_tx_intf_prop *)(tx_prop))->ip) +#define QDF_IPA_IOC_TX_INTF_PROP_ATTRIB_MASK(tx_prop) \ + (((struct ipa_ioc_tx_intf_prop *)(tx_prop))->attrib.attrib_mask) +#define QDF_IPA_IOC_TX_INTF_PROP_META_DATA(tx_prop) \ + (((struct ipa_ioc_tx_intf_prop *)(tx_prop))->attrib.meta_data) +#define QDF_IPA_IOC_TX_INTF_PROP_META_DATA_MASK(tx_prop) \ + (((struct ipa_ioc_tx_intf_prop
*)(tx_prop))->attrib.meta_data_mask) +#define QDF_IPA_IOC_TX_INTF_PROP_DST_PIPE(tx_prop) \ + (((struct ipa_ioc_tx_intf_prop *)(tx_prop))->dst_pipe) +#define QDF_IPA_IOC_TX_INTF_PROP_ALT_DST_PIPE(tx_prop) \ + (((struct ipa_ioc_tx_intf_prop *)(tx_prop))->alt_dst_pipe) +#define QDF_IPA_IOC_TX_INTF_PROP_HDR_NAME(tx_prop) \ + (((struct ipa_ioc_tx_intf_prop *)(tx_prop))->hdr_name) +#define QDF_IPA_IOC_TX_INTF_PROP_HDR_L2_TYPE(tx_prop) \ + (((struct ipa_ioc_tx_intf_prop *)(tx_prop))->hdr_l2_type) + +typedef struct ipa_ioc_rx_intf_prop __qdf_ipa_ioc_rx_intf_prop_t; + +#define QDF_IPA_IOC_RX_INTF_PROP_IP(rx_prop) \ + (((struct ipa_ioc_rx_intf_prop *)(rx_prop))->ip) +#define QDF_IPA_IOC_RX_INTF_PROP_ATTRIB_MASK(rx_prop) \ + (((struct ipa_ioc_rx_intf_prop *)(rx_prop))->attrib.attrib_mask) +#define QDF_IPA_IOC_RX_INTF_PROP_META_DATA(rx_prop) \ + (((struct ipa_ioc_rx_intf_prop *)(rx_prop))->attrib.meta_data) +#define QDF_IPA_IOC_RX_INTF_PROP_META_DATA_MASK(rx_prop) \ + (((struct ipa_ioc_rx_intf_prop *)(rx_prop))->attrib.meta_data_mask) +#define QDF_IPA_IOC_RX_INTF_PROP_SRC_PIPE(rx_prop) \ + (((struct ipa_ioc_rx_intf_prop *)(rx_prop))->src_pipe) +#define QDF_IPA_IOC_RX_INTF_PROP_HDR_L2_TYPE(rx_prop) \ + (((struct ipa_ioc_rx_intf_prop *)(rx_prop))->hdr_l2_type) + +typedef struct ipa_wlan_hdr_attrib_val __qdf_ipa_wlan_hdr_attrib_val_t; + +#define __QDF_IPA_SET_META_MSG_TYPE(meta, msg_type) \ + __qdf_ipa_set_meta_msg_type(meta, msg_type) + +#define __QDF_IPA_RM_RESOURCE_GRANTED IPA_RM_RESOURCE_GRANTED +#define __QDF_IPA_RM_RESOURCE_RELEASED IPA_RM_RESOURCE_RELEASED + +#define __QDF_IPA_RM_RESOURCE_WLAN_PROD IPA_RM_RESOURCE_WLAN_PROD +#define __QDF_IPA_RM_RESOURCE_WLAN_CONS IPA_RM_RESOURCE_WLAN_CONS +#define __QDF_IPA_RM_RESOURCE_APPS_CONS IPA_RM_RESOURCE_APPS_CONS + +#define __QDF_IPA_VOLTAGE_LEVEL IPA_VOLTAGE_SVS + +#define __QDF_IPA_CLIENT_WLAN1_PROD IPA_CLIENT_WLAN1_PROD +#define __QDF_IPA_CLIENT_WLAN1_CONS IPA_CLIENT_WLAN1_CONS +#define __QDF_IPA_CLIENT_WLAN2_CONS 
IPA_CLIENT_WLAN2_CONS +#define __QDF_IPA_CLIENT_WLAN3_CONS IPA_CLIENT_WLAN3_CONS +#define __QDF_IPA_CLIENT_WLAN4_CONS IPA_CLIENT_WLAN4_CONS + +/* + * Resume / Suspend + */ +static inline int __qdf_ipa_reset_endpoint(u32 clnt_hdl) +{ + return ipa_reset_endpoint(clnt_hdl); +} + +/* + * Remove ep delay + */ +static inline int __qdf_ipa_clear_endpoint_delay(u32 clnt_hdl) +{ + return ipa_clear_endpoint_delay(clnt_hdl); +} + +/* + * Header removal / addition + */ +static inline int __qdf_ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs) +{ + return ipa_add_hdr(hdrs); +} + +static inline int __qdf_ipa_del_hdr(struct ipa_ioc_del_hdr *hdls) +{ + return ipa_del_hdr(hdls); +} + +static inline int __qdf_ipa_commit_hdr(void) +{ + return ipa_commit_hdr(); +} + +static inline int __qdf_ipa_get_hdr(struct ipa_ioc_get_hdr *lookup) +{ + return ipa_get_hdr(lookup); +} + +static inline int __qdf_ipa_put_hdr(u32 hdr_hdl) +{ + return ipa_put_hdr(hdr_hdl); +} + +static inline int __qdf_ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy) +{ + return ipa_copy_hdr(copy); +} + +/* + * Messaging + */ +static inline int __qdf_ipa_send_msg(struct ipa_msg_meta *meta, void *buff, + ipa_msg_free_fn callback) +{ + return ipa_send_msg(meta, buff, callback); +} + +static inline int __qdf_ipa_register_pull_msg(struct ipa_msg_meta *meta, + ipa_msg_pull_fn callback) +{ + return ipa_register_pull_msg(meta, callback); +} + +static inline int __qdf_ipa_deregister_pull_msg(struct ipa_msg_meta *meta) +{ + return ipa_deregister_pull_msg(meta); +} + +/* + * Interface + */ +static inline int __qdf_ipa_register_intf(const char *name, + const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx) +{ + return ipa_register_intf(name, tx, rx); +} + +static inline int __qdf_ipa_register_intf_ext(const char *name, + const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext) +{ + return ipa_register_intf_ext(name, tx, rx, ext); +} + +static inline int __qdf_ipa_deregister_intf(const char *name) +{ 
+ return ipa_deregister_intf(name); +} + +/* + * Data path + */ +static inline int __qdf_ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, + struct ipa_tx_meta *metadata) +{ + return ipa_tx_dp(dst, skb, metadata); +} + +/* + * To transfer multiple data packets + */ +static inline int __qdf_ipa_tx_dp_mul( + enum ipa_client_type dst, + struct ipa_tx_data_desc *data_desc) +{ + return ipa_tx_dp_mul(dst, data_desc); +} + +static inline void __qdf_ipa_free_skb(struct ipa_rx_data *rx_in) +{ + ipa_free_skb(rx_in); +} + +/* + * System pipes + */ +static inline u16 __qdf_ipa_get_smem_restr_bytes(void) +{ + return ipa_get_smem_restr_bytes(); +} + +static inline int __qdf_ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, + u32 *clnt_hdl) +{ + return ipa_setup_sys_pipe(sys_in, clnt_hdl); +} + +static inline int __qdf_ipa_teardown_sys_pipe(u32 clnt_hdl) +{ + return ipa_teardown_sys_pipe(clnt_hdl); +} + +static inline int __qdf_ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out) +{ + return ipa_connect_wdi_pipe(in, out); +} + +static inline int __qdf_ipa_disconnect_wdi_pipe(u32 clnt_hdl) +{ + return ipa_disconnect_wdi_pipe(clnt_hdl); +} + +static inline int __qdf_ipa_enable_wdi_pipe(u32 clnt_hdl) +{ + return ipa_enable_wdi_pipe(clnt_hdl); +} + +static inline int __qdf_ipa_disable_wdi_pipe(u32 clnt_hdl) +{ + return ipa_disable_wdi_pipe(clnt_hdl); +} + +static inline int __qdf_ipa_resume_wdi_pipe(u32 clnt_hdl) +{ + return ipa_resume_wdi_pipe(clnt_hdl); +} + +static inline int __qdf_ipa_suspend_wdi_pipe(u32 clnt_hdl) +{ + return ipa_suspend_wdi_pipe(clnt_hdl); +} + +static inline int __qdf_ipa_uc_wdi_get_dbpa( + struct ipa_wdi_db_params *out) +{ + return ipa_uc_wdi_get_dbpa(out); +} + +static inline int __qdf_ipa_uc_reg_rdyCB( + struct ipa_wdi_uc_ready_params *param) +{ + return ipa_uc_reg_rdyCB(param); +} + +static inline int __qdf_ipa_uc_dereg_rdyCB(void) +{ + return ipa_uc_dereg_rdyCB(); +} + + +/* + * Resource manager + */ 
+static inline int __qdf_ipa_rm_create_resource( + struct ipa_rm_create_params *create_params) +{ + return ipa_rm_create_resource(create_params); +} + +static inline int __qdf_ipa_rm_delete_resource( + enum ipa_rm_resource_name resource_name) +{ + return ipa_rm_delete_resource(resource_name); +} + +static inline int __qdf_ipa_rm_register(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params) +{ + return ipa_rm_register(resource_name, reg_params); +} + +static inline int __qdf_ipa_rm_set_perf_profile( + enum ipa_rm_resource_name resource_name, + struct ipa_rm_perf_profile *profile) +{ + return ipa_rm_set_perf_profile(resource_name, profile); +} + +static inline int __qdf_ipa_rm_deregister(enum ipa_rm_resource_name resource_name, + struct ipa_rm_register_params *reg_params) +{ + return ipa_rm_deregister(resource_name, reg_params); +} + +static inline int __qdf_ipa_rm_add_dependency( + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return ipa_rm_add_dependency(resource_name, depends_on_name); +} + +static inline int __qdf_ipa_rm_add_dependency_sync( + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return ipa_rm_add_dependency_sync(resource_name, depends_on_name); +} + +static inline int __qdf_ipa_rm_delete_dependency( + enum ipa_rm_resource_name resource_name, + enum ipa_rm_resource_name depends_on_name) +{ + return ipa_rm_delete_dependency(resource_name, depends_on_name); +} + +static inline int __qdf_ipa_rm_request_resource( + enum ipa_rm_resource_name resource_name) +{ + return ipa_rm_request_resource(resource_name); +} + +static inline int __qdf_ipa_rm_release_resource( + enum ipa_rm_resource_name resource_name) +{ + return ipa_rm_release_resource(resource_name); +} + +static inline int __qdf_ipa_rm_notify_completion(enum ipa_rm_event event, + enum ipa_rm_resource_name resource_name) +{ + return ipa_rm_notify_completion(event, resource_name); +} + 
+static inline int __qdf_ipa_rm_inactivity_timer_init( + enum ipa_rm_resource_name resource_name, + unsigned long msecs) +{ + return ipa_rm_inactivity_timer_init(resource_name, msecs); +} + +static inline int __qdf_ipa_rm_inactivity_timer_destroy( + enum ipa_rm_resource_name resource_name) +{ + return ipa_rm_inactivity_timer_destroy(resource_name); +} + +static inline int __qdf_ipa_rm_inactivity_timer_request_resource( + enum ipa_rm_resource_name resource_name) +{ + return ipa_rm_inactivity_timer_request_resource(resource_name); +} + +static inline int __qdf_ipa_rm_inactivity_timer_release_resource( + enum ipa_rm_resource_name resource_name) +{ + return ipa_rm_inactivity_timer_release_resource(resource_name); +} + +/* + * Miscellaneous + */ +static inline void __qdf_ipa_bam_reg_dump(void) +{ + return ipa_bam_reg_dump(); +} + +static inline int __qdf_ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats) +{ + return ipa_get_wdi_stats(stats); +} + +static inline int __qdf_ipa_get_ep_mapping(enum ipa_client_type client) +{ + return ipa_get_ep_mapping(client); +} + +static inline bool __qdf_ipa_is_ready(void) +{ + return ipa_is_ready(); +} + +static inline void __qdf_ipa_proxy_clk_vote(void) +{ + return ipa_proxy_clk_vote(); +} + +static inline void __qdf_ipa_proxy_clk_unvote(void) +{ + return ipa_proxy_clk_unvote(); +} + +static inline bool __qdf_ipa_is_client_handle_valid(u32 clnt_hdl) +{ + return ipa_is_client_handle_valid(clnt_hdl); +} + +static inline enum ipa_client_type __qdf_ipa_get_client_mapping(int pipe_idx) +{ + return ipa_get_client_mapping(pipe_idx); +} + +static inline enum ipa_rm_resource_name __qdf_ipa_get_rm_resource_from_ep( + int pipe_idx) +{ + return ipa_get_rm_resource_from_ep(pipe_idx); +} + +static inline bool __qdf_ipa_get_modem_cfg_emb_pipe_flt(void) +{ + return ipa_get_modem_cfg_emb_pipe_flt(); +} + +static inline enum ipa_transport_type __qdf_ipa_get_transport_type(void) +{ + return ipa_get_transport_type(); +} + +static inline struct 
device *__qdf_ipa_get_dma_dev(void) +{ + return ipa_get_dma_dev(); +} + +static inline struct iommu_domain *__qdf_ipa_get_smmu_domain(void) +{ + return ipa_get_smmu_domain(); +} + +static inline int __qdf_ipa_create_wdi_mapping(u32 num_buffers, + __qdf_ipa_wdi_buffer_info_t *info) +{ + return ipa_create_wdi_mapping(num_buffers, info); +} + +static inline int __qdf_ipa_release_wdi_mapping(u32 num_buffers, + struct ipa_wdi_buffer_info *info) +{ + return ipa_release_wdi_mapping(num_buffers, info); +} + +static inline int __qdf_ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count) +{ + return ipa_disable_apps_wan_cons_deaggr(agg_size, agg_count); +} + +static inline const struct ipa_gsi_ep_config *__qdf_ipa_get_gsi_ep_info(enum ipa_client_type client) +{ + return ipa_get_gsi_ep_info(client); +} + +static inline int __qdf_ipa_stop_gsi_channel(u32 clnt_hdl) +{ + return ipa_stop_gsi_channel(clnt_hdl); +} + +static inline int __qdf_ipa_register_ipa_ready_cb( + void (*ipa_ready_cb)(void *user_data), + void *user_data) +{ + return ipa_register_ipa_ready_cb(ipa_ready_cb, user_data); +} + +#ifdef FEATURE_METERING +static inline int __qdf_ipa_broadcast_wdi_quota_reach_ind(uint32_t index, + uint64_t quota_bytes) +{ + return ipa_broadcast_wdi_quota_reach_ind(index, quota_bytes); +} +#endif + +void __qdf_ipa_set_meta_msg_type(__qdf_ipa_msg_meta_t *meta, int type); + +#ifdef ENABLE_SMMU_S1_TRANSLATION +/** + * __qdf_get_ipa_smmu_enabled() - to get IPA SMMU enable status + * + * Return: true when IPA SMMU enabled, otherwise false + */ +static inline bool __qdf_get_ipa_smmu_enabled(void) +{ + struct ipa_smmu_in_params params_in; + struct ipa_smmu_out_params params_out; + + params_in.smmu_client = IPA_SMMU_WLAN_CLIENT; + ipa_get_smmu_params(&params_in, &params_out); + + return params_out.smmu_enable; +} +#endif + +#endif /* IPA_OFFLOAD */ +#endif /* _I_QDF_IPA_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_ipa_wdi3.h
b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_ipa_wdi3.h new file mode 100644 index 0000000000000000000000000000000000000000..b2263d69b1c9758fca1bcac3f597cc2090a3cb4e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_ipa_wdi3.h @@ -0,0 +1,588 @@ +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_ipa_wdi3.h + * This file provides OS dependent IPA WDI APIs. 
+ */ + +#ifndef I_QDF_IPA_WDI_H +#define I_QDF_IPA_WDI_H + +#ifdef IPA_OFFLOAD + +#include /* QDF_STATUS */ +#include + +#ifdef CONFIG_IPA_WDI_UNIFIED_API + +/** + * __qdf_ipa_wdi_version_t - IPA WDI version + */ +typedef enum ipa_wdi_version __qdf_ipa_wdi_version_t; + +/** + * __qdf_ipa_wdi_init_in_params_t - wdi init input parameters + */ +typedef struct ipa_wdi_init_in_params __qdf_ipa_wdi_init_in_params_t; + +#define QDF_IPA_WDI_INIT_IN_PARAMS_WDI_VERSION(in_params) \ + (((struct ipa_wdi_init_in_params *)(in_params))->wdi_version) +#define QDF_IPA_WDI_INIT_IN_PARAMS_NOTIFY(in_params) \ + (((struct ipa_wdi_init_in_params *)(in_params))->notify) +#define QDF_IPA_WDI_INIT_IN_PARAMS_PRIV(in_params) \ + (((struct ipa_wdi_init_in_params *)(in_params))->priv) +#define QDF_IPA_WDI_INIT_IN_PARAMS_WDI_NOTIFY(in_params) \ + (((struct ipa_wdi_init_in_params *)(in_params))->wdi_notify) + +/** + * __qdf_ipa_wdi_init_out_params_t - wdi init output parameters + */ +typedef struct ipa_wdi_init_out_params __qdf_ipa_wdi_init_out_params_t; + +#define QDF_IPA_WDI_INIT_OUT_PARAMS_IS_UC_READY(out_params) \ + (((struct ipa_wdi_init_out_params *)(out_params))->is_uC_ready) +#define QDF_IPA_WDI_INIT_OUT_PARAMS_IS_SMMU_ENABLED(out_params) \ + (((struct ipa_wdi_init_out_params *)(out_params))->is_smmu_enabled) + +/** + * __qdf_ipa_wdi_hdr_info_t - Header to install on IPA HW + */ +typedef struct ipa_wdi_hdr_info __qdf_ipa_wdi_hdr_info_t; + +#define QDF_IPA_WDI_HDR_INFO_HDR(hdr_info) \ + (((struct ipa_wdi_hdr_info *)(hdr_info))->hdr) +#define QDF_IPA_WDI_HDR_INFO_HDR_LEN(hdr_info) \ + (((struct ipa_wdi_hdr_info *)(hdr_info))->hdr_len) +#define QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(hdr_info) \ + (((struct ipa_wdi_hdr_info *)(hdr_info))->dst_mac_addr_offset) +#define QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) \ + (((struct ipa_wdi_hdr_info *)(hdr_info))->hdr_type) + +/** + * __qdf_ipa_wdi_reg_intf_in_params_t - parameters for uC offload + * interface registration + */ +typedef struct 
ipa_wdi_reg_intf_in_params __qdf_ipa_wdi_reg_intf_in_params_t; + +#define QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(in) \ + (((struct ipa_wdi_reg_intf_in_params *)(in))->netdev_name) +#define QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(in) \ + (((struct ipa_wdi_reg_intf_in_params *)(in))->hdr_info) +#define QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(in) \ + (((struct ipa_wdi_reg_intf_in_params *)(in))->alt_dst_pipe) +#define QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(in) \ + (((struct ipa_wdi_reg_intf_in_params *)(in))->is_meta_data_valid) +#define QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) \ + (((struct ipa_wdi_reg_intf_in_params *)(in))->meta_data) +#define QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(in) \ + (((struct ipa_wdi_reg_intf_in_params *)(in))->meta_data_mask) + +typedef struct ipa_ep_cfg __qdf_ipa_ep_cfg_t; + +#define QDF_IPA_EP_CFG_NAT_EN(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->nat.nat_en) +#define QDF_IPA_EP_CFG_HDR_LEN(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->hdr.hdr_len) +#define QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->hdr.hdr_ofst_metadata_valid) +#define QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->hdr.hdr_metadata_reg_valid) +#define QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->hdr.hdr_ofst_pkt_size_valid) +#define QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->hdr.hdr_ofst_pkt_size) +#define QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->hdr.hdr_additional_const_len) +#define QDF_IPA_EP_CFG_MODE(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->mode.mode) +#define QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(cfg) \ + (((struct ipa_ep_cfg *)(cfg))->hdr_ext.hdr_little_endian) + +/** + * __qdf_ipa_wdi_pipe_setup_info_t - WDI TX/Rx configuration + */ +typedef struct ipa_wdi_pipe_setup_info __qdf_ipa_wdi_pipe_setup_info_t; + +#define QDF_IPA_WDI_SETUP_INFO_EP_CFG(txrx) \ + (((struct 
ipa_wdi_pipe_setup_info *)(txrx))->ipa_ep_cfg) + +#define QDF_IPA_WDI_SETUP_INFO_CLIENT(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->client) +#define QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->transfer_ring_base_pa) +#define QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->transfer_ring_size) +#define QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->transfer_ring_doorbell_pa) +#define QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->event_ring_base_pa) +#define QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->event_ring_size) +#define QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->event_ring_doorbell_pa) +#define QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->num_pkt_buffers) +#define QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->pkt_offset) +#define QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(txrx) \ + (((struct ipa_wdi_pipe_setup_info *)(txrx))->desc_format_template) + +/** + * __qdf_ipa_wdi_pipe_setup_info_smmu_t - WDI TX/Rx configuration + */ +typedef struct ipa_wdi_pipe_setup_info_smmu __qdf_ipa_wdi_pipe_setup_info_smmu_t; + +#define QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->ipa_ep_cfg) + +#define QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->client) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->transfer_ring_base) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->transfer_ring_size) +#define 
QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->transfer_ring_doorbell_pa) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->event_ring_base) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->event_ring_size) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->event_ring_doorbell_pa) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->num_pkt_buffers) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->pkt_offset) +#define QDF_IPA_WDI_SETUP_INFO_SMMU_DESC_FORMAT_TEMPLATE(txrx) \ + (((struct ipa_wdi_pipe_setup_info_smmu *)(txrx))->desc_format_template) + +/** + * __qdf_ipa_wdi_conn_in_params_t - information provided by + * uC offload client + */ +typedef struct ipa_wdi_conn_in_params __qdf_ipa_wdi_conn_in_params_t; + +#define QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(pipe_in) \ + (((struct ipa_wdi_conn_in_params *)(pipe_in))->notify) +#define QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(pipe_in) \ + (((struct ipa_wdi_conn_in_params *)(pipe_in))->priv) +#define QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) \ + (((struct ipa_wdi_conn_in_params *)(pipe_in))->is_smmu_enabled) +#define QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) \ + (((struct ipa_wdi_conn_in_params *)(pipe_in))->num_sys_pipe_needed) +#define QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(pipe_in) \ + (((struct ipa_wdi_conn_in_params *)(pipe_in))->sys_in) +#define QDF_IPA_WDI_CONN_IN_PARAMS_TX(pipe_in) \ + (((struct ipa_wdi_conn_in_params *)(pipe_in))->u_tx.tx) +#define QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(pipe_in) \ + (((struct ipa_wdi_conn_in_params *)(pipe_in))->u_tx.tx_smmu) +#define QDF_IPA_WDI_CONN_IN_PARAMS_RX(pipe_in) \ + (((struct 
ipa_wdi_conn_in_params *)(pipe_in))->u_rx.rx) +#define QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(pipe_in) \ + (((struct ipa_wdi_conn_in_params *)(pipe_in))->u_rx.rx_smmu) + +/** + * __qdf_ipa_wdi_conn_out_params_t - information provided + * to WLAN driver + */ +typedef struct ipa_wdi_conn_out_params __qdf_ipa_wdi_conn_out_params_t; + +#define QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(pipe_out) \ + (((struct ipa_wdi_conn_out_params *)(pipe_out))->tx_uc_db_pa) +#define QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(pipe_out) \ + (((struct ipa_wdi_conn_out_params *)(pipe_out))->rx_uc_db_pa) + +/** + * __qdf_ipa_wdi_perf_profile_t - To set BandWidth profile + */ +typedef struct ipa_wdi_perf_profile __qdf_ipa_wdi_perf_profile_t; + +#define QDF_IPA_WDI_PERF_PROFILE_CLIENT(profile) \ + (((struct ipa_wdi_perf_profile *)(profile))->client) +#define QDF_IPA_WDI_PERF_PROFILE_MAX_SUPPORTED_BW_MBPS(profile) \ + (((struct ipa_wdi_perf_profile *)(profile))->max_supported_bw_mbps) + +/** + * __qdf_ipa_wdi_init - Client should call this function to + * init WDI IPA offload data path + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_init(struct ipa_wdi_init_in_params *in, + struct ipa_wdi_init_out_params *out) +{ + return ipa_wdi_init(in, out); +} + +/** + * __qdf_ipa_wdi_cleanup - Client should call this function to + * clean up WDI IPA offload data path + * + * @Return 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_cleanup(void) +{ + return ipa_wdi_cleanup(); +} + +/** + * __qdf_ipa_wdi_reg_intf - Client should call this function to + * init WDI IPA offload data path + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_reg_intf( + struct 
ipa_wdi_reg_intf_in_params *in) +{ + return ipa_wdi_reg_intf(in); +} + +/** + * __qdf_ipa_wdi_dereg_intf - Client Driver should call this + * function to deregister before unload and after disconnect + * + * @Return 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_dereg_intf(const char *netdev_name) +{ + return ipa_wdi_dereg_intf(netdev_name); +} + +/** + * __qdf_ipa_wdi_conn_pipes - Client should call this + * function to connect pipes + * + * @in: [in] input parameters from client + * @out: [out] output params to client + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in, + struct ipa_wdi_conn_out_params *out) +{ + return ipa_wdi_conn_pipes(in, out); +} + +/** + * __qdf_ipa_wdi_disconn_pipes() - Client should call this + * function to disconnect pipes + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_disconn_pipes(void) +{ + return ipa_wdi_disconn_pipes(); +} + +/** + * __qdf_ipa_wdi_enable_pipes() - Client should call this + * function to enable IPA offload data path + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_enable_pipes(void) +{ + return ipa_wdi_enable_pipes(); +} + +/** + * __qdf_ipa_wdi_disable_pipes() - Client should call this + * function to disable IPA offload data path + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_disable_pipes(void) +{ + return ipa_wdi_disable_pipes(); +} + +/** + * __qdf_ipa_wdi_set_perf_profile() - Client should call this function to + * set IPA clock bandwidth based on data rates + * + * @profile: [in] BandWidth 
profile to use + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_set_perf_profile( + struct ipa_wdi_perf_profile *profile) +{ + return ipa_wdi_set_perf_profile(profile); +} + +/** + * __qdf_ipa_wdi_create_smmu_mapping() - Client should call this function to + * create smmu mapping + * + * @num_buffers: [in] number of buffers + * @info: [in] wdi buffer info + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_create_smmu_mapping(u32 num_buffers, + struct ipa_wdi_buffer_info *info) +{ + return ipa_wdi_create_smmu_mapping(num_buffers, info); +} + +/** + * __qdf_ipa_wdi_release_smmu_mapping() - Client should call this function to + * release smmu mapping + * + * @num_buffers: [in] number of buffers + * @info: [in] wdi buffer info + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi_release_smmu_mapping(u32 num_buffers, + struct ipa_wdi_buffer_info *info) +{ + return ipa_wdi_release_smmu_mapping(num_buffers, info); +} + +#else /* CONFIG_IPA_WDI_UNIFIED_API */ + +/** + * __qdf_ipa_wdi3_hdr_info_t - Header to install on IPA HW + */ +typedef struct ipa_wdi3_hdr_info __qdf_ipa_wdi3_hdr_info_t; + +#define QDF_IPA_WDI3_HDR_INFO_HDR(hdr_info) \ + (((struct ipa_wdi3_hdr_info *)(hdr_info))->hdr) +#define QDF_IPA_WDI3_HDR_INFO_HDR_LEN(hdr_info) \ + (((struct ipa_wdi3_hdr_info *)(hdr_info))->hdr_len) +#define QDF_IPA_WDI3_HDR_INFO_DST_MAC_ADDR_OFFSET(hdr_info) \ + (((struct ipa_wdi3_hdr_info *)(hdr_info))->dst_mac_addr_offset) +#define QDF_IPA_WDI3_HDR_INFO_HDR_TYPE(hdr_info) \ + (((struct ipa_wdi3_hdr_info *)(hdr_info))->hdr_type) + +/** + * __qdf_ipa_wdi3_reg_intf_in_params_t - parameters for uC offload + * interface registration + */ +typedef struct ipa_wdi3_reg_intf_in_params __qdf_ipa_wdi3_reg_intf_in_params_t; + +#define QDF_IPA_WDI3_REG_INTF_IN_PARAMS_NETDEV_NAME(in) \ + (((struct ipa_wdi3_reg_intf_in_params *)(in))->netdev_name) +#define 
QDF_IPA_WDI3_REG_INTF_IN_PARAMS_HDR_INFO(in) \ + (((struct ipa_wdi3_reg_intf_in_params *)(in))->hdr_info) +#define QDF_IPA_WDI3_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(in) \ + (((struct ipa_wdi3_reg_intf_in_params *)(in))->is_meta_data_valid) +#define QDF_IPA_WDI3_REG_INTF_IN_PARAMS_META_DATA(in) \ + (((struct ipa_wdi3_reg_intf_in_params *)(in))->meta_data) +#define QDF_IPA_WDI3_REG_INTF_IN_PARAMS_META_DATA_MASK(in) \ + (((struct ipa_wdi3_reg_intf_in_params *)(in))->meta_data_mask) + +/** + * __qdf_ipa_wdi3_setup_info_t - WDI3 TX/Rx configuration + */ +typedef struct ipa_wdi3_setup_info __qdf_ipa_wdi3_setup_info_t; + +#define QDF_IPA_WDI3_SETUP_INFO_NAT_EN(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.nat.nat_en) +#define QDF_IPA_WDI3_SETUP_INFO_HDR_LEN(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.hdr.hdr_len) +#define QDF_IPA_WDI3_SETUP_INFO_HDR_OFST_METADATA_VALID(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid) +#define QDF_IPA_WDI3_SETUP_INFO_HDR_METADATA_REG_VALID(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.hdr.hdr_metadata_reg_valid) +#define QDF_IPA_WDI3_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid) +#define QDF_IPA_WDI3_SETUP_INFO_HDR_OFST_PKT_SIZE(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.hdr.hdr_ofst_pkt_size) +#define QDF_IPA_WDI3_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.hdr.hdr_additional_const_len) +#define QDF_IPA_WDI3_SETUP_INFO_MODE(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.mode.mode) +#define QDF_IPA_WDI3_SETUP_INFO_HDR_LITTLE_ENDIAN(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->ipa_ep_cfg.hdr_ext.hdr_little_endian) + +#define QDF_IPA_WDI3_SETUP_INFO_CLIENT(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->client) +#define QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_BASE_PA(txrx) \ + 
(((struct ipa_wdi3_setup_info *)(txrx))->transfer_ring_base_pa) +#define QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_SIZE(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->transfer_ring_size) +#define QDF_IPA_WDI3_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->transfer_ring_doorbell_pa) +#define QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_BASE_PA(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->event_ring_base_pa) +#define QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_SIZE(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->event_ring_size) +#define QDF_IPA_WDI3_SETUP_INFO_EVENT_RING_DOORBELL_PA(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->event_ring_doorbell_pa) +#define QDF_IPA_WDI3_SETUP_INFO_NUM_PKT_BUFFERS(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->num_pkt_buffers) +#define QDF_IPA_WDI3_SETUP_INFO_PKT_OFFSET(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->pkt_offset) +#define QDF_IPA_WDI3_SETUP_INFO_DESC_FORMAT_TEMPLATE(txrx) \ + (((struct ipa_wdi3_setup_info *)(txrx))->desc_format_template) + +/** + * __qdf_ipa_wdi3_conn_in_params_t - information provided by + * uC offload client + */ +typedef struct ipa_wdi3_conn_in_params __qdf_ipa_wdi3_conn_in_params_t; + +#define QDF_IPA_WDI3_CONN_IN_PARAMS_NOTIFY(pipe_in) \ + (((struct ipa_wdi3_conn_in_params *)(pipe_in))->notify) +#define QDF_IPA_WDI3_CONN_IN_PARAMS_PRIV(pipe_in) \ + (((struct ipa_wdi3_conn_in_params *)(pipe_in))->priv) +#define QDF_IPA_WDI3_CONN_IN_PARAMS_TX(pipe_in) \ + (((struct ipa_wdi3_conn_in_params *)(pipe_in))->tx) +#define QDF_IPA_WDI3_CONN_IN_PARAMS_RX(pipe_in) \ + (((struct ipa_wdi3_conn_in_params *)(pipe_in))->rx) + +/** + * __qdf_ipa_wdi3_conn_out_params_t - information provided + * to WLAN driver + */ +typedef struct ipa_wdi3_conn_out_params __qdf_ipa_wdi3_conn_out_params_t; + +#define QDF_IPA_WDI3_CONN_OUT_PARAMS_TX_UC_DB_PA(pipe_out) \ + (((struct ipa_wdi3_conn_out_params *)(pipe_out))->tx_uc_db_pa) +#define 
QDF_IPA_WDI3_CONN_OUT_PARAMS_TX_UC_DB_VA(pipe_out) \ + (((struct ipa_wdi3_conn_out_params *)(pipe_out))->tx_uc_db_va) +#define QDF_IPA_WDI3_CONN_OUT_PARAMS_RX_UC_DB_PA(pipe_out) \ + (((struct ipa_wdi3_conn_out_params *)(pipe_out))->rx_uc_db_pa) + +/** + * __qdf_ipa_wdi3_perf_profile_t - To set BandWidth profile + */ +typedef struct ipa_wdi3_perf_profile __qdf_ipa_wdi3_perf_profile_t; + +#define QDF_IPA_WDI3_PERF_PROFILE_CLIENT(profile) \ + (((struct ipa_wdi3_perf_profile *)(profile))->client) +#define QDF_IPA_WDI3_PERF_PROFILE_MAX_SUPPORTED_BW_MBPS(profile) \ + (((struct ipa_wdi3_perf_profile *)(profile))->max_supported_bw_mbps) + +/** + * __qdf_ipa_wdi3_reg_intf - Client should call this function to + * init WDI3 IPA offload data path + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi3_reg_intf( + struct ipa_wdi3_reg_intf_in_params *in) +{ + return ipa_wdi3_reg_intf(in); +} + +/** + * __qdf_ipa_wdi3_dereg_intf - Client Driver should call this + * function to deregister before unload and after disconnect + * + * @Return 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi3_dereg_intf(const char *netdev_name) +{ + return ipa_wdi3_dereg_intf(netdev_name); +} + +/** + * __qdf_ipa_wdi3_conn_pipes - Client should call this + * function to connect pipes + * + * @in: [in] input parameters from client + * @out: [out] output params to client + * + * Note: Should not be called from atomic context and only + * after checking IPA readiness using ipa_register_ipa_ready_cb() + * + * @Return 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi3_conn_pipes(struct ipa_wdi3_conn_in_params *in, + struct ipa_wdi3_conn_out_params *out) +{ + return ipa_wdi3_conn_pipes(in, out); +} + +/** + * __qdf_ipa_wdi3_disconn_pipes() - Client should call this + * function to disconnect pipes + * 
+ * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi3_disconn_pipes(void) +{ + return ipa_wdi3_disconn_pipes(); +} + +/** + * __qdf_ipa_wdi3_enable_pipes() - Client should call this + * function to enable IPA offload data path + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi3_enable_pipes(void) +{ + return ipa_wdi3_enable_pipes(); +} + +/** + * __qdf_ipa_wdi3_disable_pipes() - Client should call this + * function to disable IPA offload data path + * + * Note: Should not be called from atomic context + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi3_disable_pipes(void) +{ + return ipa_wdi3_disable_pipes(); +} + +/** + * __qdf_ipa_wdi3_set_perf_profile() - Client should call this function to + * set IPA clock bandwidth based on data rates + * + * @profile: [in] BandWidth profile to use + * + * Returns: 0 on success, negative on failure + */ +static inline int __qdf_ipa_wdi3_set_perf_profile( + struct ipa_wdi3_perf_profile *profile) +{ + return ipa_wdi3_set_perf_profile(profile); +} + +#endif /* CONFIG_IPA_WDI_UNIFIED_API */ + +#endif /* IPA_OFFLOAD */ +#endif /* I_QDF_IPA_WDI_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_list.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_list.h new file mode 100644 index 0000000000000000000000000000000000000000..1a20615088195262bbba3058a77dda58ba4f47cf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_list.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2014-2016, 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_list.h + * This file provides OS dependent list API's. + */ + +#if !defined(__I_QDF_LIST_H) +#define __I_QDF_LIST_H + +#include + +/* Type declarations */ +typedef struct list_head __qdf_list_node_t; + +/* Preprocessor definitions and constants */ + +typedef struct qdf_list_s { + __qdf_list_node_t anchor; + uint32_t count; + uint32_t max_size; +} __qdf_list_t; + +/** + * __qdf_list_create() - Create qdf list and initialize list head + * @list: object of list + * @max_size: max size of the list + * + * Return: none + */ +static inline void __qdf_list_create(__qdf_list_t *list, uint32_t max_size) +{ + INIT_LIST_HEAD(&list->anchor); + list->count = 0; + list->max_size = max_size; +} + +#define __QDF_LIST_ANCHOR(list) ((list).anchor) + +#define __QDF_LIST_NODE_INIT(prev_node, next_node) \ + { .prev = &(prev_node), .next = &(next_node), } + +#define __QDF_LIST_NODE_INIT_SINGLE(node) \ + __QDF_LIST_NODE_INIT(node, node) + +#define __QDF_LIST_INIT(tail, head) \ + { .anchor = __QDF_LIST_NODE_INIT(tail, head), } + +#define __QDF_LIST_INIT_SINGLE(node) \ + __QDF_LIST_INIT(node, node) + +#define __QDF_LIST_INIT_EMPTY(list) \ + __QDF_LIST_INIT_SINGLE(list.anchor) + +#define __qdf_list_for_each(list_ptr, cursor, node_field) \ + list_for_each_entry(cursor, &(list_ptr)->anchor, node_field) + +#define __qdf_list_for_each_del(list_ptr, cursor, next, node_field) \ + list_for_each_entry_safe(cursor, next, &(list_ptr)->anchor, node_field) + +/** 
+ * __qdf_init_list_head() - initialize list head + * @list_head: pointer to list head + * + * Return: none + */ +static inline void __qdf_init_list_head(__qdf_list_node_t *list_head) +{ + INIT_LIST_HEAD(list_head); +} + +bool qdf_list_has_node(__qdf_list_t *list, __qdf_list_node_t *node); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_lock.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_lock.h new file mode 100644 index 0000000000000000000000000000000000000000..0a26bdbd5aa8003dce70b78e3bd6d4d91c8a20b2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_lock.h @@ -0,0 +1,337 @@ +/* + * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: i_qdf_lock.h + * Linux-specific definitions for QDF Lock API's + */ + +#if !defined(__I_QDF_LOCK_H) +#define __I_QDF_LOCK_H + +/* Include Files */ +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) +#include +#else +#include +#endif +#include +#include + +/* define for flag */ +#define QDF_LINUX_UNLOCK_BH 1 + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +enum { + LOCK_RELEASED = 0x11223344, + LOCK_ACQUIRED, + LOCK_DESTROYED +}; + +/** + * typedef struct - __qdf_mutex_t + * @m_lock: Mutex lock + * @cookie: Lock cookie + * @process_id: Process ID to track lock + * @state: Lock status + * @refcount: Reference count for recursive lock + * @stats: a structure that contains usage statistics + */ +struct qdf_lock_s { + struct mutex m_lock; + uint32_t cookie; + int process_id; + uint32_t state; + uint8_t refcount; + struct lock_stats stats; +}; + +typedef struct qdf_lock_s __qdf_mutex_t; + +/** + * typedef struct - qdf_spinlock_t + * @spinlock: Spin lock + * @flags: Lock flag + */ +typedef struct __qdf_spinlock { + spinlock_t spinlock; + unsigned long flags; +} __qdf_spinlock_t; + +typedef struct semaphore __qdf_semaphore_t; + +typedef struct wakeup_source qdf_wake_lock_t; + +struct hif_pm_runtime_lock; +typedef struct qdf_runtime_lock { + struct hif_pm_runtime_lock *lock; +} qdf_runtime_lock_t; + +#define LINUX_LOCK_COOKIE 0x12345678 + +/* Function declarations and documenation */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37) +/** + * __qdf_semaphore_init() - initialize the semaphore + * @m: Semaphore object + * + * Return: QDF_STATUS_SUCCESS + */ +static inline QDF_STATUS __qdf_semaphore_init(struct semaphore *m) +{ + init_MUTEX(m); + return QDF_STATUS_SUCCESS; +} +#else +static inline QDF_STATUS __qdf_semaphore_init(struct semaphore *m) +{ + sema_init(m, 1); + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * __qdf_semaphore_acquire() - acquire semaphore + * @m: 
Semaphore object + * + * Return: 0 + */ +static inline int __qdf_semaphore_acquire(struct semaphore *m) +{ + down(m); + return 0; +} + +/** + * __qdf_semaphore_acquire_intr() - down_interruptible allows a user-space + * process that is waiting on a semaphore to be interrupted by the user. + * If the operation is interrupted, the function returns a nonzero value, + * and the caller does not hold the semaphore. + * Always checking the return value and responding accordingly. + * @osdev: OS device handle + * @m: Semaphore object + * + * Return: int + */ +static inline int __qdf_semaphore_acquire_intr(struct semaphore *m) +{ + return down_interruptible(m); +} + +/** + * __qdf_semaphore_release() - release semaphore + * @m: Semaphore object + * + * Return: result of UP operation in integer + */ +static inline void __qdf_semaphore_release(struct semaphore *m) +{ + up(m); +} + +/** + * __qdf_semaphore_acquire_timeout() - Take the semaphore before timeout + * @m: semaphore to take + * @timeout: maximum time to try to take the semaphore + * Return: int + */ +static inline int __qdf_semaphore_acquire_timeout(struct semaphore *m, + unsigned long timeout) +{ + unsigned long jiffie_val = msecs_to_jiffies(timeout); + + return down_timeout(m, jiffie_val); +} + +/** + * __qdf_spinlock_create() - initialize spin lock + * @lock: Spin lock object + * + * Return: QDF_STATUS_SUCCESS + */ +static inline QDF_STATUS __qdf_spinlock_create(__qdf_spinlock_t *lock) +{ + spin_lock_init(&lock->spinlock); + lock->flags = 0; + return QDF_STATUS_SUCCESS; +} + +#define __qdf_spinlock_destroy(lock) + +/** + * __qdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive) + * @lock: Lock object + * + * Return: none + */ +static inline void __qdf_spin_lock(__qdf_spinlock_t *lock) +{ + spin_lock(&lock->spinlock); +} + +/** + * __qdf_spin_unlock() - Unlock the spinlock and enables the Preemption + * @lock: Lock object + * + * Return: none + */ +static inline void 
__qdf_spin_unlock(__qdf_spinlock_t *lock) +{ + spin_unlock(&lock->spinlock); +} + +/** + * __qdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption + * (Preemptive) and disable IRQs + * @lock: Lock object + * + * Return: none + */ +static inline void __qdf_spin_lock_irqsave(__qdf_spinlock_t *lock) +{ + spin_lock_irqsave(&lock->spinlock, lock->flags); +} + +/** + * __qdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the + * Preemption and enable IRQ + * @lock: Lock object + * + * Return: none + */ +static inline void __qdf_spin_unlock_irqrestore(__qdf_spinlock_t *lock) +{ + spin_unlock_irqrestore(&lock->spinlock, lock->flags); +} + +/* + * Synchronous versions - only for OS' that have interrupt disable + */ +#define __qdf_spin_lock_irq(_p_lock, _flags) spin_lock_irqsave(_p_lock, _flags) +#define __qdf_spin_unlock_irq(_p_lock, _flags) \ + spin_unlock_irqrestore(_p_lock, _flags) + +/** + * __qdf_spin_is_locked(__qdf_spinlock_t *lock) + * @lock: spinlock object + * + * Return: nonzero if lock is held. 
+ */ +static inline int __qdf_spin_is_locked(__qdf_spinlock_t *lock) +{ + return spin_is_locked(&lock->spinlock); +} + +/** + * __qdf_spin_trylock_bh() - spin trylock bottomhalf + * @lock: spinlock object + * + * Return: nonzero if lock is acquired + */ +static inline int __qdf_spin_trylock_bh(__qdf_spinlock_t *lock) +{ + if (likely(irqs_disabled() || in_irq() || in_softirq())) + return spin_trylock(&lock->spinlock); + + if (spin_trylock_bh(&lock->spinlock)) { + lock->flags |= QDF_LINUX_UNLOCK_BH; + return 1; + } + + return 0; +} + +/** + * __qdf_spin_lock_bh() - Acquire the spinlock and disable bottom halves + * @lock: Lock object + * + * Return: none + */ +static inline void __qdf_spin_lock_bh(__qdf_spinlock_t *lock) +{ + if (likely(irqs_disabled() || in_irq() || in_softirq())) { + spin_lock(&lock->spinlock); + } else { + spin_lock_bh(&lock->spinlock); + lock->flags |= QDF_LINUX_UNLOCK_BH; + } +} + +/** + * __qdf_spin_unlock_bh() - Release the spinlock and enable bottom halves + * @lock: Lock object + * + * Return: none + */ +static inline void __qdf_spin_unlock_bh(__qdf_spinlock_t *lock) +{ + if (unlikely(lock->flags & QDF_LINUX_UNLOCK_BH)) { + lock->flags &= (unsigned long)~QDF_LINUX_UNLOCK_BH; + spin_unlock_bh(&lock->spinlock); + } else + spin_unlock(&lock->spinlock); +} + +/** + * __qdf_spinlock_irq_exec - Execute the input function with spinlock held and interrupt disabled. 
+ * @hdl: OS handle
+ * @lock: spinlock to be held for the critical region
+ * @func: critical region function to be executed
+ * @arg: argument passed to the critical region function
+ * @return - Boolean status returned by the critical region function
+ */
+static inline bool __qdf_spinlock_irq_exec(qdf_handle_t hdl,
+					   __qdf_spinlock_t *lock,
+					   qdf_irqlocked_func_t func,
+					   void *arg)
+{
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(&lock->spinlock, flags);
+	ret = func(arg);
+	spin_unlock_irqrestore(&lock->spinlock, flags);
+
+	return ret;
+}
+
+/**
+ * __qdf_in_softirq() - in soft irq context
+ *
+ * Return: true if in softirq context else false
+ */
+static inline bool __qdf_in_softirq(void)
+{
+	return in_softirq();
+}
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __I_QDF_LOCK_H */
diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_lro.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_lro.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf2ff42ce75af1c16600c353b1ad2ade3a4780ef
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_lro.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */ + +/** + * DOC: i_qdf_lro.h + * This file provides OS dependent LRO API's. + */ + +#ifndef _I_QDF_LRO_H +#define _I_QDF_LRO_H + +#if defined(FEATURE_LRO) +#include +#include +#include + +#include + +/** + * qdf_lro_desc_entry - defines the LRO descriptor + * element stored in the list + * @lro_node: node of the list + * @lro_desc: the LRO descriptor contained in this list entry + */ +struct qdf_lro_desc_entry { + struct list_head lro_node; + struct net_lro_desc *lro_desc; +}; + +/** + * qdf_lro_desc_pool - pool of free LRO descriptors + * @lro_desc_array: array of LRO descriptors allocated + * @lro_free_list_head: head of the list + * @lro_pool_lock: lock to protect access to the list + */ +struct qdf_lro_desc_pool { + struct qdf_lro_desc_entry *lro_desc_array; + struct list_head lro_free_list_head; +}; + +/** + * qdf_lro_desc_table - defines each entry of the LRO hash table + * @lro_desc_list: list of LRO descriptors + */ +struct qdf_lro_desc_table { + struct list_head lro_desc_list; +}; + +/** + * qdf_lro_desc_info - structure containing the LRO descriptor + * information + * @lro_hash_table: hash table used for a quick desc. look-up + * @lro_hash_lock: lock to protect access to the hash table + * @lro_desc_pool: Free pool of LRO descriptors + */ +struct qdf_lro_desc_info { + struct qdf_lro_desc_table *lro_hash_table; + struct qdf_lro_desc_pool lro_desc_pool; +}; + +/** + * qdf_lro_info_s - LRO information + * @lro_mgr: LRO manager + * @lro_desc_info: LRO descriptor information + * @lro_mgr_arr_access_lock: Lock to access LRO manager array. 
+ * @lro_stats: LRO statistics + */ +struct qdf_lro_s { + struct net_lro_mgr *lro_mgr; + struct qdf_lro_desc_info lro_desc_info; +}; + +typedef struct qdf_lro_s *__qdf_lro_ctx_t; + +/* LRO_DESC_TABLE_SZ must be a power of 2 */ +#define QDF_LRO_DESC_TABLE_SZ 16 +#define QDF_LRO_DESC_TABLE_SZ_MASK (QDF_LRO_DESC_TABLE_SZ - 1) +#define QDF_LRO_DESC_POOL_SZ 10 + +#define QDF_LRO_DESC_TABLE_SZ 16 +#define QDF_LRO_DESC_TABLE_SZ_MASK (QDF_LRO_DESC_TABLE_SZ - 1) +#define QDF_LRO_DESC_POOL_SZ 10 + +#define QDF_LRO_MAX_AGGR_SIZE 100 + +#else + +struct qdf_lro_s {}; + +typedef struct qdf_lro_s *__qdf_lro_ctx_t; + +#endif /* FEATURE_LRO */ +#endif /*_I_QDF_NET_BUF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mc_timer.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mc_timer.h new file mode 100644 index 0000000000000000000000000000000000000000..e0ccaf9ad2bdef1097b394c95eeafb1cde81e451 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mc_timer.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: i_qdf_mc_timer.h + * Linux-specific definitions for QDF timers serialized to MC thread + */ + +#if !defined(__I_QDF_MC_TIMER_H) +#define __I_QDF_MC_TIMER_H + +/* Include Files */ +#include +#include +#include +#include +#include +#include + +/* Preprocessor definitions and constants */ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ +/* Type declarations */ + +typedef struct qdf_mc_timer_platform_s { + struct timer_list timer; + int thread_id; + uint32_t cookie; + qdf_spinlock_t spinlock; +} qdf_mc_timer_platform_t; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* __I_QDF_MC_TIMER_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mem.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mem.h new file mode 100644 index 0000000000000000000000000000000000000000..9799b71599c72af228b5282f3b3f1962683ec0a9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mem.h @@ -0,0 +1,442 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: i_qdf_mem.h + * Linux-specific definitions for QDF memory API's + */ + +#ifndef __I_QDF_MEM_H +#define __I_QDF_MEM_H + +#ifdef __KERNEL__ +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33) +#include +#else +#include +#endif +#endif +#include +#include +#include +#include /* pci_alloc_consistent */ +#include /* L1_CACHE_BYTES */ + +#define __qdf_cache_line_sz L1_CACHE_BYTES +#if CONFIG_MCL +#include +#else +#include +#endif +#else +/* + * Provide dummy defs for kernel data types, functions, and enums + * used in this header file. + */ +#define GFP_KERNEL 0 +#define GFP_ATOMIC 0 +#define kzalloc(size, flags) NULL +#define vmalloc(size) NULL +#define kfree(buf) +#define vfree(buf) +#define pci_alloc_consistent(dev, size, paddr) NULL +#define __qdf_mempool_t void* +#define QDF_RET_IP NULL +#endif /* __KERNEL__ */ +#include + +#ifdef CONFIG_ARM_SMMU +#include +#include +#include +#endif + +#ifdef __KERNEL__ +typedef struct mempool_elem { + STAILQ_ENTRY(mempool_elem) mempool_entry; +} mempool_elem_t; + +/** + * typedef __qdf_mempool_ctxt_t - Memory pool context + * @pool_id: pool identifier + * @flags: flags + * @elem_size: size of each pool element in bytes + * @pool_mem: pool_addr address of the pool created + * @mem_size: Total size of the pool in bytes + * @free_list: free pool list + * @lock: spinlock object + * @max_elem: Maximum number of elements in tha pool + * @free_cnt: Number of free elements available + */ +typedef struct __qdf_mempool_ctxt { + int pool_id; + u_int32_t flags; + size_t elem_size; + void *pool_mem; + u_int32_t mem_size; + + STAILQ_HEAD(, mempool_elem) free_list; + spinlock_t lock; + u_int32_t max_elem; + u_int32_t free_cnt; +} __qdf_mempool_ctxt_t; + +#endif /* __KERNEL__ */ + +#define __qdf_align(a, mask) ALIGN(a, mask) + +/* typedef for dma_data_direction */ +typedef enum dma_data_direction __dma_data_direction; + +/** + * __qdf_dma_dir_to_os() - Convert DMA data 
direction to OS specific enum
+ * @qdf_dir: QDF DMA data direction
+ *
+ * Return:
+ * enum dma_data_direction
+ */
+static inline
+enum dma_data_direction __qdf_dma_dir_to_os(qdf_dma_dir_t qdf_dir)
+{
+	switch (qdf_dir) {
+	case QDF_DMA_BIDIRECTIONAL:
+		return DMA_BIDIRECTIONAL;
+	case QDF_DMA_TO_DEVICE:
+		return DMA_TO_DEVICE;
+	case QDF_DMA_FROM_DEVICE:
+		return DMA_FROM_DEVICE;
+	default:
+		return DMA_NONE;
+	}
+}
+
+
+/**
+ * __qdf_mem_map_nbytes_single - Map memory for DMA
+ * @osdev: pointer to OS device context
+ * @buf: pointer to memory to be dma mapped
+ * @dir: DMA map direction
+ * @nbytes: number of bytes to be mapped.
+ * @phy_addr: pointer to receive physical address.
+ *
+ * Return: success/failure
+ */
+static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev,
+						   void *buf, qdf_dma_dir_t dir,
+						   int nbytes,
+						   qdf_dma_addr_t *phy_addr)
+{
+	/* assume that the OS only provides a single fragment */
+	*phy_addr = dma_map_single(osdev->dev, buf, nbytes,
+				   __qdf_dma_dir_to_os(dir));
+	return dma_mapping_error(osdev->dev, *phy_addr) ?
+		QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_mem_unmap_nbytes_single() - un_map memory for DMA
+ *
+ * @osdev: pointer to OS device context
+ * @phy_addr: physical address of memory to be dma unmapped
+ * @dir: DMA unmap direction
+ * @nbytes: number of bytes to be unmapped.
+ * + * Return - none + */ +static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev, + qdf_dma_addr_t phy_addr, + qdf_dma_dir_t dir, int nbytes) +{ + dma_unmap_single(osdev->dev, phy_addr, nbytes, + __qdf_dma_dir_to_os(dir)); +} +#ifdef __KERNEL__ + +typedef __qdf_mempool_ctxt_t *__qdf_mempool_t; + +int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool, int pool_cnt, + size_t pool_entry_size, u_int32_t flags); +void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool); +void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool); +void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf); +#define QDF_RET_IP ((void *)_RET_IP_) + +#define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size) +#endif + +/** + * __qdf_mem_cmp() - memory compare + * @memory1: pointer to one location in memory to compare. + * @memory2: pointer to second location in memory to compare. + * @num_bytes: the number of bytes to compare. + * + * Function to compare two pieces of memory, similar to memcmp function + * in standard C. + * Return: + * int32_t - returns an int value that tells if the memory + * locations are equal or not equal. 
+ * 0 -- equal + * < 0 -- *memory1 is less than *memory2 + * > 0 -- *memory1 is bigger than *memory2 + */ +static inline int32_t __qdf_mem_cmp(const void *memory1, const void *memory2, + uint32_t num_bytes) +{ + return (int32_t) memcmp(memory1, memory2, num_bytes); +} + +/** + * __qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status + * @osdev parent device instance + * + * Return: true if smmu s1 enabled, false if smmu s1 is bypassed + */ +static inline bool __qdf_mem_smmu_s1_enabled(qdf_device_t osdev) +{ + return osdev->smmu_s1_enabled; +} + +#ifdef CONFIG_ARM_SMMU +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) +/** + * __qdf_dev_get_domain() - get iommu domain from osdev + * @osdev: parent device instance + * + * Return: iommu domain + */ +static inline struct iommu_domain * +__qdf_dev_get_domain(qdf_device_t osdev) +{ + return osdev->domain; +} +#else +static inline struct iommu_domain * +__qdf_dev_get_domain(qdf_device_t osdev) +{ + if (osdev->iommu_mapping) + return osdev->iommu_mapping->domain; + + return NULL; +} +#endif + +/** + * __qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma_addr + * @osdev: parent device instance + * @dma_addr: dma_addr + * + * Get actual physical address from dma_addr based on SMMU enablement status. + * IF SMMU Stage 1 translation is enabled, DMA APIs return IO virtual address + * (IOVA) otherwise returns physical address. So get SMMU physical address + * mapping from IOVA. 
+ * + * Return: dmaable physical address + */ +static inline unsigned long +__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev, + qdf_dma_addr_t dma_addr) +{ + struct iommu_domain *domain; + + if (__qdf_mem_smmu_s1_enabled(osdev)) { + domain = __qdf_dev_get_domain(osdev); + if (domain) + return iommu_iova_to_phys(domain, dma_addr); + } + + return dma_addr; +} +#else +static inline unsigned long +__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev, + qdf_dma_addr_t dma_addr) +{ + return dma_addr; +} +#endif + +/** + * __qdf_os_mem_dma_get_sgtable() - Returns DMA memory scatter gather table + * @dev: device instace + * @sgt: scatter gather table pointer + * @cpu_addr: HLOS virtual address + * @dma_addr: dma/iova + * @size: allocated memory size + * + * Return: physical address + */ +static inline int +__qdf_os_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr, + qdf_dma_addr_t dma_addr, size_t size) +{ + return dma_get_sgtable(dev, (struct sg_table *)sgt, cpu_addr, dma_addr, + size); +} + +/** + * __qdf_os_mem_free_sgtable() - Free a previously allocated sg table + * @sgt: the mapped sg table header + * + * Return: None + */ +static inline void +__qdf_os_mem_free_sgtable(struct sg_table *sgt) +{ + sg_free_table(sgt); +} + +/** + * __qdf_dma_get_sgtable_dma_addr()-Assigns DMA address to scatterlist elements + * @sgt: scatter gather table pointer + * + * Return: None + */ +static inline void +__qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) + sg->dma_address = sg_phys(sg); +} + +/** + * __qdf_mem_get_dma_addr() - Return dma addr based on SMMU translation status + * @osdev: parent device instance + * @mem_info: Pointer to allocated memory information + * + * Based on smmu stage 1 translation enablement status, return corresponding dma + * address from qdf_mem_info_t. If stage 1 translation enabled, return + * IO virtual address otherwise return physical address. 
+ * + * Return: dma address + */ +static inline qdf_dma_addr_t __qdf_mem_get_dma_addr(qdf_device_t osdev, + qdf_mem_info_t *mem_info) +{ + if (__qdf_mem_smmu_s1_enabled(osdev)) + return (qdf_dma_addr_t)mem_info->iova; + else + return (qdf_dma_addr_t)mem_info->pa; +} + +/** + * __qdf_mem_get_dma_addr_ptr() - Return DMA address storage pointer + * @osdev: parent device instance + * @mem_info: Pointer to allocated memory information + * + * Based on smmu stage 1 translation enablement status, return corresponding + * dma address pointer from qdf_mem_info_t structure. If stage 1 translation + * enabled, return pointer to IO virtual address otherwise return pointer to + * physical address + * + * Return: dma address storage pointer + */ +static inline qdf_dma_addr_t * +__qdf_mem_get_dma_addr_ptr(qdf_device_t osdev, + qdf_mem_info_t *mem_info) +{ + if (__qdf_mem_smmu_s1_enabled(osdev)) + return (qdf_dma_addr_t *)(&mem_info->iova); + else + return (qdf_dma_addr_t *)(&mem_info->pa); +} + +/** + * __qdf_update_mem_map_table() - Update DMA memory map info + * @osdev: Parent device instance + * @mem_info: Pointer to shared memory information + * @dma_addr: dma address + * @mem_size: memory size allocated + * + * Store DMA shared memory information + * + * Return: none + */ +static inline void __qdf_update_mem_map_table(qdf_device_t osdev, + qdf_mem_info_t *mem_info, + qdf_dma_addr_t dma_addr, + uint32_t mem_size) +{ + mem_info->pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr); + mem_info->iova = dma_addr; + mem_info->size = mem_size; +} + +/** + * __qdf_mem_get_dma_size() - Return DMA memory size + * @osdev: parent device instance + * @mem_info: Pointer to allocated memory information + * + * Return: DMA memory size + */ +static inline uint32_t +__qdf_mem_get_dma_size(qdf_device_t osdev, + qdf_mem_info_t *mem_info) +{ + return mem_info->size; +} + +/** + * __qdf_mem_set_dma_size() - Set DMA memory size + * @osdev: parent device instance + * @mem_info: Pointer to allocated 
memory information
+ * @mem_size: memory size allocated
+ *
+ * Return: none
+ */
+static inline void
+__qdf_mem_set_dma_size(qdf_device_t osdev,
+		       qdf_mem_info_t *mem_info,
+		       uint32_t mem_size)
+{
+	mem_info->size = mem_size;
+}
+
+/**
+ * __qdf_mem_get_dma_pa() - Return DMA physical address
+ * @osdev: parent device instance
+ * @mem_info: Pointer to allocated memory information
+ *
+ * Return: DMA physical address
+ */
+static inline qdf_dma_addr_t
+__qdf_mem_get_dma_pa(qdf_device_t osdev,
+		     qdf_mem_info_t *mem_info)
+{
+	return mem_info->pa;
+}
+
+/**
+ * __qdf_mem_set_dma_pa() - Set DMA physical address
+ * @osdev: parent device instance
+ * @mem_info: Pointer to allocated memory information
+ * @dma_pa: DMA physical address
+ *
+ * Return: none
+ */
+static inline void
+__qdf_mem_set_dma_pa(qdf_device_t osdev,
+		     qdf_mem_info_t *mem_info,
+		     qdf_dma_addr_t dma_pa)
+{
+	mem_info->pa = dma_pa;
+}
+#endif /* __I_QDF_MEM_H */
diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_module.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_module.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d049bd41a9ce974fa02bf76235d36e8191d1256
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_module.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_module.h + * Linux-specific definitions for QDF module API's + */ + +#ifndef _I_QDF_MODULE_H +#define _I_QDF_MODULE_H + +#include +#include +#include +#include + + +#define __qdf_virt_module_init(_x) \ + static int _x##_mod(void) \ + { \ + uint32_t st; \ + st = (_x)(); \ + if (st != QDF_STATUS_SUCCESS) \ + return QDF_STATUS_E_INVAL; \ + else \ + return 0; \ + } \ + module_init(_x##_mod); + +#define __qdf_virt_module_exit(_x) module_exit(_x) + +#define __qdf_virt_module_name(_name) MODULE_LICENSE("Dual BSD/GPL") + +#ifdef WLAN_DISABLE_EXPORT_SYMBOL +#define __qdf_export_symbol(_sym) +#else +#define __qdf_export_symbol(_sym) EXPORT_SYMBOL(_sym) +#endif + +#define __qdf_declare_param(_name, _type) \ + module_param(_name, _type, 0600) + +#define __qdf_declare_param_array(_name, _type, _num) \ + module_param_array(_name, _type, _num, 0600) + +#endif /* _I_QDF_MODULE_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf.h new file mode 100644 index 0000000000000000000000000000000000000000..38bcfa4e4248515971418a875cd471ac5560a12a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf.h @@ -0,0 +1,1981 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_nbuf.h + * This file provides OS dependent nbuf API's. + */ + +#ifndef _I_QDF_NBUF_H +#define _I_QDF_NBUF_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Use socket buffer as the underlying implementation as skbuf . + * Linux use sk_buff to represent both packet and data, + * so we use sk_buffer to represent both skbuf . + */ +typedef struct sk_buff *__qdf_nbuf_t; + +#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1 + +/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS - + * max tx fragments added by the driver + * The driver will always add one tx fragment (the tx descriptor) + */ +#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2 +#define QDF_NBUF_CB_PACKET_TYPE_EAPOL 1 +#define QDF_NBUF_CB_PACKET_TYPE_ARP 2 +#define QDF_NBUF_CB_PACKET_TYPE_WAPI 3 +#define QDF_NBUF_CB_PACKET_TYPE_DHCP 4 +#define QDF_NBUF_CB_PACKET_TYPE_ICMP 5 +#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6 + + +/* mark the first packet after wow wakeup */ +#define QDF_MARK_FIRST_WAKEUP_PACKET 0x80000000 + +/* + * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned + */ +typedef union { + uint64_t u64; + qdf_dma_addr_t dma_addr; +} qdf_paddr_t; + +/** + * struct qdf_nbuf_cb - network buffer control block contents (skb->cb) + * - data passed between layers of the driver. + * + * Notes: + * 1. Hard limited to 48 bytes. Please count your bytes + * 2. 
The size of this structure has to be easily calculatable and + * consistently so: do not use any conditional compile flags + * 3. Split into a common part followed by a tx/rx overlay + * 4. There is only one extra frag, which represents the HTC/HTT header + * 5. "ext_cb_pt" must be the first member in both TX and RX unions + * for the priv_cb_w since it must be at same offset for both + * TX and RX union + * + * @paddr : physical addressed retrieved by dma_map of nbuf->data + * + * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer + * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype + * @rx.dev.priv_cb_w.reserved1: reserved + * @rx.dev.priv_cb_w.reserved2: reserved + * + * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number + * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number + * @rx.dev.priv_cb_m.lro_ctx: LRO context + * @rx.dev.priv_cb_m.map_index: + * @rx.dev.priv_cb_m.reserved: reserved + * + * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible + * @rx.peer_cached_buf_frm: peer cached buffer + * @rx.tcp_proto: L4 protocol is TCP + * @rx.tcp_pure_ack: A TCP ACK packet with no payload + * @rx.ipv6_proto: L3 protocol is IPV6 + * @rx.ip_offset: offset to IP header + * @rx.tcp_offset: offset to TCP header + * @rx_ctx_id: Rx context id + * + * @rx.tcp_udp_chksum: L4 payload checksum + * @rx.tcp_wim: TCP window size + * + * @rx.flow_id: 32bit flow id + * + * @rx.flag_chfrag_start: first MSDU in an AMSDU + * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU + * @rx.flag_chfrag_end: last MSDU in an AMSDU + * @rx.packet_buff_pool: indicate packet from pre-allocated pool for Rx ring + * @rx.rsrvd: reserved + * + * @rx.trace: combined structure for DP and protocol trace + * @rx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)| + * + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)] + * @rx.trace.dp_trace: flag (Datapath trace) + * @rx.trace.packet_track: RX_DATA packet + * @rx.trace.rsrvd: enable packet logging + * + * 
@rx.ftype: mcast2ucast, TSO, SG, MESH + * @rx.reserved: reserved + * + * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype + * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer + * + * @tx.dev.priv_cb_w.data_attr: value that is programmed in CE descr, includes + * + (1) CE classification enablement bit + * + (2) packet type (802.3 or Ethernet type II) + * + (3) packet offset (usually length of HTC/HTT descr) + * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA + * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA + * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw + * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb + * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map + * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use + * @tx.dev.priv_cb_m.reserved: reserved + * + * @tx.ftype: mcast2ucast, TSO, SG, MESH + * @tx.vdev_id: vdev (for protocol trace) + * @tx.len: length of efrag pointed by the above pointers + * + * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream) + * @tx.flags.bits.num: number of extra frags ( 0 or 1) + * @tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream) + * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU + * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU + * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU + * @tx.flags.bits.flag_ext_header: extended flags + * @tx.flags.bits.reserved: reserved + * @tx.trace: combined structure for DP and protocol trace + * @tx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)| + * + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)] + * @tx.trace.is_packet_priv: + * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK} + * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)| + * + (MGMT_ACTION)] - 4 bits + * @tx.trace.dp_trace: flag (Datapath trace) + * @tx.trace.is_bcast: flag (Broadcast packet) + * 
@tx.trace.is_mcast: flag (Multicast packet) + * @tx.trace.packet_type: flag (Packet type) + * @tx.trace.htt2_frm: flag (high-latency path only) + * @tx.trace.print: enable packet logging + * + * @tx.vaddr: virtual address of ~ + * @tx.paddr: physical/DMA address of ~ + */ +struct qdf_nbuf_cb { + /* common */ + qdf_paddr_t paddr; /* of skb->data */ + /* valid only in one direction */ + union { + /* Note: MAX: 40 bytes */ + struct { + union { + struct { + void *ext_cb_ptr; + void *fctx; + uint32_t reserved1; + uint32_t reserved2; + } priv_cb_w; + struct { + uint32_t tcp_seq_num; + uint32_t tcp_ack_num; + unsigned char *lro_ctx; + uint32_t map_index; + uint32_t reserved; + } priv_cb_m; + } dev; + uint32_t lro_eligible:1, + peer_cached_buf_frm:1, + tcp_proto:1, + tcp_pure_ack:1, + ipv6_proto:1, + ip_offset:7, + tcp_offset:7, + rx_ctx_id:4; + uint32_t tcp_udp_chksum:16, + tcp_win:16; + uint32_t flow_id; + uint8_t flag_chfrag_start:1, + flag_chfrag_cont:1, + flag_chfrag_end:1, + packet_buff_pool:1, + rsrvd:4; + union { + uint8_t packet_state; + uint8_t dp_trace:1, + packet_track:4, + rsrvd:3; + } trace; + uint8_t ftype; + uint8_t reserved; + } rx; + + /* Note: MAX: 40 bytes */ + struct { + union { + struct { + void *ext_cb_ptr; + void *fctx; + } priv_cb_w; + struct { + uint32_t data_attr; + struct { + uint32_t owned:1, + priv:31; + } ipa; + uint16_t desc_id; + uint16_t mgmt_desc_id; + struct { + uint8_t bi_map:1, + reserved:7; + } dma_option; + uint8_t reserved[3]; + } priv_cb_m; + } dev; + uint8_t ftype; + uint8_t vdev_id; + uint16_t len; + union { + struct { + uint8_t flag_efrag:1, + flag_nbuf:1, + num:1, + flag_chfrag_start:1, + flag_chfrag_cont:1, + flag_chfrag_end:1, + flag_ext_header:1, + flag_notify_comp:1; + } bits; + uint8_t u8; + } flags; + struct { + uint8_t packet_state:7, + is_packet_priv:1; + uint8_t packet_track:4, + proto_type:4; + uint8_t dp_trace:1, + is_bcast:1, + is_mcast:1, + packet_type:3, + /* used only for hl*/ + htt2_frm:1, + print:1; + } trace; 
+ unsigned char *vaddr; + qdf_paddr_t paddr; + } tx; + } u; +}; /* struct qdf_nbuf_cb: MAX 48 bytes */ + +QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size, + (sizeof(struct qdf_nbuf_cb)) <= FIELD_SIZEOF(struct sk_buff, cb)); + +/** + * access macros to qdf_nbuf_cb + * Note: These macros can be used as L-values as well as R-values. + * When used as R-values, they effectively function as "get" macros + * When used as L_values, they effectively function as "set" macros + */ + +#define QDF_NBUF_CB_PADDR(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr) + +#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible) +#define QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.peer_cached_buf_frm) +#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto) +#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack) +#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto) +#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset) +#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset) +#define QDF_NBUF_CB_RX_CTX_ID(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id) + +#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum) +#define QDF_NBUF_CB_RX_TCP_WIN(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win) + +#define QDF_NBUF_CB_RX_FLOW_ID(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id) + +#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state) +#define QDF_NBUF_CB_RX_DP_TRACE(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace) + +#define QDF_NBUF_CB_RX_FTYPE(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype) + +#define 
QDF_NBUF_CB_RX_CHFRAG_START(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.flag_chfrag_start) +#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.flag_chfrag_cont) +#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.flag_chfrag_end) +#define QDF_NBUF_CB_RX_PACKET_BUFF_POOL(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.packet_buff_pool) + +#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \ + qdf_nbuf_set_state(skb, PACKET_STATE) + +#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr) + +#define QDF_NBUF_CB_TX_FTYPE(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype) + + +#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len) +#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id) + +/* Tx Flags Accessor Macros*/ +#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.flags.bits.flag_efrag) +#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.flags.bits.flag_nbuf) +#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num) +#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.flag_notify_comp) +#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.flags.bits.flag_chfrag_start) +#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont) +#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.flags.bits.flag_chfrag_end) +#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \ + (((struct qdf_nbuf_cb *) \ + 
((skb)->cb))->u.tx.flags.bits.flag_ext_header) +#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8) +/* End of Tx Flags Accessor Macros */ + +/* Tx trace accessor macros */ +#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.trace.packet_state) + +#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.trace.is_packet_priv) + +#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.trace.packet_track) + +#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.rx.trace.packet_track) + +#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.trace.proto_type) + +#define QDF_NBUF_CB_TX_DP_TRACE(skb)\ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace) + +#define QDF_NBUF_CB_DP_TRACE_PRINT(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print) + +#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm) + +#define QDF_NBUF_CB_GET_IS_BCAST(skb)\ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast) + +#define QDF_NBUF_CB_GET_IS_MCAST(skb)\ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast) + +#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type) + +#define QDF_NBUF_CB_SET_BCAST(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.trace.is_bcast = true) + +#define QDF_NBUF_CB_SET_MCAST(skb) \ + (((struct qdf_nbuf_cb *) \ + ((skb)->cb))->u.tx.trace.is_mcast = true) +/* End of Tx trace accessor macros */ + + +#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr) +#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr) + +/* assume the OS provides a single fragment */ +#define __qdf_nbuf_get_num_frags(skb) \ 
+ (QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1) + +#define __qdf_nbuf_reset_num_frags(skb) \ + (QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0) + +/** + * end of nbuf->cb access macros + */ + +typedef void (*qdf_nbuf_trace_update_t)(char *); +typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t); + +#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb) + +#define __qdf_nbuf_mapped_paddr_set(skb, paddr) \ + (QDF_NBUF_CB_PADDR(skb) = paddr) + +#define __qdf_nbuf_frag_push_head( \ + skb, frag_len, frag_vaddr, frag_paddr) \ + do { \ + QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1; \ + QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr; \ + QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr; \ + QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len; \ + } while (0) + +#define __qdf_nbuf_get_frag_vaddr(skb, frag_num) \ + ((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \ + QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data)) + +#define __qdf_nbuf_get_frag_vaddr_always(skb) \ + QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) + +#define __qdf_nbuf_get_frag_paddr(skb, frag_num) \ + ((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \ + QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) : \ + /* assume that the OS only provides a single fragment */ \ + QDF_NBUF_CB_PADDR(skb)) + +#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) + +#define __qdf_nbuf_get_frag_len(skb, frag_num) \ + ((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \ + QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len) + +#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num) \ + ((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) \ + ? 
(QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb)) \ + : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb))) + +#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm) \ + do { \ + if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) \ + frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS; \ + if (frag_num) \ + QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = \ + is_wstrm; \ + else \ + QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = \ + is_wstrm; \ + } while (0) + +#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \ + do { \ + QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \ + } while (0) + +#define __qdf_nbuf_get_vdev_ctx(skb) \ + QDF_NBUF_CB_TX_VDEV_CTX((skb)) + +#define __qdf_nbuf_set_tx_ftype(skb, type) \ + do { \ + QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \ + } while (0) + +#define __qdf_nbuf_get_tx_ftype(skb) \ + QDF_NBUF_CB_TX_FTYPE((skb)) + + +#define __qdf_nbuf_set_rx_ftype(skb, type) \ + do { \ + QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \ + } while (0) + +#define __qdf_nbuf_get_rx_ftype(skb) \ + QDF_NBUF_CB_RX_FTYPE((skb)) + +#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \ + ((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val) + +#define __qdf_nbuf_is_rx_chfrag_start(skb) \ + (QDF_NBUF_CB_RX_CHFRAG_START((skb))) + +#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \ + do { \ + (QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \ + } while (0) + +#define __qdf_nbuf_is_rx_chfrag_cont(skb) \ + (QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) + +#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \ + ((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val) + +#define __qdf_nbuf_is_rx_chfrag_end(skb) \ + (QDF_NBUF_CB_RX_CHFRAG_END((skb))) + + +#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \ + ((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val) + +#define __qdf_nbuf_is_tx_chfrag_start(skb) \ + (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) + +#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \ + do { \ + (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \ + } while (0) + +#define 
__qdf_nbuf_is_tx_chfrag_cont(skb) \ + (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) + +#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \ + ((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val) + +#define __qdf_nbuf_is_tx_chfrag_end(skb) \ + (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) + +#define __qdf_nbuf_trace_set_proto_type(skb, proto_type) \ + (QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type)) + +#define __qdf_nbuf_trace_get_proto_type(skb) \ + QDF_NBUF_CB_TX_PROTO_TYPE(skb) + +#define __qdf_nbuf_data_attr_get(skb) \ + QDF_NBUF_CB_TX_DATA_ATTR(skb) +#define __qdf_nbuf_data_attr_set(skb, data_attr) \ + (QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr)) + +/** + * __qdf_nbuf_num_frags_init() - init extra frags + * @skb: sk buffer + * + * Return: none + */ +static inline +void __qdf_nbuf_num_frags_init(struct sk_buff *skb) +{ + QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0; +} + +typedef enum { + CB_FTYPE_INVALID = 0, + CB_FTYPE_MCAST2UCAST = 1, + CB_FTYPE_TSO = 2, + CB_FTYPE_TSO_SG = 3, + CB_FTYPE_SG = 4, + CB_FTYPE_INTRABSS_FWD = 5, + CB_FTYPE_RX_INFO = 6, + CB_FTYPE_MESH_RX_INFO = 7, + CB_FTYPE_MESH_TX_INFO = 8, +} CB_FTYPE; + +/* + * prototypes. Implemented in qdf_nbuf.c + */ + +/** + * __qdf_nbuf_alloc() - Allocate nbuf + * @osdev: Device handle + * @size: Netbuf requested size + * @reserve: headroom to start with + * @align: Align + * @prio: Priority + * @func: Function name of the call site + * @line: line number of the call site + * + * This allocates an nbuf aligns if needed and reserves some space in the front, + * since the reserve is done after alignment the reserve value if being + * unaligned will result in an unaligned address. 
+ * + * Return: nbuf or %NULL if no memory + */ +__qdf_nbuf_t +__qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align, + int prio, const char *func, uint32_t line); + +void __qdf_nbuf_free(struct sk_buff *skb); +QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev, + struct sk_buff *skb, qdf_dma_dir_t dir); +void __qdf_nbuf_unmap(__qdf_device_t osdev, + struct sk_buff *skb, qdf_dma_dir_t dir); +QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev, + struct sk_buff *skb, qdf_dma_dir_t dir); +void __qdf_nbuf_unmap_single(__qdf_device_t osdev, + struct sk_buff *skb, qdf_dma_dir_t dir); +void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr); +void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr); + +QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap); +void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap); +void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg); +QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb, + qdf_dma_dir_t dir, int nbytes); +void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb, + qdf_dma_dir_t dir, int nbytes); + +void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb, + qdf_dma_dir_t dir); + +QDF_STATUS __qdf_nbuf_map_nbytes_single( + qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes); +void __qdf_nbuf_unmap_nbytes_single( + qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes); +void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg); +uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag); +void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg); +QDF_STATUS __qdf_nbuf_frag_map( + qdf_device_t osdev, __qdf_nbuf_t nbuf, + int offset, qdf_dma_dir_t dir, int cur_frag); +void qdf_nbuf_classify_pkt(struct sk_buff *skb); + +bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb); +bool __qdf_nbuf_is_ipv4_tdls_pkt(struct 
sk_buff *skb); +bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data); +bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data); +bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf); +bool __qdf_nbuf_data_is_arp_req(uint8_t *data); +bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data); +uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data); +uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data); +uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len); +bool __qdf_nbuf_data_is_dns_query(uint8_t *data); +bool __qdf_nbuf_data_is_dns_response(uint8_t *data); +bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data); +bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data); +bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data); +uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data); +uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data); +bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data); +bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data); +uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data); +uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data); +enum qdf_proto_subtype __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data); +enum qdf_proto_subtype __qdf_nbuf_data_get_eapol_subtype(uint8_t *data); +enum qdf_proto_subtype __qdf_nbuf_data_get_arp_subtype(uint8_t *data); +enum qdf_proto_subtype __qdf_nbuf_data_get_icmp_subtype(uint8_t *data); +enum qdf_proto_subtype 
__qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data); +uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data); +uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data); + +#ifdef QDF_NBUF_GLOBAL_COUNT +int __qdf_nbuf_count_get(void); +void __qdf_nbuf_count_inc(struct sk_buff *skb); +void __qdf_nbuf_count_dec(struct sk_buff *skb); +void __qdf_nbuf_mod_init(void); +void __qdf_nbuf_mod_exit(void); + +#else + +static inline int __qdf_nbuf_count_get(void) +{ + return 0; +} + +static inline void __qdf_nbuf_count_inc(struct sk_buff *skb) +{ + return; +} + +static inline void __qdf_nbuf_count_dec(struct sk_buff *skb) +{ + return; +} + +static inline void __qdf_nbuf_mod_init(void) +{ + return; +} + +static inline void __qdf_nbuf_mod_exit(void) +{ + return; +} +#endif + +/** + * __qdf_to_status() - OS to QDF status conversion + * @error : OS error + * + * Return: QDF status + */ +static inline QDF_STATUS __qdf_to_status(signed int error) +{ + switch (error) { + case 0: + return QDF_STATUS_SUCCESS; + case ENOMEM: + case -ENOMEM: + return QDF_STATUS_E_NOMEM; + default: + return QDF_STATUS_E_NOSUPPORT; + } +} + +/** + * __qdf_nbuf_len() - return the amount of valid data in the skb + * @skb: Pointer to network buffer + * + * This API returns the amount of valid data in the skb, If there are frags + * then it returns total length. + * + * Return: network buffer length + */ +static inline size_t __qdf_nbuf_len(struct sk_buff *skb) +{ + int i, extra_frag_len = 0; + + i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb); + if (i > 0) + extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb); + + return extra_frag_len + skb->len; +} + +/** + * __qdf_nbuf_cat() - link two nbufs + * @dst: Buffer to piggyback into + * @src: Buffer to put + * + * Concat two nbufs, the new buf(src) is piggybacked into the older one. + * It is callers responsibility to free the src skb. 
+ * + * Return: QDF_STATUS (status of the call) if failed the src skb + * is released + */ +static inline QDF_STATUS +__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src) +{ + QDF_STATUS error = 0; + + qdf_assert(dst && src); + + /* + * Since pskb_expand_head unconditionally reallocates the skb->head + * buffer, first check whether the current buffer is already large + * enough. + */ + if (skb_tailroom(dst) < src->len) { + error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC); + if (error) + return __qdf_to_status(error); + } + + memcpy(skb_tail_pointer(dst), src->data, src->len); + skb_put(dst, src->len); + return __qdf_to_status(error); +} + +/* + * nbuf manipulation routines + */ +/** + * __qdf_nbuf_headroom() - return the amount of head space available + * @skb: Pointer to network buffer + * + * Return: amount of head room + */ +static inline int __qdf_nbuf_headroom(struct sk_buff *skb) +{ + return skb_headroom(skb); +} + +/** + * __qdf_nbuf_tailroom() - return the amount of tail space available + * @skb: Pointer to network buffer + * + * Return: amount of tail room + */ +static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb) +{ + return skb_tailroom(skb); +} + +/** + * __qdf_nbuf_put_tail() - Puts data in the end + * @skb: Pointer to network buffer + * @size: size to be pushed + * + * Return: data pointer of this buf where new data has to be + * put, or NULL if there is not enough room in this buf. 
+ */ +static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size) +{ + if (skb_tailroom(skb) < size) { + if (unlikely(pskb_expand_head(skb, 0, + size - skb_tailroom(skb), GFP_ATOMIC))) { + dev_kfree_skb_any(skb); + return NULL; + } + } + return skb_put(skb, size); +} + +/** + * __qdf_nbuf_trim_tail() - trim data out from the end + * @skb: Pointer to network buffer + * @size: size to be popped + * + * Return: none + */ +static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size) +{ + return skb_trim(skb, skb->len - size); +} + + +/* + * prototypes. Implemented in qdf_nbuf.c + */ +qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb); +QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, + qdf_nbuf_rx_cksum_t *cksum); +uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb); +void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid); +uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb); +void __qdf_nbuf_ref(struct sk_buff *skb); +int __qdf_nbuf_shared(struct sk_buff *skb); + +/* + * qdf_nbuf_pool_delete() implementation - do nothing in linux + */ +#define __qdf_nbuf_pool_delete(osdev) + +/** + * __qdf_nbuf_clone() - clone the nbuf (copy is readonly) + * @skb: Pointer to network buffer + * + * if GFP_ATOMIC is overkill then we can check whether its + * called from interrupt context and then do it or else in + * normal case use GFP_KERNEL + * + * example use "in_irq() || irqs_disabled()" + * + * Return: cloned skb + */ +static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb) +{ + struct sk_buff *skb_new = NULL; + + skb_new = skb_clone(skb, GFP_ATOMIC); + if (skb_new) + __qdf_nbuf_count_inc(skb_new); + + return skb_new; +} + +/** + * __qdf_nbuf_copy() - returns a private copy of the skb + * @skb: Pointer to network buffer + * + * This API returns a private copy of the skb, the skb returned is completely + * modifiable by callers + * + * Return: skb or NULL + */ +static inline struct sk_buff *__qdf_nbuf_copy(struct 
sk_buff *skb) +{ + struct sk_buff *skb_new = NULL; + + skb_new = skb_copy(skb, GFP_ATOMIC); + if (skb_new) + __qdf_nbuf_count_inc(skb_new); + + return skb_new; +} + +#define __qdf_nbuf_reserve skb_reserve + +/** + * __qdf_nbuf_reset() - reset the buffer data and pointer + * @buf: Network buf instance + * @reserve: reserve + * @align: align + * + * Return: none + */ +static inline void +__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align) +{ + int offset; + + skb_push(skb, skb_headroom(skb)); + skb_put(skb, skb_tailroom(skb)); + memset(skb->data, 0x0, skb->len); + skb_trim(skb, 0); + skb_reserve(skb, NET_SKB_PAD); + memset(skb->cb, 0x0, sizeof(skb->cb)); + + /* + * The default is for netbuf fragments to be interpreted + * as wordstreams rather than bytestreams. + */ + QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1; + QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1; + + /* + * Align & make sure that the tail & data are adjusted properly + */ + + if (align) { + offset = ((unsigned long)skb->data) % align; + if (offset) + skb_reserve(skb, align - offset); + } + + skb_reserve(skb, reserve); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) +/** + * qdf_nbuf_dev_scratch_is_supported() - dev_scratch support for network buffer + * in kernel + * + * Return: true if dev_scratch is supported + * false if dev_scratch is not supported + */ +static inline bool __qdf_nbuf_is_dev_scratch_supported(void) +{ + return true; +} + +/** + * qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer + * @skb: Pointer to network buffer + * + * Return: dev_scratch if dev_scratch supported + * 0 if dev_scratch not supported + */ +static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb) +{ + return skb->dev_scratch; +} + +/** + * qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer + * @skb: Pointer to network buffer + * @value: value to be set in dev_scratch of network buffer + * + * Return: void + */ +static inline void 
+__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value) +{ + skb->dev_scratch = value; +} +#else +static inline bool __qdf_nbuf_is_dev_scratch_supported(void) +{ + return false; +} + +static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb) +{ + return 0; +} + +static inline void +__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value) +{ +} +#endif /* KERNEL_VERSION(4, 14, 0) */ + +/** + * __qdf_nbuf_head() - return the pointer the skb's head pointer + * @skb: Pointer to network buffer + * + * Return: Pointer to head buffer + */ +static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb) +{ + return skb->head; +} + +/** + * __qdf_nbuf_data() - return the pointer to data header in the skb + * @skb: Pointer to network buffer + * + * Return: Pointer to skb data + */ +static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb) +{ + return skb->data; +} + +static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb) +{ + return (uint8_t *)&skb->data; +} + +/** + * __qdf_nbuf_get_protocol() - return the protocol value of the skb + * @skb: Pointer to network buffer + * + * Return: skb protocol + */ +static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb) +{ + return skb->protocol; +} + +/** + * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb + * @skb: Pointer to network buffer + * + * Return: skb ip_summed + */ +static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb) +{ + return skb->ip_summed; +} + +/** + * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb + * @skb: Pointer to network buffer + * @ip_summed: ip checksum + * + * Return: none + */ +static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb, + uint8_t ip_summed) +{ + skb->ip_summed = ip_summed; +} + +/** + * __qdf_nbuf_get_priority() - return the priority value of the skb + * @skb: Pointer to network buffer + * + * Return: skb priority + */ +static inline uint32_t 
__qdf_nbuf_get_priority(struct sk_buff *skb) +{ + return skb->priority; +} + +/** + * __qdf_nbuf_set_priority() - sets the priority value of the skb + * @skb: Pointer to network buffer + * @p: priority + * + * Return: none + */ +static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p) +{ + skb->priority = p; +} + +/** + * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb + * @skb: Current skb + * @next_skb: Next skb + * + * Return: void + */ +static inline void +__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next) +{ + skb->next = skb_next; +} + +/** + * __qdf_nbuf_next() - return the next skb pointer of the current skb + * @skb: Current skb + * + * Return: the next skb pointed to by the current skb + */ +static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb) +{ + return skb->next; +} + +/** + * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb + * @skb: Current skb + * @next_skb: Next skb + * + * This fn is used to link up extensions to the head skb. Does not handle + * linking to the head + * + * Return: none + */ +static inline void +__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next) +{ + skb->next = skb_next; +} + +/** + * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb + * @skb: Current skb + * + * Return: the next skb pointed to by the current skb + */ +static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb) +{ + return skb->next; +} + +/** + * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head + * @skb_head: head_buf nbuf holding head segment (single) + * @ext_list: nbuf list holding linked extensions to the head + * @ext_len: Total length of all buffers in the extension list + * + * This function is used to link up a list of packet extensions (seg1, 2,* ...) 
+ * to the nbuf holding the head segment (seg0) + * + * Return: none + */ +static inline void +__qdf_nbuf_append_ext_list(struct sk_buff *skb_head, + struct sk_buff *ext_list, size_t ext_len) +{ + skb_shinfo(skb_head)->frag_list = ext_list; + skb_head->data_len = ext_len; + skb_head->len += skb_head->data_len; +} + +/** + * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list. + * @head_buf: Network buf holding head segment (single) + * + * This ext_list is populated when we have Jumbo packet, for example in case of + * monitor mode amsdu packet reception, and are stiched using frags_list. + * + * Return: Network buf list holding linked extensions from head buf. + */ +static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf) +{ + return (skb_shinfo(head_buf)->frag_list); +} + +/** + * __qdf_nbuf_get_age() - return the checksum value of the skb + * @skb: Pointer to network buffer + * + * Return: checksum value + */ +static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb) +{ + return skb->csum; +} + +/** + * __qdf_nbuf_set_age() - sets the checksum value of the skb + * @skb: Pointer to network buffer + * @v: Value + * + * Return: none + */ +static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v) +{ + skb->csum = v; +} + +/** + * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb + * @skb: Pointer to network buffer + * @adj: Adjustment value + * + * Return: none + */ +static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj) +{ + skb->csum -= adj; +} + +/** + * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb + * @skb: Pointer to network buffer + * @offset: Offset value + * @len: Length + * @to: Destination pointer + * + * Return: length of the copy bits for skb + */ +static inline int32_t +__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to) +{ + return skb_copy_bits(skb, offset, to, len); +} + +/** + * __qdf_nbuf_set_pktlen() - sets the 
length of the skb and adjust the tail + * @skb: Pointer to network buffer + * @len: Packet length + * + * Return: none + */ +static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len) +{ + if (skb->len > len) { + skb_trim(skb, len); + } else { + if (skb_tailroom(skb) < len - skb->len) { + if (unlikely(pskb_expand_head(skb, 0, + len - skb->len - skb_tailroom(skb), + GFP_ATOMIC))) { + dev_kfree_skb_any(skb); + qdf_assert(0); + } + } + skb_put(skb, (len - skb->len)); + } +} + +/** + * __qdf_nbuf_set_protocol() - sets the protocol value of the skb + * @skb: Pointer to network buffer + * @protocol: Protocol type + * + * Return: none + */ +static inline void +__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol) +{ + skb->protocol = protocol; +} + +#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \ + (QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi)) + +#define __qdf_nbuf_get_tx_htt2_frm(skb) \ + QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) + +void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr, + uint32_t *lo, uint32_t *hi); + +uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb, + struct qdf_tso_info_t *tso_info); + +void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev, + struct qdf_tso_seg_elem_t *tso_seg, + bool is_last_seg); + +#ifdef FEATURE_TSO +uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb); + +#else +static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb) +{ + return 0; +} + +#endif /* FEATURE_TSO */ + +static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb) +{ + if (skb_is_gso(skb) && + (skb_is_gso_v6(skb) || + (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))) + return true; + else + return false; +} + +struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb); + +int __qdf_nbuf_get_users(struct sk_buff *skb); + +/** + * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype, + * and get hw_classify by peeking + * into packet + * @nbuf: Network buffer (skb on Linux) + * @pkt_type: Pkt type (from enum 
htt_pkt_type) + * @pkt_subtype: Bit 4 of this field in HTT descriptor + * needs to be set in case of CE classification support + * Is set by this macro. + * @hw_classify: This is a flag which is set to indicate + * CE classification is enabled. + * Do not set this bit for VLAN packets + * OR for mcast / bcast frames. + * + * This macro parses the payload to figure out relevant Tx meta-data e.g. + * whether to enable tx_classify bit in CE. + * + * Overrides pkt_type only if required for 802.3 frames (original ethernet) + * If protocol is less than ETH_P_802_3_MIN (0x600), then + * it is the length and a 802.3 frame else it is Ethernet Type II + * (RFC 894). + * Bit 4 in pkt_subtype is the tx_classify bit + * + * Return: void + */ +#define __qdf_nbuf_tx_info_get(skb, pkt_type, \ + pkt_subtype, hw_classify) \ +do { \ + struct ethhdr *eh = (struct ethhdr *)skb->data; \ + uint16_t ether_type = ntohs(eh->h_proto); \ + bool is_mc_bc; \ + \ + is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) || \ + is_multicast_ether_addr((uint8_t *)eh); \ + \ + if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) { \ + hw_classify = 1; \ + pkt_subtype = 0x01 << \ + HTT_TX_CLASSIFY_BIT_S; \ + } \ + \ + if (unlikely(ether_type < ETH_P_802_3_MIN)) \ + pkt_type = htt_pkt_type_ethernet; \ + \ +} while (0) + +/** + * nbuf private buffer routines + */ + +/** + * __qdf_nbuf_peek_header() - return the header's addr & m_len + * @skb: Pointer to network buffer + * @addr: Pointer to store header's addr + * @m_len: network buffer length + * + * Return: none + */ +static inline void +__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len) +{ + *addr = skb->data; + *len = skb->len; +} + +/** + * typedef struct __qdf_nbuf_queue_t - network buffer queue + * @head: Head pointer + * @tail: Tail pointer + * @qlen: Queue length + */ +typedef struct __qdf_nbuf_qhead { + struct sk_buff *head; + struct sk_buff *tail; + unsigned int qlen; +} __qdf_nbuf_queue_t; + +/******************Functions 
*************/ + +/** + * __qdf_nbuf_queue_init() - initiallize the queue head + * @qhead: Queue head + * + * Return: QDF status + */ +static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead) +{ + memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead)); + return QDF_STATUS_SUCCESS; +} + +/** + * __qdf_nbuf_queue_add() - add an skb in the tail of the queue + * @qhead: Queue head + * @skb: Pointer to network buffer + * + * This is a lockless version, driver must acquire locks if it + * needs to synchronize + * + * Return: none + */ +static inline void +__qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb) +{ + skb->next = NULL; /*Nullify the next ptr */ + + if (!qhead->head) + qhead->head = skb; + else + qhead->tail->next = skb; + + qhead->tail = skb; + qhead->qlen++; +} + +/** + * __qdf_nbuf_queue_append() - Append src list at the end of dest list + * @dest: target netbuf queue + * @src: source netbuf queue + * + * Return: target netbuf queue + */ +static inline __qdf_nbuf_queue_t * +__qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src) +{ + if (!dest) + return NULL; + else if (!src || !(src->head)) + return dest; + + if (!(dest->head)) + dest->head = src->head; + else + dest->tail->next = src->head; + + dest->tail = src->tail; + dest->qlen += src->qlen; + return dest; +} + +/** + * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue + * @qhead: Queue head + * @skb: Pointer to network buffer + * + * This is a lockless version, driver must acquire locks if it needs to + * synchronize + * + * Return: none + */ +static inline void +__qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb) +{ + if (!qhead->head) { + /*Empty queue Tail pointer Must be updated */ + qhead->tail = skb; + } + skb->next = qhead->head; + qhead->head = skb; + qhead->qlen++; +} + +/** + * __qdf_nbuf_queue_remove() - remove a skb from the head of the queue + * @qhead: Queue head + * + * This is a lockless version. 
Driver should take care of the locks + * + * Return: skb or NULL + */ +static inline +struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead) +{ + __qdf_nbuf_t tmp = NULL; + + if (qhead->head) { + qhead->qlen--; + tmp = qhead->head; + if (qhead->head == qhead->tail) { + qhead->head = NULL; + qhead->tail = NULL; + } else { + qhead->head = tmp->next; + } + tmp->next = NULL; + } + return tmp; +} + +/** + * __qdf_nbuf_queue_free() - free a queue + * @qhead: head of queue + * + * Return: QDF status + */ +static inline QDF_STATUS +__qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead) +{ + __qdf_nbuf_t buf = NULL; + + while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL) + __qdf_nbuf_free(buf); + return QDF_STATUS_SUCCESS; +} + + +/** + * __qdf_nbuf_queue_first() - returns the first skb in the queue + * @qhead: head of queue + * + * Return: NULL if the queue is empty + */ +static inline struct sk_buff * +__qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead) +{ + return qhead->head; +} + +/** + * __qdf_nbuf_queue_len() - return the queue length + * @qhead: Queue head + * + * Return: Queue length + */ +static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead) +{ + return qhead->qlen; +} + +/** + * __qdf_nbuf_queue_next() - return the next skb from packet chain + * @skb: Pointer to network buffer + * + * This API returns the next skb from packet chain, remember the skb is + * still in the queue + * + * Return: NULL if no packets are there + */ +static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb) +{ + return skb->next; +} + +/** + * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not + * @qhead: Queue head + * + * Return: true if length is 0 else false + */ +static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead) +{ + return qhead->qlen == 0; +} + +/* + * Use sk_buff_head as the implementation of qdf_nbuf_queue_t. 
+ * Because the queue head will most likely put in some structure, + * we don't use pointer type as the definition. + */ + +/* + * Use sk_buff_head as the implementation of qdf_nbuf_queue_t. + * Because the queue head will most likely put in some structure, + * we don't use pointer type as the definition. + */ + +static inline void +__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag) +{ +} + +/** + * __qdf_nbuf_realloc_headroom() - This keeps the skb shell intact + * expands the headroom + * in the data region. In case of failure the skb is released. + * @skb: sk buff + * @headroom: size of headroom + * + * Return: skb or NULL + */ +static inline struct sk_buff * +__qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom) +{ + if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) { + dev_kfree_skb_any(skb); + skb = NULL; + } + return skb; +} + +/** + * __qdf_nbuf_realloc_tailroom() - This keeps the skb shell intact + * exapnds the tailroom + * in data region. In case of failure it releases the skb. + * @skb: sk buff + * @tailroom: size of tailroom + * + * Return: skb or NULL + */ +static inline struct sk_buff * +__qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom) +{ + if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC))) + return skb; + /** + * unlikely path + */ + dev_kfree_skb_any(skb); + return NULL; +} + +/** + * __qdf_nbuf_linearize() - skb linearize + * @skb: sk buff + * + * create a version of the specified nbuf whose contents + * can be safely modified without affecting other + * users.If the nbuf is non-linear then this function + * linearize. if unable to linearize returns -ENOMEM on + * success 0 is returned + * + * Return: 0 on Success, -ENOMEM on failure is returned. 
+ */ +static inline int +__qdf_nbuf_linearize(struct sk_buff *skb) +{ + return skb_linearize(skb); +} + +/** + * __qdf_nbuf_unshare() - skb unshare + * @skb: sk buff + * + * create a version of the specified nbuf whose contents + * can be safely modified without affecting other + * users.If the nbuf is a clone then this function + * creates a new copy of the data. If the buffer is not + * a clone the original buffer is returned. + * + * Return: skb or NULL + */ +static inline struct sk_buff * +__qdf_nbuf_unshare(struct sk_buff *skb) +{ + return skb_unshare(skb, GFP_ATOMIC); +} + +/** + * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not + *@buf: sk buff + * + * Return: true/false + */ +static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb) +{ + return skb_cloned(skb); +} + +/** + * __qdf_nbuf_pool_init() - init pool + * @net: net handle + * + * Return: QDF status + */ +static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net) +{ + return QDF_STATUS_SUCCESS; +} + +/* + * adf_nbuf_pool_delete() implementation - do nothing in linux + */ +#define __qdf_nbuf_pool_delete(osdev) + +/** + * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure + * release the skb. 
+ * @skb: sk buff + * @headroom: size of headroom + * @tailroom: size of tailroom + * + * Return: skb or NULL + */ +static inline struct sk_buff * +__qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom) +{ + if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC))) + return skb; + + dev_kfree_skb_any(skb); + return NULL; +} + +/** + * __qdf_nbuf_tx_cksum_info() - tx checksum info + * + * Return: true/false + */ +static inline bool +__qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off, + uint8_t **where) +{ + qdf_assert(0); + return false; +} + +/** + * __qdf_nbuf_reset_ctxt() - mem zero control block + * @nbuf: buffer + * + * Return: none + */ +static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf) +{ + qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb)); +} + +/** + * __qdf_nbuf_network_header() - get network header + * @buf: buffer + * + * Return: network header pointer + */ +static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf) +{ + return skb_network_header(buf); +} + +/** + * __qdf_nbuf_transport_header() - get transport header + * @buf: buffer + * + * Return: transport header pointer + */ +static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf) +{ + return skb_transport_header(buf); +} + +/** + * __qdf_nbuf_tcp_tso_size() - return the size of TCP segment size (MSS), + * passed as part of network buffer by network stack + * @skb: sk buff + * + * Return: TCP MSS size + * + */ +static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} + +/** + * __qdf_nbuf_init() - Re-initializes the skb for re-use + * @nbuf: sk buff + * + * Return: none + */ +void __qdf_nbuf_init(__qdf_nbuf_t nbuf); + +/* + * __qdf_nbuf_get_cb() - returns a pointer to skb->cb + * @nbuf: sk buff + * + * Return: void ptr + */ +static inline void * +__qdf_nbuf_get_cb(__qdf_nbuf_t nbuf) +{ + return (void *)nbuf->cb; +} + +/** + * __qdf_nbuf_headlen() - return the length of linear buffer of the 
skb + * @skb: sk buff + * + * Return: head size + */ +static inline size_t +__qdf_nbuf_headlen(struct sk_buff *skb) +{ + return skb_headlen(skb); +} + +/** + * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb, + * @skb: sk buff + * + * Return: number of fragments + */ +static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb) +{ + return skb_shinfo(skb)->nr_frags; +} + +/** + * __qdf_nbuf_tso_tcp_v4() - to check if the TSO TCP pkt is a IPv4 or not. + * @buf: sk buff + * + * Return: true/false + */ +static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0; +} + +/** + * __qdf_nbuf_tso_tcp_v6() - to check if the TSO TCP pkt is a IPv6 or not. + * @buf: sk buff + * + * Return: true/false + */ +static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0; +} + +/** + * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb + * @skb: sk buff + * + * Return: size of l2+l3+l4 header length + */ +static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb) +{ + return skb_transport_offset(skb) + tcp_hdrlen(skb); +} + +/** + * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not + * @buf: sk buff + * + * Return: true/false + */ +static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb) +{ + if (skb_is_nonlinear(skb)) + return true; + else + return false; +} + +/** + * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb + * @buf: sk buff + * + * Return: TCP sequence number + */ +static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb) +{ + return ntohl(tcp_hdr(skb)->seq); +} + +/** + * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf'f private space + *@buf: sk buff + * + * Return: data pointer to typecast into your priv structure + */ +static inline uint8_t * +__qdf_nbuf_get_priv_ptr(struct sk_buff *skb) +{ + return &skb->cb[8]; +} + +/** + * 
__qdf_nbuf_mark_wakeup_frame() - mark wakeup frame. + * @buf: Pointer to nbuf + * + * Return: None + */ +static inline void +__qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf) +{ + buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET; +} + +/** + * __qdf_nbuf_record_rx_queue() - set rx queue in skb + * + * @buf: sk buff + * @queue_id: Queue id + * + * Return: void + */ +static inline void +__qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id) +{ + skb_record_rx_queue(skb, queue_id); +} + +/** + * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel + * + * @buf: sk buff + * + * Return: Queue mapping + */ +static inline uint16_t +__qdf_nbuf_get_queue_mapping(struct sk_buff *skb) +{ + return skb->queue_mapping; +} + +/** + * __qdf_nbuf_set_timestamp() - set the timestamp for frame + * + * @buf: sk buff + * + * Return: void + */ +static inline void +__qdf_nbuf_set_timestamp(struct sk_buff *skb) +{ + __net_timestamp(skb); +} + +/** + * __qdf_nbuf_get_timedelta_ms() - get time difference in ms + * + * @buf: sk buff + * + * Return: time difference in ms + */ +static inline uint64_t +__qdf_nbuf_get_timedelta_ms(struct sk_buff *skb) +{ + return ktime_to_ms(net_timedelta(skb->tstamp)); +} + +/** + * __qdf_nbuf_get_timedelta_us() - get time difference in micro seconds + * + * @buf: sk buff + * + * Return: time difference in micro seconds + */ +static inline uint64_t +__qdf_nbuf_get_timedelta_us(struct sk_buff *skb) +{ + return ktime_to_us(net_timedelta(skb->tstamp)); +} + +/** + * __qdf_nbuf_orphan() - orphan a nbuf + * @skb: sk buff + * + * If a buffer currently has an owner then we call the + * owner's destructor function + * + * Return: void + */ +static inline void __qdf_nbuf_orphan(struct sk_buff *skb) +{ + return skb_orphan(skb); +} +#ifdef CONFIG_WIN +#include +#else +#include +#endif +#endif /*_I_QDF_NET_BUF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf_m.h 
b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf_m.h new file mode 100644 index 0000000000000000000000000000000000000000..ff4160342317c2cdd6793d248cb5246c906c44ec --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf_m.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_nbuf_m.h + * + * This file provides platform specific nbuf API's. + * Included by i_qdf_nbuf.h and should not be included + * directly from other files. 
+ */ + +#ifndef _I_QDF_NBUF_M_H +#define _I_QDF_NBUF_M_H + +#define QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.tcp_seq_num) +#define QDF_NBUF_CB_RX_TCP_ACK_NUM(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.tcp_ack_num) +#define QDF_NBUF_CB_RX_LRO_CTX(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.lro_ctx) + +#define QDF_NBUF_CB_TX_IPA_OWNED(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.ipa.owned) +#define QDF_NBUF_CB_TX_IPA_PRIV(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.ipa.priv) +#define QDF_NBUF_CB_TX_DESC_ID(skb)\ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.desc_id) +#define QDF_NBUF_CB_MGMT_TXRX_DESC_ID(skb)\ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.mgmt_desc_id) +#define QDF_NBUF_CB_TX_DMA_BI_MAP(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m. \ + dma_option.bi_map) + +#define __qdf_nbuf_ipa_owned_get(skb) \ + QDF_NBUF_CB_TX_IPA_OWNED(skb) + +#define __qdf_nbuf_ipa_owned_set(skb) \ + (QDF_NBUF_CB_TX_IPA_OWNED(skb) = 1) + +#define __qdf_nbuf_ipa_owned_clear(skb) \ + (QDF_NBUF_CB_TX_IPA_OWNED(skb) = 0) + +#define __qdf_nbuf_ipa_priv_get(skb) \ + QDF_NBUF_CB_TX_IPA_PRIV(skb) + +#define __qdf_nbuf_ipa_priv_set(skb, priv) \ + (QDF_NBUF_CB_TX_IPA_PRIV(skb) = (priv)) + + + +void __qdf_nbuf_init_replenish_timer(void); +void __qdf_nbuf_deinit_replenish_timer(void); + +/** + * __qdf_nbuf_push_head() - Push data in the front + * @skb: Pointer to network buffer + * @size: size to be pushed + * + * Return: New data pointer of this buf after data has been pushed, + * or NULL if there is not enough room in this buf. 
+ */ +static inline uint8_t *__qdf_nbuf_push_head(struct sk_buff *skb, size_t size) +{ + if (QDF_NBUF_CB_PADDR(skb)) + QDF_NBUF_CB_PADDR(skb) -= size; + + return skb_push(skb, size); +} + + +/** + * __qdf_nbuf_pull_head() - pull data out from the front + * @skb: Pointer to network buffer + * @size: size to be popped + * + * Return: New data pointer of this buf after data has been popped, + * or NULL if there is not sufficient data to pull. + */ +static inline uint8_t *__qdf_nbuf_pull_head(struct sk_buff *skb, size_t size) +{ + if (QDF_NBUF_CB_PADDR(skb)) + QDF_NBUF_CB_PADDR(skb) += size; + + return skb_pull(skb, size); +} + +/** + * qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer + * + * This function initializes the nbuf alloc fail replenish timer. + * + * Return: void + */ +static inline void +qdf_nbuf_init_replenish_timer(void) +{ + __qdf_nbuf_init_replenish_timer(); +} + +/** + * qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer + * + * This function deinitializes the nbuf alloc fail replenish timer. + * + * Return: void + */ +static inline void +qdf_nbuf_deinit_replenish_timer(void) +{ + __qdf_nbuf_deinit_replenish_timer(); +} + +#endif /*_I_QDF_NBUF_M_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf_w.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf_w.h new file mode 100644 index 0000000000000000000000000000000000000000..9411a8e798e945f0fcafbc5c920c7f279b27b9ee --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf_w.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_nbuf_w.h + * + * This file provides platform specific nbuf API's. + * Included by i_qdf_nbuf.h and should not be included + * directly from other files. + */ + +#ifndef _I_QDF_NBUF_W_H +#define _I_QDF_NBUF_W_H + +/* ext_cb accesor macros and internal API's */ + +#define QDF_NBUF_CB_EXT_CB(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_w.ext_cb_ptr) + +#define __qdf_nbuf_set_ext_cb(skb, ref) \ + do { \ + QDF_NBUF_CB_EXT_CB((skb)) = (ref); \ + } while (0) + +#define __qdf_nbuf_get_ext_cb(skb) \ + QDF_NBUF_CB_EXT_CB((skb)) + +/* fctx accesor macros and internal API's*/ + +#define QDF_NBUF_CB_RX_FCTX(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w.fctx) + +#define QDF_NBUF_CB_TX_FCTX(skb) \ + (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_w.fctx) + +#define __qdf_nbuf_set_rx_fctx_type(skb, ctx, type) \ + do { \ + QDF_NBUF_CB_RX_FCTX((skb)) = (ctx); \ + QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \ + } while (0) + +#define __qdf_nbuf_get_rx_fctx(skb) \ + QDF_NBUF_CB_RX_FCTX((skb)) + +#define __qdf_nbuf_set_tx_fctx_type(skb, ctx, type) \ + do { \ + QDF_NBUF_CB_TX_FCTX((skb)) = (ctx); \ + QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \ + } while (0) + +#define __qdf_nbuf_get_tx_fctx(skb) \ + QDF_NBUF_CB_TX_FCTX((skb)) + + + +/** + * __qdf_nbuf_push_head() - Push data in the front + * @skb: Pointer to network buffer + * @size: size to be pushed + * + * Return: New data pointer of this buf after data has 
been pushed, + * or NULL if there is not enough room in this buf. + */ +static inline uint8_t *__qdf_nbuf_push_head(struct sk_buff *skb, size_t size) +{ + return skb_push(skb, size); +} + +/** + * __qdf_nbuf_pull_head() - pull data out from the front + * @skb: Pointer to network buffer + * @size: size to be popped + * + * Return: New data pointer of this buf after data has been popped, + * or NULL if there is not sufficient data to pull. + */ +static inline uint8_t *__qdf_nbuf_pull_head(struct sk_buff *skb, size_t size) +{ + return skb_pull(skb, size); +} + +static inline void qdf_nbuf_init_replenish_timer(void) {} +static inline void qdf_nbuf_deinit_replenish_timer(void) {} + +#endif /*_I_QDF_NBUF_W_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_net_types.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_net_types.h new file mode 100644 index 0000000000000000000000000000000000000000..796f8b7b1f404ad539ed4255e2e7378ce489c9a6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_net_types.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_net_types + * This file provides OS dependent net types API's. 
+ */ + +#ifndef _I_QDF_NET_TYPES_H +#define _I_QDF_NET_TYPES_H + +#include /* uint8_t, etc. */ +#include +#include +#include + +typedef struct in6_addr __in6_addr_t; +typedef __wsum __wsum_t; + +static inline int32_t __qdf_csum_ipv6(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, unsigned short proto, + __wsum sum) +{ + return csum_ipv6_magic((struct in6_addr *)saddr, + (struct in6_addr *)daddr, len, proto, sum); +} + +#define __QDF_TCPHDR_FIN TCPHDR_FIN +#define __QDF_TCPHDR_SYN TCPHDR_SYN +#define __QDF_TCPHDR_RST TCPHDR_RST +#define __QDF_TCPHDR_PSH TCPHDR_PSH +#define __QDF_TCPHDR_ACK TCPHDR_ACK +#define __QDF_TCPHDR_URG TCPHDR_URG +#define __QDF_TCPHDR_ECE TCPHDR_ECE +#define __QDF_TCPHDR_CWR TCPHDR_CWR +#endif /* _I_QDF_NET_TYPES_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_perf.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_perf.h new file mode 100644 index 0000000000000000000000000000000000000000..a57e068b686ccc33b8cde2f3bf5b41b67c0bea18 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_perf.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: i_qdf_perf + * This file provides OS dependent perf API's. + */ + +#ifndef _I_QDF_PERF_H +#define _I_QDF_PERF_H + +#ifdef QCA_PERF_PROFILING + +#if (QCA_MIPS74K_PERF_PROFILING || QCA_MIPS24KK_PERF_PROFILING) +#include +#endif + +/* #defines required for structures */ +#define MAX_SAMPLES_SHIFT 5 /* change this only*/ +#define MAX_SAMPLES (1 << MAX_SAMPLES_SHIFT) +#define INC_SAMPLES(x) ((x + 1) & (MAX_SAMPLES - 1)) +#define MAX_SAMPLE_SZ (sizeof(uint32_t) * MAX_SAMPLES) +#define PER_SAMPLE_SZ sizeof(uint32_t) + +/** + * typedef qdf_perf_entry_t - performance entry + * @list: pointer to next + * @child: pointer tochild + * @parent: pointer to top + * @type: perf cntr + * @name: string + * @proc: pointer to proc entry + * @start_tsc: array at start tsc + * @end_tsc: array at ent tsc + * @samples: array of samples + * @sample_idx: sample index + * @lock_irq: lock irq + */ +typedef struct qdf_os_perf_entry { + struct list_head list; + struct list_head child; + + struct qdf_perf_entry *parent; + + qdf_perf_cntr_t type; + uint8_t *name; + + struct proc_dir_entry *proc; + + uint64_t start_tsc[MAX_SAMPLES]; + uint64_t end_tsc[MAX_SAMPLES]; + + uint32_t samples[MAX_SAMPLES]; + uint32_t sample_idx; + + spinlock_t lock_irq; + +} qdf_perf_entry_t; + +/* typedefs */ +typedef void *__qdf_perf_id_t; + +#endif /* QCA_PERF_PROFILING */ +#endif /* _I_QDF_PERF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_str.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_str.h new file mode 100644 index 0000000000000000000000000000000000000000..0f54196c36e5261244fbfa8b66f3edbbd1b30a87 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_str.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_str.h + * Linux-specific implementations for qdf_str + */ + +#ifndef __I_QDF_STR_H +#define __I_QDF_STR_H + +#include "linux/string.h" + +#define __qdf_is_space(c) isspace(c) +#define __qdf_str_cmp(left, right) strcmp(left, right) +#define __qdf_str_lcopy(dest, src, dest_size) strlcpy(dest, src, dest_size) +#define __qdf_str_left_trim(str) skip_spaces(str) +#define __qdf_str_len(str) strlen(str) +#define __qdf_str_trim(str) strim(str) +#define __qdf_str_nlen(str, limit) strnlen(str, limit) + +#endif /* __I_QDF_STR_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_threads.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_threads.h new file mode 100644 index 0000000000000000000000000000000000000000..4f6e1a9f64e0afe3dca3bcf4171711a449b30c67 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_threads.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_threads + * Header file for linux-specific thead abstractions + */ + +#ifndef __I_QDF_THREADS_H +#define __I_QDF_THREADS_H + +typedef struct task_struct __qdf_thread_t; + +#endif /* __I_QDF_THREADS_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_time.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_time.h new file mode 100644 index 0000000000000000000000000000000000000000..7f0b41e914ceb44089f74cd84c4b3230fc6bc4a9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_time.h @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: i_qdf_time + * This file provides OS dependent time API's. + */ + +#ifndef _I_QDF_TIME_H +#define _I_QDF_TIME_H + +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) +#include +#else +#include +#endif +#ifdef MSM_PLATFORM +#include +#endif +#ifdef CONFIG_CNSS +#include +#endif +#include + +typedef unsigned long __qdf_time_t; +typedef ktime_t __qdf_ktime_t; + +/** + * __qdf_ns_to_ktime() - Converts nanoseconds to a ktime object + * @ns: time in nanoseconds + * + * Return: nanoseconds as ktime object + */ +static inline ktime_t __qdf_ns_to_ktime(uint64_t ns) +{ + return ns_to_ktime(ns); +} + +/** + * __qdf_ktime_add() - Adds two ktime objects and returns + * a ktime object + * @time1: time as ktime object + * @time2: time as ktime object + * + * Return: sum of ktime objects as ktime object + */ +static inline ktime_t __qdf_ktime_add(ktime_t ktime1, ktime_t ktime2) +{ + return ktime_add(ktime1, ktime2); +} + +/** + * __qdf_ktime_get() - Gets the current time as ktime object + * + * Return: current time as ktime object + */ +static inline ktime_t __qdf_ktime_get(void) +{ + return ktime_get(); +} + +/** + * __qdf_ktime_add_ns() - Adds ktime object and nanoseconds value and + * returns the ktime object + * + * Return: ktime object + */ +static inline ktime_t __qdf_ktime_add_ns(ktime_t ktime, int64_t ns) +{ + return ktime_add_ns(ktime, ns); +} + +/** + * __qdf_ktime_to_ns() - convert ktime to nanoseconds + * @ktime: time as ktime object + * @ns: time in nanoseconds + * + * Return: ktime in nanoseconds + */ +static inline int64_t __qdf_ktime_to_ns(ktime_t ktime) +{ + return ktime_to_ns(ktime); +} + +/** + * __qdf_ktime_to_ms() - convert ktime to milliseconds + * @ktime: time as ktime object + * + * Return: ktime in milliseconds + */ +static inline int64_t __qdf_ktime_to_ms(ktime_t ktime) +{ + return ktime_to_ms(ktime); +} + + +/** + * __qdf_system_ticks() - get system ticks + * + * Return: system tick in jiffies + */ 
+static inline __qdf_time_t __qdf_system_ticks(void) +{ + return jiffies; +} + +/** + * __qdf_system_ticks_to_msecs() - convert system ticks into milli seconds + * @ticks: System ticks + * + * Return: system tick converted into milli seconds + */ +static inline uint32_t __qdf_system_ticks_to_msecs(unsigned long ticks) +{ + return jiffies_to_msecs(ticks); +} + +/** + * __qdf_system_msecs_to_ticks() - convert milli seconds into system ticks + * @msecs: Milli seconds + * + * Return: milli seconds converted into system ticks + */ +static inline __qdf_time_t __qdf_system_msecs_to_ticks(uint32_t msecs) +{ + return msecs_to_jiffies(msecs); +} + +/** + * __qdf_get_system_uptime() - get system uptime + * + * Return: system uptime in jiffies + */ +static inline __qdf_time_t __qdf_get_system_uptime(void) +{ + return jiffies; +} + +static inline unsigned long __qdf_get_system_timestamp(void) +{ + return (jiffies / HZ) * 1000 + (jiffies % HZ) * (1000 / HZ); +} + +#ifdef CONFIG_ARM +/** + * __qdf_udelay() - delay execution for given microseconds + * @usecs: Micro seconds to delay + * + * Return: none + */ +static inline void __qdf_udelay(uint32_t usecs) +{ + /* + * This is in support of XScale build. They have a limit on the udelay + * value, so we have to make sure we don't approach the limit + */ + uint32_t mticks; + uint32_t leftover; + int i; + /* slice into 1024 usec chunks (simplifies calculation) */ + mticks = usecs >> 10; + leftover = usecs - (mticks << 10); + for (i = 0; i < mticks; i++) + udelay(1024); + udelay(leftover); +} +#else +static inline void __qdf_udelay(uint32_t usecs) +{ + /* Normal Delay functions. 
Time specified in microseconds */ + udelay(usecs); +} +#endif + +/** + * __qdf_mdelay() - delay execution for given milliseconds + * @usecs: Milliseconds to delay + * + * Return: none + */ +static inline void __qdf_mdelay(uint32_t msecs) +{ + mdelay(msecs); +} + +/** + * __qdf_system_time_after() - Check if a is later than b + * @a: Time stamp value a + * @b: Time stamp value b + * + * Return: + * true if a < b else false + */ +static inline bool __qdf_system_time_after(__qdf_time_t a, __qdf_time_t b) +{ + return (long)(b) - (long)(a) < 0; +} + +/** + * __qdf_system_time_before() - Check if a is before b + * @a: Time stamp value a + * @b: Time stamp value b + * + * Return: + * true if a is before b else false + */ +static inline bool __qdf_system_time_before(__qdf_time_t a, __qdf_time_t b) +{ + return __qdf_system_time_after(b, a); +} + +/** + * __qdf_system_time_after_eq() - Check if a atleast as recent as b, if not + * later + * @a: Time stamp value a + * @b: Time stamp value b + * + * Return: + * true if a >= b else false + */ +static inline bool __qdf_system_time_after_eq(__qdf_time_t a, __qdf_time_t b) +{ + return (long)(a) - (long)(b) >= 0; +} + +/** + * __qdf_get_monotonic_boottime() - get monotonic kernel boot time + * This API is similar to qdf_get_system_boottime but it includes + * time spent in suspend. + * + * Return: Time in microseconds + */ +static inline uint64_t __qdf_get_monotonic_boottime(void) +{ + struct timespec ts; + + get_monotonic_boottime(&ts); + + return ((uint64_t) ts.tv_sec * 1000000) + (ts.tv_nsec / 1000); +} + +#if defined (QCA_WIFI_3_0_ADRASTEA) && defined (MSM_PLATFORM) + +/** + * __qdf_get_log_timestamp() - get QTIMER ticks + * + * Returns QTIMER(19.2 MHz) clock ticks. To convert it into seconds + * divide it by 19200. 
+ * + * Return: QTIMER(19.2 MHz) clock ticks + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) +static inline uint64_t __qdf_get_log_timestamp(void) +{ + return arch_counter_get_cntvct(); +} +#else +static inline uint64_t __qdf_get_log_timestamp(void) +{ + return arch_counter_get_cntpct(); +} +#endif /* LINUX_VERSION_CODE */ +#else + +/** + * __qdf_get_log_timestamp - get time stamp for logging + * For adrastea this API returns QTIMER tick which is needed to synchronize + * host and fw log timestamps + * For ROME and other discrete solution this API returns system boot time stamp + * + * Return: + * QTIMER ticks(19.2MHz) for adrastea + * System tick for rome and other future discrete solutions + */ +static inline uint64_t __qdf_get_log_timestamp(void) +{ + struct timespec ts; + + ktime_get_ts(&ts); + + return ((uint64_t) ts.tv_sec * 1000000) + (ts.tv_nsec / 1000); +} +#endif /* QCA_WIFI_3_0_ADRASTEA */ + +/** + * __qdf_get_bootbased_boottime_ns() - Get the bootbased time in nanoseconds + * + * __qdf_get_bootbased_boottime_ns() function returns the number of nanoseconds + * that have elapsed since the system was booted. It also includes the time when + * system was suspended. + * + * Return: + * The time since system booted in nanoseconds + */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) +static inline uint64_t __qdf_get_bootbased_boottime_ns(void) +{ + return ktime_get_boot_ns(); +} + +#else +static inline uint64_t __qdf_get_bootbased_boottime_ns(void) +{ + return ktime_to_ns(ktime_get_boottime()); +} +#endif + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_timer.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_timer.h new file mode 100644 index 0000000000000000000000000000000000000000..ce1fa8db8a5fe24d85b47124f9c575d0ff259fc3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_timer.h @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_timer + * This file provides OS dependent timer API's. + */ + +#ifndef _I_QDF_TIMER_H +#define _I_QDF_TIMER_H + +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) +#include +#endif + +typedef void (*qdf_timer_func_t)(void *); +struct __qdf_timer_t { + struct timer_list os_timer; + qdf_timer_func_t callback; + void *context; +}; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) +static inline void __os_timer_shim(struct timer_list *os_timer) +{ + struct __qdf_timer_t *timer = from_timer(timer, os_timer, os_timer); + + timer->callback(timer->context); +} + +static inline QDF_STATUS __qdf_timer_init(struct __qdf_timer_t *timer, + qdf_timer_func_t func, void *arg, + QDF_TIMER_TYPE type) +{ + struct timer_list *os_timer = &timer->os_timer; + uint32_t flags = 0; + + timer->callback = func; + timer->context = arg; + + if (type == QDF_TIMER_TYPE_SW) + flags |= TIMER_DEFERRABLE; + + if (object_is_on_stack(os_timer)) + timer_setup_on_stack(os_timer, __os_timer_shim, flags); + else + timer_setup(os_timer, __os_timer_shim, flags); + + return QDF_STATUS_SUCCESS; +} + +#else + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) +#define 
setup_deferrable_timer(timer, fn, data) \ + __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE) +#endif + +static inline void __os_timer_shim(unsigned long addr) +{ + struct __qdf_timer_t *timer = (void *)addr; + + timer->callback(timer->context); +} + +static inline QDF_STATUS __qdf_timer_init(struct __qdf_timer_t *timer, + qdf_timer_func_t func, void *arg, + QDF_TIMER_TYPE type) +{ + struct timer_list *os_timer = &timer->os_timer; + bool is_on_stack = object_is_on_stack(os_timer); + unsigned long addr = (unsigned long)timer; + + timer->callback = func; + timer->context = arg; + + if (type == QDF_TIMER_TYPE_SW) { + if (is_on_stack) + setup_deferrable_timer_on_stack(os_timer, + __os_timer_shim, + addr); + else + setup_deferrable_timer(os_timer, __os_timer_shim, addr); + } else { + if (is_on_stack) + setup_timer_on_stack(os_timer, __os_timer_shim, addr); + else + setup_timer(os_timer, __os_timer_shim, addr); + } + + return QDF_STATUS_SUCCESS; +} +#endif /* KERNEL_VERSION(4, 15, 0)*/ + +static inline void __qdf_timer_start(struct __qdf_timer_t *timer, uint32_t msec) +{ + struct timer_list *os_timer = &timer->os_timer; + + os_timer->expires = jiffies + msecs_to_jiffies(msec); + add_timer(os_timer); +} + +static inline void __qdf_timer_mod(struct __qdf_timer_t *timer, uint32_t msec) +{ + mod_timer(&timer->os_timer, jiffies + msecs_to_jiffies(msec)); +} + +static inline bool __qdf_timer_stop(struct __qdf_timer_t *timer) +{ + return !!del_timer(&timer->os_timer); +} + +static inline void __qdf_timer_free(struct __qdf_timer_t *timer) +{ + struct timer_list *os_timer = &timer->os_timer; + + del_timer_sync(os_timer); + + if (object_is_on_stack(os_timer)) + destroy_timer_on_stack(os_timer); +} + +static inline bool __qdf_timer_sync_cancel(struct __qdf_timer_t *timer) +{ + return del_timer_sync(&timer->os_timer); +} + +#endif /* _I_QDF_TIMER_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_trace.h 
b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..fbc436e216513828c3b06bb1e759ba7f46812fb8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_trace.h @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_trace.h + * + * Linux-specific definitions for QDF trace + * + */ + +#if !defined(__I_QDF_TRACE_H) +#define __I_QDF_TRACE_H + +/* older kernels have a bug in kallsyms, so ensure module.h is included */ +#include +#include + +#if !defined(__printf) +#define __printf(a, b) +#endif + +#ifdef CONFIG_MCL +/* QDF_TRACE is the macro invoked to add trace messages to code. See the + * documenation for qdf_trace_msg() for the parameters etc. for this function. + * + * NOTE: Code QDF_TRACE() macros into the source code. Do not code directly + * to the qdf_trace_msg() function. + * + * NOTE 2: qdf tracing is totally turned off if WLAN_DEBUG is *not* defined. 
+ * This allows us to build 'performance' builds where we can measure performance + * without being bogged down by all the tracing in the code + */ +#if defined(WLAN_DEBUG) || defined(DEBUG) +#define QDF_TRACE qdf_trace_msg +#define QDF_VTRACE qdf_vtrace_msg +#define QDF_TRACE_HEX_DUMP qdf_trace_hex_dump +#define QDF_MAX_LOGS_PER_SEC 2 +/** + * __QDF_TRACE_RATE_LIMITED() - rate limited version of QDF_TRACE + * @params: parameters to pass through to QDF_TRACE + * + * This API prevents logging a message more than QDF_MAX_LOGS_PER_SEC times per + * second. This means any subsequent calls to this API from the same location + * within 1/QDF_MAX_LOGS_PER_SEC seconds will be dropped. + * + * Return: None + */ +#define __QDF_TRACE_RATE_LIMITED(params...)\ + do {\ + static ulong __last_ticks;\ + ulong __ticks = jiffies;\ + if (time_after(__ticks,\ + __last_ticks + HZ / QDF_MAX_LOGS_PER_SEC)) {\ + QDF_TRACE(params);\ + __last_ticks = __ticks;\ + } \ + } while (0) +#else +#define QDF_TRACE(arg ...) +#define QDF_VTRACE(arg ...) +#define QDF_TRACE_HEX_DUMP(arg ...) +#define __QDF_TRACE_RATE_LIMITED(arg ...) +#endif +#else /* CONFIG_MCL */ + +#define qdf_trace(log_level, args...) \ + do { \ + extern int qdf_dbg_mask; \ + if (qdf_dbg_mask >= log_level) { \ + printk(args); \ + printk("\n"); \ + } \ + } while (0) + +#define QDF_TRACE qdf_trace_msg + +#define QDF_VTRACE qdf_vtrace_msg +#define QDF_TRACE_HEX_DUMP qdf_trace_hex_dump +#endif /* CONFIG_MCL */ + +#define __QDF_TRACE_NO_FL(log_level, module_id, format, args...) \ + QDF_TRACE(module_id, log_level, format, ## args) + +#define __QDF_TRACE_FL(log_level, module_id, format, args...) \ + QDF_TRACE(module_id, log_level, FL(format), ## args) + +#define __QDF_TRACE_RL(log_level, module_id, format, args...) \ + __QDF_TRACE_RATE_LIMITED(module_id, log_level, FL(format), ## args) + +#define __QDF_TRACE_RL_NO_FL(log_level, module_id, format, args...) 
\ + __QDF_TRACE_RATE_LIMITED(module_id, log_level, format, ## args) + +static inline void __qdf_trace_noop(QDF_MODULE_ID module, char *format, ...) { } + +#ifdef WLAN_LOG_FATAL +#define QDF_TRACE_FATAL(params...) \ + __QDF_TRACE_FL(QDF_TRACE_LEVEL_FATAL, ## params) +#define QDF_TRACE_FATAL_NO_FL(params...) \ + __QDF_TRACE_NO_FL(QDF_TRACE_LEVEL_FATAL, ## params) +#define QDF_TRACE_FATAL_RL(params...) \ + __QDF_TRACE_RL(QDF_TRACE_LEVEL_FATAL, ## params) +#define QDF_TRACE_FATAL_RL_NO_FL(params...) \ + __QDF_TRACE_RL_NO_FL(QDF_TRACE_LEVEL_FATAL, ## params) +#else +#define QDF_TRACE_FATAL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_FATAL_NO_FL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_FATAL_RL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_FATAL_RL_NO_FL(params...) __qdf_trace_noop(params) +#endif + +#ifdef WLAN_LOG_ERROR +#define QDF_TRACE_ERROR(params...) \ + __QDF_TRACE_FL(QDF_TRACE_LEVEL_ERROR, ## params) +#define QDF_TRACE_ERROR_NO_FL(params...) \ + __QDF_TRACE_NO_FL(QDF_TRACE_LEVEL_ERROR, ## params) +#define QDF_TRACE_ERROR_RL(params...) \ + __QDF_TRACE_RL(QDF_TRACE_LEVEL_ERROR, ## params) +#define QDF_TRACE_ERROR_RL_NO_FL(params...) \ + __QDF_TRACE_RL_NO_FL(QDF_TRACE_LEVEL_ERROR, ## params) +#else +#define QDF_TRACE_ERROR(params...) __qdf_trace_noop(params) +#define QDF_TRACE_ERROR_NO_FL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_ERROR_RL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_ERROR_RL_NO_FL(params...) __qdf_trace_noop(params) +#endif + +#ifdef WLAN_LOG_WARN +#define QDF_TRACE_WARN(params...) \ + __QDF_TRACE_FL(QDF_TRACE_LEVEL_WARN, ## params) +#define QDF_TRACE_WARN_NO_FL(params...) \ + __QDF_TRACE_NO_FL(QDF_TRACE_LEVEL_WARN, ## params) +#define QDF_TRACE_WARN_RL(params...) \ + __QDF_TRACE_RL(QDF_TRACE_LEVEL_WARN, ## params) +#define QDF_TRACE_WARN_RL_NO_FL(params...) \ + __QDF_TRACE_RL_NO_FL(QDF_TRACE_LEVEL_WARN, ## params) +#else +#define QDF_TRACE_WARN(params...) 
__qdf_trace_noop(params) +#define QDF_TRACE_WARN_NO_FL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_WARN_RL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_WARN_RL_NO_FL(params...) __qdf_trace_noop(params) +#endif + +#ifdef WLAN_LOG_INFO +#define QDF_TRACE_INFO(params...) \ + __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO, ## params) +#define QDF_TRACE_INFO_NO_FL(params...) \ + __QDF_TRACE_NO_FL(QDF_TRACE_LEVEL_INFO, ## params) +#define QDF_TRACE_INFO_RL(params...) \ + __QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO, ## params) +#define QDF_TRACE_INFO_RL_NO_FL(params...) \ + __QDF_TRACE_RL_NO_FL(QDF_TRACE_LEVEL_INFO, ## params) +#else +#define QDF_TRACE_INFO(params...) __qdf_trace_noop(params) +#define QDF_TRACE_INFO_NO_FL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_INFO_RL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_INFO_RL_NO_FL(params...) __qdf_trace_noop(params) +#endif + +#ifdef WLAN_LOG_DEBUG +#define QDF_TRACE_DEBUG(params...) \ + __QDF_TRACE_FL(QDF_TRACE_LEVEL_DEBUG, ## params) +#define QDF_TRACE_DEBUG_NO_FL(params...) \ + __QDF_TRACE_NO_FL(QDF_TRACE_LEVEL_DEBUG, ## params) +#define QDF_TRACE_DEBUG_RL(params...) \ + __QDF_TRACE_RL(QDF_TRACE_LEVEL_DEBUG, ## params) +#define QDF_TRACE_DEBUG_RL_NO_FL(params...) \ + __QDF_TRACE_RL_NO_FL(QDF_TRACE_LEVEL_DEBUG, ## params) +#else +#define QDF_TRACE_DEBUG(params...) __qdf_trace_noop(params) +#define QDF_TRACE_DEBUG_NO_FL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_DEBUG_RL(params...) __qdf_trace_noop(params) +#define QDF_TRACE_DEBUG_RL_NO_FL(params...) 
__qdf_trace_noop(params) +#endif + +#define QDF_ENABLE_TRACING +#define qdf_scnprintf scnprintf + +#ifdef QDF_ENABLE_TRACING + +#ifdef WLAN_WARN_ON_ASSERT +#define QDF_ASSERT(_condition) \ + do { \ + if (!(_condition)) { \ + pr_err("QDF ASSERT in %s Line %d\n", \ + __func__, __LINE__); \ + WARN_ON(1); \ + } \ + } while (0) +#else +#define QDF_ASSERT(_condition) \ + do { \ + if (!(_condition)) { \ + /* no-op */ \ + } \ + } while (0) +#endif /* WLAN_WARN_ON_ASSERT */ + +#else + +/* This code will be used for compilation if tracing is to be compiled out */ +/* of the code so these functions/macros are 'do nothing' */ +static inline void qdf_trace_msg(QDF_MODULE_ID module, QDF_TRACE_LEVEL level, + char *str_format, ...) +{ +} + +#define QDF_ASSERT(_condition) + +#endif + +#ifdef PANIC_ON_BUG +#ifdef CONFIG_SLUB_DEBUG +/** + * __qdf_bug() - Calls BUG() when the PANIC_ON_BUG compilation option is enabled + * + * Note: Calling BUG() can cause a compiler to assume any following code is + * unreachable. Because these BUG's may or may not be enabled by the build + * configuration, this can cause developers some pain. Consider: + * + * bool bit; + * + * if (ptr) + * bit = ptr->returns_bool(); + * else + * __qdf_bug(); + * + * // do stuff with @bit + * + * return bit; + * + * In this case, @bit is potentially uninitialized when we return! However, the + * compiler can correctly assume this case is impossible when PANIC_ON_BUG is + * enabled. Because developers typically enable this feature, the "maybe + * uninitialized" warning will not be emitted, and the bug remains uncaught + * until someone tries to make a build without PANIC_ON_BUG. + * + * A simple workaround for this, is to put the definition of __qdf_bug in + * another compilation unit, which prevents the compiler from assuming + * subsequent code is unreachable. For CONFIG_SLUB_DEBUG, do this to catch more + * bugs. Otherwise, use the typical inlined approach. 
+ * + * Return: None + */ +void __qdf_bug(void); +#else /* CONFIG_SLUB_DEBUG */ +static inline void __qdf_bug(void) +{ + BUG(); +} +#endif /* CONFIG_SLUB_DEBUG */ + +/** + * QDF_DEBUG_PANIC() - In debug builds, panic, otherwise do nothing + * @reason: An optional reason format string, followed by args + * + * Return: None + */ +#define QDF_DEBUG_PANIC(reason...) \ + QDF_DEBUG_PANIC_FL(__func__, __LINE__, "" reason) + +/** + * QDF_DEBUG_PANIC_FL() - In debug builds, panic, otherwise do nothing + * @func: origin function name to be logged + * @line: origin line number to be logged + * @fmt: printf compatible format string to be logged + * @args: zero or more printf compatible logging arguments + * + * Return: None + */ +#define QDF_DEBUG_PANIC_FL(func, line, fmt, args...) \ + do { \ + pr_err("WLAN Panic @ %s:%d: " fmt "\n", func, line, ##args); \ + __qdf_bug(); \ + } while (false) + +#define QDF_BUG(_condition) \ + do { \ + if (!(_condition)) { \ + pr_err("QDF BUG in %s Line %d: Failed assertion '" \ + #_condition "'\n", __func__, __LINE__); \ + __qdf_bug(); \ + } \ + } while (0) + +#else /* PANIC_ON_BUG */ + +#define QDF_DEBUG_PANIC(reason...) \ + do { \ + /* no-op */ \ + } while (false) + +#define QDF_DEBUG_PANIC_FL(func, line, fmt, args...) \ + do { \ + /* no-op */ \ + } while (false) + +#define QDF_BUG(_condition) \ + do { \ + if (!(_condition)) { \ + /* no-op */ \ + } \ + } while (0) + +#endif /* PANIC_ON_BUG */ + +#ifdef KSYM_SYMBOL_LEN +#define __QDF_SYMBOL_LEN KSYM_SYMBOL_LEN +#else +#define __QDF_SYMBOL_LEN 1 +#endif + +#endif /* __I_QDF_TRACE_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_types.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_types.h new file mode 100644 index 0000000000000000000000000000000000000000..eb0044ff0faaa706b8c931ef74bee43b7786d3b2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_types.h @@ -0,0 +1,387 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. 
All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_types.h + * This file provides OS dependent types API's. + */ + +#if !defined(__I_QDF_TYPES_H) +#define __I_QDF_TYPES_H + +#include + +#ifndef __KERNEL__ +#define __iomem +#endif +#include +#include +#include + +#ifndef __ahdecl +#ifdef __i386__ +#define __ahdecl __attribute__((regparm(0))) +#else +#define __ahdecl +#endif +#endif + +#ifdef __KERNEL__ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef IPA_OFFLOAD +#include +#endif + +typedef struct sg_table __sgtable_t; + +/* + * The IDs of the various system clocks + */ +#define __QDF_CLOCK_REALTIME CLOCK_REALTIME +#define __QDF_CLOCK_MONOTONIC CLOCK_MONOTONIC + +/* + * Return values for the qdf_hrtimer_data_t callback function + */ +#define __QDF_HRTIMER_NORESTART HRTIMER_NORESTART +#define __QDF_HRTIMER_RESTART HRTIMER_RESTART + +/* + * Mode arguments of qdf_hrtimer_data_t related functions + */ +#define __QDF_HRTIMER_MODE_ABS HRTIMER_MODE_ABS +#define __QDF_HRTIMER_MODE_REL HRTIMER_MODE_REL +#define __QDF_HRTIMER_MODE_PINNED HRTIMER_MODE_PINNED + +#else + +/* + * Hack - coexist with prior defs of dma_addr_t. 
+ * Eventually all other defs of dma_addr_t should be removed. + * At that point, the "already_defined" wrapper can be removed. + */ +#ifndef __dma_addr_t_already_defined__ +#define __dma_addr_t_already_defined__ +typedef unsigned long dma_addr_t; +#endif + +typedef unsigned long phys_addr_t; +typedef unsigned long __sgtable_t; + +#define SIOCGIWAP 0 +#define IWEVCUSTOM 0 +#define IWEVREGISTERED 0 +#define IWEVEXPIRED 0 +#define SIOCGIWSCAN 0 +#define DMA_TO_DEVICE 0 +#define DMA_BIDIRECTIONAL 0 +#define DMA_FROM_DEVICE 0 +#define __QDF_CLOCK_REALTIME 0 +#define __QDF_CLOCK_MONOTONIC 0 +#define __QDF_HRTIMER_MODE_ABS 0 +#define __QDF_HRTIMER_MODE_REL 0 +#define __QDF_HRTIMER_MODE_PINNED 0 +#define __QDF_HRTIMER_NORESTART 0 +#define __QDF_HRTIMER_RESTART 0 +#define __iomem +#endif /* __KERNEL__ */ + +/* + * max sg that we support + */ +#define __QDF_MAX_SCATTER 1 +#define __QDF_NSEC_PER_MSEC NSEC_PER_MSEC + +#if defined(__LITTLE_ENDIAN_BITFIELD) +#define QDF_LITTLE_ENDIAN_MACHINE +#elif defined(__BIG_ENDIAN_BITFIELD) +#define QDF_BIG_ENDIAN_MACHINE +#else +#error "Please fix " +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) || !defined(__KERNEL__) +#ifndef __bool_already_defined__ +#define __bool_already_defined__ + +/** + * bool - This is an enum for boolean + * @false: zero + * @true: one + */ +typedef enum bool { + false = 0, + true = 1, +} bool; +#endif /* __bool_already_defined__ */ +#endif + +#define __qdf_packed __attribute__((packed)) + +typedef int (*__qdf_os_intr)(void *); +/** + * Private definitions of general data types + */ +typedef dma_addr_t __qdf_dma_addr_t; +typedef size_t __qdf_dma_size_t; +typedef dma_addr_t __qdf_dma_context_t; +typedef struct net_device *__qdf_netdev_t; +typedef __le16 __qdf_le16_t; +typedef __le32 __qdf_le32_t; +typedef __le64 __qdf_le64_t; +typedef __be16 __qdf_be16_t; +typedef __be32 __qdf_be32_t; +typedef __be64 __qdf_be64_t; + +#ifdef IPA_OFFLOAD +typedef struct ipa_wdi_buffer_info __qdf_mem_info_t; +#else 
+/** + * struct __qdf_shared_mem_info - shared mem info struct + * @pa : physical address + * @iova: i/o virtual address + * @size: allocated memory size + * @result: status + */ +typedef struct __qdf_shared_mem_info { + phys_addr_t pa; + unsigned long iova; + size_t size; + int result; +} __qdf_mem_info_t; +#endif /* IPA_OFFLOAD */ + +#define qdf_dma_mem_context(context) dma_addr_t context +#define qdf_get_dma_mem_context(var, field) ((qdf_dma_context_t)(var->field)) + +/** + * typedef struct __qdf_resource_t - qdf resource type + * @paddr: Physical address + * @paddr: Virtual address + * @len: Length + */ +typedef struct __qdf_resource { + unsigned long paddr; + void __iomem *vaddr; + unsigned long len; +} __qdf_resource_t; + +struct __qdf_mempool_ctxt; + +#define MAX_MEM_POOLS 64 + +/** + * enum qdf_bus_type - Supported Bus types + * @QDF_BUS_TYPE_NONE: None Bus type for error check + * @QDF_BUS_TYPE_PCI: PCI Bus + * @QDF_BUS_TYPE_AHB: AHB Bus + * @QDF_BUS_TYPE_SNOC: SNOC Bus + * @QDF_BUS_TYPE_SIM: Simulator + * @QDF_BUS_TYPE_USB: USB Bus + */ +enum qdf_bus_type { + QDF_BUS_TYPE_NONE = -1, + QDF_BUS_TYPE_PCI = 0, + QDF_BUS_TYPE_AHB, + QDF_BUS_TYPE_SNOC, + QDF_BUS_TYPE_SIM, + QDF_BUS_TYPE_SDIO, + QDF_BUS_TYPE_USB +}; + +/** + * struct __qdf_device - generic qdf device type + * @drv: Pointer to driver + * @drv_hdl: Pointer to driver handle + * @drv_name: Pointer to driver name + * @irq: IRQ + * @dev: Pointer to device + * @res: QDF resource + * @func: Interrupt handler + * @mem_pool: array of pointers to mem pool context + * @bus_type: Bus type + * @bid: Bus ID + * @smmu_s1_enabled: SMMU S1 enabled or not + * @iommu_mapping: DMA iommu mapping pointer + */ +struct __qdf_device { + void *drv; + void *drv_hdl; + char *drv_name; + int irq; + struct device *dev; + __qdf_resource_t res; + __qdf_os_intr func; + struct __qdf_mempool_ctxt *mem_pool[MAX_MEM_POOLS]; + enum qdf_bus_type bus_type; +#ifdef CONFIG_MCL + const struct hif_bus_id *bid; +#endif + bool 
smmu_s1_enabled; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) + struct iommu_domain *domain; +#else + struct dma_iommu_mapping *iommu_mapping; +#endif +}; +typedef struct __qdf_device *__qdf_device_t; + +typedef size_t __qdf_size_t; +typedef off_t __qdf_off_t; +typedef uint8_t __iomem *__qdf_iomem_t; + +typedef uint32_t ath_dma_addr_t; + +/** + * typedef __qdf_segment_t - segment of memory + * @daddr: dma address + * @len: length of segment + */ +typedef struct __qdf_segment { + dma_addr_t daddr; + uint32_t len; +} __qdf_segment_t; + +/** + * __qdf_dma_map - dma map of memory + * @mapped: mapped address + * @nsegs: number of segments + * @coherent: coherency status + * @seg: segment of memory + */ +struct __qdf_dma_map { + uint32_t mapped; + uint32_t nsegs; + uint32_t coherent; + __qdf_segment_t seg[__QDF_MAX_SCATTER]; +}; +typedef struct __qdf_dma_map *__qdf_dma_map_t; + +/** + * __qdf_net_wireless_evcode - enum for event code + * @__QDF_IEEE80211_ASSOC: association event code + * @__QDF_IEEE80211_REASSOC: reassociation event code + * @__QDF_IEEE80211_DISASSOC: disassociation event code + * @__QDF_IEEE80211_JOIN: join event code + * @__QDF_IEEE80211_LEAVE: leave event code + * @__QDF_IEEE80211_SCAN: scan event code + * @__QDF_IEEE80211_REPLAY: replay event code + * @__QDF_IEEE80211_MICHAEL:michael event code + * @__QDF_IEEE80211_REJOIN: rejoin event code + * @__QDF_CUSTOM_PUSH_BUTTON: push button event code + */ +enum __qdf_net_wireless_evcode { + __QDF_IEEE80211_ASSOC = SIOCGIWAP, + __QDF_IEEE80211_REASSOC = IWEVCUSTOM, + __QDF_IEEE80211_DISASSOC = SIOCGIWAP, + __QDF_IEEE80211_JOIN = IWEVREGISTERED, + __QDF_IEEE80211_LEAVE = IWEVEXPIRED, + __QDF_IEEE80211_SCAN = SIOCGIWSCAN, + __QDF_IEEE80211_REPLAY = IWEVCUSTOM, + __QDF_IEEE80211_MICHAEL = IWEVCUSTOM, + __QDF_IEEE80211_REJOIN = IWEVCUSTOM, + __QDF_CUSTOM_PUSH_BUTTON = IWEVCUSTOM, +}; + +#define __qdf_print printk +#define __qdf_vprint vprintk +#define __qdf_snprint snprintf +#define __qdf_vsnprint 
vsnprintf +#define __qdf_toupper toupper +#define qdf_kstrtoint __qdf_kstrtoint + +#define __qdf_kstrtoint kstrtoint + +#define __QDF_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL +#define __QDF_DMA_TO_DEVICE DMA_TO_DEVICE +#ifndef __ubicom32__ +#define __QDF_DMA_FROM_DEVICE DMA_FROM_DEVICE +#else +#define __QDF_DMA_FROM_DEVICE DMA_TO_DEVICE +#endif +#define __qdf_inline inline + +/* + * 1. GNU C/C++ Compiler + * + * How to detect gcc : __GNUC__ + * How to detect gcc version : + * major version : __GNUC__ (2 = 2.x, 3 = 3.x, 4 = 4.x) + * minor version : __GNUC_MINOR__ + * + * 2. Microsoft C/C++ Compiler + * + * How to detect msc : _MSC_VER + * How to detect msc version : + * _MSC_VER (1200 = MSVC 6.0, 1300 = MSVC 7.0, ...) + * + */ + +/* + * MACROs to help with compiler and OS specifics. May need to get a little + * more sophisticated than this and define these to specific 'VERSIONS' of + * the compiler and OS. Until we have a need for that, lets go with this + */ +#if defined(_MSC_VER) + +#define QDF_COMPILER_MSC +/* assuming that if we build with MSC, OS is WinMobile */ +#define QDF_OS_WINMOBILE + +#elif defined(__GNUC__) + +#define QDF_COMPILER_GNUC +#define QDF_OS_LINUX /* assuming if building with GNUC, OS is Linux */ + +#endif + +#if defined(QDF_COMPILER_MSC) + + +/* + * Does nothing on Windows. packing individual structs is not + * supported on the Windows compiler + */ +#define QDF_PACK_STRUCT_1 +#define QDF_PACK_STRUCT_2 +#define QDF_PACK_STRUCT_4 +#define QDF_PACK_STRUCT_8 +#define QDF_PACK_STRUCT_16 + +#elif defined(QDF_COMPILER_GNUC) + +#else +#error "Compiling with an unknown compiler!!" 
+#endif + +#endif /* __I_QDF_TYPES_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_util.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_util.h new file mode 100644 index 0000000000000000000000000000000000000000..ddf164f2dac7e3d871ed65a9ed8c0cb98f9bf12e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_util.h @@ -0,0 +1,544 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_util.h + * This file provides OS dependent API's. 
+ */ + +#ifndef _I_QDF_UTIL_H +#define _I_QDF_UTIL_H + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 3, 8) +#include +#else +#if defined(__LINUX_MIPS32_ARCH__) || defined(__LINUX_MIPS64_ARCH__) +#include +#else +#endif +#endif + +#include +#include +#include + +#ifdef QCA_PARTNER_PLATFORM +#include "ath_carr_pltfrm.h" +#else +#include +#endif + +typedef wait_queue_head_t __qdf_wait_queue_head_t; + +/* Generic compiler-dependent macros if defined by the OS */ +#define __qdf_wait_queue_interruptible(wait_queue, condition) \ + wait_event_interruptible(wait_queue, condition) + +#define __qdf_wait_queue_timeout( \ + wait_queue, condition, timeout) \ + wait_event_timeout(wait_queue, condition,\ + timeout) + + +#define __qdf_init_waitqueue_head(_q) init_waitqueue_head(_q) + +#define __qdf_wake_up_interruptible(_q) wake_up_interruptible(_q) + +#define __qdf_wake_up(_q) wake_up(_q) + + +#define __qdf_wake_up_completion(_q) wake_up_completion(_q) + +#define __qdf_unlikely(_expr) unlikely(_expr) +#define __qdf_likely(_expr) likely(_expr) + +/** + * __qdf_status_to_os_return() - translates qdf_status types to linux return types + * @status: status to translate + * + * Translates error types that linux may want to handle specially. + * + * return: 0 or the linux error code that most closely matches the QDF_STATUS. 
+ * defaults to -1 (EPERM) + */ +static inline int __qdf_status_to_os_return(QDF_STATUS status) +{ + switch (status) { + case QDF_STATUS_SUCCESS: + return 0; + case QDF_STATUS_E_RESOURCES: + return -EBUSY; + case QDF_STATUS_E_NOMEM: + return -ENOMEM; + case QDF_STATUS_E_AGAIN: + return -EAGAIN; + case QDF_STATUS_E_INVAL: + return -EINVAL; + case QDF_STATUS_E_FAULT: + return -EFAULT; + case QDF_STATUS_E_ALREADY: + return -EALREADY; + case QDF_STATUS_E_BADMSG: + return -EBADMSG; + case QDF_STATUS_E_BUSY: + return -EBUSY; + case QDF_STATUS_E_CANCELED: + return -ECANCELED; + case QDF_STATUS_E_ABORTED: + return -ECONNABORTED; + case QDF_STATUS_E_PERM: + return -EPERM; + case QDF_STATUS_E_EXISTS: + return -EEXIST; + case QDF_STATUS_E_NOENT: + return -ENOENT; + case QDF_STATUS_E_E2BIG: + return -E2BIG; + case QDF_STATUS_E_NOSPC: + return -ENOSPC; + case QDF_STATUS_E_ADDRNOTAVAIL: + return -EADDRNOTAVAIL; + case QDF_STATUS_E_ENXIO: + return -ENXIO; + case QDF_STATUS_E_NETDOWN: + return -ENETDOWN; + case QDF_STATUS_E_IO: + return -EIO; + case QDF_STATUS_E_NETRESET: + return -ENETRESET; + case QDF_STATUS_E_PENDING: + return -EINPROGRESS; + case QDF_STATUS_E_TIMEOUT: + return -ETIMEDOUT; + default: + return -EPERM; + } +} + +static inline QDF_STATUS __qdf_status_from_os_return(int rc) +{ + switch (rc) { + case 0: + return QDF_STATUS_SUCCESS; + case -ENOMEM: + return QDF_STATUS_E_NOMEM; + case -EAGAIN: + return QDF_STATUS_E_AGAIN; + case -EINVAL: + return QDF_STATUS_E_INVAL; + case -EFAULT: + return QDF_STATUS_E_FAULT; + case -EALREADY: + return QDF_STATUS_E_ALREADY; + case -EBADMSG: + return QDF_STATUS_E_BADMSG; + case -EBUSY: + return QDF_STATUS_E_BUSY; + case -ECANCELED: + return QDF_STATUS_E_CANCELED; + case -ECONNABORTED: + return QDF_STATUS_E_ABORTED; + case -EPERM: + return QDF_STATUS_E_PERM; + case -EEXIST: + return QDF_STATUS_E_EXISTS; + case -ENOENT: + return QDF_STATUS_E_NOENT; + case -E2BIG: + return QDF_STATUS_E_E2BIG; + case -ENOSPC: + return QDF_STATUS_E_NOSPC; 
+ case -EADDRNOTAVAIL: + return QDF_STATUS_E_ADDRNOTAVAIL; + case -ENXIO: + return QDF_STATUS_E_ENXIO; + case -ENETDOWN: + return QDF_STATUS_E_NETDOWN; + case -EIO: + return QDF_STATUS_E_IO; + case -ENETRESET: + return QDF_STATUS_E_NETRESET; + case -EINPROGRESS: + return QDF_STATUS_E_PENDING; + case -ETIMEDOUT: + return QDF_STATUS_E_TIMEOUT; + default: + return QDF_STATUS_E_PERM; + } +} + +/** + * __qdf_set_bit() - set bit in address + * @nr: bit number to be set + * @addr: address buffer pointer + * + * Return: none + */ +static inline void __qdf_set_bit(unsigned int nr, unsigned long *addr) +{ + __set_bit(nr, addr); +} + +static inline void __qdf_clear_bit(unsigned int nr, unsigned long *addr) +{ + __clear_bit(nr, addr); +} + +static inline bool __qdf_test_bit(unsigned int nr, unsigned long *addr) +{ + return test_bit(nr, addr); +} + +static inline bool __qdf_test_and_clear_bit(unsigned int nr, + unsigned long *addr) +{ + return __test_and_clear_bit(nr, addr); +} + +static inline unsigned long __qdf_find_first_bit(unsigned long *addr, + unsigned long nbits) +{ + return find_first_bit(addr, nbits); +} + +/** + * __qdf_set_macaddr_broadcast() - set a QDF MacAddress to the 'broadcast' + * @mac_addr: pointer to the qdf MacAddress to set to broadcast + * + * This function sets a QDF MacAddress to the 'broadcast' MacAddress. Broadcast + * MacAddress contains all 0xFF bytes. + * + * Return: none + */ +static inline void __qdf_set_macaddr_broadcast(struct qdf_mac_addr *mac_addr) +{ + memset(mac_addr, 0xff, QDF_MAC_ADDR_SIZE); +} + +/** + * __qdf_zero_macaddr() - zero out a MacAddress + * @mac_addr: pointer to the struct qdf_mac_addr to zero. + * + * This function zeros out a QDF MacAddress type. 
+ * + * Return: none + */ +static inline void __qdf_zero_macaddr(struct qdf_mac_addr *mac_addr) +{ + memset(mac_addr, 0, QDF_MAC_ADDR_SIZE); +} + +/** + * __qdf_is_macaddr_equal() - compare two QDF MacAddress + * @mac_addr1: Pointer to one qdf MacAddress to compare + * @mac_addr2: Pointer to the other qdf MacAddress to compare + * + * This function returns a bool that tells if a two QDF MacAddress' + * are equivalent. + * + * Return: true if the MacAddress's are equal + * not true if the MacAddress's are not equal + */ +static inline bool __qdf_is_macaddr_equal(struct qdf_mac_addr *mac_addr1, + struct qdf_mac_addr *mac_addr2) +{ + return 0 == memcmp(mac_addr1, mac_addr2, QDF_MAC_ADDR_SIZE); +} + +/** + * qdf_in_interrupt - returns true if in interrupt context + */ +#define qdf_in_interrupt in_interrupt + +#define __qdf_min(_a, _b) min(_a, _b) +#define __qdf_max(_a, _b) max(_a, _b) + +#define __qdf_ffz(mask) (~(mask) == 0 ? -1 : ffz(mask)) + +#define MEMINFO_KB(x) ((x) << (PAGE_SHIFT - 10)) /* In kilobytes */ + +/** + * @brief Assert + */ +#define __qdf_assert(expr) do { \ + if (unlikely(!(expr))) { \ + pr_err("Assertion failed! %s:%s %s:%d\n", \ + # expr, __func__, __FILE__, __LINE__); \ + dump_stack(); \ + QDF_BUG(0); \ + } \ +} while (0) + +/** + * @brief Assert + */ +#define __qdf_target_assert(expr) do { \ + if (unlikely(!(expr))) { \ + qdf_print("Assertion failed! %s:%s %s:%d\n", \ + #expr, __FUNCTION__, __FILE__, __LINE__); \ + dump_stack(); \ + panic("Take care of the TARGET ASSERT first\n"); \ + } \ +} while (0) + +/** + * @brief Compile time Assert + */ +#define QDF_COMPILE_TIME_ASSERT(assertion_name, predicate) \ + typedef char assertion_name[(predicate) ? 
1 : -1] + +#define __qdf_container_of(ptr, type, member) container_of(ptr, type, member) + +#define __qdf_ntohs ntohs +#define __qdf_ntohl ntohl + +#define __qdf_htons htons +#define __qdf_htonl htonl + +#define __qdf_cpu_to_le16 cpu_to_le16 +#define __qdf_cpu_to_le32 cpu_to_le32 +#define __qdf_cpu_to_le64 cpu_to_le64 + +#define __qdf_le16_to_cpu le16_to_cpu +#define __qdf_le32_to_cpu le32_to_cpu +#define __qdf_le64_to_cpu le64_to_cpu + +#define __qdf_cpu_to_be16 cpu_to_be16 +#define __qdf_cpu_to_be32 cpu_to_be32 +#define __qdf_cpu_to_be64 cpu_to_be64 + +#define __qdf_be16_to_cpu be16_to_cpu +#define __qdf_be32_to_cpu be32_to_cpu +#define __qdf_be64_to_cpu be64_to_cpu + +/** + * @brief memory barriers. + */ +#define __qdf_wmb() wmb() +#define __qdf_rmb() rmb() +#define __qdf_mb() mb() +#define __qdf_ioread32(offset) ioread32(offset) +#define __qdf_iowrite32(offset, value) iowrite32(value, offset) + +#define __qdf_roundup(x, y) roundup(x, y) + +#ifdef QCA_CONFIG_SMP +/** + * __qdf_get_cpu() - get cpu_index + * + * Return: cpu_index + */ +static inline +int __qdf_get_cpu(void) +{ + int cpu_index = get_cpu(); + + put_cpu(); + return cpu_index; +} +#else +static inline +int __qdf_get_cpu(void) +{ + return 0; +} +#endif + +static inline int __qdf_device_init_wakeup(__qdf_device_t qdf_dev, bool enable) +{ + return device_init_wakeup(qdf_dev->dev, enable); +} + +/** + * __qdf_get_totalramsize() - Get total ram size in Kb + * + * Return: Total ram size in Kb + */ +static inline uint64_t +__qdf_get_totalramsize(void) +{ + struct sysinfo meminfo; + + si_meminfo(&meminfo); + return MEMINFO_KB(meminfo.totalram); +} + +/** + * __qdf_get_lower_32_bits() - get lower 32 bits from an address. + * @addr: address + * + * This api returns the lower 32 bits of an address. + * + * Return: lower 32 bits. + */ +static inline +uint32_t __qdf_get_lower_32_bits(__qdf_dma_addr_t addr) +{ + return lower_32_bits(addr); +} + +/** + * __qdf_get_upper_32_bits() - get upper 32 bits from an address. 
+ * @addr: address + * + * This api returns the upper 32 bits of an address. + * + * Return: upper 32 bits. + */ +static inline +uint32_t __qdf_get_upper_32_bits(__qdf_dma_addr_t addr) +{ + return upper_32_bits(addr); +} + +/** + * __qdf_rounddown_pow_of_two() - Round down to nearest power of two + * @n: number to be tested + * + * Test if the input number is power of two, and return the nearest power of two + * + * Return: number rounded down to the nearest power of two + */ +static inline +unsigned long __qdf_rounddown_pow_of_two(unsigned long n) +{ + if (is_power_of_2(n)) + return n; /* already a power of 2 */ + + return __rounddown_pow_of_two(n); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) + +/** + * __qdf_set_dma_coherent_mask() - set max number of bits allowed in dma addr + * @dev: device pointer + * @addr_bits: max number of bits allowed in dma address + * + * This API sets the maximum allowed number of bits in the dma address. + * + * Return: 0 - success, non zero - failure + */ +static inline +int __qdf_set_dma_coherent_mask(struct device *dev, uint8_t addr_bits) +{ + return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(addr_bits)); +} + +#else + +/** + * __qdf_set_dma_coherent_mask() - set max number of bits allowed in dma addr + * @dev: device pointer + * @addr_bits: max number of bits allowed in dma address + * + * This API sets the maximum allowed number of bits in the dma address. + * + * Return: 0 - success, non zero - failure + */ +static inline +int __qdf_set_dma_coherent_mask(struct device *dev, uint8_t addr_bits) +{ + return dma_set_coherent_mask(dev, DMA_BIT_MASK(addr_bits)); +} +#endif +/** + * qdf_get_random_bytes() - returns nbytes bytes of random + * data + * + * Return: random bytes of data + */ +static inline +void __qdf_get_random_bytes(void *buf, int nbytes) +{ + return get_random_bytes(buf, nbytes); +} + +/** + * __qdf_do_div() - wrapper function for kernel macro(do_div). 
+ * @dividend: Dividend value + * @divisor : Divisor value + * + * Return: Quotient + */ +static inline +uint64_t __qdf_do_div(uint64_t dividend, uint32_t divisor) +{ + do_div(dividend, divisor); + /*do_div macro updates dividend with Quotient of dividend/divisor */ + return dividend; +} + +/** + * __qdf_do_div_rem() - wrapper function for kernel macro(do_div) + * to get remainder. + * @dividend: Dividend value + * @divisor : Divisor value + * + * Return: remainder + */ +static inline +uint64_t __qdf_do_div_rem(uint64_t dividend, uint32_t divisor) +{ + return do_div(dividend, divisor); +} + +/** + * __qdf_hex_to_bin() - Wrapper function to kernel API to get unsigned + * integer from hexa decimal ASCII character. + * @ch: hexa decimal ASCII character + * + * Return: For hexa decimal ASCII char return actual decimal value + * else -1 for bad input. + */ +static inline +int __qdf_hex_to_bin(char ch) +{ + return hex_to_bin(ch); +} + +/** + * __qdf_hex_str_to_binary() - Wrapper function to get array of unsigned + * integers from string of hexa decimal ASCII characters. + * @dst: output array to hold converted values + * @src: input string of hexa decimal ASCII characters + * @count: size of dst string + * + * Return: For a string of hexa decimal ASCII characters return 0 + * else -1 for bad input. + */ +static inline +int __qdf_hex_str_to_binary(u8 *dst, const char *src, size_t count) +{ + return hex2bin(dst, src, count); +} + +#endif /*_I_QDF_UTIL_H*/ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_cpuhp.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_cpuhp.c new file mode 100644 index 0000000000000000000000000000000000000000..e192055a2e7f024cdb00ffef5a674d07a59e2ca3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_cpuhp.c @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_cpuhp + * This file provides OS dependent QDF CPU hotplug APIs + */ + +#include "i_qdf_cpuhp.h" +#include "qdf_trace.h" +#include "linux/cpu.h" +#include "linux/notifier.h" +#include "linux/version.h" + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0) +#include "linux/cpuhotplug.h" +#endif + +static __qdf_cpuhp_emit __qdf_cpuhp_on_up; +static __qdf_cpuhp_emit __qdf_cpuhp_on_down; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) +static int qdf_cpuhp_legacy_handler(struct notifier_block *block, + unsigned long state, + void *hcpu) +{ + unsigned long cpu = (unsigned long)hcpu; + + switch (state) { + case CPU_ONLINE: + __qdf_cpuhp_on_up(cpu); + break; + + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + __qdf_cpuhp_on_down(cpu); + break; + + default: + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block qdf_cpuhp_notifier_block = { + .notifier_call = qdf_cpuhp_legacy_handler, +}; + +static inline void qdf_cpuhp_register_callbacks(void) +{ + register_hotcpu_notifier(&qdf_cpuhp_notifier_block); +} + +static inline void qdf_cpuhp_unregister_callbacks(void) +{ + unregister_hotcpu_notifier(&qdf_cpuhp_notifier_block); +} +#else +static enum cpuhp_state registered_hotplug_state; + 
+static int qdf_cpuhp_up_handler(unsigned int cpu) +{ + __qdf_cpuhp_on_up(cpu); + + return 0; +} + +static int qdf_cpuhp_down_handler(unsigned int cpu) +{ + __qdf_cpuhp_on_down(cpu); + + return 0; +} + +static inline void qdf_cpuhp_register_callbacks(void) +{ + registered_hotplug_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "wlan/qca-qdf:online", + qdf_cpuhp_up_handler, + qdf_cpuhp_down_handler); +} + +static inline void qdf_cpuhp_unregister_callbacks(void) +{ + QDF_BUG(registered_hotplug_state); + if (registered_hotplug_state) + cpuhp_remove_state(registered_hotplug_state); +} +#endif /* KERNEL_VERSION(4, 6, 0) */ + +void __qdf_cpuhp_os_init(__qdf_cpuhp_emit on_up, __qdf_cpuhp_emit on_down) +{ + __qdf_cpuhp_on_up = on_up; + __qdf_cpuhp_on_down = on_down; + + qdf_cpuhp_register_callbacks(); +} + +void __qdf_cpuhp_os_deinit(void) +{ + qdf_cpuhp_unregister_callbacks(); +} + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_crypto.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_crypto.c new file mode 100644 index 0000000000000000000000000000000000000000..c0e6234342da52654a5c1cf6cf0995eedf56f4e5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_crypto.c @@ -0,0 +1,437 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_crypto.c + * + * This source file contains linux specific definitions for QDF crypto APIs + */ + +/* Include Files */ +#include "qdf_crypto.h" +#include +#include +#include +#include +#include +#include + +/* Function Definitions and Documentation */ +#define MAX_HMAC_ELEMENT_CNT 10 + +/* + * xor: API to calculate xor + * @a: first variable + * @b: second variable + * @len: length of variables + */ +static void xor(uint8_t *a, const uint8_t *b, size_t len) +{ + unsigned int i; + + for (i = 0; i < len; i++) + a[i] ^= b[i]; +} + +int qdf_get_hash(uint8_t *type, + uint8_t element_cnt, uint8_t *addr[], uint32_t *addr_len, + int8_t *hash) +{ + return qdf_get_hmac_hash(type, NULL, 0, element_cnt, + addr, addr_len, hash); +} + +int qdf_get_hmac_hash(uint8_t *type, uint8_t *key, + uint32_t keylen, + uint8_t element_cnt, uint8_t *addr[], uint32_t *addr_len, + int8_t *hash) +{ + int i; + size_t src_len[MAX_HMAC_ELEMENT_CNT]; + + if (element_cnt > MAX_HMAC_ELEMENT_CNT) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Invalid element count %d"), element_cnt); + return -EINVAL; + } + + for (i = 0; i < element_cnt; i++) + src_len[i] = addr_len[i]; + + return qdf_get_keyed_hash(type, key, keylen, (const uint8_t **)addr, + src_len, element_cnt, hash); +} + +/* qdf_update_dbl from RFC 5297. Length of d is AES_BLOCK_SIZE (128 bits) */ +void qdf_update_dbl(uint8_t *d) +{ + int i; + uint8_t msb, msb_prev = 0; + + /* left shift by 1 */ + for (i = AES_BLOCK_SIZE - 1; i >= 0; i--) { + msb = d[i] & 0x80; + d[i] = d[i] << 1; + d[i] += msb_prev ? 
1 : 0; + msb_prev = msb; + } + + if (msb) + d[AES_BLOCK_SIZE - 1] ^= 0x87; +} + +int qdf_get_keyed_hash(const char *alg, const uint8_t *key, + unsigned int key_len, const uint8_t *src[], + size_t *src_len, size_t num_elements, uint8_t *out) +{ + struct crypto_shash *tfm; + int ret; + size_t i; + + tfm = crypto_alloc_shash(alg, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Failed to allocate transformation for %s: %ld"), + alg, PTR_ERR(tfm)); + return -EINVAL; + } + + if (key && key_len) { + ret = crypto_shash_setkey(tfm, key, key_len); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Set key failed for %s, ret:%d"), + alg, -ret); + goto error; + } + } + + do { + SHASH_DESC_ON_STACK(desc, tfm); + desc->tfm = tfm; + desc->flags = crypto_shash_get_flags(tfm); + + ret = crypto_shash_init(desc); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Failed to init hash for %s, ret:%d"), + alg, -ret); + goto error; + } + + for (i = 0; i < num_elements; i++) { + ret = crypto_shash_update(desc, src[i], src_len[i]); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, + QDF_TRACE_LEVEL_ERROR, + FL("Failed to update hash for %s, ret:%d"), + alg, -ret); + goto error; + } + } + + ret = crypto_shash_final(desc, out); + if (ret) + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Failed to get digest for %s, ret:%d"), + alg, -ret); + } while (0); + +error: + crypto_free_shash(tfm); + return ret; +} + +/* AES String to Vector from RFC 5297, 'out' should be of length AES_BLOCK_SIZE + */ +int qdf_aes_s2v(const uint8_t *key, unsigned int key_len, const uint8_t *s[], + size_t s_len[], size_t num_s, uint8_t *out) +{ + const char *alg = "cmac(aes)"; + uint8_t d[AES_BLOCK_SIZE]; + uint8_t buf[AES_BLOCK_SIZE] = { 0 }; + size_t buf_len = AES_BLOCK_SIZE; + const uint8_t *a[1]; + unsigned int i; + uint8_t *t = NULL; + size_t t_len; + int ret; + + if (num_s == 0) { + /* V = AES-CMAC(K, ) */ + 
buf[0] = 0x01; + a[0] = buf; + ret = qdf_get_keyed_hash(alg, key, key_len, a, &buf_len, 1, + out); + return ret; + } + + /* D = AES-CMAC(K, ) */ + a[0] = buf; + ret = qdf_get_keyed_hash(alg, key, key_len, a, &buf_len, 1, d); + if (ret) + goto error; + + for (i = 0; i < num_s - 1; i++) { + /* D = qdf_update_dbl(D) xor AES-CMAC(K, Si) */ + qdf_update_dbl(d); + ret = qdf_get_keyed_hash(alg, key, key_len, &s[i], &s_len[i], 1, + buf); + if (ret) + goto error; + xor(d, buf, AES_BLOCK_SIZE); + } + + if (s_len[i] >= AES_BLOCK_SIZE) { + /* len(Sn) >= 128 */ + /* T = Sn xorend D */ + t = qdf_mem_malloc(s_len[i]); + if (!t) + return -EINVAL; + qdf_mem_copy(t, s[i], s_len[i]); + xor(t + s_len[i] - AES_BLOCK_SIZE, d, AES_BLOCK_SIZE); + t_len = s_len[i]; + } else { + /* len(Sn) < 128 */ + /* T = qdf_update_dbl(D) xor pad(Sn) */ + qdf_update_dbl(d); + qdf_mem_set(buf, AES_BLOCK_SIZE, 0); + qdf_mem_copy(buf, s[i], s_len[i]); + buf[s_len[i]] = 0x80; + xor(d, s[i], AES_BLOCK_SIZE); + t = d; + t_len = AES_BLOCK_SIZE; + } + + /* V = AES-CMAC(K, T) */ + a[0] = t; + ret = qdf_get_keyed_hash(alg, key, key_len, a, &t_len, 1, out); + +error: + if (t != NULL && t != d) + qdf_mem_free(t); + return ret; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)) +int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv, + const uint8_t *src, size_t src_len, uint8_t *dest, bool enc) +{ + struct crypto_skcipher *tfm; + struct skcipher_request *req = NULL; + struct scatterlist sg_in, sg_out; + int ret; + + if (!IS_VALID_CTR_KEY_LEN(key_len)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Invalid key length: %u"), key_len); + return -EINVAL; + } + + tfm = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Failed to alloc transformation for ctr(aes):%ld"), + PTR_ERR(tfm)); + return -EAGAIN; + } + + req = skcipher_request_alloc(tfm, GFP_KERNEL); + if (!req) { + 
QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Failed to allocate request for ctr(aes)")); + crypto_free_skcipher(tfm); + return -EAGAIN; + } + + ret = crypto_skcipher_setkey(tfm, key, key_len); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Set key failed for ctr(aes), ret:%d"), -ret); + skcipher_request_free(req); + crypto_free_skcipher(tfm); + return ret; + } + + sg_init_one(&sg_in, src, src_len); + sg_init_one(&sg_out, dest, src_len); + skcipher_request_set_crypt(req, &sg_in, &sg_out, src_len, siv); + + if (enc) + ret = crypto_skcipher_encrypt(req); + else + ret = crypto_skcipher_decrypt(req); + + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("%s failed for ctr(aes), ret:%d"), + enc ? "Encryption" : "Decryption", -ret); + } + + skcipher_request_free(req); + crypto_free_skcipher(tfm); + return ret; +} +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) +int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv, + const uint8_t *src, size_t src_len, uint8_t *dest, bool enc) +{ + struct crypto_ablkcipher *tfm; + struct ablkcipher_request *req = NULL; + struct scatterlist sg_in, sg_out; + int ret; + + if (!IS_VALID_CTR_KEY_LEN(key_len)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Invalid key length: %u"), key_len); + return -EINVAL; + } + + tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Failed to alloc transformation for ctr(aes):%ld"), + PTR_ERR(tfm)); + return -EAGAIN; + } + + req = ablkcipher_request_alloc(tfm, GFP_KERNEL); + if (!req) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Failed to allocate request for ctr(aes)")); + crypto_free_ablkcipher(tfm); + return -EAGAIN; + } + + ret = crypto_ablkcipher_setkey(tfm, key, key_len); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("Set key failed for ctr(aes), ret:%d"), -ret); + 
ablkcipher_request_free(req); + crypto_free_ablkcipher(tfm); + return ret; + } + + sg_init_one(&sg_in, src, src_len); + sg_init_one(&sg_out, dest, src_len); + ablkcipher_request_set_crypt(req, &sg_in, &sg_out, src_len, siv); + + if (enc) + ret = crypto_ablkcipher_encrypt(req); + else + ret = crypto_ablkcipher_decrypt(req); + + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + FL("%s failed for ctr(aes), ret:%d"), + enc ? "Encryption" : "Decryption", -ret); + } + + ablkcipher_request_free(req); + crypto_free_ablkcipher(tfm); + + return ret; +} +#else +int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv, + const uint8_t *src, size_t src_len, uint8_t *dest, bool enc) +{ + return -EINVAL; +} +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) +int qdf_crypto_aes_gmac(uint8_t *key, uint16_t key_length, + uint8_t *iv, uint8_t *aad, uint8_t *data, + uint16_t data_len, uint8_t *mic) +{ + struct crypto_aead *tfm; + int ret = 0; + struct scatterlist sg[4]; + uint16_t req_size; + struct aead_request *req = NULL; + uint8_t *aad_ptr, *input; + + tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) { + ret = PTR_ERR(tfm); + tfm = NULL; + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: crypto_alloc_aead failed (%d)", __func__, ret); + goto err_tfm; + } + + ret = crypto_aead_setkey(tfm, key, key_length); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "crypto_aead_setkey failed (%d)", ret); + goto err_tfm; + } + + ret = crypto_aead_setauthsize(tfm, IEEE80211_MMIE_GMAC_MICLEN); + if (ret) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "crypto_aead_setauthsize failed (%d)", ret); + goto err_tfm; + } + + /* Prepare aead request */ + req_size = sizeof(*req) + crypto_aead_reqsize(tfm) + + IEEE80211_MMIE_GMAC_MICLEN + AAD_LEN; + req = qdf_mem_malloc(req_size); + if (!req) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "Memory allocation failed"); + ret = -ENOMEM; 
+ goto err_tfm; + } + + input = (uint8_t *)req + sizeof(*req) + crypto_aead_reqsize(tfm); + aad_ptr = input + IEEE80211_MMIE_GMAC_MICLEN; + qdf_mem_copy(aad_ptr, aad, AAD_LEN); + + /* Scatter list operations */ + sg_init_table(sg, 4); + sg_set_buf(&sg[0], aad_ptr, AAD_LEN); + sg_set_buf(&sg[1], data, data_len); + sg_set_buf(&sg[2], input, IEEE80211_MMIE_GMAC_MICLEN); + sg_set_buf(&sg[3], mic, IEEE80211_MMIE_GMAC_MICLEN); + + aead_request_set_tfm(req, tfm); + aead_request_set_crypt(req, sg, sg, 0, iv); + aead_request_set_ad(req, + AAD_LEN + data_len + IEEE80211_MMIE_GMAC_MICLEN); + crypto_aead_encrypt(req); + +err_tfm: + if (tfm) + crypto_free_aead(tfm); + + if (req) + qdf_mem_free(req); + + return ret; +} +#else +int qdf_crypto_aes_gmac(uint8_t *key, uint16_t key_length, + uint8_t *iv, uint8_t *aad, uint8_t *data, + uint16_t data_len, uint8_t *mic) +{ + return -EINVAL; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_debugfs.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..054a1ee12d3f8c902946a3a4c6b17e382e5d4133 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_debugfs.c @@ -0,0 +1,466 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_debugfs + * This file provides QDF debug file system APIs + */ + +#include +#include +#include +#include +#include + +/* A private structure definition to qdf sequence */ +struct qdf_debugfs_seq_priv { + bool stop; +}; + +/* entry for root debugfs directory*/ +static qdf_dentry_t qdf_debugfs_root; + +QDF_STATUS qdf_debugfs_init(void) +{ + qdf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + + if (!qdf_debugfs_root) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_debugfs_init); + +QDF_STATUS qdf_debugfs_exit(void) +{ + if (!qdf_debugfs_root) + return QDF_STATUS_SUCCESS; + + debugfs_remove_recursive(qdf_debugfs_root); + qdf_debugfs_root = NULL; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_debugfs_exit); + +qdf_dentry_t qdf_debugfs_get_root(void) +{ + return qdf_debugfs_root; +} +qdf_export_symbol(qdf_debugfs_get_root); + +umode_t qdf_debugfs_get_filemode(uint16_t mode) +{ + umode_t ret = 0; + + if (mode & QDF_FILE_USR_READ) + ret |= 0400; + if (mode & QDF_FILE_USR_WRITE) + ret |= 0200; + + if (mode & QDF_FILE_GRP_READ) + ret |= 0040; + if (mode & QDF_FILE_GRP_WRITE) + ret |= 0020; + + if (mode & QDF_FILE_OTH_READ) + ret |= 0004; + if (mode & QDF_FILE_OTH_WRITE) + ret |= 0002; + + return ret; +} + +/** + * ---------------------- Implementation note --------------------------------- + * + * A read in debugfs file triggers seq_read() which calls seq_read api. A + * sequence begins with the call of the function start(). If the return is a non + * NULL value, the function next() is called. 
This function is an iterator, the + * goal is to go though all the data. Each time next() is called, the function + * show() is also called. It writes data values in the buffer read by the user. + * The function next() is called until it returns NULL. The sequence ends when + * next() returns NULL, then the function stop() is called. + * + * NOTE: When a sequence is finished, another one starts. That means that + * at the end of function stop(), the function start() is called again. This + * loop finishes when the function start() returns NULL. + * ---------------------------------------------------------------------------- + */ + +/* .seq_start() */ +static void *qdf_debugfs_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct qdf_debugfs_seq_priv *priv; + + priv = qdf_mem_malloc(sizeof(*priv)); + if (!priv) + return NULL; + + priv->stop = false; + + return priv; +} + +/* .seq_next() */ +static void *qdf_debugfs_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct qdf_debugfs_seq_priv *priv = v; + + if (priv) + ++*pos; + + if (priv && priv->stop) { + qdf_mem_free(priv); + priv = NULL; + } + + return priv; +} + +/* .seq_stop() */ +static void qdf_debugfs_seq_stop(struct seq_file *seq, void *v) +{ + qdf_mem_free(v); +} + +/* .seq_show() */ +static int qdf_debugfs_seq_show(struct seq_file *seq, void *v) +{ + struct qdf_debugfs_seq_priv *priv = v; + struct qdf_debugfs_fops *fops; + QDF_STATUS status; + + fops = seq->private; + + if (fops && fops->show) { + status = fops->show(seq, fops->priv); + + if (priv && (status != QDF_STATUS_E_AGAIN)) + priv->stop = true; + } + + return 0; +} + +void qdf_debugfs_printf(qdf_debugfs_file_t file, const char *f, ...) 
+{ + va_list args; + + va_start(args, f); + seq_vprintf(file, f, args); + va_end(args); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) + +void qdf_debugfs_hexdump(qdf_debugfs_file_t file, const uint8_t *buf, + qdf_size_t len) +{ + seq_hex_dump(file, "", DUMP_PREFIX_OFFSET, 16, 4, buf, len, false); +} + +#else + +void qdf_debugfs_hexdump(qdf_debugfs_file_t file, const uint8_t *buf, + qdf_size_t len) +{ + const size_t rowsize = 16; + const size_t groupsize = 4; + char *dst; + size_t dstlen, readlen; + int prefix = 0; + size_t commitlen; + + while (len > 0 && (file->size > file->count)) { + seq_printf(file, "%.8x: ", prefix); + + readlen = min(len, rowsize); + dstlen = seq_get_buf(file, &dst); + hex_dump_to_buffer(buf, readlen, rowsize, groupsize, dst, + dstlen, false); + commitlen = strnlen(dst, dstlen); + seq_commit(file, commitlen); + seq_putc(file, '\n'); + + len = (len > rowsize) ? len - rowsize : 0; + buf += readlen; + prefix += rowsize; + } +} + +#endif + +void qdf_debugfs_write(qdf_debugfs_file_t file, const uint8_t *buf, + qdf_size_t len) +{ + seq_write(file, buf, len); +} + +/* sequential file operation table */ +static const struct seq_operations __qdf_debugfs_seq_ops = { + .start = qdf_debugfs_seq_start, + .next = qdf_debugfs_seq_next, + .stop = qdf_debugfs_seq_stop, + .show = qdf_debugfs_seq_show, +}; + +/* .open() */ +static int qdf_seq_open(struct inode *inode, struct file *file) +{ + void *private = inode->i_private; + struct seq_file *seq; + int rc; + + /** + * Note: seq_open() will allocate a struct seq_file and store its + * pointer in @file->private_data. It warns if private_data is not NULL. 
+ */ + + rc = seq_open(file, &__qdf_debugfs_seq_ops); + + if (rc == 0) { + seq = file->private_data; + seq->private = private; + } + + return rc; +} + +/* .write() */ +static ssize_t qdf_seq_write(struct file *filp, const char __user *ubuf, + size_t len, loff_t *ppos) +{ + struct qdf_debugfs_fops *fops; + struct seq_file *seq; + u8 *buf; + ssize_t rc = 0; + + if (len == 0) + return 0; + + seq = filp->private_data; + fops = seq->private; + if (fops && fops->write) { + buf = qdf_mem_malloc(len + 1); + if (buf) { + buf[len] = '\0'; + rc = simple_write_to_buffer(buf, len, ppos, ubuf, len); + fops->write(fops->priv, buf, len + 1); + qdf_mem_free(buf); + } + } + + return rc; +} + +/* debugfs file operation table */ +static const struct file_operations __qdf_debugfs_fops = { + .owner = THIS_MODULE, + .open = qdf_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, + .write = qdf_seq_write, +}; + +qdf_dentry_t qdf_debugfs_create_dir(const char *name, qdf_dentry_t parent) +{ + qdf_dentry_t dir; + + if (!name) + return NULL; + if (!parent) + parent = qdf_debugfs_get_root(); + + dir = debugfs_create_dir(name, parent); + + if (IS_ERR_OR_NULL(dir)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s creation failed", name); + dir = NULL; + } + + return dir; +} +qdf_export_symbol(qdf_debugfs_create_dir); + +qdf_dentry_t qdf_debugfs_create_file(const char *name, uint16_t mode, + qdf_dentry_t parent, + struct qdf_debugfs_fops *fops) +{ + qdf_dentry_t file; + umode_t filemode; + + if (!name || !fops) + return NULL; + + if (!parent) + parent = qdf_debugfs_get_root(); + + filemode = qdf_debugfs_get_filemode(mode); + file = debugfs_create_file(name, filemode, parent, fops, + &__qdf_debugfs_fops); + + if (IS_ERR_OR_NULL(file)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s creation failed 0x%pK", name, file); + file = NULL; + } + + return file; +} +qdf_export_symbol(qdf_debugfs_create_file); + +qdf_dentry_t qdf_debugfs_create_u8(const 
char *name, uint16_t mode, + qdf_dentry_t parent, u8 *value) +{ + umode_t filemode; + + if (!name) + return NULL; + + if (!parent) + parent = qdf_debugfs_get_root(); + + filemode = qdf_debugfs_get_filemode(mode); + return debugfs_create_u8(name, filemode, parent, value); +} + +qdf_dentry_t qdf_debugfs_create_u16(const char *name, uint16_t mode, + qdf_dentry_t parent, u16 *value) +{ + umode_t filemode; + + if (!name) + return NULL; + + if (!parent) + parent = qdf_debugfs_get_root(); + + filemode = qdf_debugfs_get_filemode(mode); + return debugfs_create_u16(name, filemode, parent, value); +} +qdf_export_symbol(qdf_debugfs_create_u16); + +qdf_dentry_t qdf_debugfs_create_u32(const char *name, + uint16_t mode, + qdf_dentry_t parent, u32 *value) +{ + umode_t filemode; + + if (!name) + return NULL; + + if (!parent) + parent = qdf_debugfs_get_root(); + + filemode = qdf_debugfs_get_filemode(mode); + return debugfs_create_u32(name, filemode, parent, value); +} +qdf_export_symbol(qdf_debugfs_create_u32); + +qdf_dentry_t qdf_debugfs_create_u64(const char *name, uint16_t mode, + qdf_dentry_t parent, u64 *value) +{ + umode_t filemode; + + if (!name) + return NULL; + + if (!parent) + parent = qdf_debugfs_get_root(); + + filemode = qdf_debugfs_get_filemode(mode); + return debugfs_create_u64(name, filemode, parent, value); +} +qdf_export_symbol(qdf_debugfs_create_u64); + +qdf_dentry_t qdf_debugfs_create_atomic(const char *name, uint16_t mode, + qdf_dentry_t parent, qdf_atomic_t *value) +{ + umode_t filemode; + + if (!name) + return NULL; + + if (!parent) + parent = qdf_debugfs_get_root(); + + filemode = qdf_debugfs_get_filemode(mode); + return debugfs_create_atomic_t(name, filemode, parent, value); +} +qdf_export_symbol(qdf_debugfs_create_atomic); + +static int qdf_debugfs_string_show(struct seq_file *seq, void *pos) +{ + char *str = seq->private; + + seq_puts(seq, str); + seq_putc(seq, '\n'); + + return 0; +} + +static int qdf_debugfs_string_open(struct inode *inode, struct file 
*file) +{ + return single_open(file, qdf_debugfs_string_show, inode->i_private); +} + +static const struct file_operations qdf_string_fops = { + .owner = THIS_MODULE, + .open = qdf_debugfs_string_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + +qdf_dentry_t qdf_debugfs_create_string(const char *name, uint16_t mode, + qdf_dentry_t parent, char *str) +{ + umode_t filemode; + + if (!name) + return NULL; + + if (!parent) + parent = qdf_debugfs_get_root(); + + filemode = qdf_debugfs_get_filemode(mode); + return debugfs_create_file(name, filemode, parent, str, + &qdf_string_fops); +} +qdf_export_symbol(qdf_debugfs_create_string); + +void qdf_debugfs_remove_dir_recursive(qdf_dentry_t d) +{ + debugfs_remove_recursive(d); +} +qdf_export_symbol(qdf_debugfs_remove_dir_recursive); + +void qdf_debugfs_remove_dir(qdf_dentry_t d) +{ + debugfs_remove(d); +} +qdf_export_symbol(qdf_debugfs_remove_dir); + +void qdf_debugfs_remove_file(qdf_dentry_t d) +{ + debugfs_remove(d); +} +qdf_export_symbol(qdf_debugfs_remove_file); diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_defer.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_defer.c new file mode 100644 index 0000000000000000000000000000000000000000..fe93299eaebe0aa3bc7058ea085ccb9b3a3558d9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_defer.c @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_defer.c + * This file provides OS dependent deferred API's. + */ + +#include +#include +#include + +#include "i_qdf_defer.h" +#include + +/** + * __qdf_defer_func() - defer work handler + * @work: Pointer to defer work + * + * Return: none + */ +void __qdf_defer_func(struct work_struct *work) +{ + __qdf_work_t *ctx = container_of(work, __qdf_work_t, work); + + if (ctx->fn == NULL) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "No callback registered !!"); + return; + } + ctx->fn(ctx->arg); +} +qdf_export_symbol(__qdf_defer_func); + +/** + * __qdf_defer_delayed_func() - defer work handler + * @dwork: Pointer to defer work + * + * Return: none + */ +void +__qdf_defer_delayed_func(struct work_struct *dwork) +{ + __qdf_delayed_work_t *ctx = container_of(dwork, __qdf_delayed_work_t, + dwork.work); + if (ctx->fn == NULL) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "BugCheck: Callback is not initilized while creating delayed work queue"); + return; + } + ctx->fn(ctx->arg); +} +qdf_export_symbol(__qdf_defer_delayed_func); diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_event.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_event.c new file mode 100644 index 0000000000000000000000000000000000000000..a4494b36a5dbd76c4d6e4f10e0c2dbb5219175b9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_event.c @@ -0,0 +1,389 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_event.c + * + * This source file contains linux specific definitions for QDF event APIs + * The APIs mentioned in this file are used for initializing, setting, + * resetting, destroying an event and waiting on an occurrence of an event + * among multiple events. + */ + +/* Include Files */ +#include "qdf_event.h" +#include "qdf_mc_timer.h" +#include + +struct qdf_evt_node { + qdf_list_node_t node; + qdf_event_t *pevent; +}; + +#define MAX_WAIT_EVENTS 10 + +static qdf_list_t qdf_wait_event_list; +static qdf_spinlock_t qdf_wait_event_lock; + +/* Function Definitions and Documentation */ + +/** + * qdf_event_create() - initializes a QDF event + * @event: Pointer to the opaque event object to initialize + * + * The qdf_event_create() function initializes the specified event. Upon + * successful initialization, the state of the event becomes initialized + * and not signalled. + * + * An event must be initialized before it may be used in any other event + * functions. + * Attempting to initialize an already initialized event results in + * a failure. 
+ * + * Return: QDF status + */ +QDF_STATUS qdf_event_create(qdf_event_t *event) +{ + QDF_BUG(event); + if (!event) + return QDF_STATUS_E_FAULT; + + /* check for 'already initialized' event */ + QDF_BUG(event->cookie != LINUX_EVENT_COOKIE); + if (event->cookie == LINUX_EVENT_COOKIE) + return QDF_STATUS_E_BUSY; + + /* initialize new event */ + init_completion(&event->complete); + event->cookie = LINUX_EVENT_COOKIE; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_event_create); + +/** + * qdf_event_set() - sets a QDF event + * @event: The event to set to the signalled state + * + * The state of the specified event is set to signalled by calling + * qdf_event_set(). + * + * Any threads waiting on the event as a result of a qdf_event_wait() will + * be unblocked and available to be scheduled for execution when the event + * is signaled by a call to qdf_event_set(). + * + * Return: QDF status + */ +QDF_STATUS qdf_event_set(qdf_event_t *event) +{ + QDF_BUG(event); + if (!event) + return QDF_STATUS_E_FAULT; + + /* ensure event is initialized */ + QDF_BUG(event->cookie == LINUX_EVENT_COOKIE); + if (event->cookie != LINUX_EVENT_COOKIE) + return QDF_STATUS_E_INVAL; + + complete(&event->complete); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_event_set); + +/** + * qdf_event_reset() - resets a QDF event + * @event: The event to set to the NOT signalled state + * + * This function isn't required for Linux. Therefore, it doesn't do much. + * + * The state of the specified event is set to 'NOT signalled' by calling + * qdf_event_reset(). The state of the event remains NOT signalled until an + * explicit call to qdf_event_set(). + * + * This function sets the event to a NOT signalled state even if the event was + * signalled multiple times before being signaled. 
+ * + * Return: QDF status + */ +QDF_STATUS qdf_event_reset(qdf_event_t *event) +{ + QDF_BUG(event); + if (!event) + return QDF_STATUS_E_FAULT; + + /* ensure event is initialized */ + QDF_BUG(event->cookie == LINUX_EVENT_COOKIE); + if (event->cookie != LINUX_EVENT_COOKIE) + return QDF_STATUS_E_INVAL; + + /* (re)initialize event */ + event->force_set = false; + INIT_COMPLETION(event->complete); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_event_reset); + +/** + * qdf_event_destroy() - Destroys a QDF event + * @event: The event object to be destroyed. + * + * This function doesn't do much in Linux. There is no need for the caller + * to explicitly destroy an event after use. + * + * The os_event_destroy() function shall destroy the event object + * referenced by event. After a successful return from qdf_event_destroy() + * the event object becomes, in effect, uninitialized. + * + * A destroyed event object can be reinitialized using qdf_event_create(); + * the results of otherwise referencing the object after it has been destroyed + * are undefined. Calls to QDF event functions to manipulate the lock such + * as qdf_event_set() will fail if the event is destroyed. Therefore, + * don't use the event after it has been destroyed until it has + * been re-initialized. + * + * Return: QDF status + */ +QDF_STATUS qdf_event_destroy(qdf_event_t *event) +{ + QDF_BUG(event); + if (!event) + return QDF_STATUS_E_FAULT; + + /* ensure event is initialized */ + QDF_BUG(event->cookie == LINUX_EVENT_COOKIE); + if (event->cookie != LINUX_EVENT_COOKIE) + return QDF_STATUS_E_INVAL; + + /* make sure nobody is waiting on the event */ + complete_all(&event->complete); + + /* destroy the event */ + memset(event, 0, sizeof(qdf_event_t)); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_event_destroy); + +/** + * qdf_wait_single_event() - Waits for a single event to be set. + * This API waits for the event to be set. + * + * @event: Pointer to an event to wait on. 
+ * @timeout: Timeout value (in milliseconds). This function returns + * if this interval elapses, regardless if any of the events have + * been set. An input value of 0 for this timeout parameter means + * to wait infinitely, meaning a timeout will never occur. + * + * Return: QDF status + */ +QDF_STATUS qdf_wait_single_event(qdf_event_t *event, uint32_t timeout) +{ + QDF_BUG(!in_interrupt()); + if (in_interrupt()) + return QDF_STATUS_E_FAULT; + + QDF_BUG(event); + if (!event) + return QDF_STATUS_E_FAULT; + + /* ensure event is initialized */ + QDF_BUG(event->cookie == LINUX_EVENT_COOKIE); + if (event->cookie != LINUX_EVENT_COOKIE) + return QDF_STATUS_E_INVAL; + + if (timeout) { + long ret; + + /* update the timeout if it's on an emulation platform */ + timeout *= qdf_timer_get_multiplier(); + ret = wait_for_completion_timeout(&event->complete, + msecs_to_jiffies(timeout)); + + if (ret <= 0) + return QDF_STATUS_E_TIMEOUT; + } else { + wait_for_completion(&event->complete); + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_wait_single_event); + +/** + * qdf_complete_wait_events() - Sets all the events which are in the list. + * + * This function traverses the list of events and sets all of them. It + * sets the flag force_set as TRUE to indicate that these events have + * been forcefully set. 
+ * + * Return: None + */ +void qdf_complete_wait_events(void) +{ + struct qdf_evt_node *event_node = NULL; + qdf_list_node_t *list_node = NULL; + QDF_STATUS status; + + if (qdf_list_empty(&qdf_wait_event_list)) + return; + + qdf_spin_lock(&qdf_wait_event_lock); + qdf_list_peek_front(&qdf_wait_event_list, + &list_node); + + while (list_node != NULL) { + event_node = qdf_container_of(list_node, + struct qdf_evt_node, node); + + event_node->pevent->force_set = true; + qdf_event_set(event_node->pevent); + + status = qdf_list_peek_next(&qdf_wait_event_list, + &event_node->node, &list_node); + + if (!QDF_IS_STATUS_SUCCESS(status)) + break; + } + qdf_spin_unlock(&qdf_wait_event_lock); +} +qdf_export_symbol(qdf_complete_wait_events); + +/** + * qdf_wait_for_event_completion() - Waits for an event to be set. + * + * @event: Pointer to an event to wait on. + * @timeout: Timeout value (in milliseconds). + * + * This function adds the event in a list and waits on it until it + * is set or the timeout duration elapses. The purpose of waiting + * is considered complete only if the event is set and the flag + * force_set is FALSE, it returns success in this case. In other + * cases it returns appropriate error status. 
+ * + * Return: QDF status + */ +QDF_STATUS qdf_wait_for_event_completion(qdf_event_t *event, uint32_t timeout) +{ + struct qdf_evt_node *event_node; + QDF_STATUS status; + + QDF_BUG(!in_interrupt()); + if (in_interrupt()) + return QDF_STATUS_E_FAULT; + + QDF_BUG(event); + if (!event) + return QDF_STATUS_E_FAULT; + + /* ensure event is initialized */ + QDF_BUG(event->cookie == LINUX_EVENT_COOKIE); + if (event->cookie != LINUX_EVENT_COOKIE) + return QDF_STATUS_E_INVAL; + + event_node = qdf_mem_malloc(sizeof(*event_node)); + if (!event_node) { + qdf_err("Out of memory"); + return QDF_STATUS_E_NOMEM; + } + + event_node->pevent = event; + + qdf_spin_lock(&qdf_wait_event_lock); + status = qdf_list_insert_back(&qdf_wait_event_list, &event_node->node); + qdf_spin_unlock(&qdf_wait_event_lock); + + if (QDF_STATUS_SUCCESS != status) { + qdf_err("Failed to insert event into tracking list"); + goto free_node; + } + + if (timeout) { + long ret; + + /* update the timeout if it's on an emulation platform */ + timeout *= qdf_timer_get_multiplier(); + ret = wait_for_completion_timeout(&event->complete, + msecs_to_jiffies(timeout)); + + if (ret <= 0) { + status = QDF_STATUS_E_TIMEOUT; + goto list_remove; + } + } else { + wait_for_completion(&event->complete); + } + + /* if event was forcefully completed, return failure */ + if (event->force_set) + status = QDF_STATUS_E_FAULT; + +list_remove: + qdf_spin_lock(&qdf_wait_event_lock); + qdf_list_remove_node(&qdf_wait_event_list, &event_node->node); + qdf_spin_unlock(&qdf_wait_event_lock); + +free_node: + qdf_mem_free(event_node); + + return status; +} +qdf_export_symbol(qdf_wait_for_event_completion); + +/** + * qdf_event_list_init() - Creates a list and spinlock for events. + * + * This function creates a list for maintaining events on which threads + * wait for completion. A spinlock is also created to protect related + * oprations. 
+ * + * Return: None + */ +void qdf_event_list_init(void) +{ + qdf_list_create(&qdf_wait_event_list, MAX_WAIT_EVENTS); + qdf_spinlock_create(&qdf_wait_event_lock); +} +qdf_export_symbol(qdf_event_list_init); + +/** + * qdf_event_list_destroy() - Destroys list and spinlock created for events. + * + * This function destroys the list and spinlock created for events on which + * threads wait for completion. + * + * Return: None + */ +void qdf_event_list_destroy(void) +{ + qdf_list_destroy(&qdf_wait_event_list); + qdf_spinlock_destroy(&qdf_wait_event_lock); +} +qdf_export_symbol(qdf_event_list_destroy); + +QDF_STATUS qdf_exit_thread(QDF_STATUS status) +{ + if (status == QDF_STATUS_SUCCESS) + do_exit(0); + else + do_exit(SIGKILL); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_exit_thread); diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_file.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_file.c new file mode 100644 index 0000000000000000000000000000000000000000..340afd0a02f583a4a54be8eae0aed2c0255c467d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_file.c @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include "qdf_file.h" +#include "qdf_mem.h" +#include "qdf_module.h" +#include "qdf_status.h" +#include "qdf_trace.h" +#include "qdf_types.h" + +QDF_STATUS qdf_file_read(const char *path, char **out_buf) +{ + int errno; + const struct firmware *fw; + char *buf; + + *out_buf = NULL; + + errno = request_firmware(&fw, path, NULL); + if (errno) { + qdf_err("Failed to read file %s", path); + return QDF_STATUS_E_FAILURE; + } + + /* qdf_mem_malloc zeros new memory; +1 size ensures null-termination */ + buf = qdf_mem_malloc(fw->size + 1); + if (!buf) { + qdf_err("Failed to allocate file buffer of %zuB", fw->size + 1); + release_firmware(fw); + return QDF_STATUS_E_NOMEM; + } + + qdf_mem_copy(buf, fw->data, fw->size); + release_firmware(fw); + *out_buf = buf; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_file_read); + +void qdf_file_buf_free(char *file_buf) +{ + QDF_BUG(file_buf); + if (!file_buf) + return; + + qdf_mem_free(file_buf); +} +qdf_export_symbol(qdf_file_buf_free); + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_idr.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_idr.c new file mode 100644 index 0000000000000000000000000000000000000000..b6341e2cb72f7c74bba30ea1ad04011e9973f207 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_idr.c @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_idr + * This file provides the ability to map an ID to a pointer + */ + +/* Include files */ +#include +#include + +#define QDF_IDR_START 0x100 +#define QDF_IDR_END 0 + +static int qdf_idr_gpf_flag(void) +{ + if (in_interrupt() || irqs_disabled() || in_atomic()) + return GFP_ATOMIC; + + return GFP_KERNEL; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0) +/** + * __qdf_idr_alloc() - Allocates an unused ID + * @idp: pointer to qdf idr + * @ptr: pointer to be associated with the new ID + * @start: the minimum ID + * @end: the maximum ID + * + * Return: new ID + */ +static inline int32_t +__qdf_idr_alloc(qdf_idr *idp, void *ptr, int32_t start, int32_t end) +{ + int32_t id = 0; + + idr_get_new(&idp->idr, ptr, &id); + + return id; +} +#else +static inline int32_t +__qdf_idr_alloc(qdf_idr *idp, void *ptr, int32_t start, int32_t end) +{ + return idr_alloc(&idp->idr, ptr, start, end, qdf_idr_gpf_flag()); +} +#endif + +QDF_STATUS qdf_idr_create(qdf_idr *idp) +{ + if (!idp) + return QDF_STATUS_E_INVAL; + + qdf_spinlock_create(&idp->lock); + + idr_init(&idp->idr); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(qdf_idr_create); + +QDF_STATUS qdf_idr_destroy(qdf_idr *idp) +{ + if (!idp) + return QDF_STATUS_E_INVAL; + + qdf_spinlock_destroy(&idp->lock); + idr_destroy(&idp->idr); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(qdf_idr_destroy); + +QDF_STATUS qdf_idr_alloc(qdf_idr *idp, void *ptr, int32_t *id) +{ + int local_id; + + if (!idp || !ptr) + return QDF_STATUS_E_INVAL; + + qdf_spinlock_acquire(&idp->lock); + local_id = __qdf_idr_alloc(idp, ptr, QDF_IDR_START, QDF_IDR_END); + 
qdf_spinlock_release(&idp->lock); + if (local_id < QDF_IDR_START) + return QDF_STATUS_E_FAILURE; + + *id = local_id; + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(qdf_idr_alloc); + +QDF_STATUS qdf_idr_remove(qdf_idr *idp, int32_t id) +{ + if (!idp || id < QDF_IDR_START) + return QDF_STATUS_E_INVAL; + + qdf_spinlock_acquire(&idp->lock); + if (idr_find(&idp->idr, id)) + idr_remove(&idp->idr, id); + qdf_spinlock_release(&idp->lock); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(qdf_idr_remove); + +QDF_STATUS qdf_idr_find(qdf_idr *idp, int32_t id, void **ptr) +{ + if (!ptr || (id < QDF_IDR_START)) + return QDF_STATUS_E_INVAL; + + qdf_spinlock_acquire(&idp->lock); + *ptr = idr_find(&idp->idr, id); + qdf_spinlock_release(&idp->lock); + if (!(*ptr)) + return QDF_STATUS_E_INVAL; + else + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(qdf_idr_find); + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_ipa.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_ipa.c new file mode 100644 index 0000000000000000000000000000000000000000..093c79901132ab83725c6ebfcfd7828d25d065db --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_ipa.c @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_ipa.c + * + * This source file contains linux specific definitions for QDF IPA APIs + */ + +/* Include Files */ +#include + +static uint8_t __qdf_to_ipa_wlan_event(int qdf_ipa_event) +{ + uint8_t ipa_event; + + switch (qdf_ipa_event) { + case QDF_IPA_CLIENT_CONNECT: + ipa_event = WLAN_CLIENT_CONNECT; + break; + case QDF_IPA_CLIENT_DISCONNECT: + ipa_event = WLAN_CLIENT_DISCONNECT; + break; + case QDF_IPA_AP_CONNECT: + ipa_event = WLAN_AP_CONNECT; + break; + case QDF_IPA_AP_DISCONNECT: + ipa_event = WLAN_AP_DISCONNECT; + break; + case QDF_IPA_STA_CONNECT: + ipa_event = WLAN_STA_CONNECT; + break; + case QDF_IPA_STA_DISCONNECT: + ipa_event = WLAN_STA_DISCONNECT; + break; + case QDF_IPA_CLIENT_CONNECT_EX: + ipa_event = WLAN_CLIENT_CONNECT_EX; + break; + case QDF_SWITCH_TO_SCC: + ipa_event = WLAN_SWITCH_TO_SCC; + break; + case QDF_SWITCH_TO_MCC: + ipa_event = WLAN_SWITCH_TO_MCC; + break; + case QDF_WDI_ENABLE: + ipa_event = WLAN_WDI_ENABLE; + break; + case QDF_WDI_DISABLE: + ipa_event = WLAN_WDI_DISABLE; + break; + case QDF_FWR_SSR_BEFORE_SHUTDOWN: + ipa_event = WLAN_FWR_SSR_BEFORE_SHUTDOWN; + break; + case QDF_IPA_WLAN_EVENT_MAX: + default: + ipa_event = IPA_WLAN_EVENT_MAX; + break; + } + + return ipa_event; +} + +void __qdf_ipa_set_meta_msg_type(__qdf_ipa_msg_meta_t *meta, int type) +{ + meta->msg_type = __qdf_to_ipa_wlan_event(type); +} diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_list.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_list.c new file mode 100644 index 0000000000000000000000000000000000000000..a44b6aa5a2435e9ade9207097a9d1b9ffacd2721 --- /dev/null +++ 
b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_list.c @@ -0,0 +1,250 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_list.c + * + * QCA driver framework list manipulation APIs. QDF linked list + * APIs are NOT thread safe so make sure to use appropriate locking mechanisms + * to assure operations on the list are thread safe. 
+ */ + +/* Include files */ +#include +#include + +/* Function declarations and documenation */ + +QDF_STATUS qdf_list_insert_before(qdf_list_t *list, + qdf_list_node_t *new_node, qdf_list_node_t *node) +{ + list_add_tail(new_node, node); + list->count++; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_insert_before); + +QDF_STATUS qdf_list_insert_after(qdf_list_t *list, + qdf_list_node_t *new_node, qdf_list_node_t *node) +{ + list_add(new_node, node); + list->count++; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_insert_after); + +/** + * qdf_list_insert_front() - insert input node at front of the list + * @list: Pointer to list + * @node: Pointer to input node + * + * Return: QDF status + */ +QDF_STATUS qdf_list_insert_front(qdf_list_t *list, qdf_list_node_t *node) +{ + list_add(node, &list->anchor); + list->count++; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_insert_front); + +/** + * qdf_list_insert_back() - insert input node at back of the list + * @list: Pointer to list + * @node: Pointer to input node + * + * Return: QDF status + */ +QDF_STATUS qdf_list_insert_back(qdf_list_t *list, qdf_list_node_t *node) +{ + list_add_tail(node, &list->anchor); + list->count++; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_insert_back); + +/** + * qdf_list_insert_back_size() - insert input node at back of list and save + * list size + * @list: Pointer to list + * @node: Pointer to input node + * @p_size: Pointer to store list size + * + * Return: QDF status + */ +QDF_STATUS qdf_list_insert_back_size(qdf_list_t *list, + qdf_list_node_t *node, uint32_t *p_size) +{ + list_add_tail(node, &list->anchor); + list->count++; + *p_size = list->count; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_insert_back_size); + +/** + * qdf_list_remove_front() - remove node from front of the list + * @list: Pointer to list + * @node2: Double pointer to store the node which is removed from list + * + * Return: QDF status + 
*/ +QDF_STATUS qdf_list_remove_front(qdf_list_t *list, qdf_list_node_t **node2) +{ + struct list_head *listptr; + + if (list_empty(&list->anchor)) + return QDF_STATUS_E_EMPTY; + + listptr = list->anchor.next; + *node2 = listptr; + list_del(list->anchor.next); + list->count--; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_remove_front); + +/** + * qdf_list_remove_back() - remove node from end of the list + * @list: Pointer to list + * @node2: Double pointer to store node which is removed from list + * + * Return: QDF status + */ +QDF_STATUS qdf_list_remove_back(qdf_list_t *list, qdf_list_node_t **node2) +{ + struct list_head *listptr; + + if (list_empty(&list->anchor)) + return QDF_STATUS_E_EMPTY; + + listptr = list->anchor.prev; + *node2 = listptr; + list_del(list->anchor.prev); + list->count--; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_remove_back); + +/** + * qdf_list_has_node() - check if a node is in a list + * @list: pointer to the list being searched + * @node: pointer to the node to search for + * + * It is expected that the list being checked is locked + * when this function is being called. + * + * Return: true if the node is in the list + */ +bool qdf_list_has_node(qdf_list_t *list, qdf_list_node_t *node) +{ + qdf_list_node_t *tmp; + + list_for_each(tmp, &list->anchor) { + if (tmp == node) + return true; + } + return false; +} + +/** + * qdf_list_remove_node() - remove input node from list + * @list: Pointer to list + * @node_to_remove: Pointer to node which needs to be removed + * + * verifies that the node is in the list before removing it. + * It is expected that the list being removed from is locked + * when this function is being called. 
+ * + * Return: QDF status + */ +QDF_STATUS qdf_list_remove_node(qdf_list_t *list, + qdf_list_node_t *node_to_remove) +{ + if (list_empty(&list->anchor)) + return QDF_STATUS_E_EMPTY; + + list_del(node_to_remove); + list->count--; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_remove_node); + +/** + * qdf_list_peek_front() - peek front node from list + * @list: Pointer to list + * @node2: Double pointer to store peeked node pointer + * + * Return: QDF status + */ +QDF_STATUS qdf_list_peek_front(qdf_list_t *list, qdf_list_node_t **node2) +{ + struct list_head *listptr; + + if (list_empty(&list->anchor)) + return QDF_STATUS_E_EMPTY; + + listptr = list->anchor.next; + *node2 = listptr; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_peek_front); + +/** + * qdf_list_peek_next() - peek next node of input node in the list + * @list: Pointer to list + * @node: Pointer to input node + * @node2: Double pointer to store peeked node pointer + * + * Return: QDF status + */ +QDF_STATUS qdf_list_peek_next(qdf_list_t *list, + qdf_list_node_t *node, + qdf_list_node_t **node2) +{ + if (!list || !node || !node2) + return QDF_STATUS_E_FAULT; + + if (list_empty(&list->anchor)) + return QDF_STATUS_E_EMPTY; + + if (node->next == &list->anchor) + return QDF_STATUS_E_EMPTY; + + *node2 = node->next; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_list_peek_next); + +/** + * qdf_list_empty() - check if the list is empty + * @list: pointer to the list + * + * Return: true if the list is empty and false otherwise. 
+ */ +bool qdf_list_empty(qdf_list_t *list) +{ + return list_empty(&list->anchor); +} +qdf_export_symbol(qdf_list_empty); diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_lock.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_lock.c new file mode 100644 index 0000000000000000000000000000000000000000..90e1fdd38afa169a89be3a1b29699d7e452d3747 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_lock.c @@ -0,0 +1,875 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include + +#include +#include +#ifdef CONFIG_MCL +#include +#include +#endif +#include + +/** + * qdf_mutex_create() - Initialize a mutex + * @m: mutex to initialize + * + * Returns: QDF_STATUS + * =0 success + * else fail status + */ +#undef qdf_mutex_create +QDF_STATUS qdf_mutex_create(qdf_mutex_t *lock, const char *func, int line) +{ + /* check for invalid pointer */ + if (lock == NULL) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: NULL pointer passed in", __func__); + return QDF_STATUS_E_FAULT; + } + /* check for 'already initialized' lock */ + if (LINUX_LOCK_COOKIE == lock->cookie) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: already initialized lock", __func__); + return QDF_STATUS_E_BUSY; + } + + if (in_interrupt()) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s cannot be called from interrupt context!!!", + __func__); + return QDF_STATUS_E_FAULT; + } + + qdf_lock_stats_create(&lock->stats, func, line); + + /* initialize new lock */ + mutex_init(&lock->m_lock); + lock->cookie = LINUX_LOCK_COOKIE; + lock->state = LOCK_RELEASED; + lock->process_id = 0; + lock->refcount = 0; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mutex_create); + +/** + * qdf_mutex_acquire() - acquire a QDF lock + * @lock: Pointer to the opaque lock object to acquire + * + * A lock object is acquired by calling qdf_mutex_acquire(). If the lock + * is already locked, the calling thread shall block until the lock becomes + * available. This operation shall return with the lock object referenced by + * lock in the locked state with the calling thread as its owner. 
+ * + * Return: + * QDF_STATUS_SUCCESS: lock was successfully initialized + * QDF failure reason codes: lock is not initialized and can't be used + */ +QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *lock) +{ + int rc; + /* check for invalid pointer */ + if (lock == NULL) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: NULL pointer passed in", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAULT; + } + /* check if lock refers to an initialized object */ + if (LINUX_LOCK_COOKIE != lock->cookie) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: uninitialized lock", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + + if (in_interrupt()) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s cannot be called from interrupt context!!!", + __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAULT; + } + if ((lock->process_id == current->pid) && + (lock->state == LOCK_ACQUIRED)) { + lock->refcount++; +#ifdef QDF_NESTED_LOCK_DEBUG + pe_err("%s: %x %d %d", __func__, lock, current->pid, + lock->refcount); +#endif + return QDF_STATUS_SUCCESS; + } + + BEFORE_LOCK(lock, mutex_is_locked(&lock->m_lock)); + /* acquire a Lock */ + mutex_lock(&lock->m_lock); + AFTER_LOCK(lock, __func__); + rc = mutex_is_locked(&lock->m_lock); + if (rc == 0) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: unable to lock mutex (rc = %d)", __func__, rc); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } +#ifdef QDF_NESTED_LOCK_DEBUG + pe_err("%s: %x %d", __func__, lock, current->pid); +#endif + if (LOCK_DESTROYED != lock->state) { + lock->process_id = current->pid; + lock->refcount++; + lock->state = LOCK_ACQUIRED; + return QDF_STATUS_SUCCESS; + } + + /* lock is already destroyed */ + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Lock is already destroyed", __func__); + mutex_unlock(&lock->m_lock); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(qdf_mutex_acquire); + +/** + * qdf_mutex_release() - release a 
QDF lock + * @lock: Pointer to the opaque lock object to be released + * + * qdf_mutex_release() function shall release the lock object + * referenced by 'lock'. + * + * If a thread attempts to release a lock that it unlocked or is not + * initialized, an error is returned. + * + * Return: + * QDF_STATUS_SUCCESS: lock was successfully initialized + * QDF failure reason codes: lock is not initialized and can't be used + */ +QDF_STATUS qdf_mutex_release(qdf_mutex_t *lock) +{ + /* check for invalid pointer */ + if (lock == NULL) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: NULL pointer passed in", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAULT; + } + + /* check if lock refers to an uninitialized object */ + if (LINUX_LOCK_COOKIE != lock->cookie) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: uninitialized lock", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + + if (in_interrupt()) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s cannot be called from interrupt context!!!", + __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAULT; + } + + /* current_thread = get_current_thread_id(); + * Check thread ID of caller against thread ID + * of the thread which acquire the lock + */ + if (lock->process_id != current->pid) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: current task pid does not match original task pid!!", + __func__); +#ifdef QDF_NESTED_LOCK_DEBUG + pe_err("%s: Lock held by=%d being released by=%d", + __func__, lock->process_id, current->pid); +#endif + QDF_ASSERT(0); + return QDF_STATUS_E_PERM; + } + if ((lock->process_id == current->pid) && + (lock->state == LOCK_ACQUIRED)) { + if (lock->refcount > 0) + lock->refcount--; + } +#ifdef QDF_NESTED_LOCK_DEBUG + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, "%s: %x %d %d", __func__, lock, lock->process_id, + lock->refcount); +#endif + if (lock->refcount) + return QDF_STATUS_SUCCESS; + + lock->process_id = 0; + 
lock->refcount = 0; + lock->state = LOCK_RELEASED; + /* release a Lock */ + BEFORE_UNLOCK(lock, 0); + mutex_unlock(&lock->m_lock); +#ifdef QDF_NESTED_LOCK_DEBUG + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, "%s: Freeing lock %x %d %d", lock, lock->process_id, + lock->refcount); +#endif + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mutex_release); + +/** + * qdf_wake_lock_name() - This function returns the name of the wakelock + * @lock: Pointer to the wakelock + * + * This function returns the name of the wakelock + * + * Return: Pointer to the name if it is valid or a default string + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) +const char *qdf_wake_lock_name(qdf_wake_lock_t *lock) +{ + if (lock->name) + return lock->name; + return "UNNAMED_WAKELOCK"; +} +#else +const char *qdf_wake_lock_name(qdf_wake_lock_t *lock) +{ + return "NO_WAKELOCK_SUPPORT"; +} +#endif +qdf_export_symbol(qdf_wake_lock_name); + +/** + * qdf_wake_lock_create() - initializes a wake lock + * @lock: The wake lock to initialize + * @name: Name of wake lock + * + * Return: + * QDF status success: if wake lock is initialized + * QDF status failure: if wake lock was not initialized + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) +QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name) +{ + wakeup_source_init(lock, name); + return QDF_STATUS_SUCCESS; +} +#else +QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name) +{ + return QDF_STATUS_SUCCESS; +} +#endif +qdf_export_symbol(qdf_wake_lock_create); + +/** + * qdf_wake_lock_acquire() - acquires a wake lock + * @lock: The wake lock to acquire + * @reason: Reason for wakelock + * + * Return: + * QDF status success: if wake lock is acquired + * QDF status failure: if wake lock was not acquired + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) +QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason) +{ + host_diag_log_wlock(reason, 
qdf_wake_lock_name(lock), + WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT, + WIFI_POWER_EVENT_WAKELOCK_TAKEN); + __pm_stay_awake(lock); + + return QDF_STATUS_SUCCESS; +} +#else +QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason) +{ + return QDF_STATUS_SUCCESS; +} +#endif +qdf_export_symbol(qdf_wake_lock_acquire); + +/** + * qdf_wake_lock_timeout_acquire() - acquires a wake lock with a timeout + * @lock: The wake lock to acquire + * @reason: Reason for wakelock + * + * Return: + * QDF status success: if wake lock is acquired + * QDF status failure: if wake lock was not acquired + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) +QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock, uint32_t msec) +{ + pm_wakeup_ws_event(lock, msec, true); + return QDF_STATUS_SUCCESS; +} +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0) +QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock, uint32_t msec) +{ + /* Wakelock for Rx is frequent. + * It is reported only during active debug + */ + __pm_wakeup_event(lock, msec); + return QDF_STATUS_SUCCESS; +} +#else /* LINUX_VERSION_CODE */ +QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock, uint32_t msec) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* LINUX_VERSION_CODE */ +qdf_export_symbol(qdf_wake_lock_timeout_acquire); + +/** + * qdf_wake_lock_release() - releases a wake lock + * @lock: the wake lock to release + * @reason: Reason for wakelock + * + * Return: + * QDF status success: if wake lock is acquired + * QDF status failure: if wake lock was not acquired + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) +QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason) +{ + host_diag_log_wlock(reason, qdf_wake_lock_name(lock), + WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT, + WIFI_POWER_EVENT_WAKELOCK_RELEASED); + __pm_relax(lock); + + return QDF_STATUS_SUCCESS; +} +#else +QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason) +{ + return 
QDF_STATUS_SUCCESS; +} +#endif +qdf_export_symbol(qdf_wake_lock_release); + +/** + * qdf_wake_lock_destroy() - destroys a wake lock + * @lock: The wake lock to destroy + * + * Return: + * QDF status success: if wake lock is acquired + * QDF status failure: if wake lock was not acquired + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) +QDF_STATUS qdf_wake_lock_destroy(qdf_wake_lock_t *lock) +{ + wakeup_source_trash(lock); + return QDF_STATUS_SUCCESS; +} +#else +QDF_STATUS qdf_wake_lock_destroy(qdf_wake_lock_t *lock) +{ + return QDF_STATUS_SUCCESS; +} +#endif +qdf_export_symbol(qdf_wake_lock_destroy); + +#ifdef CONFIG_MCL +/** + * qdf_runtime_pm_get() - do a get opperation on the device + * + * A get opperation will prevent a runtime suspend until a + * corresponding put is done. This api should be used when sending + * data. + * + * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED, + * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!! + * + * return: success if the bus is up and a get has been issued + * otherwise an error code. + */ +QDF_STATUS qdf_runtime_pm_get(void) +{ + void *ol_sc; + int ret; + + ol_sc = cds_get_context(QDF_MODULE_ID_HIF); + + if (ol_sc == NULL) { + QDF_ASSERT(0); + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: HIF context is null!", __func__); + return QDF_STATUS_E_INVAL; + } + + ret = hif_pm_runtime_get(ol_sc); + + if (ret) + return QDF_STATUS_E_FAILURE; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_runtime_pm_get); + +/** + * qdf_runtime_pm_put() - do a put opperation on the device + * + * A put opperation will allow a runtime suspend after a corresponding + * get was done. This api should be used when sending data. 
+ * + * This api will return a failure if the hif module hasn't been + * initialized + * + * return: QDF_STATUS_SUCCESS if the put is performed + */ +QDF_STATUS qdf_runtime_pm_put(void) +{ + void *ol_sc; + int ret; + + ol_sc = cds_get_context(QDF_MODULE_ID_HIF); + + if (ol_sc == NULL) { + QDF_ASSERT(0); + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: HIF context is null!", __func__); + return QDF_STATUS_E_INVAL; + } + + ret = hif_pm_runtime_put(ol_sc); + + if (ret) + return QDF_STATUS_E_FAILURE; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_runtime_pm_put); + +/** + * qdf_runtime_pm_prevent_suspend() - prevent a runtime bus suspend + * @lock: an opaque context for tracking + * + * The lock can only be acquired once per lock context and is tracked. + * + * return: QDF_STATUS_SUCCESS or failure code. + */ +QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock) +{ + void *ol_sc; + int ret; + + ol_sc = cds_get_context(QDF_MODULE_ID_HIF); + + if (ol_sc == NULL) { + QDF_ASSERT(0); + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: HIF context is null!", __func__); + return QDF_STATUS_E_INVAL; + } + + ret = hif_pm_runtime_prevent_suspend(ol_sc, lock->lock); + + if (ret) + return QDF_STATUS_E_FAILURE; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_runtime_pm_prevent_suspend); + +/** + * qdf_runtime_pm_allow_suspend() - prevent a runtime bus suspend + * @lock: an opaque context for tracking + * + * The lock can only be acquired once per lock context and is tracked. + * + * return: QDF_STATUS_SUCCESS or failure code. 
+ */ +QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock) +{ + void *ol_sc; + int ret; + + ol_sc = cds_get_context(QDF_MODULE_ID_HIF); + if (ol_sc == NULL) { + QDF_ASSERT(0); + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: HIF context is null!", __func__); + return QDF_STATUS_E_INVAL; + } + + ret = hif_pm_runtime_allow_suspend(ol_sc, lock->lock); + if (ret) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_runtime_pm_allow_suspend); + +/** + * qdf_runtime_lock_init() - initialize runtime lock + * @name: name of the runtime lock + * + * Initialize a runtime pm lock. This lock can be used + * to prevent the runtime pm system from putting the bus + * to sleep. + * + * Return: runtime_pm_lock_t + */ +QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name) +{ + int ret = hif_runtime_lock_init(lock, name); + + if (ret) + return QDF_STATUS_E_NOMEM; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_runtime_lock_init); + +/** + * qdf_runtime_lock_deinit() - deinitialize runtime pm lock + * @lock: the lock to deinitialize + * + * Ensures the lock is released. Frees the runtime lock. 
+ * + * Return: void + */ +void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock) +{ + void *hif_ctx = cds_get_context(QDF_MODULE_ID_HIF); + hif_runtime_lock_deinit(hif_ctx, lock->lock); +} +qdf_export_symbol(qdf_runtime_lock_deinit); + +#else + +QDF_STATUS qdf_runtime_pm_get(void) +{ + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_runtime_pm_get); + +QDF_STATUS qdf_runtime_pm_put(void) +{ + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_runtime_pm_put); + +QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock) +{ + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_runtime_pm_prevent_suspend); + +QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock) +{ + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_runtime_pm_allow_suspend); + +QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name) +{ + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_runtime_lock_init); + +void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock) +{ +} +qdf_export_symbol(qdf_runtime_lock_deinit); + +#endif /* CONFIG_MCL */ + +/** + * qdf_spinlock_acquire() - acquires a spin lock + * @lock: Spin lock to acquire + * + * Return: + * QDF status success: if wake lock is acquired + */ +QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock) +{ + spin_lock(&lock->lock.spinlock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_spinlock_acquire); + + +/** + * qdf_spinlock_release() - release a spin lock + * @lock: Spin lock to release + * + * Return: + * QDF status success : if wake lock is acquired + */ +QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock) +{ + spin_unlock(&lock->lock.spinlock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_spinlock_release); + +/** + * qdf_mutex_destroy() - destroy a QDF lock + * @lock: Pointer to the opaque lock object to be destroyed + * + * function shall destroy the lock object referenced by lock. 
After a + * successful return from qdf_mutex_destroy() + * the lock object becomes, in effect, uninitialized. + * + * A destroyed lock object can be reinitialized using qdf_mutex_create(); + * the results of otherwise referencing the object after it has been destroyed + * are undefined. Calls to QDF lock functions to manipulate the lock such + * as qdf_mutex_acquire() will fail if the lock is destroyed. Therefore, + * don't use the lock after it has been destroyed until it has + * been re-initialized. + * + * Return: + * QDF_STATUS_SUCCESS: lock was successfully initialized + * QDF failure reason codes: lock is not initialized and can't be used + */ +QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock) +{ + /* check for invalid pointer */ + if (NULL == lock) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: NULL pointer passed in", __func__); + return QDF_STATUS_E_FAULT; + } + + if (LINUX_LOCK_COOKIE != lock->cookie) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: uninitialized lock", __func__); + return QDF_STATUS_E_INVAL; + } + + if (in_interrupt()) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s cannot be called from interrupt context!!!", + __func__); + return QDF_STATUS_E_FAULT; + } + + /* check if lock is released */ + if (!mutex_trylock(&lock->m_lock)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: lock is not released", __func__); + return QDF_STATUS_E_BUSY; + } + lock->cookie = 0; + lock->state = LOCK_DESTROYED; + lock->process_id = 0; + lock->refcount = 0; + + qdf_lock_stats_destroy(&lock->stats); + mutex_unlock(&lock->m_lock); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mutex_destroy); + +/** + * qdf_spin_trylock_bh_outline() - spin trylock bottomhalf + * @lock: spinlock object + * Return: nonzero if lock is acquired + */ +int qdf_spin_trylock_bh_outline(qdf_spinlock_t *lock) +{ + return qdf_spin_trylock_bh(lock); +} +qdf_export_symbol(qdf_spin_trylock_bh_outline); + +/** + * 
qdf_spin_lock_bh_outline() - locks the spinlock in soft irq context + * @lock: spinlock object pointer + * Return: none + */ +void qdf_spin_lock_bh_outline(qdf_spinlock_t *lock) +{ + qdf_spin_lock_bh(lock); +} +qdf_export_symbol(qdf_spin_lock_bh_outline); + +/** + * qdf_spin_unlock_bh_outline() - unlocks spinlock in soft irq context + * @lock: spinlock object pointer + * Return: none + */ +void qdf_spin_unlock_bh_outline(qdf_spinlock_t *lock) +{ + qdf_spin_unlock_bh(lock); +} +qdf_export_symbol(qdf_spin_unlock_bh_outline); + +#if QDF_LOCK_STATS_LIST +struct qdf_lock_cookie { + union { + struct { + struct lock_stats *stats; + const char *func; + int line; + } cookie; + struct { + struct qdf_lock_cookie *next; + } empty_node; + } u; +}; + +#ifndef QDF_LOCK_STATS_LIST_SIZE +#define QDF_LOCK_STATS_LIST_SIZE 256 +#endif + +static qdf_spinlock_t qdf_lock_list_spinlock; +static struct qdf_lock_cookie lock_cookies[QDF_LOCK_STATS_LIST_SIZE]; +static struct qdf_lock_cookie *lock_cookie_freelist; +static qdf_atomic_t lock_cookie_get_failures; +static qdf_atomic_t lock_cookie_untracked_num; +/* dummy value */ +#define DUMMY_LOCK_COOKIE 0xc00c1e + +/** + * qdf_is_lock_cookie - check if memory is a valid lock cookie + * + * return true if the memory is within the range of the lock cookie + * memory. + */ +static bool qdf_is_lock_cookie(struct qdf_lock_cookie *lock_cookie) +{ + return lock_cookie >= &lock_cookies[0] && + lock_cookie <= &lock_cookies[QDF_LOCK_STATS_LIST_SIZE-1]; +} + +/** + * qdf_is_lock_cookie_free() - check if the lock cookie is on the freelist + * @lock_cookie: lock cookie to check + * + * Check that the next field of the lock cookie points to a lock cookie. + * currently this is only true if the cookie is on the freelist. + * + * Checking for the function and line being NULL and 0 should also have worked. 
+ */ +static bool qdf_is_lock_cookie_free(struct qdf_lock_cookie *lock_cookie) +{ + struct qdf_lock_cookie *tmp = lock_cookie->u.empty_node.next; + + return qdf_is_lock_cookie(tmp) || (tmp == NULL); +} + +static struct qdf_lock_cookie *qdf_get_lock_cookie(void) +{ + struct qdf_lock_cookie *lock_cookie; + + qdf_spin_lock_bh(&qdf_lock_list_spinlock); + lock_cookie = lock_cookie_freelist; + if (lock_cookie_freelist) + lock_cookie_freelist = lock_cookie_freelist->u.empty_node.next; + qdf_spin_unlock_bh(&qdf_lock_list_spinlock); + return lock_cookie; +} + +static void __qdf_put_lock_cookie(struct qdf_lock_cookie *lock_cookie) +{ + if (!qdf_is_lock_cookie(lock_cookie)) + QDF_BUG(0); + + lock_cookie->u.empty_node.next = lock_cookie_freelist; + lock_cookie_freelist = lock_cookie; +} + +static void qdf_put_lock_cookie(struct qdf_lock_cookie *lock_cookie) +{ + qdf_spin_lock_bh(&qdf_lock_list_spinlock); + __qdf_put_lock_cookie(lock_cookie); + qdf_spin_unlock_bh(&qdf_lock_list_spinlock); +} + +void qdf_lock_stats_init(void) +{ + int i; + + for (i = 0; i < QDF_LOCK_STATS_LIST_SIZE; i++) + __qdf_put_lock_cookie(&lock_cookies[i]); + + /* stats must be allocated for the spinlock before the cookie, + * otherwise this qdf_lock_list_spinlock wouldnt get initialized + * properly + */ + qdf_spinlock_create(&qdf_lock_list_spinlock); + qdf_atomic_init(&lock_cookie_get_failures); + qdf_atomic_init(&lock_cookie_untracked_num); +} + +void qdf_lock_stats_deinit(void) +{ + int i; + + qdf_spinlock_destroy(&qdf_lock_list_spinlock); + for (i = 0; i < QDF_LOCK_STATS_LIST_SIZE; i++) { + if (!qdf_is_lock_cookie_free(&lock_cookies[i])) + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG, + "%s: lock_not_destroyed, fun: %s, line %d", + __func__, lock_cookies[i].u.cookie.func, + lock_cookies[i].u.cookie.line); + } +} + +/* allocated separate memory in case the lock memory is freed without + * running the deinitialization code. The cookie list will not be + * corrupted. 
+ */ +void qdf_lock_stats_cookie_create(struct lock_stats *stats, + const char *func, int line) +{ + struct qdf_lock_cookie *cookie = qdf_get_lock_cookie(); + + if (cookie == NULL) { + int count; + + qdf_atomic_inc(&lock_cookie_get_failures); + count = qdf_atomic_inc_return(&lock_cookie_untracked_num); + stats->cookie = (void *) DUMMY_LOCK_COOKIE; + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG, + "%s: cookie allocation failure, using dummy (%s:%d) count %d", + __func__, func, line, count); + return; + } + + stats->cookie = cookie; + stats->cookie->u.cookie.stats = stats; + stats->cookie->u.cookie.func = func; + stats->cookie->u.cookie.line = line; +} + +void qdf_lock_stats_cookie_destroy(struct lock_stats *stats) +{ + struct qdf_lock_cookie *cookie = stats->cookie; + + if (cookie == NULL) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Double cookie destroy", __func__); + QDF_ASSERT(0); + return; + } + + stats->cookie = NULL; + if (cookie == (void *)DUMMY_LOCK_COOKIE) { + qdf_atomic_dec(&lock_cookie_untracked_num); + return; + } + + cookie->u.cookie.stats = NULL; + cookie->u.cookie.func = NULL; + cookie->u.cookie.line = 0; + + qdf_put_lock_cookie(cookie); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_lro.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_lro.c new file mode 100644 index 0000000000000000000000000000000000000000..0745daadbaaa09b6a158d0d39226af7430ddfda3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_lro.c @@ -0,0 +1,497 @@ +/* + * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_lro.c + * QCA driver framework(QDF) Large Receive Offload + */ + +#include +#include +#include + +#include +#include + +/** + * qdf_lro_desc_pool_init() - Initialize the free pool of LRO + * descriptors + * @lro_desc_pool: free pool of the LRO descriptors + * @lro_mgr: LRO manager + * + * Initialize a list that holds the free LRO descriptors + * + * Return: none + */ +static void qdf_lro_desc_pool_init(struct qdf_lro_desc_pool *lro_desc_pool, + struct net_lro_mgr *lro_mgr) +{ + int i; + + INIT_LIST_HEAD(&lro_desc_pool->lro_free_list_head); + + for (i = 0; i < QDF_LRO_DESC_POOL_SZ; i++) { + lro_desc_pool->lro_desc_array[i].lro_desc = + &lro_mgr->lro_arr[i]; + list_add_tail(&lro_desc_pool->lro_desc_array[i].lro_node, + &lro_desc_pool->lro_free_list_head); + } +} + +/** + * qdf_lro_desc_info_init() - Initialize the LRO descriptors + * @qdf_info: QDF LRO data structure + * + * Initialize the free pool of LRO descriptors and the entries + * of the hash table + * + * Return: none + */ +static void qdf_lro_desc_info_init(struct qdf_lro_s *qdf_info) +{ + int i; + + /* Initialize pool of free LRO desc.*/ + qdf_lro_desc_pool_init(&qdf_info->lro_desc_info.lro_desc_pool, + qdf_info->lro_mgr); + + /* Initialize the hash table of LRO desc.*/ + for (i = 0; i < QDF_LRO_DESC_TABLE_SZ; i++) { + /* initialize the flows in the hash table */ + INIT_LIST_HEAD(&qdf_info->lro_desc_info. 
+ lro_hash_table[i].lro_desc_list); + } + +} + +/** + * qdf_lro_get_skb_header() - LRO callback function + * @skb: network buffer + * @ip_hdr: contains a pointer to the IP header + * @tcpudp_hdr: contains a pointer to the TCP header + * @hdr_flags: indicates if this is a TCP, IPV4 frame + * @priv: private driver specific opaque pointer + * + * Get the IP and TCP headers from the skb + * + * Return: 0 - success, < 0 - failure + */ +static int qdf_lro_get_skb_header(struct sk_buff *skb, void **ip_hdr, + void **tcpudp_hdr, u64 *hdr_flags, void *priv) +{ + if (QDF_NBUF_CB_RX_IPV6_PROTO(skb)) { + hdr_flags = 0; + return -EINVAL; + } + + *hdr_flags |= (LRO_IPV4 | LRO_TCP); + (*ip_hdr) = skb->data; + (*tcpudp_hdr) = skb->data + QDF_NBUF_CB_RX_TCP_OFFSET(skb); + return 0; +} + +/** + * qdf_lro_init() - LRO initialization function + * + * Return: LRO context + */ +qdf_lro_ctx_t qdf_lro_init(void) +{ + struct qdf_lro_s *lro_ctx; + size_t lro_info_sz, lro_mgr_sz, desc_arr_sz, desc_pool_sz; + size_t hash_table_sz; + uint8_t *lro_mem_ptr; + + /* + * Allocate all the LRO data structures at once and then carve + * them up as needed + */ + lro_info_sz = sizeof(struct qdf_lro_s); + lro_mgr_sz = sizeof(struct net_lro_mgr); + desc_arr_sz = + (QDF_LRO_DESC_POOL_SZ * sizeof(struct net_lro_desc)); + desc_pool_sz = + (QDF_LRO_DESC_POOL_SZ * sizeof(struct qdf_lro_desc_entry)); + hash_table_sz = + (sizeof(struct qdf_lro_desc_table) * QDF_LRO_DESC_TABLE_SZ); + + lro_mem_ptr = qdf_mem_malloc(lro_info_sz + lro_mgr_sz + desc_arr_sz + + desc_pool_sz + hash_table_sz); + + if (unlikely(!lro_mem_ptr)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "Unable to allocate memory for LRO"); + return NULL; + } + + lro_ctx = (struct qdf_lro_s *)lro_mem_ptr; + lro_mem_ptr += lro_info_sz; + /* LRO manager */ + lro_ctx->lro_mgr = (struct net_lro_mgr *)lro_mem_ptr; + lro_mem_ptr += lro_mgr_sz; + + /* LRO decriptor array */ + lro_ctx->lro_mgr->lro_arr = (struct net_lro_desc *)lro_mem_ptr; + 
lro_mem_ptr += desc_arr_sz; + + /* LRO descriptor pool */ + lro_ctx->lro_desc_info.lro_desc_pool.lro_desc_array = + (struct qdf_lro_desc_entry *)lro_mem_ptr; + lro_mem_ptr += desc_pool_sz; + + /* hash table to store the LRO descriptors */ + lro_ctx->lro_desc_info.lro_hash_table = + (struct qdf_lro_desc_table *)lro_mem_ptr; + + /* Initialize the LRO descriptors */ + qdf_lro_desc_info_init(lro_ctx); + + /* LRO TODO - NAPI or RX thread */ + lro_ctx->lro_mgr->features |= LRO_F_NAPI; + + lro_ctx->lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; + lro_ctx->lro_mgr->max_aggr = QDF_LRO_MAX_AGGR_SIZE; + lro_ctx->lro_mgr->get_skb_header = qdf_lro_get_skb_header; + lro_ctx->lro_mgr->ip_summed = CHECKSUM_UNNECESSARY; + lro_ctx->lro_mgr->max_desc = QDF_LRO_DESC_POOL_SZ; + + return lro_ctx; +} + +/** + * qdf_lro_deinit() - LRO deinitialization function + * @lro_ctx: LRO context + * + * Return: nothing + */ +void qdf_lro_deinit(qdf_lro_ctx_t lro_ctx) +{ + if (likely(lro_ctx)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "LRO instance %pK is being freed", lro_ctx); + qdf_mem_free(lro_ctx); + } +} + +/** + * qdf_lro_tcp_flow_match() - function to check for a flow match + * @iph: IP header + * @tcph: TCP header + * @lro_desc: LRO decriptor + * + * Checks if the descriptor belongs to the same flow as the one + * indicated by the TCP and IP header. 
+ * + * Return: true - flow match, false - flow does not match + */ +static inline bool qdf_lro_tcp_flow_match(struct net_lro_desc *lro_desc, + struct iphdr *iph, + struct tcphdr *tcph) +{ + if ((lro_desc->tcph->source != tcph->source) || + (lro_desc->tcph->dest != tcph->dest) || + (lro_desc->iph->saddr != iph->saddr) || + (lro_desc->iph->daddr != iph->daddr)) + return false; + + return true; + +} + +/** + * qdf_lro_desc_find() - LRO descriptor look-up function + * + * @lro_ctx: LRO context + * @skb: network buffer + * @iph: IP header + * @tcph: TCP header + * @flow_hash: toeplitz hash + * @lro_desc: LRO descriptor to be returned + * + * Look-up the LRO descriptor in the hash table based on the + * flow ID toeplitz. If the flow is not found, allocates a new + * LRO descriptor and places it in the hash table + * + * Return: 0 - success, < 0 - failure + */ +static int qdf_lro_desc_find(struct qdf_lro_s *lro_ctx, + struct sk_buff *skb, struct iphdr *iph, struct tcphdr *tcph, + uint32_t flow_hash, struct net_lro_desc **lro_desc) +{ + uint32_t i; + struct qdf_lro_desc_table *lro_hash_table; + struct list_head *ptr; + struct qdf_lro_desc_entry *entry; + struct qdf_lro_desc_pool *free_pool; + struct qdf_lro_desc_info *desc_info = &lro_ctx->lro_desc_info; + + *lro_desc = NULL; + i = flow_hash & QDF_LRO_DESC_TABLE_SZ_MASK; + + lro_hash_table = &desc_info->lro_hash_table[i]; + + if (unlikely(!lro_hash_table)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "Invalid hash entry"); + QDF_ASSERT(0); + return -EINVAL; + } + + /* Check if this flow exists in the descriptor list */ + list_for_each(ptr, &lro_hash_table->lro_desc_list) { + struct net_lro_desc *tmp_lro_desc = NULL; + + entry = list_entry(ptr, struct qdf_lro_desc_entry, lro_node); + tmp_lro_desc = entry->lro_desc; + if (qdf_lro_tcp_flow_match(entry->lro_desc, iph, tcph)) { + *lro_desc = entry->lro_desc; + return 0; + } + } + + /* no existing flow found, a new LRO desc needs to be allocated */ + free_pool = 
&lro_ctx->lro_desc_info.lro_desc_pool; + entry = list_first_entry_or_null( + &free_pool->lro_free_list_head, + struct qdf_lro_desc_entry, lro_node); + if (unlikely(!entry)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "Could not allocate LRO desc!"); + return -ENOMEM; + } + + list_del_init(&entry->lro_node); + + if (unlikely(!entry->lro_desc)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "entry->lro_desc is NULL!"); + return -EINVAL; + } + + memset(entry->lro_desc, 0, sizeof(struct net_lro_desc)); + + /* + * lro_desc->active should be 0 and lro_desc->tcp_rcv_tsval + * should be 0 for newly allocated lro descriptors + */ + list_add_tail(&entry->lro_node, + &lro_hash_table->lro_desc_list); + + *lro_desc = entry->lro_desc; + return 0; +} + +/** + * qdf_lro_get_info() - Update the LRO information + * + * @lro_ctx: LRO context + * @nbuf: network buffer + * @info: LRO related information passed in by the caller + * @plro_desc: lro information returned as output + * + * Look-up the LRO descriptor based on the LRO information and + * the network buffer provided. 
Update the skb cb with the + * descriptor found + * + * Return: true: LRO eligible false: LRO ineligible + */ +bool qdf_lro_get_info(qdf_lro_ctx_t lro_ctx, qdf_nbuf_t nbuf, + struct qdf_lro_info *info, + void **plro_desc) +{ + struct net_lro_desc *lro_desc; + struct iphdr *iph; + struct tcphdr *tcph; + int hw_lro_eligible = + QDF_NBUF_CB_RX_LRO_ELIGIBLE(nbuf) && + (!QDF_NBUF_CB_RX_TCP_PURE_ACK(nbuf)); + + if (unlikely(!lro_ctx)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "Invalid LRO context"); + return false; + } + + if (!hw_lro_eligible) + return false; + + iph = (struct iphdr *)info->iph; + tcph = (struct tcphdr *)info->tcph; + if (0 != qdf_lro_desc_find(lro_ctx, nbuf, iph, tcph, + QDF_NBUF_CB_RX_FLOW_ID(nbuf), + (struct net_lro_desc **)plro_desc)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "finding the LRO desc failed"); + return false; + } + + lro_desc = (struct net_lro_desc *)(*plro_desc); + if (unlikely(!lro_desc)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "finding the LRO desc failed"); + return false; + } + + /* if this is not the first skb, check the timestamp option */ + if (lro_desc->tcp_rcv_tsval) { + if (tcph->doff == 8) { + __be32 *topt = (__be32 *)(tcph + 1); + + if (*topt != htonl((TCPOPT_NOP << 24) + |(TCPOPT_NOP << 16) + | (TCPOPT_TIMESTAMP << 8) + | TCPOLEN_TIMESTAMP)) + return true; + + /* timestamp should be in right order */ + topt++; + if (after(ntohl(lro_desc->tcp_rcv_tsval), + ntohl(*topt))) + return false; + + /* timestamp reply should not be zero */ + topt++; + if (*topt == 0) + return false; + } + } + + return true; +} + +/** + * qdf_lro_desc_free() - Free the LRO descriptor + * @desc: LRO descriptor + * @lro_ctx: LRO context + * + * Return the LRO descriptor to the free pool + * + * Return: none + */ +void qdf_lro_desc_free(qdf_lro_ctx_t lro_ctx, + void *data) +{ + struct qdf_lro_desc_entry *entry; + struct net_lro_mgr *lro_mgr; + struct net_lro_desc *arr_base; + struct qdf_lro_desc_info 
 *desc_info;
+ int i;
+ struct net_lro_desc *desc = (struct net_lro_desc *)data;
+
+ qdf_assert(desc);
+ qdf_assert(lro_ctx);
+
+ if (unlikely(!desc || !lro_ctx)) {
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+ "invalid input");
+ return;
+ }
+
+ lro_mgr = lro_ctx->lro_mgr;
+ arr_base = lro_mgr->lro_arr;
+ i = desc - arr_base;
+
+ if (unlikely(i >= QDF_LRO_DESC_POOL_SZ)) {
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+ "invalid index %d", i);
+ return;
+ }
+
+ desc_info = &lro_ctx->lro_desc_info;
+ entry = &desc_info->lro_desc_pool.lro_desc_array[i];
+
+ list_del_init(&entry->lro_node);
+
+ list_add_tail(&entry->lro_node, &desc_info->
+ lro_desc_pool.lro_free_list_head);
+}
+
+/**
+ * qdf_lro_flush() - LRO flush API
+ * @lro_ctx: LRO context
+ *
+ * Flush all the packets aggregated in the LRO manager for all
+ * the flows
+ *
+ * Return: none
+ */
+void qdf_lro_flush(qdf_lro_ctx_t lro_ctx)
+{
+ struct net_lro_mgr *lro_mgr = lro_ctx->lro_mgr;
+ int i;
+
+ for (i = 0; i < lro_mgr->max_desc; i++) {
+ if (lro_mgr->lro_arr[i].active) {
+ qdf_lro_desc_free(lro_ctx, &lro_mgr->lro_arr[i]);
+ lro_flush_desc(lro_mgr, &lro_mgr->lro_arr[i]);
+ }
+ }
+}
+/**
+ * qdf_lro_get_desc() - LRO descriptor look-up function
+ * @iph: IP header
+ * @tcph: TCP header
+ * @lro_arr: Array of LRO descriptors
+ * @lro_mgr: LRO manager
+ *
+ * Looks-up the LRO descriptor for a given flow
+ *
+ * Return: LRO descriptor
+ */
+static struct net_lro_desc *qdf_lro_get_desc(struct net_lro_mgr *lro_mgr,
+ struct net_lro_desc *lro_arr,
+ struct iphdr *iph,
+ struct tcphdr *tcph)
+{
+ int i;
+
+ for (i = 0; i < lro_mgr->max_desc; i++) {
+ if (lro_arr[i].active)
+ if (qdf_lro_tcp_flow_match(&lro_arr[i], iph, tcph))
+ return &lro_arr[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * qdf_lro_flush_pkt() - function to flush the LRO flow
+ * @info: LRO related information passed by the caller
+ * @lro_ctx: LRO context
+ *
+ * Flush all the packets aggregated in the LRO manager for the
+ * flow
indicated by the TCP and IP header + * + * Return: none + */ +void qdf_lro_flush_pkt(qdf_lro_ctx_t lro_ctx, + struct qdf_lro_info *info) +{ + struct net_lro_desc *lro_desc; + struct net_lro_mgr *lro_mgr = lro_ctx->lro_mgr; + struct iphdr *iph = (struct iphdr *) info->iph; + struct tcphdr *tcph = (struct tcphdr *) info->tcph; + + lro_desc = qdf_lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph); + + if (lro_desc) { + /* statistics */ + qdf_lro_desc_free(lro_ctx, lro_desc); + lro_flush_desc(lro_mgr, lro_desc); + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_mc_timer.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_mc_timer.c new file mode 100644 index 0000000000000000000000000000000000000000..57d542b7e21250ea8fbf2454f2a546023757a7ae --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_mc_timer.c @@ -0,0 +1,899 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
 + */
+
+/**
+ * DOC: qdf_mc_timer
+ * QCA driver framework timer APIs serialized to MC thread
+ */
+
+/* Include Files */
+#include
+#include
+#include
+#include "qdf_lock.h"
+#include "qdf_list.h"
+#include "qdf_mem.h"
+#include
+
+/* Preprocessor definitions and constants */
+#define LINUX_TIMER_COOKIE 0x12341234
+#define LINUX_INVALID_TIMER_COOKIE 0xfeedface
+#define TMR_INVALID_ID (0)
+
+/* qdf timer multiplier */
+#ifdef QCA_WIFI_NAPIER_EMULATION
+static uint32_t g_qdf_timer_multiplier = 100;
+#else
+static uint32_t g_qdf_timer_multiplier = 1;
+#endif
+
+inline void qdf_timer_set_multiplier(uint32_t multiplier)
+{
+ g_qdf_timer_multiplier = multiplier;
+}
+qdf_export_symbol(qdf_timer_set_multiplier);
+
+inline uint32_t qdf_timer_get_multiplier(void)
+{
+ return g_qdf_timer_multiplier;
+}
+qdf_export_symbol(qdf_timer_get_multiplier);
+
+/* Type declarations */
+
+/* Static Variable Definitions */
+static unsigned int persistent_timer_count;
+static qdf_mutex_t persistent_timer_count_lock;
+
+static void (*scheduler_timer_callback)(qdf_mc_timer_t *);
+void qdf_register_mc_timer_callback(void (*callback) (qdf_mc_timer_t *))
+{
+ scheduler_timer_callback = callback;
+}
+
+qdf_export_symbol(qdf_register_mc_timer_callback);
+
+/* Function declarations and documentation */
+
+/**
+ * qdf_try_allowing_sleep() - clean up timer states after it has been deactivated
+ * @type: timer type
+ *
+ * Clean up timer states after it has been deactivated; check and try to allow
+ * sleep after a timer has been stopped or expired.
+ * + * Return: none + */ +void qdf_try_allowing_sleep(QDF_TIMER_TYPE type) +{ + if (QDF_TIMER_TYPE_WAKE_APPS == type) { + + persistent_timer_count--; + if (0 == persistent_timer_count) { + /* since the number of persistent timers has + * decreased from 1 to 0, the timer should allow + * sleep + */ + } + } +} +qdf_export_symbol(qdf_try_allowing_sleep); + +/** + * qdf_mc_timer_get_current_state() - get the current state of the timer + * @timer: Pointer to timer object + * + * Return: + * QDF_TIMER_STATE - qdf timer state + */ +QDF_TIMER_STATE qdf_mc_timer_get_current_state(qdf_mc_timer_t *timer) +{ + if (NULL == timer) { + QDF_ASSERT(0); + return QDF_TIMER_STATE_UNUSED; + } + + switch (timer->state) { + case QDF_TIMER_STATE_STOPPED: + case QDF_TIMER_STATE_STARTING: + case QDF_TIMER_STATE_RUNNING: + case QDF_TIMER_STATE_UNUSED: + return timer->state; + default: + QDF_ASSERT(0); + return QDF_TIMER_STATE_UNUSED; + } +} +qdf_export_symbol(qdf_mc_timer_get_current_state); + +/** + * qdf_timer_module_init() - initializes a QDF timer module. + * + * This API initializes the QDF timer module. This needs to be called + * exactly once prior to using any QDF timers. + * + * Return: none + */ +void qdf_timer_module_init(void) +{ + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH, + "Initializing the QDF MC timer module"); + qdf_mutex_create(&persistent_timer_count_lock); +} +qdf_export_symbol(qdf_timer_module_init); + +#ifdef TIMER_MANAGER + +static qdf_list_t qdf_timer_domains[QDF_DEBUG_DOMAIN_COUNT]; +static qdf_spinlock_t qdf_timer_list_lock; + +static inline qdf_list_t *qdf_timer_list_get(enum qdf_debug_domain domain) +{ + return &qdf_timer_domains[domain]; +} + +/** + * qdf_mc_timer_manager_init() - initialize QDF debug timer manager + * + * This API initializes QDF timer debug functionality. 
+ * + * Return: none + */ +void qdf_mc_timer_manager_init(void) +{ + int i; + + for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) + qdf_list_create(&qdf_timer_domains[i], 1000); + qdf_spinlock_create(&qdf_timer_list_lock); +} +qdf_export_symbol(qdf_mc_timer_manager_init); + +static void qdf_mc_timer_print_list(qdf_list_t *timers) +{ + QDF_STATUS status; + qdf_list_node_t *node; + + qdf_spin_lock_irqsave(&qdf_timer_list_lock); + status = qdf_list_peek_front(timers, &node); + while (QDF_IS_STATUS_SUCCESS(status)) { + qdf_mc_timer_node_t *timer_node = (qdf_mc_timer_node_t *)node; + const char *filename = kbasename(timer_node->file_name); + uint32_t line = timer_node->line_num; + + qdf_spin_unlock_irqrestore(&qdf_timer_list_lock); + qdf_err("timer Leak@ File %s, @Line %u", filename, line); + qdf_spin_lock_irqsave(&qdf_timer_list_lock); + + status = qdf_list_peek_next(timers, node, &node); + } + qdf_spin_unlock_irqrestore(&qdf_timer_list_lock); +} + +void qdf_mc_timer_check_for_leaks(void) +{ + enum qdf_debug_domain current_domain = qdf_debug_domain_get(); + qdf_list_t *timers = qdf_timer_list_get(current_domain); + + if (qdf_list_empty(timers)) + return; + + qdf_err("Timer leaks detected in %s domain!", + qdf_debug_domain_name(current_domain)); + qdf_mc_timer_print_list(timers); + QDF_DEBUG_PANIC("Previously reported timer leaks detected"); +} + +static void qdf_mc_timer_free_leaked_timers(qdf_list_t *timers) +{ + QDF_STATUS status; + qdf_list_node_t *node; + + qdf_spin_lock_irqsave(&qdf_timer_list_lock); + status = qdf_list_remove_front(timers, &node); + while (QDF_IS_STATUS_SUCCESS(status)) { + qdf_mem_free(node); + status = qdf_list_remove_front(timers, &node); + } + qdf_spin_unlock_irqrestore(&qdf_timer_list_lock); +} + +/** + * qdf_timer_clean() - clean up QDF timer debug functionality + * + * This API cleans up QDF timer debug functionality and prints which QDF timers + * are leaked. This is called during driver unload. 
+ * + * Return: none + */ +static void qdf_timer_clean(void) +{ + bool leaks_detected = false; + int i; + + /* detect and print leaks */ + for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) { + qdf_list_t *timers = &qdf_timer_domains[i]; + + if (qdf_list_empty(timers)) + continue; + + leaks_detected = true; + + qdf_err("\nTimer leaks detected in the %s (Id %d) domain!\n", + qdf_debug_domain_name(i), i); + qdf_mc_timer_print_list(timers); + } + + /* we're done if there were no leaks */ + if (!leaks_detected) + return; + + /* panic, if enabled */ + QDF_DEBUG_PANIC("Previously reported timer leaks detected"); + + /* if we didn't crash, release the leaked timers */ + for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) + qdf_mc_timer_free_leaked_timers(&qdf_timer_domains[i]); +} +qdf_export_symbol(qdf_timer_clean); + +/** + * qdf_mc_timer_manager_exit() - exit QDF timer debug functionality + * + * This API exists QDF timer debug functionality + * + * Return: none + */ +void qdf_mc_timer_manager_exit(void) +{ + int i; + + qdf_timer_clean(); + + for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) + qdf_list_destroy(&qdf_timer_domains[i]); + + qdf_spinlock_destroy(&qdf_timer_list_lock); +} +qdf_export_symbol(qdf_mc_timer_manager_exit); +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) +static void __os_mc_timer_shim(struct timer_list *os_timer) +{ + qdf_mc_timer_platform_t *platform_info_ptr = + qdf_container_of(os_timer, + qdf_mc_timer_platform_t, + timer); + qdf_mc_timer_t *timer = qdf_container_of(platform_info_ptr, + qdf_mc_timer_t, + platform_info); + + scheduler_timer_callback(timer); +} + +static void qdf_mc_timer_setup(qdf_mc_timer_t *timer, + QDF_TIMER_TYPE timer_type) +{ + uint32_t flags = 0; + + if (QDF_TIMER_TYPE_SW == timer_type) + flags |= TIMER_DEFERRABLE; + + timer_setup(&timer->platform_info.timer, + __os_mc_timer_shim, flags); +} +#else +static void __os_mc_timer_shim(unsigned long data) +{ + qdf_mc_timer_t *timer = (qdf_mc_timer_t *)data; + + 
scheduler_timer_callback(timer); +} + +static void qdf_mc_timer_setup(qdf_mc_timer_t *timer, + QDF_TIMER_TYPE timer_type) +{ + if (QDF_TIMER_TYPE_SW == timer_type) + init_timer_deferrable(&timer->platform_info.timer); + else + init_timer(&timer->platform_info.timer); + + timer->platform_info.timer.function = __os_mc_timer_shim; + timer->platform_info.timer.data = (unsigned long)timer; +} +#endif +/** + * qdf_mc_timer_init() - initialize a QDF timer + * @timer: Pointer to timer object + * @timer_type: Type of timer + * @callback: Callback to be called after timer expiry + * @ser_data: User data which will be passed to callback function + * + * This API initializes a QDF timer object. + * + * qdf_mc_timer_init() initializes a QDF timer object. A timer must be + * initialized by calling qdf_mc_timer_initialize() before it may be used in + * any other timer functions. + * + * Attempting to initialize timer that is already initialized results in + * a failure. A destroyed timer object can be re-initialized with a call to + * qdf_mc_timer_init(). The results of otherwise referencing the object + * after it has been destroyed are undefined. + * + * Calls to QDF timer functions to manipulate the timer such + * as qdf_mc_timer_set() will fail if the timer is not initialized or has + * been destroyed. Therefore, don't use the timer after it has been + * destroyed until it has been re-initialized. + * + * All callback will be executed within the CDS main thread unless it is + * initialized from the Tx thread flow, in which case it will be executed + * within the tx thread flow. 
+ * + * Return: + * QDF_STATUS_SUCCESS: timer is initialized successfully + * QDF failure status: timer initialization failed + */ +#ifdef TIMER_MANAGER +QDF_STATUS qdf_mc_timer_init_debug(qdf_mc_timer_t *timer, + QDF_TIMER_TYPE timer_type, + qdf_mc_timer_callback_t callback, + void *user_data, char *file_name, + uint32_t line_num) +{ + enum qdf_debug_domain current_domain = qdf_debug_domain_get(); + qdf_list_t *active_timers = qdf_timer_list_get(current_domain); + QDF_STATUS qdf_status; + + /* check for invalid pointer */ + if ((timer == NULL) || (callback == NULL)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Null params being passed", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAULT; + } + + timer->timer_node = qdf_mem_malloc(sizeof(qdf_mc_timer_node_t)); + + if (timer->timer_node == NULL) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Not able to allocate memory for time_node", + __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_NOMEM; + } + + timer->timer_node->file_name = file_name; + timer->timer_node->line_num = line_num; + timer->timer_node->qdf_timer = timer; + + qdf_spin_lock_irqsave(&qdf_timer_list_lock); + qdf_status = qdf_list_insert_front(active_timers, + &timer->timer_node->node); + qdf_spin_unlock_irqrestore(&qdf_timer_list_lock); + if (QDF_STATUS_SUCCESS != qdf_status) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Unable to insert node into List qdf_status %d", + __func__, qdf_status); + } + + /* set the various members of the timer structure + * with arguments passed or with default values + */ + qdf_spinlock_create(&timer->platform_info.spinlock); + qdf_mc_timer_setup(timer, timer_type); + timer->callback = callback; + timer->user_data = user_data; + timer->type = timer_type; + timer->platform_info.cookie = LINUX_TIMER_COOKIE; + timer->platform_info.thread_id = 0; + timer->state = QDF_TIMER_STATE_STOPPED; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mc_timer_init_debug); 
+#else +QDF_STATUS qdf_mc_timer_init(qdf_mc_timer_t *timer, QDF_TIMER_TYPE timer_type, + qdf_mc_timer_callback_t callback, + void *user_data) +{ + /* check for invalid pointer */ + if ((timer == NULL) || (callback == NULL)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Null params being passed", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAULT; + } + + /* set the various members of the timer structure + * with arguments passed or with default values + */ + qdf_spinlock_create(&timer->platform_info.spinlock); + qdf_mc_timer_setup(timer, timer_type); + timer->callback = callback; + timer->user_data = user_data; + timer->type = timer_type; + timer->platform_info.cookie = LINUX_TIMER_COOKIE; + timer->platform_info.thread_id = 0; + timer->state = QDF_TIMER_STATE_STOPPED; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mc_timer_init); +#endif + +/** + * qdf_mc_timer_destroy() - destroy QDF timer + * @timer: Pointer to timer object + * + * qdf_mc_timer_destroy() function shall destroy the timer object. + * After a successful return from \a qdf_mc_timer_destroy() the timer + * object becomes, in effect, uninitialized. + * + * A destroyed timer object can be re-initialized by calling + * qdf_mc_timer_init(). The results of otherwise referencing the object + * after it has been destroyed are undefined. + * + * Calls to QDF timer functions to manipulate the timer, such + * as qdf_mc_timer_set() will fail if the lock is destroyed. Therefore, + * don't use the timer after it has been destroyed until it has + * been re-initialized. 
+ * + * Return: + * QDF_STATUS_SUCCESS - timer is initialized successfully + * QDF failure status - timer initialization failed + */ +#ifdef TIMER_MANAGER +QDF_STATUS qdf_mc_timer_destroy(qdf_mc_timer_t *timer) +{ + enum qdf_debug_domain current_domain = qdf_debug_domain_get(); + qdf_list_t *active_timers = qdf_timer_list_get(current_domain); + QDF_STATUS v_status = QDF_STATUS_SUCCESS; + + /* check for invalid pointer */ + if (NULL == timer) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Null timer pointer being passed", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAULT; + } + + /* Check if timer refers to an uninitialized object */ + if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Cannot destroy uninitialized timer", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + + qdf_spin_lock_irqsave(&qdf_timer_list_lock); + v_status = qdf_list_remove_node(active_timers, + &timer->timer_node->node); + qdf_spin_unlock_irqrestore(&qdf_timer_list_lock); + if (v_status != QDF_STATUS_SUCCESS) { + QDF_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + qdf_mem_free(timer->timer_node); + + qdf_spin_lock_irqsave(&timer->platform_info.spinlock); + + switch (timer->state) { + + case QDF_TIMER_STATE_STARTING: + v_status = QDF_STATUS_E_BUSY; + break; + + case QDF_TIMER_STATE_RUNNING: + /* Stop the timer first */ + del_timer(&(timer->platform_info.timer)); + v_status = QDF_STATUS_SUCCESS; + break; + case QDF_TIMER_STATE_STOPPED: + v_status = QDF_STATUS_SUCCESS; + break; + + case QDF_TIMER_STATE_UNUSED: + v_status = QDF_STATUS_E_ALREADY; + break; + + default: + v_status = QDF_STATUS_E_FAULT; + break; + } + + if (QDF_STATUS_SUCCESS == v_status) { + timer->platform_info.cookie = LINUX_INVALID_TIMER_COOKIE; + timer->state = QDF_TIMER_STATE_UNUSED; + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + qdf_spinlock_destroy(&timer->platform_info.spinlock); + return v_status; + } + + 
qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH, + "%s: Cannot destroy timer in state = %d", __func__, + timer->state); + QDF_ASSERT(0); + + return v_status; +} +qdf_export_symbol(qdf_mc_timer_destroy); + +#else + +/** + * qdf_mc_timer_destroy() - destroy QDF timer + * @timer: Pointer to timer object + * + * qdf_mc_timer_destroy() function shall destroy the timer object. + * After a successful return from \a qdf_mc_timer_destroy() the timer + * object becomes, in effect, uninitialized. + * + * A destroyed timer object can be re-initialized by calling + * qdf_mc_timer_init(). The results of otherwise referencing the object + * after it has been destroyed are undefined. + * + * Calls to QDF timer functions to manipulate the timer, such + * as qdf_mc_timer_set() will fail if the lock is destroyed. Therefore, + * don't use the timer after it has been destroyed until it has + * been re-initialized. + * + * Return: + * QDF_STATUS_SUCCESS - timer is initialized successfully + * QDF failure status - timer initialization failed + */ +QDF_STATUS qdf_mc_timer_destroy(qdf_mc_timer_t *timer) +{ + QDF_STATUS v_status = QDF_STATUS_SUCCESS; + + /* check for invalid pointer */ + if (NULL == timer) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Null timer pointer being passed", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_FAULT; + } + + /* check if timer refers to an uninitialized object */ + if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Cannot destroy uninitialized timer", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + qdf_spin_lock_irqsave(&timer->platform_info.spinlock); + + switch (timer->state) { + + case QDF_TIMER_STATE_STARTING: + v_status = QDF_STATUS_E_BUSY; + break; + + case QDF_TIMER_STATE_RUNNING: + /* Stop the timer first */ + del_timer(&(timer->platform_info.timer)); + v_status = 
QDF_STATUS_SUCCESS; + break; + + case QDF_TIMER_STATE_STOPPED: + v_status = QDF_STATUS_SUCCESS; + break; + + case QDF_TIMER_STATE_UNUSED: + v_status = QDF_STATUS_E_ALREADY; + break; + + default: + v_status = QDF_STATUS_E_FAULT; + break; + } + + if (QDF_STATUS_SUCCESS == v_status) { + timer->platform_info.cookie = LINUX_INVALID_TIMER_COOKIE; + timer->state = QDF_TIMER_STATE_UNUSED; + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + return v_status; + } + + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH, + "%s: Cannot destroy timer in state = %d", __func__, + timer->state); + QDF_ASSERT(0); + + return v_status; +} +qdf_export_symbol(qdf_mc_timer_destroy); +#endif + +/** + * qdf_mc_timer_start() - start a QDF timer object + * @timer: Pointer to timer object + * @expiration_time: Time to expire + * + * qdf_mc_timer_start() function starts a timer to expire after the + * specified interval, thus running the timer callback function when + * the interval expires. + * + * A timer only runs once (a one-shot timer). To re-start the + * timer, qdf_mc_timer_start() has to be called after the timer runs + * or has been cancelled. 
 + *
+ * Return:
+ * QDF_STATUS_SUCCESS: timer is initialized successfully
+ * QDF failure status: timer initialization failed
+ */
+QDF_STATUS qdf_mc_timer_start(qdf_mc_timer_t *timer, uint32_t expiration_time)
+{
+ /* check for invalid pointer */
+ if (NULL == timer) {
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+ "%s Null timer pointer being passed", __func__);
+ QDF_ASSERT(0);
+ return QDF_STATUS_E_INVAL;
+ }
+
+ /* check if timer refers to an uninitialized object */
+ if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) {
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+ "%s: Cannot start uninitialized timer", __func__);
+ QDF_ASSERT(0);
+
+ return QDF_STATUS_E_INVAL;
+ }
+
+ /* check if timer has expiration time less than 10 ms */
+ if (expiration_time < 10) {
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+ "%s: Cannot start a timer with expiration less than 10 ms",
+ __func__);
+ QDF_ASSERT(0);
+ return QDF_STATUS_E_INVAL;
+ }
+
+ /* update expiration time based on if emulation platform */
+ expiration_time *= qdf_timer_get_multiplier();
+
+ /* make sure the remainder of the logic isn't interrupted */
+ qdf_spin_lock_irqsave(&timer->platform_info.spinlock);
+
+ /* ensure if the timer can be started */
+ if (QDF_TIMER_STATE_STOPPED != timer->state) {
+ qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+ "%s: Cannot start timer in state = %d ", __func__,
+ timer->state);
+ return QDF_STATUS_E_ALREADY;
+ }
+
+ /* start the timer */
+ mod_timer(&(timer->platform_info.timer),
+ jiffies + msecs_to_jiffies(expiration_time));
+
+ timer->state = QDF_TIMER_STATE_RUNNING;
+
+ /* get the thread ID on which the timer is being started */
+ timer->platform_info.thread_id = current->pid;
+
+ if (QDF_TIMER_TYPE_WAKE_APPS == timer->type) {
+ persistent_timer_count++;
+ if (1 == persistent_timer_count) {
+ /* since we now have one persistent timer,
+ * we need to disallow sleep
+ *
sleep_negate_okts(sleep_client_handle); + */ + } + } + + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mc_timer_start); + +/** + * qdf_mc_timer_stop() - stop a QDF timer + * @timer: Pointer to timer object + * qdf_mc_timer_stop() function stops a timer that has been started but + * has not expired, essentially cancelling the 'start' request. + * + * After a timer is stopped, it goes back to the state it was in after it + * was created and can be started again via a call to qdf_mc_timer_start(). + * + * Return: + * QDF_STATUS_SUCCESS: timer is initialized successfully + * QDF failure status: timer initialization failed + */ +QDF_STATUS qdf_mc_timer_stop(qdf_mc_timer_t *timer) +{ + /* check for invalid pointer */ + if (NULL == timer) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s Null timer pointer being passed", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + + /* check if timer refers to an uninitialized object */ + if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Cannot stop uninitialized timer", __func__); + QDF_ASSERT(0); + + return QDF_STATUS_E_INVAL; + } + + /* ensure the timer state is correct */ + qdf_spin_lock_irqsave(&timer->platform_info.spinlock); + + if (QDF_TIMER_STATE_RUNNING != timer->state) { + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH, + "%s: Cannot stop timer in state = %d", + __func__, timer->state); + return QDF_STATUS_SUCCESS; + } + + timer->state = QDF_TIMER_STATE_STOPPED; + + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + + del_timer(&(timer->platform_info.timer)); + + qdf_try_allowing_sleep(timer->type); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mc_timer_stop); + +QDF_STATUS qdf_mc_timer_stop_sync(qdf_mc_timer_t *timer) +{ + /* check for invalid pointer */ + if (!timer) { 
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s Null timer pointer being passed", __func__); + QDF_ASSERT(0); + return QDF_STATUS_E_INVAL; + } + + /* check if timer refers to an uninitialized object */ + if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Cannot stop uninitialized timer", __func__); + QDF_ASSERT(0); + + return QDF_STATUS_E_INVAL; + } + + /* ensure the timer state is correct */ + qdf_spin_lock_irqsave(&timer->platform_info.spinlock); + + if (QDF_TIMER_STATE_RUNNING != timer->state) { + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH, + "%s: Cannot stop timer in state = %d", + __func__, timer->state); + return QDF_STATUS_SUCCESS; + } + + timer->state = QDF_TIMER_STATE_STOPPED; + + qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock); + del_timer_sync(&(timer->platform_info.timer)); + + qdf_try_allowing_sleep(timer->type); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mc_timer_stop_sync); +/** + * qdf_mc_timer_get_system_ticks() - get the system time in 10ms ticks + + * qdf_mc_timer_get_system_ticks() function returns the current number + * of timer ticks in 10msec intervals. This function is suitable timestamping + * and calculating time intervals by calculating the difference between two + * timestamps. + * + * Return: + * The current system tick count (in 10msec intervals). This + * function cannot fail. 
+ */ +unsigned long qdf_mc_timer_get_system_ticks(void) +{ + return jiffies_to_msecs(jiffies) / 10; +} +qdf_export_symbol(qdf_mc_timer_get_system_ticks); + +/** + * qdf_mc_timer_get_system_time() - Get the system time in milliseconds + * + * qdf_mc_timer_get_system_time() function returns the number of milliseconds + * that have elapsed since the system was started + * + * Return: + * The current system time in milliseconds + */ +unsigned long qdf_mc_timer_get_system_time(void) +{ + struct timeval tv; + + do_gettimeofday(&tv); + return tv.tv_sec * 1000 + tv.tv_usec / 1000; +} +qdf_export_symbol(qdf_mc_timer_get_system_time); + +s64 qdf_get_monotonic_boottime_ns(void) +{ + struct timespec ts; + + get_monotonic_boottime(&ts); + + return timespec_to_ns(&ts); +} +qdf_export_symbol(qdf_get_monotonic_boottime_ns); + +qdf_time_t qdf_get_time_of_the_day_ms(void) +{ + struct timeval tv; + qdf_time_t local_time; + struct rtc_time tm; + + do_gettimeofday(&tv); + local_time = (qdf_time_t)(tv.tv_sec - (sys_tz.tz_minuteswest * 60)); + rtc_time_to_tm(local_time, &tm); + + return (tm.tm_hour * 60 * 60 * 1000) + + (tm.tm_min * 60 * 1000) + (tm.tm_sec * 1000) + + (tv.tv_usec / 1000); +} +qdf_export_symbol(qdf_get_time_of_the_day_ms); + +/** + * qdf_timer_module_deinit() - Deinitializes a QDF timer module. + * + * This API deinitializes the QDF timer module. 
+ * Return: none + */ +void qdf_timer_module_deinit(void) +{ + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH, + "De-Initializing the QDF MC timer module"); + qdf_mutex_destroy(&persistent_timer_count_lock); +} +qdf_export_symbol(qdf_timer_module_deinit); + +void qdf_get_time_of_the_day_in_hr_min_sec_usec(char *tbuf, int len) +{ + struct timeval tv; + struct rtc_time tm; + unsigned long local_time; + + /* Format the Log time R#: [hr:min:sec.microsec] */ + do_gettimeofday(&tv); + /* Convert rtc to local time */ + local_time = (u32)(tv.tv_sec - (sys_tz.tz_minuteswest * 60)); + rtc_time_to_tm(local_time, &tm); + scnprintf(tbuf, len, + "[%02d:%02d:%02d.%06lu]", + tm.tm_hour, tm.tm_min, tm.tm_sec, tv.tv_usec); +} +qdf_export_symbol(qdf_get_time_of_the_day_in_hr_min_sec_usec); diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c new file mode 100644 index 0000000000000000000000000000000000000000..453bb3bf4f85b8c7eb53de9da6044af5d2d10660 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c @@ -0,0 +1,1877 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
 */

/**
 * DOC: qdf_mem
 * This file provides OS dependent memory management APIs
 */

#include "qdf_debugfs.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_lock.h"
#include "qdf_mc_timer.h"
#include "qdf_module.h"
/* NOTE(review): the targets of the angle-bracket includes below were lost
 * during patch extraction; restore them from the upstream file
 */
#include
#include "qdf_atomic.h"
#include "qdf_str.h"
#include
#include
#include

#ifdef CONFIG_MCL
#include
#else
#define host_log_low_resource_failure(code) do {} while (0)
#endif

#if defined(CONFIG_CNSS)
#include
#endif

#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
#include
#endif

#ifdef MEMORY_DEBUG
#include "qdf_debug_domain.h"
#include

/* Preprocessor Definitions and Constants */
#define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
#define QDF_MEM_WARN_THRESHOLD 300 /* ms */
#define QDF_DEBUG_STRING_SIZE 512

/* per-domain lists of live allocations, used for leak detection */
static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
static qdf_spinlock_t qdf_mem_list_lock;

static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
static qdf_spinlock_t qdf_mem_dma_list_lock;

static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
{
	return &qdf_mem_domains[domain];
}

static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
{
	return &qdf_mem_dma_domains[domain];
}

/**
 * struct qdf_mem_header - memory object to debug
 * @node: node to the list
 * @domain: the active memory domain at time of allocation
 * @freed: flag set during free, used to detect double frees
 *	Use uint8_t so we can detect corruption
 * @file: name of the file the allocation was made from
 * @line: line number of the file the allocation was made from
 * @size: size of the allocation in bytes
 * @caller: Caller of the function for which memory is allocated
 * @header: a known value, used to detect out-of-bounds access
 * @time: timestamp at which allocation was made
 */
struct qdf_mem_header {
	qdf_list_node_t node;
	enum qdf_debug_domain domain;
	uint8_t freed;
	char file[QDF_MEM_FILE_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint64_t header;
	uint64_t time;
};

/* sentinel patterns written immediately before/after each user buffer */
static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;

/* the debug header sits directly in front of the user pointer */
static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
{
	return (struct qdf_mem_header *)ptr - 1;
}

/* for DMA allocations the header is placed after the buffer instead */
static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
							    qdf_size_t size)
{
	return (struct qdf_mem_header *) ((uint8_t *) ptr + size);
}

static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
{
	return (uint64_t *)((void *)(header + 1) + header->size);
}

static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
{
	return (void *)(header + 1);
}

/* number of bytes needed for the qdf memory debug information */
#define QDF_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))

/* number of bytes needed for the qdf dma memory debug information */
#define QDF_DMA_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header))

static void qdf_mem_trailer_init(struct qdf_mem_header *header)
{
	QDF_BUG(header);
	if (!header)
		return;
	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
}

static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
				const char *file, uint32_t line, void *caller)
{
	QDF_BUG(header);
	if (!header)
		return;

	header->domain = qdf_debug_domain_get();
	header->freed = false;

	/* copy the file name, rather than pointing to it */
	qdf_str_lcopy(header->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE);

	header->line = line;
	header->size = size;
	header->caller = caller;
	header->header = WLAN_MEM_HEADER;
	header->time = qdf_get_log_timestamp();
}

/* bitmap of every corruption class the header/trailer checks can report */
enum qdf_mem_validation_bitmap {
	QDF_MEM_BAD_HEADER = 1 << 0,
	QDF_MEM_BAD_TRAILER = 1 << 1,
	QDF_MEM_BAD_SIZE = 1 << 2,
	QDF_MEM_DOUBLE_FREE = 1 << 3,
	QDF_MEM_BAD_FREED = 1 << 4,
	QDF_MEM_BAD_NODE = 1 << 5,
	QDF_MEM_BAD_DOMAIN = 1 << 6,
	QDF_MEM_WRONG_DOMAIN = 1 << 7,
};

/**
 * qdf_mem_validate_list_node() - validate that the node is in a list
 * @qdf_node: node to check for being in a list
 *
 * Return: true if the node validly linked in an anchored doubly linked list
 */
static bool qdf_mem_validate_list_node(qdf_list_node_t *qdf_node)
{
	struct list_head *node = qdf_node;

	/*
	 * if the node is an empty list, it is not tied to an anchor node
	 * and must have been removed with list_del_init
	 */
	if (list_empty(node))
		return false;

	if (!node->prev || !node->next)
		return false;

	if (node->prev->next != node || node->next->prev != node)
		return false;

	return true;
}

static enum qdf_mem_validation_bitmap
qdf_mem_trailer_validate(struct qdf_mem_header *header)
{
	enum qdf_mem_validation_bitmap error_bitmap = 0;

	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
		error_bitmap |= QDF_MEM_BAD_TRAILER;
	return error_bitmap;
}

static enum qdf_mem_validation_bitmap
qdf_mem_header_validate(struct qdf_mem_header *header,
			enum qdf_debug_domain domain)
{
	enum qdf_mem_validation_bitmap error_bitmap = 0;

	if (header->header != WLAN_MEM_HEADER)
		error_bitmap |= QDF_MEM_BAD_HEADER;

	if (header->size > QDF_MEM_MAX_MALLOC)
		error_bitmap |= QDF_MEM_BAD_SIZE;

	/* freed must be exactly true or false; any other value is corruption */
	if (header->freed == true)
		error_bitmap |= QDF_MEM_DOUBLE_FREE;
	else if (header->freed)
		error_bitmap |= QDF_MEM_BAD_FREED;

	if (!qdf_mem_validate_list_node(&header->node))
		error_bitmap |= QDF_MEM_BAD_NODE;

	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
		error_bitmap |= QDF_MEM_BAD_DOMAIN;
	else if (header->domain != domain)
		error_bitmap |= QDF_MEM_WRONG_DOMAIN;

	return error_bitmap;
}

/* log every detected error class, then panic if any error was found */
static void
qdf_mem_header_assert_valid(struct qdf_mem_header *header,
			    enum qdf_debug_domain current_domain,
			    enum qdf_mem_validation_bitmap error_bitmap,
			    const char *file,
			    uint32_t line)
{
	if (!error_bitmap)
		return;

	if (error_bitmap & QDF_MEM_BAD_HEADER)
		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
			header->header, WLAN_MEM_HEADER);

	if (error_bitmap & QDF_MEM_BAD_SIZE)
		qdf_err("Corrupted memory size %u (expected < %d)",
			header->size, QDF_MEM_MAX_MALLOC);

	if (error_bitmap & QDF_MEM_BAD_TRAILER)
		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);

	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
		qdf_err("Memory has previously been freed");

	if (error_bitmap & QDF_MEM_BAD_FREED)
		qdf_err("Corrupted memory freed flag 0x%x", header->freed);

	if (error_bitmap & QDF_MEM_BAD_NODE)
		qdf_err("Corrupted memory header node or double free");

	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
		qdf_err("Corrupted memory domain 0x%x", header->domain);

	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
			qdf_debug_domain_name(header->domain), header->domain,
			qdf_debug_domain_name(current_domain), current_domain);

	QDF_DEBUG_PANIC("Fatal memory error detected @ %s:%d", file, line);
}
#endif /* MEMORY_DEBUG */

u_int8_t prealloc_disabled = 1;
qdf_declare_param(prealloc_disabled, byte);
qdf_export_symbol(prealloc_disabled);

#if defined WLAN_DEBUGFS

/* Debugfs root directory for qdf_mem */
static struct dentry *qdf_mem_debugfs_root;

/**
 * struct __qdf_mem_stat - qdf memory statistics
 * @kmalloc: total kmalloc allocations
 * @dma: total dma allocations
 * @skb: total skb allocations
 */
static struct __qdf_mem_stat {
	qdf_atomic_t kmalloc;
	qdf_atomic_t dma;
	qdf_atomic_t skb;
} qdf_mem_stat;

static inline void qdf_mem_kmalloc_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
}

static inline void qdf_mem_dma_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.dma);
}

void qdf_mem_skb_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.skb);
}

static inline void qdf_mem_kmalloc_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
}

static inline void qdf_mem_dma_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.dma);
}

void qdf_mem_skb_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.skb);
}

#ifdef MEMORY_DEBUG
/* print adapter routing table output to the QDF error trace */
static int qdf_err_printer(void *priv, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
	va_end(args);

	return 0;
}

/* print adapter routing table output to a seq_file (debugfs reads) */
static int seq_printf_printer(void *priv, const char *fmt, ...)
{
	struct seq_file *file = priv;
	va_list args;

	va_start(args, fmt);
	seq_vprintf(file, fmt, args);
	seq_puts(file, "\n");
	va_end(args);

	return 0;
}

/**
 * struct __qdf_mem_info - memory statistics
 * @file: the file which allocated memory
 * @line: the line at which allocation happened
 * @size: the size of allocation
 * @caller: Address of the caller function
 * @count: how many allocations of same type
 * @time: timestamp at which allocation happened
 */
struct __qdf_mem_info {
	char file[QDF_MEM_FILE_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint32_t count;
	uint64_t time;
};

/*
 * The table depth defines the de-duplication proximity scope.
 * A deeper table takes more time, so choose any optimum value.
 */
#define QDF_MEM_STAT_TABLE_SIZE 8

/**
 * qdf_mem_domain_print_header() - memory domain header print logic
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Return: None
 */
static void qdf_mem_domain_print_header(qdf_abstract_print print,
					void *print_priv)
{
	print(print_priv,
	      "--------------------------------------------------------------");
	print(print_priv,
	      " count size total filename caller timestamp");
	print(print_priv,
	      "--------------------------------------------------------------");
}

/**
 * qdf_mem_meta_table_print() - memory metadata table print logic
 * @table: the memory metadata table to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Return: None
 */
static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
				     qdf_abstract_print print,
				     void *print_priv)
{
	int i;
	char debug_str[QDF_DEBUG_STRING_SIZE];
	size_t len = 0;
	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";

	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
			     "%s", debug_prefix);

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		/* a zero count marks the end of the populated entries */
		if (!table[i].count)
			break;

		print(print_priv,
		      "%6u x %5u = %7uB @ %s:%u %pS %llu",
		      table[i].count,
		      table[i].size,
		      table[i].count * table[i].size,
		      table[i].file,
		      table[i].line, table[i].caller,
		      table[i].time);
		/* accumulate a one-line summary for the bug-RCA log */
		len += qdf_scnprintf(debug_str + len,
				     sizeof(debug_str) - len,
				     " @ %s:%u %pS",
				     table[i].file,
				     table[i].line,
				     table[i].caller);
	}
	print(print_priv, "%s", debug_str);
}

/**
 * qdf_mem_meta_table_insert() - insert memory metadata into the given table
 * @table: the memory metadata table to insert into
 * @meta: the memory metadata to insert
 *
 * Return: true if the table is full after inserting, false otherwise
 */
static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
				      struct qdf_mem_header *meta)
{
	int i;

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		/* empty slot: start a new aggregation entry */
		if (!table[i].count) {
			qdf_str_lcopy(table[i].file, meta->file,
				      QDF_MEM_FILE_NAME_SIZE);
			table[i].line = meta->line;
			table[i].size = meta->size;
			table[i].count = 1;
			table[i].caller = meta->caller;
			table[i].time = meta->time;
			break;
		}

		/* same call site and size: de-duplicate by bumping count */
		if (qdf_str_eq(table[i].file, meta->file) &&
		    table[i].line == meta->line &&
		    table[i].size == meta->size &&
		    table[i].caller == meta->caller) {
			table[i].count++;
			break;
		}
	}

	/* return true if the table is now full */
	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
}

/**
 * qdf_mem_domain_print() - output agnostic memory domain print logic
 * @domain: the memory domain to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Return: None
 */
static void qdf_mem_domain_print(qdf_list_t *domain,
				 qdf_abstract_print print,
				 void *print_priv)
{
	QDF_STATUS status;
	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
	qdf_list_node_t *node;

	qdf_mem_zero(table, sizeof(table));
	qdf_mem_domain_print_header(print, print_priv);

	/* hold lock while inserting to avoid use-after free of the metadata */
	qdf_spin_lock(&qdf_mem_list_lock);
	status = qdf_list_peek_front(domain, &node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
		bool is_full = qdf_mem_meta_table_insert(table, meta);

		/* drop the lock around printing; @print may sleep */
		qdf_spin_unlock(&qdf_mem_list_lock);

		if (is_full) {
			qdf_mem_meta_table_print(table, print, print_priv);
			qdf_mem_zero(table, sizeof(table));
		}

		qdf_spin_lock(&qdf_mem_list_lock);
		status = qdf_list_peek_next(domain, node, &node);
	}
	qdf_spin_unlock(&qdf_mem_list_lock);

	/* flush any partially-filled table */
	qdf_mem_meta_table_print(table, print, print_priv);
}

/**
 * qdf_mem_seq_start() - sequential callback to start
 * @seq: seq_file handle
 * @pos: The start position of the sequence
 *
 * Return: iterator pointer, or NULL if iteration is complete
 */
static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
{
	enum qdf_debug_domain domain = *pos;

	if (!qdf_debug_domain_valid(domain))
		return NULL;

	/* just use the current position as our iterator */
	return pos;
}

/**
 * qdf_mem_seq_next() - next sequential callback
 * @seq: seq_file handle
 * @v: the current iterator
 * @pos: the current position
 *
 * Get the next node and release previous node.
 *
 * Return: iterator pointer, or NULL if iteration is complete
 */
static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	return qdf_mem_seq_start(seq, pos);
}

/**
 * qdf_mem_seq_stop() - stop sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * Return: None
 */
static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }

/**
 * qdf_mem_seq_show() - print sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * Return: 0 - success
 */
static int qdf_mem_seq_show(struct seq_file *seq, void *v)
{
	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;

	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
		   qdf_debug_domain_name(domain_id), domain_id);
	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
			     seq_printf_printer, seq);

	return 0;
}

/* sequential file operation table */
static const struct seq_operations qdf_mem_seq_ops = {
	.start = qdf_mem_seq_start,
	.next = qdf_mem_seq_next,
	.stop = qdf_mem_seq_stop,
	.show = qdf_mem_seq_show,
};


static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &qdf_mem_seq_ops);
}

/* debugfs file operation table */
static const struct file_operations fops_qdf_mem_debugfs = {
	.owner = THIS_MODULE,
	.open = qdf_mem_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* expose the per-domain allocation list at <debugfs>/mem/list */
static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	debugfs_create_file("list",
			    S_IRUSR,
			    qdf_mem_debugfs_root,
			    NULL,
			    &fops_qdf_mem_debugfs);

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_SUCCESS;
}

#else /* MEMORY_DEBUG */

static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

#endif /* MEMORY_DEBUG */


static void qdf_mem_debugfs_exit(void)
{
	debugfs_remove_recursive(qdf_mem_debugfs_root);
	qdf_mem_debugfs_root = NULL;
}

/* create <debugfs>/mem and the kmalloc/dma/skb usage counters under it */
static QDF_STATUS qdf_mem_debugfs_init(void)
{
	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();

	if (!qdf_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);

	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;


	debugfs_create_atomic_t("kmalloc",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.kmalloc);

	debugfs_create_atomic_t("dma",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.dma);

	debugfs_create_atomic_t("skb",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.skb);

	return QDF_STATUS_SUCCESS;
}

#else /* WLAN_DEBUGFS */

static inline void qdf_mem_kmalloc_inc(qdf_size_t size) {}
static inline void qdf_mem_dma_inc(qdf_size_t size) {}
static inline void qdf_mem_kmalloc_dec(qdf_size_t size) {}
static inline void qdf_mem_dma_dec(qdf_size_t size) {}


static QDF_STATUS qdf_mem_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
static void qdf_mem_debugfs_exit(void) {}


static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

#endif /* WLAN_DEBUGFS */

/**
 * __qdf_mempool_init() - Create and initialize memory pool
 *
 * @osdev: platform device object
 * @pool_addr: address of the pool created
 * @elem_cnt: no.
of elements in pool + * @elem_size: size of each pool element in bytes + * @flags: flags + * + * return: Handle to memory pool or NULL if allocation failed + */ +int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr, + int elem_cnt, size_t elem_size, u_int32_t flags) +{ + __qdf_mempool_ctxt_t *new_pool = NULL; + u_int32_t align = L1_CACHE_BYTES; + unsigned long aligned_pool_mem; + int pool_id; + int i; + + if (prealloc_disabled) { + /* TBD: We can maintain a list of pools in qdf_device_t + * to help debugging + * when pre-allocation is not enabled + */ + new_pool = (__qdf_mempool_ctxt_t *) + kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL); + if (new_pool == NULL) + return QDF_STATUS_E_NOMEM; + + memset(new_pool, 0, sizeof(*new_pool)); + /* TBD: define flags for zeroing buffers etc */ + new_pool->flags = flags; + new_pool->elem_size = elem_size; + new_pool->max_elem = elem_cnt; + *pool_addr = new_pool; + return 0; + } + + for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) { + if (osdev->mem_pool[pool_id] == NULL) + break; + } + + if (pool_id == MAX_MEM_POOLS) + return -ENOMEM; + + new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *) + kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL); + if (new_pool == NULL) + return -ENOMEM; + + memset(new_pool, 0, sizeof(*new_pool)); + /* TBD: define flags for zeroing buffers etc */ + new_pool->flags = flags; + new_pool->pool_id = pool_id; + + /* Round up the element size to cacheline */ + new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES); + new_pool->mem_size = elem_cnt * new_pool->elem_size + + ((align)?(align - 1):0); + + new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL); + if (new_pool->pool_mem == NULL) { + /* TBD: Check if we need get_free_pages above */ + kfree(new_pool); + osdev->mem_pool[pool_id] = NULL; + return -ENOMEM; + } + + spin_lock_init(&new_pool->lock); + + /* Initialize free list */ + aligned_pool_mem = (unsigned long)(new_pool->pool_mem) + + ((align) ? 
(unsigned long)(new_pool->pool_mem)%align:0); + STAILQ_INIT(&new_pool->free_list); + + for (i = 0; i < elem_cnt; i++) + STAILQ_INSERT_TAIL(&(new_pool->free_list), + (mempool_elem_t *)(aligned_pool_mem + + (new_pool->elem_size * i)), mempool_entry); + + + new_pool->free_cnt = elem_cnt; + *pool_addr = new_pool; + return 0; +} +qdf_export_symbol(__qdf_mempool_init); + +/** + * __qdf_mempool_destroy() - Destroy memory pool + * @osdev: platform device object + * @Handle: to memory pool + * + * Returns: none + */ +void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool) +{ + int pool_id = 0; + + if (!pool) + return; + + if (prealloc_disabled) { + kfree(pool); + return; + } + + pool_id = pool->pool_id; + + /* TBD: Check if free count matches elem_cnt if debug is enabled */ + kfree(pool->pool_mem); + kfree(pool); + osdev->mem_pool[pool_id] = NULL; +} +qdf_export_symbol(__qdf_mempool_destroy); + +/** + * __qdf_mempool_alloc() - Allocate an element memory pool + * + * @osdev: platform device object + * @Handle: to memory pool + * + * Return: Pointer to the allocated element or NULL if the pool is empty + */ +void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool) +{ + void *buf = NULL; + + if (!pool) + return NULL; + + if (prealloc_disabled) + return qdf_mem_malloc(pool->elem_size); + + spin_lock_bh(&pool->lock); + + buf = STAILQ_FIRST(&pool->free_list); + if (buf != NULL) { + STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry); + pool->free_cnt--; + } + + /* TBD: Update free count if debug is enabled */ + spin_unlock_bh(&pool->lock); + + return buf; +} +qdf_export_symbol(__qdf_mempool_alloc); + +/** + * __qdf_mempool_free() - Free a memory pool element + * @osdev: Platform device object + * @pool: Handle to memory pool + * @buf: Element to be freed + * + * Returns: none + */ +void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf) +{ + if (!pool) + return; + + + if (prealloc_disabled) + return qdf_mem_free(buf); + + 
spin_lock_bh(&pool->lock); + pool->free_cnt++; + + STAILQ_INSERT_TAIL + (&pool->free_list, (mempool_elem_t *)buf, mempool_entry); + spin_unlock_bh(&pool->lock); +} +qdf_export_symbol(__qdf_mempool_free); + +/** + * qdf_mem_alloc_outline() - allocation QDF memory + * @osdev: platform device object + * @size: Number of bytes of memory to allocate. + * + * This function will dynamicallly allocate the specified number of bytes of + * memory. + * + * Return: + * Upon successful allocate, returns a non-NULL pointer to the allocated + * memory. If this function is unable to allocate the amount of memory + * specified (for any reason) it returns NULL. + */ +void * +qdf_mem_alloc_outline(qdf_device_t osdev, size_t size) +{ + return qdf_mem_malloc(size); +} +qdf_export_symbol(qdf_mem_alloc_outline); + +/** + * qdf_mem_free_outline() - QDF memory free API + * @ptr: Pointer to the starting address of the memory to be free'd. + * + * This function will free the memory pointed to by 'ptr'. It also checks + * is memory is corrupted or getting double freed and panic. + * + * Return: none + */ +void +qdf_mem_free_outline(void *buf) +{ + qdf_mem_free(buf); +} +qdf_export_symbol(qdf_mem_free_outline); + +/** + * qdf_mem_zero_outline() - zero out memory + * @buf: pointer to memory that will be set to zero + * @size: number of bytes zero + * + * This function sets the memory location to all zeros, essentially clearing + * the memory. + * + * Return: none + */ +void +qdf_mem_zero_outline(void *buf, qdf_size_t size) +{ + qdf_mem_zero(buf, size); +} +qdf_export_symbol(qdf_mem_zero_outline); + +#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC +/** + * qdf_mem_prealloc_get() - conditionally pre-allocate memory + * @size: the number of bytes to allocate + * + * If size if greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns + * a chunk of pre-allocated memory. If size if less than or equal to + * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead. 
 *
 * Return: NULL on failure, non-NULL on success
 */
static void *qdf_mem_prealloc_get(size_t size)
{
	void *ptr;

	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
		return NULL;

	ptr = wcnss_prealloc_get(size);
	if (!ptr)
		return NULL;

	/* prealloc buffers are recycled; scrub any stale contents */
	memset(ptr, 0, size);

	return ptr;
}

static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return wcnss_prealloc_put(ptr);
}
#else
static inline void *qdf_mem_prealloc_get(size_t size)
{
	return NULL;
}

static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return false;
}
#endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */

/* pick GFP flags legal for the current context (atomic vs. sleepable) */
static int qdf_mem_malloc_flags(void)
{
	if (in_interrupt() || irqs_disabled() || in_atomic())
		return GFP_ATOMIC;

	return GFP_KERNEL;
}

/* External Function implementation */
#ifdef MEMORY_DEBUG

/**
 * qdf_mem_debug_init() - initialize qdf memory debug functionality
 *
 * Return: none
 */
static void qdf_mem_debug_init(void)
{
	int i;

	/* Initalizing the list with maximum size of 60000 */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_create(&qdf_mem_domains[i], 60000);
	qdf_spinlock_create(&qdf_mem_list_lock);

	/* dma */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_create(&qdf_mem_dma_domains[i], 0);
	qdf_spinlock_create(&qdf_mem_dma_list_lock);

	/* skb */
	qdf_net_buf_debug_init();
}

/* report (and count) the live allocations left in @mem_list */
static uint32_t
qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
			       qdf_list_t *mem_list)
{
	if (qdf_list_empty(mem_list))
		return 0;

	qdf_err("Memory leaks detected in %s domain!",
		qdf_debug_domain_name(domain));
	qdf_mem_domain_print(mem_list, qdf_err_printer, NULL);

	return mem_list->count;
}

/* check every debug domain in @domains and panic if anything leaked */
static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
{
	uint32_t leak_count = 0;
	int i;

	/* detect and print leaks */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);

	if (leak_count)
		panic("%u fatal memory leaks detected!", leak_count);
}

/**
 * qdf_mem_debug_exit() - exit qdf memory debug functionality
 *
 * Return: none
 */
static void qdf_mem_debug_exit(void)
{
	int i;

	/* skb */
	qdf_net_buf_debug_exit();

	/* mem */
	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_destroy(qdf_mem_list_get(i));

	qdf_spinlock_destroy(&qdf_mem_list_lock);

	/* dma */
	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_destroy(&qdf_mem_dma_domains[i]);
	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
}

/**
 * qdf_mem_malloc_debug() - debug version of the QDF memory allocation API
 * @size: Number of bytes of memory to allocate (0 < size <=
 *	QDF_MEM_MAX_MALLOC)
 * @file: name of the file the allocation is made from
 * @line: line number of the file the allocation is made from
 * @caller: return address of the caller, recorded for leak reports
 * @flag: GFP flags to use; 0 selects flags based on the current context
 *
 * Allocates @size bytes wrapped with a guard header and trailer, records
 * the allocation in the current debug domain's tracking list, and warns if
 * the allocation took longer than QDF_MEM_WARN_THRESHOLD ms.
 *
 * Return: pointer to the usable memory, or NULL on failure
 */
void *qdf_mem_malloc_debug(size_t size, const char *file, uint32_t line,
			   void *caller, uint32_t flag)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	struct qdf_mem_header *header;
	void *ptr;
	unsigned long start, duration;

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, file, line);
		return NULL;
	}

	/* pre-allocated buffers are not tracked in the debug lists */
	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;
	if (!flag)
		flag = qdf_mem_malloc_flags();
	start = qdf_mc_timer_get_system_time();
	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
	duration = qdf_mc_timer_get_system_time() - start;

	if (duration > QDF_MEM_WARN_THRESHOLD)
		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
			 duration, size, file, line);

	if (!header) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, file, line);
		return NULL;
	}

	qdf_mem_header_init(header, size, file, line, caller);
	qdf_mem_trailer_init(header);
	ptr = qdf_mem_get_ptr(header);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_kmalloc_inc(size);

	return ptr;
}
qdf_export_symbol(qdf_mem_malloc_debug);

/**
 * qdf_mem_free_debug() - debug version of the QDF memory free API
 * @ptr: pointer returned by qdf_mem_malloc_debug(); NULL is a no-op
 * @file: name of the file the free is made from
 * @line: line number of the file the free is made from
 *
 * Validates the guard header, trailer and tracking-list linkage before
 * unlinking the allocation from its debug domain and freeing it. Any
 * detected corruption, double free or domain mismatch panics via
 * qdf_mem_header_assert_valid().
 *
 * Return: none
 */
void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line)
{
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!ptr))
		return;

	if (qdf_mem_prealloc_put(ptr))
		return;

	/* an address this low cannot have a debug header in front of it */
	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
		panic("Failed to free invalid memory location %pK", ptr);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	header = qdf_mem_get_header(ptr);
	error_bitmap = qdf_mem_header_validate(header, current_domain);
	error_bitmap |= qdf_mem_trailer_validate(header);

	if (!error_bitmap) {
		header->freed = true;
		list_del_init(&header->node);
		qdf_mem_list_get(header->domain)->count--;
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);

	/* panics (does not return) if error_bitmap is non-zero */
	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
				    file, line);

	qdf_mem_kmalloc_dec(header->size);
	kfree(header);
}
qdf_export_symbol(qdf_mem_free_debug);

/**
 * qdf_mem_check_for_leaks() - assert no allocations remain in current domain
 *
 * Prints every live allocation still tracked in the current debug domain's
 * normal and DMA lists, and panics if any are found.
 *
 * Return: none
 */
void qdf_mem_check_for_leaks(void)
{
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
	uint32_t leaks_count = 0;

	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);

	if (leaks_count)
		panic("%u fatal memory leaks detected!", leaks_count);
}

#else
static void qdf_mem_debug_init(void) {}

static void qdf_mem_debug_exit(void) {}

/**
 * qdf_mem_malloc() - allocation QDF memory
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocate, returns a non-NULL pointer to the allocated
 * memory. If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *qdf_mem_malloc(size_t size)
{
	void *ptr;

	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	ptr = kzalloc(size, qdf_mem_malloc_flags());
	if (!ptr)
		return NULL;

	/* account the true slab size, not just the requested size */
	qdf_mem_kmalloc_inc(ksize(ptr));

	return ptr;
}
qdf_export_symbol(qdf_mem_malloc);

/**
 * qdf_mem_malloc_atomic() - allocation QDF memory atomically
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocate, returns a non-NULL pointer to the allocated
 * memory. If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *qdf_mem_malloc_atomic(size_t size)
{
	void *ptr;

	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	/* GFP_ATOMIC: never sleeps, safe in interrupt/atomic context */
	ptr = kzalloc(size, GFP_ATOMIC);
	if (!ptr)
		return NULL;

	qdf_mem_kmalloc_inc(ksize(ptr));

	return ptr;
}

qdf_export_symbol(qdf_mem_malloc_atomic);

/**
 * qdf_mem_free() - free QDF memory
 * @ptr: Pointer to the starting address of the memory to be free'd.
 *
 * This function will free the memory pointed to by 'ptr'.
 *
 * Return: None
 */
void qdf_mem_free(void *ptr)
{
	if (ptr == NULL)
		return;

	if (qdf_mem_prealloc_put(ptr))
		return;

	qdf_mem_kmalloc_dec(ksize(ptr));

	kfree(ptr);
}
qdf_export_symbol(qdf_mem_free);
#endif

/**
 * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @element_size: Each element size
 * @element_num: Total number of elements should be allocated
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function will allocate large size of memory over multiple pages.
 * Large size of contiguous memory allocation will fail frequently, then
 * instead of allocate large memory by one shot, allocate through multiple, non
 * contiguous memory and combine pages when actual usage
 *
 * Return: None
 */
void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
			       struct qdf_mem_multi_page_t *pages,
			       size_t element_size, uint16_t element_num,
			       qdf_dma_context_t memctxt, bool cacheable)
{
	uint16_t page_idx;
	struct qdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages = NULL;
	uint16_t i;

	/* elements never straddle a page boundary */
	pages->num_element_per_page = PAGE_SIZE / element_size;
	if (!pages->num_element_per_page) {
		qdf_print("Invalid page %d or element size %d",
			  (int)PAGE_SIZE, (int)element_size);
		goto out_fail;
	}

	pages->num_pages = element_num / pages->num_element_per_page;
	if (element_num % pages->num_element_per_page)
		pages->num_pages++;

	if (cacheable) {
		/* Pages information storage */
		/* NOTE(review): sizeof(pages->cacheable_pages) measures the
		 * void ** pointer itself, which happens to equal the intended
		 * per-entry sizeof(void *); sizeof(*pages->cacheable_pages)
		 * would say what is meant
		 */
		pages->cacheable_pages = qdf_mem_malloc(
			pages->num_pages * sizeof(pages->cacheable_pages));
		if (!pages->cacheable_pages) {
			qdf_print("Cacheable page storage alloc fail");
			goto out_fail;
		}

		cacheable_pages = pages->cacheable_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
			if (!cacheable_pages[page_idx]) {
				qdf_print("cacheable page alloc fail, pi %d",
					  page_idx);
				goto page_alloc_fail;
			}
		}
		pages->dma_pages = NULL;
	} else {
		pages->dma_pages = qdf_mem_malloc(
			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
		if (!pages->dma_pages) {
			qdf_print("dmaable page storage alloc fail");
			goto out_fail;
		}

		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			dma_pages->page_v_addr_start =
				qdf_mem_alloc_consistent(osdev, osdev->dev,
							 PAGE_SIZE,
							 &dma_pages->page_p_addr);
			if (!dma_pages->page_v_addr_start) {
				qdf_print("dmaable page alloc fail pi %d",
					  page_idx);
				goto page_alloc_fail;
			}
			dma_pages->page_v_addr_end =
				dma_pages->page_v_addr_start + PAGE_SIZE;
			dma_pages++;
		}
		pages->cacheable_pages = NULL;
	}
	return;

page_alloc_fail:
	/* unwind only the pages allocated before the failing index */
	if (cacheable) {
		for (i = 0; i < page_idx; i++)
			qdf_mem_free(pages->cacheable_pages[i]);
		qdf_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (i = 0; i < page_idx; i++) {
			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
						dma_pages->page_v_addr_start,
						dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		qdf_mem_free(pages->dma_pages);
	}

out_fail:
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
	return;
}
qdf_export_symbol(qdf_mem_multi_pages_alloc);

/**
 * qdf_mem_multi_pages_free() - free large size of kernel memory
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function will free large size of memory over multiple pages.
 *
 * Return: None
 */
void qdf_mem_multi_pages_free(qdf_device_t osdev,
			      struct qdf_mem_multi_page_t *pages,
			      qdf_dma_context_t memctxt, bool cacheable)
{
	unsigned int page_idx;
	struct qdf_mem_dma_page_t *dma_pages;

	if (cacheable) {
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
			qdf_mem_free(pages->cacheable_pages[page_idx]);
		qdf_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
						dma_pages->page_v_addr_start,
						dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		qdf_mem_free(pages->dma_pages);
	}

	/* reset bookkeeping so a double free is a harmless no-op loop */
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
	return;
}
qdf_export_symbol(qdf_mem_multi_pages_free);

/**
 * qdf_mem_multi_page_link() - Make links for multi page elements
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @elem_size: Single element size
 * @elem_count: elements count should be linked
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function will make links for multi page allocated structure
 *
 * Return: 0 success
 */
int qdf_mem_multi_page_link(qdf_device_t osdev,
			    struct qdf_mem_multi_page_t *pages,
			    uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
{
	uint16_t i, i_int;
	void *page_info;
	void **c_elem = NULL;
	uint32_t num_link = 0;

	for (i = 0; i < pages->num_pages; i++) {
		if (cacheable)
			page_info = pages->cacheable_pages[i];
		else
			page_info = pages->dma_pages[i].page_v_addr_start;

		if (!page_info)
			return -ENOMEM;

		c_elem = (void **)page_info;
		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
			/* last element on a page chains to the next page;
			 * NOTE(review): continuation truncated in this patch
			 * chunk — verify upstream that i + 1 is bounded for
			 * the final page
			 */
			if (i_int == (pages->num_element_per_page - 1)) {
				if (cacheable)
					*c_elem = pages->
						cacheable_pages[i + 1];
				else
					*c_elem = pages->
						dma_pages[i + 1].
+ page_v_addr_start; + num_link++; + break; + } else { + *c_elem = + (void *)(((char *)c_elem) + elem_size); + } + num_link++; + c_elem = (void **)*c_elem; + + /* Last link established exit */ + if (num_link == (elem_count - 1)) + break; + } + } + + if (c_elem) + *c_elem = NULL; + + return 0; +} +qdf_export_symbol(qdf_mem_multi_page_link); + +/** + * qdf_mem_copy() - copy memory + * @dst_addr: Pointer to destination memory location (to copy to) + * @src_addr: Pointer to source memory location (to copy from) + * @num_bytes: Number of bytes to copy. + * + * Copy host memory from one location to another, similar to memcpy in + * standard C. Note this function does not specifically handle overlapping + * source and destination memory locations. Calling this function with + * overlapping source and destination memory locations will result in + * unpredictable results. Use qdf_mem_move() if the memory locations + * for the source and destination are overlapping (or could be overlapping!) + * + * Return: none + */ +void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes) +{ + if (0 == num_bytes) { + /* special case where dst_addr or src_addr can be NULL */ + return; + } + + if ((dst_addr == NULL) || (src_addr == NULL)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s called with NULL parameter, source:%pK destination:%pK", + __func__, src_addr, dst_addr); + QDF_ASSERT(0); + return; + } + memcpy(dst_addr, src_addr, num_bytes); +} +qdf_export_symbol(qdf_mem_copy); + +qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size) +{ + qdf_shared_mem_t *shared_mem; + qdf_dma_addr_t dma_addr, paddr; + int ret; + + shared_mem = qdf_mem_malloc(sizeof(*shared_mem)); + if (!shared_mem) { + qdf_err("Unable to allocate memory for shared resource struct"); + return NULL; + } + + shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev, + size, qdf_mem_get_dma_addr_ptr(osdev, + &shared_mem->mem_info)); + if (!shared_mem->vaddr) { + 
qdf_err("Unable to allocate DMA memory for shared resource"); + qdf_mem_free(shared_mem); + return NULL; + } + + qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size); + size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info); + + qdf_mem_zero(shared_mem->vaddr, size); + dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info); + paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr); + + qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr); + ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable, + shared_mem->vaddr, dma_addr, size); + if (ret) { + qdf_err("Unable to get DMA sgtable"); + qdf_mem_free_consistent(osdev, osdev->dev, + shared_mem->mem_info.size, + shared_mem->vaddr, + dma_addr, + qdf_get_dma_mem_context(shared_mem, + memctx)); + qdf_mem_free(shared_mem); + return NULL; + } + + qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable); + + return shared_mem; +} + +qdf_export_symbol(qdf_mem_shared_mem_alloc); + +/** + * qdf_mem_zero() - zero out memory + * @ptr: pointer to memory that will be set to zero + * @num_bytes: number of bytes zero + * + * This function sets the memory location to all zeros, essentially clearing + * the memory. + * + * Return: None + */ +void qdf_mem_zero(void *ptr, uint32_t num_bytes) +{ + if (0 == num_bytes) { + /* special case where ptr can be NULL */ + return; + } + + if (ptr == NULL) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s called with NULL parameter ptr", __func__); + return; + } + memset(ptr, 0, num_bytes); +} +qdf_export_symbol(qdf_mem_zero); + +/** + * qdf_mem_set() - set (fill) memory with a specified byte value. 
+ * @ptr: Pointer to memory that will be set + * @num_bytes: Number of bytes to be set + * @value: Byte set in memory + * + * Return: None + */ +void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value) +{ + if (ptr == NULL) { + qdf_print("%s called with NULL parameter ptr", __func__); + return; + } + memset(ptr, value, num_bytes); +} +qdf_export_symbol(qdf_mem_set); + +/** + * qdf_mem_move() - move memory + * @dst_addr: pointer to destination memory location (to move to) + * @src_addr: pointer to source memory location (to move from) + * @num_bytes: number of bytes to move. + * + * Move host memory from one location to another, similar to memmove in + * standard C. Note this function *does* handle overlapping + * source and destination memory locations. + + * Return: None + */ +void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes) +{ + if (0 == num_bytes) { + /* special case where dst_addr or src_addr can be NULL */ + return; + } + + if ((dst_addr == NULL) || (src_addr == NULL)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s called with NULL parameter, source:%pK destination:%pK", + __func__, src_addr, dst_addr); + QDF_ASSERT(0); + return; + } + memmove(dst_addr, src_addr, num_bytes); +} +qdf_export_symbol(qdf_mem_move); + +#if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB) +/** + * qdf_mem_dma_alloc() - allocates memory for dma + * @osdev: OS device handle + * @dev: Pointer to device handle + * @size: Size to be allocated + * @phy_addr: Physical address + * + * Return: pointer of allocated memory or null if memory alloc fails + */ +static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, + qdf_size_t size, + qdf_dma_addr_t *phy_addr) +{ + void *vaddr; + + vaddr = qdf_mem_malloc(size); + *phy_addr = ((uintptr_t) vaddr); + /* using this type conversion to suppress "cast from pointer to integer + * of different size" warning on some platforms + */ + BUILD_BUG_ON(sizeof(*phy_addr) < 
sizeof(vaddr)); + return vaddr; +} + +#elif defined(QCA_WIFI_QCA8074) && defined(BUILD_X86) +#define QCA8074_RAM_BASE 0x50000000 +#define QDF_MEM_ALLOC_X86_MAX_RETRIES 10 +void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size, + qdf_dma_addr_t *phy_addr) +{ + void *vaddr = NULL; + int i; + + *phy_addr = 0; + + for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) { + vaddr = dma_alloc_coherent(dev, size, phy_addr, + qdf_mem_malloc_flags()); + + if (!vaddr) { + qdf_print("%s failed , size: %zu!\n", __func__, size); + return NULL; + } + + if (*phy_addr >= QCA8074_RAM_BASE) + return vaddr; + + dma_free_coherent(dev, size, vaddr, *phy_addr); + } + + return NULL; +} + +#else +static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, + qdf_size_t size, qdf_dma_addr_t *paddr) +{ + return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags()); +} +#endif + +#if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB) +static inline void +qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr) +{ + qdf_mem_free(vaddr); +} +#else + +static inline void +qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr) +{ + dma_free_coherent(dev, size, vaddr, paddr); +} +#endif + +#ifdef MEMORY_DEBUG +void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev, + qdf_size_t size, qdf_dma_addr_t *paddr, + const char *file, uint32_t line, + void *caller) +{ + QDF_STATUS status; + enum qdf_debug_domain current_domain = qdf_debug_domain_get(); + qdf_list_t *mem_list = qdf_mem_dma_list(current_domain); + struct qdf_mem_header *header; + void *vaddr; + + if (!size || size > QDF_MEM_MAX_MALLOC) { + qdf_err("Cannot malloc %zu bytes @ %s:%d", size, file, line); + return NULL; + } + + vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE, + paddr); + + if (!vaddr) { + qdf_warn("Failed to malloc %zuB @ %s:%d", size, file, line); + return NULL; + } + + header = qdf_mem_dma_get_header(vaddr, 
size); + /* For DMA buffers we only add trailers, this function will init + * the header structure at the tail + * Prefix the header into DMA buffer causes SMMU faults, so + * do not prefix header into the DMA buffers + */ + qdf_mem_header_init(header, size, file, line, caller); + + qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock); + status = qdf_list_insert_front(mem_list, &header->node); + qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock); + if (QDF_IS_STATUS_ERROR(status)) + qdf_err("Failed to insert memory header; status %d", status); + + qdf_mem_dma_inc(size); + + return vaddr; +} +qdf_export_symbol(qdf_mem_alloc_consistent_debug); + +void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev, + qdf_size_t size, void *vaddr, + qdf_dma_addr_t paddr, + qdf_dma_context_t memctx, + const char *file, uint32_t line) +{ + enum qdf_debug_domain domain = qdf_debug_domain_get(); + struct qdf_mem_header *header; + enum qdf_mem_validation_bitmap error_bitmap; + + /* freeing a null pointer is valid */ + if (qdf_unlikely(!vaddr)) + return; + + qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock); + /* For DMA buffers we only add trailers, this function will retrieve + * the header structure at the tail + * Prefix the header into DMA buffer causes SMMU faults, so + * do not prefix header into the DMA buffers + */ + header = qdf_mem_dma_get_header(vaddr, size); + error_bitmap = qdf_mem_header_validate(header, domain); + if (!error_bitmap) { + header->freed = true; + list_del_init(&header->node); + qdf_mem_dma_list(header->domain)->count--; + } + qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock); + + qdf_mem_header_assert_valid(header, domain, error_bitmap, file, line); + + qdf_mem_dma_dec(header->size); + qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr); +} +qdf_export_symbol(qdf_mem_free_consistent_debug); + +#else + +void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev, + qdf_size_t size, qdf_dma_addr_t *paddr) +{ + void *vaddr = 
qdf_mem_dma_alloc(osdev, dev, size, paddr); + + if (vaddr) + qdf_mem_dma_inc(size); + + return vaddr; +} +qdf_export_symbol(qdf_mem_alloc_consistent); + +void qdf_mem_free_consistent(qdf_device_t osdev, void *dev, + qdf_size_t size, void *vaddr, + qdf_dma_addr_t paddr, qdf_dma_context_t memctx) +{ + qdf_mem_dma_dec(size); + qdf_mem_dma_free(dev, size, vaddr, paddr); +} +qdf_export_symbol(qdf_mem_free_consistent); + +#endif /* MEMORY_DEBUG */ + +/** + * qdf_mem_dma_sync_single_for_device() - assign memory to device + * @osdev: OS device handle + * @bus_addr: dma address to give to the device + * @size: Size of the memory block + * @direction: direction data will be DMAed + * + * Assign memory to the remote device. + * The cache lines are flushed to ram or invalidated as needed. + * + * Return: none + */ +void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev, + qdf_dma_addr_t bus_addr, + qdf_size_t size, + enum dma_data_direction direction) +{ + dma_sync_single_for_device(osdev->dev, bus_addr, size, direction); +} +qdf_export_symbol(qdf_mem_dma_sync_single_for_device); + +/** + * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU + * @osdev: OS device handle + * @bus_addr: dma address to give to the cpu + * @size: Size of the memory block + * @direction: direction data will be DMAed + * + * Assign memory to the CPU. 
+ * + * Return: none + */ +void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev, + qdf_dma_addr_t bus_addr, + qdf_size_t size, + enum dma_data_direction direction) +{ + dma_sync_single_for_cpu(osdev->dev, bus_addr, size, direction); +} +qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu); + +void qdf_mem_init(void) +{ + qdf_mem_debug_init(); + qdf_mem_debugfs_init(); + qdf_mem_debug_debugfs_init(); +} +qdf_export_symbol(qdf_mem_init); + +void qdf_mem_exit(void) +{ + qdf_mem_debug_debugfs_exit(); + qdf_mem_debugfs_exit(); + qdf_mem_debug_exit(); +} +qdf_export_symbol(qdf_mem_exit); + +/** + * qdf_ether_addr_copy() - copy an Ethernet address + * + * @dst_addr: A six-byte array Ethernet address destination + * @src_addr: A six-byte array Ethernet address source + * + * Please note: dst & src must both be aligned to u16. + * + * Return: none + */ +void qdf_ether_addr_copy(void *dst_addr, const void *src_addr) +{ + if ((dst_addr == NULL) || (src_addr == NULL)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s called with NULL parameter, source:%pK destination:%pK", + __func__, src_addr, dst_addr); + QDF_ASSERT(0); + return; + } + ether_addr_copy(dst_addr, src_addr); +} +qdf_export_symbol(qdf_ether_addr_copy); + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_module.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_module.c new file mode 100644 index 0000000000000000000000000000000000000000..ae5c6f1ae33499f7002e940719e343103fbdaa4e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_module.c @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: i_qdf_module.h + * Linux-specific definitions for QDF module API's + */ + +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Qualcomm Atheros Inc."); +MODULE_DESCRIPTION("Qualcomm Atheros Device Framework Module"); +MODULE_LICENSE("Dual BSD/GPL"); + +#ifndef EXPORT_SYMTAB +#define EXPORT_SYMTAB +#endif + +/** + * qdf_mod_init() - module initialization + * + * Return: int + */ +static int __init +qdf_mod_init(void) +{ + qdf_shared_print_ctrl_init(); + qdf_mem_init(); + qdf_logging_init(); + qdf_perfmod_init(); + qdf_nbuf_mod_init(); + qdf_event_list_init(); + return 0; +} +module_init(qdf_mod_init); + +/** + * qdf_mod_exit() - module remove + * + * Return: int + */ +static void __exit +qdf_mod_exit(void) +{ + qdf_event_list_destroy(); + qdf_nbuf_mod_exit(); + qdf_perfmod_exit(); + qdf_logging_exit(); + qdf_mem_exit(); + qdf_shared_print_ctrl_cleanup(); +} +module_exit(qdf_mod_exit); + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c new file mode 100644 index 0000000000000000000000000000000000000000..ef9bc34474d1c72a9dd8abec7a0acccdbb767b71 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c @@ -0,0 +1,4387 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_nbuf.c + * QCA driver framework(QDF) network buffer management APIs + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qdf_flex_mem.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qdf_str.h" + +#if defined(FEATURE_TSO) +#include +#include +#include +#include +#include +#endif /* FEATURE_TSO */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) + +#define qdf_nbuf_users_inc atomic_inc +#define qdf_nbuf_users_dec atomic_dec +#define qdf_nbuf_users_set atomic_set +#define qdf_nbuf_users_read atomic_read +#else +#define qdf_nbuf_users_inc refcount_inc +#define qdf_nbuf_users_dec refcount_dec +#define qdf_nbuf_users_set refcount_set +#define qdf_nbuf_users_read refcount_read +#endif /* KERNEL_VERSION(4, 13, 0) */ + +#define IEEE80211_RADIOTAP_VHT_BW_20 0 +#define IEEE80211_RADIOTAP_VHT_BW_40 1 +#define IEEE80211_RADIOTAP_VHT_BW_80 2 +#define IEEE80211_RADIOTAP_VHT_BW_160 3 + +#define RADIOTAP_VHT_BW_20 0 +#define RADIOTAP_VHT_BW_40 1 +#define RADIOTAP_VHT_BW_80 4 +#define RADIOTAP_VHT_BW_160 11 + +/* channel number to freq conversion */ +#define CHANNEL_NUM_14 14 +#define 
CHANNEL_NUM_15 15 +#define CHANNEL_NUM_27 27 +#define CHANNEL_NUM_35 35 +#define CHANNEL_NUM_182 182 +#define CHANNEL_NUM_197 197 +#define CHANNEL_FREQ_2484 2484 +#define CHANNEL_FREQ_2407 2407 +#define CHANNEL_FREQ_2512 2512 +#define CHANNEL_FREQ_5000 5000 +#define CHANNEL_FREQ_4000 4000 +#define FREQ_MULTIPLIER_CONST_5MHZ 5 +#define FREQ_MULTIPLIER_CONST_20MHZ 20 +#define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100 +#define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080 +#define RADIOTAP_CCK_CHANNEL 0x0020 +#define RADIOTAP_OFDM_CHANNEL 0x0040 + +#ifdef CONFIG_MCL +#include + +struct qdf_track_timer { + qdf_mc_timer_t track_timer; + qdf_atomic_t alloc_fail_cnt; +}; + +static struct qdf_track_timer alloc_track_timer; + +#define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS 5000 +#define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD 50 +#endif + +/* Packet Counter */ +static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX]; +static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX]; +#ifdef QDF_NBUF_GLOBAL_COUNT +#define NBUF_DEBUGFS_NAME "nbuf_counters" +static qdf_atomic_t nbuf_count; +#endif + +/** + * qdf_nbuf_tx_desc_count_display() - Displays the packet counter + * + * Return: none + */ +void qdf_nbuf_tx_desc_count_display(void) +{ + qdf_print("Current Snapshot of the Driver:\n"); + qdf_print("Data Packets:\n"); + qdf_print("HDD %d TXRX_Q %d TXRX %d HTT %d", + nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] - + (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] + + nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] - + nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]), + nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] - + nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE], + nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] - + nbuf_tx_data[QDF_NBUF_TX_PKT_HTT], + nbuf_tx_data[QDF_NBUF_TX_PKT_HTT] - + nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]); + qdf_print(" HTC %d HIF %d CE %d TX_COMP %d\n", + nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] - + nbuf_tx_data[QDF_NBUF_TX_PKT_HIF], + nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] - + nbuf_tx_data[QDF_NBUF_TX_PKT_CE], + nbuf_tx_data[QDF_NBUF_TX_PKT_CE] - + 
nbuf_tx_data[QDF_NBUF_TX_PKT_FREE], + nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]); + qdf_print("Mgmt Packets:\n"); + qdf_print("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d\n", + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] - + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE], + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] - + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT], + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] - + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC], + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] - + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF], + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] - + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE], + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] - + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE], + nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]); +} +qdf_export_symbol(qdf_nbuf_tx_desc_count_display); + +/** + * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter + * @packet_type : packet type either mgmt/data + * @current_state : layer at which the packet currently present + * + * Return: none + */ +static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type, + uint8_t current_state) +{ + switch (packet_type) { + case QDF_NBUF_TX_PKT_MGMT_TRACK: + nbuf_tx_mgmt[current_state]++; + break; + case QDF_NBUF_TX_PKT_DATA_TRACK: + nbuf_tx_data[current_state]++; + break; + default: + break; + } +} +qdf_export_symbol(qdf_nbuf_tx_desc_count_update); + +/** + * qdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt + * + * Return: none + */ +void qdf_nbuf_tx_desc_count_clear(void) +{ + memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt)); + memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data)); +} +qdf_export_symbol(qdf_nbuf_tx_desc_count_clear); + +/** + * qdf_nbuf_set_state() - Updates the packet state + * @nbuf: network buffer + * @current_state : layer at which the packet currently is + * + * This function updates the packet state to the layer at which the packet + * currently is + * + * Return: none + */ +void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state) +{ + /* + * Only Mgmt, Data Packets are 
tracked. WMI messages + * such as scan commands are not tracked + */ + uint8_t packet_type; + + packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf); + + if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) && + (packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) { + return; + } + QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state; + qdf_nbuf_tx_desc_count_update(packet_type, + current_state); +} +qdf_export_symbol(qdf_nbuf_set_state); + +#ifdef CONFIG_MCL +/** + * __qdf_nbuf_start_replenish_timer - Start alloc fail replenish timer + * + * This function starts the alloc fail replenish timer. + * + * Return: void + */ +static void __qdf_nbuf_start_replenish_timer(void) +{ + qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt); + if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) != + QDF_TIMER_STATE_RUNNING) + qdf_mc_timer_start(&alloc_track_timer.track_timer, + QDF_NBUF_ALLOC_EXPIRE_TIMER_MS); +} + +/** + * __qdf_nbuf_stop_replenish_timer - Stop alloc fail replenish timer + * + * This function stops the alloc fail replenish timer. + * + * Return: void + */ +static void __qdf_nbuf_stop_replenish_timer(void) +{ + if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0) + return; + + qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0); + if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) == + QDF_TIMER_STATE_RUNNING) + qdf_mc_timer_stop(&alloc_track_timer.track_timer); +} + +/** + * qdf_replenish_expire_handler - Replenish expire handler + * + * This function triggers when the alloc fail replenish timer expires. 
+ * + * Return: void + */ +static void qdf_replenish_expire_handler(void *arg) +{ + if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) > + QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) { + qdf_print("ERROR: NBUF allocation timer expired Fail count %d", + qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt)); + + /* Error handling here */ + } +} + +/** + * __qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer + * + * This function initializes the nbuf alloc fail replenish timer. + * + * Return: void + */ +void __qdf_nbuf_init_replenish_timer(void) +{ + qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW, + qdf_replenish_expire_handler, NULL); +} + +/** + * __qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer + * + * This function deinitializes the nbuf alloc fail replenish timer. + * + * Return: void + */ +void __qdf_nbuf_deinit_replenish_timer(void) +{ + __qdf_nbuf_stop_replenish_timer(); + qdf_mc_timer_destroy(&alloc_track_timer.track_timer); +} +#else + +static inline void __qdf_nbuf_start_replenish_timer(void) {} +static inline void __qdf_nbuf_stop_replenish_timer(void) {} +#endif + +/* globals do not need to be initialized to NULL/0 */ +qdf_nbuf_trace_update_t qdf_trace_update_cb; +qdf_nbuf_free_t nbuf_free_cb; + +#ifdef QDF_NBUF_GLOBAL_COUNT + +/** + * __qdf_nbuf_count_get() - get nbuf global count + * + * Return: nbuf global count + */ +int __qdf_nbuf_count_get(void) +{ + return qdf_atomic_read(&nbuf_count); +} +qdf_export_symbol(__qdf_nbuf_count_get); + +/** + * __qdf_nbuf_count_inc() - increment nbuf global count + * + * @buf: sk buff + * + * Return: void + */ +void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf) +{ + qdf_atomic_inc(&nbuf_count); +} +qdf_export_symbol(__qdf_nbuf_count_inc); + +/** + * __qdf_nbuf_count_dec() - decrement nbuf global count + * + * @buf: sk buff + * + * Return: void + */ +void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf) +{ + qdf_atomic_dec(&nbuf_count); +} 
+qdf_export_symbol(__qdf_nbuf_count_dec); +#endif + +#if defined(QCA_WIFI_QCA8074) && defined (BUILD_X86) +struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve, + int align, int prio, const char *func, + uint32_t line) +{ + struct sk_buff *skb; + unsigned long offset; + uint32_t lowmem_alloc_tries = 0; + + if (align) + size += (align - 1); + +realloc: + skb = dev_alloc_skb(size); + + if (skb) + goto skb_alloc; + + skb = pld_nbuf_pre_alloc(size); + + if (!skb) { + qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d", + size, func, line); + return NULL; + } + +skb_alloc: + /* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040 + * Though we are trying to reserve low memory upfront to prevent this, + * we sometimes see SKBs allocated from low memory. + */ + if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) { + lowmem_alloc_tries++; + if (lowmem_alloc_tries > 100) { + qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d", + size, func, line); + return NULL; + } else { + /* Not freeing to make sure it + * will not get allocated again + */ + goto realloc; + } + } + memset(skb->cb, 0x0, sizeof(skb->cb)); + + /* + * The default is for netbuf fragments to be interpreted + * as wordstreams rather than bytestreams. 
+ */ + QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1; + QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1; + + /* + * XXX:how about we reserve first then align + * Align & make sure that the tail & data are adjusted properly + */ + + if (align) { + offset = ((unsigned long)skb->data) % align; + if (offset) + skb_reserve(skb, align - offset); + } + + /* + * NOTE:alloc doesn't take responsibility if reserve unaligns the data + * pointer + */ + skb_reserve(skb, reserve); + qdf_nbuf_count_inc(skb); + + return skb; +} +#else +struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve, + int align, int prio, const char *func, + uint32_t line) +{ + struct sk_buff *skb; + unsigned long offset; + int flags = GFP_KERNEL; + + if (align) + size += (align - 1); + + if (in_interrupt() || irqs_disabled() || in_atomic()) { + flags = GFP_ATOMIC; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + /* + * Observed that kcompactd burns out CPU to make order-3 page. + *__netdev_alloc_skb has 4k page fallback option just in case of + * failing high order page allocation so we don't need to be + * hard. Make kcompactd rest in piece. + */ + flags = flags & ~__GFP_KSWAPD_RECLAIM; +#endif + } + + skb = __netdev_alloc_skb(NULL, size, flags); + + if (skb) + goto skb_alloc; + + skb = pld_nbuf_pre_alloc(size); + + if (!skb) { + qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d", + size, func, line); + __qdf_nbuf_start_replenish_timer(); + return NULL; + } else { + __qdf_nbuf_stop_replenish_timer(); + } + +skb_alloc: + memset(skb->cb, 0x0, sizeof(skb->cb)); + + /* + * The default is for netbuf fragments to be interpreted + * as wordstreams rather than bytestreams. 
+ */ + QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1; + QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1; + + /* + * XXX:how about we reserve first then align + * Align & make sure that the tail & data are adjusted properly + */ + + if (align) { + offset = ((unsigned long)skb->data) % align; + if (offset) + skb_reserve(skb, align - offset); + } + + /* + * NOTE:alloc doesn't take responsibility if reserve unaligns the data + * pointer + */ + skb_reserve(skb, reserve); + qdf_nbuf_count_inc(skb); + + return skb; +} +#endif +qdf_export_symbol(__qdf_nbuf_alloc); + +/** + * __qdf_nbuf_free() - free the nbuf its interrupt safe + * @skb: Pointer to network buffer + * + * Return: none + */ + +#ifdef CONFIG_MCL +void __qdf_nbuf_free(struct sk_buff *skb) +{ + if (pld_nbuf_pre_alloc_free(skb)) + return; + + qdf_nbuf_count_dec(skb); + if (nbuf_free_cb) + nbuf_free_cb(skb); + else + dev_kfree_skb_any(skb); +} +#else +void __qdf_nbuf_free(struct sk_buff *skb) +{ + if (pld_nbuf_pre_alloc_free(skb)) + return; + + qdf_nbuf_count_dec(skb); + dev_kfree_skb_any(skb); +} +#endif + +qdf_export_symbol(__qdf_nbuf_free); + +#ifdef NBUF_MEMORY_DEBUG +enum qdf_nbuf_event_type { + QDF_NBUF_ALLOC, + QDF_NBUF_ALLOC_CLONE, + QDF_NBUF_ALLOC_COPY, + QDF_NBUF_ALLOC_FAILURE, + QDF_NBUF_FREE, + QDF_NBUF_MAP, + QDF_NBUF_UNMAP, +}; + +struct qdf_nbuf_event { + qdf_nbuf_t nbuf; + char file[QDF_MEM_FILE_NAME_SIZE]; + uint32_t line; + enum qdf_nbuf_event_type type; + uint64_t timestamp; +}; + +#define QDF_NBUF_HISTORY_SIZE 4096 +static qdf_atomic_t qdf_nbuf_history_index; +static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE]; + +static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size) +{ + int32_t next = qdf_atomic_inc_return(index); + + if (next == size) + qdf_atomic_sub(size, index); + + return next % size; +} + +static void +qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *file, uint32_t line, + enum qdf_nbuf_event_type type) +{ + int32_t idx = 
qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
+ QDF_NBUF_HISTORY_SIZE);
+ struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
+
+ event->nbuf = nbuf;
+ qdf_str_lcopy(event->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE);
+ event->line = line;
+ event->type = type;
+ event->timestamp = qdf_get_log_timestamp();
+}
+#endif /* NBUF_MEMORY_DEBUG */
+
+#ifdef NBUF_MAP_UNMAP_DEBUG
+struct qdf_nbuf_map_metadata {
+ struct hlist_node node;
+ qdf_nbuf_t nbuf;
+ char file[QDF_MEM_FILE_NAME_SIZE];
+ uint32_t line;
+};
+
+DEFINE_QDF_FLEX_MEM_POOL(qdf_nbuf_map_pool,
+ sizeof(struct qdf_nbuf_map_metadata), 0);
+#define QDF_NBUF_MAP_HT_BITS 10 /* 1024 buckets */
+static DECLARE_HASHTABLE(qdf_nbuf_map_ht, QDF_NBUF_MAP_HT_BITS);
+static qdf_spinlock_t qdf_nbuf_map_lock;
+
+static void qdf_nbuf_map_tracking_init(void)
+{
+ qdf_flex_mem_init(&qdf_nbuf_map_pool);
+ hash_init(qdf_nbuf_map_ht);
+ qdf_spinlock_create(&qdf_nbuf_map_lock);
+}
+
+void qdf_nbuf_map_check_for_leaks(void)
+{
+ struct qdf_nbuf_map_metadata *meta;
+ int bucket;
+ uint32_t count = 0;
+ bool is_empty;
+
+ qdf_flex_mem_release(&qdf_nbuf_map_pool);
+
+ qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
+ is_empty = hash_empty(qdf_nbuf_map_ht);
+ qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
+
+ if (is_empty)
+ return;
+
+ qdf_err("Nbuf map without unmap events detected!");
+ qdf_err("------------------------------------------------------------");
+
+ /* Hold the lock for the entire iteration for safe list/meta access. We
+ * are explicitly preferring the chance to watchdog on the print, over
+ * the possibility of invalid list/memory access. Since we are going to
+ * panic anyway, the worst case is loading up the crash dump to find out
+ * what was in the hash table. 
+ */
+ qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
+ hash_for_each(qdf_nbuf_map_ht, bucket, meta, node) {
+ count++;
+ qdf_err("0x%pK @ %s:%u", /* %pK is the printk format for kernel pointers; %pk is invalid */
+ meta->nbuf, meta->file, meta->line);
+ }
+ qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
+
+ panic("%u fatal nbuf map without unmap events detected!", count);
+}
+
+static void qdf_nbuf_map_tracking_deinit(void)
+{
+ qdf_nbuf_map_check_for_leaks();
+ qdf_spinlock_destroy(&qdf_nbuf_map_lock);
+ qdf_flex_mem_deinit(&qdf_nbuf_map_pool);
+}
+
+static struct qdf_nbuf_map_metadata *qdf_nbuf_meta_get(qdf_nbuf_t nbuf)
+{
+ struct qdf_nbuf_map_metadata *meta;
+
+ hash_for_each_possible(qdf_nbuf_map_ht, meta, node, (size_t)nbuf) {
+ if (meta->nbuf == nbuf)
+ return meta;
+ }
+
+ return NULL;
+}
+
+static QDF_STATUS
+qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *file, uint32_t line)
+{
+ struct qdf_nbuf_map_metadata *meta;
+
+ QDF_BUG(nbuf);
+ if (!nbuf) {
+ qdf_err("Cannot map null nbuf");
+ return QDF_STATUS_E_INVAL;
+ }
+
+ qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
+ meta = qdf_nbuf_meta_get(nbuf);
+ qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
+ if (meta)
+ QDF_DEBUG_PANIC(
+ "Double nbuf map detected @ %s:%u; last map from %s:%u",
+ kbasename(file), line, meta->file, meta->line);
+
+ meta = qdf_flex_mem_alloc(&qdf_nbuf_map_pool);
+ if (!meta) {
+ qdf_err("Failed to allocate nbuf map tracking metadata");
+ return QDF_STATUS_E_NOMEM;
+ }
+
+ meta->nbuf = nbuf;
+ qdf_str_lcopy(meta->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE);
+ meta->line = line;
+
+ qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
+ hash_add(qdf_nbuf_map_ht, &meta->node, (size_t)nbuf);
+ qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
+
+ qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_MAP);
+
+ return QDF_STATUS_SUCCESS;
+}
+
+static void
+qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *file, uint32_t line)
+{
+ struct qdf_nbuf_map_metadata *meta;
+
+ QDF_BUG(nbuf);
+ if (!nbuf) {
+ qdf_err("Cannot unmap null nbuf");
+ return;
+ }
+
+ 
qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
+ meta = qdf_nbuf_meta_get(nbuf);
+ if (!meta) {
+ qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
+ QDF_DEBUG_PANIC("Double nbuf unmap or unmap without map detected @ %s:%u",
+ kbasename(file), line);
+ return; /* don't deref NULL meta below when the panic macro is non-fatal */
+ }
+ hash_del(&meta->node);
+ qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
+
+ qdf_flex_mem_free(&qdf_nbuf_map_pool, meta);
+
+ qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_UNMAP);
+}
+
+QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
+ qdf_nbuf_t buf,
+ qdf_dma_dir_t dir,
+ const char *file,
+ uint32_t line)
+{
+ QDF_STATUS status;
+
+ status = qdf_nbuf_track_map(buf, file, line);
+ if (QDF_IS_STATUS_ERROR(status))
+ return status;
+
+ status = __qdf_nbuf_map(osdev, buf, dir);
+ if (QDF_IS_STATUS_ERROR(status))
+ qdf_nbuf_untrack_map(buf, file, line);
+
+ return status;
+}
+
+qdf_export_symbol(qdf_nbuf_map_debug);
+
+void qdf_nbuf_unmap_debug(qdf_device_t osdev,
+ qdf_nbuf_t buf,
+ qdf_dma_dir_t dir,
+ const char *file,
+ uint32_t line)
+{
+ qdf_nbuf_untrack_map(buf, file, line);
+ __qdf_nbuf_unmap_single(osdev, buf, dir);
+}
+
+qdf_export_symbol(qdf_nbuf_unmap_debug);
+
+QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
+ qdf_nbuf_t buf,
+ qdf_dma_dir_t dir,
+ const char *file,
+ uint32_t line)
+{
+ QDF_STATUS status;
+
+ status = qdf_nbuf_track_map(buf, file, line);
+ if (QDF_IS_STATUS_ERROR(status))
+ return status;
+
+ status = __qdf_nbuf_map_single(osdev, buf, dir);
+ if (QDF_IS_STATUS_ERROR(status))
+ qdf_nbuf_untrack_map(buf, file, line);
+
+ return status;
+}
+
+qdf_export_symbol(qdf_nbuf_map_single_debug);
+
+void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
+ qdf_nbuf_t buf,
+ qdf_dma_dir_t dir,
+ const char *file,
+ uint32_t line)
+{
+ qdf_nbuf_untrack_map(buf, file, line);
+ __qdf_nbuf_unmap_single(osdev, buf, dir);
+}
+
+qdf_export_symbol(qdf_nbuf_unmap_single_debug);
+
+QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
+ qdf_nbuf_t buf,
+ qdf_dma_dir_t dir,
+ int nbytes,
+ const char *file,
+ uint32_t line)
+{
+ QDF_STATUS status;
+ 
+ status = qdf_nbuf_track_map(buf, file, line); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes); + if (QDF_IS_STATUS_ERROR(status)) + qdf_nbuf_untrack_map(buf, file, line); + + return status; +} + +qdf_export_symbol(qdf_nbuf_map_nbytes_debug); + +void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + int nbytes, + const char *file, + uint32_t line) +{ + qdf_nbuf_untrack_map(buf, file, line); + __qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes); +} + +qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug); + +QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + int nbytes, + const char *file, + uint32_t line) +{ + QDF_STATUS status; + + status = qdf_nbuf_track_map(buf, file, line); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes); + if (QDF_IS_STATUS_ERROR(status)) + qdf_nbuf_untrack_map(buf, file, line); + + return status; +} + +qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug); + +void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev, + qdf_nbuf_t buf, + qdf_dma_dir_t dir, + int nbytes, + const char *file, + uint32_t line) +{ + qdf_nbuf_untrack_map(buf, file, line); + __qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes); +} + +qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug); + +static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf, uint8_t *file, + uint32_t line) +{ + struct qdf_nbuf_map_metadata *meta; + + qdf_spin_lock_irqsave(&qdf_nbuf_map_lock); + meta = qdf_nbuf_meta_get(nbuf); + if (meta) + QDF_DEBUG_PANIC( + "Nbuf freed @ %s:%u while mapped from %s:%u", + kbasename(file), line, meta->file, meta->line); + qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock); +} +#else +static inline void qdf_nbuf_map_tracking_init(void) +{ +} + +static inline void qdf_nbuf_map_tracking_deinit(void) +{ +} + +static inline void 
qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf, + uint8_t *file, + uint32_t line) +{ +} +#endif /* NBUF_MAP_UNMAP_DEBUG */ + +/** + * __qdf_nbuf_map() - map a buffer to local bus address space + * @osdev: OS device + * @bmap: Bitmap + * @skb: Pointer to network buffer + * @dir: Direction + * + * Return: QDF_STATUS + */ +#ifdef QDF_OS_DEBUG +QDF_STATUS +__qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir) +{ + struct skb_shared_info *sh = skb_shinfo(skb); + + qdf_assert((dir == QDF_DMA_TO_DEVICE) + || (dir == QDF_DMA_FROM_DEVICE)); + + /* + * Assume there's only a single fragment. + * To support multiple fragments, it would be necessary to change + * qdf_nbuf_t to be a separate object that stores meta-info + * (including the bus address for each fragment) and a pointer + * to the underlying sk_buff. + */ + qdf_assert(sh->nr_frags == 0); + + return __qdf_nbuf_map_single(osdev, skb, dir); +} +qdf_export_symbol(__qdf_nbuf_map); + +#else +QDF_STATUS +__qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir) +{ + return __qdf_nbuf_map_single(osdev, skb, dir); +} +qdf_export_symbol(__qdf_nbuf_map); +#endif +/** + * __qdf_nbuf_unmap() - to unmap a previously mapped buf + * @osdev: OS device + * @skb: Pointer to network buffer + * @dir: dma direction + * + * Return: none + */ +void +__qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb, + qdf_dma_dir_t dir) +{ + qdf_assert((dir == QDF_DMA_TO_DEVICE) + || (dir == QDF_DMA_FROM_DEVICE)); + + /* + * Assume there's a single fragment. + * If this is not true, the assertion in __qdf_nbuf_map will catch it. 
+ */ + __qdf_nbuf_unmap_single(osdev, skb, dir); +} +qdf_export_symbol(__qdf_nbuf_unmap); + +/** + * __qdf_nbuf_map_single() - map a single buffer to local bus address space + * @osdev: OS device + * @skb: Pointer to network buffer + * @dir: Direction + * + * Return: QDF_STATUS + */ +#if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO) +QDF_STATUS +__qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + qdf_dma_addr_t paddr; + + QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data; + BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data)); + BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data)); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_nbuf_map_single); +#else +QDF_STATUS +__qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + qdf_dma_addr_t paddr; + + /* assume that the OS only provides a single fragment */ + QDF_NBUF_CB_PADDR(buf) = paddr = + dma_map_single(osdev->dev, buf->data, + skb_end_pointer(buf) - buf->data, + __qdf_dma_dir_to_os(dir)); + return dma_mapping_error(osdev->dev, paddr) + ? 
QDF_STATUS_E_FAILURE + : QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_nbuf_map_single); +#endif +/** + * __qdf_nbuf_unmap_single() - unmap a previously mapped buf + * @osdev: OS device + * @skb: Pointer to network buffer + * @dir: Direction + * + * Return: none + */ +#if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO) +void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf, + qdf_dma_dir_t dir) +{ +} +#else +void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf, + qdf_dma_dir_t dir) +{ + if (QDF_NBUF_CB_PADDR(buf)) + dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf), + skb_end_pointer(buf) - buf->data, + __qdf_dma_dir_to_os(dir)); +} +#endif +qdf_export_symbol(__qdf_nbuf_unmap_single); + +/** + * __qdf_nbuf_set_rx_cksum() - set rx checksum + * @skb: Pointer to network buffer + * @cksum: Pointer to checksum value + * + * Return: QDF_STATUS + */ +QDF_STATUS +__qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum) +{ + switch (cksum->l4_result) { + case QDF_NBUF_RX_CKSUM_NONE: + skb->ip_summed = CHECKSUM_NONE; + break; + case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY: + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + case QDF_NBUF_RX_CKSUM_TCP_UDP_HW: + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum = cksum->val; + break; + default: + pr_err("Unknown checksum type\n"); + qdf_assert(0); + return QDF_STATUS_E_NOSUPPORT; + } + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_nbuf_set_rx_cksum); + +/** + * __qdf_nbuf_get_tx_cksum() - get tx checksum + * @skb: Pointer to network buffer + * + * Return: TX checksum value + */ +qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb) +{ + switch (skb->ip_summed) { + case CHECKSUM_NONE: + return QDF_NBUF_TX_CKSUM_NONE; + case CHECKSUM_PARTIAL: + return QDF_NBUF_TX_CKSUM_TCP_UDP; + case CHECKSUM_COMPLETE: + return QDF_NBUF_TX_CKSUM_TCP_UDP_IP; + default: + return QDF_NBUF_TX_CKSUM_NONE; + } +} +qdf_export_symbol(__qdf_nbuf_get_tx_cksum); + +/** + * 
__qdf_nbuf_get_tid() - get tid
+ * @skb: Pointer to network buffer
+ *
+ * Return: tid
+ */
+uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
+{
+ return skb->priority;
+}
+qdf_export_symbol(__qdf_nbuf_get_tid);
+
+/**
+ * __qdf_nbuf_set_tid() - set tid
+ * @skb: Pointer to network buffer
+ *
+ * Return: none
+ */
+void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
+{
+ skb->priority = tid;
+}
+qdf_export_symbol(__qdf_nbuf_set_tid);
+
+/**
+ * __qdf_nbuf_get_exemption_type() - get exemption type
+ * @skb: Pointer to network buffer
+ *
+ * Return: QDF_NBUF_EXEMPT_NO_EXEMPTION (always)
+ */
+uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
+{
+ return QDF_NBUF_EXEMPT_NO_EXEMPTION;
+}
+qdf_export_symbol(__qdf_nbuf_get_exemption_type);
+
+/**
+ * __qdf_nbuf_reg_trace_cb() - register trace callback
+ * @cb_func_ptr: Pointer to trace callback function
+ *
+ * Return: none
+ */
+void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
+{
+ qdf_trace_update_cb = cb_func_ptr;
+}
+qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
+
+/**
+ * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
+ * of DHCP packet.
+ * @data: Pointer to DHCP packet data buffer
+ *
+ * This func. returns the subtype of DHCP packet.
+ *
+ * Return: subtype of the DHCP packet. 
+ */ +enum qdf_proto_subtype +__qdf_nbuf_data_get_dhcp_subtype(uint8_t *data) +{ + enum qdf_proto_subtype subtype = QDF_PROTO_INVALID; + + if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) && + (data[QDF_DHCP_OPTION53_LENGTH_OFFSET] == + QDF_DHCP_OPTION53_LENGTH)) { + + switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) { + case QDF_DHCP_DISCOVER: + subtype = QDF_PROTO_DHCP_DISCOVER; + break; + case QDF_DHCP_REQUEST: + subtype = QDF_PROTO_DHCP_REQUEST; + break; + case QDF_DHCP_OFFER: + subtype = QDF_PROTO_DHCP_OFFER; + break; + case QDF_DHCP_ACK: + subtype = QDF_PROTO_DHCP_ACK; + break; + case QDF_DHCP_NAK: + subtype = QDF_PROTO_DHCP_NACK; + break; + case QDF_DHCP_RELEASE: + subtype = QDF_PROTO_DHCP_RELEASE; + break; + case QDF_DHCP_INFORM: + subtype = QDF_PROTO_DHCP_INFORM; + break; + case QDF_DHCP_DECLINE: + subtype = QDF_PROTO_DHCP_DECLINE; + break; + default: + break; + } + } + + return subtype; +} + +/** + * __qdf_nbuf_data_get_eapol_subtype() - get the subtype + * of EAPOL packet. + * @data: Pointer to EAPOL packet data buffer + * + * This func. returns the subtype of EAPOL packet. + * + * Return: subtype of the EAPOL packet. + */ +enum qdf_proto_subtype +__qdf_nbuf_data_get_eapol_subtype(uint8_t *data) +{ + uint16_t eapol_key_info; + enum qdf_proto_subtype subtype = QDF_PROTO_INVALID; + uint16_t mask; + + eapol_key_info = (uint16_t)(*(uint16_t *) + (data + EAPOL_KEY_INFO_OFFSET)); + + mask = eapol_key_info & EAPOL_MASK; + switch (mask) { + case EAPOL_M1_BIT_MASK: + subtype = QDF_PROTO_EAPOL_M1; + break; + case EAPOL_M2_BIT_MASK: + subtype = QDF_PROTO_EAPOL_M2; + break; + case EAPOL_M3_BIT_MASK: + subtype = QDF_PROTO_EAPOL_M3; + break; + case EAPOL_M4_BIT_MASK: + subtype = QDF_PROTO_EAPOL_M4; + break; + default: + break; + } + + return subtype; +} + +/** + * __qdf_nbuf_data_get_arp_subtype() - get the subtype + * of ARP packet. + * @data: Pointer to ARP packet data buffer + * + * This func. returns the subtype of ARP packet. 
+ * + * Return: subtype of the ARP packet. + */ +enum qdf_proto_subtype +__qdf_nbuf_data_get_arp_subtype(uint8_t *data) +{ + uint16_t subtype; + enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID; + + subtype = (uint16_t)(*(uint16_t *) + (data + ARP_SUB_TYPE_OFFSET)); + + switch (QDF_SWAP_U16(subtype)) { + case ARP_REQUEST: + proto_subtype = QDF_PROTO_ARP_REQ; + break; + case ARP_RESPONSE: + proto_subtype = QDF_PROTO_ARP_RES; + break; + default: + break; + } + + return proto_subtype; +} + +/** + * __qdf_nbuf_data_get_icmp_subtype() - get the subtype + * of IPV4 ICMP packet. + * @data: Pointer to IPV4 ICMP packet data buffer + * + * This func. returns the subtype of ICMP packet. + * + * Return: subtype of the ICMP packet. + */ +enum qdf_proto_subtype +__qdf_nbuf_data_get_icmp_subtype(uint8_t *data) +{ + uint8_t subtype; + enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID; + + subtype = (uint8_t)(*(uint8_t *) + (data + ICMP_SUBTYPE_OFFSET)); + + switch (subtype) { + case ICMP_REQUEST: + proto_subtype = QDF_PROTO_ICMP_REQ; + break; + case ICMP_RESPONSE: + proto_subtype = QDF_PROTO_ICMP_RES; + break; + default: + break; + } + + return proto_subtype; +} + +/** + * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype + * of IPV6 ICMPV6 packet. + * @data: Pointer to IPV6 ICMPV6 packet data buffer + * + * This func. returns the subtype of ICMPV6 packet. + * + * Return: subtype of the ICMPV6 packet. 
+ */ +enum qdf_proto_subtype +__qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data) +{ + uint8_t subtype; + enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID; + + subtype = (uint8_t)(*(uint8_t *) + (data + ICMPV6_SUBTYPE_OFFSET)); + + switch (subtype) { + case ICMPV6_REQUEST: + proto_subtype = QDF_PROTO_ICMPV6_REQ; + break; + case ICMPV6_RESPONSE: + proto_subtype = QDF_PROTO_ICMPV6_RES; + break; + case ICMPV6_RS: + proto_subtype = QDF_PROTO_ICMPV6_RS; + break; + case ICMPV6_RA: + proto_subtype = QDF_PROTO_ICMPV6_RA; + break; + case ICMPV6_NS: + proto_subtype = QDF_PROTO_ICMPV6_NS; + break; + case ICMPV6_NA: + proto_subtype = QDF_PROTO_ICMPV6_NA; + break; + default: + break; + } + + return proto_subtype; +} + +/** + * __qdf_nbuf_data_get_ipv4_proto() - get the proto type + * of IPV4 packet. + * @data: Pointer to IPV4 packet data buffer + * + * This func. returns the proto type of IPV4 packet. + * + * Return: proto type of IPV4 packet. + */ +uint8_t +__qdf_nbuf_data_get_ipv4_proto(uint8_t *data) +{ + uint8_t proto_type; + + proto_type = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET)); + return proto_type; +} + +/** + * __qdf_nbuf_data_get_ipv6_proto() - get the proto type + * of IPV6 packet. + * @data: Pointer to IPV6 packet data buffer + * + * This func. returns the proto type of IPV6 packet. + * + * Return: proto type of IPV6 packet. + */ +uint8_t +__qdf_nbuf_data_get_ipv6_proto(uint8_t *data) +{ + uint8_t proto_type; + + proto_type = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET)); + return proto_type; +} + +/** + * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet + * @data: Pointer to network data + * + * This api is for Tx packets. 
+ * + * Return: true if packet is ipv4 packet + * false otherwise + */ +bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data) +{ + uint16_t ether_type; + + ether_type = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_TRAC_ETH_TYPE_OFFSET)); + + if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE)) + return true; + else + return false; +} +qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt); + +/** + * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is DHCP packet + * false otherwise + */ +bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data) +{ + uint16_t sport; + uint16_t dport; + + sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET + + QDF_NBUF_TRAC_IPV4_HEADER_SIZE)); + dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET + + QDF_NBUF_TRAC_IPV4_HEADER_SIZE + + sizeof(uint16_t))); + + if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) && + (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) || + ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) && + (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)))) + return true; + else + return false; +} +qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt); + +/** + * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is a eapol packet + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is EAPOL packet + * false otherwise. + */ +bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data) +{ + uint16_t ether_type; + + ether_type = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_TRAC_ETH_TYPE_OFFSET)); + + if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)) + return true; + else + return false; +} +qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt); + +/** + * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet + * @skb: Pointer to network buffer + * + * This api is for ipv4 packet. 
+ * + * Return: true if packet is WAPI packet + * false otherwise. + */ +bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb) +{ + uint16_t ether_type; + + ether_type = (uint16_t)(*(uint16_t *)(skb->data + + QDF_NBUF_TRAC_ETH_TYPE_OFFSET)); + + if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE)) + return true; + else + return false; +} +qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt); + +/** + * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet + * @skb: Pointer to network buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is tdls packet + * false otherwise. + */ +bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb) +{ + uint16_t ether_type; + + ether_type = *(uint16_t *)(skb->data + + QDF_NBUF_TRAC_ETH_TYPE_OFFSET); + + if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE)) + return true; + else + return false; +} +qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt); + +/** + * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is a arp packet + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is ARP packet + * false otherwise. + */ +bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data) +{ + uint16_t ether_type; + + ether_type = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_TRAC_ETH_TYPE_OFFSET)); + + if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE)) + return true; + else + return false; +} +qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt); + +/** + * __qdf_nbuf_data_is_arp_req() - check if skb data is a arp request + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is ARP request + * false otherwise. 
+ */ +bool __qdf_nbuf_data_is_arp_req(uint8_t *data) +{ + uint16_t op_code; + + op_code = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_PKT_ARP_OPCODE_OFFSET)); + + if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ)) + return true; + return false; +} + +/** + * __qdf_nbuf_data_is_arp_rsp() - check if skb data is a arp response + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: true if packet is ARP response + * false otherwise. + */ +bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data) +{ + uint16_t op_code; + + op_code = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_PKT_ARP_OPCODE_OFFSET)); + + if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY)) + return true; + return false; +} + +/** + * __qdf_nbuf_data_get_arp_src_ip() - get arp src IP + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: ARP packet source IP value. + */ +uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data) +{ + uint32_t src_ip; + + src_ip = (uint32_t)(*(uint32_t *)(data + + QDF_NBUF_PKT_ARP_SRC_IP_OFFSET)); + + return src_ip; +} + +/** + * __qdf_nbuf_data_get_arp_tgt_ip() - get arp target IP + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: ARP packet target IP value. + */ +uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data) +{ + uint32_t tgt_ip; + + tgt_ip = (uint32_t)(*(uint32_t *)(data + + QDF_NBUF_PKT_ARP_TGT_IP_OFFSET)); + + return tgt_ip; +} + +/** + * __qdf_nbuf_get_dns_domain_name() - get dns domain name + * @data: Pointer to network data buffer + * @len: length to copy + * + * This api is for dns domain name + * + * Return: dns domain name. 
+ */ +uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len) +{ + uint8_t *domain_name; + + domain_name = (uint8_t *) + (data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET); + return domain_name; +} + + +/** + * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query + * @data: Pointer to network data buffer + * + * This api is for dns query packet. + * + * Return: true if packet is dns query packet. + * false otherwise. + */ +bool __qdf_nbuf_data_is_dns_query(uint8_t *data) +{ + uint16_t op_code; + uint16_t tgt_port; + + tgt_port = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_PKT_DNS_DST_PORT_OFFSET)); + /* Standard DNS query always happen on Dest Port 53. */ + if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) { + op_code = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET)); + if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) == + QDF_NBUF_PKT_DNSOP_STANDARD_QUERY) + return true; + } + return false; +} + +/** + * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response + * @data: Pointer to network data buffer + * + * This api is for dns query response. + * + * Return: true if packet is dns response packet. + * false otherwise. + */ +bool __qdf_nbuf_data_is_dns_response(uint8_t *data) +{ + uint16_t op_code; + uint16_t src_port; + + src_port = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET)); + /* Standard DNS response always comes on Src Port 53. */ + if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) { + op_code = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET)); + + if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) == + QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE) + return true; + } + return false; +} + +/** + * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn + * @data: Pointer to network data buffer + * + * This api is for tcp syn packet. + * + * Return: true if packet is tcp syn packet. + * false otherwise. 
+ */ +bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data) +{ + uint8_t op_code; + + op_code = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_PKT_TCP_OPCODE_OFFSET)); + + if (op_code == QDF_NBUF_PKT_TCPOP_SYN) + return true; + return false; +} + +/** + * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack + * @data: Pointer to network data buffer + * + * This api is for tcp syn ack packet. + * + * Return: true if packet is tcp syn ack packet. + * false otherwise. + */ +bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data) +{ + uint8_t op_code; + + op_code = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_PKT_TCP_OPCODE_OFFSET)); + + if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK) + return true; + return false; +} + +/** + * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack + * @data: Pointer to network data buffer + * + * This api is for tcp ack packet. + * + * Return: true if packet is tcp ack packet. + * false otherwise. + */ +bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data) +{ + uint8_t op_code; + + op_code = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_PKT_TCP_OPCODE_OFFSET)); + + if (op_code == QDF_NBUF_PKT_TCPOP_ACK) + return true; + return false; +} + +/** + * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port + * @data: Pointer to network data buffer + * + * This api is for tcp packet. + * + * Return: tcp source port value. + */ +uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data) +{ + uint16_t src_port; + + src_port = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET)); + + return src_port; +} + +/** + * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port + * @data: Pointer to network data buffer + * + * This api is for tcp packet. + * + * Return: tcp destination port value. 
+ */ +uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data) +{ + uint16_t tgt_port; + + tgt_port = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_PKT_TCP_DST_PORT_OFFSET)); + + return tgt_port; +} + +/** + * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is a icmpv4 request + * @data: Pointer to network data buffer + * + * This api is for ipv4 req packet. + * + * Return: true if packet is icmpv4 request + * false otherwise. + */ +bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data) +{ + uint8_t op_code; + + op_code = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET)); + + if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ) + return true; + return false; +} + +/** + * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is a icmpv4 res + * @data: Pointer to network data buffer + * + * This api is for ipv4 res packet. + * + * Return: true if packet is icmpv4 response + * false otherwise. + */ +bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data) +{ + uint8_t op_code; + + op_code = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET)); + + if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY) + return true; + return false; +} + +/** + * __qdf_nbuf_data_get_icmpv4_src_ip() - get icmpv4 src IP + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: icmpv4 packet source IP value. + */ +uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data) +{ + uint32_t src_ip; + + src_ip = (uint32_t)(*(uint32_t *)(data + + QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET)); + + return src_ip; +} + +/** + * __qdf_nbuf_data_get_icmpv4_tgt_ip() - get icmpv4 target IP + * @data: Pointer to network data buffer + * + * This api is for ipv4 packet. + * + * Return: icmpv4 packet target IP value. + */ +uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data) +{ + uint32_t tgt_ip; + + tgt_ip = (uint32_t)(*(uint32_t *)(data + + QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET)); + + return tgt_ip; +} + + +/** + * __qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet. 
+ * @data: Pointer to IPV6 packet data buffer + * + * This func. checks whether it is a IPV6 packet or not. + * + * Return: TRUE if it is a IPV6 packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data) +{ + uint16_t ether_type; + + ether_type = (uint16_t)(*(uint16_t *)(data + + QDF_NBUF_TRAC_ETH_TYPE_OFFSET)); + + if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE)) + return true; + else + return false; +} +qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt); + +/** + * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet + * @data: Pointer to network data buffer + * + * This api is for ipv6 packet. + * + * Return: true if packet is DHCP packet + * false otherwise + */ +bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data) +{ + uint16_t sport; + uint16_t dport; + + sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET + + QDF_NBUF_TRAC_IPV6_HEADER_SIZE); + dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET + + QDF_NBUF_TRAC_IPV6_HEADER_SIZE + + sizeof(uint16_t)); + + if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) && + (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) || + ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) && + (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)))) + return true; + else + return false; +} +qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt); + +/** + * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet. + * @data: Pointer to IPV4 packet data buffer + * + * This func. checks whether it is a IPV4 multicast packet or not. + * + * Return: TRUE if it is a IPV4 multicast packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data) +{ + if (__qdf_nbuf_data_is_ipv4_pkt(data)) { + uint32_t *dst_addr = + (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET); + + /* + * Check first word of the IPV4 address and if it is + * equal to 0xE then it represents multicast IP. 
+ */ + if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) == + QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK) + return true; + else + return false; + } else + return false; +} + +/** + * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet. + * @data: Pointer to IPV6 packet data buffer + * + * This func. checks whether it is a IPV6 multicast packet or not. + * + * Return: TRUE if it is a IPV6 multicast packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data) +{ + if (__qdf_nbuf_data_is_ipv6_pkt(data)) { + uint16_t *dst_addr; + + dst_addr = (uint16_t *) + (data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET); + + /* + * Check first byte of the IP address and if it + * 0xFF00 then it is a IPV6 mcast packet. + */ + if (*dst_addr == + QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR)) + return true; + else + return false; + } else + return false; +} + +/** + * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet. + * @data: Pointer to IPV4 ICMP packet data buffer + * + * This func. checks whether it is a ICMP packet or not. + * + * Return: TRUE if it is a ICMP packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data) +{ + if (__qdf_nbuf_data_is_ipv4_pkt(data)) { + uint8_t pkt_type; + + pkt_type = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET)); + + if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE) + return true; + else + return false; + } else + return false; +} + +/** + * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet. + * @data: Pointer to IPV6 ICMPV6 packet data buffer + * + * This func. checks whether it is a ICMPV6 packet or not. 
+ * + * Return: TRUE if it is a ICMPV6 packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data) +{ + if (__qdf_nbuf_data_is_ipv6_pkt(data)) { + uint8_t pkt_type; + + pkt_type = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET)); + + if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE) + return true; + else + return false; + } else + return false; +} + +/** + * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet. + * @data: Pointer to IPV4 UDP packet data buffer + * + * This func. checks whether it is a IPV4 UDP packet or not. + * + * Return: TRUE if it is a IPV4 UDP packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data) +{ + if (__qdf_nbuf_data_is_ipv4_pkt(data)) { + uint8_t pkt_type; + + pkt_type = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET)); + + if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE) + return true; + else + return false; + } else + return false; +} + +/** + * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet. + * @data: Pointer to IPV4 TCP packet data buffer + * + * This func. checks whether it is a IPV4 TCP packet or not. + * + * Return: TRUE if it is a IPV4 TCP packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data) +{ + if (__qdf_nbuf_data_is_ipv4_pkt(data)) { + uint8_t pkt_type; + + pkt_type = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET)); + + if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE) + return true; + else + return false; + } else + return false; +} + +/** + * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet. + * @data: Pointer to IPV6 UDP packet data buffer + * + * This func. checks whether it is a IPV6 UDP packet or not. 
+ * + * Return: TRUE if it is a IPV6 UDP packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data) +{ + if (__qdf_nbuf_data_is_ipv6_pkt(data)) { + uint8_t pkt_type; + + pkt_type = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET)); + + if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE) + return true; + else + return false; + } else + return false; +} + +/** + * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet. + * @data: Pointer to IPV6 TCP packet data buffer + * + * This func. checks whether it is a IPV6 TCP packet or not. + * + * Return: TRUE if it is a IPV6 TCP packet + * FALSE if not + */ +bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data) +{ + if (__qdf_nbuf_data_is_ipv6_pkt(data)) { + uint8_t pkt_type; + + pkt_type = (uint8_t)(*(uint8_t *)(data + + QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET)); + + if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE) + return true; + else + return false; + } else + return false; +} + +/** + * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast + * @nbuf - sk buff + * + * Return: true if packet is broadcast + * false otherwise + */ +bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf) +{ + struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf); + return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest); +} +qdf_export_symbol(__qdf_nbuf_is_bcast_pkt); + +#ifdef NBUF_MEMORY_DEBUG +#define QDF_NET_BUF_TRACK_MAX_SIZE (1024) + +/** + * struct qdf_nbuf_track_t - Network buffer track structure + * + * @p_next: Pointer to next + * @net_buf: Pointer to network buffer + * @file_name: File name + * @line_num: Line number + * @size: Size + */ +struct qdf_nbuf_track_t { + struct qdf_nbuf_track_t *p_next; + qdf_nbuf_t net_buf; + char file_name[QDF_MEM_FILE_NAME_SIZE]; + uint32_t line_num; + size_t size; +}; + +static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE]; +typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK; + +static QDF_NBUF_TRACK 
	*gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
/* slab cache from which tracking cookies are carved */
static struct kmem_cache *nbuf_tracking_cache;
/* recycled-cookie freelist, protected by the lock below */
static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
static spinlock_t qdf_net_buf_track_free_list_lock;
static uint32_t qdf_net_buf_track_free_list_count;
static uint32_t qdf_net_buf_track_used_list_count;
/* high-water marks, reported when the tracker is torn down */
static uint32_t qdf_net_buf_track_max_used;
static uint32_t qdf_net_buf_track_max_free;
static uint32_t qdf_net_buf_track_max_allocated;

/**
 * update_max_used() - update qdf_net_buf_track_max_used tracking variable
 *
 * tracks the max number of network buffers that the wlan driver was tracking
 * at any one time. Caller must hold qdf_net_buf_track_free_list_lock.
 *
 * Return: none
 */
static inline void update_max_used(void)
{
	int sum;

	if (qdf_net_buf_track_max_used <
	    qdf_net_buf_track_used_list_count)
		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
	sum = qdf_net_buf_track_free_list_count +
		qdf_net_buf_track_used_list_count;
	if (qdf_net_buf_track_max_allocated < sum)
		qdf_net_buf_track_max_allocated = sum;
}

/**
 * update_max_free() - update qdf_net_buf_track_free_list_count
 *
 * tracks the max number tracking buffers kept in the freelist.
 * Caller must hold qdf_net_buf_track_free_list_lock.
 *
 * Return: none
 */
static inline void update_max_free(void)
{
	if (qdf_net_buf_track_max_free <
	    qdf_net_buf_track_free_list_count)
		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
}

/**
 * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
 *
 * This function pulls from a freelist if possible and uses kmem_cache_alloc.
 * This function also adds flexibility to adjust the allocation and freelist
 * schemes.
 *
 * Return: a pointer to an unused QDF_NBUF_TRACK structure may not be zeroed.
 */
static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
{
	int flags = GFP_KERNEL;
	unsigned long irq_flag;
	QDF_NBUF_TRACK *new_node = NULL;

	/* try to recycle a node from the freelist under the lock */
	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
	qdf_net_buf_track_used_list_count++;
	if (qdf_net_buf_track_free_list != NULL) {
		new_node = qdf_net_buf_track_free_list;
		qdf_net_buf_track_free_list =
			qdf_net_buf_track_free_list->p_next;
		qdf_net_buf_track_free_list_count--;
	}
	update_max_used();
	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);

	if (new_node != NULL)
		return new_node;

	/* freelist empty: fall back to the slab; no sleeping in atomic ctx */
	if (in_interrupt() || irqs_disabled() || in_atomic())
		flags = GFP_ATOMIC;

	return kmem_cache_alloc(nbuf_tracking_cache, flags);
}

/* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
#define FREEQ_POOLSIZE 2048

/**
 * qdf_nbuf_track_free() - free the nbuf tracking cookie.
 * @node: tracking cookie to release; NULL is a no-op
 *
 * Matches calls to qdf_nbuf_track_alloc.
 * Either frees the tracking cookie to kernel or an internal
 * freelist based on the size of the freelist.
 *
 * Return: none
 */
static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
{
	unsigned long irq_flag;

	if (!node)
		return;

	/* Try to shrink the freelist if free_list_count > than FREEQ_POOLSIZE
	 * only shrink the freelist if it is bigger than twice the number of
	 * nbufs in use. If the driver is stalling in a consistent bursty
	 * fashion, this will keep 3/4 of the allocations from the free list
	 * while also allowing the system to recover memory as less frantic
	 * traffic occurs.
	 */

	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);

	qdf_net_buf_track_used_list_count--;
	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
	   (qdf_net_buf_track_free_list_count >
	    qdf_net_buf_track_used_list_count << 1)) {
		kmem_cache_free(nbuf_tracking_cache, node);
	} else {
		node->p_next = qdf_net_buf_track_free_list;
		qdf_net_buf_track_free_list = node;
		qdf_net_buf_track_free_list_count++;
	}
	update_max_free();
	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
}

/**
 * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
 *
 * Removes a 'warmup time' characteristic of the freelist. Prefilling
 * the freelist first makes it performant for the first iperf udp burst
 * as well as steady state.
 *
 * Return: None
 */
static void qdf_nbuf_track_prefill(void)
{
	int i;
	QDF_NBUF_TRACK *node, *head;

	/* prepopulate the freelist */
	head = NULL;
	for (i = 0; i < FREEQ_POOLSIZE; i++) {
		node = qdf_nbuf_track_alloc();
		if (node == NULL)
			continue;
		node->p_next = head;
		head = node;
	}
	/* release the chain; frees land on the freelist via track_free */
	while (head) {
		node = head->p_next;
		qdf_nbuf_track_free(head);
		head = node;
	}

	/* prefilled buffers should not count as used */
	qdf_net_buf_track_max_used = 0;
}

/**
 * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
 *
 * This initializes the memory manager for the nbuf tracking cookies. Because
 * these cookies are all the same size and only used in this feature, we can
 * use a kmem_cache to provide tracking as well as to speed up allocations.
 * To avoid the overhead of allocating and freeing the buffers (including SLUB
 * features) a freelist is prepopulated here.
 *
 * Return: None
 */
static void qdf_nbuf_track_memory_manager_create(void)
{
	spin_lock_init(&qdf_net_buf_track_free_list_lock);
	/* NOTE(review): kmem_cache_create() result is not checked; on
	 * failure qdf_nbuf_track_alloc() would pass a NULL cache to
	 * kmem_cache_alloc() - confirm this cannot happen in practice.
	 */
	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
						sizeof(QDF_NBUF_TRACK),
						0, 0, NULL);

	qdf_nbuf_track_prefill();
}

/**
 * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies
 *
 * Empty the freelist and print out usage statistics when it is no longer
 * needed. Also the kmem_cache should be destroyed here so that it can warn if
 * any nbuf tracking cookies were leaked.
 *
 * Return: None
 */
static void qdf_nbuf_track_memory_manager_destroy(void)
{
	QDF_NBUF_TRACK *node, *tmp;
	unsigned long irq_flag;

	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
	node = qdf_net_buf_track_free_list;

	/* sanity-check the high-water marks gathered during the run */
	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
		qdf_print("%s: unexpectedly large max_used count %d",
			  __func__, qdf_net_buf_track_max_used);

	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
		qdf_print("%s: %d unused trackers were allocated",
			  __func__,
			  qdf_net_buf_track_max_allocated -
			  qdf_net_buf_track_max_used);

	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
		qdf_print("%s: check freelist shrinking functionality",
			  __func__);

	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
		  "%s: %d residual freelist size\n",
		  __func__, qdf_net_buf_track_free_list_count);

	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
		  "%s: %d max freelist size observed\n",
		  __func__, qdf_net_buf_track_max_free);

	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
		  "%s: %d max buffers used observed\n",
		  __func__, qdf_net_buf_track_max_used);

	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
		  "%s: %d max buffers allocated observed\n",
		  __func__, qdf_net_buf_track_max_allocated);

	/* drain the freelist back to the slab */
	while (node) {
		tmp = node;
		node = node->p_next;
		kmem_cache_free(nbuf_tracking_cache, tmp);
		qdf_net_buf_track_free_list_count--;
	}

	if (qdf_net_buf_track_free_list_count != 0)
		qdf_print("%s: %d unfreed tracking memory lost in freelist\n",
			  __func__, qdf_net_buf_track_free_list_count);

	if (qdf_net_buf_track_used_list_count != 0)
		qdf_print("%s: %d unfreed tracking memory still in use\n",
			  __func__, qdf_net_buf_track_used_list_count);

	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
	kmem_cache_destroy(nbuf_tracking_cache);
	qdf_net_buf_track_free_list = NULL;
}

/**
 * qdf_net_buf_debug_init() - initialize network buffer debug functionality
 *
 * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
 * in a hash table and when driver is unloaded it reports about leaked SKBs.
 * WLAN driver module whose allocated SKB is freed by network stack are
 * supposed to call qdf_net_buf_debug_release_skb() such that the SKB is not
 * reported as memory leak.
 *
 * Return: none
 */
void qdf_net_buf_debug_init(void)
{
	uint32_t i;

	qdf_atomic_set(&qdf_nbuf_history_index, -1);

	qdf_nbuf_map_tracking_init();
	qdf_nbuf_track_memory_manager_create();

	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
		gp_qdf_net_buf_track_tbl[i] = NULL;
		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
	}
}
qdf_export_symbol(qdf_net_buf_debug_init);

/**
 * qdf_net_buf_debug_exit() - exit network buffer debug functionality
 *
 * Exit network buffer tracking debug functionality and log SKB memory leaks
 * As part of exiting the functionality, free the leaked memory and
 * cleanup the tracking buffers.
 *
 * Return: none
 */
void qdf_net_buf_debug_exit(void)
{
	uint32_t i;
	uint32_t count = 0;
	unsigned long irq_flag;
	QDF_NBUF_TRACK *p_node;
	QDF_NBUF_TRACK *p_prev;

	/* walk every hash bucket; anything still tracked is a leak */
	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
		p_node = gp_qdf_net_buf_track_tbl[i];
		while (p_node) {
			p_prev = p_node;
			p_node = p_node->p_next;
			count++;
			qdf_print("SKB buf memory Leak@ File %s, @Line %d, size %zu, nbuf %pK\n",
				  p_prev->file_name, p_prev->line_num,
				  p_prev->size, p_prev->net_buf);
			qdf_nbuf_track_free(p_prev);
		}
		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
	}

	qdf_nbuf_track_memory_manager_destroy();
	qdf_nbuf_map_tracking_deinit();

#ifdef CONFIG_HALT_KMEMLEAK
	if (count) {
		qdf_print("%d SKBs leaked .. please fix the SKB leak", count);
		QDF_BUG(0);
	}
#endif
}
qdf_export_symbol(qdf_net_buf_debug_exit);

/**
 * qdf_net_buf_debug_hash() - hash network buffer pointer
 * @net_buf: network buffer to hash
 *
 * Return: hash value
 */
static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
{
	uint32_t i;

	/* mix two shifted views of the pointer, then mask to table size
	 * (QDF_NET_BUF_TRACK_MAX_SIZE is a power of two)
	 */
	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);

	return i;
}

/**
 * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
 * @net_buf: network buffer to look up
 *
 * Caller is expected to hold the bucket lock g_qdf_net_buf_track_lock[i]
 * for the bucket this buffer hashes to.
 *
 * Return: If skb is found in hash table then return pointer to network buffer
 *	   else return %NULL
 */
static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
{
	uint32_t i;
	QDF_NBUF_TRACK *p_node;

	i = qdf_net_buf_debug_hash(net_buf);
	p_node = gp_qdf_net_buf_track_tbl[i];

	while (p_node) {
		if (p_node->net_buf == net_buf)
			return p_node;
		p_node = p_node->p_next;
	}

	return NULL;
}

/**
 * qdf_net_buf_debug_add_node() - store skb in debug hash table
 * @net_buf: network buffer to track
 * @size: allocation size to account against this buffer
 * @file_name: allocation site file name
 * @line_num: allocation site line number
 *
 * Return: none
 */
void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
				uint8_t *file_name, uint32_t line_num)
{
	uint32_t i;
	unsigned long irq_flag;
	QDF_NBUF_TRACK *p_node;
	QDF_NBUF_TRACK *new_node;

	/* allocate the cookie outside the bucket lock */
	new_node = qdf_nbuf_track_alloc();

	i = qdf_net_buf_debug_hash(net_buf);
	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);

	p_node = qdf_net_buf_debug_look_up(net_buf);

	if (p_node) {
		/* already tracked: report and discard the fresh cookie */
		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
			  p_node->net_buf, p_node->file_name, p_node->line_num,
			  net_buf, kbasename(file_name), line_num);
		qdf_nbuf_track_free(new_node);
	} else {
		p_node = new_node;
		if (p_node) {
			p_node->net_buf = net_buf;
			qdf_str_lcopy(p_node->file_name, kbasename(file_name),
				      QDF_MEM_FILE_NAME_SIZE);
			p_node->line_num = line_num;
			p_node->size = size;
			qdf_mem_skb_inc(size);
			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
			gp_qdf_net_buf_track_tbl[i] = p_node;
		} else
			qdf_print(
				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
				  kbasename(file_name), line_num, size);
	}

	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
}
qdf_export_symbol(qdf_net_buf_debug_add_node);

/**
 * qdf_net_buf_debug_update_node() - update the recorded allocation site
 * @net_buf: tracked network buffer
 * @file_name: new file name to record
 * @line_num: new line number to record
 *
 * Silently does nothing if the buffer is not currently tracked.
 *
 * Return: none
 */
void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, uint8_t *file_name,
				   uint32_t line_num)
{
	uint32_t i;
	unsigned long irq_flag;
	QDF_NBUF_TRACK *p_node;

	i = qdf_net_buf_debug_hash(net_buf);
	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);

	p_node = qdf_net_buf_debug_look_up(net_buf);

	if (p_node) {
		qdf_str_lcopy(p_node->file_name, kbasename(file_name),
			      QDF_MEM_FILE_NAME_SIZE);
		p_node->line_num = line_num;
	}

	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
}

qdf_export_symbol(qdf_net_buf_debug_update_node);

/**
 * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
 * @net_buf: network buffer to stop tracking
 *
 * Return: none
 */
void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
{
	uint32_t i;
	QDF_NBUF_TRACK *p_head;
	QDF_NBUF_TRACK *p_node = NULL;
	unsigned long irq_flag;
	QDF_NBUF_TRACK *p_prev;

	i = qdf_net_buf_debug_hash(net_buf);
	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);

	p_head = gp_qdf_net_buf_track_tbl[i];

	/* Unallocated SKB */
	if (!p_head)
		goto done;

	/* p_node doubles as the "found entry" result below */
	p_node = p_head;
	/* Found at head of the table */
	if (p_head->net_buf == net_buf) {
		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
		goto done;
	}

	/* Search in collision list */
	while (p_node) {
		p_prev = p_node;
		p_node = p_node->p_next;
		if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
			p_prev->p_next = p_node->p_next;
			break;
		}
	}

done:
	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);

	if (p_node) {
		qdf_mem_skb_dec(p_node->size);
		qdf_nbuf_track_free(p_node);
	} else {
		qdf_print("Unallocated buffer ! Double free of net_buf %pK ?",
			  net_buf);
		QDF_BUG(0);
	}
}
qdf_export_symbol(qdf_net_buf_debug_delete_node);

/**
 * qdf_net_buf_debug_acquire_skb() - track an skb handed to us by the stack
 * @net_buf: network buffer to start tracking
 * @file_name: acquisition site file name
 * @line_num: acquisition site line number
 *
 * Return: none
 */
void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
				   uint8_t *file_name, uint32_t line_num)
{
	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);

	while (ext_list) {
		/*
		 * Take care to add if it is Jumbo packet connected using
		 * frag_list
		 */
		qdf_nbuf_t next;

		next = qdf_nbuf_queue_next(ext_list);
		qdf_net_buf_debug_add_node(ext_list, 0, file_name, line_num);
		ext_list = next;
	}
	qdf_net_buf_debug_add_node(net_buf, 0, file_name, line_num);
}
qdf_export_symbol(qdf_net_buf_debug_acquire_skb);

/**
 * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
 * @net_buf: Network buf holding head segment (single)
 *
 * WLAN driver module whose allocated SKB is freed by network stack are
 * supposed to call this API before returning SKB to network stack such
 * that the SKB is not reported as memory leak.
 *
 * Return: none
 */
void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
{
	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);

	while (ext_list) {
		/*
		 * Take care to free if it is Jumbo packet connected using
		 * frag_list
		 */
		qdf_nbuf_t next;

		next = qdf_nbuf_queue_next(ext_list);

		/* shared TSO segments stay tracked until the last user */
		if (qdf_nbuf_is_tso(ext_list) &&
		    qdf_nbuf_get_users(ext_list) > 1) {
			ext_list = next;
			continue;
		}

		qdf_net_buf_debug_delete_node(ext_list);
		ext_list = next;
	}

	if (qdf_nbuf_is_tso(net_buf) && qdf_nbuf_get_users(net_buf) > 1)
		return;

	qdf_net_buf_debug_delete_node(net_buf);
}
qdf_export_symbol(qdf_net_buf_debug_release_skb);

/**
 * qdf_nbuf_alloc_debug() - allocate an nbuf and record it in the tracker
 * @osdev: qdf device handle
 * @size: requested buffer size
 * @reserve: headroom to reserve
 * @align: alignment requirement
 * @prio: priority hint passed to the allocator
 * @file: caller file name (for leak reports)
 * @line: caller line number (for leak reports)
 *
 * Return: allocated nbuf, or NULL on failure
 */
qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
				int reserve, int align, int prio,
				uint8_t *file, uint32_t line)
{
	qdf_nbuf_t nbuf;

	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, file, line);

	/* Store SKB in internal QDF tracking table */
	if (qdf_likely(nbuf)) {
		qdf_net_buf_debug_add_node(nbuf, size, file, line);
		qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_ALLOC);
	} else {
		qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_ALLOC_FAILURE);
	}

	return nbuf;
}
qdf_export_symbol(qdf_nbuf_alloc_debug);

/**
 * qdf_nbuf_free_debug() - free an nbuf and drop it from the tracker
 * @nbuf: network buffer to free; NULL is a no-op
 * @file: caller file name
 * @line: caller line number
 *
 * Return: none
 */
void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, uint8_t *file, uint32_t line)
{
	qdf_nbuf_t ext_list;

	if (qdf_unlikely(!nbuf))
		return;

	/* shared TSO buffer: just drop our reference, keep tracking */
	if (qdf_nbuf_is_tso(nbuf) && qdf_nbuf_get_users(nbuf) > 1)
		goto free_buf;

	/* Remove SKB from internal QDF tracking table */
	qdf_nbuf_panic_on_free_if_mapped(nbuf, file, line);
	qdf_net_buf_debug_delete_node(nbuf);
	qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_FREE);

	/* Take care to delete the debug entries for frag_list */
	ext_list = qdf_nbuf_get_ext_list(nbuf);
	while (ext_list) {
		if (qdf_nbuf_get_users(ext_list) == 1) {
			qdf_nbuf_panic_on_free_if_mapped(ext_list, file, line);
			qdf_net_buf_debug_delete_node(ext_list);
		}

		ext_list = qdf_nbuf_queue_next(ext_list);
	}

free_buf:
	__qdf_nbuf_free(nbuf);
}
qdf_export_symbol(qdf_nbuf_free_debug);

/**
 * qdf_nbuf_clone_debug() - clone an nbuf and record the clone in the tracker
 * @buf: nbuf to clone
 * @file: caller file name
 * @line: caller line number
 *
 * Return: cloned nbuf, or NULL on failure
 */
qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, uint8_t *file, uint32_t line)
{
	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);

	if (qdf_unlikely(!cloned_buf))
		return NULL;

	/* Store SKB in internal QDF tracking table */
	qdf_net_buf_debug_add_node(cloned_buf, 0, file, line);
	qdf_nbuf_history_add(cloned_buf, file, line, QDF_NBUF_ALLOC_CLONE);

	return cloned_buf;
}
qdf_export_symbol(qdf_nbuf_clone_debug);

/**
 * qdf_nbuf_copy_debug() - copy an nbuf and record the copy in the tracker
 * @buf: nbuf to copy
 * @file: caller file name
 * @line: caller line number
 *
 * Return: copied nbuf, or NULL on failure
 */
qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, uint8_t *file, uint32_t line)
{
	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);

	if (qdf_unlikely(!copied_buf))
		return NULL;

	/* Store SKB in internal QDF tracking table */
	qdf_net_buf_debug_add_node(copied_buf, 0, file, line);
	qdf_nbuf_history_add(copied_buf, file, line, QDF_NBUF_ALLOC_COPY);

	return copied_buf;
}
qdf_export_symbol(qdf_nbuf_copy_debug);

#endif /* NBUF_MEMORY_DEBUG */

#if defined(FEATURE_TSO)

/**
 * struct qdf_tso_cmn_seg_info_t - TSO common info structure
 *
 * @ethproto: ethernet type of the msdu
 * @ip_tcp_hdr_len: ip + tcp length for the msdu
 * @l2_len: L2 length for the msdu
 * @eit_hdr: pointer to EIT header
 * @eit_hdr_len: EIT header length for the msdu
 * @eit_hdr_dma_map_addr: dma addr for EIT header
 * @tcphdr: pointer to tcp header
 * @ipv4_csum_en: ipv4 checksum enable
 * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
 * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
 * @ip_id: IP id
 * @tcp_seq_num: TCP sequence number
 *
 * This structure holds the TSO common info that is common
 * across all the TCP segments of the jumbo packet.
 */
struct qdf_tso_cmn_seg_info_t {
	uint16_t ethproto;
	uint16_t ip_tcp_hdr_len;
	uint16_t l2_len;
	uint8_t *eit_hdr;
	uint32_t eit_hdr_len;
	qdf_dma_addr_t eit_hdr_dma_map_addr;
	struct tcphdr *tcphdr;
	uint16_t ipv4_csum_en;
	uint16_t tcp_ipv4_csum_en;
	uint16_t tcp_ipv6_csum_en;
	uint16_t ip_id;
	uint32_t tcp_seq_num;
};

/**
 * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
 * information
 * @osdev: qdf device handle
 * @skb: skb buffer
 * @tso_info: Parameters common to all segments
 *
 * Get the TSO information that is common across all the TCP
 * segments of the jumbo packet. Also DMA-maps the combined
 * ethernet + IP + TCP header once for reuse by every segment.
 *
 * Return: 0 - success 1 - failure
 */
static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
	struct sk_buff *skb,
	struct qdf_tso_cmn_seg_info_t *tso_info)
{
	/* Get ethernet type and ethernet header length */
	tso_info->ethproto = vlan_get_protocol(skb);

	/* Determine whether this is an IPv4 or IPv6 packet */
	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
		/* for IPv4, get the IP ID and enable TCP and IP csum */
		struct iphdr *ipv4_hdr = ip_hdr(skb);

		tso_info->ip_id = ntohs(ipv4_hdr->id);
		tso_info->ipv4_csum_en = 1;
		tso_info->tcp_ipv4_csum_en = 1;
		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
			qdf_print("TSO IPV4 proto 0x%x not TCP\n",
				  ipv4_hdr->protocol);
			return 1;
		}
	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
		/* for IPv6, enable TCP csum. No IP ID or IP csum */
		tso_info->tcp_ipv6_csum_en = 1;
	} else {
		qdf_print("TSO: ethertype 0x%x is not supported!\n",
			  tso_info->ethproto);
		return 1;
	}
	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
	tso_info->tcphdr = tcp_hdr(skb);
	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
	/* get pointer to the ethernet + IP + TCP header and their length */
	tso_info->eit_hdr = skb->data;
	tso_info->eit_hdr_len = (skb_transport_header(skb)
		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
							tso_info->eit_hdr,
							tso_info->eit_hdr_len,
							DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(osdev->dev,
				       tso_info->eit_hdr_dma_map_addr))) {
		qdf_print("DMA mapping error!\n");
		qdf_assert(0);
		return 1;
	}

	if (tso_info->ethproto == htons(ETH_P_IP)) {
		/* include IPv4 header length for IPV4 (total length) */
		tso_info->ip_tcp_hdr_len =
			tso_info->eit_hdr_len - tso_info->l2_len;
	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
		/* exclude IPv6 header length for IPv6 (payload length) */
		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
	}
	/*
	 * The length of the payload (application layer data) is added to
	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
	 * descriptor.
	 */

	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u skb len %u\n", __func__,
		  tso_info->tcp_seq_num,
		  tso_info->eit_hdr_len,
		  tso_info->l2_len,
		  skb->len);
	return 0;
}


/**
 * __qdf_dmaaddr_to_32s - return high and low parts of dma_addr
 * @dmaaddr: DMA address to split
 * @lo: output for the low 32 bits
 * @hi: output for the high 32 bits (0 when dma_addr is 32-bit)
 *
 * Returns the high and low 32-bits of the DMA addr in the provided ptrs
 *
 * Return: N/A
 */
void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
			  uint32_t *lo, uint32_t *hi)
{
	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
		*lo = lower_32_bits(dmaaddr);
		*hi = upper_32_bits(dmaaddr);
	} else {
		*lo = dmaaddr;
		*hi = 0;
	}
}
qdf_export_symbol(__qdf_dmaaddr_to_32s);

/**
 * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
 *
 * @curr_seg: Segment whose contents are initialized
 * @tso_cmn_info: Parameters common to all segments
 *
 * Return: None
 */
static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
	struct qdf_tso_seg_elem_t *curr_seg,
	struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
{
	/* Initialize the flags to 0 */
	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));

	/*
	 * The following fields remain the same across all segments of
	 * a jumbo packet
	 */
	curr_seg->seg.tso_flags.tso_enable = 1;
	curr_seg->seg.tso_flags.ipv4_checksum_en =
		tso_cmn_info->ipv4_csum_en;
	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
		tso_cmn_info->tcp_ipv6_csum_en;
	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
		tso_cmn_info->tcp_ipv4_csum_en;
	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;

	/* The following fields change for the segments */
	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
	/* each segment consumes the next IP ID */
	tso_cmn_info->ip_id++;

	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
	curr_seg->seg.tso_flags.psh = tso_cmn_info->tcphdr->psh;
	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;

	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;

	/*
	 * First fragment for each segment always contains the ethernet,
	 * IP and TCP header
	 */
	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;

	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
		  __func__, __LINE__, tso_cmn_info->eit_hdr,
		  tso_cmn_info->eit_hdr_len,
		  curr_seg->seg.tso_flags.tcp_seq_num,
		  curr_seg->seg.total_len);
	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
}

/**
 * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
 * into segments
 * @osdev: qdf device handle
 * @skb: network buffer to be segmented
 * @tso_info: This is the output. The information about the
 * TSO segments will be populated within this.
 *
 * This function fragments a TCP jumbo packet into smaller
 * segments to be transmitted by the driver. It chains the TSO
 * segments created into a list.
+ * + * Return: number of TSO segments + */ +uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb, + struct qdf_tso_info_t *tso_info) +{ + /* common across all segments */ + struct qdf_tso_cmn_seg_info_t tso_cmn_info; + /* segment specific */ + void *tso_frag_vaddr; + qdf_dma_addr_t tso_frag_paddr = 0; + uint32_t num_seg = 0; + struct qdf_tso_seg_elem_t *curr_seg; + struct qdf_tso_num_seg_elem_t *total_num_seg; + struct skb_frag_struct *frag = NULL; + uint32_t tso_frag_len = 0; /* tso segment's fragment length*/ + uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/ + uint32_t skb_proc = skb->len; /* bytes of skb pending processing */ + uint32_t tso_seg_size = skb_shinfo(skb)->gso_size; + int j = 0; /* skb fragment index */ + + memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info)); + + if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev, + skb, &tso_cmn_info))) { + qdf_print("TSO: error getting common segment info\n"); + return 0; + } + + total_num_seg = tso_info->tso_num_seg_list; + curr_seg = tso_info->tso_seg_list; + + /* length of the first chunk of data in the skb */ + skb_frag_len = skb_headlen(skb); + + /* the 0th tso segment's 0th fragment always contains the EIT header */ + /* update the remaining skb fragment length and TSO segment length */ + skb_frag_len -= tso_cmn_info.eit_hdr_len; + skb_proc -= tso_cmn_info.eit_hdr_len; + + /* get the address to the next tso fragment */ + tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len; + /* get the length of the next tso fragment */ + tso_frag_len = min(skb_frag_len, tso_seg_size); + + if (tso_frag_len != 0) { + tso_frag_paddr = dma_map_single(osdev->dev, + tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE); + } + + if (unlikely(dma_mapping_error(osdev->dev, + tso_frag_paddr))) { + qdf_print("%s:%d DMA mapping error!\n", __func__, __LINE__); + qdf_assert(0); + return 0; + } + TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__, + __LINE__, skb_frag_len, tso_frag_len); 
+ num_seg = tso_info->num_segs; + tso_info->num_segs = 0; + tso_info->is_tso = 1; + total_num_seg->num_seg.tso_cmn_num_seg = 0; + + while (num_seg && curr_seg) { + int i = 1; /* tso fragment index */ + uint8_t more_tso_frags = 1; + + curr_seg->seg.num_frags = 0; + tso_info->num_segs++; + total_num_seg->num_seg.tso_cmn_num_seg++; + + __qdf_nbuf_fill_tso_cmn_seg_info(curr_seg, + &tso_cmn_info); + + if (unlikely(skb_proc == 0)) + return tso_info->num_segs; + + curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len; + curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len; + /* frag len is added to ip_len in while loop below*/ + + curr_seg->seg.num_frags++; + + while (more_tso_frags) { + if (tso_frag_len != 0) { + curr_seg->seg.tso_frags[i].vaddr = + tso_frag_vaddr; + curr_seg->seg.tso_frags[i].length = + tso_frag_len; + curr_seg->seg.total_len += tso_frag_len; + curr_seg->seg.tso_flags.ip_len += tso_frag_len; + curr_seg->seg.num_frags++; + skb_proc = skb_proc - tso_frag_len; + + /* increment the TCP sequence number */ + + tso_cmn_info.tcp_seq_num += tso_frag_len; + curr_seg->seg.tso_frags[i].paddr = + tso_frag_paddr; + } + + TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n", + __func__, __LINE__, + i, + tso_frag_len, + curr_seg->seg.total_len, + curr_seg->seg.tso_frags[i].vaddr); + + /* if there is no more data left in the skb */ + if (!skb_proc) + return tso_info->num_segs; + + /* get the next payload fragment information */ + /* check if there are more fragments in this segment */ + if (tso_frag_len < tso_seg_size) { + more_tso_frags = 1; + if (tso_frag_len != 0) { + tso_seg_size = tso_seg_size - + tso_frag_len; + i++; + if (curr_seg->seg.num_frags == + FRAG_NUM_MAX) { + more_tso_frags = 0; + /* + * reset i and the tso + * payload size + */ + i = 1; + tso_seg_size = + skb_shinfo(skb)-> + gso_size; + } + } + } else { + more_tso_frags = 0; + /* reset i and the tso payload size */ + i = 1; + tso_seg_size = skb_shinfo(skb)->gso_size; + } + + /* if the 
next fragment is contiguous */ + if ((tso_frag_len != 0) && (tso_frag_len < skb_frag_len)) { + tso_frag_vaddr = tso_frag_vaddr + tso_frag_len; + skb_frag_len = skb_frag_len - tso_frag_len; + tso_frag_len = min(skb_frag_len, tso_seg_size); + + } else { /* the next fragment is not contiguous */ + if (skb_shinfo(skb)->nr_frags == 0) { + qdf_print("TSO: nr_frags == 0!\n"); + qdf_assert(0); + return 0; + } + if (j >= skb_shinfo(skb)->nr_frags) { + qdf_print("TSO: nr_frags %d j %d\n", + skb_shinfo(skb)->nr_frags, j); + qdf_assert(0); + return 0; + } + frag = &skb_shinfo(skb)->frags[j]; + skb_frag_len = skb_frag_size(frag); + tso_frag_len = min(skb_frag_len, tso_seg_size); + tso_frag_vaddr = skb_frag_address_safe(frag); + j++; + } + + TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n", + __func__, __LINE__, skb_frag_len, tso_frag_len, + tso_seg_size); + + if (!(tso_frag_vaddr)) { + TSO_DEBUG("%s: Fragment virtual addr is NULL", + __func__); + return 0; + } + + tso_frag_paddr = + dma_map_single(osdev->dev, + tso_frag_vaddr, + tso_frag_len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(osdev->dev, + tso_frag_paddr))) { + qdf_print("%s:%d DMA mapping error!\n", + __func__, __LINE__); + qdf_assert(0); + return 0; + } + } + TSO_DEBUG("%s tcp_seq_num: %u", __func__, + curr_seg->seg.tso_flags.tcp_seq_num); + num_seg--; + /* if TCP FIN flag was set, set it in the last segment */ + if (!num_seg) + curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin; + + qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO); + curr_seg = curr_seg->next; + } + return tso_info->num_segs; +} +qdf_export_symbol(__qdf_nbuf_get_tso_info); + +/** + * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element + * + * @osdev: qdf device handle + * @tso_seg: TSO segment element to be unmapped + * @is_last_seg: whether this is last tso seg or not + * + * Return: none + */ +void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev, + struct qdf_tso_seg_elem_t *tso_seg, + 
bool is_last_seg) +{ + uint32_t num_frags = 0; + + if (tso_seg->seg.num_frags > 0) + num_frags = tso_seg->seg.num_frags - 1; + + /*Num of frags in a tso seg cannot be less than 2 */ + if (num_frags < 1) { + qdf_assert(0); + qdf_print("ERROR: num of frags in a tso segment is %d\n", + (num_frags + 1)); + return; + } + + while (num_frags) { + /*Do dma unmap the tso seg except the 0th frag */ + if (0 == tso_seg->seg.tso_frags[num_frags].paddr) { + qdf_print("ERROR: TSO seg frag %d mapped physical address is NULL\n", + num_frags); + qdf_assert(0); + return; + } + dma_unmap_single(osdev->dev, + tso_seg->seg.tso_frags[num_frags].paddr, + tso_seg->seg.tso_frags[num_frags].length, + __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE)); + tso_seg->seg.tso_frags[num_frags].paddr = 0; + num_frags--; + qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO); + } + + if (is_last_seg) { + /*Do dma unmap for the tso seg 0th frag */ + if (0 == tso_seg->seg.tso_frags[0].paddr) { + qdf_print("ERROR: TSO seg frag 0 mapped physical address is NULL\n"); + qdf_assert(0); + return; + } + dma_unmap_single(osdev->dev, + tso_seg->seg.tso_frags[0].paddr, + tso_seg->seg.tso_frags[0].length, + __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE)); + tso_seg->seg.tso_frags[0].paddr = 0; + qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST); + } +} +qdf_export_symbol(__qdf_nbuf_unmap_tso_segment); + +/** + * __qdf_nbuf_get_tso_num_seg() - function to divide a TSO nbuf + * into segments + * @nbuf: network buffer to be segmented + * @tso_info: This is the output. The information about the + * TSO segments will be populated within this. + * + * This function fragments a TCP jumbo packet into smaller + * segments to be transmitted by the driver. It chains the TSO + * segments created into a list. 
+ * + * Return: 0 - success, 1 - failure + */ +#ifndef BUILD_X86 +uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb) +{ + uint32_t tso_seg_size = skb_shinfo(skb)->gso_size; + uint32_t remainder, num_segs = 0; + uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags; + uint8_t frags_per_tso = 0; + uint32_t skb_frag_len = 0; + uint32_t eit_hdr_len = (skb_transport_header(skb) + - skb_mac_header(skb)) + tcp_hdrlen(skb); + struct skb_frag_struct *frag = NULL; + int j = 0; + uint32_t temp_num_seg = 0; + + /* length of the first chunk of data in the skb minus eit header*/ + skb_frag_len = skb_headlen(skb) - eit_hdr_len; + + /* Calculate num of segs for skb's first chunk of data*/ + remainder = skb_frag_len % tso_seg_size; + num_segs = skb_frag_len / tso_seg_size; + /** + * Remainder non-zero and nr_frags zero implies end of skb data. + * In that case, one more tso seg is required to accommodate + * remaining data, hence num_segs++. If nr_frags is non-zero, + * then remaining data will be accomodated while doing the calculation + * for nr_frags data. Hence, frags_per_tso++. + */ + if (remainder) { + if (!skb_nr_frags) + num_segs++; + else + frags_per_tso++; + } + + while (skb_nr_frags) { + if (j >= skb_shinfo(skb)->nr_frags) { + qdf_print("TSO: nr_frags %d j %d\n", + skb_shinfo(skb)->nr_frags, j); + qdf_assert(0); + return 0; + } + /** + * Calculate the number of tso seg for nr_frags data: + * Get the length of each frag in skb_frag_len, add to + * remainder.Get the number of segments by dividing it to + * tso_seg_size and calculate the new remainder. + * Decrement the nr_frags value and keep + * looping all the skb_fragments. 
+	 */
+		frag = &skb_shinfo(skb)->frags[j];
+		skb_frag_len = skb_frag_size(frag);
+		temp_num_seg = num_segs;
+		remainder += skb_frag_len;
+		num_segs += remainder / tso_seg_size;
+		remainder = remainder % tso_seg_size;
+		skb_nr_frags--;
+		if (remainder) {
+			if (num_segs > temp_num_seg)
+				frags_per_tso = 0;
+			/**
+			 * increment the tso per frags whenever remainder is
+			 * positive. If frags_per_tso reaches the (max-1),
+			 * [First frags always have EIT header, therefore max-1]
+			 * increment the num_segs as no more data can be
+			 * accommodated in the curr tso seg. Reset the remainder
+			 * and frags per tso and keep looping.
+			 */
+			frags_per_tso++;
+			if (frags_per_tso == FRAG_NUM_MAX - 1) {
+				num_segs++;
+				frags_per_tso = 0;
+				remainder = 0;
+			}
+			/**
+			 * If this is the last skb frag and still remainder is
+			 * non-zero(frags_per_tso is not reached to the max-1)
+			 * then increment the num_segs to take care of the
+			 * remaining length.
+			 */
+			if (!skb_nr_frags && remainder) {
+				num_segs++;
+				frags_per_tso = 0;
+			}
+		} else {
+			/* Whenever remainder is 0, reset the frags_per_tso.
*/ + frags_per_tso = 0; + } + j++; + } + + return num_segs; +} +#else +uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb) +{ + uint32_t i, gso_size, tmp_len, num_segs = 0; + struct skb_frag_struct *frag = NULL; + + /* + * Check if the head SKB or any of frags are allocated in < 0x50000000 + * region which cannot be accessed by Target + */ + if (virt_to_phys(skb->data) < 0x50000040) { + TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n", + __func__, __LINE__, skb_shinfo(skb)->nr_frags, + virt_to_phys(skb->data)); + goto fail; + + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + + if (!frag) + goto fail; + + if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040) + goto fail; + } + + + gso_size = skb_shinfo(skb)->gso_size; + tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb)) + + tcp_hdrlen(skb)); + while (tmp_len) { + num_segs++; + if (tmp_len > gso_size) + tmp_len -= gso_size; + else + break; + } + + return num_segs; + + /* + * Do not free this frame, just do socket level accounting + * so that this is not reused. + */ +fail: + if (skb->sk) + atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc)); + + return 0; +} +#endif +qdf_export_symbol(__qdf_nbuf_get_tso_num_seg); + +#endif /* FEATURE_TSO */ + +struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb) +{ + qdf_nbuf_users_inc(&skb->users); + return skb; +} +qdf_export_symbol(__qdf_nbuf_inc_users); + +int __qdf_nbuf_get_users(struct sk_buff *skb) +{ + return qdf_nbuf_users_read(&skb->users); +} +qdf_export_symbol(__qdf_nbuf_get_users); + +/** + * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free. 
+ * @skb: sk_buff handle + * + * Return: none + */ + +void __qdf_nbuf_ref(struct sk_buff *skb) +{ + skb_get(skb); +} +qdf_export_symbol(__qdf_nbuf_ref); + +/** + * __qdf_nbuf_shared() - Check whether the buffer is shared + * @skb: sk_buff buffer + * + * Return: true if more than one person has a reference to this buffer. + */ +int __qdf_nbuf_shared(struct sk_buff *skb) +{ + return skb_shared(skb); +} +qdf_export_symbol(__qdf_nbuf_shared); + +/** + * __qdf_nbuf_dmamap_create() - create a DMA map. + * @osdev: qdf device handle + * @dmap: dma map handle + * + * This can later be used to map networking buffers. They : + * - need space in adf_drv's software descriptor + * - are typically created during adf_drv_create + * - need to be created before any API(qdf_nbuf_map) that uses them + * + * Return: QDF STATUS + */ +QDF_STATUS +__qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap) +{ + QDF_STATUS error = QDF_STATUS_SUCCESS; + /* + * driver can tell its SG capablity, it must be handled. 
+ * Bounce buffers if they are there + */ + (*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL); + if (!(*dmap)) + error = QDF_STATUS_E_NOMEM; + + return error; +} +qdf_export_symbol(__qdf_nbuf_dmamap_create); +/** + * __qdf_nbuf_dmamap_destroy() - delete a dma map + * @osdev: qdf device handle + * @dmap: dma map handle + * + * Return: none + */ +void +__qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap) +{ + kfree(dmap); +} +qdf_export_symbol(__qdf_nbuf_dmamap_destroy); + +/** + * __qdf_nbuf_map_nbytes_single() - map nbytes + * @osdev: os device + * @buf: buffer + * @dir: direction + * @nbytes: number of bytes + * + * Return: QDF_STATUS + */ +#ifdef A_SIMOS_DEVHOST +QDF_STATUS __qdf_nbuf_map_nbytes_single( + qdf_device_t osdev, struct sk_buff *buf, + qdf_dma_dir_t dir, int nbytes) +{ + qdf_dma_addr_t paddr; + + QDF_NBUF_CB_PADDR(buf) = paddr = buf->data; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_nbuf_map_nbytes_single); +#else +QDF_STATUS __qdf_nbuf_map_nbytes_single( + qdf_device_t osdev, struct sk_buff *buf, + qdf_dma_dir_t dir, int nbytes) +{ + qdf_dma_addr_t paddr; + + /* assume that the OS only provides a single fragment */ + QDF_NBUF_CB_PADDR(buf) = paddr = + dma_map_single(osdev->dev, buf->data, + nbytes, __qdf_dma_dir_to_os(dir)); + return dma_mapping_error(osdev->dev, paddr) ? 
+ QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_nbuf_map_nbytes_single); +#endif +/** + * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes + * @osdev: os device + * @buf: buffer + * @dir: direction + * @nbytes: number of bytes + * + * Return: none + */ +#if defined(A_SIMOS_DEVHOST) +void +__qdf_nbuf_unmap_nbytes_single( + qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes) +{ +} +qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single); + +#else +void +__qdf_nbuf_unmap_nbytes_single( + qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes) +{ + if (0 == QDF_NBUF_CB_PADDR(buf)) { + qdf_print("ERROR: NBUF mapped physical address is NULL\n"); + return; + } + dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf), + nbytes, __qdf_dma_dir_to_os(dir)); +} +qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single); +#endif +/** + * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf + * @osdev: os device + * @skb: skb handle + * @dir: dma direction + * @nbytes: number of bytes to be mapped + * + * Return: QDF_STATUS + */ +#ifdef QDF_OS_DEBUG +QDF_STATUS +__qdf_nbuf_map_nbytes( + qdf_device_t osdev, + struct sk_buff *skb, + qdf_dma_dir_t dir, + int nbytes) +{ + struct skb_shared_info *sh = skb_shinfo(skb); + + qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE)); + + /* + * Assume there's only a single fragment. + * To support multiple fragments, it would be necessary to change + * adf_nbuf_t to be a separate object that stores meta-info + * (including the bus address for each fragment) and a pointer + * to the underlying sk_buff. 
+ */ + qdf_assert(sh->nr_frags == 0); + + return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes); +} +qdf_export_symbol(__qdf_nbuf_map_nbytes); +#else +QDF_STATUS +__qdf_nbuf_map_nbytes( + qdf_device_t osdev, + struct sk_buff *skb, + qdf_dma_dir_t dir, + int nbytes) +{ + return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes); +} +qdf_export_symbol(__qdf_nbuf_map_nbytes); +#endif +/** + * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf + * @osdev: OS device + * @skb: skb handle + * @dir: direction + * @nbytes: number of bytes + * + * Return: none + */ +void +__qdf_nbuf_unmap_nbytes( + qdf_device_t osdev, + struct sk_buff *skb, + qdf_dma_dir_t dir, + int nbytes) +{ + qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE)); + + /* + * Assume there's a single fragment. + * If this is not true, the assertion in __adf_nbuf_map will catch it. + */ + __qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes); +} +qdf_export_symbol(__qdf_nbuf_unmap_nbytes); + +/** + * __qdf_nbuf_dma_map_info() - return the dma map info + * @bmap: dma map + * @sg: dma map info + * + * Return: none + */ +void +__qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg) +{ + qdf_assert(bmap->mapped); + qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER); + + memcpy(sg->dma_segs, bmap->seg, bmap->nsegs * + sizeof(struct __qdf_segment)); + sg->nsegs = bmap->nsegs; +} +qdf_export_symbol(__qdf_nbuf_dma_map_info); +/** + * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. 
is + * specified by the index + * @skb: sk buff + * @sg: scatter/gather list of all the frags + * + * Return: none + */ +#if defined(__QDF_SUPPORT_FRAG_MEM) +void +__qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg) +{ + qdf_assert(skb != NULL); + sg->sg_segs[0].vaddr = skb->data; + sg->sg_segs[0].len = skb->len; + sg->nsegs = 1; + + for (int i = 1; i <= sh->nr_frags; i++) { + skb_frag_t *f = &sh->frags[i - 1]; + + sg->sg_segs[i].vaddr = (uint8_t *)(page_address(f->page) + + f->page_offset); + sg->sg_segs[i].len = f->size; + + qdf_assert(i < QDF_MAX_SGLIST); + } + sg->nsegs += i; + +} +qdf_export_symbol(__qdf_nbuf_frag_info); +#else +#ifdef QDF_OS_DEBUG +void +__qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg) +{ + + struct skb_shared_info *sh = skb_shinfo(skb); + + qdf_assert(skb != NULL); + sg->sg_segs[0].vaddr = skb->data; + sg->sg_segs[0].len = skb->len; + sg->nsegs = 1; + + qdf_assert(sh->nr_frags == 0); +} +qdf_export_symbol(__qdf_nbuf_frag_info); +#else +void +__qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg) +{ + sg->sg_segs[0].vaddr = skb->data; + sg->sg_segs[0].len = skb->len; + sg->nsegs = 1; +} +qdf_export_symbol(__qdf_nbuf_frag_info); +#endif +#endif +/** + * __qdf_nbuf_get_frag_size() - get frag size + * @nbuf: sk buffer + * @cur_frag: current frag + * + * Return: frag size + */ +uint32_t +__qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag) +{ + struct skb_shared_info *sh = skb_shinfo(nbuf); + const skb_frag_t *frag = sh->frags + cur_frag; + + return skb_frag_size(frag); +} +qdf_export_symbol(__qdf_nbuf_get_frag_size); + +/** + * __qdf_nbuf_frag_map() - dma map frag + * @osdev: os device + * @nbuf: sk buff + * @offset: offset + * @dir: direction + * @cur_frag: current fragment + * + * Return: QDF status + */ +#ifdef A_SIMOS_DEVHOST +QDF_STATUS __qdf_nbuf_frag_map( + qdf_device_t osdev, __qdf_nbuf_t nbuf, + int offset, qdf_dma_dir_t dir, int cur_frag) +{ + int32_t paddr, frag_len; + + QDF_NBUF_CB_PADDR(nbuf) = 
paddr = nbuf->data; + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_nbuf_frag_map); +#else +QDF_STATUS __qdf_nbuf_frag_map( + qdf_device_t osdev, __qdf_nbuf_t nbuf, + int offset, qdf_dma_dir_t dir, int cur_frag) +{ + dma_addr_t paddr, frag_len; + struct skb_shared_info *sh = skb_shinfo(nbuf); + const skb_frag_t *frag = sh->frags + cur_frag; + + frag_len = skb_frag_size(frag); + + QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr = + skb_frag_dma_map(osdev->dev, frag, offset, frag_len, + __qdf_dma_dir_to_os(dir)); + return dma_mapping_error(osdev->dev, paddr) ? + QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS; +} +qdf_export_symbol(__qdf_nbuf_frag_map); +#endif +/** + * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map + * @dmap: dma map + * @cb: callback + * @arg: argument + * + * Return: none + */ +void +__qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg) +{ + return; +} +qdf_export_symbol(__qdf_nbuf_dmamap_set_cb); + + +/** + * __qdf_nbuf_sync_single_for_cpu() - nbuf sync + * @osdev: os device + * @buf: sk buff + * @dir: direction + * + * Return: none + */ +#if defined(A_SIMOS_DEVHOST) +static void __qdf_nbuf_sync_single_for_cpu( + qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + return; +} +#else +static void __qdf_nbuf_sync_single_for_cpu( + qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir) +{ + if (0 == QDF_NBUF_CB_PADDR(buf)) { + qdf_print("ERROR: NBUF mapped physical address is NULL\n"); + return; + } + dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf), + skb_end_offset(buf) - skb_headroom(buf), + __qdf_dma_dir_to_os(dir)); +} +#endif +/** + * __qdf_nbuf_sync_for_cpu() - nbuf sync + * @osdev: os device + * @skb: sk buff + * @dir: direction + * + * Return: none + */ +void +__qdf_nbuf_sync_for_cpu(qdf_device_t osdev, + struct sk_buff *skb, qdf_dma_dir_t dir) +{ + qdf_assert( + (dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE)); + + /* + * Assume there's a single fragment. 
+ * If this is not true, the assertion in __adf_nbuf_map will catch it. + */ + __qdf_nbuf_sync_single_for_cpu(osdev, skb, dir); +} +qdf_export_symbol(__qdf_nbuf_sync_for_cpu); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) +/** + * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags + * @rx_status: Pointer to rx_status. + * @rtap_buf: Buf to which VHT info has to be updated. + * @rtap_len: Current length of radiotap buffer + * + * Return: Length of radiotap after VHT flags updated. + */ +static unsigned int qdf_nbuf_update_radiotap_vht_flags( + struct mon_rx_status *rx_status, + int8_t *rtap_buf, + uint32_t rtap_len) +{ + uint16_t vht_flags = 0; + + /* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */ + vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | + IEEE80211_RADIOTAP_VHT_KNOWN_GI | + IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM | + IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED | + IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH | + IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID; + put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]); + rtap_len += 2; + + rtap_buf[rtap_len] |= + (rx_status->is_stbc ? + IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) | + (rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) | + (rx_status->ldpc ? + IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) | + (rx_status->beamformed ? 
+ IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0); + rtap_len += 1; + switch (rx_status->vht_flag_values2) { + case IEEE80211_RADIOTAP_VHT_BW_20: + rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20; + break; + case IEEE80211_RADIOTAP_VHT_BW_40: + rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40; + break; + case IEEE80211_RADIOTAP_VHT_BW_80: + rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80; + break; + case IEEE80211_RADIOTAP_VHT_BW_160: + rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160; + break; + } + rtap_len += 1; + rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]); + rtap_len += 1; + rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]); + rtap_len += 1; + rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]); + rtap_len += 1; + rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]); + rtap_len += 1; + rtap_buf[rtap_len] = (rx_status->vht_flag_values4); + rtap_len += 1; + rtap_buf[rtap_len] = (rx_status->vht_flag_values5); + rtap_len += 1; + put_unaligned_le16(rx_status->vht_flag_values6, + &rtap_buf[rtap_len]); + rtap_len += 2; + + return rtap_len; +} + +/** + * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status + * @rx_status: Pointer to rx_status. + * @rtap_buf: buffer to which radiotap has to be updated + * @rtap_len: radiotap length + * + * API update high-efficiency (11ax) fields in the radiotap header + * + * Return: length of rtap_len updated. 
+ */ +static unsigned int +qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status, + int8_t *rtap_buf, uint32_t rtap_len) +{ + /* + * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16 + * Enable all "known" HE radiotap flags for now + */ + put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]); + rtap_len += 2; + qdf_info("he data %x %x %x %x %x %x", + rx_status->he_data1, + rx_status->he_data2, rx_status->he_data3, + rx_status->he_data4, rx_status->he_data5, + rx_status->he_data6); + return rtap_len; +} + + +/** + * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags + * @rx_status: Pointer to rx_status. + * @rtap_buf: buffer to which radiotap has to be updated + * @rtap_len: radiotap length + * + * API update HE-MU fields in the radiotap header + * + * Return: length of rtap_len updated. 
+ */ +static unsigned int +qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status, + int8_t *rtap_buf, uint32_t rtap_len) +{ + /* + * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4] + * Enable all "known" he-mu radiotap flags for now + */ + put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]); + rtap_len += 2; + + rtap_buf[rtap_len] = rx_status->he_RU[0]; + rtap_len += 1; + + rtap_buf[rtap_len] = rx_status->he_RU[1]; + rtap_len += 1; + + rtap_buf[rtap_len] = rx_status->he_RU[2]; + rtap_len += 1; + + rtap_buf[rtap_len] = rx_status->he_RU[3]; + rtap_len += 1; + qdf_info("he_flags %x %x he-RU %x %x %x %x", + rx_status->he_flags1, + rx_status->he_flags2, rx_status->he_RU[0], + rx_status->he_RU[1], rx_status->he_RU[2], + rx_status->he_RU[3]); + + return rtap_len; +} + +/** + * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags + * @rx_status: Pointer to rx_status. + * @rtap_buf: buffer to which radiotap has to be updated + * @rtap_len: radiotap length + * + * API update he-mu-other fields in the radiotap header + * + * Return: length of rtap_len updated. 
+ */ +static unsigned int +qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status, + int8_t *rtap_buf, uint32_t rtap_len) +{ + /* + * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8 + * Enable all "known" he-mu-other radiotap flags for now + */ + put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]); + rtap_len += 2; + + put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]); + rtap_len += 2; + + rtap_buf[rtap_len] = rx_status->he_per_user_position; + rtap_len += 1; + + rtap_buf[rtap_len] = rx_status->he_per_user_known; + rtap_len += 1; + qdf_info("he_per_user %x %x pos %x knwn %x", + rx_status->he_per_user_1, + rx_status->he_per_user_2, rx_status->he_per_user_position, + rx_status->he_per_user_known); + return rtap_len; +} + +#define NORMALIZED_TO_NOISE_FLOOR (-96) + +/* This is the length for radiotap, combined length + * (Mandatory part struct ieee80211_radiotap_header + RADIOTAP_HEADER_LEN) + * cannot be more than available headroom_sz. + * increase this when we add more radiotap elements. 
+ */ + +#define RADIOTAP_VHT_FLAGS_LEN 12 +#define RADIOTAP_HE_FLAGS_LEN 12 +#define RADIOTAP_HE_MU_FLAGS_LEN 8 +#define RADIOTAP_HE_MU_OTHER_FLAGS_LEN 18 +#define RADIOTAP_FIXED_HEADER_LEN 16 +#define RADIOTAP_HT_FLAGS_LEN 3 +#define RADIOTAP_AMPDU_STATUS_LEN 8 +#define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \ + RADIOTAP_FIXED_HEADER_LEN + \ + RADIOTAP_HT_FLAGS_LEN + \ + RADIOTAP_VHT_FLAGS_LEN + \ + RADIOTAP_AMPDU_STATUS_LEN + \ + RADIOTAP_HE_FLAGS_LEN + \ + RADIOTAP_HE_MU_FLAGS_LEN + \ + RADIOTAP_HE_MU_OTHER_FLAGS_LEN) + +#define IEEE80211_RADIOTAP_HE 23 +#define IEEE80211_RADIOTAP_HE_MU 24 +#define IEEE80211_RADIOTAP_HE_MU_OTHER 25 + +/** + * radiotap_num_to_freq() - Get frequency from chan number + * @chan_num - Input channel number + * + * Return - Channel frequency in Mhz + */ +static uint16_t radiotap_num_to_freq (uint16_t chan_num) +{ + if (chan_num == CHANNEL_NUM_14) + return CHANNEL_FREQ_2484; + if (chan_num < CHANNEL_NUM_14) + return CHANNEL_FREQ_2407 + + (chan_num * FREQ_MULTIPLIER_CONST_5MHZ); + + if (chan_num < CHANNEL_NUM_27) + return CHANNEL_FREQ_2512 + + ((chan_num - CHANNEL_NUM_15) * + FREQ_MULTIPLIER_CONST_20MHZ); + + if (chan_num > CHANNEL_NUM_182 && + chan_num < CHANNEL_NUM_197) + return ((chan_num * FREQ_MULTIPLIER_CONST_5MHZ) + + CHANNEL_FREQ_4000); + + return CHANNEL_FREQ_5000 + + (chan_num * FREQ_MULTIPLIER_CONST_5MHZ); +} + +/** + * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags + * @rx_status: Pointer to rx_status. + * @rtap_buf: Buf to which AMPDU info has to be updated. + * @rtap_len: Current length of radiotap buffer + * + * Return: Length of radiotap after AMPDU flags updated. 
+ */ +static unsigned int qdf_nbuf_update_radiotap_ampdu_flags( + struct mon_rx_status *rx_status, + uint8_t *rtap_buf, + uint32_t rtap_len) +{ + /* + * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 + * First 32 bits of AMPDU represents the reference number + */ + + uint32_t ampdu_reference_num = rx_status->ppdu_id; + uint16_t ampdu_flags = 0; + uint16_t ampdu_reserved_flags = 0; + + put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]); + rtap_len += 4; + put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]); + rtap_len += 2; + put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]); + rtap_len += 2; + + return rtap_len; +} + +/** + * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status + * @rx_status: Pointer to rx_status. + * @nbuf: nbuf pointer to which radiotap has to be updated + * @headroom_sz: Available headroom size. + * + * Return: length of rtap_len updated. + */ +unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status, + qdf_nbuf_t nbuf, uint32_t headroom_sz) +{ + uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0}; + struct ieee80211_radiotap_header *rthdr = + (struct ieee80211_radiotap_header *)rtap_buf; + uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header); + uint32_t rtap_len = rtap_hdr_len; + uint8_t length = rtap_len; + + /* IEEE80211_RADIOTAP_TSFT __le64 microseconds*/ + rthdr->it_present = cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); + put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]); + rtap_len += 8; + + /* IEEE80211_RADIOTAP_FLAGS u8 */ + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_FLAGS); + + if (rx_status->rs_fcs_err) + rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS; + + rtap_buf[rtap_len] = rx_status->rtap_flags; + rtap_len += 1; + + /* IEEE80211_RADIOTAP_RATE u8 500kb/s */ + if (!rx_status->ht_flags && !rx_status->vht_flags && + !rx_status->he_flags) { + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); + rtap_buf[rtap_len] = rx_status->rate; + } else + 
rtap_buf[rtap_len] = 0; + rtap_len += 1; + + /* IEEE80211_RADIOTAP_CHANNEL 2 x __le16 MHz, bitmap */ + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL); + rx_status->chan_freq = radiotap_num_to_freq(rx_status->chan_num); + put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]); + rtap_len += 2; + /* Channel flags. */ + if (rx_status->chan_num > CHANNEL_NUM_35) + rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL; + else + rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL; + if (rx_status->cck_flag) + rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL; + if (rx_status->ofdm_flag) + rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL; + put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]); + rtap_len += 2; + + /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8 decibels from one milliwatt + * (dBm) + */ + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); + /* + * rssi_comb is int dB, need to convert it to dBm. + * normalize value to noise floor of -96 dBm + */ + rtap_buf[rtap_len] = rx_status->rssi_comb + + NORMALIZED_TO_NOISE_FLOOR; + rtap_len += 1; + + /* IEEE80211_RADIOTAP_ANTENNA u8 antenna index */ + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_ANTENNA); + rtap_buf[rtap_len] = rx_status->nr_ant; + rtap_len += 1; + + if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) { + qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN"); + return 0; + } + + if (rx_status->ht_flags) { + length = rtap_len; + /* IEEE80211_RADIOTAP_VHT u8, u8, u8 */ + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); + rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW | + IEEE80211_RADIOTAP_MCS_HAVE_MCS | + IEEE80211_RADIOTAP_MCS_HAVE_GI; + rtap_len += 1; + + if (rx_status->sgi) + rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI; + if (rx_status->bw) + rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40; + else + rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20; + rtap_len += 1; + + rtap_buf[rtap_len] = rx_status->mcs; + 
rtap_len += 1; + + if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) { + qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN"); + return 0; + } + } + + if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) { + /* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */ + rthdr->it_present |= + cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS); + rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status, + rtap_buf, + rtap_len); + } + + if (rx_status->vht_flags) { + length = rtap_len; + /* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */ + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT); + rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status, + rtap_buf, + rtap_len); + + if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) { + qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN"); + return 0; + } + } + + if (rx_status->he_flags) { + length = rtap_len; + /* IEEE80211_RADIOTAP_HE */ + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE); + rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status, + rtap_buf, + rtap_len); + + if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) { + qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN"); + return 0; + } + } + + if (rx_status->he_mu_flags) { + length = rtap_len; + /* IEEE80211_RADIOTAP_HE-MU */ + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU); + rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status, + rtap_buf, + rtap_len); + + if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) { + qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN"); + return 0; + } + } + + if (rx_status->he_mu_other_flags) { + length = rtap_len; + /* IEEE80211_RADIOTAP_HE-MU-OTHER */ + rthdr->it_present |= + cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU_OTHER); + rtap_len = + qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status, + rtap_buf, + rtap_len); + + if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) { + qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN"); + return 0; + } + } 
+ + rthdr->it_len = cpu_to_le16(rtap_len); + + if (headroom_sz < rtap_len) { + qdf_print("ERROR: not enough space to update radiotap\n"); + return 0; + } + qdf_nbuf_push_head(nbuf, rtap_len); + qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len); + return rtap_len; +} +#else +static unsigned int qdf_nbuf_update_radiotap_vht_flags( + struct mon_rx_status *rx_status, + int8_t *rtap_buf, + uint32_t rtap_len) +{ + qdf_print("ERROR: struct ieee80211_radiotap_header not supported"); + return 0; +} + +unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status, + int8_t *rtap_buf, uint32_t rtap_len) +{ + qdf_print("ERROR: struct ieee80211_radiotap_header not supported"); + return 0; +} + +static unsigned int qdf_nbuf_update_radiotap_ampdu_flags( + struct mon_rx_status *rx_status, + uint8_t *rtap_buf, + uint32_t rtap_len) +{ + qdf_print("ERROR: struct ieee80211_radiotap_header not supported"); + return 0; +} + +unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status, + qdf_nbuf_t nbuf, uint32_t headroom_sz) +{ + qdf_print("ERROR: struct ieee80211_radiotap_header not supported"); + return 0; +} +#endif +qdf_export_symbol(qdf_nbuf_update_radiotap); + +/** + * __qdf_nbuf_reg_free_cb() - register nbuf free callback + * @cb_func_ptr: function pointer to the nbuf free callback + * + * This function registers a callback function for nbuf free. 
+ *
+ * Return: none
+ */
+void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
+{
+	nbuf_free_cb = cb_func_ptr;
+}
+
+/**
+ * qdf_nbuf_classify_pkt() - classify packet
+ * @skb - sk buff
+ *
+ * Return: none
+ */
+void qdf_nbuf_classify_pkt(struct sk_buff *skb)
+{
+	struct ethhdr *eh = (struct ethhdr *)skb->data;
+
+	/* check destination mac address is broadcast/multicast */
+	if (is_broadcast_ether_addr((uint8_t *)eh))
+		QDF_NBUF_CB_SET_BCAST(skb);
+	else if (is_multicast_ether_addr((uint8_t *)eh))
+		QDF_NBUF_CB_SET_MCAST(skb);
+
+	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
+		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
+			QDF_NBUF_CB_PACKET_TYPE_ARP;
+	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
+		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
+			QDF_NBUF_CB_PACKET_TYPE_DHCP;
+	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
+		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
+			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
+	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
+		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
+			QDF_NBUF_CB_PACKET_TYPE_WAPI;
+}
+qdf_export_symbol(qdf_nbuf_classify_pkt);
+
+void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
+{
+	qdf_nbuf_users_set(&nbuf->users, 1);
+	nbuf->data = nbuf->head + NET_SKB_PAD;
+	skb_reset_tail_pointer(nbuf);
+}
+qdf_export_symbol(__qdf_nbuf_init);
+
+#ifdef WLAN_FEATURE_FASTPATH
+void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
+{
+	qdf_nbuf_users_set(&nbuf->users, 1);
+	nbuf->data = nbuf->head + NET_SKB_PAD;
+	skb_reset_tail_pointer(nbuf);
+}
+qdf_export_symbol(qdf_nbuf_init_fast);
+#endif /* WLAN_FEATURE_FASTPATH */
+
+
+#ifdef QDF_NBUF_GLOBAL_COUNT
+#ifdef WLAN_DEBUGFS
+/**
+ * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
+ *
+ * Return void
+ */
+void __qdf_nbuf_mod_init(void)
+{
+	qdf_atomic_init(&nbuf_count);
+	qdf_debugfs_init();
+	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
+}
+
+/**
+ * __qdf_nbuf_mod_exit() - Uninitialization routine for qdf_nbuf
+ *
+ * Return void
+ */
+void __qdf_nbuf_mod_exit(void)
+{
+	qdf_debugfs_exit();
+}
+
+#else
+
+void
__qdf_nbuf_mod_init(void) +{ + qdf_atomic_init(&nbuf_count); +} + +void __qdf_nbuf_mod_exit(void) +{ +} + +#endif +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_perf.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_perf.c new file mode 100644 index 0000000000000000000000000000000000000000..157cb832b272244b9e43ccd9882e83d1d7b27aad --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_perf.c @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_perf + * This file provides OS dependent perf API's. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#ifdef QCA_PERF_PROFILING + +qdf_perf_entry_t perf_root = {{0, 0} }; + +/** + * qdf_perfmod_init() - Module init + * + * return: int + */ +int +qdf_perfmod_init(void) +{ + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, + "Perf Debug Module Init"); + INIT_LIST_HEAD(&perf_root.list); + INIT_LIST_HEAD(&perf_root.child); + perf_root.proc = proc_mkdir(PROCFS_PERF_DIRNAME, 0); + return 0; +} +qdf_export_symbol(qdf_perfmod_init); + +/** + * qdf_perfmod_exit() - Module exit + * + * Return: none + */ +void +qdf_perfmod_exit(void) +{ + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, + "Perf Debug Module Exit"); + remove_proc_entry(PROCFS_PERF_DIRNAME, 0); +} +qdf_export_symbol(qdf_perfmod_exit); + +/** + * __qdf_perf_init() - Create the perf entry + * @parent: parent perf id + * @id_name: name of perf id + * @type: type of perf counter + * + * return: perf id + */ +qdf_perf_id_t +__qdf_perf_init(qdf_perf_id_t parent, uint8_t *id_name, + qdf_perf_cntr_t type) +{ + qdf_perf_entry_t *entry = NULL; + qdf_perf_entry_t *pentry = PERF_ENTRY(parent); + + if (type >= CNTR_LAST) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s:%s Invalid perf-type", __FILE__, __func__); + goto done; + } + + if (!pentry) + pentry = &perf_root; + entry = kmalloc(sizeof(struct qdf_perf_entry), GFP_ATOMIC); + + if (!entry) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + " Out of Memory,:%s", __func__); + return NULL; + } + + memset(entry, 0, sizeof(struct qdf_perf_entry)); + + INIT_LIST_HEAD(&entry->list); + INIT_LIST_HEAD(&entry->child); + + spin_lock_init(&entry->lock_irq); + + list_add_tail(&entry->list, &pentry->child); + + entry->name = id_name; + entry->type = type; + + if (type == CNTR_GROUP) { + entry->proc = proc_mkdir(id_name, pentry->proc); + goto done; + } + + entry->parent = pentry; + entry->proc = create_proc_entry(id_name, S_IFREG|S_IRUGO|S_IWUSR, + 
pentry->proc); + entry->proc->data = entry; + entry->proc->read_proc = api_tbl[type].proc_read; + entry->proc->write_proc = api_tbl[type].proc_write; + + /* + * Initialize the Event with default values + */ + api_tbl[type].init(entry, api_tbl[type].def_val); + +done: + return entry; +} +qdf_export_symbol(__qdf_perf_init); + +/** + * __qdf_perf_destroy - Destroy the perf entry + * @id: pointer to qdf_perf_id_t + * + * @return: bool + */ +bool __qdf_perf_destroy(qdf_perf_id_t id) +{ + qdf_perf_entry_t *entry = PERF_ENTRY(id), + *parent = entry->parent; + + if (!list_empty(&entry->child)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "Child's are alive, Can't delete"); + return A_FALSE; + } + + remove_proc_entry(entry->name, parent->proc); + + list_del(&entry->list); + + vfree(entry); + + return true; +} +qdf_export_symbol(__qdf_perf_destroy); + +/** + * __qdf_perf_start - Start the sampling + * @id: Instance of qdf_perf_id_t + * + * Returns: none + */ +void __qdf_perf_start(qdf_perf_id_t id) +{ + qdf_perf_entry_t *entry = PERF_ENTRY(id); + + api_tbl[entry->type].sample(entry, 0); +} +qdf_export_symbol(__qdf_perf_start); + +/** + * __qdf_perf_end - Stop sampling + * @id: Instance of qdf_perf_id_t + * + * Returns: none + */ +void __qdf_perf_end(qdf_perf_id_t id) +{ + qdf_perf_entry_t *entry = PERF_ENTRY(id); + + api_tbl[entry->type].sample(entry, 1); +} +qdf_export_symbol(__qdf_perf_end); + +#endif /* QCA_PERF_PROFILING */ diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_threads.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_threads.c new file mode 100644 index 0000000000000000000000000000000000000000..ad4540cf0f813864faaf1ec5e4a8d4f0b6a50dc3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_threads.c @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_threads + * QCA driver framework (QDF) thread APIs + */ + +/* Include Files */ +#include +#include +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) +#include +#else +#include +#endif /* KERNEL_VERSION(4, 11, 0) */ +#include +#include +#include +#include +#include +#include + +/* Function declarations and documenation */ + +typedef int (*qdf_thread_os_func)(void *data); + +/** + * qdf_sleep() - sleep + * @ms_interval : Number of milliseconds to suspend the current thread. + * A value of 0 may or may not cause the current thread to yield. + * + * This function suspends the execution of the current thread + * until the specified time out interval elapses. + * + * Return: none + */ +void qdf_sleep(uint32_t ms_interval) +{ + if (in_interrupt()) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s cannot be called from interrupt context!!!", + __func__); + return; + } + msleep_interruptible(ms_interval); +} +qdf_export_symbol(qdf_sleep); + +/** + * qdf_sleep_us() - sleep + * @us_interval : Number of microseconds to suspend the current thread. + * A value of 0 may or may not cause the current thread to yield. 
+ * + * This function suspends the execution of the current thread + * until the specified time out interval elapses. + * + * Return : none + */ +void qdf_sleep_us(uint32_t us_interval) +{ + unsigned long timeout = usecs_to_jiffies(us_interval) + 1; + + if (in_interrupt()) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s cannot be called from interrupt context!!!", + __func__); + return; + } + + while (timeout && !signal_pending(current)) + timeout = schedule_timeout_interruptible(timeout); +} +qdf_export_symbol(qdf_sleep_us); + +/** + * qdf_busy_wait() - busy wait + * @us_interval : Number of microseconds to busy wait. + * + * This function places the current thread in busy wait until the specified + * time out interval elapses. If the interval is greater than 50us on WM, the + * behaviour is undefined. + * + * Return : none + */ +void qdf_busy_wait(uint32_t us_interval) +{ + udelay(us_interval); +} +qdf_export_symbol(qdf_busy_wait); + +void qdf_set_user_nice(qdf_thread_t *thread, long nice) +{ + set_user_nice(thread, nice); +} +qdf_export_symbol(qdf_set_user_nice); + +qdf_thread_t *qdf_create_thread(int (*thread_handler)(void *data), void *data, + const char thread_name[]) +{ + return kthread_create(thread_handler, data, thread_name); +} +qdf_export_symbol(qdf_create_thread); + +static uint16_t qdf_thread_id; + +qdf_thread_t *qdf_thread_run(qdf_thread_func callback, void *context) +{ + struct task_struct *thread; + + thread = kthread_create((qdf_thread_os_func)callback, context, + "qdf %u", qdf_thread_id++); + if (IS_ERR(thread)) + return NULL; + + get_task_struct(thread); + wake_up_process(thread); + + return thread; +} +qdf_export_symbol(qdf_thread_run); + +QDF_STATUS qdf_thread_join(qdf_thread_t *thread) +{ + QDF_STATUS status; + + QDF_BUG(thread); + + status = (QDF_STATUS)kthread_stop(thread); + put_task_struct(thread); + + return status; +} +qdf_export_symbol(qdf_thread_join); + +bool qdf_thread_should_stop(void) +{ + return 
kthread_should_stop(); +} +qdf_export_symbol(qdf_thread_should_stop); + +int qdf_wake_up_process(qdf_thread_t *thread) +{ + return wake_up_process(thread); +} +qdf_export_symbol(qdf_wake_up_process); + +/* save_stack_trace_tsk() is exported for: + * 1) non-arm architectures + * 2) arm architectures in kernel versions >=4.14 + * 3) backported kernels defining BACKPORTED_EXPORT_SAVE_STACK_TRACE_TSK_ARM + */ +#if (defined(WLAN_HOST_ARCH_ARM) && !WLAN_HOST_ARCH_ARM) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) || \ + defined(BACKPORTED_EXPORT_SAVE_STACK_TRACE_TSK_ARM) +#define QDF_PRINT_TRACE_COUNT 32 +void qdf_print_thread_trace(qdf_thread_t *thread) +{ + const int spaces = 4; + struct task_struct *task = thread; + unsigned long entries[QDF_PRINT_TRACE_COUNT] = {0}; + struct stack_trace trace = { + .nr_entries = 0, + .skip = 0, + .entries = &entries[0], + .max_entries = QDF_PRINT_TRACE_COUNT, + }; + + save_stack_trace_tsk(task, &trace); + print_stack_trace(&trace, spaces); +} +#else +void qdf_print_thread_trace(qdf_thread_t *thread) { } +#endif /* KERNEL_VERSION(4, 14, 0) */ +qdf_export_symbol(qdf_print_thread_trace); + +qdf_thread_t *qdf_get_current_task(void) +{ + return current; +} +qdf_export_symbol(qdf_get_current_task); diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_trace.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_trace.c new file mode 100644 index 0000000000000000000000000000000000000000..16887087cac927f93095c687afefe7a9f57b5141 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_trace.c @@ -0,0 +1,3574 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_trace + * QCA driver framework (QDF) trace APIs + * Trace, logging, and debugging definitions and APIs + */ + +/* Include Files */ +#include "qdf_str.h" +#include +#include + +/* macro to map qdf trace levels into the bitmask */ +#define QDF_TRACE_LEVEL_TO_MODULE_BITMASK(_level) ((1 << (_level))) + +#include +#include +static int qdf_pidx = -1; + +#include "qdf_time.h" +#include "qdf_mc_timer.h" + +/* Global qdf print id */ + +/* Preprocessor definitions and constants */ + +enum qdf_timestamp_unit qdf_log_timestamp_type = QDF_LOG_TIMESTAMP_UNIT; + +/** + * typedef struct module_trace_info - Trace level for a module, as a bitmask. + * The bits in this mask are ordered by QDF_TRACE_LEVEL. For example, + * each bit represents one of the bits in QDF_TRACE_LEVEL that may be turned + * on to have traces at that level logged, i.e. if QDF_TRACE_LEVEL_ERROR is + * == 2, then if bit 2 (low order) is turned ON, then ERROR traces will be + * printed to the trace log. Note that all bits turned OFF means no traces + * @module_trace_level: trace level + * @module_name_str: 3 character string name for the module + */ +typedef struct { + uint16_t module_trace_level; + unsigned char module_name_str[4]; +} module_trace_info; + +#define DP_TRACE_META_DATA_STRLEN 50 + +/* Array of static data that contains all of the per module trace + * information. 
This includes the trace level for the module and + * the 3 character 'name' of the module for marking the trace logs + */ +module_trace_info g_qdf_trace_info[QDF_MODULE_ID_MAX] = { + [QDF_MODULE_ID_TLSHIM] = {QDF_DEFAULT_TRACE_LEVEL, "DP"}, + [QDF_MODULE_ID_WMI] = {QDF_DEFAULT_TRACE_LEVEL, "WMI"}, + [QDF_MODULE_ID_HDD] = {QDF_DEFAULT_TRACE_LEVEL, "HDD"}, + [QDF_MODULE_ID_SME] = {QDF_DEFAULT_TRACE_LEVEL, "SME"}, + [QDF_MODULE_ID_PE] = {QDF_DEFAULT_TRACE_LEVEL, "PE "}, + [QDF_MODULE_ID_WMA] = {QDF_DEFAULT_TRACE_LEVEL, "WMA"}, + [QDF_MODULE_ID_SYS] = {QDF_DEFAULT_TRACE_LEVEL, "SYS"}, + [QDF_MODULE_ID_QDF] = {QDF_DEFAULT_TRACE_LEVEL, "QDF"}, + [QDF_MODULE_ID_SAP] = {QDF_DEFAULT_TRACE_LEVEL, "SAP"}, + [QDF_MODULE_ID_HDD_SOFTAP] = {QDF_DEFAULT_TRACE_LEVEL, "HSP"}, + [QDF_MODULE_ID_HDD_DATA] = {QDF_DEFAULT_TRACE_LEVEL, "HDP"}, + [QDF_MODULE_ID_HDD_SAP_DATA] = {QDF_DEFAULT_TRACE_LEVEL, "SDP"}, + [QDF_MODULE_ID_BMI] = {QDF_DEFAULT_TRACE_LEVEL, "BMI"}, + [QDF_MODULE_ID_HIF] = {QDF_DEFAULT_TRACE_LEVEL, "HIF"}, + [QDF_MODULE_ID_TXRX] = {QDF_DEFAULT_TRACE_LEVEL, "TRX"}, + [QDF_MODULE_ID_HTT] = {QDF_DEFAULT_TRACE_LEVEL, "HTT"}, + [QDF_MODULE_ID_SERIALIZATION] = {QDF_DEFAULT_TRACE_LEVEL, "SER"}, + [QDF_MODULE_ID_REGULATORY] = {QDF_DEFAULT_TRACE_LEVEL, "REG"}, +}; + +#ifdef TRACE_RECORD +/* Static and Global variables */ +static spinlock_t ltrace_lock; + +static qdf_trace_record_t g_qdf_trace_tbl[MAX_QDF_TRACE_RECORDS]; +/* global qdf trace data */ +static t_qdf_trace_data g_qdf_trace_data; +/* + * all the call back functions for dumping MTRACE messages from ring buffer + * are stored in qdf_trace_cb_table,these callbacks are initialized during init + * only so, we will make a copy of these call back functions and maintain in to + * qdf_trace_restore_cb_table. 
Incase if we make modifications to + * qdf_trace_cb_table, we can certainly retrieve all the call back functions + * back from Restore Table + */ +static tp_qdf_trace_cb qdf_trace_cb_table[QDF_MODULE_ID_MAX]; +static tp_qdf_trace_cb qdf_trace_restore_cb_table[QDF_MODULE_ID_MAX]; +#endif + +#ifdef WLAN_FEATURE_MEMDUMP_ENABLE +static tp_qdf_state_info_cb qdf_state_info_table[QDF_MODULE_ID_MAX]; +#endif + +#ifdef CONFIG_DP_TRACE +/* Static and Global variables */ +static spinlock_t l_dp_trace_lock; + +static struct qdf_dp_trace_record_s + g_qdf_dp_trace_tbl[MAX_QDF_DP_TRACE_RECORDS]; + +/* + * all the options to configure/control DP trace are + * defined in this structure + */ +static struct s_qdf_dp_trace_data g_qdf_dp_trace_data; +/* + * all the call back functions for dumping DPTRACE messages from ring buffer + * are stored in qdf_dp_trace_cb_table, callbacks are initialized during init + */ +static tp_qdf_dp_trace_cb qdf_dp_trace_cb_table[QDF_DP_TRACE_MAX + 1]; +#endif + +/** + * qdf_trace_set_level() - Set the trace level for a particular module + * @module: Module id + * @level : trace level + * + * Trace level is a member of the QDF_TRACE_LEVEL enumeration indicating + * the severity of the condition causing the trace message to be issued. + * More severe conditions are more likely to be logged. + * + * This is an external API that allows trace levels to be set for each module. + * + * Return: None + */ +void qdf_trace_set_level(QDF_MODULE_ID module, QDF_TRACE_LEVEL level) +{ + /* make sure the caller is passing in a valid LEVEL */ + if (level >= QDF_TRACE_LEVEL_MAX) { + pr_err("%s: Invalid trace level %d passed in!\n", __func__, + level); + return; + } + + /* Treat 'none' differently. NONE means we have to run off all + * the bits in the bit mask so none of the traces appear. 
Anything + * other than 'none' means we need to turn ON a bit in the bitmask + */ + if (QDF_TRACE_LEVEL_NONE == level) + g_qdf_trace_info[module].module_trace_level = + QDF_TRACE_LEVEL_NONE; + else + /* set the desired bit in the bit mask for the module trace + * level + */ + g_qdf_trace_info[module].module_trace_level |= + QDF_TRACE_LEVEL_TO_MODULE_BITMASK(level); +} +qdf_export_symbol(qdf_trace_set_level); + +/** + * qdf_trace_set_module_trace_level() - Set module trace level + * @module: Module id + * @level: Trace level for a module, as a bitmask as per 'module_trace_info' + * + * Sets the module trace level where the trace level is given as a bit mask + * + * Return: None + */ +void qdf_trace_set_module_trace_level(QDF_MODULE_ID module, uint32_t level) +{ + if (module < 0 || module >= QDF_MODULE_ID_MAX) { + pr_err("%s: Invalid module id %d passed\n", __func__, module); + return; + } + g_qdf_trace_info[module].module_trace_level = level; +} +qdf_export_symbol(qdf_trace_set_module_trace_level); + +/** + * qdf_trace_set_value() - Set module trace value + * @module: Module id + * @level: Trace level for a module, as a bitmask as per 'module_trace_info' + * @on: set/clear the desired bit in the bit mask + * + * Return: None + */ +void qdf_trace_set_value(QDF_MODULE_ID module, QDF_TRACE_LEVEL level, + uint8_t on) +{ + /* make sure the caller is passing in a valid LEVEL */ + if (level < 0 || level >= QDF_TRACE_LEVEL_MAX) { + pr_err("%s: Invalid trace level %d passed in!\n", __func__, + level); + return; + } + + /* make sure the caller is passing in a valid module */ + if (module < 0 || module >= QDF_MODULE_ID_MAX) { + pr_err("%s: Invalid module id %d passed in!\n", __func__, + module); + return; + } + + /* Treat 'none' differently. 
NONE means we have to turn off all + * the bits in the bit mask so none of the traces appear + */ + if (QDF_TRACE_LEVEL_NONE == level) { + g_qdf_trace_info[module].module_trace_level = + QDF_TRACE_LEVEL_NONE; + } + /* Treat 'All' differently. All means we have to turn on all + * the bits in the bit mask so all of the traces appear + */ + else if (QDF_TRACE_LEVEL_ALL == level) { + g_qdf_trace_info[module].module_trace_level = 0xFFFF; + } else { + if (on) + /* set the desired bit in the bit mask for the module + * trace level + */ + g_qdf_trace_info[module].module_trace_level |= + QDF_TRACE_LEVEL_TO_MODULE_BITMASK(level); + else + /* clear the desired bit in the bit mask for the module + * trace level + */ + g_qdf_trace_info[module].module_trace_level &= + ~(QDF_TRACE_LEVEL_TO_MODULE_BITMASK(level)); + } +} +qdf_export_symbol(qdf_trace_set_value); + +/** + * qdf_trace_get_level() - get the trace level + * @module: module Id + * @level: trace level + * + * This is an external API that returns a bool value to signify if a + * particular trace level is set for the specified module. + * A member of the QDF_TRACE_LEVEL enumeration indicating the severity + * of the condition causing the trace message to be issued. + * + * Note that individual trace levels are the only valid values + * for this API. QDF_TRACE_LEVEL_NONE and QDF_TRACE_LEVEL_ALL + * are not valid input and will return false + * + * Return: + * false - the specified trace level for the specified module is OFF + * true - the specified trace level for the specified module is ON + */ +bool qdf_trace_get_level(QDF_MODULE_ID module, QDF_TRACE_LEVEL level) +{ + bool trace_on = false; + + if ((QDF_TRACE_LEVEL_NONE == level) || + (QDF_TRACE_LEVEL_ALL == level) || (level >= QDF_TRACE_LEVEL_MAX)) { + trace_on = false; + } else { + trace_on = (level & g_qdf_trace_info[module].module_trace_level) + ? 
true : false; + } + + return trace_on; +} +qdf_export_symbol(qdf_trace_get_level); + +/** + * qdf_snprintf() - wrapper function to snprintf + * @str_buffer: string Buffer + * @size: defines the size of the data record + * @str_format: Format string in which the message to be logged. This format + * string contains printf-like replacement parameters, which follow + * this parameter in the variable argument list. + * + * Return: None + */ +void qdf_snprintf(char *str_buffer, unsigned int size, char *str_format, ...) +{ + va_list val; + + va_start(val, str_format); + snprintf(str_buffer, size, str_format, val); + va_end(val); +} +qdf_export_symbol(qdf_snprintf); + +#ifdef QDF_ENABLE_TRACING + +/** + * qdf_trace_msg() - externally called trace function + * @module: Module identifier a member of the QDF_MODULE_ID + * enumeration that identifies the module issuing the trace message. + * @level: Trace level a member of the QDF_TRACE_LEVEL enumeration + * indicating the severity of the condition causing the trace message + * to be issued. More severe conditions are more likely to be logged. + * @str_format: Format string in which the message to be logged. This format + * string contains printf-like replacement parameters, which follow + * this parameter in the variable argument list. + * + * Checks the level of severity and accordingly prints the trace messages + * + * Return: None + */ +void qdf_trace_msg(QDF_MODULE_ID module, QDF_TRACE_LEVEL level, + char *str_format, ...) 
+{ + va_list val; + + va_start(val, str_format); + qdf_trace_msg_cmn(qdf_pidx, module, level, str_format, val); + va_end(val); +} +qdf_export_symbol(qdf_trace_msg); + +void qdf_vtrace_msg(QDF_MODULE_ID module, QDF_TRACE_LEVEL level, + char *str_format, va_list val) +{ + qdf_trace_msg_cmn(qdf_pidx, module, level, str_format, val); +} +qdf_export_symbol(qdf_vtrace_msg); + +#define ROW_SIZE 16 +/* Buffer size = data bytes(2 hex chars plus space) + NULL */ +#define BUFFER_SIZE ((QDF_DP_TRACE_RECORD_SIZE * 3) + 1) + +/** + * qdf_trace_hex_dump() - externally called hex dump function + * @module: Module identifier a member of the QDF_MODULE_ID enumeration that + * identifies the module issuing the trace message. + * @level: Trace level a member of the QDF_TRACE_LEVEL enumeration indicating + * the severity of the condition causing the trace message to be + * issued. More severe conditions are more likely to be logged. + * @data: The base address of the buffer to be logged. + * @buf_len: The size of the buffer to be logged. + * + * Checks the level of severity and accordingly prints the trace messages + * + * Return: None + */ +void qdf_trace_hex_dump(QDF_MODULE_ID module, QDF_TRACE_LEVEL level, + void *data, int buf_len) +{ + const u8 *ptr = data; + int i = 0; + + if (!qdf_print_is_verbose_enabled(qdf_pidx, module, level)) + return; + + while (buf_len > 0) { + unsigned char linebuf[BUFFER_SIZE] = {0}; + int linelen = min(buf_len, ROW_SIZE); + + buf_len -= ROW_SIZE; + + hex_dump_to_buffer(ptr, linelen, ROW_SIZE, 1, + linebuf, sizeof(linebuf), false); + + qdf_trace_msg(module, level, "%.8x: %s", i, linebuf); + ptr += ROW_SIZE; + i += ROW_SIZE; + } +} +qdf_export_symbol(qdf_trace_hex_dump); + +#endif + +#ifdef TRACE_RECORD +/** + * qdf_trace_enable() - Enable MTRACE for specific modules + * @bitmask_of_module_id: Bitmask according to enum of the modules. 
+ * 32[dec] = 0010 0000 [bin] + * 64[dec] = 0100 0000 [bin] + * 128[dec] = 1000 0000 [bin] + * @enable: can be true or false true implies enabling MTRACE false implies + * disabling MTRACE. + * + * Enable MTRACE for specific modules whose bits are set in bitmask and enable + * is true. if enable is false it disables MTRACE for that module. set the + * bitmask according to enum value of the modules. + * This functions will be called when you issue ioctl as mentioned following + * [iwpriv wlan0 setdumplog ]. + * - Decimal number, i.e. 64 decimal value shows only SME module, + * 128 decimal value shows only PE module, 192 decimal value shows PE and SME. + * + * Return: None + */ +void qdf_trace_enable(uint32_t bitmask_of_module_id, uint8_t enable) +{ + int i; + + if (bitmask_of_module_id) { + for (i = 0; i < QDF_MODULE_ID_MAX; i++) { + if (((bitmask_of_module_id >> i) & 1)) { + if (enable) { + if (NULL != + qdf_trace_restore_cb_table[i]) { + qdf_trace_cb_table[i] = + qdf_trace_restore_cb_table[i]; + } + } else { + qdf_trace_restore_cb_table[i] = + qdf_trace_cb_table[i]; + qdf_trace_cb_table[i] = NULL; + } + } + } + } else { + if (enable) { + for (i = 0; i < QDF_MODULE_ID_MAX; i++) { + if (NULL != qdf_trace_restore_cb_table[i]) { + qdf_trace_cb_table[i] = + qdf_trace_restore_cb_table[i]; + } + } + } else { + for (i = 0; i < QDF_MODULE_ID_MAX; i++) { + qdf_trace_restore_cb_table[i] = + qdf_trace_cb_table[i]; + qdf_trace_cb_table[i] = NULL; + } + } + } +} +qdf_export_symbol(qdf_trace_enable); + +/** + * qdf_trace_init() - initializes qdf trace structures and variables + * + * Called immediately after cds_preopen, so that we can start recording HDD + * events ASAP. 
+ * + * Return: None + */ +void qdf_trace_init(void) +{ + uint8_t i; + + g_qdf_trace_data.head = INVALID_QDF_TRACE_ADDR; + g_qdf_trace_data.tail = INVALID_QDF_TRACE_ADDR; + g_qdf_trace_data.num = 0; + g_qdf_trace_data.enable = true; + g_qdf_trace_data.dump_count = DEFAULT_QDF_TRACE_DUMP_COUNT; + g_qdf_trace_data.num_since_last_dump = 0; + + for (i = 0; i < QDF_MODULE_ID_MAX; i++) { + qdf_trace_cb_table[i] = NULL; + qdf_trace_restore_cb_table[i] = NULL; + } +} +qdf_export_symbol(qdf_trace_init); + +/** + * qdf_trace() - puts the messages in to ring-buffer + * @module: Enum of module, basically module id. + * @code: Code to be recorded + * @session: Session ID of the log + * @data: Actual message contents + * + * This function will be called from each module who wants record the messages + * in circular queue. Before calling this functions make sure you have + * registered your module with qdf through qdf_trace_register function. + * + * Return: None + */ +void qdf_trace(uint8_t module, uint8_t code, uint16_t session, uint32_t data) +{ + tp_qdf_trace_record rec = NULL; + unsigned long flags; + char time[18]; + + if (!g_qdf_trace_data.enable) + return; + + /* if module is not registered, don't record for that module */ + if (NULL == qdf_trace_cb_table[module]) + return; + + qdf_get_time_of_the_day_in_hr_min_sec_usec(time, sizeof(time)); + /* Aquire the lock so that only one thread at a time can fill the ring + * buffer + */ + spin_lock_irqsave(<race_lock, flags); + + g_qdf_trace_data.num++; + + if (g_qdf_trace_data.num > MAX_QDF_TRACE_RECORDS) + g_qdf_trace_data.num = MAX_QDF_TRACE_RECORDS; + + if (INVALID_QDF_TRACE_ADDR == g_qdf_trace_data.head) { + /* first record */ + g_qdf_trace_data.head = 0; + g_qdf_trace_data.tail = 0; + } else { + /* queue is not empty */ + uint32_t tail = g_qdf_trace_data.tail + 1; + + if (MAX_QDF_TRACE_RECORDS == tail) + tail = 0; + + if (g_qdf_trace_data.head == tail) { + /* full */ + if (MAX_QDF_TRACE_RECORDS == ++g_qdf_trace_data.head) + 
g_qdf_trace_data.head = 0; + } + g_qdf_trace_data.tail = tail; + } + + rec = &g_qdf_trace_tbl[g_qdf_trace_data.tail]; + rec->code = code; + rec->session = session; + rec->data = data; + rec->qtime = qdf_get_log_timestamp(); + scnprintf(rec->time, sizeof(rec->time), "%s", time); + rec->module = module; + rec->pid = (in_interrupt() ? 0 : current->pid); + g_qdf_trace_data.num_since_last_dump++; + spin_unlock_irqrestore(<race_lock, flags); +} +qdf_export_symbol(qdf_trace); + +#ifdef ENABLE_MTRACE_LOG +void qdf_mtrace_log(QDF_MODULE_ID src_module, QDF_MODULE_ID dst_module, + uint16_t message_id, uint8_t vdev_id) +{ + uint32_t trace_log, payload; + static uint16_t counter; + + trace_log = (src_module << 23) | (dst_module << 15) | message_id; + payload = (vdev_id << 16) | counter++; + + QDF_TRACE(src_module, QDF_TRACE_LEVEL_TRACE, "%x %x", + trace_log, payload); +} + +qdf_export_symbol(qdf_mtrace_log); +#endif + +void qdf_mtrace(QDF_MODULE_ID src_module, QDF_MODULE_ID dst_module, + uint16_t message_id, uint8_t vdev_id, uint32_t data) +{ + qdf_trace(src_module, message_id, vdev_id, data); + qdf_mtrace_log(src_module, dst_module, message_id, vdev_id); +} + +qdf_export_symbol(qdf_mtrace); + +/** + * qdf_trace_spin_lock_init() - initializes the lock variable before use + * + * This function will be called from cds_alloc_global_context, we will have lock + * available to use ASAP + * + * Return: None + */ +QDF_STATUS qdf_trace_spin_lock_init(void) +{ + spin_lock_init(<race_lock); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_trace_spin_lock_init); + +/** + * qdf_trace_register() - registers the call back functions + * @module_iD: enum value of module + * @qdf_trace_callback: call back functions to display the messages in + * particular format. + * + * Registers the call back functions to display the messages in particular + * format mentioned in these call back functions. 
This functions should be + * called by interested module in their init part as we will be ready to + * register as soon as modules are up. + * + * Return: None + */ +void qdf_trace_register(QDF_MODULE_ID module_id, + tp_qdf_trace_cb qdf_trace_callback) +{ + qdf_trace_cb_table[module_id] = qdf_trace_callback; +} +qdf_export_symbol(qdf_trace_register); + +/** + * qdf_trace_dump_all() - Dump data from ring buffer via call back functions + * registered with QDF + * @p_mac: Context of particular module + * @code: Reason code + * @session: Session id of log + * @count: Number of lines to dump starting from tail to head + * + * This function will be called up on issueing ioctl call as mentioned following + * [iwpriv wlan0 dumplog 0 0 ] + * + * - number lines to dump starting from tail to head. + * + * - if anybody wants to know how many messages were + * recorded for particular module/s mentioned by setbit in bitmask from last + * messages. It is optional, if you don't provide then it will dump + * everything from buffer. 
+ * + * Return: None + */ +void qdf_trace_dump_all(void *p_mac, uint8_t code, uint8_t session, + uint32_t count, uint32_t bitmask_of_module) +{ + qdf_trace_record_t p_record; + int32_t i, tail; + + if (!g_qdf_trace_data.enable) { + QDF_TRACE(QDF_MODULE_ID_SYS, + QDF_TRACE_LEVEL_ERROR, "Tracing Disabled"); + return; + } + + QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_INFO, + "DPT: Total Records: %d, Head: %d, Tail: %d", + g_qdf_trace_data.num, g_qdf_trace_data.head, + g_qdf_trace_data.tail); + + /* aquire the lock so that only one thread at a time can read + * the ring buffer + */ + spin_lock(<race_lock); + + if (g_qdf_trace_data.head != INVALID_QDF_TRACE_ADDR) { + i = g_qdf_trace_data.head; + tail = g_qdf_trace_data.tail; + + if (count) { + if (count > g_qdf_trace_data.num) + count = g_qdf_trace_data.num; + if (tail >= (count - 1)) + i = tail - count + 1; + else if (count != MAX_QDF_TRACE_RECORDS) + i = MAX_QDF_TRACE_RECORDS - ((count - 1) - + tail); + } + + p_record = g_qdf_trace_tbl[i]; + /* right now we are not using num_since_last_dump member but + * in future we might re-visit and use this member to track + * how many latest messages got added while we were dumping + * from ring buffer + */ + g_qdf_trace_data.num_since_last_dump = 0; + spin_unlock(<race_lock); + for (;; ) { + if ((code == 0 || (code == p_record.code)) && + (qdf_trace_cb_table[p_record.module] != NULL)) { + if (0 == bitmask_of_module) { + qdf_trace_cb_table[p_record. + module] (p_mac, + &p_record, + (uint16_t) + i); + } else { + if (bitmask_of_module & + (1 << p_record.module)) { + qdf_trace_cb_table[p_record. 
+ module] + (p_mac, &p_record, + (uint16_t) i); + } + } + } + + if (i == tail) + break; + i += 1; + + spin_lock(<race_lock); + if (MAX_QDF_TRACE_RECORDS == i) { + i = 0; + p_record = g_qdf_trace_tbl[0]; + } else { + p_record = g_qdf_trace_tbl[i]; + } + spin_unlock(<race_lock); + } + } else { + spin_unlock(<race_lock); + } +} +qdf_export_symbol(qdf_trace_dump_all); +#endif + +#ifdef WLAN_FEATURE_MEMDUMP_ENABLE +/** + * qdf_register_debugcb_init() - initializes debug callbacks + * to NULL + * + * Return: None + */ +void qdf_register_debugcb_init(void) +{ + uint8_t i; + + for (i = 0; i < QDF_MODULE_ID_MAX; i++) + qdf_state_info_table[i] = NULL; +} +qdf_export_symbol(qdf_register_debugcb_init); + +/** + * qdf_register_debug_callback() - stores callback handlers to print + * state information + * @module_id: module id of layer + * @qdf_state_infocb: callback to be registered + * + * This function is used to store callback handlers to print + * state information + * + * Return: None + */ +void qdf_register_debug_callback(QDF_MODULE_ID module_id, + tp_qdf_state_info_cb qdf_state_infocb) +{ + qdf_state_info_table[module_id] = qdf_state_infocb; +} +qdf_export_symbol(qdf_register_debug_callback); + +/** + * qdf_state_info_dump_all() - it invokes callback of layer which registered + * its callback to print its state information. 
+ * @buf: buffer pointer to be passed + * @size: size of buffer to be filled + * @driver_dump_size: actual size of buffer used + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS qdf_state_info_dump_all(char *buf, uint16_t size, + uint16_t *driver_dump_size) +{ + uint8_t module, ret = QDF_STATUS_SUCCESS; + uint16_t buf_len = size; + char *buf_ptr = buf; + + for (module = 0; module < QDF_MODULE_ID_MAX; module++) { + if (NULL != qdf_state_info_table[module]) { + qdf_state_info_table[module](&buf_ptr, &buf_len); + if (!buf_len) { + ret = QDF_STATUS_E_NOMEM; + break; + } + } + } + + *driver_dump_size = size - buf_len; + return ret; +} +qdf_export_symbol(qdf_state_info_dump_all); +#endif + +#ifdef CONFIG_DP_TRACE +#define QDF_DP_TRACE_PREPEND_STR_SIZE 100 +/* + * one dp trace record can't be greater than 300 bytes. + * Max Size will be QDF_DP_TRACE_PREPEND_STR_SIZE(100) + BUFFER_SIZE(121). + * Always make sure to change this QDF_DP_TRACE_MAX_RECORD_SIZE + * value accordingly whenever above two mentioned MACRO value changes. + */ +#define QDF_DP_TRACE_MAX_RECORD_SIZE 300 + +static void qdf_dp_unused(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, uint8_t info) +{ + qdf_print("%s: QDF_DP_TRACE_MAX event should not be generated", + __func__); +} + +/** + * qdf_dp_trace_init() - enables the DP trace + * @live_mode_config: live mode configuration + * @thresh: high throughput threshold for disabling live mode + * @thresh_time_limit: max time to wait before deciding if thresh is crossed + * @verbosity: dptrace verbosity level + * @proto_bitmap: bitmap to enable/disable specific protocols + * + * Called during driver load to init dptrace + * + * A brief note on the 'thresh' param - + * Total # of packets received in a bandwidth timer interval beyond which + * DP Trace logging for data packets (including ICMP) will be disabled. + * In memory logging will still continue for these packets. 
Other packets for + * which proto.bitmap is set will continue to be recorded in logs and in memory. + + * Return: None + */ +void qdf_dp_trace_init(bool live_mode_config, uint8_t thresh, + uint16_t time_limit, uint8_t verbosity, + uint8_t proto_bitmap) +{ + uint8_t i; + + qdf_dp_trace_spin_lock_init(); + qdf_dp_trace_clear_buffer(); + g_qdf_dp_trace_data.enable = true; + g_qdf_dp_trace_data.no_of_record = 1; + + g_qdf_dp_trace_data.live_mode_config = live_mode_config; + g_qdf_dp_trace_data.live_mode = live_mode_config; + g_qdf_dp_trace_data.high_tput_thresh = thresh; + g_qdf_dp_trace_data.thresh_time_limit = time_limit; + g_qdf_dp_trace_data.proto_bitmap = proto_bitmap; + g_qdf_dp_trace_data.verbosity = verbosity; + g_qdf_dp_trace_data.ini_conf_verbosity = verbosity; + + for (i = 0; i < ARRAY_SIZE(qdf_dp_trace_cb_table); i++) + qdf_dp_trace_cb_table[i] = qdf_dp_display_record; + + qdf_dp_trace_cb_table[QDF_DP_TRACE_HDD_TX_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_HDD_RX_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_TX_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_RX_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_DROP_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_LI_DP_RX_PACKET_RECORD] = + qdf_dp_display_data_pkt_record; + + qdf_dp_trace_cb_table[QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_FREE_PACKET_PTR_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD] = + qdf_dp_display_ptr_record; + qdf_dp_trace_cb_table[QDF_DP_TRACE_EAPOL_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_DHCP_PACKET_RECORD] = + qdf_dp_display_proto_pkt_always; + qdf_dp_trace_cb_table[QDF_DP_TRACE_ARP_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_ICMP_PACKET_RECORD] = + qdf_dp_trace_cb_table[QDF_DP_TRACE_ICMPv6_PACKET_RECORD] = + 
qdf_dp_display_proto_pkt_debug; + qdf_dp_trace_cb_table[QDF_DP_TRACE_MGMT_PACKET_RECORD] = + qdf_dp_display_mgmt_pkt; + qdf_dp_trace_cb_table[QDF_DP_TRACE_EVENT_RECORD] = + qdf_dp_display_event_record; + + qdf_dp_trace_cb_table[QDF_DP_TRACE_MAX] = qdf_dp_unused; +} +qdf_export_symbol(qdf_dp_trace_init); + +/** + * qdf_dp_trace_set_value() - Configure the value to control DP trace + * @proto_bitmap: defines the protocol to be tracked + * @no_of_records: defines the nth packet which is traced + * @verbosity: defines the verbosity level + * + * Return: None + */ +void qdf_dp_trace_set_value(uint8_t proto_bitmap, uint8_t no_of_record, + uint8_t verbosity) +{ + g_qdf_dp_trace_data.proto_bitmap = proto_bitmap; + g_qdf_dp_trace_data.no_of_record = no_of_record; + g_qdf_dp_trace_data.verbosity = verbosity; + g_qdf_dp_trace_data.dynamic_verbosity_modify = true; +} +qdf_export_symbol(qdf_dp_trace_set_value); + +/** + * qdf_dp_trace_set_verbosity() - set verbosity value + * + * @val: Value to set + * + * Return: Null + */ +void qdf_dp_trace_set_verbosity(uint32_t val) +{ + g_qdf_dp_trace_data.verbosity = val; +} +qdf_export_symbol(qdf_dp_trace_set_verbosity); + +/** + * qdf_dp_get_verbosity) - get verbosity value + * + * Return: int + */ +uint8_t qdf_dp_get_verbosity(void) +{ + return g_qdf_dp_trace_data.verbosity; +} +qdf_export_symbol(qdf_dp_get_verbosity); + +/** + * qdf_dp_set_proto_bitmap() - set dp trace proto bitmap + * + * @val : unsigned bitmap to set + * + * Return: proto bitmap + */ +void qdf_dp_set_proto_bitmap(uint32_t val) +{ + g_qdf_dp_trace_data.proto_bitmap = val; +} +qdf_export_symbol(qdf_dp_set_proto_bitmap); + +/** + * qdf_dp_set_no_of_record() - set dp trace no_of_record + * + * @val : unsigned no_of_record to set + * + * Return: null + */ +void qdf_dp_set_no_of_record(uint32_t val) +{ + g_qdf_dp_trace_data.no_of_record = val; +} +qdf_export_symbol(qdf_dp_set_no_of_record); + +/** + * qdf_dp_get_no_of_record() - get dp trace no_of_record + * + * Return: 
number of records + */ +uint8_t qdf_dp_get_no_of_record(void) +{ + return g_qdf_dp_trace_data.no_of_record; +} +qdf_export_symbol(qdf_dp_get_no_of_record); + + +/** + * qdf_dp_trace_verbosity_check() - check whether verbosity level is enabled + * @code: defines the event + * + * In High verbosity all codes are logged. + * For Med/Low and Default case code which has + * less value than corresponding verbosity codes + * are logged. + * + * Return: true or false depends on whether tracing enabled + */ +static bool qdf_dp_trace_verbosity_check(enum QDF_DP_TRACE_ID code) +{ + switch (g_qdf_dp_trace_data.verbosity) { + case QDF_DP_TRACE_VERBOSITY_HIGH: + return true; + case QDF_DP_TRACE_VERBOSITY_MEDIUM: + if (code <= QDF_DP_TRACE_MED_VERBOSITY) + return true; + return false; + case QDF_DP_TRACE_VERBOSITY_LOW: + if (code <= QDF_DP_TRACE_LOW_VERBOSITY) + return true; + return false; + case QDF_DP_TRACE_VERBOSITY_ULTRA_LOW: + if (code <= QDF_DP_TRACE_ULTRA_LOW_VERBOSITY) + return true; + return false; + case QDF_DP_TRACE_VERBOSITY_BASE: + if (code <= QDF_DP_TRACE_BASE_VERBOSITY) + return true; + return false; + default: + return false; + } +} + +/** + * qdf_dp_get_proto_bitmap() - get dp trace proto bitmap + * + * Return: proto bitmap + */ +uint8_t qdf_dp_get_proto_bitmap(void) +{ + if (g_qdf_dp_trace_data.enable) + return g_qdf_dp_trace_data.proto_bitmap; + else + return 0; +} + +/** + * qdf_dp_trace_set_track() - Marks whether the packet needs to be traced + * @nbuf: defines the netbuf + * @dir: direction + * + * Return: None + */ +void qdf_dp_trace_set_track(qdf_nbuf_t nbuf, enum qdf_proto_dir dir) +{ + uint32_t count = 0; + + if (!g_qdf_dp_trace_data.enable) + return; + + spin_lock_bh(&l_dp_trace_lock); + if (QDF_TX == dir) + count = ++g_qdf_dp_trace_data.tx_count; + else if (QDF_RX == dir) + count = ++g_qdf_dp_trace_data.rx_count; + + if ((g_qdf_dp_trace_data.no_of_record != 0) && + (count % g_qdf_dp_trace_data.no_of_record == 0)) { + if (QDF_TX == dir) + 
QDF_NBUF_CB_TX_DP_TRACE(nbuf) = 1; + else if (QDF_RX == dir) + QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1; + } + spin_unlock_bh(&l_dp_trace_lock); +} +qdf_export_symbol(qdf_dp_trace_set_track); + +/* Number of bytes to be grouped together while printing DP-Trace data */ +#define QDF_DUMP_DP_GROUP_SIZE 6 + +/** + * dump_dp_hex_trace() - Display the data in buffer + * @prepend_str: string to prepend the hexdump with. + * @inbuf: buffer which contains data to be displayed + * @inbuf_len: defines the size of the data to be displayed + * + * Return: None + */ +static void +dump_dp_hex_trace(char *prepend_str, uint8_t *inbuf, uint8_t inbuf_len) +{ + unsigned char outbuf[BUFFER_SIZE]; + const uint8_t *inbuf_ptr = inbuf; + char *outbuf_ptr = outbuf; + int outbytes_written = 0; + + qdf_mem_set(outbuf, sizeof(outbuf), 0); + do { + outbytes_written += scnprintf(outbuf_ptr, + BUFFER_SIZE - outbytes_written, + "%02x", *inbuf_ptr); + outbuf_ptr = outbuf + outbytes_written; + + if ((inbuf_ptr - inbuf) && + (inbuf_ptr - inbuf + 1) % QDF_DUMP_DP_GROUP_SIZE == 0) { + outbytes_written += scnprintf(outbuf_ptr, + BUFFER_SIZE - outbytes_written, + " "); + outbuf_ptr = outbuf + outbytes_written; + } + inbuf_ptr++; + } while (inbuf_ptr < (inbuf + inbuf_len)); + DPTRACE_PRINT("%s %s", prepend_str, outbuf); +} + +/** + * qdf_dp_code_to_string() - convert dptrace code to string + * @code: dptrace code + * + * Return: string version of code + */ +static +const char *qdf_dp_code_to_string(enum QDF_DP_TRACE_ID code) +{ + switch (code) { + case QDF_DP_TRACE_DROP_PACKET_RECORD: + return "DROP:"; + case QDF_DP_TRACE_EAPOL_PACKET_RECORD: + return "EAPOL:"; + case QDF_DP_TRACE_DHCP_PACKET_RECORD: + return "DHCP:"; + case QDF_DP_TRACE_ARP_PACKET_RECORD: + return "ARP:"; + case QDF_DP_TRACE_ICMP_PACKET_RECORD: + return "ICMP:"; + case QDF_DP_TRACE_ICMPv6_PACKET_RECORD: + return "ICMPv6:"; + case QDF_DP_TRACE_MGMT_PACKET_RECORD: + return "MGMT:"; + case QDF_DP_TRACE_EVENT_RECORD: + return "EVENT:"; + case 
QDF_DP_TRACE_HDD_TX_PACKET_PTR_RECORD: + return "HDD: TX: PTR:"; + case QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD: + return "LI_DP: TX: PTR:"; + case QDF_DP_TRACE_HDD_TX_PACKET_RECORD: + return "HDD: TX: DATA:"; + case QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD: + case QDF_DP_TRACE_TX_PACKET_RECORD: + return "TX:"; + case QDF_DP_TRACE_CE_PACKET_PTR_RECORD: + return "CE: TX: PTR:"; + case QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD: + return "CE: TX: FAST: PTR:"; + case QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD: + return "CE: TX: FAST: ERR:"; + case QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD: + case QDF_DP_TRACE_FREE_PACKET_PTR_RECORD: + return "FREE: TX: PTR:"; + case QDF_DP_TRACE_RX_HTT_PACKET_PTR_RECORD: + return "HTT: RX: PTR:"; + case QDF_DP_TRACE_RX_OFFLOAD_HTT_PACKET_PTR_RECORD: + return "HTT: RX: OF: PTR:"; + case QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD: + return "HDD: RX: PTR:"; + case QDF_DP_TRACE_RX_LI_DP_PACKET_PTR_RECORD: + return "LI_DP: RX: PTR:"; + case QDF_DP_TRACE_HDD_RX_PACKET_RECORD: + return "HDD: RX: DATA:"; + case QDF_DP_TRACE_LI_DP_NULL_RX_PACKET_RECORD: + return "LI_DP_NULL: RX: DATA:"; + case QDF_DP_TRACE_LI_DP_RX_PACKET_RECORD: + case QDF_DP_TRACE_RX_PACKET_RECORD: + return "RX:"; + case QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD: + return "TXRX: TX: Q: PTR:"; + case QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD: + return "TXRX: TX: PTR:"; + case QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD: + return "TXRX: TX: FAST: PTR:"; + case QDF_DP_TRACE_HTT_PACKET_PTR_RECORD: + return "HTT: TX: PTR:"; + case QDF_DP_TRACE_HTC_PACKET_PTR_RECORD: + return "HTC: TX: PTR:"; + case QDF_DP_TRACE_HIF_PACKET_PTR_RECORD: + return "HIF: TX: PTR:"; + case QDF_DP_TRACE_RX_TXRX_PACKET_PTR_RECORD: + return "TXRX: RX: PTR:"; + case QDF_DP_TRACE_HDD_TX_TIMEOUT: + return "HDD: STA: TO:"; + case QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT: + return "HDD: SAP: TO:"; + default: + return "Invalid"; + } +} + +/** + * qdf_dp_dir_to_str() - convert direction to string + * @dir: direction + * + * Return: string 
version of direction + */ +static const char *qdf_dp_dir_to_str(enum qdf_proto_dir dir) +{ + switch (dir) { + case QDF_TX: + return " --> "; + case QDF_RX: + return " <-- "; + default: + return "invalid"; + } +} + +/** + * qdf_dp_type_to_str() - convert packet type to string + * @type: type + * + * Return: string version of packet type + */ +static const char *qdf_dp_type_to_str(enum qdf_proto_type type) +{ + switch (type) { + case QDF_PROTO_TYPE_DHCP: + return "DHCP"; + case QDF_PROTO_TYPE_EAPOL: + return "EAPOL"; + case QDF_PROTO_TYPE_ARP: + return "ARP"; + case QDF_PROTO_TYPE_ICMP: + return "ICMP"; + case QDF_PROTO_TYPE_ICMPv6: + return "ICMPv6"; + case QDF_PROTO_TYPE_MGMT: + return "MGMT"; + case QDF_PROTO_TYPE_EVENT: + return "EVENT"; + default: + return "invalid"; + } +} + +/** + * qdf_dp_subtype_to_str() - convert packet subtype to string + * @type: type + * + * Return: string version of packet subtype + */ +static const char *qdf_dp_subtype_to_str(enum qdf_proto_subtype subtype) +{ + switch (subtype) { + case QDF_PROTO_EAPOL_M1: + return "M1"; + case QDF_PROTO_EAPOL_M2: + return "M2"; + case QDF_PROTO_EAPOL_M3: + return "M3"; + case QDF_PROTO_EAPOL_M4: + return "M4"; + case QDF_PROTO_DHCP_DISCOVER: + return "DISC"; + case QDF_PROTO_DHCP_REQUEST: + return "REQ"; + case QDF_PROTO_DHCP_OFFER: + return "OFF"; + case QDF_PROTO_DHCP_ACK: + return "ACK"; + case QDF_PROTO_DHCP_NACK: + return "NACK"; + case QDF_PROTO_DHCP_RELEASE: + return "REL"; + case QDF_PROTO_DHCP_INFORM: + return "INFORM"; + case QDF_PROTO_DHCP_DECLINE: + return "DECL"; + case QDF_PROTO_ARP_REQ: + case QDF_PROTO_ICMP_REQ: + case QDF_PROTO_ICMPV6_REQ: + return "REQ"; + case QDF_PROTO_ARP_RES: + case QDF_PROTO_ICMP_RES: + case QDF_PROTO_ICMPV6_RES: + return "RSP"; + case QDF_PROTO_ICMPV6_RS: + return "RS"; + case QDF_PROTO_ICMPV6_RA: + return "RA"; + case QDF_PROTO_ICMPV6_NS: + return "NS"; + case QDF_PROTO_ICMPV6_NA: + return "NA"; + case QDF_PROTO_MGMT_ASSOC: + return "ASSOC"; + case 
QDF_PROTO_MGMT_DISASSOC: + return "DISASSOC"; + case QDF_PROTO_MGMT_AUTH: + return "AUTH"; + case QDF_PROTO_MGMT_DEAUTH: + return "DEAUTH"; + case QDF_ROAM_SYNCH: + return "ROAM SYNCH"; + case QDF_ROAM_COMPLETE: + return "ROAM COMP"; + case QDF_ROAM_EVENTID: + return "ROAM EVENTID"; + default: + return "invalid"; + } +} + +/** + * qdf_dp_enable_check() - check if dptrace, TX/RX tracing is enabled + * @nbuf: nbuf + * @code: dptrace code + * @dir: TX or RX direction + * + * Return: true/false + */ +static bool qdf_dp_enable_check(qdf_nbuf_t nbuf, enum QDF_DP_TRACE_ID code, + enum qdf_proto_dir dir) +{ + /* Return when Dp trace is not enabled */ + if (!g_qdf_dp_trace_data.enable) + return false; + + if (qdf_dp_trace_verbosity_check(code) == false) + return false; + + if (nbuf && (dir == QDF_TX && ((QDF_NBUF_CB_TX_DP_TRACE(nbuf) == 0) || + (QDF_NBUF_CB_TX_PACKET_TRACK(nbuf) != + QDF_NBUF_TX_PKT_DATA_TRACK)))) + return false; + + if (nbuf && (dir == QDF_RX && (QDF_NBUF_CB_RX_DP_TRACE(nbuf) == 0))) + return false; + + /* + * Special packets called with NULL nbuf and this API is expected to + * return true + */ + return true; +} + +/** + * qdf_dp_trace_fill_meta_str() - fill up a common meta string + * @prepend_str: pointer to string + * @size: size of prepend_str + * @rec_index: index of record + * @info: info related to the record + * @record: pointer to the record + * + * Return: ret value from scnprintf + */ +static inline +int qdf_dp_trace_fill_meta_str(char *prepend_str, int size, + int rec_index, uint8_t info, + struct qdf_dp_trace_record_s *record) +{ + char buffer[20]; + int ret = 0; + bool live = info & QDF_DP_TRACE_RECORD_INFO_LIVE ? true : false; + bool throttled = info & QDF_DP_TRACE_RECORD_INFO_THROTTLED ? + true : false; + + scnprintf(buffer, sizeof(buffer), "%llu", record->time); + ret = scnprintf(prepend_str, size, + "%s DPT: %04d:%02d%s %s", + throttled ? "*" : "", + rec_index, + record->pdev_id, + live ? 
"" : buffer, + qdf_dp_code_to_string(record->code)); + + return ret; +} + +/** + * qdf_dp_fill_record_data() - fill meta data and data into the record + * @rec: pointer to record data + * @data: pointer to data + * @data_size: size of the data + * @meta_data: pointer to metadata + * @metadata_size: size of metadata + * + * Should be called from within a spin_lock for the qdf record. + * Fills up rec->data with |metadata|data| + * + * Return: none + */ +static void qdf_dp_fill_record_data + (struct qdf_dp_trace_record_s *rec, + uint8_t *data, uint8_t data_size, + uint8_t *meta_data, uint8_t metadata_size) +{ + int32_t available = QDF_DP_TRACE_RECORD_SIZE; + uint8_t *rec_data = rec->data; + uint8_t data_to_copy = 0; + + qdf_mem_set(rec_data, QDF_DP_TRACE_RECORD_SIZE, 0); + + /* copy meta data */ + if (meta_data) { + if (metadata_size > available) { + QDF_TRACE_WARN(QDF_MODULE_ID_QDF, + "%s: meta data does not fit into the record", + __func__); + goto end; + } + qdf_mem_copy(rec_data, meta_data, metadata_size); + available = available - metadata_size; + } else { + metadata_size = 0; + } + + /* copy data */ + if (data && (data_size > 0) && (available > 0)) { + data_to_copy = data_size; + if (data_size > available) + data_to_copy = available; + qdf_mem_copy(&rec_data[metadata_size], data, data_to_copy); + } +end: + rec->size = data_to_copy; +} + +/** + * qdf_dp_add_record() - add dp trace record + * @code: dptrace code + * @pdev_id: pdev_id + * @print: true to print it in kmsg + * @data: data pointer + * @data_size: size of data to be copied + * @meta_data: meta data to be prepended to data + * @metadata_size: sizeof meta data + * @print: whether to print record + * + * Return: none + */ +static void qdf_dp_add_record(enum QDF_DP_TRACE_ID code, uint8_t pdev_id, + uint8_t *data, uint8_t data_size, + uint8_t *meta_data, uint8_t metadata_size, + bool print) + +{ + struct qdf_dp_trace_record_s *rec = NULL; + int index; + bool print_this_record = false; + u8 info = 0; + + if 
(code >= QDF_DP_TRACE_MAX) { + QDF_TRACE_ERROR(QDF_MODULE_ID_QDF, + "invalid record code %u, max code %u", + code, QDF_DP_TRACE_MAX); + return; + } + + spin_lock_bh(&l_dp_trace_lock); + + if (print || g_qdf_dp_trace_data.force_live_mode) { + print_this_record = true; + } else if (g_qdf_dp_trace_data.live_mode == 1) { + print_this_record = true; + g_qdf_dp_trace_data.print_pkt_cnt++; + if (g_qdf_dp_trace_data.print_pkt_cnt > + g_qdf_dp_trace_data.high_tput_thresh) { + g_qdf_dp_trace_data.live_mode = 0; + g_qdf_dp_trace_data.verbosity = + QDF_DP_TRACE_VERBOSITY_ULTRA_LOW; + info |= QDF_DP_TRACE_RECORD_INFO_THROTTLED; + } + } + + g_qdf_dp_trace_data.num++; + + if (g_qdf_dp_trace_data.num > MAX_QDF_DP_TRACE_RECORDS) + g_qdf_dp_trace_data.num = MAX_QDF_DP_TRACE_RECORDS; + + if (INVALID_QDF_DP_TRACE_ADDR == g_qdf_dp_trace_data.head) { + /* first record */ + g_qdf_dp_trace_data.head = 0; + g_qdf_dp_trace_data.tail = 0; + } else { + /* queue is not empty */ + g_qdf_dp_trace_data.tail++; + + if (MAX_QDF_DP_TRACE_RECORDS == g_qdf_dp_trace_data.tail) + g_qdf_dp_trace_data.tail = 0; + + if (g_qdf_dp_trace_data.head == g_qdf_dp_trace_data.tail) { + /* full */ + if (MAX_QDF_DP_TRACE_RECORDS == + ++g_qdf_dp_trace_data.head) + g_qdf_dp_trace_data.head = 0; + } + } + + rec = &g_qdf_dp_trace_tbl[g_qdf_dp_trace_data.tail]; + index = g_qdf_dp_trace_data.tail; + rec->code = code; + rec->pdev_id = pdev_id; + rec->size = 0; + qdf_dp_fill_record_data(rec, data, data_size, + meta_data, metadata_size); + rec->time = qdf_get_log_timestamp(); + rec->pid = (in_interrupt() ? 
0 : current->pid); + + if (rec->code >= QDF_DP_TRACE_MAX) { + QDF_TRACE_ERROR(QDF_MODULE_ID_QDF, + "invalid record code %u, max code %u", + rec->code, QDF_DP_TRACE_MAX); + return; + } + + spin_unlock_bh(&l_dp_trace_lock); + + info |= QDF_DP_TRACE_RECORD_INFO_LIVE; + if (print_this_record) + qdf_dp_trace_cb_table[rec->code] (rec, index, + QDF_TRACE_DEFAULT_PDEV_ID, info); +} + + +/** + * qdf_log_icmpv6_pkt() - log ICMPv6 packet + * @session_id: vdev_id + * @skb: skb pointer + * @dir: direction + * + * Return: true/false + */ +static bool qdf_log_icmpv6_pkt(uint8_t session_id, struct sk_buff *skb, + enum qdf_proto_dir dir, uint8_t pdev_id) +{ + enum qdf_proto_subtype subtype; + + if ((qdf_dp_get_proto_bitmap() & QDF_NBUF_PKT_TRAC_TYPE_ICMPv6) && + ((dir == QDF_TX && QDF_NBUF_CB_PACKET_TYPE_ICMPv6 == + QDF_NBUF_CB_GET_PACKET_TYPE(skb)) || + (dir == QDF_RX && qdf_nbuf_is_icmpv6_pkt(skb) == true))) { + + subtype = qdf_nbuf_get_icmpv6_subtype(skb); + + QDF_NBUF_CB_DP_TRACE_PRINT(skb) = false; + if (dir == QDF_TX) + QDF_NBUF_CB_TX_DP_TRACE(skb) = 1; + else if (dir == QDF_RX) + QDF_NBUF_CB_RX_DP_TRACE(skb) = 1; + + DPTRACE(qdf_dp_trace_proto_pkt( + QDF_DP_TRACE_ICMPv6_PACKET_RECORD, + session_id, (skb->data + QDF_NBUF_SRC_MAC_OFFSET), + (skb->data + QDF_NBUF_DEST_MAC_OFFSET), + QDF_PROTO_TYPE_ICMPv6, subtype, dir, pdev_id, false)); + + switch (subtype) { + case QDF_PROTO_ICMPV6_REQ: + g_qdf_dp_trace_data.icmpv6_req++; + break; + case QDF_PROTO_ICMPV6_RES: + g_qdf_dp_trace_data.icmpv6_resp++; + break; + case QDF_PROTO_ICMPV6_RS: + g_qdf_dp_trace_data.icmpv6_rs++; + break; + case QDF_PROTO_ICMPV6_RA: + g_qdf_dp_trace_data.icmpv6_ra++; + break; + case QDF_PROTO_ICMPV6_NS: + g_qdf_dp_trace_data.icmpv6_ns++; + break; + case QDF_PROTO_ICMPV6_NA: + g_qdf_dp_trace_data.icmpv6_na++; + break; + default: + break; + } + return true; + } + + return false; +} + +/** + * qdf_log_icmp_pkt() - log ICMP packet + * @session_id: vdev_id + * @skb: skb pointer + * @dir: direction + * + * 
Return: true/false + */ +static bool qdf_log_icmp_pkt(uint8_t session_id, struct sk_buff *skb, + enum qdf_proto_dir dir, uint8_t pdev_id) +{ + enum qdf_proto_subtype proto_subtype; + + if ((qdf_dp_get_proto_bitmap() & QDF_NBUF_PKT_TRAC_TYPE_ICMP) && + (qdf_nbuf_is_icmp_pkt(skb) == true)) { + + QDF_NBUF_CB_DP_TRACE_PRINT(skb) = false; + proto_subtype = qdf_nbuf_get_icmp_subtype(skb); + + if (QDF_TX == dir) + QDF_NBUF_CB_TX_DP_TRACE(skb) = 1; + else if (QDF_RX == dir) + QDF_NBUF_CB_RX_DP_TRACE(skb) = 1; + + DPTRACE(qdf_dp_trace_proto_pkt(QDF_DP_TRACE_ICMP_PACKET_RECORD, + session_id, + skb->data + + QDF_NBUF_SRC_MAC_OFFSET, + skb->data + + QDF_NBUF_DEST_MAC_OFFSET, + QDF_PROTO_TYPE_ICMP, + proto_subtype, dir, pdev_id, + false)); + + if (proto_subtype == QDF_PROTO_ICMP_REQ) + g_qdf_dp_trace_data.icmp_req++; + else + g_qdf_dp_trace_data.icmp_resp++; + + return true; + } + return false; +} + +/** + * qdf_log_eapol_pkt() - log EAPOL packet + * @session_id: vdev_id + * @skb: skb pointer + * @dir: direction + * @pdev_id: pdev_id + * + * Return: true/false + */ +static bool qdf_log_eapol_pkt(uint8_t session_id, struct sk_buff *skb, + enum qdf_proto_dir dir, uint8_t pdev_id) +{ + enum qdf_proto_subtype subtype; + + if ((qdf_dp_get_proto_bitmap() & QDF_NBUF_PKT_TRAC_TYPE_EAPOL) && + ((dir == QDF_TX && QDF_NBUF_CB_PACKET_TYPE_EAPOL == + QDF_NBUF_CB_GET_PACKET_TYPE(skb)) || + (dir == QDF_RX && qdf_nbuf_is_ipv4_eapol_pkt(skb) == true))) { + + subtype = qdf_nbuf_get_eapol_subtype(skb); + QDF_NBUF_CB_DP_TRACE_PRINT(skb) = true; + if (QDF_TX == dir) + QDF_NBUF_CB_TX_DP_TRACE(skb) = 1; + else if (QDF_RX == dir) + QDF_NBUF_CB_RX_DP_TRACE(skb) = 1; + + DPTRACE(qdf_dp_trace_proto_pkt(QDF_DP_TRACE_EAPOL_PACKET_RECORD, + session_id, + skb->data + + QDF_NBUF_SRC_MAC_OFFSET, + skb->data + + QDF_NBUF_DEST_MAC_OFFSET, + QDF_PROTO_TYPE_EAPOL, subtype, + dir, pdev_id, true)); + + switch (subtype) { + case QDF_PROTO_EAPOL_M1: + g_qdf_dp_trace_data.eapol_m1++; + break; + case QDF_PROTO_EAPOL_M2: 
+ g_qdf_dp_trace_data.eapol_m2++; + break; + case QDF_PROTO_EAPOL_M3: + g_qdf_dp_trace_data.eapol_m3++; + break; + case QDF_PROTO_EAPOL_M4: + g_qdf_dp_trace_data.eapol_m4++; + break; + default: + g_qdf_dp_trace_data.eapol_others++; + break; + } + return true; + } + return false; +} + +/** + * qdf_log_dhcp_pkt() - log DHCP packet + * @session_id: vdev_id + * @skb: skb pointer + * @dir: direction + * @pdev_id: pdev_id + * + * Return: true/false + */ +static bool qdf_log_dhcp_pkt(uint8_t session_id, struct sk_buff *skb, + enum qdf_proto_dir dir, uint8_t pdev_id) +{ + enum qdf_proto_subtype subtype = QDF_PROTO_INVALID; + + if ((qdf_dp_get_proto_bitmap() & QDF_NBUF_PKT_TRAC_TYPE_DHCP) && + ((dir == QDF_TX && QDF_NBUF_CB_PACKET_TYPE_DHCP == + QDF_NBUF_CB_GET_PACKET_TYPE(skb)) || + (dir == QDF_RX && qdf_nbuf_is_ipv4_dhcp_pkt(skb) == true))) { + + subtype = qdf_nbuf_get_dhcp_subtype(skb); + QDF_NBUF_CB_DP_TRACE_PRINT(skb) = true; + if (QDF_TX == dir) + QDF_NBUF_CB_TX_DP_TRACE(skb) = 1; + else if (QDF_RX == dir) + QDF_NBUF_CB_RX_DP_TRACE(skb) = 1; + + DPTRACE(qdf_dp_trace_proto_pkt(QDF_DP_TRACE_DHCP_PACKET_RECORD, + session_id, + skb->data + + QDF_NBUF_SRC_MAC_OFFSET, + skb->data + + QDF_NBUF_DEST_MAC_OFFSET, + QDF_PROTO_TYPE_DHCP, subtype, + dir, pdev_id, true)); + + switch (subtype) { + case QDF_PROTO_DHCP_DISCOVER: + g_qdf_dp_trace_data.dhcp_disc++; + break; + case QDF_PROTO_DHCP_OFFER: + g_qdf_dp_trace_data.dhcp_off++; + break; + case QDF_PROTO_DHCP_REQUEST: + g_qdf_dp_trace_data.dhcp_req++; + break; + case QDF_PROTO_DHCP_ACK: + g_qdf_dp_trace_data.dhcp_ack++; + break; + case QDF_PROTO_DHCP_NACK: + g_qdf_dp_trace_data.dhcp_nack++; + break; + default: + g_qdf_dp_trace_data.eapol_others++; + break; + } + + return true; + } + return false; +} + +/** + * qdf_log_arp_pkt() - log ARP packet + * @session_id: vdev_id + * @skb: skb pointer + * @dir: direction + * @pdev_id: pdev_id + * + * Return: true/false + */ +static bool qdf_log_arp_pkt(uint8_t session_id, struct sk_buff 
*skb, + enum qdf_proto_dir dir, uint8_t pdev_id) +{ + enum qdf_proto_subtype proto_subtype; + + if ((qdf_dp_get_proto_bitmap() & QDF_NBUF_PKT_TRAC_TYPE_ARP) && + ((dir == QDF_TX && QDF_NBUF_CB_PACKET_TYPE_ARP == + QDF_NBUF_CB_GET_PACKET_TYPE(skb)) || + (dir == QDF_RX && qdf_nbuf_is_ipv4_arp_pkt(skb) == true))) { + + proto_subtype = qdf_nbuf_get_arp_subtype(skb); + QDF_NBUF_CB_DP_TRACE_PRINT(skb) = true; + if (QDF_TX == dir) + QDF_NBUF_CB_TX_DP_TRACE(skb) = 1; + else if (QDF_RX == dir) + QDF_NBUF_CB_RX_DP_TRACE(skb) = 1; + + DPTRACE(qdf_dp_trace_proto_pkt(QDF_DP_TRACE_ARP_PACKET_RECORD, + session_id, + skb->data + + QDF_NBUF_SRC_MAC_OFFSET, + skb->data + + QDF_NBUF_DEST_MAC_OFFSET, + QDF_PROTO_TYPE_ARP, + proto_subtype, dir, pdev_id, + true)); + + if (QDF_PROTO_ARP_REQ == proto_subtype) + g_qdf_dp_trace_data.arp_req++; + else + g_qdf_dp_trace_data.arp_resp++; + + return true; + } + return false; +} + + +/** + * qdf_dp_trace_log_pkt() - log packet type enabled through iwpriv + * @session_id: vdev_id + * @skb: skb pointer + * @dir: direction + * @pdev_id: pdev_id + * + * Return: true: some protocol was logged, false: no protocol was logged. 
+ */ +bool qdf_dp_trace_log_pkt(uint8_t session_id, struct sk_buff *skb, + enum qdf_proto_dir dir, uint8_t pdev_id) +{ + if (!qdf_dp_get_proto_bitmap()) + return false; + if (qdf_log_arp_pkt(session_id, skb, dir, pdev_id)) + return true; + if (qdf_log_dhcp_pkt(session_id, skb, dir, pdev_id)) + return true; + if (qdf_log_eapol_pkt(session_id, skb, dir, pdev_id)) + return true; + if (qdf_log_icmp_pkt(session_id, skb, dir, pdev_id)) + return true; + if (qdf_log_icmpv6_pkt(session_id, skb, dir, pdev_id)) + return true; + return false; +} +qdf_export_symbol(qdf_dp_trace_log_pkt); + +void qdf_dp_display_mgmt_pkt(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, uint8_t info) +{ + int loc; + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; +#ifdef WLAN_DEBUG + struct qdf_dp_trace_mgmt_buf *buf = + (struct qdf_dp_trace_mgmt_buf *)record->data; +#endif + + qdf_mem_set(prepend_str, sizeof(prepend_str), 0); + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, info, record); + + DPTRACE_PRINT("%s [%d] [%s %s]", + prepend_str, + buf->vdev_id, + qdf_dp_type_to_str(buf->type), + qdf_dp_subtype_to_str(buf->subtype)); +} +qdf_export_symbol(qdf_dp_display_mgmt_pkt); + + +void qdf_dp_trace_mgmt_pkt(enum QDF_DP_TRACE_ID code, uint8_t vdev_id, + uint8_t pdev_id, enum qdf_proto_type type, + enum qdf_proto_subtype subtype) +{ + struct qdf_dp_trace_mgmt_buf buf; + int buf_size = sizeof(struct qdf_dp_trace_mgmt_buf); + + if (qdf_dp_enable_check(NULL, code, QDF_NA) == false) + return; + + if (buf_size > QDF_DP_TRACE_RECORD_SIZE) + QDF_BUG(0); + + buf.type = type; + buf.subtype = subtype; + buf.vdev_id = vdev_id; + qdf_dp_add_record(code, pdev_id, (uint8_t *)&buf, buf_size, + NULL, 0, true); +} +qdf_export_symbol(qdf_dp_trace_mgmt_pkt); + +void qdf_dp_display_event_record(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, uint8_t info) +{ + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; +#ifdef WLAN_DEBUG + struct 
qdf_dp_trace_event_buf *buf = + (struct qdf_dp_trace_event_buf *)record->data; +#endif + + qdf_mem_set(prepend_str, sizeof(prepend_str), 0); + qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, info, record); + + DPTRACE_PRINT("%s [%d] [%s %s]", + prepend_str, + buf->vdev_id, + qdf_dp_type_to_str(buf->type), + qdf_dp_subtype_to_str(buf->subtype)); +} +qdf_export_symbol(qdf_dp_display_event_record); + +/** + * qdf_dp_trace_record_event() - record events + * @code: dptrace code + * @vdev_id: vdev id + * @pdev_id: pdev_id + * @type: proto type + * @subtype: proto subtype + * + * Return: none + */ +void qdf_dp_trace_record_event(enum QDF_DP_TRACE_ID code, uint8_t vdev_id, + uint8_t pdev_id, enum qdf_proto_type type, + enum qdf_proto_subtype subtype) +{ + struct qdf_dp_trace_event_buf buf; + int buf_size = sizeof(struct qdf_dp_trace_event_buf); + + if (qdf_dp_enable_check(NULL, code, QDF_NA) == false) + return; + + if (buf_size > QDF_DP_TRACE_RECORD_SIZE) + QDF_BUG(0); + + buf.type = type; + buf.subtype = subtype; + buf.vdev_id = vdev_id; + qdf_dp_add_record(code, pdev_id, + (uint8_t *)&buf, buf_size, NULL, 0, true); +} +qdf_export_symbol(qdf_dp_trace_record_event); + + +void qdf_dp_display_proto_pkt_debug(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, uint8_t info) +{ + int loc; + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; +#ifdef WLAN_DEBUG + struct qdf_dp_trace_proto_buf *buf = + (struct qdf_dp_trace_proto_buf *)record->data; +#endif + + qdf_mem_set(prepend_str, sizeof(prepend_str), 0); + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, info, record); + DPTRACE_PRINT("%s [%d] [%s] SA: " + QDF_MAC_ADDR_STR " %s DA: " + QDF_MAC_ADDR_STR, + prepend_str, + buf->vdev_id, + qdf_dp_subtype_to_str(buf->subtype), + QDF_MAC_ADDR_ARRAY(buf->sa.bytes), + qdf_dp_dir_to_str(buf->dir), + QDF_MAC_ADDR_ARRAY(buf->da.bytes)); +} +qdf_export_symbol(qdf_dp_display_proto_pkt_debug); + +void 
qdf_dp_display_proto_pkt_always(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, uint8_t info) +{ + int loc; + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + struct qdf_dp_trace_proto_buf *buf = + (struct qdf_dp_trace_proto_buf *)record->data; + + qdf_mem_zero(prepend_str, sizeof(prepend_str)); + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, info, record); + qdf_info("%s [%d] [%s] SA: "QDF_MAC_ADDR_STR " %s DA: " + QDF_MAC_ADDR_STR, prepend_str, + buf->vdev_id, qdf_dp_subtype_to_str(buf->subtype), + QDF_MAC_ADDR_ARRAY(buf->sa.bytes), + qdf_dp_dir_to_str(buf->dir), + QDF_MAC_ADDR_ARRAY(buf->da.bytes)); +} +qdf_export_symbol(qdf_dp_display_proto_pkt_always); + +void qdf_dp_trace_proto_pkt(enum QDF_DP_TRACE_ID code, uint8_t vdev_id, + uint8_t *sa, uint8_t *da, enum qdf_proto_type type, + enum qdf_proto_subtype subtype, enum qdf_proto_dir dir, + uint8_t pdev_id, bool print) +{ + struct qdf_dp_trace_proto_buf buf; + int buf_size = sizeof(struct qdf_dp_trace_ptr_buf); + + if (qdf_dp_enable_check(NULL, code, dir) == false) + return; + + if (buf_size > QDF_DP_TRACE_RECORD_SIZE) + QDF_BUG(0); + + memcpy(&buf.sa, sa, QDF_NET_ETH_LEN); + memcpy(&buf.da, da, QDF_NET_ETH_LEN); + buf.dir = dir; + buf.type = type; + buf.subtype = subtype; + buf.vdev_id = vdev_id; + qdf_dp_add_record(code, pdev_id, + (uint8_t *)&buf, buf_size, NULL, 0, print); +} +qdf_export_symbol(qdf_dp_trace_proto_pkt); + +void qdf_dp_display_ptr_record(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, uint8_t info) +{ + int loc; + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + struct qdf_dp_trace_ptr_buf *buf = + (struct qdf_dp_trace_ptr_buf *)record->data; + bool is_free_pkt_ptr_record = false; + + if ((record->code == QDF_DP_TRACE_FREE_PACKET_PTR_RECORD) || + (record->code == QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD)) + is_free_pkt_ptr_record = true; + + qdf_mem_set(prepend_str, sizeof(prepend_str), 0); + loc = 
qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, info, record); + + if (loc < sizeof(prepend_str)) + scnprintf(&prepend_str[loc], sizeof(prepend_str) - loc, + "[msdu id %d %s %d]", + buf->msdu_id, + is_free_pkt_ptr_record ? "status" : "vdev_id", + buf->status); + + if (info & QDF_DP_TRACE_RECORD_INFO_LIVE) { + /* In live mode donot dump the contents of the cookie */ + DPTRACE_PRINT("%s", prepend_str); + } else { + dump_dp_hex_trace(prepend_str, (uint8_t *)&buf->cookie, + sizeof(buf->cookie)); + } +} +qdf_export_symbol(qdf_dp_display_ptr_record); + +/** + * qdf_dp_trace_ptr() - record dptrace + * @code: dptrace code + * @pdev_id: pdev_id + * @data: data + * @size: size of data + * @msdu_id: msdu_id + * @status: return status + * + * Return: none + */ +void qdf_dp_trace_ptr(qdf_nbuf_t nbuf, enum QDF_DP_TRACE_ID code, + uint8_t pdev_id, uint8_t *data, uint8_t size, + uint16_t msdu_id, uint16_t status) +{ + struct qdf_dp_trace_ptr_buf buf; + int buf_size = sizeof(struct qdf_dp_trace_ptr_buf); + + if (qdf_dp_enable_check(nbuf, code, QDF_TX) == false) + return; + + if (buf_size > QDF_DP_TRACE_RECORD_SIZE) + QDF_BUG(0); + + qdf_mem_copy(&buf.cookie, data, size); + buf.msdu_id = msdu_id; + buf.status = status; + qdf_dp_add_record(code, pdev_id, (uint8_t *)&buf, buf_size, NULL, 0, + QDF_NBUF_CB_DP_TRACE_PRINT(nbuf)); +} +qdf_export_symbol(qdf_dp_trace_ptr); + +void qdf_dp_trace_data_pkt(qdf_nbuf_t nbuf, uint8_t pdev_id, + enum QDF_DP_TRACE_ID code, uint16_t msdu_id, + enum qdf_proto_dir dir) +{ + struct qdf_dp_trace_data_buf buf; + + buf.msdu_id = msdu_id; + if (!qdf_dp_enable_check(nbuf, code, dir)) + return; + + qdf_dp_add_record(code, pdev_id, + nbuf ? qdf_nbuf_data(nbuf) : NULL, + nbuf ? nbuf->len - nbuf->data_len : 0, + (uint8_t *)&buf, sizeof(struct qdf_dp_trace_data_buf), + (nbuf) ? 
QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) : false); +} + +qdf_export_symbol(qdf_dp_trace_data_pkt); + +void qdf_dp_display_record(struct qdf_dp_trace_record_s *record, + uint16_t index, uint8_t pdev_id, uint8_t info) +{ + int loc; + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + + if (!(pdev_id == QDF_TRACE_DEFAULT_PDEV_ID || + pdev_id == record->pdev_id)) + return; + + qdf_mem_set(prepend_str, sizeof(prepend_str), 0); + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, info, record); + + switch (record->code) { + case QDF_DP_TRACE_HDD_TX_TIMEOUT: + DPTRACE_PRINT(" %s: HDD TX Timeout", prepend_str); + break; + case QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT: + DPTRACE_PRINT(" %s: HDD SoftAP TX Timeout", prepend_str); + break; + case QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD: + DPTRACE_PRINT(" %s: CE Fast Packet Error", prepend_str); + break; + case QDF_DP_TRACE_LI_DP_NULL_RX_PACKET_RECORD: + default: + dump_dp_hex_trace(prepend_str, record->data, record->size); + break; + }; +} +qdf_export_symbol(qdf_dp_display_record); + +void +qdf_dp_display_data_pkt_record(struct qdf_dp_trace_record_s *record, + uint16_t rec_index, uint8_t pdev_id, + uint8_t info) +{ + int loc; + char prepend_str[DP_TRACE_META_DATA_STRLEN + 10]; + struct qdf_dp_trace_data_buf *buf = + (struct qdf_dp_trace_data_buf *)record->data; + + qdf_mem_set(prepend_str, sizeof(prepend_str), 0); + + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + rec_index, info, record); + if (loc < sizeof(prepend_str)) + loc += snprintf(&prepend_str[loc], sizeof(prepend_str) - loc, + "[%d]", buf->msdu_id); + dump_dp_hex_trace(prepend_str, + &record->data[sizeof(struct qdf_dp_trace_data_buf)], + record->size); +} + +/** + * qdf_dp_trace() - Stores the data in buffer + * @nbuf : defines the netbuf + * @code : defines the event + * @pdev_id: pdev_id + * @data : defines the data to be stored + * @size : defines the size of the data record + * + * Return: None + */ +void qdf_dp_trace(qdf_nbuf_t 
nbuf, enum QDF_DP_TRACE_ID code, uint8_t pdev_id, + uint8_t *data, uint8_t size, enum qdf_proto_dir dir) +{ + + if (qdf_dp_enable_check(nbuf, code, dir) == false) + return; + + qdf_dp_add_record(code, pdev_id, nbuf ? qdf_nbuf_data(nbuf) : NULL, + size, NULL, 0, + (nbuf) ? QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) : false); +} +qdf_export_symbol(qdf_dp_trace); + +/** + * qdf_dp_trace_spin_lock_init() - initializes the lock variable before use + * This function will be called from cds_alloc_global_context, we will have lock + * available to use ASAP + * + * Return: None + */ +void qdf_dp_trace_spin_lock_init(void) +{ + spin_lock_init(&l_dp_trace_lock); +} +qdf_export_symbol(qdf_dp_trace_spin_lock_init); + +/** + * qdf_dp_trace_disable_live_mode - disable live mode for dptrace + * + * Return: none + */ +void qdf_dp_trace_disable_live_mode(void) +{ + g_qdf_dp_trace_data.force_live_mode = 0; +} +qdf_export_symbol(qdf_dp_trace_disable_live_mode); + +/** + * qdf_dp_trace_enable_live_mode() - enable live mode for dptrace + * + * Return: none + */ +void qdf_dp_trace_enable_live_mode(void) +{ + g_qdf_dp_trace_data.force_live_mode = 1; +} +qdf_export_symbol(qdf_dp_trace_enable_live_mode); + +/** + * qdf_dp_trace_clear_buffer() - clear dp trace buffer + * + * Return: none + */ +void qdf_dp_trace_clear_buffer(void) +{ + g_qdf_dp_trace_data.head = INVALID_QDF_DP_TRACE_ADDR; + g_qdf_dp_trace_data.tail = INVALID_QDF_DP_TRACE_ADDR; + g_qdf_dp_trace_data.num = 0; + g_qdf_dp_trace_data.dump_counter = 0; + g_qdf_dp_trace_data.num_records_to_dump = MAX_QDF_DP_TRACE_RECORDS; + + if (g_qdf_dp_trace_data.enable) + memset(g_qdf_dp_trace_tbl, 0, + MAX_QDF_DP_TRACE_RECORDS * + sizeof(struct qdf_dp_trace_record_s)); +} +qdf_export_symbol(qdf_dp_trace_clear_buffer); + +void qdf_dp_trace_dump_stats(void) +{ + DPTRACE_PRINT("STATS |DPT: tx %u rx %u icmp(%u %u) arp(%u %u) icmpv6(%u %u %u %u %u %u) dhcp(%u %u %u %u %u %u) eapol(%u %u %u %u %u)", + g_qdf_dp_trace_data.tx_count, + 
g_qdf_dp_trace_data.rx_count, + g_qdf_dp_trace_data.icmp_req, + g_qdf_dp_trace_data.icmp_resp, + g_qdf_dp_trace_data.arp_req, + g_qdf_dp_trace_data.arp_resp, + g_qdf_dp_trace_data.icmpv6_req, + g_qdf_dp_trace_data.icmpv6_resp, + g_qdf_dp_trace_data.icmpv6_ns, + g_qdf_dp_trace_data.icmpv6_na, + g_qdf_dp_trace_data.icmpv6_rs, + g_qdf_dp_trace_data.icmpv6_ra, + g_qdf_dp_trace_data.dhcp_disc, + g_qdf_dp_trace_data.dhcp_off, + g_qdf_dp_trace_data.dhcp_req, + g_qdf_dp_trace_data.dhcp_ack, + g_qdf_dp_trace_data.dhcp_nack, + g_qdf_dp_trace_data.dhcp_others, + g_qdf_dp_trace_data.eapol_m1, + g_qdf_dp_trace_data.eapol_m2, + g_qdf_dp_trace_data.eapol_m3, + g_qdf_dp_trace_data.eapol_m4, + g_qdf_dp_trace_data.eapol_others); +} +qdf_export_symbol(qdf_dp_trace_dump_stats); + +/** + * qdf_dpt_dump_hex_trace_debugfs() - read data in file + * @file: file to read + * @str: string to prepend the hexdump with. + * @buf: buffer which contains data to be written + * @buf_len: defines the size of the data to be written + * + * Return: None + */ +static void qdf_dpt_dump_hex_trace_debugfs(qdf_debugfs_file_t file, + char *str, uint8_t *buf, uint8_t buf_len) +{ + unsigned char linebuf[BUFFER_SIZE]; + const u8 *ptr = buf; + int i, linelen, remaining = buf_len; + + /* Dump the bytes in the last line */ + for (i = 0; i < buf_len; i += ROW_SIZE) { + linelen = min(remaining, ROW_SIZE); + remaining -= ROW_SIZE; + + hex_dump_to_buffer(ptr + i, linelen, ROW_SIZE, 1, + linebuf, sizeof(linebuf), false); + + qdf_debugfs_printf(file, "%s %s\n", str, linebuf); + } +} + +/** + * qdf_dpt_display_proto_pkt_debugfs() - display proto packet + * @file: file to read + * @record: dptrace record + * @index: index + * + * Return: none + */ +static void qdf_dpt_display_proto_pkt_debugfs(qdf_debugfs_file_t file, + struct qdf_dp_trace_record_s *record, + uint32_t index) +{ + int loc; + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + struct qdf_dp_trace_proto_buf *buf = + (struct qdf_dp_trace_proto_buf 
*)record->data; + + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, 0, record); + qdf_debugfs_printf(file, "%s [%d] [%s] SA: " + QDF_MAC_ADDR_STR " %s DA: " + QDF_MAC_ADDR_STR, + prepend_str, + buf->vdev_id, + qdf_dp_subtype_to_str(buf->subtype), + QDF_MAC_ADDR_ARRAY(buf->sa.bytes), + qdf_dp_dir_to_str(buf->dir), + QDF_MAC_ADDR_ARRAY(buf->da.bytes)); + qdf_debugfs_printf(file, "\n"); +} + +/** + * qdf_dpt_display_mgmt_pkt_debugfs() - display mgmt packet + * @file: file to read + * @record: dptrace record + * @index: index + * + * Return: none + */ +static void qdf_dpt_display_mgmt_pkt_debugfs(qdf_debugfs_file_t file, + struct qdf_dp_trace_record_s *record, + uint32_t index) +{ + + int loc; + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + struct qdf_dp_trace_mgmt_buf *buf = + (struct qdf_dp_trace_mgmt_buf *)record->data; + + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, 0, record); + + qdf_debugfs_printf(file, "%s [%d] [%s %s]\n", + prepend_str, + buf->vdev_id, + qdf_dp_type_to_str(buf->type), + qdf_dp_subtype_to_str(buf->subtype)); +} + +/** + * qdf_dpt_display_event_record_debugfs() - display event records + * @file: file to read + * @record: dptrace record + * @index: index + * + * Return: none + */ +static void qdf_dpt_display_event_record_debugfs(qdf_debugfs_file_t file, + struct qdf_dp_trace_record_s *record, + uint32_t index) +{ + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + struct qdf_dp_trace_event_buf *buf = + (struct qdf_dp_trace_event_buf *)record->data; + + qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, 0, record); + qdf_debugfs_printf(file, "%s [%d] [%s %s]\n", + prepend_str, + buf->vdev_id, + qdf_dp_type_to_str(buf->type), + qdf_dp_subtype_to_str(buf->subtype)); +} + +/** + * qdf_dpt_display_ptr_record_debugfs() - display record ptr + * @file: file to read + * @record: dptrace record + * @index: index + * + * Return: none + */ +static void 
qdf_dpt_display_ptr_record_debugfs(qdf_debugfs_file_t file, + struct qdf_dp_trace_record_s *record, + uint32_t index) +{ + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + int loc; + struct qdf_dp_trace_ptr_buf *buf = + (struct qdf_dp_trace_ptr_buf *)record->data; + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, 0, record); + + if (loc < sizeof(prepend_str)) + scnprintf(&prepend_str[loc], sizeof(prepend_str) - loc, + "[msdu id %d %s %d]", + buf->msdu_id, + (record->code == + QDF_DP_TRACE_FREE_PACKET_PTR_RECORD) ? + "status" : "vdev_id", + buf->status); + + qdf_dpt_dump_hex_trace_debugfs(file, prepend_str, + (uint8_t *)&buf->cookie, + sizeof(buf->cookie)); +} + +/** + * qdf_dpt_display_ptr_record_debugfs() - display record + * @file: file to read + * @record: dptrace record + * @index: index + * + * Return: none + */ +static void qdf_dpt_display_record_debugfs(qdf_debugfs_file_t file, + struct qdf_dp_trace_record_s *record, + uint32_t index) +{ + int loc; + char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE]; + struct qdf_dp_trace_data_buf *buf = + (struct qdf_dp_trace_data_buf *)record->data; + + loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str), + index, 0, record); + if (loc < sizeof(prepend_str)) + loc += snprintf(&prepend_str[loc], sizeof(prepend_str) - loc, + "[%d]", buf->msdu_id); + qdf_dpt_dump_hex_trace_debugfs(file, prepend_str, + record->data, record->size); +} + +uint32_t qdf_dpt_get_curr_pos_debugfs(qdf_debugfs_file_t file, + enum qdf_dpt_debugfs_state state) +{ + uint32_t i = 0; + uint32_t tail; + uint32_t count = g_qdf_dp_trace_data.num; + + if (!g_qdf_dp_trace_data.enable) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG, + "%s: Tracing Disabled", __func__); + return QDF_STATUS_E_EMPTY; + } + + if (!count) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG, + "%s: no packets", __func__); + return QDF_STATUS_E_EMPTY; + } + + if (state == QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS) + return 
g_qdf_dp_trace_data.curr_pos; + + qdf_debugfs_printf(file, + "DPT: config - bitmap 0x%x verb %u #rec %u rec_requested %u live_config %u thresh %u time_limit %u\n", + g_qdf_dp_trace_data.proto_bitmap, + g_qdf_dp_trace_data.verbosity, + g_qdf_dp_trace_data.no_of_record, + g_qdf_dp_trace_data.num_records_to_dump, + g_qdf_dp_trace_data.live_mode_config, + g_qdf_dp_trace_data.high_tput_thresh, + g_qdf_dp_trace_data.thresh_time_limit); + + qdf_debugfs_printf(file, + "STATS |DPT: icmp(%u %u) arp(%u %u) icmpv6(%u %u %u %u %u %u) dhcp(%u %u %u %u %u %u) eapol(%u %u %u %u %u)\n", + g_qdf_dp_trace_data.icmp_req, + g_qdf_dp_trace_data.icmp_resp, + g_qdf_dp_trace_data.arp_req, + g_qdf_dp_trace_data.arp_resp, + g_qdf_dp_trace_data.icmpv6_req, + g_qdf_dp_trace_data.icmpv6_resp, + g_qdf_dp_trace_data.icmpv6_ns, + g_qdf_dp_trace_data.icmpv6_na, + g_qdf_dp_trace_data.icmpv6_rs, + g_qdf_dp_trace_data.icmpv6_ra, + g_qdf_dp_trace_data.dhcp_disc, + g_qdf_dp_trace_data.dhcp_off, + g_qdf_dp_trace_data.dhcp_req, + g_qdf_dp_trace_data.dhcp_ack, + g_qdf_dp_trace_data.dhcp_nack, + g_qdf_dp_trace_data.dhcp_others, + g_qdf_dp_trace_data.eapol_m1, + g_qdf_dp_trace_data.eapol_m2, + g_qdf_dp_trace_data.eapol_m3, + g_qdf_dp_trace_data.eapol_m4, + g_qdf_dp_trace_data.eapol_others); + + qdf_debugfs_printf(file, + "DPT: Total Records: %d, Head: %d, Tail: %d\n", + g_qdf_dp_trace_data.num, g_qdf_dp_trace_data.head, + g_qdf_dp_trace_data.tail); + + spin_lock_bh(&l_dp_trace_lock); + if (g_qdf_dp_trace_data.head != INVALID_QDF_DP_TRACE_ADDR) { + i = g_qdf_dp_trace_data.head; + tail = g_qdf_dp_trace_data.tail; + + if (count > g_qdf_dp_trace_data.num) + count = g_qdf_dp_trace_data.num; + + if (tail >= (count - 1)) + i = tail - count + 1; + else if (count != MAX_QDF_DP_TRACE_RECORDS) + i = MAX_QDF_DP_TRACE_RECORDS - ((count - 1) - + tail); + g_qdf_dp_trace_data.curr_pos = 0; + g_qdf_dp_trace_data.saved_tail = tail; + } + spin_unlock_bh(&l_dp_trace_lock); + + return g_qdf_dp_trace_data.saved_tail; +} 
+qdf_export_symbol(qdf_dpt_get_curr_pos_debugfs); + +QDF_STATUS qdf_dpt_dump_stats_debugfs(qdf_debugfs_file_t file, + uint32_t curr_pos) +{ + struct qdf_dp_trace_record_s p_record; + uint32_t i = curr_pos; + uint16_t num_records_to_dump = g_qdf_dp_trace_data.num_records_to_dump; + + if (!g_qdf_dp_trace_data.enable) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Tracing Disabled", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (num_records_to_dump > g_qdf_dp_trace_data.num) + num_records_to_dump = g_qdf_dp_trace_data.num; + + /* + * Max dp trace record size should always be less than + * QDF_DP_TRACE_PREPEND_STR_SIZE(100) + BUFFER_SIZE(121). + */ + if (WARN_ON(QDF_DP_TRACE_MAX_RECORD_SIZE < + QDF_DP_TRACE_PREPEND_STR_SIZE + BUFFER_SIZE)) + return QDF_STATUS_E_FAILURE; + + spin_lock_bh(&l_dp_trace_lock); + p_record = g_qdf_dp_trace_tbl[i]; + spin_unlock_bh(&l_dp_trace_lock); + + for (;; ) { + /* + * Initially we get file as 1 page size, and + * if remaining size in file is less than one record max size, + * then return so that it gets an extra page. 
+ */ + if ((file->size - file->count) < QDF_DP_TRACE_MAX_RECORD_SIZE) { + spin_lock_bh(&l_dp_trace_lock); + g_qdf_dp_trace_data.curr_pos = i; + spin_unlock_bh(&l_dp_trace_lock); + return QDF_STATUS_E_FAILURE; + } + + switch (p_record.code) { + case QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD: + case QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD: + case QDF_DP_TRACE_FREE_PACKET_PTR_RECORD: + qdf_dpt_display_ptr_record_debugfs(file, &p_record, i); + break; + + case QDF_DP_TRACE_EAPOL_PACKET_RECORD: + case QDF_DP_TRACE_DHCP_PACKET_RECORD: + case QDF_DP_TRACE_ARP_PACKET_RECORD: + case QDF_DP_TRACE_ICMP_PACKET_RECORD: + case QDF_DP_TRACE_ICMPv6_PACKET_RECORD: + qdf_dpt_display_proto_pkt_debugfs(file, &p_record, i); + break; + + case QDF_DP_TRACE_MGMT_PACKET_RECORD: + qdf_dpt_display_mgmt_pkt_debugfs(file, &p_record, i); + break; + + case QDF_DP_TRACE_EVENT_RECORD: + qdf_dpt_display_event_record_debugfs(file, &p_record, + i); + break; + + case QDF_DP_TRACE_HDD_TX_TIMEOUT: + qdf_debugfs_printf(file, "DPT: %04d: %s %s\n", + i, p_record.time, + qdf_dp_code_to_string(p_record.code)); + qdf_debugfs_printf(file, "%s: HDD TX Timeout\n"); + break; + + case QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT: + qdf_debugfs_printf(file, "%04d: %s %s\n", + i, p_record.time, + qdf_dp_code_to_string(p_record.code)); + qdf_debugfs_printf(file, + "%s: HDD SoftAP TX Timeout\n"); + break; + + case QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD: + qdf_debugfs_printf(file, "DPT: %04d: %s %s\n", + i, p_record.time, + qdf_dp_code_to_string(p_record.code)); + qdf_debugfs_printf(file, + "%s: CE Fast Packet Error\n"); + break; + + case QDF_DP_TRACE_MAX: + qdf_debugfs_printf(file, + "%s: QDF_DP_TRACE_MAX event should not be generated\n", + __func__); + break; + + case QDF_DP_TRACE_HDD_TX_PACKET_RECORD: + case QDF_DP_TRACE_HDD_RX_PACKET_RECORD: + case QDF_DP_TRACE_TX_PACKET_RECORD: + case QDF_DP_TRACE_RX_PACKET_RECORD: + case QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD: + case QDF_DP_TRACE_LI_DP_RX_PACKET_RECORD: + + default: + 
qdf_dpt_display_record_debugfs(file, &p_record, i); + break; + } + + if (++g_qdf_dp_trace_data.dump_counter == num_records_to_dump) + break; + + spin_lock_bh(&l_dp_trace_lock); + if (i == 0) + i = MAX_QDF_DP_TRACE_RECORDS; + + i -= 1; + p_record = g_qdf_dp_trace_tbl[i]; + spin_unlock_bh(&l_dp_trace_lock); + } + + g_qdf_dp_trace_data.dump_counter = 0; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_dpt_dump_stats_debugfs); + +/** + * qdf_dpt_set_value_debugfs() - Configure the value to control DP trace + * @proto_bitmap: defines the protocol to be tracked + * @no_of_records: defines the nth packet which is traced + * @verbosity: defines the verbosity level + * + * Return: None + */ +void qdf_dpt_set_value_debugfs(uint8_t proto_bitmap, uint8_t no_of_record, + uint8_t verbosity, uint16_t num_records_to_dump) +{ + if (g_qdf_dp_trace_data.enable) { + g_qdf_dp_trace_data.proto_bitmap = proto_bitmap; + g_qdf_dp_trace_data.no_of_record = no_of_record; + g_qdf_dp_trace_data.verbosity = verbosity; + g_qdf_dp_trace_data.num_records_to_dump = num_records_to_dump; + } +} +qdf_export_symbol(qdf_dpt_set_value_debugfs); + + +/** + * qdf_dp_trace_dump_all() - Dump data from ring buffer via call back functions + * registered with QDF + * @count: Number of lines to dump starting from tail to head + * @pdev_id: pdev_id + * + * Return: None + */ +void qdf_dp_trace_dump_all(uint32_t count, uint8_t pdev_id) +{ + struct qdf_dp_trace_record_s p_record; + int32_t i, tail; + + if (!g_qdf_dp_trace_data.enable) { + DPTRACE_PRINT("Tracing Disabled"); + return; + } + + DPTRACE_PRINT( + "DPT: config - bitmap 0x%x verb %u #rec %u live_config %u thresh %u time_limit %u", + g_qdf_dp_trace_data.proto_bitmap, + g_qdf_dp_trace_data.verbosity, + g_qdf_dp_trace_data.no_of_record, + g_qdf_dp_trace_data.live_mode_config, + g_qdf_dp_trace_data.high_tput_thresh, + g_qdf_dp_trace_data.thresh_time_limit); + + qdf_dp_trace_dump_stats(); + + DPTRACE_PRINT("DPT: Total Records: %d, Head: %d, Tail: %d", + 
g_qdf_dp_trace_data.num, g_qdf_dp_trace_data.head, + g_qdf_dp_trace_data.tail); + + /* aquire the lock so that only one thread at a time can read + * the ring buffer + */ + spin_lock_bh(&l_dp_trace_lock); + + if (g_qdf_dp_trace_data.head != INVALID_QDF_DP_TRACE_ADDR) { + i = g_qdf_dp_trace_data.head; + tail = g_qdf_dp_trace_data.tail; + + if (count) { + if (count > g_qdf_dp_trace_data.num) + count = g_qdf_dp_trace_data.num; + if (tail >= (count - 1)) + i = tail - count + 1; + else if (count != MAX_QDF_DP_TRACE_RECORDS) + i = MAX_QDF_DP_TRACE_RECORDS - ((count - 1) - + tail); + } + + p_record = g_qdf_dp_trace_tbl[i]; + spin_unlock_bh(&l_dp_trace_lock); + for (;; ) { + qdf_dp_trace_cb_table[p_record.code](&p_record, + (uint16_t)i, pdev_id, false); + if (i == tail) + break; + i += 1; + + spin_lock_bh(&l_dp_trace_lock); + if (MAX_QDF_DP_TRACE_RECORDS == i) + i = 0; + + p_record = g_qdf_dp_trace_tbl[i]; + spin_unlock_bh(&l_dp_trace_lock); + } + } else { + spin_unlock_bh(&l_dp_trace_lock); + } +} +qdf_export_symbol(qdf_dp_trace_dump_all); + +/** + * qdf_dp_trace_throttle_live_mode() - Throttle DP Trace live mode + * @high_bw_request: whether this is a high BW req or not + * + * The function tries to prevent excessive logging into the live buffer by + * having an upper limit on number of packets that can be logged per second. + * + * The intention is to allow occasional pings and data packets and really low + * throughput levels while suppressing bursts and higher throughput levels so + * that we donot hog the live buffer. + * + * If the number of packets printed in a particular second exceeds the thresh, + * disable printing in the next second. 
+ * + * Return: None + */ +void qdf_dp_trace_throttle_live_mode(bool high_bw_request) +{ + static int bw_interval_counter; + + if (g_qdf_dp_trace_data.enable == false || + g_qdf_dp_trace_data.live_mode_config == false) + return; + + if (high_bw_request) { + g_qdf_dp_trace_data.live_mode = 0; + bw_interval_counter = 0; + return; + } + + bw_interval_counter++; + + if (0 == (bw_interval_counter % + g_qdf_dp_trace_data.thresh_time_limit)) { + + spin_lock_bh(&l_dp_trace_lock); + if (g_qdf_dp_trace_data.print_pkt_cnt <= + g_qdf_dp_trace_data.high_tput_thresh) + g_qdf_dp_trace_data.live_mode = 1; + + g_qdf_dp_trace_data.print_pkt_cnt = 0; + spin_unlock_bh(&l_dp_trace_lock); + } +} +qdf_export_symbol(qdf_dp_trace_throttle_live_mode); + +void qdf_dp_trace_apply_tput_policy(bool is_data_traffic) +{ + if (g_qdf_dp_trace_data.dynamic_verbosity_modify) { + goto check_live_mode; + return; + } + + if (is_data_traffic) { + g_qdf_dp_trace_data.verbosity = + QDF_DP_TRACE_VERBOSITY_ULTRA_LOW; + } else { + g_qdf_dp_trace_data.verbosity = + g_qdf_dp_trace_data.ini_conf_verbosity; + } +check_live_mode: + qdf_dp_trace_throttle_live_mode(is_data_traffic); +} +#endif + +struct qdf_print_ctrl print_ctrl_obj[MAX_PRINT_CONFIG_SUPPORTED]; + +struct category_name_info g_qdf_category_name[MAX_SUPPORTED_CATEGORY] = { + [QDF_MODULE_ID_TDLS] = {"tdls"}, + [QDF_MODULE_ID_ACS] = {"ACS"}, + [QDF_MODULE_ID_SCAN_SM] = {"scan state machine"}, + [QDF_MODULE_ID_SCANENTRY] = {"scan entry"}, + [QDF_MODULE_ID_WDS] = {"WDS"}, + [QDF_MODULE_ID_ACTION] = {"action"}, + [QDF_MODULE_ID_ROAM] = {"STA roaming"}, + [QDF_MODULE_ID_INACT] = {"inactivity"}, + [QDF_MODULE_ID_DOTH] = {"11h"}, + [QDF_MODULE_ID_IQUE] = {"IQUE"}, + [QDF_MODULE_ID_WME] = {"WME"}, + [QDF_MODULE_ID_ACL] = {"ACL"}, + [QDF_MODULE_ID_WPA] = {"WPA/RSN"}, + [QDF_MODULE_ID_RADKEYS] = {"dump 802.1x keys"}, + [QDF_MODULE_ID_RADDUMP] = {"dump radius packet"}, + [QDF_MODULE_ID_RADIUS] = {"802.1x radius client"}, + [QDF_MODULE_ID_DOT1XSM] = {"802.1x state 
machine"}, + [QDF_MODULE_ID_DOT1X] = {"802.1x authenticator"}, + [QDF_MODULE_ID_POWER] = {"power save"}, + [QDF_MODULE_ID_STATE] = {"state"}, + [QDF_MODULE_ID_OUTPUT] = {"output"}, + [QDF_MODULE_ID_SCAN] = {"scan"}, + [QDF_MODULE_ID_AUTH] = {"authentication"}, + [QDF_MODULE_ID_ASSOC] = {"association"}, + [QDF_MODULE_ID_NODE] = {"node"}, + [QDF_MODULE_ID_ELEMID] = {"element ID"}, + [QDF_MODULE_ID_XRATE] = {"rate"}, + [QDF_MODULE_ID_INPUT] = {"input"}, + [QDF_MODULE_ID_CRYPTO] = {"crypto"}, + [QDF_MODULE_ID_DUMPPKTS] = {"dump packet"}, + [QDF_MODULE_ID_DEBUG] = {"debug"}, + [QDF_MODULE_ID_MLME] = {"mlme"}, + [QDF_MODULE_ID_RRM] = {"rrm"}, + [QDF_MODULE_ID_WNM] = {"wnm"}, + [QDF_MODULE_ID_P2P_PROT] = {"p2p_prot"}, + [QDF_MODULE_ID_PROXYARP] = {"proxyarp"}, + [QDF_MODULE_ID_L2TIF] = {"l2tif"}, + [QDF_MODULE_ID_WIFIPOS] = {"wifipos"}, + [QDF_MODULE_ID_WRAP] = {"wrap"}, + [QDF_MODULE_ID_DFS] = {"dfs"}, + [QDF_MODULE_ID_ATF] = {"atf"}, + [QDF_MODULE_ID_SPLITMAC] = {"splitmac"}, + [QDF_MODULE_ID_IOCTL] = {"ioctl"}, + [QDF_MODULE_ID_NAC] = {"nac"}, + [QDF_MODULE_ID_MESH] = {"mesh"}, + [QDF_MODULE_ID_MBO] = {"mbo"}, + [QDF_MODULE_ID_EXTIOCTL_CHANSWITCH] = {"extchanswitch"}, + [QDF_MODULE_ID_EXTIOCTL_CHANSSCAN] = {"extchanscan"}, + [QDF_MODULE_ID_TLSHIM] = {"tlshim"}, + [QDF_MODULE_ID_WMI] = {"WMI"}, + [QDF_MODULE_ID_HTT] = {"HTT"}, + [QDF_MODULE_ID_HDD] = {"HDD"}, + [QDF_MODULE_ID_SME] = {"SME"}, + [QDF_MODULE_ID_PE] = {"PE"}, + [QDF_MODULE_ID_WMA] = {"WMA"}, + [QDF_MODULE_ID_SYS] = {"SYS"}, + [QDF_MODULE_ID_QDF] = {"QDF"}, + [QDF_MODULE_ID_SAP] = {"SAP"}, + [QDF_MODULE_ID_HDD_SOFTAP] = {"HDD_SAP"}, + [QDF_MODULE_ID_HDD_DATA] = {"DATA"}, + [QDF_MODULE_ID_HDD_SAP_DATA] = {"SAP_DATA"}, + [QDF_MODULE_ID_HIF] = {"HIF"}, + [QDF_MODULE_ID_HTC] = {"HTC"}, + [QDF_MODULE_ID_TXRX] = {"TXRX"}, + [QDF_MODULE_ID_QDF_DEVICE] = {"QDF_DEV"}, + [QDF_MODULE_ID_CFG] = {"CFG"}, + [QDF_MODULE_ID_BMI] = {"BMI"}, + [QDF_MODULE_ID_EPPING] = {"EPPING"}, + [QDF_MODULE_ID_QVIT] = {"QVIT"}, + 
[QDF_MODULE_ID_DP] = {"DP"}, + [QDF_MODULE_ID_SOC] = {"SOC"}, + [QDF_MODULE_ID_OS_IF] = {"OSIF"}, + [QDF_MODULE_ID_TARGET_IF] = {"TIF"}, + [QDF_MODULE_ID_SCHEDULER] = {"SCH"}, + [QDF_MODULE_ID_MGMT_TXRX] = {"MGMT_TXRX"}, + [QDF_MODULE_ID_PMO] = {"PMO"}, + [QDF_MODULE_ID_POLICY_MGR] = {"POLICY_MGR"}, + [QDF_MODULE_ID_NAN] = {"NAN"}, + [QDF_MODULE_ID_SPECTRAL] = {"SPECTRAL"}, + [QDF_MODULE_ID_P2P] = {"P2P"}, + [QDF_MODULE_ID_OFFCHAN_TXRX] = {"OFFCHAN"}, + [QDF_MODULE_ID_REGULATORY] = {"REGULATORY"}, + [QDF_MODULE_ID_OBJ_MGR] = {"OBJMGR"}, + [QDF_MODULE_ID_SERIALIZATION] = {"SER"}, + [QDF_MODULE_ID_NSS] = {"NSS"}, + [QDF_MODULE_ID_ROAM_DEBUG] = {"roam debug"}, + [QDF_MODULE_ID_DIRECT_BUF_RX] = {"DIRECT_BUF_RX"}, + [QDF_MODULE_ID_DISA] = {"disa"}, + [QDF_MODULE_ID_GREEN_AP] = {"GREEN_AP"}, + [QDF_MODULE_ID_EXTAP] = {"EXTAP"}, + [QDF_MODULE_ID_FD] = {"FILS discovery"}, + [QDF_MODULE_ID_FTM] = {"FTM"}, + [QDF_MODULE_ID_OCB] = {"OCB"}, + [QDF_MODULE_ID_CONFIG] = {"CONFIG"}, + [QDF_MODULE_ID_IPA] = {"IPA"}, + [QDF_MODULE_ID_CP_STATS] = {"CP_STATS"}, + [QDF_MODULE_ID_ACTION_OUI] = {"action_oui"}, + [QDF_MODULE_ID_TARGET] = {"TARGET"}, + [QDF_MODULE_ID_ANY] = {"ANY"}, +}; +qdf_export_symbol(g_qdf_category_name); + +/** + * qdf_trace_display() - Display trace + * + * Return: None + */ +void qdf_trace_display(void) +{ + QDF_MODULE_ID module_id; + + pr_err(" 1)FATAL 2)ERROR 3)WARN 4)INFO 5)INFO_H 6)INFO_M 7)INFO_L 8)DEBUG\n"); + for (module_id = 0; module_id < QDF_MODULE_ID_MAX; ++module_id) { + pr_err("%2d)%s %s %s %s %s %s %s %s %s\n", + (int)module_id, + g_qdf_category_name[module_id].category_name_str, + qdf_print_is_verbose_enabled(qdf_pidx, module_id, + QDF_TRACE_LEVEL_FATAL) ? "X" : " ", + qdf_print_is_verbose_enabled(qdf_pidx, module_id, + QDF_TRACE_LEVEL_ERROR) ? "X" : " ", + qdf_print_is_verbose_enabled(qdf_pidx, module_id, + QDF_TRACE_LEVEL_WARN) ? "X" : " ", + qdf_print_is_verbose_enabled(qdf_pidx, module_id, + QDF_TRACE_LEVEL_INFO) ? 
"X" : " ", + qdf_print_is_verbose_enabled(qdf_pidx, module_id, + QDF_TRACE_LEVEL_INFO_HIGH) ? "X" : " ", + qdf_print_is_verbose_enabled(qdf_pidx, module_id, + QDF_TRACE_LEVEL_INFO_MED) ? "X" : " ", + qdf_print_is_verbose_enabled(qdf_pidx, module_id, + QDF_TRACE_LEVEL_INFO_LOW) ? "X" : " ", + qdf_print_is_verbose_enabled(qdf_pidx, module_id, + QDF_TRACE_LEVEL_DEBUG) ? "X" : " "); + } +} +qdf_export_symbol(qdf_trace_display); + +#ifdef CONFIG_MCL +#define print_to_console(str) +#else +static inline void print_to_console(char *str_buffer) +{ + pr_err("%s\n", str_buffer); +} +#endif + +#ifdef MULTI_IF_NAME +static const char *qdf_trace_wlan_modname(void) +{ + return MULTI_IF_NAME; +} +#else +static const char *qdf_trace_wlan_modname(void) +{ + return "wlan"; +} +#endif + +void qdf_trace_msg_cmn(unsigned int idx, + QDF_MODULE_ID category, + QDF_TRACE_LEVEL verbose, + const char *str_format, va_list val) +{ + char str_buffer[QDF_TRACE_BUFFER_SIZE]; + int n; + + /* Check if index passed is valid */ + if (idx < 0 || idx >= MAX_PRINT_CONFIG_SUPPORTED) { + pr_info("%s: Invalid index - %d\n", __func__, idx); + return; + } + + /* Check if print control object is in use */ + if (!print_ctrl_obj[idx].in_use) { + pr_info("%s: Invalid print control object\n", __func__); + return; + } + + /* Check if category passed is valid */ + if (category < 0 || category >= MAX_SUPPORTED_CATEGORY) { + pr_info("%s: Invalid category: %d\n", __func__, category); + return; + } + + /* Check if verbose mask is valid */ + if (verbose < 0 || verbose >= QDF_TRACE_LEVEL_MAX) { + pr_info("%s: Invalid verbose level %d\n", __func__, verbose); + return; + } + + /* + * Print the trace message when the desired verbose level is set in + * the desired category for the print control object + */ + if (print_ctrl_obj[idx].cat_info[category].category_verbose_mask & + QDF_TRACE_LEVEL_TO_MODULE_BITMASK(verbose)) { + static const char * const VERBOSE_STR[] = { + [QDF_TRACE_LEVEL_NONE] = "", + [QDF_TRACE_LEVEL_FATAL] = 
"F", + [QDF_TRACE_LEVEL_ERROR] = "E", + [QDF_TRACE_LEVEL_WARN] = "W", + [QDF_TRACE_LEVEL_INFO] = "I", + [QDF_TRACE_LEVEL_INFO_HIGH] = "IH", + [QDF_TRACE_LEVEL_INFO_MED] = "IM", + [QDF_TRACE_LEVEL_INFO_LOW] = "IL", + [QDF_TRACE_LEVEL_DEBUG] = "D", + [QDF_TRACE_LEVEL_TRACE] = "T", + [QDF_TRACE_LEVEL_ALL] = "" }; + + /* print the prefix string into the string buffer... */ + n = scnprintf(str_buffer, QDF_TRACE_BUFFER_SIZE, + "%s: [%d:%s:%s] ", qdf_trace_wlan_modname(), + in_interrupt() ? 0 : current->pid, + VERBOSE_STR[verbose], + g_qdf_category_name[category].category_name_str); + + /* print the formatted log message after the prefix string */ + vscnprintf(str_buffer + n, QDF_TRACE_BUFFER_SIZE - n, + str_format, val); +#if defined(WLAN_LOGGING_SOCK_SVC_ENABLE) + wlan_log_to_user(verbose, (char *)str_buffer, + strlen(str_buffer)); + print_to_console(str_buffer); +#else + pr_err("%s\n", str_buffer); +#endif + } +} +qdf_export_symbol(qdf_trace_msg_cmn); + +QDF_STATUS qdf_print_setup(void) +{ + int i; + + /* Loop through all print ctrl objects */ + for (i = 0; i < MAX_PRINT_CONFIG_SUPPORTED; i++) { + if (qdf_print_ctrl_cleanup(i)) + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_print_setup); + +QDF_STATUS qdf_print_ctrl_cleanup(unsigned int idx) +{ + int i = 0; + + if (idx < 0 || idx >= MAX_PRINT_CONFIG_SUPPORTED) { + pr_info("%s: Invalid index - %d\n", __func__, idx); + return QDF_STATUS_E_FAILURE; + } + + /* Clean up the print control object corresponding to that index + * If success, callee to change print control index to -1 + */ + + for (i = 0; i < MAX_SUPPORTED_CATEGORY; i++) { + print_ctrl_obj[idx].cat_info[i].category_verbose_mask = + QDF_TRACE_LEVEL_NONE; + } + print_ctrl_obj[idx].custom_print = NULL; + print_ctrl_obj[idx].custom_ctxt = NULL; + qdf_print_clean_node_flag(idx); + print_ctrl_obj[idx].in_use = false; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_print_ctrl_cleanup); + +int 
qdf_print_ctrl_register(const struct category_info *cinfo, + void *custom_print_handler, + void *custom_ctx, + const char *pctrl_name) +{ + int idx = -1; + int i = 0; + + for (i = 0; i < MAX_PRINT_CONFIG_SUPPORTED; i++) { + if (!print_ctrl_obj[i].in_use) { + idx = i; + break; + } + } + + /* Callee to handle idx -1 appropriately */ + if (idx == -1) { + pr_info("%s: Allocation failed! No print control object free\n", + __func__); + return idx; + } + + print_ctrl_obj[idx].in_use = true; + + /* + * In case callee does not pass category info, + * custom print handler, custom context and print control name, + * we do not set any value here. Clean up for the print control + * getting allocated would have taken care of initializing + * default values. + * + * We need to only set in_use to 1 in such a case + */ + + if (pctrl_name) { + qdf_str_lcopy(print_ctrl_obj[idx].name, pctrl_name, + sizeof(print_ctrl_obj[idx].name)); + } + + if (custom_print_handler) + print_ctrl_obj[idx].custom_print = custom_print_handler; + + if (custom_ctx) + print_ctrl_obj[idx].custom_ctxt = custom_ctx; + + if (cinfo) { + for (i = 0; i < MAX_SUPPORTED_CATEGORY; i++) { + if (cinfo[i].category_verbose_mask == + QDF_TRACE_LEVEL_ALL) { + print_ctrl_obj[idx].cat_info[i] + .category_verbose_mask = 0xFFFF; + } else if ((cinfo[i].category_verbose_mask == + QDF_TRACE_LEVEL_NONE) || + (cinfo[i].category_verbose_mask == + QDF_TRACE_LEVEL_TO_MODULE_BITMASK( + QDF_TRACE_LEVEL_NONE))) { + print_ctrl_obj[idx].cat_info[i] + .category_verbose_mask = 0; + } else { + print_ctrl_obj[idx].cat_info[i] + .category_verbose_mask = + cinfo[i].category_verbose_mask; + } + } + } + + pr_info("%s: Allocated print control object %d\n", + __func__, idx); + return idx; +} +qdf_export_symbol(qdf_print_ctrl_register); + +#ifndef CONFIG_MCL +void qdf_shared_print_ctrl_cleanup(void) +{ + qdf_print_ctrl_cleanup(qdf_pidx); +} +qdf_export_symbol(qdf_shared_print_ctrl_cleanup); + +/* + * Set this to invalid value to differentiate with 
user-provided + * value. + */ +int qdf_dbg_mask = 0; +qdf_export_symbol(qdf_dbg_mask); +qdf_declare_param(qdf_dbg_mask, int); + +/* + * QDF can be passed parameters which indicate the + * debug level for each module. + * an array of string values are passed, each string hold the following form + * + * = + * + * The array qdf_dbg_arr will hold these module-string=value strings + * The variable qdf_dbg_arr_cnt will have the count of how many such + * string values were passed. + */ +static char *qdf_dbg_arr[QDF_MODULE_ID_MAX]; +static int qdf_dbg_arr_cnt; +qdf_declare_param_array(qdf_dbg_arr, charp, &qdf_dbg_arr_cnt); + +static uint16_t set_cumulative_verbose_mask(QDF_TRACE_LEVEL max_level) +{ + uint16_t category_verbose_mask = 0; + QDF_TRACE_LEVEL level; + + for (level = QDF_TRACE_LEVEL_FATAL; level <= max_level; level++) { + category_verbose_mask |= + QDF_TRACE_LEVEL_TO_MODULE_BITMASK(level); + } + return category_verbose_mask; +} + +static QDF_MODULE_ID find_qdf_module_from_string(char *str) +{ + QDF_MODULE_ID mod_id; + + for (mod_id = 0; mod_id < QDF_MODULE_ID_MAX; mod_id++) { + if (strcasecmp(str, + g_qdf_category_name[mod_id].category_name_str) + == 0) { + break; + } + } + return mod_id; +} + +static void process_qdf_dbg_arr_param(struct category_info *cinfo, + int array_index) +{ + char *mod_val_str, *mod_str, *val_str; + unsigned long dbg_level; + QDF_MODULE_ID mod_id; + + mod_val_str = qdf_dbg_arr[array_index]; + mod_str = strsep(&mod_val_str, "="); + val_str = mod_val_str; + if (val_str == NULL) { + pr_info("qdf_dbg_arr: %s not in the = form\n", + mod_str); + return; + } + + mod_id = find_qdf_module_from_string(mod_str); + if (mod_id >= QDF_MODULE_ID_MAX) { + pr_info("ERROR!!Module name %s not in the list of modules\n", + mod_str); + return; + } + + if (kstrtol(val_str, 10, &dbg_level) < 0) { + pr_info("ERROR!!Invalid debug level for module: %s\n", + mod_str); + return; + } + + if (dbg_level >= QDF_TRACE_LEVEL_MAX) { + pr_info("ERROR!!Debug level for %s too 
high", mod_str); + pr_info("max: %d given %lu\n", QDF_TRACE_LEVEL_MAX, + dbg_level); + return; + } + + pr_info("User passed setting module %s(%d) to level %lu\n", + mod_str, + mod_id, + dbg_level); + cinfo[mod_id].category_verbose_mask = + set_cumulative_verbose_mask((QDF_TRACE_LEVEL)dbg_level); +} + +static void set_default_trace_levels(struct category_info *cinfo) +{ + int i; + static QDF_TRACE_LEVEL module_trace_default_level[QDF_MODULE_ID_MAX] = { + [QDF_MODULE_ID_TDLS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_ACS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SCAN_SM] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SCANENTRY] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_WDS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_ACTION] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_ROAM] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_INACT] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_DOTH] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_IQUE] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_WME] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_ACL] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_WPA] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_RADKEYS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_RADDUMP] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_RADIUS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_DOT1XSM] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_DOT1X] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_POWER] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_STATE] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_OUTPUT] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SCAN] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_AUTH] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_ASSOC] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_NODE] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_ELEMID] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_XRATE] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_INPUT] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_CRYPTO] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_DUMPPKTS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_DEBUG] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_MLME] = QDF_TRACE_LEVEL_NONE, + 
[QDF_MODULE_ID_RRM] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_WNM] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_P2P_PROT] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_PROXYARP] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_L2TIF] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_WIFIPOS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_WRAP] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_DFS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_ATF] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_SPLITMAC] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_IOCTL] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_NAC] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_MESH] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_MBO] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_EXTIOCTL_CHANSWITCH] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_EXTIOCTL_CHANSSCAN] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_TLSHIM] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_WMI] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_HTT] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_HDD] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SME] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_PE] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_WMA] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SYS] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_QDF] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_SAP] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_HDD_SOFTAP] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_HDD_DATA] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_HDD_SAP_DATA] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_HIF] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_HTC] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_TXRX] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_QDF_DEVICE] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_CFG] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_BMI] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_EPPING] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_QVIT] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_DP] = QDF_TRACE_LEVEL_FATAL, + [QDF_MODULE_ID_SOC] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_OS_IF] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_TARGET_IF] = QDF_TRACE_LEVEL_INFO, + 
[QDF_MODULE_ID_SCHEDULER] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_MGMT_TXRX] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SERIALIZATION] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_PMO] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_P2P] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_POLICY_MGR] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_CONFIG] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_REGULATORY] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SA_API] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_NAN] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_OFFCHAN_TXRX] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SON] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_SPECTRAL] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_OBJ_MGR] = QDF_TRACE_LEVEL_FATAL, + [QDF_MODULE_ID_NSS] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_ROAM_DEBUG] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_CDP] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_DIRECT_BUF_RX] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_DISA] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_GREEN_AP] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_FTM] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_EXTAP] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_FD] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_OCB] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_IPA] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_ACTION_OUI] = QDF_TRACE_LEVEL_NONE, + [QDF_MODULE_ID_CP_STATS] = QDF_TRACE_LEVEL_ERROR, + [QDF_MODULE_ID_ANY] = QDF_TRACE_LEVEL_NONE, + }; + + for (i = 0; i < MAX_SUPPORTED_CATEGORY; i++) { + cinfo[i].category_verbose_mask = set_cumulative_verbose_mask( + module_trace_default_level[i]); + } +} + +void qdf_shared_print_ctrl_init(void) +{ + int i; + struct category_info cinfo[MAX_SUPPORTED_CATEGORY]; + + set_default_trace_levels(cinfo); + + /* + * User specified across-module single debug level + */ + if ((qdf_dbg_mask > 0) && (qdf_dbg_mask <= QDF_TRACE_LEVEL_MAX)) { + pr_info("User specified module debug level of %d\n", + qdf_dbg_mask); + for (i = 0; i < MAX_SUPPORTED_CATEGORY; i++) { + cinfo[i].category_verbose_mask = + 
set_cumulative_verbose_mask(qdf_dbg_mask); + } + } else { + pr_info("qdf_dbg_mask value is invalid\n"); + pr_info("Using the default module debug levels instead\n"); + } + + /* + * Module ID-Level specified as array during module load + */ + for (i = 0; i < qdf_dbg_arr_cnt; i++) { + process_qdf_dbg_arr_param(cinfo, i); + } + qdf_pidx = qdf_print_ctrl_register(cinfo, NULL, NULL, + "LOG_SHARED_OBJ"); +} +qdf_export_symbol(qdf_shared_print_ctrl_init); +#endif + +QDF_STATUS qdf_print_set_category_verbose(unsigned int idx, + QDF_MODULE_ID category, + QDF_TRACE_LEVEL verbose, + bool is_set) +{ + /* Check if index passed is valid */ + if (idx < 0 || idx >= MAX_PRINT_CONFIG_SUPPORTED) { + pr_err("%s: Invalid index - %d\n", __func__, idx); + return QDF_STATUS_E_FAILURE; + } + + /* Check if print control object is in use */ + if (!print_ctrl_obj[idx].in_use) { + pr_err("%s: Invalid print control object\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + /* Check if category passed is valid */ + if (category < 0 || category >= MAX_SUPPORTED_CATEGORY) { + pr_err("%s: Invalid category: %d\n", __func__, category); + return QDF_STATUS_E_FAILURE; + } + + /* Check if verbose mask is valid */ + if (verbose < 0 || verbose >= QDF_TRACE_LEVEL_MAX) { + pr_err("%s: Invalid verbose level %d\n", __func__, verbose); + return QDF_STATUS_E_FAILURE; + } + + if (verbose == QDF_TRACE_LEVEL_ALL) { + print_ctrl_obj[idx].cat_info[category].category_verbose_mask = + 0xFFFF; + return QDF_STATUS_SUCCESS; + } + + if (verbose == QDF_TRACE_LEVEL_NONE) { + print_ctrl_obj[idx].cat_info[category].category_verbose_mask = + QDF_TRACE_LEVEL_NONE; + return QDF_STATUS_SUCCESS; + } + + if (!is_set) { + if (print_ctrl_obj[idx].cat_info[category].category_verbose_mask + & QDF_TRACE_LEVEL_TO_MODULE_BITMASK(verbose)) { + print_ctrl_obj[idx].cat_info[category] + .category_verbose_mask &= + ~QDF_TRACE_LEVEL_TO_MODULE_BITMASK(verbose); + } + } else { + print_ctrl_obj[idx].cat_info[category].category_verbose_mask |= + 
QDF_TRACE_LEVEL_TO_MODULE_BITMASK(verbose); + } + + pr_debug("%s: Print control object %d, Category %d, Verbose level %d\n", + __func__, + idx, + category, + print_ctrl_obj[idx].cat_info[category].category_verbose_mask); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_print_set_category_verbose); + +bool qdf_print_is_category_enabled(unsigned int idx, QDF_MODULE_ID category) +{ + QDF_TRACE_LEVEL verbose_mask; + + /* Check if index passed is valid */ + if (idx < 0 || idx >= MAX_PRINT_CONFIG_SUPPORTED) { + pr_info("%s: Invalid index - %d\n", __func__, idx); + return false; + } + + /* Check if print control object is in use */ + if (!print_ctrl_obj[idx].in_use) { + pr_info("%s: Invalid print control object\n", __func__); + return false; + } + + /* Check if category passed is valid */ + if (category < 0 || category >= MAX_SUPPORTED_CATEGORY) { + pr_info("%s: Invalid category: %d\n", __func__, category); + return false; + } + + verbose_mask = + print_ctrl_obj[idx].cat_info[category].category_verbose_mask; + + if (verbose_mask == QDF_TRACE_LEVEL_NONE) + return false; + else + return true; +} +qdf_export_symbol(qdf_print_is_category_enabled); + +bool qdf_print_is_verbose_enabled(unsigned int idx, QDF_MODULE_ID category, + QDF_TRACE_LEVEL verbose) +{ + bool verbose_enabled = false; + + /* Check if index passed is valid */ + if (idx < 0 || idx >= MAX_PRINT_CONFIG_SUPPORTED) { + pr_info("%s: Invalid index - %d\n", __func__, idx); + return verbose_enabled; + } + + /* Check if print control object is in use */ + if (!print_ctrl_obj[idx].in_use) { + pr_info("%s: Invalid print control object\n", __func__); + return verbose_enabled; + } + + /* Check if category passed is valid */ + if (category < 0 || category >= MAX_SUPPORTED_CATEGORY) { + pr_info("%s: Invalid category: %d\n", __func__, category); + return verbose_enabled; + } + + if ((verbose == QDF_TRACE_LEVEL_NONE) || + (verbose >= QDF_TRACE_LEVEL_MAX)) { + verbose_enabled = false; + } else if (verbose == 
QDF_TRACE_LEVEL_ALL) { + if (print_ctrl_obj[idx].cat_info[category] + .category_verbose_mask == 0xFFFF) + verbose_enabled = true; + } else { + verbose_enabled = + (print_ctrl_obj[idx].cat_info[category].category_verbose_mask & + QDF_TRACE_LEVEL_TO_MODULE_BITMASK(verbose)) ? true : false; + } + + return verbose_enabled; +} +qdf_export_symbol(qdf_print_is_verbose_enabled); + +#ifdef DBG_LVL_MAC_FILTERING + +QDF_STATUS qdf_print_set_node_flag(unsigned int idx, uint8_t enable) +{ + /* Check if index passed is valid */ + if (idx < 0 || idx >= MAX_PRINT_CONFIG_SUPPORTED) { + pr_info("%s: Invalid index - %d\n", __func__, idx); + return QDF_STATUS_E_FAILURE; + } + + /* Check if print control object is in use */ + if (!print_ctrl_obj[idx].in_use) { + pr_info("%s: Invalid print control object\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (enable > 1) { + pr_info("%s: Incorrect input: Use 1 or 0 to enable or disable\n", + __func__); + return QDF_STATUS_E_FAILURE; + } + + print_ctrl_obj[idx].dbglvlmac_on = enable; + pr_info("%s: DbgLVLmac feature %s\n", + __func__, + ((enable) ? 
"enabled" : "disabled")); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_print_set_node_flag); + +bool qdf_print_get_node_flag(unsigned int idx) +{ + bool node_flag = false; + + /* Check if index passed is valid */ + if (idx < 0 || idx >= MAX_PRINT_CONFIG_SUPPORTED) { + pr_info("%s: Invalid index - %d\n", __func__, idx); + return node_flag; + } + + /* Check if print control object is in use */ + if (!print_ctrl_obj[idx].in_use) { + pr_info("%s: Invalid print control object\n", __func__); + return node_flag; + } + + if (print_ctrl_obj[idx].dbglvlmac_on) + node_flag = true; + + return node_flag; +} +qdf_export_symbol(qdf_print_get_node_flag); + +void qdf_print_clean_node_flag(unsigned int idx) +{ + /* Disable dbglvlmac_on during cleanup */ + print_ctrl_obj[idx].dbglvlmac_on = 0; +} + +#else + +void qdf_print_clean_node_flag(unsigned int idx) +{ + /* No operation in case of no support for DBG_LVL_MAC_FILTERING */ + return; +} +#endif + +void QDF_PRINT_INFO(unsigned int idx, QDF_MODULE_ID module, + QDF_TRACE_LEVEL level, + char *str_format, ...) +{ + va_list args; + + /* Generic wrapper API will compile qdf_vprint in order to + * log the message. 
Once QDF converged debug framework is in + * place, this will be changed to adapt to the framework, compiling + * call to converged tracing API + */ + va_start(args, str_format); + qdf_vprint(str_format, args); + va_end(args); +} +qdf_export_symbol(QDF_PRINT_INFO); + +#ifdef WLAN_LOGGING_SOCK_SVC_ENABLE +void qdf_logging_init(void) +{ + wlan_logging_sock_init_svc(); + nl_srv_init(NULL); +} + +void qdf_logging_exit(void) +{ + nl_srv_exit(); + wlan_logging_sock_deinit_svc(); +} +#else +void qdf_logging_init(void) +{ +} + +void qdf_logging_exit(void) +{ +} +#endif + +#ifdef CONFIG_KALLSYMS +inline int qdf_sprint_symbol(char *buffer, void *addr) +{ + return sprint_symbol(buffer, (unsigned long)addr); +} +#else +int qdf_sprint_symbol(char *buffer, void *addr) +{ + if (!buffer) + return 0; + + buffer[0] = '\0'; + return 1; +} +#endif +qdf_export_symbol(qdf_sprint_symbol); + +void qdf_set_pidx(int pidx) +{ + qdf_pidx = pidx; +} +qdf_export_symbol(qdf_set_pidx); + +int qdf_get_pidx(void) +{ + return qdf_pidx; +} +qdf_export_symbol(qdf_get_pidx); + +#ifdef PANIC_ON_BUG +#ifdef CONFIG_SLUB_DEBUG +void __qdf_bug(void) +{ + BUG(); +} +qdf_export_symbol(__qdf_bug); +#endif /* CONFIG_SLUB_DEBUG */ +#endif /* PANIC_ON_BUG */ + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_cpuhp.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_cpuhp.c new file mode 100644 index 0000000000000000000000000000000000000000..0573583f3c9fdcfb1020d616280b99cbc770d93e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_cpuhp.c @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_cpuhp (CPU hotplug) + * QCA driver framework (QDF) CPU hotplug APIs + */ + +#include "qdf_cpuhp.h" +#include "i_qdf_cpuhp.h" +#include "qdf_list.h" +#include "qdf_lock.h" + +static qdf_mutex_t qdf_cpuhp_lock; +static qdf_list_t qdf_cpuhp_handlers; + +struct qdf_cpuhp_handler { + qdf_list_node_t node; + void *context; + qdf_cpuhp_callback up_callback; + qdf_cpuhp_callback down_callback; +}; + +static void qdf_cpuhp_on_up(uint32_t cpu) +{ + QDF_STATUS status; + qdf_list_node_t *node; + + qdf_mutex_acquire(&qdf_cpuhp_lock); + + status = qdf_list_peek_front(&qdf_cpuhp_handlers, &node); + while (QDF_IS_STATUS_SUCCESS(status)) { + struct qdf_cpuhp_handler *handler = + qdf_container_of(node, struct qdf_cpuhp_handler, node); + if (handler->up_callback) + handler->up_callback(handler->context, cpu); + + status = qdf_list_peek_next(&qdf_cpuhp_handlers, node, &node); + } + + qdf_mutex_release(&qdf_cpuhp_lock); +} + +static void qdf_cpuhp_on_down(uint32_t cpu) +{ + QDF_STATUS status; + qdf_list_node_t *node; + + qdf_mutex_acquire(&qdf_cpuhp_lock); + + status = qdf_list_peek_front(&qdf_cpuhp_handlers, &node); + while (QDF_IS_STATUS_SUCCESS(status)) { + struct qdf_cpuhp_handler *handler = + qdf_container_of(node, struct qdf_cpuhp_handler, node); + if (handler->down_callback) + handler->down_callback(handler->context, cpu); + + status = qdf_list_peek_next(&qdf_cpuhp_handlers, node, &node); + } + + qdf_mutex_release(&qdf_cpuhp_lock); +} + 
+QDF_STATUS qdf_cpuhp_init(void) +{ + QDF_STATUS status; + + status = qdf_mutex_create(&qdf_cpuhp_lock); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + qdf_list_create(&qdf_cpuhp_handlers, 0); + + __qdf_cpuhp_os_init(qdf_cpuhp_on_up, qdf_cpuhp_on_down); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS qdf_cpuhp_deinit(void) +{ + __qdf_cpuhp_os_deinit(); + qdf_list_destroy(&qdf_cpuhp_handlers); + return qdf_mutex_destroy(&qdf_cpuhp_lock); +} + +QDF_STATUS qdf_cpuhp_register(struct qdf_cpuhp_handler **out_handler, + void *context, + qdf_cpuhp_callback up_callback, + qdf_cpuhp_callback down_callback) +{ + QDF_STATUS status; + struct qdf_cpuhp_handler *handler; + + *out_handler = NULL; + + handler = qdf_mem_malloc(sizeof(*handler)); + if (!handler) + return QDF_STATUS_E_NOMEM; + + handler->context = context; + handler->up_callback = up_callback; + handler->down_callback = down_callback; + + status = qdf_mutex_acquire(&qdf_cpuhp_lock); + if (QDF_IS_STATUS_ERROR(status)) + goto free_handler; + + status = qdf_list_insert_back(&qdf_cpuhp_handlers, &handler->node); + if (QDF_IS_STATUS_ERROR(status)) + goto release_lock; + + /* this can fail, but there isn't a good way to recover... 
*/ + qdf_mutex_release(&qdf_cpuhp_lock); + + *out_handler = handler; + + return QDF_STATUS_SUCCESS; + +release_lock: + qdf_mutex_release(&qdf_cpuhp_lock); + +free_handler: + qdf_mem_free(handler); + + return status; +} + +void qdf_cpuhp_unregister(struct qdf_cpuhp_handler **out_handler) +{ + struct qdf_cpuhp_handler *handler = *out_handler; + + QDF_BUG(handler); + if (!handler) + return; + + qdf_mutex_acquire(&qdf_cpuhp_lock); + qdf_list_remove_node(&qdf_cpuhp_handlers, &handler->node); + qdf_mutex_release(&qdf_cpuhp_lock); + + qdf_mem_free(handler); + *out_handler = NULL; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_debug_domain.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_debug_domain.c new file mode 100644 index 0000000000000000000000000000000000000000..0230f985fd5cb77c1278811b160e68d1838cfc54 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_debug_domain.c @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: qdf_debug_domain + * QCA driver framework (QDF) debug domain APIs. Debug domains are used to track + * resource allocations across different driver states, particularly for runtime + * leak detection. 
+ */ + +#include "qdf_debug_domain.h" +#include "qdf_trace.h" + +static enum qdf_debug_domain qdf_debug_domain_current = QDF_DEBUG_DOMAIN_INIT; + +enum qdf_debug_domain qdf_debug_domain_get(void) +{ + return qdf_debug_domain_current; +} + +void qdf_debug_domain_set(enum qdf_debug_domain domain) +{ + QDF_BUG(qdf_debug_domain_valid(domain)); + if (!qdf_debug_domain_valid(domain)) + return; + + qdf_debug_domain_current = domain; +} + +const char *qdf_debug_domain_name(enum qdf_debug_domain domain) +{ + switch (domain) { + case QDF_DEBUG_DOMAIN_INIT: + return "Init"; + case QDF_DEBUG_DOMAIN_ACTIVE: + return "Active"; + default: + return "Invalid"; + } +} + +bool qdf_debug_domain_valid(enum qdf_debug_domain domain) +{ + return domain >= QDF_DEBUG_DOMAIN_INIT && + domain < QDF_DEBUG_DOMAIN_COUNT; +} diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_flex_mem.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_flex_mem.c new file mode 100644 index 0000000000000000000000000000000000000000..db005e4b5efebf9479ba0cb2b59076ee44a57e37 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_flex_mem.c @@ -0,0 +1,197 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "qdf_flex_mem.h" +#include "qdf_list.h" +#include "qdf_lock.h" +#include "qdf_mem.h" +#include "qdf_module.h" +#include "qdf_trace.h" +#include "qdf_util.h" + +static struct qdf_flex_mem_segment * +qdf_flex_mem_seg_alloc(struct qdf_flex_mem_pool *pool) +{ + struct qdf_flex_mem_segment *seg; + size_t total_size = sizeof(struct qdf_flex_mem_segment) + + pool->item_size * QDF_FM_BITMAP_BITS; + + seg = qdf_mem_malloc(total_size); + if (!seg) + return NULL; + + seg->dynamic = true; + seg->bytes = (uint8_t *)(seg + 1); + seg->used_bitmap = 0; + qdf_list_insert_back(&pool->seg_list, &seg->node); + + return seg; +} + +void qdf_flex_mem_init(struct qdf_flex_mem_pool *pool) +{ + int i; + + qdf_spinlock_create(&pool->lock); + + for (i = 0; i < pool->reduction_limit; i++) + qdf_flex_mem_seg_alloc(pool); +} +qdf_export_symbol(qdf_flex_mem_init); + +void qdf_flex_mem_deinit(struct qdf_flex_mem_pool *pool) +{ + qdf_flex_mem_release(pool); + QDF_BUG(!qdf_list_size(&pool->seg_list)); + + qdf_spinlock_destroy(&pool->lock); +} +qdf_export_symbol(qdf_flex_mem_deinit); + +static void *__qdf_flex_mem_alloc(struct qdf_flex_mem_pool *pool) +{ + struct qdf_flex_mem_segment *seg; + + qdf_list_for_each(&pool->seg_list, seg, node) { + int index; + void *ptr; + + index = qdf_ffz(seg->used_bitmap); + if (index < 0) + continue; + + QDF_BUG(index < QDF_FM_BITMAP_BITS); + + seg->used_bitmap ^= (QDF_FM_BITMAP)1 << index; + ptr = &seg->bytes[index * pool->item_size]; + qdf_mem_zero(ptr, pool->item_size); + + return ptr; + } + + seg = qdf_flex_mem_seg_alloc(pool); + if (!seg) + return NULL; + + seg->used_bitmap = 1; + + return seg->bytes; +} + +void *qdf_flex_mem_alloc(struct qdf_flex_mem_pool *pool) +{ + void *ptr; + + QDF_BUG(pool); + if (!pool) + return NULL; + + qdf_spin_lock_bh(&pool->lock); + ptr = __qdf_flex_mem_alloc(pool); + qdf_spin_unlock_bh(&pool->lock); + + return ptr; +} +qdf_export_symbol(qdf_flex_mem_alloc); + +static void qdf_flex_mem_seg_free(struct 
qdf_flex_mem_pool *pool, + struct qdf_flex_mem_segment *seg) +{ + if (!seg->dynamic) + return; + + if (qdf_list_size(&pool->seg_list) <= pool->reduction_limit) + return; + + + qdf_list_remove_node(&pool->seg_list, &seg->node); + qdf_mem_free(seg); +} + +static void __qdf_flex_mem_free(struct qdf_flex_mem_pool *pool, void *ptr) +{ + struct qdf_flex_mem_segment *seg; + void *low_addr; + void *high_addr; + unsigned long index; + + qdf_list_for_each(&pool->seg_list, seg, node) { + low_addr = seg->bytes; + high_addr = low_addr + pool->item_size * QDF_FM_BITMAP_BITS; + + if (ptr < low_addr || ptr > high_addr) + continue; + + index = (ptr - low_addr) / pool->item_size; + QDF_BUG(index < QDF_FM_BITMAP_BITS); + + seg->used_bitmap ^= (QDF_FM_BITMAP)1 << index; + if (!seg->used_bitmap) + qdf_flex_mem_seg_free(pool, seg); + + return; + } + + QDF_DEBUG_PANIC("Failed to find pointer in segment pool"); +} + +void qdf_flex_mem_free(struct qdf_flex_mem_pool *pool, void *ptr) +{ + QDF_BUG(pool); + if (!pool) + return; + + QDF_BUG(ptr); + if (!ptr) + return; + + qdf_spin_lock_bh(&pool->lock); + __qdf_flex_mem_free(pool, ptr); + qdf_spin_unlock_bh(&pool->lock); +} +qdf_export_symbol(qdf_flex_mem_free); + +static void __qdf_flex_mem_release(struct qdf_flex_mem_pool *pool) +{ + struct qdf_flex_mem_segment *seg; + struct qdf_flex_mem_segment *next; + + qdf_list_for_each_del(&pool->seg_list, seg, next, node) { + if (!seg->dynamic) + continue; + + if (seg->used_bitmap != 0) + continue; + + qdf_list_remove_node(&pool->seg_list, &seg->node); + qdf_mem_free(seg); + } +} + +void qdf_flex_mem_release(struct qdf_flex_mem_pool *pool) +{ + QDF_BUG(pool); + if (!pool) + return; + + qdf_spin_lock_bh(&pool->lock); + __qdf_flex_mem_release(pool); + qdf_spin_unlock_bh(&pool->lock); +} +qdf_export_symbol(qdf_flex_mem_release); + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_parse.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_parse.c new file mode 100644 index 
0000000000000000000000000000000000000000..577f8eedaea575d4fe17744554e201fe7359ef07 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_parse.c @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_file.h" +#include "qdf_module.h" +#include "qdf_parse.h" +#include "qdf_status.h" +#include "qdf_str.h" +#include "qdf_trace.h" +#include "qdf_types.h" + +QDF_STATUS qdf_ini_parse(const char *ini_path, void *context, + qdf_ini_item_cb item_cb, qdf_ini_section_cb section_cb) +{ + QDF_STATUS status; + char *fbuf; + char *cursor; + + status = qdf_file_read(ini_path, &fbuf); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_err("Failed to read *.ini file @ %s", ini_path); + return status; + } + + /* foreach line */ + cursor = fbuf; + while (*cursor != '\0') { + char *key = cursor; + char *value = NULL; + bool comment = false; + bool eol = false; + + /* + * Look for the end of the line, while noting any + * value ('=') or comment ('#') indicators + */ + while (!eol) { + switch (*cursor) { + case '\r': + case '\n': + *cursor = '\0'; + cursor++; + /* fall through */ + case '\0': + eol = true; + break; + + case '=': + /* + * The first '=' is the value indicator. 
+ * Subsequent '=' are valid value characters. + */ + if (!value && !comment) { + value = cursor + 1; + *cursor = '\0'; + } + + cursor++; + break; + + case '#': + /* + * We don't process comments, so we can null- + * terminate unconditionally here (unlike '='). + */ + comment = true; + *cursor = '\0'; + /* fall through */ + default: + cursor++; + break; + } + } + + key = qdf_str_trim(key); + + /* + * Ignoring comments, a valid ini line contains one of: + * 1) some 'key=value' config item + * 2) section header + * 3) a line containing whitespace + */ + if (value) { + status = item_cb(context, key, value); + if (QDF_IS_STATUS_ERROR(status)) + goto free_fbuf; + } else if (key[0] == '[') { + qdf_size_t len = qdf_str_len(key); + + if (key[len - 1] != ']') { + qdf_err("Invalid *.ini syntax '%s'", key); + } else { + key[len - 1] = '\0'; + status = section_cb(context, key + 1); + if (QDF_IS_STATUS_ERROR(status)) + goto free_fbuf; + } + } else if (key[0] != '\0') { + qdf_err("Invalid *.ini syntax '%s'", key); + } + + /* skip remaining EoL characters */ + while (*cursor == '\n' || *cursor == '\r') + cursor++; + } + + status = QDF_STATUS_SUCCESS; + +free_fbuf: + qdf_file_buf_free(fbuf); + + return status; +} +qdf_export_symbol(qdf_ini_parse); + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_platform.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_platform.c new file mode 100644 index 0000000000000000000000000000000000000000..3347b71ccf2453e6164f195dd8d3824cc58307fb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_platform.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_module.h" +#include "qdf_trace.h" +#include "qdf_platform.h" + +/** + * The following callbacks should be defined static to make sure they are + * initialized to NULL + */ +static qdf_self_recovery_callback self_recovery_cb; +static qdf_ssr_callback ssr_protect_cb; +static qdf_ssr_callback ssr_unprotect_cb; +static qdf_is_module_state_transitioning_cb module_state_transitioning_cb; +static qdf_is_fw_down_callback is_fw_down_cb; +static qdf_is_recovering_callback is_recovering_cb; + +void qdf_register_fw_down_callback(qdf_is_fw_down_callback is_fw_down) +{ + is_fw_down_cb = is_fw_down; +} + +qdf_export_symbol(qdf_register_fw_down_callback); + +bool qdf_is_fw_down(void) +{ + if (!is_fw_down_cb) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "fw down callback is not registered"); + return false; + } + + return is_fw_down_cb(); +} + +qdf_export_symbol(qdf_is_fw_down); + +void qdf_register_self_recovery_callback(qdf_self_recovery_callback callback) +{ + self_recovery_cb = callback; +} + +qdf_export_symbol(qdf_register_self_recovery_callback); + +void __qdf_trigger_self_recovery(const char *func, const uint32_t line) +{ + if (self_recovery_cb) + self_recovery_cb(QDF_REASON_UNSPECIFIED, func, line); +} + +qdf_export_symbol(__qdf_trigger_self_recovery); + +void qdf_register_ssr_protect_callbacks(qdf_ssr_callback protect, + qdf_ssr_callback unprotect) +{ + ssr_protect_cb = protect; + ssr_unprotect_cb = unprotect; +} + 
+qdf_export_symbol(qdf_register_ssr_protect_callbacks); + +void qdf_ssr_protect(const char *caller) +{ + if (ssr_protect_cb) + ssr_protect_cb(caller); +} + +qdf_export_symbol(qdf_ssr_protect); + +void qdf_ssr_unprotect(const char *caller) +{ + if (ssr_unprotect_cb) + ssr_unprotect_cb(caller); +} + +qdf_export_symbol(qdf_ssr_unprotect); + +void qdf_register_module_state_query_callback( + qdf_is_module_state_transitioning_cb query) +{ + module_state_transitioning_cb = query; +} + +qdf_export_symbol(qdf_register_module_state_query_callback); + +bool qdf_is_module_state_transitioning(void) +{ + if (module_state_transitioning_cb) + return module_state_transitioning_cb(); + return false; +} + +qdf_export_symbol(qdf_is_module_state_transitioning); + +void qdf_register_recovering_state_query_callback( + qdf_is_recovering_callback is_recovering) +{ + is_recovering_cb = is_recovering; +} + +bool qdf_is_recovering(void) +{ + if (is_recovering_cb) + return is_recovering_cb(); + return false; +} + +qdf_export_symbol(qdf_is_recovering); diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_str.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_str.c new file mode 100644 index 0000000000000000000000000000000000000000..f610397d6babca0a32cbf6deb8d3a71e038b6292 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_str.c @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_mem.h" +#include "qdf_module.h" +#include "qdf_str.h" +#include "qdf_trace.h" + +QDF_STATUS qdf_str_dup(char **dest, const char *src) +{ + qdf_size_t size; + char *dup; + + *dest = NULL; + + QDF_BUG(src); + if (!src) + return QDF_STATUS_E_INVAL; + + /* size = length + null-terminator */ + size = qdf_str_len(src) + 1; + dup = qdf_mem_malloc(size); + if (!dup) + return QDF_STATUS_E_NOMEM; + + qdf_mem_copy(dup, src, size); + *dest = dup; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_str_dup); + +void qdf_str_right_trim(char *str) +{ + char *end = str + qdf_str_len(str) - 1; + + while (end >= str && qdf_is_space(*end)) + end--; + + end[1] = '\0'; +} +qdf_export_symbol(qdf_str_right_trim); + diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_types.c b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_types.c new file mode 100644 index 0000000000000000000000000000000000000000..5886aa5082d9f772d00a171f828bc4acc213a6bf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/qdf/src/qdf_types.c @@ -0,0 +1,526 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_mem.h" +#include "qdf_module.h" +#include "qdf_status.h" +#include "qdf_str.h" +#include "qdf_trace.h" +#include "qdf_types.h" + +static QDF_STATUS qdf_consume_char(const char **str, char c) +{ + if ((*str)[0] != c) + return QDF_STATUS_E_FAILURE; + + (*str)++; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS qdf_consume_dec(const char **str, uint8_t *out_digit) +{ + uint8_t c = (*str)[0]; + + if (c >= '0' && c <= '9') + *out_digit = c - '0'; + else + return QDF_STATUS_E_FAILURE; + + (*str)++; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS qdf_consume_hex(const char **str, uint8_t *out_nibble) +{ + uint8_t c = (*str)[0]; + + if (c >= '0' && c <= '9') + *out_nibble = c - '0'; + else if (c >= 'a' && c <= 'f') + *out_nibble = c - 'a' + 10; + else if (c >= 'A' && c <= 'F') + *out_nibble = c - 'A' + 10; + else + return QDF_STATUS_E_FAILURE; + + (*str)++; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS qdf_consume_octet_dec(const char **str, uint8_t *out_octet) +{ + uint8_t len = 0; + uint16_t octet = 0; + int i; + + /* consume up to 3 decimal digits */ + for (i = 0; i < 3; i++) { + uint8_t digit; + + if (QDF_IS_STATUS_ERROR(qdf_consume_dec(str, &digit))) + break; + + len++; + octet = octet * 10 + digit; + } + + /* require at least 1 digit */ + if (!len) + return QDF_STATUS_E_FAILURE; + + if (octet > 255) { + (*str) -= len; + return QDF_STATUS_E_FAILURE; + } + + *out_octet = octet; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS qdf_consume_hex_pair(const char **str, uint8_t *out_byte) +{ + QDF_STATUS status; + uint8_t hi, low; + + status = qdf_consume_hex(str, &hi); + if 
(QDF_IS_STATUS_ERROR(status)) + return status; + + status = qdf_consume_hex(str, &low); + if (QDF_IS_STATUS_ERROR(status)) { + (*str)--; + return status; + } + + *out_byte = hi << 4 | low; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS qdf_consume_hextet(const char **str, uint16_t *out_hextet) +{ + uint8_t len = 0; + uint16_t hextet = 0; + int i; + + /* consume up to 4 hex digits */ + for (i = 0; i < 4; i++) { + uint8_t digit; + + if (QDF_IS_STATUS_ERROR(qdf_consume_hex(str, &digit))) + break; + + len++; + hextet = (hextet << 4) + digit; + } + + /* require at least 1 digit */ + if (!len) + return QDF_STATUS_E_FAILURE; + + /* no need to check for overflow */ + + *out_hextet = hextet; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS qdf_consume_radix(const char **str, uint8_t *out_radix) +{ + if ((*str)[0] == '0') { + switch ((*str)[1]) { + case 'b': + *out_radix = 2; + *str += 2; + break; + case 'o': + *out_radix = 8; + *str += 2; + break; + case 'x': + *out_radix = 16; + *str += 2; + break; + default: + *out_radix = 10; + break; + } + + return QDF_STATUS_SUCCESS; + } + + if (*str[0] >= '0' && *str[0] <= '9') { + *out_radix = 10; + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +static QDF_STATUS +qdf_int_parse(const char *int_str, uint64_t *out_int, bool *out_negate) +{ + QDF_STATUS status; + bool negate = false; + uint8_t radix; + uint8_t digit; + uint64_t value = 0; + uint64_t next_value; + + QDF_BUG(int_str); + if (!int_str) + return QDF_STATUS_E_INVAL; + + QDF_BUG(out_int); + if (!out_int) + return QDF_STATUS_E_INVAL; + + int_str = qdf_str_left_trim(int_str); + + status = qdf_consume_char(&int_str, '-'); + if (QDF_IS_STATUS_SUCCESS(status)) + negate = true; + else + qdf_consume_char(&int_str, '+'); + + status = qdf_consume_radix(&int_str, &radix); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + while (QDF_IS_STATUS_SUCCESS(qdf_consume_hex(&int_str, &digit))) { + if (digit >= radix) + return QDF_STATUS_E_FAILURE; + + 
next_value = value * radix + digit; + if (next_value < value) + return QDF_STATUS_E_RANGE; + + value = next_value; + } + + int_str = qdf_str_left_trim(int_str); + if (int_str[0] != '\0') + return QDF_STATUS_E_FAILURE; + + *out_negate = negate; + *out_int = value; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS qdf_int32_parse(const char *int_str, int32_t *out_int) +{ + QDF_STATUS status; + int64_t value; + + status = qdf_int64_parse(int_str, &value); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + if ((int32_t)value != value) + return QDF_STATUS_E_RANGE; + + *out_int = value; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_int32_parse); + +QDF_STATUS qdf_uint32_parse(const char *int_str, uint32_t *out_int) +{ + QDF_STATUS status; + uint64_t value; + + status = qdf_uint64_parse(int_str, &value); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + if ((uint32_t)value != value) + return QDF_STATUS_E_RANGE; + + *out_int = value; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_uint32_parse); + +QDF_STATUS qdf_int64_parse(const char *int_str, int64_t *out_int) +{ + QDF_STATUS status; + bool negate; + uint64_t value; + int64_t signed_value; + + status = qdf_int_parse(int_str, &value, &negate); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + if (negate) { + signed_value = -value; + if (signed_value > 0) + return QDF_STATUS_E_RANGE; + } else { + signed_value = value; + if (signed_value < 0) + return QDF_STATUS_E_RANGE; + } + + *out_int = signed_value; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_int64_parse); + +QDF_STATUS qdf_uint64_parse(const char *int_str, uint64_t *out_int) +{ + QDF_STATUS status; + bool negate; + uint64_t value; + + status = qdf_int_parse(int_str, &value, &negate); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + if (negate) + return QDF_STATUS_E_RANGE; + + *out_int = value; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_uint64_parse); + +QDF_STATUS qdf_bool_parse(const char *bool_str, 
bool *out_bool) +{ + bool value; + + QDF_BUG(bool_str); + if (!bool_str) + return QDF_STATUS_E_INVAL; + + QDF_BUG(out_bool); + if (!out_bool) + return QDF_STATUS_E_INVAL; + + bool_str = qdf_str_left_trim(bool_str); + + switch (bool_str[0]) { + case '1': + case 'y': + case 'Y': + value = true; + break; + case '0': + case 'n': + case 'N': + value = false; + break; + default: + return QDF_STATUS_E_FAILURE; + } + + bool_str++; + bool_str = qdf_str_left_trim(bool_str); + if (bool_str[0] != '\0') + return QDF_STATUS_E_FAILURE; + + *out_bool = value; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_bool_parse); + +QDF_STATUS qdf_mac_parse(const char *mac_str, struct qdf_mac_addr *out_addr) +{ + QDF_STATUS status; + struct qdf_mac_addr addr; + bool colons; + int i; + + QDF_BUG(mac_str); + if (!mac_str) + return QDF_STATUS_E_INVAL; + + QDF_BUG(out_addr); + if (!out_addr) + return QDF_STATUS_E_INVAL; + + mac_str = qdf_str_left_trim(mac_str); + + /* parse leading hex pair */ + status = qdf_consume_hex_pair(&mac_str, &addr.bytes[0]); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + /* dynamically detect colons */ + colons = mac_str[0] == ':'; + + for (i = 1; i < QDF_MAC_ADDR_SIZE; i++) { + /* ensure colon separator if previously detected */ + if (colons) { + status = qdf_consume_char(&mac_str, ':'); + if (QDF_IS_STATUS_ERROR(status)) + return status; + } + + /* parse next hex pair */ + status = qdf_consume_hex_pair(&mac_str, &addr.bytes[i]); + if (QDF_IS_STATUS_ERROR(status)) + return status; + } + + mac_str = qdf_str_left_trim(mac_str); + if (mac_str[0] != '\0') + return QDF_STATUS_E_FAILURE; + + *out_addr = addr; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_mac_parse); + +QDF_STATUS qdf_ipv4_parse(const char *ipv4_str, struct qdf_ipv4_addr *out_addr) +{ + QDF_STATUS status; + struct qdf_ipv4_addr addr; + int i; + + QDF_BUG(ipv4_str); + if (!ipv4_str) + return QDF_STATUS_E_INVAL; + + QDF_BUG(out_addr); + if (!out_addr) + return QDF_STATUS_E_INVAL; + 
+ ipv4_str = qdf_str_left_trim(ipv4_str); + + /* parse leading octet */ + status = qdf_consume_octet_dec(&ipv4_str, &addr.bytes[0]); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + for (i = 1; i < QDF_IPV4_ADDR_SIZE; i++) { + /* ensure dot separator */ + status = qdf_consume_char(&ipv4_str, '.'); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + /* parse next octet */ + status = qdf_consume_octet_dec(&ipv4_str, &addr.bytes[i]); + if (QDF_IS_STATUS_ERROR(status)) + return status; + } + + ipv4_str = qdf_str_left_trim(ipv4_str); + if (ipv4_str[0] != '\0') + return QDF_STATUS_E_FAILURE; + + *out_addr = addr; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_ipv4_parse); + +QDF_STATUS qdf_ipv6_parse(const char *ipv6_str, struct qdf_ipv6_addr *out_addr) +{ + QDF_STATUS status; + struct qdf_ipv6_addr addr; + int8_t zero_comp = -1; + uint8_t hextets_found = 0; + + QDF_BUG(ipv6_str); + if (!ipv6_str) + return QDF_STATUS_E_INVAL; + + QDF_BUG(out_addr); + if (!out_addr) + return QDF_STATUS_E_INVAL; + + ipv6_str = qdf_str_left_trim(ipv6_str); + + /* check for leading zero-compression ("::") */ + status = qdf_consume_char(&ipv6_str, ':'); + if (QDF_IS_STATUS_SUCCESS(status)) { + status = qdf_consume_char(&ipv6_str, ':'); + if (QDF_IS_STATUS_SUCCESS(status)) + zero_comp = 0; + else + return QDF_STATUS_E_FAILURE; + } + + while (hextets_found < QDF_IPV6_ADDR_HEXTET_COUNT) { + uint16_t hextet; + + /* parse hextet */ + status = qdf_consume_hextet(&ipv6_str, &hextet); + if (QDF_IS_STATUS_ERROR(status)) { + /* we must end with hextet or zero compression */ + if (hextets_found != zero_comp) + return QDF_STATUS_E_FAILURE; + + break; + } + + addr.bytes[hextets_found * 2] = hextet >> 8; + addr.bytes[hextets_found * 2 + 1] = hextet; + hextets_found++; + + /* parse ':' char */ + status = qdf_consume_char(&ipv6_str, ':'); + if (QDF_IS_STATUS_ERROR(status)) + break; + + /* check for zero compression ("::") */ + status = qdf_consume_char(&ipv6_str, ':'); + if 
(QDF_IS_STATUS_SUCCESS(status)) { + /* only one zero compression is allowed */ + if (zero_comp >= 0) + return QDF_STATUS_E_FAILURE; + + zero_comp = hextets_found; + } + } + + /* we must have max hextets or a zero compression */ + if (hextets_found < QDF_IPV6_ADDR_HEXTET_COUNT && zero_comp == -1) + return QDF_STATUS_E_FAILURE; + + ipv6_str = qdf_str_left_trim(ipv6_str); + if (ipv6_str[0] != '\0') + return QDF_STATUS_E_FAILURE; + + /* shift lower hextets if zero compressed */ + if (zero_comp >= 0) { + uint8_t shift = QDF_IPV6_ADDR_HEXTET_COUNT - hextets_found; + void *to = &addr.bytes[(zero_comp + shift) * 2]; + void *from = &addr.bytes[zero_comp * 2]; + + qdf_mem_move(to, from, (hextets_found - zero_comp) * 2); + qdf_mem_set(from, shift * 2, 0); + } + + *out_addr = addr; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(qdf_ipv6_parse); diff --git a/drivers/staging/qca-wifi-host-cmn/scheduler/inc/scheduler_api.h b/drivers/staging/qca-wifi-host-cmn/scheduler/inc/scheduler_api.h new file mode 100644 index 0000000000000000000000000000000000000000..eaea207b477e1a5d56db19c6fa8cc8443aae4c3e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/scheduler/inc/scheduler_api.h @@ -0,0 +1,331 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__SCHEDULER_API_H)
+#define __SCHEDULER_API_H
+
+#include <qdf_event.h>
+#include <qdf_types.h>
+#include <qdf_lock.h>
+#include <qdf_mc_timer.h>
+#include <qdf_status.h>
+
+/* Controller thread various event masks
+ * MC_POST_EVENT_MASK: wake up thread after posting message
+ * MC_SUSPEND_EVENT_MASK: signal thread to suspend during kernel pm suspend
+ * MC_SHUTDOWN_EVENT_MASK: signal thread to shutdown and exit during unload
+ */
+#define MC_POST_EVENT_MASK 0x001
+#define MC_SUSPEND_EVENT_MASK 0x002
+#define MC_SHUTDOWN_EVENT_MASK 0x010
+
+/*
+ * Cookie for timer messages. Note that anyone posting a timer message
+ * has to write the COOKIE in the reserved field of the message. The
+ * timer queue handler relies on this COOKIE
+ */
+#define SYS_MSG_COOKIE 0xFACE
+
+#define scheduler_get_src_id(qid) (((qid) >> 20) & 0x3FF)
+#define scheduler_get_dest_id(qid) (((qid) >> 10) & 0x3FF)
+#define scheduler_get_que_id(qid) ((qid) & 0x3FF)
+#define scheduler_get_qid(src, dest, que_id) ((que_id) | ((dest) << 10) |\
+ ((src) << 20))
+
+typedef enum {
+ SYS_MSG_ID_MC_TIMER,
+ SYS_MSG_ID_FTM_RSP,
+ SYS_MSG_ID_QVIT,
+ SYS_MSG_ID_DATA_STALL_MSG,
+ SYS_MSG_ID_UMAC_STOP,
+} SYS_MSG_ID;
+
+/**
+ * struct scheduler_msg: scheduler message structure
+ * @type: message type
+ * @reserved: reserved field
+ * @bodyval: message body val
+ * @bodyptr: message body pointer based on the type either a bodyptr pointer
+ * into memory or bodyval as a 32 bit data is used. bodyptr is always a
+ * freeable pointer, one should always make sure that bodyptr is always
+ * freeable.
+ * Messages should use either bodyptr or bodyval; not both !!!
+ * @callback: callback to be called by scheduler thread once message is posted + * and scheduler thread has started processing the message. + * @flush_callback: flush callback which will be invoked during driver unload + * such that component can release the ref count of common global objects + * like PSOC, PDEV, VDEV and PEER. A component needs to populate flush + * callback in message body pointer for those messages which have taken ref + * count for above mentioned common objects. + * @node: list node for queue membership + * @queue_id: Id of the queue the message was added to + * @queue_depth: depth of the queue when the message was queued + * @queued_at_us: timestamp when the message was queued in microseconds + */ +struct scheduler_msg { + uint16_t type; + uint16_t reserved; + uint32_t bodyval; + void *bodyptr; + void *callback; + void *flush_callback; + qdf_list_node_t node; +#ifdef WLAN_SCHED_HISTORY_SIZE + QDF_MODULE_ID queue_id; + uint32_t queue_depth; + uint64_t queued_at_us; +#endif /* WLAN_SCHED_HISTORY_SIZE */ +}; + +typedef QDF_STATUS (*scheduler_msg_process_fn_t) (struct scheduler_msg *msg); +typedef void (*hdd_suspend_callback)(void); + +/** + * scheduler_init() - initialize control path scheduler + * + * This API initializes control path scheduler. + * + * Return: QDF status + */ +QDF_STATUS scheduler_init(void); + +/** + * scheduler_deinit() - de-initialize control path scheduler + * + * This API de-initializes control path scheduler. + * + * Return: QDF status + */ +QDF_STATUS scheduler_deinit(void); + +/** + * scheduler_enable() - start the scheduler module + * + * Ready the scheduler module to service requests, and start the scheduler's + * message processing thread. Must only be called after scheduler_init(). 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS scheduler_enable(void); + +/** + * scheduler_disable() - stop the scheduler module + * + * Stop the scheduler module from servicing requests, and terminate the + * scheduler's message processing thread. Must be called before + * scheduler_deinit(). + * + * Return: QDF_STATUS + */ +QDF_STATUS scheduler_disable(void); + +/** + * scheduler_register_module() - register input module/queue id + * @qid: queue id to get registered + * @callback: queue message to be called when a message is posted + * + * Return: QDF status + */ +QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid, + scheduler_msg_process_fn_t callback); + +/** + * scheduler_deregister_module() - deregister input module/queue id + * @qid: queue id to get deregistered + * + * Return: QDF status + */ +QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid); + +/** + * scheduler_post_msg_by_priority() - post messages by priority + * @qid: queue id to which the message has to be posted. + * @msg: message pointer + * @is_high_priority: set to true for high priority message else false + * + * Return: QDF status + */ +QDF_STATUS scheduler_post_msg_by_priority(uint32_t qid, + struct scheduler_msg *msg, + bool is_high_priority); + +/** + * scheduler_post_msg() - post normal messages(no priority) + * @qid: queue id to which the message has to be posted. + * @msg: message pointer + * + * Return: QDF status + */ +static inline QDF_STATUS scheduler_post_msg(uint32_t qid, + struct scheduler_msg *msg) +{ + return scheduler_post_msg_by_priority(qid, msg, false); +} + +/** + * scheduler_post_message() - post normal messages(no priority) + * @src_id: Source module of the message + * @dest_id: Destination module of the message + * @que_id: Queue to which the message has to posted. 
+ * @msg: message pointer + * + * This function will mask the src_id, and destination id to qid of + * scheduler_post_msg + * Return: QDF status + */ +static inline QDF_STATUS scheduler_post_message(QDF_MODULE_ID src_id, + QDF_MODULE_ID dest_id, + QDF_MODULE_ID que_id, + struct scheduler_msg *msg) +{ + return scheduler_post_msg(scheduler_get_qid(src_id, dest_id, que_id), + msg); +} + +/** + * scheduler_resume() - resume scheduler thread + * + * Complete scheduler thread resume wait event such that scheduler + * thread can wake up and process message queues + * + * Return: none + */ +void scheduler_resume(void); + +/** + * scheduler_register_hdd_suspend_callback() - suspend callback to hdd + * @callback: hdd callback to be called when controllred thread is suspended + * + * Return: none + */ +void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback); + +/** + * scheduler_wake_up_controller_thread() - wake up controller thread + * + * Wake up controller thread to process a critical message. + * + * Return: none + */ +void scheduler_wake_up_controller_thread(void); + +/** + * scheduler_set_event_mask() - set given event mask + * @event_mask: event mask to set + * + * Set given event mask such that controller scheduler thread can do + * specified work after wake up. 
+ * + * Return: none + */ +void scheduler_set_event_mask(uint32_t event_mask); + +/** + * scheduler_clear_event_mask() - clear given event mask + * @event_mask: event mask to set + * + * Return: none + */ +void scheduler_clear_event_mask(uint32_t event_mask); + +/** + * scheduler_target_if_mq_handler() - top level message queue handler for + * target_if message queue + * @msg: pointer to actual message being handled + * + * Return: none + */ +QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg); + +/** + * scheduler_os_if_mq_handler() - top level message queue handler for + * os_if message queue + * @msg: pointer to actual message being handled + * + * Return: none + */ +QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg); + +/** + * scheduler_timer_q_mq_handler() - top level message queue handler for + * timer queue + * @msg: pointer to actual message being handled + * + * Return: none + */ +QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg); + +/** + * scheduler_scan_mq_handler() - top level message queue handler for + * scan queue + * @msg: pointer to actual message being handled + * + * Return: QDF status + */ +QDF_STATUS scheduler_scan_mq_handler(struct scheduler_msg *msg); + +/** + * scheduler_register_wma_legacy_handler() - register legacy wma handler + * @callback: legacy wma handler to be called for WMA messages + * + * Return: QDF status + */ +QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t + callback); + +/** + * scheduler_register_sys_legacy_handler() - register legacy sys handler + * @callback: legacy sys handler to be called for sys messages + * + * Return: QDF status + */ +QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t + callback); +/** + * scheduler_deregister_sys_legacy_handler() - deregister legacy sys handler + * + * Return: QDF status + */ +QDF_STATUS scheduler_deregister_sys_legacy_handler(void); + +/** + * 
scheduler_deregister_wma_legacy_handler() - deregister legacy wma handler + * + * Return: QDF status + */ +QDF_STATUS scheduler_deregister_wma_legacy_handler(void); + +/** + * scheduler_mc_timer_callback() - timer callback, gets called at time out + * @timer: holds the mc timer object. + * + * Return: None + */ +void scheduler_mc_timer_callback(qdf_mc_timer_t *timer); + +/** + * scheduler_get_queue_size() - Get the current size of the scheduler queue + * @qid: Queue ID for which the size is requested + * @size: Pointer to size where the size would be returned to the caller + * + * This API finds the size of the scheduler queue for the given Queue ID + * + * Return: QDF Status + */ +QDF_STATUS scheduler_get_queue_size(QDF_MODULE_ID qid, uint32_t *size); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/scheduler/inc/scheduler_core.h b/drivers/staging/qca-wifi-host-cmn/scheduler/inc/scheduler_core.h new file mode 100644 index 0000000000000000000000000000000000000000..cc273003c41c58c7c765d9c846b5b3c43e5a3489 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/scheduler/inc/scheduler_core.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */
+
+#if !defined(__SCHEDULER_CORE_H)
+#define __SCHEDULER_CORE_H
+
+#include <qdf_event.h>
+#include <qdf_threads.h>
+#include <qdf_timer.h>
+#include <scheduler_api.h>
+
+#ifdef CONFIG_MCL
+#define SCHEDULER_CORE_MAX_MESSAGES 1000
+#else
+#define SCHEDULER_CORE_MAX_MESSAGES 2000
+#define WLAN_SCHED_REDUCTION_LIMIT 32
+#endif
+#define SCHEDULER_NUMBER_OF_MSG_QUEUE 6
+#define SCHEDULER_WRAPPER_MAX_FAIL_COUNT (SCHEDULER_CORE_MAX_MESSAGES * 3)
+#define SCHEDULER_WATCHDOG_TIMEOUT (10 * 1000) /* 10s */
+
+#define __sched_log(level, format, args...) \
+ QDF_TRACE(QDF_MODULE_ID_SCHEDULER, level, FL(format), ## args)
+
+#define sched_fatal(format, args...) \
+ __sched_log(QDF_TRACE_LEVEL_FATAL, format, ## args)
+#define sched_err(format, args...) \
+ __sched_log(QDF_TRACE_LEVEL_ERROR, format, ## args)
+#define sched_warn(format, args...) \
+ __sched_log(QDF_TRACE_LEVEL_WARN, format, ## args)
+#define sched_info(format, args...) \
+ __sched_log(QDF_TRACE_LEVEL_INFO, format, ## args)
+#define sched_debug(format, args...) \
+ __sched_log(QDF_TRACE_LEVEL_DEBUG, format, ## args)
+
+#define sched_enter() sched_debug("Enter")
+#define sched_exit() sched_debug("Exit")
+
+/**
+ * struct scheduler_mq_type - scheduler message queue
+ * @mq_lock: message queue lock
+ * @mq_list: message queue list
+ * @qid: queue id
+ */
+struct scheduler_mq_type {
+ qdf_spinlock_t mq_lock;
+ qdf_list_t mq_list;
+ QDF_MODULE_ID qid;
+};
+
+/**
+ * struct scheduler_mq_ctx - scheduler message queue context
+ * @sch_msg_q: scheduler message queue
+ * @scheduler_msg_qid_to_qidx: message qid to qidx mapping
+ * @scheduler_msg_process_fn: array of message queue handler function pointers
+ */
+struct scheduler_mq_ctx {
+ struct scheduler_mq_type sch_msg_q[SCHEDULER_NUMBER_OF_MSG_QUEUE];
+ uint8_t scheduler_msg_qid_to_qidx[QDF_MODULE_ID_MAX];
+ QDF_STATUS (*scheduler_msg_process_fn[SCHEDULER_NUMBER_OF_MSG_QUEUE])
+ (struct scheduler_msg *msg);
+};
+
+/**
+ * struct scheduler_ctx - scheduler context
+ * @queue_ctx: message queue context
+ * @sch_start_event: 
scheduler thread start wait event + * @sch_thread: scheduler thread + * @sch_shutdown: scheduler thread shutdown wait event + * @sch_wait_queue: scheduler wait queue + * @sch_event_flag: scheduler events flag + * @resume_sch_event: scheduler resume wait event + * @sch_thread_lock: scheduler thread lock + * @sch_last_qidx: scheduler last qidx allocation + * @hdd_callback: os if suspend callback + * @legacy_wma_handler: legacy wma message handler + * @legacy_sys_handler: legacy sys message handler + * @watchdog_timer: timer for triggering a scheduler watchdog bite + * @watchdog_msg_type: 'type' of the current msg being processed + * @watchdog_callback: the callback of the current msg being processed + */ +struct scheduler_ctx { + struct scheduler_mq_ctx queue_ctx; + qdf_event_t sch_start_event; + qdf_thread_t *sch_thread; + qdf_event_t sch_shutdown; + qdf_wait_queue_head_t sch_wait_queue; + unsigned long sch_event_flag; + qdf_event_t resume_sch_event; + qdf_spinlock_t sch_thread_lock; + uint8_t sch_last_qidx; + hdd_suspend_callback hdd_callback; + scheduler_msg_process_fn_t legacy_wma_handler; + scheduler_msg_process_fn_t legacy_sys_handler; + qdf_timer_t watchdog_timer; + uint16_t watchdog_msg_type; + void *watchdog_callback; +}; + +/** + * scheduler_core_msg_dup() duplicate the given scheduler message + * @msg: the message to duplicated + * + * Note: Duplicated messages must be freed using scheduler_core_msg_free(). 
+ * + * Return: pointer to the duplicated message + */ +struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg); + +/** + * scheduler_core_msg_free() - free the given scheduler message + * @msg: the duplicated message to free + * + * Return: None + */ +void scheduler_core_msg_free(struct scheduler_msg *msg); + +/** + * scheduler_get_context() - to get scheduler context + * + * This routine is used retrieve scheduler context + * + * Return: Pointer to scheduler context + */ +struct scheduler_ctx *scheduler_get_context(void); + +/** + * scheduler_thread() - spawned thread will execute this routine + * @arg: pointer to scheduler context + * + * Newly created thread will use this routine to perform its duty + * + * Return: none + */ +int scheduler_thread(void *arg); + +/** + * scheduler_create_ctx() - to create scheduler context + * + * This routine is used to create scheduler context + * + * Return: QDF_STATUS based on success or failure + */ +QDF_STATUS scheduler_create_ctx(void); +/** + * scheduler_destroy_ctx() - to destroy scheduler context + * + * This routine is used to destroy scheduler context + * + * Return: QDF_STATUS based on success or failure + */ +QDF_STATUS scheduler_destroy_ctx(void); + +/** + * scheduler_mq_put() - put message in the back of queue + * @msg_q: Pointer to the message queue + * @msg: the message to enqueue + * + * This function is used to put message in back of provided message + * queue + * + * Return: none + */ +void scheduler_mq_put(struct scheduler_mq_type *msg_q, + struct scheduler_msg *msg); +/** + * scheduler_mq_put_front() - put message in the front of queue + * @msg_q: Pointer to the message queue + * @msg: the message to enqueue + * + * This function is used to put message in front of provided message + * queue + * + * Return: none + */ +void scheduler_mq_put_front(struct scheduler_mq_type *msg_q, + struct scheduler_msg *msg); +/** + * scheduler_mq_get() - to get message from message queue + * @msg_q: Pointer to 
the message queue + * + * This function is used to get message from given message queue + * + * Return: none + */ +struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q); + +/** + * scheduler_queues_init() - to initialize all the modules' queues + * @sched_ctx: pointer to scheduler context + * + * This function is used to initialize the queues for all the modules + * + * Return: QDF_STATUS based on success of failure + */ +QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx); + +/** + * scheduler_queues_deinit() - to de-initialize all the modules' queues + * @sched_ctx: pointer to scheduler context + * + * This function is used to de-initialize the queues for all the modules + * + * Return: QDF_STATUS based on success of failure + */ +QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *gp_sch_ctx); + +/** + * scheduler_queues_flush() - flush all of the scheduler queues + * @sch_ctx: pointer to scheduler context + * + * This routine is used to clean the module's queues + * + * Return: none + */ +void scheduler_queues_flush(struct scheduler_ctx *sched_ctx); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/scheduler/src/scheduler_api.c b/drivers/staging/qca-wifi-host-cmn/scheduler/src/scheduler_api.c new file mode 100644 index 0000000000000000000000000000000000000000..632544d8cb6ef72e04337a5ce8c1deae5630f082 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/scheduler/src/scheduler_api.c @@ -0,0 +1,662 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
/**
 * scheduler_disable() - stop the scheduler thread and drain its queues
 *
 * Signals the scheduler thread to shut down, waits for it to exit, and
 * then flushes any messages still queued.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL if there is no
 *         scheduler context
 */
QDF_STATUS scheduler_disable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_debug("Disabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	/* send shutdown signal to scheduler thread */
	qdf_atomic_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	/* wait for scheduler thread to shutdown */
	qdf_wait_single_event(&sched_ctx->sch_shutdown, 0);
	sched_ctx->sch_thread = NULL;

	/* flush any unprocessed scheduler messages */
	scheduler_queues_flush(sched_ctx);

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_watchdog_notify() - log the callback that overran the watchdog
 * @sched: scheduler context whose watchdog timer fired
 *
 * Resolves the offending callback pointer to a symbol name when one is
 * recorded, then logs it together with the message type and the watchdog
 * budget in seconds. Note @symbol is only written when a callback is
 * recorded; the ternary below guarantees it is not read otherwise.
 */
static inline void scheduler_watchdog_notify(struct scheduler_ctx *sched)
{
	char symbol[QDF_SYMBOL_LEN];

	if (sched->watchdog_callback)
		qdf_sprint_symbol(symbol, sched->watchdog_callback);

	sched_err("WLAN_BUG_RCA: Callback %s (type 0x%x) exceeded its allotted time of %ds",
		  sched->watchdog_callback ? symbol : "",
		  sched->watchdog_msg_type, SCHEDULER_WATCHDOG_TIMEOUT / 1000);
}
#ifdef CONFIG_SLUB_DEBUG_ON
/**
 * scheduler_watchdog_timeout() - watchdog expiry handler (debug builds)
 * @arg: the scheduler context (passed as opaque timer argument)
 *
 * Logs the overrunning callback, dumps the scheduler thread's stack, and
 * deliberately panics so the hang is caught in debug builds — unless a
 * shutdown is already in progress.
 */
static void scheduler_watchdog_timeout(void *arg)
{
	struct scheduler_ctx *sched = arg;

	scheduler_watchdog_notify(sched);
	if (sched->sch_thread)
		qdf_print_thread_trace(sched->sch_thread);

	/* avoid crashing during shutdown */
	if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK, &sched->sch_event_flag))
		return;

	QDF_DEBUG_PANIC("Going down for Scheduler Watchdog Bite!");
}
#else
/**
 * scheduler_watchdog_timeout() - watchdog expiry handler (non-debug builds)
 * @arg: the scheduler context
 *
 * Production builds only log the overrun; they do not panic.
 */
static void scheduler_watchdog_timeout(void *arg)
{
	scheduler_watchdog_notify((struct scheduler_ctx *)arg);
}
#endif

/**
 * scheduler_enable() - create and start the scheduler thread
 *
 * Clears any stale shutdown/post flags, spawns the scheduler thread, and
 * blocks until the thread signals sch_start_event.
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_INVAL with no
 *         context; QDF_STATUS_E_RESOURCES if the thread cannot be created
 */
QDF_STATUS scheduler_enable(void)
{
	struct scheduler_ctx *sched_ctx;

	sched_debug("Enabling Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	qdf_atomic_clear_bit(MC_SHUTDOWN_EVENT_MASK,
			     &sched_ctx->sch_event_flag);
	qdf_atomic_clear_bit(MC_POST_EVENT_MASK,
			     &sched_ctx->sch_event_flag);

	/* create the scheduler thread */
	sched_ctx->sch_thread = qdf_create_thread(scheduler_thread, sched_ctx,
						  "scheduler_thread");
	if (IS_ERR(sched_ctx->sch_thread)) {
		sched_err("Failed to create scheduler thread");
		return QDF_STATUS_E_RESOURCES;
	}

	sched_debug("Scheduler thread created");

	/* wait for the scheduler thread to startup */
	qdf_wake_up_process(sched_ctx->sch_thread);
	qdf_wait_single_event(&sched_ctx->sch_start_event, 0);

	sched_debug("Scheduler thread started");

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_init() - allocate and initialize the scheduler
 *
 * Creates the scheduler context, its per-module message queues, the
 * start/shutdown/resume events, the thread lock, the wait queue, and the
 * watchdog timer; finally registers the MC-timer callback. On any
 * failure, everything created so far is unwound via the goto chain below
 * (each label tears down exactly what was set up before its jump site).
 *
 * Return: QDF_STATUS based on success or failure
 */
QDF_STATUS scheduler_init(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_debug("Initializing Scheduler");

	status = scheduler_create_ctx();
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create context; status:%d", status);
		return status;
	}

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx) {
		status = QDF_STATUS_E_FAILURE;
		goto ctx_destroy;
	}

	status = scheduler_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to init queues; status:%d", status);
		goto ctx_destroy;
	}

	status = qdf_event_create(&sched_ctx->sch_start_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create start event; status:%d", status);
		goto queues_deinit;
	}

	status = qdf_event_create(&sched_ctx->sch_shutdown);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create shutdown event; status:%d", status);
		goto start_event_destroy;
	}

	status = qdf_event_create(&sched_ctx->resume_sch_event);
	if (QDF_IS_STATUS_ERROR(status)) {
		sched_err("Failed to create resume event; status:%d", status);
		goto shutdown_event_destroy;
	}

	qdf_spinlock_create(&sched_ctx->sch_thread_lock);
	qdf_init_waitqueue_head(&sched_ctx->sch_wait_queue);
	sched_ctx->sch_event_flag = 0;
	qdf_timer_init(NULL,
		       &sched_ctx->watchdog_timer,
		       &scheduler_watchdog_timeout,
		       sched_ctx,
		       QDF_TIMER_TYPE_SW);

	qdf_register_mc_timer_callback(scheduler_mc_timer_callback);

	return QDF_STATUS_SUCCESS;

shutdown_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_shutdown);

start_event_destroy:
	qdf_event_destroy(&sched_ctx->sch_start_event);

queues_deinit:
	scheduler_queues_deinit(sched_ctx);

ctx_destroy:
	scheduler_destroy_ctx();

	return status;
}
/**
 * scheduler_deinit() - tear down the scheduler
 *
 * Destroys, in reverse order of scheduler_init(): the watchdog timer, the
 * thread lock, the three events, the message queues, and finally the
 * context itself. Queue/context teardown failures are logged but do not
 * change the return value.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL if no context exists
 */
QDF_STATUS scheduler_deinit(void)
{
	QDF_STATUS status;
	struct scheduler_ctx *sched_ctx;

	sched_debug("Deinitializing Scheduler");

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	qdf_timer_free(&sched_ctx->watchdog_timer);
	qdf_spinlock_destroy(&sched_ctx->sch_thread_lock);
	qdf_event_destroy(&sched_ctx->resume_sch_event);
	qdf_event_destroy(&sched_ctx->sch_shutdown);
	qdf_event_destroy(&sched_ctx->sch_start_event);

	status = scheduler_queues_deinit(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to deinit queues; status:%d", status);

	status = scheduler_destroy_ctx();
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Failed to destroy context; status:%d", status);

	return QDF_STATUS_SUCCESS;
}
/**
 * scheduler_post_msg_by_priority() - enqueue a message for the scheduler
 * @qid: composite queue id encoding source, destination and queue module ids
 * @msg: the message to post (copied; caller keeps ownership of @msg itself)
 * @is_high_priority: true to enqueue at the head of the target queue
 *
 * Validates the encoded module ids, remaps legacy WMA traffic onto the
 * target_if queue, duplicates the message into scheduler-owned memory,
 * enqueues it, and wakes the scheduler thread.
 *
 * Return: QDF_STATUS based on success or failure
 */
QDF_STATUS scheduler_post_msg_by_priority(uint32_t qid,
					  struct scheduler_msg *msg,
					  bool is_high_priority)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_msg *queue_msg;
	struct scheduler_ctx *sched_ctx;
	uint16_t src_id;
	uint16_t dest_id;
	uint16_t que_id;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_INVAL;

	sched_ctx = scheduler_get_context();
	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	if (!sched_ctx->sch_thread) {
		sched_err("Cannot post message; scheduler thread is stopped");
		return QDF_STATUS_E_FAILURE;
	}

	/* a zero cookie means the caller never touched the reserved field;
	 * any other non-cookie value indicates an uninitialized message
	 */
	if (msg->reserved != 0 && msg->reserved != SYS_MSG_COOKIE) {
		QDF_DEBUG_PANIC("Scheduler messages must be initialized");
		return QDF_STATUS_E_FAILURE;
	}

	dest_id = scheduler_get_dest_id(qid);
	src_id = scheduler_get_src_id(qid);
	que_id = scheduler_get_que_id(qid);

	if (que_id >= QDF_MODULE_ID_MAX || src_id >= QDF_MODULE_ID_MAX ||
	    dest_id >= QDF_MODULE_ID_MAX) {
		sched_err("Src_id/Dest_id invalid, cannot post message");
		return QDF_STATUS_E_FAILURE;
	}
	/* Target_If is a special message queue in phase 3 convergence because
	 * it's used by both legacy WMA as well as new UMAC components which
	 * directly populate callback handlers in message body.
	 * 1) WMA legacy messages should not have callback
	 * 2) New target_if message needs to have valid callback
	 * Clear callback handler for legacy WMA messages such that in case
	 * if someone is sending legacy WMA message from stack which has
	 * uninitialized callback then it's handled properly. Also change
	 * legacy WMA message queue id to target_if queue such that it's always
	 * handled in right order.
	 */
	if (QDF_MODULE_ID_WMA == que_id) {
		msg->callback = NULL;
		/* change legacy WMA message id to new target_if mq id */
		que_id = QDF_MODULE_ID_TARGET_IF;
	}
	qdf_mtrace(src_id, dest_id, msg->type, 0xFF, 0);

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[que_id];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized ignore msg");
		return QDF_STATUS_E_FAILURE;
	}

	if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) {
		QDF_DEBUG_PANIC("callback not registered for qid[%d]", que_id);
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);

	/* the scheduler owns the copy; the caller's msg is untouched */
	queue_msg = scheduler_core_msg_dup(msg);
	if (!queue_msg)
		return QDF_STATUS_E_NOMEM;

	if (is_high_priority)
		scheduler_mq_put_front(target_mq, queue_msg);
	else
		scheduler_mq_put(target_mq, queue_msg);

	qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
	qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);

	return QDF_STATUS_SUCCESS;
}
+ */ + if (QDF_MODULE_ID_WMA == que_id) { + msg->callback = NULL; + /* change legacy WMA message id to new target_if mq id */ + que_id = QDF_MODULE_ID_TARGET_IF; + } + qdf_mtrace(src_id, dest_id, msg->type, 0xFF, 0); + + qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[que_id]; + if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) { + sched_err("Scheduler is deinitialized ignore msg"); + return QDF_STATUS_E_FAILURE; + } + + if (!sched_ctx->queue_ctx.scheduler_msg_process_fn[qidx]) { + QDF_DEBUG_PANIC("callback not registered for qid[%d]", que_id); + return QDF_STATUS_E_FAILURE; + } + + target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]); + + queue_msg = scheduler_core_msg_dup(msg); + if (!queue_msg) + return QDF_STATUS_E_NOMEM; + + if (is_high_priority) + scheduler_mq_put_front(target_mq, queue_msg); + else + scheduler_mq_put(target_mq, queue_msg); + + qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag); + qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scheduler_register_module(QDF_MODULE_ID qid, + scheduler_msg_process_fn_t callback) +{ + struct scheduler_mq_ctx *ctx; + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + + sched_enter(); + + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_FAILURE; + + if (sched_ctx->sch_last_qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) { + sched_err("Already registered max %d no of message queues", + SCHEDULER_NUMBER_OF_MSG_QUEUE); + return QDF_STATUS_E_FAILURE; + } + + ctx = &sched_ctx->queue_ctx; + ctx->scheduler_msg_qid_to_qidx[qid] = sched_ctx->sch_last_qidx; + ctx->sch_msg_q[sched_ctx->sch_last_qidx].qid = qid; + ctx->scheduler_msg_process_fn[sched_ctx->sch_last_qidx] = callback; + sched_ctx->sch_last_qidx++; + + sched_exit(); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scheduler_deregister_module(QDF_MODULE_ID qid) +{ + struct scheduler_mq_ctx *ctx; + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + uint8_t qidx; + + 
sched_enter(); + + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_FAILURE; + + ctx = &sched_ctx->queue_ctx; + qidx = ctx->scheduler_msg_qid_to_qidx[qid]; + ctx->scheduler_msg_process_fn[qidx] = NULL; + sched_ctx->sch_last_qidx--; + ctx->scheduler_msg_qid_to_qidx[qidx] = SCHEDULER_NUMBER_OF_MSG_QUEUE; + + sched_exit(); + + return QDF_STATUS_SUCCESS; +} + +void scheduler_resume(void) +{ + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + + if (sched_ctx) + qdf_event_set(&sched_ctx->resume_sch_event); +} + +void scheduler_register_hdd_suspend_callback(hdd_suspend_callback callback) +{ + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + + if (sched_ctx) + sched_ctx->hdd_callback = callback; +} +void scheduler_wake_up_controller_thread(void) +{ + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + + if (sched_ctx) + qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue); +} +void scheduler_set_event_mask(uint32_t event_mask) +{ + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + + if (sched_ctx) + qdf_atomic_set_bit(event_mask, &sched_ctx->sch_event_flag); +} + +void scheduler_clear_event_mask(uint32_t event_mask) +{ + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + + if (sched_ctx) + qdf_atomic_clear_bit(event_mask, &sched_ctx->sch_event_flag); +} + +QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg) +{ + QDF_STATUS status; + struct scheduler_ctx *sched_ctx = scheduler_get_context(); + QDF_STATUS (*target_if_msg_handler)(struct scheduler_msg *); + + QDF_BUG(msg); + if (!msg) + return QDF_STATUS_E_FAILURE; + + QDF_BUG(sched_ctx); + if (!sched_ctx) + return QDF_STATUS_E_FAILURE; + + target_if_msg_handler = msg->callback; + + /* Target_If is a special message queue in phase 3 convergence beacause + * its used by both legacy WMA and as well as new UMAC components. New + * UMAC components directly pass their message handlers as callback in + * message body. 
/**
 * scheduler_os_if_mq_handler() - dispatch an os_if queue message
 * @msg: the message to dispatch
 *
 * os_if messages always carry their handler in @msg->callback; the
 * handler's status is intentionally discarded.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE on invalid input
 */
QDF_STATUS scheduler_os_if_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*os_if_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	os_if_msg_handler = msg->callback;

	QDF_BUG(os_if_msg_handler);
	if (!os_if_msg_handler)
		return QDF_STATUS_E_FAILURE;

	os_if_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_timer_q_mq_handler() - dispatch a timer queue message
 * @msg: the message to dispatch
 *
 * MC-timer expirations (SYS_MSG_COOKIE + SYS_MSG_ID_MC_TIMER) invoke the
 * timer callback carried in the message; everything else on this queue is
 * forwarded to the registered legacy sys handler.
 *
 * Return: QDF_STATUS based on success or failure
 */
QDF_STATUS scheduler_timer_q_mq_handler(struct scheduler_msg *msg)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();
	qdf_mc_timer_callback_t timer_callback;

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* legacy sys message handler? */
	if (msg->reserved != SYS_MSG_COOKIE || msg->type != SYS_MSG_ID_MC_TIMER)
		return sched_ctx->legacy_sys_handler(msg);

	/* MC timer expiration: bodyptr carries the timer's user data */
	timer_callback = msg->callback;
	QDF_BUG(timer_callback);
	if (!timer_callback)
		return QDF_STATUS_E_FAILURE;

	timer_callback(msg->bodyptr);

	return QDF_STATUS_SUCCESS;
}
/**
 * scheduler_scan_mq_handler() - dispatch a scan queue message
 * @msg: the message to dispatch
 *
 * Scan messages carry their handler in @msg->callback; the handler's
 * status is intentionally discarded.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE on invalid input
 */
QDF_STATUS scheduler_scan_mq_handler(struct scheduler_msg *msg)
{
	QDF_STATUS (*scan_q_msg_handler)(struct scheduler_msg *);

	QDF_BUG(msg);
	if (!msg)
		return QDF_STATUS_E_FAILURE;

	scan_q_msg_handler = msg->callback;

	QDF_BUG(scan_q_msg_handler);
	if (!scan_q_msg_handler)
		return QDF_STATUS_E_FAILURE;

	scan_q_msg_handler(msg);

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_register_wma_legacy_handler() - record the legacy WMA handler
 * @wma_callback: handler for target_if messages that carry no callback
 *
 * Return: QDF_STATUS based on success or failure
 */
QDF_STATUS scheduler_register_wma_legacy_handler(scheduler_msg_process_fn_t
						 wma_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = wma_callback;

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_register_sys_legacy_handler() - record the legacy sys handler
 * @sys_callback: handler for timer-queue messages that are not MC timers
 *
 * Return: QDF_STATUS based on success or failure
 */
QDF_STATUS scheduler_register_sys_legacy_handler(scheduler_msg_process_fn_t
						 sys_callback)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = sys_callback;

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_deregister_wma_legacy_handler() - clear the legacy WMA handler
 *
 * Return: QDF_STATUS based on success or failure
 */
QDF_STATUS scheduler_deregister_wma_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_wma_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_deregister_sys_legacy_handler() - clear the legacy sys handler
 *
 * Return: QDF_STATUS based on success or failure
 */
QDF_STATUS scheduler_deregister_sys_legacy_handler(void)
{
	struct scheduler_ctx *sched_ctx = scheduler_get_context();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	sched_ctx->legacy_sys_handler = NULL;

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_msg_flush_noop() - no-op flush callback
 * @msg: unused
 *
 * Installed on messages whose bodyptr must NOT be freed during a queue
 * flush (e.g. MC timer user data owned by the timer's client).
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS scheduler_msg_flush_noop(struct scheduler_msg *msg)
{
	return QDF_STATUS_SUCCESS;
}
/**
 * scheduler_mc_timer_callback() - serialize an MC timer expiry to the
 *	scheduler thread
 * @timer: the MC timer that fired
 *
 * Runs in timer (softirq) context: snapshots the timer's callback and
 * user data under the timer spinlock, transitions the timer state
 * machine, then posts a SYS_MSG_ID_MC_TIMER message so the callback
 * executes on the scheduler thread.
 *
 * Return: none
 */
void scheduler_mc_timer_callback(qdf_mc_timer_t *timer)
{
	struct scheduler_msg msg = {0};
	QDF_STATUS status;

	qdf_mc_timer_callback_t callback = NULL;
	void *user_data = NULL;
	QDF_TIMER_TYPE type = QDF_TIMER_TYPE_SW;

	QDF_BUG(timer);
	if (!timer)
		return;

	qdf_spin_lock_irqsave(&timer->platform_info.spinlock);

	switch (timer->state) {
	case QDF_TIMER_STATE_STARTING:
		/* we are in this state because someone just started the
		 * timer, and it expired before the timer content was
		 * updated — a rare race condition
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_STOPPED:
		status = QDF_STATUS_E_ALREADY;
		break;

	case QDF_TIMER_STATE_UNUSED:
		status = QDF_STATUS_E_EXISTS;
		break;

	case QDF_TIMER_STATE_RUNNING:
		/* need to go to stop state here because the call-back function
		 * may restart timer (to emulate periodic timer)
		 */
		timer->state = QDF_TIMER_STATE_STOPPED;
		/* copy the relevant timer information to local variables;
		 * once we exit this critical section, the timer content
		 * may be modified by other tasks
		 */
		callback = timer->callback;
		user_data = timer->user_data;
		type = timer->type;
		status = QDF_STATUS_SUCCESS;
		break;

	default:
		QDF_ASSERT(0);
		status = QDF_STATUS_E_FAULT;
		break;
	}

	qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);

	if (QDF_IS_STATUS_ERROR(status)) {
		sched_debug("MC timer fired but is not running; skip callback");
		return;
	}

	qdf_try_allowing_sleep(type);

	QDF_BUG(callback);
	if (!callback)
		return;

	/* serialize to scheduler controller thread */
	msg.type = SYS_MSG_ID_MC_TIMER;
	msg.reserved = SYS_MSG_COOKIE;
	msg.callback = callback;
	msg.bodyptr = user_data;
	msg.bodyval = 0;

	/* bodyptr points to user data, do not free it during msg flush */
	msg.flush_callback = scheduler_msg_flush_noop;

	status = scheduler_post_message(QDF_MODULE_ID_SCHEDULER,
					QDF_MODULE_ID_SCHEDULER,
					QDF_MODULE_ID_SYS, &msg);
	if (QDF_IS_STATUS_ERROR(status))
		sched_err("Could not enqueue timer to timer queue");
}

/**
 * scheduler_get_queue_size() - report the depth of a module's queue
 * @qid: module id whose queue to measure (WMA is remapped to target_if,
 *	mirroring scheduler_post_msg_by_priority())
 * @size: out parameter receiving the number of queued messages
 *
 * Return: QDF_STATUS based on success or failure
 */
QDF_STATUS scheduler_get_queue_size(QDF_MODULE_ID qid, uint32_t *size)
{
	uint8_t qidx;
	struct scheduler_mq_type *target_mq;
	struct scheduler_ctx *sched_ctx;

	sched_ctx = scheduler_get_context();
	if (!sched_ctx)
		return QDF_STATUS_E_INVAL;

	/* WMA also uses the target_if queue, so replace the QID */
	if (QDF_MODULE_ID_WMA == qid)
		qid = QDF_MODULE_ID_TARGET_IF;

	qidx = sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[qid];
	if (qidx >= SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		sched_err("Scheduler is deinitialized");
		return QDF_STATUS_E_FAILURE;
	}

	target_mq = &(sched_ctx->queue_ctx.sch_msg_q[qidx]);

	*size = qdf_list_size(&target_mq->mq_list);

	return QDF_STATUS_SUCCESS;
}
/* the one-and-only scheduler context; published via scheduler_get_context() */
static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;

#ifndef WLAN_SCHED_REDUCTION_LIMIT
#define WLAN_SCHED_REDUCTION_LIMIT 0
#endif

/* flex-mem pool backing every queued scheduler_msg copy */
DEFINE_QDF_FLEX_MEM_POOL(sched_pool, sizeof(struct scheduler_msg),
			 WLAN_SCHED_REDUCTION_LIMIT);

#ifdef WLAN_SCHED_HISTORY_SIZE

/**
 * struct sched_history_item - metrics for a scheduler message
 * @callback: the message's execution callback
 * @type_id: the message's type_id
 * @queue_id: Id of the queue the message was added to
 * @queue_start_us: timestamp when the message was queued in microseconds
 * @queue_duration_us: duration the message was queued in microseconds
 * @queue_depth: depth of the queue when the message was queued
 * @run_start_us: timestamp when the message started execution in microseconds
 * @run_duration_us: duration the message was executed in microseconds
 */
struct sched_history_item {
	void *callback;
	uint32_t type_id;
	QDF_MODULE_ID queue_id;
	uint64_t queue_start_us;
	uint32_t queue_duration_us;
	uint32_t queue_depth;
	uint64_t run_start_us;
	uint32_t run_duration_us;
};

/* circular buffer of the most recent message metrics */
static struct sched_history_item sched_history[WLAN_SCHED_HISTORY_SIZE];
static uint32_t sched_history_index;

/**
 * sched_history_queue() - stamp a message with enqueue-time metrics
 * @queue: the queue the message is being placed on
 * @msg: the message being enqueued
 */
static void sched_history_queue(struct scheduler_mq_type *queue,
				struct scheduler_msg *msg)
{
	msg->queue_id = queue->qid;
	msg->queue_depth = qdf_list_size(&queue->mq_list);
	msg->queued_at_us = qdf_get_log_timestamp_usecs();
}

/**
 * sched_history_start() - record that a message's handler is starting
 * @msg: the message about to be processed
 *
 * Writes a fresh history record at the current index; the matching
 * sched_history_stop() fills in the run duration and advances the index.
 */
static void sched_history_start(struct scheduler_msg *msg)
{
	uint64_t started_at_us = qdf_get_log_timestamp_usecs();
	struct sched_history_item hist = {
		.callback = msg->callback,
		.type_id = msg->type,
		.queue_start_us = msg->queued_at_us,
		.queue_duration_us = started_at_us - msg->queued_at_us,
		.queue_depth = msg->queue_depth,
		.run_start_us = started_at_us,
	};

	sched_history[sched_history_index] = hist;
}
/**
 * sched_history_stop() - close out the current history record
 *
 * Completes the record opened by sched_history_start() and advances the
 * circular index.
 */
static void sched_history_stop(void)
{
	struct sched_history_item *hist = &sched_history[sched_history_index];
	uint64_t stopped_at_us = qdf_get_log_timestamp_usecs();

	hist->run_duration_us = stopped_at_us - hist->run_start_us;

	sched_history_index++;
	sched_history_index %= WLAN_SCHED_HISTORY_SIZE;
}

#else /* WLAN_SCHED_HISTORY_SIZE */

/* history disabled: compile the hooks away */
static inline void sched_history_queue(struct scheduler_mq_type *queue,
				       struct scheduler_msg *msg) { }
static inline void sched_history_start(struct scheduler_msg *msg) { }
static inline void sched_history_stop(void) { }

#endif /* WLAN_SCHED_HISTORY_SIZE */

/**
 * scheduler_create_ctx() - initialize the global scheduler context
 *
 * Also initializes the message flex-mem pool.
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS scheduler_create_ctx(void)
{
	qdf_flex_mem_init(&sched_pool);
	gp_sched_ctx = &g_sched_ctx;

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_destroy_ctx() - tear down the global scheduler context
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS scheduler_destroy_ctx(void)
{
	gp_sched_ctx = NULL;
	qdf_flex_mem_deinit(&sched_pool);

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_get_context() - to get scheduler context
 *
 * Return: pointer to the global scheduler context (NULL before
 *         scheduler_create_ctx() / after scheduler_destroy_ctx())
 */
struct scheduler_ctx *scheduler_get_context(void)
{
	QDF_BUG(gp_sched_ctx);

	return gp_sched_ctx;
}

/**
 * scheduler_mq_init() - initialize one message queue
 * @msg_q: the queue to initialize
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_spinlock_create(&msg_q->mq_lock);
	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_mq_deinit() - destroy one message queue
 * @msg_q: the queue to destroy
 */
static void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_list_destroy(&msg_q->mq_list);
	qdf_spinlock_destroy(&msg_q->mq_lock);

	sched_exit();
}

/* total messages queued across all queues; bounds pool usage.
 * NOTE(review): double-underscore-prefixed names are reserved identifiers
 * in C — consider renaming.
 */
static qdf_atomic_t __sched_queue_depth;

/**
 * scheduler_all_queues_init() - initialize every scheduler message queue
 * @sched_ctx: pointer to scheduler context
 *
 * Also resets the qid->qidx map to the invalid sentinel
 * (SCHEDULER_NUMBER_OF_MSG_QUEUE).
 *
 * Return: QDF_STATUS based on success or failure
 */
static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	qdf_atomic_set(&__sched_queue_depth, 0);

	/* Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]);
		if (QDF_STATUS_SUCCESS != status)
			return status;
	}

	/* Initialize all qid to qidx mapping to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return status;
}
/**
 * scheduler_all_queues_deinit() - destroy every scheduler message queue
 * @sched_ctx: pointer to scheduler context
 *
 * Also resets the qid->qidx map to the invalid sentinel.
 *
 * Return: QDF_STATUS based on success or failure
 */
static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* De-Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);

	/* Initialize all qid to qidx mapping to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_mq_put() - put message in the back of queue
 * @msg_q: Pointer to the message queue
 * @msg: the message to enqueue
 *
 * Takes the queue lock, records history metrics, and appends the message.
 *
 * Return: none
 */
void scheduler_mq_put(struct scheduler_mq_type *msg_q,
		      struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	sched_history_queue(msg_q, msg);
	qdf_list_insert_back(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

/**
 * scheduler_mq_put_front() - put message in the front of queue
 * @msg_q: Pointer to the message queue
 * @msg: the message to enqueue (high priority)
 *
 * Return: none
 */
void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
			    struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	sched_history_queue(msg_q, msg);
	qdf_list_insert_front(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

/**
 * scheduler_mq_get() - pop the message at the front of a queue
 * @msg_q: Pointer to the message queue
 *
 * Return: the dequeued message, or NULL if the queue is empty
 */
struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q)
{
	QDF_STATUS status;
	qdf_list_node_t *node;

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	status = qdf_list_remove_front(&msg_q->mq_list, &node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	if (QDF_IS_STATUS_ERROR(status))
		return NULL;

	return qdf_container_of(node, struct scheduler_msg, node);
}
/**
 * scheduler_queues_deinit() - to de-initialize all the modules' queues
 * @sched_ctx: pointer to scheduler context
 *
 * Return: QDF_STATUS based on success or failure
 */
QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}

/**
 * scheduler_queues_init() - to initialize all the modules' queues
 * @sched_ctx: pointer to scheduler context
 *
 * On partial failure, any queues already created are torn down.
 *
 * Return: QDF_STATUS based on success or failure
 */
QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	status = scheduler_all_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		scheduler_all_queues_deinit(sched_ctx);
		sched_err("Failed to initialize the msg queues");
		return status;
	}

	sched_debug("Queue init passed");

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

/**
 * scheduler_core_msg_dup() - duplicate the given scheduler message
 * @msg: the message to duplicate
 *
 * Allocates a copy from the flex-mem pool, enforcing the global
 * SCHEDULER_CORE_MAX_MESSAGES depth limit (exceeding it is a debug
 * panic). The depth counter is incremented optimistically and rolled
 * back on any failure.
 *
 * Return: pointer to the duplicated message, or NULL on failure
 */
struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg)
{
	struct scheduler_msg *dup;

	if (qdf_atomic_inc_return(&__sched_queue_depth) >
	    SCHEDULER_CORE_MAX_MESSAGES)
		goto buffer_full;

	dup = qdf_flex_mem_alloc(&sched_pool);
	if (!dup) {
		sched_err("out of memory");
		goto dec_queue_count;
	}

	qdf_mem_copy(dup, msg, sizeof(*dup));

	return dup;

buffer_full:
	QDF_DEBUG_PANIC("Scheduler buffer is full");

dec_queue_count:
	qdf_atomic_dec(&__sched_queue_depth);

	return NULL;
}

/**
 * scheduler_core_msg_free() - free a message obtained from
 *	scheduler_core_msg_dup()
 * @msg: the duplicated message to free
 */
void scheduler_core_msg_free(struct scheduler_msg *msg)
{
	qdf_flex_mem_free(&sched_pool, msg);
	qdf_atomic_dec(&__sched_queue_depth);
}

/**
 * scheduler_thread_process_queues() - drain all queues in priority order
 * @sch_ctx: pointer to scheduler context
 * @shutdown: out parameter, set true when a shutdown request is seen
 *
 * Queue index 0 (the timer queue) has highest priority. After every
 * processed message the scan restarts at index 0, so higher-priority
 * queues are always served first; lower-priority queues only run when
 * all queues above them are empty. Handles suspend/shutdown flags
 * between messages.
 */
static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
					    bool *shutdown)
{
	int i;
	QDF_STATUS status;
	struct scheduler_msg *msg;

	if (!sch_ctx) {
		QDF_DEBUG_PANIC("sch_ctx is null");
		return;
	}

	/* start with highest priority queue : timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* Check if MC needs to shutdown */
		if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK,
					&sch_ctx->sch_event_flag)) {
			sched_debug("scheduler thread signaled to shutdown");
			*shutdown = true;

			/* Check for any Suspend Indication */
			if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag)) {
				/* Unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}

			break;
		}

		msg = scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (!msg) {
			/* check next queue */
			i++;
			continue;
		}

		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			sch_ctx->watchdog_msg_type = msg->type;
			sch_ctx->watchdog_callback = msg->callback;

			/* watchdog brackets every handler invocation */
			sched_history_start(msg);
			qdf_timer_start(&sch_ctx->watchdog_timer,
					SCHEDULER_WATCHDOG_TIMEOUT);
			status = sch_ctx->queue_ctx.
					scheduler_msg_process_fn[i](msg);
			qdf_timer_stop(&sch_ctx->watchdog_timer);
			sched_history_stop();

			if (QDF_IS_STATUS_ERROR(status))
				sched_err("Failed processing Qid[%d] message",
					  sch_ctx->queue_ctx.sch_msg_q[i].qid);

			scheduler_core_msg_free(msg);
		}

		/* start again with highest priority queue at index 0 */
		i = 0;
	}

	/* Check for any Suspend Indication */
	if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
			&sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
		/* Wait for resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}

	return; /* Nothing to process wait on wait queue */
}
/**
 * scheduler_thread() - main loop of the scheduler thread
 * @arg: pointer to scheduler context
 *
 * Signals sch_start_event, then loops: sleep until a post or suspend
 * event bit is set, clear the post bit, and drain the queues. Exits when
 * scheduler_thread_process_queues() reports shutdown, then signals
 * sch_shutdown.
 *
 * Return: 0 (thread exit code)
 */
int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int retWaitStatus = 0;
	bool shutdown = false;

	if (!arg) {
		QDF_DEBUG_PANIC("arg is null");
		return 0;
	}
	qdf_set_user_nice(current, -2);

	/* Ack back to the context from which the main controller thread
	 * has been created
	 */
	qdf_event_set(&sch_ctx->sch_start_event);
	sched_debug("scheduler thread %d (%s) starting up",
		    current->pid, current->comm);

	while (!shutdown) {
		/* This implements the execution model algorithm */
		retWaitStatus = qdf_wait_queue_interruptible(
					sch_ctx->sch_wait_queue,
					qdf_atomic_test_bit(MC_POST_EVENT_MASK,
						&sch_ctx->sch_event_flag) ||
					qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag));

		if (retWaitStatus == -ERESTARTSYS)
			QDF_DEBUG_PANIC("Scheduler received -ERESTARTSYS");

		qdf_atomic_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* If we get here the scheduler thread must exit */
	sched_debug("Scheduler thread exiting");
	qdf_event_set(&sch_ctx->sch_shutdown);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}

/**
 * scheduler_flush_single_queue() - discard every message in one queue
 * @mq: the queue to flush
 *
 * For each message: invoke its flush_callback if set, otherwise free its
 * bodyptr; then return the message to the pool.
 */
static void scheduler_flush_single_queue(struct scheduler_mq_type *mq)
{
	struct scheduler_msg *msg;
	QDF_STATUS (*flush_cb)(struct scheduler_msg *);

	while ((msg = scheduler_mq_get(mq))) {
		if (msg->flush_callback) {
			sched_debug("Calling flush callback; type: %x",
				    msg->type);
			flush_cb = msg->flush_callback;
			flush_cb(msg);
		} else if (msg->bodyptr) {
			sched_debug("Freeing scheduler msg bodyptr; type: %x",
				    msg->type);
			qdf_mem_free(msg->bodyptr);
		}

		scheduler_core_msg_free(msg);
	}
}

/**
 * scheduler_queues_flush() - flush all of the scheduler queues
 * @sched_ctx: pointer to scheduler context
 *
 * Flushes every per-module queue, then releases spare pool memory.
 *
 * Return: none
 */
void scheduler_queues_flush(struct scheduler_ctx *sched_ctx)
{
	struct scheduler_mq_type *mq;
	int i;

	sched_debug("Flushing scheduler message queues");

	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		mq = &sched_ctx->queue_ctx.sch_msg_q[i];
		scheduler_flush_single_queue(mq);
	}

	qdf_flex_mem_release(&sched_pool);
}
+ +ifeq ($(strip ${QCA_PARTNER_MAKE_F_SUPPORT}),1) +export QCA_PARTNER_MAKE_F_INC=1 +endif + +include $(obj)/$(DEPTH)/os/linux/Makefile-linux.common + +INCS += -I$(HAL) -I$(HAL)/$(OS) -I$(ATH) -I$(ATH_RATE) -I$(ATH_PKTLOG) -I$(WLAN) -I$(IF_WLAN) -I$(ATH_SPECTRAL) -I$(ATHEROSPATH) -I$(obj)/$(DEPTH)/../../apps/spectral/common +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/inc -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/obj_mgr/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/cmn_defs/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/utils/nlink/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/scan/dispatcher/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/cmn_defs/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/global_umac_dispatcher/lmac_if/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/scheduler/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/global_lmac_if/inc +INCS += -I$(obj)/$(DEPTH)/umac/scan +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/mgmt_txrx/dispatcher/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/regulatory/dispatcher/inc +INCS += -I$(obj)/$(DEPTH)/umac/son/dispatcher/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/dfs/dispatcher/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/os_if/linux/spectral/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/os_if/linux/scan/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/spectral/core +INCS += -I$(obj)/$(DEPTH)/cmn_dev/target_if/direct_buf_rx/inc + +ifeq ($(WLAN_CONV_CRYPTO_SUPPORTED), 1) +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/crypto/inc +endif + +ifeq ($(WLAN_SUPPORT_GREEN_AP), 1) +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/green_ap/dispatcher/inc +endif + +#Start of offload related deifines +HOST_CMN_CONVG_SRC := $(DEPTH)/cmn_dev +HOST_CMN_CONVG_HIF_SRC := $(DEPTH)/cmn_dev/hif/src +HOST_CMN_CONVG_HIF_INC1 := $(DEPTH)/cmn_dev/hif +HOST_CMN_CONVG_HTC_INC := $(DEPTH)/cmn_dev/htc +HOST_CMN_CONVG_DP_INC := $(DEPTH)/cmn_dev/dp/wifi3.0 +HOST_CMN_CONVG_CFG_INC := $(DEPTH)/cmn_dev/wlan_cfg +HOST_CMN_CONVG_HAL_INC := $(DEPTH)/cmn_dev/hal/inc 
+HOST_CMN_CONVG_HAL_WIFI_INC := $(DEPTH)/cmn_dev/hal/wifi3.0 + +INCS += -I$(obj)/$(DEPTH)/include -I$(obj)/$(DEPTH)/umac/include \ + -I$(obj)/$(DEPTH)/umac/if_lmac -I$(obj)/$(DEPTH)/umac/crypto \ + -I$(obj)/$(DEPTH)/umac/scan -I$(obj)/$(DEPTH)/umac/resmgr \ + -I$(obj)/$(DEPTH)/umac/pm -I$(obj)/$(DEPTH)/umac/txrx \ + -I$(obj)/$(DEPTH)/umac/acs -I$(obj)/$(DEPTH)/umac/txbf \ + -I$(obj)/$(DEPTH)/umac/wnm \ + -I$(obj)/$(DEPTH)/umac/tdls \ + -I$(obj)/$(DEPTH)/umac/rpt_placement \ + -I$(obj)/$(DEPTH)/umac/wifipos \ + -I$(obj)/$(DEPTH)/umac/wds -I$(obj)/$(DEPTH)/umac/ique \ + -I$(obj)/$(DEPTH)/hal -I$(obj)/$(DEPTH)/lmac/ath_dev \ + -I$(obj)/$(DEPTH)/hal/$(OS) \ + -I$(obj)/$(DEPTH)/umac/vi_dbg \ + -I$(obj)/$(DEPTH)/umac/smart_antenna \ + -I$(obj)/$(DEPTH)/umac/smart_ant \ + -I$(obj)/$(DEPTH)/umac/ald \ + -I$(obj)/$(DEPTH)/lmac/ath_pktlog \ + -I$(obj)/$(DEPTH)/lmac/ratectrl \ + -I$(obj)/$(DEPTH)/os/linux/mem/ \ + -I$(obj)/$(DEPTH)/umac/base \ + -I$(obj)/$(DEPTH)/qca_ol \ + -I$(obj)/$(DEPTH)/cmn_dev/qdf/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/qdf/linux/src \ + -I$(obj)/$(DEPTH)/cmn_dev/hif \ + -I$(obj)/$(DEPTH)/cmn_dev/hif/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/hif/src \ + -I$(obj)/$(DEPTH)/cmn_dev/hif/src/ce \ + -I$(obj)/$(DEPTH)/cmn_dev/hif/src/pcie \ + -I$(obj)/$(DEPTH)/cmn_dev/hif/src/snoc \ + -I$(obj)/$(DEPTH)/cmn_dev/hif/src/dispatcher \ + -I$(obj)/$(DEPTH)/cmn_dev/pld_stub/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/hal/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/hal/wifi3.0 \ + -I$(obj)/$(DEPTH)/cmn_dev/dp/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/dp/wifi3.0 \ + -I$(obj)/$(DEPTH)/cmn_dev/wlan_cfg \ + -I$(obj)/$(HOST_CMN_CONVG_SRC)/htc \ + -I$(obj)/$(DEPTH)/cmn_dev/wmi/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/obj_mgr/inc \ + -I$(obj)/$(HOST_CMN_CONVG_SRC)/scheduler/inc \ + -I$(obj)/$(HOST_CMN_CONVG_SRC)/init_deinit/dispatcher/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/global_umac_dispatcher/lmac_if/inc \ + 
-I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/mgmt_txrx/dispatcher/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/target_if/init_deinit/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/global_lmac_if/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/os_if/linux \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/cmn_defs/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/target_if/core/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/scan/dispatcher/inc \ + -I$(obj)/$(DEPTH)/umac/scan \ + -I$(obj)/$(DEPTH)/cmn_dev/ol_if \ + -I$(obj)/$(DEPTH)/cmn_dev/target_if/scan/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/serialization/core/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/umac/regulatory/dispatcher/inc \ + -I$(obj)/$(DEPTH)/cmn_dev/target_if/regulatory/inc \ + +PERF_PWR_OFFLOAD_INC += -I$(PERF_PWR_OFFLOAD_DIR_PATH)/wlan/include \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/wlan/ath_pktlog/include \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/htt/include \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/wlan/txrx/include \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/include \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/include \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/hif/pci \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/hif/pci/linux \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/os/linux/include \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/wlan/regdmn \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/wlan/lmac_offload_if \ + -I$(HOST_CMN_CONVG_HIF_INC1)/inc \ + -I$(HOST_CMN_CONVG_HIF_INC1)/src \ + -I$(HOST_CMN_CONVG_HIF_INC1)/src/pcie \ + -I$(HOST_CMN_CONVG_HIF_INC1)/src/snoc \ + -I$(HOST_CMN_CONVG_SRC)/pld_stub/inc \ + -I$(HOST_CMN_CONVG_HIF_SRC)/ce \ + -I$(HOST_CMN_CONVG_HTC_INC) \ + -I$(HOST_CMN_CONVG_CFG_INC) \ + -I$(HOST_CMN_CONVG_DP_INC) \ + -I$(HOST_CMN_CONVG_HAL_INC) \ + -I$(HOST_CMN_CONVG_HAL_WIFI_INC) \ + -I$(PERF_PWR_OFFLOAD_WMI_SRC)/inc \ + -I$(obj)/$(DEPTH)/offload/extra_include + +#Add HK/BL Fw header path required by common files +ifeq (1, ${WIFI_TARGET_3_0}) +PERF_PWR_OFFLOAD_INC += -I$(PERF_PWR_OFFLOAD_DIR_PATH)/include/fwcommon \ + -I$(PERF_PWR_OFFLOAD_DIR_PATH)/include/fwcommon/htt_stats +else +PERF_PWR_OFFLOAD_INC += 
-I$(PERF_PWR_OFFLOAD_DIR_PATH)/include/legacy +endif + +INCS += $(PERF_PWR_OFFLOAD_INC) +INCS += -I$(obj)/$(DEPTH)/cmn_dev/target_if/spectral +INCS += -I$(obj)/$(DEPTH)/cmn_dev/wmi/inc +INCS += -I$(PERF_PWR_OFFLOAD_DIR_PATH)/hw/include +#end of offload related defines + +#Start of Legacy spectral related defines +INCS += -I$(HAL) -I$(HAL)/$(OS) -I$(ATH) -I$(ATH_RATE) -I$(ATH_PKTLOG) -I$(WLAN) -I$(IF_WLAN) -I$(ATH_SPECTRAL) -I$(ATHEROSPATH) -I$(obj)/$(DEPTH)/../../apps/spectral/common +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/inc -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/obj_mgr/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/cmn_defs/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/scan/dispatcher/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/cmn_defs/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/global_umac_dispatcher/lmac_if/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/scheduler/inc +INCS += -I$(obj)/$(DEPTH)/umac/scan +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/cmn_services/mgmt_txrx/dispatcher/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/regulatory/dispatcher/inc +INCS += -I$(obj)/$(DEPTH)/cmn_dev/umac/dfs/dispatcher/inc + +SPECTRAL_DA_OBJS := $(DEPTH)/lmac/spectral/spectral.o \ + $(DEPTH)/lmac/spectral/spectral_netlink.o \ + $(DEPTH)/lmac/spectral/spectral_cmds.o \ + $(DEPTH)/lmac/spectral/spectral_process_data.o \ + $(DEPTH)/lmac/spectral/spectral_phyerr.o +#End of legacy spectral defines + +ifeq ($(QCA_AIRTIME_FAIRNESS), 1) +ccflags-y+= -DWLAN_ATF_ENABLE +INCS += -I$(obj)/$(DEPTH)/umac/airtime_fairness/dispatcher/inc +endif + +ifeq ($(UNIFIED_SMARTANTENNA), 1) +ccflags-y+= -DWLAN_SA_API_ENABLE +INCS += -I$(obj)/$(DEPTH)/umac/sa_api/dispatcher/inc +endif + +ifeq ($(strip ${QCA_DFS_COMPONENT_ENABLE}),1) +ccflags-y+= -DDFS_COMPONENT_ENABLE +endif + +obj-m += qca_spectral.o + +ccflags-y+= $(INCS) $(COPTS) -DSPECTRAL_USE_NETLINK_SOCKETS=1 -DWLAN_SPECTRAL_ENABLE=1 + +ifeq ($(strip ${QCA_PARTNER_MAKE_F_SUPPORT}),1) +MOD_CFLAGS = -D"KBUILD_STR(s)=\#s" 
-D"KBUILD_BASENAME=KBUILD_STR(qca_spectral.mod)" -D"KBUILD_MODNAME=KBUILD_STR(qca_spectral)" +endif + +INCS += -I$(obj)/$(DEPTH)/spectral/dispatcher/inc + +SPECTRAL_TIF_OBJS += $(DEPTH)/cmn_dev/target_if/spectral/target_if_spectral.o \ + $(DEPTH)/cmn_dev/target_if/spectral/target_if_spectral_netlink.o \ + $(DEPTH)/cmn_dev/target_if/spectral/target_if_spectral_phyerr.o \ + $(DEPTH)/cmn_dev/target_if/spectral/target_if_spectral_sim.o + +SPECTRAL_CMN_OBJS += core/spectral_direct_attach.o \ + core/spectral_offload.o \ + core/spectral_common.o \ + dispatcher/src/wlan_spectral_utils_api.o \ + dispatcher/src/wlan_spectral_ucfg_api.o \ + dispatcher/src/wlan_spectral_tgt_api.o \ + core/spectral_module.o \ + $(DEPTH)/cmn_dev/os_if/linux/spectral/src/wlan_cfg80211_spectral.o \ + $(DEPTH)/cmn_dev/os_if/linux/spectral/src/os_if_spectral_netlink.o + +qca_spectral-objs += ${SPECTRAL_CMN_OBJS} \ + ${SPECTRAL_TIF_OBJS} \ + ${SPECTRAL_DA_OBJS} + +ifeq ($(strip ${QCA_PARTNER_MAKE_F_SUPPORT}),1) +all: qca_spectral.ko + +qca_spectral.mod.o: qca_spectral.mod.c + ${CC} -c -o $@ ${ccflags-y} ${MOD_CFLAGS} $< + +qca_spectral.o: ${qca_spectral-objs} + $(LD) -m elf32btsmip -r -o qca_spectral.o $(qca_spectral-objs) + $(KERNELPATH)/scripts/mod/modpost qca_spectral.o + +qca_spectral.ko: qca_spectral.o qca_spectral.mod.o + $(LD) $(LDOPTS) -o qca_spectral.ko qca_spectral.o qca_spectral.mod.o + +%.o: %.c + ${CC} -c -o $@ ${ccflags-y} $< +endif diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_cmn_api_i.h b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_cmn_api_i.h new file mode 100644 index 0000000000000000000000000000000000000000..ed66871d52ade6b3fb6f8c237784462db98f1fdf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_cmn_api_i.h @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
+ * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _SPECTRAL_CMN_API_I_H_ +#define _SPECTRAL_CMN_API_I_H_ + +#include "spectral_defs_i.h" + +/** + * wlan_spectral_psoc_obj_create_handler() - handler for psoc object create + * @psoc: reference to global psoc object + * @arg: reference to argument provided during registration of handler + * + * This is a handler to indicate psoc object created. Hence spectral_context + * object can be created and attached to psoc component list. + * + * Return: QDF_STATUS_SUCCESS on success + * QDF_STATUS_E_FAILURE if psoc is null + * QDF_STATUS_E_NOMEM on failure of spectral object allocation + */ +QDF_STATUS wlan_spectral_psoc_obj_create_handler(struct wlan_objmgr_psoc *psoc, + void *arg); + +/** + * wlan_spectral_psoc_obj_destroy_handler() - handler for psoc object delete + * @psoc: reference to global psoc object + * @arg: reference to argument provided during registration of handler + * + * This is a handler to indicate psoc object going to be deleted. + * Hence spectral_context object can be detached from psoc component list. + * Then spectral_context object can be deleted. 
+ * + * Return: QDF_STATUS_SUCCESS on success + * QDF_STATUS_E_FAILURE on failure + */ +QDF_STATUS wlan_spectral_psoc_obj_destroy_handler(struct wlan_objmgr_psoc *psoc, + void *arg); + +/** + * wlan_spectral_pdev_obj_create_handler() - handler for pdev object create + * @pdev: reference to global pdev object + * @arg: reference to argument provided during registration of handler + * + * This is a handler to indicate pdev object created. Hence pdev specific + * spectral object can be created and attached to pdev component list. + * + * Return: QDF_STATUS_SUCCESS on success + * QDF_STATUS_E_FAILURE if pdev is null + * QDF_STATUS_E_NOMEM on failure of spectral object allocation + */ +QDF_STATUS wlan_spectral_pdev_obj_create_handler(struct wlan_objmgr_pdev *pdev, + void *arg); + +/** + * wlan_spectral_pdev_obj_destroy_handler() - handler for pdev object delete + * @pdev: reference to global pdev object + * @arg: reference to argument provided during registration of handler + * + * This is a handler to indicate pdev object going to be deleted. + * Hence pdev specific spectral object can be detached from pdev component list. + * Then pdev_spectral object can be deleted. + * + * Return: QDF_STATUS_SUCCESS on success + * QDF_STATUS_E_FAILURE on failure + */ +QDF_STATUS wlan_spectral_pdev_obj_destroy_handler(struct wlan_objmgr_pdev *pdev, + void *arg); + +/** + * spectral_control_cmn()- common handler for demultiplexing requests from + * higher layer + * @pdev: reference to global pdev object + * @id: spectral config command id + * @indata: reference to input data + * @insize: input data size + * @outdata: reference to output data + * @outsize: reference to output data size + * + * This function processes the spectral config command + * and appropriate handlers are invoked. 
+ * + * Return: 0 success else failure + */ +int spectral_control_cmn(struct wlan_objmgr_pdev *pdev, + u_int id, + void *indata, + uint32_t insize, void *outdata, uint32_t *outsize); + +/** + * spectral_control_ol(): Offload handler for demultiplexing requests from + * higher layer + * @pdev: reference to global pdev object + * @id: spectral config command id + * @indata: reference to input data + * @insize: input data size + * @outdata: reference to output data + * @outsize: reference to output data size + * + * This function processes the spectral config command + * and appropriate handlers are invoked. + * + * Return: 0 success else failure + */ +int spectral_control_ol( + struct wlan_objmgr_pdev *pdev, u_int id, + void *indata, uint32_t insize, void *outdata, uint32_t *outsize); + +/** + * spectral_get_spectral_ctx_from_pdev() - API to get spectral context object + * from pdev + * @pdev : Reference to pdev global object + * + * This API used to get spectral context object from global pdev reference. + * Null check should be done before invoking this inline function. + * + * Return : Reference to spectral_context object + */ +static inline struct spectral_context * +spectral_get_spectral_ctx_from_pdev(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + struct spectral_context *sc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + if (psoc) { + sc = wlan_objmgr_psoc_get_comp_private_obj( + psoc, + WLAN_UMAC_COMP_SPECTRAL); + } + + return sc; +} + +/** + * spectral_get_spectral_ctx_from_pscoc() - API to get spectral context object + * from psoc + * @psoc : Reference to psoc global object + * + * This API used to get spectral context object from global psoc reference. + * Null check should be done before invoking this inline function. 
+ * + * Return : Reference to spectral_context object + */ +static inline struct spectral_context * +spectral_get_spectral_ctx_from_psoc(struct wlan_objmgr_psoc *psoc) +{ + struct spectral_context *sc = NULL; + + if (psoc) { + sc = wlan_objmgr_psoc_get_comp_private_obj( + psoc, + WLAN_UMAC_COMP_SPECTRAL); + } + + return sc; +} + +/** + * spectral_get_spectral_ctx_from_vdev() - API to get spectral context object + * from vdev + * @vdev : Reference to vdev global object + * + * This API used to get spectral context object from global vdev reference. + * Null check should be done before invoking this inline function. + * + * Return : Reference to spectral_context object + */ +static inline struct spectral_context * +spectral_get_spectral_ctx_from_vdev(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + struct spectral_context *sc = NULL; + + psoc = wlan_vdev_get_psoc(vdev); + if (psoc) { + sc = wlan_objmgr_psoc_get_comp_private_obj( + psoc, + WLAN_UMAC_COMP_SPECTRAL); + } + + return sc; +} +#endif /* _SPECTRAL_CMN_API_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_common.c b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_common.c new file mode 100644 index 0000000000000000000000000000000000000000..4b031547513c900178f5ef7d60615a7e2fcb5ff0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_common.c @@ -0,0 +1,652 @@ +/* + * Copyright (c) 2011,2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "spectral_cmn_api_i.h" +#include "spectral_da_api_i.h" +#include "spectral_ol_api_i.h" +#include +#include +#ifdef CONFIG_WIN +#include +#include +#endif /*CONFIG_WIN*/ +#include +#include + +/** + * spectral_get_vdev() - Get pointer to vdev to be used for Spectral + * operations + * @pdev: Pointer to pdev + * + * Spectral operates on pdev. However, in order to retrieve some WLAN + * properties, a vdev is required. To facilitate this, the function returns the + * first vdev in our pdev. The caller should release the reference to the vdev + * once it is done using it. Additionally, the caller should ensure it has a + * reference to the pdev at the time of calling this function, and should + * release the pdev reference either after this function returns or at a later + * time when the caller is done using pdev. + * TODO: + * - If the framework later provides an API to obtain the first active + * vdev, then it would be preferable to use this API. + * - Use a common get_vdev() handler for core and target_if using Rx ops. This + * is deferred till details emerge on framework providing API to get first + * active vdev. 
+ * + * Return: Pointer to vdev on success, NULL on failure + */ +static struct wlan_objmgr_vdev* +spectral_get_vdev(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_vdev *vdev = NULL; + + qdf_assert_always(pdev); + + vdev = wlan_objmgr_pdev_get_first_vdev(pdev, WLAN_SPECTRAL_ID); + + if (!vdev) { + spectral_warn("Unable to get first vdev of pdev"); + return NULL; + } + + return vdev; +} + +#ifndef CONFIG_MCL +/** + * spectral_register_cfg80211_handlers() - Register spectral cfg80211 handlers + * @pdev: Pointer to pdev + * + * Register spectral cfg80211 handlers + * Handlers can be different for WIN and MCL + * + * Return: None + */ +static void +spectral_register_cfg80211_handlers(struct wlan_objmgr_pdev *pdev) +{ + wlan_cfg80211_register_spectral_cmd_handler( + pdev, + SPECTRAL_SCAN_START_HANDLER_IDX, + wlan_cfg80211_spectral_scan_config_and_start); + wlan_cfg80211_register_spectral_cmd_handler( + pdev, + SPECTRAL_SCAN_STOP_HANDLER_IDX, + wlan_cfg80211_spectral_scan_stop); + wlan_cfg80211_register_spectral_cmd_handler( + pdev, + SPECTRAL_SCAN_GET_CONFIG_HANDLER_IDX, + wlan_cfg80211_spectral_scan_get_config); + wlan_cfg80211_register_spectral_cmd_handler( + pdev, + SPECTRAL_SCAN_GET_DIAG_STATS_HANDLER_IDX, + wlan_cfg80211_spectral_scan_get_diag_stats); + wlan_cfg80211_register_spectral_cmd_handler( + pdev, + SPECTRAL_SCAN_GET_CAP_HANDLER_IDX, + wlan_cfg80211_spectral_scan_get_cap); + wlan_cfg80211_register_spectral_cmd_handler( + pdev, + SPECTRAL_SCAN_GET_STATUS_HANDLER_IDX, + wlan_cfg80211_spectral_scan_get_status); +} +#else +static void +spectral_register_cfg80211_handlers(struct wlan_objmgr_pdev *pdev) +{ +} +#endif + +int +spectral_control_cmn(struct wlan_objmgr_pdev *pdev, + u_int id, + void *indata, + uint32_t insize, void *outdata, uint32_t *outsize) +{ + int error = 0; + int temp_debug; + struct spectral_config sp_out; + struct spectral_config *sp_in; + struct spectral_config *spectralparams; + struct spectral_context *sc; + struct wlan_objmgr_vdev 
*vdev = NULL; + uint8_t vdev_rxchainmask = 0; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + error = -EINVAL; + goto bad; + } + sc = spectral_get_spectral_ctx_from_pdev(pdev); + if (!sc) { + spectral_err("atf context is NULL!"); + error = -EINVAL; + goto bad; + } + + switch (id) { + case SPECTRAL_SET_CONFIG: + { + if (insize < sizeof(struct spectral_config) || + !indata) { + error = -EINVAL; + break; + } + sp_in = (struct spectral_config *)indata; + if (sp_in->ss_count != + SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_SCAN_COUNT, + sp_in->ss_count)) + error = -EINVAL; + } + + if (sp_in->ss_fft_period != + SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_FFT_PERIOD, + sp_in->ss_fft_period)) + error = -EINVAL; + } + + if (sp_in->ss_period != SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_SCAN_PERIOD, + sp_in->ss_period)) + error = -EINVAL; + } + + if (sp_in->ss_short_report != + SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_SHORT_REPORT, + (uint32_t) + (sp_in->ss_short_report ? 
1 : 0))) + error = -EINVAL; + } + + if (sp_in->ss_spectral_pri != + SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_SPECT_PRI, + (uint32_t)(sp_in->ss_spectral_pri))) + error = -EINVAL; + } + + if (sp_in->ss_fft_size != SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_FFT_SIZE, + sp_in->ss_fft_size)) + error = -EINVAL; + } + + if (sp_in->ss_gc_ena != SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_GC_ENA, + sp_in->ss_gc_ena)) + error = -EINVAL; + } + + if (sp_in->ss_restart_ena != + SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_RESTART_ENA, + sp_in->ss_restart_ena)) + error = -EINVAL; + } + + if (sp_in->ss_noise_floor_ref != + SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_NOISE_FLOOR_REF, + sp_in->ss_noise_floor_ref)) + error = -EINVAL; + } + + if (sp_in->ss_init_delay != + SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_INIT_DELAY, + sp_in->ss_init_delay)) + error = -EINVAL; + } + + if (sp_in->ss_nb_tone_thr != + SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_NB_TONE_THR, + sp_in->ss_nb_tone_thr)) + error = -EINVAL; + } + + if (sp_in->ss_str_bin_thr != + SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_STR_BIN_THR, + sp_in->ss_str_bin_thr)) + error = -EINVAL; + } + + if (sp_in->ss_wb_rpt_mode != + SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_WB_RPT_MODE, + sp_in->ss_wb_rpt_mode)) + error = -EINVAL; + } + + if (sp_in->ss_rssi_rpt_mode != + SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_RSSI_RPT_MODE, + sp_in->ss_rssi_rpt_mode)) + error = -EINVAL; + } + + if (sp_in->ss_rssi_thr != SPECTRAL_PHYERR_PARAM_NOVAL) 
{ + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_RSSI_THR, + sp_in->ss_rssi_thr)) + error = -EINVAL; + } + + if (sp_in->ss_pwr_format != + SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_PWR_FORMAT, + sp_in->ss_pwr_format)) + error = -EINVAL; + } + + if (sp_in->ss_rpt_mode != SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_RPT_MODE, + sp_in->ss_rpt_mode)) + error = -EINVAL; + } + + if (sp_in->ss_bin_scale != + SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_BIN_SCALE, + sp_in->ss_bin_scale)) + error = -EINVAL; + } + + if (sp_in->ss_dbm_adj != SPECTRAL_PHYERR_PARAM_NOVAL) { + if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_DBM_ADJ, + sp_in->ss_dbm_adj)) + error = -EINVAL; + } + + if (sp_in->ss_chn_mask != SPECTRAL_PHYERR_PARAM_NOVAL) { + /* + * Check if any of the inactive Rx antenna + * chains is set active in spectral chainmask + */ + vdev = spectral_get_vdev(pdev); + if (!vdev) { + error = -ENOENT; + break; + } + + vdev_rxchainmask = + wlan_vdev_mlme_get_rxchainmask(vdev); + wlan_objmgr_vdev_release_ref(vdev, + WLAN_SPECTRAL_ID); + + if (!(sp_in->ss_chn_mask & vdev_rxchainmask)) { + spectral_err("Invalid Spectral Chainmask - Inactive Rx antenna chain cannot be an active spectral chain"); + error = -EINVAL; + break; + } else if (sc->sptrlc_set_spectral_config( + pdev, + SPECTRAL_PARAM_CHN_MASK, + sp_in->ss_chn_mask)) { + error = -EINVAL; + } + } + } + break; + + case SPECTRAL_GET_CONFIG: + { + if (!outdata || !outsize || + (*outsize < sizeof(struct spectral_config))) { + error = -EINVAL; + break; + } + *outsize = sizeof(struct spectral_config); + sc->sptrlc_get_spectral_config(pdev, &sp_out); + spectralparams = (struct spectral_config *)outdata; + spectralparams->ss_fft_period = sp_out.ss_fft_period; + spectralparams->ss_period = sp_out.ss_period; + spectralparams->ss_count = sp_out.ss_count; + 
spectralparams->ss_short_report = + sp_out.ss_short_report; + spectralparams->ss_spectral_pri = + sp_out.ss_spectral_pri; + spectralparams->ss_fft_size = sp_out.ss_fft_size; + spectralparams->ss_gc_ena = sp_out.ss_gc_ena; + spectralparams->ss_restart_ena = sp_out.ss_restart_ena; + spectralparams->ss_noise_floor_ref = + sp_out.ss_noise_floor_ref; + spectralparams->ss_init_delay = sp_out.ss_init_delay; + spectralparams->ss_nb_tone_thr = sp_out.ss_nb_tone_thr; + spectralparams->ss_str_bin_thr = sp_out.ss_str_bin_thr; + spectralparams->ss_wb_rpt_mode = sp_out.ss_wb_rpt_mode; + spectralparams->ss_rssi_rpt_mode = + sp_out.ss_rssi_rpt_mode; + spectralparams->ss_rssi_thr = sp_out.ss_rssi_thr; + spectralparams->ss_pwr_format = sp_out.ss_pwr_format; + spectralparams->ss_rpt_mode = sp_out.ss_rpt_mode; + spectralparams->ss_bin_scale = sp_out.ss_bin_scale; + spectralparams->ss_dbm_adj = sp_out.ss_dbm_adj; + spectralparams->ss_chn_mask = sp_out.ss_chn_mask; + } + break; + + case SPECTRAL_IS_ACTIVE: + { + if (!outdata || !outsize || + *outsize < sizeof(uint32_t)) { + error = -EINVAL; + break; + } + *outsize = sizeof(uint32_t); + *((uint32_t *)outdata) = + (uint32_t)sc->sptrlc_is_spectral_active(pdev); + } + break; + + case SPECTRAL_IS_ENABLED: + { + if (!outdata || !outsize || + *outsize < sizeof(uint32_t)) { + error = -EINVAL; + break; + } + *outsize = sizeof(uint32_t); + *((uint32_t *)outdata) = + (uint32_t)sc->sptrlc_is_spectral_enabled(pdev); + } + break; + + case SPECTRAL_SET_DEBUG_LEVEL: + { + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + temp_debug = *(uint32_t *)indata; + sc->sptrlc_set_debug_level(pdev, temp_debug); + } + break; + + case SPECTRAL_GET_DEBUG_LEVEL: + { + if (!outdata || !outsize || + *outsize < sizeof(uint32_t)) { + error = -EINVAL; + break; + } + *outsize = sizeof(uint32_t); + *((uint32_t *)outdata) = + (uint32_t)sc->sptrlc_get_debug_level(pdev); + } + break; + + case SPECTRAL_ACTIVATE_SCAN: + { + 
sc->sptrlc_start_spectral_scan(pdev); + } + break; + + case SPECTRAL_STOP_SCAN: + { + sc->sptrlc_stop_spectral_scan(pdev); + } + break; + + case SPECTRAL_GET_CAPABILITY_INFO: + { + if (!outdata || !outsize || + *outsize < sizeof(struct spectral_caps)) { + error = -EINVAL; + break; + } + *outsize = sizeof(struct spectral_caps); + sc->sptrlc_get_spectral_capinfo(pdev, outdata); + } + break; + + case SPECTRAL_GET_DIAG_STATS: + { + if (!outdata || !outsize || + (*outsize < sizeof(struct spectral_diag_stats))) { + error = -EINVAL; + break; + } + *outsize = sizeof(struct spectral_diag_stats); + sc->sptrlc_get_spectral_diagstats(pdev, outdata); + } + break; + + case SPECTRAL_GET_CHAN_WIDTH: + { + uint32_t chan_width; + + vdev = spectral_get_vdev(pdev); + if (!vdev) + return -ENOENT; + + chan_width = spectral_vdev_get_ch_width(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + if (!outdata || !outsize || + *outsize < sizeof(chan_width)) { + error = -EINVAL; + break; + } + *outsize = sizeof(chan_width); + *((uint32_t *)outdata) = (uint32_t)chan_width; + } + break; + + default: + error = -EINVAL; + break; + } + + bad: + return error; +} + +/** + * spectral_ctx_deinit() - De-initialize function pointers from spectral context + * @sc - Reference to spectral_context object + * + * Return: None + */ +static void +spectral_ctx_deinit(struct spectral_context *sc) +{ + if (sc) { + sc->sptrlc_ucfg_phyerr_config = NULL; + sc->sptrlc_pdev_spectral_init = NULL; + sc->sptrlc_pdev_spectral_deinit = NULL; + sc->sptrlc_set_spectral_config = NULL; + sc->sptrlc_get_spectral_config = NULL; + sc->sptrlc_start_spectral_scan = NULL; + sc->sptrlc_stop_spectral_scan = NULL; + sc->sptrlc_is_spectral_active = NULL; + sc->sptrlc_is_spectral_enabled = NULL; + sc->sptrlc_set_debug_level = NULL; + sc->sptrlc_get_debug_level = NULL; + sc->sptrlc_get_spectral_capinfo = NULL; + sc->sptrlc_get_spectral_diagstats = NULL; + } +} + +QDF_STATUS +wlan_spectral_psoc_obj_create_handler(struct 
wlan_objmgr_psoc *psoc, void *arg) +{ + struct spectral_context *sc = NULL; + + if (!psoc) { + spectral_err("PSOC is NULL"); + return QDF_STATUS_E_FAILURE; + } + sc = (struct spectral_context *) + qdf_mem_malloc(sizeof(struct spectral_context)); + if (!sc) { + spectral_err("Failed to allocate spectral_ctx object"); + return QDF_STATUS_E_NOMEM; + } + qdf_mem_zero(sc, sizeof(struct spectral_context)); + sc->psoc_obj = psoc; + if (wlan_objmgr_psoc_get_dev_type(psoc) == WLAN_DEV_OL) + spectral_ctx_init_ol(sc); +#ifdef CONFIG_WIN + else if (wlan_objmgr_psoc_get_dev_type(psoc) == WLAN_DEV_DA) + spectral_ctx_init_da(sc); +#endif + wlan_objmgr_psoc_component_obj_attach(psoc, WLAN_UMAC_COMP_SPECTRAL, + (void *)sc, QDF_STATUS_SUCCESS); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_spectral_psoc_obj_destroy_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + struct spectral_context *sc = NULL; + + if (!psoc) { + spectral_err("PSOC is NULL"); + return QDF_STATUS_E_FAILURE; + } + sc = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_SPECTRAL); + if (sc) { + wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_SPECTRAL, + (void *)sc); + /* Deinitilise function pointers from spectral context */ + spectral_ctx_deinit(sc); + qdf_mem_free(sc); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_spectral_pdev_obj_create_handler(struct wlan_objmgr_pdev *pdev, void *arg) +{ + struct pdev_spectral *ps = NULL; + struct spectral_context *sc = NULL; + void *target_handle = NULL; + + if (!pdev) { + spectral_err("PDEV is NULL"); + return QDF_STATUS_E_FAILURE; + } + ps = (struct pdev_spectral *) + qdf_mem_malloc(sizeof(struct pdev_spectral)); + if (!ps) { + spectral_err("Failed to allocate pdev_spectral object"); + return QDF_STATUS_E_NOMEM; + } + sc = spectral_get_spectral_ctx_from_pdev(pdev); + if (!sc) { + spectral_err("Spectral context is NULL!"); + goto cleanup; + } + + qdf_mem_zero(ps, sizeof(struct pdev_spectral)); + ps->psptrl_pdev = pdev; + + 
spectral_register_cfg80211_handlers(pdev); + if (sc->sptrlc_pdev_spectral_init) { + target_handle = sc->sptrlc_pdev_spectral_init(pdev); + if (!target_handle) { + spectral_err("Spectral lmac object is NULL!"); + goto cleanup; + } + ps->psptrl_target_handle = target_handle; + } + wlan_objmgr_pdev_component_obj_attach(pdev, WLAN_UMAC_COMP_SPECTRAL, + (void *)ps, QDF_STATUS_SUCCESS); + + return QDF_STATUS_SUCCESS; + cleanup: + qdf_mem_free(ps); + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wlan_spectral_pdev_obj_destroy_handler(struct wlan_objmgr_pdev *pdev, + void *arg) +{ + struct pdev_spectral *ps = NULL; + struct spectral_context *sc = NULL; + + if (!pdev) { + spectral_err("PDEV is NULL"); + return QDF_STATUS_E_FAILURE; + } + sc = spectral_get_spectral_ctx_from_pdev(pdev); + if (!sc) { + spectral_err("Spectral context is NULL!"); + return QDF_STATUS_E_FAILURE; + } + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + if (ps) { + if (sc->sptrlc_pdev_spectral_deinit) + sc->sptrlc_pdev_spectral_deinit(pdev); + ps->psptrl_target_handle = NULL; + wlan_objmgr_pdev_component_obj_detach(pdev, + WLAN_UMAC_COMP_SPECTRAL, + (void *)ps); + qdf_mem_free(ps); + } + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_da_api_i.h b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_da_api_i.h new file mode 100644 index 0000000000000000000000000000000000000000..5cc48b02490c46a43bcbf4ad93c089708f1552c4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_da_api_i.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _SPECTRAL_DA_API_I_H_ +#define _SPECTRAL_DA_API_I_H_ + +#include "spectral_defs_i.h" + +/** + * spectral_ctx_init_da() - Internal function to initialize spectral context + * with direct attach specific functions + * @sc : spectral context + * + * Internal function to initialize spectral context with direct attach + * specific functions + * + * Return : None + */ +void spectral_ctx_init_da(struct spectral_context *sc); + +#endif /* _SPECTRAL_DA_API_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_defs_i.h b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_defs_i.h new file mode 100644 index 0000000000000000000000000000000000000000..aa92ef886c8e0ed43c61e7a379e4916d4d07b0d8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_defs_i.h @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _SPECTRAL_DEFS_I_H_ +#define _SPECTRAL_DEFS_I_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_WIN +#include +#endif /*CONFIG_WIN*/ +#include + +#define spectral_log(level, args...) \ +QDF_PRINT_INFO(QDF_PRINT_IDX_SHARED, QDF_MODULE_ID_SPECTRAL, level, ## args) + +#define spectral_logfl(level, format, args...) \ + spectral_log(level, FL(format), ## args) + +#define spectral_fatal(format, args...) \ + spectral_logfl(QDF_TRACE_LEVEL_FATAL, format, ## args) +#define spectral_err(format, args...) \ + spectral_logfl(QDF_TRACE_LEVEL_ERROR, format, ## args) +#define spectral_warn(format, args...) \ + spectral_logfl(QDF_TRACE_LEVEL_WARN, format, ## args) +#define spectral_info(format, args...) \ + spectral_logfl(QDF_TRACE_LEVEL_INFO, format, ## args) +#define spectral_debug(format, args...) 
\ + spectral_logfl(QDF_TRACE_LEVEL_DEBUG, format, ## args) + +/** + * struct pdev_spectral - Radio specific spectral object + * @psptrl_pdev: Back-pointer to struct wlan_objmgr_pdev + * @spectral_sock: Spectral Netlink socket for sending samples to + * applications + * @psptrl_target_handle: reference to spectral lmac object + * @skb: Socket buffer for sending samples to applications + * @spectral_pid : Spectral port ID + */ +struct pdev_spectral { + struct wlan_objmgr_pdev *psptrl_pdev; + struct sock *spectral_sock; + void *psptrl_target_handle; + struct sk_buff *skb; + uint32_t spectral_pid; +}; + +struct wmi_spectral_cmd_ops; +/** + * struct spectral_context - spectral global context + * @psoc_obj: Reference to psoc global object + * @spectral_legacy_cbacks: Spectral legacy callbacks + * + * Call back functions to invoke independent of OL/DA + * @sptrlc_ucfg_phyerr_config: ucfg handler for phyerr + * @sptrlc_pdev_spectral_init: Init spectral + * @sptrlc_pdev_spectral_deinit: Deinit spectral + * @sptrlc_set_spectral_config: Set spectral configurations + * @sptrlc_get_spectral_config: Get spectral configurations + * @sptrlc_start_spectral_scan: Start spectral scan + * @sptrlc_stop_spectral_scan: Stop spectral scan + * @sptrlc_is_spectral_active: Check if spectral scan is active + * @sptrlc_is_spectral_enabled: Check if spectral is enabled + * @sptrlc_set_debug_level: Set debug level + * @sptrlc_get_debug_level: Get debug level + * @sptrlc_get_spectral_capinfo: Get spectral capability info + * @sptrlc_get_spectral_diagstats: Get spectral diag status + * @sptrlc_register_wmi_spectral_cmd_ops: Register wmi_spectral_cmd operations + * @sptrlc_register_netlink_cb: Register Netlink callbacks + * @sptrlc_use_nl_bcast: Check whether to use Netlink broadcast/unicast + * @sptrlc_deregister_netlink_cb: De-register Netlink callbacks + * @sptrlc_process_spectral_report: Process spectral report + */ +struct spectral_context { + struct wlan_objmgr_psoc *psoc_obj; + struct 
spectral_legacy_cbacks legacy_cbacks; + int (*sptrlc_spectral_control)(struct wlan_objmgr_pdev *pdev, + u_int id, void *indata, + uint32_t insize, void *outdata, + uint32_t *outsize); + int (*sptrlc_ucfg_phyerr_config)(struct wlan_objmgr_pdev *pdev, + void *ad); + void * (*sptrlc_pdev_spectral_init)(struct wlan_objmgr_pdev *pdev); + void (*sptrlc_pdev_spectral_deinit)(struct wlan_objmgr_pdev *pdev); + int (*sptrlc_set_spectral_config)(struct wlan_objmgr_pdev *pdev, + const uint32_t threshtype, + const uint32_t value); + void (*sptrlc_get_spectral_config)( + struct wlan_objmgr_pdev *pdev, + struct spectral_config *sptrl_config); + int (*sptrlc_start_spectral_scan)(struct wlan_objmgr_pdev *pdev); + void (*sptrlc_stop_spectral_scan)(struct wlan_objmgr_pdev *pdev); + bool (*sptrlc_is_spectral_active)(struct wlan_objmgr_pdev *pdev); + bool (*sptrlc_is_spectral_enabled)(struct wlan_objmgr_pdev *pdev); + int (*sptrlc_set_debug_level)(struct wlan_objmgr_pdev *pdev, + uint32_t debug_level); + uint32_t (*sptrlc_get_debug_level)(struct wlan_objmgr_pdev *pdev); + void (*sptrlc_get_spectral_capinfo)(struct wlan_objmgr_pdev *pdev, + void *outdata); + void (*sptrlc_get_spectral_diagstats)(struct wlan_objmgr_pdev *pdev, + void *outdata); + void (*sptrlc_register_wmi_spectral_cmd_ops)( + struct wlan_objmgr_pdev *pdev, + struct wmi_spectral_cmd_ops *cmd_ops); + void (*sptrlc_register_netlink_cb)( + struct wlan_objmgr_pdev *pdev, + struct spectral_nl_cb *nl_cb); + bool (*sptrlc_use_nl_bcast)(struct wlan_objmgr_pdev *pdev); + void (*sptrlc_deregister_netlink_cb)(struct wlan_objmgr_pdev *pdev); + int (*sptrlc_process_spectral_report)( + struct wlan_objmgr_pdev *pdev, + void *payload); +}; + +#endif /* _SPECTRAL_DEFS_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_direct_attach.c b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_direct_attach.c new file mode 100644 index 
0000000000000000000000000000000000000000..9df316ecc8cd2e34e93b3078f0e93e0d4bf834b6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_direct_attach.c @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2011,2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "spectral_cmn_api_i.h" +#include "../dispatcher/inc/wlan_spectral_tgt_api.h" + +void +spectral_ctx_init_da(struct spectral_context *sc) +{ + if (!sc) { + spectral_err("spectral context is null!"); + return; + } + sc->sptrlc_spectral_control = tgt_spectral_control; + sc->sptrlc_pdev_spectral_init = tgt_pdev_spectral_init; + sc->sptrlc_pdev_spectral_deinit = tgt_pdev_spectral_deinit; + sc->sptrlc_set_spectral_config = tgt_set_spectral_config; + sc->sptrlc_get_spectral_config = tgt_get_spectral_config; + sc->sptrlc_start_spectral_scan = tgt_start_spectral_scan; + sc->sptrlc_stop_spectral_scan = tgt_stop_spectral_scan; + sc->sptrlc_is_spectral_active = tgt_is_spectral_active; + sc->sptrlc_is_spectral_enabled = tgt_is_spectral_enabled; + sc->sptrlc_set_debug_level = tgt_set_debug_level; + sc->sptrlc_get_debug_level = tgt_get_debug_level; + sc->sptrlc_get_spectral_capinfo = tgt_get_spectral_capinfo; + sc->sptrlc_get_spectral_diagstats = tgt_get_spectral_diagstats; + sc->sptrlc_register_wmi_spectral_cmd_ops = + tgt_register_wmi_spectral_cmd_ops; + sc->sptrlc_register_netlink_cb = tgt_spectral_register_nl_cb; + sc->sptrlc_use_nl_bcast = tgt_spectral_use_nl_bcast; + sc->sptrlc_deregister_netlink_cb = tgt_spectral_deregister_nl_cb; + sc->sptrlc_process_spectral_report = tgt_spectral_process_report; +} diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_module.c b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_module.c new file mode 100644 index 0000000000000000000000000000000000000000..0f946e7e89c0b347a788a340fe1e75df584f5dc3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_module.c @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2011,2017-2018 The Linux Foundation. All rights reserved. 
+ * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include "spectral_defs_i.h" +#include + +MODULE_LICENSE("Dual BSD/GPL"); + +/** + * spectral_init_module() - Initialize Spectral module + * + * Return: None + */ +static int __init +spectral_init_module(void) +{ + spectral_info("qca_spectral module loaded"); + wlan_spectral_init(); + /* register spectral rxops */ + wlan_lmac_if_sptrl_set_rx_ops_register_cb + (wlan_lmac_if_sptrl_register_rx_ops); + /* register spectral pdev open handler */ + dispatcher_register_spectral_pdev_open_handler( + spectral_pdev_open); + + return 0; +} + +/** + * spectral_exit_module() - De-initialize and exit Spectral module + * + * Return: None + */ +static void __exit +spectral_exit_module(void) +{ + wlan_spectral_deinit(); + spectral_info("qca_spectral module unloaded"); +} + +module_init(spectral_init_module); +module_exit(spectral_exit_module); diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_offload.c b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_offload.c new file mode 100644 index 0000000000000000000000000000000000000000..b07c7a7fc20f2dc9b87c18a44abcfc6328e4193e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_offload.c @@ -0,0 
+1,50 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "spectral_cmn_api_i.h" +#include "spectral_ol_api_i.h" +#include "../dispatcher/inc/wlan_spectral_tgt_api.h" + +void +spectral_ctx_init_ol(struct spectral_context *sc) +{ + if (!sc) { + spectral_err("spectral context is null!"); + return; + } + sc->sptrlc_spectral_control = tgt_spectral_control; + sc->sptrlc_pdev_spectral_init = tgt_pdev_spectral_init; + sc->sptrlc_pdev_spectral_deinit = tgt_pdev_spectral_deinit; + sc->sptrlc_set_spectral_config = tgt_set_spectral_config; + sc->sptrlc_get_spectral_config = tgt_get_spectral_config; + sc->sptrlc_start_spectral_scan = tgt_start_spectral_scan; + sc->sptrlc_stop_spectral_scan = tgt_stop_spectral_scan; + sc->sptrlc_is_spectral_active = tgt_is_spectral_active; + sc->sptrlc_is_spectral_enabled = tgt_is_spectral_enabled; + sc->sptrlc_set_debug_level = tgt_set_debug_level; + sc->sptrlc_get_debug_level = tgt_get_debug_level; + sc->sptrlc_get_spectral_capinfo = tgt_get_spectral_capinfo; + sc->sptrlc_get_spectral_diagstats = tgt_get_spectral_diagstats; + sc->sptrlc_register_wmi_spectral_cmd_ops = + tgt_register_wmi_spectral_cmd_ops; + sc->sptrlc_register_netlink_cb = 
tgt_spectral_register_nl_cb; + sc->sptrlc_use_nl_bcast = tgt_spectral_use_nl_bcast; + sc->sptrlc_deregister_netlink_cb = tgt_spectral_deregister_nl_cb; + sc->sptrlc_process_spectral_report = tgt_spectral_process_report; +} diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_ol_api_i.h b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_ol_api_i.h new file mode 100644 index 0000000000000000000000000000000000000000..0ac69348b2ecd6a758e2cee6de600f921e086a57 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/core/spectral_ol_api_i.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _SPECTRAL_OL_API_I_H_ +#define _SPECTRAL_OL_API_I_H_ + +#include "spectral_defs_i.h" + +/** + * spectral_ctx_init_ol() - Internal function to initialize spectral context + * with offload specific functions + * @sc : spectral context + * + * Internal function to initialize spectral context with offload specific + * functions + * + * Return : None + */ +void spectral_ctx_init_ol(struct spectral_context *sc); + +#endif /* _SPECTRAL_OL_API_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/spectral_ioctl.h b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/spectral_ioctl.h new file mode 100644 index 0000000000000000000000000000000000000000..19c58b2407a540a1cbcf31dcc6e0c7ceaac6bcf1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/spectral_ioctl.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2011, 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _SPECTRAL_IOCTL_H_ +#define _SPECTRAL_IOCTL_H_ + +#include + +/* + * ioctl defines + */ + +#define SPECTRAL_SET_CONFIG (DFS_LAST_IOCTL + 1) +#define SPECTRAL_GET_CONFIG (DFS_LAST_IOCTL + 2) +#define SPECTRAL_SHOW_INTERFERENCE (DFS_LAST_IOCTL + 3) +#define SPECTRAL_ENABLE_SCAN (DFS_LAST_IOCTL + 4) +#define SPECTRAL_DISABLE_SCAN (DFS_LAST_IOCTL + 5) +#define SPECTRAL_ACTIVATE_SCAN (DFS_LAST_IOCTL + 6) +#define SPECTRAL_STOP_SCAN (DFS_LAST_IOCTL + 7) +#define SPECTRAL_SET_DEBUG_LEVEL (DFS_LAST_IOCTL + 8) +#define SPECTRAL_IS_ACTIVE (DFS_LAST_IOCTL + 9) +#define SPECTRAL_IS_ENABLED (DFS_LAST_IOCTL + 10) +#define SPECTRAL_CLASSIFY_SCAN (DFS_LAST_IOCTL + 11) +#define SPECTRAL_GET_CLASSIFIER_CONFIG (DFS_LAST_IOCTL + 12) +#define SPECTRAL_EACS (DFS_LAST_IOCTL + 13) +#define SPECTRAL_ACTIVATE_FULL_SCAN (DFS_LAST_IOCTL + 14) +#define SPECTRAL_STOP_FULL_SCAN (DFS_LAST_IOCTL + 15) +#define SPECTRAL_GET_CAPABILITY_INFO (DFS_LAST_IOCTL + 16) +#define SPECTRAL_GET_DIAG_STATS (DFS_LAST_IOCTL + 17) +#define SPECTRAL_GET_CHAN_WIDTH (DFS_LAST_IOCTL + 18) +#define SPECTRAL_GET_CHANINFO (DFS_LAST_IOCTL + 19) +#define SPECTRAL_CLEAR_CHANINFO (DFS_LAST_IOCTL + 20) +#define SPECTRAL_SET_ICM_ACTIVE (DFS_LAST_IOCTL + 21) +#define SPECTRAL_GET_NOMINAL_NOISEFLOOR (DFS_LAST_IOCTL + 22) +#define SPECTRAL_GET_DEBUG_LEVEL (DFS_LAST_IOCTL + 23) + +/* + * ioctl parameter types + */ + +#define SPECTRAL_PARAM_FFT_PERIOD (1) +#define SPECTRAL_PARAM_SCAN_PERIOD (2) +#define SPECTRAL_PARAM_SCAN_COUNT (3) +#define SPECTRAL_PARAM_SHORT_REPORT (4) +#define SPECTRAL_PARAM_SPECT_PRI (5) +#define SPECTRAL_PARAM_FFT_SIZE (6) +#define SPECTRAL_PARAM_GC_ENA (7) +#define SPECTRAL_PARAM_RESTART_ENA (8) +#define SPECTRAL_PARAM_NOISE_FLOOR_REF (9) +#define SPECTRAL_PARAM_INIT_DELAY (10) +#define SPECTRAL_PARAM_NB_TONE_THR (11) +#define SPECTRAL_PARAM_STR_BIN_THR (12) +#define SPECTRAL_PARAM_WB_RPT_MODE (13) +#define SPECTRAL_PARAM_RSSI_RPT_MODE (14) +#define SPECTRAL_PARAM_RSSI_THR (15) +#define 
SPECTRAL_PARAM_PWR_FORMAT (16) +#define SPECTRAL_PARAM_RPT_MODE (17) +#define SPECTRAL_PARAM_BIN_SCALE (18) +#define SPECTRAL_PARAM_DBM_ADJ (19) +#define SPECTRAL_PARAM_CHN_MASK (20) +#define SPECTRAL_PARAM_ACTIVE (21) +#define SPECTRAL_PARAM_STOP (22) +#define SPECTRAL_PARAM_ENABLE (23) + +struct spectral_ioctl_params { + int16_t spectral_fft_period; + int16_t pectral_period; + int16_t spectral_count; + uint16_t spectral_short_report; + uint16_t spectral_pri; +}; + +struct ath_spectral_caps { + uint8_t phydiag_cap; + uint8_t radar_cap; + uint8_t spectral_cap; + uint8_t advncd_spectral_cap; +}; + +#define SPECTRAL_IOCTL_PARAM_NOVAL (65535) + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_public_structs.h b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_public_structs.h new file mode 100644 index 0000000000000000000000000000000000000000..3217811bb6d8761af5b98148f8f7ce7b33b5b513 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_public_structs.h @@ -0,0 +1,530 @@ +/* + * Copyright (c) 2011,2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include "wlan_dfs_ioctl.h" + +#ifndef _WLAN_SPECTRAL_PUBLIC_STRUCTS_H_ +#define _WLAN_SPECTRAL_PUBLIC_STRUCTS_H_ + +#ifdef WIN32 +#pragma pack(push, spectral, 1) +#define __ATTRIB_PACKED +#else +#ifndef __ATTRIB_PACKED +#define __ATTRIB_PACKED __attribute__ ((packed)) +#endif +#endif + +#ifndef AH_MAX_CHAINS +#define AH_MAX_CHAINS 3 +#endif + +#define MAX_NUM_CHANNELS 255 +#define MAX_SPECTRAL_CHAINS 3 +#define MAX_NUM_BINS 520 +#define SPECTRAL_PHYERR_PARAM_NOVAL 65535 +/* 5 categories x (lower + upper) bands */ +#define MAX_INTERF 10 + +/* ioctl parameter types */ +#define SPECTRAL_PARAM_FFT_PERIOD (1) +#define SPECTRAL_PARAM_SCAN_PERIOD (2) +#define SPECTRAL_PARAM_SCAN_COUNT (3) +#define SPECTRAL_PARAM_SHORT_REPORT (4) +#define SPECTRAL_PARAM_SPECT_PRI (5) +#define SPECTRAL_PARAM_FFT_SIZE (6) +#define SPECTRAL_PARAM_GC_ENA (7) +#define SPECTRAL_PARAM_RESTART_ENA (8) +#define SPECTRAL_PARAM_NOISE_FLOOR_REF (9) +#define SPECTRAL_PARAM_INIT_DELAY (10) +#define SPECTRAL_PARAM_NB_TONE_THR (11) +#define SPECTRAL_PARAM_STR_BIN_THR (12) +#define SPECTRAL_PARAM_WB_RPT_MODE (13) +#define SPECTRAL_PARAM_RSSI_RPT_MODE (14) +#define SPECTRAL_PARAM_RSSI_THR (15) +#define SPECTRAL_PARAM_PWR_FORMAT (16) +#define SPECTRAL_PARAM_RPT_MODE (17) +#define SPECTRAL_PARAM_BIN_SCALE (18) +#define SPECTRAL_PARAM_DBM_ADJ (19) +#define SPECTRAL_PARAM_CHN_MASK (20) +#define SPECTRAL_PARAM_ACTIVE (21) +#define SPECTRAL_PARAM_STOP (22) +#define SPECTRAL_PARAM_ENABLE (23) + +#ifdef SPECTRAL_USE_EMU_DEFAULTS +/* Use defaults from emulation */ +#define SPECTRAL_SCAN_ACTIVE_DEFAULT (0x0) +#define SPECTRAL_SCAN_ENABLE_DEFAULT (0x0) +#define SPECTRAL_SCAN_COUNT_DEFAULT (0x0) +#define SPECTRAL_SCAN_PERIOD_DEFAULT (250) +#define SPECTRAL_SCAN_PRIORITY_DEFAULT (0x1) +#define SPECTRAL_SCAN_FFT_SIZE_DEFAULT (0x7) +#define SPECTRAL_SCAN_GC_ENA_DEFAULT (0x1) +#define SPECTRAL_SCAN_RESTART_ENA_DEFAULT (0x0) +#define SPECTRAL_SCAN_NOISE_FLOOR_REF_DEFAULT (0xa0) +#define 
SPECTRAL_SCAN_INIT_DELAY_DEFAULT (0x50) +#define SPECTRAL_SCAN_NB_TONE_THR_DEFAULT (0xc) +#define SPECTRAL_SCAN_STR_BIN_THR_DEFAULT (0x7) +#define SPECTRAL_SCAN_WB_RPT_MODE_DEFAULT (0x0) +#define SPECTRAL_SCAN_RSSI_RPT_MODE_DEFAULT (0x1) +#define SPECTRAL_SCAN_RSSI_THR_DEFAULT (0xf) +#define SPECTRAL_SCAN_PWR_FORMAT_DEFAULT (0x1) +#define SPECTRAL_SCAN_RPT_MODE_DEFAULT (0x2) +#define SPECTRAL_SCAN_BIN_SCALE_DEFAULT (0x1) +#define SPECTRAL_SCAN_DBM_ADJ_DEFAULT (0x0) +#define SPECTRAL_SCAN_CHN_MASK_DEFAULT (0x1) +#else +/* + * Static default values for spectral state and configuration. + * These definitions should be treated as temporary. Ideally, + * we should get the defaults from firmware - this will be discussed. + * + * Use defaults from Spectral Hardware Micro-Architecture + * document (v1.0) + */ +#define SPECTRAL_SCAN_ACTIVE_DEFAULT (0) +#define SPECTRAL_SCAN_ENABLE_DEFAULT (0) +#define SPECTRAL_SCAN_COUNT_DEFAULT (0) +#define SPECTRAL_SCAN_PERIOD_GEN_I_DEFAULT (35) +#define SPECTRAL_SCAN_PERIOD_GEN_II_DEFAULT (35) +#define SPECTRAL_SCAN_PERIOD_GEN_III_DEFAULT (224) +#define SPECTRAL_SCAN_PRIORITY_DEFAULT (1) +#define SPECTRAL_SCAN_FFT_SIZE_DEFAULT (7) +#define SPECTRAL_SCAN_GC_ENA_DEFAULT (1) +#define SPECTRAL_SCAN_RESTART_ENA_DEFAULT (0) +#define SPECTRAL_SCAN_NOISE_FLOOR_REF_DEFAULT (-96) +#define SPECTRAL_SCAN_INIT_DELAY_DEFAULT (80) +#define SPECTRAL_SCAN_NB_TONE_THR_DEFAULT (12) +#define SPECTRAL_SCAN_STR_BIN_THR_DEFAULT (8) +#define SPECTRAL_SCAN_WB_RPT_MODE_DEFAULT (0) +#define SPECTRAL_SCAN_RSSI_RPT_MODE_DEFAULT (0) +#define SPECTRAL_SCAN_RSSI_THR_DEFAULT (0xf0) +#define SPECTRAL_SCAN_PWR_FORMAT_DEFAULT (0) +#define SPECTRAL_SCAN_RPT_MODE_DEFAULT (2) +#define SPECTRAL_SCAN_BIN_SCALE_DEFAULT (1) +#define SPECTRAL_SCAN_DBM_ADJ_DEFAULT (1) +#define SPECTRAL_SCAN_CHN_MASK_DEFAULT (1) +#endif /* SPECTRAL_USE_EMU_DEFAULTS */ + +/* The below two definitions apply only to pre-11ac chipsets */ +#define SPECTRAL_SCAN_SHORT_REPORT_DEFAULT (1) +#define 
SPECTRAL_SCAN_FFT_PERIOD_DEFAULT (1) + +/** + * enum wlan_cfg80211_spectral_vendorcmd_handler_idx - Indices to cfg80211 + * spectral vendor command handlers + * @SPECTRAL_SCAN_START_HANDLER_IDX: Index to SPECTRAL_SCAN_START handler + * @SPECTRAL_SCAN_STOP_HANDLER_IDX: Index to SPECTRAL_SCAN_STOP handler + * @SPECTRAL_SCAN_GET_CONFIG_HANDLER_IDX: Index to SPECTRAL_SCAN_GET_CONFIG + * handler + * @SPECTRAL_SCAN_GET_DIAG_STATS_HANDLER_IDX: Index to + * SPECTRAL_SCAN_GET_DIAG_STATS handler + * @SPECTRAL_SCAN_GET_CAP_HANDLER_IDX: Index to SPECTRAL_SCAN_GET_CAP handler + * @SPECTRAL_SCAN_GET_STATUS_HANDLER_IDX: Index to SPECTRAL_SCAN_GET_STATUS + * handler + * @SPECTRAL_SCAN_VENDOR_CMD_HANDLER_MAX: Number of cfg80211 spectral + * vendor command handlers supported + */ +enum wlan_cfg80211_spectral_vendorcmd_handler_idx { + SPECTRAL_SCAN_START_HANDLER_IDX, + SPECTRAL_SCAN_STOP_HANDLER_IDX, + SPECTRAL_SCAN_GET_CONFIG_HANDLER_IDX, + SPECTRAL_SCAN_GET_DIAG_STATS_HANDLER_IDX, + SPECTRAL_SCAN_GET_CAP_HANDLER_IDX, + SPECTRAL_SCAN_GET_STATUS_HANDLER_IDX, + SPECTRAL_SCAN_VENDOR_CMD_HANDLER_MAX, +}; + +/** + * enum spectral_debug - Spectral debug level + * @DEBUG_SPECTRAL: Minimal SPECTRAL debug + * @DEBUG_SPECTRAL1: Normal SPECTRAL debug + * @DEBUG_SPECTRAL2: Maximal SPECTRAL debug + * @DEBUG_SPECTRAL3: Matched filterID display + * @DEBUG_SPECTRAL4: One time dump of FFT report + */ +enum spectral_debug { + DEBUG_SPECTRAL = 0x00000100, + DEBUG_SPECTRAL1 = 0x00000200, + DEBUG_SPECTRAL2 = 0x00000400, + DEBUG_SPECTRAL3 = 0x00000800, + DEBUG_SPECTRAL4 = 0x00001000, +}; + +/** + * enum spectral_capability_type - Spectral capability type + * @SPECTRAL_CAP_PHYDIAG: Phydiag capability + * @SPECTRAL_CAP_RADAR: Radar detection capability + * @SPECTRAL_CAP_SPECTRAL_SCAN: Spectral capability + * @SPECTRAL_CAP_ADVNCD_SPECTRAL_SCAN: Advanced spectral capability + */ +enum spectral_capability_type { + SPECTRAL_CAP_PHYDIAG, + SPECTRAL_CAP_RADAR, + SPECTRAL_CAP_SPECTRAL_SCAN, + 
SPECTRAL_CAP_ADVNCD_SPECTRAL_SCAN, +}; + +/** + * struct spectral_chan_stats - channel status info + * @cycle_count: Cycle count + * @channel_load: Channel load + * @per: Period + * @noisefloor: Noise floor + * @comp_usablity: Computed usability + * @maxregpower: Maximum allowed regulatory power + * @comp_usablity_sec80: Computed usability of secondary 80 Mhz + * @maxregpower_sec80: Max regulatory power of secondary 80 Mhz + */ +struct spectral_chan_stats { + int cycle_count; + int channel_load; + int per; + int noisefloor; + uint16_t comp_usablity; + int8_t maxregpower; + uint16_t comp_usablity_sec80; + int8_t maxregpower_sec80; +}; + +/** + * struct spectral_diag_stats - spectral diag stats + * @spectral_mismatch: Spectral TLV signature mismatches + * @spectral_sec80_sfft_insufflen: Insufficient length when parsing for + * Secondary 80 Search FFT report + * @spectral_no_sec80_sfft: Secondary 80 Search FFT report + * TLV not found + * @spectral_vhtseg1id_mismatch: VHT Operation Segment 1 ID + * mismatches in Search FFT report + * @spectral_vhtseg2id_mismatch: VHT Operation Segment 2 ID + * mismatches in Search FFT report + */ +struct spectral_diag_stats { + uint64_t spectral_mismatch; + uint64_t spectral_sec80_sfft_insufflen; + uint64_t spectral_no_sec80_sfft; + uint64_t spectral_vhtseg1id_mismatch; + uint64_t spectral_vhtseg2id_mismatch; +}; + +/** + * struct spectral_caps - Spectral capabilities structure + * @phydiag_cap: Phydiag capability + * @radar_cap: Radar detection capability + * @spectral_cap: Spectral capability + * @advncd_spectral_cap: Advanced spectral capability + * @hw_gen: Spectral hw generation + */ +struct spectral_caps { + uint8_t phydiag_cap; + uint8_t radar_cap; + uint8_t spectral_cap; + uint8_t advncd_spectral_cap; + uint32_t hw_gen; +}; + +/** + * struct spectral_config - spectral config parameters + * @ss_fft_period: Skip interval for FFT reports + * @ss_period: Spectral scan period + * @ss_count: # of reports to return from ss_active + * 
@ss_short_report: Set to report only 1 set of FFT results + * @radar_bin_thresh_sel: Select threshold to classify strong bin for FFT + * @ss_spectral_pri: Priority, and are we doing a noise power cal ? + * @ss_fft_size: Defines the number of FFT data points to compute, + * defined as a log index num_fft_pts = + * 2^ss_fft_size + * @ss_gc_ena: Set, to enable targeted gain change before + * starting the spectral scan FFT + * @ss_restart_ena: Set, to enable abort of receive frames when in high + * priority and a spectral scan is queued + * @ss_noise_floor_ref: Noise floor reference number (signed) for the + * calculation of bin power (dBm) Though stored as an + * unsigned this should be treated as a signed 8-bit int. + * @ss_init_delay: Disallow spectral scan triggers after tx/rx packets + * by setting this delay value to roughly SIFS time + * period or greater Delay timer count in units of 0.25us + * @ss_nb_tone_thr: Number of strong bins (inclusive) per sub-channel, + * below which a signal is declared a narrowband tone + * @ss_str_bin_thr: Bin/max_bin ratio threshold over which a bin is + * declared strong (for spectral scan bandwidth analysis) + * @ss_wb_rpt_mode: Set this bit to report spectral scans as EXT_BLOCKER + * (phy_error=36), if none of the sub-channels are + * deemed narrowband + * @ss_rssi_rpt_mode: Set this bit to report spectral scans as EXT_BLOCKER + * (phy_error=36), if the ADC RSSI is below the + * threshold ss_rssi_thr + * @ss_rssi_thr: ADC RSSI must be greater than or equal to this + * threshold (signed Db) to ensure spectral scan + * reporting with normal phy error codes (please see + * ss_rssi_rpt_mode above).Though stored as an unsigned + * value, this should be treated as a signed 8-bit int + * @ss_pwr_format: Format of frequency bin magnitude for spectral scan + * triggered FFTs 0: linear magnitude + * 1: log magnitude (20*log10(lin_mag), 1/2 dB step size) + * @ss_rpt_mode: Format of per-FFT reports to software for spectral + * scan 
triggered FFTs + * 0: No FFT report (only pulse end summary) + * 1: 2-dword summary of metrics for each completed FFT + * 2: 2-dword summary + 1x-oversampled bins(in-band) per + * FFT + * 3: 2-dword summary + 2x-oversampled bins (all) per FFT + * @ss_bin_scale: Number of LSBs to shift out to scale the FFT bins + * for spectral scan triggered FFTs + * @ss_dbm_adj: Set (with ss_pwr_format=1), to report bin + * magnitudes + * converted to dBm power using the noisefloor + * calibration results + * @ss_chn_mask: Per chain enable mask to select input ADC for search + * FFT + * @ss_nf_cal: nf calibrated values for ctl+ext + * @ss_nf_pwr: nf pwr values for ctl+ext + * @ss_nf_temp_data: temperature data taken during nf scan + */ +struct spectral_config { + uint16_t ss_fft_period; + uint16_t ss_period; + uint16_t ss_count; + uint16_t ss_short_report; + uint8_t radar_bin_thresh_sel; + uint16_t ss_spectral_pri; + uint16_t ss_fft_size; + uint16_t ss_gc_ena; + uint16_t ss_restart_ena; + uint16_t ss_noise_floor_ref; + uint16_t ss_init_delay; + uint16_t ss_nb_tone_thr; + uint16_t ss_str_bin_thr; + uint16_t ss_wb_rpt_mode; + uint16_t ss_rssi_rpt_mode; + uint16_t ss_rssi_thr; + uint16_t ss_pwr_format; + uint16_t ss_rpt_mode; + uint16_t ss_bin_scale; + uint16_t ss_dbm_adj; + uint16_t ss_chn_mask; + int8_t ss_nf_cal[AH_MAX_CHAINS * 2]; + int8_t ss_nf_pwr[AH_MAX_CHAINS * 2]; + int32_t ss_nf_temp_data; +}; + +/** + * struct spectral_scan_state - State of spectral scan + * @is_active: Is spectral scan active + * @is_enabled: Is spectral scan enabled + */ +struct spectral_scan_state { + uint8_t is_active; + uint8_t is_enabled; +}; + +/** + * enum dcs_int_type - Interference type indicated by DCS + * @SPECTRAL_DCS_INT_NONE: No interference + * @SPECTRAL_DCS_INT_CW: CW interference + * @SPECTRAL_DCS_INT_WIFI: WLAN interference + */ +enum dcs_int_type { + SPECTRAL_DCS_INT_NONE, + SPECTRAL_DCS_INT_CW, + SPECTRAL_DCS_INT_WIFI +}; + +/** + * struct interf_rsp - Interference record + * 
@interf_type: eINTERF_TYPE giving type of interference + * @interf_min_freq: Minimum frequency in MHz at which interference has been + * found + * @interf_max_freq: Maximum frequency in MHz at which interference has been + * found + * @advncd_spectral_cap: Advanced spectral capability + */ +struct interf_rsp { + uint8_t interf_type; + uint16_t interf_min_freq; + uint16_t interf_max_freq; +} __ATTRIB_PACKED; + +/** + * struct interf_src_rsp - List of interference sources + * @count: Number of interference records + * @interf: Array of interference records + */ +struct interf_src_rsp { + uint16_t count; + struct interf_rsp interf[MAX_INTERF]; +} __ATTRIB_PACKED; + +/** + * struct spectral_classifier_params - spectral classifier parameters + * @spectral_20_40_mode: Is AP in 20/40 mode? + * @spectral_dc_index: DC index + * @spectral_dc_in_mhz: DC in MHz + * @upper_chan_in_mhz: Upper channel in MHz + * @lower_chan_in_mhz: Lower channel in MHz + */ +struct spectral_classifier_params { + int spectral_20_40_mode; + int spectral_dc_index; + int spectral_dc_in_mhz; + int upper_chan_in_mhz; + int lower_chan_in_mhz; +} __ATTRIB_PACKED; + +/** + * struct spectral_samp_data - Spectral Analysis Messaging Protocol Data format + * @spectral_data_len: Indicates the bin size + * @spectral_data_len_sec80: Indicates the bin size for secondary 80 segment + * @spectral_rssi: Indicates RSSI + * @spectral_rssi_sec80: Indicates RSSI for secondary 80 segment + * @spectral_combined_rssi: Indicates combined RSSI from all antennas + * @spectral_upper_rssi: Indicates RSSI of upper band + * @spectral_lower_rssi: Indicates RSSI of lower band + * @spectral_chain_ctl_rssi: RSSI for control channel, for all antennas + * @spectral_chain_ext_rssi: RSSI for extension channel, for all antennas + * @spectral_max_scale: Indicates scale factor + * @spectral_bwinfo: Indicates bandwidth info + * @spectral_tstamp: Indicates timestamp + * @spectral_max_index: Indicates the index of max magnitude + * 
@spectral_max_index_sec80: Indicates the index of max magnitude for secondary + * 80 segment + * @spectral_max_mag: Indicates the maximum magnitude + * @spectral_max_mag_sec80: Indicates the maximum magnitude for secondary 80 + * segment + * @spectral_max_exp: Indicates the max exp + * @spectral_last_tstamp: Indicates the last time stamp + * @spectral_upper_max_index: Indicates the index of max mag in upper band + * @spectral_lower_max_index: Indicates the index of max mag in lower band + * @spectral_nb_upper: Not Used + * @spectral_nb_lower: Not Used + * @classifier_params: Indicates classifier parameters + * @bin_pwr_count: Indicates the number of FFT bins + * @lb_edge_extrabins: Number of extra bins on left band edge + * @rb_edge_extrabins: Number of extra bins on right band edge + * @bin_pwr_count_sec80: Indicates the number of FFT bins in secondary 80 + * segment + * @bin_pwr: Contains FFT magnitudes + * @bin_pwr_sec80: Contains FFT magnitudes for the secondary 80 + * segment + * @interf_list: List of interfernce sources + * @noise_floor: Indicates the current noise floor + * @noise_floor_sec80: Indicates the current noise floor for secondary 80 + * segment + * @ch_width: Channel width 20/40/80/160 MHz + */ +struct spectral_samp_data { + int16_t spectral_data_len; + int16_t spectral_data_len_sec80; + int16_t spectral_rssi; + int16_t spectral_rssi_sec80; + int8_t spectral_combined_rssi; + int8_t spectral_upper_rssi; + int8_t spectral_lower_rssi; + int8_t spectral_chain_ctl_rssi[MAX_SPECTRAL_CHAINS]; + int8_t spectral_chain_ext_rssi[MAX_SPECTRAL_CHAINS]; + uint8_t spectral_max_scale; + int16_t spectral_bwinfo; + int32_t spectral_tstamp; + int16_t spectral_max_index; + int16_t spectral_max_index_sec80; + int16_t spectral_max_mag; + int16_t spectral_max_mag_sec80; + uint8_t spectral_max_exp; + int32_t spectral_last_tstamp; + int16_t spectral_upper_max_index; + int16_t spectral_lower_max_index; + uint8_t spectral_nb_upper; + uint8_t spectral_nb_lower; + struct 
spectral_classifier_params classifier_params; + uint16_t bin_pwr_count; + /* + * For 11ac chipsets prior to AR900B version 2.0, a max of 512 bins are + * delivered. However, there can be additional bins reported for + * AR900B version 2.0 and QCA9984 as described next: + * + * AR900B version 2.0: An additional tone is processed on the right + * hand side in order to facilitate detection of radar pulses out to + * the extreme band-edge of the channel frequency. + * Since the HW design processes four tones at a time, + * this requires one additional Dword to be added to the + * search FFT report. + * + * QCA9984: When spectral_scan_rpt_mode=2, i.e 2-dword summary + + * 1x-oversampled bins (in-band) per FFT, + * then 8 more bins (4 more on left side and 4 more on right side) + * are added. + */ + uint8_t lb_edge_extrabins; + uint8_t rb_edge_extrabins; + uint16_t bin_pwr_count_sec80; + uint8_t bin_pwr[MAX_NUM_BINS]; + uint8_t bin_pwr_sec80[MAX_NUM_BINS]; + struct interf_src_rsp interf_list; + int16_t noise_floor; + int16_t noise_floor_sec80; + uint32_t ch_width; +} __ATTRIB_PACKED; + +/** + * struct spectral_samp_msg - Spectral SAMP message + * @signature: Validates the SAMP message + * @freq: Operating frequency in MHz + * @vhtop_ch_freq_seg1: VHT Segment 1 centre frequency in MHz + * @vhtop_ch_freq_seg2: VHT Segment 2 centre frequency in MHz + * @freq_loading: How busy was the channel + * @dcs_enabled: Whether DCS is enabled + * @int_type: Interference type indicated by DCS + * @macaddr: Indicates the device interface + * @samp_data: SAMP Data + */ +struct spectral_samp_msg { + uint32_t signature; + uint16_t freq; + uint16_t vhtop_ch_freq_seg1; + uint16_t vhtop_ch_freq_seg2; + uint16_t freq_loading; + uint16_t dcs_enabled; + enum dcs_int_type int_type; + uint8_t macaddr[6]; + struct spectral_samp_data samp_data; +} __ATTRIB_PACKED; + +/* Forward declarations */ +struct wlan_objmgr_pdev; + +/** + * struct spectral_nl_cb - Spectral Netlink callbacks + * @get_nbuff: Get 
the socket buffer to send the data to the application + * @send_nl_bcast: Send data to the application using netlink broadcast + * @send_nl_unicast: Send data to the application using netlink unicast + */ +struct spectral_nl_cb { + void *(*get_nbuff)(struct wlan_objmgr_pdev *pdev); + int (*send_nl_bcast)(struct wlan_objmgr_pdev *pdev); + int (*send_nl_unicast)(struct wlan_objmgr_pdev *pdev); +}; +#ifdef WIN32 +#pragma pack(pop, spectral) +#endif +#ifdef __ATTRIB_PACKED +#undef __ATTRIB_PACKED +#endif + +#endif /* _WLAN_SPECTRAL_PUBLIC_STRUCTS_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..8eac4cbf6c94d5fcc82b600d404c5dfe1f26d26d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_tgt_api.h @@ -0,0 +1,251 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _WLAN_SPECTRAL_TGT_API_H_ +#define _WLAN_SPECTRAL_TGT_API_H_ + +#include +#include +#include "../../core/spectral_cmn_api_i.h" + +/** + * tgt_get_target_handle() - Get target_if handle + * @pdev: Pointer to pdev + * + * Get handle to target_if internal Spectral data + * + * Return: Handle to target_if internal Spectral data on success, NULL on + * failure + */ +void *tgt_get_target_handle(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_spectral_control()- handler for demultiplexing requests from higher layer + * @pdev: reference to global pdev object + * @id: spectral config command id + * @indata: reference to input data + * @insize: input data size + * @outdata: reference to output data + * @outsize: output data size + * + * This function processes the spectral config command + * and appropriate handlers are invoked. + * + * Return: 0 success else failure + */ +int tgt_spectral_control(struct wlan_objmgr_pdev *pdev, + u_int id, void *indata, u_int32_t insize, + void *outdata, u_int32_t *outsize); + +/** + * tgt_pdev_spectral_init() - implementation for spectral init + * @pdev: Pointer to pdev + * + * Return: On success, pointer to Spectral target_if internal private data, on + * failure, NULL + */ +void *tgt_pdev_spectral_init(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_pdev_spectral_deinit() - implementation for spectral de-init + * @pdev: Pointer to pdev + * + * Return: None + */ +void tgt_pdev_spectral_deinit(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_set_spectral_config() - Set spectral config + * @pdev: Pointer to pdev object + * @threshtype: spectral parameter type + * @value: value to be configured for the given spectral parameter + * + * Implementation for setting spectral config + * + * Return: 0 on success else failure + */ +int tgt_set_spectral_config(struct wlan_objmgr_pdev *pdev, + const u_int32_t threshtype, + const u_int32_t value); + +/** + * tgt_get_spectral_config() - Get spectral configuration + * @pdev: Pointer to pdev 
object + * @param: Pointer to spectral_config structure in which the configuration + * should be returned + * + * Implementation for getting the current spectral configuration + * + * Return: None + */ +void tgt_get_spectral_config(struct wlan_objmgr_pdev *pdev, + struct spectral_config *sptrl_config); + +/** + * tgt_start_spectral_scan() - Start spectral scan + * @pdev: Pointer to pdev object + * + * Implementation for starting spectral scan + * + * Return: 0 in case of success, -1 on failure + */ +int tgt_start_spectral_scan(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_stop_spectral_scan() - Stop spectral scan + * @pdev: Pointer to pdev object + * + * Implementation for stop spectral scan + * + * Return: None + */ +void tgt_stop_spectral_scan(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_is_spectral_active() - Get whether Spectral is active + * @pdev: Pointer to pdev object + * + * Implementation to get whether Spectral is active + * + * Return: True if Spectral is active, false if Spectral is not active + */ +bool tgt_is_spectral_active(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_is_spectral_enabled() - Get whether Spectral is active + * @pdev: Pointer to pdev object + * + * Implementation to get whether Spectral is active + * + * Return: True if Spectral is active, false if Spectral is not active + */ +bool tgt_is_spectral_enabled(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_set_debug_level() - Set debug level for Spectral + * @pdev: Pointer to pdev object + * @debug_level: Debug level + * + * Implementation to set the debug level for Spectral + * + * Return: 0 in case of success + */ +int tgt_set_debug_level(struct wlan_objmgr_pdev *pdev, u_int32_t debug_level); + +/** + * tgt_get_debug_level() - Get debug level for Spectral + * @pdev: Pointer to pdev object + * + * Implementation to get the debug level for Spectral + * + * Return: Current debug level + */ +uint32_t tgt_get_debug_level(struct wlan_objmgr_pdev *pdev); + +/** + * 
tgt_get_spectral_capinfo() - Get Spectral capability information + * @pdev: Pointer to pdev object + * @outdata: Buffer into which data should be copied + * + * Implementation to get the spectral capability information + * + * Return: void + */ +void tgt_get_spectral_capinfo(struct wlan_objmgr_pdev *pdev, void *outdata); + +/** + * tgt_get_spectral_diagstats() - Get Spectral diagnostic statistics + * @pdev: Pointer to pdev object + * @outdata: Buffer into which data should be copied + * + * Implementation to get the spectral diagnostic statistics + * + * Return: void + */ +void tgt_get_spectral_diagstats(struct wlan_objmgr_pdev *pdev, void *outdata); + +/** + * tgt_register_wmi_spectral_cmd_ops() - Register wmi_spectral_cmd_ops + * @cmd_ops: Pointer to the structure having wmi_spectral_cmd function pointers + * @pdev: Pointer to pdev object + * + * Implementation to register wmi_spectral_cmd_ops in spectral + * internal data structure + * + * Return: void + */ +void tgt_register_wmi_spectral_cmd_ops(struct wlan_objmgr_pdev *pdev, + struct wmi_spectral_cmd_ops *cmd_ops); + +/** + * tgt_spectral_register_nl_cb() - Register Netlink callbacks + * @pdev: Pointer to pdev object + * @nl_cb: Netlink callbacks to register + * + * Return: void + */ +void tgt_spectral_register_nl_cb(struct wlan_objmgr_pdev *pdev, + struct spectral_nl_cb *nl_cb); + +/** + * tgt_spectral_use_nl_bcast() - Get whether to use broadcast/unicast while + * sending Netlink messages to the application layer + * @pdev: Pointer to pdev object + * + * Return: true for broadcast, false for unicast + */ +bool tgt_spectral_use_nl_bcast(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_spectral_deregister_nl_cb() - De-register Netlink callbacks + * @pdev: Pointer to pdev object + * + * Return: void + */ +void tgt_spectral_deregister_nl_cb(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_spectral_process_report() - Process spectral report + * @pdev: Pointer to pdev object + * @payload: Pointer to spectral report 
buffer + * + * Return: status + */ +int +tgt_spectral_process_report(struct wlan_objmgr_pdev *pdev, + void *payload); + +/** + * tgt_spectral_register_to_dbr() - Register to direct dma + * @pdev: Pointer to pdev object + * + * Return: QDF_STATUS + */ +QDF_STATUS +tgt_spectral_register_to_dbr(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_spectral_get_target_type() - Get target type + * @psoc: Pointer to psoc object + * + * Return: target type + */ +uint32_t +tgt_spectral_get_target_type(struct wlan_objmgr_psoc *psoc); +#endif /* _WLAN_SPECTRAL_TGT_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..264cce0b6f566455c12a27b175666a3aad014e9f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_ucfg_api.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _WLAN_SPECTRAL_UCFG_API_H_ +#define _WLAN_SPECTRAL_UCFG_API_H_ + +#include +#include + +/* Spectral specific UCFG set operations */ + +/** + * ucfg_spectral_control() - Carry out Spectral control operations + * @pdev: Pointer to pdev + * @id: Spectral operation ID + * @indata: Pointer to input data + * @insize: Size of indata buffer + * @outdata: Pointer to buffer where the output should be stored + * @outsize: Size of outdata buffer + * + * Carry out Spectral specific UCFG control get/set operations + * + * Return: 0 on success, negative value on failure + */ +int ucfg_spectral_control(struct wlan_objmgr_pdev *pdev, + u_int id, + void *indata, + uint32_t insize, void *outdata, uint32_t *outsize); + +/** + * ucfg_spectral_scan_set_ppid() - configure pid of spectral tool + * @pdev: Pointer to pdev + * @ppid: Spectral tool pid + * + * Configure pid of spectral tool + * + * Return: None + */ +void ucfg_spectral_scan_set_ppid(struct wlan_objmgr_pdev *pdev, + uint32_t ppid); + +#endif /* _WLAN_SPECTRAL_UCFG_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_utils_api.h b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_utils_api.h new file mode 100644 index 0000000000000000000000000000000000000000..fcc72520a16d33f982008cbaa822145e8c04b1ff --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/inc/wlan_spectral_utils_api.h @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_SPECTRAL_UTILS_API_H_ +#define _WLAN_SPECTRAL_UTILS_API_H_ + +#include +#include + +/* Forward declaration */ +struct direct_buf_rx_data; +struct wmi_spectral_cmd_ops; + +/** + * wlan_spectral_init() - API to init spectral component + * + * This API is invoked from dispatcher init during all component init. + * This API will register all required handlers for pdev and peer object + * create/delete notification. + * + * Return: SUCCESS, + * Failure + */ +QDF_STATUS wlan_spectral_init(void); + +/** + * wlan_spectral_deinit() - API to deinit spectral component + * + * This API is invoked from dispatcher deinit during all component deinit. + * This API will unregister all registered handlers for pdev and peer object + * create/delete notification. 
+ * + * Return: SUCCESS, + * Failure + */ +QDF_STATUS wlan_spectral_deinit(void); + +/** + * wlan_lmac_if_sptrl_register_rx_ops() - Register lmac interface Rx operations + * @rx_ops: Pointer to lmac interface Rx operations structure + * + * API to register spectral related lmac interface Rx operations + * + * Return: None + */ +void +wlan_lmac_if_sptrl_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops); + +/** +* wlan_register_wmi_spectral_cmd_ops() - Register operations related to wmi +* commands on spectral parameters +* @pdev - the physical device object +* @cmd_ops - pointer to the structure holding the operations +* related to wmi commands on spectral parameters +* +* API to register operations related to wmi commands on spectral parameters +* +* Return: None +*/ +void +wlan_register_wmi_spectral_cmd_ops(struct wlan_objmgr_pdev *pdev, + struct wmi_spectral_cmd_ops *cmd_ops); + +/** + * struct spectral_legacy_cbacks - Spectral legacy callbacks + * @vdev_get_chan_freq: Get channel frequency + * @vdev_get_ch_width: Get channel width + * @vdev_get_sec20chan_freq_mhz: Get seconadry 20 frequency + */ +struct spectral_legacy_cbacks { + int16_t (*vdev_get_chan_freq)(struct wlan_objmgr_vdev *vdev); + enum phy_ch_width (*vdev_get_ch_width)(struct wlan_objmgr_vdev *vdev); + int (*vdev_get_sec20chan_freq_mhz)(struct wlan_objmgr_vdev *vdev, + uint16_t *sec20chan_freq); +}; + +/** + * spectral_vdev_get_chan_freq - Get vdev channel frequency + * @vdev: vdev object + * + * Return: vdev operating frequency + */ +int16_t spectral_vdev_get_chan_freq(struct wlan_objmgr_vdev *vdev); + +/** + * spectral_vdev_get_sec20chan_freq_mhz - Get vdev secondary channel frequency + * @vdev: vdev object + * @sec20chan_freq: secondary channel frequency + * + * Return: secondary channel freq + */ +int spectral_vdev_get_sec20chan_freq_mhz(struct wlan_objmgr_vdev *vdev, + uint16_t *sec20chan_freq); + +/** + * spectral_register_legacy_cb() - Register spectral legacy callbacks + * commands on 
spectral parameters + * @psoc - the physical device object + * @legacy_cbacks - Reference to struct spectral_legacy_cbacks from which + * function pointers need to be copied + * + * API to register spectral related legacy callbacks + * + * Return: QDF_STATUS_SUCCESS upon successful registration, + * QDF_STATUS_E_FAILURE upon failure + */ +QDF_STATUS spectral_register_legacy_cb( + struct wlan_objmgr_psoc *psoc, + struct spectral_legacy_cbacks *legacy_cbacks); + +/** + * spectral_vdev_get_ch_width() - Get the channel bandwidth + * @vdev - Pointer to vdev + * + * API to get the channel bandwidth of a given vdev + * + * Return: Enumeration corresponding to the channel bandwidth + */ +enum phy_ch_width +spectral_vdev_get_ch_width(struct wlan_objmgr_vdev *vdev); + +/** + * spectral_pdev_open() - Spectral pdev open handler + * @pdev: pointer to pdev object + * + * API to execute operations on pdev open + * + * Return: QDF_STATUS_SUCCESS upon successful registration, + * QDF_STATUS_E_FAILURE upon failure + */ +QDF_STATUS spectral_pdev_open(struct wlan_objmgr_pdev *pdev); + +#ifdef DIRECT_BUF_RX_ENABLE +/** + * spectral_dbr_event_handler() - Spectral dbr event handler + * @pdev: pointer to pdev object + * @payload: dbr event buffer + * + * API to handle spectral dbr event + * + * Return: status + */ +int spectral_dbr_event_handler(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_data *payload); +#endif +#endif /* _WLAN_SPECTRAL_UTILS_API_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..fcc2f66ee0af197b420466ffa26a4b91d8a68eee --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_tgt_api.c @@ -0,0 +1,303 @@ +/* + * Copyright (c) 2011,2017-2018 The Linux Foundation. All rights reserved. 
+ * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include + +void * +tgt_get_target_handle(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_spectral *ps; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return NULL; + } + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + if (!ps) { + spectral_err("PDEV SPECTRAL object is NULL!"); + return NULL; + } + return ps->psptrl_target_handle; +} + +int +tgt_spectral_control( + struct wlan_objmgr_pdev *pdev, + u_int id, + void *indata, + u_int32_t insize, void *outdata, u_int32_t *outsize) +{ + struct spectral_context *sc; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return -EPERM; + } + sc = spectral_get_spectral_ctx_from_pdev(pdev); + if (!sc) { + spectral_err("spectral context is NULL!"); + return -EPERM; + } + return spectral_control_cmn(pdev, id, indata, insize, outdata, outsize); +} + +void * +tgt_pdev_spectral_init(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_pdev_spectral_init( + pdev); +} + +void +tgt_pdev_spectral_deinit(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + 
psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_pdev_spectral_deinit(pdev); +} + +int +tgt_set_spectral_config( + struct wlan_objmgr_pdev *pdev, + const u_int32_t threshtype, const u_int32_t value) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_set_spectral_config( + pdev, threshtype, value); +} + +void +tgt_get_spectral_config( + struct wlan_objmgr_pdev *pdev, + struct spectral_config *sptrl_config) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_get_spectral_config( + pdev, + sptrl_config); +} + +int +tgt_start_spectral_scan(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_start_spectral_scan( + pdev); +} + +void +tgt_stop_spectral_scan(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_stop_spectral_scan(pdev); +} + +bool +tgt_is_spectral_active(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_is_spectral_active( + pdev); +} + +bool +tgt_is_spectral_enabled(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_is_spectral_enabled( + pdev); +} + +int +tgt_set_debug_level(struct wlan_objmgr_pdev *pdev, u_int32_t debug_level) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_set_debug_level( + pdev, + debug_level); +} + +u_int32_t +tgt_get_debug_level(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return 
psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_get_debug_level(pdev); +} + +void +tgt_get_spectral_capinfo(struct wlan_objmgr_pdev *pdev, void *outdata) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_get_spectral_capinfo( + pdev, outdata); +} + +void +tgt_get_spectral_diagstats(struct wlan_objmgr_pdev *pdev, void *outdata) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + return psoc->soc_cb.tx_ops.sptrl_tx_ops.sptrlto_get_spectral_diagstats( + pdev, outdata); +} + +void +tgt_register_wmi_spectral_cmd_ops( + struct wlan_objmgr_pdev *pdev, + struct wmi_spectral_cmd_ops *cmd_ops) +{ + struct wlan_objmgr_psoc *psoc = NULL; + struct wlan_lmac_if_sptrl_tx_ops *psptrl_tx_ops = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + + psptrl_tx_ops = &psoc->soc_cb.tx_ops.sptrl_tx_ops; + + return psptrl_tx_ops->sptrlto_register_wmi_spectral_cmd_ops(pdev, + cmd_ops); +} + +void +tgt_spectral_register_nl_cb( + struct wlan_objmgr_pdev *pdev, + struct spectral_nl_cb *nl_cb) +{ + struct wlan_objmgr_psoc *psoc = NULL; + struct wlan_lmac_if_sptrl_tx_ops *psptrl_tx_ops = NULL; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return; + } + psoc = wlan_pdev_get_psoc(pdev); + + psptrl_tx_ops = &psoc->soc_cb.tx_ops.sptrl_tx_ops; + + return psptrl_tx_ops->sptrlto_register_netlink_cb(pdev, + nl_cb); +} + +bool +tgt_spectral_use_nl_bcast(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + struct wlan_lmac_if_sptrl_tx_ops *psptrl_tx_ops = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + + psptrl_tx_ops = &psoc->soc_cb.tx_ops.sptrl_tx_ops; + + return psptrl_tx_ops->sptrlto_use_nl_bcast(pdev); +} + +void tgt_spectral_deregister_nl_cb(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + struct wlan_lmac_if_sptrl_tx_ops *psptrl_tx_ops = NULL; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return; + } + psoc = wlan_pdev_get_psoc(pdev); + + 
psptrl_tx_ops = &psoc->soc_cb.tx_ops.sptrl_tx_ops; + + psptrl_tx_ops->sptrlto_deregister_netlink_cb(pdev); +} + +int +tgt_spectral_process_report(struct wlan_objmgr_pdev *pdev, + void *payload) +{ + struct wlan_objmgr_psoc *psoc = NULL; + struct wlan_lmac_if_sptrl_tx_ops *psptrl_tx_ops = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + + psptrl_tx_ops = &psoc->soc_cb.tx_ops.sptrl_tx_ops; + + return psptrl_tx_ops->sptrlto_process_spectral_report(pdev, payload); +} + +uint32_t +tgt_spectral_get_target_type(struct wlan_objmgr_psoc *psoc) +{ + uint32_t target_type = 0; + struct wlan_lmac_if_target_tx_ops *target_type_tx_ops; + + target_type_tx_ops = &psoc->soc_cb.tx_ops.target_tx_ops; + + if (target_type_tx_ops->tgt_get_tgt_type) + target_type = target_type_tx_ops->tgt_get_tgt_type(psoc); + + return target_type; +} + +#ifdef DIRECT_BUF_RX_ENABLE +QDF_STATUS +tgt_spectral_register_to_dbr(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_direct_buf_rx_tx_ops *dbr_tx_ops = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + dbr_tx_ops = &psoc->soc_cb.tx_ops.dbr_tx_ops; + + if (tgt_spectral_get_target_type(psoc) == TARGET_TYPE_QCA8074) + if (dbr_tx_ops->direct_buf_rx_module_register) + return dbr_tx_ops->direct_buf_rx_module_register + (pdev, 0, + spectral_dbr_event_handler); + + return QDF_STATUS_E_FAILURE; +} +#else +QDF_STATUS +tgt_spectral_register_to_dbr(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..0dce01fdf09dbbac023063f12899f891b5fd87f7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_ucfg_api.c @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
+ * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include "../../core/spectral_cmn_api_i.h" +#include +#include + +int +ucfg_spectral_control(struct wlan_objmgr_pdev *pdev, + u_int id, + void *indata, + uint32_t insize, void *outdata, uint32_t *outsize) +{ + struct spectral_context *sc; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return -EPERM; + } + sc = spectral_get_spectral_ctx_from_pdev(pdev); + if (!sc) { + spectral_err("spectral context is NULL!"); + return -EPERM; + } + + return sc->sptrlc_spectral_control(pdev, + id, + indata, insize, outdata, outsize); +} +qdf_export_symbol(ucfg_spectral_control); + +void ucfg_spectral_scan_set_ppid(struct wlan_objmgr_pdev *pdev, uint32_t ppid) +{ + struct pdev_spectral *ps = NULL; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return; + } + ps = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SPECTRAL); + if (!ps) { + spectral_err("spectral context is NULL!"); + return; + } + ps->spectral_pid = ppid; + spectral_debug("spectral ppid: %d", ppid); + + return; +} diff --git a/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_utils_api.c b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_utils_api.c new file mode 100644 index 
0000000000000000000000000000000000000000..406b427bf196a021a62e14b0c11ef201761df90c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/spectral/dispatcher/src/wlan_spectral_utils_api.c @@ -0,0 +1,221 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include "../../core/spectral_cmn_api_i.h" +#include + +QDF_STATUS +wlan_spectral_init(void) +{ + if (wlan_objmgr_register_psoc_create_handler( + WLAN_UMAC_COMP_SPECTRAL, + wlan_spectral_psoc_obj_create_handler, + NULL) != + QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_register_psoc_destroy_handler( + WLAN_UMAC_COMP_SPECTRAL, + wlan_spectral_psoc_obj_destroy_handler, + NULL) != + QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_register_pdev_create_handler( + WLAN_UMAC_COMP_SPECTRAL, + wlan_spectral_pdev_obj_create_handler, + NULL) != + QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_register_pdev_destroy_handler( + WLAN_UMAC_COMP_SPECTRAL, + wlan_spectral_pdev_obj_destroy_handler, + NULL) != + QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_spectral_deinit(void) +{ + if (wlan_objmgr_unregister_psoc_create_handler( + WLAN_UMAC_COMP_SPECTRAL, + wlan_spectral_psoc_obj_create_handler, + NULL) != + QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_SPECTRAL, + wlan_spectral_psoc_obj_destroy_handler, + NULL) != + QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_unregister_pdev_create_handler( + WLAN_UMAC_COMP_SPECTRAL, + wlan_spectral_pdev_obj_create_handler, + NULL) != + QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + if (wlan_objmgr_unregister_pdev_destroy_handler( + WLAN_UMAC_COMP_SPECTRAL, + wlan_spectral_pdev_obj_destroy_handler, + NULL) != + QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +spectral_register_legacy_cb(struct wlan_objmgr_psoc *psoc, + struct spectral_legacy_cbacks *legacy_cbacks) +{ + struct spectral_context *sc; + + sc = spectral_get_spectral_ctx_from_psoc(psoc); + if (!sc) { + spectral_err("Invalid Context"); + 
return QDF_STATUS_E_FAILURE; + } + + sc->legacy_cbacks.vdev_get_chan_freq = + legacy_cbacks->vdev_get_chan_freq; + sc->legacy_cbacks.vdev_get_ch_width = legacy_cbacks->vdev_get_ch_width; + sc->legacy_cbacks.vdev_get_sec20chan_freq_mhz = + legacy_cbacks->vdev_get_sec20chan_freq_mhz; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(spectral_register_legacy_cb); + +int16_t +spectral_vdev_get_chan_freq(struct wlan_objmgr_vdev *vdev) +{ + struct spectral_context *sc; + + sc = spectral_get_spectral_ctx_from_vdev(vdev); + if (!sc) { + spectral_err("spectral context is Null"); + return -EINVAL; + } + + return sc->legacy_cbacks.vdev_get_chan_freq(vdev); +} + +enum phy_ch_width +spectral_vdev_get_ch_width(struct wlan_objmgr_vdev *vdev) +{ + struct spectral_context *sc; + + sc = spectral_get_spectral_ctx_from_vdev(vdev); + if (!sc) { + spectral_err("spectral context is Null"); + return CH_WIDTH_INVALID; + } + + return sc->legacy_cbacks.vdev_get_ch_width(vdev); +} + +int +spectral_vdev_get_sec20chan_freq_mhz(struct wlan_objmgr_vdev *vdev, + uint16_t *sec20chan_freq) +{ + struct spectral_context *sc; + + sc = spectral_get_spectral_ctx_from_vdev(vdev); + if (!sc) { + spectral_err("spectral context is Null"); + return -EINVAL; + } + + return sc->legacy_cbacks.vdev_get_sec20chan_freq_mhz(vdev, + sec20chan_freq); +} + +void +wlan_lmac_if_sptrl_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops) +{ + struct wlan_lmac_if_sptrl_rx_ops *sptrl_rx_ops = &rx_ops->sptrl_rx_ops; + + /* Spectral rx ops */ + sptrl_rx_ops->sptrlro_get_target_handle = tgt_get_target_handle; + sptrl_rx_ops->sptrlro_vdev_get_chan_freq = spectral_vdev_get_chan_freq; + sptrl_rx_ops->sptrlro_vdev_get_ch_width = spectral_vdev_get_ch_width; + sptrl_rx_ops->sptrlro_vdev_get_sec20chan_freq_mhz = + spectral_vdev_get_sec20chan_freq_mhz; +} + +void +wlan_register_wmi_spectral_cmd_ops(struct wlan_objmgr_pdev *pdev, + struct wmi_spectral_cmd_ops *cmd_ops) +{ + struct spectral_context *sc; + + if (!pdev) { + 
spectral_err("PDEV is NULL!"); + return; + } + + sc = spectral_get_spectral_ctx_from_pdev(pdev); + if (!sc) { + spectral_err("spectral context is NULL!"); + return; + } + + return sc->sptrlc_register_wmi_spectral_cmd_ops(pdev, cmd_ops); +} +qdf_export_symbol(wlan_register_wmi_spectral_cmd_ops); + +#ifdef DIRECT_BUF_RX_ENABLE +int spectral_dbr_event_handler(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_data *payload) +{ + struct spectral_context *sc; + + if (!pdev) { + spectral_err("PDEV is NULL!"); + return -EINVAL; + } + sc = spectral_get_spectral_ctx_from_pdev(pdev); + if (!sc) { + spectral_err("spectral context is NULL!"); + return -EINVAL; + } + + return sc->sptrlc_process_spectral_report(pdev, payload); +} +#endif + +QDF_STATUS spectral_pdev_open(struct wlan_objmgr_pdev *pdev) +{ + QDF_STATUS status; + + status = tgt_spectral_register_to_dbr(pdev); + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/core/inc/target_if.h b/drivers/staging/qca-wifi-host-cmn/target_if/core/inc/target_if.h new file mode 100644 index 0000000000000000000000000000000000000000..9b97623c6994b87969764f26a7d2ec8fc0b82ffe --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/core/inc/target_if.h @@ -0,0 +1,2038 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This target interface shall be used + * to communicate with target using WMI. + */ +#ifndef _WLAN_TARGET_IF_H_ +#define _WLAN_TARGET_IF_H_ + +#include "qdf_types.h" +#include "qdf_util.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wmi_unified_api.h" +#include "wmi_unified_priv.h" +#include "wmi_unified_param.h" + +#define TGT_WMI_PDEV_ID_SOC 0 /* WMI SOC ID */ + +/* ASCII "TGT\0" */ +#define TGT_MAGIC 0x54575400 + +#define target_if_log(level, args...) \ + QDF_TRACE(QDF_MODULE_ID_TARGET_IF, level, ## args) +#define target_if_logfl(level, format, args...) \ + target_if_log(level, FL(format), ## args) + +#define target_if_fatal(format, args...) \ + target_if_logfl(QDF_TRACE_LEVEL_FATAL, format, ## args) +#define target_if_err(format, args...) \ + target_if_logfl(QDF_TRACE_LEVEL_ERROR, format, ## args) +#define target_if_warn(format, args...) \ + target_if_logfl(QDF_TRACE_LEVEL_WARN, format, ## args) +#define target_if_info(format, args...) \ + target_if_logfl(QDF_TRACE_LEVEL_INFO, format, ## args) +#define target_if_debug(format, args...) \ + target_if_logfl(QDF_TRACE_LEVEL_DEBUG, format, ## args) + +#define TARGET_IF_ENTER() target_if_logfl(QDF_TRACE_LEVEL_DEBUG, "enter") +#define TARGET_IF_EXIT() target_if_logfl(QDF_TRACE_LEVEL_DEBUG, "exit") + +#define target_if_err_rl(params...) 
\ + QDF_TRACE_ERROR_RL(QDF_MODULE_ID_TARGET_IF, params) + +#ifdef CONFIG_MCL +#define TARGET_TYPE_AR900B 9 /* Beeliner */ +#define TARGET_TYPE_QCA9984 15 /* cascade */ +#define TARGET_TYPE_IPQ4019 16 /* dakota */ +#define TARGET_TYPE_QCA9888 17 /* besra */ +#define TARGET_TYPE_AR9888 7 /* Peregrine */ +#endif + +typedef struct wlan_objmgr_psoc *(*get_psoc_handle_callback)( + void *scn_handle); + +typedef struct wlan_objmgr_pdev *(*get_pdev_handle_callback)( + void *scn_handle); + +typedef int (*wmi_legacy_service_ready_callback)(uint32_t event_id, + void *handle, + uint8_t *event_data, + uint32_t length); + +/** + * struct target_if_ctx - target_interface context + * @magic: magic for target if ctx + * @get_psoc_hdl_cb: function pointer to get psoc + * @get_pdev_hdl_cb: function pointer to get pdev + * @lock: spin lock for protecting the ctx + */ +struct target_if_ctx { + uint32_t magic; + get_psoc_handle_callback get_psoc_hdl_cb; + get_pdev_handle_callback get_pdev_hdl_cb; + wmi_legacy_service_ready_callback service_ready_cb; + qdf_spinlock_t lock; +}; + +struct target_psoc_info; +/** + * struct host_fw_ver - holds host fw version + * @host_ver: Host version + * @target_ver: Target version ID + * @target_rev: Target revision ID + * @wlan_ver: FW SW version + * @wlan_ver_1: FW SW version second dword + * @abi_ver: ABI version + */ +struct host_fw_ver { + uint32_t host_ver; + uint32_t target_ver; + uint32_t target_rev; + uint32_t wlan_ver; + uint32_t wlan_ver_1; + uint32_t abi_ver; +}; + +struct common_dbglog_handle; +struct common_hif_handle; +struct common_htc_handle; +struct common_wmi_handle; +struct common_accelerator_handle; + +/** + * struct comp_hdls - Non-umac/lower layer components handles, it is a sub + * structure of target psoc information + * @hif_hdl: HIF handle + * @htc_hdl: HTC handle + * @wmi_hdl: WMI handle + * @accelerator_hdl: NSS offload/IPA handle + * @dbglog_hdl: Debug log handle + */ +struct comp_hdls { + struct common_hif_handle *hif_hdl; + 
struct common_htc_handle *htc_hdl; + struct common_wmi_handle *wmi_hdl; + struct common_accelerator_handle *accelerator_hdl; + struct common_dbglog_handle *dbglog_hdl; +}; + +/** + * struct tgt_info - FW or lower layer related info(required by target_if), + * it is a sub structure of taarget psoc information + * @version: Host FW version struct + * @wlan_res_cfg: target_resource_config info + * @wlan_ext_res_cfg: wmi_host_ext_resource_config info + * @wmi_service_ready: is service ready received + * @wmi_ready: is ready event received + * @total_mac_phy_cnt: num of mac phys + * @num_radios: number of radios + * @wlan_init_status: Target init status + * @target_type: Target type + * @max_descs: Max descriptors + * @preferred_hw_mode: preferred hw mode + * @wmi_timeout: wait timeout for target events + * @event: qdf_event for target events + * @service_bitmap: WMI service bitmap + * @target_cap: target capabilities + * @service_ext_param: ext service params + * @mac_phy_cap: phy caps array + * @reg_cap: regulatory caps array + * @num_mem_chunks: number of mem chunks allocated + * @mem_chunks: allocated memory blocks for FW + */ +struct tgt_info { + struct host_fw_ver version; + target_resource_config wlan_res_cfg; + wmi_host_ext_resource_config wlan_ext_res_cfg; + bool wmi_service_ready; + bool wmi_ready; + uint8_t total_mac_phy_cnt; + uint8_t num_radios; + uint32_t wlan_init_status; + uint32_t target_type; + uint32_t max_descs; + uint32_t preferred_hw_mode; + uint32_t wmi_timeout; + qdf_event_t event; + uint32_t service_bitmap[PSOC_SERVICE_BM_SIZE]; + struct wlan_psoc_target_capability_info target_caps; + struct wlan_psoc_host_service_ext_param service_ext_param; + struct wlan_psoc_host_mac_phy_caps + mac_phy_cap[PSOC_MAX_MAC_PHY_CAP]; + struct wlan_psoc_host_dbr_ring_caps *dbr_ring_cap; + uint32_t num_mem_chunks; + struct wmi_host_mem_chunk mem_chunks[MAX_MEM_CHUNKS]; +}; + +/** + * struct target_ops - Holds feature specific function pointers, which would be + * 
invoked as part of service ready or ext service ready + * @ext_resource_config_enable: Ext resource config + * @peer_config: Peer config enable + * @mesh_support_enable: Mesh support enable + * @smart_antenna_enable: Smart antenna enable + * @atf_config_enable: ATF config enable + * @qwrap_config_enable: QWRAP config enable + * @btcoex_config_enable: BTCOEX config enable + * @lteu_ext_support_enable: LTE-U Ext config enable + * @set_init_cmd_dev_based_params: Sets Init command params + * @alloc_pdevs: Allocates PDEVs + * @update_pdev_tgt_info: Updates PDEV target info + * @mem_mgr_alloc_chunk: Allocates memory through MEM manager + * @mem_mgr_free_chunks: Free memory chunks through MEM manager + * @print_svc_ready_ex_param: Print service ready ext params + * @add_11ax_modes: Adds 11ax modes to reg cap + * @set_default_tgt_config: Sets target config with default values + * @sw_version_check: Checks the SW version + */ +struct target_ops { + QDF_STATUS (*ext_resource_config_enable) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*peer_config) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*mesh_support_enable) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*smart_antenna_enable) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*atf_config_enable) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*qwrap_config_enable) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*btcoex_config_enable) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*lteu_ext_support_enable) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + void (*set_init_cmd_dev_based_params) + (struct wlan_objmgr_psoc *psoc, + struct 
target_psoc_info *tgt_info); + QDF_STATUS (*alloc_pdevs) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + QDF_STATUS (*update_pdev_tgt_info) + (struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + uint32_t (*mem_mgr_alloc_chunk)(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, + u_int32_t req_id, u_int32_t idx, u_int32_t num_units, + u_int32_t unit_len, u_int32_t num_unit_info); + QDF_STATUS (*mem_mgr_free_chunks)(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl); + void (*print_svc_ready_ex_param)( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + void (*add_11ax_modes)( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + void (*set_default_tgt_config)( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + QDF_STATUS (*sw_version_check)( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, + uint8_t *evt_buf); +}; + + + +/** + * struct target_psoc_info - target psoc information + * @hdls: component handles (htc/htt/wmi) sub structure + * @info: target related info sub structure + * @feature_ptr: stores legacy pointer or few driver specific structures + * @tif_ops: holds driver specific function pointers + */ +struct target_psoc_info { + struct comp_hdls hdls; + struct tgt_info info; + void *feature_ptr; + struct target_ops *tif_ops; +}; + +/** + * struct target_pdev_info - target pdev information + * @wmi_handle: WMI handle + * @accelerator_hdl: NSS offload/IPA handles + * @pdev_idx: pdev id (of FW) + * @phy_idx: phy id (of FW) + * @feature_ptr: stores legacy pointer or few driver specific structures + */ +struct target_pdev_info { + struct common_wmi_handle *wmi_handle; + struct common_accelerator_handle *accelerator_hdl; + int32_t pdev_idx; + int32_t phy_idx; + void *feature_ptr; +}; + + +/** + * target_if_open() - target_if open + * @get_wmi_handle: function pointer to get wmi handle + * + * + * Return: 
QDF_STATUS + */ +QDF_STATUS target_if_open(get_psoc_handle_callback psoc_hdl_cb); + +/** + * target_if_close() - Close target_if + * @scn_handle: scn handle + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS target_if_close(void); + +/** + * target_if_store_pdev_target_if_ctx() - stores objmgr pdev in target if ctx + * @pdev_hdl_cb: function pointer to get objmgr pdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS target_if_store_pdev_target_if_ctx( + get_pdev_handle_callback pdev_hdl_cb); + +/** + * wlan_get_tgt_if_ctx() -Get target if ctx + * + * Return: target if ctx + */ +struct target_if_ctx *target_if_get_ctx(void); + +/** + * target_if_get_psoc_from_scn_hdl() - get psoc from scn handle + * @scn_handle: scn handle + * + * This API is generally used while processing wmi event. + * In wmi event SCN handle will be passed by wmi hence + * using this API we can get psoc from scn handle. + * + * Return: index for matching scn handle + */ +struct wlan_objmgr_psoc *target_if_get_psoc_from_scn_hdl(void *scn_handle); + +/** + * target_if_get_pdev_from_scn_hdl() - get pdev from scn handle + * @scn_handle: scn handle + * + * This API is generally used while processing wmi event. + * In wmi event SCN handle will be passed by wmi hence + * using this API we can get pdev from scn handle. + * + * Return: pdev for matching scn handle + */ +struct wlan_objmgr_pdev *target_if_get_pdev_from_scn_hdl(void *scn_handle); + +/** target_if_register_tx_ops() - register tx_ops + * @tx_ops: tx_ops structure + * + * This function is to be used by components to populate + * the OL function pointers (tx_ops) required by the component + * for UMAC-LMAC interaction, with the appropriate handler + * + * Return: QDF STATUS + */ +QDF_STATUS target_if_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_get_psoc_legacy_service_ready_cb() - get psoc from scn handle + * + * This API is generally used while processing wmi event. 
+ * In wmi event SCN handle will be passed by wmi hence + * using this API we can get psoc from scn handle. + * + * Return: wmi_legacy_service_ready_callback + */ +wmi_legacy_service_ready_callback + target_if_get_psoc_legacy_service_ready_cb(void); + +/** + * target_if_register_legacy_service_ready_cb() - get legacy + * service ready handler from scn handle + * + * @service_ready_cb: function pointer to service ready callback + * + * Return: QDF Status + */ +QDF_STATUS target_if_register_legacy_service_ready_cb( + wmi_legacy_service_ready_callback service_ready_cb); + +/** + * target_if_alloc_pdev_tgt_info() - alloc pdev tgt info + * @pdev: pointer to pdev + * + * API to allocate memory for target_pdev_info + * + * Return: SUCCESS on successful memory allocation or Failure + */ +QDF_STATUS target_if_alloc_pdev_tgt_info(struct wlan_objmgr_pdev *pdev); + +/** + * target_if_free_pdev_tgt_info() - free pdev tgt info + * @pdev: pointer to pdev + * + * API to free allocated memory for target_pdev_info + * + * Return: SUCCESS on successful memory deallocation or Failure + */ +QDF_STATUS target_if_free_pdev_tgt_info(struct wlan_objmgr_pdev *pdev); + +/** + * target_if_alloc_psoc_tgt_info() - alloc psoc tgt info + * @psoc: pointer to psoc + * + * API to allocate memory for target_psoc_info + * + * Return: SUCCESS on successful memory allocation or Failure + */ +QDF_STATUS target_if_alloc_psoc_tgt_info(struct wlan_objmgr_psoc *psoc); + +/** + * target_if_free_psoc_tgt_info() - free psoc tgt info + * @psoc: pointer to psoc + * + * API to free allocated memory for target_psoc_info + * + * Return: SUCCESS on successful memory deallocation or Failure + */ +QDF_STATUS target_if_free_psoc_tgt_info(struct wlan_objmgr_psoc *psoc); + +/** + * target_is_tgt_type_ar900b() - Check if the target type is AR900B + * @target_type: target type to be checked. + * + * Return: true if the target_type is AR900B, else false. 
+ */ +bool target_is_tgt_type_ar900b(uint32_t target_type); + +/** + * target_is_tgt_type_ipq4019() - Check if the target type is IPQ4019 + * @target_type: target type to be checked. + * + * Return: true if the target_type is IPQ4019, else false. + */ +bool target_is_tgt_type_ipq4019(uint32_t target_type); + +/** + * target_is_tgt_type_qca9984() - Check if the target type is QCA9984 + * @target_type: target type to be checked. + * + * Return: true if the target_type is QCA9984, else false. + */ +bool target_is_tgt_type_qca9984(uint32_t target_type); + +/** + * target_is_tgt_type_qca9888() - Check if the target type is QCA9888 + * @target_type: target type to be checked. + * + * Return: true if the target_type is QCA9888, else false. + */ +bool target_is_tgt_type_qca9888(uint32_t target_type); + + +/** + * target_psoc_set_wlan_init_status() - set info wlan_init_status + * @psoc_info: pointer to structure target_psoc_info + * @wlan_init_status: FW init status + * + * API to set wlan_init_status + * + * Return: void + */ +static inline void target_psoc_set_wlan_init_status + (struct target_psoc_info *psoc_info, uint32_t wlan_init_status) +{ + if (psoc_info == NULL) + return; + + psoc_info->info.wlan_init_status = wlan_init_status; +} + +/** + * target_psoc_get_wlan_init_status() - get info wlan_init_status + * @psoc_info: pointer to structure target_psoc_info + * + * API to get wlan_init_status + * + * Return: uint32_t + */ +static inline uint32_t target_psoc_get_wlan_init_status + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return (uint32_t)-1; + + return psoc_info->info.wlan_init_status; +} + +/** + * target_psoc_set_target_type() - set info target_type + * @psoc_info: pointer to structure target_psoc_info + * @target_type: Target type + * + * API to set target_type + * + * Return: void + */ +static inline void target_psoc_set_target_type + (struct target_psoc_info *psoc_info, uint32_t target_type) +{ + if (psoc_info == NULL) + return; + + 
psoc_info->info.target_type = target_type; +} + +/** + * target_psoc_get_target_type() - get info target_type + * @psoc_info: pointer to structure target_psoc_info + * + * API to get target_type + * + * Return: unit32_t + */ +static inline uint32_t target_psoc_get_target_type + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return (uint32_t)-1; + + return psoc_info->info.target_type; +} + +/** + * target_psoc_set_max_descs() - set info max_descs + * @psoc_info: pointer to structure target_psoc_info + * @max_descs: Max descriptors + * + * API to set max_descs + * + * Return: void + */ +static inline void target_psoc_set_max_descs + (struct target_psoc_info *psoc_info, uint32_t max_descs) +{ + if (psoc_info == NULL) + return; + + psoc_info->info.max_descs = max_descs; +} + +/** + * target_psoc_get_max_descs() - get info max_descs + * @psoc_info: pointer to structure target_psoc_info + * + * API to get max_descs + * + * Return: unint32_t + */ +static inline uint32_t target_psoc_get_max_descs + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return (uint32_t)-1; + + return psoc_info->info.max_descs; +} + +/** + * target_psoc_set_wmi_service_ready() - set info wmi_service_ready + * @psoc_info: pointer to structure target_psoc_info + * @wmi_service_ready: service ready flag + * + * API to set wmi_service_ready + * + * Return: void + */ +static inline void target_psoc_set_wmi_service_ready + (struct target_psoc_info *psoc_info, bool wmi_service_ready) +{ + if (psoc_info == NULL) + return; + + psoc_info->info.wmi_service_ready = wmi_service_ready; +} + +/** + * target_psoc_get_wmi_service_ready() - get info wmi_service_ready + * @psoc_info: pointer to structure target_psoc_info + * + * API to get wmi_service_ready + * + * Return: bool + */ +static inline bool target_psoc_get_wmi_service_ready + (struct target_psoc_info *psoc_info) +{ + return psoc_info->info.wmi_service_ready; +} + +/** + * target_psoc_set_wmi_ready() - set info 
wmi_ready + * @psoc_info: pointer to structure target_psoc_info + * @wmi_ready: Ready event flag + * + * API to set wmi_ready + * + * Return: void + */ +static inline void target_psoc_set_wmi_ready + (struct target_psoc_info *psoc_info, bool wmi_ready) +{ + if (psoc_info == NULL) + return; + + psoc_info->info.wmi_ready = wmi_ready; +} + +/** + * target_psoc_get_wmi_ready() - get info wmi_ready + * @psoc_info: pointer to structure target_psoc_info + * + * API to get wmi_ready + * + * Return: bool + */ +static inline bool target_psoc_get_wmi_ready + (struct target_psoc_info *psoc_info) +{ + return psoc_info->info.wmi_ready; +} + +/** + * target_psoc_set_preferred_hw_mode() - set preferred_hw_mode + * @psoc_info: pointer to structure target_psoc_info + * @preferred_hw_mode: Preferred HW mode + * + * API to set preferred_hw_mode + * + * Return: void + */ +static inline void target_psoc_set_preferred_hw_mode( + struct target_psoc_info *psoc_info, uint32_t preferred_hw_mode) +{ + if (psoc_info == NULL) + return; + + psoc_info->info.preferred_hw_mode = preferred_hw_mode; +} + +/** + * target_psoc_get_preferred_hw_mode() - get preferred_hw_mode + * @psoc_info: pointer to structure target_psoc_info + * + * API to get preferred_hw_mode + * + * Return: unint32_t + */ +static inline uint32_t target_psoc_get_preferred_hw_mode + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return WMI_HOST_HW_MODE_MAX; + + return psoc_info->info.preferred_hw_mode; +} + +/** + * target_psoc_set_wmi_timeout() - set wmi_timeout + * @psoc_info: pointer to structure target_psoc_info + * @wmi_timeout: WMI timeout value in sec + * + * API to set wmi_timeout + * + * Return: void + */ +static inline void target_psoc_set_wmi_timeout + (struct target_psoc_info *psoc_info, uint32_t wmi_timeout) +{ + if (psoc_info == NULL) + return; + + psoc_info->info.wmi_timeout = wmi_timeout; +} + +/** + * target_psoc_get_wmi_timeout() - get wmi_timeout + * @psoc_info: pointer to structure 
target_psoc_info + * + * API to get wmi_timeout + * + * Return: unint32_t + */ +static inline uint32_t target_psoc_get_wmi_timeout + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return (uint32_t)-1; + + return psoc_info->info.wmi_timeout; +} + +/** + * target_psoc_set_total_mac_phy_cnt() - set total_mac_phy + * @psoc_info: pointer to structure target_psoc_infoa + * @total_mac_phy_cnt: Total MAC PHY cnt + * + * API to set total_mac_phy + * + * Return: void + */ +static inline void target_psoc_set_total_mac_phy_cnt + (struct target_psoc_info *psoc_info, uint8_t total_mac_phy_cnt) +{ + if (psoc_info == NULL) + return; + + psoc_info->info.total_mac_phy_cnt = total_mac_phy_cnt; +} + +/** + * target_psoc_get_total_mac_phy_cnt() - get total_mac_phy + * @psoc_info: pointer to structure target_psoc_info + * + * API to get total_mac_phy + * + * Return: unint8_t + */ +static inline uint8_t target_psoc_get_total_mac_phy_cnt( + struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return 0; + + return psoc_info->info.total_mac_phy_cnt; +} + +/** + * target_psoc_set_num_radios() - set num of radios + * @psoc_info: pointer to structure target_psoc_info + * @num_radios: Number of radios + * + * API to set number of radios + * + * Return: number of radios + */ +static inline void target_psoc_set_num_radios( + struct target_psoc_info *psoc_info, uint8_t num_radios) +{ + if (psoc_info == NULL) + return; + + psoc_info->info.num_radios = num_radios; +} + +/** + * target_psoc_get_num_radios() - get number of radios + * @psoc_info: pointer to structure target_psoc_info + * + * API to get number_of_radios + * + * Return: number of radios + */ +static inline uint8_t target_psoc_get_num_radios + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return 0; + + return psoc_info->info.num_radios; +} + +/** + * target_psoc_set_service_bitmap() - set service_bitmap + * @psoc_info: pointer to structure target_psoc_info + * @service_bitmap: FW 
service bitmap + * + * API to set service_bitmap + * + * Return: void + */ +static inline void target_psoc_set_service_bitmap + (struct target_psoc_info *psoc_info, uint32_t *service_bitmap) +{ + qdf_mem_copy(psoc_info->info.service_bitmap, service_bitmap, + sizeof(psoc_info->info.service_bitmap)); +} + +/** + * target_psoc_get_service_bitmap() - get service_bitmap + * @psoc_info: pointer to structure target_psoc_info + * + * API to get service_bitmap + * + * Return: unint32_t + */ +static inline uint32_t *target_psoc_get_service_bitmap + (struct target_psoc_info *psoc_info) +{ + return psoc_info->info.service_bitmap; +} + +/** + * target_psoc_set_num_mem_chunks - set num_mem_chunks + * @psoc_info: pointer to structure target_psoc_info + & @num_mem_chunks: Num Memory chunks allocated for FW + * + * API to set num_mem_chunks + * + * Return: void + */ +static inline void target_psoc_set_num_mem_chunks( + struct target_psoc_info *psoc_info, uint32_t num_mem_chunks) +{ + if (psoc_info == NULL) + return; + psoc_info->info.num_mem_chunks = num_mem_chunks; +} + +/** + * target_psoc_get_num_mem_chunks() - get num_mem_chunks + * @psoc_info: pointer to structure target_psoc_info + * + * API to get total_mac_phy + * + * Return: unint8_t + */ +static inline uint32_t target_psoc_get_num_mem_chunks + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return (uint32_t)-1; + + return psoc_info->info.num_mem_chunks; +} +/** + * target_psoc_set_hif_hdl - set hif_hdl + * @psoc_info: pointer to structure target_psoc_info + * @hif_hdl: HIF handle + * + * API to set hif_hdl + * + * Return: void + */ +static inline void target_psoc_set_hif_hdl + (struct target_psoc_info *psoc_info, + struct common_hif_handle *hif_hdl) +{ + if (psoc_info == NULL) + return; + + psoc_info->hdls.hif_hdl = hif_hdl; +} + +/** + * target_psoc_get_hif_hdl() - get hif_hdl + * @psoc_info: pointer to structure target_psoc_info + * + * API to get hif_hdl + * + * Return: hif_hdl + */ +static inline 
struct common_hif_handle *target_psoc_get_hif_hdl + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return NULL; + + return psoc_info->hdls.hif_hdl; +} + +/** + * target_psoc_set_hif_hdl - set htc_hdl + * @psoc_info: pointer to structure target_psoc_info + * @htc_hdl: HTC handle + * + * API to set htc_hdl + * + * Return: void + */ +static inline void target_psoc_set_htc_hdl + (struct target_psoc_info *psoc_info, + struct common_htc_handle *htc_hdl) +{ + if (psoc_info == NULL) + return; + + psoc_info->hdls.htc_hdl = htc_hdl; +} + +/** + * target_psoc_get_htc_hdl() - get htc_hdl + * @psoc_info: pointer to structure target_psoc_info + * + * API to get htc_hdl + * + * Return: htc_hdl + */ +static inline struct common_htc_handle *target_psoc_get_htc_hdl + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return NULL; + + return psoc_info->hdls.htc_hdl; +} +/** + * target_psoc_set_wmi_hdl - set wmi_hdl + * @psoc_info: pointer to structure target_psoc_info + * @wmi_hdl: WMI handle + * + * API to set wmi_hdl + * + * Return: void + */ +static inline void target_psoc_set_wmi_hdl + (struct target_psoc_info *psoc_info, + struct common_wmi_handle *wmi_hdl) +{ + if (psoc_info == NULL) + return; + + psoc_info->hdls.wmi_hdl = wmi_hdl; +} + +/** + * target_psoc_get_wmi_hdl() - get wmi_hdl + * @psoc_info: pointer to structure target_psoc_info + * + * API to get wmi_hdl + * + * Return: wmi_hdl + */ +static inline struct common_wmi_handle *target_psoc_get_wmi_hdl + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return NULL; + + return psoc_info->hdls.wmi_hdl; +} + +/** + * target_psoc_set_accelerator_hdl - set accelerator_hdl + * @psoc_info: pointer to structure target_psoc_info + * @accelerator_hdl: Accelator handle + * + * API to set accelerator_hdl + * + * Return: void + */ +static inline void target_psoc_set_accelerator_hdl + (struct target_psoc_info *psoc_info, + struct common_accelerator_handle *accelerator_hdl) +{ + if 
(psoc_info == NULL) + return; + + psoc_info->hdls.accelerator_hdl = accelerator_hdl; +} + +/** + * target_psoc_get_accelerator_hdl() - get accelerator_hdl + * @psoc_info: pointer to structure target_psoc_info + * + * API to get accelerator_hdl + * + * Return: accelerator_hdl + */ +static inline +struct common_accelerator_handle *target_psoc_get_accelerator_hdl + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return NULL; + + return psoc_info->hdls.accelerator_hdl; +} + +/** + * target_psoc_set_feature_ptr - set feature_ptr + * @psoc_info: pointer to structure target_psoc_info + * @feature_ptr: set feature pointer + * + * API to set feature_ptr + * + * Return: void + */ +static inline void target_psoc_set_feature_ptr + (struct target_psoc_info *psoc_info, void *feature_ptr) +{ + if (psoc_info == NULL) + return; + + psoc_info->feature_ptr = feature_ptr; +} + +/** + * target_psoc_get_feature_ptr() - get feature_ptr + * @psoc_info: pointer to structure target_psoc_info + * + * API to get feature_ptr + * + * Return: feature_ptr + */ +static inline void *target_psoc_get_feature_ptr + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return NULL; + + return psoc_info->feature_ptr; +} + +/** + * target_psoc_get_version()- get host_fw_ver version + * @psoc_info: pointer to structure target_psoc_info + * + * API to get host_fw_ver version + * + * Return: void + */ +static inline struct host_fw_ver *target_psoc_get_version + (struct target_psoc_info *psoc_info) +{ + return &psoc_info->info.version; +} + +/** + * target_psoc_get_target_ver()- get target version + * @psoc_info: pointer to structure target_psoc_info + * + * API to get target version + * + * Return: target version + */ +static inline uint32_t target_psoc_get_target_ver + (struct target_psoc_info *psoc_info) +{ + return psoc_info->info.version.target_ver; +} + +/** + * target_psoc_set_target_ver()- set target version + * @psoc_info: pointer to structure target_psoc_info + * 
@target_ver: Target version + * + * API to set target version + * + * Return: void + */ +static inline void target_psoc_set_target_ver + (struct target_psoc_info *psoc_info, uint32_t target_ver) +{ + if (psoc_info == NULL) + return; + + psoc_info->info.version.target_ver = target_ver; +} + +/** + * target_psoc_set_target_rev()- set target revision + * @psoc_info: pointer to structure target_psoc_info + * @target_rev: Target revision + * + * API to get target version + * + * Return: void + */ +static inline void target_psoc_set_target_rev + (struct target_psoc_info *psoc_info, uint32_t target_rev) +{ + if (psoc_info == NULL) + return; + + psoc_info->info.version.target_rev = target_rev; +} + +/** + * target_psoc_get_target_rev()- get target revision + * @psoc_info: pointer to structure target_psoc_info + * + * API to get target revision + * + * Return: target revision + */ +static inline uint32_t target_psoc_get_target_rev + (struct target_psoc_info *psoc_info) +{ + return psoc_info->info.version.target_rev; +} + +/** + * target_psoc_set_dbglog_hdl - set dbglog_hdl + * @psoc_info: pointer to structure target_psoc_info + * @dbglog_hdl: dbglog handle + * + * API to set dbglog_hdl + * + * Return: void + */ +static inline void target_psoc_set_dbglog_hdl + (struct target_psoc_info *psoc_info, + struct common_dbglog_handle *dbglog_hdl) +{ + if (psoc_info == NULL) + return; + + psoc_info->hdls.dbglog_hdl = dbglog_hdl; +} + +/** + * target_psoc_get_dbglog_hdl() - get dbglog_hdl + * @psoc_info: pointer to structure target_psoc_info + * + * API to get dbglog_hdl + * + * Return: dbglog_hdl + */ +static inline struct common_dbglog_handle *target_psoc_get_dbglog_hdl + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return NULL; + + return psoc_info->hdls.dbglog_hdl; +} + +/** + * target_psoc_get_wlan_res_cfg() - get wlan_res_cfg + * @psoc_info: pointer to structure target_psoc_info + * + * API to get wlan_res_cfg + * + * Return: structure pointer to 
host_fw_ver + */ +static inline target_resource_config *target_psoc_get_wlan_res_cfg + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return NULL; + + return &psoc_info->info.wlan_res_cfg; +} + +/** + * target_psoc_get_wlan_ext_res_cfg() - get wlan_ext_res_cfg + * @psoc_info: pointer to structure target_psoc_info + * + * API to get wlan_ext_res_cfg + * + * Return: structure pointer to wmi_host_ext_resource_config + */ +static inline wmi_host_ext_resource_config *target_psoc_get_wlan_ext_res_cfg + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return NULL; + + return &psoc_info->info.wlan_ext_res_cfg; +} + +/** + * target_psoc_get_event_queue() - get event_queue + * @psoc_info: pointer to structure target_psoc_info + * + * API to get event_queue + * + * Return: structure pointer to qdf_wait_queue_head_t + */ +static inline qdf_event_t *target_psoc_get_event + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return NULL; + + return &psoc_info->info.event; +} + +/** + * target_psoc_get_target_caps() - get target_caps + * @psoc_info: pointer to structure target_psoc_info + * + * API to get target_caps + * + * Return: structure pointer to wlan_psoc_target_capability_info + */ +static inline struct wlan_psoc_target_capability_info + *target_psoc_get_target_caps(struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return NULL; + + return &psoc_info->info.target_caps; +} + +/** + * target_psoc_get_service_ext_param() - get service_ext_param + * @psoc_info: pointer to structure target_psoc_info + * + * API to get service_ext_param + * + * Return: structure pointer to wlan_psoc_host_service_ext_param + */ +static inline struct wlan_psoc_host_service_ext_param + *target_psoc_get_service_ext_param + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return NULL; + + return &psoc_info->info.service_ext_param; +} + + +/** + * target_psoc_get_mac_phy_cap() - get mac_phy_cap + * @psoc_info: 
pointer to structure target_psoc_info + * + * API to get mac_phy_cap + * + * Return: structure pointer to wlan_psoc_host_mac_phy_caps + */ +static inline struct wlan_psoc_host_mac_phy_caps *target_psoc_get_mac_phy_cap + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return NULL; + + return psoc_info->info.mac_phy_cap; +} + +/** + * target_psoc_get_dbr_ring_caps() - get dbr_ring_cap + * @psoc_info: pointer to structure target_psoc_info + * + * API to get dbr_ring_cap + * + * Return: structure pointer to wlan_psoc_host_dbr_ring_caps + */ +static inline struct wlan_psoc_host_dbr_ring_caps + *target_psoc_get_dbr_ring_caps(struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return NULL; + + return psoc_info->info.dbr_ring_cap; +} +/** + * target_psoc_get_mem_chunks() - get mem_chunks + * @psoc_info: pointer to structure target_psoc_info + * + * API to get mem_chunks + * + * Return: structure pointer to wmi_host_mem_chunk + */ +static inline struct wmi_host_mem_chunk *target_psoc_get_mem_chunks + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return NULL; + + return psoc_info->info.mem_chunks; +} + +/** + * target_psoc_get_tif_ops() - get tif_ops + * @psoc_info: pointer to structure target_psoc_info + * + * API to get tif_ops + * + * Return: structure pointer to target_ops + */ +static inline struct target_ops *target_psoc_get_tif_ops + (struct target_psoc_info *psoc_info) +{ + if (psoc_info == NULL) + return NULL; + + return psoc_info->tif_ops; +} + +/** + * target_pdev_set_feature_ptr - set feature_ptr + * @pdev_info: pointer to structure target_pdev_info + * @feature_ptr: Feature pointer + * + * API to set feature_ptr + * + * Return: void + */ +static inline void target_pdev_set_feature_ptr + (struct target_pdev_info *pdev_info, void *feature_ptr) +{ + if (pdev_info == NULL) + return; + + pdev_info->feature_ptr = feature_ptr; +} + +/** + * target_pdev_get_feature_ptr() - get feature_ptr + * @pdev_info: pointer to 
structure target_pdev_info + * + * API to get feature_ptr + * + * Return: feature_ptr + */ +static inline void *target_pdev_get_feature_ptr + (struct target_pdev_info *pdev_info) +{ + if (pdev_info == NULL) + return NULL; + + return pdev_info->feature_ptr; +} + +/** + * target_pdev_set_wmi_handle - set wmi_handle + * @pdev_info: pointer to structure target_pdev_info + * @wmi_handle: WMI handle + * + * API to set wmi_handle + * + * Return: void + */ +static inline void target_pdev_set_wmi_handle + (struct target_pdev_info *pdev_info, + struct common_wmi_handle *wmi_handle) +{ + if (pdev_info == NULL) + return; + + pdev_info->wmi_handle = wmi_handle; +} + +/** + * target_pdev_get_wmi_handle - get wmi_handle + * @pdev_info: pointer to structure target_dev_info + * + * API to get wmi_handle + * + * Return: wmi_handle + */ +static inline struct common_wmi_handle *target_pdev_get_wmi_handle + (struct target_pdev_info *pdev_info) +{ + if (pdev_info == NULL) + return NULL; + + return pdev_info->wmi_handle; +} + +/** + * target_pdev_set_accelerator_hdl - set accelerator_hdl + * @pdev_info: pointer to structure target_pdev_info + * @accelerator_hdl: Accelator handle + * + * API to set accelerator_hdl + * + * Return: void + */ +static inline void target_pdev_set_accelerator_hdl + (struct target_pdev_info *pdev_info, + struct common_accelerator_handle *accelerator_hdl) +{ + if (pdev_info == NULL) + return; + + pdev_info->accelerator_hdl = accelerator_hdl; +} + +/** + * target_pdev_get_accelerator_hdl - get accelerator_hdl + * @pdev_info: pointer to structure target_dev_info + * + * API to get accelerator_hdl + * + * Return: accelerator_hdl + */ +static inline struct common_accelerator_handle * +target_pdev_get_accelerator_hdl(struct target_pdev_info *pdev_info) +{ + if (pdev_info == NULL) + return NULL; + + return pdev_info->accelerator_hdl; +} + +/** + * target_pdev_set_pdev_idx - set pdev_idx + * @pdev_info: pointer to structure target_pdev_info + * @pdev_idx: PDEV id of FW 
+ * + * API to set pdev_idx + * + * Return: void + */ +static inline void target_pdev_set_pdev_idx + (struct target_pdev_info *pdev_info, int32_t pdev_idx) +{ + if (pdev_info == NULL) + return; + + pdev_info->pdev_idx = pdev_idx; +} + +/** + * target_pdev_get_pdev_idx - get pdev_idx + * @pdev_info: pointer to structure target_dev_info + * + * API to get pdev_idx + * + * Return: int32_t + */ +static inline int32_t target_pdev_get_pdev_idx + (struct target_pdev_info *pdev_info) +{ + if (pdev_info == NULL) + return -EINVAL; + + return pdev_info->pdev_idx; +} + +/** + * target_pdev_set_phy_idx - set phy_idx + * @pdev_info: pointer to structure target_pdev_info + * @phy_idx: phy ID of FW + * + * API to set phy_idx + * + * Return: void + */ +static inline void target_pdev_set_phy_idx + (struct target_pdev_info *pdev_info, int32_t phy_idx) +{ + if (pdev_info == NULL) + return; + + pdev_info->phy_idx = phy_idx; +} + +/** + * target_pdev_get_phy_idx - get phy_idx + * @pdev_info: pointer to structure target_dev_info + * + * API to get phy_idx + * + * Return: int32_t + */ +static inline int32_t target_pdev_get_phy_idx + (struct target_pdev_info *pdev_info) +{ + if (pdev_info == NULL) + return -EINVAL; + + return pdev_info->phy_idx; +} + +/** + * GET_WMI_HDL_FROM_PSOC - get wmi handle from psoc + * @psoc: psoc object + * + * API to get wmi_handle from psoc + * + * Return: wmi_handle on success + * if tgt handle is not initialized, it returns NULL + */ +static inline struct common_wmi_handle *GET_WMI_HDL_FROM_PSOC( + struct wlan_objmgr_psoc *psoc) +{ + void *tgt_if_handle; + + if (psoc) { + tgt_if_handle = psoc->tgt_if_handle; + + if (tgt_if_handle) + return (target_psoc_get_wmi_hdl( + (struct target_psoc_info *)tgt_if_handle)); + else + return NULL; + } + + return NULL; +} + +/** + * GET_WMI_HDL_FROM_PDEV - get wmi handle from pdev + * @pdev: pdev object + * + * API to get wmi_handle from pdev + * + * Return: wmi_handle on success + * if tgt handle is not initialized, it 
returns NULL + */ +static inline struct common_wmi_handle *GET_WMI_HDL_FROM_PDEV( + struct wlan_objmgr_pdev *pdev) +{ + void *tgt_if_handle; + + if (pdev) { + tgt_if_handle = pdev->tgt_if_handle; + + if (tgt_if_handle) + return target_pdev_get_wmi_handle(tgt_if_handle); + else + return NULL; + } + + return NULL; +} + +/** + * get_wmi_unified_hdl_from_psoc - get wmi handle from psoc + * @psoc: psoc object + * + * API to get wmi_handle from psoc + * + * Return: wmi_handle on success + * if tgt handle is not initialized, it returns NULL + */ +static inline wmi_unified_t +get_wmi_unified_hdl_from_psoc(struct wlan_objmgr_psoc *psoc) +{ + return (wmi_unified_t)GET_WMI_HDL_FROM_PSOC(psoc); +} + +/** + * get_wmi_unified_hdl_from_pdev - get wmi handle from pdev + * @pdev: pdev object + * + * API to get wmi_handle from pdev + * + * Return: wmi_handle on success + * if tgt handle is not initialized, it returns NULL + */ +static inline wmi_unified_t +get_wmi_unified_hdl_from_pdev(struct wlan_objmgr_pdev *pdev) +{ + return (wmi_unified_t)GET_WMI_HDL_FROM_PDEV(pdev); +} + +/** + * target_if_ext_res_cfg_enable - Enable ext resource config + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable Ext resource config + * + * Return: none + */ +static inline void target_if_ext_res_cfg_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->ext_resource_config_enable)) + tgt_hdl->tif_ops->ext_resource_config_enable(psoc, + tgt_hdl, evt_buf); +} + +/** + * target_if_peer_cfg_enable - Enable peer config + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable peer config + * + * Return: none + */ +static inline void target_if_peer_cfg_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) && + 
(tgt_hdl->tif_ops->peer_config)) + tgt_hdl->tif_ops->peer_config(psoc, tgt_hdl, evt_buf); +} + +/** + * target_if_mesh_support_enable - Enable MESH mode support + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable Mesh mode + * + * Return: none + */ +static inline void target_if_mesh_support_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->mesh_support_enable)) + tgt_hdl->tif_ops->mesh_support_enable(psoc, tgt_hdl, evt_buf); +} + +/** + * target_if_smart_antenna_enable - Enable Smart antenna module + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable Smart antenna + * + * Return: none + */ +static inline void target_if_smart_antenna_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->smart_antenna_enable)) + tgt_hdl->tif_ops->smart_antenna_enable(psoc, tgt_hdl, evt_buf); +} + +/** + * target_if_atf_cfg_enable - Enable ATF config + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable ATF config + * + * Return: none + */ +static inline void target_if_atf_cfg_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->atf_config_enable)) + tgt_hdl->tif_ops->atf_config_enable(psoc, tgt_hdl, evt_buf); +} + +/** + * target_if_qwrap_cfg_enable - Enable QWRAP config + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable QWRAP config + * + * Return: none + */ +static inline void target_if_qwrap_cfg_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) 
&& + (tgt_hdl->tif_ops->qwrap_config_enable)) + tgt_hdl->tif_ops->qwrap_config_enable(psoc, tgt_hdl, evt_buf); +} + +/** + * target_if_btcoex_cfg_enable - Enable BT coex config + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable BT coex config + * + * Return: none + */ +static inline void target_if_btcoex_cfg_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->btcoex_config_enable)) + tgt_hdl->tif_ops->btcoex_config_enable(psoc, tgt_hdl, evt_buf); +} + +/** + * target_if_lteu_cfg_enable - Enable LTEU config + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API to enable LTEU coex config + * + * Return: none + */ +static inline void target_if_lteu_cfg_enable(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->lteu_ext_support_enable)) + tgt_hdl->tif_ops->lteu_ext_support_enable(psoc, tgt_hdl, + evt_buf); +} + +/** + * target_if_set_init_cmd_dev_param - Set init command params + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * + * API to set init command param based on config + * + * Return: none + */ +static inline void target_if_set_init_cmd_dev_param( + struct wlan_objmgr_psoc *psoc, struct target_psoc_info *tgt_hdl) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->set_init_cmd_dev_based_params)) { + tgt_hdl->tif_ops->set_init_cmd_dev_based_params(psoc, + tgt_hdl); + } +} + +/** + * target_if_alloc_pdevs - Allocate PDEVs + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * + * API allocates PDEVs based on ext service ready param + * + * Return: SUCCESS on pdev allocation or PDEV allocation is not needed + * FAILURE, if allocation fails + */ +static inline QDF_STATUS target_if_alloc_pdevs(struct wlan_objmgr_psoc *psoc, + 
struct target_psoc_info *tgt_hdl) +{ + QDF_STATUS ret_val; + + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->alloc_pdevs)) + ret_val = tgt_hdl->tif_ops->alloc_pdevs(psoc, tgt_hdl); + else + ret_val = QDF_STATUS_SUCCESS; + + return ret_val; +} + +/** + * target_if_update_pdev_tgt_info - Update PDEVs info + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * + * API updates PDEVs info based on config + * + * Return: SUCCESS on pdev updation or PDEV updation is not needed + * FAILURE, if updation fails + */ +static inline QDF_STATUS target_if_update_pdev_tgt_info( + struct wlan_objmgr_psoc *psoc, struct target_psoc_info *tgt_hdl) +{ + QDF_STATUS ret_val; + + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->update_pdev_tgt_info)) + ret_val = tgt_hdl->tif_ops->update_pdev_tgt_info(psoc, + tgt_hdl); + else + ret_val = QDF_STATUS_SUCCESS; + + return ret_val; +} + +/** + * target_if_print_service_ready_ext_param - Print Service ready ext param + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * + * API to print service ready ext param + * + * Return: none + */ +static inline void target_if_print_service_ready_ext_param( + struct wlan_objmgr_psoc *psoc, struct target_psoc_info *tgt_hdl) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->print_svc_ready_ex_param)) { + tgt_hdl->tif_ops->print_svc_ready_ex_param(psoc, + tgt_hdl); + } +} + +/** + * target_if_add_11ax_modes - Add 11ax modes explicitly + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * + * API to adds 11ax modes + * + * Return: none + */ +static inline void target_if_add_11ax_modes(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->add_11ax_modes)) { + tgt_hdl->tif_ops->add_11ax_modes(psoc, tgt_hdl); + } +} + +/** + * target_if_set_default_config - Set default config in init command + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * + * API to set default config in init command + * + * 
Return: none + */ +static inline void target_if_set_default_config(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->set_default_tgt_config)) { + tgt_hdl->tif_ops->set_default_tgt_config(psoc, tgt_hdl); + } +} + +/** + * target_if_sw_version_check - SW version check + * @psoc: psoc object + * @tgt_hdl: target_psoc_info pointer + * @evt_buf: Event buffer received from FW + * + * API checks the SW version + * + * Return: SUCCESS on version matches or version check is not needed + * FAILURE, if check fails + */ +static inline QDF_STATUS target_if_sw_version_check( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *evt_buf) +{ + QDF_STATUS ret_val; + + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->sw_version_check)) + ret_val = tgt_hdl->tif_ops->sw_version_check(psoc, tgt_hdl, + evt_buf); + else + ret_val = QDF_STATUS_SUCCESS; + + return ret_val; +} + +/** + * target_if_get_phy_capability - get phy capability + * @target_psoc_info: pointer to structure target_psoc_info + * + * API to get phy capability from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_phy_capability + (struct target_psoc_info *target_psoc_info) +{ + if (target_psoc_info == NULL) + return -EINVAL; + + return target_psoc_info->info.target_caps.phy_capability; +} + +/** + * target_if_set_phy_capability - set phy capability + * @target_psoc_info: pointer to structure target_psoc_info + * @phy_capab: PHY capabilities + * + * API to set phy capability in the target caps + * + * Return: None + */ +static inline void target_if_set_phy_capability + (struct target_psoc_info *target_psoc_info, int phy_capability) +{ + if (target_psoc_info == NULL) + return; + + target_psoc_info->info.target_caps.phy_capability = phy_capability; +} + +/** + * target_if_set_max_frag_entry - set Maximum frag entries + * @target_psoc_info: pointer to structure target_psoc_info + * @max_frag_entry: 
Maximum frag entries + * + * API to set Maximum frag entries from the target caps + * + * Return: None + */ +static inline void target_if_set_max_frag_entry + (struct target_psoc_info *target_psoc_info, int max_frag_entry) +{ + if (target_psoc_info == NULL) + return; + + target_psoc_info->info.target_caps.max_frag_entry = max_frag_entry; +} + +/** + * target_if_get_max_frag_entry - get Maximum frag entries + * @target_psoc_info: pointer to structure target_psoc_info + * + * API to get Maximum frag entries from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_max_frag_entry + (struct target_psoc_info *target_psoc_info) +{ + if (target_psoc_info == NULL) + return -EINVAL; + + return target_psoc_info->info.target_caps.max_frag_entry; +} + +/** + * target_if_get_ht_cap_info - get ht capabilities info + * @target_psoc_info: pointer to structure target_psoc_info + * + * API to get ht capabilities info from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_ht_cap_info + (struct target_psoc_info *target_psoc_info) +{ + if (target_psoc_info == NULL) + return -EINVAL; + + return target_psoc_info->info.target_caps.ht_cap_info; +} + +/** + * target_if_get_vht_cap_info - get vht capabilities info + * @target_psoc_info: pointer to structure target_psoc_info + * + * API to get vht capabilities info from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_vht_cap_info + (struct target_psoc_info *target_psoc_info) +{ + if (target_psoc_info == NULL) + return -EINVAL; + + return target_psoc_info->info.target_caps.vht_cap_info; +} + +/** + * target_if_get_num_rf_chains - get Number of RF chains supported + * @target_psoc_info: pointer to structure target_psoc_info + * + * API to get Number of RF chains supported from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_num_rf_chains + (struct target_psoc_info *target_psoc_info) +{ + if (target_psoc_info == NULL) 
+ return -EINVAL; + + return target_psoc_info->info.target_caps.num_rf_chains; +} + +/** + * target_if_get_fw_version - get firmware version + * @target_psoc_info: pointer to structure target_psoc_info + * + * API to get firmware version from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_fw_version + (struct target_psoc_info *target_psoc_info) +{ + if (target_psoc_info == NULL) + return 0; + + return target_psoc_info->info.target_caps.fw_version; +} + +/** + * target_if_get_wmi_fw_sub_feat_caps - FW sub feature capabilities + * @target_psoc_info: pointer to structure target_psoc_info + * + * API to get FW sub feature capabilities from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_wmi_fw_sub_feat_caps + (struct target_psoc_info *target_psoc_info) +{ + if (target_psoc_info == NULL) + return -EINVAL; + + return target_psoc_info->info.target_caps.wmi_fw_sub_feat_caps; +} + +/** + * target_if_get_conc_scan_config_bits - Default concurrenct scan config + * @tgt_hdl: pointer to structure target_psoc_info + * + * API to get Default concurrenct scan config from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_conc_scan_config_bits + (struct target_psoc_info *tgt_hdl) +{ + if (tgt_hdl == NULL) + return -EINVAL; + + return tgt_hdl->info.service_ext_param.default_conc_scan_config_bits; +} + +/** + * target_if_get_fw_config_bits - Default HW config bits + * @tgt_hdl: pointer to structure target_psoc_info + * + * API to get Default HW config bits from the target caps + * + * Return: int32_t + */ +static inline int32_t target_if_get_fw_config_bits + (struct target_psoc_info *tgt_hdl) +{ + if (tgt_hdl == NULL) + return -EINVAL; + + return tgt_hdl->info.service_ext_param.default_fw_config_bits; +} + +/** + * target_psoc_get_num_hw_modes - get number of dbs hardware modes + * @tgt_hdl: pointer to structure target_psoc_info + * + * API to get Number of Dual Band Simultaneous (DBS) 
hardware modes + * + * Return: int32_t + */ +static inline int32_t target_psoc_get_num_hw_modes + (struct target_psoc_info *tgt_hdl) +{ + if (tgt_hdl == NULL) + return -EINVAL; + + return tgt_hdl->info.service_ext_param.num_hw_modes; +} +#endif + diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/core/src/target_if_main.c b/drivers/staging/qca-wifi-host-cmn/target_if/core/src/target_if_main.c new file mode 100644 index 0000000000000000000000000000000000000000..b17f664f96a56a6e95bf5de37470e4c7b348a5c0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/core/src/target_if_main.c @@ -0,0 +1,577 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: API for interacting with target interface. 
+ * + */ + +#include "target_if.h" +#include "target_type.h" +#ifdef WLAN_ATF_ENABLE +#include "target_if_atf.h" +#endif +#ifdef WLAN_SA_API_ENABLE +#include "target_if_sa_api.h" +#endif +#ifdef WLAN_CONV_SPECTRAL_ENABLE +#include "target_if_spectral.h" +#endif +#include +#include +#include +#ifdef DFS_COMPONENT_ENABLE +#include +#endif + +#ifdef CONVERGED_P2P_ENABLE +#include "target_if_p2p.h" +#endif + +#ifdef WIFI_POS_CONVERGED +#include "target_if_wifi_pos.h" +#endif + +#ifdef WLAN_FEATURE_NAN_CONVERGENCE +#include "target_if_nan.h" +#endif /* WLAN_FEATURE_NAN_CONVERGENCE */ +#ifdef CONVERGED_TDLS_ENABLE +#include "target_if_tdls.h" +#endif +#ifdef QCA_SUPPORT_SON +#include +#endif +#ifdef WLAN_OFFCHAN_TXRX_ENABLE +#include +#endif +#ifdef WLAN_SUPPORT_GREEN_AP +#include +#endif +#include +#include + +#ifdef DIRECT_BUF_RX_ENABLE +#include +#endif + +#ifdef WLAN_SUPPORT_FILS +#include +#endif +#include "qdf_module.h" + +#include + +static struct target_if_ctx *g_target_if_ctx; + +struct target_if_ctx *target_if_get_ctx() +{ + return g_target_if_ctx; +} + +struct wlan_objmgr_psoc *target_if_get_psoc_from_scn_hdl(void *scn_handle) +{ + struct wlan_objmgr_psoc *psoc; + + qdf_spin_lock_bh(&g_target_if_ctx->lock); + if (scn_handle && g_target_if_ctx->get_psoc_hdl_cb) + psoc = g_target_if_ctx->get_psoc_hdl_cb(scn_handle); + else + psoc = NULL; + qdf_spin_unlock_bh(&g_target_if_ctx->lock); + + return psoc; +} + +struct wlan_objmgr_pdev *target_if_get_pdev_from_scn_hdl(void *scn_handle) +{ + struct wlan_objmgr_pdev *pdev; + + qdf_spin_lock_bh(&g_target_if_ctx->lock); + if (scn_handle && g_target_if_ctx->get_pdev_hdl_cb) + pdev = g_target_if_ctx->get_pdev_hdl_cb(scn_handle); + else + pdev = NULL; + qdf_spin_unlock_bh(&g_target_if_ctx->lock); + + return pdev; +} + +#ifdef DIRECT_BUF_RX_ENABLE +static QDF_STATUS target_if_direct_buf_rx_init(void) +{ + return direct_buf_rx_init(); +} + +static QDF_STATUS target_if_direct_buf_rx_deinit(void) +{ + return 
direct_buf_rx_deinit(); +} +#else +static QDF_STATUS target_if_direct_buf_rx_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_direct_buf_rx_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* DIRECT_BUF_RX_ENABLE */ + +QDF_STATUS target_if_open(get_psoc_handle_callback psoc_hdl_cb) +{ + g_target_if_ctx = qdf_mem_malloc(sizeof(*g_target_if_ctx)); + if (!g_target_if_ctx) { + target_if_err("Cannot allocate target if ctx"); + QDF_ASSERT(0); + return QDF_STATUS_E_NOMEM; + } + + qdf_spinlock_create(&g_target_if_ctx->lock); + + qdf_spin_lock_bh(&g_target_if_ctx->lock); + g_target_if_ctx->magic = TGT_MAGIC; + g_target_if_ctx->get_psoc_hdl_cb = psoc_hdl_cb; + qdf_spin_unlock_bh(&g_target_if_ctx->lock); + + target_if_direct_buf_rx_init(); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_close(void) +{ + if (!g_target_if_ctx) { + QDF_ASSERT(0); + target_if_err("target if ctx is null"); + return QDF_STATUS_E_INVAL; + } + + qdf_spin_lock_bh(&g_target_if_ctx->lock); + g_target_if_ctx->magic = 0; + g_target_if_ctx->get_psoc_hdl_cb = NULL; + g_target_if_ctx->get_pdev_hdl_cb = NULL; + g_target_if_ctx->service_ready_cb = NULL; + qdf_spin_unlock_bh(&g_target_if_ctx->lock); + + qdf_spinlock_destroy(&g_target_if_ctx->lock); + qdf_mem_free(g_target_if_ctx); + g_target_if_ctx = NULL; + + target_if_direct_buf_rx_deinit(); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(target_if_close); + +QDF_STATUS target_if_store_pdev_target_if_ctx( + get_pdev_handle_callback pdev_hdl_cb) +{ + if (!g_target_if_ctx) { + QDF_ASSERT(0); + target_if_err("target if ctx is null"); + return QDF_STATUS_E_INVAL; + } + + qdf_spin_lock_bh(&g_target_if_ctx->lock); + g_target_if_ctx->get_pdev_hdl_cb = pdev_hdl_cb; + qdf_spin_unlock_bh(&g_target_if_ctx->lock); + + return QDF_STATUS_SUCCESS; +} + +#ifndef WLAN_OFFCHAN_TXRX_ENABLE +static void target_if_offchan_txrx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif /* WLAN_OFFCHAN_TXRX_ENABLE */ + 
+#ifndef WLAN_ATF_ENABLE +static void target_if_atf_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif /* WLAN_ATF_ENABLE */ + +#ifndef WLAN_SA_API_ENABLE +static void target_if_sa_api_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif /* WLAN_SA_API_ENABLE */ + +#ifdef WLAN_SUPPORT_FILS +static void target_if_fd_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_fd_register_tx_ops(tx_ops); +} +#else +static void target_if_fd_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif + +#ifdef WIFI_POS_CONVERGED +static void target_if_wifi_pos_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_wifi_pos_register_tx_ops(tx_ops); +} +#else +static void target_if_wifi_pos_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif +#ifdef QCA_SUPPORT_SON +static void target_if_son_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_son_register_tx_ops(tx_ops); + return; +} +#else +static void target_if_son_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + return; +} +#endif + +#ifdef WLAN_FEATURE_NAN_CONVERGENCE +static void target_if_nan_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_nan_register_tx_ops(tx_ops); +} +#else +static void target_if_nan_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif /* WLAN_FEATURE_NAN_CONVERGENCE */ + +#ifdef CONVERGED_TDLS_ENABLE +static void target_if_tdls_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_tdls_register_tx_ops(tx_ops); +} +#else +static void target_if_tdls_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif /* CONVERGED_TDLS_ENABLE */ + +#ifdef DFS_COMPONENT_ENABLE +static void target_if_dfs_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_register_dfs_tx_ops(tx_ops); +} +#else +static void target_if_dfs_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif /* DFS_COMPONENT_ENABLE */ + +#ifdef 
WLAN_CONV_SPECTRAL_ENABLE +static void target_if_sptrl_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_sptrl_register_tx_ops(tx_ops); +} +#else +static void target_if_sptrl_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif /* WLAN_CONV_SPECTRAL_ENABLE */ + +#ifdef DIRECT_BUF_RX_ENABLE +static void target_if_direct_buf_rx_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_direct_buf_rx_register_tx_ops(tx_ops); +} +#else +static void target_if_direct_buf_rx_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ +} +#endif /* DIRECT_BUF_RX_ENABLE */ + +#ifdef WLAN_SUPPORT_GREEN_AP +static QDF_STATUS target_if_green_ap_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + return target_if_register_green_ap_tx_ops(tx_ops); +} +#else +static QDF_STATUS target_if_green_ap_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_SUPPORT_GREEN_AP */ + +static void target_if_target_tx_ops_register( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_target_tx_ops *target_tx_ops; + + if (!tx_ops) { + target_if_err("invalid tx_ops"); + return; + } + + target_tx_ops = &tx_ops->target_tx_ops; + + target_tx_ops->tgt_is_tgt_type_ar900b = + target_is_tgt_type_ar900b; + + target_tx_ops->tgt_is_tgt_type_ipq4019 = + target_is_tgt_type_ipq4019; + + target_tx_ops->tgt_is_tgt_type_qca9984 = + target_is_tgt_type_qca9984; + + target_tx_ops->tgt_is_tgt_type_qca9888 = + target_is_tgt_type_qca9888; + + target_tx_ops->tgt_get_tgt_type = + lmac_get_tgt_type; + + target_tx_ops->tgt_get_tgt_version = + lmac_get_tgt_version; + + target_tx_ops->tgt_get_tgt_revision = + lmac_get_tgt_revision; +} + +static QDF_STATUS +target_if_cp_stats_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ + return target_if_cp_stats_register_tx_ops(tx_ops); +} + +static +void target_if_ftm_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ + target_if_ftm_register_tx_ops(tx_ops); +} + 
+static +QDF_STATUS target_if_register_umac_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + /* call regulatory callback to register tx ops */ + target_if_register_regulatory_tx_ops(tx_ops); + + /* call umac callback to register legacy tx ops */ + wlan_lmac_if_umac_tx_ops_register(tx_ops); + + /* Register scan tx ops */ + target_if_scan_tx_ops_register(tx_ops); + + target_if_atf_tx_ops_register(tx_ops); + + target_if_sa_api_tx_ops_register(tx_ops); + + target_if_wifi_pos_tx_ops_register(tx_ops); + + target_if_nan_tx_ops_register(tx_ops); + + target_if_dfs_tx_ops_register(tx_ops); + + target_if_son_tx_ops_register(tx_ops); + + target_if_tdls_tx_ops_register(tx_ops); + + target_if_fd_tx_ops_register(tx_ops); + + target_if_target_tx_ops_register(tx_ops); + + target_if_offchan_txrx_ops_register(tx_ops); + + target_if_green_ap_tx_ops_register(tx_ops); + + target_if_ftm_tx_ops_register(tx_ops); + + target_if_cp_stats_tx_ops_register(tx_ops); + + /* Converged UMAC components to register their TX-ops here */ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + /* Converged UMAC components to register their TX-ops */ + target_if_register_umac_tx_ops(tx_ops); + + /* Components parallel to UMAC to register their TX-ops here */ + target_if_sptrl_tx_ops_register(tx_ops); + + /* Register direct buffer rx component tx ops here */ + target_if_direct_buf_rx_tx_ops_register(tx_ops); + +#ifdef CONVERGED_P2P_ENABLE + /* Converged UMAC components to register P2P TX-ops */ + target_if_p2p_register_tx_ops(tx_ops); +#endif + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(target_if_register_tx_ops); + +wmi_legacy_service_ready_callback +target_if_get_psoc_legacy_service_ready_cb(void) +{ + wmi_legacy_service_ready_callback service_ready_cb; + + qdf_spin_lock_bh(&g_target_if_ctx->lock); + if (g_target_if_ctx->service_ready_cb) + service_ready_cb = g_target_if_ctx->service_ready_cb; + else + service_ready_cb = NULL; + 
qdf_spin_unlock_bh(&g_target_if_ctx->lock); + + return service_ready_cb; +} +qdf_export_symbol(target_if_get_psoc_legacy_service_ready_cb); + +QDF_STATUS target_if_register_legacy_service_ready_cb( + wmi_legacy_service_ready_callback service_ready_cb) +{ + qdf_spin_lock_bh(&g_target_if_ctx->lock); + g_target_if_ctx->service_ready_cb = service_ready_cb; + qdf_spin_unlock_bh(&g_target_if_ctx->lock); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(target_if_register_legacy_service_ready_cb); + +QDF_STATUS target_if_alloc_pdev_tgt_info(struct wlan_objmgr_pdev *pdev) +{ + struct target_pdev_info *tgt_pdev_info; + + if (!pdev) { + target_if_err("pdev is null"); + return QDF_STATUS_E_INVAL; + } + + tgt_pdev_info = qdf_mem_malloc(sizeof(*tgt_pdev_info)); + + if (tgt_pdev_info == NULL) { + target_if_err("Failed to allocate pdev target info"); + return QDF_STATUS_E_NOMEM; + } + + wlan_pdev_set_tgt_if_handle(pdev, tgt_pdev_info); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_free_pdev_tgt_info(struct wlan_objmgr_pdev *pdev) +{ + struct target_pdev_info *tgt_pdev_info; + + if (!pdev) { + target_if_err("pdev is null"); + return QDF_STATUS_E_INVAL; + } + + tgt_pdev_info = wlan_pdev_get_tgt_if_handle(pdev); + + wlan_pdev_set_tgt_if_handle(pdev, NULL); + + qdf_mem_free(tgt_pdev_info); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_alloc_psoc_tgt_info(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_psoc_info; + + if (!psoc) { + target_if_err("psoc is null"); + return QDF_STATUS_E_INVAL; + } + + tgt_psoc_info = qdf_mem_malloc(sizeof(*tgt_psoc_info)); + + if (tgt_psoc_info == NULL) { + target_if_err("Failed to allocate psoc target info"); + return QDF_STATUS_E_NOMEM; + } + + wlan_psoc_set_tgt_if_handle(psoc, tgt_psoc_info); + target_psoc_set_preferred_hw_mode(tgt_psoc_info, WMI_HOST_HW_MODE_MAX); + + qdf_event_create(&tgt_psoc_info->info.event); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_free_psoc_tgt_info(struct 
wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_psoc_info; + struct wlan_psoc_host_service_ext_param *ext_param; + + if (!psoc) { + target_if_err("psoc is null"); + return QDF_STATUS_E_INVAL; + } + + tgt_psoc_info = wlan_psoc_get_tgt_if_handle(psoc); + + ext_param = target_psoc_get_service_ext_param(tgt_psoc_info); + if (!ext_param) { + target_if_err("tgt_psoc_info is NULL"); + return QDF_STATUS_E_INVAL; + } + init_deinit_chainmask_table_free(ext_param); + init_deinit_dbr_ring_cap_free(tgt_psoc_info); + + qdf_event_destroy(&tgt_psoc_info->info.event); + + wlan_psoc_set_tgt_if_handle(psoc, NULL); + + qdf_mem_free(tgt_psoc_info); + + return QDF_STATUS_SUCCESS; +} + +bool target_is_tgt_type_ar900b(uint32_t target_type) +{ + return target_type == TARGET_TYPE_AR900B; +} + +bool target_is_tgt_type_ipq4019(uint32_t target_type) +{ + return target_type == TARGET_TYPE_IPQ4019; +} + +bool target_is_tgt_type_qca9984(uint32_t target_type) +{ + return target_type == TARGET_TYPE_QCA9984; +} + +bool target_is_tgt_type_qca9888(uint32_t target_type) +{ + return target_type == TARGET_TYPE_QCA9888; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/inc/target_if_cp_stats.h b/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/inc/target_if_cp_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..58d940a6543908948869030bc339ac52936f2a7c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/inc/target_if_cp_stats.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_cp_stats.h + * + * This header file provide declarations required for Rx and Tx events from + * firmware + */ + +#ifndef __TARGET_IF_CP_STATS_H__ +#define __TARGET_IF_CP_STATS_H__ + +#include +#include +#include + +#ifdef QCA_SUPPORT_CP_STATS + +/** + * target_if_cp_stats_get_rx_ops() - get rx ops + * @tx_ops: pointer to lmac tx ops + * + * Return: pointer to rx ops + */ +static inline struct wlan_lmac_if_cp_stats_rx_ops * +target_if_cp_stats_get_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.rx_ops.cp_stats_rx_ops; +} + +/** + * target_if_cp_stats_get_tx_ops() - get tx ops + * @tx_ops: pointer to lmac tx ops + * + * Return: pointer to tx ops + */ +static inline struct wlan_lmac_if_cp_stats_tx_ops * +target_if_cp_stats_get_tx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.tx_ops.cp_stats_tx_ops; +} + +/** + * target_if_cp_stats_register_tx_ops() - define cp_stats lmac tx ops functions + * @tx_ops: pointer to lmac tx ops + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS +target_if_cp_stats_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); +#else +static inline QDF_STATUS +target_if_cp_stats_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* QCA_SUPPORT_CP_STATS */ + +#endif /* __TARGET_IF_CP_STATS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/src/target_if_cp_stats.c b/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/src/target_if_cp_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..e0e99fedf8b7ef520b091001a3db49c3f46b1756 --- 
/dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/src/target_if_cp_stats.c @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_cp_stats.c + * + * This file provide definition for APIs registered through lmac Tx Ops + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static QDF_STATUS +target_if_cp_stats_register_event_handler(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) { + cp_stats_err("PSOC is NULL!"); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +target_if_cp_stats_unregister_event_handler(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) { + cp_stats_err("PSOC is NULL!"); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +target_if_cp_stats_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_cp_stats_tx_ops *cp_stats_tx_ops; + + if (!tx_ops) { + cp_stats_err("lmac tx ops is NULL!"); + return QDF_STATUS_E_INVAL; + } + + cp_stats_tx_ops = &tx_ops->cp_stats_tx_ops; + if (!cp_stats_tx_ops) { + cp_stats_err("lmac tx ops is NULL!"); + return QDF_STATUS_E_FAILURE; + } + + 
cp_stats_tx_ops->cp_stats_attach = + target_if_cp_stats_register_event_handler; + cp_stats_tx_ops->cp_stats_detach = + target_if_cp_stats_unregister_event_handler; + + return QDF_STATUS_SUCCESS; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/src/target_if_mc_cp_stats.c b/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/src/target_if_mc_cp_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..ee72192752d034297d36d9024a8d2fc1a77ff0f9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/src/target_if_mc_cp_stats.c @@ -0,0 +1,633 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: target_if_cp_stats.c + * + * This file provide definition for APIs registered through lmac Tx Ops + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TGT_INVALID_SNR (0) +#define TGT_NOISE_FLOOR_DBM (-96) +#define TGT_MAX_SNR (TGT_NOISE_FLOOR_DBM * (-1)) +#define TGT_IS_VALID_SNR(x) ((x) >= 0 && (x) < TGT_MAX_SNR) + +static void target_if_cp_stats_free_stats_event(struct stats_event *ev) +{ + qdf_mem_free(ev->pdev_stats); + ev->pdev_stats = NULL; + qdf_mem_free(ev->peer_stats); + ev->peer_stats = NULL; + qdf_mem_free(ev->peer_adv_stats); + ev->peer_adv_stats = NULL; + qdf_mem_free(ev->cca_stats); + ev->cca_stats = NULL; + qdf_mem_free(ev->vdev_summary_stats); + ev->vdev_summary_stats = NULL; + qdf_mem_free(ev->vdev_chain_rssi); + ev->vdev_chain_rssi = NULL; +} + +static QDF_STATUS target_if_cp_stats_extract_pdev_stats( + struct wmi_unified *wmi_hdl, + wmi_host_stats_event *stats_param, + struct stats_event *ev, + uint8_t *data) +{ + uint32_t i; + QDF_STATUS status; + wmi_host_pdev_stats pdev_stats; + + ev->num_pdev_stats = stats_param->num_pdev_stats; + if (!ev->num_pdev_stats) + return QDF_STATUS_SUCCESS; + + /* + * num_pdev_stats is validated within function wmi_extract_stats_param + * which is called to populated wmi_host_stats_event stats_param + */ + ev->pdev_stats = qdf_mem_malloc(sizeof(*ev->pdev_stats) * + ev->num_pdev_stats); + if (!ev->pdev_stats) { + cp_stats_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + + for (i = 0; i < ev->num_pdev_stats; i++) { + status = wmi_extract_pdev_stats(wmi_hdl, data, i, &pdev_stats); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("wmi_extract_pdev_stats failed"); + return status; + } + ev->pdev_stats[i].max_pwr = pdev_stats.chan_tx_pwr; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_cp_stats_extract_peer_stats( + struct wmi_unified *wmi_hdl, + wmi_host_stats_event *stats_param, + struct 
stats_event *ev,
+					uint8_t *data)
+{
+	uint32_t i;
+	QDF_STATUS status;
+	wmi_host_peer_stats peer_stats;
+	struct wmi_host_peer_adv_stats *peer_adv_stats;
+
+	/* Extract peer_stats */
+	ev->num_peer_stats = stats_param->num_peer_stats;
+	if (!ev->num_peer_stats)
+		return QDF_STATUS_SUCCESS;
+
+	ev->peer_stats = qdf_mem_malloc(sizeof(*ev->peer_stats) *
+						ev->num_peer_stats);
+	if (!ev->peer_stats) {
+		cp_stats_err("malloc failed");
+		return QDF_STATUS_E_NOMEM;
+	}
+
+	for (i = 0; i < ev->num_peer_stats; i++) {
+		status = wmi_extract_peer_stats(wmi_hdl, data, i, &peer_stats);
+		if (QDF_IS_STATUS_ERROR(status)) {
+			cp_stats_err("wmi_extract_peer_stats failed");
+			continue;
+		}
+		WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats.peer_macaddr,
+					   ev->peer_stats[i].peer_macaddr);
+		ev->peer_stats[i].tx_rate = peer_stats.peer_tx_rate;
+		ev->peer_stats[i].rx_rate = peer_stats.peer_rx_rate;
+		ev->peer_stats[i].peer_rssi = peer_stats.peer_rssi +
+						TGT_NOISE_FLOOR_DBM;
+	}
+
+	/* Extract peer_adv_stats */
+	ev->num_peer_adv_stats = stats_param->num_peer_adv_stats;
+	if (!ev->num_peer_adv_stats)
+		return QDF_STATUS_SUCCESS;
+
+	ev->peer_adv_stats = qdf_mem_malloc(sizeof(*ev->peer_adv_stats) *
+					    ev->num_peer_adv_stats);
+	if (!ev->peer_adv_stats)
+		return QDF_STATUS_E_NOMEM;
+
+	peer_adv_stats = qdf_mem_malloc(sizeof(*peer_adv_stats) *
+					ev->num_peer_adv_stats);
+	if (!peer_adv_stats) {
+		qdf_mem_free(ev->peer_adv_stats);
+		/*
+		 * Clear the pointer and count so the caller's cleanup path
+		 * (target_if_cp_stats_free_stats_event) cannot double free
+		 * or walk a freed array.
+		 */
+		ev->peer_adv_stats = NULL;
+		ev->num_peer_adv_stats = 0;
+		return QDF_STATUS_E_NOMEM;
+	}
+
+	status = wmi_extract_peer_adv_stats(wmi_hdl, data, peer_adv_stats);
+	if (QDF_IS_STATUS_ERROR(status)) {
+		cp_stats_err("wmi_extract_peer_adv_stats failed");
+		qdf_mem_free(peer_adv_stats);
+		qdf_mem_free(ev->peer_adv_stats);
+		ev->peer_adv_stats = NULL;
+		ev->num_peer_adv_stats = 0;
+		return QDF_STATUS_SUCCESS;
+	}
+
+	for (i = 0; i < ev->num_peer_adv_stats; i++) {
+		qdf_mem_copy(&ev->peer_adv_stats[i].peer_macaddr,
+			     &peer_adv_stats[i].peer_macaddr,
+			     WLAN_MACADDR_LEN);
+		ev->peer_adv_stats[i].fcs_count = peer_adv_stats[i].fcs_count;
+		ev->peer_adv_stats[i].rx_bytes
= peer_adv_stats[i].rx_bytes; + ev->peer_adv_stats[i].rx_count = peer_adv_stats[i].rx_count; + } + qdf_mem_free(peer_adv_stats); + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_cp_stats_extract_cca_stats( + struct wmi_unified *wmi_hdl, + wmi_host_stats_event *stats_param, + struct stats_event *ev, uint8_t *data) +{ + QDF_STATUS status; + struct wmi_host_congestion_stats stats = {0}; + + status = wmi_extract_cca_stats(wmi_hdl, data, &stats); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_debug("no congestion stats"); + return QDF_STATUS_SUCCESS; + } + + ev->cca_stats = qdf_mem_malloc(sizeof(*ev->cca_stats)); + if (!ev->cca_stats) { + cp_stats_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + + + ev->cca_stats->vdev_id = stats.vdev_id; + ev->cca_stats->congestion = stats.congestion; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_cp_stats_extract_vdev_summary_stats( + struct wmi_unified *wmi_hdl, + wmi_host_stats_event *stats_param, + struct stats_event *ev, uint8_t *data) +{ + uint32_t i, j; + QDF_STATUS status; + int32_t bcn_snr, dat_snr; + wmi_host_vdev_stats vdev_stats; + + ev->num_summary_stats = stats_param->num_vdev_stats; + if (!ev->num_summary_stats) + return QDF_STATUS_SUCCESS; + + ev->vdev_summary_stats = qdf_mem_malloc(sizeof(*ev->vdev_summary_stats) + * ev->num_summary_stats); + + if (!ev->vdev_summary_stats) { + cp_stats_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + + for (i = 0; i < ev->num_summary_stats; i++) { + status = wmi_extract_vdev_stats(wmi_hdl, data, i, &vdev_stats); + if (QDF_IS_STATUS_ERROR(status)) + continue; + + bcn_snr = vdev_stats.vdev_snr.bcn_snr; + dat_snr = vdev_stats.vdev_snr.dat_snr; + ev->vdev_summary_stats[i].vdev_id = vdev_stats.vdev_id; + + for (j = 0; j < 4; j++) { + ev->vdev_summary_stats[i].stats.tx_frm_cnt[j] + = vdev_stats.tx_frm_cnt[j]; + ev->vdev_summary_stats[i].stats.fail_cnt[j] + = vdev_stats.fail_cnt[j]; + ev->vdev_summary_stats[i].stats.multiple_retry_cnt[j] + = 
vdev_stats.multiple_retry_cnt[j]; + } + + ev->vdev_summary_stats[i].stats.rx_frm_cnt = + vdev_stats.rx_frm_cnt; + ev->vdev_summary_stats[i].stats.rx_error_cnt = + vdev_stats.rx_err_cnt; + ev->vdev_summary_stats[i].stats.rx_discard_cnt = + vdev_stats.rx_discard_cnt; + ev->vdev_summary_stats[i].stats.ack_fail_cnt = + vdev_stats.ack_fail_cnt; + ev->vdev_summary_stats[i].stats.rts_succ_cnt = + vdev_stats.rts_succ_cnt; + ev->vdev_summary_stats[i].stats.rts_fail_cnt = + vdev_stats.rts_fail_cnt; + /* Update SNR and RSSI in SummaryStats */ + if (TGT_IS_VALID_SNR(bcn_snr)) { + ev->vdev_summary_stats[i].stats.snr = bcn_snr; + ev->vdev_summary_stats[i].stats.rssi = + bcn_snr + TGT_NOISE_FLOOR_DBM; + } else if (TGT_IS_VALID_SNR(dat_snr)) { + ev->vdev_summary_stats[i].stats.snr = dat_snr; + ev->vdev_summary_stats[i].stats.rssi = + dat_snr + TGT_NOISE_FLOOR_DBM; + } else { + ev->vdev_summary_stats[i].stats.snr = TGT_INVALID_SNR; + ev->vdev_summary_stats[i].stats.rssi = 0; + } + } + + return QDF_STATUS_SUCCESS; +} + + +static QDF_STATUS target_if_cp_stats_extract_vdev_chain_rssi_stats( + struct wmi_unified *wmi_hdl, + wmi_host_stats_event *stats_param, + struct stats_event *ev, uint8_t *data) +{ + uint32_t i, j; + QDF_STATUS status; + int32_t bcn_snr, dat_snr; + struct wmi_host_per_chain_rssi_stats rssi_stats; + + ev->num_chain_rssi_stats = stats_param->num_rssi_stats; + if (!ev->num_chain_rssi_stats) + return QDF_STATUS_SUCCESS; + + ev->vdev_chain_rssi = qdf_mem_malloc(sizeof(*ev->vdev_chain_rssi) * + ev->num_chain_rssi_stats); + if (!ev->vdev_chain_rssi) { + cp_stats_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + + for (i = 0; i < ev->num_chain_rssi_stats; i++) { + status = wmi_extract_per_chain_rssi_stats(wmi_hdl, data, i, + &rssi_stats); + if (QDF_IS_STATUS_ERROR(status)) + continue; + + for (j = 0; j < MAX_NUM_CHAINS; j++) { + dat_snr = rssi_stats.rssi_avg_data[j]; + bcn_snr = rssi_stats.rssi_avg_beacon[j]; + cp_stats_err("Chain %d SNR bcn: %d data: %d", j, + 
bcn_snr, dat_snr); + if (TGT_IS_VALID_SNR(bcn_snr)) + ev->vdev_chain_rssi[i].chain_rssi[j] = bcn_snr; + else if (TGT_IS_VALID_SNR(dat_snr)) + ev->vdev_chain_rssi[i].chain_rssi[j] = dat_snr; + else + /* + * Firmware sends invalid snr till it sees + * Beacon/Data after connection since after + * vdev up fw resets the snr to invalid. In this + * duartion Host will return an invalid rssi + * value. + */ + ev->vdev_chain_rssi[i].chain_rssi[j] = + TGT_INVALID_SNR; + /* + * Get the absolute rssi value from the current rssi + * value the snr value is hardcoded into 0 in the + * qcacld-new/CORE stack + */ + ev->vdev_chain_rssi[i].chain_rssi[j] += + TGT_NOISE_FLOOR_DBM; + } + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_cp_stats_extract_event(struct wmi_unified *wmi_hdl, + struct stats_event *ev, + uint8_t *data) +{ + QDF_STATUS status; + wmi_host_stats_event stats_param = {0}; + + status = wmi_extract_stats_param(wmi_hdl, data, &stats_param); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("stats param extract failed: %d", status); + return status; + } + cp_stats_debug("num: pdev: %d, vdev: %d, peer: %d, rssi: %d", + stats_param.num_pdev_stats, stats_param.num_vdev_stats, + stats_param.num_peer_stats, stats_param.num_rssi_stats); + + ev->last_event = stats_param.last_event; + status = target_if_cp_stats_extract_pdev_stats(wmi_hdl, &stats_param, + ev, data); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = target_if_cp_stats_extract_peer_stats(wmi_hdl, &stats_param, + ev, data); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = target_if_cp_stats_extract_cca_stats(wmi_hdl, &stats_param, + ev, data); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = target_if_cp_stats_extract_vdev_summary_stats(wmi_hdl, + &stats_param, + ev, data); + if (QDF_IS_STATUS_ERROR(status)) + return status; + + status = target_if_cp_stats_extract_vdev_chain_rssi_stats(wmi_hdl, + &stats_param, + ev, data); + if 
(QDF_IS_STATUS_ERROR(status)) + return status; + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_mc_cp_stats_stats_event_handler() - function to handle stats event + * from firmware. + * @scn: scn handle + * @data: data buffer for event + * @datalen: data length + * + * Return: status of operation. + */ +static int target_if_mc_cp_stats_stats_event_handler(ol_scn_t scn, + uint8_t *data, + uint32_t datalen) +{ + QDF_STATUS status; + struct stats_event ev = {0}; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_cp_stats_rx_ops *rx_ops; + + if (!scn || !data) { + cp_stats_err("scn: 0x%pK, data: 0x%pK", scn, data); + return -EINVAL; + } + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + cp_stats_err("null psoc"); + return -EINVAL; + } + + rx_ops = target_if_cp_stats_get_rx_ops(psoc); + if (!rx_ops || !rx_ops->process_stats_event) { + cp_stats_err("callback not registered"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + cp_stats_err("wmi_handle is null"); + return -EINVAL; + } + + status = target_if_cp_stats_extract_event(wmi_handle, &ev, data); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("extract event failed"); + goto end; + } + + status = rx_ops->process_stats_event(psoc, &ev); + +end: + target_if_cp_stats_free_stats_event(&ev); + + return qdf_status_to_os_return(status); +} + +static void target_if_cp_stats_inc_wake_lock_stats(uint32_t reason, + struct wake_lock_stats *stats, + uint32_t *unspecified_wake_count) +{ + switch (reason) { + case WOW_REASON_UNSPECIFIED: + (*unspecified_wake_count)++; + break; + + case WOW_REASON_ASSOC_REQ_RECV: + stats->mgmt_assoc++; + break; + + case WOW_REASON_DISASSOC_RECVD: + stats->mgmt_disassoc++; + break; + + case WOW_REASON_ASSOC_RES_RECV: + stats->mgmt_assoc_resp++; + break; + + case WOW_REASON_REASSOC_REQ_RECV: + stats->mgmt_reassoc++; + break; + + case WOW_REASON_REASSOC_RES_RECV: + stats->mgmt_reassoc_resp++; + 
break; + + case WOW_REASON_AUTH_REQ_RECV: + stats->mgmt_auth++; + break; + + case WOW_REASON_DEAUTH_RECVD: + stats->mgmt_deauth++; + break; + + case WOW_REASON_ACTION_FRAME_RECV: + stats->mgmt_action++; + break; + + case WOW_REASON_NLOD: + stats->pno_match_wake_up_count++; + break; + + case WOW_REASON_NLO_SCAN_COMPLETE: + stats->pno_complete_wake_up_count++; + break; + + case WOW_REASON_LOW_RSSI: + stats->low_rssi_wake_up_count++; + break; + + case WOW_REASON_EXTSCAN: + stats->gscan_wake_up_count++; + break; + + case WOW_REASON_RSSI_BREACH_EVENT: + stats->rssi_breach_wake_up_count++; + break; + + case WOW_REASON_OEM_RESPONSE_EVENT: + stats->oem_response_wake_up_count++; + break; + + case WOW_REASON_11D_SCAN: + stats->scan_11d++; + break; + + case WOW_REASON_CHIP_POWER_FAILURE_DETECT: + stats->pwr_save_fail_detected++; + break; + + default: + break; + } +} + +static QDF_STATUS +target_if_cp_stats_register_event_handler(struct wlan_objmgr_psoc *psoc) +{ + int ret_val; + struct wmi_unified *wmi_handle; + + if (!psoc) { + cp_stats_err("PSOC is NULL!"); + return QDF_STATUS_E_NULL_VALUE; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + cp_stats_err("wmi_handle is null"); + return QDF_STATUS_E_INVAL; + } + + ret_val = wmi_unified_register_event_handler( + wmi_handle, + wmi_update_stats_event_id, + target_if_mc_cp_stats_stats_event_handler, + WMI_RX_WORK_CTX); + if (ret_val) + cp_stats_err("Failed to register stats event cb"); + + return qdf_status_from_os_return(ret_val); +} + +static QDF_STATUS +target_if_cp_stats_unregister_event_handler(struct wlan_objmgr_psoc *psoc) +{ + struct wmi_unified *wmi_handle; + + if (!psoc) { + cp_stats_err("PSOC is NULL!"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + cp_stats_err("wmi_handle is null"); + return QDF_STATUS_E_INVAL; + } + wmi_unified_unregister_event_handler(wmi_handle, + wmi_update_stats_event_id); + + return QDF_STATUS_SUCCESS; 
+}
+
+static uint32_t get_stats_id(enum stats_req_type type)
+{
+	switch (type) {
+	default:
+		break;
+	case TYPE_CONNECTION_TX_POWER:
+		return WMI_REQUEST_PDEV_STAT;
+	case TYPE_PEER_STATS:
+		return WMI_REQUEST_PEER_STAT;
+	case TYPE_STATION_STATS:
+		return (WMI_REQUEST_AP_STAT |
+			WMI_REQUEST_PEER_STAT |
+			WMI_REQUEST_VDEV_STAT |
+			WMI_REQUEST_PDEV_STAT |
+			WMI_REQUEST_PEER_EXTD2_STAT |
+			WMI_REQUEST_RSSI_PER_CHAIN_STAT);
+	}
+
+	return 0;
+}
+
+/**
+ * target_if_cp_stats_send_stats_req() - API to send stats request to wmi
+ * @psoc: pointer to psoc object
+ * @type: type of the stats request
+ * @req: pointer to object containing stats request parameters
+ *
+ * Return: status of operation.
+ */
+static QDF_STATUS target_if_cp_stats_send_stats_req(
+					struct wlan_objmgr_psoc *psoc,
+					enum stats_req_type type,
+					struct request_info *req)
+{
+	struct wmi_unified *wmi_handle;
+	struct stats_request_params param = {0};
+
+	wmi_handle = get_wmi_unified_hdl_from_psoc(psoc);
+	if (!wmi_handle) {
+		cp_stats_err("wmi_handle is null.");
+		return QDF_STATUS_E_NULL_VALUE;
+	}
+	/* refer (WMI_REQUEST_STATS_CMDID) */
+	param.stats_id = get_stats_id(type);
+	param.vdev_id = req->vdev_id;
+	param.pdev_id = req->pdev_id;
+
+	return wmi_unified_stats_request_send(wmi_handle, req->peer_mac_addr,
+					      &param);
+}
+
+QDF_STATUS
+target_if_cp_stats_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops)
+{
+	struct wlan_lmac_if_cp_stats_tx_ops *cp_stats_tx_ops;
+
+	if (!tx_ops) {
+		cp_stats_err("lmac tx ops is NULL!");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/*
+	 * cp_stats_tx_ops points at a member embedded inside *tx_ops, so it
+	 * can never be NULL once tx_ops has been validated above; no extra
+	 * NULL check is needed.
+	 */
+	cp_stats_tx_ops = &tx_ops->cp_stats_tx_ops;
+
+	cp_stats_tx_ops->cp_stats_attach =
+		target_if_cp_stats_register_event_handler;
+	cp_stats_tx_ops->cp_stats_detach =
+		target_if_cp_stats_unregister_event_handler;
+	cp_stats_tx_ops->inc_wake_lock_stats =
+		target_if_cp_stats_inc_wake_lock_stats;
+	cp_stats_tx_ops->send_req_stats = target_if_cp_stats_send_stats_req;
+
+	return
QDF_STATUS_SUCCESS; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs.h b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs.h new file mode 100644 index 0000000000000000000000000000000000000000..2e7b95a8434019e10acee368fef95966cd6e78db --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: target_if_dfs.h + * This file contains dfs target interface + */ + +/** + * target_if_register_dfs_tx_ops() - register dfs tx ops + * @dfs_tx_ops: tx ops pointer + * + * Register dfs tx ops + * + * Return: QDF_STATUS + */ +QDF_STATUS target_if_register_dfs_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_dfs_get_rx_ops() - Get dfs_rx_ops + * @psoc: psoc handle. + * + * Return: dfs_rx_ops. 
+ */ +static inline struct wlan_lmac_if_dfs_rx_ops * +target_if_dfs_get_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.rx_ops.dfs_rx_ops; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs_full_offload.h b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs_full_offload.h new file mode 100644 index 0000000000000000000000000000000000000000..bc1dc5808f749fcf5ff72c7beb493f8b9eb4850f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs_full_offload.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: target_if_dfs_full_offload.h + * This file contains dfs target interface for full-offload. + */ + +#ifndef _TARGET_IF_DFS_FULL_OFFLOAD_H_ +#define _TARGET_IF_DFS_FULL_OFFLOAD_H_ + +/** + * target_if_dfs_reg_offload_events() - registers dfs events for full offload. + * @psoc: Pointer to psoc object. 
+ * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_FULL_OFFLOAD) +QDF_STATUS target_if_dfs_reg_offload_events(struct wlan_objmgr_psoc *psoc); +#else +static QDF_STATUS +target_if_dfs_reg_offload_events(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * target_process_bang_radar_cmd() - fill unit test args and send bangradar + * command to firmware. + * @pdev: Pointer to DFS pdev object. + * @dfs_unit_test: Pointer to dfs_unit_test structure. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_FULL_OFFLOAD) +QDF_STATUS target_process_bang_radar_cmd(struct wlan_objmgr_pdev *pdev, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test); +#else +static QDF_STATUS target_process_bang_radar_cmd(struct wlan_objmgr_pdev *pdev, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#endif /* _TARGET_IF_DFS_FULL_OFFLOAD_H_ */ + diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs_partial_offload.h b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs_partial_offload.h new file mode 100644 index 0000000000000000000000000000000000000000..f1f092906459c50f5d825b278f83cfbec0b9713e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/inc/target_if_dfs_partial_offload.h @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: target_if_dfs_partial_offload.h + * This file contains dfs target interface for partial offload. + */ + +#ifndef _TARGET_IF_DFS_PARTIAL_OFFLOAD_H_ +#define _TARGET_IF_DFS_PARTIAL_OFFLOAD_H_ + +/** + * target_if_dfs_reg_phyerr_events() - register phyerror events. + * @psoc: Pointer to psoc object. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +QDF_STATUS target_if_dfs_reg_phyerr_events(struct wlan_objmgr_psoc *psoc); +#else +static QDF_STATUS +target_if_dfs_reg_phyerr_events(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * target_if_dfs_get_caps() - get dfs caps. + * @pdev: Pointer to DFS pdev object. + * @dfs_caps: Pointer to dfs_caps structure. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +QDF_STATUS target_if_dfs_get_caps(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_caps *dfs_caps); +#else +static inline QDF_STATUS target_if_dfs_get_caps(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_caps *dfs_caps) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * target_if_dfs_status_check_event_handler() - Host dfs confirmation event + * handler. 
+ * @scn: Handle to HIF context + * @data: radar event buffer + * @datalen: radar event buffer length + * + * Return: 0 on success + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +int target_if_dfs_status_check_event_handler(ol_scn_t scn, + uint8_t *data, + uint32_t datalen); +#else +static inline +int target_if_dfs_status_check_event_handler(ol_scn_t scn, + uint8_t *data, + uint32_t datalen) +{ + return 0; +} +#endif + +/** + * target_if_dfs_send_avg_params_to_fw() - Send average parameters to FW. + * @pdev: pdev pointer + * @params: Pointer to dfs_radar_found_params structure. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS target_if_dfs_send_avg_params_to_fw( + struct wlan_objmgr_pdev *pdev, + struct dfs_radar_found_params *params); +#else +static inline +QDF_STATUS target_if_dfs_send_avg_params_to_fw( + struct wlan_objmgr_pdev *pdev, + struct dfs_radar_found_params *params) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif /* _TARGET_IF_DFS_PARTIAL_OFFLOAD_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs.c b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs.c new file mode 100644 index 0000000000000000000000000000000000000000..17e0844e68e8e0166559b128f569797ef17f278e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs.c @@ -0,0 +1,392 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: target_if_dfs.c + * This file contains dfs target interface + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "wlan_dfs_tgt_api.h" +#include "target_type.h" +#include +#include +#include +#include + +/** + * target_if_dfs_register_host_status_check_event() - Register host dfs + * confirmation event. + * @psoc: pointer to psoc. + * + * Return: QDF_STATUS. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +static QDF_STATUS target_if_dfs_register_host_status_check_event( + struct wlan_objmgr_psoc *psoc) + +{ + wmi_unified_t wmi_handle; + QDF_STATUS retval; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + return QDF_STATUS_E_FAILURE; + } + + retval = wmi_unified_register_event(wmi_handle, + wmi_host_dfs_status_check_event_id, + target_if_dfs_status_check_event_handler); + if (QDF_IS_STATUS_ERROR(retval)) + target_if_err("wmi_dfs_radar_detection_event_id ret=%d", + retval); + + return retval; +} +#else +static QDF_STATUS target_if_dfs_register_host_status_check_event( + struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * target_if_is_dfs_3() - Is dfs3 support or not + * @target_type: target type being used. + * + * Return: true if dfs3 is supported, false otherwise. 
+ */ +static bool target_if_is_dfs_3(uint32_t target_type) +{ + bool is_dfs_3; + + switch (target_type) { + case TARGET_TYPE_AR6320: + is_dfs_3 = false; + break; + case TARGET_TYPE_ADRASTEA: + is_dfs_3 = true; + break; + default: + is_dfs_3 = true; + } + + return is_dfs_3; +} + +#ifdef QCA_MCL_DFS_SUPPORT +/** + * target_if_radar_event_handler() - handle radar event when + * phyerr filter offload is enabled. + * @scn: Handle to HIF context + * @data: radar event buffer + * @datalen: radar event buffer length + * + * Return: 0 on success; error code otherwise +*/ +static int target_if_radar_event_handler( + ol_scn_t scn, uint8_t *data, uint32_t datalen) +{ + struct radar_event_info wlan_radar_event; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct wlan_lmac_if_dfs_rx_ops *dfs_rx_ops; + struct wmi_unified *wmi_handle; + + if (!scn || !data) { + target_if_err("scn: %pK, data: %pK", scn, data); + return -EINVAL; + } + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc"); + return -EINVAL; + } + dfs_rx_ops = target_if_dfs_get_rx_ops(psoc); + + if (!dfs_rx_ops || !dfs_rx_ops->dfs_process_phyerr_filter_offload) { + target_if_err("Invalid dfs_rx_ops: %pK", dfs_rx_ops); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI context"); + return -EINVAL; + } + + if (QDF_IS_STATUS_ERROR(wmi_extract_wlan_radar_event_info( + wmi_handle, data, + &wlan_radar_event, datalen))) { + target_if_err("failed to extract wlan radar event"); + return -EFAULT; + } + pdev = wlan_objmgr_get_pdev_by_id(psoc, wlan_radar_event.pdev_id, + WLAN_DFS_ID); + if (!pdev) { + target_if_err("null pdev"); + return -EINVAL; + } + dfs_rx_ops->dfs_process_phyerr_filter_offload(pdev, + &wlan_radar_event); + wlan_objmgr_pdev_release_ref(pdev, WLAN_DFS_ID); + + return 0; +} + +/** + * target_if_reg_phyerr_events() - register dfs phyerr radar event. + * @psoc: pointer to psoc. 
+ * @pdev: pointer to pdev. + * + * Return: QDF_STATUS. + */ +static QDF_STATUS target_if_reg_phyerr_events_dfs2( + struct wlan_objmgr_psoc *psoc) +{ + int ret = -1; + struct wlan_lmac_if_dfs_rx_ops *dfs_rx_ops; + bool is_phyerr_filter_offload; + wmi_unified_t wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + return QDF_STATUS_E_INVAL; + } + + dfs_rx_ops = target_if_dfs_get_rx_ops(psoc); + if (dfs_rx_ops && dfs_rx_ops->dfs_is_phyerr_filter_offload) + if (QDF_IS_STATUS_SUCCESS( + dfs_rx_ops->dfs_is_phyerr_filter_offload(psoc, + &is_phyerr_filter_offload))) + if (is_phyerr_filter_offload) + ret = wmi_unified_register_event( + wmi_handle, + wmi_dfs_radar_event_id, + target_if_radar_event_handler); + + if (ret) { + target_if_err("failed to register wmi_dfs_radar_event_id"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS target_if_reg_phyerr_events_dfs2( + struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +static bool target_if_dfs_offload(struct wlan_objmgr_psoc *psoc) +{ + wmi_unified_t wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + return false; + } + + return wmi_service_enabled(wmi_handle, + wmi_service_dfs_phyerr_offload); +} + +static QDF_STATUS target_if_dfs_register_event_handler( + struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_psoc_info; + + if (!psoc) { + target_if_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + + if (!target_if_dfs_offload(psoc)) { + tgt_psoc_info = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_psoc_info) { + target_if_err("null tgt_psoc_info"); + return QDF_STATUS_E_FAILURE; + } + + target_if_dfs_register_host_status_check_event(psoc); + + if (target_if_is_dfs_3( + target_psoc_get_target_type(tgt_psoc_info))) + return target_if_dfs_reg_phyerr_events(psoc); + else + return 
target_if_reg_phyerr_events_dfs2(psoc); + } else { + return target_if_dfs_reg_offload_events(psoc); + } +} + +static QDF_STATUS target_if_dfs_is_pdev_5ghz(struct wlan_objmgr_pdev *pdev, + bool *is_5ghz) +{ + struct wlan_objmgr_psoc *psoc; + uint8_t pdev_id; + struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap_ptr; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + target_if_err("dfs: null psoc"); + return QDF_STATUS_E_FAILURE; + } + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + reg_cap_ptr = ucfg_reg_get_hal_reg_cap(psoc); + if (!reg_cap_ptr) { + target_if_err("dfs: reg cap null"); + return QDF_STATUS_E_FAILURE; + } + + if (reg_cap_ptr[pdev_id].wireless_modes & + WMI_HOST_REGDMN_MODE_11A) + *is_5ghz = true; + else + *is_5ghz = false; + + return QDF_STATUS_SUCCESS; +} + +#ifdef QCA_MCL_DFS_SUPPORT +/** + * target_if_dfs_set_phyerr_filter_offload() - config phyerr filter offload. + * @pdev: Pointer to DFS pdev object. + * @dfs_phyerr_filter_offload: Phyerr filter offload value. 
+ * + * Return: QDF_STATUS + */ +static QDF_STATUS target_if_dfs_set_phyerr_filter_offload( + struct wlan_objmgr_pdev *pdev, + bool dfs_phyerr_filter_offload) +{ + QDF_STATUS status; + void *wmi_handle; + + if (!pdev) { + target_if_err("null pdev"); + return QDF_STATUS_E_FAILURE; + } + + wmi_handle = GET_WMI_HDL_FROM_PDEV(pdev); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_dfs_phyerr_filter_offload_en_cmd(wmi_handle, + dfs_phyerr_filter_offload); + if (QDF_IS_STATUS_ERROR(status)) + target_if_err("phyerr filter offload %d set fail: %d", + dfs_phyerr_filter_offload, status); + + return status; +} +#else +static QDF_STATUS target_if_dfs_set_phyerr_filter_offload( + struct wlan_objmgr_pdev *pdev, + bool dfs_phyerr_filter_offload) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +static QDF_STATUS target_send_dfs_offload_enable_cmd( + struct wlan_objmgr_pdev *pdev, bool enable) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint8_t pdev_id; + void *wmi_hdl; + + if (!pdev) { + target_if_err("null pdev"); + return QDF_STATUS_E_FAILURE; + } + + wmi_hdl = GET_WMI_HDL_FROM_PDEV(pdev); + if (!wmi_hdl) { + target_if_err("null wmi_hdl"); + return QDF_STATUS_E_FAILURE; + } + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + if (enable) + status = wmi_unified_dfs_phyerr_offload_en_cmd(wmi_hdl, + pdev_id); + else + status = wmi_unified_dfs_phyerr_offload_dis_cmd(wmi_hdl, + pdev_id); + + if (QDF_IS_STATUS_ERROR(status)) + target_if_err("dfs: dfs offload cmd failed, enable:%d, pdev:%d", + enable, pdev_id); + else + target_if_debug("dfs: sent dfs offload cmd, enable:%d, pdev:%d", + enable, pdev_id); + + return status; +} + +QDF_STATUS target_if_register_dfs_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + if (!tx_ops) { + target_if_err("invalid tx_ops"); + return QDF_STATUS_E_FAILURE; + } + + dfs_tx_ops = &tx_ops->dfs_tx_ops; + dfs_tx_ops->dfs_reg_ev_handler = 
&target_if_dfs_register_event_handler; + + dfs_tx_ops->dfs_process_emulate_bang_radar_cmd = + &target_process_bang_radar_cmd; + dfs_tx_ops->dfs_is_pdev_5ghz = &target_if_dfs_is_pdev_5ghz; + dfs_tx_ops->dfs_send_offload_enable_cmd = + &target_send_dfs_offload_enable_cmd; + + dfs_tx_ops->dfs_set_phyerr_filter_offload = + &target_if_dfs_set_phyerr_filter_offload; + + dfs_tx_ops->dfs_get_caps = &target_if_dfs_get_caps; + dfs_tx_ops->dfs_send_avg_radar_params_to_fw = + &target_if_dfs_send_avg_params_to_fw; + dfs_tx_ops->dfs_is_tgt_offload = &target_if_dfs_offload; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs_full_offload.c b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs_full_offload.c new file mode 100644 index 0000000000000000000000000000000000000000..654d59b4368c8e6aabe83fb6624ecd22616b8baa --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs_full_offload.c @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: target_if_dfs_full_offload.c + * This file contains dfs target interface for full offload + */ + +#include +#include +#include +#include +#include +#include + +/** + * target_if_dfs_cac_complete_event_handler() - CAC complete indication. + * @scn: scn handle. + * @data: Pointer to data buffer. + * @datalen: data length. + * + * Return: 0 on successful indication. + */ +static int target_if_dfs_cac_complete_event_handler( + ol_scn_t scn, uint8_t *data, uint32_t datalen) +{ + struct wlan_lmac_if_dfs_rx_ops *dfs_rx_ops; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev; + int ret = 0; + uint32_t vdev_id = 0; + struct wmi_unified *wmi_handle; + + if (!scn || !data) { + target_if_err("scn: %pK, data: %pK", scn, data); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc"); + return -EINVAL; + } + + dfs_rx_ops = target_if_dfs_get_rx_ops(psoc); + if (!dfs_rx_ops || !dfs_rx_ops->dfs_dfs_cac_complete_ind) { + target_if_err("Invalid dfs_rx_ops: %pK", dfs_rx_ops); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return -EINVAL; + } + + if (wmi_extract_dfs_cac_complete_event(wmi_handle, data, &vdev_id, + datalen) != QDF_STATUS_SUCCESS) { + target_if_err("failed to extract cac complete event"); + return -EFAULT; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, vdev_id, WLAN_DFS_ID); + if (!vdev) { + target_if_err("null vdev"); + return -EINVAL; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + target_if_err("null pdev"); + ret = -EINVAL; + } + + if (!ret && (QDF_STATUS_SUCCESS != + dfs_rx_ops->dfs_dfs_cac_complete_ind(pdev, vdev_id))) { + target_if_err("dfs_dfs_cac_complete_ind failed"); + ret = -EINVAL; + } + wlan_objmgr_vdev_release_ref(vdev, WLAN_DFS_ID); + + return ret; +} + +/** + * target_if_dfs_radar_detection_event_handler() - Indicate 
RADAR detection and + * process RADAR detection. + * @scn: scn handle. + * @data: pointer to data buffer. + * @datalen: data length. + * + * Return: 0 on successful indication. + */ +static int target_if_dfs_radar_detection_event_handler( + ol_scn_t scn, uint8_t *data, uint32_t datalen) +{ + struct radar_found_info radar; + struct wlan_objmgr_psoc *psoc = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_lmac_if_dfs_rx_ops *dfs_rx_ops; + int ret = 0; + struct wmi_unified *wmi_handle; + + if (!scn || !data) { + target_if_err("scn: %pK, data: %pK", scn, data); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc"); + return -EINVAL; + } + + dfs_rx_ops = target_if_dfs_get_rx_ops(psoc); + if (!dfs_rx_ops || !dfs_rx_ops->dfs_process_radar_ind) { + target_if_err("Invalid dfs_rx_ops: %pK", dfs_rx_ops); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return -EINVAL; + } + + if (wmi_extract_dfs_radar_detection_event(wmi_handle, data, &radar, + datalen) + != QDF_STATUS_SUCCESS) { + target_if_err("failed to extract cac complete event"); + return -EFAULT; + } + + pdev = wlan_objmgr_get_pdev_by_id(psoc, radar.pdev_id, WLAN_DFS_ID); + if (!pdev) { + target_if_err("null pdev"); + return -EINVAL; + } + + if (dfs_rx_ops->dfs_process_radar_ind(pdev, + &radar) != QDF_STATUS_SUCCESS) { + target_if_err("dfs_process_radar_ind failed pdev_id=%d", + radar.pdev_id); + ret = -EINVAL; + } + + wlan_objmgr_pdev_release_ref(pdev, WLAN_DFS_ID); + + return ret; +} + +QDF_STATUS target_if_dfs_reg_offload_events( + struct wlan_objmgr_psoc *psoc) +{ + int ret1, ret2; + + ret1 = wmi_unified_register_event( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_dfs_radar_detection_event_id, + target_if_dfs_radar_detection_event_handler); + target_if_debug("wmi_dfs_radar_detection_event_id ret=%d", ret1); + + ret2 = wmi_unified_register_event( + 
get_wmi_unified_hdl_from_psoc(psoc), + wmi_dfs_cac_complete_id, + target_if_dfs_cac_complete_event_handler); + target_if_debug("wmi_dfs_cac_complete_id ret=%d", ret2); + + if (ret1 || ret2) + return QDF_STATUS_E_FAILURE; + else + return QDF_STATUS_SUCCESS; +} + +#if (defined(CONFIG_MCL) || (QCA_WIFI_QCA8074)) +QDF_STATUS target_process_bang_radar_cmd( + struct wlan_objmgr_pdev *pdev, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test) +{ + QDF_STATUS status; + struct wmi_unit_test_cmd wmi_utest; + int i; + wmi_unified_t wmi_handle; + + if (!pdev) { + target_if_err("null pdev"); + return QDF_STATUS_E_FAILURE; + } + + wmi_handle = get_wmi_unified_hdl_from_pdev(pdev); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + return QDF_STATUS_E_FAILURE; + } + + wmi_utest.vdev_id = dfs_unit_test->vdev_id; + wmi_utest.module_id = WLAN_MODULE_PHYERR_DFS; + wmi_utest.num_args = dfs_unit_test->num_args; + + for (i = 0; i < dfs_unit_test->num_args; i++) + wmi_utest.args[i] = dfs_unit_test->args[i]; + /* + * Host to Target conversion for pdev id required + * before we send a wmi unit test command + */ + wmi_utest.args[IDX_PDEV_ID] = wmi_handle->ops-> + convert_pdev_id_host_to_target(pdev->pdev_objmgr.wlan_pdev_id); + + status = wmi_unified_unit_test_cmd(wmi_handle, &wmi_utest); + if (QDF_IS_STATUS_ERROR(status)) + target_if_err("dfs: unit_test_cmd send failed %d", status); + return status; +} +#else +static QDF_STATUS target_process_bang_radar_cmd( + struct wlan_objmgr_pdev *pdev, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test) +{ + return QDF_STATUS_SUCCESS; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs_partial_offload.c b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs_partial_offload.c new file mode 100644 index 0000000000000000000000000000000000000000..af09b710a0219c7cbd197c4c0d28bd8d3b4c2079 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/dfs/src/target_if_dfs_partial_offload.c 
@@ -0,0 +1,188 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_dfs_partial_offload.c + * This file contains dfs target interface for partial offload + */ + +#include +#include "target_type.h" +#include "target_if_dfs_partial_offload.h" +#include "target_if_dfs.h" + +QDF_STATUS target_if_dfs_reg_phyerr_events(struct wlan_objmgr_psoc *psoc) +{ + /* TODO: dfs non-offload case */ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_dfs_get_caps(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_caps *dfs_caps) +{ + struct wlan_objmgr_psoc *psoc = NULL; + struct target_psoc_info *tgt_psoc_info; + + if (!dfs_caps) { + target_if_err("null dfs_caps"); + return QDF_STATUS_E_FAILURE; + } + + dfs_caps->wlan_dfs_combined_rssi_ok = 0; + dfs_caps->wlan_dfs_ext_chan_ok = 0; + dfs_caps->wlan_dfs_use_enhancement = 0; + dfs_caps->wlan_strong_signal_diversiry = 0; + dfs_caps->wlan_fastdiv_val = 0; + dfs_caps->wlan_chip_is_bb_tlv = 1; + dfs_caps->wlan_chip_is_over_sampled = 0; + dfs_caps->wlan_chip_is_ht160 = 0; + dfs_caps->wlan_chip_is_false_detect = 0; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + target_if_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + + 
tgt_psoc_info = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_psoc_info) { + target_if_err("null tgt_psoc_info"); + return QDF_STATUS_E_FAILURE; + } + + switch (target_psoc_get_target_type(tgt_psoc_info)) { + case TARGET_TYPE_AR900B: + break; + + case TARGET_TYPE_IPQ4019: + dfs_caps->wlan_chip_is_false_detect = 0; + break; + + case TARGET_TYPE_AR9888: + dfs_caps->wlan_chip_is_over_sampled = 1; + break; + + case TARGET_TYPE_QCA9984: + case TARGET_TYPE_QCA9888: + dfs_caps->wlan_chip_is_ht160 = 1; + break; + default: + break; + } + + return QDF_STATUS_SUCCESS; +} + +#if defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS target_if_dfs_send_avg_params_to_fw( + struct wlan_objmgr_pdev *pdev, + struct dfs_radar_found_params *params) +{ + QDF_STATUS status; + wmi_unified_t wmi_handle; + + if (!pdev) { + target_if_err("null pdev"); + return QDF_STATUS_E_FAILURE; + } + + wmi_handle = get_wmi_unified_hdl_from_pdev(pdev); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_dfs_send_avg_params_cmd(wmi_handle, + params); + if (QDF_IS_STATUS_ERROR(status)) + target_if_err("dfs radar found average parameters send failed: %d", + status); + + return status; +} + +int target_if_dfs_status_check_event_handler(ol_scn_t scn, + uint8_t *data, + uint32_t datalen) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct wlan_lmac_if_dfs_rx_ops *dfs_rx_ops; + u_int32_t dfs_status_check; + wmi_unified_t wmi_hdl; + + if (!scn || !data) { + target_if_err("scn: %pK, data: %pK", scn, data); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc"); + return -EINVAL; + } + + /* Since Partial Offload chipsets have only one pdev per psoc, the first + * pdev from the pdev list is used. 
+ */ + pdev = wlan_objmgr_get_pdev_by_id(psoc, 0, WLAN_DFS_ID); + if (!pdev) { + target_if_err("null pdev"); + return -EINVAL; + } + + dfs_rx_ops = target_if_dfs_get_rx_ops(psoc); + if (!dfs_rx_ops) { + target_if_err("null dfs_rx_ops"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_DFS_ID); + return -EINVAL; + } + + if (!dfs_rx_ops->dfs_action_on_status) { + target_if_err("dfs_rx_ops->dfs_action_on_status is NULL"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_DFS_ID); + return -EINVAL; + } + + wmi_hdl = get_wmi_unified_hdl_from_pdev(pdev); + if (!wmi_hdl) { + target_if_err("wmi_hdl is NULL"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_DFS_ID); + return -EINVAL; + } + + if (wmi_extract_dfs_status_from_fw(wmi_hdl, data, &dfs_status_check) != + QDF_STATUS_SUCCESS) { + target_if_err("failed to extract status response from FW"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_DFS_ID); + return -EINVAL; + } + + if (dfs_rx_ops->dfs_action_on_status(pdev, &dfs_status_check) != + QDF_STATUS_SUCCESS) { + target_if_err("dfs action on host dfs status from FW failed"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_DFS_ID); + return -EINVAL; + } + + wlan_objmgr_pdev_release_ref(pdev, WLAN_DFS_ID); + + return 0; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/inc/target_if_direct_buf_rx_api.h b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/inc/target_if_direct_buf_rx_api.h new file mode 100644 index 0000000000000000000000000000000000000000..f13888a177acb64a66ca27d6cbbe7414d5e057bf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/inc/target_if_direct_buf_rx_api.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _TARGET_IF_DIRECT_BUF_RX_API_H_ +#define _TARGET_IF_DIRECT_BUF_RX_API_H_ + +#include "qdf_nbuf.h" +#include "qdf_atomic.h" + +#define direct_buf_rx_log(level, args...) \ + QDF_TRACE(QDF_MODULE_ID_DIRECT_BUF_RX, level, ## args) +#define direct_buf_rx_logfl(level, format, args...) \ + direct_buf_rx_log(level, FL(format), ## args) +#define direct_buf_alert(format, args...) \ + direct_buf_rx_logfl(QDF_TRACE_LEVEL_FATAL, format, ## args) +#define direct_buf_rx_err(format, args...) \ + direct_buf_rx_logfl(QDF_TRACE_LEVEL_ERROR, format, ## args) +#define direct_buf_rx_warn(format, args...) \ + direct_buf_rx_logfl(QDF_TRACE_LEVEL_WARN, format, ## args) +#define direct_buf_rx_notice(format, args...) \ + direct_buf_rx_logfl(QDF_TRACE_LEVEL_INFO, format, ## args) +#define direct_buf_rx_info(format, args...) \ + direct_buf_rx_logfl(QDF_TRACE_LEVEL_INFO_HIGH, format, ## args) +#define direct_buf_rx_debug(format, args...) 
\ + direct_buf_rx_logfl(QDF_TRACE_LEVEL_DEBUG, format, ## args) +#define direct_buf_rx_enter() \ + direct_buf_rx_logfl(QDF_TRACE_LEVEL_DEBUG, "enter") +#define direct_buf_rx_exit() \ + direct_buf_rx_logfl(QDF_TRACE_LEVEL_DEBUG, "exit") + +#define DBR_MAX_CHAINS (8) + +struct wlan_objmgr_psoc; +struct wlan_lmac_if_tx_ops; + +/** + * struct direct_buf_rx_data - direct buffer rx data + * @dbr_len: Length of the buffer DMAed + * @vaddr: Virtual address of the buffer that has DMAed data + * @meta_data_valid: Indicates that metadata is valid + * @meta_data: Meta data + */ +struct direct_buf_rx_data { + size_t dbr_len; + void *vaddr; + bool meta_data_valid; + struct direct_buf_rx_metadata meta_data; +}; + +/** + * direct_buf_rx_init() - Function to initialize direct buf rx module + * + * Return: QDF status of operation + */ +QDF_STATUS direct_buf_rx_init(void); + +/** + * direct_buf_rx_deinit() - Function to deinitialize direct buf rx module + * + * Return: QDF status of operation + */ +QDF_STATUS direct_buf_rx_deinit(void); + +/** + * direct_buf_rx_target_attach() - Attach hal_soc,osdev in direct buf rx psoc obj + * @psoc: pointer to psoc object + * @hal_soc: Opaque HAL SOC handle + * @osdev: QDF os device handle + * + * Return: QDF status of operation + */ +QDF_STATUS direct_buf_rx_target_attach(struct wlan_objmgr_psoc *psoc, + void *hal_soc, qdf_device_t osdev); + +/** + * target_if_direct_buf_rx_register_tx_ops() - Register tx ops for direct buffer + * rx module + * @tx_ops: pointer to lmac interface tx ops + * + * Return: None + */ +void target_if_direct_buf_rx_register_tx_ops( + struct wlan_lmac_if_tx_ops *tx_ops); + +#endif /* _TARGET_IF_DIRECT_BUF_RX_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_api.c b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_api.c new file mode 100644 index 0000000000000000000000000000000000000000..75adb50227e22f8691a416eb1c22f01073a1f8ea 
--- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_api.c @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "target_if_direct_buf_rx_main.h" + +QDF_STATUS direct_buf_rx_init(void) +{ + QDF_STATUS status; + + status = wlan_objmgr_register_psoc_create_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_psoc_create_handler, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("Failed to register psoc create handler"); + return status; + } + + status = wlan_objmgr_register_psoc_destroy_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_psoc_destroy_handler, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("Failed to register psoc destroy handler"); + goto dbr_unreg_psoc_create; + } + + status = wlan_objmgr_register_pdev_create_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_pdev_create_handler, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("Failed to register pdev create handler"); + goto dbr_unreg_psoc_destroy; + } + + status = 
wlan_objmgr_register_pdev_destroy_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_pdev_destroy_handler, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("Failed to register pdev destroy handler"); + goto dbr_unreg_pdev_create; + } + + direct_buf_rx_info("Direct Buffer RX pdev,psoc create and destroy handlers registered"); + + return QDF_STATUS_SUCCESS; + +dbr_unreg_pdev_create: + status = wlan_objmgr_unregister_pdev_create_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_pdev_create_handler, + NULL); + +dbr_unreg_psoc_destroy: + status = wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_psoc_destroy_handler, + NULL); + +dbr_unreg_psoc_create: + status = wlan_objmgr_unregister_psoc_create_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_psoc_create_handler, + NULL); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(direct_buf_rx_init); + +QDF_STATUS direct_buf_rx_deinit(void) +{ + QDF_STATUS status; + + status = wlan_objmgr_unregister_pdev_destroy_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_pdev_destroy_handler, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) + direct_buf_rx_err("Failed to unregister pdev destroy handler"); + + status = wlan_objmgr_unregister_pdev_create_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_pdev_create_handler, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) + direct_buf_rx_err("Failed to unregister pdev create handler"); + + status = wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_psoc_destroy_handler, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) + direct_buf_rx_err("Failed to unregister psoc destroy handler"); + + status = wlan_objmgr_unregister_psoc_create_handler( + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + target_if_direct_buf_rx_psoc_create_handler, + NULL); + + if 
(QDF_IS_STATUS_ERROR(status)) + direct_buf_rx_err("Failed to unregister psoc create handler"); + + direct_buf_rx_info("Direct Buffer RX pdev,psoc create and destroy handlers unregistered"); + + return status; +} +qdf_export_symbol(direct_buf_rx_deinit); + +QDF_STATUS direct_buf_rx_target_attach(struct wlan_objmgr_psoc *psoc, + void *hal_soc, qdf_device_t osdev) +{ + struct direct_buf_rx_psoc_obj *dbr_psoc_obj; + + if (hal_soc == NULL || osdev == NULL) { + direct_buf_rx_err("hal soc or osdev is null"); + return QDF_STATUS_E_INVAL; + } + + dbr_psoc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + direct_buf_rx_info("Dbr psoc obj %pK", dbr_psoc_obj); + + if (dbr_psoc_obj == NULL) { + direct_buf_rx_err("dir buf rx psoc obj is null"); + return QDF_STATUS_E_FAILURE; + } + + dbr_psoc_obj->hal_soc = hal_soc; + dbr_psoc_obj->osdev = osdev; + + return QDF_STATUS_SUCCESS; +} + +void target_if_direct_buf_rx_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + tx_ops->dbr_tx_ops.direct_buf_rx_module_register = + target_if_direct_buf_rx_module_register; + tx_ops->dbr_tx_ops.direct_buf_rx_register_events = + target_if_direct_buf_rx_register_events; + tx_ops->dbr_tx_ops.direct_buf_rx_unregister_events = + target_if_direct_buf_rx_unregister_events; +} +qdf_export_symbol(target_if_direct_buf_rx_register_tx_ops); diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_main.c b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_main.c new file mode 100644 index 0000000000000000000000000000000000000000..36cfeb9644cee1f67057248b1a87c201ab402b55 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_main.c @@ -0,0 +1,1039 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "target_if.h" +#include "wmi_unified_api.h" +#include "wlan_lmac_if_def.h" +#include "target_if_direct_buf_rx_main.h" +#include +#include "hal_api.h" +#include +#include + +static uint8_t get_num_dbr_modules_per_pdev(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_psoc_host_dbr_ring_caps *dbr_ring_cap; + uint8_t num_dbr_ring_caps, cap_idx, pdev_id, num_modules; + struct target_psoc_info *tgt_psoc_info; + struct wlan_psoc_host_service_ext_param *ext_svc_param; + + psoc = wlan_pdev_get_psoc(pdev); + + if (psoc == NULL) { + direct_buf_rx_err("psoc is null"); + return 0; + } + + tgt_psoc_info = wlan_psoc_get_tgt_if_handle(psoc); + if (tgt_psoc_info == NULL) { + direct_buf_rx_err("target_psoc_info is null"); + return 0; + } + ext_svc_param = target_psoc_get_service_ext_param(tgt_psoc_info); + num_dbr_ring_caps = ext_svc_param->num_dbr_ring_caps; + dbr_ring_cap = target_psoc_get_dbr_ring_caps(tgt_psoc_info); + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + num_modules = 0; + + for (cap_idx = 0; cap_idx < num_dbr_ring_caps; cap_idx++) { + if (dbr_ring_cap[cap_idx].pdev_id == pdev_id) + num_modules++; + } + + return num_modules; +} + +static QDF_STATUS populate_dbr_cap_mod_param(struct 
wlan_objmgr_pdev *pdev, + struct direct_buf_rx_module_param *mod_param) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_psoc_host_dbr_ring_caps *dbr_ring_cap; + uint8_t cap_idx; + bool cap_found = false; + enum DBR_MODULE mod_id = mod_param->mod_id; + uint32_t num_dbr_ring_caps, pdev_id; + struct target_psoc_info *tgt_psoc_info; + struct wlan_psoc_host_service_ext_param *ext_svc_param; + + psoc = wlan_pdev_get_psoc(pdev); + + if (psoc == NULL) { + direct_buf_rx_err("psoc is null"); + return QDF_STATUS_E_INVAL; + } + + tgt_psoc_info = wlan_psoc_get_tgt_if_handle(psoc); + if (tgt_psoc_info == NULL) { + direct_buf_rx_err("target_psoc_info is null"); + return QDF_STATUS_E_INVAL; + } + + ext_svc_param = target_psoc_get_service_ext_param(tgt_psoc_info); + num_dbr_ring_caps = ext_svc_param->num_dbr_ring_caps; + dbr_ring_cap = target_psoc_get_dbr_ring_caps(tgt_psoc_info); + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + for (cap_idx = 0; cap_idx < num_dbr_ring_caps; cap_idx++) { + if (dbr_ring_cap[cap_idx].pdev_id == pdev_id) { + if (dbr_ring_cap[cap_idx].mod_id == mod_id) { + mod_param->dbr_ring_cap->ring_elems_min = + dbr_ring_cap[cap_idx].ring_elems_min; + mod_param->dbr_ring_cap->min_buf_size = + dbr_ring_cap[cap_idx].min_buf_size; + mod_param->dbr_ring_cap->min_buf_align = + dbr_ring_cap[cap_idx].min_buf_align; + cap_found = true; + } + } + } + + if (!cap_found) { + direct_buf_rx_err("No cap found for module %d in pdev %d", + mod_id, pdev_id); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_direct_buf_rx_pdev_create_handler( + struct wlan_objmgr_pdev *pdev, void *data) +{ + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + struct wlan_objmgr_psoc *psoc; + uint8_t num_modules; + QDF_STATUS status; + + direct_buf_rx_enter(); + + if (pdev == NULL) { + direct_buf_rx_err("pdev context passed is null"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + + if (psoc == NULL) { + direct_buf_rx_err("psoc is 
null"); + return QDF_STATUS_E_INVAL; + } + + dbr_pdev_obj = qdf_mem_malloc(sizeof(*dbr_pdev_obj)); + + if (dbr_pdev_obj == NULL) { + direct_buf_rx_err("Failed to allocate dir buf rx pdev obj"); + return QDF_STATUS_E_NOMEM; + } + + direct_buf_rx_info("Dbr pdev obj %pK", dbr_pdev_obj); + + status = wlan_objmgr_pdev_component_obj_attach(pdev, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + dbr_pdev_obj, QDF_STATUS_SUCCESS); + + if (status != QDF_STATUS_SUCCESS) { + direct_buf_rx_err("Failed to attach dir buf rx component %d", + status); + qdf_mem_free(dbr_pdev_obj); + return status; + } + + num_modules = get_num_dbr_modules_per_pdev(pdev); + direct_buf_rx_info("Number of modules = %d pdev %d", num_modules, + wlan_objmgr_pdev_get_pdev_id(pdev)); + dbr_pdev_obj->num_modules = num_modules; + + if (!dbr_pdev_obj->num_modules) { + direct_buf_rx_info("Number of modules = %d", num_modules); + return QDF_STATUS_SUCCESS; + } + + dbr_pdev_obj->dbr_mod_param = qdf_mem_malloc(num_modules * + sizeof(struct direct_buf_rx_module_param)); + + if (dbr_pdev_obj->dbr_mod_param == NULL) { + direct_buf_rx_err("Failed to allocate dir buf rx mod param"); + wlan_objmgr_pdev_component_obj_detach(pdev, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + dbr_pdev_obj); + qdf_mem_free(dbr_pdev_obj); + return QDF_STATUS_E_NOMEM; + } + + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_direct_buf_rx_pdev_destroy_handler( + struct wlan_objmgr_pdev *pdev, void *data) +{ + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + QDF_STATUS status; + uint8_t num_modules, mod_idx; + + if (pdev == NULL) { + direct_buf_rx_err("pdev context passed is null"); + return QDF_STATUS_E_INVAL; + } + + dbr_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (dbr_pdev_obj == NULL) { + direct_buf_rx_err("dir buf rx object is null"); + return QDF_STATUS_E_FAILURE; + } + + num_modules = dbr_pdev_obj->num_modules; + for (mod_idx = 0; mod_idx < num_modules; mod_idx++) + 
target_if_deinit_dbr_ring(pdev, dbr_pdev_obj, mod_idx); + + qdf_mem_free(dbr_pdev_obj->dbr_mod_param); + dbr_pdev_obj->dbr_mod_param = NULL; + + status = wlan_objmgr_pdev_component_obj_detach(pdev, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + dbr_pdev_obj); + + if (status != QDF_STATUS_SUCCESS) { + direct_buf_rx_err("failed to detach dir buf rx component %d", + status); + } + + qdf_mem_free(dbr_pdev_obj); + + return status; +} + +QDF_STATUS target_if_direct_buf_rx_psoc_create_handler( + struct wlan_objmgr_psoc *psoc, void *data) +{ + struct direct_buf_rx_psoc_obj *dbr_psoc_obj; + QDF_STATUS status; + + direct_buf_rx_enter(); + + if (psoc == NULL) { + direct_buf_rx_err("psoc context passed is null"); + return QDF_STATUS_E_INVAL; + } + + dbr_psoc_obj = qdf_mem_malloc(sizeof(*dbr_psoc_obj)); + + if (!dbr_psoc_obj) { + direct_buf_rx_err("failed to alloc dir buf rx psoc obj"); + return QDF_STATUS_E_NOMEM; + } + + direct_buf_rx_info("Dbr psoc obj %pK", dbr_psoc_obj); + + status = wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, dbr_psoc_obj, + QDF_STATUS_SUCCESS); + + if (status != QDF_STATUS_SUCCESS) { + direct_buf_rx_err("Failed to attach dir buf rx component %d", + status); + goto attach_error; + } + + return status; + +attach_error: + qdf_mem_free(dbr_psoc_obj); + + return status; +} + +QDF_STATUS target_if_direct_buf_rx_psoc_destroy_handler( + struct wlan_objmgr_psoc *psoc, void *data) +{ + QDF_STATUS status; + struct direct_buf_rx_psoc_obj *dbr_psoc_obj; + + direct_buf_rx_enter(); + + dbr_psoc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (!dbr_psoc_obj) { + direct_buf_rx_err("dir buf rx psoc obj is null"); + return QDF_STATUS_E_FAILURE; + } + + status = wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX, + dbr_psoc_obj); + + if (status != QDF_STATUS_SUCCESS) { + direct_buf_rx_err("failed to detach dir buf rx component %d", + status); + } + + 
qdf_mem_free(dbr_psoc_obj); + + return status; +} + +static QDF_STATUS target_if_dbr_replenish_ring(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_module_param *mod_param, + void *aligned_vaddr, uint8_t cookie) +{ + uint64_t *ring_entry; + uint32_t dw_lo, dw_hi = 0, map_status; + void *hal_soc, *srng; + qdf_dma_addr_t paddr; + struct wlan_objmgr_psoc *psoc; + struct direct_buf_rx_psoc_obj *dbr_psoc_obj; + struct direct_buf_rx_ring_cfg *dbr_ring_cfg; + struct direct_buf_rx_ring_cap *dbr_ring_cap; + struct direct_buf_rx_buf_info *dbr_buf_pool; + + direct_buf_rx_enter(); + + dbr_ring_cfg = mod_param->dbr_ring_cfg; + dbr_ring_cap = mod_param->dbr_ring_cap; + dbr_buf_pool = mod_param->dbr_buf_pool; + + psoc = wlan_pdev_get_psoc(pdev); + + if (!psoc) { + direct_buf_rx_err("psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + dbr_psoc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (dbr_psoc_obj == NULL) { + direct_buf_rx_err("dir buf rx psoc object is null"); + return QDF_STATUS_E_FAILURE; + } + + hal_soc = dbr_psoc_obj->hal_soc; + srng = dbr_ring_cfg->srng; + if (!aligned_vaddr) { + direct_buf_rx_err("aligned vaddr is null"); + return QDF_STATUS_SUCCESS; + } + + map_status = qdf_mem_map_nbytes_single(dbr_psoc_obj->osdev, + aligned_vaddr, + QDF_DMA_FROM_DEVICE, + dbr_ring_cap->min_buf_size, + &paddr); + if (map_status) { + direct_buf_rx_err("mem map failed status = %d", map_status); + return QDF_STATUS_E_FAILURE; + } + + QDF_ASSERT(!((uint64_t)paddr & dbr_ring_cap->min_buf_align)); + dbr_buf_pool[cookie].paddr = paddr; + + hal_srng_access_start(hal_soc, srng); + ring_entry = hal_srng_src_get_next(hal_soc, srng); + QDF_ASSERT(ring_entry != NULL); + dw_lo = (uint64_t)paddr & 0xFFFFFFFF; + WMI_HOST_DBR_RING_ADDR_HI_SET(dw_hi, (uint64_t)paddr >> 32); + WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA_SET(dw_hi, cookie); + direct_buf_rx_info("Cookie = %d", cookie); + direct_buf_rx_info("dw_lo = %x dw_hi = %x", dw_lo, dw_hi); + 
*ring_entry = (uint64_t)dw_hi << 32 | dw_lo; + direct_buf_rx_info("Valid ring entry"); + hal_srng_access_end(hal_soc, srng); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_dbr_fill_ring(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_module_param *mod_param) +{ + uint8_t idx; + void *buf, *buf_aligned; + struct direct_buf_rx_ring_cfg *dbr_ring_cfg; + struct direct_buf_rx_ring_cap *dbr_ring_cap; + struct direct_buf_rx_buf_info *dbr_buf_pool; + QDF_STATUS status; + + direct_buf_rx_enter(); + + dbr_ring_cfg = mod_param->dbr_ring_cfg; + dbr_ring_cap = mod_param->dbr_ring_cap; + dbr_buf_pool = mod_param->dbr_buf_pool; + + for (idx = 0; idx < dbr_ring_cfg->num_ptr - 1; idx++) { + buf = qdf_mem_malloc(dbr_ring_cap->min_buf_size + + dbr_ring_cap->min_buf_align - 1); + if (!buf) { + direct_buf_rx_err("dir buf rx ring buf alloc failed"); + return QDF_STATUS_E_NOMEM; + } + dbr_buf_pool[idx].vaddr = buf; + buf_aligned = (void *)(uintptr_t)qdf_roundup( + (uint64_t)(uintptr_t)buf, DBR_RING_BASE_ALIGN); + dbr_buf_pool[idx].offset = buf_aligned - buf; + dbr_buf_pool[idx].cookie = idx; + status = target_if_dbr_replenish_ring(pdev, mod_param, + buf_aligned, idx); + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("replenish failed with status : %d", + status); + qdf_mem_free(buf); + return QDF_STATUS_E_FAILURE; + } + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_dbr_init_ring(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_module_param *mod_param) +{ + void *srng; + uint32_t num_entries, ring_alloc_size, max_entries, entry_size; + qdf_dma_addr_t paddr; + struct hal_srng_params ring_params = {0}; + struct wlan_objmgr_psoc *psoc; + struct direct_buf_rx_psoc_obj *dbr_psoc_obj; + struct direct_buf_rx_ring_cap *dbr_ring_cap; + struct direct_buf_rx_ring_cfg *dbr_ring_cfg; + QDF_STATUS status; + + direct_buf_rx_enter(); + + psoc = wlan_pdev_get_psoc(pdev); + + if (!psoc) { + direct_buf_rx_err("psoc is null"); + return 
QDF_STATUS_E_FAILURE; + } + + dbr_psoc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (dbr_psoc_obj == NULL) { + direct_buf_rx_err("dir buf rx psoc object is null"); + return QDF_STATUS_E_FAILURE; + } + + if (dbr_psoc_obj->hal_soc == NULL || + dbr_psoc_obj->osdev == NULL) { + direct_buf_rx_err("dir buf rx target attach failed"); + return QDF_STATUS_E_FAILURE; + } + + max_entries = hal_srng_max_entries(dbr_psoc_obj->hal_soc, + DIR_BUF_RX_DMA_SRC); + entry_size = hal_srng_get_entrysize(dbr_psoc_obj->hal_soc, + DIR_BUF_RX_DMA_SRC); + direct_buf_rx_info("Max Entries = %d", max_entries); + direct_buf_rx_info("Entry Size = %d", entry_size); + + status = populate_dbr_cap_mod_param(pdev, mod_param); + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("Module cap population failed"); + return QDF_STATUS_E_FAILURE; + } + + dbr_ring_cap = mod_param->dbr_ring_cap; + dbr_ring_cfg = mod_param->dbr_ring_cfg; + num_entries = dbr_ring_cap->ring_elems_min > max_entries ? 
+ max_entries : dbr_ring_cap->ring_elems_min; + direct_buf_rx_info("Num entries = %d", num_entries); + dbr_ring_cfg->num_ptr = num_entries; + mod_param->dbr_buf_pool = qdf_mem_malloc(num_entries * sizeof( + struct direct_buf_rx_buf_info)); + if (!mod_param->dbr_buf_pool) { + direct_buf_rx_err("dir buf rx buf pool alloc failed"); + return QDF_STATUS_E_NOMEM; + } + + ring_alloc_size = (num_entries * entry_size) + DBR_RING_BASE_ALIGN - 1; + dbr_ring_cfg->ring_alloc_size = ring_alloc_size; + direct_buf_rx_info("dbr_psoc_obj %pK", dbr_psoc_obj); + dbr_ring_cfg->base_vaddr_unaligned = qdf_mem_alloc_consistent( + dbr_psoc_obj->osdev, dbr_psoc_obj->osdev->dev, ring_alloc_size, + &paddr); + direct_buf_rx_info("vaddr aligned allocated"); + dbr_ring_cfg->base_paddr_unaligned = paddr; + if (!dbr_ring_cfg->base_vaddr_unaligned) { + direct_buf_rx_err("dir buf rx vaddr alloc failed"); + qdf_mem_free(mod_param->dbr_buf_pool); + return QDF_STATUS_E_NOMEM; + } + + /* Alignment is defined to 8 for now. Will be advertised by FW */ + dbr_ring_cfg->base_vaddr_aligned = (void *)(uintptr_t)qdf_roundup( + (uint64_t)(uintptr_t)dbr_ring_cfg->base_vaddr_unaligned, + DBR_RING_BASE_ALIGN); + ring_params.ring_base_vaddr = dbr_ring_cfg->base_vaddr_aligned; + dbr_ring_cfg->base_paddr_aligned = qdf_roundup( + (uint64_t)dbr_ring_cfg->base_paddr_unaligned, + DBR_RING_BASE_ALIGN); + ring_params.ring_base_paddr = + (qdf_dma_addr_t)dbr_ring_cfg->base_paddr_aligned; + ring_params.num_entries = num_entries; + srng = hal_srng_setup(dbr_psoc_obj->hal_soc, DIR_BUF_RX_DMA_SRC, 0, + wlan_objmgr_pdev_get_pdev_id(pdev), &ring_params); + + if (!srng) { + direct_buf_rx_err("srng setup failed"); + qdf_mem_free(mod_param->dbr_buf_pool); + qdf_mem_free_consistent(dbr_psoc_obj->osdev, + dbr_psoc_obj->osdev->dev, + ring_alloc_size, + dbr_ring_cfg->base_vaddr_unaligned, + (qdf_dma_addr_t)dbr_ring_cfg->base_paddr_unaligned, 0); + return QDF_STATUS_E_FAILURE; + } + dbr_ring_cfg->srng = srng; + dbr_ring_cfg->tail_idx_addr 
= + hal_srng_get_tp_addr(dbr_psoc_obj->hal_soc, srng); + dbr_ring_cfg->head_idx_addr = + hal_srng_get_hp_addr(dbr_psoc_obj->hal_soc, srng); + dbr_ring_cfg->buf_size = dbr_ring_cap->min_buf_size; + + return target_if_dbr_fill_ring(pdev, mod_param); +} + +static QDF_STATUS target_if_dbr_init_srng(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_module_param *mod_param) +{ + QDF_STATUS status; + + direct_buf_rx_info("Init DBR srng"); + + if (!mod_param) { + direct_buf_rx_err("dir buf rx module param is null"); + return QDF_STATUS_E_INVAL; + } + + mod_param->dbr_ring_cap = qdf_mem_malloc(sizeof( + struct direct_buf_rx_ring_cap)); + + if (!mod_param->dbr_ring_cap) { + direct_buf_rx_err("Ring cap alloc failed"); + return QDF_STATUS_E_NOMEM; + } + + /* Allocate memory for DBR Ring Config */ + mod_param->dbr_ring_cfg = qdf_mem_malloc(sizeof( + struct direct_buf_rx_ring_cfg)); + + if (!mod_param->dbr_ring_cfg) { + direct_buf_rx_err("Ring config alloc failed"); + qdf_mem_free(mod_param->dbr_ring_cap); + return QDF_STATUS_E_NOMEM; + } + + status = target_if_dbr_init_ring(pdev, mod_param); + + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("DBR ring init failed"); + qdf_mem_free(mod_param->dbr_ring_cfg); + qdf_mem_free(mod_param->dbr_ring_cap); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_dbr_cfg_tgt(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_module_param *mod_param) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + void *wmi_hdl; + struct direct_buf_rx_cfg_req dbr_cfg_req = {0}; + struct direct_buf_rx_ring_cfg *dbr_ring_cfg; + struct direct_buf_rx_ring_cap *dbr_ring_cap; + + direct_buf_rx_enter(); + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + direct_buf_rx_err("psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + dbr_ring_cfg = mod_param->dbr_ring_cfg; + dbr_ring_cap = mod_param->dbr_ring_cap; + wmi_hdl = lmac_get_pdev_wmi_handle(pdev); + if (!wmi_hdl) { + direct_buf_rx_err("WMI 
handle null. Can't send WMI CMD"); + return QDF_STATUS_E_INVAL; + } + + direct_buf_rx_debug("Sending DBR Ring CFG to target"); + dbr_cfg_req.pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + /* Module ID numbering starts from 1 in FW. need to fix it */ + dbr_cfg_req.mod_id = mod_param->mod_id; + dbr_cfg_req.base_paddr_lo = (uint64_t)dbr_ring_cfg->base_paddr_aligned + & 0xFFFFFFFF; + dbr_cfg_req.base_paddr_hi = (uint64_t)dbr_ring_cfg->base_paddr_aligned + & 0xFFFFFFFF00000000; + dbr_cfg_req.head_idx_paddr_lo = (uint64_t)dbr_ring_cfg->head_idx_addr + & 0xFFFFFFFF; + dbr_cfg_req.head_idx_paddr_hi = (uint64_t)dbr_ring_cfg->head_idx_addr + & 0xFFFFFFFF00000000; + dbr_cfg_req.tail_idx_paddr_lo = (uint64_t)dbr_ring_cfg->tail_idx_addr + & 0xFFFFFFFF; + dbr_cfg_req.tail_idx_paddr_hi = (uint64_t)dbr_ring_cfg->tail_idx_addr + & 0xFFFFFFFF00000000; + dbr_cfg_req.num_elems = dbr_ring_cap->ring_elems_min; + dbr_cfg_req.buf_size = dbr_ring_cap->min_buf_size; + dbr_cfg_req.num_resp_per_event = DBR_NUM_RESP_PER_EVENT; + dbr_cfg_req.event_timeout_ms = DBR_EVENT_TIMEOUT_IN_MS; + direct_buf_rx_info("pdev id %d mod id %d base addr lo %x\n" + "base addr hi %x head idx addr lo %x\n" + "head idx addr hi %x tail idx addr lo %x\n" + "tail idx addr hi %x num ptr %d\n" + "num resp %d event timeout %d\n", + dbr_cfg_req.pdev_id, dbr_cfg_req.mod_id, + dbr_cfg_req.base_paddr_lo, dbr_cfg_req.base_paddr_hi, + dbr_cfg_req.head_idx_paddr_lo, + dbr_cfg_req.head_idx_paddr_hi, + dbr_cfg_req.tail_idx_paddr_lo, + dbr_cfg_req.tail_idx_paddr_hi, + dbr_cfg_req.num_elems, + dbr_cfg_req.num_resp_per_event, + dbr_cfg_req.event_timeout_ms); + status = wmi_unified_dbr_ring_cfg(wmi_hdl, &dbr_cfg_req); + + return status; +} + +static QDF_STATUS target_if_init_dbr_ring(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_pdev_obj *dbr_pdev_obj, + enum DBR_MODULE mod_id) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct direct_buf_rx_module_param *mod_param; + + direct_buf_rx_info("Init DBR ring for module %d", 
mod_id); + + if (!dbr_pdev_obj) { + direct_buf_rx_err("dir buf rx object is null"); + return QDF_STATUS_E_INVAL; + } + + mod_param = &(dbr_pdev_obj->dbr_mod_param[mod_id]); + + if (!mod_param) { + direct_buf_rx_err("dir buf rx module param is null"); + return QDF_STATUS_E_FAILURE; + } + + direct_buf_rx_info("mod_param %pK", mod_param); + + mod_param->mod_id = mod_id; + + /* Initialize DMA ring now */ + status = target_if_dbr_init_srng(pdev, mod_param); + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("DBR ring init failed %d", status); + return status; + } + + /* Send CFG request command to firmware */ + status = target_if_dbr_cfg_tgt(pdev, mod_param); + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("DBR config to target failed %d", status); + goto dbr_srng_init_failed; + } + + return QDF_STATUS_SUCCESS; + +dbr_srng_init_failed: + target_if_deinit_dbr_ring(pdev, dbr_pdev_obj, mod_id); + return status; +} + +QDF_STATUS target_if_direct_buf_rx_module_register( + struct wlan_objmgr_pdev *pdev, uint8_t mod_id, + int (*dbr_rsp_handler)(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_data *dbr_data)) +{ + QDF_STATUS status; + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + + if (pdev == NULL) { + direct_buf_rx_err("pdev context passed is null"); + return QDF_STATUS_E_INVAL; + } + + if (dbr_rsp_handler == NULL) { + direct_buf_rx_err("Response handler is null"); + return QDF_STATUS_E_INVAL; + } + + if (mod_id >= DBR_MODULE_MAX) { + direct_buf_rx_err("Invalid module id"); + return QDF_STATUS_E_INVAL; + } + + dbr_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (dbr_pdev_obj == NULL) { + direct_buf_rx_err("dir buf rx object is null"); + return QDF_STATUS_E_FAILURE; + } + direct_buf_rx_info("Dbr pdev obj %pK", dbr_pdev_obj); + + if (!dbr_pdev_obj->dbr_mod_param && + (mod_id >= dbr_pdev_obj->num_modules)) { + direct_buf_rx_err("Module %d not supported in target", mod_id); + return QDF_STATUS_E_FAILURE; + 
} + + dbr_pdev_obj->dbr_mod_param[mod_id].dbr_rsp_handler = + dbr_rsp_handler; + + status = target_if_init_dbr_ring(pdev, dbr_pdev_obj, + (enum DBR_MODULE)mod_id); + + return status; +} + +static void *target_if_dbr_vaddr_lookup( + struct direct_buf_rx_module_param *mod_param, + qdf_dma_addr_t paddr, uint32_t cookie) +{ + struct direct_buf_rx_buf_info *dbr_buf_pool; + + dbr_buf_pool = mod_param->dbr_buf_pool; + + if (dbr_buf_pool[cookie].paddr == paddr) { + return dbr_buf_pool[cookie].vaddr + + dbr_buf_pool[cookie].offset; + } + + direct_buf_rx_err("Incorrect paddr found on cookie slot"); + return NULL; +} + +static QDF_STATUS target_if_get_dbr_data(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_module_param *mod_param, + struct direct_buf_rx_rsp *dbr_rsp, + struct direct_buf_rx_data *dbr_data, + uint8_t idx, uint32_t *cookie) +{ + qdf_dma_addr_t paddr = 0; + uint32_t addr_hi; + struct direct_buf_rx_psoc_obj *dbr_psoc_obj; + struct direct_buf_rx_ring_cap *dbr_ring_cap; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + direct_buf_rx_err("psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + dbr_psoc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (dbr_psoc_obj == NULL) { + direct_buf_rx_err("dir buf rx psoc object is null"); + return QDF_STATUS_E_FAILURE; + } + + dbr_ring_cap = mod_param->dbr_ring_cap; + addr_hi = (uint64_t)WMI_HOST_DBR_DATA_ADDR_HI_GET( + dbr_rsp->dbr_entries[idx].paddr_hi); + paddr = (qdf_dma_addr_t)((uint64_t)addr_hi << 32 | + dbr_rsp->dbr_entries[idx].paddr_lo); + *cookie = WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA_GET( + dbr_rsp->dbr_entries[idx].paddr_hi); + direct_buf_rx_info("Cookie = %d", *cookie); + dbr_data->vaddr = target_if_dbr_vaddr_lookup(mod_param, paddr, *cookie); + direct_buf_rx_info("Vaddr look up = %x", dbr_data->vaddr); + dbr_data->dbr_len = dbr_rsp->dbr_entries[idx].len; + qdf_mem_unmap_nbytes_single(dbr_psoc_obj->osdev, (qdf_dma_addr_t)paddr, 
+ QDF_DMA_FROM_DEVICE, + dbr_ring_cap->min_buf_size); + + return QDF_STATUS_SUCCESS; +} + +static int target_if_direct_buf_rx_rsp_event_handler(ol_scn_t scn, + uint8_t *data_buf, + uint32_t data_len) +{ + int ret = 0; + uint8_t i = 0; + QDF_STATUS status; + uint32_t cookie = 0; + struct direct_buf_rx_rsp dbr_rsp = {0}; + struct direct_buf_rx_data dbr_data = {0}; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct direct_buf_rx_buf_info *dbr_buf_pool; + struct direct_buf_rx_pdev_obj *dbr_pdev_obj; + struct direct_buf_rx_module_param *mod_param; + + direct_buf_rx_enter(); + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + direct_buf_rx_err("psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + if (wmi_extract_dbr_buf_release_fixed(GET_WMI_HDL_FROM_PSOC(psoc), + data_buf, &dbr_rsp) != QDF_STATUS_SUCCESS) { + direct_buf_rx_err("unable to extract DBR rsp fixed param"); + return QDF_STATUS_E_FAILURE; + } + + direct_buf_rx_info("Num buf release entry = %d", + dbr_rsp.num_buf_release_entry); + + pdev = wlan_objmgr_get_pdev_by_id(psoc, dbr_rsp.pdev_id, + WLAN_DIRECT_BUF_RX_ID); + if (!pdev) { + direct_buf_rx_err("pdev is null"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_DIRECT_BUF_RX_ID); + return QDF_STATUS_E_INVAL; + } + + dbr_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (dbr_pdev_obj == NULL) { + direct_buf_rx_err("dir buf rx object is null"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_DIRECT_BUF_RX_ID); + return QDF_STATUS_E_FAILURE; + } + + if (dbr_rsp.mod_id >= dbr_pdev_obj->num_modules) { + direct_buf_rx_err("Invalid module id:%d", dbr_rsp.mod_id); + wlan_objmgr_pdev_release_ref(pdev, dbr_mod_id); + return QDF_STATUS_E_FAILURE; + } + mod_param = &(dbr_pdev_obj->dbr_mod_param[dbr_rsp.mod_id]); + + if (!mod_param) { + direct_buf_rx_err("dir buf rx module param is null"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_DIRECT_BUF_RX_ID); + return QDF_STATUS_E_FAILURE; + } + + 
dbr_buf_pool = mod_param->dbr_buf_pool; + dbr_rsp.dbr_entries = qdf_mem_malloc(dbr_rsp.num_buf_release_entry * + sizeof(struct direct_buf_rx_entry)); + + if (dbr_rsp.num_meta_data_entry > dbr_rsp.num_buf_release_entry) { + direct_buf_rx_err("More than expected number of metadata"); + wlan_objmgr_pdev_release_ref(pdev, + WLAN_DIRECT_BUF_RX_ID); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < dbr_rsp.num_buf_release_entry; i++) { + if (wmi_extract_dbr_buf_release_entry( + GET_WMI_HDL_FROM_PSOC(psoc), data_buf, i, + &dbr_rsp.dbr_entries[i]) != QDF_STATUS_SUCCESS) { + direct_buf_rx_err("Unable to extract DBR buf entry %d", + i+1); + qdf_mem_free(dbr_rsp.dbr_entries); + wlan_objmgr_pdev_release_ref(pdev, + WLAN_DIRECT_BUF_RX_ID); + return QDF_STATUS_E_FAILURE; + } + status = target_if_get_dbr_data(pdev, mod_param, &dbr_rsp, + &dbr_data, i, &cookie); + + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("DBR data get failed"); + qdf_mem_free(dbr_rsp.dbr_entries); + wlan_objmgr_pdev_release_ref(pdev, + WLAN_DIRECT_BUF_RX_ID); + return QDF_STATUS_E_FAILURE; + } + + dbr_data.meta_data_valid = false; + if (i < dbr_rsp.num_meta_data_entry) { + if (wmi_extract_dbr_buf_metadata( + GET_WMI_HDL_FROM_PSOC(psoc), data_buf, i, + &dbr_data.meta_data) == QDF_STATUS_SUCCESS) + dbr_data.meta_data_valid = true; + } + ret = mod_param->dbr_rsp_handler(pdev, &dbr_data); + status = target_if_dbr_replenish_ring(pdev, mod_param, + dbr_data.vaddr, cookie); + if (QDF_IS_STATUS_ERROR(status)) { + direct_buf_rx_err("dir buf rx ring replenish failed"); + qdf_mem_free(dbr_rsp.dbr_entries); + wlan_objmgr_pdev_release_ref(pdev, + WLAN_DIRECT_BUF_RX_ID); + return QDF_STATUS_E_FAILURE; + } + } + + qdf_mem_free(dbr_rsp.dbr_entries); + wlan_objmgr_pdev_release_ref(pdev, WLAN_DIRECT_BUF_RX_ID); + + return ret; +} + +static QDF_STATUS target_if_dbr_empty_ring(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_psoc_obj *dbr_psoc_obj, + struct direct_buf_rx_module_param *mod_param) +{ + uint8_t 
idx; + struct direct_buf_rx_ring_cfg *dbr_ring_cfg; + struct direct_buf_rx_ring_cap *dbr_ring_cap; + struct direct_buf_rx_buf_info *dbr_buf_pool; + + direct_buf_rx_enter(); + dbr_ring_cfg = mod_param->dbr_ring_cfg; + dbr_ring_cap = mod_param->dbr_ring_cap; + dbr_buf_pool = mod_param->dbr_buf_pool; + + direct_buf_rx_info("dbr_ring_cfg %pK, dbr_ring_cap %pK dbr_buf_pool %pK", + dbr_ring_cfg, dbr_ring_cap, dbr_buf_pool); + + for (idx = 0; idx < dbr_ring_cfg->num_ptr - 1; idx++) { + direct_buf_rx_info("dbr buf pool unmap and free for ptr %d", + idx); + qdf_mem_unmap_nbytes_single(dbr_psoc_obj->osdev, + (qdf_dma_addr_t)dbr_buf_pool[idx].paddr, + QDF_DMA_FROM_DEVICE, + dbr_ring_cap->min_buf_size); + qdf_mem_free(dbr_buf_pool[idx].vaddr); + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_dbr_deinit_ring(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_module_param *mod_param) +{ + struct wlan_objmgr_psoc *psoc; + struct direct_buf_rx_psoc_obj *dbr_psoc_obj; + struct direct_buf_rx_ring_cfg *dbr_ring_cfg; + + direct_buf_rx_enter(); + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + direct_buf_rx_err("psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + dbr_psoc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX); + + if (dbr_psoc_obj == NULL) { + direct_buf_rx_err("dir buf rx psoc object is null"); + return QDF_STATUS_E_FAILURE; + } + direct_buf_rx_info("dbr_psoc_obj %pK", dbr_psoc_obj); + + dbr_ring_cfg = mod_param->dbr_ring_cfg; + if (dbr_ring_cfg) { + target_if_dbr_empty_ring(pdev, dbr_psoc_obj, mod_param); + hal_srng_cleanup(dbr_psoc_obj->hal_soc, dbr_ring_cfg->srng); + qdf_mem_free_consistent(dbr_psoc_obj->osdev, + dbr_psoc_obj->osdev->dev, + dbr_ring_cfg->ring_alloc_size, + dbr_ring_cfg->base_vaddr_unaligned, + (qdf_dma_addr_t)dbr_ring_cfg->base_paddr_unaligned, 0); + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_dbr_deinit_srng( + struct wlan_objmgr_pdev *pdev, + struct 
direct_buf_rx_module_param *mod_param) +{ + struct direct_buf_rx_buf_info *dbr_buf_pool; + + direct_buf_rx_enter(); + dbr_buf_pool = mod_param->dbr_buf_pool; + direct_buf_rx_info("dbr buf pool %pK", dbr_buf_pool); + target_if_dbr_deinit_ring(pdev, mod_param); + qdf_mem_free(dbr_buf_pool); + mod_param->dbr_buf_pool = NULL; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_deinit_dbr_ring(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_pdev_obj *dbr_pdev_obj, + enum DBR_MODULE mod_id) +{ + struct direct_buf_rx_module_param *mod_param; + + direct_buf_rx_enter(); + mod_param = &(dbr_pdev_obj->dbr_mod_param[mod_id]); + + if (!mod_param) { + direct_buf_rx_err("dir buf rx module param is null"); + return QDF_STATUS_E_FAILURE; + } + direct_buf_rx_info("mod_param %pK", mod_param); + direct_buf_rx_info("dbr_ring_cap %pK", mod_param->dbr_ring_cap); + target_if_dbr_deinit_srng(pdev, mod_param); + qdf_mem_free(mod_param->dbr_ring_cap); + mod_param->dbr_ring_cap = NULL; + qdf_mem_free(mod_param->dbr_ring_cfg); + mod_param->dbr_ring_cfg = NULL; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_direct_buf_rx_register_events( + struct wlan_objmgr_psoc *psoc) +{ + int ret; + + if (!psoc || !GET_WMI_HDL_FROM_PSOC(psoc)) { + direct_buf_rx_err("psoc or psoc->tgt_if_handle is null"); + return QDF_STATUS_E_INVAL; + } + + ret = wmi_unified_register_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_dma_buf_release_event_id, + target_if_direct_buf_rx_rsp_event_handler, + WMI_RX_UMAC_CTX); + + if (ret) + direct_buf_rx_info("event handler not supported", ret); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_direct_buf_rx_unregister_events( + struct wlan_objmgr_psoc *psoc) +{ + if (!psoc || !GET_WMI_HDL_FROM_PSOC(psoc)) { + direct_buf_rx_err("psoc or psoc->tgt_if_handle is null"); + return QDF_STATUS_E_INVAL; + } + + wmi_unified_unregister_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_dma_buf_release_event_id); + + return 
QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_main.h b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_main.h new file mode 100644 index 0000000000000000000000000000000000000000..73e875d3b1e1e0e3cc59cd0134764f56d2e6174d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/direct_buf_rx/src/target_if_direct_buf_rx_main.h @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _TARGET_IF_DIRECT_BUF_RX_MAIN_H_ +#define _TARGET_IF_DIRECT_BUF_RX_MAIN_H_ + +#include "qdf_types.h" +#include "qdf_status.h" + +struct wlan_objmgr_psoc; +struct wlan_lmac_if_tx_ops; +struct direct_buf_rx_data; + +#define DBR_RING_BASE_ALIGN 8 +#define DBR_EVENT_TIMEOUT_IN_MS 1 +#define DBR_NUM_RESP_PER_EVENT 1 + +/** + * enum DBR_MODULE - Enum containing the modules supporting direct buf rx + * @DBR_MODULE_SPECTRAL: Module ID for Spectral + * @DBR_MODULE_MAX: Max module ID + */ +enum DBR_MODULE { + DBR_MODULE_SPECTRAL = 1, + DBR_MODULE_MAX, +}; + +/** + * struct direct_buf_rx_info - direct buffer rx operation info struct + * @cookie: SW cookie used to get the virtual address + * @paddr: Physical address pointer for DMA operation + * @vaddr: Virtual address pointer + * @offset: Offset of aligned address from unaligned + */ +struct direct_buf_rx_buf_info { + uint32_t cookie; + qdf_dma_addr_t paddr; + void *vaddr; + uint8_t offset; +}; + +/** + * struct direct_buf_rx_ring_cfg - DMA ring config parameters + * @num_ptr: Depth or the number of physical address pointers in the ring + * @ring_alloc_size: Size of the HAL ring + * @base_paddr_unaligned: base physical addr unaligned + * @base_vaddr_unaligned: base virtual addr unaligned + * @base_paddr_aligned: base physical addr aligned + * @base_vaddr_aligned: base virtual addr unaligned + * @head_idx_addr: head index addr + * @tail_idx_addr: tail index addr + * @srng: HAL srng context + */ +struct direct_buf_rx_ring_cfg { + uint32_t num_ptr; + uint32_t ring_alloc_size; + qdf_dma_addr_t base_paddr_unaligned; + void *base_vaddr_unaligned; + qdf_dma_addr_t base_paddr_aligned; + void *base_vaddr_aligned; + qdf_dma_addr_t head_idx_addr; + qdf_dma_addr_t tail_idx_addr; + void *srng; + uint32_t buf_size; +}; + +/** + * struct direct_buf_rx_ring_cap - DMA ring capabilities + * @ring_elems_min: Minimum number of pointers in the ring + * @min_buf_size: Minimum size of each buffer entry in the ring + * 
@min_buf_align: Minimum alignment of the addresses in the ring + */ +struct direct_buf_rx_ring_cap { + uint32_t ring_elems_min; + uint32_t min_buf_size; + uint32_t min_buf_align; +}; + +/** + * struct direct_buf_rx_module_param - DMA module param + * @mod_id: Module ID + * @dbr_ring_cap: Pointer to direct buf rx ring capabilities struct + * @dbr_ring_cfg: Pointer to direct buf rx ring config struct + * @dbr_buf_pool: Pointer to direct buf rx buffer pool struct + * @dbr_rsp_handler: Pointer to direct buf rx response handler for the module + */ +struct direct_buf_rx_module_param { + enum DBR_MODULE mod_id; + struct direct_buf_rx_ring_cap *dbr_ring_cap; + struct direct_buf_rx_ring_cfg *dbr_ring_cfg; + struct direct_buf_rx_buf_info *dbr_buf_pool; + int (*dbr_rsp_handler)(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_data *dbr_data); +}; + +/** + * struct direct_buf_rx_pdev_obj - Direct Buf RX pdev object struct + * @num_modules: Number of modules registered to DBR for the pdev + * @dbr_mod_param: Pointer to direct buf rx module param struct + */ +struct direct_buf_rx_pdev_obj { + uint32_t num_modules; + struct direct_buf_rx_module_param *dbr_mod_param; +}; + +/** + * struct direct_buf_rx_psoc_obj - Direct Buf RX psoc object struct + * @hal_soc: Opaque HAL SOC handle + * @osdev: QDF os device handle + */ +struct direct_buf_rx_psoc_obj { + void *hal_soc; + qdf_device_t osdev; +}; + +/** + * target_if_direct_buf_rx_register_events() - Register WMI events to direct + * buffer rx module + * @psoc: pointer to psoc object + * + * Return : QDF status of operation + */ +QDF_STATUS target_if_direct_buf_rx_register_events( + struct wlan_objmgr_psoc *psoc); + +/** + * target_if_direct_buf_rx_unregister_events() - Unregister WMI events to direct + * buffer rx module + * @psoc: pointer to psoc object + * + * Return : QDF status of operation + */ +QDF_STATUS target_if_direct_buf_rx_unregister_events( + struct wlan_objmgr_psoc *psoc); + +/** + * 
target_if_direct_buf_rx_pdev_create_handler() - Handler to be invoked for + * direct buffer rx module + * during pdev object create + * @pdev: pointer to pdev object + * @data: pointer to data + * + * Return : QDF status of operation + */ +QDF_STATUS target_if_direct_buf_rx_pdev_create_handler( + struct wlan_objmgr_pdev *pdev, void *data); + +/** + * target_if_direct_buf_rx_pdev_destroy_handler() - Handler to be invoked for + * direct buffer rx module + * during pdev object destroy + * @pdev: pointer to pdev object + * @data: pointer to data + * + * Return : QDF status of operation + */ +QDF_STATUS target_if_direct_buf_rx_pdev_destroy_handler( + struct wlan_objmgr_pdev *pdev, void *data); + +/** + * target_if_direct_buf_rx_psoc_create_handler() - Handler invoked for + * direct buffer rx module + * during attach + * @pdev: pointer to psoc object + * + * Return : QDF status of operation + */ +QDF_STATUS target_if_direct_buf_rx_psoc_create_handler( + struct wlan_objmgr_psoc *psoc, void *data); + +/** + * target_if_direct_buf_rx_psoc_destroy_handler() - Handler invoked for + * direct buffer rx module + * during detach + * @pdev: pointer to psoc object + * + * Return : QDF status of operation + */ +QDF_STATUS target_if_direct_buf_rx_psoc_destroy_handler( + struct wlan_objmgr_psoc *psoc, void *data); + +/** + * target_if_deinit_dbr_ring() - Function to deinitialize buffers and ring + * allocated for direct buffer rx module + * @pdev: pointer to pdev object + * @dbr_pdev_obj: pointer to direct buffer rx module pdev obj + * @mod_id: module id indicating the module using direct buffer rx framework + * + * Return : QDF status of operation + */ +QDF_STATUS target_if_deinit_dbr_ring(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_pdev_obj *dbr_pdev_obj, + enum DBR_MODULE mod_id); +/** + * target_if_direct_buf_rx_module_register() - Function to register to direct + * buffer rx module + * @pdev: pointer to pdev object + * @mod_id: module id indicating the module using 
direct buffer rx framework + * @dbr_rsp_handler: function pointer pointing to the response handler to be + * invoked for the module registering to direct buffer rx + * module + * + * Return: QDF status of operation + */ +QDF_STATUS target_if_direct_buf_rx_module_register( + struct wlan_objmgr_pdev *pdev, uint8_t mod_id, + int (*dbr_rsp_handler)(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_data *dbr_data)); + +#endif /* _TARGET_IF_DIRECT_BUF_RX_MAIN_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/ftm/inc/target_if_ftm.h b/drivers/staging/qca-wifi-host-cmn/target_if/ftm/inc/target_if_ftm.h new file mode 100644 index 0000000000000000000000000000000000000000..97ca6f5f84a58e55cc12196b1f1b4fdf4a8f7cd4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/ftm/inc/target_if_ftm.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: declare the ftm service data structure and apis + */ +#ifndef _TARGET_IF_FTM_H_ +#define _TARGET_IF_FTM_H_ + +#include +#include +#include + +/** + * target_if_ftm_register_tx_ops() - register ftm tx ops + * @tx_ops: tx ops pointer + * + * Register ftm tx ops + * + * Return: QDF_STATUS + */ +QDF_STATUS target_if_ftm_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_ftm_attach() - Register FW event handler + * @psoc: psoc pointer + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS +target_if_ftm_attach(struct wlan_objmgr_psoc *psoc); + +/** + * target_if_ftm_detach() - De-Register FW event handler + * @psoc: psoc pointer + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS +target_if_ftm_detach(struct wlan_objmgr_psoc *psoc); + +/** + * target_if_ftm_cmd_send() - Send WMI command for FTM requests + * @pdev: pdev pointer + * buf: data to be sent to FW + * len: length of the data + * pdev_id: pdev id + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS +target_if_ftm_cmd_send(struct wlan_objmgr_pdev *pdev, + uint8_t *buf, uint32_t len, uint8_t pdev_id); +#endif /* _TARGET_IF_FTM_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/ftm/src/target_if_ftm.c b/drivers/staging/qca-wifi-host-cmn/target_if/ftm/src/target_if_ftm.c new file mode 100644 index 0000000000000000000000000000000000000000..0e4e76563233230cdc3564b71c8821b304e237b9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/ftm/src/target_if_ftm.c @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: offload lmac interface APIs definitions for FTM + */ + +#include +#include +#include +#include +#include +#include +#include + +static inline struct wlan_lmac_if_ftm_rx_ops * +target_if_ftm_get_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.rx_ops.ftm_rx_ops; +} + +static int +target_if_ftm_process_utf_event(ol_scn_t sc, uint8_t *event_buf, uint32_t len) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct wmi_host_pdev_utf_event event; + struct wlan_lmac_if_ftm_rx_ops *ftm_rx_ops; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + uint32_t pdev_id; + struct wmi_unified *wmi_handle; + + psoc = target_if_get_psoc_from_scn_hdl(sc); + if (!psoc) { + ftm_err("null psoc"); + return QDF_STATUS_E_INVAL; + } + + status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_FTM_ID); + if (QDF_IS_STATUS_ERROR(status)) { + ftm_err("unable to get psoc reference"); + return QDF_STATUS_E_INVAL; + } + + event.datalen = len; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + ftm_err("Invalid WMI handle"); + wlan_objmgr_psoc_release_ref(psoc, WLAN_FTM_ID); + return QDF_STATUS_E_INVAL; + } + + if (wmi_extract_pdev_utf_event(wmi_handle, event_buf, &event) + != QDF_STATUS_SUCCESS) { + ftm_err("Extracting utf event failed"); + wlan_objmgr_psoc_release_ref(psoc, WLAN_FTM_ID); + return QDF_STATUS_E_INVAL; + } + + pdev_id = event.pdev_id; + pdev = wlan_objmgr_get_pdev_by_id(psoc, pdev_id, WLAN_FTM_ID); + if 
(!pdev) { + pdev_id = TGT_WMI_PDEV_ID_SOC; + ftm_debug("Can't find pdev by pdev_id %d, try soc_id", + event.pdev_id); + pdev = wlan_objmgr_get_pdev_by_id(psoc, pdev_id, WLAN_FTM_ID); + if (!pdev) { + ftm_err("null pdev"); + wlan_objmgr_psoc_release_ref(psoc, WLAN_FTM_ID); + return QDF_STATUS_E_INVAL; + } + } + + ftm_rx_ops = target_if_ftm_get_rx_ops(psoc); + + if (ftm_rx_ops->ftm_ev_handler) { + status = ftm_rx_ops->ftm_ev_handler(pdev, + event.data, event.datalen); + if (QDF_IS_STATUS_ERROR(status)) + status = QDF_STATUS_E_INVAL; + } else { + status = QDF_STATUS_E_INVAL; + } + + wlan_objmgr_pdev_release_ref(pdev, WLAN_FTM_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_FTM_ID); + + return status; +} + +QDF_STATUS target_if_ftm_cmd_send(struct wlan_objmgr_pdev *pdev, + uint8_t *buf, uint32_t len, + uint8_t pdev_id) +{ + QDF_STATUS ret; + wmi_unified_t handle; + struct pdev_utf_params param; + + if (!pdev) { + target_if_err("null pdev"); + return QDF_STATUS_E_FAILURE; + } + + handle = get_wmi_unified_hdl_from_pdev(pdev); + if (!handle) { + target_if_err("null handle"); + return QDF_STATUS_E_FAILURE; + } + param.utf_payload = buf; + param.len = len; + + ret = wmi_unified_pdev_utf_cmd_send(handle, &param, pdev_id); + if (QDF_IS_STATUS_ERROR(ret)) + ftm_err("wmi utf cmd send failed, ret: %d", ret); + + return ret; +} + +QDF_STATUS target_if_ftm_attach(struct wlan_objmgr_psoc *psoc) +{ + int ret; + wmi_unified_t handle; + + if (!psoc) { + target_if_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + + handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!handle) { + target_if_err("null handle"); + return QDF_STATUS_E_FAILURE; + } + ret = wmi_unified_register_event_handler(handle, + wmi_pdev_utf_event_id, + target_if_ftm_process_utf_event, + WMI_RX_UMAC_CTX); + if (ret) { + ftm_err("wmi event registration failed, ret: %d", ret); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_ftm_detach(struct wlan_objmgr_psoc *psoc) + +{ + int 
ret; + wmi_unified_t handle; + + if (!psoc) { + target_if_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + + handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!handle) { + target_if_err("null handle"); + return QDF_STATUS_E_FAILURE; + } + ret = wmi_unified_unregister_event_handler(handle, + wmi_pdev_utf_event_id); + + if (ret) { + ftm_err("wmi event deregistration failed, ret: %d", ret); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_ftm_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_ftm_tx_ops *ftm_tx_ops; + + if (!tx_ops) { + ftm_err("invalid tx_ops"); + return QDF_STATUS_E_FAILURE; + } + + ftm_tx_ops = &tx_ops->ftm_tx_ops; + ftm_tx_ops->ftm_attach = target_if_ftm_attach; + ftm_tx_ops->ftm_detach = target_if_ftm_detach; + ftm_tx_ops->ftm_cmd_send = target_if_ftm_cmd_send; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/green_ap/inc/target_if_green_ap.h b/drivers/staging/qca-wifi-host-cmn/target_if/green_ap/inc/target_if_green_ap.h new file mode 100644 index 0000000000000000000000000000000000000000..824a66be415d2a93c0e2a8a3f374e98adc9ca7ef --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/green_ap/inc/target_if_green_ap.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: offload lmac interface APIs for green ap + */ +#ifndef __TARGET_IF_GREEN_AP_H__ +#define __TARGET_IF_GREEN_AP_H__ + +#include +#include +#include +#include + +struct wlan_green_ap_egap_params; + +/** + * target_if_register_green_ap_tx_ops() - lmac handler to register + * green ap tx_ops callback functions + * @tx_ops: wlan_lmac_if_tx_ops object + * + * Return: QDF_STATUS in case of success + */ +QDF_STATUS target_if_register_green_ap_tx_ops( + struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_green_ap_register_egap_event_handler() - registers enhanced + * green ap event handler + * @pdev: objmgr pdev + * + * Return: QDF_STATUS in case of success + */ +QDF_STATUS target_if_green_ap_register_egap_event_handler( + struct wlan_objmgr_pdev *pdev); + +/** + * target_if_green_ap_enable_egap() - enable enhanced green ap + * @pdev: pdev pointer + * @egap_params: enhanced green ap params + * + * @Return: QDF_STATUS_SUCCESS in case of success + */ +QDF_STATUS target_if_green_ap_enable_egap( + struct wlan_objmgr_pdev *pdev, + struct wlan_green_ap_egap_params *egap_params); + +/** + * target_if_green_ap_set_ps_on_off() - Green AP PS toggle + * @pdev: pdev pointer + * @value: Value to send PS on/off to FW + * @pdev_id: pdev id + * + * @Return: QDF_STATUS_SUCCESS in case of success + */ +QDF_STATUS target_if_green_ap_set_ps_on_off(struct wlan_objmgr_pdev *pdev, + bool value, uint8_t pdev_id); + +/** + * target_if_green_ap_get_current_channel() - Get current channel + * @pdev: pdev pointer + * + * @Return: current channel freq + */ +uint16_t target_if_green_ap_get_current_channel(struct wlan_objmgr_pdev *pdev); + +/** + 
* target_if_green_ap_get_current_channel_flags() - Get current channel flags + * @pdev: pdev pointer + * + * @Return: current channel flags + */ +uint64_t target_if_green_ap_get_current_channel_flags( + struct wlan_objmgr_pdev *pdev); + +/** + * target_if_green_ap_reset_dev() - Reset dev + * @pdev: pdev pointer + * + * @Return: QDF_STATUS_SUCCESS if device resetted + */ +QDF_STATUS target_if_green_ap_reset_dev(struct wlan_objmgr_pdev *pdev); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/green_ap/src/target_if_green_ap.c b/drivers/staging/qca-wifi-host-cmn/target_if/green_ap/src/target_if_green_ap.c new file mode 100644 index 0000000000000000000000000000000000000000..3ecf935c678c710f40dcc5d780d8f423b14edea8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/green_ap/src/target_if_green_ap.c @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: offload lmac interface APIs definitions for Green ap + */ + +#include +#include +#include <../../core/src/wlan_green_ap_main_i.h> +#include +#include + +QDF_STATUS target_if_register_green_ap_tx_ops( + struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_green_ap_tx_ops *green_ap_tx_ops; + + if (!tx_ops) { + target_if_err("invalid tx_ops"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_tx_ops = &tx_ops->green_ap_tx_ops; + + green_ap_tx_ops->enable_egap = target_if_green_ap_enable_egap; + green_ap_tx_ops->ps_on_off_send = target_if_green_ap_set_ps_on_off; + green_ap_tx_ops->reset_dev = NULL; + green_ap_tx_ops->get_current_channel = NULL; + green_ap_tx_ops->get_current_channel_flags = NULL; + green_ap_tx_ops->get_capab = NULL; + + return QDF_STATUS_SUCCESS; +} + +/** + * target_if_green_ap_egap_status_info_event() - egap status info event + * @scn: pointer to scn handle + * @evt_buf: pointer to event buffer + * @data_len: data len of the event buffer + * + * Return: 0 for success, otherwise appropriate error code + */ +static int target_if_green_ap_egap_status_info_event( + ol_scn_t scn, uint8_t *evt_buf, uint32_t data_len) +{ + struct wlan_objmgr_pdev *pdev; + struct wlan_green_ap_egap_status_info egap_status_info_params; + void *wmi_hdl; + + pdev = target_if_get_pdev_from_scn_hdl(scn); + if (!pdev) { + green_ap_err("pdev is null"); + return QDF_STATUS_E_FAILURE; + } + + wmi_hdl = GET_WMI_HDL_FROM_PDEV(pdev); + if (!wmi_hdl) { + green_ap_err("null wmi_hdl"); + return QDF_STATUS_E_FAILURE; + } + + if (wmi_extract_green_ap_egap_status_info(wmi_hdl, + evt_buf, + &egap_status_info_params) != + QDF_STATUS_SUCCESS) { + green_ap_err("unable to extract green ap egap status info"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_debug("mac_id: %d, status: %d, tx_mask: %x, rx_mask: %d", + egap_status_info_params.mac_id, + egap_status_info_params.status, + egap_status_info_params.tx_chainmask, + egap_status_info_params.rx_chainmask); + + return 
0; +} + +QDF_STATUS target_if_green_ap_register_egap_event_handler( + struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + struct wlan_green_ap_egap_params *egap_params; + int ret; + void *wmi_hdl; + + if (!pdev) { + green_ap_err("pdev is null"); + return QDF_STATUS_E_INVAL; + } + + wmi_hdl = GET_WMI_HDL_FROM_PDEV(pdev); + if (!wmi_hdl) { + green_ap_err("null wmi_hdl"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + egap_params = &green_ap_ctx->egap_params; + + ret = wmi_unified_register_event_handler( + wmi_hdl, + wmi_ap_ps_egap_info_event_id, + target_if_green_ap_egap_status_info_event, + WMI_RX_UMAC_CTX); + if (ret < 0) { + green_ap_err("Failed to register Enhance Green AP event"); + egap_params->fw_egap_support = false; + } else { + green_ap_info("Set the Enhance Green AP event handler"); + egap_params->fw_egap_support = true; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_green_ap_enable_egap( + struct wlan_objmgr_pdev *pdev, + struct wlan_green_ap_egap_params *egap_params) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + void *wmi_hdl; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + wmi_hdl = GET_WMI_HDL_FROM_PDEV(pdev); + if (!wmi_hdl) { + green_ap_err("null wmi_hdl"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_spin_lock_bh(&green_ap_ctx->lock); + if (!wlan_is_egap_enabled(green_ap_ctx)) { + green_ap_info("enhanced green ap support is not present"); + qdf_spin_unlock_bh(&green_ap_ctx->lock); + return QDF_STATUS_SUCCESS; + } + qdf_spin_unlock_bh(&green_ap_ctx->lock); + + 
return wmi_unified_egap_conf_params_cmd(wmi_hdl, + egap_params); +} + +QDF_STATUS target_if_green_ap_set_ps_on_off(struct wlan_objmgr_pdev *pdev, + bool value, uint8_t pdev_id) +{ + void *wmi_hdl; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + wmi_hdl = GET_WMI_HDL_FROM_PDEV(pdev); + if (!wmi_hdl) { + green_ap_err("null wmi_hdl"); + return QDF_STATUS_E_FAILURE; + } + + return wmi_unified_green_ap_ps_send(wmi_hdl, + value, pdev_id); +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_cmd_api.h b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_cmd_api.h new file mode 100644 index 0000000000000000000000000000000000000000..e96d3802c54b0c28a89a9be3ff81b08f3b46aa15 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_cmd_api.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: init_cmd_api.h + * + * Public APIs to prepare and send init command + */ + +#ifndef _INIT_DEINIT_INIT_CMD_H_ +#define _INIT_DEINIT_INIT_CMD_H_ + +/* max size if 256k */ +#define HOST_MEM_CHUNK_MAX_SIZE (256 * 1024) +#define HOST_MEM_CHUNK_MAX_SIZE_POWER2 (8 + 10) +#define TXBF_CV_POOL0 2 +#define TXBF_CV_POOL1 3 +#define TXBF_CV_POOL2 4 +#define HOST_CONTIGUOUS_MEM_CHUNK_REQUIRED 0x8 + +/** + * enum wlan_fw_mem_prio - defines FW Memory requirement type + * @FW_MEM_HIGH_PRIORITY: Memory requires contiguous memory allocation + * @FW_MEM_LOW_PRIORITY: Memory can be fragmented + * @FW_PRIORITY_MAX: Invalid type + */ +enum wlan_fw_mem_prio { + FW_MEM_HIGH_PRIORITY = 0, + FW_MEM_LOW_PRIORITY, + FW_PRIORITY_MAX +}; + +/** + * init_deinit_handle_host_mem_req() - handle host memory request + * @psoc: PSOC object + * @tgt_info: PSOC_INFO object + * @event: Event buffer from FW + * + * API to handle memory request from FW and allocate memory chunks + * + * Return: SUCCESS on successful memory allocation + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS init_deinit_handle_host_mem_req( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, uint8_t *event); + +/** + * init_deinit_free_num_units() - Free allocated mem chunks + * @psoc: PSOC object + * @tgt_info: PSOC_INFO object + * + * API to free memory + * + * Return: SUCCESS on successful memory free + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS init_deinit_free_num_units(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl); + +/** + * init_deinit_derive_band_to_mac_param() - Derive band to mac param + * @psoc: PSOC object + * @tgt_info: PSOC_INFO object + * @band_to_mac: BAND_TO_MAC object + * + * API to derive band to mac param + * + * Return: void + */ +void init_deinit_derive_band_to_mac_param( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info, + struct wmi_host_pdev_band_to_mac *band_to_mac); + +/** + * 
init_deinit_prepare_send_init_cmd() - prepare send init cmd + * @psoc: PSOC object + * @tgt_info: PSOC_INFO object + * + * API to prepare send init command + * + * Return: void + */ +void init_deinit_prepare_send_init_cmd( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + +#endif /* _INIT_DEINIT_INIT_CMD_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_deinit_lmac.h b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_deinit_lmac.h new file mode 100644 index 0000000000000000000000000000000000000000..3afb5b90fba84d1500467f34092a41f43a1b0209 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_deinit_lmac.h @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: init_deinit_lmac.h + * + * Public APIs to get target_if info + */ + +#ifndef _INIT_DEINIT_LMAC_H_ +#define _INIT_DEINIT_LMAC_H_ + +/** + * lmac_get_tgt_res_cfg() - get target resource config + * @psoc: pointer to psoc + * + * API to get target resource config + * + * Return: target resource configuration + */ +target_resource_config *lmac_get_tgt_res_cfg(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_target_cap() - get target capability + * @psoc: pointer to psoc + * + * API to get target capability + * + * Return: target capability Information + */ +struct wlan_psoc_target_capability_info *lmac_get_target_cap( + struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_pdev_idx() - get pdev id + * @pdev: pointer to pdev + * + * API to get pdev id + * + * Return: pdev id + */ +int32_t lmac_get_pdev_idx(struct wlan_objmgr_pdev *pdev); + +/** + * lmac_get_pdev_target_type() - check pdev target type + * @pdev: pointer to pdev + * @target_type: target type ptr, it is assigned with pdev target_type + * target type stores the radio code + * + * API to check pdev target type + * + * Return: Success if found required target type else Failure + */ +QDF_STATUS lmac_get_pdev_target_type(struct wlan_objmgr_pdev *pdev, + uint32_t *target_type); + +/** + * lmac_get_tgt_type() - get target type + * @psoc: pointer to psoc + * + * API to get target type + * + * Return: target type (value to identify particular radio) + */ +uint32_t lmac_get_tgt_type(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_tgt_version() - get target version + * @psoc: pointer to psoc + * + * API to get target version + * + * Return: target version + */ +uint32_t lmac_get_tgt_version(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_tgt_revision() - get target revision + * @psoc: pointer to psoc + * + * API to get target revision + * + * Return: target revision + */ +uint32_t lmac_get_tgt_revision(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_is_target_ar900b() - checks the target 
type + * @psoc: pointer to psoc + * + * API to check target type + * + * Return: True on presence of required target type else false + */ +bool lmac_is_target_ar900b(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_wmi_hdl() - get wmi handle + * @psoc: pointer to psoc + * + * API to get wmi handle + * + * Return: wmi handler + */ +struct common_wmi_handle *lmac_get_wmi_hdl(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_wmi_unified_hdl() - get wmi handle + * @psoc: pointer to psoc + * + * API to get wmi handle + * + * Return: wmi handler + */ +wmi_unified_t lmac_get_wmi_unified_hdl(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_htc_hdl() - get htc handler + * @psoc: pointer to psoc + * + * API to get htc handle + * + * Return: htc handler + */ +struct common_htc_handle *lmac_get_htc_hdl(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_set_htc_hdl() - set htc handle + * @psoc: pointer to psoc + * @htc_hdl: HTC handle + * + * API to set htc handle + * + * Return: void + */ +void lmac_set_htc_hdl(struct wlan_objmgr_psoc *psoc, + struct common_htc_handle *htc_hdl); + +/** + * lmac_get_hif_hdl() - get hif handle + * @psoc: pointer to psoc + * + * API to get hif handle + * + * Return: hif handler + */ +struct common_hif_handle *lmac_get_hif_hdl(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_ol_hif_hdl() - get hif handle + * @psoc: pointer to psoc + * + * API to get hif handle + * + * Return: hif handler + */ +struct hif_opaque_softc *lmac_get_ol_hif_hdl(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_pdev_wmi_handle() - get pdev wmi handle + * @pdev: pointer to dev + * + * API to get wmi handle + * + * Return: wmi handle + */ +struct common_wmi_handle *lmac_get_pdev_wmi_handle( + struct wlan_objmgr_pdev *pdev); + +/** + * lmac_get_pdev_wmi_unified_handle() - get pdev wmi handle + * @pdev: pointer to dev + * + * API to get wmi handle + * + * Return: wmi handle + */ +wmi_unified_t lmac_get_pdev_wmi_unified_handle( + struct wlan_objmgr_pdev *pdev); + 
+/** + * lmac_get_psoc_feature_ptr() - get feature pointer + * @psoc: pointer to psoc + * + * API to get legacy pointer + * + * Return: feature pointer + */ +void *lmac_get_psoc_feature_ptr(struct wlan_objmgr_psoc *psoc); + +/** + * lmac_get_pdev_feature_ptr() - get feature pointer + * @pdev: pointer to pdev + * + * API to get legacy pointer + * + * Return: pdev feature pointer + */ +void *lmac_get_pdev_feature_ptr(struct wlan_objmgr_pdev *pdev); + +/** + * lmac_get_num_radios() - get number of radios + * @psoc: pointer to psoc + * + * API to get number of radios + * + * Return: number of radios + */ +uint32_t lmac_get_num_radios(struct wlan_objmgr_psoc *psoc); + +#endif /* _INIT_DEINIT_LMAC_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_event_handler.h b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_event_handler.h new file mode 100644 index 0000000000000000000000000000000000000000..6db62d87ea71eefaaaf3fe16e15a1da3eca5d239 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/init_event_handler.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: init_event_handler.h + * + * Public API file for common WMI event handlers + */ +#ifndef _INIT_EVENT_HANDLER_H_ +#define _INIT_EVENT_HANDLER_H_ + +/** + * init_deinit_register_tgt_psoc_ev_handlers() - register tgt if handlers + * @psoc: PSOC object + * + * API to register tgt handlers + * + * Return: SUCCESS on successful registration + */ +QDF_STATUS init_deinit_register_tgt_psoc_ev_handlers( + struct wlan_objmgr_psoc *psoc); + +#endif /* _INIT_EVENT_HANDLER_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/service_ready_param.h b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/service_ready_param.h new file mode 100644 index 0000000000000000000000000000000000000000..4687cd5c4fee38508c0b4c7f7026a5e74bd877e1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/service_ready_param.h @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ +/** + * DOC: service_ready_param.h + * + * Public structures to access (ext)service ready data + */ +#ifndef _SERVICE_READY_PARAM_H_ +#define _SERVICE_READY_PARAM_H_ + +#include "qdf_types.h" + +/** + * struct wlan_psoc_hal_reg_capability - hal reg table in psoc + * @eeprom_rd: regdomain value specified in EEPROM + * @eeprom_rd_ext: regdomain + * @regcap1: CAP1 capabilities bit map + * @regcap2: REGDMN EEPROM CAP + * @wireless_modes: REGDMN MODE + * @low_2ghz_chan: lower 2.4GHz channels + * @high_2ghz_chan: higher 2.4 GHz channels + * @low_5ghz_chan: lower 5 GHz channels + * @high_5ghz_chan: higher 5 GHz channels + */ +struct wlan_psoc_hal_reg_capability { + uint32_t eeprom_rd; + uint32_t eeprom_rd_ext; + uint32_t regcap1; + uint32_t regcap2; + uint32_t wireless_modes; + uint32_t low_2ghz_chan; + uint32_t high_2ghz_chan; + uint32_t low_5ghz_chan; + uint32_t high_5ghz_chan; +}; + +/** + * struct wlan_psoc_target_capability_info - target capabilities in psoc + * @phy_capability: PHY capabilities + * @max_frag_entry: Maximum frag entries + * @num_rf_chains: Number of RF chains supported + * @ht_cap_info: HT cap info + * @vht_cap_info: VHT cap info + * @vht_supp_mcs: VHT Supported MCS + * @hw_min_tx_power: HW minimum tx power + * @hw_max_tx_power: HW maximum tx power + * @sys_cap_info: sys capability info + * @min_pkt_size_enable: Enterprise mode short pkt enable + * @max_bcn_ie_size: Max beacon and probe rsp IE offload size + * @max_num_scan_channels: Max scan channels + * @max_supported_macs: max supported MCS + * @wmi_fw_sub_feat_caps: FW sub feature capabilities + * @txrx_chainmask: TXRX chain mask + * @default_dbs_hw_mode_index: DBS hw mode index + * @num_msdu_desc: number of msdu desc + * @fw_version: FW build version + * @fw_version_1: Second dword of FW version (Valid for non-tlv FW) + */ +struct wlan_psoc_target_capability_info { + uint32_t phy_capability; + uint32_t max_frag_entry; + uint32_t num_rf_chains; + uint32_t ht_cap_info; + uint32_t 
vht_cap_info; + uint32_t vht_supp_mcs; + uint32_t hw_min_tx_power; + uint32_t hw_max_tx_power; + uint32_t sys_cap_info; + uint32_t min_pkt_size_enable; + uint32_t max_bcn_ie_size; + uint32_t max_num_scan_channels; + uint32_t max_supported_macs; + uint32_t wmi_fw_sub_feat_caps; + uint32_t txrx_chainmask; + uint32_t default_dbs_hw_mode_index; + uint32_t num_msdu_desc; + uint32_t fw_version; + uint32_t fw_version_1; +}; + +/** + * struct wlan_psoc_host_ppe_threshold - PPE threshold + * @numss_m1: NSS - 1 + * @ru_bit_mask: RU bit mask indicating the supported RU's + * @ppet16_ppet8_ru3_ru0: ppet8 and ppet16 for max num ss + */ +struct wlan_psoc_host_ppe_threshold { + uint32_t numss_m1; + uint32_t ru_bit_mask; + uint32_t ppet16_ppet8_ru3_ru0[PSOC_HOST_MAX_NUM_SS]; +}; + +/** + * struct wlan_psoc_host_mac_phy_caps - Phy caps recvd in EXT service + * @hw_mode_id: identify a particular set of HW characteristics, + * as specified by the subsequent fields. WMI_MAC_PHY_CAPABILITIES + * element must be mapped to its parent WMI_HW_MODE_CAPABILITIES + * element using hw_mode_id. No particular ordering of + * WMI_MAC_PHY_CAPABILITIES elements should be + * assumed, though in practice the elements may always be ordered + * by hw_mode_id. + * @pdev_id: pdev_id starts with 1. 
pdev_id 1 => phy_id 0, pdev_id 2 => phy_id 1 + * @phy_id: Starts with 0 + * @hw_mode_config_type: holds the enum wmi_hw_mode_config_type + * @bitmap of supported modulations + * @supported_bands: supported bands, enum WLAN_BAND_CAPABILITY + * @ampdu_density: ampdu density 0 for no restriction, 1 for 1/4 us, + * 2 for 1/2 us, 3 for 1 us,4 for 2 us, 5 for 4 us, + * 6 for 8 us,7 for 16 us + * @max_bw_supported_2G: max bw supported 2G, enum wmi_channel_width + * @ht_cap_info_2G: WMI HT Capability, WMI_HT_CAP defines + * @vht_cap_info_2G: VHT capability info field of 802.11ac, WMI_VHT_CAP defines + * @vht_supp_mcs_2G: VHT Supported MCS Set field Rx/Tx same + * The max VHT-MCS for n SS subfield (where n = 1,...,8) is encoded as + * follows + * - 0 indicates support for VHT-MCS 0-7 for n spatial streams + * - 1 indicates support for VHT-MCS 0-8 for n spatial streams + * - 2 indicates support for VHT-MCS 0-9 for n spatial streams + * - 3 indicates that n spatial streams is not supported + * @he_cap_info_2G: HE capability info field of 802.11ax, WMI_HE_CAP defines + * @he_supp_mcs_2G: HE Supported MCS Set field Rx/Tx same + * @tx_chain_mask_2G: Valid Transmit chain mask + * @rx_chain_mask_2G: Valid Receive chain mask + * @max_bw_supported_5G: max bw supported 5G, enum wmi_channel_width + * @ht_cap_info_5G: WMI HT Capability, WMI_HT_CAP defines + * @vht_cap_info_5G: VHT capability info field of 802.11ac, WMI_VHT_CAP defines + * @vht_supp_mcs_5G: VHT Supported MCS Set field Rx/Tx same + * The max VHT-MCS for n SS subfield (where n = 1,...,8) is encoded as + * follows + * - 0 indicates support for VHT-MCS 0-7 for n spatial streams + * - 1 indicates support for VHT-MCS 0-8 for n spatial streams + * - 2 indicates support for VHT-MCS 0-9 for n spatial streams + * - 3 indicates that n spatial streams is not supported + * @he_cap_info_5G: HE capability info field of 802.11ax, WMI_HE_CAP defines + * @he_supp_mcs_5G: HE Supported MCS Set field Rx/Tx same + * @tx_chain_mask_5G: Valid 
Transmit chain mask + * @rx_chain_mask_5G: Valid Receive chain mask + * @he_cap_phy_info_2G: 2G HE capability phy field + * @he_cap_phy_info_5G: 5G HE capability phy field + * @he_ppet2G: 2G HE PPET info + * @he_ppet5G: 5G HE PPET info + * @chainmask_table_id: chain mask table id + */ +struct wlan_psoc_host_mac_phy_caps { + uint32_t hw_mode_id; + uint32_t pdev_id; + uint32_t phy_id; + int hw_mode_config_type; + uint32_t supports_11b:1, + supports_11g:1, + supports_11a:1, + supports_11n:1, + supports_11ac:1, + supports_11ax:1; + uint32_t supported_bands; + uint32_t ampdu_density; + uint32_t max_bw_supported_2G; + uint32_t ht_cap_info_2G; + uint32_t vht_cap_info_2G; + uint32_t vht_supp_mcs_2G; + uint32_t he_cap_info_2G; + uint32_t he_supp_mcs_2G; + uint32_t tx_chain_mask_2G; + uint32_t rx_chain_mask_2G; + uint32_t max_bw_supported_5G; + uint32_t ht_cap_info_5G; + uint32_t vht_cap_info_5G; + uint32_t vht_supp_mcs_5G; + uint32_t he_cap_info_5G; + uint32_t he_supp_mcs_5G; + uint32_t tx_chain_mask_5G; + uint32_t rx_chain_mask_5G; + uint32_t he_cap_phy_info_2G[PSOC_HOST_MAX_PHY_SIZE]; + uint32_t he_cap_phy_info_5G[PSOC_HOST_MAX_PHY_SIZE]; + struct wlan_psoc_host_ppe_threshold he_ppet2G; + struct wlan_psoc_host_ppe_threshold he_ppet5G; + uint32_t chainmask_table_id; +}; + +/** + * struct wlan_psoc_host_hw_mode_caps - HW mode capabilities in EXT event + * @hw_mode_id: identify a particular set of HW characteristics, + * as specified by the subsequent fields + * @phy_id_map: BIT0 represents phy_id 0, BIT1 represent phy_id 1 and so on + * @hw_mode_config_type: HW mode config type + */ +struct wlan_psoc_host_hw_mode_caps { + uint32_t hw_mode_id; + uint32_t phy_id_map; + uint32_t hw_mode_config_type; +}; + +/** + * struct wlan_psoc_host_dbr_ring_caps - Direct buffer rx module ring + * capability maintained by PSOC + * @pdev_id: Pdev id of the pdev + * @mod_id: Module id + * @ring_elems_min: Minimum number of pointers in the ring + * @min_buf_size: Minimum size of each buffer 
entry in the ring + * @min_buf_align: Minimum alignment of the addresses in the ring + */ +struct wlan_psoc_host_dbr_ring_caps { + uint32_t pdev_id; + uint32_t mod_id; + uint32_t ring_elems_min; + uint32_t min_buf_size; + uint32_t min_buf_align; +}; + +/** + * struct wlan_psoc_host_chainmask_capabilities - chain mask capabilities list + * @supports_chan_width_20: channel width 20 support for this chain mask. + * @supports_chan_width_40: channel width 40 support for this chain mask. + * @supports_chan_width_80: channel width 80 support for this chain mask. + * @supports_chan_width_160: channel width 160 support for this chain mask. + * @supports_chan_width_80P80: channel width 80P80 support for this chain mask. + * @chain_mask_2G: 2G support for this chain mask. + * @chain_mask_5G: 5G support for this chain mask. + * @chain_mask_tx: Tx support for this chain mask. + * @chain_mask_rx: Rx support for this chain mask. + * @supports_aDFS: Agile DFS support for this chain mask. + * @chainmask: chain mask value. + */ +struct wlan_psoc_host_chainmask_capabilities { + uint32_t supports_chan_width_20:1, + supports_chan_width_40:1, + supports_chan_width_80:1, + supports_chan_width_160:1, + supports_chan_width_80P80:1, + reserved:22, + chain_mask_2G:1, + chain_mask_5G:1, + chain_mask_tx:1, + chain_mask_rx:1, + supports_aDFS:1; + uint32_t chainmask; +}; + +/** + * struct wlan_psoc_host_chainmask_table - chain mask table + * @table_id: tableid. + * @num_valid_chainmasks: num valid chainmasks. + * @cap_list: pointer to wlan_psoc_host_chainmask_capabilities list. 
+ */ +struct wlan_psoc_host_chainmask_table { + uint32_t table_id; + uint32_t num_valid_chainmasks; + struct wlan_psoc_host_chainmask_capabilities *cap_list; +}; + +/** + * struct wlan_psoc_host_service_ext_param - EXT service base params in event + * @default_conc_scan_config_bits: Default concurrenct scan config + * @default_fw_config_bits: Default HW config bits + * @wlan_psoc_host_ppe_threshold ppet: Host PPE threshold struct + * @he_cap_info: HE capabality info + * @mpdu_density: units are microseconds + * @max_bssid_rx_filters: Maximum no of BSSID based RX filters host can program + * Value 0 means FW hasn't given any limit to host. + * @fw_build_vers_ext: Extended FW build version info. + * bits 27:0 rsvd + * bits 31:28 CRM sub ID + * @num_hw_modes: Number of HW modes in event + * @num_phy: Number of Phy mode. + * @num_chainmask_tables: Number of chain mask tables. + * @num_dbr_ring_caps: Number of direct buf rx ring capabilities + * @chainmask_table: Available chain mask tables. 
+ * @sar_version: SAR version info + */ +struct wlan_psoc_host_service_ext_param { + uint32_t default_conc_scan_config_bits; + uint32_t default_fw_config_bits; + struct wlan_psoc_host_ppe_threshold ppet; + uint32_t he_cap_info; + uint32_t mpdu_density; + uint32_t max_bssid_rx_filters; + uint32_t fw_build_vers_ext; + uint32_t num_hw_modes; + uint32_t num_phy; + uint32_t num_chainmask_tables; + uint32_t num_dbr_ring_caps; + struct wlan_psoc_host_chainmask_table + chainmask_table[PSOC_MAX_CHAINMASK_TABLES]; + uint32_t sar_version; +}; + +#endif /* _SERVICE_READY_PARAM_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/service_ready_util.h b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/service_ready_util.h new file mode 100644 index 0000000000000000000000000000000000000000..3d1cb32c12d76f83327034dca7dfb834c55d362f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/inc/service_ready_util.h @@ -0,0 +1,252 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ +/** + * DOC: service_ready_util.h + * + * Public APIs to access (ext)service ready data from psoc object + */ +#ifndef _SERVICE_READY_UTIL_H_ +#define _SERVICE_READY_UTIL_H_ + +#include "wlan_objmgr_psoc_obj.h" +#include "service_ready_param.h" +#include "target_if.h" + +/** + * init_deinit_chainmask_table_alloc() + * - allocate chainmask table capability list. + * @service_ext_param: pointer to server ext param. + * + * Allocates capability list based on num_valid_chainmasks for that table. + * + * Return: QDF Status. + */ +QDF_STATUS init_deinit_chainmask_table_alloc( + struct wlan_psoc_host_service_ext_param *service_ext_param); + +/** + * init_deinit_chainmask_table_free() + * -free chainmask table capability list. + * @service_ext_param: pointer to server ext param. + * + * free capability list based on num_valid_chainmasks for that table. + * + * Return: QDF Status. + */ +QDF_STATUS init_deinit_chainmask_table_free( + struct wlan_psoc_host_service_ext_param *service_ext_param); + +/** + * init_deinit_populate_service_bitmap() - populate service bitmap + * @wmi_handle: wmi handle + * @event: event buffer received from FW + * @service_bitmap: service bitmap information + * + * API to populate service bit map + * + * Return: zero on successful population of service bitmap or failure flag + */ +int init_deinit_populate_service_bitmap(void *wmi_handle, uint8_t *event, + uint32_t *service_bitmap); + +/** + * init_deinit_populate_fw_version_cmd() - populate FW version + * @wmi_handle: wmi handle + * @event: event buffer received from FW + * + * API to populate FW version + * + * Return: zero on successful population of fw_version command or failure flag + */ +int init_deinit_populate_fw_version_cmd(void *wmi_handle, uint8_t *event); + +/** + * init_deinit_populate_target_cap() - populate target cap + * @wmi_handle: wmi handle + * @event: event buffer received from FW + * @cap: target capability info object + * + * API to populate target cap + * + * Return: 
zero on successful population of target cap or failure flag + */ +int init_deinit_populate_target_cap(void *wmi_handle, uint8_t *event, + struct wlan_psoc_target_capability_info *cap); + +/** + * init_deinit_populate_service_ready_ext_param() - populate service ready ext + * parameter + * @handle: WMI handle pointer + * @evt: event buffer received from FW + * @param: service ext param object + * + * API to populate service ready ext param + * + * Return: zero on successful parsing of service ready ext parameter or failure + */ +int init_deinit_populate_service_ready_ext_param(void *handle, uint8_t *evt, + struct wlan_psoc_host_service_ext_param *param); + +/** + * init_deinit_populate_chainmask_tables() - populate chainmaks tables + * @handle: WMI handle pointer + * @evt: event buffer received from FW + * @param: chainmaks_table object + * + * API to populate chainmaks tables + * + * Return: zero on successful parsing of chainmaks tables or failure flag + */ +int init_deinit_populate_chainmask_tables(void *handle, uint8_t *evt, + struct wlan_psoc_host_chainmask_table *param); + +/** + * init_deinit_populate_mac_phy_capability() - populate mac phy capability + * @handle: WMI handle pointer + * @evt: event buffer received from FW + * @hw_cap: hw_mode_caps object + * @info: tgt_info object + * + * API to populate mac phy capability + * + * Return: zero on successful population of mac physical capability or failure + */ +int init_deinit_populate_mac_phy_capability(void *handle, uint8_t *evt, + struct wlan_psoc_host_hw_mode_caps *hw_cap, struct tgt_info *info); + +/** + * init_deinit_populate_hw_mode_capability() - populate hw mode capability + * @wmi_handle: WMI handle pointer + * @event: event buffer received from FW + * @tgt_hdl: target_psoc_info object + * + * API to populate hw mode capability + * + * Return: zero on successful parsing of hw mode capability or failure + */ +int init_deinit_populate_hw_mode_capability(void *wmi_handle, + uint8_t *event, struct 
target_psoc_info *tgt_hdl); + +/** + * init_deinit_populate_dbr_ring_cap() - populate dbr ring capability + * @psoc: PSOC object + * @handle: WMI handle pointer + * @event: event buffer received from FW + * @info: tgt_info object + * + * API to populate dbr ring capability + * + * Return: zero on successful parsing of dbr ring capability or failure + */ +int init_deinit_populate_dbr_ring_cap(struct wlan_objmgr_psoc *psoc, + void *handle, uint8_t *event, + struct tgt_info *info); + +/** + * init_deinit_dbr_ring_cap_free() - free dbr ring capability + * @tgt_psoc_info: target psoc info object + * + * API to free dbr ring capability + * + * Return: QDF_STATUS + */ +QDF_STATUS init_deinit_dbr_ring_cap_free( + struct target_psoc_info *tgt_psoc_info); + +/** + * init_deinit_populate_phy_reg_cap() - populate phy reg capability + * @psoc: PSOC object + * @wmi_handle: WMI handle pointer + * @event: event buffer received from FW + * @info: tgt_info object + * @service_ready: service ready determiner + * + * API to populate phy reg capability + * + * Return: zero on successful parsing of physical reg capability or failure flag + */ +int init_deinit_populate_phy_reg_cap(struct wlan_objmgr_psoc *psoc, + void *wmi_handle, uint8_t *event, + struct tgt_info *info, bool service_ready); + +/** + * init_deinit_validate_160_80p80_fw_caps() - validate 160 80p80 fw caps + * @psoc: PSOC object + * @tgt_info: target_psoc_info object + * + * API to validate 160 80p80 fw caps + * + * Return: SUCCESS on successful validation of 160 80p80 forward caps or Failure + */ +QDF_STATUS init_deinit_validate_160_80p80_fw_caps( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + +/** + * init_deinit_chainmask_config() - config chainmask + * @psoc: PSOC object + * @tgt_info: target_psoc_info object + * + * API to config chainmask + * + * Return: none + */ +void init_deinit_chainmask_config( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + +/** + * 
init_deinit_is_service_ext_msg() - check service ext message + * @psoc: PSOC object + * @tgt_info: target_psoc_info object + * + * API to check whether service ext message is enabled + * + * Return: SUCCESS on successful check of service_ext message or Failure + */ +QDF_STATUS init_deinit_is_service_ext_msg( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); +/** + * init_deinit_is_preferred_hw_mode_supported() - check support of preferred + * hw mode + * @psoc: PSOC object + * @tgt_info: target_psoc_info object + * + * API to check whether preferred hardware mode is enabled + * + * Return: True on support of preferred hardware support or False + */ +bool init_deinit_is_preferred_hw_mode_supported( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + +/** + * init_deinit_wakeup_host_wait() - wakeup host wait + * @psoc: PSOC object + * @tgt_info: target_psoc_info object + * + * API to wakeup FW ready wait queue + * + * Return: None + */ +void init_deinit_wakeup_host_wait( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_info); + +#endif /* _SERVICE_READY_UTIL_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_cmd_api.c b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_cmd_api.c new file mode 100644 index 0000000000000000000000000000000000000000..e9a6b405875ed16665812201bc4ba9c57f336138 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_cmd_api.c @@ -0,0 +1,490 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: init_cmd_api.c + * + * WMI Init command prepare & send APIs + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * init_deinit_alloc_host_mem_chunk() - allocates chunk of memory requested + * by FW. + * @psoc: PSOC object + * @tgt_hdl: Target PSOC info + * @req_id: request id + * @idx: chunk id + * @num_units: Number of units + * @unit_len: Unit length + * @num_unit_info: Num unit info + * + * API to allocate host memory chunk requested by FW + * + * Return: num_units on successful allocation + * 0 on failure + */ +static uint32_t init_deinit_alloc_host_mem_chunk(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, + u_int32_t req_id, u_int32_t idx, u_int32_t num_units, + u_int32_t unit_len, u_int32_t num_unit_info) +{ + qdf_dma_addr_t paddr; + uint32_t ichunk = 0; + struct tgt_info *info; + qdf_device_t qdf_dev; + + info = (&tgt_hdl->info); + + if (!num_units || !unit_len) + return 0; + + qdf_dev = wlan_psoc_get_qdf_dev(psoc); + if (!qdf_dev) + return 0; + + /* + * We have skip smaller chunks memory allocation for TXBF_CV buffer + * as Firmware is expecting continuous memory + */ + if (!((num_unit_info & HOST_CONTIGUOUS_MEM_CHUNK_REQUIRED) && + (req_id == TXBF_CV_POOL0 || req_id == TXBF_CV_POOL1 || + req_id == TXBF_CV_POOL2))) { + ichunk = ((num_units * unit_len) >> + HOST_MEM_CHUNK_MAX_SIZE_POWER2); + if (ichunk) + num_units = num_units / (ichunk 
+ 1); + } + + info->mem_chunks[idx].vaddr = NULL; + /* reduce the requested allocation by half until allocation succeeds */ + while (!info->mem_chunks[idx].vaddr && num_units) { + info->mem_chunks[idx].vaddr = qdf_mem_alloc_consistent(qdf_dev, + qdf_dev->dev, num_units * unit_len, &paddr); + if (info->mem_chunks[idx].vaddr == NULL) { + if (num_unit_info & + HOST_CONTIGUOUS_MEM_CHUNK_REQUIRED) { + num_units = 0; + target_if_err("mem chink alloc failed for %d", + idx); + break; + } + /* reduce length by half */ + num_units = (num_units >> 1); + } else { + info->mem_chunks[idx].paddr = paddr; + info->mem_chunks[idx].len = num_units*unit_len; + info->mem_chunks[idx].req_id = req_id; + } + } + target_if_debug("req_id %d idx %d num_units %d unit_len %d", + req_id, idx, num_units, unit_len); + + return num_units; +} + +/* Host mem size units, it is used for round-off */ +#define HOST_MEM_SIZE_UNIT 4 + +/** + * init_deinit_alloc_host_mem() - allocates amount of memory requested by FW. + * @psoc: PSOC object + * @tgt_hdl: Target PSOC info + * @req_id: request id + * @num_units: Number of units + * @unit_len: Unit length + * @num_unit_info: Num unit info + * + * API to allocate host memory requested by FW + * + * Return: QDF_STATUS_SUCCESS on successful allocation + * QDF_STATUS_E_FAILURE on failure + */ +static QDF_STATUS init_deinit_alloc_host_mem(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, u_int32_t req_id, + u_int32_t num_units, u_int32_t unit_len, + u_int32_t num_unit_info) +{ + struct tgt_info *info; + uint32_t remaining_units; + uint32_t allocated_units = 0; + uint32_t idx; + + info = (&tgt_hdl->info); + /* adjust the length to nearest multiple of unit size */ + unit_len = (unit_len + (HOST_MEM_SIZE_UNIT - 1)) & + (~(HOST_MEM_SIZE_UNIT - 1)); + idx = info->num_mem_chunks; + remaining_units = num_units; + + while (remaining_units) { + if (idx == MAX_MEM_CHUNKS) { + target_if_err( + "REACHED MAX CHUNK LIMIT for mem units %d", + num_units); + 
target_if_err( + "unit len %d requested by FW, only allocated %d", + unit_len, (num_units - remaining_units)); + info->num_mem_chunks = idx; + return QDF_STATUS_E_FAILURE; + } + + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->mem_mgr_alloc_chunk)) + allocated_units = tgt_hdl->tif_ops->mem_mgr_alloc_chunk( + psoc, tgt_hdl, req_id, idx, + remaining_units, + unit_len, num_unit_info); + else + allocated_units = init_deinit_alloc_host_mem_chunk( + psoc, tgt_hdl, req_id, idx, + remaining_units, + unit_len, num_unit_info); + if (allocated_units == 0) { + target_if_err("FAILED TO ALLOC mem unit len %d", + unit_len); + target_if_err("units requested %d units allocated %d", + num_units, (num_units - remaining_units)); + info->num_mem_chunks = idx; + return QDF_STATUS_E_NOMEM; + } + remaining_units -= allocated_units; + ++idx; + } + info->num_mem_chunks = idx; + + return QDF_STATUS_SUCCESS; +} + +/** + * init_deinit_alloc_num_units() - allocates num units requested by FW. + * @psoc: PSOC object + * @tgt_hdl: Target PSOC info + * @mem_reqs: pointer to mem req + * @num_units: Number + * @i: FW priority + * @idx: Index + * + * API to allocate num units of host memory requested by FW + * + * Return: QDF_STATUS_SUCCESS on successful allocation + * QDF_STATUS_E_FAILURE on failure + */ +static QDF_STATUS init_deinit_alloc_num_units(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, + host_mem_req *mem_reqs, uint16_t fw_prio, + uint16_t idx) +{ + struct tgt_info *info; + uint32_t num_units; + QDF_STATUS status; + + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return QDF_STATUS_E_INVAL; + } + + info = (&tgt_hdl->info); + + if (((fw_prio == FW_MEM_HIGH_PRIORITY) && + (mem_reqs[idx].num_unit_info & + HOST_CONTIGUOUS_MEM_CHUNK_REQUIRED)) || + ((fw_prio == FW_MEM_LOW_PRIORITY) && + (!(mem_reqs[idx].num_unit_info & + HOST_CONTIGUOUS_MEM_CHUNK_REQUIRED)))) { + /* First allocate the memory that requires contiguous memory */ + num_units = 
mem_reqs[idx].num_units; + if (mem_reqs[idx].num_unit_info) { + if (mem_reqs[idx].num_unit_info & + NUM_UNITS_IS_NUM_PEERS) { + /* + * number of units allocated is equal to number + * of peers, 1 extra for self peer on target. + * this needs to be fixed, host and target can + * get out of sync + */ + num_units = info->wlan_res_cfg.num_peers + 1; + } + if (mem_reqs[idx].num_unit_info & + NUM_UNITS_IS_NUM_ACTIVE_PEERS) { + /* + * Requesting allocation of memory using + * num_active_peers in qcache. if qcache is + * disabled in host, then it should allocate + * memory for num_peers instead of + * num_active_peers. + */ + if (info->wlan_res_cfg.num_active_peers) + num_units = + info->wlan_res_cfg.num_active_peers + 1; + else + num_units = + info->wlan_res_cfg.num_peers + 1; + } + } + + target_if_debug("idx %d req %d num_units %d num_unit_info %d unit size %d actual units %d", + idx, mem_reqs[idx].req_id, + mem_reqs[idx].num_units, + mem_reqs[idx].num_unit_info, + mem_reqs[idx].unit_size, num_units); + + status = init_deinit_alloc_host_mem(psoc, tgt_hdl, + mem_reqs[idx].req_id, num_units, + mem_reqs[idx].unit_size, + mem_reqs[idx].num_unit_info); + if (status == QDF_STATUS_E_FAILURE) { + target_if_err( + "psoc:(%pK) num_mem_chunk exceeds supp number", + psoc); + return QDF_STATUS_E_FAILURE; + } else if (status == QDF_STATUS_E_NOMEM) { + target_if_err("soc:(%pK) mem alloc failure", psoc); + return QDF_STATUS_E_NOMEM; + } + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS init_deinit_free_num_units(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + struct tgt_info *info; + qdf_device_t qdf_dev; + uint32_t idx; + QDF_STATUS status; + + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return QDF_STATUS_E_INVAL; + } + + if ((tgt_hdl->tif_ops) && + (tgt_hdl->tif_ops->mem_mgr_free_chunks)) { + status = tgt_hdl->tif_ops->mem_mgr_free_chunks(psoc, tgt_hdl); + } else { + qdf_dev = wlan_psoc_get_qdf_dev(psoc); + if (!qdf_dev) { + 
target_if_err("qdf_dev is null"); + QDF_BUG(0); + return QDF_STATUS_E_INVAL; + } + info = (&tgt_hdl->info); + for (idx = 0; idx < info->num_mem_chunks; idx++) { + qdf_mem_free_consistent( + qdf_dev, qdf_dev->dev, + info->mem_chunks[idx].len, + info->mem_chunks[idx].vaddr, + info->mem_chunks[idx].paddr, + qdf_get_dma_mem_context( + (&(info->mem_chunks[idx])), memctx)); + + info->mem_chunks[idx].vaddr = NULL; + info->mem_chunks[idx].paddr = 0; + info->mem_chunks[idx].len = 0; + } + info->num_mem_chunks = 0; + status = QDF_STATUS_SUCCESS; + } + + return status; +} + +QDF_STATUS init_deinit_handle_host_mem_req( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, uint8_t *event) +{ + uint8_t num_mem_reqs; + host_mem_req *mem_reqs; + uint32_t i; + uint32_t idx; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct common_wmi_handle *wmi_handle; + struct tgt_info *info; + + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_psoc_get_wmi_hdl(tgt_hdl); + info = (&tgt_hdl->info); + + mem_reqs = wmi_extract_host_mem_req_from_service_ready( + wmi_handle, event, &num_mem_reqs); + if (!num_mem_reqs) + return QDF_STATUS_SUCCESS; + + if (num_mem_reqs > MAX_MEM_CHUNKS) { + target_if_err_rl("num_mem_reqs:%u is out of bounds", + num_mem_reqs); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < FW_PRIORITY_MAX; i++) { + for (idx = 0; idx < num_mem_reqs; idx++) { + status = init_deinit_alloc_num_units(psoc, tgt_hdl, + mem_reqs, i, idx); + if (status != QDF_STATUS_SUCCESS) + return status; + } + } + + return status; +} + +void init_deinit_derive_band_to_mac_param( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl, + struct wmi_host_pdev_band_to_mac *band_to_mac) +{ + uint8_t i; + struct wlan_psoc_host_mac_phy_caps *mac_phy_cap; + struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap; + struct tgt_info *info; + + if (!tgt_hdl) { + target_if_err("target_psoc_info is null "); + return; 
+ } + + info = (&tgt_hdl->info); + + reg_cap = ucfg_reg_get_hal_reg_cap(psoc); + if (!reg_cap) { + target_if_err("reg cap is NULL"); + return; + } + + for (i = 0; i < target_psoc_get_num_radios(tgt_hdl); i++) { + mac_phy_cap = &info->mac_phy_cap[i]; + if (mac_phy_cap->supported_bands == + (WMI_HOST_WLAN_5G_CAPABILITY | + WMI_HOST_WLAN_2G_CAPABILITY)) { + /*Supports both 5G and 2G. Use freq from both radios*/ + target_if_debug("Supports both 2G and 5G"); + band_to_mac[i].pdev_id = mac_phy_cap->pdev_id; + band_to_mac[i].start_freq = + reg_cap[i].low_2ghz_chan; + band_to_mac[i].end_freq = + reg_cap[i].high_5ghz_chan; + + } else if (mac_phy_cap->supported_bands == + WMI_HOST_WLAN_2G_CAPABILITY) { + band_to_mac[i].pdev_id = mac_phy_cap->pdev_id; + band_to_mac[i].start_freq = + reg_cap[i].low_2ghz_chan; + band_to_mac[i].end_freq = + reg_cap[i].high_2ghz_chan; + + reg_cap[mac_phy_cap->phy_id].low_5ghz_chan = 0; + reg_cap[mac_phy_cap->phy_id].high_5ghz_chan = 0; + + target_if_debug("2G radio - pdev_id = %d start_freq = %d end_freq= %d", + band_to_mac[i].pdev_id, + band_to_mac[i].start_freq, + band_to_mac[i].end_freq); + + } else if (mac_phy_cap->supported_bands == + WMI_HOST_WLAN_5G_CAPABILITY) { + band_to_mac[i].pdev_id = mac_phy_cap->pdev_id; + band_to_mac[i].start_freq = + reg_cap[i].low_5ghz_chan; + band_to_mac[i].end_freq = + reg_cap[i].high_5ghz_chan; + + reg_cap[mac_phy_cap->phy_id].low_2ghz_chan = 0; + reg_cap[mac_phy_cap->phy_id].high_2ghz_chan = 0; + + target_if_debug("5G radio -pdev_id = %d start_freq = %d end_freq =%d\n", + band_to_mac[i].pdev_id, + band_to_mac[i].start_freq, + band_to_mac[i].end_freq); + } + } +} + +void init_deinit_prepare_send_init_cmd( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + struct wmi_init_cmd_param init_param = {0}; + struct tgt_info *info; + struct common_wmi_handle *wmi_handle; + QDF_STATUS ret_val; + + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return; + } + + wmi_handle = 
target_psoc_get_wmi_hdl(tgt_hdl); + info = (&tgt_hdl->info); + + init_param.res_cfg = &info->wlan_res_cfg; + init_param.num_mem_chunks = info->num_mem_chunks; + init_param.mem_chunks = info->mem_chunks; + + if (init_deinit_is_service_ext_msg(psoc, tgt_hdl) == + QDF_STATUS_SUCCESS) { + init_param.hw_mode_id = info->preferred_hw_mode; + /* Temp change, until FW submits support for handling this TLV + * For single mode, skip sending hw_mode + */ + if (info->preferred_hw_mode == WMI_HOST_HW_MODE_SINGLE) + init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX; + + init_param.num_band_to_mac = target_psoc_get_num_radios( + tgt_hdl); + + init_deinit_derive_band_to_mac_param(psoc, tgt_hdl, + init_param.band_to_mac); + } + + ret_val = target_if_alloc_pdevs(psoc, tgt_hdl); + if (ret_val != QDF_STATUS_SUCCESS) + return; + + ret_val = target_if_update_pdev_tgt_info(psoc, tgt_hdl); + if (ret_val != QDF_STATUS_SUCCESS) + return; + + target_if_debug("FW version 0x%x ", info->target_caps.fw_version); + if (init_deinit_is_service_ext_msg(psoc, tgt_hdl) == QDF_STATUS_SUCCESS) + target_if_debug("0x%x\n", + info->service_ext_param.fw_build_vers_ext); + else + target_if_debug("0x%x\n", info->target_caps.fw_version_1); + + wmi_unified_init_cmd_send(wmi_handle, &init_param); + + /* Set Max scans allowed */ + target_if_scan_set_max_active_scans(psoc, + WLAN_MAX_ACTIVE_SCANS_ALLOWED); +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_deinit_lmac.c b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_deinit_lmac.c new file mode 100644 index 0000000000000000000000000000000000000000..e170455f13e1d38b466fdde3ef589bcb907f36f3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_deinit_lmac.c @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: init_deinit_lmac.c + * + * APIs to get/set target_if params + */ +#include +#include +#include +#include +#include +#include +#include + +struct wlan_psoc_target_capability_info *lmac_get_target_cap( + struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return NULL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return NULL; + } + + return target_psoc_get_target_caps(tgt_hdl); +} + +target_resource_config *lmac_get_tgt_res_cfg(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return NULL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return NULL; + } + + return target_psoc_get_wlan_res_cfg(tgt_hdl); +} + +int32_t lmac_get_pdev_idx(struct wlan_objmgr_pdev *pdev) +{ + struct target_pdev_info *tgt_hdl; + + if (!pdev) { + target_if_err("pdev is null"); + return 0xffffffff; + } + + tgt_hdl = wlan_pdev_get_tgt_if_handle(pdev); + if (!tgt_hdl) { + target_if_err("target_pdev_info is null"); + return 0xffffffff; + } + + return 
target_pdev_get_pdev_idx(tgt_hdl); +} + +uint32_t lmac_get_tgt_type(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return 0; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return 0; + } + + return target_psoc_get_target_type(tgt_hdl); +} +qdf_export_symbol(lmac_get_tgt_type); + +QDF_STATUS lmac_get_pdev_target_type(struct wlan_objmgr_pdev *pdev, + uint32_t *target_type) +{ + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + target_if_err("psoc is NULL"); + return QDF_STATUS_E_FAILURE; + } + + *target_type = lmac_get_tgt_type(psoc); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(lmac_get_pdev_target_type); + +uint32_t lmac_get_tgt_version(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return -EINVAL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return -EINVAL; + } + + return target_psoc_get_target_ver(tgt_hdl); +} + +uint32_t lmac_get_tgt_revision(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return -EINVAL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return -EINVAL; + } + + return target_psoc_get_target_rev(tgt_hdl); +} +qdf_export_symbol(lmac_get_tgt_revision); + +bool lmac_is_target_ar900b(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + uint32_t target_type; + + if (!psoc) { + target_if_err("psoc is null\n"); + return false; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return false; + } + target_type = tgt_hdl->info.target_type; + + switch (target_type) { + case TARGET_TYPE_AR900B: + case 
TARGET_TYPE_QCA9984: + case TARGET_TYPE_IPQ4019: + case TARGET_TYPE_QCA9888: + return true; + default: + return false; + } + return false; +} +qdf_export_symbol(lmac_is_target_ar900b); + +struct common_wmi_handle *lmac_get_wmi_hdl(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return NULL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return NULL; + } + + return target_psoc_get_wmi_hdl(tgt_hdl); +} +qdf_export_symbol(lmac_get_wmi_hdl); + +wmi_unified_t lmac_get_wmi_unified_hdl(struct wlan_objmgr_psoc *psoc) +{ + return (wmi_unified_t)lmac_get_wmi_hdl(psoc); +} +qdf_export_symbol(lmac_get_wmi_unified_hdl); + +struct common_htc_handle *lmac_get_htc_hdl(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return NULL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return NULL; + } + + return target_psoc_get_htc_hdl(tgt_hdl); +} +qdf_export_symbol(lmac_get_htc_hdl); + +void lmac_set_htc_hdl(struct wlan_objmgr_psoc *psoc, + struct common_htc_handle *htc_hdl) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return; + } + tgt_hdl = (struct target_psoc_info *)wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return; + } + + target_psoc_set_htc_hdl(tgt_hdl, htc_hdl); +} + +struct common_hif_handle *lmac_get_hif_hdl(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return NULL; + } + + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return NULL; + } + + return target_psoc_get_hif_hdl(tgt_hdl); +} +qdf_export_symbol(lmac_get_hif_hdl); + +struct hif_opaque_softc 
*lmac_get_ol_hif_hdl(struct wlan_objmgr_psoc *psoc) +{ + return (struct hif_opaque_softc *)lmac_get_hif_hdl(psoc); +} +qdf_export_symbol(lmac_get_ol_hif_hdl); + +struct common_wmi_handle *lmac_get_pdev_wmi_handle( + struct wlan_objmgr_pdev *pdev) +{ + struct target_pdev_info *tgt_hdl; + + if (!pdev) { + target_if_err("pdev is null"); + return NULL; + } + + tgt_hdl = wlan_pdev_get_tgt_if_handle(pdev); + if (!tgt_hdl) { + target_if_err("target_pdev_info is null"); + return NULL; + } + + return target_pdev_get_wmi_handle(tgt_hdl); +} + +wmi_unified_t +lmac_get_pdev_wmi_unified_handle(struct wlan_objmgr_pdev *pdev) +{ + return (wmi_unified_t)lmac_get_pdev_wmi_handle(pdev); +} + +uint32_t lmac_get_num_radios(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return 0; + } + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return 0; + } + + return target_psoc_get_num_radios(tgt_hdl); +} +qdf_export_symbol(lmac_get_num_radios); + +void *lmac_get_psoc_feature_ptr(struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + + if (!psoc) { + target_if_err("psoc is null"); + return NULL; + } + tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return NULL; + } + + return target_psoc_get_feature_ptr(tgt_hdl); +} +qdf_export_symbol(lmac_get_psoc_feature_ptr); + +void *lmac_get_pdev_feature_ptr(struct wlan_objmgr_pdev *pdev) +{ + struct target_pdev_info *tgt_hdl; + + if (!pdev) { + target_if_err("pdev is null"); + return NULL; + } + tgt_hdl = wlan_pdev_get_tgt_if_handle(pdev); + if (!tgt_hdl) { + target_if_err("target_pdev_info is null"); + return NULL; + } + + return target_pdev_get_feature_ptr(tgt_hdl); +} +qdf_export_symbol(lmac_get_pdev_feature_ptr); diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_event_handler.c 
b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_event_handler.c new file mode 100644 index 0000000000000000000000000000000000000000..4efdd0721d82a0b897b6e9cf43e01bc01bd300cd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/init_event_handler.c @@ -0,0 +1,494 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: init_event_handler.c + * + * WMI common event handler implementation source file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int init_deinit_service_ready_event_handler(ol_scn_t scn_handle, + uint8_t *event, + uint32_t data_len) +{ + int err_code; + struct wlan_objmgr_psoc *psoc; + struct target_psoc_info *tgt_hdl; + wmi_legacy_service_ready_callback legacy_callback; + struct common_wmi_handle *wmi_handle; + QDF_STATUS ret_val; + + if (!scn_handle) { + target_if_err("scn handle NULL in service ready handler"); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn_handle); + if (!psoc) { + target_if_err("psoc is null in service ready handler"); + return -EINVAL; + } + + tgt_hdl = (struct target_psoc_info *)wlan_psoc_get_tgt_if_handle( + psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null in service ready ev"); + return -EINVAL; + } + + ret_val = target_if_sw_version_check(psoc, tgt_hdl, event); + + wmi_handle = target_psoc_get_wmi_hdl(tgt_hdl); + + err_code = init_deinit_populate_service_bitmap(wmi_handle, event, + tgt_hdl->info.service_bitmap); + if (err_code) + goto exit; + + err_code = init_deinit_populate_fw_version_cmd(wmi_handle, event); + if (err_code) + goto exit; + + err_code = init_deinit_populate_target_cap(wmi_handle, event, + &(tgt_hdl->info.target_caps)); + if (err_code) + goto exit; + + err_code = init_deinit_populate_phy_reg_cap(psoc, wmi_handle, event, + &(tgt_hdl->info), true); + if (err_code) + goto exit; + + if (init_deinit_validate_160_80p80_fw_caps(psoc, tgt_hdl) != + QDF_STATUS_SUCCESS) { + wlan_psoc_nif_op_flag_set(psoc, WLAN_SOC_OP_VHT_INVALID_CAP); + } + + target_if_ext_res_cfg_enable(psoc, tgt_hdl, event); + + if (wmi_service_enabled(wmi_handle, wmi_service_tt)) + wlan_psoc_nif_fw_ext_cap_set(psoc, WLAN_SOC_CEXT_TT_SUPPORT); + + if (wmi_service_enabled(wmi_handle, wmi_service_widebw_scan)) + 
wlan_psoc_nif_fw_ext_cap_set(psoc, WLAN_SOC_CEXT_WIDEBAND_SCAN); + + if (wmi_service_enabled(wmi_handle, wmi_service_check_cal_version)) + wlan_psoc_nif_fw_ext_cap_set(psoc, WLAN_SOC_CEXT_SW_CAL); + + target_if_debug(" TT support %d, Wide BW Scan %d, SW cal %d", + wlan_psoc_nif_fw_ext_cap_get(psoc, WLAN_SOC_CEXT_TT_SUPPORT), + wlan_psoc_nif_fw_ext_cap_get(psoc, WLAN_SOC_CEXT_WIDEBAND_SCAN), + wlan_psoc_nif_fw_ext_cap_get(psoc, WLAN_SOC_CEXT_SW_CAL)); + + target_if_mesh_support_enable(psoc, tgt_hdl, event); + + target_if_smart_antenna_enable(psoc, tgt_hdl, event); + + target_if_peer_cfg_enable(psoc, tgt_hdl, event); + + target_if_atf_cfg_enable(psoc, tgt_hdl, event); + + target_if_qwrap_cfg_enable(psoc, tgt_hdl, event); + + target_if_lteu_cfg_enable(psoc, tgt_hdl, event); + + /* override derived value, if it exceeds max peer count */ + if ((wlan_psoc_get_max_peer_count(psoc) > + tgt_hdl->info.wlan_res_cfg.num_active_peers) && + (wlan_psoc_get_max_peer_count(psoc) < + (tgt_hdl->info.wlan_res_cfg.num_peers - + tgt_hdl->info.wlan_res_cfg.num_vdevs))) { + tgt_hdl->info.wlan_res_cfg.num_peers = + wlan_psoc_get_max_peer_count(psoc) + + tgt_hdl->info.wlan_res_cfg.num_vdevs; + } + legacy_callback = target_if_get_psoc_legacy_service_ready_cb(); + if (!legacy_callback) { + err_code = -EINVAL; + goto exit; + } + + err_code = legacy_callback(wmi_service_ready_event_id, + scn_handle, event, data_len); + init_deinit_chainmask_config(psoc, tgt_hdl); + + if (wmi_service_enabled(wmi_handle, wmi_service_mgmt_tx_wmi)) { + wlan_psoc_nif_fw_ext_cap_set(psoc, WLAN_SOC_CEXT_WMI_MGMT_REF); + target_if_debug("WMI mgmt service enabled"); + } else { + wlan_psoc_nif_fw_ext_cap_clear(psoc, + WLAN_SOC_CEXT_WMI_MGMT_REF); + target_if_debug("WMI mgmt service disabled"); + } + + err_code = init_deinit_handle_host_mem_req(psoc, tgt_hdl, event); + if (err_code != QDF_STATUS_SUCCESS) + goto exit; + + target_if_reg_set_offloaded_info(psoc); + if (!wmi_service_enabled(wmi_handle, wmi_service_ext_msg)) { 
+ target_if_debug("No EXT message, send init command"); + tgt_hdl->info.wmi_service_ready = TRUE; + target_psoc_set_num_radios(tgt_hdl, 1); + /* send init command */ + init_deinit_prepare_send_init_cmd(psoc, tgt_hdl); + } else { + target_if_debug("Wait for EXT message"); + } + +exit: + return err_code; +} + +static int init_deinit_service_ext_ready_event_handler(ol_scn_t scn_handle, + uint8_t *event, + uint32_t data_len) +{ + int err_code; + struct wlan_objmgr_psoc *psoc; + struct target_psoc_info *tgt_hdl; + struct common_wmi_handle *wmi_handle; + struct tgt_info *info; + wmi_legacy_service_ready_callback legacy_callback; + + if (!scn_handle) { + target_if_err("scn handle NULL in service ready handler"); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn_handle); + if (!psoc) { + target_if_err("psoc is null in service ready handler"); + return -EINVAL; + } + + tgt_hdl = (struct target_psoc_info *)wlan_psoc_get_tgt_if_handle( + psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null in service ready ev"); + return -EINVAL; + } + + wmi_handle = target_psoc_get_wmi_hdl(tgt_hdl); + info = (&tgt_hdl->info); + + err_code = init_deinit_populate_service_ready_ext_param(wmi_handle, + event, &(info->service_ext_param)); + if (err_code) + goto exit; + + target_psoc_set_num_radios(tgt_hdl, 0); + err_code = init_deinit_populate_hw_mode_capability(wmi_handle, + event, tgt_hdl); + if (err_code) + goto exit; + + if (init_deinit_is_preferred_hw_mode_supported(psoc, tgt_hdl) + == FALSE) + return -EINVAL; + + target_if_print_service_ready_ext_param(psoc, tgt_hdl); + + err_code = init_deinit_populate_phy_reg_cap(psoc, wmi_handle, + event, info, false); + if (err_code) + goto exit; + + target_if_add_11ax_modes(psoc, tgt_hdl); + + if (init_deinit_chainmask_table_alloc( + &(info->service_ext_param)) == + QDF_STATUS_SUCCESS) { + err_code = init_deinit_populate_chainmask_tables(wmi_handle, + event, + &(info->service_ext_param.chainmask_table[0])); + if 
(err_code) + goto exit; + } + + err_code = init_deinit_populate_dbr_ring_cap(psoc, wmi_handle, + event, info); + if (err_code) + goto exit; + + legacy_callback = target_if_get_psoc_legacy_service_ready_cb(); + if (legacy_callback) + legacy_callback(wmi_service_ready_ext_event_id, + scn_handle, event, data_len); + + info->wlan_res_cfg.num_vdevs = (target_psoc_get_num_radios(tgt_hdl) * + info->wlan_res_cfg.num_vdevs); + info->wlan_res_cfg.beacon_tx_offload_max_vdev = + (target_psoc_get_num_radios(tgt_hdl) * + info->wlan_res_cfg.beacon_tx_offload_max_vdev); + + info->wmi_service_ready = TRUE; + + init_deinit_prepare_send_init_cmd(psoc, tgt_hdl); + +exit: + return err_code; +} + +static int init_deinit_service_available_handler(ol_scn_t scn_handle, + uint8_t *event, + uint32_t data_len) +{ + struct wlan_objmgr_psoc *psoc; + struct target_psoc_info *tgt_hdl; + struct common_wmi_handle *wmi_handle; + + if (!scn_handle) { + target_if_err("scn handle NULL"); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn_handle); + if (!psoc) { + target_if_err("psoc is null"); + return -EINVAL; + } + + tgt_hdl = (struct target_psoc_info *)wlan_psoc_get_tgt_if_handle( + psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return -EINVAL; + } + + wmi_handle = target_psoc_get_wmi_hdl(tgt_hdl); + + if (wmi_save_ext_service_bitmap(wmi_handle, event, NULL) != + QDF_STATUS_SUCCESS) { + target_if_err("Failed to save ext service bitmap"); + return -EINVAL; + } + + return 0; +} + +/* MAC address fourth byte index */ +#define MAC_BYTE_4 4 + +static int init_deinit_ready_event_handler(ol_scn_t scn_handle, + uint8_t *event, + uint32_t data_len) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct target_psoc_info *tgt_hdl; + struct common_wmi_handle *wmi_handle; + struct wmi_host_fw_abi_ver fw_ver; + uint8_t myaddr[QDF_MAC_ADDR_SIZE]; + struct tgt_info *info; + struct wmi_host_ready_ev_param ready_ev; + wmi_legacy_service_ready_callback 
legacy_callback; + uint8_t num_radios, i; + uint32_t max_peers; + + if (!scn_handle) { + target_if_err("scn handle NULL"); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn_handle); + if (!psoc) { + target_if_err("psoc is null"); + return -EINVAL; + } + + tgt_hdl = (struct target_psoc_info *)wlan_psoc_get_tgt_if_handle( + psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info is null"); + return -EINVAL; + } + + wmi_handle = target_psoc_get_wmi_hdl(tgt_hdl); + info = (&tgt_hdl->info); + + if (wmi_extract_fw_abi_version(wmi_handle, event, &fw_ver) == + QDF_STATUS_SUCCESS) { + info->version.wlan_ver = fw_ver.sw_version; + info->version.wlan_ver = fw_ver.abi_version; + } + + if (wmi_check_and_update_fw_version(wmi_handle, event) < 0) { + target_if_err("Version mismatch with FW"); + return -EINVAL; + } + + if (wmi_extract_ready_event_params(wmi_handle, event, &ready_ev) != + QDF_STATUS_SUCCESS) { + target_if_err("Failed to extract ready event"); + return -EINVAL; + } + + if ((ready_ev.num_total_peer != 0) && + (info->wlan_res_cfg.num_peers != ready_ev.num_total_peer)) { + /* FW allocated number of peers is different than host + * requested. Update host max with FW reported value. + */ + target_if_err("Host Requested %d peers. 
FW Supports %d peers", + info->wlan_res_cfg.num_peers, + ready_ev.num_total_peer); + info->wlan_res_cfg.num_peers = ready_ev.num_total_peer; + } + + /* for non legacy num_total_peer will be non zero + * allocate peer memory in this case + */ + if (ready_ev.num_total_peer != 0) { + max_peers = info->wlan_res_cfg.num_peers + + ready_ev.num_extra_peer + 1; + + cdp_peer_map_attach(wlan_psoc_get_dp_handle(psoc), max_peers); + } + + /* Indicate to the waiting thread that the ready + * event was received + */ + info->wlan_init_status = wmi_ready_extract_init_status( + wmi_handle, event); + + legacy_callback = target_if_get_psoc_legacy_service_ready_cb(); + if (legacy_callback) + if (legacy_callback(wmi_ready_event_id, + scn_handle, event, data_len)) { + target_if_err("Legacy callback returned error!"); + tgt_hdl->info.wmi_ready = FALSE; + goto exit; + } + + num_radios = target_psoc_get_num_radios(tgt_hdl); + /* + * For non-legacy HW, MAC addr list is extracted. + */ + if (num_radios > 1) { + uint8_t num_mac_addr; + wmi_host_mac_addr *addr_list; + int i; + + addr_list = wmi_ready_extract_mac_addr_list(wmi_handle, event, + &num_mac_addr); + if ((num_mac_addr >= num_radios) && (addr_list)) { + for (i = 0; i < num_radios; i++) { + WMI_HOST_MAC_ADDR_TO_CHAR_ARRAY(&addr_list[i], + myaddr); + pdev = wlan_objmgr_get_pdev_by_id(psoc, i, + WLAN_INIT_DEINIT_ID); + if (!pdev) { + target_if_err(" PDEV %d is NULL", i); + return -EINVAL; + } + wlan_pdev_set_hw_macaddr(pdev, myaddr); + wlan_objmgr_pdev_release_ref(pdev, + WLAN_INIT_DEINIT_ID); + + /* assign 1st radio addr to psoc */ + if (i == 0) + wlan_psoc_set_hw_macaddr(psoc, myaddr); + } + goto out; + } else { + target_if_err("Using default MAC addr for all radios.."); + } + } + + /* + * We extract single MAC address in two scenarios: + * 1. In non-legacy case, if addr list is NULL or num_mac_addr < num_radios + * 2. 
In all legacy cases + */ + for (i = 0; i < num_radios; i++) { + wmi_ready_extract_mac_addr(wmi_handle, event, myaddr); + myaddr[MAC_BYTE_4] += i; + pdev = wlan_objmgr_get_pdev_by_id(psoc, i, WLAN_INIT_DEINIT_ID); + if (!pdev) { + target_if_err(" PDEV %d is NULL", i); + return -EINVAL; + } + wlan_pdev_set_hw_macaddr(pdev, myaddr); + wlan_objmgr_pdev_release_ref(pdev, WLAN_INIT_DEINIT_ID); + /* assign 1st radio addr to psoc */ + if (i == 0) + wlan_psoc_set_hw_macaddr(psoc, myaddr); + } + +out: + target_if_btcoex_cfg_enable(psoc, tgt_hdl, event); + tgt_hdl->info.wmi_ready = TRUE; +exit: + init_deinit_wakeup_host_wait(psoc, tgt_hdl); + + return 0; +} + + +QDF_STATUS init_deinit_register_tgt_psoc_ev_handlers( + struct wlan_objmgr_psoc *psoc) +{ + struct target_psoc_info *tgt_hdl; + wmi_unified_t wmi_handle; + QDF_STATUS retval = QDF_STATUS_SUCCESS; + + if (!psoc) { + target_if_err("psoc is null in register wmi handler"); + return QDF_STATUS_E_FAILURE; + } + + tgt_hdl = (struct target_psoc_info *)wlan_psoc_get_tgt_if_handle( + psoc); + if (!tgt_hdl) { + target_if_err("target_psoc_info null in register wmi hadler"); + return QDF_STATUS_E_FAILURE; + } + + wmi_handle = (wmi_unified_t)target_psoc_get_wmi_hdl(tgt_hdl); + + retval = wmi_unified_register_event_handler(wmi_handle, + wmi_service_ready_event_id, + init_deinit_service_ready_event_handler, + WMI_RX_WORK_CTX); + retval = wmi_unified_register_event_handler(wmi_handle, + wmi_service_ready_ext_event_id, + init_deinit_service_ext_ready_event_handler, + WMI_RX_WORK_CTX); + retval = wmi_unified_register_event_handler(wmi_handle, + wmi_service_available_event_id, + init_deinit_service_available_handler, + WMI_RX_UMAC_CTX); + retval = wmi_unified_register_event_handler(wmi_handle, + wmi_ready_event_id, + init_deinit_ready_event_handler, + WMI_RX_WORK_CTX); + + return retval; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/service_ready_util.c 
b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/service_ready_util.c new file mode 100644 index 0000000000000000000000000000000000000000..d19f03299b73f54eb6bc9b2453f65d9fc858256b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/init_deinit/src/service_ready_util.c @@ -0,0 +1,580 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: service_ready_util.c + * + * Public APIs implementation source file for accessing (ext)service ready + * data from psoc object + */ +#include "service_ready_util.h" +#include +#include +#include + +QDF_STATUS init_deinit_chainmask_table_alloc( + struct wlan_psoc_host_service_ext_param *ser_ext_par) +{ + int i; + uint32_t alloc_size; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (ser_ext_par->num_chainmask_tables == 0) + return QDF_STATUS_E_NOSUPPORT; + + for (i = 0; i < ser_ext_par->num_chainmask_tables; i++) { + if (ser_ext_par->chainmask_table[i].num_valid_chainmasks > + (UINT_MAX / sizeof( + struct wlan_psoc_host_chainmask_capabilities))) { + target_if_err("invalid valid chanmask num %d", + ser_ext_par->chainmask_table[i]. 
+ num_valid_chainmasks); + status = QDF_STATUS_E_FAILURE; + break; + } + alloc_size = + (sizeof(struct wlan_psoc_host_chainmask_capabilities) * + ser_ext_par->chainmask_table[i].num_valid_chainmasks); + + ser_ext_par->chainmask_table[i].cap_list = + qdf_mem_alloc_outline(NULL, alloc_size); + if (!ser_ext_par->chainmask_table[i].cap_list) { + init_deinit_chainmask_table_free(ser_ext_par); + status = QDF_STATUS_E_NOMEM; + break; + } + } + + return status; +} + +qdf_export_symbol(init_deinit_chainmask_table_alloc); + +QDF_STATUS init_deinit_chainmask_table_free( + struct wlan_psoc_host_service_ext_param *ser_ext_par) +{ + struct wlan_psoc_host_chainmask_table *table; + int i; + + for (i = 0; i < ser_ext_par->num_chainmask_tables; i++) { + table = &(ser_ext_par->chainmask_table[i]); + if (table->cap_list) { + qdf_mem_free(table->cap_list); + table->cap_list = NULL; + } + } + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(init_deinit_chainmask_table_free); + +int init_deinit_populate_service_bitmap(void *wmi_handle, uint8_t *event, + uint32_t *service_bitmap) +{ + QDF_STATUS status; + + status = wmi_save_service_bitmap(wmi_handle, event, service_bitmap); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse service bitmap"); + return qdf_status_to_os_return(status); + } + + return 0; +} + +int init_deinit_populate_fw_version_cmd(void *wmi_handle, uint8_t *event) +{ + QDF_STATUS status; + + status = wmi_unified_save_fw_version_cmd(wmi_handle, event); + if (QDF_IS_STATUS_ERROR(status)) + target_if_err("failed to save fw version"); + + return 0; +} + +int init_deinit_populate_target_cap(void *wmi_handle, uint8_t *event, + struct wlan_psoc_target_capability_info *cap) +{ + QDF_STATUS status; + + status = wmi_get_target_cap_from_service_ready(wmi_handle, event, cap); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse target cap"); + return qdf_status_to_os_return(status); + } + + return 0; +} + +int 
init_deinit_populate_service_ready_ext_param(void *handle, uint8_t *evt, + struct wlan_psoc_host_service_ext_param *param) +{ + QDF_STATUS status; + + status = wmi_extract_service_ready_ext(handle, evt, param); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse wmi service ready ext param"); + return qdf_status_to_os_return(status); + } + + return 0; +} + +int init_deinit_populate_chainmask_tables(void *handle, uint8_t *evt, + struct wlan_psoc_host_chainmask_table *param) +{ + QDF_STATUS status; + + status = wmi_extract_chainmask_tables(handle, evt, param); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse wmi service ready ext param"); + return qdf_status_to_os_return(status); + } + + return 0; +} + +int init_deinit_populate_mac_phy_capability(void *handle, uint8_t *evt, + struct wlan_psoc_host_hw_mode_caps *hw_cap, struct tgt_info *info) +{ + QDF_STATUS status; + uint32_t hw_mode_id; + uint32_t phy_bit_map; + uint8_t mac_phy_id; + + hw_mode_id = hw_cap->hw_mode_id; + phy_bit_map = hw_cap->phy_id_map; + target_if_debug("hw_mode_id %d phy_bit_map 0x%x", + hw_mode_id, phy_bit_map); + + mac_phy_id = 0; + while (phy_bit_map) { + if (info->total_mac_phy_cnt >= PSOC_MAX_MAC_PHY_CAP) { + target_if_err("total mac phy exceeds max limit %d", + info->total_mac_phy_cnt); + return -EINVAL; + } + + status = wmi_extract_mac_phy_cap_service_ready_ext(handle, + evt, hw_mode_id, mac_phy_id, + &(info->mac_phy_cap[info->total_mac_phy_cnt])); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse mac phy capability"); + return qdf_status_to_os_return(status); + } + info->mac_phy_cap[info->total_mac_phy_cnt].hw_mode_config_type + = hw_cap->hw_mode_config_type; + info->total_mac_phy_cnt++; + phy_bit_map &= (phy_bit_map - 1); + mac_phy_id++; + } + target_if_debug("total_mac_phy_cnt %d", info->total_mac_phy_cnt); + + return 0; +} + +static int get_hw_mode(void *handle, uint8_t *evt, uint8_t hw_idx, + struct wlan_psoc_host_hw_mode_caps 
*cap) +{ + QDF_STATUS status; + + status = wmi_extract_hw_mode_cap_service_ready_ext(handle, evt, + hw_idx, cap); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse hw mode capability"); + return qdf_status_to_os_return(status); + } + + return 0; +} + +static int get_sar_version(void *handle, uint8_t *evt, + struct wlan_psoc_host_service_ext_param *ext_param) +{ + QDF_STATUS status; + + status = wmi_extract_sar_cap_service_ready_ext(handle, evt, ext_param); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse sar capability"); + return qdf_status_to_os_return(status); + } + + return 0; +} + +int init_deinit_populate_hw_mode_capability(void *wmi_handle, + uint8_t *event, struct target_psoc_info *tgt_hdl) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint8_t hw_idx; + uint32_t num_hw_modes; + struct wlan_psoc_host_hw_mode_caps hw_mode_caps[PSOC_MAX_HW_MODE]; + uint32_t preferred_mode; + struct tgt_info *info; + + info = &tgt_hdl->info; + num_hw_modes = info->service_ext_param.num_hw_modes; + if (num_hw_modes > PSOC_MAX_HW_MODE) { + target_if_err("invalid num_hw_modes %d", num_hw_modes); + return -EINVAL; + } + target_if_debug("num_hw_modes %d", num_hw_modes); + + qdf_mem_zero(&hw_mode_caps, sizeof(hw_mode_caps)); + + preferred_mode = target_psoc_get_preferred_hw_mode(tgt_hdl); + for (hw_idx = 0; hw_idx < num_hw_modes; hw_idx++) { + status = get_hw_mode(wmi_handle, event, hw_idx, + &hw_mode_caps[hw_idx]); + if (status) + goto return_exit; + + if ((preferred_mode != WMI_HOST_HW_MODE_MAX) && + (hw_mode_caps[hw_idx].hw_mode_id != preferred_mode)) + continue; + + status = init_deinit_populate_mac_phy_capability(wmi_handle, + event, &hw_mode_caps[hw_idx], info); + if (status) + goto return_exit; + + if ((preferred_mode != WMI_HOST_HW_MODE_MAX) && + (hw_mode_caps[hw_idx].hw_mode_id == preferred_mode)) { + info->num_radios = info->total_mac_phy_cnt; + target_if_debug("num radios is %d\n", info->num_radios); + } + } + status = 
get_sar_version(wmi_handle, event, &info->service_ext_param); + target_if_debug("sar version %d", info->service_ext_param.sar_version); + +return_exit: + return qdf_status_to_os_return(status); +} + +int init_deinit_populate_dbr_ring_cap(struct wlan_objmgr_psoc *psoc, + void *handle, uint8_t *event, struct tgt_info *info) + +{ + uint8_t cap_idx; + uint32_t num_dbr_ring_caps; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + num_dbr_ring_caps = info->service_ext_param.num_dbr_ring_caps; + + target_if_debug("Num DMA Capabilities = %d", num_dbr_ring_caps); + + if (!num_dbr_ring_caps) + return 0; + + info->dbr_ring_cap = qdf_mem_malloc( + sizeof(struct wlan_psoc_host_dbr_ring_caps) * + num_dbr_ring_caps); + + if (!info->dbr_ring_cap) { + target_if_err("Mem alloc for DMA cap failed"); + return -EINVAL; + } + + for (cap_idx = 0; cap_idx < num_dbr_ring_caps; cap_idx++) { + status = wmi_extract_dbr_ring_cap_service_ready_ext(handle, + event, cap_idx, + &(info->dbr_ring_cap[cap_idx])); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("Extraction of DMA cap failed"); + goto free_and_return; + } + } + + return 0; + +free_and_return: + qdf_mem_free(info->dbr_ring_cap); + info->dbr_ring_cap = NULL; + + return qdf_status_to_os_return(status); +} + +QDF_STATUS init_deinit_dbr_ring_cap_free( + struct target_psoc_info *tgt_psoc_info) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (tgt_psoc_info->info.dbr_ring_cap) { + qdf_mem_free(tgt_psoc_info->info.dbr_ring_cap); + tgt_psoc_info->info.dbr_ring_cap = NULL; + } + + return status; +} +qdf_export_symbol(init_deinit_dbr_ring_cap_free); + +int init_deinit_populate_phy_reg_cap(struct wlan_objmgr_psoc *psoc, + void *handle, uint8_t *event, + struct tgt_info *info, bool service_ready) +{ + uint8_t reg_idx; + uint32_t num_phy_reg_cap; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct wlan_psoc_hal_reg_capability cap; + struct wlan_psoc_host_hal_reg_capabilities_ext + reg_cap[PSOC_MAX_PHY_REG_CAP] = {{0} }; + + if (service_ready) { + 
status = wmi_extract_hal_reg_cap(handle, event, &cap); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse hal reg cap"); + return qdf_status_to_os_return(status); + } + info->service_ext_param.num_phy = 1; + num_phy_reg_cap = 1; + reg_cap[0].phy_id = 0; + qdf_mem_copy(&(reg_cap[0].eeprom_reg_domain), &cap, + sizeof(struct wlan_psoc_hal_reg_capability)); + target_if_debug("FW wireless modes 0x%x", + reg_cap[0].wireless_modes); + } else { + num_phy_reg_cap = info->service_ext_param.num_phy; + if (num_phy_reg_cap > PSOC_MAX_PHY_REG_CAP) { + target_if_err("Invalid num_phy_reg_cap %d", + num_phy_reg_cap); + return -EINVAL; + } + target_if_debug("num_phy_reg_cap %d", num_phy_reg_cap); + + for (reg_idx = 0; reg_idx < num_phy_reg_cap; reg_idx++) { + status = wmi_extract_reg_cap_service_ready_ext(handle, + event, reg_idx, &(reg_cap[reg_idx])); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to parse reg cap"); + return qdf_status_to_os_return(status); + } + } + } + + status = ucfg_reg_set_hal_reg_cap(psoc, reg_cap, num_phy_reg_cap); + + return qdf_status_to_os_return(status); +} + +static bool init_deinit_regdmn_160mhz_support( + struct wlan_psoc_host_hal_reg_capabilities_ext *hal_cap) +{ + return ((hal_cap->wireless_modes & + WMI_HOST_REGDMN_MODE_11AC_VHT160) != 0); +} + +static bool init_deinit_regdmn_80p80mhz_support( + struct wlan_psoc_host_hal_reg_capabilities_ext *hal_cap) +{ + return ((hal_cap->wireless_modes & + WMI_HOST_REGDMN_MODE_11AC_VHT80_80) != 0); +} + +static bool init_deinit_vht_160mhz_is_supported(uint32_t vhtcap) +{ + return ((vhtcap & WLAN_VHTCAP_SUP_CHAN_WIDTH_160) != 0); +} + +static bool init_deinit_vht_80p80mhz_is_supported(uint32_t vhtcap) +{ + return ((vhtcap & WLAN_VHTCAP_SUP_CHAN_WIDTH_80_160) != 0); +} + +static bool init_deinit_vht_160mhz_shortgi_is_supported(uint32_t vhtcap) +{ + return ((vhtcap & WLAN_VHTCAP_SHORTGI_160) != 0); +} + +QDF_STATUS init_deinit_validate_160_80p80_fw_caps( + struct wlan_objmgr_psoc 
*psoc, + struct target_psoc_info *tgt_hdl) +{ + bool wireless_mode_160mhz = false; + bool wireless_mode_80p80mhz = false; + bool vhtcap_160mhz = false; + bool vhtcap_80p80_160mhz = false; + bool vhtcap_160mhz_sgi = false; + bool valid = false; + struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap; + struct common_wmi_handle *wmi_handle; + + if (!tgt_hdl) { + target_if_err( + "target_psoc_info is null in validate 160n80p80 cap check"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_psoc_get_wmi_hdl(tgt_hdl); + + if ((tgt_hdl->info.target_type == TARGET_TYPE_QCA8074) || + (tgt_hdl->info.target_type == TARGET_TYPE_QCA6290)) { + /** + * Return true for now. This is not available in + * qca8074 fw yet + */ + return QDF_STATUS_SUCCESS; + } + + reg_cap = ucfg_reg_get_hal_reg_cap(psoc); + if (reg_cap == NULL) { + target_if_err("reg cap is NULL"); + return QDF_STATUS_E_FAILURE; + } + + /* NOTE: Host driver gets vht capability and supported channel + * width / channel frequency range from FW/HALPHY and obeys it. + * Host driver is unaware of any physical filters or any other + * hardware factors that can impact these capabilities. + * These need to be correctly determined by firmware. + */ + + /*This table lists all valid and invalid combinations + * WMODE160 WMODE80_80 VHTCAP_160 VHTCAP_80+80_160 IsCombinationvalid? + * 0 0 0 0 YES + * 0 0 0 1 NO + * 0 0 1 0 NO + * 0 0 1 1 NO + * 0 1 0 0 NO + * 0 1 0 1 NO + * 0 1 1 0 NO + * 0 1 1 1 NO + * 1 0 0 0 NO + * 1 0 0 1 NO + * 1 0 1 0 YES + * 1 0 1 1 NO + * 1 1 0 0 NO + * 1 1 0 1 YES + * 1 1 1 0 NO + * 1 1 1 1 NO + */ + + /* NOTE: Last row in above table is invalid because value corresponding + * to both VHTCAP_160 and VHTCAP_80+80_160 being set is reserved as per + * 802.11ac. Only one of them can be set at a time. 
+ */ + + wireless_mode_160mhz = init_deinit_regdmn_160mhz_support(reg_cap); + wireless_mode_80p80mhz = init_deinit_regdmn_80p80mhz_support(reg_cap); + vhtcap_160mhz = init_deinit_vht_160mhz_is_supported( + tgt_hdl->info.target_caps.vht_cap_info); + vhtcap_80p80_160mhz = init_deinit_vht_80p80mhz_is_supported( + tgt_hdl->info.target_caps.vht_cap_info); + vhtcap_160mhz_sgi = init_deinit_vht_160mhz_shortgi_is_supported( + tgt_hdl->info.target_caps.vht_cap_info); + + if (!(wireless_mode_160mhz || wireless_mode_80p80mhz || + vhtcap_160mhz || vhtcap_80p80_160mhz)) { + valid = QDF_STATUS_SUCCESS; + } else if (wireless_mode_160mhz && !wireless_mode_80p80mhz && + vhtcap_160mhz && !vhtcap_80p80_160mhz) { + valid = QDF_STATUS_SUCCESS; + } else if (wireless_mode_160mhz && wireless_mode_80p80mhz && + !vhtcap_160mhz && vhtcap_160mhz_sgi) { + valid = QDF_STATUS_SUCCESS; + } + + if (valid == QDF_STATUS_SUCCESS) { + /* + * Ensure short GI for 160 MHz is enabled + * only if 160/80+80 is supported. + */ + if (vhtcap_160mhz_sgi && + !(vhtcap_160mhz || vhtcap_80p80_160mhz)) { + valid = QDF_STATUS_E_FAILURE; + } + } + + /* Invalid config specified by FW */ + if (valid != QDF_STATUS_SUCCESS) { + target_if_err("Invalid 160/80+80 MHz config specified by FW. 
Take care of it first"); + target_if_err("wireless_mode_160mhz: %d, wireless_mode_80p80mhz: %d", + wireless_mode_160mhz, wireless_mode_80p80mhz); + target_if_err("vhtcap_160mhz: %d, vhtcap_80p80_160mhz: %d,vhtcap_160mhz_sgi: %d", + vhtcap_160mhz, vhtcap_80p80_160mhz, + vhtcap_160mhz_sgi); + } + return valid; +} + +void init_deinit_chainmask_config( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + tgt_hdl->info.wlan_res_cfg.tx_chain_mask = + ((1 << tgt_hdl->info.target_caps.num_rf_chains) - 1); + tgt_hdl->info.wlan_res_cfg.rx_chain_mask = + ((1 << tgt_hdl->info.target_caps.num_rf_chains) - 1); +} + +QDF_STATUS init_deinit_is_service_ext_msg( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + struct common_wmi_handle *wmi_handle; + + if (!tgt_hdl) { + target_if_err( + "psoc target_psoc_info is null in service ext msg"); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = target_psoc_get_wmi_hdl(tgt_hdl); + + if (wmi_service_enabled(wmi_handle, wmi_service_ext_msg)) + return QDF_STATUS_SUCCESS; + else + return QDF_STATUS_E_FAILURE; +} + +bool init_deinit_is_preferred_hw_mode_supported( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + uint16_t i; + struct tgt_info *info; + + if (!tgt_hdl) { + target_if_err( + "psoc target_psoc_info is null in service ext msg"); + return FALSE; + } + + info = &tgt_hdl->info; + + if (info->preferred_hw_mode == WMI_HOST_HW_MODE_MAX) + return TRUE; + + for (i = 0; i < target_psoc_get_total_mac_phy_cnt(tgt_hdl); i++) { + if (info->mac_phy_cap[i].hw_mode_id == info->preferred_hw_mode) + return TRUE; + } + + return FALSE; +} + +void init_deinit_wakeup_host_wait( + struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + if (!tgt_hdl) { + target_if_err("psoc target_psoc_info is null in target ready"); + return; + } + qdf_event_set(&tgt_hdl->info.event); +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/nan/inc/target_if_nan.h 
b/drivers/staging/qca-wifi-host-cmn/target_if/nan/inc/target_if_nan.h new file mode 100644 index 0000000000000000000000000000000000000000..2563efae7d0ea97ec43b0a5d9ddd975bca636a43 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/nan/inc/target_if_nan.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: contains nan target if declarations + */ + +#ifndef _WLAN_NAN_TGT_IF_H_ +#define _WLAN_NAN_TGT_IF_H_ + +#include "qdf_types.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct wlan_objmgr_psoc; +struct wlan_lmac_if_rx_ops; +struct wlan_lmac_if_tx_ops; +struct wlan_lmac_if_nan_rx_ops; + +/** + * target_if_nan_get_tx_ops() - retrieve the nan tx_ops + * @psoc: psoc context + * + * API to retrieve the nan tx_ops from the psoc context + * + * Return: nan tx_ops pointer + */ +struct wlan_lmac_if_nan_tx_ops *target_if_nan_get_tx_ops( + struct wlan_objmgr_psoc *psoc); + +/** + * target_if_nan_get_rx_ops() - retrieve the nan rx_ops + * @psoc: psoc context + * + * API to retrieve the nan rx_ops from the psoc context + * + * Return: nan rx_ops pointer + */ +struct wlan_lmac_if_nan_rx_ops *target_if_nan_get_rx_ops( + struct wlan_objmgr_psoc *psoc); + +/** + * target_if_nan_register_tx_ops() - registers nan tx ops + * @tx_ops: tx ops + * + * Return: none + */ +void target_if_nan_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_nan_register_rx_ops() - registers nan rx ops + * @tx_ops: rx ops + * + * Return: none + */ +void target_if_nan_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops); + +/** + * target_if_nan_register_events() - registers with NDP events + * @psoc: pointer to psoc object + * + * Return: status of operation + */ +QDF_STATUS target_if_nan_register_events(struct wlan_objmgr_psoc *psoc); + +/** + * target_if_nan_deregister_events() - registers nan rx ops + * @psoc: pointer to psoc object + * + * Return: status of operation + */ +QDF_STATUS target_if_nan_deregister_events(struct wlan_objmgr_psoc *psoc); + +#endif /* _WIFI_POS_TGT_IF_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/nan/src/target_if_nan.c b/drivers/staging/qca-wifi-host-cmn/target_if/nan/src/target_if_nan.c new file mode 100644 index 
0000000000000000000000000000000000000000..abb5eec245a9be4ccfff559f23d5d4bb419fd96c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/nan/src/target_if_nan.c @@ -0,0 +1,834 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: contains nan target if functions + */ + +#include "../../../umac/nan/core/src/nan_main_i.h" +#include "nan_public_structs.h" +#include "nan_ucfg_api.h" +#include "target_if_nan.h" +#include "wmi_unified_api.h" +#include "scheduler_api.h" + +static QDF_STATUS target_if_nan_event_flush_cb(struct scheduler_msg *msg) +{ + void *ptr = msg->bodyptr; + struct wlan_objmgr_vdev *vdev = NULL; + + switch (msg->type) { + case NDP_INITIATOR_RSP: + vdev = ((struct nan_datapath_initiator_rsp *)ptr)->vdev; + break; + case NDP_INDICATION: + vdev = ((struct nan_datapath_indication_event *)ptr)->vdev; + break; + case NDP_CONFIRM: + vdev = ((struct nan_datapath_confirm_event *)ptr)->vdev; + break; + case NDP_RESPONDER_RSP: + vdev = ((struct nan_datapath_responder_rsp *)ptr)->vdev; + break; + case NDP_END_RSP: + vdev = ((struct nan_datapath_end_rsp_event *)ptr)->vdev; + break; + case NDP_END_IND: + vdev = ((struct nan_datapath_end_indication_event *)ptr)->vdev; + break; + case NDP_SCHEDULE_UPDATE: + vdev = ((struct nan_datapath_sch_update_event *)ptr)->vdev; + break; + default: + break; + } + + if (vdev) + wlan_objmgr_vdev_release_ref(vdev, WLAN_NAN_ID); + qdf_mem_free(msg->bodyptr); + msg->bodyptr = NULL; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_nan_event_dispatcher(struct scheduler_msg *msg) +{ + QDF_STATUS status; + void *ptr = msg->bodyptr; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_vdev *vdev = NULL; + struct wlan_lmac_if_nan_rx_ops *nan_rx_ops; + + switch (msg->type) { + case NDP_INITIATOR_RSP: + vdev = ((struct nan_datapath_initiator_rsp *)ptr)->vdev; + break; + case NDP_INDICATION: + vdev = ((struct nan_datapath_indication_event *)ptr)->vdev; + break; + case NDP_CONFIRM: + vdev = ((struct nan_datapath_confirm_event *)ptr)->vdev; + break; + case NDP_RESPONDER_RSP: + vdev = ((struct nan_datapath_responder_rsp *)ptr)->vdev; + break; + case NDP_END_RSP: + vdev = ((struct nan_datapath_end_rsp_event *)ptr)->vdev; + break; + case 
NDP_END_IND: + vdev = ((struct nan_datapath_end_indication_event *)ptr)->vdev; + break; + case NDP_SCHEDULE_UPDATE: + vdev = ((struct nan_datapath_sch_update_event *)ptr)->vdev; + break; + default: + target_if_err("invalid msg type %d", msg->type); + status = QDF_STATUS_E_INVAL; + goto free_res; + } + + if (!vdev) { + target_if_err("vdev is null"); + status = QDF_STATUS_E_NULL_VALUE; + goto free_res; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + target_if_err("psoc is null"); + status = QDF_STATUS_E_NULL_VALUE; + goto free_res; + } + + nan_rx_ops = target_if_nan_get_rx_ops(psoc); + if (!nan_rx_ops) { + target_if_err("nan_rx_ops is null"); + status = QDF_STATUS_E_NULL_VALUE; + goto free_res; + } + + status = nan_rx_ops->nan_event_rx(msg); +free_res: + if (vdev) + wlan_objmgr_vdev_release_ref(vdev, WLAN_NAN_ID); + qdf_mem_free(msg->bodyptr); + msg->bodyptr = NULL; + return status; +} + +static QDF_STATUS target_if_nan_ndp_initiator_req( + struct nan_datapath_initiator_req *ndp_req) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + struct wlan_objmgr_psoc *psoc; + struct scheduler_msg pe_msg = {0}; + struct wlan_lmac_if_nan_rx_ops *nan_rx_ops; + struct nan_datapath_initiator_rsp ndp_rsp = {0}; + + if (!ndp_req) { + target_if_err("ndp_req is null."); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_vdev_get_psoc(ndp_req->vdev); + if (!psoc) { + target_if_err("psoc is null."); + return QDF_STATUS_E_INVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("wmi_handle is null."); + return QDF_STATUS_E_INVAL; + } + + nan_rx_ops = target_if_nan_get_rx_ops(psoc); + if (!nan_rx_ops) { + target_if_err("nan_rx_ops is null."); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_ndp_initiator_req_cmd_send(wmi_handle, ndp_req); + if (QDF_IS_STATUS_SUCCESS(status)) + return status; + + ndp_rsp.vdev = ndp_req->vdev; + ndp_rsp.transaction_id = ndp_req->transaction_id; + ndp_rsp.ndp_instance_id = 
ndp_req->service_instance_id; + ndp_rsp.status = NAN_DATAPATH_DATA_INITIATOR_REQ_FAILED; + pe_msg.type = NDP_INITIATOR_RSP; + pe_msg.bodyptr = &ndp_rsp; + if (nan_rx_ops->nan_event_rx) + nan_rx_ops->nan_event_rx(&pe_msg); + + return status; +} + +static int target_if_ndp_initiator_rsp_handler(ol_scn_t scn, uint8_t *data, + uint32_t len) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + struct wlan_objmgr_psoc *psoc; + struct scheduler_msg msg = {0}; + struct nan_datapath_initiator_rsp *rsp; + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("psoc is null"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("wmi_handle is null"); + return -EINVAL; + } + + rsp = qdf_mem_malloc(sizeof(*rsp)); + if (!rsp) { + target_if_err("malloc failed"); + return -ENOMEM; + } + + status = wmi_extract_ndp_initiator_rsp(wmi_handle, data, rsp); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("parsing of event failed, %d", status); + qdf_mem_free(rsp); + return -EINVAL; + } + + msg.bodyptr = rsp; + msg.type = NDP_INITIATOR_RSP; + msg.callback = target_if_nan_event_dispatcher; + msg.flush_callback = target_if_nan_event_flush_cb; + target_if_debug("NDP_INITIATOR_RSP sent: %d", msg.type); + status = scheduler_post_message(QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to post msg, status: %d", status); + target_if_nan_event_flush_cb(&msg); + return -EINVAL; + } + + return 0; +} + +static int target_if_ndp_ind_handler(ol_scn_t scn, uint8_t *data, + uint32_t data_len) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct scheduler_msg msg = {0}; + struct nan_datapath_indication_event *rsp; + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("psoc is null"); + return -EINVAL; + } + + wmi_handle = 
get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("wmi_handle is null"); + return -EINVAL; + } + + rsp = qdf_mem_malloc(sizeof(*rsp)); + if (!rsp) { + target_if_err("malloc failed"); + return -ENOMEM; + } + + status = wmi_extract_ndp_ind(wmi_handle, data, rsp); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("parsing of event failed, %d", status); + qdf_mem_free(rsp); + return -EINVAL; + } + + msg.bodyptr = rsp; + msg.type = NDP_INDICATION; + msg.callback = target_if_nan_event_dispatcher; + msg.flush_callback = target_if_nan_event_flush_cb; + target_if_debug("NDP_INDICATION sent: %d", msg.type); + status = scheduler_post_message(QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to post msg, status: %d", status); + target_if_nan_event_flush_cb(&msg); + return -EINVAL; + } + + return 0; +} + +static int target_if_ndp_confirm_handler(ol_scn_t scn, uint8_t *data, + uint32_t data_len) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct scheduler_msg msg = {0}; + struct nan_datapath_confirm_event *rsp; + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("psoc is null"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("wmi_handle is null"); + return -EINVAL; + } + + rsp = qdf_mem_malloc(sizeof(*rsp)); + if (!rsp) { + target_if_err("malloc failed"); + return -ENOMEM; + } + + status = wmi_extract_ndp_confirm(wmi_handle, data, rsp); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("parsing of event failed, %d", status); + qdf_mem_free(rsp); + return -EINVAL; + } + + msg.bodyptr = rsp; + msg.type = NDP_CONFIRM; + msg.callback = target_if_nan_event_dispatcher; + msg.flush_callback = target_if_nan_event_flush_cb; + target_if_debug("NDP_CONFIRM sent: %d", msg.type); + status = 
scheduler_post_message(QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to post msg, status: %d", status); + target_if_nan_event_flush_cb(&msg); + return -EINVAL; + } + + return 0; +} + +static QDF_STATUS target_if_nan_ndp_responder_req( + struct nan_datapath_responder_req *req) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + struct wlan_objmgr_psoc *psoc; + struct scheduler_msg pe_msg = {0}; + struct wlan_lmac_if_nan_rx_ops *nan_rx_ops; + struct nan_datapath_responder_rsp rsp = {0}; + + if (!req) { + target_if_err("Invalid req."); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_vdev_get_psoc(req->vdev); + if (!psoc) { + target_if_err("psoc is null."); + return QDF_STATUS_E_NULL_VALUE; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("wmi_handle is null."); + return QDF_STATUS_E_NULL_VALUE; + } + + nan_rx_ops = target_if_nan_get_rx_ops(psoc); + if (!nan_rx_ops) { + target_if_err("nan_rx_ops is null."); + return QDF_STATUS_E_NULL_VALUE; + } + + status = wmi_unified_ndp_responder_req_cmd_send(wmi_handle, req); + if (QDF_IS_STATUS_SUCCESS(status)) + return status; + + rsp.vdev = req->vdev; + rsp.transaction_id = req->transaction_id; + rsp.status = NAN_DATAPATH_RSP_STATUS_ERROR; + rsp.reason = NAN_DATAPATH_DATA_RESPONDER_REQ_FAILED; + pe_msg.bodyptr = &rsp; + pe_msg.type = NDP_RESPONDER_RSP; + if (nan_rx_ops->nan_event_rx) + nan_rx_ops->nan_event_rx(&pe_msg); + + return status; +} + +static int target_if_ndp_responder_rsp_handler(ol_scn_t scn, uint8_t *data, + uint32_t len) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct scheduler_msg msg = {0}; + struct nan_datapath_responder_rsp *rsp; + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("psoc is null"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if 
(!wmi_handle) { + target_if_err("wmi_handle is null."); + return -EINVAL; + } + + rsp = qdf_mem_malloc(sizeof(*rsp)); + if (!rsp) { + target_if_err("malloc failed"); + return -ENOMEM; + } + + status = wmi_extract_ndp_responder_rsp(wmi_handle, data, rsp); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("parsing of event failed, %d", status); + qdf_mem_free(rsp); + return -EINVAL; + } + + msg.bodyptr = rsp; + msg.type = NDP_RESPONDER_RSP; + msg.callback = target_if_nan_event_dispatcher; + msg.flush_callback = target_if_nan_event_flush_cb; + target_if_debug("NDP_INITIATOR_RSP sent: %d", msg.type); + status = scheduler_post_message(QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to post msg, status: %d", status); + target_if_nan_event_flush_cb(&msg); + return -EINVAL; + } + + return 0; +} + +static QDF_STATUS target_if_nan_ndp_end_req(struct nan_datapath_end_req *req) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + struct wlan_objmgr_psoc *psoc; + struct scheduler_msg msg = {0}; + struct wlan_lmac_if_nan_rx_ops *nan_rx_ops; + struct nan_datapath_end_rsp_event end_rsp = {0}; + + if (!req) { + target_if_err("req is null"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_vdev_get_psoc(req->vdev); + if (!psoc) { + target_if_err("psoc is null."); + return QDF_STATUS_E_NULL_VALUE; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("wmi_handle is null."); + return QDF_STATUS_E_NULL_VALUE; + } + + nan_rx_ops = target_if_nan_get_rx_ops(psoc); + if (!nan_rx_ops) { + target_if_err("nan_rx_ops is null."); + return QDF_STATUS_E_NULL_VALUE; + } + + status = wmi_unified_ndp_end_req_cmd_send(wmi_handle, req); + if (QDF_IS_STATUS_SUCCESS(status)) + return status; + + end_rsp.vdev = req->vdev; + msg.type = NDP_END_RSP; + end_rsp.status = NAN_DATAPATH_RSP_STATUS_ERROR; + end_rsp.reason = NAN_DATAPATH_END_FAILED; + 
end_rsp.transaction_id = req->transaction_id; + msg.bodyptr = &end_rsp; + + if (nan_rx_ops->nan_event_rx) + nan_rx_ops->nan_event_rx(&msg); + + return status; +} + +static int target_if_ndp_end_rsp_handler(ol_scn_t scn, uint8_t *data, + uint32_t data_len) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct scheduler_msg msg = {0}; + struct nan_datapath_end_rsp_event *end_rsp; + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("psoc is null"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("wmi_handle is null."); + return -EINVAL; + } + + end_rsp = qdf_mem_malloc(sizeof(*end_rsp)); + if (!end_rsp) { + target_if_err("malloc failed"); + return -ENOMEM; + } + + status = wmi_extract_ndp_end_rsp(wmi_handle, data, end_rsp); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("parsing of event failed, %d", status); + qdf_mem_free(end_rsp); + return -EINVAL; + } + + msg.bodyptr = end_rsp; + msg.type = NDP_END_RSP; + msg.callback = target_if_nan_event_dispatcher; + msg.flush_callback = target_if_nan_event_flush_cb; + target_if_debug("NDP_END_RSP sent: %d", msg.type); + status = scheduler_post_message(QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to post msg, status: %d", status); + target_if_nan_event_flush_cb(&msg); + return -EINVAL; + } + + return 0; +} + +static int target_if_ndp_end_ind_handler(ol_scn_t scn, uint8_t *data, + uint32_t data_len) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct scheduler_msg msg = {0}; + struct nan_datapath_end_indication_event *rsp = NULL; + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("psoc is null"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + 
target_if_err("wmi_handle is null."); + return -EINVAL; + } + + status = wmi_extract_ndp_end_ind(wmi_handle, data, &rsp); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("parsing of event failed, %d", status); + return -EINVAL; + } + + rsp->vdev = wlan_objmgr_get_vdev_by_opmode_from_psoc( + wmi_handle->soc->wmi_psoc, QDF_NDI_MODE, WLAN_NAN_ID); + if (!rsp->vdev) { + target_if_err("vdev is null"); + qdf_mem_free(rsp); + return -EINVAL; + } + + msg.bodyptr = rsp; + msg.type = NDP_END_IND; + msg.callback = target_if_nan_event_dispatcher; + msg.flush_callback = target_if_nan_event_flush_cb; + target_if_debug("NDP_END_IND sent: %d", msg.type); + status = scheduler_post_message(QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to post msg, status: %d", status); + target_if_nan_event_flush_cb(&msg); + return -EINVAL; + } + + return 0; +} + +static int target_if_ndp_sch_update_handler(ol_scn_t scn, uint8_t *data, + uint32_t data_len) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct scheduler_msg msg = {0}; + struct nan_datapath_sch_update_event *rsp; + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("psoc is null"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("wmi_handle is null."); + return -EINVAL; + } + + rsp = qdf_mem_malloc(sizeof(*rsp)); + if (!rsp) { + target_if_err("malloc failed"); + return -ENOMEM; + } + + status = wmi_extract_ndp_sch_update(wmi_handle, data, rsp); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("parsing of event failed, %d", status); + qdf_mem_free(rsp); + return -EINVAL; + } + + msg.bodyptr = rsp; + msg.type = NDP_SCHEDULE_UPDATE; + msg.callback = target_if_nan_event_dispatcher; + msg.flush_callback = target_if_nan_event_flush_cb; + target_if_debug("NDP_SCHEDULE_UPDATE sent: %d", msg.type); + 
status = scheduler_post_message(QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, + QDF_MODULE_ID_TARGET_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("failed to post msg, status: %d", status); + target_if_nan_event_flush_cb(&msg); + return -EINVAL; + } + + return 0; +} + +static QDF_STATUS target_if_nan_req(void *req, uint32_t req_type) +{ + /* send cmd to fw */ + switch (req_type) { + case NDP_INITIATOR_REQ: + target_if_nan_ndp_initiator_req(req); + break; + case NDP_RESPONDER_REQ: + target_if_nan_ndp_responder_req(req); + break; + case NDP_END_REQ: + target_if_nan_ndp_end_req(req); + break; + default: + target_if_err("invalid req type"); + break; + } + return QDF_STATUS_SUCCESS; +} + +void target_if_nan_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + tx_ops->nan_tx_ops.nan_req_tx = target_if_nan_req; +} + +void target_if_nan_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops) +{ + rx_ops->nan_rx_ops.nan_event_rx = nan_event_handler; +} + +inline struct wlan_lmac_if_nan_tx_ops *target_if_nan_get_tx_ops( + struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) { + target_if_err("psoc is null"); + return NULL; + } + + return &psoc->soc_cb.tx_ops.nan_tx_ops; +} + +inline struct wlan_lmac_if_nan_rx_ops *target_if_nan_get_rx_ops( + struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) { + target_if_err("psoc is null"); + return NULL; + } + + return &psoc->soc_cb.rx_ops.nan_rx_ops; +} + +QDF_STATUS target_if_nan_register_events(struct wlan_objmgr_psoc *psoc) +{ + int ret; + wmi_unified_t handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!handle) { + target_if_err("handle is NULL"); + return QDF_STATUS_E_FAILURE; + } + ret = wmi_unified_register_event_handler(handle, + wmi_ndp_initiator_rsp_event_id, + target_if_ndp_initiator_rsp_handler, + WMI_RX_UMAC_CTX); + if (ret) { + target_if_err("wmi event registration failed, ret: %d", ret); + return QDF_STATUS_E_FAILURE; + } + + ret = wmi_unified_register_event_handler(handle, + wmi_ndp_indication_event_id, + 
target_if_ndp_ind_handler, + WMI_RX_UMAC_CTX); + if (ret) { + target_if_err("wmi event registration failed, ret: %d", ret); + target_if_nan_deregister_events(psoc); + return QDF_STATUS_E_FAILURE; + } + + ret = wmi_unified_register_event_handler(handle, + wmi_ndp_confirm_event_id, + target_if_ndp_confirm_handler, + WMI_RX_UMAC_CTX); + if (ret) { + target_if_err("wmi event registration failed, ret: %d", ret); + target_if_nan_deregister_events(psoc); + return QDF_STATUS_E_FAILURE; + } + + ret = wmi_unified_register_event_handler(handle, + wmi_ndp_responder_rsp_event_id, + target_if_ndp_responder_rsp_handler, + WMI_RX_UMAC_CTX); + if (ret) { + target_if_err("wmi event registration failed, ret: %d", ret); + target_if_nan_deregister_events(psoc); + return QDF_STATUS_E_FAILURE; + } + + ret = wmi_unified_register_event_handler(handle, + wmi_ndp_end_indication_event_id, + target_if_ndp_end_ind_handler, + WMI_RX_UMAC_CTX); + if (ret) { + target_if_err("wmi event registration failed, ret: %d", ret); + target_if_nan_deregister_events(psoc); + return QDF_STATUS_E_FAILURE; + } + + ret = wmi_unified_register_event_handler(handle, + wmi_ndp_end_rsp_event_id, + target_if_ndp_end_rsp_handler, + WMI_RX_UMAC_CTX); + if (ret) { + target_if_err("wmi event registration failed, ret: %d", ret); + target_if_nan_deregister_events(psoc); + return QDF_STATUS_E_FAILURE; + } + + ret = wmi_unified_register_event_handler(handle, + wmi_ndl_schedule_update_event_id, + target_if_ndp_sch_update_handler, + WMI_RX_UMAC_CTX); + if (ret) { + target_if_err("wmi event registration failed, ret: %d", ret); + target_if_nan_deregister_events(psoc); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_nan_deregister_events(struct wlan_objmgr_psoc *psoc) +{ + int ret, status = 0; + wmi_unified_t handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!handle) { + target_if_err("handle is NULL"); + return QDF_STATUS_E_FAILURE; + } + ret = 
wmi_unified_unregister_event_handler(handle, + wmi_ndl_schedule_update_event_id); + if (ret) { + target_if_err("wmi event deregistration failed, ret: %d", ret); + status = ret; + } + + ret = wmi_unified_unregister_event_handler(handle, + wmi_ndp_end_rsp_event_id); + if (ret) { + target_if_err("wmi event deregistration failed, ret: %d", ret); + status = ret; + } + + ret = wmi_unified_unregister_event_handler(handle, + wmi_ndp_end_indication_event_id); + if (ret) { + target_if_err("wmi event deregistration failed, ret: %d", ret); + status = ret; + } + + ret = wmi_unified_unregister_event_handler(handle, + wmi_ndp_responder_rsp_event_id); + if (ret) { + target_if_err("wmi event deregistration failed, ret: %d", ret); + status = ret; + } + + ret = wmi_unified_unregister_event_handler(handle, + wmi_ndp_confirm_event_id); + if (ret) { + target_if_err("wmi event deregistration failed, ret: %d", ret); + status = ret; + } + + ret = wmi_unified_unregister_event_handler(handle, + wmi_ndp_indication_event_id); + if (ret) { + target_if_err("wmi event deregistration failed, ret: %d", ret); + status = ret; + } + + ret = wmi_unified_unregister_event_handler(handle, + wmi_ndp_initiator_rsp_event_id); + if (ret) { + target_if_err("wmi event deregistration failed, ret: %d", ret); + status = ret; + } + + if (status) + return QDF_STATUS_E_FAILURE; + else + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/p2p/inc/target_if_p2p.h b/drivers/staging/qca-wifi-host-cmn/target_if/p2p/inc/target_if_p2p.h new file mode 100644 index 0000000000000000000000000000000000000000..dcb46b9519e93e2c18711be7df7752c42f2225ce --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/p2p/inc/target_if_p2p.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: offload lmac interface APIs for P2P + */ + +#ifndef _TARGET_IF_P2P_H_ +#define _TARGET_IF_P2P_H_ + +#include + +struct wlan_objmgr_psoc; +struct p2p_ps_config; +struct p2p_lo_start; + +/** + * target_if_p2p_register_tx_ops() - Register P2P component TX OPS + * @tx_ops: lmac if transmit ops + * + * Return: None + */ +void target_if_p2p_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_p2p_register_lo_event_handler() - Register lo event handler + * @psoc: soc object + * @arg: additional argument + * + * Target interface API to register P2P listen offload event handler. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS target_if_p2p_register_lo_event_handler( + struct wlan_objmgr_psoc *psoc, void *arg); + +/** + * target_if_p2p_register_noa_event_handler() - Register noa event handler + * @psoc: soc object + * @arg: additional argument + * + * Target interface API to register P2P noa event handler. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS target_if_p2p_register_noa_event_handler( + struct wlan_objmgr_psoc *psoc, void *arg); + +/** + * target_if_p2p_unregister_lo_event_handler() - Unregister lo event handler + * @psoc: soc object + * @arg: additional argument + * + * Target interface API to unregister P2P listen offload event handler. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS target_if_p2p_unregister_lo_event_handler( + struct wlan_objmgr_psoc *psoc, void *arg); + +/** + * target_if_p2p_unregister_noa_event_handler() - Unregister noa event handler + * @psoc: soc object + * @arg: additional argument + * + * Target interface API to unregister P2P listen offload event handler. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS target_if_p2p_unregister_noa_event_handler( + struct wlan_objmgr_psoc *psoc, void *arg); + +/** + * target_if_p2p_set_ps() - Set power save + * @psoc: soc object + * @arg: additional argument + * + * Target interface API to set power save. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS target_if_p2p_set_ps(struct wlan_objmgr_psoc *psoc, + struct p2p_ps_config *ps_config); + +/** + * target_if_p2p_lo_start() - Start listen offload + * @psoc: soc object + * @lo_start: lo start information + * + * Target interface API to start listen offload. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS target_if_p2p_lo_start(struct wlan_objmgr_psoc *psoc, + struct p2p_lo_start *lo_start); + +/** + * target_if_p2p_lo_stop() - Stop listen offload + * @psoc: soc object + * @vdev_id: vdev id + * + * Target interface API to stop listen offload. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS target_if_p2p_lo_stop(struct wlan_objmgr_psoc *psoc, + uint32_t vdev_id); + +/** + * target_if_p2p_set_noa() - Disable / Enable NOA + * @psoc: soc object + * @vdev_id: vdev id + * @disable_noa: TRUE - Disable NoA, FALSE - Enable NoA + * + * Target interface API to Disable / Enable P2P NOA. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS target_if_p2p_set_noa(struct wlan_objmgr_psoc *psoc, + uint32_t vdev_id, bool disable_noa); + +#endif /* _TARGET_IF_P2P_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/p2p/src/target_if_p2p.c b/drivers/staging/qca-wifi-host-cmn/target_if/p2p/src/target_if_p2p.c new file mode 100644 index 0000000000000000000000000000000000000000..e8281f0cb100d673499977d52549b2ab6c8b6726 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/p2p/src/target_if_p2p.c @@ -0,0 +1,462 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: offload lmac interface APIs definitions for P2P + */ + +#include +#include +#include "target_if.h" +#include "target_if_p2p.h" +#include "init_deinit_lmac.h" + +static inline struct wlan_lmac_if_p2p_rx_ops * +target_if_psoc_get_p2p_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &(psoc->soc_cb.rx_ops.p2p); +} + +/** + * target_p2p_lo_event_handler() - WMI callback for lo stop event + * @scn: pointer to scn + * @event_buf: event buffer + * @len: buffer length + * + * This function gets called from WMI when triggered wmi event + * wmi_p2p_lo_stop_event_id. + * + * Return: 0 - success + * others - failure + */ +static int target_p2p_lo_event_handler(ol_scn_t scn, uint8_t *data, + uint32_t datalen) +{ + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct p2p_lo_event *event_info; + struct wlan_lmac_if_p2p_rx_ops *p2p_rx_ops; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + target_if_debug("scn:%pK, data:%pK, datalen:%d", scn, data, datalen); + + if (!scn || !data) { + target_if_err("scn: 0x%pK, data: 0x%pK", scn, data); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("null wmi handle"); + return -EINVAL; + } + + event_info = qdf_mem_malloc(sizeof(*event_info)); + if (!event_info) { + target_if_err("Failed to allocate p2p lo event"); + return -ENOMEM; + } + + if (wmi_extract_p2p_lo_stop_ev_param(wmi_handle, data, + event_info)) { + target_if_err("Failed to extract wmi p2p lo stop event"); + qdf_mem_free(event_info); + return -EINVAL; + } + + p2p_rx_ops = target_if_psoc_get_p2p_rx_ops(psoc); + if (p2p_rx_ops->lo_ev_handler) { + status = p2p_rx_ops->lo_ev_handler(psoc, event_info); + target_if_debug("call lo event handler, status:%d", + status); + } else { + qdf_mem_free(event_info); + target_if_debug("no valid lo event handler"); + } + + return 
qdf_status_to_os_return(status); +} + +/** + * target_p2p_noa_event_handler() - WMI callback for noa event + * @scn: pointer to scn + * @event_buf: event buffer + * @len: buffer length + * + * This function gets called from WMI when triggered WMI event + * wmi_p2p_noa_event_id. + * + * Return: 0 - success + * others - failure + */ +static int target_p2p_noa_event_handler(ol_scn_t scn, uint8_t *data, + uint32_t datalen) +{ + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct p2p_noa_info *event_info; + struct wlan_lmac_if_p2p_rx_ops *p2p_rx_ops; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + target_if_debug("scn:%pK, data:%pK, datalen:%d", scn, data, datalen); + + if (!scn || !data) { + target_if_err("scn: 0x%pK, data: 0x%pK", scn, data); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("null wmi handle"); + return -EINVAL; + } + + event_info = qdf_mem_malloc(sizeof(*event_info)); + if (!event_info) { + target_if_err("failed to allocate p2p noa information"); + return -ENOMEM; + } + + if (wmi_extract_p2p_noa_ev_param(wmi_handle, data, + event_info)) { + target_if_err("failed to extract wmi p2p noa event"); + qdf_mem_free(event_info); + return -EINVAL; + } + + p2p_rx_ops = target_if_psoc_get_p2p_rx_ops(psoc); + if (p2p_rx_ops->noa_ev_handler) { + status = p2p_rx_ops->noa_ev_handler(psoc, event_info); + target_if_debug("call noa event handler, status:%d", + status); + } else { + qdf_mem_free(event_info); + target_if_debug("no valid noa event handler"); + } + + return qdf_status_to_os_return(status); +} + +QDF_STATUS target_if_p2p_register_lo_event_handler( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + int status; + wmi_unified_t wmi_handle = lmac_get_wmi_unified_hdl(psoc); + + target_if_debug("psoc:%pK, arg:%pK", psoc, arg); + + if (!wmi_handle) { + 
target_if_err("Invalid wmi handle"); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_register_event(wmi_handle, + wmi_p2p_lo_stop_event_id, + target_p2p_lo_event_handler); + + target_if_debug("wmi register lo event handle, status:%d", + status); + + return status == 0 ? QDF_STATUS_SUCCESS : QDF_STATUS_E_FAILURE; +} + +QDF_STATUS target_if_p2p_register_noa_event_handler( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + int status; + wmi_unified_t wmi_handle = lmac_get_wmi_unified_hdl(psoc); + + target_if_debug("psoc:%pK, arg:%pK", psoc, arg); + + if (!wmi_handle) { + target_if_err("Invalid wmi handle"); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_register_event(wmi_handle, + wmi_p2p_noa_event_id, + target_p2p_noa_event_handler); + + target_if_debug("wmi register noa event handle, status:%d", + status); + + return status == 0 ? QDF_STATUS_SUCCESS : QDF_STATUS_E_FAILURE; +} + +QDF_STATUS target_if_p2p_unregister_lo_event_handler( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + int status; + wmi_unified_t wmi_handle = lmac_get_wmi_unified_hdl(psoc); + + target_if_debug("psoc:%pK, arg:%pK", psoc, arg); + + if (!wmi_handle) { + target_if_err("Invalid wmi handle"); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_unregister_event(wmi_handle, + wmi_p2p_lo_stop_event_id); + + target_if_debug("wmi unregister lo event handle, status:%d", + status); + + return status == 0 ? QDF_STATUS_SUCCESS : QDF_STATUS_E_FAILURE; +} + +QDF_STATUS target_if_p2p_unregister_noa_event_handler( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + int status; + wmi_unified_t wmi_handle = lmac_get_wmi_unified_hdl(psoc); + + target_if_debug("psoc:%pK, arg:%pK", psoc, arg); + + if (!wmi_handle) { + target_if_err("Invalid wmi handle"); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_unregister_event(wmi_handle, + wmi_p2p_noa_event_id); + + target_if_debug("wmi unregister noa event handle, status:%d", + status); + + return status == 0 ? 
QDF_STATUS_SUCCESS : QDF_STATUS_E_FAILURE; +} + +QDF_STATUS target_if_p2p_set_ps(struct wlan_objmgr_psoc *psoc, + struct p2p_ps_config *ps_config) +{ + struct p2p_ps_params cmd; + QDF_STATUS status; + wmi_unified_t wmi_handle = lmac_get_wmi_unified_hdl(psoc); + + if (!wmi_handle) { + target_if_err("Invalid wmi handle"); + return QDF_STATUS_E_INVAL; + } + + if (!ps_config) { + target_if_err("ps config parameters is null"); + return QDF_STATUS_E_INVAL; + } + + target_if_debug("psoc:%pK, vdev_id:%d, opp_ps:%d", psoc, + ps_config->vdev_id, ps_config->opp_ps); + + cmd.opp_ps = ps_config->opp_ps; + cmd.ctwindow = ps_config->ct_window; + cmd.count = ps_config->count; + cmd.duration = ps_config->duration; + cmd.interval = ps_config->interval; + cmd.single_noa_duration = ps_config->single_noa_duration; + cmd.ps_selection = ps_config->ps_selection; + cmd.session_id = ps_config->vdev_id; + + if (ps_config->opp_ps) + status = wmi_unified_set_p2pgo_oppps_req(wmi_handle, + &cmd); + else + status = wmi_unified_set_p2pgo_noa_req_cmd(wmi_handle, + &cmd); + + if (status != QDF_STATUS_SUCCESS) + target_if_err("Failed to send set uapsd param, %d", + status); + + return status; +} + +QDF_STATUS target_if_p2p_lo_start(struct wlan_objmgr_psoc *psoc, + struct p2p_lo_start *lo_start) +{ + wmi_unified_t wmi_handle = lmac_get_wmi_unified_hdl(psoc); + + if (!wmi_handle) { + target_if_err("Invalid wmi handle"); + return QDF_STATUS_E_INVAL; + } + + if (!lo_start) { + target_if_err("lo start parameters is null"); + return QDF_STATUS_E_INVAL; + } + target_if_debug("psoc:%pK, vdev_id:%d", psoc, lo_start->vdev_id); + + return wmi_unified_p2p_lo_start_cmd(wmi_handle, lo_start); +} + +QDF_STATUS target_if_p2p_lo_stop(struct wlan_objmgr_psoc *psoc, + uint32_t vdev_id) +{ + wmi_unified_t wmi_handle = lmac_get_wmi_unified_hdl(psoc); + + target_if_debug("psoc:%pK, vdev_id:%d", psoc, vdev_id); + + if (!wmi_handle) { + target_if_err("Invalid wmi handle"); + return QDF_STATUS_E_INVAL; + } + + return 
wmi_unified_p2p_lo_stop_cmd(wmi_handle, + (uint8_t)vdev_id); +} + +QDF_STATUS target_if_p2p_set_noa(struct wlan_objmgr_psoc *psoc, + uint32_t vdev_id, bool disable_noa) +{ + struct vdev_set_params param; + wmi_unified_t wmi_handle = lmac_get_wmi_unified_hdl(psoc); + + if (!wmi_handle) { + target_if_err("Invalid wmi handle"); + return QDF_STATUS_E_INVAL; + } + + target_if_debug("psoc:%pK, vdev_id:%d disable_noa:%d", + psoc, vdev_id, disable_noa); + param.if_id = vdev_id; + param.param_id = WMI_VDEV_PARAM_DISABLE_NOA_P2P_GO; + param.param_value = (uint32_t)disable_noa; + + return wmi_unified_vdev_set_param_send(wmi_handle, ¶m); +} + +static int target_p2p_mac_rx_filter_event_handler(ol_scn_t scn, uint8_t *data, + uint32_t datalen) +{ + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct p2p_set_mac_filter_evt event_info; + struct wlan_lmac_if_p2p_rx_ops *p2p_rx_ops; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!scn || !data) { + target_if_err("scn: 0x%pK, data: 0x%pK", scn, data); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("null wmi handle"); + return -EINVAL; + } + + if (wmi_extract_mac_addr_rx_filter_evt_param(wmi_handle, data, + &event_info)) { + target_if_err("failed to extract wmi p2p noa event"); + return -EINVAL; + } + target_if_debug("vdev_id %d status %d", event_info.vdev_id, + event_info.status); + p2p_rx_ops = target_if_psoc_get_p2p_rx_ops(psoc); + if (p2p_rx_ops && p2p_rx_ops->add_mac_addr_filter_evt_handler) + status = p2p_rx_ops->add_mac_addr_filter_evt_handler( + psoc, &event_info); + else + target_if_debug("no add mac addr filter event handler"); + + return qdf_status_to_os_return(status); +} + +static QDF_STATUS target_if_p2p_register_macaddr_rx_filter_evt_handler( + struct wlan_objmgr_psoc *psoc, bool reg) +{ + int status; + wmi_unified_t 
wmi_handle = lmac_get_wmi_unified_hdl(psoc); + + target_if_debug("psoc:%pK, register %d mac addr rx evt", psoc, reg); + + if (!wmi_handle) { + target_if_err("Invalid wmi handle"); + return QDF_STATUS_E_INVAL; + } + if (reg) + status = wmi_unified_register_event( + wmi_handle, + wmi_vdev_add_macaddr_rx_filter_event_id, + target_p2p_mac_rx_filter_event_handler); + else + status = wmi_unified_unregister_event( + wmi_handle, + wmi_vdev_add_macaddr_rx_filter_event_id); + + return status == 0 ? QDF_STATUS_SUCCESS : QDF_STATUS_E_FAILURE; +} + +static QDF_STATUS target_if_p2p_set_mac_addr_rx_filter_cmd( + struct wlan_objmgr_psoc *psoc, struct p2p_set_mac_filter *param) +{ + wmi_unified_t wmi_handle = lmac_get_wmi_unified_hdl(psoc); + + if (!wmi_handle) { + target_if_err("Invalid wmi handle"); + return QDF_STATUS_E_INVAL; + } + + return wmi_send_set_mac_addr_rx_filter_cmd(wmi_handle, param); +} + +void target_if_p2p_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_p2p_tx_ops *p2p_tx_ops; + + if (!tx_ops) { + target_if_err("lmac tx_ops is null"); + return; + } + + p2p_tx_ops = &tx_ops->p2p; + p2p_tx_ops->set_ps = target_if_p2p_set_ps; + p2p_tx_ops->lo_start = target_if_p2p_lo_start; + p2p_tx_ops->lo_stop = target_if_p2p_lo_stop; + p2p_tx_ops->set_noa = target_if_p2p_set_noa; + p2p_tx_ops->reg_lo_ev_handler = + target_if_p2p_register_lo_event_handler; + p2p_tx_ops->reg_noa_ev_handler = + target_if_p2p_register_noa_event_handler; + p2p_tx_ops->unreg_lo_ev_handler = + target_if_p2p_unregister_lo_event_handler; + p2p_tx_ops->unreg_noa_ev_handler = + target_if_p2p_unregister_noa_event_handler; + p2p_tx_ops->reg_mac_addr_rx_filter_handler = + target_if_p2p_register_macaddr_rx_filter_evt_handler; + p2p_tx_ops->set_mac_addr_rx_filter_cmd = + target_if_p2p_set_mac_addr_rx_filter_cmd; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/inc/target_if_reg.h b/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/inc/target_if_reg.h new 
file mode 100644 index 0000000000000000000000000000000000000000..c0cf63c40fa89032528e4dee087c09a90b0aeadf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/inc/target_if_reg.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: target_if_reg.h + * This file contains regulatory target interface + */ +#ifndef __TARGET_IF_REG_H__ +#define __TARGET_IF_REG_H__ + +/** + * target_if_register_regulatory_tx_ops() - register regulatory tx ops + * + * @tx_ops: tx_ops pointer + * Return: Success or Failure + */ +QDF_STATUS target_if_register_regulatory_tx_ops(struct wlan_lmac_if_tx_ops + *tx_ops); + +/** + * target_if_reg_set_offloaded_info() - populate regulatory offloaded info + * + * @psoc: psoc pointer + * Return: Success or Failure + */ +QDF_STATUS target_if_reg_set_offloaded_info(struct wlan_objmgr_psoc *psoc); + +#endif /* __TARGET_IF_REG_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/src/target_if_reg.c b/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/src/target_if_reg.c new file mode 100644 index 0000000000000000000000000000000000000000..e5d54de4ab927fca653645d58f8dfa7c3da79a0a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/regulatory/src/target_if_reg.c @@ -0,0 +1,440 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + + /** + * DOC: target_if_reg.c + * This file contains regulatory target interface + */ + + +#include +#include +#include +#include +#include +#include + +static inline uint32_t get_chan_list_cc_event_id(void) +{ + return wmi_reg_chan_list_cc_event_id; +} + +static bool tgt_if_regulatory_is_11d_offloaded(struct wlan_objmgr_psoc + *psoc) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return false; + + return wmi_service_enabled(wmi_handle, + wmi_service_11d_offload); +} + +static bool tgt_if_regulatory_is_regdb_offloaded(struct wlan_objmgr_psoc + *psoc) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return false; + + return wmi_service_enabled(wmi_handle, + wmi_service_regulatory_db); +} + +static bool tgt_if_regulatory_is_there_serv_ready_extn(struct wlan_objmgr_psoc + *psoc) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return false; + + return wmi_service_enabled(wmi_handle, + wmi_service_ext_msg); +} + +static inline struct wlan_lmac_if_reg_rx_ops * +target_if_regulatory_get_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.rx_ops.reg_rx_ops; +} + +QDF_STATUS target_if_reg_set_offloaded_info(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_reg_rx_ops *reg_rx_ops; + + reg_rx_ops = target_if_regulatory_get_rx_ops(psoc); + if (!reg_rx_ops) { + target_if_err("reg_rx_ops is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (reg_rx_ops->reg_set_regdb_offloaded) + reg_rx_ops->reg_set_regdb_offloaded(psoc, + tgt_if_regulatory_is_regdb_offloaded(psoc)); + + if (reg_rx_ops->reg_set_11d_offloaded) + reg_rx_ops->reg_set_11d_offloaded(psoc, + tgt_if_regulatory_is_11d_offloaded(psoc)); + + return QDF_STATUS_SUCCESS; +} + +static int tgt_reg_chan_list_update_handler(ol_scn_t handle, + uint8_t *event_buf, + uint32_t len) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_reg_rx_ops *reg_rx_ops; + struct 
cur_regulatory_info *reg_info; + QDF_STATUS status; + struct wmi_unified *wmi_handle; + int ret_val = 0; + + TARGET_IF_ENTER(); + + psoc = target_if_get_psoc_from_scn_hdl(handle); + if (!psoc) { + target_if_err("psoc ptr is NULL"); + return -EINVAL; + } + + reg_rx_ops = target_if_regulatory_get_rx_ops(psoc); + if (!reg_rx_ops->master_list_handler) { + target_if_err("master_list_handler is NULL"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("invalid wmi handle"); + return -EINVAL; + } + + reg_info = qdf_mem_malloc(sizeof(*reg_info)); + if (!reg_info) { + target_if_err("memory allocation failed"); + return -ENOMEM; + } + + if (wmi_extract_reg_chan_list_update_event(wmi_handle, + event_buf, reg_info, len) + != QDF_STATUS_SUCCESS) { + target_if_err("Extraction of channel list event failed"); + ret_val = -EFAULT; + goto clean; + } + + if (reg_info->phy_id >= PSOC_MAX_PHY_REG_CAP) { + target_if_err_rl("phy_id %d is out of bounds", + reg_info->phy_id); + ret_val = -EFAULT; + goto clean; + } + + reg_info->psoc = psoc; + + status = reg_rx_ops->master_list_handler(reg_info); + if (status != QDF_STATUS_SUCCESS) { + target_if_err("Failed to process master channel list handler"); + ret_val = -EFAULT; + } + +clean: + qdf_mem_free(reg_info->reg_rules_2g_ptr); + qdf_mem_free(reg_info->reg_rules_5g_ptr); + qdf_mem_free(reg_info); + + target_if_debug("processed reg channel list ret_val %d", ret_val); + + return ret_val; +} + +static int tgt_reg_11d_new_cc_handler(ol_scn_t handle, + uint8_t *event_buf, uint32_t len) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_reg_rx_ops *reg_rx_ops; + struct reg_11d_new_country reg_11d_new_cc; + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + TARGET_IF_ENTER(); + + psoc = target_if_get_psoc_from_scn_hdl(handle); + if (!psoc) { + target_if_err("psoc ptr is NULL"); + return -EINVAL; + } + + reg_rx_ops = target_if_regulatory_get_rx_ops(psoc); + + if 
(!reg_rx_ops->reg_11d_new_cc_handler) { + target_if_err("reg_11d_new_cc_handler is NULL"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return -EINVAL; + } + if (wmi_extract_reg_11d_new_cc_event(wmi_handle, event_buf, + ®_11d_new_cc, len) + != QDF_STATUS_SUCCESS) { + + target_if_err("Extraction of new country event failed"); + return -EFAULT; + } + + status = reg_rx_ops->reg_11d_new_cc_handler(psoc, ®_11d_new_cc); + if (status != QDF_STATUS_SUCCESS) { + target_if_err("Failed to process new country code event"); + return -EFAULT; + } + + target_if_debug("processed 11d new country code event"); + + return 0; +} + +static int tgt_reg_ch_avoid_event_handler(ol_scn_t handle, + uint8_t *event_buf, uint32_t len) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_reg_rx_ops *reg_rx_ops; + struct ch_avoid_ind_type ch_avoid_event; + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + TARGET_IF_ENTER(); + + psoc = target_if_get_psoc_from_scn_hdl(handle); + if (!psoc) { + target_if_err("psoc ptr is NULL"); + return -EINVAL; + } + + reg_rx_ops = target_if_regulatory_get_rx_ops(psoc); + + if (!reg_rx_ops->reg_ch_avoid_event_handler) { + target_if_err("reg_ch_avoid_event_handler is NULL"); + return -EINVAL; + } + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return -EINVAL; + } + if (wmi_extract_reg_ch_avoid_event(wmi_handle, event_buf, + &ch_avoid_event, len) + != QDF_STATUS_SUCCESS) { + + target_if_err("Extraction of CH avoid event failed"); + return -EFAULT; + } + + status = reg_rx_ops->reg_ch_avoid_event_handler(psoc, &ch_avoid_event); + if (status != QDF_STATUS_SUCCESS) { + target_if_err("Failed to process CH avoid event"); + return -EFAULT; + } + + target_if_debug("processed CH avoid event"); + + return 0; +} + +static QDF_STATUS tgt_if_regulatory_register_master_list_handler( + struct wlan_objmgr_psoc 
*psoc, void *arg) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_register_event_handler(wmi_handle, + wmi_reg_chan_list_cc_event_id, + tgt_reg_chan_list_update_handler, + WMI_RX_UMAC_CTX); + +} + +static QDF_STATUS tgt_if_regulatory_unregister_master_list_handler( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_unregister_event_handler(wmi_handle, + wmi_reg_chan_list_cc_event_id); +} + +static QDF_STATUS tgt_if_regulatory_set_country_code( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_set_country_cmd_send(wmi_handle, arg); + +} + +static QDF_STATUS tgt_if_regulatory_set_user_country_code( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, struct cc_regdmn_s *rd) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + if (wmi_unified_set_user_country_code_cmd_send(wmi_handle, pdev_id, + rd) != QDF_STATUS_SUCCESS) { + target_if_err("Set user country code failed"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS tgt_if_regulatory_register_11d_new_cc_handler( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_register_event(wmi_handle, + wmi_11d_new_country_event_id, + tgt_reg_11d_new_cc_handler); +} + +static QDF_STATUS tgt_if_regulatory_unregister_11d_new_cc_handler( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return 
wmi_unified_unregister_event(wmi_handle, + wmi_11d_new_country_event_id); +} + +static QDF_STATUS tgt_if_regulatory_register_ch_avoid_event_handler( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_register_event(wmi_handle, + wmi_wlan_freq_avoid_event_id, + tgt_reg_ch_avoid_event_handler); +} + +static QDF_STATUS tgt_if_regulatory_unregister_ch_avoid_event_handler( + struct wlan_objmgr_psoc *psoc, void *arg) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_unregister_event(wmi_handle, + wmi_wlan_freq_avoid_event_id); +} +static QDF_STATUS tgt_if_regulatory_start_11d_scan( + struct wlan_objmgr_psoc *psoc, + struct reg_start_11d_scan_req *reg_start_11d_scan_req) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_send_start_11d_scan_cmd(wmi_handle, + reg_start_11d_scan_req); +} + +static QDF_STATUS tgt_if_regulatory_stop_11d_scan( + struct wlan_objmgr_psoc *psoc, + struct reg_stop_11d_scan_req *reg_stop_11d_scan_req) +{ + wmi_unified_t wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + return wmi_unified_send_stop_11d_scan_cmd(wmi_handle, + reg_stop_11d_scan_req); +} + + +QDF_STATUS target_if_register_regulatory_tx_ops(struct wlan_lmac_if_tx_ops + *tx_ops) +{ + struct wlan_lmac_if_reg_tx_ops *reg_ops = &tx_ops->reg_ops; + + reg_ops->register_master_handler = + tgt_if_regulatory_register_master_list_handler; + + reg_ops->unregister_master_handler = + tgt_if_regulatory_unregister_master_list_handler; + + reg_ops->set_country_code = tgt_if_regulatory_set_country_code; + + reg_ops->fill_umac_legacy_chanlist = NULL; + + reg_ops->set_country_failed = NULL; + + reg_ops->register_11d_new_cc_handler = 
+ tgt_if_regulatory_register_11d_new_cc_handler; + + reg_ops->unregister_11d_new_cc_handler = + tgt_if_regulatory_unregister_11d_new_cc_handler; + + reg_ops->start_11d_scan = tgt_if_regulatory_start_11d_scan; + + reg_ops->stop_11d_scan = tgt_if_regulatory_stop_11d_scan; + + reg_ops->is_there_serv_ready_extn = + tgt_if_regulatory_is_there_serv_ready_extn; + + reg_ops->set_user_country_code = + tgt_if_regulatory_set_user_country_code; + + reg_ops->register_ch_avoid_event_handler = + tgt_if_regulatory_register_ch_avoid_event_handler; + + reg_ops->unregister_ch_avoid_event_handler = + tgt_if_regulatory_unregister_ch_avoid_event_handler; + + return QDF_STATUS_SUCCESS; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/scan/inc/target_if_scan.h b/drivers/staging/qca-wifi-host-cmn/target_if/scan/inc/target_if_scan.h new file mode 100644 index 0000000000000000000000000000000000000000..5e1830c45a6a1401d9a609704305111b04a908b2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/scan/inc/target_if_scan.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: offload lmac interface APIs for scan + */ +#ifndef __TARGET_SCAN_IF_H__ +#define __TARGET_SCAN_IF_H__ + +#include + +struct scan_req_params; +struct scan_cancel_param; +struct wlan_objmgr_psoc; + +#define WLAN_MAX_ACTIVE_SCANS_ALLOWED 8 + +#ifdef FEATURE_WLAN_SCAN_PNO +/** + * target_if_nlo_match_event_handler() - nlo match event handler + * @scn: scn handle + * @event: event data + * @len: data length + * + * Record NLO match event comes from FW. It's a indication that + * one of the profile is matched. + * + * Return: 0 for success or error code. + */ +int target_if_nlo_match_event_handler(ol_scn_t scn, uint8_t *data, + uint32_t len); + +/** + * target_if_nlo_complete_handler() - nlo complete event handler + * @scn: scn handle + * @event: event data + * @len: data length + * + * Record NLO match event comes from FW. It's a indication that + * one of the profile is matched. + * + * Return: 0 for success or error code. + */ +int target_if_nlo_complete_handler(ol_scn_t scn, uint8_t *data, + uint32_t len); +#endif + +/** + * target_if_scan_register_event_handler() - lmac handler API + * to register for scan events + * @psoc: psoc object + * @arg: argument to lmac + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_scan_register_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg); + +/** + * target_if_scan_unregister_event_handler() - lmac handler API + * to unregister for scan events + * @psoc: psoc object + * @arg: argument to lmac + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_scan_unregister_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg); + +/** + * target_if_scan_start() - lmac handler API to start scan + * @pdev: pdev object + * @req: scan_req_params object + * + * Return: QDF_STATUS + */ + +QDF_STATUS +target_if_scan_start(struct wlan_objmgr_pdev *pdev, + struct scan_start_request *req); + +/** + * target_if_scan_cancel() - lmac handler API to cancel a previous active scan + * @pdev: pdev object + * @req: 
scan_cancel_param object + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_scan_cancel(struct wlan_objmgr_pdev *pdev, + struct scan_cancel_param *req); + +/** + * target_if_scan_tx_ops_register() - lmac handler to register scan tx_ops + * callback functions + * @tx_ops: wlan_lmac_if_tx_ops object + * + * Return: QDF_STATUS + */ + +QDF_STATUS +target_if_scan_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_scan_set_max_active_scans() - lmac handler to set max active scans + * @psoc: psoc object + * @max_active_scans: maximum active scans allowed on underlying psoc + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_scan_set_max_active_scans(struct wlan_objmgr_psoc *psoc, + uint32_t max_active_scans); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/scan/src/target_if_scan.c b/drivers/staging/qca-wifi-host-cmn/target_if/scan/src/target_if_scan.c new file mode 100644 index 0000000000000000000000000000000000000000..2c90e18120dc75af7bdd7c0da9b61b93d7caf459 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/scan/src/target_if_scan.c @@ -0,0 +1,449 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: offload lmac interface APIs definitions for scan + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +static inline struct wlan_lmac_if_scan_rx_ops * +target_if_scan_get_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.rx_ops.scan; +} + +static int +target_if_scan_event_handler(ol_scn_t scn, uint8_t *data, uint32_t datalen) +{ + struct scan_event_info *event_info; + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_scan_rx_ops *scan_rx_ops; + QDF_STATUS status; + + if (!scn || !data) { + target_if_err("scn: 0x%pK, data: 0x%pK\n", scn, data); + return -EINVAL; + } + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc\n"); + return -EINVAL; + } + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) { + target_if_err("wmi_handle is NULL"); + return -EINVAL; + } + + event_info = qdf_mem_malloc(sizeof(*event_info)); + + if (!event_info) { + target_if_err("unable to allocate scan_event"); + return -ENOMEM; + } + + if (wmi_extract_vdev_scan_ev_param(wmi_handle, data, + &(event_info->event))) { + target_if_err("Failed to extract wmi scan event"); + qdf_mem_free(event_info); + return -EINVAL; + } + + scan_rx_ops = target_if_scan_get_rx_ops(psoc); + if (scan_rx_ops->scan_ev_handler) { + status = scan_rx_ops->scan_ev_handler(psoc, event_info); + if (status != QDF_STATUS_SUCCESS) { + qdf_mem_free(event_info); + return -EINVAL; + } + } else { + qdf_mem_free(event_info); + return -EINVAL; + } + + return 0; +} + +#ifdef FEATURE_WLAN_SCAN_PNO + +int target_if_nlo_complete_handler(ol_scn_t scn, uint8_t *data, + uint32_t len) +{ + wmi_nlo_event *nlo_event; + struct scan_event_info *event_info; + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_scan_rx_ops *scan_rx_ops; + WMI_NLO_MATCH_EVENTID_param_tlvs *param_buf = + (WMI_NLO_MATCH_EVENTID_param_tlvs *) data; + QDF_STATUS status; + + if (!scn || !data) { + 
target_if_err("scn: 0x%pK, data: 0x%pK", scn, data); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc"); + return -EINVAL; + } + + event_info = qdf_mem_malloc(sizeof(*event_info)); + if (!event_info) { + target_if_err("unable to allocate scan_event"); + return -ENOMEM; + } + + nlo_event = param_buf->fixed_param; + target_if_debug("PNO complete event received for vdev %d", + nlo_event->vdev_id); + + event_info->event.type = SCAN_EVENT_TYPE_NLO_COMPLETE; + event_info->event.vdev_id = nlo_event->vdev_id; + + scan_rx_ops = target_if_scan_get_rx_ops(psoc); + if (scan_rx_ops->scan_ev_handler) { + status = scan_rx_ops->scan_ev_handler(psoc, event_info); + if (status != QDF_STATUS_SUCCESS) { + qdf_mem_free(event_info); + return -EINVAL; + } + } else { + qdf_mem_free(event_info); + return -EINVAL; + } + + return 0; +} + +int target_if_nlo_match_event_handler(ol_scn_t scn, uint8_t *data, + uint32_t len) +{ + wmi_nlo_event *nlo_event; + struct scan_event_info *event_info; + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_scan_rx_ops *scan_rx_ops; + WMI_NLO_MATCH_EVENTID_param_tlvs *param_buf = + (WMI_NLO_MATCH_EVENTID_param_tlvs *) data; + QDF_STATUS status; + + if (!scn || !data) { + target_if_err("scn: 0x%pK, data: 0x%pK", scn, data); + return -EINVAL; + } + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc"); + return -EINVAL; + } + + event_info = qdf_mem_malloc(sizeof(*event_info)); + if (!event_info) { + target_if_err("unable to allocate scan_event"); + return -ENOMEM; + } + + nlo_event = param_buf->fixed_param; + target_if_debug("PNO match event received for vdev %d", + nlo_event->vdev_id); + + event_info->event.type = SCAN_EVENT_TYPE_NLO_MATCH; + event_info->event.vdev_id = nlo_event->vdev_id; + + scan_rx_ops = target_if_scan_get_rx_ops(psoc); + if (scan_rx_ops->scan_ev_handler) { + status = scan_rx_ops->scan_ev_handler(psoc, event_info); + if (status != 
QDF_STATUS_SUCCESS) { + qdf_mem_free(event_info); + return -EINVAL; + } + } else { + qdf_mem_free(event_info); + return -EINVAL; + } + + return 0; +} + +static QDF_STATUS +target_if_scan_register_pno_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_register_event( + wmi_handle, + wmi_nlo_match_event_id, + target_if_nlo_match_event_handler); + if (status) { + target_if_err("Failed to register nlo match event cb"); + return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_register_event( + wmi_handle, + wmi_nlo_scan_complete_event_id, + target_if_nlo_complete_handler); + if (status) { + target_if_err("Failed to register nlo scan comp event cb"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +target_if_scan_unregister_pno_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_unregister_event( + wmi_handle, + wmi_nlo_match_event_id); + if (status) { + target_if_err("Failed to unregister nlo match event cb"); + return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_unregister_event( + wmi_handle, + wmi_nlo_scan_complete_event_id); + if (status) { + target_if_err("Failed to unregister nlo scan comp event cb"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +target_if_pno_start(struct wlan_objmgr_psoc *psoc, + struct pno_scan_req_params *req) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + 
return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_pno_start_cmd(wmi_handle, req); + if (status == QDF_STATUS_SUCCESS) { + if (req->mawc_params.enable) + status = wmi_unified_nlo_mawc_cmd(wmi_handle, + &req->mawc_params); + } + + return status; +} + +static QDF_STATUS +target_if_pno_stop(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id) +{ + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return QDF_STATUS_E_FAILURE; + } + + return wmi_unified_pno_stop_cmd(wmi_handle, vdev_id); +} + +#else + +static inline QDF_STATUS +target_if_scan_register_pno_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +target_if_scan_unregister_pno_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +target_if_pno_start(struct wlan_objmgr_psoc *psoc, + struct pno_scan_req_params *req) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +target_if_pno_stop(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id) +{ + return QDF_STATUS_SUCCESS; +} +#endif + + +QDF_STATUS +target_if_scan_register_event_handler(struct wlan_objmgr_psoc *psoc, void *arg) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_register_event( + wmi_handle, + wmi_scan_event_id, + target_if_scan_event_handler); + if (status) { + target_if_err("Failed to register Scan match event cb"); + return QDF_STATUS_E_FAILURE; + } + + status = target_if_scan_register_pno_event_handler(psoc, arg); + + return status; +} + +QDF_STATUS +target_if_scan_unregister_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + wmi_handle = 
get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return QDF_STATUS_E_FAILURE; + } + + status = wmi_unified_unregister_event( + wmi_handle, + wmi_scan_event_id); + if (status) { + target_if_err("Failed to unregister Scan match event cb"); + return QDF_STATUS_E_FAILURE; + } + + status = target_if_scan_unregister_pno_event_handler(psoc, arg); + + return status; +} + +QDF_STATUS +target_if_scan_start(struct wlan_objmgr_pdev *pdev, + struct scan_start_request *req) +{ + void *pdev_wmi_handle; + + pdev_wmi_handle = GET_WMI_HDL_FROM_PDEV(pdev); + if (!pdev_wmi_handle) { + target_if_err("Invalid PDEV WMI handle"); + return QDF_STATUS_E_FAILURE; + } + return wmi_unified_scan_start_cmd_send(pdev_wmi_handle, &req->scan_req); +} + +QDF_STATUS +target_if_scan_cancel(struct wlan_objmgr_pdev *pdev, + struct scan_cancel_param *req) +{ + void *pdev_wmi_handle; + + pdev_wmi_handle = GET_WMI_HDL_FROM_PDEV(pdev); + if (!pdev_wmi_handle) { + target_if_err("Invalid PDEV WMI handle"); + return QDF_STATUS_E_NULL_VALUE; + } + return wmi_unified_scan_stop_cmd_send(pdev_wmi_handle, req); +} + +QDF_STATUS +target_if_scan_tx_ops_register(struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_scan_tx_ops *scan; + + scan = &tx_ops->scan; + if (!scan) { + target_if_err("Scan txops NULL"); + return QDF_STATUS_E_FAILURE; + } + + scan->scan_start = target_if_scan_start; + scan->scan_cancel = target_if_scan_cancel; + scan->pno_start = target_if_pno_start; + scan->pno_stop = target_if_pno_stop; + scan->scan_reg_ev_handler = target_if_scan_register_event_handler; + scan->scan_unreg_ev_handler = target_if_scan_unregister_event_handler; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +target_if_scan_set_max_active_scans(struct wlan_objmgr_psoc *psoc, + uint32_t max_active_scans) +{ + struct wlan_lmac_if_scan_rx_ops *scan_rx_ops; + QDF_STATUS status; + + scan_rx_ops = target_if_scan_get_rx_ops(psoc); + if (scan_rx_ops->scan_set_max_active_scans) { + 
status = scan_rx_ops->scan_set_max_active_scans(psoc, + max_active_scans); + } else { + target_if_err("scan_set_max_active_scans uninitialized"); + status = QDF_STATUS_E_FAULT; + } + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/son/inc/target_if_son.h b/drivers/staging/qca-wifi-host-cmn/target_if/son/inc/target_if_son.h new file mode 100644 index 0000000000000000000000000000000000000000..f63807da05fef93a69c8efd25cbf6b6d16147450 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/son/inc/target_if_son.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ +#include +#include +#include +#include +#include +#include + +void target_if_son_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); + +bool son_ol_is_peer_inact(struct wlan_objmgr_peer *); + +u_int32_t son_ol_get_peer_rate(struct wlan_objmgr_peer *peer, u_int8_t type); + +int8_t son_ol_sanitize_util_invtl(struct wlan_objmgr_pdev *pdev, + u_int32_t *sample_period, + u_int32_t *num_of_sample); + +bool son_ol_enable(struct wlan_objmgr_pdev *pdev, bool enable); + +/* Function pointer to set overload status */ + +void son_ol_set_overload(struct wlan_objmgr_pdev *pdev, bool overload); + +/* Function pointer to set band steering parameters */ + +bool son_ol_set_params(struct wlan_objmgr_pdev *pdev, + u_int32_t inactivity_check_period, + u_int32_t inactivity_threshold_normal, + u_int32_t inactivity_threshold_overload); + +QDF_STATUS son_ol_send_null(struct wlan_objmgr_pdev *pdev, + u_int8_t *macaddr, + struct wlan_objmgr_vdev *vdev); + +int son_ol_lmac_create(struct wlan_objmgr_pdev *pdev); + + +int son_ol_lmac_destroy(struct wlan_objmgr_pdev *pdev); + + +void son_ol_rx_rssi_update(struct wlan_objmgr_pdev *pdev, u_int8_t *macaddres, + u_int8_t status, int8_t rssi, u_int8_t subtype); + +void son_ol_rx_rate_update(struct wlan_objmgr_pdev *pdev, u_int8_t *macaddres, + u_int8_t status, u_int32_t rateKbps); diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/son/src/target_if_son.c b/drivers/staging/qca-wifi-host-cmn/target_if/son/src/target_if_son.c new file mode 100644 index 0000000000000000000000000000000000000000..e52c549b3ec60f854527be04bd33492adef9948b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/son/src/target_if_son.c @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include + +#if QCA_SUPPORT_SON + +bool son_ol_is_peer_inact(struct wlan_objmgr_peer *peer) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_psoc *psoc; + + vdev = wlan_peer_get_vdev(peer); + if (!vdev) + return false; + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) + return false; + + return cdp_peer_is_inact(wlan_psoc_get_dp_handle(psoc), + (void *)(wlan_peer_get_dp_handle(peer))); +} + +u_int32_t son_ol_get_peer_rate(struct wlan_objmgr_peer *peer, u_int8_t type) +{ + return ol_if_peer_get_rate(peer, type); +} + + +bool son_ol_enable(struct wlan_objmgr_pdev *pdev, bool enable) +{ + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) + return false; + + return cdp_start_inact_timer(wlan_psoc_get_dp_handle(psoc), + (void *)(wlan_pdev_get_dp_handle(pdev)), + enable); +} + +/* Function pointer to set overload status */ +void son_ol_set_overload(struct wlan_objmgr_pdev *pdev, bool overload) +{ + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + + return cdp_set_overload(wlan_psoc_get_dp_handle(psoc), + (void *)(wlan_pdev_get_dp_handle(pdev)), + overload); +} +/* Function pointer to set band steering parameters */ +bool son_ol_set_params(struct wlan_objmgr_pdev *pdev, + u_int32_t inactivity_check_period, + u_int32_t inactivity_threshold_normal, + u_int32_t inactivity_threshold_overload) +{ + struct wlan_objmgr_psoc *psoc; + + psoc = 
wlan_pdev_get_psoc(pdev); + return cdp_set_inact_params(wlan_psoc_get_dp_handle(psoc), + (void *)wlan_pdev_get_dp_handle(pdev), + inactivity_check_period, + inactivity_threshold_normal, + inactivity_threshold_overload); +} + +int8_t son_ol_sanitize_util_invtl(struct wlan_objmgr_pdev *pdev, + u_int32_t *sample_period, + u_int32_t *num_of_sample) +{ + return EOK; +} + +QDF_STATUS son_ol_send_null(struct wlan_objmgr_pdev *pdev, + u_int8_t *macaddr, + struct wlan_objmgr_vdev *vdev) +{ + struct stats_request_params param = {0}; + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + + if (!psoc) + return QDF_STATUS_E_FAILURE; + + param.vdev_id = wlan_vdev_get_id(vdev); + param.stats_id = WMI_HOST_REQUEST_INST_STAT; + + return wmi_unified_stats_request_send(GET_WMI_HDL_FROM_PSOC(psoc), + macaddr, ¶m); +} + +int son_ol_lmac_create(struct wlan_objmgr_pdev *pdev) +{ + return EOK; +} + +int son_ol_lmac_destroy(struct wlan_objmgr_pdev *pdev) +{ + return EOK; + +} + +void son_ol_rx_rssi_update(struct wlan_objmgr_pdev *pdev, u_int8_t *macaddres, + u_int8_t status, int8_t rssi, u_int8_t subtype) +{ + return; + +} + +void son_ol_rx_rate_update(struct wlan_objmgr_pdev *pdev, u_int8_t *macaddres, + u_int8_t status, u_int32_t rateKbps) +{ + return; +} + +void target_if_son_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + /* wlan son related function handler */ + tx_ops->son_tx_ops.son_enable = son_ol_enable; + tx_ops->son_tx_ops.set_overload = son_ol_set_overload; + tx_ops->son_tx_ops.set_params = son_ol_set_params; + tx_ops->son_tx_ops.lmac_create = son_ol_lmac_create; + tx_ops->son_tx_ops.lmac_destroy = son_ol_lmac_destroy; + tx_ops->son_tx_ops.son_send_null = son_ol_send_null; + tx_ops->son_tx_ops.son_rssi_update = son_ol_rx_rssi_update; + tx_ops->son_tx_ops.son_rate_update = son_ol_rx_rate_update; + tx_ops->son_tx_ops.son_sanity_util_intvl = son_ol_sanitize_util_invtl; + tx_ops->son_tx_ops.get_peer_rate = son_ol_get_peer_rate; + 
tx_ops->son_tx_ops.son_node_isinact = son_ol_is_peer_inact; + return; +} +#else +void target_if_son_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + return; +} +int8_t son_ol_sanitize_util_intvl(struct wlan_objmgr_pdev *pdev, + u_int32_t *sample_period, + u_int32_t *num_of_sample) +{ + return -EINVAL; + +} + +u_int32_t son_ol_get_peer_rate(struct wlan_objmgr_peer *peer, u_int8_t type) +{ + return 0; +} + + +bool son_ol_enable(struct wlan_objmgr_pdev *pdev, bool enable) +{ + return -EINVAL; + +} + + +/* Function pointer to set overload status */ + +void son_ol_set_overload(struct wlan_objmgr_pdev *pdev, bool overload) +{ + return; +} + + +/* Function pointer to set band steering parameters */ + +bool son_ol_set_params(struct wlan_objmgr_pdev *dev, + u_int32_t inactivity_check_period, + u_int32_t inactivity_threshold_normal, + u_int32_t inactivity_threshold_overload) +{ + return -EINVAL; +} + + + +QDF_STATUS son_ol_send_null(struct wlan_objmgr_pdev *pdev, + u_int8_t *macaddr, + struct wlan_objmgr_vdev *vdev) +{ + return EOK; +} +int8_t son_ol_sanitize_util_invtl(struct wlan_objmgr_pdev *pdev, + u_int32_t *sample_period, + u_int32_t *num_of_sample) +{ + return EOK; +} + +int son_ol_lmac_create(struct wlan_objmgr_pdev *pdev) +{ + return EOK; +} + + +int son_ol_lmac_destroy(struct wlan_objmgr_pdev *pdev) +{ + return EOK; + +} + + +void son_ol_rx_rssi_update(struct wlan_objmgr_pdev *pdev, u_int8_t *macaddres, + u_int8_t status, int8_t rssi, u_int8_t subtype) +{ + return; + +} + +void son_ol_rx_rate_update(struct wlan_objmgr_pdev *pdev, u_int8_t *macaddres, + u_int8_t status, u_int32_t rateKbps) +{ + return; +} + +bool son_ol_is_peer_inact(struct wlan_objmgr_peer *peer) +{ + return false; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral.c b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral.c new file mode 100644 index 
0000000000000000000000000000000000000000..b64e7cb66bbee58db8875bd5dd7dbe303ffcf6d2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral.c @@ -0,0 +1,2936 @@ +/* + * Copyright (c) 2011,2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_WIN +#include +#endif /*CONFIG_WIN*/ +#include +#include +#include +#include +/** + * @spectral_ops - Spectral function table, holds the Spectral functions that + * depend on whether the architecture is Direct Attach or Offload. This is used + * to populate the actual Spectral function table present in the Spectral + * module. 
+ */ +struct target_if_spectral_ops spectral_ops; +int spectral_debug_level = DEBUG_SPECTRAL; + +static void target_if_spectral_get_firstvdev_pdev(struct wlan_objmgr_pdev *pdev, + void *obj, void *arg) +{ + struct wlan_objmgr_vdev *vdev = obj; + struct wlan_objmgr_vdev **first_vdev = arg; + + if (!(*first_vdev)) + *first_vdev = vdev; +} + +struct wlan_objmgr_vdev * +target_if_spectral_get_vdev(struct target_if_spectral *spectral) +{ + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_objmgr_vdev *first_vdev = NULL; + + qdf_assert_always(spectral); + pdev = spectral->pdev_obj; + qdf_assert_always(pdev); + + if (wlan_objmgr_pdev_try_get_ref(pdev, WLAN_SPECTRAL_ID) != + QDF_STATUS_SUCCESS) { + spectral_err("Unable to get pdev reference."); + return NULL; + } + + wlan_objmgr_pdev_iterate_obj_list(pdev, WLAN_VDEV_OP, + target_if_spectral_get_firstvdev_pdev, + &first_vdev, 0, WLAN_SPECTRAL_ID); + + wlan_objmgr_pdev_release_ref(pdev, WLAN_SPECTRAL_ID); + + if (wlan_objmgr_vdev_try_get_ref(first_vdev, WLAN_SPECTRAL_ID) != + QDF_STATUS_SUCCESS) + first_vdev = NULL; + + if (!first_vdev) { + spectral_warn("Unable to get first vdev of pdev."); + return NULL; + } + + return first_vdev; +} + +/** + * target_if_send_vdev_spectral_configure_cmd() - Send WMI command to configure + * spectral parameters + * @spectral: Pointer to Spectral target_if internal private data + * @param: Pointer to spectral_config giving the Spectral configuration + * + * Return: QDF_STATUS_SUCCESS on success, negative error code on failure + */ +static int +target_if_send_vdev_spectral_configure_cmd(struct target_if_spectral *spectral, + struct spectral_config *param) +{ + struct vdev_spectral_configure_params sparam; + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_objmgr_vdev *vdev = NULL; + + qdf_assert_always(spectral && param); + + pdev = spectral->pdev_obj; + + qdf_assert_always(pdev); + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) + return QDF_STATUS_E_NOENT; + + 
qdf_mem_set(&sparam, sizeof(sparam), 0); + + sparam.vdev_id = wlan_vdev_get_id(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + sparam.count = param->ss_count; + sparam.period = param->ss_period; + sparam.spectral_pri = param->ss_spectral_pri; + sparam.fft_size = param->ss_fft_size; + sparam.gc_enable = param->ss_gc_ena; + sparam.restart_enable = param->ss_restart_ena; + sparam.noise_floor_ref = param->ss_noise_floor_ref; + sparam.init_delay = param->ss_init_delay; + sparam.nb_tone_thr = param->ss_nb_tone_thr; + sparam.str_bin_thr = param->ss_str_bin_thr; + sparam.wb_rpt_mode = param->ss_wb_rpt_mode; + sparam.rssi_rpt_mode = param->ss_rssi_rpt_mode; + sparam.rssi_thr = param->ss_rssi_thr; + sparam.pwr_format = param->ss_pwr_format; + sparam.rpt_mode = param->ss_rpt_mode; + sparam.bin_scale = param->ss_bin_scale; + sparam.dbm_adj = param->ss_dbm_adj; + sparam.chn_mask = param->ss_chn_mask; + + return spectral->param_wmi_cmd_ops.wmi_spectral_configure_cmd_send( + GET_WMI_HDL_FROM_PDEV(pdev), &sparam); +} + +/** + * target_if_send_vdev_spectral_enable_cmd() - Send WMI command to + * enable/disable Spectral + * @spectral: Pointer to Spectral target_if internal private data + * @is_spectral_active_valid: Flag to indicate if spectral activate (trigger) is + * valid + * @is_spectral_active: Value of spectral activate + * @is_spectral_enabled_valid: Flag to indicate if spectral enable is valid + * @is_spectral_enabled: Value of spectral enable + * + * Return: QDF_STATUS_SUCCESS on success, negative error code on failure + */ +static int +target_if_send_vdev_spectral_enable_cmd(struct target_if_spectral *spectral, + uint8_t is_spectral_active_valid, + uint8_t is_spectral_active, + uint8_t is_spectral_enabled_valid, + uint8_t is_spectral_enabled) +{ + struct vdev_spectral_enable_params param; + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_objmgr_vdev *vdev = NULL; + + qdf_assert_always(spectral); + + pdev = spectral->pdev_obj; + + 
qdf_assert_always(pdev); + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) + return QDF_STATUS_E_NOENT; + + qdf_mem_set(¶m, sizeof(param), 0); + + param.vdev_id = wlan_vdev_get_id(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + param.active_valid = is_spectral_active_valid; + param.enabled_valid = is_spectral_enabled_valid; + param.active = is_spectral_active; + param.enabled = is_spectral_enabled; + + return spectral->param_wmi_cmd_ops.wmi_spectral_enable_cmd_send( + GET_WMI_HDL_FROM_PDEV(pdev), ¶m); +} + +/** + * target_if_spectral_info_init_defaults() - Helper function to load defaults + * for Spectral information (parameters and state) into cache. + * @spectral: Pointer to Spectral target_if internal private data + * + * It is assumed that the caller has obtained the requisite lock if applicable. + * Note that this is currently treated as a temporary function. Ideally, we + * would like to get defaults from the firmware. + * + * Return: QDF_STATUS_SUCCESS on success, negative error code on failure + */ +static int +target_if_spectral_info_init_defaults(struct target_if_spectral *spectral) +{ + struct target_if_spectral_param_state_info *info = + &spectral->param_info; + struct wlan_objmgr_vdev *vdev = NULL; + + /* State */ + info->osps_cache.osc_spectral_active = SPECTRAL_SCAN_ACTIVE_DEFAULT; + + info->osps_cache.osc_spectral_enabled = SPECTRAL_SCAN_ENABLE_DEFAULT; + + /* Parameters */ + info->osps_cache.osc_params.ss_count = SPECTRAL_SCAN_COUNT_DEFAULT; + + if (spectral->spectral_gen == SPECTRAL_GEN3) + info->osps_cache.osc_params.ss_period = + SPECTRAL_SCAN_PERIOD_GEN_III_DEFAULT; + else + info->osps_cache.osc_params.ss_period = + SPECTRAL_SCAN_PERIOD_GEN_II_DEFAULT; + + info->osps_cache.osc_params.ss_spectral_pri = + SPECTRAL_SCAN_PRIORITY_DEFAULT; + + info->osps_cache.osc_params.ss_fft_size = + SPECTRAL_SCAN_FFT_SIZE_DEFAULT; + + info->osps_cache.osc_params.ss_gc_ena = SPECTRAL_SCAN_GC_ENA_DEFAULT; + + 
info->osps_cache.osc_params.ss_restart_ena = + SPECTRAL_SCAN_RESTART_ENA_DEFAULT; + + info->osps_cache.osc_params.ss_noise_floor_ref = + SPECTRAL_SCAN_NOISE_FLOOR_REF_DEFAULT; + + info->osps_cache.osc_params.ss_init_delay = + SPECTRAL_SCAN_INIT_DELAY_DEFAULT; + + info->osps_cache.osc_params.ss_nb_tone_thr = + SPECTRAL_SCAN_NB_TONE_THR_DEFAULT; + + info->osps_cache.osc_params.ss_str_bin_thr = + SPECTRAL_SCAN_STR_BIN_THR_DEFAULT; + + info->osps_cache.osc_params.ss_wb_rpt_mode = + SPECTRAL_SCAN_WB_RPT_MODE_DEFAULT; + + info->osps_cache.osc_params.ss_rssi_rpt_mode = + SPECTRAL_SCAN_RSSI_RPT_MODE_DEFAULT; + + info->osps_cache.osc_params.ss_rssi_thr = + SPECTRAL_SCAN_RSSI_THR_DEFAULT; + + info->osps_cache.osc_params.ss_pwr_format = + SPECTRAL_SCAN_PWR_FORMAT_DEFAULT; + + info->osps_cache.osc_params.ss_rpt_mode = + SPECTRAL_SCAN_RPT_MODE_DEFAULT; + + info->osps_cache.osc_params.ss_bin_scale = + SPECTRAL_SCAN_BIN_SCALE_DEFAULT; + + info->osps_cache.osc_params.ss_dbm_adj = SPECTRAL_SCAN_DBM_ADJ_DEFAULT; + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) + return QDF_STATUS_E_NOENT; + + info->osps_cache.osc_params.ss_chn_mask = + wlan_vdev_mlme_get_rxchainmask(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + /* The cache is now valid */ + info->osps_cache.osc_is_valid = 1; + + return QDF_STATUS_SUCCESS; +} + +#ifdef OL_SPECTRAL_DEBUG_CONFIG_INTERACTIONS +/** + * target_if_log_read_spectral_active() - Helper function to log whether + * spectral is active after reading cache + * @function_name: Function name + * @output: whether spectral is active or not + * + * Helper function to log whether spectral is active after reading cache + * + * Return: none + */ +static void +target_if_log_read_spectral_active( + const char *function_name, + unsigned char output) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_ACTIVE. 
Returning val=%u", + function_name, output); +} + +/** + * target_if_log_read_spectral_enabled() - Helper function to log whether + * spectral is enabled after reading cache + * @function_name: Function name + * @output: whether spectral is enabled or not + * + * Helper function to log whether spectral is enabled after reading cache + * + * Return: none + */ +static void +target_if_log_read_spectral_enabled( + const char *function_name, + unsigned char output) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_ENABLED. Returning val=%u", + function_name, output); +} + +/** + * target_if_log_read_spectral_enabled() - Helper function to log spectral + * parameters after reading cache + * @function_name: Function name + * @pparam: Spectral parameters + * + * Helper function to log spectral parameters after reading cache + * + * Return: none + */ +static void +target_if_log_read_spectral_params( + const char *function_name, + struct spectral_config *pparam) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_PARAMS. 
Returning following params:\nss_count = %u\nss_period = %u\nss_spectral_pri = %u\nss_fft_size = %u\nss_gc_ena = %u\nss_restart_ena = %u\nss_noise_floor_ref = %d\nss_init_delay = %u\nss_nb_tone_thr = %u\nss_str_bin_thr = %u\nss_wb_rpt_mode = %u\nss_rssi_rpt_mode = %u\nss_rssi_thr = %d\nss_pwr_format = %u\nss_rpt_mode = %u\nss_bin_scale = %u\nss_dbm_adj = %u\nss_chn_mask = %u\n", + function_name, + pparam->ss_count, + pparam->ss_period, + pparam->ss_spectral_pri, + pparam->ss_fft_size, + pparam->ss_gc_ena, + pparam->ss_restart_ena, + (int8_t)pparam->ss_noise_floor_ref, + pparam->ss_init_delay, + pparam->ss_nb_tone_thr, + pparam->ss_str_bin_thr, + pparam->ss_wb_rpt_mode, + pparam->ss_rssi_rpt_mode, + (int8_t)pparam->ss_rssi_thr, + pparam->ss_pwr_format, + pparam->ss_rpt_mode, + pparam->ss_bin_scale, + pparam->ss_dbm_adj, + pparam->ss_chn_mask); +} + +/** + * target_if_log_read_spectral_active_catch_validate() - Helper function to + * log whether spectral is active after intializing the cache + * @function_name: Function name + * @output: whether spectral is active or not + * + * Helper function to log whether spectral is active after intializing cache + * + * Return: none + */ +static void +target_if_log_read_spectral_active_catch_validate( + const char *function_name, + unsigned char output) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_ACTIVE on initial cache validation\nReturning val=%u", + function_name, output); +} + +/** + * target_if_log_read_spectral_enabled_catch_validate() - Helper function to + * log whether spectral is enabled after intializing the cache + * @function_name: Function name + * @output: whether spectral is enabled or not + * + * Helper function to log whether spectral is enabled after intializing cache + * + * Return: none + */ +static void +target_if_log_read_spectral_enabled_catch_validate( + const char *function_name, + unsigned char output) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_ENABLED on initial cache validation\nReturning 
val=%u\n", + function_name, output); +} + +/** + * target_if_log_read_spectral_params_catch_validate() - Helper function to + * log spectral parameters after intializing the cache + * @function_name: Function name + * @pparam: Spectral parameters + * + * Helper function to log spectral parameters after intializing the cache + * + * Return: none + */ +static void +target_if_log_read_spectral_params_catch_validate( + const char *function_name, + struct spectral_config *pparam) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_PARAMS on initial cache validation\nReturning following params:\nss_count = %u\nss_period = %u\nss_spectral_pri = %u\nss_fft_size = %u\nss_gc_ena = %u\nss_restart_ena = %u\nss_noise_floor_ref = %d\nss_init_delay = %u\nss_nb_tone_thr = %u\nss_str_bin_thr = %u\nss_wb_rpt_mode = %u\nss_rssi_rpt_mode = %u\nss_rssi_thr = %d\nss_pwr_format = %u\nss_rpt_mode = %u\nss_bin_scale = %u\nss_dbm_adj = %u\nss_chn_mask = %u", + function_name, + pparam->ss_count, + pparam->ss_period, + pparam->ss_spectral_pri, + pparam->ss_fft_size, + pparam->ss_gc_ena, + pparam->ss_restart_ena, + (int8_t)pparam->ss_noise_floor_ref, + pparam->ss_init_delay, + pparam->ss_nb_tone_thr, + pparam->ss_str_bin_thr, + pparam->ss_wb_rpt_mode, + pparam->ss_rssi_rpt_mode, + (int8_t)pparam->ss_rssi_thr, + pparam->ss_pwr_format, + pparam->ss_rpt_mode, + pparam->ss_bin_scale, + pparam->ss_dbm_adj, pparam->ss_chn_mask); +} + +#else +static void +target_if_log_read_spectral_active( + const char *function_name, + unsigned char output) +{ +} + +static void +target_if_log_read_spectral_enabled( + const char *function_name, + unsigned char output) +{ +} + +static void +target_if_log_read_spectral_params( + const char *function_name, + struct spectral_config *pparam) +{ +} + +static void +target_if_log_read_spectral_active_catch_validate( + const char *function_name, + unsigned char output) +{ +} + +static void +target_if_log_read_spectral_enabled_catch_validate( + const char *function_name, + 
unsigned char output) +{ +} + +static void +target_if_log_read_spectral_params_catch_validate( + const char *function_name, + struct spectral_config *pparam) +{ +} +#endif + +/** + * target_if_spectral_info_read() - Read spectral information from the cache. + * @spectral: Pointer to Spectral target_if internal private data + * @specifier: target_if_spectral_info enumeration specifying which + * information is required + * @output: Void output pointer into which the information will be read + * @output_len: size of object pointed to by output pointer + * + * Read spectral parameters or the desired state information from the cache. + * + * Return: 0 on success, negative error code on failure + */ +static int +target_if_spectral_info_read( + struct target_if_spectral *spectral, + enum target_if_spectral_info specifier, + void *output, int output_len) +{ + /* + * Note: This function is designed to be able to accommodate + * WMI reads for defaults, non-cacheable information, etc + * if required. 
+ */ + struct target_if_spectral_param_state_info *info = + &spectral->param_info; + int is_cacheable = 0; + int init_def_retval = 0; + + if (!output) + return -EINVAL; + + switch (specifier) { + case TARGET_IF_SPECTRAL_INFO_ACTIVE: + if (output_len != sizeof(info->osps_cache.osc_spectral_active)) + return -EINVAL; + is_cacheable = 1; + break; + + case TARGET_IF_SPECTRAL_INFO_ENABLED: + if (output_len != sizeof(info->osps_cache.osc_spectral_enabled)) + return -EINVAL; + is_cacheable = 1; + break; + + case TARGET_IF_SPECTRAL_INFO_PARAMS: + if (output_len != sizeof(info->osps_cache.osc_params)) + return -EINVAL; + is_cacheable = 1; + break; + + default: + spectral_err("Unknown target_if_spectral_info specifier"); + return -EINVAL; + } + + qdf_spin_lock(&info->osps_lock); + + if (is_cacheable) { + if (info->osps_cache.osc_is_valid) { + switch (specifier) { + case TARGET_IF_SPECTRAL_INFO_ACTIVE: + qdf_mem_copy( + output, + &info->osps_cache.osc_spectral_active, + sizeof(info->osps_cache.osc_spectral_active)); + + target_if_log_read_spectral_active( + __func__, + *((unsigned char *)output)); + break; + + case TARGET_IF_SPECTRAL_INFO_ENABLED: + qdf_mem_copy( + output, + &info->osps_cache.osc_spectral_enabled, + sizeof( + info->osps_cache.osc_spectral_enabled)); + + target_if_log_read_spectral_enabled( + __func__, + *((unsigned char *)output)); + break; + + case TARGET_IF_SPECTRAL_INFO_PARAMS: + qdf_mem_copy( + output, + &info->osps_cache.osc_params, + sizeof(info->osps_cache.osc_params)); + + target_if_log_read_spectral_params( + __func__, + (struct spectral_config *)output); + break; + + default: + /* We can't reach this point */ + break; + } + qdf_spin_unlock(&info->osps_lock); + return 0; + } + } + + /* Cache is invalid */ + + /* + * If WMI Reads are implemented to fetch defaults/non-cacheable info, + * then the below implementation will change + */ + init_def_retval = target_if_spectral_info_init_defaults(spectral); + if (init_def_retval != QDF_STATUS_SUCCESS) { + 
qdf_spin_unlock(&info->osps_lock); + if (init_def_retval == QDF_STATUS_E_NOENT) + return -ENOENT; + else + return -EINVAL; + } + /* target_if_spectral_info_init_defaults() has set cache to valid */ + + switch (specifier) { + case TARGET_IF_SPECTRAL_INFO_ACTIVE: + qdf_mem_copy(output, + &info->osps_cache.osc_spectral_active, + sizeof(info->osps_cache.osc_spectral_active)); + + target_if_log_read_spectral_active_catch_validate( + __func__, + *((unsigned char *)output)); + break; + + case TARGET_IF_SPECTRAL_INFO_ENABLED: + qdf_mem_copy(output, + &info->osps_cache.osc_spectral_enabled, + sizeof(info->osps_cache.osc_spectral_enabled)); + + target_if_log_read_spectral_enabled_catch_validate( + __func__, + *((unsigned char *)output)); + break; + + case TARGET_IF_SPECTRAL_INFO_PARAMS: + qdf_mem_copy(output, + &info->osps_cache.osc_params, + sizeof(info->osps_cache.osc_params)); + + target_if_log_read_spectral_params_catch_validate( + __func__, + (struct spectral_config *)output); + + break; + + default: + /* We can't reach this point */ + break; + } + + qdf_spin_unlock(&info->osps_lock); + + return 0; +} + +#ifdef OL_SPECTRAL_DEBUG_CONFIG_INTERACTIONS +/** + * target_if_log_write_spectral_active() - Helper function to log inputs and + * return value of call to configure the Spectral 'active' configuration, + * TARGET_IF_SPECTRAL_INFO_ACTIVE into firmware + * @function_name: Function name in which this is called + * @pval: whether spectral is active or not + * @ret: return value of the firmware write function + * + * Return: none + */ +static void +target_if_log_write_spectral_active( + const char *function_name, + uint8_t pval, + int ret) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_ACTIVE with val=%u status=%d", + function_name, pval, ret); +} + +/** + * target_if_log_write_spectral_enabled() - Helper function to log inputs and + * return value of call to configure the Spectral 'enabled' configuration, + * TARGET_IF_SPECTRAL_INFO_ENABLED into firmware + * 
@function_name: Function name in which this is called + * @pval: whether spectral is enabled or not + * @ret: return value of the firmware write function + * + * Return: none + */ +static void +target_if_log_write_spectral_enabled( + const char *function_name, + uint8_t pval, + int ret) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_ENABLED with val=%u status=%d", + function_name, pval, ret); +} + +/** + * target_if_log_write_spectral_params() - Helper function to log inputs and + * return value of call to configure Spectral parameters, + * TARGET_IF_SPECTRAL_INFO_PARAMS into firmware + * @param: Spectral parameters + * @function_name: Function name in which this is called + * @ret: return value of the firmware write function + * + * Return: none + */ +static void +target_if_log_write_spectral_params( + struct spectral_config *param, + const char *function_name, + int ret) +{ + spectral_debug("%s: TARGET_IF_SPECTRAL_INFO_PARAMS. Params:\nss_count = %u\nss_period = %u\nss_spectral_pri = %u\nss_fft_size = %u\nss_gc_ena = %u\nss_restart_ena = %u\nss_noise_floor_ref = %d\nss_init_delay = %u\nss_nb_tone_thr = %u\nss_str_bin_thr = %u\nss_wb_rpt_mode = %u\nss_rssi_rpt_mode = %u\nss_rssi_thr = %d\nss_pwr_format = %u\nss_rpt_mode = %u\nss_bin_scale = %u\nss_dbm_adj = %u\nss_chn_mask = %u\nstatus = %d", + function_name, + param->ss_count, + param->ss_period, + param->ss_spectral_pri, + param->ss_fft_size, + param->ss_gc_ena, + param->ss_restart_ena, + (int8_t)param->ss_noise_floor_ref, + param->ss_init_delay, + param->ss_nb_tone_thr, + param->ss_str_bin_thr, + param->ss_wb_rpt_mode, + param->ss_rssi_rpt_mode, + (int8_t)param->ss_rssi_thr, + param->ss_pwr_format, + param->ss_rpt_mode, + param->ss_bin_scale, + param->ss_dbm_adj, param->ss_chn_mask, ret); +} +#else +static void +target_if_log_write_spectral_active( + const char *function_name, + uint8_t pval, + int ret) +{ +} + +static void +target_if_log_write_spectral_enabled( + const char *function_name, + uint8_t pval, + 
int ret) +{ +} + +static void +target_if_log_write_spectral_params( + struct spectral_config *param, + const char *function_name, + int ret) +{ +} + +#endif + +/** + * target_if_spectral_info_write() - Write Spectral information to the + * firmware, and update cache + * @spectral: Pointer to Spectral target_if internal private data + * @specifier: target_if_spectral_info enumeration specifying which + * information is involved + * @input: void input pointer containing the information to be written + * @input_len: size of object pointed to by input pointer + * + * Write Spectral parameters or the desired state information to + * the firmware, and update cache + * + * Return: 0 on success, negative error code on failure + */ +static int +target_if_spectral_info_write( + struct target_if_spectral *spectral, + enum target_if_spectral_info specifier, + void *input, int input_len) +{ + struct target_if_spectral_param_state_info *info = + &spectral->param_info; + int ret; + uint8_t *pval = NULL; + struct spectral_config *param = NULL; + + if (!input) + return -EINVAL; + + switch (specifier) { + case TARGET_IF_SPECTRAL_INFO_ACTIVE: + if (input_len != sizeof(info->osps_cache.osc_spectral_active)) + return -EINVAL; + + pval = (uint8_t *)input; + + qdf_spin_lock(&info->osps_lock); + ret = target_if_send_vdev_spectral_enable_cmd(spectral, + 1, *pval, 0, 0); + + target_if_log_write_spectral_active( + __func__, + *pval, + ret); + + if (ret < 0) { + spectral_err("target_if_send_vdev_spectral_enable_cmd failed with error=%d", + ret); + qdf_spin_unlock(&info->osps_lock); + return ret; + } + + info->osps_cache.osc_spectral_active = *pval; + + /* The cache is now valid */ + info->osps_cache.osc_is_valid = 1; + + qdf_spin_unlock(&info->osps_lock); + break; + + case TARGET_IF_SPECTRAL_INFO_ENABLED: + if (input_len != sizeof(info->osps_cache.osc_spectral_enabled)) + return -EINVAL; + + pval = (uint8_t *)input; + + qdf_spin_lock(&info->osps_lock); + ret = 
target_if_send_vdev_spectral_enable_cmd(spectral, + 0, 0, 1, *pval); + + target_if_log_write_spectral_enabled( + __func__, + *pval, + ret); + + if (ret < 0) { + spectral_err("target_if_send_vdev_spectral_enable_cmd failed with error=%d", + ret); + qdf_spin_unlock(&info->osps_lock); + return ret; + } + + info->osps_cache.osc_spectral_enabled = *pval; + + /* The cache is now valid */ + info->osps_cache.osc_is_valid = 1; + + qdf_spin_unlock(&info->osps_lock); + break; + + case TARGET_IF_SPECTRAL_INFO_PARAMS: + if (input_len != sizeof(info->osps_cache.osc_params)) + return -EINVAL; + + param = (struct spectral_config *)input; + + qdf_spin_lock(&info->osps_lock); + ret = target_if_send_vdev_spectral_configure_cmd(spectral, + param); + + target_if_log_write_spectral_params( + param, + __func__, + ret); + + if (ret < 0) { + spectral_err("target_if_send_vdev_spectral_configure_cmd failed with error=%d", + ret); + qdf_spin_unlock(&info->osps_lock); + return ret; + } + + qdf_mem_copy(&info->osps_cache.osc_params, + param, sizeof(info->osps_cache.osc_params)); + + /* The cache is now valid */ + info->osps_cache.osc_is_valid = 1; + + qdf_spin_unlock(&info->osps_lock); + break; + + default: + spectral_err("Unknown target_if_spectral_info specifier"); + return -EINVAL; + } + + return 0; +} + +/** + * target_if_spectral_get_tsf64() - Function to get the TSF value + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Get the last TSF received in WMI buffer + * + * Return: TSF value + */ +static uint64_t +target_if_spectral_get_tsf64(void *arg) +{ + struct target_if_spectral *spectral = (struct target_if_spectral *)arg; + + return spectral->tsf64; +} + +/** + * target_if_spectral_get_capability() - Function to get whether a + * given Spectral hardware capability is available + * @arg: Pointer to handle for Spectral target_if internal private data + * @type: Spectral hardware capability type + * + * Get whether a given Spectral hardware capability is 
available + * + * Return: True if the capability is available, false if the capability is not + * available + */ +uint32_t +target_if_spectral_get_capability(void *arg, enum spectral_capability_type type) +{ + int status = STATUS_FAIL; + + switch (type) { + case SPECTRAL_CAP_PHYDIAG: + case SPECTRAL_CAP_RADAR: + case SPECTRAL_CAP_SPECTRAL_SCAN: + case SPECTRAL_CAP_ADVNCD_SPECTRAL_SCAN: + status = STATUS_PASS; + break; + default: + status = STATUS_FAIL; + } + return status; +} + +/** + * target_if_spectral_set_rxfilter() - Set the RX Filter before Spectral start + * @arg: Pointer to handle for Spectral target_if internal private data + * @rxfilter: Rx filter to be used + * + * Note: This is only a placeholder function. It is not currently required since + * FW should be taking care of setting the required filters. + * + * Return: 0 + */ +uint32_t +target_if_spectral_set_rxfilter(void *arg, int rxfilter) +{ + /* + * Will not be required since enabling of spectral in firmware + * will take care of this + */ + return 0; +} + +/** + * target_if_spectral_get_rxfilter() - Get the current RX Filter settings + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Note: This is only a placeholder function. It is not currently required since + * FW should be taking care of setting the required filters. 
+ * + * Return: 0 + */ +uint32_t +target_if_spectral_get_rxfilter(void *arg) +{ + /* + * Will not be required since enabling of spectral in firmware + * will take care of this + */ + return 0; +} + +/** + * target_if_sops_is_spectral_active() - Get whether Spectral is active + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Function to check whether Spectral is active + * + * Return: True if Spectral is active, false if Spectral is not active + */ +uint32_t +target_if_sops_is_spectral_active(void *arg) +{ + struct target_if_spectral *spectral = (struct target_if_spectral *)arg; + uint8_t val = 0; + int ret; + + ret = target_if_spectral_info_read( + spectral, + TARGET_IF_SPECTRAL_INFO_ACTIVE, + &val, sizeof(val)); + + if (ret != 0) { + /* + * Could not determine if Spectral is active. + * Return false as a safe value. + * XXX: Consider changing the function prototype + * to be able to indicate failure to fetch value. + */ + return 0; + } + + return val; +} + +/** + * target_if_sops_is_spectral_enabled() - Get whether Spectral is enabled + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Function to check whether Spectral is enabled + * + * Return: True if Spectral is enabled, false if Spectral is not enabled + */ +uint32_t +target_if_sops_is_spectral_enabled(void *arg) +{ + struct target_if_spectral *spectral = (struct target_if_spectral *)arg; + uint8_t val = 0; + int ret; + + ret = target_if_spectral_info_read( + spectral, + TARGET_IF_SPECTRAL_INFO_ENABLED, + &val, sizeof(val)); + + if (ret != 0) { + /* + * Could not determine if Spectral is enabled. + * Return false as a safe value. + * XXX: Consider changing the function prototype + * to be able to indicate failure to fetch value. 
+ */ + return 0; + } + + return val; +} + +/** + * target_if_sops_start_spectral_scan() - Start Spectral scan + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Function to start spectral scan + * + * Return: 0 on success else failure + */ +uint32_t +target_if_sops_start_spectral_scan(void *arg) +{ + struct target_if_spectral *spectral = (struct target_if_spectral *)arg; + uint8_t val = 1; + uint8_t enabled = 0; + int ret; + + ret = target_if_spectral_info_read( + spectral, + TARGET_IF_SPECTRAL_INFO_ENABLED, + &enabled, sizeof(enabled)); + + if (ret != 0) { + /* + * Could not determine if Spectral is enabled. Assume we need + * to enable it + */ + enabled = 0; + } + + if (!enabled) { + ret = target_if_spectral_info_write( + spectral, + TARGET_IF_SPECTRAL_INFO_ENABLED, + &val, sizeof(val)); + + if (ret != 0) + return ret; + } + + ret = target_if_spectral_info_write( + spectral, + TARGET_IF_SPECTRAL_INFO_ACTIVE, + &val, sizeof(val)); + + if (ret != 0) + return ret; + + return 0; +} + +/** + * target_if_sops_stop_spectral_scan() - Stop Spectral scan + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Function to stop spectral scan + * + * Return: 0 on success else failure + */ +uint32_t +target_if_sops_stop_spectral_scan(void *arg) +{ + struct target_if_spectral *spectral = (struct target_if_spectral *)arg; + uint8_t val = 0; + int tempret, ret = 0; + + tempret = target_if_spectral_info_write( + spectral, + TARGET_IF_SPECTRAL_INFO_ACTIVE, + &val, sizeof(val)); + + if (tempret != 0) + ret = tempret; + + tempret = target_if_spectral_info_write( + spectral, + TARGET_IF_SPECTRAL_INFO_ENABLED, + &val, sizeof(val)); + + if (tempret != 0) + ret = tempret; + + return ret; +} + +/** + * target_if_spectral_get_extension_channel() - Get the Extension channel + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Function to get the current Extension channel (in MHz) + * + * Return: Current 
Extension channel (in MHz) on success, 0 on failure or if + * extension channel is not present. + */ +uint32_t +target_if_spectral_get_extension_channel(void *arg) +{ + /* + * XXX: Once we expand to use cases where Spectral could be activated + * without a channel being set to VDEV, we need to consider returning a + * negative value in case of failure and having all callers handle this. + */ + + struct target_if_spectral *spectral = NULL; + struct wlan_objmgr_vdev *vdev = NULL; + uint16_t sec20chan_freq = 0; + + qdf_assert_always(arg); + spectral = (struct target_if_spectral *)arg; + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) + return 0; + + if (target_if_vdev_get_sec20chan_freq_mhz(vdev, &sec20chan_freq) < 0) { + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + return 0; + } + + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + return sec20chan_freq; +} + +/** + * target_if_spectral_get_current_channel() - Get the current channel + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Function to get the current channel (in MHz) + * + * Return: Current channel (in MHz) on success, 0 on failure + */ +uint32_t +target_if_spectral_get_current_channel(void *arg) +{ + /* + * XXX: Once we expand to use cases where Spectral could be activated + * without a channel being set to VDEV, we need to consider returning a + * negative value in case of failure and having all callers handle this. 
+ */ + + struct target_if_spectral *spectral = NULL; + int16_t chan_freq = 0; + struct wlan_objmgr_vdev *vdev = NULL; + + qdf_assert_always(arg); + spectral = (struct target_if_spectral *)arg; + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) + return 0; + + chan_freq = target_if_vdev_get_chan_freq(vdev); + if (chan_freq < 0) { + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + return 0; + } + + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + return chan_freq; +} + +/** + * target_if_spectral_reset_hw() - Reset the hardware + * @arg: Pointer to handle for Spectral target_if internal private data + * + * This is only a placeholder since it is not currently required in the offload + * case. + * + * Return: 0 + */ +uint32_t +target_if_spectral_reset_hw(void *arg) +{ + not_yet_implemented(); + return 0; +} + +/** + * target_if_spectral_get_chain_noise_floor() - Get the Chain noise floor from + * Noisefloor history buffer + * @arg: Pointer to handle for Spectral target_if internal private data + * @nf_buf: Pointer to buffer into which chain Noise Floor data should be copied + * + * This is only a placeholder since it is not currently required in the offload + * case. + * + * Return: 0 + */ +uint32_t +target_if_spectral_get_chain_noise_floor(void *arg, int16_t *nf_buf) +{ + not_yet_implemented(); + return 0; +} + +/** + * target_if_spectral_get_ext_noisefloor() - Get the extension channel + * noisefloor + * @arg: Pointer to handle for Spectral target_if internal private data + * + * This is only a placeholder since it is not currently required in the offload + * case. + * + * Return: 0 + */ +int8_t +target_if_spectral_get_ext_noisefloor(void *arg) +{ + not_yet_implemented(); + return 0; +} + +/** + * target_if_spectral_get_ctl_noisefloor() - Get the control channel noisefloor + * @arg: Pointer to handle for Spectral target_if internal private data + * + * This is only a placeholder since it is not currently required in the offload + * case. 
+ * + * Return: 0 + */ +int8_t +target_if_spectral_get_ctl_noisefloor(void *arg) +{ + not_yet_implemented(); + return 0; +} + +/** + * target_if_spectral_sops_configure_params() - Configure user supplied Spectral + * parameters + * @arg: Pointer to handle for Spectral target_if internal private data + * @params: Spectral parameters + * + * Function to configure spectral parameters + * + * Return: 0 on success else failure + */ +uint32_t +target_if_spectral_sops_configure_params( + void *arg, struct spectral_config *params) +{ + struct target_if_spectral *spectral = (struct target_if_spectral *)arg; + + return target_if_spectral_info_write( + spectral, + TARGET_IF_SPECTRAL_INFO_PARAMS, + params, sizeof(*params)); +} + +/** + * target_if_spectral_sops_get_params() - Get user configured Spectral + * parameters + * @arg: Pointer to handle for Spectral target_if internal private data + * @params: Pointer to buffer into which Spectral parameters should be copied + * + * Function to get the configured spectral parameters + * + * Return: 0 on success else failure + */ +uint32_t +target_if_spectral_sops_get_params(void *arg, struct spectral_config *params) +{ + struct target_if_spectral *spectral = (struct target_if_spectral *)arg; + + return target_if_spectral_info_read( + spectral, + TARGET_IF_SPECTRAL_INFO_PARAMS, + params, sizeof(*params)); +} + +/** + * target_if_spectral_get_ent_mask() - Get enterprise mask + * @arg: Pointer to handle for Spectral target_if internal private data + * + * This is only a placeholder since it is not currently required in the offload + * case. 
+ * + * Return: 0 + */ +static uint32_t +target_if_spectral_get_ent_mask(void *arg) +{ + not_yet_implemented(); + return 0; +} + +/** + * target_if_spectral_get_macaddr() - Get radio MAC address + * @arg: Pointer to handle for Spectral target_if internal private data + * @addr: Pointer to buffer into which MAC address should be copied + * + * Function to get the MAC address of the pdev + * + * Return: 0 on success, -1 on failure + */ +static uint32_t +target_if_spectral_get_macaddr(void *arg, char *addr) +{ + uint8_t *myaddr = NULL; + struct target_if_spectral *spectral = (struct target_if_spectral *)arg; + struct wlan_objmgr_pdev *pdev = NULL; + + pdev = spectral->pdev_obj; + + wlan_pdev_obj_lock(pdev); + myaddr = wlan_pdev_get_hw_macaddr(pdev); + wlan_pdev_obj_unlock(pdev); + qdf_mem_copy(addr, myaddr, IEEE80211_ADDR_LEN); + + return 0; +} + +/** + * target_if_init_spectral_capability() - Initialize Spectral capability + * @spectral: Pointer to Spectral target_if internal private data + * + * This is a workaround. 
+ * + * Return: None + */ +void +target_if_init_spectral_capability(struct target_if_spectral *spectral) +{ + struct spectral_caps *pcap = &spectral->capability; + + /* XXX : Workaround: Set Spectral capability */ + pcap = &spectral->capability; + pcap->phydiag_cap = 1; + pcap->radar_cap = 1; + pcap->spectral_cap = 1; + pcap->advncd_spectral_cap = 1; + pcap->hw_gen = spectral->spectral_gen; +} + +#ifdef QCA_SUPPORT_SPECTRAL_SIMULATION +/** + * target_if_init_spectral_simulation_ops() - Initialize spectral target_if + * internal operations with functions related to spectral simulation + * @p_sops: spectral low level ops table + * + * Initialize spectral target_if internal operations with functions + * related to spectral simulation + * + * Return: None + */ +static void +target_if_init_spectral_simulation_ops(struct target_if_spectral_ops *p_sops) +{ + /* + * Spectral simulation is currently intended for platform transitions + * where underlying HW support may not be available for some time. + * Hence, we do not currently provide a runtime switch to turn the + * simulation on or off. + * In case of future requirements where runtime switches are required, + * this can be added. But it is suggested to use application layer + * simulation as far as possible in such cases, since the main + * use of record and replay of samples would concern higher + * level sample processing rather than lower level delivery. 
+ */ + p_sops->is_spectral_enabled = target_if_spectral_sops_sim_is_enabled; + p_sops->is_spectral_active = target_if_spectral_sops_sim_is_active; + p_sops->start_spectral_scan = target_if_spectral_sops_sim_start_scan; + p_sops->stop_spectral_scan = target_if_spectral_sops_sim_stop_scan; + p_sops->configure_spectral = + target_if_spectral_sops_sim_configure_params; + p_sops->get_spectral_config = target_if_spectral_sops_sim_get_params; +} + +#else +/** + * target_if_init_spectral_simulation_ops() - Initialize spectral target_if + * internal operations + * @p_sops: spectral low level ops table + * + * Return: None + */ +static void +target_if_init_spectral_simulation_ops(struct target_if_spectral_ops *p_sops) +{ + p_sops->is_spectral_enabled = target_if_sops_is_spectral_enabled; + p_sops->is_spectral_active = target_if_sops_is_spectral_active; + p_sops->start_spectral_scan = target_if_sops_start_spectral_scan; + p_sops->stop_spectral_scan = target_if_sops_stop_spectral_scan; + p_sops->configure_spectral = target_if_spectral_sops_configure_params; + p_sops->get_spectral_config = target_if_spectral_sops_get_params; +} +#endif + +/** + * target_if_init_spectral_ops_common() - Initialize Spectral target_if internal + * operations common to all Spectral chipset generations + * + * Initializes target_if_spectral_ops common to all chipset generations + * + * Return: None + */ +static void +target_if_init_spectral_ops_common(void) +{ + struct target_if_spectral_ops *p_sops = &spectral_ops; + + p_sops->get_tsf64 = target_if_spectral_get_tsf64; + p_sops->get_capability = target_if_spectral_get_capability; + p_sops->set_rxfilter = target_if_spectral_set_rxfilter; + p_sops->get_rxfilter = target_if_spectral_get_rxfilter; + + target_if_init_spectral_simulation_ops(p_sops); + + p_sops->get_extension_channel = + target_if_spectral_get_extension_channel; + p_sops->get_ctl_noisefloor = target_if_spectral_get_ctl_noisefloor; + p_sops->get_ext_noisefloor = 
target_if_spectral_get_ext_noisefloor; + p_sops->get_ent_spectral_mask = target_if_spectral_get_ent_mask; + p_sops->get_mac_address = target_if_spectral_get_macaddr; + p_sops->get_current_channel = target_if_spectral_get_current_channel; + p_sops->reset_hw = target_if_spectral_reset_hw; + p_sops->get_chain_noise_floor = + target_if_spectral_get_chain_noise_floor; +} + +/** + * target_if_init_spectral_ops_gen2() - Initialize Spectral target_if internal + * operations specific to Spectral chipset generation 2. + * + * Initializes target_if_spectral_ops specific to Spectral chipset generation 2. + * + * Return: None + */ +static void +target_if_init_spectral_ops_gen2(void) +{ + struct target_if_spectral_ops *p_sops = &spectral_ops; + + p_sops->spectral_process_phyerr = target_if_process_phyerr_gen2; +} + +/** + * target_if_init_spectral_ops_gen3() - Initialize Spectral target_if internal + * operations specific to Spectral chipset generation 3. + * + * Initializes target_if_spectral_ops specific to Spectral chipset generation 3. + * + * Return: None + */ +static void +target_if_init_spectral_ops_gen3(void) +{ + struct target_if_spectral_ops *p_sops = &spectral_ops; + + p_sops->process_spectral_report = + target_if_spectral_process_report_gen3; + return; +} + +/** + * target_if_init_spectral_ops() - Initialize target_if internal Spectral + * operations. 
+ * @spectral: Pointer to Spectral target_if internal private data + * + * Initializes all function pointers in target_if_spectral_ops for + * all generations + * + * Return: None + */ +static void +target_if_init_spectral_ops(struct target_if_spectral *spectral) +{ + target_if_init_spectral_ops_common(); + if (spectral->spectral_gen == SPECTRAL_GEN2) + target_if_init_spectral_ops_gen2(); + else if (spectral->spectral_gen == SPECTRAL_GEN3) + target_if_init_spectral_ops_gen3(); + else + spectral_err("Invalid Spectral generation"); +} + +/* + * Dummy Functions: + * These functions are initially registered to avoid any crashes due to + * invocation of spectral functions before they are registered. + */ + +static uint64_t +null_get_tsf64(void *arg) +{ + spectral_ops_not_registered("get_tsf64"); + return 0; +} + +static uint32_t +null_get_capability(void *arg, enum spectral_capability_type type) +{ + /* + * TODO : We should have conditional compilation to get the capability + * : We have not yet attahced ATH layer here, so there is no + * : way to check the HAL capbalities + */ + spectral_ops_not_registered("get_capability"); + + /* TODO : For the time being, we are returning TRUE */ + return true; +} + +static uint32_t +null_set_rxfilter(void *arg, int rxfilter) +{ + spectral_ops_not_registered("set_rxfilter"); + return 1; +} + +static uint32_t +null_get_rxfilter(void *arg) +{ + spectral_ops_not_registered("get_rxfilter"); + return 0; +} + +static uint32_t +null_is_spectral_active(void *arg) +{ + spectral_ops_not_registered("is_spectral_active"); + return 1; +} + +static uint32_t +null_is_spectral_enabled(void *arg) +{ + spectral_ops_not_registered("is_spectral_enabled"); + return 1; +} + +static uint32_t +null_start_spectral_scan(void *arg) +{ + spectral_ops_not_registered("start_spectral_scan"); + return 1; +} + +static uint32_t +null_stop_spectral_scan(void *arg) +{ + spectral_ops_not_registered("stop_spectral_scan"); + return 1; +} + +static uint32_t 
+null_get_extension_channel(void *arg) +{ + spectral_ops_not_registered("get_extension_channel"); + return 1; +} + +static int8_t +null_get_ctl_noisefloor(void *arg) +{ + spectral_ops_not_registered("get_ctl_noisefloor"); + return 1; +} + +static int8_t +null_get_ext_noisefloor(void *arg) +{ + spectral_ops_not_registered("get_ext_noisefloor"); + return 0; +} + +static uint32_t +null_configure_spectral(void *arg, struct spectral_config *params) +{ + spectral_ops_not_registered("configure_spectral"); + return 0; +} + +static uint32_t +null_get_spectral_config(void *arg, struct spectral_config *params) +{ + spectral_ops_not_registered("get_spectral_config"); + return 0; +} + +static uint32_t +null_get_ent_spectral_mask(void *arg) +{ + spectral_ops_not_registered("get_ent_spectral_mask"); + return 0; +} + +static uint32_t +null_get_mac_address(void *arg, char *addr) +{ + spectral_ops_not_registered("get_mac_address"); + return 0; +} + +static uint32_t +null_get_current_channel(void *arg) +{ + spectral_ops_not_registered("get_current_channel"); + return 0; +} + +static uint32_t +null_reset_hw(void *arg) +{ + spectral_ops_not_registered("get_current_channel"); + return 0; +} + +static uint32_t +null_get_chain_noise_floor(void *arg, int16_t *nf_buf) +{ + spectral_ops_not_registered("get_chain_noise_floor"); + return 0; +} + +static int +null_spectral_process_phyerr(struct target_if_spectral *spectral, + uint8_t *data, + uint32_t datalen, + struct target_if_spectral_rfqual_info *p_rfqual, + struct target_if_spectral_chan_info *p_chaninfo, + uint64_t tsf64, + struct target_if_spectral_acs_stats *acs_stats) +{ + spectral_ops_not_registered("spectral_process_phyerr"); + return 0; +} + +static int +null_process_spectral_report(struct wlan_objmgr_pdev *pdev, + void *payload) +{ + spectral_ops_not_registered("process_spectral_report"); + return 0; +} +/** + * target_if_spectral_init_dummy_function_table() - + * Initialize target_if internal + * Spectral operations to dummy 
functions + * @ps: Pointer to Spectral target_if internal private data + * + * Initialize all the function pointers in target_if_spectral_ops with + * dummy functions. + * + * Return: None + */ +static void +target_if_spectral_init_dummy_function_table(struct target_if_spectral *ps) +{ + struct target_if_spectral_ops *p_sops = GET_TARGET_IF_SPECTRAL_OPS(ps); + + p_sops->get_tsf64 = null_get_tsf64; + p_sops->get_capability = null_get_capability; + p_sops->set_rxfilter = null_set_rxfilter; + p_sops->get_rxfilter = null_get_rxfilter; + p_sops->is_spectral_enabled = null_is_spectral_enabled; + p_sops->is_spectral_active = null_is_spectral_active; + p_sops->start_spectral_scan = null_start_spectral_scan; + p_sops->stop_spectral_scan = null_stop_spectral_scan; + p_sops->get_extension_channel = null_get_extension_channel; + p_sops->get_ctl_noisefloor = null_get_ctl_noisefloor; + p_sops->get_ext_noisefloor = null_get_ext_noisefloor; + p_sops->configure_spectral = null_configure_spectral; + p_sops->get_spectral_config = null_get_spectral_config; + p_sops->get_ent_spectral_mask = null_get_ent_spectral_mask; + p_sops->get_mac_address = null_get_mac_address; + p_sops->get_current_channel = null_get_current_channel; + p_sops->reset_hw = null_reset_hw; + p_sops->get_chain_noise_floor = null_get_chain_noise_floor; + p_sops->spectral_process_phyerr = null_spectral_process_phyerr; + p_sops->process_spectral_report = null_process_spectral_report; +} + +/** + * target_if_spectral_register_funcs() - Initialize target_if internal Spectral + * operations + * @spectral: Pointer to Spectral target_if internal private data + * @p: Pointer to Spectral function table + * + * Return: None + */ +static void +target_if_spectral_register_funcs(struct target_if_spectral *spectral, + struct target_if_spectral_ops *p) +{ + struct target_if_spectral_ops *p_sops = + GET_TARGET_IF_SPECTRAL_OPS(spectral); + + p_sops->get_tsf64 = p->get_tsf64; + p_sops->get_capability = p->get_capability; + 
p_sops->set_rxfilter = p->set_rxfilter; + p_sops->get_rxfilter = p->get_rxfilter; + p_sops->is_spectral_enabled = p->is_spectral_enabled; + p_sops->is_spectral_active = p->is_spectral_active; + p_sops->start_spectral_scan = p->start_spectral_scan; + p_sops->stop_spectral_scan = p->stop_spectral_scan; + p_sops->get_extension_channel = p->get_extension_channel; + p_sops->get_ctl_noisefloor = p->get_ctl_noisefloor; + p_sops->get_ext_noisefloor = p->get_ext_noisefloor; + p_sops->configure_spectral = p->configure_spectral; + p_sops->get_spectral_config = p->get_spectral_config; + p_sops->get_ent_spectral_mask = p->get_ent_spectral_mask; + p_sops->get_mac_address = p->get_mac_address; + p_sops->get_current_channel = p->get_current_channel; + p_sops->reset_hw = p->reset_hw; + p_sops->get_chain_noise_floor = p->get_chain_noise_floor; + p_sops->spectral_process_phyerr = p->spectral_process_phyerr; + p_sops->process_spectral_report = p->process_spectral_report; +} + +/** + * target_if_spectral_clear_stats() - Clear Spectral stats + * @spectral: Pointer to Spectral target_if internal private data + * + * Function to clear spectral stats + * + * Return: None + */ +static void +target_if_spectral_clear_stats(struct target_if_spectral *spectral) +{ + struct target_if_spectral_ops *p_sops = + GET_TARGET_IF_SPECTRAL_OPS(spectral); + + qdf_mem_zero(&spectral->spectral_stats, + sizeof(struct target_if_spectral_stats)); + spectral->spectral_stats.last_reset_tstamp = + p_sops->get_tsf64(spectral); +} + +/** + * target_if_spectral_check_hw_capability() - Check whether HW supports spectral + * @spectral: Pointer to Spectral target_if internal private data + * + * Function to check whether hardware supports spectral + * + * Return: True if HW supports Spectral, false if HW does not support Spectral + */ +static int +target_if_spectral_check_hw_capability(struct target_if_spectral *spectral) +{ + struct target_if_spectral_ops *p_sops = NULL; + struct spectral_caps *pcap = NULL; + int 
is_spectral_supported = true; + + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + pcap = &spectral->capability; + + if (p_sops->get_capability(spectral, SPECTRAL_CAP_PHYDIAG) == false) { + is_spectral_supported = false; + spectral_info("SPECTRAL : No PHYDIAG support"); + return is_spectral_supported; + } + pcap->phydiag_cap = 1; + + if (p_sops->get_capability(spectral, SPECTRAL_CAP_RADAR) == false) { + is_spectral_supported = false; + spectral_info("SPECTRAL : No RADAR support"); + return is_spectral_supported; + } + pcap->radar_cap = 1; + + if (p_sops->get_capability(spectral, + SPECTRAL_CAP_SPECTRAL_SCAN) == false) { + is_spectral_supported = false; + spectral_info("SPECTRAL : No SPECTRAL SUPPORT"); + return is_spectral_supported; + } + pcap->spectral_cap = 1; + + if (p_sops->get_capability(spectral, SPECTRAL_CAP_ADVNCD_SPECTRAL_SCAN) + == false) { + spectral_info("SPECTRAL : No ADVANCED SPECTRAL SUPPORT"); + } else { + pcap->advncd_spectral_cap = 1; + } + + return is_spectral_supported; +} + +/** + * target_if_spectral_init_param_defaults() - Initialize Spectral + * parameter defaults + * @spectral: Pointer to Spectral target_if internal private data + * + * It is the caller's responsibility to ensure that the Spectral parameters + * structure passed as part of Spectral target_if internal private data is + * valid. 
+ * + * Return: None + */ +static void +target_if_spectral_init_param_defaults(struct target_if_spectral *spectral) +{ + struct spectral_config *params = &spectral->params; + + params->ss_count = SPECTRAL_SCAN_COUNT_DEFAULT; + if (spectral->spectral_gen == SPECTRAL_GEN3) + params->ss_period = SPECTRAL_SCAN_PERIOD_GEN_III_DEFAULT; + else + params->ss_period = SPECTRAL_SCAN_PERIOD_GEN_II_DEFAULT; + params->ss_spectral_pri = SPECTRAL_SCAN_PRIORITY_DEFAULT; + params->ss_fft_size = SPECTRAL_SCAN_FFT_SIZE_DEFAULT; + params->ss_gc_ena = SPECTRAL_SCAN_GC_ENA_DEFAULT; + params->ss_restart_ena = SPECTRAL_SCAN_RESTART_ENA_DEFAULT; + params->ss_noise_floor_ref = SPECTRAL_SCAN_NOISE_FLOOR_REF_DEFAULT; + params->ss_init_delay = SPECTRAL_SCAN_INIT_DELAY_DEFAULT; + params->ss_nb_tone_thr = SPECTRAL_SCAN_NB_TONE_THR_DEFAULT; + params->ss_str_bin_thr = SPECTRAL_SCAN_STR_BIN_THR_DEFAULT; + params->ss_wb_rpt_mode = SPECTRAL_SCAN_WB_RPT_MODE_DEFAULT; + params->ss_rssi_rpt_mode = SPECTRAL_SCAN_RSSI_RPT_MODE_DEFAULT; + params->ss_rssi_thr = SPECTRAL_SCAN_RSSI_THR_DEFAULT; + params->ss_pwr_format = SPECTRAL_SCAN_PWR_FORMAT_DEFAULT; + params->ss_rpt_mode = SPECTRAL_SCAN_RPT_MODE_DEFAULT; + params->ss_bin_scale = SPECTRAL_SCAN_BIN_SCALE_DEFAULT; + params->ss_dbm_adj = SPECTRAL_SCAN_DBM_ADJ_DEFAULT; + /* + * XXX + * SPECTRAL_SCAN_CHN_MASK_DEFAULT (0x1) specifies that chain 0 is to be + * used + * for Spectral. This is expected to be an optimal configuration for + * most chipsets considering aspects like power save. But this can later + * optionally be changed to be set to the default system Rx chainmask + * advertised by FW (if required for some purpose), once the Convergence + * framework supports such retrieval at pdev attach time. 
+ */ + params->ss_chn_mask = SPECTRAL_SCAN_CHN_MASK_DEFAULT; + params->ss_short_report = SPECTRAL_SCAN_SHORT_REPORT_DEFAULT; + params->ss_fft_period = SPECTRAL_SCAN_FFT_PERIOD_DEFAULT; +} + +#ifdef QCA_SUPPORT_SPECTRAL_SIMULATION +/** + * target_if_spectral_detach_simulation() - De-initialize Spectral + * Simulation functionality + * @spectral: Pointer to Spectral target_if internal private data + * + * Function to de-initialize Spectral Simulation functionality + * + * Return: None + */ +static void +target_if_spectral_detach_simulation(struct target_if_spectral *spectral) +{ + target_if_spectral_sim_detach(spectral); +} + +#else +static void +target_if_spectral_detach_simulation(struct target_if_spectral *spectral) +{ +} +#endif + +/** + * target_if_spectral_detach() - De-initialize target_if Spectral + * @pdev: Pointer to pdev object + * + * Function to detach target_if spectral + * + * Return: None + */ +static void +target_if_spectral_detach(struct target_if_spectral *spectral) +{ + spectral_info("spectral detach"); + + if (spectral) { + qdf_spinlock_destroy(&spectral->param_info.osps_lock); + + target_if_spectral_detach_simulation(spectral); + + qdf_spinlock_destroy(&spectral->spectral_lock); + qdf_spinlock_destroy(&spectral->noise_pwr_reports_lock); + + qdf_mem_free(spectral); + spectral = NULL; + } +} + +#ifdef QCA_SUPPORT_SPECTRAL_SIMULATION +/** + * target_if_spectral_attach_simulation() - Initialize Spectral Simulation + * functionality + * @spectral: Pointer to Spectral target_if internal private data + * + * Function to initialize spectral simulation functionality + * + * Return: 0 on success, negative error code on failure + */ +static int +target_if_spectral_attach_simulation(struct target_if_spectral *spectral) +{ + if (target_if_spectral_sim_attach(spectral)) { + qdf_mem_free(spectral); + return -EPERM; + } + return 0; +} + +#else +static int +target_if_spectral_attach_simulation(struct target_if_spectral *spectral) +{ + return 0; +} +#endif + +/** 
+ * target_if_pdev_spectral_init() - Initialize target_if Spectral + * functionality for the given pdev + * @pdev: Pointer to pdev object + * + * Function to initialize pointer to spectral target_if internal private data + * + * Return: On success, pointer to Spectral target_if internal private data, on + * failure, NULL + */ +void * +target_if_pdev_spectral_init(struct wlan_objmgr_pdev *pdev) +{ + struct target_if_spectral_ops *p_sops = NULL; + struct target_if_spectral *spectral = NULL; +#ifdef CONFIG_WIN + uint32_t target_type; + uint32_t target_revision; + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_target_tx_ops *tx_ops; +#endif + + if (!pdev) { + spectral_err("SPECTRAL: pdev is NULL!"); + return NULL; + } + spectral = (struct target_if_spectral *)qdf_mem_malloc( + sizeof(struct target_if_spectral)); + if (!spectral) { + spectral_err("SPECTRAL : Memory allocation failed"); + return spectral; + } + qdf_mem_zero(spectral, sizeof(struct target_if_spectral)); + /* Store pdev in Spectral */ + spectral->pdev_obj = pdev; + +#ifdef CONFIG_WIN + psoc = wlan_pdev_get_psoc(pdev); + + tx_ops = &psoc->soc_cb.tx_ops.target_tx_ops; + + if (tx_ops->tgt_get_tgt_type) { + target_type = tx_ops->tgt_get_tgt_type(psoc); + } else { + qdf_mem_free(spectral); + return NULL; + } + + if (tx_ops->tgt_get_tgt_revision) { + target_revision = tx_ops->tgt_get_tgt_revision(psoc); + } else { + qdf_mem_free(spectral); + return NULL; + } +#endif + + /* init the function ptr table */ + target_if_spectral_init_dummy_function_table(spectral); + + /* get spectral function table */ + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + /* TODO : Should this be called here of after ath_attach ? 
*/ + if (p_sops->get_capability(spectral, SPECTRAL_CAP_PHYDIAG)) + spectral_info("HAL_CAP_PHYDIAG : Capable"); + + /* TODO: Need to fix the capablity check for RADAR */ + if (p_sops->get_capability(spectral, SPECTRAL_CAP_RADAR)) + spectral_info("HAL_CAP_RADAR : Capable"); + + /* TODO : Need to fix the capablity check for SPECTRAL */ + /* TODO : Should this be called here of after ath_attach ? */ + if (p_sops->get_capability(spectral, SPECTRAL_CAP_SPECTRAL_SCAN)) + spectral_info("HAL_CAP_SPECTRAL_SCAN : Capable"); + + qdf_spinlock_create(&spectral->spectral_lock); + qdf_spinlock_create(&spectral->noise_pwr_reports_lock); + target_if_spectral_clear_stats(spectral); + +#ifdef CONFIG_WIN + if (target_type == TARGET_TYPE_QCA8074) { + spectral->fftbin_size_war = 1; + spectral->inband_fftbin_size_adj = 1; + } else { + spectral->fftbin_size_war = 0; + spectral->inband_fftbin_size_adj = 0; + } + if ((target_type == TARGET_TYPE_QCA8074) || ( + target_type == TARGET_TYPE_QCA6290)) { + spectral->spectral_gen = SPECTRAL_GEN3; + spectral->hdr_sig_exp = SPECTRAL_PHYERR_SIGNATURE_GEN3; + spectral->tag_sscan_summary_exp = + TLV_TAG_SPECTRAL_SUMMARY_REPORT_GEN3; + spectral->tag_sscan_fft_exp = TLV_TAG_SEARCH_FFT_REPORT_GEN3; + spectral->tlvhdr_size = SPECTRAL_PHYERR_TLVSIZE_GEN3; + } else +#endif + { + spectral->spectral_gen = SPECTRAL_GEN2; + spectral->hdr_sig_exp = SPECTRAL_PHYERR_SIGNATURE_GEN2; + spectral->tag_sscan_summary_exp = + TLV_TAG_SPECTRAL_SUMMARY_REPORT_GEN2; + spectral->tag_sscan_fft_exp = TLV_TAG_SEARCH_FFT_REPORT_GEN2; + spectral->tlvhdr_size = sizeof(struct spectral_phyerr_tlv_gen2); + } + + /* Set the default values for spectral parameters */ + target_if_spectral_init_param_defaults(spectral); + /* Init spectral capability */ + target_if_init_spectral_capability(spectral); + if (target_if_spectral_attach_simulation(spectral) < 0) + return NULL; + + target_if_init_spectral_ops(spectral); + + qdf_spinlock_create(&spectral->param_info.osps_lock); + 
spectral->param_info.osps_cache.osc_is_valid = 0; + + target_if_spectral_register_funcs(spectral, &spectral_ops); + + if (target_if_spectral_check_hw_capability(spectral) == false) { + target_if_spectral_detach(spectral); + spectral = NULL; + } else { + /* + * TODO: Once the driver architecture transitions to chipset + * versioning based checks, reflect this here. + */ + spectral->is_160_format = false; + spectral->is_lb_edge_extrabins_format = false; + spectral->is_rb_edge_extrabins_format = false; +#ifdef CONFIG_WIN + + if (target_type == TARGET_TYPE_QCA9984 || + target_type == TARGET_TYPE_QCA9888) { + spectral->is_160_format = true; + spectral->is_lb_edge_extrabins_format = true; + spectral->is_rb_edge_extrabins_format = true; + } else if ((target_type == TARGET_TYPE_AR900B) && + (target_revision == AR900B_REV_2)) { + spectral->is_rb_edge_extrabins_format = true; + } + + if (target_type == TARGET_TYPE_QCA9984 || + target_type == TARGET_TYPE_QCA9888) + spectral->is_sec80_rssi_war_required = true; + spectral->use_nl_bcast = true; +#else + spectral->use_nl_bcast = false; +#endif + } + + return spectral; +} + +/** + * target_if_pdev_spectral_deinit() - De-initialize target_if Spectral + * functionality for the given pdev + * @pdev: Pointer to pdev object + * + * Function to de-initialize pointer to spectral target_if internal private data + * + * Return: None + */ +void +target_if_pdev_spectral_deinit(struct wlan_objmgr_pdev *pdev) +{ + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("SPECTRAL : Module doesn't exist"); + return; + } + target_if_spectral_detach(spectral); + + return; +} + +/** + * target_if_set_spectral_config() - Set spectral config + * @pdev: Pointer to pdev object + * @threshtype: config type + * @value: config value + * + * API to set spectral configurations + * + * Return: 0 on success else failure + */ +int +target_if_set_spectral_config(struct 
wlan_objmgr_pdev *pdev, + const uint32_t threshtype, const uint32_t value) +{ + struct spectral_config params; + struct target_if_spectral_ops *p_sops = NULL; + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + if (!spectral) { + spectral_err("spectral object is NULL"); + return -EPERM; + } + + switch (threshtype) { + case SPECTRAL_PARAM_FFT_PERIOD: + spectral->params.ss_fft_period = value; + break; + case SPECTRAL_PARAM_SCAN_PERIOD: + spectral->params.ss_period = value; + break; + case SPECTRAL_PARAM_SCAN_COUNT: + spectral->params.ss_count = value; + break; + case SPECTRAL_PARAM_SHORT_REPORT: + spectral->params.ss_short_report = (!!value) ? true : false; + break; + case SPECTRAL_PARAM_SPECT_PRI: + spectral->params.ss_spectral_pri = (!!value) ? true : false; + break; + case SPECTRAL_PARAM_FFT_SIZE: + spectral->params.ss_fft_size = value; + break; + case SPECTRAL_PARAM_GC_ENA: + spectral->params.ss_gc_ena = !!value; + break; + case SPECTRAL_PARAM_RESTART_ENA: + spectral->params.ss_restart_ena = !!value; + break; + case SPECTRAL_PARAM_NOISE_FLOOR_REF: + spectral->params.ss_noise_floor_ref = value; + break; + case SPECTRAL_PARAM_INIT_DELAY: + spectral->params.ss_init_delay = value; + break; + case SPECTRAL_PARAM_NB_TONE_THR: + spectral->params.ss_nb_tone_thr = value; + break; + case SPECTRAL_PARAM_STR_BIN_THR: + spectral->params.ss_str_bin_thr = value; + break; + case SPECTRAL_PARAM_WB_RPT_MODE: + spectral->params.ss_wb_rpt_mode = !!value; + break; + case SPECTRAL_PARAM_RSSI_RPT_MODE: + spectral->params.ss_rssi_rpt_mode = !!value; + break; + case SPECTRAL_PARAM_RSSI_THR: + spectral->params.ss_rssi_thr = value; + break; + case SPECTRAL_PARAM_PWR_FORMAT: + spectral->params.ss_pwr_format = !!value; + break; + case SPECTRAL_PARAM_RPT_MODE: + spectral->params.ss_rpt_mode = value; + break; + case SPECTRAL_PARAM_BIN_SCALE: + spectral->params.ss_bin_scale = value; + 
break; + case SPECTRAL_PARAM_DBM_ADJ: + spectral->params.ss_dbm_adj = !!value; + break; + case SPECTRAL_PARAM_CHN_MASK: + spectral->params.ss_chn_mask = value; + break; + } + + p_sops->configure_spectral(spectral, &spectral->params); + /* only to validate the writes */ + p_sops->get_spectral_config(spectral, ¶ms); + return 0; +} + +/** + * target_if_get_fft_bin_count() - Get fft bin count for a given fft length + * @fft_len: FFT length + * @pdev: Pointer to pdev object + * + * API to get fft bin count for a given fft length + * + * Return: FFt bin count + */ +static int +target_if_get_fft_bin_count(int fft_len) +{ + int bin_count = 0; + + switch (fft_len) { + case 5: + bin_count = 16; + break; + case 6: + bin_count = 32; + break; + case 7: + bin_count = 64; + break; + case 8: + bin_count = 128; + break; + case 9: + bin_count = 256; + break; + default: + break; + } + + return bin_count; +} + +/** + * target_if_init_upper_lower_flags() - Initializes control and extension + * segment flags + * @fft_len: FFT length + * @pdev: Pointer to pdev object + * + * API to initialize the control and extension flags with the lower/upper + * segment based on the HT mode + * + * Return: FFt bin count + */ +static void +target_if_init_upper_lower_flags(struct target_if_spectral *spectral) +{ + int current_channel = 0; + int ext_channel = 0; + struct target_if_spectral_ops *p_sops = + GET_TARGET_IF_SPECTRAL_OPS(spectral); + + current_channel = p_sops->get_current_channel(spectral); + ext_channel = p_sops->get_extension_channel(spectral); + + if ((current_channel == 0) || (ext_channel == 0)) + return; + + if (spectral->sc_spectral_20_40_mode) { + /* HT40 mode */ + if (ext_channel < current_channel) { + spectral->lower_is_extension = 1; + spectral->upper_is_control = 1; + spectral->lower_is_control = 0; + spectral->upper_is_extension = 0; + } else { + spectral->lower_is_extension = 0; + spectral->upper_is_control = 0; + spectral->lower_is_control = 1; + spectral->upper_is_extension = 
1; + } + } else { + /* HT20 mode, lower is always control */ + spectral->lower_is_extension = 0; + spectral->upper_is_control = 0; + spectral->lower_is_control = 1; + spectral->upper_is_extension = 0; + } +} + +/** + * target_if_get_spectral_config() - Get spectral configuration + * @pdev: Pointer to pdev object + * @param: Pointer to spectral_config structure in which the configuration + * should be returned + * + * API to get the current spectral configuration + * + * Return: None + */ +void +target_if_get_spectral_config(struct wlan_objmgr_pdev *pdev, + struct spectral_config *param) +{ + struct target_if_spectral_ops *p_sops = NULL; + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + + qdf_mem_zero(param, sizeof(struct spectral_config)); + p_sops->get_spectral_config(spectral, param); +} + +/** + * target_if_spectral_scan_enable_params() - Enable use of desired Spectral + * parameters + * @spectral: Pointer to Spectral target_if internal private data + * @spectral_params: Pointer to Spectral parameters + * + * Enable use of desired Spectral parameters by configuring them into HW, and + * starting Spectral scan + * + * Return: 0 on success, 1 on failure + */ +int +target_if_spectral_scan_enable_params(struct target_if_spectral *spectral, + struct spectral_config *spectral_params) +{ + int extension_channel = 0; + int current_channel = 0; + struct target_if_spectral_ops *p_sops = NULL; + struct wlan_objmgr_vdev *vdev = NULL; + + if (!spectral) { + spectral_err("SPECTRAL : Spectral is NULL"); + return 1; + } + + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + + if (!p_sops) { + spectral_err("SPECTRAL : p_sops is NULL"); + return 1; + } + + spectral->sc_spectral_noise_pwr_cal = + spectral_params->ss_spectral_pri ? 
1 : 0; + + /* check if extension channel is present */ + extension_channel = p_sops->get_extension_channel(spectral); + current_channel = p_sops->get_current_channel(spectral); + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) + return 1; + + spectral->ch_width = target_if_vdev_get_ch_width(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + if (spectral->ch_width == CH_WIDTH_INVALID) + return 1; + + if (spectral->capability.advncd_spectral_cap) { + spectral->lb_edge_extrabins = 0; + spectral->rb_edge_extrabins = 0; + + if (spectral->is_lb_edge_extrabins_format && + spectral->params.ss_rpt_mode == 2) { + spectral->lb_edge_extrabins = 4; + } + + if (spectral->is_rb_edge_extrabins_format && + spectral->params.ss_rpt_mode == 2) { + spectral->rb_edge_extrabins = 4; + } + + if (spectral->ch_width == CH_WIDTH_20MHZ) { + spectral->sc_spectral_20_40_mode = 0; + + spectral->spectral_numbins = + target_if_get_fft_bin_count( + spectral->params.ss_fft_size); + spectral->spectral_fft_len = + target_if_get_fft_bin_count( + spectral->params.ss_fft_size); + spectral->spectral_data_len = + target_if_get_fft_bin_count( + spectral->params.ss_fft_size); + /* + * Initialize classifier params to be sent to user + * space classifier + */ + spectral->classifier_params.lower_chan_in_mhz = + current_channel; + spectral->classifier_params.upper_chan_in_mhz = 0; + + } else if (spectral->ch_width == CH_WIDTH_40MHZ) { + /* TODO : Remove this variable */ + spectral->sc_spectral_20_40_mode = 1; + spectral->spectral_numbins = + target_if_get_fft_bin_count( + spectral->params.ss_fft_size); + spectral->spectral_fft_len = + target_if_get_fft_bin_count( + spectral->params.ss_fft_size); + spectral->spectral_data_len = + target_if_get_fft_bin_count( + spectral->params.ss_fft_size); + + /* + * Initialize classifier params to be sent to user + * space classifier + */ + if (extension_channel < current_channel) { + spectral->classifier_params.lower_chan_in_mhz = + 
extension_channel; + spectral->classifier_params.upper_chan_in_mhz = + current_channel; + } else { + spectral->classifier_params.lower_chan_in_mhz = + current_channel; + spectral->classifier_params.upper_chan_in_mhz = + extension_channel; + } + + } else if (spectral->ch_width == CH_WIDTH_80MHZ) { + /* Set the FFT Size */ + /* TODO : Remove this variable */ + spectral->sc_spectral_20_40_mode = 0; + spectral->spectral_numbins = + target_if_get_fft_bin_count( + spectral->params.ss_fft_size); + spectral->spectral_fft_len = + target_if_get_fft_bin_count( + spectral->params.ss_fft_size); + spectral->spectral_data_len = + target_if_get_fft_bin_count( + spectral->params.ss_fft_size); + + /* + * Initialize classifier params to be sent to user + * space classifier + */ + spectral->classifier_params.lower_chan_in_mhz = + current_channel; + spectral->classifier_params.upper_chan_in_mhz = 0; + + /* + * Initialize classifier params to be sent to user + * space classifier + */ + if (extension_channel < current_channel) { + spectral->classifier_params.lower_chan_in_mhz = + extension_channel; + spectral->classifier_params.upper_chan_in_mhz = + current_channel; + } else { + spectral->classifier_params.lower_chan_in_mhz = + current_channel; + spectral->classifier_params.upper_chan_in_mhz = + extension_channel; + } + + } else if (spectral->ch_width == CH_WIDTH_160MHZ) { + /* Set the FFT Size */ + + /* The below applies to both 160 and 80+80 cases */ + + /* TODO : Remove this variable */ + spectral->sc_spectral_20_40_mode = 0; + spectral->spectral_numbins = + target_if_get_fft_bin_count( + spectral->params.ss_fft_size); + spectral->spectral_fft_len = + target_if_get_fft_bin_count( + spectral->params.ss_fft_size); + spectral->spectral_data_len = + target_if_get_fft_bin_count( + spectral->params.ss_fft_size); + + /* + * Initialize classifier params to be sent to user + * space classifier + */ + spectral->classifier_params.lower_chan_in_mhz = + current_channel; + 
spectral->classifier_params.upper_chan_in_mhz = 0; + + /* + * Initialize classifier params to be sent to user + * space classifier + */ + if (extension_channel < current_channel) { + spectral->classifier_params.lower_chan_in_mhz = + extension_channel; + spectral->classifier_params.upper_chan_in_mhz = + current_channel; + } else { + spectral->classifier_params.lower_chan_in_mhz = + current_channel; + spectral->classifier_params.upper_chan_in_mhz = + extension_channel; + } + } + + if (spectral->spectral_numbins) { + spectral->spectral_numbins += + spectral->lb_edge_extrabins; + spectral->spectral_numbins += + spectral->rb_edge_extrabins; + } + + if (spectral->spectral_fft_len) { + spectral->spectral_fft_len += + spectral->lb_edge_extrabins; + spectral->spectral_fft_len += + spectral->rb_edge_extrabins; + } + + if (spectral->spectral_data_len) { + spectral->spectral_data_len += + spectral->lb_edge_extrabins; + spectral->spectral_data_len += + spectral->rb_edge_extrabins; + } + } else { + /* + * The decision to find 20/40 mode is found based on the + * presence of extension channel + * instead of channel width, as the channel width can + * dynamically change + */ + + if (extension_channel == 0) { + spectral->spectral_numbins = SPECTRAL_HT20_NUM_BINS; + spectral->spectral_dc_index = SPECTRAL_HT20_DC_INDEX; + spectral->spectral_fft_len = SPECTRAL_HT20_FFT_LEN; + spectral->spectral_data_len = + SPECTRAL_HT20_TOTAL_DATA_LEN; + /* only valid in 20-40 mode */ + spectral->spectral_lower_max_index_offset = -1; + /* only valid in 20-40 mode */ + spectral->spectral_upper_max_index_offset = -1; + spectral->spectral_max_index_offset = + spectral->spectral_fft_len + 2; + spectral->sc_spectral_20_40_mode = 0; + + /* + * Initialize classifier params to be sent to user + * space classifier + */ + spectral->classifier_params.lower_chan_in_mhz = + current_channel; + spectral->classifier_params.upper_chan_in_mhz = 0; + + } else { + spectral->spectral_numbins = + 
SPECTRAL_HT40_TOTAL_NUM_BINS; + spectral->spectral_fft_len = SPECTRAL_HT40_FFT_LEN; + spectral->spectral_data_len = + SPECTRAL_HT40_TOTAL_DATA_LEN; + spectral->spectral_dc_index = SPECTRAL_HT40_DC_INDEX; + /* only valid in 20 mode */ + spectral->spectral_max_index_offset = -1; + spectral->spectral_lower_max_index_offset = + spectral->spectral_fft_len + 2; + spectral->spectral_upper_max_index_offset = + spectral->spectral_fft_len + 5; + spectral->sc_spectral_20_40_mode = 1; + + /* + * Initialize classifier params to be sent to user + * space classifier + */ + if (extension_channel < current_channel) { + spectral->classifier_params.lower_chan_in_mhz = + extension_channel; + spectral->classifier_params.upper_chan_in_mhz = + current_channel; + } else { + spectral->classifier_params.lower_chan_in_mhz = + current_channel; + spectral->classifier_params.upper_chan_in_mhz = + extension_channel; + } + } + } + + spectral->send_single_packet = 0; + spectral->classifier_params.spectral_20_40_mode = + spectral->sc_spectral_20_40_mode; + spectral->classifier_params.spectral_dc_index = + spectral->spectral_dc_index; + spectral->spectral_sent_msg = 0; + spectral->classify_scan = 0; + spectral->num_spectral_data = 0; + + if (!p_sops->is_spectral_active(spectral)) { + p_sops->configure_spectral(spectral, spectral_params); + p_sops->start_spectral_scan(spectral); + } else { + } + + /* get current spectral configuration */ + p_sops->get_spectral_config(spectral, &spectral->params); + + target_if_init_upper_lower_flags(spectral); + + return 0; +} + +/** + * target_if_start_spectral_scan() - Start spectral scan + * @pdev: Pointer to pdev object + * + * API to start spectral scan + * + * Return: 0 in case of success, -1 on failure + */ +int +target_if_start_spectral_scan(struct wlan_objmgr_pdev *pdev) +{ + struct target_if_spectral_ops *p_sops = NULL; + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + 
spectral_err("SPECTRAL : Spectral LMAC object is NUll"); + return -EPERM; + } + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + + qdf_spin_lock(&spectral->spectral_lock); + target_if_spectral_scan_enable_params(spectral, &spectral->params); + qdf_spin_unlock(&spectral->spectral_lock); + + return 0; +} + +void +target_if_stop_spectral_scan(struct wlan_objmgr_pdev *pdev) +{ + struct target_if_spectral_ops *p_sops = NULL; + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("SPECTRAL : Spectral LMAC object is NUll "); + return; + } + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + + qdf_spin_lock(&spectral->spectral_lock); + p_sops->stop_spectral_scan(spectral); + if (spectral->classify_scan) { + /* TODO : Check if this logic is necessary */ + spectral->detects_control_channel = 0; + spectral->detects_extension_channel = 0; + spectral->detects_above_dc = 0; + spectral->detects_below_dc = 0; + spectral->classify_scan = 0; + } + + spectral->send_single_packet = 0; + spectral->sc_spectral_scan = 0; + spectral->sc_spectral_noise_pwr_cal = 0; + + /* + * Reset the priority because it stops WLAN rx. 
+ * If it is needed to set, user has to set it explicitly + * + */ + /* Reset Priority */ + spectral->params.ss_spectral_pri = 0; + qdf_spin_unlock(&spectral->spectral_lock); +} + +/** + * target_if_is_spectral_active() - Get whether Spectral is active + * @pdev: Pointer to pdev object + * + * API to get whether Spectral is active + * + * Return: True if Spectral is active, false if Spectral is not active + */ +bool +target_if_is_spectral_active(struct wlan_objmgr_pdev *pdev) +{ + struct target_if_spectral *spectral = NULL; + struct target_if_spectral_ops *p_sops = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + return p_sops->is_spectral_active(spectral); +} + +/** + * target_if_is_spectral_enabled() - Get whether Spectral is enabled + * @pdev: Pointer to pdev object + * + * API to get whether Spectral is enabled + * + * Return: True if Spectral is enabled, false if Spectral is not enabled + */ +bool +target_if_is_spectral_enabled(struct wlan_objmgr_pdev *pdev) +{ + struct target_if_spectral *spectral = NULL; + struct target_if_spectral_ops *p_sops = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + return p_sops->is_spectral_enabled(spectral); +} + +/** + * target_if_set_debug_level() - Set debug level for Spectral + * @pdev: Pointer to pdev object + * @debug_level: Debug level + * + * API to set the debug level for Spectral + * + * Return: 0 in case of success + */ +int +target_if_set_debug_level(struct wlan_objmgr_pdev *pdev, uint32_t debug_level) +{ + spectral_debug_level = (DEBUG_SPECTRAL << debug_level); + return 0; +} + +/** + * target_if_get_debug_level() - Get debug level for Spectral + * @pdev: Pointer to pdev object + * + * API to get the debug level for Spectral + * + * Return: Current debug level + */ +uint32_t +target_if_get_debug_level(struct wlan_objmgr_pdev *pdev) +{ + return spectral_debug_level; +} + +/** + 
* target_if_get_spectral_capinfo() - Get Spectral capability information + * @pdev: Pointer to pdev object + * @outdata: Buffer into which data should be copied + * + * API to get the spectral capability information + * + * Return: void + */ +void +target_if_get_spectral_capinfo(struct wlan_objmgr_pdev *pdev, void *outdata) +{ + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + qdf_mem_copy(outdata, &spectral->capability, + sizeof(struct spectral_caps)); +} + +/** + * target_if_get_spectral_diagstats() - Get Spectral diagnostic statistics + * @pdev: Pointer to pdev object + * @outdata: Buffer into which data should be copied + * + * API to get the spectral diagnostic statistics + * + * Return: void + */ +void +target_if_get_spectral_diagstats(struct wlan_objmgr_pdev *pdev, void *outdata) +{ + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + qdf_mem_copy(outdata, &spectral->diag_stats, + sizeof(struct spectral_diag_stats)); +} + +/** + * target_if_register_wmi_spectral_cmd_ops() - Register wmi_spectral_cmd_ops + * @cmd_ops: Pointer to the structure having wmi_spectral_cmd function pointers + * @pdev: Pointer to pdev object + * + * API for register wmi_spectral_cmd_ops in spectral internal data structure + * + * Return: void + */ +void +target_if_register_wmi_spectral_cmd_ops(struct wlan_objmgr_pdev *pdev, + struct wmi_spectral_cmd_ops *cmd_ops) +{ + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + spectral->param_wmi_cmd_ops.wmi_spectral_configure_cmd_send = + cmd_ops->wmi_spectral_configure_cmd_send; + spectral->param_wmi_cmd_ops.wmi_spectral_enable_cmd_send = + cmd_ops->wmi_spectral_enable_cmd_send; +} + +/** + * target_if_register_netlink_cb() - Register Netlink callbacks + * @pdev: Pointer to pdev object + * @nl_cb: Netlink callbacks to register + * + * Return: void + */ +static void 
+target_if_register_netlink_cb( + struct wlan_objmgr_pdev *pdev, + struct spectral_nl_cb *nl_cb) +{ + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + qdf_mem_copy(&spectral->nl_cb, nl_cb, sizeof(struct spectral_nl_cb)); + + if (spectral->use_nl_bcast) + spectral->send_phy_data = spectral->nl_cb.send_nl_bcast; + else + spectral->send_phy_data = spectral->nl_cb.send_nl_unicast; +} + +/** + * target_if_use_nl_bcast() - Get whether to use broadcast/unicast while sending + * Netlink messages to the application layer + * @pdev: Pointer to pdev object + * + * Return: true for broadcast, false for unicast + */ +static bool +target_if_use_nl_bcast(struct wlan_objmgr_pdev *pdev) +{ + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + return spectral->use_nl_bcast; +} + +/** + * target_if_deregister_netlink_cb() - De-register Netlink callbacks + * @pdev: Pointer to pdev object + * + * Return: void + */ +static void +target_if_deregister_netlink_cb(struct wlan_objmgr_pdev *pdev) +{ + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (!spectral) { + spectral_err("SPECTRAL : Module doesn't exist"); + return; + } + + qdf_mem_zero(&spectral->nl_cb, sizeof(struct spectral_nl_cb)); +} + +static int +target_if_process_spectral_report(struct wlan_objmgr_pdev *pdev, + void *payload) +{ + struct target_if_spectral *spectral = NULL; + struct target_if_spectral_ops *p_sops = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + + return p_sops->process_spectral_report(pdev, payload); +} + +void +target_if_sptrl_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + tx_ops->sptrl_tx_ops.sptrlto_pdev_spectral_init = + target_if_pdev_spectral_init; + tx_ops->sptrl_tx_ops.sptrlto_pdev_spectral_deinit = + target_if_pdev_spectral_deinit; + 
tx_ops->sptrl_tx_ops.sptrlto_set_spectral_config = + target_if_set_spectral_config; + tx_ops->sptrl_tx_ops.sptrlto_get_spectral_config = + target_if_get_spectral_config; + tx_ops->sptrl_tx_ops.sptrlto_start_spectral_scan = + target_if_start_spectral_scan; + tx_ops->sptrl_tx_ops.sptrlto_stop_spectral_scan = + target_if_stop_spectral_scan; + tx_ops->sptrl_tx_ops.sptrlto_is_spectral_active = + target_if_is_spectral_active; + tx_ops->sptrl_tx_ops.sptrlto_is_spectral_enabled = + target_if_is_spectral_enabled; + tx_ops->sptrl_tx_ops.sptrlto_set_debug_level = + target_if_set_debug_level; + tx_ops->sptrl_tx_ops.sptrlto_get_debug_level = + target_if_get_debug_level; + tx_ops->sptrl_tx_ops.sptrlto_get_spectral_capinfo = + target_if_get_spectral_capinfo; + tx_ops->sptrl_tx_ops.sptrlto_get_spectral_diagstats = + target_if_get_spectral_diagstats; + tx_ops->sptrl_tx_ops.sptrlto_register_wmi_spectral_cmd_ops = + target_if_register_wmi_spectral_cmd_ops; + tx_ops->sptrl_tx_ops.sptrlto_register_netlink_cb = + target_if_register_netlink_cb; + tx_ops->sptrl_tx_ops.sptrlto_use_nl_bcast = + target_if_use_nl_bcast; + tx_ops->sptrl_tx_ops.sptrlto_deregister_netlink_cb = + target_if_deregister_netlink_cb; + tx_ops->sptrl_tx_ops.sptrlto_process_spectral_report = + target_if_process_spectral_report; +} +qdf_export_symbol(target_if_sptrl_register_tx_ops); + +void +target_if_spectral_send_intf_found_msg(struct wlan_objmgr_pdev *pdev, + uint16_t cw_int, uint32_t dcs_enabled) +{ + struct spectral_samp_msg *msg = NULL; + struct target_if_spectral_ops *p_sops = NULL; + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + msg = (struct spectral_samp_msg *)spectral->nl_cb.get_nbuff( + spectral->pdev_obj); + + if (msg) { + msg->int_type = cw_int ? 
+ SPECTRAL_DCS_INT_CW : SPECTRAL_DCS_INT_WIFI; + msg->dcs_enabled = dcs_enabled; + msg->signature = SPECTRAL_SIGNATURE; + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + p_sops->get_mac_address(spectral, msg->macaddr); + if (spectral->send_phy_data(pdev) == 0) + spectral->spectral_sent_msg++; + } +} +qdf_export_symbol(target_if_spectral_send_intf_found_msg); diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral.h b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral.h new file mode 100644 index 0000000000000000000000000000000000000000..8064d91465a008d2226f33fe33384cec85410e8a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral.h @@ -0,0 +1,1612 @@ +/* + * Copyright (c) 2011,2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _TARGET_IF_SPECTRAL_H_ +#define _TARGET_IF_SPECTRAL_H_ + +#include +#include +#include +#include +#include +#include +#include +#ifdef DIRECT_BUF_RX_ENABLE +#include +#endif +#ifdef WIN32 +#pragma pack(push, target_if_spectral, 1) +#define __ATTRIB_PACK +#else +#ifndef __ATTRIB_PACK +#define __ATTRIB_PACK __attribute__ ((packed)) +#endif +#endif + +#define spectral_log(level, args...) 
\ +QDF_PRINT_INFO(QDF_PRINT_IDX_SHARED, QDF_MODULE_ID_SPECTRAL, level, ## args) + +#define spectral_logfl(level, format, args...) \ + spectral_log(level, FL(format), ## args) + +#define spectral_fatal(format, args...) \ + spectral_logfl(QDF_TRACE_LEVEL_FATAL, format, ## args) +#define spectral_err(format, args...) \ + spectral_logfl(QDF_TRACE_LEVEL_ERROR, format, ## args) +#define spectral_warn(format, args...) \ + spectral_logfl(QDF_TRACE_LEVEL_WARN, format, ## args) +#define spectral_info(format, args...) \ + spectral_logfl(QDF_TRACE_LEVEL_INFO, format, ## args) +#define spectral_debug(format, args...) \ + spectral_logfl(QDF_TRACE_LEVEL_DEBUG, format, ## args) + +#define STATUS_PASS 1 +#define STATUS_FAIL 0 +#undef spectral_dbg_line +#define spectral_dbg_line() \ + spectral_debug("----------------------------------------------------\n") + +#undef spectral_ops_not_registered +#define spectral_ops_not_registered(str) \ + spectral_info("SPECTRAL : %s not registered\n", (str)) +#undef not_yet_implemented +#define not_yet_implemented() \ + spectral_info("SPECTRAL : %s : %d Not yet implemented\n", \ + __func__, __LINE__) + +#define SPECTRAL_HT20_NUM_BINS 56 +#define SPECTRAL_HT20_FFT_LEN 56 +#define SPECTRAL_HT20_DC_INDEX (SPECTRAL_HT20_FFT_LEN / 2) +#define SPECTRAL_HT20_DATA_LEN 60 +#define SPECTRAL_HT20_TOTAL_DATA_LEN (SPECTRAL_HT20_DATA_LEN + 3) +#define SPECTRAL_HT40_TOTAL_NUM_BINS 128 +#define SPECTRAL_HT40_DATA_LEN 135 +#define SPECTRAL_HT40_TOTAL_DATA_LEN (SPECTRAL_HT40_DATA_LEN + 3) +#define SPECTRAL_HT40_FFT_LEN 128 +#define SPECTRAL_HT40_DC_INDEX (SPECTRAL_HT40_FFT_LEN / 2) + +/* + * Used for the SWAR to obtain approximate combined rssi + * in secondary 80Mhz segment + */ +#define OFFSET_CH_WIDTH_20 65 +#define OFFSET_CH_WIDTH_40 62 +#define OFFSET_CH_WIDTH_80 56 +#define OFFSET_CH_WIDTH_160 50 + +#ifdef BIG_ENDIAN_HOST +#define SPECTRAL_MESSAGE_COPY_CHAR_ARRAY(destp, srcp, len) do { \ + int j; \ + uint32_t *src, *dest; \ + src = (uint32_t *)(srcp); \ + dest 
= (uint32_t *)(destp); \ + for (j = 0; j < roundup((len), sizeof(uint32_t)) / 4; j++) { \ + *(dest + j) = qdf_le32_to_cpu(*(src + j)); \ + } \ + } while (0) +#else +#define SPECTRAL_MESSAGE_COPY_CHAR_ARRAY(destp, srcp, len) \ + OS_MEMCPY((destp), (srcp), (len)); +#endif + +#define DUMMY_NF_VALUE (-123) +/* 5 categories x (lower + upper) bands */ +#define MAX_INTERF 10 +#define HOST_MAX_ANTENNA 3 +/* Mask for time stamp from descriptor */ +#define SPECTRAL_TSMASK 0xFFFFFFFF +#define SPECTRAL_SIGNATURE 0xdeadbeef + +/* START of spectral GEN II HW specific details */ +#define SPECTRAL_PHYERR_SIGNATURE_GEN2 0xbb +#define TLV_TAG_SPECTRAL_SUMMARY_REPORT_GEN2 0xF9 +#define TLV_TAG_ADC_REPORT_GEN2 0xFA +#define TLV_TAG_SEARCH_FFT_REPORT_GEN2 0xFB + +/** + * struct spectral_search_fft_info_gen2 - spectral search fft report for gen2 + * @relpwr_db: Total bin power in db + * @num_str_bins_ib: Number of strong bins + * @base_pwr: Base power + * @total_gain_info: Total gain + * @fft_chn_idx: FFT chain on which report is originated + * @avgpwr_db: Average power in db + * @peak_mag: Peak power seen in the bins + * @peak_inx: Index of bin holding peak power + */ +struct spectral_search_fft_info_gen2 { + uint32_t relpwr_db; + uint32_t num_str_bins_ib; + uint32_t base_pwr; + uint32_t total_gain_info; + uint32_t fft_chn_idx; + uint32_t avgpwr_db; + uint32_t peak_mag; + int16_t peak_inx; +}; + +/* + * XXX Check if we should be handling the endinness difference in some + * other way opaque to the host + */ +#ifdef BIG_ENDIAN_HOST + +/** + * struct spectral_phyerr_tlv_gen2 - phyerr tlv info for big endian host + * @signature: signature + * @tag: tag + * @length: length + */ +struct spectral_phyerr_tlv_gen2 { + uint8_t signature; + uint8_t tag; + uint16_t length; +} __ATTRIB_PACK; + +#else + +/** + * struct spectral_phyerr_tlv_gen2 - phyerr tlv info for little endian host + * @length: length + * @tag: tag + * @signature: signature + */ +struct spectral_phyerr_tlv_gen2 { + uint16_t 
length; + uint8_t tag; + uint8_t signature; +} __ATTRIB_PACK; + +#endif /* BIG_ENDIAN_HOST */ + +/** + * struct spectral_phyerr_hdr_gen2 - phyerr header for gen2 HW + * @hdr_a: Header[0:31] + * @hdr_b: Header[32:63] + */ +struct spectral_phyerr_hdr_gen2 { + uint32_t hdr_a; + uint32_t hdr_b; +}; + +/* + * Segment ID information for 80+80. + * + * If the HW micro-architecture specification extends this DWORD for other + * purposes, then redefine+rename accordingly. For now, the specification + * mentions only segment ID (though this doesn't require an entire DWORD) + * without mention of any generic terminology for the DWORD, or any reservation. + * We use nomenclature accordingly. + */ +typedef uint32_t SPECTRAL_SEGID_INFO; + +/** + * struct spectral_phyerr_fft_gen2 - fft info in phyerr event + * @buf: fft report + */ +struct spectral_phyerr_fft_gen2 { + uint8_t buf[0]; +}; +/* END of spectral GEN II HW specific details */ + +/* START of spectral GEN III HW specific details */ + +#define get_bitfield(value, size, pos) \ + (((value) >> (pos)) & ((1 << (size)) - 1)) +#define unsigned_to_signed(value, width) \ + (((value) >= (1 << ((width) - 1))) ? 
\ + (value - (1 << (width))) : (value)) + +#define SPECTRAL_PHYERR_SIGNATURE_GEN3 (0xFA) +#define TLV_TAG_SPECTRAL_SUMMARY_REPORT_GEN3 (0x02) +#define TLV_TAG_SEARCH_FFT_REPORT_GEN3 (0x03) +#define SPECTRAL_PHYERR_TLVSIZE_GEN3 (4) + +#define PHYERR_HDR_SIG_POS \ + (offsetof(struct spectral_phyerr_fft_report_gen3, fft_hdr_sig)) +#define PHYERR_HDR_TAG_POS \ + (offsetof(struct spectral_phyerr_fft_report_gen3, fft_hdr_tag)) +#define SPECTRAL_FFT_BINS_POS \ + (offsetof(struct spectral_phyerr_fft_report_gen3, buf)) + +/** + * struct phyerr_info - spectral search fft report for gen3 + * @data: handle to phyerror buffer + * @datalen: length of phyerror bufer + * @p_rfqual: rf quality matrices + * @p_chaninfo: pointer to chaninfo + * @tsf64: 64 bit TSF + * @acs_stats: acs stats + */ +struct phyerr_info { + uint8_t *data; + uint32_t datalen; + struct target_if_spectral_rfqual_info *p_rfqual; + struct target_if_spectral_chan_info *p_chaninfo; + uint64_t tsf64; + struct target_if_spectral_acs_stats *acs_stats; +}; + +/** + * struct spectral_search_fft_info_gen3 - spectral search fft report for gen3 + * @timestamp: Timestamp at which fft report was generated + * @fft_detector_id: Which radio generated this report + * @fft_num: The FFT count number. Set to 0 for short FFT. 
+ * @fft_radar_check: NA for spectral + * @fft_peak_sidx: Index of bin with maximum power + * @fft_chn_idx: Rx chain index + * @fft_base_pwr_db: Base power in dB + * @fft_total_gain_db: Total gain in dB + * @fft_num_str_bins_ib: Number of strong bins in the report + * @fft_peak_mag: Peak magnitude + * @fft_avgpwr_db: Average power in dB + * @fft_relpwr_db: Relative power in dB + */ +struct spectral_search_fft_info_gen3 { + uint32_t timestamp; + uint32_t fft_detector_id; + uint32_t fft_num; + uint32_t fft_radar_check; + int32_t fft_peak_sidx; + uint32_t fft_chn_idx; + uint32_t fft_base_pwr_db; + uint32_t fft_total_gain_db; + uint32_t fft_num_str_bins_ib; + int32_t fft_peak_mag; + uint32_t fft_avgpwr_db; + uint32_t fft_relpwr_db; +}; + +/** + * struct spectral_phyerr_sfftreport_gen3 - fft info in phyerr event + * @fft_timestamp: Timestamp at which fft report was generated + * @fft_hdr_sig: signature + * @fft_hdr_tag: tag + * @fft_hdr_length: length + * @hdr_a: Header[0:31] + * @hdr_b: Header[32:63] + * @hdr_c: Header[64:95] + * @resv: Header[96:127] + * @buf: fft bins + */ +struct spectral_phyerr_fft_report_gen3 { + uint32_t fft_timestamp; +#ifdef BIG_ENDIAN_HOST + uint8_t fft_hdr_sig; + uint8_t fft_hdr_tag; + uint16_t fft_hdr_length; +#else + uint16_t fft_hdr_length; + uint8_t fft_hdr_tag; + uint8_t fft_hdr_sig; +#endif /* BIG_ENDIAN_HOST */ + uint32_t hdr_a; + uint32_t hdr_b; + uint32_t hdr_c; + uint32_t resv; + uint8_t buf[0]; +} __ATTRIB_PACK; + +/** + * struct spectral_sscan_report_gen3 - spectral report in phyerr event + * @sscan_timestamp: Timestamp at which fft report was generated + * @sscan_hdr_sig: signature + * @sscan_hdr_tag: tag + * @sscan_hdr_length: length + * @hdr_a: Header[0:31] + * @resv: Header[32:63] + * @hdr_b: Header[64:95] + * @resv: Header[96:127] + */ +struct spectral_sscan_report_gen3 { + u_int32_t sscan_timestamp; +#ifdef BIG_ENDIAN_HOST + u_int8_t sscan_hdr_sig; + u_int8_t sscan_hdr_tag; + u_int16_t sscan_hdr_length; +#else + u_int16_t 
sscan_hdr_length; + u_int8_t sscan_hdr_tag; + u_int8_t sscan_hdr_sig; +#endif /* BIG_ENDIAN_HOST */ + u_int32_t hdr_a; + u_int32_t res1; + u_int32_t hdr_b; + u_int32_t res2; +} __ATTRIB_PACK; + +#ifdef DIRECT_BUF_RX_ENABLE +/** + * struct Spectral_report - spectral report + * @data: Report buffer + * @noisefloor: Noise floor values + */ +struct spectral_report { + uint8_t *data; + int32_t noisefloor[DBR_MAX_CHAINS]; +}; +#endif +/* END of spectral GEN III HW specific details */ + +typedef signed char pwr_dbm; + +/** + * enum spectral_gen - spectral hw generation + * @SPECTRAL_GEN1 : spectral hw gen 1 + * @SPECTRAL_GEN2 : spectral hw gen 2 + * @SPECTRAL_GEN3 : spectral hw gen 3 + */ +enum spectral_gen { + SPECTRAL_GEN1, + SPECTRAL_GEN2, + SPECTRAL_GEN3, +}; + +#if ATH_PERF_PWR_OFFLOAD +/** + * enum target_if_spectral_info - Enumerations for specifying which spectral + * information (among parameters and states) + * is desired. + * @TARGET_IF_SPECTRAL_INFO_ACTIVE: Indicated whether spectral is active + * @TARGET_IF_SPECTRAL_INFO_ENABLED: Indicated whether spectral is enabled + * @TARGET_IF_SPECTRAL_INFO_PARAMS: Config params + */ +enum target_if_spectral_info { + TARGET_IF_SPECTRAL_INFO_ACTIVE, + TARGET_IF_SPECTRAL_INFO_ENABLED, + TARGET_IF_SPECTRAL_INFO_PARAMS, +}; +#endif /* ATH_PERF_PWR_OFFLOAD */ + +/* forward declaration */ +struct target_if_spectral; + +/** + * struct target_if_spectral_chan_info - Channel information + * @center_freq1: center frequency 1 in MHz + * @center_freq2: center frequency 2 in MHz -valid only for + * 11ACVHT 80PLUS80 mode + * @chan_width: channel width in MHz + */ +struct target_if_spectral_chan_info { + uint16_t center_freq1; + uint16_t center_freq2; + uint8_t chan_width; +}; + +/** + * struct target_if_spectral_acs_stats - EACS stats from spectral samples + * @nfc_ctl_rssi: Control chan rssi + * @nfc_ext_rssi: Extension chan rssi + * @ctrl_nf: Control chan Noise Floor + * @ext_nf: Extension chan Noise Floor + */ +struct 
target_if_spectral_acs_stats { + int8_t nfc_ctl_rssi; + int8_t nfc_ext_rssi; + int8_t ctrl_nf; + int8_t ext_nf; +}; + +/** + * struct target_if_spectral_perchain_rssi_info - per chain rssi info + * @rssi_pri20: Rssi of primary 20 Mhz + * @rssi_sec20: Rssi of secondary 20 Mhz + * @rssi_sec40: Rssi of secondary 40 Mhz + * @rssi_sec80: Rssi of secondary 80 Mhz + */ +struct target_if_spectral_perchain_rssi_info { + int8_t rssi_pri20; + int8_t rssi_sec20; + int8_t rssi_sec40; + int8_t rssi_sec80; +}; + +/** + * struct target_if_spectral_rfqual_info - RF measurement information + * @rssi_comb: RSSI Information + * @pc_rssi_info: XXX : For now, we know we are getting information + * for only 4 chains at max. For future extensions + * use a define + * @noise_floor: Noise floor information + */ +struct target_if_spectral_rfqual_info { + int8_t rssi_comb; + struct target_if_spectral_perchain_rssi_info pc_rssi_info[4]; + int16_t noise_floor[4]; +}; + +#define GET_TARGET_IF_SPECTRAL_OPS(spectral) \ + ((struct target_if_spectral_ops *)(&((spectral)->spectral_ops))) + +/** + * struct target_if_spectral_ops - spectral low level ops table + * @get_tsf64: Get 64 bit TSF value + * @get_capability: Get capability info + * @set_rxfilter: Set rx filter + * @get_rxfilter: Get rx filter + * @is_spectral_active: Check whether icm is active + * @is_spectral_enabled: Check whether spectral is enabled + * @start_spectral_scan: Start spectral scan + * @stop_spectral_scan: Stop spectral scan + * @get_extension_channel: Get extension channel + * @get_ctl_noisefloor: Get control noise floor + * @get_ext_noisefloor: Get extension noise floor + * @configure_spectral: Set spectral configurations + * @get_spectral_config: Get spectral configurations + * @get_ent_spectral_mask: Get spectral mask + * @get_mac_address: Get mac address + * @get_current_channel: Get current channel + * @reset_hw: Reset HW + * @get_chain_noise_floor: Get Channel noise floor + * @spectral_process_phyerr: Process phyerr 
event + * @process_spectral_report: Process spectral report + */ +struct target_if_spectral_ops { + uint64_t (*get_tsf64)(void *arg); + uint32_t (*get_capability)( + void *arg, enum spectral_capability_type type); + uint32_t (*set_rxfilter)(void *arg, int rxfilter); + uint32_t (*get_rxfilter)(void *arg); + uint32_t (*is_spectral_active)(void *arg); + uint32_t (*is_spectral_enabled)(void *arg); + uint32_t (*start_spectral_scan)(void *arg); + uint32_t (*stop_spectral_scan)(void *arg); + uint32_t (*get_extension_channel)(void *arg); + int8_t (*get_ctl_noisefloor)(void *arg); + int8_t (*get_ext_noisefloor)(void *arg); + uint32_t (*configure_spectral)( + void *arg, + struct spectral_config *params); + uint32_t (*get_spectral_config)( + void *arg, + struct spectral_config *params); + uint32_t (*get_ent_spectral_mask)(void *arg); + uint32_t (*get_mac_address)(void *arg, char *addr); + uint32_t (*get_current_channel)(void *arg); + uint32_t (*reset_hw)(void *arg); + uint32_t (*get_chain_noise_floor)(void *arg, int16_t *nf_buf); + int (*spectral_process_phyerr)(struct target_if_spectral *spectral, + uint8_t *data, uint32_t datalen, + struct target_if_spectral_rfqual_info *p_rfqual, + struct target_if_spectral_chan_info *p_chaninfo, + uint64_t tsf64, + struct target_if_spectral_acs_stats *acs_stats); + int (*process_spectral_report)(struct wlan_objmgr_pdev *pdev, + void *payload); +}; + +/** + * struct target_if_spectral_stats - spectral stats info + * @num_spectral_detects: Total num. 
of spectral detects + * @total_phy_errors: Total number of phyerrors + * @owl_phy_errors: Indicated phyerrors in old gen1 chipsets + * @pri_phy_errors: Phyerrors in primary channel + * @ext_phy_errors: Phyerrors in secondary channel + * @dc_phy_errors: Phyerrors due to dc + * @early_ext_phy_errors: Early secondary channel phyerrors + * @bwinfo_errors: Bandwidth info errors + * @datalen_discards: Invalid data length errors, seen in gen1 chipsets + * @rssi_discards bw: Indicates reports dropped due to RSSI threshold + * @last_reset_tstamp: Last reset time stamp + */ +struct target_if_spectral_stats { + uint32_t num_spectral_detects; + uint32_t total_phy_errors; + uint32_t owl_phy_errors; + uint32_t pri_phy_errors; + uint32_t ext_phy_errors; + uint32_t dc_phy_errors; + uint32_t early_ext_phy_errors; + uint32_t bwinfo_errors; + uint32_t datalen_discards; + uint32_t rssi_discards; + uint64_t last_reset_tstamp; +}; + +/** + * struct target_if_spectral_event - spectral event structure + * @se_ts: Original 15 bit recv timestamp + * @se_full_ts: 64-bit full timestamp from interrupt time + * @se_rssi: Rssi of spectral event + * @se_bwinfo: Rssi of spectral event + * @se_dur: Duration of spectral pulse + * @se_chanindex: Channel of event + * @se_list: List of spectral events + */ +struct target_if_spectral_event { + uint32_t se_ts; + uint64_t se_full_ts; + uint8_t se_rssi; + uint8_t se_bwinfo; + uint8_t se_dur; + uint8_t se_chanindex; + + STAILQ_ENTRY(spectral_event) se_list; +}; + +/** + * struct target_if_chain_noise_pwr_info - Noise power info for each channel + * @rptcount: Count of reports in pwr array + * @un_cal_nf: Uncalibrated noise floor + * @factory_cal_nf: Noise floor as calibrated at the factory for module + * @median_pwr: Median power (median of pwr array) + * @pwr: Power reports + */ +struct target_if_chain_noise_pwr_info { + int rptcount; + pwr_dbm un_cal_nf; + pwr_dbm factory_cal_nf; + pwr_dbm median_pwr; + pwr_dbm pwr[]; +} __ATTRIB_PACK; + +/** + * struct 
target_if_spectral_chan_stats - Channel information + * @cycle_count: Cycle count + * @channel_load: Channel load + * @per: Period + * @noisefloor: Noise floor + * @comp_usablity: Computed usability + * @maxregpower: Maximum allowed regulatary power + * @comp_usablity_sec80: Computed usability of secondary 80 Mhz + * @maxregpower_sec80: Max regulatory power in secondary 80 Mhz + */ +struct target_if_spectral_chan_stats { + int cycle_count; + int channel_load; + int per; + int noisefloor; + uint16_t comp_usablity; + int8_t maxregpower; + uint16_t comp_usablity_sec80; + int8_t maxregpower_sec80; +}; + +#if ATH_PERF_PWR_OFFLOAD + +/** + * struct target_if_spectral_cache - Cache used to minimize WMI operations + * in offload architecture + * @osc_spectral_enabled: Whether Spectral is enabled + * @osc_spectral_active: Whether spectral is active + * XXX: Ideally, we should NOT cache this + * since the hardware can self clear the bit, + * the firmware can possibly stop spectral due to + * intermittent off-channel activity, etc + * A WMI read command should be introduced to handle + * this This will be discussed. 
+ * @osc_params: Spectral parameters + * @osc_is_valid: Whether the cache is valid + */ +struct target_if_spectral_cache { + uint8_t osc_spectral_enabled; + uint8_t osc_spectral_active; + struct spectral_config osc_params; + uint8_t osc_is_valid; +}; + +/** + * struct target_if_spectral_param_state_info - Structure used to represent and + * manage spectral information + * (parameters and states) + * @osps_lock: Lock to synchronize accesses to information + * @osps_cache: Cacheable' information + */ +struct target_if_spectral_param_state_info { + qdf_spinlock_t osps_lock; + struct target_if_spectral_cache osps_cache; + /* XXX - Non-cacheable information goes here, in the future */ +}; +#endif /* ATH_PERF_PWR_OFFLOAD */ + +struct vdev_spectral_configure_params; +struct vdev_spectral_enable_params; + +/** + * struct wmi_spectral_cmd_ops - structure used holding the operations + * related to wmi commands on spectral parameters. + * @wmi_spectral_configure_cmd_send: + * @wmi_spectral_enable_cmd_send: + */ +struct wmi_spectral_cmd_ops { + QDF_STATUS (*wmi_spectral_configure_cmd_send)( + void *wmi_hdl, + struct vdev_spectral_configure_params *param); + QDF_STATUS (*wmi_spectral_enable_cmd_send)( + void *wmi_hdl, + struct vdev_spectral_enable_params *param); +}; + +/** + * struct target_if_spectral - main spectral structure + * @pdev: Pointer to pdev + * @spectral_ops: Target if internal Spectral low level operations table + * @capability: Spectral capabilities structure + * @spectral_lock: Lock used for internal Spectral operations + * @spectral_curchan_radindex: Current channel spectral index + * @spectral_extchan_radindex: Extension channel spectral index + * @spectraldomain: Current Spectral domain + * @spectral_proc_phyerr: Flags to process for PHY errors + * @spectral_defaultparams: Default PHY params per Spectral stat + * @spectral_stats: Spectral related stats + * @events: Events structure + * @sc_spectral_ext_chan_ok: Can spectral be detected on the extension 
channel? + * @sc_spectral_combined_rssi_ok: Can use combined spectral RSSI? + * @sc_spectral_20_40_mode: Is AP in 20-40 mode? + * @sc_spectral_noise_pwr_cal: Noise power cal required? + * @sc_spectral_non_edma: Is the spectral capable device Non-EDMA? + * @upper_is_control: Upper segment is primary + * @upper_is_extension: Upper segment is secondary + * @lower_is_control: Lower segment is primary + * @lower_is_extension: Lower segment is secondary + * @sc_spectraltest_ieeechan: IEEE channel number to return to after a spectral + * mute test + * @spectral_numbins: Number of bins + * @spectral_fft_len: FFT length + * @spectral_data_len: Total phyerror report length + * @lb_edge_extrabins: Number of extra bins on left band edge + * @rb_edge_extrabins: Number of extra bins on right band edge + * @spectral_max_index_offset: Max FFT index offset (20 MHz mode) + * @spectral_upper_max_index_offset: Upper max FFT index offset (20/40 MHz mode) + * @spectral_lower_max_index_offset: Lower max FFT index offset (20/40 MHz mode) + * @spectral_dc_index: At which index DC is present + * @send_single_packet: Deprecated + * @spectral_sent_msg: Indicates whether we send report to upper layers + * @params: Spectral parameters + * @last_capture_time: Indicates timestamp of previouse report + * @num_spectral_data: Number of Spectral samples received in current session + * @total_spectral_data: Total number of Spectral samples received + * @max_rssi: Maximum RSSI + * @detects_control_channel: NA + * @detects_extension_channel: NA + * @detects_below_dc: NA + * @detects_above_dc: NA + * @sc_scanning: Indicates active wifi scan + * @sc_spectral_scan: Indicates active specral scan + * @sc_spectral_full_scan: Deprecated + * @scan_start_tstamp: Deprecated + * @last_tstamp: Deprecated + * @first_tstamp: Deprecated + * @spectral_samp_count: Deprecated + * @sc_spectral_samp_count: Deprecated + * @noise_pwr_reports_reqd: Number of noise power reports required + * @noise_pwr_reports_recv: Number of 
noise power reports received + * @noise_pwr_reports_lock: Lock used for Noise power report processing + * @noise_pwr_chain_ctl: Noise power report - control channel + * @noise_pwr_chain_ext: Noise power report - extension channel + * @chaninfo: Channel statistics + * @tsf64: Latest TSF Value + * @param_info: Offload architecture Spectral parameter cache information + * @ch_width: Indicates Channel Width 20/40/80/160 MHz with values 0, 1, 2, 3 + * respectively + * @diag_stats: Diagnostic statistics + * @is_160_format: Indicates whether information provided by HW is in altered + * format for 802.11ac 160/80+80 MHz support (QCA9984 onwards) + * @is_lb_edge_extrabins_format: Indicates whether information provided by + * HW has 4 extra bins, at left band edge, for report mode 2 + * @is_rb_edge_extrabins_format: Indicates whether information provided + * by HW has 4 extra bins, at right band edge, for report mode 2 + * @is_sec80_rssi_war_required: Indicates whether the software workaround is + * required to obtain approximate combined RSSI for secondary 80Mhz segment + * @simctx: Spectral Simulation context + * @spectral_gen: Spectral hardware generation + * @hdr_sig_exp: Expected signature in PHYERR TLV header, for the given hardware + * generation + * @tag_sscan_summary_exp: Expected Spectral Scan Summary tag in PHYERR TLV + * header, for the given hardware generation + * @tag_sscan_fft_exp: Expected Spectral Scan FFT report tag in PHYERR TLV + * header, for the given hardware generation + * @tlvhdr_size: Expected PHYERR TLV header size, for the given hardware + * generation + * @nl_cb: Netlink callbacks + * @use_nl_bcast: Whether to use Netlink broadcast/unicast + * @send_phy_data: Send data to the applicaton layer + * @inband_fftbin_size_adj: Whether to carry out FFT bin size adjustment for + * in-band report format. This would be required on some chipsets under the + * following circumstances: In report mode 2 only the in-band bins are DMA'ed. 
+ * Scatter/gather is used. However, the HW generates all bins, not just in-band, + * and reports the number of bins accordingly. The subsystem arranging for the + * DMA cannot change this value. On such chipsets the adjustment required at the + * host driver is to check if report format is 2, and if so halve the number of + * bins reported to get the number actually DMA'ed. + */ +struct target_if_spectral { + struct wlan_objmgr_pdev *pdev_obj; + struct target_if_spectral_ops spectral_ops; + struct spectral_caps capability; + qdf_spinlock_t spectral_lock; + int16_t spectral_curchan_radindex; + int16_t spectral_extchan_radindex; + uint32_t spectraldomain; + uint32_t spectral_proc_phyerr; + struct spectral_config spectral_defaultparams; + struct target_if_spectral_stats spectral_stats; + struct target_if_spectral_event *events; + unsigned int sc_spectral_ext_chan_ok:1, + sc_spectral_combined_rssi_ok:1, + sc_spectral_20_40_mode:1, + sc_spectral_noise_pwr_cal:1, + sc_spectral_non_edma:1; + int upper_is_control; + int upper_is_extension; + int lower_is_control; + int lower_is_extension; + uint8_t sc_spectraltest_ieeechan; + int spectral_numbins; + int spectral_fft_len; + int spectral_data_len; + + /* + * For 11ac chipsets prior to AR900B version 2.0, a max of 512 bins are + * delivered. However, there can be additional bins reported for + * AR900B version 2.0 and QCA9984 as described next: + * + * AR900B version 2.0: An additional tone is processed on the right + * hand side in order to facilitate detection of radar pulses out to + * the extreme band-edge of the channel frequency. Since the HW design + * processes four tones at a time, this requires one additional Dword + * to be added to the search FFT report. + * + * QCA9984: When spectral_scan_rpt_mode = 2, i.e 2-dword summary + + * 1x-oversampled bins (in-band) per FFT, then 8 more bins + * (4 more on left side and 4 more on right side)are added. 
+ */ + + int lb_edge_extrabins; + int rb_edge_extrabins; + int spectral_max_index_offset; + int spectral_upper_max_index_offset; + int spectral_lower_max_index_offset; + int spectral_dc_index; + int send_single_packet; + int spectral_sent_msg; + int classify_scan; + qdf_timer_t classify_timer; + struct spectral_config params; + struct spectral_classifier_params classifier_params; + int last_capture_time; + int num_spectral_data; + int total_spectral_data; + int max_rssi; + int detects_control_channel; + int detects_extension_channel; + int detects_below_dc; + int detects_above_dc; + int sc_scanning; + int sc_spectral_scan; + int sc_spectral_full_scan; + uint64_t scan_start_tstamp; + uint32_t last_tstamp; + uint32_t first_tstamp; + uint32_t spectral_samp_count; + uint32_t sc_spectral_samp_count; + int noise_pwr_reports_reqd; + int noise_pwr_reports_recv; + qdf_spinlock_t noise_pwr_reports_lock; + struct target_if_chain_noise_pwr_info + *noise_pwr_chain_ctl[HOST_MAX_ANTENNA]; + struct target_if_chain_noise_pwr_info + *noise_pwr_chain_ext[HOST_MAX_ANTENNA]; + uint64_t tsf64; +#if ATH_PERF_PWR_OFFLOAD + struct target_if_spectral_param_state_info param_info; +#endif + uint32_t ch_width; + struct spectral_diag_stats diag_stats; + bool is_160_format; + bool is_lb_edge_extrabins_format; + bool is_rb_edge_extrabins_format; + bool is_sec80_rssi_war_required; +#ifdef QCA_SUPPORT_SPECTRAL_SIMULATION + void *simctx; +#endif + enum spectral_gen spectral_gen; + uint8_t hdr_sig_exp; + uint8_t tag_sscan_summary_exp; + uint8_t tag_sscan_fft_exp; + uint8_t tlvhdr_size; + struct wmi_spectral_cmd_ops param_wmi_cmd_ops; + struct spectral_nl_cb nl_cb; + bool use_nl_bcast; + int (*send_phy_data)(struct wlan_objmgr_pdev *pdev); + u_int8_t fftbin_size_war; + u_int8_t inband_fftbin_size_adj; +}; + +/** + * struct target_if_samp_msg_params - Spectral Analysis Messaging Protocol + * data format + * @rssi: RSSI (except for secondary 80 segment) + * @rssi_sec80: RSSI for secondary 80 segment + * 
@lower_rssi: RSSI of lower band + * @upper_rssi: RSSI of upper band + * @chain_ctl_rssi: RSSI for control channel, for all antennas + * @chain_ext_rssi: RSSI for extension channel, for all antennas + * @bwinfo: bandwidth info + * @data_len: length of FFT data (except for secondary 80 segment) + * @data_len_sec80: length of FFT data for secondary 80 segment + * @tstamp: timestamp + * @last_tstamp: last time stamp + * @max_mag: maximum magnitude (except for secondary 80 segment) + * @max_mag_sec80: maximum magnitude for secondary 80 segment + * @max_index: index of max magnitude (except for secondary 80 segment) + * @max_index_sec80: index of max magnitude for secondary 80 segment + * @max_exp: max exp + * @peak: peak frequency (obsolete) + * @pwr_count: number of FFT bins (except for secondary 80 segment) + * @pwr_count_sec80: number of FFT bins in secondary 80 segment + * @nb_lower: This is deprecated + * @nb_upper: This is deprecated + * @max_upper_index: index of max mag in upper band + * @max_lower_index: index of max mag in lower band + * @bin_pwr_data: Contains FFT magnitudes (except for secondary 80 segment) + * @bin_pwr_data_sec80: Contains FFT magnitudes for the secondary 80 segment + * @freq: Center frequency of primary 20MHz channel in MHz + * @vhtop_ch_freq_seg1: VHT operation first segment center frequency in MHz + * @vhtop_ch_freq_seg2: VHT operation second segment center frequency in MHz + * @freq_loading: spectral control duty cycles + * @noise_floor: current noise floor (except for secondary 80 segment) + * @noise_floor_sec80: current noise floor for secondary 80 segment + * @interf_list: List of interfernce sources + * @classifier_params: classifier parameters + * @sc: classifier parameters + */ +struct target_if_samp_msg_params { + int8_t rssi; + int8_t rssi_sec80; + int8_t lower_rssi; + int8_t upper_rssi; + int8_t chain_ctl_rssi[HOST_MAX_ANTENNA]; + int8_t chain_ext_rssi[HOST_MAX_ANTENNA]; + uint16_t bwinfo; + uint16_t datalen; + uint16_t 
datalen_sec80; + uint32_t tstamp; + uint32_t last_tstamp; + uint16_t max_mag; + uint16_t max_mag_sec80; + uint16_t max_index; + uint16_t max_index_sec80; + uint8_t max_exp; + int peak; + int pwr_count; + int pwr_count_sec80; + int8_t nb_lower; + int8_t nb_upper; + uint16_t max_lower_index; + uint16_t max_upper_index; + uint8_t *bin_pwr_data; + uint8_t *bin_pwr_data_sec80; + uint16_t freq; + uint16_t vhtop_ch_freq_seg1; + uint16_t vhtop_ch_freq_seg2; + uint16_t freq_loading; + int16_t noise_floor; + int16_t noise_floor_sec80; + struct interf_src_rsp interf_list; + struct spectral_classifier_params classifier_params; + struct ath_softc *sc; +}; + +#ifdef WLAN_CONV_SPECTRAL_ENABLE +/** + * target_if_spectral_dump_fft() - Dump Spectral FFT + * @pfft: Pointer to Spectral Phyerr FFT + * @fftlen: FFT length + * + * Return: Success or failure + */ +int target_if_spectral_dump_fft(uint8_t *pfft, int fftlen); + +/** + * target_if_dbg_print_samp_param() - Print contents of SAMP struct + * @p: Pointer to SAMP message + * + * Return: Void + */ +void target_if_dbg_print_samp_param(struct target_if_samp_msg_params *p); + +/** + * target_if_get_offset_swar_sec80() - Get offset for SWAR according to + * the channel width + * @channel_width: Channel width + * + * Return: Offset for SWAR + */ +uint32_t target_if_get_offset_swar_sec80(uint32_t channel_width); + +/** + * target_if_sptrl_register_tx_ops() - Register Spectral target_if Tx Ops + * @tx_ops: Tx Ops + * + * Return: void + */ +void target_if_sptrl_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_spectral_create_samp_msg() - Create the spectral samp message + * @spectral : Pointer to spectral internal structure + * @params : spectral samp message parameters + * + * API to create the spectral samp message + * + * Return: void + */ +void target_if_spectral_create_samp_msg( + struct target_if_spectral *spectral, + struct target_if_samp_msg_params *params); + +/** + * 
target_if_spectral_process_phyerr_gen3() - Process phyerror event for gen3 + * @pdev: Pointer to pdev object + * @payload: Pointer to spectral report + * + * Process phyerror event for gen3 + * + * Return: Success/Failure + */ +int target_if_spectral_process_report_gen3( + struct wlan_objmgr_pdev *pdev, + void *buf); + +/** + * target_if_process_phyerr_gen2() - Process PHY Error for gen2 + * @spectral: Pointer to Spectral object + * @data: Pointer to phyerror event buffer + * @datalen: Data length + * @p_rfqual: RF quality info + * @p_chaninfo: Channel info + * @tsf64: 64 bit tsf timestamp + * @acs_stats: ACS stats + * + * Process PHY Error for gen2 + * + * Return: Success/Failure + */ +int target_if_process_phyerr_gen2( + struct target_if_spectral *spectral, + uint8_t *data, + uint32_t datalen, struct target_if_spectral_rfqual_info *p_rfqual, + struct target_if_spectral_chan_info *p_chaninfo, + uint64_t tsf64, + struct target_if_spectral_acs_stats *acs_stats); + +/** + * target_if_spectral_send_intf_found_msg() - Indicate to application layer that + * interference has been found + * @pdev: Pointer to pdev + * @cw_int: 1 if CW interference is found, 0 if WLAN interference is found + * @dcs_enabled: 1 if DCS is enabled, 0 if DCS is disabled + * + * Send message to application layer + * indicating that interference has been found + * + * Return: None + */ +void target_if_spectral_send_intf_found_msg( + struct wlan_objmgr_pdev *pdev, + uint16_t cw_int, uint32_t dcs_enabled); + +/** + * target_if_stop_spectral_scan() - Stop spectral scan + * @pdev: Pointer to pdev object + * + * API to stop the current on-going spectral scan + * + * Return: None + */ +void target_if_stop_spectral_scan(struct wlan_objmgr_pdev *pdev); + +/** + * target_if_spectral_get_vdev() - Get pointer to vdev to be used for Spectral + * operations + * @spectral: Pointer to Spectral target_if internal private data + * + * Spectral operates on pdev. 
However, in order to retrieve some WLAN + * properties, a vdev is required. To facilitate this, the function returns the + * first vdev in our pdev. The caller should release the reference to the vdev + * once it is done using it. + * TODO: If the framework later provides an API to obtain the first active + * vdev, then it would be preferable to use this API. + * + * Return: Pointer to vdev on success, NULL on failure + */ +struct wlan_objmgr_vdev *target_if_spectral_get_vdev( + struct target_if_spectral *spectral); + +/** + * target_if_spectral_dump_hdr_gen2() - Dump Spectral header for gen2 + * @phdr: Pointer to Spectral Phyerr Header + * + * Dump Spectral header + * + * Return: Success/Failure + */ +int target_if_spectral_dump_hdr_gen2(struct spectral_phyerr_hdr_gen2 *phdr); + +/** + * target_if_get_combrssi_sec80_seg_gen2() - Get approximate combined RSSI + * for Secondary 80 segment + * @spectral: Pointer to spectral object + * @p_sfft_sec80: Pointer to search fft info of secondary 80 segment + * + * Get approximate combined RSSI for Secondary 80 segment + * + * Return: Combined RSSI for secondary 80Mhz segment + */ +int8_t target_if_get_combrssi_sec80_seg_gen2( + struct target_if_spectral *spectral, + struct spectral_search_fft_info_gen2 *p_sfft_sec80); + +/** + * target_if_spectral_dump_tlv_gen2() - Dump Spectral TLV for gen2 + * @ptlv: Pointer to Spectral Phyerr TLV + * @is_160_format: Indicates 160 format + * + * Dump Spectral TLV for gen2 + * + * Return: Success/Failure + */ +int target_if_spectral_dump_tlv_gen2( + struct spectral_phyerr_tlv_gen2 *ptlv, bool is_160_format); + +/** + * target_if_spectral_dump_phyerr_data_gen2() - Dump Spectral + * related PHY Error for gen2 + * @data: Pointer to phyerror buffer + * @datalen: Data length + * @is_160_format: Indicates 160 format + * + * Dump Spectral related PHY Error for gen2 + * + * Return: Success/Failure + */ +int target_if_spectral_dump_phyerr_data_gen2( + uint8_t *data, + uint32_t datalen, + bool 
is_160_format); + +/** + * target_if_dump_fft_report_gen3() - Dump FFT Report for gen3 + * @spectral: Pointer to Spectral object + * @p_fft_report: Pointer to fft report + * @p_sfft: Pointer to search fft report + * + * Dump FFT Report for gen3 + * + * Return: Success/Failure + */ +int target_if_dump_fft_report_gen3(struct target_if_spectral *spectral, + struct spectral_phyerr_fft_report_gen3 *p_fft_report, + struct spectral_search_fft_info_gen3 *p_sfft); + +/** + * target_if_dbg_print_samp_msg() - Print contents of SAMP Message + * @p: Pointer to SAMP message + * + * Print contents of SAMP Message + * + * Return: Void + */ +void target_if_dbg_print_samp_msg(struct spectral_samp_msg *pmsg); + +/** + * target_if_process_sfft_report_gen3() - Process Search FFT Report for gen3 + * @p_fft_report: Pointer to fft report + * @p_sfft: Pointer to search fft report + * + * Process Search FFT Report for gen3 + * + * Return: Success/Failure + */ +int target_if_process_sfft_report_gen3( + struct spectral_phyerr_fft_report_gen3 *p_fft_report, + struct spectral_search_fft_info_gen3 *p_fft_info); + +/** + * get_target_if_spectral_handle_from_pdev() - Get handle to target_if internal + * Spectral data + * @pdev: Pointer to pdev + * + * Return: Handle to target_if internal Spectral data on success, NULL on + * failure + */ +static inline +struct target_if_spectral *get_target_if_spectral_handle_from_pdev( + struct wlan_objmgr_pdev *pdev) +{ + struct target_if_spectral *spectral = NULL; + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_pdev_get_psoc(pdev); + + spectral = (struct target_if_spectral *) + psoc->soc_cb.rx_ops.sptrl_rx_ops.sptrlro_get_target_handle( + pdev); + return spectral; +} + +/** + * target_if_vdev_get_chan_freq() - Get the operating channel frequency of a + * given vdev + * @pdev: Pointer to vdev + * + * Get the operating channel frequency of a given vdev + * + * Return: Operating channel frequency of a vdev + */ +static inline +int16_t 
target_if_vdev_get_chan_freq(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + spectral_err("psoc is NULL"); + return -EINVAL; + } + + return psoc->soc_cb.rx_ops.sptrl_rx_ops.sptrlro_vdev_get_chan_freq( + vdev); +} + +/** + * target_if_vdev_get_ch_width() - Get the operating channel bandwidth of a + * given vdev + * @pdev: Pointer to vdev + * + * Get the operating channel bandwidth of a given vdev + * + * Return: channel bandwidth enumeration corresponding to the vdev + */ +static inline +enum phy_ch_width target_if_vdev_get_ch_width(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + spectral_err("psoc is NULL"); + return CH_WIDTH_INVALID; + } + + return psoc->soc_cb.rx_ops.sptrl_rx_ops.sptrlro_vdev_get_ch_width( + vdev); +} + +/** + * target_if_vdev_get_sec20chan_freq_mhz() - Get the frequency of secondary + * 20 MHz channel for a given vdev + * @pdev: Pointer to vdev + * + * Get the frequency of secondary 20Mhz channel for a given vdev + * + * Return: Frequency of secondary 20Mhz channel for a given vdev + */ +static inline +int target_if_vdev_get_sec20chan_freq_mhz( + struct wlan_objmgr_vdev *vdev, + uint16_t *sec20chan_freq) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + spectral_err("psoc is NULL"); + return -EINVAL; + } + + return psoc->soc_cb.rx_ops.sptrl_rx_ops. 
+ sptrlro_vdev_get_sec20chan_freq_mhz(vdev, sec20chan_freq); +} + +/** + * target_if_spectral_set_rxchainmask() - Set Spectral Rx chainmask + * @pdev: Pointer to pdev + * @spectral_rx_chainmask: Spectral Rx chainmask + * + * Return: None + */ +static inline +void target_if_spectral_set_rxchainmask(struct wlan_objmgr_pdev *pdev, + uint8_t spectral_rx_chainmask) +{ + struct target_if_spectral *spectral = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + spectral->params.ss_chn_mask = spectral_rx_chainmask; +} + +/** + * target_if_spectral_process_phyerr() - Process Spectral PHY error + * @pdev: Pointer to pdev + * @data: PHY error data received from FW + * @datalen: Length of data + * @p_rfqual: Pointer to RF Quality information + * @p_chaninfo: Pointer to channel information + * @tsf: TSF time instance at which the Spectral sample was received + * @acs_stats: ACS stats + * + * Process Spectral PHY error by extracting necessary information from the data + * sent by FW, and send the extracted information to application layer. 
+ * + * Return: None + */ +static inline +void target_if_spectral_process_phyerr( + struct wlan_objmgr_pdev *pdev, + uint8_t *data, uint32_t datalen, + struct target_if_spectral_rfqual_info *p_rfqual, + struct target_if_spectral_chan_info *p_chaninfo, + uint64_t tsf64, + struct target_if_spectral_acs_stats *acs_stats) +{ + struct target_if_spectral *spectral = NULL; + struct target_if_spectral_ops *p_sops = NULL; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + p_sops->spectral_process_phyerr(spectral, data, datalen, + p_rfqual, p_chaninfo, + tsf64, acs_stats); +} + +/** + * target_if_sops_is_spectral_enabled() - Get whether Spectral is enabled + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Function to check whether Spectral is enabled + * + * Return: True if Spectral is enabled, false if Spectral is not enabled + */ +uint32_t target_if_sops_is_spectral_enabled(void *arg); + +/** + * target_if_sops_is_spectral_active() - Get whether Spectral is active + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Function to check whether Spectral is active + * + * Return: True if Spectral is active, false if Spectral is not active + */ +uint32_t target_if_sops_is_spectral_active(void *arg); + +/** + * target_if_sops_start_spectral_scan() - Start Spectral scan + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Function to start spectral scan + * + * Return: 0 on success else failure + */ +uint32_t target_if_sops_start_spectral_scan(void *arg); + +/** + * target_if_sops_stop_spectral_scan() - Stop Spectral scan + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Function to stop spectral scan + * + * Return: 0 in case of success, -1 on failure + */ +uint32_t target_if_sops_stop_spectral_scan(void *arg); + +/** + * target_if_spectral_get_extension_channel() - Get the current Extension + * 
channel (in MHz) + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Return: Current Extension channel (in MHz) on success, 0 on failure or if + * extension channel is not present. + */ +uint32_t target_if_spectral_get_extension_channel(void *arg); + +/** + * target_if_spectral_get_current_channel() - Get the current channel (in MHz) + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Return: Current channel (in MHz) on success, 0 on failure + */ +uint32_t target_if_spectral_get_current_channel(void *arg); + + +/** + * target_if_spectral_reset_hw() - Reset the hardware + * @arg: Pointer to handle for Spectral target_if internal private data + * + * This is only a placeholder since it is not currently required in the offload + * case. + * + * Return: 0 + */ +uint32_t target_if_spectral_reset_hw(void *arg); + +/** + * target_if_spectral_get_chain_noise_floor() - Get the Chain noise floor from + * Noisefloor history buffer + * @arg: Pointer to handle for Spectral target_if internal private data + * @nf_buf: Pointer to buffer into which chain Noise Floor data should be copied + * + * This is only a placeholder since it is not currently required in the offload + * case. + * + * Return: 0 + */ +uint32_t target_if_spectral_get_chain_noise_floor(void *arg, int16_t *nf_buf); + +/** + * target_if_spectral_get_ext_noisefloor() - Get the extension channel + * noisefloor + * @arg: Pointer to handle for Spectral target_if internal private data + * + * This is only a placeholder since it is not currently required in the offload + * case. + * + * Return: 0 + */ +int8_t target_if_spectral_get_ext_noisefloor(void *arg); + +/** + * target_if_spectral_get_ctl_noisefloor() - Get the control channel noisefloor + * @arg: Pointer to handle for Spectral target_if internal private data + * + * This is only a placeholder since it is not currently required in the offload + * case. 
+ * + * Return: 0 + */ +int8_t target_if_spectral_get_ctl_noisefloor(void *arg); + +/** + * target_if_spectral_get_capability() - Get whether a given Spectral hardware + * capability is available + * @arg: Pointer to handle for Spectral target_if internal private data + * @type: Spectral hardware capability type + * + * Return: True if the capability is available, false if the capability is not + * available + */ +uint32_t target_if_spectral_get_capability( + void *arg, enum spectral_capability_type type); + +/** + * target_if_spectral_set_rxfilter() - Set the RX Filter before Spectral start + * @arg: Pointer to handle for Spectral target_if internal private data + * @rxfilter: Rx filter to be used + * + * Note: This is only a placeholder function. It is not currently required since + * FW should be taking care of setting the required filters. + * + * Return: 0 + */ +uint32_t target_if_spectral_set_rxfilter(void *arg, int rxfilter); + +/** + * target_if_spectral_sops_configure_params() - Configure user supplied Spectral + * parameters + * @arg: Pointer to handle for Spectral target_if internal private data + * @params: Spectral parameters + * + * Return: 0 in case of success, -1 on failure + */ +uint32_t target_if_spectral_sops_configure_params( + void *arg, struct spectral_config *params); + +/** + * target_if_spectral_get_rxfilter() - Get the current RX Filter settings + * @arg: Pointer to handle for Spectral target_if internal private data + * + * Note: This is only a placeholder function. It is not currently required since + * FW should be taking care of setting the required filters. 
+ * + * Return: 0 + */ +uint32_t target_if_spectral_get_rxfilter(void *arg); + +/** + * target_if_pdev_spectral_deinit() - De-initialize target_if Spectral + * functionality for the given pdev + * @pdev: Pointer to pdev object + * + * Return: None + */ +void target_if_pdev_spectral_deinit(struct wlan_objmgr_pdev *pdev); + +/** + * target_if_set_spectral_config() - Set spectral config + * @pdev: Pointer to pdev object + * @threshtype: config type + * @value: config value + * + * API to set spectral configurations + * + * Return: 0 in case of success, -1 on failure + */ +int target_if_set_spectral_config(struct wlan_objmgr_pdev *pdev, + const uint32_t threshtype, + const uint32_t value); + +/** + * target_if_pdev_spectral_init() - Initialize target_if Spectral + * functionality for the given pdev + * @pdev: Pointer to pdev object + * + * Return: On success, pointer to Spectral target_if internal private data, on + * failure, NULL + */ +void *target_if_pdev_spectral_init(struct wlan_objmgr_pdev *pdev); + +/** + * target_if_spectral_sops_get_params() - Get user configured Spectral + * parameters + * @arg: Pointer to handle for Spectral target_if internal private data + * @params: Pointer to buffer into which Spectral parameters should be copied + * + * Return: 0 in case of success, -1 on failure + */ +uint32_t target_if_spectral_sops_get_params( + void *arg, struct spectral_config *params); + +/** + * target_if_init_spectral_capability() - Initialize Spectral capability + * @spectral: Pointer to Spectral target_if internal private data + * + * This is a workaround. 
+ * + * Return: None + */ +void target_if_init_spectral_capability(struct target_if_spectral *spectral); + +/** + * target_if_start_spectral_scan() - Start spectral scan + * @pdev: Pointer to pdev object + * + * API to start spectral scan + * + * Return: 0 in case of success, -1 on failure + */ +int target_if_start_spectral_scan(struct wlan_objmgr_pdev *pdev); + +/** + * target_if_get_spectral_config() - Get spectral configuration + * @pdev: Pointer to pdev object + * @param: Pointer to spectral_config structure in which the configuration + * should be returned + * + * API to get the current spectral configuration + * + * Return: 0 in case of success, -1 on failure + */ +void target_if_get_spectral_config(struct wlan_objmgr_pdev *pdev, + struct spectral_config *param); + +/** + * target_if_spectral_scan_enable_params() - Enable use of desired Spectral + * parameters + * @spectral: Pointer to Spectral target_if internal private data + * @spectral_params: Pointer to Spectral parameters + * + * Enable use of desired Spectral parameters by configuring them into HW, and + * starting Spectral scan + * + * Return: 0 on success, 1 on failure + */ +int target_if_spectral_scan_enable_params( + struct target_if_spectral *spectral, struct spectral_config *spectral_params); + +/** + * target_if_is_spectral_active() - Get whether Spectral is active + * @pdev: Pointer to pdev object + * + * Return: True if Spectral is active, false if Spectral is not active + */ +bool target_if_is_spectral_active(struct wlan_objmgr_pdev *pdev); + +/** + * target_if_is_spectral_enabled() - Get whether Spectral is enabled + * @pdev: Pointer to pdev object + * + * Return: True if Spectral is enabled, false if Spectral is not enabled + */ +bool target_if_is_spectral_enabled(struct wlan_objmgr_pdev *pdev); + +/** + * target_if_set_debug_level() - Set debug level for Spectral + * @pdev: Pointer to pdev object + * @debug_level: Debug level + * + * Return: 0 in case of success + */ +int 
target_if_set_debug_level(struct wlan_objmgr_pdev *pdev, + uint32_t debug_level); + +/** + * target_if_get_debug_level() - Get debug level for Spectral + * @pdev: Pointer to pdev object + * + * Return: Current debug level + */ +uint32_t target_if_get_debug_level(struct wlan_objmgr_pdev *pdev); + + +/** + * target_if_get_spectral_capinfo() - Get Spectral capability information + * @pdev: Pointer to pdev object + * @outdata: Buffer into which data should be copied + * + * Return: void + */ +void target_if_get_spectral_capinfo( + struct wlan_objmgr_pdev *pdev, + void *outdata); + + +/** + * target_if_get_spectral_diagstats() - Get Spectral diagnostic statistics + * @pdev: Pointer to pdev object + * @outdata: Buffer into which data should be copied + * + * Return: void + */ +void target_if_get_spectral_diagstats(struct wlan_objmgr_pdev *pdev, + void *outdata); + +/* + * target_if_spectral_send_tlv_to_host - target_if_spectral_send_tlv_to_host + * @spectral: Send the TLV information to Host + * @data: Pointer to the TLV + * @datalen: tlv length + * + * Return: Success/Failure + * + */ +int target_if_spectral_send_tlv_to_host( + struct target_if_spectral *spectral, + uint8_t *data, uint32_t datalen); + +void target_if_register_wmi_spectral_cmd_ops( + struct wlan_objmgr_pdev *pdev, + struct wmi_spectral_cmd_ops *cmd_ops); + +#ifdef DIRECT_BUF_RX_ENABLE +/** + * target_if_consume_sfft_report_gen3() - Process fft report for gen3 + * @spectral: Pointer to spectral object + * @report: Pointer to spectral report + * + * Process fft report for gen3 + * + * Return: Success/Failure + */ +int +target_if_consume_spectral_report_gen3( + struct target_if_spectral *spectral, + struct spectral_report *report); +#endif + +#ifdef WIN32 +#pragma pack(pop, target_if_spectral) +#endif +#ifdef __ATTRIB_PACK +#undef __ATTRIB_PACK +#endif + +#endif /* WLAN_CONV_SPECTRAL_ENABLE */ +#endif /* _TARGET_IF_SPECTRAL_H_ */ diff --git 
a/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_netlink.c b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_netlink.c new file mode 100644 index 0000000000000000000000000000000000000000..e8bed9457a96c12e3ad7aea959c33650932b746e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_netlink.c @@ -0,0 +1,259 @@ +/* + * Copyright (c) 2011,2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include

/*
 * NOTE(review): the #include directives above lost their header names in
 * this copy of the file — restore them from the original source tree.
 */

/**
 * target_if_spectral_process_noise_pwr_report() - Accumulate noise power
 * readings carried in a SAMP message
 * @spectral: Pointer to target_if spectral object
 * @spec_samp_msg: SAMP message holding per-chain ctl/ext channel RSSI
 *
 * Under noise_pwr_reports_lock, records the per-antenna control/extension
 * channel RSSI values into the per-chain noise power arrays while reports
 * are still required.  Once the requested number of reports has been
 * received, stops the ongoing Spectral scan.
 *
 * Return: void
 */
static void
target_if_spectral_process_noise_pwr_report(
	struct target_if_spectral *spectral,
	const struct spectral_samp_msg *spec_samp_msg)
{
	int i, done;

	qdf_spin_lock(&spectral->noise_pwr_reports_lock);

	/* Nothing to do if no noise power reports were requested */
	if (!spectral->noise_pwr_reports_reqd) {
		qdf_spin_unlock(&spectral->noise_pwr_reports_lock);
		return;
	}

	if (spectral->noise_pwr_reports_recv <
	    spectral->noise_pwr_reports_reqd) {
		spectral->noise_pwr_reports_recv++;

		for (i = 0; i < HOST_MAX_ANTENNA; i++) {
			uint32_t index;

			/*
			 * NOTE(review): rptcount is incremented with no
			 * visible bounds check against the pwr[] capacity;
			 * presumably the arrays are sized for
			 * noise_pwr_reports_reqd entries — confirm at the
			 * allocation site.
			 */
			if (spectral->noise_pwr_chain_ctl[i]) {
				index =
					spectral->noise_pwr_chain_ctl[i]->
					rptcount++;
				spectral->noise_pwr_chain_ctl[i]->pwr[index] =
					spec_samp_msg->samp_data.
					spectral_chain_ctl_rssi[i];
			}
			if (spectral->noise_pwr_chain_ext[i]) {
				index =
					spectral->noise_pwr_chain_ext[i]->
					rptcount++;
				spectral->noise_pwr_chain_ext[i]->pwr[index] =
					spec_samp_msg->samp_data.
					spectral_chain_ext_rssi[i];
			}
		}
	}

	done = (spectral->noise_pwr_reports_recv >=
		spectral->noise_pwr_reports_reqd);

	qdf_spin_unlock(&spectral->noise_pwr_reports_lock);

	/* All requested reports received: stop the Spectral scan */
	if (done) {
		qdf_spin_lock(&spectral->spectral_lock);
		target_if_stop_spectral_scan(spectral->pdev_obj);
		spectral->sc_spectral_scan = 0;
		qdf_spin_unlock(&spectral->spectral_lock);
	}
}

/*
 * Function    : target_if_spectral_create_samp_msg
 * Description : Create a SAMP message and send it to the host
 */

#ifdef SPECTRAL_CLASSIFIER_IN_KERNEL
/**
 * target_if_spectral_init_interf_list() - Copy the interference list into
 * the SAMP data (in-kernel classifier builds)
 * @data: SAMP data to populate
 * @params: SAMP message parameters carrying the interference list
 */
static void
target_if_spectral_init_interf_list(
	struct spectral_samp_data *data,
	struct target_if_samp_msg_params *params)
{
	if (params->interf_list.count)
		OS_MEMCPY(&data->interf_list,
			  &params->interf_list, sizeof(struct interf_src_rsp));
	else
		data->interf_list.count = 0;
}
#else
/* Classifier runs in user space: no interference list to propagate */
static void
target_if_spectral_init_interf_list(
	struct spectral_samp_data *data,
	struct target_if_samp_msg_params *params)
{
	data->interf_list.count = 0;
}
#endif

/**
 * target_if_spectral_create_samp_msg() - Build a SAMP message and send it
 * to the host
 * @spectral: Pointer to target_if spectral object
 * @params: Prepared SAMP message parameters
 *
 * Populates a struct spectral_samp_msg in the netlink buffer supplied by
 * nl_cb.get_nbuff() and hands it off via send_phy_data().
 */
void
target_if_spectral_create_samp_msg(struct target_if_spectral *spectral,
				   struct target_if_samp_msg_params *params)
{
	/*
	 * XXX : Non-Reentrant. Will be an issue with dual concurrent
	 * operation on multi-processor system
	 */

	int temp_samp_msg_len = 0;

	struct spectral_samp_msg *spec_samp_msg;

	struct spectral_samp_data *data = NULL;
	uint8_t *bin_pwr_data = NULL;
	struct spectral_classifier_params *cp = NULL;
	struct spectral_classifier_params *pcp = NULL;
	struct target_if_spectral_ops *p_sops = NULL;
	uint32_t *binptr = NULL;
	int idx = 0;

	static int samp_msg_index;

	spec_samp_msg = (struct spectral_samp_msg *)spectral->nl_cb.get_nbuff(
			spectral->pdev_obj);
	if (!spec_samp_msg)
		return;

	p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral);
	/*
	 * Message length: fixed header plus the actual number of FFT bins.
	 * NOTE(review): temp_samp_msg_len is computed but never used in
	 * this function — confirm whether it should size the netlink copy.
	 */
	temp_samp_msg_len = sizeof(struct spectral_samp_msg) -
	    (MAX_NUM_BINS * sizeof(uint8_t));
	temp_samp_msg_len += (params->pwr_count * sizeof(uint8_t));
	if (spectral->ch_width == CH_WIDTH_160MHZ)
		temp_samp_msg_len +=
		    (params->pwr_count_sec80 * sizeof(uint8_t));
	bin_pwr_data = params->bin_pwr_data;

	data = &spec_samp_msg->samp_data;

	spec_samp_msg->signature = SPECTRAL_SIGNATURE;
	spec_samp_msg->freq = params->freq;
	spec_samp_msg->freq_loading = params->freq_loading;
	spec_samp_msg->samp_data.spectral_data_len = params->datalen;
	spec_samp_msg->samp_data.spectral_rssi = params->rssi;
	spec_samp_msg->samp_data.ch_width = spectral->ch_width;

	spec_samp_msg->samp_data.spectral_combined_rssi =
	    (uint8_t)params->rssi;
	spec_samp_msg->samp_data.spectral_upper_rssi = params->upper_rssi;
	spec_samp_msg->samp_data.spectral_lower_rssi = params->lower_rssi;

	OS_MEMCPY(spec_samp_msg->samp_data.spectral_chain_ctl_rssi,
		  params->chain_ctl_rssi, sizeof(params->chain_ctl_rssi));
	OS_MEMCPY(spec_samp_msg->samp_data.spectral_chain_ext_rssi,
		  params->chain_ext_rssi, sizeof(params->chain_ext_rssi));

	spec_samp_msg->samp_data.spectral_bwinfo = params->bwinfo;
	spec_samp_msg->samp_data.spectral_tstamp = params->tstamp;
	spec_samp_msg->samp_data.spectral_max_index = params->max_index;

	/* Classifier in user space needs access to these */
	spec_samp_msg->samp_data.spectral_lower_max_index =
	    params->max_lower_index;
	spec_samp_msg->samp_data.spectral_upper_max_index =
	    params->max_upper_index;
	spec_samp_msg->samp_data.spectral_nb_lower = params->nb_lower;
	spec_samp_msg->samp_data.spectral_nb_upper = params->nb_upper;
	spec_samp_msg->samp_data.spectral_last_tstamp = params->last_tstamp;
	spec_samp_msg->samp_data.spectral_max_mag = params->max_mag;
	spec_samp_msg->samp_data.bin_pwr_count = params->pwr_count;
	spec_samp_msg->samp_data.lb_edge_extrabins =
	    spectral->lb_edge_extrabins;
	spec_samp_msg->samp_data.rb_edge_extrabins =
	    spectral->rb_edge_extrabins;
	spec_samp_msg->samp_data.spectral_combined_rssi = params->rssi;
	spec_samp_msg->samp_data.spectral_max_scale = params->max_exp;

	/*
	 * This is a dirty hack to get the Windows build to pass.
	 * Currently Windows and Linux builds source spectral_data.h
	 * from two different places. The Windows version does not
	 * have the noise_floor member in it.
	 *
	 * As a temp workaround this variable is set under
	 * SPECTRAL_USE_NETLINK_SOCKETS as this is called only
	 * under the Linux build, and this saves the day.
	 *
	 * The plan to sync up the header files is under way.
	 */

	spec_samp_msg->samp_data.noise_floor = params->noise_floor;

	/* Classifier in user space needs access to these */
	cp = &spec_samp_msg->samp_data.classifier_params;
	pcp = &params->classifier_params;

	OS_MEMCPY(cp, pcp, sizeof(struct spectral_classifier_params));

	/*
	 * WAR: some targets report each FFT bin in a 32-bit word; narrow
	 * each word down to the 8-bit bin value.
	 */
	if (spectral->fftbin_size_war) {
		binptr = (uint32_t *)bin_pwr_data;
		for (idx = 0; idx < params->pwr_count; idx++)
			data->bin_pwr[idx] = *(binptr++);
	} else {
		SPECTRAL_MESSAGE_COPY_CHAR_ARRAY(&data->bin_pwr[0],
						 bin_pwr_data,
						 params->pwr_count);
	}

	spec_samp_msg->vhtop_ch_freq_seg1 = params->vhtop_ch_freq_seg1;
	spec_samp_msg->vhtop_ch_freq_seg2 = params->vhtop_ch_freq_seg2;

	/* Secondary-80 fields are only meaningful in 160 MHz operation */
	if (spectral->ch_width == CH_WIDTH_160MHZ) {
		spec_samp_msg->samp_data.spectral_rssi_sec80 =
		    params->rssi_sec80;
		spec_samp_msg->samp_data.noise_floor_sec80 =
		    params->noise_floor_sec80;

		spec_samp_msg->samp_data.spectral_data_len_sec80 =
		    params->datalen_sec80;
		spec_samp_msg->samp_data.spectral_max_index_sec80 =
		    params->max_index_sec80;
		spec_samp_msg->samp_data.spectral_max_mag_sec80 =
		    params->max_mag_sec80;
		spec_samp_msg->samp_data.bin_pwr_count_sec80 =
		    params->pwr_count_sec80;
		SPECTRAL_MESSAGE_COPY_CHAR_ARRAY(&data->bin_pwr_sec80[0],
						 (params->bin_pwr_data_sec80),
						 params->pwr_count_sec80);

		/*
		 * Note: REVERSE_ORDER is not a known use case for
		 * secondary 80 data at this point.
		 */
	}

	target_if_spectral_init_interf_list(data, params);
	p_sops->get_mac_address(spectral, spec_samp_msg->macaddr);

	if (spectral->sc_spectral_noise_pwr_cal)
		target_if_spectral_process_noise_pwr_report(
			spectral, spec_samp_msg);

	if (spectral->send_phy_data(spectral->pdev_obj) == 0)
		spectral->spectral_sent_msg++;

	samp_msg_index++;
}
diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_phyerr.c b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_phyerr.c
new file mode 100644
index 0000000000000000000000000000000000000000..9f6c5276b553c3456ba89047528c0d55ae6c755f
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_phyerr.c
@@ -0,0 +1,1628 @@
/*
 * Copyright (c) 2011,2017-2018 The Linux Foundation. All rights reserved.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#ifdef CONFIG_WIN
#include
#endif /*CONFIG_WIN*/
#include
#include

/*
 * NOTE(review): the #include directives above lost their header names in
 * this copy of the file — restore them from the original source tree.
 */
extern int spectral_debug_level;

#ifdef WLAN_CONV_SPECTRAL_ENABLE

/**
 * target_if_print_buf() - Prints given buffer for given length
 * @pbuf: Pointer to buffer
 * @len: length
 *
 * Prints given buffer for given length
 *
 * Return: void
 */
static void
target_if_print_buf(uint8_t *pbuf, int len)
{
	int i = 0;

	for (i = 0; i < len; i++) {
		spectral_debug("%02X ", pbuf[i]);
		/* Break the hexdump into rows of 32 bytes */
		if (i % 32 == 31)
			spectral_debug("\n");
	}
}

/**
 * target_if_spectral_dump_fft() - Print the FFT bins of a Spectral report
 * @pfft: Pointer to the FFT bin buffer
 * @fftlen: Number of FFT bins
 *
 * Return: 0
 */
int
target_if_spectral_dump_fft(uint8_t *pfft, int fftlen)
{
	int i = 0;

	/*
	 * TODO : Do not delete the following print
	 * The scripts used to validate Spectral depend on this Print
	 */
	spectral_debug("SPECTRAL : FFT Length is 0x%x (%d)", fftlen, fftlen);

	spectral_debug("fft_data # ");
	for (i = 0; i < fftlen; i++)
		spectral_debug("%d ", pfft[i]);
	spectral_debug("\n");
	return 0;
}

/**
 * target_if_spectral_send_tlv_to_host() - Send the TLV information to Host
 * @spectral: Pointer to target_if spectral object
 * @data: Pointer to the TLV
 * @datalen: data length
 *
 * Send the TLV information to Host
 *
 * Return: true on success, false if no netlink buffer was available
 */
int
target_if_spectral_send_tlv_to_host(struct target_if_spectral *spectral,
				    uint8_t *data, uint32_t datalen)
{
	int status = true;
	void *nl_data = spectral->nl_cb.get_nbuff(spectral->pdev_obj);

	if (nl_data) {
		memcpy(nl_data, data, datalen);
		if (spectral->send_phy_data(spectral->pdev_obj) == 0)
			spectral->spectral_sent_msg++;
	} else {
		status = false;
	}
	return status;
}

/**
 * target_if_dbg_print_samp_param() - Print a few key SAMP parameters
 * @p: Pointer to the SAMP message parameters
 *
 * Return: void
 */
void
target_if_dbg_print_samp_param(struct target_if_samp_msg_params *p)
{
	spectral_debug("\nSAMP Packet : -------------------- START --------------------");
	spectral_debug("Freq        = %d", p->freq);
	spectral_debug("RSSI        = %d", p->rssi);
	spectral_debug("Bin Count   = %d", p->pwr_count);
	spectral_debug("Timestamp   = %d", p->tstamp);
	spectral_debug("SAMP Packet : -------------------- END -----------------------");
}

/**
 * target_if_dbg_print_samp_msg() - Dump an entire SAMP message: header,
 * SAMP data, classifier parameters and interference list
 * @ss_msg: Pointer to the SAMP message
 *
 * Return: void
 */
void
target_if_dbg_print_samp_msg(struct spectral_samp_msg *ss_msg)
{
	int i = 0;

	struct spectral_samp_data *p = &ss_msg->samp_data;
	struct spectral_classifier_params *pc = &p->classifier_params;
	struct interf_src_rsp *pi = &p->interf_list;

	spectral_dbg_line();
	spectral_debug("Spectral Message");
	spectral_dbg_line();
	spectral_debug("Signature   :   0x%x", ss_msg->signature);
	spectral_debug("Freq        :   %d", ss_msg->freq);
	spectral_debug("Freq load   :   %d", ss_msg->freq_loading);
	spectral_debug("Intfnc type :   %d", ss_msg->int_type);
	spectral_dbg_line();
	spectral_debug("Spectral Data info");
	spectral_dbg_line();
	spectral_debug("data length     :   %d", p->spectral_data_len);
	spectral_debug("rssi            :   %d", p->spectral_rssi);
	spectral_debug("combined rssi   :   %d", p->spectral_combined_rssi);
	spectral_debug("upper rssi      :   %d", p->spectral_upper_rssi);
	spectral_debug("lower rssi      :   %d", p->spectral_lower_rssi);
	spectral_debug("bw info         :   %d", p->spectral_bwinfo);
	spectral_debug("timestamp       :   %d", p->spectral_tstamp);
	spectral_debug("max index       :   %d", p->spectral_max_index);
	spectral_debug("max exp         :   %d", p->spectral_max_exp);
	spectral_debug("max mag         :   %d", p->spectral_max_mag);
	spectral_debug("last timstamp   :   %d", p->spectral_last_tstamp);
	spectral_debug("upper max idx   :   %d", p->spectral_upper_max_index);
	spectral_debug("lower max idx   :   %d", p->spectral_lower_max_index);
	spectral_debug("bin power count :   %d", p->bin_pwr_count);
	spectral_dbg_line();
	spectral_debug("Classifier info");
	spectral_dbg_line();
	spectral_debug("20/40 Mode      :   %d", pc->spectral_20_40_mode);
	spectral_debug("dc index        :   %d", pc->spectral_dc_index);
	spectral_debug("dc in MHz       :   %d", pc->spectral_dc_in_mhz);
	spectral_debug("upper channel   :   %d", pc->upper_chan_in_mhz);
	spectral_debug("lower channel   :   %d", pc->lower_chan_in_mhz);
	spectral_dbg_line();
	spectral_debug("Interference info");
	spectral_dbg_line();
	spectral_debug("inter count     :   %d", pi->count);

	for (i = 0; i < pi->count; i++) {
		spectral_debug("inter type  :   %d",
			       pi->interf[i].interf_type);
		spectral_debug("min freq    :   %d",
			       pi->interf[i].interf_min_freq);
		spectral_debug("max freq    :   %d",
			       pi->interf[i].interf_max_freq);
	}
}

/**
 * target_if_get_offset_swar_sec80() - RSSI offset for the secondary 80MHz
 * segment, selected by channel width
 * @channel_width: Channel width (enum phy_ch_width value)
 *
 * Used by the sec80 combined-RSSI software workaround; unknown widths fall
 * back to the 80MHz offset.
 *
 * Return: offset value
 */
uint32_t
target_if_get_offset_swar_sec80(uint32_t channel_width)
{
	uint32_t offset = 0;

	switch (channel_width) {
	case CH_WIDTH_20MHZ:
		offset = OFFSET_CH_WIDTH_20;
		break;
	case CH_WIDTH_40MHZ:
		offset = OFFSET_CH_WIDTH_40;
		break;
	case CH_WIDTH_80MHZ:
		offset = OFFSET_CH_WIDTH_80;
		break;
	case CH_WIDTH_160MHZ:
		offset = OFFSET_CH_WIDTH_160;
		break;
	default:
		offset = OFFSET_CH_WIDTH_80;
		break;
	}
	return offset;
}

/**
 * target_if_dump_summary_report_gen2() - Dump Spectral Summary Report for gen2
 * @ptlv: Pointer to Spectral Phyerr TLV
 * @tlvlen: length
 * @is_160_format: Indicates whether information provided by HW is in altered
 * format for 802.11ac 160/80+80 MHz support (QCA9984 onwards)
 *
 * Dump Spectral Summary Report for gen2
 *
 * Return: 0 on success, -EPERM on unexpected TLV length
 */
static int
target_if_dump_summary_report_gen2(struct spectral_phyerr_tlv_gen2 *ptlv,
				   int tlvlen, bool is_160_format)
{
	/*
	 * For simplicity, everything is defined as uint32_t (except one).
	 * Proper code will later use the right sizes.
	 */

	/*
	 * For easy comparison between MDK team and OS team, the MDK script
	 * variable names have been used
	 */

	uint32_t agc_mb_gain;
	uint32_t sscan_gidx;
	uint32_t agc_total_gain;
	uint32_t recent_rfsat;
	uint32_t ob_flag;
	uint32_t nb_mask;
	uint32_t peak_mag;
	int16_t peak_inx;

	uint32_t ss_summary_A = 0;
	uint32_t ss_summary_B = 0;
	uint32_t ss_summary_C = 0;
	uint32_t ss_summary_D = 0;
	uint32_t ss_summary_E = 0;
	struct spectral_phyerr_hdr_gen2 *phdr =
		(struct spectral_phyerr_hdr_gen2 *)(
			(uint8_t *)ptlv +
			sizeof(struct spectral_phyerr_tlv_gen2));

	spectral_debug("SPECTRAL : SPECTRAL SUMMARY REPORT");

	if (is_160_format) {
		/* 160/80+80 layout carries five summary words */
		if (tlvlen != 20) {
			spectral_err("Unexpected TLV length %d for Spectral Summary Report! Hexdump follows",
				     tlvlen);
			target_if_print_buf((uint8_t *)ptlv, tlvlen + 4);
			return -EPERM;
		}

		/* Doing copy as the contents may not be aligned */
		qdf_mem_copy(&ss_summary_A, (uint8_t *)phdr, sizeof(int));
		qdf_mem_copy(&ss_summary_B,
			     (uint8_t *)((uint8_t *)phdr + sizeof(int)),
			     sizeof(int));
		qdf_mem_copy(&ss_summary_C,
			     (uint8_t *)((uint8_t *)phdr + 2 * sizeof(int)),
			     sizeof(int));
		qdf_mem_copy(&ss_summary_D,
			     (uint8_t *)((uint8_t *)phdr + 3 * sizeof(int)),
			     sizeof(int));
		qdf_mem_copy(&ss_summary_E,
			     (uint8_t *)((uint8_t *)phdr + 4 * sizeof(int)),
			     sizeof(int));

		/*
		 * The following is adapted from MDK scripts for
		 * easier comparability
		 */

		recent_rfsat = ((ss_summary_A >> 8) & 0x1);
		sscan_gidx = (ss_summary_A & 0xff);
		spectral_debug("sscan_gidx=%d, is_recent_rfsat=%d",
			       sscan_gidx, recent_rfsat);

		/* First segment */
		agc_mb_gain = ((ss_summary_B >> 10) & 0x7f);
		agc_total_gain = (ss_summary_B & 0x3ff);
		nb_mask = ((ss_summary_C >> 22) & 0xff);
		ob_flag = ((ss_summary_B >> 17) & 0x1);
		peak_inx = (ss_summary_C & 0xfff);
		/* Sign-extend the 12-bit peak index */
		if (peak_inx > 2047)
			peak_inx = peak_inx - 4096;
		peak_mag = ((ss_summary_C >> 12) & 0x3ff);

		spectral_debug("agc_total_gain_segid0 = 0x%.2x, agc_mb_gain_segid0=%d",
			       agc_total_gain, agc_mb_gain);
		spectral_debug("nb_mask_segid0 = 0x%.2x, ob_flag_segid0=%d, peak_index_segid0=%d, peak_mag_segid0=%d",
			       nb_mask, ob_flag, peak_inx, peak_mag);

		/* Second segment */
		agc_mb_gain = ((ss_summary_D >> 10) & 0x7f);
		agc_total_gain = (ss_summary_D & 0x3ff);
		nb_mask = ((ss_summary_E >> 22) & 0xff);
		ob_flag = ((ss_summary_D >> 17) & 0x1);
		peak_inx = (ss_summary_E & 0xfff);
		if (peak_inx > 2047)
			peak_inx = peak_inx - 4096;
		peak_mag = ((ss_summary_E >> 12) & 0x3ff);

		spectral_debug("agc_total_gain_segid1 = 0x%.2x, agc_mb_gain_segid1=%d",
			       agc_total_gain, agc_mb_gain);
		spectral_debug("nb_mask_segid1 = 0x%.2x, ob_flag_segid1=%d, peak_index_segid1=%d, peak_mag_segid1=%d",
			       nb_mask, ob_flag, peak_inx, peak_mag);
	} else {
		/* Legacy layout carries two summary words */
		if (tlvlen != 8) {
			spectral_err("Unexpected TLV length %d for Spectral Summary Report! Hexdump follows",
				     tlvlen);
			target_if_print_buf((uint8_t *)ptlv, tlvlen + 4);
			return -EPERM;
		}

		/* Doing copy as the contents may not be aligned */
		qdf_mem_copy(&ss_summary_A, (uint8_t *)phdr, sizeof(int));
		qdf_mem_copy(&ss_summary_B,
			     (uint8_t *)((uint8_t *)phdr + sizeof(int)),
			     sizeof(int));

		nb_mask = ((ss_summary_B >> 22) & 0xff);
		ob_flag = ((ss_summary_B >> 30) & 0x1);
		peak_inx = (ss_summary_B & 0xfff);

		if (peak_inx > 2047)
			peak_inx = peak_inx - 4096;

		peak_mag = ((ss_summary_B >> 12) & 0x3ff);
		agc_mb_gain = ((ss_summary_A >> 24) & 0x7f);
		agc_total_gain = (ss_summary_A & 0x3ff);
		sscan_gidx = ((ss_summary_A >> 16) & 0xff);
		recent_rfsat = ((ss_summary_B >> 31) & 0x1);

		spectral_debug("nb_mask = 0x%.2x, ob_flag=%d, peak_index=%d, peak_mag=%d, agc_mb_gain=%d, agc_total_gain=%d, sscan_gidx=%d, recent_rfsat=%d",
			       nb_mask, ob_flag, peak_inx, peak_mag,
			       agc_mb_gain, agc_total_gain, sscan_gidx,
			       recent_rfsat);
	}

	return 0;
}

/**
 * target_if_process_sfft_report_gen2() - Process Search FFT Report
 * @ptlv: Pointer to Spectral Phyerr TLV
 * @tlvlen: length
 * @p_fft_info: Pointer to search fft info
 *
 * Process Search FFT Report for gen2
 *
 * Return: 0 on success, -EPERM on unexpected TLV length
 */
static int
target_if_process_sfft_report_gen2(
	struct spectral_phyerr_tlv_gen2 *ptlv,
	int tlvlen,
	struct spectral_search_fft_info_gen2 *p_fft_info)
{
	/*
	 * For simplicity, everything is defined as uint32_t (except one).
	 * Proper code will later use the right sizes.
	 */
	/*
	 * For easy comparison between MDK team and OS team, the MDK script
	 * variable names have been used
	 */
	uint32_t relpwr_db;
	uint32_t num_str_bins_ib;
	uint32_t base_pwr;
	uint32_t total_gain_info;

	uint32_t fft_chn_idx;
	int16_t peak_inx;
	uint32_t avgpwr_db;
	uint32_t peak_mag;

	uint32_t fft_summary_A = 0;
	uint32_t fft_summary_B = 0;
	uint8_t *tmp = (uint8_t *)ptlv;
	struct spectral_phyerr_hdr_gen2 *phdr =
		(struct spectral_phyerr_hdr_gen2 *)(
			tmp +
			sizeof(struct spectral_phyerr_tlv_gen2));

	/*
	 * Relook this.
	 * NOTE(review): the error message below says "Spectral Summary
	 * Report" although this is the Search FFT report path — looks like
	 * a copy-paste; left untouched here as it is a runtime string.
	 */
	if (tlvlen < 8) {
		spectral_err("Unexpected TLV length %d for Spectral Summary Report! Hexdump follows",
			     tlvlen);
		target_if_print_buf((uint8_t *)ptlv, tlvlen + 4);
		return -EPERM;
	}

	/* Doing copy as the contents may not be aligned */
	qdf_mem_copy(&fft_summary_A, (uint8_t *)phdr, sizeof(int));
	qdf_mem_copy(&fft_summary_B,
		     (uint8_t *)((uint8_t *)phdr + sizeof(int)),
		     sizeof(int));

	relpwr_db = ((fft_summary_B >> 26) & 0x3f);
	num_str_bins_ib = fft_summary_B & 0xff;
	base_pwr = ((fft_summary_A >> 14) & 0x1ff);
	total_gain_info = ((fft_summary_A >> 23) & 0x1ff);

	fft_chn_idx = ((fft_summary_A >> 12) & 0x3);
	peak_inx = fft_summary_A & 0xfff;

	/* Sign-extend the 12-bit peak index */
	if (peak_inx > 2047)
		peak_inx = peak_inx - 4096;

	avgpwr_db = ((fft_summary_B >> 18) & 0xff);
	peak_mag = ((fft_summary_B >> 8) & 0x3ff);

	/* Populate the Search FFT Info */
	if (p_fft_info) {
		p_fft_info->relpwr_db = relpwr_db;
		p_fft_info->num_str_bins_ib = num_str_bins_ib;
		p_fft_info->base_pwr = base_pwr;
		p_fft_info->total_gain_info = total_gain_info;
		p_fft_info->fft_chn_idx = fft_chn_idx;
		p_fft_info->peak_inx = peak_inx;
		p_fft_info->avgpwr_db = avgpwr_db;
		p_fft_info->peak_mag = peak_mag;
	}

	return 0;
}

/**
 * target_if_dump_adc_report_gen2() - Dump ADC Reports for gen2
 * @ptlv: Pointer to Spectral Phyerr TLV
 * @tlvlen: length
 *
 * Dump ADC Reports for gen2
 *
 * Return: 0 on success, -EPERM on unexpected TLV length
 */
static int
target_if_dump_adc_report_gen2(
	struct spectral_phyerr_tlv_gen2 *ptlv, int tlvlen)
{
	int i;
	uint32_t *pdata;
	uint32_t data;

	/*
	 * For simplicity, everything is defined as uint32_t (except one).
	 * Proper code will later use the right sizes.
	 */
	uint32_t samp_fmt;
	uint32_t chn_idx;
	uint32_t recent_rfsat;
	uint32_t agc_mb_gain;
	uint32_t agc_total_gain;

	uint32_t adc_summary = 0;

	uint8_t *ptmp = (uint8_t *)ptlv;

	spectral_debug("SPECTRAL : ADC REPORT");

	/* Relook this */
	if (tlvlen < 4) {
		spectral_err("Unexpected TLV length %d for ADC Report! Hexdump follows",
			     tlvlen);
		target_if_print_buf((uint8_t *)ptlv, tlvlen + 4);
		return -EPERM;
	}

	/*
	 * NOTE(review): (ptlv + 4) advances the pointer by 4 whole TLV
	 * structs, not 4 bytes; the data words below use (ptmp + 4 + ...),
	 * so this presumably should be ((uint8_t *)ptlv + 4) — confirm
	 * against the gen2 report layout before changing.
	 */
	qdf_mem_copy(&adc_summary, (uint8_t *)(ptlv + 4), sizeof(int));

	samp_fmt = ((adc_summary >> 28) & 0x1);
	chn_idx = ((adc_summary >> 24) & 0x3);
	recent_rfsat = ((adc_summary >> 23) & 0x1);
	agc_mb_gain = ((adc_summary >> 16) & 0x7f);
	agc_total_gain = adc_summary & 0x3ff;

	spectral_debug("samp_fmt= %u, chn_idx= %u, recent_rfsat= %u, agc_mb_gain=%u agc_total_gain=%u",
		       samp_fmt, chn_idx, recent_rfsat, agc_mb_gain,
		       agc_total_gain);

	for (i = 0; i < (tlvlen / 4); i++) {
		pdata = (uint32_t *)(ptmp + 4 + i * 4);
		data = *pdata;

		/*
		 * Interpreting capture format 1.
		 * Both interpretations are printed unconditionally; the
		 * if (1) guards merely scope the local variables.
		 */
		if (1) {
			uint8_t i1;
			uint8_t q1;
			uint8_t i2;
			uint8_t q2;
			int8_t si1;
			int8_t sq1;
			int8_t si2;
			int8_t sq2;

			i1 = data & 0xff;
			q1 = (data >> 8) & 0xff;
			i2 = (data >> 16) & 0xff;
			q2 = (data >> 24) & 0xff;

			/* Convert each 8-bit sample to signed */
			if (i1 > 127)
				si1 = i1 - 256;
			else
				si1 = i1;

			if (q1 > 127)
				sq1 = q1 - 256;
			else
				sq1 = q1;

			if (i2 > 127)
				si2 = i2 - 256;
			else
				si2 = i2;

			if (q2 > 127)
				sq2 = q2 - 256;
			else
				sq2 = q2;

			spectral_debug("SPECTRAL ADC : Interpreting capture format 1");
			spectral_debug("adc_data_format_1 # %d %d %d",
				       2 * i, si1, sq1);
			spectral_debug("adc_data_format_1 # %d %d %d",
				       2 * i + 1, si2, sq2);
		}

		/* Interpreting capture format 0 */
		if (1) {
			uint16_t i1;
			uint16_t q1;
			int16_t si1;
			int16_t sq1;

			i1 = data & 0xffff;
			q1 = (data >> 16) & 0xffff;
			/* Convert each 16-bit sample to signed */
			if (i1 > 32767)
				si1 = i1 - 65536;
			else
				si1 = i1;

			if (q1 > 32767)
				sq1 = q1 - 65536;
			else
				sq1 = q1;
			spectral_debug("SPECTRAL ADC : Interpreting capture format 0");
			spectral_debug("adc_data_format_2 # %d %d %d",
				       i, si1, sq1);
		}
	}

	spectral_debug("\n");

	return 0;
}

/**
 * target_if_dump_sfft_report_gen2() - Process Search FFT Report for gen2
 * @ptlv: Pointer to Spectral Phyerr TLV
 * @tlvlen: length
 * @is_160_format: Indicates 160 format
 *
 * Process Search FFT Report for gen2
 *
 * Return: 0 on success, -EPERM on unexpected TLV length
 */
static int
target_if_dump_sfft_report_gen2(struct spectral_phyerr_tlv_gen2 *ptlv,
				int tlvlen, bool is_160_format)
{
	int i;
	uint32_t fft_mag;

	/*
	 * For simplicity, everything is defined as uint32_t (except one).
	 * Proper code will later use the right sizes.
	 */
	/*
	 * For easy comparison between MDK team and OS team, the MDK script
	 * variable names have been used
	 */
	uint32_t relpwr_db;
	uint32_t num_str_bins_ib;
	uint32_t base_pwr;
	uint32_t total_gain_info;

	uint32_t fft_chn_idx;
	int16_t peak_inx;
	uint32_t avgpwr_db;
	uint32_t peak_mag;
	uint8_t segid;

	uint32_t fft_summary_A = 0;
	uint32_t fft_summary_B = 0;
	uint32_t fft_summary_C = 0;
	uint8_t *tmp = (uint8_t *)ptlv;
	struct spectral_phyerr_hdr_gen2 *phdr =
		(struct spectral_phyerr_hdr_gen2 *)(
			tmp +
			sizeof(struct spectral_phyerr_tlv_gen2));
	uint32_t segid_skiplen = 0;

	/* 160/80+80 reports carry an extra segment-ID word to skip */
	if (is_160_format)
		segid_skiplen = sizeof(SPECTRAL_SEGID_INFO);

	spectral_debug("SPECTRAL : SEARCH FFT REPORT");

	/* Relook this */
	if (tlvlen < (8 + segid_skiplen)) {
		spectral_err("Unexpected TLV length %d for Spectral Summary Report! Hexdump follows",
			     tlvlen);
		target_if_print_buf((uint8_t *)ptlv, tlvlen + 4);
		return -EPERM;
	}

	/* Doing copy as the contents may not be aligned */
	qdf_mem_copy(&fft_summary_A, (uint8_t *)phdr, sizeof(int));
	qdf_mem_copy(&fft_summary_B,
		     (uint8_t *)((uint8_t *)phdr + sizeof(int)),
		     sizeof(int));
	if (is_160_format)
		qdf_mem_copy(&fft_summary_C,
			     (uint8_t *)((uint8_t *)phdr + 2 * sizeof(int)),
			     sizeof(int));

	relpwr_db = ((fft_summary_B >> 26) & 0x3f);
	num_str_bins_ib = fft_summary_B & 0xff;
	base_pwr = ((fft_summary_A >> 14) & 0x1ff);
	total_gain_info = ((fft_summary_A >> 23) & 0x1ff);

	fft_chn_idx = ((fft_summary_A >> 12) & 0x3);
	peak_inx = fft_summary_A & 0xfff;

	/* Sign-extend the 12-bit peak index */
	if (peak_inx > 2047)
		peak_inx = peak_inx - 4096;

	avgpwr_db = ((fft_summary_B >> 18) & 0xff);
	peak_mag = ((fft_summary_B >> 8) & 0x3ff);

	spectral_debug("Header A = 0x%x Header B = 0x%x",
		       phdr->hdr_a, phdr->hdr_b);
	spectral_debug("Base Power= 0x%x, Total Gain= %d, relpwr_db=%d, num_str_bins_ib=%d fft_chn_idx=%d peak_inx=%d avgpwr_db=%d peak_mag=%d",
		       base_pwr, total_gain_info, relpwr_db, num_str_bins_ib,
		       fft_chn_idx, peak_inx, avgpwr_db, peak_mag);
	if (is_160_format) {
		segid = fft_summary_C & 0x1;
		spectral_debug("Segment ID: %hhu", segid);
	}

	spectral_debug("FFT bins:");
	for (i = 0; i < (tlvlen - 8 - segid_skiplen); i++) {
		fft_mag = ((uint8_t *)ptlv)[12 + segid_skiplen + i];
		spectral_debug("%d %d, ", i, fft_mag);
	}

	spectral_debug("\n");

	return 0;
}

#ifdef SPECTRAL_DEBUG_SAMP_MSG
/**
 * target_if_spectral_log_SAMP_param() - Log SAMP parameters
 * @params: Reference to target_if_samp_msg_params
 *
 * API to log spectral SAMP message parameters
 *
 * Return: None
 */
static void
target_if_spectral_log_SAMP_param(struct target_if_samp_msg_params *params)
{
	target_if_dbg_print_samp_param(params);
}

#else
/* No-op when SAMP message debugging is compiled out */
static void
target_if_spectral_log_SAMP_param(struct target_if_samp_msg_params *params)
{
}
#endif
+int +target_if_process_phyerr_gen2(struct target_if_spectral *spectral, + uint8_t *data, + uint32_t datalen, + struct target_if_spectral_rfqual_info *p_rfqual, + struct target_if_spectral_chan_info *p_chaninfo, + uint64_t tsf64, + struct target_if_spectral_acs_stats *acs_stats) +{ + /* + * XXX : The classifier do not use all the members of the SAMP + * message data format. + * The classifier only depends upon the following parameters + * + * 1. Frequency (freq, msg->freq) + * 2. Spectral RSSI (spectral_rssi, + * msg->samp_data.spectral_rssi) + * 3. Bin Power Count (bin_pwr_count, + * msg->samp_data.bin_pwr_count) + * 4. Bin Power values (bin_pwr, msg->samp_data.bin_pwr[0] + * 5. Spectral Timestamp (spectral_tstamp, + * msg->samp_data.spectral_tstamp) + * 6. MAC Address (macaddr, msg->macaddr) + * + * This function prepares the params structure and populates it + * with + * relevant values, this is in turn passed to + * spectral_create_samp_msg() + * to prepare fully formatted Spectral SAMP message + * + * XXX : Need to verify + * 1. 
Order of FFT bin values + * + */ + + struct target_if_samp_msg_params params; + struct spectral_search_fft_info_gen2 search_fft_info; + struct spectral_search_fft_info_gen2 *p_sfft = &search_fft_info; + struct spectral_search_fft_info_gen2 search_fft_info_sec80; + struct spectral_search_fft_info_gen2 *p_sfft_sec80 = + &search_fft_info_sec80; + uint32_t segid_skiplen = 0; + + int8_t rssi_up = 0; + int8_t rssi_low = 0; + + int8_t chn_idx_highest_enabled = 0; + int8_t chn_idx_lowest_enabled = 0; + + uint8_t control_rssi = 0; + uint8_t extension_rssi = 0; + uint8_t combined_rssi = 0; + + uint32_t tstamp = 0; + + struct target_if_spectral_ops *p_sops = + GET_TARGET_IF_SPECTRAL_OPS(spectral); + + struct spectral_phyerr_tlv_gen2 *ptlv = + (struct spectral_phyerr_tlv_gen2 *)data; + struct spectral_phyerr_tlv_gen2 *ptlv_sec80 = NULL; + struct spectral_phyerr_fft_gen2 *pfft = NULL; + struct spectral_phyerr_fft_gen2 *pfft_sec80 = NULL; + + uint8_t segid = 0; + uint8_t segid_sec80 = 0; + + if (spectral->is_160_format) + segid_skiplen = sizeof(SPECTRAL_SEGID_INFO); + + pfft = (struct spectral_phyerr_fft_gen2 *)( + data + + sizeof(struct spectral_phyerr_tlv_gen2) + + sizeof(struct spectral_phyerr_hdr_gen2) + + segid_skiplen); + + /* + * XXX Extend SPECTRAL_DPRINTK() to use spectral_debug_level, + * and use this facility inside spectral_dump_phyerr_data() + * and supporting functions. + */ + if (spectral_debug_level & DEBUG_SPECTRAL2) + target_if_spectral_dump_phyerr_data_gen2( + data, datalen, + spectral->is_160_format); + + if (spectral_debug_level & DEBUG_SPECTRAL4) { + target_if_spectral_dump_phyerr_data_gen2( + data, datalen, + spectral->is_160_format); + spectral_debug_level = DEBUG_SPECTRAL; + } + + if (ptlv->signature != SPECTRAL_PHYERR_SIGNATURE_GEN2) { + /* + * EV# 118023: We tentatively disable the below print + * and provide stats instead. 
+ */ + spectral->diag_stats.spectral_mismatch++; + return -EPERM; + } + + OS_MEMZERO(¶ms, sizeof(params)); + + if (ptlv->tag == TLV_TAG_SEARCH_FFT_REPORT_GEN2) { + if (spectral->is_160_format) { + segid = *((SPECTRAL_SEGID_INFO *)( + (uint8_t *)ptlv + + sizeof(struct spectral_phyerr_tlv_gen2) + + sizeof(struct spectral_phyerr_hdr_gen2))); + + if (segid != 0) { + struct spectral_diag_stats *p_diag_stats = + &spectral->diag_stats; + p_diag_stats->spectral_vhtseg1id_mismatch++; + return -EPERM; + } + } + + target_if_process_sfft_report_gen2(ptlv, ptlv->length, + &search_fft_info); + + tstamp = p_sops->get_tsf64(spectral) & SPECTRAL_TSMASK; + + combined_rssi = p_rfqual->rssi_comb; + + if (spectral->upper_is_control) + rssi_up = control_rssi; + else + rssi_up = extension_rssi; + + if (spectral->lower_is_control) + rssi_low = control_rssi; + else + rssi_low = extension_rssi; + + params.rssi = p_rfqual->rssi_comb; + params.lower_rssi = rssi_low; + params.upper_rssi = rssi_up; + + if (spectral->sc_spectral_noise_pwr_cal) { + params.chain_ctl_rssi[0] = + p_rfqual->pc_rssi_info[0].rssi_pri20; + params.chain_ctl_rssi[1] = + p_rfqual->pc_rssi_info[1].rssi_pri20; + params.chain_ctl_rssi[2] = + p_rfqual->pc_rssi_info[2].rssi_pri20; + params.chain_ext_rssi[0] = + p_rfqual->pc_rssi_info[0].rssi_sec20; + params.chain_ext_rssi[1] = + p_rfqual->pc_rssi_info[1].rssi_sec20; + params.chain_ext_rssi[2] = + p_rfqual->pc_rssi_info[2].rssi_sec20; + } + + /* + * XXX : This actually depends on the programmed chain mask + * This value decides the per-chain enable mask to select + * the input ADC for search FTT. + * For modes upto VHT80, if more than one chain is + * enabled, the max valid chain + * is used. LSB corresponds to chain zero. + * For VHT80_80 and VHT160, the lowest enabled chain is + * used for primary + * detection and highest enabled chain is used for + * secondary detection. 
+ * + * XXX : The current algorithm do not use these control and + * extension channel + * Instead, it just relies on the combined RSSI values + * only. + * For fool-proof detection algorithm, we should take + * these RSSI values in to account. + * This is marked for future enhancements. + */ + chn_idx_highest_enabled = + ((spectral->params.ss_chn_mask & 0x8) ? 3 : + (spectral->params.ss_chn_mask & 0x4) ? 2 : + (spectral->params.ss_chn_mask & 0x2) ? 1 : 0); + chn_idx_lowest_enabled = + ((spectral->params.ss_chn_mask & 0x1) ? 0 : + (spectral->params.ss_chn_mask & 0x2) ? 1 : + (spectral->params.ss_chn_mask & 0x4) ? 2 : 3); + control_rssi = (uint8_t) + p_rfqual->pc_rssi_info[chn_idx_highest_enabled].rssi_pri20; + extension_rssi = (uint8_t) + p_rfqual->pc_rssi_info[chn_idx_highest_enabled].rssi_sec20; + + params.bwinfo = 0; + params.tstamp = 0; + params.max_mag = p_sfft->peak_mag; + + params.max_index = p_sfft->peak_inx; + params.max_exp = 0; + params.peak = 0; + params.bin_pwr_data = (uint8_t *)pfft; + params.freq = p_sops->get_current_channel(spectral); + params.freq_loading = 0; + + params.interf_list.count = 0; + params.max_lower_index = 0; + params.max_upper_index = 0; + params.nb_lower = 0; + params.nb_upper = 0; + /* + * For modes upto VHT80, the noise floor is populated with the + * one corresponding + * to the highest enabled antenna chain + */ + params.noise_floor = + p_rfqual->noise_floor[chn_idx_highest_enabled]; + params.datalen = ptlv->length; + params.pwr_count = ptlv->length - + sizeof(struct spectral_phyerr_hdr_gen2) - segid_skiplen; + params.tstamp = (tsf64 & SPECTRAL_TSMASK); + + acs_stats->ctrl_nf = params.noise_floor; + acs_stats->ext_nf = params.noise_floor; + acs_stats->nfc_ctl_rssi = control_rssi; + acs_stats->nfc_ext_rssi = extension_rssi; + + if (spectral->is_160_format && + spectral->ch_width == CH_WIDTH_160MHZ) { + /* + * We expect to see one more Search FFT report, and it + * should be equal in size to the current one. 
+ */ + if (datalen < ( + 2 * ( + sizeof(struct spectral_phyerr_tlv_gen2) + + ptlv->length))) { + struct spectral_diag_stats *p_diag_stats = + &spectral->diag_stats; + p_diag_stats->spectral_sec80_sfft_insufflen++; + return -EPERM; + } + + ptlv_sec80 = (struct spectral_phyerr_tlv_gen2 *)( + data + + sizeof(struct spectral_phyerr_tlv_gen2) + + ptlv->length); + + if (ptlv_sec80->signature != + SPECTRAL_PHYERR_SIGNATURE_GEN2) { + spectral->diag_stats.spectral_mismatch++; + return -EPERM; + } + + if (ptlv_sec80->tag != TLV_TAG_SEARCH_FFT_REPORT_GEN2) { + spectral->diag_stats.spectral_no_sec80_sfft++; + return -EPERM; + } + + segid_sec80 = *((SPECTRAL_SEGID_INFO *)( + (uint8_t *)ptlv_sec80 + + sizeof(struct spectral_phyerr_tlv_gen2) + + sizeof(struct spectral_phyerr_hdr_gen2))); + + if (segid_sec80 != 1) { + struct spectral_diag_stats *p_diag_stats = + &spectral->diag_stats; + p_diag_stats->spectral_vhtseg2id_mismatch++; + return -EPERM; + } + + params.vhtop_ch_freq_seg1 = p_chaninfo->center_freq1; + params.vhtop_ch_freq_seg2 = p_chaninfo->center_freq2; + + target_if_process_sfft_report_gen2( + ptlv_sec80, + ptlv_sec80->length, + &search_fft_info_sec80); + + pfft_sec80 = (struct spectral_phyerr_fft_gen2 *)( + ((uint8_t *)ptlv_sec80) + + sizeof(struct spectral_phyerr_tlv_gen2) + + sizeof(struct spectral_phyerr_hdr_gen2) + + segid_skiplen); + + /* XXX: Confirm. TBD at SoD. */ + params.rssi_sec80 = p_rfqual->rssi_comb; + if (spectral->is_sec80_rssi_war_required) + params.rssi_sec80 = + target_if_get_combrssi_sec80_seg_gen2 + (spectral, &search_fft_info_sec80); + /* XXX: Determine dynamically. TBD at SoD. 
*/ + /* + * For VHT80_80/VHT160, the noise floor for primary + * 80MHz segment is populated with the + * lowest enabled antenna chain and the noise floor for + * secondary 80MHz segment is populated + * with the highest enabled antenna chain + */ + params.noise_floor_sec80 = + p_rfqual->noise_floor[chn_idx_highest_enabled]; + params.noise_floor = + p_rfqual->noise_floor[chn_idx_lowest_enabled]; + + params.max_mag_sec80 = p_sfft_sec80->peak_mag; + params.max_index_sec80 = p_sfft_sec80->peak_inx; + /* XXX Does this definition of datalen *still hold? */ + params.datalen_sec80 = ptlv_sec80->length; + params.pwr_count_sec80 = + ptlv_sec80->length - + sizeof(struct spectral_phyerr_hdr_gen2) - + segid_skiplen; + params.bin_pwr_data_sec80 = (uint8_t *)pfft_sec80; + } + qdf_mem_copy(¶ms.classifier_params, + &spectral->classifier_params, + sizeof(struct spectral_classifier_params)); + + target_if_spectral_log_SAMP_param(¶ms); + target_if_spectral_create_samp_msg(spectral, ¶ms); + } + + return 0; +} + +int +target_if_spectral_dump_hdr_gen2(struct spectral_phyerr_hdr_gen2 *phdr) +{ + uint32_t a = 0; + uint32_t b = 0; + + qdf_mem_copy(&a, (uint8_t *)phdr, sizeof(int)); + qdf_mem_copy(&b, + (uint8_t *)((uint8_t *)phdr + sizeof(int)), + sizeof(int)); + + spectral_debug("SPECTRAL : HEADER A 0x%x (%d)", a, a); + spectral_debug("SPECTRAL : HEADER B 0x%x (%d)", b, b); + return 0; +} + +int8_t +target_if_get_combrssi_sec80_seg_gen2( + struct target_if_spectral *spectral, + struct spectral_search_fft_info_gen2 *p_sfft_sec80) +{ + uint32_t avgpwr_db = 0; + uint32_t total_gain_db = 0; + uint32_t offset = 0; + int8_t comb_rssi = 0; + + /* Obtain required parameters for algorithm from search FFT report */ + avgpwr_db = p_sfft_sec80->avgpwr_db; + total_gain_db = p_sfft_sec80->total_gain_info; + + /* Calculate offset */ + offset = target_if_get_offset_swar_sec80(spectral->ch_width); + + /* Calculate RSSI */ + comb_rssi = ((avgpwr_db - total_gain_db) + offset); + + return comb_rssi; +} + +int 
+target_if_spectral_dump_tlv_gen2( + struct spectral_phyerr_tlv_gen2 *ptlv, bool is_160_format) +{ + int ret = 0; + + /* + * TODO : Do not delete the following print + * The scripts used to validate Spectral depend on this Print + */ + spectral_debug("SPECTRAL : TLV Length is 0x%x (%d)", + ptlv->length, ptlv->length); + + switch (ptlv->tag) { + case TLV_TAG_SPECTRAL_SUMMARY_REPORT_GEN2: + ret = + target_if_dump_summary_report_gen2( + ptlv, ptlv->length, is_160_format); + break; + + case TLV_TAG_SEARCH_FFT_REPORT_GEN2: + ret = + target_if_dump_sfft_report_gen2(ptlv, ptlv->length, + is_160_format); + break; + + case TLV_TAG_ADC_REPORT_GEN2: + ret = target_if_dump_adc_report_gen2(ptlv, ptlv->length); + break; + + default: + spectral_warn("INVALID TLV"); + ret = -1; + break; + } + + return ret; +} + +int +target_if_spectral_dump_phyerr_data_gen2(uint8_t *data, uint32_t datalen, + bool is_160_format) +{ + struct spectral_phyerr_tlv_gen2 *ptlv = NULL; + uint32_t bytes_processed = 0; + uint32_t bytes_remaining = datalen; + uint32_t curr_tlv_complete_size = 0; + + if (datalen < sizeof(struct spectral_phyerr_tlv_gen2)) { + spectral_err("Total PHY error data length %u too short to contain any TLVs", + datalen); + return -EPERM; + } + + while (bytes_processed < datalen) { + if (bytes_remaining < sizeof(struct spectral_phyerr_tlv_gen2)) { + spectral_err("Remaining PHY error data length %u too short to contain a TLV", + bytes_remaining); + return -EPERM; + } + + ptlv = (struct spectral_phyerr_tlv_gen2 *)(data + + bytes_processed); + + if (ptlv->signature != SPECTRAL_PHYERR_SIGNATURE_GEN2) { + spectral_err("Invalid signature 0x%x!", + ptlv->signature); + return -EPERM; + } + + curr_tlv_complete_size = + sizeof(struct spectral_phyerr_tlv_gen2) + + ptlv->length; + + if (curr_tlv_complete_size > bytes_remaining) { + spectral_err("TLV size %d greater than number of bytes remaining %d", + curr_tlv_complete_size, bytes_remaining); + return -EPERM; + } + + if 
(target_if_spectral_dump_tlv_gen2(ptlv, is_160_format) == -1) + return -EPERM; + + bytes_processed += curr_tlv_complete_size; + bytes_remaining = datalen - bytes_processed; + } + + return 0; +} + +int +target_if_process_sfft_report_gen3( + struct spectral_phyerr_fft_report_gen3 *p_fft_report, + struct spectral_search_fft_info_gen3 *p_sfft) +{ + /* + * For simplicity, everything is defined as uint32_t (except one). + * Proper code will later use the right sizes. + */ + /* + * For easy comparision between MDK team and OS team, the MDK script + * variable names have been used + */ + int32_t peak_sidx; + int32_t peak_mag; + + /* Populate the Search FFT Info */ + if (p_sfft) { + p_sfft->timestamp = p_fft_report->fft_timestamp; + + p_sfft->fft_detector_id = get_bitfield(p_fft_report->hdr_a, + 2, 0); + p_sfft->fft_num = get_bitfield(p_fft_report->hdr_a, 3, 2); + p_sfft->fft_radar_check = get_bitfield(p_fft_report->hdr_a, + 12, 5); + + peak_sidx = get_bitfield(p_fft_report->hdr_a, 11, 17); + p_sfft->fft_peak_sidx = unsigned_to_signed(peak_sidx, 11); + p_sfft->fft_chn_idx = get_bitfield(p_fft_report->hdr_a, 3, 28); + + p_sfft->fft_base_pwr_db = get_bitfield(p_fft_report->hdr_b, + 9, 0); + p_sfft->fft_total_gain_db = get_bitfield(p_fft_report->hdr_b, + 8, 9); + + p_sfft->fft_num_str_bins_ib = get_bitfield(p_fft_report->hdr_c, + 8, 0); + peak_mag = get_bitfield(p_fft_report->hdr_c, 10, 8); + p_sfft->fft_peak_mag = unsigned_to_signed(peak_mag, 10); + p_sfft->fft_avgpwr_db = get_bitfield(p_fft_report->hdr_c, + 7, 18); + p_sfft->fft_relpwr_db = get_bitfield(p_fft_report->hdr_c, + 7, 25); + } + + return 0; +} + +int +target_if_dump_fft_report_gen3(struct target_if_spectral *spectral, + struct spectral_phyerr_fft_report_gen3 *p_fft_report, + struct spectral_search_fft_info_gen3 *p_sfft) +{ + int i = 0; + int fft_mag = 0; + int fft_hdr_length = (p_fft_report->fft_hdr_length * 4); + int report_len = (fft_hdr_length + 8); + int fft_bin_len = (fft_hdr_length - 16); + int 
fft_bin_len_adj = fft_bin_len >> 2; + int fft_bin_len_inband_tfer = 0; + int fft_bin_len_to_dump = fft_bin_len; + + if ((spectral->params.ss_rpt_mode == 2) && + spectral->inband_fftbin_size_adj) { + fft_bin_len_adj >>= 1; + fft_bin_len_inband_tfer = fft_bin_len >> 1; + fft_bin_len_to_dump = fft_bin_len_inband_tfer; + } + + spectral_debug("#############################################################"); + spectral_debug("Spectral search fft_report"); + spectral_debug("fft_timestamp = 0x%x\nfft_hdr_length = %d(32 bit words)\nfft_hdr_tag = 0x%x\nfft_hdr_sig = 0x%x", + p_fft_report->fft_timestamp, + p_fft_report->fft_hdr_length, + p_fft_report->fft_hdr_tag, p_fft_report->fft_hdr_sig); + + spectral_debug("Length field in search fft report is %d(0x%x) bytes", + fft_hdr_length, fft_hdr_length); + spectral_debug("Total length of search fft report is %d(0x%x) bytes", + report_len, report_len); + spectral_debug("FW reported fftbins in report is %d(0x%x)", fft_bin_len, + fft_bin_len); + if ((spectral->params.ss_rpt_mode == 2) && + spectral->inband_fftbin_size_adj) { + spectral_debug("FW fftbins actually transferred (in-band report mode) " + "%d(0x%x)", + fft_bin_len_inband_tfer, fft_bin_len_inband_tfer); + } + spectral_debug("Actual number of fftbins in report is %d(0x%x)\n", + fft_bin_len_adj, fft_bin_len_adj); + + spectral_debug("fft_detector_id = %u\nfft_num = %u\nfft_radar_check = %u\nfft_peak_sidx = %d\nfft_chn_idx = %u\nfft_base_pwr_db = %u\nfft_total_gain_db = %u\nfft_num_str_bins_ib = %u\nfft_peak_mag = %d\nfft_avgpwr_db = %u\nfft_relpwr_db = %u", + p_sfft->fft_detector_id, + p_sfft->fft_num, + p_sfft->fft_radar_check, + p_sfft->fft_peak_sidx, + p_sfft->fft_chn_idx, + p_sfft->fft_base_pwr_db, + p_sfft->fft_total_gain_db, + p_sfft->fft_num_str_bins_ib, + p_sfft->fft_peak_mag, + p_sfft->fft_avgpwr_db, p_sfft->fft_relpwr_db); + + spectral_debug("FFT bins:"); + for (i = 0; i < fft_bin_len_to_dump; i++) { + if (i % 16 == 0) + spectral_debug("\n%d :", i); + fft_mag = + 
((uint8_t *)p_fft_report)[SPECTRAL_FFT_BINS_POS + i]; + spectral_debug("%d ", fft_mag); + } + spectral_debug("\n"); + spectral_debug("#############################################################"); + + return 0; +} + +#ifdef DIRECT_BUF_RX_ENABLE +/** + * target_if_consume_sscan_report_gen3() - Consume spectral summary report + * @spectral: Pointer to spectral object + * @data: Pointer to spectral summary + * + * Consume spectral summary report for gen3 + * + * Return: rssi + */ +static int +target_if_consume_sscan_report_gen3(struct target_if_spectral *spectral, + uint8_t *data) { + int rssi; + struct spectral_sscan_report_gen3 *psscan_report; + + psscan_report = (struct spectral_sscan_report_gen3 *)data; + /* RSSI is in 1/2 dBm steps, Covert it to dBm scale */ + rssi = (get_bitfield(psscan_report->hdr_a, 10, 18)) >> 1; + + return rssi; +} + +/** + * target_if_verify_sig_and_tag_gen3() - Verify tag and signature + * of spectral report + * @spectral: Pointer to spectral object + * @data: Pointer to spectral summary report + * @exp_tag: iexpected tag value + * + * Process fft report for gen3 + * + * Return: SUCCESS/FAILURE + */ +static int +target_if_verify_sig_and_tag_gen3(struct target_if_spectral *spectral, + uint8_t *data, uint8_t exp_tag) +{ + uint8_t tag = 0; + uint8_t signature = 0; + + /* Peek into the data to figure out whether + * 1) Signature matches the expected value + * 2) What is inside the package (TAG ID is used for finding this) + */ + tag = *(data + PHYERR_HDR_TAG_POS); + signature = *(data + PHYERR_HDR_SIG_POS); + + if (signature != SPECTRAL_PHYERR_SIGNATURE_GEN3) { + if (spectral_debug_level & DEBUG_SPECTRAL4) + spectral_err("Unexpected sig %x in spectral phyerror", + signature); + spectral_err("Expected sig is %x\n", + SPECTRAL_PHYERR_SIGNATURE_GEN3); + spectral->diag_stats.spectral_mismatch++; + return -EINVAL; + } + + if (tag != exp_tag) { + if (spectral_debug_level & DEBUG_SPECTRAL4) + spectral_err("Unexpected tag %x in spectral phyerror", + 
tag); + spectral_err("Expected tag is %x\n", exp_tag); + spectral->diag_stats.spectral_mismatch++; + return -EINVAL; + } + + return 0; +} + +static uint8_t +target_if_spectral_get_lowest_chn_idx(uint8_t chainmask) +{ + uint8_t idx; + + for (idx = 0; idx < DBR_MAX_CHAINS; idx++) { + if (chainmask & 0x1) + break; + chainmask >>= 1; + } + return idx; +} + +int +target_if_consume_spectral_report_gen3( + struct target_if_spectral *spectral, + struct spectral_report *report) +{ + /* + * XXX : The classifier do not use all the members of the SAMP + * message data format. + * The classifier only depends upon the following parameters + * + * 1. Frequency (freq, msg->freq) + * 2. Spectral RSSI (spectral_rssi, + * msg->samp_data.spectral_rssi) + * 3. Bin Power Count (bin_pwr_count, + * msg->samp_data.bin_pwr_count) + * 4. Bin Power values (bin_pwr, msg->samp_data.bin_pwr[0] + * 5. Spectral Timestamp (spectral_tstamp, + * msg->samp_data.spectral_tstamp) + * 6. MAC Address (macaddr, msg->macaddr) + * + * This function prepares the params structure and populates it + * with + * relevant values, this is in turn passed to + * spectral_create_samp_msg() + * to prepare fully formatted Spectral SAMP message + * + * XXX : Need to verify + * 1. 
Order of FFT bin values + * + */ + uint64_t tsf64 = 0; + struct target_if_samp_msg_params params; + struct spectral_search_fft_info_gen3 search_fft_info; + struct spectral_search_fft_info_gen3 *p_sfft = &search_fft_info; + int8_t rssi_up = 0; + int8_t rssi_low = 0; + int8_t chn_idx_lowest_enabled = 0; + uint8_t control_rssi = 0; + uint8_t extension_rssi = 0; + int fft_hdr_length = 0; + int report_len = 0; + int fft_bin_len = 0; + struct target_if_spectral_ops *p_sops = + GET_TARGET_IF_SPECTRAL_OPS(spectral); + struct spectral_phyerr_fft_report_gen3 *p_fft_report; + int8_t rssi; + uint8_t *data = report->data; + struct wlan_objmgr_vdev *vdev; + uint8_t vdev_rxchainmask; + + OS_MEMZERO(¶ms, sizeof(params)); + + if (target_if_verify_sig_and_tag_gen3( + spectral, data, + TLV_TAG_SPECTRAL_SUMMARY_REPORT_GEN3) != 0) + goto fail; + rssi = target_if_consume_sscan_report_gen3(spectral, data); + /* Advance buf pointer to the search fft report */ + data += sizeof(struct spectral_sscan_report_gen3); + + if (target_if_verify_sig_and_tag_gen3( + spectral, data, + TLV_TAG_SEARCH_FFT_REPORT_GEN3) != 0) + goto fail; + p_fft_report = (struct spectral_phyerr_fft_report_gen3 *)data; + fft_hdr_length = p_fft_report->fft_hdr_length * 4; + if (fft_hdr_length < 16) { + spectral_err("Unexpected TLV length %u for FFT Report! 
Hexdump follows", + fft_hdr_length); + goto fail; + } + + report_len = (fft_hdr_length + 8); + + fft_bin_len = (fft_hdr_length - 16); + fft_bin_len >>= 2; + if ((spectral->params.ss_rpt_mode == 2) && + spectral->inband_fftbin_size_adj) { + fft_bin_len >>= 1; + } + + tsf64 = p_fft_report->fft_timestamp; + + target_if_process_sfft_report_gen3(p_fft_report, p_sfft); + + if (p_sfft->fft_detector_id != 0) { + spectral_err("Expected segid is 0 but we got %d", + p_sfft->fft_detector_id); + spectral->diag_stats.spectral_vhtseg1id_mismatch++; + goto fail; + } + + if (spectral_debug_level & (DEBUG_SPECTRAL2 | DEBUG_SPECTRAL4)) + target_if_dump_fft_report_gen3(spectral, p_fft_report, p_sfft); + + if (spectral->upper_is_control) + rssi_up = control_rssi; + else + rssi_up = extension_rssi; + + if (spectral->lower_is_control) + rssi_low = control_rssi; + else + rssi_low = extension_rssi; + + params.rssi = rssi; + params.lower_rssi = rssi_low; + params.upper_rssi = rssi_up; + + if (spectral->sc_spectral_noise_pwr_cal) { + /* Fill 0's till FW provides them */ + params.chain_ctl_rssi[0] = 0; + params.chain_ctl_rssi[1] = 0; + params.chain_ctl_rssi[2] = 0; + params.chain_ext_rssi[0] = 0; + params.chain_ext_rssi[1] = 0; + params.chain_ext_rssi[2] = 0; + } + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) + return -ENOENT; + + vdev_rxchainmask = + wlan_vdev_mlme_get_rxchainmask(vdev); + QDF_ASSERT(vdev_rxchainmask != 0); + wlan_objmgr_vdev_release_ref(vdev, + WLAN_SPECTRAL_ID); + + chn_idx_lowest_enabled = + target_if_spectral_get_lowest_chn_idx(vdev_rxchainmask); + if (chn_idx_lowest_enabled >= DBR_MAX_CHAINS) + return -EINVAL; + + control_rssi = 0; + extension_rssi = 0; + + params.bwinfo = 0; + params.tstamp = 0; + params.max_mag = p_sfft->fft_peak_mag; + + /* params.max_index = p_sfft->peak_inx; */ + params.max_exp = 0; + params.peak = 0; + params.bin_pwr_data = (uint8_t *)((uint8_t *)p_fft_report + + SPECTRAL_FFT_BINS_POS); + params.freq = 
p_sops->get_current_channel(spectral); + params.freq_loading = 0; + + params.interf_list.count = 0; + params.max_lower_index = 0; + params.max_upper_index = 0; + params.nb_lower = 0; + params.nb_upper = 0; + /* + * For modes upto VHT80, the noise floor is populated with the one + * corresponding + * to the highest enabled antenna chain + */ + /* TODO: Fill proper values once FW provides them*/ + params.noise_floor = report->noisefloor[chn_idx_lowest_enabled]; + params.datalen = (fft_hdr_length * 4); + params.pwr_count = fft_bin_len; + params.tstamp = (tsf64 & SPECTRAL_TSMASK); + + if (spectral->ch_width == CH_WIDTH_160MHZ) { + /* We expect to see one more Search FFT report, and it should be + * equal in size to the current one. + */ + /* Advance to the secondary 80 Mhz spectral report */ + data += report_len; + + if (target_if_verify_sig_and_tag_gen3( + spectral, data, + TLV_TAG_SPECTRAL_SUMMARY_REPORT_GEN3) != 0) + goto fail; + rssi = target_if_consume_sscan_report_gen3(spectral, data); + /* Advance buf pointer to the search fft report */ + data += sizeof(struct spectral_sscan_report_gen3); + + if (target_if_verify_sig_and_tag_gen3( + spectral, data, + TLV_TAG_SEARCH_FFT_REPORT_GEN3) != 0) + goto fail; + p_fft_report = (struct spectral_phyerr_fft_report_gen3 *)(data); + fft_hdr_length = p_fft_report->fft_hdr_length * 4; + report_len = (fft_hdr_length + 8); + + fft_bin_len = (fft_hdr_length - 16); + fft_bin_len >>= 2; + if ((spectral->params.ss_rpt_mode == 2) && + spectral->inband_fftbin_size_adj) { + fft_bin_len >>= 1; + } + + target_if_process_sfft_report_gen3(p_fft_report, p_sfft); + + if (p_sfft->fft_detector_id != 1) { + spectral_err("Expected segid is 1 but we got %d", + p_sfft->fft_detector_id); + spectral->diag_stats.spectral_vhtseg2id_mismatch++; + goto fail; + } + + if (spectral_debug_level & + (DEBUG_SPECTRAL2 | DEBUG_SPECTRAL4)) + target_if_dump_fft_report_gen3(spectral, p_fft_report, p_sfft); + + params.vhtop_ch_freq_seg1 = 0; + 
params.vhtop_ch_freq_seg2 = 0; + + /* XXX: Confirm. TBD at SoD. */ + params.rssi_sec80 = rssi; + + /* XXX: Determine dynamically. TBD at SoD. */ + + /* + * For VHT80_80/VHT160,the noise floor for primary 80MHz segment + * populated with the + * lowest enabled antenna chain and the nf for secondary 80MHz + * segment is populated + * with the highest enabled antenna chain + */ + /* TODO: Fill proper values once FW provides them*/ + params.noise_floor_sec80 = DUMMY_NF_VALUE; + params.noise_floor = DUMMY_NF_VALUE; + + params.max_mag_sec80 = p_sfft->fft_peak_mag; + /* params.max_index_sec80 = p_sfft->peak_inx; */ + /* XXX Does this definition of datalen *still hold? */ + params.datalen_sec80 = fft_hdr_length; + params.pwr_count_sec80 = fft_bin_len; + params.bin_pwr_data_sec80 = (u_int8_t *)( + (uint8_t *)p_fft_report + SPECTRAL_FFT_BINS_POS); + } + + qdf_mem_copy(¶ms.classifier_params, + &spectral->classifier_params, + sizeof(struct spectral_classifier_params)); + + target_if_spectral_log_SAMP_param(¶ms); + target_if_spectral_create_samp_msg(spectral, ¶ms); + + return 0; + + fail: + spectral_err("Error in function while processing search fft report"); + return -EPERM; +} + +int target_if_spectral_process_report_gen3( + struct wlan_objmgr_pdev *pdev, + void *buf) +{ + int ret = 0; + struct direct_buf_rx_data *payload = buf; + struct target_if_spectral *spectral; + struct spectral_report report; + + spectral = get_target_if_spectral_handle_from_pdev(pdev); + if (spectral == NULL) { + spectral_err("Spectral target object is null"); + return -EINVAL; + } + + report.data = payload->vaddr; + if (payload->meta_data_valid) + qdf_mem_copy(report.noisefloor, &payload->meta_data, + sizeof(payload->meta_data)); + + if (spectral_debug_level & (DEBUG_SPECTRAL2 | DEBUG_SPECTRAL4)) { + spectral_debug("Printing the spectral phyerr buffer for debug"); + spectral_debug("Datalength of buffer = 0x%x(%d) bufptr = 0x%pK", + payload->dbr_len, payload->dbr_len, + payload->vaddr); +#ifdef 
CONFIG_WIN + RAWSIM_PKT_HEXDUMP((unsigned char *)payload->vaddr, 1024); +#endif + } + + ret = target_if_consume_spectral_report_gen3(spectral, &report); + + if (spectral_debug_level & DEBUG_SPECTRAL4) + spectral_debug_level = DEBUG_SPECTRAL; + + return ret; +} +#else +int target_if_spectral_process_report_gen3( + struct wlan_objmgr_pdev *pdev, + void *buf) +{ + spectral_err("Direct dma support is not enabled"); + return -EINVAL; +} +#endif +qdf_export_symbol(target_if_spectral_process_report_gen3); +/* END of spectral GEN III HW specific functions */ + +#endif /* WLAN_CONV_SPECTRAL_ENABLE */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim.c b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim.c new file mode 100644 index 0000000000000000000000000000000000000000..d35cc2ad7767d7ef2470f979efdbc9069b9247c6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim.c @@ -0,0 +1,994 @@ +/* + * Copyright (c) 2015,2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifdef QCA_SUPPORT_SPECTRAL_SIMULATION +#include "target_if_spectral.h" +#include "target_if_spectral_sim.h" +#include "target_if_spectral_sim_int.h" +#include "_ieee80211.h" +#include "ieee80211_api.h" +#include "ieee80211_defines.h" +#include "qdf_types.h" +#include "ieee80211_var.h" +#include +#include + +/* Helper functions */ + +static int target_if_populate_report_static_gen2( + struct spectralsim_report *report, + enum phy_ch_width width, bool is_80_80); +static int target_if_populate_report_static_gen3( + struct spectralsim_report *report, + enum phy_ch_width width, bool is_80_80); +static void target_if_depopulate_report( + struct spectralsim_report *report); + +static int target_if_populate_reportset_static( + struct spectralsim_context *simctx, + struct spectralsim_reportset *reportset, + enum phy_ch_width width, bool is_80_80); +static void target_if_depopulate_reportset( + struct spectralsim_reportset * + reportset); + +static int target_if_populate_simdata(struct spectralsim_context *simctx); +static void target_if_depopulate_simdata(struct spectralsim_context *simctx); +static OS_TIMER_FUNC(target_if_spectral_sim_phyerrdelivery_handler); + +/* + * Static configuration. + * For now, we will be having a single configuration per BW, and a single + * report per configuration (since we need the data only for ensuring correct + * format handling). + * + * Extend this for more functionality if required in the future. 
+ */ + +/** + * target_if_populate_report_static_gen2() - Statically populate simulation + * data for one report for generation 2 chipsets + * @report: Pointer to spectral report data instance + * @width : Channel bandwidth enumeration + * @is_80_80: Whether the channel is operating in 80-80 mode + * + * Statically populate simulation data for one report for generation 2 chipsets + * + * Return: 0 on success, negative error code on failure + */ +static int +target_if_populate_report_static_gen2( + struct spectralsim_report *report, + enum phy_ch_width width, bool is_80_80) +{ + qdf_assert_always(report); + + switch (width) { + case CH_WIDTH_20MHZ: + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_20_gen2)); + + if (!report->data) { + spectral_err("Spectral simulation: Could not allocate memory for report data"); + goto bad; + } + + report->datasize = sizeof(reportdata_20_gen2); + qdf_mem_copy(report->data, + reportdata_20_gen2, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_20, sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_20, sizeof(report->chan_info)); + + break; + case CH_WIDTH_40MHZ: + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_40_gen2)); + + if (!report->data) { + spectral_err("Spectral simulation: Could not allocate memory for report data"); + goto bad; + } + + report->datasize = sizeof(reportdata_40_gen2); + qdf_mem_copy(report->data, + reportdata_40_gen2, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_40, sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_40, sizeof(report->chan_info)); + + break; + case CH_WIDTH_80MHZ: + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_80_gen2)); + + if (!report->data) { + spectral_err("Spectral simulation: Could not allocate memory for report data"); + goto bad; + } + + report->datasize = 
sizeof(reportdata_80_gen2); + qdf_mem_copy(report->data, + reportdata_80_gen2, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_80, sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_80, sizeof(report->chan_info)); + + break; + case CH_WIDTH_160MHZ: + if (is_80_80) { + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_80_80_gen2)); + + if (!report->data) { + spectral_err("Spectral simulation: Could not allocate memory for report data"); + goto bad; + } + + report->datasize = sizeof(reportdata_80_80_gen2); + qdf_mem_copy(report->data, + reportdata_80_80_gen2, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_80_80, + sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_80_80, + sizeof(report->chan_info)); + + } else { + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_160_gen2)); + + if (!report->data) { + spectral_err("Spectral simulation: Could not allocate memory for report data"); + goto bad; + } + + report->datasize = sizeof(reportdata_160_gen2); + qdf_mem_copy(report->data, + reportdata_160_gen2, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_160, + sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_160, sizeof(report->chan_info)); + } + break; + default: + spectral_err("Unhandled width. Please correct. 
Asserting"); + qdf_assert_always(0); + } + + return 0; + + bad: + return -EPERM; +} + +/** + * target_if_populate_report_static_gen3() - Statically populate simulation + * data for one report for generation 3 chipsets + * @report: Pointer to spectral report data instance + * @width : Channel bandwidth enumeration + * @is_80_80: Whether the channel is operating in 80-80 mode + * + * Statically populate simulation data for one report for generation 3 chipsets + * + * Return: 0 on success, negative error code on failure + */ +static int +target_if_populate_report_static_gen3( + struct spectralsim_report *report, + enum phy_ch_width width, bool is_80_80) +{ + qdf_assert_always(report); + + switch (width) { + case CH_WIDTH_20MHZ: + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_20_gen3)); + + if (!report->data) { + spectral_err("Spectral simulation: Could not allocate memory for report data"); + goto bad; + } + + report->datasize = sizeof(reportdata_20_gen3); + qdf_mem_copy(report->data, + reportdata_20_gen3, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_20, sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_20, sizeof(report->chan_info)); + + break; + case CH_WIDTH_40MHZ: + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_40_gen3)); + + if (!report->data) { + spectral_err("Spectral simulation: Could not allocate memory for report data"); + goto bad; + } + + report->datasize = sizeof(reportdata_40_gen3); + qdf_mem_copy(report->data, + reportdata_40_gen3, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_40, sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_40, sizeof(report->chan_info)); + + break; + case CH_WIDTH_80MHZ: + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_80_gen3)); + + if (!report->data) { + spectral_err("Spectral simulation: Could not 
allocate memory for report data"); + goto bad; + } + + report->datasize = sizeof(reportdata_80_gen3); + qdf_mem_copy(report->data, + reportdata_80_gen3, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_80, sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_80, sizeof(report->chan_info)); + + break; + case CH_WIDTH_160MHZ: + if (is_80_80) { + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_80_80_gen3)); + + if (!report->data) { + spectral_err("Spectral simulation: Could not allocate memory for report data"); + goto bad; + } + + report->datasize = sizeof(reportdata_80_80_gen3); + qdf_mem_copy(report->data, + reportdata_80_80_gen3, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_80_80, + sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_80_80, + sizeof(report->chan_info)); + + } else { + report->data = NULL; + report->data = (uint8_t *) + qdf_mem_malloc(sizeof(reportdata_160_gen3)); + + if (!report->data) { + spectral_err("Spectral simulation: Could not allocate memory for report data"); + goto bad; + } + + report->datasize = sizeof(reportdata_160_gen3); + qdf_mem_copy(report->data, + reportdata_160_gen3, report->datasize); + + qdf_mem_copy(&report->rfqual_info, + &rfqual_info_160, + sizeof(report->rfqual_info)); + + qdf_mem_copy(&report->chan_info, + &chan_info_160, sizeof(report->chan_info)); + } + break; + default: + spectral_err("Unhandled width. Please correct. 
Asserting"); + qdf_assert_always(0); + } + + return 0; + + bad: + return -EPERM; +} + +/** + * target_if_depopulate_report() - Free the given instances of + * struct spectralsim_report + * @report: instance of struct spectralsim_report + * + * Free the given instances of struct spectralsim_report + * + * Return: None + */ +static void +target_if_depopulate_report( + struct spectralsim_report *report) +{ + if (!report) + return; + + if (report->data) { + qdf_mem_free(report->data); + report->data = NULL; + report->datasize = 0; + } +} + +/** + * target_if_populate_reportset_static() - Statically populate simulation data + * for a given configuration + * @simctx: Pointer to struct spectralsim_context + * @reportset: Set of spectral report data instances + * @width : Channel bandwidth enumeration + * @is_80_80: Whether the channel is operating in 80+80 mode + * + * Statically populate simulation data for a given configuration + * + * Return: 0 on success, negative error code on failure + */ +static int +target_if_populate_reportset_static( + struct spectralsim_context *simctx, + struct spectralsim_reportset *reportset, + enum phy_ch_width width, bool is_80_80) +{ + int ret = 0; + struct spectralsim_report *report = NULL; + + qdf_assert_always(reportset); + + reportset->headreport = NULL; + reportset->curr_report = NULL; + + /* For now, we populate only one report */ + report = (struct spectralsim_report *) + qdf_mem_malloc(sizeof(struct spectralsim_report)); + + if (!report) { + spectral_err("Spectral simulation: Could not allocate memory for report."); + goto bad; + } + + qdf_mem_zero(report, sizeof(*report)); + + switch (width) { + case CH_WIDTH_20MHZ: + qdf_mem_copy(&reportset->config, + &config_20_1, sizeof(reportset->config)); + + ret = simctx->populate_report_static(report, CH_WIDTH_20MHZ, 0); + if (ret != 0) + goto bad; + + report->next = NULL; + reportset->headreport = report; + break; + case CH_WIDTH_40MHZ: + qdf_mem_copy(&reportset->config, + &config_40_1, 
sizeof(reportset->config)); + + ret = simctx->populate_report_static(report, CH_WIDTH_40MHZ, 0); + if (ret != 0) + goto bad; + + report->next = NULL; + reportset->headreport = report; + break; + case CH_WIDTH_80MHZ: + qdf_mem_copy(&reportset->config, + &config_80_1, sizeof(reportset->config)); + + ret = simctx->populate_report_static(report, CH_WIDTH_80MHZ, 0); + if (ret != 0) + goto bad; + + report->next = NULL; + reportset->headreport = report; + break; + case CH_WIDTH_160MHZ: + if (is_80_80) { + qdf_mem_copy(&reportset->config, + &config_80_80_1, + sizeof(reportset->config)); + + ret = simctx->populate_report_static(report, + CH_WIDTH_160MHZ, + 1); + if (ret != 0) + goto bad; + + report->next = NULL; + reportset->headreport = report; + } else { + qdf_mem_copy(&reportset->config, + &config_160_1, sizeof(reportset->config)); + + ret = simctx->populate_report_static(report, + CH_WIDTH_160MHZ, + 0); + if (ret != 0) + goto bad; + + report->next = NULL; + reportset->headreport = report; + } + break; + default: + spectral_err("Unhandled width. 
Please rectify."); + qdf_assert_always(0); + }; + + reportset->curr_report = reportset->headreport; + + return 0; + + bad: + target_if_depopulate_reportset(reportset); + return -EPERM; +} + +/** + * target_if_depopulate_reportset() - Free all the instances of + * struct spectralsim_reportset + * @report: head pointer to struct spectralsim_reportset linked list + * + * Free all the instances of struct spectralsim_reportset + * + * Return: None + */ +static void +target_if_depopulate_reportset( + struct spectralsim_reportset *reportset) +{ + struct spectralsim_report *curr_report = NULL; + struct spectralsim_report *next_report = NULL; + + if (!reportset) + return; + + curr_report = reportset->headreport; + + while (curr_report) { + next_report = curr_report->next; + target_if_depopulate_report(curr_report); + qdf_mem_free(curr_report); + curr_report = next_report; + } +} + +/** + * target_if_populate_simdata() - Populate simulation data + * @simctx: Pointer to struct spectralsim_context + * + * Populate simulation data + * + * Return: 0 on success, negative error code on failure + */ +static int +target_if_populate_simdata( + struct spectralsim_context *simctx) +{ + /* + * For now, we use static population. Switch to loading from a file if + * needed in the future. 
+ */ + + simctx->bw20_headreportset = NULL; + SPECTRAL_SIM_REPORTSET_ALLOCPOPL_SINGLE(simctx, + simctx->bw20_headreportset, + CH_WIDTH_20MHZ, 0); + + simctx->bw40_headreportset = NULL; + SPECTRAL_SIM_REPORTSET_ALLOCPOPL_SINGLE(simctx, + simctx->bw40_headreportset, + CH_WIDTH_40MHZ, 0); + + simctx->bw80_headreportset = NULL; + SPECTRAL_SIM_REPORTSET_ALLOCPOPL_SINGLE(simctx, + simctx->bw80_headreportset, + CH_WIDTH_80MHZ, 0); + + simctx->bw160_headreportset = NULL; + SPECTRAL_SIM_REPORTSET_ALLOCPOPL_SINGLE(simctx, + simctx->bw160_headreportset, + CH_WIDTH_160MHZ, 0); + + simctx->bw80_80_headreportset = NULL; + SPECTRAL_SIM_REPORTSET_ALLOCPOPL_SINGLE(simctx, + simctx->bw80_80_headreportset, + CH_WIDTH_160MHZ, 1); + + simctx->curr_reportset = NULL; + + simctx->is_enabled = false; + simctx->is_active = false; + + simctx->ssim_starting_tsf64 = 0; + simctx->ssim_count = 0; + simctx->ssim_period_ms = 0; + + return 0; +} + +/** + * target_if_depopulate_simdata() - De-populate simulation data + * @simctx: Pointer to struct spectralsim_context + * + * De-populate simulation data + * + * Return: none + */ +static void +target_if_depopulate_simdata( + struct spectralsim_context *simctx) +{ + if (!simctx) + return; + + SPECTRAL_SIM_REPORTSET_DEPOPLFREE_LIST(simctx->bw20_headreportset); + SPECTRAL_SIM_REPORTSET_DEPOPLFREE_LIST(simctx->bw40_headreportset); + SPECTRAL_SIM_REPORTSET_DEPOPLFREE_LIST(simctx->bw80_headreportset); + SPECTRAL_SIM_REPORTSET_DEPOPLFREE_LIST(simctx->bw160_headreportset); + SPECTRAL_SIM_REPORTSET_DEPOPLFREE_LIST(simctx->bw80_80_headreportset); +} + +/** + * target_if_spectral_sim_phyerrdelivery_handler() - Phyerr delivery handler + * + * Return: none + */ +static +OS_TIMER_FUNC(target_if_spectral_sim_phyerrdelivery_handler) +{ + struct target_if_spectral *spectral = NULL; + struct spectralsim_context *simctx = NULL; + struct spectralsim_reportset *curr_reportset = NULL; + struct spectralsim_report *curr_report = NULL; + struct target_if_spectral_acs_stats 
acs_stats; + uint64_t curr_tsf64 = 0; + struct target_if_spectral_ops *p_sops; + + OS_GET_TIMER_ARG(spectral, struct target_if_spectral *); + qdf_assert_always(spectral); + + p_sops = GET_TARGET_IF_SPECTRAL_OPS(spectral); + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *)spectral->simctx; + qdf_assert_always(simctx); + + if (!simctx->is_active) + return; + + curr_reportset = simctx->curr_reportset; + qdf_assert_always(curr_reportset); + + curr_report = curr_reportset->curr_report; + qdf_assert_always(curr_report); + + qdf_assert_always(curr_reportset->headreport); + + /* + * We use a simulation TSF since in offload architectures we can't + * expect to + * get an accurate current TSF from HW. + * In case of TSF wrap over, we'll use it as-is for now since the + * simulation + * is intended only for format verification. + */ + curr_tsf64 = simctx->ssim_starting_tsf64 + + ((simctx->ssim_period_ms * simctx->ssim_count) * 1000); + + p_sops->spectral_process_phyerr(spectral, + curr_report->data, + curr_report->datasize, + &curr_report->rfqual_info, + &curr_report->chan_info, + curr_tsf64, &acs_stats); + + simctx->ssim_count++; + + if (curr_report->next) + curr_reportset->curr_report = curr_report->next; + else + curr_reportset->curr_report = curr_reportset->headreport; + + if (curr_reportset->config.ss_count != 0 && + simctx->ssim_count == curr_reportset->config.ss_count) { + target_if_spectral_sops_sim_stop_scan(spectral); + } else { + qdf_timer_start(&simctx->ssim_pherrdelivery_timer, + simctx->ssim_period_ms); + } +} + +/* Module services */ + +int +target_if_spectral_sim_attach(struct target_if_spectral *spectral) +{ + struct spectralsim_context *simctx = NULL; + + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *) + qdf_mem_malloc(sizeof(struct spectralsim_context)); + + if (!simctx) { + spectral_err("Spectral simulation: Could not allocate memory for context"); + return -EPERM; + } + + qdf_mem_zero(simctx, 
sizeof(*simctx)); + + spectral->simctx = simctx; + + if (spectral->spectral_gen == SPECTRAL_GEN2) + simctx->populate_report_static = + target_if_populate_report_static_gen2; + else if (spectral->spectral_gen == SPECTRAL_GEN3) + simctx->populate_report_static = + target_if_populate_report_static_gen3; + + if (target_if_populate_simdata(simctx) != 0) { + qdf_mem_free(simctx); + spectral->simctx = NULL; + spectral_err("Spectral simulation attach failed"); + return -EPERM; + } + + qdf_timer_init(NULL, + &simctx->ssim_pherrdelivery_timer, + target_if_spectral_sim_phyerrdelivery_handler, + (void *)(spectral), QDF_TIMER_TYPE_WAKE_APPS); + + spectral_info("Spectral simulation attached"); + + return 0; +} + +void +target_if_spectral_sim_detach(struct target_if_spectral *spectral) +{ + struct spectralsim_context *simctx = NULL; + + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *)spectral->simctx; + qdf_assert_always(simctx); + + qdf_timer_free(&simctx->ssim_pherrdelivery_timer); + + target_if_depopulate_simdata(simctx); + qdf_mem_free(simctx); + spectral->simctx = NULL; + + spectral_info("Spectral simulation detached"); +} + +uint32_t +target_if_spectral_sops_sim_is_active(void *arg) +{ + struct target_if_spectral *spectral = NULL; + struct spectralsim_context *simctx = NULL; + + spectral = (struct target_if_spectral *)arg; + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *)spectral->simctx; + qdf_assert_always(simctx); + + return simctx->is_active; +} +qdf_export_symbol(target_if_spectral_sops_sim_is_active); + +uint32_t +target_if_spectral_sops_sim_is_enabled(void *arg) +{ + struct target_if_spectral *spectral = NULL; + struct spectralsim_context *simctx = NULL; + + spectral = (struct target_if_spectral *)arg; + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *)spectral->simctx; + qdf_assert_always(simctx); + + return simctx->is_enabled; +} +qdf_export_symbol(target_if_spectral_sops_sim_is_enabled); + 
+uint32_t +target_if_spectral_sops_sim_start_scan(void *arg) +{ + struct target_if_spectral *spectral = NULL; + struct spectralsim_context *simctx = NULL; + + spectral = (struct target_if_spectral *)arg; + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *)spectral->simctx; + qdf_assert_always(simctx); + + if (!simctx->curr_reportset) { + spectral_err("Spectral simulation: No current report set configured - unable to start simulated Spectral scan"); + return 0; + } + + if (!simctx->curr_reportset->curr_report) { + spectral_err("Spectral simulation: No report data instances populated - unable to start simulated Spectral scan"); + return 0; + } + + if (!simctx->is_enabled) + simctx->is_enabled = true; + + simctx->is_active = true; + + /* Hardcoding current time as zero since it is simulation */ + simctx->ssim_starting_tsf64 = 0; + simctx->ssim_count = 0; + + /* + * TODO: Support high resolution timer in microseconds if required, so + * that + * we can support default periods such as ~200 us. For now, we use 1 + * millisecond since the current use case for the simulation is to + * validate + * formats rather than have a time dependent classification. 
+ */ + simctx->ssim_period_ms = 1; + + qdf_timer_start(&simctx->ssim_pherrdelivery_timer, + simctx->ssim_period_ms); + + return 1; +} +qdf_export_symbol(target_if_spectral_sops_sim_start_scan); + +uint32_t +target_if_spectral_sops_sim_stop_scan(void *arg) +{ + struct target_if_spectral *spectral = NULL; + struct spectralsim_context *simctx = NULL; + + spectral = (struct target_if_spectral *)arg; + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *)spectral->simctx; + qdf_assert_always(simctx); + + qdf_timer_stop(&simctx->ssim_pherrdelivery_timer); + + simctx->is_active = false; + simctx->is_enabled = false; + + simctx->ssim_starting_tsf64 = 0; + simctx->ssim_count = 0; + simctx->ssim_period_ms = 0; + + return 1; +} +qdf_export_symbol(target_if_spectral_sops_sim_stop_scan); + +#ifdef SPECTRAL_SIM_DUMP_PARAM_DATA +static void +target_if_log_sim_spectral_params(struct spectral_config *params) +{ + int i = 0; + + spectral_debug("\n"); + + spectral_debug("Spectral simulation: Param data dump:\nss_fft_period=%hu\nss_period=%hu\nss_count=%hu\nss_short_report=%hu\nradar_bin_thresh_sel=%hhu\nss_spectral_pri=%hu\nss_fft_size=%hu\nss_gc_ena=%hu\nss_restart_ena=%hu\nss_noise_floor_ref=%hu\nss_init_delay=%hu\nss_nb_tone_thr=%hu\nss_str_bin_thr=%hu\nss_wb_rpt_mode=%hu\nss_rssi_rpt_mode=%hu\nss_rssi_thr=%hu\nss_pwr_format=%hu\nss_rpt_mode=%hu\nss_bin_scale=%hu\nss_dbm_adj=%hu\nss_chn_mask=%hu\nss_nf_temp_data=%d", + params->ss_fft_period, + params->ss_period, + params->ss_count, + params->ss_short_report, + params->radar_bin_thresh_sel, + params->ss_spectral_pri, + params->ss_fft_size, + params->ss_gc_ena, + params->ss_restart_ena, + params->ss_noise_floor_ref, + params->ss_init_delay, + params->ss_nb_tone_thr, + params->ss_str_bin_thr, + params->ss_wb_rpt_mode, + params->ss_rssi_rpt_mode, + params->ss_rssi_thr, + params->ss_pwr_format, + params->ss_rpt_mode, + params->ss_bin_scale, + params->ss_dbm_adj, + params->ss_chn_mask, params->ss_nf_temp_data); + + 
for (i = 0; i < AH_MAX_CHAINS * 2; i++) + spectral_debug("ss_nf_cal[%d]=%hhd", i, params->ss_nf_cal[i]); + + for (i = 0; i < AH_MAX_CHAINS * 2; i++) + spectral_debug("ss_nf_pwr[%d]=%hhd", i, params->ss_nf_pwr[i]); + + spectral_info("\n"); +} +#else + +static void +target_if_log_sim_spectral_params(struct spectral_config *params) +{ +} +#endif /* SPECTRAL_SIM_DUMP_PARAM_DATA */ + +uint32_t +target_if_spectral_sops_sim_configure_params( + void *arg, + struct spectral_config *params) +{ + struct target_if_spectral *spectral = NULL; + struct spectralsim_context *simctx = NULL; + enum wlan_phymode phymode; + uint8_t bw; + struct spectralsim_reportset *des_headreportset = NULL; + struct spectralsim_reportset *temp_reportset = NULL; + bool is_invalid_width = false; + struct wlan_objmgr_vdev *vdev = NULL; + + qdf_assert_always(params); + target_if_log_sim_spectral_params(params); + spectral = (struct target_if_spectral *)arg; + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *)spectral->simctx; + qdf_assert_always(simctx); + + vdev = target_if_spectral_get_vdev(spectral); + if (!vdev) { + spectral_warn("Spectral simulation: No VAPs found - not proceeding with param config."); + return 0; + } + + bw = target_if_vdev_get_ch_width(vdev); + + switch (bw) { + case CH_WIDTH_20MHZ: + des_headreportset = simctx->bw20_headreportset; + break; + case CH_WIDTH_40MHZ: + des_headreportset = simctx->bw40_headreportset; + break; + case CH_WIDTH_80MHZ: + des_headreportset = simctx->bw80_headreportset; + break; + case CH_WIDTH_160MHZ: + phymode = wlan_vdev_get_phymode(vdev); + if (phymode == WLAN_PHYMODE_11AC_VHT160) { + des_headreportset = simctx->bw160_headreportset; + } else if (phymode == WLAN_PHYMODE_11AC_VHT80_80) { + des_headreportset = simctx->bw80_80_headreportset; + } else { + spectral_err("Spectral simulation: Unexpected PHY mode %u found for width 160 MHz...asserting.", + phymode); + qdf_assert_always(0); + } + break; + + case IEEE80211_CWM_WIDTHINVALID: + 
spectral_err("Spectral simulation: Invalid width configured - not proceeding with param config."); + is_invalid_width = true; + default: + spectral_err("Spectral simulation: Unknown width %u...asserting", + bw); + qdf_assert_always(0); + break; + } + + wlan_objmgr_vdev_release_ref(vdev, WLAN_SPECTRAL_ID); + + if (is_invalid_width) + return 0; + + if (!des_headreportset) { + spectral_warn("Spectral simulation: No simulation data present for configured bandwidth/PHY mode - unable to proceed with param config."); + return 0; + } + + simctx->curr_reportset = NULL; + temp_reportset = des_headreportset; + + while (temp_reportset) { + if (qdf_mem_cmp(&temp_reportset->config, + params, sizeof(struct spectral_config)) == 0) { + /* Found a matching config. We are done. */ + simctx->curr_reportset = temp_reportset; + break; + } + + temp_reportset = temp_reportset->next; + } + + if (!simctx->curr_reportset) { + spectral_warn("Spectral simulation: No simulation data present for desired Spectral configuration - unable to proceed with param config."); + return 0; + } + + if (!simctx->curr_reportset->curr_report) { + spectral_warn("Spectral simulation: No report data instances populated for desired Spectral configuration - unable to proceed with param config"); + return 0; + } + + return 1; +} +qdf_export_symbol(target_if_spectral_sops_sim_configure_params); + +uint32_t +target_if_spectral_sops_sim_get_params( + void *arg, struct spectral_config *params) +{ + struct target_if_spectral *spectral = NULL; + struct spectralsim_context *simctx = NULL; + + qdf_assert_always(params); + + spectral = (struct target_if_spectral *)arg; + qdf_assert_always(spectral); + + simctx = (struct spectralsim_context *)spectral->simctx; + qdf_assert_always(simctx); + + if (!simctx->curr_reportset) { + spectral_warn("Spectral simulation: No configured reportset found."); + return 0; + } + + qdf_mem_copy(params, &simctx->curr_reportset->config, sizeof(*params)); + + return 1; +} 
+qdf_export_symbol(target_if_spectral_sops_sim_get_params); + +#endif /* QCA_SUPPORT_SPECTRAL_SIMULATION */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim.h b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim.h new file mode 100644 index 0000000000000000000000000000000000000000..308186c1be3ed65e08d3c9203afb8a5a4694cef7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2015,2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _SPECTRAL_SIM_H_ +#define _SPECTRAL_SIM_H_ + +#ifdef QCA_SUPPORT_SPECTRAL_SIMULATION +#include "target_if_spectral.h" + +/** + * target_if_spectral_sim_attach() - Initialize Spectral Simulation + * functionality + * @spectral - pointer to spectral internal data structure + * + * Setup data structures to be used for serving out data corresponding to + * various bandwidths and configurations. + * + * Return: Integer status value. 
0:Success, -1:Failure + */ +int target_if_spectral_sim_attach(struct target_if_spectral *spectral); + +/** + * target_if_spectral_sim_detach() - De-initialize Spectral Simulation + * functionality + * @spectral - pointer to spectral internal data structure + * + * Free up data structures used for serving out data corresponding to various + * bandwidths and configurations. + * + * Return: None + */ +void target_if_spectral_sim_detach(struct target_if_spectral *spectral); + +/** + * target_if_spectral_sops_sim_is_active() - Check if Spectral(simulated) is + * active + * @arg - pointer to spectral internal data structure + * + * Check if Spectral (simulated) is active + * + * Return: Integer status value. 0: Not active, 1: Active + */ +uint32_t target_if_spectral_sops_sim_is_active(void *arg); + +/** + * target_if_spectral_sops_sim_is_enabled() - Check if Spectral(simulated) is + * enabled + * @arg - pointer to spectral internal data structure + * + * Check if Spectral(simulated) is enabled + * + * Return: Integer status value. 0: Not enabled, 1: Enabled + */ +uint32_t target_if_spectral_sops_sim_is_enabled(void *arg); + +/** + * target_if_spectral_sops_sim_start_scan() - Start Spectral simulation + * @arg - pointer to spectral internal data structure + * + * Start Spectral simulation + * + * Return: Integer status value. 0: Failure, 1: Success + */ +uint32_t target_if_spectral_sops_sim_start_scan(void *arg); + +/** + * target_if_spectral_sops_sim_stop_scan() - Stop Spectral simulation + * @arg - pointer to spectral internal data structure + * + * Stop Spectral simulation + * + * Return: Integer status value. 
0: Failure, 1: Success + */ +uint32_t target_if_spectral_sops_sim_stop_scan(void *arg); + +/** + * target_if_spectral_sops_sim_configure_params() - Configure Spectral + * parameters into simulation + * arg - pointer to ath_spectral structure + * params - pointer to struct spectral_config structure bearing Spectral + * configuration + * + * Internally, this function actually searches if a record set with the desired + * configuration has been loaded. If so, it points to the record set for + * later usage when the simulation is started. If not, it returns an error. + * + * Return: Integer status value. 0: Failure, 1: Success + */ +uint32_t target_if_spectral_sops_sim_configure_params( + void *arg, + struct spectral_config *params); + +/** + * target_if_spectral_sops_sim_get_params() - Get Spectral parameters configured + * into simulation + * arg - pointer to ath_spectral structure + * params - pointer to struct spectral_config structure which should be + * populated with Spectral configuration + * + * Get Spectral parameters configured into simulation + * + * Return: Integer status value. 0: Failure, 1: Success + */ +uint32_t target_if_spectral_sops_sim_get_params( + void *arg, + struct spectral_config *params); + +#endif /* QCA_SUPPORT_SPECTRAL_SIMULATION */ +#endif /* _SPECTRAL_SIM_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim_int.h b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim_int.h new file mode 100644 index 0000000000000000000000000000000000000000..0bc58401531bfd4d13dd9f723f5af426cd5a039e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/spectral/target_if_spectral_sim_int.h @@ -0,0 +1,1015 @@ +/* + * Copyright (c) 2015,2017-2018 The Linux Foundation. All rights reserved. 
+ * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _SPECTRAL_SIM_INTERNAL_H_ +#define _SPECTRAL_SIM_INTERNAL_H_ + +#ifdef QCA_SUPPORT_SPECTRAL_SIMULATION +#include "target_if_spectral.h" + +/* #define SPECTRAL_SIM_DUMP_PARAM_DATA 1 */ +/** + * struct spectralsim_report - Linked list node of spectal simulation report + * Spectral report data instance. Usable in a linked list. + * In the case of Direct Attach chipsets, one instance should correspond to + * one PHY Data Error frame received from the HW. + * XXX Direct Attach support to be implemented if needed. Any modifications + * required here can be made at the time of implementation. + * In the case of 802.11ac offload chipsets, one instance should correspond to + * one report received from HW, inclusive of all TLVs. 
+ * + * @rfqual_info: RF measurement information + * @chan_info: Channel information + * @datasize: Length of report data + * @data: Pointer to report data + * @next: Pointer to next node in the struct spectralsim_report + */ +struct spectralsim_report { + /* 11ac onwards only */ + struct target_if_spectral_rfqual_info rfqual_info; + /* 11ac onwards only */ + struct target_if_spectral_chan_info chan_info; + uint32_t datasize; + uint8_t *data; + struct spectralsim_report *next; +}; + +/** + * struct spectralsim_reportset - Set of Spectral report data instances + * corresponding to one particular configuration. Usable in a linked list. + * @config: Spectral config parameters + * @headreport: Pointer to the linked list of struct spectralsim_report + * @curr_report: Pointer to current node in the linked list of + * struct spectralsim_report + * @next: Pointer to next node in the struct spectralsim_reportset + */ +struct spectralsim_reportset { + struct spectral_config config; + struct spectralsim_report *headreport; + struct spectralsim_report *curr_report; + struct spectralsim_reportset *next; +}; + +/* + * struct spectralsim_context - Main structure for Spectral simulation. + * All data and controls get linked here. + * + * For each width (20/40/80/160/80+80), we will have a linked list of + * spectralsim_reportset nodes. Each struct spectralsim_reportset will have a + * linked list of struct spectralsim_report nodes. When the user requests for a + * given PHY mode and Spectral configuration, we find the appropriate + * spectralsim_reportset, and then serve struct spectralsim_report instances + * from the linked list. If required report count is higher than size of linked + * list (or infinite), we repeatedly cycle through the linked list. There can + * be more elaborate data structures devised taking care of a large number of + * possibilities, but we stick to a simple scheme given limited simulation + * needs. 
+ * + * @bw20_headreportset : Linked list of spectralsim_reportset for 20MHz width + * @bw20_headreportset : Linked list of spectralsim_reportset for 40MHz width + * @bw20_headreportset : Linked list of spectralsim_reportset for 80MHz width + * @bw20_headreportset : Linked list of spectralsim_reportset for 160MHz width + * @bw20_headreportset : Linked list of spectralsim_reportset for 80_80MHz width + * @curr_reportset : Pointer to current node in the linked list of + * struct spectralsim_reportset + * @is_enabled : Whether the simulated spectral scan is set as enabled + * @is_active : Whether the simulated spectral scan is set as active + * @ssim_pherrdelivery_timer : Simulated Phyerr delivery timer + * @ssim_starting_tsf64 : Starting 64-bit TSF value for spectral simulation + * @ssim_period_ms : Simulated Phyerr delivery period in ms + * @ssim_count : Number of simulated spectral samples to deliver + * @populate_report_static : Pointer to function to populate static spectral + * report data + */ +struct spectralsim_context { + struct spectralsim_reportset *bw20_headreportset; + struct spectralsim_reportset *bw40_headreportset; + struct spectralsim_reportset *bw80_headreportset; + struct spectralsim_reportset *bw160_headreportset; + struct spectralsim_reportset *bw80_80_headreportset; + + struct spectralsim_reportset *curr_reportset; + bool is_enabled; + bool is_active; + + qdf_timer_t ssim_pherrdelivery_timer; + uint64_t ssim_starting_tsf64; + uint32_t ssim_period_ms; /* TODO: Support in microseconds */ + uint32_t ssim_count; + int (*populate_report_static)(struct spectralsim_report *report, + enum phy_ch_width width, bool is_80_80); +}; + +/* Helper Macros */ + +/* Allocate and populate reportset for a single configuration */ +#define SPECTRAL_SIM_REPORTSET_ALLOCPOPL_SINGLE(simctx, reportset, width, \ + is_80_80) \ + { \ + (reportset) = (struct spectralsim_reportset *) \ + qdf_mem_malloc(sizeof(struct spectralsim_reportset)); \ + \ + if ((reportset) == NULL) { \ 
+ spectral_err("Spectral simulation: Could not allocate memory for report set"); \ + target_if_depopulate_simdata((simctx)); \ + return -EPERM; \ + } \ + \ + qdf_mem_zero((reportset), sizeof(struct spectralsim_reportset)); \ + \ + if (target_if_populate_reportset_static( \ + (simctx), (reportset), (width), (is_80_80)) != 0) { \ + target_if_depopulate_simdata((simctx)); \ + return -EPERM; \ + } \ + \ + (reportset)->next = NULL; \ + } + +/* Depopulate and free list of report sets */ +#define SPECTRAL_SIM_REPORTSET_DEPOPLFREE_LIST(reportset) \ + { \ + struct spectralsim_reportset *curr_reportset = NULL; \ + struct spectralsim_reportset *next_reportset = NULL; \ + \ + curr_reportset = (reportset); \ + \ + while (curr_reportset) { \ + next_reportset = curr_reportset->next; \ + target_if_depopulate_reportset(curr_reportset); \ + qdf_mem_free(curr_reportset); \ + curr_reportset = next_reportset; \ + } \ + \ + (reportset) = NULL; \ + } + +/* Values for static population */ + +/* 20 MHz */ + +/* Report data for 20MHz bandwidth for generation 2 chipsets */ +static uint8_t reportdata_20_gen2[] = { +#ifdef BIG_ENDIAN_HOST + 0xbb, /* Signature */ + 0xfb, /* Tag */ + 0x00, /* Size */ + 0x54, + 0x2e, 0x60, 0x0f, 0xe8, /* FFT Summary A */ + 0x00, 0x00, 0x04, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#else + 0x54, /* Length */ + 0x00, + 0xfb, /* Tag */ + 0xbb, /* Signature */ + 0xe8, 0x0f, 0x60, 0x2e, /* FFT Summary A */ + 0x00, 0x04, 0x00, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 1, 2, 0, 1, 1, 1, 0, + 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, + 1, 1, 0, 2, 1, 2, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, +}; + +/* Report data for 20MHz bandwidth for generation 3 chipsets */ +static uint8_t reportdata_20_gen3[] = { +#ifdef BIG_ENDIAN_HOST + 0x12, 0x34, 0x56, 0x78, /* fft_timestamp */ + 
0xfa, /* fft_hdr_sig */ + 0x03, /* fft_hdr_tag */ + 0x00, /* fft_hdr_length */ + 0x14, + 0x0f, 0xf6, 0x00, 0xe0, + 0x00, 0x00, 0x2f, 0xba, + 0x20, 0xb4, 0x2c, 0x01, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#else + 0x78, 0x56, 0x34, 0x12, /* fft_timestamp */ + 0x14, /* fft_hdr_length */ + 0x00, + 0x03, /* fft_hdr_tag */ + 0xfa, /* fft_hdr_sig */ + 0xe0, 0x00, 0xf6, 0x0f, + 0xba, 0x2f, 0x00, 0x00, + 0x01, 0x2c, 0xb4, 0x20, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 1, 2, 0, 1, 1, 1, 0, + 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, + 1, 1, 0, 2, 1, 2, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, +}; + +/* RF measurement information for 20 MHz bandwidth */ +static struct target_if_spectral_rfqual_info rfqual_info_20 = { + .rssi_comb = 1, + + .pc_rssi_info[0].rssi_pri20 = 1, + .pc_rssi_info[0].rssi_sec20 = 128, + .pc_rssi_info[0].rssi_sec40 = 128, + .pc_rssi_info[0].rssi_sec80 = 128, + + .pc_rssi_info[1].rssi_pri20 = 128, + .pc_rssi_info[1].rssi_sec20 = 128, + .pc_rssi_info[1].rssi_sec40 = 128, + .pc_rssi_info[1].rssi_sec80 = 128, + + .pc_rssi_info[2].rssi_pri20 = 128, + .pc_rssi_info[2].rssi_sec20 = 128, + .pc_rssi_info[2].rssi_sec40 = 128, + .pc_rssi_info[2].rssi_sec80 = 128, + + .pc_rssi_info[3].rssi_pri20 = 128, + .pc_rssi_info[3].rssi_sec20 = 128, + .pc_rssi_info[3].rssi_sec40 = 128, + .pc_rssi_info[3].rssi_sec80 = 128, + + .noise_floor[0] = -90, + .noise_floor[1] = -90, + .noise_floor[2] = -90, + .noise_floor[3] = -90, +}; + +/* Channel information for 20 MHz bandwidth */ +static struct target_if_spectral_chan_info chan_info_20 = { + .center_freq1 = 5180, + .center_freq2 = 0, + .chan_width = 20, +}; + +/* Spectral config parameters for 20 MHz bandwidth */ +static struct spectral_config config_20_1 = { + .ss_fft_period = 1, + .ss_period = 35, + .ss_count = 0, + .ss_short_report = 1, + .radar_bin_thresh_sel = 0, + .ss_spectral_pri = 1, + .ss_fft_size = 7, 
+ .ss_gc_ena = 1, + .ss_restart_ena = 0, + .ss_noise_floor_ref = 65440, + .ss_init_delay = 80, + .ss_nb_tone_thr = 12, + .ss_str_bin_thr = 8, + .ss_wb_rpt_mode = 0, + .ss_rssi_rpt_mode = 0, + .ss_rssi_thr = 240, + .ss_pwr_format = 0, + .ss_rpt_mode = 2, + .ss_bin_scale = 1, + .ss_dbm_adj = 1, + .ss_chn_mask = 1, + .ss_nf_cal[0] = 0, + .ss_nf_cal[1] = 0, + .ss_nf_cal[2] = 0, + .ss_nf_cal[3] = 0, + .ss_nf_cal[4] = 0, + .ss_nf_cal[5] = 0, + .ss_nf_pwr[0] = 0, + .ss_nf_pwr[1] = 0, + .ss_nf_pwr[2] = 0, + .ss_nf_pwr[3] = 0, + .ss_nf_pwr[4] = 0, + .ss_nf_pwr[5] = 0, + .ss_nf_temp_data = 0, +}; + +/* 40 MHz */ + +/* Report data for 40MHz bandwidth for generation 2 chipsets */ +static uint8_t reportdata_40_gen2[] = { +#ifdef BIG_ENDIAN_HOST + 0xbb, /* Signature */ + 0xfb, /* Tag */ + 0x00, /* Size */ + 0x94, + 0x2e, 0x61, 0x0f, 0x80, /* FFT Summary A */ + 0x00, 0x00, 0x06, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#else + 0x94, /* Length */ + 0x00, + 0xfb, /* Tag */ + 0xbb, /* Signature */ + 0x80, 0x0f, 0x61, 0x2e, /* FFT Summary A */ + 0x00, 0x06, 0x00, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, + 0, 0, 0, 1, 0, 0, 0, 0, 2, 1, 0, 2, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, + 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, +}; + +/* Report data for 40MHz bandwidth for generation 3 chipsets */ +static uint8_t reportdata_40_gen3[] = { +#ifdef BIG_ENDIAN_HOST + 0x12, 0x34, 0x56, 0x78, /* fft_timestamp */ + 0xfa, /* fft_hdr_sig */ + 0x03, /* fft_hdr_tag */ + 0x00, /* fft_hdr_length */ + 0x24, + 0x0f, 0xf6, 0x00, 0xe0, + 0x00, 0x00, 0x2f, 0xba, + 0x20, 0xb4, 0x2c, 0x01, + 0x00, 0x00, 0x00, 
0x00, /* reserved */ +#else + 0x78, 0x56, 0x34, 0x12, /* fft_timestamp */ + 0x24, /* fft_hdr_length */ + 0x00, + 0x03, /* fft_hdr_tag */ + 0xfa, /* fft_hdr_sig */ + 0xe0, 0x00, 0xf6, 0x0f, + 0xba, 0x2f, 0x00, 0x00, + 0x01, 0x2c, 0xb4, 0x20, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, + 0, 0, 0, 1, 0, 0, 0, 0, 2, 1, 0, 2, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, + 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 1, 0, 0, 0, 0, +}; + +/* RF measurement information for 40 MHz bandwidth */ +static struct target_if_spectral_rfqual_info rfqual_info_40 = { + .rssi_comb = 1, + + .pc_rssi_info[0].rssi_pri20 = 1, + .pc_rssi_info[0].rssi_sec20 = 2, + .pc_rssi_info[0].rssi_sec40 = 128, + .pc_rssi_info[0].rssi_sec80 = 128, + + .pc_rssi_info[1].rssi_pri20 = 128, + .pc_rssi_info[1].rssi_sec20 = 128, + .pc_rssi_info[1].rssi_sec40 = 128, + .pc_rssi_info[1].rssi_sec80 = 128, + + .pc_rssi_info[2].rssi_pri20 = 128, + .pc_rssi_info[2].rssi_sec20 = 128, + .pc_rssi_info[2].rssi_sec40 = 128, + .pc_rssi_info[2].rssi_sec80 = 128, + + .pc_rssi_info[3].rssi_pri20 = 128, + .pc_rssi_info[3].rssi_sec20 = 128, + .pc_rssi_info[3].rssi_sec40 = 128, + .pc_rssi_info[3].rssi_sec80 = 128, + + .noise_floor[0] = -90, + .noise_floor[1] = -90, + .noise_floor[2] = -90, + .noise_floor[3] = -90, +}; + +/* Channel information for 40 MHz bandwidth */ +static struct target_if_spectral_chan_info chan_info_40 = { + .center_freq1 = 5180, + .center_freq2 = 0, + .chan_width = 40, +}; + +/* Spectral config parameters for 40 MHz bandwidth */ +static struct spectral_config config_40_1 = { + .ss_fft_period = 1, + .ss_period = 35, + .ss_count = 0, + .ss_short_report = 1, + .radar_bin_thresh_sel = 0, + .ss_spectral_pri = 1, + 
.ss_fft_size = 8, + .ss_gc_ena = 1, + .ss_restart_ena = 0, + .ss_noise_floor_ref = 65440, + .ss_init_delay = 80, + .ss_nb_tone_thr = 12, + .ss_str_bin_thr = 8, + .ss_wb_rpt_mode = 0, + .ss_rssi_rpt_mode = 0, + .ss_rssi_thr = 240, + .ss_pwr_format = 0, + .ss_rpt_mode = 2, + .ss_bin_scale = 1, + .ss_dbm_adj = 1, + .ss_chn_mask = 1, + .ss_nf_cal[0] = 0, + .ss_nf_cal[1] = 0, + .ss_nf_cal[2] = 0, + .ss_nf_cal[3] = 0, + .ss_nf_cal[4] = 0, + .ss_nf_cal[5] = 0, + .ss_nf_pwr[0] = 0, + .ss_nf_pwr[1] = 0, + .ss_nf_pwr[2] = 0, + .ss_nf_pwr[3] = 0, + .ss_nf_pwr[4] = 0, + .ss_nf_pwr[5] = 0, + .ss_nf_temp_data = 0, +}; + +/* 80 MHz */ + +/* Report data for 80MHz bandwidth for generation 2 chipsets */ +static uint8_t reportdata_80_gen2[] = { +#ifdef BIG_ENDIAN_HOST + 0xbb, /* Signature */ + 0xfb, /* Tag */ + 0x01, /* Size */ + 0x14, + 0x19, 0xeb, 0x80, 0x40, /* FFT Summary A */ + 0x00, 0x00, 0x10, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#else + 0x14, /* Length */ + 0x01, + 0xfb, /* Tag */ + 0xbb, /* Signature */ + 0x40, 0x80, 0xeb, 0x19, /* FFT Summary A */ + 0x00, 0x10, 0x00, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +/* Report data for 80MHz bandwidth for generation 3 chipsets */ +static uint8_t reportdata_80_gen3[] = { +#ifdef BIG_ENDIAN_HOST + 0x12, 0x34, 0x56, 0x78, /* fft_timestamp */ + 0xfa, /* fft_hdr_sig */ + 0x03, /* fft_hdr_tag */ + 0x00, /* fft_hdr_length */ + 0x44, + 0x0f, 0xf6, 0x00, 0xe0, + 0x00, 0x00, 0x2f, 0xba, + 0x20, 0xb4, 0x2c, 0x01, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#else + 0x78, 0x56, 0x34, 0x12, /* fft_timestamp */ + 0x44, /* fft_hdr_length */ + 0x00, + 0x03, /* fft_hdr_tag */ + 0xfa, /* fft_hdr_sig */ + 0xe0, 0x00, 0xf6, 0x0f, + 0xba, 0x2f, 0x00, 0x00, + 0x01, 0x2c, 0xb4, 0x20, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +/* RF measurement information for 80 MHz bandwidth */ +static struct target_if_spectral_rfqual_info rfqual_info_80 = { + .rssi_comb = 16, + + .pc_rssi_info[0].rssi_pri20 = 16, + .pc_rssi_info[0].rssi_sec20 = 17, + .pc_rssi_info[0].rssi_sec40 = 0, + .pc_rssi_info[0].rssi_sec80 = 128, + + .pc_rssi_info[1].rssi_pri20 = 128, + .pc_rssi_info[1].rssi_sec20 = 128, + .pc_rssi_info[1].rssi_sec40 = 128, + .pc_rssi_info[1].rssi_sec80 = 128, + + .pc_rssi_info[2].rssi_pri20 
= 128, + .pc_rssi_info[2].rssi_sec20 = 128, + .pc_rssi_info[2].rssi_sec40 = 128, + .pc_rssi_info[2].rssi_sec80 = 128, + + .pc_rssi_info[3].rssi_pri20 = 128, + .pc_rssi_info[3].rssi_sec20 = 128, + .pc_rssi_info[3].rssi_sec40 = 128, + .pc_rssi_info[3].rssi_sec80 = 128, + + .noise_floor[0] = -90, + .noise_floor[1] = -90, + .noise_floor[2] = -90, + .noise_floor[3] = -90, +}; + +/* Channel information for 80 MHz bandwidth */ +static struct target_if_spectral_chan_info chan_info_80 = { + .center_freq1 = 5210, + .center_freq2 = 0, + .chan_width = 80, +}; + +/* Spectral config parameters for 80 MHz bandwidth */ +static struct spectral_config config_80_1 = { + .ss_fft_period = 1, + .ss_period = 35, + .ss_count = 0, + .ss_short_report = 1, + .radar_bin_thresh_sel = 0, + .ss_spectral_pri = 1, + .ss_fft_size = 9, + .ss_gc_ena = 1, + .ss_restart_ena = 0, + .ss_noise_floor_ref = 65440, + .ss_init_delay = 80, + .ss_nb_tone_thr = 12, + .ss_str_bin_thr = 8, + .ss_wb_rpt_mode = 0, + .ss_rssi_rpt_mode = 0, + .ss_rssi_thr = 240, + .ss_pwr_format = 0, + .ss_rpt_mode = 2, + .ss_bin_scale = 1, + .ss_dbm_adj = 1, + .ss_chn_mask = 1, + .ss_nf_cal[0] = 0, + .ss_nf_cal[1] = 0, + .ss_nf_cal[2] = 0, + .ss_nf_cal[3] = 0, + .ss_nf_cal[4] = 0, + .ss_nf_cal[5] = 0, + .ss_nf_pwr[0] = 0, + .ss_nf_pwr[1] = 0, + .ss_nf_pwr[2] = 0, + .ss_nf_pwr[3] = 0, + .ss_nf_pwr[4] = 0, + .ss_nf_pwr[5] = 0, + .ss_nf_temp_data = 0, +}; + +/* 160 MHz */ + +/* Report data for 160MHz bandwidth for generation 2 chipsets */ +static uint8_t reportdata_160_gen2[] = { + /* Segment 1 */ +#ifdef BIG_ENDIAN_HOST + 0xbb, /* Signature */ + 0xfb, /* Tag */ + 0x01, /* Size */ + 0x14, + 0x23, 0x66, 0x00, 0x40, /* FFT Summary A */ + 0x5c, 0x5c, 0x78, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#else + 0x14, /* Length */ + 0x01, + 0xfb, /* Tag */ + 0xbb, /* Signature */ + 0x40, 0x00, 0x66, 0x23, /* FFT Summary A */ + 0x00, 0x78, 0x5c, 0x5c, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ 
+#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 2, 4, 60, 4, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + + /* Segment 2 */ +#ifdef BIG_ENDIAN_HOST + 0xbb, /* Signature */ + 0xfb, /* Tag */ + 0x01, /* Size */ + 0x14, + 0x23, 0x66, 0x00, 0x40, /* FFT Summary A */ + 0x5c, 0x5c, 0x78, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x01, /* Segment ID */ +#else + 0x14, /* Length */ + 0x01, + 0xfb, /* Tag */ + 0xbb, /* Signature */ + 0x40, 0x00, 0x66, 0x23, /* FFT Summary A */ + 0x00, 0x78, 0x5c, 0x5c, /* FFT Summary B */ + 0x01, 0x00, 0x00, 0x00, /* Segment ID */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 2, 4, 60, 4, 2, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, +}; + +/* Report data for 160MHz bandwidth for generation 3 chipsets */ +static uint8_t reportdata_160_gen3[] = { + /* Segment 1 */ +#ifdef BIG_ENDIAN_HOST + 0x12, 0x34, 0x56, 0x78, /* fft_timestamp */ + 0xfa, /* fft_hdr_sig */ + 0x03, /* fft_hdr_tag */ + 0x00, /* fft_hdr_length */ + 0x44, + 0x0f, 0xf6, 0x00, 0xe0, + 0x00, 0x00, 0x2f, 0xba, + 0x20, 0xb4, 0x2c, 0x01, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#else + 0x78, 0x56, 0x34, 0x12, /* fft_timestamp */ + 0x44, /* fft_hdr_length */ + 0x00, + 0x03, /* fft_hdr_tag */ + 0xfa, /* fft_hdr_sig */ + 0xe0, 0x00, 0xf6, 0x0f, + 0xba, 0x2f, 0x00, 0x00, + 0x01, 0x2c, 0xb4, 0x20, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 2, 4, 60, 4, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + + /* Segment 2 */ +#ifdef BIG_ENDIAN_HOST + 0x12, 0x34, 0x56, 0x78, /* fft_timestamp */ + 0xfa, /* fft_hdr_sig */ + 0x03, /* fft_hdr_tag */ + 0x00, /* fft_hdr_length */ + 0x44, + 0x0f, 0xf6, 0x00, 0xe1, + 0x00, 0x00, 0x2f, 0xba, + 0x20, 0xb4, 0x2c, 0x01, + 0x00, 0x00, 0x00, 0x00, /* reserved */ 
+#else + 0x78, 0x56, 0x34, 0x12, /* fft_timestamp */ + 0x44, /* fft_hdr_length */ + 0x00, + 0x03, /* fft_hdr_tag */ + 0xfa, /* fft_hdr_sig */ + 0xe1, 0x00, 0xf6, 0x0f, + 0xba, 0x2f, 0x00, 0x00, + 0x01, 0x2c, 0xb4, 0x20, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 2, 4, 60, 4, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +/* RF measurement information for 160 MHz bandwidth */ +static struct target_if_spectral_rfqual_info rfqual_info_160 = { + .rssi_comb = 3, + + .pc_rssi_info[0].rssi_pri20 = 3, + .pc_rssi_info[0].rssi_sec20 = 12, + .pc_rssi_info[0].rssi_sec40 = 41, + .pc_rssi_info[0].rssi_sec80 = 128, + + .pc_rssi_info[1].rssi_pri20 = 128, + .pc_rssi_info[1].rssi_sec20 = 128, + .pc_rssi_info[1].rssi_sec40 = 128, + .pc_rssi_info[1].rssi_sec80 = 128, + + .pc_rssi_info[2].rssi_pri20 = 128, + .pc_rssi_info[2].rssi_sec20 = 128, + .pc_rssi_info[2].rssi_sec40 = 128, + .pc_rssi_info[2].rssi_sec80 = 128, + + .pc_rssi_info[3].rssi_pri20 = 128, + .pc_rssi_info[3].rssi_sec20 = 128, + .pc_rssi_info[3].rssi_sec40 = 128, + .pc_rssi_info[3].rssi_sec80 = 128, + + .noise_floor[0] = -90, + .noise_floor[1] = -90, + .noise_floor[2] = -90, + .noise_floor[3] = -90, +}; + +/* Channel information for 160 MHz bandwidth */ 
+static struct target_if_spectral_chan_info chan_info_160 = { + .center_freq1 = 5250, + .center_freq2 = 0, + .chan_width = 160, +}; + +/* Spectral config parameters for 160 MHz bandwidth */ +static struct spectral_config config_160_1 = { + .ss_fft_period = 1, + .ss_period = 35, + .ss_count = 0, + .ss_short_report = 1, + .radar_bin_thresh_sel = 0, + .ss_spectral_pri = 1, + .ss_fft_size = 9, + .ss_gc_ena = 1, + .ss_restart_ena = 0, + .ss_noise_floor_ref = 65440, + .ss_init_delay = 80, + .ss_nb_tone_thr = 12, + .ss_str_bin_thr = 8, + .ss_wb_rpt_mode = 0, + .ss_rssi_rpt_mode = 0, + .ss_rssi_thr = 240, + .ss_pwr_format = 0, + .ss_rpt_mode = 2, + .ss_bin_scale = 1, + .ss_dbm_adj = 1, + .ss_chn_mask = 1, + .ss_nf_cal[0] = 0, + .ss_nf_cal[1] = 0, + .ss_nf_cal[2] = 0, + .ss_nf_cal[3] = 0, + .ss_nf_cal[4] = 0, + .ss_nf_cal[5] = 0, + .ss_nf_pwr[0] = 0, + .ss_nf_pwr[1] = 0, + .ss_nf_pwr[2] = 0, + .ss_nf_pwr[3] = 0, + .ss_nf_pwr[4] = 0, + .ss_nf_pwr[5] = 0, + .ss_nf_temp_data = 0, +}; + +/* 80+80 MHz */ + +/* Report data for 80_80MHz bandwidth for generation 2 chipsets */ +static uint8_t reportdata_80_80_gen2[] = { + /* Segment 1 */ +#ifdef BIG_ENDIAN_HOST + 0xbb, /* Signature */ + 0xfb, /* Tag */ + 0x01, /* Size */ + 0x14, + 0x23, 0x66, 0x00, 0x40, /* FFT Summary A */ + 0x64, 0x64, 0x89, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#else + 0x14, /* Length */ + 0x01, + 0xfb, /* Tag */ + 0xbb, /* Signature */ + 0x40, 0x00, 0x66, 0x23, /* FFT Summary A */ + 0x00, 0x89, 0x64, 0x64, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x00, /* Segment ID */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 2, 6, 68, 5, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + + /* Segment 2 */ +#ifdef BIG_ENDIAN_HOST + 0xbb, /* Signature */ + 0xfb, /* Tag */ + 0x01, /* Size */ + 0x14, + 0x23, 0x66, 0x00, 0x40, /* FFT Summary A */ + 0x64, 0x64, 0x89, 0x00, /* FFT Summary B */ + 0x00, 0x00, 0x00, 0x01, /* Segment ID */ +#else + 0x14, /* Length */ + 0x01, + 0xfb, /* Tag */ + 0xbb, /* Signature */ + 0x40, 0x00, 0x66, 0x23, /* FFT Summary A */ + 0x00, 0x89, 0x64, 0x64, /* FFT Summary B */ + 0x01, 0x00, 0x00, 0x00, /* Segment ID */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 2, 6, 68, 5, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, +}; + +/* Report data for 80_80MHz bandwidth for generation 3 chipsets */ +static uint8_t reportdata_80_80_gen3[] = { + /* Segment 1 */ +#ifdef BIG_ENDIAN_HOST + 0x12, 0x34, 0x56, 0x78, /* fft_timestamp */ + 0xfa, /* fft_hdr_sig 
*/ + 0x03, /* fft_hdr_tag */ + 0x00, /* fft_hdr_length */ + 0x44, + 0x0f, 0xf6, 0x00, 0xe0, + 0x00, 0x00, 0x2f, 0xba, + 0x20, 0xb4, 0x2c, 0x01, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#else + 0x78, 0x56, 0x34, 0x12, /* fft_timestamp */ + 0x44, /* fft_hdr_length */ + 0x00, + 0x03, /* fft_hdr_tag */ + 0xfa, /* fft_hdr_sig */ + 0xe0, 0x00, 0xf6, 0x0f, + 0xba, 0x2f, 0x00, 0x00, + 0x01, 0x2c, 0xb4, 0x20, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 2, 6, 68, 5, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + + /* Segment 2 */ +#ifdef BIG_ENDIAN_HOST + 0x12, 0x34, 0x56, 0x78, /* fft_timestamp */ + 0xfa, /* fft_hdr_sig */ + 0x03, /* fft_hdr_tag */ + 0x00, /* fft_hdr_length */ + 0x44, + 0x0f, 0xf6, 0x00, 0xe1, + 0x00, 0x00, 0x2f, 0xba, + 0x20, 0xb4, 0x2c, 0x01, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#else + 0x78, 0x56, 0x34, 0x12, /* fft_timestamp */ + 0x44, /* fft_hdr_length */ + 0x00, + 0x03, /* fft_hdr_tag */ + 0xfa, /* fft_hdr_sig */ + 0xe1, 0x00, 0xf6, 0x0f, + 0xba, 0x2f, 0x00, 0x00, + 0x01, 0x2c, 0xb4, 0x20, + 0x00, 0x00, 0x00, 0x00, /* reserved */ +#endif /* BIG_ENDIAN_HOST */ + /* FFT Data */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 2, 6, 68, 5, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +/* RF measurement information for 80_80 MHz bandwidth */ +static struct target_if_spectral_rfqual_info rfqual_info_80_80 = { + .rssi_comb = 1, + + .pc_rssi_info[0].rssi_pri20 = 1, + .pc_rssi_info[0].rssi_sec20 = 17, + .pc_rssi_info[0].rssi_sec40 = 40, + .pc_rssi_info[0].rssi_sec80 = 128, + + .pc_rssi_info[1].rssi_pri20 = 128, + .pc_rssi_info[1].rssi_sec20 = 128, + .pc_rssi_info[1].rssi_sec40 = 128, + .pc_rssi_info[1].rssi_sec80 = 128, + + .pc_rssi_info[2].rssi_pri20 = 128, + .pc_rssi_info[2].rssi_sec20 = 128, + .pc_rssi_info[2].rssi_sec40 = 128, + .pc_rssi_info[2].rssi_sec80 = 128, + + .pc_rssi_info[3].rssi_pri20 = 128, + .pc_rssi_info[3].rssi_sec20 = 128, + .pc_rssi_info[3].rssi_sec40 = 128, + .pc_rssi_info[3].rssi_sec80 = 128, + + .noise_floor[0] = -90, + .noise_floor[1] = -90, + .noise_floor[2] = -90, + .noise_floor[3] = -90, +}; + +/* Channel information for 80_80 MHz bandwidth */ +static struct target_if_spectral_chan_info chan_info_80_80 = { + .center_freq1 = 5210, + .center_freq2 = 5530, + .chan_width = 160, +}; + +/* Spectral config parameters for 80_80 MHz bandwidth */ +static struct spectral_config config_80_80_1 = { + .ss_fft_period = 1, + .ss_period = 35, + .ss_count = 0, + .ss_short_report = 1, + .radar_bin_thresh_sel = 0, + .ss_spectral_pri = 1, + .ss_fft_size = 9, + 
.ss_gc_ena = 1, + .ss_restart_ena = 0, + .ss_noise_floor_ref = 65440, + .ss_init_delay = 80, + .ss_nb_tone_thr = 12, + .ss_str_bin_thr = 8, + .ss_wb_rpt_mode = 0, + .ss_rssi_rpt_mode = 0, + .ss_rssi_thr = 240, + .ss_pwr_format = 0, + .ss_rpt_mode = 2, + .ss_bin_scale = 1, + .ss_dbm_adj = 1, + .ss_chn_mask = 1, + .ss_nf_cal[0] = 0, + .ss_nf_cal[1] = 0, + .ss_nf_cal[2] = 0, + .ss_nf_cal[3] = 0, + .ss_nf_cal[4] = 0, + .ss_nf_cal[5] = 0, + .ss_nf_pwr[0] = 0, + .ss_nf_pwr[1] = 0, + .ss_nf_pwr[2] = 0, + .ss_nf_pwr[3] = 0, + .ss_nf_pwr[4] = 0, + .ss_nf_pwr[5] = 0, + .ss_nf_temp_data = 0, +}; + +#endif /* QCA_SUPPORT_SPECTRAL_SIMULATION */ +#endif /* _SPECTRAL_SIM_INTERNAL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/tdls/inc/target_if_tdls.h b/drivers/staging/qca-wifi-host-cmn/target_if/tdls/inc/target_if_tdls.h new file mode 100644 index 0000000000000000000000000000000000000000..7fe21cdf0c4e903f0881bf7d719aa01ab0eb3798 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/tdls/inc/target_if_tdls.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: offload lmac interface APIs for tdls + * + */ + +#ifndef __TARGET_IF_TDLS_H__ +#define __TARGET_IF_TDLS_H__ + +struct tdls_info; +struct wlan_objmgr_psoc; +struct tdls_peer_update_state; +struct tdls_channel_switch_params; +struct sta_uapsd_trig_params; + +/** + * target_if_tdls_update_fw_state() - lmac handler to update tdls fw state + * @psoc: psoc object + * @param: tdls state parameter + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_tdls_update_fw_state(struct wlan_objmgr_psoc *psoc, + struct tdls_info *param); + +/** + * target_if_tdls_update_peer_state() - lmac handler to update tdls peer state + * @psoc: psoc object + * @peer_params: tdls peer state params + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_tdls_update_peer_state(struct wlan_objmgr_psoc *psoc, + struct tdls_peer_update_state *peer_params); + +/** + * target_if_tdls_set_offchan_mode() - lmac handler to set tdls off channel mode + * @psoc: psoc object + * @params: tdls channel swithc params + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_tdls_set_offchan_mode(struct wlan_objmgr_psoc *psoc, + struct tdls_channel_switch_params *params); + +/** + * target_if_tdls_set_uapsd() - lmac handler to set uapsd auto trigger cmd + * @psoc: psoc object + * @params: upasd parameters + * + * This function sets the trigger + * uapsd params such as service interval, delay interval + * and suspend interval which will be used by the firmware + * to send trigger frames periodically when there is no + * traffic on the transmit side. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_tdls_set_uapsd(struct wlan_objmgr_psoc *psoc, + struct sta_uapsd_trig_params *params); + +/** + * target_if_tdls_register_event_handler() - lmac handler to register tdls event + * handler + * @psoc : psoc object + * @arg: argument passed to lmac + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_tdls_register_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg); + +/** + * target_if_tdls_unregister_event_handler() - lmac handler to unregister tdls + * event handler + * @psoc : psoc object + * @arg: argument passed to lmac + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_tdls_unregister_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg); + +/** + * target_if_tdls_register_tx_ops() - lmac handler to register tdls tx ops + * callback functions + * @tx_ops: wlan_lmac_if_tx_ops object + * + * Return: QDF_STATUS + */ +QDF_STATUS +target_if_tdls_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/tdls/src/target_if_tdls.c b/drivers/staging/qca-wifi-host-cmn/target_if/tdls/src/target_if_tdls.c new file mode 100644 index 0000000000000000000000000000000000000000..6adb2622319e512eda7fd465828a450108ca0565 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/tdls/src/target_if_tdls.c @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: offload lmac interface APIs for tdls + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static inline struct wlan_lmac_if_tdls_rx_ops * +target_if_tdls_get_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.rx_ops.tdls_rx_ops; +} + +static int +target_if_tdls_event_handler(ol_scn_t scn, uint8_t *data, uint32_t datalen) +{ + struct wlan_objmgr_psoc *psoc; + struct wmi_unified *wmi_handle; + struct wlan_lmac_if_tdls_rx_ops *tdls_rx_ops; + struct tdls_event_info info; + QDF_STATUS status; + + if (!scn || !data) { + target_if_err("scn: 0x%pK, data: 0x%pK", scn, data); + return -EINVAL; + } + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + target_if_err("null psoc"); + return -EINVAL; + } + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + + if (!wmi_handle) { + target_if_err("null wmi_handle"); + return -EINVAL; + } + + if (wmi_extract_vdev_tdls_ev_param(wmi_handle, data, &info)) { + target_if_err("Failed to extract wmi tdls event"); + return -EINVAL; + } + + tdls_rx_ops = target_if_tdls_get_rx_ops(psoc); + if (tdls_rx_ops && tdls_rx_ops->tdls_ev_handler) { + status = tdls_rx_ops->tdls_ev_handler(psoc, &info); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("fail to handle tdls event"); + return -EINVAL; + } + } + + return 0; +} + +QDF_STATUS +target_if_tdls_update_fw_state(struct wlan_objmgr_psoc *psoc, + struct tdls_info *param) +{ + QDF_STATUS status; + uint8_t tdls_state; + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI 
handle"); + return QDF_STATUS_E_FAILURE; + } + + if (TDLS_SUPPORT_EXP_TRIG_ONLY == param->tdls_state) + tdls_state = WMI_TDLS_ENABLE_PASSIVE; + else if (TDLS_SUPPORT_IMP_MODE == param->tdls_state || + TDLS_SUPPORT_EXT_CONTROL == param->tdls_state) + tdls_state = WMI_TDLS_ENABLE_CONNECTION_TRACKER_IN_HOST; + else + tdls_state = WMI_TDLS_DISABLE; + + status = wmi_unified_update_fw_tdls_state_cmd(wmi_handle, + param, tdls_state); + + target_if_debug("vdev_id %d", param->vdev_id); + return status; +} + +QDF_STATUS +target_if_tdls_update_peer_state(struct wlan_objmgr_psoc *psoc, + struct tdls_peer_update_state *peer_params) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +target_if_tdls_set_offchan_mode(struct wlan_objmgr_psoc *psoc, + struct tdls_channel_switch_params *params) +{ + QDF_STATUS status; + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return QDF_STATUS_E_FAILURE; + } + status = wmi_unified_set_tdls_offchan_mode_cmd(wmi_handle, + params); + + return status; +} + +QDF_STATUS +target_if_tdls_set_uapsd(struct wlan_objmgr_psoc *psoc, + struct sta_uapsd_trig_params *params) +{ + QDF_STATUS ret; + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("Invalid WMI handle"); + return QDF_STATUS_E_FAILURE; + } + if (!wmi_service_enabled(wmi_handle, + wmi_sta_uapsd_basic_auto_trig) || + !wmi_service_enabled(wmi_handle, + wmi_sta_uapsd_var_auto_trig)) { + target_if_debug("Trigger uapsd is not supported vdev id %d", + params->vdevid); + return QDF_STATUS_SUCCESS; + } + ret = wmi_unified_set_sta_uapsd_auto_trig_cmd(wmi_handle, + params); + + if (QDF_IS_STATUS_ERROR(ret)) + target_if_err("Failed to send set uapsd param ret = %d", ret); + + return ret; +} + +QDF_STATUS +target_if_tdls_register_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + struct wmi_unified *wmi_handle; + + wmi_handle = 
get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + return QDF_STATUS_E_INVAL; + } + return wmi_unified_register_event(wmi_handle, + wmi_tdls_peer_event_id, + target_if_tdls_event_handler); +} + +QDF_STATUS +target_if_tdls_unregister_event_handler(struct wlan_objmgr_psoc *psoc, + void *arg) +{ + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + target_if_err("null wmi_handle"); + return QDF_STATUS_E_INVAL; + } + return wmi_unified_unregister_event(wmi_handle, + wmi_tdls_peer_event_id); +} + +QDF_STATUS +target_if_tdls_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_tdls_tx_ops *tdls_txops; + + tdls_txops = &tx_ops->tdls_tx_ops; + + tdls_txops->update_fw_state = target_if_tdls_update_fw_state; + tdls_txops->update_peer_state = target_if_tdls_update_peer_state; + tdls_txops->set_offchan_mode = target_if_tdls_set_offchan_mode; + tdls_txops->tdls_reg_ev_handler = target_if_tdls_register_event_handler; + tdls_txops->tdls_unreg_ev_handler = + target_if_tdls_unregister_event_handler; + tdls_txops->tdls_set_uapsd = target_if_tdls_set_uapsd; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/wifi_pos/inc/target_if_wifi_pos.h b/drivers/staging/qca-wifi-host-cmn/target_if/wifi_pos/inc/target_if_wifi_pos.h new file mode 100644 index 0000000000000000000000000000000000000000..b1a6b3fd856dceffd3ef2501f0d7de9ba44ba4d5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/wifi_pos/inc/target_if_wifi_pos.h @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_wifi_pos.h + * This file declares the functions pertinent to wifi positioning component's + * target if layer. + */ +#ifndef _WIFI_POS_TGT_IF_H_ +#define _WIFI_POS_TGT_IF_H_ + +#include "qdf_types.h" +#include "qdf_status.h" +struct oem_data_req; +struct oem_data_rsp; +struct wlan_objmgr_psoc; +struct wlan_soc_southbound_cb; +struct wlan_lmac_if_tx_ops; +struct wlan_lmac_if_rx_ops; + +#ifdef WIFI_POS_CONVERGED + +/** + * target_if_wifi_pos_get_txops: api to get tx ops + * @psoc: pointer to psoc object + * + * Return: tx ops + */ +struct wlan_lmac_if_wifi_pos_tx_ops *target_if_wifi_pos_get_txops( + struct wlan_objmgr_psoc *psoc); + +/** + * target_if_wifi_pos_get_rxops: api to get rx ops + * @psoc: pointer to psoc object + * + * Return: rx ops + */ +struct wlan_lmac_if_wifi_pos_rx_ops *target_if_wifi_pos_get_rxops( + struct wlan_objmgr_psoc *psoc); + +/** + * target_if_wifi_pos_register_events: function to register with wmi event + * @psoc: pointer to psoc object + * + * Return: status of operation + */ +QDF_STATUS target_if_wifi_pos_register_events(struct wlan_objmgr_psoc *psoc); + +/** + * target_if_wifi_pos_deregister_events: function to deregister wmi event + * @psoc: pointer to psoc object + * + * Return: status of operation + */ +QDF_STATUS target_if_wifi_pos_deregister_events(struct wlan_objmgr_psoc *psoc); + + +/** + * target_if_wifi_pos_register_tx_ops: function to register with lmac tx ops + * @tx_ops: lmac tx ops 
struct object + * + * Return: none + */ +void target_if_wifi_pos_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops); + +/** + * target_if_wifi_pos_register_rx_ops: function to register with lmac rx ops + * @rx_ops: lmac rx ops struct object + * + * Return: none + */ +void target_if_wifi_pos_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops); + +#else +static inline struct wlan_lmac_if_wifi_pos_tx_ops *target_if_wifi_pos_get_txops( + struct wlan_objmgr_psoc *psoc) +{ + return NULL; +} + +static inline struct wlan_lmac_if_wifi_pos_rx_ops *target_if_wifi_pos_get_rxops( + struct wlan_objmgr_psoc *psoc) +{ + return NULL; +} + +static inline void target_if_wifi_pos_register_tx_ops( + struct wlan_lmac_if_tx_ops *tx_ops) +{ +} + +static inline void target_if_wifi_pos_register_rx_ops( + struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif + +#if defined(WLAN_FEATURE_CIF_CFR) && defined(WIFI_POS_CONVERGED) +/** + * target_if_wifi_pos_init_cir_cfr_rings: set DMA ring cap in psoc private + * object + * @psoc: pointer to psoc object + * @hal_soc: pointer to hal soc + * @num_mac: number of mac + * @buf: buffer containing DMA ring cap + * + * Return: status of operation + */ +QDF_STATUS target_if_wifi_pos_init_cir_cfr_rings(struct wlan_objmgr_psoc *psoc, + void *hal_soc, uint8_t num_mac, + void *buf); +/** + * target_if_wifi_pos_deinit_dma_rings: frees up DMA rings + * @psoc: pointer to psoc + * + * Return: status of operation + */ +QDF_STATUS target_if_wifi_pos_deinit_dma_rings(struct wlan_objmgr_psoc *psoc); +#else +static inline QDF_STATUS target_if_wifi_pos_init_cir_cfr_rings( + struct wlan_objmgr_psoc *psoc, void *hal_soc, + uint8_t num_mac, void *buf) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS target_if_wifi_pos_deinit_dma_rings( + struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#endif /* _WIFI_POS_TGT_IF_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/wifi_pos/src/target_if_wifi_pos.c 
b/drivers/staging/qca-wifi-host-cmn/target_if/wifi_pos/src/target_if_wifi_pos.c new file mode 100644 index 0000000000000000000000000000000000000000..f9592196e578ad1970eccf25ea5756cf3d3839fc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/target_if/wifi_pos/src/target_if_wifi_pos.c @@ -0,0 +1,671 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: target_if_wifi_pos.c + * This file defines the functions pertinent to wifi positioning component's + * target if layer. 
+ */ +#include "../../../../umac/wifi_pos/src/wifi_pos_utils_i.h" +#include "wmi_unified_api.h" +#include "wlan_lmac_if_def.h" +#include "target_if_wifi_pos.h" +#include "../../../../umac/wifi_pos/src/wifi_pos_main_i.h" +#include "target_if.h" +#ifdef WLAN_FEATURE_CIF_CFR +#include "hal_api.h" + +#define RING_BASE_ALIGN 8 + +static void *target_if_wifi_pos_vaddr_lookup( + struct wifi_pos_psoc_priv_obj *priv, + void *paddr, uint8_t ring_num, uint32_t cookie) +{ + if (priv->dma_buf_pool[ring_num][cookie].paddr == paddr) { + return priv->dma_buf_pool[ring_num][cookie].vaddr + + priv->dma_buf_pool[ring_num][cookie].offset; + } else { + target_if_err("incorrect paddr found on cookie slot"); + return NULL; + } +} + +static QDF_STATUS target_if_wifi_pos_replenish_ring( + struct wifi_pos_psoc_priv_obj *priv, uint8_t ring_idx, + void *alinged_vaddr, uint32_t cookie) +{ + uint64_t *ring_entry; + uint32_t dw_lo, dw_hi = 0, map_status; + void *hal_soc = priv->hal_soc; + void *srng = priv->dma_cfg[ring_idx].srng; + void *paddr; + + if (!alinged_vaddr) { + target_if_debug("NULL alinged_vaddr provided"); + return QDF_STATUS_SUCCESS; + } + + map_status = qdf_mem_map_nbytes_single(NULL, alinged_vaddr, + QDF_DMA_FROM_DEVICE, + priv->dma_cap[ring_idx].min_buf_size, + (qdf_dma_addr_t *)&paddr); + if (map_status) { + target_if_err("mem map failed status: %d", map_status); + return QDF_STATUS_E_FAILURE; + } + QDF_ASSERT(!((uint64_t)paddr % priv->dma_cap[ring_idx].min_buf_align)); + priv->dma_buf_pool[ring_idx][cookie].paddr = paddr; + + hal_srng_access_start(hal_soc, srng); + ring_entry = hal_srng_src_get_next(hal_soc, srng); + dw_lo = (uint64_t)paddr & 0xFFFFFFFF; + WMI_OEM_DMA_DATA_ADDR_HI_SET(dw_hi, (uint64_t)paddr >> 32); + WMI_OEM_DMA_DATA_ADDR_HI_HOST_DATA_SET(dw_hi, cookie); + *ring_entry = (uint64_t)dw_hi << 32 | dw_lo; + hal_srng_access_end(hal_soc, srng); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_wifi_pos_get_indirect_data( + struct 
wifi_pos_psoc_priv_obj *priv_obj, + wmi_oem_indirect_data *indirect, + struct oem_data_rsp *rsp, uint32_t *cookie) +{ + void *paddr = NULL; + uint32_t addr_hi; + uint8_t ring_idx = 0, num_rings; + + if (!indirect) { + target_if_debug("no indirect data. regular event received"); + return QDF_STATUS_SUCCESS; + } + + ring_idx = indirect->pdev_id - 1; + num_rings = priv_obj->num_rings; + if (ring_idx >= num_rings) { + target_if_err("incorrect pdev_id: %d", indirect->pdev_id); + return QDF_STATUS_E_INVAL; + } + addr_hi = (uint64_t)WMI_OEM_DMA_DATA_ADDR_HI_GET( + indirect->addr_hi); + paddr = (void *)((uint64_t)addr_hi << 32 | indirect->addr_lo); + *cookie = WMI_OEM_DMA_DATA_ADDR_HI_HOST_DATA_GET( + indirect->addr_hi); + rsp->vaddr = target_if_wifi_pos_vaddr_lookup(priv_obj, + paddr, ring_idx, *cookie); + rsp->dma_len = indirect->len; + qdf_mem_unmap_nbytes_single(NULL, (qdf_dma_addr_t)paddr, + QDF_DMA_FROM_DEVICE, + priv_obj->dma_cap[ring_idx].min_buf_size); + + return QDF_STATUS_SUCCESS; +} + +#else +static QDF_STATUS target_if_wifi_pos_replenish_ring( + struct wifi_pos_psoc_priv_obj *priv, uint8_t ring_idx, + void *vaddr, uint32_t cookie) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_wifi_pos_get_indirect_data( + struct wifi_pos_psoc_priv_obj *priv_obj, + wmi_oem_indirect_data *indirect, + struct oem_data_rsp *rsp, uint32_t *cookie) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * target_if_wifi_pos_oem_rsp_ev_handler: handler registered with + * WMI_OEM_RESPONSE_EVENTID + * @scn: scn handle + * @data_buf: event buffer + * @data_len: event buffer length + * + * Return: status of operation + */ +static int target_if_wifi_pos_oem_rsp_ev_handler(ol_scn_t scn, + uint8_t *data_buf, + uint32_t data_len) +{ + int ret; + uint8_t ring_idx = 0; + QDF_STATUS status; + uint32_t cookie = 0; + wmi_oem_indirect_data *indirect; + struct oem_data_rsp oem_rsp = {0}; + struct wifi_pos_psoc_priv_obj *priv_obj; + struct wlan_objmgr_psoc *psoc = 
wifi_pos_get_psoc(); + struct wlan_lmac_if_wifi_pos_rx_ops *wifi_pos_rx_ops = NULL; + WMI_OEM_RESPONSE_EVENTID_param_tlvs *param_buf = + (WMI_OEM_RESPONSE_EVENTID_param_tlvs *)data_buf; + + if (!psoc) { + target_if_err("psoc is null"); + return QDF_STATUS_NOT_INITIALIZED; + } + + wlan_objmgr_psoc_get_ref(psoc, WLAN_WIFI_POS_TGT_IF_ID); + + priv_obj = wifi_pos_get_psoc_priv_obj(psoc); + if (!priv_obj) { + target_if_err("priv_obj is null"); + wlan_objmgr_psoc_release_ref(psoc, WLAN_WIFI_POS_TGT_IF_ID); + return QDF_STATUS_NOT_INITIALIZED; + } + + wifi_pos_rx_ops = target_if_wifi_pos_get_rxops(psoc); + if (!wifi_pos_rx_ops || !wifi_pos_rx_ops->oem_rsp_event_rx) { + wlan_objmgr_psoc_release_ref(psoc, WLAN_WIFI_POS_TGT_IF_ID); + target_if_err("lmac callbacks not registered"); + return QDF_STATUS_NOT_INITIALIZED; + } + + oem_rsp.rsp_len_1 = param_buf->num_data; + oem_rsp.data_1 = param_buf->data; + + if (param_buf->num_data2) { + oem_rsp.rsp_len_2 = param_buf->num_data2; + oem_rsp.data_2 = param_buf->data2; + } + + indirect = (wmi_oem_indirect_data *)param_buf->indirect_data; + status = target_if_wifi_pos_get_indirect_data(priv_obj, indirect, + &oem_rsp, &cookie); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("get indirect data failed status: %d", status); + wlan_objmgr_psoc_release_ref(psoc, WLAN_WIFI_POS_TGT_IF_ID); + return QDF_STATUS_E_INVAL; + } + + ret = wifi_pos_rx_ops->oem_rsp_event_rx(psoc, &oem_rsp); + if (indirect) + ring_idx = indirect->pdev_id - 1; + status = target_if_wifi_pos_replenish_ring(priv_obj, ring_idx, + oem_rsp.vaddr, cookie); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("replenish failed status: %d", status); + ret = QDF_STATUS_E_FAILURE; + } + + wlan_objmgr_psoc_release_ref(psoc, WLAN_WIFI_POS_TGT_IF_ID); + + return ret; +} + +/** + * wifi_pos_oem_cap_ev_handler: handler registered with wmi_oem_cap_event_id + * @scn: scn handle + * @buf: event buffer + * @len: event buffer length + * + * Return: status of operation + */ +static 
int wifi_pos_oem_cap_ev_handler(ol_scn_t scn, uint8_t *buf, uint32_t len) +{ + /* TBD */ + return 0; +} + +/** + * wifi_pos_oem_meas_rpt_ev_handler: handler registered with + * wmi_oem_meas_report_event_id + * @scn: scn handle + * @buf: event buffer + * @len: event buffer length + * + * Return: status of operation + */ +static int wifi_pos_oem_meas_rpt_ev_handler(ol_scn_t scn, uint8_t *buf, + uint32_t len) +{ + /* TBD */ + return 0; +} + +/** + * wifi_pos_oem_err_rpt_ev_handler: handler registered with + * wmi_oem_err_report_event_id + * @scn: scn handle + * @buf: event buffer + * @len: event buffer length + * + * Return: status of operation + */ +static int wifi_pos_oem_err_rpt_ev_handler(ol_scn_t scn, uint8_t *buf, + uint32_t len) +{ + /* TBD */ + return 0; +} + +/** + * wifi_pos_oem_data_req() - start OEM data request to target + * @wma_handle: wma handle + * @req: start request params + * + * Return: QDF_STATUS + */ +static QDF_STATUS wifi_pos_oem_data_req(struct wlan_objmgr_psoc *psoc, + struct oem_data_req *req) +{ + QDF_STATUS status; + void *wmi_hdl = GET_WMI_HDL_FROM_PSOC(psoc); + + target_if_debug("Send oem data req to target"); + + if (!req || !req->data) { + target_if_err("oem_data_req is null"); + return QDF_STATUS_E_INVAL; + } + + if (!wmi_hdl) { + target_if_err("WMA closed, can't send oem data req cmd"); + return QDF_STATUS_E_INVAL; + } + + status = wmi_unified_start_oem_data_cmd(wmi_hdl, req->data_len, + req->data); + + if (!QDF_IS_STATUS_SUCCESS(status)) + target_if_err("wmi cmd send failed"); + + return status; +} + +void target_if_wifi_pos_register_tx_ops(struct wlan_lmac_if_tx_ops *tx_ops) +{ + struct wlan_lmac_if_wifi_pos_tx_ops *wifi_pos_tx_ops; + wifi_pos_tx_ops = &tx_ops->wifi_pos_tx_ops; + wifi_pos_tx_ops->data_req_tx = wifi_pos_oem_data_req; +} + +void target_if_wifi_pos_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops) +{ + struct wlan_lmac_if_wifi_pos_rx_ops *wifi_pos_rx_ops; + wifi_pos_rx_ops = &rx_ops->wifi_pos_rx_ops; + 
wifi_pos_rx_ops->oem_rsp_event_rx = wifi_pos_oem_rsp_handler; +} + +inline struct wlan_lmac_if_wifi_pos_tx_ops *target_if_wifi_pos_get_txops( + struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) { + target_if_err("passed psoc is NULL"); + return NULL; + } + + return &psoc->soc_cb.tx_ops.wifi_pos_tx_ops; +} + +inline struct wlan_lmac_if_wifi_pos_rx_ops *target_if_wifi_pos_get_rxops( + struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) { + target_if_err("passed psoc is NULL"); + return NULL; + } + + return &psoc->soc_cb.rx_ops.wifi_pos_rx_ops; +} + +QDF_STATUS target_if_wifi_pos_register_events(struct wlan_objmgr_psoc *psoc) +{ + int ret; + + if (!psoc || !GET_WMI_HDL_FROM_PSOC(psoc)) { + target_if_err("psoc or psoc->tgt_if_handle is null"); + return QDF_STATUS_E_INVAL; + } + + ret = wmi_unified_register_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_oem_response_event_id, + target_if_wifi_pos_oem_rsp_ev_handler, + WMI_RX_WORK_CTX); + if (ret) { + target_if_err("register_event_handler failed: err %d", ret); + return QDF_STATUS_E_INVAL; + } + + ret = wmi_unified_register_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_oem_cap_event_id, + wifi_pos_oem_cap_ev_handler, + WMI_RX_WORK_CTX); + if (ret) { + target_if_err("register_event_handler failed: err %d", ret); + return QDF_STATUS_E_INVAL; + } + + ret = wmi_unified_register_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_oem_meas_report_event_id, + wifi_pos_oem_meas_rpt_ev_handler, + WMI_RX_WORK_CTX); + if (ret) { + target_if_err("register_event_handler failed: err %d", ret); + return QDF_STATUS_E_INVAL; + } + + ret = wmi_unified_register_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_oem_report_event_id, + wifi_pos_oem_err_rpt_ev_handler, + WMI_RX_WORK_CTX); + if (ret) { + target_if_err("register_event_handler failed: err %d", ret); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_wifi_pos_deregister_events(struct wlan_objmgr_psoc 
*psoc) +{ + if (!psoc || !GET_WMI_HDL_FROM_PSOC(psoc)) { + target_if_err("psoc or psoc->tgt_if_handle is null"); + return QDF_STATUS_E_INVAL; + } + + wmi_unified_unregister_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_oem_response_event_id); + wmi_unified_unregister_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_oem_cap_event_id); + wmi_unified_unregister_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_oem_meas_report_event_id); + wmi_unified_unregister_event_handler( + get_wmi_unified_hdl_from_psoc(psoc), + wmi_oem_report_event_id); + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_FEATURE_CIF_CFR +static QDF_STATUS target_if_wifi_pos_fill_ring(uint8_t ring_idx, + struct hal_srng *srng, + struct wifi_pos_psoc_priv_obj *priv) +{ + uint32_t i; + void *buf, *buf_aligned; + + for (i = 0; i < priv->dma_cfg[ring_idx].num_ptr; i++) { + buf = qdf_mem_malloc(priv->dma_cap[ring_idx].min_buf_size + + priv->dma_cap[ring_idx].min_buf_align - 1); + if (!buf) { + target_if_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + priv->dma_buf_pool[ring_idx][i].vaddr = buf; + buf_aligned = (void *)qdf_roundup((uint64_t)buf, + priv->dma_cap[ring_idx].min_buf_align); + priv->dma_buf_pool[ring_idx][i].offset = buf_aligned - buf; + priv->dma_buf_pool[ring_idx][i].cookie = i; + target_if_wifi_pos_replenish_ring(priv, ring_idx, + buf_aligned, i); + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_wifi_pos_empty_ring(uint8_t ring_idx, + struct wifi_pos_psoc_priv_obj *priv) +{ + uint32_t i; + + for (i = 0; i < priv->dma_cfg[ring_idx].num_ptr; i++) { + qdf_mem_unmap_nbytes_single(NULL, + (qdf_dma_addr_t)priv->dma_buf_pool[ring_idx][i].vaddr, + QDF_DMA_FROM_DEVICE, + priv->dma_cap[ring_idx].min_buf_size); + qdf_mem_free(priv->dma_buf_pool[ring_idx][i].vaddr); + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_wifi_pos_init_ring(uint8_t ring_idx, + struct wifi_pos_psoc_priv_obj *priv) +{ + void *srng; + uint32_t 
num_entries; + qdf_dma_addr_t paddr; + uint32_t ring_alloc_size; + void *hal_soc = priv->hal_soc; + struct hal_srng_params ring_params = {0}; + uint32_t max_entries = hal_srng_max_entries(hal_soc, WIFI_POS_SRC); + uint32_t entry_size = hal_srng_get_entrysize(hal_soc, WIFI_POS_SRC); + + num_entries = priv->dma_cap[ring_idx].min_num_ptr > max_entries ? + max_entries : priv->dma_cap[ring_idx].min_num_ptr; + priv->dma_cfg[ring_idx].num_ptr = num_entries; + priv->dma_buf_pool[ring_idx] = qdf_mem_malloc(num_entries * + sizeof(struct wifi_pos_dma_buf_info)); + if (!priv->dma_buf_pool[ring_idx]) { + target_if_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + + ring_alloc_size = (num_entries * entry_size) + RING_BASE_ALIGN - 1; + priv->dma_cfg[ring_idx].ring_alloc_size = ring_alloc_size; + priv->dma_cfg[ring_idx].base_vaddr_unaligned = + qdf_mem_alloc_consistent(NULL, NULL, ring_alloc_size, &paddr); + priv->dma_cfg[ring_idx].base_paddr_unaligned = (void *)paddr; + if (!priv->dma_cfg[ring_idx].base_vaddr_unaligned) { + target_if_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + + priv->dma_cfg[ring_idx].base_vaddr_aligned = (void *)qdf_roundup( + (uint64_t)priv->dma_cfg[ring_idx].base_vaddr_unaligned, + RING_BASE_ALIGN); + ring_params.ring_base_vaddr = + priv->dma_cfg[ring_idx].base_vaddr_aligned; + priv->dma_cfg[ring_idx].base_paddr_aligned = (void *)qdf_roundup( + (uint64_t)priv->dma_cfg[ring_idx].base_paddr_unaligned, + RING_BASE_ALIGN); + ring_params.ring_base_paddr = + (qdf_dma_addr_t)priv->dma_cfg[ring_idx].base_paddr_aligned; + ring_params.num_entries = num_entries; + srng = hal_srng_setup(hal_soc, WIFI_POS_SRC, 0, + priv->dma_cap[ring_idx].pdev_id, &ring_params); + if (!srng) { + target_if_err("srng setup failed"); + return QDF_STATUS_E_FAILURE; + } + priv->dma_cfg[ring_idx].srng = srng; + priv->dma_cfg[ring_idx].tail_idx_addr = + (void *)hal_srng_get_tp_addr(hal_soc, srng); + priv->dma_cfg[ring_idx].head_idx_addr = + (void 
*)hal_srng_get_hp_addr(hal_soc, srng); /* head index must map HP, not TP */ + + return target_if_wifi_pos_fill_ring(ring_idx, srng, priv); +} + +static QDF_STATUS target_if_wifi_pos_deinit_ring(uint8_t ring_idx, + struct wifi_pos_psoc_priv_obj *priv) +{ + target_if_wifi_pos_empty_ring(ring_idx, priv); + hal_srng_cleanup(priv->hal_soc, priv->dma_cfg[ring_idx].srng); + qdf_mem_free_consistent(NULL, NULL, + priv->dma_cfg[ring_idx].ring_alloc_size, + priv->dma_cfg[ring_idx].base_vaddr_unaligned, + (qdf_dma_addr_t)priv->dma_cfg[ring_idx].base_paddr_unaligned, + 0); + qdf_mem_free(priv->dma_buf_pool[ring_idx]); + priv->dma_buf_pool[ring_idx] = NULL; /* clear only after free to avoid leak */ + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_wifi_pos_init_srngs( + struct wifi_pos_psoc_priv_obj *priv) +{ + uint8_t i; + QDF_STATUS status; + + /* allocate memory for num_rings pointers */ + priv->dma_cfg = qdf_mem_malloc(priv->num_rings * + sizeof(struct wifi_pos_dma_rings_cap)); + if (!priv->dma_cfg) { + target_if_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + + priv->dma_buf_pool = qdf_mem_malloc(priv->num_rings * + sizeof(struct wifi_pos_dma_buf_info *)); + if (!priv->dma_buf_pool) { + target_if_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + + for (i = 0; i < priv->num_rings; i++) { + status = target_if_wifi_pos_init_ring(i, priv); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("init for ring[%d] failed", i); + return status; + } + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_wifi_pos_deinit_srngs( + struct wifi_pos_psoc_priv_obj *priv) +{ + uint8_t i; + + for (i = 0; i < priv->num_rings; i++) + target_if_wifi_pos_deinit_ring(i, priv); + + qdf_mem_free(priv->dma_buf_pool); + priv->dma_buf_pool = NULL; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS target_if_wifi_pos_cfg_fw(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_psoc_priv_obj *priv) +{ + uint8_t i; + QDF_STATUS status = QDF_STATUS_SUCCESS; /* defined even if num_rings == 0 */ + void *wmi_hdl = GET_WMI_HDL_FROM_PSOC(psoc); + wmi_oem_dma_ring_cfg_req_fixed_param cfg = {0}; + + if
(!wmi_hdl) { + target_if_err("WMA closed, can't send oem data req cmd"); + return QDF_STATUS_E_INVAL; + } + + target_if_debug("Sending oem dma ring cfg to target"); + + for (i = 0; i < priv->num_rings; i++) { + cfg.pdev_id = priv->dma_cfg[i].pdev_id; + cfg.base_addr_lo = (uint64_t)priv->dma_cfg[i].base_paddr_aligned + & 0xFFFFFFFF; + cfg.base_addr_hi = (uint64_t)priv->dma_cfg[i].base_paddr_aligned + & 0xFFFFFFFF00000000; + cfg.head_idx_addr_lo = (uint64_t)priv->dma_cfg[i].head_idx_addr + & 0xFFFFFFFF; + cfg.head_idx_addr_hi = (uint64_t)priv->dma_cfg[i].head_idx_addr + & 0xFFFFFFFF00000000; + cfg.tail_idx_addr_lo = (uint64_t)priv->dma_cfg[i].tail_idx_addr + & 0xFFFFFFFF; + cfg.tail_idx_addr_hi = (uint64_t)priv->dma_cfg[i].tail_idx_addr + & 0xFFFFFFFF00000000; + cfg.num_ptr = priv->dma_cfg[i].num_ptr; + status = wmi_unified_oem_dma_ring_cfg(wmi_hdl, &cfg); + if (!QDF_IS_STATUS_SUCCESS(status)) { + target_if_err("wmi cmd send failed"); + return status; + } + } + + return status; +} + +QDF_STATUS target_if_wifi_pos_deinit_dma_rings(struct wlan_objmgr_psoc *psoc) +{ + struct wifi_pos_psoc_priv_obj *priv = wifi_pos_get_psoc_priv_obj(psoc); + + target_if_wifi_pos_deinit_srngs(priv); + qdf_mem_free(priv->dma_cap); + priv->dma_cap = NULL; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS target_if_wifi_pos_init_cir_cfr_rings(struct wlan_objmgr_psoc *psoc, + void *hal_soc, uint8_t num_mac, + void *buf) +{ + uint8_t i; + QDF_STATUS status = QDF_STATUS_SUCCESS; + WMI_OEM_DMA_RING_CAPABILITIES *dma_cap = buf; + struct wifi_pos_psoc_priv_obj *priv = wifi_pos_get_psoc_priv_obj(psoc); + + if (!priv) { + target_if_err("unable to get wifi_pos psoc obj"); + return QDF_STATUS_E_NULL_VALUE; + } + + priv->hal_soc = hal_soc; + priv->num_rings = num_mac; + priv->dma_cap = qdf_mem_malloc(priv->num_rings * + sizeof(struct wifi_pos_dma_rings_cap)); + if (!priv->dma_cap) { + target_if_err("unable to get wifi_pos psoc obj"); + return QDF_STATUS_E_NOMEM; + } + + for (i = 0; i < num_mac; i++) { + 
priv->dma_cap[i].pdev_id = dma_cap[i].pdev_id; + priv->dma_cap[i].min_num_ptr = dma_cap[i].min_num_ptr; + priv->dma_cap[i].min_buf_size = dma_cap[i].min_buf_size; + priv->dma_cap[i].min_buf_align = dma_cap[i].min_buf_align; + } + + /* initialize DMA rings now */ + status = target_if_wifi_pos_init_srngs(priv); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("dma init failed: %d", status); + goto dma_init_failed; + } + + /* send cfg req cmd to firmware */ + status = target_if_wifi_pos_cfg_fw(psoc, priv); + if (QDF_IS_STATUS_ERROR(status)) { + target_if_err("configure to FW failed: %d", status); + goto dma_init_failed; + } + + return QDF_STATUS_SUCCESS; + +dma_init_failed: + target_if_wifi_pos_deinit_dma_rings(psoc); + return status; +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/cmn_defs/inc/wlan_cmn_ieee80211.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/cmn_defs/inc/wlan_cmn_ieee80211.h new file mode 100644 index 0000000000000000000000000000000000000000..66f58c0cd6ba7f76d8d4675a5be3176ca4a859dd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/cmn_defs/inc/wlan_cmn_ieee80211.h @@ -0,0 +1,1722 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: contains common ieee80211 definitions + */ + +#ifndef _WLAN_CMN_IEEE80211_H_ +#define _WLAN_CMN_IEEE80211_H_ +#include +#include + +#define IEEE80211_CCMP_HEADERLEN 8 +#define IEEE80211_CCMP_MICLEN 8 +#define WLAN_IEEE80211_GCMP_HEADERLEN 8 +#define WLAN_IEEE80211_GCMP_MICLEN 16 +#define IEEE80211_FC1_WEP 0x40 +#define WLAN_HDR_IV_LEN 3 +#define WLAN_HDR_EXT_IV_BIT 0x20 +#define WLAN_HDR_EXT_IV_LEN 4 + +#define WLAN_SEQ_SEQ_SHIFT 4 + +#define P2P_WFA_OUI {0x50, 0x6f, 0x9a} +#define P2P_WFA_VER 0x09 + +#define WSC_OUI 0x0050f204 +#define MBO_OCE_OUI 0x506f9a16 +#define MBO_OCE_OUI_SIZE 4 +#define REDUCED_WAN_METRICS_ATTR 103 + +/* WCN IE */ +/* Microsoft OUI */ +#define WCN_OUI 0xf25000 +/* WCN */ +#define WCN_OUI_TYPE 0x04 +#define WME_OUI 0xf25000 +#define WME_OUI_TYPE 0x02 +#define WME_PARAM_OUI_SUBTYPE 0x01 +#define WME_INFO_OUI_SUBTYPE 0x00 + /* Atheros OUI */ +#define ATH_OUI 0x7f0300 +#define ATH_OUI_TYPE 0x01 +/* Atheros Extended Cap Type */ +#define ATH_OUI_EXTCAP_TYPE 0x04 +/* QCA Bandwidth NSS Mapping Type */ +#define ATH_OUI_BW_NSS_MAP_TYPE 0x05 +#define SFA_OUI 0x964000 +#define SFA_OUI_TYPE 0x14 +/* QCA OUI (in little endian) */ +#define QCA_OUI 0xf0fd8c +#define QCA_OUI_WHC_TYPE 0x00 + +/* Temporary vendor specific IE for 11n pre-standard interoperability */ +#define VENDOR_HT_OUI 0x00904c +#define VENDOR_HT_CAP_ID 51 +#define VENDOR_HT_INFO_ID 52 + +#define VHT_INTEROP_OUI 0x00904c +#define VHT_INTEROP_TYPE 0x04 +#define VHT_INTEROP_OUI_SUBTYPE 0x08 +#define VHT_INTEROP_OUI_SUBTYPE_VENDORSPEC 0x18 + +/* ATH HE OUI ( in little endian) */ +#define ATH_HE_OUI 0x741300 +#define ATH_HE_CAP_SUBTYPE
0x01 +#define ATH_HE_OP_SUBTYPE 0x02 + +/* ERP information element flags */ +#define ERP_NON_ERP_PRESENT 0x01 +#define ERP_USE_PROTECTION 0x02 +#define ERP_LONG_PREAMBLE 0x04 + +#define QCA_OUI_WHC_AP_INFO_SUBTYPE 0x00 + +#define WLAN_MAX_IE_LEN 255 +#define WLAN_RSN_IE_LEN 22 + +/* Individual element IEs length checks */ + +#define WLAN_SUPPORTED_RATES_IE_MAX_LEN 12 +#define WLAN_DS_PARAM_IE_MAX_LEN 1 +#define WLAN_COUNTRY_IE_MIN_LEN 3 +#define WLAN_QUIET_IE_MAX_LEN 6 +#define WLAN_CSA_IE_MAX_LEN 3 +#define WLAN_XCSA_IE_MAX_LEN 4 +#define WLAN_SECCHANOFF_IE_MAX_LEN 1 +#define WLAN_EXT_SUPPORTED_RATES_IE_MAX_LEN 12 +#define WLAN_EXTCAP_IE_MAX_LEN 15 +#define WLAN_FILS_INDICATION_IE_MIN_LEN 2 +#define WLAN_MOBILITY_DOMAIN_IE_MAX_LEN 3 +#define WLAN_OPMODE_IE_MAX_LEN 1 +#define WLAN_IBSSDFS_IE_MIN_LEN 7 + +/* HT capability flags */ +#define WLAN_HTCAP_C_ADVCODING 0x0001 +#define WLAN_HTCAP_C_CHWIDTH40 0x0002 +/* Capable of SM Power Save (Static) */ +#define WLAN_HTCAP_C_SMPOWERSAVE_STATIC 0x0000 +/* Capable of SM Power Save (Dynamic) */ +#define WLAN_HTCAP_C_SMPOWERSAVE_DYNAMIC 0x0004 +/* Reserved */ +#define WLAN_HTCAP_C_SM_RESERVED 0x0008 +/* SM enabled, no SM Power Save */ +#define WLAN_HTCAP_C_SMPOWERSAVE_DISABLED 0x000c +#define WLAN_HTCAP_C_GREENFIELD 0x0010 +#define WLAN_HTCAP_C_SHORTGI20 0x0020 +#define WLAN_HTCAP_C_SHORTGI40 0x0040 +#define WLAN_HTCAP_C_TXSTBC 0x0080 +#define WLAN_HTCAP_C_TXSTBC_S 7 +/* 2 bits */ +#define WLAN_HTCAP_C_RXSTBC 0x0300 +#define WLAN_HTCAP_C_RXSTBC_S 8 +#define WLAN_HTCAP_C_DELAYEDBLKACK 0x0400 +/* 1 = 8K, 0 = 3839B */ +#define WLAN_HTCAP_C_MAXAMSDUSIZE 0x0800 +#define WLAN_HTCAP_C_DSSSCCK40 0x1000 +#define WLAN_HTCAP_C_PSMP 0x2000 +#define WLAN_HTCAP_C_INTOLERANT40 0x4000 +#define WLAN_HTCAP_C_LSIGTXOPPROT 0x8000 +/* Spatial Multiplexing (SM) capability bitmask */ +#define WLAN_HTCAP_C_SM_MASK 0x000c + +/* VHT Operation */ +/* 20/40 MHz Operating Channel */ +#define WLAN_VHTOP_CHWIDTH_2040 0 +/* 80 MHz Operating Channel */
+#define WLAN_VHTOP_CHWIDTH_80 1 +/* 160 MHz Operating Channel */ +#define WLAN_VHTOP_CHWIDTH_160 2 +/* 80 + 80 MHz Operating Channel */ +#define WLAN_VHTOP_CHWIDTH_80_80 3 +/* 160 MHz Operating Channel (revised signalling) */ +#define WLAN_VHTOP_CHWIDTH_REVSIG_160 1 +/* 80 + 80 MHz Operating Channel (revised signalling) */ +#define WLAN_VHTOP_CHWIDTH_REVSIG_80_80 1 + +#define WLAN_RATE_VAL 0x7f + +#define WLAN_RV(v) ((v) & WLAN_RATE_VAL) + +#define WLAN_CHAN_IS_5GHZ(chanidx) \ + ((chanidx > 30) ? true : false) +#define WLAN_CHAN_IS_2GHZ(chanidx) \ + (((chanidx > 0) && (chanidx < 15)) ? true : false) + +/* Check if revised signalling is being used for VHT160 in vhtop */ +#define WLAN_IS_REVSIG_VHT160(vhtop) (((vhtop)->vht_op_chwidth == \ + WLAN_VHTOP_CHWIDTH_REVSIG_160) && \ + ((vhtop)->vht_op_ch_freq_seg2 != 0) && \ + (abs((vhtop)->vht_op_ch_freq_seg2 - (vhtop)->vht_op_ch_freq_seg1) == 8)) + +/* Check if revised signalling is being used for VHT80p80 in vhtop */ +#define WLAN_IS_REVSIG_VHT80_80(vhtop) (((vhtop)->vht_op_chwidth == \ + WLAN_VHTOP_CHWIDTH_REVSIG_80_80) && \ + ((vhtop)->vht_op_ch_freq_seg2 != 0) && \ + (abs((vhtop)->vht_op_ch_freq_seg2 - (vhtop)->vht_op_ch_freq_seg1) > 8)) + +#define LE_READ_2(p) \ + ((uint16_t)\ + ((((const uint8_t *)(p))[0]) |\ + (((const uint8_t *)(p))[1] << 8))) + +#define LE_READ_4(p) \ + ((uint32_t)\ + ((((const uint8_t *)(p))[0]) |\ + (((const uint8_t *)(p))[1] << 8) | \ + (((const uint8_t *)(p))[2] << 16) |\ + (((const uint8_t *)(p))[3] << 24))) + +#define BE_READ_4(p) \ + ((uint32_t)\ + ((((const uint8_t *)(p))[0] << 24) |\ + (((const uint8_t *)(p))[1] << 16) |\ + (((const uint8_t *)(p))[2] << 8) |\ + (((const uint8_t *)(p))[3]))) + +/** + * enum ext_chan_offset: extension channel offset + * @WLAN_HTINFO_EXTOFFSET_NA: no extension channel is present + * @WLAN_HTINFO_EXTOFFSET_ABOVE: above control channel + * @WLAN_HTINFO_EXTOFFSET_UNDEF: undefined + * @WLAN_HTINFO_EXTOFFSET_BELOW: below control channel + */ +enum 
ext_chan_offset { + WLAN_HTINFO_EXTOFFSET_NA = 0, + WLAN_HTINFO_EXTOFFSET_ABOVE = 1, + WLAN_HTINFO_EXTOFFSET_UNDEF = 2, + WLAN_HTINFO_EXTOFFSET_BELOW = 3 +}; + +/** + * enum element_ie :- Management information element + * @WLAN_ELEMID_SSID: ssid IE + * @WLAN_ELEMID_RATES: Rates IE + * @WLAN_ELEMID_FHPARMS: FH param IE + * @WLAN_ELEMID_DSPARMS: DS Param IE + * @WLAN_ELEMID_CFPARMS : CF Param IE + * @WLAN_ELEMID_TIM: TIM IE + * @WLAN_ELEMID_IBSSPARMS: Ibss params IE + * @WLAN_ELEMID_COUNTRY: Country code IE + * @WLAN_ELEMID_REQINFO: Req Info IE + * @WLAN_ELEMID_QBSS_LOAD: Qbss load IE + * @WLAN_ELEMID_TCLAS: TCLAS IE + * @WLAN_ELEMID_CHALLENGE: Challenge IE + * @WLAN_ELEMID_PWRCNSTR: Power cn IE + * @WLAN_ELEMID_PWRCAP: power cap IE + * @WLAN_ELEMID_TPCREQ: TPC req IE + * @WLAN_ELEMID_TPCREP: TPC rsp IE + * @WLAN_ELEMID_SUPPCHAN: Supported channel IE + * @WLAN_ELEMID_CHANSWITCHANN: Channel switch IE + * @WLAN_ELEMID_MEASREQ: Measurement request IE + * @WLAN_ELEMID_MEASREP: Measurement Resp IE + * @WLAN_ELEMID_QUIET: Quiet IE + * @WLAN_ELEMID_IBSSDFS: IBSS DFS IE + * @WLAN_ELEMID_ERP: ERP IE + * @WLAN_ELEMID_TCLAS_PROCESS: TCLAS process IE + * @WLAN_ELEMID_HTCAP_ANA: HTT Capability IE + * @WLAN_ELEMID_RSN: RSN IE + * @WLAN_ELEMID_XRATES: Extended rate IE + * @WLAN_ELEMID_HTCAP_VENDOR: HT cap vendor IE + * @WLAN_ELEMID_HTINFO_VENDOR: HT info vendor IE + * @WLAN_ELEMID_MOBILITY_DOMAIN: MD IE + * @WLAN_ELEMID_FT: FT IE + * @WLAN_ELEMID_TIMEOUT_INTERVAL: Timeout interval IE + * @WLAN_ELEMID_SUPP_OP_CLASS: OP class IE + * @WLAN_ELEMID_EXTCHANSWITCHANN: Extended Channel switch IE + * @WLAN_ELEMID_HTINFO_ANA: HT info IE + * @WLAN_ELEMID_SECCHANOFFSET: Sec channel Offset IE + * @WLAN_ELEMID_WAPI: WAPI IE + * @WLAN_ELEMID_TIME_ADVERTISEMENT: Time IE + * @WLAN_ELEMID_RRM: Radio resource measurement IE + * @WLAN_ELEMID_2040_COEXT: 20-40 COext ext IE + * @WLAN_ELEMID_2040_INTOL:20-40 INT OL IE + * @WLAN_ELEMID_OBSS_SCAN: OBSS scan IE + * @WLAN_ELEMID_MMIE: 802.11w Management MIC 
IE
+ * @WLAN_ELEMID_FMS_DESCRIPTOR: 802.11v FMS descriptor IE
+ * @WLAN_ELEMID_FMS_REQUEST: 802.11v FMS request IE
+ * @WLAN_ELEMID_FMS_RESPONSE: 802.11v FMS response IE
+ * @WLAN_ELEMID_BSSMAX_IDLE_PERIOD: BSS max idle period (value 90)
+ * @WLAN_ELEMID_TFS_REQUEST: TFS req IE
+ * @WLAN_ELEMID_TFS_RESPONSE: TFS resp IE
+ * @WLAN_ELEMID_TIM_BCAST_REQUEST: TIM bcast req IE
+ * @WLAN_ELEMID_TIM_BCAST_RESPONSE: TIM bcast resp IE
+ * @WLAN_ELEMID_INTERWORKING: Interworking IE
+ * @WLAN_ELEMID_QOS_MAP: QOS MAP IE
+ * @WLAN_ELEMID_XCAPS: Extended capability IE
+ * @WLAN_ELEMID_TPC: TPC IE
+ * @WLAN_ELEMID_CCKM: CCKM IE
+ * @WLAN_ELEMID_VHTCAP: VHT Capabilities
+ * @WLAN_ELEMID_VHTOP: VHT Operation
+ * @WLAN_ELEMID_EXT_BSS_LOAD: Extended BSS Load
+ * @WLAN_ELEMID_WIDE_BAND_CHAN_SWITCH: Wide Band Channel Switch
+ * @WLAN_ELEMID_VHT_TX_PWR_ENVLP: VHT Transmit Power Envelope
+ * @WLAN_ELEMID_CHAN_SWITCH_WRAP: Channel Switch Wrapper
+ * @WLAN_ELEMID_AID: AID
+ * @WLAN_ELEMID_QUIET_CHANNEL: Quiet Channel
+ * @WLAN_ELEMID_OP_MODE_NOTIFY: Operating Mode Notification
+ * @WLAN_ELEMID_VENDOR: vendor private
+ * @WLAN_ELEMID_FILS_INDICATION: FILS indication IE
+ * @WLAN_ELEMID_EXTN_ELEM: extended IE
+ */
+enum element_ie {
+	WLAN_ELEMID_SSID = 0,
+	WLAN_ELEMID_RATES = 1,
+	WLAN_ELEMID_FHPARMS = 2,
+	WLAN_ELEMID_DSPARMS = 3,
+	WLAN_ELEMID_CFPARMS = 4,
+	WLAN_ELEMID_TIM = 5,
+	WLAN_ELEMID_IBSSPARMS = 6,
+	WLAN_ELEMID_COUNTRY = 7,
+	WLAN_ELEMID_REQINFO = 10,
+	WLAN_ELEMID_QBSS_LOAD = 11,
+	WLAN_ELEMID_TCLAS = 14,
+	WLAN_ELEMID_CHALLENGE = 16,
+	/* 17-31 reserved for challenge text extension */
+	WLAN_ELEMID_PWRCNSTR = 32,
+	WLAN_ELEMID_PWRCAP = 33,
+	WLAN_ELEMID_TPCREQ = 34,
+	WLAN_ELEMID_TPCREP = 35,
+	WLAN_ELEMID_SUPPCHAN = 36,
+	WLAN_ELEMID_CHANSWITCHANN = 37,
+	WLAN_ELEMID_MEASREQ = 38,
+	WLAN_ELEMID_MEASREP = 39,
+	WLAN_ELEMID_QUIET = 40,
+	WLAN_ELEMID_IBSSDFS = 41,
+	WLAN_ELEMID_ERP = 42,
+	WLAN_ELEMID_TCLAS_PROCESS = 44,
+	WLAN_ELEMID_HTCAP_ANA = 45,
+	WLAN_ELEMID_RSN = 48,
+	WLAN_ELEMID_XRATES = 50,
+	WLAN_ELEMID_HTCAP_VENDOR = 51,
+	WLAN_ELEMID_HTINFO_VENDOR = 52,
+	WLAN_ELEMID_MOBILITY_DOMAIN = 54,
+	WLAN_ELEMID_FT = 55,
+	WLAN_ELEMID_TIMEOUT_INTERVAL = 56,
+	WLAN_ELEMID_SUPP_OP_CLASS = 59,
+	WLAN_ELEMID_EXTCHANSWITCHANN = 60,
+	WLAN_ELEMID_HTINFO_ANA = 61,
+	WLAN_ELEMID_SECCHANOFFSET = 62,
+	WLAN_ELEMID_WAPI = 68,
+	WLAN_ELEMID_TIME_ADVERTISEMENT = 69,
+	WLAN_ELEMID_RRM = 70,
+	WLAN_ELEMID_2040_COEXT = 72,
+	WLAN_ELEMID_2040_INTOL = 73,
+	WLAN_ELEMID_OBSS_SCAN = 74,
+	WLAN_ELEMID_MMIE = 76,
+	WLAN_ELEMID_FMS_DESCRIPTOR = 86,
+	WLAN_ELEMID_FMS_REQUEST = 87,
+	WLAN_ELEMID_FMS_RESPONSE = 88,
+	WLAN_ELEMID_BSSMAX_IDLE_PERIOD = 90,
+	WLAN_ELEMID_TFS_REQUEST = 91,
+	WLAN_ELEMID_TFS_RESPONSE = 92,
+	WLAN_ELEMID_TIM_BCAST_REQUEST = 94,
+	WLAN_ELEMID_TIM_BCAST_RESPONSE = 95,
+	WLAN_ELEMID_INTERWORKING = 107,
+	WLAN_ELEMID_QOS_MAP = 110,
+	WLAN_ELEMID_XCAPS = 127,
+	WLAN_ELEMID_TPC = 150,
+	WLAN_ELEMID_CCKM = 156,
+	WLAN_ELEMID_VHTCAP = 191,
+	WLAN_ELEMID_VHTOP = 192,
+	WLAN_ELEMID_EXT_BSS_LOAD = 193,
+	WLAN_ELEMID_WIDE_BAND_CHAN_SWITCH = 194,
+	WLAN_ELEMID_VHT_TX_PWR_ENVLP = 195,
+	WLAN_ELEMID_CHAN_SWITCH_WRAP = 196,
+	WLAN_ELEMID_AID = 197,
+	WLAN_ELEMID_QUIET_CHANNEL = 198,
+	WLAN_ELEMID_OP_MODE_NOTIFY = 199,
+	WLAN_ELEMID_VENDOR = 221,
+	WLAN_ELEMID_FILS_INDICATION = 240,
+	WLAN_ELEMID_EXTN_ELEM = 255,
+};
+
+/**
+ * enum extn_element_ie :- extended management information element
+ * @WLAN_EXTN_ELEMID_HECAP: HE capabilities IE
+ * @WLAN_EXTN_ELEMID_HEOP: HE Operation IE
+ * @WLAN_EXTN_ELEMID_MUEDCA: MU-EDCA IE
+ * @WLAN_EXTN_ELEMID_SRP: spatial reuse parameter IE
+ * @WLAN_EXTN_ELEMID_ESP: estimated service parameters IE
+ */
+enum extn_element_ie {
+	WLAN_EXTN_ELEMID_HECAP = 35,
+	WLAN_EXTN_ELEMID_HEOP = 36,
+	WLAN_EXTN_ELEMID_MUEDCA = 38,
+	WLAN_EXTN_ELEMID_SRP = 39,
+	WLAN_EXTN_ELEMID_ESP = 11,
+};
+
+#define WLAN_OUI_SIZE 4
+#define WLAN_MAX_CIPHER 6
+#define WLAN_RSN_SELECTOR_LEN 4
+#define WLAN_WPA_SELECTOR_LEN 4
+#define PMKID_LEN 16
+#define MAX_PMKID 4
+
+#define WLAN_WPA_OUI 0xf25000
+#define WLAN_WPA_OUI_TYPE 0x01
+#define WPA_VERSION 1
+#define WLAN_WPA_SEL(x) (((x) << 24) | WLAN_WPA_OUI) + +#define WLAN_RSN_OUI 0xac0f00 +#define WLAN_CCKM_OUI 0x964000 +#define WLAN_CCKM_ASE_UNSPEC 0 +#define WLAN_WPA_CCKM_AKM 0x00964000 +#define WLAN_RSN_CCKM_AKM 0x00964000 +#define WLAN_RSN_DPP_AKM 0x029A6F50 +#define WLAN_RSN_OSEN_AKM 0x019A6F50 + +#define RSN_VERSION 1 +#define WLAN_RSN_SEL(x) (((x) << 24) | WLAN_RSN_OUI) +#define WLAN_CCKM_SEL(x) (((x) << 24) | WLAN_CCKM_OUI) + +#define WLAN_CSE_NONE 0x00 +#define WLAN_CSE_WEP40 0x01 +#define WLAN_CSE_TKIP 0x02 +#define WLAN_CSE_RESERVED 0x03 +#define WLAN_CSE_CCMP 0x04 +#define WLAN_CSE_WEP104 0x05 +#define WLAN_CSE_AES_CMAC 0x06 +#define WLAN_CSE_GCMP_128 0x08 +#define WLAN_CSE_GCMP_256 0x09 +#define WLAN_CSE_CCMP_256 0x0A +#define WLAN_CSE_BIP_GMAC_128 0x0B +#define WLAN_CSE_BIP_GMAC_256 0x0C +#define WLAN_CSE_BIP_CMAC_256 0x0D + +#define WLAN_AKM_IEEE8021X 0x01 +#define WLAN_AKM_PSK 0x02 +#define WLAN_AKM_FT_IEEE8021X 0x03 +#define WLAN_AKM_FT_PSK 0x04 +#define WLAN_AKM_SHA256_IEEE8021X 0x05 +#define WLAN_AKM_SHA256_PSK 0x06 +#define WLAN_AKM_SAE 0x08 +#define WLAN_AKM_SUITEB_EAP_SHA256 0x0B +#define WLAN_AKM_SUITEB_EAP_SHA384 0x0C +#define WLAN_AKM_FILS_SHA256 0x0E +#define WLAN_AKM_FILS_SHA384 0x0F +#define WLAN_AKM_FILS_FT_SHA256 0x10 +#define WLAN_AKM_FILS_FT_SHA384 0x11 +#define WLAN_AKM_OWE 0x12 + +#define WLAN_ASE_NONE 0x00 +#define WLAN_ASE_8021X_UNSPEC 0x01 +#define WLAN_ASE_8021X_PSK 0x02 +#define WLAN_ASE_FT_IEEE8021X 0x20 +#define WLAN_ASE_FT_PSK 0x40 +#define WLAN_ASE_SHA256_IEEE8021X 0x80 +#define WLAN_ASE_SHA256_PSK 0x100 +#define WLAN_ASE_WPS 0x200 + +#define RSN_CAP_MFP_CAPABLE 0x80 +#define RSN_CAP_MFP_REQUIRED 0x40 + +/** + * struct wlan_rsn_ie_hdr: rsn ie header + * @elem_id: RSN element id WLAN_ELEMID_RSN. 
+ * @len: rsn ie length + * @version: RSN ver + */ +struct wlan_rsn_ie_hdr { + u8 elem_id; + u8 len; + u8 version[2]; +}; + +#define WLAN_RSN_IE_MIN_LEN 2 + +/** + * struct wlan_rsn_ie: rsn ie info + * @ver: RSN ver + * @gp_cipher_suite: group cipher + * @pwise_cipher_count: number of pw cipher + * @pwise_cipher_suites: pair wise cipher list + * @akm_suite_count: Number of akm suite + * @akm_suites: akm suites list + * @cap: RSN capability + * @pmkid_count: number of PMKID + * @pmkid: PMKID list + * @mgmt_cipher_suite: management (11w) cipher suite + */ +struct wlan_rsn_ie { + uint16_t ver; + uint32_t gp_cipher_suite; + uint16_t pwise_cipher_count; + uint32_t pwise_cipher_suites[WLAN_MAX_CIPHER]; + uint16_t akm_suite_count; + uint32_t akm_suites[WLAN_MAX_CIPHER]; + uint16_t cap; + uint16_t pmkid_count; + uint8_t pmkid[MAX_PMKID][PMKID_LEN]; + uint32_t mgmt_cipher_suite; +}; + +#define WLAN_WAPI_IE_MIN_LEN 20 + +/** + * struct wlan_wpa_ie_hdr: wpa ie header + * @elem_id: Wpa element id, vender specific. 
+ * @len: wpa ie length
+ * @oui: 24-bit OUI followed by 8-bit OUI type
+ * @version: wpa ver
+ */
+struct wlan_wpa_ie_hdr {
+	u8 elem_id;
+	u8 len;
+	u8 oui[4];
+	u8 version[2];
+};
+
+/**
+ * struct wlan_wpa_ie: WPA ie info
+ * @ver: WPA ver
+ * @mc_cipher: multicast cipher
+ * @uc_cipher_count: number of unicast cipher
+ * @uc_ciphers: unicast cipher list
+ * @auth_suite_count: Number of akm suite
+ * @auth_suites: akm suites list
+ * @cap: WPA capability
+ */
+struct wlan_wpa_ie {
+	uint16_t ver;
+	uint32_t mc_cipher;
+	uint16_t uc_cipher_count;
+	uint32_t uc_ciphers[WLAN_MAX_CIPHER];
+	uint16_t auth_suite_count;
+	uint32_t auth_suites[WLAN_MAX_CIPHER];
+	uint16_t cap;
+};
+
+#define WAPI_VERSION 1
+#define WLAN_WAPI_OUI 0x721400
+
+#define WLAN_WAPI_SEL(x) (((x) << 24) | WLAN_WAPI_OUI)
+
+#define WLAN_WAI_CERT_OR_SMS4 0x01
+#define WLAN_WAI_PSK 0x02
+
+/**
+ * struct wlan_wapi_ie: WAPI ie info
+ * @ver: WAPI ver
+ * @akm_suite_count: Number of akm suite
+ * @akm_suites: akm suites list
+ * @uc_cipher_count: unicast cipher count
+ * @uc_cipher_suites: unicast cipher suite
+ * @mc_cipher_suite: mc cipher suite
+ */
+struct wlan_wapi_ie {
+	uint16_t ver;
+	uint16_t akm_suite_count;
+	uint32_t akm_suites[WLAN_MAX_CIPHER];
+	uint16_t uc_cipher_count;
+	uint32_t uc_cipher_suites[WLAN_MAX_CIPHER];
+	uint32_t mc_cipher_suite;
+};
+
+/**
+ * struct wlan_frame_hdr: generic IEEE 802.11 frames
+ * @i_fc: frame control
+ * @i_dur: duration field
+ * @i_addr1: mac address 1
+ * @i_addr2: mac address 2
+ * @i_addr3: mac address 3
+ * @i_seq: seq info
+ */
+struct wlan_frame_hdr {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	union {
+		struct {
+			uint8_t i_addr1[QDF_MAC_ADDR_SIZE];
+			uint8_t i_addr2[QDF_MAC_ADDR_SIZE];
+			uint8_t i_addr3[QDF_MAC_ADDR_SIZE];
+		};
+		uint8_t i_addr_all[3 * QDF_MAC_ADDR_SIZE];
+	};
+	uint8_t i_seq[2];
+} qdf_packed;
+
+/* sequence number offset base on begin of mac header */
+#define WLAN_SEQ_CTL_OFFSET 22
+#define WLAN_LOW_SEQ_NUM_MASK 0x000F
+#define WLAN_HIGH_SEQ_NUM_MASK 0x0FF0
+#define WLAN_HIGH_SEQ_NUM_OFFSET 4
+
+/**
+ * struct wlan_seq_ctl: sequence number control
+ * @frag_num: frag number
+ * @seq_num_lo: sequence number low byte
+ * @seq_num_hi: sequence number high byte
+ */
+struct wlan_seq_ctl {
+	uint8_t frag_num:4;
+	uint8_t seq_num_lo:4;
+	uint8_t seq_num_hi:8;
+} qdf_packed;
+
+/**
+ * union wlan_capability : wlan_capability info
+ * @value: capability value
+ */
+union wlan_capability {
+	struct caps {
+		uint16_t ess:1;
+		uint16_t ibss:1;
+		uint16_t cf_pollable:1;
+		uint16_t cf_poll_request:1;
+		uint16_t privacy:1;
+		uint16_t short_preamble:1;
+		uint16_t pbcc:1;
+		uint16_t channel_agility:1;
+		uint16_t spectrum_management:1;
+		uint16_t qos:1;
+		uint16_t short_slot_time:1;
+		uint16_t apsd:1;
+		uint16_t reserved2:1;
+		uint16_t dsss_ofdm:1;
+		uint16_t del_block_ack:1;
+		uint16_t immed_block_ack:1;
+	} wlan_caps;
+	uint16_t value;
+} qdf_packed;
+
+/**
+ * struct ie_header : IE header
+ * @ie_id: Element Id
+ * @ie_len: IE Length
+ */
+struct ie_header {
+	uint8_t ie_id;
+	uint8_t ie_len;
+} qdf_packed;
+
+/**
+ * struct extn_ie_header : Extension IE header
+ * @ie_id: Element Id
+ * @ie_len: IE Length
+ * @ie_extn_id: extension id
+ */
+struct extn_ie_header {
+	uint8_t ie_id;
+	uint8_t ie_len;
+	uint8_t ie_extn_id;
+} qdf_packed;
+
+
+/**
+ * struct ie_ssid : ssid IE
+ * @ssid_id: SSID Element Id
+ * @ssid_len: SSID IE Length
+ * @ssid: ssid value
+ */
+struct ie_ssid {
+	uint8_t ssid_id;
+	uint8_t ssid_len;
+	uint8_t ssid[WLAN_SSID_MAX_LEN];
+} qdf_packed;
+
+/**
+ * struct ds_ie : ds IE
+ * @ie: DS Element Id
+ * @len: DS IE Length
+ * @cur_chan: channel info
+ */
+struct ds_ie {
+	uint8_t ie;
+	uint8_t len;
+	uint8_t cur_chan;
+} qdf_packed;
+
+/**
+ * struct erp_ie: ERP IE
+ * @ie: ERP Element Id
+ * @len: ERP IE Length
+ * @value: ERP info
+ */
+struct erp_ie {
+	uint8_t ie;
+	uint8_t len;
+	uint8_t value;
+} qdf_packed;
+
+/**
+ * struct htcap_cmn_ie: HT common IE info
+ * @hc_cap: HT
capabilities
+ * @ampdu_param: ampdu params
+ * @mcsset: supported MCS set
+ * @extcap: extended HT capabilities
+ * @txbf_cap: txbf capabilities
+ * @antenna: antenna capabilities
+ */
+struct htcap_cmn_ie {
+	uint16_t hc_cap;
+	uint8_t ampdu_param;
+	uint8_t mcsset[16];
+	uint16_t extcap;
+	uint32_t txbf_cap;
+	uint8_t antenna;
+} qdf_packed;
+
+/**
+ * struct htcap_ie: HT Capability IE
+ * @id: HT IE
+ * @len: HT IE LEN
+ * @ie: HT cap info
+ */
+struct htcap_ie {
+	uint8_t id;
+	uint8_t len;
+	struct htcap_cmn_ie ie;
+} qdf_packed;
+
+/**
+ * struct fils_indication_ie: FILS indication IE element
+ * @id: id
+ * @len: len
+ * @public_key_identifiers_cnt: public key identifiers count
+ * @realm_identifiers_cnt: realm identifiers count
+ * @is_ip_config_supported: whether ip config is supported in AP
+ * @is_cache_id_present: whether cache identifier is present
+ * @is_hessid_present: whether hessid is present
+ * @is_fils_sk_auth_supported: FILS shared key authentication is supported
+ * @is_fils_sk_auth_pfs_supported: FILS shared key auth with PFS is supported
+ * @is_pk_auth_supported: FILS public key authentication is supported
+ * @reserved: reserved
+ * @variable_data: pointer to data depends on initial variables
+ */
+struct fils_indication_ie {
+	uint8_t id;
+	uint8_t len;
+	uint16_t public_key_identifiers_cnt:3;
+	uint16_t realm_identifiers_cnt:3;
+	uint16_t is_ip_config_supported:1;
+	uint16_t is_cache_id_present:1;
+	uint16_t is_hessid_present:1;
+	uint16_t is_fils_sk_auth_supported:1;
+	uint16_t is_fils_sk_auth_pfs_supported:1;
+	uint16_t is_pk_auth_supported:1;
+	uint16_t reserved:4;
+	uint8_t variable_data[253];
+} qdf_packed;
+
+#define WLAN_VENDOR_HT_IE_OFFSET_LEN 4
+
+/**
+ * struct wlan_vendor_ie_htcap: vendor private HT Capability IE
+ * @id: HT IE
+ * @hlen: HT IE len
+ * @oui: vendor OUI
+ * @oui_type: Oui type
+ * @ie: HT cap info
+ */
+struct wlan_vendor_ie_htcap {
+	uint8_t id;
+	uint8_t hlen;
+	uint8_t oui[3];
+	uint8_t oui_type;
+	struct htcap_cmn_ie ie;
+} qdf_packed;
+
+/**
+ * struct wlan_ie_htinfo_cmn: ht info common
+ * @hi_ctrlchannel: control channel
+ * @hi_extchoff: B0-1 extension channel offset
+ * @hi_txchwidth: B2 recommended xmiss width set
+ * @hi_rifsmode: rifs mode
+ * @hi_ctrlaccess: controlled access only
+ * @hi_serviceinterval: B5-7 svc interval granularity
+ * @hi_opmode: B0-1 operating mode
+ * @hi_nongfpresent: B2 non greenfield devices present
+ * @hi_txburstlimit: B3 transmit burst limit
+ * @hi_obssnonhtpresent: B4 OBSS non-HT STA present
+ * @hi_reserved0: B5-15 reserved
+ * @hi_reserved2: B0-5 reserved
+ * @hi_dualbeacon: B6 dual beacon
+ * @hi_dualctsprot: B7 dual CTS protection
+ * @hi_stbcbeacon: B8 STBC beacon
+ * @hi_lsigtxopprot: B9 l-sig txop protection full support
+ * @hi_pcoactive: B10 pco active
+ * @hi_pcophase: B11 pco phase
+ * @hi_reserved1: B12-15 reserved
+ * @hi_basicmcsset: basic MCS set
+ */
+struct wlan_ie_htinfo_cmn {
+	uint8_t hi_ctrlchannel;
+	uint8_t hi_extchoff:2,
+		hi_txchwidth:1,
+		hi_rifsmode:1,
+		hi_ctrlaccess:1,
+		hi_serviceinterval:3;
+	uint16_t hi_opmode:2,
+		hi_nongfpresent:1,
+		hi_txburstlimit:1,
+		hi_obssnonhtpresent:1,
+		hi_reserved0:11;
+	uint16_t hi_reserved2:6,
+		hi_dualbeacon:1,
+		hi_dualctsprot:1,
+		hi_stbcbeacon:1,
+		hi_lsigtxopprot:1,
+		hi_pcoactive:1,
+		hi_pcophase:1,
+		hi_reserved1:4;
+	uint8_t hi_basicmcsset[16];
+} qdf_packed;
+
+/**
+ * struct wlan_ie_htinfo: HT info IE
+ * @hi_id: HT info IE
+ * @hi_len: HT info IE len
+ * @hi_ie: HT info info
+ */
+struct wlan_ie_htinfo {
+	uint8_t hi_id;
+	uint8_t hi_len;
+	struct wlan_ie_htinfo_cmn hi_ie;
+} qdf_packed;
+
+/**
+ * struct wlan_vendor_ie_htinfo: vendor private HT info IE
+ * @hi_id: HT info IE
+ * @hi_len: HT info IE len
+ * @hi_oui: vendor OUI
+ * @hi_ouitype: Oui type
+ * @hi_ie: HT info info
+ */
+struct wlan_vendor_ie_htinfo {
+	uint8_t hi_id;
+	uint8_t hi_len;
+	uint8_t hi_oui[3];
+	uint8_t hi_ouitype;
+	struct wlan_ie_htinfo_cmn hi_ie;
+} qdf_packed;
+
+#define WLAN_VENDOR_VHTCAP_IE_OFFSET 7
+#define WLAN_VENDOR_VHTOP_IE_OFFSET 21
+
+/**
+ * struct wlan_ie_vhtcaps - VHT capabilities
+ * @elem_id: VHT caps IE
+ * @elem_len: VHT caps IE len
+ * @max_mpdu_len: MPDU length
+ * @supported_channel_widthset: channel width set
+ * @ldpc_coding: LDPC coding capability
+ * @shortgi80: short GI 80 support
+ * @shortgi160and80plus80: short Gi 160 & 80+80 support
+ * @tx_stbc: Tx STBC cap
+ * @rx_stbc: Rx STBC cap
+ * @su_beam_former: SU beam former cap
+ * @su_beam_formee: SU beam formee cap
+ * @csnof_beamformer_antSup: Antenna support for beamforming
+ * @num_soundingdim: Sound dimensions
+ * @mu_beam_former: MU beam former cap
+ * @mu_beam_formee: MU beam formee cap
+ * @vht_txops: TXOP power save
+ * @htc_vhtcap: HTC VHT capability
+ * @max_ampdu_lenexp: AMPDU length
+ * @vht_link_adapt: VHT link adapatation capable
+ * @rx_antpattern: Rx Antenna pattern
+ * @tx_antpattern: Tx Antenna pattern
+ * @rx_mcs_map: RX MCS map
+ * @rx_high_sup_data_rate: highest RX supported data rate
+ * @tx_mcs_map: TX MCS map
+ * @tx_sup_data_rate: highest TX supported data rate
+ */
+struct wlan_ie_vhtcaps {
+	uint8_t elem_id;
+	uint8_t elem_len;
+	uint32_t max_mpdu_len:2;
+	uint32_t supported_channel_widthset:2;
+	uint32_t ldpc_coding:1;
+	uint32_t shortgi80:1;
+	uint32_t shortgi160and80plus80:1;
+	uint32_t tx_stbc:1;
+	uint32_t rx_stbc:3;
+	uint32_t su_beam_former:1;
+	uint32_t su_beam_formee:1;
+	uint32_t csnof_beamformer_antSup:3;
+	uint32_t num_soundingdim:3;
+	uint32_t mu_beam_former:1;
+	uint32_t mu_beam_formee:1;
+	uint32_t vht_txops:1;
+	uint32_t htc_vhtcap:1;
+	uint32_t max_ampdu_lenexp:3;
+	uint32_t vht_link_adapt:2;
+	uint32_t rx_antpattern:1;
+	uint32_t tx_antpattern:1;
+	uint32_t unused:2;
+	uint16_t rx_mcs_map;
+	uint16_t rx_high_sup_data_rate:13;
+	uint16_t reserved2:3;
+	uint16_t tx_mcs_map;
+	uint16_t tx_sup_data_rate:13;
+	uint16_t reserved3:3;
+} qdf_packed;
+
+/**
+ * struct wlan_ie_vhtop: VHT op IE
+ *
@elem_len: VHT op IE len
+ * @vht_op_chwidth: BSS Operational Channel width
+ * @vht_op_ch_freq_seg1: Channel Center frequency
+ * @vht_op_ch_freq_seg2: Channel Center frequency for 80+80MHz
+ * @vhtop_basic_mcs_set: Basic MCS set
+ */
+struct wlan_ie_vhtop {
+	uint8_t elem_id;
+	uint8_t elem_len;
+	uint8_t vht_op_chwidth;
+	uint8_t vht_op_ch_freq_seg1;
+	uint8_t vht_op_ch_freq_seg2;
+	uint16_t vhtop_basic_mcs_set;
+} qdf_packed;
+
+/**
+ * struct wlan_country_ie: country IE
+ * @ie: country IE
+ * @len: IE len
+ * @cc: country code
+ */
+struct wlan_country_ie {
+	uint8_t ie;
+	uint8_t len;
+	uint8_t cc[3];
+} qdf_packed;
+
+/**
+ * struct qbss_load_ie: QBSS load IE
+ * @ie: QBSS IE
+ * @len: IE len
+ * @station_count: number of station associated
+ * @qbss_chan_load: qbss channel load
+ * @qbss_load_avail: qbss_load_avail
+ */
+struct qbss_load_ie {
+	uint8_t ie;
+	uint8_t len;
+	uint16_t station_count;
+	uint8_t qbss_chan_load;
+	uint16_t qbss_load_avail;
+} qdf_packed;
+
+/**
+ * struct wlan_bcn_frame: beacon frame fixed params
+ * @timestamp: the value of sender's TSFTIMER
+ * @beacon_interval: beacon interval
+ * @capability: capability
+ * @ie: variable IE
+ */
+struct wlan_bcn_frame {
+	uint8_t timestamp[8];
+	uint16_t beacon_interval;
+	union wlan_capability capability;
+	struct ie_header ie;
+} qdf_packed;
+
+#define WLAN_TIM_IE_MIN_LENGTH 4
+
+/**
+ * struct wlan_tim_ie: tim IE
+ * @tim_ie: Time IE
+ * @tim_len: TIM IE len
+ * @tim_count: dtim count
+ * @tim_period: dtim period
+ * @tim_bitctl: bitmap control
+ * @tim_bitmap: variable length bitmap
+ */
+struct wlan_tim_ie {
+	uint8_t tim_ie;         /* WLAN_ELEMID_TIM */
+	uint8_t tim_len;
+	uint8_t tim_count;      /* DTIM count */
+	uint8_t tim_period;     /* DTIM period */
+	uint8_t tim_bitctl;     /* bitmap control */
+	uint8_t tim_bitmap[251];        /* variable-length bitmap */
+} qdf_packed;
+
+/**
+ * struct rsn_mdie: mobility domain IE
+ * @rsn_id: RSN IE id
+ * @rsn_len: RSN IE len
+ * @mobility_domain: mobility domain info
+ * @ft_capab: ft capability
+ *
+ * Reference 9.4.2.47 Mobility Domain element (MDE) of 802.11-2016
+ */
+struct rsn_mdie {
+	uint8_t rsn_id;
+	uint8_t rsn_len;
+	uint8_t mobility_domain[2];
+	uint8_t ft_capab;
+} qdf_packed;
+
+/**
+ * struct wlan_srp_ie: Spatial reuse parameter IE
+ * @srp_id: SRP IE id
+ * @srp_len: SRP IE len
+ * @srp_id_extn: SRP Extension ID
+ * @sr_control: sr control
+ * @non_srg_obsspd_max_offset: non srg obsspd max offset
+ * @srg_obss_pd_min_offset: srg obss pd min offset
+ * @srg_obss_pd_max_offset: srg obss pd max offset
+ * @srg_bss_color_bitmap: srg bss color bitmap
+ * @srg_partial_bssid_bitmap: srg partial bssid bitmap
+ */
+struct wlan_srp_ie {
+	uint8_t srp_id;
+	uint8_t srp_len;
+	uint8_t srp_id_extn;
+	uint8_t sr_control;
+	union {
+		struct {
+			uint8_t non_srg_obsspd_max_offset;
+			uint8_t srg_obss_pd_min_offset;
+			uint8_t srg_obss_pd_max_offset;
+			uint8_t srg_bss_color_bitmap[8];
+			uint8_t srg_partial_bssid_bitmap[8];
+		} qdf_packed nonsrg_srg_info;
+		struct {
+			uint8_t non_srg_obsspd_max_offset;
+		} qdf_packed nonsrg_info;
+		struct {
+			uint8_t srg_obss_pd_min_offset;
+			uint8_t srg_obss_pd_max_offset;
+			uint8_t srg_bss_color_bitmap[8];
+			uint8_t srg_partial_bssid_bitmap[8];
+		} qdf_packed srg_info;
+	};
+} qdf_packed;
+
+#define ESP_INFORMATION_LIST_LENGTH 3
+#define MAX_ESP_INFORMATION_FIELD 4
+/*
+ * enum access_category: tells about access category in ESP parameter
+ * @ESP_AC_BK: ESP access category for background
+ * @ESP_AC_BE: ESP access category for best effort
+ * @ESP_AC_VI: ESP access category for video
+ * @ESP_AC_VO: ESP access category for Voice
+ */
+enum access_category {
+	ESP_AC_BK,
+	ESP_AC_BE,
+	ESP_AC_VI,
+	ESP_AC_VO,
+
+};
+/*
+ * struct wlan_esp_info: structure for Esp information parameter
+ * @access_category: access category info
+ * @reserved: reserved
+ * @data_format: two bits in length and tells about data format
+ * i.e. 0 = No aggregation is expected to be performed for MSDUs or MPDUs with
+ * the Type subfield equal to Data for the corresponding AC
+ * 1 = A-MSDU aggregation is expected to be performed for MSDUs for the
+ * corresponding AC, but A-MPDU aggregation is not expected to be performed
+ * for MPDUs with the Type subfield equal to Data for the corresponding AC
+ * 2 = A-MPDU aggregation is expected to be performed for MPDUs with the Type
+ * subfield equal to Data for the corresponding AC, but A-MSDU aggregation is
+ * not expected to be performed for MSDUs for the corresponding AC
+ * 3 = A-MSDU aggregation is expected to be performed for MSDUs for the
+ * corresponding AC and A-MPDU aggregation is expected to be performed for
+ * MPDUs with the Type subfield equal to Data for the corresponding AC
+ * @ba_window_size: BA Window Size subfield is three bits in length and
+ * indicates the size of the Block Ack window that is
+ * expected for the corresponding access category
+ * @estimated_air_fraction: Estimated Air Time Fraction subfield is 8 bits in
+ * length and contains an unsigned integer that represents
+ * the predicted percentage of time, linearly scaled with 255 representing
+ * 100%, that a new STA joining the
+ * BSS will be allocated for PPDUs that contain only
+ * MPDUs with the Type
+ * subfield equal to Data of the
+ * corresponding access category for that STA.
+ * @ppdu_duration: Data PPDU Duration Target field is 8 bits in length and is
+ * an unsigned integer that indicates the expected target duration of PPDUs
+ * that contain only MPDUs with the Type subfield equal to Data for the
+ * corresponding access category in units of 50 μs
+ */
+struct wlan_esp_info {
+	uint8_t access_category:2;
+	uint8_t reserved:1;
+	uint8_t data_format:2;
+	uint8_t ba_window_size:3;
+	uint8_t estimated_air_fraction;
+	uint8_t ppdu_duration;
+};
+
+/**
+ * struct wlan_esp_ie: struct for ESP information
+ * @esp_id: ESP IE id
+ * @esp_len: ESP IE len
+ * @esp_id_extn: ESP Extension ID
+ * @esp_info_AC_BK: ESP information related to BK category
+ * @esp_info_AC_BE: ESP information related to BE category
+ * @esp_info_AC_VI: ESP information related to VI category
+ * @esp_info_AC_VO: ESP information related to VO category
+ */
+struct wlan_esp_ie {
+	uint8_t esp_id;
+	uint8_t esp_len;
+	uint8_t esp_id_extn;
+	struct wlan_esp_info esp_info_AC_BK;
+	struct wlan_esp_info esp_info_AC_BE;
+	struct wlan_esp_info esp_info_AC_VI;
+	struct wlan_esp_info esp_info_AC_VO;
+} qdf_packed;
+
+/**
+ * struct oce_reduced_wan_metrics: struct for oce wan metrics
+ * @downlink_av_cap: Download available capacity
+ * @uplink_av_cap: Upload available capacity
+ */
+struct oce_reduced_wan_metrics {
+	uint8_t downlink_av_cap:4;
+	uint8_t uplink_av_cap:4;
+};
+
+/**
+ * is_wpa_oui() - If vendor IE is WPA type
+ * @frm: vendor IE pointer
+ *
+ * API to check if vendor IE is WPA
+ *
+ * Return: true if its WPA IE
+ */
+static inline bool
+is_wpa_oui(uint8_t *frm)
+{
+	return (frm[1] > 3) && (LE_READ_4(frm + 2) ==
+		((WLAN_WPA_OUI_TYPE << 24) | WLAN_WPA_OUI));
+}
+
+/**
+ * is_wps_oui() - If vendor IE is WPS type
+ * @frm: vendor IE pointer
+ *
+ * API to check if vendor IE is WPS
+ *
+ * Return: true if its WPS IE
+ */
+static inline bool
+is_wps_oui(const uint8_t *frm)
+{
+	return frm[1] > 3 && BE_READ_4(frm + 2) == WSC_OUI;
+}
+
+/**
+ * is_mbo_oce_oui() - If vendor IE is MBO/OCE type
+ * @frm: vendor IE pointer
+ *
+ * API to check if vendor IE is MBO/OCE
+ *
+ * Return: true if its MBO/OCE IE
+ */
+static inline bool
+is_mbo_oce_oui(const uint8_t *frm)
+{
+	return frm[1] > 3 && BE_READ_4(frm + 2) == MBO_OCE_OUI;
+}
+
+/**
+ * is_wcn_oui() - If vendor IE is WCN type
+ * @frm: vendor IE pointer
+ *
+ * API to check if vendor IE is WCN
+ *
+ * Return: true if its WCN IE
+ */
+static inline bool
+is_wcn_oui(uint8_t *frm)
+{
+	return (frm[1] > 4) && (LE_READ_4(frm + 2) ==
+		((WCN_OUI_TYPE << 24) | WCN_OUI));
+}
+
+/**
+ * is_wme_param() - If vendor IE is WME param type
+ * @frm: vendor IE pointer
+ *
+ * API to check if vendor IE is WME param
+ *
+ * Return: true if its WME param IE
+ */
+static inline bool
+is_wme_param(const uint8_t *frm)
+{
+	return (frm[1] > 5) && (LE_READ_4(frm + 2) ==
+		((WME_OUI_TYPE << 24) | WME_OUI)) &&
+		(frm[6] == WME_PARAM_OUI_SUBTYPE);
+}
+
+/**
+ * is_wme_info() - If vendor IE is WME info type
+ * @frm: vendor IE pointer
+ *
+ * API to check if vendor IE is WME info
+ *
+ * Return: true if its WME info IE
+ */
+static inline bool
+is_wme_info(const uint8_t *frm)
+{
+	return (frm[1] > 5) && (LE_READ_4(frm + 2) ==
+		((WME_OUI_TYPE << 24) | WME_OUI)) &&
+		(frm[6] == WME_INFO_OUI_SUBTYPE);
+}
+
+/**
+ * is_atheros_oui() - If vendor IE is Atheros type
+ * @frm: vendor IE pointer
+ *
+ * API to check if vendor IE is Atheros
+ *
+ * Return: true if its Atheros IE
+ */
+static inline bool
+is_atheros_oui(const uint8_t *frm)
+{
+	return (frm[1] > 3) && LE_READ_4(frm + 2) ==
+		((ATH_OUI_TYPE << 24) | ATH_OUI);
+}
+
+/**
+ * is_atheros_extcap_oui() - If vendor IE is Atheros ext cap
+ * @frm: vendor IE pointer
+ *
+ * API to check if vendor IE is Atheros ext cap
+ *
+ * NOTE(review): returns int while the sibling helpers return bool -
+ * consider changing to bool for consistency.
+ *
+ * Return: true if its Atheros ext cap IE
+ */
+static inline int
+is_atheros_extcap_oui(uint8_t *frm)
+{
+	return (frm[1] > 3) && (LE_READ_4(frm + 2) ==
+		((ATH_OUI_EXTCAP_TYPE << 24) | ATH_OUI));
+}
+
+/**
+ * is_sfa_oui() - If vendor
IE is SFA type
+ * @frm: vendor IE pointer
+ *
+ * API to check if vendor IE is SFA
+ *
+ * Return: true if its SFA IE
+ */
+static inline bool
+is_sfa_oui(uint8_t *frm)
+{
+	return (frm[1] > 4) && (LE_READ_4(frm + 2) ==
+		((SFA_OUI_TYPE << 24) | SFA_OUI));
+}
+
+/**
+ * is_p2p_oui() - If vendor IE is P2P type
+ * @frm: vendor IE pointer
+ *
+ * API to check if vendor IE is P2P
+ *
+ * Return: true if its P2P IE
+ */
+static inline bool
+is_p2p_oui(const uint8_t *frm)
+{
+	const uint8_t wfa_oui[3] = P2P_WFA_OUI;
+
+	return (frm[1] >= 4) &&
+		(frm[2] == wfa_oui[0]) &&
+		(frm[3] == wfa_oui[1]) &&
+		(frm[4] == wfa_oui[2]) &&
+		(frm[5] == P2P_WFA_VER);
+}
+
+/**
+ * is_qca_son_oui() - If vendor IE is QCA WHC type
+ * @frm: vendor IE pointer
+ * @whc_subtype: subtype
+ *
+ * API to check if vendor IE is QCA WHC
+ *
+ * Return: true if its QCA WHC IE
+ */
+static inline bool
+is_qca_son_oui(uint8_t *frm, uint8_t whc_subtype)
+{
+	return (frm[1] > 4) && (LE_READ_4(frm + 2) ==
+		((QCA_OUI_WHC_TYPE << 24) | QCA_OUI)) &&
+		(*(frm + 6) == whc_subtype);
+}
+
+/**
+ * is_ht_cap() - If vendor IE is vendor HT cap type
+ * @frm: vendor IE pointer
+ *
+ * API to check if vendor IE is vendor HT cap
+ *
+ * Return: true if its vendor HT cap IE
+ */
+static inline bool
+is_ht_cap(uint8_t *frm)
+{
+	return (frm[1] > 3) && (BE_READ_4(frm + 2) ==
+		((VENDOR_HT_OUI << 8) | VENDOR_HT_CAP_ID));
+}
+
+/**
+ * is_ht_info() - If vendor IE is vendor HT info type
+ * @frm: vendor IE pointer
+ *
+ * API to check if vendor IE is vendor HT info
+ *
+ * Return: true if its vendor HT info IE
+ */
+static inline bool
+is_ht_info(uint8_t *frm)
+{
+	return (frm[1] > 3) && (BE_READ_4(frm + 2) ==
+		((VENDOR_HT_OUI << 8) | VENDOR_HT_INFO_ID));
+}
+
+/**
+ * is_interop_vht() - If vendor IE is VHT interop
+ * @frm: vendor IE pointer
+ *
+ * API to check if vendor IE is VHT interop
+ *
+ * Return: true if its VHT interop IE
+ */
+static inline bool
+is_interop_vht(uint8_t *frm)
+{
+	return (frm[1] > 12) && (BE_READ_4(frm + 2) ==
+		((VHT_INTEROP_OUI << 8) | VHT_INTEROP_TYPE)) &&
+		((*(frm + 6) == VHT_INTEROP_OUI_SUBTYPE) ||
+		(*(frm + 6) == VHT_INTEROP_OUI_SUBTYPE_VENDORSPEC));
+}
+
+/**
+ * is_bwnss_oui() - If vendor IE is BW NSS type
+ * @frm: vendor IE pointer
+ *
+ * API to check if vendor IE is BW NSS
+ *
+ * Return: true if its BW NSS IE
+ */
+static inline bool
+is_bwnss_oui(uint8_t *frm)
+{
+	return (frm[1] > 3) && (LE_READ_4(frm + 2) ==
+		((ATH_OUI_BW_NSS_MAP_TYPE << 24) | ATH_OUI));
+}
+
+/**
+ * is_he_cap_oui() - If vendor IE is HE CAP OUI
+ * @frm: vendor IE pointer
+ *
+ * API to check if vendor IE is HE CAP
+ *
+ * Return: true if its HE CAP IE
+ */
+static inline bool
+is_he_cap_oui(uint8_t *frm)
+{
+	return (frm[1] > 4) && (LE_READ_4(frm + 2) ==
+		((ATH_HE_CAP_SUBTYPE << 24) | ATH_HE_OUI));
+}
+
+/**
+ * is_he_op_oui() - If vendor IE is HE OP OUI
+ * @frm: vendor IE pointer
+ *
+ * API to check if vendor IE is HE OP OUI
+ *
+ * Return: true if its HE OP OUI
+ */
+static inline bool
+is_he_op_oui(uint8_t *frm)
+{
+	return (frm[1] > 4) && (LE_READ_4(frm + 2) ==
+		((ATH_HE_OP_SUBTYPE << 24) | ATH_HE_OUI));
+}
+
+/**
+ * wlan_parse_rsn_ie() - parse rsn ie
+ * @rsn_ie: rsn ie ptr
+ * @rsn: out structure for the parsed ie
+ *
+ * API, function to parse rsn ie, if optional fields are not present use the
+ * default values defined by standard.
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS wlan_parse_rsn_ie(uint8_t *rsn_ie,
+	struct wlan_rsn_ie *rsn)
+{
+	uint8_t rsn_ie_len, i;
+	uint8_t *ie;
+	int rem_len;
+	const struct wlan_rsn_ie_hdr *hdr;
+
+	if (!rsn_ie)
+		return QDF_STATUS_E_NULL_VALUE;
+
+	ie = rsn_ie;
+	/*
+	 * NOTE(review): rsn_ie_len is uint8_t, so ie[1] + 2 wraps for
+	 * ie[1] >= 254, making the length checks below operate on a
+	 * truncated length - consider widening to uint16_t.
+	 */
+	rsn_ie_len = ie[1] + 2;
+
+	/*
+	 * Check the length once for fixed parts:
+	 * element id, len and version. Other, variable-length data,
+	 * must be checked separately.
+	 */
+	if (rsn_ie_len < sizeof(struct wlan_rsn_ie_hdr))
+		return QDF_STATUS_E_INVAL;
+
+	hdr = (struct wlan_rsn_ie_hdr *) rsn_ie;
+
+	if (hdr->elem_id != WLAN_ELEMID_RSN ||
+	    LE_READ_2(hdr->version) != RSN_VERSION)
+		return QDF_STATUS_E_INVAL;
+
+	/* Set default values for optional field. */
+	rsn->gp_cipher_suite = WLAN_RSN_SEL(WLAN_CSE_CCMP);
+	rsn->pwise_cipher_count = 1;
+	rsn->pwise_cipher_suites[0] = WLAN_RSN_SEL(WLAN_CSE_CCMP);
+	rsn->akm_suite_count = 1;
+	rsn->akm_suites[0] = WLAN_RSN_SEL(WLAN_AKM_IEEE8021X);
+
+	rsn->ver = LE_READ_2(hdr->version);
+
+	ie = (uint8_t *) (hdr + 1);
+	rem_len = rsn_ie_len - sizeof(*hdr);
+
+	/* Check if optional group cipher is present */
+	if (rem_len >= WLAN_RSN_SELECTOR_LEN) {
+		rsn->gp_cipher_suite = LE_READ_4(ie);
+		ie += WLAN_RSN_SELECTOR_LEN;
+		rem_len -= WLAN_RSN_SELECTOR_LEN;
+	} else if (rem_len > 0) {
+		/* RSN IE is invalid as group cipher is of invalid length */
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* Check if optional pairwise cipher is present */
+	if (rem_len >= 2) {
+		rsn->pwise_cipher_count = LE_READ_2(ie);
+		ie += 2;
+		rem_len -= 2;
+		if (rsn->pwise_cipher_count == 0 ||
+		    rsn->pwise_cipher_count > WLAN_MAX_CIPHER ||
+		    rsn->pwise_cipher_count > rem_len / WLAN_RSN_SELECTOR_LEN)
+			return QDF_STATUS_E_INVAL;
+		for (i = 0; i < rsn->pwise_cipher_count; i++) {
+			rsn->pwise_cipher_suites[i] = LE_READ_4(ie);
+			ie += WLAN_RSN_SELECTOR_LEN;
+			rem_len -= WLAN_RSN_SELECTOR_LEN;
+		}
+	} else if (rem_len == 1) {
+		/* RSN IE is invalid as pairwise cipher is of invalid length */
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* Check if optional akm suite is present */
+	if (rem_len >= 2) {
+		rsn->akm_suite_count = LE_READ_2(ie);
+		ie += 2;
+		rem_len -= 2;
+		if (rsn->akm_suite_count == 0 ||
+		    rsn->akm_suite_count > WLAN_MAX_CIPHER ||
+		    rsn->akm_suite_count > rem_len / WLAN_RSN_SELECTOR_LEN)
+			return QDF_STATUS_E_INVAL;
+		for (i = 0; i < rsn->akm_suite_count; i++) {
+			rsn->akm_suites[i] = LE_READ_4(ie);
+			ie += WLAN_RSN_SELECTOR_LEN;
+			rem_len -= WLAN_RSN_SELECTOR_LEN;
+		}
+	} else if (rem_len == 1) {
+		/* RSN IE is invalid as akm suite is of invalid length */
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* Update capability if present */
+	if (rem_len >= 2) {
+		rsn->cap = LE_READ_2(ie);
+		ie += 2;
+		rem_len -= 2;
+	} else if (rem_len == 1) {
+		/* RSN IE is invalid as cap field is truncated */
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* Update PMKID if present */
+	if (rem_len >= 2) {
+		rsn->pmkid_count = LE_READ_2(ie);
+		ie += 2;
+		rem_len -= 2;
+		if (rsn->pmkid_count > (unsigned int) rem_len / PMKID_LEN) {
+			rsn->pmkid_count = 0;
+			return QDF_STATUS_E_INVAL;
+		}
+
+		/*
+		 * NOTE(review): pmkid_count is bounded only by rem_len,
+		 * not by MAX_PMKID; a crafted IE carrying more than
+		 * MAX_PMKID entries overflows rsn->pmkid here - verify
+		 * and clamp to MAX_PMKID.
+		 */
+		qdf_mem_copy(rsn->pmkid, ie,
+			     rsn->pmkid_count * PMKID_LEN);
+		ie += rsn->pmkid_count * PMKID_LEN;
+		rem_len -= rsn->pmkid_count * PMKID_LEN;
+	} else if (rem_len == 1) {
+		/* RSN IE is invalid as pmkid count field is truncated */
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* Update mgmt cipher if present */
+	if (rem_len >= WLAN_RSN_SELECTOR_LEN) {
+		rsn->mgmt_cipher_suite = LE_READ_4(ie);
+		ie += WLAN_RSN_SELECTOR_LEN;
+		rem_len -= WLAN_RSN_SELECTOR_LEN;
+	} else if (rem_len > 0) {
+		/* RSN IE is invalid as mgmt cipher is truncated */
+		return QDF_STATUS_E_INVAL;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * wlan_parse_wpa_ie() - parse wpa ie
+ * @wpa_ie: wpa ie ptr
+ * @wpa: out structure for the parsed ie
+ *
+ * API, function to parse wpa ie, if optional fields are not present use the
+ * default values defined by standard.
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS wlan_parse_wpa_ie(uint8_t *wpa_ie,
+	struct wlan_wpa_ie *wpa)
+{
+	uint8_t wpa_ie_len, i;
+	uint8_t *ie;
+	int rem_len;
+	struct wlan_wpa_ie_hdr *hdr;
+
+	if (!wpa_ie)
+		return QDF_STATUS_E_NULL_VALUE;
+
+	ie = wpa_ie;
+	/* NOTE(review): same uint8_t wrap hazard as wlan_parse_rsn_ie */
+	wpa_ie_len = ie[1] + 2;
+
+	/*
+	 * Check the length once for fixed parts:
+	 * element id, len, oui and version. Other, variable-length data,
+	 * must be checked separately.
+	 */
+	if (wpa_ie_len < sizeof(struct wlan_wpa_ie_hdr))
+		return QDF_STATUS_E_INVAL;
+
+	hdr = (struct wlan_wpa_ie_hdr *) wpa_ie;
+
+	if (hdr->elem_id != WLAN_ELEMID_VENDOR ||
+	    !is_wpa_oui(wpa_ie) ||
+	    LE_READ_2(hdr->version) != WPA_VERSION)
+		return QDF_STATUS_E_INVAL;
+
+	/* Set default values for optional field. */
+	wpa->mc_cipher = WLAN_WPA_SEL(WLAN_CSE_TKIP);
+	wpa->uc_cipher_count = 1;
+	wpa->uc_ciphers[0] = WLAN_WPA_SEL(WLAN_CSE_TKIP);
+	wpa->auth_suite_count = 1;
+	wpa->auth_suites[0] = WLAN_WPA_SEL(WLAN_ASE_8021X_UNSPEC);
+
+	wpa->ver = LE_READ_2(hdr->version);
+	ie = (uint8_t *) (hdr + 1);
+	rem_len = wpa_ie_len - sizeof(*hdr);
+
+	/* Check if optional group cipher is present */
+	if (rem_len >= WLAN_WPA_SELECTOR_LEN) {
+		wpa->mc_cipher = LE_READ_4(ie);
+		ie += WLAN_WPA_SELECTOR_LEN;
+		rem_len -= WLAN_WPA_SELECTOR_LEN;
+	} else if (rem_len > 0) {
+		/* WPA IE is invalid as group cipher is of invalid length */
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* Check if optional pairwise cipher is present */
+	if (rem_len >= 2) {
+		wpa->uc_cipher_count = LE_READ_2(ie);
+		ie += 2;
+		rem_len -= 2;
+		if (wpa->uc_cipher_count == 0 ||
+		    wpa->uc_cipher_count > WLAN_MAX_CIPHER ||
+		    wpa->uc_cipher_count > rem_len / WLAN_WPA_SELECTOR_LEN)
+			return QDF_STATUS_E_INVAL;
+		for (i = 0; i < wpa->uc_cipher_count; i++) {
+			wpa->uc_ciphers[i] = LE_READ_4(ie);
+			ie += WLAN_WPA_SELECTOR_LEN;
+			rem_len -= WLAN_WPA_SELECTOR_LEN;
+		}
+	} else if (rem_len == 1) {
+		/* WPA IE is invalid as pairwise cipher is of invalid length */
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* Check if optional akm suite is present */
+	if (rem_len >= 2) {
+		wpa->auth_suite_count = LE_READ_2(ie);
+		ie += 2;
+		rem_len -= 2;
+		if (wpa->auth_suite_count == 0 ||
+		    wpa->auth_suite_count > WLAN_MAX_CIPHER ||
+		    wpa->auth_suite_count > rem_len / WLAN_WPA_SELECTOR_LEN)
+			return QDF_STATUS_E_INVAL;
+		for (i = 0; i < wpa->auth_suite_count; i++) {
+			wpa->auth_suites[i] = LE_READ_4(ie);
+			ie += WLAN_WPA_SELECTOR_LEN;
+			rem_len -= WLAN_WPA_SELECTOR_LEN;
+		}
+	} else if (rem_len == 1) {
+		/* WPA IE is invalid as akm suite is of invalid length */
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* Update capability if optional capability is present */
+	if (rem_len >= 2) {
+		wpa->cap = LE_READ_2(ie);
+		ie += 2;
+		rem_len -= 2;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * wlan_parse_wapi_ie() - parse wapi ie
+ * @wapi_ie: wapi ie ptr
+ * @wapi: out structure for the parsed IE
+ *
+ * API, function to parse wapi ie
+ *
+ * Return: void
+ */
+static inline void wlan_parse_wapi_ie(uint8_t *wapi_ie,
+	struct wlan_wapi_ie *wapi)
+{
+	uint8_t len, i;
+	uint8_t *ie;
+
+	if (!wapi_ie)
+		return;
+
+	ie = wapi_ie;
+	len = ie[1];
+	/*
+	 * Check the length once for fixed parts: OUI, type,
+	 * version, mcast cipher, and 2 selector counts.
+	 * Other, variable-length data, must be checked separately.
+	 */
+	if (len < 20)
+		return;
+
+	ie += 2;
+
+	wapi->ver = LE_READ_2(ie);
+	if (wapi->ver != WAPI_VERSION)
+		return;
+
+	ie += 2;
+	len -= 2;
+
+	/* akm */
+	wapi->akm_suite_count = LE_READ_2(ie);
+
+	ie += 2;
+	len -= 2;
+
+	if ((wapi->akm_suite_count > WLAN_MAX_CIPHER) ||
+	    len < (wapi->akm_suite_count * WLAN_OUI_SIZE))
+		return;
+	for (i = 0 ; i < wapi->akm_suite_count; i++) {
+		wapi->akm_suites[i] = LE_READ_4(ie);
+		ie += WLAN_OUI_SIZE;
+		len -= WLAN_OUI_SIZE;
+	}
+
+	/*
+	 * NOTE(review): no explicit check that 2 bytes remain before
+	 * reading the unicast cipher count - confirm the initial
+	 * len < 20 check covers the maximum valid akm list.
+	 */
+	wapi->uc_cipher_count = LE_READ_2(ie);
+	ie += 2;
+	len -= 2;
+	if ((wapi->uc_cipher_count > WLAN_MAX_CIPHER) ||
+	    len < (wapi->uc_cipher_count * WLAN_OUI_SIZE + 2))
+		return;
+	for (i = 0 ; i < wapi->uc_cipher_count; i++) {
+		wapi->uc_cipher_suites[i] = LE_READ_4(ie);
+		ie += WLAN_OUI_SIZE;
+		len -= WLAN_OUI_SIZE;
+	}
+
+	if (len >= WLAN_OUI_SIZE)
+		wapi->mc_cipher_suite = LE_READ_4(ie);
+}
+
+/**
+ * wlan_parse_oce_reduced_wan_metrics_ie() - parse oce wan metrics
+ * @mbo_oce_ie: MBO/OCE ie ptr
+ * @wan_metrics: out structure for the reduced wan metric
+ *
+ * API, function to parse reduced wan metric
+ *
+ * Return: true if oce wan metrics is present
*/ +static inline bool +wlan_parse_oce_reduced_wan_metrics_ie(uint8_t *mbo_oce_ie, + struct oce_reduced_wan_metrics *wan_metrics) +{ + uint8_t len, attribute_len, attribute_id; + uint8_t *ie; + + if (!mbo_oce_ie) + return false; + + ie = mbo_oce_ie; + len = ie[1]; + ie += 2; + + if (len <= MBO_OCE_OUI_SIZE) + return false; + + ie += MBO_OCE_OUI_SIZE; + len -= MBO_OCE_OUI_SIZE; + + while (len > 2) { + attribute_id = ie[0]; + attribute_len = ie[1]; + len -= 2; + if (attribute_len > len) + return false; + + if (attribute_id == REDUCED_WAN_METRICS_ATTR) { + wan_metrics->downlink_av_cap = ie[2] & 0xff; + wan_metrics->uplink_av_cap = ie[2] >> 4; + return true; + } + + ie += (attribute_len + 2); + len -= attribute_len; + } + + return false; +} + +#endif /* _WLAN_CMN_IEEE80211_DEFS_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_fils_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_fils_api.h new file mode 100644 index 0000000000000000000000000000000000000000..ae402422b0177c610abfe76765edffa77d0f878b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_fils_api.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_CRYPTO_FILS_API_H_ +#define _WLAN_CRYPTO_FILS_API_H_ + +/** + * fils_register() - Register all callback functions to Crypto manager + * + * This function is invoked from crypto object manager to register + * FILS specific callbacks. + * + * Return: Pointer to wlan_crypto_cipher Object + */ +const struct wlan_crypto_cipher *fils_register(void); + +#ifdef WLAN_SUPPORT_FILS +/** + * wlan_crypto_fils_delkey - Delete fils aad key + * @peer: Peer object + * + * This function delete the peer specific FILS AAD key data. + * + * Return: None + */ +void wlan_crypto_fils_delkey(struct wlan_objmgr_peer *peer); +#endif /* WLAN_SUPPORT_FILS */ + +#endif /* End of _WLAN_CRYPTO_FILS_API_H_ */ + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_fils_def.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_fils_def.h new file mode 100644 index 0000000000000000000000000000000000000000..72be42236578e2738570ed8d85434db0c89021b0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_fils_def.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_CRYPTO_FILS_DEF_H_ +#define _WLAN_CRYPTO_FILS_DEF_H_ + +/* Element ID Extension (EID 255) values */ +#define WLAN_ELEMID_EXT_ASSOC_DELAY_INFO (1) +#define WLAN_ELEMID_EXT_FILS_REQ_PARAMS (2) +#define WLAN_ELEMID_EXT_FILS_KEY_CONFIRM (3) +#define WLAN_ELEMID_EXT_FILS_SESSION (4) +#define WLAN_ELEMID_EXT_FILS_HLP_CONTAINER (5) +#define WLAN_ELEMID_EXT_FILS_IP_ADDR_ASSIGN (6) +#define WLAN_ELEMID_EXT_KEY_DELIVERY (7) +#define WLAN_ELEMID_EXT_FILS_WRAPPED_DATA (8) +#define WLAN_ELEMID_EXT_FILS_PUBLIC_KEY (12) +#define WLAN_ELEMID_EXT_FILS_NONCE (13) + +#define WLAN_MAX_WPA_KEK_LEN (64) +#define WLAN_FILS_NONCE_LEN (16) + +/* FILS AAD Crypto key data */ +struct wlan_crypto_fils_aad_key { + /* FILS aad ANounce */ + uint8_t a_nonce[WLAN_FILS_NONCE_LEN]; + /* FILS aad SNounce */ + uint8_t s_nonce[WLAN_FILS_NONCE_LEN]; + /* FILS aad kek */ + uint8_t kek[WLAN_MAX_WPA_KEK_LEN]; + /* FILS aad kek length */ + uint32_t kek_len; +}; +#endif /* end of _WLAN_CRYPTO_FILS_DEF_H_ */ + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_global_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_global_api.h new file mode 100644 index 0000000000000000000000000000000000000000..c154e534e88c296ecd59c957cd0d8bd0daf38bd8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_global_api.h @@ -0,0 +1,591 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public APIs for crypto service + */ +#ifndef _WLAN_CRYPTO_GLOBAL_API_H_ +#define _WLAN_CRYPTO_GLOBAL_API_H_ + + +/** + * wlan_crypto_set_vdev_param - called by ucfg to set crypto param + * @vdev: vdev + * @param: param to be set. + * @value: value + * + * This function gets called from ucfg to set param + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_vdev_param(struct wlan_objmgr_vdev *vdev, + wlan_crypto_param_type param, + uint32_t value); + +/** + * wlan_crypto_set_peer_param - called by ucfg to set crypto param + * + * @peer: peer + * @param: param to be set. + * @value: value + * + * This function gets called from ucfg to set param + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_peer_param(struct wlan_objmgr_peer *peer, + wlan_crypto_param_type param, + uint32_t value); + +/** + * wlan_crypto_get_param - called by ucfg to get crypto param + * @vdev: vdev + * @param: param to be get. 
 + * + * This function gets called from ucfg to get param + * + * Return: value or -1 for failure + */ +int32_t wlan_crypto_get_param(struct wlan_objmgr_vdev *vdev, + wlan_crypto_param_type param); +/** + * wlan_crypto_get_peer_param - called by ucfg to get crypto peer param + * @peer: peer + * @param: param to be retrieved. + * + * This function gets called from ucfg to get peer param + * + * Return: value or -1 for failure + */ +int32_t wlan_crypto_get_peer_param(struct wlan_objmgr_peer *peer, + wlan_crypto_param_type param); + +/** + * wlan_crypto_is_htallowed - called by ucfg to check if HT rates are allowed + * @vdev: Vdev + * @peer: Peer + * + * This function is called to check if HT rates are allowed + * + * Return: 0 for not allowed and +ve for allowed + */ +uint8_t wlan_crypto_is_htallowed(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer); +/** + * wlan_crypto_setkey - called by ucfg to setkey + * @vdev: vdev + * @req_key: req_key with cipher type, key macaddress + * + * This function gets called from ucfg to set key + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_setkey(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_req_key *req_key); + +/** + * wlan_crypto_getkey - called by ucfg to get key + * @vdev: vdev + * @req_key: key value will be copied in this req_key + * @mac_addr: mac address of the peer for unicast key + * or broadcast address if group key is requested. + * + * This function gets called from ucfg to get key + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_getkey(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_req_key *req_key, + uint8_t *mac_addr); + +/** + * wlan_crypto_delkey - called by ucfg to delete key + * @vdev: vdev + * @macaddr: mac address of the peer for unicast key + * or broadcast address if group key is deleted. 
+ * @key_idx: key index to be deleted + * + * This function gets called from ucfg to delete key + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_delkey(struct wlan_objmgr_vdev *vdev, + uint8_t *macaddr, + uint8_t key_idx); + +/** + * wlan_crypto_default_key - called by ucfg to set default tx key + * @vdev: vdev + * @mac_address: mac address of the peer for unicast key + * or broadcast address if group key need to made default. + * @key_idx: key index to be made as default key + * @unicast: is key was unicast or group key. + * + * This function gets called from ucfg to set default key + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_default_key(struct wlan_objmgr_vdev *vdev, + uint8_t *macaddr, + uint8_t key_idx, + bool unicast); + +/** + * wlan_crypto_encap - called by mgmt for encap the frame based on cipher + * @vdev: vdev + * @wbuf: wbuf + * @macaddr: macaddr + * @encapdone: is encapdone already or not. + * + * This function gets called from mgmt txrx to encap frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_encap(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, + uint8_t *macaddr, + uint8_t encapdone); + +/** + * wlan_crypto_decap - called by mgmt for decap the frame based on cipher + * @vdev: vdev + * @wbuf: wbuf + * @macaddr: macaddr + * @tid: tid of the packet. + * + * This function gets called from mgmt txrx to decap frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_decap(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, + uint8_t *macaddr, + uint8_t tid); + +/** + * wlan_crypto_enmic - called by mgmt for adding mic in frame based on cipher + * @vdev: vdev + * @wbuf: wbuf + * @macaddr: macaddr + * @encapdone: is encapdone already or not. + * + * This function gets called from mgmt txrx to adding mic to the frame. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_enmic(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, + uint8_t *macaddr, + uint8_t encapdone); + +/** + * wlan_crypto_demic - called by mgmt for remove and check mic for + * the frame based on cipher + * @vdev: vdev + * @wbuf: wbuf + * @macaddr: macaddr + * @tid: tid of the frame + * @keyid: keyid in the received frame + * + * This function gets called from mgmt txrx to decap frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_demic(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, + uint8_t *macaddr, + uint8_t tid, + uint8_t keyid); + +/** + * wlan_crypto_vdev_is_pmf_enabled - called to check is pmf enabled in vdev + * @vdev: vdev + * + * This function gets called to check is pmf enabled or not in vdev. + * + * Return: true or false + */ +bool wlan_crypto_vdev_is_pmf_enabled(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_crypto_is_pmf_enabled - called by mgmt txrx to check is pmf enabled + * @vdev: vdev + * @peer: peer + * + * This function gets called by mgmt txrx to check is pmf enabled or not. 
+ * + * Return: true or false + */ +bool wlan_crypto_is_pmf_enabled(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer); + +/** + * wlan_crypto_add_mmie - called by mgmt txrx to add mmie in frame + * @vdev: vdev + * @frm: frame starting pointer + * @len: length of the frame + * + * This function gets called by mgmt txrx to add mmie in frame + * + * Return: end of frame or NULL in case failure + */ +uint8_t *wlan_crypto_add_mmie(struct wlan_objmgr_vdev *vdev, + uint8_t *frm, + uint32_t len); + +/** + * wlan_crypto_is_mmie_valid - called by mgmt txrx to check mmie of the frame + * @vdev: vdev + * @frm: frame starting pointer + * @efrm: end of frame pointer + * + * This function gets called by mgmt txrx to check mmie of the frame + * + * Return: true or false + */ +bool wlan_crypto_is_mmie_valid(struct wlan_objmgr_vdev *vdev, + uint8_t *frm, + uint8_t *efrm); + +/** + * wlan_crypto_wpaie_check - called by mlme to check the wpaie + * @crypto params: crypto params + * @iebuf: ie buffer + * + * This function gets called by mlme to check the contents of wpa is + * matching with given crypto params + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_wpaie_check(struct wlan_crypto_params *, uint8_t *frm); + +/** + * wlan_crypto_rsnie_check - called by mlme to check the rsnie + * @crypto params: crypto params + * @iebuf: ie buffer + * + * This function gets called by mlme to check the contents of rsn is + * matching with given crypto params + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_rsnie_check(struct wlan_crypto_params *, uint8_t *frm); +/** + * wlan_crypto_build_wpaie - called by mlme to build wpaie + * @vdev: vdev + * @iebuf: ie buffer + * + * This function gets called by mlme to build wpaie from given vdev + * + * Return: end of buffer + */ +uint8_t *wlan_crypto_build_wpaie(struct wlan_objmgr_vdev *vdev, + uint8_t *iebuf); +/** + * wlan_crypto_build_rsnie - called by mlme to build 
rsnie + * @vdev: vdev + * @iebuf: ie buffer + * + * This function gets called by mlme to build rsnie from given vdev + * + * Return: end of buffer + */ +uint8_t *wlan_crypto_build_rsnie(struct wlan_objmgr_vdev *vdev, + uint8_t *iebuf); + +/** + * wlan_crypto_wapiie_check - called by mlme to check the wapiie + * @crypto params: crypto params + * @iebuf: ie buffer + * + * This function gets called by mlme to check the contents of wapi is + * matching with given crypto params + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_wapiie_check(struct wlan_crypto_params *crypto_params, + uint8_t *frm); + +/** + * wlan_crypto_build_wapiie - called by mlme to build wapi ie + * @vdev: vdev + * @iebuf: ie buffer + * + * This function gets called by mlme to build wapi ie from given vdev + * + * Return: end of buffer + */ +uint8_t *wlan_crypto_build_wapiie(struct wlan_objmgr_vdev *vdev, + uint8_t *iebuf); +/** + * wlan_crypto_rsn_info - check is given params matching with vdev params. + * @vdev: vdev + * @crypto params: crypto params + * + * This function gets called by mlme to check is given params matching with + * vdev params. + * + * Return: true success or false for failure. 
 + */ +bool wlan_crypto_rsn_info(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_params *crypto_params); +/** + * wlan_crypto_pn_check - called by data path for PN check + * @vdev: vdev + * @wbuf: wbuf + * + * This function gets called by data path for PN check + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_crypto_pn_check(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf); +/** + * wlan_crypto_vdev_get_crypto_params - called by mlme to get crypto params + * @vdev: vdev + * + * This function gets called by mlme to get crypto params + * + * Return: wlan_crypto_params or NULL in case of failure + */ +struct wlan_crypto_params *wlan_crypto_vdev_get_crypto_params( + struct wlan_objmgr_vdev *vdev); +/** + * wlan_crypto_peer_get_crypto_params - called by mlme to get crypto params + * @peer: peer + * + * This function gets called by mlme to get crypto params + * + * Return: wlan_crypto_params or NULL in case of failure + */ +struct wlan_crypto_params *wlan_crypto_peer_get_crypto_params( + struct wlan_objmgr_peer *peer); + +/** + * wlan_crypto_set_peer_wep_keys - set wep keys into peer entries + * @vdev: vdev + * @peer: peer + * + * This function gets called by mlme, when auth frame is received. + * This helps in setting wep keys into peer data structure. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_crypto_set_peer_wep_keys(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer); + +/** + * wlan_crypto_register_crypto_rx_ops - set crypto_rx_ops + * @crypto_rx_ops: crypto_rx_ops + * + * This function gets called by object manager to register crypto rx ops. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_crypto_register_crypto_rx_ops( + struct wlan_lmac_if_crypto_rx_ops *crypto_rx_ops); + +/** + * wlan_crypto_get_crypto_rx_ops - get crypto_rx_ops from psoc + * @psoc: psoc + * + * This function gets called by umac to get the crypto_rx_ops + * + * Return: crypto_rx_ops + */ +struct wlan_lmac_if_crypto_rx_ops *wlan_crypto_get_crypto_rx_ops( + struct wlan_objmgr_psoc *psoc); +/** + * wlan_crypto_vdev_has_auth_mode - check authmode for vdev + * @vdev: vdev + * @authvalue: authvalue to be checked + * + * This function check is authvalue passed is set in vdev or not + * + * Return: true or false + */ +bool wlan_crypto_vdev_has_auth_mode(struct wlan_objmgr_vdev *vdev, + wlan_crypto_auth_mode authmode); + +/** + * wlan_crypto_peer_has_auth_mode - check authmode for peer + * @peer: peer + * @authvalue: authvalue to be checked + * + * This function check is authvalue passed is set in peer or not + * + * Return: true or false + */ +bool wlan_crypto_peer_has_auth_mode(struct wlan_objmgr_peer *peer, + wlan_crypto_auth_mode authvalue); + +/** + * wlan_crypto_vdev_has_ucastcipher - check ucastcipher for vdev + * @vdev: vdev + * @ucastcipher: ucastcipher to be checked + * + * This function check is ucastcipher passed is set in vdev or not + * + * Return: true or false + */ +bool wlan_crypto_vdev_has_ucastcipher(struct wlan_objmgr_vdev *vdev, + wlan_crypto_cipher_type ucastcipher); + +/** + * wlan_crypto_peer_has_ucastcipher - check ucastcipher for peer + * @peer: peer + * @ucastcipher: ucastcipher to be checked + * + * This function check is ucastcipher passed is set in peer or not + * + * Return: true or false + */ +bool wlan_crypto_peer_has_ucastcipher(struct wlan_objmgr_peer *peer, + wlan_crypto_cipher_type ucastcipher); + + +/** + * wlan_crypto_vdev_has_mcastcipher - check mcastcipher for vdev + * @vdev: vdev + * @mcastcipher: mcastcipher to be checked + * + * This function check is mcastcipher passed is set in vdev or not + * + 
* Return: true or false + */ +bool wlan_crypto_vdev_has_mcastcipher(struct wlan_objmgr_vdev *vdev, + wlan_crypto_cipher_type mcastcipher); + +/** + * wlan_crypto_peer_has_mcastcipher - check mcastcipher for peer + * @peer: peer + * @mcastcipher: mcastcipher to be checked + * + * This function check is mcastcipher passed is set in peer or not + * + * Return: true or false + */ +bool wlan_crypto_peer_has_mcastcipher(struct wlan_objmgr_peer *peer, + wlan_crypto_cipher_type mcastcipher); + +/** + * wlan_crypto_get_keytype - get keytype + * @key: key + * + * This function gets keytype from key + * + * Return: keytype + */ +wlan_crypto_cipher_type wlan_crypto_get_key_type( + struct wlan_crypto_key *key); + +/** + * wlan_crypto_vdev_getkey - get key from vdev + * @vdev: vdev + * @keyix: keyix + * + * This function gets key from vdev + * + * Return: key or NULL + */ +struct wlan_crypto_key *wlan_crypto_vdev_getkey(struct wlan_objmgr_vdev *vdev, + uint16_t keyix); +/** + * wlan_crypto_peer_getkey - get key from peer + * @peer: peer + * @keyix: keyix + * + * This function gets key from peer + * + * Return: key or NULL + */ +struct wlan_crypto_key *wlan_crypto_peer_getkey(struct wlan_objmgr_peer *peer, + uint16_t keyix); +/** + * wlan_crypto_get_peer_fils_aead - Get peer fils aead set flag + * @peer: Peer object + * + * This function returns the peer fils aead set flag value. + * + * Return: 1 for enabled, 0 for disabled + */ +uint8_t wlan_crypto_get_peer_fils_aead(struct wlan_objmgr_peer *peer); + +/** + * wlan_crypto_set_peer_fils_aead - Set peer fils aead set flag + * @peer: Peer object + * @value: Value to set the flag + * + * This function set the peer fils aead set flag once FILS AUTH received. 
+ * + * Return: None + */ +void wlan_crypto_set_peer_fils_aead( + struct wlan_objmgr_peer *peer, uint8_t value); + +/** + * wlan_crypto_get_key_header - get header length + * @key: key + * + * This function gets header length based on keytype + * + * Return: header length + */ +uint8_t wlan_crypto_get_key_header(struct wlan_crypto_key *key); + +/** + * wlan_crypto_get_key_trailer - get cipher trailer length + * @key: key + * + * This function gets cipher trailer length based on keytype + * + * Return: cipher trailer length + */ +uint8_t wlan_crypto_get_key_trailer(struct wlan_crypto_key *key); + +/** + * wlan_crypto_get_key_miclen - get cipher miclen length + * @key: key + * + * This function gets cipher miclen length based on keytype + * + * Return: cipher miclen length + */ +uint8_t wlan_crypto_get_key_miclen(struct wlan_crypto_key *key); + +/** + * wlan_crypto_get_keyid - get keyid from frame + * @data: frame + * @hdrlen: 802.11 header length + * + * This function parse frame and returns keyid + * + * Return: keyid + */ +uint16_t wlan_crypto_get_keyid(uint8_t *data, int hdrlen); + +/** + * wlan_crypto_restore_keys - restore crypto keys in hw keycache + * @vdev: vdev + * + * This function restores keys in hw keycache + * + * Return: void + */ +void wlan_crypto_restore_keys(struct wlan_objmgr_vdev *vdev); +#endif /* end of _WLAN_CRYPTO_GLOBAL_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_global_def.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_global_def.h new file mode 100644 index 0000000000000000000000000000000000000000..0c058d6b55a858d9f282382aab00cc6ad23feb3d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_global_def.h @@ -0,0 +1,343 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
 + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public definitions for crypto service + */ + +#ifndef _WLAN_CRYPTO_GLOBAL_DEF_H_ +#define _WLAN_CRYPTO_GLOBAL_DEF_H_ + +#include <wlan_cmn.h> +#include "wlan_crypto_fils_def.h" + +#define WLAN_CRYPTO_TID_SIZE (17) +#define WLAN_CRYPTO_KEYBUF_SIZE (32) +#define WLAN_CRYPTO_MICBUF_SIZE (16) +#define WLAN_CRYPTO_MIC_LEN (8) +#define WLAN_CRYPTO_MIC256_LEN (16) +#define WLAN_CRYPTO_TXMIC_OFFSET (0) +#define WLAN_CRYPTO_RXMIC_OFFSET (WLAN_CRYPTO_TXMIC_OFFSET + \ + WLAN_CRYPTO_MIC_LEN) +#define WLAN_CRYPTO_WAPI_IV_SIZE (16) +#define WLAN_CRYPTO_CRC_LEN (4) +#define WLAN_CRYPTO_IV_LEN (3) +#define WLAN_CRYPTO_KEYID_LEN (1) +#define WLAN_CRYPTO_EXT_IV_LEN (4) +#define WLAN_CRYPTO_EXT_IV_BIT (0x20) +#define WLAN_CRYPTO_KEYIX_NONE ((uint16_t)-1) +#define WLAN_CRYPTO_MAXKEYIDX (4) +#define WLAN_CRYPTO_MAXIGTKKEYIDX (2) + +/* 40 bit wep key len */ +#define WLAN_CRYPTO_KEY_WEP40_LEN (5) +/* 104 bit wep key len */ +#define WLAN_CRYPTO_KEY_WEP104_LEN (13) +/* 128 bit wep key len */ +#define WLAN_CRYPTO_KEY_WEP128_LEN (16) + +#define WLAN_CRYPTO_WPI_SMS4_IVLEN (16) +#define WLAN_CRYPTO_WPI_SMS4_KIDLEN (1) +#define WLAN_CRYPTO_WPI_SMS4_PADLEN (1) +#define WLAN_CRYPTO_WPI_SMS4_MICLEN (16) + +/* key used for xmit */ +#define 
WLAN_CRYPTO_KEY_XMIT (0x01) +/* key used for recv */ +#define WLAN_CRYPTO_KEY_RECV (0x02) +/* key used for WPA group operation */ +#define WLAN_CRYPTO_KEY_GROUP (0x04) +/* key also used for management frames */ +#define WLAN_CRYPTO_KEY_MFP (0x08) +/* host-based encryption */ +#define WLAN_CRYPTO_KEY_SWENCRYPT (0x10) +/* host-based enmic */ +#define WLAN_CRYPTO_KEY_SWENMIC (0x20) +/* do not remove unless OS commands us to do so */ +#define WLAN_CRYPTO_KEY_PERSISTENT (0x40) +/* per STA default key */ +#define WLAN_CRYPTO_KEY_DEFAULT (0x80) +/* host-based decryption */ +#define WLAN_CRYPTO_KEY_SWDECRYPT (0x100) +/* host-based demic */ +#define WLAN_CRYPTO_KEY_SWDEMIC (0x200) + +#define WLAN_CRYPTO_KEY_SWCRYPT (WLAN_CRYPTO_KEY_SWENCRYPT \ + | WLAN_CRYPTO_KEY_SWDECRYPT) + +#define WLAN_CRYPTO_KEY_SWMIC (WLAN_CRYPTO_KEY_SWENMIC \ + | WLAN_CRYPTO_KEY_SWDEMIC) + +/* + * Cipher types + */ +typedef enum wlan_crypto_cipher_type { + WLAN_CRYPTO_CIPHER_WEP = 0, + WLAN_CRYPTO_CIPHER_TKIP = 1, + WLAN_CRYPTO_CIPHER_AES_OCB = 2, + WLAN_CRYPTO_CIPHER_AES_CCM = 3, + WLAN_CRYPTO_CIPHER_WAPI_SMS4 = 4, + WLAN_CRYPTO_CIPHER_CKIP = 5, + WLAN_CRYPTO_CIPHER_AES_CMAC = 6, + WLAN_CRYPTO_CIPHER_AES_CCM_256 = 7, + WLAN_CRYPTO_CIPHER_AES_CMAC_256 = 8, + WLAN_CRYPTO_CIPHER_AES_GCM = 9, + WLAN_CRYPTO_CIPHER_AES_GCM_256 = 10, + WLAN_CRYPTO_CIPHER_AES_GMAC = 11, + WLAN_CRYPTO_CIPHER_AES_GMAC_256 = 12, + WLAN_CRYPTO_CIPHER_WAPI_GCM4 = 13, + WLAN_CRYPTO_CIPHER_FILS_AEAD = 14, + WLAN_CRYPTO_CIPHER_NONE = 15, + WLAN_CRYPTO_CIPHER_MAX = WLAN_CRYPTO_CIPHER_NONE, +} wlan_crypto_cipher_type; + +/* Auth types */ +typedef enum wlan_crypto_auth_mode { + WLAN_CRYPTO_AUTH_NONE = 0, + WLAN_CRYPTO_AUTH_OPEN = 1, + WLAN_CRYPTO_AUTH_SHARED = 2, + WLAN_CRYPTO_AUTH_8021X = 3, + WLAN_CRYPTO_AUTH_AUTO = 4, + WLAN_CRYPTO_AUTH_WPA = 5, + WLAN_CRYPTO_AUTH_RSNA = 6, + WLAN_CRYPTO_AUTH_CCKM = 7, + WLAN_CRYPTO_AUTH_WAPI = 8, +} wlan_crypto_auth_mode; + +/* crypto capabilities */ +typedef enum wlan_crypto_cap { + 
WLAN_CRYPTO_CAP_PRIVACY = 0, + WLAN_CRYPTO_CAP_WPA1 = 1, + WLAN_CRYPTO_CAP_WPA2 = 2, + WLAN_CRYPTO_CAP_WPA = 3, + WLAN_CRYPTO_CAP_AES = 4, + WLAN_CRYPTO_CAP_WEP = 5, + WLAN_CRYPTO_CAP_CKIP = 6, + WLAN_CRYPTO_CAP_TKIP_MIC = 7, + WLAN_CRYPTO_CAP_CCM256 = 8, + WLAN_CRYPTO_CAP_GCM = 9, + WLAN_CRYPTO_CAP_GCM_256 = 10, + WLAN_CRYPTO_CAP_WAPI_SMS4 = 11, + WLAN_CRYPTO_CAP_WAPI_GCM4 = 12, + WLAN_CRYPTO_CAP_KEY_MGMT_OFFLOAD = 13, + WLAN_CRYPTO_CAP_PMF_OFFLOAD = 14, + WLAN_CRYPTO_CAP_PN_TID_BASED = 15, + WLAN_CRYPTO_CAP_FILS_AEAD = 16, +} wlan_crypto_cap; + +typedef enum wlan_crypto_rsn_cap { + WLAN_CRYPTO_RSN_CAP_PREAUTH = 0x01, + WLAN_CRYPTO_RSN_CAP_MFP_ENABLED = 0x80, + WLAN_CRYPTO_RSN_CAP_MFP_REQUIRED = 0x40, +} wlan_crypto_rsn_cap; + +typedef enum wlan_crypto_key_mgmt { + WLAN_CRYPTO_KEY_MGMT_IEEE8021X = 0, + WLAN_CRYPTO_KEY_MGMT_PSK = 1, + WLAN_CRYPTO_KEY_MGMT_NONE = 2, + WLAN_CRYPTO_KEY_MGMT_IEEE8021X_NO_WPA = 3, + WLAN_CRYPTO_KEY_MGMT_WPA_NONE = 4, + WLAN_CRYPTO_KEY_MGMT_FT_IEEE8021X = 5, + WLAN_CRYPTO_KEY_MGMT_FT_PSK = 6, + WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SHA256 = 7, + WLAN_CRYPTO_KEY_MGMT_PSK_SHA256 = 8, + WLAN_CRYPTO_KEY_MGMT_WPS = 9, + WLAN_CRYPTO_KEY_MGMT_SAE = 10, + WLAN_CRYPTO_KEY_MGMT_FT_SAE = 11, + WLAN_CRYPTO_KEY_MGMT_WAPI_PSK = 12, + WLAN_CRYPTO_KEY_MGMT_WAPI_CERT = 13, + WLAN_CRYPTO_KEY_MGMT_CCKM = 14, + WLAN_CRYPTO_KEY_MGMT_OSEN = 15, + WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B = 16, + WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B_192 = 17, + WLAN_CRYPTO_KEY_MGMT_FILS_SHA256 = 18, + WLAN_CRYPTO_KEY_MGMT_FILS_SHA384 = 19, + WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA256 = 20, + WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA384 = 21, + WLAN_CRYPTO_KEY_MGMT_OWE = 22, + WLAN_CRYPTO_KEY_MGMT_DPP = 23, +} wlan_crypto_key_mgmt; + +/** + * struct wlan_crypto_params - holds crypto params + * @authmodeset: authentication mode + * @ucastcipherset: unicast ciphers + * @mcastcipherset: multicast cipher + * @mgmtcipherset: mgmt cipher + * @cipher_caps: cipher capability + * @rsn_caps: rsn_capability + * 
@key_mgmt: key mgmt + * + * This structure holds crypto params for peer or vdev + */ +struct wlan_crypto_params { + uint32_t authmodeset; + uint32_t ucastcipherset; + uint32_t mcastcipherset; + uint32_t mgmtcipherset; + uint32_t cipher_caps; + uint32_t key_mgmt; + uint16_t rsn_caps; +}; + +typedef enum wlan_crypto_param_type { + WLAN_CRYPTO_PARAM_AUTH_MODE, + WLAN_CRYPTO_PARAM_UCAST_CIPHER, + WLAN_CRYPTO_PARAM_MCAST_CIPHER, + WLAN_CRYPTO_PARAM_MGMT_CIPHER, + WLAN_CRYPTO_PARAM_CIPHER_CAP, + WLAN_CRYPTO_PARAM_RSN_CAP, + WLAN_CRYPTO_PARAM_KEY_MGMT, +} wlan_crypto_param_type; + +/** + * struct wlan_crypto_key - key structure + * @keylen: length of the key + * @valid: is key valid or not + * @flags: key flags + * @keyix: key id + * @cipher_table: table which stores cipher related info + * @private: private pointer to save cipher context + * @keylock: spin lock + * @recviv: WAPI key receive sequence counter + * @txiv: WAPI key transmit sequence counter + * @keytsc: key transmit sequence counter + * @keyrsc: key receive sequence counter + * @keyrsc_suspect: key receive sequence counter under + * suspect when pN jump is detected + * @keyglobal: key receive global sequence counter used with suspect + * @keyval: key value buffer + * + * This key structure to key related details. 
+ */ +struct wlan_crypto_key { + uint8_t keylen; + bool valid; + uint16_t flags; + uint16_t keyix; + void *cipher_table; + void *private; + qdf_spinlock_t keylock; + uint8_t recviv[WLAN_CRYPTO_WAPI_IV_SIZE]; + uint8_t txiv[WLAN_CRYPTO_WAPI_IV_SIZE]; + uint64_t keytsc; + uint64_t keyrsc[WLAN_CRYPTO_TID_SIZE]; + uint64_t keyrsc_suspect[WLAN_CRYPTO_TID_SIZE]; + uint64_t keyglobal; + uint8_t keyval[WLAN_CRYPTO_KEYBUF_SIZE + + WLAN_CRYPTO_MICBUF_SIZE]; +#define txmic (keyval + WLAN_CRYPTO_KEYBUF_SIZE \ + + WLAN_CRYPTO_TXMIC_OFFSET) +#define rxmic (keyval + WLAN_CRYPTO_KEYBUF_SIZE \ + + WLAN_CRYPTO_RXMIC_OFFSET) +}; + +/** + * struct wlan_crypto_req_key - key request structure + * @type: key/cipher type + * @pad: padding member + * @keyix: key index + * @keylen: length of the key value + * @flags: key flags + * @macaddr: macaddr of the key + * @keyrsc: key receive sequence counter + * @keytsc: key transmit sequence counter + * @keydata: key value + * @txiv: wapi key tx iv + * @rxiv: wapi key rx iv + * @filsaad: FILS AEAD data + * + * Key request structure used for setkey, getkey or delkey + */ +struct wlan_crypto_req_key { + uint8_t type; + uint8_t pad; + uint16_t keyix; + uint8_t keylen; + uint16_t flags; + uint8_t macaddr[QDF_MAC_ADDR_SIZE]; + uint64_t keyrsc; + uint64_t keytsc; + uint8_t keydata[WLAN_CRYPTO_KEYBUF_SIZE + WLAN_CRYPTO_MICBUF_SIZE]; + uint8_t txiv[WLAN_CRYPTO_WAPI_IV_SIZE]; + uint8_t recviv[WLAN_CRYPTO_WAPI_IV_SIZE]; + struct wlan_crypto_fils_aad_key filsaad; +}; + +/** + * struct wlan_lmac_if_crypto_tx_ops - structure of crypto function + * pointers + * @allockey: function pointer to alloc key in hw + * @setkey: function pointer to setkey in hw + * @delkey: function pointer to delkey in hw + * @defaultkey: function pointer to set default key + */ + +struct wlan_lmac_if_crypto_tx_ops { + QDF_STATUS(*allockey)(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_key *key, + uint8_t *macaddr, uint32_t key_type); + QDF_STATUS(*setkey)(struct wlan_objmgr_vdev 
*vdev, + struct wlan_crypto_key *key, + uint8_t *macaddr, uint32_t key_type); + QDF_STATUS(*delkey)(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_key *key, + uint8_t *macaddr, uint32_t key_type); + QDF_STATUS(*defaultkey)(struct wlan_objmgr_vdev *vdev, + uint8_t keyix, uint8_t *macaddr); +}; + + +/** + * struct wlan_lmac_if_crypto_rx_ops - structure of crypto rx function + * pointers + * @encap: function pointer to encap tx frame + * @decap: function pointer to decap rx frame in hw + * @enmic: function pointer to enmic tx frame + * @demic: function pointer to demic rx frame + */ + +struct wlan_lmac_if_crypto_rx_ops { + QDF_STATUS(*crypto_encap)(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, uint8_t *macaddr, + uint8_t encapdone); + QDF_STATUS(*crypto_decap)(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, uint8_t *macaddr, + uint8_t tid); + QDF_STATUS(*crypto_enmic)(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, uint8_t *macaddr, + uint8_t encapdone); + QDF_STATUS(*crypto_demic)(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, uint8_t *macaddr, + uint8_t tid, uint8_t keyid); + QDF_STATUS(*set_peer_wep_keys)(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer); +}; + +#define WLAN_CRYPTO_RX_OPS_ENCAP(crypto_rx_ops) \ + (crypto_rx_ops->crypto_encap) +#define WLAN_CRYPTO_RX_OPS_DECAP(crypto_rx_ops) \ + (crypto_rx_ops->crypto_decap) +#define WLAN_CRYPTO_RX_OPS_ENMIC(crypto_rx_ops) \ + (crypto_rx_ops->crypto_enmic) +#define WLAN_CRYPTO_RX_OPS_DEMIC(crypto_rx_ops) \ + (crypto_rx_ops->crypto_demic) +#define WLAN_CRYPTO_RX_OPS_SET_PEER_WEP_KEYS(crypto_rx_ops) \ + (crypto_rx_ops->set_peer_wep_keys) + +#endif /* end of _WLAN_CRYPTO_GLOBAL_DEF_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_main.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_main.h new file mode 100644 index 0000000000000000000000000000000000000000..7f4e59f9591cb0b18870d747676f698e0ba1d963 --- /dev/null +++ 
b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/inc/wlan_crypto_main.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Private API for crypto service with object manager handler + */ +#ifndef _WLAN_CRYPTO_MAIN_H_ +#define _WLAN_CRYPTO_MAIN_H_ + +/** + * wlan_crypto_init - Init the crypto service with object manager + * Called from umac init context. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_init(void); + +/** + * wlan_crypto_deinit - Deinit the crypto service with object manager + * Called from umac deinit context. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_deinit(void); + + +#endif /* end of _WLAN_CRYPTO_MAIN_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes.h new file mode 100644 index 0000000000000000000000000000000000000000..911b6a526e81c726cc1d5677d9e4f9c502430e14 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes.h @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2017 The Linux Foundation. 
All rights reserved. + */ +/* + * AES functions + * Copyright (c) 2003-2006, Jouni Malinen + * + * This software may be distributed under the terms of the BSD license. + * See README for more details. + */ + +#ifndef AES_H +#define AES_H + +#define AES_BLOCK_SIZE 16 + + +#endif /* AES_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_ccm.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_ccm.c new file mode 100644 index 0000000000000000000000000000000000000000..9f046c5167bc5cee2ac57e8761864bafa8090ece --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_ccm.c @@ -0,0 +1,218 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + */ +/* + * Counter with CBC-MAC (CCM) with AES + * + * Copyright (c) 2010-2012, Jouni Malinen + * + * This software may be distributed under the terms of the BSD license. + * See README for more details. + */ +#include +#include +#include +#include "wlan_crypto_aes_i.h" +#include "wlan_crypto_def_i.h" + +static void xor_aes_block(uint8_t *dst, const uint8_t *src) +{ + uint32_t *d = (uint32_t *) dst; + uint32_t *s = (uint32_t *) src; + *d++ ^= *s++; + *d++ ^= *s++; + *d++ ^= *s++; + *d++ ^= *s++; +} + + +static void aes_ccm_auth_start(void *aes, size_t M, size_t L, + const uint8_t *nonce, const uint8_t *aad, + size_t aad_len, size_t plain_len, + uint8_t *x){ + uint8_t aad_buf[2 * AES_BLOCK_SIZE]; + uint8_t b[AES_BLOCK_SIZE]; + + /* Authentication */ + /* B_0: Flags | Nonce N | l(m) */ + b[0] = aad_len ? 
0x40 : 0 /* Adata */; + b[0] |= (((M - 2) / 2) /* M' */ << 3); + b[0] |= (L - 1) /* L' */; + qdf_mem_copy(&b[1], nonce, 15 - L); + wlan_crypto_put_be16(&b[AES_BLOCK_SIZE - L], plain_len); + + wpa_hexdump_key(MSG_EXCESSIVE, "CCM B_0", b, AES_BLOCK_SIZE); + wlan_crypto_aes_encrypt(aes, b, x); /* X_1 = E(K, B_0) */ + + if (!aad_len) { + qdf_print("%s[%d] aad length should be non zero\n", + __func__, __LINE__); + return; + } + + wlan_crypto_put_be16(aad_buf, aad_len); + qdf_mem_copy(aad_buf + 2, aad, aad_len); + qdf_mem_set(aad_buf + 2 + aad_len, sizeof(aad_buf) - 2 - aad_len, 0); + + xor_aes_block(aad_buf, x); + wlan_crypto_aes_encrypt(aes, aad_buf, x); /* X_2 = E(K, X_1 XOR B_1) */ + + if (aad_len > AES_BLOCK_SIZE - 2) { + xor_aes_block(&aad_buf[AES_BLOCK_SIZE], x); + /* X_3 = E(K, X_2 XOR B_2) */ + wlan_crypto_aes_encrypt(aes, &aad_buf[AES_BLOCK_SIZE], x); + } +} + + +static void aes_ccm_auth(void *aes, const uint8_t *data, size_t len, uint8_t *x) +{ + size_t last = len % AES_BLOCK_SIZE; + size_t i; + + for (i = 0; i < len / AES_BLOCK_SIZE; i++) { + /* X_i+1 = E(K, X_i XOR B_i) */ + xor_aes_block(x, data); + data += AES_BLOCK_SIZE; + wlan_crypto_aes_encrypt(aes, x, x); + } + if (last) { + /* XOR zero-padded last block */ + for (i = 0; i < last; i++) + x[i] ^= *data++; + wlan_crypto_aes_encrypt(aes, x, x); + } +} + + +static void aes_ccm_encr_start(size_t L, const uint8_t *nonce, uint8_t *a) +{ + /* A_i = Flags | Nonce N | Counter i */ + a[0] = L - 1; /* Flags = L' */ + qdf_mem_copy(&a[1], nonce, 15 - L); +} + + +static void aes_ccm_encr(void *aes, size_t L, const uint8_t *in, size_t len, + uint8_t *out, uint8_t *a){ + size_t last = len % AES_BLOCK_SIZE; + size_t i; + + /* crypt = msg XOR (S_1 | S_2 | ... 
| S_n) */ + for (i = 1; i <= len / AES_BLOCK_SIZE; i++) { + wlan_crypto_put_be16(&a[AES_BLOCK_SIZE - 2], i); + /* S_i = E(K, A_i) */ + wlan_crypto_aes_encrypt(aes, a, out); + xor_aes_block(out, in); + out += AES_BLOCK_SIZE; + in += AES_BLOCK_SIZE; + } + if (last) { + wlan_crypto_put_be16(&a[AES_BLOCK_SIZE - 2], i); + wlan_crypto_aes_encrypt(aes, a, out); + /* XOR zero-padded last block */ + for (i = 0; i < last; i++) + *out++ ^= *in++; + } +} + + +static void aes_ccm_encr_auth(void *aes, size_t M, uint8_t *x, uint8_t *a, + uint8_t *auth){ + size_t i; + uint8_t tmp[AES_BLOCK_SIZE]; + + wpa_hexdump_key(MSG_EXCESSIVE, "CCM T", x, M); + /* U = T XOR S_0; S_0 = E(K, A_0) */ + wlan_crypto_put_be16(&a[AES_BLOCK_SIZE - 2], 0); + wlan_crypto_aes_encrypt(aes, a, tmp); + for (i = 0; i < M; i++) + auth[i] = x[i] ^ tmp[i]; + wpa_hexdump_key(MSG_EXCESSIVE, "CCM U", auth, M); +} + + +static void aes_ccm_decr_auth(void *aes, size_t M, uint8_t *a, + const uint8_t *auth, uint8_t *t){ + size_t i; + uint8_t tmp[AES_BLOCK_SIZE]; + + wpa_hexdump_key(MSG_EXCESSIVE, "CCM U", auth, M); + /* U = T XOR S_0; S_0 = E(K, A_0) */ + wlan_crypto_put_be16(&a[AES_BLOCK_SIZE - 2], 0); + wlan_crypto_aes_encrypt(aes, a, tmp); + for (i = 0; i < M; i++) + t[i] = auth[i] ^ tmp[i]; + wpa_hexdump_key(MSG_EXCESSIVE, "CCM T", t, M); +} + + +/* AES-CCM with fixed L=2 and aad_len <= 30 assumption */ +int wlan_crypto_aes_ccm_ae(const uint8_t *key, size_t key_len, + const uint8_t *nonce, size_t M, + const uint8_t *plain, size_t plain_len, + const uint8_t *aad, size_t aad_len, + uint8_t *crypt, uint8_t *auth){ + const size_t L = 2; + void *aes; + uint8_t x[AES_BLOCK_SIZE], a[AES_BLOCK_SIZE]; + int32_t status = -1; + + if (aad_len > 30 || M > AES_BLOCK_SIZE) + return status; + + aes = wlan_crypto_aes_encrypt_init(key, key_len); + if (aes == NULL) + return status; + + aes_ccm_auth_start(aes, M, L, nonce, aad, aad_len, plain_len, x); + aes_ccm_auth(aes, plain, plain_len, x); + + /* Encryption */ + 
aes_ccm_encr_start(L, nonce, a); + aes_ccm_encr(aes, L, plain, plain_len, crypt, a); + aes_ccm_encr_auth(aes, M, x, a, auth); + + wlan_crypto_aes_encrypt_deinit(aes); + + return 0; +} + + +/* AES-CCM with fixed L=2 and aad_len <= 30 assumption */ +int wlan_crypto_aes_ccm_ad(const uint8_t *key, size_t key_len, + const uint8_t *nonce, size_t M, const uint8_t *crypt, + size_t crypt_len, const uint8_t *aad, size_t aad_len, + const uint8_t *auth, uint8_t *plain){ + const size_t L = 2; + void *aes; + uint8_t x[AES_BLOCK_SIZE], a[AES_BLOCK_SIZE]; + uint8_t t[AES_BLOCK_SIZE]; + int32_t status = -1; + + if (aad_len > 30 || M > AES_BLOCK_SIZE) + return status; + + aes = wlan_crypto_aes_encrypt_init(key, key_len); + if (aes == NULL) + return status; + + /* Decryption */ + aes_ccm_encr_start(L, nonce, a); + aes_ccm_decr_auth(aes, M, a, auth, t); + + /* plaintext = msg XOR (S_1 | S_2 | ... | S_n) */ + aes_ccm_encr(aes, L, crypt, crypt_len, plain, a); + + aes_ccm_auth_start(aes, M, L, nonce, aad, aad_len, crypt_len, x); + aes_ccm_auth(aes, plain, crypt_len, x); + + wlan_crypto_aes_encrypt_deinit(aes); + + if (qdf_mem_cmp(x, t, M) != 0) { + wpa_printf(MSG_EXCESSIVE, "CCM: Auth mismatch"); + return status; + } + + return 0; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_ctr.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_ctr.c new file mode 100644 index 0000000000000000000000000000000000000000..a376a2eb16e02ff2c2dea320dce3941d80aa25fd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_ctr.c @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + */ + +/* + * AES-128/192/256 CTR + * + * Copyright (c) 2003-2007, Jouni Malinen + * + * This software may be distributed under the terms of the BSD license. + * See README for more details. 
+ */ + +#ifdef WLAN_SUPPORT_FILS + +#include +#include "wlan_crypto_aes_i.h" + +int32_t wlan_crypto_aes_ctr_encrypt(const uint8_t *key, size_t key_len, + const uint8_t *nonce, uint8_t *data, + size_t data_len) +{ + void *ctx; + size_t j, len, left = data_len; + int32_t i; + uint8_t *pos = data; + uint8_t counter[AES_BLOCK_SIZE], buf[AES_BLOCK_SIZE]; + int32_t status = -1; + + ctx = wlan_crypto_aes_encrypt_init(key, key_len); + if (!ctx) + return status; + + qdf_mem_copy(counter, nonce, AES_BLOCK_SIZE); + + while (left > 0) { + wlan_crypto_aes_encrypt(ctx, counter, buf); + + len = (left < AES_BLOCK_SIZE) ? left : AES_BLOCK_SIZE; + for (j = 0; j < len; j++) + pos[j] ^= buf[j]; + pos += len; + left -= len; + + for (i = AES_BLOCK_SIZE - 1; i >= 0; i--) { + counter[i]++; + if (counter[i]) + break; + } + } + wlan_crypto_aes_encrypt_deinit(ctx); + + return 0; +} + +int32_t wlan_crypto_aes_128_ctr_encrypt(const uint8_t *key, + const uint8_t *nonce, uint8_t *data, + size_t data_len) +{ + return wlan_crypto_aes_ctr_encrypt(key, 16, nonce, data, data_len); +} + +#endif /* WLAN_SUPPORT_FILS */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_ctr_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_ctr_i.h new file mode 100644 index 0000000000000000000000000000000000000000..6e408cfecc3bc0f5c4ef1d874a081164183c2267 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_ctr_i.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + */ + +/* + * AES functions + * + * Copyright (c) 2003-2006, Jouni Malinen + * + * This software may be distributed under the terms of the BSD license. + * See README for more details. 
+ */ + +#ifndef _WLAN_CRYPTO_AES_CTR_I_H_ +#define _WLAN_CRYPTO_AES_CTR_I_H_ + +#ifdef WLAN_SUPPORT_FILS +/** + * wlan_crypto_aes_ctr_encrypt - AES-128/192/256 CTR mode encryption + * @key: Key for encryption (key_len bytes) + * @key_len: Length of the key (16, 24, or 32 bytes) + * @nonce: Nonce for counter mode (16 bytes) + * @data: Data to encrypt in-place + * @data_len: Length of data in bytes + * + * Returns: 0 on success, -1 on failure + */ +int32_t wlan_crypto_aes_ctr_encrypt(const uint8_t *key, size_t key_len, + const uint8_t *nonce, uint8_t *data, + size_t data_len); + +/** + * wlan_crypto_aes_128_ctr_encrypt - AES-128 CTR mode encryption + * @key: Key for encryption (key_len bytes) + * @nonce: Nonce for counter mode (16 bytes) + * @data: Data to encrypt in-place + * @data_len: Length of data in bytes + * + * Returns: 0 on success, -1 on failure + */ +int32_t wlan_crypto_aes_128_ctr_encrypt(const uint8_t *key, + const uint8_t *nonce, uint8_t *data, + size_t data_len); +#endif /* WLAN_SUPPORT_FILS */ + +#endif /* end of _WLAN_CRYPTO_AES_CTR_I_H_ */ + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_gcm.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_gcm.c new file mode 100644 index 0000000000000000000000000000000000000000..41bc7fdfae44eac77f092316403411db8f9e2f3b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_gcm.c @@ -0,0 +1,331 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + */ +/* + * Galois/Counter Mode (GCM) and GMAC with AES + * + * Copyright (c) 2012, Jouni Malinen + * + * This software may be distributed under the terms of the BSD license. + * See README for more details. 
+ */ + +#include +#include +#include +#include "wlan_crypto_aes_i.h" +#include "wlan_crypto_def_i.h" + +static void inc32(uint8_t *block) +{ + uint32_t val; + val = wlan_crypto_get_be32(block + AES_BLOCK_SIZE - 4); + val++; + wlan_crypto_put_be32(block + AES_BLOCK_SIZE - 4, val); +} + + +static void xor_block(uint8_t *dst, const uint8_t *src) +{ + uint32_t *d = (uint32_t *) dst; + uint32_t *s = (uint32_t *) src; + *d++ ^= *s++; + *d++ ^= *s++; + *d++ ^= *s++; + *d++ ^= *s++; +} + + +static void shift_right_block(uint8_t *v) +{ + uint32_t val; + + val = wlan_crypto_get_be32(v + 12); + val >>= 1; + if (v[11] & 0x01) + val |= 0x80000000; + wlan_crypto_put_be32(v + 12, val); + + val = wlan_crypto_get_be32(v + 8); + val >>= 1; + if (v[7] & 0x01) + val |= 0x80000000; + wlan_crypto_put_be32(v + 8, val); + + val = wlan_crypto_get_be32(v + 4); + val >>= 1; + if (v[3] & 0x01) + val |= 0x80000000; + wlan_crypto_put_be32(v + 4, val); + + val = wlan_crypto_get_be32(v); + val >>= 1; + wlan_crypto_put_be32(v, val); +} + + +/* Multiplication in GF(2^128) */ +static void gf_mult(const uint8_t *x, const uint8_t *y, uint8_t *z) +{ + uint8_t v[16]; + int i, j; + + qdf_mem_set(z, 16, 0); /* Z_0 = 0^128 */ + qdf_mem_copy(v, y, 16); /* V_0 = Y */ + + for (i = 0; i < 16; i++) { + for (j = 0; j < 8; j++) { + if (x[i] & BIT(7 - j)) { + /* Z_(i + 1) = Z_i XOR V_i */ + xor_block(z, v); + } else { + /* Z_(i + 1) = Z_i */ + } + + if (v[15] & 0x01) { + /* V_(i + 1) = (V_i >> 1) XOR R */ + shift_right_block(v); + /* R = 11100001 || 0^120 */ + v[0] ^= 0xe1; + } else { + /* V_(i + 1) = V_i >> 1 */ + shift_right_block(v); + } + } + } +} + + +static void ghash_start(uint8_t *y) +{ + /* Y_0 = 0^128 */ + qdf_mem_set(y, 16, 0); +} + + +static void ghash(const uint8_t *h, const uint8_t *x, size_t xlen, uint8_t *y) +{ + size_t m, i; + const uint8_t *xpos = x; + uint8_t tmp[16]; + + m = xlen / 16; + + for (i = 0; i < m; i++) { + /* Y_i = (Y^(i-1) XOR X_i) dot H */ + xor_block(y, xpos); + xpos += 16; + + /* 
dot operation: + * multiplication operation for binary Galois (finite) field of + * 2^128 elements */ + gf_mult(y, h, tmp); + qdf_mem_copy(y, tmp, 16); + } + + if (x + xlen > xpos) { + /* Add zero padded last block */ + size_t last = x + xlen - xpos; + qdf_mem_copy(tmp, xpos, last); + qdf_mem_set(tmp + last, sizeof(tmp) - last, 0); + + /* Y_i = (Y^(i-1) XOR X_i) dot H */ + xor_block(y, tmp); + + /* dot operation: + * multiplication operation for binary Galois (finite) field of + * 2^128 elements */ + gf_mult(y, h, tmp); + qdf_mem_copy(y, tmp, 16); + } + + /* Return Y_m */ +} + + +static void aes_gctr(void *aes, const uint8_t *icb, const uint8_t *x, + size_t xlen, uint8_t *y){ + size_t i, n, last; + uint8_t cb[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE]; + const uint8_t *xpos = x; + uint8_t *ypos = y; + + if (xlen == 0) + return; + + n = xlen / 16; + + qdf_mem_copy(cb, icb, AES_BLOCK_SIZE); + /* Full blocks */ + for (i = 0; i < n; i++) { + wlan_crypto_aes_encrypt(aes, cb, ypos); + xor_block(ypos, xpos); + xpos += AES_BLOCK_SIZE; + ypos += AES_BLOCK_SIZE; + inc32(cb); + } + + last = x + xlen - xpos; + if (last) { + /* Last, partial block */ + wlan_crypto_aes_encrypt(aes, cb, tmp); + for (i = 0; i < last; i++) + *ypos++ = *xpos++ ^ tmp[i]; + } +} + + +static void *aes_gcm_init_hash_subkey(const uint8_t *key, size_t key_len, + uint8_t *H){ + void *aes; + + aes = wlan_crypto_aes_encrypt_init(key, key_len); + if (aes == NULL) + return NULL; + + /* Generate hash subkey H = AES_K(0^128) */ + qdf_mem_set(H, AES_BLOCK_SIZE, 0); + wlan_crypto_aes_encrypt(aes, H, H); + wpa_hexdump_key(MSG_EXCESSIVE, "Hash subkey H for GHASH", + H, AES_BLOCK_SIZE); + return aes; +} + + +static void aes_gcm_prepare_j0(const uint8_t *iv, size_t iv_len, + const uint8_t *H, uint8_t *J0){ + uint8_t len_buf[16]; + + if (iv_len == 12) { + /* Prepare block J_0 = IV || 0^31 || 1 [len(IV) = 96] */ + qdf_mem_copy(J0, iv, iv_len); + qdf_mem_set(J0 + iv_len, AES_BLOCK_SIZE - iv_len, 0); + J0[AES_BLOCK_SIZE - 1] = 
0x01; + } else { + /* + * s = 128 * ceil(len(IV)/128) - len(IV) + * J_0 = GHASH_H(IV || 0^(s+64) || [len(IV)]_64) + */ + ghash_start(J0); + ghash(H, iv, iv_len, J0); + wlan_crypto_put_be64(len_buf, 0); + wlan_crypto_put_be64(len_buf + 8, iv_len * 8); + ghash(H, len_buf, sizeof(len_buf), J0); + } +} + + +static void aes_gcm_gctr(void *aes, const uint8_t *J0, const uint8_t *in, + size_t len, uint8_t *out){ + uint8_t J0inc[AES_BLOCK_SIZE]; + + if (len == 0) + return; + + qdf_mem_copy(J0inc, J0, AES_BLOCK_SIZE); + inc32(J0inc); + aes_gctr(aes, J0inc, in, len, out); +} + + +static void aes_gcm_ghash(const uint8_t *H, const uint8_t *aad, size_t aad_len, + const uint8_t *crypt, size_t crypt_len, uint8_t *S) +{ + uint8_t len_buf[16]; + + /* + * u = 128 * ceil[len(C)/128] - len(C) + * v = 128 * ceil[len(A)/128] - len(A) + * S = GHASH_H(A || 0^v || C || 0^u || [len(A)]64 || [len(C)]64) + * (i.e., zero padded to block size A || C and lengths of each in bits) + */ + ghash_start(S); + ghash(H, aad, aad_len, S); + ghash(H, crypt, crypt_len, S); + wlan_crypto_put_be64(len_buf, aad_len * 8); + wlan_crypto_put_be64(len_buf + 8, crypt_len * 8); + ghash(H, len_buf, sizeof(len_buf), S); + + wpa_hexdump_key(MSG_EXCESSIVE, "S = GHASH_H(...)", S, 16); +} + + +/** + * aes_gcm_ae - GCM-AE_K(IV, P, A) + */ +int wlan_crypto_aes_gcm_ae(const uint8_t *key, size_t key_len, + const uint8_t *iv, size_t iv_len, const uint8_t *plain, + size_t plain_len, const uint8_t *aad, size_t aad_len, + uint8_t *crypt, uint8_t *tag){ + uint8_t H[AES_BLOCK_SIZE]; + uint8_t J0[AES_BLOCK_SIZE]; + uint8_t S[16]; + void *aes; + int32_t status = -1; + + aes = aes_gcm_init_hash_subkey(key, key_len, H); + if (aes == NULL) + return status; + + aes_gcm_prepare_j0(iv, iv_len, H, J0); + + /* C = GCTR_K(inc_32(J_0), P) */ + aes_gcm_gctr(aes, J0, plain, plain_len, crypt); + + aes_gcm_ghash(H, aad, aad_len, crypt, plain_len, S); + + /* T = MSB_t(GCTR_K(J_0, S)) */ + aes_gctr(aes, J0, S, sizeof(S), tag); + + /* Return (C, T) 
*/ + + wlan_crypto_aes_encrypt_deinit(aes); + + return 0; +} + + +/** + * aes_gcm_ad - GCM-AD_K(IV, C, A, T) + */ +int wlan_crypto_aes_gcm_ad(const uint8_t *key, size_t key_len, + const uint8_t *iv, size_t iv_len, const uint8_t *crypt, + size_t crypt_len, const uint8_t *aad, size_t aad_len, + const uint8_t *tag, uint8_t *plain){ + uint8_t H[AES_BLOCK_SIZE]; + uint8_t J0[AES_BLOCK_SIZE]; + uint8_t S[16], T[16]; + void *aes; + int32_t status = -1; + + aes = aes_gcm_init_hash_subkey(key, key_len, H); + if (aes == NULL) + return status; + + aes_gcm_prepare_j0(iv, iv_len, H, J0); + + /* P = GCTR_K(inc_32(J_0), C) */ + aes_gcm_gctr(aes, J0, crypt, crypt_len, plain); + + aes_gcm_ghash(H, aad, aad_len, crypt, crypt_len, S); + + /* T' = MSB_t(GCTR_K(J_0, S)) */ + aes_gctr(aes, J0, S, sizeof(S), T); + + wlan_crypto_aes_encrypt_deinit(aes); + + if (qdf_mem_cmp(tag, T, 16) != 0) { + wpa_printf(MSG_EXCESSIVE, "GCM: Tag mismatch"); + return status; + } + + return 0; +} + + +int wlan_crypto_aes_gmac(const uint8_t *key, size_t key_len, + const uint8_t *iv, size_t iv_len, const uint8_t *aad, + size_t aad_len, uint8_t *tag){ + return wlan_crypto_aes_gcm_ae(key, key_len, iv, iv_len, NULL, 0, + aad, aad_len, NULL, tag); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_i.h new file mode 100644 index 0000000000000000000000000000000000000000..de55210a9db2f75ecbc74dfad29292a9ccd1f3f7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_i.h @@ -0,0 +1,260 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + */ +/* + * AES (Rijndael) cipher + * Copyright (c) 2003-2012, Jouni Malinen + * + * This software may be distributed under the terms of the BSD license. + * See README for more details. 
+ */ + +#ifndef WLAN_CRYPTO_AES_I_H +#define WLAN_CRYPTO_AES_I_H + +#include +#include +#include +#include +#include +#include +#include +#include "wlan_crypto_global_def.h" + + +#define CCMP_IV_SIZE (WLAN_CRYPTO_IV_LEN + WLAN_CRYPTO_KEYID_LEN \ + + WLAN_CRYPTO_EXT_IV_LEN) + +#define AES_BLOCK_SIZE 16 +#define wpa_printf(args...) do { } while (0) +#define wpa_hexdump(l, t, b, le) do { } while (0) +#define wpa_hexdump_buf(l, t, b) do { } while (0) +#define wpa_hexdump_key(l, t, b, le) do { } while (0) +#define wpa_hexdump_buf_key(l, t, b) do { } while (0) +#define wpa_hexdump_ascii(l, t, b, le) do { } while (0) +#define wpa_hexdump_ascii_key(l, t, b, le) do { } while (0) +/* #define FULL_UNROLL */ +#define AES_SMALL_TABLES + +extern const uint32_t Te0[256]; +extern const uint32_t Te1[256]; +extern const uint32_t Te2[256]; +extern const uint32_t Te3[256]; +extern const uint32_t Te4[256]; +extern const uint32_t Td0[256]; +extern const uint32_t Td1[256]; +extern const uint32_t Td2[256]; +extern const uint32_t Td3[256]; +extern const uint32_t Td4[256]; +extern const uint32_t rcon[10]; +extern const uint8_t Td4s[256]; +extern const uint8_t rcons[10]; + +#ifndef AES_SMALL_TABLES + +#define RCON(i) rcon[(i)] +static inline uint32_t rotr(uint32_t val, int bits) +{ + return (val >> bits) | (val << (32 - bits)); +} + +#define TE0(i) Te0[((i) >> 24) & 0xff] +#define TE1(i) Te1[((i) >> 16) & 0xff] +#define TE2(i) Te2[((i) >> 8) & 0xff] +#define TE3(i) Te3[(i) & 0xff] +#define TE41(i) (Te4[((i) >> 24) & 0xff] & 0xff000000) +#define TE42(i) (Te4[((i) >> 16) & 0xff] & 0x00ff0000) +#define TE43(i) (Te4[((i) >> 8) & 0xff] & 0x0000ff00) +#define TE44(i) (Te4[(i) & 0xff] & 0x000000ff) +#define TE421(i) (Te4[((i) >> 16) & 0xff] & 0xff000000) +#define TE432(i) (Te4[((i) >> 8) & 0xff] & 0x00ff0000) +#define TE443(i) (Te4[(i) & 0xff] & 0x0000ff00) +#define TE414(i) (Te4[((i) >> 24) & 0xff] & 0x000000ff) +#define TE411(i) (Te4[((i) >> 24) & 0xff] & 0xff000000) +#define TE422(i) (Te4[((i) 
>> 16) & 0xff] & 0x00ff0000) +#define TE433(i) (Te4[((i) >> 8) & 0xff] & 0x0000ff00) +#define TE444(i) (Te4[(i) & 0xff] & 0x000000ff) +#define TE4(i) (Te4[(i)] & 0x000000ff) + +#define TD0(i) Td0[((i) >> 24) & 0xff] +#define TD1(i) Td1[((i) >> 16) & 0xff] +#define TD2(i) Td2[((i) >> 8) & 0xff] +#define TD3(i) Td3[(i) & 0xff] +#define TD41(i) (Td4[((i) >> 24) & 0xff] & 0xff000000) +#define TD42(i) (Td4[((i) >> 16) & 0xff] & 0x00ff0000) +#define TD43(i) (Td4[((i) >> 8) & 0xff] & 0x0000ff00) +#define TD44(i) (Td4[(i) & 0xff] & 0x000000ff) +#define TD0_(i) Td0[(i) & 0xff] +#define TD1_(i) Td1[(i) & 0xff] +#define TD2_(i) Td2[(i) & 0xff] +#define TD3_(i) Td3[(i) & 0xff] + +#else /* AES_SMALL_TABLES */ + +#define RCON(i) (rcons[(i)] << 24) + +static inline uint32_t rotr(uint32_t val, int bits) +{ + return (val >> bits) | (val << (32 - bits)); +} + +#define TE0(i) Te0[((i) >> 24) & 0xff] +#define TE1(i) rotr(Te0[((i) >> 16) & 0xff], 8) +#define TE2(i) rotr(Te0[((i) >> 8) & 0xff], 16) +#define TE3(i) rotr(Te0[(i) & 0xff], 24) +#define TE41(i) ((Te0[((i) >> 24) & 0xff] << 8) & 0xff000000) +#define TE42(i) (Te0[((i) >> 16) & 0xff] & 0x00ff0000) +#define TE43(i) (Te0[((i) >> 8) & 0xff] & 0x0000ff00) +#define TE44(i) ((Te0[(i) & 0xff] >> 8) & 0x000000ff) +#define TE421(i) ((Te0[((i) >> 16) & 0xff] << 8) & 0xff000000) +#define TE432(i) (Te0[((i) >> 8) & 0xff] & 0x00ff0000) +#define TE443(i) (Te0[(i) & 0xff] & 0x0000ff00) +#define TE414(i) ((Te0[((i) >> 24) & 0xff] >> 8) & 0x000000ff) +#define TE411(i) ((Te0[((i) >> 24) & 0xff] << 8) & 0xff000000) +#define TE422(i) (Te0[((i) >> 16) & 0xff] & 0x00ff0000) +#define TE433(i) (Te0[((i) >> 8) & 0xff] & 0x0000ff00) +#define TE444(i) ((Te0[(i) & 0xff] >> 8) & 0x000000ff) +#define TE4(i) ((Te0[(i)] >> 8) & 0x000000ff) + +#define TD0(i) Td0[((i) >> 24) & 0xff] +#define TD1(i) rotr(Td0[((i) >> 16) & 0xff], 8) +#define TD2(i) rotr(Td0[((i) >> 8) & 0xff], 16) +#define TD3(i) rotr(Td0[(i) & 0xff], 24) +#define TD41(i) (Td4s[((i) >> 24) & 
0xff] << 24) +#define TD42(i) (Td4s[((i) >> 16) & 0xff] << 16) +#define TD43(i) (Td4s[((i) >> 8) & 0xff] << 8) +#define TD44(i) (Td4s[(i) & 0xff]) +#define TD0_(i) Td0[(i) & 0xff] +#define TD1_(i) rotr(Td0[(i) & 0xff], 8) +#define TD2_(i) rotr(Td0[(i) & 0xff], 16) +#define TD3_(i) rotr(Td0[(i) & 0xff], 24) + +#endif /* AES_SMALL_TABLES */ + +#ifdef _MSC_VER + +#define SWAP(x) (_lrotl(x, 8) & 0x00ff00ff | _lrotr(x, 8) & 0xff00ff00) +#define GETU32(p) SWAP(*((uint32_t *)(p))) +#define PUTU32(ct, st) { *((uint32_t *)(ct)) = SWAP((st)); } +#else +#define SWAP(x) (_lrotl(x, 8) & 0x00ff00ff | _lrotr(x, 8) & 0xff00ff00) + +#define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^\ + ((u32)(pt)[2] << 8) ^ ((u32)(pt)[3])) +#define PUTU32(ct, st) {\ + (ct)[0] = (u8)((st) >> 24);\ + (ct)[1] = (u8)((st) >> 16);\ + (ct)[2] = (u8)((st) >> 8);\ + (ct)[3] = (u8)(st); } +#endif + +#define AES_PRIV_SIZE (4 * 4 * 15 + 4) +#define AES_PRIV_NR_POS (4 * 15) + +#define WLAN_ALEN (6) + +struct ieee80211_hdr { + uint8_t frame_control[2]; + uint8_t duration_id[2]; + uint8_t addr1[WLAN_ALEN]; + uint8_t addr2[WLAN_ALEN]; + uint8_t addr3[WLAN_ALEN]; + uint8_t seq_ctrl[2]; +} __packed; + +struct ieee80211_hdr_addr4 { + uint8_t frame_control[2]; + uint8_t duration_id[2]; + uint8_t addr1[WLAN_ALEN]; + uint8_t addr2[WLAN_ALEN]; + uint8_t addr3[WLAN_ALEN]; + uint8_t seq_ctrl[2]; + uint8_t addr4[WLAN_ALEN]; +} __packed; + +struct ieee80211_hdr_qos { + uint8_t frame_control[2]; + uint8_t duration_id[2]; + uint8_t addr1[WLAN_ALEN]; + uint8_t addr2[WLAN_ALEN]; + uint8_t addr3[WLAN_ALEN]; + uint8_t seq_ctrl[2]; + uint8_t qos[2]; +} __packed; + +struct ieee80211_hdr_qos_addr4 { + uint8_t frame_control[2]; + uint8_t duration_id[2]; + uint8_t addr1[WLAN_ALEN]; + uint8_t addr2[WLAN_ALEN]; + uint8_t addr3[WLAN_ALEN]; + uint8_t seq_ctrl[2]; + uint8_t addr4[WLAN_ALEN]; + uint8_t qos[2]; +} __packed; + +int wlan_crypto_rijndaelKeySetupEnc(uint32_t rk[], const uint8_t cipherKey[], + int keyBits); + 
+uint8_t *wlan_crypto_ccmp_encrypt(const uint8_t *key, uint8_t *frame, + size_t len, size_t hdrlen); + +uint8_t *wlan_crypto_ccmp_decrypt(const uint8_t *key, + const struct ieee80211_hdr *hdr, + uint8_t *data, size_t data_len); + +uint8_t *wlan_crypto_tkip_encrypt(const uint8_t *key, uint8_t *frame, + size_t len, size_t hdrlen); + +uint8_t *wlan_crypto_tkip_decrypt(const uint8_t *key, + const struct ieee80211_hdr *hdr, + uint8_t *data, size_t data_len); + +uint8_t *wlan_crypto_wep_encrypt(const uint8_t *key, uint16_t key_len, + uint8_t *data, size_t data_len); +uint8_t *wlan_crypto_wep_decrypt(const uint8_t *key, uint16_t key_len, + uint8_t *data, size_t data_len); + +void wlan_crypto_wep_crypt(uint8_t *key, uint8_t *buf, size_t plen); + +uint32_t wlan_crypto_crc32(const uint8_t *frame, size_t frame_len); + +int wlan_crypto_aes_gcm_ae(const uint8_t *key, size_t key_len, + const uint8_t *iv, size_t iv_len, + const uint8_t *plain, size_t plain_len, + const uint8_t *aad, size_t aad_len, + uint8_t *crypt, uint8_t *tag); + +int wlan_crypto_aes_gcm_ad(const uint8_t *key, size_t key_len, + const uint8_t *iv, size_t iv_len, + const uint8_t *crypt, size_t crypt_len, + const uint8_t *aad, size_t aad_len, + const uint8_t *tag, + uint8_t *plain); + +int wlan_crypto_aes_gmac(const uint8_t *key, size_t key_len, + const uint8_t *iv, size_t iv_len, + const uint8_t *aad, size_t aad_len, uint8_t *tag); +int wlan_crypto_aes_ccm_ae(const uint8_t *key, size_t key_len, + const uint8_t *nonce, size_t M, const uint8_t *plain, + size_t plain_len, const uint8_t *aad, size_t aad_len, + uint8_t *crypt, uint8_t *auth); + +int wlan_crypto_aes_ccm_ad(const uint8_t *key, size_t key_len, + const uint8_t *nonce, size_t M, const uint8_t *crypt, + size_t crypt_len, const uint8_t *aad, size_t aad_len, + const uint8_t *auth, uint8_t *plain); + +void *wlan_crypto_aes_encrypt_init(const uint8_t *key, size_t len); +void wlan_crypto_aes_encrypt(void *ctx, const uint8_t *plain, uint8_t *crypt); +void 
wlan_crypto_aes_encrypt_deinit(void *ctx); +void *wlan_crypto_aes_decrypt_init(const uint8_t *key, size_t len); +void wlan_crypto_aes_decrypt(void *ctx, const uint8_t *crypt, uint8_t *plain); +void wlan_crypto_aes_decrypt_deinit(void *ctx); +int omac1_aes_128(const uint8_t *key, const uint8_t *data, + size_t data_len, uint8_t *mac); +int omac1_aes_256(const uint8_t *key, const uint8_t *data, + size_t data_len, uint8_t *mac); +int omac1_aes_vector(const uint8_t *key, size_t key_len, size_t num_elem, + const uint8_t *addr[], const size_t *len, uint8_t *mac); +#endif /* WLAN_CRYPTO_AES_I_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_internal.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_internal.c new file mode 100644 index 0000000000000000000000000000000000000000..d47477b8a5737cbbcd5d589a647a70f924f2ec75 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_internal.c @@ -0,0 +1,853 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + */ +/* + * AES (Rijndael) cipher + * + * Modifications to public domain implementation: + * - cleanup + * - use C pre-processor to make it easier to change S table access + * - added option (AES_SMALL_TABLES) for reducing code size by about 8 kB at + * cost of reduced throughput (quite small difference on Pentium 4, + * 10-25% when using -O1 or -O2 optimization) + * + * Copyright (c) 2003-2012, Jouni Malinen + * + * This software may be distributed under the terms of the BSD license. + * See README for more details. + */ + +#include +#include +#include +#include "wlan_crypto_aes_i.h" +#include "wlan_crypto_def_i.h" + +/* + * rijndael-alg-fst.c + * + * @version 3.0 (December 2000) + * + * Optimised ANSI C code for the Rijndael cipher (now AES) + * + * @author Vincent Rijmen + * @author Antoon Bosselaers + * @author Paulo Barreto + * + * This code is hereby placed in the public domain. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, + * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/* +Te0[x] = S [x].[02, 01, 01, 03]; +Te1[x] = S [x].[03, 02, 01, 01]; +Te2[x] = S [x].[01, 03, 02, 01]; +Te3[x] = S [x].[01, 01, 03, 02]; +Te4[x] = S [x].[01, 01, 01, 01]; + +Td0[x] = Si[x].[0e, 09, 0d, 0b]; +Td1[x] = Si[x].[0b, 0e, 09, 0d]; +Td2[x] = Si[x].[0d, 0b, 0e, 09]; +Td3[x] = Si[x].[09, 0d, 0b, 0e]; +Td4[x] = Si[x].[01, 01, 01, 01]; +*/ + +const uint32_t Te0[256] = { + 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU, + 0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U, + 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU, + 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU, + 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U, + 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU, + 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU, + 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU, + 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU, + 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU, + 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U, + 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU, + 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU, + 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U, + 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU, + 0xcdebeb26U, 0x4e272769U, 
0x7fb2b2cdU, 0xea75759fU, + 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU, + 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU, + 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU, + 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U, + 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU, + 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU, + 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU, + 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU, + 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U, + 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U, + 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U, + 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U, + 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU, + 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U, + 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U, + 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU, + 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU, + 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U, + 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U, + 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U, + 0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU, + 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U, + 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU, + 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U, + 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU, + 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U, + 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U, + 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU, + 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U, + 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U, + 0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U, + 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U, + 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U, + 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U, + 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U, + 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U, + 0xe0707090U, 0x7c3e3e42U, 
0x71b5b5c4U, 0xcc6666aaU, + 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U, + 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U, + 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U, + 0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U, + 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U, + 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U, + 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU, + 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U, + 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U, + 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U, + 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU, +}; +#ifndef AES_SMALL_TABLES +const uint32_t Te1[256] = { + 0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU, + 0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U, + 0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU, + 0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U, + 0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU, + 0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U, + 0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU, + 0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U, + 0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U, + 0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU, + 0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U, + 0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U, + 0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U, + 0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU, + 0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U, + 0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U, + 0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU, + 0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U, + 0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U, + 0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U, + 0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU, + 0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU, + 0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U, + 0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU, + 0x6bbbd0d0U, 
0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU, + 0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U, + 0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU, + 0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U, + 0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU, + 0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U, + 0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U, + 0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U, + 0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU, + 0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U, + 0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU, + 0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U, + 0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU, + 0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U, + 0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U, + 0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU, + 0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU, + 0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU, + 0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U, + 0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U, + 0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU, + 0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U, + 0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU, + 0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U, + 0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU, + 0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U, + 0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU, + 0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU, + 0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U, + 0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU, + 0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U, + 0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU, + 0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U, + 0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U, + 0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U, + 0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU, + 0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU, + 0xda65bfbfU, 
0x31d7e6e6U, 0xc6844242U, 0xb8d06868U, + 0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU, + 0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U, +}; +const uint32_t Te2[256] = { + 0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU, + 0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U, + 0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU, + 0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U, + 0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU, + 0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U, + 0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU, + 0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U, + 0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U, + 0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU, + 0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U, + 0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U, + 0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U, + 0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU, + 0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U, + 0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U, + 0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU, + 0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U, + 0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U, + 0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U, + 0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU, + 0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU, + 0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U, + 0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU, + 0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU, + 0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U, + 0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU, + 0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U, + 0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU, + 0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U, + 0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U, + 0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U, + 0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU, + 0x5fe1be5fU, 0x97a23597U, 
0x44cc8844U, 0x17392e17U, + 0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU, + 0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U, + 0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU, + 0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U, + 0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U, + 0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU, + 0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU, + 0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU, + 0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U, + 0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U, + 0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU, + 0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U, + 0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU, + 0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U, + 0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU, + 0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U, + 0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU, + 0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU, + 0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U, + 0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU, + 0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U, + 0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU, + 0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U, + 0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U, + 0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U, + 0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU, + 0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU, + 0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U, + 0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU, + 0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U, +}; +const uint32_t Te3[256] = { + + 0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U, + 0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U, + 0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U, + 0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU, + 0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU, + 0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 
0xf0f00bfbU, + 0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U, + 0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU, + 0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU, + 0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U, + 0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U, + 0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU, + 0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU, + 0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU, + 0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU, + 0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU, + 0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U, + 0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU, + 0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU, + 0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U, + 0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U, + 0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U, + 0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U, + 0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U, + 0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU, + 0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U, + 0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU, + 0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU, + 0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U, + 0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U, + 0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U, + 0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU, + 0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U, + 0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU, + 0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU, + 0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U, + 0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U, + 0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU, + 0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U, + 0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU, + 0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U, + 0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U, + 0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 
0x6262a6c4U, + 0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U, + 0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU, + 0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U, + 0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU, + 0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U, + 0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU, + 0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U, + 0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU, + 0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU, + 0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU, + 0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU, + 0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U, + 0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U, + 0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U, + 0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U, + 0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U, + 0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U, + 0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU, + 0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U, + 0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU, + 0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU, +}; +const uint32_t Te4[256] = { + 0x63636363U, 0x7c7c7c7cU, 0x77777777U, 0x7b7b7b7bU, + 0xf2f2f2f2U, 0x6b6b6b6bU, 0x6f6f6f6fU, 0xc5c5c5c5U, + 0x30303030U, 0x01010101U, 0x67676767U, 0x2b2b2b2bU, + 0xfefefefeU, 0xd7d7d7d7U, 0xababababU, 0x76767676U, + 0xcacacacaU, 0x82828282U, 0xc9c9c9c9U, 0x7d7d7d7dU, + 0xfafafafaU, 0x59595959U, 0x47474747U, 0xf0f0f0f0U, + 0xadadadadU, 0xd4d4d4d4U, 0xa2a2a2a2U, 0xafafafafU, + 0x9c9c9c9cU, 0xa4a4a4a4U, 0x72727272U, 0xc0c0c0c0U, + 0xb7b7b7b7U, 0xfdfdfdfdU, 0x93939393U, 0x26262626U, + 0x36363636U, 0x3f3f3f3fU, 0xf7f7f7f7U, 0xccccccccU, + 0x34343434U, 0xa5a5a5a5U, 0xe5e5e5e5U, 0xf1f1f1f1U, + 0x71717171U, 0xd8d8d8d8U, 0x31313131U, 0x15151515U, + 0x04040404U, 0xc7c7c7c7U, 0x23232323U, 0xc3c3c3c3U, + 0x18181818U, 0x96969696U, 0x05050505U, 0x9a9a9a9aU, + 0x07070707U, 0x12121212U, 0x80808080U, 0xe2e2e2e2U, + 
0xebebebebU, 0x27272727U, 0xb2b2b2b2U, 0x75757575U, + 0x09090909U, 0x83838383U, 0x2c2c2c2cU, 0x1a1a1a1aU, + 0x1b1b1b1bU, 0x6e6e6e6eU, 0x5a5a5a5aU, 0xa0a0a0a0U, + 0x52525252U, 0x3b3b3b3bU, 0xd6d6d6d6U, 0xb3b3b3b3U, + 0x29292929U, 0xe3e3e3e3U, 0x2f2f2f2fU, 0x84848484U, + 0x53535353U, 0xd1d1d1d1U, 0x00000000U, 0xededededU, + 0x20202020U, 0xfcfcfcfcU, 0xb1b1b1b1U, 0x5b5b5b5bU, + 0x6a6a6a6aU, 0xcbcbcbcbU, 0xbebebebeU, 0x39393939U, + 0x4a4a4a4aU, 0x4c4c4c4cU, 0x58585858U, 0xcfcfcfcfU, + 0xd0d0d0d0U, 0xefefefefU, 0xaaaaaaaaU, 0xfbfbfbfbU, + 0x43434343U, 0x4d4d4d4dU, 0x33333333U, 0x85858585U, + 0x45454545U, 0xf9f9f9f9U, 0x02020202U, 0x7f7f7f7fU, + 0x50505050U, 0x3c3c3c3cU, 0x9f9f9f9fU, 0xa8a8a8a8U, + 0x51515151U, 0xa3a3a3a3U, 0x40404040U, 0x8f8f8f8fU, + 0x92929292U, 0x9d9d9d9dU, 0x38383838U, 0xf5f5f5f5U, + 0xbcbcbcbcU, 0xb6b6b6b6U, 0xdadadadaU, 0x21212121U, + 0x10101010U, 0xffffffffU, 0xf3f3f3f3U, 0xd2d2d2d2U, + 0xcdcdcdcdU, 0x0c0c0c0cU, 0x13131313U, 0xececececU, + 0x5f5f5f5fU, 0x97979797U, 0x44444444U, 0x17171717U, + 0xc4c4c4c4U, 0xa7a7a7a7U, 0x7e7e7e7eU, 0x3d3d3d3dU, + 0x64646464U, 0x5d5d5d5dU, 0x19191919U, 0x73737373U, + 0x60606060U, 0x81818181U, 0x4f4f4f4fU, 0xdcdcdcdcU, + 0x22222222U, 0x2a2a2a2aU, 0x90909090U, 0x88888888U, + 0x46464646U, 0xeeeeeeeeU, 0xb8b8b8b8U, 0x14141414U, + 0xdedededeU, 0x5e5e5e5eU, 0x0b0b0b0bU, 0xdbdbdbdbU, + 0xe0e0e0e0U, 0x32323232U, 0x3a3a3a3aU, 0x0a0a0a0aU, + 0x49494949U, 0x06060606U, 0x24242424U, 0x5c5c5c5cU, + 0xc2c2c2c2U, 0xd3d3d3d3U, 0xacacacacU, 0x62626262U, + 0x91919191U, 0x95959595U, 0xe4e4e4e4U, 0x79797979U, + 0xe7e7e7e7U, 0xc8c8c8c8U, 0x37373737U, 0x6d6d6d6dU, + 0x8d8d8d8dU, 0xd5d5d5d5U, 0x4e4e4e4eU, 0xa9a9a9a9U, + 0x6c6c6c6cU, 0x56565656U, 0xf4f4f4f4U, 0xeaeaeaeaU, + 0x65656565U, 0x7a7a7a7aU, 0xaeaeaeaeU, 0x08080808U, + 0xbabababaU, 0x78787878U, 0x25252525U, 0x2e2e2e2eU, + 0x1c1c1c1cU, 0xa6a6a6a6U, 0xb4b4b4b4U, 0xc6c6c6c6U, + 0xe8e8e8e8U, 0xddddddddU, 0x74747474U, 0x1f1f1f1fU, + 0x4b4b4b4bU, 0xbdbdbdbdU, 0x8b8b8b8bU, 0x8a8a8a8aU, + 
0x70707070U, 0x3e3e3e3eU, 0xb5b5b5b5U, 0x66666666U, + 0x48484848U, 0x03030303U, 0xf6f6f6f6U, 0x0e0e0e0eU, + 0x61616161U, 0x35353535U, 0x57575757U, 0xb9b9b9b9U, + 0x86868686U, 0xc1c1c1c1U, 0x1d1d1d1dU, 0x9e9e9e9eU, + 0xe1e1e1e1U, 0xf8f8f8f8U, 0x98989898U, 0x11111111U, + 0x69696969U, 0xd9d9d9d9U, 0x8e8e8e8eU, 0x94949494U, + 0x9b9b9b9bU, 0x1e1e1e1eU, 0x87878787U, 0xe9e9e9e9U, + 0xcecececeU, 0x55555555U, 0x28282828U, 0xdfdfdfdfU, + 0x8c8c8c8cU, 0xa1a1a1a1U, 0x89898989U, 0x0d0d0d0dU, + 0xbfbfbfbfU, 0xe6e6e6e6U, 0x42424242U, 0x68686868U, + 0x41414141U, 0x99999999U, 0x2d2d2d2dU, 0x0f0f0f0fU, + 0xb0b0b0b0U, 0x54545454U, 0xbbbbbbbbU, 0x16161616U, +}; +#endif /* AES_SMALL_TABLES */ +const uint32_t Td0[256] = { + 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U, + 0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U, + 0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U, + 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU, + 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U, + 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U, + 0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU, + 0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U, + 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU, + 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U, + 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U, + 0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U, + 0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U, + 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU, + 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U, + 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU, + 0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U, + 0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU, + 0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U, + 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U, + 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U, + 0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU, + 0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U, + 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 
0x79c8eedbU, + 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U, + 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU, + 0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U, + 0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU, + 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU, + 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U, + 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU, + 0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U, + 0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU, + 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U, + 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U, + 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U, + 0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU, + 0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U, + 0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U, + 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU, + 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U, + 0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U, + 0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U, + 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U, + 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U, + 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU, + 0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U, + 0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U, + 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U, + 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U, + 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U, + 0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU, + 0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU, + 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU, + 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU, + 0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U, + 0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U, + 0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU, + 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU, + 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U, + 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 
0xc2a3405fU, + 0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U, + 0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U, + 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U, +}; +#ifndef AES_SMALL_TABLES +const uint32_t Td1[256] = { + 0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU, + 0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U, + 0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU, + 0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U, + 0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U, + 0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U, + 0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U, + 0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U, + 0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U, + 0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU, + 0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU, + 0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU, + 0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U, + 0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU, + 0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U, + 0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U, + 0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U, + 0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU, + 0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU, + 0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U, + 0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU, + 0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U, + 0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU, + 0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU, + 0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U, + 0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U, + 0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U, + 0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU, + 0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U, + 0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU, + 0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U, + 0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U, + 0x8557f119U, 0x4caf7507U, 
0xbbee99ddU, 0xfda37f60U, + 0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU, + 0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U, + 0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U, + 0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U, + 0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U, + 0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U, + 0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U, + 0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU, + 0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU, + 0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U, + 0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU, + 0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U, + 0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU, + 0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU, + 0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U, + 0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU, + 0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U, + 0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U, + 0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U, + 0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U, + 0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U, + 0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U, + 0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U, + 0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU, + 0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U, + 0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U, + 0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU, + 0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U, + 0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U, + 0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U, + 0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U, +}; +const uint32_t Td2[256] = { + 0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U, + 0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U, + 0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U, + 0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U, + 0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 
0xc0e15dfeU, + 0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U, + 0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U, + 0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U, + 0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U, + 0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU, + 0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U, + 0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U, + 0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU, + 0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U, + 0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U, + 0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U, + 0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U, + 0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U, + 0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U, + 0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU, + + 0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U, + 0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U, + 0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U, + 0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U, + 0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U, + 0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU, + 0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU, + 0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U, + 0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU, + 0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U, + 0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU, + 0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU, + 0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU, + 0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU, + 0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U, + 0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U, + 0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U, + 0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U, + 0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U, + 0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U, + 0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U, + 0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 
0xbfa43fadU, + 0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU, + 0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U, + 0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U, + 0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU, + 0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU, + 0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U, + 0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U, + 0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U, + 0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U, + 0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U, + 0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U, + 0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U, + 0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU, + 0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U, + 0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U, + 0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U, + 0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U, + 0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U, + 0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U, + 0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU, + 0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U, + 0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U, +}; +const uint32_t Td3[256] = { + 0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU, + 0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU, + 0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U, + 0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U, + 0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU, + 0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU, + 0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U, + 0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU, + 0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U, + 0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU, + 0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U, + 0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U, + 0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U, + 0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U, + 
0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U, + 0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU, + 0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU, + 0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U, + 0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U, + 0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU, + 0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU, + 0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U, + 0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U, + 0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U, + 0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U, + 0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU, + 0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U, + 0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U, + 0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU, + 0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU, + 0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U, + 0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U, + 0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U, + 0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU, + 0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U, + 0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U, + 0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U, + 0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U, + 0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U, + 0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U, + 0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U, + 0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU, + 0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U, + 0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U, + 0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU, + 0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU, + 0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U, + 0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU, + 0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U, + 0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U, + 0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U, + 
0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U, + 0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U, + 0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U, + 0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU, + 0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU, + 0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU, + 0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU, + 0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U, + 0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U, + 0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U, + 0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU, + 0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U, + 0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U, +}; +const uint32_t Td4[256] = { + 0x52525252U, 0x09090909U, 0x6a6a6a6aU, 0xd5d5d5d5U, + 0x30303030U, 0x36363636U, 0xa5a5a5a5U, 0x38383838U, + 0xbfbfbfbfU, 0x40404040U, 0xa3a3a3a3U, 0x9e9e9e9eU, + 0x81818181U, 0xf3f3f3f3U, 0xd7d7d7d7U, 0xfbfbfbfbU, + 0x7c7c7c7cU, 0xe3e3e3e3U, 0x39393939U, 0x82828282U, + 0x9b9b9b9bU, 0x2f2f2f2fU, 0xffffffffU, 0x87878787U, + 0x34343434U, 0x8e8e8e8eU, 0x43434343U, 0x44444444U, + 0xc4c4c4c4U, 0xdedededeU, 0xe9e9e9e9U, 0xcbcbcbcbU, + 0x54545454U, 0x7b7b7b7bU, 0x94949494U, 0x32323232U, + 0xa6a6a6a6U, 0xc2c2c2c2U, 0x23232323U, 0x3d3d3d3dU, + 0xeeeeeeeeU, 0x4c4c4c4cU, 0x95959595U, 0x0b0b0b0bU, + 0x42424242U, 0xfafafafaU, 0xc3c3c3c3U, 0x4e4e4e4eU, + 0x08080808U, 0x2e2e2e2eU, 0xa1a1a1a1U, 0x66666666U, + 0x28282828U, 0xd9d9d9d9U, 0x24242424U, 0xb2b2b2b2U, + 0x76767676U, 0x5b5b5b5bU, 0xa2a2a2a2U, 0x49494949U, + 0x6d6d6d6dU, 0x8b8b8b8bU, 0xd1d1d1d1U, 0x25252525U, + 0x72727272U, 0xf8f8f8f8U, 0xf6f6f6f6U, 0x64646464U, + 0x86868686U, 0x68686868U, 0x98989898U, 0x16161616U, + 0xd4d4d4d4U, 0xa4a4a4a4U, 0x5c5c5c5cU, 0xccccccccU, + 0x5d5d5d5dU, 0x65656565U, 0xb6b6b6b6U, 0x92929292U, + 0x6c6c6c6cU, 0x70707070U, 0x48484848U, 0x50505050U, + 0xfdfdfdfdU, 0xededededU, 0xb9b9b9b9U, 0xdadadadaU, + 0x5e5e5e5eU, 0x15151515U, 0x46464646U, 0x57575757U, + 0xa7a7a7a7U, 
0x8d8d8d8dU, 0x9d9d9d9dU, 0x84848484U, + 0x90909090U, 0xd8d8d8d8U, 0xababababU, 0x00000000U, + 0x8c8c8c8cU, 0xbcbcbcbcU, 0xd3d3d3d3U, 0x0a0a0a0aU, + 0xf7f7f7f7U, 0xe4e4e4e4U, 0x58585858U, 0x05050505U, + 0xb8b8b8b8U, 0xb3b3b3b3U, 0x45454545U, 0x06060606U, + 0xd0d0d0d0U, 0x2c2c2c2cU, 0x1e1e1e1eU, 0x8f8f8f8fU, + 0xcacacacaU, 0x3f3f3f3fU, 0x0f0f0f0fU, 0x02020202U, + 0xc1c1c1c1U, 0xafafafafU, 0xbdbdbdbdU, 0x03030303U, + 0x01010101U, 0x13131313U, 0x8a8a8a8aU, 0x6b6b6b6bU, + 0x3a3a3a3aU, 0x91919191U, 0x11111111U, 0x41414141U, + 0x4f4f4f4fU, 0x67676767U, 0xdcdcdcdcU, 0xeaeaeaeaU, + 0x97979797U, 0xf2f2f2f2U, 0xcfcfcfcfU, 0xcecececeU, + 0xf0f0f0f0U, 0xb4b4b4b4U, 0xe6e6e6e6U, 0x73737373U, + 0x96969696U, 0xacacacacU, 0x74747474U, 0x22222222U, + 0xe7e7e7e7U, 0xadadadadU, 0x35353535U, 0x85858585U, + 0xe2e2e2e2U, 0xf9f9f9f9U, 0x37373737U, 0xe8e8e8e8U, + 0x1c1c1c1cU, 0x75757575U, 0xdfdfdfdfU, 0x6e6e6e6eU, + 0x47474747U, 0xf1f1f1f1U, 0x1a1a1a1aU, 0x71717171U, + 0x1d1d1d1dU, 0x29292929U, 0xc5c5c5c5U, 0x89898989U, + 0x6f6f6f6fU, 0xb7b7b7b7U, 0x62626262U, 0x0e0e0e0eU, + 0xaaaaaaaaU, 0x18181818U, 0xbebebebeU, 0x1b1b1b1bU, + 0xfcfcfcfcU, 0x56565656U, 0x3e3e3e3eU, 0x4b4b4b4bU, + 0xc6c6c6c6U, 0xd2d2d2d2U, 0x79797979U, 0x20202020U, + 0x9a9a9a9aU, 0xdbdbdbdbU, 0xc0c0c0c0U, 0xfefefefeU, + 0x78787878U, 0xcdcdcdcdU, 0x5a5a5a5aU, 0xf4f4f4f4U, + 0x1f1f1f1fU, 0xddddddddU, 0xa8a8a8a8U, 0x33333333U, + 0x88888888U, 0x07070707U, 0xc7c7c7c7U, 0x31313131U, + 0xb1b1b1b1U, 0x12121212U, 0x10101010U, 0x59595959U, + 0x27272727U, 0x80808080U, 0xececececU, 0x5f5f5f5fU, + 0x60606060U, 0x51515151U, 0x7f7f7f7fU, 0xa9a9a9a9U, + 0x19191919U, 0xb5b5b5b5U, 0x4a4a4a4aU, 0x0d0d0d0dU, + 0x2d2d2d2dU, 0xe5e5e5e5U, 0x7a7a7a7aU, 0x9f9f9f9fU, + 0x93939393U, 0xc9c9c9c9U, 0x9c9c9c9cU, 0xefefefefU, + 0xa0a0a0a0U, 0xe0e0e0e0U, 0x3b3b3b3bU, 0x4d4d4d4dU, + 0xaeaeaeaeU, 0x2a2a2a2aU, 0xf5f5f5f5U, 0xb0b0b0b0U, + 0xc8c8c8c8U, 0xebebebebU, 0xbbbbbbbbU, 0x3c3c3c3cU, + 0x83838383U, 0x53535353U, 0x99999999U, 0x61616161U, + 0x17171717U, 
0x2b2b2b2bU, 0x04040404U, 0x7e7e7e7eU, + 0xbabababaU, 0x77777777U, 0xd6d6d6d6U, 0x26262626U, + 0xe1e1e1e1U, 0x69696969U, 0x14141414U, 0x63636363U, + 0x55555555U, 0x21212121U, 0x0c0c0c0cU, 0x7d7d7d7dU, +}; + + +/* for 128-bit blocks, Rijndael never uses more than 10 rcon values */ +const uint32_t rcon[] = { + 0x01000000, 0x02000000, 0x04000000, 0x08000000, + 0x10000000, 0x20000000, 0x40000000, 0x80000000, + 0x1B000000, 0x36000000, +}; +#else /* AES_SMALL_TABLES */ +const uint8_t Td4s[256] = { + 0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U, + 0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU, + 0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 0x87U, + 0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU, + 0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU, + 0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU, + 0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U, + 0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U, + 0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U, + 0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U, + 0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU, + 0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U, + 0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU, + 0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U, + 0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U, + 0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU, + 0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU, + 0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U, + 0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U, + 0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU, + 0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U, + 0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU, + 0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U, + 0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U, + 0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U, + 0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 
0x5fU, + 0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU, + 0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU, + 0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U, + 0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U, + 0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U, + 0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU, +}; +const uint8_t rcons[] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36 + /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */ +}; +#endif /* AES_SMALL_TABLES */ +/** + * Expand the cipher key into the encryption key schedule. + * + * @return the number of rounds for the given cipher key size. + */ +int wlan_crypto_rijndaelKeySetupEnc(uint32_t rk[], const uint8_t cipherKey[], + int keyBits) +{ + int i; + uint32_t temp; + int32_t status = -1; + + rk[0] = GETU32(cipherKey); + rk[1] = GETU32(cipherKey + 4); + rk[2] = GETU32(cipherKey + 8); + rk[3] = GETU32(cipherKey + 12); + + if (keyBits == 128) { + for (i = 0; i < 10; i++) { + temp = rk[3]; + rk[4] = rk[0] ^ TE421(temp) ^ TE432(temp) ^ + TE443(temp) ^ TE414(temp) ^ RCON(i); + rk[5] = rk[1] ^ rk[4]; + rk[6] = rk[2] ^ rk[5]; + rk[7] = rk[3] ^ rk[6]; + rk += 4; + } + return 10; + } + + rk[4] = GETU32(cipherKey + 16); + rk[5] = GETU32(cipherKey + 20); + + if (keyBits == 192) { + for (i = 0; i < 8; i++) { + temp = rk[5]; + rk[6] = rk[0] ^ TE421(temp) ^ TE432(temp) ^ + TE443(temp) ^ TE414(temp) ^ RCON(i); + rk[7] = rk[1] ^ rk[6]; + rk[8] = rk[2] ^ rk[7]; + rk[9] = rk[3] ^ rk[8]; + if (i == 7) + return 12; + rk[10] = rk[4] ^ rk[9]; + rk[11] = rk[5] ^ rk[10]; + rk += 6; + } + } + + rk[6] = GETU32(cipherKey + 24); + rk[7] = GETU32(cipherKey + 28); + + if (keyBits == 256) { + for (i = 0; i < 7; i++) { + temp = rk[7]; + rk[8] = rk[0] ^ TE421(temp) ^ TE432(temp) ^ + TE443(temp) ^ TE414(temp) ^ RCON(i); + rk[9] = rk[1] ^ rk[8]; + rk[10] = rk[2] ^ rk[9]; + rk[11] = rk[3] ^ rk[10]; + if (i == 6) + return 14; + temp = rk[11]; + rk[12] = rk[4] ^ 
TE411(temp) ^ TE422(temp) ^ + TE433(temp) ^ TE444(temp); + rk[13] = rk[5] ^ rk[12]; + rk[14] = rk[6] ^ rk[13]; + rk[15] = rk[7] ^ rk[14]; + rk += 8; + } + } + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_internal_dec.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_internal_dec.c new file mode 100644 index 0000000000000000000000000000000000000000..fdf1d1e513b23d66840e2451c6cde847ccc70304 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_internal_dec.c @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + */ +/* + * AES (Rijndael) cipher - decrypt + * + * Modifications to public domain implementation: + * - cleanup + * - use C pre-processor to make it easier to change S table access + * - added option (AES_SMALL_TABLES) for reducing code size by about 8 kB at + * cost of reduced throughput (quite small difference on Pentium 4, + * 10-25% when using -O1 or -O2 optimization) + * + * Copyright (c) 2003-2012, Jouni Malinen + * + * This software may be distributed under the terms of the BSD license. + * See README for more details. + */ +#include +#include +#include +#include "wlan_crypto_aes_i.h" +#include "wlan_crypto_def_i.h" + +/** + * Expand the cipher key into the decryption key schedule. + * + * @return the number of rounds for the given cipher key size. 
+ */ +static int rijndaelKeySetupDec(uint32_t rk[], const uint8_t cipherKey[], + int keyBits){ + int Nr, i, j; + uint32_t temp; + + /* expand the cipher key: */ + Nr = wlan_crypto_rijndaelKeySetupEnc(rk, cipherKey, keyBits); + if (Nr < 0) + return Nr; + /* invert the order of the round keys: */ + for (i = 0, j = 4*Nr; i < j; i += 4, j -= 4) { + temp = rk[i]; rk[i] = rk[j]; rk[j] = temp; + temp = rk[i + 1]; rk[i + 1] = rk[j + 1]; rk[j + 1] = temp; + temp = rk[i + 2]; rk[i + 2] = rk[j + 2]; rk[j + 2] = temp; + temp = rk[i + 3]; rk[i + 3] = rk[j + 3]; rk[j + 3] = temp; + } + /* apply the inverse MixColumn transform to all round keys but the + * first and the last: */ + for (i = 1; i < Nr; i++) { + rk += 4; + for (j = 0; j < 4; j++) { + rk[j] = TD0_(TE4((rk[j] >> 24))) ^ + TD1_(TE4((rk[j] >> 16) & 0xff)) ^ + TD2_(TE4((rk[j] >> 8) & 0xff)) ^ + TD3_(TE4((rk[j]) & 0xff)); + } + } + + return Nr; +} + +void *wlan_crypto_aes_decrypt_init(const uint8_t *key, size_t len) +{ + uint32_t *rk; + int res; + rk = qdf_mem_malloc(AES_PRIV_SIZE); + if (rk == NULL) + return NULL; + res = rijndaelKeySetupDec(rk, key, len * 8); + if (res < 0) { + qdf_mem_free(rk); + return NULL; + } + rk[AES_PRIV_NR_POS] = res; + return rk; +} + +static void rijndaelDecrypt(const uint32_t rk[/*44*/], int Nr, + const uint8_t ct[16], uint8_t pt[16]){ + uint32_t s0, s1, s2, s3, t0, t1, t2, t3; +#ifndef FULL_UNROLL + int r; +#endif /* ?FULL_UNROLL */ + + /* + * map byte array block to cipher state + * and add initial round key: + */ + s0 = GETU32(ct) ^ rk[0]; + s1 = GETU32(ct + 4) ^ rk[1]; + s2 = GETU32(ct + 8) ^ rk[2]; + s3 = GETU32(ct + 12) ^ rk[3]; + +#define ROUND(i, d, s) {\ +d##0 = TD0(s##0) ^ TD1(s##3) ^ TD2(s##2) ^ TD3(s##1) ^ rk[4 * i]; \ +d##1 = TD0(s##1) ^ TD1(s##0) ^ TD2(s##3) ^ TD3(s##2) ^ rk[4 * i + 1]; \ +d##2 = TD0(s##2) ^ TD1(s##1) ^ TD2(s##0) ^ TD3(s##3) ^ rk[4 * i + 2]; \ +d##3 = TD0(s##3) ^ TD1(s##2) ^ TD2(s##1) ^ TD3(s##0) ^ rk[4 * i + 3]; } + +#ifdef FULL_UNROLL + + ROUND(1, t, s); + 
ROUND(2, s, t); + ROUND(3, t, s); + ROUND(4, s, t); + ROUND(5, t, s); + ROUND(6, s, t); + ROUND(7, t, s); + ROUND(8, s, t); + ROUND(9, t, s); + if (Nr > 10) { + ROUND(10, s, t); + ROUND(11, t, s); + if (Nr > 12) { + ROUND(12, s, t); + ROUND(13, t, s); + } + } + + rk += Nr << 2; + +#else /* !FULL_UNROLL */ + + /* Nr - 1 full rounds: */ + r = Nr >> 1; + for (;;) { + ROUND(1, t, s); + rk += 8; + if (--r == 0) + break; + ROUND(0, s, t); + } + +#endif /* ?FULL_UNROLL */ + +#undef ROUND + + /* + * apply last round and + * map cipher state to byte array block: + */ + s0 = TD41(t0) ^ TD42(t3) ^ TD43(t2) ^ TD44(t1) ^ rk[0]; + PUTU32(pt , s0); + s1 = TD41(t1) ^ TD42(t0) ^ TD43(t3) ^ TD44(t2) ^ rk[1]; + PUTU32(pt + 4, s1); + s2 = TD41(t2) ^ TD42(t1) ^ TD43(t0) ^ TD44(t3) ^ rk[2]; + PUTU32(pt + 8, s2); + s3 = TD41(t3) ^ TD42(t2) ^ TD43(t1) ^ TD44(t0) ^ rk[3]; + PUTU32(pt + 12, s3); +} + +void wlan_crypto_aes_decrypt(void *ctx, const uint8_t *crypt, uint8_t *plain) +{ + uint32_t *rk = ctx; + rijndaelDecrypt(ctx, rk[AES_PRIV_NR_POS], crypt, plain); +} + + +void wlan_crypto_aes_decrypt_deinit(void *ctx) +{ + qdf_mem_set(ctx, AES_PRIV_SIZE, 0); + qdf_mem_free(ctx); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_internal_enc.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_internal_enc.c new file mode 100644 index 0000000000000000000000000000000000000000..07bbb15ce11c7a72564520b0b9c6de987e114c61 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_internal_enc.c @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. 
+ */ +/* + * AES (Rijndael) cipher - encrypt + * + * Modifications to public domain implementation: + * - cleanup + * - use C pre-processor to make it easier to change S table access + * - added option (AES_SMALL_TABLES) for reducing code size by about 8 kB at + * cost of reduced throughput (quite small difference on Pentium 4, + * 10-25% when using -O1 or -O2 optimization) + * + * Copyright (c) 2003-2012, Jouni Malinen + * + * This software may be distributed under the terms of the BSD license. + * See README for more details. + */ + +#include +#include +#include +#include "wlan_crypto_aes_i.h" +#include "wlan_crypto_def_i.h" + +static void rijndaelEncrypt(const uint32_t rk[], int Nr, const uint8_t pt[16], + uint8_t ct[16]){ + uint32_t s0, s1, s2, s3, t0, t1, t2, t3; +#ifndef FULL_UNROLL + int r; +#endif /* ?FULL_UNROLL */ + + /* + * map byte array block to cipher state + * and add initial round key: + */ + s0 = GETU32(pt) ^ rk[0]; + s1 = GETU32(pt + 4) ^ rk[1]; + s2 = GETU32(pt + 8) ^ rk[2]; + s3 = GETU32(pt + 12) ^ rk[3]; + +#define ROUND(i, d, s) {\ +d##0 = TE0(s##0) ^ TE1(s##1) ^ TE2(s##2) ^ TE3(s##3) ^ rk[4 * i]; \ +d##1 = TE0(s##1) ^ TE1(s##2) ^ TE2(s##3) ^ TE3(s##0) ^ rk[4 * i + 1]; \ +d##2 = TE0(s##2) ^ TE1(s##3) ^ TE2(s##0) ^ TE3(s##1) ^ rk[4 * i + 2]; \ +d##3 = TE0(s##3) ^ TE1(s##0) ^ TE2(s##1) ^ TE3(s##2) ^ rk[4 * i + 3]; } + +#ifdef FULL_UNROLL + + ROUND(1, t, s); + ROUND(2, s, t); + ROUND(3, t, s); + ROUND(4, s, t); + ROUND(5, t, s); + ROUND(6, s, t); + ROUND(7, t, s); + ROUND(8, s, t); + ROUND(9, t, s); + if (Nr > 10) { + ROUND(10, s, t); + ROUND(11, t, s); + if (Nr > 12) { + ROUND(12, s, t); + ROUND(13, t, s); + } + } + + rk += Nr << 2; + +#else /* !FULL_UNROLL */ + + /* Nr - 1 full rounds: */ + r = Nr >> 1; + for (;;) { + ROUND(1, t, s); + rk += 8; + if (--r == 0) + break; + ROUND(0, s, t); + } + +#endif /* ?FULL_UNROLL */ + +#undef ROUND + + /* + * apply last round and + * map cipher state to byte array block: + */ + s0 = TE41(t0) ^ TE42(t1) ^ 
TE43(t2) ^ TE44(t3) ^ rk[0]; + PUTU32(ct , s0); + s1 = TE41(t1) ^ TE42(t2) ^ TE43(t3) ^ TE44(t0) ^ rk[1]; + PUTU32(ct + 4, s1); + s2 = TE41(t2) ^ TE42(t3) ^ TE43(t0) ^ TE44(t1) ^ rk[2]; + PUTU32(ct + 8, s2); + s3 = TE41(t3) ^ TE42(t0) ^ TE43(t1) ^ TE44(t2) ^ rk[3]; + PUTU32(ct + 12, s3); +} + + +void *wlan_crypto_aes_encrypt_init(const uint8_t *key, size_t len) +{ + uint32_t *rk; + int res; + rk = qdf_mem_malloc(AES_PRIV_SIZE); + if (rk == NULL) + return NULL; + res = wlan_crypto_rijndaelKeySetupEnc(rk, key, len * 8); + if (res < 0) { + qdf_mem_free(rk); + return NULL; + } + rk[AES_PRIV_NR_POS] = res; + return rk; +} + + +void wlan_crypto_aes_encrypt(void *ctx, const uint8_t *plain, uint8_t *crypt) +{ + uint32_t *rk = ctx; + rijndaelEncrypt(ctx, rk[AES_PRIV_NR_POS], plain, crypt); +} + + +void wlan_crypto_aes_encrypt_deinit(void *ctx) +{ + qdf_mem_set(ctx, AES_PRIV_SIZE, 0); + qdf_mem_free(ctx); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_omac1.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_omac1.c new file mode 100644 index 0000000000000000000000000000000000000000..cf0518dd2ad50d0a93ec7685c7478f0d5f9da557 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_omac1.c @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + */ +/* + * One-key CBC MAC (OMAC1) hash with AES + * + * Copyright (c) 2003-2007, Jouni Malinen + * + * This software may be distributed under the terms of the BSD license. + * See README for more details. 
+ */ + +#include +#include +#include +#include "wlan_crypto_aes_i.h" +#include "wlan_crypto_def_i.h" + +static void gf_mulx(uint8_t *pad) +{ + int i, carry; + + carry = pad[0] & 0x80; + for (i = 0; i < AES_BLOCK_SIZE - 1; i++) + pad[i] = (pad[i] << 1) | (pad[i + 1] >> 7); + pad[AES_BLOCK_SIZE - 1] <<= 1; + if (carry) + pad[AES_BLOCK_SIZE - 1] ^= 0x87; +} + + +/** + * omac1_aes_vector - One-Key CBC MAC (OMAC1) hash with AES + * @key: Key for the hash operation + * @key_len: Key length in octets + * @num_elem: Number of elements in the data vector + * @addr: Pointers to the data areas + * @len: Lengths of the data blocks + * @mac: Buffer for MAC (128 bits, i.e., 16 bytes) + * Returns: 0 on success, -1 on failure + * + * This is a mode for using block cipher (AES in this case) for authentication. + * OMAC1 was standardized with the name CMAC by NIST in a Special Publication + * (SP) 800-38B. + */ +int omac1_aes_vector(const uint8_t *key, size_t key_len, size_t num_elem, + const uint8_t *addr[], const size_t *len, uint8_t *mac) +{ + void *ctx; + const uint8_t *pos, *end; + int32_t status = -1; + size_t i, e, left, total_len; + uint8_t cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE]; + + + ctx = wlan_crypto_aes_encrypt_init(key, key_len); + if (ctx == NULL) + return status; + + total_len = 0; + for (e = 0; e < num_elem; e++) + total_len += len[e]; + left = total_len; + + qdf_mem_set(cbc, AES_BLOCK_SIZE, 0); + + e = 0; + pos = addr[0]; + end = pos + len[0]; + + while (left >= AES_BLOCK_SIZE) { + for (i = 0; i < AES_BLOCK_SIZE; i++) { + cbc[i] ^= *pos++; + if (pos >= end) { + /* + * Stop if there are no more bytes to process + * since there are no more entries in the array. 
+ */ + if (i + 1 == AES_BLOCK_SIZE && + left == AES_BLOCK_SIZE) + break; + e++; + pos = addr[e]; + end = pos + len[e]; + } + } + if (left > AES_BLOCK_SIZE) + wlan_crypto_aes_encrypt(ctx, cbc, cbc); + left -= AES_BLOCK_SIZE; + } + + qdf_mem_set(pad, AES_BLOCK_SIZE, 0); + wlan_crypto_aes_encrypt(ctx, pad, pad); + gf_mulx(pad); + + if (left || total_len == 0) { + for (i = 0; i < left; i++) { + cbc[i] ^= *pos++; + if (pos >= end) { + /* + * Stop if there are no more bytes to process + * since there are no more entries in the array. + */ + if (i + 1 == left) + break; + e++; + pos = addr[e]; + end = pos + len[e]; + } + } + cbc[left] ^= 0x80; + gf_mulx(pad); + } + + for (i = 0; i < AES_BLOCK_SIZE; i++) + pad[i] ^= cbc[i]; + wlan_crypto_aes_encrypt(ctx, pad, mac); + wlan_crypto_aes_encrypt_deinit(ctx); + return 0; +} + + +/** + * omac1_aes_128_vector - One-Key CBC MAC (OMAC1) hash with AES-128 + * @key: 128-bit key for the hash operation + * @num_elem: Number of elements in the data vector + * @addr: Pointers to the data areas + * @len: Lengths of the data blocks + * @mac: Buffer for MAC (128 bits, i.e., 16 bytes) + * Returns: 0 on success, -1 on failure + * + * This is a mode for using block cipher (AES in this case) for authentication. + * OMAC1 was standardized with the name CMAC by NIST in a Special Publication + * (SP) 800-38B. + */ +int omac1_aes_128_vector(const uint8_t *key, size_t num_elem, + const uint8_t *addr[], const size_t *len, uint8_t *mac) +{ + return omac1_aes_vector(key, 16, num_elem, addr, len, mac); +} + + +/** + * omac1_aes_128 - One-Key CBC MAC (OMAC1) hash with AES-128 (aka AES-CMAC) + * @key: 128-bit key for the hash operation + * @data: Data buffer for which a MAC is determined + * @data_len: Length of data buffer in bytes + * @mac: Buffer for MAC (128 bits, i.e., 16 bytes) + * Returns: 0 on success, -1 on failure + * + * This is a mode for using block cipher (AES in this case) for authentication. 
+ * OMAC1 was standardized with the name CMAC by NIST in a Special Publication + * (SP) 800-38B. + */ +int omac1_aes_128(const uint8_t *key, const uint8_t *data, + size_t data_len, uint8_t *mac){ + return omac1_aes_128_vector(key, 1, &data, &data_len, mac); +} + + +/** + * omac1_aes_256 - One-Key CBC MAC (OMAC1) hash with AES-256 (aka AES-CMAC) + * @key: 256-bit key for the hash operation + * @data: Data buffer for which a MAC is determined + * @data_len: Length of data buffer in bytes + * @mac: Buffer for MAC (128 bits, i.e., 16 bytes) + * Returns: 0 on success, -1 on failure + * + * This is a mode for using block cipher (AES in this case) for authentication. + * OMAC1 was standardized with the name CMAC by NIST in a Special Publication + * (SP) 800-38B. + */ +int omac1_aes_256(const uint8_t *key, const uint8_t *data, + size_t data_len, uint8_t *mac){ + return omac1_aes_vector(key, 32, 1, &data, &data_len, mac); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_siv.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_siv.c new file mode 100644 index 0000000000000000000000000000000000000000..8186b77f8da92a44757e5d89f25577d19455e8bb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_siv.c @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + */ + +/* + * AES SIV (RFC 5297) + * Copyright (c) 2013 Cozybit, Inc. + * + * This software may be distributed under the terms of the BSD license. + * See README for more details. 
+ */ + +#ifdef WLAN_SUPPORT_FILS + +#include +#include +#include "wlan_crypto_aes_ctr_i.h" + +static const uint8_t zero[AES_BLOCK_SIZE]; + +static void dbl(uint8_t *pad) +{ + int32_t i, carry; + + carry = pad[0] & 0x80; + for (i = 0; i < AES_BLOCK_SIZE - 1; i++) + pad[i] = (pad[i] << 1) | (pad[i + 1] >> 7); + pad[AES_BLOCK_SIZE - 1] <<= 1; + if (carry) + pad[AES_BLOCK_SIZE - 1] ^= 0x87; +} + +static void xor(uint8_t *a, const uint8_t *b) +{ + int32_t i; + + for (i = 0; i < AES_BLOCK_SIZE; i++) + *a++ ^= *b++; +} + +static void xorend(uint8_t *a, int32_t alen, const uint8_t *b, int32_t blen) +{ + int32_t i; + + if (alen < blen) + return; + + for (i = 0; i < blen; i++) + a[alen - blen + i] ^= b[i]; +} + +static void pad_block(uint8_t *pad, const uint8_t *addr, size_t len) +{ + qdf_mem_zero(pad, AES_BLOCK_SIZE); + qdf_mem_copy(pad, addr, len); + + if (len < AES_BLOCK_SIZE) + pad[len] = 0x80; +} + +static int32_t +aes_s2v(const uint8_t *key, size_t key_len, size_t num_elem, + const uint8_t *addr[], size_t *len, uint8_t *mac) +{ + uint8_t tmp[AES_BLOCK_SIZE], tmp2[AES_BLOCK_SIZE]; + uint8_t *buf = NULL; + int32_t ret = -1; + size_t i; + const uint8_t *data[1]; + size_t data_len[1]; + + if (!num_elem) { + qdf_mem_copy(tmp, zero, sizeof(zero)); + tmp[AES_BLOCK_SIZE - 1] = 1; + data[0] = tmp; + data_len[0] = sizeof(tmp); + return omac1_aes_vector(key, key_len, 1, data, data_len, mac); + } + + data[0] = zero; + data_len[0] = sizeof(zero); + ret = omac1_aes_vector(key, key_len, 1, data, data_len, tmp); + if (ret) + return ret; + + for (i = 0; i < num_elem - 1; i++) { + ret = omac1_aes_vector(key, key_len, 1, &addr[i], &len[i], + tmp2); + if (ret) + return ret; + + dbl(tmp); + xor(tmp, tmp2); + } + if (len[i] >= AES_BLOCK_SIZE) { + buf = OS_MALLOC(NULL, len[i], GFP_ATOMIC); + if (!buf) + return -ENOMEM; + + qdf_mem_copy(buf, addr[i], len[i]); + xorend(buf, len[i], tmp, AES_BLOCK_SIZE); + data[0] = buf; + ret = omac1_aes_vector(key, key_len, 1, data, &len[i], mac); + 
memset(buf, 0, len[i]); + OS_FREE(buf); + return ret; + } + + dbl(tmp); + pad_block(tmp2, addr[i], len[i]); + xor(tmp, tmp2); + + data[0] = tmp; + data_len[0] = sizeof(tmp); + + return omac1_aes_vector(key, key_len, 1, data, data_len, mac); +} + +int32_t wlan_crypto_aes_siv_encrypt(const uint8_t *key, size_t key_len, + const uint8_t *pw, size_t pwlen, + size_t num_elem, const uint8_t *addr[], + const size_t *len, uint8_t *out) +{ + const uint8_t *_addr[6]; + size_t _len[6]; + const uint8_t *k1, *k2; + uint8_t v[AES_BLOCK_SIZE]; + size_t i; + uint8_t *iv, *crypt_pw; + int32_t status = -1; + + if (num_elem > ARRAY_SIZE(_addr) - 1 || + (key_len != 32 && key_len != 48 && key_len != 64)) + return status; + + key_len /= 2; + k1 = key; + k2 = key + key_len; + + for (i = 0; i < num_elem; i++) { + _addr[i] = addr[i]; + _len[i] = len[i]; + } + _addr[num_elem] = pw; + _len[num_elem] = pwlen; + + if (aes_s2v(k1, key_len, num_elem + 1, _addr, _len, v)) + return status; + + iv = out; + crypt_pw = out + AES_BLOCK_SIZE; + + qdf_mem_copy(iv, v, AES_BLOCK_SIZE); + qdf_mem_copy(crypt_pw, pw, pwlen); + + /* zero out 63rd and 31st bits of ctr (from right) */ + v[8] &= 0x7f; + v[12] &= 0x7f; + + return wlan_crypto_aes_ctr_encrypt(k2, key_len, v, crypt_pw, pwlen); +} + +int32_t wlan_crypto_aes_siv_decrypt(const uint8_t *key, size_t key_len, + const uint8_t *iv_crypt, size_t iv_c_len, + size_t num_elem, const uint8_t *addr[], + const size_t *len, uint8_t *out) +{ + const uint8_t *_addr[6]; + size_t _len[6]; + const uint8_t *k1, *k2; + size_t crypt_len; + size_t i; + int32_t ret = -1; + uint8_t iv[AES_BLOCK_SIZE]; + uint8_t check[AES_BLOCK_SIZE]; + + if (iv_c_len < AES_BLOCK_SIZE || num_elem > ARRAY_SIZE(_addr) - 1 || + (key_len != 32 && key_len != 48 && key_len != 64)) + return ret; + + crypt_len = iv_c_len - AES_BLOCK_SIZE; + key_len /= 2; + k1 = key; + k2 = key + key_len; + + for (i = 0; i < num_elem; i++) { + _addr[i] = addr[i]; + _len[i] = len[i]; + } + _addr[num_elem] = out; + 
_len[num_elem] = crypt_len; + + qdf_mem_copy(iv, iv_crypt, AES_BLOCK_SIZE); + qdf_mem_copy(out, iv_crypt + AES_BLOCK_SIZE, crypt_len); + + iv[8] &= 0x7f; + iv[12] &= 0x7f; + + ret = wlan_crypto_aes_ctr_encrypt(k2, key_len, iv, out, crypt_len); + if (ret) + return ret; + + ret = aes_s2v(k1, key_len, num_elem + 1, _addr, _len, check); + if (ret) + return ret; + + if (qdf_mem_cmp(check, iv_crypt, AES_BLOCK_SIZE) == 0) + return 0; + + return ret; +} + +#endif /* WLAN_SUPPORT_FILS */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_siv_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_siv_i.h new file mode 100644 index 0000000000000000000000000000000000000000..f8192bb9e6464d4b6b668b8e148ca419856d2938 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_aes_siv_i.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + */ + +/* + * AES SIV (RFC 5297) + * Copyright (c) 2013 Cozybit, Inc. + * + * This software may be distributed under the terms of the BSD license. + * See README for more details. 
+ */ + +#ifndef _WLAN_CRYPTO_AES_SIV_I_H_ +#define _WLAN_CRYPTO_AES_SIV_I_H_ + +#ifdef WLAN_SUPPORT_FILS +int32_t wlan_crypto_aes_siv_encrypt(const uint8_t *key, size_t key_len, + const uint8_t *pw, size_t pwlen, + size_t num_elem, const uint8_t *addr[], + const size_t *len, uint8_t *out); + +int32_t wlan_crypto_aes_siv_decrypt(const uint8_t *key, size_t key_len, + const uint8_t *iv_crypt, size_t iv_c_len, + size_t num_elem, const uint8_t *addr[], + const size_t *len, uint8_t *out); +#endif /* WLAN_SUPPORT_FILS */ + +#endif /* end of _WLAN_CRYPTO_AES_SIV_I_H_ */ + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_ccmp.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_ccmp.c new file mode 100644 index 0000000000000000000000000000000000000000..ee122b7a741ced2033ee8c1bbeef5eb2f653deb7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_ccmp.c @@ -0,0 +1,319 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + + /** + * DOC: Private API for handling CCMP related operations + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "wlan_crypto_global_def.h" +#include "wlan_crypto_def_i.h" +#include "wlan_crypto_main_i.h" +#include "wlan_crypto_obj_mgr_i.h" + +#define MAX_CCMP_PN_GAP_ERR_CHECK 0 + +static QDF_STATUS ccmp_setkey(struct wlan_crypto_key *key) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS ccmp_encap(struct wlan_crypto_key *key, + qdf_nbuf_t wbuf, + uint8_t encapdone, + uint8_t hdrlen){ + uint8_t *ivp; + struct ieee80211_hdr *hdr; + struct wlan_crypto_cipher *cipher_table; + + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + + hdr = (struct ieee80211_hdr *)qdf_nbuf_data(wbuf); + + /* + * Copy down 802.11 header and add the IV, KeyID, and ExtIV. + */ + if (encapdone) { + ivp = (uint8_t *)qdf_nbuf_data(wbuf); + } else { + uint8_t ivmic_len = cipher_table->header + cipher_table->miclen; + ivp = (uint8_t *)qdf_nbuf_push_head(wbuf, ivmic_len); + qdf_mem_move(ivp, ivp + ivmic_len, hdrlen); + + qdf_mem_move(ivp + hdrlen + cipher_table->header, + ivp + hdrlen + ivmic_len, + (qdf_nbuf_len(wbuf) - hdrlen - ivmic_len)); + + ivp = (uint8_t *) qdf_nbuf_data(wbuf); + } + + ivp += hdrlen; + /* XXX wrap at 48 bits */ + key->keytsc++; + + ivp[0] = key->keytsc >> 0; /* PN0 */ + ivp[1] = key->keytsc >> 8; /* PN1 */ + ivp[2] = 0; /* Reserved */ + ivp[3] = (key->keyix << 6)| WLAN_CRYPTO_EXT_IV_BIT;/* KeyID | ExtID */ + ivp[4] = key->keytsc >> 16; /* PN2 */ + ivp[5] = key->keytsc >> 24; /* PN3 */ + ivp[6] = key->keytsc >> 32; /* PN4 */ + ivp[7] = key->keytsc >> 40; /* PN5 */ + + /* + * Finally, do software encrypt if neeed. 
+ */ + if (key->flags & WLAN_CRYPTO_KEY_SWENCRYPT) { + if (!wlan_crypto_ccmp_encrypt(key->keyval, + qdf_nbuf_data(wbuf), + qdf_nbuf_len(wbuf), hdrlen)) { + return QDF_STATUS_CRYPTO_ENCRYPT_FAILED; + } + } + + return QDF_STATUS_SUCCESS; +} + +#define WLAN_CRYPTO_CCMP_PN_MAX(pn) (pn + MAX_CCMP_PN_GAP_ERR_CHECK) + +static QDF_STATUS ccmp_decap(struct wlan_crypto_key *key, + qdf_nbuf_t wbuf, + uint8_t tid, + uint8_t hdrlen){ + struct ieee80211_hdr *hdr; + uint8_t *ivp, *origHdr; + uint64_t pn; + uint8_t update_keyrsc = 1; + struct wlan_crypto_cipher *cipher_table; + + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + + /* + * Header should have extended IV and sequence number; + * verify the former and validate the latter. + */ + origHdr = (uint8_t *)qdf_nbuf_data(wbuf); + hdr = (struct ieee80211_hdr *)origHdr; + + ivp = origHdr + hdrlen; + + if ((ivp[WLAN_CRYPTO_IV_LEN] & WLAN_CRYPTO_EXT_IV_BIT) == 0) { + /*invalid CCMP iv*/ + return QDF_STATUS_E_INVAL; + } + + tid = wlan_get_tid(qdf_nbuf_data(wbuf)); + + pn = READ_6(ivp[0], ivp[1], ivp[4], ivp[5], ivp[6], ivp[7]); + + if (pn <= key->keyrsc[tid]) { + /* + * Replay violation. + */ + return QDF_STATUS_CRYPTO_PN_ERROR; + } + + if ((key->flags & WLAN_CRYPTO_KEY_SWDECRYPT)) { + if (!wlan_crypto_ccmp_decrypt(key->keyval, + (struct ieee80211_hdr *)origHdr, + (origHdr + hdrlen), + (qdf_nbuf_len(wbuf) - hdrlen))) { + return QDF_STATUS_CRYPTO_DECRYPT_FAILED; + } + } + + /* we can get corrupted frame that has a bad PN. + * The PN upper bits tend to get corrupted. + * The PN should be a monotically increasing counter. + * if we detected a big jump, then we will throw away this frame. 
+ */ + if ((key->keyrsc[tid] > 1) && + (pn > (WLAN_CRYPTO_CCMP_PN_MAX(key->keyrsc[tid])))) { + /* PN jump wrt keyrsc is > MAX_CCMP_PN_GAP_ERR_CHECK - + * PN of current frame is suspected + */ + if (key->keyrsc_suspect[tid]) { + /* Check whether PN of the current frame + * is following prev PN seq or not + */ + if (pn < key->keyrsc_suspect[tid]) { + /* PN number of the curr frame < PN no of prev + * rxed frame. As we are not sure about prev + * suspect PN, to detect replay, check the + * current PN with global PN + */ + if (pn < key->keyglobal) + /* Replay violation */ + return QDF_STATUS_CRYPTO_PN_ERROR; + else { + /* Current PN is following global PN, + * so mark this as suspected PN + * Don't update keyrsc & keyglobal + */ + key->keyrsc_suspect[tid] = pn; + update_keyrsc = 0; + } + } else if (pn < + (WLAN_CRYPTO_CCMP_PN_MAX(key->keyrsc_suspect[tid]))) { + /* Current PN is following prev suspected + * PN seq Update keyrsc & keyglobal + * (update_keyrsc = 1;) + */ + } else { + /* Current PN is neither following prev + * suspected PN nor prev Keyrsc. + * Mark this as new suspect and + * don't update keyrsc & keyglobal + */ + key->keyrsc_suspect[tid] = pn; + update_keyrsc = 0; + } + } else { + /* New Jump in PN observed + * So mark this PN as suspected and + * don't update keyrsc/keyglobal */ + key->keyrsc_suspect[tid] = pn; + update_keyrsc = 0; + } + } else { + /* Valid PN, update keyrsc & keyglobal (update_keyrsc = 1;) */ + } + + /* + * Copy up 802.11 header and strip crypto bits. + */ + if (!(key->flags & WLAN_CRYPTO_KEY_SWDECRYPT)) { + qdf_mem_move(origHdr + cipher_table->header, origHdr, hdrlen); + qdf_nbuf_pull_head(wbuf, cipher_table->header); + qdf_nbuf_trim_tail(wbuf, cipher_table->trailer + + cipher_table->miclen); + } else { + qdf_nbuf_trim_tail(wbuf, cipher_table->header + + cipher_table->miclen); + } + + if (update_keyrsc) { + /* + * Ok to update rsc now. 
+ */ + key->keyrsc[tid] = pn; + key->keyglobal = pn; + key->keyrsc_suspect[tid] = 0; + } + return QDF_STATUS_SUCCESS; +} +static QDF_STATUS ccmp_enmic(struct wlan_crypto_key *key, + qdf_nbuf_t wbuf, + uint8_t keyid, + uint8_t hdrlen){ + + return QDF_STATUS_SUCCESS; +} +static QDF_STATUS ccmp_demic(struct wlan_crypto_key *key, + qdf_nbuf_t wbuf, + uint8_t keyid, + uint8_t hdrlen){ + return QDF_STATUS_SUCCESS; +} + + +const struct wlan_crypto_cipher ccmp_cipher_table = { + "AES-CCM", + WLAN_CRYPTO_CIPHER_AES_CCM, + WLAN_CRYPTO_IV_LEN + WLAN_CRYPTO_KEYID_LEN + WLAN_CRYPTO_EXT_IV_LEN, + 0, + WLAN_CRYPTO_MIC_LEN, + 128, + ccmp_setkey, + ccmp_encap, + ccmp_decap, + ccmp_enmic, + ccmp_demic, +}; + + +const struct wlan_crypto_cipher ccmp256_cipher_table = { + "AES-CCM256", + WLAN_CRYPTO_CIPHER_AES_CCM_256, + WLAN_CRYPTO_IV_LEN + WLAN_CRYPTO_KEYID_LEN + WLAN_CRYPTO_EXT_IV_LEN, + 0, + WLAN_CRYPTO_MIC256_LEN, + 256, + ccmp_setkey, + ccmp_encap, + ccmp_decap, + ccmp_enmic, + ccmp_demic, +}; + + +const struct wlan_crypto_cipher gcmp_cipher_table = { + "AES-GCM", + WLAN_CRYPTO_CIPHER_AES_GCM, + WLAN_CRYPTO_IV_LEN + WLAN_CRYPTO_KEYID_LEN + WLAN_CRYPTO_EXT_IV_LEN, + 0, + WLAN_CRYPTO_MIC_LEN, + 128, + ccmp_setkey, + ccmp_encap, + ccmp_decap, + ccmp_enmic, + ccmp_demic, +}; + + +const struct wlan_crypto_cipher gcmp256_cipher_table = { + "AES-GCM256", + WLAN_CRYPTO_CIPHER_AES_GCM_256, + WLAN_CRYPTO_IV_LEN + WLAN_CRYPTO_KEYID_LEN + WLAN_CRYPTO_EXT_IV_LEN, + 0, + WLAN_CRYPTO_MIC256_LEN, + 256, + ccmp_setkey, + ccmp_encap, + ccmp_decap, + ccmp_enmic, + ccmp_demic, +}; + +const struct wlan_crypto_cipher *ccmp_register(void) +{ + return &ccmp_cipher_table; +} + +const struct wlan_crypto_cipher *ccmp256_register(void) +{ + return &ccmp256_cipher_table; +} + +const struct wlan_crypto_cipher *gcmp_register(void) +{ + return &gcmp_cipher_table; +} + +const struct wlan_crypto_cipher *gcmp256_register(void) +{ + return &gcmp256_cipher_table; +} diff --git 
a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_ccmp_sw.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_ccmp_sw.c new file mode 100644 index 0000000000000000000000000000000000000000..cfd5437f18457883d5e4eff507c02df166a37f1e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_ccmp_sw.c @@ -0,0 +1,284 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + */ +/* + * CTR with CBC-MAC Protocol (CCMP) + * Copyright (c) 2010-2012, Jouni Malinen + * + * This software may be distributed under the terms of the BSD license. + * See README for more details. + */ + +#include +#include +#include +#include "wlan_crypto_aes_i.h" +#include "wlan_crypto_def_i.h" + +static void ccmp_aad_nonce(const struct ieee80211_hdr *hdr, const uint8_t *data, + uint8_t *aad, size_t *aad_len, uint8_t *nonce) +{ + uint16_t seq; + uint8_t stype; + int qos = 0, addr4 = 0; + uint8_t *pos; + + nonce[0] = 0; + + stype = WLAN_FC0_GET_STYPE(hdr->frame_control[0]); + if ((hdr->frame_control[1] & WLAN_FC1_DIR_MASK) == + (WLAN_FC1_DSTODS)) + addr4 = 1; + + if (WLAN_FC0_GET_TYPE(hdr->frame_control[0]) == WLAN_FC0_TYPE_DATA) { + aad[0] &= ~0x70; /* Mask subtype bits */ + if (stype & 0x08) { + const uint8_t *qc; + qos = 1; + aad[1] &= ~WLAN_FC1_ORDER; + qc = (const uint8_t *) (hdr + 1); + if (addr4) + qc += WLAN_ALEN; + nonce[0] = qc[0] & 0x0f; + } + } else if (WLAN_FC0_GET_TYPE(hdr->frame_control[0]) + == WLAN_FC0_TYPE_MGMT) { + nonce[0] |= 0x10; /* Management */ + } + + aad[1] &= ~(WLAN_FC1_RETRY | WLAN_FC1_PWRMGT | WLAN_FC1_MOREDATA); + aad[1] |= WLAN_FC1_ISWEP; + pos = aad + 2; + qdf_mem_copy(pos, hdr->addr1, 3 * WLAN_ALEN); + pos += 3 * WLAN_ALEN; + seq = qdf_le16_to_cpu(*((uint16_t *)&hdr->seq_ctrl[0])); + seq &= ~0xfff0; /* Mask Seq#; do not modify Frag# */ + wlan_crypto_put_le16(pos, seq); + pos += 2; + + qdf_mem_copy(pos, hdr + 1, addr4 * WLAN_ALEN + qos * 2); + pos += addr4 * 
WLAN_ALEN; + if (qos) { + pos[0] &= ~0x70; + if (1 /* FIX: either device has SPP A-MSDU Capab = 0 */) + pos[0] &= ~0x80; + pos++; + *pos++ = 0x00; + } + + *aad_len = pos - aad; + + qdf_mem_copy(nonce + 1, hdr->addr2, WLAN_ALEN); + nonce[7] = data[7]; /* PN5 */ + nonce[8] = data[6]; /* PN4 */ + nonce[9] = data[5]; /* PN3 */ + nonce[10] = data[4]; /* PN2 */ + nonce[11] = data[1]; /* PN1 */ + nonce[12] = data[0]; /* PN0 */ +} + + +uint8_t *wlan_crypto_ccmp_decrypt(const uint8_t *tk, + const struct ieee80211_hdr *hdr, + uint8_t *data, size_t data_len){ + uint8_t aad[30], nonce[13]; + size_t aad_len; + size_t mlen; + uint8_t *plain; + + if (data_len < CCMP_IV_SIZE + WLAN_CRYPTO_MIC_LEN) + return NULL; + + plain = qdf_mem_malloc(data_len + AES_BLOCK_SIZE); + if (plain == NULL) { + qdf_print("%s[%d] mem alloc failed\n", __func__, __LINE__); + return NULL; + } + + mlen = data_len - CCMP_IV_SIZE - WLAN_CRYPTO_MIC_LEN; + + qdf_mem_set(aad, sizeof(aad), 0); + ccmp_aad_nonce(hdr, data, aad, &aad_len, nonce); + wpa_hexdump(MSG_EXCESSIVE, "CCMP AAD", aad, aad_len); + wpa_hexdump(MSG_EXCESSIVE, "CCMP nonce", nonce, 13); + + if (wlan_crypto_aes_ccm_ad(tk, 16, nonce, + WLAN_CRYPTO_MIC_LEN, + data + CCMP_IV_SIZE, mlen, + aad, aad_len, + data + CCMP_IV_SIZE + mlen, + plain) < 0) { + /*uint16_t seq_ctrl = qdf_le16_to_cpu(hdr->seq_ctrl); + wpa_printf(MSG_INFO, "Invalid CCMP MIC in frame: A1=" MACSTR + " A2=" MACSTR " A3=" MACSTR " seq=%u frag=%u", + MAC2STR(hdr->addr1), MAC2STR(hdr->addr2), + MAC2STR(hdr->addr3), + WLAN_GET_SEQ_SEQ(seq_ctrl), + WLAN_GET_SEQ_FRAG(seq_ctrl));*/ + qdf_mem_free(plain); + return NULL; + } + wpa_hexdump(MSG_EXCESSIVE, "CCMP decrypted", plain, mlen); + + qdf_mem_copy(data, plain, data_len); + qdf_mem_free(plain); + return data; +} + + +void ccmp_get_pn(uint8_t *pn, const uint8_t *data) +{ + pn[0] = data[7]; /* PN5 */ + pn[1] = data[6]; /* PN4 */ + pn[2] = data[5]; /* PN3 */ + pn[3] = data[4]; /* PN2 */ + pn[4] = data[1]; /* PN1 */ + pn[5] = data[0]; /* PN0 */ 
+} + + +uint8_t *wlan_crypto_ccmp_encrypt(const uint8_t *tk, uint8_t *frame, + size_t len, size_t hdrlen){ + uint8_t aad[30], nonce[13]; + size_t aad_len, plen; + uint8_t *crypt, *pos; + struct ieee80211_hdr *hdr; + + if (len < hdrlen || hdrlen < 24) + return NULL; + plen = len - hdrlen - CCMP_IV_SIZE - WLAN_CRYPTO_MIC_LEN; + + crypt = qdf_mem_malloc(hdrlen + CCMP_IV_SIZE + plen + + WLAN_CRYPTO_MIC_LEN + AES_BLOCK_SIZE); + if (crypt == NULL) { + qdf_print("%s[%d] mem alloc failed\n", __func__, __LINE__); + return NULL; + } + + qdf_mem_copy(crypt, frame, hdrlen + CCMP_IV_SIZE); + + hdr = (struct ieee80211_hdr *) crypt; + hdr->frame_control[1] |= WLAN_FC1_ISWEP; + pos = crypt + hdrlen + 8; + + qdf_mem_set(aad, sizeof(aad), 0); + ccmp_aad_nonce(hdr, crypt + hdrlen, aad, &aad_len, nonce); + wpa_hexdump(MSG_EXCESSIVE, "CCMP AAD", aad, aad_len); + wpa_hexdump(MSG_EXCESSIVE, "CCMP nonce", nonce, 13); + + if (wlan_crypto_aes_ccm_ae(tk, 16, nonce, WLAN_CRYPTO_MIC_LEN, + frame + hdrlen + CCMP_IV_SIZE, + plen, aad, aad_len, pos, pos + plen) < 0) { + qdf_mem_free(crypt); + return NULL; + } + + qdf_mem_copy(frame, crypt, len); + wpa_hexdump(MSG_EXCESSIVE, "CCMP encrypted", + crypt + hdrlen + CCMP_IV_SIZE, plen); + qdf_mem_free(crypt); + + return frame; +} + + +uint8_t *wlan_crypto_ccmp_256_decrypt(const uint8_t *tk, + const struct ieee80211_hdr *hdr, + const uint8_t *data, size_t data_len, + size_t *decrypted_len){ + uint8_t aad[30], nonce[13]; + size_t aad_len; + size_t mlen; + uint8_t *plain; + + if (data_len < CCMP_IV_SIZE + WLAN_CRYPTO_MIC256_LEN) + return NULL; + + plain = qdf_mem_malloc(data_len + AES_BLOCK_SIZE); + if (plain == NULL) { + qdf_print("%s[%d] mem alloc failed\n", __func__, __LINE__); + return NULL; + } + + mlen = data_len - CCMP_IV_SIZE - WLAN_CRYPTO_MIC256_LEN; + + qdf_mem_set(aad, sizeof(aad), 0); + ccmp_aad_nonce(hdr, data, aad, &aad_len, nonce); + wpa_hexdump(MSG_EXCESSIVE, "CCMP-256 AAD", aad, aad_len); + wpa_hexdump(MSG_EXCESSIVE, "CCMP-256 nonce", 
nonce, 13); + + if (wlan_crypto_aes_ccm_ad(tk, 32, nonce, WLAN_CRYPTO_MIC256_LEN, + data + CCMP_IV_SIZE, mlen, + aad, aad_len, + data + CCMP_IV_SIZE + mlen, + plain) < 0) { + /*uint16_t seq_ctrl = qdf_le16_to_cpu(hdr->seq_ctrl); + wpa_printf(MSG_INFO, "Invalid CCMP-256 MIC in frame: A1=" MACSTR + " A2=" MACSTR " A3=" MACSTR " seq=%u frag=%u", + MAC2STR(hdr->addr1), MAC2STR(hdr->addr2), + MAC2STR(hdr->addr3), + WLAN_GET_SEQ_SEQ(seq_ctrl), + WLAN_GET_SEQ_FRAG(seq_ctrl));*/ + qdf_mem_free(plain); + return NULL; + } + wpa_hexdump(MSG_EXCESSIVE, "CCMP-256 decrypted", plain, mlen); + + *decrypted_len = mlen; + return plain; +} + + +uint8_t *wlan_crypto_ccmp_256_encrypt(const uint8_t *tk, uint8_t *frame, + size_t len, size_t hdrlen, uint8_t *qos, + uint8_t *pn, int keyid, + size_t *encrypted_len){ + uint8_t aad[30], nonce[13]; + size_t aad_len, plen; + uint8_t *crypt, *pos; + struct ieee80211_hdr *hdr; + + if (len < hdrlen || hdrlen < 24) + return NULL; + plen = len - hdrlen; + + crypt = qdf_mem_malloc(hdrlen + CCMP_IV_SIZE + plen + + WLAN_CRYPTO_MIC256_LEN + AES_BLOCK_SIZE); + if (crypt == NULL) { + qdf_print("%s[%d] mem alloc failed\n", __func__, __LINE__); + return NULL; + } + + qdf_mem_copy(crypt, frame, hdrlen); + hdr = (struct ieee80211_hdr *) crypt; + hdr->frame_control[1] |= WLAN_FC1_ISWEP; + pos = crypt + hdrlen; + *pos++ = pn[5]; /* PN0 */ + *pos++ = pn[4]; /* PN1 */ + *pos++ = 0x00; /* Rsvd */ + *pos++ = 0x20 | (keyid << 6); + *pos++ = pn[3]; /* PN2 */ + *pos++ = pn[2]; /* PN3 */ + *pos++ = pn[1]; /* PN4 */ + *pos++ = pn[0]; /* PN5 */ + + qdf_mem_set(aad, sizeof(aad), 0); + ccmp_aad_nonce(hdr, crypt + hdrlen, aad, &aad_len, nonce); + wpa_hexdump(MSG_EXCESSIVE, "CCMP-256 AAD", aad, aad_len); + wpa_hexdump(MSG_EXCESSIVE, "CCMP-256 nonce", nonce, 13); + + if (wlan_crypto_aes_ccm_ae(tk, 32, nonce, WLAN_CRYPTO_MIC256_LEN, + frame + hdrlen, plen, + aad, aad_len, pos, pos + plen) < 0) { + qdf_mem_free(crypt); + return NULL; + } + + wpa_hexdump(MSG_EXCESSIVE, "CCMP-256 
encrypted", crypt + hdrlen + 8, + plen); + + *encrypted_len = hdrlen + CCMP_IV_SIZE + + plen + WLAN_CRYPTO_MIC256_LEN; + + return crypt; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_crc32.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_crc32.c new file mode 100644 index 0000000000000000000000000000000000000000..bdf928f251091ec9a92c21af70d71ca066a7b0cd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_crc32.c @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + */ +/* + * 32-bit CRC for FCS calculation + * Copyright (c) 2010, Jouni Malinen + * + * This software may be distributed under the terms of the BSD license. + * See README for more details. + */ +#include "qdf_types.h" +/* + * IEEE 802.11 FCS CRC32 + * G(x) = x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + + * x^5 + x^4 + x^2 + x + 1 + */ +static const uint32_t crc32_table[256] = { + 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, + 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, + 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, + 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, + 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856, + 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, + 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, + 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, + 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, + 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a, + 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, + 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, + 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, + 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, + 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e, + 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 
/*
 * IEEE 802.11 FCS CRC32
 * G(x) = x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 +
 * x^5 + x^4 + x^2 + x + 1
 *
 * Byte-at-a-time lookup table for the reflected polynomial 0xEDB88320.
 */
static const uint32_t crc32_table[256] = {
	0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
	0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
	0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
	0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
	0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
	0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
	0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
	0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
	0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
	0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
	0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
	0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
	0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
	0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
	0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
	0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
	0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
	0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
	0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
	0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
	0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
	0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
	0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
	0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
	0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
	0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
	0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
	0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
	0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
	0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
	0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
	0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
	0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
	0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
	0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
	0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
	0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
	0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
	0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
	0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
	0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
	0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
	0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
	0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
	0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
	0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
	0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
	0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
	0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
	0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
	0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
	0x2d02ef8d
};

/**
 * wlan_crypto_crc32 - compute the IEEE 802.11 FCS over a buffer
 * @frame: bytes to checksum
 * @frame_len: number of bytes in @frame
 *
 * Standard reflected CRC-32 (preset 0xFFFFFFFF, final complement), as
 * used for the 802.11/802.3 frame check sequence.
 *
 * Return: the 32-bit FCS value.
 */
uint32_t wlan_crypto_crc32(const uint8_t *frame, size_t frame_len)
{
	uint32_t fcs = 0xFFFFFFFF;
	const uint8_t *byte = frame;
	const uint8_t *end = frame + frame_len;

	while (byte < end) {
		fcs = crc32_table[(fcs ^ *byte) & 0xff] ^ (fcs >> 8);
		byte++;
	}

	return ~fcs;
}
+ */ + + /** + * DOC: Private definations for handling crypto params + */ +#ifndef _WLAN_CRYPTO_DEF_I_H_ +#define _WLAN_CRYPTO_DEF_I_H_ + +#include "wlan_crypto_aes_i.h" + +/* IEEE 802.11 defines */ +#define WLAN_FC0_PVER 0x0003 +#define WLAN_FC1_DIR_MASK 0x03 +#define WLAN_FC1_TODS 0x01 +#define WLAN_FC1_FROMDS 0x02 +#define WLAN_FC1_DSTODS 0x03 +#define WLAN_FC1_MOREFRAG 0x04 +#define WLAN_FC1_RETRY 0x08 +#define WLAN_FC1_PWRMGT 0x10 +#define WLAN_FC1_MOREDATA 0x20 +#define WLAN_FC1_ISWEP 0x40 +#define WLAN_FC1_ORDER 0x80 + +#define WLAN_FC0_GET_TYPE(fc) (((fc) & 0x0c) >> 2) +#define WLAN_FC0_GET_STYPE(fc) (((fc) & 0xf0) >> 4) + +#define WLAN_INVALID_MGMT_SEQ 0xffff +#define WLAN_SEQ_MASK 0x0fff +#define WLAN_QOS_TID_MASK 0x0f +#define WLAN_GET_SEQ_FRAG(seq) ((seq) & (BIT(3) | BIT(2) | BIT(1) | BIT(0))) +#define WLAN_GET_SEQ_SEQ(seq) \ + (((seq) & (~(BIT(3) | BIT(2) | BIT(1) | BIT(0)))) >> 4) + +#define WLAN_FC0_TYPE_MGMT 0 +#define WLAN_FC0_TYPE_CTRL 1 +#define WLAN_FC0_TYPE_DATA 2 + +/* management */ +#define WLAN_FC0_STYPE_ASSOC_REQ 0 +#define WLAN_FC0_STYPE_ASSOC_RESP 1 +#define WLAN_FC0_STYPE_REASSOC_REQ 2 +#define WLAN_FC0_STYPE_REASSOC_RESP 3 +#define WLAN_FC0_STYPE_PROBE_REQ 4 +#define WLAN_FC0_STYPE_PROBE_RESP 5 +#define WLAN_FC0_STYPE_BEACON 8 +#define WLAN_FC0_STYPE_ATIM 9 +#define WLAN_FC0_STYPE_DISASSOC 10 +#define WLAN_FC0_STYPE_AUTH 11 +#define WLAN_FC0_STYPE_DEAUTH 12 +#define WLAN_FC0_STYPE_ACTION 13 + +/* control */ +#define WLAN_FC0_STYPE_PSPOLL 10 +#define WLAN_FC0_STYPE_RTS 11 +#define WLAN_FC0_STYPE_CTS 12 +#define WLAN_FC0_STYPE_ACK 13 +#define WLAN_FC0_STYPE_CFEND 14 +#define WLAN_FC0_STYPE_CFENDACK 15 + +/* data */ +#define WLAN_FC0_STYPE_DATA 0 +#define WLAN_FC0_STYPE_DATA_CFACK 1 +#define WLAN_FC0_STYPE_DATA_CFPOLL 2 +#define WLAN_FC0_STYPE_DATA_CFACKPOLL 3 +#define WLAN_FC0_STYPE_NULLFUNC 4 +#define WLAN_FC0_STYPE_CFACK 5 +#define WLAN_FC0_STYPE_CFPOLL 6 +#define WLAN_FC0_STYPE_CFACKPOLL 7 +#define WLAN_FC0_STYPE_QOS_DATA 8 +#define 
WLAN_FC0_STYPE_QOS_DATA_CFACK 9 +#define WLAN_FC0_STYPE_QOS_DATA_CFPOLL 10 +#define WLAN_FC0_STYPE_QOS_DATA_CFACKPOLL 11 +#define WLAN_FC0_STYPE_QOS_NULL 12 +#define WLAN_FC0_STYPE_QOS_CFPOLL 14 +#define WLAN_FC0_STYPE_QOS_CFACKPOLL 15 + +#define WLAN_TID_SIZE 17 +#define WLAN_NONQOS_SEQ 16 + +/* Macros for handling unaligned memory accesses */ + +static inline uint16_t wlan_crypto_get_be16(const uint8_t *a) +{ + return (a[0] << 8) | a[1]; +} + +static inline void wlan_crypto_put_be16(uint8_t *a, uint16_t val) +{ + a[0] = val >> 8; + a[1] = val & 0xff; +} + +static inline uint16_t wlan_crypto_get_le16(const uint8_t *a) +{ + return (a[1] << 8) | a[0]; +} + +static inline void wlan_crypto_put_le16(uint8_t *a, uint16_t val) +{ + a[1] = val >> 8; + a[0] = val & 0xff; +} + +static inline uint32_t wlan_crypto_get_be32(const uint8_t *a) +{ + return ((u32) a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]; +} + +static inline void wlan_crypto_put_be32(uint8_t *a, uint32_t val) +{ + a[0] = (val >> 24) & 0xff; + a[1] = (val >> 16) & 0xff; + a[2] = (val >> 8) & 0xff; + a[3] = val & 0xff; +} + +static inline uint32_t wlan_crypto_get_le32(const uint8_t *a) +{ + return ((u32) a[3] << 24) | (a[2] << 16) | (a[1] << 8) | a[0]; +} + +static inline void wlan_crypto_put_le32(uint8_t *a, uint32_t val) +{ + a[3] = (val >> 24) & 0xff; + a[2] = (val >> 16) & 0xff; + a[1] = (val >> 8) & 0xff; + a[0] = val & 0xff; +} + +static inline void wlan_crypto_put_be64(u8 *a, u64 val) +{ + a[0] = val >> 56; + a[1] = val >> 48; + a[2] = val >> 40; + a[3] = val >> 32; + a[4] = val >> 24; + a[5] = val >> 16; + a[6] = val >> 8; + a[7] = val & 0xff; +} + +#define WLAN_CRYPTO_TX_OPS_ALLOCKEY(psoc) \ + (psoc->soc_cb.tx_ops.crypto_tx_ops.allockey) +#define WLAN_CRYPTO_TX_OPS_SETKEY(psoc) \ + (psoc->soc_cb.tx_ops.crypto_tx_ops.setkey) +#define WLAN_CRYPTO_TX_OPS_DELKEY(psoc) \ + (psoc->soc_cb.tx_ops.crypto_tx_ops.delkey) +#define WLAN_CRYPTO_TX_OPS_DEFAULTKEY(psoc) \ + 
(psoc->soc_cb.tx_ops.crypto_tx_ops.defaultkey) + +/* unalligned little endian access */ +#ifndef LE_READ_2 +#define LE_READ_2(p) \ + ((uint16_t) \ + ((((const uint8_t *)(p))[0]) | \ + (((const uint8_t *)(p))[1] << 8))) +#endif + +#ifndef LE_READ_4 +#define LE_READ_4(p) \ + ((uint32_t) \ + ((((const uint8_t *)(p))[0]) | \ + (((const uint8_t *)(p))[1] << 8) | \ + (((const uint8_t *)(p))[2] << 16) | \ + (((const uint8_t *)(p))[3] << 24))) +#endif + +#ifndef BE_READ_4 +#define BE_READ_4(p) \ + ((uint32_t) \ + ((((const uint8_t *)(p))[0] << 24) | \ + (((const uint8_t *)(p))[1] << 16) | \ + (((const uint8_t *)(p))[2] << 8) | \ + (((const uint8_t *)(p))[3]))) +#endif + +#ifndef READ_6 +#define READ_6(b0, b1, b2, b3, b4, b5) ({ \ + uint32_t iv32 = (b0 << 0) | (b1 << 8) | (b2 << 16) | (b3 << 24);\ + uint16_t iv16 = (b4 << 0) | (b5 << 8);\ + (((uint64_t)iv16) << 32) | iv32;\ +}) +#endif + +#define OUI_SIZE (4) +#define WLAN_CRYPTO_ADDSHORT(frm, v) \ + do {frm[0] = (v) & 0xff; frm[1] = (v) >> 8; frm += 2; } while (0) + +#define WLAN_CRYPTO_ADDSELECTOR(frm, sel) \ + do { \ + uint32_t value = sel;\ + qdf_mem_copy(frm, (uint8_t *)&value, OUI_SIZE); \ + frm += OUI_SIZE; } while (0) + +#define WLAN_CRYPTO_SELECTOR(a, b, c, d) \ + ((((uint32_t) (a)) << 24) | \ + (((uint32_t) (b)) << 16) | \ + (((uint32_t) (c)) << 8) | \ + (uint32_t) (d)) + +#define WPA_TYPE_OUI WLAN_WPA_SEL(WLAN_WPA_OUI_TYPE) + +#define WLAN_CRYPTO_WAPI_IE_LEN 20 +#define WLAN_CRYPTO_WAPI_SMS4_CIPHER 0x01 + +#define WPA_AUTH_KEY_MGMT_NONE WLAN_WPA_SEL(WLAN_ASE_NONE) +#define WPA_AUTH_KEY_MGMT_UNSPEC_802_1X WLAN_WPA_SEL(WLAN_ASE_8021X_UNSPEC) +#define WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X \ + WLAN_WPA_SEL(WLAN_ASE_8021X_PSK) +#define WPA_AUTH_KEY_MGMT_CCKM WLAN_WPA_CCKM_AKM + +#define WPA_CIPHER_SUITE_NONE WLAN_WPA_SEL(WLAN_CSE_NONE) +#define WPA_CIPHER_SUITE_WEP40 WLAN_WPA_SEL(WLAN_CSE_WEP40) +#define WPA_CIPHER_SUITE_WEP104 WLAN_WPA_SEL(WLAN_CSE_WEP104) +#define WPA_CIPHER_SUITE_TKIP WLAN_WPA_SEL(WLAN_CSE_TKIP) 
+#define WPA_CIPHER_SUITE_CCMP WLAN_WPA_SEL(WLAN_CSE_CCMP) + +#define RSN_AUTH_KEY_MGMT_NONE WLAN_RSN_SEL(0) +#define RSN_AUTH_KEY_MGMT_UNSPEC_802_1X WLAN_RSN_SEL(1) +#define RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X\ + WLAN_RSN_SEL(2) +#define RSN_AUTH_KEY_MGMT_FT_802_1X WLAN_RSN_SEL(3) +#define RSN_AUTH_KEY_MGMT_FT_PSK WLAN_RSN_SEL(4) +#define RSN_AUTH_KEY_MGMT_802_1X_SHA256\ + WLAN_RSN_SEL(5) +#define RSN_AUTH_KEY_MGMT_PSK_SHA256 WLAN_RSN_SEL(6) +#define RSN_AUTH_KEY_MGMT_WPS WLAN_RSN_SEL(7) +#define RSN_AUTH_KEY_MGMT_SAE WLAN_RSN_SEL(8) +#define RSN_AUTH_KEY_MGMT_FT_SAE WLAN_RSN_SEL(9) +#define RSN_AUTH_KEY_MGMT_802_1X_SUITE_B\ + WLAN_RSN_SEL(11) +#define RSN_AUTH_KEY_MGMT_802_1X_SUITE_B_192\ + WLAN_RSN_SEL(12) +#define RSN_AUTH_KEY_MGMT_FT_802_1X_SUITE_B_192\ + WLAN_RSN_SEL(13) +#define RSN_AUTH_KEY_MGMT_FILS_SHA256 WLAN_RSN_SEL(14) +#define RSN_AUTH_KEY_MGMT_FILS_SHA384 WLAN_RSN_SEL(15) +#define RSN_AUTH_KEY_MGMT_FT_FILS_SHA256\ + WLAN_RSN_SEL(16) +#define RSN_AUTH_KEY_MGMT_FT_FILS_SHA384\ + WLAN_RSN_SEL(17) +#define RSN_AUTH_KEY_MGMT_OWE WLAN_RSN_SEL(18) + +#define RSN_AUTH_KEY_MGMT_CCKM (WLAN_RSN_CCKM_AKM) +#define RSN_AUTH_KEY_MGMT_OSEN (0x019a6f50) +#define RSN_AUTH_KEY_MGMT_DPP (WLAN_RSN_DPP_AKM) + +#define RSN_CIPHER_SUITE_NONE WLAN_RSN_SEL(WLAN_CSE_NONE) +#define RSN_CIPHER_SUITE_WEP40 WLAN_RSN_SEL(WLAN_CSE_WEP40) +#define RSN_CIPHER_SUITE_TKIP WLAN_RSN_SEL(WLAN_CSE_TKIP) +#define RSN_CIPHER_SUITE_WEP104 WLAN_RSN_SEL(WLAN_CSE_WEP104) +#define RSN_CIPHER_SUITE_CCMP WLAN_RSN_SEL(WLAN_CSE_CCMP) +#define RSN_CIPHER_SUITE_AES_CMAC WLAN_RSN_SEL(WLAN_CSE_AES_CMAC) +#define RSN_CIPHER_SUITE_GCMP WLAN_RSN_SEL(WLAN_CSE_GCMP_128) +#define RSN_CIPHER_SUITE_GCMP_256 WLAN_RSN_SEL(WLAN_CSE_GCMP_256) +#define RSN_CIPHER_SUITE_CCMP_256 WLAN_RSN_SEL(WLAN_CSE_CCMP_256) +#define RSN_CIPHER_SUITE_BIP_GMAC_128 WLAN_RSN_SEL(WLAN_CSE_BIP_GMAC_128) +#define RSN_CIPHER_SUITE_BIP_GMAC_256 WLAN_RSN_SEL(WLAN_CSE_BIP_GMAC_256) +#define RSN_CIPHER_SUITE_BIP_CMAC_256 
WLAN_RSN_SEL(WLAN_CSE_BIP_CMAC_256) + +#define RESET_PARAM(__param) ((__param) = 0) +#define SET_PARAM(__param, __val) ((__param) |= (1 << (__val))) +#define HAS_PARAM(__param, __val) ((__param) & (1 << (__val))) +#define CLEAR_PARAM(__param, __val) ((__param) &= ((~1) << (__val))) + + +#define RESET_AUTHMODE(_param) ((_param)->authmodeset = \ + (1 << WLAN_CRYPTO_AUTH_OPEN)) + +#define SET_AUTHMODE(_param, _mode) ((_param)->authmodeset |= (1 << (_mode))) +#define HAS_AUTHMODE(_param, _mode) ((_param)->authmodeset & (1 << (_mode))) + +#define AUTH_IS_OPEN(_param) HAS_AUTHMODE((_param), WLAN_CRYPTO_AUTH_OPEN) +#define AUTH_IS_SHARED_KEY(_param) \ + HAS_AUTHMODE((_param), WLAN_CRYPTO_AUTH_SHARED) +#define AUTH_IS_8021X(_param) HAS_AUTHMODE((_param), WLAN_CRYPTO_AUTH_8021X) +#define AUTH_IS_WPA(_param) HAS_AUTHMODE((_param), WLAN_CRYPTO_AUTH_WPA) +#define AUTH_IS_RSNA(_param) HAS_AUTHMODE((_param), WLAN_CRYPTO_AUTH_RSNA) +#define AUTH_IS_CCKM(_param) HAS_AUTHMODE((_param), WLAN_CRYPTO_AUTH_CCKM) +#define AUTH_IS_WAI(_param) HAS_AUTHMODE((_param), WLAN_CRYPTO_AUTH_WAPI) +#define AUTH_IS_WPA2(_param) AUTH_IS_RSNA(_param) + +#define AUTH_MATCH(_param1, _param2) \ + (((_param1)->authmodeset & (_param2)->authmodeset) != 0) + + +#define RESET_UCAST_CIPHERS(_param) ((_param)->ucastcipherset =\ + (1 << WLAN_CRYPTO_CIPHER_NONE)) +#define SET_UCAST_CIPHER(_param, _c) ((_param)->ucastcipherset |= (1 << (_c))) +#define HAS_UCAST_CIPHER(_param, _c) ((_param)->ucastcipherset & (1 << (_c))) + +#define UCIPHER_IS_CLEAR(_param) \ + HAS_UCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_NONE) +#define UCIPHER_IS_WEP(_param) \ + HAS_UCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_WEP) +#define UCIPHER_IS_TKIP(_param) \ + HAS_UCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_TKIP) +#define UCIPHER_IS_CCMP128(_param) \ + HAS_UCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_CCM) +#define UCIPHER_IS_CCMP256(_param) \ + HAS_UCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_CCM_256) +#define UCIPHER_IS_GCMP128(_param) \ + 
HAS_UCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_GCM) +#define UCIPHER_IS_GCMP256(_param) \ + HAS_UCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_GCM_256) +#define UCIPHER_IS_SMS4(_param) \ + HAS_UCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_WAPI_SMS4) + +#define RESET_MCAST_CIPHERS(_param) ((_param)->mcastcipherset = \ + (1 << WLAN_CRYPTO_CIPHER_NONE)) +#define SET_MCAST_CIPHER(_param, _c) ((_param)->mcastcipherset |= (1 << (_c))) +#define HAS_MCAST_CIPHER(_param, _c) ((_param)->mcastcipherset & (1 << (_c))) +#define HAS_ANY_MCAST_CIPHER(_param) ((_param)->mcastcipherset) +#define CLEAR_MCAST_CIPHER(_param, _c) \ + ((_param)->mcastcipherset &= (~(1)<<(_c))) + +#define MCIPHER_IS_CLEAR(_param) \ + HAS_MCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_NONE) +#define MCIPHER_IS_WEP(_param) \ + HAS_MCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_WEP) +#define MCIPHER_IS_TKIP(_param) \ + HAS_MCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_TKIP) +#define MCIPHER_IS_CCMP128(_param) \ + HAS_MCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_CCM) +#define MCIPHER_IS_CCMP256(_param) \ + HAS_MCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_CCM_256) +#define MCIPHER_IS_GCMP128(_param) \ + HAS_MCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_GCM) +#define MCIPHER_IS_GCMP256(_param) \ + HAS_MCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_GCM_256) +#define MCIPHER_IS_SMS4(_param) \ + HAS_MCAST_CIPHER((_param), WLAN_CRYPTO_CIPHER_WAPI_SMS4) + +#define RESET_MGMT_CIPHERS(_param) ((_param)->mgmtcipherset = \ + (1 << WLAN_CRYPTO_CIPHER_NONE)) +#define SET_MGMT_CIPHER(_param, _c) ((_param)->mgmtcipherset |= (1 << (_c))) +#define HAS_MGMT_CIPHER(_param, _c) ((_param)->mgmtcipherset & (1 << (_c))) +#define IS_MGMT_CIPHER(_c) ((_c == WLAN_CRYPTO_CIPHER_AES_CMAC) || \ + (_c == WLAN_CRYPTO_CIPHER_AES_CMAC_256) || \ + (_c == WLAN_CRYPTO_CIPHER_AES_GMAC) || \ + (_c == WLAN_CRYPTO_CIPHER_AES_GMAC_256)) + +#define IS_FILS_CIPHER(_c) ((_c) == WLAN_CRYPTO_CIPHER_FILS_AEAD) + +#define MGMT_CIPHER_IS_CMAC(_param) \ + 
HAS_MGMT_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_CMAC)
#define MGMT_CIPHER_IS_CMAC256(_param) \
	HAS_MGMT_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_CMAC_256)
#define MGMT_CIPHER_IS_GMAC(_param) \
	HAS_MGMT_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_GMAC)
#define MGMT_CIPHER_IS_GMAC256(_param) \
	HAS_MGMT_CIPHER((_param), WLAN_CRYPTO_CIPHER_AES_GMAC_256)

/* Key-management (AKM) bitmap helpers on struct wlan_crypto_params */
#define RESET_KEY_MGMT(_param) ((_param)->key_mgmt = \
					(1 << WLAN_CRYPTO_KEY_MGMT_NONE))
#define SET_KEY_MGMT(_param, _c)  ((_param)->key_mgmt |= (1 << (_c)))
#define HAS_KEY_MGMT(_param, _c)  ((_param)->key_mgmt & (1 << (_c)))

/* True when the two parameter sets share at least one unicast cipher */
#define UCAST_CIPHER_MATCH(_param1, _param2)    \
	(((_param1)->ucastcipherset & (_param2)->ucastcipherset) != 0)

/* True when the two parameter sets share at least one multicast cipher */
#define MCAST_CIPHER_MATCH(_param1, _param2)    \
	(((_param1)->mcastcipherset & (_param2)->mcastcipherset) != 0)

/* True when the two parameter sets share at least one mgmt-frame cipher */
#define MGMT_CIPHER_MATCH(_param1, _param2)     \
	(((_param1)->mgmtcipherset & (_param2)->mgmtcipherset) != 0)

/*
 * True when the AKM suites overlap, or when neither side advertises any
 * key management at all (open security).
 */
#define KEY_MGMTSET_MATCH(_param1, _param2)     \
	(((_param1)->key_mgmt & (_param2)->key_mgmt) != 0 || \
	(!(_param1)->key_mgmt && !(_param2)->key_mgmt))

/* Cipher-capability bitmap helpers on struct wlan_crypto_params */
#define RESET_CIPHER_CAP(_param) ((_param)->cipher_caps = 0)
#define SET_CIPHER_CAP(_param, _c)  ((_param)->cipher_caps |= (1 << (_c)))
#define HAS_CIPHER_CAP(_param, _c)  ((_param)->cipher_caps & (1 << (_c)))
#define HAS_ANY_CIPHER_CAP(_param)  ((_param)->cipher_caps)

/**
 * struct wlan_crypto_mmie - MMIE IE
 * @element_id: element id
 * @length: length of the ie
 * @key_id: igtk key_id used
 * @sequence_number: igtk PN number
 * @mic: MIC for the frame
 *
 * This structure represents Management MIC information element (IEEE 802.11w)
 */
struct wlan_crypto_mmie {
	uint8_t element_id;
	uint8_t length;
	uint16_t key_id;
	uint8_t sequence_number[6];
	uint8_t mic[16];
} __packed;

/**
 * struct wlan_crypto_comp_priv - crypto component private structure
 * @crypto_params: crypto params for the peer
 * @key: key buffers for this peer
 * @igtk_key: igtk key buffer for this peer
 * @igtk_key_type: igtk key type
 * @def_tx_keyid: default key used for this peer
 * @def_igtk_tx_keyid: default igtk key used for this peer
 * @fils_aead_set: fils params for this peer
 *
 */
struct wlan_crypto_comp_priv {
	struct wlan_crypto_params crypto_params;
	struct wlan_crypto_key *key[WLAN_CRYPTO_MAXKEYIDX];
	struct wlan_crypto_key *igtk_key[WLAN_CRYPTO_MAXIGTKKEYIDX];
	uint32_t igtk_key_type;
	uint8_t def_tx_keyid;
	uint8_t def_igtk_tx_keyid;
	uint8_t fils_aead_set;
};

/**
 * struct wlan_crypto_cipher - crypto cipher table
 * @cipher_name: printable name
 * @cipher: cipher type WLAN_CRYPTO_CIPHER_*
 * @header: size of privacy header (bytes)
 * @trailer: size of privacy trailer (bytes)
 * @miclen: size of mic trailer (bytes)
 * @keylen: max key length
 * @setkey: function pointer for setkey
 * @encap: function pointer for encap
 * @decap: function pointer for decap
 * @enmic: function pointer for enmic
 * @demic: function pointer for demic
 *
 */
struct wlan_crypto_cipher {
	const char *cipher_name;
	wlan_crypto_cipher_type cipher;
	const uint8_t header;
	const uint8_t trailer;
	const uint8_t miclen;
	const uint32_t keylen;
	QDF_STATUS(*setkey)(struct wlan_crypto_key *);
	QDF_STATUS(*encap)(struct wlan_crypto_key *,
				qdf_nbuf_t, uint8_t, uint8_t);
	QDF_STATUS(*decap)(struct wlan_crypto_key *,
				qdf_nbuf_t, uint8_t, uint8_t);
	QDF_STATUS(*enmic)(struct wlan_crypto_key *,
				qdf_nbuf_t, uint8_t, uint8_t);
	QDF_STATUS(*demic)(struct wlan_crypto_key *,
				qdf_nbuf_t, uint8_t, uint8_t);
};


/**
 * wlan_crypto_is_data_protected - check is frame is protected or not
 * @data: frame
 *
 * This function check is frame is protected or not
 *
 * Return: TRUE/FALSE
 */
static inline bool wlan_crypto_is_data_protected(const void *data)
{
	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;

	/* Protected Frame bit lives in byte 1 of the Frame Control field */
	if (hdr->frame_control[1] & WLAN_FC1_ISWEP)
		return true;
	else
		return false;
}

/**
 * ieee80211_hdrsize - calculate frame header size
 * @data: frame
 *
 * This function calculate frame header size
 *
 * Return: header size of the frame
 */
static inline uint8_t ieee80211_hdrsize(const void *data)
{
	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
	uint8_t size = sizeof(struct ieee80211_hdr);

	/* 4-address (WDS) frames carry an extra address field */
	if ((hdr->frame_control[1] & WLAN_FC1_DIR_MASK)
				== (WLAN_FC1_DSTODS)) {
		size += WLAN_ALEN;
	}

	/* QoS data frames carry a 2-byte QoS Control field */
	if (((WLAN_FC0_GET_STYPE(hdr->frame_control[0])
			== WLAN_FC0_STYPE_QOS_DATA))) {
		size += sizeof(uint16_t);
		/* Qos frame with Order bit set indicates an HTC frame */
		if (hdr->frame_control[1] & WLAN_FC1_ORDER)
			size += (sizeof(uint8_t)*4);
	}
	return size;
}

/**
 * ieee80211_hdrspace - calculate frame header size with padding
 * @pdev: pdev
 * @data: frame header
 *
 * This function returns the space occupied by the 802.11 header
 * and any padding required by the driver. This works for a management
 * or data frame.
 *
 * Return: header size of the frame with padding
 */
static inline uint8_t
ieee80211_hdrspace(struct wlan_objmgr_pdev *pdev, const void *data)
{
	uint8_t size = ieee80211_hdrsize(data);

	/* some targets require the header rounded up to a 4-byte boundary */
	if (wlan_pdev_nif_feat_cap_get(pdev, WLAN_PDEV_F_DATAPAD))
		size = roundup(size, sizeof(u_int32_t));

	return size;
}

/**
 * wlan_get_tid - get tid of the frame
 * @data: frame
 *
 * This function get tid of the frame
 *
 * Return: tid of the frame
 */
static inline int wlan_get_tid(const void *data)
{
	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;

	if (((WLAN_FC0_GET_STYPE(hdr->frame_control[0])
			== WLAN_FC0_STYPE_QOS_DATA))) {
		/* QoS Control sits after addr4 in 4-address frames */
		if ((hdr->frame_control[1] & WLAN_FC1_DIR_MASK)
					== (WLAN_FC1_DSTODS)) {
			return ((struct ieee80211_hdr_qos_addr4 *)data)->qos[0]
							& WLAN_QOS_TID_MASK;
		} else {
			return ((struct ieee80211_hdr_qos *)data)->qos[0]
							& WLAN_QOS_TID_MASK;
		}
	} else
		return WLAN_NONQOS_SEQ;
}
#endif /* end of _WLAN_CRYPTO_DEF_I_H_ */
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_fils.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_fils.c new file mode 100644 index 0000000000000000000000000000000000000000..3c5fc2cdde58af012f78e58c041c2f57e954a7aa --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_fils.c @@ -0,0 +1,477 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: Private API for handling FILS related operations + */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "wlan_crypto_global_def.h" +#include "wlan_crypto_global_api.h" +#include "wlan_crypto_def_i.h" +#include "wlan_crypto_main_i.h" +#include "wlan_crypto_obj_mgr_i.h" +#ifdef WLAN_SUPPORT_FILS +#include "wlan_crypto_aes_siv_i.h" +#endif /* WLAN_SUPPORT_FILS */ + +#define ASSOC_RESP_FIXED_FIELDS_LEN 6 /* cap info + status + assoc id */ +#define ASSOC_REQ_FIXED_FIELDS_LEN 4 /* cap info + listen interval */ +#define REASSOC_REQ_FIXED_FIELDS_LEN 10 /* cap info + listen interval + BSSID */ + +#ifdef WLAN_SUPPORT_FILS +/** + * fils_parse_ie - Parse IEs from (Re-)association Req/Response frames + * @wbuf: Packet buffer + * @hdrlen: Header length + * @cap_info: Pointer to capability Information + * @fils_sess: Pointer to the end of Fils session Element + * @ie_start: Pointer to the start of Information element + * + * Parse IE and return required pointers to encrypt/decrypt routines + * + * Return: QDF_STATUS + */ +static QDF_STATUS +fils_parse_ie(qdf_nbuf_t wbuf, uint8_t hdrlen, uint8_t **cap_info, + uint8_t **fils_sess, uint8_t **ie_start) +{ + struct ieee80211_hdr *hdr; + uint32_t pktlen_left = 0; + bool fils_found = 0; + uint8_t subtype = 0; + uint8_t *frm = NULL; + uint8_t elem_id; + uint32_t len; + + frm = (uint8_t *)qdf_nbuf_data(wbuf); + hdr = (struct ieee80211_hdr *)frm; + subtype = WLAN_FC0_GET_STYPE(hdr->frame_control[0]); + + pktlen_left = qdf_nbuf_len(wbuf); + + if (pktlen_left < hdrlen) { + qdf_print(FL("Parse error.pktlen_left:%d Framehdr size:%d\n"), + pktlen_left, hdrlen); + return QDF_STATUS_E_INVAL; + } + + frm += hdrlen; + pktlen_left -= hdrlen; + + /* pointer to the capability information field */ + *cap_info = (uint8_t *)frm; + + if (subtype == WLAN_FC0_STYPE_ASSOC_RESP || + subtype == WLAN_FC0_STYPE_REASSOC_RESP) { + /* assoc resp frame - capability (2), status (2), associd (2) */ + 
if (pktlen_left < ASSOC_RESP_FIXED_FIELDS_LEN) { + qdf_print( + FL("Parse error.pktlen_left:%d Fixed Fields len:%d\n"), + pktlen_left, ASSOC_RESP_FIXED_FIELDS_LEN); + return QDF_STATUS_E_INVAL; + } + + frm += ASSOC_RESP_FIXED_FIELDS_LEN; + pktlen_left -= ASSOC_RESP_FIXED_FIELDS_LEN; + } else if (subtype == WLAN_FC0_STYPE_ASSOC_REQ) { + /* assoc req frame - capability(2), listen interval (2) */ + if (pktlen_left < ASSOC_REQ_FIXED_FIELDS_LEN) { + qdf_print( + FL("Parse Error.pktlen_left:%d Fixed Fields len:%d\n"), + pktlen_left, ASSOC_REQ_FIXED_FIELDS_LEN); + return QDF_STATUS_E_INVAL; + } + + frm += ASSOC_REQ_FIXED_FIELDS_LEN; + pktlen_left -= ASSOC_REQ_FIXED_FIELDS_LEN; + } else if (subtype == WLAN_FC0_STYPE_REASSOC_REQ) { + /* assoc req frame - capability(2), + * Listen interval(2), + * Current AP address(6) + */ + if (pktlen_left < REASSOC_REQ_FIXED_FIELDS_LEN) { + qdf_print( + FL("Parse Error.pktlen_left:%d Fixed Fields len:%d\n"), + pktlen_left, REASSOC_REQ_FIXED_FIELDS_LEN); + return QDF_STATUS_E_INVAL; + } + + frm += REASSOC_REQ_FIXED_FIELDS_LEN; + pktlen_left -= REASSOC_REQ_FIXED_FIELDS_LEN; + } + + *ie_start = frm; + /* 'frm' now pointing to TLVs. 
+ * Parse through All IE's till FILS Session Element + */ + while ((pktlen_left >= 2) && frm) { + /* element ID & len*/ + elem_id = *frm++; + len = *frm++; + pktlen_left -= 2; + + /* for extension element, check the sub element ID */ + if (elem_id == WLAN_ELEMID_EXTN_ELEM) { + if ((len + 1) > pktlen_left) { + qdf_print(FL("Parse Error.pktlen_left:%did:%d"), + pktlen_left, elem_id); + qdf_print("len:%dextid:%d\n", len, *frm); + return QDF_STATUS_E_INVAL; + } + + if (*frm == WLAN_ELEMID_EXT_FILS_SESSION) { + fils_found = 1; + break; + } + frm++; + pktlen_left--; + } + + if (len > pktlen_left) { + qdf_print( + FL("Parse Error.pktlen_left:%did:%dlen:%dextid:%d\n"), + pktlen_left, elem_id, len, *frm); + return QDF_STATUS_E_INVAL; + } + + /* switch to the next IE */ + frm += len; + pktlen_left -= len; + } + + if (!fils_found) { + qdf_print(FL("FILS session element not found. Parse failed\n")); + return QDF_STATUS_E_INVAL; + } + + /* Points to end of FILS session element */ + *fils_sess = (frm + len); + + return QDF_STATUS_SUCCESS; +} + +/** + * fils_aead_setkey - Setkey function + * @key: Pointer to wlan_crypto_key + * + * Return: QDF_STATUS_SUCCESS + */ +static QDF_STATUS fils_aead_setkey(struct wlan_crypto_key *key) +{ + struct wlan_crypto_req_key *req_key; + struct wlan_crypto_fils_aad_key *fils_key; + + if (!key || !key->private) { + qdf_print(FL("Failed to set FILS key\n")); + return QDF_STATUS_E_INVAL; + } + req_key = key->private; + fils_key = qdf_mem_malloc(sizeof(struct wlan_crypto_fils_aad_key)); + if (!fils_key) { + qdf_print(FL("FILS key alloc failed\n")); + return QDF_STATUS_E_NOMEM; + } + qdf_mem_copy(fils_key, &req_key->filsaad, + sizeof(struct wlan_crypto_fils_aad_key)); + + /* Reassign the allocated fils_aad key object */ + key->private = fils_key; + + return QDF_STATUS_SUCCESS; +} + +/** + * fils_aead_encap - FILS AEAD encryption function + * @key: Pointer to wlan_crypto_key + * @wbuf: Packet buffer + * @keyid: Encrypting key ID + * @hdrlen: Header 
length + * + * This function encrypts FILS Association Response Packet + * + * Return: QDF_STATUS + */ +static QDF_STATUS +fils_aead_encap(struct wlan_crypto_key *key, qdf_nbuf_t wbuf, + uint8_t keyid, uint8_t hdrlen) +{ + const uint8_t *address[5 + 1]; + size_t length[5 + 1]; + uint8_t *cap_info = NULL, *fils_session = NULL, *ie_start = NULL; + uint32_t crypt_len = 0; + struct ieee80211_hdr *hdr = NULL; + struct wlan_crypto_fils_aad_key *fils_key = NULL; + uint8_t *buf = NULL; + uint32_t bufsize = 0; + uint8_t subtype = 0; + + if (!key) { + qdf_print(FL("Invalid Input\n")); + return QDF_STATUS_E_FAILURE; + } + + fils_key = (struct wlan_crypto_fils_aad_key *)key->private; + if (!fils_key) { + qdf_print(FL("Key is not set\n")); + return QDF_STATUS_E_FAILURE; + } + + if (!fils_key->kek_len) { + qdf_print(FL("Key len is zero. Returning error\n")); + return QDF_STATUS_E_FAILURE; + } + + hdr = (struct ieee80211_hdr *)qdf_nbuf_data(wbuf); + if (!hdr) { + qdf_print(FL("Invalid header\n")); + return QDF_STATUS_E_FAILURE; + } + + subtype = WLAN_FC0_GET_STYPE(hdr->frame_control[0]); + if ((subtype != WLAN_FC0_STYPE_ASSOC_RESP) && + (subtype != WLAN_FC0_STYPE_REASSOC_RESP)) + return QDF_STATUS_E_FAILURE; + + if (fils_parse_ie(wbuf, hdrlen, &cap_info, &fils_session, &ie_start) + != QDF_STATUS_SUCCESS) { + qdf_print(FL("FILS Parsing failed\n")); + return QDF_STATUS_E_FAILURE; + } + + /* The AP's BSSID */ + address[0] = hdr->addr2; + length[0] = WLAN_ALEN; + /* The STA's MAC address */ + address[1] = hdr->addr1; + length[1] = WLAN_ALEN; + /* The AP's nonce */ + address[2] = fils_key->a_nonce; + length[2] = WLAN_FILS_NONCE_LEN; + /* The STA's nonce */ + address[3] = fils_key->s_nonce; + length[3] = WLAN_FILS_NONCE_LEN; + address[4] = cap_info; + length[4] = fils_session - cap_info; + + crypt_len = (uint8_t *)hdr + (uint32_t)qdf_nbuf_len(wbuf) + - fils_session; + + bufsize = ((uint8_t *)hdr + (uint32_t)qdf_nbuf_len(wbuf) - ie_start) + + AES_BLOCK_SIZE; + buf = 
qdf_mem_malloc(bufsize); + if (!buf) { + qdf_print(FL("temp buf allocation failed\n")); + return QDF_STATUS_E_NOMEM; + } + qdf_mem_copy(buf, ie_start, bufsize); + + if (wlan_crypto_aes_siv_encrypt(fils_key->kek, fils_key->kek_len, + fils_session, crypt_len, 5, address, + length, buf + (fils_session - ie_start)) + < 0) { + qdf_print(FL("aes siv_encryption failed\n")); + qdf_mem_free(buf); + return QDF_STATUS_E_FAILURE; + } + + if (!qdf_nbuf_put_tail(wbuf, AES_BLOCK_SIZE)) + qdf_print(FL("Unable to put data in nbuf\n")); + + qdf_mem_copy(ie_start, buf, bufsize); + qdf_mem_free(buf); + + return QDF_STATUS_SUCCESS; +} + +/** + * fils_aead_decap - FILS AEAD decryption function + * @key: Pointer to wlan_crypto_key + * @wbuf: Packet buffer + * @tid: TID + * @hdrlen: Header length + * + * This function decrypts FILS Association Request Packet + * + * Return: QDF_STATUS + */ +static QDF_STATUS +fils_aead_decap(struct wlan_crypto_key *key, qdf_nbuf_t wbuf, + uint8_t tid, uint8_t hdrlen) +{ + const uint8_t *address[5]; + size_t length[5]; + uint8_t *cap_info = NULL, *fils_session = NULL, *ie_start = NULL; + struct ieee80211_hdr *hdr = NULL; + struct wlan_crypto_fils_aad_key *fils_key = NULL; + uint32_t crypt_len = 0; + uint8_t *buf = NULL; + uint32_t bufsize = 0; + + if (!key) { + qdf_print(FL("Invalid Input\n")); + return QDF_STATUS_E_FAILURE; + } + + fils_key = (struct wlan_crypto_fils_aad_key *)key->private; + if (!fils_key) { + qdf_print(FL("Key is not set\n")); + return QDF_STATUS_E_FAILURE; + } + + if (!fils_key->kek_len) { + qdf_print(FL("Key len is zero. 
Returning error\n"));
		return QDF_STATUS_E_FAILURE;
	}

	if (fils_parse_ie(wbuf, hdrlen, &cap_info, &fils_session, &ie_start)
			!= QDF_STATUS_SUCCESS) {
		qdf_print(FL("IE parse failed\n"));
		return QDF_STATUS_E_FAILURE;
	}

	hdr = (struct ieee80211_hdr *)qdf_nbuf_data(wbuf);
	if (!hdr) {
		qdf_print(FL("Invalid header\n"));
		return QDF_STATUS_E_FAILURE;
	}

	/* AAD vector order is the mirror of the encap side (STA first) */
	/* The STA's MAC address */
	address[0] = hdr->addr1;
	length[0] = WLAN_ALEN;
	/* The AP's BSSID */
	address[1] = hdr->addr2;
	length[1] = WLAN_ALEN;
	/* The STA's nonce */
	address[2] = fils_key->s_nonce;
	length[2] = WLAN_FILS_NONCE_LEN;
	/* The AP's nonce */
	address[3] = fils_key->a_nonce;
	length[3] = WLAN_FILS_NONCE_LEN;

	address[4] = cap_info;
	length[4] = fils_session - cap_info;

	/* Ciphertext after the FILS Session element must hold the SIV */
	crypt_len = ((uint8_t *)hdr + (uint32_t)qdf_nbuf_len(wbuf))
			- fils_session;
	if (crypt_len < AES_BLOCK_SIZE) {
		qdf_print(FL(
		"Not enough room for AES-SIV data after FILS Session"));
		qdf_print(
		" element in (Re)Association Request frame from %pM\n",
			hdr->addr1);
		return QDF_STATUS_E_INVAL;
	}

	/* Allocate temp buf & copy contents */
	bufsize = (uint8_t *)hdr + (uint32_t)qdf_nbuf_len(wbuf) - ie_start;
	buf = qdf_mem_malloc(bufsize);
	if (!buf) {
		qdf_print(FL("temp buf allocation failed\n"));
		return QDF_STATUS_E_NOMEM;
	}
	qdf_mem_copy(buf, ie_start, bufsize);

	if (wlan_crypto_aes_siv_decrypt(fils_key->kek, fils_key->kek_len,
					fils_session, crypt_len, 5, address,
					length, buf + (fils_session - ie_start))
			< 0) {
		qdf_print(FL("AES decrypt of assocreq frame from %s failed\n"),
			  ether_sprintf(hdr->addr1));
		qdf_mem_free(buf);
		return QDF_STATUS_E_FAILURE;
	}
	/* Copy plaintext back and drop the 16-byte SIV from the tail */
	qdf_mem_copy(ie_start, buf, bufsize);
	qdf_nbuf_trim_tail(wbuf, AES_BLOCK_SIZE);
	qdf_mem_free(buf);

	return QDF_STATUS_SUCCESS;
}

/**
 * wlan_crypto_fils_delkey - free the FILS AAD state attached to key[0]
 * @peer: peer object
 *
 * Frees the private FILS AAD parameters allocated by fils_aead_setkey().
 */
void wlan_crypto_fils_delkey(struct wlan_objmgr_peer *peer)
{
	struct wlan_crypto_comp_priv *crypto_priv = NULL;
	struct wlan_crypto_key *key = NULL;

	if (!peer) {
		qdf_print(FL("Invalid Input\n"));
		return;
	}

	crypto_priv = wlan_get_peer_crypto_obj(peer);
	if (!crypto_priv) {
		qdf_print(FL("crypto_priv NULL\n"));
		return;
	}

	key = crypto_priv->key[0];
	if (key) {
		qdf_mem_free(key->private);
		key->private = NULL;
	}
}
#else
/* Stubs when FILS support is compiled out - always report success */

static QDF_STATUS fils_aead_setkey(struct wlan_crypto_key *key)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS
fils_aead_encap(struct wlan_crypto_key *key, qdf_nbuf_t wbuf,
		uint8_t keyid, uint8_t hdrlen)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS
fils_aead_decap(struct wlan_crypto_key *key, qdf_nbuf_t wbuf,
		uint8_t tid, uint8_t hdrlen)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SUPPORT_FILS */

/* Cipher descriptor registered for WLAN_CRYPTO_CIPHER_FILS_AEAD;
 * no privacy header/trailer/MIC, enmic/demic unused.
 */
static const struct wlan_crypto_cipher fils_aead_cipher_table = {
	"FILS AEAD",
	WLAN_CRYPTO_CIPHER_FILS_AEAD,
	0,
	0,
	0,
	WLAN_MAX_WPA_KEK_LEN,
	fils_aead_setkey,
	fils_aead_encap,
	fils_aead_decap,
	0,
	0,
};

/* Returns the FILS AEAD cipher descriptor for table registration */
const struct wlan_crypto_cipher *fils_register(void)
{
	return &fils_aead_cipher_table;
}

diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_gcmp_sw.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_gcmp_sw.c
new file mode 100644
index 0000000000000000000000000000000000000000..1bd9c9d306c8e1fe4731f009b6de71c919ea60eb
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_gcmp_sw.c
@@ -0,0 +1,166 @@
/*
 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
 */
/*
 * GCM with GMAC Protocol (GCMP)
 * Copyright (c) 2012, Jouni Malinen
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

/* NOTE(review): bracketed #include targets were lost in extraction -
 * restore the <...> headers from the original file.
 */
#include
#include
#include
#include "wlan_crypto_aes_i.h"
#include "wlan_crypto_def_i.h"

/**
 * gcmp_aad_nonce - build the GCMP AAD and nonce for one MPDU
 * @hdr: 802.11 header of the frame
 * @data: start of the 8-byte GCMP header (PN0, PN1, rsvd, keyid, PN2..PN5)
 * @aad: AAD buffer; callers zero-fill it before the call
 * @aad_len: out - number of AAD bytes produced
 * @nonce: out - 12-byte nonce: A2 (6 bytes) || PN5..PN0
 *
 * NOTE(review): aad[0]/aad[1] are only ever AND-masked here and both
 * callers pass a zeroed buffer, so the AAD's leading Frame Control bytes
 * stay 0 instead of the masked frame_control from @hdr that IEEE 802.11
 * GCMP AAD construction specifies - confirm against the reference
 * implementation before relying on interop.
 */
static void gcmp_aad_nonce(const struct ieee80211_hdr *hdr, const uint8_t *data,
			   uint8_t *aad, size_t *aad_len, uint8_t *nonce)
{
	uint16_t seq;
	uint8_t stype;
	int qos = 0, addr4 = 0;
	uint8_t *pos;

	stype = WLAN_FC0_GET_STYPE(hdr->frame_control[0]);
	if ((hdr->frame_control[1] & WLAN_FC1_DIR_MASK) ==
					(WLAN_FC1_DSTODS))
		addr4 = 1;

	if (WLAN_FC0_GET_TYPE(hdr->frame_control[0]) == WLAN_FC0_TYPE_DATA) {
		aad[0] &= ~0x0070; /* Mask subtype bits */
		if (stype & 0x08) {
			const uint8_t *qc;
			/* QoS data subtype */
			qos = 1;
			aad[1] &= ~WLAN_FC1_ORDER;
			qc = (const uint8_t *) (hdr + 1);
			if (addr4)
				qc += WLAN_ALEN;
			/* NOTE(review): 'qc' is computed but never used */
		}
	}

	aad[1] &= ~(WLAN_FC1_RETRY | WLAN_FC1_PWRMGT | WLAN_FC1_MOREDATA);
	pos = aad + 2;
	/* A1 || A2 || A3 */
	qdf_mem_copy(pos, hdr->addr1, 3 * WLAN_ALEN);
	pos += 3 * WLAN_ALEN;
	seq = qdf_le16_to_cpu(*((uint16_t *)&hdr->seq_ctrl[0]));
	seq &= ~0xfff0; /* Mask Seq#; do not modify Frag# */
	wlan_crypto_put_le16(pos, seq);
	pos += 2;

	/* Optional A4 and masked QoS Control */
	qdf_mem_copy(pos, hdr + 1, addr4 * WLAN_ALEN + qos * 2);
	pos += addr4 * WLAN_ALEN;
	if (qos) {
		pos[0] &= ~0x70;
		if (1 /* FIX: either device has SPP A-MSDU Capab = 0 */)
			pos[0] &= ~0x80;
		pos++;
		*pos++ = 0x00;
	}

	*aad_len = pos - aad;

	/* Nonce = A2 || PN5..PN0 (PN big-endian) */
	qdf_mem_copy(nonce, hdr->addr2, WLAN_ALEN);
	nonce[6] = data[7]; /* PN5 */
	nonce[7] = data[6]; /* PN4 */
	nonce[8] = data[5]; /* PN3 */
	nonce[9] = data[4]; /* PN2 */
	nonce[10] = data[1]; /* PN1 */
	nonce[11] = data[0]; /* PN0 */
}


/* Decrypts one GCMP MPDU; returns a freshly allocated plaintext buffer
 * (caller frees) or NULL on failure.
 */
uint8_t *wlan_crypto_gcmp_decrypt(const uint8_t *tk, size_t tk_len,
				  const struct ieee80211_hdr *hdr,
				  const uint8_t *data, size_t data_len,
				  size_t *decrypted_len){
	uint8_t aad[30], nonce[12], *plain;
	size_t aad_len, mlen;
	const uint8_t *m;

	/* Need at least the 8-byte GCMP header plus the 16-byte MIC */
	if (data_len < 8 + 16)
		return NULL;

	plain = qdf_mem_malloc(data_len + AES_BLOCK_SIZE);
	if (plain == NULL) {
		qdf_print("%s[%d] mem alloc failed\n", __func__, __LINE__);
		return 
NULL;
	}

	/* Skip the 8-byte GCMP header; strip the 16-byte MIC at the end */
	m = data + 8;
	mlen = data_len - 8 - 16;

	qdf_mem_set(aad, sizeof(aad), 0);
	gcmp_aad_nonce(hdr, data, aad, &aad_len, nonce);
	wpa_hexdump(MSG_EXCESSIVE, "GCMP AAD", aad, aad_len);
	wpa_hexdump(MSG_EXCESSIVE, "GCMP nonce", nonce, sizeof(nonce));

	if (wlan_crypto_aes_gcm_ad(tk, tk_len, nonce, sizeof(nonce), m, mlen,
				   aad, aad_len, m + mlen, plain) < 0) {
		/*uint16_t seq_ctrl = qdf_le16_to_cpu(hdr->seq_ctrl);
		wpa_printf(MSG_INFO, "Invalid GCMP frame: A1=" MACSTR
			   " A2=" MACSTR " A3=" MACSTR " seq=%u frag=%u",
			   MAC2STR(hdr->addr1), MAC2STR(hdr->addr2),
			   MAC2STR(hdr->addr3),
			   WLAN_GET_SEQ_SEQ(seq_ctrl),
			   WLAN_GET_SEQ_FRAG(seq_ctrl));*/
		qdf_mem_free(plain);
		return NULL;
	}

	*decrypted_len = mlen;
	return plain;
}


/* Encrypts one MPDU with GCMP; returns a freshly allocated buffer holding
 * header + GCMP header + ciphertext + MIC (caller frees) or NULL.
 * NOTE(review): the 'qos' parameter is currently unused.
 */
uint8_t *wlan_crypto_gcmp_encrypt(const uint8_t *tk, size_t tk_len,
				  const uint8_t *frame, size_t len,
				  size_t hdrlen, const uint8_t *qos,
				  const uint8_t *pn, int keyid,
				  size_t *encrypted_len){
	uint8_t aad[30], nonce[12], *crypt, *pos;
	size_t aad_len, plen;
	struct ieee80211_hdr *hdr;

	if (len < hdrlen || hdrlen < 24)
		return NULL;
	plen = len - hdrlen;

	/* header + 8-byte GCMP header + payload + 16-byte MIC + slack */
	crypt = qdf_mem_malloc(hdrlen + 8 + plen + 16 + AES_BLOCK_SIZE);
	if (crypt == NULL) {
		qdf_print("%s[%d] mem alloc failed\n", __func__, __LINE__);
		return NULL;
	}

	qdf_mem_copy(crypt, frame, hdrlen);
	hdr = (struct ieee80211_hdr *) crypt;
	pos = crypt + hdrlen;
	/* GCMP header: PN0 PN1 rsvd keyid|ExtIV PN2 PN3 PN4 PN5 */
	*pos++ = pn[5]; /* PN0 */
	*pos++ = pn[4]; /* PN1 */
	*pos++ = 0x00; /* Rsvd */
	*pos++ = 0x20 | (keyid << 6);
	*pos++ = pn[3]; /* PN2 */
	*pos++ = pn[2]; /* PN3 */
	*pos++ = pn[1]; /* PN4 */
	*pos++ = pn[0]; /* PN5 */

	qdf_mem_set(aad, sizeof(aad), 0);
	gcmp_aad_nonce(hdr, crypt + hdrlen, aad, &aad_len, nonce);
	wpa_hexdump(MSG_EXCESSIVE, "GCMP AAD", aad, aad_len);
	wpa_hexdump(MSG_EXCESSIVE, "GCMP nonce", nonce, sizeof(nonce));

	if (wlan_crypto_aes_gcm_ae(tk, tk_len, nonce, sizeof(nonce),
				   frame + hdrlen, plen, aad, aad_len,
				   pos, pos + plen) < 0) {
		qdf_mem_free(crypt);
		return NULL;
	}

	wpa_hexdump(MSG_EXCESSIVE, "GCMP MIC", pos + plen, 16);
	wpa_hexdump(MSG_EXCESSIVE, "GCMP encrypted", pos, plen);

	*encrypted_len = hdrlen + 8 + plen + 16;

	return crypt;
}
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_global_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_global_api.c
new file mode 100644
index 0000000000000000000000000000000000000000..8ccfa6f18f61fedf2d37c6173646b0eccaa4006d
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_global_api.c
@@ -0,0 +1,3338 @@
/*
 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

 /**
 * DOC: Public APIs for crypto service
 */
/* NOTE(review): bracketed #include targets were lost in extraction -
 * restore the <...> headers from the original file.
 */
#include
#include
#include
#include
#include
#include
#include
#include

#include "wlan_crypto_global_def.h"
#include "wlan_crypto_global_api.h"
#include "wlan_crypto_def_i.h"
#include "wlan_crypto_param_handling_i.h"
#include "wlan_crypto_obj_mgr_i.h"

#include


/* Registered cipher handler table, indexed by wlan_crypto_cipher_type */
const struct wlan_crypto_cipher *wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_MAX];

/**
 * wlan_crypto_vdev_get_comp_params - called by mlme to get crypto params
 * @vdev:vdev
 * @crypto_priv: out - the vdev's crypto component private object
 *
 * This function gets called by mlme to get crypto params
 *
 * Return: wlan_crypto_params or NULL in case of failure
 */
static struct wlan_crypto_params *wlan_crypto_vdev_get_comp_params(
				struct wlan_objmgr_vdev *vdev,
				struct wlan_crypto_comp_priv **crypto_priv){
	*crypto_priv = (struct wlan_crypto_comp_priv *)
					wlan_get_vdev_crypto_obj(vdev);
	if (*crypto_priv == NULL) {
		qdf_print("%s[%d] crypto_priv NULL\n", __func__, __LINE__);
		return NULL;
	}

	return &((*crypto_priv)->crypto_params);
}

/**
 * wlan_crypto_peer_get_comp_params - called by mlme to get crypto params
 * @peer:peer
 * @crypto_priv: out - the peer's crypto component private object
 *
 * This function gets called by mlme to get crypto params
 *
 * Return: wlan_crypto_params or NULL in case of failure
 */
static struct wlan_crypto_params *wlan_crypto_peer_get_comp_params(
				struct wlan_objmgr_peer *peer,
				struct wlan_crypto_comp_priv **crypto_priv){

	*crypto_priv = (struct wlan_crypto_comp_priv *)
					wlan_get_peer_crypto_obj(peer);
	if (*crypto_priv == NULL) {
		qdf_print("%s[%d] crypto_priv NULL\n", __func__, __LINE__);
		return NULL;
	}

	return &((*crypto_priv)->crypto_params);
}

/* NOTE(review): currently a no-op that always reports success */
static QDF_STATUS wlan_crypto_set_igtk_key(struct wlan_crypto_key *key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * wlan_crypto_set_param - called by ucfg to set crypto param
 * @crypto_params: crypto_params
 * @param: param to be set.
 * @value: value
 *
 * This function gets called from ucfg to set param
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
static QDF_STATUS wlan_crypto_set_param(struct wlan_crypto_params *crypto_params,
					wlan_crypto_param_type param,
					uint32_t value){
	QDF_STATUS status = QDF_STATUS_E_INVAL;

	switch (param) {
	case WLAN_CRYPTO_PARAM_AUTH_MODE:
		status = wlan_crypto_set_authmode(crypto_params, value);
		break;
	case WLAN_CRYPTO_PARAM_UCAST_CIPHER:
		status = wlan_crypto_set_ucastciphers(crypto_params, value);
		break;
	case WLAN_CRYPTO_PARAM_MCAST_CIPHER:
		status = wlan_crypto_set_mcastcipher(crypto_params, value);
		break;
	case WLAN_CRYPTO_PARAM_MGMT_CIPHER:
		status = wlan_crypto_set_mgmtcipher(crypto_params, value);
		break;
	case WLAN_CRYPTO_PARAM_CIPHER_CAP:
		status = wlan_crypto_set_cipher_cap(crypto_params, value);
		break;
	case WLAN_CRYPTO_PARAM_RSN_CAP:
		status = wlan_crypto_set_rsn_cap(crypto_params, value);
		break;
	case WLAN_CRYPTO_PARAM_KEY_MGMT:
		status = wlan_crypto_set_key_mgmt(crypto_params, value);
		break;
	default:
		status = QDF_STATUS_E_INVAL;
	}
	return status;
}

/**
 * wlan_crypto_set_vdev_param - called by ucfg to set crypto param
 * @vdev: vdev
 * @param: param to be set.
 * @value: value
 *
 * This function gets called from ucfg to set param
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS wlan_crypto_set_vdev_param(struct wlan_objmgr_vdev *vdev,
					wlan_crypto_param_type param,
					uint32_t value){
	QDF_STATUS status = QDF_STATUS_E_INVAL;
	struct wlan_crypto_comp_priv *crypto_priv;
	struct wlan_crypto_params *crypto_params;

	crypto_priv = (struct wlan_crypto_comp_priv *)
					wlan_get_vdev_crypto_obj(vdev);

	if (crypto_priv == NULL) {
		qdf_print("%s[%d] crypto_priv NULL\n", __func__, __LINE__);
		return QDF_STATUS_E_INVAL;
	}

	crypto_params = &(crypto_priv->crypto_params);

	status = wlan_crypto_set_param(crypto_params, param, value);

	return status;
}

/**
 * wlan_crypto_set_peer_param - called by ucfg to set crypto param
 *
 * @peer: peer
 * @param: param to be set.
 * @value: value
 *
 * This function gets called from ucfg to set param
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS wlan_crypto_set_peer_param(struct wlan_objmgr_peer *peer,
					wlan_crypto_param_type param,
					uint32_t value){
	QDF_STATUS status = QDF_STATUS_E_INVAL;
	struct wlan_crypto_comp_priv *crypto_priv;
	struct wlan_crypto_params *crypto_params;

	/* NOTE(review): this first result is discarded; the call is kept
	 * only for the crypto_priv out-parameter checked below.
	 */
	crypto_params = wlan_crypto_peer_get_comp_params(peer,
							&crypto_priv);

	if (crypto_priv == NULL) {
		qdf_print("%s[%d] crypto_priv NULL\n", __func__, __LINE__);
		return QDF_STATUS_E_INVAL;
	}

	crypto_params = &(crypto_priv->crypto_params);

	status = wlan_crypto_set_param(crypto_params, param, value);

	return status;
}

/**
 * wlan_crypto_get_param_value - called by crypto APIs to get value for param
 * @param: Crypto param type
 * @crypto_params: Crypto params struct
 *
 * This function gets called from in-within crypto layer
 *
 * Return: value or -1 for failure
 */
static int32_t wlan_crypto_get_param_value(wlan_crypto_param_type param,
				struct wlan_crypto_params *crypto_params)
{
	int32_t value = -1;

	switch (param) {
	case WLAN_CRYPTO_PARAM_AUTH_MODE:
		value = wlan_crypto_get_authmode(crypto_params);
		break;
	case WLAN_CRYPTO_PARAM_UCAST_CIPHER:
		value = wlan_crypto_get_ucastciphers(crypto_params);
		break;
	case WLAN_CRYPTO_PARAM_MCAST_CIPHER:
		value = wlan_crypto_get_mcastcipher(crypto_params);
		break;
	case WLAN_CRYPTO_PARAM_MGMT_CIPHER:
		value = wlan_crypto_get_mgmtciphers(crypto_params);
		break;
	case WLAN_CRYPTO_PARAM_CIPHER_CAP:
		value = wlan_crypto_get_cipher_cap(crypto_params);
		break;
	case WLAN_CRYPTO_PARAM_RSN_CAP:
		value = wlan_crypto_get_rsn_cap(crypto_params);
		break;
	case WLAN_CRYPTO_PARAM_KEY_MGMT:
		value = wlan_crypto_get_key_mgmt(crypto_params);
		break;
	default:
		/* NOTE(review): returns QDF_STATUS_E_INVAL although the
		 * documented failure value is -1 - confirm callers.
		 */
		value = QDF_STATUS_E_INVAL;
	}

	return value;
}

/**
 * wlan_crypto_get_param - called to get value for param from vdev
 * @vdev: vdev
 * @param: Crypto param type
 *
 * This function gets called to get value for param from vdev
 *
 * Return: value or -1 for failure
 */
int32_t wlan_crypto_get_param(struct wlan_objmgr_vdev *vdev,
				wlan_crypto_param_type param)
{
	int32_t value = -1;
	struct wlan_crypto_comp_priv *crypto_priv;
	struct wlan_crypto_params *crypto_params;
	crypto_priv = (struct wlan_crypto_comp_priv *)
					wlan_get_vdev_crypto_obj(vdev);

	if (crypto_priv == NULL) {
		qdf_print("%s[%d] crypto_priv NULL\n", __func__, __LINE__);
		return QDF_STATUS_E_INVAL;
	}

	crypto_params = &(crypto_priv->crypto_params);
	value = wlan_crypto_get_param_value(param, crypto_params);

	return value;
}
/**
 * wlan_crypto_get_peer_param - called to get value for param from peer
 * @peer: peer
 * @param: Crypto param type
 *
 * This function gets called to get value for param from peer
 *
 * Return: value or -1 for failure
 */
int32_t wlan_crypto_get_peer_param(struct wlan_objmgr_peer *peer,
				wlan_crypto_param_type param)
{
	int32_t value = -1;
	struct wlan_crypto_comp_priv *crypto_priv;
	struct wlan_crypto_params *crypto_params;

	crypto_params = wlan_crypto_peer_get_comp_params(peer,
							&crypto_priv);

	if (crypto_params == NULL) {
		qdf_print("%s[%d] crypto_params NULL\n", __func__, __LINE__);
		return QDF_STATUS_E_INVAL;
	}
	value = wlan_crypto_get_param_value(param, crypto_params);

	return value;
}
qdf_export_symbol(wlan_crypto_get_peer_param);
/**
 * wlan_crypto_is_htallowed - called to check is HT allowed for cipher
 * @vdev: vdev
 * @peer: peer
 *
 * This function gets called to check is HT allowed for cipher.
 * HT is not allowed for wep and tkip.
 *
 * Return: 0 - not allowed or 1 - allowed
 */
uint8_t wlan_crypto_is_htallowed(struct wlan_objmgr_vdev *vdev,
				 struct wlan_objmgr_peer *peer)
{
	int32_t ucast_cipher;

	if (!(vdev || peer)) {
		qdf_print("%s[%d] Invalid params\n", __func__, __LINE__);
		return 0;
	}

	if (vdev)
		ucast_cipher = wlan_crypto_get_param(vdev,
				WLAN_CRYPTO_PARAM_UCAST_CIPHER);
	else
		ucast_cipher = wlan_crypto_get_peer_param(peer,
				WLAN_CRYPTO_PARAM_UCAST_CIPHER);

	/* WEP never allows HT; TKIP blocks HT only when no AES-class
	 * cipher is also configured.
	 */
	return (ucast_cipher & (1 << WLAN_CRYPTO_CIPHER_WEP)) ||
		((ucast_cipher & (1 << WLAN_CRYPTO_CIPHER_TKIP)) &&
		!(ucast_cipher & (1 << WLAN_CRYPTO_CIPHER_AES_CCM)) &&
		!(ucast_cipher & (1 << WLAN_CRYPTO_CIPHER_AES_GCM)) &&
		!(ucast_cipher & (1 << WLAN_CRYPTO_CIPHER_AES_GCM_256)) &&
		!(ucast_cipher & (1 << WLAN_CRYPTO_CIPHER_AES_CCM_256)));
}
qdf_export_symbol(wlan_crypto_is_htallowed);

/**
 * wlan_crypto_setkey - called by ucfg to setkey
 * @vdev: vdev
 * @req_key: req_key with cipher type, key macaddress
 *
 * This function gets called from ucfg to sey key
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS wlan_crypto_setkey(struct wlan_objmgr_vdev *vdev,
				struct wlan_crypto_req_key *req_key){

	QDF_STATUS status = QDF_STATUS_E_INVAL;
	struct wlan_crypto_comp_priv *crypto_priv;
	struct wlan_crypto_params *crypto_params;
	struct wlan_objmgr_psoc *psoc;
	struct wlan_objmgr_peer *peer;
	struct 
wlan_crypto_key *key = NULL; + const struct wlan_crypto_cipher *cipher; + uint8_t macaddr[WLAN_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + bool isbcast; + enum QDF_OPMODE vdev_mode; + uint8_t igtk_idx = 0; + + if (!vdev || !req_key || req_key->keylen > (sizeof(req_key->keydata))) { + qdf_print("%s[%d] Invalid params vdev%pK, req_key%pK\n", + __func__, __LINE__, vdev, req_key); + return QDF_STATUS_E_INVAL; + } + + isbcast = qdf_is_macaddr_group( + (struct qdf_mac_addr *)req_key->macaddr); + if ((req_key->keylen == 0) && !IS_FILS_CIPHER(req_key->type)) { + /* zero length keys, only set default key id if flags are set*/ + if ((req_key->flags & WLAN_CRYPTO_KEY_DEFAULT) + && (req_key->keyix != WLAN_CRYPTO_KEYIX_NONE) + && (!IS_MGMT_CIPHER(req_key->type))) { + wlan_crypto_default_key(vdev, + req_key->macaddr, + req_key->keyix, + !isbcast); + return QDF_STATUS_SUCCESS; + } + qdf_print("%s[%d] req_key len zero\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + cipher = wlan_crypto_cipher_ops[req_key->type]; + + if (!cipher && !IS_MGMT_CIPHER(req_key->type)) { + qdf_print("%s[%d] cipher invalid\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + if (cipher && (!IS_FILS_CIPHER(req_key->type)) && + (!IS_MGMT_CIPHER(req_key->type)) && + ((req_key->keylen != (cipher->keylen / NBBY)) && + (req_key->type != WLAN_CRYPTO_CIPHER_WEP))) { + qdf_print("%s[%d] cipher invalid\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } else if ((req_key->type == WLAN_CRYPTO_CIPHER_WEP) && + !((req_key->keylen == WLAN_CRYPTO_KEY_WEP40_LEN) + || (req_key->keylen == WLAN_CRYPTO_KEY_WEP104_LEN) + || (req_key->keylen == WLAN_CRYPTO_KEY_WEP128_LEN))) { + qdf_print("%s[%d] wep key len invalid\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + if (req_key->keyix == WLAN_CRYPTO_KEYIX_NONE) { + if (req_key->flags != (WLAN_CRYPTO_KEY_XMIT + | WLAN_CRYPTO_KEY_RECV)) { + req_key->flags |= (WLAN_CRYPTO_KEY_XMIT + | WLAN_CRYPTO_KEY_RECV); + } + } else { + if 
((req_key->keyix >= WLAN_CRYPTO_MAXKEYIDX) + && (!IS_MGMT_CIPHER(req_key->type))) { + return QDF_STATUS_E_INVAL; + } + + req_key->flags |= (WLAN_CRYPTO_KEY_XMIT + | WLAN_CRYPTO_KEY_RECV); + if (isbcast) + req_key->flags |= WLAN_CRYPTO_KEY_GROUP; + } + + vdev_mode = wlan_vdev_mlme_get_opmode(vdev); + + wlan_vdev_obj_lock(vdev); + qdf_mem_copy(macaddr, wlan_vdev_mlme_get_macaddr(vdev), WLAN_ALEN); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + wlan_vdev_obj_unlock(vdev); + qdf_print("%s[%d] psoc NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + wlan_vdev_obj_unlock(vdev); + + if (req_key->type == WLAN_CRYPTO_CIPHER_WEP) { + if (wlan_crypto_vdev_has_auth_mode(vdev, + (1 << WLAN_CRYPTO_AUTH_8021X))) { + req_key->flags |= WLAN_CRYPTO_KEY_DEFAULT; + } + } + + if (isbcast) { + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", + __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + if (IS_MGMT_CIPHER(req_key->type)) { + igtk_idx = req_key->keyix - WLAN_CRYPTO_MAXKEYIDX; + if (igtk_idx >= WLAN_CRYPTO_MAXIGTKKEYIDX) { + qdf_print("%s[%d] igtk key invalid keyid %d \n", + __func__, __LINE__, igtk_idx); + return QDF_STATUS_E_INVAL; + } + key = qdf_mem_malloc(sizeof(struct wlan_crypto_key)); + if (key == NULL) { + qdf_print("%s[%d] igtk key alloc failed\n", + __func__, __LINE__); + return QDF_STATUS_E_NOMEM; + } + + if (crypto_priv->igtk_key[igtk_idx]) + qdf_mem_free(crypto_priv->igtk_key[igtk_idx]); + + crypto_priv->igtk_key[igtk_idx] = key; + crypto_priv->igtk_key_type = req_key->type; + crypto_priv->def_igtk_tx_keyid = igtk_idx; + } else { + if (IS_FILS_CIPHER(req_key->type)) { + qdf_print(FL( + "FILS key is not for BroadCast packet\n")); + return QDF_STATUS_E_INVAL; + } + if (!HAS_MCAST_CIPHER(crypto_params, req_key->type) + && (req_key->type != WLAN_CRYPTO_CIPHER_WEP)) { + return QDF_STATUS_E_INVAL; + } + if (!crypto_priv->key[req_key->keyix]) { + 
crypto_priv->key[req_key->keyix] + = qdf_mem_malloc( + sizeof(struct wlan_crypto_key)); + if (!crypto_priv->key[req_key->keyix]) + return QDF_STATUS_E_NOMEM; + } + key = crypto_priv->key[req_key->keyix]; + } + if (vdev_mode == QDF_STA_MODE) { + peer = wlan_vdev_get_bsspeer(vdev); + if (!(peer && (QDF_STATUS_SUCCESS + == wlan_objmgr_peer_try_get_ref(peer, + WLAN_CRYPTO_ID)))) { + qdf_print("%s[%d] peer %pK failed\n", + __func__, __LINE__, peer); + if (IS_MGMT_CIPHER(req_key->type)) { + crypto_priv->igtk_key[igtk_idx] = NULL; + crypto_priv->igtk_key_type + = WLAN_CRYPTO_CIPHER_NONE; + } else + crypto_priv->key[req_key->keyix] = NULL; + if (key) + qdf_mem_free(key); + return QDF_STATUS_E_INVAL; + } + qdf_mem_copy(macaddr, wlan_peer_get_macaddr(peer), + WLAN_ALEN); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + } + } else { + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_get_peer_by_mac_n_vdev( + psoc, + pdev_id, + macaddr, + req_key->macaddr, + WLAN_CRYPTO_ID); + + if (peer == NULL) { + qdf_print("%s[%d] peer NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_copy(macaddr, req_key->macaddr, WLAN_ALEN); + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", + __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + if (IS_MGMT_CIPHER(req_key->type)) { + igtk_idx = req_key->keyix - WLAN_CRYPTO_MAXKEYIDX; + if (igtk_idx >= WLAN_CRYPTO_MAXIGTKKEYIDX) { + qdf_print("%s[%d] igtk key invalid keyid %d \n", + __func__, __LINE__, igtk_idx); + return QDF_STATUS_E_INVAL; + } + key = qdf_mem_malloc(sizeof(struct wlan_crypto_key)); + if (key == NULL) { + qdf_print("%s[%d] igtk key alloc failed\n", + __func__, __LINE__); + return QDF_STATUS_E_NOMEM; + } + if (crypto_priv->igtk_key[igtk_idx]) + qdf_mem_free(crypto_priv->igtk_key[igtk_idx]); + + 
crypto_priv->igtk_key[igtk_idx] = key; + crypto_priv->igtk_key_type = req_key->type; + crypto_priv->def_igtk_tx_keyid = igtk_idx; + } else { + uint16_t kid = req_key->keyix; + if (kid == WLAN_CRYPTO_KEYIX_NONE) + kid = 0; + if (kid >= WLAN_CRYPTO_MAXKEYIDX) { + qdf_print("%s[%d] invalid keyid %d \n", + __func__, __LINE__, kid); + return QDF_STATUS_E_INVAL; + } + if (!crypto_priv->key[kid]) { + crypto_priv->key[kid] + = qdf_mem_malloc( + sizeof(struct wlan_crypto_key)); + if (!crypto_priv->key[kid]) + return QDF_STATUS_E_NOMEM; + } + key = crypto_priv->key[kid]; + } + } + + /* alloc key might not required as it is already there */ + key->cipher_table = (void *)cipher; + key->keylen = req_key->keylen; + key->flags = req_key->flags; + + if (req_key->keyix == WLAN_CRYPTO_KEYIX_NONE) + key->keyix = 0; + else + key->keyix = req_key->keyix; + + if (req_key->flags & WLAN_CRYPTO_KEY_DEFAULT + && (!IS_MGMT_CIPHER(req_key->type))) { + crypto_priv->def_tx_keyid = key->keyix; + key->flags |= WLAN_CRYPTO_KEY_DEFAULT; + } + if ((req_key->type == WLAN_CRYPTO_CIPHER_WAPI_SMS4) + || (req_key->type == WLAN_CRYPTO_CIPHER_WAPI_GCM4)) { + uint8_t iv_AP[16] = { 0x5c, 0x36, 0x5c, 0x36, + 0x5c, 0x36, 0x5c, 0x36, + 0x5c, 0x36, 0x5c, 0x36, + 0x5c, 0x36, 0x5c, 0x37}; + uint8_t iv_STA[16] = { 0x5c, 0x36, 0x5c, 0x36, + 0x5c, 0x36, 0x5c, 0x36, + 0x5c, 0x36, 0x5c, 0x36, + 0x5c, 0x36, 0x5c, 0x36}; + + /* During Tx PN should be increment and + * send but as per our implementation we increment only after + * Tx complete. So First packet PN check will be failed. 
+ * To compensate increment the PN here by 2 + */ + if (vdev_mode == QDF_SAP_MODE) { + iv_AP[15] += 2; + qdf_mem_copy(key->recviv, iv_STA, + WLAN_CRYPTO_WAPI_IV_SIZE); + qdf_mem_copy(key->txiv, iv_AP, + WLAN_CRYPTO_WAPI_IV_SIZE); + } else { + iv_STA[15] += 2; + qdf_mem_copy(key->recviv, iv_AP, + WLAN_CRYPTO_WAPI_IV_SIZE); + qdf_mem_copy(key->txiv, iv_STA, + WLAN_CRYPTO_WAPI_IV_SIZE); + } + } else { + uint8_t i = 0; + qdf_mem_copy((uint8_t *)(&key->keytsc), + (uint8_t *)(&req_key->keytsc), sizeof(key->keytsc)); + for (i = 0; i < WLAN_CRYPTO_TID_SIZE; i++) { + qdf_mem_copy((uint8_t *)(&key->keyrsc[i]), + (uint8_t *)(&req_key->keyrsc), + sizeof(key->keyrsc[0])); + } + } + + qdf_mem_copy(key->keyval, req_key->keydata, sizeof(key->keyval)); + key->valid = 1; + if ((IS_MGMT_CIPHER(req_key->type))) { + if (HAS_CIPHER_CAP(crypto_params, + WLAN_CRYPTO_CAP_PMF_OFFLOAD)) { + if (WLAN_CRYPTO_TX_OPS_SETKEY(psoc)) { + WLAN_CRYPTO_TX_OPS_SETKEY(psoc)(vdev, + key, macaddr, req_key->type); + } + } + wlan_crypto_set_mgmtcipher(crypto_params, req_key->type); + status = wlan_crypto_set_igtk_key(key); + return status; + } else if (IS_FILS_CIPHER(req_key->type)) { + /* Take request key object to FILS setkey */ + key->private = req_key; + } else { + if (WLAN_CRYPTO_TX_OPS_SETKEY(psoc)) { + WLAN_CRYPTO_TX_OPS_SETKEY(psoc)(vdev, key, + macaddr, req_key->type); + } + } + status = cipher->setkey(key); + + if ((req_key->flags & WLAN_CRYPTO_KEY_DEFAULT) && + (req_key->keyix != WLAN_CRYPTO_KEYIX_NONE) && + (!IS_MGMT_CIPHER(req_key->type))) { + /* default xmit key */ + wlan_crypto_default_key(vdev, + req_key->macaddr, + req_key->keyix, + !isbcast); + } + + return status; +} + +/** + * wlan_crypto_get_keytype - get keytype + * @key: key + * + * This function gets keytype from key + * + * Return: keytype + */ +wlan_crypto_cipher_type wlan_crypto_get_key_type( + struct wlan_crypto_key *key){ + if (key && key->cipher_table) { + return ((struct wlan_crypto_cipher *) + (key->cipher_table))->cipher; + 
} + return WLAN_CRYPTO_CIPHER_NONE; +} +qdf_export_symbol(wlan_crypto_get_key_type); +/** + * wlan_crypto_vdev_getkey - get key from vdev + * @vdev: vdev + * @keyix: keyix + * + * This function gets key from vdev + * + * Return: key or NULL + */ +struct wlan_crypto_key *wlan_crypto_vdev_getkey(struct wlan_objmgr_vdev *vdev, + uint16_t keyix){ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key = NULL; + + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, &crypto_priv); + + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", __func__, __LINE__); + return NULL; + } + + if (keyix == WLAN_CRYPTO_KEYIX_NONE || keyix >= WLAN_CRYPTO_MAXKEYIDX) + key = crypto_priv->key[crypto_priv->def_tx_keyid]; + else + key = crypto_priv->key[keyix]; + + if (key && key->valid) + return key; + + return NULL; +} +qdf_export_symbol(wlan_crypto_vdev_getkey); + +/** + * wlan_crypto_peer_getkey - get key from peer + * @peer: peer + * @keyix: keyix + * + * This function gets key from peer + * + * Return: key or NULL + */ +struct wlan_crypto_key *wlan_crypto_peer_getkey(struct wlan_objmgr_peer *peer, + uint16_t keyix){ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key = NULL; + + crypto_params = wlan_crypto_peer_get_comp_params(peer, &crypto_priv); + + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", __func__, __LINE__); + return NULL; + } + + if (keyix == WLAN_CRYPTO_KEYIX_NONE || keyix >= WLAN_CRYPTO_MAXKEYIDX) + key = crypto_priv->key[crypto_priv->def_tx_keyid]; + else + key = crypto_priv->key[keyix]; + + if (key && key->valid) + return key; + + return NULL; +} +qdf_export_symbol(wlan_crypto_peer_getkey); + +/** + * wlan_crypto_getkey - called by ucfg to get key + * @vdev: vdev + * @req_key: key value will be copied in this req_key + * @mac_address: mac address of the peer for unicast key + * or broadcast address if 
group key is requested. + * + * This function gets called from ucfg to get key + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_getkey(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_req_key *req_key, + uint8_t *mac_addr){ + struct wlan_crypto_cipher *cipher_table; + struct wlan_crypto_key *key; + struct wlan_objmgr_psoc *psoc; + uint8_t macaddr[WLAN_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + + if ((req_key->keyix != WLAN_CRYPTO_KEYIX_NONE) && + (req_key->keyix >= WLAN_CRYPTO_MAXKEYIDX)) { + qdf_print("%s[%d] invalid keyix %d\n", __func__, __LINE__, + req_key->keyix); + return QDF_STATUS_E_INVAL; + } + + wlan_vdev_obj_lock(vdev); + qdf_mem_copy(macaddr, wlan_vdev_mlme_get_macaddr(vdev), WLAN_ALEN); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + wlan_vdev_obj_unlock(vdev); + qdf_print("%s[%d] psoc NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + wlan_vdev_obj_unlock(vdev); + + if (qdf_is_macaddr_broadcast((struct qdf_mac_addr *)mac_addr)) { + key = wlan_crypto_vdev_getkey(vdev, req_key->keyix); + if (!key) + return QDF_STATUS_E_INVAL; + } else { + struct wlan_objmgr_peer *peer; + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_get_peer_by_mac_n_vdev( + psoc, + pdev_id, + macaddr, + mac_addr, + WLAN_CRYPTO_ID); + if (peer == NULL) { + QDF_TRACE(QDF_MODULE_ID_CRYPTO, QDF_TRACE_LEVEL_ERROR, + "%s[%d] peer NULL\n", __func__, __LINE__); + return QDF_STATUS_E_NOENT; + } + key = wlan_crypto_peer_getkey(peer, req_key->keyix); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + if (!key) + return QDF_STATUS_E_INVAL; + } + + if (key->valid) { + qdf_mem_copy(req_key->keydata, + key->keyval, key->keylen); + qdf_mem_copy((uint8_t *)(&req_key->keytsc), + (uint8_t *)(&key->keytsc), + sizeof(req_key->keytsc)); + qdf_mem_copy((uint8_t *)(&req_key->keyrsc), + (uint8_t *)(&key->keyrsc[0]), + sizeof(req_key->keyrsc)); + req_key->keylen = key->keylen; + 
req_key->flags = key->flags; + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + + if (!cipher_table) + return QDF_STATUS_SUCCESS; + + req_key->type = cipher_table->cipher; + if (req_key->type == WLAN_CRYPTO_CIPHER_WAPI_SMS4) { + qdf_mem_copy((uint8_t *)(&req_key->txiv), + (uint8_t *)(key->txiv), + sizeof(req_key->txiv)); + qdf_mem_copy((uint8_t *)(&req_key->recviv), + (uint8_t *)(key->recviv), + sizeof(req_key->recviv)); + } + } + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_delkey - called by ucfg to delete key + * @vdev: vdev + * @mac_address: mac address of the peer for unicast key + * or broadcast address if group key is deleted. + * @key_idx: key index to be deleted + * + * This function gets called from ucfg to delete key + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_delkey(struct wlan_objmgr_vdev *vdev, + uint8_t *macaddr, + uint8_t key_idx){ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key; + struct wlan_crypto_cipher *cipher_table; + struct wlan_objmgr_psoc *psoc; + uint8_t bssid_mac[WLAN_ALEN]; + + if (!vdev || !macaddr || + (key_idx >= + (WLAN_CRYPTO_MAXKEYIDX + WLAN_CRYPTO_MAXIGTKKEYIDX))) { + QDF_TRACE(QDF_MODULE_ID_CRYPTO, QDF_TRACE_LEVEL_ERROR, + "%s[%d] Invalid params vdev %pK, macaddr %pK" + "keyidx %d\n", __func__, __LINE__, vdev, + macaddr, key_idx); + return QDF_STATUS_E_INVAL; + } + + wlan_vdev_obj_lock(vdev); + qdf_mem_copy(bssid_mac, wlan_vdev_mlme_get_macaddr(vdev), WLAN_ALEN); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + wlan_vdev_obj_unlock(vdev); + qdf_print("%s[%d] psoc NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + wlan_vdev_obj_unlock(vdev); + + if (qdf_is_macaddr_broadcast((struct qdf_mac_addr *)macaddr)) { + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", + __func__, __LINE__); + 
return QDF_STATUS_E_INVAL; + } + } else { + struct wlan_objmgr_peer *peer; + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_get_peer_by_mac_n_vdev( + psoc, pdev_id, + bssid_mac, + macaddr, + WLAN_CRYPTO_ID); + if (peer == NULL) { + return QDF_STATUS_E_INVAL; + } + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", + __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + } + + if (key_idx >= WLAN_CRYPTO_MAXKEYIDX) { + uint8_t igtk_idx = key_idx - WLAN_CRYPTO_MAXKEYIDX; + if (igtk_idx >= WLAN_CRYPTO_MAXIGTKKEYIDX) { + qdf_print("%s[%d] Igtk key invalid keyid %d\n", + __func__, __LINE__, igtk_idx); + return QDF_STATUS_E_INVAL; + } + key = crypto_priv->igtk_key[igtk_idx]; + crypto_priv->igtk_key[igtk_idx] = NULL; + if (key) + key->valid = 0; + } else { + key = crypto_priv->key[key_idx]; + crypto_priv->key[key_idx] = NULL; + } + + if (!key) + return QDF_STATUS_E_INVAL; + + if (key->valid) { + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + + if (WLAN_CRYPTO_TX_OPS_DELKEY(psoc)) { + WLAN_CRYPTO_TX_OPS_DELKEY(psoc)(vdev, key, + macaddr, cipher_table->cipher); + } + } + qdf_mem_free(key); + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_default_key - called by ucfg to set default tx key + * @vdev: vdev + * @mac_address: mac address of the peer for unicast key + * or broadcast address if group key need to made default. + * @key_idx: key index to be made as default key + * @unicast: is key was unicast or group key. 
+ * + * This function gets called from ucfg to set default key + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_default_key(struct wlan_objmgr_vdev *vdev, + uint8_t *macaddr, + uint8_t key_idx, + bool unicast){ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key; + struct wlan_objmgr_psoc *psoc; + uint8_t bssid_mac[WLAN_ALEN]; + + if (!vdev || !macaddr || (key_idx >= WLAN_CRYPTO_MAXKEYIDX)) { + qdf_print("%s[%d] Invalid params vdev %pK, macaddr %pK" + "keyidx %d\n", __func__, __LINE__, + vdev, macaddr, key_idx); + return QDF_STATUS_E_INVAL; + } + + wlan_vdev_obj_lock(vdev); + qdf_mem_copy(bssid_mac, wlan_vdev_mlme_get_macaddr(vdev), WLAN_ALEN); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + wlan_vdev_obj_unlock(vdev); + qdf_print("%s[%d] psoc NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + wlan_vdev_obj_unlock(vdev); + + if (qdf_is_macaddr_broadcast((struct qdf_mac_addr *)macaddr)) { + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", + __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[key_idx]; + if (!key) + return QDF_STATUS_E_INVAL; + } else { + struct wlan_objmgr_peer *peer; + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_get_peer_by_mac_n_vdev( + psoc, pdev_id, + bssid_mac, + macaddr, + WLAN_CRYPTO_ID); + + if (peer == NULL) { + qdf_print("%s[%d] peer NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", + __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[key_idx]; + if (!key) + return QDF_STATUS_E_INVAL; + } + if 
(!key->valid) + return QDF_STATUS_E_INVAL; + + if (WLAN_CRYPTO_TX_OPS_DEFAULTKEY(psoc)) { + WLAN_CRYPTO_TX_OPS_DEFAULTKEY(psoc)(vdev, key_idx, + macaddr); + } + crypto_priv->def_tx_keyid = key_idx; + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_encap - called by mgmt for encap the frame based on cipher + * @vdev: vdev + * @wbuf: wbuf + * @macaddr: macaddr + * @encapdone: is encapdone already or not. + * + * This function gets called from mgmt txrx to encap frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_encap(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, + uint8_t *mac_addr, + uint8_t encapdone){ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key; + QDF_STATUS status; + struct wlan_crypto_cipher *cipher_table; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_peer *peer; + uint8_t bssid_mac[WLAN_ALEN]; + uint8_t pdev_id; + uint8_t hdrlen; + enum QDF_OPMODE opmode; + + opmode = wlan_vdev_mlme_get_opmode(vdev); + wlan_vdev_obj_lock(vdev); + qdf_mem_copy(bssid_mac, wlan_vdev_mlme_get_macaddr(vdev), WLAN_ALEN); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + wlan_vdev_obj_unlock(vdev); + qdf_print("%s[%d] psoc NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + wlan_vdev_obj_unlock(vdev); + + pdev_id = wlan_objmgr_pdev_get_pdev_id(wlan_vdev_get_pdev(vdev)); + /* FILS Encap required only for (Re-)Assoc response */ + peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr, WLAN_CRYPTO_ID); + + if (!wlan_crypto_is_data_protected((uint8_t *)qdf_nbuf_data(wbuf)) && + peer && !wlan_crypto_get_peer_fils_aead(peer)) { + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + return QDF_STATUS_E_INVAL; + } + + if (peer) + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + + if (qdf_is_macaddr_group((struct qdf_mac_addr *)mac_addr)) { + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (crypto_priv == NULL) { + 
qdf_print("%s[%d] crypto_priv NULL\n", + __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[crypto_priv->def_tx_keyid]; + if (!key) + return QDF_STATUS_E_INVAL; + + } else { + struct wlan_objmgr_peer *peer; + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_get_peer_by_mac_n_vdev(psoc, pdev_id, + bssid_mac, mac_addr, + WLAN_CRYPTO_ID); + + if (peer == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", + __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", + __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[crypto_priv->def_tx_keyid]; + if (!key) + return QDF_STATUS_E_INVAL; + } + if (opmode == QDF_MONITOR_MODE) + hdrlen = ieee80211_hdrsize((uint8_t *)qdf_nbuf_data(wbuf)); + else + hdrlen = ieee80211_hdrspace(wlan_vdev_get_pdev(vdev), + (uint8_t *)qdf_nbuf_data(wbuf)); + + /* if tkip, is counter measures enabled, then drop the frame */ + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + status = cipher_table->encap(key, wbuf, encapdone, + hdrlen); + + return status; +} +qdf_export_symbol(wlan_crypto_encap); + +/** + * wlan_crypto_decap - called by mgmt for decap the frame based on cipher + * @vdev: vdev + * @wbuf: wbuf + * @macaddr: macaddr + * @tid: tid of the frame + * + * This function gets called from mgmt txrx to decap frame. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_decap(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, + uint8_t *mac_addr, + uint8_t tid){ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key; + QDF_STATUS status; + struct wlan_crypto_cipher *cipher_table; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_peer *peer; + uint8_t bssid_mac[WLAN_ALEN]; + uint8_t keyid; + uint8_t pdev_id; + uint8_t hdrlen; + enum QDF_OPMODE opmode; + + opmode = wlan_vdev_mlme_get_opmode(vdev); + wlan_vdev_obj_lock(vdev); + qdf_mem_copy(bssid_mac, wlan_vdev_mlme_get_macaddr(vdev), WLAN_ALEN); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + wlan_vdev_obj_unlock(vdev); + qdf_print("%s[%d] psoc NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + wlan_vdev_obj_unlock(vdev); + + if (opmode == QDF_MONITOR_MODE) + hdrlen = ieee80211_hdrsize((uint8_t *)qdf_nbuf_data(wbuf)); + else + hdrlen = ieee80211_hdrspace(wlan_vdev_get_pdev(vdev), + (uint8_t *)qdf_nbuf_data(wbuf)); + + keyid = wlan_crypto_get_keyid((uint8_t *)qdf_nbuf_data(wbuf), hdrlen); + + if (keyid >= WLAN_CRYPTO_MAXKEYIDX) + return QDF_STATUS_E_INVAL; + + pdev_id = wlan_objmgr_pdev_get_pdev_id(wlan_vdev_get_pdev(vdev)); + /* FILS Decap required only for (Re-)Assoc request */ + peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr, WLAN_CRYPTO_ID); + + if (!wlan_crypto_is_data_protected((uint8_t *)qdf_nbuf_data(wbuf)) && + peer && !wlan_crypto_get_peer_fils_aead(peer)) { + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + return QDF_STATUS_E_INVAL; + } + + if (peer) + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + + if (qdf_is_macaddr_group((struct qdf_mac_addr *)mac_addr)) { + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", + __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + key = 
crypto_priv->key[keyid]; + if (!key) + return QDF_STATUS_E_INVAL; + + } else { + struct wlan_objmgr_peer *peer; + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_get_peer_by_mac_n_vdev( + psoc, pdev_id, bssid_mac, + mac_addr, WLAN_CRYPTO_ID); + if (peer == NULL) { + qdf_print("%s[%d] peer NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", + __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[keyid]; + if (!key) + return QDF_STATUS_E_INVAL; + } + /* if tkip, is counter measures enabled, then drop the frame */ + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + status = cipher_table->decap(key, wbuf, tid, hdrlen); + + return status; +} +qdf_export_symbol(wlan_crypto_decap); +/** + * wlan_crypto_enmic - called by mgmt for adding mic in frame based on cipher + * @vdev: vdev + * @wbuf: wbuf + * @macaddr: macaddr + * @encapdone: is encapdone already or not. + * + * This function gets called from mgmt txrx to adding mic to the frame. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_enmic(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, + uint8_t *mac_addr, + uint8_t encapdone){ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key; + QDF_STATUS status; + struct wlan_crypto_cipher *cipher_table; + struct wlan_objmgr_psoc *psoc; + uint8_t bssid_mac[WLAN_ALEN]; + uint8_t hdrlen; + enum QDF_OPMODE opmode; + + opmode = wlan_vdev_mlme_get_opmode(vdev); + + + wlan_vdev_obj_lock(vdev); + qdf_mem_copy(bssid_mac, wlan_vdev_mlme_get_macaddr(vdev), WLAN_ALEN); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + wlan_vdev_obj_unlock(vdev); + qdf_print("%s[%d] psoc NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + wlan_vdev_obj_unlock(vdev); + + if (qdf_is_macaddr_broadcast((struct qdf_mac_addr *)mac_addr)) { + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", + __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[crypto_priv->def_tx_keyid]; + if (!key) + return QDF_STATUS_E_INVAL; + + } else { + struct wlan_objmgr_peer *peer; + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_get_peer_by_mac_n_vdev( + psoc, pdev_id, bssid_mac, + mac_addr, WLAN_CRYPTO_ID); + if (peer == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", + __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", + __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[crypto_priv->def_tx_keyid]; + if (!key) + return QDF_STATUS_E_INVAL; + } + if (opmode == QDF_MONITOR_MODE) + hdrlen = ieee80211_hdrsize((uint8_t 
*)qdf_nbuf_data(wbuf)); + else + hdrlen = ieee80211_hdrspace(wlan_vdev_get_pdev(vdev), + (uint8_t *)qdf_nbuf_data(wbuf)); + + /* if tkip, is counter measures enabled, then drop the frame */ + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + status = cipher_table->enmic(key, wbuf, encapdone, hdrlen); + + return status; +} + +/** + * wlan_crypto_demic - called by mgmt for remove and check mic for + * the frame based on cipher + * @vdev: vdev + * @wbuf: wbuf + * @macaddr: macaddr + * @tid: tid of the frame + * @keyid: keyid in the received frame + * This function gets called from mgmt txrx to decap frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_demic(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf, + uint8_t *mac_addr, + uint8_t tid, + uint8_t keyid){ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key; + QDF_STATUS status; + struct wlan_crypto_cipher *cipher_table; + struct wlan_objmgr_psoc *psoc; + uint8_t bssid_mac[WLAN_ALEN]; + uint8_t hdrlen; + enum QDF_OPMODE opmode; + + opmode = wlan_vdev_mlme_get_opmode(vdev); + + if (opmode == QDF_MONITOR_MODE) + hdrlen = ieee80211_hdrsize((uint8_t *)qdf_nbuf_data(wbuf)); + else + hdrlen = ieee80211_hdrspace(wlan_vdev_get_pdev(vdev), + (uint8_t *)qdf_nbuf_data(wbuf)); + + wlan_vdev_obj_lock(vdev); + qdf_mem_copy(bssid_mac, wlan_vdev_mlme_get_macaddr(vdev), WLAN_ALEN); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + wlan_vdev_obj_unlock(vdev); + qdf_print("%s[%d] psoc NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + wlan_vdev_obj_unlock(vdev); + + if (qdf_is_macaddr_broadcast((struct qdf_mac_addr *)mac_addr)) { + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", + __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[keyid]; + if (!key) + return 
QDF_STATUS_E_INVAL; + + } else { + struct wlan_objmgr_peer *peer; + uint8_t pdev_id; + + pdev_id = wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_get_peer_by_mac_n_vdev( + psoc, pdev_id, bssid_mac, + mac_addr, WLAN_CRYPTO_ID); + if (peer == NULL) { + qdf_print("%s[%d] peer NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + wlan_objmgr_peer_release_ref(peer, WLAN_CRYPTO_ID); + + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", + __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + key = crypto_priv->key[keyid]; + if (!key) + return QDF_STATUS_E_INVAL; + } + /* if tkip, is counter measures enabled, then drop the frame */ + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + status = cipher_table->demic(key, wbuf, tid, hdrlen); + + return status; +} + +/** + * wlan_crypto_vdev_is_pmf_enabled - called to check is pmf enabled in vdev + * @vdev: vdev + * + * This function gets called to check is pmf enabled or not in vdev. + * + * Return: true or false + */ +bool wlan_crypto_vdev_is_pmf_enabled(struct wlan_objmgr_vdev *vdev) +{ + + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *vdev_crypto_params; + + if (!vdev) + return false; + vdev_crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + if ((vdev_crypto_params->rsn_caps & + WLAN_CRYPTO_RSN_CAP_MFP_ENABLED) + || (vdev_crypto_params->rsn_caps & + WLAN_CRYPTO_RSN_CAP_MFP_REQUIRED)) { + return true; + } + + return false; +} +/** + * wlan_crypto_is_pmf_enabled - called by mgmt txrx to check is pmf enabled + * @vdev: vdev + * @peer: peer + * + * This function gets called by mgmt txrx to check is pmf enabled or not. 
+ * + * Return: true or false + */ +bool wlan_crypto_is_pmf_enabled(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer){ + + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *vdev_crypto_params; + struct wlan_crypto_params *peer_crypto_params; + + if (!vdev || !peer) + return false; + vdev_crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + peer_crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + if (((vdev_crypto_params->rsn_caps & + WLAN_CRYPTO_RSN_CAP_MFP_ENABLED) && + (peer_crypto_params->rsn_caps & + WLAN_CRYPTO_RSN_CAP_MFP_ENABLED)) + || (vdev_crypto_params->rsn_caps & + WLAN_CRYPTO_RSN_CAP_MFP_REQUIRED)) { + return true; + } + + return false; +} + +static void wlan_crypto_gmac_pn_swap(uint8_t *a, uint8_t *b) +{ + a[0] = b[5]; + a[1] = b[4]; + a[2] = b[3]; + a[3] = b[2]; + a[4] = b[1]; + a[5] = b[0]; +} + +/** + * wlan_crypto_add_mmie - called by mgmt txrx to add mmie in frame + * @vdev: vdev + * @bfrm: frame starting pointer + * @len: length of the frame + * + * This function gets called by mgmt txrx to add mmie in frame + * + * Return: end of frame or NULL in case failure + */ +uint8_t *wlan_crypto_add_mmie(struct wlan_objmgr_vdev *vdev, + uint8_t *bfrm, + uint32_t len) { + struct wlan_crypto_key *key; + struct wlan_crypto_mmie *mmie; + uint8_t *pn, *aad, *buf, *efrm, nounce[12]; + struct ieee80211_hdr *hdr; + uint32_t i, hdrlen, mic_len, aad_len; + uint8_t mic[16]; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + int32_t ret = -1; + + if (!bfrm) { + qdf_print("%s[%d] frame is NULL\n", __func__, __LINE__); + return NULL; + } + + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + 
&crypto_priv); + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", __func__, __LINE__); + return NULL; + } + + if (crypto_priv->def_igtk_tx_keyid >= WLAN_CRYPTO_MAXIGTKKEYIDX) { + qdf_print("%s[%d] igtk key invalid keyid %d \n", + __func__, __LINE__, crypto_priv->def_igtk_tx_keyid); + return NULL; + } + + key = crypto_priv->igtk_key[crypto_priv->def_igtk_tx_keyid]; + if (!key) { + qdf_print("%s[%d] No igtk key present\n", __func__, __LINE__); + return NULL; + } + mic_len = (crypto_priv->igtk_key_type + == WLAN_CRYPTO_CIPHER_AES_CMAC) ? 8 : 16; + + efrm = bfrm + len; + aad_len = 20; + hdrlen = sizeof(struct ieee80211_hdr); + len += sizeof(struct wlan_crypto_mmie); + + mmie = (struct wlan_crypto_mmie *) efrm; + qdf_mem_zero((unsigned char *)mmie, sizeof(*mmie)); + mmie->element_id = WLAN_ELEMID_MMIE; + mmie->length = sizeof(*mmie) - 2; + mmie->key_id = qdf_cpu_to_le16(key->keyix); + + mic_len = (crypto_priv->igtk_key_type + == WLAN_CRYPTO_CIPHER_AES_CMAC) ? 8 : 16; + if (mic_len == 8) { + mmie->length -= 8; + len -= 8; + } + /* PN = PN + 1 */ + pn = (uint8_t *)&key->keytsc; + + for (i = 0; i <= 5; i++) { + pn[i]++; + if (pn[i]) + break; + } + + /* Copy IPN */ + qdf_mem_copy(mmie->sequence_number, pn, 6); + + hdr = (struct ieee80211_hdr *) bfrm; + + buf = qdf_mem_malloc(len - hdrlen + 20); + if (!buf) { + qdf_print("%s[%d] malloc failed\n", __func__, __LINE__); + return NULL; + } + qdf_mem_zero(buf, len - hdrlen + 20); + aad = buf; + /* generate BIP AAD: FC(masked) || A1 || A2 || A3 */ + + /* FC type/subtype */ + aad[0] = hdr->frame_control[0]; + /* Mask FC Retry, PwrMgt, MoreData flags to zero */ + aad[1] = (hdr->frame_control[1] & ~(WLAN_FC1_RETRY | WLAN_FC1_PWRMGT + | WLAN_FC1_MOREDATA)); + /* A1 || A2 || A3 */ + qdf_mem_copy(aad + 2, hdr->addr1, WLAN_ALEN); + qdf_mem_copy(aad + 8, hdr->addr2, WLAN_ALEN); + qdf_mem_copy(aad + 14, hdr->addr3, WLAN_ALEN); + qdf_mem_zero(mic, 16); + + /* + * MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || 
MMIE, 64) + */ + + qdf_mem_copy(buf + aad_len, bfrm + hdrlen, len - hdrlen); + if (crypto_priv->igtk_key_type == WLAN_CRYPTO_CIPHER_AES_CMAC) { + + ret = omac1_aes_128(key->keyval, buf, + len + aad_len - hdrlen, mic); + qdf_mem_copy(mmie->mic, mic, 8); + + } else if (crypto_priv->igtk_key_type + == WLAN_CRYPTO_CIPHER_AES_CMAC_256) { + + ret = omac1_aes_256(key->keyval, buf, + len + aad_len - hdrlen, mmie->mic); + } else if ((crypto_priv->igtk_key_type == WLAN_CRYPTO_CIPHER_AES_GMAC) + || (crypto_priv->igtk_key_type + == WLAN_CRYPTO_CIPHER_AES_GMAC_256)) { + + qdf_mem_copy(nounce, hdr->addr2, WLAN_ALEN); + wlan_crypto_gmac_pn_swap(nounce + 6, pn); + ret = wlan_crypto_aes_gmac(key->keyval, key->keylen, nounce, + sizeof(nounce), buf, + len + aad_len - hdrlen, mmie->mic); + } + qdf_mem_free(buf); + if (ret < 0) { + qdf_print("%s[%d] add mmie failed\n", __func__, __LINE__); + return NULL; + } + + return bfrm + len; +} + +/** + * wlan_crypto_is_mmie_valid - called by mgmt txrx to check mmie of the frame + * @vdev: vdev + * @frm: frame starting pointer + * @efrm: end of frame pointer + * + * This function gets called by mgmt txrx to check mmie of the frame + * + * Return: true or false + */ +bool wlan_crypto_is_mmie_valid(struct wlan_objmgr_vdev *vdev, + uint8_t *frm, + uint8_t *efrm){ + struct wlan_crypto_mmie *mmie = NULL; + uint8_t *ipn, *aad, *buf, mic[16], nounce[12]; + struct wlan_crypto_key *key; + struct ieee80211_hdr *hdr; + uint16_t mic_len, hdrlen, len; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + uint8_t aad_len = 20; + int32_t ret = -1; + + /* check if frame is illegal length */ + if (!frm || !efrm || (efrm < frm) + || ((efrm - frm) < sizeof(struct ieee80211_hdr))) { + qdf_print("%s[%d] Invalid params\n", __func__, __LINE__); + return false; + } + len = efrm - frm; + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_vdev_crypto_obj(vdev); + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv 
NULL\n", __func__, __LINE__); + return false; + } + + crypto_params = &(crypto_priv->crypto_params); + + + mic_len = (crypto_priv->igtk_key_type + == WLAN_CRYPTO_CIPHER_AES_CMAC) ? 8 : 16; + hdrlen = sizeof(struct ieee80211_hdr); + + if (mic_len == 8) + mmie = (struct wlan_crypto_mmie *)(efrm - sizeof(*mmie) + 8); + else + mmie = (struct wlan_crypto_mmie *)(efrm - sizeof(*mmie)); + + + /* check Elem ID*/ + if ((mmie == NULL) || (mmie->element_id != WLAN_ELEMID_MMIE)) { + qdf_print("%s[%d] IE is not MMIE\n", __func__, __LINE__); + return false; + } + + if (mmie->key_id >= (WLAN_CRYPTO_MAXKEYIDX + + WLAN_CRYPTO_MAXIGTKKEYIDX) || + (mmie->key_id < WLAN_CRYPTO_MAXKEYIDX)) { + qdf_print("%s[%d] keyid not valid\n", __func__, __LINE__); + return false; + } + + key = crypto_priv->igtk_key[mmie->key_id - WLAN_CRYPTO_MAXKEYIDX]; + if (!key) { + qdf_print("%s[%d] No igtk key present\n", __func__, __LINE__); + return false; + } + + /* validate ipn */ + ipn = mmie->sequence_number; + if (qdf_mem_cmp(ipn, key->keyrsc, 6) <= 0) { + qdf_print("%s[%d] replay error\n", __func__, __LINE__); + return false; + } + + buf = qdf_mem_malloc(len - hdrlen + 20); + if (!buf) { + qdf_print("%s[%d] malloc failed\n", __func__, __LINE__); + return false; + } + aad = buf; + + /* construct AAD */ + hdr = (struct ieee80211_hdr *)frm; + /* generate BIP AAD: FC(masked) || A1 || A2 || A3 */ + + /* FC type/subtype */ + aad[0] = hdr->frame_control[0]; + /* Mask FC Retry, PwrMgt, MoreData flags to zero */ + aad[1] = (hdr->frame_control[1] & ~(WLAN_FC1_RETRY | WLAN_FC1_PWRMGT + | WLAN_FC1_MOREDATA)); + /* A1 || A2 || A3 */ + qdf_mem_copy(aad + 2, hdr->addr1, 3 * WLAN_ALEN); + + /* + * MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64) + */ + qdf_mem_copy(buf + 20, frm + hdrlen, len - hdrlen); + qdf_mem_zero(buf + (len - hdrlen + 20 - mic_len), mic_len); + qdf_mem_zero(mic, 16); + if (crypto_priv->igtk_key_type == WLAN_CRYPTO_CIPHER_AES_CMAC) { + ret = omac1_aes_128(key->keyval, buf, + len - 
hdrlen + aad_len, mic); + } else if (crypto_priv->igtk_key_type + == WLAN_CRYPTO_CIPHER_AES_CMAC_256) { + ret = omac1_aes_256(key->keyval, buf, + len + aad_len - hdrlen, mic); + } else if ((crypto_priv->igtk_key_type == WLAN_CRYPTO_CIPHER_AES_GMAC) + || (crypto_priv->igtk_key_type + == WLAN_CRYPTO_CIPHER_AES_GMAC_256)) { + qdf_mem_copy(nounce, hdr->addr2, WLAN_ALEN); + wlan_crypto_gmac_pn_swap(nounce + 6, ipn); + ret = wlan_crypto_aes_gmac(key->keyval, key->keylen, nounce, + sizeof(nounce), buf, + len + aad_len - hdrlen, mic); + } + + qdf_mem_free(buf); + + if (ret < 0) { + qdf_print("%s[%d] genarate mmie failed\n", __func__, __LINE__); + return false; + } + + if (qdf_mem_cmp(mic, mmie->mic, mic_len) != 0) { + qdf_print("%s[%d] mmie mismatch\n", __func__, __LINE__); + /* MMIE MIC mismatch */ + return false; + } + + /* Update the receive sequence number */ + qdf_mem_copy(key->keyrsc, ipn, 6); + qdf_print("%s[%d] mmie matched\n", __func__, __LINE__); + + return true; +} + + +static int32_t wlan_crypto_wpa_cipher_to_suite(uint32_t cipher) +{ + int32_t status = -1; + + switch (cipher) { + case WLAN_CRYPTO_CIPHER_TKIP: + return WPA_CIPHER_SUITE_TKIP; + case WLAN_CRYPTO_CIPHER_AES_CCM: + return WPA_CIPHER_SUITE_CCMP; + case WLAN_CRYPTO_CIPHER_NONE: + return WPA_CIPHER_SUITE_NONE; + } + + return status; +} + +static int32_t wlan_crypto_rsn_cipher_to_suite(uint32_t cipher) +{ + int32_t status = -1; + + switch (cipher) { + case WLAN_CRYPTO_CIPHER_TKIP: + return RSN_CIPHER_SUITE_TKIP; + case WLAN_CRYPTO_CIPHER_AES_CCM: + return RSN_CIPHER_SUITE_CCMP; + case WLAN_CRYPTO_CIPHER_AES_CCM_256: + return RSN_CIPHER_SUITE_CCMP_256; + case WLAN_CRYPTO_CIPHER_AES_GCM: + return RSN_CIPHER_SUITE_GCMP; + case WLAN_CRYPTO_CIPHER_AES_GCM_256: + return RSN_CIPHER_SUITE_GCMP_256; + case WLAN_CRYPTO_CIPHER_AES_CMAC: + return RSN_CIPHER_SUITE_AES_CMAC; + case WLAN_CRYPTO_CIPHER_AES_CMAC_256: + return RSN_CIPHER_SUITE_BIP_CMAC_256; + case WLAN_CRYPTO_CIPHER_AES_GMAC: + return 
RSN_CIPHER_SUITE_BIP_GMAC_128; + case WLAN_CRYPTO_CIPHER_AES_GMAC_256: + return RSN_CIPHER_SUITE_BIP_GMAC_256; + case WLAN_CRYPTO_CIPHER_NONE: + return RSN_CIPHER_SUITE_NONE; + } + + return status; +} + +/* + * Convert an RSN key management/authentication algorithm + * to an internal code. + */ +static int32_t +wlan_crypto_rsn_keymgmt_to_suite(uint32_t keymgmt) +{ + int32_t status = -1; + + switch (keymgmt) { + case WLAN_CRYPTO_KEY_MGMT_NONE: + return RSN_AUTH_KEY_MGMT_NONE; + case WLAN_CRYPTO_KEY_MGMT_IEEE8021X: + return RSN_AUTH_KEY_MGMT_UNSPEC_802_1X; + case WLAN_CRYPTO_KEY_MGMT_PSK: + return RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X; + case WLAN_CRYPTO_KEY_MGMT_FT_IEEE8021X: + return RSN_AUTH_KEY_MGMT_FT_802_1X; + case WLAN_CRYPTO_KEY_MGMT_FT_PSK: + return RSN_AUTH_KEY_MGMT_FT_PSK; + case WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SHA256: + return RSN_AUTH_KEY_MGMT_802_1X_SHA256; + case WLAN_CRYPTO_KEY_MGMT_PSK_SHA256: + return RSN_AUTH_KEY_MGMT_PSK_SHA256; + case WLAN_CRYPTO_KEY_MGMT_SAE: + return RSN_AUTH_KEY_MGMT_SAE; + case WLAN_CRYPTO_KEY_MGMT_FT_SAE: + return RSN_AUTH_KEY_MGMT_FT_SAE; + case WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B: + return RSN_AUTH_KEY_MGMT_802_1X_SUITE_B; + case WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B_192: + return RSN_AUTH_KEY_MGMT_802_1X_SUITE_B_192; + case WLAN_CRYPTO_KEY_MGMT_CCKM: + return RSN_AUTH_KEY_MGMT_CCKM; + case WLAN_CRYPTO_KEY_MGMT_OSEN: + return RSN_AUTH_KEY_MGMT_OSEN; + case WLAN_CRYPTO_KEY_MGMT_FILS_SHA256: + return RSN_AUTH_KEY_MGMT_FILS_SHA256; + case WLAN_CRYPTO_KEY_MGMT_FILS_SHA384: + return RSN_AUTH_KEY_MGMT_FILS_SHA384; + case WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA256: + return RSN_AUTH_KEY_MGMT_FT_FILS_SHA256; + case WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA384: + return RSN_AUTH_KEY_MGMT_FT_FILS_SHA384; + case WLAN_CRYPTO_KEY_MGMT_OWE: + return RSN_AUTH_KEY_MGMT_OWE; + case WLAN_CRYPTO_KEY_MGMT_DPP: + return RSN_AUTH_KEY_MGMT_DPP; + } + + return status; +} + +/* + * Convert an RSN key management/authentication algorithm + * to an internal code. 
+ */ +static int32_t +wlan_crypto_wpa_keymgmt_to_suite(uint32_t keymgmt) +{ + int32_t status = -1; + + switch (keymgmt) { + case WLAN_CRYPTO_KEY_MGMT_NONE: + return WPA_AUTH_KEY_MGMT_NONE; + case WLAN_CRYPTO_KEY_MGMT_IEEE8021X: + return WPA_AUTH_KEY_MGMT_UNSPEC_802_1X; + case WLAN_CRYPTO_KEY_MGMT_PSK: + return WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X; + case WLAN_CRYPTO_KEY_MGMT_CCKM: + return WPA_AUTH_KEY_MGMT_CCKM; + } + + return status; +} +/** + * Convert a WPA cipher selector OUI to an internal + * cipher algorithm. Where appropriate we also + * record any key length. + */ +static int32_t wlan_crypto_wpa_suite_to_cipher(uint8_t *sel) +{ + uint32_t w = LE_READ_4(sel); + int32_t status = -1; + + switch (w) { + case WPA_CIPHER_SUITE_TKIP: + return WLAN_CRYPTO_CIPHER_TKIP; + case WPA_CIPHER_SUITE_CCMP: + return WLAN_CRYPTO_CIPHER_AES_CCM; + case WPA_CIPHER_SUITE_NONE: + return WLAN_CRYPTO_CIPHER_NONE; + } + + return status; +} + +/* + * Convert a WPA key management/authentication algorithm + * to an internal code. + */ +static int32_t wlan_crypto_wpa_suite_to_keymgmt(uint8_t *sel) +{ + uint32_t w = LE_READ_4(sel); + int32_t status = -1; + + switch (w) { + case WPA_AUTH_KEY_MGMT_UNSPEC_802_1X: + return WLAN_CRYPTO_KEY_MGMT_IEEE8021X; + case WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X: + return WLAN_CRYPTO_KEY_MGMT_PSK; + case WPA_AUTH_KEY_MGMT_CCKM: + return WLAN_CRYPTO_KEY_MGMT_CCKM; + case WPA_AUTH_KEY_MGMT_NONE: + return WLAN_CRYPTO_KEY_MGMT_NONE; + } + return status; +} + +/* + * Convert a RSN cipher selector OUI to an internal + * cipher algorithm. Where appropriate we also + * record any key length. 
+ */ +static int32_t wlan_crypto_rsn_suite_to_cipher(uint8_t *sel) +{ + uint32_t w = LE_READ_4(sel); + int32_t status = -1; + + switch (w) { + case RSN_CIPHER_SUITE_TKIP: + return WLAN_CRYPTO_CIPHER_TKIP; + case RSN_CIPHER_SUITE_CCMP: + return WLAN_CRYPTO_CIPHER_AES_CCM; + case RSN_CIPHER_SUITE_CCMP_256: + return WLAN_CRYPTO_CIPHER_AES_CCM_256; + case RSN_CIPHER_SUITE_GCMP: + return WLAN_CRYPTO_CIPHER_AES_GCM; + case RSN_CIPHER_SUITE_GCMP_256: + return WLAN_CRYPTO_CIPHER_AES_GCM_256; + case RSN_CIPHER_SUITE_AES_CMAC: + return WLAN_CRYPTO_CIPHER_AES_CMAC; + case RSN_CIPHER_SUITE_BIP_CMAC_256: + return WLAN_CRYPTO_CIPHER_AES_CMAC_256; + case RSN_CIPHER_SUITE_BIP_GMAC_128: + return WLAN_CRYPTO_CIPHER_AES_GMAC; + case RSN_CIPHER_SUITE_BIP_GMAC_256: + return WLAN_CRYPTO_CIPHER_AES_GMAC_256; + case RSN_CIPHER_SUITE_NONE: + return WLAN_CRYPTO_CIPHER_NONE; + } + + return status; +} +/* + * Convert an RSN key management/authentication algorithm + * to an internal code. + */ +static int32_t wlan_crypto_rsn_suite_to_keymgmt(uint8_t *sel) +{ + uint32_t w = LE_READ_4(sel); + int32_t status = -1; + + switch (w) { + case RSN_AUTH_KEY_MGMT_UNSPEC_802_1X: + return WLAN_CRYPTO_KEY_MGMT_IEEE8021X; + case RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X: + return WLAN_CRYPTO_KEY_MGMT_PSK; + case RSN_AUTH_KEY_MGMT_FT_802_1X: + return WLAN_CRYPTO_KEY_MGMT_FT_IEEE8021X; + case RSN_AUTH_KEY_MGMT_FT_PSK: + return WLAN_CRYPTO_KEY_MGMT_FT_PSK; + case RSN_AUTH_KEY_MGMT_802_1X_SHA256: + return WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SHA256; + case RSN_AUTH_KEY_MGMT_PSK_SHA256: + return WLAN_CRYPTO_KEY_MGMT_PSK_SHA256; + case RSN_AUTH_KEY_MGMT_SAE: + return WLAN_CRYPTO_KEY_MGMT_SAE; + case RSN_AUTH_KEY_MGMT_FT_SAE: + return WLAN_CRYPTO_KEY_MGMT_FT_SAE; + case RSN_AUTH_KEY_MGMT_802_1X_SUITE_B: + return WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B; + case RSN_AUTH_KEY_MGMT_802_1X_SUITE_B_192: + return WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SUITE_B_192; + case RSN_AUTH_KEY_MGMT_CCKM: + return WLAN_CRYPTO_KEY_MGMT_CCKM; + case 
RSN_AUTH_KEY_MGMT_OSEN: + return WLAN_CRYPTO_KEY_MGMT_OSEN; + case RSN_AUTH_KEY_MGMT_FILS_SHA256: + return WLAN_CRYPTO_KEY_MGMT_FILS_SHA256; + case RSN_AUTH_KEY_MGMT_FILS_SHA384: + return WLAN_CRYPTO_KEY_MGMT_FILS_SHA384; + case RSN_AUTH_KEY_MGMT_FT_FILS_SHA256: + return WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA256; + case RSN_AUTH_KEY_MGMT_FT_FILS_SHA384: + return WLAN_CRYPTO_KEY_MGMT_FT_FILS_SHA384; + case RSN_AUTH_KEY_MGMT_OWE: + return WLAN_CRYPTO_KEY_MGMT_OWE; + case RSN_AUTH_KEY_MGMT_DPP: + return WLAN_CRYPTO_KEY_MGMT_DPP; + } + + return status; +} + +/** + * wlan_crypto_wpaie_check - called by mlme to check the wpaie + * @crypto params: crypto params + * @iebuf: ie buffer + * + * This function gets called by mlme to check the contents of wpa is + * matching with given crypto params + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_wpaie_check(struct wlan_crypto_params *crypto_params, + uint8_t *frm){ + uint8_t len = frm[1]; + int32_t w; + int n; + + /* + * Check the length once for fixed parts: OUI, type, + * version, mcast cipher, and 2 selector counts. + * Other, variable-length data, must be checked separately. 
+ */ + RESET_AUTHMODE(crypto_params); + SET_AUTHMODE(crypto_params, WLAN_CRYPTO_AUTH_WPA); + + if (len < 14) + return QDF_STATUS_E_INVAL; + + frm += 6, len -= 4; + + w = LE_READ_2(frm); + if (w != WPA_VERSION) + return QDF_STATUS_E_INVAL; + + frm += 2, len -= 2; + + /* multicast/group cipher */ + RESET_MCAST_CIPHERS(crypto_params); + w = wlan_crypto_wpa_suite_to_cipher(frm); + if (w < 0) + return QDF_STATUS_E_INVAL; + SET_MCAST_CIPHER(crypto_params, w); + frm += 4, len -= 4; + + /* unicast ciphers */ + n = LE_READ_2(frm); + frm += 2, len -= 2; + if (len < n*4+2) + return QDF_STATUS_E_INVAL; + + RESET_UCAST_CIPHERS(crypto_params); + for (; n > 0; n--) { + w = wlan_crypto_wpa_suite_to_cipher(frm); + if (w < 0) + return QDF_STATUS_E_INVAL; + SET_UCAST_CIPHER(crypto_params, w); + frm += 4, len -= 4; + } + + if (!crypto_params->ucastcipherset) + return QDF_STATUS_E_INVAL; + + /* key management algorithms */ + n = LE_READ_2(frm); + frm += 2, len -= 2; + if (len < n*4) + return QDF_STATUS_E_INVAL; + + w = 0; + RESET_KEY_MGMT(crypto_params); + for (; n > 0; n--) { + w = wlan_crypto_wpa_suite_to_keymgmt(frm); + if (w < 0) + return QDF_STATUS_E_INVAL; + SET_KEY_MGMT(crypto_params, w); + frm += 4, len -= 4; + } + + /* optional capabilities */ + if (len >= 2) { + crypto_params->rsn_caps = LE_READ_2(frm); + frm += 2, len -= 2; + } + + return 0; +} + +/** + * wlan_crypto_rsnie_check - called by mlme to check the rsnie + * @crypto params: crypto params + * @iebuf: ie buffer + * + * This function gets called by mlme to check the contents of wpa is + * matching with given crypto params + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_rsnie_check(struct wlan_crypto_params *crypto_params, + uint8_t *frm){ + uint8_t len = frm[1]; + int32_t w; + int n; + + /* Check the length once for fixed parts: OUI, type & version */ + if (len < 2) + return QDF_STATUS_E_INVAL; + + /* initialize crypto params */ + qdf_mem_zero(crypto_params, sizeof(struct 
wlan_crypto_params)); + + SET_AUTHMODE(crypto_params, WLAN_CRYPTO_AUTH_RSNA); + + frm += 2; + /* NB: iswapoui already validated the OUI and type */ + w = LE_READ_2(frm); + if (w != RSN_VERSION) + return QDF_STATUS_E_INVAL; + + frm += 2, len -= 2; + + if (!len) { + /* set defaults */ + /* default group cipher CCMP-128 */ + SET_MCAST_CIPHER(crypto_params, WLAN_CRYPTO_CIPHER_AES_CCM); + /* default ucast cipher CCMP-128 */ + SET_UCAST_CIPHER(crypto_params, WLAN_CRYPTO_CIPHER_AES_CCM); + /* default key mgmt 8021x */ + SET_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_IEEE8021X); + return QDF_STATUS_SUCCESS; + } else if (len < 4) { + return QDF_STATUS_E_INVAL; + } + + /* multicast/group cipher */ + w = wlan_crypto_rsn_suite_to_cipher(frm); + if (w < 0) + return QDF_STATUS_E_INVAL; + else { + SET_MCAST_CIPHER(crypto_params, w); + frm += 4, len -= 4; + } + + if (crypto_params->mcastcipherset == 0) + return QDF_STATUS_E_INVAL; + + if (!len) { + /* default ucast cipher CCMP-128 */ + SET_UCAST_CIPHER(crypto_params, WLAN_CRYPTO_CIPHER_AES_CCM); + /* default key mgmt 8021x */ + SET_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_IEEE8021X); + return QDF_STATUS_SUCCESS; + } else if (len < 2) { + return QDF_STATUS_E_INVAL; + } + + /* unicast ciphers */ + n = LE_READ_2(frm); + frm += 2, len -= 2; + if (n) { + if (len < n * 4) + return QDF_STATUS_E_INVAL; + + for (; n > 0; n--) { + w = wlan_crypto_rsn_suite_to_cipher(frm); + if (w < 0) + return QDF_STATUS_E_INVAL; + SET_UCAST_CIPHER(crypto_params, w); + frm += 4, len -= 4; + } + } else { + /* default ucast cipher CCMP-128 */ + SET_UCAST_CIPHER(crypto_params, WLAN_CRYPTO_CIPHER_AES_CCM); + } + + if (crypto_params->ucastcipherset == 0) + return QDF_STATUS_E_INVAL; + + if (!len) { + /* default key mgmt 8021x */ + SET_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_IEEE8021X); + return QDF_STATUS_SUCCESS; + } else if (len < 2) { + return QDF_STATUS_E_INVAL; + } + + /* key management algorithms */ + n = LE_READ_2(frm); + frm += 2, len -= 2; + 
+ if (n) { + if (len < n * 4) + return QDF_STATUS_E_INVAL; + + for (; n > 0; n--) { + w = wlan_crypto_rsn_suite_to_keymgmt(frm); + if (w < 0) + return QDF_STATUS_E_INVAL; + SET_KEY_MGMT(crypto_params, w); + frm += 4, len -= 4; + } + } else { + /* default key mgmt 8021x */ + SET_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_IEEE8021X); + } + + if (crypto_params->key_mgmt == 0) + return QDF_STATUS_E_INVAL; + + /* optional capabilities */ + if (len >= 2) { + crypto_params->rsn_caps = LE_READ_2(frm); + frm += 2, len -= 2; + } else if (len && len < 2) { + return QDF_STATUS_E_INVAL; + } + + + /* PMKID */ + if (len >= 2) { + n = LE_READ_2(frm); + frm += 2, len -= 2; + if (n && len) { + if (len >= n * PMKID_LEN) + frm += (n * PMKID_LEN), len -= (n * PMKID_LEN); + else + return QDF_STATUS_E_INVAL; + } else if (n && !len) { + return QDF_STATUS_E_INVAL; + } + /*TODO: Save pmkid in params for further reference */ + } + + /* BIP */ + if (!len && + (crypto_params->rsn_caps & WLAN_CRYPTO_RSN_CAP_MFP_ENABLED)) { + /* when no BIP mentioned and MFP capable use CMAC as default*/ + SET_MGMT_CIPHER(crypto_params, WLAN_CRYPTO_CIPHER_AES_CMAC); + return QDF_STATUS_SUCCESS; + } else if (len >= 4) { + w = wlan_crypto_rsn_suite_to_cipher(frm); + frm += 4, len -= 4; + SET_MGMT_CIPHER(crypto_params, w); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_build_wpaie - called by mlme to build wpaie + * @vdev: vdev + * @iebuf: ie buffer + * + * This function gets called by mlme to build wpaie from given vdev + * + * Return: end of buffer + */ +uint8_t *wlan_crypto_build_wpaie(struct wlan_objmgr_vdev *vdev, + uint8_t *iebuf){ + uint8_t *frm = iebuf; + uint8_t *selcnt; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + + if (!frm) + return NULL; + + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, &crypto_priv); + + if (!crypto_params) + return NULL; + + *frm++ = WLAN_ELEMID_VENDOR; + *frm++ = 0; + WLAN_CRYPTO_ADDSELECTOR(frm, WPA_TYPE_OUI); + 
WLAN_CRYPTO_ADDSHORT(frm, WPA_VERSION); + + + /* multicast cipher */ + if (MCIPHER_IS_TKIP(crypto_params)) { + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_wpa_cipher_to_suite( + WLAN_CRYPTO_CIPHER_TKIP)); + } else if (MCIPHER_IS_CCMP128(crypto_params)) { + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_wpa_cipher_to_suite( + WLAN_CRYPTO_CIPHER_AES_CCM)); + } + /* unicast cipher list */ + selcnt = frm; + WLAN_CRYPTO_ADDSHORT(frm, 0); + /* do not use CCMP unicast cipher in WPA mode */ + if (UCIPHER_IS_TKIP(crypto_params)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_wpa_cipher_to_suite( + WLAN_CRYPTO_CIPHER_TKIP)); + } + if (UCIPHER_IS_CCMP128(crypto_params)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_wpa_cipher_to_suite( + WLAN_CRYPTO_CIPHER_AES_CCM)); + } + + /* authenticator selector list */ + selcnt = frm; + WLAN_CRYPTO_ADDSHORT(frm, 0); + + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_IEEE8021X)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_wpa_keymgmt_to_suite( + WLAN_CRYPTO_KEY_MGMT_IEEE8021X)); + } else if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_PSK)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_wpa_keymgmt_to_suite( + WLAN_CRYPTO_KEY_MGMT_PSK)); + } else if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_CCKM)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_wpa_keymgmt_to_suite( + WLAN_CRYPTO_KEY_MGMT_CCKM)); + } else { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_wpa_keymgmt_to_suite( + WLAN_CRYPTO_KEY_MGMT_NONE)); + } + /* calculate element length */ + iebuf[1] = frm - iebuf - 2; + + return frm; +} + +/** + * wlan_crypto_build_rsnie - called by mlme to build rsnie + * @vdev: vdev + * @iebuf: ie buffer + * + * This function gets called by mlme to build rsnie from given vdev + * + * Return: end of buffer + */ +uint8_t *wlan_crypto_build_rsnie(struct wlan_objmgr_vdev *vdev, + uint8_t *iebuf){ + uint8_t *frm = iebuf; + uint8_t *selcnt; + struct 
wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + + if (!frm) { + return NULL; + } + + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, &crypto_priv); + + if (!crypto_params) { + return NULL; + } + + *frm++ = WLAN_ELEMID_RSN; + *frm++ = 0; + WLAN_CRYPTO_ADDSHORT(frm, RSN_VERSION); + + + /* multicast cipher */ + if (MCIPHER_IS_TKIP(crypto_params)) { + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_cipher_to_suite( + WLAN_CRYPTO_CIPHER_TKIP)); + } else if (MCIPHER_IS_CCMP128(crypto_params)) { + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_cipher_to_suite( + WLAN_CRYPTO_CIPHER_AES_CCM)); + } else if (MCIPHER_IS_CCMP256(crypto_params)) { + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_cipher_to_suite( + WLAN_CRYPTO_CIPHER_AES_CCM_256)); + } else if (MCIPHER_IS_GCMP128(crypto_params)) { + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_cipher_to_suite( + WLAN_CRYPTO_CIPHER_AES_GCM)); + } else if (MCIPHER_IS_GCMP256(crypto_params)) { + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_cipher_to_suite( + WLAN_CRYPTO_CIPHER_AES_GCM_256)); + } + + /* unicast cipher list */ + selcnt = frm; + WLAN_CRYPTO_ADDSHORT(frm, 0); + /* do not use CCMP unicast cipher in WPA mode */ + if (UCIPHER_IS_TKIP(crypto_params)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_cipher_to_suite( + WLAN_CRYPTO_CIPHER_TKIP)); + } + if (UCIPHER_IS_CCMP128(crypto_params)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_cipher_to_suite( + WLAN_CRYPTO_CIPHER_AES_CCM)); + } + if (UCIPHER_IS_CCMP256(crypto_params)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_cipher_to_suite( + WLAN_CRYPTO_CIPHER_AES_CCM_256)); + } + + if (UCIPHER_IS_GCMP128(crypto_params)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_cipher_to_suite( + WLAN_CRYPTO_CIPHER_AES_GCM)); + } + if (UCIPHER_IS_GCMP256(crypto_params)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_cipher_to_suite( + 
WLAN_CRYPTO_CIPHER_AES_GCM_256)); + } + + + /* authenticator selector list */ + selcnt = frm; + WLAN_CRYPTO_ADDSHORT(frm, 0); + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_CCKM)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_keymgmt_to_suite( + WLAN_CRYPTO_KEY_MGMT_CCKM)); + } else { + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_PSK)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_keymgmt_to_suite( + WLAN_CRYPTO_KEY_MGMT_PSK)); + } + if (HAS_KEY_MGMT(crypto_params, + WLAN_CRYPTO_KEY_MGMT_IEEE8021X)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_keymgmt_to_suite( + WLAN_CRYPTO_KEY_MGMT_IEEE8021X)); + } + if (HAS_KEY_MGMT(crypto_params, + WLAN_CRYPTO_KEY_MGMT_FT_IEEE8021X)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_keymgmt_to_suite( + WLAN_CRYPTO_KEY_MGMT_FT_IEEE8021X)); + } + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_FT_PSK)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_keymgmt_to_suite( + WLAN_CRYPTO_KEY_MGMT_FT_PSK)); + } + if (HAS_KEY_MGMT(crypto_params, + WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SHA256)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_keymgmt_to_suite( + WLAN_CRYPTO_KEY_MGMT_IEEE8021X_SHA256)); + } + if (HAS_KEY_MGMT(crypto_params, + WLAN_CRYPTO_KEY_MGMT_PSK_SHA256)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_keymgmt_to_suite( + WLAN_CRYPTO_KEY_MGMT_PSK_SHA256)); + } + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_OSEN)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_keymgmt_to_suite( + WLAN_CRYPTO_KEY_MGMT_OSEN)); + } + } + + WLAN_CRYPTO_ADDSHORT(frm, crypto_params->rsn_caps); + /* optional capabilities */ + if (crypto_params->rsn_caps & WLAN_CRYPTO_RSN_CAP_MFP_ENABLED) { + /* PMK list */ + WLAN_CRYPTO_ADDSHORT(frm, 0); + if (HAS_MGMT_CIPHER(crypto_params, + WLAN_CRYPTO_CIPHER_AES_CMAC)) { + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_cipher_to_suite( + 
WLAN_CRYPTO_CIPHER_AES_CMAC)); + } + if (HAS_MGMT_CIPHER(crypto_params, + WLAN_CRYPTO_CIPHER_AES_GMAC)) { + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_cipher_to_suite( + WLAN_CRYPTO_CIPHER_AES_GMAC)); + } + if (HAS_MGMT_CIPHER(crypto_params, + WLAN_CRYPTO_CIPHER_AES_CMAC_256)) { + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_cipher_to_suite( + WLAN_CRYPTO_CIPHER_AES_CMAC_256)); + } + + if (HAS_MGMT_CIPHER(crypto_params, + WLAN_CRYPTO_CIPHER_AES_GMAC_256)) { + WLAN_CRYPTO_ADDSELECTOR(frm, + wlan_crypto_rsn_cipher_to_suite( + WLAN_CRYPTO_CIPHER_AES_GMAC_256)); + } + } + + /* calculate element length */ + iebuf[1] = frm - iebuf - 2; + + return frm; +} + +bool wlan_crypto_rsn_info(struct wlan_objmgr_vdev *vdev, + struct wlan_crypto_params *crypto_params){ + struct wlan_crypto_params *my_crypto_params; + my_crypto_params = wlan_crypto_vdev_get_crypto_params(vdev); + + if (!my_crypto_params) + return false; + /* + * Check peer's pairwise ciphers. + * At least one must match with our unicast cipher + */ + if (!UCAST_CIPHER_MATCH(crypto_params, my_crypto_params)) + return false; + /* + * Check peer's group cipher is our enabled multicast cipher. + */ + if (!MCAST_CIPHER_MATCH(crypto_params, my_crypto_params)) + return false; + /* + * Check peer's key management class set (PSK or UNSPEC) + */ + if (!KEY_MGMTSET_MATCH(crypto_params, my_crypto_params)) + return false; + + return true; +} + +/* + * Convert an WAPI CIPHER suite to to an internal code. + */ +static int32_t wlan_crypto_wapi_suite_to_cipher(uint8_t *sel) +{ + uint32_t w = LE_READ_4(sel); + int32_t status = -1; + + switch (w) { + case (WLAN_WAPI_SEL(WLAN_CRYPTO_WAPI_SMS4_CIPHER)): + return WLAN_CRYPTO_CIPHER_WAPI_SMS4; + } + + return status; +} + +/* + * Convert an WAPI key management/authentication algorithm + * to an internal code. 
+ */ +static int32_t wlan_crypto_wapi_keymgmt(u_int8_t *sel) +{ + uint32_t w = LE_READ_4(sel); + int32_t status = -1; + + switch (w) { + case (WLAN_WAPI_SEL(WLAN_WAI_PSK)): + return WLAN_CRYPTO_KEY_MGMT_WAPI_PSK; + case (WLAN_WAPI_SEL(WLAN_WAI_CERT_OR_SMS4)): + return WLAN_CRYPTO_KEY_MGMT_WAPI_CERT; + } + + return status; +} +/** + * wlan_crypto_wapiie_check - called by mlme to check the wapiie + * @crypto params: crypto params + * @iebuf: ie buffer + * + * This function gets called by mlme to check the contents of wapi is + * matching with given crypto params + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_wapiie_check(struct wlan_crypto_params *crypto_params, + uint8_t *frm) +{ + uint8_t len = frm[1]; + int32_t w; + int n; + + /* + * Check the length once for fixed parts: OUI, type, + * version, mcast cipher, and 2 selector counts. + * Other, variable-length data, must be checked separately. + */ + RESET_AUTHMODE(crypto_params); + SET_AUTHMODE(crypto_params, WLAN_CRYPTO_AUTH_WAPI); + + if (len < WLAN_CRYPTO_WAPI_IE_LEN) + return QDF_STATUS_E_INVAL; + + + frm += 2; + + w = LE_READ_2(frm); + frm += 2, len -= 2; + if (w != WAPI_VERSION) + return QDF_STATUS_E_INVAL; + + n = LE_READ_2(frm); + frm += 2, len -= 2; + if (len < n*4+2) + return QDF_STATUS_E_INVAL; + + RESET_KEY_MGMT(crypto_params); + for (; n > 0; n--) { + w = wlan_crypto_wapi_keymgmt(frm); + if (w < 0) + return QDF_STATUS_E_INVAL; + + SET_KEY_MGMT(crypto_params, w); + frm += 4, len -= 4; + } + + /* unicast ciphers */ + n = LE_READ_2(frm); + frm += 2, len -= 2; + if (len < n*4+2) + return QDF_STATUS_E_INVAL; + + RESET_UCAST_CIPHERS(crypto_params); + for (; n > 0; n--) { + w = wlan_crypto_wapi_suite_to_cipher(frm); + if (w < 0) + return QDF_STATUS_E_INVAL; + SET_UCAST_CIPHER(crypto_params, w); + frm += 4, len -= 4; + } + + if (!crypto_params->ucastcipherset) + return QDF_STATUS_E_INVAL; + + /* multicast/group cipher */ + RESET_MCAST_CIPHERS(crypto_params); + w = 
wlan_crypto_wapi_suite_to_cipher(frm); + + if (w < 0) + return QDF_STATUS_E_INVAL; + + SET_MCAST_CIPHER(crypto_params, w); + frm += 4, len -= 4; + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_build_wapiie - called by mlme to build wapi ie + * @vdev: vdev + * @iebuf: ie buffer + * + * This function gets called by mlme to build wapi ie from given vdev + * + * Return: end of buffer + */ +uint8_t *wlan_crypto_build_wapiie(struct wlan_objmgr_vdev *vdev, + uint8_t *iebuf) +{ + uint8_t *frm; + uint8_t *selcnt; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + + frm = iebuf; + if (!frm) { + qdf_print("%s[%d] ie buffer NULL\n", __func__, __LINE__); + return NULL; + } + + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, &crypto_priv); + + if (!crypto_params) { + qdf_print("%s[%d] crypto_params NULL\n", __func__, __LINE__); + return NULL; + } + + *frm++ = WLAN_ELEMID_WAPI; + *frm++ = 0; + + WLAN_CRYPTO_ADDSHORT(frm, WAPI_VERSION); + + /* authenticator selector list */ + selcnt = frm; + WLAN_CRYPTO_ADDSHORT(frm, 0); + + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_WAPI_PSK)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + WLAN_WAPI_SEL(WLAN_WAI_PSK)); + } + + if (HAS_KEY_MGMT(crypto_params, WLAN_CRYPTO_KEY_MGMT_WAPI_CERT)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + WLAN_WAPI_SEL(WLAN_WAI_CERT_OR_SMS4)); + } + + /* unicast cipher list */ + selcnt = frm; + WLAN_CRYPTO_ADDSHORT(frm, 0); + + if (UCIPHER_IS_SMS4(crypto_params)) { + selcnt[0]++; + WLAN_CRYPTO_ADDSELECTOR(frm, + WLAN_WAPI_SEL(WLAN_CRYPTO_WAPI_SMS4_CIPHER)); + } + + WLAN_CRYPTO_ADDSELECTOR(frm, + WLAN_WAPI_SEL(WLAN_CRYPTO_WAPI_SMS4_CIPHER)); + + /* optional capabilities */ + WLAN_CRYPTO_ADDSHORT(frm, crypto_params->rsn_caps); + + /* calculate element length */ + iebuf[1] = frm - iebuf - 2; + + return frm; + +} + +/** + * wlan_crypto_pn_check - called by data patch for PN check + * @vdev: vdev + * @wbuf: wbuf + * + * This function gets called by 
data patch for PN check + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_crypto_pn_check(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t wbuf){ + /* Need to check is there real requirement for this function + * as PN check is already handled in decap function. + */ + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_vdev_get_crypto_params - called by mlme to get crypto params + * @vdev:vdev + * + * This function gets called by mlme to get crypto params + * + * Return: wlan_crypto_params or NULL in case of failure + */ +struct wlan_crypto_params *wlan_crypto_vdev_get_crypto_params( + struct wlan_objmgr_vdev *vdev){ + struct wlan_crypto_comp_priv *crypto_priv; + + return wlan_crypto_vdev_get_comp_params(vdev, &crypto_priv); +} + +/** + * wlan_crypto_peer_get_crypto_params - called by mlme to get crypto params + * @peer:peer + * + * This function gets called by mlme to get crypto params + * + * Return: wlan_crypto_params or NULL in case of failure + */ +struct wlan_crypto_params *wlan_crypto_peer_get_crypto_params( + struct wlan_objmgr_peer *peer){ + struct wlan_crypto_comp_priv *crypto_priv; + + return wlan_crypto_peer_get_comp_params(peer, &crypto_priv); +} + + +QDF_STATUS wlan_crypto_set_peer_wep_keys(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer) +{ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_comp_priv *sta_crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key; + struct wlan_crypto_key *sta_key; + struct wlan_crypto_cipher *cipher_table; + struct wlan_objmgr_psoc *psoc; + uint8_t *mac_addr; + int i; + enum QDF_OPMODE opmode; + + if (!vdev) + return QDF_STATUS_E_NULL_VALUE; + + if (!peer) { + qdf_print("%s[%d] peer NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + opmode = wlan_vdev_mlme_get_opmode(vdev); + psoc = wlan_vdev_get_psoc(vdev); + + if (!psoc) { + qdf_print("%s[%d] psoc NULL\n", __func__, __LINE__); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_peer_obj_lock(peer); + 
mac_addr = wlan_peer_get_macaddr(peer); + wlan_peer_obj_unlock(peer); + + crypto_params = wlan_crypto_vdev_get_comp_params(vdev, + &crypto_priv); + if (crypto_priv == NULL) { + qdf_print("%s[%d] crypto_priv NULL\n", __func__, __LINE__); + return QDF_STATUS_E_NULL_VALUE; + } + + /* push only valid static WEP keys from vap */ + if (AUTH_IS_8021X(crypto_params)) + return QDF_STATUS_E_INVAL; + + if (opmode == QDF_STA_MODE) { + peer = wlan_vdev_get_bsspeer(vdev); + if (!peer) { + qdf_print("%s[%d] peer NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + } + + wlan_crypto_peer_get_comp_params(peer, &sta_crypto_priv); + if (sta_crypto_priv == NULL) { + qdf_print("%s[%d] sta priv is null\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + for (i = 0; i < WLAN_CRYPTO_MAXKEYIDX; i++) { + if (crypto_priv->key[i]) { + key = crypto_priv->key[i]; + if (!key || !key->valid) + continue; + + cipher_table = (struct wlan_crypto_cipher *) + key->cipher_table; + + if (cipher_table->cipher == WLAN_CRYPTO_CIPHER_WEP) { + sta_key = qdf_mem_malloc( + sizeof(struct wlan_crypto_key)); + if (!sta_key) { + qdf_print("%s[%d] key alloc failed\n", + __func__, __LINE__); + return QDF_STATUS_E_NOMEM; + } + sta_crypto_priv->key[i] = sta_key; + qdf_mem_copy(sta_key, key, + sizeof(struct wlan_crypto_key)); + + sta_key->flags &= ~WLAN_CRYPTO_KEY_DEFAULT; + + if (crypto_priv->def_tx_keyid == i) { + sta_key->flags + |= WLAN_CRYPTO_KEY_DEFAULT; + sta_crypto_priv->def_tx_keyid = + crypto_priv->def_tx_keyid; + } + /* setting the broadcast/multicast key for sta*/ + if (opmode == QDF_STA_MODE || + opmode == QDF_IBSS_MODE){ + if (WLAN_CRYPTO_TX_OPS_SETKEY(psoc)) { + WLAN_CRYPTO_TX_OPS_SETKEY(psoc)( + vdev, sta_key, mac_addr, + cipher_table->cipher); + } + } + + /* setting unicast key */ + sta_key->flags &= ~WLAN_CRYPTO_KEY_GROUP; + if (WLAN_CRYPTO_TX_OPS_SETKEY(psoc)) { + WLAN_CRYPTO_TX_OPS_SETKEY(psoc)(vdev, + sta_key, mac_addr, + cipher_table->cipher); + } + } + } + } + + return 
QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_register_crypto_rx_ops - set crypto_rx_ops + * @crypto_rx_ops: crypto_rx_ops + * + * This function gets called by object manger to register crypto rx ops. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_crypto_register_crypto_rx_ops( + struct wlan_lmac_if_crypto_rx_ops *crypto_rx_ops){ + crypto_rx_ops->crypto_encap = wlan_crypto_encap; + crypto_rx_ops->crypto_decap = wlan_crypto_decap; + crypto_rx_ops->crypto_enmic = wlan_crypto_enmic; + crypto_rx_ops->crypto_demic = wlan_crypto_demic; + crypto_rx_ops->set_peer_wep_keys = wlan_crypto_set_peer_wep_keys; + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_get_crypto_rx_ops - get crypto_rx_ops from psoc + * @psoc: psoc + * + * This function gets called by umac to get the crypto_rx_ops + * + * Return: crypto_rx_ops + */ +struct wlan_lmac_if_crypto_rx_ops *wlan_crypto_get_crypto_rx_ops( + struct wlan_objmgr_psoc *psoc) +{ + + return &(psoc->soc_cb.rx_ops.crypto_rx_ops); +} +qdf_export_symbol(wlan_crypto_get_crypto_rx_ops); + +/** + * wlan_crypto_vdev_has_auth_mode - check authmode for vdev + * @vdev: vdev + * @authvalue: authvalue to be checked + * + * This function check is authvalue passed is set in vdev or not + * + * Return: true or false + */ +bool wlan_crypto_vdev_has_auth_mode(struct wlan_objmgr_vdev *vdev, + wlan_crypto_auth_mode authvalue) +{ + return wlan_crypto_get_param(vdev, WLAN_CRYPTO_PARAM_AUTH_MODE) + & authvalue; +} +qdf_export_symbol(wlan_crypto_vdev_has_auth_mode); + +/** + * wlan_crypto_peer_has_auth_mode - check authmode for peer + * @peer: peer + * @authvalue: authvalue to be checked + * + * This function check is authvalue passed is set in peer or not + * + * Return: true or false + */ +bool wlan_crypto_peer_has_auth_mode(struct wlan_objmgr_peer *peer, + wlan_crypto_auth_mode authvalue) +{ + return wlan_crypto_get_peer_param(peer, WLAN_CRYPTO_PARAM_AUTH_MODE) + & authvalue; +} +qdf_export_symbol(wlan_crypto_peer_has_auth_mode); + +/** + * 
wlan_crypto_vdev_has_ucastcipher - check ucastcipher for vdev + * @vdev: vdev + * @ucastcipher: ucastcipher to be checked + * + * This function check is ucastcipher passed is set in vdev or not + * + * Return: true or false + */ +bool wlan_crypto_vdev_has_ucastcipher(struct wlan_objmgr_vdev *vdev, + wlan_crypto_cipher_type ucastcipher) +{ + return wlan_crypto_get_param(vdev, WLAN_CRYPTO_PARAM_UCAST_CIPHER) + & ucastcipher; +} +qdf_export_symbol(wlan_crypto_vdev_has_ucastcipher); + +/** + * wlan_crypto_peer_has_ucastcipher - check ucastcipher for peer + * @peer: peer + * @ucastcipher: ucastcipher to be checked + * + * This function check is ucastcipher passed is set in peer or not + * + * Return: true or false + */ +bool wlan_crypto_peer_has_ucastcipher(struct wlan_objmgr_peer *peer, + wlan_crypto_cipher_type ucastcipher) +{ + return wlan_crypto_get_peer_param(peer, WLAN_CRYPTO_PARAM_UCAST_CIPHER) + & ucastcipher; +} +qdf_export_symbol(wlan_crypto_peer_has_ucastcipher); + +/** + * wlan_crypto_vdev_has_mcastcipher - check mcastcipher for vdev + * @vdev: vdev + * @mcastcipher: mcastcipher to be checked + * + * This function check is mcastcipher passed is set in vdev or not + * + * Return: true or false + */ +bool wlan_crypto_vdev_has_mcastcipher(struct wlan_objmgr_vdev *vdev, + wlan_crypto_cipher_type mcastcipher) +{ + return wlan_crypto_get_param(vdev, WLAN_CRYPTO_PARAM_MCAST_CIPHER) + & mcastcipher; +} +qdf_export_symbol(wlan_crypto_vdev_has_mcastcipher); + +/** + * wlan_crypto_peer_has_mcastcipher - check mcastcipher for peer + * @peer: peer + * @mcastcipher: mcastcipher to be checked + * + * This function check is mcastcipher passed is set in peer or not + * + * Return: true or false + */ +bool wlan_crypto_peer_has_mcastcipher(struct wlan_objmgr_peer *peer, + wlan_crypto_cipher_type mcastcipher) +{ + return wlan_crypto_get_peer_param(peer, WLAN_CRYPTO_PARAM_UCAST_CIPHER) + & mcastcipher; +} +qdf_export_symbol(wlan_crypto_peer_has_mcastcipher); + +uint8_t 
wlan_crypto_get_peer_fils_aead(struct wlan_objmgr_peer *peer) +{ + struct wlan_crypto_comp_priv *crypto_priv = NULL; + + if (!peer) { + qdf_print(FL("Invalid Input\n")); + return 0; + } + + crypto_priv = wlan_get_peer_crypto_obj(peer); + if (!crypto_priv) { + qdf_print(FL("crypto_priv NULL\n")); + return 0; + } + + return crypto_priv->fils_aead_set; +} + +void +wlan_crypto_set_peer_fils_aead(struct wlan_objmgr_peer *peer, uint8_t value) +{ + struct wlan_crypto_comp_priv *crypto_priv = NULL; + + if (!peer) { + qdf_print(FL("Invalid Input\n")); + return; + } + + crypto_priv = wlan_get_peer_crypto_obj(peer); + if (!crypto_priv) { + qdf_print(FL("crypto_priv NULL\n")); + return; + } + + crypto_priv->fils_aead_set = value; +} + +/** + * wlan_crypto_get_key_header - get header length + * @key: key + * + * This function gets header length based on keytype + * + * Return: header length + */ +uint8_t wlan_crypto_get_key_header(struct wlan_crypto_key *key) +{ + struct wlan_crypto_cipher *cipher_table; + + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + if (cipher_table) + return cipher_table->header; + else + return 0; +} + +qdf_export_symbol(wlan_crypto_get_key_header); + +/** + * wlan_crypto_get_key_trailer - get cipher trailer length + * @key: key + * + * This function gets cipher trailer length based on keytype + * + * Return: cipher trailer length + */ +uint8_t wlan_crypto_get_key_trailer(struct wlan_crypto_key *key) +{ + struct wlan_crypto_cipher *cipher_table; + + cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + if (cipher_table) + return cipher_table->trailer; + else + return 0; +} + +qdf_export_symbol(wlan_crypto_get_key_trailer); + +/** + * wlan_crypto_get_key_miclen - get cipher miclen length + * @key: key + * + * This function gets cipher miclen length based on keytype + * + * Return: cipher miclen length + */ +uint8_t wlan_crypto_get_key_miclen(struct wlan_crypto_key *key) +{ + struct wlan_crypto_cipher *cipher_table; + + 
cipher_table = (struct wlan_crypto_cipher *)key->cipher_table; + if (cipher_table) + return cipher_table->miclen; + else + return 0; +} + +qdf_export_symbol(wlan_crypto_get_key_miclen); + +/** + * wlan_crypto_get_keyid - get keyid from frame + * @data: frame + * + * This function parse frame and returns keyid + * + * Return: keyid + */ +uint16_t wlan_crypto_get_keyid(uint8_t *data, int hdrlen) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data; + uint8_t *iv; + + if (hdr->frame_control[1] & WLAN_FC1_ISWEP) { + iv = data + hdrlen; + /* + * iv[3] is the Key ID octet in the CCMP/TKIP/WEP headers + * Bits 6–7 of the Key ID octet are for the Key ID subfield + */ + return ((iv[3] >> 6) & 0x3); + } else { + return WLAN_CRYPTO_KEYIX_NONE; + } +} + +qdf_export_symbol(wlan_crypto_get_keyid); + +/** + * crypto_plumb_peer_keys - called during radio reset + * @vdev: vdev + * @object: peer + * @arg: psoc + * + * Restore unicast and persta hardware keys + * + * Return: void + */ +static void crypto_plumb_peer_keys(struct wlan_objmgr_vdev *vdev, + void *object, void *arg) { + struct wlan_objmgr_peer *peer = (struct wlan_objmgr_peer *)object; + struct wlan_objmgr_psoc *psoc = (struct wlan_objmgr_psoc *)arg; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key = NULL; + int i; + + if ((NULL == peer) || (NULL == vdev) || (NULL == psoc)) { + QDF_TRACE(QDF_MODULE_ID_CRYPTO, QDF_TRACE_LEVEL_ERROR, + "%s[%d] Peer or vdev or psoc objects are null!\n", + __func__, __LINE__); + return; + } + + crypto_params = wlan_crypto_peer_get_comp_params(peer, + &crypto_priv); + + if (!crypto_priv) { + QDF_TRACE(QDF_MODULE_ID_CRYPTO, QDF_TRACE_LEVEL_ERROR, + "%s[%d] crypto_priv NULL\n", + __func__, __LINE__); + return; + } + + for (i = 0; i < WLAN_CRYPTO_MAXKEYIDX; i++) { + key = crypto_priv->key[i]; + if (key && key->valid) { + if (WLAN_CRYPTO_TX_OPS_SETKEY(psoc)) { + WLAN_CRYPTO_TX_OPS_SETKEY(psoc) + ( + vdev, + key, + 
wlan_peer_get_macaddr(peer), + wlan_crypto_get_key_type(key) + ); + } + } + } +} + +/** + * wlan_crypto_restore_keys - called during radio reset + * @vdev: vdev + * + * Clear and restore keycache, needed for some DA chipsets which put + * random values in keycache when phy reset is triggered + * + * Return: void + */ +void wlan_crypto_restore_keys(struct wlan_objmgr_vdev *vdev) +{ + int i; + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_params; + struct wlan_crypto_key *key; + uint8_t macaddr[WLAN_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_objmgr_psoc *psoc = NULL; + + pdev = wlan_vdev_get_pdev(vdev); + psoc = wlan_vdev_get_psoc(vdev); + if (NULL == pdev) { + QDF_TRACE(QDF_MODULE_ID_CRYPTO, QDF_TRACE_LEVEL_ERROR, + "%s[%d] pdev is NULL\n", + __func__, __LINE__); + return; + } + if (NULL == psoc) { + QDF_TRACE(QDF_MODULE_ID_CRYPTO, QDF_TRACE_LEVEL_ERROR, + "%s[%d] psoc is NULL\n", + __func__, __LINE__); + return; + } + + /* TBD: QWRAP key restore*/ + /* crypto is on */ + if (wlan_vdev_mlme_feat_cap_get(vdev, WLAN_VDEV_F_PRIVACY)) { + /* restore static shared keys */ + for (i = 0; i < WLAN_CRYPTO_MAXKEYIDX; i++) { + crypto_params = wlan_crypto_vdev_get_comp_params + ( + vdev, + &crypto_priv + ); + if (!crypto_priv) { + QDF_TRACE(QDF_MODULE_ID_CRYPTO, + QDF_TRACE_LEVEL_ERROR, + "%s[%d] crypto_priv is NULL\n", + __func__, __LINE__); + return; + } + key = crypto_priv->key[i]; + if (key && key->valid) { + if (WLAN_CRYPTO_TX_OPS_SETKEY(psoc)) { + WLAN_CRYPTO_TX_OPS_SETKEY(psoc) + ( + vdev, + key, + macaddr, + wlan_crypto_get_key_type(key) + ); + } + } + } + + wlan_objmgr_iterate_peerobj_list(vdev, + crypto_plumb_peer_keys, + psoc, + WLAN_CRYPTO_ID); + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_main.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_main.c new file mode 100644 index 
0000000000000000000000000000000000000000..2ae5adca43547cf752067a9b920661808d318a2d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_main.c @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public API intialization of crypto service with object manager + */ +#include +#include "wlan_crypto_main_i.h" +#include "wlan_crypto_main.h" + +/** + * wlan_crypto_init - Init the crypto service with object manager + * Called from umac init context. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_init(void) +{ + return __wlan_crypto_init(); +} + +/** + * wlan_crypto_deinit - Deinit the crypto service with object manager + * Called from umac deinit context. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_deinit(void) +{ + return __wlan_crypto_deinit(); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_main_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_main_i.h new file mode 100644 index 0000000000000000000000000000000000000000..49aa80a19bba9d9086d4ddbc17503e939326e08f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_main_i.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Private API for crypto service with object manager handler + */ +#ifndef _WLAN_CRYPTO_MAIN_I_H_ +#define _WLAN_CRYPTO_MAIN_I_H_ + +/** + * wlan_crypto_init - Init the crypto service with object manager + * Called from umac init context. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS __wlan_crypto_init(void); + +/** + * wlan_crypto_deinit - Deinit the crypto service with object manager + * Called from umac deinit context. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS __wlan_crypto_deinit(void); + + +#endif /* end of _WLAN_CRYPTO_MAIN_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_none.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_none.c new file mode 100644 index 0000000000000000000000000000000000000000..cc4a68d6e2face54a36265998ae81914b6e8426c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_none.c @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
DOC: 'none' (null) cipher implementation for the crypto service
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public API intialization of crypto service with object manager + */ +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "wlan_crypto_global_def.h" +#include "wlan_crypto_global_api.h" +#include "wlan_crypto_def_i.h" +#include "wlan_crypto_main_i.h" +#include "wlan_crypto_obj_mgr_i.h" +#include "wlan_crypto_fils_api.h" + + +extern const struct wlan_crypto_cipher *wep_register(void); +extern const struct wlan_crypto_cipher *tkip_register(void); +extern const struct wlan_crypto_cipher *ccmp_register(void); +extern const struct wlan_crypto_cipher *ccmp256_register(void); +extern const struct wlan_crypto_cipher *gcmp_register(void); +extern const struct wlan_crypto_cipher *gcmp256_register(void); +extern const struct wlan_crypto_cipher *wapi_register(void); + +extern const struct wlan_crypto_cipher + *wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_MAX]; + +static QDF_STATUS wlan_crypto_register_all_ciphers( + struct wlan_crypto_params *crypto_param) +{ + + if (HAS_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_WEP)) { + wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_WEP] + = wep_register(); + } + if (HAS_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_TKIP_MIC)) { + wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_TKIP] + = tkip_register(); + } + if (HAS_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_AES)) { + wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_AES_CCM] + = ccmp_register(); + 
wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_AES_CCM_256] + = ccmp256_register(); + wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_AES_GCM] + = gcmp_register(); + wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_AES_GCM_256] + = gcmp256_register(); + } + if (HAS_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_WAPI_SMS4)) { + wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_WAPI_SMS4] + = wapi_register(); + } + if (HAS_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_FILS_AEAD)) { + wlan_crypto_cipher_ops[WLAN_CRYPTO_CIPHER_FILS_AEAD] + = fils_register(); + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS wlan_crypto_psoc_obj_create_handler( + struct wlan_objmgr_psoc *psoc, + void *arg) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS wlan_crypto_pdev_obj_create_handler( + struct wlan_objmgr_pdev *pdev, + void *arg) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS wlan_crypto_vdev_obj_create_handler( + struct wlan_objmgr_vdev *vdev, + void *arg) +{ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_objmgr_pdev *pdev; + struct wlan_crypto_params *crypto_param; + QDF_STATUS status; + + if (!vdev) + return QDF_STATUS_E_INVAL; + + crypto_priv = qdf_mem_malloc(sizeof(struct wlan_crypto_comp_priv)); + if (!crypto_priv) + return QDF_STATUS_E_NOMEM; + + crypto_param = &(crypto_priv->crypto_params); + + RESET_AUTHMODE(crypto_param); + RESET_UCAST_CIPHERS(crypto_param); + RESET_MCAST_CIPHERS(crypto_param); + RESET_MGMT_CIPHERS(crypto_param); + RESET_KEY_MGMT(crypto_param); + RESET_CIPHER_CAP(crypto_param); + + pdev = wlan_vdev_get_pdev(vdev); + wlan_pdev_obj_lock(pdev); + if (wlan_pdev_nif_fw_cap_get(pdev, WLAN_SOC_C_WEP)) + SET_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_WEP); + if (wlan_pdev_nif_fw_cap_get(pdev, WLAN_SOC_C_TKIP)) + SET_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_TKIP_MIC); + if (wlan_pdev_nif_fw_cap_get(pdev, WLAN_SOC_C_AES)) { + SET_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_AES); + SET_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_CCM256); + SET_CIPHER_CAP(crypto_param, 
WLAN_CRYPTO_CAP_GCM); + SET_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_GCM_256); + } + if (wlan_pdev_nif_fw_cap_get(pdev, WLAN_SOC_C_CKIP)) + SET_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_CKIP); + if (wlan_pdev_nif_fw_cap_get(pdev, WLAN_SOC_C_WAPI)) + SET_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_WAPI_SMS4); + SET_CIPHER_CAP(crypto_param, WLAN_CRYPTO_CAP_FILS_AEAD); + wlan_pdev_obj_unlock(pdev); + /* update the crypto cipher table based on the fw caps*/ + /* update the fw_caps into ciphercaps then attach to objmgr*/ + wlan_crypto_register_all_ciphers(crypto_param); + + status = wlan_objmgr_vdev_component_obj_attach(vdev, + WLAN_UMAC_COMP_CRYPTO, + (void *)crypto_priv, + QDF_STATUS_SUCCESS); + if (status != QDF_STATUS_SUCCESS) + qdf_mem_free(crypto_priv); + + return status; +} + +static QDF_STATUS wlan_crypto_peer_obj_create_handler( + struct wlan_objmgr_peer *peer, + void *arg) +{ + struct wlan_crypto_comp_priv *crypto_priv; + struct wlan_crypto_params *crypto_param; + QDF_STATUS status; + + if (!peer) + return QDF_STATUS_E_INVAL; + + crypto_priv = qdf_mem_malloc(sizeof(struct wlan_crypto_comp_priv)); + if (!crypto_priv) + return QDF_STATUS_E_NOMEM; + + status = wlan_objmgr_peer_component_obj_attach(peer, + WLAN_UMAC_COMP_CRYPTO, (void *)crypto_priv, + QDF_STATUS_SUCCESS); + + if (status == QDF_STATUS_SUCCESS) { + crypto_param = &crypto_priv->crypto_params; + RESET_AUTHMODE(crypto_param); + RESET_UCAST_CIPHERS(crypto_param); + RESET_MCAST_CIPHERS(crypto_param); + RESET_MGMT_CIPHERS(crypto_param); + RESET_KEY_MGMT(crypto_param); + RESET_CIPHER_CAP(crypto_param); + if (wlan_vdev_get_selfpeer(peer->peer_objmgr.vdev) != peer) { + wlan_crypto_set_peer_wep_keys( + wlan_peer_get_vdev(peer), peer); + } + } else { + qdf_print("%s[%d] peer obj failed status %d\n", + __func__, __LINE__, status); + qdf_mem_free(crypto_priv); + } + + return status; +} + +static QDF_STATUS wlan_crypto_psoc_obj_destroy_handler( + struct wlan_objmgr_psoc *psoc, + void *arg){ + + return 
QDF_STATUS_COMP_DISABLED; +} + +static QDF_STATUS wlan_crypto_pdev_obj_destroy_handler( + struct wlan_objmgr_pdev *pdev, + void *arg){ + + return QDF_STATUS_SUCCESS; +} + +static void wlan_crypto_free_key(struct wlan_crypto_comp_priv *crypto_priv) +{ + uint8_t i; + + if (!crypto_priv) { + qdf_print("%s[%d] crypto_priv NULL\n", __func__, __LINE__); + return; + } + + for (i = 0; i < WLAN_CRYPTO_MAXKEYIDX; i++) { + if (crypto_priv->key[i]) { + qdf_mem_free(crypto_priv->key[i]); + crypto_priv->key[i] = NULL; + } + } + + for (i = 0; i < WLAN_CRYPTO_MAXIGTKKEYIDX; i++) { + if (crypto_priv->igtk_key[i]) { + qdf_mem_free(crypto_priv->igtk_key[i]); + crypto_priv->igtk_key[i] = NULL; + } + } + +} + +static QDF_STATUS wlan_crypto_vdev_obj_destroy_handler( + struct wlan_objmgr_vdev *vdev, + void *arg){ + struct wlan_crypto_comp_priv *crypto_priv; + + if (!vdev) { + qdf_print("%s[%d] Vdev NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_vdev_crypto_obj(vdev); + + if (!crypto_priv) { + qdf_print("%s[%d] crypto_priv NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + wlan_objmgr_vdev_component_obj_detach(vdev, + WLAN_UMAC_COMP_CRYPTO, + (void *)crypto_priv); + wlan_crypto_free_key(crypto_priv); + qdf_mem_free(crypto_priv); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS wlan_crypto_peer_obj_destroy_handler( + struct wlan_objmgr_peer *peer, + void *arg){ + struct wlan_crypto_comp_priv *crypto_priv; + + if (!peer) { + qdf_print("%s[%d] Peer NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + crypto_priv = (struct wlan_crypto_comp_priv *) + wlan_get_peer_crypto_obj(peer); + if (!crypto_priv) { + qdf_print("%s[%d] crypto_priv NULL\n", __func__, __LINE__); + return QDF_STATUS_E_INVAL; + } + + wlan_objmgr_peer_component_obj_detach(peer, + WLAN_UMAC_COMP_CRYPTO, + (void *)crypto_priv); + wlan_crypto_free_key(crypto_priv); + qdf_mem_free(crypto_priv); + + return 
QDF_STATUS_SUCCESS; +} +/** + * __wlan_crypto_init - Init the crypto service with object manager + * Called from crypto init context. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS __wlan_crypto_init(void) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = wlan_objmgr_register_vdev_create_handler( + WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_vdev_obj_create_handler, NULL); + if (status != QDF_STATUS_SUCCESS) + goto err_vdev_create; + + status = wlan_objmgr_register_peer_create_handler( + WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_peer_obj_create_handler, NULL); + if (status != QDF_STATUS_SUCCESS) + goto err_peer_create; + + status = wlan_objmgr_register_vdev_destroy_handler( + WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_vdev_obj_destroy_handler, NULL); + if (status != QDF_STATUS_SUCCESS) + goto err_vdev_delete; + + status = wlan_objmgr_register_peer_destroy_handler( + WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_peer_obj_destroy_handler, NULL); + if (status != QDF_STATUS_SUCCESS) + goto err_peer_delete; + + goto register_success; +err_peer_delete: + wlan_objmgr_unregister_vdev_destroy_handler(WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_vdev_obj_destroy_handler, NULL); +err_vdev_delete: + wlan_objmgr_unregister_peer_create_handler(WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_peer_obj_create_handler, NULL); +err_peer_create: + wlan_objmgr_unregister_vdev_create_handler(WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_vdev_obj_create_handler, NULL); +err_vdev_create: + wlan_objmgr_unregister_pdev_create_handler(WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_pdev_obj_create_handler, NULL); +register_success: + return status; +} + +/** + * __wlan_crypto_deinit - Deinit the crypto service with object manager + * Called from crypto context. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS __wlan_crypto_deinit(void) +{ + + if (wlan_objmgr_unregister_vdev_create_handler(WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_vdev_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + if (wlan_objmgr_unregister_peer_create_handler(WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_peer_obj_create_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + if (wlan_objmgr_unregister_vdev_destroy_handler(WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_vdev_obj_destroy_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + if (wlan_objmgr_unregister_peer_destroy_handler(WLAN_UMAC_COMP_CRYPTO, + wlan_crypto_peer_obj_destroy_handler, NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_obj_mgr_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_obj_mgr_i.h new file mode 100644 index 0000000000000000000000000000000000000000..013df9c46c8383e0ba270f22c56f59e3746d35f7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_obj_mgr_i.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public API intialization of crypto service with object manager + */ + +#ifndef __WLAN_CRYPTO_OBJ_MGR_I_ +#define __WLAN_CRYPTO_OBJ_MGR_I_ + + +static inline void *wlan_get_vdev_crypto_obj(struct wlan_objmgr_vdev *vdev) +{ + void *crypto_priv; + crypto_priv = wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_CRYPTO); + + return crypto_priv; +} + +static inline void *wlan_get_peer_crypto_obj(struct wlan_objmgr_peer *peer) +{ + void *crypto_priv; + crypto_priv = wlan_objmgr_peer_get_comp_private_obj(peer, + WLAN_UMAC_COMP_CRYPTO); + + return crypto_priv; +} +#endif /* end of __WLAN_CRYPTO_OBJ_MGR_I_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_param_handling.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_param_handling.c new file mode 100644 index 0000000000000000000000000000000000000000..5ee66a3332d668c4949e9481eab8fa32ed81f4b2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_param_handling.c @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public APIs for crypto service + */ +/* include files */ +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "wlan_crypto_global_def.h" +#include "wlan_crypto_global_api.h" +#include "wlan_crypto_def_i.h" +#include "wlan_crypto_param_handling_i.h" + +static uint32_t +cipher2cap(int cipher) +{ + switch (cipher) { + case WLAN_CRYPTO_CIPHER_WEP: return WLAN_CRYPTO_CAP_WEP; + case WLAN_CRYPTO_CIPHER_AES_OCB: return WLAN_CRYPTO_CAP_AES; + case WLAN_CRYPTO_CIPHER_AES_CCM: return WLAN_CRYPTO_CAP_AES; + case WLAN_CRYPTO_CIPHER_AES_CCM_256: return WLAN_CRYPTO_CAP_AES; + case WLAN_CRYPTO_CIPHER_AES_GCM: return WLAN_CRYPTO_CAP_AES; + case WLAN_CRYPTO_CIPHER_AES_GCM_256: return WLAN_CRYPTO_CAP_AES; + case WLAN_CRYPTO_CIPHER_CKIP: return WLAN_CRYPTO_CAP_CKIP; + case WLAN_CRYPTO_CIPHER_TKIP: return WLAN_CRYPTO_CAP_TKIP_MIC; + case WLAN_CRYPTO_CIPHER_WAPI_SMS4: return WLAN_CRYPTO_CAP_WAPI_SMS4; + case WLAN_CRYPTO_CIPHER_WAPI_GCM4: return WLAN_CRYPTO_CAP_WAPI_GCM4; + case WLAN_CRYPTO_CIPHER_FILS_AEAD: return WLAN_CRYPTO_CAP_FILS_AEAD; + } + return 0; +} + +/** + * wlan_crypto_set_authmode - called by ucfg to configure authmode for vdev + * @vdev: vdev + * @authmode: authmode + * + * This function gets called from ucfg to configure authmode for vdev. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_authmode(struct wlan_crypto_params *crypto_params, + uint32_t authmode) +{ + crypto_params->authmodeset = authmode; + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_get_authmode - called by ucfg to get authmode of particular vdev + * @vdev: vdev + * + * This function gets called from ucfg to get authmode of particular vdev + * + * Return: authmode + */ +int32_t wlan_crypto_get_authmode(struct wlan_crypto_params *crypto_params) +{ + return crypto_params->authmodeset; +} + +/** + * wlan_crypto_set_mcastcipher - called by ucfg to configure mcastcipher in vdev + * @vdev: vdev + * @wlan_crypto_cipher_type: mcast cipher value. + * + * This function gets called from ucfg to configure mcastcipher in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_mcastcipher(struct wlan_crypto_params *crypto_params, + wlan_crypto_cipher_type cipher) +{ + uint16_t i; + uint32_t cap; + QDF_STATUS status = QDF_STATUS_E_INVAL; + + RESET_MCAST_CIPHERS(crypto_params); + + for (i = 0; i < WLAN_CRYPTO_CIPHER_MAX; i++) { + if (HAS_PARAM(cipher, i)) { + cap = cipher2cap(cipher & i); + if (cap && HAS_CIPHER_CAP(crypto_params, cap)) { + SET_MCAST_CIPHER(crypto_params, i); + status = QDF_STATUS_SUCCESS; + } + } + CLEAR_PARAM(cipher, i); + } + return status; +} +/** + * wlan_crypto_get_mcastcipher - called by ucfg to get mcastcipher from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get mcastcipher of particular vdev + * + * Return: mcast cipher + */ +int32_t wlan_crypto_get_mcastcipher(struct wlan_crypto_params *crypto_params) +{ + return crypto_params->mcastcipherset; +} + +/** + * wlan_crypto_set_ucastciphers - called by ucfg to configure + * unicast ciphers in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: 
QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_ucastciphers( + struct wlan_crypto_params *crypto_params, + uint32_t cipher) +{ + uint16_t i; + uint32_t cap; + QDF_STATUS status = QDF_STATUS_E_INVAL; + + RESET_UCAST_CIPHERS(crypto_params); + + for (i = 0; i < WLAN_CRYPTO_CIPHER_MAX ; i++) { + if (HAS_PARAM(cipher, i)) { + cap = cipher2cap(cipher & i); + if (cap && HAS_CIPHER_CAP(crypto_params, cap)) { + SET_UCAST_CIPHER(crypto_params, i); + status = QDF_STATUS_SUCCESS; + } + } + CLEAR_PARAM(cipher, i); + } + + return status; +} + +/** + * wlan_crypto_get_ucastciphers - called by ucfg to get ucastcipher from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_ucastciphers(struct wlan_crypto_params *crypto_params) +{ + return crypto_params->ucastcipherset; +} + +/** + * wlan_crypto_set_mgmtcipher - called by ucfg to configure + * mgmt ciphers in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_mgmtcipher( + struct wlan_crypto_params *crypto_params, + uint32_t value) +{ + SET_MGMT_CIPHER(crypto_params, value); + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_get_mgmtciphers - called by ucfg to get mgmtcipher from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_mgmtciphers(struct wlan_crypto_params *crypto_params) +{ + return crypto_params->mgmtcipherset; +} + +/** + * wlan_crypto_set_cipher_cap - called by ucfg to configure + * cipher cap in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg 
to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_cipher_cap( + struct wlan_crypto_params *crypto_params, + uint32_t value) +{ + crypto_params->cipher_caps = value; + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_get_cipher_cap - called by ucfg to get cipher caps from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_cipher_cap(struct wlan_crypto_params *crypto_params) +{ + return crypto_params->cipher_caps; +} + +/** + * wlan_crypto_set_rsn_cap - called by ucfg to configure + * cipher cap in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_rsn_cap( + struct wlan_crypto_params *crypto_params, + uint32_t value) +{ + crypto_params->rsn_caps = value; + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_get_rsn_cap - called by ucfg to get rsn caps from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_rsn_cap(struct wlan_crypto_params *crypto_params) +{ + return crypto_params->rsn_caps; +} + + +/** + * wlan_crypto_set_key_mgmt - called by ucfg to configure + * key_mgmt in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_key_mgmt( + struct wlan_crypto_params *crypto_params, + uint32_t value) +{ + crypto_params->key_mgmt = value; + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_crypto_get_key_mgmt - called by 
ucfg to get key mgmt from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_key_mgmt(struct wlan_crypto_params *crypto_params) +{ + return crypto_params->key_mgmt; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_param_handling_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_param_handling_i.h new file mode 100644 index 0000000000000000000000000000000000000000..d6198b4b2aa1256e4806791eeec21589a23f7a7f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_param_handling_i.h @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public APIs for crypto service + */ +/* include files */ +#ifndef __WLAN_CRYPTO_PARAM_HANDLING_I_H_ +#define __WLAN_CRYPTO_PARAM_HANDLING_I_H_ +/** + * wlan_crypto_set_authmode - called by ucfg to configure authmode for vdev + * @vdev: vdev + * @authmode: authmode + * + * This function gets called from ucfg to configure authmode for vdev. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_authmode(struct wlan_crypto_params *crypto_params, + uint32_t authmode); + +/** + * wlan_crypto_get_authmode - called by ucfg to get authmode of particular vdev + * @vdev: vdev + * + * This function gets called from ucfg to get authmode of particular vdev + * + * Return: authmode + */ +int32_t wlan_crypto_get_authmode(struct wlan_crypto_params *crypto_params); + +/** + * wlan_crypto_set_mcastcipher - called by ucfg to configure mcastcipher in vdev + * @vdev: vdev + * @wlan_crypto_cipher_type: mcast cipher value. + * + * This function gets called from ucfg to configure mcastcipher in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_mcastcipher(struct wlan_crypto_params *crypto_params, + wlan_crypto_cipher_type cipher); +/** + * wlan_crypto_get_mcastcipher - called by ucfg to get mcastcipher from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get mcastcipher of particular vdev + * + * Return: mcast cipher + */ +int32_t wlan_crypto_get_mcastcipher(struct wlan_crypto_params *crypto_params); + +/** + * wlan_crypto_set_ucastciphers - called by ucfg to configure + * unicast ciphers in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_ucastciphers(struct wlan_crypto_params *, + uint32_t ciphers); +/** + * wlan_crypto_get_ucastciphers - called by ucfg to get ucastcipher from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_ucastciphers(struct wlan_crypto_params *crypto_params); + +/** + * wlan_crypto_set_mgmtcipher - called by ucfg to configure + * mgmt ciphers in vdev + * @vdev: vdev + 
* @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_mgmtcipher(struct wlan_crypto_params *crypto_params, + uint32_t ciphers); + +/** + * wlan_crypto_get_mgmtciphers - called by ucfg to get mgmtcipher from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_mgmtciphers(struct wlan_crypto_params *crypto_params); + +/** + * wlan_crypto_set_cipher_cap - called by ucfg to configure + * cipher cap in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_cipher_cap(struct wlan_crypto_params *crypto_params, + uint32_t ciphers); + +/** + * wlan_crypto_get_cipher_cap - called by ucfg to get cipher caps from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_cipher_cap(struct wlan_crypto_params *crypto_params); + +/** + * wlan_crypto_set_rsn_cap - called by ucfg to configure + * cipher cap in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_rsn_cap(struct wlan_crypto_params *crypto_params, + uint32_t ciphers); + +/** + * wlan_crypto_get_rsn_cap - called by ucfg to get rsn caps from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + 
*/ +int32_t wlan_crypto_get_rsn_cap(struct wlan_crypto_params *crypto_params); + + +/** + * wlan_crypto_set_key_mgmt - called by ucfg to configure + * key_mgmt in vdev + * @vdev: vdev + * @ciphers: bitmap value of all supported unicast ciphers + * + * This function gets called from ucfg to configure unicast ciphers in vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_crypto_set_key_mgmt(struct wlan_crypto_params *crypto_params, + uint32_t ciphers); + +/** + * wlan_crypto_get_key_mgmt - called by ucfg to get key mgmt from vdev + * @vdev: vdev + * + * This function gets called from ucfg to get supported unicast ciphers + * + * Return: bitmap value of all supported unicast ciphers + */ +int32_t wlan_crypto_get_key_mgmt(struct wlan_crypto_params *crypto_params); +#endif /* __WLAN_CRYPTO_PARAM_HANDLING_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_tkip.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_tkip.c new file mode 100644 index 0000000000000000000000000000000000000000..947e39828c7daab8c42c60b91ac3679627f6440c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_tkip.c @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public API initialization of crypto service with object manager + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "wlan_crypto_global_def.h" +#include "wlan_crypto_def_i.h" +#include "wlan_crypto_main_i.h" +#include "wlan_crypto_obj_mgr_i.h" + + +static QDF_STATUS tkip_enmic(struct wlan_crypto_key *key, qdf_nbuf_t wbuf, + uint8_t encapdone, uint8_t hdrlen); +static QDF_STATUS tkip_demic(struct wlan_crypto_key *key, qdf_nbuf_t wbuf, + uint8_t tid, uint8_t hdrlen); + + +static QDF_STATUS tkip_setkey(struct wlan_crypto_key *key) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS tkip_encap(struct wlan_crypto_key *key, + qdf_nbuf_t wbuf, + uint8_t encapdone, + uint8_t hdrlen){ + uint8_t *ivp; + struct wlan_crypto_cipher *cipher_table; + + cipher_table = key->cipher_table; + + /* + * Copy down 802.11 header and add the IV, KeyID, and ExtIV. 
+ */ + if (encapdone) { + ivp = (uint8_t *)qdf_nbuf_data(wbuf); + } else { + uint8_t ivmictrailer_len = cipher_table->header + + cipher_table->miclen + + cipher_table->trailer; + ivp = qdf_nbuf_push_head(wbuf, ivmictrailer_len); + qdf_mem_move(ivp, (ivp + ivmictrailer_len), hdrlen); + qdf_mem_move((ivp + hdrlen + cipher_table->header), + (ivp + ivmictrailer_len + hdrlen), + (qdf_nbuf_len(wbuf) - hdrlen - ivmictrailer_len)); + } + + ivp += hdrlen; + key->keytsc++; /* XXX wrap at 48 bits */ + + ivp[0] = key->keytsc >> 8; /* TSC1 */ + ivp[1] = (ivp[0] | 0x20) & 0x7f; /* WEP seed */ + ivp[2] = key->keytsc >> 0; /* TSC0*/ + ivp[3] = (key->keyix << 6) | WLAN_CRYPTO_EXT_IV_BIT; /* KeyID | ExtID */ + ivp[4] = key->keytsc >> 16; /* PN2 */ + ivp[5] = key->keytsc >> 24; /* PN3 */ + ivp[6] = key->keytsc >> 32; /* PN4 */ + ivp[7] = key->keytsc >> 40; /* PN5 */ + + /* + * Finally, do software encrypt if needed. + */ + if (key->flags & WLAN_CRYPTO_KEY_SWENCRYPT) { + qdf_nbuf_realloc_tailroom(wbuf, cipher_table->miclen); + if (qdf_nbuf_realloc_tailroom(wbuf, cipher_table->miclen) + && (!wlan_crypto_tkip_encrypt(key->keyval, + qdf_nbuf_data(wbuf), qdf_nbuf_len(wbuf), + hdrlen))){ + return QDF_STATUS_CRYPTO_ENCRYPT_FAILED; + } + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS tkip_decap(struct wlan_crypto_key *key, + qdf_nbuf_t wbuf, + uint8_t tid, + uint8_t hdrlen){ + struct ieee80211_hdr *hdr; + uint8_t *ivp, *origHdr; + uint64_t pn; + struct wlan_crypto_cipher *cipher_table; + + cipher_table = key->cipher_table; + + /* + * Header should have extended IV and sequence number; + * verify the former and validate the latter. 
+ */ + origHdr = (uint8_t *)qdf_nbuf_data(wbuf); + hdr = (struct ieee80211_hdr *)origHdr; + + ivp = origHdr + hdrlen; + + if ((ivp[WLAN_CRYPTO_IV_LEN] & WLAN_CRYPTO_EXT_IV_BIT) == 0) + return 0; + + tid = wlan_get_tid(qdf_nbuf_data(wbuf)); + + pn = READ_6(ivp[0], ivp[1], ivp[4], ivp[5], ivp[6], ivp[7]); + + if (pn <= key->keyrsc[tid]) { + /* Replay violation.*/ + return QDF_STATUS_CRYPTO_DECRYPT_FAILED; + } + + if ((key->flags & WLAN_CRYPTO_KEY_SWDECRYPT)) { + if (!wlan_crypto_tkip_decrypt(key->keyval, + (struct ieee80211_hdr *)origHdr, + (origHdr + hdrlen), + (qdf_nbuf_len(wbuf) - hdrlen))) + return QDF_STATUS_CRYPTO_DECRYPT_FAILED; + } + + /* + * Copy up 802.11 header and strip crypto bits. + */ + qdf_mem_move(origHdr + cipher_table->header, origHdr, hdrlen); + + qdf_nbuf_pull_head(wbuf, cipher_table->header); + qdf_nbuf_trim_tail(wbuf, cipher_table->trailer); + + return QDF_STATUS_SUCCESS; +} +static QDF_STATUS tkip_enmic(struct wlan_crypto_key *key, + qdf_nbuf_t wbuf, + uint8_t encapdone, + uint8_t hdrlen){ + return QDF_STATUS_SUCCESS; +} +static QDF_STATUS tkip_demic(struct wlan_crypto_key *key, + qdf_nbuf_t wbuf, + uint8_t tid, + uint8_t hdrlen){ + struct wlan_crypto_cipher *cipher_table; + + cipher_table = key->cipher_table; + qdf_nbuf_trim_tail(wbuf, cipher_table->miclen); + return QDF_STATUS_SUCCESS; +} + +const struct wlan_crypto_cipher tkip_cipher_table = { + "TKIP", + WLAN_CRYPTO_CIPHER_TKIP, + WLAN_CRYPTO_IV_LEN + WLAN_CRYPTO_KEYID_LEN + WLAN_CRYPTO_EXT_IV_LEN, + WLAN_CRYPTO_CRC_LEN, + WLAN_CRYPTO_MIC_LEN, + 256, + tkip_setkey, + tkip_encap, + tkip_decap, + tkip_enmic, + tkip_demic, +}; + +const struct wlan_crypto_cipher *tkip_register(void) +{ + return &tkip_cipher_table; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_tkip_sw.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_tkip_sw.c new file mode 100644 index 
0000000000000000000000000000000000000000..27031c97361b685c7b1f673f0377a84127bc1f1e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_tkip_sw.c @@ -0,0 +1,398 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + */ +/* + * Temporal Key Integrity Protocol (CCMP) + * Copyright (c) 2010, Jouni Malinen + * + * This software may be distributed under the terms of the BSD license. + * See README for more details. + */ + +#include +#include +#include +#include "wlan_crypto_aes_i.h" +#include "wlan_crypto_def_i.h" + + +static inline uint16_t RotR1(uint16_t val) +{ + return (val >> 1) | (val << 15); +} + + +static inline uint8_t Lo8(uint16_t val) +{ + return val & 0xff; +} + + +static inline uint8_t Hi8(uint16_t val) +{ + return val >> 8; +} + + +static inline uint16_t Lo16(uint32_t val) +{ + return val & 0xffff; +} + + +static inline uint16_t Hi16(uint32_t val) +{ + return val >> 16; +} + + +static inline uint16_t Mk16(uint8_t hi, uint8_t lo) +{ + return lo | (((u16) hi) << 8); +} + + +static inline uint16_t Mk16_le(uint16_t *v) +{ + return qdf_le16_to_cpu(*v); +} + + +static const uint16_t Sbox[256] = { + 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, + 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A, + 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B, + 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B, + 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F, + 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F, + 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5, + 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F, + 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB, + 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397, + 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED, + 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A, + 0xBB6B, 0xC52A, 
0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194, + 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3, + 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104, + 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D, + 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39, + 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695, + 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83, + 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76, + 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4, + 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B, + 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0, + 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018, + 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751, + 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85, + 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12, + 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9, + 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7, + 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A, + 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8, + 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, +}; + + +static inline uint16_t _S_(uint16_t v) +{ + uint16_t t = Sbox[Hi8(v)]; + return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8)); +} + + +#define PHASE1_LOOP_COUNT 8 + +static void tkip_mixing_phase1(uint16_t *TTAK, const uint8_t *TK, + const uint8_t *TA, uint32_t IV32) +{ + int i, j; + + /* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */ + TTAK[0] = Lo16(IV32); + TTAK[1] = Hi16(IV32); + TTAK[2] = Mk16(TA[1], TA[0]); + TTAK[3] = Mk16(TA[3], TA[2]); + TTAK[4] = Mk16(TA[5], TA[4]); + + for (i = 0; i < PHASE1_LOOP_COUNT; i++) { + j = 2 * (i & 1); + TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j])); + TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j])); + TTAK[2] += _S_(TTAK[1] ^ 
Mk16(TK[9 + j], TK[8 + j])); + TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j])); + TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i; + } +} + + +static void tkip_mixing_phase2(uint8_t *WEPSeed, const uint8_t *TK, + const uint16_t *TTAK, uint16_t IV16){ + uint16_t PPK[6]; + + /* Step 1 - make copy of TTAK and bring in TSC */ + PPK[0] = TTAK[0]; + PPK[1] = TTAK[1]; + PPK[2] = TTAK[2]; + PPK[3] = TTAK[3]; + PPK[4] = TTAK[4]; + PPK[5] = TTAK[4] + IV16; + + /* Step 2 - 96-bit bijective mixing using S-box */ + PPK[0] += _S_(PPK[5] ^ Mk16_le((uint16_t *) &TK[0])); + PPK[1] += _S_(PPK[0] ^ Mk16_le((uint16_t *) &TK[2])); + PPK[2] += _S_(PPK[1] ^ Mk16_le((uint16_t *) &TK[4])); + PPK[3] += _S_(PPK[2] ^ Mk16_le((uint16_t *) &TK[6])); + PPK[4] += _S_(PPK[3] ^ Mk16_le((uint16_t *) &TK[8])); + PPK[5] += _S_(PPK[4] ^ Mk16_le((uint16_t *) &TK[10])); + + PPK[0] += RotR1(PPK[5] ^ Mk16_le((uint16_t *) &TK[12])); + PPK[1] += RotR1(PPK[0] ^ Mk16_le((uint16_t *) &TK[14])); + PPK[2] += RotR1(PPK[1]); + PPK[3] += RotR1(PPK[2]); + PPK[4] += RotR1(PPK[3]); + PPK[5] += RotR1(PPK[4]); + + /* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value + * WEPSeed[0..2] is transmitted as WEP IV */ + WEPSeed[0] = Hi8(IV16); + WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F; + WEPSeed[2] = Lo8(IV16); + WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((uint16_t *) &TK[0])) >> 1); + wlan_crypto_put_le16(&WEPSeed[4], PPK[0]); + wlan_crypto_put_le16(&WEPSeed[6], PPK[1]); + wlan_crypto_put_le16(&WEPSeed[8], PPK[2]); + wlan_crypto_put_le16(&WEPSeed[10], PPK[3]); + wlan_crypto_put_le16(&WEPSeed[12], PPK[4]); + wlan_crypto_put_le16(&WEPSeed[14], PPK[5]); +} + + +static inline uint32_t rotl(uint32_t val, int bits) +{ + return (val << bits) | (val >> (32 - bits)); +} + +static inline uint32_t xswap(uint32_t val) +{ + return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8); +} + + +#define michael_block(l, r) \ +do { \ + r ^= rotl(l, 17); \ + l += r; \ + r ^= xswap(l); \ + l += r; \ + r ^= rotl(l, 3); \ + l += 
r; \ + r ^= rotr(l, 2); \ + l += r; \ +} while (0) + + +static void michael_mic(const uint8_t *key, const uint8_t *hdr, + const uint8_t *data, size_t data_len, uint8_t *mic){ + uint32_t l, r; + int i, blocks, last; + + l = wlan_crypto_get_le32(key); + r = wlan_crypto_get_le32(key + 4); + + /* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */ + l ^= wlan_crypto_get_le32(hdr); + michael_block(l, r); + l ^= wlan_crypto_get_le32(&hdr[4]); + michael_block(l, r); + l ^= wlan_crypto_get_le32(&hdr[8]); + michael_block(l, r); + l ^= wlan_crypto_get_le32(&hdr[12]); + michael_block(l, r); + + /* 32-bit blocks of data */ + blocks = data_len / 4; + last = data_len % 4; + for (i = 0; i < blocks; i++) { + l ^= wlan_crypto_get_le32(&data[4 * i]); + michael_block(l, r); + } + + /* Last block and padding (0x5a, 4..7 x 0) */ + switch (last) { + case 0: + l ^= 0x5a; + break; + case 1: + l ^= data[4 * i] | 0x5a00; + break; + case 2: + l ^= data[4 * i] | (data[4 * i + 1] << 8) | 0x5a0000; + break; + case 3: + l ^= data[4 * i] | (data[4 * i + 1] << 8) | + (data[4 * i + 2] << 16) | 0x5a000000; + break; + } + michael_block(l, r); + /* l ^= 0; */ + michael_block(l, r); + + wlan_crypto_put_le32(mic, l); + wlan_crypto_put_le32(mic + 4, r); +} + + +static void michael_mic_hdr(const struct ieee80211_hdr *hdr11, uint8_t *hdr) +{ + int hdrlen = 24; + + switch (hdr11->frame_control[1] & (WLAN_FC1_FROMDS | WLAN_FC1_TODS)) { + case WLAN_FC1_TODS: + qdf_mem_copy(hdr, hdr11->addr3, WLAN_ALEN); /* DA */ + qdf_mem_copy(hdr + WLAN_ALEN, hdr11->addr2, WLAN_ALEN); /* SA */ + break; + case WLAN_FC1_FROMDS: + qdf_mem_copy(hdr, hdr11->addr1, WLAN_ALEN); /* DA */ + qdf_mem_copy(hdr + WLAN_ALEN, hdr11->addr3, WLAN_ALEN); /* SA */ + break; + case WLAN_FC1_FROMDS | WLAN_FC1_TODS: + qdf_mem_copy(hdr, hdr11->addr3, WLAN_ALEN); /* DA */ + qdf_mem_copy(hdr + WLAN_ALEN, hdr11 + 1, WLAN_ALEN); /* SA */ + hdrlen += WLAN_ALEN; + break; + case 0: + qdf_mem_copy(hdr, hdr11->addr1, WLAN_ALEN); /* DA */ + 
qdf_mem_copy(hdr + WLAN_ALEN, hdr11->addr2, WLAN_ALEN); /* SA */ + break; + } + + if (WLAN_FC0_GET_TYPE(hdr11->frame_control[0]) == WLAN_FC0_TYPE_DATA && + (WLAN_FC0_GET_STYPE(hdr11->frame_control[0]) & 0x08)) { + const uint8_t *qos = ((const uint8_t *) hdr11) + hdrlen; + hdr[12] = qos[0] & 0x0f; /* priority */ + } else + hdr[12] = 0; /* priority */ + + hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ +} + + +uint8_t *wlan_crypto_tkip_decrypt(const uint8_t *tk, + const struct ieee80211_hdr *hdr, + uint8_t *data, size_t data_len){ + uint16_t iv16; + uint32_t iv32; + uint16_t ttak[5]; + uint8_t rc4key[16]; + uint8_t *plain; + size_t plain_len; + uint32_t icv, rx_icv; + const uint8_t *mic_key; + uint8_t michael_hdr[16]; + uint8_t mic[8]; + + if (data_len < 8 + 4) + return NULL; + + iv16 = (data[0] << 8) | data[2]; + iv32 = wlan_crypto_get_le32(&data[4]); + wpa_printf(MSG_EXCESSIVE, "TKIP decrypt: iv32=%08x iv16=%04x", + iv32, iv16); + + tkip_mixing_phase1(ttak, tk, hdr->addr2, iv32); + wpa_hexdump(MSG_EXCESSIVE, "TKIP TTAK", (uint8_t *) ttak, sizeof(ttak)); + tkip_mixing_phase2(rc4key, tk, ttak, iv16); + wpa_hexdump(MSG_EXCESSIVE, "TKIP RC4KEY", rc4key, sizeof(rc4key)); + + plain_len = data_len - 8; + plain = data + 8; + wlan_crypto_wep_crypt(rc4key, plain, plain_len); + + icv = wlan_crypto_crc32(plain, plain_len - 4); + rx_icv = wlan_crypto_get_le32(plain + plain_len - 4); + if (icv != rx_icv) { + wpa_printf(MSG_INFO, "TKIP ICV mismatch in frame from " MACSTR, + MAC2STR(hdr->addr2)); + wpa_printf(MSG_DEBUG, "TKIP calculated ICV %08x received ICV " + "%08x", icv, rx_icv); + qdf_mem_free(plain); + return NULL; + } + plain_len -= 4; + + /* TODO: MSDU reassembly */ + + if (plain_len < 8) { + wpa_printf(MSG_INFO, "TKIP: Not enough room for Michael MIC " + "in a frame from " MACSTR, MAC2STR(hdr->addr2)); + qdf_mem_free(plain); + return NULL; + } + + michael_mic_hdr(hdr, michael_hdr); + mic_key = tk + ((hdr->frame_control[1] & WLAN_FC1_FROMDS) ? 
16 : 24); + michael_mic(mic_key, michael_hdr, plain, plain_len - 8, mic); + if (qdf_mem_cmp(mic, plain + plain_len - 8, 8) != 0) { + wpa_printf(MSG_INFO, "TKIP: Michael MIC mismatch in a frame " + "from " MACSTR, MAC2STR(hdr->addr2)); + wpa_hexdump(MSG_DEBUG, "TKIP: Calculated MIC", mic, 8); + wpa_hexdump(MSG_DEBUG, "TKIP: Received MIC", + plain + plain_len - 8, 8); + return NULL; + } + + return data; +} + + +void tkip_get_pn(uint8_t *pn, const uint8_t *data) +{ + pn[0] = data[7]; /* PN5 */ + pn[1] = data[6]; /* PN4 */ + pn[2] = data[5]; /* PN3 */ + pn[3] = data[4]; /* PN2 */ + pn[4] = data[0]; /* PN1 */ + pn[5] = data[2]; /* PN0 */ +} + + +uint8_t *wlan_crypto_tkip_encrypt(const uint8_t *tk, uint8_t *frame, + size_t len, size_t hdrlen){ + uint8_t michael_hdr[16]; + uint8_t mic[8]; + struct ieee80211_hdr *hdr; + const uint8_t *mic_key; + uint8_t *pos; + uint16_t iv16; + uint32_t iv32; + uint16_t ttak[5]; + uint8_t rc4key[16]; + + if (len < sizeof(*hdr) || len < hdrlen) + return NULL; + hdr = (struct ieee80211_hdr *) frame; + + michael_mic_hdr(hdr, michael_hdr); + mic_key = tk + ((hdr->frame_control[1] & WLAN_FC1_FROMDS) ? 
16 : 24); + michael_mic(mic_key, michael_hdr, frame + hdrlen, len - hdrlen, mic); + wpa_hexdump(MSG_EXCESSIVE, "TKIP: MIC", mic, sizeof(mic)); + pos = frame + hdrlen; + + iv32 = wlan_crypto_get_be32(pos); + iv16 = wlan_crypto_get_be16(pos + 4); + tkip_mixing_phase1(ttak, tk, hdr->addr2, iv32); + wpa_hexdump(MSG_EXCESSIVE, "TKIP TTAK", (uint8_t *) ttak, sizeof(ttak)); + tkip_mixing_phase2(rc4key, tk, ttak, iv16); + wpa_hexdump(MSG_EXCESSIVE, "TKIP RC4KEY", rc4key, sizeof(rc4key)); + + qdf_mem_copy(pos, rc4key, 3); + pos += 8; + + qdf_mem_copy(pos + len - hdrlen, mic, sizeof(mic)); + wlan_crypto_put_le32(pos + len - hdrlen + sizeof(mic), + wlan_crypto_crc32(pos, len - hdrlen + sizeof(mic))); + wlan_crypto_wep_crypt(rc4key, pos, len - hdrlen + sizeof(mic) + 4); + + return frame; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_wapi.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_wapi.c new file mode 100644 index 0000000000000000000000000000000000000000..b42c30b76e2b3d80b0100a35760a61740e44eb50 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_wapi.c @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public API intialization of crypto service with object manager + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "wlan_crypto_global_def.h" +#include "wlan_crypto_def_i.h" +#include "wlan_crypto_main_i.h" +#include "wlan_crypto_obj_mgr_i.h" + + +static QDF_STATUS wapi_setkey(struct wlan_crypto_key *key) +{ + return QDF_STATUS_SUCCESS; +} +static QDF_STATUS wapi_encap(struct wlan_crypto_key *key, + qdf_nbuf_t wbuf, + uint8_t encapdone, + uint8_t hdrlen){ + return QDF_STATUS_SUCCESS; +} +static QDF_STATUS wapi_decap(struct wlan_crypto_key *key, + qdf_nbuf_t wbuf, + uint8_t tid, + uint8_t hdrlen){ + return QDF_STATUS_SUCCESS; +} +static QDF_STATUS wapi_enmic(struct wlan_crypto_key *key, + qdf_nbuf_t wbuf, + uint8_t encapdone, + uint8_t hdrlen){ + return QDF_STATUS_SUCCESS; +} +static QDF_STATUS wapi_demic(struct wlan_crypto_key *key, + qdf_nbuf_t wbuf, + uint8_t tid, + uint8_t hdrlen){ + return QDF_STATUS_SUCCESS; +} + +const struct wlan_crypto_cipher wapi_cipher_table = { + "WPI_SMS4", + WLAN_CRYPTO_CIPHER_WAPI_SMS4, + WLAN_CRYPTO_WPI_SMS4_IVLEN + WLAN_CRYPTO_WPI_SMS4_KIDLEN + + WLAN_CRYPTO_WPI_SMS4_PADLEN, + WLAN_CRYPTO_WPI_SMS4_MICLEN, + 0, + 256, + wapi_setkey, + wapi_encap, + wapi_decap, + wapi_enmic, + wapi_demic, +}; + +const struct wlan_crypto_cipher *wapi_register(void) +{ + return &wapi_cipher_table; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_wep.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_wep.c new file mode 100644 index 
0000000000000000000000000000000000000000..b3a5ba756ffcabde94767e4062545035a66056f8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_wep.c @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * DOC: Public API intialization of crypto service with object manager + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "wlan_crypto_global_def.h" +#include "wlan_crypto_def_i.h" +#include "wlan_crypto_main_i.h" +#include "wlan_crypto_obj_mgr_i.h" + + +static QDF_STATUS wep_setkey(struct wlan_crypto_key *key) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS wep_encap(struct wlan_crypto_key *key, + qdf_nbuf_t wbuf, + uint8_t encapdone, + uint8_t hdrlen) +{ + uint8_t *ivp; + struct wlan_crypto_cipher *cipher_table; + + cipher_table = key->cipher_table; + /* + * Copy down 802.11 header and add the IV, KeyID, and ExtIV. 
+ */ + + if (encapdone) { + ivp = (uint8_t *)qdf_nbuf_data(wbuf); + } else { + ivp = (uint8_t *)qdf_nbuf_push_head(wbuf, + cipher_table->header + + cipher_table->trailer); + qdf_mem_move(ivp, + ivp + cipher_table->header + cipher_table->trailer, + hdrlen); + qdf_mem_move(ivp + hdrlen + cipher_table->header, + ivp + hdrlen + + cipher_table->header + cipher_table->trailer, + (qdf_nbuf_len(wbuf) - hdrlen + - cipher_table->header - cipher_table->trailer)); + ivp = (uint8_t *)qdf_nbuf_data(wbuf); + } + + ivp += hdrlen; + key->keytsc++; +#if _BYTE_ORDER == _BIG_ENDIAN + ivp[2] = key->keyrsc[0] >> 0; + ivp[1] = key->keyrsc[0] >> 8; + ivp[0] = key->keyrsc[0] >> 16; +#else + ivp[0] = key->keyrsc[0] >> 0; + ivp[1] = key->keyrsc[0] >> 8; + ivp[2] = key->keyrsc[0] >> 16; +#endif + ivp[3] = key->keyix << 6; + + /* + * Finally, do software encrypt if neeed. + */ + if ((key->flags & WLAN_CRYPTO_KEY_SWENCRYPT) && + !wlan_crypto_wep_encrypt(key->keyval, key->keylen, + qdf_nbuf_data(wbuf), qdf_nbuf_len(wbuf))) { + return QDF_STATUS_CRYPTO_ENCRYPT_FAILED; + } + + return QDF_STATUS_SUCCESS; +} +static QDF_STATUS wep_decap(struct wlan_crypto_key *key, + qdf_nbuf_t wbuf, + uint8_t tid, + uint8_t hdrlen) +{ + struct wlan_crypto_cipher *cipher_table; + uint8_t *origHdr = (uint8_t *)qdf_nbuf_data(wbuf); + uint16_t off, data_len; + + cipher_table = key->cipher_table; + + /* + * Check if the device handled the decrypt in hardware. + * If so we just strip the header; otherwise we need to + * handle the decrypt in software. + */ + + off = hdrlen + cipher_table->header; + data_len = qdf_nbuf_len(wbuf) - off - cipher_table->trailer; + if ((key->flags & WLAN_CRYPTO_KEY_SWDECRYPT) && + !wlan_crypto_wep_decrypt(key->keyval, key->keylen, + qdf_nbuf_data(wbuf), qdf_nbuf_len(wbuf))) { + return QDF_STATUS_CRYPTO_DECRYPT_FAILED; + } + /* + * Copy up 802.11 header and strip crypto bits. 
+ */ + qdf_mem_move(origHdr + cipher_table->header, origHdr, hdrlen); + qdf_nbuf_pull_head(wbuf, cipher_table->header); + qdf_nbuf_trim_tail(wbuf, cipher_table->trailer); + + return QDF_STATUS_SUCCESS; +} +static QDF_STATUS wep_enmic(struct wlan_crypto_key *key, + qdf_nbuf_t wbuf, + uint8_t tid, + uint8_t hdrlen){ + + return QDF_STATUS_SUCCESS; +} +static QDF_STATUS wep_demic(struct wlan_crypto_key *key, + qdf_nbuf_t wbuf, + uint8_t tid, + uint8_t hdrlen){ + + return QDF_STATUS_SUCCESS; +} + +const struct wlan_crypto_cipher wep_cipher_table = { + "WEP", + WLAN_CRYPTO_CIPHER_WEP, + WLAN_CRYPTO_IV_LEN + WLAN_CRYPTO_KEYID_LEN, + WLAN_CRYPTO_CRC_LEN, + 0, + 152, + wep_setkey, + wep_encap, + wep_decap, + wep_enmic, + wep_demic, +}; + +const struct wlan_crypto_cipher *wep_register(void) +{ + return &wep_cipher_table; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_wep_sw.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_wep_sw.c new file mode 100644 index 0000000000000000000000000000000000000000..b516f18d2bdbe834a5165ee90a48e90d1498eb03 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/crypto/src/wlan_crypto_wep_sw.c @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + */ +/* + * Wired Equivalent Privacy (WEP) + * Copyright (c) 2010, Jouni Malinen + * + * This software may be distributed under the terms of the BSD license. + * See README for more details. 
+ */ + +#include +#include +#include +#include "wlan_crypto_aes_i.h" +#include "wlan_crypto_def_i.h" + +void wlan_crypto_wep_crypt(uint8_t *key, uint8_t *buf, size_t plen) +{ + uint32_t i, j, k; + uint8_t S[256]; +#define S_SWAP(a, b) do { uint8_t t = S[a]; S[a] = S[b]; S[b] = t; } while (0) + uint8_t *pos; + + /* Setup RC4 state */ + for (i = 0; i < 256; i++) + S[i] = i; + j = 0; + for (i = 0; i < 256; i++) { + j = (j + S[i] + key[i & 0x0f]) & 0xff; + S_SWAP(i, j); + } + + /* Apply RC4 to data */ + pos = buf; + i = j = 0; + for (k = 0; k < plen; k++) { + i = (i + 1) & 0xff; + j = (j + S[i]) & 0xff; + S_SWAP(i, j); + *pos ^= S[(S[i] + S[j]) & 0xff]; + pos++; + } +} + + +void wlan_crypto_try_wep(const uint8_t *key, size_t key_len, + uint8_t *data, size_t data_len, + uint32_t *icv){ + uint8_t k[16]; + int i, j; + + for (i = 0, j = 0; i < sizeof(k); i++) { + k[i] = key[j]; + j++; + if (j >= key_len) + j = 0; + } + + wlan_crypto_wep_crypt(k, data, data_len); + *icv = wlan_crypto_crc32(data, data_len - 4); +} + +uint8_t *wlan_crypto_wep_encrypt(const uint8_t *key, uint16_t key_len, + uint8_t *data, size_t data_len){ + uint8_t k[16]; + uint32_t icv; + + if (data_len < 4 + 4) { + qdf_print("%s[%d] invalid len\n", __func__, __LINE__); + return NULL; + } + + qdf_mem_copy(k, data, 3); + qdf_mem_copy(k + 3, key, key_len); + wlan_crypto_try_wep(k, 3 + key_len, data + 4, data_len - 4, &icv); + + return data; +} + +uint8_t *wlan_crypto_wep_decrypt(const uint8_t *key, uint16_t key_len, + uint8_t *data, size_t data_len){ + uint8_t k[16]; + uint32_t icv, rx_icv; + + if (data_len < 4 + 4) { + qdf_print("%s[%d] invalid len\n", __func__, __LINE__); + return NULL; + } + + qdf_mem_copy(k, data, 3); + qdf_mem_copy(k + 3, key, key_len); + + rx_icv = wlan_crypto_get_le32(data + data_len - 4); + + wlan_crypto_try_wep(k, 3 + key_len, data + 4, data_len - 4, &icv); + + if (icv != rx_icv) { + qdf_print("%s[%d] iv mismatch\n", __func__, __LINE__); + return NULL; + } + + return data; +} diff 
--git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/inc/wlan_cmn.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/inc/wlan_cmn.h new file mode 100644 index 0000000000000000000000000000000000000000..27815073c39efb83d685b44aa1d9c151c3cbb54a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/inc/wlan_cmn.h @@ -0,0 +1,596 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: This file provides the common definitions for object manager + */ + +#ifndef _WLAN_CMN_H_ +#define _WLAN_CMN_H_ + +#include + +/* Max no of UMAC components */ +#define WLAN_UMAC_MAX_COMPONENTS WLAN_UMAC_COMP_ID_MAX +/* Max no. of radios, a pSoc/Device can support */ +#define WLAN_UMAC_MAX_PDEVS 3 +/* Max no. of VDEV per PSOC */ +#define WLAN_UMAC_PSOC_MAX_VDEVS 51 +/* Max no. of VDEVs, a PDEV can support */ +#define WLAN_UMAC_PDEV_MAX_VDEVS 17 +/* Max no. of Peers, a device can support */ +#define WLAN_UMAC_PSOC_MAX_PEERS (1024 + WLAN_UMAC_PSOC_MAX_VDEVS) +/* Max no. of Temporary Peers, a pdev can support */ +#define WLAN_MAX_PDEV_TEMP_PEERS 128 +/* Max no. 
of Temporary Peers, a psoc can support */ +#define WLAN_MAX_PSOC_TEMP_PEERS \ + (WLAN_MAX_PDEV_TEMP_PEERS * WLAN_UMAC_MAX_PDEVS) + +/* Max length of a SSID */ +#define WLAN_SSID_MAX_LEN 32 + +/* Max sequence number */ +#define WLAN_MAX_SEQ_NUM 4096 + +/* Max no. of peers for STA vap */ +#define WLAN_UMAC_MAX_STA_PEERS 2 +/* Max vdev_id */ +#define WLAN_UMAC_VDEV_ID_MAX 0xFF + +/* Invalid pdev_id */ +#define WLAN_INVALID_PDEV_ID 0xFFFFFFFF + +/* Invalid free descriptor count */ +#define WLAN_INVALID_MGMT_DESC_COUNT 0xFFFFFFFF + +/* 802.11 cap info */ +#define WLAN_CAPINFO_ESS 0x0001 +#define WLAN_CAPINFO_IBSS 0x0002 +#define WLAN_CAPINFO_CF_POLLABLE 0x0004 +#define WLAN_CAPINFO_CF_POLLREQ 0x0008 +#define WLAN_CAPINFO_PRIVACY 0x0010 +#define WLAN_CAPINFO_SHORT_PREAMBLE 0x0020 +#define WLAN_CAPINFO_PBCC 0x0040 +#define WLAN_CAPINFO_CHNL_AGILITY 0x0080 +#define WLAN_CAPINFO_SPECTRUM_MGMT 0x0100 +#define WLAN_CAPINFO_QOS 0x0200 +#define WLAN_CAPINFO_SHORT_SLOTTIME 0x0400 +#define WLAN_CAPINFO_APSD 0x0800 +#define WLAN_CAPINFO_RADIOMEAS 0x1000 +#define WLAN_CAPINFO_DSSSOFDM 0x2000 + +/* Allowed time to wait for Object creation */ +#define WLAN_VDEV_CREATE_TIMEOUT_CNT 300 +/* 25 msec */ +#define WLAN_VDEV_CREATE_TIMEOUT 25 + +#define WLAN_PDEV_CREATE_TIMEOUT_CNT 300 +/* 25 msec */ +#define WLAN_PDEV_CREATE_TIMEOUT 25 + +#define WLAN_PSOC_CREATE_TIMEOUT_CNT 300 +/* 25 msec */ +#define WLAN_PSOC_CREATE_TIMEOUT 25 +#define WLAN_24_GHZ_BASE_FREQ (2407) +#define WLAN_5_GHZ_BASE_FREQ (5000) +#define WLAN_24_GHZ_CHANNEL_6 (6) +#define WLAN_24_GHZ_CHANNEL_14 (14) +#define WLAN_24_GHZ_CHANNEL_15 (15) +#define WLAN_24_GHZ_CHANNEL_27 (27) +#define WLAN_5_GHZ_CHANNEL_170 (170) +#define WLAN_CHAN_SPACING_5MHZ (5) +#define WLAN_CHAN_SPACING_20MHZ (20) +#define WLAN_CHAN_14_FREQ (2484) +#define WLAN_CHAN_15_FREQ (2512) +#define WLAN_CHAN_170_FREQ (5852) + +#define WLAN_MAC_EID_VENDOR 221 +#define WLAN_MAC_EID_EXT 255 + +/* VHT capability flags */ +/* B0-B1 Maximum MPDU Length */ +/* 
A-MSDU Length 3839 octets */ +#define WLAN_VHTCAP_MAX_MPDU_LEN_3839 0x00000000 + /* A-MSDU Length 7991 octets */ +#define WLAN_VHTCAP_MAX_MPDU_LEN_7935 0x00000001 +/* A-MSDU Length 11454 octets */ +#define WLAN_VHTCAP_MAX_MPDU_LEN_11454 0x00000002 + +/* B2-B3 Supported Channel Width */ +/* Does not support 160 or 80+80 */ +#define WLAN_VHTCAP_SUP_CHAN_WIDTH_80 0x00000000 +/* Supports 160 */ +#define WLAN_VHTCAP_SUP_CHAN_WIDTH_160 0x00000004 +/* Support both 160 or 80+80 */ +#define WLAN_VHTCAP_SUP_CHAN_WIDTH_80_160 0x00000008 +/* B2-B3 */ +#define WLAN_VHTCAP_SUP_CHAN_WIDTH_S 2 +#define WLAN_VHTCAP_SUP_CHAN_WIDTH_MASK 0x0000000C +/* B4 RX LDPC */ +#define WLAN_VHTCAP_RX_LDPC 0x00000010 +/* B5 Short GI for 80MHz */ +#define WLAN_VHTCAP_SHORTGI_80 0x00000020 +/* B6 Short GI for 160 and 80+80 MHz */ +#define WLAN_VHTCAP_SHORTGI_160 0x00000040 +/* B7 Tx STBC */ +#define WLAN_VHTCAP_TX_STBC 0x00000080 +#define WLAN_VHTCAP_TX_STBC_S 7 +/* B8-B10 Rx STBC */ +#define WLAN_VHTCAP_RX_STBC 0x00000700 +#define WLAN_VHTCAP_RX_STBC_S 8 +/* B11 SU Beam former capable */ +#define WLAN_VHTCAP_SU_BFORMER 0x00000800 +#define WLAN_VHTCAP_SU_BFORMER_S 11 +/* B12 SU Beam formee capable */ +#define WLAN_VHTCAP_SU_BFORMEE 0x00001000 +#define WLAN_VHTCAP_SU_BFORMEE_S 12 + +/* B13-B15 Compressed steering number of beacomformer Antennas supported */ +#define WLAN_VHTCAP_BF_MAX_ANT 0x0000E000 +#define WLAN_VHTCAP_BF_MAX_ANT_S 13 +/* B13-B15 Beamformee STS Capability */ +#define WLAN_VHTCAP_STS_CAP_S 13 +#define WLAN_VHTCAP_STS_CAP_M 0x7 + +/* B16-B18 Sounding Dimensions */ +#define WLAN_VHTCAP_SOUND_DIM 0x00070000 +#define WLAN_VHTCAP_SOUND_DIM_S 16 +/* B19 MU Beam Former */ +#define WLAN_VHTCAP_MU_BFORMER 0x00080000 +#define WLAN_VHTCAP_MU_BFORMER_S 19 +/* B20 MU Beam Formee */ +#define WLAN_VHTCAP_MU_BFORMEE 0x00100000 +#define WLAN_VHTCAP_MU_BFORMEE_S 20 +/* B21 VHT TXOP PS */ +#define WLAN_VHTCAP_TXOP_PS 0x00200000 +/* B22 +HTC-VHT capable */ +#define WLAN_VHTCAP_PLUS_HTC_VHT 0x00400000 + 
+#define WLAN_VHTCAP_MAX_AMPDU_LEN_FACTOR 13 +/* B23-B25 maximum AMPDU Length Exponent */ +#define WLAN_VHTCAP_MAX_AMPDU_LEN_EXP 0x03800000 +#define WLAN_VHTCAP_MAX_AMPDU_LEN_EXP_S 23 +/* B26-B27 VHT Link Adaptation capable */ +#define WLAN_VHTCAP_LINK_ADAPT 0x0C000000 +/* Rx Antenna Pattern Consistency Supported */ +#define WLAN_VHTCAP_RX_ANTENNA_PATTERN 0x10000000 +/* Tx Antenna Pattern Consistency Supported */ +#define WLAN_VHTCAP_TX_ANTENNA_PATTERN 0x20000000 +/* B30-B31 Extended NSS Bandwidth Support */ +#define WLAN_VHTCAP_NO_EXT_NSS_BW_SUPPORT 0x00000000 +/* B30-B31 Extended NSS Bandwidth Support */ +#define WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_1 0x40000000 +/* B30-B31 Extended NSS Bandwidth Support */ +#define WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_2 0x80000000 +/* B30-B31 Extended NSS Bandwidth Support */ +#define WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_3 0xC0000000 +#define WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_S 30 +#define WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_MASK 0xC0000000 + +#define WLAN_VHTCAP_EXT_NSS_MASK (WLAN_VHTCAP_SUP_CHAN_WIDTH_MASK |\ + WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_MASK) +/* VHTCAP combinations of "supported channel width" and "ext nss support" + * which determine the NSS value supported by STA for <=80 MHz, 160 MHz + * and 80+80 MHz. The macros to be read as combination of + * "supported channel width" and "ext nss support" followed by NSS for 80MHz, + * 160MHz and 80+80MHz defined as a function of Max VHT NSS supported. 
+ * Ex: WLAN_EXTNSS_MAP_01_80F1_160FDOT5_80P80NONE - To be reas as + * supported channel width = 0 + * ext nss support = 1 + * NSS value for <=80MHz = max_vht_nss * 1 + * NSS value for 160MHz = max_vht_nss * (.5) + * NSS value for 80+80MHz = not supported + */ +#define WLAN_EXTNSS_MAP_00_80F1_160NONE_80P80NONE \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_80 | WLAN_VHTCAP_NO_EXT_NSS_BW_SUPPORT) +#define WLAN_EXTNSS_MAP_01_80F1_160FDOT5_80P80NONE \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_80 | WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_1) +#define WLAN_EXTNSS_MAP_02_80F1_160FDOT5_80P80FDOT5 \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_80 | WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_2) +#define WLAN_EXTNSS_MAP_03_80F1_160FDOT75_80P80FDOT75 \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_80 | WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_3) +#define WLAN_EXTNSS_MAP_10_80F1_160F1_80P80NONE \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_160 | WLAN_VHTCAP_NO_EXT_NSS_BW_SUPPORT) +#define WLAN_EXTNSS_MAP_11_80F1_160F1_80P80FDOT5 \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_160 | WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_1) +#define WLAN_EXTNSS_MAP_12_80F1_160F1_80P80FDOT75 \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_160 | WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_2) +#define WLAN_EXTNSS_MAP_13_80F2_160F2_80P80F1 \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_160 | WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_3) +#define WLAN_EXTNSS_MAP_20_80F1_160F1_80P80F1 \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_80_160 | WLAN_VHTCAP_NO_EXT_NSS_BW_SUPPORT) +#define WLAN_EXTNSS_MAP_23_80F2_160F1_80P80F1 \ + (WLAN_VHTCAP_SUP_CHAN_WIDTH_80_160 | WLAN_VHTCAP_EXT_NSS_BW_SUPPORT_3) + + +/** + * enum wlan_umac_comp_id - UMAC component id + * @WLAN_UMAC_COMP_MLME: MLME + * @WLAN_UMAC_COMP_MGMT_TXRX: MGMT Tx/Rx + * @WLAN_UMAC_COMP_SERIALIZATION: Serialization + * @WLAN_UMAC_COMP_SCAN: SCAN - as scan module uses services provided by + * MLME, MGMT_TXRX and SERIALIZATION, SCAN module + * must be initializes after above modules. 
+ * @WLAN_UMAC_COMP_PMO: PMO component + * @WLAN_UMAC_COMP_P2P: P2P + * @WLAN_UMAC_COMP_POLICY_MGR: Policy Manager + * @WLAN_UMAC_COMP_CONFIG: Configuration + * @WLAN_UMAC_COMP_WIFI_POS: WIFI Positioning + * @WLAN_UMAC_COMP_TDLS: TDLS + * @WLAN_UMAC_COMP_ATF: Airtime Fairness + * @WLAN_UMAC_COMP_SA_API: Smart Antenna API + * @WLAN_UMAC_COMP_REGULATORY: REGULATORY + * @WLAN_UMAC_COMP_CRYPTO: CRYPTO + * @WLAN_UMAC_COMP_NAN: Neighbor Aware Networking + * @WLAN_UMAC_COMP_DFS: DFS + * @WLAN_UMAC_COMP_SPECTRAL: Spectral + * @WLAN_UMAC_COMP_OFFCHAN_TXRX: Offchan TxRx + * @WLAN_UMAC_COMP_SON: SON + * @WLAN_UMAC_COMP_SPECTRAL: Spectral + * @WLAN_UMAC_COMP_SPLITMAC: SplitMAC + * @WLAN_UMAC_COMP_DISA: DISA encryption test + * @WLAN_UMAC_COMP_GREEN_AP: Green AP + * @WLAN_UMAC_COMP_FTM: FTM component + * @WLAN_UMAC_COMP_FD: FILS Discovery + * @WLAN_UMAC_COMP_OCB: OCB + * @WLAN_UMAC_COMP_IPA: IPA + * @WLAN_UMAC_COMP_CP_STATS: Control Plane Statistics + * @WLAN_UMAC_COMP_ACTION_OUI: ACTION OUI + * @WLAN_UMAC_COMP_ID_MAX: Maximum components in UMAC + * + * This id is static. 
+ * On Adding new component, new id has to be assigned + */ +enum wlan_umac_comp_id { + WLAN_UMAC_COMP_MLME = 0, + WLAN_UMAC_COMP_MGMT_TXRX = 1, + WLAN_UMAC_COMP_SERIALIZATION = 2, + WLAN_UMAC_COMP_SCAN = 3, + WLAN_UMAC_COMP_PMO = 4, + WLAN_UMAC_COMP_P2P = 5, + WLAN_UMAC_COMP_POLICY_MGR = 6, + WLAN_UMAC_COMP_CONFIG = 7, + WLAN_TARGET_IF_COMP_DIRECT_BUF_RX = 8, + WLAN_UMAC_COMP_WIFI_POS = 9, + WLAN_UMAC_COMP_TDLS = 10, + WLAN_UMAC_COMP_ATF = 11, + WLAN_UMAC_COMP_SA_API = 12, + WLAN_UMAC_COMP_REGULATORY = 13, + WLAN_UMAC_COMP_CRYPTO = 14, + WLAN_UMAC_COMP_NAN = 15, + WLAN_UMAC_COMP_DFS = 16, + WLAN_UMAC_COMP_OFFCHAN_TXRX = 17, + WLAN_UMAC_COMP_SON = 18, + WLAN_UMAC_COMP_SPECTRAL = 19, + WLAN_UMAC_COMP_SPLITMAC = 20, + WLAN_UMAC_COMP_DISA = 21, + WLAN_UMAC_COMP_GREEN_AP = 22, + WLAN_UMAC_COMP_FTM = 23, + WLAN_UMAC_COMP_FD = 24, + WLAN_UMAC_COMP_OCB = 25, + WLAN_UMAC_COMP_IPA = 26, + WLAN_UMAC_COMP_CP_STATS = 27, + WLAN_UMAC_COMP_ACTION_OUI = 28, + WLAN_UMAC_COMP_ID_MAX, +}; + +/** + * enum WLAN_DEV_TYPE - for DA or OL architecture types + * @WLAN_DEV_DA: Direct attach + * @WLAN_DEV_OL: Partial offload + * @WLAN_DEV_INVALID: Invalid dev type + */ +typedef enum { + WLAN_DEV_DA = 0, + WLAN_DEV_OL = 1, + WLAN_DEV_INVALID = 3, +} WLAN_DEV_TYPE; + +/** + * enum wlan_phymode - phy mode + * @WLAN_PHYMODE_AUTO: autoselect + * @WLAN_PHYMODE_11A: 5GHz, OFDM + * @WLAN_PHYMODE_11B: 2GHz, CCK + * @WLAN_PHYMODE_11G: 2GHz, OFDM + * @WLAN_PHYMODE_11NA_HT20: 5Ghz, HT20 + * @WLAN_PHYMODE_11NG_HT20: 2Ghz, HT20 + * @WLAN_PHYMODE_11NA_HT40PLUS: 5Ghz, HT40 (ext ch +1) + * @WLAN_PHYMODE_11NA_HT40MINUS: 5Ghz, HT40 (ext ch -1) + * @WLAN_PHYMODE_11NG_HT40PLUS: 2Ghz, HT40 (ext ch +1) + * @WLAN_PHYMODE_11NG_HT40MINUS: 2Ghz, HT40 (ext ch -1) + * @WLAN_PHYMODE_11NG_HT40: 2Ghz, Auto HT40 + * @WLAN_PHYMODE_11NA_HT40: 5Ghz, Auto HT40 + * @WLAN_PHYMODE_11AC_VHT20: 5Ghz, VHT20 + * @WLAN_PHYMODE_11AC_VHT40PLUS: 5Ghz, VHT40 (Ext ch +1) + * @WLAN_PHYMODE_11AC_VHT40MINUS:5Ghz VHT40 (Ext ch -1) + * 
@WLAN_PHYMODE_11AC_VHT40: 5Ghz, VHT40 + * @WLAN_PHYMODE_11AC_VHT80: 5Ghz, VHT80 + * @WLAN_PHYMODE_11AC_VHT160: 5Ghz, VHT160 + * @WLAN_PHYMODE_11AC_VHT80_80: 5Ghz, VHT80_80 + * @WLAN_PHYMODE_11AXA_HE20: 5GHz, HE20 + * @WLAN_PHYMODE_11AXG_HE20: 2GHz, HE20 + * @WLAN_PHYMODE_11AXA_HE40PLUS: 5GHz, HE40 (ext ch +1) + * @WLAN_PHYMODE_11AXA_HE40MINUS:5GHz, HE40 (ext ch -1) + * @WLAN_PHYMODE_11AXG_HE40PLUS: 2GHz, HE40 (ext ch +1) + * @WLAN_PHYMODE_11AXG_HE40MINUS:2GHz, HE40 (ext ch -1) + * @WLAN_PHYMODE_11AXA_HE40: 5GHz, HE40 + * @WLAN_PHYMODE_11AXG_HE40: 2GHz, HE40 + * @WLAN_PHYMODE_11AXA_HE80: 5GHz, HE80 + * @WLAN_PHYMODE_11AXA_HE160: 5GHz, HE160 + * @WLAN_PHYMODE_11AXA_HE80_80: 5GHz, HE80_80 + */ +enum wlan_phymode { + WLAN_PHYMODE_AUTO = 0, + WLAN_PHYMODE_11A = 1, + WLAN_PHYMODE_11B = 2, + WLAN_PHYMODE_11G = 3, + WLAN_PHYMODE_11NA_HT20 = 4, + WLAN_PHYMODE_11NG_HT20 = 5, + WLAN_PHYMODE_11NA_HT40PLUS = 6, + WLAN_PHYMODE_11NA_HT40MINUS = 7, + WLAN_PHYMODE_11NG_HT40PLUS = 8, + WLAN_PHYMODE_11NG_HT40MINUS = 9, + WLAN_PHYMODE_11NG_HT40 = 10, + WLAN_PHYMODE_11NA_HT40 = 11, + WLAN_PHYMODE_11AC_VHT20 = 12, + WLAN_PHYMODE_11AC_VHT40PLUS = 13, + WLAN_PHYMODE_11AC_VHT40MINUS = 14, + WLAN_PHYMODE_11AC_VHT40 = 15, + WLAN_PHYMODE_11AC_VHT80 = 16, + WLAN_PHYMODE_11AC_VHT160 = 17, + WLAN_PHYMODE_11AC_VHT80_80 = 18, + WLAN_PHYMODE_11AXA_HE20 = 19, + WLAN_PHYMODE_11AXG_HE20 = 20, + WLAN_PHYMODE_11AXA_HE40PLUS = 21, + WLAN_PHYMODE_11AXA_HE40MINUS = 22, + WLAN_PHYMODE_11AXG_HE40PLUS = 23, + WLAN_PHYMODE_11AXG_HE40MINUS = 24, + WLAN_PHYMODE_11AXA_HE40 = 25, + WLAN_PHYMODE_11AXG_HE40 = 26, + WLAN_PHYMODE_11AXA_HE80 = 27, + WLAN_PHYMODE_11AXA_HE160 = 28, + WLAN_PHYMODE_11AXA_HE80_80 = 29, +}; + +#define WLAN_PHYMODE_MAX (WLAN_PHYMODE_11AXA_HE80_80 + 1) + +/** + * enum wlan_phy_ch_width - channel width + * @WLAN_CH_WIDTH_20MHZ: 20 mhz width + * @WLAN_CH_WIDTH_40MHZ: 40 mhz width + * @WLAN_CH_WIDTH_80MHZ: 80 mhz width + * @WLAN_CH_WIDTH_160MHZ: 160 mhz width + * @WLAN_CH_WIDTH_80P80HZ: 80+80 mhz 
width + * @WLAN_CH_WIDTH_5MHZ: 5 mhz width + * @WLAN_CH_WIDTH_10MHZ: 10 mhz width + * @WLAN_CH_WIDTH_INVALID: invalid width + * @WLAN_CH_WIDTH_MAX: max possible width + */ +enum wlan_phy_ch_width { + WLAN_CH_WIDTH_20MHZ = 0, + WLAN_CH_WIDTH_40MHZ, + WLAN_CH_WIDTH_80MHZ, + WLAN_CH_WIDTH_160MHZ, + WLAN_CH_WIDTH_80P80MHZ, + WLAN_CH_WIDTH_5MHZ, + WLAN_CH_WIDTH_10MHZ, + WLAN_CH_WIDTH_INVALID, + WLAN_CH_WIDTH_MAX +}; + +/** + * enum wifi_traffic_ac - access category type + * @WIFI_AC_VO: Voice AC + * @WIFI_AC_VI: Video AC + * @WIFI_AC_BE: Best effort AC + * @WIFI_AC_BK: Background AC + * @WIFI_AC_MAX: MAX access category + */ +enum wifi_traffic_ac { + WIFI_AC_VO = 0, + WIFI_AC_VI = 1, + WIFI_AC_BE = 2, + WIFI_AC_BK = 3, + WIFI_AC_MAX = 4, +}; + +/** + * enum wlan_peer_type - peer type + * @WLAN_PEER_SELF: for AP mode, SELF PEER or AP PEER are same + * @WLAN_PEER_AP: BSS peer for STA mode, Self peer for AP mode + * @WLAN_PEER_P2P_GO: BSS peer for P2P CLI mode, Self peer for P2P GO mode + * @WLAN_PEER_STA: Self Peer for STA mode, STA peer for AP mode + * @WLAN_PEER_P2P_CLI: Self peer for P2P CLI mode, P2P CLI peer for P2P GO mode + * @WLAN_PEER_TDLS: TDLS Peer + * @WLAN_PEER_NAWDS: NAWDS Peer + * @WLAN_PEER_STA_TEMP: STA Peer Temp (its host only node) + * @WLAN_PEER_IBSS: IBSS Peer + * @WLAN_PEER_NDP: NDP Peer + */ +enum wlan_peer_type { + WLAN_PEER_SELF = 1, + WLAN_PEER_AP = 2, + WLAN_PEER_P2P_GO = 3, + WLAN_PEER_STA = 4, + WLAN_PEER_P2P_CLI = 5, + WLAN_PEER_TDLS = 6, + WLAN_PEER_NAWDS = 7, + WLAN_PEER_STA_TEMP = 8, + WLAN_PEER_IBSS = 9, + WLAN_PEER_NDP = 10, +}; + +/** + * enum wlan_band - specifies operating channel band + * @WLAN_BAND_ALL: Any band + * @WLAN_BAND_2_4_GHZ: 2.4 GHz band + * @WLAN_BAND_5_GHZ: 5 GHz band + * @WLAN_BAND_4_9_GHZ: 4.9 GHz band + */ +enum wlan_band { + WLAN_BAND_ALL, + WLAN_BAND_2_4_GHZ, + WLAN_BAND_5_GHZ, + WLAN_BAND_4_9_GHZ, +}; + +/** + * enum wlan_bss_type - type of network + * @WLAN_TYPE_ANY: Default value + * @WLAN_TYPE_BSS: Type BSS + * 
@WLAN_TYPE_IBSS: Type IBSS + */ +enum wlan_bss_type { + WLAN_TYPE_ANY, + WLAN_TYPE_BSS, + WLAN_TYPE_IBSS, +}; + +/** + * enum wlan_pmf_cap: pmf capability + * @PMF_DISABLED: PMF is disabled + * @PMF_CAPABLE: PMF is supported + * @PMF_REQUIRED: PMF is mandatory + */ +enum wlan_pmf_cap { + WLAN_PMF_DISABLED, + WLAN_PMF_CAPABLE, + WLAN_PMF_REQUIRED, +}; + +/** + * enum wlan_auth_type - Enumeration of the various Auth types + * @WLAN_AUTH_TYPE_OPEN_SYSTEM: Open auth type + * @WLAN_AUTH_TYPE_SHARED_KEY: Shared Key Auth type + * @WLAN_AUTH_TYPE_AUTOSWITCH: Auto switch Open/Shared + * @WLAN_AUTH_TYPE_SAE: SAE auth type + * @WLAN_AUTH_TYPE_WPA: WPA Enterprise + * @WLAN_AUTH_TYPE_WPA_PSK: WPA PSK + * @WLAN_AUTH_TYPE_WPA_NONE: WPA None + * @WLAN_AUTH_TYPE_RSN: RSN Enterprise + * @WLAN_AUTH_TYPE_RSN_PSK: RSN PSK + * @WLAN_AUTH_TYPE_FT_RSN: FT RSN Enterprise + * @WLAN_AUTH_TYPE_FT_RSN_PSK: FT RSN PSK + * @WLAN_AUTH_TYPE_WAPI_WAI_CERTIFICATE: WAPI certificate + * @WLAN_AUTH_TYPE_WAPI_WAI_PSK: WAPI PSK + * @WLAN_AUTH_TYPE_CCKM_WPA: CCKM WPA + * @WLAN_AUTH_TYPE_CCKM_RSN: CCKM RSN + * @WLAN_AUTH_TYPE_RSN_PSK_SHA256: SHA256 PSK + * @WLAN_AUTH_TYPE_RSN_8021X_SHA256: SHA256 Enterprise + * @WLAN_AUTH_TYPE_FILS_SHA256: FILS SHA256 + * @WLAN_AUTH_TYPE_FILS_SHA384: FILS SHA384 + * @WLAN_AUTH_TYPE_FT_FILS_SHA256: FILS SHA256 for 11r + * @WLAN_AUTH_TYPE_FT_FILS_SHA384: FILS SHA384 for 11r + * @WLAN_AUTH_TYPE_DPP_RSN: DPP RSN + * @WLAN_AUTH_TYPE_OWE: OWE + * @WLAN_AUTH_TYPE_SUITEB_EAP_SHA256: EAP SHA256 + * @WLAN_AUTH_TYPE_SUITEB_EAP_SHA384: EAP SHA384 + * @WLAN_AUTH_TYPE_ANY: To match any auth type + * @WLAN_NUM_OF_SUPPORT_AUTH_TYPE: Max no of Auth type + */ +enum wlan_auth_type { + WLAN_AUTH_TYPE_OPEN_SYSTEM, + WLAN_AUTH_TYPE_SHARED_KEY, + WLAN_AUTH_TYPE_AUTOSWITCH, + WLAN_AUTH_TYPE_SAE, + WLAN_AUTH_TYPE_WPA, + WLAN_AUTH_TYPE_WPA_PSK, + WLAN_AUTH_TYPE_WPA_NONE, + WLAN_AUTH_TYPE_RSN, + WLAN_AUTH_TYPE_RSN_PSK, + WLAN_AUTH_TYPE_FT_RSN, + WLAN_AUTH_TYPE_FT_RSN_PSK, + 
WLAN_AUTH_TYPE_WAPI_WAI_CERTIFICATE, + WLAN_AUTH_TYPE_WAPI_WAI_PSK, + WLAN_AUTH_TYPE_CCKM_WPA, + WLAN_AUTH_TYPE_CCKM_RSN, + WLAN_AUTH_TYPE_RSN_PSK_SHA256, + WLAN_AUTH_TYPE_RSN_8021X_SHA256, + WLAN_AUTH_TYPE_FILS_SHA256, + WLAN_AUTH_TYPE_FILS_SHA384, + WLAN_AUTH_TYPE_FT_FILS_SHA256, + WLAN_AUTH_TYPE_FT_FILS_SHA384, + WLAN_AUTH_TYPE_DPP_RSN, + WLAN_AUTH_TYPE_OWE, + WLAN_AUTH_TYPE_SUITEB_EAP_SHA256, + WLAN_AUTH_TYPE_SUITEB_EAP_SHA384, + WLAN_AUTH_TYPE_OSEN, + WLAN_AUTH_TYPE_ANY, + WLAN_NUM_OF_SUPPORT_AUTH_TYPE = WLAN_AUTH_TYPE_ANY, +}; + +/** + * enum wlan_enc_type - Enumeration of the various Enc types + * @WLAN_ENCRYPT_TYPE_NONE: No encryption + * @WLAN_ENCRYPT_TYPE_WEP40_STATICKEY: WEP 40 Static key + * @WLAN_ENCRYPT_TYPE_WEP104_STATICKEY: WEP 104 Static key + * @WLAN_ENCRYPT_TYPE_WEP40: WEP 40 + * @WLAN_ENCRYPT_TYPE_WEP104: WEP 104 + * @WLAN_ENCRYPT_TYPE_TKIP: TKIP + * @WLAN_ENCRYPT_TYPE_AES: AES + * @WLAN_ENCRYPT_TYPE_WPI: WAPI + * @WLAN_ENCRYPT_TYPE_KRK: KRK + * @WLAN_ENCRYPT_TYPE_BTK: BTK + * @WLAN_ENCRYPT_TYPE_AES_CMAC: 11W BIP + * @WLAN_ENCRYPT_TYPE_ANY: Any + * @WLAN_NUM_OF_ENCRYPT_TYPE: Max value + */ +enum wlan_enc_type { + WLAN_ENCRYPT_TYPE_NONE, + WLAN_ENCRYPT_TYPE_WEP40_STATICKEY, + WLAN_ENCRYPT_TYPE_WEP104_STATICKEY, + WLAN_ENCRYPT_TYPE_WEP40, + WLAN_ENCRYPT_TYPE_WEP104, + WLAN_ENCRYPT_TYPE_TKIP, + WLAN_ENCRYPT_TYPE_AES, + WLAN_ENCRYPT_TYPE_WPI, + WLAN_ENCRYPT_TYPE_KRK, + WLAN_ENCRYPT_TYPE_BTK, + WLAN_ENCRYPT_TYPE_AES_CMAC, + WLAN_ENCRYPT_TYPE_AES_GCMP, + WLAN_ENCRYPT_TYPE_AES_GCMP_256, + WLAN_ENCRYPT_TYPE_ANY, + WLAN_NUM_OF_ENCRYPT_TYPE = WLAN_ENCRYPT_TYPE_ANY, +}; + +/** + * struct wlan_ssid - SSID info + * @length: ssid length of bss excluding null + * @ssid: ssid character array potentially non null terminated + */ +struct wlan_ssid { + uint8_t length; + uint8_t ssid[WLAN_SSID_MAX_LEN]; +}; + +/* depreciated; use QDF_MAC_ADDR_SIZE instead */ +#define WLAN_MACADDR_LEN QDF_MAC_ADDR_SIZE +/* Util API to copy the MAC address */ +#define 
WLAN_ADDR_COPY(dst, src) qdf_mem_copy(dst, src, QDF_MAC_ADDR_SIZE) +/* Util API to compare the MAC address */ +#define WLAN_ADDR_EQ(a1, a2) qdf_mem_cmp(a1, a2, QDF_MAC_ADDR_SIZE) + +#define PSOC_SERVICE_BM_SIZE ((128 + sizeof(uint32_t) - 1) / sizeof(uint32_t)) +#define PSOC_HOST_MAX_NUM_SS (8) +#define PSOC_HOST_MAX_PHY_SIZE (3) +#define PSOC_MAX_HW_MODE (2) +#define PSOC_MAX_MAC_PHY_CAP (5) +#define PSOC_MAX_PHY_REG_CAP (3) +#define PSOC_MAX_CHAINMASK_TABLES (5) + + +#endif /* _WLAN_OBJMGR_CMN_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_main.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_main.c new file mode 100644 index 0000000000000000000000000000000000000000..7946a2ccefe6e0d26beb14d9814df65092e497aa --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_main.c @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_mgmt_txrx_main.c + * This file contains mgmt txrx private API definitions for + * mgmt txrx component. 
+ */ + +#include "wlan_mgmt_txrx_main_i.h" +#include "qdf_nbuf.h" + +QDF_STATUS wlan_mgmt_txrx_desc_pool_init( + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx) +{ + uint32_t i; + + mgmt_txrx_info( + "mgmt_txrx ctx: %pK pdev: %pK mgmt desc pool size %d", + mgmt_txrx_pdev_ctx, mgmt_txrx_pdev_ctx->pdev, + MGMT_DESC_POOL_MAX); + mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool = qdf_mem_malloc( + MGMT_DESC_POOL_MAX * + sizeof(struct mgmt_txrx_desc_elem_t)); + + if (!mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool) { + mgmt_txrx_err("Failed to allocate desc pool"); + return QDF_STATUS_E_NOMEM; + } + qdf_list_create(&mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list, + MGMT_DESC_POOL_MAX); + + for (i = 0; i < MGMT_DESC_POOL_MAX; i++) { + mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[i].desc_id = i; + mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[i].in_use = false; + qdf_list_insert_front( + &mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list, + &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[i].entry); + } + + qdf_spinlock_create( + &mgmt_txrx_pdev_ctx->mgmt_desc_pool.desc_pool_lock); + + return QDF_STATUS_SUCCESS; +} + +void wlan_mgmt_txrx_desc_pool_deinit( + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx) +{ + uint32_t i; + uint32_t pool_size; + QDF_STATUS status; + + if (!mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool) { + mgmt_txrx_err("Empty mgmt descriptor pool"); + qdf_assert_always(0); + return; + } + + pool_size = mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list.max_size; + for (i = 0; i < pool_size; i++) { + status = qdf_list_remove_node( + &mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list, + &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[i].entry); + if (status != QDF_STATUS_SUCCESS) + mgmt_txrx_err( + "Failed to get mgmt desc from freelist, desc id: %d: status %d", + i, status); + } + + qdf_list_destroy(&mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list); + qdf_mem_free(mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool); + mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool = NULL; + + qdf_spinlock_destroy( + 
&mgmt_txrx_pdev_ctx->mgmt_desc_pool.desc_pool_lock); +} + +struct mgmt_txrx_desc_elem_t *wlan_mgmt_txrx_desc_get( + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx) +{ + QDF_STATUS status; + qdf_list_node_t *desc_node; + struct mgmt_txrx_desc_elem_t *mgmt_txrx_desc; + + qdf_spin_lock_bh(&mgmt_txrx_pdev_ctx->mgmt_desc_pool.desc_pool_lock); + if (qdf_list_peek_front(&mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list, + &desc_node) + != QDF_STATUS_SUCCESS) { + qdf_spin_unlock_bh( + &mgmt_txrx_pdev_ctx->mgmt_desc_pool.desc_pool_lock); + mgmt_txrx_err("Descriptor freelist empty for mgmt_txrx_ctx %pK", + mgmt_txrx_pdev_ctx); + return NULL; + } + + status = qdf_list_remove_node( + &mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list, + desc_node); + if (status != QDF_STATUS_SUCCESS) { + qdf_spin_unlock_bh( + &mgmt_txrx_pdev_ctx->mgmt_desc_pool.desc_pool_lock); + mgmt_txrx_err("Failed to get descriptor from list: status %d", + status); + qdf_assert_always(0); + } + + mgmt_txrx_desc = qdf_container_of(desc_node, + struct mgmt_txrx_desc_elem_t, + entry); + mgmt_txrx_desc->in_use = true; + + /* acquire the wakelock when there are pending mgmt tx frames */ + qdf_wake_lock_timeout_acquire(&mgmt_txrx_pdev_ctx->wakelock_tx_cmp, + MGMT_TXRX_WAKELOCK_TIMEOUT_TX_CMP); + + qdf_spin_unlock_bh(&mgmt_txrx_pdev_ctx->mgmt_desc_pool.desc_pool_lock); + + mgmt_txrx_info("retrieved mgmt desc: %pK with desc id: %d", + mgmt_txrx_desc, mgmt_txrx_desc->desc_id); + return mgmt_txrx_desc; +} + +void wlan_mgmt_txrx_desc_put( + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx, + uint32_t desc_id) +{ + struct mgmt_txrx_desc_elem_t *desc; + + desc = &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[desc_id]; + qdf_spin_lock_bh(&mgmt_txrx_pdev_ctx->mgmt_desc_pool.desc_pool_lock); + if (!desc->in_use) { + qdf_spin_unlock_bh(&mgmt_txrx_pdev_ctx->mgmt_desc_pool. 
+ desc_pool_lock); + mgmt_txrx_err("desc %d is freed", desc_id); + return; + } + desc->in_use = false; + desc->context = NULL; + desc->peer = NULL; + desc->nbuf = NULL; + desc->tx_dwnld_cmpl_cb = NULL; + desc->tx_ota_cmpl_cb = NULL; + desc->vdev_id = WLAN_UMAC_VDEV_ID_MAX; + qdf_list_insert_front(&mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list, + &desc->entry); + + /* release the wakelock if there are no pending mgmt tx frames */ + if (mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list.count == + mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list.max_size) + qdf_wake_lock_release(&mgmt_txrx_pdev_ctx->wakelock_tx_cmp, + MGMT_TXRX_WAKELOCK_REASON_TX_CMP); + + qdf_spin_unlock_bh(&mgmt_txrx_pdev_ctx->mgmt_desc_pool.desc_pool_lock); + + mgmt_txrx_info("put mgmt desc: %pK with desc id: %d into freelist", + desc, desc->desc_id); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_main_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_main_i.h new file mode 100644 index 0000000000000000000000000000000000000000..e596d2537dd2156b02657541d3769d0c06df6cd3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_main_i.h @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_MGMT_TXRX_MAIN_I_H_ +#define _WLAN_MGMT_TXRX_MAIN_I_H_ + +/** + * DOC: wlan_mgmt_txrx_main_i.h + * + * management tx/rx layer private API and structures + * + */ + +#include "wlan_mgmt_txrx_utils_api.h" +#include "wlan_objmgr_cmn.h" +#include "qdf_list.h" + + +#define IEEE80211_ADDR_LEN 6 /* size of 802.11 address */ +#define IEEE80211_FC0_TYPE_MASK 0x0c +#define IEEE80211_FC0_SUBTYPE_MASK 0xf0 +#define IEEE80211_FC0_TYPE_MGT 0x00 + +/** + * mgmt_wakelock_reason - reasons mgmt_txrx might hold a wakelock + * @MGMT_TXRX_WAKELOCK_REASON_TX_CMP - wait for mgmt_tx_complete event + */ +enum mgmt_txrx_wakelock_reason { + MGMT_TXRX_WAKELOCK_REASON_TX_CMP +}; + +/* timeout to wait for management_tx_complete event from firmware */ +#define MGMT_TXRX_WAKELOCK_TIMEOUT_TX_CMP 300 + +/* + * generic definitions for IEEE 802.11 frames + */ +struct ieee80211_frame { + uint8_t i_fc[2]; + uint8_t i_dur[2]; + union { + struct { + uint8_t i_addr1[IEEE80211_ADDR_LEN]; + uint8_t i_addr2[IEEE80211_ADDR_LEN]; + uint8_t i_addr3[IEEE80211_ADDR_LEN]; + }; + uint8_t i_addr_all[3 * IEEE80211_ADDR_LEN]; + }; + uint8_t i_seq[2]; + /* possibly followed by addr4[IEEE80211_ADDR_LEN]; */ + /* see below */ +} __packed; + + +/** + * struct mgmt_txrx_desc_elem_t - element in mgmt desc pool linked list + * @entry: list entry + * @tx_dwnld_cmpl_cb: dma completion callback function pointer + * @tx_ota_cmpl_cb: ota completion callback function pointer + * @nbuf: frame buffer + * @desc_id: descriptor id + * @peer: peer who wants to send this frame + * @context: caller component specific context + * @vdev_id: vdev id + * @in_use: flag to denote whether 
desc is in use + */ +struct mgmt_txrx_desc_elem_t { + qdf_list_node_t entry; + mgmt_tx_download_comp_cb tx_dwnld_cmpl_cb; + mgmt_ota_comp_cb tx_ota_cmpl_cb; + qdf_nbuf_t nbuf; + uint32_t desc_id; + struct wlan_objmgr_peer *peer; + void *context; + uint8_t vdev_id; + bool in_use; +}; + +/** + * struct mgmt_desc_pool_t - linked list mgmt desc pool + * @free_list: linked list of free descriptors + * @pool: pool of descriptors in use + * @desc_pool_lock: mgmt. descriptor free pool spinlock + */ +struct mgmt_desc_pool_t { + qdf_list_t free_list; + struct mgmt_txrx_desc_elem_t *pool; + qdf_spinlock_t desc_pool_lock; +}; + +/** + * struct mgmt_rx_handler - structure for storing rx cb + * @comp_id: component id + * @rx_cb: rx callback for the mgmt. frame + * @next: pointer to next rx cb structure + */ +struct mgmt_rx_handler { + enum wlan_umac_comp_id comp_id; + mgmt_frame_rx_callback rx_cb; + struct mgmt_rx_handler *next; +}; + +/** + * struct txrx_stats - txrx stats for mgmt frames + * @pkts_success: no. of packets successfully txed/rcvd + * @pkts_fail: no. of packets unsuccessfully txed/rcvd + * @bytes_success: no. of bytes successfully txed/rcvd + * @bytes_fail: no. of bytes successfully txed/rcvd + * @assoc_req_rcvd: no. of assoc requests rcvd + * @assoc_rsp_rcvd: no. of assoc responses rcvd + * @reassoc_req_rcvd: no. of reassoc requests rcvd + * @reassoc_rsp_rcvd: no. of reassoc responses rcvd + * @probe_req_rcvd: no. of probe requests rcvd + * @prob_resp_rcvd: no. of probe responses rcvd + * @beacon_rcvd: no. of beacons rcvd + * @atim_rcvd: no. of ATIMs rcvd + * @disassoc_rcvd: no. of disassocs rcvd + * @auth_rcvd: no. of auths rcvd + * @deauth_rcvd: no. of deauths rcvd + * @action_rcvd: no. of action frames rcvd + * @action_no_ack_rcvd: no. 
of action frames with no ack rcvd + */ +struct txrx_stats { + uint64_t pkts_success; + uint64_t pkts_fail; + uint64_t bytes_success; + uint64_t bytes_fail; + uint64_t assoc_req_rcvd; + uint64_t assoc_rsp_rcvd; + uint64_t reassoc_req_rcvd; + uint64_t reassoc_rsp_rcvd; + uint64_t probe_req_rcvd; + uint64_t prob_resp_rcvd; + uint64_t beacon_rcvd; + uint64_t atim_rcvd; + uint64_t disassoc_rcvd; + uint64_t auth_rcvd; + uint64_t deauth_rcvd; + uint64_t action_rcvd; + uint64_t action_no_ack_rcvd; +}; + +/** + * struct mgmt_txrx_stats_t - mgmt txrx stats + * @mgmt_tx_stats: mgmt tx stats + * @mgmt_rx_stats: mgmt rx stats + * @ota_comp: no. of ota completions rcvd + * @dma_comp: no. of dma completions rcvd + */ +struct mgmt_txrx_stats_t { + struct txrx_stats mgmt_tx_stats; + struct txrx_stats mgmt_rx_stats; + uint64_t ota_comp; + uint64_t dma_comp; +}; + +/** + * struct mgmt_txrx_priv_psoc_context - mgmt txrx private psoc context + * @psoc: psoc context + * @mgmt_rx_comp_cb: array of pointers of mgmt rx cbs + * @mgmt_txrx_psoc_ctx_lock: mgmt txrx psoc ctx lock + */ +struct mgmt_txrx_priv_psoc_context { + struct wlan_objmgr_psoc *psoc; + struct mgmt_rx_handler *mgmt_rx_comp_cb[MGMT_MAX_FRAME_TYPE]; + qdf_spinlock_t mgmt_txrx_psoc_ctx_lock; +}; + +/** + * struct mgmt_txrx_priv_context_dev - mgmt txrx private context + * @pdev: pdev context + * @mgmt_desc_pool: pointer to mgmt desc. pool + * @mgmt_txrx_stats: pointer to mgmt txrx stats + * @wakelock_tx_cmp: mgmt tx complete wake lock + */ +struct mgmt_txrx_priv_pdev_context { + struct wlan_objmgr_pdev *pdev; + struct mgmt_desc_pool_t mgmt_desc_pool; + struct mgmt_txrx_stats_t *mgmt_txrx_stats; + qdf_wake_lock_t wakelock_tx_cmp; +}; + + +/** + * wlan_mgmt_txrx_desc_pool_init() - initializes mgmt. desc. pool + * @mgmt_txrx_pdev_ctx: mgmt txrx pdev context + * + * This function initializes the mgmt descriptor pool. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_desc_pool_init( + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx); + +/** + * wlan_mgmt_txrx_desc_pool_deinit() - deinitializes mgmt. desc. pool + * @mgmt_txrx_pdev_ctx: mgmt txrx pdev context + * + * This function deinitializes the mgmt descriptor pool. + * + * Return: void + */ +void wlan_mgmt_txrx_desc_pool_deinit( + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx); + +/** + * wlan_mgmt_txrx_desc_get() - gets mgmt. descriptor from freelist + * @mgmt_txrx_pdev_ctx: mgmt txrx pdev context + * + * This function retrieves the mgmt. descriptor for mgmt. tx frames + * from the mgmt. descriptor freelist. + * + * Return: mgmt. descriptor retrieved. + */ +struct mgmt_txrx_desc_elem_t *wlan_mgmt_txrx_desc_get( + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx); + +/** + * wlan_mgmt_txrx_desc_put() - puts mgmt. descriptor back in freelist + * @mgmt_txrx_pdev_ctx: mgmt txrx pdev context + * @desc_id: mgmt txrx descriptor id + * + * This function puts the mgmt. descriptor back in to the freelist. + * + * Return: void + */ +void wlan_mgmt_txrx_desc_put( + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx, + uint32_t desc_id); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/inc/wlan_mgmt_txrx_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/inc/wlan_mgmt_txrx_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..9505ceac4fc3d9121d5cf644b9da65d5847974c5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/inc/wlan_mgmt_txrx_tgt_api.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_MGMT_TXRX_TGT_API_H_ +#define _WLAN_MGMT_TXRX_TGT_API_H_ + +/** + * DOC: wlan_mgmt_txrx_tgt_api.h + * + * management tx/rx layer public API and structures for + * umac southbound interface. + * + */ + +#include "wlan_objmgr_cmn.h" +#include "wlan_mgmt_txrx_utils_api.h" +#include "qdf_nbuf.h" + + +/** + * tgt_mgmt_txrx_rx_frame_handler() - handles rx mgmt. frames + * @psoc: psoc context + * @buf: buffer + * @mgmt_rx_params: rx event params + * + * This function handles mgmt. rx frames and is registered to southbound + * interface through rx ops. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS tgt_mgmt_txrx_rx_frame_handler( + struct wlan_objmgr_psoc *psoc, + qdf_nbuf_t buf, + struct mgmt_rx_event_params *mgmt_rx_params); + +/** + * tgt_mgmt_txrx_tx_completion_handler() - handles mgmt. tx completions + * @pdev: pdev context + * @desc_id: mgmt desc. id + * @status: status of download of tx packet + * @tx_compl_params: tx completion params + * + * This function handles tx completions of mgmt. frames and is registered to + * LMAC_if layer through lmac_if cbs.The cb needs to free the nbuf. In case no + * callback is registered, this function will free the nbuf. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS tgt_mgmt_txrx_tx_completion_handler( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id, uint32_t status, + void *tx_compl_params); + +/** + * tgt_mgmt_txrx_get_nbuf_from_desc_id() - extracts nbuf from mgmt desc + * @pdev: pdev context + * @desc_id: desc_id + * + * This function extracts nbuf from mgmt desc extracted from desc id. + * + * Return: nbuf - in case of success + * NULL - in case of failure + */ +qdf_nbuf_t tgt_mgmt_txrx_get_nbuf_from_desc_id( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id); + +/** + * tgt_mgmt_txrx_get_peer_from_desc_id() - extracts peer from mgmt desc + * @pdev: pdev context + * @desc_id: desc_id + * + * This function extracts peer from mgmt desc extracted from desc id. + * + * Return: peer - in case of success + * NULL - in case of failure + */ +struct wlan_objmgr_peer * +tgt_mgmt_txrx_get_peer_from_desc_id( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id); + +/** + * tgt_mgmt_txrx_get_vdev_id_from_desc_id() - extracts vdev id from mgmt desc + * @pdev: pdev context + * @desc_id: desc_id + * + * This function extracts vdev id from mgmt desc extracted from desc id. + * + * Return: vdev_id - in case of success + * WLAN_UMAC_VDEV_ID_MAX - in case of failure + */ +uint8_t tgt_mgmt_txrx_get_vdev_id_from_desc_id( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id); + +/** + * tgt_mgmt_txrx_get_free_desc_pool_count() - get free mgmt desc count + * @pdev: pdev context + * + * This function returns the count of free mgmt descriptors. 
+ * + * Return: free descpriptor count + */ +uint32_t tgt_mgmt_txrx_get_free_desc_pool_count( + struct wlan_objmgr_pdev *pdev); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/inc/wlan_mgmt_txrx_utils_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/inc/wlan_mgmt_txrx_utils_api.h new file mode 100644 index 0000000000000000000000000000000000000000..88c470d11490aa86452f4a16830a6a88a5037cd6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/inc/wlan_mgmt_txrx_utils_api.h @@ -0,0 +1,917 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_MGMT_TXRX_UTILS_API_H_ +#define _WLAN_MGMT_TXRX_UTILS_API_H_ + +/** + * DOC: wlan_mgmt_txrx_utils_api.h + * + * management tx/rx layer public API and structures + * for umac converged components. + * + */ + +#include "wlan_objmgr_cmn.h" +#include "qdf_nbuf.h" + +#ifdef CONFIG_MCL +#define MGMT_DESC_POOL_MAX 64 +#else +#define MGMT_DESC_POOL_MAX 512 +#endif + +#define mgmt_txrx_log(level, args...) \ + QDF_TRACE(QDF_MODULE_ID_MGMT_TXRX, level, ## args) +#define mgmt_txrx_logfl(level, format, args...) 
#define mgmt_txrx_alert(format, args...) \
	mgmt_txrx_logfl(QDF_TRACE_LEVEL_FATAL, format, ## args)
#define mgmt_txrx_err(format, args...) \
	mgmt_txrx_logfl(QDF_TRACE_LEVEL_ERROR, format, ## args)
#define mgmt_txrx_warn(format, args...) \
	mgmt_txrx_logfl(QDF_TRACE_LEVEL_WARN, format, ## args)
#define mgmt_txrx_notice(format, args...) \
	mgmt_txrx_logfl(QDF_TRACE_LEVEL_INFO, format, ## args)
#define mgmt_txrx_info(format, args...) \
	mgmt_txrx_logfl(QDF_TRACE_LEVEL_INFO_HIGH, format, ## args)
#define mgmt_txrx_debug(format, args...) \
	mgmt_txrx_logfl(QDF_TRACE_LEVEL_DEBUG, format, ## args)
#define mgmt_txrx_err_rl(params...) \
	QDF_TRACE_ERROR_RL(QDF_MODULE_ID_MGMT_TXRX, params)
#define mgmt_txrx_debug_rl(params...) \
	QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_MGMT_TXRX, params)

/**
 * enum mgmt_subtype - enum of mgmt. subtypes
 * @MGMT_SUBTYPE_ASSOC_REQ: association request frame
 * @MGMT_SUBTYPE_ASSOC_RESP: association response frame
 * @MGMT_SUBTYPE_REASSOC_REQ: reassociation request frame
 * @MGMT_SUBTYPE_REASSOC_RESP: reassociation response frame
 * @MGMT_SUBTYPE_PROBE_REQ: probe request frame
 * @MGMT_SUBTYPE_PROBE_RESP: probe response frame
 * @MGMT_SUBTYPE_BEACON: beacon frame
 * @MGMT_SUBTYPE_ATIM: ATIM frame
 * @MGMT_SUBTYPE_DISASSOC: disassociation frame
 * @MGMT_SUBTYPE_AUTH: authentication frame
 * @MGMT_SUBTYPE_DEAUTH: deauthentication frame
 * @MGMT_SUBTYPE_ACTION: action frame
 * @MGMT_SUBTYPE_ACTION_NO_ACK: action no ack frame
 */
enum mgmt_subtype {
	MGMT_SUBTYPE_ASSOC_REQ = 0x00,
	MGMT_SUBTYPE_ASSOC_RESP = 0x10,
	MGMT_SUBTYPE_REASSOC_REQ = 0x20,
	MGMT_SUBTYPE_REASSOC_RESP = 0x30,
	MGMT_SUBTYPE_PROBE_REQ = 0x40,
	MGMT_SUBTYPE_PROBE_RESP = 0x50,
	MGMT_SUBTYPE_BEACON = 0x80,
	MGMT_SUBTYPE_ATIM = 0x90,
	MGMT_SUBTYPE_DISASSOC = 0xa0,
	MGMT_SUBTYPE_AUTH = 0xb0,
	MGMT_SUBTYPE_DEAUTH = 0xc0,
	MGMT_SUBTYPE_ACTION = 0xd0,
	MGMT_SUBTYPE_ACTION_NO_ACK = 0xe0,
};

/**
 * enum mgmt_action_category - mgmt. action categories
 * @ACTION_CATEGORY_SPECTRUM_MGMT: spectrum mgmt. action category
 * @ACTION_CATEGORY_QOS: qos action category
 * @ACTION_CATEGORY_DLS: dls action category
 * @ACTION_CATEGORY_BACK: block ack action category
 * @ACTION_CATEGORY_PUBLIC: public action category
 * @ACTION_CATEGORY_RRM: rrm action category
 * @ACTION_FAST_BSS_TRNST: fast bss transition action category
 * @ACTION_CATEGORY_HT: ht action category
 * @ACTION_CATEGORY_SA_QUERY: sa query action category
 * @ACTION_CATEGORY_PROTECTED_DUAL_OF_PUBLIC_ACTION: protected
 * public action category
 * @ACTION_CATEGORY_WNM: wnm action category
 * @ACTION_CATEGORY_WNM_UNPROTECTED: wnm protected action category
 * @ACTION_CATEGORY_TDLS: tdls action category
 * @ACTION_CATEGORY_MESH_ACTION: mesh action category
 * @ACTION_CATEGORY_MULTIHOP_ACTION: multihop action category
 * @ACTION_CATEGORY_SELF_PROTECTED: self protected action category
 * @ACTION_CATEGORY_DMG: unprotected dmg action category
 * @ACTION_CATEGORY_WMM: wmm action category
 * @ACTION_CATEGORY_FST: fst action category
 * @ACTION_CATEGORY_UNPROT_DMG: dmg action category
 * @ACTION_CATEGORY_VHT: vht action category
 * @ACTION_CATEGORY_VENDOR_SPECIFIC_PROTECTED: vendor specific protected
 * action category
 * @ACTION_CATEGORY_VENDOR_SPECIFIC: vendor specific action category
 */
enum mgmt_action_category {
	ACTION_CATEGORY_SPECTRUM_MGMT = 0,
	ACTION_CATEGORY_QOS = 1,
	ACTION_CATEGORY_DLS = 2,
	ACTION_CATEGORY_BACK = 3,
	ACTION_CATEGORY_PUBLIC = 4,
	ACTION_CATEGORY_RRM = 5,
	ACTION_FAST_BSS_TRNST = 6,
	ACTION_CATEGORY_HT = 7,
	ACTION_CATEGORY_SA_QUERY = 8,
	ACTION_CATEGORY_PROTECTED_DUAL_OF_PUBLIC_ACTION = 9,
	ACTION_CATEGORY_WNM = 10,
	ACTION_CATEGORY_WNM_UNPROTECTED = 11,
	ACTION_CATEGORY_TDLS = 12,
	ACTION_CATEGORY_MESH_ACTION = 13,
	ACTION_CATEGORY_MULTIHOP_ACTION = 14,
	ACTION_CATEGORY_SELF_PROTECTED = 15,
	ACTION_CATEGORY_DMG = 16,
	ACTION_CATEGORY_WMM = 17,
	ACTION_CATEGORY_FST = 18,
	ACTION_CATEGORY_UNPROT_DMG = 20,
	ACTION_CATEGORY_VHT = 21,
	ACTION_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
	ACTION_CATEGORY_VENDOR_SPECIFIC = 127,
};

/**
 * enum spectrum_mgmt_actioncode - spectrum mgmt. action frames
 * @ACTION_SPCT_MSR_REQ: spectrum measurement request frame
 * @ACTION_SPCT_MSR_RPRT: spectrum measurement report frame
 * @ACTION_SPCT_TPC_REQ: spectrum tpc request frame
 * @ACTION_SPCT_TPC_RPRT: spectrum tpc report frame
 * @ACTION_SPCT_CHL_SWITCH: spectrum channel switch frame
 */
enum spectrum_mgmt_actioncode {
	ACTION_SPCT_MSR_REQ,
	ACTION_SPCT_MSR_RPRT,
	ACTION_SPCT_TPC_REQ,
	ACTION_SPCT_TPC_RPRT,
	ACTION_SPCT_CHL_SWITCH,
};

/**
 * enum qos_actioncode - qos action frames
 * @QOS_ADD_TS_REQ: qos add ts request frame
 * @QOS_ADD_TS_RSP: qos add ts response frame
 * @QOS_DEL_TS_REQ: qos del ts request frame
 * @QOS_SCHEDULE: qos schedule frame
 * @QOS_MAP_CONFIGURE: qos map configure frame
 */
enum qos_actioncode {
	QOS_ADD_TS_REQ,
	QOS_ADD_TS_RSP,
	QOS_DEL_TS_REQ,
	QOS_SCHEDULE,
	QOS_MAP_CONFIGURE,
};

/**
 * enum dls_actioncode - dls action frames
 * @DLS_REQUEST: dls request frame
 * @DLS_RESPONSE: dls response frame
 * @DLS_TEARDOWN: dls teardown frame
 */
enum dls_actioncode {
	DLS_REQUEST,
	DLS_RESPONSE,
	DLS_TEARDOWN,
};

/**
 * enum block_ack_actioncode - block ack action frames
 * @ADDBA_REQUEST: add block ack request frame
 * @ADDBA_RESPONSE: add block ack response frame
 * @DELBA: delete block ack frame
 */
enum block_ack_actioncode {
	ADDBA_REQUEST,
	ADDBA_RESPONSE,
	DELBA,
};

/**
 * enum pub_actioncode - public action frames
 * @PUB_ACTION_2040_BSS_COEXISTENCE: public 20-40 bss coex action frame
 * @PUB_ACTION_EXT_CHANNEL_SWITCH_ID: public ext channel switch id action frame
 * @PUB_ACTION_VENDOR_SPECIFIC: vendor specific public action frame
 * @PUB_ACTION_GAS_INITIAL_REQUEST: GAS initial request action frame
 * @PUB_ACTION_GAS_INITIAL_RESPONSE: GAS initial response action frame
 * @PUB_ACTION_GAS_COMEBACK_REQUEST: GAS comeback request action frame
 * @PUB_ACTION_GAS_COMEBACK_RESPONSE: GAS comeback response action frame
 * @PUB_ACTION_TDLS_DISCRESP: tdls discovery response public action frame
 */
enum pub_actioncode {
	PUB_ACTION_2040_BSS_COEXISTENCE = 0,
	PUB_ACTION_EXT_CHANNEL_SWITCH_ID = 4,
	PUB_ACTION_VENDOR_SPECIFIC = 9,
	PUB_ACTION_GAS_INITIAL_REQUEST = 10,
	PUB_ACTION_GAS_INITIAL_RESPONSE = 11,
	PUB_ACTION_GAS_COMEBACK_REQUEST = 12,
	PUB_ACTION_GAS_COMEBACK_RESPONSE = 13,
	PUB_ACTION_TDLS_DISCRESP = 14,
};

/**
 * enum rrm_actioncode - rrm action frames
 * @RRM_RADIO_MEASURE_REQ: rrm radio meas. request frame
 * @RRM_RADIO_MEASURE_RPT: rrm radio meas. report frame
 * @RRM_LINK_MEASUREMENT_REQ: rrm link meas. request frame
 * @RRM_LINK_MEASUREMENT_RPT: rrm link meas. report frame
 * @RRM_NEIGHBOR_REQ: rrm neighbor request frame
 * @RRM_NEIGHBOR_RPT: rrm neighbor report frame
 */
enum rrm_actioncode {
	RRM_RADIO_MEASURE_REQ,
	RRM_RADIO_MEASURE_RPT,
	RRM_LINK_MEASUREMENT_REQ,
	RRM_LINK_MEASUREMENT_RPT,
	RRM_NEIGHBOR_REQ,
	RRM_NEIGHBOR_RPT,
};

/**
 * enum ht_actioncode - ht action frames
 * @HT_ACTION_NOTIFY_CHANWIDTH: ht notify bw action frame
 * @HT_ACTION_SMPS: ht smps action frame
 * @HT_ACTION_PSMP: ht psmp action frame
 * @HT_ACTION_PCO_PHASE: ht pco phase action frame
 * @HT_ACTION_CSI: ht csi action frame
 * @HT_ACTION_NONCOMPRESSED_BF: ht noncompressed bf action frame
 * @HT_ACTION_COMPRESSED_BF: ht compressed bf action frame
 * @HT_ACTION_ASEL_IDX_FEEDBACK: ht asel idx feedback action frame
 */
enum ht_actioncode {
	HT_ACTION_NOTIFY_CHANWIDTH,
	HT_ACTION_SMPS,
	HT_ACTION_PSMP,
	HT_ACTION_PCO_PHASE,
	HT_ACTION_CSI,
	HT_ACTION_NONCOMPRESSED_BF,
	HT_ACTION_COMPRESSED_BF,
	HT_ACTION_ASEL_IDX_FEEDBACK,
};

/**
 * enum sa_query_action - sa query action frames
 * @SA_QUERY_REQUEST: sa query request frame
 * @SA_QUERY_RESPONSE: sa query response frame
 */
enum sa_query_action {
	SA_QUERY_REQUEST,
	SA_QUERY_RESPONSE,
};

/**
 * enum protected_dual_actioncode - protected dual action frames
 * @PDPA_GAS_INIT_REQ: pdpa gas init request frame
 * @PDPA_GAS_INIT_RSP: pdpa gas init response frame
 * @PDPA_GAS_COMEBACK_REQ: pdpa gas comeback request frame
 * @PDPA_GAS_COMEBACK_RSP: pdpa gas comeback response frame
 */
enum protected_dual_actioncode {
	PDPA_GAS_INIT_REQ = 10,
	PDPA_GAS_INIT_RSP = 11,
	PDPA_GAS_COMEBACK_REQ = 12,
	PDPA_GAS_COMEBACK_RSP = 13,
};

/**
 * enum wnm_actioncode - wnm action frames
 * @WNM_BSS_TM_QUERY: wnm bss tm query frame
 * @WNM_BSS_TM_REQUEST: wnm bss tm request frame
 * @WNM_BSS_TM_RESPONSE: wnm bss tm response frame
 * @WNM_FMS_REQ: wnm fms request frame
 * @WNM_FMS_RESP: wnm fms response frame
 * @WNM_TFS_REQ: wnm tfs request frame
 * @WNM_TFS_RESP: wnm tfs response frame
 * @WNM_TFS_NOTIFY: wnm tfs notify frame
 * @WNM_SLEEP_REQ: wnm sleep request frame
 * @WNM_SLEEP_RESP: wnm sleep response frame
 * @WNM_TIM_REQ: wnm Tim broadcast request frame
 * @WNM_TIM_RESP: wnm Tim broadcast response frame
 * @WNM_NOTIF_REQUEST: wnm notify request frame
 * @WNM_NOTIF_RESPONSE: wnm notify response frame
 */
enum wnm_actioncode {
	WNM_BSS_TM_QUERY = 6,
	WNM_BSS_TM_REQUEST = 7,
	WNM_BSS_TM_RESPONSE = 8,
	WNM_FMS_REQ = 9,
	WNM_FMS_RESP = 10,
	WNM_TFS_REQ = 13,
	WNM_TFS_RESP = 14,
	WNM_TFS_NOTIFY = 15,
	WNM_SLEEP_REQ = 16,
	WNM_SLEEP_RESP = 17,
	WNM_TIM_REQ = 18,
	WNM_TIM_RESP = 19,
	WNM_NOTIF_REQUEST = 26,
	WNM_NOTIF_RESPONSE = 27,
};

/**
 * enum tdls_actioncode - tdls action frames
 * @TDLS_SETUP_REQUEST: tdls setup request frame
 * @TDLS_SETUP_RESPONSE: tdls setup response frame
 * @TDLS_SETUP_CONFIRM: tdls setup confirm frame
 * @TDLS_TEARDOWN: tdls teardown frame
 * @TDLS_PEER_TRAFFIC_INDICATION: tdls peer traffic indication frame
 * @TDLS_CHANNEL_SWITCH_REQUEST: tdls channel switch req. frame
 * @TDLS_CHANNEL_SWITCH_RESPONSE: tdls channel switch response frame
 * @TDLS_PEER_PSM_REQUEST: tdls peer psm request frame
 * @TDLS_PEER_PSM_RESPONSE: tdls peer psm response frame
 * @TDLS_PEER_TRAFFIC_RESPONSE: tdls peer traffic response frame
 * @TDLS_DISCOVERY_REQUEST: tdls discovery request frame
 * @TDLS_DISCOVERY_RESPONSE: tdls discovery response frame
 */
enum tdls_actioncode {
	TDLS_SETUP_REQUEST = 0,
	TDLS_SETUP_RESPONSE = 1,
	TDLS_SETUP_CONFIRM = 2,
	TDLS_TEARDOWN = 3,
	TDLS_PEER_TRAFFIC_INDICATION = 4,
	TDLS_CHANNEL_SWITCH_REQUEST = 5,
	TDLS_CHANNEL_SWITCH_RESPONSE = 6,
	TDLS_PEER_PSM_REQUEST = 7,
	TDLS_PEER_PSM_RESPONSE = 8,
	TDLS_PEER_TRAFFIC_RESPONSE = 9,
	TDLS_DISCOVERY_REQUEST = 10,
	TDLS_DISCOVERY_RESPONSE = 14,
};

/**
 * enum mesh_actioncode - mesh action frames
 * @MESH_ACTION_LINK_METRIC_REPORT: mesh link metric report action frame
 * @MESH_ACTION_HWMP_PATH_SELECTION: mesh hwmp path selection action frame
 * @MESH_ACTION_GATE_ANNOUNCEMENT: mesh gate announcement action frame
 * @MESH_ACTION_CONGESTION_CONTROL_NOTIFICATION: mesh congestion control frame
 * @MESH_ACTION_MCCA_SETUP_REQUEST: mesh mcca setup request action frame
 * @MESH_ACTION_MCCA_SETUP_REPLY: mesh mcca setup reply action frame
 * @MESH_ACTION_MCCA_ADVERTISEMENT_REQUEST: mesh mcca advertisement req. frame
 * @MESH_ACTION_MCCA_ADVERTISEMENT: mesh mcca advertisement action frame
 * @MESH_ACTION_MCCA_TEARDOWN: mesh mcca teardown action frame
 * @MESH_ACTION_TBTT_ADJUSTMENT_REQUEST: mesh tbtt adjustment req. frame
 * @MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE: mesh tbtt adjustment rsp. frame
 */
enum mesh_actioncode {
	MESH_ACTION_LINK_METRIC_REPORT,
	MESH_ACTION_HWMP_PATH_SELECTION,
	MESH_ACTION_GATE_ANNOUNCEMENT,
	MESH_ACTION_CONGESTION_CONTROL_NOTIFICATION,
	MESH_ACTION_MCCA_SETUP_REQUEST,
	MESH_ACTION_MCCA_SETUP_REPLY,
	MESH_ACTION_MCCA_ADVERTISEMENT_REQUEST,
	MESH_ACTION_MCCA_ADVERTISEMENT,
	MESH_ACTION_MCCA_TEARDOWN,
	MESH_ACTION_TBTT_ADJUSTMENT_REQUEST,
	MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE,
};

/**
 * enum self_protected_actioncode - self protected action frames
 * @SP_RESERVED: self protected reserved
 * @SP_MESH_PEERING_OPEN: self protected mesh peering open frame
 * @SP_MESH_PEERING_CONFIRM: self protected mesh peering confirm frame
 * @SP_MESH_PEERING_CLOSE: self protected mesh peering close frame
 * @SP_MGK_INFORM: self protected mgk inform frame
 * @SP_MGK_ACK: self protected mgk ack frame
 */
enum self_protected_actioncode {
	SP_RESERVED,
	SP_MESH_PEERING_OPEN,
	SP_MESH_PEERING_CONFIRM,
	SP_MESH_PEERING_CLOSE,
	SP_MGK_INFORM,
	SP_MGK_ACK,
};

/**
 * enum wmm_actioncode - wmm action frames
 * @WMM_QOS_SETUP_REQ: wmm qos setup request frame
 * @WMM_QOS_SETUP_RESP: wmm qos setup response frame
 * @WMM_QOS_TEARDOWN: wmm qos teardown frame
 */
enum wmm_actioncode {
	WMM_QOS_SETUP_REQ,
	WMM_QOS_SETUP_RESP,
	WMM_QOS_TEARDOWN,
};

/**
 * enum vht_actioncode - vht action frames
 * @VHT_ACTION_COMPRESSED_BF: vht compressed bf action frame
 * @VHT_ACTION_GID_NOTIF: vht gid notification action frame
 * @VHT_ACTION_OPMODE_NOTIF: vht opmode notification action frame
 */
enum vht_actioncode {
	VHT_ACTION_COMPRESSED_BF,
	VHT_ACTION_GID_NOTIF,
	VHT_ACTION_OPMODE_NOTIF,
};

/**
 * struct action_frm_hdr - action frame header
 * @action_category: action category
 * @action_code: action code
 */
struct action_frm_hdr {
	uint8_t action_category;
	uint8_t action_code;
};
frames + * @MGMT_FRM_UNSPECIFIED: unspecified + * @MGMT_ASSOC_REQ: association request frame + * @MGMT_ASSOC_RESP: association response frame + * @MGMT_REASSOC_REQ: reassociation request frame + * @MGMT_REASSOC_RESP: reassociation response frame + * @MGMT_PROBE_REQ: probe request frame + * @MGMT_PROBE_RESP: probe response frame + * @MGMT_BEACON: beacon frame + * @MGMT_ATIM: ATIM frame + * @MGMT_DISASSOC: disassociation frame + * @MGMT_AUTH: authentication frame + * @MGMT_DEAUTH: deauthentication frame + * @MGMT_ACTION_MEAS_REQUEST: measure channels request action frame + * @MGMT_ACTION_MEAS_REPORT: measure channels response action frame + * @MGMT_ACTION_TPC_REQUEST: transmit power control request action frame + * @MGMT_ACTION_TPC_REPORT: transmit power control response action frame + * @MGMT_ACTION_CHAN_SWITCH: 802.11 channel switch announcement frame + * @MGMT_ACTION_QOS_ADD_TS_REQ: qos add ts request frame + * @MGMT_ACTION_QOS_ADD_TS_RSP: qos add ts response frame + * @MGMT_ACTION_QOS_DEL_TS_REQ: qos del ts request frame + * @MGMT_ACTION_QOS_SCHEDULE: qos schedule frame + * @MGMT_ACTION_QOS_MAP_CONFIGURE: qos map configure frame + * @MGMT_ACTION_DLS_REQUEST: DLS request action frame + * @MGMT_ACTION_DLS_RESPONSE: DLS response action frame + * @MGMT_ACTION_DLS_TEARDOWN: DLS taerdown action frame + * @MGMT_ACTION_BA_ADDBA_REQUEST: ADDBA request action frame + * @MGMT_ACTION_BA_ADDBA_RESPONSE: ADDBA response action frame + * @MGMT_ACTION_BA_DELBA: DELBA action frame + * @MGMT_ACTION_2040_BSS_COEXISTENCE: 20-40 bss coex action frame + * @MGMT_ACTION_CATEGORY_VENDOR_SPECIFIC: category vendor spcific action frame + * @MGMT_ACTION_EXT_CHANNEL_SWITCH_ID: ext channel switch id action frame + * @MGMT_ACTION_VENDOR_SPECIFIC: vendor specific action frame + * @MGMT_ACTION_TDLS_DISCRESP: TDLS discovery response frame + * @MGMT_ACTION_RRM_RADIO_MEASURE_REQ: rrm radio meas. req. action frame + * @MGMT_ACTION_RRM_RADIO_MEASURE_RPT: rrm radio meas. 
report action frame + * @MGMT_ACTION_RRM_LINK_MEASUREMENT_REQ: rrm link meas. req. action frame + * @MGMT_ACTION_RRM_LINK_MEASUREMENT_RPT: rrm link meas. report action frame + * @MGMT_ACTION_RRM_NEIGHBOR_REQ: rrm neighbor request action frame + * @MGMT_ACTION_RRM_NEIGHBOR_RPT: rrm neighbor response action frame + * @MGMT_ACTION_HT_NOTIFY_CHANWIDTH: notify channel width action frame + * @MGMT_ACTION_HT_SMPS: spatial multiplexing power save action frame + * @MGMT_ACTION_HT_PSMP: psmp action frame + * @MGMT_ACTION_HT_PCO_PHASE: pco phase action frame + * @MGMT_ACTION_HT_CSI: CSI action frame + * @MGMT_ACTION_HT_NONCOMPRESSED_BF: non-compressed beamforming action frame + * @MGMT_ACTION_HT_COMPRESSED_BF: compressed beamforming action frame + * @MGMT_ACTION_HT_ASEL_IDX_FEEDBACK: asel idx feedback action frame + * @MGMT_ACTION_SA_QUERY_REQUEST: SA query request frame + * @MGMT_ACTION_SA_QUERY_RESPONSE: SA query response frame + * @MGMT_ACTION_PDPA_GAS_INIT_REQ: pdpa gas init request action frame + * @MGMT_ACTION_PDPA_GAS_INIT_RSP: pdpa gas init response frame + * @MGMT_ACTION_PDPA_GAS_COMEBACK_REQ: pdpa gas comeback req. action frame + * @MGMT_ACTION_PDPA_GAS_COMEBACK_RSP: pdpa gas comeback rsp. 
action frame + * @MGMT_ACTION_WNM_BSS_TM_QUERY: wnm bss tm query action frame + * @MGMT_ACTION_WNM_BSS_TM_REQUEST: wnm bss tm request action frame + * @MGMT_ACTION_WNM_BSS_TM_RESPONSE: wnm bss tm response action frame + * @MGMT_ACTION_WNM_NOTIF_REQUEST: wnm notification request action frame + * @MGMT_ACTION_WNM_NOTIF_RESPONSE: wnm notification response action frame + * @MGMT_ACTION_WNM_FMS_REQ: wnm fms request frame + * @MGMT_ACTION_WNM_FMS_RESP: wnm fms response frame + * @MGMT_ACTION_WNM_TFS_REQ: wnm tfs request frame + * @MGMT_ACTION_WNM_TFS_RESP: wnm tfs response frame + * @MGMT_ACTION_WNM_TFS_NOTIFY: wnm tfs notify frame + * @MGMT_ACTION_WNM_SLEEP_REQ: wnm sleep request frame + * @MGMT_ACTION_WNM_SLEEP_RESP: wnm sleep response frame + * @MGMT_ACTION_WNM_TIM_REQ: wnm Tim broadcast request frame + * @MGMT_ACTION_WNM_TIM_RESP: wnm Tim broadcast response frame + * @MGMT_ACTION_TDLS_SETUP_REQ: tdls setup request action frame + * @MGMT_ACTION_TDLS_SETUP_RSP: tdls setup response frame + * @MGMT_ACTION_TDLS_SETUP_CNF: tdls setup confirm frame + * @MGMT_ACTION_TDLS_TEARDOWN: tdls teardown frame + * @MGMT_ACTION_TDLS_PEER_TRAFFIC_IND: tdls peer traffic indication frame + * @MGMT_ACTION_TDLS_CH_SWITCH_REQ: tdls channel switch req. 
frame + * @MGMT_ACTION_TDLS_CH_SWITCH_RSP: tdls channel switch response frame + * @MGMT_ACTION_TDLS_PEER_PSM_REQUEST: tdls peer psm request frame + * @MGMT_ACTION_TDLS_PEER_PSM_RESPONSE: tdls peer psm response frame + * @MGMT_ACTION_TDLS_PEER_TRAFFIC_RSP: tdls peer traffic response frame + * @MGMT_ACTION_TDLS_DIS_REQ: tdls discovery request frame + * @MGMT_ACTION_MESH_LINK_METRIC_REPORT: mesh link metric report action frame + * @MGMT_ACTION_MESH_HWMP_PATH_SELECTION: mesh hwmp path selection action frame + * @MGMT_ACTION_MESH_GATE_ANNOUNCEMENT: mesh gate announcement action frame + * @MGMT_ACTION_MESH_CONGESTION_CONTROL_NOTIFICATION: mesh congestion control + * @MGMT_ACTION_MESH_MCCA_SETUP_REQUEST: mesh mcca setup request action frame + * @MGMT_ACTION_MESH_MCCA_SETUP_REPLY: mesh mcca setup reply action frame + * @MGMT_ACTION_MESH_MCCA_ADVERTISEMENT_REQUEST: mesh mcca advertisement req. + * @MGMT_ACTION_MESH_MCCA_ADVERTISEMENT: mesh mcca advertisement action frame + * @MGMT_ACTION_MESH_MCCA_TEARDOWN: mesh mcca teardown action fram + * @MGMT_ACTION_MESH_TBTT_ADJUSTMENT_REQUEST: mesh tbtt adjustment req. frame + * @MGMT_ACTION_MESH_TBTT_ADJUSTMENT_RESPONSE: mesh tbtt adjustment rsp. 
frame + * @MGMT_ACTION_SP_MESH_PEERING_OPEN: self protected mesh peering open frame + * @MGMT_ACTION_SP_MESH_PEERING_CONFIRM: self protected mesh peering confirm + * @MGMT_ACTION_SP_MESH_PEERING_CLOSE: self protected mesh peering close frame + * @MGMT_ACTION_SP_MGK_INFORM: self protected mgk inform frame + * @MGMT_ACTION_SP_MGK_ACK: self protected mgk ack frame + * @MGMT_ACTION_WMM_QOS_SETUP_REQ: WMM qos setup request action frame + * @MGMT_ACTION_WMM_QOS_SETUP_RESP: WMM qos setup response action frame + * @MGMT_ACTION_WMM_QOS_TEARDOWN: WMM qos teardown action frame + * @MGMT_ACTION_VHT_COMPRESSED_BF: vht compressed bf action frame + * @MGMT_ACTION_VHT_GID_NOTIF: vht gid notification action frame + * @MGMT_ACTION_VHT_OPMODE_NOTIF: vht opmode notification action frame + * @MGMT_FRAME_TYPE_ALL: mgmt frame type for all type of frames + * @MGMT_MAX_FRAME_TYPE: max. mgmt frame types + * @MGMT_ACTION_GAS_INITIAL_REQUEST: GAS Initial request action frame + * @MGMT_ACTION_GAS_INITIAL_RESPONSE: GAS Initial response action frame + * @MGMT_ACTION_GAS_COMEBACK_REQUEST: GAS Comeback request action frame + * @MGMT_ACTION_GAS_COMEBACK_RESPONSE: GAS Comeback response action frame + */ +enum mgmt_frame_type { + MGMT_FRM_UNSPECIFIED = -1, + MGMT_ASSOC_REQ, + MGMT_ASSOC_RESP, + MGMT_REASSOC_REQ, + MGMT_REASSOC_RESP, + MGMT_PROBE_REQ, + MGMT_PROBE_RESP, + MGMT_BEACON, + MGMT_ATIM, + MGMT_DISASSOC, + MGMT_AUTH, + MGMT_DEAUTH, + MGMT_ACTION_MEAS_REQUEST, + MGMT_ACTION_MEAS_REPORT, + MGMT_ACTION_TPC_REQUEST, + MGMT_ACTION_TPC_REPORT, + MGMT_ACTION_CHAN_SWITCH, + MGMT_ACTION_QOS_ADD_TS_REQ, + MGMT_ACTION_QOS_ADD_TS_RSP, + MGMT_ACTION_QOS_DEL_TS_REQ, + MGMT_ACTION_QOS_SCHEDULE, + MGMT_ACTION_QOS_MAP_CONFIGURE, + MGMT_ACTION_DLS_REQUEST, + MGMT_ACTION_DLS_RESPONSE, + MGMT_ACTION_DLS_TEARDOWN, + MGMT_ACTION_BA_ADDBA_REQUEST, + MGMT_ACTION_BA_ADDBA_RESPONSE, + MGMT_ACTION_BA_DELBA, + MGMT_ACTION_2040_BSS_COEXISTENCE, + MGMT_ACTION_CATEGORY_VENDOR_SPECIFIC, + MGMT_ACTION_EXT_CHANNEL_SWITCH_ID, 
+ MGMT_ACTION_VENDOR_SPECIFIC, + MGMT_ACTION_TDLS_DISCRESP, + MGMT_ACTION_RRM_RADIO_MEASURE_REQ, + MGMT_ACTION_RRM_RADIO_MEASURE_RPT, + MGMT_ACTION_RRM_LINK_MEASUREMENT_REQ, + MGMT_ACTION_RRM_LINK_MEASUREMENT_RPT, + MGMT_ACTION_RRM_NEIGHBOR_REQ, + MGMT_ACTION_RRM_NEIGHBOR_RPT, + MGMT_ACTION_HT_NOTIFY_CHANWIDTH, + MGMT_ACTION_HT_SMPS, + MGMT_ACTION_HT_PSMP, + MGMT_ACTION_HT_PCO_PHASE, + MGMT_ACTION_HT_CSI, + MGMT_ACTION_HT_NONCOMPRESSED_BF, + MGMT_ACTION_HT_COMPRESSED_BF, + MGMT_ACTION_HT_ASEL_IDX_FEEDBACK, + MGMT_ACTION_SA_QUERY_REQUEST, + MGMT_ACTION_SA_QUERY_RESPONSE, + MGMT_ACTION_PDPA_GAS_INIT_REQ, + MGMT_ACTION_PDPA_GAS_INIT_RSP, + MGMT_ACTION_PDPA_GAS_COMEBACK_REQ, + MGMT_ACTION_PDPA_GAS_COMEBACK_RSP, + MGMT_ACTION_WNM_BSS_TM_QUERY, + MGMT_ACTION_WNM_BSS_TM_REQUEST, + MGMT_ACTION_WNM_BSS_TM_RESPONSE, + MGMT_ACTION_WNM_NOTIF_REQUEST, + MGMT_ACTION_WNM_NOTIF_RESPONSE, + MGMT_ACTION_WNM_FMS_REQ, + MGMT_ACTION_WNM_FMS_RESP, + MGMT_ACTION_WNM_TFS_REQ, + MGMT_ACTION_WNM_TFS_RESP, + MGMT_ACTION_WNM_TFS_NOTIFY, + MGMT_ACTION_WNM_SLEEP_REQ, + MGMT_ACTION_WNM_SLEEP_RESP, + MGMT_ACTION_WNM_TIM_REQ, + MGMT_ACTION_WNM_TIM_RESP, + MGMT_ACTION_TDLS_SETUP_REQ, + MGMT_ACTION_TDLS_SETUP_RSP, + MGMT_ACTION_TDLS_SETUP_CNF, + MGMT_ACTION_TDLS_TEARDOWN, + MGMT_ACTION_TDLS_PEER_TRAFFIC_IND, + MGMT_ACTION_TDLS_CH_SWITCH_REQ, + MGMT_ACTION_TDLS_CH_SWITCH_RSP, + MGMT_ACTION_TDLS_PEER_PSM_REQUEST, + MGMT_ACTION_TDLS_PEER_PSM_RESPONSE, + MGMT_ACTION_TDLS_PEER_TRAFFIC_RSP, + MGMT_ACTION_TDLS_DIS_REQ, + MGMT_ACTION_MESH_LINK_METRIC_REPORT, + MGMT_ACTION_MESH_HWMP_PATH_SELECTION, + MGMT_ACTION_MESH_GATE_ANNOUNCEMENT, + MGMT_ACTION_MESH_CONGESTION_CONTROL_NOTIFICATION, + MGMT_ACTION_MESH_MCCA_SETUP_REQUEST, + MGMT_ACTION_MESH_MCCA_SETUP_REPLY, + MGMT_ACTION_MESH_MCCA_ADVERTISEMENT_REQUEST, + MGMT_ACTION_MESH_MCCA_ADVERTISEMENT, + MGMT_ACTION_MESH_MCCA_TEARDOWN, + MGMT_ACTION_MESH_TBTT_ADJUSTMENT_REQUEST, + MGMT_ACTION_MESH_TBTT_ADJUSTMENT_RESPONSE, + MGMT_ACTION_SP_MESH_PEERING_OPEN, + 
MGMT_ACTION_SP_MESH_PEERING_CONFIRM, + MGMT_ACTION_SP_MESH_PEERING_CLOSE, + MGMT_ACTION_SP_MGK_INFORM, + MGMT_ACTION_SP_MGK_ACK, + MGMT_ACTION_WMM_QOS_SETUP_REQ, + MGMT_ACTION_WMM_QOS_SETUP_RESP, + MGMT_ACTION_WMM_QOS_TEARDOWN, + MGMT_ACTION_VHT_COMPRESSED_BF, + MGMT_ACTION_VHT_GID_NOTIF, + MGMT_ACTION_VHT_OPMODE_NOTIF, + MGMT_ACTION_GAS_INITIAL_REQUEST, + MGMT_ACTION_GAS_INITIAL_RESPONSE, + MGMT_ACTION_GAS_COMEBACK_REQUEST, + MGMT_ACTION_GAS_COMEBACK_RESPONSE, + MGMT_FRAME_TYPE_ALL, + MGMT_MAX_FRAME_TYPE, +}; + +#define WLAN_MGMT_TXRX_HOST_MAX_ANTENNA 4 +#define WLAN_INVALID_PER_CHAIN_RSSI 0x80 +#define WLAN_NOISE_FLOOR_DBM_DEFAULT -96 +/** + * struct mgmt_rx_event_params - host mgmt header params + * @channel: channel on which this frame is received + * @snr: snr information used to call rssi + * @rssi_ctl[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA]: RSSI of PRI 20MHz for each chain + * @rate: Rate kbps + * @phy_mode: rx phy mode + * @buf_len: length of the frame + * @status: rx status + * @flags: information about the management frame e.g. can give a + * scan source for a scan result mgmt frame + * @rssi: combined RSSI, i.e. the sum of the snr + noise floor (dBm units) + * @tsf_delta: tsf delta + * @pdev_id: pdev id + * @rx_params: pointer to other rx params + * (win specific, will be removed in phase 4) + */ +struct mgmt_rx_event_params { + uint32_t channel; + uint32_t snr; + uint8_t rssi_ctl[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA]; + uint32_t rate; + enum wlan_phymode phy_mode; + uint32_t buf_len; + QDF_STATUS status; + uint32_t flags; + int32_t rssi; + uint32_t tsf_delta; + uint8_t pdev_id; + void *rx_params; +}; + +/** + * mgmt_tx_download_comp_cb - function pointer for tx download completions. + * @context: caller component specific context + * @buf: buffer + * @free: to free/not free the buffer + * + * This is the function pointer to be called on tx download completion + * if download complete is required. 
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
typedef QDF_STATUS (*mgmt_tx_download_comp_cb)(void *context,
					       qdf_nbuf_t buf, bool free);

/**
 * mgmt_ota_comp_cb - function pointer for tx ota completions.
 * @context: caller component specific context
 * @buf: buffer
 * @status: tx completion status
 * @tx_compl_params: tx completion params
 *
 * This is the function pointer to be called on tx ota completion.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
typedef QDF_STATUS (*mgmt_ota_comp_cb)(void *context, qdf_nbuf_t buf,
				       uint32_t status, void *tx_compl_params);

/**
 * mgmt_frame_rx_callback - function pointer for receiving mgmt rx frames
 * @psoc: psoc context
 * @peer: peer
 * @buf: buffer
 * @mgmt_rx_params: rx params
 * @frm_type: mgmt rx frame type
 *
 * This is the function pointer to be called on receiving mgmt rx frames.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
typedef QDF_STATUS (*mgmt_frame_rx_callback)(
			struct wlan_objmgr_psoc *psoc,
			struct wlan_objmgr_peer *peer,
			qdf_nbuf_t buf,
			struct mgmt_rx_event_params *mgmt_rx_params,
			enum mgmt_frame_type frm_type);

/**
 * mgmt_frame_fill_peer_cb - Function pointer to fill peer in the buf
 * @peer: peer
 * @buf: buffer
 *
 * This is the function pointer to be called during drain to fill the
 * peer into the buf's cb structure.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
typedef QDF_STATUS (*mgmt_frame_fill_peer_cb)(
			struct wlan_objmgr_peer *peer,
			qdf_nbuf_t buf);

/**
 * struct mgmt_txrx_mgmt_frame_cb_info - frm and corresponding rx cb info
 * @frm_type: mgmt frm type
 * @mgmt_rx_cb: corresponding rx callback
 */
struct mgmt_txrx_mgmt_frame_cb_info {
	enum mgmt_frame_type frm_type;
	mgmt_frame_rx_callback mgmt_rx_cb;
};


/**
 * wlan_mgmt_txrx_init() - initialize mgmt txrx context.
 *
 * This function initializes the mgmt txrx context,
 * mgmt descriptor pool, etc.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS wlan_mgmt_txrx_init(void);

/**
 * wlan_mgmt_txrx_deinit() - deinitialize mgmt txrx context.
 *
 * This function deinitializes the mgmt txrx context,
 * mgmt descriptor pool, etc.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS wlan_mgmt_txrx_deinit(void);

/**
 * wlan_mgmt_txrx_mgmt_frame_tx() - transmits mgmt. frame
 * @peer: peer
 * @context: caller component specific context
 * @buf: buffer to be transmitted
 * @tx_comp_cb: download completion cb function
 * @tx_ota_comp_cb: post processing (ota completion) cb function
 * @comp_id: umac component id
 * @mgmt_tx_params: mgmt tx params
 *
 * This function transmits the mgmt. frame to southbound interface.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS wlan_mgmt_txrx_mgmt_frame_tx(struct wlan_objmgr_peer *peer,
					void *context,
					qdf_nbuf_t buf,
					mgmt_tx_download_comp_cb tx_comp_cb,
					mgmt_ota_comp_cb tx_ota_comp_cb,
					enum wlan_umac_comp_id comp_id,
					void *mgmt_tx_params);

/**
 * wlan_mgmt_txrx_beacon_frame_tx() - transmits mgmt. beacon
 * @peer: peer object (doc previously said @psoc; the parameter is a peer)
 * @buf: buffer to be transmitted
 * @comp_id: umac component id
 *
 * This function transmits the mgmt. beacon to southbound interface.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS wlan_mgmt_txrx_beacon_frame_tx(struct wlan_objmgr_peer *peer,
					  qdf_nbuf_t buf,
					  enum wlan_umac_comp_id comp_id);

#ifdef WLAN_SUPPORT_FILS
/**
 * wlan_mgmt_txrx_fd_action_frame_tx() - transmits mgmt. FD Action frame
 * @vdev: vdev object
 * @buf: buffer to be transmitted
 * @comp_id: umac component id
 *
 * This function transmits the FILS Discovery Action frame to
 * southbound interface.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS wlan_mgmt_txrx_fd_action_frame_tx(struct wlan_objmgr_vdev *vdev,
					     qdf_nbuf_t buf,
					     enum wlan_umac_comp_id comp_id);
#endif /* WLAN_SUPPORT_FILS */

/**
 * wlan_mgmt_txrx_register_rx_cb() - registers the rx cb for mgmt. frames
 * @psoc: psoc context
 * @comp_id: umac component id
 * @frm_cb_info: pointer to array of structure containing frm type and callback
 * @num_entries: num of frames for which cb to be registered
 *
 * This function registers rx callback for mgmt. frames for
 * the corresponding umac component passed in the func.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS wlan_mgmt_txrx_register_rx_cb(
			struct wlan_objmgr_psoc *psoc,
			enum wlan_umac_comp_id comp_id,
			struct mgmt_txrx_mgmt_frame_cb_info *frm_cb_info,
			uint8_t num_entries);

/**
 * wlan_mgmt_txrx_vdev_drain() - Function to drain all mgmt packets
 * specific to a vdev
 * @vdev: vdev context
 * @mgmt_fill_peer_cb: callback func to UMAC to fill peer into buf
 * @status: opaque pointer about the status of the pkts passed to UMAC
 *
 * This function drains all mgmt packets of a vdev. This can be used in the
 * event of target going down without sending completions.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS wlan_mgmt_txrx_vdev_drain(
			struct wlan_objmgr_vdev *vdev,
			mgmt_frame_fill_peer_cb mgmt_fill_peer_cb,
			void *status);

/**
 * wlan_mgmt_txrx_deregister_rx_cb() - deregisters the rx cb for mgmt. frames
 * @psoc: psoc context
 * @comp_id: umac component id
 * @frm_cb_info: pointer to array of structure containing frm type and callback
 * @num_entries: num of frames for which cb to be deregistered
 *
 * This function deregisters rx callback for mgmt. frames for
 * the corresponding umac component passed in the func.
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_deregister_rx_cb( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id comp_id, + struct mgmt_txrx_mgmt_frame_cb_info *frm_cb_info, + uint8_t num_entries); + +/** + * wlan_mgmt_txrx_psoc_open() - mgmt txrx module psoc open API + * @psoc: psoc context + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_psoc_open(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_mgmt_txrx_psoc_close() - mgmt txrx module psoc close API + * @psoc: psoc context + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_psoc_close(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_mgmt_txrx_pdev_open() - mgmt txrx module pdev open API + * @pdev: pdev context + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_pdev_open(struct wlan_objmgr_pdev *pdev); + + +/** + * wlan_mgmt_txrx_pdev_close() - mgmt txrx module pdev close API + * @pdev: pdev context + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_mgmt_txrx_pdev_close(struct wlan_objmgr_pdev *pdev); +#endif + + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/src/wlan_mgmt_txrx_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/src/wlan_mgmt_txrx_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..ecffc258169129b4ebda690b85fd8833cbd8c804 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/src/wlan_mgmt_txrx_tgt_api.c @@ -0,0 +1,1241 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_mgmt_txrx_tgt_api.c + * This file contains mgmt txrx public API definitions for + * southbound interface. + */ + +#include "wlan_mgmt_txrx_tgt_api.h" +#include "wlan_mgmt_txrx_utils_api.h" +#include "../../core/src/wlan_mgmt_txrx_main_i.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_objmgr_peer_obj.h" +#include "wlan_objmgr_pdev_obj.h" + + +/** + * mgmt_get_spec_mgmt_action_subtype() - gets spec mgmt action subtype + * @action_code: action code + * + * This function returns the subtype for spectrum management action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_spec_mgmt_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case ACTION_SPCT_MSR_REQ: + frm_type = MGMT_ACTION_MEAS_REQUEST; + break; + case ACTION_SPCT_MSR_RPRT: + frm_type = MGMT_ACTION_MEAS_REPORT; + break; + case ACTION_SPCT_TPC_REQ: + frm_type = MGMT_ACTION_TPC_REQUEST; + break; + case ACTION_SPCT_TPC_RPRT: + frm_type = MGMT_ACTION_TPC_REPORT; + break; + case ACTION_SPCT_CHL_SWITCH: + frm_type = MGMT_ACTION_CHAN_SWITCH; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_qos_action_subtype() - gets qos action subtype + * @action_code: action code + * + * This function returns the subtype for qos action + * category. 
+ * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_qos_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case QOS_ADD_TS_REQ: + frm_type = MGMT_ACTION_QOS_ADD_TS_REQ; + break; + case QOS_ADD_TS_RSP: + frm_type = MGMT_ACTION_QOS_ADD_TS_RSP; + break; + case QOS_DEL_TS_REQ: + frm_type = MGMT_ACTION_QOS_DEL_TS_REQ; + break; + case QOS_SCHEDULE: + frm_type = MGMT_ACTION_QOS_SCHEDULE; + break; + case QOS_MAP_CONFIGURE: + frm_type = MGMT_ACTION_QOS_MAP_CONFIGURE; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_dls_action_subtype() - gets dls action subtype + * @action_code: action code + * + * This function returns the subtype for dls action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_dls_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case DLS_REQUEST: + frm_type = MGMT_ACTION_DLS_REQUEST; + break; + case DLS_RESPONSE: + frm_type = MGMT_ACTION_DLS_RESPONSE; + break; + case DLS_TEARDOWN: + frm_type = MGMT_ACTION_DLS_TEARDOWN; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_back_action_subtype() - gets block ack action subtype + * @action_code: action code + * + * This function returns the subtype for block ack action + * category. 
+ * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_back_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case ADDBA_REQUEST: + frm_type = MGMT_ACTION_BA_ADDBA_REQUEST; + break; + case ADDBA_RESPONSE: + frm_type = MGMT_ACTION_BA_ADDBA_RESPONSE; + break; + case DELBA: + frm_type = MGMT_ACTION_BA_DELBA; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_public_action_subtype() - gets public action subtype + * @action_code: action code + * + * This function returns the subtype for public action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_public_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case PUB_ACTION_2040_BSS_COEXISTENCE: + frm_type = MGMT_ACTION_2040_BSS_COEXISTENCE; + break; + case PUB_ACTION_EXT_CHANNEL_SWITCH_ID: + frm_type = MGMT_ACTION_EXT_CHANNEL_SWITCH_ID; + break; + case PUB_ACTION_VENDOR_SPECIFIC: + frm_type = MGMT_ACTION_VENDOR_SPECIFIC; + break; + case PUB_ACTION_TDLS_DISCRESP: + frm_type = MGMT_ACTION_TDLS_DISCRESP; + break; + case PUB_ACTION_GAS_INITIAL_REQUEST: + frm_type = MGMT_ACTION_GAS_INITIAL_REQUEST; + break; + case PUB_ACTION_GAS_INITIAL_RESPONSE: + frm_type = MGMT_ACTION_GAS_INITIAL_RESPONSE; + break; + case PUB_ACTION_GAS_COMEBACK_REQUEST: + frm_type = MGMT_ACTION_GAS_COMEBACK_REQUEST; + break; + case PUB_ACTION_GAS_COMEBACK_RESPONSE: + frm_type = MGMT_ACTION_GAS_COMEBACK_RESPONSE; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_rrm_action_subtype() - gets rrm action subtype + * @action_code: action code + * + * This function returns the subtype for rrm action + * category. 
+ * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_rrm_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case RRM_RADIO_MEASURE_REQ: + frm_type = MGMT_ACTION_RRM_RADIO_MEASURE_REQ; + break; + case RRM_RADIO_MEASURE_RPT: + frm_type = MGMT_ACTION_RRM_RADIO_MEASURE_RPT; + break; + case RRM_LINK_MEASUREMENT_REQ: + frm_type = MGMT_ACTION_RRM_LINK_MEASUREMENT_REQ; + break; + case RRM_LINK_MEASUREMENT_RPT: + frm_type = MGMT_ACTION_RRM_LINK_MEASUREMENT_RPT; + break; + case RRM_NEIGHBOR_REQ: + frm_type = MGMT_ACTION_RRM_NEIGHBOR_REQ; + break; + case RRM_NEIGHBOR_RPT: + frm_type = MGMT_ACTION_RRM_NEIGHBOR_RPT; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_ht_action_subtype() - gets ht action subtype + * @action_code: action code + * + * This function returns the subtype for ht action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_ht_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case HT_ACTION_NOTIFY_CHANWIDTH: + frm_type = MGMT_ACTION_HT_NOTIFY_CHANWIDTH; + break; + case HT_ACTION_SMPS: + frm_type = MGMT_ACTION_HT_SMPS; + break; + case HT_ACTION_PSMP: + frm_type = MGMT_ACTION_HT_PSMP; + break; + case HT_ACTION_PCO_PHASE: + frm_type = MGMT_ACTION_HT_PCO_PHASE; + break; + case HT_ACTION_CSI: + frm_type = MGMT_ACTION_HT_CSI; + break; + case HT_ACTION_NONCOMPRESSED_BF: + frm_type = MGMT_ACTION_HT_NONCOMPRESSED_BF; + break; + case HT_ACTION_COMPRESSED_BF: + frm_type = MGMT_ACTION_HT_COMPRESSED_BF; + break; + case HT_ACTION_ASEL_IDX_FEEDBACK: + frm_type = MGMT_ACTION_HT_ASEL_IDX_FEEDBACK; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_sa_query_action_subtype() - gets sa query action subtype + * @action_code: action code + * + * This function returns the subtype for sa query action 
+ * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_sa_query_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case SA_QUERY_REQUEST: + frm_type = MGMT_ACTION_SA_QUERY_REQUEST; + break; + case SA_QUERY_RESPONSE: + frm_type = MGMT_ACTION_SA_QUERY_RESPONSE; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_pdpa_action_subtype() - gets pdpa action subtype + * @action_code: action code + * + * This function returns the subtype for protected dual public + * action category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_pdpa_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case PDPA_GAS_INIT_REQ: + frm_type = MGMT_ACTION_PDPA_GAS_INIT_REQ; + break; + case PDPA_GAS_INIT_RSP: + frm_type = MGMT_ACTION_PDPA_GAS_INIT_RSP; + break; + case PDPA_GAS_COMEBACK_REQ: + frm_type = MGMT_ACTION_PDPA_GAS_COMEBACK_REQ; + break; + case PDPA_GAS_COMEBACK_RSP: + frm_type = MGMT_ACTION_PDPA_GAS_COMEBACK_RSP; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_wnm_action_subtype() - gets wnm action subtype + * @action_code: action code + * + * This function returns the subtype for wnm action + * category. 
+ * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_wnm_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case WNM_BSS_TM_QUERY: + frm_type = MGMT_ACTION_WNM_BSS_TM_QUERY; + break; + case WNM_BSS_TM_REQUEST: + frm_type = MGMT_ACTION_WNM_BSS_TM_REQUEST; + break; + case WNM_BSS_TM_RESPONSE: + frm_type = MGMT_ACTION_WNM_BSS_TM_RESPONSE; + break; + case WNM_NOTIF_REQUEST: + frm_type = MGMT_ACTION_WNM_NOTIF_REQUEST; + break; + case WNM_NOTIF_RESPONSE: + frm_type = MGMT_ACTION_WNM_NOTIF_RESPONSE; + break; + case WNM_FMS_REQ: + frm_type = MGMT_ACTION_WNM_FMS_REQ; + break; + case WNM_FMS_RESP: + frm_type = MGMT_ACTION_WNM_FMS_RESP; + break; + case WNM_TFS_REQ: + frm_type = MGMT_ACTION_WNM_TFS_REQ; + break; + case WNM_TFS_RESP: + frm_type = MGMT_ACTION_WNM_TFS_RESP; + break; + case WNM_TFS_NOTIFY: + frm_type = MGMT_ACTION_WNM_TFS_NOTIFY; + break; + case WNM_SLEEP_REQ: + frm_type = MGMT_ACTION_WNM_SLEEP_REQ; + break; + case WNM_SLEEP_RESP: + frm_type = MGMT_ACTION_WNM_SLEEP_RESP; + break; + case WNM_TIM_REQ: + frm_type = MGMT_ACTION_WNM_TFS_REQ; + break; + case WNM_TIM_RESP: + frm_type = MGMT_ACTION_WNM_TFS_RESP; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_wnm_action_subtype() - gets tdls action subtype + * @action_code: action code + * + * This function returns the subtype for tdls action + * category. 
+ * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_tdls_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case TDLS_SETUP_REQUEST: + frm_type = MGMT_ACTION_TDLS_SETUP_REQ; + break; + case TDLS_SETUP_RESPONSE: + frm_type = MGMT_ACTION_TDLS_SETUP_RSP; + break; + case TDLS_SETUP_CONFIRM: + frm_type = MGMT_ACTION_TDLS_SETUP_CNF; + break; + case TDLS_TEARDOWN: + frm_type = MGMT_ACTION_TDLS_TEARDOWN; + break; + case TDLS_PEER_TRAFFIC_INDICATION: + frm_type = MGMT_ACTION_TDLS_PEER_TRAFFIC_IND; + break; + case TDLS_CHANNEL_SWITCH_REQUEST: + frm_type = MGMT_ACTION_TDLS_CH_SWITCH_REQ; + break; + case TDLS_CHANNEL_SWITCH_RESPONSE: + frm_type = MGMT_ACTION_TDLS_CH_SWITCH_RSP; + break; + case TDLS_PEER_PSM_REQUEST: + frm_type = MGMT_ACTION_TDLS_PEER_PSM_REQUEST; + break; + case TDLS_PEER_PSM_RESPONSE: + frm_type = MGMT_ACTION_TDLS_PEER_PSM_RESPONSE; + break; + case TDLS_PEER_TRAFFIC_RESPONSE: + frm_type = MGMT_ACTION_TDLS_PEER_TRAFFIC_RSP; + break; + case TDLS_DISCOVERY_REQUEST: + frm_type = MGMT_ACTION_TDLS_DIS_REQ; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_mesh_action_subtype() - gets mesh action subtype + * @action_code: action code + * + * This function returns the subtype for mesh action + * category. 
+ * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_mesh_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case MESH_ACTION_LINK_METRIC_REPORT: + frm_type = MGMT_ACTION_MESH_LINK_METRIC_REPORT; + break; + case MESH_ACTION_HWMP_PATH_SELECTION: + frm_type = MGMT_ACTION_MESH_HWMP_PATH_SELECTION; + break; + case MESH_ACTION_GATE_ANNOUNCEMENT: + frm_type = MGMT_ACTION_MESH_GATE_ANNOUNCEMENT; + break; + case MESH_ACTION_CONGESTION_CONTROL_NOTIFICATION: + frm_type = MGMT_ACTION_MESH_CONGESTION_CONTROL_NOTIFICATION; + break; + case MESH_ACTION_MCCA_SETUP_REQUEST: + frm_type = MGMT_ACTION_MESH_MCCA_SETUP_REQUEST; + break; + case MESH_ACTION_MCCA_SETUP_REPLY: + frm_type = MGMT_ACTION_MESH_MCCA_SETUP_REPLY; + break; + case MESH_ACTION_MCCA_ADVERTISEMENT_REQUEST: + frm_type = MGMT_ACTION_MESH_MCCA_ADVERTISEMENT_REQUEST; + break; + case MESH_ACTION_MCCA_ADVERTISEMENT: + frm_type = MGMT_ACTION_MESH_MCCA_ADVERTISEMENT; + break; + case MESH_ACTION_MCCA_TEARDOWN: + frm_type = MGMT_ACTION_MESH_MCCA_TEARDOWN; + break; + case MESH_ACTION_TBTT_ADJUSTMENT_REQUEST: + frm_type = MGMT_ACTION_MESH_TBTT_ADJUSTMENT_REQUEST; + break; + case MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE: + frm_type = MGMT_ACTION_MESH_TBTT_ADJUSTMENT_RESPONSE; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_self_prot_action_subtype() - gets self prot. action subtype + * @action_code: action code + * + * This function returns the subtype for self protected action + * category. 
+ * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_self_prot_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case SP_MESH_PEERING_OPEN: + frm_type = MGMT_ACTION_SP_MESH_PEERING_OPEN; + break; + case SP_MESH_PEERING_CONFIRM: + frm_type = MGMT_ACTION_SP_MESH_PEERING_CONFIRM; + break; + case SP_MESH_PEERING_CLOSE: + frm_type = MGMT_ACTION_SP_MESH_PEERING_CLOSE; + break; + case SP_MGK_INFORM: + frm_type = MGMT_ACTION_SP_MGK_INFORM; + break; + case SP_MGK_ACK: + frm_type = MGMT_ACTION_SP_MGK_ACK; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_wmm_action_subtype() - gets wmm action subtype + * @action_code: action code + * + * This function returns the subtype for wmm action + * category. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_wmm_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case WMM_QOS_SETUP_REQ: + frm_type = MGMT_ACTION_WMM_QOS_SETUP_REQ; + break; + case WMM_QOS_SETUP_RESP: + frm_type = MGMT_ACTION_WMM_QOS_SETUP_RESP; + break; + case WMM_QOS_TEARDOWN: + frm_type = MGMT_ACTION_WMM_QOS_TEARDOWN; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_get_vht_action_subtype() - gets vht action subtype + * @action_code: action code + * + * This function returns the subtype for vht action + * category. 
+ * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_get_vht_action_subtype(uint8_t action_code) +{ + enum mgmt_frame_type frm_type; + + switch (action_code) { + case VHT_ACTION_COMPRESSED_BF: + frm_type = MGMT_ACTION_VHT_COMPRESSED_BF; + break; + case VHT_ACTION_GID_NOTIF: + frm_type = MGMT_ACTION_VHT_GID_NOTIF; + break; + case VHT_ACTION_OPMODE_NOTIF: + frm_type = MGMT_ACTION_VHT_OPMODE_NOTIF; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_txrx_get_action_frm_subtype() - gets action frm subtype + * @mpdu_data_ptr: pointer to mpdu data + * + * This function determines the action category of the frame + * and calls respective function to get mgmt frame type. + * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_txrx_get_action_frm_subtype(uint8_t *mpdu_data_ptr) +{ + struct action_frm_hdr *action_hdr = + (struct action_frm_hdr *)mpdu_data_ptr; + enum mgmt_frame_type frm_type; + + switch (action_hdr->action_category) { + case ACTION_CATEGORY_SPECTRUM_MGMT: + frm_type = mgmt_get_spec_mgmt_action_subtype( + action_hdr->action_code); + break; + case ACTION_CATEGORY_QOS: + frm_type = mgmt_get_qos_action_subtype(action_hdr->action_code); + break; + case ACTION_CATEGORY_DLS: + frm_type = mgmt_get_dls_action_subtype(action_hdr->action_code); + break; + case ACTION_CATEGORY_BACK: + frm_type = mgmt_get_back_action_subtype( + action_hdr->action_code); + break; + case ACTION_CATEGORY_PUBLIC: + frm_type = mgmt_get_public_action_subtype( + action_hdr->action_code); + break; + case ACTION_CATEGORY_RRM: + frm_type = mgmt_get_rrm_action_subtype(action_hdr->action_code); + break; + case ACTION_CATEGORY_HT: + frm_type = mgmt_get_ht_action_subtype(action_hdr->action_code); + break; + case ACTION_CATEGORY_SA_QUERY: + frm_type = mgmt_get_sa_query_action_subtype( + action_hdr->action_code); + break; + case ACTION_CATEGORY_PROTECTED_DUAL_OF_PUBLIC_ACTION: + frm_type = 
mgmt_get_pdpa_action_subtype( + action_hdr->action_code); + break; + case ACTION_CATEGORY_WNM: + frm_type = mgmt_get_wnm_action_subtype(action_hdr->action_code); + break; + case ACTION_CATEGORY_TDLS: + frm_type = mgmt_get_tdls_action_subtype( + action_hdr->action_code); + break; + case ACTION_CATEGORY_MESH_ACTION: + frm_type = mgmt_get_mesh_action_subtype( + action_hdr->action_code); + break; + case ACTION_CATEGORY_SELF_PROTECTED: + frm_type = mgmt_get_self_prot_action_subtype( + action_hdr->action_code); + break; + case ACTION_CATEGORY_WMM: + frm_type = mgmt_get_wmm_action_subtype(action_hdr->action_code); + break; + case ACTION_CATEGORY_VHT: + frm_type = mgmt_get_vht_action_subtype(action_hdr->action_code); + break; + case ACTION_CATEGORY_VENDOR_SPECIFIC: + frm_type = MGMT_ACTION_CATEGORY_VENDOR_SPECIFIC; + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * mgmt_txrx_get_frm_type() - gets mgmt frm type + * @mgmt_subtype: mgmt subtype + * @mpdu_data_ptr: pointer to mpdu data + * + * This function returns mgmt frame type of the frame + * based on the mgmt subtype. 
+ * + * Return: mgmt frame type + */ +static enum mgmt_frame_type +mgmt_txrx_get_frm_type(uint8_t mgmt_subtype, uint8_t *mpdu_data_ptr) +{ + enum mgmt_frame_type frm_type; + + switch (mgmt_subtype) { + case MGMT_SUBTYPE_ASSOC_REQ: + frm_type = MGMT_ASSOC_REQ; + break; + case MGMT_SUBTYPE_ASSOC_RESP: + frm_type = MGMT_ASSOC_RESP; + break; + case MGMT_SUBTYPE_REASSOC_REQ: + frm_type = MGMT_ASSOC_REQ; + break; + case MGMT_SUBTYPE_REASSOC_RESP: + frm_type = MGMT_REASSOC_RESP; + break; + case MGMT_SUBTYPE_PROBE_REQ: + frm_type = MGMT_PROBE_REQ; + break; + case MGMT_SUBTYPE_PROBE_RESP: + frm_type = MGMT_PROBE_RESP; + break; + case MGMT_SUBTYPE_BEACON: + frm_type = MGMT_BEACON; + break; + case MGMT_SUBTYPE_ATIM: + frm_type = MGMT_ATIM; + break; + case MGMT_SUBTYPE_DISASSOC: + frm_type = MGMT_DISASSOC; + break; + case MGMT_SUBTYPE_AUTH: + frm_type = MGMT_AUTH; + break; + case MGMT_SUBTYPE_DEAUTH: + frm_type = MGMT_DEAUTH; + break; + case MGMT_SUBTYPE_ACTION: + case MGMT_SUBTYPE_ACTION_NO_ACK: + frm_type = mgmt_txrx_get_action_frm_subtype(mpdu_data_ptr); + break; + default: + frm_type = MGMT_FRM_UNSPECIFIED; + break; + } + + return frm_type; +} + +/** + * wlan_mgmt_txrx_rx_handler_list_copy() - copies rx handler list + * @rx_handler: pointer to rx handler list + * @rx_handler_head: pointer to head of the copies list + * @rx_handler_tail: pointer to tail of the copies list + * + * This function copies the rx handler linked list into a local + * linked list. 
+ * + * Return: QDF_STATUS_SUCCESS in case of success + */ +static QDF_STATUS wlan_mgmt_txrx_rx_handler_list_copy( + struct mgmt_rx_handler *rx_handler, + struct mgmt_rx_handler **rx_handler_head, + struct mgmt_rx_handler **rx_handler_tail) +{ + struct mgmt_rx_handler *rx_handler_node; + + while (rx_handler) { + rx_handler_node = + qdf_mem_malloc_atomic(sizeof(*rx_handler_node)); + if (!rx_handler_node) { + mgmt_txrx_err_rl("Couldn't allocate memory for rx handler node"); + return QDF_STATUS_E_NOMEM; + } + + rx_handler_node->comp_id = rx_handler->comp_id; + rx_handler_node->rx_cb = rx_handler->rx_cb; + rx_handler_node->next = NULL; + + if (!(*rx_handler_head)) { + *rx_handler_head = rx_handler_node; + *rx_handler_tail = *rx_handler_head; + } else { + (*rx_handler_tail)->next = rx_handler_node; + *rx_handler_tail = (*rx_handler_tail)->next; + } + rx_handler = rx_handler->next; + } + + return QDF_STATUS_SUCCESS; +} + +static bool +mgmt_rx_is_bssid_valid(struct qdf_mac_addr *mac_addr) +{ + if (qdf_is_macaddr_group(mac_addr) || + qdf_is_macaddr_zero(mac_addr)) + return false; + + return true; +} + +QDF_STATUS tgt_mgmt_txrx_rx_frame_handler( + struct wlan_objmgr_psoc *psoc, + qdf_nbuf_t buf, + struct mgmt_rx_event_params *mgmt_rx_params) +{ + struct mgmt_txrx_priv_psoc_context *mgmt_txrx_psoc_ctx; + struct ieee80211_frame *wh; + qdf_nbuf_t copy_buf; + struct wlan_objmgr_peer *peer = NULL; + uint8_t mgmt_type, mgmt_subtype; + uint8_t *mac_addr, *mpdu_data_ptr; + enum mgmt_frame_type frm_type; + struct mgmt_rx_handler *rx_handler; + struct mgmt_rx_handler *rx_handler_head = NULL, *rx_handler_tail = NULL; + u_int8_t *data, *ivp = NULL; + uint16_t buflen; + QDF_STATUS status = QDF_STATUS_SUCCESS; + bool is_from_addr_valid, is_bssid_valid; + + if (!buf) { + mgmt_txrx_err("buffer passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (!psoc) { + mgmt_txrx_err("psoc_ctx passed is NULL"); + qdf_nbuf_free(buf); + return QDF_STATUS_E_INVAL; + } + + data = (uint8_t 
*)qdf_nbuf_data(buf); + wh = (struct ieee80211_frame *)data; + buflen = qdf_nbuf_len(buf); + + /** + * TO DO (calculate pdev) + * Waiting for a new parameter: pdev id to get added in rx event + */ + + mgmt_type = (wh)->i_fc[0] & IEEE80211_FC0_TYPE_MASK; + mgmt_subtype = (wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; + + is_from_addr_valid = mgmt_rx_is_bssid_valid((struct qdf_mac_addr *) + wh->i_addr2); + is_bssid_valid = mgmt_rx_is_bssid_valid((struct qdf_mac_addr *) + wh->i_addr3); + + if (!is_from_addr_valid && !is_bssid_valid) { + mgmt_txrx_debug_rl("from addr %pM bssid addr %pM both not valid, dropping them", + wh->i_addr2, wh->i_addr3); + qdf_nbuf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + if ((mgmt_subtype == MGMT_SUBTYPE_BEACON || + mgmt_subtype == MGMT_SUBTYPE_PROBE_RESP) && + !(is_from_addr_valid && is_bssid_valid)) { + mgmt_txrx_debug_rl("from addr %pM bssid addr %pM not valid, modifying them", + wh->i_addr2, wh->i_addr3); + if (!is_from_addr_valid) + qdf_mem_copy(wh->i_addr2, wh->i_addr3, + IEEE80211_ADDR_LEN); + else + qdf_mem_copy(wh->i_addr3, wh->i_addr2, + IEEE80211_ADDR_LEN); + } + + if (mgmt_type != IEEE80211_FC0_TYPE_MGT) { + mgmt_txrx_err("Rx event doesn't conatin a mgmt. 
packet, %d", + mgmt_type); + qdf_nbuf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + /* mpdu_data_ptr is pointer to action header */ + mpdu_data_ptr = (uint8_t *)qdf_nbuf_data(buf) + + sizeof(struct ieee80211_frame); + if ((wh->i_fc[1] & IEEE80211_FC1_WEP) && + !qdf_is_macaddr_group((struct qdf_mac_addr *)wh->i_addr1) && + !qdf_is_macaddr_broadcast((struct qdf_mac_addr *)wh->i_addr1)) { + + if (buflen > (sizeof(struct ieee80211_frame) + + WLAN_HDR_EXT_IV_LEN)) + ivp = data + sizeof(struct ieee80211_frame); + + /* Set mpdu_data_ptr based on EXT IV bit + * if EXT IV bit set, CCMP using PMF 8 bytes of IV is present + * else for WEP using PMF, 4 bytes of IV is present + */ + if (ivp && (ivp[WLAN_HDR_IV_LEN] & WLAN_HDR_EXT_IV_BIT)) { + if (buflen <= (sizeof(struct ieee80211_frame) + + IEEE80211_CCMP_HEADERLEN)) { + qdf_nbuf_free(buf); + return QDF_STATUS_E_FAILURE; + } + mpdu_data_ptr += IEEE80211_CCMP_HEADERLEN; + } else { + if (buflen <= (sizeof(struct ieee80211_frame) + + WLAN_HDR_EXT_IV_LEN)) { + qdf_nbuf_free(buf); + return QDF_STATUS_E_FAILURE; + } + mpdu_data_ptr += WLAN_HDR_EXT_IV_LEN; + } + } + + frm_type = mgmt_txrx_get_frm_type(mgmt_subtype, mpdu_data_ptr); + if (frm_type == MGMT_FRM_UNSPECIFIED) { + mgmt_txrx_err_rl("Unspecified mgmt frame type fc: %x %x", + wh->i_fc[0], wh->i_fc[1]); + qdf_nbuf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + if (!(mgmt_subtype == MGMT_SUBTYPE_BEACON || + mgmt_subtype == MGMT_SUBTYPE_PROBE_RESP || + mgmt_subtype == MGMT_SUBTYPE_PROBE_REQ)) + mgmt_txrx_debug("Rcvd mgmt frame subtype %x (frame type %u) from %pM, seq_num = %d, rssi = %d tsf_delta: %u", + mgmt_subtype, frm_type, wh->i_addr2, + (le16toh(*(uint16_t *)wh->i_seq) >> + WLAN_SEQ_SEQ_SHIFT), mgmt_rx_params->rssi, + mgmt_rx_params->tsf_delta); + + mgmt_txrx_psoc_ctx = (struct mgmt_txrx_priv_psoc_context *) + wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_MGMT_TXRX); + + qdf_spin_lock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + rx_handler = 
mgmt_txrx_psoc_ctx->mgmt_rx_comp_cb[frm_type]; + if (rx_handler) { + status = wlan_mgmt_txrx_rx_handler_list_copy(rx_handler, + &rx_handler_head, &rx_handler_tail); + if (status != QDF_STATUS_SUCCESS) { + qdf_spin_unlock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + qdf_nbuf_free(buf); + goto rx_handler_mem_free; + } + } + + rx_handler = mgmt_txrx_psoc_ctx->mgmt_rx_comp_cb[MGMT_FRAME_TYPE_ALL]; + if (rx_handler) { + status = wlan_mgmt_txrx_rx_handler_list_copy(rx_handler, + &rx_handler_head, &rx_handler_tail); + if (status != QDF_STATUS_SUCCESS) { + qdf_spin_unlock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + qdf_nbuf_free(buf); + goto rx_handler_mem_free; + } + } + + if (!rx_handler_head) { + qdf_spin_unlock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + mgmt_txrx_debug("No rx callback registered for frm_type: %d", + frm_type); + qdf_nbuf_free(buf); + return QDF_STATUS_E_FAILURE; + } + qdf_spin_unlock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + + mac_addr = (uint8_t *)wh->i_addr2; + /* + * peer can be NULL in following 2 scenarios: + * 1. broadcast frame received + * 2. operating in monitor mode + * + * and in both scenarios, the receiver of frame + * is expected to do processing accordingly considerng + * the fact that peer = NULL can be received and is a valid + * scenario. 
+ */ + peer = wlan_objmgr_get_peer(psoc, mgmt_rx_params->pdev_id, + mac_addr, WLAN_MGMT_SB_ID); + if (!peer && !qdf_is_macaddr_broadcast( + (struct qdf_mac_addr *)wh->i_addr1)) { + mac_addr = (uint8_t *)wh->i_addr1; + peer = wlan_objmgr_get_peer(psoc, + mgmt_rx_params->pdev_id, + mac_addr, WLAN_MGMT_SB_ID); + } + + rx_handler = rx_handler_head; + while (rx_handler->next) { + copy_buf = qdf_nbuf_clone(buf); + rx_handler->rx_cb(psoc, peer, copy_buf, + mgmt_rx_params, frm_type); + rx_handler = rx_handler->next; + } + rx_handler->rx_cb(psoc, peer, buf, + mgmt_rx_params, frm_type); + + if (peer) + wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_SB_ID); + +rx_handler_mem_free: + while (rx_handler_head) { + rx_handler = rx_handler_head; + rx_handler_head = rx_handler_head->next; + qdf_mem_free(rx_handler); + } + + return status; +} + +QDF_STATUS tgt_mgmt_txrx_tx_completion_handler( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id, uint32_t status, + void *tx_compl_params) +{ + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + struct mgmt_txrx_desc_elem_t *mgmt_desc; + void *cb_context; + mgmt_tx_download_comp_cb tx_compl_cb; + mgmt_ota_comp_cb ota_comp_cb; + qdf_nbuf_t nbuf; + + mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_pdev_ctx) { + mgmt_txrx_err("Mgmt txrx context empty for pdev %pK", pdev); + return QDF_STATUS_E_NULL_VALUE; + } + if (desc_id >= MGMT_DESC_POOL_MAX) { + mgmt_txrx_err("desc_id:%u is out of bounds", desc_id); + return QDF_STATUS_E_INVAL; + } + mgmt_desc = &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[desc_id]; + if (!mgmt_desc || !mgmt_desc->in_use) { + mgmt_txrx_err("Mgmt desc empty for id %d pdev %pK ", + desc_id, pdev); + return QDF_STATUS_E_NULL_VALUE; + } + tx_compl_cb = mgmt_desc->tx_dwnld_cmpl_cb; + ota_comp_cb = mgmt_desc->tx_ota_cmpl_cb; + nbuf = mgmt_desc->nbuf; + + /* + * TO DO + * Make the API more generic to handle tx download 
completion as well + * as OTA completion separately. + */ + + /* + * 1. If the tx frame is sent by any UMAC converged component then it + * passes the context as NULL while calling mgmt txrx API for + * sending mgmt frame. If context is NULL, peer will be passed as + * cb_context in completion callbacks. + * 2. If the tx frame is sent by legacy MLME then it passes the context + * as its specific context (for ex- mac context in case of MCL) while + * calling mgmt txrx API for sending mgmt frame. This caller specific + * context is passed as cb_context in completion callbacks. + */ + if (mgmt_desc->context) + cb_context = mgmt_desc->context; + else + cb_context = (void *)mgmt_desc->peer; + + if (!tx_compl_cb && !ota_comp_cb) { + qdf_nbuf_free(nbuf); + goto no_registered_cb; + } + + if (tx_compl_cb) + tx_compl_cb(cb_context, nbuf, status); + + if (ota_comp_cb) + ota_comp_cb(cb_context, nbuf, status, tx_compl_params); + +no_registered_cb: + /* + * decrementing the peer ref count that was incremented while + * accessing peer in wlan_mgmt_txrx_mgmt_frame_tx + */ + wlan_objmgr_peer_release_ref(mgmt_desc->peer, WLAN_MGMT_NB_ID); + wlan_mgmt_txrx_desc_put(mgmt_txrx_pdev_ctx, desc_id); + return QDF_STATUS_SUCCESS; +} + +qdf_nbuf_t tgt_mgmt_txrx_get_nbuf_from_desc_id( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id) +{ + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + struct mgmt_txrx_desc_elem_t *mgmt_desc; + qdf_nbuf_t buf; + + mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_pdev_ctx) { + mgmt_txrx_err("Mgmt txrx context empty for pdev %pK", pdev); + goto fail; + } + if (desc_id >= MGMT_DESC_POOL_MAX) { + mgmt_txrx_err("desc_id:%u is out of bounds", desc_id); + goto fail; + } + + mgmt_desc = &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[desc_id]; + if (!mgmt_desc || !mgmt_desc->in_use) { + mgmt_txrx_err("Mgmt descriptor unavailable for id %d pdev %pK", + desc_id, 
pdev); + goto fail; + } + buf = mgmt_desc->nbuf; + return buf; + +fail: + return NULL; +} + +struct wlan_objmgr_peer * +tgt_mgmt_txrx_get_peer_from_desc_id( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id) +{ + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + struct mgmt_txrx_desc_elem_t *mgmt_desc; + struct wlan_objmgr_peer *peer; + + mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_pdev_ctx) { + mgmt_txrx_err("Mgmt txrx context empty for pdev %pK", pdev); + goto fail; + } + + mgmt_desc = &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[desc_id]; + if (!mgmt_desc || !mgmt_desc->in_use) { + mgmt_txrx_err("Mgmt descriptor unavailable for id %d pdev %pK", + desc_id, pdev); + goto fail; + } + + peer = mgmt_desc->peer; + return peer; + +fail: + return NULL; +} + +uint8_t tgt_mgmt_txrx_get_vdev_id_from_desc_id( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id) +{ + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + struct mgmt_txrx_desc_elem_t *mgmt_desc; + uint8_t vdev_id; + + mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_pdev_ctx) { + mgmt_txrx_err("Mgmt txrx context empty for pdev %pK", pdev); + goto fail; + } + if (desc_id >= MGMT_DESC_POOL_MAX) { + mgmt_txrx_err("desc_id:%u is out of bounds", desc_id); + goto fail; + } + + mgmt_desc = &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[desc_id]; + if (!mgmt_desc || !mgmt_desc->in_use) { + mgmt_txrx_err("Mgmt descriptor unavailable for id %d pdev %pK", + desc_id, pdev); + goto fail; + } + + vdev_id = mgmt_desc->vdev_id; + return vdev_id; + +fail: + return WLAN_UMAC_VDEV_ID_MAX; +} + +uint32_t tgt_mgmt_txrx_get_free_desc_pool_count( + struct wlan_objmgr_pdev *pdev) +{ + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + uint32_t free_desc_count = WLAN_INVALID_MGMT_DESC_COUNT; + + mgmt_txrx_pdev_ctx = 
(struct mgmt_txrx_priv_pdev_context *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_pdev_ctx) { + mgmt_txrx_err("Mgmt txrx context empty for pdev %pK", pdev); + goto fail; + } + + free_desc_count = qdf_list_size( + &(mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list)); + +fail: + return free_desc_count; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/src/wlan_mgmt_txrx_utils_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/src/wlan_mgmt_txrx_utils_api.c new file mode 100644 index 0000000000000000000000000000000000000000..cd5b8750ae4a2e3c61d3cd3c9d7e6d68df779d5e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/dispatcher/src/wlan_mgmt_txrx_utils_api.c @@ -0,0 +1,841 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_mgmt_txrx_utils_api.c + * This file contains mgmt txrx public API definitions for umac + * converged components. 
+ */ + +#include "wlan_mgmt_txrx_utils_api.h" +#include "../../core/src/wlan_mgmt_txrx_main_i.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_objmgr_global_obj.h" +#include "wlan_objmgr_pdev_obj.h" +#include "wlan_objmgr_vdev_obj.h" +#include "wlan_objmgr_peer_obj.h" +#include "qdf_nbuf.h" +#include "wlan_lmac_if_api.h" + +/** + * wlan_mgmt_txrx_psoc_obj_create_notification() - called from objmgr when psoc + * is created + * @psoc: psoc context + * @arg: argument + * + * This function gets called from object manager when psoc is being created and + * creates mgmt_txrx context, mgmt desc pool. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS wlan_mgmt_txrx_psoc_obj_create_notification( + struct wlan_objmgr_psoc *psoc, + void *arg) +{ + struct mgmt_txrx_priv_psoc_context *mgmt_txrx_psoc_ctx; + QDF_STATUS status; + + if (!psoc) { + mgmt_txrx_err("psoc context passed is NULL"); + status = QDF_STATUS_E_INVAL; + goto err_return; + } + + mgmt_txrx_psoc_ctx = qdf_mem_malloc(sizeof(*mgmt_txrx_psoc_ctx)); + if (!mgmt_txrx_psoc_ctx) { + mgmt_txrx_err("Failed to allocate mgmt txrx context"); + status = QDF_STATUS_E_NOMEM; + goto err_return; + } + + mgmt_txrx_psoc_ctx->psoc = psoc; + + qdf_spinlock_create(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + + if (wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_UMAC_COMP_MGMT_TXRX, + mgmt_txrx_psoc_ctx, QDF_STATUS_SUCCESS) + != QDF_STATUS_SUCCESS) { + mgmt_txrx_err("Failed to attach mgmt txrx ctx in psoc ctx"); + status = QDF_STATUS_E_FAILURE; + goto err_psoc_attach; + } + + mgmt_txrx_info("Mgmt txrx creation successful, mgmt txrx ctx: %pK, psoc: %pK", + mgmt_txrx_psoc_ctx, psoc); + + return QDF_STATUS_SUCCESS; + +err_psoc_attach: + qdf_spinlock_destroy(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + qdf_mem_free(mgmt_txrx_psoc_ctx); +err_return: + return status; +} + +/** + * wlan_mgmt_txrx_psoc_obj_destroy_notification() - called from objmgr when + * psoc is destroyed + * @psoc: psoc 
context + * @arg: argument + * + * This function gets called from object manager when psoc is being destroyed + * psoc deletes mgmt_txrx context, mgmt desc pool. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS wlan_mgmt_txrx_psoc_obj_destroy_notification( + struct wlan_objmgr_psoc *psoc, + void *arg) +{ + struct mgmt_txrx_priv_psoc_context *mgmt_txrx_psoc_ctx; + + if (!psoc) { + mgmt_txrx_err("psoc context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + mgmt_txrx_psoc_ctx = wlan_objmgr_psoc_get_comp_private_obj( + psoc, WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_psoc_ctx) { + mgmt_txrx_err("mgmt txrx context is already NULL"); + return QDF_STATUS_E_FAILURE; + } + + mgmt_txrx_info("deleting mgmt txrx psoc obj, mgmt txrx ctx: %pK, psoc: %pK", + mgmt_txrx_psoc_ctx, psoc); + if (wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_MGMT_TXRX, mgmt_txrx_psoc_ctx) + != QDF_STATUS_SUCCESS) { + mgmt_txrx_err("Failed to detach mgmt txrx ctx in psoc ctx"); + return QDF_STATUS_E_FAILURE; + } + + qdf_spinlock_destroy(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + qdf_mem_free(mgmt_txrx_psoc_ctx); + + mgmt_txrx_info("mgmt txrx deletion successful, psoc: %pK", psoc); + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_mgmt_txrx_pdev_obj_create_notification() - called from objmgr when pdev + * is created + * @pdev: pdev context + * @arg: argument + * + * This function gets called from object manager when pdev is being created and + * creates mgmt_txrx context, mgmt desc pool. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS wlan_mgmt_txrx_pdev_obj_create_notification( + struct wlan_objmgr_pdev *pdev, + void *arg) +{ + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + struct mgmt_txrx_stats_t *mgmt_txrx_stats; + QDF_STATUS status; + + if (!pdev) { + mgmt_txrx_err("pdev context passed is NULL"); + status = QDF_STATUS_E_INVAL; + goto err_return; + + } + + mgmt_txrx_pdev_ctx = qdf_mem_malloc(sizeof(*mgmt_txrx_pdev_ctx)); + if (!mgmt_txrx_pdev_ctx) { + mgmt_txrx_err("Failed to allocate mgmt txrx context"); + status = QDF_STATUS_E_NOMEM; + goto err_return; + } + + mgmt_txrx_pdev_ctx->pdev = pdev; + + status = wlan_mgmt_txrx_desc_pool_init(mgmt_txrx_pdev_ctx); + if (status != QDF_STATUS_SUCCESS) { + mgmt_txrx_err( + "Failed to initialize mgmt desc. pool with status: %u", + status); + goto err_desc_pool_init; + } + + mgmt_txrx_stats = qdf_mem_malloc(sizeof(*mgmt_txrx_stats)); + if (!mgmt_txrx_stats) { + mgmt_txrx_err( + "Failed to allocate memory for mgmt txrx stats structure"); + status = QDF_STATUS_E_NOMEM; + goto err_mgmt_txrx_stats; + } + mgmt_txrx_pdev_ctx->mgmt_txrx_stats = mgmt_txrx_stats; + + qdf_wake_lock_create(&mgmt_txrx_pdev_ctx->wakelock_tx_cmp, + "mgmt_txrx tx_cmp"); + + if (wlan_objmgr_pdev_component_obj_attach(pdev, + WLAN_UMAC_COMP_MGMT_TXRX, + mgmt_txrx_pdev_ctx, QDF_STATUS_SUCCESS) + != QDF_STATUS_SUCCESS) { + mgmt_txrx_err("Failed to attach mgmt txrx ctx in pdev ctx"); + status = QDF_STATUS_E_FAILURE; + goto err_pdev_attach; + } + + mgmt_txrx_info( + "Mgmt txrx creation successful, mgmt txrx ctx: %pK, pdev: %pK", + mgmt_txrx_pdev_ctx, pdev); + + return QDF_STATUS_SUCCESS; + +err_pdev_attach: + qdf_wake_lock_destroy(&mgmt_txrx_pdev_ctx->wakelock_tx_cmp); + qdf_mem_free(mgmt_txrx_stats); +err_mgmt_txrx_stats: + wlan_mgmt_txrx_desc_pool_deinit(mgmt_txrx_pdev_ctx); +err_desc_pool_init: + qdf_mem_free(mgmt_txrx_pdev_ctx); +err_return: + return status; +} + +/** + * 
wlan_mgmt_txrx_pdev_obj_destroy_notification() - called from objmgr when + * pdev is destroyed + * @pdev: pdev context + * @arg: argument + * + * This function gets called from object manager when pdev is being destroyed + * pdev deletes mgmt_txrx context, mgmt desc pool. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS wlan_mgmt_txrx_pdev_obj_destroy_notification( + struct wlan_objmgr_pdev *pdev, + void *arg) +{ + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + + if (!pdev) { + mgmt_txrx_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + mgmt_txrx_pdev_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_pdev_ctx) { + mgmt_txrx_err("mgmt txrx context is already NULL"); + return QDF_STATUS_E_FAILURE; + } + + mgmt_txrx_info("deleting mgmt txrx pdev obj, mgmt txrx ctx: %pK, pdev: %pK", + mgmt_txrx_pdev_ctx, pdev); + if (wlan_objmgr_pdev_component_obj_detach(pdev, + WLAN_UMAC_COMP_MGMT_TXRX, mgmt_txrx_pdev_ctx) + != QDF_STATUS_SUCCESS) { + mgmt_txrx_err("Failed to detach mgmt txrx ctx in pdev ctx"); + return QDF_STATUS_E_FAILURE; + } + + wlan_mgmt_txrx_desc_pool_deinit(mgmt_txrx_pdev_ctx); + qdf_mem_free(mgmt_txrx_pdev_ctx->mgmt_txrx_stats); + qdf_wake_lock_destroy(&mgmt_txrx_pdev_ctx->wakelock_tx_cmp); + qdf_mem_free(mgmt_txrx_pdev_ctx); + + mgmt_txrx_info("mgmt txrx deletion successful, pdev: %pK", pdev); + + return QDF_STATUS_SUCCESS; +} + + +QDF_STATUS wlan_mgmt_txrx_init(void) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = wlan_objmgr_register_psoc_create_handler( + WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_psoc_obj_create_notification, + NULL); + if (status != QDF_STATUS_SUCCESS) { + mgmt_txrx_err("Failed to register mgmt txrx psoc create handler"); + goto err_psoc_create; + } + + status = wlan_objmgr_register_psoc_destroy_handler( + WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_psoc_obj_destroy_notification, + NULL); + if (status != 
QDF_STATUS_SUCCESS) { + mgmt_txrx_err("Failed to register mgmt txrx psoc destroy handler"); + goto err_psoc_delete; + } + + status = wlan_objmgr_register_pdev_create_handler( + WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_pdev_obj_create_notification, + NULL); + if (status != QDF_STATUS_SUCCESS) { + mgmt_txrx_err("Failed to register mgmt txrx pdev obj create handler"); + goto err_pdev_create; + } + + status = wlan_objmgr_register_pdev_destroy_handler( + WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_pdev_obj_destroy_notification, + NULL); + if (status != QDF_STATUS_SUCCESS) { + mgmt_txrx_err("Failed to register mgmt txrx obj destroy handler"); + goto err_pdev_delete; + } + + mgmt_txrx_info("Successfully registered create and destroy handlers with objmgr"); + return QDF_STATUS_SUCCESS; + +err_pdev_delete: + wlan_objmgr_unregister_pdev_create_handler(WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_pdev_obj_create_notification, NULL); +err_pdev_create: + wlan_objmgr_unregister_psoc_destroy_handler(WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_psoc_obj_destroy_notification, NULL); +err_psoc_delete: + wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_psoc_obj_create_notification, NULL); +err_psoc_create: + return status; +} + +QDF_STATUS wlan_mgmt_txrx_deinit(void) +{ + if (wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_psoc_obj_create_notification, + NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + if (wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_psoc_obj_destroy_notification, + NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + if (wlan_objmgr_unregister_pdev_create_handler(WLAN_UMAC_COMP_MGMT_TXRX, + wlan_mgmt_txrx_pdev_obj_create_notification, + NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + if (wlan_objmgr_unregister_pdev_destroy_handler( + WLAN_UMAC_COMP_MGMT_TXRX, + 
wlan_mgmt_txrx_pdev_obj_destroy_notification, + NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + + mgmt_txrx_info("Successfully unregistered create and destroy handlers with objmgr"); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_mgmt_frame_tx(struct wlan_objmgr_peer *peer, + void *context, + qdf_nbuf_t buf, + mgmt_tx_download_comp_cb tx_comp_cb, + mgmt_ota_comp_cb tx_ota_comp_cb, + enum wlan_umac_comp_id comp_id, + void *mgmt_tx_params) +{ + struct mgmt_txrx_desc_elem_t *desc; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct mgmt_txrx_priv_pdev_context *txrx_ctx; + struct wlan_objmgr_vdev *vdev; + QDF_STATUS status; + + if (!peer) { + mgmt_txrx_err("peer passed is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + status = wlan_objmgr_peer_try_get_ref(peer, WLAN_MGMT_NB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + mgmt_txrx_err("failed to get ref count for peer %pK", peer); + return QDF_STATUS_E_NULL_VALUE; + } + + vdev = wlan_peer_get_vdev(peer); + if (!vdev) { + mgmt_txrx_err("vdev unavailable for peer %pK", peer); + wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_NB_ID); + return QDF_STATUS_E_NULL_VALUE; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + mgmt_txrx_err("psoc unavailable for peer %pK vdev %pK", + peer, vdev); + wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_NB_ID); + return QDF_STATUS_E_NULL_VALUE; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + mgmt_txrx_err("pdev unavailable for peer %pK vdev %pK", + peer, vdev); + wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_NB_ID); + return QDF_STATUS_E_NULL_VALUE; + } + + txrx_ctx = (struct mgmt_txrx_priv_pdev_context *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!txrx_ctx) { + mgmt_txrx_err("No txrx context for peer %pK pdev %pK", + peer, pdev); + wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_NB_ID); + return QDF_STATUS_E_NULL_VALUE; + } + + desc = wlan_mgmt_txrx_desc_get(txrx_ctx); + if (!desc) { 
+ wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_NB_ID); + return QDF_STATUS_E_RESOURCES; + } + + desc->nbuf = buf; + desc->tx_ota_cmpl_cb = tx_ota_comp_cb; + desc->tx_dwnld_cmpl_cb = tx_comp_cb; + desc->peer = peer; + desc->vdev_id = wlan_vdev_get_id(vdev); + desc->context = context; + + if (!psoc->soc_cb.tx_ops.mgmt_txrx_tx_ops.mgmt_tx_send) { + mgmt_txrx_err( + "mgmt txrx txop to send mgmt frame is NULL for psoc: %pK", + psoc); + wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_NB_ID); + desc->nbuf = NULL; + wlan_mgmt_txrx_desc_put(txrx_ctx, desc->desc_id); + return QDF_STATUS_E_FAILURE; + } + + if (psoc->soc_cb.tx_ops.mgmt_txrx_tx_ops.mgmt_tx_send( + vdev, buf, desc->desc_id, mgmt_tx_params)) { + mgmt_txrx_err("Mgmt send fail for peer %pK psoc %pK pdev: %pK", + peer, psoc, pdev); + wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_NB_ID); + desc->nbuf = NULL; + wlan_mgmt_txrx_desc_put(txrx_ctx, desc->desc_id); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_beacon_frame_tx(struct wlan_objmgr_peer *peer, + qdf_nbuf_t buf, + enum wlan_umac_comp_id comp_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_psoc *psoc; + + vdev = wlan_peer_get_vdev(peer); + if (!vdev) { + mgmt_txrx_err("vdev unavailable for peer %pK", peer); + return QDF_STATUS_E_NULL_VALUE; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + mgmt_txrx_err("psoc unavailable for peer %pK", peer); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!psoc->soc_cb.tx_ops.mgmt_txrx_tx_ops.beacon_send) { + mgmt_txrx_err("mgmt txrx tx op to send beacon frame is NULL for psoc: %pK", + psoc); + return QDF_STATUS_E_FAILURE; + } + + if (psoc->soc_cb.tx_ops.mgmt_txrx_tx_ops.beacon_send(vdev, buf)) { + mgmt_txrx_err("Beacon send fail for peer %pK psoc %pK", + peer, psoc); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_SUPPORT_FILS +QDF_STATUS +wlan_mgmt_txrx_fd_action_frame_tx(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t buf, + enum 
wlan_umac_comp_id comp_id) +{ + struct wlan_objmgr_psoc *psoc; + uint32_t vdev_id; + + if (!vdev) { + mgmt_txrx_err("Invalid vdev"); + return QDF_STATUS_E_NULL_VALUE; + } + vdev_id = wlan_vdev_get_id(vdev); + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + mgmt_txrx_err("psoc unavailable for vdev %d", vdev_id); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!psoc->soc_cb.tx_ops.mgmt_txrx_tx_ops.fd_action_frame_send) { + mgmt_txrx_err("mgmt txrx txop to send fd action frame is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (psoc->soc_cb.tx_ops.mgmt_txrx_tx_ops.fd_action_frame_send( + vdev, buf)) { + mgmt_txrx_err("FD send fail for vdev %d", vdev_id); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_SUPPORT_FILS */ + +/** + * wlan_mgmt_txrx_create_rx_handler() - creates rx handler node for umac comp. + * @mgmt_txrx_psoc_ctx: mgmt txrx context + * @mgmt_rx_cb: mgmt rx callback to be registered + * @comp_id: umac component id + * @frm_type: mgmt. frame for which cb to be registered. + * + * This function creates rx handler node for frame type and + * umac component passed in the func. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS wlan_mgmt_txrx_create_rx_handler( + struct mgmt_txrx_priv_psoc_context *mgmt_txrx_psoc_ctx, + mgmt_frame_rx_callback mgmt_rx_cb, + enum wlan_umac_comp_id comp_id, + enum mgmt_frame_type frm_type) +{ + struct mgmt_rx_handler *rx_handler; + + rx_handler = qdf_mem_malloc(sizeof(*rx_handler)); + if (!rx_handler) { + mgmt_txrx_err("Couldn't allocate memory for rx handler"); + return QDF_STATUS_E_NOMEM; + } + + rx_handler->comp_id = comp_id; + rx_handler->rx_cb = mgmt_rx_cb; + + qdf_spin_lock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + rx_handler->next = mgmt_txrx_psoc_ctx->mgmt_rx_comp_cb[frm_type]; + mgmt_txrx_psoc_ctx->mgmt_rx_comp_cb[frm_type] = rx_handler; + qdf_spin_unlock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + + mgmt_txrx_info("Callback registered for comp_id: %d, frm_type: %d", + comp_id, frm_type); + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_mgmt_txrx_delete_rx_handler() - deletes rx handler node for umac comp. + * @mgmt_txrx_psoc_ctx: mgmt txrx context + * @mgmt_rx_cb: mgmt rx callback to be deregistered + * @comp_id: umac component id + * @frm_type: mgmt. frame for which cb to be registered. + * + * This function deletes rx handler node for frame type and + * umac component passed in the func. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS wlan_mgmt_txrx_delete_rx_handler( + struct mgmt_txrx_priv_psoc_context *mgmt_txrx_psoc_ctx, + mgmt_frame_rx_callback mgmt_rx_cb, + enum wlan_umac_comp_id comp_id, + enum mgmt_frame_type frm_type) +{ + struct mgmt_rx_handler *rx_handler = NULL, *rx_handler_prev = NULL; + bool delete = false; + + qdf_spin_lock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + rx_handler = mgmt_txrx_psoc_ctx->mgmt_rx_comp_cb[frm_type]; + while (rx_handler) { + if (rx_handler->comp_id == comp_id && + rx_handler->rx_cb == mgmt_rx_cb) { + if (rx_handler_prev) + rx_handler_prev->next = + rx_handler->next; + else + mgmt_txrx_psoc_ctx->mgmt_rx_comp_cb[frm_type] = + rx_handler->next; + + qdf_mem_free(rx_handler); + delete = true; + break; + } + + rx_handler_prev = rx_handler; + rx_handler = rx_handler->next; + } + qdf_spin_unlock_bh(&mgmt_txrx_psoc_ctx->mgmt_txrx_psoc_ctx_lock); + + if (!delete) { + mgmt_txrx_err("No callback registered for comp_id: %d, frm_type: %d", + comp_id, frm_type); + return QDF_STATUS_E_FAILURE; + } + + mgmt_txrx_info("Callback deregistered for comp_id: %d, frm_type: %d", + comp_id, frm_type); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_register_rx_cb( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id comp_id, + struct mgmt_txrx_mgmt_frame_cb_info *frm_cb_info, + uint8_t num_entries) +{ + struct mgmt_txrx_priv_psoc_context *mgmt_txrx_psoc_ctx; + QDF_STATUS status; + uint8_t i, j; + + if (!psoc) { + mgmt_txrx_err("psoc context is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (comp_id >= WLAN_UMAC_MAX_COMPONENTS) { + mgmt_txrx_err("Invalid component id %d passed", comp_id); + return QDF_STATUS_E_INVAL; + } + + if (!num_entries || num_entries >= MGMT_MAX_FRAME_TYPE) { + mgmt_txrx_err("Invalid value for num_entries: %d passed", + num_entries); + return QDF_STATUS_E_INVAL; + } + + if (!frm_cb_info) { + mgmt_txrx_err("frame cb info pointer is NULL"); + return 
QDF_STATUS_E_INVAL; + } + + mgmt_txrx_psoc_ctx = (struct mgmt_txrx_priv_psoc_context *) + wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_psoc_ctx) { + mgmt_txrx_err("mgmt txrx context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < num_entries; i++) { + status = wlan_mgmt_txrx_create_rx_handler(mgmt_txrx_psoc_ctx, + frm_cb_info[i].mgmt_rx_cb, comp_id, + frm_cb_info[i].frm_type); + if (status != QDF_STATUS_SUCCESS) { + for (j = 0; j < i; j++) { + wlan_mgmt_txrx_delete_rx_handler( + mgmt_txrx_psoc_ctx, + frm_cb_info[j].mgmt_rx_cb, + comp_id, frm_cb_info[j].frm_type); + } + return status; + } + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_deregister_rx_cb( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id comp_id, + struct mgmt_txrx_mgmt_frame_cb_info *frm_cb_info, + uint8_t num_entries) +{ + struct mgmt_txrx_priv_psoc_context *mgmt_txrx_psoc_ctx; + uint8_t i; + + if (!psoc) { + mgmt_txrx_err("psoc context is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (comp_id >= WLAN_UMAC_MAX_COMPONENTS) { + mgmt_txrx_err("Invalid component id %d passed", comp_id); + return QDF_STATUS_E_INVAL; + } + + if (!num_entries || num_entries >= MGMT_MAX_FRAME_TYPE) { + mgmt_txrx_err("Invalid value for num_entries: %d passed", + num_entries); + return QDF_STATUS_E_INVAL; + } + + if (!frm_cb_info) { + mgmt_txrx_err("frame cb info pointer is NULL"); + return QDF_STATUS_E_INVAL; + } + + mgmt_txrx_psoc_ctx = (struct mgmt_txrx_priv_psoc_context *) + wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_psoc_ctx) { + mgmt_txrx_err("mgmt txrx context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < num_entries; i++) { + wlan_mgmt_txrx_delete_rx_handler(mgmt_txrx_psoc_ctx, + frm_cb_info[i].mgmt_rx_cb, comp_id, + frm_cb_info[i].frm_type); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + return 
QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_pdev_open(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_pdev_close(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + struct mgmt_txrx_desc_elem_t *mgmt_desc; + uint32_t pool_size; + uint32_t index; + + if (!pdev) { + mgmt_txrx_err("pdev context is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + mgmt_txrx_err("psoc unavailable for pdev %pK", pdev); + return QDF_STATUS_E_NULL_VALUE; + } + + mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_MGMT_TXRX); + + if (!mgmt_txrx_pdev_ctx) { + mgmt_txrx_err("mgmt txrx context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + pool_size = mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list.max_size; + if (!pool_size) { + mgmt_txrx_err("pool size is 0"); + return QDF_STATUS_E_FAILURE; + } + + for (index = 0; index < pool_size; index++) { + if (mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[index].in_use) { + mgmt_txrx_info( + "mgmt descriptor with desc id: %d not in freelist", + index); + mgmt_desc = &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[index]; + if (psoc->soc_cb.tx_ops.mgmt_txrx_tx_ops. + tx_drain_nbuf_op) + psoc->soc_cb.tx_ops.mgmt_txrx_tx_ops. 
+ tx_drain_nbuf_op(pdev, mgmt_desc->nbuf); + qdf_nbuf_free(mgmt_desc->nbuf); + wlan_objmgr_peer_release_ref(mgmt_desc->peer, + WLAN_MGMT_NB_ID); + wlan_mgmt_txrx_desc_put(mgmt_txrx_pdev_ctx, index); + } + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_mgmt_txrx_vdev_drain(struct wlan_objmgr_vdev *vdev, + mgmt_frame_fill_peer_cb mgmt_fill_peer_cb, + void *status) +{ + struct wlan_objmgr_pdev *pdev; + struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx; + struct mgmt_txrx_desc_elem_t *mgmt_desc; + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_vdev *peer_vdev; + uint32_t pool_size; + int i; + + if (!vdev) { + mgmt_txrx_err("vdev context is NULL"); + return QDF_STATUS_E_INVAL; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + mgmt_txrx_err("pdev context is NULL"); + return QDF_STATUS_E_INVAL; + } + mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_MGMT_TXRX); + if (!mgmt_txrx_pdev_ctx) { + mgmt_txrx_err("mgmt txrx context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + pool_size = mgmt_txrx_pdev_ctx->mgmt_desc_pool.free_list.max_size; + if (!pool_size) { + mgmt_txrx_err("pool size is 0"); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < pool_size; i++) { + if (mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[i].in_use) { + mgmt_desc = &mgmt_txrx_pdev_ctx->mgmt_desc_pool.pool[i]; + peer = mgmt_txrx_get_peer(pdev, mgmt_desc->desc_id); + if (peer) { + peer_vdev = wlan_peer_get_vdev(peer); + if (peer_vdev == vdev) { + if (mgmt_fill_peer_cb) + mgmt_fill_peer_cb(peer, mgmt_desc->nbuf); + mgmt_txrx_tx_completion_handler(pdev, + mgmt_desc->desc_id, 0, status); + } + } + } + } + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_cmn.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_cmn.h new file mode 100644 index 
0000000000000000000000000000000000000000..b3351eb484a12d1124808c3ffad2005c9744e234 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_cmn.h @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: This file provides the common definitions for object manager + */ + +#ifndef _WLAN_OBJMGR_CMN_H_ +#define _WLAN_OBJMGR_CMN_H_ + +#include "qdf_lock.h" +#include "qdf_list.h" +#include "qdf_status.h" +#include "wlan_cmn.h" +#include "qdf_atomic.h" + +/* No. of PSOCs can be supported */ +#define WLAN_OBJMGR_MAX_DEVICES 3 + +/* size of Hash */ +#define WLAN_PEER_HASHSIZE 64 + +/* simple hash is enough for variation of macaddr */ +#define WLAN_PEER_HASH(addr) \ +(((const uint8_t *)(addr))[QDF_MAC_ADDR_SIZE - 1] % WLAN_PEER_HASHSIZE) + +#define obj_mgr_log(level, args...) \ + QDF_TRACE(QDF_MODULE_ID_OBJ_MGR, level, ## args) + +#define obj_mgr_logfl(level, format, args...) \ + obj_mgr_log(level, FL(format), ## args) + +#define obj_mgr_alert(format, args...) \ + obj_mgr_logfl(QDF_TRACE_LEVEL_FATAL, format, ## args) + +#define obj_mgr_err(format, args...) 
\ + obj_mgr_logfl(QDF_TRACE_LEVEL_ERROR, format, ## args) + +#define obj_mgr_warn(format, args...) \ + obj_mgr_logfl(QDF_TRACE_LEVEL_WARN, format, ## args) + +#define obj_mgr_info(format, args...) \ + obj_mgr_logfl(QDF_TRACE_LEVEL_INFO, format, ## args) + +#define obj_mgr_debug(format, args...) \ + obj_mgr_logfl(QDF_TRACE_LEVEL_DEBUG, format, ## args) + +#define obj_mgr_log_level(level, format, args...)\ + obj_mgr_logfl(level, format, ## args) + +/** + * enum WLAN_OBJ_STATE - State of Object + * @WLAN_OBJ_STATE_ALLOCATED: Common object is allocated, but not + * fully initialized + * @WLAN_OBJ_STATE_CREATED: All component objects are created + * @WLAN_OBJ_STATE_DELETED: All component objects are destroyed + * @WLAN_OBJ_STATE_PARTIALLY_CREATED: Few/All component objects creation is + * in progress + * @WLAN_OBJ_STATE_PARTIALLY_DELETED: Component objects deletion is triggered, + * they are yet to be destroyed + * @WLAN_OBJ_STATE_COMP_DEL_PROGRESS: If a component is disabled run time, + * and this state is used to represent the + * deletion in progress after that + * component object is destroyed, object + * state would be moved to CREATED state + * @WLAN_OBJ_STATE_LOGICALLY_DELETED: Object deletion has been initiated, + * object destroy invoked once references + * are released + * @WLAN_OBJ_STATE_CREATION_FAILED: any component object is failed to be + * created + * @WLAN_OBJ_STATE_DELETION_FAILED: any component object is failed to be + * destroyed + */ +typedef enum { + WLAN_OBJ_STATE_ALLOCATED = 0, + WLAN_OBJ_STATE_CREATED = 1, + WLAN_OBJ_STATE_DELETED = 2, + WLAN_OBJ_STATE_PARTIALLY_CREATED = 3, + WLAN_OBJ_STATE_PARTIALLY_DELETED = 4, + WLAN_OBJ_STATE_COMP_DEL_PROGRESS = 5, + WLAN_OBJ_STATE_LOGICALLY_DELETED = 6, + WLAN_OBJ_STATE_CREATION_FAILED = 7, + WLAN_OBJ_STATE_DELETION_FAILED = 8, +} WLAN_OBJ_STATE; + +/* Object type is assigned with value */ +enum wlan_objmgr_obj_type { + WLAN_PSOC_OP = 0, + WLAN_PDEV_OP = 1, + WLAN_VDEV_OP = 2, + WLAN_PEER_OP = 3, + 
WLAN_OBJ_TYPE_MAX = 4, +}; + +/** + * struct wlan_peer_list { + * @peer_hash[]: peer sub lists + * @peer_list_lock: List lock, this has to be acquired on + * accessing/updating the list + * + * Peer list, it maintains sublists based on the MAC address as hash + * Note: For DA WDS similar peer list has to be maintained + * This peer list will not have WDS nodes + */ +struct wlan_peer_list { + qdf_list_t peer_hash[WLAN_PEER_HASHSIZE]; + qdf_spinlock_t peer_list_lock; +}; + +struct wlan_objmgr_psoc; +struct wlan_objmgr_pdev; +struct wlan_objmgr_vdev; +struct wlan_objmgr_peer; + +/* Create handler would return the following status + QDF_STATUS_SUCCESS-- + For synchronous handler:- this is returned on successful + component object creation + + QDF_STATUS_COMP_DISABLED-- + For synchronous handler:- this is returned on if component + doesn't want to allocate + + QDF_STATUS_COMP_ASYNC-- + For asynchronous handler:- this is returned on if component + needs a context break + + QDF_STATUS_E_NOMEM-- + For synchronous handler:- this is returned on if component + can't allocate + QDF_STATUS_E_FAILURE-- + For synchronous handler:- If it is failed, + For asynchronous handler:- If it is failed to post message + (means, not required)/feature is not supported +*/ +typedef QDF_STATUS (*wlan_objmgr_psoc_create_handler)( + struct wlan_objmgr_psoc *psoc, void *arg); +typedef QDF_STATUS (*wlan_objmgr_psoc_destroy_handler)( + struct wlan_objmgr_psoc *psoc, void *arg); +typedef void (*wlan_objmgr_psoc_status_handler)(struct wlan_objmgr_psoc *psoc, + void *arg, QDF_STATUS status); + +typedef QDF_STATUS (*wlan_objmgr_pdev_create_handler)( + struct wlan_objmgr_pdev *pdev, void *arg); +typedef QDF_STATUS (*wlan_objmgr_pdev_destroy_handler)( + struct wlan_objmgr_pdev *pdev, void *arg); +typedef void (*wlan_objmgr_pdev_status_handler)( + struct wlan_objmgr_pdev *pdev, void *arg, + QDF_STATUS status); + +typedef QDF_STATUS (*wlan_objmgr_vdev_create_handler)( + struct wlan_objmgr_vdev *vdev, void 
*arg); +typedef QDF_STATUS (*wlan_objmgr_vdev_destroy_handler)( + struct wlan_objmgr_vdev *vdev, void *arg); +typedef void (*wlan_objmgr_vdev_status_handler)( + struct wlan_objmgr_vdev *vdev, void *arg, + QDF_STATUS status); + +typedef QDF_STATUS (*wlan_objmgr_peer_create_handler)( + struct wlan_objmgr_peer *peer, void *arg); +typedef QDF_STATUS (*wlan_objmgr_peer_destroy_handler)( + struct wlan_objmgr_peer *peer, void *arg); +typedef void (*wlan_objmgr_peer_status_handler)( + struct wlan_objmgr_peer *peer, void *arg, + QDF_STATUS status); + +/** + * enum wlan_objmgr_ref_dbgid - ref count debug id + * @WLAN_OBJMGR_ID: Object manager internal operations + * @WLAN_MLME_SB_ID: MLME Southbound operations + * @WLAN_MLME_NB_ID: MLME Northbound operations + * @WLAN_MGMT_SB_ID: MGMT Northbound operations + * @WLAN_MGMT_NB_ID: MGMT Southbound operations + * @WLAN_HDD_ID_OBJ_MGR: HDD Object Manager operations + * @WLAN_OSIF_ID: New component's OS IF ID + * @WLAN_LEGACY_MAC_ID: Legacy MAC operations + * @WLAN_LEGACY_WMA_ID: Legacy WMA operations + * @WLAN_SERIALIZATION_ID: Serialization operations + * @WLAN_PMO_ID: power manager offload (PMO) ID + * @WLAN_LEGACY_SME_ID: Legacy SME operations + * @WLAN_SCAN_ID: scan operations + * @WLAN_WIFI_POS_CORE_ID: wifi positioning (CORE) + * @WLAN_DFS_ID: DFS operations + * @WLAN_P2P_ID: P2P operations + * @WLAN_TDLS_SB_ID: TDLS Southbound operations + * @WLAN_TDLS_NB_ID: TDLS Northbound operations + * @WLAN_ATF_ID: Airtime Fairness operations + * @WLAN_CRYPTO_ID: Crypto service operation + * @WLAN_NAN_ID: nan operations + * @WLAN_REGULATORY_SB_ID: SB regulatory operations + * @WLAN_REGULATORY_NB_ID: NB regulatory operations + * @WLAN_POLICY_MGR_ID: Policy Manager operations + * @WLAN_SON_ID: SON + * @WLAN_SA_API_ID: SA PAI + * @WLAN_SPECTRAL_ID: Spectral operations + * @WLAN_SPLITMAC_ID: SplitMac + * @WLAN_DEBUG_ID: Debug operations + * @WLAN_DIRECT_BUF_RX_ID: Direct Buffer Receive operations + * @WLAN_DISA_ID: DISA (encryption test) 
operations + * @WLAN_FTM_ID: FTM module + * @WLAN_FD_ID: FILS Discovery + * @WLAN_OCB_NB_ID: OCB Northbound operations + * @WLAN_OCB_SB_ID: OCB Southbound operations + * @WLAN_INIT_DEINIT_ID: Init deinit module + * @WLAN_IPA_ID: IPA operations + * @WLAN_CP_STATS_ID: Control Plane Statistics Module + * @WLAN_GREEN_AP_ID: Green AP operations + * @WLAN_WIFI_POS_OSIF_ID: wifi positioning (OSID) + * @WLAN_WIFI_POS_TGT_IF_ID: wifi positioning (Target IF) + * @WLAN_MLME_OBJ_DEL_ID: Object delete req/resp tracking with FW + * @WLAN_ACTION_OUI_ID: action oui operations + * @WLAN_REF_ID_MAX: Max id used to generate ref count tracking array + */ + /* New value added to the enum must also be reflected in function + * string_from_dbgid() + */ +typedef enum { + WLAN_OBJMGR_ID = 0, + WLAN_MLME_SB_ID = 1, + WLAN_MLME_NB_ID = 2, + WLAN_MGMT_SB_ID = 3, + WLAN_MGMT_NB_ID = 4, + WLAN_HDD_ID_OBJ_MGR = 5, + WLAN_OSIF_ID = 6, + WLAN_LEGACY_MAC_ID = 7, + WLAN_LEGACY_WMA_ID = 8, + WLAN_SERIALIZATION_ID = 9, + WLAN_PMO_ID = 10, + WLAN_LEGACY_SME_ID = 11, + WLAN_SCAN_ID = 12, + WLAN_WIFI_POS_CORE_ID = 13, + WLAN_DFS_ID = 14, + WLAN_P2P_ID = 15, + WLAN_TDLS_SB_ID = 16, + WLAN_TDLS_NB_ID = 17, + WLAN_ATF_ID = 18, + WLAN_CRYPTO_ID = 19, + WLAN_NAN_ID = 20, + WLAN_REGULATORY_SB_ID = 21, + WLAN_REGULATORY_NB_ID = 22, + WLAN_OFFCHAN_TXRX_ID = 23, + WLAN_POLICY_MGR_ID = 24, + WLAN_SON_ID = 25, + WLAN_SA_API_ID = 26, + WLAN_SPECTRAL_ID = 27, + WLAN_SPLITMAC_ID = 28, + WLAN_DEBUG_ID = 29, + WLAN_DIRECT_BUF_RX_ID = 30, + WLAN_DISA_ID = 31, + WLAN_FTM_ID = 32, + WLAN_FD_ID = 33, + WLAN_OCB_NB_ID = 34, + WLAN_OCB_SB_ID = 35, + WLAN_INIT_DEINIT_ID = 36, + WLAN_IPA_ID = 37, + WLAN_CP_STATS_ID = 38, + WLAN_GREEN_AP_ID = 39, + WLAN_WIFI_POS_OSIF_ID = 40, + WLAN_WIFI_POS_TGT_IF_ID = 41, + WLAN_MLME_OBJ_DEL_ID = 42, + WLAN_ACTION_OUI_ID = 43, + WLAN_REF_ID_MAX, +} wlan_objmgr_ref_dbgid; + +/** + * string_from_dbgid() - Convert Refcnt dbgid to respective string + * @id - Reference count debug id + * + * Debug 
support function to convert refcnt dbgid to string. + * Please note to add new string in the array at index equal to + * its enum value in wlan_objmgr_ref_dbgid. + */ +static inline char *string_from_dbgid(wlan_objmgr_ref_dbgid id) +{ + static const char *strings[] = { "WLAN_OBJMGR_ID", + "WLAN_MLME_SB_ID", + "WLAN_MLME_NB_ID", + "WLAN_MGMT_SB_ID", + "WLAN_MGMT_NB_ID", + "WLAN_HDD_ID_OBJ_MGR", + "WLAN_OSIF_ID", + "WLAN_LEGACY_MAC_ID", + "WLAN_LEGACY_WMA_ID", + "WLAN_SERIALIZATION_ID", + "WLAN_PMO_ID", + "WLAN_LEGACY_SME_ID", + "WLAN_SCAN_ID", + "WLAN_WIFI_POS_CORE_ID", + "WLAN_DFS_ID", + "WLAN_P2P_ID", + "WLAN_TDLS_SB_ID", + "WLAN_TDLS_NB_ID", + "WLAN_ATF_ID", + "WLAN_CRYPTO_ID", + "WLAN_NAN_ID", + "WLAN_REGULATORY_SB_ID", + "WLAN_REGULATORY_NB_ID", + "WLAN_OFFCHAN_TXRX_ID", + "WLAN_POLICY_MGR_ID", + "WLAN_SON_ID", + "WLAN_SA_API_ID", + "WLAN_SPECTRAL_ID", + "WLAN_SPLITMAC_ID", + "WLAN_DEBUG_ID", + "WLAN_DIRECT_BUF_RX_ID", + "WLAN_DISA_ID", + "WLAN_FTM_ID", + "WLAN_FD_ID", + "WLAN_OCB_NB_ID", + "WLAN_OCB_SB_ID", + "WLAN_INIT_DEINIT_ID", + "WLAN_IPA_ID", + "WLAN_CP_STATS_ID", + "WLAN_GREEN_AP_ID", + "WLAN_WIFI_POS_OSIF_ID", + "WLAN_WIFI_POS_TGT_IF_ID", + "WLAN_MLME_OBJ_DEL_ID", + "WLAN_ACTION_OUI_ID", + "WLAN_REF_ID_MAX"}; + + return (char *)strings[id]; +} + +#ifdef WLAN_OBJMGR_DEBUG +#define WLAN_OBJMGR_BUG(val) QDF_BUG(val) +#else +#define WLAN_OBJMGR_BUG(val) +#endif +#define WLAN_OBJMGR_RATELIMIT_THRESH 2 +#endif /* _WLAN_OBJMGR_CMN_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_debug.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..2df21f0d6d2b3f546a5af044c462c39a91f8fd76 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_debug.h @@ -0,0 +1,96 @@ + /* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Public Data Structures to perform debug operations + * on object manager + */ + +#ifndef _WLAN_OBJMGR_DEBUG_H_ +#define _WLAN_OBJMGR_DEBUG_H_ + +#include + +#ifdef WLAN_OBJMGR_DEBUG + +/** + * wlan_objmgr_notify_log_delete()- insert + * logically deleted object into list + * @obj: object to be inserted + * @obj_type: type of object to be inserted + * + * Return: void + */ +void wlan_objmgr_notify_log_delete(void *obj, + enum wlan_objmgr_obj_type obj_type); + +/** + * wlan_objmgr_notify_destroy() - remove + * logically deleted object from list + * @obj: object to be removed + * @obj_type: type of object to be removed + * + * Return: void + */ +void wlan_objmgr_notify_destroy(void *obj, + enum wlan_objmgr_obj_type obj_type); + +/** + * wlan_objmgr_debug_info_init() - initialize + * the logically deleted list object + * Caller need to protect with global object lock + * + * Return: void + */ +void wlan_objmgr_debug_info_init(void); + +/** + * wlan_objmgr_debug_info_deinit() - deinitialize + * the logically deleted list object + * + * Return: void + */ +void wlan_objmgr_debug_info_deinit(void); + + +#else + +static inline void +wlan_objmgr_notify_log_delete(void *obj, + enum wlan_objmgr_obj_type obj_type) +{ +} 
+ +static inline void +wlan_objmgr_notify_destroy(void *obj, + enum wlan_objmgr_obj_type obj_typ) +{ +} + +static inline void +wlan_objmgr_debug_info_init(void) +{ +} + +static inline void +wlan_objmgr_debug_info_deinit(void) +{ +} + +#endif /*WLAN_OBJMGR_DEBUG*/ + +#endif /*_WLAN_OBJMGR_DEBUG_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_global_obj.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_global_obj.h new file mode 100644 index 0000000000000000000000000000000000000000..ef9494b07a913c67996c061aa101e0238adcc835 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_global_obj.h @@ -0,0 +1,483 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + /** + * DOC: Public APIs to perform operations on Global objects + */ +#ifndef _WLAN_OBJMGR_GLOBAL_OBJ_H_ +#define _WLAN_OBJMGR_GLOBAL_OBJ_H_ + +/** + * wlan_objmgr_global_obj_init() - global object initialization + * + * Creates global object, intializes with default values + * + * Return: SUCCESS on successful creation, + * FAILURE on Mem alloc failure or allocated already + * + */ +QDF_STATUS wlan_objmgr_global_obj_init(void); + +/** + * wlan_objmgr_global_obj_deinit() - global object deinitialization + * + * Deletes global object + * + * Return: SUCCESS on successful deletion, + * FAILURE on object is not found + * + */ +QDF_STATUS wlan_objmgr_global_obj_deinit(void); + +/** + * wlan_objmgr_global_obj_can_destroyed() - Checks whether global object + * can be destroyed + * + * Checks the psoc table of global object, if psoc table is empty + * returns the SUCCESS + * + * Return: SUCCESS on can be destroyed, + * FAILURE on can't be destroyed + * + */ +QDF_STATUS wlan_objmgr_global_obj_can_destroyed(void); + +/** + * wlan_objmgr_register_psoc_create_handler() - register psoc create handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PSOC creation + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on PSOC creation + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_psoc_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_create_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_psoc_create_handler() - unregister psoc create handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PSOC creation + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not 
present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_psoc_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_create_handler handler, + void *args); + +/** + * wlan_objmgr_register_psoc_destroy_handler() - register destroy handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PSOC deletion + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on PSOC deletion + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_psoc_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_destroy_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_psoc_destroy_handler() - unregister destroy handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PSOC deletion + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_psoc_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_destroy_handler handler, + void *args); + +/** + * wlan_objmgr_register_psoc_status_handler() - register status handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PSOC status + * change + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on PSOC object status change + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_psoc_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_status_handler handler, + void *args); + +/** + * 
wlan_objmgr_unregister_psoc_status_handler() - unregister status handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PSOC status + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_psoc_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_status_handler handler, + void *args); + +/** + * wlan_objmgr_register_pdev_create_handler() - register pdev create handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PDEV creation + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on PDEV creation + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_pdev_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_create_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_pdev_create_handler() - unregister pdev create handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PDEV creation + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_pdev_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_create_handler handler, + void *args); + +/** + * wlan_objmgr_register_pdev_destroy_handler() - register pdev destroy handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PDEV deletion + * + * API, allows other UMAC components to register 
handler + * The registered handler would be invoked on PDEV deletion + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_pdev_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_destroy_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_pdev_destroy_handler() - unregister pdev destroy handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PDEV deletion + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_pdev_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_destroy_handler handler, + void *args); + +/** + * wlan_objmgr_register_pdev_status_handler() - register pdev status handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PDEV status + * change + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on PDEV object status change + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_pdev_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_status_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_pdev_status_handler() - unregister pdev status handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PDEV status + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_pdev_status_handler( + enum 
wlan_umac_comp_id id, + wlan_objmgr_pdev_status_handler handler, + void *args); + +/** + * wlan_objmgr_register_vdev_create_handler() - register vdev create handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on VDEV creation + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on VDEV creation + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_vdev_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_create_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_vdev_create_handler() - unregister vdev create handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on VDEV creation + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_vdev_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_create_handler handler, + void *args); + +/** + * wlan_objmgr_register_vdev_destroy_handler() - register vdev destroy handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on VDEV deletion + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on VDEV deletion + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_vdev_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_destroy_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_vdev_destroy_handler() - unregister vdev destroy handler + * @id: component id + * @handler: function pointer 
of the component + * @args: args, if component wants certain args to be passed on VDEV deletion + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_vdev_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_destroy_handler handler, + void *args); + +/** + * wlan_objmgr_register_vdev_status_handler() - register vdev status handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on VDEV status + * change + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on VDEV object status change + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_vdev_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_status_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_vdev_status_handler() - unregister vdev status handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on VDEV status + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_vdev_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_status_handler handler, + void *args); + +/** + * wlan_objmgr_register_peer_create_handler() - register peer create handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PEER creation + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on PEER creation + * + * Return: SUCCESS, + * Failure (if 
registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_peer_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_create_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_peer_create_handler() - unregister peer create handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PEER creation + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_peer_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_create_handler handler, + void *args); + +/** + * wlan_objmgr_register_peer_destroy_handler() - register peer destroy handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PEER deletion + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on PEER deletion + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_peer_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_destroy_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_peer_destroy_handler() - unregister peer destroy handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PEER deletion + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_peer_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_destroy_handler handler, + void *args); + +/** + * wlan_objmgr_register_peer_status_handler() - 
register peer status handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PEER status + * change + * + * API, allows other UMAC components to register handler + * The registered handler would be invoked on PEER object status change + * + * Return: SUCCESS, + * Failure (if registration fails, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_register_peer_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_status_handler handler, + void *args); + +/** + * wlan_objmgr_unregister_peer_status_handler() - unregister peer status handler + * @id: component id + * @handler: function pointer of the component + * @args: args, if component wants certain args to be passed on PEER status + * + * API, allows other UMAC components to unregister handler + * + * Return: SUCCESS, + * Failure (if handler is not present, each failure has different error + * code) + */ +QDF_STATUS wlan_objmgr_unregister_peer_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_status_handler handler, + void *args); + +#endif /* _WLAN_OBJMGR_GLOBAL_OBJ_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_pdev_obj.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_pdev_obj.h new file mode 100644 index 0000000000000000000000000000000000000000..3dcdf1753794ba7b77fd4fd107d696bd8366f33e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_pdev_obj.h @@ -0,0 +1,959 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Define the pdev data structure of UMAC + * Public APIs to perform operations on Global objects + */ + +#ifndef _WLAN_OBJMGR_PDEV_OBJ_H_ +#define _WLAN_OBJMGR_PDEV_OBJ_H_ + +#include +#include "wlan_objmgr_psoc_obj.h" + +/* STATUS: scanning */ +#define WLAN_PDEV_F_SCAN 0x00000001 +/* STATUS: use short slot time*/ +#define WLAN_PDEV_F_SHSLOT 0x00000002 + /* STATUS: channel switch event pending after DFS RADAR */ +#define WLAN_PDEV_F_DFS_CHANSWITCH_PENDING 0x00000004 + /* TX Power: fixed rate */ +#define WLAN_PDEV_F_TXPOW_FIXED 0x00000008 + /* STATUS: use short preamble */ +#define WLAN_PDEV_F_SHPREAMBLE 0x00000010 + /* CONF: do alignment pad */ +#define WLAN_PDEV_F_DATAPAD 0x00000020 + /* STATUS: protection enabled */ +#define WLAN_PDEV_F_USEPROT 0x00000040 + /* STATUS: use barker preamble*/ +#define WLAN_PDEV_F_USEBARKER 0x00000080 + /* CONF: DISABLE 2040 coexistence */ +#define WLAN_PDEV_F_COEXT_DISABLE 0x00000100 + /* STATE: scan pending */ +#define WLAN_PDEV_F_SCAN_PENDING 0x00000200 + /* CONF: send regclassids in country ie */ +#define WLAN_PDEV_F_REGCLASS 0x00000400 + /* CONF: block the use of DFS channels */ +#define WLAN_PDEV_F_BLKDFSCHAN 0x00000800 + /* STATUS: 11D in used */ +#define WLAN_PDEV_F_DOT11D 0x00001000 + /* STATUS: 11D channel-switch detected */ +#define WLAN_PDEV_F_RADAR 0x00002000 + /* CONF: A-MPDU supported */ +#define WLAN_PDEV_F_AMPDU 0x00004000 + /* CONF: A-MSDU supported */ +#define WLAN_PDEV_F_AMSDU 
0x00008000 + /* CONF: HT traffic protected */ +#define WLAN_PDEV_F_HTPROT 0x00010000 + /* CONF: Reset once */ +#define WLAN_PDEV_F_RESET 0x00020000 + /* CONF: ignore 11d beacon */ +#define WLAN_PDEV_F_IGNORE_11D_BEACON 0x00040000 + /* HT CAP IE present */ +#define WLAN_PDEV_F_HTVIE 0x00080000 + /* radio in middle of CSA */ +#define WLAN_PDEV_F_CSA_WAIT 0x00100000 + /* wnm support flag */ +#define WLAN_PDEV_F_WNM 0x00200000 +#define WLAN_PDEV_F_2G_CSA 0x00400000 + /* enhanced independent repeater */ +#define WLAN_PDEV_F_ENH_REP_IND 0x00800000 + /* Disable Tx AMSDU for station vap */ +#define WLAN_PDEV_F_STA_AMPDU_DIS 0x01000000 +/* do not send probe request in passive channel */ +#define WLAN_PDEV_F_STRICT_PSCAN_EN 0x02000000 + /* dupie (ANA,pre ANA ) */ +/*#define WLAN_PDEV_F_DUPIE 0x00200000*/ + /* QWRAP enable flag */ +#define WLAN_PDEV_F_WRAP_EN 0x04000000 + +/* PDEV op flags */ + /* Enable htrate for wep and tkip */ +#define WLAN_PDEV_OP_WEP_TKIP_HTRATE 0x00000001 + /* non HT AP found flag */ +#define WLAN_PDEV_OP_NON_HT_AP 0x00000002 + /* block the use of DFS channels flag */ +#define WLAN_PDEV_OP_BLK_DFS_CHAN 0x00000004 + /* 11.h flag */ +#define WLAN_PDEV_OP_DOTH 0x00000008 + /* Off-channel support enabled */ +#define WLAN_PDEV_OP_OFFCHAN 0x00000010 +#define WLAN_PDEV_OP_HT20ADHOC 0x00000020 +#define WLAN_PDEV_OP_HT40ADHOC 0x00000040 +#define WLAN_PDEV_OP_HTADHOC_AGGR 0x00000080 + /* disallow CC change when assoc completes */ +#define WLAN_PDEV_OP_DISALLOW_AUTO_CC 0x00000100 + /* Is P2P Enabled? 
*/ +#define WLAN_PDEV_OP_P2P 0x00000200 + /* disallowed */ +#define WLAN_PDEV_OP_IGNORE_DYNHALT 0x00000400 + /* overwrite probe response IE with beacon IE */ +#define WLAN_PDEV_OP_OVERRIDE_PROBERESP 0x00000800 +#define WLAN_PDEV_OP_DROPSTA_QUERY 0x00001000 +#define WLAN_PDEV_OP_BLK_REPORT_FLOOD 0x00002000 + /* Offchan scan */ +#define WLAN_PDEV_OP_OFFCHAN_SCAN 0x00004000 + /*Consider OBSS non-erp to change to long slot*/ +#define WLAN_PDEV_OP_OBSS_LONGSLOT 0x00008000 + /* enable/disable min rssi cli block */ +#define WLAN_PDEV_OP_MIN_RSSI_ENABLE 0x00010000 + + +struct osif_pdev_priv; + +/** + * struct wlan_objmgr_pdev_nif - pdev object nif structure + * @pdev_fw_caps: radio specific FW capabilities + * @pdev_feature_caps: radio specific feature capabilities + * @pdev_ospriv: OS specific pointer + * @macaddr[]: MAC address + * @notified_ap_vdev: ap vdev + */ +struct wlan_objmgr_pdev_nif { + uint32_t pdev_fw_caps; + uint32_t pdev_feature_caps; + struct pdev_osif_priv *pdev_ospriv; + uint8_t macaddr[QDF_MAC_ADDR_SIZE]; + uint8_t notified_ap_vdev; +}; + +/** + * struct wlan_objmgr_pdev_mlme - pdev object mlme structure + * @pdev_op_flags: PDEV operation flags, can be used to know the + * operation status (deletion progress, etc) + */ +struct wlan_objmgr_pdev_mlme { + uint32_t pdev_op_flags; +}; + +/** + * struct wlan_objmgr_pdev_objmgr - pdev object object manager structure + * @wlan_pdev_id: PDEV id + * @wlan_vdev_count: VDEVs count + * @max_vdev_count: Max no. 
of VDEVs supported by this PDEV + * @print_cnt: Count to throttle Logical delete prints + * @wlan_vdev_list: List maintains the VDEVs created on this PDEV + * @wlan_peer_count: Peer count + * @max_peer_count: Max Peer count + * @temp_peer_count: Temporary peer count + * @wlan_psoc: back pointer to PSOC, its attached to + * @ref_cnt: Ref count + * @ref_id_dbg: Array to track Ref count + */ +struct wlan_objmgr_pdev_objmgr { + uint8_t wlan_pdev_id; + uint8_t wlan_vdev_count; + uint8_t max_vdev_count; + uint8_t print_cnt; + qdf_list_t wlan_vdev_list; + uint16_t wlan_peer_count; + uint16_t max_peer_count; + uint16_t temp_peer_count; + struct wlan_objmgr_psoc *wlan_psoc; + qdf_atomic_t ref_cnt; + qdf_atomic_t ref_id_dbg[WLAN_REF_ID_MAX]; +}; + +/** + * struct wlan_objmgr_pdev - PDEV common object + * @current_chan_list: Active/current Channel list of the radio + * @pdev_nif: pdev nif structure + * @pdev_objmgr: pdev object manager structure + * @pdev_mlme: pdev MLME structure + * @pdev_comp_priv_obj[]: component's private object array + * @obj_status[]: object status of each component object + * @obj_state: object state + * @tgt_if_handle: Target interface handle + * @dp_handle: DP module handle + * @pdev_lock: lock to protect object +*/ +struct wlan_objmgr_pdev { + struct wlan_chan_list *current_chan_list; + struct wlan_objmgr_pdev_nif pdev_nif; + struct wlan_objmgr_pdev_objmgr pdev_objmgr; + struct wlan_objmgr_pdev_mlme pdev_mlme; + void *pdev_comp_priv_obj[WLAN_UMAC_MAX_COMPONENTS]; + QDF_STATUS obj_status[WLAN_UMAC_MAX_COMPONENTS]; + WLAN_OBJ_STATE obj_state; + void *tgt_if_handle; + void *dp_handle; + qdf_spinlock_t pdev_lock; +}; + +/** + ** APIs to Create/Delete Global object APIs + */ +/** + * wlan_objmgr_pdev_obj_create() - pdev create + * @psoc: PSOC object + * @scn: os private object + * + * Creates PDEV object, intializes with default values + * Invokes the registered notifiers to create component object + * + * Return: Handle to struct wlan_objmgr_psoc on 
successful creation,
+ *         NULL on Failure (on Mem alloc failure and Component objects
+ *         Failure)
+ */
+struct wlan_objmgr_pdev *wlan_objmgr_pdev_obj_create(
+			struct wlan_objmgr_psoc *psoc,
+			struct pdev_osif_priv *osif_priv);
+
+/**
+ * wlan_objmgr_pdev_obj_delete() - pdev delete
+ * @pdev: PDEV object
+ *
+ * Logically deletes PDEV object,
+ * Once all the references are released, object manager invokes the registered
+ * notifiers to destroy component objects
+ *
+ * Return: SUCCESS/FAILURE
+ */
+QDF_STATUS wlan_objmgr_pdev_obj_delete(struct wlan_objmgr_pdev *pdev);
+
+/**
+ ** APIs to attach/detach component objects
+ */
+/**
+ * wlan_objmgr_pdev_component_obj_attach() - pdev comp object attach
+ * @pdev: PDEV object
+ * @id: Component id
+ * @comp_priv_obj: component's private object pointer
+ * @status: Component's private object creation status
+ *
+ * API to be used for attaching component object with PDEV common object
+ *
+ * Return: SUCCESS on successful storing of component's object in common object
+ *         On FAILURE (appropriate failure codes are returned)
+ */
+QDF_STATUS wlan_objmgr_pdev_component_obj_attach(
+		struct wlan_objmgr_pdev *pdev,
+		enum wlan_umac_comp_id id,
+		void *comp_priv_obj,
+		QDF_STATUS status);
+
+/**
+ * wlan_objmgr_pdev_component_obj_detach() - pdev comp object detach
+ * @pdev: PDEV object
+ * @id: Component id
+ * @comp_priv_obj: component's private object pointer
+ *
+ * API to be used for detaching component object with PDEV common object
+ *
+ * Return: SUCCESS on successful removal of component's object from common
+ *         object
+ *         On FAILURE (appropriate failure codes are returned)
+ */
+QDF_STATUS wlan_objmgr_pdev_component_obj_detach(
+		struct wlan_objmgr_pdev *pdev,
+		enum wlan_umac_comp_id id,
+		void *comp_priv_obj);
+
+/**
+ ** APIs to operations on pdev objects
+ */
+
+typedef void (*wlan_objmgr_pdev_op_handler)(struct wlan_objmgr_pdev *pdev,
+					void *object,
+					void *arg);
+
+/**
+ * wlan_objmgr_pdev_iterate_obj_list() - operate on all objects of pdev
+ * @pdev: PDEV object
+ * @obj_type: VDEV_OP/PEER_OP
+ * @handler: the handler will be called for each object of requested type
+ *           the handler should be implemented to perform required operation
+ * @arg: arguments passed by caller
+ * @lock_free_op: it is obsolete
+ * @dbg_id: id of the caller
+ *
+ * API to be used for performing the operations on all VDEV/PEER objects
+ * of pdev
+ *
+ * Return: SUCCESS/FAILURE
+ */
+QDF_STATUS wlan_objmgr_pdev_iterate_obj_list(
+		struct wlan_objmgr_pdev *pdev,
+		enum wlan_objmgr_obj_type obj_type,
+		wlan_objmgr_pdev_op_handler handler,
+		void *arg, uint8_t lock_free_op,
+		wlan_objmgr_ref_dbgid dbg_id);
+
+/**
+ * wlan_objmgr_trigger_pdev_comp_priv_object_creation() - create
+ *                                                        comp object of pdev
+ * @pdev: PDEV object
+ * @id: Component id
+ *
+ * API to create component private object in run time, this would be
+ * used for features which gets enabled in run time
+ *
+ * Return: SUCCESS on successful creation
+ *         On FAILURE (appropriate failure codes are returned)
+ */
+QDF_STATUS wlan_objmgr_trigger_pdev_comp_priv_object_creation(
+		struct wlan_objmgr_pdev *pdev,
+		enum wlan_umac_comp_id id);
+
+/**
+ * wlan_objmgr_trigger_pdev_comp_priv_object_deletion() - destroy
+ *                                                        comp object of pdev
+ * @pdev: PDEV object
+ * @id: Component id
+ *
+ * API to destroy component private object in run time, this would
+ * be used for features which gets disabled in run time
+ *
+ * Return: SUCCESS on successful deletion
+ *         On FAILURE (appropriate failure codes are returned)
+ */
+QDF_STATUS wlan_objmgr_trigger_pdev_comp_priv_object_deletion(
+		struct wlan_objmgr_pdev *pdev,
+		enum wlan_umac_comp_id id);
+
+/**
+ * wlan_objmgr_get_vdev_by_id_from_pdev() - find vdev using id from pdev
+ * @pdev: PDEV object
+ * @vdev_id: vdev id
+ * @dbg_id: id of the caller
+ *
+ * API to find vdev object pointer by vdev id from pdev's vdev list
+ *
+ * This API increments the ref count of the vdev object internally, the
+ * caller has to invoke the wlan_objmgr_vdev_release_ref() to decrement
+ * ref count
+ *
+ * Return: vdev pointer
+ *         NULL on FAILURE
+ */
+struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_pdev(
+			struct wlan_objmgr_pdev *pdev, uint8_t vdev_id,
+			wlan_objmgr_ref_dbgid dbg_id);
+
+/**
+ * wlan_objmgr_get_vdev_by_id_from_pdev_no_state() - find vdev using id from
+ *                                                   pdev
+ * @pdev: PDEV object
+ * @vdev_id: vdev id
+ * @dbg_id: id of the caller
+ *
+ * API to find vdev object pointer by vdev id from pdev's vdev list
+ *
+ * This API increments the ref count of the vdev object internally, the
+ * caller has to invoke the wlan_objmgr_vdev_release_ref() to decrement
+ * ref count
+ *
+ * Return: vdev pointer
+ *         NULL on FAILURE
+ */
+struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_pdev_no_state(
+			struct wlan_objmgr_pdev *pdev, uint8_t vdev_id,
+			wlan_objmgr_ref_dbgid dbg_id);
+
+/**
+ * wlan_objmgr_get_vdev_by_macaddr_from_pdev() - find vdev using macaddr
+ * @pdev: PDEV object
+ * @macaddr: MAC address
+ * @dbg_id: id of the caller
+ *
+ * API to find vdev object pointer by vdev mac addr from pdev's vdev list
+ *
+ * This API increments the ref count of the vdev object internally, the
+ * caller has to invoke the wlan_objmgr_vdev_release_ref() to decrement
+ * ref count
+ *
+ * Return: vdev pointer
+ *         NULL on FAILURE
+ */
+struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_pdev(
+		struct wlan_objmgr_pdev *pdev, uint8_t *macaddr,
+		wlan_objmgr_ref_dbgid dbg_id);
+
+/**
+ * wlan_objmgr_get_vdev_by_macaddr_from_pdev_no_state() - find vdev using
+ *                                                        macaddr
+ * @pdev: PDEV object
+ * @macaddr: MAC address
+ * @dbg_id: id of the caller
+ *
+ * API to find vdev object pointer by vdev mac addr from pdev's vdev list
+ *
+ * This API increments the ref count of the vdev object internally, the
+ * caller has to invoke the wlan_objmgr_vdev_release_ref() to decrement
+ * ref count
+ *
+ * Return: vdev pointer
+ *         NULL on FAILURE
+ */
+struct
wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_pdev_no_state( + struct wlan_objmgr_pdev *pdev, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_pdev_get_comp_private_obj() - get pdev component private object + * @pdev: PDEV object + * @id: Component id + * + * API to get component private object + * + * Return: void *ptr on SUCCESS + * NULL on Failure + */ +void *wlan_objmgr_pdev_get_comp_private_obj( + struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id); + +/** + * wlan_pdev_obj_lock() - Acquire PDEV spinlock + * @pdev: PDEV object + * + * API to acquire PDEV lock + * Parent lock should not be taken in child lock context + * but child lock can be taken in parent lock context + * (for ex: psoc lock can't be invoked in pdev/vdev/peer lock context) + * + * Return: void + */ +static inline void wlan_pdev_obj_lock(struct wlan_objmgr_pdev *pdev) +{ + qdf_spin_lock_bh(&pdev->pdev_lock); +} + +/** + * wlan_pdev_obj_unlock() - Release PDEV spinlock + * @pdev: PDEV object + * + * API to Release PDEV lock + * + * Return: void + */ +static inline void wlan_pdev_obj_unlock(struct wlan_objmgr_pdev *pdev) +{ + qdf_spin_unlock_bh(&pdev->pdev_lock); +} + +/** + * wlan_pdev_get_psoc() - get psoc + * @pdev: PDEV object + * + * API to get the psoc object from PDEV + * + * Return: + * @psoc: PSOC object + */ +static inline struct wlan_objmgr_psoc *wlan_pdev_get_psoc( + struct wlan_objmgr_pdev *pdev) +{ + return pdev->pdev_objmgr.wlan_psoc; +} + +/** + * wlan_pdev_set_psoc() - set psoc + * @pdev: PDEV object + * @psoc: PSOC object + * + * API to set the psoc object from PDEV + * + * Return: void + */ +static inline void wlan_pdev_set_psoc(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_psoc *psoc) +{ + pdev->pdev_objmgr.wlan_psoc = psoc; +} + +/** + * wlan_pdev_nif_fw_cap_set() - set fw caps + * @pdev: PDEV object + * @cap: capability flag to be set + * + * API to set fw caps in pdev + * + * Return: void + */ +static inline void 
wlan_pdev_nif_fw_cap_set(struct wlan_objmgr_pdev *pdev, + uint32_t cap) +{ + pdev->pdev_nif.pdev_fw_caps |= cap; +} + +/** + * wlan_pdev_nif_fw_cap_clear() - clear fw cap + * @pdev: PDEV object + * @cap: capability flag to be cleared + * + * API to clear fw caps in pdev + * + * Return: void + */ +static inline void wlan_pdev_nif_fw_cap_clear(struct wlan_objmgr_pdev *pdev, + uint32_t cap) +{ + pdev->pdev_nif.pdev_fw_caps &= ~cap; +} + +/** + * wlan_pdev_nif_fw_cap_get() - get fw caps + * @pdev: PDEV object + * @cap: capability flag to be checked + * + * API to know, whether particular fw caps flag is set in pdev + * + * Return: 1 (for set) or 0 (for not set) + */ +static inline uint8_t wlan_pdev_nif_fw_cap_get(struct wlan_objmgr_pdev *pdev, + uint32_t cap) +{ + return (pdev->pdev_nif.pdev_fw_caps & cap) ? 1 : 0; +} + +/** + * wlan_pdev_nif_feat_cap_set() - set feature caps + * @pdev: PDEV object + * @cap: capability flag to be set + * + * API to set feat caps in pdev + * + * Return: void + */ +static inline void wlan_pdev_nif_feat_cap_set(struct wlan_objmgr_pdev *pdev, + uint32_t cap) +{ + pdev->pdev_nif.pdev_feature_caps |= cap; +} + +/** + * wlan_pdev_nif_feat_cap_clear() - clear feature caps + * @pdev: PDEV object + * @cap: capability flag to be cleared + * + * API to clear feat caps in pdev + * + * Return: void + */ +static inline void wlan_pdev_nif_feat_cap_clear(struct wlan_objmgr_pdev *pdev, + uint32_t cap) +{ + pdev->pdev_nif.pdev_feature_caps &= ~cap; +} + +/** + * wlan_pdev_nif_feat_cap_get() - get feature caps + * @pdev: PDEV object + * @cap: capability flag to be checked + * + * API to know, whether particular feat caps flag is set in pdev + * + * Return: 1 (for set) or 0 (for not set) + */ +static inline uint8_t wlan_pdev_nif_feat_cap_get(struct wlan_objmgr_pdev *pdev, + uint32_t cap) +{ + return (pdev->pdev_nif.pdev_feature_caps & cap) ? 
1 : 0; +} + +/** + * wlan_pdev_get_hw_macaddr() - get hw macaddr + * @pdev: PDEV object + * + * API to get HW MAC address form PDEV + * + * Caller need to acquire lock with wlan_pdev_obj_lock() + * + * Return: @macaddr -MAC address + */ +static inline uint8_t *wlan_pdev_get_hw_macaddr(struct wlan_objmgr_pdev *pdev) +{ + if (!pdev) + return NULL; + + /* This API is invoked with lock acquired, do not add log prints */ + return pdev->pdev_nif.macaddr; +} + +/** + * wlan_pdev_set_hw_macaddr() - set hw macaddr + * @pdev: PDEV object + * @macaddr: MAC address + * + * API to set HW MAC address form PDEV + * + * Caller need to acquire lock with wlan_pdev_obj_lock() + * + * Return: void + */ +static inline void wlan_pdev_set_hw_macaddr(struct wlan_objmgr_pdev *pdev, + uint8_t *macaddr) +{ + /* This API is invoked with lock acquired, do not add log prints */ + WLAN_ADDR_COPY(pdev->pdev_nif.macaddr, macaddr); +} + +/** + * wlan_pdev_get_ospriv() - get os priv pointer + * @pdev: PDEV object + * + * API to get OS private pointer from PDEV + * + * Return: ospriv - private pointer + */ +static inline struct pdev_osif_priv *wlan_pdev_get_ospriv( + struct wlan_objmgr_pdev *pdev) +{ + return pdev->pdev_nif.pdev_ospriv; +} + +/** + * wlan_pdev_reset_ospriv() - reset os priv pointer + * @pdev: PDEV object + * + * API to reset OS private pointer in PDEV + * + * Return: void + */ +static inline void wlan_pdev_reset_ospriv(struct wlan_objmgr_pdev *pdev) +{ + pdev->pdev_nif.pdev_ospriv = NULL; +} + +/** + * wlan_pdev_set_max_vdev_count() - set pdev max vdev count + * @pdev: PDEV object + * @vdev count: Max vdev count + * + * API to set Max vdev count + * + * Return: void + */ +static inline void wlan_pdev_set_max_vdev_count(struct wlan_objmgr_pdev *pdev, + uint8_t max_vdev_count) +{ + pdev->pdev_objmgr.max_vdev_count = max_vdev_count; +} + +/** + * wlan_pdev_get_max_vdev_count() - get pdev max vdev count + * @pdev: PDEV object + * + * API to set Max vdev count + * + * Return: @vdev count: 
Max vdev count + */ +static inline uint8_t wlan_pdev_get_max_vdev_count( + struct wlan_objmgr_pdev *pdev) +{ + return pdev->pdev_objmgr.max_vdev_count; +} + +/** + * DOC: Examples to use PDEV ref count APIs + * + * In all the scenarios, the pair of API should be followed + * otherwise it lead to memory leak + * + * scenario 1: + * + * wlan_objmgr_pdev_obj_create() + * ---- + * wlan_objmgr_pdev_obj_delete() + * + * scenario 2: + * + * wlan_objmgr_pdev_get_ref() + * ---- + * the operations which are done on + * pdev object + * ---- + * wlan_objmgr_pdev_release_ref() + * + * scenario 3: + * + * wlan_objmgr_get_pdev_by_id[_no_state]() + * ---- + * the operations which are done on + * pdev object + * ---- + * wlan_objmgr_pdev_release_ref() + * + * scenario 4: + * + * wlan_objmgr_get_pdev_by_macaddr[_no_state]() + * ---- + * the operations which are done on + * pdev object + * ---- + * wlan_objmgr_pdev_release_ref() + */ + +/** + * wlan_objmgr_pdev_get_ref() - increment ref count + * @pdev: PDEV object + * @id: Object Manager ref debug id + * + * API to increment ref count of pdev + * + * Return: void + */ +void wlan_objmgr_pdev_get_ref(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid id); + +/** + * wlan_objmgr_pdev_try_get_ref() - increment ref count, if allowed + * @pdev: PDEV object + * @id: Object Manager ref debug id + * + * API to increment ref count of pdev after checking valid object state + * + * Return: void + */ +QDF_STATUS wlan_objmgr_pdev_try_get_ref(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid id); + +/** + * wlan_objmgr_pdev_release_ref() - decrement ref count + * @pdev: PDEV object + * @id: Object Manager ref debug id + * + * API to decrement ref count of pdev, if ref count is 1, it initiates the + * PDEV deletion + * + * Return: void + */ +void wlan_objmgr_pdev_release_ref(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid id); + +/** + * wlan_objmgr_pdev_get_first_vdev() - Get first vdev of pdev + * @pdev: PDEV object + * @dbg_id: 
Object Manager ref debug id + * + * API to get reference to first vdev of pdev. + * + * Return: reference to first vdev + */ +struct wlan_objmgr_vdev *wlan_objmgr_pdev_get_first_vdev( + struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_pdev_get_pdev_id() - get pdev id + * @pdev: PDEV object + * + * API to get pdev id from pdev object + * + * Return: @pdev id + */ +static inline +uint8_t wlan_objmgr_pdev_get_pdev_id(struct wlan_objmgr_pdev *pdev) +{ + return pdev->pdev_objmgr.wlan_pdev_id; +} + +/** + * wlan_pdev_set_tgt_if_handle(): API to set target if handle in pdev object + * @pdev: Pdev pointer + * @tgt_if_handle: target interface handle + * + * API to set target interface handle in pdev object + * + * Caller needs to acquire lock with wlan_pdev_obj_lock() + * + * Return: None + */ +static inline void wlan_pdev_set_tgt_if_handle(struct wlan_objmgr_pdev *pdev, + void *tgt_if_handle) +{ + /* This API is invoked with lock acquired, do not add log prints */ + if (pdev == NULL) + return; + + pdev->tgt_if_handle = tgt_if_handle; +} + +/** + * wlan_pdev_get_tgt_if_handle(): API to get target interface handle + * @pdev: Pdev pointer + * + * API to get target interface handle from pdev object + * + * Return: target interface handle + */ +static inline void *wlan_pdev_get_tgt_if_handle(struct wlan_objmgr_pdev *pdev) +{ + if (pdev == NULL) + return NULL; + + return pdev->tgt_if_handle; +} + +/** + * wlan_pdev_set_max_peer_count() - set max peer count + * @vdev: PDEV object + * @count: Max peer count + * + * API to set max peer count of PDEV + * + * Return: void + */ +static inline void wlan_pdev_set_max_peer_count(struct wlan_objmgr_pdev *pdev, + uint16_t count) +{ + pdev->pdev_objmgr.max_peer_count = count; +} + +/** + * wlan_pdev_get_max_peer_count() - get max peer count + * @pdev: PDEV object + * + * API to get max peer count of PDEV + * + * Return: max peer count + */ +static inline uint16_t wlan_pdev_get_max_peer_count( + struct 
wlan_objmgr_pdev *pdev) +{ + return pdev->pdev_objmgr.max_peer_count; +} + +/** + * wlan_pdev_get_peer_count() - get pdev peer count + * @pdev: PDEV object + * + * API to get peer count from PDEV + * + * Return: peer_count - pdev's peer count + */ +static inline uint16_t wlan_pdev_get_peer_count(struct wlan_objmgr_pdev *pdev) +{ + return pdev->pdev_objmgr.wlan_peer_count; +} + +/** + * wlan_pdev_get_temp_peer_count() - get pdev temporary peer count + * @pdev: PDEV object + * + * API to get temporary peer count from PDEV + * + * Return: temp_peer_count - pdev's temporary peer count + */ +static inline uint16_t wlan_pdev_get_temp_peer_count(struct wlan_objmgr_pdev *pdev) +{ + return pdev->pdev_objmgr.temp_peer_count; +} + + +/** + * wlan_pdev_incr_peer_count() - increment pdev peer count + * @pdev: PDEV object + * + * API to increment peer count of PDEV by 1 + * + * Return: void + */ +static inline void wlan_pdev_incr_peer_count(struct wlan_objmgr_pdev *pdev) +{ + pdev->pdev_objmgr.wlan_peer_count++; +} + +/** + * wlan_pdev_decr_peer_count() - decrement pdev peer count + * @pdev: PDEV object + * + * API to decrement peer count of PDEV by 1 + * + * Return: void + */ +static inline void wlan_pdev_decr_peer_count(struct wlan_objmgr_pdev *pdev) +{ + pdev->pdev_objmgr.wlan_peer_count--; +} + +/** + * wlan_pdev_incr_temp_peer_count() - increment temporary pdev peer count + * @pdev: PDEV object + * + * API to increment temporary peer count of PDEV by 1 + * + * Return: void + */ +static inline void wlan_pdev_incr_temp_peer_count(struct wlan_objmgr_pdev *pdev) +{ + pdev->pdev_objmgr.temp_peer_count++; +} + +/** + * wlan_pdev_decr_temp_peer_count() - decrement pdev temporary peer count + * @pdev: PDEV object + * + * API to decrement temporary peer count of PDEV by 1 + * + * Return: void + */ +static inline void wlan_pdev_decr_temp_peer_count(struct wlan_objmgr_pdev *pdev) +{ + pdev->pdev_objmgr.temp_peer_count--; +} + +/** + * wlan_pdev_get_vdev_count() - get PDEV vdev count + 
* @pdev: PDEV object + * + * API to get vdev count from PDEV + * + * Return: vdev_count - pdev's vdev count + */ +static inline uint8_t wlan_pdev_get_vdev_count(struct wlan_objmgr_pdev *pdev) +{ + return pdev->pdev_objmgr.wlan_vdev_count; +} + +/** + * wlan_pdev_set_dp_handle() - set dp handle + * @pdev: pdev object pointer + * @dp_handle: Data path module handle + * + * Return: void + */ +static inline void wlan_pdev_set_dp_handle(struct wlan_objmgr_pdev *pdev, + void *dp_handle) +{ + if (qdf_unlikely(!pdev)) { + QDF_BUG(0); + return; + } + + pdev->dp_handle = dp_handle; +} + +/** + * wlan_pdev_get_dp_handle() - get dp handle + * @pdev: pdev object pointer + * + * Return: dp handle + */ +static inline void *wlan_pdev_get_dp_handle(struct wlan_objmgr_pdev *pdev) +{ + if (qdf_unlikely(!pdev)) { + QDF_BUG(0); + return NULL; + } + + return pdev->dp_handle; +} + +#endif /* _WLAN_OBJMGR_PDEV_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_peer_obj.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_peer_obj.h new file mode 100644 index 0000000000000000000000000000000000000000..97f2ff87d5b54d190136c49ba5b7de62f9f7cff4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_peer_obj.h @@ -0,0 +1,1039 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Define the peer data structure of UMAC + * Public APIs to perform operations on Global objects + */ +#ifndef _WLAN_OBJMGR_PEER_OBJ_H_ +#define _WLAN_OBJMGR_PEER_OBJ_H_ + +#include +#include +#include "wlan_objmgr_vdev_obj.h" + +/* peer flags */ +/* authorized for data */ +#define WLAN_PEER_F_AUTH 0x00000001 +/* QoS enabled */ +#define WLAN_PEER_F_QOS 0x00000002 +/* ERP enabled */ +#define WLAN_PEER_F_ERP 0x00000004 +/* HT enabled */ +#define WLAN_PEER_F_HT 0x00000008 +/* NB: this must have the same value as IEEE80211_FC1_PWR_MGT */ +/* power save mode enabled */ +#define WLAN_PEER_F_PWR_MGT 0x00000010 +/* keytsc for node has already been updated */ +#define WLAN_PEER_F_TSC_SET 0x00000020 +/* U-APSD power save enabled */ +#define WLAN_PEER_F_UAPSD 0x00000040 +/* U-APSD triggerable state */ +#define WLAN_PEER_F_UAPSD_TRIG 0x00000080 +/* U-APSD SP in progress */ +#define WLAN_PEER_F_UAPSD_SP 0x00000100 +/* Atheros Owl or follow-on device */ +#define WLAN_PEER_F_ATH 0x00000200 +/* Owl WDS workaround needed*/ +#define WLAN_PEER_F_OWL_WDSWAR 0x00000400 +/* WDS link */ +#define WLAN_PEER_F_WDS 0x00000800 +/* No AMPDU support */ +#define WLAN_PEER_F_NOAMPDU 0x00001000 +/* wep/tkip aggregation support */ +#define WLAN_PEER_F_WEPTKIPAGGR 0x00002000 +#define WLAN_PEER_F_WEPTKIP 0x00004000 +/* temp node (not in the node table) */ +#define WLAN_PEER_F_TEMP 0x00008000 +/* 2.4ng VHT interop AMSDU disabled */ +#define WLAN_PEER_F_11NG_VHT_INTEROP_AMSDU_DISABLE 0x00010000 +/* 40 MHz Intolerant */ +#define WLAN_PEER_F_40MHZ_INTOLERANT 0x00020000 +/* node is paused*/ +#define WLAN_PEER_F_PAUSED 0x00040000 +#define 
WLAN_PEER_F_EXTRADELIMWAR 0x00080000 +/* 20 MHz requesting node */ +#define WLAN_PEER_F_REQ_20MHZ 0x00100000 +/* all the tid queues in ath layer are paused*/ +#define WLAN_PEER_F_ATH_PAUSED 0x00200000 +/*Require credit update*/ +#define WLAN_PEER_F_UAPSD_CREDIT_UPDATE 0x00400000 +/*Require send deauth when h/w queue no data*/ +#define WLAN_PEER_F_KICK_OUT_DEAUTH 0x00800000 +/* RRM enabled node */ +#define WLAN_PEER_F_RRM 0x01000000 +/* Wakeup node */ +#define WLAN_PEER_F_WAKEUP 0x02000000 +/* VHT enabled node */ +#define WLAN_PEER_F_VHT 0x04000000 +/* deauth/Disassoc wait for node cleanup till frame goes on + air and tx feedback received */ +#define WLAN_PEER_F_DELAYED_CLEANUP 0x08000000 +/* Extended stats enabled node */ +#define WLAN_PEER_F_EXT_STATS 0x10000000 +/* Prevent _ieee80211_node_leave() from reentry */ +#define WLAN_PEER_F_LEAVE_ONGOING 0x20000000 +/* band steering is enabled for this node */ +#define WLAN_PEER_F_BSTEERING_CAPABLE 0x40000000 +/* node is a local mesh peer */ +#define WLAN_PEER_F_LOCAL_MESH_PEER 0x80000000 + +/** + * enum wlan_peer_state - peer state + * @WLAN_INIT_STATE: Default state + * @WLAN_JOIN_STATE: Station mode, STA is waiting for Join + * @WLAN_AUTH_STATE: AUTH in progress + * @WLAN_ASSOC_STATE: ASSOC in progress + * @WLAN_WAITKEY_STATE: 4-way KEY handshake is in progress + * @WLAN_CONNECTED_STATE: Connected state + * @WLAN_PREAUTH_STATE: Station mode: Preauth + * @WLAN_DISCONNECT_STATE: Disconnect is in progress + */ +enum wlan_peer_state { + WLAN_INIT_STATE = 1, + WLAN_JOIN_STATE = 2, + WLAN_AUTH_STATE = 3, + WLAN_ASSOC_STATE = 4, + WLAN_WAITKEY_STATE = 5, + WLAN_CONNECTED_STATE = 6, + WLAN_PREAUTH_STATE = 7, + WLAN_DISCONNECT_STATE = 8, +}; + +/** + * struct wlan_objmgr_peer_mlme - mlme common data of peer + * @peer_capinfo: protocol cap info + * @peer_flags: PEER OP flags + * @peer_type: Type of PEER, (STA/AP/etc.) 
+ * @phymode: phy mode of station + * @max_rate: Max Rate supported + * @state: State of the peer + * @seq_num: Sequence number + * @rssi: Last received RSSI value + */ +struct wlan_objmgr_peer_mlme { + uint32_t peer_capinfo; + uint32_t peer_flags; + enum wlan_peer_type peer_type; + enum wlan_phymode phymode; + uint32_t max_rate; + enum wlan_peer_state state; + uint16_t seq_num; + int8_t rssi; + bool is_authenticated; +}; + +/** + * struct wlan_objmgr_peer_objmgr - object manager data of peer + * @vdev: VDEV pointer to which it is associated + * @ref_cnt: Ref count + * @ref_id_dbg: Array to track Ref count + * @print_cnt: Count to throttle Logical delete prints + */ +struct wlan_objmgr_peer_objmgr { + struct wlan_objmgr_vdev *vdev; + qdf_atomic_t ref_cnt; + qdf_atomic_t ref_id_dbg[WLAN_REF_ID_MAX]; + uint8_t print_cnt; +}; + +/** + * struct wlan_peer_activity -- peer inactivity info + * + */ +struct wlan_peer_activity { /*TODO */ + +}; + +/** + * struct wlan_objmgr_peer - PEER common object + * @psoc_peer: peer list node for psoc's qdf list + * @vdev_peer: peer list node for vdev's qdf list + * @macaddr[]: Peer MAC address + * @peer_mlme: Peer MLME common structure + * @peer_activity: peer activity + * @peer_objmgr: Peer Object manager common structure + * @peer_comp_priv_obj[]: Component's private object pointers + * @obj_status[]: status of each component object + * @obj_state: Status of Peer object + * @dp_handle: DP module handle + * @pdev_id: Pdev ID + * @peer_lock: Lock for access/update peer contents + */ +struct wlan_objmgr_peer { + qdf_list_node_t psoc_peer; + qdf_list_node_t vdev_peer; + uint8_t macaddr[QDF_MAC_ADDR_SIZE]; + uint8_t pdev_id; + struct wlan_objmgr_peer_mlme peer_mlme; + struct wlan_peer_activity peer_activity; + struct wlan_objmgr_peer_objmgr peer_objmgr; + void *peer_comp_priv_obj[WLAN_UMAC_MAX_COMPONENTS]; + QDF_STATUS obj_status[WLAN_UMAC_MAX_COMPONENTS]; + WLAN_OBJ_STATE obj_state; + void *dp_handle; + qdf_spinlock_t peer_lock; +}; + 
+/** + ** APIs to Create/Delete Global object APIs + */ +/** + * wlan_objmgr_peer_obj_create() - peer object create + * @vdev: VDEV object on which this peer gets created + * @peer_type: peer type (AP/STA) + * @macaddr: MAC address + * + * Creates Peer object, intializes with default values + * Attaches to psoc and vdev objects + * Invokes the registered notifiers to create component object + * + * Return: Handle to struct wlan_objmgr_peer on successful creation, + * NULL on Failure (on Mem alloc failure and Component objects + * Failure) + */ +struct wlan_objmgr_peer *wlan_objmgr_peer_obj_create( + struct wlan_objmgr_vdev *vdev, + enum wlan_peer_type type, + uint8_t macaddr[]); + +/** + * wlan_objmgr_peer_obj_delete() - peer object delete + * @peer: PEER object + * + * Deletes PEER object, removes it from PSOC's, VDEV's peer list + * Invokes the registered notifiers to destroy component objects + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_peer_obj_delete(struct wlan_objmgr_peer *peer); + +/** + ** APIs to attach/detach component objects + */ +/** + * wlan_objmgr_peer_component_obj_attach() - attach comp object to peer + * @peer: PEER object + * @id: Component id + * @comp_priv_obj: component's private object pointer + * @status: Component's private object creation status + * + * API to be used for attaching component object with PEER common object + * + * Return: SUCCESS on successful storing of component's object in common object + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_peer_component_obj_attach( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id, + void *comp_priv_obj, + QDF_STATUS status); + +/** + * wlan_objmgr_peer_component_obj_detach() - detach comp object from peer + * @peer: PEER object + * @id: Component id + * @comp_priv_obj: component's private object pointer + * + * API to be used for detaching component object with PEER common object + * + * Return: SUCCESS on successful removal of 
component's object from common + * object + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_peer_component_obj_detach( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id, + void *comp_priv_obj); + +/** + ** APIs for operations on peer objects + */ + +/** + * wlan_objmgr_trigger_peer_comp_priv_object_creation() - create + * peer comp object + * @peer: PEER object + * @id: Component id + * + * API to create component private object in run time, this would + * be used for features which gets enabled in run time + * + * Return: SUCCESS on successful creation + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_trigger_peer_comp_priv_object_creation( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id); + +/** + * wlan_objmgr_trigger_peer_comp_priv_object_deletion() - destroy + * peer comp object + * @peer: PEER object + * @id: Component id + * + * API to destroy component private object in run time, this would + * be used for features which gets disabled in run time + * + * Return: SUCCESS on successful deletion + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_trigger_peer_comp_priv_object_deletion( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id); + +/** + * wlan_objmgr_peer_get_comp_private_obj() - get peer component private object + * @peer: PEER object + * @id: Component id + * + * API to get component private object + * + * Return: void *ptr on SUCCESS + * NULL on Failure + */ +void *wlan_objmgr_peer_get_comp_private_obj( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id); + +/** + * wlan_peer_obj_lock() - Acquire PEER spinlock + * @peer: PEER object + * + * API to acquire PEER spin lock + * Parent lock should not be taken in child lock context + * but child lock can be taken in parent lock context + * (for ex: psoc lock can't be invoked in pdev/vdev/peer lock context) + * + * Return: void + */ +static inline void 
wlan_peer_obj_lock(struct wlan_objmgr_peer *peer) +{ + qdf_spin_lock_bh(&peer->peer_lock); +} + +/** + * wlan_peer_obj_unlock() - Release PEER spinlock + * @peer: PEER object + * + * API to Release PEER spin lock + * + * Return: void + */ +static inline void wlan_peer_obj_unlock(struct wlan_objmgr_peer *peer) +{ + qdf_spin_unlock_bh(&peer->peer_lock); +} + +/** + * DOC: Examples to use PEER ref count APIs + * + * In all the scenarios, the pair of API should be followed, + * otherwise it leads to a memory leak + * + * scenario 1: + * + * wlan_objmgr_peer_obj_create() + * ---- + * wlan_objmgr_peer_obj_delete() + * + * scenario 2: + * + * wlan_objmgr_peer_get_ref() + * ---- + * the operations which are done on + * peer object + * ---- + * wlan_objmgr_peer_release_ref() + * + * scenario 3: + * + * API to retrieve peer (xxx_get_peer_xxx()) + * ---- + * the operations which are done on + * peer object + * ---- + * wlan_objmgr_peer_release_ref() + */ + +/** + * wlan_objmgr_peer_get_ref() - increment ref count + * @peer: PEER object + * @id: Object Manager ref debug id + * + * API to increment ref count of peer + * + * Return: void + */ +void wlan_objmgr_peer_get_ref(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id); + +/** + * wlan_objmgr_peer_try_get_ref() - increment ref count, if allowed + * @peer: PEER object + * @id: Object Manager ref debug id + * + * API to increment ref count of peer, if object state is valid + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_objmgr_peer_try_get_ref(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id); + +/** + * wlan_objmgr_peer_release_ref() - decrement ref count + * @peer: PEER object + * @id: Object Manager ref debug id + * + * API to decrement ref count of peer, if ref count is 1, it initiates the + * peer deletion + * + * Return: void + */ +void wlan_objmgr_peer_release_ref(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id); + +/** + * wlan_psoc_peer_list_peek_head() - get head of psoc peer list + * @peer_list: 
qdf_list_t + * + * API to get the head peer of given peer (of psoc's peer list) + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: + * @peer: head peer + */ +static inline struct wlan_objmgr_peer *wlan_psoc_peer_list_peek_head( + qdf_list_t *peer_list) +{ + struct wlan_objmgr_peer *peer; + qdf_list_node_t *psoc_node = NULL; + + /* This API is invoked with lock acquired, do not add log prints */ + if (qdf_list_peek_front(peer_list, &psoc_node) != QDF_STATUS_SUCCESS) + return NULL; + + peer = qdf_container_of(psoc_node, struct wlan_objmgr_peer, psoc_peer); + return peer; +} + +/** + * wlan_psoc_peer_list_peek_active_head() - get active head of psoc peer list + * @peer_list: wlan_peer_list + * @hash_index: peer list hash index + * @dbg_id: Ref count debug module id + * + * API to get the head peer of given peer (of psoc's peer list) + * + * Return: + * @peer: head peer + */ +struct wlan_objmgr_peer *wlan_psoc_peer_list_peek_active_head( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_psoc_peer_list_peek_head_lock_ref() - get head of psoc peer list + * with ref and lock protected + * @peer_list: wlan_peer_list + * @hash_index: peer list hash index + * @dbg_id: Ref count debug module id + * + * API to get the head peer of given peer (of psoc's peer list) + * + * Return: + * @peer: head peer + */ +struct wlan_objmgr_peer *wlan_psoc_peer_list_peek_head_ref( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_vdev_peer_list_peek_head() - get head of vdev peer list + * @peer_list: qdf_list_t + * + * API to get the head peer of given peer (of vdev's peer list) + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: + * @peer: head peer + */ +static inline struct wlan_objmgr_peer *wlan_vdev_peer_list_peek_head( + qdf_list_t *peer_list) +{ + struct wlan_objmgr_peer *peer; + qdf_list_node_t *vdev_node = NULL; + + /* 
This API is invoked with lock acquired, do not add log prints */ + if (qdf_list_peek_front(peer_list, &vdev_node) != QDF_STATUS_SUCCESS) + return NULL; + + peer = qdf_container_of(vdev_node, struct wlan_objmgr_peer, vdev_peer); + return peer; +} + +/** + * wlan_vdev_peer_list_peek_active_head() - get active head of vdev peer list + * @vdev: VDEV object + * @peer_list: qdf_list_t + * @dbg_id: Ref count debug module id + * + * API to get the active head peer of given peer (of vdev's peer list) + * + * Return: + * @peer: active head peer + */ +struct wlan_objmgr_peer *wlan_vdev_peer_list_peek_active_head( + struct wlan_objmgr_vdev *vdev, + qdf_list_t *peer_list, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_peer_get_next_peer_of_vdev() - get next peer of vdev list + * @peer: PEER object + * + * API to get the next peer of given peer (of vdev's peer list) + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: + * @next_peer: PEER object + */ +static inline struct wlan_objmgr_peer *wlan_peer_get_next_peer_of_vdev( + qdf_list_t *peer_list, struct wlan_objmgr_peer *peer) +{ + struct wlan_objmgr_peer *peer_next; + qdf_list_node_t *node; + qdf_list_node_t *next_node = NULL; + + /* This API is invoked with lock acquired, do not add log prints */ + if (peer == NULL) + return NULL; + + node = &peer->vdev_peer; + if (qdf_list_peek_next(peer_list, node, &next_node) != + QDF_STATUS_SUCCESS) + return NULL; + + peer_next = qdf_container_of(next_node, struct wlan_objmgr_peer, + vdev_peer); + return peer_next; +} + +/** + * wlan_peer_get_next_active_peer_of_vdev() - get next active_peer of vdev list + * @vdev: VDEV object + * @peer_list: Peer object list + * @peer: PEER object + * @dbg_id: Ref count debug module id + * + * API to get the next active peer of given peer (of vdev's peer list) + * + * Return: + * @next_peer: PEER object + */ +struct wlan_objmgr_peer *wlan_peer_get_next_active_peer_of_vdev( + struct wlan_objmgr_vdev *vdev, + qdf_list_t 
*peer_list, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_peer_set_next_peer_of_vdev() - add peer to vdev peer list + * @peer: PEER object + * @new_peer: PEER object + * + * API to set as the next peer to given peer (of vdev's peer list) + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: void + */ +static inline void wlan_peer_set_next_peer_of_vdev(qdf_list_t *peer_list, + struct wlan_objmgr_peer *new_peer) +{ + /* This API is invoked with lock acquired, do not add log prints */ + /* set next peer with new peer */ + qdf_list_insert_back(peer_list, &new_peer->vdev_peer); + return; +} + +/** + * wlan_peer_get_next_peer_of_psoc() - get next peer to psoc peer list + * @peer_list: Peer list + * @peer: PEER object + * + * API to get the next peer of given peer (of psoc's peer list) + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: + * @next_peer: PEER object + */ +static inline struct wlan_objmgr_peer *wlan_peer_get_next_peer_of_psoc( + qdf_list_t *peer_list, struct wlan_objmgr_peer *peer) +{ + struct wlan_objmgr_peer *peer_next; + qdf_list_node_t *node = NULL; + qdf_list_node_t *next_node = NULL; + + /* This API is invoked with lock acquired, do not add log prints */ + if (peer == NULL) + return NULL; + + node = &peer->psoc_peer; + if (qdf_list_peek_next(peer_list, node, &next_node) != + QDF_STATUS_SUCCESS) + return NULL; + + peer_next = qdf_container_of(next_node, struct wlan_objmgr_peer, + psoc_peer); + return peer_next; +} + +/** + * wlan_peer_get_next_active_peer_of_psoc() - get next active peer to psoc peer + * list + * @peer_list: Peer list + * @hash_index: peer list hash index + * @peer: PEER object + * @dbg_id: Ref count debug module id + * + * API to get the next peer of given peer (of psoc's peer list) + * + * Return: + * @next_peer: PEER object + */ +struct wlan_objmgr_peer *wlan_peer_get_next_active_peer_of_psoc( + struct wlan_peer_list *peer_list, + uint8_t 
hash_index, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id); + + +/** + * wlan_peer_get_next_peer_of_psoc_ref() - get next peer to psoc peer list + * with lock and ref taken + * @peer_list: Peer list + * @hash_index: peer list hash index + * @peer: PEER object + * @dbg_id: Ref count debug module id + * + * API to get the next peer of given peer (of psoc's peer list) + * + * Return: + * @next_peer: PEER object + */ +struct wlan_objmgr_peer *wlan_peer_get_next_peer_of_psoc_ref( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_peer_set_next_peer_of_psoc() - add peer to psoc peer list + * @peer: PEER object + * @new_peer: PEER object + * + * API to set as the next peer to given peer (of psoc's peer list) + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: void + */ +static inline void wlan_peer_set_next_peer_of_psoc(qdf_list_t *peer_list, + struct wlan_objmgr_peer *new_peer) +{ + /* This API is invoked with lock acquired, do not add log prints */ + /* set next peer with new peer */ + qdf_list_insert_back(peer_list, &new_peer->psoc_peer); + return; +} + +/** + * wlan_peer_set_peer_type() - set peer type + * @peer: PEER object + * @peer_type: type of PEER + * + * API to set peer type + * + * Return: void + */ +static inline void wlan_peer_set_peer_type(struct wlan_objmgr_peer *peer, + enum wlan_peer_type type) +{ + peer->peer_mlme.peer_type = type; +} + +/** + * wlan_peer_get_peer_type() - get peer type + * @peer: PEER object + * + * API to get peer type + * + * Return: + * @peer_type: type of PEER + */ +static inline enum wlan_peer_type wlan_peer_get_peer_type( + struct wlan_objmgr_peer *peer) +{ + return peer->peer_mlme.peer_type; +} + +/** + * wlan_peer_set_phymode() - set phymode + * @peer: PEER object + * @phymode: phymode of peer + * + * API to set phymode + * + * Return: void + */ +static inline void wlan_peer_set_phymode(struct 
wlan_objmgr_peer *peer, + enum wlan_phymode phymode) +{ + peer->peer_mlme.phymode = phymode; +} + +/** + * wlan_peer_get_phymode() - get phymode + * @peer: PEER object + * + * API to get phymode + * + * Return: + * @phymode: phymode of PEER + */ +static inline enum wlan_phymode wlan_peer_get_phymode( + struct wlan_objmgr_peer *peer) +{ + return peer->peer_mlme.phymode; +} + + +/** + * wlan_peer_set_macaddr() - set mac addr + * @peer: PEER object + * @macaddr: MAC address + * + * API to set peer mac address + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: void + */ +static inline void wlan_peer_set_macaddr(struct wlan_objmgr_peer *peer, + uint8_t *macaddr) +{ + /* This API is invoked with lock acquired, do not add log prints */ + WLAN_ADDR_COPY(peer->macaddr, macaddr); +} + +/** + * wlan_peer_get_macaddr() - get mac addr + * @peer: PEER object + * + * API to get peer mac address + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: + * @macaddr: MAC address + */ +static inline uint8_t *wlan_peer_get_macaddr(struct wlan_objmgr_peer *peer) +{ + /* This API is invoked with lock acquired, do not add log prints */ + return peer->macaddr; +} + +/** + * wlan_peer_get_vdev() - get vdev + * @peer: PEER object + * + * API to get peer's vdev + * + * Return: + * @vdev: VDEV object + */ +static inline struct wlan_objmgr_vdev *wlan_peer_get_vdev( + struct wlan_objmgr_peer *peer) +{ + return peer->peer_objmgr.vdev; +} + +/** + * wlan_peer_set_vdev() - set vdev + * @peer: PEER object + * @vdev: VDEV object + * + * API to set peer's vdev + * + * Return: void + */ +static inline void wlan_peer_set_vdev(struct wlan_objmgr_peer *peer, + struct wlan_objmgr_vdev *vdev) +{ + peer->peer_objmgr.vdev = vdev; +} + +/** + * wlan_peer_mlme_flag_set() - mlme flag set + * @peer: PEER object + * @flag: flag to be set + * + * API to set flag in peer + * + * Return: void + */ +static inline void wlan_peer_mlme_flag_set(struct wlan_objmgr_peer 
*peer, + uint32_t flag) +{ + peer->peer_mlme.peer_flags |= flag; +} + +/** + * wlan_peer_mlme_flag_clear() - mlme flag clear + * @peer: PEER object + * @flag: flag to be cleared + * + * API to clear flag in peer + * + * Return: void + */ +static inline void wlan_peer_mlme_flag_clear(struct wlan_objmgr_peer *peer, + uint32_t flag) +{ + peer->peer_mlme.peer_flags &= ~flag; +} + +/** + * wlan_peer_mlme_flag_get() - mlme flag get + * @peer: PEER object + * @flag: flag to be checked + * + * API to know, whether particular flag is set in peer + * + * Return: 1 (for set) or 0 (for not set) + */ +static inline uint8_t wlan_peer_mlme_flag_get(struct wlan_objmgr_peer *peer, + uint32_t flag) +{ + return (peer->peer_mlme.peer_flags & flag) ? 1 : 0; +} + +/** + * wlan_peer_mlme_set_state() - peer mlme state + * @peer: PEER object + * @state: enum wlan_peer_state + * + * API to update the current peer state + * + * Return: void + */ +static inline void wlan_peer_mlme_set_state( + struct wlan_objmgr_peer *peer, + enum wlan_peer_state state) +{ + peer->peer_mlme.state = state; +} + +/** + * wlan_peer_mlme_set_auth_state() - peer mlme auth state + * @peer: PEER object + * @is_authenticated: true or false + * + * API to update the current peer auth state + * + * Return: void + */ +static inline void wlan_peer_mlme_set_auth_state( + struct wlan_objmgr_peer *peer, + bool is_authenticated) +{ + peer->peer_mlme.is_authenticated = is_authenticated; +} + +/** + * wlan_peer_mlme_get_state() - peer mlme state + * @peer: PEER object + * + * API to get peer state + * + * Return: enum wlan_peer_state + */ +static inline enum wlan_peer_state wlan_peer_mlme_get_state( + struct wlan_objmgr_peer *peer) +{ + return peer->peer_mlme.state; +} + +/** + * wlan_peer_mlme_get_auth_state() - peer mlme auth state + * @peer: PEER object + * + * API to get peer auth state + * + * Return: auth state true/false + */ +static inline bool wlan_peer_mlme_get_auth_state( + struct wlan_objmgr_peer *peer) +{ + return 
peer->peer_mlme.is_authenticated; +} + +/** + * wlan_peer_mlme_get_next_seq_num() - get peer mlme next sequence number + * @peer: PEER object + * + * API to get mlme peer next sequence number + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: peer mlme next sequence number + */ +static inline uint32_t wlan_peer_mlme_get_next_seq_num( + struct wlan_objmgr_peer *peer) +{ + /* This API is invoked with lock acquired, do not add log prints */ + if (peer->peer_mlme.seq_num < WLAN_MAX_SEQ_NUM) + peer->peer_mlme.seq_num++; + else + peer->peer_mlme.seq_num = 0; + + return peer->peer_mlme.seq_num; +} + +/** + * wlan_peer_mlme_get_seq_num() - get peer mlme sequence number + * @peer: PEER object + * + * API to get mlme peer sequence number + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: peer mlme sequence number + */ +static inline uint32_t wlan_peer_mlme_get_seq_num( + struct wlan_objmgr_peer *peer) +{ + /* This API is invoked with lock acquired, do not add log prints */ + return peer->peer_mlme.seq_num; +} + +/** + * wlan_peer_mlme_reset_seq_num() - reset peer mlme sequence number + * @peer: PEER object + * + * API to reset peer sequence number + * + * Caller need to acquire lock with wlan_peer_obj_lock() + * + * Return: void + */ +static inline void wlan_peer_mlme_reset_seq_num( + struct wlan_objmgr_peer *peer) +{ + /* This API is invoked with lock acquired, do not add log prints */ + peer->peer_mlme.seq_num = 0; +} + +/** + * wlan_peer_set_dp_handle() - set dp handle + * @peer: peer object pointer + * @dp_handle: Data path module handle + * + * Return: void + */ +static inline void wlan_peer_set_dp_handle(struct wlan_objmgr_peer *peer, + void *dp_handle) +{ + if (qdf_unlikely(!peer)) { + QDF_BUG(0); + return; + } + + peer->dp_handle = dp_handle; +} + +/** + * wlan_peer_get_dp_handle() - get dp handle + * @peer: peer object pointer + * + * Return: dp handle + */ +static inline void *wlan_peer_get_dp_handle(struct 
wlan_objmgr_peer *peer) +{ + if (qdf_unlikely(!peer)) { + QDF_BUG(0); + return NULL; + } + + return peer->dp_handle; +} + +/** + * wlan_peer_get_psoc() - get psoc + * @peer: PEER object + * + * API to get peer's psoc + * + * Return: PSOC object or NULL if the psoc can not be found + */ +static inline struct wlan_objmgr_psoc *wlan_peer_get_psoc( + struct wlan_objmgr_peer *peer) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_psoc *psoc; + + vdev = wlan_peer_get_vdev(peer); + if (!vdev) + return NULL; + + psoc = wlan_vdev_get_psoc(vdev); + + return psoc; +} + +/* + * wlan_peer_get_pdev_id() - get pdev id + * @peer: peer object pointer + * + * Return: pdev id + */ +static inline uint8_t wlan_peer_get_pdev_id(struct wlan_objmgr_peer *peer) +{ + return peer->pdev_id; +} + +/** + * wlan_peer_set_pdev_id() - set pdev id + * @peer: peer object pointer + * @pdev_id: pdev id + * + * Return: void + */ +static inline void wlan_peer_set_pdev_id(struct wlan_objmgr_peer *peer, + uint8_t pdev_id) +{ + peer->pdev_id = pdev_id; +} + +#endif /* _WLAN_OBJMGR_PEER_OBJ_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_psoc_obj.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_psoc_obj.h new file mode 100644 index 0000000000000000000000000000000000000000..0674f0fc7a1e6069f9dd54ea28fedce5e47f7f1c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_psoc_obj.h @@ -0,0 +1,1544 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Define the pSoc data structure of UMAC + * Public APIs to perform operations on Global objects + */ +#ifndef _WLAN_OBJMGR_PSOC_OBJ_H_ +#define _WLAN_OBJMGR_PSOC_OBJ_H_ + +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_debug.h" +#include "wlan_lmac_if_def.h" + +#define REG_DMN_CH144 0x0001 +#define REG_DMN_ENTREPRISE 0x0002 + + +/* fw_caps */ + /* CAPABILITY: WEP available */ +#define WLAN_SOC_C_WEP 0x00000001 + /* CAPABILITY: TKIP available */ +#define WLAN_SOC_C_TKIP 0x00000002 + /* CAPABILITY: AES OCB avail */ +#define WLAN_SOC_C_AES 0x00000004 + /* CAPABILITY: AES CCM avail */ +#define WLAN_SOC_C_AES_CCM 0x00000008 + /* CAPABILITY: 11n HT available */ +#define WLAN_SOC_C_HT 0x00000010 + /* CAPABILITY: CKIP available */ +#define WLAN_SOC_C_CKIP 0x00000020 + /* CAPABILITY: ATH FF avail */ +#define WLAN_SOC_C_FF 0x00000040 + /* CAPABILITY: ATH Turbo avail*/ +#define WLAN_SOC_C_TURBOP 0x00000080 + /* CAPABILITY: IBSS available */ +#define WLAN_SOC_C_IBSS 0x00000100 + /* CAPABILITY: Power mgmt */ +#define WLAN_SOC_C_PMGT 0x00000200 + /* CAPABILITY: HOSTAP avail */ +#define WLAN_SOC_C_HOSTAP 0x00000400 + /* CAPABILITY: Old Adhoc Demo */ +#define WLAN_SOC_C_AHDEMO 0x00000800 + /* CAPABILITY: tx power mgmt */ +#define WLAN_SOC_C_TXPMGT 0x00001000 + /* CAPABILITY: short slottime */ +#define WLAN_SOC_C_SHSLOT 0x00002000 + /* CAPABILITY: short preamble */ +#define WLAN_SOC_C_SHPREAMBLE 0x00004000 + /* CAPABILITY: monitor mode */ +#define WLAN_SOC_C_MONITOR 0x00008000 + /* CAPABILITY: TKIP MIC avail */ +#define WLAN_SOC_C_TKIPMIC 0x00010000 + /* CAPABILITY: ATH WAPI avail */ +#define WLAN_SOC_C_WAPI 0x00020000 + /* 
CONF: WDS auto Detect/DELBA */ +#define WLAN_SOC_C_WDS_AUTODETECT 0x00040000 + /* CAPABILITY: WPA1 avail */ +#define WLAN_SOC_C_WPA1 0x00080000 + /* CAPABILITY: WPA2 avail */ +#define WLAN_SOC_C_WPA2 0x00100000 + /* CAPABILITY: WPA1+WPA2 avail*/ +#define WLAN_SOC_C_WPA 0x00180000 + /* CAPABILITY: frame bursting */ +#define WLAN_SOC_C_BURST 0x00200000 + /* CAPABILITY: WME avail */ +#define WLAN_SOC_C_WME 0x00400000 + /* CAPABILITY: 4-addr support */ +#define WLAN_SOC_C_WDS 0x00800000 + /* CAPABILITY: TKIP MIC for QoS frame */ +#define WLAN_SOC_C_WME_TKIPMIC 0x01000000 + /* CAPABILITY: bg scanning */ +#define WLAN_SOC_C_BGSCAN 0x02000000 + /* CAPABILITY: UAPSD */ +#define WLAN_SOC_C_UAPSD 0x04000000 + /* CAPABILITY: enabled 11.h */ +#define WLAN_SOC_C_DOTH 0x08000000 + +/* XXX protection/barker? */ + /* CAPABILITY: crypto alg's */ +#define WLAN_SOC_C_CRYPTO 0x0000002f + +/* fw_caps_ext */ + /* CAPABILITY: fast channel change */ +#define WLAN_SOC_CEXT_FASTCC 0x00000001 + /* CAPABILITY: P2P */ +#define WLAN_SOC_CEXT_P2P 0x00000002 + /* CAPABILITY: Multi-Channel Operations */ +#define WLAN_SOC_CEXT_MULTICHAN 0x00000004 + /* CAPABILITY: the device supports perf and power offload */ +#define WLAN_SOC_CEXT_PERF_PWR_OFLD 0x00000008 + /* CAPABILITY: the device supports 11ac */ +#define WLAN_SOC_CEXT_11AC 0x00000010 + /* CAPABILITY: the device support acs channel hopping */ +#define WLAN_SOC_CEXT_ACS_CHAN_HOP 0x00000020 + /* CAPABILITY: the device support STA DFS */ +#define WLAN_SOC_CEXT_STADFS 0x00000040 + /* NSS offload capability */ +#define WLAN_SOC_CEXT_NSS_OFFLOAD 0x00000080 + /* SW cal support capability */ +#define WLAN_SOC_CEXT_SW_CAL 0x00000100 + /* Hybrid mode */ +#define WLAN_SOC_CEXT_HYBRID_MODE 0x00000200 + /* TT support */ +#define WLAN_SOC_CEXT_TT_SUPPORT 0x00000400 + /* WMI MGMT REF */ +#define WLAN_SOC_CEXT_WMI_MGMT_REF 0x00000800 + /* Wideband scan */ +#define WLAN_SOC_CEXT_WIDEBAND_SCAN 0x00001000 + +/* feature_flags */ + /* CONF: ATH FF enabled */ 
+#define WLAN_SOC_F_FF 0x00000001 + /* CONF: ATH Turbo enabled*/ +#define WLAN_SOC_F_TURBOP 0x00000002 + /* STATUS: promiscuous mode */ +#define WLAN_SOC_F_PROMISC 0x00000004 + /* STATUS: all multicast mode */ +#define WLAN_SOC_F_ALLMULTI 0x00000008 +/* NB: this is intentionally setup to be IEEE80211_CAPINFO_PRIVACY */ + /* STATUS: start IBSS */ +#define WLAN_SOC_F_SIBSS 0x00000010 +/* NB: this is intentionally setup to be IEEE80211_CAPINFO_SHORT_SLOTTIME */ + /* CONF: Power mgmt enable */ +#define WLAN_SOC_F_PMGTON 0x00000020 + /* CONF: IBSS creation enable */ +#define WLAN_SOC_F_IBSSON 0x00000040 + /* force chanswitch */ +#define WLAN_SOC_F_CHANSWITCH 0x00000080 + +/* ic_flags_ext and/or iv_flags_ext */ + /* CONF: enable country IE */ +#define WLAN_SOC_F_COUNTRYIE 0x00000100 + /* STATE: enable full bgscan completion */ +#define WLAN_SOC_F_BGSCAN 0x00000200 + /* CONF: enable U-APSD */ +#define WLAN_SOC_F_UAPSD 0x00000400 + /* STATUS: sleeping */ +#define WLAN_SOC_F_SLEEP 0x00000800 + /* Enable marking of dfs interference */ +#define WLAN_SOC_F_MARKDFS 0x00001000 + /* enable or disable s/w ccmp encrypt decrypt support */ +#define WLAN_SOC_F_CCMPSW_ENCDEC 0x00002000 + /* STATE: hibernating */ +#define WLAN_SOC_F_HIBERNATION 0x00004000 + /* CONF: desired country has been set */ +#define WLAN_SOC_F_DESCOUNTRY 0x00008000 + /* CONF: enable power capability or constraint IE */ +#define WLAN_SOC_F_PWRCNSTRIE 0x00010000 + /* STATUS: 11D in use */ +#define WLAN_SOC_F_DOT11D 0x00020000 + /* Beacon offload */ +#define WLAN_SOC_F_BCN_OFFLOAD 0x00040000 + /* QWRAP enable */ +#define WLAN_SOC_F_QWRAP_ENABLE 0x00080000 + /* LTEU support */ +#define WLAN_SOC_F_LTEU_SUPPORT 0x00100000 + /* BT coext support */ +#define WLAN_SOC_F_BTCOEX_SUPPORT 0x00200000 + /* HOST 80211 enable*/ +#define WLAN_SOC_F_HOST_80211_ENABLE 0x00400000 + +/* PSOC op flags */ + + /* Invalid VHT cap */ +#define WLAN_SOC_OP_VHT_INVALID_CAP 0x00000001 +/** + * struct wlan_objmgr_psoc_regulatory - Regulatory sub 
structure of PSOC + * @country_code: Country code + * @reg_dmn: Regulatory Domain + * @reg_flags: Regulatory flags + */ +struct wlan_objmgr_psoc_regulatory { + uint16_t country_code; + uint16_t reg_dmn; + uint16_t reg_flags; +}; + +/** + * struct wlan_objmgr_psoc_user_config - user configurations to + * be used by common modules + * @is_11d_support_enabled: Enable/disable 11d feature + * @is_11h_support_enabled: Enable/disable 11h feature + * @dot11_mode: Phy mode + * @skip_dfs_chnl_in_p2p_search: Skip Dfs Channel in case of P2P + * Search + * @indoor_channel_support: Enable/disable sap on indoor channel + * @optimize_chan_avoid_event: Optimize channel avoidance + * indication coming from firmware + * @band_capability: Preferred band (0:Both, 1:2G only, 2:5G only) + * @dual_mac_feature_disable: Disable Dual MAC feature + */ +struct wlan_objmgr_psoc_user_config { + bool is_11d_support_enabled; + bool is_11h_support_enabled; + uint8_t dot11_mode; + bool skip_dfs_chnl_in_p2p_search; + bool indoor_channel_support; + bool optimize_chan_avoid_event; + uint8_t band_capability; + uint32_t dual_mac_feature_disable; +}; + +/** + * struct wlan_objmgr_psoc_nif - HDD/OSIF specific sub structure of PSOC + * @phy_version: phy version, read in device probe + * @phy_type: OL/DA type + * @soc_fw_caps: FW capabilities + * @soc_fw_ext_caps: FW ext capabilities + * @soc_feature_caps:Feature capabilities + * @soc_op_flags: Flags to set/reset during operation + * @soc_hw_macaddr[]:HW MAC address + * @user_config: user config from OS layer + */ +struct wlan_objmgr_psoc_nif { + uint32_t phy_version; + WLAN_DEV_TYPE phy_type; + uint32_t soc_fw_caps; + uint32_t soc_fw_ext_caps; + uint32_t soc_feature_caps; + uint32_t soc_op_flags; + uint8_t soc_hw_macaddr[QDF_MAC_ADDR_SIZE]; + struct wlan_objmgr_psoc_user_config user_config; +}; + +/** + * struct wlan_objmgr_psoc_objmgr - psoc object manager sub structure + * @psoc_id: The PSOC's numeric Id + * @wlan_pdev_count: PDEV count + * 
@wlan_pdev_id_map:  PDEV id map, to allocate free ids
 * @wlan_vdev_count:   VDEV count
 * @max_vdev_count:    Max no. of VDEVs supported by this PSOC
 * @print_cnt:         Count to throttle logical-delete prints
 * @wlan_peer_count:   PEER count
 * @max_peer_count:    Max no. of peers supported by this PSOC
 * @temp_peer_count:   Temporary peer count
 * @wlan_pdev_list:    PDEV list
 * @wlan_vdev_list:    VDEV list
 * @wlan_vdev_id_map:  VDEV id map, to allocate free ids
 * @peer_list:         Peer list
 * @ref_cnt:           Ref count
 * @ref_id_dbg:        Array to track ref count per debug id
 * @qdf_dev:           QDF Device
 */
struct wlan_objmgr_psoc_objmgr {
	uint8_t psoc_id;
	uint8_t wlan_pdev_count;
	uint8_t wlan_pdev_id_map;
	uint8_t wlan_vdev_count;
	uint8_t max_vdev_count;
	uint8_t print_cnt;
	uint16_t wlan_peer_count;
	uint16_t max_peer_count;
	uint16_t temp_peer_count;
	struct wlan_objmgr_pdev *wlan_pdev_list[WLAN_UMAC_MAX_PDEVS];
	struct wlan_objmgr_vdev *wlan_vdev_list[WLAN_UMAC_PSOC_MAX_VDEVS];
	uint32_t wlan_vdev_id_map[2];
	struct wlan_peer_list peer_list;
	qdf_atomic_t ref_cnt;
	qdf_atomic_t ref_id_dbg[WLAN_REF_ID_MAX];
	qdf_device_t qdf_dev;
};

/**
 * struct wlan_soc_southbound_cb - Southbound callbacks
 * @tx_ops: contains southbound tx callbacks
 * @rx_ops: contains southbound rx callbacks
 */
struct wlan_soc_southbound_cb {
	struct wlan_lmac_if_tx_ops tx_ops;
	struct wlan_lmac_if_rx_ops rx_ops;
};

/**
 * struct wlan_concurrency_info - structure for concurrency info
 * (currently an empty placeholder)
 */
struct wlan_concurrency_info {
};

/**
 * struct wlan_soc_timer - structure for soc timer
 * (currently an empty placeholder)
 */
struct wlan_soc_timer {
};

/**
 * struct wlan_objmgr_psoc - PSOC common object
 * @soc_reg:           regulatory sub structure
 * @soc_nif:           nif sub structure
 * @soc_objmgr:        object manager sub structure
 * @soc_cb:            south bound callbacks
 * @soc_timer:         soc timer for inactivity
 * @soc_concurrency:   concurrency info
 * @wlan_active_vdevs: List of active VDEVs
 * @soc_comp_priv_obj[]:
component private object pointers
 * @obj_status[]:  component object status
 * @obj_state:     object state
 * @tgt_if_handle: target interface handle
 * @dp_handle:     DP module handle
 * @psoc_lock:     psoc lock
 */
struct wlan_objmgr_psoc {
	struct wlan_objmgr_psoc_regulatory soc_reg;
	struct wlan_objmgr_psoc_nif  soc_nif;
	struct wlan_objmgr_psoc_objmgr soc_objmgr;
	struct wlan_soc_southbound_cb soc_cb;
	struct wlan_soc_timer soc_timer;
	struct wlan_concurrency_info soc_concurrency; /*TODO */
	uint8_t wlan_active_vdevs[WLAN_UMAC_PSOC_MAX_VDEVS];
	void *soc_comp_priv_obj[WLAN_UMAC_MAX_COMPONENTS];
	QDF_STATUS obj_status[WLAN_UMAC_MAX_COMPONENTS];
	WLAN_OBJ_STATE obj_state;
	void *tgt_if_handle;
	void *dp_handle;
	qdf_spinlock_t psoc_lock;
};

/**
 * struct wlan_psoc_host_hal_reg_capabilities_ext: Below are Reg caps per PHY.
 * Please note PHY ID starts with 0.
 * @phy_id:                phy id starts with 0.
 * @eeprom_reg_domain:     regdomain value specified in EEPROM
 * @eeprom_reg_domain_ext: regdomain
 * @regcap1:               CAP1 capabilities bit map, see REGDMN_CAP1_ defines
 * @regcap2:               REGDMN EEPROM CAP, see REGDMN_EEPROM_EEREGCAP_
 *                         defines
 * @wireless_modes:        REGDMN MODE, see REGDMN_MODE_ enum
 * @low_2ghz_chan:         2G channel low
 * @high_2ghz_chan:        2G channel High
 * @low_5ghz_chan:         5G channel low
 * @high_5ghz_chan:        5G channel High
 */
struct wlan_psoc_host_hal_reg_capabilities_ext {
	uint32_t phy_id;
	uint32_t eeprom_reg_domain;
	uint32_t eeprom_reg_domain_ext;
	uint32_t regcap1;
	uint32_t regcap2;
	uint32_t wireless_modes;
	uint32_t low_2ghz_chan;
	uint32_t high_2ghz_chan;
	uint32_t low_5ghz_chan;
	uint32_t high_5ghz_chan;
};

/**
 ** APIs to Create/Delete Global object APIs
 */
/**
 * wlan_objmgr_psoc_obj_create() - psoc object create
 * @phy_version: device id (from probe)
 * @dev_type: Offload/DA
 *
 * Creates PSOC object, initializes with default values
 * Invokes the registered notifiers to create component object
 *
 * Return:
Handle to struct wlan_objmgr_psoc on successful creation, + * NULL on Failure (on Mem alloc failure and Component objects + * Failure) + */ +struct wlan_objmgr_psoc *wlan_objmgr_psoc_obj_create(uint32_t phy_version, + WLAN_DEV_TYPE dev_type); + +/** + * wlan_objmgr_psoc_obj_delete() - psoc object delete + * @psoc: PSOC object + * + * Logically deletes PSOC object, + * Once all the references are released, object manager invokes the registered + * notifiers to destroy component objects + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_psoc_obj_delete(struct wlan_objmgr_psoc *psoc); + +/** + ** APIs to attach/detach component objects + */ + +/** + * wlan_objmgr_psoc_component_obj_attach() - psoc comp object attach + * @psoc: PSOC object + * @id: Component id + * @comp_priv_obj: component's private object pointer + * @status: Component's private object creation status + * + * API to be used for attaching component object with PSOC common object + * + * Return: SUCCESS on successful storing of component's object in common object + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_psoc_component_obj_attach( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id, + void *comp_priv_obj, + QDF_STATUS status); + +/** + * wlan_objmgr_psoc_component_obj_detach() - psoc comp object detach + * @psoc: PSOC object + * @id: Component id + * @comp_priv_obj: component's private object pointer + * + * API to be used for detaching component object with PSOC common object + * + * Return: SUCCESS on successful removal of component's object from common + * object + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_psoc_component_obj_detach( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id, + void *comp_priv_obj); + +/** + ** APIs to operations on psoc objects + */ +typedef void (*wlan_objmgr_op_handler)(struct wlan_objmgr_psoc *psoc, + void *object, + void *arg); + +/** + * 
wlan_objmgr_iterate_obj_list() - iterate through all psoc objects + * (CREATED state) + * @psoc: PSOC object + * @obj_type: PDEV_OP/VDEV_OP/PEER_OP + * @handler: the handler will be called for each object of requested type + * the handler should be implemented to perform required operation + * @arg: agruments passed by caller + * @lock_free_op: its obsolete + * @dbg_id: id of the caller + * + * API to be used for performing the operations on all PDEV/VDEV/PEER objects + * of psoc + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_iterate_obj_list( + struct wlan_objmgr_psoc *psoc, + enum wlan_objmgr_obj_type obj_type, + wlan_objmgr_op_handler handler, + void *arg, uint8_t lock_free_op, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_iterate_obj_list_all() - iterate through all psoc objects + * @psoc: PSOC object + * @obj_type: PDEV_OP/VDEV_OP/PEER_OP + * @handler: the handler will be called for each object of requested type + * the handler should be implemented to perform required operation + * @arg: agruments passed by caller + * @lock_free_op: its obsolete + * @dbg_id: id of the caller + * + * API to be used for performing the operations on all PDEV/VDEV/PEER objects + * of psoc + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_iterate_obj_list_all( + struct wlan_objmgr_psoc *psoc, + enum wlan_objmgr_obj_type obj_type, + wlan_objmgr_op_handler handler, + void *arg, uint8_t lock_free_op, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_free_all_objects_per_psoc() - free all psoc objects + * @psoc: PSOC object + * + * API to be used free all the objects(pdev/vdev/peer) of psoc + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_free_all_objects_per_psoc( + struct wlan_objmgr_psoc *psoc); + +/** + * wlan_objmgr_trigger_psoc_comp_priv_object_creation() - create + * psoc comp object + * @psoc: PSOC object + * @id: Component id + * + * API to create component private object in run time, this would + * be used for features 
which gets enabled in run time + * + * Return: SUCCESS on successful creation + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_trigger_psoc_comp_priv_object_creation( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id); + +/** + * wlan_objmgr_trigger_psoc_comp_priv_object_deletion() - destroy + * psoc comp object + * @psoc: PSOC object + * @id: Component id + * + * API to destroy component private object in run time, this would + * be used for features which gets disabled in run time + * + * Return: SUCCESS on successful deletion + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_trigger_psoc_comp_priv_object_deletion( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id); + +/** + * wlan_objmgr_get_peer_by_mac() - find peer from psoc's peer list + * @psoc: PSOC object + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find peer object pointer by MAC addr + * + * This API increments the ref count of the peer object internally, the + * caller has to invoke the wlan_objmgr_peer_release_ref() to decrement + * ref count + * + * Return: peer pointer + * NULL on FAILURE + */ +struct wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac( + struct wlan_objmgr_psoc *psoc, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_peer() - find peer from psoc's peer list + * @psoc: PSOC object + * @pdev_id: Pdev id + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find peer object pointer by MAC addr and pdev id + * + * This API increments the ref count of the peer object internally, the + * caller has to invoke the wlan_objmgr_peer_release_ref() to decrement + * ref count + * + * Return: peer pointer + * NULL on FAILURE + */ +struct wlan_objmgr_peer *wlan_objmgr_get_peer( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_peer_nolock() - find peer from psoc's 
peer list (lock free) + * @psoc: PSOC object + * @pdev_id: Pdev id + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find peer object pointer by MAC addr + * + * This API increments the ref count of the peer object internally, the + * caller has to invoke the wlan_objmgr_peer_release_ref() to decrement + * ref count + * + * Return: peer pointer + * NULL on FAILURE + */ +struct wlan_objmgr_peer *wlan_objmgr_get_peer_nolock( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_peer_logically_deleted() - find peer + * from psoc's peer list + * @psoc: PSOC object + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find peer object pointer of logically deleted peer + * + * This API increments the ref count of the peer object internally, the + * caller has to invoke the wlan_objmgr_peer_release_ref() to decrement + * ref count + * + * Return: peer pointer + * NULL on FAILURE + */ +struct wlan_objmgr_peer *wlan_objmgr_get_peer_logically_deleted( + struct wlan_objmgr_psoc *psoc, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_peer_no_state() - find peer from psoc's peer list + * @psoc: PSOC object + * @pdev_id: Pdev id + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find peer object pointer by MAC addr and pdev id, + * ignores the state check + * + * This API increments the ref count of the peer object internally, the + * caller has to invoke the wlan_objmgr_peer_release_ref() to decrement + * ref count + * + * Return: peer pointer + * NULL on FAILURE + */ +struct wlan_objmgr_peer *wlan_objmgr_get_peer_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_populate_logically_deleted_peerlist_by_mac_n_vdev() - get peer from + * psoc peer list using + * mac and vdev + * self mac + * @psoc: PSOC object + * @pdev_id: Pdev id + * 
@bssid: BSSID address + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find peer object pointer by MAC addr, vdev self mac + * address and pdev id for a node that is logically in deleted state + * + * This API increments the ref count of the peer object internally, the + * caller has to invoke the wlan_objmgr_peer_release_ref() to decrement + * ref count + * + * Return: List of peer pointers + * NULL on FAILURE + */ +qdf_list_t *wlan_objmgr_populate_logically_deleted_peerlist_by_mac_n_vdev( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_peer_by_mac_n_vdev() - find peer from psoc's peer list + * using mac address and bssid + * @psoc: PSOC object + * @pdev_id: Pdev id + * @bssid: MAC address of AP its associated + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find peer object pointer by MAC addr and vdev self mac address + * and pdev id + * + * This API increments the ref count of the peer object internally, the + * caller has to invoke the wlan_objmgr_peer_release_ref() to decrement + * ref count + * + * Return: peer pointer + * NULL on FAILURE + */ +struct wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac_n_vdev( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_peer_by_mac_n_vdev_no_state() - find peer from psoc's peer + * list using mac address and bssid + * @psoc: PSOC object + * @pdev_id: Pdev id + * @bssid: MAC address of AP its associated + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find peer object pointer by MAC addr, vdev self mac address, + * and pdev id ,ignores the state + * + * This API increments the ref count of the peer object internally, the + * caller has to invoke the wlan_objmgr_peer_release_ref() to decrement + * ref count + * + * Return: peer pointer + * NULL on FAILURE + */ +struct 
wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac_n_vdev_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_pdev_by_id() - retrieve pdev by id + * @psoc: PSOC object + * @id: pdev id + * @dbg_id: id of the caller + * + * API to find pdev object pointer by pdev id + * + * This API increments the ref count of the pdev object internally, the + * caller has to invoke the wlan_objmgr_pdev_release_ref() to decrement + * ref count + * + * Return: pdev pointer + * NULL on FAILURE + */ +struct wlan_objmgr_pdev *wlan_objmgr_get_pdev_by_id( + struct wlan_objmgr_psoc *psoc, uint8_t id, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_pdev_by_id_no_state() - retrieve pdev by id + * @psoc: PSOC object + * @id: pdev id + * @dbg_id: id of the caller + * + * API to find pdev object pointer by pdev id, Ignores the state check + * + * This API increments the ref count of the pdev object internally, the + * caller has to invoke the wlan_objmgr_pdev_release_ref() to decrement + * ref count + * + * Return: pdev pointer + * NULL on FAILURE + */ +struct wlan_objmgr_pdev *wlan_objmgr_get_pdev_by_id_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t id, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_pdev_by_macaddr() - retrieve pdev by macaddr + * @psoc: PSOC object + * @macaddr: MAC address + * @dbg_id: id of the caller + * + * API to find pdev object pointer by pdev macaddr + * + * This API increments the ref count of the pdev object internally, the + * caller has to invoke the wlan_objmgr_pdev_release_ref() to decrement + * ref count + * + * Return: pdev pointer + * NULL on FAILURE + */ +struct wlan_objmgr_pdev *wlan_objmgr_get_pdev_by_macaddr( + struct wlan_objmgr_psoc *psoc, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_pdev_by_macaddr_no_state() - retrieve pdev by macaddr + * @psoc: PSOC object + * @macaddr: MAC address + * 
@dbg_id: id of the caller + * + * API to find pdev object pointer by pdev macaddr, ignores the state check + * + * This API increments the ref count of the pdev object internally, the + * caller has to invoke the wlan_objmgr_pdev_release_ref() to decrement + * ref count + * + * Return: pdev pointer + * NULL on FAILURE + */ +struct wlan_objmgr_pdev *wlan_objmgr_get_pdev_by_macaddr_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_vdev_by_opmode_from_psoc() - retrieve vdev by opmode + * @psoc: PSOC object + * @opmode: vdev operating mode + * @dbg_id: id of the caller + * + * API to find vdev object pointer by vdev operating mode from psoc + * + * This API increments the ref count of the vdev object internally, the + * caller has to invoke the wlan_objmgr_vdev_release_ref() to decrement + * ref count + * + * Return: vdev pointer + * NULL on FAILURE + */ +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_opmode_from_psoc( + struct wlan_objmgr_psoc *psoc, + enum QDF_OPMODE opmode, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_vdev_by_id_from_psoc() - retrieve vdev by id + * @psoc: PSOC object + * @id: vdev id + * @dbg_id: id of the caller + * + * API to find vdev object pointer by vdev id from psoc + * + * This API increments the ref count of the vdev object internally, the + * caller has to invoke the wlan_objmgr_vdev_release_ref() to decrement + * ref count + * + * Return: vdev pointer + * NULL on FAILURE + */ +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_psoc( + struct wlan_objmgr_psoc *psoc, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_vdev_by_id_from_psoc_no_state() - retrieve vdev by id + * @psoc: PSOC object + * @id: vdev id + * @dbg_id: id of the caller + * + * API to find vdev object pointer by vdev id from psoc, ignores the + * state check + * + * This API increments the ref count of the vdev object internally, the + * caller has to 
invoke the wlan_objmgr_vdev_release_ref() to decrement + * ref count + * + * Return: vdev pointer + * NULL on FAILURE + */ +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_psoc_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_vdev_by_macaddr_from_psoc() - retrieve vdev by macaddr + * @psoc: PSOC object + * @pdev_id: Pdev id + * @macaddr: macaddr + * @dbg_id: id of the caller + * + * API to find vdev object pointer by vdev macaddr from pdev + * + * This API increments the ref count of the vdev object internally, the + * caller has to invoke the wlan_objmgr_vdev_release_ref() to decrement + * ref count + * + * Return: vdev pointer + * NULL on FAILURE + */ +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_psoc( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_get_vdev_by_macaddr_from_psoc_no_state() - retrieve vdev by + * macaddr + * @psoc: PSOC object + * @pdev_id: Pdev id + * @macaddr: macaddr + * @dbg_id: id of the caller + * + * API to find vdev object pointer by vdev macaddr from psoc, ignores the state + * check + * + * This API increments the ref count of the vdev object internally, the + * caller has to invoke the wlan_objmgr_vdev_release_ref() to decrement + * ref count + * + * Return: vdev pointer + * NULL on FAILURE + */ +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_psoc_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_psoc_obj_lock() - Acquire PSOC spinlock + * @psoc: PSOC object + * + * API to acquire PSOC lock + * Parent lock should not be taken in child lock context + * but child lock can be taken in parent lock context + * (for ex: psoc lock can't be invoked in pdev/vdev/peer lock context) + * + * Return: void + */ +static inline void wlan_psoc_obj_lock(struct wlan_objmgr_psoc *psoc) +{ + 
qdf_spin_lock_bh(&psoc->psoc_lock); +} + +/** + * wlan_psoc_obj_unlock() - Release PSOC spinlock + * @psoc: PSOC object + * + * API to Release PSOC lock + * + * Return: void + */ +static inline void wlan_psoc_obj_unlock(struct wlan_objmgr_psoc *psoc) +{ + qdf_spin_unlock_bh(&psoc->psoc_lock); +} + +/** + * wlan_psoc_set_nif_phy_version() - set nif phy version + * @psoc: PSOC object + * @phy_ver: phy version + * + * API to set nif phy version in psoc + * + * Return: void + */ +static inline void wlan_psoc_set_nif_phy_version(struct wlan_objmgr_psoc *psoc, + uint32_t phy_ver) +{ + psoc->soc_nif.phy_version = phy_ver; +} + +/** + * wlan_psoc_get_nif_phy_version() - get nif phy version + * @psoc: PSOC object + * + * API to set nif phy version in psoc + * + * Return: @phy_ver: phy version + */ +static inline uint32_t wlan_psoc_get_nif_phy_version( + struct wlan_objmgr_psoc *psoc) +{ + if (psoc == NULL) + return (uint32_t)-1; + + return psoc->soc_nif.phy_version; +} + +/** + * wlan_psoc_set_dev_type() - set dev type + * @psoc: PSOC object + * @phy_type: phy type (OL/DA) + * + * API to set dev type in psoc + * + * Return: void + */ +static inline void wlan_psoc_set_dev_type(struct wlan_objmgr_psoc *psoc, + WLAN_DEV_TYPE phy_type) +{ + psoc->soc_nif.phy_type = phy_type; +} + +/** + * wlan_objmgr_psoc_get_dev_type - get dev type + * @psoc: PSOC object + * + * API to get dev type in psoc + * + * Return: phy type (OL/DA) + */ +static inline WLAN_DEV_TYPE wlan_objmgr_psoc_get_dev_type( + struct wlan_objmgr_psoc *psoc) +{ + if (psoc == NULL) + return (uint32_t)-1; + + return psoc->soc_nif.phy_type; +} + +/** + * wlan_psoc_nif_fw_cap_set() - set fw caps + * @psoc: PSOC object + * @cap: capability flag to be set + * + * API to set fw caps in psoc + * + * Return: void + */ +static inline void wlan_psoc_nif_fw_cap_set(struct wlan_objmgr_psoc *psoc, + uint32_t cap) +{ + psoc->soc_nif.soc_fw_caps |= cap; +} + +/** + * wlan_psoc_nif_fw_cap_clear() - clear fw caps + * @psoc: PSOC 
object
 * @cap: capability flag to be cleared
 *
 * API to clear fw caps in psoc
 *
 * Return: void
 */
static inline void wlan_psoc_nif_fw_cap_clear(struct wlan_objmgr_psoc *psoc,
					uint32_t cap)
{
	psoc->soc_nif.soc_fw_caps &= ~cap;
}

/**
 * wlan_psoc_nif_fw_cap_get() - get fw caps
 * @psoc: PSOC object
 * @cap: capability flag to be checked
 *
 * API to know, whether particular fw caps flag is set in psoc
 *
 * Return: 1 (for set) or 0 (for not set)
 */
static inline uint8_t wlan_psoc_nif_fw_cap_get(struct wlan_objmgr_psoc *psoc,
					uint32_t cap)
{
	return (psoc->soc_nif.soc_fw_caps & cap) ? 1 : 0;
}

/**
 * wlan_psoc_nif_fw_ext_cap_set() - set fw ext caps
 * @psoc: PSOC object
 * @ext_cap: capability flag to be set
 *
 * API to set fw ext caps in psoc
 *
 * Return: void
 */
static inline void wlan_psoc_nif_fw_ext_cap_set(struct wlan_objmgr_psoc *psoc,
					uint32_t ext_cap)
{
	psoc->soc_nif.soc_fw_ext_caps |= ext_cap;
}

/**
 * wlan_psoc_nif_fw_ext_cap_clear() - clear fw ext caps
 * @psoc: PSOC object
 * @ext_cap: capability flag to be cleared
 *
 * API to clear fw ext caps in psoc
 *
 * Return: void
 */
static inline void wlan_psoc_nif_fw_ext_cap_clear(struct wlan_objmgr_psoc *psoc,
					uint32_t ext_cap)
{
	psoc->soc_nif.soc_fw_ext_caps &= ~ext_cap;
}

/**
 * wlan_psoc_nif_fw_ext_cap_get() - get fw ext caps
 * @psoc: PSOC object
 * @ext_cap: capability flag to be checked
 *
 * API to know, whether particular fw ext caps flag is set in psoc
 *
 * Return: 1 (for set) or 0 (for not set)
 */
static inline uint8_t wlan_psoc_nif_fw_ext_cap_get(
		struct wlan_objmgr_psoc *psoc, uint32_t ext_cap)
{
	return (psoc->soc_nif.soc_fw_ext_caps & ext_cap) ? 1 : 0;
}

/**
 * wlan_psoc_nif_feat_cap_set() - set feature caps
 * @psoc: PSOC object
 * @feat_cap: feature flag to be set
 *
 * API to set feature caps in psoc
 *
 * Return: void
 */
static inline void wlan_psoc_nif_feat_cap_set(struct wlan_objmgr_psoc *psoc,
					uint32_t feat_cap)
{
	psoc->soc_nif.soc_feature_caps |= feat_cap;
}

/**
 * wlan_psoc_nif_feat_cap_clear() - clear feature caps
 * @psoc: PSOC object
 * @feat_cap: feature flag to be cleared
 *
 * API to clear feature caps in psoc
 *
 * Return: void
 */
static inline void wlan_psoc_nif_feat_cap_clear(struct wlan_objmgr_psoc *psoc,
				uint32_t feat_cap)
{
	psoc->soc_nif.soc_feature_caps &= ~feat_cap;
}

/**
 * wlan_psoc_nif_feat_cap_get() - get feature caps
 * @psoc: PSOC object
 * @feat_cap: feature flag to be checked
 *
 * API to know, whether particular feature cap flag is set in psoc
 *
 * Return: 1 (for set) or 0 (for not set)
 */
static inline uint8_t wlan_psoc_nif_feat_cap_get(struct wlan_objmgr_psoc *psoc,
							uint32_t feat_cap)
{
	return (psoc->soc_nif.soc_feature_caps & feat_cap) ? 1 : 0;
}

/**
 * wlan_psoc_nif_op_flag_get() - get op flags
 * @psoc: PSOC object
 * @flag: op flag to be checked
 *
 * API to know, whether particular op flag is set in psoc
 *
 * Return: 1 (for set) or 0 (for not set)
 */
static inline uint8_t wlan_psoc_nif_op_flag_get(struct wlan_objmgr_psoc *psoc,
						uint32_t flag)
{
	return (psoc->soc_nif.soc_op_flags & flag) ? 1 : 0;
}

/**
 * wlan_psoc_nif_op_flag_set() - set op flag
 * @psoc: PSOC object
 * @flag: op flag to be set
 *
 * API to set op flag in psoc
 *
 * Return: void
 */
static inline void wlan_psoc_nif_op_flag_set(struct wlan_objmgr_psoc *psoc,
						uint32_t flag)
{
	psoc->soc_nif.soc_op_flags |= flag;
}

/**
 * wlan_psoc_nif_op_flag_clear() - clear op flag
 * @psoc: PSOC object
 * @flag: op flag to be cleared
 *
 * API to clear op flag in psoc
 *
 * Return: void
 */
static inline void wlan_psoc_nif_op_flag_clear(struct wlan_objmgr_psoc *psoc,
						uint32_t flag)
{
	psoc->soc_nif.soc_op_flags &= ~flag;
}

/**
 * wlan_psoc_set_hw_macaddr() - set hw mac addr
 * @psoc: PSOC object
 * @macaddr: hw macaddr
 *
 * API to set hw macaddr of psoc
 *
 * Caller need to acquire lock with wlan_psoc_obj_lock()
 *
 * Return: void
 */
static inline void wlan_psoc_set_hw_macaddr(struct wlan_objmgr_psoc *psoc,
					uint8_t *macaddr)
{
	/* This API is invoked with lock acquired, do not add log prints */
	if (psoc != NULL)
		WLAN_ADDR_COPY(psoc->soc_nif.soc_hw_macaddr, macaddr);
}

/**
 * wlan_psoc_get_hw_macaddr() - get hw macaddr
 * @psoc: PSOC object
 *
 * API to get hw macaddr of psoc
 *
 * Return: hw macaddr, or NULL if @psoc is NULL
 */
static inline uint8_t *wlan_psoc_get_hw_macaddr(struct wlan_objmgr_psoc *psoc)
{
	if (psoc == NULL)
		return NULL;

	return psoc->soc_nif.soc_hw_macaddr;
}

/**
 * wlan_objmgr_psoc_get_comp_private_obj(): API to retrieve component object
 * @psoc: Psoc pointer
 * @id: component id
 *
 * This API is used to get the component private object pointer tied to the
 * corresponding psoc object
 *
 * Return: Component private object
 */
void *wlan_objmgr_psoc_get_comp_private_obj(struct wlan_objmgr_psoc *psoc,
					enum wlan_umac_comp_id id);
/**
 * wlan_psoc_get_pdev_count() - get pdev count for psoc
 * @psoc: PSOC object
 *
 * API to get number of pdev's attached to the psoc
 *
 * Return: number of pdev's
 */
static inline uint8_t wlan_psoc_get_pdev_count(struct wlan_objmgr_psoc *psoc)
{
	if (psoc == NULL)
		return 0;

	return psoc->soc_objmgr.wlan_pdev_count;
}

/**
 * wlan_psoc_set_tgt_if_handle(): API to set target if handle in psoc object
 * @psoc: Psoc pointer
 * @tgt_if_handle: target interface handle
 *
 * API to set target interface handle in psoc object
 *
 * Return: None
 */
static inline void wlan_psoc_set_tgt_if_handle(struct wlan_objmgr_psoc *psoc,
			void *tgt_if_handle)
{
	if (psoc == NULL)
		return;

	psoc->tgt_if_handle = tgt_if_handle;
}

/**
 * wlan_psoc_get_tgt_if_handle(): API to get target interface handle
 * @psoc: Psoc pointer
 *
 * API to get target interface handle from psoc object
 *
 * Return: target interface handle, or NULL if @psoc is NULL
 */
static inline void *wlan_psoc_get_tgt_if_handle(struct wlan_objmgr_psoc *psoc)
{
	if (psoc == NULL)
		return NULL;

	return psoc->tgt_if_handle;
}

/**
 * wlan_psoc_get_qdf_dev(): API to get qdf device
 * @psoc: Psoc pointer
 *
 * API to get qdf device from psoc object
 *
 * Return: qdf_device_t, or NULL if @psoc is NULL
 */
static inline qdf_device_t wlan_psoc_get_qdf_dev(
			struct wlan_objmgr_psoc *psoc)
{
	if (psoc == NULL)
		return NULL;

	return psoc->soc_objmgr.qdf_dev;
}

/**
 * wlan_psoc_set_qdf_dev(): API to set qdf device
 * @psoc: Psoc pointer
 * @dev: qdf device
 *
 * API to set qdf device in psoc object
 *
 * Return: None
 */
static inline void wlan_psoc_set_qdf_dev(
		struct wlan_objmgr_psoc *psoc,
		qdf_device_t dev)
{
	if (psoc == NULL)
		return;

	psoc->soc_objmgr.qdf_dev = dev;
}

/**
 * wlan_psoc_set_max_vdev_count() - set psoc max vdev count
 * @psoc: PSOC object
 * @max_vdev_count: Max vdev count
 *
 * API to set Max vdev count
 *
 * Return: void
 */
static inline void wlan_psoc_set_max_vdev_count(struct wlan_objmgr_psoc *psoc,
						uint8_t max_vdev_count)
{
	psoc->soc_objmgr.max_vdev_count = max_vdev_count;
}

/**
 * wlan_psoc_get_max_vdev_count() - get
 psoc max vdev count
 * @psoc: PSOC object
 *
 * API to get Max vdev count
 *
 * Return: Max vdev count
 */
static inline uint8_t wlan_psoc_get_max_vdev_count(
					struct wlan_objmgr_psoc *psoc)
{
	return psoc->soc_objmgr.max_vdev_count;
}

/**
 * wlan_psoc_set_max_peer_count() - set psoc max peer count
 * @psoc: PSOC object
 * @max_peer_count: Max peer count
 *
 * API to set Max peer count
 *
 * Return: void
 */
static inline void wlan_psoc_set_max_peer_count(struct wlan_objmgr_psoc *psoc,
						uint16_t max_peer_count)
{
	psoc->soc_objmgr.max_peer_count = max_peer_count;
}

/**
 * wlan_psoc_get_max_peer_count() - get psoc max peer count
 * @psoc: PSOC object
 *
 * API to get Max peer count
 *
 * Return: Max peer count
 */
static inline uint16_t wlan_psoc_get_max_peer_count(
					struct wlan_objmgr_psoc *psoc)
{
	return psoc->soc_objmgr.max_peer_count;
}

/**
 * wlan_psoc_get_peer_count() - get psoc peer count
 * @psoc: PSOC object
 *
 * API to get peer count
 *
 * Return: peer count
 */
static inline uint16_t wlan_psoc_get_peer_count(
					struct wlan_objmgr_psoc *psoc)
{
	return psoc->soc_objmgr.wlan_peer_count;
}


/**
 * DOC: Examples to use PSOC ref count APIs
 *
 * In all the scenarios, the pair of API should be followed
 * otherwise it leads to a memory leak
 *
 * scenario 1:
 *
 *     wlan_objmgr_psoc_obj_create()
 *     ----
 *     wlan_objmgr_psoc_obj_delete()
 *
 * scenario 2:
 *
 *     wlan_objmgr_psoc_get_ref()
 *     ----
 *     the operations which are done on
 *     psoc object
 *     ----
 *     wlan_objmgr_psoc_release_ref()
 */

/**
 * wlan_objmgr_psoc_get_ref() - increment ref count
 * @psoc: PSOC object
 * @id: Object Manager ref debug id
 *
 * API to increment ref count of psoc
 *
 * Return: void
 */
void wlan_objmgr_psoc_get_ref(struct wlan_objmgr_psoc *psoc,
					wlan_objmgr_ref_dbgid id);

/**
 * wlan_objmgr_psoc_try_get_ref() - increment ref count, if allowed
 * @psoc: PSOC
object
 * @id: Object Manager ref debug id
 *
 * API to increment ref count after checking valid object state
 *
 * Return: QDF_STATUS SUCCESS on ref increment, FAILURE otherwise
 */
QDF_STATUS wlan_objmgr_psoc_try_get_ref(struct wlan_objmgr_psoc *psoc,
					wlan_objmgr_ref_dbgid id);

/**
 * wlan_objmgr_psoc_release_ref() - decrement ref count
 * @psoc: PSOC object
 * @id: Object Manager ref debug id
 *
 * API to decrement ref count of psoc, if ref count is 1, it initiates the
 * PSOC deletion
 *
 * Return: void
 */
void wlan_objmgr_psoc_release_ref(struct wlan_objmgr_psoc *psoc,
					wlan_objmgr_ref_dbgid id);

/**
 * wlan_objmgr_print_ref_all_objects_per_psoc() - print all psoc objects'
 *                                                ref counts
 * @psoc: PSOC object
 *
 * API to be used for printing all the objects(pdev/vdev/peer) ref counts
 *
 * Return: SUCCESS/FAILURE
 */
QDF_STATUS wlan_objmgr_print_ref_all_objects_per_psoc(
		struct wlan_objmgr_psoc *psoc);

/**
 * wlan_objmgr_psoc_set_user_config() - populate user config
 *                                      data in psoc
 * @psoc: psoc object pointer
 * @user_config_data: pointer to user config data filled up by os
 *                    dependent component
 *
 * It is intended to set all elements by OSIF/HDD and is not
 * intended to modify a single element
 *
 * Return: QDF status
 */
QDF_STATUS wlan_objmgr_psoc_set_user_config(struct wlan_objmgr_psoc *psoc,
		struct wlan_objmgr_psoc_user_config *user_config_data);

/**
 * wlan_objmgr_psoc_check_for_pdev_leaks() - Assert no pdevs attached to @psoc
 * @psoc: The psoc to check
 *
 * Return: None
 */
void wlan_objmgr_psoc_check_for_pdev_leaks(struct wlan_objmgr_psoc *psoc);

/**
 * wlan_objmgr_psoc_check_for_vdev_leaks() - Assert no vdevs attached to @psoc
 * @psoc: The psoc to check
 *
 * Return: None
 */
void wlan_objmgr_psoc_check_for_vdev_leaks(struct wlan_objmgr_psoc *psoc);

/**
 * wlan_objmgr_psoc_check_for_peer_leaks() - Assert no peers attached to @psoc
 * @psoc: The psoc to check
 *
 * Return: None
 */
void wlan_objmgr_psoc_check_for_peer_leaks(struct wlan_objmgr_psoc *psoc);

/**
 * wlan_objmgr_psoc_get_dual_mac_disable() - get user config
 *                                           data for DBS disable
 * @psoc: psoc object pointer
 *
 * Return: Disable or Enable
 */
static inline uint32_t wlan_objmgr_psoc_get_dual_mac_disable(
					struct wlan_objmgr_psoc *psoc)
{
	if (psoc == NULL)
		return 0;
	return psoc->soc_nif.user_config.dual_mac_feature_disable;
}

/**
 * wlan_objmgr_psoc_get_band_capability() - get user config
 *                                          data for band capability
 * @psoc: psoc object pointer
 *
 * Return: band_capability
 */
static inline uint8_t wlan_objmgr_psoc_get_band_capability(
		struct wlan_objmgr_psoc *psoc)
{
	if (psoc == NULL)
		return 0;

	return psoc->soc_nif.user_config.band_capability;
}

/**
 * wlan_psoc_set_dp_handle() - set dp handle
 * @psoc: psoc object pointer
 * @dp_handle: Data path module handle
 *
 * Return: void
 */
static inline void wlan_psoc_set_dp_handle(struct wlan_objmgr_psoc *psoc,
		void *dp_handle)
{
	if (qdf_unlikely(!psoc)) {
		QDF_BUG(0);
		return;
	}

	psoc->dp_handle = dp_handle;
}

/**
 * wlan_psoc_get_dp_handle() - get dp handle
 * @psoc: psoc object pointer
 *
 * Return: dp handle, or NULL if @psoc is NULL
 */
static inline void *wlan_psoc_get_dp_handle(struct wlan_objmgr_psoc *psoc)
{
	if (qdf_unlikely(!psoc)) {
		QDF_BUG(0);
		return NULL;
	}

	return psoc->dp_handle;
}

/**
 * struct wlan_logically_del_peer - peer list node for logically deleted peers
 * @list: linked-list node
 * @peer: peer object pointer
 */
struct wlan_logically_del_peer {
	qdf_list_node_t list;
	struct wlan_objmgr_peer *peer;
};

/**
 * wlan_psoc_get_lmac_if_txops() - get lmac if txops for the psoc
 * @psoc: psoc object pointer
 *
 * Return: Pointer to wlan_lmac_if_tx_ops
 */
static inline struct wlan_lmac_if_tx_ops *
wlan_psoc_get_lmac_if_txops(struct wlan_objmgr_psoc *psoc)
{
	return &((psoc->soc_cb.tx_ops));
}
#endif /* _WLAN_OBJMGR_PSOC_OBJ_H_*/
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_vdev_obj.h
b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_vdev_obj.h new file mode 100644 index 0000000000000000000000000000000000000000..3822046ca9d71c307b89a16e1842b940bf8701d2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/inc/wlan_objmgr_vdev_obj.h @@ -0,0 +1,1541 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: Define the vdev data structure of UMAC + */ + +#ifndef _WLAN_OBJMGR_VDEV_OBJ_H_ +#define _WLAN_OBJMGR_VDEV_OBJ_H_ + +#include "qdf_atomic.h" +#include "qdf_list.h" +#include "qdf_lock.h" +#include "qdf_types.h" +#include "wlan_cmn.h" +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_pdev_obj.h" +#include "wlan_objmgr_psoc_obj.h" + + /* CONF: privacy enabled */ +#define WLAN_VDEV_F_PRIVACY 0x00000001 + /* CONF: 11g w/o 11b sta's */ +#define WLAN_VDEV_F_PUREG 0x00000002 + /* CONF: des_bssid is set */ +#define WLAN_VDEV_F_DESBSSID 0x00000004 + /* CONF: bg scan enabled */ +#define WLAN_VDEV_F_BGSCAN 0x00000008 + /* CONF: sw tx retry enabled */ +#define WLAN_VDEV_F_SWRETRY 0x00000010 + /* STATUS: update beacon tim */ +#define WLAN_VDEV_F_TIMUPDATE 0x00000020 + /* CONF: WPA enabled */ +#define WLAN_VDEV_F_WPA1 0x00000040 + /* CONF: WPA2 enabled */ +#define WLAN_VDEV_F_WPA2 0x00000080 + /* CONF: WPA/WPA2 enabled */ +#define WLAN_VDEV_F_WPA 0x000000c0 + /* CONF: drop unencrypted */ +#define WLAN_VDEV_F_DROPUNENC 0x00000100 + /* CONF: TKIP countermeasures */ +#define WLAN_VDEV_F_COUNTERM 0x00000200 + /* CONF: hide SSID in beacon */ /*TODO PDEV/PSOC */ +#define WLAN_VDEV_F_HIDESSID 0x00000400 + /* CONF: disable internal bridge */ /*TODO PDEV/PSOC */ +#define WLAN_VDEV_F_NOBRIDGE 0x00000800 + /* STATUS: update beacon wme */ +#define WLAN_VDEV_F_WMEUPDATE 0x00001000 + /* CONF: 4 addr allowed */ +#define WLAN_VDEV_F_WDS 0x00002000 + /* CONF: enable U-APSD */ +#define WLAN_VDEV_F_UAPSD 0x00004000 + /* STATUS: sleeping */ +#define WLAN_VDEV_F_SLEEP 0x00008000 + /* drop uapsd EOSP frames for test */ +#define WLAN_VDEV_F_EOSPDROP 0x00010000 + /* CONF: A-MPDU supported */ +#define WLAN_VDEV_F_AMPDU 0x00020000 + /* STATE: beacon APP IE updated */ +#define WLAN_VDEV_F_APPIE_UPDATE 0x00040000 + /* CONF: WDS auto Detect/DELBA */ +#define WLAN_VDEV_F_WDS_AUTODETECT 0x00080000 + /* 11b only without 11g stations */ +#define WLAN_VDEV_F_PUREB 0x00100000 + /* disable 
HT rates */ +#define WLAN_VDEV_F_HTRATES 0x00200000 + /* Extender AP */ +#define WLAN_VDEV_F_AP 0x00400000 + /* CONF: deliver rx frames with 802.11 header */ +#define WLAN_VDEV_F_DELIVER_80211 0x00800000 + /* CONF: os sends down tx frames with 802.11 header */ +#define WLAN_VDEV_F_SEND_80211 0x01000000 + /* CONF: statically configured WDS */ +#define WLAN_VDEV_F_WDS_STATIC 0x02000000 + /* CONF: pure 11n mode */ +#define WLAN_VDEV_F_PURE11N 0x04000000 + /* CONF: pure 11ac mode */ +#define WLAN_VDEV_F_PURE11AC 0x08000000 + /* Basic Rates Update */ +#define WLAN_VDEV_F_BR_UPDATE 0x10000000 + /* CONF: restrict bw ont top of per 11ac/n */ +#define WLAN_VDEV_F_STRICT_BW 0x20000000 + /* Wi-Fi SON mode (with APS) */ +#define WLAN_VDEV_F_SON 0x40000000 + /* Wi-Fi SON mode (with APS) */ +#define WLAN_VDEV_F_MBO 0x80000000 + +/* Feature extension flags */ + /* CONF: MSFT safe mode */ +#define WLAN_VDEV_FEXT_SAFEMODE 0x00000001 + /* if the vap can sleep*/ +#define WLAN_VDEV_FEXT_CANSLEEP 0x00000002 + /* use sw bmiss timer */ +#define WLAN_VDEV_FEXT_SWBMISS 0x00000004 + /* enable beacon copy */ +#define WLAN_VDEV_FEXT_COPY_BEACON 0x00000008 +#define WLAN_VDEV_FEXT_WAPI 0x00000010 + /* 802.11h enabled */ +#define WLAN_VDEV_FEXT_DOTH 0x00000020 + /* if the vap has wds independance set */ +#define WLAN_VDEV_FEXT_VAPIND 0x00000040 + /* QBSS load IE enabled */ +#define WLAN_VDEV_FEXT_BSSLOAD 0x00000080 + /* Short Guard Interval Enable:1 Disable:0 */ +#define WLAN_VDEV_FEXT_SGI 0x00000100 + /* Short Guard Interval Enable:1 Disable:0 for VHT fixed rates */ +#define WLAN_VDEV_FEXT_DATASGI 0x00000200 + /* LDPC Enable Rx:1 TX: 2 ; Disable:0 */ +#define WLAN_VDEV_FEXT_LDPC_TX 0x00000400 +#define WLAN_VDEV_FEXT_LDPC_RX 0x00000800 +#define WLAN_VDEV_FEXT_LDPC 0x00000c00 + /* wme enabled */ +#define WLAN_VDEV_FEXT_WME 0x00001000 + /* WNM Capabilities */ +#define WLAN_VDEV_FEXT_WNM 0x00002000 + /* RRM Capabilities */ +#define WLAN_VDEV_FEXT_RRM 0x00004000 + /* WNM Proxy ARP Capabilities */ 
+#define WLAN_VDEV_FEXT_PROXYARP 0x00008000 + /* 256 QAM support in 2.4GHz mode Enable:1 Disable:0 */ +#define WLAN_VDEV_FEXT_256QAM 0x00010000 + /* 2.4NG 256 QAM Interop mode Enable:1 Disable:0 */ +#define WLAN_VDEV_FEXT_256QAM_INTEROP 0x00020000 + /* static mimo ps enabled */ +#define WLAN_VDEV_FEXT_STATIC_MIMOPS 0x00040000 + /* dynamic mimo ps enabled */ +#define WLAN_VDEV_FEXT_DYN_MIMOPS 0x00080000 + /* Country IE enabled */ +#define WLAN_VDEV_FEXT_CNTRY_IE 0x00100000 + /*does not want to trigger multi channel operation + instead follow master vaps channel (for AP/GO Vaps) */ +#define WLAN_VDEV_FEXT_NO_MULCHAN 0x00200000 + /*non-beaconing AP VAP*/ +#define WLAN_VDEV_FEXT_NON_BEACON 0x00400000 + /* SPL repeater enabled for SON*/ +#define WLAN_VDEV_FEXT_SON_SPL_RPT 0x00800000 + /* SON IE update in MGMT frame */ +#define WLAN_VDEV_FEXT_SON_INFO_UPDATE 0x01000000 + /* CONF: A-MSDU supported */ +#define WLAN_VDEV_FEXT_AMSDU 0x02000000 + +/* VDEV OP flags */ + /* if the vap destroyed by user */ +#define WLAN_VDEV_OP_DELETE_PROGRESS 0x00000001 + /* set to enable sta-fws fweature */ +#define WLAN_VDEV_OP_STAFWD 0x00000002 + /* Off-channel support enabled */ +#define WLAN_VDEV_OP_OFFCHAN 0x00000004 + /* if the vap has erp update set */ +#define WLAN_VDEV_OP_ERPUPDATE 0x00000008 + /* this vap needs scheduler for off channel operation */ +#define WLAN_VDEV_OP_NEEDS_SCHED 0x00000010 + /*STA in forced sleep set PS bit for all outgoing frames */ +#define WLAN_VDEV_OP_FORCED_SLEEP 0x00000020 + /* update bssload IE in beacon */ +#define WLAN_VDEV_OP_BSSLOAD_UPDATE 0x00000040 + /* Hotspot 2.0 DGAF Disable bit */ +#define WLAN_VDEV_OP_DGAF_DISABLE 0x00000080 + /* STA SmartNet enabled */ +#define WLAN_VDEV_OP_SMARTNET_EN 0x00000100 + /* SoftAP to reject resuming in DFS channels */ +#define WLAN_VDEV_OP_REJ_DFS_CHAN 0x00000200 + /* Trigger mlme response */ +#define WLAN_VDEV_OP_TRIGGER_MLME_RESP 0x00000400 + /* test flag for MFP */ +#define WLAN_VDEV_OP_MFP_TEST 0x00000800 + /* 
flag to indicate using default ratemask */ +#define WLAN_VDEV_OP_DEF_RATEMASK 0x00001000 +/*For wakeup AP VAP when wds-sta connect to the AP only use when + export (UMAC_REPEATER_DELAYED_BRINGUP || DBDC_REPEATER_SUPPORT)=1*/ +#define WLAN_VDEV_OP_KEYFLAG 0x00002000 + /* if performe the iwlist scanning */ +#define WLAN_VDEV_OP_LIST_SCANNING 0x00004000 + /*Set when VAP down*/ +#define WLAN_VDEV_OP_IS_DOWN 0x00008000 + /* if vap may require acs when another vap is brought down */ +#define WLAN_VDEV_OP_NEEDS_UP_ACS 0x00010000 + /* Block data traffic tx for this vap */ +#define WLAN_VDEV_OP_BLOCK_TX_TRAFFIC 0x00020000 + /* for mbo functionality */ +#define WLAN_VDEV_OP_MBO 0x00040000 + + /* CAPABILITY: IBSS available */ +#define WLAN_VDEV_C_IBSS 0x00000001 +/* CAPABILITY: HOSTAP avail */ +#define WLAN_VDEV_C_HOSTAP 0x00000002 + /* CAPABILITY: Old Adhoc Demo */ +#define WLAN_VDEV_C_AHDEMO 0x00000004 + /* CAPABILITY: sw tx retry */ +#define WLAN_VDEV_C_SWRETRY 0x00000008 + /* CAPABILITY: monitor mode */ +#define WLAN_VDEV_C_MONITOR 0x00000010 + /* CAPABILITY: TKIP MIC avail */ +#define WLAN_VDEV_C_TKIPMIC 0x00000020 + /* CAPABILITY: 4-addr support */ +#define WLAN_VDEV_C_WDS 0x00000040 + /* CAPABILITY: TKIP MIC for QoS frame */ +#define WLAN_VDEV_C_WME_TKIPMIC 0x00000080 + /* CAPABILITY: bg scanning */ +#define WLAN_VDEV_C_BGSCAN 0x00000100 + /* CAPABILITY: Restrict offchannel */ +#define WLAN_VDEV_C_RESTRICT_OFFCHAN 0x00000200 + +/* Invalid VDEV identifier */ +#define WLAN_INVALID_VDEV_ID 255 + +/** + * enum wlan_vdev_state - VDEV state + * @WLAN_VDEV_S_INIT: Default state, IDLE state + * @WLAN_VDEV_S_SCAN: SCAN state + * @WLAN_VDEV_S_JOIN: Join state + * @WLAN_VDEV_S_DFS_WAIT:CAC period + * @WLAN_VDEV_S_RUN: RUN state + * @WLAN_VDEV_S_STOP: STOP state + * @WLAN_VDEV_S_RESET: RESET state, STOP+INIT+JOIN + * @WLAN_VDEV_S_MAX: MAX state + */ +enum wlan_vdev_state { + WLAN_VDEV_S_INIT = 0, + WLAN_VDEV_S_SCAN = 1, + WLAN_VDEV_S_JOIN = 2, + WLAN_VDEV_S_DFS_WAIT = 3, + 
WLAN_VDEV_S_RUN = 4, + WLAN_VDEV_S_STOP = 5, + WLAN_VDEV_S_RESET = 6, + WLAN_VDEV_S_MAX, +}; + +/** + * struct wlan_vdev_create_params - Create params, HDD/OSIF passes this + * structure While creating VDEV + * @opmode: Opmode of VDEV + * @flags: create flags + * @osifp: OS structure + * @macaddr[]: MAC address + * @mataddr[]: MAT address + */ +struct wlan_vdev_create_params { + enum QDF_OPMODE opmode; + uint32_t flags; + struct vdev_osif_priv *osifp; + uint8_t macaddr[QDF_MAC_ADDR_SIZE]; + uint8_t mataddr[QDF_MAC_ADDR_SIZE]; +}; + +/** + * struct wlan_channel - channel structure + * @ch_freq: Channel in Mhz. + * @ch_ieee: IEEE channel number. + * @ch_flags: Channel flags. + * @ch_flagext: Channel extension flags. + * @ch_maxpower: Maximum tx power in dBm. + * @ch_freq_seg1: Channel Center frequeny for VHT80/160 and HE80/160. + * @ch_freq_seg2: Second channel Center frequency applicable for 80+80MHz mode. + * @ch_width: Channel width. + * @ch_phymode: Channel phymode. + */ +struct wlan_channel { + uint16_t ch_freq; + uint8_t ch_ieee; + uint64_t ch_flags; + uint16_t ch_flagext; + int8_t ch_maxpower; + uint8_t ch_freq_seg1; + uint8_t ch_freq_seg2; + enum wlan_phy_ch_width ch_width; + enum wlan_phymode ch_phymode; +}; + +/** + * struct wlan_objmgr_vdev_mlme - VDEV MLME specific sub structure + * @vdev_opmode: Opmode of VDEV + * @mlme_state: VDEV state + * @bss_chan: BSS channel + * @des_chan: Desired channel, for STA Desired may not be used + * @nss: Num. Spatial streams + * @tx_chainmask: Tx Chainmask + * @rx_chainmask: Rx Chainmask + * @tx_power: Tx power + * @vdev_caps: VDEV capabilities + * @vdev_feat_caps: VDEV feature caps + * @vdev_feat_ext_caps: VDEV Extended feature caps + * @max_rate: MAX rate + * @tx_mgmt_rate: TX Mgmt. 
Rate + * @vdev_op_flags: Operation flags + * @mataddr[]: MAT address + * @macaddr[]: VDEV self MAC address + * @ssid[]: SSID + * @ssid_len: SSID length + */ +struct wlan_objmgr_vdev_mlme { + enum QDF_OPMODE vdev_opmode; + enum wlan_vdev_state mlme_state; + struct wlan_channel *bss_chan; /* Define wlan_channel */ + struct wlan_channel *des_chan; /*TODO ??? */ + uint8_t nss; + uint8_t tx_chainmask; + uint8_t rx_chainmask; + uint8_t tx_power; + uint32_t vdev_caps; + uint32_t vdev_feat_caps; + uint32_t vdev_feat_ext_caps; + uint32_t max_rate; + uint32_t tx_mgmt_rate; + uint32_t vdev_op_flags; + uint8_t mataddr[QDF_MAC_ADDR_SIZE]; + uint8_t macaddr[QDF_MAC_ADDR_SIZE]; + char ssid[WLAN_SSID_MAX_LEN+1]; + uint8_t ssid_len; +}; + +/** + * struct wlan_objmgr_vdev_nif - VDEV HDD specific sub structure + * @osdev: OS specific pointer + */ +struct wlan_objmgr_vdev_nif { + struct vdev_osif_priv *osdev; +}; + +/** + * struct wlan_objmgr_vdev_objmgr - vdev object manager sub structure + * @vdev_id: VDEV id + * @print_cnt: Count to throttle Logical delete prints + * @self_peer: Self PEER + * @bss_peer: BSS PEER + * @wlan_peer_list: PEER list + * @wlan_pdev: PDEV pointer + * @wlan_peer_count: Peer count + * @max_peer_count: Max Peer count + * @c_flags: creation specific flags + * @ref_cnt: Ref count + * @ref_id_dbg: Array to track Ref count + */ +struct wlan_objmgr_vdev_objmgr { + uint8_t vdev_id; + uint8_t print_cnt; + struct wlan_objmgr_peer *self_peer; + struct wlan_objmgr_peer *bss_peer; + qdf_list_t wlan_peer_list; + struct wlan_objmgr_pdev *wlan_pdev; + uint16_t wlan_peer_count; + uint16_t max_peer_count; + uint32_t c_flags; + qdf_atomic_t ref_cnt; + qdf_atomic_t ref_id_dbg[WLAN_REF_ID_MAX]; +}; + +/** + * struct wlan_objmgr_vdev - VDEV common object + * @vdev_node: qdf list of pdev's vdev list + * @vdev_mlme: VDEV MLME substructure + * @vdev_objmgr: VDEV Object Mgr substructure + * @vdev_nif: VDEV HDD substructure + * @vdev_comp_priv_obj[]:Component's private objects list + 
* @obj_status[]: Component object status + * @obj_state: VDEV object state + * @dp_handle: DP module handle + * @vdev_lock: VDEV lock + */ +struct wlan_objmgr_vdev { + qdf_list_node_t vdev_node; + struct wlan_objmgr_vdev_mlme vdev_mlme; + struct wlan_objmgr_vdev_objmgr vdev_objmgr; + struct wlan_objmgr_vdev_nif vdev_nif; + void *vdev_comp_priv_obj[WLAN_UMAC_MAX_COMPONENTS]; + QDF_STATUS obj_status[WLAN_UMAC_MAX_COMPONENTS]; + WLAN_OBJ_STATE obj_state; + void *dp_handle; + qdf_spinlock_t vdev_lock; +}; + +/** + ** APIs to Create/Delete Global object APIs + */ +/** + * wlan_objmgr_vdev_obj_create() - vdev object create + * @pdev: PDEV object on which this vdev gets created + * @params: VDEV create params from HDD + * + * Creates vdev object, intializes with default values + * Attaches to psoc and pdev objects + * Invokes the registered notifiers to create component object + * + * Return: Handle to struct wlan_objmgr_vdev on successful creation, + * NULL on Failure (on Mem alloc failure and Component objects + * Failure) + */ +struct wlan_objmgr_vdev *wlan_objmgr_vdev_obj_create( + struct wlan_objmgr_pdev *pdev, + struct wlan_vdev_create_params *params); + +/** + * wlan_objmgr_vdev_obj_delete() - vdev object delete + * @vdev: vdev object + * + * Logically deletes VDEV object, + * Once all the references are released, object manager invokes the registered + * notifiers to destroy component objects + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_vdev_obj_delete(struct wlan_objmgr_vdev *vdev); + +/** + ** APIs to attach/detach component objects + */ +/** + * wlan_objmgr_vdev_component_obj_attach() - vdev comp object attach + * @vdev: VDEV object + * @id: Component id + * @comp_priv_obj: component's private object pointer + * @status: Component's private object creation status + * + * API to be used for attaching component object with VDEV common object + * + * Return: SUCCESS on successful storing of component's object in common object + * On FAILURE 
(appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_vdev_component_obj_attach( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj, + QDF_STATUS status); + +/** + * wlan_objmgr_vdev_component_obj_detach() - vdev comp object detach + * @vdev: VDEV object + * @id: Component id + * @comp_priv_obj: component's private object pointer + * + * API to be used for detaching component object with VDEV common object + * + * Return: SUCCESS on successful removal of component's object from common + * object + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_vdev_component_obj_detach( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj); +/* + ** APIs to operations on vdev objects +*/ + +typedef void (*wlan_objmgr_vdev_op_handler)(struct wlan_objmgr_vdev *vdev, + void *object, + void *arg); + +/** + * wlan_objmgr_iterate_peerobj_list() - iterate vdev's peer list + * @vdev: vdev object + * @handler: the handler will be called for each object of requested type + * the handler should be implemented to perform required operation + * @arg: agruments passed by caller + * @dbg_id: id of the caller + * + * API to be used for performing the operations on all PEER objects + * of vdev + * + * Return: SUCCESS/FAILURE + */ +QDF_STATUS wlan_objmgr_iterate_peerobj_list( + struct wlan_objmgr_vdev *vdev, + wlan_objmgr_vdev_op_handler handler, + void *arg, wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_objmgr_trigger_vdev_comp_priv_object_creation() - vdev + * comp object creation + * @vdev: VDEV object + * @id: Component id + * + * API to create component private object in run time, this would + * be used for features which gets enabled in run time + * + * Return: SUCCESS on successful creation + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_trigger_vdev_comp_priv_object_creation( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id); + 
+/** + * wlan_objmgr_trigger_vdev_comp_priv_object_deletion() - vdev comp + * object deletion + * @vdev: VDEV object + * @id: Component id + * + * API to destroy component private object in run time, this would + * be used for features which gets disabled in run time + * + * Return: SUCCESS on successful deletion + * On FAILURE (appropriate failure codes are returned) + */ +QDF_STATUS wlan_objmgr_trigger_vdev_comp_priv_object_deletion( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id); + +/** + * wlan_objmgr_vdev_get_comp_private_obj() - get vdev component private object + * @vdev: VDEV object + * @id: Component id + * + * API to get component private object + * + * Return: void *ptr on SUCCESS + * NULL on Failure + */ +void *wlan_objmgr_vdev_get_comp_private_obj( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id); + +/* Util APIs */ + +/** + * wlan_vdev_get_pdev() - get pdev + * @vdev: VDEV object + * + * API to get pdev object pointer from vdev + * + * Return: pdev object pointer + */ +static inline struct wlan_objmgr_pdev *wlan_vdev_get_pdev( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_objmgr.wlan_pdev; +} + +/** + * wlan_pdev_vdev_list_peek_head() - get first vdev from pdev list + * @peer_list: qdf_list_t + * + * API to get the head vdev of given vdev (of pdev's vdev list) + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: + * @peer: head peer + */ +static inline struct wlan_objmgr_vdev *wlan_pdev_vdev_list_peek_head( + qdf_list_t *vdev_list) +{ + struct wlan_objmgr_vdev *vdev; + qdf_list_node_t *vdev_node = NULL; + + /* This API is invoked with lock acquired, do not add log prints */ + if (qdf_list_peek_front(vdev_list, &vdev_node) != QDF_STATUS_SUCCESS) + return NULL; + + vdev = qdf_container_of(vdev_node, struct wlan_objmgr_vdev, vdev_node); + return vdev; +} + +/** + * wlan_pdev_vdev_list_peek_active_head() - get first active vdev from pdev list + * @vdev: VDEV object + * @vdev_list: qdf_list_t + 
* @dbg_id: id of the caller + * + * API to get the head active vdev of given vdev (of pdev's vdev list) + * + * Return: + * @peer: head peer + */ +struct wlan_objmgr_vdev *wlan_pdev_vdev_list_peek_active_head( + struct wlan_objmgr_pdev *pdev, + qdf_list_t *vdev_list, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * wlan_vdev_get_next_vdev_of_pdev() - get next vdev + * @vdev: VDEV object + * + * API to get next vdev object pointer of vdev + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: + * @vdev_next: VDEV object + */ +static inline struct wlan_objmgr_vdev *wlan_vdev_get_next_vdev_of_pdev( + qdf_list_t *vdev_list, + struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_vdev *vdev_next; + qdf_list_node_t *node = &vdev->vdev_node; + qdf_list_node_t *next_node = NULL; + + /* This API is invoked with lock acquired, do not add log prints */ + if (node == NULL) + return NULL; + + if (qdf_list_peek_next(vdev_list, node, &next_node) != + QDF_STATUS_SUCCESS) + return NULL; + + vdev_next = qdf_container_of(next_node, struct wlan_objmgr_vdev, + vdev_node); + return vdev_next; +} + +/** + * wlan_vdev_get_next_active_vdev_of_pdev() - get next active vdev + * @pdev: PDEV object + * @vdev_list: qdf_list_t + * @vdev: VDEV object + * @dbg_id: id of the caller + * + * API to get next active vdev object pointer of vdev + * + * Return: + * @vdev_next: VDEV object + */ +struct wlan_objmgr_vdev *wlan_vdev_get_next_active_vdev_of_pdev( + struct wlan_objmgr_pdev *pdev, + qdf_list_t *vdev_list, + struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid dbg_id); + + +/** + * wlan_vdev_set_pdev() - set pdev + * @vdev: VDEV object + * @pdev: PDEV object + * + * API to get pdev object pointer from vdev + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: void + */ +static inline void wlan_vdev_set_pdev(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_pdev *pdev) +{ + /* This API is invoked with lock acquired, do not add log prints */ + 
vdev->vdev_objmgr.wlan_pdev = pdev; +} + +/** + * wlan_vdev_get_psoc() - get psoc + * @vdev: VDEV object + * + * API to get pdev object pointer from vdev + * + * Return: psoc object pointer + */ +static inline struct wlan_objmgr_psoc *wlan_vdev_get_psoc( + struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_psoc *psoc = NULL; + + pdev = wlan_vdev_get_pdev(vdev); + if (pdev == NULL) + return NULL; + + psoc = wlan_pdev_get_psoc(pdev); + + return psoc; +} + +/** + * wlan_vdev_mlme_set_opmode() - set vdev opmode + * @vdev: VDEV object + * @mode: VDEV op mode + * + * API to set opmode in vdev object + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_opmode(struct wlan_objmgr_vdev *vdev, + enum QDF_OPMODE mode) +{ + vdev->vdev_mlme.vdev_opmode = mode; +} + +/** + * wlan_vdev_mlme_get_opmode() - get vdev opmode + * @vdev: VDEV object + * + * API to set opmode of vdev object + * + * Return: + * @mode: VDEV op mode + */ +static inline enum QDF_OPMODE wlan_vdev_mlme_get_opmode( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_mlme.vdev_opmode; +} + +/** + * wlan_vdev_mlme_set_macaddr() - set vdev macaddr + * @vdev: VDEV object + * @macaddr: MAC address + * + * API to set macaddr in vdev object + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_macaddr(struct wlan_objmgr_vdev *vdev, + uint8_t *macaddr) +{ + /* This API is invoked with lock acquired, do not add log prints */ + WLAN_ADDR_COPY(vdev->vdev_mlme.macaddr, macaddr); +} + +/** + * wlan_vdev_mlme_get_macaddr() - get vdev macaddr + * @vdev: VDEV object + * + * API to get MAC address from vdev object + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: + * @macaddr: MAC address + */ +static inline uint8_t *wlan_vdev_mlme_get_macaddr(struct wlan_objmgr_vdev *vdev) +{ + /* This API is invoked with lock acquired, do not add log prints */ + return 
vdev->vdev_mlme.macaddr; +} + +/** + * wlan_vdev_mlme_set_mataddr() - set vdev mataddr + * @vdev: VDEV object + * @mataddr: MAT address + * + * API to set mataddr in vdev object + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_mataddr(struct wlan_objmgr_vdev *vdev, + uint8_t *mataddr) +{ + /* This API is invoked with lock acquired, do not add log prints */ + WLAN_ADDR_COPY(vdev->vdev_mlme.mataddr, mataddr); +} + +/** + * wlan_vdev_mlme_get_mataddr() - get mataddr + * @vdev: VDEV object + * + * API to get MAT address from vdev object + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: + * @mataddr: MAT address + */ +static inline uint8_t *wlan_vdev_mlme_get_mataddr(struct wlan_objmgr_vdev *vdev) +{ + /* This API is invoked with lock acquired, do not add log prints */ + return vdev->vdev_mlme.mataddr; +} + +/** + * wlan_vdev_get_id() - get vdev id + * @vdev: VDEV object + * + * API to get vdev id + * + * Return: + * @id: vdev id + */ +static inline uint8_t wlan_vdev_get_id(struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_objmgr.vdev_id; +} + +/** + * wlan_vdev_get_hw_macaddr() - get hw macaddr + * @vdev: VDEV object + * + * API to retrieve the HW MAC address from PDEV + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: + * @macaddr: HW MAC address + */ +static inline uint8_t *wlan_vdev_get_hw_macaddr(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + + /* This API is invoked with lock acquired, do not add log prints */ + if (pdev != NULL) + return wlan_pdev_get_hw_macaddr(pdev); + else + return NULL; +} + +/** + * wlan_vdev_mlme_set_ssid() - set ssid + * @vdev: VDEV object + * @ssid: SSID (input) + * @ssid_len: Length of SSID + * + * API to set the SSID of VDEV + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: SUCCESS, if update is done + * FAILURE, if ssid 
length is > max ssid len + */ +static inline QDF_STATUS wlan_vdev_mlme_set_ssid( + struct wlan_objmgr_vdev *vdev, + const uint8_t *ssid, uint8_t ssid_len) +{ + /* This API is invoked with lock acquired, do not add log prints */ + if (ssid_len <= WLAN_SSID_MAX_LEN) { + qdf_mem_copy(vdev->vdev_mlme.ssid, ssid, ssid_len); + vdev->vdev_mlme.ssid_len = ssid_len; + } else { + vdev->vdev_mlme.ssid_len = 0; + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_vdev_mlme_get_ssid() - get ssid + * @vdev: VDEV object + * @ssid: SSID + * @ssid_len: Length of SSID + * + * API to get the SSID of VDEV, it updates the SSID and its length + * in @ssid, @ssid_len respectively + * + * Caller need to acquire lock with wlan_vdev_obj_lock() + * + * Return: SUCCESS, if update is done + * FAILURE, if ssid length is > max ssid len + */ +static inline QDF_STATUS wlan_vdev_mlme_get_ssid( + struct wlan_objmgr_vdev *vdev, + uint8_t *ssid, uint8_t *ssid_len) +{ + /* This API is invoked with lock acquired, do not add log prints */ + if (vdev->vdev_mlme.ssid_len > 0) { + *ssid_len = vdev->vdev_mlme.ssid_len; + qdf_mem_copy(ssid, vdev->vdev_mlme.ssid, *ssid_len); + } else { + *ssid_len = 0; + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_vdev_obj_lock() - Acquire VDEV spinlock + * @vdev: VDEV object + * + * API to acquire VDEV lock + * Parent lock should not be taken in child lock context + * but child lock can be taken in parent lock context + * (for ex: psoc lock can't be invoked in pdev/vdev/peer lock context) + * + * Return: void + */ +static inline void wlan_vdev_obj_lock(struct wlan_objmgr_vdev *vdev) +{ + qdf_spin_lock_bh(&vdev->vdev_lock); +} + +/** + * wlan_vdev_obj_unlock() - Release VDEV spinlock + * @vdev: VDEV object + * + * API to Release VDEV lock + * + * Return: void + */ +static inline void wlan_vdev_obj_unlock(struct wlan_objmgr_vdev *vdev) +{ + qdf_spin_unlock_bh(&vdev->vdev_lock); +} + +/** + * 
wlan_vdev_mlme_set_bss_chan() - set bss chan + * @vdev: VDEV object + * @bss_chan: Channel + * + * API to set the BSS channel + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_bss_chan(struct wlan_objmgr_vdev *vdev, + struct wlan_channel *bss_chan) +{ + vdev->vdev_mlme.bss_chan = bss_chan; +} + +/** + * wlan_vdev_mlme_get_bss_chan() - get bss chan + * @vdev: VDEV object + * + * API to get the BSS channel + * + * Return: + * @bss_chan: Channel + */ +static inline struct wlan_channel *wlan_vdev_mlme_get_bss_chan( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_mlme.bss_chan; +} + +/** + * wlan_vdev_mlme_set_des_chan() - set desired chan + * @vdev: VDEV object + * @des_chan: Channel configured by user + * + * API to set the desired channel + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_des_chan(struct wlan_objmgr_vdev *vdev, + struct wlan_channel *des_chan) +{ + vdev->vdev_mlme.des_chan = des_chan; +} + +/** + * wlan_vdev_mlme_get_des_chan() - get desired chan + * @vdev: VDEV object + * + * API to get the desired channel + * + * Return: + * @des_chan: Channel configured by user + */ +static inline struct wlan_channel *wlan_vdev_mlme_get_des_chan( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_mlme.des_chan; +} + +/** + * wlan_vdev_mlme_set_nss() - set NSS + * @vdev: VDEV object + * @nss: nss configured by user + * + * API to set the Number of Spatial streams + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_nss(struct wlan_objmgr_vdev *vdev, + uint8_t nss) +{ + vdev->vdev_mlme.nss = nss; +} + +/** + * wlan_vdev_mlme_get_nss() - get NSS + * @vdev: VDEV object + * + * API to get the Number of Spatial Streams + * + * Return: + * @nss: nss value + */ +static inline uint8_t wlan_vdev_mlme_get_nss( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_mlme.nss; +} + +/** + * wlan_vdev_mlme_set_txchainmask() - set Tx chainmask + * @vdev: VDEV object + * @chainmask : chainmask either configured by user or max 
supported + * + * API to set the Tx chainmask + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_txchainmask(struct wlan_objmgr_vdev *vdev, + uint8_t chainmask) +{ + vdev->vdev_mlme.tx_chainmask = chainmask; +} + +/** + * wlan_vdev_mlme_get_txchainmask() - get Tx chainmask + * @vdev: VDEV object + * + * API to get the Tx chainmask + * + * Return: + * @chainmask : Tx chainmask either configured by user or max supported + */ +static inline uint8_t wlan_vdev_mlme_get_txchainmask( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_mlme.tx_chainmask; +} + +/** + * wlan_vdev_mlme_set_rxchainmask() - set Rx chainmask + * @vdev: VDEV object + * @chainmask : Rx chainmask either configured by user or max supported + * + * API to set the Rx chainmask + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_rxchainmask(struct wlan_objmgr_vdev *vdev, + uint8_t chainmask) +{ + vdev->vdev_mlme.rx_chainmask = chainmask; +} + +/** + * wlan_vdev_mlme_get_rxchainmask() - get Rx chainmask + * @vdev: VDEV object + * + * API to get the Rx chainmask + * + * Return: + * @chainmask : Rx chainmask either configured by user or max supported + */ +static inline uint8_t wlan_vdev_mlme_get_rxchainmask( + struct wlan_objmgr_vdev *vdev) +{ + /* This API is invoked with lock acquired, do not add log prints */ + return vdev->vdev_mlme.rx_chainmask; +} + +/** + * wlan_vdev_mlme_set_txpower() - set tx power + * @vdev: VDEV object + * @txpow: tx power either configured by used or max allowed + * + * API to set the tx power + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_txpower(struct wlan_objmgr_vdev *vdev, + uint8_t txpow) +{ + vdev->vdev_mlme.tx_power = txpow; +} + +/** + * wlan_vdev_mlme_get_txpower() - get tx power + * @vdev: VDEV object + * + * API to get the tx power + * + * Return: + * @txpow: tx power either configured by used or max allowed + */ +static inline uint8_t wlan_vdev_mlme_get_txpower( + struct wlan_objmgr_vdev *vdev) +{ + return 
vdev->vdev_mlme.tx_power; +} + +/** + * wlan_vdev_mlme_set_maxrate() - set max rate + * @vdev: VDEV object + * @maxrate: configured by used or based on configured mode + * + * API to set the max rate the vdev supports + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_maxrate(struct wlan_objmgr_vdev *vdev, + uint32_t maxrate) +{ + vdev->vdev_mlme.max_rate = maxrate; +} + +/** + * wlan_vdev_mlme_get_maxrate() - get max rate + * @vdev: VDEV object + * + * API to get the max rate the vdev supports + * + * Return: + * @maxrate: configured by used or based on configured mode + */ +static inline uint32_t wlan_vdev_mlme_get_maxrate( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_mlme.max_rate; +} + +/** + * wlan_vdev_mlme_set_txmgmtrate() - set txmgmtrate + * @vdev: VDEV object + * @txmgmtrate: Tx Mgmt rate + * + * API to set Mgmt Tx rate + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_txmgmtrate(struct wlan_objmgr_vdev *vdev, + uint32_t txmgmtrate) +{ + vdev->vdev_mlme.tx_mgmt_rate = txmgmtrate; +} + +/** + * wlan_vdev_mlme_get_txmgmtrate() - get txmgmtrate + * @vdev: VDEV object + * + * API to get Mgmt Tx rate + * + * Return: + * @txmgmtrate: Tx Mgmt rate + */ +static inline uint32_t wlan_vdev_mlme_get_txmgmtrate( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_mlme.tx_mgmt_rate; +} + +/** + * wlan_vdev_mlme_feat_cap_set() - set feature caps + * @vdev: VDEV object + * @cap: capabilities to be set + * + * API to set MLME feature capabilities + * + * Return: void + */ +static inline void wlan_vdev_mlme_feat_cap_set(struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + vdev->vdev_mlme.vdev_feat_caps |= cap; +} + +/** + * wlan_vdev_mlme_feat_cap_clear() - clear feature caps + * @vdev: VDEV object + * @cap: capabilities to be cleared + * + * API to clear MLME feature capabilities + * + * Return: void + */ +static inline void wlan_vdev_mlme_feat_cap_clear(struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + 
vdev->vdev_mlme.vdev_feat_caps &= ~cap; +} + +/** + * wlan_vdev_mlme_feat_cap_get() - get feature caps + * @vdev: VDEV object + * @cap: capabilities to be checked + * + * API to know MLME feature capability is set or not + * + * Return: 1 -- if capabilities set + * 0 -- if capabilities clear + */ +static inline uint8_t wlan_vdev_mlme_feat_cap_get(struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + return (vdev->vdev_mlme.vdev_feat_caps & cap) ? 1 : 0; +} + +/** + * wlan_vdev_mlme_feat_ext_cap_set() - set ext feature caps + * @vdev: VDEV object + * @cap: capabilities to be set + * + * API to set the MLME extensive feature capabilities + * + * Return: void + */ +static inline void wlan_vdev_mlme_feat_ext_cap_set( + struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + vdev->vdev_mlme.vdev_feat_ext_caps |= cap; +} + +/** + * wlan_vdev_mlme_feat_ext_cap_clear() - clear ext feature caps + * @vdev: VDEV object + * @cap: capabilities to be cleared + * + * API to clear the MLME extensive feature capabilities + * + * Return: void + */ +static inline void wlan_vdev_mlme_feat_ext_cap_clear( + struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + vdev->vdev_mlme.vdev_feat_ext_caps &= ~cap; +} + +/** + * wlan_vdev_mlme_feat_ext_cap_get() - get feature ext caps + * @vdev: VDEV object + * @cap: capabilities to be checked + * + * API to know MLME ext feature capability is set or not + * + * Return: 1 -- if capabilities set + * 0 -- if capabilities clear + */ +static inline uint8_t wlan_vdev_mlme_feat_ext_cap_get( + struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + return (vdev->vdev_mlme.vdev_feat_ext_caps & cap) ? 
1 : 0; +} + +/** + * wlan_vdev_mlme_cap_set() - mlme caps set + * @vdev: VDEV object + * @cap: capabilities to be set + * + * API to set the MLME capabilities + * + * Return: void + */ +static inline void wlan_vdev_mlme_cap_set(struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + vdev->vdev_mlme.vdev_caps |= cap; +} + +/** + * wlan_vdev_mlme_cap_clear() - mlme caps clear + * @vdev: VDEV object + * @cap: capabilities to be cleared + * + * API to clear the MLME capabilities + * + * Return: void + */ +static inline void wlan_vdev_mlme_cap_clear(struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + vdev->vdev_mlme.vdev_caps &= ~cap; +} + +/** + * wlan_vdev_mlme_cap_get() - get mlme caps + * @vdev: VDEV object + * @cap: capabilities to be checked + * + * API to know MLME capability is set or not + * + * Return: 1 -- if capabilities set + * 0 -- if capabilities clear + */ +static inline uint8_t wlan_vdev_mlme_cap_get(struct wlan_objmgr_vdev *vdev, + uint32_t cap) +{ + return (vdev->vdev_mlme.vdev_caps & cap) ? 
1 : 0; +} + +/** + * wlan_vdev_mlme_get_state() - get mlme state + * @vdev: VDEV object + * + * API to get MLME state + * + * Return: state of MLME + */ +static inline enum wlan_vdev_state wlan_vdev_mlme_get_state( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_mlme.mlme_state; +} + +/** + * wlan_vdev_mlme_set_state() - set mlme state + * @vdev: VDEV object + * @state: MLME state + * + * API to set MLME state + * + * Return: void + */ +static inline void wlan_vdev_mlme_set_state(struct wlan_objmgr_vdev *vdev, + enum wlan_vdev_state state) +{ + if (state < WLAN_VDEV_S_MAX) + vdev->vdev_mlme.mlme_state = state; +} + +/** + * wlan_vdev_set_selfpeer() - set self peer + * @vdev: VDEV object + * @peer: peer pointer + * + * API to set the self peer of VDEV + * + * Return: void + */ +static inline void wlan_vdev_set_selfpeer(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer) +{ + vdev->vdev_objmgr.self_peer = peer; +} + +/** + * wlan_vdev_get_selfpeer() - get self peer + * @vdev: VDEV object + * + * API to get the self peer of VDEV + * + * Return: + * @peer: peer pointer + */ +static inline struct wlan_objmgr_peer *wlan_vdev_get_selfpeer( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_objmgr.self_peer; +} + +/** + * wlan_vdev_set_bsspeer() - set bss peer + * @vdev: VDEV object + * @peer: BSS peer pointer + * + * API to set the BSS peer of VDEV + * + * Return: void + */ +static inline void wlan_vdev_set_bsspeer(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer) +{ + vdev->vdev_objmgr.bss_peer = peer; +} + +/** + * wlan_vdev_get_bsspeer() - get bss peer + * @vdev: VDEV object + * + * API to get the BSS peer of VDEV + * + * Return: + * @peer: BSS peer pointer + */ +static inline struct wlan_objmgr_peer *wlan_vdev_get_bsspeer( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_objmgr.bss_peer; +} + +/** + * wlan_vdev_get_ospriv() - get os priv pointer + * @vdev: VDEV object + * + * API to get OS private pointer from VDEV + * + * 
Return: ospriv - private pointer + */ +static inline struct vdev_osif_priv *wlan_vdev_get_ospriv( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_nif.osdev; +} + +/** + * wlan_vdev_reset_ospriv() - reset os priv pointer + * @vdev: VDEV object + * + * API to reset OS private pointer in VDEV + * + * Return: void + */ +static inline void wlan_vdev_reset_ospriv(struct wlan_objmgr_vdev *vdev) +{ + vdev->vdev_nif.osdev = NULL; +} + +/** + * wlan_vdev_get_peer_count() - get vdev peer count + * @vdev: VDEV object + * + * API to get peer count from VDEV + * + * Return: peer_count - vdev's peer count + */ +static inline uint16_t wlan_vdev_get_peer_count(struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_objmgr.wlan_peer_count; +} + +/** + * DOC: Examples to use VDEV ref count APIs + * + * In all the scenarios, the pair of API should be followed + * other it lead to memory leak + * + * scenario 1: + * + * wlan_objmgr_vdev_obj_create() + * ---- + * wlan_objmgr_vdev_obj_delete() + * + * scenario 2: + * + * wlan_objmgr_vdev_get_ref() + * ---- + * the operations which are done on + * vdev object + * ---- + * wlan_objmgr_vdev_release_ref() + * + * scenario 3: + * + * API to retrieve vdev (xxx_get_vdev_xxx()) + * ---- + * the operations which are done on + * vdev object + * ---- + * wlan_objmgr_vdev_release_ref() + */ + +/** + * wlan_objmgr_vdev_get_ref() - increment ref count + * @vdev: VDEV object + * @id: Object Manager ref debug id + * + * API to increment ref count of vdev + * + * Return: void + */ +void wlan_objmgr_vdev_get_ref(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id); + +/** + * wlan_objmgr_vdev_try_get_ref() - increment ref count, if allowed + * @vdev: VDEV object + * @id: Object Manager ref debug id + * + * API to increment ref count of vdev after checking valid object state + * + * Return: void + */ +QDF_STATUS wlan_objmgr_vdev_try_get_ref(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id); + +/** + * wlan_objmgr_vdev_release_ref() - 
decrement ref count + * @vdev: VDEV object + * @id: Object Manager ref debug id + * + * API to decrement ref count of vdev, if ref count is 1, it initiates the + * VDEV deletion + * + * Return: void + */ +void wlan_objmgr_vdev_release_ref(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id); + +/** + * wlan_vdev_set_max_peer_count() - set max peer count + * @vdev: VDEV object + * @count: Max peer count + * + * API to set max peer count of VDEV + * + * Return: void + */ +static inline void wlan_vdev_set_max_peer_count(struct wlan_objmgr_vdev *vdev, + uint16_t count) +{ + vdev->vdev_objmgr.max_peer_count = count; +} + +/** + * wlan_vdev_get_max_peer_count() - get max peer count + * @vdev: VDEV object + * + * API to get max peer count of VDEV + * + * Return: max peer count + */ +static inline uint16_t wlan_vdev_get_max_peer_count( + struct wlan_objmgr_vdev *vdev) +{ + return vdev->vdev_objmgr.max_peer_count; +} + +/** + * wlan_vdev_set_dp_handle() - set dp handle + * @vdev: vdev object pointer + * @dp_handle: Data path module handle + * + * Return: void + */ +static inline void wlan_vdev_set_dp_handle(struct wlan_objmgr_vdev *vdev, + void *dp_handle) +{ + if (qdf_unlikely(!vdev)) { + QDF_BUG(0); + return; + } + + vdev->dp_handle = dp_handle; +} + +/** + * wlan_vdev_get_dp_handle() - get dp handle + * @vdev: vdev object pointer + * + * Return: dp handle + */ +static inline void *wlan_vdev_get_dp_handle(struct wlan_objmgr_vdev *vdev) +{ + if (qdf_unlikely(!vdev)) { + QDF_BUG(0); + return NULL; + } + + return vdev->dp_handle; +} + +#endif /* _WLAN_OBJMGR_VDEV_OBJ_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_debug.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_debug.c new file mode 100644 index 0000000000000000000000000000000000000000..705f0b5b32098012c3153fcf201f4049886f2af2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_debug.c @@ -0,0 
+1,464 @@ +/* + * + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/* + * DOC: Public APIs to perform debug operations on object manager + */ + +#include +#include +#include +#include +#include "wlan_objmgr_global_obj_i.h" +#include +#include + +#define LOG_DEL_OBJ_TIMEOUT_VALUE_MSEC 5000 +#define LOG_DEL_OBJ_DESTROY_DURATION_SEC 5 +/* + * The max duration for which a obj can be allowed to remain in L-state + * The duration should be higher than the psoc idle timeout. 
+ */ +#define LOG_DEL_OBJ_DESTROY_ASSERT_DURATION_SEC 15 +#define LOG_DEL_OBJ_LIST_MAX_COUNT (3 + 5 + 48 + 4096) + +/** + * struct log_del_obj - Logically deleted Object + * @obj: Represents peer/vdev/pdev/psoc + * @node: List node from Logically deleted list + * @obj_type: Object type for peer/vdev/pdev/psoc + * @tstamp: Timestamp when node entered logically + * deleted state + */ +struct log_del_obj { + void *obj; + qdf_list_node_t node; + enum wlan_objmgr_obj_type obj_type; + qdf_time_t tstamp; +}; + +/** + * struct wlan_objmgr_debug_info - Objmgr debug info + * for Logically deleted object + * @obj_timer: Timer object + * @obj_list: list object having linking logically + * deleted nodes + * @list_lock: lock to protect list + */ +struct wlan_objmgr_debug_info { + qdf_timer_t obj_timer; + qdf_list_t obj_list; + qdf_spinlock_t list_lock; +}; + +static const char * +wlan_obj_type_get_obj_name(enum wlan_objmgr_obj_type obj_type) +{ + static const struct wlan_obj_type_to_name { + enum wlan_objmgr_obj_type obj_type; + const char *name; + } obj_type_name[WLAN_OBJ_TYPE_MAX] = { + {WLAN_PSOC_OP, "psoc"}, + {WLAN_PDEV_OP, "pdev"}, + {WLAN_VDEV_OP, "vdev"}, + {WLAN_PEER_OP, "peer"} + }; + uint8_t idx; + + for (idx = 0; idx < WLAN_OBJ_TYPE_MAX; idx++) { + if (obj_type == obj_type_name[idx].obj_type) + return obj_type_name[idx].name; + } + + return NULL; +} + +static uint8_t* +wlan_objmgr_debug_get_macaddr(void *obj, + enum wlan_objmgr_obj_type obj_type) +{ + switch (obj_type) { + case WLAN_PSOC_OP: + return wlan_psoc_get_hw_macaddr(obj); + case WLAN_PDEV_OP: + return wlan_pdev_get_hw_macaddr(obj); + case WLAN_VDEV_OP: + return wlan_vdev_mlme_get_macaddr(obj); + case WLAN_PEER_OP: + return wlan_peer_get_macaddr(obj); + default: + obj_mgr_err("invalid obj_type"); + return NULL; + } +} + +static void +wlan_objmgr_insert_ld_obj_to_list(struct wlan_objmgr_debug_info *debug_info, + qdf_list_node_t *node) +{ + /* Insert object to list with lock being held*/ + 
qdf_spin_lock_bh(&debug_info->list_lock); + + /* Start timer only when list is empty */ + if (qdf_list_empty(&debug_info->obj_list)) + qdf_timer_start(&debug_info->obj_timer, + LOG_DEL_OBJ_TIMEOUT_VALUE_MSEC); + + qdf_list_insert_back(&debug_info->obj_list, node); + qdf_spin_unlock_bh(&debug_info->list_lock); +} + +void wlan_objmgr_notify_log_delete(void *obj, + enum wlan_objmgr_obj_type obj_type) +{ + struct wlan_objmgr_debug_info *debug_info; + const char *obj_name; + uint8_t *macaddr; + qdf_time_t tstamp; + struct log_del_obj *node; + + if (!obj) { + obj_mgr_err("object is null"); + return; + } + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + debug_info = g_umac_glb_obj->debug_info; + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + + if (!debug_info) { + obj_mgr_err("debug_info is null"); + return; + } + + macaddr = wlan_objmgr_debug_get_macaddr(obj, obj_type); + if (!macaddr) { + obj_mgr_err("macaddr is null"); + return; + } + + obj_name = wlan_obj_type_get_obj_name(obj_type); + if (!obj_name) { + obj_mgr_err("obj_name is null"); + return; + } + + tstamp = qdf_system_ticks_to_msecs(qdf_system_ticks()) / 1000; + node = qdf_mem_malloc(sizeof(*node)); + if (!node) { + obj_mgr_err("Object node creation failed"); + return; + } + node->obj = obj; + node->obj_type = obj_type; + node->tstamp = tstamp; + obj_mgr_debug("#%s : mac_addr :" QDF_MAC_ADDR_STR" entered L-state", + obj_name, QDF_MAC_ADDR_ARRAY(macaddr)); + wlan_objmgr_insert_ld_obj_to_list(debug_info, &node->node); +} + +static void +wlan_objmgr_rem_ld_obj_from_list(void *obj, + struct wlan_objmgr_debug_info *debug_info, + enum wlan_objmgr_obj_type obj_type) +{ + qdf_list_node_t *node = NULL; + struct log_del_obj *obj_to_remove = NULL; + qdf_list_t *list; + QDF_STATUS status; + + list = &debug_info->obj_list; + qdf_spin_lock_bh(&debug_info->list_lock); + status = qdf_list_peek_front(list, &node); + + while (QDF_IS_STATUS_SUCCESS(status)) { + obj_to_remove = qdf_container_of(node, + struct log_del_obj, 
node); + if (obj_to_remove->obj == obj && + obj_to_remove->obj_type == obj_type) { + status = qdf_list_remove_node(list, + &obj_to_remove->node); + /* Stop timer if list is empty */ + if (QDF_IS_STATUS_SUCCESS(status)) { + if (qdf_list_empty(&debug_info->obj_list)) + qdf_timer_stop(&debug_info->obj_timer); + qdf_mem_free(obj_to_remove); + } + break; + } + status = qdf_list_peek_next(list, node, &node); + }; + qdf_spin_unlock_bh(&debug_info->list_lock); +} + +void wlan_objmgr_notify_destroy(void *obj, + enum wlan_objmgr_obj_type obj_type) +{ + struct wlan_objmgr_debug_info *debug_info; + uint8_t *macaddr; + const char *obj_name; + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + debug_info = g_umac_glb_obj->debug_info; + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + + if (!debug_info) { + obj_mgr_err("debug_info is null"); + return; + } + macaddr = wlan_objmgr_debug_get_macaddr(obj, obj_type); + if (!macaddr) { + obj_mgr_err("macaddr is null"); + return; + } + obj_name = wlan_obj_type_get_obj_name(obj_type); + if (!obj_name) { + obj_mgr_err("obj_name is null"); + return; + } + obj_mgr_debug("#%s, macaddr: " QDF_MAC_ADDR_STR" exited L-state", + obj_name, QDF_MAC_ADDR_ARRAY(macaddr)); + + wlan_objmgr_rem_ld_obj_from_list(obj, debug_info, obj_type); +} + +/** + * wlan_objmgr_debug_obj_destroyed_panic() - Panic in case obj is in L-state + * for long + * @obj_name: The name of the module ID + * + * This will invoke panic in the case that the obj is in logically destroyed + * state for a long time. 
The panic is invoked only in case feature flag + * WLAN_OBJMGR_PANIC_ON_BUG is enabled + * + * Return: None + */ +#ifdef CONFIG_LEAK_DETECTION +static inline void wlan_objmgr_debug_obj_destroyed_panic(const char *obj_name) +{ + obj_mgr_alert("#%s in L-state for too long!", obj_name); + QDF_BUG(0); +} +#else +static inline void wlan_objmgr_debug_obj_destroyed_panic(const char *obj_name) +{ +} +#endif + +/* + * wlan_objmgr_print_pending_refs() - Print pending refs according to the obj + * @obj: Represents peer/vdev/pdev/psoc + * @obj_type: Object type for peer/vdev/pdev/psoc + * + * Return: None + */ +static void wlan_objmgr_print_pending_refs(void *obj, + enum wlan_objmgr_obj_type obj_type) +{ + switch (obj_type) { + case WLAN_PSOC_OP: + wlan_objmgr_print_ref_ids(((struct wlan_objmgr_psoc *) + obj)->soc_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_DEBUG); + break; + case WLAN_PDEV_OP: + wlan_objmgr_print_ref_ids(((struct wlan_objmgr_pdev *) + obj)->pdev_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_DEBUG); + break; + case WLAN_VDEV_OP: + wlan_objmgr_print_ref_ids(((struct wlan_objmgr_vdev *) + obj)->vdev_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_DEBUG); + break; + case WLAN_PEER_OP: + wlan_objmgr_print_ref_ids(((struct wlan_objmgr_peer *) + obj)->peer_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_DEBUG); + break; + default: + obj_mgr_debug("invalid obj_type"); + } +} + +/* timeout handler for iterating logically deleted object */ + +static void wlan_objmgr_iterate_log_del_obj_handler(void *timer_arg) +{ + enum wlan_objmgr_obj_type obj_type; + uint8_t *macaddr; + const char *obj_name; + struct wlan_objmgr_debug_info *debug_info; + qdf_list_node_t *node; + qdf_list_t *log_del_obj_list = NULL; + struct log_del_obj *del_obj = NULL; + qdf_time_t cur_tstamp; + QDF_STATUS status; + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + debug_info = g_umac_glb_obj->debug_info; + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + + if (!debug_info) { + obj_mgr_err("debug_info is not initialized"); + return; 
+ } + + log_del_obj_list = &debug_info->obj_list; + qdf_spin_lock_bh(&debug_info->list_lock); + + status = qdf_list_peek_front(log_del_obj_list, &node); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_spin_unlock_bh(&debug_info->list_lock); + return; + } + + /* compute the current timestamp in seconds + * need to compare with destroy duration of object + */ + cur_tstamp = (qdf_system_ticks_to_msecs(qdf_system_ticks()) / 1000); + + do { + del_obj = qdf_container_of(node, struct log_del_obj, node); + obj_type = del_obj->obj_type; + macaddr = wlan_objmgr_debug_get_macaddr(del_obj->obj, obj_type); + obj_name = wlan_obj_type_get_obj_name(obj_type); + + /* If object is in logically deleted state for time more than + * destroy duration, print the object type and MAC + */ + if (cur_tstamp < (del_obj->tstamp + + LOG_DEL_OBJ_DESTROY_DURATION_SEC)) { + break; + } + if (!macaddr) { + qdf_spin_unlock_bh(&debug_info->list_lock); + obj_mgr_err("macaddr is null"); + QDF_BUG(0); + goto modify_timer; + } + if (!obj_name) { + qdf_spin_unlock_bh(&debug_info->list_lock); + obj_mgr_err("obj_name is null"); + QDF_BUG(0); + goto modify_timer; + } + + obj_mgr_alert("#%s in L-state,MAC: " QDF_MAC_ADDR_STR, + obj_name, QDF_MAC_ADDR_ARRAY(macaddr)); + wlan_objmgr_print_pending_refs(del_obj->obj, obj_type); + + if (cur_tstamp > del_obj->tstamp + + LOG_DEL_OBJ_DESTROY_ASSERT_DURATION_SEC) { + if (!qdf_is_recovering() && !qdf_is_fw_down()) + wlan_objmgr_debug_obj_destroyed_panic(obj_name); + } + + status = qdf_list_peek_next(log_del_obj_list, node, &node); + + } while (QDF_IS_STATUS_SUCCESS(status)); + + qdf_spin_unlock_bh(&debug_info->list_lock); + +modify_timer: + /* modify timer timeout value */ + qdf_timer_mod(&debug_info->obj_timer, LOG_DEL_OBJ_TIMEOUT_VALUE_MSEC); +} + +void wlan_objmgr_debug_info_deinit(void) +{ + struct log_del_obj *obj_to_remove; + struct wlan_objmgr_debug_info *debug_info; + qdf_list_node_t *node = NULL; + qdf_list_t *list; + bool is_child_alive = false; + + 
qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + debug_info = g_umac_glb_obj->debug_info; + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + + if (!debug_info) { + obj_mgr_err("debug_info is not initialized"); + return; + } + list = &debug_info->obj_list; + + qdf_spin_lock_bh(&debug_info->list_lock); + + /* Check if any child of global object is in L-state and remove it, + * ideally it shouldn't be + */ + while (qdf_list_remove_front(list, &node) == QDF_STATUS_SUCCESS) { + is_child_alive = true; + obj_to_remove = qdf_container_of(node, + struct log_del_obj, node); + if (qdf_list_empty(&debug_info->obj_list)) + qdf_timer_stop(&debug_info->obj_timer); + /* free the object */ + qdf_mem_free(obj_to_remove); + } + qdf_spin_unlock_bh(&debug_info->list_lock); + + if (is_child_alive) { + obj_mgr_alert("This shouldn't happen!!, No child of global" + "object should be in L-state, as global obj" + "is going to destroy"); + QDF_BUG(0); + } + + /* free timer, destroy spinlock, list and debug_info object as + * global object is going to free + */ + qdf_list_destroy(list); + qdf_timer_free(&debug_info->obj_timer); + qdf_spinlock_destroy(&debug_info->list_lock); + qdf_mem_free(debug_info); + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + g_umac_glb_obj->debug_info = NULL; + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); +} + +void wlan_objmgr_debug_info_init(void) +{ + struct wlan_objmgr_debug_info *debug_info; + + debug_info = qdf_mem_malloc(sizeof(*debug_info)); + if (!debug_info) { + obj_mgr_err("debug_info allocation failed"); + g_umac_glb_obj->debug_info = NULL; + return; + } + + /* Initialize timer with timeout handler */ + qdf_timer_init(NULL, &debug_info->obj_timer, + wlan_objmgr_iterate_log_del_obj_handler, + NULL, QDF_TIMER_TYPE_WAKE_APPS); + + /* Initialze the node_count to 0 and create list*/ + qdf_list_create(&debug_info->obj_list, + LOG_DEL_OBJ_LIST_MAX_COUNT); + + /* Initialize the spin_lock to protect list */ + 
qdf_spinlock_create(&debug_info->list_lock); + + /* attach debug_info object to global object */ + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + g_umac_glb_obj->debug_info = debug_info; + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_global_obj.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_global_obj.c new file mode 100644 index 0000000000000000000000000000000000000000..065cb19114cefe27816523f60f8107ffa83a4173 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_global_obj.c @@ -0,0 +1,788 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + /** + * DOC: Public APIs to perform operations on Global objects + */ + +#include "wlan_objmgr_global_obj_i.h" +#include +#include "wlan_objmgr_psoc_obj.h" +#include "qdf_mem.h" +#include + +/* Global object, it is declared globally */ +struct wlan_objmgr_global *g_umac_glb_obj; + +/* +** APIs to Create/Delete Global object APIs +*/ +QDF_STATUS wlan_objmgr_global_obj_init(void) +{ + struct wlan_objmgr_global *umac_global_obj; + + /* If it is already created, ignore */ + if (g_umac_glb_obj != NULL) { + obj_mgr_err("Global object is already created"); + return QDF_STATUS_E_FAILURE; + } + + /* Allocation of memory for Global object */ + umac_global_obj = (struct wlan_objmgr_global *)qdf_mem_malloc( + sizeof(*umac_global_obj)); + if (umac_global_obj == NULL) { + obj_mgr_err("Global object alloc failed due to malloc"); + return QDF_STATUS_E_NOMEM; + } + /* Store Global object pointer in Global variable */ + g_umac_glb_obj = umac_global_obj; + /* Initialize spinlock */ + qdf_spinlock_create(&g_umac_glb_obj->global_lock); + wlan_objmgr_debug_info_init(); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_global_obj_init); + +QDF_STATUS wlan_objmgr_global_obj_deinit(void) +{ + /* If it is already destroyed */ + if (g_umac_glb_obj == NULL) { + obj_mgr_err("Global object is not allocated"); + return QDF_STATUS_E_FAILURE; + } + + wlan_objmgr_debug_info_deinit(); + + if (QDF_STATUS_SUCCESS == wlan_objmgr_global_obj_can_destroyed()) { + qdf_spinlock_destroy(&g_umac_glb_obj->global_lock); + qdf_mem_free(g_umac_glb_obj); + g_umac_glb_obj = NULL; + } else { + obj_mgr_err("PSOCs are leaked can't free global objmgr ctx"); + WLAN_OBJMGR_BUG(0); + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_global_obj_deinit); + +/** + ** APIs to register/unregister handlers + */ +QDF_STATUS wlan_objmgr_register_psoc_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_create_handler handler, + void *arg) +{ + /* If id is not within valid range, 
return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->psoc_create_handler[id] != NULL) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->psoc_create_handler[id] = handler; + g_umac_glb_obj->psoc_create_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_register_psoc_create_handler); + +QDF_STATUS wlan_objmgr_unregister_psoc_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_create_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->psoc_create_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->psoc_create_handler[id] = NULL; + g_umac_glb_obj->psoc_create_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_unregister_psoc_create_handler); + +QDF_STATUS wlan_objmgr_register_psoc_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_destroy_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of 
range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->psoc_destroy_handler[id] != NULL) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->psoc_destroy_handler[id] = handler; + g_umac_glb_obj->psoc_destroy_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_register_psoc_destroy_handler); + +QDF_STATUS wlan_objmgr_unregister_psoc_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_destroy_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->psoc_destroy_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->psoc_destroy_handler[id] = NULL; + g_umac_glb_obj->psoc_destroy_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_unregister_psoc_destroy_handler); + +QDF_STATUS wlan_objmgr_register_psoc_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_status_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + 
qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->psoc_status_handler[id] != NULL) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->psoc_status_handler[id] = handler; + g_umac_glb_obj->psoc_status_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_unregister_psoc_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_psoc_status_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->psoc_status_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->psoc_status_handler[id] = NULL; + g_umac_glb_obj->psoc_status_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + + +QDF_STATUS wlan_objmgr_register_pdev_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_create_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->pdev_create_handler[id] != NULL) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already 
registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->pdev_create_handler[id] = handler; + g_umac_glb_obj->pdev_create_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_register_pdev_create_handler); + +QDF_STATUS wlan_objmgr_unregister_pdev_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_create_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->pdev_create_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->pdev_create_handler[id] = NULL; + g_umac_glb_obj->pdev_create_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_unregister_pdev_create_handler); + +QDF_STATUS wlan_objmgr_register_pdev_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_destroy_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->pdev_destroy_handler[id] != NULL) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and 
args in Global object table */ + g_umac_glb_obj->pdev_destroy_handler[id] = handler; + g_umac_glb_obj->pdev_destroy_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_register_pdev_destroy_handler); + +QDF_STATUS wlan_objmgr_unregister_pdev_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_destroy_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->pdev_destroy_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for Component %d is not registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->pdev_destroy_handler[id] = NULL; + g_umac_glb_obj->pdev_destroy_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_unregister_pdev_destroy_handler); + +QDF_STATUS wlan_objmgr_register_pdev_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_status_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->pdev_status_handler[id] != NULL) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->pdev_status_handler[id] = handler; + 
g_umac_glb_obj->pdev_status_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_unregister_pdev_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_pdev_status_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->pdev_status_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for Component %d is not registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->pdev_status_handler[id] = NULL; + g_umac_glb_obj->pdev_status_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + + +QDF_STATUS wlan_objmgr_register_vdev_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_create_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->vdev_create_handler[id] != NULL) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->vdev_create_handler[id] = handler; + g_umac_glb_obj->vdev_create_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_unregister_vdev_create_handler( + enum wlan_umac_comp_id id, + 
wlan_objmgr_vdev_create_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->vdev_create_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->vdev_create_handler[id] = NULL; + g_umac_glb_obj->vdev_create_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_register_vdev_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_destroy_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->vdev_destroy_handler[id] != NULL) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->vdev_destroy_handler[id] = handler; + g_umac_glb_obj->vdev_destroy_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_unregister_vdev_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_destroy_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return 
QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->vdev_destroy_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->vdev_destroy_handler[id] = NULL; + g_umac_glb_obj->vdev_destroy_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_register_vdev_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_status_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->vdev_status_handler[id] != NULL) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->vdev_status_handler[id] = handler; + g_umac_glb_obj->vdev_status_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_unregister_vdev_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_vdev_status_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->vdev_status_handler[id] != handler) { + 
qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for Component %d is not registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->vdev_status_handler[id] = NULL; + g_umac_glb_obj->vdev_status_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + + +QDF_STATUS wlan_objmgr_register_peer_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_create_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->peer_create_handler[id] != NULL) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->peer_create_handler[id] = handler; + g_umac_glb_obj->peer_create_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + + +QDF_STATUS wlan_objmgr_unregister_peer_create_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_create_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->peer_create_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + 
g_umac_glb_obj->peer_create_handler[id] = NULL; + g_umac_glb_obj->peer_create_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_register_peer_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_destroy_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->peer_destroy_handler[id] != NULL) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->peer_destroy_handler[id] = handler; + g_umac_glb_obj->peer_destroy_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_unregister_peer_destroy_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_destroy_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->peer_destroy_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->peer_destroy_handler[id] = NULL; + g_umac_glb_obj->peer_destroy_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS 
wlan_objmgr_register_peer_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_status_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is a valid entry, return failure */ + if (g_umac_glb_obj->peer_status_handler[id] != NULL) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is already registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Store handler and args in Global object table */ + g_umac_glb_obj->peer_status_handler[id] = handler; + g_umac_glb_obj->peer_status_handler_arg[id] = arg; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_unregister_peer_status_handler( + enum wlan_umac_comp_id id, + wlan_objmgr_peer_status_handler handler, + void *arg) +{ + /* If id is not within valid range, return */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("Component %d is out of range", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* If there is an invalid entry, return failure */ + if (g_umac_glb_obj->peer_status_handler[id] != handler) { + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + obj_mgr_err("Callback for comp %d is not registered", id); + return QDF_STATUS_E_FAILURE; + } + /* Reset handlers, and args to NULL */ + g_umac_glb_obj->peer_status_handler[id] = NULL; + g_umac_glb_obj->peer_status_handler_arg[id] = NULL; + + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_psoc_object_attach(struct wlan_objmgr_psoc *psoc) +{ + uint8_t index = 0; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* Find free slot in PSOC table, store the PSOC */ + while (index < 
WLAN_OBJMGR_MAX_DEVICES) { + if (g_umac_glb_obj->psoc[index] == NULL) { + /* Found free slot, store psoc */ + g_umac_glb_obj->psoc[index] = psoc; + psoc->soc_objmgr.psoc_id = index; + status = QDF_STATUS_SUCCESS; + break; + } + index++; + } + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + return status; +} + +QDF_STATUS wlan_objmgr_psoc_object_detach(struct wlan_objmgr_psoc *psoc) +{ + uint8_t psoc_id; + + psoc_id = psoc->soc_objmgr.psoc_id; + QDF_BUG(psoc_id < WLAN_OBJMGR_MAX_DEVICES); + if (psoc_id >= WLAN_OBJMGR_MAX_DEVICES) + return QDF_STATUS_E_INVAL; + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + g_umac_glb_obj->psoc[psoc_id] = NULL; + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_global_obj_can_destroyed(void) +{ + uint8_t index = 0; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + qdf_spin_lock_bh(&g_umac_glb_obj->global_lock); + /* Check whether all PSOCs are freed */ + while (index < WLAN_OBJMGR_MAX_DEVICES) { + if (g_umac_glb_obj->psoc[index] != NULL) { + status = QDF_STATUS_E_FAILURE; + break; + } + index++; + } + qdf_spin_unlock_bh(&g_umac_glb_obj->global_lock); + + return status; +} +qdf_export_symbol(wlan_objmgr_global_obj_can_destroyed); + +void wlan_objmgr_print_ref_ids(qdf_atomic_t *id, + QDF_TRACE_LEVEL log_level) +{ + uint32_t i; + uint32_t pending_ref; + + obj_mgr_log_level(log_level, "Pending references of object"); + for (i = 0; i < WLAN_REF_ID_MAX; i++) { + pending_ref = qdf_atomic_read(&id[i]); + if (pending_ref) + obj_mgr_log_level(log_level, "%s -- %d", + string_from_dbgid(i), pending_ref); + } + + return; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_global_obj_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_global_obj_i.h new file mode 100644 index 0000000000000000000000000000000000000000..c42ca6ca911907052444b6ac8d0dc6a4bc12bc70 --- /dev/null +++ 
b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_global_obj_i.h @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Define the global data structure of UMAC + */ +#ifndef _WLAN_OBJMGR_GLOBAL_OBJ_I_H_ +#define _WLAN_OBJMGR_GLOBAL_OBJ_I_H_ + +#include "wlan_objmgr_cmn.h" + +struct wlan_objmgr_debug_info; +/** + * struct wlan_objmgr_global - Global object definition + * @psoc[]: Array of PSOCs to maintain PSOC's list, + * its optional + * @psoc_create_handler[]: PSOC create handler array + * @psoc_create_handler_arg[]: PSOC create handler args array + * @psoc_destroy_handler[]: PSOC destroy handler array + * @psoc_destroy_handler_arg[]: PSOC destroy handler args array + * @psoc_status_handler[]: PSOC status handler array + * @psoc_status_handler_arg[]: PSOC status handler args array + * @pdev_create_handler[]: PDEV create handler array + * @pdev_create_handler_arg[]: PDEV create handler args array + * @pdev_destroy_handler[]: PDEV destroy handler array + * @pdev_destroy_handler_arg[]: PDEV destroy handler args array + * @pdev_status_handler[]: PDEV status handler array + * @pdev_status_handler_arg[]: PDEV status handler args array + * 
@vdev_create_handler[]: VDEV create handler array + * @vdev_create_handler_arg[]: VDEV create handler args array + * @vdev_destroy_handler[]: VDEV destroy handler array + * @vdev_destroy_handler_arg[]: VDEV destroy handler args array + * @vdev_status_handler[]: VDEV status handler array + * @vdev_status_handler_arg[]: VDEV status handler args array + * @peer_create_handler[]: PEER create handler array + * @peer_create_handler_arg[]: PEER create handler args array + * @peer_destroy_handler[]: PEER destroy handler array + * @peer_destroy_handler_arg[]: PEER destroy handler args array + * @peer_status_handler[]: PEER status handler array + * @peer_status_handler_arg[]: PEER status handler args array + * @debug_info: Objmgr debug information + * @global_lock: Global lock + */ +struct wlan_objmgr_global { + struct wlan_objmgr_psoc *psoc[WLAN_OBJMGR_MAX_DEVICES]; + wlan_objmgr_psoc_create_handler + psoc_create_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *psoc_create_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_psoc_destroy_handler + psoc_destroy_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *psoc_destroy_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_psoc_status_handler + psoc_status_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *psoc_status_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_pdev_create_handler + pdev_create_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *pdev_create_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_pdev_destroy_handler + pdev_destroy_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *pdev_destroy_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_pdev_status_handler + pdev_status_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *pdev_status_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_vdev_create_handler + vdev_create_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *vdev_create_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_vdev_destroy_handler + vdev_destroy_handler[WLAN_UMAC_MAX_COMPONENTS]; + void 
*vdev_destroy_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_vdev_status_handler + vdev_status_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *vdev_status_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_peer_create_handler + peer_create_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *peer_create_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_peer_destroy_handler + peer_destroy_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *peer_destroy_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + wlan_objmgr_peer_status_handler + peer_status_handler[WLAN_UMAC_MAX_COMPONENTS]; + void *peer_status_handler_arg[WLAN_UMAC_MAX_COMPONENTS]; + struct wlan_objmgr_debug_info *debug_info; + qdf_spinlock_t global_lock; +}; + +#define MAX_SLEEP_ITERATION 5 + +extern struct wlan_objmgr_global *g_umac_glb_obj; + +/** + * wlan_objmgr_psoc_object_attach() - attach psoc to global object + * @psoc - PSOC object + * + * attaches PSOC to global psoc list + * + * Return: SUCCESS + * Failure (Max supported PSOCs exceeded) + */ +QDF_STATUS wlan_objmgr_psoc_object_attach( + struct wlan_objmgr_psoc *psoc); + +/** + * wlan_objmgr_psoc_object_detach() - detach psoc from global object + * @psoc - PSOC object + * + * detaches PSOC from global psoc list + * + * Return: SUCCESS + * Failure (if list is empty and PSOC is not present) + */ +QDF_STATUS wlan_objmgr_psoc_object_detach( + struct wlan_objmgr_psoc *psoc); + +/** + * wlan_objmgr_print_ref_ids() - Print ref counts of modules + * @id - array of ref debug + * @log_level - log level + * + * Iterates through the array and prints the ref count debug + * + * Return: nothing + */ +void wlan_objmgr_print_ref_ids(qdf_atomic_t *id, + QDF_TRACE_LEVEL log_level); +#endif /* _WLAN_OBJMGR_GLOBAL_OBJ_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_pdev_obj.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_pdev_obj.c new file mode 100644 index 
0000000000000000000000000000000000000000..8884b38d54815ad3b04f3de32a191a2ba8da252f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_pdev_obj.c @@ -0,0 +1,906 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + /** + * DOC: Public APIs to perform operations on Global objects + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "wlan_objmgr_global_obj_i.h" +#include "wlan_objmgr_psoc_obj_i.h" +#include "wlan_objmgr_pdev_obj_i.h" + + +/** + ** APIs to Create/Delete Global object APIs + */ +static QDF_STATUS wlan_objmgr_pdev_object_status( + struct wlan_objmgr_pdev *pdev) +{ + uint8_t id; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + wlan_pdev_obj_lock(pdev); + /* Iterate through all components to derive the object status */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + /* If component disabled, Ignore */ + if (pdev->obj_status[id] == QDF_STATUS_COMP_DISABLED) { + continue; + /* If component operates in Async, status is Partially created, + break */ + } else if (pdev->obj_status[id] == QDF_STATUS_COMP_ASYNC) { + if (pdev->pdev_comp_priv_obj[id] == NULL) { + status = QDF_STATUS_COMP_ASYNC; + break; + } + /* If component failed to allocate its object, treat it as + failure, complete object need to be cleaned up */ + } else if ((pdev->obj_status[id] == QDF_STATUS_E_NOMEM) || + (pdev->obj_status[id] == QDF_STATUS_E_FAILURE)) { + status = QDF_STATUS_E_FAILURE; + break; + } + } + wlan_pdev_obj_unlock(pdev); + return status; +} + +static QDF_STATUS wlan_objmgr_pdev_obj_free(struct wlan_objmgr_pdev *pdev) +{ + + uint8_t pdev_id; + + if (pdev == NULL) { + obj_mgr_err("pdev obj is NULL"); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + /* Detach PDEV from PSOC PDEV's list */ + if (wlan_objmgr_psoc_pdev_detach(pdev->pdev_objmgr.wlan_psoc, pdev) == + QDF_STATUS_E_FAILURE) { + obj_mgr_err("PSOC PDEV detach failed: pdev-id: %d", pdev_id); + return QDF_STATUS_E_FAILURE; + } + qdf_spinlock_destroy(&pdev->pdev_lock); + qdf_mem_free(pdev); + + return QDF_STATUS_SUCCESS; +} + +struct wlan_objmgr_pdev *wlan_objmgr_pdev_obj_create( + struct wlan_objmgr_psoc *psoc, + struct 
pdev_osif_priv *osdev_priv) +{ + struct wlan_objmgr_pdev *pdev; + uint8_t id; + wlan_objmgr_pdev_create_handler handler; + wlan_objmgr_pdev_status_handler s_handler; + void *arg; + QDF_STATUS obj_status; + + if (psoc == NULL) { + obj_mgr_err("psoc is NULL"); + return NULL; + } + /* Allocate PDEV object's memory */ + pdev = qdf_mem_malloc(sizeof(*pdev)); + if (pdev == NULL) { + obj_mgr_err("pdev alloc failed"); + return NULL; + } + pdev->obj_state = WLAN_OBJ_STATE_ALLOCATED; + /* Initialize PDEV spinlock */ + qdf_spinlock_create(&pdev->pdev_lock); + /* Attach PDEV with PSOC */ + if (wlan_objmgr_psoc_pdev_attach(psoc, pdev) + != QDF_STATUS_SUCCESS) { + obj_mgr_err("pdev psoc attach failed"); + qdf_spinlock_destroy(&pdev->pdev_lock); + qdf_mem_free(pdev); + return NULL; + } + /* Save PSOC object pointer in PDEV */ + wlan_pdev_set_psoc(pdev, psoc); + /* Initialize PDEV's VDEV list, assign default values */ + qdf_list_create(&pdev->pdev_objmgr.wlan_vdev_list, + WLAN_UMAC_PDEV_MAX_VDEVS); + pdev->pdev_objmgr.wlan_vdev_count = 0; + pdev->pdev_objmgr.max_vdev_count = WLAN_UMAC_PDEV_MAX_VDEVS; + pdev->pdev_objmgr.wlan_peer_count = 0; + pdev->pdev_objmgr.temp_peer_count = 0; + pdev->pdev_objmgr.max_peer_count = wlan_psoc_get_max_peer_count(psoc); + /* Save HDD/OSIF pointer */ + pdev->pdev_nif.pdev_ospriv = osdev_priv; + qdf_atomic_init(&pdev->pdev_objmgr.ref_cnt); + pdev->pdev_objmgr.print_cnt = 0; + wlan_objmgr_pdev_get_ref(pdev, WLAN_OBJMGR_ID); + /* Invoke registered create handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + handler = g_umac_glb_obj->pdev_create_handler[id]; + arg = g_umac_glb_obj->pdev_create_handler_arg[id]; + if (handler != NULL) + pdev->obj_status[id] = handler(pdev, arg); + else + pdev->obj_status[id] = QDF_STATUS_COMP_DISABLED; + } + /* Derive object status */ + obj_status = wlan_objmgr_pdev_object_status(pdev); + + if (obj_status == QDF_STATUS_SUCCESS) { + /* Object status is SUCCESS, Object is created */ + pdev->obj_state = 
WLAN_OBJ_STATE_CREATED; + /* Invoke component registered status handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + s_handler = g_umac_glb_obj->pdev_status_handler[id]; + arg = g_umac_glb_obj->pdev_status_handler_arg[id]; + if (s_handler != NULL) { + s_handler(pdev, arg, + QDF_STATUS_SUCCESS); + } + } + /* Few components operate in asynchronous communication, object state + partially created */ + } else if (obj_status == QDF_STATUS_COMP_ASYNC) { + pdev->obj_state = WLAN_OBJ_STATE_PARTIALLY_CREATED; + /* Component object failed to be created, clean up the object */ + } else if (obj_status == QDF_STATUS_E_FAILURE) { + /* Clean up the psoc */ + obj_mgr_err("PDEV component objects allocation failed"); + wlan_objmgr_pdev_obj_delete(pdev); + return NULL; + } + + obj_mgr_info("Created pdev %d", pdev->pdev_objmgr.wlan_pdev_id); + + return pdev; +} +qdf_export_symbol(wlan_objmgr_pdev_obj_create); + +static QDF_STATUS wlan_objmgr_pdev_obj_destroy(struct wlan_objmgr_pdev *pdev) +{ + uint8_t id; + wlan_objmgr_pdev_destroy_handler handler; + QDF_STATUS obj_status; + void *arg; + uint8_t pdev_id; + + if (pdev == NULL) { + obj_mgr_err("pdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + wlan_objmgr_notify_destroy(pdev, WLAN_PDEV_OP); + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + obj_mgr_info("Physically deleting pdev %d", pdev_id); + + if (pdev->obj_state != WLAN_OBJ_STATE_LOGICALLY_DELETED) { + obj_mgr_err("PDEV object delete is not invoked pdevid:%d objstate:%d", + pdev_id, pdev->obj_state); + WLAN_OBJMGR_BUG(0); + } + + /* Invoke registered destroy handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + handler = g_umac_glb_obj->pdev_destroy_handler[id]; + arg = g_umac_glb_obj->pdev_destroy_handler_arg[id]; + if (handler && + (pdev->obj_status[id] == QDF_STATUS_SUCCESS || + pdev->obj_status[id] == QDF_STATUS_COMP_ASYNC)) + pdev->obj_status[id] = handler(pdev, arg); + else + pdev->obj_status[id] = QDF_STATUS_COMP_DISABLED; + } + /* Derive 
object status */ + obj_status = wlan_objmgr_pdev_object_status(pdev); + + if (obj_status == QDF_STATUS_E_FAILURE) { + obj_mgr_err("PDEV component objects destroy failed: pdev-id:%d", + pdev_id); + /* Ideally should not happen */ + /* This leads to memleak ??? how to handle */ + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + /* Deletion is in progress */ + if (obj_status == QDF_STATUS_COMP_ASYNC) { + pdev->obj_state = WLAN_OBJ_STATE_PARTIALLY_DELETED; + return QDF_STATUS_COMP_ASYNC; + } + + /* Free PDEV object */ + return wlan_objmgr_pdev_obj_free(pdev); +} + +QDF_STATUS wlan_objmgr_pdev_obj_delete(struct wlan_objmgr_pdev *pdev) +{ + uint8_t print_idx; + + if (pdev == NULL) { + obj_mgr_err("pdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + + obj_mgr_info("Logically deleting pdev %d", + pdev->pdev_objmgr.wlan_pdev_id); + + print_idx = qdf_get_pidx(); + wlan_objmgr_print_ref_ids(pdev->pdev_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_DEBUG); + /* + * Update PDEV object state to LOGICALLY DELETED + * It prevents further access of this object + */ + wlan_pdev_obj_lock(pdev); + pdev->obj_state = WLAN_OBJ_STATE_LOGICALLY_DELETED; + wlan_pdev_obj_unlock(pdev); + wlan_objmgr_notify_log_delete(pdev, WLAN_PDEV_OP); + wlan_objmgr_pdev_release_ref(pdev, WLAN_OBJMGR_ID); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_pdev_obj_delete); + +/** + ** APIs to attach/detach component objects + */ +QDF_STATUS wlan_objmgr_pdev_component_obj_attach( + struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj, + QDF_STATUS status) +{ + uint8_t i; + wlan_objmgr_pdev_status_handler s_hlr; + void *a; + QDF_STATUS obj_status; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + obj_mgr_err("component-id %d is not supported", id); + return QDF_STATUS_MAXCOMP_FAIL; + } + wlan_pdev_obj_lock(pdev); + /* If there is a valid entry, return failure */ + if (pdev->pdev_comp_priv_obj[id] != NULL) { + obj_mgr_err("component-%d already have 
valid pointer", id); + wlan_pdev_obj_unlock(pdev); + return QDF_STATUS_E_FAILURE; + } + /* Save component's pointer and status */ + pdev->pdev_comp_priv_obj[id] = comp_priv_obj; + pdev->obj_status[id] = status; + + wlan_pdev_obj_unlock(pdev); + + if (pdev->obj_state != WLAN_OBJ_STATE_PARTIALLY_CREATED) + return QDF_STATUS_SUCCESS; + /** + * If PDEV object status is partially created means, this API is + * invoked with differnt context, this block should be executed for + * async components only + */ + /* Derive status */ + obj_status = wlan_objmgr_pdev_object_status(pdev); + /* STATUS_SUCCESS means, object is CREATED */ + if (obj_status == QDF_STATUS_SUCCESS) + pdev->obj_state = WLAN_OBJ_STATE_CREATED; + /* update state as CREATION failed, caller has to delete the + PDEV object */ + else if (obj_status == QDF_STATUS_E_FAILURE) + pdev->obj_state = WLAN_OBJ_STATE_CREATION_FAILED; + /* Notify components about the CREATION success/failure */ + if ((obj_status == QDF_STATUS_SUCCESS) || + (obj_status == QDF_STATUS_E_FAILURE)) { + /* nofity object status */ + for (i = 0; i < WLAN_UMAC_MAX_COMPONENTS; i++) { + s_hlr = g_umac_glb_obj->pdev_status_handler[i]; + a = g_umac_glb_obj->pdev_status_handler_arg[i]; + if (s_hlr != NULL) + s_hlr(pdev, a, obj_status); + } + } + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_pdev_component_obj_attach); + +QDF_STATUS wlan_objmgr_pdev_component_obj_detach( + struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj) +{ + QDF_STATUS obj_status; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_pdev_obj_lock(pdev); + /* If there is a invalid entry, return failure */ + if (pdev->pdev_comp_priv_obj[id] != comp_priv_obj) { + pdev->obj_status[id] = QDF_STATUS_E_FAILURE; + wlan_pdev_obj_unlock(pdev); + return QDF_STATUS_E_FAILURE; + } + /* Reset pointers to NULL, update the status*/ + pdev->pdev_comp_priv_obj[id] = NULL; + pdev->obj_status[id] 
= QDF_STATUS_SUCCESS; + wlan_pdev_obj_unlock(pdev); + + /* If PDEV object status is partially destroyed means, this API is + invoked with differnt context, this block should be executed for async + components only */ + if ((pdev->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) || + (pdev->obj_state == WLAN_OBJ_STATE_COMP_DEL_PROGRESS)) { + /* Derive object status */ + obj_status = wlan_objmgr_pdev_object_status(pdev); + if (obj_status == QDF_STATUS_SUCCESS) { + /*Update the status as Deleted, if full object + deletion is in progress */ + if (pdev->obj_state == + WLAN_OBJ_STATE_PARTIALLY_DELETED) + pdev->obj_state = WLAN_OBJ_STATE_DELETED; + /* Move to creation state, since this component + deletion alone requested */ + if (pdev->obj_state == + WLAN_OBJ_STATE_COMP_DEL_PROGRESS) + pdev->obj_state = WLAN_OBJ_STATE_CREATED; + /* Object status is failure */ + } else if (obj_status == QDF_STATUS_E_FAILURE) { + /*Update the status as Deletion failed, if full object + deletion is in progress */ + if (pdev->obj_state == + WLAN_OBJ_STATE_PARTIALLY_DELETED) + pdev->obj_state = + WLAN_OBJ_STATE_DELETION_FAILED; + /* Move to creation state, since this component + deletion alone requested (do not block other + components)*/ + if (pdev->obj_state == + WLAN_OBJ_STATE_COMP_DEL_PROGRESS) + pdev->obj_state = WLAN_OBJ_STATE_CREATED; + } + + /* Delete pdev object */ + if ((obj_status == QDF_STATUS_SUCCESS) && + (pdev->obj_state == WLAN_OBJ_STATE_DELETED)) { + /* Free PDEV object */ + return wlan_objmgr_pdev_obj_free(pdev); + } + } + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_pdev_component_obj_detach); + +/** + ** APIs to operations on pdev objects + */ +static void wlan_objmgr_pdev_vdev_iterate_peers(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev, + wlan_objmgr_pdev_op_handler handler, + void *arg, uint8_t lock_free_op, + wlan_objmgr_ref_dbgid dbg_id) +{ + qdf_list_t *peer_list = NULL; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_objmgr_peer 
*peer_next = NULL; + + /* Iterating through vdev's peer list, so lock is + needed */ + /* Get peer list of the vdev */ + peer_list = &vdev->vdev_objmgr.wlan_peer_list; + if (peer_list != NULL) { + peer = wlan_vdev_peer_list_peek_active_head(vdev, peer_list, + dbg_id); + while (peer != NULL) { + /* Invoke the handler */ + handler(pdev, (void *)peer, arg); + /* Get next peer pointer, increments the ref count */ + peer_next = wlan_peer_get_next_active_peer_of_vdev(vdev, + peer_list, peer, dbg_id); + wlan_objmgr_peer_release_ref(peer, dbg_id); + peer = peer_next; + } + } +} + +QDF_STATUS wlan_objmgr_pdev_iterate_obj_list( + struct wlan_objmgr_pdev *pdev, + enum wlan_objmgr_obj_type obj_type, + wlan_objmgr_pdev_op_handler handler, + void *arg, uint8_t lock_free_op, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_pdev_objmgr *objmgr = &pdev->pdev_objmgr; + qdf_list_t *vdev_list = NULL; + struct wlan_objmgr_vdev *vdev = NULL; + struct wlan_objmgr_vdev *vdev_next = NULL; + + /* VDEV list */ + vdev_list = &objmgr->wlan_vdev_list; + + switch (obj_type) { + case WLAN_VDEV_OP: + /* Iterate through all VDEV object, and invoke handler for each + VDEV object */ + vdev = wlan_pdev_vdev_list_peek_active_head(pdev, vdev_list, + dbg_id); + while (vdev != NULL) { + handler(pdev, (void *)vdev, arg); + /* Get next vdev, it increments ref of next vdev */ + vdev_next = wlan_vdev_get_next_active_vdev_of_pdev( + pdev, vdev_list, vdev, dbg_id); + wlan_objmgr_vdev_release_ref(vdev, dbg_id); + vdev = vdev_next; + } + break; + case WLAN_PEER_OP: + vdev = wlan_pdev_vdev_list_peek_active_head(pdev, vdev_list, + dbg_id); + while (vdev != NULL) { + wlan_objmgr_pdev_vdev_iterate_peers(pdev, vdev, handler, + arg, lock_free_op, dbg_id); + /* Get next vdev, it increments ref of next vdev */ + vdev_next = wlan_vdev_get_next_active_vdev_of_pdev( + pdev, vdev_list, vdev, dbg_id); + wlan_objmgr_vdev_release_ref(vdev, dbg_id); + vdev = vdev_next; + } + break; + default: + break; + } + + return 
QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_pdev_iterate_obj_list); + +QDF_STATUS wlan_objmgr_trigger_pdev_comp_priv_object_creation( + struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id) +{ + wlan_objmgr_pdev_create_handler handler; + void *arg; + QDF_STATUS obj_status = QDF_STATUS_SUCCESS; + + /* Component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_pdev_obj_lock(pdev); + /* If component object is already created, delete old + component object, then invoke creation */ + if (pdev->pdev_comp_priv_obj[id] != NULL) { + wlan_pdev_obj_unlock(pdev); + return QDF_STATUS_E_FAILURE; + } + wlan_pdev_obj_unlock(pdev); + + /* Invoke registered create handlers */ + handler = g_umac_glb_obj->pdev_create_handler[id]; + arg = g_umac_glb_obj->pdev_create_handler_arg[id]; + if (handler != NULL) + pdev->obj_status[id] = handler(pdev, arg); + else + return QDF_STATUS_E_FAILURE; + /* If object status is created, then only handle this object status */ + if (pdev->obj_state == WLAN_OBJ_STATE_CREATED) { + /* Derive object status */ + obj_status = wlan_objmgr_pdev_object_status(pdev); + /* Move PDEV object state to Partially created state */ + if (obj_status == QDF_STATUS_COMP_ASYNC) { + /*TODO atomic */ + pdev->obj_state = WLAN_OBJ_STATE_PARTIALLY_CREATED; + } + } + return obj_status; +} + +QDF_STATUS wlan_objmgr_trigger_pdev_comp_priv_object_deletion( + struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id) +{ + wlan_objmgr_pdev_destroy_handler handler; + void *arg; + QDF_STATUS obj_status = QDF_STATUS_SUCCESS; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_pdev_obj_lock(pdev); + /* Component object was never created, invalid operation */ + if (pdev->pdev_comp_priv_obj[id] == NULL) { + wlan_pdev_obj_unlock(pdev); + return QDF_STATUS_E_FAILURE; + } + wlan_pdev_obj_unlock(pdev); + + /* Invoke registered create handlers */ + handler = 
g_umac_glb_obj->pdev_destroy_handler[id]; + arg = g_umac_glb_obj->pdev_destroy_handler_arg[id]; + if (handler != NULL) + pdev->obj_status[id] = handler(pdev, arg); + else + return QDF_STATUS_E_FAILURE; + + /* If object status is created, then only handle this object status */ + if (pdev->obj_state == WLAN_OBJ_STATE_CREATED) { + obj_status = wlan_objmgr_pdev_object_status(pdev); + /* move object state to DEL progress */ + if (obj_status == QDF_STATUS_COMP_ASYNC) + pdev->obj_state = WLAN_OBJ_STATE_COMP_DEL_PROGRESS; + } + return obj_status; +} + +static void wlan_obj_pdev_vdevlist_add_tail(qdf_list_t *obj_list, + struct wlan_objmgr_vdev *obj) +{ + qdf_list_insert_back(obj_list, &obj->vdev_node); +} + +static QDF_STATUS wlan_obj_pdev_vdevlist_remove_vdev( + qdf_list_t *obj_list, + struct wlan_objmgr_vdev *vdev) +{ + qdf_list_node_t *vdev_node = NULL; + + if (vdev == NULL) + return QDF_STATUS_E_FAILURE; + /* get vdev list node element */ + vdev_node = &vdev->vdev_node; + /* list is empty, return failure */ + if (qdf_list_remove_node(obj_list, vdev_node) != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_pdev_vdev_attach(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev_objmgr *objmgr = &pdev->pdev_objmgr; + + wlan_pdev_obj_lock(pdev); + /* If Max vdev count exceeds, return failure */ + if (objmgr->wlan_vdev_count > objmgr->max_vdev_count) { + wlan_pdev_obj_unlock(pdev); + return QDF_STATUS_E_FAILURE; + } + /* Add vdev to pdev's vdev list */ + wlan_obj_pdev_vdevlist_add_tail(&objmgr->wlan_vdev_list, vdev); + /* Increment pdev ref count to make sure it won't be destroyed before */ + wlan_objmgr_pdev_get_ref(pdev, WLAN_OBJMGR_ID); + /* Increment vdev count of pdev */ + objmgr->wlan_vdev_count++; + wlan_pdev_obj_unlock(pdev); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_pdev_vdev_detach(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev) +{ + 
struct wlan_objmgr_pdev_objmgr *objmgr = &pdev->pdev_objmgr; + + wlan_pdev_obj_lock(pdev); + /* if vdev count is 0, return failure */ + if (objmgr->wlan_vdev_count == 0) { + wlan_pdev_obj_unlock(pdev); + return QDF_STATUS_E_FAILURE; + } + /* remove vdev from pdev's vdev list */ + wlan_obj_pdev_vdevlist_remove_vdev(&objmgr->wlan_vdev_list, vdev); + /* decrement vdev count */ + objmgr->wlan_vdev_count--; + wlan_pdev_obj_unlock(pdev); + /* Decrement pdev ref count since vdev is releasing reference */ + wlan_objmgr_pdev_release_ref(pdev, WLAN_OBJMGR_ID); + return QDF_STATUS_SUCCESS; +} + +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_pdev( + struct wlan_objmgr_pdev *pdev, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_vdev *vdev_next; + struct wlan_objmgr_pdev_objmgr *objmgr; + qdf_list_t *vdev_list; + + wlan_pdev_obj_lock(pdev); + + objmgr = &pdev->pdev_objmgr; + vdev_list = &objmgr->wlan_vdev_list; + /* Get first vdev */ + vdev = wlan_pdev_vdev_list_peek_head(vdev_list); + /* Iterate through pdev's vdev list, till vdev id matches with + entry of vdev list */ + while (vdev != NULL) { + if (wlan_vdev_get_id(vdev) == vdev_id) { + if (wlan_objmgr_vdev_try_get_ref(vdev, dbg_id) != + QDF_STATUS_SUCCESS) + vdev = NULL; + + wlan_pdev_obj_unlock(pdev); + return vdev; + } + /* get next vdev */ + vdev_next = wlan_vdev_get_next_vdev_of_pdev(vdev_list, vdev); + vdev = vdev_next; + } + wlan_pdev_obj_unlock(pdev); + return NULL; +} +qdf_export_symbol(wlan_objmgr_get_vdev_by_id_from_pdev); + +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_pdev_no_state( + struct wlan_objmgr_pdev *pdev, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_vdev *vdev_next; + struct wlan_objmgr_pdev_objmgr *objmgr; + qdf_list_t *vdev_list; + + wlan_pdev_obj_lock(pdev); + + objmgr = &pdev->pdev_objmgr; + vdev_list = &objmgr->wlan_vdev_list; + /* Get first vdev */ + vdev = 
wlan_pdev_vdev_list_peek_head(vdev_list); + /** + * Iterate through pdev's vdev list, till vdev id matches with + * entry of vdev list + */ + while (vdev != NULL) { + if (wlan_vdev_get_id(vdev) == vdev_id) { + wlan_objmgr_vdev_get_ref(vdev, dbg_id); + wlan_pdev_obj_unlock(pdev); + + return vdev; + } + /* get next vdev */ + vdev_next = wlan_vdev_get_next_vdev_of_pdev(vdev_list, vdev); + vdev = vdev_next; + } + wlan_pdev_obj_unlock(pdev); + + return NULL; +} +qdf_export_symbol(wlan_objmgr_get_vdev_by_id_from_pdev_no_state); + +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_pdev( + struct wlan_objmgr_pdev *pdev, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_vdev *vdev_next; + struct wlan_objmgr_pdev_objmgr *objmgr; + qdf_list_t *vdev_list; + + wlan_pdev_obj_lock(pdev); + objmgr = &pdev->pdev_objmgr; + vdev_list = &objmgr->wlan_vdev_list; + /* Get first vdev */ + vdev = wlan_pdev_vdev_list_peek_head(vdev_list); + /* Iterate through pdev's vdev list, till vdev macaddr matches with + entry of vdev list */ + while (vdev != NULL) { + if (WLAN_ADDR_EQ(wlan_vdev_mlme_get_macaddr(vdev), macaddr) + == QDF_STATUS_SUCCESS) { + if (wlan_objmgr_vdev_try_get_ref(vdev, dbg_id) != + QDF_STATUS_SUCCESS) + vdev = NULL; + + wlan_pdev_obj_unlock(pdev); + return vdev; + } + /* get next vdev */ + vdev_next = wlan_vdev_get_next_vdev_of_pdev(vdev_list, vdev); + vdev = vdev_next; + } + wlan_pdev_obj_unlock(pdev); + + return NULL; +} + +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_pdev_no_state( + struct wlan_objmgr_pdev *pdev, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_vdev *vdev_next; + struct wlan_objmgr_pdev_objmgr *objmgr; + qdf_list_t *vdev_list; + + wlan_pdev_obj_lock(pdev); + objmgr = &pdev->pdev_objmgr; + vdev_list = &objmgr->wlan_vdev_list; + /* Get first vdev */ + vdev = wlan_pdev_vdev_list_peek_head(vdev_list); + /* Iterate 
through pdev's vdev list, till vdev macaddr matches with + entry of vdev list */ + while (vdev != NULL) { + if (WLAN_ADDR_EQ(wlan_vdev_mlme_get_macaddr(vdev), macaddr) + == QDF_STATUS_SUCCESS) { + wlan_objmgr_vdev_get_ref(vdev, dbg_id); + wlan_pdev_obj_unlock(pdev); + + return vdev; + } + /* get next vdev */ + vdev_next = wlan_vdev_get_next_vdev_of_pdev(vdev_list, vdev); + vdev = vdev_next; + } + wlan_pdev_obj_unlock(pdev); + + return NULL; +} + +void *wlan_objmgr_pdev_get_comp_private_obj( + struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id) +{ + void *comp_priv_obj; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + QDF_BUG(0); + return NULL; + } + + if (pdev == NULL) { + QDF_BUG(0); + return NULL; + } + + comp_priv_obj = pdev->pdev_comp_priv_obj[id]; + + return comp_priv_obj; +} +qdf_export_symbol(wlan_objmgr_pdev_get_comp_private_obj); + +void wlan_objmgr_pdev_get_ref(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid id) +{ + if (pdev == NULL) { + obj_mgr_err("pdev obj is NULL"); + QDF_ASSERT(0); + return; + } + qdf_atomic_inc(&pdev->pdev_objmgr.ref_cnt); + qdf_atomic_inc(&pdev->pdev_objmgr.ref_id_dbg[id]); + + return; +} +qdf_export_symbol(wlan_objmgr_pdev_get_ref); + +QDF_STATUS wlan_objmgr_pdev_try_get_ref(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid id) +{ + uint8_t pdev_id; + + if (pdev == NULL) { + obj_mgr_err("pdev obj is NULL"); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + + wlan_pdev_obj_lock(pdev); + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + if (pdev->obj_state != WLAN_OBJ_STATE_CREATED) { + wlan_pdev_obj_unlock(pdev); + if (pdev->pdev_objmgr.print_cnt++ <= + WLAN_OBJMGR_RATELIMIT_THRESH) + obj_mgr_err( + "[Ref id: %d] pdev [%d] is not in Created(st:%d)", + id, pdev_id, pdev->obj_state); + return QDF_STATUS_E_RESOURCES; + } + + wlan_objmgr_pdev_get_ref(pdev, id); + wlan_pdev_obj_unlock(pdev); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_pdev_try_get_ref); + +void 
wlan_objmgr_pdev_release_ref(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid id) +{ + uint8_t pdev_id; + + if (pdev == NULL) { + obj_mgr_err("pdev obj is NULL"); + QDF_ASSERT(0); + return; + } + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + if (!qdf_atomic_read(&pdev->pdev_objmgr.ref_id_dbg[id])) { + obj_mgr_err("pdev (id:%d)ref cnt was not taken by %d", + pdev_id, id); + wlan_objmgr_print_ref_ids(pdev->pdev_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_FATAL); + WLAN_OBJMGR_BUG(0); + } + + if (!qdf_atomic_read(&pdev->pdev_objmgr.ref_cnt)) { + obj_mgr_err("pdev ref cnt is 0: pdev-id:%d", pdev_id); + WLAN_OBJMGR_BUG(0); + return; + } + + qdf_atomic_dec(&pdev->pdev_objmgr.ref_id_dbg[id]); + /* Decrement ref count, free pdev, if ref count == 0 */ + if (qdf_atomic_dec_and_test(&pdev->pdev_objmgr.ref_cnt)) + wlan_objmgr_pdev_obj_destroy(pdev); + + return; +} +qdf_export_symbol(wlan_objmgr_pdev_release_ref); + +struct wlan_objmgr_vdev *wlan_objmgr_pdev_get_first_vdev( + struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_pdev_objmgr *objmgr = &pdev->pdev_objmgr; + qdf_list_t *vdev_list = NULL; + struct wlan_objmgr_vdev *vdev; + qdf_list_node_t *node = NULL; + qdf_list_node_t *prev_node = NULL; + + wlan_pdev_obj_lock(pdev); + + /* VDEV list */ + vdev_list = &objmgr->wlan_vdev_list; + if (qdf_list_peek_front(vdev_list, &node) != QDF_STATUS_SUCCESS) { + wlan_pdev_obj_unlock(pdev); + return NULL; + } + + do { + vdev = qdf_container_of(node, struct wlan_objmgr_vdev, + vdev_node); + if (wlan_objmgr_vdev_try_get_ref(vdev, dbg_id) == + QDF_STATUS_SUCCESS) { + wlan_pdev_obj_unlock(pdev); + return vdev; + } + + prev_node = node; + } while (qdf_list_peek_next(vdev_list, prev_node, &node) == + QDF_STATUS_SUCCESS); + + wlan_pdev_obj_unlock(pdev); + + return NULL; +} + +qdf_export_symbol(wlan_objmgr_pdev_get_first_vdev); diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_pdev_obj_i.h 
b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_pdev_obj_i.h new file mode 100644 index 0000000000000000000000000000000000000000..8c0bc8e09688327cb3e290b09df2094ef553df03 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_pdev_obj_i.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2016 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + /** + * DOC: Public APIs to perform operations on PDEV object + */ +#ifndef _WLAN_OBJMGR_PDEV_OBJ_I_H_ +#define _WLAN_OBJMGR_PDEV_OBJ_I_H_ + +/** + * wlan_objmgr_pdev_vdev_attach() - attach vdev to pdev + * @pdev: PDEV object + * @vdev: VDEV object + * + * API to be used for adding the VDEV object in PDEV's VDEV object list + * + * Return: SUCCESS on successful storing of VDEV object + * FAILURE + */ +QDF_STATUS wlan_objmgr_pdev_vdev_attach(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev); + +/** + * wlan_objmgr_pdev_vdev_detach() - detach vdev from pdev + * @pdev: PDEV object + * @vdev: VDEV object + * + * API to be used for removing the VDEV object from PDEV's VDEV object list + * + * Return: SUCCESS on successful removal of VDEV object + * FAILURE + */ +QDF_STATUS wlan_objmgr_pdev_vdev_detach(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_vdev *vdev); + +#endif /* _WLAN_OBJMGR_PDEV_OBJ_I_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_peer_obj.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_peer_obj.c new file mode 100644 index 0000000000000000000000000000000000000000..ab8a2ad058127dc3684a712c7730ff561f0f63cf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_peer_obj.c @@ -0,0 +1,864 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Public APIs to perform operations on Peer object + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "wlan_objmgr_global_obj_i.h" +#include "wlan_objmgr_psoc_obj_i.h" +#include "wlan_objmgr_pdev_obj_i.h" +#include "wlan_objmgr_vdev_obj_i.h" + + +/** + ** APIs to Create/Delete Peer object APIs + */ +static QDF_STATUS wlan_objmgr_peer_object_status( + struct wlan_objmgr_peer *peer) +{ + uint8_t id; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + wlan_peer_obj_lock(peer); + /* Iterate through all components to derive the object status */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + /* If component disabled, Ignore */ + if (peer->obj_status[id] == QDF_STATUS_COMP_DISABLED) + continue; + /* If component operates in Async, status is Partially created, + break */ + else if (peer->obj_status[id] == QDF_STATUS_COMP_ASYNC) { + if (peer->peer_comp_priv_obj[id] == NULL) { + status = QDF_STATUS_COMP_ASYNC; + break; + } + /* If component failed to allocate its object, treat it as + failure, complete object need to be cleaned up */ + } else if ((peer->obj_status[id] == QDF_STATUS_E_NOMEM) || + (peer->obj_status[id] == QDF_STATUS_E_FAILURE)) { + obj_mgr_err("Peer comp object(id:%d) alloc fail", id); + status = QDF_STATUS_E_FAILURE; + break; + } + } + wlan_peer_obj_unlock(peer); + return status; +} + +static QDF_STATUS wlan_objmgr_peer_obj_free(struct wlan_objmgr_peer *peer) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_vdev *vdev; + uint8_t *macaddr; + uint8_t vdev_id; + + if (peer == NULL) { + obj_mgr_err("PEER is NULL"); + return QDF_STATUS_E_FAILURE; + } + + 
macaddr = wlan_peer_get_macaddr(peer); + + vdev = wlan_peer_get_vdev(peer); + if (vdev == NULL) { + obj_mgr_err( + "VDEV is NULL for peer(%02x:%02x:%02x:%02x:%02x:%02x)", + macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5]); + return QDF_STATUS_E_FAILURE; + } + + vdev_id = wlan_vdev_get_id(vdev); + + /* get PSOC from VDEV, if it is NULL, return */ + psoc = wlan_vdev_get_psoc(vdev); + if (psoc == NULL) { + obj_mgr_err( + "PSOC is NULL for peer(%02x:%02x:%02x:%02x:%02x:%02x)", + macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5]); + return QDF_STATUS_E_FAILURE; + } + + /* Decrement ref count for BSS peer, so that BSS peer deletes last*/ + if ((wlan_peer_get_peer_type(peer) == WLAN_PEER_STA) || + (wlan_peer_get_peer_type(peer) == WLAN_PEER_STA_TEMP) || + (wlan_peer_get_peer_type(peer) == WLAN_PEER_P2P_CLI)) + wlan_objmgr_peer_release_ref(wlan_vdev_get_bsspeer(vdev), + WLAN_OBJMGR_ID); + + /* Detach peer from VDEV's peer list */ + if (wlan_objmgr_vdev_peer_detach(vdev, peer) == QDF_STATUS_E_FAILURE) { + obj_mgr_err( + "Peer(%02x:%02x:%02x:%02x:%02x:%02x) VDEV detach fail, vdev id: %d", + macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5], vdev_id); + return QDF_STATUS_E_FAILURE; + } + /* Detach peer from PSOC's peer list */ + if (wlan_objmgr_psoc_peer_detach(psoc, peer) == QDF_STATUS_E_FAILURE) { + obj_mgr_err( + "Peer(%02x:%02x:%02x:%02x:%02x:%02x) PSOC detach failure", + macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5]); + return QDF_STATUS_E_FAILURE; + } + qdf_spinlock_destroy(&peer->peer_lock); + qdf_mem_free(peer); + + return QDF_STATUS_SUCCESS; + +} + +struct wlan_objmgr_peer *wlan_objmgr_peer_obj_create( + struct wlan_objmgr_vdev *vdev, + enum wlan_peer_type type, + uint8_t *macaddr) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_psoc *psoc; + wlan_objmgr_peer_create_handler handler; + wlan_objmgr_peer_status_handler stat_handler; + void *arg; + QDF_STATUS 
obj_status; + uint8_t id; + + if (vdev == NULL) { + obj_mgr_err( + "VDEV is NULL for peer (%02x:%02x:%02x:%02x:%02x:%02x)", + macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5]); + return NULL; + } + /* Get psoc, if psoc is NULL, return */ + psoc = wlan_vdev_get_psoc(vdev); + if (psoc == NULL) { + obj_mgr_err( + "PSOC is NULL for peer (%02x:%02x:%02x:%02x:%02x:%02x)", + macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5]); + return NULL; + } + /* Allocate memory for peer object */ + peer = qdf_mem_malloc(sizeof(*peer)); + if (peer == NULL) { + obj_mgr_err( + "Peer(%02x:%02x:%02x:%02x:%02x:%02x) allocation failure", + macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5]); + return NULL; + } + peer->obj_state = WLAN_OBJ_STATE_ALLOCATED; + qdf_atomic_init(&peer->peer_objmgr.ref_cnt); + for (id = 0; id < WLAN_REF_ID_MAX; id++) + qdf_atomic_init(&peer->peer_objmgr.ref_id_dbg[id]); + wlan_objmgr_peer_get_ref(peer, WLAN_OBJMGR_ID); + /* set vdev to peer */ + wlan_peer_set_vdev(peer, vdev); + /* set peer type */ + wlan_peer_set_peer_type(peer, type); + /* set mac address of peer */ + wlan_peer_set_macaddr(peer, macaddr); + /* initialize peer state */ + wlan_peer_mlme_set_state(peer, WLAN_INIT_STATE); + wlan_peer_mlme_reset_seq_num(peer); + peer->peer_objmgr.print_cnt = 0; + + qdf_spinlock_create(&peer->peer_lock); + /* Attach peer to psoc, psoc maintains the node table for the device */ + if (wlan_objmgr_psoc_peer_attach(psoc, peer) != + QDF_STATUS_SUCCESS) { + obj_mgr_warn( + "Peer(%02x:%02x:%02x:%02x:%02x:%02x) PSOC attach failure", + macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5]); + qdf_spinlock_destroy(&peer->peer_lock); + qdf_mem_free(peer); + return NULL; + } + /* Attach peer to vdev peer table */ + if (wlan_objmgr_vdev_peer_attach(vdev, peer) != + QDF_STATUS_SUCCESS) { + obj_mgr_warn( + "Peer(%02x:%02x:%02x:%02x:%02x:%02x) VDEV attach failure", + macaddr[0], macaddr[1], 
macaddr[2], + macaddr[3], macaddr[4], macaddr[5]); + /* if attach fails, detach from psoc table before free */ + wlan_objmgr_psoc_peer_detach(psoc, peer); + qdf_spinlock_destroy(&peer->peer_lock); + qdf_mem_free(peer); + return NULL; + } + wlan_peer_set_pdev_id(peer, wlan_objmgr_pdev_get_pdev_id( + wlan_vdev_get_pdev(vdev))); + /* Increment ref count for BSS peer, so that BSS peer deletes last*/ + if ((type == WLAN_PEER_STA) || (type == WLAN_PEER_STA_TEMP) + || (type == WLAN_PEER_P2P_CLI)) + wlan_objmgr_peer_get_ref(wlan_vdev_get_bsspeer(vdev), + WLAN_OBJMGR_ID); + /* TODO init other parameters */ + /* Invoke registered create handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + handler = g_umac_glb_obj->peer_create_handler[id]; + arg = g_umac_glb_obj->peer_create_handler_arg[id]; + if (handler != NULL) + peer->obj_status[id] = handler(peer, arg); + else + peer->obj_status[id] = QDF_STATUS_COMP_DISABLED; + } + /* derive the object status */ + obj_status = wlan_objmgr_peer_object_status(peer); + /* If SUCCESS, Object is created */ + if (obj_status == QDF_STATUS_SUCCESS) { + peer->obj_state = WLAN_OBJ_STATE_CREATED; + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + stat_handler = g_umac_glb_obj->peer_status_handler[id]; + arg = g_umac_glb_obj->peer_status_handler_arg[id]; + if (stat_handler != NULL) + stat_handler(peer, arg, + QDF_STATUS_SUCCESS); + } + } else if (obj_status == QDF_STATUS_COMP_ASYNC) { + /* If any component operates in different context, update it + as partially created */ + peer->obj_state = WLAN_OBJ_STATE_PARTIALLY_CREATED; + } else if (obj_status == QDF_STATUS_E_FAILURE) { + /* Clean up the peer */ + obj_mgr_err( + "Peer(%02x:%02x:%02x:%02x:%02x:%02x) comp object alloc fail", + macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5]); + wlan_objmgr_peer_obj_delete(peer); + return NULL; + } + + obj_mgr_debug("Created peer " QDF_MAC_ADDR_STR, + QDF_MAC_ADDR_ARRAY(macaddr)); + + return peer; +} + +static 
QDF_STATUS wlan_objmgr_peer_obj_destroy(struct wlan_objmgr_peer *peer) +{ + uint8_t id; + wlan_objmgr_peer_destroy_handler handler; + QDF_STATUS obj_status; + void *arg; + uint8_t *macaddr; + + if (peer == NULL) { + obj_mgr_err("PEER is NULL"); + return QDF_STATUS_E_FAILURE; + } + wlan_objmgr_notify_destroy(peer, WLAN_PEER_OP); + + macaddr = wlan_peer_get_macaddr(peer); + + obj_mgr_debug("Physically deleting peer " QDF_MAC_ADDR_STR, + QDF_MAC_ADDR_ARRAY(macaddr)); + + if (peer->obj_state != WLAN_OBJ_STATE_LOGICALLY_DELETED) { + obj_mgr_err("PEER object del is not invoked obj_state:%d peer " + QDF_MAC_ADDR_STR, peer->obj_state, + QDF_MAC_ADDR_ARRAY(macaddr)); + WLAN_OBJMGR_BUG(0); + } + + /* Invoke registered destroy handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + handler = g_umac_glb_obj->peer_destroy_handler[id]; + arg = g_umac_glb_obj->peer_destroy_handler_arg[id]; + if (handler && + (peer->obj_status[id] == QDF_STATUS_SUCCESS || + peer->obj_status[id] == QDF_STATUS_COMP_ASYNC)) + peer->obj_status[id] = handler(peer, arg); + else + peer->obj_status[id] = QDF_STATUS_COMP_DISABLED; + } + /* Derive the object status */ + obj_status = wlan_objmgr_peer_object_status(peer); + if (obj_status == QDF_STATUS_E_FAILURE) { + /* If it status is failure, memory will not be freed */ + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + /* few components deletion is in progress */ + if (obj_status == QDF_STATUS_COMP_ASYNC) { + peer->obj_state = WLAN_OBJ_STATE_PARTIALLY_DELETED; + return QDF_STATUS_COMP_ASYNC; + } + + /* Free the peer object */ + return wlan_objmgr_peer_obj_free(peer); +} + +QDF_STATUS wlan_objmgr_peer_obj_delete(struct wlan_objmgr_peer *peer) +{ + uint8_t print_idx; + uint8_t *macaddr; + + if (peer == NULL) { + obj_mgr_err("PEER is NULL"); + return QDF_STATUS_E_FAILURE; + } + + wlan_peer_obj_lock(peer); + macaddr = wlan_peer_get_macaddr(peer); + wlan_peer_obj_unlock(peer); + + obj_mgr_debug("Logically deleting peer " QDF_MAC_ADDR_STR, + 
QDF_MAC_ADDR_ARRAY(macaddr)); + + print_idx = qdf_get_pidx(); + wlan_objmgr_print_ref_ids(peer->peer_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_DEBUG); + /** + * Update VDEV object state to LOGICALLY DELETED + * It prevents further access of this object + */ + wlan_peer_obj_lock(peer); + peer->obj_state = WLAN_OBJ_STATE_LOGICALLY_DELETED; + wlan_peer_obj_unlock(peer); + wlan_objmgr_notify_log_delete(peer, WLAN_PEER_OP); + wlan_objmgr_peer_release_ref(peer, WLAN_OBJMGR_ID); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_peer_obj_delete); +/** + ** APIs to attach/detach component objects + */ +QDF_STATUS wlan_objmgr_peer_component_obj_attach( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id, + void *comp_priv_obj, + QDF_STATUS status) +{ + wlan_objmgr_peer_status_handler s_hler; + void *arg; + uint8_t i; + QDF_STATUS obj_status; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_peer_obj_lock(peer); + /* If there is a valid entry, return failure, + valid object needs to be freed first */ + if (peer->peer_comp_priv_obj[id] != NULL) { + wlan_peer_obj_unlock(peer); + return QDF_STATUS_E_FAILURE; + } + /* Assign component object private pointer(can be NULL also), status */ + peer->peer_comp_priv_obj[id] = comp_priv_obj; + peer->obj_status[id] = status; + wlan_peer_obj_unlock(peer); + + if (peer->obj_state != WLAN_OBJ_STATE_PARTIALLY_CREATED) + return QDF_STATUS_SUCCESS; + + /* If PEER object status is partially created means, this API is + invoked with differnt context. 
this block should be executed for async + components only */ + /* Derive status */ + obj_status = wlan_objmgr_peer_object_status(peer); + /* STATUS_SUCCESS means, object is CREATED */ + if (obj_status == QDF_STATUS_SUCCESS) + peer->obj_state = WLAN_OBJ_STATE_CREATED; + /* update state as CREATION failed, caller has to delete the + PEER object */ + else if (obj_status == QDF_STATUS_E_FAILURE) + peer->obj_state = WLAN_OBJ_STATE_CREATION_FAILED; + /* Notify components about the CREATION success/failure */ + if ((obj_status == QDF_STATUS_SUCCESS) || + (obj_status == QDF_STATUS_E_FAILURE)) { + /* notify object status */ + for (i = 0; i < WLAN_UMAC_MAX_COMPONENTS; i++) { + s_hler = g_umac_glb_obj->peer_status_handler[i]; + arg = g_umac_glb_obj->peer_status_handler_arg[i]; + if (s_hler != NULL) + s_hler(peer, arg, obj_status); + } + } + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_peer_component_obj_detach( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id, + void *comp_priv_obj) +{ + QDF_STATUS obj_status; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_peer_obj_lock(peer); + /* If there is an invalid entry, return failure */ + if (peer->peer_comp_priv_obj[id] != comp_priv_obj) { + peer->obj_status[id] = QDF_STATUS_E_FAILURE; + wlan_peer_obj_unlock(peer); + return QDF_STATUS_E_FAILURE; + } + /* Reset the pointer to NULL */ + peer->peer_comp_priv_obj[id] = NULL; + peer->obj_status[id] = QDF_STATUS_SUCCESS; + wlan_peer_obj_unlock(peer); + + /* If PEER object status is partially destroyed means, this API is + invoked with different context, this block should be executed for async + components only */ + if ((peer->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) || + (peer->obj_state == WLAN_OBJ_STATE_COMP_DEL_PROGRESS)) { + /* Derive object status */ + obj_status = wlan_objmgr_peer_object_status(peer); + if (obj_status == QDF_STATUS_SUCCESS) { + /*Update the status as Deleted, if full 
object + deletion is in progress */ + if (peer->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) + peer->obj_state = WLAN_OBJ_STATE_DELETED; + /* Move to creation state, since this component + deletion alone requested */ + if (peer->obj_state == WLAN_OBJ_STATE_COMP_DEL_PROGRESS) + peer->obj_state = WLAN_OBJ_STATE_CREATED; + /* Object status is failure */ + } else if (obj_status == QDF_STATUS_E_FAILURE) { + /*Update the status as Deletion failed, if full object + deletion is in progress */ + if (peer->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) + peer->obj_state = + WLAN_OBJ_STATE_DELETION_FAILED; + /* Move to creation state, since this component + deletion alone requested (do not block other + components) */ + if (peer->obj_state == WLAN_OBJ_STATE_COMP_DEL_PROGRESS) + peer->obj_state = WLAN_OBJ_STATE_CREATED; + } + + /* Delete peer object */ + if ((obj_status == QDF_STATUS_SUCCESS) && + (peer->obj_state == WLAN_OBJ_STATE_DELETED)) { + /* Free the peer object */ + return wlan_objmgr_peer_obj_free(peer); + } + } + + return QDF_STATUS_SUCCESS; +} + + +QDF_STATUS wlan_objmgr_trigger_peer_comp_priv_object_creation( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id) +{ + wlan_objmgr_peer_create_handler handler; + void *arg; + QDF_STATUS obj_status = QDF_STATUS_SUCCESS; + + /* Component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_peer_obj_lock(peer); + /* If component object is already created, delete old + component object, then invoke creation */ + if (peer->peer_comp_priv_obj[id] != NULL) { + wlan_peer_obj_unlock(peer); + return QDF_STATUS_E_FAILURE; + } + wlan_peer_obj_unlock(peer); + + /* Invoke registered create handlers */ + handler = g_umac_glb_obj->peer_create_handler[id]; + arg = g_umac_glb_obj->peer_create_handler_arg[id]; + if (handler != NULL) + peer->obj_status[id] = handler(peer, arg); + else + return QDF_STATUS_E_FAILURE; + + /* If object status is created, then only handle this object 
status */ + if (peer->obj_state == WLAN_OBJ_STATE_CREATED) { + /* Derive object status */ + obj_status = wlan_objmgr_peer_object_status(peer); + /* Move PEER object state to Partially created state */ + if (obj_status == QDF_STATUS_COMP_ASYNC) { + /*TODO atomic */ + peer->obj_state = WLAN_OBJ_STATE_PARTIALLY_CREATED; + } + } + + return obj_status; +} + + +QDF_STATUS wlan_objmgr_trigger_peer_comp_priv_object_deletion( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id) +{ + wlan_objmgr_peer_destroy_handler handler; + void *arg; + QDF_STATUS obj_status = QDF_STATUS_SUCCESS; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_peer_obj_lock(peer); + /* Component object was never created, invalid operation */ + if (peer->peer_comp_priv_obj[id] == NULL) { + wlan_peer_obj_unlock(peer); + return QDF_STATUS_E_FAILURE; + } + + wlan_peer_obj_unlock(peer); + + /* Invoke registered destroy handlers */ + handler = g_umac_glb_obj->peer_destroy_handler[id]; + arg = g_umac_glb_obj->peer_destroy_handler_arg[id]; + if (handler != NULL) + peer->obj_status[id] = handler(peer, arg); + else + return QDF_STATUS_E_FAILURE; + + /* If object status is created, then only handle this object status */ + if (peer->obj_state == WLAN_OBJ_STATE_CREATED) { + obj_status = wlan_objmgr_peer_object_status(peer); + /* move object state to DEL progress */ + if (obj_status == QDF_STATUS_COMP_ASYNC) + peer->obj_state = WLAN_OBJ_STATE_COMP_DEL_PROGRESS; + } + return obj_status; +} + +void *wlan_objmgr_peer_get_comp_private_obj( + struct wlan_objmgr_peer *peer, + enum wlan_umac_comp_id id) +{ + void *comp_priv_obj; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + QDF_BUG(0); + return NULL; + } + + if (peer == NULL) { + QDF_BUG(0); + return NULL; + } + + comp_priv_obj = peer->peer_comp_priv_obj[id]; + return comp_priv_obj; +} +qdf_export_symbol(wlan_objmgr_peer_get_comp_private_obj); + +void 
wlan_objmgr_peer_get_ref(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id) +{ + if (peer == NULL) { + obj_mgr_err("peer obj is NULL for %d", id); + QDF_ASSERT(0); + return; + } + /* Increment ref count */ + qdf_atomic_inc(&peer->peer_objmgr.ref_cnt); + qdf_atomic_inc(&peer->peer_objmgr.ref_id_dbg[id]); + + return; +} +qdf_export_symbol(wlan_objmgr_peer_get_ref); + +QDF_STATUS wlan_objmgr_peer_try_get_ref(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id) +{ + + uint8_t *macaddr; + + if (peer == NULL) { + obj_mgr_err("peer obj is NULL for %d", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + + wlan_peer_obj_lock(peer); + macaddr = wlan_peer_get_macaddr(peer); + if (peer->obj_state != WLAN_OBJ_STATE_CREATED) { + wlan_peer_obj_unlock(peer); + if (peer->peer_objmgr.print_cnt++ <= + WLAN_OBJMGR_RATELIMIT_THRESH) + obj_mgr_warn( + "peer(" QDF_MAC_ADDR_STR ") not in Created st(%d)", + QDF_MAC_ADDR_ARRAY(macaddr), + peer->obj_state); + return QDF_STATUS_E_RESOURCES; + } + + wlan_objmgr_peer_get_ref(peer, id); + wlan_peer_obj_unlock(peer); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_peer_try_get_ref); + +void wlan_objmgr_peer_release_ref(struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid id) +{ + + uint8_t *macaddr; + + if (peer == NULL) { + obj_mgr_err("peer obj is NULL for %d", id); + QDF_ASSERT(0); + return; + } + + macaddr = wlan_peer_get_macaddr(peer); + + if (!qdf_atomic_read(&peer->peer_objmgr.ref_id_dbg[id])) { + obj_mgr_err( + "peer(%02x:%02x:%02x:%02x:%02x:%02x) ref was not taken by %d", + macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5], id); + wlan_objmgr_print_ref_ids(peer->peer_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_FATAL); + WLAN_OBJMGR_BUG(0); + } + + if (!qdf_atomic_read(&peer->peer_objmgr.ref_cnt)) { + obj_mgr_err("peer(%02x:%02x:%02x:%02x:%02x:%02x) ref cnt is 0", + macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5]); + WLAN_OBJMGR_BUG(0); + return; + } + 
qdf_atomic_dec(&peer->peer_objmgr.ref_id_dbg[id]); + + /* Provide synchronization from the access to add peer + * to logically deleted peer list. + */ + wlan_peer_obj_lock(peer); + /* Decrement ref count, free peer object, if ref count == 0 */ + if (qdf_atomic_dec_and_test(&peer->peer_objmgr.ref_cnt)) { + wlan_peer_obj_unlock(peer); + wlan_objmgr_peer_obj_destroy(peer); + } else { + wlan_peer_obj_unlock(peer); + } + + return; +} +qdf_export_symbol(wlan_objmgr_peer_release_ref); + +struct wlan_objmgr_peer *wlan_vdev_peer_list_peek_active_head( + struct wlan_objmgr_vdev *vdev, + qdf_list_t *peer_list, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + qdf_list_node_t *vdev_node = NULL; + qdf_list_node_t *prev_vdev_node = NULL; + + wlan_vdev_obj_lock(vdev); + + if (qdf_list_peek_front(peer_list, &vdev_node) != QDF_STATUS_SUCCESS) { + wlan_vdev_obj_unlock(vdev); + return NULL; + } + + do { + peer = qdf_container_of(vdev_node, struct wlan_objmgr_peer, + vdev_peer); + + if (wlan_objmgr_peer_try_get_ref(peer, dbg_id) == + QDF_STATUS_SUCCESS) { + wlan_vdev_obj_unlock(vdev); + return peer; + } + + prev_vdev_node = vdev_node; + } while (qdf_list_peek_next(peer_list, prev_vdev_node, &vdev_node) == + QDF_STATUS_SUCCESS); + + wlan_vdev_obj_unlock(vdev); + + return NULL; +} + +struct wlan_objmgr_peer *wlan_peer_get_next_active_peer_of_vdev( + struct wlan_objmgr_vdev *vdev, + qdf_list_t *peer_list, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer_next; + qdf_list_node_t *vdev_node = NULL; + qdf_list_node_t *prev_vdev_node = NULL; + + if (peer == NULL) + return NULL; + + wlan_vdev_obj_lock(vdev); + + prev_vdev_node = &peer->vdev_peer; + while (qdf_list_peek_next(peer_list, prev_vdev_node, &vdev_node) == + QDF_STATUS_SUCCESS) { + peer_next = qdf_container_of(vdev_node, struct wlan_objmgr_peer, + vdev_peer); + + if (wlan_objmgr_peer_try_get_ref(peer_next, dbg_id) == + QDF_STATUS_SUCCESS) { + 
wlan_vdev_obj_unlock(vdev); + return peer_next; + } + + prev_vdev_node = vdev_node; + } + + wlan_vdev_obj_unlock(vdev); + + return NULL; +} + +struct wlan_objmgr_peer *wlan_peer_get_next_active_peer_of_psoc( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer_next = NULL; + qdf_list_node_t *psoc_node = NULL; + qdf_list_node_t *prev_psoc_node = NULL; + qdf_list_t *obj_list; + + qdf_spin_lock_bh(&peer_list->peer_list_lock); + obj_list = &peer_list->peer_hash[hash_index]; + + prev_psoc_node = &peer->psoc_peer; + while (qdf_list_peek_next(obj_list, prev_psoc_node, &psoc_node) == + QDF_STATUS_SUCCESS) { + peer_next = qdf_container_of(psoc_node, struct wlan_objmgr_peer, + psoc_peer); + + if (wlan_objmgr_peer_try_get_ref(peer_next, dbg_id) == + QDF_STATUS_SUCCESS) { + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + return peer_next; + } + + prev_psoc_node = psoc_node; + } + + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + + return NULL; +} + +struct wlan_objmgr_peer *wlan_psoc_peer_list_peek_active_head( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + qdf_list_node_t *psoc_node = NULL; + qdf_list_node_t *prev_psoc_node = NULL; + qdf_list_t *obj_list; + + qdf_spin_lock_bh(&peer_list->peer_list_lock); + obj_list = &peer_list->peer_hash[hash_index]; + + if (qdf_list_peek_front(obj_list, &psoc_node) != QDF_STATUS_SUCCESS) { + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + return NULL; + } + + do { + peer = qdf_container_of(psoc_node, struct wlan_objmgr_peer, + psoc_peer); + if (wlan_objmgr_peer_try_get_ref(peer, dbg_id) == + QDF_STATUS_SUCCESS) { + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + return peer; + } + + prev_psoc_node = psoc_node; + } while (qdf_list_peek_next(obj_list, prev_psoc_node, &psoc_node) == + QDF_STATUS_SUCCESS); + + 
qdf_spin_unlock_bh(&peer_list->peer_list_lock); + return NULL; +} + +struct wlan_objmgr_peer *wlan_psoc_peer_list_peek_head_ref( + struct wlan_peer_list *peer_list, + uint8_t hash_index, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + qdf_list_t *obj_list; + + qdf_spin_lock_bh(&peer_list->peer_list_lock); + obj_list = &peer_list->peer_hash[hash_index]; + + peer = wlan_psoc_peer_list_peek_head(obj_list); + + /** + * This API is invoked by caller, only when caller need to access the + * peer object, though object is not in active state, this API should be + * used carefully, where multiple object frees are not triggered + */ + if (peer) + wlan_objmgr_peer_get_ref(peer, dbg_id); + + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + + return peer; +} + +struct wlan_objmgr_peer *wlan_peer_get_next_peer_of_psoc_ref( + struct wlan_peer_list *peer_list, uint8_t hash_index, + struct wlan_objmgr_peer *peer, + wlan_objmgr_ref_dbgid dbg_id) +{ + qdf_list_t *obj_list; + struct wlan_objmgr_peer *peer_next; + + qdf_spin_lock_bh(&peer_list->peer_list_lock); + obj_list = &peer_list->peer_hash[hash_index]; + + peer_next = wlan_peer_get_next_peer_of_psoc(obj_list, peer); + /** + * This API is invoked by caller, only when caller need to access the + * peer object, though object is not in active state, this API should be + * used carefully, where multiple free on object are not triggered + */ + if (peer_next) + wlan_objmgr_peer_get_ref(peer_next, dbg_id); + + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + + return peer_next; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_psoc_obj.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_psoc_obj.c new file mode 100644 index 0000000000000000000000000000000000000000..afadf8517ab17ae6851f3f49b85a0a5d907355ba --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_psoc_obj.c @@ -0,0 +1,2155 @@ +/* + * 
Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Public APIs to perform operations on Global objects + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "wlan_objmgr_global_obj_i.h" +#include "wlan_objmgr_psoc_obj_i.h" +#include "wlan_objmgr_pdev_obj_i.h" +#include "wlan_objmgr_vdev_obj_i.h" + +/** + ** APIs to Create/Delete Global object APIs + */ +static QDF_STATUS wlan_objmgr_psoc_object_status( + struct wlan_objmgr_psoc *psoc) +{ + uint8_t id; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + wlan_psoc_obj_lock(psoc); + /* Iterate through all components to derive the object status */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + /* If component disabled, Ignore */ + if (psoc->obj_status[id] == QDF_STATUS_COMP_DISABLED) + continue; + /* If component operates in Async, status is Partially created, + * break + */ + else if (psoc->obj_status[id] == QDF_STATUS_COMP_ASYNC) { + if (psoc->soc_comp_priv_obj[id] == NULL) { + status = QDF_STATUS_COMP_ASYNC; + break; + } + /* + * If component failed to allocate its object, treat it as + * failure, complete object need to be cleaned up + */ + } else if ((psoc->obj_status[id] == 
QDF_STATUS_E_NOMEM) || + (psoc->obj_status[id] == QDF_STATUS_E_FAILURE)) { + status = QDF_STATUS_E_FAILURE; + break; + } + } + wlan_psoc_obj_unlock(psoc); + + return status; +} + +static void wlan_objmgr_psoc_peer_list_init(struct wlan_peer_list *peer_list) +{ + uint8_t i; + + qdf_spinlock_create(&peer_list->peer_list_lock); + for (i = 0; i < WLAN_PEER_HASHSIZE; i++) + qdf_list_create(&peer_list->peer_hash[i], + WLAN_UMAC_PSOC_MAX_PEERS + + WLAN_MAX_PSOC_TEMP_PEERS); +} + +static void wlan_objmgr_psoc_peer_list_deinit(struct wlan_peer_list *peer_list) +{ + uint8_t i; + + /* deinit the lock */ + qdf_spinlock_destroy(&peer_list->peer_list_lock); + for (i = 0; i < WLAN_PEER_HASHSIZE; i++) + qdf_list_destroy(&peer_list->peer_hash[i]); +} + +static QDF_STATUS wlan_objmgr_psoc_obj_free(struct wlan_objmgr_psoc *psoc) +{ + /* Detach PSOC from global object's psoc list */ + if (wlan_objmgr_psoc_object_detach(psoc) == QDF_STATUS_E_FAILURE) { + obj_mgr_err("PSOC object detach failed"); + return QDF_STATUS_E_FAILURE; + } + wlan_objmgr_psoc_peer_list_deinit(&psoc->soc_objmgr.peer_list); + + qdf_spinlock_destroy(&psoc->psoc_lock); + qdf_mem_free(psoc); + + return QDF_STATUS_SUCCESS; +} + +struct wlan_objmgr_psoc *wlan_objmgr_psoc_obj_create(uint32_t phy_version, + WLAN_DEV_TYPE dev_type) +{ + uint8_t id; + struct wlan_objmgr_psoc *psoc = NULL; + wlan_objmgr_psoc_create_handler handler; + wlan_objmgr_psoc_status_handler stat_handler; + struct wlan_objmgr_psoc_objmgr *objmgr; + QDF_STATUS obj_status; + void *arg; + + psoc = qdf_mem_malloc(sizeof(*psoc)); + if (psoc == NULL) { + obj_mgr_err("PSOC allocation failed"); + return NULL; + } + psoc->obj_state = WLAN_OBJ_STATE_ALLOCATED; + qdf_spinlock_create(&psoc->psoc_lock); + /* Initialize with default values */ + objmgr = &psoc->soc_objmgr; + objmgr->wlan_pdev_count = 0; + objmgr->wlan_vdev_count = 0; + objmgr->max_vdev_count = WLAN_UMAC_PSOC_MAX_VDEVS; + objmgr->wlan_peer_count = 0; + objmgr->temp_peer_count = 0; + 
objmgr->max_peer_count = WLAN_UMAC_PSOC_MAX_PEERS; + qdf_atomic_init(&objmgr->ref_cnt); + objmgr->print_cnt = 0; + /* set phy version, dev_type in psoc */ + wlan_psoc_set_nif_phy_version(psoc, phy_version); + wlan_psoc_set_dev_type(psoc, dev_type); + /* Initialize peer list */ + wlan_objmgr_psoc_peer_list_init(&objmgr->peer_list); + wlan_objmgr_psoc_get_ref(psoc, WLAN_OBJMGR_ID); + /* Invoke registered create handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + handler = g_umac_glb_obj->psoc_create_handler[id]; + arg = g_umac_glb_obj->psoc_create_handler_arg[id]; + if (handler != NULL) + psoc->obj_status[id] = handler(psoc, arg); + else + psoc->obj_status[id] = QDF_STATUS_COMP_DISABLED; + } + /* Derive object status */ + obj_status = wlan_objmgr_psoc_object_status(psoc); + + if (obj_status == QDF_STATUS_SUCCESS) { + /* Object status is SUCCESS, Object is created */ + psoc->obj_state = WLAN_OBJ_STATE_CREATED; + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + stat_handler = g_umac_glb_obj->psoc_status_handler[id]; + arg = g_umac_glb_obj->psoc_status_handler_arg[id]; + if (stat_handler != NULL) + stat_handler(psoc, arg, + QDF_STATUS_SUCCESS); + } + } else if (obj_status == QDF_STATUS_COMP_ASYNC) { + /* + * Few components operate in Asynchronous communication + * Object state partially created + */ + psoc->obj_state = WLAN_OBJ_STATE_PARTIALLY_CREATED; + } else if (obj_status == QDF_STATUS_E_FAILURE) { + /* Component object failed to be created, clean up the object */ + obj_mgr_err("PSOC component objects allocation failed"); + /* Clean up the psoc */ + wlan_objmgr_psoc_obj_delete(psoc); + return NULL; + } + + if (wlan_objmgr_psoc_object_attach(psoc) != + QDF_STATUS_SUCCESS) { + obj_mgr_err("PSOC object attach failed"); + wlan_objmgr_psoc_obj_delete(psoc); + return NULL; + } + + obj_mgr_info("Created psoc %d", psoc->soc_objmgr.psoc_id); + + return psoc; +} +qdf_export_symbol(wlan_objmgr_psoc_obj_create); + +static QDF_STATUS 
wlan_objmgr_psoc_obj_destroy(struct wlan_objmgr_psoc *psoc) +{ + uint8_t id; + wlan_objmgr_psoc_destroy_handler handler; + QDF_STATUS obj_status; + void *arg; + + if (psoc == NULL) { + obj_mgr_err("psoc is NULL"); + return QDF_STATUS_E_FAILURE; + } + wlan_objmgr_notify_destroy(psoc, WLAN_PSOC_OP); + + obj_mgr_info("Physically deleting psoc %d", psoc->soc_objmgr.psoc_id); + + if (psoc->obj_state != WLAN_OBJ_STATE_LOGICALLY_DELETED) { + obj_mgr_err("PSOC object delete is not invoked obj_state:%d", + psoc->obj_state); + WLAN_OBJMGR_BUG(0); + } + + /* Invoke registered destroy handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + handler = g_umac_glb_obj->psoc_destroy_handler[id]; + arg = g_umac_glb_obj->psoc_destroy_handler_arg[id]; + if (handler && + (psoc->obj_status[id] == QDF_STATUS_SUCCESS || + psoc->obj_status[id] == QDF_STATUS_COMP_ASYNC)) + psoc->obj_status[id] = handler(psoc, arg); + else + psoc->obj_status[id] = QDF_STATUS_COMP_DISABLED; + } + /* Derive object status */ + obj_status = wlan_objmgr_psoc_object_status(psoc); + + if (obj_status == QDF_STATUS_E_FAILURE) { + obj_mgr_err("PSOC component object free failed"); + /* Ideally should not happen + * This leads to memleak, BUG_ON to find which component + * delete notification failed and fix it. 
+ */ + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + /* Deletion is in progress */ + if (obj_status == QDF_STATUS_COMP_ASYNC) { + psoc->obj_state = WLAN_OBJ_STATE_PARTIALLY_DELETED; + return QDF_STATUS_COMP_ASYNC; + } + + /* Free psoc object */ + return wlan_objmgr_psoc_obj_free(psoc); +} + + +QDF_STATUS wlan_objmgr_psoc_obj_delete(struct wlan_objmgr_psoc *psoc) +{ + uint8_t print_idx; + + if (psoc == NULL) { + obj_mgr_err("psoc is NULL"); + return QDF_STATUS_E_FAILURE; + } + + obj_mgr_info("Logically deleting psoc %d", psoc->soc_objmgr.psoc_id); + + print_idx = qdf_get_pidx(); + wlan_objmgr_print_ref_ids(psoc->soc_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_DEBUG); + /* + * Update PSOC object state to LOGICALLY DELETED + * It prevents further access of this object + */ + wlan_psoc_obj_lock(psoc); + psoc->obj_state = WLAN_OBJ_STATE_LOGICALLY_DELETED; + wlan_psoc_obj_unlock(psoc); + wlan_objmgr_notify_log_delete(psoc, WLAN_PSOC_OP); + wlan_objmgr_psoc_release_ref(psoc, WLAN_OBJMGR_ID); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_psoc_obj_delete); + +QDF_STATUS wlan_objmgr_psoc_component_obj_attach( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id, + void *comp_priv_obj, + QDF_STATUS status) +{ + wlan_objmgr_psoc_status_handler stat_handler; + void *arg = NULL; + QDF_STATUS obj_status; + uint8_t i; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_psoc_obj_lock(psoc); + /* If there is a valid entry, return failure */ + if (psoc->soc_comp_priv_obj[id] != NULL) { + wlan_psoc_obj_unlock(psoc); + return QDF_STATUS_E_FAILURE; + } + /* Save component's pointer and status */ + psoc->soc_comp_priv_obj[id] = comp_priv_obj; + psoc->obj_status[id] = status; + + wlan_psoc_obj_unlock(psoc); + + if (psoc->obj_state != WLAN_OBJ_STATE_PARTIALLY_CREATED) + return QDF_STATUS_SUCCESS; + /* If PSOC object status is partially created means, this API is + * invoked with different context, this 
block should be executed for + * async components only + */ + /* Derive status */ + obj_status = wlan_objmgr_psoc_object_status(psoc); + /* STATUS_SUCCESS means, object is CREATED */ + if (obj_status == QDF_STATUS_SUCCESS) + psoc->obj_state = WLAN_OBJ_STATE_CREATED; + /* update state as CREATION failed, caller has to delete the + * PSOC object + */ + else if (obj_status == QDF_STATUS_E_FAILURE) + psoc->obj_state = WLAN_OBJ_STATE_CREATION_FAILED; + + /* Notify components about the CREATION success/failure */ + if ((obj_status == QDF_STATUS_SUCCESS) || + (obj_status == QDF_STATUS_E_FAILURE)) { + /* notify object status */ + for (i = 0; i < WLAN_UMAC_MAX_COMPONENTS; i++) { + stat_handler = g_umac_glb_obj->psoc_status_handler[i]; + arg = g_umac_glb_obj->psoc_status_handler_arg[i]; + if (stat_handler != NULL) + stat_handler(psoc, arg, obj_status); + } + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_psoc_component_obj_attach); + +QDF_STATUS wlan_objmgr_psoc_component_obj_detach( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id, + void *comp_priv_obj) +{ + QDF_STATUS obj_status; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_psoc_obj_lock(psoc); + /* If there is an invalid entry, return failure */ + if (psoc->soc_comp_priv_obj[id] != comp_priv_obj) { + psoc->obj_status[id] = QDF_STATUS_E_FAILURE; + wlan_psoc_obj_unlock(psoc); + return QDF_STATUS_E_FAILURE; + } + /* Reset pointers to NULL, update the status */ + psoc->soc_comp_priv_obj[id] = NULL; + psoc->obj_status[id] = QDF_STATUS_SUCCESS; + wlan_psoc_obj_unlock(psoc); + + /* If PSOC object status is partially destroyed means, this API is + * invoked with different context, this block should be executed for + * async components only + */ + if ((psoc->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) || + (psoc->obj_state == WLAN_OBJ_STATE_COMP_DEL_PROGRESS)) { + /* Derive object status */ + obj_status = 
wlan_objmgr_psoc_object_status(psoc); + if (obj_status == QDF_STATUS_SUCCESS) { + /* Update the status as Deleted, if full object + * deletion is in progress + */ + if (psoc->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) + psoc->obj_state = WLAN_OBJ_STATE_DELETED; + + /* Move to creation state, since this component + * deletion alone requested + */ + if (psoc->obj_state == WLAN_OBJ_STATE_COMP_DEL_PROGRESS) + psoc->obj_state = WLAN_OBJ_STATE_CREATED; + /* Object status is failure */ + } else if (obj_status == QDF_STATUS_E_FAILURE) { + /* Update the status as Deletion failed, if full object + * deletion is in progress + */ + if (psoc->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) + psoc->obj_state = + WLAN_OBJ_STATE_DELETION_FAILED; + + /* Move to creation state, since this component + * deletion alone requested (do not block other + * components) + */ + if (psoc->obj_state == WLAN_OBJ_STATE_COMP_DEL_PROGRESS) + psoc->obj_state = WLAN_OBJ_STATE_CREATED; + } + + /* Delete psoc object */ + if ((obj_status == QDF_STATUS_SUCCESS) && + (psoc->obj_state == WLAN_OBJ_STATE_DELETED)) { + /* Free psoc object */ + return wlan_objmgr_psoc_obj_free(psoc); + } + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_psoc_component_obj_detach); + +QDF_STATUS wlan_objmgr_iterate_obj_list( + struct wlan_objmgr_psoc *psoc, + enum wlan_objmgr_obj_type obj_type, + wlan_objmgr_op_handler handler, + void *arg, uint8_t lock_free_op, + wlan_objmgr_ref_dbgid dbg_id) +{ + uint16_t obj_id; + uint8_t i; + struct wlan_objmgr_psoc_objmgr *objmgr = &psoc->soc_objmgr; + struct wlan_peer_list *peer_list; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_next; + uint16_t max_vdev_cnt; + + switch (obj_type) { + case WLAN_PDEV_OP: + /* Iterate through PDEV list, invoke handler for each pdev */ + for (obj_id = 0; obj_id < WLAN_UMAC_MAX_PDEVS; obj_id++) { + pdev = wlan_objmgr_get_pdev_by_id(psoc, obj_id, 
dbg_id); + if (pdev != NULL) { + handler(psoc, (void *)pdev, arg); + wlan_objmgr_pdev_release_ref(pdev, dbg_id); + } + } + break; + case WLAN_VDEV_OP: + /* Iterate through VDEV list, invoke handler for each vdev */ + max_vdev_cnt = wlan_psoc_get_max_vdev_count(psoc); + for (obj_id = 0; obj_id < max_vdev_cnt; obj_id++) { + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + obj_id, dbg_id); + if (vdev != NULL) { + handler(psoc, vdev, arg); + wlan_objmgr_vdev_release_ref(vdev, dbg_id); + } + } + break; + case WLAN_PEER_OP: + /* Iterate through PEER list, invoke handler for each peer */ + peer_list = &objmgr->peer_list; + /* Since peer list has sublist, iterate through sublists */ + for (i = 0; i < WLAN_PEER_HASHSIZE; i++) { + peer = wlan_psoc_peer_list_peek_active_head(peer_list, + i, dbg_id); + while (peer) { + handler(psoc, (void *)peer, arg); + /* Get next peer */ + peer_next = + wlan_peer_get_next_active_peer_of_psoc( + peer_list, i, peer, dbg_id); + wlan_objmgr_peer_release_ref(peer, dbg_id); + peer = peer_next; + } + } + break; + default: + break; + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_iterate_obj_list); + +QDF_STATUS wlan_objmgr_iterate_obj_list_all( + struct wlan_objmgr_psoc *psoc, + enum wlan_objmgr_obj_type obj_type, + wlan_objmgr_op_handler handler, + void *arg, uint8_t lock_free_op, + wlan_objmgr_ref_dbgid dbg_id) +{ + uint16_t obj_id; + uint8_t i; + struct wlan_objmgr_psoc_objmgr *objmgr = &psoc->soc_objmgr; + struct wlan_peer_list *peer_list; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_next; + uint16_t max_vdev_cnt; + + /* If caller requests for lock free operation, do not acquire, + * handler will handle the synchronization + */ + + switch (obj_type) { + case WLAN_PDEV_OP: + /* Iterate through PDEV list, invoke handler for each pdev */ + for (obj_id = 0; obj_id < WLAN_UMAC_MAX_PDEVS; obj_id++) { + pdev = 
wlan_objmgr_get_pdev_by_id_no_state(psoc, + obj_id, dbg_id); + if (pdev != NULL) { + handler(psoc, (void *)pdev, arg); + wlan_objmgr_pdev_release_ref(pdev, dbg_id); + } + } + break; + case WLAN_VDEV_OP: + /* Iterate through VDEV list, invoke handler for each vdev */ + max_vdev_cnt = wlan_psoc_get_max_vdev_count(psoc); + for (obj_id = 0; obj_id < max_vdev_cnt; obj_id++) { + vdev = wlan_objmgr_get_vdev_by_id_from_psoc_no_state( + psoc, obj_id, dbg_id); + if (vdev != NULL) { + handler(psoc, vdev, arg); + wlan_objmgr_vdev_release_ref(vdev, dbg_id); + } + } + break; + case WLAN_PEER_OP: + /* Iterate through PEER list, invoke handler for each peer */ + peer_list = &objmgr->peer_list; + /* Since peer list has sublist, iterate through sublists */ + for (i = 0; i < WLAN_PEER_HASHSIZE; i++) { + peer = wlan_psoc_peer_list_peek_head_ref(peer_list, i, + dbg_id); + + while (peer) { + handler(psoc, (void *)peer, arg); + /* Get next peer */ + peer_next = wlan_peer_get_next_peer_of_psoc_ref( + peer_list, i, + peer, dbg_id); + wlan_objmgr_peer_release_ref(peer, dbg_id); + peer = peer_next; + } + } + break; + default: + break; + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_iterate_obj_list_all); + +/** + * wlan_objmgr_iterate_obj_list_all_noref() - iterate through all psoc objects + * without taking ref + * @psoc: PSOC object + * @obj_type: PDEV_OP/VDEV_OP/PEER_OP + * @handler: the handler will be called for each object of requested type + * the handler should be implemented to perform required operation + * @arg: arguments passed by caller + * + * API to be used for performing the operations on all PDEV/VDEV/PEER objects + * of psoc with lock protection + * + * Return: SUCCESS/FAILURE + */ +static QDF_STATUS wlan_objmgr_iterate_obj_list_all_noref( + struct wlan_objmgr_psoc *psoc, + enum wlan_objmgr_obj_type obj_type, + wlan_objmgr_op_handler handler, + void *arg) +{ + uint16_t obj_id; + uint8_t i; + struct wlan_objmgr_psoc_objmgr *objmgr = &psoc->soc_objmgr; + 
struct wlan_peer_list *peer_list; + qdf_list_t *obj_list; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_next; + uint16_t max_vdev_cnt; + + /* If caller requests for lock free opeation, do not acquire, + * handler will handle the synchronization + */ + wlan_psoc_obj_lock(psoc); + + switch (obj_type) { + case WLAN_PDEV_OP: + /* Iterate through PDEV list, invoke handler for each pdev */ + for (obj_id = 0; obj_id < WLAN_UMAC_MAX_PDEVS; obj_id++) { + pdev = objmgr->wlan_pdev_list[obj_id]; + if (pdev != NULL) + handler(psoc, (void *)pdev, arg); + } + break; + case WLAN_VDEV_OP: + /* Iterate through VDEV list, invoke handler for each vdev */ + max_vdev_cnt = wlan_psoc_get_max_vdev_count(psoc); + for (obj_id = 0; obj_id < max_vdev_cnt; obj_id++) { + vdev = objmgr->wlan_vdev_list[obj_id]; + if (vdev != NULL) + handler(psoc, vdev, arg); + } + break; + case WLAN_PEER_OP: + /* Iterate through PEER list, invoke handler for each peer */ + peer_list = &objmgr->peer_list; + /* psoc lock should be taken before list lock */ + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Since peer list has sublist, iterate through sublists */ + for (i = 0; i < WLAN_PEER_HASHSIZE; i++) { + obj_list = &peer_list->peer_hash[i]; + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer) { + /* Get next peer */ + peer_next = wlan_peer_get_next_peer_of_psoc( + obj_list, peer); + handler(psoc, (void *)peer, arg); + peer = peer_next; + } + } + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + break; + default: + break; + } + wlan_psoc_obj_unlock(psoc); + + return QDF_STATUS_SUCCESS; +} + +static void wlan_objmgr_psoc_peer_delete(struct wlan_objmgr_psoc *psoc, + void *obj, void *args) +{ + struct wlan_objmgr_peer *peer = (struct wlan_objmgr_peer *)obj; + + wlan_objmgr_peer_obj_delete(peer); +} + +static void wlan_objmgr_psoc_vdev_delete(struct wlan_objmgr_psoc *psoc, + void *obj, void *args) +{ + struct 
wlan_objmgr_vdev *vdev = (struct wlan_objmgr_vdev *)obj; + + wlan_objmgr_vdev_obj_delete(vdev); +} + +static void wlan_objmgr_psoc_pdev_delete(struct wlan_objmgr_psoc *psoc, + void *obj, void *args) +{ + struct wlan_objmgr_pdev *pdev = (struct wlan_objmgr_pdev *)obj; + + wlan_objmgr_pdev_obj_delete(pdev); +} + +QDF_STATUS wlan_objmgr_free_all_objects_per_psoc( + struct wlan_objmgr_psoc *psoc) +{ + /* Free all peers */ + wlan_objmgr_iterate_obj_list(psoc, WLAN_PEER_OP, + wlan_objmgr_psoc_peer_delete, NULL, 1, + WLAN_OBJMGR_ID); + /* Free all vdevs */ + wlan_objmgr_iterate_obj_list(psoc, WLAN_VDEV_OP, + wlan_objmgr_psoc_vdev_delete, NULL, 1, + WLAN_OBJMGR_ID); + /* Free all PDEVs */ + wlan_objmgr_iterate_obj_list(psoc, WLAN_PDEV_OP, + wlan_objmgr_psoc_pdev_delete, NULL, 1, + WLAN_OBJMGR_ID); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_trigger_psoc_comp_priv_object_creation( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id) +{ + wlan_objmgr_psoc_create_handler handler; + void *arg; + QDF_STATUS obj_status = QDF_STATUS_SUCCESS; + + /* Component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_psoc_obj_lock(psoc); + /* If component object is already created, delete old + * component object, then invoke creation + */ + if (psoc->soc_comp_priv_obj[id] != NULL) { + wlan_psoc_obj_unlock(psoc); + return QDF_STATUS_E_FAILURE; + } + wlan_psoc_obj_unlock(psoc); + /* Invoke registered create handlers */ + handler = g_umac_glb_obj->psoc_create_handler[id]; + arg = g_umac_glb_obj->psoc_create_handler_arg[id]; + if (handler != NULL) + psoc->obj_status[id] = handler(psoc, arg); + else + return QDF_STATUS_E_FAILURE; + + /* If object status is created, then only handle this object status */ + if (psoc->obj_state == WLAN_OBJ_STATE_CREATED) { + /* Derive object status */ + obj_status = wlan_objmgr_psoc_object_status(psoc); + /* Move PSOC object state to Partially created state */ + if (obj_status == 
QDF_STATUS_COMP_ASYNC) { + /*TODO atomic */ + psoc->obj_state = WLAN_OBJ_STATE_PARTIALLY_CREATED; + } + } + + return obj_status; +} + +QDF_STATUS wlan_objmgr_trigger_psoc_comp_priv_object_deletion( + struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id) +{ + wlan_objmgr_psoc_destroy_handler handler; + void *arg; + QDF_STATUS obj_status = QDF_STATUS_SUCCESS; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_psoc_obj_lock(psoc); + /* Component object was never created, invalid operation */ + if (psoc->soc_comp_priv_obj[id] == NULL) { + wlan_psoc_obj_unlock(psoc); + return QDF_STATUS_E_FAILURE; + } + wlan_psoc_obj_unlock(psoc); + /* Invoke registered destroy handler for this component */ + handler = g_umac_glb_obj->psoc_destroy_handler[id]; + arg = g_umac_glb_obj->psoc_destroy_handler_arg[id]; + if (handler != NULL) + psoc->obj_status[id] = handler(psoc, arg); + else + return QDF_STATUS_E_FAILURE; + + /* If object status is created, then only handle this object status */ + if (psoc->obj_state == WLAN_OBJ_STATE_CREATED) { + obj_status = wlan_objmgr_psoc_object_status(psoc); + /* move object state to DEL progress */ + if (obj_status == QDF_STATUS_COMP_ASYNC) + psoc->obj_state = WLAN_OBJ_STATE_COMP_DEL_PROGRESS; + } + + return obj_status; +} + +/* Util APIs */ + +QDF_STATUS wlan_objmgr_psoc_pdev_attach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t id = 0; + QDF_STATUS status; + + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* + * Derive pdev id from pdev map + * First free pdev id is assigned + */ + while ((id < WLAN_UMAC_MAX_PDEVS) && + (objmgr->wlan_pdev_id_map & (1<<id))) + id++; + /* If no free pdev id slot is available, return failure */ + if (id == WLAN_UMAC_MAX_PDEVS) { + status = QDF_STATUS_E_FAILURE; + } else { + /* Reserve the free id in the pdev id map */ + objmgr->wlan_pdev_id_map |= (1<<id); + /* store pdev in pdev list */ + objmgr->wlan_pdev_list[id] = pdev; + /* Increment pdev count */ + objmgr->wlan_pdev_count++; + /* save pdev id */ + pdev->pdev_objmgr.wlan_pdev_id = id; + status = QDF_STATUS_SUCCESS; + /* Increment psoc ref count to block its free before pdev */ + 
wlan_objmgr_psoc_get_ref(psoc, WLAN_OBJMGR_ID); + } + wlan_psoc_obj_unlock(psoc); + + return status; +} + +QDF_STATUS wlan_objmgr_psoc_pdev_detach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t id; + + id = pdev->pdev_objmgr.wlan_pdev_id; + /* If id is invalid, return */ + if (id >= WLAN_UMAC_MAX_PDEVS) + return QDF_STATUS_E_FAILURE; + + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* Free pdev id slot in the pdev id map */ + objmgr->wlan_pdev_id_map &= ~(1<<id); + /* Reset pdev pointer to NULL in pdev list array */ + objmgr->wlan_pdev_list[id] = NULL; + objmgr->wlan_pdev_count--; + pdev->pdev_objmgr.wlan_pdev_id = 0xff; + wlan_psoc_obj_unlock(psoc); + /* Release ref count of psoc */ + wlan_objmgr_psoc_release_ref(psoc, WLAN_OBJMGR_ID); + + return QDF_STATUS_SUCCESS; +} + +struct wlan_objmgr_pdev *wlan_objmgr_get_pdev_by_id( + struct wlan_objmgr_psoc *psoc, uint8_t id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + struct wlan_objmgr_pdev *pdev = NULL; + + /* If id is invalid, return */ + if (id >= WLAN_UMAC_MAX_PDEVS) + return NULL; + + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* get pdev from pdev list */ + pdev = objmgr->wlan_pdev_list[id]; + /* Do not return object, if it is not CREATED state */ + if (pdev != NULL) { + if (wlan_objmgr_pdev_try_get_ref(pdev, dbg_id) != + QDF_STATUS_SUCCESS) + pdev = NULL; + } + + wlan_psoc_obj_unlock(psoc); + + return pdev; +} +qdf_export_symbol(wlan_objmgr_get_pdev_by_id); + +struct wlan_objmgr_pdev *wlan_objmgr_get_pdev_by_id_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + struct wlan_objmgr_pdev *pdev = NULL; + + /* If id is invalid, return */ + if (id >= WLAN_UMAC_MAX_PDEVS) + return NULL; + + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* get pdev from pdev list */ + pdev = objmgr->wlan_pdev_list[id]; + /* Do not return object, if it is not CREATED state */ + if (pdev != NULL) + 
wlan_objmgr_pdev_get_ref(pdev, dbg_id); + + wlan_psoc_obj_unlock(psoc); + + return pdev; +} +QDF_STATUS wlan_objmgr_psoc_vdev_attach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t id = 0; + uint8_t map_index = 0; + uint8_t map_entry_size = 32; + uint8_t adjust_ix = 0; + QDF_STATUS status; + + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* Find first free vdev id */ + while ((id < objmgr->max_vdev_count) && + (objmgr->wlan_vdev_id_map[map_index] & (1<<(id - adjust_ix)))) { + id++; + /* + * The map is two DWORDS(32 bits), so, map_index + * adjust_ix derived based on the id value + */ + if (id == ((map_index + 1) * map_entry_size)) { + map_index++; + adjust_ix = map_index * map_entry_size; + } + } + /* If no free slot, return failure */ + if (id == objmgr->max_vdev_count) { + status = QDF_STATUS_E_FAILURE; + } else { + /* set free vdev id index */ + objmgr->wlan_vdev_id_map[map_index] |= (1<<(id-adjust_ix)); + /* store vdev pointer in vdev list */ + objmgr->wlan_vdev_list[id] = vdev; + /* increment vdev counter */ + objmgr->wlan_vdev_count++; + /* save vdev id */ + vdev->vdev_objmgr.vdev_id = id; + status = QDF_STATUS_SUCCESS; + } + wlan_psoc_obj_unlock(psoc); + + return status; +} + +QDF_STATUS wlan_objmgr_psoc_vdev_detach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t id = 0; + uint8_t map_index = 0; + uint8_t map_entry_size = 32; + uint8_t adjust_ix = 0; + + id = vdev->vdev_objmgr.vdev_id; + /* Invalid vdev id */ + if (id >= wlan_psoc_get_max_vdev_count(psoc)) + return QDF_STATUS_E_FAILURE; + /* + * Derive map_index and adjust_ix to find actual DWORD + * the id map is present + */ + while ((id - adjust_ix) >= map_entry_size) { + map_index++; + adjust_ix = map_index * map_entry_size; + } + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* unset bit, to free the slot */ + 
objmgr->wlan_vdev_id_map[map_index] &= ~(1<<(id-adjust_ix)); + /* reset VDEV pointer to NULL in VDEV list array */ + objmgr->wlan_vdev_list[id] = NULL; + /* decrement vdev count */ + objmgr->wlan_vdev_count--; + vdev->vdev_objmgr.vdev_id = 0xff; + wlan_psoc_obj_unlock(psoc); + + return QDF_STATUS_SUCCESS; +} + +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_opmode_from_psoc( + struct wlan_objmgr_psoc *psoc, + enum QDF_OPMODE opmode, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev = NULL; + int vdev_cnt = 0; + uint16_t max_vdev_cnt; + + /* if PSOC is NULL, return */ + if (psoc == NULL) + return NULL; + + wlan_psoc_obj_lock(psoc); + + max_vdev_cnt = wlan_psoc_get_max_vdev_count(psoc); + /* retrieve vdev pointer from vdev list */ + while (vdev_cnt < max_vdev_cnt) { + vdev = psoc->soc_objmgr.wlan_vdev_list[vdev_cnt]; + vdev_cnt++; + if (vdev == NULL) + continue; + wlan_vdev_obj_lock(vdev); + if (vdev->vdev_mlme.vdev_opmode == opmode) { + wlan_vdev_obj_unlock(vdev); + if (wlan_objmgr_vdev_try_get_ref(vdev, dbg_id) != + QDF_STATUS_SUCCESS) { + vdev = NULL; + continue; + } + break; + } + wlan_vdev_obj_unlock(vdev); + } + wlan_psoc_obj_unlock(psoc); + + return vdev; +} + +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_psoc( + struct wlan_objmgr_psoc *psoc, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + + /* if PSOC is NULL, return */ + if (psoc == NULL) + return NULL; + /* vdev id is invalid */ + if (vdev_id >= wlan_psoc_get_max_vdev_count(psoc)) + return NULL; + + wlan_psoc_obj_lock(psoc); + /* retrieve vdev pointer from vdev list */ + vdev = psoc->soc_objmgr.wlan_vdev_list[vdev_id]; + if (vdev != NULL) { + if (wlan_objmgr_vdev_try_get_ref(vdev, dbg_id) != + QDF_STATUS_SUCCESS) + vdev = NULL; + } + wlan_psoc_obj_unlock(psoc); + + return vdev; +} +qdf_export_symbol(wlan_objmgr_get_vdev_by_id_from_psoc); + +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_id_from_psoc_no_state( + struct wlan_objmgr_psoc 
*psoc, uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + + /* if PSOC is NULL, return */ + if (psoc == NULL) + return NULL; + /* vdev id is invalid */ + if (vdev_id >= wlan_psoc_get_max_vdev_count(psoc)) + return NULL; + + wlan_psoc_obj_lock(psoc); + /* retrieve vdev pointer from vdev list */ + vdev = psoc->soc_objmgr.wlan_vdev_list[vdev_id]; + if (vdev != NULL) + wlan_objmgr_vdev_get_ref(vdev, dbg_id); + + wlan_psoc_obj_unlock(psoc); + + return vdev; +} +qdf_export_symbol(wlan_objmgr_get_vdev_by_id_from_psoc_no_state); + +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_psoc( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev; + + /* if PSOC is NULL, return */ + if (psoc == NULL) + return NULL; + + if (!macaddr) + return NULL; + + pdev = wlan_objmgr_get_pdev_by_id(psoc, pdev_id, dbg_id); + if (!pdev) { + obj_mgr_err("pdev is null"); + return NULL; + } + vdev = wlan_objmgr_get_vdev_by_macaddr_from_pdev(pdev, macaddr, dbg_id); + wlan_objmgr_pdev_release_ref(pdev, dbg_id); + + return vdev; +} +qdf_export_symbol(wlan_objmgr_get_vdev_by_macaddr_from_psoc); + +struct wlan_objmgr_vdev *wlan_objmgr_get_vdev_by_macaddr_from_psoc_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev; + + /* if PSOC is NULL, return */ + if (psoc == NULL) + return NULL; + + if (!macaddr) + return NULL; + + pdev = wlan_objmgr_get_pdev_by_id(psoc, pdev_id, dbg_id); + if (!pdev) { + obj_mgr_err("pdev is null"); + return NULL; + } + vdev = wlan_objmgr_get_vdev_by_macaddr_from_pdev_no_state(pdev, macaddr, dbg_id); + wlan_objmgr_pdev_release_ref(pdev, dbg_id); + + return vdev; +} +qdf_export_symbol(wlan_objmgr_get_vdev_by_macaddr_from_psoc_no_state); + +static void wlan_obj_psoc_peerlist_add_tail(qdf_list_t 
*obj_list, + struct wlan_objmgr_peer *obj) +{ + qdf_list_insert_back(obj_list, &obj->psoc_peer); +} + +static QDF_STATUS wlan_obj_psoc_peerlist_remove_peer( + qdf_list_t *obj_list, + struct wlan_objmgr_peer *peer) +{ + qdf_list_node_t *psoc_node = NULL; + + if (peer == NULL) + return QDF_STATUS_E_FAILURE; + /* get vdev list node element */ + psoc_node = &peer->psoc_peer; + /* list is empty, return failure */ + if (qdf_list_remove_node(obj_list, psoc_node) != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS wlan_peer_bssid_match(struct wlan_objmgr_peer *peer, + uint8_t *bssid) +{ + struct wlan_objmgr_vdev *vdev = wlan_peer_get_vdev(peer); + uint8_t *peer_bssid = wlan_vdev_mlme_get_macaddr(vdev); + + if (WLAN_ADDR_EQ(peer_bssid, bssid) == QDF_STATUS_SUCCESS) + return QDF_STATUS_SUCCESS; + else + return QDF_STATUS_E_FAILURE; +} + +/** + * wlan_obj_psoc_peerlist_get_peer_logically_deleted() - get peer + * from psoc peer list + * @psoc: PSOC object + * @macaddr: MAC address + * + * API to finds peer object pointer of logically deleted peer + * + * Return: peer pointer + * NULL on FAILURE + */ +static struct wlan_objmgr_peer * + wlan_obj_psoc_peerlist_get_peer_logically_deleted( + qdf_list_t *obj_list, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer != NULL) { + /* For peer, macaddr is key */ + if (WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) { + /* Return peer in logically deleted state */ + if (peer->obj_state == + WLAN_OBJ_STATE_LOGICALLY_DELETED) { + wlan_objmgr_peer_get_ref(peer, dbg_id); + + return peer; + } + + } + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + + /* Not found, return NULL */ + return NULL; +} + +/** + * 
wlan_obj_psoc_peerlist_get_peer() - get peer from psoc peer list + * @psoc: PSOC object + * @macaddr: MAC address + * + * API to finds peer object pointer by MAC addr from hash list + * + * Return: peer pointer + * NULL on FAILURE + */ +static struct wlan_objmgr_peer *wlan_obj_psoc_peerlist_get_peer( + qdf_list_t *obj_list, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer != NULL) { + /* For peer, macaddr is key */ + if (WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) { + if (wlan_objmgr_peer_try_get_ref(peer, dbg_id) == + QDF_STATUS_SUCCESS) { + return peer; + } + } + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + + /* Not found, return NULL */ + return NULL; +} + +/** + * wlan_obj_psoc_peerlist_get_peer_by_pdev_id() - get peer from psoc peer list + * @psoc: PSOC object + * @macaddr: MAC address + * #pdev_id: Pdev id + * + * API to finds peer object pointer by MAC addr and pdev id from hash list + * + * Return: peer pointer + * NULL on FAILURE + */ +static struct wlan_objmgr_peer *wlan_obj_psoc_peerlist_get_peer_by_pdev_id( + qdf_list_t *obj_list, uint8_t *macaddr, + uint8_t pdev_id, wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer != NULL) { + /* For peer, macaddr is key */ + if ((WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) && + (wlan_peer_get_pdev_id(peer) == pdev_id)) { + if (wlan_objmgr_peer_try_get_ref(peer, dbg_id) == + QDF_STATUS_SUCCESS) { + return peer; + } + } + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + + 
/* Not found, return NULL */ + return NULL; +} + +static struct wlan_objmgr_peer *wlan_obj_psoc_peerlist_get_peer_no_state( + qdf_list_t *obj_list, uint8_t *macaddr, + uint8_t pdev_id, wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer != NULL) { + /* For peer, macaddr and pdev_id is key */ + if ((WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) && + (wlan_peer_get_pdev_id(peer) == pdev_id)) { + wlan_objmgr_peer_get_ref(peer, dbg_id); + + return peer; + } + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + + /* Not found, return NULL */ + return NULL; +} + +/** + * wlan_obj_psoc_populate_logically_del_peerlist_by_mac_n_bssid() - get peer + * from psoc peer list using + * mac and vdev self mac + * @obj_list: peer object list + * @macaddr: MAC address + * @bssid: BSSID address + * @dbg_id: id of the caller + * + * API to finds peer object pointer by MAC addr and BSSID from + * peer hash list for a node which is in logically deleted state, + * bssid check is done on matching peer + * + * Caller to free the list allocated in this function + * + * Return: list of peer pointers + * NULL on FAILURE + */ +static qdf_list_t + *wlan_obj_psoc_populate_logically_del_peerlist_by_mac_n_bssid( + qdf_list_t *obj_list, uint8_t *macaddr, + uint8_t *bssid, uint8_t pdev_id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + struct wlan_logically_del_peer *peer_list = NULL; + qdf_list_t *logical_del_peer_list = NULL; + bool lock_released = false; + + logical_del_peer_list = qdf_mem_malloc(sizeof(*logical_del_peer_list)); + if (!logical_del_peer_list) { + obj_mgr_err("failed to allocate list"); + return NULL; + } + + qdf_list_create(logical_del_peer_list, 
WLAN_UMAC_PSOC_MAX_PEERS); + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer != NULL) { + wlan_peer_obj_lock(peer); + /* For peer, macaddr and pdev id are keys */ + if ((WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) && + (wlan_peer_get_pdev_id(peer) == pdev_id)) { + /* + * if BSSID not NULL, + * then match is requested by caller, check BSSID + * (vdev mac == bssid) -- return peer + * (vdev mac != bssid) -- perform next iteration + */ + if ((bssid == NULL) || + (wlan_peer_bssid_match(peer, bssid) == + QDF_STATUS_SUCCESS)) { + /* Return peer in logically deleted state */ + if ((peer->obj_state == + WLAN_OBJ_STATE_LOGICALLY_DELETED) && + qdf_atomic_read( + &peer->peer_objmgr.ref_cnt)) { + + wlan_objmgr_peer_get_ref(peer, dbg_id); + wlan_peer_obj_unlock(peer); + lock_released = true; + + peer_list = + qdf_mem_malloc( + sizeof(struct wlan_logically_del_peer)); + if (peer_list == NULL) { + wlan_objmgr_peer_release_ref(peer, dbg_id); + /* Lock is already released */ + obj_mgr_alert("Mem alloc failed"); + WLAN_OBJMGR_BUG(0); + break; + } + + peer_list->peer = peer; + + qdf_list_insert_front( + logical_del_peer_list, + &peer_list->list); + } + } + } + + if (!lock_released) + wlan_peer_obj_unlock(peer); + + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + lock_released = false; + } + + /* Not found, return NULL */ + if (qdf_list_empty(logical_del_peer_list)) { + qdf_mem_free(logical_del_peer_list); + return NULL; + } else { + return logical_del_peer_list; + } + +} + +/** + * wlan_obj_psoc_peerlist_get_peer_by_mac_n_bssid() - get peer from psoc peer + * list using mac and vdev + * self mac + * @psoc: PSOC object + * @macaddr: MAC address + * @bssid: BSSID address + * + * API to finds peer object pointer by MAC addr and BSSID from + * peer hash list, bssid check is done on matching peer + * + * Return: peer pointer + * NULL 
on FAILURE + */ +static struct wlan_objmgr_peer *wlan_obj_psoc_peerlist_get_peer_by_mac_n_bssid( + qdf_list_t *obj_list, uint8_t *macaddr, + uint8_t *bssid, uint8_t pdev_id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer != NULL) { + /* For peer, macaddr is key */ + if (WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) { + /* + * BSSID match is requested by caller, check BSSID + * (vdev mac == bssid) -- return peer + * (vdev mac != bssid) -- perform next iteration + */ + if ((wlan_peer_bssid_match(peer, bssid) == + QDF_STATUS_SUCCESS) && + (wlan_peer_get_pdev_id(peer) == pdev_id)) { + if (wlan_objmgr_peer_try_get_ref(peer, dbg_id) + == QDF_STATUS_SUCCESS) { + return peer; + } + } + } + /* Move to next peer */ + peer_temp = peer; + peer = wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + /* Not found, return NULL */ + return NULL; +} + +static struct wlan_objmgr_peer + *wlan_obj_psoc_peerlist_get_peer_by_mac_n_bssid_no_state( + qdf_list_t *obj_list, uint8_t *macaddr, + uint8_t *bssid, + uint8_t pdev_id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_peer *peer_temp; + + /* Iterate through hash list to get the peer */ + peer = wlan_psoc_peer_list_peek_head(obj_list); + while (peer != NULL) { + /* For peer, macaddr is key */ + if (WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), macaddr) + == QDF_STATUS_SUCCESS) { + /* + * BSSID match is requested by caller, check BSSID + * (vdev mac == bssid) -- return peer + * (vdev mac != bssid) -- perform next iteration + */ + if ((wlan_peer_bssid_match(peer, bssid) == + QDF_STATUS_SUCCESS) && + (wlan_peer_get_pdev_id(peer) == pdev_id)) { + wlan_objmgr_peer_get_ref(peer, dbg_id); + + return peer; + } + } + /* Move to next peer */ + peer_temp = peer; + peer = 
wlan_peer_get_next_peer_of_psoc(obj_list, peer_temp); + } + + /* Not found, return NULL */ + return NULL; +} + +QDF_STATUS wlan_objmgr_psoc_peer_attach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_peer_list *peer_list; + + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* Max temporary peer limit is reached, return failure */ + if (peer->peer_mlme.peer_type == WLAN_PEER_STA_TEMP) { + if (objmgr->temp_peer_count >= WLAN_MAX_PSOC_TEMP_PEERS) { + wlan_psoc_obj_unlock(psoc); + return QDF_STATUS_E_FAILURE; + } + } else { + /* Max peer limit is reached, return failure */ + if (objmgr->wlan_peer_count + >= wlan_psoc_get_max_peer_count(psoc)) { + wlan_psoc_obj_unlock(psoc); + return QDF_STATUS_E_FAILURE; + } + } + + /* Derive hash index from mac address */ + hash_index = WLAN_PEER_HASH(peer->macaddr); + peer_list = &objmgr->peer_list; + /* psoc lock should be taken before list lock */ + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* add peer to hash peer list */ + wlan_obj_psoc_peerlist_add_tail( + &peer_list->peer_hash[hash_index], + peer); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + /* Increment peer count */ + if (peer->peer_mlme.peer_type == WLAN_PEER_STA_TEMP) + objmgr->temp_peer_count++; + else + objmgr->wlan_peer_count++; + + wlan_psoc_obj_unlock(psoc); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_psoc_peer_detach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_peer_list *peer_list; + + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* if list is empty, return */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return QDF_STATUS_E_FAILURE; + } + /* Get hash index, to locate the actual peer list */ + hash_index = WLAN_PEER_HASH(peer->macaddr); + peer_list = &objmgr->peer_list; + /* psoc lock should be 
taken before list lock */ + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* removes the peer from peer_list */ + if (wlan_obj_psoc_peerlist_remove_peer( + &peer_list->peer_hash[hash_index], + peer) == + QDF_STATUS_E_FAILURE) { + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + obj_mgr_err("Failed to detach peer"); + return QDF_STATUS_E_FAILURE; + } + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + /* Decrement peer count */ + if (peer->peer_mlme.peer_type == WLAN_PEER_STA_TEMP) + objmgr->temp_peer_count--; + else + objmgr->wlan_peer_count--; + wlan_psoc_obj_unlock(psoc); + + return QDF_STATUS_SUCCESS; +} + +struct wlan_objmgr_peer *wlan_objmgr_get_peer_logically_deleted( + struct wlan_objmgr_psoc *psoc, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_logically_deleted( + &peer_list->peer_hash[hash_index], macaddr, dbg_id); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} + +struct wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac( + struct wlan_objmgr_psoc *psoc, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + if (!macaddr) + return NULL; + + /* psoc lock should be taken before peer list lock */ + 
wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer( + &peer_list->peer_hash[hash_index], macaddr, dbg_id); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} +qdf_export_symbol(wlan_objmgr_get_peer_by_mac); + +struct wlan_objmgr_peer *wlan_objmgr_get_peer( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + if (pdev_id >= WLAN_UMAC_MAX_PDEVS) + QDF_ASSERT(0); + + if (!macaddr) + return NULL; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_by_pdev_id( + &peer_list->peer_hash[hash_index], macaddr, pdev_id, dbg_id); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} +qdf_export_symbol(wlan_objmgr_get_peer); + +struct wlan_objmgr_peer *wlan_objmgr_get_peer_nolock( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + 
struct wlan_peer_list *peer_list; + + /* psoc lock should be taken before peer list lock */ + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) + return NULL; + + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_by_pdev_id( + &peer_list->peer_hash[hash_index], macaddr, pdev_id, dbg_id); + + return peer; +} +qdf_export_symbol(wlan_objmgr_get_peer_nolock); + + +struct wlan_objmgr_peer *wlan_objmgr_get_peer_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *macaddr, wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_no_state( + &peer_list->peer_hash[hash_index], macaddr, pdev_id, dbg_id); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} +qdf_export_symbol(wlan_objmgr_get_peer_no_state); + +struct wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac_n_vdev( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + 
objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_by_mac_n_bssid( + &peer_list->peer_hash[hash_index], macaddr, bssid, + pdev_id, dbg_id); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} +qdf_export_symbol(wlan_objmgr_get_peer_by_mac_n_vdev); + + +/** + * wlan_objmgr_populate_logically_deleted_peerlist_by_mac_n_vdev() - get peer from psoc + * peer list using + * mac and vdev + * self mac + * @psoc: PSOC object + * @pdev_id: Pdev id + * @macaddr: MAC address + * @bssid: BSSID address. NULL mac means search all. + * @dbg_id: id of the caller + * + * API to finds peer object pointer by MAC addr and BSSID from + * peer hash list, bssid check is done on matching peer + * + * Return: list of peer pointer pointers + * NULL on FAILURE + */ + +qdf_list_t *wlan_objmgr_populate_logically_deleted_peerlist_by_mac_n_vdev( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_peer_list *peer_list = NULL; + qdf_list_t *logical_del_peer_list = NULL; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + + /* Iterate through peer list, get peer */ + logical_del_peer_list = + 
wlan_obj_psoc_populate_logically_del_peerlist_by_mac_n_bssid( + &peer_list->peer_hash[hash_index], macaddr, + bssid, pdev_id, dbg_id); + + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return logical_del_peer_list; +} +qdf_export_symbol(wlan_objmgr_populate_logically_deleted_peerlist_by_mac_n_vdev); + +struct wlan_objmgr_peer *wlan_objmgr_get_peer_by_mac_n_vdev_no_state( + struct wlan_objmgr_psoc *psoc, uint8_t pdev_id, + uint8_t *bssid, uint8_t *macaddr, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_psoc_objmgr *objmgr; + uint8_t hash_index; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_peer_list *peer_list; + + /* psoc lock should be taken before peer list lock */ + wlan_psoc_obj_lock(psoc); + objmgr = &psoc->soc_objmgr; + /* List is empty, return NULL */ + if (objmgr->wlan_peer_count == 0) { + wlan_psoc_obj_unlock(psoc); + return NULL; + } + /* reduce the search window, with hash key */ + hash_index = WLAN_PEER_HASH(macaddr); + peer_list = &objmgr->peer_list; + qdf_spin_lock_bh(&peer_list->peer_list_lock); + /* Iterate through peer list, get peer */ + peer = wlan_obj_psoc_peerlist_get_peer_by_mac_n_bssid_no_state( + &peer_list->peer_hash[hash_index], macaddr, bssid, + pdev_id, dbg_id); + qdf_spin_unlock_bh(&peer_list->peer_list_lock); + wlan_psoc_obj_unlock(psoc); + + return peer; +} +qdf_export_symbol(wlan_objmgr_get_peer_by_mac_n_vdev_no_state); + +void *wlan_objmgr_psoc_get_comp_private_obj(struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id id) +{ + void *comp_private_obj; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + QDF_BUG(0); + return NULL; + } + + if (psoc == NULL) { + QDF_BUG(0); + return NULL; + } + + comp_private_obj = psoc->soc_comp_priv_obj[id]; + + return comp_private_obj; +} +qdf_export_symbol(wlan_objmgr_psoc_get_comp_private_obj); + +void wlan_objmgr_psoc_get_ref(struct wlan_objmgr_psoc *psoc, + wlan_objmgr_ref_dbgid id) +{ + if (psoc == NULL) { + 
obj_mgr_err("psoc obj is NULL for id:%d", id); + QDF_ASSERT(0); + return; + } + /* Increment ref count */ + qdf_atomic_inc(&psoc->soc_objmgr.ref_cnt); + qdf_atomic_inc(&psoc->soc_objmgr.ref_id_dbg[id]); + return; +} +qdf_export_symbol(wlan_objmgr_psoc_get_ref); + +QDF_STATUS wlan_objmgr_psoc_try_get_ref(struct wlan_objmgr_psoc *psoc, + wlan_objmgr_ref_dbgid id) +{ + if (psoc == NULL) { + obj_mgr_err("psoc obj is NULL for id:%d", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + + wlan_psoc_obj_lock(psoc); + if (psoc->obj_state != WLAN_OBJ_STATE_CREATED) { + wlan_psoc_obj_unlock(psoc); + if (psoc->soc_objmgr.print_cnt++ <= + WLAN_OBJMGR_RATELIMIT_THRESH) + obj_mgr_err( + "[Ref id: %d] psoc is not in Created state(%d)", + id, psoc->obj_state); + + return QDF_STATUS_E_RESOURCES; + } + + /* Increment ref count */ + wlan_objmgr_psoc_get_ref(psoc, id); + wlan_psoc_obj_unlock(psoc); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_psoc_try_get_ref); + +void wlan_objmgr_psoc_release_ref(struct wlan_objmgr_psoc *psoc, + wlan_objmgr_ref_dbgid id) +{ + if (psoc == NULL) { + obj_mgr_err("psoc obj is NULL for id:%d", id); + QDF_ASSERT(0); + return; + } + + if (!qdf_atomic_read(&psoc->soc_objmgr.ref_id_dbg[id])) { + obj_mgr_err("psoc ref cnt was not taken by %d", id); + wlan_objmgr_print_ref_ids(psoc->soc_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_FATAL); + WLAN_OBJMGR_BUG(0); + } + + if (!qdf_atomic_read(&psoc->soc_objmgr.ref_cnt)) { + obj_mgr_err("psoc ref cnt is 0"); + WLAN_OBJMGR_BUG(0); + return; + } + + qdf_atomic_dec(&psoc->soc_objmgr.ref_id_dbg[id]); + /* Decrement ref count, free psoc, if ref count == 0 */ + if (qdf_atomic_dec_and_test(&psoc->soc_objmgr.ref_cnt)) + wlan_objmgr_psoc_obj_destroy(psoc); + + return; +} +qdf_export_symbol(wlan_objmgr_psoc_release_ref); + +static void wlan_objmgr_psoc_peer_ref_print(struct wlan_objmgr_psoc *psoc, + void *obj, void *args) +{ + struct wlan_objmgr_peer *peer = (struct wlan_objmgr_peer *)obj; + WLAN_OBJ_STATE 
obj_state; + uint8_t vdev_id; + uint8_t *macaddr; + + wlan_peer_obj_lock(peer); + macaddr = wlan_peer_get_macaddr(peer); + obj_state = peer->obj_state; + vdev_id = wlan_vdev_get_id(wlan_peer_get_vdev(peer)); + wlan_peer_obj_unlock(peer); + + obj_mgr_alert("Peer MAC:%02x:%02x:%02x:%02x:%02x:%02x state:%d vdev_id:%d", + macaddr[0], macaddr[1], macaddr[2], macaddr[3], + macaddr[4], macaddr[5], obj_state, vdev_id); + wlan_objmgr_print_ref_ids(peer->peer_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_FATAL); +} + +static void wlan_objmgr_psoc_vdev_ref_print(struct wlan_objmgr_psoc *psoc, + void *obj, void *args) +{ + struct wlan_objmgr_vdev *vdev = (struct wlan_objmgr_vdev *)obj; + WLAN_OBJ_STATE obj_state; + uint8_t id; + + wlan_vdev_obj_lock(vdev); + id = wlan_vdev_get_id(vdev); + obj_state = vdev->obj_state; + wlan_vdev_obj_unlock(vdev); + obj_mgr_alert("Vdev ID is %d, state %d", id, obj_state); + + wlan_objmgr_print_ref_ids(vdev->vdev_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_FATAL); +} + +static void wlan_objmgr_psoc_pdev_ref_print(struct wlan_objmgr_psoc *psoc, + void *obj, void *args) +{ + struct wlan_objmgr_pdev *pdev = (struct wlan_objmgr_pdev *)obj; + uint8_t id; + + wlan_pdev_obj_lock(pdev); + id = wlan_objmgr_pdev_get_pdev_id(pdev); + wlan_pdev_obj_unlock(pdev); + obj_mgr_alert("pdev ID is %d", id); + + wlan_objmgr_print_ref_ids(pdev->pdev_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_FATAL); +} + +QDF_STATUS wlan_objmgr_print_ref_all_objects_per_psoc( + struct wlan_objmgr_psoc *psoc) +{ + obj_mgr_alert("Ref counts of PEER"); + wlan_objmgr_iterate_obj_list_all_noref(psoc, WLAN_PEER_OP, + wlan_objmgr_psoc_peer_ref_print, NULL); + obj_mgr_alert("Ref counts of VDEV"); + wlan_objmgr_iterate_obj_list_all_noref(psoc, WLAN_VDEV_OP, + wlan_objmgr_psoc_vdev_ref_print, NULL); + obj_mgr_alert("Ref counts of PDEV"); + wlan_objmgr_iterate_obj_list_all_noref(psoc, WLAN_PDEV_OP, + wlan_objmgr_psoc_pdev_ref_print, NULL); + + obj_mgr_alert(" Ref counts of PSOC"); + 
wlan_objmgr_print_ref_ids(psoc->soc_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_FATAL); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_print_ref_all_objects_per_psoc); + +QDF_STATUS wlan_objmgr_psoc_set_user_config(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_psoc_user_config *user_config_data) +{ + if (user_config_data == NULL) { + obj_mgr_err("user_config_data is NULL"); + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + wlan_psoc_obj_lock(psoc); + qdf_mem_copy(&psoc->soc_nif.user_config, user_config_data, + sizeof(psoc->soc_nif.user_config)); + wlan_psoc_obj_unlock(psoc); + + return QDF_STATUS_SUCCESS; +} + +void wlan_objmgr_psoc_check_for_pdev_leaks(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_objmgr_psoc_objmgr *_psoc; + struct wlan_objmgr_pdev *pdev; + int pdev_id; + uint32_t leaks = 0; + + QDF_BUG(psoc); + if (!psoc) + return; + + wlan_psoc_obj_lock(psoc); + _psoc = &psoc->soc_objmgr; + if (!_psoc->wlan_pdev_count) { + wlan_psoc_obj_unlock(psoc); + return; + } + + obj_mgr_err("objmgr pdev leaks detected for psoc %u!", _psoc->psoc_id); + obj_mgr_err("--------------------------------------------------------"); + obj_mgr_err("Pdev Id Refs Module"); + obj_mgr_err("--------------------------------------------------------"); + + wlan_objmgr_for_each_psoc_pdev(psoc, pdev_id, pdev) { + qdf_atomic_t *ref_id_dbg; + int ref_id; + int32_t refs; + + wlan_pdev_obj_lock(pdev); + ref_id_dbg = pdev->pdev_objmgr.ref_id_dbg; + wlan_objmgr_for_each_refs(ref_id_dbg, ref_id, refs) { + leaks++; + obj_mgr_err("%7u %4u %s", + pdev_id, refs, string_from_dbgid(ref_id)); + } + wlan_pdev_obj_unlock(pdev); + } + + QDF_DEBUG_PANIC("%u objmgr pdev leaks detected for psoc %u!", + leaks, _psoc->psoc_id); + + wlan_psoc_obj_unlock(psoc); +} +qdf_export_symbol(wlan_objmgr_psoc_check_for_pdev_leaks); + +void wlan_objmgr_psoc_check_for_vdev_leaks(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_objmgr_psoc_objmgr *_psoc; + struct wlan_objmgr_vdev *vdev; + int vdev_id; + 
uint32_t leaks = 0; + + QDF_BUG(psoc); + if (!psoc) + return; + + wlan_psoc_obj_lock(psoc); + _psoc = &psoc->soc_objmgr; + if (!_psoc->wlan_vdev_count) { + wlan_psoc_obj_unlock(psoc); + return; + } + + obj_mgr_err("objmgr vdev leaks detected for psoc %u!", _psoc->psoc_id); + obj_mgr_err("--------------------------------------------------------"); + obj_mgr_err("Vdev Id Refs Module"); + obj_mgr_err("--------------------------------------------------------"); + + wlan_objmgr_for_each_psoc_vdev(psoc, vdev_id, vdev) { + qdf_atomic_t *ref_id_dbg; + int ref_id; + int32_t refs; + + wlan_vdev_obj_lock(vdev); + ref_id_dbg = vdev->vdev_objmgr.ref_id_dbg; + wlan_objmgr_for_each_refs(ref_id_dbg, ref_id, refs) { + leaks++; + obj_mgr_err("%7u %4u %s", + vdev_id, refs, string_from_dbgid(ref_id)); + } + wlan_vdev_obj_unlock(vdev); + } + + QDF_DEBUG_PANIC("%u objmgr vdev leaks detected for psoc %u!", + leaks, _psoc->psoc_id); + + wlan_psoc_obj_unlock(psoc); +} +qdf_export_symbol(wlan_objmgr_psoc_check_for_vdev_leaks); + +void wlan_objmgr_psoc_check_for_peer_leaks(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_objmgr_psoc_objmgr *_psoc; + struct wlan_objmgr_vdev *vdev; + int vdev_id; + uint32_t leaks = 0; + + QDF_BUG(psoc); + if (!psoc) + return; + + wlan_psoc_obj_lock(psoc); + _psoc = &psoc->soc_objmgr; + if (!_psoc->temp_peer_count && !_psoc->wlan_peer_count) { + wlan_psoc_obj_unlock(psoc); + return; + } + + obj_mgr_err("objmgr peer leaks detected for psoc %u!", _psoc->psoc_id); + obj_mgr_err("--------------------------------------------------------"); + obj_mgr_err("Peer MAC Vdev Id Refs Module"); + obj_mgr_err("--------------------------------------------------------"); + + wlan_objmgr_for_each_psoc_vdev(psoc, vdev_id, vdev) { + struct wlan_objmgr_peer *peer; + + wlan_vdev_obj_lock(vdev); + wlan_objmgr_for_each_vdev_peer(vdev, peer) { + qdf_atomic_t *ref_id_dbg; + int ref_id; + int32_t refs; + + wlan_peer_obj_lock(peer); + ref_id_dbg = peer->peer_objmgr.ref_id_dbg; + 
wlan_objmgr_for_each_refs(ref_id_dbg, ref_id, refs) { + leaks++; + obj_mgr_err(QDF_MAC_ADDR_STR " %7u %4u %s", + QDF_MAC_ADDR_ARRAY(peer->macaddr), + vdev_id, + refs, + string_from_dbgid(ref_id)); + } + wlan_peer_obj_unlock(peer); + } + wlan_vdev_obj_unlock(vdev); + } + + QDF_DEBUG_PANIC("%u objmgr peer leaks detected for psoc %u!", + leaks, _psoc->psoc_id); + + wlan_psoc_obj_unlock(psoc); +} +qdf_export_symbol(wlan_objmgr_psoc_check_for_peer_leaks); diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_psoc_obj_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_psoc_obj_i.h new file mode 100644 index 0000000000000000000000000000000000000000..81fcfb24752bf9fdaee7e0d2fd3f0a32aa57bf1f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_psoc_obj_i.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + /** + * DOC: Public APIs to perform operations on Global objects + */ +#ifndef _WLAN_OBJMGR_PSOC_OBJ_I_H_ +#define _WLAN_OBJMGR_PSOC_OBJ_I_H_ + +/** + * wlan_objmgr_for_each_psoc_pdev() - iterate over each pdev for @psoc + * @psoc: the psoc whose pdevs should be iterated + * @pdev_id: pdev Id index cursor + * @pdev: pdev object cursor + * + * Note: The caller is responsible for grabbing @psoc's object lock before + * using this iterator + */ +#define wlan_objmgr_for_each_psoc_pdev(psoc, pdev_id, pdev) \ + for (pdev_id = 0; pdev_id < WLAN_UMAC_MAX_PDEVS; pdev_id++) \ + if ((pdev = (psoc)->soc_objmgr.wlan_pdev_list[pdev_id])) + +/** + * wlan_objmgr_for_each_psoc_vdev() - iterate over each vdev for @psoc + * @psoc: the psoc whose vdevs should be iterated + * @vdev_id: vdev Id index cursor + * @vdev: vdev object cursor + * + * Note: The caller is responsible for grabbing @psoc's object lock before + * using this iterator + */ +#define wlan_objmgr_for_each_psoc_vdev(psoc, vdev_id, vdev) \ + for (vdev_id = 0; vdev_id < WLAN_UMAC_PSOC_MAX_VDEVS; vdev_id++) \ + if ((vdev = (psoc)->soc_objmgr.wlan_vdev_list[vdev_id])) + +/** + * wlan_objmgr_for_each_refs() - iterate non-zero ref counts in @ref_id_dbg + * @ref_id_dbg: the ref count array to iterate + * @ref_id: the reference Id index cursor + * @refs: the ref count cursor + * + * Note: The caller is responsible for grabbing @ref_id_dbg's parent object lock + * before using this iterator + */ +#define wlan_objmgr_for_each_refs(ref_id_dbg, ref_id, refs) \ + for (ref_id = 0; ref_id < WLAN_REF_ID_MAX; ref_id++) \ + if ((refs = qdf_atomic_read(&(ref_id_dbg)[ref_id])) > 0) + +/** + * wlan_objmgr_psoc_pdev_attach() - store pdev in psoc's pdev list + * @psoc - PSOC object + * @pdev - PDEV object + * + * Attaches PDEV to PSOC, allocates PDEV id + * + * Return: SUCCESS + * Failure (Max PDEVs are exceeded) + */ +QDF_STATUS wlan_objmgr_psoc_pdev_attach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev); + +/** + * 
wlan_objmgr_psoc_pdev_detach() - remove pdev from psoc's pdev list + * @psoc - PSOC object + * @pdev - PDEV object + * + * detaches PDEV to PSOC, frees PDEV id + * + * Return: SUCCESS + * Failure (No PDEVs are present) + */ +QDF_STATUS wlan_objmgr_psoc_pdev_detach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev); + +/** + * wlan_objmgr_psoc_vdev_attach() - store vdev in psoc's vdev list + * @psoc - PSOC object + * @vdev - VDEV object + * + * Attaches VDEV to PSOC, allocates VDEV id + * + * Return: SUCCESS + * Failure (Max VDEVs are exceeded) + */ +QDF_STATUS wlan_objmgr_psoc_vdev_attach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev); + +/** + * wlan_objmgr_psoc_vdev_detach() - remove vdev from psoc's vdev list + * @psoc - PSOC object + * @vdev - VDEV object + * + * detaches VDEV to PSOC, frees VDEV id + * + * Return: SUCCESS + * Failure (No VDEVs are present) + */ +QDF_STATUS wlan_objmgr_psoc_vdev_detach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev); + +/** + * wlan_objmgr_psoc_peer_attach() - store peer in psoc's peer table + * @psoc - PSOC object + * @peer - PEER object + * + * Attaches PEER to PSOC, derives the HASH, add peer to its peer list + * + * Return: SUCCESS + * Failure (Max PEERs are exceeded) + */ +QDF_STATUS wlan_objmgr_psoc_peer_attach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer); + +/** + * wlan_objmgr_psoc_peer_detach() - remove peer from psoc's peer table + * @psoc - PSOC object + * @peer - PEER object + * + * detaches PEER to PSOC, removes the peer from the peer list + * + * Return: SUCCESS + * Failure (PEER is not present) + */ +QDF_STATUS wlan_objmgr_psoc_peer_detach(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer); +#endif /* _WLAN_OBJMGR_PSOC_OBJ_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_vdev_obj.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_vdev_obj.c new file mode 100644 
index 0000000000000000000000000000000000000000..0cd8c7b0f90e2831529e3813b55d436b6c69bfd7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_vdev_obj.c @@ -0,0 +1,945 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + /** + * DOC: Public APIs to perform operations on Global objects + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "wlan_objmgr_global_obj_i.h" +#include "wlan_objmgr_psoc_obj_i.h" +#include "wlan_objmgr_pdev_obj_i.h" +#include "wlan_objmgr_vdev_obj_i.h" + +/** + ** APIs to Create/Delete Global object APIs + */ + +static QDF_STATUS wlan_objmgr_vdev_object_status( + struct wlan_objmgr_vdev *vdev) +{ + uint8_t id; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + wlan_vdev_obj_lock(vdev); + + /* Iterate through all components to derive the object status */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + /* If component disabled, Ignore */ + if (vdev->obj_status[id] == QDF_STATUS_COMP_DISABLED) { + continue; + /* + * If component operates in Async, status is Partially created, + * break + */ + } else if (vdev->obj_status[id] == QDF_STATUS_COMP_ASYNC) { + if (vdev->vdev_comp_priv_obj[id] == NULL) { + status = QDF_STATUS_COMP_ASYNC; + break; + } + /* + * If component failed to allocate its object, treat it as + * failure, complete object need to be cleaned up + */ + } else if ((vdev->obj_status[id] == QDF_STATUS_E_NOMEM) || + (vdev->obj_status[id] == QDF_STATUS_E_FAILURE)) { + status = QDF_STATUS_E_FAILURE; + break; + } + } + wlan_vdev_obj_unlock(vdev); + + return status; +} + +static QDF_STATUS wlan_objmgr_vdev_obj_free(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_psoc *psoc; + + if (vdev == NULL) { + obj_mgr_err("vdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + /* if PDEV is NULL, return */ + pdev = wlan_vdev_get_pdev(vdev); + if (pdev == NULL) { + obj_mgr_err("pdev is NULL for vdev-id: %d", + vdev->vdev_objmgr.vdev_id); + return QDF_STATUS_E_FAILURE; + } + psoc = wlan_pdev_get_psoc(pdev); + if (psoc == NULL) { + obj_mgr_err("psoc is NULL in pdev"); + return QDF_STATUS_E_FAILURE; + } + + /* Detach VDEV from PDEV VDEV's list */ + if (wlan_objmgr_pdev_vdev_detach(pdev, 
vdev) == + QDF_STATUS_E_FAILURE) + return QDF_STATUS_E_FAILURE; + + /* Detach VDEV from PSOC VDEV's list */ + if (wlan_objmgr_psoc_vdev_detach(psoc, vdev) == + QDF_STATUS_E_FAILURE) + return QDF_STATUS_E_FAILURE; + + qdf_spinlock_destroy(&vdev->vdev_lock); + + qdf_mem_free(vdev->vdev_mlme.bss_chan); + qdf_mem_free(vdev->vdev_mlme.des_chan); + qdf_mem_free(vdev->vdev_nif.osdev); + qdf_mem_free(vdev); + + return QDF_STATUS_SUCCESS; + +} + +struct wlan_objmgr_vdev *wlan_objmgr_vdev_obj_create( + struct wlan_objmgr_pdev *pdev, + struct wlan_vdev_create_params *params) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_psoc *psoc; + uint8_t id; + wlan_objmgr_vdev_create_handler handler; + wlan_objmgr_vdev_status_handler stat_handler; + void *arg; + QDF_STATUS obj_status; + + if (pdev == NULL) { + obj_mgr_err("pdev is NULL"); + return NULL; + } + psoc = wlan_pdev_get_psoc(pdev); + /* PSOC is NULL */ + if (psoc == NULL) { + obj_mgr_err("psoc is NULL for pdev-id:%d", + pdev->pdev_objmgr.wlan_pdev_id); + return NULL; + } + /* Allocate vdev object memory */ + vdev = qdf_mem_malloc(sizeof(*vdev)); + if (vdev == NULL) { + obj_mgr_err("Memory allocation failure"); + return NULL; + } + vdev->obj_state = WLAN_OBJ_STATE_ALLOCATED; + + vdev->vdev_mlme.bss_chan = (struct wlan_channel *)qdf_mem_malloc( + sizeof(struct wlan_channel)); + if (vdev->vdev_mlme.bss_chan == NULL) { + QDF_TRACE(QDF_MODULE_ID_MLME, QDF_TRACE_LEVEL_ERROR, + "%s:bss_chan is NULL", __func__); + qdf_mem_free(vdev); + return NULL; + } + + vdev->vdev_mlme.des_chan = (struct wlan_channel *)qdf_mem_malloc( + sizeof(struct wlan_channel)); + if (vdev->vdev_mlme.des_chan == NULL) { + QDF_TRACE(QDF_MODULE_ID_MLME, QDF_TRACE_LEVEL_ERROR, + "%s:des_chan is NULL", __func__); + qdf_mem_free(vdev->vdev_mlme.bss_chan); + qdf_mem_free(vdev); + return NULL; + } + + /* Initialize spinlock */ + qdf_spinlock_create(&vdev->vdev_lock); + /* Attach VDEV to PSOC VDEV's list */ + if (wlan_objmgr_psoc_vdev_attach(psoc, vdev) != + 
QDF_STATUS_SUCCESS) { + obj_mgr_err("psoc vdev attach failed for vdev-id:%d", + vdev->vdev_objmgr.vdev_id); + qdf_mem_free(vdev->vdev_mlme.bss_chan); + qdf_mem_free(vdev->vdev_mlme.des_chan); + qdf_spinlock_destroy(&vdev->vdev_lock); + qdf_mem_free(vdev); + return NULL; + } + /* Store pdev in vdev */ + wlan_vdev_set_pdev(vdev, pdev); + /* Attach vdev to PDEV */ + if (wlan_objmgr_pdev_vdev_attach(pdev, vdev) != + QDF_STATUS_SUCCESS) { + obj_mgr_err("pdev vdev attach failed for vdev-id:%d", + vdev->vdev_objmgr.vdev_id); + wlan_objmgr_psoc_vdev_detach(psoc, vdev); + qdf_mem_free(vdev->vdev_mlme.bss_chan); + qdf_mem_free(vdev->vdev_mlme.des_chan); + qdf_spinlock_destroy(&vdev->vdev_lock); + qdf_mem_free(vdev); + return NULL; + } + /* set opmode */ + wlan_vdev_mlme_set_opmode(vdev, params->opmode); + /* set MAC address */ + wlan_vdev_mlme_set_macaddr(vdev, params->macaddr); + /* set MAT address */ + wlan_vdev_mlme_set_mataddr(vdev, params->mataddr); + /* Set create flags */ + vdev->vdev_objmgr.c_flags = params->flags; + /* store os-specific pointer */ + vdev->vdev_nif.osdev = params->osifp; + /* peer count to 0 */ + vdev->vdev_objmgr.wlan_peer_count = 0; + qdf_atomic_init(&vdev->vdev_objmgr.ref_cnt); + vdev->vdev_objmgr.print_cnt = 0; + wlan_objmgr_vdev_get_ref(vdev, WLAN_OBJMGR_ID); + /* Initialize max peer count based on opmode type */ + if (wlan_vdev_mlme_get_opmode(vdev) == QDF_STA_MODE) + vdev->vdev_objmgr.max_peer_count = WLAN_UMAC_MAX_STA_PEERS; + else + vdev->vdev_objmgr.max_peer_count = + wlan_pdev_get_max_peer_count(pdev); + + /* Initialize peer list */ + qdf_list_create(&vdev->vdev_objmgr.wlan_peer_list, + vdev->vdev_objmgr.max_peer_count + + WLAN_MAX_PDEV_TEMP_PEERS); + /* TODO init other parameters */ + + /* Invoke registered create handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + handler = g_umac_glb_obj->vdev_create_handler[id]; + arg = g_umac_glb_obj->vdev_create_handler_arg[id]; + if (handler != NULL) + vdev->obj_status[id] = 
handler(vdev, arg); + else + vdev->obj_status[id] = QDF_STATUS_COMP_DISABLED; + } + + /* Derive object status */ + obj_status = wlan_objmgr_vdev_object_status(vdev); + + if (obj_status == QDF_STATUS_SUCCESS) { + /* Object status is SUCCESS, Object is created */ + vdev->obj_state = WLAN_OBJ_STATE_CREATED; + /* Invoke component registered status handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + stat_handler = g_umac_glb_obj->vdev_status_handler[id]; + arg = g_umac_glb_obj->vdev_status_handler_arg[id]; + if (stat_handler != NULL) { + stat_handler(vdev, arg, + QDF_STATUS_SUCCESS); + } + } + /* + * Few components operates in Asynchrous communction, Object state + * partially created + */ + } else if (obj_status == QDF_STATUS_COMP_ASYNC) { + vdev->obj_state = WLAN_OBJ_STATE_PARTIALLY_CREATED; + /* Component object failed to be created, clean up the object */ + } else if (obj_status == QDF_STATUS_E_FAILURE) { + /* Clean up the psoc */ + wlan_objmgr_vdev_obj_delete(vdev); + obj_mgr_err("VDEV comp objects creation failed for vdev-id:%d", + vdev->vdev_objmgr.vdev_id); + return NULL; + } + + obj_mgr_info("Created vdev %d", vdev->vdev_objmgr.vdev_id); + + return vdev; +} +qdf_export_symbol(wlan_objmgr_vdev_obj_create); + +static QDF_STATUS wlan_objmgr_vdev_obj_destroy(struct wlan_objmgr_vdev *vdev) +{ + uint8_t id; + wlan_objmgr_vdev_destroy_handler handler; + QDF_STATUS obj_status; + void *arg; + uint8_t vdev_id; + + if (vdev == NULL) { + obj_mgr_err("vdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + wlan_objmgr_notify_destroy(vdev, WLAN_VDEV_OP); + + vdev_id = wlan_vdev_get_id(vdev); + + obj_mgr_debug("Physically deleting vdev %d", vdev_id); + + if (vdev->obj_state != WLAN_OBJ_STATE_LOGICALLY_DELETED) { + obj_mgr_err("VDEV object delete is not invoked vdevid:%d objstate:%d", + wlan_vdev_get_id(vdev), vdev->obj_state); + WLAN_OBJMGR_BUG(0); + } + + /* Invoke registered destroy handlers */ + for (id = 0; id < WLAN_UMAC_MAX_COMPONENTS; id++) { + handler = 
g_umac_glb_obj->vdev_destroy_handler[id]; + arg = g_umac_glb_obj->vdev_destroy_handler_arg[id]; + if (handler && + (vdev->obj_status[id] == QDF_STATUS_SUCCESS || + vdev->obj_status[id] == QDF_STATUS_COMP_ASYNC)) + vdev->obj_status[id] = handler(vdev, arg); + else + vdev->obj_status[id] = QDF_STATUS_COMP_DISABLED; + } + /* Derive object status */ + obj_status = wlan_objmgr_vdev_object_status(vdev); + + if (obj_status == QDF_STATUS_E_FAILURE) { + obj_mgr_err("VDEV object deletion failed: vdev-id: %d", + vdev_id); + /* Ideally should not happen */ + /* This leads to memleak ??? how to handle */ + QDF_BUG(0); + return QDF_STATUS_E_FAILURE; + } + + /* Deletion is in progress */ + if (obj_status == QDF_STATUS_COMP_ASYNC) { + vdev->obj_state = WLAN_OBJ_STATE_PARTIALLY_DELETED; + return QDF_STATUS_COMP_ASYNC; + } + + /* Free VDEV object */ + return wlan_objmgr_vdev_obj_free(vdev); +} + +QDF_STATUS wlan_objmgr_vdev_obj_delete(struct wlan_objmgr_vdev *vdev) +{ + uint8_t print_idx; + + if (vdev == NULL) { + obj_mgr_err("vdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + + obj_mgr_info("Logically deleting vdev %d", vdev->vdev_objmgr.vdev_id); + + print_idx = qdf_get_pidx(); + wlan_objmgr_print_ref_ids(vdev->vdev_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_DEBUG); + /* + * Update VDEV object state to LOGICALLY DELETED + * It prevents further access of this object + */ + wlan_vdev_obj_lock(vdev); + vdev->obj_state = WLAN_OBJ_STATE_LOGICALLY_DELETED; + wlan_vdev_obj_unlock(vdev); + wlan_objmgr_notify_log_delete(vdev, WLAN_VDEV_OP); + wlan_objmgr_vdev_release_ref(vdev, WLAN_OBJMGR_ID); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_vdev_obj_delete); + +/** + ** APIs to attach/detach component objects + */ +QDF_STATUS wlan_objmgr_vdev_component_obj_attach( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj, + QDF_STATUS status) +{ + wlan_objmgr_vdev_status_handler stat_handler; + void *arg; + uint8_t i; + QDF_STATUS obj_status; + + /* 
component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_vdev_obj_lock(vdev); + /* If there is a valid entry, return failure */ + if (vdev->vdev_comp_priv_obj[id] != NULL) { + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_E_FAILURE; + } + /* Save component's pointer and status */ + vdev->vdev_comp_priv_obj[id] = comp_priv_obj; + vdev->obj_status[id] = status; + wlan_vdev_obj_unlock(vdev); + if (vdev->obj_state != WLAN_OBJ_STATE_PARTIALLY_CREATED) + return QDF_STATUS_SUCCESS; + /* + * If VDEV object status is partially created means, this API is + * invoked with differnt context, this block should be executed for + * async components only + */ + /* Derive status */ + obj_status = wlan_objmgr_vdev_object_status(vdev); + /* STATUS_SUCCESS means, object is CREATED */ + if (obj_status == QDF_STATUS_SUCCESS) + vdev->obj_state = WLAN_OBJ_STATE_CREATED; + /* + * update state as CREATION failed, caller has to delete the + * VDEV object + */ + else if (obj_status == QDF_STATUS_E_FAILURE) + vdev->obj_state = WLAN_OBJ_STATE_CREATION_FAILED; + /* Notify components about the CREATION success/failure */ + if ((obj_status == QDF_STATUS_SUCCESS) || + (obj_status == QDF_STATUS_E_FAILURE)) { + for (i = 0; i < WLAN_UMAC_MAX_COMPONENTS; i++) { + stat_handler = g_umac_glb_obj->vdev_status_handler[i]; + arg = g_umac_glb_obj->vdev_status_handler_arg[i]; + if (stat_handler != NULL) + stat_handler(vdev, arg, obj_status); + } + } + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_vdev_component_obj_attach); + +QDF_STATUS wlan_objmgr_vdev_component_obj_detach( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj) +{ + QDF_STATUS obj_status; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_vdev_obj_lock(vdev); + /* If there is a valid entry, return failure */ + if (vdev->vdev_comp_priv_obj[id] != comp_priv_obj) { + 
vdev->obj_status[id] = QDF_STATUS_E_FAILURE; + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_E_FAILURE; + } + /* Reset pointers to NULL, update the status */ + vdev->vdev_comp_priv_obj[id] = NULL; + vdev->obj_status[id] = QDF_STATUS_SUCCESS; + wlan_vdev_obj_unlock(vdev); + + /* + * If VDEV object status is partially destroyed, it means this API is + * invoked from a different context; this block should be executed for + * async components only + */ + if ((vdev->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) || + (vdev->obj_state == WLAN_OBJ_STATE_COMP_DEL_PROGRESS)) { + /* Derive object status */ + obj_status = wlan_objmgr_vdev_object_status(vdev); + if (obj_status == QDF_STATUS_SUCCESS) { + /* + * Update the status as Deleted, if full object + * deletion is in progress + */ + if (vdev->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) + vdev->obj_state = WLAN_OBJ_STATE_DELETED; + /* + * Move to creation state, since this component + * deletion alone requested + */ + else if (vdev->obj_state == + WLAN_OBJ_STATE_COMP_DEL_PROGRESS) + vdev->obj_state = WLAN_OBJ_STATE_CREATED; + /* Object status is failure */ + } else if (obj_status == QDF_STATUS_E_FAILURE) { + /* + * Update the status as Deletion failed, if full object + * deletion is in progress + */ + if (vdev->obj_state == WLAN_OBJ_STATE_PARTIALLY_DELETED) + vdev->obj_state = + WLAN_OBJ_STATE_DELETION_FAILED; + /* Move to creation state, since this component + deletion alone requested (do not block other + components) */ + else if (vdev->obj_state == + WLAN_OBJ_STATE_COMP_DEL_PROGRESS) + vdev->obj_state = WLAN_OBJ_STATE_CREATED; + } + /* Delete vdev object */ + if ((obj_status == QDF_STATUS_SUCCESS) && + (vdev->obj_state == WLAN_OBJ_STATE_DELETED)) { + /* Free VDEV object */ + return wlan_objmgr_vdev_obj_free(vdev); + } + } + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_vdev_component_obj_detach); + +/** + ** APIs for operations on vdev objects + */ +QDF_STATUS wlan_objmgr_iterate_peerobj_list( + struct 
wlan_objmgr_vdev *vdev, + wlan_objmgr_vdev_op_handler handler, + void *arg, wlan_objmgr_ref_dbgid dbg_id) +{ + qdf_list_t *peer_list = NULL; + struct wlan_objmgr_peer *peer = NULL; + struct wlan_objmgr_peer *peer_next = NULL; + uint8_t vdev_id; + + if (vdev == NULL) { + obj_mgr_err("VDEV is NULL"); + return QDF_STATUS_E_FAILURE; + } + wlan_vdev_obj_lock(vdev); + vdev_id = wlan_vdev_get_id(vdev); + + if (vdev->obj_state != WLAN_OBJ_STATE_CREATED) { + wlan_vdev_obj_unlock(vdev); + obj_mgr_err("VDEV is not in create state(:%d): vdev-id:%d", + vdev_id, vdev->obj_state); + return QDF_STATUS_E_FAILURE; + } + wlan_objmgr_vdev_get_ref(vdev, dbg_id); + peer_list = &vdev->vdev_objmgr.wlan_peer_list; + if (peer_list != NULL) { + /* Iterate through VDEV's peer list */ + peer = wlan_vdev_peer_list_peek_head(peer_list); + while (peer != NULL) { + peer_next = wlan_peer_get_next_peer_of_vdev(peer_list, + peer); + if (wlan_objmgr_peer_try_get_ref(peer, dbg_id) == + QDF_STATUS_SUCCESS) { + /* Invoke handler for operation */ + handler(vdev, (void *)peer, arg); + wlan_objmgr_peer_release_ref(peer, dbg_id); + } + peer = peer_next; + } + } + wlan_objmgr_vdev_release_ref(vdev, dbg_id); + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_trigger_vdev_comp_priv_object_creation( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id) +{ + wlan_objmgr_vdev_create_handler handler; + void *arg; + QDF_STATUS obj_status = QDF_STATUS_SUCCESS; + + /* Component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_vdev_obj_lock(vdev); + /* + * If component object is already created, delete old + * component object, then invoke creation + */ + if (vdev->vdev_comp_priv_obj[id] != NULL) { + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_E_FAILURE; + } + wlan_vdev_obj_unlock(vdev); + + /* Invoke registered create handlers */ + handler = g_umac_glb_obj->vdev_create_handler[id]; + arg = 
g_umac_glb_obj->vdev_create_handler_arg[id]; + if (handler != NULL) + vdev->obj_status[id] = handler(vdev, arg); + else + return QDF_STATUS_E_FAILURE; + + /* If object status is created, then only handle this object status */ + if (vdev->obj_state == WLAN_OBJ_STATE_CREATED) { + /* Derive object status */ + obj_status = wlan_objmgr_vdev_object_status(vdev); + /* Move PDEV object state to Partially created state */ + if (obj_status == QDF_STATUS_COMP_ASYNC) { + /*TODO atomic */ + vdev->obj_state = WLAN_OBJ_STATE_PARTIALLY_CREATED; + } + } + return obj_status; +} + +QDF_STATUS wlan_objmgr_trigger_vdev_comp_priv_object_deletion( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id) +{ + wlan_objmgr_vdev_destroy_handler handler; + void *arg; + QDF_STATUS obj_status = QDF_STATUS_SUCCESS; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) + return QDF_STATUS_MAXCOMP_FAIL; + + wlan_vdev_obj_lock(vdev); + /* Component object was never created, invalid operation */ + if (vdev->vdev_comp_priv_obj[id] == NULL) { + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_E_FAILURE; + } + wlan_vdev_obj_unlock(vdev); + + /* Invoke registered create handlers */ + handler = g_umac_glb_obj->vdev_destroy_handler[id]; + arg = g_umac_glb_obj->vdev_destroy_handler_arg[id]; + if (handler != NULL) + vdev->obj_status[id] = handler(vdev, arg); + else + return QDF_STATUS_E_FAILURE; + + /* If object status is created, then only handle this object status */ + if (vdev->obj_state == WLAN_OBJ_STATE_CREATED) { + obj_status = wlan_objmgr_vdev_object_status(vdev); + /* move object state to DEL progress */ + if (obj_status == QDF_STATUS_COMP_ASYNC) + vdev->obj_state = WLAN_OBJ_STATE_COMP_DEL_PROGRESS; + } + return obj_status; +} + + + +static void wlan_obj_vdev_peerlist_add_tail(qdf_list_t *obj_list, + struct wlan_objmgr_peer *obj) +{ + qdf_list_insert_back(obj_list, &obj->vdev_peer); +} + +static QDF_STATUS wlan_obj_vdev_peerlist_remove_peer(qdf_list_t *obj_list, + struct 
wlan_objmgr_peer *peer) +{ + qdf_list_node_t *vdev_node = NULL; + + if (peer == NULL) + return QDF_STATUS_E_FAILURE; + /* get vdev list node element */ + vdev_node = &peer->vdev_peer; + /* list is empty, return failure */ + if (qdf_list_remove_node(obj_list, vdev_node) != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_vdev_peer_attach(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer) +{ + struct wlan_objmgr_vdev_objmgr *objmgr = &vdev->vdev_objmgr; + struct wlan_objmgr_pdev *pdev; + enum QDF_OPMODE opmode; + + wlan_vdev_obj_lock(vdev); + pdev = wlan_vdev_get_pdev(vdev); + /* If Max VDEV peer count exceeds, return failure */ + if (peer->peer_mlme.peer_type != WLAN_PEER_STA_TEMP) { + if (objmgr->wlan_peer_count >= objmgr->max_peer_count) { + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_E_FAILURE; + } + } + wlan_vdev_obj_unlock(vdev); + + /* If Max PDEV peer count exceeds, return failure */ + wlan_pdev_obj_lock(pdev); + if (peer->peer_mlme.peer_type == WLAN_PEER_STA_TEMP) { + if (wlan_pdev_get_temp_peer_count(pdev) >= + WLAN_MAX_PDEV_TEMP_PEERS) { + wlan_pdev_obj_unlock(pdev); + return QDF_STATUS_E_FAILURE; + } + } else { + if (wlan_pdev_get_peer_count(pdev) >= + wlan_pdev_get_max_peer_count(pdev)) { + wlan_pdev_obj_unlock(pdev); + return QDF_STATUS_E_FAILURE; + } + } + + if (peer->peer_mlme.peer_type == WLAN_PEER_STA_TEMP) + wlan_pdev_incr_temp_peer_count(wlan_vdev_get_pdev(vdev)); + else + wlan_pdev_incr_peer_count(wlan_vdev_get_pdev(vdev)); + wlan_pdev_obj_unlock(pdev); + + wlan_vdev_obj_lock(vdev); + /* Add peer to vdev's peer list */ + wlan_obj_vdev_peerlist_add_tail(&objmgr->wlan_peer_list, peer); + objmgr->wlan_peer_count++; + + if (WLAN_ADDR_EQ(wlan_peer_get_macaddr(peer), + wlan_vdev_mlme_get_macaddr(vdev)) == + QDF_STATUS_SUCCESS) { + /* + * if peer mac address and vdev mac address match, set + * this peer as self peer + */ + wlan_vdev_set_selfpeer(vdev, peer); + opmode = 
wlan_vdev_mlme_get_opmode(vdev); + /* For AP mode, self peer and BSS peer are same */ + if ((opmode == QDF_SAP_MODE) || (opmode == QDF_P2P_GO_MODE)) + wlan_vdev_set_bsspeer(vdev, peer); + } + /* set BSS peer for sta */ + if ((wlan_vdev_mlme_get_opmode(vdev) == QDF_STA_MODE || + wlan_vdev_mlme_get_opmode(vdev) == QDF_P2P_CLIENT_MODE) && + (wlan_peer_get_peer_type(peer) == WLAN_PEER_AP || + wlan_peer_get_peer_type(peer) == WLAN_PEER_P2P_GO)) + wlan_vdev_set_bsspeer(vdev, peer); + + /* Increment vdev ref count to make sure it won't be destroyed before */ + wlan_objmgr_vdev_get_ref(vdev, WLAN_OBJMGR_ID); + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_objmgr_vdev_peer_detach(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer) +{ + struct wlan_objmgr_vdev_objmgr *objmgr = &vdev->vdev_objmgr; + struct wlan_objmgr_pdev *pdev; + + wlan_vdev_obj_lock(vdev); + /* if peer count is 0, return failure */ + if (objmgr->wlan_peer_count == 0) { + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_E_FAILURE; + } + + if ((wlan_peer_get_peer_type(peer) == WLAN_PEER_AP) || + (wlan_peer_get_peer_type(peer) == WLAN_PEER_P2P_GO)) { + if (wlan_vdev_get_selfpeer(vdev) == peer) { + /* + * There might be instances where new node is created + * before deleting existing node, in which case selfpeer + * will be pointing to the new node. So set selfpeer to + * NULL only if vdev->vdev_objmgr.self_peer is pointing + * to the peer processed for deletion + */ + wlan_vdev_set_selfpeer(vdev, NULL); + } + + if (wlan_vdev_get_bsspeer(vdev) == peer) { + /* + * There might be instances where new node is created + * before deleting existing node, in which case bsspeer + * in vdev will be pointing to the new node. 
So set + * bsspeer to NULL only if vdev->vdev_objmgr.bss_peer is + * pointing to the peer processed for deletion + */ + wlan_vdev_set_bsspeer(vdev, NULL); + } + } + + /* remove peer from vdev's peer list */ + if (wlan_obj_vdev_peerlist_remove_peer(&objmgr->wlan_peer_list, peer) + == QDF_STATUS_E_FAILURE) { + wlan_vdev_obj_unlock(vdev); + return QDF_STATUS_E_FAILURE; + } + /* decrement peer count */ + objmgr->wlan_peer_count--; + /* decrement pdev peer count */ + pdev = wlan_vdev_get_pdev(vdev); + wlan_vdev_obj_unlock(vdev); + + wlan_pdev_obj_lock(pdev); + if (peer->peer_mlme.peer_type == WLAN_PEER_STA_TEMP) + wlan_pdev_decr_temp_peer_count(pdev); + else + wlan_pdev_decr_peer_count(pdev); + wlan_pdev_obj_unlock(pdev); + + /* decrement vdev ref count after peer released its reference */ + wlan_objmgr_vdev_release_ref(vdev, WLAN_OBJMGR_ID); + return QDF_STATUS_SUCCESS; +} + +void *wlan_objmgr_vdev_get_comp_private_obj( + struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id) +{ + void *comp_priv_obj; + + /* component id is invalid */ + if (id >= WLAN_UMAC_MAX_COMPONENTS) { + QDF_BUG(0); + return NULL; + } + + if (vdev == NULL) { + QDF_BUG(0); + return NULL; + } + + comp_priv_obj = vdev->vdev_comp_priv_obj[id]; + + return comp_priv_obj; +} +qdf_export_symbol(wlan_objmgr_vdev_get_comp_private_obj); + +void wlan_objmgr_vdev_get_ref(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id) +{ + if (vdev == NULL) { + obj_mgr_err("vdev obj is NULL for id:%d", id); + QDF_ASSERT(0); + return; + } + /* Increment ref count */ + qdf_atomic_inc(&vdev->vdev_objmgr.ref_cnt); + qdf_atomic_inc(&vdev->vdev_objmgr.ref_id_dbg[id]); + + return; +} +qdf_export_symbol(wlan_objmgr_vdev_get_ref); + +QDF_STATUS wlan_objmgr_vdev_try_get_ref(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id) +{ + uint8_t vdev_id; + + if (vdev == NULL) { + obj_mgr_err("vdev obj is NULL for id:%d", id); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + + wlan_vdev_obj_lock(vdev); + vdev_id = 
wlan_vdev_get_id(vdev); + if (vdev->obj_state != WLAN_OBJ_STATE_CREATED) { + wlan_vdev_obj_unlock(vdev); + if (vdev->vdev_objmgr.print_cnt++ <= + WLAN_OBJMGR_RATELIMIT_THRESH) + obj_mgr_err( + "[Ref id: %d] vdev(%d) is not in Created state(%d)", + id, vdev_id, vdev->obj_state); + + return QDF_STATUS_E_RESOURCES; + } + + /* Increment ref count */ + wlan_objmgr_vdev_get_ref(vdev, id); + wlan_vdev_obj_unlock(vdev); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wlan_objmgr_vdev_try_get_ref); + +void wlan_objmgr_vdev_release_ref(struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid id) +{ + uint8_t vdev_id; + + if (vdev == NULL) { + obj_mgr_err("vdev obj is NULL for id:%d", id); + QDF_ASSERT(0); + return; + } + + vdev_id = wlan_vdev_get_id(vdev); + + if (!qdf_atomic_read(&vdev->vdev_objmgr.ref_id_dbg[id])) { + obj_mgr_err("vdev (id:%d)ref cnt was not taken by %d", + vdev_id, id); + wlan_objmgr_print_ref_ids(vdev->vdev_objmgr.ref_id_dbg, + QDF_TRACE_LEVEL_FATAL); + WLAN_OBJMGR_BUG(0); + } + + if (!qdf_atomic_read(&vdev->vdev_objmgr.ref_cnt)) { + obj_mgr_err("vdev ref cnt is 0"); + WLAN_OBJMGR_BUG(0); + return; + } + qdf_atomic_dec(&vdev->vdev_objmgr.ref_id_dbg[id]); + + /* Decrement ref count, free vdev, if ref count == 0 */ + if (qdf_atomic_dec_and_test(&vdev->vdev_objmgr.ref_cnt)) + wlan_objmgr_vdev_obj_destroy(vdev); + + return; +} +qdf_export_symbol(wlan_objmgr_vdev_release_ref); + +struct wlan_objmgr_vdev *wlan_pdev_vdev_list_peek_active_head( + struct wlan_objmgr_pdev *pdev, + qdf_list_t *vdev_list, wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + qdf_list_node_t *node = NULL; + qdf_list_node_t *prev_node = NULL; + + wlan_pdev_obj_lock(pdev); + + if (qdf_list_peek_front(vdev_list, &node) != QDF_STATUS_SUCCESS) { + wlan_pdev_obj_unlock(pdev); + return NULL; + } + + do { + vdev = qdf_container_of(node, struct wlan_objmgr_vdev, + vdev_node); + if (wlan_objmgr_vdev_try_get_ref(vdev, dbg_id) == + QDF_STATUS_SUCCESS) { + 
wlan_pdev_obj_unlock(pdev); + return vdev; + } + + prev_node = node; + } while (qdf_list_peek_next(vdev_list, prev_node, &node) == + QDF_STATUS_SUCCESS); + + wlan_pdev_obj_unlock(pdev); + + return NULL; +} + +struct wlan_objmgr_vdev *wlan_vdev_get_next_active_vdev_of_pdev( + struct wlan_objmgr_pdev *pdev, + qdf_list_t *vdev_list, + struct wlan_objmgr_vdev *vdev, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev_next; + qdf_list_node_t *node = &vdev->vdev_node; + qdf_list_node_t *prev_node = NULL; + + if (node == NULL) + return NULL; + + wlan_pdev_obj_lock(pdev); + prev_node = node; + while (qdf_list_peek_next(vdev_list, prev_node, &node) == + QDF_STATUS_SUCCESS) { + vdev_next = qdf_container_of(node, struct wlan_objmgr_vdev, + vdev_node); + if (wlan_objmgr_vdev_try_get_ref(vdev_next, dbg_id) == + QDF_STATUS_SUCCESS) { + wlan_pdev_obj_unlock(pdev); + return vdev_next; + } + + prev_node = node; + } + wlan_pdev_obj_unlock(pdev); + + return NULL; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_vdev_obj_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_vdev_obj_i.h new file mode 100644 index 0000000000000000000000000000000000000000..460d79f3283b238434782ad04063cf7721c5b0b3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/obj_mgr/src/wlan_objmgr_vdev_obj_i.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2016,2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + /** + * DOC: Public APIs to perform operations on VDEV objects + */ +#ifndef _WLAN_OBJMGR_VDEV_OBJ_I_H_ +#define _WLAN_OBJMGR_VDEV_OBJ_I_H_ + +/** + * wlan_objmgr_for_each_vdev_peer() - iterate over each peer for @vdev + * @vdev: the vdev whose peers should be iterated + * @peer: peer object cursor + * + * Note: The caller is responsible for grabbing @vdev's object lock before + * using this iterator + */ +#define wlan_objmgr_for_each_vdev_peer(vdev, peer) \ + qdf_list_for_each(&(vdev)->vdev_objmgr.wlan_peer_list, peer, vdev_peer) + +/** + * wlan_objmgr_vdev_peer_attach() - attach peer to vdev peer list + * @vdev: VDEV object + * @peer: PEER object + * + * Attaches PEER to VDEV, stores it in VDEV's peer list + * + * Return: SUCCESS + * Failure (Max PEERs are exceeded) + */ +QDF_STATUS wlan_objmgr_vdev_peer_attach(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer); + +/** + * wlan_objmgr_vdev_peer_detach() - detach peer from vdev peer list + * @vdev: VDEV object + * @peer: PEER object + * + * detaches PEER from VDEV's peer list + * + * Return: SUCCESS + * Failure (No PEERs are present) + */ +QDF_STATUS wlan_objmgr_vdev_peer_detach(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_peer *peer); + +#endif /* _WLAN_OBJMGR_VDEV_OBJ_I_H_*/ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/inc/wlan_policy_mgr_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/inc/wlan_policy_mgr_api.h new file mode 100644 index 0000000000000000000000000000000000000000..d8c30b2ac9d7aaedacc84cc5ca12cd809faf3bf8 --- /dev/null +++ 
b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/inc/wlan_policy_mgr_api.h @@ -0,0 +1,2590 @@ +/* + * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __WLAN_POLICY_MGR_API_H +#define __WLAN_POLICY_MGR_API_H + +/** + * DOC: wlan_policy_mgr_api.h + * + * Concurrent Connection Management entity + */ + +/* Include files */ +#include "qdf_types.h" +#include "qdf_status.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_policy_mgr_public_struct.h" + +struct target_psoc_info; + +typedef const enum policy_mgr_pcl_type + pm_dbs_pcl_second_connection_table_type + [PM_MAX_ONE_CONNECTION_MODE][PM_MAX_NUM_OF_MODE] + [PM_MAX_CONC_PRIORITY_MODE]; + +typedef const enum policy_mgr_pcl_type + pm_dbs_pcl_third_connection_table_type + [PM_MAX_TWO_CONNECTION_MODE][PM_MAX_NUM_OF_MODE] + [PM_MAX_CONC_PRIORITY_MODE]; + +typedef const enum policy_mgr_conc_next_action + policy_mgr_next_action_two_connection_table_type + [PM_MAX_ONE_CONNECTION_MODE][POLICY_MGR_MAX_BAND]; + +typedef const enum policy_mgr_conc_next_action + policy_mgr_next_action_three_connection_table_type + [PM_MAX_TWO_CONNECTION_MODE][POLICY_MGR_MAX_BAND]; + +#define PM_FW_MODE_STA_STA_BIT_POS 0 +#define PM_FW_MODE_STA_P2P_BIT_POS 1 
+ +#define PM_FW_MODE_STA_STA_BIT_MASK (0x1 << PM_FW_MODE_STA_STA_BIT_POS) +#define PM_FW_MODE_STA_P2P_BIT_MASK (0x1 << PM_FW_MODE_STA_P2P_BIT_POS) + +#define PM_CHANNEL_SELECT_LOGIC_STA_STA_GET(channel_select_logic_conc) \ + ((channel_select_logic_conc & PM_FW_MODE_STA_STA_BIT_MASK) >> \ + PM_FW_MODE_STA_STA_BIT_POS) +#define PM_CHANNEL_SELECT_LOGIC_STA_P2P_GET(channel_select_logic_conc) \ + ((channel_select_logic_conc & PM_FW_MODE_STA_P2P_BIT_MASK) >> \ + PM_FW_MODE_STA_P2P_BIT_POS) + +/** + * policy_mgr_set_concurrency_mode() - To set concurrency mode + * @psoc: PSOC object data + * @mode: device mode + * + * This routine is called to set the concurrency mode + * + * Return: NONE + */ +void policy_mgr_set_concurrency_mode(struct wlan_objmgr_psoc *psoc, + enum QDF_OPMODE mode); + +/** + * policy_mgr_clear_concurrency_mode() - To clear concurrency mode + * @psoc: PSOC object data + * @mode: device mode + * + * This routine is called to clear the concurrency mode + * + * Return: NONE + */ +void policy_mgr_clear_concurrency_mode(struct wlan_objmgr_psoc *psoc, + enum QDF_OPMODE mode); + +/** + * policy_mgr_get_connection_count() - provides the count of + * current connections + * @psoc: PSOC object information + * + * This function provides the count of current connections + * + * Return: connection count + */ +uint32_t policy_mgr_get_connection_count(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_get_concurrency_mode() - return concurrency mode + * @psoc: PSOC object information + * + * This routine is used to retrieve concurrency mode + * + * Return: uint32_t value of concurrency mask + */ +uint32_t policy_mgr_get_concurrency_mode(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_search_and_check_for_session_conc() - Checks if + * concurrecy is allowed + * @psoc: PSOC object information + * @session_id: Session id + * @roam_profile: Pointer to the roam profile + * + * Searches and gets the channel number from the scan results and checks if + * 
concurrency is allowed for the given session ID + * + * Non zero channel number if concurrency is allowed, zero otherwise + */ +uint8_t policy_mgr_search_and_check_for_session_conc( + struct wlan_objmgr_psoc *psoc, + uint8_t session_id, void *roam_profile); + +/** + * policy_mgr_is_chnl_in_diff_band() - to check that given channel + * is in diff band from existing channel or not + * @psoc: pointer to psoc + * @channel: given channel + * + * This API will check that if the passed channel is in diff band than the + * already existing connections or not. + * + * Return: true if channel is in diff band + */ +bool policy_mgr_is_chnl_in_diff_band(struct wlan_objmgr_psoc *psoc, + uint8_t channel); + +/** + * policy_mgr_check_for_session_conc() - Check if concurrency is + * allowed for a session + * @psoc: PSOC object information + * @session_id: Session ID + * @channel: Channel number + * + * Checks if connection is allowed for a given session_id + * + * True if the concurrency is allowed, false otherwise + */ +bool policy_mgr_check_for_session_conc( + struct wlan_objmgr_psoc *psoc, uint8_t session_id, uint8_t channel); + +/** + * policy_mgr_handle_conc_multiport() - to handle multiport concurrency + * @session_id: Session ID + * @channel: Channel number + * + * This routine will handle STA side concurrency when policy manager + * is enabled. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_handle_conc_multiport( + struct wlan_objmgr_psoc *psoc, uint8_t session_id, uint8_t channel); + +#ifdef FEATURE_WLAN_MCC_TO_SCC_SWITCH +/** + * policy_mgr_check_concurrent_intf_and_restart_sap() - Check + * concurrent change intf + * @psoc: PSOC object information + * @operation_channel: operation channel + * @vdev_id: vdev id of SAP + * + * Checks the concurrent change interface and restarts SAP + * + * Return: None + */ +void policy_mgr_check_concurrent_intf_and_restart_sap( + struct wlan_objmgr_psoc *psoc); +#else +static inline void policy_mgr_check_concurrent_intf_and_restart_sap( + struct wlan_objmgr_psoc *psoc) +{ + +} +#endif /* FEATURE_WLAN_MCC_TO_SCC_SWITCH */ + +/** + * policy_mgr_is_mcc_in_24G() - Function to check for MCC in 2.4GHz + * @psoc: PSOC object information + * + * This function is used to check for MCC operation in 2.4GHz band. + * STA, P2P and SAP adapters are only considered. + * + * Return: True if mcc is detected in 2.4 Ghz, false otherwise + * + */ +bool policy_mgr_is_mcc_in_24G(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_change_mcc_go_beacon_interval() - Change MCC beacon interval + * @psoc: PSOC object information + * @vdev_id: vdev id + * @dev_mode: device mode + * + * Updates the beacon parameters of the GO in MCC scenario + * + * Return: Success or Failure depending on the overall function behavior + */ +QDF_STATUS policy_mgr_change_mcc_go_beacon_interval( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, enum QDF_OPMODE dev_mode); + +#if defined(FEATURE_WLAN_MCC_TO_SCC_SWITCH) +/** + * policy_mgr_change_sap_channel_with_csa() - Move SAP channel using (E)CSA + * @psoc: PSOC object information + * @vdev_id: Vdev id + * @channel: Channel to change + * @ch_width: channel width to change + * @forced: Force to switch channel, ignore SCC/MCC check + * + * Invoke the callback function to change SAP channel using (E)CSA + * + * Return: None + */ +void 
policy_mgr_change_sap_channel_with_csa( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, uint32_t channel, + uint32_t ch_width, + bool forced); +#else +static inline void policy_mgr_change_sap_channel_with_csa( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, uint32_t channel, + uint32_t ch_width, + bool forced) +{ + +} +#endif + +/** + * policy_mgr_set_pcl_for_existing_combo() - SET PCL for existing combo + * @psoc: PSOC object information + * @mode: Adapter mode + * + * Return: None + */ +void policy_mgr_set_pcl_for_existing_combo(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_con_mode mode); +/** + * policy_mgr_incr_active_session() - increments the number of active sessions + * @psoc: PSOC object information + * @mode: Adapter mode + * @session_id: session ID for the connection session + * + * This function increments the number of active sessions maintained per device + * mode. In the case of STA/P2P CLI/IBSS upon connection indication it is + * incremented; In the case of SAP/P2P GO upon bss start it is incremented + * + * Return: None + */ +void policy_mgr_incr_active_session(struct wlan_objmgr_psoc *psoc, + enum QDF_OPMODE mode, uint8_t session_id); + +/** + * policy_mgr_decr_active_session() - decrements the number of active sessions + * @psoc: PSOC object information + * @mode: Adapter mode + * @session_id: session ID for the connection session + * + * This function decrements the number of active sessions maintained per device + * mode. 
In the case of STA/P2P CLI/IBSS upon disconnection it is decremented + * In the case of SAP/P2P GO upon bss stop it is decremented + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_decr_active_session(struct wlan_objmgr_psoc *psoc, + enum QDF_OPMODE mode, uint8_t session_id); + +/** + * policy_mgr_decr_session_set_pcl() - Decrement session count and set PCL + * @psoc: PSOC object information + * @mode: Adapter mode + * @session_id: Session id + * + * Decrements the active session count and sets the PCL if a STA connection + * exists + * + * Return: None + */ +void policy_mgr_decr_session_set_pcl(struct wlan_objmgr_psoc *psoc, + enum QDF_OPMODE mode, uint8_t session_id); + +/** + * policy_mgr_get_channel() - provide channel number of given mode and vdevid + * @psoc: PSOC object information + * @mode: given mode + * @vdev_id: pointer to vdev_id + * + * This API will provide channel number of matching mode and vdevid. + * If vdev_id is NULL then it will match only mode + * If vdev_id is not NULL the it will match both mode and vdev_id + * + * Return: channel number + */ +uint8_t policy_mgr_get_channel(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_con_mode mode, uint32_t *vdev_id); + +/** + * policy_mgr_get_pcl() - provides the preferred channel list for + * new connection + * @psoc: PSOC object information + * @mode: Device mode + * @pcl_channels: PCL channels + * @len: length of the PCL + * @pcl_weight: Weights of the PCL + * @weight_len: Max length of the weights list + * + * This function provides the preferred channel list on which + * policy manager wants the new connection to come up. 
Various + * connection decision making entities will using this function + * to query the PCL info + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_get_pcl(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_con_mode mode, + uint8_t *pcl_channels, uint32_t *len, + uint8_t *pcl_weight, uint32_t weight_len); + +/** + * policy_mgr_update_with_safe_channel_list() - provides the safe + * channel list + * @psoc: PSOC object information + * @pcl_channels: channel list + * @len: length of the list + * @weight_list: Weights of the PCL + * @weight_len: Max length of the weights list + * + * This function provides the safe channel list from the list + * provided after consulting the channel avoidance list + * + * Return: None + */ +void policy_mgr_update_with_safe_channel_list(struct wlan_objmgr_psoc *psoc, + uint8_t *pcl_channels, uint32_t *len, + uint8_t *weight_list, uint32_t weight_len); + +/** + * policy_mgr_get_nondfs_preferred_channel() - to get non-dfs preferred channel + * for given mode + * @psoc: PSOC object information + * @mode: mode for which preferred non-dfs channel is requested + * @for_existing_conn: flag to indicate if preferred channel is requested + * for existing connection + * + * this routine will return non-dfs channel + * 1) for getting non-dfs preferred channel, first we check if there are any + * other connection exist whose channel is non-dfs. if yes then return that + * channel so that we can accommodate upto 3 mode concurrency. + * 2) if there no any other connection present then query concurrency module + * to give preferred channel list. once we get preferred channel list, loop + * through list to find first non-dfs channel from ascending order. 
+ * + * Return: uint8_t non-dfs channel + */ +uint8_t policy_mgr_get_nondfs_preferred_channel(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_con_mode mode, bool for_existing_conn); + +/** + * policy_mgr_is_any_nondfs_chnl_present() - Find any non-dfs + * channel from conc table + * @psoc: PSOC object information + * @channel: pointer to channel which needs to be filled + * + * In-case if any connection is already present whose channel is none dfs then + * return that channel + * + * Return: true up-on finding non-dfs channel else false + */ +bool policy_mgr_is_any_nondfs_chnl_present(struct wlan_objmgr_psoc *psoc, + uint8_t *channel); + +/** + * policy_mgr_is_any_dfs_beaconing_session_present() - to find + * if any DFS session + * @psoc: PSOC object information + * @channel: pointer to channel number that needs to filled + * + * If any beaconing session such as SAP or GO present and it is on DFS channel + * then this function will return true + * + * Return: true if session is on DFS or false if session is on non-dfs channel + */ +bool policy_mgr_is_any_dfs_beaconing_session_present( + struct wlan_objmgr_psoc *psoc, uint8_t *channel); + +/** + * policy_mgr_allow_concurrency() - Check for allowed concurrency + * combination consulting the PCL + * @psoc: PSOC object information + * @mode: new connection mode + * @channel: channel on which new connection is coming up + * @bw: Bandwidth requested by the connection (optional) + * + * When a new connection is about to come up check if current + * concurrency combination including the new connection is + * allowed or not based on the HW capability + * + * Return: True/False + */ +bool policy_mgr_allow_concurrency(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_con_mode mode, + uint8_t channel, enum hw_mode_bandwidth bw); + +/** + * policy_mgr_allow_concurrency_csa() - Check for allowed concurrency + * combination when channel switch + * @psoc: PSOC object information + * @mode: connection mode + * @channel: target 
channel to switch + * @vdev_id: vdev id of channel switch interface + * + * There is already existing SAP+GO combination but due to upper layer + * notifying LTE-COEX event or sending command to move one of the connections + * to different channel. In such cases before moving existing connection to new + * channel, check if new channel can co-exist with the other existing + * connection. For example, one SAP1 is on channel-6 and second SAP2 is on + * channel-36 and lets say they are doing DBS, and lets say upper layer sends + * LTE-COEX to move SAP1 from channel-6 to channel-149. In this case, SAP1 and + * SAP2 will end up doing MCC which may not be desirable result. such cases + * will be prevented with this API. + * + * Return: True/False + */ +bool policy_mgr_allow_concurrency_csa(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_con_mode mode, + uint8_t channel, + uint32_t vdev_id); + +/** + * policy_mgr_get_first_connection_pcl_table_index() - provides the + * row index to firstConnectionPclTable to get to the correct + * pcl + * @psoc: PSOC object information + * + * This function provides the row index to + * firstConnectionPclTable. The index is the preference config. + * + * Return: table index + */ +enum policy_mgr_conc_priority_mode + policy_mgr_get_first_connection_pcl_table_index( + struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_get_second_connection_pcl_table_index() - provides the + * row index to secondConnectionPclTable to get to the correct + * pcl + * @psoc: PSOC object information + * + * This function provides the row index to + * secondConnectionPclTable. The index is derived based on + * current connection, band on which it is on & chain mask it is + * using, as obtained from pm_conc_connection_list. 
+ * + * Return: table index + */ +enum policy_mgr_one_connection_mode + policy_mgr_get_second_connection_pcl_table_index( + struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_get_third_connection_pcl_table_index() - provides the + * row index to thirdConnectionPclTable to get to the correct + * pcl + * @psoc: PSOC object information + * + * This function provides the row index to + * thirdConnectionPclTable. The index is derived based on + * current connection, band on which it is on & chain mask it is + * using, as obtained from pm_conc_connection_list. + * + * Return: table index + */ +enum policy_mgr_two_connection_mode + policy_mgr_get_third_connection_pcl_table_index( + struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_incr_connection_count() - adds the new connection to + * the current connections list + * @psoc: PSOC object information + * @vdev_id: vdev id + * + * + * This function adds the new connection to the current + * connections list + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_incr_connection_count(struct wlan_objmgr_psoc *psoc, + uint32_t vdev_id); + +/** + * policy_mgr_update_connection_info() - updates the existing + * connection in the current connections list + * @psoc: PSOC object information + * @vdev_id: vdev id + * + * + * This function adds the new connection to the current + * connections list + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_update_connection_info(struct wlan_objmgr_psoc *psoc, + uint32_t vdev_id); + +/** + * policy_mgr_decr_connection_count() - remove the old connection + * from the current connections list + * @psoc: PSOC object information + * @vdev_id: vdev id of the old connection + * + * + * This function removes the old connection from the current + * connections list + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_decr_connection_count(struct wlan_objmgr_psoc *psoc, + uint32_t vdev_id); + +/** + * policy_mgr_current_connections_update() - initiates actions + * needed on current 
 connections once channel has been decided
+ * for the new connection
+ * @psoc: PSOC object information
+ * @session_id: Session id
+ * @channel: Channel on which new connection will be
+ * @reason: Reason for which connection update is required
+ *
+ * This function initiates actions
+ * needed on current connections once channel has been decided
+ * for the new connection. Notifies UMAC & FW as well
+ *
+ * Return: QDF_STATUS enum
+ */
+QDF_STATUS policy_mgr_current_connections_update(struct wlan_objmgr_psoc *psoc,
+		uint32_t session_id, uint8_t channel,
+		enum policy_mgr_conn_update_reason);
+
+/**
+ * policy_mgr_is_dbs_allowed_for_concurrency() - If dbs is allowed for current
+ * concurrency
+ * @new_conn_mode: new connection mode
+ *
+ * When a new connection is about to come up, check if dbs is allowed for
+ * STA+STA or STA+P2P
+ *
+ * Return: true if dbs is allowed for STA+STA or STA+P2P else false
+ */
+bool policy_mgr_is_dbs_allowed_for_concurrency(
+		struct wlan_objmgr_psoc *psoc, enum QDF_OPMODE new_conn_mode);
+
+/**
+ * policy_mgr_is_ibss_conn_exist() - to check if IBSS connection already present
+ * @psoc: PSOC object information
+ * @ibss_channel: pointer to ibss channel which needs to be filled
+ *
+ * This routine will check if IBSS connection already exists or not. If it
+ * exists then this routine will return true and fill the ibss_channel value. 
+ * + * Return: true if ibss connection exist else false + */ +bool policy_mgr_is_ibss_conn_exist(struct wlan_objmgr_psoc *psoc, + uint8_t *ibss_channel); + +/** + * policy_mgr_get_conn_info() - get the current connections list + * @len: length of the list + * + * This function returns a pointer to the current connections + * list + * + * Return: pointer to connection list + */ +struct policy_mgr_conc_connection_info *policy_mgr_get_conn_info( + uint32_t *len); +#ifdef MPC_UT_FRAMEWORK +/** + * policy_mgr_incr_connection_count_utfw() - adds the new + * connection to the current connections list + * @psoc: PSOC object information + * @vdev_id: vdev id + * @tx_streams: number of transmit spatial streams + * @rx_streams: number of receive spatial streams + * @chain_mask: chain mask + * @type: connection type + * @sub_type: connection subtype + * @channelid: channel number + * @mac_id: mac id + * + * This function adds the new connection to the current + * connections list + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_incr_connection_count_utfw(struct wlan_objmgr_psoc *psoc, + uint32_t vdev_id, uint32_t tx_streams, uint32_t rx_streams, + uint32_t chain_mask, uint32_t type, uint32_t sub_type, + uint32_t channelid, uint32_t mac_id); + +/** + * policy_mgr_update_connection_info_utfw() - updates the + * existing connection in the current connections list + * @psoc: PSOC object information + * @vdev_id: vdev id + * @tx_streams: number of transmit spatial streams + * @rx_streams: number of receive spatial streams + * @chain_mask: chain mask + * @type: connection type + * @sub_type: connection subtype + * @channelid: channel number + * @mac_id: mac id + * + * This function updates the connection to the current + * connections list + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_update_connection_info_utfw(struct wlan_objmgr_psoc *psoc, + uint32_t vdev_id, uint32_t tx_streams, uint32_t rx_streams, + uint32_t chain_mask, uint32_t type, uint32_t sub_type, + 
uint32_t channelid, uint32_t mac_id); + +/** + * policy_mgr_decr_connection_count_utfw() - remove the old + * connection from the current connections list + * @psoc: PSOC object information + * @del_all: delete all entries + * @vdev_id: vdev id + * + * This function removes the old connection from the current + * connections list + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_decr_connection_count_utfw(struct wlan_objmgr_psoc *psoc, + uint32_t del_all, uint32_t vdev_id); + +/** + * policy_mgr_get_pcl_from_first_conn_table() - Get PCL for new + * connection from first connection table + * @type: Connection mode of type 'policy_mgr_con_mode' + * @sys_pref: System preference + * + * Get the PCL for a new connection + * + * Return: PCL channels enum + */ +enum policy_mgr_pcl_type policy_mgr_get_pcl_from_first_conn_table( + enum policy_mgr_con_mode type, + enum policy_mgr_conc_priority_mode sys_pref); + +/** + * policy_mgr_get_pcl_from_second_conn_table() - Get PCL for new + * connection from second connection table + * @idx: index into first connection table + * @type: Connection mode of type 'policy_mgr_con_mode' + * @sys_pref: System preference + * @dbs_capable: if HW DBS capable + * + * Get the PCL for a new connection + * + * Return: PCL channels enum + */ +enum policy_mgr_pcl_type policy_mgr_get_pcl_from_second_conn_table( + enum policy_mgr_one_connection_mode idx, enum policy_mgr_con_mode type, + enum policy_mgr_conc_priority_mode sys_pref, uint8_t dbs_capable); + +/** + * policy_mgr_get_pcl_from_third_conn_table() - Get PCL for new + * connection from third connection table + * @idx: index into second connection table + * @type: Connection mode of type 'policy_mgr_con_mode' + * @sys_pref: System preference + * @dbs_capable: if HW DBS capable + * + * Get the PCL for a new connection + * + * Return: PCL channels enum + */ +enum policy_mgr_pcl_type policy_mgr_get_pcl_from_third_conn_table( + enum policy_mgr_two_connection_mode idx, enum policy_mgr_con_mode 
type, + enum policy_mgr_conc_priority_mode sys_pref, uint8_t dbs_capable); +#else +static inline QDF_STATUS policy_mgr_incr_connection_count_utfw( + struct wlan_objmgr_psoc *psoc, uint32_t vdev_id, + uint32_t tx_streams, uint32_t rx_streams, + uint32_t chain_mask, uint32_t type, uint32_t sub_type, + uint32_t channelid, uint32_t mac_id) +{ + return QDF_STATUS_SUCCESS; +} +static inline QDF_STATUS policy_mgr_update_connection_info_utfw( + struct wlan_objmgr_psoc *psoc, uint32_t vdev_id, + uint32_t tx_streams, uint32_t rx_streams, + uint32_t chain_mask, uint32_t type, uint32_t sub_type, + uint32_t channelid, uint32_t mac_id) +{ + return QDF_STATUS_SUCCESS; +} +static inline QDF_STATUS policy_mgr_decr_connection_count_utfw( + struct wlan_objmgr_psoc *psoc, uint32_t del_all, + uint32_t vdev_id) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * policy_mgr_convert_device_mode_to_qdf_type() - provides the + * type translation from HDD to policy manager type + * @device_mode: Generic connection mode type + * + * + * This function provides the type translation + * + * Return: policy_mgr_con_mode enum + */ +enum policy_mgr_con_mode policy_mgr_convert_device_mode_to_qdf_type( + enum QDF_OPMODE device_mode); + +/** + * policy_mgr_get_qdf_mode_from_pm - provides the + * type translation from policy manager type + * to generic connection mode type + * @device_mode: policy manager mode type + * + * + * This function provides the type translation + * + * Return: QDF_OPMODE enum + */ +enum QDF_OPMODE policy_mgr_get_qdf_mode_from_pm( + enum policy_mgr_con_mode device_mode); + +/** + * policy_mgr_check_n_start_opportunistic_timer - check single mac upgrade + * needed or not, if needed start the oppurtunistic timer. 
+ * @psoc: pointer to SOC
+ *
+ * This function starts the opportunistic timer if hw_mode change is needed
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS policy_mgr_check_n_start_opportunistic_timer(
+		struct wlan_objmgr_psoc *psoc);
+
+/**
+ * policy_mgr_pdev_set_hw_mode() - Set HW mode command to FW
+ * @psoc: PSOC object information
+ * @session_id: Session ID
+ * @mac0_ss: MAC0 spatial stream configuration
+ * @mac0_bw: MAC0 bandwidth configuration
+ * @mac1_ss: MAC1 spatial stream configuration
+ * @mac1_bw: MAC1 bandwidth configuration
+ * @dbs: HW DBS capability
+ * @dfs: HW Agile DFS capability
+ * @sbs: HW SBS capability
+ * @reason: Reason for connection update
+ * @next_action: next action to happen at policy mgr after
+ * HW mode change
+ *
+ * Sends the set hw mode request to FW
+ *
+ * e.g.: To configure 2x2_80
+ * mac0_ss = HW_MODE_SS_2x2, mac0_bw = HW_MODE_80_MHZ
+ * mac1_ss = HW_MODE_SS_0x0, mac1_bw = HW_MODE_BW_NONE
+ * dbs = HW_MODE_DBS_NONE, dfs = HW_MODE_AGILE_DFS_NONE,
+ * sbs = HW_MODE_SBS_NONE
+ * e.g.: To configure 1x1_80_1x1_40 (DBS)
+ * mac0_ss = HW_MODE_SS_1x1, mac0_bw = HW_MODE_80_MHZ
+ * mac1_ss = HW_MODE_SS_1x1, mac1_bw = HW_MODE_40_MHZ
+ * dbs = HW_MODE_DBS, dfs = HW_MODE_AGILE_DFS_NONE,
+ * sbs = HW_MODE_SBS_NONE
+ * e.g.: To configure 1x1_80_1x1_40 (Agile DFS)
+ * mac0_ss = HW_MODE_SS_1x1, mac0_bw = HW_MODE_80_MHZ
+ * mac1_ss = HW_MODE_SS_1x1, mac1_bw = HW_MODE_40_MHZ
+ * dbs = HW_MODE_DBS, dfs = HW_MODE_AGILE_DFS,
+ * sbs = HW_MODE_SBS_NONE
+ *
+ * Return: Success if the message made it down to the next layer
+ */
+QDF_STATUS policy_mgr_pdev_set_hw_mode(struct wlan_objmgr_psoc *psoc,
+		uint32_t session_id,
+		enum hw_mode_ss_config mac0_ss,
+		enum hw_mode_bandwidth mac0_bw,
+		enum hw_mode_ss_config mac1_ss,
+		enum hw_mode_bandwidth mac1_bw,
+		enum hw_mode_dbs_capab dbs,
+		enum hw_mode_agile_dfs_capab dfs,
+		enum hw_mode_sbs_capab sbs,
+		enum policy_mgr_conn_update_reason reason,
+		uint8_t next_action);
+
+/**
+ * 
policy_mgr_pdev_set_hw_mode_cback() - callback invoked by + * other component to provide set HW mode request status + * @status: status of the request + * @cfgd_hw_mode_index: new HW mode index + * @num_vdev_mac_entries: Number of mac entries + * @vdev_mac_map: The table of vdev to mac mapping + * @next_action: next action to happen at policy mgr after + * beacon update + * @reason: Reason for set HW mode + * @session_id: vdev id on which the request was made + * @context: PSOC object information + * + * This function is the callback registered with SME at set HW + * mode request time + * + * Return: None + */ +typedef void (*policy_mgr_pdev_set_hw_mode_cback)(uint32_t status, + uint32_t cfgd_hw_mode_index, + uint32_t num_vdev_mac_entries, + struct policy_mgr_vdev_mac_map *vdev_mac_map, + uint8_t next_action, + enum policy_mgr_conn_update_reason reason, + uint32_t session_id, void *context); + +/** + * policy_mgr_nss_update_cback() - callback invoked by other + * component to provide nss update request status + * @psoc: PSOC object information + * @tx_status: tx completion status for updated beacon with new + * nss value + * @vdev_id: vdev id for the specific connection + * @next_action: next action to happen at policy mgr after + * beacon update + * @reason: Reason for nss update + * @original_vdev_id: original request hwmode change vdev id + * + * This function is the callback registered with SME at nss + * update request time + * + * Return: None + */ +typedef void (*policy_mgr_nss_update_cback)(struct wlan_objmgr_psoc *psoc, + uint8_t tx_status, + uint8_t vdev_id, + uint8_t next_action, + enum policy_mgr_conn_update_reason reason, + uint32_t original_vdev_id); + +/** + * struct policy_mgr_sme_cbacks - SME Callbacks to be invoked + * from policy manager + * @sme_get_valid_channels: Get valid channel list + * @sme_get_nss_for_vdev: Get the allowed nss value for the vdev + * @sme_soc_set_dual_mac_config: Set the dual MAC scan & FW + * config + * 
@sme_pdev_set_hw_mode: Set the new HW mode to FW + * @sme_pdev_set_pcl: Set new PCL to FW + * @sme_nss_update_request: Update NSS value to FW + * @sme_change_mcc_beacon_interval: Set MCC beacon interval to FW + */ +struct policy_mgr_sme_cbacks { + QDF_STATUS (*sme_get_valid_channels)(uint8_t *chan_list, + uint32_t *list_len); + void (*sme_get_nss_for_vdev)(enum QDF_OPMODE, + uint8_t *nss_2g, uint8_t *nss_5g); + QDF_STATUS (*sme_soc_set_dual_mac_config)( + struct policy_mgr_dual_mac_config msg); + QDF_STATUS (*sme_pdev_set_hw_mode)(struct policy_mgr_hw_mode msg); + QDF_STATUS (*sme_pdev_set_pcl)(struct policy_mgr_pcl_list *msg); + QDF_STATUS (*sme_nss_update_request)(uint32_t vdev_id, + uint8_t new_nss, policy_mgr_nss_update_cback cback, + uint8_t next_action, struct wlan_objmgr_psoc *psoc, + enum policy_mgr_conn_update_reason reason, + uint32_t original_vdev_id); + QDF_STATUS (*sme_change_mcc_beacon_interval)(uint8_t session_id); + QDF_STATUS (*sme_get_ap_channel_from_scan)( + void *roam_profile, + void **scan_cache, + uint8_t *channel); + QDF_STATUS (*sme_scan_result_purge)( + void *scan_result); +}; + +/** + * struct policy_mgr_hdd_cbacks - HDD Callbacks to be invoked + * from policy manager + * @sap_restart_chan_switch_cb: Restart SAP + * @wlan_hdd_get_channel_for_sap_restart: Get channel to restart + * SAP + * @get_mode_for_non_connected_vdev: Get the mode for a non + * connected vdev + * @hdd_get_device_mode: Get QDF_OPMODE type for session id (vdev id) + * @hdd_wapi_security_sta_exist: Get whether wapi encription station existing + * or not. Some hw doesn't support WAPI encryption concurrency with other + * encryption type. 
+ */ +struct policy_mgr_hdd_cbacks { + void (*sap_restart_chan_switch_cb)(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, uint32_t channel, + uint32_t channel_bw, + bool forced); + QDF_STATUS (*wlan_hdd_get_channel_for_sap_restart)( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, uint8_t *channel, + uint8_t *sec_ch); + enum policy_mgr_con_mode (*get_mode_for_non_connected_vdev)( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id); + enum QDF_OPMODE (*hdd_get_device_mode)(uint32_t session_id); + bool (*hdd_wapi_security_sta_exist)(void); +}; + + +/** + * struct policy_mgr_tdls_cbacks - TDLS Callbacks to be invoked + * from policy manager + * @set_tdls_ct_mode: Set the tdls connection tracker mode + * @check_is_tdls_allowed: check if tdls allowed or not + */ +struct policy_mgr_tdls_cbacks { + void (*tdls_notify_increment_session)(struct wlan_objmgr_psoc *psoc); + void (*tdls_notify_decrement_session)(struct wlan_objmgr_psoc *psoc); +}; + +/** + * struct policy_mgr_cdp_cbacks - CDP Callbacks to be invoked + * from policy manager + * @cdp_update_mac_id: update mac_id for vdev + */ +struct policy_mgr_cdp_cbacks { + void (*cdp_update_mac_id)(struct wlan_objmgr_psoc *soc, + uint8_t vdev_id, uint8_t mac_id); +}; + +/** + * struct policy_mgr_dp_cbacks - CDP Callbacks to be invoked + * from policy manager + * @hdd_disable_rx_ol_in_concurrency: Callback to disable LRO/GRO offloads + * @hdd_set_rx_mode_rps_cb: Callback to set RPS + * @hdd_ipa_set_mcc_mode_cb: Callback to set mcc mode for ipa module + * @hdd_v2_flow_pool_map: Callback to create vdev flow pool + * @hdd_v2_flow_pool_unmap: Callback to delete vdev flow pool + */ +struct policy_mgr_dp_cbacks { + void (*hdd_disable_rx_ol_in_concurrency)(bool); + void (*hdd_set_rx_mode_rps_cb)(bool); + void (*hdd_ipa_set_mcc_mode_cb)(bool); + void (*hdd_v2_flow_pool_map)(int); + void (*hdd_v2_flow_pool_unmap)(int); +}; + +/** + * struct policy_mgr_wma_cbacks - WMA Callbacks to be invoked + * from policy manager + * 
@wma_get_connection_info: Get the connection related info
+ * from wma table
+ */
+struct policy_mgr_wma_cbacks {
+	QDF_STATUS (*wma_get_connection_info)(uint8_t vdev_id,
+		struct policy_mgr_vdev_entry_info *conn_table_entry);
+};
+
+/**
+ * policy_mgr_need_opportunistic_upgrade() - Tells us if we really
+ * need an upgrade to 2x2
+ * @psoc: PSOC object information
+ * This function returns if upgrade to 2x2 is needed
+ *
+ * Return: PM_NOP = upgrade is not needed, otherwise upgrade is
+ * needed
+ */
+enum policy_mgr_conc_next_action policy_mgr_need_opportunistic_upgrade(
+		struct wlan_objmgr_psoc *psoc);
+
+/**
+ * policy_mgr_next_actions() - initiates actions needed on current
+ * connections once channel has been decided for the new
+ * connection
+ * @psoc: PSOC object information
+ * @session_id: Session id
+ * @action: action to be executed
+ * @reason: Reason for connection update
+ *
+ * This function initiates actions
+ * needed on current connections once channel has been decided
+ * for the new connection. Notifies UMAC & FW as well
+ *
+ * Return: QDF_STATUS enum
+ */
+QDF_STATUS policy_mgr_next_actions(struct wlan_objmgr_psoc *psoc,
+		uint32_t session_id,
+		enum policy_mgr_conc_next_action action,
+		enum policy_mgr_conn_update_reason reason);
+
+/**
+ * policy_mgr_set_dual_mac_scan_config() - Set the dual MAC scan config
+ * @psoc: PSOC object information
+ * @dbs_val: Value of DBS bit
+ * @dbs_plus_agile_scan_val: Value of DBS plus agile scan bit
+ * @single_mac_scan_with_dbs_val: Value of Single MAC scan with DBS
+ *
+ * Set the values of scan config. 
For FW mode config, the existing values + * will be retained + * + * Return: None + */ +void policy_mgr_set_dual_mac_scan_config(struct wlan_objmgr_psoc *psoc, + uint8_t dbs_val, + uint8_t dbs_plus_agile_scan_val, + uint8_t single_mac_scan_with_dbs_val); + +/** + * policy_mgr_set_dual_mac_fw_mode_config() - Set the dual mac FW mode config + * @psoc: PSOC object information + * @dbs: DBS bit + * @dfs: Agile DFS bit + * + * Set the values of fw mode config. For scan config, the existing values + * will be retain. + * + * Return: None + */ +void policy_mgr_set_dual_mac_fw_mode_config(struct wlan_objmgr_psoc *psoc, + uint8_t dbs, uint8_t dfs); + +/** + * policy_mgr_soc_set_dual_mac_cfg_cb() - Callback for set dual mac config + * @status: Status of set dual mac config + * @scan_config: Current scan config whose status is the first param + * @fw_mode_config: Current FW mode config whose status is the first param + * + * Callback on setting the dual mac configuration + * + * Return: None + */ +void policy_mgr_soc_set_dual_mac_cfg_cb(enum set_hw_mode_status status, + uint32_t scan_config, uint32_t fw_mode_config); + +/** + * policy_mgr_map_concurrency_mode() - to map concurrency mode + * between sme and hdd + * @old_mode: sme provided adapter mode + * @new_mode: hdd provided concurrency mode + * + * This routine will map concurrency mode between sme and hdd + * + * Return: true or false + */ +bool policy_mgr_map_concurrency_mode(enum QDF_OPMODE *old_mode, + enum policy_mgr_con_mode *new_mode); + +/** + * policy_mgr_get_channel_from_scan_result() - to get channel from scan result + * @psoc: PSOC object information + * @roam_profile: pointer to roam profile + * @channel: channel to be filled + * + * This routine gets channel which most likely a candidate to which STA + * will make connection. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_get_channel_from_scan_result( + struct wlan_objmgr_psoc *psoc, + void *roam_profile, uint8_t *channel); + +/** + * policy_mgr_mode_specific_num_open_sessions() - to get number of open sessions + * for a specific mode + * @psoc: PSOC object information + * @mode: device mode + * @num_sessions: to store num open sessions + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_mode_specific_num_open_sessions( + struct wlan_objmgr_psoc *psoc, enum QDF_OPMODE mode, + uint8_t *num_sessions); + +/** + * policy_mgr_mode_specific_num_active_sessions() - to get number of active + * sessions for a specific mode + * @psoc: PSOC object information + * @mode: device mode + * @num_sessions: to store num active sessions + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_mode_specific_num_active_sessions( + struct wlan_objmgr_psoc *psoc, enum QDF_OPMODE mode, + uint8_t *num_sessions); + +/** + * policy_mgr_concurrent_open_sessions_running() - Checks for + * concurrent open session + * @psoc: PSOC object information + * + * Checks if more than one open session is running for all the allowed modes + * in the driver + * + * Return: True if more than one open session exists, False otherwise + */ +bool policy_mgr_concurrent_open_sessions_running( + struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_max_concurrent_connections_reached() - Check if + * max conccurrency is reached + * @psoc: PSOC object information + * Checks for presence of concurrency where more than one connection exists + * + * Return: True if the max concurrency is reached, False otherwise + * + * Example: + * STA + STA (wlan0 and wlan1 are connected) - returns true + * STA + STA (wlan0 connected and wlan1 disconnected) - returns false + * DUT with P2P-GO + P2P-CLIENT connection) - returns true + * + */ +bool policy_mgr_max_concurrent_connections_reached( + struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_clear_concurrent_session_count() - Clear active 
session count + * @psoc: PSOC object information + * Clears the active session count for all modes + * + * Return: None + */ +void policy_mgr_clear_concurrent_session_count(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_is_multiple_active_sta_sessions() - Check for + * multiple STA connections + * @psoc: PSOC object information + * + * Checks if multiple active STA connection are in the driver + * + * Return: True if multiple STA sessions are present, False otherwise + * + */ +bool policy_mgr_is_multiple_active_sta_sessions( + struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_is_sta_active_connection_exists() - Check if a STA + * connection is active + * @psoc: PSOC object information + * + * Checks if there is atleast one active STA connection in the driver + * + * Return: True if an active STA session is present, False otherwise + */ +bool policy_mgr_is_sta_active_connection_exists( + struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_concurrent_beaconing_sessions_running() - Checks + * for concurrent beaconing entities + * @psoc: PSOC object information + * + * Checks if multiple beaconing sessions are running i.e., if SAP or GO or IBSS + * are beaconing together + * + * Return: True if multiple entities are beaconing together, False otherwise + */ +bool policy_mgr_concurrent_beaconing_sessions_running( + struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_wait_for_connection_update() - Wait for hw mode + * command to get processed + * @psoc: PSOC object information + * Waits for CONNECTION_UPDATE_TIMEOUT duration until the set hw mode + * response sets the event connection_update_done_evt + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_wait_for_connection_update( + struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_reset_connection_update() - Reset connection + * update event + * @psoc: PSOC object information + * Resets the concurrent connection update event + * + * Return: QDF_STATUS + */ +QDF_STATUS 
policy_mgr_reset_connection_update(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_set_connection_update() - Set connection update + * event + * @psoc: PSOC object information + * Sets the concurrent connection update event + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_set_connection_update(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_set_chan_switch_complete_evt() - set channel + * switch completion event + * @psoc: PSOC object information + * Sets the channel switch completion event. + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_set_chan_switch_complete_evt( + struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_reset_chan_switch_complete_evt() - reset channel + * switch completion event + * @psoc: PSOC object information + * Resets the channel switch completion event. + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_reset_chan_switch_complete_evt( + struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_set_opportunistic_update() - Set opportunistic + * update event + * @psoc: PSOC object information + * Sets the opportunistic update event + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_set_opportunistic_update(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_stop_opportunistic_timer() - Stops opportunistic timer + * @psoc: PSOC object information + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_stop_opportunistic_timer(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_restart_opportunistic_timer() - Restarts opportunistic timer + * @psoc: PSOC object information + * @check_state: check timer state if this flag is set, else restart + * irrespective of state + * + * Restarts opportunistic timer for DBS_OPPORTUNISTIC_TIME seconds. + * Check if current state is RUNNING if check_state is set, else + * restart the timer irrespective of state. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_restart_opportunistic_timer( + struct wlan_objmgr_psoc *psoc, bool check_state); + +/** + * policy_mgr_modify_sap_pcl_based_on_mandatory_channel() - + * Modify SAPs PCL based on mandatory channel list + * @psoc: PSOC object information + * @pcl_list_org: Pointer to the preferred channel list to be trimmed + * @weight_list_org: Pointer to the weights of the preferred channel list + * @pcl_len_org: Pointer to the length of the preferred chanel list + * + * Modifies the preferred channel list of SAP based on the mandatory channel + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_modify_sap_pcl_based_on_mandatory_channel( + struct wlan_objmgr_psoc *psoc, uint8_t *pcl_list_org, + uint8_t *weight_list_org, uint32_t *pcl_len_org); + +/** + * policy_mgr_update_and_wait_for_connection_update() - Update and wait for + * connection update + * @psoc: PSOC object information + * @session_id: Session id + * @channel: Channel number + * @reason: Reason for connection update + * + * Update the connection to either single MAC or dual MAC and wait for the + * update to complete + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_update_and_wait_for_connection_update( + struct wlan_objmgr_psoc *psoc, uint8_t session_id, + uint8_t channel, enum policy_mgr_conn_update_reason reason); + +/** + * policy_mgr_is_sap_mandatory_channel_set() - Checks if SAP + * mandatory channel is set + * @psoc: PSOC object information + * Checks if any mandatory channel is set for SAP operation + * + * Return: True if mandatory channel is set, false otherwise + */ +bool policy_mgr_is_sap_mandatory_channel_set(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_list_has_24GHz_channel() - Check if list contains 2.4GHz channels + * @channel_list: Channel list + * @list_len: Length of the channel list + * + * Checks if the channel list contains atleast one 2.4GHz channel + * + * Return: True if 2.4GHz channel is present, false otherwise + */ 
+bool policy_mgr_list_has_24GHz_channel(uint8_t *channel_list, + uint32_t list_len); + +/** + * policy_mgr_get_valid_chans_from_range() - get valid channel from given range + * @psoc: PSOC object information + * @ch_list: Pointer to the channel list + * @ch_cnt: Pointer to the length of the channel list + * @mode: Device mode + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_get_valid_chans_from_range(struct wlan_objmgr_psoc *psoc, + uint8_t *ch_list, + uint32_t *ch_cnt, + enum policy_mgr_con_mode mode); + +/** + * policy_mgr_get_valid_chans() - Get the valid channel list + * @psoc: PSOC object information + * @chan_list: Pointer to the valid channel list + * @list_len: Pointer to the length of the valid channel list + * + * Gets the valid channel list filtered by band + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_get_valid_chans(struct wlan_objmgr_psoc *psoc, + uint8_t *chan_list, uint32_t *list_len); + +/** + * policy_mgr_get_nss_for_vdev() - Get the allowed nss value for the + * vdev + * @psoc: PSOC object information + * @dev_mode: connection type. + * @nss2g: Pointer to the 2G Nss parameter. + * @nss5g: Pointer to the 5G Nss parameter. + * + * Fills the 2G and 5G Nss values based on connection type. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_get_nss_for_vdev(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_con_mode mode, + uint8_t *nss_2g, uint8_t *nss_5g); + +/** + * policy_mgr_get_sap_mandatory_channel() - Get the mandatory channel for SAP + * @psoc: PSOC object information + * @chan: Pointer to the SAP mandatory channel + * + * Gets the mandatory channel for SAP operation + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_get_sap_mandatory_channel(struct wlan_objmgr_psoc *psoc, + uint32_t *chan); + +/** + * policy_mgr_set_sap_mandatory_channels() - Set the mandatory channel for SAP + * @psoc: PSOC object information + * @channels: Channel list to be set + * @len: Length of the channel list + * + * Sets the channels for the mandatory channel list along with the length of + * of the channel list. + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_set_sap_mandatory_channels(struct wlan_objmgr_psoc *psoc, + uint8_t *channels, uint32_t len); + +/** + * policy_mgr_is_any_mode_active_on_band_along_with_session() - + * Check if any connection mode is active on a band along with + * the given session + * @psoc: PSOC object information + * @session_id: Session along which active sessions are looked for + * @band: Operating frequency band of the connection + * POLICY_MGR_BAND_24: Looks for active connection on 2.4 GHz only + * POLICY_MGR_BAND_5: Looks for active connection on 5 GHz only + * + * Checks if any of the connection mode is active on a given frequency band + * + * Return: True if any connection is active on a given band, false otherwise + */ +bool policy_mgr_is_any_mode_active_on_band_along_with_session( + struct wlan_objmgr_psoc *psoc, uint8_t session_id, + enum policy_mgr_band band); + +/** + * policy_mgr_get_chan_by_session_id() - Get channel for a given session ID + * @psoc: PSOC object information + * @session_id: Session ID + * @chan: Pointer to the channel + * + * Gets the channel for a given session ID + * + * Return: QDF_STATUS + 
*/ +QDF_STATUS policy_mgr_get_chan_by_session_id(struct wlan_objmgr_psoc *psoc, + uint8_t session_id, uint8_t *chan); + +/** + * policy_mgr_get_mac_id_by_session_id() - Get MAC ID for a given session ID + * @psoc: PSOC object information + * @session_id: Session ID + * @mac_id: Pointer to the MAC ID + * + * Gets the MAC ID for a given session ID + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_get_mac_id_by_session_id(struct wlan_objmgr_psoc *psoc, + uint8_t session_id, uint8_t *mac_id); + +/** + * policy_mgr_get_mcc_session_id_on_mac() - Get MCC session's ID + * @psoc: PSOC object information + * @mac_id: MAC ID on which MCC session needs to be found + * @session_id: Session with which MCC combination needs to be found + * @mcc_session_id: Pointer to the MCC session ID + * + * Get the session ID of the MCC interface + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_get_mcc_session_id_on_mac(struct wlan_objmgr_psoc *psoc, + uint8_t mac_id, uint8_t session_id, + uint8_t *mcc_session_id); + +/** + * policy_mgr_get_mcc_operating_channel() - Get the MCC channel + * @psoc: PSOC object information + * @session_id: Session ID with which MCC is being done + * + * Gets the MCC channel for a given session ID. 
+ * + * Return: '0' (INVALID_CHANNEL_ID) or valid channel number + */ +uint8_t policy_mgr_get_mcc_operating_channel(struct wlan_objmgr_psoc *psoc, + uint8_t session_id); + +/** + * policy_mgr_get_pcl_for_existing_conn() - Get PCL for existing connection + * @psoc: PSOC object information + * @mode: Connection mode of type 'policy_mgr_con_mode' + * @pcl_ch: Pointer to the PCL + * @len: Pointer to the length of the PCL + * @pcl_weight: Pointer to the weights of the PCL + * @weight_len: Max length of the weights list + * @all_matching_cxn_to_del: Need remove all entries before getting pcl + * + * Get the PCL for an existing connection + * + * Return: None + */ +QDF_STATUS policy_mgr_get_pcl_for_existing_conn(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_con_mode mode, + uint8_t *pcl_ch, uint32_t *len, + uint8_t *weight_list, uint32_t weight_len, + bool all_matching_cxn_to_del); + +/** + * policy_mgr_get_valid_chan_weights() - Get the weightage for + * all valid channels + * @psoc: PSOC object information + * @weight: Pointer to the structure containing pcl, saved channel list and + * weighed channel list + * + * Provides the weightage for all valid channels. This compares the PCL list + * with the valid channel list. The channels present in the PCL get their + * corresponding weightage and the non-PCL channels get the default weightage + * of WEIGHT_OF_NON_PCL_CHANNELS. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_get_valid_chan_weights(struct wlan_objmgr_psoc *psoc, + struct policy_mgr_pcl_chan_weights *weight); + +/** + * policy_mgr_set_hw_mode_on_channel_switch() - Set hw mode + * after channel switch + * @session_id: Session ID + * + * Sets hw mode after doing a channel switch + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_set_hw_mode_on_channel_switch( + struct wlan_objmgr_psoc *psoc, uint8_t session_id); + +/** + * policy_mgr_set_do_hw_mode_change_flag() - Set flag to indicate hw mode change + * @psoc: PSOC object information + * @flag: Indicate if hw mode change is required or not + * + * Set the flag to indicate whether a hw mode change is required after a + * vdev up or not. Flag value of true indicates that a hw mode change is + * required after vdev up. + * + * Return: None + */ +void policy_mgr_set_do_hw_mode_change_flag(struct wlan_objmgr_psoc *psoc, + bool flag); + +/** + * policy_mgr_is_hw_mode_change_after_vdev_up() - Check if hw + * mode change is needed + * @psoc: PSOC object information + * Returns the flag which indicates if a hw mode change is required after + * vdev up. + * + * Return: True if hw mode change is required, false otherwise + */ +bool policy_mgr_is_hw_mode_change_after_vdev_up(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_checkn_update_hw_mode_single_mac_mode() - Set hw_mode to SMM + * if required + * @psoc: PSOC object information + * @channel: channel number for the new STA connection + * + * After the STA disconnection, if the hw_mode is in DBS and the new STA + * connection is coming in the band in which existing connections are + * present, then this function stops the dbs opportunistic timer and sets + * the hw_mode to Single MAC mode (SMM). 
+ * + * Return: None + */ +void policy_mgr_checkn_update_hw_mode_single_mac_mode( + struct wlan_objmgr_psoc *psoc, uint8_t channel); + +/** + * policy_mgr_dump_connection_status_info() - Dump the concurrency information + * @psoc: PSOC object information + * Prints the concurrency information such as tx/rx spatial stream, chainmask, + * etc. + * + * Return: None + */ +void policy_mgr_dump_connection_status_info(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_mode_specific_vdev_id() - provides the + * vdev id of the specific mode + * @psoc: PSOC object information + * @mode: type of connection + * + * This function provides vdev id for the given mode + * + * Return: vdev id + */ +uint32_t policy_mgr_mode_specific_vdev_id(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_con_mode mode); + +/** + * policy_mgr_mode_specific_connection_count() - provides the + * count of connections of specific mode + * @psoc: PSOC object information + * @mode: type of connection + * @list: To provide the indices on pm_conc_connection_list + * (optional) + * + * This function provides the count of current connections + * + * Return: connection count of specific type + */ +uint32_t policy_mgr_mode_specific_connection_count( + struct wlan_objmgr_psoc *psoc, enum policy_mgr_con_mode mode, + uint32_t *list); + +/** + * policy_mgr_check_conn_with_mode_and_vdev_id() - checks if any active + * session with specific mode and vdev_id + * @psoc: PSOC object information + * @mode: type of connection + * @vdev_id: vdev_id of the connection + * + * This function checks if any active session with specific mode and vdev_id + * is present + * + * Return: QDF STATUS with success if active session is found, else failure + */ +QDF_STATUS policy_mgr_check_conn_with_mode_and_vdev_id( + struct wlan_objmgr_psoc *psoc, enum policy_mgr_con_mode mode, + uint32_t vdev_id); + +/** + * policy_mgr_hw_mode_transition_cb() - Callback for HW mode + * transition from FW + * @old_hw_mode_index: Old HW mode index + * 
@new_hw_mode_index: New HW mode index + * @num_vdev_mac_entries: Number of vdev-mac id mapping that follows + * @vdev_mac_map: vdev-mac id map. This memory will be freed by the caller. + * So, make local copy if needed. + * + * Provides the old and new HW mode index set by the FW + * + * Return: None + */ +void policy_mgr_hw_mode_transition_cb(uint32_t old_hw_mode_index, + uint32_t new_hw_mode_index, + uint32_t num_vdev_mac_entries, + struct policy_mgr_vdev_mac_map *vdev_mac_map, + struct wlan_objmgr_psoc *context); + +/** + * policy_mgr_current_concurrency_is_mcc() - To check the current + * concurrency combination if it is doing MCC + * @psoc: PSOC object information + * This routine is called to check if it is doing MCC + * + * Return: True - MCC, False - Otherwise + */ +bool policy_mgr_current_concurrency_is_mcc(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_register_sme_cb() - register SME callbacks + * @psoc: PSOC object information + * @sme_cbacks: function pointers from SME + * + * API, allows SME to register callbacks to be invoked by policy + * mgr + * + * Return: SUCCESS, + * Failure (if registration fails) + */ +QDF_STATUS policy_mgr_register_sme_cb(struct wlan_objmgr_psoc *psoc, + struct policy_mgr_sme_cbacks *sme_cbacks); + +/** + * policy_mgr_register_hdd_cb() - register HDD callbacks + * @psoc: PSOC object information + * @hdd_cbacks: function pointers from HDD + * + * API, allows HDD to register callbacks to be invoked by policy + * mgr + * + * Return: SUCCESS, + * Failure (if registration fails) + */ +QDF_STATUS policy_mgr_register_hdd_cb(struct wlan_objmgr_psoc *psoc, + struct policy_mgr_hdd_cbacks *hdd_cbacks); + +/** + * policy_mgr_deregister_hdd_cb() - Deregister HDD callbacks + * @psoc: PSOC object information + * + * API, allows HDD to deregister callbacks + * + * Return: SUCCESS, + * Failure (if de-registration fails) + */ +QDF_STATUS policy_mgr_deregister_hdd_cb(struct wlan_objmgr_psoc *psoc); + +/** + * 
policy_mgr_register_tdls_cb() - register TDLS callbacks + * @psoc: PSOC object information + * @tdls_cbacks: function pointers from TDLS + * + * API, allows TDLS to register callbacks to be invoked by + * policy mgr + * + * Return: SUCCESS, + * Failure (if registration fails) + */ +QDF_STATUS policy_mgr_register_tdls_cb(struct wlan_objmgr_psoc *psoc, + struct policy_mgr_tdls_cbacks *tdls_cbacks); + +/** + * policy_mgr_register_cdp_cb() - register CDP callbacks + * @psoc: PSOC object information + * @cdp_cbacks: function pointers from CDP + * + * API, allows CDP to register callbacks to be invoked by + * policy mgr + * + * Return: SUCCESS, + * Failure (if registration fails) + */ +QDF_STATUS policy_mgr_register_cdp_cb(struct wlan_objmgr_psoc *psoc, + struct policy_mgr_cdp_cbacks *cdp_cbacks); + +/** + * policy_mgr_register_dp_cb() - register CDP callbacks + * @psoc: PSOC object information + * @cdp_cbacks: function pointers from CDP + * + * API, allows CDP to register callbacks to be invoked by + * policy mgr + * + * Return: SUCCESS, + * Failure (if registration fails) + */ +QDF_STATUS policy_mgr_register_dp_cb(struct wlan_objmgr_psoc *psoc, + struct policy_mgr_dp_cbacks *dp_cbacks); + +/** + * policy_mgr_register_wma_cb() - register WMA callbacks + * @psoc: PSOC object information + * @wma_cbacks: function pointers from WMA + * + * API, allows WMA to register callbacks to be invoked by policy + * mgr + * + * Return: SUCCESS, + * Failure (if registration fails) + */ +QDF_STATUS policy_mgr_register_wma_cb(struct wlan_objmgr_psoc *psoc, + struct policy_mgr_wma_cbacks *wma_cbacks); + +/** + * policy_mgr_find_if_fw_supports_dbs() - to find if FW/HW supports DBS + * @psoc: PSOC object information + * + * This API checks if legacy service ready event contains DBS or no. + * This API doesn't check service ready extension which contains actual + * hw mode list that tells if all supported HW modes' caps. 
+ * + * Return: true (if service ready indication supports DBS or no) else false + * + */ +bool policy_mgr_find_if_fw_supports_dbs(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_is_dbs_enable() - Check if master DBS control is enabled + * @psoc: PSOC object information + * Checks if the master DBS control is enabled. This will be used + * to override any other DBS capability + * + * Return: True if master DBS control is enabled + */ +bool policy_mgr_is_dbs_enable(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_is_hw_dbs_capable() - Check if HW is DBS capable + * @psoc: PSOC object information + * Checks if the HW is DBS capable + * + * Return: true if the HW is DBS capable + */ +bool policy_mgr_is_hw_dbs_capable(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_is_hw_sbs_capable() - Check if HW is SBS capable + * @psoc: PSOC object information + * Checks if the HW is SBS capable + * + * Return: true if the HW is SBS capable + */ +bool policy_mgr_is_hw_sbs_capable(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_is_current_hwmode_dbs() - Check if current hw mode is DBS + * @psoc: PSOC object information + * Checks if current hardware mode of the system is DBS or no + * + * Return: true or false + */ +bool policy_mgr_is_current_hwmode_dbs(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_is_hw_dbs_2x2_capable() - if hardware is capable of dbs 2x2 + * @psoc: PSOC object information + * This function checks if hw_modes supported are always capable of + * DBS and there is no need for downgrading while entering DBS. 
+ * true: DBS 2x2 can always be supported + * false: hw_modes support DBS 1x1 as well + * + * Return: true - DBS2x2, false - DBS1x1 + */ +bool policy_mgr_is_hw_dbs_2x2_capable(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_init() - Policy Manager component initialization + * routine + * + * Return - QDF Status + */ +QDF_STATUS policy_mgr_init(void); + +/** + * policy_mgr_deinit() - Policy Manager component + * de-initialization routine + * + * Return - QDF Status + */ +QDF_STATUS policy_mgr_deinit(void); + +/** + * policy_mgr_psoc_enable() - Policy Manager component + * enable routine + * @psoc: PSOC object information + * + * Return - QDF Status + */ +QDF_STATUS policy_mgr_psoc_enable(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_psoc_disable() - Policy Manager component + * disable routine + * @psoc: PSOC object information + * + * Return - QDF Status + */ +QDF_STATUS policy_mgr_psoc_disable(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_psoc_open() - Policy Manager component + * open routine + * @psoc: PSOC object information + * + * Return - QDF Status + */ +QDF_STATUS policy_mgr_psoc_open(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_psoc_close() - Policy Manager component + * close routine + * @psoc: PSOC object information + * + * Return - QDF Status + */ +QDF_STATUS policy_mgr_psoc_close(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_get_num_dbs_hw_modes() - Get number of HW mode + * @psoc: PSOC object information + * Fetches the number of DBS HW modes returned by the FW + * + * Return: Negative value on error or returns the number of DBS HW modes + */ +int8_t policy_mgr_get_num_dbs_hw_modes(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_get_dbs_hw_modes() - Get the DBS HW modes for userspace + * @psoc: PSOC object information + * @one_by_one_dbs: 1x1 DBS capability of HW + * @two_by_two_dbs: 2x2 DBS capability of HW + * + * Provides the DBS HW mode capability such as whether + * 1x1 DBS, 2x2 DBS is supported by 
 the HW or not. + * + * Return: Failure in case of error and 0 on success + * one_by_one_dbs/two_by_two_dbs will be false, + * if they are not supported. + * one_by_one_dbs/two_by_two_dbs will be true, + * if they are supported. + * false values of one_by_one_dbs/two_by_two_dbs, + * indicate DBS is disabled + */ +QDF_STATUS policy_mgr_get_dbs_hw_modes(struct wlan_objmgr_psoc *psoc, + bool *one_by_one_dbs, bool *two_by_two_dbs); + +/** + * policy_mgr_check_sta_ap_concurrent_ch_intf() - Restart SAP in STA-AP case + * @data: Pointer to STA adapter + * + * Restarts the SAP interface in STA-AP concurrency scenario + * + * Return: None + */ +void policy_mgr_check_sta_ap_concurrent_ch_intf(void *data); + +/** + * policy_mgr_get_current_hw_mode() - Get current HW mode params + * @psoc: PSOC object information + * @hw_mode: HW mode parameters + * + * Provides the current HW mode parameters if the HW mode is initialized + * in the driver + * + * Return: Success if the current HW mode params are successfully populated + */ +QDF_STATUS policy_mgr_get_current_hw_mode(struct wlan_objmgr_psoc *psoc, + struct policy_mgr_hw_mode_params *hw_mode); + +/** + * policy_mgr_get_dbs_plus_agile_scan_config() - Get DBS plus agile scan bit + * @psoc: PSOC object information + * Gets the DBS plus agile scan bit of concurrent_scan_config_bits + * + * Return: 0 or 1 to indicate the DBS plus agile scan bit + */ +bool policy_mgr_get_dbs_plus_agile_scan_config(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_get_single_mac_scan_with_dfs_config() - Get Single + * MAC scan with DFS bit + * @psoc: PSOC object information + * Gets the Single MAC scan with DFS bit of concurrent_scan_config_bits + * + * Return: 0 or 1 to indicate the Single MAC scan with DFS bit + */ +bool policy_mgr_get_single_mac_scan_with_dfs_config( + struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_set_hw_mode_change_in_progress() - Set value + * corresponding to policy_mgr_hw_mode_change that indicate if + * HW mode 
change is in progress + * @psoc: PSOC object information + * @value: Indicate if hw mode change is in progress + * + * Set the value corresponding to policy_mgr_hw_mode_change that + * indicated if hw mode change is in progress. + * + * Return: None + */ +void policy_mgr_set_hw_mode_change_in_progress( + struct wlan_objmgr_psoc *psoc, enum policy_mgr_hw_mode_change value); + +/** + * policy_mgr_is_hw_mode_change_in_progress() - Check if HW mode + * change is in progress. + * @psoc: PSOC object information + * + * Returns the corresponding policy_mgr_hw_mode_change value. + * + * Return: policy_mgr_hw_mode_change value. + */ +enum policy_mgr_hw_mode_change policy_mgr_is_hw_mode_change_in_progress( + struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_get_hw_mode_change_from_hw_mode_index() - Get + * matching HW mode from index + * @psoc: PSOC object information + * @hw_mode_index: HW mode index + * Returns the corresponding policy_mgr_hw_mode_change HW mode. + * + * Return: policy_mgr_hw_mode_change value. + */ +enum policy_mgr_hw_mode_change policy_mgr_get_hw_mode_change_from_hw_mode_index( + struct wlan_objmgr_psoc *psoc, uint32_t hw_mode_index); + +/** + * policy_mgr_is_scan_simultaneous_capable() - Check if scan + * parallelization is supported or not + * @psoc: PSOC object information + * currently scan parallelization feature support is dependent on DBS but + * it can be independent in future. 
+ * + * Return: True if master DBS control is enabled + */ +bool policy_mgr_is_scan_simultaneous_capable(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_is_mcc_adaptive_scheduler_enabled() - Function to + * gets the policy manager mcc adaptive scheduler enabled + * @psoc: PSOC object information + * + * This function gets the value mcc adaptive scheduler + * + * Return: true if MCC adaptive scheduler is set else false + * + */ +bool policy_mgr_is_mcc_adaptive_scheduler_enabled( + struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_set_user_cfg() - Function to set user cfg variables + * required by policy manager component + * @psoc: PSOC object information + * @user_cfg: User config variables structure pointer + * + * This function sets the user cfg variables required by policy + * manager + * + * Return: SUCCESS or FAILURE + * + */ +QDF_STATUS policy_mgr_set_user_cfg(struct wlan_objmgr_psoc *psoc, + struct policy_mgr_user_cfg *user_cfg); + +/** + * policy_mgr_init_dbs_config() - Function to initialize DBS + * config in policy manager component + * @psoc: PSOC object information + * @scan_config: DBS scan config + * @fw_config: DBS FW config + * + * This function sets the DBS configurations required by policy + * manager + * + * Return: SUCCESS or FAILURE + * + */ +void policy_mgr_init_dbs_config(struct wlan_objmgr_psoc *psoc, + uint32_t scan_config, uint32_t fw_config); + +/** + * policy_mgr_update_dbs_scan_config() - Function to update + * DBS scan config in policy manager component + * @psoc: PSOC object information + * + * This function updates the DBS scan configurations required by + * policy manager + * + * Return: SUCCESS or FAILURE + * + */ +void policy_mgr_update_dbs_scan_config(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_update_dbs_fw_config() - Function to update DBS FW + * config in policy manager component + * @psoc: PSOC object information + * + * This function updates the DBS FW configurations required by + * policy manager + * + * 
Return: SUCCESS or FAILURE + * + */ +void policy_mgr_update_dbs_fw_config(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_update_dbs_req_config() - Function to update DBS + * request config in policy manager component + * @psoc: PSOC object information + * @scan_config: DBS scan config + * @fw_config: DBS FW config + * + * This function updates DBS request configurations required by + * policy manager + * + * Return: SUCCESS or FAILURE + * + */ +void policy_mgr_update_dbs_req_config(struct wlan_objmgr_psoc *psoc, + uint32_t scan_config, uint32_t fw_mode_config); + +/** + * policy_mgr_dump_dbs_hw_mode() - Function to dump DBS config + * @psoc: PSOC object information + * + * This function dumps the DBS configurations + * + * Return: SUCCESS or FAILURE + * + */ +void policy_mgr_dump_dbs_hw_mode(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_init_dbs_hw_mode() - Function to initialize DBS HW + * modes in policy manager component + * @psoc: PSOC object information + * @num_dbs_hw_modes: Number of HW modes + * @ev_wlan_dbs_hw_mode_list: HW list + * + * This function to initialize the DBS HW modes in policy + * manager + * + * Return: SUCCESS or FAILURE + * + */ +void policy_mgr_init_dbs_hw_mode(struct wlan_objmgr_psoc *psoc, + uint32_t num_dbs_hw_modes, + uint32_t *ev_wlan_dbs_hw_mode_list); + +/** + * policy_mgr_update_hw_mode_list() - Function to initialize DBS + * HW modes in policy manager component + * @psoc: PSOC object information + * @tgt_hdl: Target psoc information + * + * This function to initialize the DBS HW modes in policy + * manager + * + * Return: SUCCESS or FAILURE + * + */ +QDF_STATUS policy_mgr_update_hw_mode_list(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl); + +/** + * policy_mgr_update_hw_mode_index() - Function to update + * current HW mode in policy manager component + * @psoc: PSOC object information + * @new_hw_mode_index: index to new HW mode + * + * This function to update the current HW mode in policy 
manager + * + * Return: SUCCESS or FAILURE + * + */ +void policy_mgr_update_hw_mode_index(struct wlan_objmgr_psoc *psoc, + uint32_t new_hw_mode_index); + +/** + * policy_mgr_update_old_hw_mode_index() - Function to update + * old HW mode in policy manager component + * @psoc: PSOC object information + * @old_hw_mode_index: index to old HW mode + * + * This function to update the old HW mode in policy manager + * + * Return: SUCCESS or FAILURE + * + */ +void policy_mgr_update_old_hw_mode_index(struct wlan_objmgr_psoc *psoc, + uint32_t old_hw_mode_index); + +/** + * policy_mgr_update_new_hw_mode_index() - Function to update + * new HW mode in policy manager component + * @psoc: PSOC object information + * @new_hw_mode_index: index to new HW mode + * + * This function to update the new HW mode in policy manager + * + * Return: SUCCESS or FAILURE + * + */ +void policy_mgr_update_new_hw_mode_index(struct wlan_objmgr_psoc *psoc, + uint32_t new_hw_mode_index); + +/** + * policy_mgr_is_chan_ok_for_dnbs() - Function to check if a channel + * is OK for "Do Not Break Stream" + * @psoc: PSOC object information + * @channel: Channel to check. + * @ok: Pointer to flag in which status will be stored + * This function checks if a channel is OK for + * "Do Not Break Stream" + * Return: SUCCESS or FAILURE + */ +QDF_STATUS policy_mgr_is_chan_ok_for_dnbs(struct wlan_objmgr_psoc *psoc, + uint8_t channel, bool *ok); + +/** + * policy_mgr_get_hw_dbs_nss() - Computes DBS NSS + * @psoc: PSOC object information + * @nss_dbs: NSS info of both MAC0 and MAC1 + * This function computes NSS info of both MAC0 and MAC1 + * + * Return: uint32_t value signifies supported RF chains + */ +uint32_t policy_mgr_get_hw_dbs_nss(struct wlan_objmgr_psoc *psoc, + struct dbs_nss *nss_dbs); + +/** + * policy_mgr_is_dnsc_set - Check if user has set + * "Do_Not_Switch_Channel" for the vdev passed + * @vdev: vdev pointer + * + * Get "Do_Not_Switch_Channel" setting for the vdev passed. 
+ * + * Return: true for success, else false + */ +bool policy_mgr_is_dnsc_set(struct wlan_objmgr_vdev *vdev); + +/** + * policy_mgr_get_updated_scan_and_fw_mode_config() - Function + * to get latest scan & fw config for DBS + * @psoc: PSOC object information + * @scan_config: DBS related scan config + * @fw_mode_config: DBS related FW config + * @dual_mac_disable_ini: DBS related ini config + * This function returns the latest DBS configuration for + * connection & scan, sent to FW + * Return: SUCCESS or FAILURE + */ +QDF_STATUS policy_mgr_get_updated_scan_and_fw_mode_config( + struct wlan_objmgr_psoc *psoc, uint32_t *scan_config, + uint32_t *fw_mode_config, uint32_t dual_mac_disable_ini, + uint32_t channel_select_logic_conc); + +/** + * policy_mgr_is_safe_channel - Check if the channel is in LTE + * coex channel avoidance list + * @psoc: PSOC object information + * @channel: channel to be checked + * + * Check if the channel is in LTE coex channel avoidance list. + * + * Return: true for success, else false + */ +bool policy_mgr_is_safe_channel(struct wlan_objmgr_psoc *psoc, + uint8_t channel); + +/** + * policy_mgr_is_force_scc() - checks if SCC needs to be + * mandated + * @psoc: PSOC object information + * + * This function checks if SCC needs to be mandated or not + * + * Return: True if SCC to be mandated, false otherwise + */ +bool policy_mgr_is_force_scc(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_valid_sap_conc_channel_check() - checks & updates + * the channel SAP to come up on in case of STA+SAP concurrency + * @psoc: PSOC object information + * @con_ch: pointer to the channel on which sap will come up + * @sap_ch: initial channel for SAP + * + * This function checks & updates the channel SAP to come up on in + * case of STA+SAP concurrency + * Return: Success if SAP can come up on a channel + */ +QDF_STATUS policy_mgr_valid_sap_conc_channel_check( + struct wlan_objmgr_psoc *psoc, uint8_t *con_ch, uint8_t sap_ch); + +/** + * 
policy_mgr_get_alternate_channel_for_sap() - Get an alternate + * channel to move the SAP to + * @psoc: PSOC object information + * + * This function returns an alternate channel for SAP to move to + * Return: The new channel for SAP + */ +uint8_t policy_mgr_get_alternate_channel_for_sap( + struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_disallow_mcc() - Check for mcc + * + * @psoc: PSOC object information + * @channel: channel on which new connection is coming up + * + * When a new connection is about to come up check if current + * concurrency combination including the new connection is + * causing MCC + * + * Return: True if it is causing MCC + */ +bool policy_mgr_disallow_mcc(struct wlan_objmgr_psoc *psoc, + uint8_t channel); + +/** + * policy_mgr_mode_specific_get_channel() - Get channel for a + * connection type + * @psoc: PSOC object information + * @mode: Connection type + * + * Get channel for a connection type + * + * Return: channel number + */ +uint8_t policy_mgr_mode_specific_get_channel( + struct wlan_objmgr_psoc *psoc, enum policy_mgr_con_mode mode); + +/** + * policy_mgr_enable_disable_sap_mandatory_chan_list() - Update the value of + * enable_sap_mandatory_chan_list + * @psoc: Pointer to soc + * @val: value of enable_sap_mandatory_chan_list + * + * Update the value of enable_sap_mandatory_chan_list + * + * Return: void + */ +void policy_mgr_enable_disable_sap_mandatory_chan_list( + struct wlan_objmgr_psoc *psoc, bool val); + +/** + * policy_mgr_add_sap_mandatory_chan() - Add chan to SAP mandatory channel + * list + * @psoc: Pointer to soc + * @chan: Channel to be added + * + * Add chan to SAP mandatory channel list + * + * Return: None + */ +void policy_mgr_add_sap_mandatory_chan(struct wlan_objmgr_psoc *psoc, + uint8_t chan); + +/** + * policy_mgr_is_sap_mandatory_chan_list_enabled() - Return the SAP mandatory + * channel list enabled status + * @psoc: Pointer to soc + * + * Get the SAP mandatory channel list enabled status + * + * 
Return: Enable or Disable + */ +bool policy_mgr_is_sap_mandatory_chan_list_enabled( + struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_get_sap_mandatory_chan_list_len() - Return the SAP mandatory + * channel list len + * @psoc: Pointer to soc + * + * Get the SAP mandatory channel list len + * + * Return: Channel list length + */ +uint32_t policy_mgr_get_sap_mandatory_chan_list_len( + struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_init_sap_mandatory_2g_chan() - Init 2.4G SAP mandatory channel + * list + * @psoc: Pointer to soc + * + * Initialize the 2.4G SAP mandatory channels + * + * Return: None + */ +void policy_mgr_init_sap_mandatory_2g_chan(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_remove_sap_mandatory_chan() - Remove channel from SAP mandatory + * channel list + * @psoc: Pointer to soc + * @chan: channel to be removed from mandatory channel list + * + * Remove channel from SAP mandatory channel list + * + * Return: None + */ +void policy_mgr_remove_sap_mandatory_chan(struct wlan_objmgr_psoc *psoc, + uint8_t chan); +/* + * policy_set_cur_conc_system_pref - set current conc_system_pref + * @psoc: soc pointer + * + * Set the current concurrency system preference. + * + * Return: None + */ +void policy_mgr_set_cur_conc_system_pref(struct wlan_objmgr_psoc *psoc, + uint8_t conc_system_pref); +/** + * policy_mgr_get_cur_conc_system_pref - Get current conc_system_pref + * @psoc: soc pointer + * + * Get the current concurrent system preference. + * + * Return: conc_system_pref + */ +uint8_t policy_mgr_get_cur_conc_system_pref(struct wlan_objmgr_psoc *psoc); +/** + * policy_mgr_check_and_stop_opportunistic_timer - Get current + * state of opportunistic timer, if running, stop it and take + * action + * @psoc: soc pointer + * @id: Session/vdev id + * + * Get the current state of opportunistic timer, if it is + * running, stop it and take action. 
+ * + * Return: None + */ +void policy_mgr_check_and_stop_opportunistic_timer( + struct wlan_objmgr_psoc *psoc, uint8_t id); + +/** + * policy_mgr_set_weight_of_dfs_passive_channels_to_zero() - set weight of dfs + * and passive channels to 0 + * @psoc: pointer to soc + * @pcl_channels: preferred channel list + * @len: length of preferred channel list + * @weight_list: preferred channel weight list + * @weight_len: length of weight list + * This function set the weight of dfs and passive channels to 0 + * + * Return: None + */ +void policy_mgr_set_weight_of_dfs_passive_channels_to_zero( + struct wlan_objmgr_psoc *psoc, uint8_t *pcl_channels, + uint32_t *len, uint8_t *weight_list, uint32_t weight_len); + +/** + * policy_mgr_is_sta_sap_scc_allowed_on_dfs_chan() - check if sta+sap scc + * allowed on dfs chan + * @psoc: pointer to soc + * This function is used to check if sta+sap scc allowed on dfs channel + * + * Return: true if sta+sap scc is allowed on dfs channel, otherwise false + */ +bool policy_mgr_is_sta_sap_scc_allowed_on_dfs_chan( + struct wlan_objmgr_psoc *psoc); +/** + * policy_mgr_is_sta_connected_2g() - check if sta connected in 2g + * @psoc: pointer to soc + * + * Return: true if sta is connected in 2g else false + */ +bool policy_mgr_is_sta_connected_2g(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_trim_acs_channel_list() - Trim the ACS channel list based + * on the number of active station connections + * @org_ch_list: ACS channel list from user space + * @org_ch_list_count: ACS channel count from user space + * + * Return: None + */ +void policy_mgr_trim_acs_channel_list(struct wlan_objmgr_psoc *psoc, + uint8_t *org_ch_list, uint8_t *org_ch_list_count); + +/** + * policy_mgr_is_hwmode_set_for_given_chnl() - to check for given channel + * if the hw mode is properly set. + * @psoc: pointer to psoc + * @channel: given channel + * + * If HW mode is properly set for given channel then it returns true else + * it returns false. 
+ * For example, when 2x2 DBS is supported and if the first connection is + * coming up on 2G band then driver expects DBS HW mode to be set first + * before the connection can be established. Driver can call this API to + * find-out if HW mode is set properly. + * + * Return: true if HW mode is set properly else false + */ +bool policy_mgr_is_hwmode_set_for_given_chnl(struct wlan_objmgr_psoc *psoc, + uint8_t channel); +/* + * policy_mgr_get_connection_info() - Get info of all active connections + * @info: Pointer to connection info + * + * Return: Connection count + */ +uint32_t policy_mgr_get_connection_info(struct wlan_objmgr_psoc *psoc, + struct connection_info *info); +/** + * policy_mgr_register_mode_change_cb() - Register mode change callback with + * policy manager + * @callback: HDD callback to be registered + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_register_mode_change_cb(struct wlan_objmgr_psoc *psoc, + send_mode_change_event_cb mode_change_cb); +/** + * policy_mgr_deregister_mode_change_cb() - Deregister mode change callback with + * policy manager + * @callback: HDD callback to be registered + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_deregister_mode_change_cb(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_allow_sap_go_concurrency() - check whether SAP/GO concurrency is + * allowed. + * @psoc: pointer to soc + * @policy_mgr_con_mode: operating mode of interface to be checked + * @channel: new operating channel of the interface to be checked + * @vdev_id: vdev id of the connection to be checked, 0xff for new connection + * + * Checks whether new channel SAP/GO can co-exist with the channel of existing + * SAP/GO connection. This API mainly used for two purposes: + * + * 1) When new GO/SAP session is coming up and needs to check if this session's + * channel can co-exist with existing GO/SAP sessions. 
For example, + * when single radio platform comes, MCC for SAP/GO+SAP/GO is not supported, in + * such case this API should prevent bringing the second connection. + * + * 2) There is already existing SAP+GO combination but due to upper layer + * notifying LTE-COEX event or sending command to move one of the connections + * to different channel. In such cases before moving existing connection to new + * channel, check if new channel can co-exist with the other existing + * connection. For example, one SAP1 is on channel-6 and second SAP2 is on + * channel-36 and lets say they are doing DBS, and lets say upper layer sends + * LTE-COEX to move SAP1 from channel-6 to channel-149. In this case, SAP1 and + * SAP2 will end up doing MCC which may not be desirable result. such cases + * will be prevented with this API. + * + * Return: true or false + */ +bool policy_mgr_allow_sap_go_concurrency(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_con_mode mode, + uint8_t channel, + uint32_t vdev_id); + +/** + * policy_mgr_allow_multiple_sta_connections() - API to get FW support + * @psoc: Pointer to soc + * + * This function checks FW support for simultaneous connections on + * concurrent STA interfaces. + * + * Return: true if supports else false. 
+ */
+bool policy_mgr_allow_multiple_sta_connections(struct wlan_objmgr_psoc *psoc);
+
+/**
+ * policy_mgr_dual_beacon_on_single_mac_scc_capable() - get capability that
+ * whether support dual beacon on same channel on single MAC
+ * @psoc: pointer to soc
+ *
+ * Return: bool: capable
+ */
+bool policy_mgr_dual_beacon_on_single_mac_scc_capable(
+		struct wlan_objmgr_psoc *psoc);
+
+/**
+ * policy_mgr_dual_beacon_on_single_mac_mcc_capable() - get capability that
+ * whether support dual beacon on different channel on single MAC
+ * @psoc: pointer to soc
+ *
+ * Return: bool: capable
+ */
+bool policy_mgr_dual_beacon_on_single_mac_mcc_capable(
+		struct wlan_objmgr_psoc *psoc);
+
+/**
+ * policy_mgr_sta_sap_scc_on_lte_coex_chan() - get capability that
+ * whether support sta sap scc on lte coex chan
+ * @psoc: pointer to soc
+ *
+ * Return: bool: capable
+ */
+bool policy_mgr_sta_sap_scc_on_lte_coex_chan(
+		struct wlan_objmgr_psoc *psoc);
+
+/**
+ * policy_mgr_is_valid_for_channel_switch() - check for valid channel for
+ * channel switch.
+ * @psoc: pointer to psoc
+ * @channel: channel to be validated.
+ * This function validates whether the given channel is valid for channel
+ * switch.
+ *
+ * Return: true or false
+ */
+bool policy_mgr_is_valid_for_channel_switch(struct wlan_objmgr_psoc *psoc,
+				uint8_t channel);
+
+/**
+ * policy_mgr_update_user_config_sap_chan() - Update user configured channel
+ * @psoc: pointer to psoc
+ * @channel: channel to be updated
+ *
+ * Return: void
+ **/
+void policy_mgr_update_user_config_sap_chan(
+			struct wlan_objmgr_psoc *psoc, uint32_t channel);
+
+/**
+ * policy_mgr_is_sap_restart_required_after_sta_disconnect() - is sap restart
+ * required
+ * after sta disconnection
+ * @psoc: psoc object data
+ * @intf_ch: sap channel
+ *
+ * Check if SAP should be moved to a non dfs channel after STA disconnection.
+ * This API is applicable only for STA+SAP SCC and ini 'sta_sap_scc_on_dfs_chan'
+ * or 'sta_sap_scc_on_lte_coex_chan' is enabled.
+ *
+ * Return: true if sap restart is required, otherwise false
+ */
+bool policy_mgr_is_sap_restart_required_after_sta_disconnect(
+		struct wlan_objmgr_psoc *psoc, uint8_t *intf_ch);
+
+/**
+ * policy_mgr_is_sta_sap_scc() - check whether SAP is doing SCC with
+ * STA
+ * @psoc: pointer to psoc
+ * @sap_ch: operating channel of SAP
+ * This function checks whether SAP is doing SCC with STA
+ *
+ * Return: true or false
+ */
+bool policy_mgr_is_sta_sap_scc(struct wlan_objmgr_psoc *psoc, uint8_t sap_ch);
+
+#endif /* __WLAN_POLICY_MGR_API_H */
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/inc/wlan_policy_mgr_public_struct.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/inc/wlan_policy_mgr_public_struct.h
new file mode 100644
index 0000000000000000000000000000000000000000..c4080778ae758eee008cb8ccbdd825276d9b3eb8
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/inc/wlan_policy_mgr_public_struct.h
@@ -0,0 +1,1047 @@
+/*
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */ + +#ifndef __WLAN_POLICY_MGR_PUBLIC_STRUCT_H +#define __WLAN_POLICY_MGR_PUBLIC_STRUCT_H + +/** + * DOC: wlan_policy_mgr_public_struct.h + * + * Concurrenct Connection Management entity + */ + +/* Include files */ +#include + +#define POLICY_MGR_MAX_CHANNEL_LIST 128 + +/** + * Some max value greater than the max length of the channel list + */ +#define MAX_WEIGHT_OF_PCL_CHANNELS 255 +/** + * Some fixed weight difference between the groups + */ +#define PCL_GROUPS_WEIGHT_DIFFERENCE 20 + +/** + * Currently max, only 3 groups are possible as per 'enum policy_mgr_pcl_type'. + * i.e., in a PCL only 3 groups of channels can be present + * e.g., SCC channel on 2.4 Ghz, SCC channel on 5 Ghz & 5 Ghz channels. + * Group 1 has highest priority, group 2 has the next higher priority + * and so on. + */ +#define WEIGHT_OF_GROUP1_PCL_CHANNELS MAX_WEIGHT_OF_PCL_CHANNELS +#define WEIGHT_OF_GROUP2_PCL_CHANNELS \ + (WEIGHT_OF_GROUP1_PCL_CHANNELS - PCL_GROUPS_WEIGHT_DIFFERENCE) +#define WEIGHT_OF_GROUP3_PCL_CHANNELS \ + (WEIGHT_OF_GROUP2_PCL_CHANNELS - PCL_GROUPS_WEIGHT_DIFFERENCE) +#define WEIGHT_OF_GROUP4_PCL_CHANNELS \ + (WEIGHT_OF_GROUP3_PCL_CHANNELS - PCL_GROUPS_WEIGHT_DIFFERENCE) + +#define WEIGHT_OF_NON_PCL_CHANNELS 1 +#define WEIGHT_OF_DISALLOWED_CHANNELS 0 + +#define MAX_MAC 2 + +#define MAX_NUMBER_OF_CONC_CONNECTIONS 3 + +typedef int (*send_mode_change_event_cb)(void); + +/** + * enum hw_mode_ss_config - Possible spatial stream configuration + * @HW_MODE_SS_0x0: Unused Tx and Rx of MAC + * @HW_MODE_SS_1x1: 1 Tx SS and 1 Rx SS + * @HW_MODE_SS_2x2: 2 Tx SS and 2 Rx SS + * @HW_MODE_SS_3x3: 3 Tx SS and 3 Rx SS + * @HW_MODE_SS_4x4: 4 Tx SS and 4 Rx SS + * + * Note: Right now only 1x1 and 2x2 are being supported. Other modes should + * be added when supported. Asymmetric configuration like 1x2, 2x1 are also + * not supported now. But, they are still valid. Right now, Tx/Rx SS support is + * 4 bits long. 
So, we can go upto 15x15 + */ +enum hw_mode_ss_config { + HW_MODE_SS_0x0, + HW_MODE_SS_1x1, + HW_MODE_SS_2x2, + HW_MODE_SS_3x3, + HW_MODE_SS_4x4, +}; + +/** + * enum hw_mode_dbs_capab - DBS HW mode capability + * @HW_MODE_DBS_NONE: Non DBS capable + * @HW_MODE_DBS: DBS capable + */ +enum hw_mode_dbs_capab { + HW_MODE_DBS_NONE, + HW_MODE_DBS, +}; + +/** + * enum hw_mode_agile_dfs_capab - Agile DFS HW mode capability + * @HW_MODE_AGILE_DFS_NONE: Non Agile DFS capable + * @HW_MODE_AGILE_DFS: Agile DFS capable + */ +enum hw_mode_agile_dfs_capab { + HW_MODE_AGILE_DFS_NONE, + HW_MODE_AGILE_DFS, +}; + +/** + * enum hw_mode_sbs_capab - SBS HW mode capability + * @HW_MODE_SBS_NONE: Non SBS capable + * @HW_MODE_SBS: SBS capable + */ +enum hw_mode_sbs_capab { + HW_MODE_SBS_NONE, + HW_MODE_SBS, +}; + +/** + * enum policy_mgr_pcl_group_id - Identifies the pcl groups to be used + * @POLICY_MGR_PCL_GROUP_ID1_ID2: Use weights of group1 and group2 + * @POLICY_MGR_PCL_GROUP_ID2_ID3: Use weights of group2 and group3 + * @POLICY_MGR_PCL_GROUP_ID3_ID4: Use weights of group3 and group4 + * + * Since maximum of three groups are possible, this will indicate which + * PCL group needs to be used. 
+ */ +enum policy_mgr_pcl_group_id { + POLICY_MGR_PCL_GROUP_ID1_ID2, + POLICY_MGR_PCL_GROUP_ID2_ID3, + POLICY_MGR_PCL_GROUP_ID3_ID4, +}; + +/** + * policy_mgr_pcl_channel_order - Order in which the PCL is requested + * @POLICY_MGR_PCL_ORDER_NONE: no order + * @POLICY_MGR_PCL_ORDER_24G_THEN_5G: 2.4 Ghz channel followed by 5 Ghz channel + * @POLICY_MGR_PCL_ORDER_5G_THEN_2G: 5 Ghz channel followed by 2.4 Ghz channel + * + * Order in which the PCL is requested + */ +enum policy_mgr_pcl_channel_order { + POLICY_MGR_PCL_ORDER_NONE, + POLICY_MGR_PCL_ORDER_24G_THEN_5G, + POLICY_MGR_PCL_ORDER_5G_THEN_2G, +}; + +/** + * enum policy_mgr_max_rx_ss - Maximum number of receive spatial streams + * @POLICY_MGR_RX_NSS_1: Receive Nss = 1 + * @POLICY_MGR_RX_NSS_2: Receive Nss = 2 + * @POLICY_MGR_RX_NSS_3: Receive Nss = 3 + * @POLICY_MGR_RX_NSS_4: Receive Nss = 4 + * @POLICY_MGR_RX_NSS_5: Receive Nss = 5 + * @POLICY_MGR_RX_NSS_6: Receive Nss = 6 + * @POLICY_MGR_RX_NSS_7: Receive Nss = 7 + * @POLICY_MGR_RX_NSS_8: Receive Nss = 8 + * + * Indicates the maximum number of spatial streams that the STA can receive + */ +enum policy_mgr_max_rx_ss { + POLICY_MGR_RX_NSS_1 = 0, + POLICY_MGR_RX_NSS_2 = 1, + POLICY_MGR_RX_NSS_3 = 2, + POLICY_MGR_RX_NSS_4 = 3, + POLICY_MGR_RX_NSS_5 = 4, + POLICY_MGR_RX_NSS_6 = 5, + POLICY_MGR_RX_NSS_7 = 6, + POLICY_MGR_RX_NSS_8 = 7, + POLICY_MGR_RX_NSS_MAX, +}; + +/** + * enum policy_mgr_chain_mode - Chain Mask tx & rx combination. + * + * @POLICY_MGR_ONE_ONE: One for Tx, One for Rx + * @POLICY_MGR_TWO_TWO: Two for Tx, Two for Rx + * @POLICY_MGR_MAX_NO_OF_CHAIN_MODE: Max place holder + * + * These are generic IDs that identify the various roles + * in the software system + */ +enum policy_mgr_chain_mode { + POLICY_MGR_ONE_ONE = 0, + POLICY_MGR_TWO_TWO, + POLICY_MGR_MAX_NO_OF_CHAIN_MODE +}; + +/** + * enum policy_mgr_conc_priority_mode - t/p, powersave, latency. 
+ * + * @PM_THROUGHPUT: t/p is the priority + * @PM_POWERSAVE: powersave is the priority + * @PM_LATENCY: latency is the priority + * @PM_MAX_CONC_PRIORITY_MODE: Max place holder + * + * These are generic IDs that identify the various roles + * in the software system + */ +enum policy_mgr_conc_priority_mode { + PM_THROUGHPUT = 0, + PM_POWERSAVE, + PM_LATENCY, + PM_MAX_CONC_PRIORITY_MODE +}; + +/** + * enum policy_mgr_con_mode - concurrency mode for PCL table + * + * @PM_STA_MODE: station mode + * @PM_SAP_MODE: SAP mode + * @PM_P2P_CLIENT_MODE: P2P client mode + * @PM_P2P_GO_MODE: P2P Go mode + * @PM_IBSS_MODE: IBSS mode + * @PM_NDI_MODE: NDI mode + * @PM_MAX_NUM_OF_MODE: max value place holder + */ +enum policy_mgr_con_mode { + PM_STA_MODE = 0, + PM_SAP_MODE, + PM_P2P_CLIENT_MODE, + PM_P2P_GO_MODE, + PM_IBSS_MODE, + PM_NDI_MODE, + PM_MAX_NUM_OF_MODE +}; + +/** + * enum policy_mgr_mac_use - MACs that are used + * @POLICY_MGR_MAC0: Only MAC0 is used + * @POLICY_MGR_MAC1: Only MAC1 is used + * @POLICY_MGR_MAC0_AND_MAC1: Both MAC0 and MAC1 are used + */ +enum policy_mgr_mac_use { + POLICY_MGR_MAC0 = 1, + POLICY_MGR_MAC1 = 2, + POLICY_MGR_MAC0_AND_MAC1 = 3 +}; + +/** + * enum policy_mgr_pcl_type - Various types of Preferred channel list (PCL). 
+ * + * @PM_NONE: No channel preference + * @PM_24G: 2.4 Ghz channels only + * @PM_5G: 5 Ghz channels only + * @PM_SCC_CH: SCC channel only + * @PM_MCC_CH: MCC channels only + * @PM_SBS_CH: SBS channels only + * @PM_SCC_CH_24G: SCC channel & 2.4 Ghz channels + * @PM_SCC_CH_5G: SCC channel & 5 Ghz channels + * @PM_24G_SCC_CH: 2.4 Ghz channels & SCC channel + * @PM_5G_SCC_CH: 5 Ghz channels & SCC channel + * @PM_SCC_ON_5_SCC_ON_24_24G: SCC channel on 5 Ghz, SCC + * channel on 2.4 Ghz & 2.4 Ghz channels + * @PM_SCC_ON_5_SCC_ON_24_5G: SCC channel on 5 Ghz, SCC channel + * on 2.4 Ghz & 5 Ghz channels + * @PM_SCC_ON_24_SCC_ON_5_24G: SCC channel on 2.4 Ghz, SCC + * channel on 5 Ghz & 2.4 Ghz channels + * @PM_SCC_ON_24_SCC_ON_5_5G: SCC channel on 2.4 Ghz, SCC + * channel on 5 Ghz & 5 Ghz channels + * @PM_SCC_ON_5_SCC_ON_24: SCC channel on 5 Ghz, SCC channel on + * 2.4 Ghz + * @PM_SCC_ON_24_SCC_ON_5: SCC channel on 2.4 Ghz, SCC channel + * on 5 Ghz + * @PM_MCC_CH_24G: MCC channels & 2.4 Ghz channels + * @PM_MCC_CH_5G: MCC channels & 5 Ghz channels + * @PM_24G_MCC_CH: 2.4 Ghz channels & MCC channels + * @PM_5G_MCC_CH: 5 Ghz channels & MCC channels + * @PM_SBS_CH_5G: SBS channels & rest of 5 Ghz channels + * @PM_24G_SCC_CH_SBS_CH: 2.4 Ghz channels, SCC channel & SBS channels + * @PM_24G_SCC_CH_SBS_CH_5G: 2.4 Ghz channels, SCC channel, + * SBS channels & rest of the 5G channels + * @PM_24G_SBS_CH_MCC_CH: 2.4 Ghz channels, SBS channels & MCC channels + * @PM_MAX_PCL_TYPE: Max place holder + * + * These are generic IDs that identify the various roles + * in the software system + */ +enum policy_mgr_pcl_type { + PM_NONE = 0, + PM_24G, + PM_5G, + PM_SCC_CH, + PM_MCC_CH, + PM_SBS_CH, + PM_SCC_CH_24G, + PM_SCC_CH_5G, + PM_24G_SCC_CH, + PM_5G_SCC_CH, + PM_SCC_ON_5_SCC_ON_24_24G, + PM_SCC_ON_5_SCC_ON_24_5G, + PM_SCC_ON_24_SCC_ON_5_24G, + PM_SCC_ON_24_SCC_ON_5_5G, + PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_24_SCC_ON_5, + PM_MCC_CH_24G, + PM_MCC_CH_5G, + PM_24G_MCC_CH, + PM_5G_MCC_CH, + 
PM_SBS_CH_5G,
+	PM_24G_SCC_CH_SBS_CH,
+	PM_24G_SCC_CH_SBS_CH_5G,
+	PM_24G_SBS_CH_MCC_CH,
+
+	PM_MAX_PCL_TYPE
+};
+
+/**
+ * enum policy_mgr_one_connection_mode - Combination of first connection
+ * type, band & spatial stream used.
+ *
+ * @PM_STA_24_1x1: STA connection using 1x1@2.4 Ghz
+ * @PM_STA_24_2x2: STA connection using 2x2@2.4 Ghz
+ * @PM_STA_5_1x1: STA connection using 1x1@5 Ghz
+ * @PM_STA_5_2x2: STA connection using 2x2@5 Ghz
+ * @PM_P2P_CLI_24_1x1: P2P Client connection using 1x1@2.4 Ghz
+ * @PM_P2P_CLI_24_2x2: P2P Client connection using 2x2@2.4 Ghz
+ * @PM_P2P_CLI_5_1x1: P2P Client connection using 1x1@5 Ghz
+ * @PM_P2P_CLI_5_2x2: P2P Client connection using 2x2@5 Ghz
+ * @PM_P2P_GO_24_1x1: P2P GO connection using 1x1@2.4 Ghz
+ * @PM_P2P_GO_24_2x2: P2P GO connection using 2x2@2.4 Ghz
+ * @PM_P2P_GO_5_1x1: P2P GO connection using 1x1@5 Ghz
+ * @PM_P2P_GO_5_2x2: P2P GO connection using 2x2@5 Ghz
+ * @PM_SAP_24_1x1: SAP connection using 1x1@2.4 Ghz
+ * @PM_SAP_24_2x2: SAP connection using 2x2@2.4 Ghz
+ * @PM_SAP_5_1x1: SAP connection using 1x1@5 Ghz
+ * @PM_SAP_5_2x2: SAP connection using 2x2@5 Ghz
+ * @PM_IBSS_24_1x1: IBSS connection using 1x1@2.4 Ghz
+ * @PM_IBSS_24_2x2: IBSS connection using 2x2@2.4 Ghz
+ * @PM_IBSS_5_1x1: IBSS connection using 1x1@5 Ghz
+ * @PM_IBSS_5_2x2: IBSS connection using 2x2@5 Ghz
+ * @PM_MAX_ONE_CONNECTION_MODE: Max place holder
+ *
+ * These are generic IDs that identify the various roles
+ * in the software system
+ */
+enum policy_mgr_one_connection_mode {
+	PM_STA_24_1x1 = 0,
+	PM_STA_24_2x2,
+	PM_STA_5_1x1,
+	PM_STA_5_2x2,
+	PM_P2P_CLI_24_1x1,
+	PM_P2P_CLI_24_2x2,
+	PM_P2P_CLI_5_1x1,
+	PM_P2P_CLI_5_2x2,
+	PM_P2P_GO_24_1x1,
+	PM_P2P_GO_24_2x2,
+	PM_P2P_GO_5_1x1,
+	PM_P2P_GO_5_2x2,
+	PM_SAP_24_1x1,
+	PM_SAP_24_2x2,
+	PM_SAP_5_1x1,
+	PM_SAP_5_2x2,
+	PM_IBSS_24_1x1,
+	PM_IBSS_24_2x2,
+	PM_IBSS_5_1x1,
+	PM_IBSS_5_2x2,
+
+	PM_MAX_ONE_CONNECTION_MODE
+};
+
+/**
+ * enum policy_mgr_two_connection_mode - Combination of first two
+ * connections type, concurrency state, band & spatial stream + * used. + * + * @PM_STA_SAP_SCC_24_1x1: STA & SAP connection on SCC using + * 1x1@2.4 Ghz + * @PM_STA_SAP_SCC_24_2x2: STA & SAP connection on SCC using + * 2x2@2.4 Ghz + * @PM_STA_SAP_MCC_24_1x1: STA & SAP connection on MCC using + * 1x1@2.4 Ghz + * @PM_STA_SAP_MCC_24_2x2: STA & SAP connection on MCC using + * 2x2@2.4 Ghz + * @PM_STA_SAP_SCC_5_1x1: STA & SAP connection on SCC using + * 1x1@5 Ghz + * @PM_STA_SAP_SCC_5_2x2: STA & SAP connection on SCC using + * 2x2@5 Ghz + * @PM_STA_SAP_MCC_5_1x1: STA & SAP connection on MCC using + * 1x1@5 Ghz + * @PM_STA_SAP_MCC_5_2x2: STA & SAP connection on MCC using + * 2x2@5 Ghz + * @PM_STA_SAP_DBS_1x1: STA & SAP connection on DBS using 1x1 + * @PM_STA_SAP_DBS_2x2: STA & SAP connection on DBS using 2x2 + * @PM_STA_SAP_SBS_5_1x1: STA & SAP connection on 5G SBS using 1x1 + * @PM_STA_P2P_GO_SCC_24_1x1: STA & P2P GO connection on SCC + * using 1x1@2.4 Ghz + * @PM_STA_P2P_GO_SCC_24_2x2: STA & P2P GO connection on SCC + * using 2x2@2.4 Ghz + * @PM_STA_P2P_GO_MCC_24_1x1: STA & P2P GO connection on MCC + * using 1x1@2.4 Ghz + * @PM_STA_P2P_GO_MCC_24_2x2: STA & P2P GO connection on MCC + * using 2x2@2.4 Ghz + * @PM_STA_P2P_GO_SCC_5_1x1: STA & P2P GO connection on SCC + * using 1x1@5 Ghz + * @PM_STA_P2P_GO_SCC_5_2x2: STA & P2P GO connection on SCC + * using 2x2@5 Ghz + * @PM_STA_P2P_GO_MCC_5_1x1: STA & P2P GO connection on MCC + * using 1x1@5 Ghz + * @PM_STA_P2P_GO_MCC_5_2x2: STA & P2P GO connection on MCC + * using 2x2@5 Ghz + * @PM_STA_P2P_GO_DBS_1x1: STA & P2P GO connection on DBS using + * 1x1 + * @PM_STA_P2P_GO_DBS_2x2: STA & P2P GO connection on DBS using + * 2x2 + * @PM_STA_P2P_GO_SBS_5_1x1: STA & P2P GO connection on 5G SBS + * using 1x1 + * @PM_STA_P2P_CLI_SCC_24_1x1: STA & P2P CLI connection on SCC + * using 1x1@2.4 Ghz + * @PM_STA_P2P_CLI_SCC_24_2x2: STA & P2P CLI connection on SCC + * using 2x2@2.4 Ghz + * @PM_STA_P2P_CLI_MCC_24_1x1: STA & P2P CLI connection on 
MCC + * using 1x1@2.4 Ghz + * @PM_STA_P2P_CLI_MCC_24_2x2: STA & P2P CLI connection on MCC + * using 2x2@2.4 Ghz + * @PM_STA_P2P_CLI_SCC_5_1x1: STA & P2P CLI connection on SCC + * using 1x1@5 Ghz + * @PM_STA_P2P_CLI_SCC_5_2x2: STA & P2P CLI connection on SCC + * using 2x2@5 Ghz + * @PM_STA_P2P_CLI_MCC_5_1x1: STA & P2P CLI connection on MCC + * using 1x1@5 Ghz + * @PM_STA_P2P_CLI_MCC_5_2x2: STA & P2P CLI connection on MCC + * using 2x2@5 Ghz + * @PM_STA_P2P_CLI_DBS_1x1: STA & P2P CLI connection on DBS + * using 1x1 + * @PM_STA_P2P_CLI_DBS_2x2: STA & P2P CLI connection on DBS + * using 2x2 + * @PM_STA_P2P_CLI_SBS_5_1x1: STA & P2P CLI connection on 5G + * SBS using 1x1 + * @PM_P2P_GO_P2P_CLI_SCC_24_1x1: P2P GO & CLI connection on + * SCC using 1x1@2.4 Ghz + * @PM_P2P_GO_P2P_CLI_SCC_24_2x2: P2P GO & CLI connection on + * SCC using 2x2@2.4 Ghz + * @PM_P2P_GO_P2P_CLI_MCC_24_1x1: P2P GO & CLI connection on + * MCC using 1x1@2.4 Ghz + * @PM_P2P_GO_P2P_CLI_MCC_24_2x2: P2P GO & CLI connection on + * MCC using 2x2@2.4 Ghz + * @PM_P2P_GO_P2P_CLI_SCC_5_1x1: P2P GO & CLI connection on + * SCC using 1x1@5 Ghz + * @PM_P2P_GO_P2P_CLI_SCC_5_2x2: P2P GO & CLI connection on + * SCC using 2x2@5 Ghz + * @PM_P2P_GO_P2P_CLI_MCC_5_1x1: P2P GO & CLI connection on + * MCC using 1x1@5 Ghz + * @PM_P2P_GO_P2P_CLI_MCC_5_2x2: P2P GO & CLI connection on + * MCC using 2x2@5 Ghz + * @PM_P2P_GO_P2P_CLI_DBS_1x1: P2P GO & CLI connection on DBS + * using 1x1 + * @PM_P2P_GO_P2P_CLI_DBS_2x2: P2P GO & P2P CLI connection + * on DBS using 2x2 + * @PM_P2P_GO_P2P_CLI_SBS_5_1x1: P2P GO & P2P CLI connection + * on 5G SBS using 1x1 + * @PM_P2P_GO_SAP_SCC_24_1x1: P2P GO & SAP connection on + * SCC using 1x1@2.4 Ghz + * @PM_P2P_GO_SAP_SCC_24_2x2: P2P GO & SAP connection on + * SCC using 2x2@2.4 Ghz + * @PM_P2P_GO_SAP_MCC_24_1x1: P2P GO & SAP connection on + * MCC using 1x1@2.4 Ghz + * @PM_P2P_GO_SAP_MCC_24_2x2: P2P GO & SAP connection on + * MCC using 2x2@2.4 Ghz + * @PM_P2P_GO_SAP_SCC_5_1x1: P2P GO & SAP connection 
on + * SCC using 1x1@5 Ghz + * @PM_P2P_GO_SAP_SCC_5_2x2: P2P GO & SAP connection on + * SCC using 2x2@5 Ghz + * @PM_P2P_GO_SAP_MCC_5_1x1: P2P GO & SAP connection on + * MCC using 1x1@5 Ghz + * @PM_P2P_GO_SAP_MCC_5_2x2: P2P GO & SAP connection on + * MCC using 2x2@5 Ghz + * @PM_P2P_GO_SAP_DBS_1x1: P2P GO & SAP connection on DBS using + * 1x1 + * @PM_P2P_GO_SAP_DBS_2x2: P2P GO & SAP connection on DBS using + * 2x2 + * @PM_P2P_GO_SAP_SBS_5_1x1: P2P GO & SAP connection on 5G SBS + * using 1x1 + * @PM_P2P_CLI_SAP_SCC_24_1x1: CLI & SAP connection on SCC using + * 1x1@2.4 Ghz + * @PM_P2P_CLI_SAP_SCC_24_2x2: CLI & SAP connection on SCC using + * 2x2@2.4 Ghz + * @PM_P2P_CLI_SAP_MCC_24_1x1: CLI & SAP connection on MCC using + * 1x1@2.4 Ghz + * @PM_P2P_CLI_SAP_MCC_24_2x2: CLI & SAP connection on MCC using + * 2x2@2.4 Ghz + * @PM_P2P_CLI_SAP_SCC_5_1x1: CLI & SAP connection on SCC using + * 1x1@5 Ghz + * @PM_P2P_CLI_SAP_SCC_5_2x2: CLI & SAP connection on SCC using + * 2x2@5 Ghz + * @PM_P2P_CLI_SAP_MCC_5_1x1: CLI & SAP connection on MCC using + * 1x1@5 Ghz + * @PM_P2P_CLI_SAP_MCC_5_2x2: CLI & SAP connection on MCC using + * 2x2@5 Ghz + * @POLICY_MGR_P2P_STA_SAP_MCC_24_5_1x1: CLI and SAP connecting on MCC + * in 2.4 and 5GHz 1x1 + * @POLICY_MGR_P2P_STA_SAP_MCC_24_5_2x2: CLI and SAP connecting on MCC + * in 2.4 and 5GHz 2x2 + * @PM_P2P_CLI_SAP_DBS_1x1,: CLI & SAP connection on DBS using 1x1 + * @PM_P2P_CLI_SAP_DBS_2x2: P2P CLI & SAP connection on DBS using + * 2x2 + * @PM_P2P_CLI_SAP_SBS_5_1x1: P2P CLI & SAP connection on 5G SBS + * using 1x1 + * @PM_SAP_SAP_SCC_24_1x1: SAP & SAP connection on + * SCC using 1x1@2.4 Ghz + * @PM_SAP_SAP_SCC_24_2x2: SAP & SAP connection on + * SCC using 2x2@2.4 Ghz + * @PM_SAP_SAP_MCC_24_1x1: SAP & SAP connection on + * MCC using 1x1@2.4 Ghz + * @PM_SAP_SAP_MCC_24_2x2: SAP & SAP connection on + * MCC using 2x2@2.4 Ghz + * @PM_SAP_SAP_SCC_5_1x1: SAP & SAP connection on + * SCC using 1x1@5 Ghz + * @PM_SAP_SAP_SCC_5_2x2: SAP & SAP connection on + * SCC 
using 2x2@5 Ghz + * @PM_SAP_SAP_MCC_5_1x1: SAP & SAP connection on + * MCC using 1x1@5 Ghz + * @PM_SAP_SAP_MCC_5_2x2: SAP & SAP connection on + * MCC using 2x2@5 Ghz + * @PM_SAP_SAP_MCC_24_5_1x1: SAP & SAP connection on + * MCC in 2.4 and 5GHz 1x1 + * @PM_SAP_SAP_MCC_24_5_2x2: SAP & SAP connection on + * MCC in 2.4 and 5GHz 2x2 + * @PM_SAP_SAP_DBS_1x1: SAP & SAP connection on DBS using + * 1x1 + * @PM_SAP_SAP_DBS_2x2: SAP & SAP connection on DBS using 2x2 + * @PM_SAP_SAP_SBS_5_1x1: SAP & SAP connection on 5G SBS using 1x1 + * @PM_STA_STA_SCC_24_1x1: STA & STA connection on + * SCC using 1x1@2.4 Ghz + * @PM_STA_STA_SCC_24_2x2: STA & STA connection on + * SCC using 2x2@2.4 Ghz + * @PM_STA_STA_MCC_24_1x1: STA & STA connection on + * MCC using 1x1@2.4 Ghz + * @PM_STA_STA_MCC_24_2x2: STA & STA connection on + * MCC using 2x2@2.4 Ghz + * @PM_STA_STA_SCC_5_1x1: STA & STA connection on + * SCC using 1x1@5 Ghz + * @PM_STA_STA_SCC_5_2x2: STA & STA connection on + * SCC using 2x2@5 Ghz + * @PM_STA_STA_MCC_5_1x1: STA & STA connection on + * MCC using 1x1@5 Ghz + * @PM_STA_STA_MCC_5_2x2: STA & STA connection on + * MCC using 2x2@5 Ghz + * @PM_STA_STA_MCC_24_5_1x1: STA & STA connection on + * MCC in 2.4 and 5GHz 1x1 + * @PM_STA_STA_MCC_24_5_2x2: STA & STA connection on + * MCC in 2.4 and 5GHz 2x2 + * @PM_STA_STA_DBS_1x1: STA & STA connection on DBS using + * 1x1 + * @PM_STA_STA_DBS_2x2: STA & STA connection on DBS using 2x2 + * @PM_STA_STA_SBS_5_1x1: STA & STA connection on 5G SBS using 1x1 + * + * These are generic IDs that identify the various roles in the + * software system + */ +enum policy_mgr_two_connection_mode { + PM_STA_SAP_SCC_24_1x1 = 0, + PM_STA_SAP_SCC_24_2x2, + PM_STA_SAP_MCC_24_1x1, + PM_STA_SAP_MCC_24_2x2, + PM_STA_SAP_SCC_5_1x1, + PM_STA_SAP_SCC_5_2x2, + PM_STA_SAP_MCC_5_1x1, + PM_STA_SAP_MCC_5_2x2, + PM_STA_SAP_MCC_24_5_1x1, + PM_STA_SAP_MCC_24_5_2x2, + PM_STA_SAP_DBS_1x1, + PM_STA_SAP_DBS_2x2, + PM_STA_SAP_SBS_5_1x1, + PM_STA_P2P_GO_SCC_24_1x1, + 
PM_STA_P2P_GO_SCC_24_2x2, + PM_STA_P2P_GO_MCC_24_1x1, + PM_STA_P2P_GO_MCC_24_2x2, + PM_STA_P2P_GO_SCC_5_1x1, + PM_STA_P2P_GO_SCC_5_2x2, + PM_STA_P2P_GO_MCC_5_1x1, + PM_STA_P2P_GO_MCC_5_2x2, + PM_STA_P2P_GO_MCC_24_5_1x1, + PM_STA_P2P_GO_MCC_24_5_2x2, + PM_STA_P2P_GO_DBS_1x1, + PM_STA_P2P_GO_DBS_2x2, + PM_STA_P2P_GO_SBS_5_1x1, + PM_STA_P2P_CLI_SCC_24_1x1, + PM_STA_P2P_CLI_SCC_24_2x2, + PM_STA_P2P_CLI_MCC_24_1x1, + PM_STA_P2P_CLI_MCC_24_2x2, + PM_STA_P2P_CLI_SCC_5_1x1, + PM_STA_P2P_CLI_SCC_5_2x2, + PM_STA_P2P_CLI_MCC_5_1x1, + PM_STA_P2P_CLI_MCC_5_2x2, + PM_STA_P2P_CLI_MCC_24_5_1x1, + PM_STA_P2P_CLI_MCC_24_5_2x2, + PM_STA_P2P_CLI_DBS_1x1, + PM_STA_P2P_CLI_DBS_2x2, + PM_STA_P2P_CLI_SBS_5_1x1, + PM_P2P_GO_P2P_CLI_SCC_24_1x1, + PM_P2P_GO_P2P_CLI_SCC_24_2x2, + PM_P2P_GO_P2P_CLI_MCC_24_1x1, + PM_P2P_GO_P2P_CLI_MCC_24_2x2, + PM_P2P_GO_P2P_CLI_SCC_5_1x1, + PM_P2P_GO_P2P_CLI_SCC_5_2x2, + PM_P2P_GO_P2P_CLI_MCC_5_1x1, + PM_P2P_GO_P2P_CLI_MCC_5_2x2, + PM_P2P_GO_P2P_CLI_MCC_24_5_1x1, + PM_P2P_GO_P2P_CLI_MCC_24_5_2x2, + PM_P2P_GO_P2P_CLI_DBS_1x1, + PM_P2P_GO_P2P_CLI_DBS_2x2, + PM_P2P_GO_P2P_CLI_SBS_5_1x1, + PM_P2P_GO_SAP_SCC_24_1x1, + PM_P2P_GO_SAP_SCC_24_2x2, + PM_P2P_GO_SAP_MCC_24_1x1, + PM_P2P_GO_SAP_MCC_24_2x2, + PM_P2P_GO_SAP_SCC_5_1x1, + PM_P2P_GO_SAP_SCC_5_2x2, + PM_P2P_GO_SAP_MCC_5_1x1, + PM_P2P_GO_SAP_MCC_5_2x2, + PM_P2P_GO_SAP_MCC_24_5_1x1, + PM_P2P_GO_SAP_MCC_24_5_2x2, + PM_P2P_GO_SAP_DBS_1x1, + PM_P2P_GO_SAP_DBS_2x2, + PM_P2P_GO_SAP_SBS_5_1x1, + PM_P2P_CLI_SAP_SCC_24_1x1, + PM_P2P_CLI_SAP_SCC_24_2x2, + PM_P2P_CLI_SAP_MCC_24_1x1, + PM_P2P_CLI_SAP_MCC_24_2x2, + PM_P2P_CLI_SAP_SCC_5_1x1, + PM_P2P_CLI_SAP_SCC_5_2x2, + PM_P2P_CLI_SAP_MCC_5_1x1, + PM_P2P_CLI_SAP_MCC_5_2x2, + PM_P2P_CLI_SAP_MCC_24_5_1x1, + PM_P2P_CLI_SAP_MCC_24_5_2x2, + PM_P2P_CLI_SAP_DBS_1x1, + PM_P2P_CLI_SAP_DBS_2x2, + PM_P2P_CLI_SAP_SBS_5_1x1, + PM_SAP_SAP_SCC_24_1x1, + PM_SAP_SAP_SCC_24_2x2, + PM_SAP_SAP_MCC_24_1x1, + PM_SAP_SAP_MCC_24_2x2, + PM_SAP_SAP_SCC_5_1x1, + PM_SAP_SAP_SCC_5_2x2, + 
PM_SAP_SAP_MCC_5_1x1,
+	PM_SAP_SAP_MCC_5_2x2,
+	PM_SAP_SAP_MCC_24_5_1x1,
+	PM_SAP_SAP_MCC_24_5_2x2,
+	PM_SAP_SAP_DBS_1x1,
+	PM_SAP_SAP_DBS_2x2,
+	PM_SAP_SAP_SBS_5_1x1,
+	PM_STA_STA_SCC_24_1x1,
+	PM_STA_STA_SCC_24_2x2,
+	PM_STA_STA_MCC_24_1x1,
+	PM_STA_STA_MCC_24_2x2,
+	PM_STA_STA_SCC_5_1x1,
+	PM_STA_STA_SCC_5_2x2,
+	PM_STA_STA_MCC_5_1x1,
+	PM_STA_STA_MCC_5_2x2,
+	PM_STA_STA_MCC_24_5_1x1,
+	PM_STA_STA_MCC_24_5_2x2,
+	PM_STA_STA_DBS_1x1,
+	PM_STA_STA_DBS_2x2,
+	PM_STA_STA_SBS_5_1x1,
+
+	PM_MAX_TWO_CONNECTION_MODE
+};
+
+/**
+ * enum policy_mgr_conc_next_action - actions to be taken on old
+ * connections.
+ *
+ * @PM_NOP: No action
+ * @PM_DBS: switch to DBS mode
+ * @PM_DBS_DOWNGRADE: switch to DBS mode & downgrade to 1x1
+ * @PM_DBS_UPGRADE: switch to DBS mode & upgrade to 2x2
+ * @PM_SINGLE_MAC: switch to MCC/SCC mode
+ * @PM_SINGLE_MAC_UPGRADE: switch to MCC/SCC mode & upgrade to 2x2
+ * @PM_SBS: switch to SBS mode
+ * @PM_SBS_DOWNGRADE: switch to SBS mode & downgrade to 1x1
+ * @PM_DOWNGRADE: downgrade to 1x1
+ * @PM_UPGRADE: upgrade to 2x2
+ * @PM_MAX_CONC_NEXT_ACTION: Max place holder
+ *
+ * These are generic IDs that identify the various roles
+ * in the software system
+ */
+enum policy_mgr_conc_next_action {
+	PM_NOP = 0,
+	PM_DBS,
+	PM_DBS_DOWNGRADE,
+	PM_DBS_UPGRADE,
+	PM_SINGLE_MAC,
+	PM_SINGLE_MAC_UPGRADE,
+	PM_SBS,
+	PM_SBS_DOWNGRADE,
+	PM_DOWNGRADE,
+	PM_UPGRADE,
+
+	PM_MAX_CONC_NEXT_ACTION
+};
+
+/**
+ * enum policy_mgr_band - wifi band.
+ * + * @POLICY_MGR_BAND_24: 2.4 Ghz band + * @POLICY_MGR_BAND_5: 5 Ghz band + * @POLICY_MGR_MAX_BAND: Max place holder + * + * These are generic IDs that identify the various roles + * in the software system + */ +enum policy_mgr_band { + POLICY_MGR_BAND_24 = 0, + POLICY_MGR_BAND_5, + POLICY_MGR_MAX_BAND +}; + +/** + * enum policy_mgr_conn_update_reason: Reason for conc connection update + * @POLICY_MGR_UPDATE_REASON_SET_OPER_CHAN: Set probable operating channel + * @POLICY_MGR_UPDATE_REASON_JOIN_IBSS: Join IBSS + * @POLICY_MGR_UPDATE_REASON_UT: Unit test related + * @POLICY_MGR_UPDATE_REASON_START_AP: Start AP + * @POLICY_MGR_UPDATE_REASON_NORMAL_STA: Connection to Normal STA + * @POLICY_MGR_UPDATE_REASON_HIDDEN_STA: Connection to Hidden STA + * @POLICY_MGR_UPDATE_REASON_OPPORTUNISTIC: Opportunistic HW mode update + * @POLICY_MGR_UPDATE_REASON_NSS_UPDATE: NSS update + * @POLICY_MGR_UPDATE_REASON_CHANNEL_SWITCH: Channel switch + * @POLICY_MGR_UPDATE_REASON_CHANNEL_SWITCH_STA: Channel switch for STA + */ +enum policy_mgr_conn_update_reason { + POLICY_MGR_UPDATE_REASON_SET_OPER_CHAN, + POLICY_MGR_UPDATE_REASON_JOIN_IBSS, + POLICY_MGR_UPDATE_REASON_UT, + POLICY_MGR_UPDATE_REASON_START_AP, + POLICY_MGR_UPDATE_REASON_NORMAL_STA, + POLICY_MGR_UPDATE_REASON_HIDDEN_STA, + POLICY_MGR_UPDATE_REASON_OPPORTUNISTIC, + POLICY_MGR_UPDATE_REASON_NSS_UPDATE, + POLICY_MGR_UPDATE_REASON_CHANNEL_SWITCH, + POLICY_MGR_UPDATE_REASON_CHANNEL_SWITCH_STA, + POLICY_MGR_UPDATE_REASON_PRE_CAC, +}; + +/** + * enum hw_mode_bandwidth - bandwidth of wifi channel. 
+ * + * @HW_MODE_5_MHZ: 5 Mhz bandwidth + * @HW_MODE_10_MHZ: 10 Mhz bandwidth + * @HW_MODE_20_MHZ: 20 Mhz bandwidth + * @HW_MODE_40_MHZ: 40 Mhz bandwidth + * @HW_MODE_80_MHZ: 80 Mhz bandwidth + * @HW_MODE_80_PLUS_80_MHZ: 80 Mhz plus 80 Mhz bandwidth + * @HW_MODE_160_MHZ: 160 Mhz bandwidth + * @HW_MODE_MAX_BANDWIDTH: Max place holder + * + * These are generic IDs that identify the various roles + * in the software system + */ +enum hw_mode_bandwidth { + HW_MODE_BW_NONE, + HW_MODE_5_MHZ, + HW_MODE_10_MHZ, + HW_MODE_20_MHZ, + HW_MODE_40_MHZ, + HW_MODE_80_MHZ, + HW_MODE_80_PLUS_80_MHZ, + HW_MODE_160_MHZ, + HW_MODE_MAX_BANDWIDTH +}; + +/** + * enum set_hw_mode_status - Status of set HW mode command + * @SET_HW_MODE_STATUS_OK: command successful + * @SET_HW_MODE_STATUS_EINVAL: Requested invalid hw_mode + * @SET_HW_MODE_STATUS_ECANCELED: HW mode change cancelled + * @SET_HW_MODE_STATUS_ENOTSUP: HW mode not supported + * @SET_HW_MODE_STATUS_EHARDWARE: HW mode change prevented by hardware + * @SET_HW_MODE_STATUS_EPENDING: HW mode change is pending + * @SET_HW_MODE_STATUS_ECOEX: HW mode change conflict with Coex + */ +enum set_hw_mode_status { + SET_HW_MODE_STATUS_OK, + SET_HW_MODE_STATUS_EINVAL, + SET_HW_MODE_STATUS_ECANCELED, + SET_HW_MODE_STATUS_ENOTSUP, + SET_HW_MODE_STATUS_EHARDWARE, + SET_HW_MODE_STATUS_EPENDING, + SET_HW_MODE_STATUS_ECOEX, +}; + +typedef void (*dual_mac_cb)(enum set_hw_mode_status status, + uint32_t scan_config, + uint32_t fw_mode_config); +/** + * enum policy_mgr_hw_mode_change - identify the HW mode switching to. 
+ * + * @POLICY_MGR_HW_MODE_NOT_IN_PROGRESS: HW mode change not in progress + * @POLICY_MGR_SMM_IN_PROGRESS: switching to SMM mode + * @POLICY_MGR_DBS_IN_PROGRESS: switching to DBS mode + * @POLICY_MGR_SBS_IN_PROGRESS: switching to SBS mode + * + * These are generic IDs that identify the various roles + * in the software system + */ +enum policy_mgr_hw_mode_change { + POLICY_MGR_HW_MODE_NOT_IN_PROGRESS = 0, + POLICY_MGR_SMM_IN_PROGRESS, + POLICY_MGR_DBS_IN_PROGRESS, + POLICY_MGR_SBS_IN_PROGRESS +}; + +/** + * enum dbs_support - structure to define INI values and their meaning + * + * @ENABLE_DBS_CXN_AND_SCAN: Enable DBS support for connection and scan + * @DISABLE_DBS_CXN_AND_SCAN: Disable DBS support for connection and scan + * @DISABLE_DBS_CXN_AND_ENABLE_DBS_SCAN: disable dbs support for + * connection but keep dbs support for scan + * @DISABLE_DBS_CXN_AND_ENABLE_DBS_SCAN_WITH_ASYNC_SCAN_OFF: disable dbs support + * for connection but keep dbs for scan but switch off the async scan + * @ENABLE_DBS_CXN_AND_ENABLE_SCAN_WITH_ASYNC_SCAN_OFF: enable dbs support for + * connection and scan but switch off the async scan + * @ENABLE_DBS_CXN_AND_DISABLE_DBS_SCAN: Enable DBS support for connection and + * disable DBS support for scan + * @ENABLE_DBS_CXN_AND_DISABLE_SIMULTANEOUS_SCAN: Enable DBS + * support for connection and disable simultaneous scan from + * upper layer (DBS scan remains enabled in FW) + */ +enum dbs_support { + ENABLE_DBS_CXN_AND_SCAN, + DISABLE_DBS_CXN_AND_SCAN, + DISABLE_DBS_CXN_AND_ENABLE_DBS_SCAN, + DISABLE_DBS_CXN_AND_ENABLE_DBS_SCAN_WITH_ASYNC_SCAN_OFF, + ENABLE_DBS_CXN_AND_ENABLE_SCAN_WITH_ASYNC_SCAN_OFF, + ENABLE_DBS_CXN_AND_DISABLE_DBS_SCAN, + ENABLE_DBS_CXN_AND_DISABLE_SIMULTANEOUS_SCAN, +}; + +/** + * struct policy_mgr_conc_connection_info - information of all existing + * connections in the wlan system + * + * @mode: connection type + * @chan: channel of the connection + * @bw: channel bandwidth used for the connection + * @mac: The HW mac it 
is running + * @chain_mask: The original capability advertised by HW + * @original_nss: nss negotiated at connection time + * @vdev_id: vdev id of the connection + * @in_use: if the table entry is active + */ +struct policy_mgr_conc_connection_info { + enum policy_mgr_con_mode mode; + uint8_t chan; + enum hw_mode_bandwidth bw; + uint8_t mac; + enum policy_mgr_chain_mode chain_mask; + uint32_t original_nss; + uint32_t vdev_id; + bool in_use; +}; + +/** + * struct policy_mgr_hw_mode_params - HW mode params + * @mac0_tx_ss: MAC0 Tx spatial stream + * @mac0_rx_ss: MAC0 Rx spatial stream + * @mac1_tx_ss: MAC1 Tx spatial stream + * @mac1_rx_ss: MAC1 Rx spatial stream + * @mac0_bw: MAC0 bandwidth + * @mac1_bw: MAC1 bandwidth + * @dbs_cap: DBS capabality + * @agile_dfs_cap: Agile DFS capabality + */ +struct policy_mgr_hw_mode_params { + uint8_t mac0_tx_ss; + uint8_t mac0_rx_ss; + uint8_t mac1_tx_ss; + uint8_t mac1_rx_ss; + uint8_t mac0_bw; + uint8_t mac1_bw; + uint8_t dbs_cap; + uint8_t agile_dfs_cap; + uint8_t sbs_cap; +}; + +/** + * struct policy_mgr_vdev_mac_map - vdev id-mac id map + * @vdev_id: VDEV id + * @mac_id: MAC id + */ +struct policy_mgr_vdev_mac_map { + uint32_t vdev_id; + uint32_t mac_id; +}; + +/** + * struct policy_mgr_dual_mac_config - Dual MAC configuration + * @scan_config: Scan configuration + * @fw_mode_config: FW mode configuration + * @set_dual_mac_cb: Callback function to be executed on response to the command + */ +struct policy_mgr_dual_mac_config { + uint32_t scan_config; + uint32_t fw_mode_config; + dual_mac_cb set_dual_mac_cb; +}; + +/** + * struct policy_mgr_hw_mode - Format of set HW mode + * @hw_mode_index: Index of HW mode to be set + * @set_hw_mode_cb: HDD set HW mode callback + * @reason: Reason for HW mode change + * @session_id: Session id + * @next_action: next action to happen at policy mgr + * @context: psoc context + */ +struct policy_mgr_hw_mode { + uint32_t hw_mode_index; + void *set_hw_mode_cb; + enum 
policy_mgr_conn_update_reason reason; + uint32_t session_id; + uint8_t next_action; + struct wlan_objmgr_psoc *context; +}; + +/** + * struct policy_mgr_pcl_list - Format of PCL + * @pcl_list: List of preferred channels + * @weight_list: Weights of the PCL + * @pcl_len: Number of channels in the PCL + */ +struct policy_mgr_pcl_list { + uint8_t pcl_list[POLICY_MGR_MAX_CHANNEL_LIST]; + uint8_t weight_list[POLICY_MGR_MAX_CHANNEL_LIST]; + uint32_t pcl_len; +}; + +/** + * struct policy_mgr_pcl_chan_weights - Params to get the valid weighed list + * @pcl_list: Preferred channel list already sorted in the order of preference + * @pcl_len: Length of the PCL + * @saved_chan_list: Valid channel list updated as part of + * WMA_UPDATE_CHAN_LIST_REQ + * @saved_num_chan: Length of the valid channel list + * @weighed_valid_list: Weights of the valid channel list. This will have one + * to one mapping with valid_chan_list. FW expects channel order and size to be + * as per the list provided in WMI_SCAN_CHAN_LIST_CMDID. 
+ * @weight_list: Weights assigned by policy manager + */ +struct policy_mgr_pcl_chan_weights { + uint8_t pcl_list[POLICY_MGR_MAX_CHANNEL_LIST]; + uint32_t pcl_len; + uint8_t saved_chan_list[POLICY_MGR_MAX_CHANNEL_LIST]; + uint32_t saved_num_chan; + uint8_t weighed_valid_list[POLICY_MGR_MAX_CHANNEL_LIST]; + uint8_t weight_list[POLICY_MGR_MAX_CHANNEL_LIST]; +}; + +/** + * struct policy_mgr_vdev_entry_info - vdev related param to be + * used by policy manager + * @type: type + * @sub_type: sub type + * @mhz: channel frequency in MHz + * @chan_width: channel bandwidth + * @mac_id: the mac on which vdev is on + */ +struct policy_mgr_vdev_entry_info { + uint32_t type; + uint32_t sub_type; + uint32_t mhz; + uint32_t chan_width; + uint32_t mac_id; +}; + +/** + * struct dbs_hw_mode_info - WLAN_DBS_HW_MODES_TLV Format + * @tlv_header: TLV header, TLV tag and len; tag equals WMITLV_TAG_ARRAY_UINT32 + * @hw_mode_list: WLAN_DBS_HW_MODE_LIST entries + */ +struct dbs_hw_mode_info { + uint32_t tlv_header; + uint32_t *hw_mode_list; +}; + +/** + * struct dual_mac_config - Dual MAC configurations + * @prev_scan_config: Previous scan configuration + * @prev_fw_mode_config: Previous FW mode configuration + * @cur_scan_config: Current scan configuration + * @cur_fw_mode_config: Current FW mode configuration + * @req_scan_config: Requested scan configuration + * @req_fw_mode_config: Requested FW mode configuration + */ +struct dual_mac_config { + uint32_t prev_scan_config; + uint32_t prev_fw_mode_config; + uint32_t cur_scan_config; + uint32_t cur_fw_mode_config; + uint32_t req_scan_config; + uint32_t req_fw_mode_config; +}; + +/** + * struct policy_mgr_user_cfg - Policy manager user config variables + * @enable_mcc_adaptive_scheduler: Enable MCC adaptive scheduler + * @max_concurrent_active_sessions: User allowed maximum active + * connections + * @conc_system_pref: System preference for PCL table + * @enable2x2: 2x2 chain mask user config + * @mcc_to_scc_switch_mode: Control SAP 
channel in concurrency + * @sub_20_mhz_enabled: Is 5 or 10 Mhz enabled + * @is_sta_sap_scc_allowed_on_dfs_chan: Is STA+SAP SCC allowed + * on a DFS channel + * @channel_select_logic_conc: channel selection logic for + * different concurrency combinations to DBS or inter band MCC. + * Default is DBS for STA+STA and STA+P2P. + * @sta_sap_scc_on_lte_coex_chan: Is STA+SAP SCC allowed on a + * lte coex channel + * @enable_dfs_master_cap: Is DFS master capability enabled + */ +struct policy_mgr_user_cfg { + uint8_t enable_mcc_adaptive_scheduler; + uint8_t max_concurrent_active_sessions; + uint8_t conc_system_pref; + bool enable2x2; + uint32_t mcc_to_scc_switch_mode; + bool sub_20_mhz_enabled; + bool is_sta_sap_scc_allowed_on_dfs_chan; + uint32_t channel_select_logic_conc; + uint32_t sta_sap_scc_on_lte_coex_chan; + uint8_t enable_dfs_master_cap; +}; + +/** + * struct dbs_nss - Number of spatial streams in DBS mode + * @mac0_ss: Number of spatial streams on MAC0 + * @mac1_ss: Number of spatial streams on MAC1 + */ +struct dbs_nss { + enum hw_mode_ss_config mac0_ss; + enum hw_mode_ss_config mac1_ss; +}; + +/** + * struct connection_info - connection information + * @mac_id: The HW mac it is running + * @vdev_id: vdev id + * @channel: channel of the connection + */ +struct connection_info { + uint8_t mac_id; + uint8_t vdev_id; + uint8_t channel; +}; +#endif /* __WLAN_POLICY_MGR_PUBLIC_STRUCT_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_action.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_action.c new file mode 100644 index 0000000000000000000000000000000000000000..4c5ba6b8725fb7bd7f6c7ddd365e80d7e36fdcc3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_action.c @@ -0,0 +1,1756 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_policy_mgr_action.c + * + * WLAN Concurrent Connection Management APIs + * + */ + +/* Include files */ + +#include "wlan_policy_mgr_api.h" +#include "wlan_policy_mgr_tables_no_dbs_i.h" +#include "wlan_policy_mgr_i.h" +#include "qdf_types.h" +#include "qdf_trace.h" +#include "wlan_objmgr_global_obj.h" +#include "qdf_platform.h" + +enum policy_mgr_conc_next_action (*policy_mgr_get_current_pref_hw_mode_ptr) + (struct wlan_objmgr_psoc *psoc); + +void policy_mgr_hw_mode_transition_cb(uint32_t old_hw_mode_index, + uint32_t new_hw_mode_index, + uint32_t num_vdev_mac_entries, + struct policy_mgr_vdev_mac_map *vdev_mac_map, + struct wlan_objmgr_psoc *context) +{ + QDF_STATUS status; + struct policy_mgr_hw_mode_params hw_mode; + uint32_t i; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(context); + if (!pm_ctx) { + policy_mgr_err("Invalid context"); + return; + } + + if (!vdev_mac_map) { + policy_mgr_err("vdev_mac_map is NULL"); + return; + } + + policy_mgr_debug("old_hw_mode_index=%d, new_hw_mode_index=%d", + old_hw_mode_index, new_hw_mode_index); + + for (i = 0; i < num_vdev_mac_entries; i++) + policy_mgr_debug("vdev_id:%d mac_id:%d", + vdev_mac_map[i].vdev_id, + 
vdev_mac_map[i].mac_id); + + status = policy_mgr_get_hw_mode_from_idx(context, + new_hw_mode_index, &hw_mode); + if (status != QDF_STATUS_SUCCESS) { + policy_mgr_err("Get HW mode failed: %d", status); + return; + } + + policy_mgr_debug("MAC0: TxSS:%d, RxSS:%d, Bw:%d", + hw_mode.mac0_tx_ss, hw_mode.mac0_rx_ss, hw_mode.mac0_bw); + policy_mgr_debug("MAC1: TxSS:%d, RxSS:%d, Bw:%d", + hw_mode.mac1_tx_ss, hw_mode.mac1_rx_ss, hw_mode.mac1_bw); + policy_mgr_debug("DBS:%d, Agile DFS:%d, SBS:%d", + hw_mode.dbs_cap, hw_mode.agile_dfs_cap, hw_mode.sbs_cap); + + /* update pm_conc_connection_list */ + policy_mgr_update_hw_mode_conn_info(context, num_vdev_mac_entries, + vdev_mac_map, + hw_mode); + + if (pm_ctx->mode_change_cb) + pm_ctx->mode_change_cb(); + + return; +} + +QDF_STATUS policy_mgr_check_n_start_opportunistic_timer( + struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("PM ctx not valid. 
Oppurtunistic timer cannot start"); + return QDF_STATUS_E_FAILURE; + } + if (policy_mgr_need_opportunistic_upgrade(psoc)) { + /* let's start the timer */ + qdf_mc_timer_stop(&pm_ctx->dbs_opportunistic_timer); + status = qdf_mc_timer_start( + &pm_ctx->dbs_opportunistic_timer, + DBS_OPPORTUNISTIC_TIME * 1000); + if (!QDF_IS_STATUS_SUCCESS(status)) + policy_mgr_err("Failed to start dbs opportunistic timer"); + } + return status; +} + +QDF_STATUS policy_mgr_pdev_set_hw_mode(struct wlan_objmgr_psoc *psoc, + uint32_t session_id, + enum hw_mode_ss_config mac0_ss, + enum hw_mode_bandwidth mac0_bw, + enum hw_mode_ss_config mac1_ss, + enum hw_mode_bandwidth mac1_bw, + enum hw_mode_dbs_capab dbs, + enum hw_mode_agile_dfs_capab dfs, + enum hw_mode_sbs_capab sbs, + enum policy_mgr_conn_update_reason reason, + uint8_t next_action) +{ + int8_t hw_mode_index; + struct policy_mgr_hw_mode msg; + QDF_STATUS status; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid context"); + return QDF_STATUS_E_FAILURE; + } + + /* + * if HW is not capable of doing 2x2 or ini config disabled 2x2, don't + * allow to request FW for 2x2 + */ + if ((HW_MODE_SS_2x2 == mac0_ss) && (!pm_ctx->user_cfg.enable2x2)) { + policy_mgr_debug("2x2 is not allowed downgrading to 1x1 for mac0"); + mac0_ss = HW_MODE_SS_1x1; + } + if ((HW_MODE_SS_2x2 == mac1_ss) && (!pm_ctx->user_cfg.enable2x2)) { + policy_mgr_debug("2x2 is not allowed downgrading to 1x1 for mac1"); + mac1_ss = HW_MODE_SS_1x1; + } + + hw_mode_index = policy_mgr_get_hw_mode_idx_from_dbs_hw_list(psoc, + mac0_ss, mac0_bw, mac1_ss, mac1_bw, dbs, dfs, sbs); + if (hw_mode_index < 0) { + policy_mgr_err("Invalid HW mode index obtained"); + return QDF_STATUS_E_FAILURE; + } + + msg.hw_mode_index = hw_mode_index; + msg.set_hw_mode_cb = (void *)policy_mgr_pdev_set_hw_mode_cb; + msg.reason = reason; + msg.session_id = session_id; + msg.next_action = next_action; + msg.context = psoc; + + 
policy_mgr_debug("set hw mode to sme: hw_mode_index: %d session:%d reason:%d", + msg.hw_mode_index, msg.session_id, msg.reason); + + status = pm_ctx->sme_cbacks.sme_pdev_set_hw_mode(msg); + if (status != QDF_STATUS_SUCCESS) { + policy_mgr_err("Failed to set hw mode to SME"); + return status; + } + + return QDF_STATUS_SUCCESS; +} + +enum policy_mgr_conc_next_action policy_mgr_need_opportunistic_upgrade( + struct wlan_objmgr_psoc *psoc) +{ + uint32_t conn_index; + enum policy_mgr_conc_next_action upgrade = PM_NOP; + uint8_t mac = 0; + struct policy_mgr_hw_mode_params hw_mode; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + goto done; + } + + if (policy_mgr_is_hw_dbs_capable(psoc) == false) { + policy_mgr_err("driver isn't dbs capable, no further action needed"); + goto done; + } + + status = policy_mgr_get_current_hw_mode(psoc, &hw_mode); + if (!QDF_IS_STATUS_SUCCESS(status)) { + policy_mgr_err("policy_mgr_get_current_hw_mode failed"); + goto done; + } + if (!hw_mode.dbs_cap) { + policy_mgr_debug("current HW mode is non-DBS capable"); + goto done; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + /* Are both mac's still in use */ + for (conn_index = 0; conn_index < MAX_NUMBER_OF_CONC_CONNECTIONS; + conn_index++) { + policy_mgr_debug("index:%d mac:%d in_use:%d chan:%d org_nss:%d", + conn_index, + pm_conc_connection_list[conn_index].mac, + pm_conc_connection_list[conn_index].in_use, + pm_conc_connection_list[conn_index].chan, + pm_conc_connection_list[conn_index].original_nss); + if ((pm_conc_connection_list[conn_index].mac == 0) && + pm_conc_connection_list[conn_index].in_use) { + mac |= POLICY_MGR_MAC0; + if (POLICY_MGR_MAC0_AND_MAC1 == mac) { + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + goto done; + } + } else if ((pm_conc_connection_list[conn_index].mac == 1) && + pm_conc_connection_list[conn_index].in_use) { + mac 
|= POLICY_MGR_MAC1; + if (policy_mgr_is_hw_dbs_2x2_capable(psoc) && + WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[conn_index].chan) + ) { + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + policy_mgr_debug("2X2 DBS capable with 2.4 GHZ connection"); + goto done; + } + if (POLICY_MGR_MAC0_AND_MAC1 == mac) { + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + goto done; + } + } + } + /* Let's request for single MAC mode */ + upgrade = PM_SINGLE_MAC; + /* Is there any connection had an initial connection with 2x2 */ + for (conn_index = 0; conn_index < MAX_NUMBER_OF_CONC_CONNECTIONS; + conn_index++) { + if ((pm_conc_connection_list[conn_index].original_nss == 2) && + pm_conc_connection_list[conn_index].in_use) { + upgrade = PM_SINGLE_MAC_UPGRADE; + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + goto done; + } + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + +done: + return upgrade; +} + +QDF_STATUS policy_mgr_update_connection_info(struct wlan_objmgr_psoc *psoc, + uint32_t vdev_id) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + uint32_t conn_index = 0; + bool found = false; + struct policy_mgr_vdev_entry_info conn_table_entry; + enum policy_mgr_chain_mode chain_mask = POLICY_MGR_ONE_ONE; + uint8_t nss_2g, nss_5g; + enum policy_mgr_con_mode mode; + uint8_t chan; + uint32_t nss = 0; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return status; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + while (PM_CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) { + if (vdev_id == pm_conc_connection_list[conn_index].vdev_id) { + /* debug msg */ + found = true; + break; + } + conn_index++; + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + if (!found) { + /* err msg */ + policy_mgr_err("can't find vdev_id %d in pm_conc_connection_list", + vdev_id); + return status; + } + if (pm_ctx->wma_cbacks.wma_get_connection_info) { + status = 
pm_ctx->wma_cbacks.wma_get_connection_info( + vdev_id, &conn_table_entry); + if (QDF_STATUS_SUCCESS != status) { + policy_mgr_err("can't find vdev_id %d in connection table", + vdev_id); + return status; + } + } else { + policy_mgr_err("wma_get_connection_info is NULL"); + return QDF_STATUS_E_FAILURE; + } + + mode = policy_mgr_get_mode(conn_table_entry.type, + conn_table_entry.sub_type); + chan = wlan_reg_freq_to_chan(pm_ctx->pdev, conn_table_entry.mhz); + status = policy_mgr_get_nss_for_vdev(psoc, mode, &nss_2g, &nss_5g); + if (QDF_IS_STATUS_SUCCESS(status)) { + if ((WLAN_REG_IS_24GHZ_CH(chan) && (nss_2g > 1)) || + (WLAN_REG_IS_5GHZ_CH(chan) && (nss_5g > 1))) + chain_mask = POLICY_MGR_TWO_TWO; + else + chain_mask = POLICY_MGR_ONE_ONE; + nss = (WLAN_REG_IS_24GHZ_CH(chan)) ? nss_2g : nss_5g; + } else { + policy_mgr_err("Error in getting nss"); + } + + policy_mgr_debug("update PM connection table for vdev:%d", vdev_id); + + /* add the entry */ + policy_mgr_update_conc_list(psoc, conn_index, + mode, + chan, + policy_mgr_get_bw(conn_table_entry.chan_width), + conn_table_entry.mac_id, + chain_mask, + nss, vdev_id, true, true); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS policy_mgr_update_and_wait_for_connection_update( + struct wlan_objmgr_psoc *psoc, + uint8_t session_id, + uint8_t channel, + enum policy_mgr_conn_update_reason reason) +{ + QDF_STATUS status; + + policy_mgr_debug("session:%d channel:%d reason:%d", + session_id, channel, reason); + + status = policy_mgr_reset_connection_update(psoc); + if (QDF_IS_STATUS_ERROR(status)) + policy_mgr_err("clearing event failed"); + + status = policy_mgr_current_connections_update(psoc, + session_id, channel, reason); + if (QDF_STATUS_E_FAILURE == status) { + policy_mgr_err("connections update failed"); + return QDF_STATUS_E_FAILURE; + } + + /* Wait only when status is success */ + if (QDF_IS_STATUS_SUCCESS(status)) { + status = policy_mgr_wait_for_connection_update(psoc); + if (QDF_IS_STATUS_ERROR(status)) { + 
policy_mgr_err("qdf wait for event failed"); + return QDF_STATUS_E_FAILURE; + } + } + + return QDF_STATUS_SUCCESS; +} + +/** + * policy_mgr_is_dbs_allowed_for_concurrency() - If dbs is allowed for current + * concurrency + * @new_conn_mode: new connection mode + * + * When a new connection is about to come up, check if dbs is allowed for + * STA+STA or STA+P2P + * + * Return: true if dbs is allowed for STA+STA or STA+P2P else false + */ +bool policy_mgr_is_dbs_allowed_for_concurrency( + struct wlan_objmgr_psoc *psoc, enum QDF_OPMODE new_conn_mode) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + uint32_t count, dbs_for_sta_sta, dbs_for_sta_p2p; + bool ret = true; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid context"); + return ret; + } + + count = policy_mgr_get_connection_count(psoc); + + if (count != 1 || new_conn_mode == QDF_MAX_NO_OF_MODE) + return ret; + + dbs_for_sta_sta = PM_CHANNEL_SELECT_LOGIC_STA_STA_GET(pm_ctx->user_cfg. + channel_select_logic_conc); + dbs_for_sta_p2p = PM_CHANNEL_SELECT_LOGIC_STA_P2P_GET(pm_ctx->user_cfg. 
+ channel_select_logic_conc); + + switch (pm_conc_connection_list[0].mode) { + case PM_STA_MODE: + switch (new_conn_mode) { + case QDF_STA_MODE: + if (!dbs_for_sta_sta) + return false; + break; + case QDF_P2P_DEVICE_MODE: + case QDF_P2P_CLIENT_MODE: + case QDF_P2P_GO_MODE: + if (!dbs_for_sta_p2p) + return false; + break; + default: + break; + } + break; + case PM_P2P_CLIENT_MODE: + case PM_P2P_GO_MODE: + switch (new_conn_mode) { + case QDF_STA_MODE: + if (!dbs_for_sta_p2p) + return false; + break; + default: + break; + } + break; + default: + break; + } + + return ret; +} + +bool policy_mgr_is_chnl_in_diff_band(struct wlan_objmgr_psoc *psoc, + uint8_t channel) +{ + uint8_t i, pm_chnl; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return false; + } + + /* + * check given channel against already existing connections' + * channels. if they differ then channels are in different bands + */ + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + for (i = 0; i < MAX_NUMBER_OF_CONC_CONNECTIONS; i++) { + pm_chnl = pm_conc_connection_list[i].chan; + if (pm_conc_connection_list[i].in_use) + if (!WLAN_REG_IS_SAME_BAND_CHANNELS(channel, pm_chnl)) { + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + policy_mgr_debug("channel is in diff band"); + return true; + } + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return false; +} + +bool policy_mgr_is_hwmode_set_for_given_chnl(struct wlan_objmgr_psoc *psoc, + uint8_t channel) +{ + enum policy_mgr_band band; + bool is_hwmode_dbs, is_2x2_dbs; + + if (policy_mgr_is_hw_dbs_capable(psoc) == false) + return true; + + if (WLAN_REG_IS_24GHZ_CH(channel)) + band = POLICY_MGR_BAND_24; + else + band = POLICY_MGR_BAND_5; + + is_hwmode_dbs = policy_mgr_is_current_hwmode_dbs(psoc); + is_2x2_dbs = policy_mgr_is_hw_dbs_2x2_capable(psoc); + /* + * If HW supports 2x2 chains in DBS HW mode and if DBS HW mode is not + * yet set then this is the right 
time to block the connection. + */ + if ((band == POLICY_MGR_BAND_24) && is_2x2_dbs && !is_hwmode_dbs) { + policy_mgr_err("HW mode is not yet in DBS!!!!!"); + return false; + } + + return true; +} + +QDF_STATUS policy_mgr_current_connections_update(struct wlan_objmgr_psoc *psoc, + uint32_t session_id, + uint8_t channel, + enum policy_mgr_conn_update_reason reason) +{ + enum policy_mgr_conc_next_action next_action = PM_NOP; + uint32_t num_connections = 0; + enum policy_mgr_one_connection_mode second_index = 0; + enum policy_mgr_two_connection_mode third_index = 0; + enum policy_mgr_band band; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct policy_mgr_psoc_priv_obj *pm_ctx; + enum QDF_OPMODE new_conn_mode = QDF_MAX_NO_OF_MODE; + + if (policy_mgr_is_hw_dbs_capable(psoc) == false) { + policy_mgr_err("driver isn't dbs capable, no further action needed"); + return QDF_STATUS_E_NOSUPPORT; + } + if (WLAN_REG_IS_24GHZ_CH(channel)) + band = POLICY_MGR_BAND_24; + else + band = POLICY_MGR_BAND_5; + + num_connections = policy_mgr_get_connection_count(psoc); + + policy_mgr_debug("num_connections=%d channel=%d", + num_connections, channel); + + switch (num_connections) { + case 0: + if (band == POLICY_MGR_BAND_24) + if (policy_mgr_is_hw_dbs_2x2_capable(psoc)) + next_action = PM_DBS; + else + next_action = PM_NOP; + else + next_action = PM_NOP; + break; + case 1: + second_index = + policy_mgr_get_second_connection_pcl_table_index(psoc); + if (PM_MAX_ONE_CONNECTION_MODE == second_index) { + policy_mgr_err( + "couldn't find index for 2nd connection next action table"); + goto done; + } + next_action = + (*next_action_two_connection_table)[second_index][band]; + break; + case 2: + third_index = + policy_mgr_get_third_connection_pcl_table_index(psoc); + if (PM_MAX_TWO_CONNECTION_MODE == third_index) { + policy_mgr_err( + "couldn't find index for 3rd connection next action table"); + goto done; + } + next_action = (*next_action_three_connection_table) + [third_index][band]; + 
break; + default: + policy_mgr_err("unexpected num_connections value %d", + num_connections); + break; + } + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid context"); + goto done; + } + + if (pm_ctx->hdd_cbacks.hdd_get_device_mode) + new_conn_mode = pm_ctx->hdd_cbacks. + hdd_get_device_mode(session_id); + + /* + * Based on channel_select_logic_conc ini, hw mode is set + * when second connection is about to come up that results + * in STA+STA and STA+P2P concurrency. + * 1) If MCC is set and if current hw mode is dbs, hw mode + * should be set to single mac for above concurrency. + * 2) If MCC is set and if current hw mode is not dbs, hw + * mode change is not required. + */ + if (policy_mgr_is_current_hwmode_dbs(psoc) && + !policy_mgr_is_dbs_allowed_for_concurrency(psoc, new_conn_mode)) + next_action = PM_SINGLE_MAC; + else if (!policy_mgr_is_current_hwmode_dbs(psoc) && + !policy_mgr_is_dbs_allowed_for_concurrency(psoc, new_conn_mode)) + next_action = PM_NOP; + + if (PM_NOP != next_action) + status = policy_mgr_next_actions(psoc, session_id, + next_action, reason); + else + status = QDF_STATUS_E_NOSUPPORT; + + policy_mgr_debug( + "idx2=%d idx3=%d next_action=%d, band=%d status=%d reason=%d session_id=%d", + second_index, third_index, next_action, band, status, + reason, session_id); + +done: + return status; +} + +QDF_STATUS policy_mgr_next_actions(struct wlan_objmgr_psoc *psoc, + uint32_t session_id, + enum policy_mgr_conc_next_action action, + enum policy_mgr_conn_update_reason reason) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct policy_mgr_hw_mode_params hw_mode; + struct dbs_nss nss_dbs = {0}; + + if (policy_mgr_is_hw_dbs_capable(psoc) == false) { + policy_mgr_err("driver isn't dbs capable, no further action needed"); + return QDF_STATUS_E_NOSUPPORT; + } + + /* check for the current HW index to see if really need any action */ + status = policy_mgr_get_current_hw_mode(psoc, &hw_mode); + if 
(!QDF_IS_STATUS_SUCCESS(status)) { + policy_mgr_err("policy_mgr_get_current_hw_mode failed"); + return status; + } + /** + * if already in DBS no need to request DBS. Might be needed + * to extend the logic when multiple dbs HW mode is available + */ + if ((((PM_DBS_DOWNGRADE == action) || (PM_DBS == action) || + (PM_DBS_UPGRADE == action)) + && hw_mode.dbs_cap)) { + policy_mgr_err("driver is already in %s mode, no further action needed", + (hw_mode.dbs_cap) ? "dbs" : "non dbs"); + return QDF_STATUS_E_ALREADY; + } + + if ((PM_SBS == action) || (action == PM_SBS_DOWNGRADE)) { + if (!policy_mgr_is_hw_sbs_capable(psoc)) { + /* No action */ + policy_mgr_notice("firmware is not sbs capable"); + return QDF_STATUS_E_NOSUPPORT; + } + /* check if current mode is already SBS nothing to be + * done + */ + + } + + switch (action) { + case PM_DBS_DOWNGRADE: + /* + * check if we have a beaconing entity that is using 2x2. If yes, + * update the beacon template & notify FW. Once FW confirms + * beacon updated, send down the HW mode change req + */ + status = policy_mgr_complete_action(psoc, POLICY_MGR_RX_NSS_1, + PM_DBS, reason, session_id); + break; + case PM_DBS: + (void)policy_mgr_get_hw_dbs_nss(psoc, &nss_dbs); + + status = policy_mgr_pdev_set_hw_mode(psoc, session_id, + nss_dbs.mac0_ss, + HW_MODE_80_MHZ, + nss_dbs.mac1_ss, + HW_MODE_40_MHZ, + HW_MODE_DBS, + HW_MODE_AGILE_DFS_NONE, + HW_MODE_SBS_NONE, + reason, PM_NOP); + break; + case PM_SINGLE_MAC_UPGRADE: + /* + * change the HW mode first before the NSS upgrade + */ + status = policy_mgr_pdev_set_hw_mode(psoc, session_id, + HW_MODE_SS_2x2, + HW_MODE_80_MHZ, + HW_MODE_SS_0x0, HW_MODE_BW_NONE, + HW_MODE_DBS_NONE, + HW_MODE_AGILE_DFS_NONE, + HW_MODE_SBS_NONE, + reason, PM_UPGRADE); + break; + case PM_SINGLE_MAC: + status = policy_mgr_pdev_set_hw_mode(psoc, session_id, + HW_MODE_SS_2x2, + HW_MODE_80_MHZ, + HW_MODE_SS_0x0, HW_MODE_BW_NONE, + HW_MODE_DBS_NONE, + HW_MODE_AGILE_DFS_NONE, + HW_MODE_SBS_NONE, + reason, PM_NOP); + 
break; + case PM_DBS_UPGRADE: + status = policy_mgr_pdev_set_hw_mode(psoc, session_id, + HW_MODE_SS_2x2, + HW_MODE_80_MHZ, + HW_MODE_SS_2x2, HW_MODE_80_MHZ, + HW_MODE_DBS, + HW_MODE_AGILE_DFS_NONE, + HW_MODE_SBS_NONE, + reason, PM_UPGRADE); + break; + case PM_SBS_DOWNGRADE: + status = policy_mgr_complete_action(psoc, POLICY_MGR_RX_NSS_1, + PM_SBS, reason, session_id); + break; + case PM_SBS: + status = policy_mgr_pdev_set_hw_mode(psoc, session_id, + HW_MODE_SS_1x1, + HW_MODE_80_MHZ, + HW_MODE_SS_1x1, HW_MODE_80_MHZ, + HW_MODE_DBS, + HW_MODE_AGILE_DFS_NONE, + HW_MODE_SBS, + reason, PM_NOP); + break; + case PM_DOWNGRADE: + /* + * check if we have a beaconing entity that advertised 2x2 + * initially. If yes, update the beacon template & notify FW. + */ + status = policy_mgr_nss_update(psoc, POLICY_MGR_RX_NSS_1, + PM_NOP, reason, session_id); + break; + case PM_UPGRADE: + /* + * check if we have a beaconing entity that advertised 2x2 + * initially. If yes, update the beacon template & notify FW. 
+ */ + status = policy_mgr_nss_update(psoc, POLICY_MGR_RX_NSS_2, + PM_NOP, reason, session_id); + break; + default: + policy_mgr_err("unexpected action value %d", action); + status = QDF_STATUS_E_FAILURE; + break; + } + + return status; +} + +QDF_STATUS policy_mgr_handle_conc_multiport(struct wlan_objmgr_psoc *psoc, + uint8_t session_id, uint8_t channel) +{ + QDF_STATUS status; + + if (!policy_mgr_check_for_session_conc(psoc, session_id, channel)) { + policy_mgr_err("Conc not allowed for the session %d", + session_id); + return QDF_STATUS_E_FAILURE; + } + + status = policy_mgr_reset_connection_update(psoc); + if (!QDF_IS_STATUS_SUCCESS(status)) + policy_mgr_err("clearing event failed"); + + status = policy_mgr_current_connections_update(psoc, session_id, + channel, + POLICY_MGR_UPDATE_REASON_NORMAL_STA); + if (QDF_STATUS_E_FAILURE == status) { + policy_mgr_err("connections update failed"); + return status; + } + + return status; +} + +#ifdef FEATURE_WLAN_MCC_TO_SCC_SWITCH +void policy_mgr_update_user_config_sap_chan( + struct wlan_objmgr_psoc *psoc, uint32_t channel) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid pm context and failed to update the user config sap channel"); + return; + } + pm_ctx->user_config_sap_channel = channel; +} + +/** + * policy_mgr_is_restart_sap_allowed() - Check if restart SAP + * allowed during SCC -> MCC switch + * @psoc: PSOC object data + * @mcc_to_scc_switch: MCC to SCC switch enabled user config + * + * Check if restart SAP allowed during SCC->MCC switch + * + * Restart: true or false + */ +static bool policy_mgr_is_restart_sap_allowed( + struct wlan_objmgr_psoc *psoc, + uint32_t mcc_to_scc_switch) +{ + uint32_t sta_ap_bit_mask = QDF_STA_MASK | QDF_SAP_MASK; + uint32_t sta_go_bit_mask = QDF_STA_MASK | QDF_P2P_GO_MASK; + uint32_t ap_present, go_present; + + ap_present = policy_mgr_mode_specific_connection_count( + psoc, PM_SAP_MODE, NULL); + 
go_present = policy_mgr_mode_specific_connection_count( + psoc, PM_P2P_GO_MODE, NULL); + + if ((mcc_to_scc_switch == QDF_MCC_TO_SCC_SWITCH_DISABLE) || + !policy_mgr_concurrent_open_sessions_running(psoc) || + !((ap_present && ((policy_mgr_get_concurrency_mode(psoc) & + sta_ap_bit_mask) == sta_ap_bit_mask)) || + ((mcc_to_scc_switch == + QDF_MCC_TO_SCC_SWITCH_FORCE_PREFERRED_WITHOUT_DISCONNECTION) + && go_present && ((policy_mgr_get_concurrency_mode(psoc) & + sta_go_bit_mask) == sta_go_bit_mask)))) { + policy_mgr_debug("MCC switch disabled or not concurrent STA/SAP, STA/GO"); + return false; + } + + return true; +} + +bool policy_mgr_is_safe_channel(struct wlan_objmgr_psoc *psoc, + uint8_t channel) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + bool is_safe = true; + uint8_t j; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid context"); + return is_safe; + } + + + if (pm_ctx->unsafe_channel_count == 0) { + policy_mgr_debug("There are no unsafe channels"); + return is_safe; + } + + for (j = 0; j < pm_ctx->unsafe_channel_count; j++) { + if (channel == pm_ctx->unsafe_channel_list[j]) { + is_safe = false; + policy_mgr_warn("CH %d is not safe", channel); + break; + } + } + + return is_safe; +} + +bool policy_mgr_is_sap_restart_required_after_sta_disconnect( + struct wlan_objmgr_psoc *psoc, uint8_t *intf_ch) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + uint8_t sap_chan = policy_mgr_mode_specific_get_channel(psoc, + PM_SAP_MODE); + bool sta_sap_scc_on_dfs_chan = + policy_mgr_is_sta_sap_scc_allowed_on_dfs_chan(psoc); + + *intf_ch = 0; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid pm context"); + return false; + } + + policy_mgr_debug("sta_sap_scc_on_dfs_chan %u, sap_chan %u", + sta_sap_scc_on_dfs_chan, sap_chan); + + if ((!sta_sap_scc_on_dfs_chan || + !(sap_chan && WLAN_REG_IS_5GHZ_CH(sap_chan) && + (wlan_reg_get_channel_state(pm_ctx->pdev, sap_chan) == + CHANNEL_STATE_DFS))) && + 
(!policy_mgr_sta_sap_scc_on_lte_coex_chan(psoc) || + policy_mgr_is_safe_channel(psoc, sap_chan))) { + return false; + } + + *intf_ch = pm_ctx->user_config_sap_channel; + policy_mgr_debug("Standalone SAP is not allowed on DFS channel, Move it to channel %u", + *intf_ch); + + return true; +} + +static void __policy_mgr_check_sta_ap_concurrent_ch_intf(void *data) +{ + struct wlan_objmgr_psoc *psoc; + struct policy_mgr_psoc_priv_obj *pm_ctx = NULL; + struct sta_ap_intf_check_work_ctx *work_info = NULL; + uint32_t mcc_to_scc_switch, cc_count = 0, i; + QDF_STATUS status; + uint8_t channel, sec_ch; + uint8_t operating_channel[MAX_NUMBER_OF_CONC_CONNECTIONS]; + uint8_t vdev_id[MAX_NUMBER_OF_CONC_CONNECTIONS]; + + if (qdf_is_module_state_transitioning()) { + policy_mgr_err("Module transition in progress"); + goto end; + } + + work_info = (struct sta_ap_intf_check_work_ctx *) data; + if (!work_info) { + policy_mgr_err("Invalid work_info"); + goto end; + } + + psoc = work_info->psoc; + if (!psoc) { + policy_mgr_err("Invalid psoc"); + goto end; + } + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid context"); + goto end; + } + mcc_to_scc_switch = + policy_mgr_mcc_to_scc_switch_mode_in_user_cfg(psoc); + + policy_mgr_info("Concurrent open sessions running: %d", + policy_mgr_concurrent_open_sessions_running(psoc)); + + if (!policy_mgr_is_restart_sap_allowed(psoc, mcc_to_scc_switch)) + goto end; + + cc_count = policy_mgr_get_mode_specific_conn_info(psoc, + &operating_channel[cc_count], + &vdev_id[cc_count], + PM_SAP_MODE); + policy_mgr_debug("Number of concurrent SAP: %d", cc_count); + if (cc_count < MAX_NUMBER_OF_CONC_CONNECTIONS) + cc_count = cc_count + + policy_mgr_get_mode_specific_conn_info + (psoc, + &operating_channel[cc_count], + &vdev_id[cc_count], + PM_P2P_GO_MODE); + policy_mgr_debug("Number of beaconing entities (SAP + GO):%d", + cc_count); + if (!cc_count) { + policy_mgr_err("Could not retrieve SAP/GO operating channel&vdevid"); + 
goto end; + } + + policy_mgr_debug("wait if channel switch is already in progress"); + status = qdf_wait_single_event( + &pm_ctx->channel_switch_complete_evt, + CHANNEL_SWITCH_COMPLETE_TIMEOUT); + + if (!QDF_IS_STATUS_SUCCESS(status)) { + policy_mgr_err("wait for event failed, still continue with channel switch"); + } + if (!pm_ctx->hdd_cbacks.wlan_hdd_get_channel_for_sap_restart) { + policy_mgr_err("SAP restart get channel callback in NULL"); + goto end; + } + if (cc_count < MAX_NUMBER_OF_CONC_CONNECTIONS) + for (i = 0; i < cc_count; i++) { + status = pm_ctx->hdd_cbacks. + wlan_hdd_get_channel_for_sap_restart + (psoc, + vdev_id[i], &channel, &sec_ch); + if (status == QDF_STATUS_SUCCESS) { + policy_mgr_info("SAP restarts due to MCC->SCC switch, old chan :%d new chan: %d" + , operating_channel[i], channel); + break; + } + } + if (status != QDF_STATUS_SUCCESS) + policy_mgr_err("Failed to switch SAP channel"); +end: + if (work_info) { + qdf_mem_free(work_info); + if (pm_ctx) + pm_ctx->sta_ap_intf_check_work_info = NULL; + } +} + +void policy_mgr_check_sta_ap_concurrent_ch_intf(void *data) +{ + qdf_ssr_protect(__func__); + __policy_mgr_check_sta_ap_concurrent_ch_intf(data); + qdf_ssr_unprotect(__func__); +} + +static bool policy_mgr_valid_sta_channel_check(struct wlan_objmgr_psoc *psoc, + uint8_t sta_channel) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + bool sta_sap_scc_on_dfs_chan; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid context"); + return false; + } + + sta_sap_scc_on_dfs_chan = + policy_mgr_is_sta_sap_scc_allowed_on_dfs_chan(psoc); + if (wlan_reg_is_dfs_ch(pm_ctx->pdev, sta_channel) && + sta_sap_scc_on_dfs_chan) { + policy_mgr_debug("STA, SAP SCC is allowed on DFS chan %u", + sta_channel); + return true; + } + if ((wlan_reg_is_dfs_ch(pm_ctx->pdev, sta_channel) && + !sta_sap_scc_on_dfs_chan) || + wlan_reg_is_passive_or_disable_ch(pm_ctx->pdev, sta_channel) || + !policy_mgr_is_safe_channel(psoc, sta_channel)) { + if 
(policy_mgr_is_hw_dbs_capable(psoc)) + return true; + else + return false; + } + else + return true; +} +QDF_STATUS policy_mgr_valid_sap_conc_channel_check( + struct wlan_objmgr_psoc *psoc, uint8_t *con_ch, uint8_t sap_ch) +{ + uint8_t channel = *con_ch; + uint8_t temp_channel = 0; + struct policy_mgr_psoc_priv_obj *pm_ctx; + bool sta_sap_scc_on_dfs_chan; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid context"); + return QDF_STATUS_E_FAILURE; + } + + /* + * if force SCC is set, Check if conc channel is DFS + * or passive or part of LTE avoided channel list. + * In that case move SAP to other band if DBS is supported, + * return otherwise + */ + if (!policy_mgr_is_force_scc(psoc)) + return QDF_STATUS_SUCCESS; + + /* + * if interference is 0, check if it is DBS case. If DBS case + * return from here. If SCC, check further if SAP can move to + * STA's channel. + */ + if (!channel && + (sap_ch != policy_mgr_mode_specific_get_channel( + psoc, PM_STA_MODE))) + return QDF_STATUS_SUCCESS; + else if (!channel) + channel = sap_ch; + + sta_sap_scc_on_dfs_chan = + policy_mgr_is_sta_sap_scc_allowed_on_dfs_chan(psoc); + + if (policy_mgr_valid_sta_channel_check(psoc, channel)) { + if (wlan_reg_is_dfs_ch(pm_ctx->pdev, channel) || + wlan_reg_is_passive_or_disable_ch(pm_ctx->pdev, channel) || + !(policy_mgr_sta_sap_scc_on_lte_coex_chan(psoc) || + policy_mgr_is_safe_channel(psoc, channel)) || + (!reg_is_etsi13_srd_chan_allowed_master_mode(pm_ctx->pdev) + && reg_is_etsi13_srd_chan(pm_ctx->pdev, channel))) { + if (wlan_reg_is_dfs_ch(pm_ctx->pdev, channel) && + sta_sap_scc_on_dfs_chan) { + policy_mgr_debug("STA SAP SCC is allowed on DFS channel"); + goto update_chan; + } + + if (policy_mgr_is_hw_dbs_capable(psoc)) { + temp_channel = + policy_mgr_get_alternate_channel_for_sap(psoc); + policy_mgr_debug("temp_channel is %d", + temp_channel); + if (temp_channel) { + channel = temp_channel; + } else { + if (WLAN_REG_IS_5GHZ_CH(channel)) + channel = 
PM_24_GHZ_CHANNEL_6; + else + channel = PM_5_GHZ_CHANNEL_36; + } + if (!policy_mgr_is_safe_channel( + psoc, channel)) { + policy_mgr_warn( + "Can't have concurrency on %d as it is not safe", + channel); + return QDF_STATUS_E_FAILURE; + } + } else { + policy_mgr_warn("Can't have concurrency on %d", + channel); + return QDF_STATUS_E_FAILURE; + } + } + } + +update_chan: + if (channel != sap_ch) + *con_ch = channel; + + return QDF_STATUS_SUCCESS; +} + +/** + * policy_mgr_check_concurrent_intf_and_restart_sap() - Check + * concurrent change intf + * @psoc: PSOC object information + * @operation_channel: operation channel + * @vdev_id: vdev id of SAP + * + * Checks the concurrent change interface and restarts SAP + * + * Return: None + */ +void policy_mgr_check_concurrent_intf_and_restart_sap( + struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + uint32_t mcc_to_scc_switch; + uint8_t operating_channel[MAX_NUMBER_OF_CONC_CONNECTIONS] = {0}; + uint8_t vdev_id[MAX_NUMBER_OF_CONC_CONNECTIONS] = {0}; + uint32_t cc_count = 0; + bool restart_sap = false; + uint8_t sap_ch; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid context"); + return; + } + if (policy_mgr_get_connection_count(psoc) == 1) { + /* + * If STA+SAP sessions are on DFS channel and STA+SAP SCC is + * enabled on DFS channel then move the SAP out of DFS channel + * as soon as STA gets disconnect. + * If STA+SAP sessions are on unsafe channel and STA+SAP SCC is + * enabled on unsafe channel then move the SAP to safe channel + * as soon as STA disconnected. 
+ */ + if (policy_mgr_is_sap_restart_required_after_sta_disconnect( + psoc, &sap_ch)) { + policy_mgr_debug("move the SAP to configured channel %u", + sap_ch); + restart_sap = true; + goto sap_restart; + } + } + + /* + * force SCC with STA+STA+SAP will need some additional logic + */ + cc_count = policy_mgr_get_mode_specific_conn_info(psoc, + &operating_channel[cc_count], + &vdev_id[cc_count], PM_STA_MODE); + if (!cc_count) { + policy_mgr_debug("Could not get STA operating channel&vdevid"); + return; + } + + mcc_to_scc_switch = + policy_mgr_mcc_to_scc_switch_mode_in_user_cfg(psoc); + policy_mgr_info("MCC to SCC switch: %d chan: %d", + mcc_to_scc_switch, operating_channel[0]); + + if (!policy_mgr_is_restart_sap_allowed(psoc, mcc_to_scc_switch)) { + policy_mgr_debug( + "No action taken at check_concurrent_intf_and_restart_sap"); + return; + } + +sap_restart: + /* + * If sta_sap_scc_on_dfs_chan is true then standalone SAP is not + * allowed on DFS channel. SAP is allowed on DFS channel only when STA + * is already connected on that channel. + * In following condition restart_sap will be true if + * sta_sap_scc_on_dfs_chan is true and SAP is on DFS channel. + * This scenario can come if STA+SAP are operating on DFS channel and + * STA gets disconnected. 
+ */ + if (restart_sap || + ((mcc_to_scc_switch != QDF_MCC_TO_SCC_SWITCH_DISABLE) && + policy_mgr_valid_sta_channel_check(psoc, operating_channel[0]) && + !pm_ctx->sta_ap_intf_check_work_info)) { + struct sta_ap_intf_check_work_ctx *work_info; + work_info = qdf_mem_malloc( + sizeof(struct sta_ap_intf_check_work_ctx)); + pm_ctx->sta_ap_intf_check_work_info = work_info; + if (work_info) { + work_info->psoc = psoc; + qdf_create_work(0, &pm_ctx->sta_ap_intf_check_work, + policy_mgr_check_sta_ap_concurrent_ch_intf, + work_info); + qdf_sched_work(0, &pm_ctx->sta_ap_intf_check_work); + policy_mgr_info( + "Checking for Concurrent Change interference"); + } + } +} +#endif /* FEATURE_WLAN_MCC_TO_SCC_SWITCH */ + +#ifdef FEATURE_WLAN_MCC_TO_SCC_SWITCH +/** + * policy_mgr_change_sap_channel_with_csa() - Move SAP channel using (E)CSA + * @psoc: PSOC object information + * @vdev_id: Vdev id + * @channel: Channel to change + * @ch_width: channel width to change + * @forced: Force to switch channel, ignore SCC/MCC check + * + * Invoke the callback function to change SAP channel using (E)CSA + * + * Return: None + */ +void policy_mgr_change_sap_channel_with_csa(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, uint32_t channel, + uint32_t ch_width, + bool forced) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid context"); + return; + } + + if (pm_ctx->hdd_cbacks.sap_restart_chan_switch_cb) { + policy_mgr_info("SAP change change without restart"); + pm_ctx->hdd_cbacks.sap_restart_chan_switch_cb(psoc, + vdev_id, channel, ch_width, forced); + } +} +#endif + +QDF_STATUS policy_mgr_wait_for_connection_update(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + struct policy_mgr_psoc_priv_obj *policy_mgr_context; + + policy_mgr_context = policy_mgr_get_context(psoc); + if (!policy_mgr_context) { + policy_mgr_err("Invalid context"); + return QDF_STATUS_E_FAILURE; + } + + status = qdf_wait_single_event( + 
&policy_mgr_context->connection_update_done_evt, + CONNECTION_UPDATE_TIMEOUT); + + if (!QDF_IS_STATUS_SUCCESS(status)) { + policy_mgr_err("wait for event failed"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS policy_mgr_reset_connection_update(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + struct policy_mgr_psoc_priv_obj *policy_mgr_context; + + policy_mgr_context = policy_mgr_get_context(psoc); + if (!policy_mgr_context) { + policy_mgr_err("Invalid context"); + return QDF_STATUS_E_FAILURE; + } + + status = qdf_event_reset( + &policy_mgr_context->connection_update_done_evt); + + if (!QDF_IS_STATUS_SUCCESS(status)) { + policy_mgr_err("clear event failed"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS policy_mgr_set_connection_update(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + struct policy_mgr_psoc_priv_obj *policy_mgr_context; + + policy_mgr_context = policy_mgr_get_context(psoc); + if (!policy_mgr_context) { + policy_mgr_err("Invalid context"); + return QDF_STATUS_E_FAILURE; + } + + status = qdf_event_set(&policy_mgr_context->connection_update_done_evt); + + if (!QDF_IS_STATUS_SUCCESS(status)) { + policy_mgr_err("set event failed"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS policy_mgr_set_chan_switch_complete_evt( + struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + struct policy_mgr_psoc_priv_obj *policy_mgr_context; + + policy_mgr_context = policy_mgr_get_context(psoc); + + if (!policy_mgr_context) { + policy_mgr_err("Invalid context"); + return QDF_STATUS_E_FAILURE; + } + status = qdf_event_set( + &policy_mgr_context->channel_switch_complete_evt); + + if (!QDF_IS_STATUS_SUCCESS(status)) { + policy_mgr_err("set event failed"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS policy_mgr_reset_chan_switch_complete_evt( + struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + struct 
policy_mgr_psoc_priv_obj *policy_mgr_context; + + policy_mgr_context = policy_mgr_get_context(psoc); + + if (!policy_mgr_context) { + policy_mgr_err("Invalid context"); + return QDF_STATUS_E_FAILURE; + } + status = qdf_event_reset( + &policy_mgr_context->channel_switch_complete_evt); + + if (!QDF_IS_STATUS_SUCCESS(status)) { + policy_mgr_err("reset event failed"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS policy_mgr_set_opportunistic_update(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + struct policy_mgr_psoc_priv_obj *policy_mgr_context; + + policy_mgr_context = policy_mgr_get_context(psoc); + if (!policy_mgr_context) { + policy_mgr_err("Invalid context"); + return QDF_STATUS_E_FAILURE; + } + + status = qdf_event_set( + &policy_mgr_context->opportunistic_update_done_evt); + + if (!QDF_IS_STATUS_SUCCESS(status)) { + policy_mgr_err("set event failed"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS policy_mgr_stop_opportunistic_timer(struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *policy_mgr_ctx; + + policy_mgr_ctx = policy_mgr_get_context(psoc); + if (!policy_mgr_ctx) { + policy_mgr_err("Invalid context"); + return QDF_STATUS_E_FAILURE; + } + + if (policy_mgr_ctx->dbs_opportunistic_timer.state != + QDF_TIMER_STATE_RUNNING) + return QDF_STATUS_SUCCESS; + + qdf_mc_timer_stop(&policy_mgr_ctx->dbs_opportunistic_timer); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS policy_mgr_restart_opportunistic_timer( + struct wlan_objmgr_psoc *psoc, bool check_state) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct policy_mgr_psoc_priv_obj *policy_mgr_ctx; + + policy_mgr_ctx = policy_mgr_get_context(psoc); + if (!policy_mgr_ctx) { + policy_mgr_err("Invalid context"); + return status; + } + + if (check_state && + QDF_TIMER_STATE_RUNNING != + policy_mgr_ctx->dbs_opportunistic_timer.state) + return status; + + qdf_mc_timer_stop(&policy_mgr_ctx->dbs_opportunistic_timer); + 
+ status = qdf_mc_timer_start( + &policy_mgr_ctx->dbs_opportunistic_timer, + DBS_OPPORTUNISTIC_TIME * 1000); + + if (!QDF_IS_STATUS_SUCCESS(status)) { + policy_mgr_err("failed to start opportunistic timer"); + return status; + } + + return status; +} + +QDF_STATUS policy_mgr_set_hw_mode_on_channel_switch( + struct wlan_objmgr_psoc *psoc, uint8_t session_id) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE, qdf_status; + enum policy_mgr_conc_next_action action; + + if (!policy_mgr_is_hw_dbs_capable(psoc)) { + policy_mgr_err("PM/DBS is disabled"); + return status; + } + + action = (*policy_mgr_get_current_pref_hw_mode_ptr)(psoc); + if ((action != PM_DBS_DOWNGRADE) && + (action != PM_SINGLE_MAC_UPGRADE)) { + policy_mgr_err("Invalid action: %d", action); + status = QDF_STATUS_SUCCESS; + goto done; + } + + policy_mgr_debug("action:%d session id:%d", action, session_id); + + /* Opportunistic timer is started, PM will check if MCC upgrade can be + * done on timer expiry. This avoids any possible ping pong effect + * as well. 
+ */ + if (action == PM_SINGLE_MAC_UPGRADE) { + qdf_status = policy_mgr_restart_opportunistic_timer( + psoc, false); + if (QDF_IS_STATUS_SUCCESS(qdf_status)) + policy_mgr_debug("opportunistic timer for MCC upgrade"); + goto done; + } + + /* For DBS, we want to move right away to DBS mode */ + status = policy_mgr_next_actions(psoc, session_id, action, + POLICY_MGR_UPDATE_REASON_CHANNEL_SWITCH); + if (!QDF_IS_STATUS_SUCCESS(status)) { + policy_mgr_err("no set hw mode command was issued"); + goto done; + } +done: + /* success must be returned only when a set hw mode was done */ + return status; +} + +void policy_mgr_checkn_update_hw_mode_single_mac_mode( + struct wlan_objmgr_psoc *psoc, uint8_t channel) +{ + uint8_t i; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + if (QDF_TIMER_STATE_RUNNING == + pm_ctx->dbs_opportunistic_timer.state) + qdf_mc_timer_stop(&pm_ctx->dbs_opportunistic_timer); + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + for (i = 0; i < MAX_NUMBER_OF_CONC_CONNECTIONS; i++) { + if (pm_conc_connection_list[i].in_use) { + if (!WLAN_REG_IS_SAME_BAND_CHANNELS(channel, + pm_conc_connection_list[i].chan)) { + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + policy_mgr_debug("DBS required"); + return; + } + if (policy_mgr_is_hw_dbs_2x2_capable(psoc) && + (WLAN_REG_IS_24GHZ_CH(channel) || + WLAN_REG_IS_24GHZ_CH + (pm_conc_connection_list[i].chan))) { + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + policy_mgr_debug("DBS required"); + return; + } + } + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + pm_dbs_opportunistic_timer_handler((void *)psoc); +} + +void policy_mgr_check_and_stop_opportunistic_timer( + struct wlan_objmgr_psoc *psoc, uint8_t id) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + enum policy_mgr_conc_next_action action = PM_NOP; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + pm_ctx = policy_mgr_get_context(psoc); + if 
(!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + if (QDF_TIMER_STATE_RUNNING == + pm_ctx->dbs_opportunistic_timer.state) { + qdf_mc_timer_stop(&pm_ctx->dbs_opportunistic_timer); + action = policy_mgr_need_opportunistic_upgrade(psoc); + if (action) { + qdf_event_reset(&pm_ctx->opportunistic_update_done_evt); + status = policy_mgr_next_actions(psoc, id, action, + POLICY_MGR_UPDATE_REASON_OPPORTUNISTIC); + if (status != QDF_STATUS_SUCCESS) { + policy_mgr_err("Failed in policy_mgr_next_actions"); + return; + } + status = qdf_wait_single_event( + &pm_ctx->opportunistic_update_done_evt, + CONNECTION_UPDATE_TIMEOUT); + + if (!QDF_IS_STATUS_SUCCESS(status)) { + policy_mgr_err("wait for event failed"); + return; + } + } + } +} + +void policy_mgr_set_hw_mode_change_in_progress( + struct wlan_objmgr_psoc *psoc, enum policy_mgr_hw_mode_change value) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + pm_ctx->hw_mode_change_in_progress = value; + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + policy_mgr_debug("hw_mode_change_in_progress:%d", value); +} + +enum policy_mgr_hw_mode_change policy_mgr_is_hw_mode_change_in_progress( + struct wlan_objmgr_psoc *psoc) +{ + enum policy_mgr_hw_mode_change value; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + value = POLICY_MGR_HW_MODE_NOT_IN_PROGRESS; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return value; + } + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + value = pm_ctx->hw_mode_change_in_progress; + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return value; +} + +enum policy_mgr_hw_mode_change policy_mgr_get_hw_mode_change_from_hw_mode_index( + struct wlan_objmgr_psoc *psoc, uint32_t hw_mode_index) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + uint32_t param = 0; + enum 
policy_mgr_hw_mode_change value + = POLICY_MGR_HW_MODE_NOT_IN_PROGRESS; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return value; + } + + policy_mgr_info("HW param: %x", param); + param = pm_ctx->hw_mode.hw_mode_list[hw_mode_index]; + if (POLICY_MGR_HW_MODE_DBS_MODE_GET(param)) { + policy_mgr_info("DBS is requested with HW (%d)", + hw_mode_index); + value = POLICY_MGR_DBS_IN_PROGRESS; + goto ret_value; + } + + if (POLICY_MGR_HW_MODE_SBS_MODE_GET(param)) { + policy_mgr_info("SBS is requested with HW (%d)", + hw_mode_index); + value = POLICY_MGR_SBS_IN_PROGRESS; + goto ret_value; + } + + value = POLICY_MGR_SMM_IN_PROGRESS; + policy_mgr_info("SMM is requested with HW (%d)", hw_mode_index); + +ret_value: + return value; +} + +#ifdef MPC_UT_FRAMEWORK +QDF_STATUS policy_mgr_update_connection_info_utfw( + struct wlan_objmgr_psoc *psoc, + uint32_t vdev_id, uint32_t tx_streams, uint32_t rx_streams, + uint32_t chain_mask, uint32_t type, uint32_t sub_type, + uint32_t channelid, uint32_t mac_id) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + uint32_t conn_index = 0, found = 0; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return status; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + while (PM_CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) { + if (vdev_id == pm_conc_connection_list[conn_index].vdev_id) { + /* debug msg */ + found = 1; + break; + } + conn_index++; + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + if (!found) { + /* err msg */ + policy_mgr_err("can't find vdev_id %d in pm_conc_connection_list", + vdev_id); + return status; + } + policy_mgr_debug("--> updating entry at index[%d]", conn_index); + + policy_mgr_update_conc_list(psoc, conn_index, + policy_mgr_get_mode(type, sub_type), + channelid, HW_MODE_20_MHZ, + mac_id, chain_mask, 0, vdev_id, true, true); + + return QDF_STATUS_SUCCESS; +} + 
+QDF_STATUS policy_mgr_incr_connection_count_utfw(struct wlan_objmgr_psoc *psoc, + uint32_t vdev_id, uint32_t tx_streams, uint32_t rx_streams, + uint32_t chain_mask, uint32_t type, uint32_t sub_type, + uint32_t channelid, uint32_t mac_id) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + uint32_t conn_index = 0; + bool update_conn = true; + enum policy_mgr_con_mode mode; + + conn_index = policy_mgr_get_connection_count(psoc); + if (MAX_NUMBER_OF_CONC_CONNECTIONS <= conn_index) { + /* err msg */ + policy_mgr_err("exceeded max connection limit %d", + MAX_NUMBER_OF_CONC_CONNECTIONS); + return status; + } + policy_mgr_debug("--> filling entry at index[%d]", conn_index); + + mode = policy_mgr_get_mode(type, sub_type); + if (mode == PM_STA_MODE || mode == PM_P2P_CLIENT_MODE) + update_conn = false; + + policy_mgr_update_conc_list(psoc, conn_index, + mode, + channelid, HW_MODE_20_MHZ, + mac_id, chain_mask, 0, vdev_id, true, + update_conn); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS policy_mgr_decr_connection_count_utfw(struct wlan_objmgr_psoc *psoc, + uint32_t del_all, uint32_t vdev_id) +{ + QDF_STATUS status; + + if (del_all) { + status = policy_mgr_psoc_disable(psoc); + if (!QDF_IS_STATUS_SUCCESS(status)) { + policy_mgr_err("Policy manager initialization failed"); + return QDF_STATUS_E_FAILURE; + } + status = policy_mgr_psoc_enable(psoc); + if (!QDF_IS_STATUS_SUCCESS(status)) { + policy_mgr_err("Policy manager initialization failed"); + return QDF_STATUS_E_FAILURE; + } + } else { + policy_mgr_decr_connection_count(psoc, vdev_id); + } + + return QDF_STATUS_SUCCESS; +} + +enum policy_mgr_pcl_type policy_mgr_get_pcl_from_first_conn_table( + enum policy_mgr_con_mode type, + enum policy_mgr_conc_priority_mode sys_pref) +{ + if ((sys_pref >= PM_MAX_CONC_PRIORITY_MODE) || + (type >= PM_MAX_NUM_OF_MODE)) + return PM_MAX_PCL_TYPE; + return first_connection_pcl_table[type][sys_pref]; +} + +enum policy_mgr_pcl_type policy_mgr_get_pcl_from_second_conn_table( + enum 
policy_mgr_one_connection_mode idx, enum policy_mgr_con_mode type, + enum policy_mgr_conc_priority_mode sys_pref, uint8_t dbs_capable) +{ + if ((idx >= PM_MAX_ONE_CONNECTION_MODE) || + (sys_pref >= PM_MAX_CONC_PRIORITY_MODE) || + (type >= PM_MAX_NUM_OF_MODE)) + return PM_MAX_PCL_TYPE; + if (dbs_capable) + return (*second_connection_pcl_dbs_table)[idx][type][sys_pref]; + else + return second_connection_pcl_nodbs_table[idx][type][sys_pref]; +} + +enum policy_mgr_pcl_type policy_mgr_get_pcl_from_third_conn_table( + enum policy_mgr_two_connection_mode idx, enum policy_mgr_con_mode type, + enum policy_mgr_conc_priority_mode sys_pref, uint8_t dbs_capable) +{ + if ((idx >= PM_MAX_TWO_CONNECTION_MODE) || + (sys_pref >= PM_MAX_CONC_PRIORITY_MODE) || + (type >= PM_MAX_NUM_OF_MODE)) + return PM_MAX_PCL_TYPE; + if (dbs_capable) + return (*third_connection_pcl_dbs_table)[idx][type][sys_pref]; + else + return third_connection_pcl_nodbs_table[idx][type][sys_pref]; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_core.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_core.c new file mode 100644 index 0000000000000000000000000000000000000000..a16b6d039e85fb26496fb3ae57ccef9e39b9537b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_core.c @@ -0,0 +1,3011 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_policy_mgr_core.c + * + * WLAN Concurrenct Connection Management functions + * + */ + +/* Include files */ + +#include "wlan_policy_mgr_i.h" +#include "qdf_types.h" +#include "qdf_trace.h" +#include "wlan_objmgr_global_obj.h" + +#define POLICY_MGR_MAX_CON_STRING_LEN 100 + +struct policy_mgr_conc_connection_info + pm_conc_connection_list[MAX_NUMBER_OF_CONC_CONNECTIONS]; + +struct policy_mgr_psoc_priv_obj *policy_mgr_get_context( + struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + pm_ctx = (struct policy_mgr_psoc_priv_obj *) + wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_POLICY_MGR); + return pm_ctx; +} + +/** + * policy_mgr_get_updated_scan_config() - Get the updated scan configuration + * @scan_config: Pointer containing the updated scan config + * @dbs_scan: 0 or 1 indicating if DBS scan needs to be enabled/disabled + * @dbs_plus_agile_scan: 0 or 1 indicating if DBS plus agile scan needs to be + * enabled/disabled + * @single_mac_scan_with_dfs: 0 or 1 indicating if single MAC scan with DFS + * needs to be enabled/disabled + * + * Takes the current scan configuration and set the necessary scan config + * bits to either 0/1 and provides the updated value to the caller who + * can use this to pass it on to the FW + * + * Return: 0 on success + */ +QDF_STATUS policy_mgr_get_updated_scan_config( + struct wlan_objmgr_psoc *psoc, + uint32_t *scan_config, + bool dbs_scan, + bool dbs_plus_agile_scan, + bool single_mac_scan_with_dfs) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid 
Context"); + return QDF_STATUS_E_FAILURE; + } + *scan_config = pm_ctx->dual_mac_cfg.cur_scan_config; + + WMI_DBS_CONC_SCAN_CFG_DBS_SCAN_SET(*scan_config, dbs_scan); + WMI_DBS_CONC_SCAN_CFG_AGILE_SCAN_SET(*scan_config, + dbs_plus_agile_scan); + WMI_DBS_CONC_SCAN_CFG_AGILE_DFS_SCAN_SET(*scan_config, + single_mac_scan_with_dfs); + + policy_mgr_debug("scan_config:%x ", *scan_config); + return QDF_STATUS_SUCCESS; +} + +/** + * policy_mgr_get_updated_fw_mode_config() - Get the updated fw + * mode configuration + * @fw_mode_config: Pointer containing the updated fw mode config + * @dbs: 0 or 1 indicating if DBS needs to be enabled/disabled + * @agile_dfs: 0 or 1 indicating if agile DFS needs to be enabled/disabled + * + * Takes the current fw mode configuration and set the necessary fw mode config + * bits to either 0/1 and provides the updated value to the caller who + * can use this to pass it on to the FW + * + * Return: 0 on success + */ +QDF_STATUS policy_mgr_get_updated_fw_mode_config( + struct wlan_objmgr_psoc *psoc, + uint32_t *fw_mode_config, + bool dbs, + bool agile_dfs) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return QDF_STATUS_E_FAILURE; + } + *fw_mode_config = pm_ctx->dual_mac_cfg.cur_fw_mode_config; + + WMI_DBS_FW_MODE_CFG_DBS_SET(*fw_mode_config, dbs); + WMI_DBS_FW_MODE_CFG_AGILE_DFS_SET(*fw_mode_config, agile_dfs); + + policy_mgr_debug("fw_mode_config:%x ", *fw_mode_config); + return QDF_STATUS_SUCCESS; +} + +/** + * policy_mgr_is_dual_mac_disabled_in_ini() - Check if dual mac + * is disabled in INI + * + * Checks if the dual mac feature is disabled in INI + * + * Return: true if the dual mac connection is disabled from INI + */ +bool policy_mgr_is_dual_mac_disabled_in_ini( + struct wlan_objmgr_psoc *psoc) +{ + bool is_disabled = false; + enum dbs_support dbs_type = wlan_objmgr_psoc_get_dual_mac_disable(psoc); + + /* + * If DBS support for connection is 
disabled through INI then assume + * that DBS is not supported, so that policy manager takes + * the decision considering non-dbs cases only. + * + * For DBS scan check the INI value explicitly + */ + switch (dbs_type) { + case DISABLE_DBS_CXN_AND_SCAN: + case DISABLE_DBS_CXN_AND_ENABLE_DBS_SCAN: + case DISABLE_DBS_CXN_AND_ENABLE_DBS_SCAN_WITH_ASYNC_SCAN_OFF: + is_disabled = true; + break; + default: + break; + } + + return is_disabled; +} + +uint32_t policy_mgr_mcc_to_scc_switch_mode_in_user_cfg( + struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return 0; + } + + return pm_ctx->user_cfg.mcc_to_scc_switch_mode; +} + +/** + * policy_mgr_get_dbs_config() - Get DBS bit + * + * Gets the DBS bit of fw_mode_config_bits + * + * Return: 0 or 1 to indicate the DBS bit + */ +bool policy_mgr_get_dbs_config(struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + uint32_t fw_mode_config; + + if (policy_mgr_is_dual_mac_disabled_in_ini(psoc)) + return false; + + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + /* We take that it is disabled and proceed */ + return false; + } + fw_mode_config = pm_ctx->dual_mac_cfg.cur_fw_mode_config; + + return WMI_DBS_FW_MODE_CFG_DBS_GET(fw_mode_config); +} + +/** + * policy_mgr_get_agile_dfs_config() - Get Agile DFS bit + * + * Gets the Agile DFS bit of fw_mode_config_bits + * + * Return: 0 or 1 to indicate the Agile DFS bit + */ +bool policy_mgr_get_agile_dfs_config(struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + uint32_t fw_mode_config; + + if (policy_mgr_is_dual_mac_disabled_in_ini(psoc)) + return false; + + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + /* We take that it is disabled and proceed */ + return false; + } + fw_mode_config = 
pm_ctx->dual_mac_cfg.cur_fw_mode_config; + + return WMI_DBS_FW_MODE_CFG_AGILE_DFS_GET(fw_mode_config); +} + +/** + * policy_mgr_get_dbs_scan_config() - Get DBS scan bit + * + * Gets the DBS scan bit of concurrent_scan_config_bits + * + * Return: 0 or 1 to indicate the DBS scan bit + */ +bool policy_mgr_get_dbs_scan_config(struct wlan_objmgr_psoc *psoc) +{ + uint32_t scan_config; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + if (policy_mgr_is_dual_mac_disabled_in_ini(psoc)) + return false; + + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + /* We take that it is disabled and proceed */ + return false; + } + scan_config = pm_ctx->dual_mac_cfg.cur_scan_config; + + return WMI_DBS_CONC_SCAN_CFG_DBS_SCAN_GET(scan_config); +} + +/** + * policy_mgr_get_tx_rx_ss_from_config() - Get Tx/Rx spatial + * stream from HW mode config + * @mac_ss: Config which indicates the HW mode as per 'hw_mode_ss_config' + * @tx_ss: Contains the Tx spatial stream + * @rx_ss: Contains the Rx spatial stream + * + * Returns the number of spatial streams of Tx and Rx + * + * Return: None + */ +void policy_mgr_get_tx_rx_ss_from_config(enum hw_mode_ss_config mac_ss, + uint32_t *tx_ss, uint32_t *rx_ss) +{ + switch (mac_ss) { + case HW_MODE_SS_0x0: + *tx_ss = 0; + *rx_ss = 0; + break; + case HW_MODE_SS_1x1: + *tx_ss = 1; + *rx_ss = 1; + break; + case HW_MODE_SS_2x2: + *tx_ss = 2; + *rx_ss = 2; + break; + case HW_MODE_SS_3x3: + *tx_ss = 3; + *rx_ss = 3; + break; + case HW_MODE_SS_4x4: + *tx_ss = 4; + *rx_ss = 4; + break; + default: + *tx_ss = 0; + *rx_ss = 0; + } +} + +/** + * policy_mgr_get_matching_hw_mode_index() - Get matching HW mode index + * @psoc: psoc handle + * @mac0_tx_ss: Number of tx spatial streams of MAC0 + * @mac0_rx_ss: Number of rx spatial streams of MAC0 + * @mac0_bw: Bandwidth of MAC0 of type 'hw_mode_bandwidth' + * @mac1_tx_ss: Number of tx spatial streams of MAC1 + * @mac1_rx_ss: Number of rx spatial streams of MAC1 + * @mac1_bw: 
Bandwidth of MAC1 of type 'hw_mode_bandwidth' + * @dbs: DBS capability of type 'hw_mode_dbs_capab' + * @dfs: Agile DFS capability of type 'hw_mode_agile_dfs_capab' + * @sbs: SBS capability of type 'hw_mode_sbs_capab' + * + * Fetches the HW mode index corresponding to the HW mode provided + * + * Return: Positive hw mode index in case a match is found or a negative + * value, otherwise + */ +int8_t policy_mgr_get_matching_hw_mode_index( + struct wlan_objmgr_psoc *psoc, + uint32_t mac0_tx_ss, uint32_t mac0_rx_ss, + enum hw_mode_bandwidth mac0_bw, + uint32_t mac1_tx_ss, uint32_t mac1_rx_ss, + enum hw_mode_bandwidth mac1_bw, + enum hw_mode_dbs_capab dbs, + enum hw_mode_agile_dfs_capab dfs, + enum hw_mode_sbs_capab sbs) +{ + uint32_t i; + uint32_t t_mac0_tx_ss, t_mac0_rx_ss, t_mac0_bw; + uint32_t t_mac1_tx_ss, t_mac1_rx_ss, t_mac1_bw; + uint32_t dbs_mode, agile_dfs_mode, sbs_mode; + int8_t found = -EINVAL; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return found; + } + + for (i = 0; i < pm_ctx->num_dbs_hw_modes; i++) { + t_mac0_tx_ss = POLICY_MGR_HW_MODE_MAC0_TX_STREAMS_GET( + pm_ctx->hw_mode.hw_mode_list[i]); + if (t_mac0_tx_ss < mac0_tx_ss) + continue; + + t_mac0_rx_ss = POLICY_MGR_HW_MODE_MAC0_RX_STREAMS_GET( + pm_ctx->hw_mode.hw_mode_list[i]); + if (t_mac0_rx_ss < mac0_rx_ss) + continue; + + t_mac0_bw = POLICY_MGR_HW_MODE_MAC0_BANDWIDTH_GET( + pm_ctx->hw_mode.hw_mode_list[i]); + /* + * Firmware advertises max bw capability as CBW 80+80 + * for single MAC. Thus CBW 20/40/80 should also be + * supported, if CBW 80+80 is supported. 
+ */ + if (t_mac0_bw < mac0_bw) + continue; + + t_mac1_tx_ss = POLICY_MGR_HW_MODE_MAC1_TX_STREAMS_GET( + pm_ctx->hw_mode.hw_mode_list[i]); + if (t_mac1_tx_ss < mac1_tx_ss) + continue; + + t_mac1_rx_ss = POLICY_MGR_HW_MODE_MAC1_RX_STREAMS_GET( + pm_ctx->hw_mode.hw_mode_list[i]); + if (t_mac1_rx_ss < mac1_rx_ss) + continue; + + t_mac1_bw = POLICY_MGR_HW_MODE_MAC1_BANDWIDTH_GET( + pm_ctx->hw_mode.hw_mode_list[i]); + if (t_mac1_bw < mac1_bw) + continue; + + dbs_mode = POLICY_MGR_HW_MODE_DBS_MODE_GET( + pm_ctx->hw_mode.hw_mode_list[i]); + if (dbs_mode != dbs) + continue; + + agile_dfs_mode = POLICY_MGR_HW_MODE_AGILE_DFS_GET( + pm_ctx->hw_mode.hw_mode_list[i]); + if (agile_dfs_mode != dfs) + continue; + + sbs_mode = POLICY_MGR_HW_MODE_SBS_MODE_GET( + pm_ctx->hw_mode.hw_mode_list[i]); + if (sbs_mode != sbs) + continue; + + found = i; + policy_mgr_debug("hw_mode index %d found", i); + break; + } + return found; +} + +/** + * policy_mgr_get_hw_mode_from_dbs_hw_list() - Get hw_mode index + * @mac0_ss: MAC0 spatial stream configuration + * @mac0_bw: MAC0 bandwidth configuration + * @mac1_ss: MAC1 spatial stream configuration + * @mac1_bw: MAC1 bandwidth configuration + * @dbs: HW DBS capability + * @dfs: HW Agile DFS capability + * @sbs: HW SBS capability + * + * Get the HW mode index corresponding to the HW modes spatial stream, + * bandwidth, DBS, Agile DFS and SBS capability + * + * Return: Index number if a match is found or -negative value if not found + */ +int8_t policy_mgr_get_hw_mode_idx_from_dbs_hw_list( + struct wlan_objmgr_psoc *psoc, + enum hw_mode_ss_config mac0_ss, + enum hw_mode_bandwidth mac0_bw, + enum hw_mode_ss_config mac1_ss, + enum hw_mode_bandwidth mac1_bw, + enum hw_mode_dbs_capab dbs, + enum hw_mode_agile_dfs_capab dfs, + enum hw_mode_sbs_capab sbs) +{ + uint32_t mac0_tx_ss, mac0_rx_ss; + uint32_t mac1_tx_ss, mac1_rx_ss; + + policy_mgr_get_tx_rx_ss_from_config(mac0_ss, &mac0_tx_ss, &mac0_rx_ss); + policy_mgr_get_tx_rx_ss_from_config(mac1_ss, 
&mac1_tx_ss, &mac1_rx_ss); + + policy_mgr_debug("MAC0: TxSS=%d, RxSS=%d, BW=%d", + mac0_tx_ss, mac0_rx_ss, mac0_bw); + policy_mgr_debug("MAC1: TxSS=%d, RxSS=%d, BW=%d", + mac1_tx_ss, mac1_rx_ss, mac1_bw); + policy_mgr_debug("DBS=%d, Agile DFS=%d, SBS=%d", + dbs, dfs, sbs); + + return policy_mgr_get_matching_hw_mode_index(psoc, mac0_tx_ss, + mac0_rx_ss, + mac0_bw, + mac1_tx_ss, mac1_rx_ss, + mac1_bw, + dbs, dfs, sbs); +} + +/** + * policy_mgr_get_hw_mode_from_idx() - Get HW mode based on index + * @idx: HW mode index + * @hw_mode: HW mode params + * + * Fetches the HW mode parameters + * + * Return: Success if hw mode is obtained and the hw mode params + */ +QDF_STATUS policy_mgr_get_hw_mode_from_idx( + struct wlan_objmgr_psoc *psoc, + uint32_t idx, + struct policy_mgr_hw_mode_params *hw_mode) +{ + uint32_t param; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return QDF_STATUS_E_FAILURE; + } + + if (idx > pm_ctx->num_dbs_hw_modes) { + policy_mgr_err("Invalid index"); + return QDF_STATUS_E_FAILURE; + } + + if (!pm_ctx->num_dbs_hw_modes) { + policy_mgr_err("No dbs hw modes available"); + return QDF_STATUS_E_FAILURE; + } + + param = pm_ctx->hw_mode.hw_mode_list[idx]; + + hw_mode->mac0_tx_ss = POLICY_MGR_HW_MODE_MAC0_TX_STREAMS_GET(param); + hw_mode->mac0_rx_ss = POLICY_MGR_HW_MODE_MAC0_RX_STREAMS_GET(param); + hw_mode->mac0_bw = POLICY_MGR_HW_MODE_MAC0_BANDWIDTH_GET(param); + hw_mode->mac1_tx_ss = POLICY_MGR_HW_MODE_MAC1_TX_STREAMS_GET(param); + hw_mode->mac1_rx_ss = POLICY_MGR_HW_MODE_MAC1_RX_STREAMS_GET(param); + hw_mode->mac1_bw = POLICY_MGR_HW_MODE_MAC1_BANDWIDTH_GET(param); + hw_mode->dbs_cap = POLICY_MGR_HW_MODE_DBS_MODE_GET(param); + hw_mode->agile_dfs_cap = POLICY_MGR_HW_MODE_AGILE_DFS_GET(param); + hw_mode->sbs_cap = POLICY_MGR_HW_MODE_SBS_MODE_GET(param); + + return QDF_STATUS_SUCCESS; +} + +/** + * policy_mgr_get_old_and_new_hw_index() - Get the old and new HW 
index + * @old_hw_mode_index: Value at this pointer contains the old HW mode index + * Default value when not configured is POLICY_MGR_DEFAULT_HW_MODE_INDEX + * @new_hw_mode_index: Value at this pointer contains the new HW mode index + * Default value when not configured is POLICY_MGR_DEFAULT_HW_MODE_INDEX + * + * Get the old and new HW index configured in the driver + * + * Return: Failure in case the HW mode indices cannot be fetched and Success + * otherwise. When no HW mode transition has happened the values of + * old_hw_mode_index and new_hw_mode_index will be the same. + */ +QDF_STATUS policy_mgr_get_old_and_new_hw_index( + struct wlan_objmgr_psoc *psoc, + uint32_t *old_hw_mode_index, + uint32_t *new_hw_mode_index) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return QDF_STATUS_E_INVAL; + } + + *old_hw_mode_index = pm_ctx->old_hw_mode_index; + *new_hw_mode_index = pm_ctx->new_hw_mode_index; + + return QDF_STATUS_SUCCESS; +} + +/** + * policy_mgr_update_conc_list() - Update the concurrent connection list + * @conn_index: Connection index + * @mode: Mode + * @chan: Channel + * @bw: Bandwidth + * @mac: Mac id + * @chain_mask: Chain mask + * @vdev_id: vdev id + * @in_use: Flag to indicate if the index is in use or not + * @update_conn: Flag to indicate if mode change event should + * be sent or not + * + * Updates the index value of the concurrent connection list + * + * Return: None + */ +void policy_mgr_update_conc_list(struct wlan_objmgr_psoc *psoc, + uint32_t conn_index, + enum policy_mgr_con_mode mode, + uint8_t chan, + enum hw_mode_bandwidth bw, + uint8_t mac, + enum policy_mgr_chain_mode chain_mask, + uint32_t original_nss, + uint32_t vdev_id, + bool in_use, + bool update_conn) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + bool mcc_mode; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + if 
(conn_index >= MAX_NUMBER_OF_CONC_CONNECTIONS) { + policy_mgr_err("Number of connections exceeded conn_index: %d", + conn_index); + return; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + pm_conc_connection_list[conn_index].mode = mode; + pm_conc_connection_list[conn_index].chan = chan; + pm_conc_connection_list[conn_index].bw = bw; + pm_conc_connection_list[conn_index].mac = mac; + pm_conc_connection_list[conn_index].chain_mask = chain_mask; + pm_conc_connection_list[conn_index].original_nss = original_nss; + pm_conc_connection_list[conn_index].vdev_id = vdev_id; + pm_conc_connection_list[conn_index].in_use = in_use; + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + /* + * For STA and P2P client mode, the mode change event sent as part + * of the callback causes delay in processing M1 frame at supplicant + * resulting in cert test case failure. The mode change event is sent + * as part of add key for STA and P2P client mode. + */ + if (pm_ctx->mode_change_cb && update_conn) + pm_ctx->mode_change_cb(); + + policy_mgr_dump_connection_status_info(psoc); + if (pm_ctx->cdp_cbacks.cdp_update_mac_id) + pm_ctx->cdp_cbacks.cdp_update_mac_id(psoc, vdev_id, mac); + + /* IPA only cares about STA or SAP mode */ + if (mode == PM_STA_MODE || mode == PM_SAP_MODE) { + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + mcc_mode = policy_mgr_current_concurrency_is_mcc(psoc); + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + if (pm_ctx->dp_cbacks.hdd_ipa_set_mcc_mode_cb) + pm_ctx->dp_cbacks.hdd_ipa_set_mcc_mode_cb(mcc_mode); + } +} + +/** + * policy_mgr_store_and_del_conn_info() - Store and del a connection info + * @mode: Mode whose entry has to be deleted + * @all_matching_cxn_to_del: All the specified mode entries should be deleted + * @info: Struture array pointer where the connection info will be saved + * @num_cxn_del: Number of connection which are going to be deleted + * + * Saves the connection info corresponding to the provided mode + * and deleted that 
corresponding entry based on vdev from the + * connection info structure + * + * Return: None + */ +void policy_mgr_store_and_del_conn_info(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_con_mode mode, bool all_matching_cxn_to_del, + struct policy_mgr_conc_connection_info *info, uint8_t *num_cxn_del) +{ + int32_t conn_index = 0; + uint32_t found_index = 0; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + if (!num_cxn_del) { + policy_mgr_err("num_cxn_del is NULL"); + return; + } + *num_cxn_del = 0; + if (!info) { + policy_mgr_err("Invalid connection info"); + return; + } + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + while (PM_CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) { + if (mode == pm_conc_connection_list[conn_index].mode) { + /* + * Storing the connection entry which will be + * temporarily deleted. + */ + info[found_index] = pm_conc_connection_list[conn_index]; + /* Deleting the connection entry */ + policy_mgr_decr_connection_count(psoc, + info[found_index].vdev_id); + policy_mgr_debug("Stored %d (%d), deleted STA entry with vdev id %d, index %d", + info[found_index].vdev_id, + info[found_index].mode, + info[found_index].vdev_id, conn_index); + found_index++; + if (all_matching_cxn_to_del) + continue; + else + break; + } + conn_index++; + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + if (!found_index) { + *num_cxn_del = 0; + policy_mgr_err("Mode:%d not available in the conn info", mode); + } else { + *num_cxn_del = found_index; + policy_mgr_err("Mode:%d number of conn %d temp del", + mode, *num_cxn_del); + } + + /* + * Caller should set the PCL and restore the connection entry + * in conn info. 
+ */ +} + +void policy_mgr_store_and_del_conn_info_by_vdev_id( + struct wlan_objmgr_psoc *psoc, + uint32_t vdev_id, + struct policy_mgr_conc_connection_info *info, + uint8_t *num_cxn_del) +{ + uint32_t conn_index = 0; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + if (!info || !num_cxn_del) { + policy_mgr_err("Invalid parameters"); + return; + } + *num_cxn_del = 0; + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + for (conn_index = 0; conn_index < MAX_NUMBER_OF_CONC_CONNECTIONS; + conn_index++) { + if ((pm_conc_connection_list[conn_index].vdev_id == vdev_id) && + pm_conc_connection_list[conn_index].in_use) { + *num_cxn_del = 1; + break; + } + } + /* + * Storing the connection entry which will be + * temporarily deleted. + */ + if (*num_cxn_del == 1) { + *info = pm_conc_connection_list[conn_index]; + /* Deleting the connection entry */ + policy_mgr_decr_connection_count( + psoc, + pm_conc_connection_list[conn_index].vdev_id); + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); +} + +/** + * policy_mgr_restore_deleted_conn_info() - Restore connection info + * @info: An array saving connection info that is to be restored + * @num_cxn_del: Number of connection temporary deleted + * + * Restores the connection info of STA that was saved before + * updating the PCL to the FW + * + * Return: None + */ +void policy_mgr_restore_deleted_conn_info(struct wlan_objmgr_psoc *psoc, + struct policy_mgr_conc_connection_info *info, + uint8_t num_cxn_del) +{ + uint32_t conn_index; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + if (MAX_NUMBER_OF_CONC_CONNECTIONS <= num_cxn_del || 0 == num_cxn_del) { + policy_mgr_err("Failed to restore %d/%d deleted information", + num_cxn_del, MAX_NUMBER_OF_CONC_CONNECTIONS); + return; + } + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + 
qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + conn_index = policy_mgr_get_connection_count(psoc); + if (MAX_NUMBER_OF_CONC_CONNECTIONS <= conn_index) { + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + policy_mgr_err("Failed to restore the deleted information %d/%d", + conn_index, MAX_NUMBER_OF_CONC_CONNECTIONS); + return; + } + + qdf_mem_copy(&pm_conc_connection_list[conn_index], info, + num_cxn_del * sizeof(*info)); + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + policy_mgr_debug("Restored the deleleted conn info, vdev:%d, index:%d", + info->vdev_id, conn_index); +} + +/** + * policy_mgr_update_hw_mode_conn_info() - Update connection + * info based on HW mode + * @num_vdev_mac_entries: Number of vdev-mac id entries that follow + * @vdev_mac_map: Mapping of vdev-mac id + * @hw_mode: HW mode + * + * Updates the connection info parameters based on the new HW mode + * + * Return: None + */ +void policy_mgr_update_hw_mode_conn_info(struct wlan_objmgr_psoc *psoc, + uint32_t num_vdev_mac_entries, + struct policy_mgr_vdev_mac_map *vdev_mac_map, + struct policy_mgr_hw_mode_params hw_mode) +{ + uint32_t i, conn_index, found; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + for (i = 0; i < num_vdev_mac_entries; i++) { + conn_index = 0; + found = 0; + while (PM_CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) { + if (vdev_mac_map[i].vdev_id == + pm_conc_connection_list[conn_index].vdev_id) { + found = 1; + break; + } + conn_index++; + } + if (found) { + pm_conc_connection_list[conn_index].mac = + vdev_mac_map[i].mac_id; + policy_mgr_debug("vdev:%d, mac:%d", + pm_conc_connection_list[conn_index].vdev_id, + pm_conc_connection_list[conn_index].mac); + if (pm_ctx->cdp_cbacks.cdp_update_mac_id) + pm_ctx->cdp_cbacks.cdp_update_mac_id( + psoc, + vdev_mac_map[i].vdev_id, + vdev_mac_map[i].mac_id); + } + } + 
qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + policy_mgr_dump_connection_status_info(psoc); +} + +void policy_mgr_pdev_set_hw_mode_cb(uint32_t status, + uint32_t cfgd_hw_mode_index, + uint32_t num_vdev_mac_entries, + struct policy_mgr_vdev_mac_map *vdev_mac_map, + uint8_t next_action, + enum policy_mgr_conn_update_reason reason, + uint32_t session_id, void *context) +{ + QDF_STATUS ret; + struct policy_mgr_hw_mode_params hw_mode; + uint32_t i; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(context); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + goto send_done_event; + } + + policy_mgr_set_hw_mode_change_in_progress(context, + POLICY_MGR_HW_MODE_NOT_IN_PROGRESS); + + if (status != SET_HW_MODE_STATUS_OK) { + policy_mgr_err("Set HW mode failed with status %d", status); + goto send_done_event; + } + + if (!vdev_mac_map) { + policy_mgr_err("vdev_mac_map is NULL"); + goto send_done_event; + } + + policy_mgr_debug("cfgd_hw_mode_index=%d", cfgd_hw_mode_index); + + for (i = 0; i < num_vdev_mac_entries; i++) + policy_mgr_debug("vdev_id:%d mac_id:%d", + vdev_mac_map[i].vdev_id, + vdev_mac_map[i].mac_id); + + ret = policy_mgr_get_hw_mode_from_idx(context, cfgd_hw_mode_index, + &hw_mode); + if (ret != QDF_STATUS_SUCCESS) { + policy_mgr_err("Get HW mode failed: %d", ret); + goto send_done_event; + } + + policy_mgr_debug("MAC0: TxSS:%d, RxSS:%d, Bw:%d", + hw_mode.mac0_tx_ss, hw_mode.mac0_rx_ss, hw_mode.mac0_bw); + policy_mgr_debug("MAC1: TxSS:%d, RxSS:%d, Bw:%d", + hw_mode.mac1_tx_ss, hw_mode.mac1_rx_ss, hw_mode.mac1_bw); + policy_mgr_debug("DBS:%d, Agile DFS:%d, SBS:%d", + hw_mode.dbs_cap, hw_mode.agile_dfs_cap, hw_mode.sbs_cap); + + /* update pm_conc_connection_list */ + policy_mgr_update_hw_mode_conn_info(context, num_vdev_mac_entries, + vdev_mac_map, + hw_mode); + if (pm_ctx->mode_change_cb) + pm_ctx->mode_change_cb(); + + ret = policy_mgr_set_connection_update(context); + if (!QDF_IS_STATUS_SUCCESS(ret)) + policy_mgr_err("ERROR: 
set connection_update_done event failed"); + + if (PM_NOP != next_action) + policy_mgr_next_actions(context, session_id, + next_action, reason); + else + policy_mgr_debug("No action needed right now"); + +send_done_event: + ret = policy_mgr_set_opportunistic_update(context); + if (!QDF_IS_STATUS_SUCCESS(ret)) + policy_mgr_err("ERROR: set opportunistic_update event failed"); +} + +/** + * policy_mgr_dump_current_concurrency_one_connection() - To dump the + * current concurrency info with one connection + * @cc_mode: connection string + * @length: Maximum size of the string + * + * This routine is called to dump the concurrency info + * + * Return: length of the string + */ +static uint32_t policy_mgr_dump_current_concurrency_one_connection( + char *cc_mode, uint32_t length) +{ + uint32_t count = 0; + enum policy_mgr_con_mode mode; + + mode = pm_conc_connection_list[0].mode; + + switch (mode) { + case PM_STA_MODE: + count = strlcat(cc_mode, "STA", + length); + break; + case PM_SAP_MODE: + count = strlcat(cc_mode, "SAP", + length); + break; + case PM_P2P_CLIENT_MODE: + count = strlcat(cc_mode, "P2P CLI", + length); + break; + case PM_P2P_GO_MODE: + count = strlcat(cc_mode, "P2P GO", + length); + break; + case PM_IBSS_MODE: + count = strlcat(cc_mode, "IBSS", + length); + break; + default: + policy_mgr_err("unexpected mode %d", mode); + break; + } + + return count; +} + +/** + * policy_mgr_dump_current_concurrency_two_connection() - To dump the + * current concurrency info with two connections + * @cc_mode: connection string + * @length: Maximum size of the string + * + * This routine is called to dump the concurrency info + * + * Return: length of the string + */ +static uint32_t policy_mgr_dump_current_concurrency_two_connection( + char *cc_mode, uint32_t length) +{ + uint32_t count = 0; + enum policy_mgr_con_mode mode; + + mode = pm_conc_connection_list[1].mode; + + switch (mode) { + case PM_STA_MODE: + count = policy_mgr_dump_current_concurrency_one_connection( + 
cc_mode, length); + count += strlcat(cc_mode, "+STA", + length); + break; + case PM_SAP_MODE: + count = policy_mgr_dump_current_concurrency_one_connection( + cc_mode, length); + count += strlcat(cc_mode, "+SAP", + length); + break; + case PM_P2P_CLIENT_MODE: + count = policy_mgr_dump_current_concurrency_one_connection( + cc_mode, length); + count += strlcat(cc_mode, "+P2P CLI", + length); + break; + case PM_P2P_GO_MODE: + count = policy_mgr_dump_current_concurrency_one_connection( + cc_mode, length); + count += strlcat(cc_mode, "+P2P GO", + length); + break; + case PM_IBSS_MODE: + count = policy_mgr_dump_current_concurrency_one_connection( + cc_mode, length); + count += strlcat(cc_mode, "+IBSS", + length); + break; + default: + policy_mgr_err("unexpected mode %d", mode); + break; + } + + return count; +} + +/** + * policy_mgr_dump_current_concurrency_three_connection() - To dump the + * current concurrency info with three connections + * @cc_mode: connection string + * @length: Maximum size of the string + * + * This routine is called to dump the concurrency info + * + * Return: length of the string + */ +static uint32_t policy_mgr_dump_current_concurrency_three_connection( + char *cc_mode, uint32_t length) +{ + uint32_t count = 0; + enum policy_mgr_con_mode mode; + + mode = pm_conc_connection_list[2].mode; + + switch (mode) { + case PM_STA_MODE: + count = policy_mgr_dump_current_concurrency_two_connection( + cc_mode, length); + count += strlcat(cc_mode, "+STA", + length); + break; + case PM_SAP_MODE: + count = policy_mgr_dump_current_concurrency_two_connection( + cc_mode, length); + count += strlcat(cc_mode, "+SAP", + length); + break; + case PM_P2P_CLIENT_MODE: + count = policy_mgr_dump_current_concurrency_two_connection( + cc_mode, length); + count += strlcat(cc_mode, "+P2P CLI", + length); + break; + case PM_P2P_GO_MODE: + count = policy_mgr_dump_current_concurrency_two_connection( + cc_mode, length); + count += strlcat(cc_mode, "+P2P GO", + length); + break; + 
case PM_IBSS_MODE: + count = policy_mgr_dump_current_concurrency_two_connection( + cc_mode, length); + count += strlcat(cc_mode, "+IBSS", + length); + break; + default: + policy_mgr_err("unexpected mode %d", mode); + break; + } + + return count; +} + +/** + * policy_mgr_dump_dbs_concurrency() - To dump the dbs concurrency + * combination + * @cc_mode: connection string + * + * This routine is called to dump the concurrency info + * + * Return: None + */ +static void policy_mgr_dump_dbs_concurrency(struct wlan_objmgr_psoc *psoc, + char *cc_mode, uint32_t length) +{ + char buf[4] = {0}; + uint8_t mac = 0; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + strlcat(cc_mode, " DBS", length); + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + if (pm_conc_connection_list[0].mac == + pm_conc_connection_list[1].mac) { + if (pm_conc_connection_list[0].chan == + pm_conc_connection_list[1].chan) + strlcat(cc_mode, + " with SCC for 1st two connections on mac ", + length); + else + strlcat(cc_mode, + " with MCC for 1st two connections on mac ", + length); + mac = pm_conc_connection_list[0].mac; + } + if (pm_conc_connection_list[0].mac == pm_conc_connection_list[2].mac) { + if (pm_conc_connection_list[0].chan == + pm_conc_connection_list[2].chan) + strlcat(cc_mode, + " with SCC for 1st & 3rd connections on mac ", + length); + else + strlcat(cc_mode, + " with MCC for 1st & 3rd connections on mac ", + length); + mac = pm_conc_connection_list[0].mac; + } + if (pm_conc_connection_list[1].mac == pm_conc_connection_list[2].mac) { + if (pm_conc_connection_list[1].chan == + pm_conc_connection_list[2].chan) + strlcat(cc_mode, + " with SCC for 2nd & 3rd connections on mac ", + length); + else + strlcat(cc_mode, + " with MCC for 2nd & 3rd connections on mac ", + length); + mac = pm_conc_connection_list[1].mac; + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + snprintf(buf, 
sizeof(buf), "%d ", mac); + strlcat(cc_mode, buf, length); +} + +/** + * policy_mgr_dump_current_concurrency() - To dump the current + * concurrency combination + * + * This routine is called to dump the concurrency info + * + * Return: None + */ +void policy_mgr_dump_current_concurrency(struct wlan_objmgr_psoc *psoc) +{ + uint32_t num_connections = 0; + char cc_mode[POLICY_MGR_MAX_CON_STRING_LEN] = {0}; + uint32_t count = 0; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + num_connections = policy_mgr_get_connection_count(psoc); + + switch (num_connections) { + case 1: + policy_mgr_dump_current_concurrency_one_connection(cc_mode, + sizeof(cc_mode)); + policy_mgr_err("%s Standalone", cc_mode); + break; + case 2: + count = policy_mgr_dump_current_concurrency_two_connection( + cc_mode, sizeof(cc_mode)); + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + if (pm_conc_connection_list[0].chan == + pm_conc_connection_list[1].chan) { + strlcat(cc_mode, " SCC", sizeof(cc_mode)); + } else if (pm_conc_connection_list[0].mac == + pm_conc_connection_list[1].mac) { + strlcat(cc_mode, " MCC", sizeof(cc_mode)); + } else + strlcat(cc_mode, " DBS", sizeof(cc_mode)); + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + policy_mgr_err("%s", cc_mode); + break; + case 3: + count = policy_mgr_dump_current_concurrency_three_connection( + cc_mode, sizeof(cc_mode)); + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + if ((pm_conc_connection_list[0].chan == + pm_conc_connection_list[1].chan) && + (pm_conc_connection_list[0].chan == + pm_conc_connection_list[2].chan)){ + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + strlcat(cc_mode, " SCC", + sizeof(cc_mode)); + } else if ((pm_conc_connection_list[0].mac == + pm_conc_connection_list[1].mac) + && (pm_conc_connection_list[0].mac == + pm_conc_connection_list[2].mac)) { + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + 
strlcat(cc_mode, " MCC on single MAC", + sizeof(cc_mode)); + } else { + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + policy_mgr_dump_dbs_concurrency(psoc, cc_mode, + sizeof(cc_mode)); + } + policy_mgr_err("%s", cc_mode); + break; + default: + policy_mgr_err("unexpected num_connections value %d", + num_connections); + break; + } + + return; +} + +QDF_STATUS policy_mgr_pdev_get_pcl(struct wlan_objmgr_psoc *psoc, + enum QDF_OPMODE mode, + struct policy_mgr_pcl_list *pcl) +{ + QDF_STATUS status; + enum policy_mgr_con_mode con_mode; + + pcl->pcl_len = 0; + + switch (mode) { + case QDF_STA_MODE: + con_mode = PM_STA_MODE; + break; + case QDF_P2P_CLIENT_MODE: + con_mode = PM_P2P_CLIENT_MODE; + break; + case QDF_P2P_GO_MODE: + con_mode = PM_P2P_GO_MODE; + break; + case QDF_SAP_MODE: + con_mode = PM_SAP_MODE; + break; + case QDF_IBSS_MODE: + con_mode = PM_IBSS_MODE; + break; + default: + policy_mgr_err("Unable to set PCL to FW: %d", mode); + return QDF_STATUS_E_FAILURE; + } + + policy_mgr_debug("get pcl to set it to the FW"); + + status = policy_mgr_get_pcl(psoc, con_mode, + pcl->pcl_list, &pcl->pcl_len, + pcl->weight_list, QDF_ARRAY_SIZE(pcl->weight_list)); + if (status != QDF_STATUS_SUCCESS) + policy_mgr_err("Unable to set PCL to FW, Get PCL failed"); + + return status; +} + +/** + * policy_mgr_set_pcl_for_existing_combo() - Set PCL for existing connection + * @mode: Connection mode of type 'policy_mgr_con_mode' + * + * Set the PCL for an existing connection + * + * Return: None + */ +void policy_mgr_set_pcl_for_existing_combo( + struct wlan_objmgr_psoc *psoc, enum policy_mgr_con_mode mode) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct policy_mgr_conc_connection_info + info[MAX_NUMBER_OF_CONC_CONNECTIONS] = { {0} }; + enum QDF_OPMODE pcl_mode; + uint8_t num_cxn_del = 0; + struct policy_mgr_psoc_priv_obj *pm_ctx; + struct policy_mgr_pcl_list pcl; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + 
pcl_mode = policy_mgr_get_qdf_mode_from_pm(mode); + if (pcl_mode == QDF_MAX_NO_OF_MODE) + return; + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + if (policy_mgr_mode_specific_connection_count(psoc, mode, NULL) > 0) { + /* Check, store and temp delete the mode's parameter */ + policy_mgr_store_and_del_conn_info(psoc, mode, false, + info, &num_cxn_del); + /* Set the PCL to the FW since connection got updated */ + status = policy_mgr_pdev_get_pcl(psoc, pcl_mode, &pcl); + policy_mgr_debug("Set PCL to FW for mode:%d", mode); + /* Restore the connection info */ + policy_mgr_restore_deleted_conn_info(psoc, info, num_cxn_del); + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + /* Send PCL only if policy_mgr_pdev_get_pcl returned success */ + if (QDF_IS_STATUS_SUCCESS(status)) { + status = pm_ctx->sme_cbacks.sme_pdev_set_pcl(&pcl); + if (QDF_IS_STATUS_ERROR(status)) + policy_mgr_err("Send set PCL to SME failed"); + } +} + +static uint32_t pm_get_vdev_id_of_first_conn_idx(struct wlan_objmgr_psoc *psoc) +{ + uint32_t conn_index = 0, vdev_id = 0; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return conn_index; + } + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + for (conn_index = 0; conn_index < MAX_NUMBER_OF_CONC_CONNECTIONS; + conn_index++) { + if (pm_conc_connection_list[conn_index].in_use) { + vdev_id = pm_conc_connection_list[conn_index].vdev_id; + break; + } + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + if (conn_index == MAX_NUMBER_OF_CONC_CONNECTIONS) + policy_mgr_debug("Use default vdev_id:%d for opportunistic upgrade", + vdev_id); + else + policy_mgr_debug("Use vdev_id:%d for opportunistic upgrade", + vdev_id); + + return vdev_id; +} + +/** + * pm_dbs_opportunistic_timer_handler() - handler of + * dbs_opportunistic_timer + * @data: context + * + * handler for dbs_opportunistic_timer + * + * Return: None + */ +void 
pm_dbs_opportunistic_timer_handler(void *data) +{ + enum policy_mgr_conc_next_action action = PM_NOP; + uint32_t session_id; + struct wlan_objmgr_psoc *psoc = (struct wlan_objmgr_psoc *)data; + + if (!psoc) { + policy_mgr_err("Invalid Context"); + return; + } + + /* if we still need it */ + action = policy_mgr_need_opportunistic_upgrade(psoc); + policy_mgr_debug("action:%d", action); + if (!action) + return; + session_id = pm_get_vdev_id_of_first_conn_idx(psoc); + policy_mgr_next_actions(psoc, session_id, action, + POLICY_MGR_UPDATE_REASON_OPPORTUNISTIC); +} + +/** + * policy_mgr_get_connection_for_vdev_id() - provides the + * perticular connection with the requested vdev id + * @vdev_id: vdev id of the connection + * + * This function provides the specific connection with the + * requested vdev id + * + * Return: index in the connection table + */ +static uint32_t policy_mgr_get_connection_for_vdev_id( + struct wlan_objmgr_psoc *psoc, uint32_t vdev_id) +{ + uint32_t conn_index = 0; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return conn_index; + } + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + for (conn_index = 0; conn_index < MAX_NUMBER_OF_CONC_CONNECTIONS; + conn_index++) { + if ((pm_conc_connection_list[conn_index].vdev_id == vdev_id) && + pm_conc_connection_list[conn_index].in_use) { + break; + } + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return conn_index; +} + +/** + * policy_mgr_get_mode() - Get mode from type and subtype + * @type: type + * @subtype: subtype + * + * Get the concurrency mode from the type and subtype + * of the interface + * + * Return: policy_mgr_con_mode + */ +enum policy_mgr_con_mode policy_mgr_get_mode(uint8_t type, + uint8_t subtype) +{ + enum policy_mgr_con_mode mode = PM_MAX_NUM_OF_MODE; + + if (type == WMI_VDEV_TYPE_AP) { + switch (subtype) { + case 0: + mode = PM_SAP_MODE; + break; + case 
WMI_UNIFIED_VDEV_SUBTYPE_P2P_GO: + mode = PM_P2P_GO_MODE; + break; + default: + policy_mgr_err("Unknown subtype %d for type %d", + subtype, type); + break; + } + } else if (type == WMI_VDEV_TYPE_STA) { + switch (subtype) { + case 0: + mode = PM_STA_MODE; + break; + case WMI_UNIFIED_VDEV_SUBTYPE_P2P_CLIENT: + mode = PM_P2P_CLIENT_MODE; + break; + default: + policy_mgr_err("Unknown subtype %d for type %d", + subtype, type); + break; + } + } else if (type == WMI_VDEV_TYPE_IBSS) { + mode = PM_IBSS_MODE; + } else { + policy_mgr_err("Unknown type %d", type); + } + + return mode; +} + +/** + * policy_mgr_get_bw() - Get channel bandwidth type used by WMI + * @chan_width: channel bandwidth type defined by host + * + * Get the channel bandwidth type used by WMI + * + * Return: hw_mode_bandwidth + */ +enum hw_mode_bandwidth policy_mgr_get_bw(enum phy_ch_width chan_width) +{ + enum hw_mode_bandwidth bw = HW_MODE_BW_NONE; + + switch (chan_width) { + case CH_WIDTH_20MHZ: + bw = HW_MODE_20_MHZ; + break; + case CH_WIDTH_40MHZ: + bw = HW_MODE_40_MHZ; + break; + case CH_WIDTH_80MHZ: + bw = HW_MODE_80_MHZ; + break; + case CH_WIDTH_160MHZ: + bw = HW_MODE_160_MHZ; + break; + case CH_WIDTH_80P80MHZ: + bw = HW_MODE_80_PLUS_80_MHZ; + break; + case CH_WIDTH_5MHZ: + bw = HW_MODE_5_MHZ; + break; + case CH_WIDTH_10MHZ: + bw = HW_MODE_10_MHZ; + break; + default: + policy_mgr_err("Unknown channel BW type %d", chan_width); + break; + } + + return bw; +} + +/** + * policy_mgr_get_sbs_channels() - provides the sbs channel(s) + * with respect to current connection(s) + * @channels: the channel(s) on which current connection(s) is + * @len: Number of channels + * @pcl_weight: Pointer to the weights of PCL + * @weight_len: Max length of the weight list + * @index: Index from which the weight list needs to be populated + * @group_id: Next available groups for weight assignment + * @available_5g_channels: List of available 5g channels + * @available_5g_channels_len: Length of the 5g channels list + * 
@add_5g_channels: If this flag is true append 5G channel list as well + * + * This function provides the channel(s) on which current + * connection(s) is/are + * + * Return: QDF_STATUS + */ + +static QDF_STATUS policy_mgr_get_sbs_channels(uint8_t *channels, + uint32_t *len, uint8_t *pcl_weight, uint32_t weight_len, + uint32_t *index, enum policy_mgr_pcl_group_id group_id, + uint8_t *available_5g_channels, + uint32_t available_5g_channels_len, + bool add_5g_channels) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint32_t conn_index = 0, num_channels = 0; + uint32_t num_5g_channels = 0, cur_5g_channel = 0; + uint8_t remaining_5g_Channels[QDF_MAX_NUM_CHAN] = {}; + uint32_t remaining_channel_index = 0; + uint32_t j = 0, i = 0, weight1, weight2; + + if ((NULL == channels) || (NULL == len)) { + policy_mgr_err("channels or len is NULL"); + status = QDF_STATUS_E_FAILURE; + return status; + } + + if (group_id == POLICY_MGR_PCL_GROUP_ID1_ID2) { + weight1 = WEIGHT_OF_GROUP1_PCL_CHANNELS; + weight2 = WEIGHT_OF_GROUP2_PCL_CHANNELS; + } else if (group_id == POLICY_MGR_PCL_GROUP_ID2_ID3) { + weight1 = WEIGHT_OF_GROUP2_PCL_CHANNELS; + weight2 = WEIGHT_OF_GROUP3_PCL_CHANNELS; + } else { + weight1 = WEIGHT_OF_GROUP3_PCL_CHANNELS; + weight2 = WEIGHT_OF_GROUP4_PCL_CHANNELS; + } + + policy_mgr_debug("weight1=%d weight2=%d index=%d ", + weight1, weight2, *index); + + while (PM_CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) { + if ((WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[conn_index].chan)) + && (pm_conc_connection_list[conn_index].in_use)) { + num_5g_channels++; + cur_5g_channel = + pm_conc_connection_list[conn_index].chan; + } + conn_index++; + } + + conn_index = 0; + if (num_5g_channels > 1) { + /* This case we are already in SBS so return the channels */ + while (PM_CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) { + channels[num_channels++] = + pm_conc_connection_list[conn_index++].chan; + if (*index < weight_len) + pcl_weight[(*index)++] = weight1; + } + *len = num_channels; + 
/* fix duplicate issue later */ + if (add_5g_channels) + for (j = 0; j < available_5g_channels_len; j++) + remaining_5g_Channels[ + remaining_channel_index++] = + available_5g_channels[j]; + } else { + /* Get list of valid sbs channels for the current + * connected channel + */ + for (j = 0; j < available_5g_channels_len; j++) { + if (WLAN_REG_IS_CHANNEL_VALID_5G_SBS( + cur_5g_channel, available_5g_channels[j])) { + channels[num_channels++] = + available_5g_channels[j]; + } else { + remaining_5g_Channels[ + remaining_channel_index++] = + available_5g_channels[j]; + continue; + } + if (*index < weight_len) + pcl_weight[(*index)++] = weight1; + } + *len = num_channels; + } + + if (add_5g_channels) { + qdf_mem_copy(channels+num_channels, remaining_5g_Channels, + remaining_channel_index); + *len += remaining_channel_index; + for (i = 0; ((i < remaining_channel_index) + && (i < weight_len)); i++) + pcl_weight[i] = weight2; + } + + return status; +} + + +/** + * policy_mgr_get_connection_channels() - provides the channel(s) + * on which current connection(s) is + * @channels: the channel(s) on which current connection(s) is + * @len: Number of channels + * @order: no order OR 2.4 Ghz channel followed by 5 Ghz + * channel OR 5 Ghz channel followed by 2.4 Ghz channel + * @skip_dfs_channel: if this flag is true then skip the dfs channel + * @pcl_weight: Pointer to the weights of PCL + * @weight_len: Max length of the weight list + * @index: Index from which the weight list needs to be populated + * @group_id: Next available groups for weight assignment + * + * + * This function provides the channel(s) on which current + * connection(s) is/are + * + * Return: QDF_STATUS + */ +static +QDF_STATUS policy_mgr_get_connection_channels(struct wlan_objmgr_psoc *psoc, + uint8_t *channels, + uint32_t *len, enum policy_mgr_pcl_channel_order order, + bool skip_dfs_channel, + uint8_t *pcl_weight, uint32_t weight_len, + uint32_t *index, enum policy_mgr_pcl_group_id group_id) +{ + 
QDF_STATUS status = QDF_STATUS_SUCCESS; + uint32_t conn_index = 0, num_channels = 0; + uint32_t weight1, weight2; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return status; + } + + if ((NULL == channels) || (NULL == len)) { + policy_mgr_err("channels or len is NULL"); + status = QDF_STATUS_E_FAILURE; + return status; + } + + /* POLICY_MGR_PCL_GROUP_ID1_ID2 indicates that all three weights are + * available for assignment. i.e., WEIGHT_OF_GROUP1_PCL_CHANNELS, + * WEIGHT_OF_GROUP2_PCL_CHANNELS and WEIGHT_OF_GROUP3_PCL_CHANNELS + * are all available. Since in this function only two weights are + * assigned at max, only group1 and group2 weights are considered. + * + * The other possible group id POLICY_MGR_PCL_GROUP_ID2_ID3 indicates + * group1 was assigned the weight WEIGHT_OF_GROUP1_PCL_CHANNELS and + * only weights WEIGHT_OF_GROUP2_PCL_CHANNELS and + * WEIGHT_OF_GROUP3_PCL_CHANNELS are available for further weight + * assignments. + * + * e.g., when order is POLICY_MGR_PCL_ORDER_24G_THEN_5G and group id is + * POLICY_MGR_PCL_GROUP_ID2_ID3, WEIGHT_OF_GROUP2_PCL_CHANNELS is + * assigned to 2.4GHz channels and the weight + * WEIGHT_OF_GROUP3_PCL_CHANNELS is assigned to the 5GHz channels. 
+ */ + if (group_id == POLICY_MGR_PCL_GROUP_ID1_ID2) { + weight1 = WEIGHT_OF_GROUP1_PCL_CHANNELS; + weight2 = WEIGHT_OF_GROUP2_PCL_CHANNELS; + } else { + weight1 = WEIGHT_OF_GROUP2_PCL_CHANNELS; + weight2 = WEIGHT_OF_GROUP3_PCL_CHANNELS; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + if (POLICY_MGR_PCL_ORDER_NONE == order) { + while (PM_CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) { + if (skip_dfs_channel && wlan_reg_is_dfs_ch(pm_ctx->pdev, + pm_conc_connection_list[conn_index].chan)) { + conn_index++; + } else if (*index < weight_len) { + channels[num_channels++] = + pm_conc_connection_list[conn_index++].chan; + pcl_weight[(*index)++] = weight1; + } else { + conn_index++; + } + } + *len = num_channels; + } else if (POLICY_MGR_PCL_ORDER_24G_THEN_5G == order) { + while (PM_CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) { + if (WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[conn_index].chan) + && (*index < weight_len)) { + channels[num_channels++] = + pm_conc_connection_list[conn_index++].chan; + pcl_weight[(*index)++] = weight1; + } else { + conn_index++; + } + } + conn_index = 0; + while (PM_CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) { + if (skip_dfs_channel && wlan_reg_is_dfs_ch(pm_ctx->pdev, + pm_conc_connection_list[conn_index].chan)) { + conn_index++; + } else if (WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[conn_index].chan) + && (*index < weight_len)) { + channels[num_channels++] = + pm_conc_connection_list[conn_index++].chan; + pcl_weight[(*index)++] = weight2; + } else { + conn_index++; + } + } + *len = num_channels; + } else if (POLICY_MGR_PCL_ORDER_5G_THEN_2G == order) { + while (PM_CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) { + if (skip_dfs_channel && wlan_reg_is_dfs_ch(pm_ctx->pdev, + pm_conc_connection_list[conn_index].chan)) { + conn_index++; + } else if (WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[conn_index].chan) + && (*index < weight_len)) { + channels[num_channels++] = + pm_conc_connection_list[conn_index++].chan; + 
pcl_weight[(*index)++] = weight1; + } else { + conn_index++; + } + } + conn_index = 0; + while (PM_CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) { + if (WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[conn_index].chan) + && (*index < weight_len)) { + channels[num_channels++] = + pm_conc_connection_list[conn_index++].chan; + pcl_weight[(*index)++] = weight2; + + } else { + conn_index++; + } + } + *len = num_channels; + } else { + policy_mgr_err("unknown order %d", order); + status = QDF_STATUS_E_FAILURE; + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return status; +} + +/** + * policy_mgr_set_weight_of_dfs_passive_channels_to_zero() - set weight of dfs + * and passive channels to 0 + * @psoc: pointer to soc + * @pcl_channels: preferred channel list + * @len: length of preferred channel list + * @weight_list: preferred channel weight list + * @weight_len: length of weight list + * This function set the weight of dfs and passive channels to 0 + * + * Return: None + */ +void policy_mgr_set_weight_of_dfs_passive_channels_to_zero( + struct wlan_objmgr_psoc *psoc, uint8_t *pcl_channels, + uint32_t *len, uint8_t *weight_list, uint32_t weight_len) +{ + uint8_t i; + uint32_t orig_channel_count = 0; + bool sta_sap_scc_on_dfs_chan; + uint32_t sap_count; + enum channel_state channel_state; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + sta_sap_scc_on_dfs_chan = + policy_mgr_is_sta_sap_scc_allowed_on_dfs_chan(psoc); + sap_count = policy_mgr_mode_specific_connection_count(psoc, + PM_SAP_MODE, NULL); + policy_mgr_debug("sta_sap_scc_on_dfs_chan %u, sap_count %u", + sta_sap_scc_on_dfs_chan, sap_count); + + if (!sta_sap_scc_on_dfs_chan || !sap_count) + return; + + if (len) + orig_channel_count = QDF_MIN(*len, QDF_MAX_NUM_CHAN); + else { + policy_mgr_err("invalid number of channel length"); + return; + } + + policy_mgr_debug("Set weight of DFS/passive channels to 
0");

	for (i = 0; i < orig_channel_count; i++) {
		channel_state = reg_get_channel_state(pm_ctx->pdev,
				pcl_channels[i]);
		if ((channel_state == CHANNEL_STATE_DISABLE) ||
			(channel_state == CHANNEL_STATE_INVALID))
			/* Set weight of inactive channels to 0 */
			weight_list[i] = 0;

		policy_mgr_debug("chan[%d] - %d, weight[%d] - %d",
			i, pcl_channels[i], i, weight_list[i]);
	}

	return;
}

/**
 * policy_mgr_get_channel_list() - provides the channel list
 * suggestion for new connection
 * @psoc: PSOC object information
 * @pcl: The preferred channel list enum
 * @pcl_channels: PCL channels
 * @len: length of the PCL
 * @mode: concurrency mode for which channel list is requested
 * @pcl_weights: Weights of the PCL
 * @weight_len: Max length of the weight list
 *
 * This function provides the actual channel list based on the
 * current regulatory domain derived using preferred channel
 * list enum obtained from one of the pcl_table
 *
 * Return: Channel List
 */
QDF_STATUS policy_mgr_get_channel_list(struct wlan_objmgr_psoc *psoc,
			enum policy_mgr_pcl_type pcl,
			uint8_t *pcl_channels, uint32_t *len,
			enum policy_mgr_con_mode mode,
			uint8_t *pcl_weights, uint32_t weight_len)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	uint32_t num_channels = 0;
	uint32_t sbs_num_channels = 0;
	uint32_t chan_index = 0, chan_index_24 = 0, chan_index_5 = 0;
	uint8_t channel_list[QDF_MAX_NUM_CHAN] = {0};
	uint8_t channel_list_24[QDF_MAX_NUM_CHAN] = {0};
	uint8_t channel_list_5[QDF_MAX_NUM_CHAN] = {0};
	uint8_t sbs_channel_list[QDF_MAX_NUM_CHAN] = {0};
	bool skip_dfs_channel = false;
	bool is_etsi13_srd_chan_allowed_in_mas_mode = true;
	uint32_t i = 0, j = 0;
	struct policy_mgr_psoc_priv_obj *pm_ctx;
	bool sta_sap_scc_on_dfs_chan;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return status;
	}

	/* output buffers are mandatory; weights length is checked later */
	if ((NULL == pcl_channels) || (NULL == len)) {
		policy_mgr_err("pcl_channels or len is NULL");
		return status;
	}

	if
(PM_MAX_PCL_TYPE == pcl) { + /* msg */ + policy_mgr_err("pcl is invalid"); + return status; + } + + if (PM_NONE == pcl) { + /* msg */ + policy_mgr_debug("pcl is 0"); + return QDF_STATUS_SUCCESS; + } + /* get the channel list for current domain */ + status = policy_mgr_get_valid_chans(psoc, channel_list, &num_channels); + if (QDF_IS_STATUS_ERROR(status)) { + policy_mgr_err("Error in getting valid channels"); + return status; + } + + /* + * if you have atleast one STA connection then don't fill DFS channels + * in the preferred channel list + */ + sta_sap_scc_on_dfs_chan = + policy_mgr_is_sta_sap_scc_allowed_on_dfs_chan(psoc); + policy_mgr_debug("sta_sap_scc_on_dfs_chan %u", sta_sap_scc_on_dfs_chan); + if ((mode == PM_SAP_MODE) || (mode == PM_P2P_GO_MODE)) { + if ((policy_mgr_mode_specific_connection_count(psoc, + PM_STA_MODE, + NULL) > 0) && + (!sta_sap_scc_on_dfs_chan)) { + policy_mgr_debug("skip DFS ch from pcl for SAP/Go"); + skip_dfs_channel = true; + } + is_etsi13_srd_chan_allowed_in_mas_mode = + wlan_reg_is_etsi13_srd_chan_allowed_master_mode(pm_ctx-> + pdev); + } + + /* Let's divide the list in 2.4 & 5 Ghz lists */ + while ((chan_index < QDF_MAX_NUM_CHAN) && + (channel_list[chan_index] <= 11) && + (chan_index_24 < QDF_MAX_NUM_CHAN)) + channel_list_24[chan_index_24++] = channel_list[chan_index++]; + if ((chan_index < QDF_MAX_NUM_CHAN) && + (channel_list[chan_index] == 12) && + (chan_index_24 < QDF_MAX_NUM_CHAN)) { + channel_list_24[chan_index_24++] = channel_list[chan_index++]; + if ((chan_index < QDF_MAX_NUM_CHAN) && + (channel_list[chan_index] == 13) && + (chan_index_24 < QDF_MAX_NUM_CHAN)) { + channel_list_24[chan_index_24++] = + channel_list[chan_index++]; + if ((chan_index < QDF_MAX_NUM_CHAN) && + (channel_list[chan_index] == 14) && + (chan_index_24 < QDF_MAX_NUM_CHAN)) + channel_list_24[chan_index_24++] = + channel_list[chan_index++]; + } + } + + while ((chan_index < num_channels) && + (chan_index < QDF_MAX_NUM_CHAN) && + (chan_index_5 < 
QDF_MAX_NUM_CHAN)) { + if ((true == skip_dfs_channel) && + wlan_reg_is_dfs_ch(pm_ctx->pdev, + channel_list[chan_index])) { + chan_index++; + continue; + } + if (!is_etsi13_srd_chan_allowed_in_mas_mode && + wlan_reg_is_etsi13_srd_chan(pm_ctx->pdev, + channel_list[chan_index])) { + chan_index++; + continue; + } + channel_list_5[chan_index_5++] = channel_list[chan_index++]; + } + + num_channels = 0; + sbs_num_channels = 0; + /* In the below switch case, the channel list is populated based on the + * pcl. e.g., if the pcl is PM_SCC_CH_24G, the SCC channel group is + * populated first followed by the 2.4GHz channel group. Along with + * this, the weights are also populated in the same order for each of + * these groups. There are three weight groups: + * WEIGHT_OF_GROUP1_PCL_CHANNELS, WEIGHT_OF_GROUP2_PCL_CHANNELS and + * WEIGHT_OF_GROUP3_PCL_CHANNELS. + * + * e.g., if pcl is PM_SCC_ON_5_SCC_ON_24_24G: scc on 5GHz (group1) + * channels take the weight WEIGHT_OF_GROUP1_PCL_CHANNELS, scc on 2.4GHz + * (group2) channels take the weight WEIGHT_OF_GROUP2_PCL_CHANNELS and + * 2.4GHz (group3) channels take the weight + * WEIGHT_OF_GROUP3_PCL_CHANNELS. + * + * When the weight to be assigned to the group is known along with the + * number of channels, the weights are directly assigned to the + * pcl_weights list. But, the channel list is populated using + * policy_mgr_get_connection_channels(), the order of weights to be used + * is passed as an argument to the function + * policy_mgr_get_connection_channels() using + * 'enum policy_mgr_pcl_group_id' which indicates the next available + * weights to be used and policy_mgr_get_connection_channels() will take + * care of the weight assignments. 
+ * + * e.g., 'enum policy_mgr_pcl_group_id' value of + * POLICY_MGR_PCL_GROUP_ID2_ID3 indicates that the next available groups + * for weight assignment are WEIGHT_OF_GROUP2_PCL_CHANNELS and + * WEIGHT_OF_GROUP3_PCL_CHANNELS and that the + * weight WEIGHT_OF_GROUP1_PCL_CHANNELS was already allocated. + * So, in the same example, when order is + * POLICY_MGR_PCL_ORDER_24G_THEN_5G, + * policy_mgr_get_connection_channels() will assign the weight + * WEIGHT_OF_GROUP2_PCL_CHANNELS to 2.4GHz channels and assign the + * weight WEIGHT_OF_GROUP3_PCL_CHANNELS to 5GHz channels. + */ + switch (pcl) { + case PM_24G: + chan_index_24 = QDF_MIN(chan_index_24, weight_len); + qdf_mem_copy(pcl_channels, channel_list_24, + chan_index_24); + *len = chan_index_24; + for (i = 0; i < *len; i++) + pcl_weights[i] = WEIGHT_OF_GROUP1_PCL_CHANNELS; + status = QDF_STATUS_SUCCESS; + break; + case PM_5G: + chan_index_5 = QDF_MIN(chan_index_5, weight_len); + qdf_mem_copy(pcl_channels, channel_list_5, + chan_index_5); + *len = chan_index_5; + for (i = 0; i < *len; i++) + pcl_weights[i] = WEIGHT_OF_GROUP1_PCL_CHANNELS; + status = QDF_STATUS_SUCCESS; + break; + case PM_SCC_CH: + case PM_MCC_CH: + policy_mgr_get_connection_channels(psoc, + channel_list, &num_channels, POLICY_MGR_PCL_ORDER_NONE, + skip_dfs_channel, pcl_weights, weight_len, &i, + POLICY_MGR_PCL_GROUP_ID1_ID2); + qdf_mem_copy(pcl_channels, channel_list, num_channels); + *len = num_channels; + status = QDF_STATUS_SUCCESS; + break; + case PM_SCC_CH_24G: + case PM_MCC_CH_24G: + policy_mgr_get_connection_channels(psoc, + channel_list, &num_channels, POLICY_MGR_PCL_ORDER_NONE, + skip_dfs_channel, pcl_weights, weight_len, &i, + POLICY_MGR_PCL_GROUP_ID1_ID2); + qdf_mem_copy(pcl_channels, channel_list, num_channels); + *len = num_channels; + chan_index_24 = QDF_MIN((num_channels + chan_index_24), + weight_len) - num_channels; + qdf_mem_copy(&pcl_channels[num_channels], + channel_list_24, chan_index_24); + *len += chan_index_24; + for (j = 0; j 
< chan_index_24; i++, j++) + pcl_weights[i] = WEIGHT_OF_GROUP2_PCL_CHANNELS; + + status = QDF_STATUS_SUCCESS; + break; + case PM_SCC_CH_5G: + case PM_MCC_CH_5G: + policy_mgr_get_connection_channels(psoc, + channel_list, &num_channels, POLICY_MGR_PCL_ORDER_NONE, + skip_dfs_channel, pcl_weights, weight_len, &i, + POLICY_MGR_PCL_GROUP_ID1_ID2); + qdf_mem_copy(pcl_channels, channel_list, + num_channels); + *len = num_channels; + chan_index_5 = QDF_MIN((num_channels + chan_index_5), + weight_len) - num_channels; + qdf_mem_copy(&pcl_channels[num_channels], + channel_list_5, chan_index_5); + *len += chan_index_5; + for (j = 0; j < chan_index_5; i++, j++) + pcl_weights[i] = WEIGHT_OF_GROUP2_PCL_CHANNELS; + status = QDF_STATUS_SUCCESS; + break; + case PM_24G_SCC_CH: + case PM_24G_MCC_CH: + chan_index_24 = QDF_MIN(chan_index_24, weight_len); + qdf_mem_copy(pcl_channels, channel_list_24, + chan_index_24); + *len = chan_index_24; + for (i = 0; i < chan_index_24; i++) + pcl_weights[i] = WEIGHT_OF_GROUP1_PCL_CHANNELS; + policy_mgr_get_connection_channels(psoc, + channel_list, &num_channels, POLICY_MGR_PCL_ORDER_NONE, + skip_dfs_channel, pcl_weights, weight_len, &i, + POLICY_MGR_PCL_GROUP_ID2_ID3); + qdf_mem_copy(&pcl_channels[chan_index_24], + channel_list, num_channels); + *len += num_channels; + status = QDF_STATUS_SUCCESS; + break; + case PM_5G_SCC_CH: + case PM_5G_MCC_CH: + chan_index_5 = QDF_MIN(chan_index_5, weight_len); + qdf_mem_copy(pcl_channels, channel_list_5, + chan_index_5); + *len = chan_index_5; + for (i = 0; i < chan_index_5; i++) + pcl_weights[i] = WEIGHT_OF_GROUP1_PCL_CHANNELS; + policy_mgr_get_connection_channels(psoc, + channel_list, &num_channels, POLICY_MGR_PCL_ORDER_NONE, + skip_dfs_channel, pcl_weights, weight_len, &i, + POLICY_MGR_PCL_GROUP_ID2_ID3); + qdf_mem_copy(&pcl_channels[chan_index_5], + channel_list, num_channels); + *len += num_channels; + status = QDF_STATUS_SUCCESS; + break; + case PM_SCC_ON_24_SCC_ON_5: + 
policy_mgr_get_connection_channels(psoc, + channel_list, &num_channels, + POLICY_MGR_PCL_ORDER_24G_THEN_5G, + skip_dfs_channel, pcl_weights, weight_len, &i, + POLICY_MGR_PCL_GROUP_ID1_ID2); + qdf_mem_copy(pcl_channels, channel_list, + num_channels); + *len = num_channels; + status = QDF_STATUS_SUCCESS; + break; + case PM_SCC_ON_5_SCC_ON_24: + policy_mgr_get_connection_channels(psoc, + channel_list, &num_channels, + POLICY_MGR_PCL_ORDER_5G_THEN_2G, + skip_dfs_channel, pcl_weights, weight_len, &i, + POLICY_MGR_PCL_GROUP_ID1_ID2); + qdf_mem_copy(pcl_channels, channel_list, num_channels); + *len = num_channels; + status = QDF_STATUS_SUCCESS; + break; + case PM_SCC_ON_24_SCC_ON_5_24G: + policy_mgr_get_connection_channels(psoc, + channel_list, &num_channels, + POLICY_MGR_PCL_ORDER_24G_THEN_5G, + skip_dfs_channel, pcl_weights, weight_len, &i, + POLICY_MGR_PCL_GROUP_ID1_ID2); + qdf_mem_copy(pcl_channels, channel_list, num_channels); + *len = num_channels; + chan_index_24 = QDF_MIN((num_channels + chan_index_24), + weight_len) - num_channels; + qdf_mem_copy(&pcl_channels[num_channels], + channel_list_24, chan_index_24); + *len += chan_index_24; + for (j = 0; j < chan_index_24; i++, j++) + pcl_weights[i] = WEIGHT_OF_GROUP3_PCL_CHANNELS; + status = QDF_STATUS_SUCCESS; + break; + case PM_SCC_ON_24_SCC_ON_5_5G: + policy_mgr_get_connection_channels(psoc, + channel_list, &num_channels, + POLICY_MGR_PCL_ORDER_24G_THEN_5G, + skip_dfs_channel, pcl_weights, weight_len, &i, + POLICY_MGR_PCL_GROUP_ID1_ID2); + qdf_mem_copy(pcl_channels, channel_list, num_channels); + *len = num_channels; + chan_index_5 = QDF_MIN((num_channels + chan_index_5), + weight_len) - num_channels; + qdf_mem_copy(&pcl_channels[num_channels], + channel_list_5, chan_index_5); + *len += chan_index_5; + for (j = 0; j < chan_index_5; i++, j++) + pcl_weights[i] = WEIGHT_OF_GROUP3_PCL_CHANNELS; + status = QDF_STATUS_SUCCESS; + break; + case PM_SCC_ON_5_SCC_ON_24_24G: + policy_mgr_get_connection_channels(psoc, + 
channel_list, &num_channels, + POLICY_MGR_PCL_ORDER_5G_THEN_2G, + skip_dfs_channel, pcl_weights, weight_len, &i, + POLICY_MGR_PCL_GROUP_ID1_ID2); + qdf_mem_copy(pcl_channels, channel_list, num_channels); + *len = num_channels; + chan_index_24 = QDF_MIN((num_channels + chan_index_24), + weight_len) - num_channels; + qdf_mem_copy(&pcl_channels[num_channels], + channel_list_24, chan_index_24); + *len += chan_index_24; + for (j = 0; j < chan_index_24; i++, j++) + pcl_weights[i] = WEIGHT_OF_GROUP3_PCL_CHANNELS; + status = QDF_STATUS_SUCCESS; + break; + case PM_SCC_ON_5_SCC_ON_24_5G: + policy_mgr_get_connection_channels(psoc, + channel_list, &num_channels, + POLICY_MGR_PCL_ORDER_5G_THEN_2G, + skip_dfs_channel, pcl_weights, weight_len, &i, + POLICY_MGR_PCL_GROUP_ID1_ID2); + qdf_mem_copy(pcl_channels, channel_list, num_channels); + *len = num_channels; + chan_index_5 = QDF_MIN((num_channels + chan_index_5), + weight_len) - num_channels; + qdf_mem_copy(&pcl_channels[num_channels], + channel_list_5, chan_index_5); + *len += chan_index_5; + for (j = 0; j < chan_index_5; i++, j++) + pcl_weights[i] = WEIGHT_OF_GROUP3_PCL_CHANNELS; + status = QDF_STATUS_SUCCESS; + break; + case PM_24G_SCC_CH_SBS_CH: + qdf_mem_copy(pcl_channels, channel_list_24, + chan_index_24); + *len = chan_index_24; + for (i = 0; ((i < chan_index_24) && (i < weight_len)); i++) + pcl_weights[i] = WEIGHT_OF_GROUP1_PCL_CHANNELS; + policy_mgr_get_connection_channels(psoc, + channel_list, &num_channels, POLICY_MGR_PCL_ORDER_NONE, + skip_dfs_channel, pcl_weights, weight_len, &i, + POLICY_MGR_PCL_GROUP_ID2_ID3); + qdf_mem_copy(&pcl_channels[chan_index_24], + channel_list, num_channels); + *len += num_channels; + if (policy_mgr_is_hw_sbs_capable(psoc)) { + policy_mgr_get_sbs_channels( + sbs_channel_list, &sbs_num_channels, pcl_weights, + weight_len, &i, POLICY_MGR_PCL_GROUP_ID3_ID4, + channel_list_5, chan_index_5, false); + qdf_mem_copy( + &pcl_channels[chan_index_24 + num_channels], + sbs_channel_list, 
sbs_num_channels); + *len += sbs_num_channels; + } + status = QDF_STATUS_SUCCESS; + break; + case PM_24G_SCC_CH_SBS_CH_5G: + qdf_mem_copy(pcl_channels, channel_list_24, + chan_index_24); + *len = chan_index_24; + for (i = 0; ((i < chan_index_24) && (i < weight_len)); i++) + pcl_weights[i] = WEIGHT_OF_GROUP1_PCL_CHANNELS; + policy_mgr_get_connection_channels(psoc, + channel_list, &num_channels, POLICY_MGR_PCL_ORDER_NONE, + skip_dfs_channel, pcl_weights, weight_len, &i, + POLICY_MGR_PCL_GROUP_ID2_ID3); + qdf_mem_copy(&pcl_channels[chan_index_24], + channel_list, num_channels); + *len += num_channels; + if (policy_mgr_is_hw_sbs_capable(psoc)) { + policy_mgr_get_sbs_channels( + sbs_channel_list, &sbs_num_channels, pcl_weights, + weight_len, &i, POLICY_MGR_PCL_GROUP_ID3_ID4, + channel_list_5, chan_index_5, true); + qdf_mem_copy( + &pcl_channels[chan_index_24 + num_channels], + sbs_channel_list, sbs_num_channels); + *len += sbs_num_channels; + } else { + qdf_mem_copy( + &pcl_channels[chan_index_24 + num_channels], + channel_list_5, chan_index_5); + *len += chan_index_5; + for (i = chan_index_24 + num_channels; + ((i < *len) && (i < weight_len)); i++) + pcl_weights[i] = WEIGHT_OF_GROUP3_PCL_CHANNELS; + } + status = QDF_STATUS_SUCCESS; + break; + case PM_24G_SBS_CH_MCC_CH: + qdf_mem_copy(pcl_channels, channel_list_24, + chan_index_24); + *len = chan_index_24; + for (i = 0; ((i < chan_index_24) && (i < weight_len)); i++) + pcl_weights[i] = WEIGHT_OF_GROUP1_PCL_CHANNELS; + if (policy_mgr_is_hw_sbs_capable(psoc)) { + policy_mgr_get_sbs_channels( + sbs_channel_list, &sbs_num_channels, pcl_weights, + weight_len, &i, POLICY_MGR_PCL_GROUP_ID2_ID3, + channel_list_5, chan_index_5, false); + qdf_mem_copy(&pcl_channels[num_channels], + sbs_channel_list, sbs_num_channels); + *len += sbs_num_channels; + } + policy_mgr_get_connection_channels(psoc, + channel_list, &num_channels, POLICY_MGR_PCL_ORDER_NONE, + skip_dfs_channel, pcl_weights, weight_len, &i, + POLICY_MGR_PCL_GROUP_ID2_ID3); 
+ qdf_mem_copy(&pcl_channels[chan_index_24], + channel_list, num_channels); + *len += num_channels; + status = QDF_STATUS_SUCCESS; + break; + case PM_SBS_CH_5G: + if (policy_mgr_is_hw_sbs_capable(psoc)) { + policy_mgr_get_sbs_channels( + sbs_channel_list, &sbs_num_channels, pcl_weights, + weight_len, &i, POLICY_MGR_PCL_GROUP_ID1_ID2, + channel_list_5, chan_index_5, true); + qdf_mem_copy(&pcl_channels[num_channels], + sbs_channel_list, sbs_num_channels); + *len += sbs_num_channels; + } else { + qdf_mem_copy(pcl_channels, channel_list_5, + chan_index_5); + *len = chan_index_5; + for (i = 0; ((i < *len) && (i < weight_len)); i++) + pcl_weights[i] = WEIGHT_OF_GROUP1_PCL_CHANNELS; + } + status = QDF_STATUS_SUCCESS; + break; + default: + policy_mgr_err("unknown pcl value %d", pcl); + break; + } + + if ((*len != 0) && (*len != i)) + policy_mgr_debug("pcl len (%d) and weight list len mismatch (%d)", + *len, i); + + /* check the channel avoidance list for beaconing entities */ + if ((mode == PM_SAP_MODE) || (mode == PM_P2P_GO_MODE)) + policy_mgr_update_with_safe_channel_list(psoc, pcl_channels, + len, pcl_weights, + weight_len); + + policy_mgr_set_weight_of_dfs_passive_channels_to_zero(psoc, + pcl_channels, len, pcl_weights, weight_len); + return status; +} + +/** + * policy_mgr_disallow_mcc() - Check for mcc + * + * @channel: channel on which new connection is coming up + * + * When a new connection is about to come up check if current + * concurrency combination including the new connection is + * causing MCC + * + * Return: True/False + */ +bool policy_mgr_disallow_mcc(struct wlan_objmgr_psoc *psoc, + uint8_t channel) +{ + uint32_t index = 0; + bool match = false; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return match; + } + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + while (PM_CONC_CONNECTION_LIST_VALID_INDEX(index)) { + if (policy_mgr_is_hw_dbs_capable(psoc) == 
false) { + if (pm_conc_connection_list[index].chan != + channel) { + match = true; + break; + } + } else if (WLAN_REG_IS_5GHZ_CH + (pm_conc_connection_list[index].chan)) { + if (pm_conc_connection_list[index].chan != channel) { + match = true; + break; + } + } + index++; + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return match; +} + +/** + * policy_mgr_allow_new_home_channel() - Check for allowed number of + * home channels + * @channel: channel on which new connection is coming up + * @num_connections: number of current connections + * + * When a new connection is about to come up check if current + * concurrency combination including the new connection is + * allowed or not based on the HW capability + * + * Return: True/False + */ +bool policy_mgr_allow_new_home_channel(struct wlan_objmgr_psoc *psoc, + uint8_t channel, uint32_t num_connections) +{ + bool status = true; + struct policy_mgr_psoc_priv_obj *pm_ctx; + uint32_t mcc_to_scc_switch; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return false; + } + mcc_to_scc_switch = + policy_mgr_mcc_to_scc_switch_mode_in_user_cfg(psoc); + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + if (num_connections == 2) { + /* No SCC or MCC combination is allowed with / on DFS channel */ + if ((mcc_to_scc_switch == + QDF_MCC_TO_SCC_SWITCH_FORCE_PREFERRED_WITHOUT_DISCONNECTION) + && wlan_reg_is_dfs_ch(pm_ctx->pdev, channel) && + (wlan_reg_is_dfs_ch(pm_ctx->pdev, + pm_conc_connection_list[0].chan) || + wlan_reg_is_dfs_ch(pm_ctx->pdev, + pm_conc_connection_list[1].chan))) { + + policy_mgr_err("Existing DFS connection, new 3-port DFS connection is not allowed"); + status = false; + + } else if (((pm_conc_connection_list[0].chan != + pm_conc_connection_list[1].chan) + || (mcc_to_scc_switch == + QDF_MCC_TO_SCC_SWITCH_FORCE_PREFERRED_WITHOUT_DISCONNECTION) + ) && (pm_conc_connection_list[0].mac == + pm_conc_connection_list[1].mac)) { + if 
(policy_mgr_is_hw_dbs_capable(psoc) == false) { + if ((channel != + pm_conc_connection_list[0].chan) && + (channel != + pm_conc_connection_list[1].chan)) { + policy_mgr_err("don't allow 3rd home channel on same MAC"); + status = false; + } + } else if (((WLAN_REG_IS_24GHZ_CH(channel)) && + (WLAN_REG_IS_24GHZ_CH + (pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_24GHZ_CH + (pm_conc_connection_list[1].chan))) || + ((WLAN_REG_IS_5GHZ_CH(channel)) && + (WLAN_REG_IS_5GHZ_CH + (pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_5GHZ_CH + (pm_conc_connection_list[1].chan)))) { + policy_mgr_err("don't allow 3rd home channel on same MAC"); + status = false; + } + } + } else if ((num_connections == 1) + && (mcc_to_scc_switch == + QDF_MCC_TO_SCC_SWITCH_FORCE_PREFERRED_WITHOUT_DISCONNECTION) + && wlan_reg_is_dfs_ch(pm_ctx->pdev, channel) + && wlan_reg_is_dfs_ch(pm_ctx->pdev, + pm_conc_connection_list[0].chan)) { + + policy_mgr_err("Existing DFS connection, new 2-port DFS connection is not allowed"); + status = false; + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return status; +} + +/** + * policy_mgr_is_5g_channel_allowed() - check if 5g channel is allowed + * @channel: channel number which needs to be validated + * @list: list of existing connections. + * @mode: mode against which channel needs to be validated + * + * This API takes the channel as input and compares with existing + * connection channels. 
If existing connection's channel is DFS channel
 * and provided channel is 5G channel then don't allow concurrency to
 * happen as MCC with DFS channel is not yet supported
 *
 * Return: true if 5G channel is allowed, false if not allowed
 *
 */
bool policy_mgr_is_5g_channel_allowed(struct wlan_objmgr_psoc *psoc,
				uint8_t channel, uint32_t *list,
				enum policy_mgr_con_mode mode)
{
	uint32_t index = 0, count = 0;
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return false;
	}

	count = policy_mgr_mode_specific_connection_count(psoc, mode, list);
	qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
	while (index < count) {
		/*
		 * Reject only a true MCC case: existing @mode connection on
		 * a DFS channel while the new 5G channel differs from it.
		 */
		if (wlan_reg_is_dfs_ch(
			pm_ctx->pdev,
			pm_conc_connection_list[list[index]].chan) &&
			WLAN_REG_IS_5GHZ_CH(channel) &&
			(channel != pm_conc_connection_list[list[index]].chan)) {
			/* release before the early return to avoid deadlock */
			qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);
			policy_mgr_err("don't allow MCC if SAP/GO on DFS channel");
			return false;
		}
		index++;
	}
	qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);

	return true;
}

/**
 * policy_mgr_nss_update_cb() - callback from SME confirming nss
 * update
 * @psoc: PSOC object information
 * @tx_status: tx completion status for updated beacon with new
 * nss value
 * @vdev_id: vdev id for the specific connection
 * @next_action: next action to happen at policy mgr after
 * beacon update
 * @reason: Reason for nss update
 * @original_vdev_id: original request hwmode change vdev id
 *
 * This function is the callback registered with SME at nss
 * update request time
 *
 * Return: None
 */
static void policy_mgr_nss_update_cb(struct wlan_objmgr_psoc *psoc,
			uint8_t tx_status,
			uint8_t vdev_id,
			uint8_t next_action,
			enum policy_mgr_conn_update_reason reason,
			uint32_t original_vdev_id)
{
	uint32_t conn_index = 0;
	QDF_STATUS ret;

	if (QDF_STATUS_SUCCESS != tx_status)
		policy_mgr_err("nss update failed(%d)
for vdev %d", + tx_status, vdev_id); + + /* + * Check if we are ok to request for HW mode change now + */ + conn_index = policy_mgr_get_connection_for_vdev_id(psoc, vdev_id); + if (MAX_NUMBER_OF_CONC_CONNECTIONS == conn_index) { + policy_mgr_err("connection not found for vdev %d", vdev_id); + return; + } + + policy_mgr_debug("nss update successful for vdev:%d ori %d reason %d", + vdev_id, original_vdev_id, reason); + if (PM_NOP != next_action) + policy_mgr_next_actions(psoc, original_vdev_id, next_action, + reason); + else { + policy_mgr_debug("No action needed right now"); + ret = policy_mgr_set_opportunistic_update(psoc); + if (!QDF_IS_STATUS_SUCCESS(ret)) + policy_mgr_err("ERROR: set opportunistic_update event failed"); + } + + return; +} + +QDF_STATUS policy_mgr_nss_update(struct wlan_objmgr_psoc *psoc, + uint8_t new_nss, uint8_t next_action, + enum policy_mgr_conn_update_reason reason, + uint32_t original_vdev_id) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + uint32_t index, count; + uint32_t list[MAX_NUMBER_OF_CONC_CONNECTIONS]; + uint32_t conn_index = 0; + uint32_t vdev_id; + uint32_t original_nss; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return status; + } + + count = policy_mgr_mode_specific_connection_count(psoc, + PM_P2P_GO_MODE, list); + for (index = 0; index < count; index++) { + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + vdev_id = pm_conc_connection_list[list[index]].vdev_id; + original_nss = + pm_conc_connection_list[list[index]].original_nss; + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + conn_index = policy_mgr_get_connection_for_vdev_id( + psoc, vdev_id); + if (MAX_NUMBER_OF_CONC_CONNECTIONS == conn_index) { + policy_mgr_err("connection not found for vdev %d", + vdev_id); + continue; + } + + if (2 == original_nss) { + status = pm_ctx->sme_cbacks.sme_nss_update_request( + vdev_id, new_nss, + policy_mgr_nss_update_cb, + next_action, 
psoc, reason,
+					original_vdev_id);
+			if (!QDF_IS_STATUS_SUCCESS(status)) {
+				policy_mgr_err("sme_nss_update_request() failed for vdev %d",
+				vdev_id);
+			}
+		}
+	}
+
+	count = policy_mgr_mode_specific_connection_count(psoc,
+			PM_SAP_MODE, list);
+	for (index = 0; index < count; index++) {
+		qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
+		vdev_id = pm_conc_connection_list[list[index]].vdev_id;
+		original_nss =
+		pm_conc_connection_list[list[index]].original_nss;
+		qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);
+		conn_index = policy_mgr_get_connection_for_vdev_id(
+			psoc, vdev_id);
+		if (MAX_NUMBER_OF_CONC_CONNECTIONS == conn_index) {
+			policy_mgr_err("connection not found for vdev %d",
+				vdev_id);
+			continue;
+		}
+		if (2 == original_nss) {
+			status = pm_ctx->sme_cbacks.sme_nss_update_request(
+					vdev_id, new_nss,
+					policy_mgr_nss_update_cb,
+					next_action, psoc, reason,
+					original_vdev_id);
+			if (!QDF_IS_STATUS_SUCCESS(status)) {
+				policy_mgr_err("sme_nss_update_request() failed for vdev %d",
+				vdev_id);
+			}
+		}
+	}
+
+	return status;
+}
+
+/**
+ * policy_mgr_complete_action() - initiates actions needed on
+ * current connections once channel has been decided for the new
+ * connection
+ * @new_nss: the new nss value
+ * @next_action: next action to happen at policy mgr after
+ * beacon update
+ * @reason: Reason for connection update
+ * @session_id: Session id
+ *
+ * This function initiates actions
+ * needed on current connections once channel has been decided
+ * for the new connection. 
Notifies UMAC & FW as well + * + * Return: QDF_STATUS enum + */ +QDF_STATUS policy_mgr_complete_action(struct wlan_objmgr_psoc *psoc, + uint8_t new_nss, uint8_t next_action, + enum policy_mgr_conn_update_reason reason, + uint32_t session_id) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (policy_mgr_is_hw_dbs_capable(psoc) == false) { + policy_mgr_err("driver isn't dbs capable, no further action needed"); + return QDF_STATUS_E_NOSUPPORT; + } + + /* policy_mgr_complete_action() is called by policy_mgr_next_actions(). + * All other callers of policy_mgr_next_actions() have taken mutex + * protection. So, not taking any lock inside + * policy_mgr_complete_action() during pm_conc_connection_list access. + */ + + status = policy_mgr_nss_update(psoc, new_nss, next_action, reason, + session_id); + if (!QDF_IS_STATUS_SUCCESS(status)) + status = policy_mgr_next_actions(psoc, session_id, + next_action, reason); + + return status; +} + +enum policy_mgr_con_mode policy_mgr_get_mode_by_vdev_id( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id) +{ + enum policy_mgr_con_mode mode = PM_MAX_NUM_OF_MODE; + struct policy_mgr_psoc_priv_obj *pm_ctx; + uint32_t conn_index; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return mode; + } + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + for (conn_index = 0; conn_index < MAX_NUMBER_OF_CONC_CONNECTIONS; + conn_index++) + if ((pm_conc_connection_list[conn_index].vdev_id == vdev_id) && + pm_conc_connection_list[conn_index].in_use){ + mode = pm_conc_connection_list[conn_index].mode; + break; + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return mode; +} + +/** + * policy_mgr_init_connection_update() - Initialize connection + * update event + * @pm_ctx: policy mgr context + * + * Initializes the concurrent connection update event + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_init_connection_update( + struct policy_mgr_psoc_priv_obj *pm_ctx) +{ + QDF_STATUS 
qdf_status; + + qdf_status = qdf_event_create(&pm_ctx->connection_update_done_evt); + + if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { + policy_mgr_err("init event failed"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * policy_mgr_get_current_pref_hw_mode_dbs_2x2() - Get the + * current preferred hw mode + * + * Get the preferred hw mode based on the current connection combinations + * + * Return: No change (PM_NOP), MCC (PM_SINGLE_MAC), + * DBS (PM_DBS), SBS (PM_SBS) + */ +enum policy_mgr_conc_next_action + policy_mgr_get_current_pref_hw_mode_dbs_2x2( + struct wlan_objmgr_psoc *psoc) +{ + uint32_t num_connections; + uint8_t band1, band2, band3; + struct policy_mgr_hw_mode_params hw_mode; + QDF_STATUS status; + + status = policy_mgr_get_current_hw_mode(psoc, &hw_mode); + if (!QDF_IS_STATUS_SUCCESS(status)) { + policy_mgr_err("policy_mgr_get_current_hw_mode failed"); + return PM_NOP; + } + + num_connections = policy_mgr_get_connection_count(psoc); + + policy_mgr_debug("chan[0]:%d chan[1]:%d chan[2]:%d num_connections:%d dbs:%d", + pm_conc_connection_list[0].chan, + pm_conc_connection_list[1].chan, + pm_conc_connection_list[2].chan, num_connections, + hw_mode.dbs_cap); + + /* If the band of operation of both the MACs is the same, + * single MAC is preferred, otherwise DBS is preferred. 
+ */ + switch (num_connections) { + case 1: + band1 = reg_chan_to_band(pm_conc_connection_list[0].chan); + if (band1 == BAND_2G) + return PM_DBS; + else + return PM_NOP; + case 2: + band1 = reg_chan_to_band(pm_conc_connection_list[0].chan); + band2 = reg_chan_to_band(pm_conc_connection_list[1].chan); + if ((band1 == BAND_2G) || + (band2 == BAND_2G)) { + if (!hw_mode.dbs_cap) + return PM_DBS; + else + return PM_NOP; + } else if ((band1 == BAND_5G) && + (band2 == BAND_5G)) { + if (WLAN_REG_IS_CHANNEL_VALID_5G_SBS( + pm_conc_connection_list[0].chan, + pm_conc_connection_list[1].chan)) { + if (!hw_mode.sbs_cap) + return PM_SBS; + else + return PM_NOP; + } else { + if (hw_mode.sbs_cap || hw_mode.dbs_cap) + return PM_SINGLE_MAC; + else + return PM_NOP; + } + } else + return PM_NOP; + case 3: + band1 = reg_chan_to_band(pm_conc_connection_list[0].chan); + band2 = reg_chan_to_band(pm_conc_connection_list[1].chan); + band3 = reg_chan_to_band(pm_conc_connection_list[2].chan); + if ((band1 == BAND_2G) || + (band2 == BAND_2G) || + (band3 == BAND_2G)) { + if (!hw_mode.dbs_cap) + return PM_DBS; + else + return PM_NOP; + } else if ((band1 == BAND_5G) && + (band2 == BAND_5G) && + (band3 == BAND_5G)) { + if (WLAN_REG_IS_CHANNEL_VALID_5G_SBS( + pm_conc_connection_list[0].chan, + pm_conc_connection_list[2].chan) && + WLAN_REG_IS_CHANNEL_VALID_5G_SBS( + pm_conc_connection_list[1].chan, + pm_conc_connection_list[2].chan) && + WLAN_REG_IS_CHANNEL_VALID_5G_SBS( + pm_conc_connection_list[0].chan, + pm_conc_connection_list[1].chan)) { + if (!hw_mode.sbs_cap) + return PM_SBS; + else + return PM_NOP; + } else { + if (hw_mode.sbs_cap || hw_mode.dbs_cap) + return PM_SINGLE_MAC; + else + return PM_NOP; + } + } else + return PM_NOP; + default: + policy_mgr_err("unexpected num_connections value %d", + num_connections); + return PM_NOP; + } +} + +/** + * policy_mgr_get_current_pref_hw_mode_dbs_1x1() - Get the + * current preferred hw mode + * + * Get the preferred hw mode based on the current 
connection combinations + * + * Return: No change (PM_NOP), MCC (PM_SINGLE_MAC_UPGRADE), + * DBS (PM_DBS_DOWNGRADE) + */ +enum policy_mgr_conc_next_action + policy_mgr_get_current_pref_hw_mode_dbs_1x1( + struct wlan_objmgr_psoc *psoc) +{ + uint32_t num_connections; + uint8_t band1, band2, band3; + struct policy_mgr_hw_mode_params hw_mode; + QDF_STATUS status; + enum policy_mgr_conc_next_action next_action; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return PM_NOP; + } + + status = policy_mgr_get_current_hw_mode(psoc, &hw_mode); + if (!QDF_IS_STATUS_SUCCESS(status)) { + policy_mgr_err("policy_mgr_get_current_hw_mode failed"); + return PM_NOP; + } + + num_connections = policy_mgr_get_connection_count(psoc); + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + policy_mgr_debug("chan[0]:%d chan[1]:%d chan[2]:%d num_connections:%d dbs:%d", + pm_conc_connection_list[0].chan, + pm_conc_connection_list[1].chan, + pm_conc_connection_list[2].chan, num_connections, + hw_mode.dbs_cap); + + /* If the band of operation of both the MACs is the same, + * single MAC is preferred, otherwise DBS is preferred. 
+ */ + switch (num_connections) { + case 1: + /* The driver would already be in the required hw mode */ + next_action = PM_NOP; + break; + case 2: + band1 = reg_chan_to_band(pm_conc_connection_list[0].chan); + band2 = reg_chan_to_band(pm_conc_connection_list[1].chan); + if ((band1 == band2) && (hw_mode.dbs_cap)) + next_action = PM_SINGLE_MAC_UPGRADE; + else if ((band1 != band2) && (!hw_mode.dbs_cap)) + next_action = PM_DBS_DOWNGRADE; + else + next_action = PM_NOP; + + break; + + case 3: + band1 = reg_chan_to_band(pm_conc_connection_list[0].chan); + band2 = reg_chan_to_band(pm_conc_connection_list[1].chan); + band3 = reg_chan_to_band(pm_conc_connection_list[2].chan); + if (((band1 == band2) && (band2 == band3)) && + (hw_mode.dbs_cap)) { + next_action = PM_SINGLE_MAC_UPGRADE; + } else if (((band1 != band2) || (band2 != band3) || + (band1 != band3)) && + (!hw_mode.dbs_cap)) { + next_action = PM_DBS_DOWNGRADE; + } else { + next_action = PM_NOP; + } + break; + default: + policy_mgr_err("unexpected num_connections value %d", + num_connections); + next_action = PM_NOP; + break; + } + + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return next_action; +} + +/** + * policy_mgr_reset_sap_mandatory_channels() - Reset the SAP mandatory channels + * + * Resets the SAP mandatory channel list and the length of the list + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_reset_sap_mandatory_channels( + struct policy_mgr_psoc_priv_obj *pm_ctx) +{ + pm_ctx->sap_mandatory_channels_len = 0; + qdf_mem_zero(pm_ctx->sap_mandatory_channels, + QDF_ARRAY_SIZE(pm_ctx->sap_mandatory_channels)); + + return QDF_STATUS_SUCCESS; +} + +void policy_mgr_enable_disable_sap_mandatory_chan_list( + struct wlan_objmgr_psoc *psoc, bool val) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + pm_ctx->enable_sap_mandatory_chan_list = val; +} + +void policy_mgr_add_sap_mandatory_chan(struct 
wlan_objmgr_psoc *psoc, + uint8_t chan) +{ + int i; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + for (i = 0; i < pm_ctx->sap_mandatory_channels_len; i++) { + if (chan == pm_ctx->sap_mandatory_channels[i]) + return; + } + + policy_mgr_debug("chan %hu", chan); + pm_ctx->sap_mandatory_channels[pm_ctx->sap_mandatory_channels_len++] + = chan; +} + +bool policy_mgr_is_sap_mandatory_chan_list_enabled( + struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return false; + } + + return pm_ctx->enable_sap_mandatory_chan_list; +} + +uint32_t policy_mgr_get_sap_mandatory_chan_list_len( + struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return 0; + } + + return pm_ctx->sap_mandatory_channels_len; +} + +void policy_mgr_init_sap_mandatory_2g_chan(struct wlan_objmgr_psoc *psoc) +{ + uint8_t chan_list[QDF_MAX_NUM_CHAN] = {0}; + uint32_t len = 0; + int i; + QDF_STATUS status; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + status = policy_mgr_get_valid_chans(psoc, chan_list, &len); + if (QDF_IS_STATUS_ERROR(status)) { + policy_mgr_err("Error in getting valid channels"); + return; + } + pm_ctx->sap_mandatory_channels_len = 0; + + for (i = 0; (i < len) && (i < QDF_MAX_NUM_CHAN); i++) { + if (WLAN_REG_IS_24GHZ_CH(chan_list[i])) { + policy_mgr_debug("Add chan %hu to mandatory list", + chan_list[i]); + pm_ctx->sap_mandatory_channels[ + pm_ctx->sap_mandatory_channels_len++] = + chan_list[i]; + } + } +} + +void policy_mgr_remove_sap_mandatory_chan(struct wlan_objmgr_psoc *psoc, + uint8_t chan) +{ + uint8_t 
chan_list[QDF_MAX_NUM_CHAN] = {0}; + uint32_t num_chan = 0; + int i; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + if (pm_ctx->sap_mandatory_channels_len >= QDF_MAX_NUM_CHAN) { + policy_mgr_err("Invalid channel len %d ", + pm_ctx->sap_mandatory_channels_len); + return; + } + + for (i = 0; i < pm_ctx->sap_mandatory_channels_len; i++) { + if (chan == pm_ctx->sap_mandatory_channels[i]) + continue; + chan_list[num_chan++] = pm_ctx->sap_mandatory_channels[i]; + } + + qdf_mem_zero(pm_ctx->sap_mandatory_channels, + pm_ctx->sap_mandatory_channels_len); + qdf_mem_copy(pm_ctx->sap_mandatory_channels, chan_list, num_chan); + pm_ctx->sap_mandatory_channels_len = num_chan; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_get_set_utils.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_get_set_utils.c new file mode 100644 index 0000000000000000000000000000000000000000..f01f7a6b259d64b02cae926cd863a933001ddb98 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_get_set_utils.c @@ -0,0 +1,3280 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: wlan_policy_mgr_get_set_utils.c
+ *
+ * WLAN Concurrent Connection Management APIs
+ *
+ */
+
+/* Include files */
+#include "target_if.h"
+#include "wlan_policy_mgr_api.h"
+#include "wlan_policy_mgr_i.h"
+#include "qdf_types.h"
+#include "qdf_trace.h"
+#include "wlan_objmgr_global_obj.h"
+#include "wlan_objmgr_pdev_obj.h"
+#include "wlan_objmgr_vdev_obj.h"
+
+/* invalid channel id. */
+#define INVALID_CHANNEL_ID 0
+
+void policy_mgr_update_new_hw_mode_index(struct wlan_objmgr_psoc *psoc,
+		uint32_t new_hw_mode_index)
+{
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return;
+	}
+	pm_ctx->new_hw_mode_index = new_hw_mode_index;
+}
+
+void policy_mgr_update_old_hw_mode_index(struct wlan_objmgr_psoc *psoc,
+		uint32_t old_hw_mode_index)
+{
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return;
+	}
+	pm_ctx->old_hw_mode_index = old_hw_mode_index;
+}
+
+void policy_mgr_update_hw_mode_index(struct wlan_objmgr_psoc *psoc,
+		uint32_t new_hw_mode_index)
+{
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return;
+	}
+	if (POLICY_MGR_DEFAULT_HW_MODE_INDEX == pm_ctx->new_hw_mode_index) {
+		pm_ctx->new_hw_mode_index = new_hw_mode_index;
+	} else {
+		pm_ctx->old_hw_mode_index = pm_ctx->new_hw_mode_index;
+		pm_ctx->new_hw_mode_index = new_hw_mode_index;
+	}
+	policy_mgr_debug("Updated: old_hw_mode_index:%d new_hw_mode_index:%d",
+		
pm_ctx->old_hw_mode_index, pm_ctx->new_hw_mode_index); +} + +/** + * policy_mgr_get_num_of_setbits_from_bitmask() - to get num of + * setbits from bitmask + * @mask: given bitmask + * + * This helper function should return number of setbits from bitmask + * + * Return: number of setbits from bitmask + */ +static uint32_t policy_mgr_get_num_of_setbits_from_bitmask(uint32_t mask) +{ + uint32_t num_of_setbits = 0; + + while (mask) { + mask &= (mask - 1); + num_of_setbits++; + } + return num_of_setbits; +} + +/** + * policy_mgr_map_wmi_channel_width_to_hw_mode_bw() - returns + * bandwidth in terms of hw_mode_bandwidth + * @width: bandwidth in terms of wmi_channel_width + * + * This function returns the bandwidth in terms of hw_mode_bandwidth. + * + * Return: BW in terms of hw_mode_bandwidth. + */ +static enum hw_mode_bandwidth policy_mgr_map_wmi_channel_width_to_hw_mode_bw( + wmi_channel_width width) +{ + switch (width) { + case WMI_CHAN_WIDTH_20: + return HW_MODE_20_MHZ; + case WMI_CHAN_WIDTH_40: + return HW_MODE_40_MHZ; + case WMI_CHAN_WIDTH_80: + return HW_MODE_80_MHZ; + case WMI_CHAN_WIDTH_160: + return HW_MODE_160_MHZ; + case WMI_CHAN_WIDTH_80P80: + return HW_MODE_80_PLUS_80_MHZ; + case WMI_CHAN_WIDTH_5: + return HW_MODE_5_MHZ; + case WMI_CHAN_WIDTH_10: + return HW_MODE_10_MHZ; + default: + return HW_MODE_BW_NONE; + } + + return HW_MODE_BW_NONE; +} + +static void policy_mgr_get_hw_mode_params( + struct wlan_psoc_host_mac_phy_caps *caps, + struct policy_mgr_mac_ss_bw_info *info) +{ + if (!caps) { + policy_mgr_err("Invalid capabilities"); + return; + } + + info->mac_tx_stream = policy_mgr_get_num_of_setbits_from_bitmask( + QDF_MAX(caps->tx_chain_mask_2G, + caps->tx_chain_mask_5G)); + info->mac_rx_stream = policy_mgr_get_num_of_setbits_from_bitmask( + QDF_MAX(caps->rx_chain_mask_2G, + caps->rx_chain_mask_5G)); + info->mac_bw = policy_mgr_map_wmi_channel_width_to_hw_mode_bw( + QDF_MAX(caps->max_bw_supported_2G, + caps->max_bw_supported_5G)); +} + +/** + * 
policy_mgr_set_hw_mode_params() - sets TX-RX stream, + * bandwidth and DBS in hw_mode_list + * @wma_handle: pointer to wma global structure + * @mac0_ss_bw_info: TX-RX streams, BW for MAC0 + * @mac1_ss_bw_info: TX-RX streams, BW for MAC1 + * @pos: refers to hw_mode_index + * @dbs_mode: dbs_mode for the dbs_hw_mode + * @sbs_mode: sbs_mode for the sbs_hw_mode + * + * This function sets TX-RX stream, bandwidth and DBS mode in + * hw_mode_list. + * + * Return: none + */ +static void policy_mgr_set_hw_mode_params(struct wlan_objmgr_psoc *psoc, + struct policy_mgr_mac_ss_bw_info mac0_ss_bw_info, + struct policy_mgr_mac_ss_bw_info mac1_ss_bw_info, + uint32_t pos, uint32_t dbs_mode, + uint32_t sbs_mode) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + POLICY_MGR_HW_MODE_MAC0_TX_STREAMS_SET( + pm_ctx->hw_mode.hw_mode_list[pos], + mac0_ss_bw_info.mac_tx_stream); + POLICY_MGR_HW_MODE_MAC0_RX_STREAMS_SET( + pm_ctx->hw_mode.hw_mode_list[pos], + mac0_ss_bw_info.mac_rx_stream); + POLICY_MGR_HW_MODE_MAC0_BANDWIDTH_SET( + pm_ctx->hw_mode.hw_mode_list[pos], + mac0_ss_bw_info.mac_bw); + POLICY_MGR_HW_MODE_MAC1_TX_STREAMS_SET( + pm_ctx->hw_mode.hw_mode_list[pos], + mac1_ss_bw_info.mac_tx_stream); + POLICY_MGR_HW_MODE_MAC1_RX_STREAMS_SET( + pm_ctx->hw_mode.hw_mode_list[pos], + mac1_ss_bw_info.mac_rx_stream); + POLICY_MGR_HW_MODE_MAC1_BANDWIDTH_SET( + pm_ctx->hw_mode.hw_mode_list[pos], + mac1_ss_bw_info.mac_bw); + POLICY_MGR_HW_MODE_DBS_MODE_SET( + pm_ctx->hw_mode.hw_mode_list[pos], + dbs_mode); + POLICY_MGR_HW_MODE_AGILE_DFS_SET( + pm_ctx->hw_mode.hw_mode_list[pos], + HW_MODE_AGILE_DFS_NONE); + POLICY_MGR_HW_MODE_SBS_MODE_SET( + pm_ctx->hw_mode.hw_mode_list[pos], + sbs_mode); +} + +QDF_STATUS policy_mgr_update_hw_mode_list(struct wlan_objmgr_psoc *psoc, + struct target_psoc_info *tgt_hdl) +{ + struct wlan_psoc_host_mac_phy_caps *tmp; + uint32_t i, hw_config_type, j = 0; + 
uint32_t dbs_mode, sbs_mode; + struct policy_mgr_mac_ss_bw_info mac0_ss_bw_info = {0}; + struct policy_mgr_mac_ss_bw_info mac1_ss_bw_info = {0}; + struct policy_mgr_psoc_priv_obj *pm_ctx; + struct tgt_info *info; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return QDF_STATUS_E_FAILURE; + } + + info = &tgt_hdl->info; + if (!info->service_ext_param.num_hw_modes) { + policy_mgr_err("Number of HW modes: %d", + info->service_ext_param.num_hw_modes); + return QDF_STATUS_E_FAILURE; + } + + /* + * This list was updated as part of service ready event. Re-populate + * HW mode list from the device capabilities. + */ + if (pm_ctx->hw_mode.hw_mode_list) { + qdf_mem_free(pm_ctx->hw_mode.hw_mode_list); + pm_ctx->hw_mode.hw_mode_list = NULL; + policy_mgr_debug("DBS list is freed"); + } + + pm_ctx->num_dbs_hw_modes = info->service_ext_param.num_hw_modes; + pm_ctx->hw_mode.hw_mode_list = + qdf_mem_malloc(sizeof(*pm_ctx->hw_mode.hw_mode_list) * + pm_ctx->num_dbs_hw_modes); + if (!pm_ctx->hw_mode.hw_mode_list) { + policy_mgr_err("Memory allocation failed for DBS"); + return QDF_STATUS_E_FAILURE; + } + + policy_mgr_debug("Updated HW mode list: Num modes:%d", + pm_ctx->num_dbs_hw_modes); + + for (i = 0; i < pm_ctx->num_dbs_hw_modes; i++) { + /* Update for MAC0 */ + tmp = &info->mac_phy_cap[j++]; + policy_mgr_get_hw_mode_params(tmp, &mac0_ss_bw_info); + hw_config_type = tmp->hw_mode_config_type; + dbs_mode = HW_MODE_DBS_NONE; + sbs_mode = HW_MODE_SBS_NONE; + mac1_ss_bw_info.mac_tx_stream = 0; + mac1_ss_bw_info.mac_rx_stream = 0; + mac1_ss_bw_info.mac_bw = 0; + + /* SBS and DBS have dual MAC. Upto 2 MACs are considered. 
*/ + if ((hw_config_type == WMI_HW_MODE_DBS) || + (hw_config_type == WMI_HW_MODE_SBS_PASSIVE) || + (hw_config_type == WMI_HW_MODE_SBS)) { + /* Update for MAC1 */ + tmp = &info->mac_phy_cap[j++]; + policy_mgr_get_hw_mode_params(tmp, &mac1_ss_bw_info); + if (hw_config_type == WMI_HW_MODE_DBS) + dbs_mode = HW_MODE_DBS; + if ((hw_config_type == WMI_HW_MODE_SBS_PASSIVE) || + (hw_config_type == WMI_HW_MODE_SBS)) + sbs_mode = HW_MODE_SBS; + } + + /* Updating HW mode list */ + policy_mgr_set_hw_mode_params(psoc, mac0_ss_bw_info, + mac1_ss_bw_info, i, dbs_mode, sbs_mode); + } + return QDF_STATUS_SUCCESS; +} + +void policy_mgr_init_dbs_hw_mode(struct wlan_objmgr_psoc *psoc, + uint32_t num_dbs_hw_modes, + uint32_t *ev_wlan_dbs_hw_mode_list) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + pm_ctx->num_dbs_hw_modes = num_dbs_hw_modes; + pm_ctx->hw_mode.hw_mode_list = + qdf_mem_malloc(sizeof(*pm_ctx->hw_mode.hw_mode_list) * + pm_ctx->num_dbs_hw_modes); + if (!pm_ctx->hw_mode.hw_mode_list) { + policy_mgr_err("Memory allocation failed for DBS"); + return; + } + qdf_mem_copy(pm_ctx->hw_mode.hw_mode_list, + ev_wlan_dbs_hw_mode_list, + (sizeof(*pm_ctx->hw_mode.hw_mode_list) * + pm_ctx->num_dbs_hw_modes)); +} + +void policy_mgr_dump_dbs_hw_mode(struct wlan_objmgr_psoc *psoc) +{ + uint32_t i, param; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + for (i = 0; i < pm_ctx->num_dbs_hw_modes; i++) { + param = pm_ctx->hw_mode.hw_mode_list[i]; + policy_mgr_debug("[%d]-MAC0: tx_ss:%d rx_ss:%d bw_idx:%d", + i, + POLICY_MGR_HW_MODE_MAC0_TX_STREAMS_GET(param), + POLICY_MGR_HW_MODE_MAC0_RX_STREAMS_GET(param), + POLICY_MGR_HW_MODE_MAC0_BANDWIDTH_GET(param)); + policy_mgr_debug("[%d]-MAC1: tx_ss:%d rx_ss:%d bw_idx:%d", + i, + POLICY_MGR_HW_MODE_MAC1_TX_STREAMS_GET(param), + 
POLICY_MGR_HW_MODE_MAC1_RX_STREAMS_GET(param), + POLICY_MGR_HW_MODE_MAC1_BANDWIDTH_GET(param)); + policy_mgr_debug("[%d] DBS:%d SBS:%d", i, + POLICY_MGR_HW_MODE_DBS_MODE_GET(param), + POLICY_MGR_HW_MODE_SBS_MODE_GET(param)); + } +} + +void policy_mgr_init_dbs_config(struct wlan_objmgr_psoc *psoc, + uint32_t scan_config, uint32_t fw_config) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + pm_ctx->dual_mac_cfg.cur_scan_config = 0; + pm_ctx->dual_mac_cfg.cur_fw_mode_config = 0; + + /* If dual mac features are disabled in the INI, we + * need not proceed further + */ + if (DISABLE_DBS_CXN_AND_SCAN == + wlan_objmgr_psoc_get_dual_mac_disable(psoc)) { + policy_mgr_err("Disabling dual mac capabilities"); + /* All capabilities are initialized to 0. We can return */ + goto done; + } + + /* Initialize concurrent_scan_config_bits with default FW value */ + WMI_DBS_CONC_SCAN_CFG_ASYNC_DBS_SCAN_SET( + pm_ctx->dual_mac_cfg.cur_scan_config, + WMI_DBS_CONC_SCAN_CFG_ASYNC_DBS_SCAN_GET(scan_config)); + WMI_DBS_CONC_SCAN_CFG_SYNC_DBS_SCAN_SET( + pm_ctx->dual_mac_cfg.cur_scan_config, + WMI_DBS_CONC_SCAN_CFG_SYNC_DBS_SCAN_GET(scan_config)); + WMI_DBS_CONC_SCAN_CFG_DBS_SCAN_SET( + pm_ctx->dual_mac_cfg.cur_scan_config, + WMI_DBS_CONC_SCAN_CFG_DBS_SCAN_GET(scan_config)); + WMI_DBS_CONC_SCAN_CFG_AGILE_SCAN_SET( + pm_ctx->dual_mac_cfg.cur_scan_config, + WMI_DBS_CONC_SCAN_CFG_AGILE_SCAN_GET(scan_config)); + WMI_DBS_CONC_SCAN_CFG_AGILE_DFS_SCAN_SET( + pm_ctx->dual_mac_cfg.cur_scan_config, + WMI_DBS_CONC_SCAN_CFG_AGILE_DFS_SCAN_GET(scan_config)); + + /* Initialize fw_mode_config_bits with default FW value */ + WMI_DBS_FW_MODE_CFG_DBS_SET( + pm_ctx->dual_mac_cfg.cur_fw_mode_config, + WMI_DBS_FW_MODE_CFG_DBS_GET(fw_config)); + WMI_DBS_FW_MODE_CFG_AGILE_DFS_SET( + pm_ctx->dual_mac_cfg.cur_fw_mode_config, + WMI_DBS_FW_MODE_CFG_AGILE_DFS_GET(fw_config)); + 
WMI_DBS_FW_MODE_CFG_DBS_FOR_CXN_SET( + pm_ctx->dual_mac_cfg.cur_fw_mode_config, + WMI_DBS_FW_MODE_CFG_DBS_FOR_CXN_GET(fw_config)); +done: + /* Initialize the previous scan/fw mode config */ + pm_ctx->dual_mac_cfg.prev_scan_config = + pm_ctx->dual_mac_cfg.cur_scan_config; + pm_ctx->dual_mac_cfg.prev_fw_mode_config = + pm_ctx->dual_mac_cfg.cur_fw_mode_config; + + policy_mgr_debug("cur_scan_config:%x cur_fw_mode_config:%x", + pm_ctx->dual_mac_cfg.cur_scan_config, + pm_ctx->dual_mac_cfg.cur_fw_mode_config); +} + +void policy_mgr_update_dbs_scan_config(struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + pm_ctx->dual_mac_cfg.prev_scan_config = + pm_ctx->dual_mac_cfg.cur_scan_config; + pm_ctx->dual_mac_cfg.cur_scan_config = + pm_ctx->dual_mac_cfg.req_scan_config; +} + +void policy_mgr_update_dbs_fw_config(struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + pm_ctx->dual_mac_cfg.prev_fw_mode_config = + pm_ctx->dual_mac_cfg.cur_fw_mode_config; + pm_ctx->dual_mac_cfg.cur_fw_mode_config = + pm_ctx->dual_mac_cfg.req_fw_mode_config; +} + +void policy_mgr_update_dbs_req_config(struct wlan_objmgr_psoc *psoc, + uint32_t scan_config, uint32_t fw_mode_config) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + pm_ctx->dual_mac_cfg.req_scan_config = scan_config; + pm_ctx->dual_mac_cfg.req_fw_mode_config = fw_mode_config; +} + +bool policy_mgr_get_dbs_plus_agile_scan_config(struct wlan_objmgr_psoc *psoc) +{ + uint32_t scan_config; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + if (policy_mgr_is_dual_mac_disabled_in_ini(psoc)) + return false; + + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + 
policy_mgr_err("Invalid Context"); + /* We take that it is disabled and proceed */ + return false; + } + scan_config = pm_ctx->dual_mac_cfg.cur_scan_config; + + return WMI_DBS_CONC_SCAN_CFG_AGILE_SCAN_GET(scan_config); +} + +bool policy_mgr_get_single_mac_scan_with_dfs_config( + struct wlan_objmgr_psoc *psoc) +{ + uint32_t scan_config; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + if (policy_mgr_is_dual_mac_disabled_in_ini(psoc)) + return false; + + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + /* We take that it is disabled and proceed */ + return false; + } + scan_config = pm_ctx->dual_mac_cfg.cur_scan_config; + + return WMI_DBS_CONC_SCAN_CFG_AGILE_DFS_SCAN_GET(scan_config); +} + +int8_t policy_mgr_get_num_dbs_hw_modes(struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return -EINVAL; + } + return pm_ctx->num_dbs_hw_modes; +} + +bool policy_mgr_find_if_fw_supports_dbs(struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + struct wmi_unified *wmi_handle; + bool dbs_support; + + pm_ctx = policy_mgr_get_context(psoc); + + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return false; + } + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + policy_mgr_debug("Invalid WMI handle"); + return false; + } + dbs_support = + wmi_service_enabled(wmi_handle, + wmi_service_dual_band_simultaneous_support); + policy_mgr_debug("is DBS supported by FW/HW: %s", + dbs_support ? 
"yes" : "no"); + + /* The agreement with FW is that: To know if the target is DBS + * capable, DBS needs to be supported both in the HW mode list + * and in the service ready event + */ + if (!dbs_support) + return false; + + return true; +} + +static bool policy_mgr_find_if_hwlist_has_dbs(struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + uint32_t param, i, found = 0; + + pm_ctx = policy_mgr_get_context(psoc); + + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return false; + } + for (i = 0; i < pm_ctx->num_dbs_hw_modes; i++) { + param = pm_ctx->hw_mode.hw_mode_list[i]; + policy_mgr_debug("HW param: %x", param); + if (POLICY_MGR_HW_MODE_DBS_MODE_GET(param)) { + policy_mgr_debug("HW (%d) is DBS capable", i); + found = 1; + break; + } + } + if (found) + return true; + + return false; +} + +static bool policy_mgr_find_if_hwlist_has_sbs(struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + uint32_t param, i, found = 0; + + pm_ctx = policy_mgr_get_context(psoc); + + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return false; + } + for (i = 0; i < pm_ctx->num_dbs_hw_modes; i++) { + param = pm_ctx->hw_mode.hw_mode_list[i]; + policy_mgr_debug("HW param: %x", param); + if (POLICY_MGR_HW_MODE_SBS_MODE_GET(param)) { + policy_mgr_debug("HW (%d) is SBS capable", i); + found = 1; + break; + } + } + if (found) + return true; + + return false; +} + +bool policy_mgr_is_hw_dbs_capable(struct wlan_objmgr_psoc *psoc) +{ + if (!policy_mgr_is_dbs_enable(psoc)) { + policy_mgr_debug("DBS is disabled"); + return false; + } + + if (!policy_mgr_find_if_fw_supports_dbs(psoc)) { + policy_mgr_debug("HW mode list has no DBS"); + return false; + } + + return policy_mgr_find_if_hwlist_has_dbs(psoc); +} + +bool policy_mgr_is_hw_sbs_capable(struct wlan_objmgr_psoc *psoc) +{ + if (!policy_mgr_find_if_fw_supports_dbs(psoc)) { + policy_mgr_debug("HW mode list has no DBS"); + return false; + } + + return 
	policy_mgr_find_if_hwlist_has_sbs(psoc);
}

/*
 * policy_mgr_get_dbs_hw_modes() - Report whether the HW mode list contains
 * a 1x1 DBS entry and/or a 2x2 DBS entry (matched on either MAC's TX/RX
 * spatial-stream configuration). Both flags stay false when the target is
 * not DBS capable; that case still returns success by design.
 */
QDF_STATUS policy_mgr_get_dbs_hw_modes(struct wlan_objmgr_psoc *psoc,
		bool *one_by_one_dbs, bool *two_by_two_dbs)
{
	struct policy_mgr_psoc_priv_obj *pm_ctx;
	uint32_t i;
	int8_t found_one_by_one = -EINVAL, found_two_by_two = -EINVAL;
	uint32_t conf1_tx_ss, conf1_rx_ss;
	uint32_t conf2_tx_ss, conf2_rx_ss;

	*one_by_one_dbs = false;
	*two_by_two_dbs = false;

	if (policy_mgr_is_hw_dbs_capable(psoc) == false) {
		policy_mgr_err("HW is not DBS capable");
		/* Caller will understand that DBS is disabled */
		return QDF_STATUS_SUCCESS;

	}

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return QDF_STATUS_E_FAILURE;
	}

	/* To check 1x1 capability */
	policy_mgr_get_tx_rx_ss_from_config(HW_MODE_SS_1x1,
			&conf1_tx_ss, &conf1_rx_ss);
	/* To check 2x2 capability */
	policy_mgr_get_tx_rx_ss_from_config(HW_MODE_SS_2x2,
			&conf2_tx_ss, &conf2_rx_ss);

	for (i = 0; i < pm_ctx->num_dbs_hw_modes; i++) {
		uint32_t t_conf0_tx_ss, t_conf0_rx_ss;
		uint32_t t_conf1_tx_ss, t_conf1_rx_ss;
		uint32_t dbs_mode;

		t_conf0_tx_ss = POLICY_MGR_HW_MODE_MAC0_TX_STREAMS_GET(
				pm_ctx->hw_mode.hw_mode_list[i]);
		t_conf0_rx_ss = POLICY_MGR_HW_MODE_MAC0_RX_STREAMS_GET(
				pm_ctx->hw_mode.hw_mode_list[i]);
		t_conf1_tx_ss = POLICY_MGR_HW_MODE_MAC1_TX_STREAMS_GET(
				pm_ctx->hw_mode.hw_mode_list[i]);
		t_conf1_rx_ss = POLICY_MGR_HW_MODE_MAC1_RX_STREAMS_GET(
				pm_ctx->hw_mode.hw_mode_list[i]);
		dbs_mode = POLICY_MGR_HW_MODE_DBS_MODE_GET(
				pm_ctx->hw_mode.hw_mode_list[i]);

		/* 1x1 match on either MAC, remembered only once */
		if (((((t_conf0_tx_ss == conf1_tx_ss) &&
		    (t_conf0_rx_ss == conf1_rx_ss)) ||
		    ((t_conf1_tx_ss == conf1_tx_ss) &&
		    (t_conf1_rx_ss == conf1_rx_ss))) &&
		    (dbs_mode == HW_MODE_DBS)) &&
		    (found_one_by_one < 0)) {
			found_one_by_one = i;
			policy_mgr_debug("1x1 hw_mode index %d found", i);
			/* Once an entry is found, need not check for 1x1
			 * again
			 */
			continue;
		}

		/* 2x2 match on either MAC, remembered only once */
		if (((((t_conf0_tx_ss == conf2_tx_ss) &&
		    (t_conf0_rx_ss == conf2_rx_ss)) ||
		    ((t_conf1_tx_ss == conf2_tx_ss) &&
		    (t_conf1_rx_ss == conf2_rx_ss))) &&
		    (dbs_mode == HW_MODE_DBS)) &&
		    (found_two_by_two < 0)) {
			found_two_by_two = i;
			policy_mgr_debug("2x2 hw_mode index %d found", i);
			/* Once an entry is found, need not check for 2x2
			 * again
			 */
			continue;
		}
	}

	if (found_one_by_one >= 0)
		*one_by_one_dbs = true;
	if (found_two_by_two >= 0)
		*two_by_two_dbs = true;

	return QDF_STATUS_SUCCESS;
}

/*
 * policy_mgr_get_current_hw_mode() - Translate the currently applied HW
 * mode index (the "new" index from the old/new pair) into its parameter
 * set. Fails when the index is still the pre-init default.
 */
QDF_STATUS policy_mgr_get_current_hw_mode(struct wlan_objmgr_psoc *psoc,
		struct policy_mgr_hw_mode_params *hw_mode)
{
	QDF_STATUS status;
	uint32_t old_hw_index = 0, new_hw_index = 0;

	policy_mgr_debug("Get the current hw mode");

	status = policy_mgr_get_old_and_new_hw_index(psoc, &old_hw_index,
			&new_hw_index);
	if (QDF_STATUS_SUCCESS != status) {
		policy_mgr_err("Failed to get HW mode index");
		return QDF_STATUS_E_FAILURE;
	}

	if (new_hw_index == POLICY_MGR_DEFAULT_HW_MODE_INDEX) {
		policy_mgr_err("HW mode is not yet initialized");
		return QDF_STATUS_E_FAILURE;
	}

	status = policy_mgr_get_hw_mode_from_idx(psoc, new_hw_index, hw_mode);
	if (QDF_STATUS_SUCCESS != status) {
		policy_mgr_err("Failed to get HW mode index");
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * policy_mgr_is_current_hwmode_dbs() - True when the target is DBS capable
 * AND the currently applied HW mode has the DBS bit set.
 */
bool policy_mgr_is_current_hwmode_dbs(struct wlan_objmgr_psoc *psoc)
{
	struct policy_mgr_hw_mode_params hw_mode;

	if (!policy_mgr_is_hw_dbs_capable(psoc))
		return false;
	if (QDF_STATUS_SUCCESS !=
		policy_mgr_get_current_hw_mode(psoc, &hw_mode))
		return false;
	if (hw_mode.dbs_cap)
		return true;
	return false;
}

/*
 * policy_mgr_is_dbs_enable() - True when dual MAC is not ini-disabled and
 * the DBS bit is set in the current FW mode configuration.
 */
bool policy_mgr_is_dbs_enable(struct wlan_objmgr_psoc *psoc)
{
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	if (policy_mgr_is_dual_mac_disabled_in_ini(psoc)) {
		policy_mgr_debug("DBS is disabled from ini");
		return false;
	}

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return
		false;
	}

	policy_mgr_debug("DBS=%d",
		WMI_DBS_FW_MODE_CFG_DBS_GET(
			pm_ctx->dual_mac_cfg.cur_fw_mode_config));

	if (WMI_DBS_FW_MODE_CFG_DBS_GET(
			pm_ctx->dual_mac_cfg.cur_fw_mode_config))
		return true;

	return false;
}

/*
 * policy_mgr_is_hw_dbs_2x2_capable() - True when the DBS NSS reported by
 * policy_mgr_get_hw_dbs_nss() is at least 2x2.
 */
bool policy_mgr_is_hw_dbs_2x2_capable(struct wlan_objmgr_psoc *psoc)
{
	struct dbs_nss nss_dbs;

	return ((policy_mgr_get_hw_dbs_nss(psoc, &nss_dbs)) >= HW_MODE_SS_2x2)
		? true : false;
}

/*
 * policy_mgr_get_connection_count() - Count in_use entries in the global
 * concurrency connection table, under qdf_conc_list_lock.
 */
uint32_t policy_mgr_get_connection_count(struct wlan_objmgr_psoc *psoc)
{
	uint32_t conn_index, count = 0;
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return count;
	}

	qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
	for (conn_index = 0; conn_index < MAX_NUMBER_OF_CONC_CONNECTIONS;
		conn_index++) {
		if (pm_conc_connection_list[conn_index].in_use)
			count++;
	}
	qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);

	return count;
}

/*
 * policy_mgr_mode_specific_vdev_id() - vdev id of the FIRST in_use
 * connection of the given mode, or WLAN_INVALID_VDEV_ID when none exists.
 */
uint32_t policy_mgr_mode_specific_vdev_id(struct wlan_objmgr_psoc *psoc,
		enum policy_mgr_con_mode mode)
{
	uint32_t conn_index = 0;
	uint32_t vdev_id = WLAN_INVALID_VDEV_ID;
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return vdev_id;
	}
	qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
	/*
	 * Note: This gives you the first vdev id of the mode type in a
	 * sta+sta or sap+sap or p2p + p2p case
	 */
	for (conn_index = 0; conn_index < MAX_NUMBER_OF_CONC_CONNECTIONS;
		conn_index++) {
		if ((pm_conc_connection_list[conn_index].mode == mode) &&
			pm_conc_connection_list[conn_index].in_use) {
			vdev_id = pm_conc_connection_list[conn_index].vdev_id;
			break;
		}
	}
	qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);

	return vdev_id;
}

/*
 * policy_mgr_mode_specific_connection_count() - Count in_use connections of
 * the given mode; when @list is non-NULL it is filled with the matching
 * connection-table indices (caller must size it for
 * MAX_NUMBER_OF_CONC_CONNECTIONS).
 */
uint32_t policy_mgr_mode_specific_connection_count(
		struct wlan_objmgr_psoc *psoc,
		enum policy_mgr_con_mode mode,
		uint32_t *list)
{
	uint32_t conn_index = 0, count = 0;
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return count;
	}
	qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
	for (conn_index = 0; conn_index < MAX_NUMBER_OF_CONC_CONNECTIONS;
		conn_index++) {
		if ((pm_conc_connection_list[conn_index].mode == mode) &&
			pm_conc_connection_list[conn_index].in_use) {
			if (list != NULL)
				list[count] = conn_index;
			count++;
		}
	}
	qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);

	return count;
}

/*
 * policy_mgr_check_conn_with_mode_and_vdev_id() - SUCCESS when a connection
 * with both the given mode and vdev id exists in the table.
 */
QDF_STATUS policy_mgr_check_conn_with_mode_and_vdev_id(
		struct wlan_objmgr_psoc *psoc, enum policy_mgr_con_mode mode,
		uint32_t vdev_id)
{
	QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE;
	uint32_t conn_index = 0;
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return qdf_status;
	}

	qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
	while (PM_CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) {
		if ((pm_conc_connection_list[conn_index].mode == mode) &&
			(pm_conc_connection_list[conn_index].vdev_id == vdev_id)) {
			qdf_status = QDF_STATUS_SUCCESS;
			break;
		}
		conn_index++;
	}
	qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);
	return qdf_status;
}

/*
 * policy_mgr_soc_set_dual_mac_cfg_cb() - FW response callback for the
 * set-dual-mac-config request; log-only.
 */
void policy_mgr_soc_set_dual_mac_cfg_cb(enum set_hw_mode_status status,
		uint32_t scan_config,
		uint32_t fw_mode_config)
{
	policy_mgr_debug("Status:%d for scan_config:%x fw_mode_config:%x",
		status, scan_config, fw_mode_config);
}

/*
 * policy_mgr_set_dual_mac_scan_config() - Build updated scan and FW mode
 * configs from the given scan flags (normalized to 0/1) and push them to
 * firmware through the SME callback.
 */
void policy_mgr_set_dual_mac_scan_config(struct wlan_objmgr_psoc *psoc,
		uint8_t dbs_val,
		uint8_t dbs_plus_agile_scan_val,
		uint8_t single_mac_scan_with_dbs_val)
{
	struct policy_mgr_dual_mac_config cfg;
	QDF_STATUS status;
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return;
	}

	/* Any non-zero positive value is treated as 1 */
	if (dbs_val != 0)
		dbs_val = 1;
	if (dbs_plus_agile_scan_val != 0)
		dbs_plus_agile_scan_val = 1;
	if (single_mac_scan_with_dbs_val != 0)
		single_mac_scan_with_dbs_val = 1;

	status = policy_mgr_get_updated_scan_config(psoc, &cfg.scan_config,
			dbs_val,
			dbs_plus_agile_scan_val,
			single_mac_scan_with_dbs_val);
	if (status != QDF_STATUS_SUCCESS) {
		policy_mgr_err("policy_mgr_get_updated_scan_config failed %d",
			status);
		return;
	}

	/* FW mode side keeps whatever is currently configured */
	status = policy_mgr_get_updated_fw_mode_config(psoc,
			&cfg.fw_mode_config,
			policy_mgr_get_dbs_config(psoc),
			policy_mgr_get_agile_dfs_config(psoc));
	if (status != QDF_STATUS_SUCCESS) {
		policy_mgr_err("policy_mgr_get_updated_fw_mode_config failed %d",
			status);
		return;
	}

	cfg.set_dual_mac_cb = policy_mgr_soc_set_dual_mac_cfg_cb;

	policy_mgr_debug("scan_config:%x fw_mode_config:%x",
		cfg.scan_config, cfg.fw_mode_config);

	status = pm_ctx->sme_cbacks.sme_soc_set_dual_mac_config(cfg);
	if (status != QDF_STATUS_SUCCESS)
		policy_mgr_err("sme_soc_set_dual_mac_config failed %d", status);
}

/*
 * policy_mgr_set_dual_mac_fw_mode_config() - Counterpart of the scan-config
 * setter: takes DBS/DFS FW-mode flags (normalized to 0/1), keeps the current
 * scan configuration, and pushes the combined config to firmware.
 */
void policy_mgr_set_dual_mac_fw_mode_config(struct wlan_objmgr_psoc *psoc,
		uint8_t dbs, uint8_t dfs)
{
	struct policy_mgr_dual_mac_config cfg;
	QDF_STATUS status;
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return;
	}

	/* Any non-zero positive value is treated as 1 */
	if (dbs != 0)
		dbs = 1;
	if (dfs != 0)
		dfs = 1;

	/* Scan side keeps whatever is currently configured */
	status = policy_mgr_get_updated_scan_config(psoc, &cfg.scan_config,
			policy_mgr_get_dbs_scan_config(psoc),
			policy_mgr_get_dbs_plus_agile_scan_config(psoc),
			policy_mgr_get_single_mac_scan_with_dfs_config(psoc));
	if (status != QDF_STATUS_SUCCESS) {
		policy_mgr_err("policy_mgr_get_updated_scan_config failed %d",
			status);
		return;
	}

	status = policy_mgr_get_updated_fw_mode_config(psoc,
			&cfg.fw_mode_config, dbs, dfs);
	if (status != QDF_STATUS_SUCCESS) {
		policy_mgr_err("policy_mgr_get_updated_fw_mode_config failed %d",
			status);
		return;
	}

	cfg.set_dual_mac_cb = policy_mgr_soc_set_dual_mac_cfg_cb;

	policy_mgr_debug("scan_config:%x fw_mode_config:%x",
		cfg.scan_config, cfg.fw_mode_config);

	status = pm_ctx->sme_cbacks.sme_soc_set_dual_mac_config(cfg);
	if (status != QDF_STATUS_SUCCESS)
		policy_mgr_err("sme_soc_set_dual_mac_config failed %d", status);
}

/*
 * policy_mgr_current_concurrency_is_mcc() - True when the current
 * connections share a MAC but not a channel (2 connections), or when any
 * channel differs (3 connections).
 *
 * NOTE(review): the 3-connection case compares only channels, not MAC ids,
 * and pm_conc_connection_list is read here without taking
 * qdf_conc_list_lock — callers appear to hold the lock; confirm.
 */
bool policy_mgr_current_concurrency_is_mcc(struct wlan_objmgr_psoc *psoc)
{
	uint32_t num_connections = 0;
	bool is_mcc = false;

	num_connections = policy_mgr_get_connection_count(psoc);

	switch (num_connections) {
	case 1:
		break;
	case 2:
		if ((pm_conc_connection_list[0].chan !=
			pm_conc_connection_list[1].chan) &&
			(pm_conc_connection_list[0].mac ==
			pm_conc_connection_list[1].mac)) {
			is_mcc = true;
		}
		break;
	case 3:
		if ((pm_conc_connection_list[0].chan !=
			pm_conc_connection_list[1].chan) ||
			(pm_conc_connection_list[0].chan !=
			pm_conc_connection_list[2].chan) ||
			(pm_conc_connection_list[1].chan !=
			pm_conc_connection_list[2].chan)){
			is_mcc = true;
		}
		break;
	default:
		policy_mgr_err("unexpected num_connections value %d",
			num_connections);
		break;
	}

	return is_mcc;
}

/**
 * policy_mgr_set_concurrency_mode() - To set concurrency mode
 * @psoc: PSOC object data
 * @mode: device mode
 *
 * This routine is called to set the concurrency mode bit for @mode and
 * bump its open-session count.
 *
 * Return: NONE
 */
void policy_mgr_set_concurrency_mode(struct wlan_objmgr_psoc *psoc,
		enum QDF_OPMODE mode)
{
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid context");
		return;
	}

	switch (mode) {
	case QDF_STA_MODE:
	case QDF_P2P_CLIENT_MODE:
	case QDF_P2P_GO_MODE:
	case QDF_SAP_MODE:
	case QDF_IBSS_MODE:
	case QDF_MONITOR_MODE:
		pm_ctx->concurrency_mode |= (1 << mode);
		pm_ctx->no_of_open_sessions[mode]++;
		break;
	default:
		break;
	}

	policy_mgr_info("concurrency_mode = 0x%x Number of open sessions for mode %d = %d",
		pm_ctx->concurrency_mode, mode,
		pm_ctx->no_of_open_sessions[mode]);
}

/**
 * policy_mgr_clear_concurrency_mode() - To clear concurrency mode
 * @psoc: PSOC object data
 * @mode: device mode
 *
 * This routine is called to drop @mode's open-session count and clear its
 * concurrency mode bit when the count reaches zero.
 *
 * NOTE(review): unlike the setter, QDF_IBSS_MODE is absent from this
 * switch, so an IBSS open session is never cleared — confirm intent.
 *
 * Return: NONE
 */
void policy_mgr_clear_concurrency_mode(struct wlan_objmgr_psoc *psoc,
		enum QDF_OPMODE mode)
{
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid context");
		return;
	}

	switch (mode) {
	case QDF_STA_MODE:
	case QDF_P2P_CLIENT_MODE:
	case QDF_P2P_GO_MODE:
	case QDF_SAP_MODE:
	case QDF_MONITOR_MODE:
		pm_ctx->no_of_open_sessions[mode]--;
		if (!(pm_ctx->no_of_open_sessions[mode]))
			pm_ctx->concurrency_mode &= (~(1 << mode));
		break;
	default:
		break;
	}

	policy_mgr_info("concurrency_mode = 0x%x Number of open sessions for mode %d = %d",
		pm_ctx->concurrency_mode, mode,
		pm_ctx->no_of_open_sessions[mode]);
}

/*
 * policy_mgr_incr_active_session() - Account a newly active session: bump
 * the per-mode active count, add the vdev to the connection table, refresh
 * the PCL for an existing STA, notify TDLS, and toggle RX offload / RPS
 * datapath callbacks based on the resulting concurrency.
 */
void policy_mgr_incr_active_session(struct wlan_objmgr_psoc *psoc,
				enum QDF_OPMODE mode,
				uint8_t session_id)
{
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return;
	}

	/*
	 * Need to acquire mutex as entire functionality in this function
	 * is in critical section
	 */
	qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
	switch (mode) {
	case QDF_STA_MODE:
	case QDF_P2P_CLIENT_MODE:
	case QDF_P2P_GO_MODE:
	case QDF_SAP_MODE:
	case QDF_IBSS_MODE:
		pm_ctx->no_of_active_sessions[mode]++;
		break;
	default:
		break;
	}

	if (pm_ctx->dp_cbacks.hdd_v2_flow_pool_map)
		pm_ctx->dp_cbacks.hdd_v2_flow_pool_map(session_id);

	policy_mgr_debug("No.# of active sessions for mode %d = %d",
		mode, pm_ctx->no_of_active_sessions[mode]);
	policy_mgr_incr_connection_count(psoc, session_id);
	/* Lock is dropped around the PCL update, which re-walks the table */
	if ((policy_mgr_mode_specific_connection_count(
		psoc, PM_STA_MODE, NULL) > 0) && (mode != QDF_STA_MODE)) {
		qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);
		policy_mgr_set_pcl_for_existing_combo(psoc, PM_STA_MODE);
		qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
	}

	/* Notify tdls */
	if (pm_ctx->tdls_cbacks.tdls_notify_increment_session)
		pm_ctx->tdls_cbacks.tdls_notify_increment_session(psoc);

	/*
	 * Disable LRO/GRO if P2P or IBSS or SAP connection has come up or
	 * there are more than one STA connections
	 */
	if ((policy_mgr_mode_specific_connection_count(psoc, PM_STA_MODE, NULL) > 1) ||
	    (policy_mgr_mode_specific_connection_count(psoc, PM_SAP_MODE, NULL) > 0) ||
	    (policy_mgr_mode_specific_connection_count(psoc, PM_P2P_CLIENT_MODE, NULL) >
									0) ||
	    (policy_mgr_mode_specific_connection_count(psoc, PM_P2P_GO_MODE, NULL) > 0) ||
	    (policy_mgr_mode_specific_connection_count(psoc, PM_IBSS_MODE, NULL) > 0)) {
		if (pm_ctx->dp_cbacks.hdd_disable_rx_ol_in_concurrency != NULL)
			pm_ctx->dp_cbacks.hdd_disable_rx_ol_in_concurrency(true);
	};

	/* Enable RPS if SAP interface has come up */
	if (policy_mgr_mode_specific_connection_count(psoc, PM_SAP_MODE, NULL)
		== 1) {
		if (pm_ctx->dp_cbacks.hdd_set_rx_mode_rps_cb != NULL)
			pm_ctx->dp_cbacks.hdd_set_rx_mode_rps_cb(true);
	}

	policy_mgr_dump_current_concurrency(psoc);

	qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);
}

/*
 * policy_mgr_decr_active_session() - Reverse of the increment path: drop
 * the per-mode active count and table entry, notify TDLS, and restore the
 * RX offload / RPS datapath state. Returns an error when no connection
 * matches the given mode and session id.
 */
QDF_STATUS policy_mgr_decr_active_session(struct wlan_objmgr_psoc *psoc,
				enum QDF_OPMODE mode,
				uint8_t session_id)
{
	struct policy_mgr_psoc_priv_obj *pm_ctx;
	QDF_STATUS qdf_status;
	bool mcc_mode;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("context is NULL");
		return QDF_STATUS_E_EMPTY;
	}

	qdf_status = policy_mgr_check_conn_with_mode_and_vdev_id(psoc,
			policy_mgr_convert_device_mode_to_qdf_type(mode),
			session_id);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		policy_mgr_debug("No connection with mode:%d vdev_id:%d",
			policy_mgr_convert_device_mode_to_qdf_type(mode),
			session_id);
		return qdf_status;
	}

	switch (mode) {
	case QDF_STA_MODE:
	case QDF_P2P_CLIENT_MODE:
	case QDF_P2P_GO_MODE:
	case QDF_SAP_MODE:
	case QDF_IBSS_MODE:
		if (pm_ctx->no_of_active_sessions[mode])
			pm_ctx->no_of_active_sessions[mode]--;
		break;
	default:
		break;
	}

	if (pm_ctx->dp_cbacks.hdd_v2_flow_pool_unmap)
		pm_ctx->dp_cbacks.hdd_v2_flow_pool_unmap(session_id);

	policy_mgr_debug("No.# of active sessions for mode %d = %d",
		mode, pm_ctx->no_of_active_sessions[mode]);

	policy_mgr_decr_connection_count(psoc, session_id);

	/* Notify tdls */
	if (pm_ctx->tdls_cbacks.tdls_notify_decrement_session)
		pm_ctx->tdls_cbacks.tdls_notify_decrement_session(psoc);
	/* Enable LRO/GRO if there is no concurrency left (single STA) */
	if ((policy_mgr_mode_specific_connection_count(psoc, PM_STA_MODE, NULL) == 1) &&
	    (policy_mgr_mode_specific_connection_count(psoc, PM_SAP_MODE, NULL) == 0) &&
	    (policy_mgr_mode_specific_connection_count(psoc, PM_P2P_CLIENT_MODE, NULL) ==
									0) &&
	    (policy_mgr_mode_specific_connection_count(psoc, PM_P2P_GO_MODE, NULL) == 0) &&
	    (policy_mgr_mode_specific_connection_count(psoc, PM_IBSS_MODE, NULL) == 0)) {
		if (pm_ctx->dp_cbacks.hdd_disable_rx_ol_in_concurrency != NULL)
			pm_ctx->dp_cbacks.hdd_disable_rx_ol_in_concurrency(false);
	};

	/* Disable RPS if the last SAP interface has gone down */
	if (policy_mgr_mode_specific_connection_count(psoc, PM_SAP_MODE, NULL)
		== 0) {
		if (pm_ctx->dp_cbacks.hdd_set_rx_mode_rps_cb != NULL)
			pm_ctx->dp_cbacks.hdd_set_rx_mode_rps_cb(false);
	}

	policy_mgr_dump_current_concurrency(psoc);

	/*
	 * Check mode of entry being removed. Update mcc_mode only when STA
	 * or SAP since IPA only cares about these two
	 */
	if (mode == QDF_STA_MODE || mode == QDF_SAP_MODE) {
		qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
		mcc_mode = policy_mgr_current_concurrency_is_mcc(psoc);
		qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);

		if (pm_ctx->dp_cbacks.hdd_ipa_set_mcc_mode_cb)
			pm_ctx->dp_cbacks.hdd_ipa_set_mcc_mode_cb(mcc_mode);
	}

	return qdf_status;
}

/*
 * policy_mgr_incr_connection_count() - Fetch the vdev's connection details
 * from WMA, derive mode/channel/bandwidth/chain-mask/NSS, and append the
 * entry to the concurrency table.
 *
 * NOTE(review): the limit check rejects only when the current count already
 * EXCEEDS max_concurrent_active_sessions, so count == max is still
 * admitted — confirm whether '<=' was intended.
 */
QDF_STATUS policy_mgr_incr_connection_count(
		struct wlan_objmgr_psoc *psoc, uint32_t vdev_id)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	uint32_t conn_index;
	struct policy_mgr_vdev_entry_info conn_table_entry = {0};
	enum policy_mgr_chain_mode chain_mask = POLICY_MGR_ONE_ONE;
	uint8_t nss_2g = 0, nss_5g = 0;
	enum policy_mgr_con_mode mode;
	uint8_t chan;
	uint32_t nss = 0;
	struct policy_mgr_psoc_priv_obj *pm_ctx;
	bool update_conn = true;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("context is NULL");
		return status;
	}

	conn_index = policy_mgr_get_connection_count(psoc);
	if (pm_ctx->user_cfg.max_concurrent_active_sessions < conn_index) {
		policy_mgr_err("exceeded max connection limit %d",
			pm_ctx->user_cfg.max_concurrent_active_sessions);
		return status;
	}
	if (pm_ctx->wma_cbacks.wma_get_connection_info) {
		status = pm_ctx->wma_cbacks.wma_get_connection_info(
				vdev_id, &conn_table_entry);
		if (QDF_STATUS_SUCCESS != status) {
			policy_mgr_err("can't find vdev_id %d in connection table",
				vdev_id);
			return status;
		}
	} else {
		policy_mgr_err("wma_get_connection_info is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	mode = policy_mgr_get_mode(conn_table_entry.type,
					conn_table_entry.sub_type);
	chan = wlan_reg_freq_to_chan(pm_ctx->pdev, conn_table_entry.mhz);
	status = policy_mgr_get_nss_for_vdev(psoc, mode, &nss_2g, &nss_5g);
	if (QDF_IS_STATUS_SUCCESS(status)) {
		/* 2x2 chain mask when the band's NSS allows more than 1 */
		if ((WLAN_REG_IS_24GHZ_CH(chan) && (nss_2g > 1)) ||
			(WLAN_REG_IS_5GHZ_CH(chan) && (nss_5g > 1)))
			chain_mask = POLICY_MGR_TWO_TWO;
		else
			chain_mask = POLICY_MGR_ONE_ONE;
		nss = (WLAN_REG_IS_24GHZ_CH(chan)) ? nss_2g : nss_5g;
	} else {
		policy_mgr_err("Error in getting nss");
	}

	/* STA/CLI entries are added without triggering a connection update */
	if (mode == PM_STA_MODE || mode == PM_P2P_CLIENT_MODE)
		update_conn = false;

	/* add the entry */
	policy_mgr_update_conc_list(psoc, conn_index,
			mode,
			chan,
			policy_mgr_get_bw(conn_table_entry.chan_width),
			conn_table_entry.mac_id,
			chain_mask,
			nss, vdev_id, true, update_conn);
	policy_mgr_debug("Add at idx:%d vdev %d mac=%d",
		conn_index, vdev_id,
		conn_table_entry.mac_id);

	return QDF_STATUS_SUCCESS;
}

/*
 * policy_mgr_decr_connection_count() - Remove the entry with @vdev_id from
 * the concurrency table by shifting all later entries down one slot and
 * zeroing the vacated tail slot. Fails when the vdev is not in the table.
 */
QDF_STATUS policy_mgr_decr_connection_count(struct wlan_objmgr_psoc *psoc,
					uint32_t vdev_id)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	uint32_t conn_index = 0, next_conn_index = 0;
	bool found = false;
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return status;
	}

	qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
	while (PM_CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) {
		if (vdev_id == pm_conc_connection_list[conn_index].vdev_id) {
			/* debug msg */
			found = true;
			break;
		}
		conn_index++;
	}
	if (!found) {
		policy_mgr_err("can't find vdev_id %d in pm_conc_connection_list",
			vdev_id);
		qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);
		return status;
	}
	next_conn_index = conn_index + 1;
	/* compact the list: copy each following entry one slot down */
	while (PM_CONC_CONNECTION_LIST_VALID_INDEX(next_conn_index)) {
		pm_conc_connection_list[conn_index].vdev_id =
			pm_conc_connection_list[next_conn_index].vdev_id;
		pm_conc_connection_list[conn_index].mode =
			pm_conc_connection_list[next_conn_index].mode;
		pm_conc_connection_list[conn_index].mac =
			pm_conc_connection_list[next_conn_index].mac;
		pm_conc_connection_list[conn_index].chan =
			pm_conc_connection_list[next_conn_index].chan;
		pm_conc_connection_list[conn_index].bw =
			pm_conc_connection_list[next_conn_index].bw;
		pm_conc_connection_list[conn_index].chain_mask =
			pm_conc_connection_list[next_conn_index].chain_mask;
		pm_conc_connection_list[conn_index].original_nss =
			pm_conc_connection_list[next_conn_index].original_nss;
		pm_conc_connection_list[conn_index].in_use =
			pm_conc_connection_list[next_conn_index].in_use;
		conn_index++;
		next_conn_index++;
	}

	/* clean up the entry */
	qdf_mem_zero(&pm_conc_connection_list[next_conn_index - 1],
		sizeof(*pm_conc_connection_list));
	qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);

	return QDF_STATUS_SUCCESS;
}

/*
 * policy_mgr_map_concurrency_mode() - Map a QDF op-mode to the policy
 * manager connection mode. Returns false (and PM_MAX_NUM_OF_MODE) for
 * unmapped modes.
 */
bool policy_mgr_map_concurrency_mode(enum QDF_OPMODE *old_mode,
		enum policy_mgr_con_mode *new_mode)
{
	bool status = true;

	switch (*old_mode) {

	case QDF_STA_MODE:
		*new_mode = PM_STA_MODE;
		break;
	case QDF_SAP_MODE:
		*new_mode = PM_SAP_MODE;
		break;
	case QDF_P2P_CLIENT_MODE:
		*new_mode = PM_P2P_CLIENT_MODE;
		break;
	case QDF_P2P_GO_MODE:
		*new_mode = PM_P2P_GO_MODE;
		break;
	case QDF_IBSS_MODE:
		*new_mode = PM_IBSS_MODE;
		break;
	default:
		*new_mode = PM_MAX_NUM_OF_MODE;
		status = false;
		break;
	}

	return status;
}

/*
 * policy_mgr_is_ibss_conn_exist() - True when at least one IBSS connection
 * exists; @ibss_channel receives the first IBSS connection's channel.
 */
bool policy_mgr_is_ibss_conn_exist(struct wlan_objmgr_psoc *psoc,
				uint8_t *ibss_channel)
{
	uint32_t count = 0, index = 0;
	uint32_t list[MAX_NUMBER_OF_CONC_CONNECTIONS];
	bool status = false;
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return status;
	}
	if (NULL == ibss_channel) {
		policy_mgr_err("Null pointer error");
		return false;
	}
	count = policy_mgr_mode_specific_connection_count(
			psoc, PM_IBSS_MODE, list);
	qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
	if (count == 0) {
		/* No IBSS connection */
		status = false;
	} else if (count == 1) {
		*ibss_channel = pm_conc_connection_list[list[index]].chan;
		status = true;
	} else {
		*ibss_channel = pm_conc_connection_list[list[index]].chan;
		policy_mgr_debug("Multiple IBSS connections, picking first one");
		status = true;
	}
	qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);

	return status;
}

/*
 * policy_mgr_get_mode_specific_conn_info() - Fill channel(s) and vdev id(s)
 * of connections of the given mode and return how many were found. For a
 * single match the outputs are scalars; for multiple matches they are
 * treated as arrays, so callers must size both buffers for
 * MAX_NUMBER_OF_CONC_CONNECTIONS.
 */
uint32_t policy_mgr_get_mode_specific_conn_info(struct wlan_objmgr_psoc *psoc,
				  uint8_t *channel, uint8_t *vdev_id,
				  enum policy_mgr_con_mode mode)
{

	uint32_t count = 0, index = 0;
	uint32_t list[MAX_NUMBER_OF_CONC_CONNECTIONS];
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return count;
	}
	if (NULL == channel || NULL == vdev_id) {
		policy_mgr_err("Null pointer error");
		return count;
	}

	count = policy_mgr_mode_specific_connection_count(
				psoc, mode, list);
	qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
	if (count == 0) {
		policy_mgr_debug("No mode:[%d] connection", mode);
	} else if (count == 1) {
		*channel = pm_conc_connection_list[list[index]].chan;
		*vdev_id =
			pm_conc_connection_list[list[index]].vdev_id;
	} else {
		for (index = 0; index < count; index++) {
			channel[index] =
				pm_conc_connection_list[list[index]].chan;

			vdev_id[index] =
				pm_conc_connection_list[list[index]].vdev_id;
		}
		policy_mgr_debug("Multiple mode:[%d] connections", mode);
	}
	qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);

	return count;
}

/*
 * policy_mgr_max_concurrent_connections_reached() - True when the total of
 * all per-mode active sessions exceeds the configured maximum.
 *
 * NOTE(review): 'j' is uint8_t and the comparison is written as
 * j > (max - 1); equivalent to j >= max under integer promotion, but worth
 * confirming max_concurrent_active_sessions can never be 0.
 */
bool policy_mgr_max_concurrent_connections_reached(
		struct wlan_objmgr_psoc *psoc)
{
	uint8_t i = 0, j = 0;
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (NULL != pm_ctx) {
		for (i = 0; i < QDF_MAX_NO_OF_MODE; i++)
			j += pm_ctx->no_of_active_sessions[i];
		return j >
			(pm_ctx->user_cfg.
			 max_concurrent_active_sessions - 1);
	}

	return false;
}

/*
 * policy_mgr_is_sub_20_mhz_enabled() - True when the user configuration
 * enables sub-20 MHz channel width operation.
 */
static bool policy_mgr_is_sub_20_mhz_enabled(struct wlan_objmgr_psoc *psoc)
{
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return false;
	}

	return pm_ctx->user_cfg.sub_20_mhz_enabled;
}

/**
 * policy_mgr_check_privacy_for_new_conn() - Check privacy mode concurrency
 * @pm_ctx: policy_mgr_psoc_priv_obj policy mgr context
 *
 * This routine is called to check vdev security mode allowed in concurrency.
 * At present, WAPI security mode is not allowed to run concurrency with any
 * other vdev.
 *
 * Return: true - allow
 */
static bool policy_mgr_check_privacy_for_new_conn(
	struct policy_mgr_psoc_priv_obj *pm_ctx)
{
	if (!pm_ctx->hdd_cbacks.hdd_wapi_security_sta_exist)
		return true;

	if (pm_ctx->hdd_cbacks.hdd_wapi_security_sta_exist() &&
	    (policy_mgr_get_connection_count(pm_ctx->psoc) > 0))
		return false;

	return true;
}

/*
 * policy_mgr_is_concurrency_allowed() - Gatekeeper for a prospective new
 * connection of @mode on @channel: enforces the session cap, sub-20 MHz
 * restriction, DFS/MCC rules, STA+STA policy, all IBSS+STA combinations,
 * SAP/GO coexistence, same-band P2P GO limits, and WAPI privacy. Returns
 * true only when every check passes.
 */
bool policy_mgr_is_concurrency_allowed(struct wlan_objmgr_psoc *psoc,
				       enum policy_mgr_con_mode mode,
				       uint8_t channel,
				       enum hw_mode_bandwidth bw)
{
	uint32_t num_connections = 0, count = 0, index = 0;
	bool status = false, match = false;
	uint32_t list[MAX_NUMBER_OF_CONC_CONNECTIONS];
	struct policy_mgr_psoc_priv_obj *pm_ctx;
	bool sta_sap_scc_on_dfs_chan;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return status;
	}
	/* find the current connection state from pm_conc_connection_list*/
	num_connections = policy_mgr_get_connection_count(psoc);

	if (num_connections && policy_mgr_is_sub_20_mhz_enabled(psoc)) {
		policy_mgr_err("dont allow concurrency if Sub 20 MHz is enabled");
		status = false;
		goto done;
	}

	if (policy_mgr_max_concurrent_connections_reached(psoc)) {
		policy_mgr_err("Reached max concurrent connections: %d",
			pm_ctx->user_cfg.max_concurrent_active_sessions);
		goto done;
	}

	if (channel) {
		/* don't allow 3rd home channel on same MAC */
		if (!policy_mgr_allow_new_home_channel(psoc,
			channel, num_connections))
			goto done;

		/*
		 * 1) DFS MCC is not yet supported
		 * 2) If you already have STA connection on 5G channel then
		 *    don't allow any other persona to make connection on DFS
		 *    channel because STA 5G + DFS MCC is not allowed.
		 * 3) If STA is on 2G channel and SAP is coming up on
		 *    DFS channel then allow concurrency but make sure it is
		 *    going to DBS and send PCL to firmware indicating that
		 *    don't allow STA to roam to 5G channels.
		 */
		if (!policy_mgr_is_5g_channel_allowed(psoc,
			channel, list, PM_P2P_GO_MODE))
			goto done;
		if (!policy_mgr_is_5g_channel_allowed(psoc,
			channel, list, PM_SAP_MODE))
			goto done;

		sta_sap_scc_on_dfs_chan =
			policy_mgr_is_sta_sap_scc_allowed_on_dfs_chan(psoc);
		policy_mgr_debug("sta_sap_scc_on_dfs_chan %u",
				 sta_sap_scc_on_dfs_chan);

		if (!sta_sap_scc_on_dfs_chan && ((mode == PM_P2P_GO_MODE) ||
		    (mode == PM_SAP_MODE))) {
			if (wlan_reg_is_dfs_ch(pm_ctx->pdev, channel))
				match = policy_mgr_disallow_mcc(psoc, channel);
		}
		if (true == match) {
			policy_mgr_err("No MCC, SAP/GO about to come up on DFS channel");
			goto done;
		}
	}

	count = policy_mgr_mode_specific_connection_count(psoc, PM_STA_MODE,
							  list);

	/* Check for STA+STA concurrency */
	if (mode == PM_STA_MODE && count &&
	    !policy_mgr_allow_multiple_sta_connections(psoc)) {
		policy_mgr_err("No 2nd STA connection, already one STA is connected");
		goto done;
	}

	/*
	 * Check all IBSS+STA concurrencies
	 *
	 * don't allow IBSS + STA MCC
	 * don't allow IBSS + STA SCC if IBSS is on DFS channel
	 */
	if ((PM_IBSS_MODE == mode) &&
		(policy_mgr_mode_specific_connection_count(psoc,
		PM_IBSS_MODE, list)) && count) {
		policy_mgr_err("No 2nd IBSS, we already have STA + IBSS");
		goto done;
	}
	if ((PM_IBSS_MODE == mode) &&
	    (wlan_reg_is_dfs_ch(pm_ctx->pdev, channel)) && count) {
		policy_mgr_err("No IBSS + STA SCC/MCC, IBSS is on DFS channel");
		goto done;
	}
	if (PM_IBSS_MODE == mode) {
		if (policy_mgr_is_hw_dbs_capable(psoc) == true) {
			if (num_connections > 1) {
				policy_mgr_err("No IBSS, we have concurrent connections already");
				goto done;
			}
			qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
			if (PM_STA_MODE != pm_conc_connection_list[0].mode) {
				policy_mgr_err("No IBSS, we've a non-STA connection");
				qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);
				goto done;
			}
			/*
			 * This logic protects STA and IBSS to come up on same
			 * band. If requirement changes then this condition
			 * needs to be removed
			 */
			if (channel &&
				(pm_conc_connection_list[0].chan != channel) &&
				WLAN_REG_IS_SAME_BAND_CHANNELS(
				pm_conc_connection_list[0].chan, channel)) {
				qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);
				policy_mgr_err("No IBSS + STA MCC");
				goto done;
			}
			qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);
		} else if (num_connections) {
			policy_mgr_err("No IBSS, we have one connection already");
			goto done;
		}
	}

	if ((PM_STA_MODE == mode) &&
		(policy_mgr_mode_specific_connection_count(psoc,
		PM_IBSS_MODE, list)) && count) {
		policy_mgr_err("No 2nd STA, we already have STA + IBSS");
		goto done;
	}

	if ((PM_STA_MODE == mode) &&
		(policy_mgr_mode_specific_connection_count(psoc,
		PM_IBSS_MODE, list))) {
		if (policy_mgr_is_hw_dbs_capable(psoc) == true) {
			if (num_connections > 1) {
				policy_mgr_err("No 2nd STA, we already have IBSS concurrency");
				goto done;
			}
			qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
			if (channel &&
				(wlan_reg_is_dfs_ch(pm_ctx->pdev,
				pm_conc_connection_list[0].chan))
				&& (WLAN_REG_IS_5GHZ_CH(channel))) {
				qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);
				policy_mgr_err("No IBSS + STA SCC/MCC, IBSS is on DFS channel");
				goto done;
			}
			/*
			 * This logic protects STA and IBSS to come up on same
			 * band.
			 If requirement changes then this condition
			 * needs to be removed
			 */
			if ((pm_conc_connection_list[0].chan != channel) &&
				WLAN_REG_IS_SAME_BAND_CHANNELS(
				pm_conc_connection_list[0].chan, channel)) {
				policy_mgr_err("No IBSS + STA MCC");
				qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);
				goto done;
			}
			qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);
		} else {
			policy_mgr_err("No STA, we have IBSS connection already");
			goto done;
		}
	}

	if (!policy_mgr_allow_sap_go_concurrency(psoc, mode, channel,
						 WLAN_INVALID_VDEV_ID)) {
		policy_mgr_err("This concurrency combination is not allowed");
		goto done;
	}

	/* don't allow two P2P GO on same band */
	if (channel && (mode == PM_P2P_GO_MODE) && num_connections) {
		index = 0;
		count = policy_mgr_mode_specific_connection_count(psoc,
						PM_P2P_GO_MODE, list);
		qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
		while (index < count) {
			if (WLAN_REG_IS_SAME_BAND_CHANNELS(channel,
				pm_conc_connection_list[list[index]].chan)) {
				policy_mgr_err("Don't allow P2P GO on same band");
				qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);
				goto done;
			}
			index++;
		}
		qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);
	}

	if (!policy_mgr_check_privacy_for_new_conn(pm_ctx)) {
		policy_mgr_err("Don't allow new conn when wapi security conn existing");
		goto done;
	}

	status = true;

done:
	return status;
}

/*
 * policy_mgr_allow_concurrency() - Wrapper over the concurrency check that
 * first verifies a preferred channel list can be built for @mode.
 */
bool policy_mgr_allow_concurrency(struct wlan_objmgr_psoc *psoc,
				  enum policy_mgr_con_mode mode,
				  uint8_t channel, enum hw_mode_bandwidth bw)
{
	QDF_STATUS status;
	struct policy_mgr_pcl_list pcl;

	qdf_mem_zero(&pcl, sizeof(pcl));
	status = policy_mgr_get_pcl(psoc, mode, pcl.pcl_list, &pcl.pcl_len,
				    pcl.weight_list,
				    QDF_ARRAY_SIZE(pcl.weight_list));
	if (QDF_IS_STATUS_ERROR(status)) {
		policy_mgr_err("disallow connection:%d", status);
		return false;
	}

	return policy_mgr_is_concurrency_allowed(psoc, mode, channel, bw);
}

/*
 * policy_mgr_allow_concurrency_csa() - Concurrency check for a channel
 * switch: the vdev's own entry is temporarily removed from the table so the
 * target channel is evaluated as if it were a new connection, then restored.
 *
 * NOTE(review): this calls policy_mgr_allow_concurrency() while holding
 * qdf_conc_list_lock, and some paths inside the check re-acquire the same
 * lock — confirm the lock is recursive or those paths are unreachable here.
 */
bool policy_mgr_allow_concurrency_csa(struct wlan_objmgr_psoc *psoc,
				      enum policy_mgr_con_mode mode,
				      uint8_t channel,
				      uint32_t vdev_id)
{
	bool allow = false;
	struct policy_mgr_conc_connection_info info;
	uint8_t num_cxn_del = 0;
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return allow;
	}

	/*
	 * Store the connection's parameter and temporarily delete it
	 * from the concurrency table. This way the allow concurrency
	 * check can be used as though a new connection is coming up,
	 * after check, restore the connection to concurrency table.
	 */
	qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
	policy_mgr_store_and_del_conn_info_by_vdev_id(psoc, vdev_id,
						      &info, &num_cxn_del);
	allow = policy_mgr_allow_concurrency(
				psoc,
				mode,
				channel,
				HW_MODE_20_MHZ);
	/* Restore the connection entry */
	if (num_cxn_del > 0)
		policy_mgr_restore_deleted_conn_info(psoc, &info, num_cxn_del);
	qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);

	if (!allow)
		policy_mgr_err("CSA concurrency check failed");

	return allow;
}

/**
 * policy_mgr_get_concurrency_mode() - return concurrency mode
 * @psoc: PSOC object information
 *
 * This routine is used to retrieve concurrency mode
 *
 * Return: uint32_t value of concurrency mask
 */
uint32_t policy_mgr_get_concurrency_mode(struct wlan_objmgr_psoc *psoc)
{
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid context");
		return QDF_STA_MASK;
	}

	policy_mgr_info("concurrency_mode: 0x%x",
			pm_ctx->concurrency_mode);

	return pm_ctx->concurrency_mode;
}

/**
 * policy_mgr_get_channel_from_scan_result() - to get channel from scan result
 * @psoc: PSOC object information
 * @roam_profile: pointer to roam profile
 * @channel: channel to be filled
 *
 * This routine gets channel which most likely a candidate to which STA
 * will make connection.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS policy_mgr_get_channel_from_scan_result(
		struct wlan_objmgr_psoc *psoc,
		void *roam_profile, uint8_t *channel)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *scan_cache = NULL;
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid context");
		return QDF_STATUS_E_INVAL;
	}

	if (!roam_profile || !channel) {
		policy_mgr_err("Invalid input parameters");
		return QDF_STATUS_E_INVAL;
	}

	if (pm_ctx->sme_cbacks.sme_get_ap_channel_from_scan) {
		status = pm_ctx->sme_cbacks.sme_get_ap_channel_from_scan
			(roam_profile, &scan_cache, channel);
		if (status != QDF_STATUS_SUCCESS) {
			policy_mgr_err("Get AP channel failed");
			return status;
		}
	} else {
		policy_mgr_err("sme_get_ap_channel_from_scan_cache NULL");
		return QDF_STATUS_E_FAILURE;
	}

	/* release the scan cache obtained above regardless of outcome */
	if (pm_ctx->sme_cbacks.sme_scan_result_purge)
		status = pm_ctx->sme_cbacks.sme_scan_result_purge(scan_cache);
	else
		policy_mgr_err("sme_scan_result_purge NULL");

	return status;
}

/*
 * policy_mgr_set_user_cfg() - Copy the caller-supplied user configuration
 * into the policy manager context and seed the current system concurrency
 * preference from it.
 */
QDF_STATUS policy_mgr_set_user_cfg(struct wlan_objmgr_psoc *psoc,
				struct policy_mgr_user_cfg *user_cfg)
{

	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid context");
		return QDF_STATUS_E_FAILURE;
	}
	if (NULL == user_cfg) {
		policy_mgr_err("Invalid User Config");
		return QDF_STATUS_E_FAILURE;
	}

	pm_ctx->user_cfg = *user_cfg;

	pm_ctx->cur_conc_system_pref = pm_ctx->user_cfg.conc_system_pref;

	return QDF_STATUS_SUCCESS;
}

/* Definition continues past this chunk */
uint8_t policy_mgr_search_and_check_for_session_conc(
		struct wlan_objmgr_psoc *psoc,
		uint8_t session_id,
		void *roam_profile)
{
	uint8_t channel = 0;
	QDF_STATUS status;
	enum policy_mgr_con_mode mode;
	bool ret;
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return channel;
	}
+ if (pm_ctx->hdd_cbacks.get_mode_for_non_connected_vdev) { + mode = pm_ctx->hdd_cbacks.get_mode_for_non_connected_vdev( + psoc, session_id); + if (PM_MAX_NUM_OF_MODE == mode) { + policy_mgr_err("Invalid mode"); + return channel; + } + } else + return channel; + + status = policy_mgr_get_channel_from_scan_result(psoc, + roam_profile, &channel); + if ((QDF_STATUS_SUCCESS != status) || (channel == 0)) { + policy_mgr_err("%s error %d %d", + __func__, status, channel); + return 0; + } + + /* Take care of 160MHz and 80+80Mhz later */ + ret = policy_mgr_allow_concurrency(psoc, mode, channel, HW_MODE_20_MHZ); + if (false == ret) { + policy_mgr_err("Connection failed due to conc check fail"); + return 0; + } + + return channel; +} + +/** + * policy_mgr_is_two_connection_mcc() - Check if MCC scenario + * when there are two connections + * + * If if MCC scenario when there are two connections + * + * Return: true or false + */ +static bool policy_mgr_is_two_connection_mcc(void) +{ + return ((pm_conc_connection_list[0].chan != + pm_conc_connection_list[1].chan) && + (pm_conc_connection_list[0].mac == + pm_conc_connection_list[1].mac) && + (pm_conc_connection_list[0].chan <= + WLAN_REG_MAX_24GHZ_CH_NUM) && + (pm_conc_connection_list[1].chan <= + WLAN_REG_MAX_24GHZ_CH_NUM)) ? 
true : false; +} + +/** + * policy_mgr_is_three_connection_mcc() - Check if MCC scenario + * when there are three connections + * + * If if MCC scenario when there are three connections + * + * Return: true or false + */ +static bool policy_mgr_is_three_connection_mcc(void) +{ + return (((pm_conc_connection_list[0].chan != + pm_conc_connection_list[1].chan) || + (pm_conc_connection_list[0].chan != + pm_conc_connection_list[2].chan) || + (pm_conc_connection_list[1].chan != + pm_conc_connection_list[2].chan)) && + (pm_conc_connection_list[0].chan <= + WLAN_REG_MAX_24GHZ_CH_NUM) && + (pm_conc_connection_list[1].chan <= + WLAN_REG_MAX_24GHZ_CH_NUM) && + (pm_conc_connection_list[2].chan <= + WLAN_REG_MAX_24GHZ_CH_NUM)) ? true : false; +} + +bool policy_mgr_is_mcc_in_24G(struct wlan_objmgr_psoc *psoc) +{ + uint32_t num_connections = 0; + bool is_24G_mcc = false; + + num_connections = policy_mgr_get_connection_count(psoc); + + switch (num_connections) { + case 1: + break; + case 2: + if (policy_mgr_is_two_connection_mcc()) + is_24G_mcc = true; + break; + case 3: + if (policy_mgr_is_three_connection_mcc()) + is_24G_mcc = true; + break; + default: + policy_mgr_err("unexpected num_connections value %d", + num_connections); + break; + } + + return is_24G_mcc; +} + +bool policy_mgr_check_for_session_conc(struct wlan_objmgr_psoc *psoc, + uint8_t session_id, uint8_t channel) +{ + enum policy_mgr_con_mode mode; + bool ret; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return false; + } + + if (pm_ctx->hdd_cbacks.get_mode_for_non_connected_vdev) { + mode = pm_ctx->hdd_cbacks.get_mode_for_non_connected_vdev( + psoc, session_id); + if (PM_MAX_NUM_OF_MODE == mode) { + policy_mgr_err("Invalid mode"); + return false; + } + } else + return false; + + if (channel == 0) { + policy_mgr_err("Invalid channel number 0"); + return false; + } + + /* Take care of 160MHz and 80+80Mhz later */ + ret = 
policy_mgr_allow_concurrency(psoc, mode, channel, HW_MODE_20_MHZ); + if (false == ret) { + policy_mgr_err("Connection failed due to conc check fail"); + return 0; + } + + return true; +} + +bool policy_mgr_is_mcc_adaptive_scheduler_enabled( + struct wlan_objmgr_psoc *psoc) { + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid context"); + return false; + } + + return pm_ctx->user_cfg.enable_mcc_adaptive_scheduler ? + true : false; +} + +/** + * policy_mgr_change_mcc_go_beacon_interval() - Change MCC beacon interval + * @psoc: PSOC object information + * @vdev_id: vdev id + * @dev_mode: device mode + * + * Updates the beacon parameters of the GO in MCC scenario + * + * Return: Success or Failure depending on the overall function behavior + */ +QDF_STATUS policy_mgr_change_mcc_go_beacon_interval( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, enum QDF_OPMODE dev_mode) +{ + QDF_STATUS status; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid context"); + return QDF_STATUS_E_FAILURE; + } + + policy_mgr_info("UPDATE Beacon Params"); + + if (QDF_SAP_MODE == dev_mode) { + if (pm_ctx->sme_cbacks.sme_change_mcc_beacon_interval + ) { + status = pm_ctx->sme_cbacks. 
+ sme_change_mcc_beacon_interval(vdev_id); + if (status == QDF_STATUS_E_FAILURE) { + policy_mgr_err("Failed to update Beacon Params"); + return QDF_STATUS_E_FAILURE; + } + } else { + policy_mgr_err("sme_change_mcc_beacon_interval callback is NULL"); + return QDF_STATUS_E_FAILURE; + } + } + + return QDF_STATUS_SUCCESS; +} + +struct policy_mgr_conc_connection_info *policy_mgr_get_conn_info(uint32_t *len) +{ + struct policy_mgr_conc_connection_info *conn_ptr = + &pm_conc_connection_list[0]; + *len = MAX_NUMBER_OF_CONC_CONNECTIONS; + + return conn_ptr; +} + +enum policy_mgr_con_mode policy_mgr_convert_device_mode_to_qdf_type( + enum QDF_OPMODE device_mode) +{ + enum policy_mgr_con_mode mode = PM_MAX_NUM_OF_MODE; + switch (device_mode) { + case QDF_STA_MODE: + mode = PM_STA_MODE; + break; + case QDF_P2P_CLIENT_MODE: + mode = PM_P2P_CLIENT_MODE; + break; + case QDF_P2P_GO_MODE: + mode = PM_P2P_GO_MODE; + break; + case QDF_SAP_MODE: + mode = PM_SAP_MODE; + break; + case QDF_IBSS_MODE: + mode = PM_IBSS_MODE; + break; + default: + policy_mgr_debug("Unsupported mode (%d)", + device_mode); + } + + return mode; +} + +enum QDF_OPMODE policy_mgr_get_qdf_mode_from_pm( + enum policy_mgr_con_mode device_mode) +{ + enum QDF_OPMODE mode = QDF_MAX_NO_OF_MODE; + + switch (device_mode) { + case PM_STA_MODE: + mode = QDF_STA_MODE; + break; + case PM_SAP_MODE: + mode = QDF_SAP_MODE; + break; + case PM_P2P_CLIENT_MODE: + mode = QDF_P2P_CLIENT_MODE; + break; + case PM_P2P_GO_MODE: + mode = QDF_P2P_GO_MODE; + break; + case PM_IBSS_MODE: + mode = QDF_IBSS_MODE; + break; + default: + policy_mgr_debug("Unsupported policy mgr mode (%d)", + device_mode); + } + return mode; +} + +QDF_STATUS policy_mgr_mode_specific_num_open_sessions( + struct wlan_objmgr_psoc *psoc, enum QDF_OPMODE mode, + uint8_t *num_sessions) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid context"); + return QDF_STATUS_E_FAILURE; + } + + 
*num_sessions = pm_ctx->no_of_open_sessions[mode]; + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS policy_mgr_mode_specific_num_active_sessions( + struct wlan_objmgr_psoc *psoc, enum QDF_OPMODE mode, + uint8_t *num_sessions) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid context"); + return QDF_STATUS_E_FAILURE; + } + + *num_sessions = pm_ctx->no_of_active_sessions[mode]; + return QDF_STATUS_SUCCESS; +} + +/** + * policy_mgr_concurrent_open_sessions_running() - Checks for + * concurrent open session + * @psoc: PSOC object information + * + * Checks if more than one open session is running for all the allowed modes + * in the driver + * + * Return: True if more than one open session exists, False otherwise + */ +bool policy_mgr_concurrent_open_sessions_running( + struct wlan_objmgr_psoc *psoc) +{ + uint8_t i = 0; + uint8_t j = 0; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid context"); + return false; + } + + for (i = 0; i < QDF_MAX_NO_OF_MODE; i++) + j += pm_ctx->no_of_open_sessions[i]; + + return j > 1; +} + +/** + * policy_mgr_concurrent_beaconing_sessions_running() - Checks + * for concurrent beaconing entities + * @psoc: PSOC object information + * + * Checks if multiple beaconing sessions are running i.e., if SAP or GO or IBSS + * are beaconing together + * + * Return: True if multiple entities are beaconing together, False otherwise + */ +bool policy_mgr_concurrent_beaconing_sessions_running( + struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid context"); + return false; + } + + return (pm_ctx->no_of_open_sessions[QDF_SAP_MODE] + + pm_ctx->no_of_open_sessions[QDF_P2P_GO_MODE] + + pm_ctx->no_of_open_sessions[QDF_IBSS_MODE] > 1) ? 
true : false; +} + + +void policy_mgr_clear_concurrent_session_count(struct wlan_objmgr_psoc *psoc) +{ + uint8_t i = 0; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (NULL != pm_ctx) { + for (i = 0; i < QDF_MAX_NO_OF_MODE; i++) + pm_ctx->no_of_active_sessions[i] = 0; + } +} + +bool policy_mgr_is_multiple_active_sta_sessions(struct wlan_objmgr_psoc *psoc) +{ + return policy_mgr_mode_specific_connection_count( + psoc, PM_STA_MODE, NULL) > 1; +} + +/** + * policy_mgr_is_sta_active_connection_exists() - Check if a STA + * connection is active + * @psoc: PSOC object information + * + * Checks if there is atleast one active STA connection in the driver + * + * Return: True if an active STA session is present, False otherwise + */ +bool policy_mgr_is_sta_active_connection_exists( + struct wlan_objmgr_psoc *psoc) +{ + return (!policy_mgr_mode_specific_connection_count( + psoc, PM_STA_MODE, NULL)) ? false : true; +} + +bool policy_mgr_is_any_nondfs_chnl_present(struct wlan_objmgr_psoc *psoc, + uint8_t *channel) +{ + bool status = false; + uint32_t conn_index = 0; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return false; + } + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + for (conn_index = 0; conn_index < MAX_NUMBER_OF_CONC_CONNECTIONS; + conn_index++) { + if (pm_conc_connection_list[conn_index].in_use && + !wlan_reg_is_dfs_ch(pm_ctx->pdev, + pm_conc_connection_list[conn_index].chan)) { + *channel = pm_conc_connection_list[conn_index].chan; + status = true; + } + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return status; +} + +bool policy_mgr_is_any_dfs_beaconing_session_present( + struct wlan_objmgr_psoc *psoc, uint8_t *channel) +{ + struct policy_mgr_conc_connection_info *conn_info; + bool status = false; + uint32_t conn_index = 0; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); 
+ if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return false; + } + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + for (conn_index = 0; conn_index < MAX_NUMBER_OF_CONC_CONNECTIONS; + conn_index++) { + conn_info = &pm_conc_connection_list[conn_index]; + if (conn_info->in_use && + wlan_reg_is_dfs_ch(pm_ctx->pdev, conn_info->chan) && + (PM_SAP_MODE == conn_info->mode || + PM_P2P_GO_MODE == conn_info->mode)) { + *channel = pm_conc_connection_list[conn_index].chan; + status = true; + } + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return status; +} + +QDF_STATUS policy_mgr_get_nss_for_vdev(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_con_mode mode, + uint8_t *nss_2g, uint8_t *nss_5g) +{ + enum QDF_OPMODE dev_mode; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + dev_mode = policy_mgr_get_qdf_mode_from_pm(mode); + if (dev_mode == QDF_MAX_NO_OF_MODE) + return QDF_STATUS_E_FAILURE; + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return QDF_STATUS_E_FAILURE; + } + + if (pm_ctx->sme_cbacks.sme_get_nss_for_vdev) { + pm_ctx->sme_cbacks.sme_get_nss_for_vdev( + dev_mode, nss_2g, nss_5g); + + } else { + policy_mgr_err("sme_get_nss_for_vdev callback is NULL"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +void policy_mgr_dump_connection_status_info(struct wlan_objmgr_psoc *psoc) +{ + uint32_t i; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + for (i = 0; i < MAX_NUMBER_OF_CONC_CONNECTIONS; i++) { + policy_mgr_debug("%d: use:%d vdev:%d mode:%d mac:%d chan:%d orig chainmask:%d orig nss:%d bw:%d", + i, pm_conc_connection_list[i].in_use, + pm_conc_connection_list[i].vdev_id, + pm_conc_connection_list[i].mode, + pm_conc_connection_list[i].mac, + pm_conc_connection_list[i].chan, + pm_conc_connection_list[i].chain_mask, + 
pm_conc_connection_list[i].original_nss, + pm_conc_connection_list[i].bw); + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); +} + +bool policy_mgr_is_any_mode_active_on_band_along_with_session( + struct wlan_objmgr_psoc *psoc, + uint8_t session_id, + enum policy_mgr_band band) +{ + uint32_t i; + bool status = false; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + status = false; + goto send_status; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + for (i = 0; i < MAX_NUMBER_OF_CONC_CONNECTIONS; i++) { + switch (band) { + case POLICY_MGR_BAND_24: + if ((pm_conc_connection_list[i].vdev_id != session_id) + && (pm_conc_connection_list[i].in_use) && + (WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[i].chan))) { + status = true; + goto release_mutex_and_send_status; + } + break; + case POLICY_MGR_BAND_5: + if ((pm_conc_connection_list[i].vdev_id != session_id) + && (pm_conc_connection_list[i].in_use) && + (WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[i].chan))) { + status = true; + goto release_mutex_and_send_status; + } + break; + default: + policy_mgr_err("Invalidband option:%d", band); + status = false; + goto release_mutex_and_send_status; + } + } +release_mutex_and_send_status: + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); +send_status: + return status; +} + +QDF_STATUS policy_mgr_get_chan_by_session_id(struct wlan_objmgr_psoc *psoc, + uint8_t session_id, uint8_t *chan) +{ + uint32_t i; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return QDF_STATUS_E_FAILURE; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + for (i = 0; i < MAX_NUMBER_OF_CONC_CONNECTIONS; i++) { + if ((pm_conc_connection_list[i].vdev_id == session_id) && + (pm_conc_connection_list[i].in_use)) { + *chan = pm_conc_connection_list[i].chan; + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); 
+ return QDF_STATUS_SUCCESS; + } + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS policy_mgr_get_mac_id_by_session_id(struct wlan_objmgr_psoc *psoc, + uint8_t session_id, uint8_t *mac_id) +{ + uint32_t i; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return QDF_STATUS_E_FAILURE; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + for (i = 0; i < MAX_NUMBER_OF_CONC_CONNECTIONS; i++) { + if ((pm_conc_connection_list[i].vdev_id == session_id) && + (pm_conc_connection_list[i].in_use)) { + *mac_id = pm_conc_connection_list[i].mac; + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + return QDF_STATUS_SUCCESS; + } + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS policy_mgr_get_mcc_session_id_on_mac(struct wlan_objmgr_psoc *psoc, + uint8_t mac_id, uint8_t session_id, + uint8_t *mcc_session_id) +{ + uint32_t i; + QDF_STATUS status; + uint8_t chan; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return QDF_STATUS_E_FAILURE; + } + + status = policy_mgr_get_chan_by_session_id(psoc, session_id, &chan); + if (QDF_IS_STATUS_ERROR(status)) { + policy_mgr_err("Failed to get channel for session id:%d", + session_id); + return QDF_STATUS_E_FAILURE; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + for (i = 0; i < MAX_NUMBER_OF_CONC_CONNECTIONS; i++) { + if (pm_conc_connection_list[i].mac != mac_id) + continue; + if (pm_conc_connection_list[i].vdev_id == session_id) + continue; + /* Inter band or intra band MCC */ + if ((pm_conc_connection_list[i].chan != chan) && + (pm_conc_connection_list[i].in_use)) { + *mcc_session_id = pm_conc_connection_list[i].vdev_id; + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + return QDF_STATUS_SUCCESS; + } + } + 
qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return QDF_STATUS_E_FAILURE; +} + +uint8_t policy_mgr_get_mcc_operating_channel(struct wlan_objmgr_psoc *psoc, + uint8_t session_id) +{ + uint8_t mac_id, mcc_session_id; + QDF_STATUS status; + uint8_t chan; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return INVALID_CHANNEL_ID; + } + + status = policy_mgr_get_mac_id_by_session_id(psoc, session_id, &mac_id); + if (QDF_IS_STATUS_ERROR(status)) { + policy_mgr_err("failed to get MAC ID"); + return INVALID_CHANNEL_ID; + } + + status = policy_mgr_get_mcc_session_id_on_mac(psoc, mac_id, session_id, + &mcc_session_id); + if (QDF_IS_STATUS_ERROR(status)) { + policy_mgr_err("failed to get MCC session ID"); + return INVALID_CHANNEL_ID; + } + + status = policy_mgr_get_chan_by_session_id(psoc, mcc_session_id, + &chan); + if (QDF_IS_STATUS_ERROR(status)) { + policy_mgr_err("Failed to get channel for MCC session ID:%d", + mcc_session_id); + return INVALID_CHANNEL_ID; + } + + return chan; +} + +void policy_mgr_set_do_hw_mode_change_flag(struct wlan_objmgr_psoc *psoc, + bool flag) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + pm_ctx->do_hw_mode_change = flag; + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + policy_mgr_debug("hw_mode_change_channel:%d", flag); +} + +bool policy_mgr_is_hw_mode_change_after_vdev_up(struct wlan_objmgr_psoc *psoc) +{ + bool flag; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return INVALID_CHANNEL_ID; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + flag = pm_ctx->do_hw_mode_change; + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return flag; +} + +bool 
policy_mgr_is_dnsc_set(struct wlan_objmgr_vdev *vdev)
{
	/*
	 * True when this vdev has the WLAN_VDEV_C_RESTRICT_OFFCHAN
	 * capability bit set, i.e. it has requested "Do Not Switch
	 * Channel" (restrict off-channel operation).
	 */
	bool roffchan;

	if (!vdev) {
		policy_mgr_err("Invalid parameter");
		return false;
	}

	roffchan = wlan_vdev_mlme_cap_get(vdev, WLAN_VDEV_C_RESTRICT_OFFCHAN);

	policy_mgr_debug("Restrict offchannel:%s",
			 roffchan ? "set" : "clear");

	return roffchan;
}

/**
 * policy_mgr_is_chan_ok_for_dnbs() - check whether @channel honours the
 * Do-Not-Break-Stream restriction of existing beaconing vdevs
 * @psoc: PSOC object information
 * @channel: candidate channel to validate (0 is rejected when any
 *           beaconing entity exists)
 * @ok: out parameter; set true when @channel may be used
 *
 * Gathers the operating channel and vdev id of every SAP and P2P-GO
 * connection, then for each such vdev that has restrict-offchannel set
 * (policy_mgr_is_dnsc_set()) decides whether @channel is acceptable.
 * The first vdev with the restriction decides the result (loop breaks).
 *
 * Return: QDF_STATUS_SUCCESS with *@ok filled in, or QDF_STATUS_E_INVAL
 * on a NULL @ok, a zero @channel with active beaconing entities, or a
 * failed vdev lookup.
 */
QDF_STATUS policy_mgr_is_chan_ok_for_dnbs(struct wlan_objmgr_psoc *psoc,
			uint8_t channel, bool *ok)
{
	uint32_t cc_count = 0, i;
	uint8_t operating_channel[MAX_NUMBER_OF_CONC_CONNECTIONS];
	uint8_t vdev_id[MAX_NUMBER_OF_CONC_CONNECTIONS];
	struct wlan_objmgr_vdev *vdev;

	if (!ok) {
		policy_mgr_err("Invalid parameter");
		return QDF_STATUS_E_INVAL;
	}

	/* First fill SAP entries, then append P2P-GO entries after them. */
	cc_count = policy_mgr_get_mode_specific_conn_info(psoc,
					&operating_channel[cc_count],
					&vdev_id[cc_count],
					PM_SAP_MODE);
	policy_mgr_debug("Number of SAP modes: %d", cc_count);
	cc_count = cc_count + policy_mgr_get_mode_specific_conn_info(psoc,
					&operating_channel[cc_count],
					&vdev_id[cc_count],
					PM_P2P_GO_MODE);
	policy_mgr_debug("Number of beaconing entities (SAP + GO):%d",
							cc_count);
	/* No beaconing entity: nothing to break, any channel is fine. */
	if (!cc_count) {
		*ok = true;
		return QDF_STATUS_SUCCESS;
	}

	if (!channel) {
		policy_mgr_err("channel is 0, cc count %d", cc_count);
		return QDF_STATUS_E_INVAL;
	}

	for (i = 0; i < cc_count; i++) {
		vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc,
							vdev_id[i],
						WLAN_POLICY_MGR_ID);
		if (!vdev) {
			policy_mgr_err("vdev for vdev_id:%d is NULL",
				       vdev_id[i]);
			return QDF_STATUS_E_INVAL;
		}

		/*
		 * If channel passed is same as AP/GO operating channel, then
		 *   return true.
		 * If channel is different from operating channel but in same
		 *   band, return false.
		 * If operating channel in different band (DBS capable),
		 *   return true.
		 * If operating channel in different band (not DBS capable),
		 *   return false.
		 */
		/* TODO: To be enhanced for SBS */
		if (policy_mgr_is_dnsc_set(vdev)) {
			/* Each branch drops the vdev ref before breaking. */
			if (operating_channel[i] == channel) {
				*ok = true;
				wlan_objmgr_vdev_release_ref(vdev,
						WLAN_POLICY_MGR_ID);
				break;
			} else if (WLAN_REG_IS_SAME_BAND_CHANNELS(
				operating_channel[i], channel)) {
				*ok = false;
				wlan_objmgr_vdev_release_ref(vdev,
						WLAN_POLICY_MGR_ID);
				break;
			} else if (policy_mgr_is_hw_dbs_capable(psoc)) {
				*ok = true;
				wlan_objmgr_vdev_release_ref(vdev,
						WLAN_POLICY_MGR_ID);
				break;
			} else {
				*ok = false;
				wlan_objmgr_vdev_release_ref(vdev,
						WLAN_POLICY_MGR_ID);
				break;
			}
		} else {
			/* No restriction on this vdev: provisionally OK. */
			*ok = true;
		}
		/* Ref released here only on the non-break (else) path. */
		wlan_objmgr_vdev_release_ref(vdev, WLAN_POLICY_MGR_ID);
	}
	policy_mgr_debug("chan: %d ok %d", channel, *ok);

	return QDF_STATUS_SUCCESS;
}

/**
 * policy_mgr_get_hw_dbs_nss() - determine the max spatial-stream capability
 * across all DBS hardware modes
 * @psoc: PSOC object information
 * @nss_dbs: out parameter; per-MAC spatial-stream capability of the best
 *           DBS mode found
 *
 * Scans the firmware-reported hw-mode list; for every DBS-capable entry,
 * the usable chains of a MAC are min(tx, rx), and the mode's overall
 * capability is the max of the two MACs. The best mode seen populates
 * @nss_dbs->mac0_ss / mac1_ss.
 *
 * NOTE(review): if no DBS entry exists in the list, @nss_dbs is left
 * unmodified — callers appear expected to pre-initialize it; confirm.
 *
 * Return: HW_MODE_SS_2x2 / HW_MODE_SS_1x1 for the best DBS mode, or
 * HW_MODE_SS_0x0 when no DBS mode (or no context) is found.
 */
uint32_t policy_mgr_get_hw_dbs_nss(struct wlan_objmgr_psoc *psoc,
		struct dbs_nss *nss_dbs)
{
	int i, param;
	uint32_t dbs, tx_chain0, rx_chain0, tx_chain1, rx_chain1;
	uint32_t min_mac0_rf_chains, min_mac1_rf_chains;
	uint32_t max_rf_chains, final_max_rf_chains = HW_MODE_SS_0x0;
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return final_max_rf_chains;
	}

	for (i = 0; i < pm_ctx->num_dbs_hw_modes; i++) {
		param = pm_ctx->hw_mode.hw_mode_list[i];
		dbs = POLICY_MGR_HW_MODE_DBS_MODE_GET(param);

		if (dbs) {
			tx_chain0
				= POLICY_MGR_HW_MODE_MAC0_TX_STREAMS_GET(param);
			rx_chain0
				= POLICY_MGR_HW_MODE_MAC0_RX_STREAMS_GET(param);

			tx_chain1
				= POLICY_MGR_HW_MODE_MAC1_TX_STREAMS_GET(param);
			rx_chain1
				= POLICY_MGR_HW_MODE_MAC1_RX_STREAMS_GET(param);

			/* Usable chains per MAC = min of tx and rx chains. */
			min_mac0_rf_chains = QDF_MIN(tx_chain0, rx_chain0);
			min_mac1_rf_chains = QDF_MIN(tx_chain1, rx_chain1);

			max_rf_chains
				= QDF_MAX(min_mac0_rf_chains, min_mac1_rf_chains);

			if (final_max_rf_chains < max_rf_chains) {
				final_max_rf_chains
					= (max_rf_chains == 2)
					? HW_MODE_SS_2x2 : HW_MODE_SS_1x1;

				nss_dbs->mac0_ss
					= (min_mac0_rf_chains == 2)
					? HW_MODE_SS_2x2 : HW_MODE_SS_1x1;

				nss_dbs->mac1_ss
					= (min_mac1_rf_chains == 2)
					? HW_MODE_SS_2x2 : HW_MODE_SS_1x1;
			}
		} else {
			continue;
		}
	}

	return final_max_rf_chains;
}

/**
 * policy_mgr_is_scan_simultaneous_capable() - check whether simultaneous
 * (DBS) scan is permitted
 * @psoc: PSOC object information
 *
 * Simultaneous scan is ruled out when the dual-MAC-disable ini setting
 * disables DBS scan (any of the three disabling values) or the hardware
 * is not DBS capable at all.
 *
 * Return: true if simultaneous scan is allowed, false otherwise
 */
bool policy_mgr_is_scan_simultaneous_capable(struct wlan_objmgr_psoc *psoc)
{
	if ((DISABLE_DBS_CXN_AND_SCAN ==
	     wlan_objmgr_psoc_get_dual_mac_disable(psoc)) ||
	    (ENABLE_DBS_CXN_AND_DISABLE_DBS_SCAN ==
	     wlan_objmgr_psoc_get_dual_mac_disable(psoc)) ||
	    (ENABLE_DBS_CXN_AND_DISABLE_SIMULTANEOUS_SCAN ==
	     wlan_objmgr_psoc_get_dual_mac_disable(psoc)) ||
	    !policy_mgr_is_hw_dbs_capable(psoc))
		return false;

	return true;
}

/**
 * policy_mgr_set_cur_conc_system_pref() - set the current concurrency
 * system preference
 * @psoc: PSOC object information
 * @conc_system_pref: new preference value to store in the policy-manager
 *                    context
 *
 * Return: none
 */
void policy_mgr_set_cur_conc_system_pref(struct wlan_objmgr_psoc *psoc,
		uint8_t conc_system_pref)
{
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);

	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return;
	}

	policy_mgr_debug("conc_system_pref %hu", conc_system_pref);
	pm_ctx->cur_conc_system_pref = conc_system_pref;
}

/**
 * policy_mgr_get_cur_conc_system_pref() - get the current concurrency
 * system preference
 * @psoc: PSOC object information
 *
 * Return: the stored preference, or PM_THROUGHPUT when the policy-manager
 * context is unavailable
 */
uint8_t policy_mgr_get_cur_conc_system_pref(struct wlan_objmgr_psoc *psoc)
{
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return PM_THROUGHPUT;
	}

	policy_mgr_debug("conc_system_pref %hu", pm_ctx->cur_conc_system_pref);
	return pm_ctx->cur_conc_system_pref;
}

/**
 * policy_mgr_get_updated_scan_and_fw_mode_config() - derive scan and
 * fw-mode config words from the dual-MAC-disable ini value
 * @psoc: PSOC object information
 * @scan_config: out; starts from the current scan config and has DBS /
 *               async-DBS scan bits cleared per @dual_mac_disable_ini
 * @fw_mode_config: out; starts from the current fw-mode config and has
 *                  DBS-for-connection / STA+STA / STA+P2P bits updated
 * @dual_mac_disable_ini: ini-driven dual-MAC disable policy selector
 * @channel_select_logic_conc: packed STA+STA / STA+P2P channel-select
 *                             logic bits
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE when the
 * policy-manager context is unavailable
 */
QDF_STATUS policy_mgr_get_updated_scan_and_fw_mode_config(
		struct wlan_objmgr_psoc *psoc, uint32_t *scan_config,
		uint32_t *fw_mode_config, uint32_t dual_mac_disable_ini,
		uint32_t channel_select_logic_conc)
{
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return QDF_STATUS_E_FAILURE;
	}

	/* Start from the currently applied configuration, then mask bits. */
	*scan_config = pm_ctx->dual_mac_cfg.cur_scan_config;
	*fw_mode_config = pm_ctx->dual_mac_cfg.cur_fw_mode_config;
	switch (dual_mac_disable_ini) {
	case DISABLE_DBS_CXN_AND_ENABLE_DBS_SCAN_WITH_ASYNC_SCAN_OFF:
		policy_mgr_debug("dual_mac_disable_ini:%d async/dbs off",
			dual_mac_disable_ini);
		WMI_DBS_CONC_SCAN_CFG_ASYNC_DBS_SCAN_SET(*scan_config, 0);
		WMI_DBS_FW_MODE_CFG_DBS_FOR_CXN_SET(*fw_mode_config, 0);
		break;
	case DISABLE_DBS_CXN_AND_ENABLE_DBS_SCAN:
		policy_mgr_debug("dual_mac_disable_ini:%d dbs_cxn off",
			dual_mac_disable_ini);
		WMI_DBS_FW_MODE_CFG_DBS_FOR_CXN_SET(*fw_mode_config, 0);
		break;
	case ENABLE_DBS_CXN_AND_ENABLE_SCAN_WITH_ASYNC_SCAN_OFF:
		policy_mgr_debug("dual_mac_disable_ini:%d async off",
			dual_mac_disable_ini);
		WMI_DBS_CONC_SCAN_CFG_ASYNC_DBS_SCAN_SET(*scan_config, 0);
		break;
	case ENABLE_DBS_CXN_AND_DISABLE_DBS_SCAN:
		policy_mgr_debug("%s: dual_mac_disable_ini:%d ", __func__,
			dual_mac_disable_ini);
		WMI_DBS_CONC_SCAN_CFG_DBS_SCAN_SET(*scan_config, 0);
		break;
	default:
		break;
	}

	WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_STA_SET(*fw_mode_config,
		PM_CHANNEL_SELECT_LOGIC_STA_STA_GET(channel_select_logic_conc));
	WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_P2P_SET(*fw_mode_config,
		PM_CHANNEL_SELECT_LOGIC_STA_P2P_GET(channel_select_logic_conc));

	policy_mgr_debug("*scan_config:%x ", *scan_config);
	policy_mgr_debug("*fw_mode_config:%x ", *fw_mode_config);

	return QDF_STATUS_SUCCESS;
}

/**
 * policy_mgr_is_force_scc() - check if force-SCC (same channel
 * concurrency) is configured
 * @psoc: PSOC object information
 *
 * Return: true when the user-configured mcc_to_scc_switch_mode is any of
 * the force-SCC variants, false otherwise (or when the policy-manager
 * context is unavailable)
 */
bool policy_mgr_is_force_scc(struct wlan_objmgr_psoc *psoc)
{
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return 0;
	}

	return ((pm_ctx->user_cfg.mcc_to_scc_switch_mode ==
		QDF_MCC_TO_SCC_SWITCH_FORCE_WITHOUT_DISCONNECTION) ||
		(pm_ctx->user_cfg.mcc_to_scc_switch_mode ==
		QDF_MCC_TO_SCC_SWITCH_WITH_FAVORITE_CHANNEL) ||
		(pm_ctx->user_cfg.mcc_to_scc_switch_mode ==
		QDF_MCC_TO_SCC_SWITCH_FORCE_PREFERRED_WITHOUT_DISCONNECTION) ||
		(pm_ctx->user_cfg.mcc_to_scc_switch_mode ==
		QDF_MCC_TO_SCC_WITH_PREFERRED_BAND));
}

bool
policy_mgr_is_sta_sap_scc_allowed_on_dfs_chan( + struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + bool status = false; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return status; + } + + if (policy_mgr_is_force_scc(psoc) && + pm_ctx->user_cfg.is_sta_sap_scc_allowed_on_dfs_chan) + status = true; + + return status; +} + +bool policy_mgr_is_sta_connected_2g(struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + uint32_t conn_index; + bool ret = false; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return ret; + } + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + for (conn_index = 0; conn_index < MAX_NUMBER_OF_CONC_CONNECTIONS; + conn_index++) { + if (pm_conc_connection_list[conn_index].mode == PM_STA_MODE && + pm_conc_connection_list[conn_index].chan <= 14 && + pm_conc_connection_list[conn_index].in_use) + ret = true; + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return ret; +} + +void policy_mgr_trim_acs_channel_list(struct wlan_objmgr_psoc *psoc, + uint8_t *org_ch_list, uint8_t *org_ch_list_count) +{ + uint32_t list[MAX_NUMBER_OF_CONC_CONNECTIONS]; + uint32_t index = 0, count, i, ch_list_count; + uint8_t band_mask = 0, ch_5g = 0, ch_24g = 0; + uint8_t ch_list[QDF_MAX_NUM_CHAN]; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return; + } + + if (*org_ch_list_count >= QDF_MAX_NUM_CHAN) { + policy_mgr_err("org_ch_list_count too big %d", + *org_ch_list_count); + return; + } + /* + * if force SCC is enabled and there is a STA connection, trim the + * ACS channel list on the band on which STA connection is present + */ + count = policy_mgr_mode_specific_connection_count( + psoc, PM_STA_MODE, list); + if (!(policy_mgr_is_force_scc(psoc) && count)) + return; + while (index < count) { + if (WLAN_REG_IS_24GHZ_CH( 
+ pm_conc_connection_list[list[index]].chan) && + policy_mgr_is_safe_channel(psoc, + pm_conc_connection_list[list[index]].chan)) { + band_mask |= 1; + ch_24g = pm_conc_connection_list[list[index]].chan; + } + if (WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[list[index]].chan) && + policy_mgr_is_safe_channel(psoc, + pm_conc_connection_list[list[index]].chan) && + !wlan_reg_is_dfs_ch(pm_ctx->pdev, + pm_conc_connection_list[list[index]].chan) && + !wlan_reg_is_passive_or_disable_ch(pm_ctx->pdev, + pm_conc_connection_list[list[index]].chan)) { + band_mask |= 2; + ch_5g = pm_conc_connection_list[list[index]].chan; + } + index++; + } + ch_list_count = 0; + if (band_mask == 1) { + ch_list[ch_list_count++] = ch_24g; + for (i = 0; i < *org_ch_list_count; i++) { + if (WLAN_REG_IS_24GHZ_CH( + org_ch_list[i])) + continue; + ch_list[ch_list_count++] = + org_ch_list[i]; + } + } else if (band_mask == 2) { + if ((reg_get_channel_state(pm_ctx->pdev, ch_5g) == + CHANNEL_STATE_DFS) && + policy_mgr_is_sta_sap_scc_allowed_on_dfs_chan(psoc)) + ch_list[ch_list_count++] = ch_5g; + else if (!(reg_get_channel_state(pm_ctx->pdev, ch_5g) == + CHANNEL_STATE_DFS)) + ch_list[ch_list_count++] = ch_5g; + for (i = 0; i < *org_ch_list_count; i++) { + if (WLAN_REG_IS_5GHZ_CH( + org_ch_list[i])) + continue; + ch_list[ch_list_count++] = + org_ch_list[i]; + } + } else if (band_mask == 3) { + ch_list[ch_list_count++] = ch_24g; + ch_list[ch_list_count++] = ch_5g; + } else { + policy_mgr_debug("unexpected band_mask value %d", + band_mask); + return; + } + + *org_ch_list_count = ch_list_count; + for (i = 0; i < *org_ch_list_count; i++) + org_ch_list[i] = ch_list[i]; + +} + +uint32_t policy_mgr_get_connection_info(struct wlan_objmgr_psoc *psoc, + struct connection_info *info) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + uint32_t conn_index, count = 0; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return count; + } + + for (conn_index = 0; conn_index < 
MAX_NUMBER_OF_CONC_CONNECTIONS; + conn_index++) { + if (PM_CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) { + info[count].vdev_id = + pm_conc_connection_list[conn_index].vdev_id; + info[count].mac_id = + pm_conc_connection_list[conn_index].mac; + info[count].channel = + pm_conc_connection_list[conn_index].chan; + count++; + } + } + + return count; +} + +bool policy_mgr_allow_sap_go_concurrency(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_con_mode mode, + uint8_t channel, + uint32_t vdev_id) +{ + enum policy_mgr_con_mode con_mode; + uint8_t con_chan; + int id; + uint32_t vdev; + bool dbs; + + if (mode != PM_SAP_MODE && mode != PM_P2P_GO_MODE) + return true; + if (policy_mgr_dual_beacon_on_single_mac_mcc_capable(psoc)) + return true; + dbs = policy_mgr_is_hw_dbs_capable(psoc); + for (id = 0; id < MAX_NUMBER_OF_CONC_CONNECTIONS; id++) { + if (!pm_conc_connection_list[id].in_use) + continue; + vdev = pm_conc_connection_list[id].vdev_id; + if (vdev_id == vdev) + continue; + con_mode = pm_conc_connection_list[id].mode; + if (con_mode != PM_SAP_MODE && con_mode != PM_P2P_GO_MODE) + continue; + con_chan = pm_conc_connection_list[id].chan; + if (policy_mgr_dual_beacon_on_single_mac_scc_capable(psoc) && + (channel == con_chan)) { + policy_mgr_debug("SCC enabled, 2 AP on same channel, allow 2nd AP"); + return true; + } + if (!dbs) { + policy_mgr_debug("DBS unsupported, mcc and scc unsupported too, don't allow 2nd AP"); + return false; + } + if (WLAN_REG_IS_SAME_BAND_CHANNELS(channel, con_chan)) { + policy_mgr_debug("DBS supported, 2 SAP on same band, reject 2nd AP"); + return false; + } + } + + /* Don't block the second interface */ + return true; +} + +bool policy_mgr_allow_multiple_sta_connections(struct wlan_objmgr_psoc *psoc) +{ + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + policy_mgr_debug("Invalid WMI handle"); + return false; + } + + if (wmi_service_enabled(wmi_handle, + 
wmi_service_sta_plus_sta_support)) + return true; + + policy_mgr_debug("Concurrent STA connections are not supported"); + return false; +} + +bool policy_mgr_dual_beacon_on_single_mac_scc_capable( + struct wlan_objmgr_psoc *psoc) +{ + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + policy_mgr_debug("Invalid WMI handle"); + return false; + } + + if (wmi_service_enabled( + wmi_handle, + wmi_service_dual_beacon_on_single_mac_scc_support)) { + policy_mgr_debug("Support dual beacon on same channel on single MAC"); + return true; + } + if (wmi_service_enabled( + wmi_handle, + wmi_service_dual_beacon_on_single_mac_mcc_support)) { + policy_mgr_debug("Support dual beacon on both different and same channel on single MAC"); + return true; + } + policy_mgr_debug("Not support dual beacon on same channel on single MAC"); + return false; +} + +bool policy_mgr_dual_beacon_on_single_mac_mcc_capable( + struct wlan_objmgr_psoc *psoc) +{ + struct wmi_unified *wmi_handle; + + wmi_handle = get_wmi_unified_hdl_from_psoc(psoc); + if (!wmi_handle) { + policy_mgr_debug("Invalid WMI handle"); + return false; + } + + if (wmi_service_enabled( + wmi_handle, + wmi_service_dual_beacon_on_single_mac_mcc_support)) { + policy_mgr_debug("Support dual beacon on different channel on single MAC"); + return true; + } + policy_mgr_debug("Not support dual beacon on different channel on single MAC"); + return false; +} + +bool policy_mgr_sta_sap_scc_on_lte_coex_chan( + struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return false; + } + return pm_ctx->user_cfg.sta_sap_scc_on_lte_coex_chan; +} + +bool policy_mgr_is_valid_for_channel_switch(struct wlan_objmgr_psoc *psoc, + uint8_t channel) +{ + uint32_t sta_sap_scc_on_dfs_chan; + uint32_t sap_count; + enum channel_state state; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + 
pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return false; + } + + sta_sap_scc_on_dfs_chan = + policy_mgr_is_sta_sap_scc_allowed_on_dfs_chan(psoc); + sap_count = policy_mgr_mode_specific_connection_count(psoc, + PM_SAP_MODE, + NULL); + state = reg_get_channel_state(pm_ctx->pdev, channel); + + policy_mgr_debug("sta_sap_scc_on_dfs_chan %u, sap_count %u, channel %u, state %u", + sta_sap_scc_on_dfs_chan, sap_count, channel, state); + + if ((state == CHANNEL_STATE_ENABLE) || (sap_count == 0) || + ((state == CHANNEL_STATE_DFS) && sta_sap_scc_on_dfs_chan)) { + policy_mgr_debug("Valid channel for channel switch"); + return true; + } + + policy_mgr_debug("Invalid channel for channel switch"); + return false; +} + +bool policy_mgr_is_sta_sap_scc(struct wlan_objmgr_psoc *psoc, uint8_t sap_ch) +{ + uint32_t conn_index; + bool is_scc = false; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return is_scc; + } + + if (!policy_mgr_mode_specific_connection_count( + psoc, PM_STA_MODE, NULL)) { + policy_mgr_debug("There is no STA+SAP conc"); + return is_scc; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + for (conn_index = 0; conn_index < MAX_NUMBER_OF_CONC_CONNECTIONS; + conn_index++) { + if (pm_conc_connection_list[conn_index].in_use && + (pm_conc_connection_list[conn_index].mode == + PM_STA_MODE) && + (sap_ch == pm_conc_connection_list[conn_index].chan)) { + is_scc = true; + break; + } + } + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return is_scc; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_i.h new file mode 100644 index 0000000000000000000000000000000000000000..90e9c994ccda08b2741ac0fa8ee174c12a2ae1f6 --- /dev/null +++ 
b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_i.h @@ -0,0 +1,501 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef WLAN_POLICY_MGR_I_H +#define WLAN_POLICY_MGR_I_H + +#include "wlan_policy_mgr_api.h" +#include "qdf_event.h" +#include "qdf_mc_timer.h" +#include "qdf_lock.h" +#include "qdf_defer.h" +#include "wlan_reg_services_api.h" + +#define DBS_OPPORTUNISTIC_TIME 10 +#ifdef QCA_WIFI_3_0_EMU +#define CONNECTION_UPDATE_TIMEOUT 3000 +#else +#define CONNECTION_UPDATE_TIMEOUT 1000 +#endif + +#define PM_24_GHZ_CHANNEL_6 (6) +#define PM_5_GHZ_CHANNEL_36 (36) +#define CHANNEL_SWITCH_COMPLETE_TIMEOUT (2000) + +/** + * Policy Mgr hardware mode list bit-mask definitions. + * Bits 4:0, 31:29 are unused. + * + * The below definitions are added corresponding to WMI DBS HW mode + * list to make it independent of firmware changes for WMI definitions. + * Currently these definitions have dependency with BIT positions of + * the existing WMI macros. Thus, if the BIT positions are changed for + * WMI macros, then these macros' BIT definitions are also need to be + * changed. 
+ */ +#define POLICY_MGR_HW_MODE_MAC0_TX_STREAMS_BITPOS (28) +#define POLICY_MGR_HW_MODE_MAC0_RX_STREAMS_BITPOS (24) +#define POLICY_MGR_HW_MODE_MAC1_TX_STREAMS_BITPOS (20) +#define POLICY_MGR_HW_MODE_MAC1_RX_STREAMS_BITPOS (16) +#define POLICY_MGR_HW_MODE_MAC0_BANDWIDTH_BITPOS (12) +#define POLICY_MGR_HW_MODE_MAC1_BANDWIDTH_BITPOS (8) +#define POLICY_MGR_HW_MODE_DBS_MODE_BITPOS (7) +#define POLICY_MGR_HW_MODE_AGILE_DFS_MODE_BITPOS (6) +#define POLICY_MGR_HW_MODE_SBS_MODE_BITPOS (5) + +#define POLICY_MGR_HW_MODE_MAC0_TX_STREAMS_MASK \ + (0xf << POLICY_MGR_HW_MODE_MAC0_TX_STREAMS_BITPOS) +#define POLICY_MGR_HW_MODE_MAC0_RX_STREAMS_MASK \ + (0xf << POLICY_MGR_HW_MODE_MAC0_RX_STREAMS_BITPOS) +#define POLICY_MGR_HW_MODE_MAC1_TX_STREAMS_MASK \ + (0xf << POLICY_MGR_HW_MODE_MAC1_TX_STREAMS_BITPOS) +#define POLICY_MGR_HW_MODE_MAC1_RX_STREAMS_MASK \ + (0xf << POLICY_MGR_HW_MODE_MAC1_RX_STREAMS_BITPOS) +#define POLICY_MGR_HW_MODE_MAC0_BANDWIDTH_MASK \ + (0xf << POLICY_MGR_HW_MODE_MAC0_BANDWIDTH_BITPOS) +#define POLICY_MGR_HW_MODE_MAC1_BANDWIDTH_MASK \ + (0xf << POLICY_MGR_HW_MODE_MAC1_BANDWIDTH_BITPOS) +#define POLICY_MGR_HW_MODE_DBS_MODE_MASK \ + (0x1 << POLICY_MGR_HW_MODE_DBS_MODE_BITPOS) +#define POLICY_MGR_HW_MODE_AGILE_DFS_MODE_MASK \ + (0x1 << POLICY_MGR_HW_MODE_AGILE_DFS_MODE_BITPOS) +#define POLICY_MGR_HW_MODE_SBS_MODE_MASK \ + (0x1 << POLICY_MGR_HW_MODE_SBS_MODE_BITPOS) + +#define POLICY_MGR_HW_MODE_MAC0_TX_STREAMS_SET(hw_mode, value) \ + WMI_SET_BITS(hw_mode, POLICY_MGR_HW_MODE_MAC0_TX_STREAMS_BITPOS,\ + 4, value) +#define POLICY_MGR_HW_MODE_MAC0_RX_STREAMS_SET(hw_mode, value) \ + WMI_SET_BITS(hw_mode, POLICY_MGR_HW_MODE_MAC0_RX_STREAMS_BITPOS,\ + 4, value) +#define POLICY_MGR_HW_MODE_MAC1_TX_STREAMS_SET(hw_mode, value) \ + WMI_SET_BITS(hw_mode, POLICY_MGR_HW_MODE_MAC1_TX_STREAMS_BITPOS,\ + 4, value) +#define POLICY_MGR_HW_MODE_MAC1_RX_STREAMS_SET(hw_mode, value) \ + WMI_SET_BITS(hw_mode, POLICY_MGR_HW_MODE_MAC1_RX_STREAMS_BITPOS,\ + 4, value) +#define 
POLICY_MGR_HW_MODE_MAC0_BANDWIDTH_SET(hw_mode, value) \ + WMI_SET_BITS(hw_mode, POLICY_MGR_HW_MODE_MAC0_BANDWIDTH_BITPOS,\ + 4, value) +#define POLICY_MGR_HW_MODE_MAC1_BANDWIDTH_SET(hw_mode, value) \ + WMI_SET_BITS(hw_mode, POLICY_MGR_HW_MODE_MAC1_BANDWIDTH_BITPOS,\ + 4, value) +#define POLICY_MGR_HW_MODE_DBS_MODE_SET(hw_mode, value) \ + WMI_SET_BITS(hw_mode, POLICY_MGR_HW_MODE_DBS_MODE_BITPOS,\ + 1, value) +#define POLICY_MGR_HW_MODE_AGILE_DFS_SET(hw_mode, value) \ + WMI_SET_BITS(hw_mode, POLICY_MGR_HW_MODE_AGILE_DFS_MODE_BITPOS,\ + 1, value) +#define POLICY_MGR_HW_MODE_SBS_MODE_SET(hw_mode, value) \ + WMI_SET_BITS(hw_mode, POLICY_MGR_HW_MODE_SBS_MODE_BITPOS,\ + 1, value) + +#define POLICY_MGR_HW_MODE_MAC0_TX_STREAMS_GET(hw_mode) \ + ((hw_mode & POLICY_MGR_HW_MODE_MAC0_TX_STREAMS_MASK) >> \ + POLICY_MGR_HW_MODE_MAC0_TX_STREAMS_BITPOS) +#define POLICY_MGR_HW_MODE_MAC0_RX_STREAMS_GET(hw_mode) \ + ((hw_mode & POLICY_MGR_HW_MODE_MAC0_RX_STREAMS_MASK) >> \ + POLICY_MGR_HW_MODE_MAC0_RX_STREAMS_BITPOS) +#define POLICY_MGR_HW_MODE_MAC1_TX_STREAMS_GET(hw_mode) \ + ((hw_mode & POLICY_MGR_HW_MODE_MAC1_TX_STREAMS_MASK) >> \ + POLICY_MGR_HW_MODE_MAC1_TX_STREAMS_BITPOS) +#define POLICY_MGR_HW_MODE_MAC1_RX_STREAMS_GET(hw_mode) \ + ((hw_mode & POLICY_MGR_HW_MODE_MAC1_RX_STREAMS_MASK) >> \ + POLICY_MGR_HW_MODE_MAC1_RX_STREAMS_BITPOS) +#define POLICY_MGR_HW_MODE_MAC0_BANDWIDTH_GET(hw_mode) \ + ((hw_mode & POLICY_MGR_HW_MODE_MAC0_BANDWIDTH_MASK) >> \ + POLICY_MGR_HW_MODE_MAC0_BANDWIDTH_BITPOS) +#define POLICY_MGR_HW_MODE_MAC1_BANDWIDTH_GET(hw_mode) \ + ((hw_mode & POLICY_MGR_HW_MODE_MAC1_BANDWIDTH_MASK) >> \ + POLICY_MGR_HW_MODE_MAC1_BANDWIDTH_BITPOS) +#define POLICY_MGR_HW_MODE_DBS_MODE_GET(hw_mode) \ + ((hw_mode & POLICY_MGR_HW_MODE_DBS_MODE_MASK) >> \ + POLICY_MGR_HW_MODE_DBS_MODE_BITPOS) +#define POLICY_MGR_HW_MODE_AGILE_DFS_GET(hw_mode) \ + ((hw_mode & POLICY_MGR_HW_MODE_AGILE_DFS_MODE_MASK) >> \ + POLICY_MGR_HW_MODE_AGILE_DFS_MODE_BITPOS) +#define 
POLICY_MGR_HW_MODE_SBS_MODE_GET(hw_mode) \ + ((hw_mode & POLICY_MGR_HW_MODE_SBS_MODE_MASK) >> \ + POLICY_MGR_HW_MODE_SBS_MODE_BITPOS) + +#define POLICY_MGR_DEFAULT_HW_MODE_INDEX 0xFFFF + +#define policy_mgr_log(level, args...) \ + QDF_TRACE(QDF_MODULE_ID_POLICY_MGR, level, ## args) +#define policy_mgr_logfl(level, format, args...) \ + policy_mgr_log(level, FL(format), ## args) + +#define policy_mgr_alert(format, args...) \ + policy_mgr_logfl(QDF_TRACE_LEVEL_FATAL, format, ## args) +#define policy_mgr_err(format, args...) \ + policy_mgr_logfl(QDF_TRACE_LEVEL_ERROR, format, ## args) +#define policy_mgr_warn(format, args...) \ + policy_mgr_logfl(QDF_TRACE_LEVEL_WARN, format, ## args) +#define policy_mgr_notice(format, args...) \ + policy_mgr_logfl(QDF_TRACE_LEVEL_INFO, format, ## args) +#define policy_mgr_info(format, args...) \ + policy_mgr_logfl(QDF_TRACE_LEVEL_INFO_HIGH, format, ## args) +#define policy_mgr_debug(format, args...) \ + policy_mgr_logfl(QDF_TRACE_LEVEL_DEBUG, format, ## args) + +#define PM_CONC_CONNECTION_LIST_VALID_INDEX(index) \ + ((MAX_NUMBER_OF_CONC_CONNECTIONS > index) && \ + (pm_conc_connection_list[index].in_use)) + +extern struct policy_mgr_conc_connection_info + pm_conc_connection_list[MAX_NUMBER_OF_CONC_CONNECTIONS]; + +extern const enum policy_mgr_pcl_type + first_connection_pcl_table[PM_MAX_NUM_OF_MODE] + [PM_MAX_CONC_PRIORITY_MODE]; +extern pm_dbs_pcl_second_connection_table_type + *second_connection_pcl_dbs_table; +extern pm_dbs_pcl_third_connection_table_type + *third_connection_pcl_dbs_table; +extern policy_mgr_next_action_two_connection_table_type + *next_action_two_connection_table; +extern policy_mgr_next_action_three_connection_table_type + *next_action_three_connection_table; +extern enum policy_mgr_conc_next_action + (*policy_mgr_get_current_pref_hw_mode_ptr) + (struct wlan_objmgr_psoc *psoc); + +/** + * struct sta_ap_intf_check_work_ctx - sta_ap_intf_check_work + * related info + * @psoc: pointer to PSOC object information + */ 
struct sta_ap_intf_check_work_ctx {
	struct wlan_objmgr_psoc *psoc;
};

/**
 * struct policy_mgr_psoc_priv_obj - Policy manager private data
 * @psoc: pointer to PSOC object information
 * @pdev: pointer to PDEV object information
 * @connection_update_done_evt: qdf event to synchronize
 *                            connection activities
 * @qdf_conc_list_lock: To protect connection table
 * @dbs_opportunistic_timer: Timer to drop down to Single Mac
 *                         Mode opportunistically
 * @hdd_cbacks: callbacks registered by HDD for interaction with
 *              Policy Manager
 * @sme_cbacks: callbacks to be registered by SME for
 *              interaction with Policy Manager
 * @wma_cbacks: callbacks to be registered by WMA for
 *              interaction with Policy Manager
 * @tdls_cbacks: callbacks to be registered by TDLS for
 *               interaction with Policy Manager
 * @cdp_cbacks: callbacks to be registered by CDP for
 *              interaction with Policy Manager
 * @dp_cbacks: callbacks to be registered by DP for
 *             interaction with Policy Manager
 * @enable_sap_mandatory_chan_list: whether the SAP mandatory
 *                                channel list is enabled
 * @sap_mandatory_channels: The user preferred master list on
 *                        which SAP can be brought up. This
 *                        mandatory channel list would be as per
 *                        OEMs preference & conforming to the
 *                        regulatory/other considerations
 * @sap_mandatory_channels_len: Length of the SAP mandatory
 *                            channel list
 * @do_hw_mode_change: Flag to check if HW mode change is needed
 *                   after vdev is up. Especially used after
 *                   channel switch related vdev restart
 * @concurrency_mode: active concurrency combination
 * @no_of_open_sessions: Number of active vdevs
 * @no_of_active_sessions: Number of active connections
 * @sta_ap_intf_check_work: delayed sap restart work
 * @num_dbs_hw_modes: Number of different HW modes supported
 * @hw_mode: List of HW modes supported
 * @old_hw_mode_index: Old HW mode from hw_mode table
 * @new_hw_mode_index: New HW mode from hw_mode table
 * @dual_mac_cfg: DBS configuration currently used by FW for
 *              scan & connections
 * @hw_mode_change_in_progress: This is to track if HW mode
 *                            change is in progress
 * @user_cfg: user-supplied configuration (INI-derived values)
 * @unsafe_channel_list: LTE coex channel avoidance list
 * @unsafe_channel_count: LTE coex channel avoidance list count
 * @sta_ap_intf_check_work_info: Info related to sta_ap_intf_check_work
 * @cur_conc_system_pref: current system concurrency preference
 * @sta_sap_scc_on_dfs_chan_allowed: whether STA+SAP SCC is
 *                                 allowed on a DFS channel
 * @opportunistic_update_done_evt: qdf event to synchronize host
 *                               & FW HW mode
 * @channel_switch_complete_evt: qdf event signalled on channel
 *                             switch completion
 * @mode_change_cb: callback invoked to send a mode change event
 * @user_config_sap_channel: SAP channel configured by the user
 *
 * NOTE(review): member semantics above that are not evident from this
 * header (e.g. exact producers of user_config_sap_channel) are inferred
 * from the declarations only — confirm against the .c users.
 */
struct policy_mgr_psoc_priv_obj {
	struct wlan_objmgr_psoc *psoc;
	struct wlan_objmgr_pdev *pdev;
	qdf_event_t connection_update_done_evt;
	qdf_mutex_t qdf_conc_list_lock;
	qdf_mc_timer_t dbs_opportunistic_timer;
	struct policy_mgr_hdd_cbacks hdd_cbacks;
	struct policy_mgr_sme_cbacks sme_cbacks;
	struct policy_mgr_wma_cbacks wma_cbacks;
	struct policy_mgr_tdls_cbacks tdls_cbacks;
	struct policy_mgr_cdp_cbacks cdp_cbacks;
	struct policy_mgr_dp_cbacks dp_cbacks;
	bool enable_sap_mandatory_chan_list;
	uint8_t sap_mandatory_channels[QDF_MAX_NUM_CHAN];
	uint32_t sap_mandatory_channels_len;
	bool do_hw_mode_change;
	uint32_t concurrency_mode;
	uint8_t no_of_open_sessions[QDF_MAX_NO_OF_MODE];
	uint8_t no_of_active_sessions[QDF_MAX_NO_OF_MODE];
	qdf_work_t sta_ap_intf_check_work;
	uint32_t num_dbs_hw_modes;
	struct dbs_hw_mode_info hw_mode;
	uint32_t old_hw_mode_index;
	uint32_t new_hw_mode_index;
	struct dual_mac_config dual_mac_cfg;
	uint32_t hw_mode_change_in_progress;
	struct policy_mgr_user_cfg user_cfg;
	uint16_t unsafe_channel_list[QDF_MAX_NUM_CHAN];
	uint16_t unsafe_channel_count;
	struct sta_ap_intf_check_work_ctx *sta_ap_intf_check_work_info;
	uint8_t cur_conc_system_pref;
	uint8_t sta_sap_scc_on_dfs_chan_allowed;
	qdf_event_t opportunistic_update_done_evt;
	qdf_event_t channel_switch_complete_evt;
	send_mode_change_event_cb mode_change_cb;
	uint32_t user_config_sap_channel;
};

/**
 * struct policy_mgr_mac_ss_bw_info - hw_mode_list PHY/MAC params for each MAC
 * @mac_tx_stream: Max TX stream
 * @mac_rx_stream: Max RX stream
 * @mac_bw: Max bandwidth
 */
struct policy_mgr_mac_ss_bw_info {
	uint32_t mac_tx_stream;
	uint32_t mac_rx_stream;
	uint32_t mac_bw;
};

struct policy_mgr_psoc_priv_obj *policy_mgr_get_context(
		struct wlan_objmgr_psoc *psoc);
QDF_STATUS policy_mgr_get_updated_scan_config(
		struct wlan_objmgr_psoc *psoc,
		uint32_t *scan_config,
		bool dbs_scan,
		bool dbs_plus_agile_scan,
		bool single_mac_scan_with_dfs);
QDF_STATUS policy_mgr_get_updated_fw_mode_config(
		struct wlan_objmgr_psoc *psoc,
		uint32_t *fw_mode_config,
		bool dbs,
		bool agile_dfs);
bool policy_mgr_is_dual_mac_disabled_in_ini(
		struct wlan_objmgr_psoc *psoc);

/**
 * policy_mgr_mcc_to_scc_switch_mode_in_user_cfg() - MCC to SCC
 * switch mode value in the user config
 * @psoc: PSOC object information
 *
 * MCC to SCC switch mode value in user config
 *
 * Return: MCC to SCC switch mode value
 */
uint32_t policy_mgr_mcc_to_scc_switch_mode_in_user_cfg(
		struct wlan_objmgr_psoc *psoc);
bool policy_mgr_get_dbs_config(struct wlan_objmgr_psoc *psoc);
bool policy_mgr_get_agile_dfs_config(struct wlan_objmgr_psoc *psoc);
bool policy_mgr_get_dbs_scan_config(struct wlan_objmgr_psoc *psoc);
void policy_mgr_get_tx_rx_ss_from_config(enum hw_mode_ss_config mac_ss,
		uint32_t *tx_ss, uint32_t *rx_ss);
int8_t policy_mgr_get_matching_hw_mode_index(
		struct wlan_objmgr_psoc
*psoc, + uint32_t mac0_tx_ss, uint32_t mac0_rx_ss, + enum hw_mode_bandwidth mac0_bw, + uint32_t mac1_tx_ss, uint32_t mac1_rx_ss, + enum hw_mode_bandwidth mac1_bw, + enum hw_mode_dbs_capab dbs, + enum hw_mode_agile_dfs_capab dfs, + enum hw_mode_sbs_capab sbs); +int8_t policy_mgr_get_hw_mode_idx_from_dbs_hw_list( + struct wlan_objmgr_psoc *psoc, + enum hw_mode_ss_config mac0_ss, + enum hw_mode_bandwidth mac0_bw, + enum hw_mode_ss_config mac1_ss, + enum hw_mode_bandwidth mac1_bw, + enum hw_mode_dbs_capab dbs, + enum hw_mode_agile_dfs_capab dfs, + enum hw_mode_sbs_capab sbs); +QDF_STATUS policy_mgr_get_hw_mode_from_idx( + struct wlan_objmgr_psoc *psoc, + uint32_t idx, + struct policy_mgr_hw_mode_params *hw_mode); +QDF_STATUS policy_mgr_get_old_and_new_hw_index( + struct wlan_objmgr_psoc *psoc, + uint32_t *old_hw_mode_index, + uint32_t *new_hw_mode_index); +void policy_mgr_update_conc_list(struct wlan_objmgr_psoc *psoc, + uint32_t conn_index, + enum policy_mgr_con_mode mode, + uint8_t chan, + enum hw_mode_bandwidth bw, + uint8_t mac, + enum policy_mgr_chain_mode chain_mask, + uint32_t original_nss, + uint32_t vdev_id, + bool in_use, + bool update_conn); +void policy_mgr_store_and_del_conn_info(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_con_mode mode, + bool all_matching_cxn_to_del, + struct policy_mgr_conc_connection_info *info, + uint8_t *num_cxn_del); + +/** + * policy_mgr_store_and_del_conn_info_by_vdev_id() - Store and del a + * connection info by vdev id + * @psoc: PSOC object information + * @vdev_id: vdev id whose entry has to be deleted + * @info: struture array pointer where the connection info will be saved + * @num_cxn_del: number of connection which are going to be deleted + * + * Saves the connection info corresponding to the provided mode + * and deleted that corresponding entry based on vdev from the + * connection info structure + * + * Return: None + */ +void policy_mgr_store_and_del_conn_info_by_vdev_id( + struct wlan_objmgr_psoc *psoc, + 
uint32_t vdev_id, + struct policy_mgr_conc_connection_info *info, + uint8_t *num_cxn_del); + +void policy_mgr_restore_deleted_conn_info(struct wlan_objmgr_psoc *psoc, + struct policy_mgr_conc_connection_info *info, + uint8_t num_cxn_del); +void policy_mgr_update_hw_mode_conn_info(struct wlan_objmgr_psoc *psoc, + uint32_t num_vdev_mac_entries, + struct policy_mgr_vdev_mac_map *vdev_mac_map, + struct policy_mgr_hw_mode_params hw_mode); +void policy_mgr_pdev_set_hw_mode_cb(uint32_t status, + uint32_t cfgd_hw_mode_index, + uint32_t num_vdev_mac_entries, + struct policy_mgr_vdev_mac_map *vdev_mac_map, + uint8_t next_action, + enum policy_mgr_conn_update_reason reason, + uint32_t session_id, void *context); +void policy_mgr_dump_current_concurrency(struct wlan_objmgr_psoc *psoc); + +/** + * policy_mgr_pdev_get_pcl() - GET PCL channel list + * @psoc: PSOC object information + * @mode: Adapter mode + * @pcl: the pointer of pcl list + * + * Fetches the PCL. + * + * Return: QDF_STATUS + */ +QDF_STATUS policy_mgr_pdev_get_pcl(struct wlan_objmgr_psoc *psoc, + enum QDF_OPMODE mode, + struct policy_mgr_pcl_list *pcl); +void pm_dbs_opportunistic_timer_handler(void *data); +enum policy_mgr_con_mode policy_mgr_get_mode(uint8_t type, + uint8_t subtype); +enum hw_mode_bandwidth policy_mgr_get_bw(enum phy_ch_width chan_width); +QDF_STATUS policy_mgr_get_channel_list(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_pcl_type pcl, + uint8_t *pcl_channels, uint32_t *len, + enum policy_mgr_con_mode mode, + uint8_t *pcl_weights, uint32_t weight_len); +bool policy_mgr_allow_new_home_channel(struct wlan_objmgr_psoc *psoc, + uint8_t channel, uint32_t num_connections); +bool policy_mgr_is_5g_channel_allowed(struct wlan_objmgr_psoc *psoc, + uint8_t channel, uint32_t *list, + enum policy_mgr_con_mode mode); +QDF_STATUS policy_mgr_complete_action(struct wlan_objmgr_psoc *psoc, + uint8_t new_nss, uint8_t next_action, + enum policy_mgr_conn_update_reason reason, + uint32_t session_id); +enum 
policy_mgr_con_mode policy_mgr_get_mode_by_vdev_id( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id); +QDF_STATUS policy_mgr_init_connection_update( + struct policy_mgr_psoc_priv_obj *pm_ctx); +enum policy_mgr_conc_next_action + policy_mgr_get_current_pref_hw_mode_dbs_2x2( + struct wlan_objmgr_psoc *psoc); +enum policy_mgr_conc_next_action + policy_mgr_get_current_pref_hw_mode_dbs_1x1( + struct wlan_objmgr_psoc *psoc); +QDF_STATUS policy_mgr_reset_sap_mandatory_channels( + struct policy_mgr_psoc_priv_obj *pm_ctx); + +/** + * policy_mgr_get_mode_specific_conn_info() - Get active mode specific + * channel and vdev id + * @psoc: PSOC object information + * @channel: Mode specific channel (list) + * @vdev_id: Mode specific vdev id (list) + * @mode: Connection Mode + * + * Get active mode specific channel and vdev id + * + * Return: number of connection found as per given mode + */ +uint32_t policy_mgr_get_mode_specific_conn_info(struct wlan_objmgr_psoc *psoc, + uint8_t *channel, uint8_t *vdev_id, + enum policy_mgr_con_mode mode); + +/** + * policy_mgr_reg_chan_change_callback() - Callback to be + * invoked by regulatory module when valid channel list changes + * @psoc: PSOC object information + * @pdev: PDEV object information + * @chan_list: New channel list + * @avoid_freq_ind: LTE coex avoid channel list + * @arg: Information passed at registration + * + * Get updated channel list from regulatory module + * + * Return: None + */ +void policy_mgr_reg_chan_change_callback(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev, + struct regulatory_channel *chan_list, + struct avoid_freq_ind_data *avoid_freq_ind, + void *arg); + +QDF_STATUS policy_mgr_nss_update(struct wlan_objmgr_psoc *psoc, + uint8_t new_nss, uint8_t next_action, + enum policy_mgr_conn_update_reason reason, + uint32_t original_vdev_id); + +/** + * policy_mgr_is_concurrency_allowed() - Check for allowed + * concurrency combination + * @psoc: PSOC object information + * @mode: new connection 
mode + * @channel: channel on which new connection is coming up + * @bw: Bandwidth requested by the connection (optional) + * + * When a new connection is about to come up check if current + * concurrency combination including the new connection is + * allowed or not based on the HW capability, but no need to + * invoke get_pcl + * + * Return: True/False + */ +bool policy_mgr_is_concurrency_allowed(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_con_mode mode, + uint8_t channel, + enum hw_mode_bandwidth bw); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_init_deinit.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_init_deinit.c new file mode 100644 index 0000000000000000000000000000000000000000..322de079032dca64c2dd9bb0238b709c6f7bc5da --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_init_deinit.c @@ -0,0 +1,715 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_policy_mgr_init_deinit.c + * + * WLAN Concurrenct Connection Management APIs + * + */ + +/* Include files */ + +#include "wlan_policy_mgr_api.h" +#include "wlan_policy_mgr_tables_1x1_dbs_i.h" +#include "wlan_policy_mgr_tables_2x2_dbs_i.h" +#include "wlan_policy_mgr_i.h" +#include "qdf_types.h" +#include "qdf_trace.h" +#include "wlan_objmgr_global_obj.h" + +static QDF_STATUS policy_mgr_psoc_obj_create_cb(struct wlan_objmgr_psoc *psoc, + void *data) +{ + struct policy_mgr_psoc_priv_obj *policy_mgr_ctx; + + policy_mgr_ctx = qdf_mem_malloc( + sizeof(struct policy_mgr_psoc_priv_obj)); + if (!policy_mgr_ctx) { + policy_mgr_err("memory allocation failed"); + return QDF_STATUS_E_FAILURE; + } + + policy_mgr_ctx->psoc = psoc; + policy_mgr_ctx->old_hw_mode_index = POLICY_MGR_DEFAULT_HW_MODE_INDEX; + policy_mgr_ctx->new_hw_mode_index = POLICY_MGR_DEFAULT_HW_MODE_INDEX; + + wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_UMAC_COMP_POLICY_MGR, + policy_mgr_ctx, + QDF_STATUS_SUCCESS); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS policy_mgr_psoc_obj_destroy_cb(struct wlan_objmgr_psoc *psoc, + void *data) +{ + struct policy_mgr_psoc_priv_obj *policy_mgr_ctx; + + policy_mgr_ctx = policy_mgr_get_context(psoc); + wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_POLICY_MGR, + policy_mgr_ctx); + qdf_mem_free(policy_mgr_ctx); + + return QDF_STATUS_SUCCESS; +} + +static void policy_mgr_psoc_obj_status_cb(struct wlan_objmgr_psoc *psoc, + void *data, QDF_STATUS status) +{ + return; +} + +static QDF_STATUS policy_mgr_pdev_obj_create_cb(struct wlan_objmgr_pdev *pdev, + void *data) +{ + struct policy_mgr_psoc_priv_obj *policy_mgr_ctx; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + policy_mgr_ctx = policy_mgr_get_context(psoc); + if (!policy_mgr_ctx) { + policy_mgr_err("invalid context"); + return QDF_STATUS_E_FAILURE; + } + + policy_mgr_ctx->pdev = pdev; + + wlan_reg_register_chan_change_callback(psoc, + 
policy_mgr_reg_chan_change_callback, NULL); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS policy_mgr_pdev_obj_destroy_cb(struct wlan_objmgr_pdev *pdev, + void *data) +{ + struct policy_mgr_psoc_priv_obj *policy_mgr_ctx; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + policy_mgr_ctx = policy_mgr_get_context(psoc); + if (!policy_mgr_ctx) { + policy_mgr_err("invalid context"); + return QDF_STATUS_E_FAILURE; + } + + policy_mgr_ctx->pdev = NULL; + wlan_reg_unregister_chan_change_callback(psoc, + policy_mgr_reg_chan_change_callback); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS policy_mgr_vdev_obj_create_cb(struct wlan_objmgr_vdev *vdev, + void *data) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS policy_mgr_vdev_obj_destroy_cb(struct wlan_objmgr_vdev *vdev, + void *data) +{ + return QDF_STATUS_SUCCESS; +} + +static void policy_mgr_vdev_obj_status_cb(struct wlan_objmgr_vdev *vdev, + void *data, QDF_STATUS status) +{ + return; +} + +QDF_STATUS policy_mgr_init(void) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + status = wlan_objmgr_register_psoc_create_handler( + WLAN_UMAC_COMP_POLICY_MGR, + policy_mgr_psoc_obj_create_cb, + NULL); + if (status != QDF_STATUS_SUCCESS) { + policy_mgr_err("Failed to register psoc obj create cback"); + goto err_psoc_create; + } + + status = wlan_objmgr_register_psoc_destroy_handler( + WLAN_UMAC_COMP_POLICY_MGR, + policy_mgr_psoc_obj_destroy_cb, + NULL); + if (status != QDF_STATUS_SUCCESS) { + policy_mgr_err("Failed to register psoc obj delete cback"); + goto err_psoc_delete; + } + + status = wlan_objmgr_register_psoc_status_handler( + WLAN_UMAC_COMP_POLICY_MGR, + policy_mgr_psoc_obj_status_cb, + NULL); + if (status != QDF_STATUS_SUCCESS) { + policy_mgr_err("Failed to register psoc obj status cback"); + goto err_psoc_status; + } + + status = wlan_objmgr_register_pdev_create_handler( + WLAN_UMAC_COMP_POLICY_MGR, + policy_mgr_pdev_obj_create_cb, + NULL); + if (status != QDF_STATUS_SUCCESS) { + 
+		policy_mgr_err("Failed to register pdev obj create cback");
+		goto err_pdev_create;
+	}
+
+	status = wlan_objmgr_register_pdev_destroy_handler(
+				WLAN_UMAC_COMP_POLICY_MGR,
+				policy_mgr_pdev_obj_destroy_cb,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		policy_mgr_err("Failed to register pdev obj delete cback");
+		goto err_pdev_delete;
+	}
+
+	status = wlan_objmgr_register_vdev_create_handler(
+				WLAN_UMAC_COMP_POLICY_MGR,
+				policy_mgr_vdev_obj_create_cb,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		policy_mgr_err("Failed to register vdev obj create cback");
+		goto err_vdev_create;
+	}
+
+	status = wlan_objmgr_register_vdev_destroy_handler(
+				WLAN_UMAC_COMP_POLICY_MGR,
+				policy_mgr_vdev_obj_destroy_cb,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		policy_mgr_err("Failed to register vdev obj delete cback");
+		goto err_vdev_delete;
+	}
+
+	status = wlan_objmgr_register_vdev_status_handler(
+				WLAN_UMAC_COMP_POLICY_MGR,
+				policy_mgr_vdev_obj_status_cb,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		policy_mgr_err("Failed to register vdev obj status cback");
+		goto err_vdev_status;
+	}
+
+	policy_mgr_notice("Callbacks registered with obj mgr");
+
+	return QDF_STATUS_SUCCESS;
+
+	/*
+	 * Unwind ladder: each label undoes the registration that succeeded
+	 * immediately before the one that failed, in reverse order.
+	 */
+err_vdev_status:
+	wlan_objmgr_unregister_vdev_destroy_handler(WLAN_UMAC_COMP_POLICY_MGR,
+						policy_mgr_vdev_obj_destroy_cb,
+						NULL);
+err_vdev_delete:
+	wlan_objmgr_unregister_vdev_create_handler(WLAN_UMAC_COMP_POLICY_MGR,
+						policy_mgr_vdev_obj_create_cb,
+						NULL);
+err_vdev_create:
+	wlan_objmgr_unregister_pdev_destroy_handler(WLAN_UMAC_COMP_POLICY_MGR,
+						policy_mgr_pdev_obj_destroy_cb,
+						NULL);
+err_pdev_delete:
+	wlan_objmgr_unregister_pdev_create_handler(WLAN_UMAC_COMP_POLICY_MGR,
+						policy_mgr_pdev_obj_create_cb,
+						NULL);
+err_pdev_create:
+	wlan_objmgr_unregister_psoc_status_handler(WLAN_UMAC_COMP_POLICY_MGR,
+						policy_mgr_psoc_obj_status_cb,
+						NULL);
+err_psoc_status:
+	wlan_objmgr_unregister_psoc_destroy_handler(WLAN_UMAC_COMP_POLICY_MGR,
+						policy_mgr_psoc_obj_destroy_cb,
+						NULL);
+err_psoc_delete:
+	wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_POLICY_MGR,
+						policy_mgr_psoc_obj_create_cb,
+						NULL);
+err_psoc_create:
+	return status;
+}
+
+/*
+ * policy_mgr_deinit() - unregister every handler registered by
+ * policy_mgr_init(). Failures are logged but do not stop the remaining
+ * deregistrations; the last failing status is returned.
+ */
+QDF_STATUS policy_mgr_deinit(void)
+{
+	QDF_STATUS status;
+
+	status = wlan_objmgr_unregister_psoc_status_handler(
+				WLAN_UMAC_COMP_POLICY_MGR,
+				policy_mgr_psoc_obj_status_cb,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS)
+		policy_mgr_err("Failed to deregister psoc obj status cback");
+
+	status = wlan_objmgr_unregister_psoc_destroy_handler(
+				WLAN_UMAC_COMP_POLICY_MGR,
+				policy_mgr_psoc_obj_destroy_cb,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS)
+		policy_mgr_err("Failed to deregister psoc obj delete cback");
+
+	status = wlan_objmgr_unregister_psoc_create_handler(
+				WLAN_UMAC_COMP_POLICY_MGR,
+				policy_mgr_psoc_obj_create_cb,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS)
+		policy_mgr_err("Failed to deregister psoc obj create cback");
+
+	status = wlan_objmgr_unregister_pdev_destroy_handler(
+				WLAN_UMAC_COMP_POLICY_MGR,
+				policy_mgr_pdev_obj_destroy_cb,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS)
+		policy_mgr_err("Failed to deregister pdev obj delete cback");
+
+	status = wlan_objmgr_unregister_pdev_create_handler(
+				WLAN_UMAC_COMP_POLICY_MGR,
+				policy_mgr_pdev_obj_create_cb,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS)
+		policy_mgr_err("Failed to deregister pdev obj create cback");
+
+	status = wlan_objmgr_unregister_vdev_status_handler(
+				WLAN_UMAC_COMP_POLICY_MGR,
+				policy_mgr_vdev_obj_status_cb,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS)
+		policy_mgr_err("Failed to deregister vdev obj status cback");
+
+	status = wlan_objmgr_unregister_vdev_destroy_handler(
+				WLAN_UMAC_COMP_POLICY_MGR,
+				policy_mgr_vdev_obj_destroy_cb,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS)
+		policy_mgr_err("Failed to deregister vdev obj delete cback");
+
+	status = wlan_objmgr_unregister_vdev_create_handler(
+				WLAN_UMAC_COMP_POLICY_MGR,
+				policy_mgr_vdev_obj_create_cb,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS)
+		policy_mgr_err("Failed to deregister vdev obj create cback");
+
+	policy_mgr_info("deregistered callbacks with obj mgr successfully");
+
+	return status;
+}
+
+/* psoc open: create the lock protecting pm_conc_connection_list. */
+QDF_STATUS policy_mgr_psoc_open(struct wlan_objmgr_psoc *psoc)
+{
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (!QDF_IS_STATUS_SUCCESS(qdf_mutex_create(
+			&pm_ctx->qdf_conc_list_lock))) {
+		policy_mgr_err("Failed to init qdf_conc_list_lock");
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/*
+ * psoc close: destroy the concurrency-list lock and release resources
+ * (HW mode list, pending STA/AP interface-check work) owned by pm_ctx.
+ */
+QDF_STATUS policy_mgr_psoc_close(struct wlan_objmgr_psoc *psoc)
+{
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (!QDF_IS_STATUS_SUCCESS(qdf_mutex_destroy(
+			&pm_ctx->qdf_conc_list_lock))) {
+		policy_mgr_err("Failed to destroy qdf_conc_list_lock");
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (pm_ctx->hw_mode.hw_mode_list) {
+		qdf_mem_free(pm_ctx->hw_mode.hw_mode_list);
+		pm_ctx->hw_mode.hw_mode_list = NULL;
+		policy_mgr_info("HW list is freed");
+	}
+
+	if (pm_ctx->sta_ap_intf_check_work_info) {
+		qdf_cancel_work(&pm_ctx->sta_ap_intf_check_work);
+		qdf_mem_free(pm_ctx->sta_ap_intf_check_work_info);
+		pm_ctx->sta_ap_intf_check_work_info = NULL;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/*
+ * psoc enable: reset concurrency state, create the DBS opportunistic
+ * timer and completion events, and select PCL/next-action tables per
+ * the HW DBS (1x1 vs 2x2) capability.
+ */
+QDF_STATUS policy_mgr_psoc_enable(struct wlan_objmgr_psoc *psoc)
+{
+	QDF_STATUS status;
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	policy_mgr_debug("Initializing the policy manager");
+
+	/* init pm_conc_connection_list */
+	qdf_mem_zero(pm_conc_connection_list, sizeof(pm_conc_connection_list));
+
+	/* init dbs_opportunistic_timer */
+	status =
+	qdf_mc_timer_init(&pm_ctx->dbs_opportunistic_timer,
+			QDF_TIMER_TYPE_SW,
+			pm_dbs_opportunistic_timer_handler,
+			(void *)psoc);
+	if (!QDF_IS_STATUS_SUCCESS(status)) {
+		policy_mgr_err("Failed to init DBS opportunistic timer");
+		return status;
+	}
+
+	/* init connection_update_done_evt */
+	status = policy_mgr_init_connection_update(pm_ctx);
+	if (!QDF_IS_STATUS_SUCCESS(status)) {
+		policy_mgr_err("connection_update_done_evt init failed");
+		return status;
+	}
+
+	status = qdf_event_create(&pm_ctx->opportunistic_update_done_evt);
+	if (!QDF_IS_STATUS_SUCCESS(status)) {
+		policy_mgr_err("opportunistic_update_done_evt init failed");
+		return status;
+	}
+
+	status = qdf_event_create(&pm_ctx->channel_switch_complete_evt);
+	if (!QDF_IS_STATUS_SUCCESS(status)) {
+		policy_mgr_err("channel_switch_complete_evt init failed");
+		return status;
+	}
+	pm_ctx->do_hw_mode_change = false;
+	pm_ctx->hw_mode_change_in_progress = POLICY_MGR_HW_MODE_NOT_IN_PROGRESS;
+	/* reset sap mandatory channels */
+	status = policy_mgr_reset_sap_mandatory_channels(pm_ctx);
+	if (QDF_IS_STATUS_ERROR(status)) {
+		policy_mgr_err("failed to reset mandatory channels");
+		return status;
+	}
+
+	/* init PCL table & function pointers based on HW capability */
+	if (policy_mgr_is_hw_dbs_2x2_capable(psoc))
+		policy_mgr_get_current_pref_hw_mode_ptr =
+		policy_mgr_get_current_pref_hw_mode_dbs_2x2;
+	else
+		policy_mgr_get_current_pref_hw_mode_ptr =
+		policy_mgr_get_current_pref_hw_mode_dbs_1x1;
+
+	if (policy_mgr_is_hw_dbs_2x2_capable(psoc))
+		second_connection_pcl_dbs_table =
+		&pm_second_connection_pcl_dbs_2x2_table;
+	else
+		second_connection_pcl_dbs_table =
+		&pm_second_connection_pcl_dbs_1x1_table;
+
+	if (policy_mgr_is_hw_dbs_2x2_capable(psoc))
+		third_connection_pcl_dbs_table =
+		&pm_third_connection_pcl_dbs_2x2_table;
+	else
+		third_connection_pcl_dbs_table =
+		&pm_third_connection_pcl_dbs_1x1_table;
+
+	if (policy_mgr_is_hw_dbs_2x2_capable(psoc))
+		next_action_two_connection_table =
+		&pm_next_action_two_connection_dbs_2x2_table;
+	else
+		next_action_two_connection_table =
+		&pm_next_action_two_connection_dbs_1x1_table;
+
+	if (policy_mgr_is_hw_dbs_2x2_capable(psoc))
+		next_action_three_connection_table =
+		&pm_next_action_three_connection_dbs_2x2_table;
+	else
+		next_action_three_connection_table =
+		&pm_next_action_three_connection_dbs_1x1_table;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/*
+ * psoc disable: tear down everything created in policy_mgr_psoc_enable().
+ * Each teardown failure is logged and asserted but does not abort the
+ * remaining cleanup; the aggregated status is returned.
+ */
+QDF_STATUS policy_mgr_psoc_disable(struct wlan_objmgr_psoc *psoc)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	/* destroy connection_update_done_evt */
+	if (!QDF_IS_STATUS_SUCCESS(qdf_event_destroy
+			(&pm_ctx->connection_update_done_evt))) {
+		policy_mgr_err("Failed to destroy connection_update_done_evt");
+		status = QDF_STATUS_E_FAILURE;
+		QDF_ASSERT(0);
+	}
+
+	/* destroy opportunistic_update_done_evt */
+	if (!QDF_IS_STATUS_SUCCESS(qdf_event_destroy
+			(&pm_ctx->opportunistic_update_done_evt))) {
+		policy_mgr_err("Failed to destroy opportunistic_update_done_evt");
+		status = QDF_STATUS_E_FAILURE;
+		QDF_ASSERT(0);
+	}
+	/* destroy channel_switch_complete_evt */
+	if (!QDF_IS_STATUS_SUCCESS(qdf_event_destroy
+			(&pm_ctx->channel_switch_complete_evt))) {
+		policy_mgr_err("Failed to destroy channel_switch_complete evt");
+		status = QDF_STATUS_E_FAILURE;
+		QDF_ASSERT(0);
+	}
+
+	/* deallocate dbs_opportunistic_timer (stop it first if running) */
+	if (QDF_TIMER_STATE_RUNNING ==
+			qdf_mc_timer_get_current_state(
+				&pm_ctx->dbs_opportunistic_timer)) {
+		qdf_mc_timer_stop(&pm_ctx->dbs_opportunistic_timer);
+	}
+
+	if (!QDF_IS_STATUS_SUCCESS(qdf_mc_timer_destroy(
+			&pm_ctx->dbs_opportunistic_timer))) {
+		policy_mgr_err("Cannot deallocate dbs opportunistic timer");
+		status = QDF_STATUS_E_FAILURE;
+		QDF_ASSERT(0);
+	}
+
+	/* reset sap mandatory channels */
+	if (QDF_IS_STATUS_ERROR(
+		policy_mgr_reset_sap_mandatory_channels(pm_ctx))) {
+		policy_mgr_err("failed to reset sap mandatory channels");
+		status = QDF_STATUS_E_FAILURE;
+		QDF_ASSERT(0);
+	}
+
+	/* deinit pm_conc_connection_list */
+	qdf_mem_zero(pm_conc_connection_list, sizeof(pm_conc_connection_list));
+
+	return status;
+}
+
+/* Copy the SME callback table supplied by the caller into pm_ctx. */
+QDF_STATUS policy_mgr_register_sme_cb(struct wlan_objmgr_psoc *psoc,
+		struct policy_mgr_sme_cbacks *sme_cbacks)
+{
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	pm_ctx->sme_cbacks.sme_get_nss_for_vdev =
+		sme_cbacks->sme_get_nss_for_vdev;
+	pm_ctx->sme_cbacks.sme_get_valid_channels =
+		sme_cbacks->sme_get_valid_channels;
+	pm_ctx->sme_cbacks.sme_nss_update_request =
+		sme_cbacks->sme_nss_update_request;
+	pm_ctx->sme_cbacks.sme_pdev_set_hw_mode =
+		sme_cbacks->sme_pdev_set_hw_mode;
+	pm_ctx->sme_cbacks.sme_pdev_set_pcl =
+		sme_cbacks->sme_pdev_set_pcl;
+	pm_ctx->sme_cbacks.sme_soc_set_dual_mac_config =
+		sme_cbacks->sme_soc_set_dual_mac_config;
+	pm_ctx->sme_cbacks.sme_change_mcc_beacon_interval =
+		sme_cbacks->sme_change_mcc_beacon_interval;
+	pm_ctx->sme_cbacks.sme_get_ap_channel_from_scan =
+		sme_cbacks->sme_get_ap_channel_from_scan;
+	pm_ctx->sme_cbacks.sme_scan_result_purge =
+		sme_cbacks->sme_scan_result_purge;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * policy_mgr_register_hdd_cb() - register HDD callbacks
+ * @psoc: PSOC object information
+ * @hdd_cbacks: function pointers from HDD
+ *
+ * API, allows HDD to register callbacks to be invoked by policy
+ * mgr
+ *
+ * Return: SUCCESS,
+ *		Failure (if registration fails)
+ */
+QDF_STATUS policy_mgr_register_hdd_cb(struct wlan_objmgr_psoc *psoc,
+		struct policy_mgr_hdd_cbacks *hdd_cbacks)
+{
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	pm_ctx->hdd_cbacks.sap_restart_chan_switch_cb =
+		hdd_cbacks->sap_restart_chan_switch_cb;
+	pm_ctx->hdd_cbacks.wlan_hdd_get_channel_for_sap_restart =
+		hdd_cbacks->wlan_hdd_get_channel_for_sap_restart;
+	pm_ctx->hdd_cbacks.get_mode_for_non_connected_vdev =
+		hdd_cbacks->get_mode_for_non_connected_vdev;
+	pm_ctx->hdd_cbacks.hdd_get_device_mode =
+		hdd_cbacks->hdd_get_device_mode;
+	pm_ctx->hdd_cbacks.hdd_wapi_security_sta_exist =
+		hdd_cbacks->hdd_wapi_security_sta_exist;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/*
+ * policy_mgr_deregister_hdd_cb() - clear every HDD callback registered by
+ * policy_mgr_register_hdd_cb() so no stale pointer into HDD survives after
+ * HDD deregisters.
+ */
+QDF_STATUS policy_mgr_deregister_hdd_cb(struct wlan_objmgr_psoc *psoc)
+{
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	pm_ctx->hdd_cbacks.sap_restart_chan_switch_cb = NULL;
+	pm_ctx->hdd_cbacks.wlan_hdd_get_channel_for_sap_restart = NULL;
+	pm_ctx->hdd_cbacks.get_mode_for_non_connected_vdev = NULL;
+	pm_ctx->hdd_cbacks.hdd_get_device_mode = NULL;
+	/*
+	 * Fix: hdd_wapi_security_sta_exist is set at registration time but
+	 * was not cleared here, leaving a dangling callback after HDD
+	 * deregisters. Clear it like the other four.
+	 */
+	pm_ctx->hdd_cbacks.hdd_wapi_security_sta_exist = NULL;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/* Copy the WMA callback table supplied by the caller into pm_ctx. */
+QDF_STATUS policy_mgr_register_wma_cb(struct wlan_objmgr_psoc *psoc,
+		struct policy_mgr_wma_cbacks *wma_cbacks)
+{
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	pm_ctx->wma_cbacks.wma_get_connection_info =
+		wma_cbacks->wma_get_connection_info;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/* Copy the CDP callback table supplied by the caller into pm_ctx. */
+QDF_STATUS policy_mgr_register_cdp_cb(struct wlan_objmgr_psoc *psoc,
+		struct policy_mgr_cdp_cbacks *cdp_cbacks)
+{
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	pm_ctx->cdp_cbacks.cdp_update_mac_id =
+		cdp_cbacks->cdp_update_mac_id;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/* Copy the DP callback table supplied by the caller into pm_ctx. */
+QDF_STATUS policy_mgr_register_dp_cb(struct wlan_objmgr_psoc *psoc,
+		struct policy_mgr_dp_cbacks *dp_cbacks)
+{
+	struct policy_mgr_psoc_priv_obj
+		*pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	pm_ctx->dp_cbacks.hdd_disable_rx_ol_in_concurrency =
+		dp_cbacks->hdd_disable_rx_ol_in_concurrency;
+	pm_ctx->dp_cbacks.hdd_set_rx_mode_rps_cb =
+		dp_cbacks->hdd_set_rx_mode_rps_cb;
+	pm_ctx->dp_cbacks.hdd_ipa_set_mcc_mode_cb =
+		dp_cbacks->hdd_ipa_set_mcc_mode_cb;
+	pm_ctx->dp_cbacks.hdd_v2_flow_pool_map =
+		dp_cbacks->hdd_v2_flow_pool_map;
+	pm_ctx->dp_cbacks.hdd_v2_flow_pool_unmap =
+		dp_cbacks->hdd_v2_flow_pool_unmap;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/* Copy the TDLS callback table supplied by the caller into pm_ctx. */
+QDF_STATUS policy_mgr_register_tdls_cb(struct wlan_objmgr_psoc *psoc,
+		struct policy_mgr_tdls_cbacks *tdls_cbacks)
+{
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	pm_ctx->tdls_cbacks.tdls_notify_increment_session =
+		tdls_cbacks->tdls_notify_increment_session;
+	pm_ctx->tdls_cbacks.tdls_notify_decrement_session =
+		tdls_cbacks->tdls_notify_decrement_session;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/* Register the callback used to send mode-change events. */
+QDF_STATUS policy_mgr_register_mode_change_cb(struct wlan_objmgr_psoc *psoc,
+		send_mode_change_event_cb mode_change_cb)
+{
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	pm_ctx->mode_change_cb = mode_change_cb;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/* Clear the mode-change event callback. */
+QDF_STATUS policy_mgr_deregister_mode_change_cb(struct wlan_objmgr_psoc *psoc)
+{
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	pm_ctx->mode_change_cb = NULL;
+
+	return QDF_STATUS_SUCCESS;
+}
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_pcl.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_pcl.c
new file mode 100644
index 0000000000000000000000000000000000000000..8927314d581a2201f7aba2cadd8507e545175371
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_pcl.c
@@ -0,0 +1,1926 @@
+/*
+ * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: wlan_policy_mgr_pcl.c
+ *
+ * WLAN Concurrent Connection Management APIs
+ *
+ */
+
+/* Include files */
+
+#include "wlan_policy_mgr_api.h"
+#include "wlan_policy_mgr_tables_no_dbs_i.h"
+#include "wlan_policy_mgr_i.h"
+#include "qdf_types.h"
+#include "qdf_trace.h"
+#include "wlan_objmgr_global_obj.h"
+#include "wlan_utility.h"
+#include "wlan_mlme_ucfg_api.h"
+
+/**
+ * first_connection_pcl_table - table which provides PCL for the
+ * very first connection in the system
+ */
+const enum policy_mgr_pcl_type
+first_connection_pcl_table[PM_MAX_NUM_OF_MODE]
+			[PM_MAX_CONC_PRIORITY_MODE] = {
+	[PM_STA_MODE] = {PM_NONE, PM_NONE, PM_NONE},
+	[PM_SAP_MODE] = {PM_5G,   PM_5G,   PM_5G  },
+	[PM_P2P_CLIENT_MODE] = {PM_5G,   PM_5G,   PM_5G  },
+	[PM_P2P_GO_MODE] = {PM_5G,   PM_5G,   PM_5G  },
+	[PM_IBSS_MODE] = {PM_NONE, PM_NONE, PM_NONE},
+};
+
+/* DBS PCL/next-action tables, selected at psoc enable per HW capability */
+pm_dbs_pcl_second_connection_table_type
+		*second_connection_pcl_dbs_table;
+pm_dbs_pcl_third_connection_table_type
+		*third_connection_pcl_dbs_table;
+policy_mgr_next_action_two_connection_table_type
+		*next_action_two_connection_table;
+policy_mgr_next_action_three_connection_table_type
+		*next_action_three_connection_table;
+
+/*
+ * Compute the PCL as if the existing connection(s) of @mode were going to
+ * reconnect: temporarily remove them from the connection table, query the
+ * PCL, then restore them. Runs under qdf_conc_list_lock.
+ */
+QDF_STATUS policy_mgr_get_pcl_for_existing_conn(struct wlan_objmgr_psoc *psoc,
+		enum policy_mgr_con_mode mode,
+		uint8_t *pcl_ch, uint32_t *len,
+		uint8_t *pcl_weight, uint32_t weight_len,
+		bool all_matching_cxn_to_del)
+{
+	struct policy_mgr_conc_connection_info
+			info[MAX_NUMBER_OF_CONC_CONNECTIONS] = { {0} };
+	uint8_t num_cxn_del = 0;
+
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	policy_mgr_debug("get pcl for existing conn:%d", mode);
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
+	if (policy_mgr_mode_specific_connection_count(psoc, mode, NULL) > 0) {
+		/* Check, store and temp delete the mode's parameter */
+		policy_mgr_store_and_del_conn_info(psoc, mode,
+				all_matching_cxn_to_del, info, &num_cxn_del);
+		/* Get the PCL */
+		status = policy_mgr_get_pcl(psoc, mode, pcl_ch, len,
+					pcl_weight, weight_len);
+		policy_mgr_debug("Get PCL to FW for mode:%d", mode);
+		/* Restore the connection info */
+		policy_mgr_restore_deleted_conn_info(psoc, info, num_cxn_del);
+	}
+	qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);
+
+	return status;
+}
+
+/*
+ * Decrement the active session count for @mode/@session_id and, if STA
+ * connections remain, push a refreshed PCL to FW and re-evaluate whether
+ * an opportunistic HW mode change is needed.
+ */
+void policy_mgr_decr_session_set_pcl(struct wlan_objmgr_psoc *psoc,
+						enum QDF_OPMODE mode,
+						uint8_t session_id)
+{
+	QDF_STATUS qdf_status;
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return;
+	}
+
+	qdf_status = policy_mgr_decr_active_session(psoc, mode, session_id);
+	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
+		policy_mgr_debug("Invalid active session");
+		return;
+	}
+
+	/*
+	 * After the removal of this connection, we need to check if
+	 * a STA connection still exists. The reason for this is that
+	 * if one or more STA exists, we need to provide the updated
+	 * PCL to the FW for cases like LFR.
+	 *
+	 * Since policy_mgr_get_pcl provides PCL list based on the new
+	 * connection that is going to come up, we will find the
+	 * existing STA entry, save it and delete it temporarily.
+	 * After this we will get PCL as though a new STA connection
+	 * is coming up. This will give the exact PCL that needs to be
+	 * given to the FW. After setting the PCL, we need to restore
+	 * the entry that we have saved before.
+	 */
+	policy_mgr_set_pcl_for_existing_combo(psoc, PM_STA_MODE);
+	/* do we need to change the HW mode */
+	policy_mgr_check_n_start_opportunistic_timer(psoc);
+	return;
+}
+
+/*
+ * Regulatory channel-change callback: cache the avoid-frequency (unsafe)
+ * channel list, clamped to NUM_CHANNELS entries.
+ */
+void policy_mgr_reg_chan_change_callback(struct wlan_objmgr_psoc *psoc,
+		struct wlan_objmgr_pdev *pdev,
+		struct regulatory_channel *chan_list,
+		struct avoid_freq_ind_data *avoid_freq_ind,
+		void *arg)
+{
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return;
+	}
+
+	if (!avoid_freq_ind) {
+		policy_mgr_debug("avoid_freq_ind NULL");
+		return;
+	}
+
+	/*
+	 * The ch_list buffer can accommodate a maximum of
+	 * NUM_CHANNELS and hence the ch_cnt should also not
+	 * exceed NUM_CHANNELS.
+	 */
+	pm_ctx->unsafe_channel_count = avoid_freq_ind->chan_list.ch_cnt >=
+			NUM_CHANNELS ?
+			NUM_CHANNELS : avoid_freq_ind->chan_list.ch_cnt;
+	if (pm_ctx->unsafe_channel_count)
+		qdf_mem_copy(pm_ctx->unsafe_channel_list,
+			avoid_freq_ind->chan_list.ch_list,
+			pm_ctx->unsafe_channel_count *
+			sizeof(pm_ctx->unsafe_channel_list[0]));
+	policy_mgr_debug("Channel list update, received %d avoided channels",
+		pm_ctx->unsafe_channel_count);
+}
+
+/*
+ * Filter the cached unsafe channels out of the given PCL in place,
+ * compacting both the channel and weight arrays; *len is updated to the
+ * number of safe channels kept.
+ */
+void policy_mgr_update_with_safe_channel_list(struct wlan_objmgr_psoc *psoc,
+		uint8_t *pcl_channels, uint32_t *len,
+		uint8_t *weight_list, uint32_t weight_len)
+{
+	uint8_t current_channel_list[QDF_MAX_NUM_CHAN];
+	uint8_t org_weight_list[QDF_MAX_NUM_CHAN];
+	uint8_t is_unsafe = 1;
+	uint8_t i, j;
+	uint32_t safe_channel_count = 0, current_channel_count = 0;
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return;
+	}
+
+	if (len) {
+		current_channel_count = QDF_MIN(*len, QDF_MAX_NUM_CHAN);
+	} else {
+		policy_mgr_err("invalid number of channel length");
+		return;
+	}
+
+	if (pm_ctx->unsafe_channel_count == 0) {
+		policy_mgr_debug("There are no unsafe channels");
+		return;
+	}
+
+	qdf_mem_copy(current_channel_list, pcl_channels,
+		current_channel_count);
+	qdf_mem_zero(pcl_channels, current_channel_count);
+
+	qdf_mem_copy(org_weight_list, weight_list, QDF_MAX_NUM_CHAN);
+	qdf_mem_zero(weight_list, weight_len);
+
+	for (i = 0; i < current_channel_count; i++) {
+		is_unsafe = 0;
+		for (j = 0; j < pm_ctx->unsafe_channel_count; j++) {
+			if (current_channel_list[i] ==
+				pm_ctx->unsafe_channel_list[j]) {
+				/* Found unsafe channel, update it */
+				is_unsafe = 1;
+				policy_mgr_debug("CH %d is not safe",
+					current_channel_list[i]);
+				break;
+			}
+		}
+		if (!is_unsafe) {
+			pcl_channels[safe_channel_count] =
+				current_channel_list[i];
+			if (safe_channel_count < weight_len)
+				weight_list[safe_channel_count] =
+					org_weight_list[i];
+			safe_channel_count++;
+		}
+	}
+	*len = safe_channel_count;
+
+	return;
+}
+
+/* Keep only channels that are neither passive nor disabled (in place). */
+static QDF_STATUS policy_mgr_modify_pcl_based_on_enabled_channels(
+		struct policy_mgr_psoc_priv_obj *pm_ctx,
+		uint8_t *pcl_list_org,
+		uint8_t *weight_list_org,
+		uint32_t *pcl_len_org)
+{
+	uint32_t i, pcl_len = 0;
+
+	for (i = 0; i < *pcl_len_org; i++) {
+		if (!wlan_reg_is_passive_or_disable_ch(
+			pm_ctx->pdev, pcl_list_org[i])) {
+			pcl_list_org[pcl_len] = pcl_list_org[i];
+			weight_list_org[pcl_len++] = weight_list_org[i];
+		}
+	}
+	*pcl_len_org = pcl_len;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/* Keep only channels eligible under Do-Not-Break-Stream (DNBS) rules. */
+static QDF_STATUS policy_mgr_modify_pcl_based_on_dnbs(
+		struct wlan_objmgr_psoc *psoc,
+		uint8_t *pcl_list_org,
+		uint8_t *weight_list_org,
+		uint32_t *pcl_len_org)
+{
+	uint32_t i, pcl_len = 0;
+	uint8_t pcl_list[QDF_MAX_NUM_CHAN];
+	uint8_t weight_list[QDF_MAX_NUM_CHAN];
+	bool ok;
+	QDF_STATUS status = QDF_STATUS_E_FAILURE;
+
+	if (*pcl_len_org > QDF_MAX_NUM_CHAN) {
+		policy_mgr_err("Invalid PCL List Length %d", *pcl_len_org);
+		return status;
+	}
+	for (i = 0; i < *pcl_len_org; i++) {
+		status = policy_mgr_is_chan_ok_for_dnbs(psoc, pcl_list_org[i],
+							&ok);
+
+		if (QDF_IS_STATUS_ERROR(status)) {
+			policy_mgr_err("Not able to check DNBS eligibility");
+			return status;
+		}
+		if (ok) {
+			pcl_list[pcl_len] = pcl_list_org[i];
+			weight_list[pcl_len++] = weight_list_org[i];
+		}
+	}
+
+	qdf_mem_zero(pcl_list_org, *pcl_len_org);
+	qdf_mem_zero(weight_list_org, *pcl_len_org);
+	qdf_mem_copy(pcl_list_org, pcl_list, pcl_len);
+	qdf_mem_copy(weight_list_org, weight_list, pcl_len);
+	*pcl_len_org = pcl_len;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/*
+ * Return the channel of the first in-use connection matching @mode (and
+ * @vdev_id when non-NULL), or 0 when none is found.
+ * NOTE(review): the lock is taken and dropped per loop iteration; holding
+ * it across the whole scan would give a consistent snapshot — confirm
+ * whether per-entry locking is intentional here.
+ */
+uint8_t policy_mgr_get_channel(struct wlan_objmgr_psoc *psoc,
+			enum policy_mgr_con_mode mode, uint32_t *vdev_id)
+{
+	uint32_t idx = 0;
+	uint8_t chan;
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return 0;
+	}
+
+	if (mode >= PM_MAX_NUM_OF_MODE) {
+		policy_mgr_err("incorrect mode");
+		return 0;
+	}
+
+	for (idx = 0; idx < MAX_NUMBER_OF_CONC_CONNECTIONS; idx++) {
+		qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
+		if ((pm_conc_connection_list[idx].mode == mode) &&
+				(!vdev_id || (*vdev_id ==
+					pm_conc_connection_list[idx].vdev_id))
+				&& pm_conc_connection_list[idx].in_use) {
+			chan = pm_conc_connection_list[idx].chan;
+			qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);
+			return chan;
+		}
+		qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);
+	}
+
+	return 0;
+}
+
+/**
+ * policy_mgr_skip_dfs_ch() - skip dfs channel or not
+ * @psoc: pointer to soc
+ * @skip_dfs_channel: pointer to result
+ *
+ * DFS channels are skipped for SAP/GO when DFS master capability is off,
+ * or when a STA is connected and STA+SAP SCC on a DFS channel is not
+ * allowed.
+ *
+ * Return: QDF_STATUS
+ */
+static QDF_STATUS policy_mgr_skip_dfs_ch(struct wlan_objmgr_psoc *psoc,
+					bool *skip_dfs_channel)
+{
+	bool sta_sap_scc_on_dfs_chan;
+	bool dfs_master_capable;
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	dfs_master_capable = pm_ctx->user_cfg.enable_dfs_master_cap;
+
+	*skip_dfs_channel = false;
+	if (!dfs_master_capable) {
+		policy_mgr_debug("skip DFS ch for SAP/Go dfs master cap %d",
+				dfs_master_capable);
+		*skip_dfs_channel = true;
+		return QDF_STATUS_SUCCESS;
+	}
+
+	sta_sap_scc_on_dfs_chan =
+		policy_mgr_is_sta_sap_scc_allowed_on_dfs_chan(psoc);
+	if ((policy_mgr_mode_specific_connection_count(psoc, PM_STA_MODE,
+						NULL) > 0) &&
+		!sta_sap_scc_on_dfs_chan) {
+		policy_mgr_debug("SAP/Go skips DFS ch if sta connects");
+		*skip_dfs_channel = true;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * policy_mgr_modify_sap_pcl_based_on_dfs() - filter out DFS channel if needed
+ * @psoc: pointer to soc
+ * @pcl_list_org: channel list to filter out
+ * @weight_list_org: weight of channel list
+ * @pcl_len_org: length of channel list
+ *
+ * Return: QDF_STATUS
+ */
+static QDF_STATUS policy_mgr_modify_sap_pcl_based_on_dfs(
+		struct wlan_objmgr_psoc *psoc,
+		uint8_t *pcl_list_org,
+		uint8_t *weight_list_org,
+		uint32_t *pcl_len_org)
+{
+	size_t i, pcl_len = 0;
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+	bool skip_dfs_channel = false;
+	QDF_STATUS status;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+	if (*pcl_len_org > QDF_MAX_NUM_CHAN) {
+		policy_mgr_err("Invalid PCL List Length %d", *pcl_len_org);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	status = policy_mgr_skip_dfs_ch(psoc, &skip_dfs_channel);
+	if (QDF_IS_STATUS_ERROR(status)) {
+		policy_mgr_err("failed to get dfs channel skip info");
+		return status;
+	}
+
+	if (!skip_dfs_channel) {
+		policy_mgr_debug("No more operation on DFS channel");
+		return QDF_STATUS_SUCCESS;
+	}
+
+	for (i = 0; i < *pcl_len_org; i++) {
+		if (!wlan_reg_is_dfs_ch(pm_ctx->pdev, pcl_list_org[i])) {
+			pcl_list_org[pcl_len] = pcl_list_org[i];
+			weight_list_org[pcl_len++] = weight_list_org[i];
+		}
+	}
+
+	*pcl_len_org = pcl_len;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/* Drop channels that are regulatory-disabled (NOL) from the SAP PCL. */
+static QDF_STATUS policy_mgr_modify_sap_pcl_based_on_nol(
+		struct wlan_objmgr_psoc *psoc,
+		uint8_t *pcl_list_org,
+		uint8_t *weight_list_org,
+		uint32_t *pcl_len_org)
+{
+	uint32_t i, pcl_len = 0;
+	uint8_t pcl_list[QDF_MAX_NUM_CHAN];
+	uint8_t weight_list[QDF_MAX_NUM_CHAN];
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+	if (*pcl_len_org > QDF_MAX_NUM_CHAN) {
+		policy_mgr_err("Invalid PCL List Length %d", *pcl_len_org);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	for (i = 0; i < *pcl_len_org; i++) {
+		if (!wlan_reg_is_disable_ch(pm_ctx->pdev, pcl_list_org[i])) {
+			pcl_list[pcl_len] = pcl_list_org[i];
+			weight_list[pcl_len++] = weight_list_org[i];
+		}
+	}
+
+	qdf_mem_zero(pcl_list_org, *pcl_len_org);
+	qdf_mem_zero(weight_list_org, *pcl_len_org);
+	qdf_mem_copy(pcl_list_org, pcl_list, pcl_len);
+	qdf_mem_copy(weight_list_org, weight_list, pcl_len);
+	*pcl_len_org = pcl_len;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/*
+ * Drop ETSI13 SRD channels from the PCL when SRD channels are not allowed
+ * in master mode for this pdev.
+ */
+static QDF_STATUS
+policy_mgr_modify_pcl_based_on_srd(struct wlan_objmgr_psoc *psoc,
+				uint8_t *pcl_list_org,
+				uint8_t *weight_list_org,
+				uint32_t *pcl_len_org)
+{
+	uint32_t i, pcl_len = 0;
+	uint8_t pcl_list[QDF_MAX_NUM_CHAN];
+	uint8_t weight_list[QDF_MAX_NUM_CHAN];
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+	bool is_etsi13_srd_chan_allowed_in_mas_mode = true;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+	is_etsi13_srd_chan_allowed_in_mas_mode =
+		wlan_reg_is_etsi13_srd_chan_allowed_master_mode(pm_ctx->pdev);
+
+	if (is_etsi13_srd_chan_allowed_in_mas_mode)
+		return QDF_STATUS_SUCCESS;
+
+	if (*pcl_len_org > QDF_MAX_NUM_CHAN) {
+		policy_mgr_err("Invalid PCL List Length %d", *pcl_len_org);
+		return QDF_STATUS_E_FAILURE;
+	}
+	for (i = 0; i < *pcl_len_org; i++) {
+		if (wlan_reg_is_etsi13_srd_chan(pm_ctx->pdev,
+						pcl_list_org[i]))
+			continue;
+		pcl_list[pcl_len] = pcl_list_org[i];
+		weight_list[pcl_len++] = weight_list_org[i];
+	}
+
+	qdf_mem_zero(pcl_list_org, *pcl_len_org);
+	qdf_mem_zero(weight_list_org, *pcl_len_org);
+	qdf_mem_copy(pcl_list_org, pcl_list, pcl_len);
+	qdf_mem_copy(weight_list_org, weight_list, pcl_len);
+	*pcl_len_org = pcl_len;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/*
+ * Apply the SAP-specific PCL filters in order: mandatory-channel set (if
+ * configured), NOL, DFS, then SRD. Each stage logs the surviving list.
+ */
+static QDF_STATUS policy_mgr_pcl_modification_for_sap(
+		struct wlan_objmgr_psoc *psoc,
+		uint8_t *pcl_channels, uint8_t *pcl_weight,
+		uint32_t *len)
+{
+	QDF_STATUS status = QDF_STATUS_E_FAILURE;
+	uint32_t i;
+
+	if (policy_mgr_is_sap_mandatory_channel_set(psoc)) {
+		status = policy_mgr_modify_sap_pcl_based_on_mandatory_channel(
+				psoc, pcl_channels, pcl_weight, len);
+		if (QDF_IS_STATUS_ERROR(status)) {
+			policy_mgr_err(
+				"failed to get mandatory modified pcl for SAP");
+			return status;
+		}
+		policy_mgr_debug("mandatory modified pcl len:%d", *len);
+		for (i = 0; i < *len; i++)
+			policy_mgr_debug("chan:%d weight:%d",
+				pcl_channels[i], pcl_weight[i]);
+	}
+
+	status = policy_mgr_modify_sap_pcl_based_on_nol(
+			psoc, pcl_channels, pcl_weight, len);
+	if (QDF_IS_STATUS_ERROR(status)) {
+		policy_mgr_err("failed to get nol modified pcl for SAP");
+		return status;
+	}
+	policy_mgr_debug("nol modified pcl len:%d", *len);
+	for (i = 0; i < *len; i++)
+		policy_mgr_debug("chan:%d weight:%d",
+			pcl_channels[i], pcl_weight[i]);
+
+	status = policy_mgr_modify_sap_pcl_based_on_dfs(
+			psoc, pcl_channels, pcl_weight, len);
+	if (QDF_IS_STATUS_ERROR(status)) {
+		policy_mgr_err("failed to get dfs modified pcl for SAP");
+		return status;
+	}
+	policy_mgr_debug("dfs modified pcl len:%d", *len);
+	for (i = 0; i < *len; i++)
+		policy_mgr_debug("chan:%d weight:%d",
+			pcl_channels[i], pcl_weight[i]);
+
+	status = policy_mgr_modify_pcl_based_on_srd
+			(psoc, pcl_channels, pcl_weight, len);
+	if (QDF_IS_STATUS_ERROR(status)) {
+		policy_mgr_err("failed to get srd modified pcl for SAP");
+		return status;
+	}
+	policy_mgr_debug("modified final pcl len:%d", *len);
+	for (i = 0; i < *len; i++)
+		policy_mgr_debug("chan:%d weight:%d",
+			pcl_channels[i], pcl_weight[i]);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/* Apply P2P-GO PCL filters: enabled-channels only, then SRD. */
+static QDF_STATUS policy_mgr_pcl_modification_for_p2p_go(
+		struct
wlan_objmgr_psoc *psoc, + uint8_t *pcl_channels, uint8_t *pcl_weight, + uint32_t *len) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct policy_mgr_psoc_priv_obj *pm_ctx; + uint32_t i; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("context is NULL"); + return status; + } + + status = policy_mgr_modify_pcl_based_on_enabled_channels( + pm_ctx, pcl_channels, pcl_weight, len); + if (QDF_IS_STATUS_ERROR(status)) { + policy_mgr_err("failed to get modified pcl for GO"); + return status; + } + status = policy_mgr_modify_pcl_based_on_srd + (psoc, pcl_channels, pcl_weight, len); + if (QDF_IS_STATUS_ERROR(status)) { + policy_mgr_err("failed to get modified pcl for SAP"); + return status; + } + policy_mgr_debug("modified pcl len:%d", *len); + for (i = 0; i < *len; i++) + policy_mgr_debug("chan:%d weight:%d", + pcl_channels[i], pcl_weight[i]); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS policy_mgr_mode_specific_modification_on_pcl( + struct wlan_objmgr_psoc *psoc, + uint8_t *pcl_channels, uint8_t *pcl_weight, + uint32_t *len, enum policy_mgr_con_mode mode) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + switch (mode) { + case PM_SAP_MODE: + status = policy_mgr_pcl_modification_for_sap( + psoc, pcl_channels, pcl_weight, len); + break; + case PM_P2P_GO_MODE: + status = policy_mgr_pcl_modification_for_p2p_go( + psoc, pcl_channels, pcl_weight, len); + break; + case PM_STA_MODE: + case PM_P2P_CLIENT_MODE: + case PM_IBSS_MODE: + status = QDF_STATUS_SUCCESS; + break; + default: + policy_mgr_err("unexpected mode %d", mode); + break; + } + + return status; +} + +QDF_STATUS policy_mgr_get_pcl(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_con_mode mode, + uint8_t *pcl_channels, uint32_t *len, + uint8_t *pcl_weight, uint32_t weight_len) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + uint32_t num_connections = 0, i; + enum policy_mgr_conc_priority_mode first_index = 0; + enum policy_mgr_one_connection_mode second_index = 0; + enum 
policy_mgr_two_connection_mode third_index = 0; + enum policy_mgr_pcl_type pcl = PM_NONE; + enum policy_mgr_conc_priority_mode conc_system_pref = 0; + struct policy_mgr_psoc_priv_obj *pm_ctx; + enum QDF_OPMODE qdf_mode; + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("context is NULL"); + return status; + } + + if ((mode < 0) || (mode >= PM_MAX_NUM_OF_MODE)) { + policy_mgr_err("Invalid connection mode %d received", mode); + return status; + } + + /* find the current connection state from pm_conc_connection_list*/ + num_connections = policy_mgr_get_connection_count(psoc); + policy_mgr_debug("connections:%d pref:%d requested mode:%d", + num_connections, pm_ctx->cur_conc_system_pref, mode); + + switch (pm_ctx->cur_conc_system_pref) { + case 0: + conc_system_pref = PM_THROUGHPUT; + break; + case 1: + conc_system_pref = PM_POWERSAVE; + break; + case 2: + conc_system_pref = PM_LATENCY; + break; + default: + policy_mgr_err("unknown cur_conc_system_pref value %d", + pm_ctx->cur_conc_system_pref); + break; + } + + switch (num_connections) { + case 0: + first_index = + policy_mgr_get_first_connection_pcl_table_index(psoc); + pcl = first_connection_pcl_table[mode][first_index]; + break; + case 1: + second_index = + policy_mgr_get_second_connection_pcl_table_index(psoc); + if (PM_MAX_ONE_CONNECTION_MODE == second_index) { + policy_mgr_err("couldn't find index for 2nd connection pcl table"); + return status; + } + qdf_mode = policy_mgr_get_qdf_mode_from_pm(mode); + if (qdf_mode == QDF_MAX_NO_OF_MODE) + return status; + + if (policy_mgr_is_hw_dbs_capable(psoc) == true && + policy_mgr_is_dbs_allowed_for_concurrency( + psoc, qdf_mode)) { + pcl = (*second_connection_pcl_dbs_table) + [second_index][mode][conc_system_pref]; + } else { + pcl = second_connection_pcl_nodbs_table + [second_index][mode][conc_system_pref]; + } + + break; + case 2: + third_index = + policy_mgr_get_third_connection_pcl_table_index(psoc); + if (PM_MAX_TWO_CONNECTION_MODE == 
third_index) { + policy_mgr_err( + "couldn't find index for 3rd connection pcl table"); + return status; + } + if (policy_mgr_is_hw_dbs_capable(psoc) == true) { + pcl = (*third_connection_pcl_dbs_table) + [third_index][mode][conc_system_pref]; + } else { + pcl = third_connection_pcl_nodbs_table + [third_index][mode][conc_system_pref]; + } + break; + default: + policy_mgr_err("unexpected num_connections value %d", + num_connections); + break; + } + + policy_mgr_debug("index1:%d index2:%d index3:%d pcl:%d dbs:%d", + first_index, second_index, third_index, + pcl, policy_mgr_is_hw_dbs_capable(psoc)); + + /* once the PCL enum is obtained find out the exact channel list with + * help from sme_get_cfg_valid_channels + */ + status = policy_mgr_get_channel_list(psoc, pcl, pcl_channels, len, mode, + pcl_weight, weight_len); + if (QDF_IS_STATUS_ERROR(status)) { + policy_mgr_err("failed to get channel list:%d", status); + return status; + } + + policy_mgr_debug("pcl len:%d", *len); + for (i = 0; i < *len; i++) { + policy_mgr_debug("chan:%d weight:%d", + pcl_channels[i], pcl_weight[i]); + } + + policy_mgr_mode_specific_modification_on_pcl( + psoc, pcl_channels, pcl_weight, len, mode); + + status = policy_mgr_modify_pcl_based_on_dnbs(psoc, pcl_channels, + pcl_weight, len); + + if (QDF_IS_STATUS_ERROR(status)) { + policy_mgr_err("failed to get modified pcl based on DNBS"); + return status; + } + return QDF_STATUS_SUCCESS; +} + +enum policy_mgr_conc_priority_mode + policy_mgr_get_first_connection_pcl_table_index( + struct wlan_objmgr_psoc *psoc) +{ + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("context is NULL"); + return PM_THROUGHPUT; + } + + if (pm_ctx->cur_conc_system_pref >= PM_MAX_CONC_PRIORITY_MODE) + return PM_THROUGHPUT; + + return pm_ctx->cur_conc_system_pref; +} + +enum policy_mgr_one_connection_mode + policy_mgr_get_second_connection_pcl_table_index( + struct wlan_objmgr_psoc *psoc) +{ + enum 
policy_mgr_one_connection_mode index = PM_MAX_ONE_CONNECTION_MODE; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return index; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + if (PM_STA_MODE == pm_conc_connection_list[0].mode) { + if (WLAN_REG_IS_24GHZ_CH(pm_conc_connection_list[0].chan)) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_24_1x1; + else + index = PM_STA_24_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_5_1x1; + else + index = PM_STA_5_2x2; + } + } else if (PM_SAP_MODE == pm_conc_connection_list[0].mode) { + if (WLAN_REG_IS_24GHZ_CH(pm_conc_connection_list[0].chan)) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_SAP_24_1x1; + else + index = PM_SAP_24_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_SAP_5_1x1; + else + index = PM_SAP_5_2x2; + } + } else if (PM_P2P_CLIENT_MODE == pm_conc_connection_list[0].mode) { + if (WLAN_REG_IS_24GHZ_CH(pm_conc_connection_list[0].chan)) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_CLI_24_1x1; + else + index = PM_P2P_CLI_24_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_CLI_5_1x1; + else + index = PM_P2P_CLI_5_2x2; + } + } else if (PM_P2P_GO_MODE == pm_conc_connection_list[0].mode) { + if (WLAN_REG_IS_24GHZ_CH(pm_conc_connection_list[0].chan)) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_GO_24_1x1; + else + index = PM_P2P_GO_24_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_GO_5_1x1; + else + index = PM_P2P_GO_5_2x2; + } + } else if (PM_IBSS_MODE == pm_conc_connection_list[0].mode) { + if (WLAN_REG_IS_24GHZ_CH(pm_conc_connection_list[0].chan)) 
{ + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_IBSS_24_1x1; + else + index = PM_IBSS_24_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_IBSS_5_1x1; + else + index = PM_IBSS_5_2x2; + } + } + + policy_mgr_debug("mode:%d chan:%d chain:%d index:%d", + pm_conc_connection_list[0].mode, + pm_conc_connection_list[0].chan, + pm_conc_connection_list[0].chain_mask, index); + + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return index; +} + +static enum policy_mgr_two_connection_mode + policy_mgr_get_third_connection_pcl_table_index_cli_sap(void) +{ + enum policy_mgr_two_connection_mode index = PM_MAX_TWO_CONNECTION_MODE; + /* SCC */ + if (pm_conc_connection_list[0].chan == + pm_conc_connection_list[1].chan) { + if (WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[0].chan)) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_CLI_SAP_SCC_24_1x1; + else + index = PM_P2P_CLI_SAP_SCC_24_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_CLI_SAP_SCC_5_1x1; + else + index = PM_P2P_CLI_SAP_SCC_5_2x2; + } + /* MCC */ + } else if (pm_conc_connection_list[0].mac == + pm_conc_connection_list[1].mac) { + if ((WLAN_REG_IS_24GHZ_CH + (pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_24GHZ_CH + (pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_CLI_SAP_MCC_24_1x1; + else + index = PM_P2P_CLI_SAP_MCC_24_2x2; + } else if ((WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_CLI_SAP_MCC_5_1x1; + else + index = PM_P2P_CLI_SAP_MCC_5_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_CLI_SAP_MCC_24_5_1x1; + else + index = 
PM_P2P_CLI_SAP_MCC_24_5_2x2; + } + /* SBS or DBS */ + } else if (pm_conc_connection_list[0].mac != + pm_conc_connection_list[1].mac) { + /* SBS */ + if ((WLAN_REG_IS_5GHZ_CH(pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_5GHZ_CH(pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_CLI_SAP_SBS_5_1x1; + } else { + /* DBS */ + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_CLI_SAP_DBS_1x1; + else + index = PM_P2P_CLI_SAP_DBS_2x2; + } + } + return index; +} + +static enum policy_mgr_two_connection_mode + policy_mgr_get_third_connection_pcl_table_index_sta_sap(void) +{ + enum policy_mgr_two_connection_mode index = PM_MAX_TWO_CONNECTION_MODE; + /* SCC */ + if (pm_conc_connection_list[0].chan == + pm_conc_connection_list[1].chan) { + if (WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[0].chan)) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_SAP_SCC_24_1x1; + else + index = PM_STA_SAP_SCC_24_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_SAP_SCC_5_1x1; + else + index = PM_STA_SAP_SCC_5_2x2; + } + /* MCC */ + } else if (pm_conc_connection_list[0].mac == + pm_conc_connection_list[1].mac) { + if ((WLAN_REG_IS_24GHZ_CH + (pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_24GHZ_CH + (pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_SAP_MCC_24_1x1; + else + index = PM_STA_SAP_MCC_24_2x2; + } else if ((WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_SAP_MCC_5_1x1; + else + index = PM_STA_SAP_MCC_5_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_SAP_MCC_24_5_1x1; + else + index = 
PM_STA_SAP_MCC_24_5_2x2; + } + /* SBS or DBS */ + } else if (pm_conc_connection_list[0].mac != + pm_conc_connection_list[1].mac) { + /* SBS */ + if ((WLAN_REG_IS_5GHZ_CH(pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_5GHZ_CH(pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_SAP_SBS_5_1x1; + } else { + /* DBS */ + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_SAP_DBS_1x1; + else + index = PM_STA_SAP_DBS_2x2; + } + } + return index; +} + +static enum policy_mgr_two_connection_mode + policy_mgr_get_third_connection_pcl_table_index_sap_sap(void) +{ + enum policy_mgr_two_connection_mode index = PM_MAX_TWO_CONNECTION_MODE; + /* SCC */ + if (pm_conc_connection_list[0].chan == + pm_conc_connection_list[1].chan) { + if (WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[0].chan)) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_SAP_SAP_SCC_24_1x1; + else + index = PM_SAP_SAP_SCC_24_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_SAP_SAP_SCC_5_1x1; + else + index = PM_SAP_SAP_SCC_5_2x2; + } + /* MCC */ + } else if (pm_conc_connection_list[0].mac == + pm_conc_connection_list[1].mac) { + if ((WLAN_REG_IS_24GHZ_CH + (pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_24GHZ_CH + (pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_SAP_SAP_MCC_24_1x1; + else + index = PM_SAP_SAP_MCC_24_2x2; + } else if ((WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_SAP_SAP_MCC_5_1x1; + else + index = PM_SAP_SAP_MCC_5_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_SAP_SAP_MCC_24_5_1x1; + else + index = PM_SAP_SAP_MCC_24_5_2x2; + 
} + /* SBS or DBS */ + } else if (pm_conc_connection_list[0].mac != + pm_conc_connection_list[1].mac) { + /* SBS */ + if ((WLAN_REG_IS_5GHZ_CH(pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_5GHZ_CH(pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_SAP_SAP_SBS_5_1x1; + } else { + /* DBS */ + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_SAP_SAP_DBS_1x1; + else + index = PM_SAP_SAP_DBS_2x2; + } + } + return index; +} + +static enum policy_mgr_two_connection_mode + policy_mgr_get_third_connection_pcl_table_index_sta_go(void) +{ + enum policy_mgr_two_connection_mode index = PM_MAX_TWO_CONNECTION_MODE; + /* SCC */ + if (pm_conc_connection_list[0].chan == + pm_conc_connection_list[1].chan) { + if (WLAN_REG_IS_24GHZ_CH + (pm_conc_connection_list[0].chan)) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_P2P_GO_SCC_24_1x1; + else + index = PM_STA_P2P_GO_SCC_24_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_P2P_GO_SCC_5_1x1; + else + index = PM_STA_P2P_GO_SCC_5_2x2; + } + /* MCC */ + } else if (pm_conc_connection_list[0].mac == + pm_conc_connection_list[1].mac) { + if ((WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_24GHZ_CH + (pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_P2P_GO_MCC_24_1x1; + else + index = PM_STA_P2P_GO_MCC_24_2x2; + } else if ((WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_P2P_GO_MCC_5_1x1; + else + index = PM_STA_P2P_GO_MCC_5_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_P2P_GO_MCC_24_5_1x1; + else + index = PM_STA_P2P_GO_MCC_24_5_2x2; 
+ } + /* SBS or DBS */ + } else if (pm_conc_connection_list[0].mac != + pm_conc_connection_list[1].mac) { + /* SBS */ + if ((WLAN_REG_IS_5GHZ_CH(pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_5GHZ_CH(pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_P2P_GO_SBS_5_1x1; + } else { + /* DBS */ + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_P2P_GO_DBS_1x1; + else + index = PM_STA_P2P_GO_DBS_2x2; + } + } + return index; +} + +static enum policy_mgr_two_connection_mode + policy_mgr_get_third_connection_pcl_table_index_sta_cli(void) +{ + enum policy_mgr_two_connection_mode index = PM_MAX_TWO_CONNECTION_MODE; + /* SCC */ + if (pm_conc_connection_list[0].chan == + pm_conc_connection_list[1].chan) { + if (WLAN_REG_IS_24GHZ_CH + (pm_conc_connection_list[0].chan)) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_P2P_CLI_SCC_24_1x1; + else + index = PM_STA_P2P_CLI_SCC_24_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_P2P_CLI_SCC_5_1x1; + else + index = PM_STA_P2P_CLI_SCC_5_2x2; + } + /* MCC */ + } else if (pm_conc_connection_list[0].mac == + pm_conc_connection_list[1].mac) { + if ((WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_P2P_CLI_MCC_24_1x1; + else + index = PM_STA_P2P_CLI_MCC_24_2x2; + } else if ((WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_P2P_CLI_MCC_5_1x1; + else + index = PM_STA_P2P_CLI_MCC_5_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_P2P_CLI_MCC_24_5_1x1; + else + index = 
PM_STA_P2P_CLI_MCC_24_5_2x2; + } + /* SBS or DBS */ + } else if (pm_conc_connection_list[0].mac != + pm_conc_connection_list[1].mac) { + /* SBS */ + if ((WLAN_REG_IS_5GHZ_CH(pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_5GHZ_CH(pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_P2P_CLI_SBS_5_1x1; + } else { + /* DBS */ + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_P2P_CLI_DBS_1x1; + else + index = PM_STA_P2P_CLI_DBS_2x2; + } + } + return index; +} + +static enum policy_mgr_two_connection_mode + policy_mgr_get_third_connection_pcl_table_index_go_cli(void) +{ + enum policy_mgr_two_connection_mode index = PM_MAX_TWO_CONNECTION_MODE; + /* SCC */ + if (pm_conc_connection_list[0].chan == + pm_conc_connection_list[1].chan) { + if (WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[0].chan)) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_GO_P2P_CLI_SCC_24_1x1; + else + index = PM_P2P_GO_P2P_CLI_SCC_24_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_GO_P2P_CLI_SCC_5_1x1; + else + index = PM_P2P_GO_P2P_CLI_SCC_5_2x2; + } + /* MCC */ + } else if (pm_conc_connection_list[0].mac == + pm_conc_connection_list[1].mac) { + if ((WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_GO_P2P_CLI_MCC_24_1x1; + else + index = PM_P2P_GO_P2P_CLI_MCC_24_2x2; + } else if ((WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_GO_P2P_CLI_MCC_5_1x1; + else + index = PM_P2P_GO_P2P_CLI_MCC_5_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index 
= PM_P2P_GO_P2P_CLI_MCC_24_5_1x1; + else + index = PM_P2P_GO_P2P_CLI_MCC_24_5_2x2; + } + /* SBS or DBS */ + } else if (pm_conc_connection_list[0].mac != + pm_conc_connection_list[1].mac) { + /* SBS */ + if ((WLAN_REG_IS_5GHZ_CH(pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_5GHZ_CH(pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_GO_P2P_CLI_SBS_5_1x1; + } else { + /* DBS */ + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_GO_P2P_CLI_DBS_1x1; + else + index = PM_P2P_GO_P2P_CLI_DBS_2x2; + } + } + return index; +} + +static enum policy_mgr_two_connection_mode + policy_mgr_get_third_connection_pcl_table_index_go_sap(void) +{ + enum policy_mgr_two_connection_mode index = PM_MAX_TWO_CONNECTION_MODE; + /* SCC */ + if (pm_conc_connection_list[0].chan == + pm_conc_connection_list[1].chan) { + if (WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[0].chan)) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_GO_SAP_SCC_24_1x1; + else + index = PM_P2P_GO_SAP_SCC_24_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_GO_SAP_SCC_5_1x1; + else + index = PM_P2P_GO_SAP_SCC_5_2x2; + } + /* MCC */ + } else if (pm_conc_connection_list[0].mac == + pm_conc_connection_list[1].mac) { + if ((WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_GO_SAP_MCC_24_1x1; + else + index = PM_P2P_GO_SAP_MCC_24_2x2; + } else if ((WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_GO_SAP_MCC_5_1x1; + else + index = PM_P2P_GO_SAP_MCC_5_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + 
pm_conc_connection_list[0].chain_mask) + index = PM_P2P_GO_SAP_MCC_24_5_1x1; + else + index = PM_P2P_GO_SAP_MCC_24_5_2x2; + } + /* SBS or DBS */ + } else if (pm_conc_connection_list[0].mac != + pm_conc_connection_list[1].mac) { + /* SBS */ + if ((WLAN_REG_IS_5GHZ_CH(pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_5GHZ_CH(pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_GO_SAP_SBS_5_1x1; + } else { + /* DBS */ + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_P2P_GO_SAP_DBS_1x1; + else + index = PM_P2P_GO_SAP_DBS_2x2; + } + } + return index; +} + +static enum policy_mgr_two_connection_mode + policy_mgr_get_third_connection_pcl_table_index_sta_sta(void) +{ + enum policy_mgr_two_connection_mode index = PM_MAX_TWO_CONNECTION_MODE; + /* SCC */ + if (pm_conc_connection_list[0].chan == + pm_conc_connection_list[1].chan) { + if (WLAN_REG_IS_24GHZ_CH + (pm_conc_connection_list[0].chan)) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_STA_SCC_24_1x1; + else + index = PM_STA_STA_SCC_24_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_STA_SCC_5_1x1; + else + index = PM_STA_STA_SCC_5_2x2; + } + /* MCC */ + } else if (pm_conc_connection_list[0].mac == + pm_conc_connection_list[1].mac) { + if ((WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_24GHZ_CH( + pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_STA_MCC_24_1x1; + else + index = PM_STA_STA_MCC_24_2x2; + } else if ((WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_5GHZ_CH( + pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_STA_MCC_5_1x1; + else + index = PM_STA_STA_MCC_5_2x2; + } else { + if (POLICY_MGR_ONE_ONE == + 
pm_conc_connection_list[0].chain_mask) + index = PM_STA_STA_MCC_24_5_1x1; + else + index = PM_STA_STA_MCC_24_5_2x2; + } + /* SBS or DBS */ + } else if (pm_conc_connection_list[0].mac != + pm_conc_connection_list[1].mac) { + /* SBS */ + if ((WLAN_REG_IS_5GHZ_CH(pm_conc_connection_list[0].chan)) && + (WLAN_REG_IS_5GHZ_CH(pm_conc_connection_list[1].chan))) { + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_STA_SBS_5_1x1; + } else { + /* DBS */ + if (POLICY_MGR_ONE_ONE == + pm_conc_connection_list[0].chain_mask) + index = PM_STA_STA_DBS_1x1; + else + index = PM_STA_STA_DBS_2x2; + } + } + return index; +} + +enum policy_mgr_two_connection_mode + policy_mgr_get_third_connection_pcl_table_index( + struct wlan_objmgr_psoc *psoc) +{ + enum policy_mgr_two_connection_mode index = PM_MAX_TWO_CONNECTION_MODE; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return index; + } + + qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock); + if (((PM_P2P_CLIENT_MODE == pm_conc_connection_list[0].mode) && + (PM_SAP_MODE == pm_conc_connection_list[1].mode)) || + ((PM_SAP_MODE == pm_conc_connection_list[0].mode) && + (PM_P2P_CLIENT_MODE == pm_conc_connection_list[1].mode))) + index = + policy_mgr_get_third_connection_pcl_table_index_cli_sap(); + else if (((PM_STA_MODE == pm_conc_connection_list[0].mode) && + (PM_SAP_MODE == pm_conc_connection_list[1].mode)) || + ((PM_SAP_MODE == pm_conc_connection_list[0].mode) && + (PM_STA_MODE == pm_conc_connection_list[1].mode))) + index = + policy_mgr_get_third_connection_pcl_table_index_sta_sap(); + else if ((PM_SAP_MODE == pm_conc_connection_list[0].mode) && + (PM_SAP_MODE == pm_conc_connection_list[1].mode)) + index = + policy_mgr_get_third_connection_pcl_table_index_sap_sap(); + else if (((PM_STA_MODE == pm_conc_connection_list[0].mode) && + (PM_P2P_GO_MODE == pm_conc_connection_list[1].mode)) || + ((PM_P2P_GO_MODE == 
pm_conc_connection_list[0].mode) && + (PM_STA_MODE == pm_conc_connection_list[1].mode))) + index = + policy_mgr_get_third_connection_pcl_table_index_sta_go(); + else if (((PM_STA_MODE == pm_conc_connection_list[0].mode) && + (PM_P2P_CLIENT_MODE == pm_conc_connection_list[1].mode)) || + ((PM_P2P_CLIENT_MODE == pm_conc_connection_list[0].mode) && + (PM_STA_MODE == pm_conc_connection_list[1].mode))) + index = + policy_mgr_get_third_connection_pcl_table_index_sta_cli(); + else if (((PM_P2P_GO_MODE == pm_conc_connection_list[0].mode) && + (PM_P2P_CLIENT_MODE == pm_conc_connection_list[1].mode)) || + ((PM_P2P_CLIENT_MODE == pm_conc_connection_list[0].mode) && + (PM_P2P_GO_MODE == pm_conc_connection_list[1].mode))) + index = + policy_mgr_get_third_connection_pcl_table_index_go_cli(); + else if (((PM_SAP_MODE == pm_conc_connection_list[0].mode) && + (PM_P2P_GO_MODE == pm_conc_connection_list[1].mode)) || + ((PM_P2P_GO_MODE == pm_conc_connection_list[0].mode) && + (PM_SAP_MODE == pm_conc_connection_list[1].mode))) + index = + policy_mgr_get_third_connection_pcl_table_index_go_sap(); + else if (((PM_STA_MODE == pm_conc_connection_list[0].mode) && + (PM_STA_MODE == pm_conc_connection_list[1].mode)) || + ((PM_STA_MODE == pm_conc_connection_list[0].mode) && + (PM_STA_MODE == pm_conc_connection_list[1].mode))) + index = + policy_mgr_get_third_connection_pcl_table_index_sta_sta(); + + policy_mgr_debug("mode0:%d mode1:%d chan0:%d chan1:%d chain:%d index:%d", + pm_conc_connection_list[0].mode, + pm_conc_connection_list[1].mode, + pm_conc_connection_list[0].chan, + pm_conc_connection_list[1].chan, + pm_conc_connection_list[0].chain_mask, index); + + qdf_mutex_release(&pm_ctx->qdf_conc_list_lock); + + return index; +} + +uint8_t +policy_mgr_get_nondfs_preferred_channel(struct wlan_objmgr_psoc *psoc, + enum policy_mgr_con_mode mode, + bool for_existing_conn) +{ + uint8_t pcl_channels[QDF_MAX_NUM_CHAN]; + uint8_t pcl_weight[QDF_MAX_NUM_CHAN]; + struct policy_mgr_psoc_priv_obj *pm_ctx; 
+ + /* + * in worst case if we can't find any channel at all + * then return 2.4G channel, so atleast we won't fall + * under 5G MCC scenario + */ + uint8_t channel = PM_24_GHZ_CHANNEL_6; + uint32_t i, pcl_len = 0; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return channel; + } + + if (true == for_existing_conn) { + /* + * First try to see if there is any non-dfs channel already + * present in current connection table. If yes then return + * that channel + */ + if (true == policy_mgr_is_any_nondfs_chnl_present( + psoc, &channel)) + return channel; + + if (QDF_STATUS_SUCCESS != policy_mgr_get_pcl_for_existing_conn( + psoc, mode, + &pcl_channels[0], &pcl_len, + pcl_weight, QDF_ARRAY_SIZE(pcl_weight), + false)) + return channel; + } else { + if (QDF_STATUS_SUCCESS != policy_mgr_get_pcl(psoc, mode, + &pcl_channels[0], &pcl_len, + pcl_weight, QDF_ARRAY_SIZE(pcl_weight))) + return channel; + } + + for (i = 0; i < pcl_len; i++) { + if (wlan_reg_is_dfs_ch(pm_ctx->pdev, pcl_channels[i])) { + continue; + } else { + channel = pcl_channels[i]; + break; + } + } + + return channel; +} + +static void policy_mgr_remove_dsrc_channels(uint8_t *chan_list, + uint32_t *num_channels, + struct wlan_objmgr_pdev *pdev) +{ + uint32_t num_chan_temp = 0; + int i; + + for (i = 0; i < *num_channels; i++) { + if (!wlan_reg_is_dsrc_chan(pdev, chan_list[i])) { + chan_list[num_chan_temp] = chan_list[i]; + num_chan_temp++; + } + } + + *num_channels = num_chan_temp; +} + +QDF_STATUS policy_mgr_get_valid_chans_from_range(struct wlan_objmgr_psoc *psoc, + uint8_t *ch_list, + uint32_t *ch_cnt, + enum policy_mgr_con_mode mode) +{ + uint8_t ch_weight_list[QDF_MAX_NUM_CHAN] = {0}; + uint32_t ch_weight_len; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + size_t chan_index = 0; + + if (!ch_list || !ch_cnt) { + policy_mgr_err("NULL parameters"); + return QDF_STATUS_E_FAILURE; + } + + for (chan_index = 0; chan_index < *ch_cnt; chan_index++) + 
ch_weight_list[chan_index] = WEIGHT_OF_GROUP1_PCL_CHANNELS; + + ch_weight_len = *ch_cnt; + + /* check the channel avoidance list for beaconing entities */ + if (mode == PM_SAP_MODE || mode == PM_P2P_GO_MODE) + policy_mgr_update_with_safe_channel_list(psoc, ch_list, + ch_cnt, ch_weight_list, + ch_weight_len); + + status = policy_mgr_mode_specific_modification_on_pcl( + psoc, ch_list, ch_weight_list, ch_cnt, mode); + + if (QDF_IS_STATUS_ERROR(status)) { + policy_mgr_err("failed to get modified pcl for mode %d", mode); + return status; + } + + status = policy_mgr_modify_pcl_based_on_dnbs(psoc, ch_list, + ch_weight_list, ch_cnt); + + if (QDF_IS_STATUS_ERROR(status)) { + policy_mgr_err("failed to get modified pcl based on DNBS"); + return status; + } + + return status; +} + +QDF_STATUS policy_mgr_get_valid_chans(struct wlan_objmgr_psoc *psoc, + uint8_t *chan_list, uint32_t *list_len) +{ + QDF_STATUS status; + struct policy_mgr_psoc_priv_obj *pm_ctx; + + *list_len = 0; + + pm_ctx = policy_mgr_get_context(psoc); + if (!pm_ctx) { + policy_mgr_err("Invalid Context"); + return QDF_STATUS_E_FAILURE; + } + + if (!pm_ctx->sme_cbacks.sme_get_valid_channels) { + policy_mgr_err("sme_get_valid_chans callback is NULL"); + return QDF_STATUS_E_FAILURE; + } + + *list_len = QDF_MAX_NUM_CHAN; + status = pm_ctx->sme_cbacks.sme_get_valid_channels( + chan_list, list_len); + if (QDF_IS_STATUS_ERROR(status)) { + policy_mgr_err("Error in getting valid channels"); + *list_len = 0; + return status; + } + + policy_mgr_remove_dsrc_channels(chan_list, list_len, pm_ctx->pdev); + + return QDF_STATUS_SUCCESS; +} + +bool policy_mgr_list_has_24GHz_channel(uint8_t *channel_list, + uint32_t list_len) +{ + uint32_t i; + + for (i = 0; i < list_len; i++) { + if (WLAN_REG_IS_24GHZ_CH(channel_list[i])) + return true; + } + + return false; +} + +QDF_STATUS policy_mgr_set_sap_mandatory_channels(struct wlan_objmgr_psoc *psoc, + uint8_t *channels, uint32_t len) +{ + uint32_t i; + struct policy_mgr_psoc_priv_obj 
		*pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return QDF_STATUS_E_FAILURE;
	}

	/* An empty mandatory list is a configuration error */
	if (!len) {
		policy_mgr_err("No mandatory freq/chan configured");
		return QDF_STATUS_E_FAILURE;
	}

	/* The mandatory list must always allow a 2.4 GHz fallback for SAP */
	if (!policy_mgr_list_has_24GHz_channel(channels, len)) {
		policy_mgr_err("2.4GHz channels missing, this is not expected");
		return QDF_STATUS_E_FAILURE;
	}

	/* NOTE(review): this logs the OLD length, before it is overwritten below */
	policy_mgr_debug("mandatory chan length:%d",
			pm_ctx->sap_mandatory_channels_len);

	for (i = 0; i < len; i++) {
		pm_ctx->sap_mandatory_channels[i] = channels[i];
		policy_mgr_debug("chan:%d", pm_ctx->sap_mandatory_channels[i]);
	}

	pm_ctx->sap_mandatory_channels_len = len;

	return QDF_STATUS_SUCCESS;
}

/**
 * policy_mgr_is_sap_mandatory_channel_set() - Check if any SAP mandatory
 * channel has been configured
 * @psoc: PSOC object information
 *
 * Return: true if the stored mandatory channel list is non-empty, false
 *         otherwise (including when the policy mgr context lookup fails)
 */
bool policy_mgr_is_sap_mandatory_channel_set(struct wlan_objmgr_psoc *psoc)
{
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return false;
	}

	if (pm_ctx->sap_mandatory_channels_len)
		return true;
	else
		return false;
}

/**
 * policy_mgr_modify_sap_pcl_based_on_mandatory_channel() - Filter a SAP PCL
 * down to channels that also appear on the mandatory channel list
 * @psoc: PSOC object information
 * @pcl_list_org: PCL channel list; compacted in place
 * @weight_list_org: weights parallel to @pcl_list_org; compacted in place
 * @pcl_len_org: in/out number of valid entries in the two lists
 *
 * No-op (success) when no mandatory channels are configured. Surviving
 * entries keep their original relative order and their original weights.
 * Fails if the mandatory list itself has no 2.4 GHz channel.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */
QDF_STATUS policy_mgr_modify_sap_pcl_based_on_mandatory_channel(
		struct wlan_objmgr_psoc *psoc,
		uint8_t *pcl_list_org,
		uint8_t *weight_list_org,
		uint32_t *pcl_len_org)
{
	uint32_t i, j, pcl_len = 0;
	bool found;
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return QDF_STATUS_E_FAILURE;
	}

	/* Nothing configured to filter against: leave the PCL untouched */
	if (!pm_ctx->sap_mandatory_channels_len)
		return QDF_STATUS_SUCCESS;

	if (!policy_mgr_list_has_24GHz_channel(pm_ctx->sap_mandatory_channels,
			pm_ctx->sap_mandatory_channels_len)) {
		policy_mgr_err("fav channel list is missing 2.4GHz channels");
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < pm_ctx->sap_mandatory_channels_len; i++)
		policy_mgr_debug("fav chan:%d",
			pm_ctx->sap_mandatory_channels[i]);

	/*
	 * In-place compaction: pcl_len only advances for entries found on
	 * the mandatory list, so retained entries shift to the front.
	 */
	for (i = 0; i < *pcl_len_org; i++) {
		found = false;
		/* Defensive bound: never index past the fixed-size PCL arrays */
		if (i >= QDF_MAX_NUM_CHAN) {
			policy_mgr_debug("index is exceeding QDF_MAX_NUM_CHAN");
			break;
		}
		for (j = 0; j < pm_ctx->sap_mandatory_channels_len; j++) {
			if (pcl_list_org[i] ==
			    pm_ctx->sap_mandatory_channels[j]) {
				found = true;
				break;
			}
		}
		if (found && (pcl_len < QDF_MAX_NUM_CHAN)) {
			pcl_list_org[pcl_len] = pcl_list_org[i];
			weight_list_org[pcl_len++] = weight_list_org[i];
		}
	}
	*pcl_len_org = pcl_len;

	return QDF_STATUS_SUCCESS;
}

/**
 * policy_mgr_get_sap_mandatory_channel() - Pick a SAP channel that is on
 * both the PCL and the mandatory channel list
 * @psoc: PSOC object information
 * @chan: out parameter; receives the selected channel number
 *
 * Builds the SAP PCL (for an existing SAP connection, or the generic SAP
 * PCL when no SAP is up yet), intersects it with the mandatory list, and
 * returns the first surviving entry (the PCL is assumed to be ordered by
 * preference — highest first).
 *
 * Return: QDF_STATUS_SUCCESS with @chan set, or an error status when no
 *         common channel exists or a PCL lookup fails
 */
QDF_STATUS policy_mgr_get_sap_mandatory_channel(struct wlan_objmgr_psoc *psoc,
		uint32_t *chan)
{
	QDF_STATUS status;
	struct policy_mgr_pcl_list pcl;

	qdf_mem_zero(&pcl, sizeof(pcl));

	status = policy_mgr_get_pcl_for_existing_conn(psoc, PM_SAP_MODE,
			pcl.pcl_list, &pcl.pcl_len,
			pcl.weight_list, QDF_ARRAY_SIZE(pcl.weight_list),
			false);
	if (QDF_IS_STATUS_ERROR(status)) {
		policy_mgr_err("Unable to get PCL for SAP");
		return status;
	}

	/*
	 * Get inside below loop if no existing SAP connection and hence a new
	 * SAP connection might be coming up. pcl.pcl_len can be 0 if no common
	 * channel between PCL & mandatory channel list as well
	 */
	if (!pcl.pcl_len && !policy_mgr_mode_specific_connection_count(psoc,
	    PM_SAP_MODE, NULL)) {
		policy_mgr_debug("policy_mgr_get_pcl_for_existing_conn returned no pcl");
		status = policy_mgr_get_pcl(psoc, PM_SAP_MODE,
				pcl.pcl_list, &pcl.pcl_len,
				pcl.weight_list,
				QDF_ARRAY_SIZE(pcl.weight_list));
		if (QDF_IS_STATUS_ERROR(status)) {
			policy_mgr_err("Unable to get PCL for SAP: policy_mgr_get_pcl");
			return status;
		}
	}

	status = policy_mgr_modify_sap_pcl_based_on_mandatory_channel(
			psoc, pcl.pcl_list,
			pcl.weight_list,
			&pcl.pcl_len);
	if (QDF_IS_STATUS_ERROR(status)) {
		policy_mgr_err("Unable to modify SAP PCL");
		return status;
	}

	if (!pcl.pcl_len) {
		policy_mgr_err("No common channel between mandatory list & PCL");
		return QDF_STATUS_E_FAILURE;
	}

	/* First entry is the most preferred common channel */
	*chan = pcl.pcl_list[0];
	policy_mgr_debug("mandatory channel:%d", *chan);

	return QDF_STATUS_SUCCESS;
}

/**
 * policy_mgr_get_valid_chan_weights() - Fill per-channel weights for the
 * saved valid channel list
 * @psoc: PSOC object information
 * @weight: in/out structure; @weight->saved_chan_list / saved_num_chan and
 *          @weight->pcl_list / weight_list / pcl_len are inputs, and
 *          @weight->weighed_valid_list is the output, parallel to
 *          saved_chan_list
 *
 * Every channel starts as WEIGHT_OF_DISALLOWED_CHANNELS. If a STA
 * connection exists, channels on which a new STA connection would be
 * allowed are promoted to WEIGHT_OF_NON_PCL_CHANNELS. Finally, channels
 * that appear on the PCL receive their PCL weight.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE when the policy mgr
 *         context lookup fails
 */
QDF_STATUS policy_mgr_get_valid_chan_weights(struct wlan_objmgr_psoc *psoc,
		struct policy_mgr_pcl_chan_weights *weight)
{
	uint32_t i, j;
	struct policy_mgr_conc_connection_info
			info[MAX_NUMBER_OF_CONC_CONNECTIONS] = { {0} };
	uint8_t num_cxn_del = 0;
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return QDF_STATUS_E_FAILURE;
	}

	/* Default: everything disallowed until proven otherwise */
	qdf_mem_set(weight->weighed_valid_list, QDF_MAX_NUM_CHAN,
		WEIGHT_OF_DISALLOWED_CHANNELS);
	qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
	if (policy_mgr_mode_specific_connection_count(
		psoc, PM_STA_MODE, NULL) > 0) {
		/*
		 * Store the STA mode's parameter and temporarily delete it
		 * from the concurrency table. This way the allow concurrency
		 * check can be used as though a new connection is coming up,
		 * allowing to detect the disallowed channels.
		 */
		policy_mgr_store_and_del_conn_info(psoc, PM_STA_MODE, false,
			info, &num_cxn_del);
		/*
		 * There is a small window between releasing the above lock
		 * and acquiring the same in policy_mgr_allow_concurrency,
		 * below!
		 */
		for (i = 0; i < weight->saved_num_chan; i++) {
			if (policy_mgr_is_concurrency_allowed
				(psoc, PM_STA_MODE, weight->saved_chan_list[i],
				HW_MODE_20_MHZ)) {
				weight->weighed_valid_list[i] =
					WEIGHT_OF_NON_PCL_CHANNELS;
			}
		}
		/* Restore the connection info */
		policy_mgr_restore_deleted_conn_info(psoc, info, num_cxn_del);
	}
	qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);

	/* PCL channels override the generic weight with their PCL weight */
	for (i = 0; i < weight->saved_num_chan; i++) {
		for (j = 0; j < weight->pcl_len; j++) {
			if (weight->saved_chan_list[i] == weight->pcl_list[j]) {
				weight->weighed_valid_list[i] =
					weight->weight_list[j];
				break;
			}
		}
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * policy_mgr_mode_specific_get_channel() - Channel of the first active
 * connection of a given mode
 * @psoc: PSOC object information
 * @mode: concurrency mode to look for (STA/SAP/P2P GO/...)
 *
 * Scans the concurrent-connection table under the concurrency lock and
 * returns the channel of the first in-use entry matching @mode.
 *
 * Return: the channel number, or 0 when no matching connection exists or
 *         the policy mgr context lookup fails
 */
uint8_t policy_mgr_mode_specific_get_channel(
	struct wlan_objmgr_psoc *psoc, enum policy_mgr_con_mode mode)
{
	uint32_t conn_index;
	uint8_t channel = 0;
	struct policy_mgr_psoc_priv_obj *pm_ctx;

	pm_ctx = policy_mgr_get_context(psoc);
	if (!pm_ctx) {
		policy_mgr_err("Invalid Context");
		return channel;
	}
	/* provides the channel for the first matching mode type */
	qdf_mutex_acquire(&pm_ctx->qdf_conc_list_lock);
	for (conn_index = 0; conn_index < MAX_NUMBER_OF_CONC_CONNECTIONS;
		conn_index++) {
		if ((pm_conc_connection_list[conn_index].mode == mode) &&
			pm_conc_connection_list[conn_index].in_use) {
			channel = pm_conc_connection_list[conn_index].chan;
			break;
		}
	}
	qdf_mutex_release(&pm_ctx->qdf_conc_list_lock);

	return channel;
}

/**
 * policy_mgr_get_alternate_channel_for_sap() - Suggest an alternate SAP
 * channel from the SAP PCL
 * @psoc: PSOC object information
 *
 * Return: the first (most preferred) channel of the SAP PCL, or 0 when the
 *         PCL lookup fails. NOTE(review): a zero-length PCL also yields
 *         pcl_channels[0], i.e. stale stack data — presumably callers treat
 *         only a successful non-empty PCL as valid; confirm pcl_len is
 *         checked or that policy_mgr_get_pcl fails on an empty list.
 */
uint8_t policy_mgr_get_alternate_channel_for_sap(
	struct wlan_objmgr_psoc *psoc)
{
	uint8_t pcl_channels[QDF_MAX_NUM_CHAN];
	uint8_t pcl_weight[QDF_MAX_NUM_CHAN];
	uint8_t channel = 0;
	uint32_t pcl_len = 0;

	if (QDF_STATUS_SUCCESS == policy_mgr_get_pcl(psoc, PM_SAP_MODE,
		&pcl_channels[0], &pcl_len,
		pcl_weight, QDF_ARRAY_SIZE(pcl_weight))) {
		channel = pcl_channels[0];
	}

	return channel;
}
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_tables_1x1_dbs_i.h
b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_tables_1x1_dbs_i.h new file mode 100644 index 0000000000000000000000000000000000000000..92501e8872ce6483339ad478ac478504c68eda33 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_tables_1x1_dbs_i.h @@ -0,0 +1,1170 @@ +/* + * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
 */

#ifndef __WLAN_POLICY_MGR_TABLES_1X1_DBS_H
#define __WLAN_POLICY_MGR_TABLES_1X1_DBS_H

#include "wlan_policy_mgr_api.h"

/**
 * pm_second_connection_pcl_dbs_1x1_table - table which provides PCL
 * for the 2nd connection, when we have a connection already in
 * the system (with DBS supported by HW)
 *
 * Rows are indexed by the existing connection's state (mode/band/chain
 * configuration); columns by the incoming connection's mode; each cell
 * holds three PCL choices. PM_MAX_PCL_TYPE cells mark unsupported
 * combinations.
 *
 * NOTE(review): this table is defined (not just declared) in a header
 * without `static`, unlike the pm_third_connection table below — if this
 * header is ever included from more than one translation unit that is a
 * duplicate-definition link error; confirm single inclusion.
 */
pm_dbs_pcl_second_connection_table_type
pm_second_connection_pcl_dbs_1x1_table = {
	[PM_STA_24_1x1] = {
	[PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_IBSS_MODE] = {PM_5G, PM_5G, PM_5G } },

	[PM_STA_24_2x2] = {
	[PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_IBSS_MODE] = {PM_5G, PM_5G, PM_5G } },

	[PM_STA_5_1x1] = {
	[PM_STA_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_SAP_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_P2P_CLIENT_MODE] = {
		PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_P2P_GO_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_IBSS_MODE] = {PM_24G, PM_24G, PM_24G } },

	[PM_STA_5_2x2] = {
	[PM_STA_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_SAP_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_P2P_CLIENT_MODE] = {
		PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_P2P_GO_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_IBSS_MODE] = {PM_24G, PM_24G, PM_24G } },

	[PM_P2P_CLI_24_1x1] = {
	[PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_IBSS_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} },

	[PM_P2P_CLI_24_2x2] = {
	[PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_IBSS_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} },

	[PM_P2P_CLI_5_1x1] = {
	[PM_STA_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_SAP_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_P2P_CLIENT_MODE] = {
		PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_P2P_GO_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_IBSS_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} },

	[PM_P2P_CLI_5_2x2] = {
	[PM_STA_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_SAP_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_P2P_CLIENT_MODE] = {
		PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_P2P_GO_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_IBSS_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} },

	[PM_P2P_GO_24_1x1] = {
	[PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_P2P_GO_MODE] = {PM_5G, PM_5G, PM_5G},
	[PM_IBSS_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} },

	[PM_P2P_GO_24_2x2] = {
	[PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_P2P_GO_MODE] = {PM_5G, PM_5G, PM_5G},
	[PM_IBSS_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} },

	[PM_P2P_GO_5_1x1] = {
	[PM_STA_MODE] = {PM_SCC_CH_24G, PM_SCC_CH_24G, PM_SCC_CH_24G},
	[PM_SAP_MODE] = {PM_SCC_CH_24G, PM_SCC_CH_24G, PM_SCC_CH_24G},
	[PM_P2P_CLIENT_MODE] = {
		PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_P2P_GO_MODE] = {PM_24G, PM_24G, PM_24G},
	[PM_IBSS_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} },

	[PM_P2P_GO_5_2x2] = {
	[PM_STA_MODE] = {PM_SCC_CH_24G, PM_SCC_CH_24G, PM_SCC_CH_24G},
	[PM_SAP_MODE] = {PM_SCC_CH_24G, PM_SCC_CH_24G, PM_SCC_CH_24G},
	[PM_P2P_CLIENT_MODE] = {
		PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_P2P_GO_MODE] = {PM_24G, PM_24G, PM_24G},
	[PM_IBSS_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} },

	[PM_SAP_24_1x1] = {
	[PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_IBSS_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} },

	[PM_SAP_24_2x2] = {
	[PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_IBSS_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} },

	[PM_SAP_5_1x1] = {
	[PM_STA_MODE] = {PM_SCC_CH_24G, PM_SCC_CH_24G, PM_SCC_CH_24G},
	[PM_SAP_MODE] = {PM_SCC_CH_24G, PM_SCC_CH_24G, PM_SCC_CH_24G},
	[PM_P2P_CLIENT_MODE] = {
		PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_P2P_GO_MODE] = {PM_SCC_CH_24G, PM_SCC_CH_24G, PM_SCC_CH_24G},
	[PM_IBSS_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} },

	[PM_SAP_5_2x2] = {
	[PM_STA_MODE] = {PM_SCC_CH_24G, PM_SCC_CH_24G, PM_SCC_CH_24G},
	[PM_SAP_MODE] = {PM_SCC_CH_24G, PM_SCC_CH_24G, PM_SCC_CH_24G},
	[PM_P2P_CLIENT_MODE] = {
		PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G},
	[PM_P2P_GO_MODE] = {PM_SCC_CH_24G, PM_SCC_CH_24G, PM_SCC_CH_24G},
	[PM_IBSS_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} },

	[PM_IBSS_24_1x1] = {
	[PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_SAP_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE},
	[PM_P2P_CLIENT_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE},
	[PM_P2P_GO_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE},
	[PM_IBSS_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} },

	[PM_IBSS_24_2x2] = {
	[PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH},
	[PM_SAP_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE},
	[PM_P2P_CLIENT_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE},
	[PM_P2P_GO_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE},
	[PM_IBSS_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} },

	[PM_IBSS_5_1x1] = {
	[PM_STA_MODE] = {PM_24G_SCC_CH, PM_24G_SCC_CH, PM_24G_SCC_CH},
	[PM_SAP_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE},
	[PM_P2P_CLIENT_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE},
	[PM_P2P_GO_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE},
	[PM_IBSS_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} },

	[PM_IBSS_5_2x2] = {
	[PM_STA_MODE] = {PM_24G_SCC_CH, PM_24G_SCC_CH, PM_24G_SCC_CH},
	[PM_SAP_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE},
	[PM_P2P_CLIENT_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE},
	[PM_P2P_GO_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE},
	[PM_IBSS_MODE] = {
		PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} },
};

/**
 * pm_third_connection_pcl_dbs_1x1_table - table which provides PCL for
 * the 3rd connection, when we have two connections already in
 * the system (with DBS supported by HW)
 */
static pm_dbs_pcl_third_connection_table_type
pm_third_connection_pcl_dbs_1x1_table = {
	[PM_STA_SAP_SCC_24_1x1] = {
[PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_SCC_24_2x2] = { + [PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_MCC_24_1x1] = { + [PM_STA_MODE] = {PM_5G_MCC_CH, PM_5G, PM_5G_MCC_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_MCC_24_2x2] = { + [PM_STA_MODE] = {PM_5G_MCC_CH, PM_5G, PM_5G_MCC_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_SCC_5_1x1] = { + [PM_STA_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_SCC_5_2x2] = { + [PM_STA_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + 
[PM_STA_SAP_MCC_5_1x1] = { + [PM_STA_MODE] = {PM_MCC_CH_24G, PM_24G, PM_24G}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_MCC_5_2x2] = { + [PM_STA_MODE] = {PM_MCC_CH_24G, PM_24G, PM_24G}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_MCC_24_5_1x1] = { + [PM_STA_MODE] = {PM_MCC_CH_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_MCC_24_5_2x2] = { + [PM_STA_MODE] = {PM_MCC_CH_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_DBS_1x1] = { + [PM_STA_MODE] = { + PM_SCC_ON_5_SCC_ON_24_5G, PM_NONE, PM_SCC_ON_5_SCC_ON_24}, + [PM_SAP_MODE] = {PM_SCC_ON_5_SCC_ON_24, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_SCC_ON_5_SCC_ON_24, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_SCC_24_1x1] = { + [PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_SCC_24_2x2] = { + [PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_MCC_24_1x1] = { + [PM_STA_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_MCC_24_2x2] = { + [PM_STA_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_SCC_5_1x1] = { + [PM_STA_MODE] = {PM_24G_SCC_CH_SBS_CH_5G, PM_24G_SCC_CH, + PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_SCC_5_2x2] = { + [PM_STA_MODE] = {PM_24G_SCC_CH_SBS_CH_5G, PM_24G_SCC_CH, + PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_MCC_5_1x1] = { + [PM_STA_MODE] = 
{PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_SAP_MODE] = {PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_MCC_5_2x2] = { + [PM_STA_MODE] = {PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_SAP_MODE] = {PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_MCC_24_5_1x1] = { + [PM_STA_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_MCC_24_5_2x2] = { + [PM_STA_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_DBS_1x1] = { + [PM_STA_MODE] = { PM_SCC_ON_5_SCC_ON_24_5G, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_SAP_MODE] = { PM_SCC_ON_5_SCC_ON_24, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_SCC_ON_5_SCC_ON_24, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_SCC_24_1x1] = { + [PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, 
PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_SCC_24_2x2] = { + [PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_24_1x1] = { + [PM_STA_MODE] = {PM_5G_MCC_CH, PM_5G, PM_5G_MCC_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_5G_MCC_CH, PM_5G, PM_5G_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_24_2x2] = { + [PM_STA_MODE] = {PM_5G_MCC_CH, PM_5G, PM_5G_MCC_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_5G_MCC_CH, PM_5G, PM_5G_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_SCC_5_1x1] = { + [PM_STA_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_SCC_5_2x2] = { + [PM_STA_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_SCC_CH_24G, PM_24G_SCC_CH, PM_SCC_CH_24G}, + [PM_IBSS_MODE] 
= { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_5_1x1] = { + [PM_STA_MODE] = {PM_MCC_CH_24G, PM_24G, PM_24G}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_MCC_CH_24G, PM_24G, PM_24G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_5_2x2] = { + [PM_STA_MODE] = {PM_MCC_CH_24G, PM_24G, PM_24G}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_MCC_CH_24G, PM_24G, PM_24G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_24_5_1x1] = { + [PM_STA_MODE] = {PM_MCC_CH_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_MCC_CH_24G, PM_24G, PM_24G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_24_5_2x2] = { + [PM_STA_MODE] = {PM_MCC_CH_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_MCC_CH_24G, PM_24G, PM_24G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_DBS_1x1] = { + [PM_STA_MODE] = { + PM_SCC_ON_5_SCC_ON_24_5G, PM_NONE, PM_SCC_ON_5_SCC_ON_24}, + [PM_SAP_MODE] = {PM_SCC_ON_5_SCC_ON_24, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_SCC_ON_5_SCC_ON_24_5G, PM_NONE, PM_SCC_ON_5_SCC_ON_24}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_SCC_24_1x1] = { + [PM_STA_MODE] = { + 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_SCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_MCC_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_MCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_SCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_SCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_MCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_MCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_MCC_24_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_MCC_24_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = 
{ + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_DBS_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_SCC_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_SCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_SCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_SCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_24_5_1x1] = { + [PM_STA_MODE] = { + 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_24_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_DBS_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_SCC_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_SCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + 
[PM_P2P_GO_SAP_MCC_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_MCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_SCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_SCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_MCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_MCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_MCC_24_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_MCC_24_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_DBS_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_SCC_ON_5_SCC_ON_24, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_SCC_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + 
[PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_SCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_MCC_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_MCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_SCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_SCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_MCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_MCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_MCC_24_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_MCC_24_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_DBS_1x1] = { + [PM_STA_MODE] = { + 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + +}; + +/** + * next_action_two_connection_table - table which provides next + * action while a new connection is coming up, with one + * connection already in the system + */ +static policy_mgr_next_action_two_connection_table_type + pm_next_action_two_connection_dbs_1x1_table = { + [PM_STA_24_1x1] = {PM_NOP, PM_DBS}, + [PM_STA_24_2x2] = {PM_NOP, PM_DBS_DOWNGRADE}, + [PM_STA_5_1x1] = {PM_DBS, PM_NOP}, + [PM_STA_5_2x2] = {PM_DBS_DOWNGRADE, PM_NOP}, + [PM_P2P_CLI_24_1x1] = {PM_NOP, PM_DBS}, + [PM_P2P_CLI_24_2x2] = {PM_NOP, PM_DBS_DOWNGRADE}, + [PM_P2P_CLI_5_1x1] = {PM_DBS, PM_NOP}, + [PM_P2P_CLI_5_2x2] = {PM_DBS_DOWNGRADE, PM_NOP}, + [PM_P2P_GO_24_1x1] = {PM_NOP, PM_DBS}, + [PM_P2P_GO_24_2x2] = {PM_NOP, PM_DBS_DOWNGRADE}, + [PM_P2P_GO_5_1x1] = {PM_DBS, PM_NOP}, + [PM_P2P_GO_5_2x2] = {PM_DBS_DOWNGRADE, PM_NOP}, + [PM_SAP_24_1x1] = {PM_NOP, PM_DBS}, + [PM_SAP_24_2x2] = {PM_NOP, PM_DBS_DOWNGRADE}, + [PM_SAP_5_1x1] = {PM_DBS, PM_NOP}, + [PM_SAP_5_2x2] = {PM_DBS_DOWNGRADE, PM_NOP}, + [PM_IBSS_24_1x1] = {PM_NOP, PM_DBS}, + [PM_IBSS_24_2x2] = {PM_NOP, PM_DBS_DOWNGRADE}, + [PM_IBSS_5_1x1] = {PM_DBS, PM_NOP}, + [PM_IBSS_5_2x2] = {PM_DBS_DOWNGRADE, PM_NOP}, +}; + +/** + * next_action_three_connection_table - table which provides next + * action while a new connection is coming up, with two + * connections already in the system + */ +static policy_mgr_next_action_three_connection_table_type + pm_next_action_three_connection_dbs_1x1_table = { + [PM_STA_SAP_SCC_24_1x1] = {PM_NOP, PM_DBS}, + [PM_STA_SAP_SCC_24_2x2] = {PM_NOP, PM_DBS_DOWNGRADE}, + [PM_STA_SAP_MCC_24_1x1] = {PM_NOP, PM_DBS}, + [PM_STA_SAP_MCC_24_2x2] = 
{PM_NOP, PM_DBS_DOWNGRADE}, + [PM_STA_SAP_SCC_5_1x1] = {PM_DBS, PM_NOP}, + [PM_STA_SAP_SCC_5_2x2] = {PM_DBS_DOWNGRADE, PM_NOP}, + [PM_STA_SAP_MCC_5_1x1] = {PM_DBS, PM_NOP}, + [PM_STA_SAP_MCC_5_2x2] = {PM_DBS_DOWNGRADE, PM_NOP}, + [PM_STA_SAP_MCC_24_5_1x1] = {PM_DBS, PM_DBS}, + [PM_STA_SAP_MCC_24_5_2x2] = {PM_DBS_DOWNGRADE, PM_DBS_DOWNGRADE}, + [PM_STA_SAP_DBS_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_GO_SCC_24_1x1] = {PM_NOP, PM_DBS}, + [PM_STA_P2P_GO_SCC_24_2x2] = {PM_NOP, PM_DBS_DOWNGRADE}, + [PM_STA_P2P_GO_MCC_24_1x1] = {PM_NOP, PM_DBS}, + [PM_STA_P2P_GO_MCC_24_2x2] = {PM_NOP, PM_DBS_DOWNGRADE}, + [PM_STA_P2P_GO_SCC_5_1x1] = {PM_DBS, PM_NOP}, + [PM_STA_P2P_GO_SCC_5_2x2] = {PM_DBS_DOWNGRADE, PM_NOP}, + [PM_STA_P2P_GO_MCC_5_1x1] = {PM_DBS, PM_NOP}, + [PM_STA_P2P_GO_MCC_5_2x2] = {PM_DBS_DOWNGRADE, PM_NOP}, + [PM_STA_P2P_GO_MCC_24_5_1x1] = {PM_DBS, PM_DBS}, + [PM_STA_P2P_GO_MCC_24_5_2x2] = { + PM_DBS_DOWNGRADE, PM_DBS_DOWNGRADE}, + [PM_STA_P2P_GO_DBS_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_CLI_SCC_24_1x1] = {PM_NOP, PM_DBS}, + [PM_STA_P2P_CLI_SCC_24_2x2] = { + PM_NOP, PM_DBS_DOWNGRADE}, + [PM_STA_P2P_CLI_MCC_24_1x1] = {PM_NOP, PM_DBS}, + [PM_STA_P2P_CLI_MCC_24_2x2] = { + PM_NOP, PM_DBS_DOWNGRADE}, + [PM_STA_P2P_CLI_SCC_5_1x1] = {PM_DBS, PM_NOP}, + [PM_STA_P2P_CLI_SCC_5_2x2] = {PM_DBS_DOWNGRADE, PM_NOP}, + [PM_STA_P2P_CLI_MCC_5_1x1] = {PM_DBS, PM_NOP}, + [PM_STA_P2P_CLI_MCC_5_2x2] = {PM_DBS_DOWNGRADE, PM_NOP}, + [PM_STA_P2P_CLI_MCC_24_5_1x1] = {PM_DBS, PM_DBS}, + [PM_STA_P2P_CLI_MCC_24_5_2x2] = { + PM_DBS_DOWNGRADE, PM_DBS_DOWNGRADE}, + [PM_STA_P2P_CLI_DBS_1x1] = {PM_NOP, PM_NOP}, + [PM_P2P_GO_P2P_CLI_SCC_24_1x1] = {PM_NOP, PM_DBS}, + [PM_P2P_GO_P2P_CLI_SCC_24_2x2] = { + PM_NOP, PM_DBS_DOWNGRADE}, + [PM_P2P_GO_P2P_CLI_MCC_24_1x1] = {PM_NOP, PM_DBS}, + [PM_P2P_GO_P2P_CLI_MCC_24_2x2] = { + PM_NOP, PM_DBS_DOWNGRADE}, + [PM_P2P_GO_P2P_CLI_SCC_5_1x1] = {PM_DBS, PM_NOP}, + [PM_P2P_GO_P2P_CLI_SCC_5_2x2] = {PM_DBS_DOWNGRADE, PM_NOP}, + [PM_P2P_GO_P2P_CLI_MCC_5_1x1] = {PM_DBS, 
PM_NOP}, + [PM_P2P_GO_P2P_CLI_MCC_5_2x2] = {PM_DBS_DOWNGRADE, PM_NOP}, + [PM_P2P_GO_P2P_CLI_MCC_24_5_1x1] = {PM_DBS, PM_DBS}, + [PM_P2P_GO_P2P_CLI_MCC_24_5_2x2] = { + PM_DBS_DOWNGRADE, PM_DBS_DOWNGRADE}, + [PM_P2P_GO_P2P_CLI_DBS_1x1] = {PM_NOP, PM_NOP}, + [PM_P2P_GO_SAP_SCC_24_1x1] = {PM_NOP, PM_DBS}, + [PM_P2P_GO_SAP_SCC_24_2x2] = {PM_NOP, PM_DBS_DOWNGRADE}, + [PM_P2P_GO_SAP_MCC_24_1x1] = {PM_NOP, PM_DBS}, + [PM_P2P_GO_SAP_MCC_24_2x2] = {PM_NOP, PM_DBS_DOWNGRADE}, + [PM_P2P_GO_SAP_SCC_5_1x1] = {PM_DBS, PM_NOP}, + [PM_P2P_GO_SAP_SCC_5_2x2] = {PM_DBS_DOWNGRADE, PM_NOP}, + [PM_P2P_GO_SAP_MCC_5_1x1] = {PM_DBS, PM_NOP}, + [PM_P2P_GO_SAP_MCC_5_2x2] = {PM_DBS_DOWNGRADE, PM_NOP}, + [PM_P2P_GO_SAP_MCC_24_5_1x1] = {PM_DBS, PM_DBS}, + [PM_P2P_GO_SAP_MCC_24_5_2x2] = { + PM_DBS_DOWNGRADE, PM_DBS_DOWNGRADE}, + [PM_P2P_GO_SAP_DBS_1x1] = {PM_NOP, PM_NOP}, + [PM_P2P_CLI_SAP_SCC_24_1x1] = {PM_NOP, PM_DBS}, + [PM_P2P_CLI_SAP_SCC_24_2x2] = {PM_NOP, PM_DBS_DOWNGRADE}, + [PM_P2P_CLI_SAP_MCC_24_1x1] = {PM_NOP, PM_DBS}, + [PM_P2P_CLI_SAP_MCC_24_2x2] = {PM_NOP, PM_DBS_DOWNGRADE}, + [PM_P2P_CLI_SAP_SCC_5_1x1] = {PM_DBS, PM_NOP}, + [PM_P2P_CLI_SAP_SCC_5_2x2] = {PM_DBS_DOWNGRADE, PM_NOP}, + [PM_P2P_CLI_SAP_MCC_5_1x1] = {PM_DBS, PM_NOP}, + [PM_P2P_CLI_SAP_MCC_5_2x2] = {PM_DBS_DOWNGRADE, PM_NOP}, + [PM_P2P_CLI_SAP_MCC_24_5_1x1] = {PM_DBS, PM_DBS}, + [PM_P2P_CLI_SAP_MCC_24_5_2x2] = {PM_DBS_DOWNGRADE, PM_DBS_DOWNGRADE}, + [PM_P2P_CLI_SAP_DBS_1x1] = {PM_NOP, PM_NOP}, + +}; + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_tables_2x2_dbs_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_tables_2x2_dbs_i.h new file mode 100644 index 0000000000000000000000000000000000000000..dd11a9faea0a930c52ddf6e068a715bc6e1b96e6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_tables_2x2_dbs_i.h @@ -0,0 +1,1389 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. 
All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __WLAN_POLICY_MGR_TABLES_2X2_DBS_H +#define __WLAN_POLICY_MGR_TABLES_2X2_DBS_H + +#include "wlan_policy_mgr_api.h" + +/** + * second_connection_pcl_dbs_2x2_table - table which provides PCL + * for the 2nd connection, when we have a connection already in + * the system (with DBS supported by HW) + * Each existing-connection/new-mode pair has its own entry; entries + * set to PM_MAX_PCL_TYPE indicate that no preferred channel list is specified.
+ */ +static pm_dbs_pcl_second_connection_table_type +pm_second_connection_pcl_dbs_2x2_table = { + [PM_STA_24_1x1] = { + [PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_IBSS_MODE] = {PM_5G, PM_5G, PM_5G} }, + + [PM_STA_24_2x2] = { + [PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_IBSS_MODE] = {PM_5G, PM_5G, PM_5G} }, + + [PM_STA_5_1x1] = { + [PM_STA_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_CLIENT_MODE] = { PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_GO_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_IBSS_MODE] = {PM_24G, PM_24G, PM_24G} }, + + [PM_STA_5_2x2] = { + [PM_STA_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_CLIENT_MODE] = { PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_GO_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_IBSS_MODE] = {PM_24G, PM_24G, PM_24G} }, + + [PM_P2P_CLI_24_1x1] = { + [PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_IBSS_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, + PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_24_2x2] 
= { + [PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_IBSS_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, + PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_5_1x1] = { + [PM_STA_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_CLIENT_MODE] = { PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_GO_MODE] = {PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH}, + [PM_IBSS_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, + PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_5_2x2] = { + [PM_STA_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_CLIENT_MODE] = { PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_GO_MODE] = {PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH}, + [PM_IBSS_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, + PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_24_1x1] = { + [PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, + PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, + PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_24_2x2] = { + [PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, + PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, + 
PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_5_1x1] = { + [PM_STA_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_CLIENT_MODE] = { PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_GO_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, + PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, + PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_5_2x2] = { + [PM_STA_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_CLIENT_MODE] = { PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_GO_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, + PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, + PM_MAX_PCL_TYPE} }, + + [PM_SAP_24_1x1] = { + [PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_IBSS_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, + PM_MAX_PCL_TYPE} }, + + [PM_SAP_24_2x2] = { + [PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_IBSS_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, + PM_MAX_PCL_TYPE} }, + + [PM_SAP_5_1x1] = { + [PM_STA_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_CLIENT_MODE] = { PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_GO_MODE] = {PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH, + 
PM_24G_SCC_CH_SBS_CH}, + [PM_IBSS_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, + PM_MAX_PCL_TYPE} }, + + [PM_SAP_5_2x2] = { + [PM_STA_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_CLIENT_MODE] = { PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_GO_MODE] = {PM_24G_SCC_CH_SBS_CH, PM_24G_SCC_CH_SBS_CH, + PM_24G_SCC_CH_SBS_CH}, + [PM_IBSS_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, + PM_MAX_PCL_TYPE} }, + + + [PM_IBSS_24_1x1] = { + [PM_STA_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_IBSS_24_2x2] = { + [PM_STA_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_IBSS_5_1x1] = { + [PM_STA_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_IBSS_5_2x2] = { + [PM_STA_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + 
[PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + +}; + +/** + * third_connection_pcl_dbs_table - table which provides PCL for + * the 3rd connection, when we have two connections already in + * the system (with DBS supported by HW) + */ +static pm_dbs_pcl_third_connection_table_type +pm_third_connection_pcl_dbs_2x2_table = { + [PM_STA_SAP_SCC_24_1x1] = { + [PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_SCC_24_2x2] = { + [PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_MCC_24_1x1] = { + [PM_STA_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_MCC_24_2x2] = { + [PM_STA_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_SCC_5_1x1] = { + [PM_STA_MODE] = {PM_24G_SCC_CH_SBS_CH_5G, PM_24G_SCC_CH, + PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_SCC_5_2x2] = { + [PM_STA_MODE] = {PM_24G_SCC_CH_SBS_CH_5G, PM_24G_SCC_CH, + PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_MCC_5_1x1] = { + [PM_STA_MODE] = {PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_MCC_5_2x2] = { + [PM_STA_MODE] = {PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_MCC_24_5_1x1] = { + [PM_STA_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_MCC_24_5_2x2] = { + [PM_STA_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_DBS_1x1] = { + [PM_STA_MODE] = { PM_SCC_ON_5_SCC_ON_24_5G, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_SAP_MODE] = { PM_SCC_ON_5_SCC_ON_24, PM_SCC_ON_5_SCC_ON_24, + 
PM_SCC_ON_5_SCC_ON_24}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { PM_SCC_ON_5_SCC_ON_24, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_DBS_2x2] = { + [PM_STA_MODE] = { PM_SCC_ON_5_SCC_ON_24_5G, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_SAP_MODE] = { PM_SCC_ON_5_SCC_ON_24, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { PM_SCC_ON_5_SCC_ON_24, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_SBS_5_1x1] = { + [PM_STA_MODE] = { + PM_SBS_CH_5G, PM_SBS_CH, PM_SBS_CH}, + [PM_SAP_MODE] = { + PM_SBS_CH_5G, PM_SBS_CH, PM_SBS_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_SBS_CH_5G, PM_SBS_CH, PM_SBS_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_SCC_24_1x1] = { + [PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_SCC_24_2x2] = { + [PM_STA_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_MCC_24_1x1] = { + [PM_STA_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + 
[PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_MCC_24_2x2] = { + [PM_STA_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_SCC_5_1x1] = { + [PM_STA_MODE] = {PM_24G_SCC_CH_SBS_CH_5G, PM_24G_SCC_CH, + PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_SCC_5_2x2] = { + [PM_STA_MODE] = {PM_24G_SCC_CH_SBS_CH_5G, PM_24G_SCC_CH, + PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_24G, PM_24G, PM_24G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_MCC_5_1x1] = { + [PM_STA_MODE] = {PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_SAP_MODE] = {PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_MCC_5_2x2] = { + [PM_STA_MODE] = {PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_SAP_MODE] = {PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_MCC_24_5_1x1] = { + [PM_STA_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_MCC_24_5_2x2] = { + [PM_STA_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_DBS_1x1] = { + [PM_STA_MODE] = { PM_SCC_ON_5_SCC_ON_24_5G, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_SAP_MODE] = { PM_SCC_ON_5_SCC_ON_24, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_SCC_ON_5_SCC_ON_24, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_DBS_2x2] = { + [PM_STA_MODE] = { PM_SCC_ON_5_SCC_ON_24_5G, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_SAP_MODE] = { PM_SCC_ON_5_SCC_ON_24, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = {PM_SCC_ON_5_SCC_ON_24, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_SAP_SBS_5_1x1] = { + [PM_STA_MODE] = { + PM_SBS_CH_5G, PM_SBS_CH, PM_SBS_CH}, + [PM_SAP_MODE] = { + PM_SBS_CH_5G, PM_SBS_CH, PM_SBS_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] 
= { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_SCC_24_1x1] = { + [PM_STA_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, + PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_SCC_24_2x2] = { + [PM_STA_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_24_1x1] = { + [PM_STA_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_24_2x2] = { + [PM_STA_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_SCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_24G_SCC_CH_SBS_CH_5G, PM_24G_SCC_CH, + PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + 
[PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_SCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_24G_SCC_CH_SBS_CH_5G, PM_24G_SCC_CH, + PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_24_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_24_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_DBS_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { PM_SCC_ON_5_SCC_ON_24_5G, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_DBS_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { PM_SCC_ON_5_SCC_ON_24_5G, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_SBS_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_SBS_CH_5G, PM_SBS_CH, PM_SBS_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_SCC_24_1x1] = { + [PM_STA_MODE] = { + PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_SCC_24_2x2] = { + [PM_STA_MODE] = { + PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_MCC_24_1x1] = { + [PM_STA_MODE] = { PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_MCC_24_2x2] = { + [PM_STA_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_SCC_5_1x1] = { + [PM_STA_MODE] = { + PM_24G_SCC_CH_SBS_CH_5G, PM_24G_SCC_CH, + PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_SCC_5_2x2] = { + [PM_STA_MODE] = { + PM_24G_SCC_CH_SBS_CH_5G, PM_24G_SCC_CH, + PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_MCC_5_1x1] = { + [PM_STA_MODE] = { + PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_MCC_5_2x2] = { + [PM_STA_MODE] = { + PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_MCC_24_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_MCC_24_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_DBS_1x1] = { + [PM_STA_MODE] = { + PM_SCC_ON_5_SCC_ON_24_5G, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_SAP_MODE] = { + PM_SCC_ON_5_SCC_ON_24, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_DBS_2x2] = { + [PM_STA_MODE] = { + PM_SCC_ON_5_SCC_ON_24_5G, 
PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_SAP_MODE] = { + PM_SCC_ON_5_SCC_ON_24, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_SBS_5_1x1] = { + [PM_STA_MODE] = { + PM_SBS_CH_5G, PM_SBS_CH, PM_SBS_CH}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_SCC_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_SCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_MCC_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_MCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_SCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_SCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_MCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_MCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, }, + + [PM_STA_P2P_CLI_MCC_24_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_MCC_24_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_DBS_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_DBS_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_SBS_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + 
[PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_SCC_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_SCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_SCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_SCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_24_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_24_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_DBS_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_DBS_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_SBS_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_STA_SCC_24_1x1] = { + [PM_STA_MODE] = { + PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = { + PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_STA_SCC_24_2x2] = { + [PM_STA_MODE] = { + PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_SAP_MODE] = { + PM_5G_SCC_CH, PM_5G_SCC_CH, PM_5G_SCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_STA_MCC_24_1x1] = { + [PM_STA_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_STA_MCC_24_2x2] = { + [PM_STA_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_SAP_MODE] = {PM_5G, PM_5G, PM_5G}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_STA_SCC_5_1x1] = { + [PM_STA_MODE] = {PM_24G_SCC_CH_SBS_CH_5G, PM_24G_SCC_CH, + PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_24G_SCC_CH_SBS_CH_5G, PM_24G_SCC_CH, + PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_STA_SCC_5_2x2] = { + [PM_STA_MODE] = {PM_24G_SCC_CH_SBS_CH_5G, PM_24G_SCC_CH, + PM_24G_SCC_CH_SBS_CH}, + [PM_SAP_MODE] = {PM_24G_SCC_CH_SBS_CH_5G, PM_24G_SCC_CH, + PM_24G_SCC_CH_SBS_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_STA_MCC_5_1x1] = { + [PM_STA_MODE] = { + PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_SAP_MODE] = { + PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_STA_MCC_5_2x2] = { + [PM_STA_MODE] = { + PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_SAP_MODE] = { + PM_24G_SBS_CH_MCC_CH, PM_24G, PM_24G_MCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, }, + + [PM_STA_STA_MCC_24_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_STA_MCC_24_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_STA_DBS_1x1] = { + [PM_STA_MODE] = {PM_SCC_ON_5_SCC_ON_24_5G, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_SAP_MODE] = {PM_SCC_ON_5_SCC_ON_24_5G, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_STA_DBS_2x2] = { + [PM_STA_MODE] = {PM_SCC_ON_5_SCC_ON_24_5G, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_SAP_MODE] = {PM_SCC_ON_5_SCC_ON_24_5G, PM_SCC_ON_5_SCC_ON_24, + PM_SCC_ON_5_SCC_ON_24}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_STA_SBS_5_1x1] = { + [PM_STA_MODE] = { + PM_SBS_CH_5G, PM_SBS_CH, PM_SBS_CH}, + [PM_SAP_MODE] = { + PM_SBS_CH_5G, PM_SBS_CH, PM_SBS_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + +}; + +/** + * next_action_two_connection_table - table which provides next + * action while a new connection is coming up, with one + * connection already in the system + */ +static policy_mgr_next_action_two_connection_table_type + pm_next_action_two_connection_dbs_2x2_table = { + [PM_STA_24_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_24_2x2] = {PM_NOP, PM_NOP}, + [PM_STA_5_1x1] = {PM_DBS, PM_SBS}, + [PM_STA_5_2x2] = {PM_DBS, PM_SBS_DOWNGRADE}, + [PM_P2P_CLI_24_1x1] = {PM_NOP, PM_NOP}, + [PM_P2P_CLI_24_2x2] = {PM_NOP, PM_NOP}, + [PM_P2P_CLI_5_1x1] = {PM_DBS, PM_SBS}, + [PM_P2P_CLI_5_2x2] = {PM_DBS, PM_SBS_DOWNGRADE}, + [PM_P2P_GO_24_1x1] = {PM_NOP, PM_NOP}, + [PM_P2P_GO_24_2x2] = {PM_NOP, PM_NOP}, + [PM_P2P_GO_5_1x1] = {PM_DBS, PM_SBS}, + [PM_P2P_GO_5_2x2] = {PM_DBS, PM_SBS_DOWNGRADE}, + [PM_SAP_24_1x1] = {PM_NOP, PM_NOP}, + [PM_SAP_24_2x2] = {PM_NOP, PM_NOP}, + [PM_SAP_5_1x1] = {PM_DBS, PM_SBS}, + [PM_SAP_5_2x2] = {PM_DBS, PM_SBS_DOWNGRADE}, + [PM_IBSS_24_1x1] = 
{PM_NOP, PM_NOP}, + [PM_IBSS_24_2x2] = {PM_NOP, PM_NOP}, + [PM_IBSS_5_1x1] = {PM_DBS, PM_NOP}, + [PM_IBSS_5_2x2] = {PM_DBS, PM_NOP}, +}; + +/** + * next_action_three_connection_table - table which provides next + * action while a new connection is coming up, with two + * connections already in the system + */ +static policy_mgr_next_action_three_connection_table_type + pm_next_action_three_connection_dbs_2x2_table = { + [PM_STA_SAP_SCC_24_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_SAP_SCC_24_2x2] = {PM_NOP, PM_NOP}, + [PM_STA_SAP_MCC_24_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_SAP_MCC_24_2x2] = {PM_NOP, PM_NOP}, + [PM_STA_SAP_SCC_5_1x1] = {PM_DBS, PM_SBS}, + [PM_STA_SAP_SCC_5_2x2] = {PM_DBS, PM_SBS_DOWNGRADE}, + [PM_STA_SAP_MCC_5_1x1] = {PM_DBS, PM_SBS}, + [PM_STA_SAP_MCC_5_2x2] = {PM_DBS, PM_SBS_DOWNGRADE}, + [PM_STA_SAP_MCC_24_5_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_SAP_MCC_24_5_2x2] = {PM_NOP, PM_NOP}, + [PM_STA_SAP_DBS_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_SAP_DBS_2x2] = {PM_NOP, PM_NOP}, + [PM_STA_SAP_SBS_5_1x1] = {PM_DBS_UPGRADE, PM_NOP}, + + [PM_STA_P2P_GO_SCC_24_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_GO_SCC_24_2x2] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_GO_MCC_24_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_GO_MCC_24_2x2] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_GO_SCC_5_1x1] = {PM_DBS, PM_SBS}, + [PM_STA_P2P_GO_SCC_5_2x2] = {PM_DBS, PM_SBS_DOWNGRADE}, + [PM_STA_P2P_GO_MCC_5_1x1] = {PM_DBS, PM_SBS}, + [PM_STA_P2P_GO_MCC_5_2x2] = {PM_DBS, PM_SBS_DOWNGRADE}, + [PM_STA_P2P_GO_MCC_24_5_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_GO_MCC_24_5_2x2] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_GO_DBS_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_GO_DBS_2x2] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_GO_SBS_5_1x1] = {PM_DBS_UPGRADE, PM_NOP}, + + [PM_STA_P2P_CLI_SCC_24_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_CLI_SCC_24_2x2] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_CLI_MCC_24_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_CLI_MCC_24_2x2] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_CLI_SCC_5_1x1] = {PM_DBS, PM_SBS}, + [PM_STA_P2P_CLI_SCC_5_2x2] = {PM_DBS, 
PM_SBS_DOWNGRADE}, + [PM_STA_P2P_CLI_MCC_5_1x1] = {PM_DBS, PM_SBS}, + [PM_STA_P2P_CLI_MCC_5_2x2] = {PM_DBS, PM_SBS_DOWNGRADE}, + [PM_STA_P2P_CLI_MCC_24_5_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_CLI_MCC_24_5_2x2] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_CLI_DBS_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_CLI_DBS_2x2] = {PM_NOP, PM_NOP}, + [PM_STA_P2P_CLI_SBS_5_1x1] = {PM_DBS_UPGRADE, PM_NOP}, + + [PM_P2P_GO_P2P_CLI_SCC_24_1x1] = {PM_NOP, PM_NOP}, + [PM_P2P_GO_P2P_CLI_SCC_24_2x2] = {PM_NOP, PM_NOP}, + [PM_P2P_GO_P2P_CLI_MCC_24_1x1] = {PM_NOP, PM_NOP}, + [PM_P2P_GO_P2P_CLI_MCC_24_2x2] = {PM_NOP, PM_NOP}, + [PM_P2P_GO_P2P_CLI_SCC_5_1x1] = {PM_DBS, PM_SBS}, + [PM_P2P_GO_P2P_CLI_SCC_5_2x2] = {PM_DBS, PM_SBS_DOWNGRADE}, + [PM_P2P_GO_P2P_CLI_MCC_5_1x1] = {PM_DBS, PM_SBS}, + [PM_P2P_GO_P2P_CLI_MCC_5_2x2] = {PM_DBS, PM_SBS_DOWNGRADE}, + [PM_P2P_GO_P2P_CLI_MCC_24_5_1x1] = {PM_NOP, PM_NOP}, + [PM_P2P_GO_P2P_CLI_MCC_24_5_2x2] = {PM_NOP, PM_NOP}, + [PM_P2P_GO_P2P_CLI_DBS_1x1] = {PM_NOP, PM_NOP}, + [PM_P2P_GO_P2P_CLI_DBS_2x2] = {PM_NOP, PM_NOP}, + [PM_P2P_GO_P2P_CLI_SBS_5_1x1] = {PM_DBS_UPGRADE, PM_NOP}, + + [PM_STA_STA_SCC_24_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_STA_SCC_24_2x2] = {PM_NOP, PM_NOP}, + [PM_STA_STA_MCC_24_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_STA_MCC_24_2x2] = {PM_NOP, PM_NOP}, + [PM_STA_STA_SCC_5_1x1] = {PM_DBS, PM_SBS}, + [PM_STA_STA_SCC_5_2x2] = {PM_DBS, PM_SBS_DOWNGRADE}, + [PM_STA_STA_MCC_5_1x1] = {PM_DBS, PM_SBS}, + [PM_STA_STA_MCC_5_2x2] = {PM_DBS, PM_SBS_DOWNGRADE}, + [PM_STA_STA_MCC_24_5_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_STA_MCC_24_5_2x2] = {PM_NOP, PM_NOP}, + [PM_STA_STA_DBS_1x1] = {PM_NOP, PM_NOP}, + [PM_STA_STA_DBS_2x2] = {PM_NOP, PM_NOP}, + [PM_STA_STA_SBS_5_1x1] = {PM_DBS_UPGRADE, PM_NOP}, + +}; + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_tables_no_dbs_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_tables_no_dbs_i.h new file mode 100644 index 
0000000000000000000000000000000000000000..204185247d07295ba4ef640cf08a622f7c9308b6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/policy_mgr/src/wlan_policy_mgr_tables_no_dbs_i.h @@ -0,0 +1,900 @@ +/* + * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __WLAN_POLICY_MGR_TABLES_NO_DBS_H +#define __WLAN_POLICY_MGR_TABLES_NO_DBS_H + +#include "wlan_policy_mgr_api.h" + +/** + * second_connection_pcl_nodbs_table - table which provides PCL + * for the 2nd connection, when we have a connection already in + * the system (with DBS not supported by HW) + */ +static const enum policy_mgr_pcl_type +second_connection_pcl_nodbs_table[PM_MAX_ONE_CONNECTION_MODE] + [PM_MAX_NUM_OF_MODE][PM_MAX_CONC_PRIORITY_MODE] = { + [PM_STA_24_1x1] = { + [PM_STA_MODE] = {PM_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH_5G}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH_5G}, + [PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH_5G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_24_2x2] = { + [PM_STA_MODE] = {PM_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH_5G}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH_5G}, + [PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH_5G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_5_1x1] = { + [PM_STA_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH_5G}, + [PM_SAP_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_5_2x2] = { + [PM_STA_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH_5G}, + [PM_SAP_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_24_1x1] = { + [PM_STA_MODE] = {PM_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH_5G}, + [PM_P2P_CLIENT_MODE] = {PM_5G, PM_SCC_CH, PM_SCC_CH_5G}, + 
[PM_P2P_GO_MODE] = {PM_5G, PM_SCC_CH, PM_SCC_CH_5G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_24_2x2] = { + [PM_STA_MODE] = {PM_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH_5G}, + [PM_P2P_CLIENT_MODE] = {PM_5G, PM_SCC_CH, PM_SCC_CH_5G}, + [PM_P2P_GO_MODE] = {PM_5G, PM_SCC_CH, PM_SCC_CH_5G}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_5_1x1] = { + [PM_STA_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH_5G}, + [PM_SAP_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_5_2x2] = { + [PM_STA_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH_5G}, + [PM_SAP_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_24_1x1] = { + [PM_STA_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_24_2x2] = { + [PM_STA_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_5_1x1] = { + [PM_STA_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + 
[PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_5_2x2] = { + [PM_STA_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_24_1x1] = { + [PM_STA_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_24_2x2] = { + [PM_STA_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_5_1x1] = { + [PM_STA_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_SAP_5_2x2] = { + [PM_STA_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_IBSS_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_IBSS_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_IBSS_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_IBSS_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, +}; + +/** + * third_connection_pcl_nodbs_table - table which provides PCL + * for the 3rd connection, when we have two connections already + * in the system (with DBS not supported by HW) + */ +static const enum policy_mgr_pcl_type +third_connection_pcl_nodbs_table[PM_MAX_TWO_CONNECTION_MODE] + [PM_MAX_NUM_OF_MODE][PM_MAX_CONC_PRIORITY_MODE] = { + [PM_STA_SAP_SCC_24_1x1] = { + [PM_STA_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_SCC_24_2x2] = { + [PM_STA_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_MCC_24_1x1] = { + [PM_STA_MODE] = {PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = {PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_MCC_24_2x2] = { + [PM_STA_MODE] = {PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = {PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_SCC_5_1x1] = { + [PM_STA_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_SCC_5_2x2] = { + [PM_STA_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_MCC_5_1x1] = { + [PM_STA_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_MCC_5_2x2] = { + [PM_STA_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_MCC_24_5_1x1] = { + [PM_STA_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_MCC_24_5_2x2] = { + [PM_STA_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_SAP_DBS_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_SCC_24_1x1] = { + [PM_STA_MODE] = {PM_5G, PM_SCC_CH, 
PM_SCC_CH}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_SCC_24_2x2] = { + [PM_STA_MODE] = {PM_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_24_1x1] = { + [PM_STA_MODE] = {PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = { + PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_24_2x2] = { + [PM_STA_MODE] = {PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = { + PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_SCC_5_1x1] = { + [PM_STA_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_SCC_5_2x2] = { + [PM_STA_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, 
+ [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_5_1x1] = { + [PM_STA_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_5_2x2] = { + [PM_STA_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_24_5_1x1] = { + [PM_STA_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_MCC_24_5_2x2] = { + [PM_STA_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_GO_DBS_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_SCC_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_SCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_5G_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_MCC_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = {PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_MCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = {PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_SCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_SCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_MCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_MCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_MCC_24_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_MCC_24_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_STA_P2P_CLI_DBS_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + 
[PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_SCC_24_1x1] = { + [PM_STA_MODE] = {PM_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_SCC_24_2x2] = { + [PM_STA_MODE] = {PM_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_5G, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_5G_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_24_1x1] = { + [PM_STA_MODE] = {PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = {PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = { + PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_24_2x2] = { + [PM_STA_MODE] = {PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = {PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_CLIENT_MODE] = { + PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = { + PM_5G_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_SCC_5_1x1] = { + [PM_STA_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_SCC_5_2x2] = { + [PM_STA_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_SAP_MODE] = {PM_SCC_CH, 
PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_P2P_GO_MODE] = {PM_SCC_CH, PM_SCC_CH, PM_SCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_5_1x1] = { + [PM_STA_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_5_2x2] = { + [PM_STA_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_24_5_1x1] = { + [PM_STA_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_MCC_24_5_2x2] = { + [PM_STA_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_SAP_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_CLIENT_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_P2P_GO_MODE] = {PM_MCC_CH, PM_MCC_CH, PM_MCC_CH}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_P2P_CLI_DBS_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + 
[PM_P2P_GO_SAP_SCC_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_SCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_MCC_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_MCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_SCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_SCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_MCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_MCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_MCC_24_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_MCC_24_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_GO_SAP_DBS_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + + [PM_P2P_CLI_SAP_SCC_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_SCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_MCC_24_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_MCC_24_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_MAX_PCL_TYPE, 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_SCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_SCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_MCC_5_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_MCC_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_MCC_24_5_1x1] = { + [PM_STA_MODE] = { + 
PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_MCC_24_5_2x2] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = {PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + + [PM_P2P_CLI_SAP_DBS_1x1] = { + [PM_STA_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_SAP_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_CLIENT_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_P2P_GO_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE}, + [PM_IBSS_MODE] = { + PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE, PM_MAX_PCL_TYPE} }, + +}; + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/inc/wlan_serialization_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/inc/wlan_serialization_api.h new file mode 100644 index 0000000000000000000000000000000000000000..69b33481d7e0edd82339c6dd42e389be1bb4e2a6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/inc/wlan_serialization_api.h @@ -0,0 +1,515 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_serialization_api.h + * This file provides prototypes of the routines needed for the + * external components to utilize the services provided by the + * serialization component. + */ + +/* Include files */ +#ifndef __WLAN_SERIALIZATION_API_H +#define __WLAN_SERIALIZATION_API_H + +#include "qdf_status.h" +#include "wlan_objmgr_cmn.h" + +/* Preprocessor Definitions and Constants */ + +/* + * struct wlan_serialization_queued_cmd_info member queue_type specifies the + * below values to cancel the commands in these queues. Setting both the + * bits will cancel the commands in both the queues. + */ +#define WLAN_SERIALIZATION_ACTIVE_QUEUE 0x1 +#define WLAN_SERIALIZATION_PENDING_QUEUE 0x2 + +/** + * enum wlan_serialization_cb_reason - reason for calling the callback + * @WLAN_SERIALIZATION_REASON_ACTIVATE_CMD: activate the cmd by sending it to FW + * @WLAN_SERIALIZATION_REASON_CANCEL_CMD: Cancel the cmd in the pending list + * @WLAN_SERIALIZATION_REASON_RELEASE_MEM_CMD:cmd execution complete. Release + * the memory allocated while + * building the command + * @WLAN_SER_CB_ACTIVE_CMD_TIMEOUT: active cmd has been timeout. 
+ */ +enum wlan_serialization_cb_reason { + WLAN_SER_CB_ACTIVATE_CMD, + WLAN_SER_CB_CANCEL_CMD, + WLAN_SER_CB_RELEASE_MEM_CMD, + WLAN_SER_CB_ACTIVE_CMD_TIMEOUT, +}; + +/** + * struct wlan_serialization_scan_info - Information needed for scan cmd + * @is_cac_in_progress: boolean to check the cac status + * @is_tdls_in_progress: boolean to check the tdls status + * + * This information is needed for scan command from other components + * to apply the rules and check whether the cmd is allowed or not + */ +struct wlan_serialization_scan_info { + bool is_cac_in_progress; + bool is_tdls_in_progress; +}; + +/** + * union wlan_serialization_rules_info - union of all rules info structures + * @scan_info: information needed to apply rules on scan command + */ +union wlan_serialization_rules_info { + struct wlan_serialization_scan_info scan_info; +}; + +struct wlan_serialization_command; + +/** + * wlan_serialization_cmd_callback() - Callback registered by the component + * @wlan_cmd: Command passed by the component for serialization + * @reason: Reason code for which the callback is being called + * + * Reason specifies the reason for which the callback is being called. callback + * should return success or failure based up on overall success of callback. + * if callback returns failure then serialization will remove the command from + * active queue and proceed for next pending command. + * + * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_FAILURE + */ +typedef QDF_STATUS +(*wlan_serialization_cmd_callback)(struct wlan_serialization_command *wlan_cmd, + enum wlan_serialization_cb_reason reason); + +/** + * wlan_serialization_comp_info_cb() - callback to fill the rules information + * @vdev: VDEV object for which the command has been received + * @comp_info: Information filled by the component + * + * This callback is registered dynamically by the component with the + * serialization component. 
Serialization component invokes the callback + * while applying the rules for a particular command and the component + * fills in the required information to apply the rules + * + * Return: None + */ +typedef void (*wlan_serialization_comp_info_cb)(struct wlan_objmgr_vdev *vdev, + union wlan_serialization_rules_info *comp_info); + +/** + * wlan_serialization_apply_rules_cb() - callback per command to apply rules + * @comp_info: information needed to apply the rules + * + * The rules are applied using this callback and decided whether to + * allow or deny the command + * + * Return: true, if rules are successful and cmd can be queued + * false, if rules failed and cmd should not be queued + */ +typedef bool (*wlan_serialization_apply_rules_cb)( + union wlan_serialization_rules_info *comp_info, + uint8_t comp_id); + +/** + * enum wlan_umac_cmd_id - Command Type + * @WLAN_SER_CMD_SCAN: Scan command + */ +enum wlan_serialization_cmd_type { + /* all scan command before non-scan */ + WLAN_SER_CMD_SCAN, + /* all non-scan command below */ + WLAN_SER_CMD_NONSCAN, + WLAN_SER_CMD_FORCE_DISASSOC, + WLAN_SER_CMD_HDD_ISSUED, + WLAN_SER_CMD_FORCE_DISASSOC_MIC_FAIL, + WLAN_SER_CMD_HDD_ISSUE_REASSOC_SAME_AP, + WLAN_SER_CMD_SME_ISSUE_REASSOC_SAME_AP, + WLAN_SER_CMD_FORCE_DEAUTH, + WLAN_SER_CMD_SME_ISSUE_DISASSOC_FOR_HANDOFF, + WLAN_SER_CMD_SME_ISSUE_ASSOC_TO_SIMILAR_AP, + WLAN_SER_CMD_FORCE_IBSS_LEAVE, + WLAN_SER_CMD_STOP_BSS, + WLAN_SER_CMD_SME_ISSUE_FT_REASSOC, + WLAN_SER_CMD_FORCE_DISASSOC_STA, + WLAN_SER_CMD_FORCE_DEAUTH_STA, + WLAN_SER_CMD_PERFORM_PRE_AUTH, + WLAN_SER_CMD_WM_STATUS_CHANGE, + WLAN_SER_CMD_NDP_INIT_REQ, + WLAN_SER_CMD_NDP_RESP_REQ, + WLAN_SER_CMD_NDP_DATA_END_INIT_REQ, + WLAN_SER_CMD_ADDTS, + WLAN_SER_CMD_DELTS, + WLAN_SER_CMD_TDLS_SEND_MGMT, + WLAN_SER_CMD_TDLS_ADD_PEER, + WLAN_SER_CMD_TDLS_DEL_PEER, + WLAN_SER_CMD_SET_HW_MODE, + WLAN_SER_CMD_NSS_UPDATE, + WLAN_SER_CMD_SET_DUAL_MAC_CONFIG, + WLAN_SER_CMD_SET_ANTENNA_MODE, + WLAN_SER_CMD_DEL_STA_SESSION, + 
WLAN_SER_CMD_MAX +}; + +/** + * enum wlan_serialization_cancel_type - Type of commands to be cancelled + * @WLAN_SER_CANCEL_SINGLE_SCAN: Cancel a single scan with a given ID + * @WLAN_SER_CANCEL_PDEV_SCANS: Cancel all the scans on a given pdev + * @WLAN_SER_CANCEL_VDEV_SCANS: Cancel all the scans on given vdev + * @WLAN_SER_CANCEL_NON_SCAN_CMD: Cancel the given non scan command + */ +enum wlan_serialization_cancel_type { + WLAN_SER_CANCEL_SINGLE_SCAN, + WLAN_SER_CANCEL_PDEV_SCANS, + WLAN_SER_CANCEL_VDEV_SCANS, + WLAN_SER_CANCEL_NON_SCAN_CMD, + WLAN_SER_CANCEL_MAX, +}; + +/** + * enum wlan_serialization_status - Return status of cmd serialization request + * @WLAN_SER_CMD_PENDING: Command is put into the pending queue + * @WLAN_SER_CMD_ACTIVE: Command is activated and put in active queue + * @WLAN_SER_CMD_DENIED_RULES_FAILED: Command denied as the rules fail + * @WLAN_SER_CMD_DENIED_LIST_FULL: Command denied as the pending list is full + * @WLAN_SER_CMD_DENIED_UNSPECIFIED: Command denied due to unknown reason + */ +enum wlan_serialization_status { + WLAN_SER_CMD_PENDING, + WLAN_SER_CMD_ACTIVE, + WLAN_SER_CMD_DENIED_RULES_FAILED, + WLAN_SER_CMD_DENIED_LIST_FULL, + WLAN_SER_CMD_DENIED_UNSPECIFIED, +}; + +/** + * enum wlan_serialization_cmd_status - Return status for a cancel request + * @WLAN_SER_CMD_IN_PENDING_LIST: Command cancelled from pending list + * @WLAN_SER_CMD_IN_ACTIVE_LIST: Command cancelled from active list + * @WLAN_SER_CMDS_IN_ALL_LISTS: Command cancelled from all lists + * @WLAN_SER_CMD_NOT_FOUND: Specified command to be cancelled + * not found in the lists + */ +enum wlan_serialization_cmd_status { + WLAN_SER_CMD_IN_PENDING_LIST, + WLAN_SER_CMD_IN_ACTIVE_LIST, + WLAN_SER_CMDS_IN_ALL_LISTS, + WLAN_SER_CMD_NOT_FOUND, +}; + +/** + * struct wlan_serialization_command - Command to be serialized + * @wlan_serialization_cmd_type: Type of command + * @cmd_id: Command Identifier + * @cmd_cb: Command callback + * @source: component ID of the source of the 
command + * @is_high_priority: Normal/High Priority at which the cmd has to be queued + * @cmd_timeout_cb: Command timeout callback + * @cmd_timeout_duration: Timeout duration in milliseconds + * @vdev: VDEV object associated to the command + * @umac_cmd: Actual command that needs to be sent to WMI/firmware + * + * Note: Unnamed union has been used in this structure, so that in future if + * somebody wants to add pdev or psoc structure then that person can add without + * modifying existing code. + */ +struct wlan_serialization_command { + enum wlan_serialization_cmd_type cmd_type; + uint32_t cmd_id; + wlan_serialization_cmd_callback cmd_cb; + enum wlan_umac_comp_id source; + bool is_high_priority; + uint16_t cmd_timeout_duration; + union { + struct wlan_objmgr_vdev *vdev; + }; + void *umac_cmd; +}; + +/** + * struct wlan_serialization_queued_cmd_info - cmd that has to be cancelled + * @requestor: component ID of the source requesting this action + * @cmd_type: Command type + * @cmd_id: Command ID + * @req_type: Commands that need to be cancelled + * @vdev: VDEV object associated to the command + * @queue_type: Queues from which the command to be cancelled + */ +struct wlan_serialization_queued_cmd_info { + enum wlan_umac_comp_id requestor; + enum wlan_serialization_cmd_type cmd_type; + uint32_t cmd_id; + enum wlan_serialization_cancel_type req_type; + union { + struct wlan_objmgr_vdev *vdev; + }; + uint8_t queue_type; +}; + +/** + * wlan_serialization_cancel_request() - Request to cancel a command + * @req: Request information + * + * This API is used by external components to cancel a command + * that is either in the pending or active queue. Based on the + * req_type, it is decided whether to use pdev or vdev + * object. For all non-scan commands, it will be pdev. 
+ * + * Return: Status specifying the removal of a command from a certain queue + */ +enum wlan_serialization_cmd_status +wlan_serialization_cancel_request( + struct wlan_serialization_queued_cmd_info *req); + +/** + * wlan_serialization_remove_cmd() - Request to release a command + * @cmd: Command information + * + * This API is used to release a command sitting in the active + * queue upon successful completion of the command + * + * Return: None + */ +void wlan_serialization_remove_cmd( + struct wlan_serialization_queued_cmd_info *cmd); + +/** + * wlan_serialization_flush_cmd() - Request to flush command + * @cmd: Command information + * + * This API is used to flush a cmd sitting in the queue. It + * simply flushes the cmd from the queue and does not call + * any callbacks in between. If the request is for active + * queue, and if the active queue becomes empty upon flush, + * then it will pick the next pending cmd and put in the active + * queue before returning. + * + * Return: None + */ +void wlan_serialization_flush_cmd( + struct wlan_serialization_queued_cmd_info *cmd); +/** + * wlan_serialization_request() - Request to serialize a command + * @cmd: Command information + * + * Return: Status of the serialization request + */ +enum wlan_serialization_status +wlan_serialization_request(struct wlan_serialization_command *cmd); + +/** + * wlan_serialization_register_comp_info_cb() - Register component's info + * callback + * @psoc: PSOC object information + * @comp_id: Component ID + * @cmd_type: Command Type + * @cb: Callback + * + * This is called from component during its initialization.It initializes + * callback handler for given comp_id/cmd_id in a 2-D array. 
+ * + * Return: QDF Status + */ +QDF_STATUS +wlan_serialization_register_comp_info_cb(struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id comp_id, + enum wlan_serialization_cmd_type cmd_type, + wlan_serialization_comp_info_cb cb); + +/** + * wlan_serialization_deregister_comp_info_cb() - Deregister component's info + * callback + * @psoc: PSOC object information + * @comp_id: Component ID + * @cmd_type: Command Type + * + * This routine is called from other component during its de-initialization. + * + * Return: QDF Status + */ +QDF_STATUS +wlan_serialization_deregister_comp_info_cb(struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id comp_id, + enum wlan_serialization_cmd_type cmd_type); + +/** + * wlan_serialization_register_apply_rules_cb() - Register component's rules + * callback + * @psoc: PSOC object information + * @cmd_type: Command Type + * @cb: Callback + * + * This is called from component during its initialization.It initializes + * callback handler for given cmd_type in a 1-D array. + * + * Return: QDF Status + */ +QDF_STATUS +wlan_serialization_register_apply_rules_cb(struct wlan_objmgr_psoc *psoc, + enum wlan_serialization_cmd_type cmd_type, + wlan_serialization_apply_rules_cb apply_rules_cb); + +/** + * wlan_serialization_deregister_apply_rules_cb() - Deregister component's rules + * callback + * @psoc: PSOC object information + * @cmd_type: Command Type + * + * This routine is called from other component during its de-initialization. 
+ * + * Return: QDF Status + */ +QDF_STATUS +wlan_serialization_deregister_apply_rules_cb(struct wlan_objmgr_psoc *psoc, + enum wlan_serialization_cmd_type cmd_type); + +/** + * @wlan_serialization_init() - Serialization component initialization routine + * + * Return - QDF Status + */ +QDF_STATUS wlan_serialization_init(void); + +/** + * @wlan_serialization_deinit() - Serialization component de-init routine + * + * Return - QDF Status + */ +QDF_STATUS wlan_serialization_deinit(void); + +/** + * wlan_serialization_psoc_enable() - Serialization component enable routine + * + * Return - QDF Status + */ +QDF_STATUS wlan_serialization_psoc_enable(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_serialization_psoc_disable() - Serialization component disable routine + * + * Return - QDF Status + */ +QDF_STATUS wlan_serialization_psoc_disable(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_serialization_vdev_scan_status() - Return the status of the vdev scan + * @vdev: VDEV Object + * + * Return: Status of the scans for the corresponding vdev + */ +enum wlan_serialization_cmd_status +wlan_serialization_vdev_scan_status(struct wlan_objmgr_vdev *vdev); + +/** + * wlan_serialization_pdev_scan_status() - Return the status of the pdev scan + * @pdev: PDEV Object + * + * Return: Status of the scans for the corresponding pdev + */ +enum wlan_serialization_cmd_status +wlan_serialization_pdev_scan_status(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_serialization_non_scan_cmd_status() - Return status of pdev non-scan cmd + * @pdev: PDEV Object + * @cmd_id: ID of the command for which the status has to be checked + * + * Return: Status of the command for the corresponding pdev + */ +enum wlan_serialization_cmd_status +wlan_serialization_non_scan_cmd_status(struct wlan_objmgr_pdev *pdev, + enum wlan_serialization_cmd_type cmd_id); + +/** + * wlan_serialization_is_cmd_present_in_pending_queue() - Return if the command + * is already present in pending queue + * @cmd: pointer to 
serialization command to check + * + * This API will check if command is present in pending queue. If present + * then return true, so use know that it is duplicated command + * + * Return: true or false + */ +bool wlan_serialization_is_cmd_present_in_pending_queue( + struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd); +/** + * wlan_serialization_is_cmd_present_in_active_queue() - Return if the command + * is already present in active queue + * @cmd: pointer to serialization command to check + * + * This API will check if command is present in active queue. If present + * then return true, so use know that it is duplicated command + * + * Return: true or false + */ +bool wlan_serialization_is_cmd_present_in_active_queue( + struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd); + +/** + * wlan_serialization_purge_all_pdev_cmd() - purge all command for given pdev + * @pdev: objmgr pdev pointer + * + * Return: void + */ +void wlan_serialization_purge_all_pdev_cmd(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_serialization_purge_all_cmd() - purge all command for psoc + * @psoc: objmgr psoc pointer + * + * Return: void + */ +void wlan_serialization_purge_all_cmd(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_serialization_get_scan_cmd_using_scan_id() - Return command which + * matches vdev_id and scan_id + * @psoc: pointer to soc + * @vdev_id: vdev id to pull vdev object + * @scan_id: scan id to match + * @is_scan_cmd_from_active_queue: to indicate active or pending queue + * + * This API fetches vdev/pdev object based on vdev_id, loops through scan + * command queue and find the command which matches scan id as well as vdev + * object. 
+ * + * Return: pointer to serialization command + */ +struct wlan_serialization_command* +wlan_serialization_get_scan_cmd_using_scan_id( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, uint16_t scan_id, + uint8_t is_scan_cmd_from_active_queue); +/** + * wlan_serialization_get_active_cmd() - Return active umac command which + * matches vdev and cmd type + * @psoc: pointer to soc + * @vdev_id: vdev id to pull vdev object + * @cmd_type: cmd type to match + * + * This API fetches vdev/pdev object based on vdev_id, loops through active + * command queue and find the active command which matches cmd_type as well + * as vdev object. + * + * Return: Pointer to umac command. NULL is returned if active command of given + * type is not found. + */ +void *wlan_serialization_get_active_cmd(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + enum wlan_serialization_cmd_type cmd_type); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/inc/wlan_serialization_legacy_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/inc/wlan_serialization_legacy_api.h new file mode 100644 index 0000000000000000000000000000000000000000..051f26053d8ed5f3b96781a8ae415f5a099306cd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/inc/wlan_serialization_legacy_api.h @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_legacy_api.h + * This file provides prototypes of the routines needed for the + * legacy mcl serialization to utilize the services provided by the + * serialization component. + */ +#ifndef __WLAN_SERIALIZATION_LEGACY_API_H +#define __WLAN_SERIALIZATION_LEGACY_API_H + +#include "wlan_serialization_api.h" + +/** + * wlan_serialization_peek_head_pending_cmd_using_psoc() - Return command from + * scan or non-scan pending queue based on flag + * @psoc: pointer to psoc + * @is_cmd_from_pending_scan_queue: flag to determine whether command needed + * from scan or non-scan pending queue + * + * This API finds the first active pdev, and loops through scan or non-scan + * pending queue (based on is_cmd_from_pending_scan_queue flag) and fetches + * first pending command from queue + * + * Return: pointer to serialization command + */ +struct wlan_serialization_command* +wlan_serialization_peek_head_pending_cmd_using_psoc( + struct wlan_objmgr_psoc *psoc, + uint8_t is_cmd_from_pending_scan_queue); +/** + * wlan_serialization_peek_head_active_cmd_using_psoc() - Return command from + * scan or non-scan active queue based on flag + * @psoc: pointer to psoc + * @is_cmd_from_active_scan_queue: flag to determine whether command needed + * from scan or non-scan active queue + * + * This API finds the first active pdev, and loops through scan or non-scan + * active queue (based on is_cmd_from_active_scan_queue flag) and fetches + * first active command from queue + * + * Return: pointer to serialization command + */ +struct wlan_serialization_command* +wlan_serialization_peek_head_active_cmd_using_psoc( + struct 
wlan_objmgr_psoc *psoc, + uint8_t is_cmd_from_active_scan_queue); + +/** + * wlan_serialization_get_pending_list_next_node_using_psoc() - Return next + * scan or non-scan pending command from queue + * @psoc: pointer to psoc + * @prev_cmd: previous command given by caller, find next command after this + * @is_cmd_for_pending_scan_queue: to find from scan or non-scan pending queue + * + * This API finds the first active pdev, and loops through scan or non-scan + * pending queue (based on is_cmd_from_pending_scan_queue flag) and fetches + * next pending command after prev_cmd + * + * Return: pointer to serialization command + */ +struct wlan_serialization_command* +wlan_serialization_get_pending_list_next_node_using_psoc( + struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *prev_cmd, + uint8_t is_cmd_for_pending_scan_queue); +/** + * wlan_serialization_get_active_list_next_node_using_psoc() - Return next + * scan or non-scan pending command from queue + * @psoc: pointer to psoc + * @prev_cmd: previous command given by caller, find next command after this + * @is_cmd_for_active_scan_queue: to find from active scan or non-scan queue + * + * This API finds the first active pdev, and loops through scan or non-scan + * pending queue (based on is_cmd_from_pending_scan_queue flag) and fetches + * next pending command after prev_cmd + * + * Return: pointer to serialization command + */ +struct wlan_serialization_command* +wlan_serialization_get_active_list_next_node_using_psoc( + struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *prev_cmd, + uint8_t is_cmd_for_active_scan_queue); +/** + * wlan_serialization_get_active_list_count() - Return Active list count + * @psoc: pointer to soc + * @is_cmd_from_active_scan_queue: flag to determine whether command needed + * from scan or non-scan active queue + * + * Get the number of nodes present in active list + * + * Return: count number of active commands in queue + */ + +uint32_t 
wlan_serialization_get_active_list_count(struct wlan_objmgr_psoc *psoc, + uint8_t is_cmd_from_active_scan_queue); +/** + * wlan_serialization_get_pending_list_count() - Return pending list count + * @psoc: pointer to soc + * @is_cmd_from_pending_scan_queue: flag to determine whether command needed + * from scan or non-scan pending queue + * + * Get the number of nodes present in pending list + * + * Return: count number of pending commands in queue + */ +uint32_t wlan_serialization_get_pending_list_count( + struct wlan_objmgr_psoc *psoc, + uint8_t is_cmd_from_pending_scan_queue); + +/** + * wlan_serialization_purge_cmd_list_by_vdev_id() - Purge given list + * @psoc: pointer to soc + * @vdev_id: vdev_id variable + * @purge_scan_active_queue: whether to purge active scan queue + * @purge_scan_pending_queue: whether to purge pending scan queue + * @purge_nonscan_active_queue: whether to purge active nonscan queue + * @purge_nonscan_pending_queue: whether to purge pending nonscan queue + * @purge_all_queues: whether to purge all queues. + * + * This API will purge queue based given flags and vdev_id. If vdev + * is invalid then it will return immediately. If correct vdev_id is given then + * it will purge the queues per vdev. + * + * Example: + * 1) If you want to purge scan active queue for particular vdev then + * provide correct vdev_id value and purge_scan_active_queue flag set to + * TRUE and rest of the flags set to false. + * 2) If you want to purge all queues for particular vdev then provide + * correct vdev_id value and set purge_all_queues flag set to TRUE and rest + * of the flags set to false. 
+ * 3) If you want to purge active scan and active non-scan queues to be flushed + * then set purge_scan_active_queue and purge_nonscan_active_queue flags to + * be set TRUE and rest of the flags to be FALSE + * + * Return: none + */ +void wlan_serialization_purge_cmd_list_by_vdev_id(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + bool purge_scan_active_queue, + bool purge_scan_pending_queue, + bool purge_nonscan_active_queue, + bool purge_nonscan_pending_queue, + bool purge_all_queues); +/** + * wlan_serialization_purge_cmd_list() - Purge given list + * @psoc: pointer to soc + * @vdev: pointer to vdev object + * @purge_scan_active_queue: whether to purge active scan queue + * @purge_scan_pending_queue: whether to purge pending scan queue + * @purge_nonscan_active_queue: whether to purge active nonscan queue + * @purge_nonscan_pending_queue: whether to purge pending nonscan queue + * @purge_all_queues: whether to purge all queues. + * + * This API will purge queue based given flags and vdev object. If vdev + * is null then it will purge the queues per pdev by default. + * If vdev is given then it will purge the queues per vdev. + * + * Example: + * 1) If you want to purge scan active queue for particular vdev then + * provide correct vdev object and purge_scan_active_queue flag set to + * TRUE and rest of the flags set to false. + * 2) If you want to purge all queues for particular vdev then provide + * correct vdev object value & set purge_all_queues flag set to TRUE and rest + * of the flags set to false. + * 3) If you want to purge active scan and active non-scan queues to be flushed + * for pdev then set purge_scan_active_queue and purge_nonscan_active_queue + * flags to be set TRUE and rest of the flags to be FALSE with vdev object + * passed as NULL. 
+ * + * Return: none + */ +void wlan_serialization_purge_cmd_list(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev, + bool purge_scan_active_queue, + bool purge_scan_pending_queue, + bool purge_nonscan_active_queue, + bool purge_nonscan_pending_queue, + bool purge_all_queues); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_api.c new file mode 100644 index 0000000000000000000000000000000000000000..73bb6c4905b19cde0fdefa19cdbd403498e6488c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_api.c @@ -0,0 +1,483 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_api.c + * This file provides an interface for the external components + * to utilize the services provided by the serialization + * component. 
+ */ + +/* Include files */ +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_objmgr_pdev_obj.h" +#include "wlan_objmgr_vdev_obj.h" +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_utils_i.h" + +bool wlan_serialization_is_cmd_present_in_pending_queue( + struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd) +{ + if (!cmd) { + serialization_err("invalid cmd"); + return false; + } + return wlan_serialization_is_cmd_present_queue(cmd, false); +} + +bool wlan_serialization_is_cmd_present_in_active_queue( + struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd) +{ + if (!cmd) { + serialization_err("invalid cmd"); + return false; + } + return wlan_serialization_is_cmd_present_queue(cmd, true); +} + +QDF_STATUS +wlan_serialization_register_apply_rules_cb(struct wlan_objmgr_psoc *psoc, + enum wlan_serialization_cmd_type cmd_type, + wlan_serialization_apply_rules_cb cb) +{ + struct wlan_serialization_psoc_priv_obj *ser_soc_obj; + QDF_STATUS status; + + status = wlan_serialization_validate_cmdtype(cmd_type); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("invalid cmd_type %d", + cmd_type); + return status; + } + ser_soc_obj = wlan_serialization_get_psoc_priv_obj(psoc); + if (!ser_soc_obj) { + serialization_err("invalid ser_soc_obj"); + return QDF_STATUS_E_FAILURE; + } + ser_soc_obj->apply_rules_cb[cmd_type] = cb; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_serialization_deregister_apply_rules_cb(struct wlan_objmgr_psoc *psoc, + enum wlan_serialization_cmd_type cmd_type) +{ + struct wlan_serialization_psoc_priv_obj *ser_soc_obj; + QDF_STATUS status; + + status = wlan_serialization_validate_cmdtype(cmd_type); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("invalid cmd_type %d", + cmd_type); + return status; + } + ser_soc_obj = wlan_serialization_get_psoc_priv_obj(psoc); + if (!ser_soc_obj) { + serialization_err("invalid ser_soc_obj"); + return QDF_STATUS_E_FAILURE; + } + 
ser_soc_obj->apply_rules_cb[cmd_type] = NULL; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_serialization_register_comp_info_cb(struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id comp_id, + enum wlan_serialization_cmd_type cmd_type, + wlan_serialization_comp_info_cb cb) +{ + struct wlan_serialization_psoc_priv_obj *ser_soc_obj; + QDF_STATUS status; + + status = wlan_serialization_validate_cmd(comp_id, cmd_type); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("invalid comp_id %d or cmd_type %d", + comp_id, cmd_type); + return status; + } + ser_soc_obj = wlan_serialization_get_psoc_priv_obj(psoc); + if (!ser_soc_obj) { + serialization_err("invalid ser_soc_obj"); + return QDF_STATUS_E_FAILURE; + } + ser_soc_obj->comp_info_cb[cmd_type][comp_id] = cb; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_serialization_deregister_comp_info_cb(struct wlan_objmgr_psoc *psoc, + enum wlan_umac_comp_id comp_id, + enum wlan_serialization_cmd_type cmd_type) +{ + struct wlan_serialization_psoc_priv_obj *ser_soc_obj; + QDF_STATUS status; + + status = wlan_serialization_validate_cmd(comp_id, cmd_type); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("invalid comp_id %d or cmd_type %d", + comp_id, cmd_type); + return status; + } + ser_soc_obj = wlan_serialization_get_psoc_priv_obj(psoc); + if (!ser_soc_obj) { + serialization_err("invalid ser_soc_obj"); + return QDF_STATUS_E_FAILURE; + } + ser_soc_obj->comp_info_cb[cmd_type][comp_id] = NULL; + + return QDF_STATUS_SUCCESS; +} + +enum wlan_serialization_cmd_status +wlan_serialization_non_scan_cmd_status(struct wlan_objmgr_pdev *pdev, + enum wlan_serialization_cmd_type cmd_id) +{ + serialization_enter(); + + return WLAN_SER_CMD_NOT_FOUND; +} + +enum wlan_serialization_cmd_status +wlan_serialization_cancel_request( + struct wlan_serialization_queued_cmd_info *req) +{ + QDF_STATUS status; + + serialization_enter(); + if (!req) { + serialization_err("given request is empty"); + return 
WLAN_SER_CMD_NOT_FOUND; + } + status = wlan_serialization_validate_cmd(req->requestor, req->cmd_type); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("req is not valid"); + return WLAN_SER_CMD_NOT_FOUND; + } + + return wlan_serialization_find_and_cancel_cmd(req); +} + +void wlan_serialization_remove_cmd( + struct wlan_serialization_queued_cmd_info *cmd) +{ + QDF_STATUS status; + + serialization_enter(); + if (!cmd) { + serialization_err("given request is empty"); + QDF_ASSERT(0); + return; + } + status = wlan_serialization_validate_cmd(cmd->requestor, cmd->cmd_type); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("cmd is not valid"); + QDF_ASSERT(0); + return; + } + wlan_serialization_find_and_remove_cmd(cmd); + + return; +} + +enum wlan_serialization_status +wlan_serialization_request(struct wlan_serialization_command *cmd) +{ + bool is_active_cmd_allowed; + QDF_STATUS status; + enum wlan_serialization_status serialization_status; + uint8_t comp_id; + struct wlan_serialization_psoc_priv_obj *ser_soc_obj; + union wlan_serialization_rules_info info; + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj = NULL; + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_serialization_command_list *cmd_list = NULL; + + serialization_enter(); + if (!cmd) { + serialization_err("serialization cmd is null"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + status = wlan_serialization_validate_cmd(cmd->source, cmd->cmd_type); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("cmd is not valid"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + + ser_soc_obj = wlan_serialization_get_psoc_obj(cmd); + if (!ser_soc_obj) { + serialization_err("ser_soc_obj is invalid"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + + pdev = wlan_serialization_get_pdev_from_cmd(cmd); + if (!pdev) { + serialization_err("pdev is invalid"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + + ser_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + 
WLAN_UMAC_COMP_SERIALIZATION); + if (!ser_pdev_obj) { + serialization_err("Invalid ser_pdev_obj"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + /* + * Get Component Info callback by calling + * each registered module + */ + for (comp_id = 0; comp_id < WLAN_UMAC_COMP_ID_MAX; comp_id++) { + if (!ser_soc_obj->comp_info_cb[cmd->cmd_type][comp_id]) + continue; + (ser_soc_obj->comp_info_cb[cmd->cmd_type][comp_id])(cmd->vdev, + &info); + if (!ser_soc_obj->apply_rules_cb[cmd->cmd_type]) + continue; + if (!ser_soc_obj->apply_rules_cb[cmd->cmd_type](&info, comp_id)) + return WLAN_SER_CMD_DENIED_RULES_FAILED; + } + + is_active_cmd_allowed = wlan_serialization_is_active_cmd_allowed(cmd); + serialization_status = wlan_serialization_enqueue_cmd( + cmd, is_active_cmd_allowed, &cmd_list); + if (WLAN_SER_CMD_ACTIVE == serialization_status) + wlan_serialization_activate_cmd(cmd_list, ser_pdev_obj); + + return serialization_status; +} + +enum wlan_serialization_cmd_status +wlan_serialization_vdev_scan_status(struct wlan_objmgr_vdev *vdev) +{ + bool cmd_in_active, cmd_in_pending; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj = + wlan_serialization_get_pdev_priv_obj(pdev); + + cmd_in_active = + wlan_serialization_is_cmd_in_vdev_list( + vdev, &ser_pdev_obj->active_scan_list); + + cmd_in_pending = + wlan_serialization_is_cmd_in_vdev_list( + vdev, &ser_pdev_obj->pending_scan_list); + + return wlan_serialization_is_cmd_in_active_pending( + cmd_in_active, cmd_in_pending); +} + +void wlan_serialization_flush_cmd( + struct wlan_serialization_queued_cmd_info *cmd) +{ + serialization_enter(); + if (!cmd) { + serialization_err("cmd is null, can't flush"); + return; + } + /* TODO: discuss and fill this API later */ + + return; +} + +enum wlan_serialization_cmd_status +wlan_serialization_pdev_scan_status(struct wlan_objmgr_pdev *pdev) +{ + bool cmd_in_active, cmd_in_pending; + struct wlan_serialization_pdev_priv_obj 
*ser_pdev_obj = + wlan_serialization_get_pdev_priv_obj(pdev); + + cmd_in_active = !qdf_list_empty(&ser_pdev_obj->active_scan_list); + cmd_in_pending = !qdf_list_empty(&ser_pdev_obj->pending_scan_list); + + return wlan_serialization_is_cmd_in_active_pending( + cmd_in_active, cmd_in_pending); +} + +struct wlan_serialization_command* +wlan_serialization_get_scan_cmd_using_scan_id( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, uint16_t scan_id, + uint8_t is_scan_cmd_from_active_queue) +{ + uint32_t qlen; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev; + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + struct wlan_serialization_command *cmd = NULL; + qdf_list_node_t *nnode = NULL; + qdf_list_t *queue; + + if (!psoc) { + serialization_err("invalid psoc"); + return cmd; + } + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, vdev_id, + WLAN_SERIALIZATION_ID); + if (!vdev) { + serialization_err("invalid vdev"); + return cmd; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + serialization_err("invalid pdev"); + goto release_vdev_ref; + } + + ser_pdev_obj = wlan_serialization_get_pdev_priv_obj(pdev); + if (!ser_pdev_obj) { + serialization_err("invalid ser_pdev_obj"); + goto release_vdev_ref; + } + if (is_scan_cmd_from_active_queue) + queue = &ser_pdev_obj->active_scan_list; + else + queue = &ser_pdev_obj->pending_scan_list; + qlen = wlan_serialization_list_size(queue, ser_pdev_obj); + while (qlen--) { + if (QDF_STATUS_SUCCESS != wlan_serialization_get_cmd_from_queue( + queue, &nnode, ser_pdev_obj)) { + serialization_debug("Node not found"); + break; + } + if (wlan_serialization_match_cmd_scan_id(nnode, &cmd, scan_id, + vdev, ser_pdev_obj)) { + serialization_debug("Cmd matched with the scan_id"); + break; + } + } +release_vdev_ref: + wlan_objmgr_vdev_release_ref(vdev, WLAN_SERIALIZATION_ID); + + return cmd; +} + +void *wlan_serialization_get_active_cmd(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + enum wlan_serialization_cmd_type 
cmd_type) +{ + uint32_t qlen; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev; + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + struct wlan_serialization_command_list *cmd_list = NULL; + void *umac_cmd = NULL; + qdf_list_node_t *nnode = NULL; + qdf_list_t *queue; + + if (!psoc) { + serialization_err("invalid psoc"); + return umac_cmd; + } + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, vdev_id, + WLAN_SERIALIZATION_ID); + if (!vdev) { + serialization_err("invalid vdev"); + return umac_cmd; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + serialization_err("invalid pdev"); + goto release_vdev_ref; + } + + ser_pdev_obj = wlan_serialization_get_pdev_priv_obj(pdev); + if (!ser_pdev_obj) { + serialization_err("invalid ser_pdev_obj"); + goto release_vdev_ref; + } + + queue = &ser_pdev_obj->active_list; + + qlen = qdf_list_size(queue); + if (!qlen) { + serialization_err("Empty Queue"); + goto release_vdev_ref; + } + while (qlen--) { + if (QDF_STATUS_SUCCESS != wlan_serialization_get_cmd_from_queue( + queue, &nnode, + ser_pdev_obj)) { + serialization_err("unsuccessful attempt"); + break; + } + cmd_list = qdf_container_of(nnode, + struct wlan_serialization_command_list, + node); + if (cmd_list->cmd.cmd_type == cmd_type && + cmd_list->cmd.vdev == vdev) { + serialization_debug("cmd_type[%d] matched", cmd_type); + umac_cmd = cmd_list->cmd.umac_cmd; + break; + } + } +release_vdev_ref: + wlan_objmgr_vdev_release_ref(vdev, WLAN_SERIALIZATION_ID); + + return umac_cmd; +} + +void wlan_serialization_purge_all_pdev_cmd(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + + if (!pdev) { + serialization_err("NULL pdev"); + return; + } + + ser_pdev_obj = wlan_serialization_get_pdev_priv_obj(pdev); + if (!ser_pdev_obj) { + serialization_err("invalid ser_pdev_obj"); + return; + } + + wlan_serialization_remove_all_cmd_from_queue( + &ser_pdev_obj->pending_scan_list, ser_pdev_obj, + pdev, NULL, NULL, false); + 
wlan_serialization_remove_all_cmd_from_queue( + &ser_pdev_obj->active_scan_list, ser_pdev_obj, + pdev, NULL, NULL, true); + wlan_serialization_remove_all_cmd_from_queue( + &ser_pdev_obj->pending_list, ser_pdev_obj, + pdev, NULL, NULL, false); + wlan_serialization_remove_all_cmd_from_queue( + &ser_pdev_obj->active_list, ser_pdev_obj, + pdev, NULL, NULL, true); +} + +static inline +void wlan_ser_purge_pdev_cmd_cb(struct wlan_objmgr_psoc *psoc, + void *object, void *arg) +{ + struct wlan_objmgr_pdev *pdev = (struct wlan_objmgr_pdev *)object; + + wlan_serialization_purge_all_pdev_cmd(pdev); +} + +void wlan_serialization_purge_all_cmd(struct wlan_objmgr_psoc *psoc) +{ + wlan_objmgr_iterate_obj_list(psoc, WLAN_PDEV_OP, + wlan_ser_purge_pdev_cmd_cb, NULL, 1, + WLAN_SERIALIZATION_ID); +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_dequeue.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_dequeue.c new file mode 100644 index 0000000000000000000000000000000000000000..d602730673a4839d72f9a8306a41f5a76d4281b7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_dequeue.c @@ -0,0 +1,667 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_dequeue.c + * This file defines the routines which are pertinent + * to the dequeue of commands. + */ +#include +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_utils_i.h" +#include +#include +#include +#include +#include + +void wlan_serialization_move_pending_to_active( + enum wlan_serialization_cmd_type cmd_type, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + qdf_list_t *pending_queue; + struct wlan_serialization_command_list *cmd_list; + struct wlan_serialization_command_list *active_cmd_list = NULL; + enum wlan_serialization_status status; + qdf_list_node_t *nnode = NULL; + QDF_STATUS list_peek_status; + + if (!ser_pdev_obj) { + serialization_err("Can't find ser_pdev_obj"); + return; + } + + if (cmd_type < WLAN_SER_CMD_NONSCAN) + pending_queue = &ser_pdev_obj->pending_scan_list; + else + pending_queue = &ser_pdev_obj->pending_list; + if (wlan_serialization_list_empty(pending_queue, ser_pdev_obj)) { + serialization_debug("nothing to move from pend to active que"); + serialization_debug("cmd_type - %d", cmd_type); + return; + } + list_peek_status = wlan_serialization_peek_front(pending_queue, &nnode, + ser_pdev_obj); + if (QDF_STATUS_SUCCESS != list_peek_status) { + serialization_err("can't read from pending queue"); + serialization_debug("cmd_type - %d", cmd_type); + return; + } + cmd_list = qdf_container_of(nnode, + struct wlan_serialization_command_list, node); + /* + * Idea is to peek command from pending queue, and try to + * push to active queue. 
If command goes to active queue + * successfully then remove the command from pending queue which + * we previously peeked. + * + * By doing this way, we will make sure that command will be removed + * from pending queue only when it was able to make it to active queue + */ + status = wlan_serialization_enqueue_cmd(&cmd_list->cmd, + true, + &active_cmd_list); + if (WLAN_SER_CMD_ACTIVE != status) { + serialization_err("Can't move cmd to activeQ id-%d type-%d", + cmd_list->cmd.cmd_id, cmd_list->cmd.cmd_type); + return; + } else { + /* + * Before removing the cmd from pending list and putting it + * back in the global list, check if someone has already + * deleted it. if so, do not do it again. if not, continue with + * removing the node. if the CMD_MARKED_FOR_DELETE is + * cleared after deletion, then inside the below API, + * it is checked if the command is active and in use or + * not before removing. + */ + if (!qdf_atomic_test_and_set_bit(CMD_MARKED_FOR_DELETE, + &cmd_list->cmd_in_use)) { + serialization_debug("SER_CMD marked for removal"); + wlan_serialization_put_back_to_global_list( + pending_queue, ser_pdev_obj, cmd_list); + } else { + serialization_debug("SER_CMD already being deleted"); + } + wlan_serialization_activate_cmd(active_cmd_list, + ser_pdev_obj); + } + + return; +} + +enum wlan_serialization_cmd_status +wlan_serialization_remove_all_cmd_from_queue(qdf_list_t *queue, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj, + struct wlan_objmgr_pdev *pdev, struct wlan_objmgr_vdev *vdev, + struct wlan_serialization_command *cmd, uint8_t is_active_queue) +{ + uint32_t qsize; + struct wlan_serialization_command_list *cmd_list = NULL; + qdf_list_node_t *nnode = NULL, *pnode = NULL; + enum wlan_serialization_cmd_status status = WLAN_SER_CMD_NOT_FOUND; + struct wlan_objmgr_psoc *psoc = NULL; + QDF_STATUS qdf_status; + + if (pdev) + psoc = wlan_pdev_get_psoc(pdev); + else if (vdev) + psoc = wlan_vdev_get_psoc(vdev); + else if (cmd && cmd->vdev) + psoc = 
 wlan_vdev_get_psoc(cmd->vdev); + else + serialization_debug("Can't find psoc"); + + qsize = wlan_serialization_list_size(queue, ser_pdev_obj); + while (!wlan_serialization_list_empty(queue, ser_pdev_obj) && qsize--) { + if (wlan_serialization_get_cmd_from_queue( + queue, &nnode, + ser_pdev_obj) != QDF_STATUS_SUCCESS) { + serialization_err("can't read cmd from queue"); + status = WLAN_SER_CMD_NOT_FOUND; + break; + } + cmd_list = qdf_container_of(nnode, + struct wlan_serialization_command_list, node); + if (cmd && !wlan_serialization_match_cmd_id_type( + nnode, cmd, + ser_pdev_obj)) { + pnode = nnode; + continue; + } + if (vdev && !wlan_serialization_match_cmd_vdev(nnode, vdev)) { + pnode = nnode; + continue; + } + if (pdev && !wlan_serialization_match_cmd_pdev(nnode, pdev)) { + pnode = nnode; + continue; + } + /* + * active queue can't be removed directly, requester needs to + * wait for active command response and send remove request for + * active command separately + */ + if (is_active_queue) { + if (!psoc || !cmd_list) { + serialization_err("psoc:0x%pK, cmd_list:0x%pK", + psoc, cmd_list); + status = WLAN_SER_CMD_NOT_FOUND; + break; + } + + qdf_status = wlan_serialization_find_and_stop_timer( + psoc, &cmd_list->cmd); + if (QDF_IS_STATUS_ERROR(qdf_status)) { + serialization_err("Can't find timer for active cmd"); + status = WLAN_SER_CMD_NOT_FOUND; + /* + * This should not happen, as an active command + * should always have the timer. + */ + QDF_BUG(0); + break; + } + + status = WLAN_SER_CMD_IN_ACTIVE_LIST; + } + /* + * There is a possibility that the cmd cleanup may happen + * in different contexts at the same time. + * e.g: ifconfig down coming in ioctl context and command + * complete event being handled in scheduler thread context. + * In such scenarios check if either of the threads have + * marked the command for delete and then proceed further + * with cleanup. 
if it is already marked for cleanup, then + * there is no need to proceed since the other thread is + * cleaning it up. + */ + if (qdf_atomic_test_and_set_bit(CMD_MARKED_FOR_DELETE, + &cmd_list->cmd_in_use)) { + serialization_debug("SER_CMD already being deleted"); + status = WLAN_SER_CMD_NOT_FOUND; + break; + } + serialization_debug("SER_CMD marked for removal"); + /* + * call pending cmd's callback to notify that + * it is being removed + */ + if (cmd_list->cmd.cmd_cb) { + /* caller should now do necessary clean up */ + cmd_list->cmd.cmd_cb(&cmd_list->cmd, + WLAN_SER_CB_CANCEL_CMD); + /* caller should release the memory */ + cmd_list->cmd.cmd_cb(&cmd_list->cmd, + WLAN_SER_CB_RELEASE_MEM_CMD); + } + + qdf_status = wlan_serialization_put_back_to_global_list(queue, + ser_pdev_obj, cmd_list); + if (QDF_STATUS_SUCCESS != qdf_status) { + serialization_err("can't remove cmd from queue"); + status = WLAN_SER_CMD_NOT_FOUND; + break; + } + nnode = pnode; + + if (!is_active_queue) + status = WLAN_SER_CMD_IN_PENDING_LIST; + } + + return status; +} + +/** + * wlan_serialization_remove_cmd_from_given_queue() - to remove command from + * given queue + * @queue: queue from which command needs to be removed + * @cmd: command to match in the queue + * @ser_pdev_obj: pointer to private pdev serialization object + * + * This API takes the queue, it matches the provided command from this queue + * and removes it. Before removing the command, it will notify the caller + * that if it needs to remove any memory allocated by caller. 
+ * + * Return: none + */ +static void wlan_serialization_remove_cmd_from_given_queue(qdf_list_t *queue, + struct wlan_serialization_command *cmd, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + uint32_t qsize; + struct wlan_serialization_command_list *cmd_list; + qdf_list_node_t *nnode = NULL; + QDF_STATUS status; + + if (!cmd) + return; + + qsize = wlan_serialization_list_size(queue, ser_pdev_obj); + while (qsize--) { + status = wlan_serialization_get_cmd_from_queue(queue, &nnode, + ser_pdev_obj); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("can't peek cmd_id[%d] type[%d]", + cmd->cmd_id, cmd->cmd_type); + break; + } + cmd_list = qdf_container_of(nnode, + struct wlan_serialization_command_list, node); + if (!wlan_serialization_match_cmd_id_type(nnode, cmd, + ser_pdev_obj)) + continue; + if (!wlan_serialization_match_cmd_vdev(nnode, cmd->vdev)) + continue; + /* + * Before removing the command from queue, check if it is + * already in process of being removed in some other + * context and if so, there is no need to continue with + * the removal. 
+ */ + if (qdf_atomic_test_and_set_bit(CMD_MARKED_FOR_DELETE, + &cmd_list->cmd_in_use)) { + serialization_debug("SER_CMD already being deleted"); + break; + } + serialization_debug("SER_CMD marked for removal"); + if (cmd_list->cmd.cmd_cb) { + /* caller should release the memory */ + cmd_list->cmd.cmd_cb(&cmd_list->cmd, + WLAN_SER_CB_RELEASE_MEM_CMD); + } + status = wlan_serialization_put_back_to_global_list(queue, + ser_pdev_obj, cmd_list); + + if (QDF_STATUS_SUCCESS != status) + serialization_err("Fail to add to free pool type[%d]", + cmd->cmd_type); + /* + * zero out the command, so caller would know that command has + * been removed + */ + qdf_mem_zero(cmd, sizeof(struct wlan_serialization_command)); + break; + } +} + +/** + * wlan_serialization_remove_cmd_from_active_queue() - helper function to remove + * cmd from active queue + * @psoc: pointer to psoc + * @obj: pointer to object getting passed by object manager + * @arg: argument passed by caller to object manager which comes to this cb + * + * caller provide this API as callback to object manager, and in turn + * object manager iterate through each pdev and call this API callback. 
 + * + * Return: none + */ +static void +wlan_serialization_remove_cmd_from_active_queue(struct wlan_objmgr_psoc *psoc, + void *obj, void *arg) +{ + qdf_list_t *queue; + struct wlan_objmgr_pdev *pdev = obj; + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + struct wlan_serialization_command *cmd = arg; + + if (!pdev || !cmd) { + serialization_err("Invalid param"); + return; + } + + ser_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SERIALIZATION); + if (!ser_pdev_obj) { + serialization_err("Invalid ser_pdev_obj"); + return; + } + + if (cmd->cmd_type < WLAN_SER_CMD_NONSCAN) + queue = &ser_pdev_obj->active_scan_list; + else + queue = &ser_pdev_obj->active_list; + + if (wlan_serialization_list_empty(queue, ser_pdev_obj)) { + serialization_err("Empty queue"); + return; + } + + wlan_serialization_remove_cmd_from_given_queue(queue, cmd, + ser_pdev_obj); + + return; +} + +/** + * wlan_serialization_remove_cmd_from_pending_queue() - helper function to remove + * cmd from pending queue + * @psoc: pointer to psoc + * @obj: pointer to object getting passed by object manager + * @arg: argument passed by caller to object manager which comes to this cb + * + * caller provide this API as callback to object manager, and in turn + * object manager iterate through each pdev and call this API callback. 
+ * + * Return: none + */ +static void +wlan_serialization_remove_cmd_from_pending_queue(struct wlan_objmgr_psoc *psoc, + void *obj, void *arg) +{ + qdf_list_t *queue; + struct wlan_objmgr_pdev *pdev = obj; + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + struct wlan_serialization_command *cmd = arg; + + if (!pdev || !cmd) { + serialization_err("Invalid param"); + return; + } + + ser_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SERIALIZATION); + if (!ser_pdev_obj) { + serialization_err("Invalid ser_pdev_obj"); + return; + } + + if (cmd->cmd_type < WLAN_SER_CMD_NONSCAN) + queue = &ser_pdev_obj->pending_scan_list; + else + queue = &ser_pdev_obj->pending_list; + + if (wlan_serialization_list_empty(queue, ser_pdev_obj)) { + serialization_err("Empty queue"); + return; + } + wlan_serialization_remove_cmd_from_given_queue(queue, + cmd, ser_pdev_obj); + + return; +} + +/** + * wlan_serialization_is_cmd_removed() - to check if requested command is + * removed + * @psoc: pointer to soc + * @cmd: given command to remove + * @check_active_queue: flag to find out whether command needs to be removed + * from active queue or pending queue + * + * Return: true if removed else false + */ +static bool +wlan_serialization_is_cmd_removed(struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd, + bool check_active_queue) +{ + if (!psoc) { + serialization_err("Invalid psoc"); + return false; + } + + if (check_active_queue) + wlan_objmgr_iterate_obj_list(psoc, WLAN_PDEV_OP, + wlan_serialization_remove_cmd_from_active_queue, + cmd, 1, WLAN_SERIALIZATION_ID); + else + wlan_objmgr_iterate_obj_list(psoc, WLAN_PDEV_OP, + wlan_serialization_remove_cmd_from_pending_queue, + cmd, 1, WLAN_SERIALIZATION_ID); + + if (cmd->vdev == NULL) + return true; + + return false; +} + +enum wlan_serialization_cmd_status +wlan_serialization_dequeue_cmd(struct wlan_serialization_command *cmd, + uint8_t only_active_cmd) +{ + enum wlan_serialization_cmd_status 
status = WLAN_SER_CMD_NOT_FOUND; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_psoc *psoc; + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + struct wlan_serialization_command cmd_backup; + enum wlan_serialization_cmd_type cmd_type; + bool is_cmd_removed; + + if (!cmd) { + serialization_err("NULL command"); + return status; + } + /* Dequeue process + * 1) peek through command structure and see what is the command type + * 2) two main types of commands to process + * a) SCAN + * b) NON-SCAN + * 3) for each command there are separate command queues per pdev + * 4) iterate through every pdev object and find the command and remove + */ + + pdev = wlan_serialization_get_pdev_from_cmd(cmd); + + if (pdev == NULL) { + serialization_err("invalid pdev"); + return status; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (psoc == NULL) { + serialization_err("invalid psoc"); + return status; + } + + /* get priv object by wlan_objmgr_vdev_get_comp_private_obj */ + ser_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_SERIALIZATION); + if (!ser_pdev_obj) { + serialization_err("ser_pdev_obj is empty"); + return status; + } + serialization_debug("command high_priority[%d] cmd_type[%d] cmd_id[%d]", + cmd->is_high_priority, cmd->cmd_type, cmd->cmd_id); + /* + * Pass the copy of command, instead of actual command because + * wlan_serialization_is_cmd_removed() api cleans the command + * buffer up on successful removal. We may need to use the command's + * content to stop the timer and etc. + */ + qdf_mem_copy(&cmd_backup, cmd, + sizeof(struct wlan_serialization_command)); + + cmd_type = cmd->cmd_type; + /* find and remove from active list */ + if (only_active_cmd) { + wlan_serialization_find_and_stop_timer(psoc, cmd); + is_cmd_removed = wlan_serialization_is_cmd_removed(psoc, + &cmd_backup, true); + if (true == is_cmd_removed) { + /* + * command is removed from active queue. 
now we have a + * room in active queue, so we will move from relevant + * pending queue to active queue + */ + wlan_serialization_move_pending_to_active(cmd_type, + ser_pdev_obj); + status = WLAN_SER_CMD_IN_ACTIVE_LIST; + } else { + serialization_err("cmd_type[%d], cmd_id[%d], vdev[%pK]", + cmd->cmd_type, cmd->cmd_id, cmd->vdev); + /* + * if you come here means there is a possibility + * that we couldn't find the command in active queue + * which user has requested to remove or we couldn't + * remove command from active queue and timer has been + * stopped, so active queue may possibly stuck. + */ + QDF_ASSERT(0); + status = WLAN_SER_CMD_NOT_FOUND; + } + serialization_debug("Request to remove only from active queue"); + return status; + } + qdf_mem_copy(&cmd_backup, cmd, + sizeof(struct wlan_serialization_command)); + /* find and remove from pending list */ + if (wlan_serialization_is_cmd_removed(psoc, &cmd_backup, false)) { + if (status != WLAN_SER_CMD_IN_ACTIVE_LIST) + status = WLAN_SER_CMD_IN_PENDING_LIST; + else + status = WLAN_SER_CMDS_IN_ALL_LISTS; + } + + return status; +} + +/** + * wlan_serialization_cmd_cancel_handler() - helper func to cancel cmd + * @ser_obj: private pdev ser obj + * @cmd: pointer to command + * @pdev: pointer to pdev + * @vdev: pointer to vdev + * @cmd_type: pointer to cmd_type + * + * This API will decide from which queue, command needs to be cancelled + * and pass that queue and other parameter required to cancel the command + * to helper function. 
+ * + * Return: wlan_serialization_cmd_status + */ +static enum wlan_serialization_cmd_status +wlan_serialization_cmd_cancel_handler( + struct wlan_serialization_pdev_priv_obj *ser_obj, + struct wlan_serialization_command *cmd, + struct wlan_objmgr_pdev *pdev, struct wlan_objmgr_vdev *vdev, + enum wlan_serialization_cmd_type cmd_type) +{ + enum wlan_serialization_cmd_status status; + qdf_list_t *queue; + + if (!ser_obj) { + serialization_err("invalid serial object"); + return WLAN_SER_CMD_NOT_FOUND; + } + /* remove pending commands first */ + if (cmd_type < WLAN_SER_CMD_NONSCAN) + queue = &ser_obj->pending_scan_list; + else + queue = &ser_obj->pending_list; + /* try and remove first from pending list */ + status = wlan_serialization_remove_all_cmd_from_queue(queue, + ser_obj, pdev, vdev, cmd, false); + if (cmd_type < WLAN_SER_CMD_NONSCAN) + queue = &ser_obj->active_scan_list; + else + queue = &ser_obj->active_list; + /* try and remove next from active list */ + if (WLAN_SER_CMD_IN_ACTIVE_LIST == + wlan_serialization_remove_all_cmd_from_queue(queue, + ser_obj, pdev, vdev, cmd, true)) { + if (WLAN_SER_CMD_IN_PENDING_LIST == status) + status = WLAN_SER_CMDS_IN_ALL_LISTS; + else + status = WLAN_SER_CMD_IN_ACTIVE_LIST; + } + + return status; +} + +enum wlan_serialization_cmd_status +wlan_serialization_find_and_cancel_cmd( + struct wlan_serialization_queued_cmd_info *cmd_info) +{ + struct wlan_serialization_command cmd; + enum wlan_serialization_cmd_status status = WLAN_SER_CMD_NOT_FOUND; + struct wlan_serialization_pdev_priv_obj *ser_obj = NULL; + struct wlan_objmgr_pdev *pdev; + + if (!cmd_info) { + serialization_err("Invalid cmd_info"); + return WLAN_SER_CMD_NOT_FOUND; + } + cmd.cmd_id = cmd_info->cmd_id; + cmd.cmd_type = cmd_info->cmd_type; + cmd.vdev = cmd_info->vdev; + pdev = wlan_serialization_get_pdev_from_cmd(&cmd); + if (!pdev) { + serialization_err("Invalid pdev"); + return WLAN_SER_CMD_NOT_FOUND; + } + ser_obj = wlan_serialization_get_pdev_priv_obj(pdev); + 
if (!ser_obj) { + serialization_err("Invalid ser_obj"); + return WLAN_SER_CMD_NOT_FOUND; + } + + switch (cmd_info->req_type) { + case WLAN_SER_CANCEL_SINGLE_SCAN: + /* remove scan cmd which matches the given cmd struct */ + status = wlan_serialization_cmd_cancel_handler(ser_obj, + &cmd, NULL, NULL, cmd.cmd_type); + break; + case WLAN_SER_CANCEL_PDEV_SCANS: + /* remove all scan cmds which matches the pdev object */ + status = wlan_serialization_cmd_cancel_handler(ser_obj, + NULL, + wlan_vdev_get_pdev(cmd.vdev), + NULL, cmd.cmd_type); + break; + case WLAN_SER_CANCEL_VDEV_SCANS: + /* remove all scan cmds which matches the vdev object */ + status = wlan_serialization_cmd_cancel_handler(ser_obj, + NULL, NULL, + cmd.vdev, cmd.cmd_type); + break; + case WLAN_SER_CANCEL_NON_SCAN_CMD: + /* remove nonscan cmd which matches the given cmd */ + status = wlan_serialization_cmd_cancel_handler(ser_obj, + &cmd, NULL, NULL, cmd.cmd_type); + break; + default: + serialization_err("Invalid request"); + } + + return status; +} + +QDF_STATUS wlan_serialization_find_and_remove_cmd( + struct wlan_serialization_queued_cmd_info *cmd_info) +{ + struct wlan_serialization_command cmd; + + if (!cmd_info) { + serialization_err("Invalid cmd_info"); + return QDF_STATUS_E_FAILURE; + } + + cmd.cmd_id = cmd_info->cmd_id; + cmd.cmd_type = cmd_info->cmd_type; + cmd.vdev = cmd_info->vdev; + if (WLAN_SER_CMD_IN_ACTIVE_LIST != + wlan_serialization_dequeue_cmd(&cmd, true)) { + serialization_err("Can't dequeue requested cmd_id[%d] type[%d]", + cmd_info->cmd_id, cmd_info->cmd_type); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_enqueue.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_enqueue.c new file mode 100644 index 0000000000000000000000000000000000000000..0fe75915900e9ce61fc482dc90262d88a360d339 --- /dev/null +++ 
b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_enqueue.c @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_enqueue.c + * This file defines the routines which are pertinent + * to the queuing of commands. 
+ */ +#include +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_utils_i.h" +#include +#include +#include +#include + +static enum wlan_serialization_status +wlan_serialization_add_cmd_to_given_queue(qdf_list_t *queue, + struct wlan_serialization_command *cmd, + struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj, + uint8_t is_cmd_for_active_queue, + struct wlan_serialization_command_list **pcmd_list) +{ + struct wlan_serialization_command_list *cmd_list; + enum wlan_serialization_status status; + QDF_STATUS qdf_status; + qdf_list_node_t *nnode; + + if (!cmd || !queue || !ser_pdev_obj || !psoc) { + serialization_err("Input arguments are not valid"); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + serialization_debug("add cmd cmd_id-%d type-%d", + cmd->cmd_id, cmd->cmd_type); + + if ((cmd->cmd_type < WLAN_SER_CMD_NONSCAN) && + !wlan_serialization_is_scan_cmd_allowed(psoc, ser_pdev_obj)) { + serialization_err("Failed to add scan cmd id %d type %d, Scan cmd list is full", + cmd->cmd_id, cmd->cmd_type); + return WLAN_SER_CMD_DENIED_LIST_FULL; + } + if (wlan_serialization_list_empty(&ser_pdev_obj->global_cmd_pool_list, + ser_pdev_obj)) { + serialization_err("Failed to add cmd id %d type %d, Cmd list is full", + cmd->cmd_id, cmd->cmd_type); + return WLAN_SER_CMD_DENIED_LIST_FULL; + } + if (wlan_serialization_remove_front(&ser_pdev_obj->global_cmd_pool_list, + &nnode, ser_pdev_obj) != + QDF_STATUS_SUCCESS) { + serialization_err("Failed to get cmd buffer from pool for cmd id %d type %d", + cmd->cmd_id, cmd->cmd_type); + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + cmd_list = qdf_container_of(nnode, + struct wlan_serialization_command_list, node); + qdf_mem_copy(&cmd_list->cmd, cmd, + sizeof(struct wlan_serialization_command)); + if (cmd->is_high_priority) + qdf_status = wlan_serialization_insert_front(queue, + &cmd_list->node, + ser_pdev_obj); + else + qdf_status = wlan_serialization_insert_back(queue, + 
&cmd_list->node, + ser_pdev_obj); + if (qdf_status != QDF_STATUS_SUCCESS) { + qdf_mem_zero(&cmd_list->cmd, + sizeof(struct wlan_serialization_command)); + qdf_status = wlan_serialization_insert_back( + &ser_pdev_obj->global_cmd_pool_list, + &cmd_list->node, + ser_pdev_obj); + if (QDF_STATUS_SUCCESS != qdf_status) { + serialization_err("can't put cmd back to global pool"); + QDF_ASSERT(0); + } + return WLAN_SER_CMD_DENIED_UNSPECIFIED; + } + qdf_atomic_set_bit(CMD_IS_ACTIVE, &cmd_list->cmd_in_use); + *pcmd_list = cmd_list; + if (is_cmd_for_active_queue) + status = WLAN_SER_CMD_ACTIVE; + else + status = WLAN_SER_CMD_PENDING; + + return status; +} + +void wlan_serialization_activate_cmd( + struct wlan_serialization_command_list *cmd_list, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; + qdf_list_t *queue = NULL; + struct wlan_objmgr_psoc *psoc = NULL; + + if (!cmd_list) { + serialization_err("invalid cmd_list"); + QDF_ASSERT(0); + return; + } + if (cmd_list->cmd.cmd_type < WLAN_SER_CMD_NONSCAN) + queue = &ser_pdev_obj->active_scan_list; + else + queue = &ser_pdev_obj->active_list; + if (wlan_serialization_list_empty(queue, ser_pdev_obj)) { + serialization_err("nothing in active queue"); + QDF_ASSERT(0); + return; + } + if (!cmd_list->cmd.cmd_cb) { + serialization_err("no cmd_cb for cmd type:%d, id: %d", + cmd_list->cmd.cmd_type, + cmd_list->cmd.cmd_id); + QDF_ASSERT(0); + return; + } + + if (cmd_list->cmd.vdev) { + psoc = wlan_vdev_get_psoc(cmd_list->cmd.vdev); + if (psoc == NULL) { + serialization_err("invalid psoc"); + return; + } + } else { + serialization_err("invalid cmd.vdev"); + return; + } + /* + * command is already pushed to active queue above + * now start the timer and notify requestor + */ + wlan_serialization_find_and_start_timer(psoc, + &cmd_list->cmd); + /* + * Remember that serialization module may send + * this callback in same context through which it + * received the serialization request. 
Due to which + * it is caller's responsibility to ensure acquiring + * and releasing its own lock appropriately. + */ + qdf_status = cmd_list->cmd.cmd_cb(&cmd_list->cmd, + WLAN_SER_CB_ACTIVATE_CMD); + if (QDF_IS_STATUS_SUCCESS(qdf_status)) + return; + /* + * Since the command activation has not succeeded, + * remove the cmd from the active list and before + * doing so, try to mark the cmd for delete so that + * it is not accessed in other thread context for deletion + * again. + */ + if (wlan_serialization_is_cmd_present_in_active_queue( + psoc, &cmd_list->cmd)) { + wlan_serialization_find_and_stop_timer(psoc, + &cmd_list->cmd); + if (qdf_atomic_test_and_set_bit(CMD_MARKED_FOR_DELETE, + &cmd_list->cmd_in_use)) { + serialization_debug("SER_CMD already being deleted"); + } else { + serialization_debug("SER_CMD marked for removal"); + cmd_list->cmd.cmd_cb(&cmd_list->cmd, + WLAN_SER_CB_RELEASE_MEM_CMD); + wlan_serialization_put_back_to_global_list(queue, + ser_pdev_obj, + cmd_list); + } + } else { + serialization_err("active cmd :%d,id:%d is removed already", + cmd_list->cmd.cmd_type, + cmd_list->cmd.cmd_id); + } + wlan_serialization_move_pending_to_active( + cmd_list->cmd.cmd_type, + ser_pdev_obj); +} + +enum wlan_serialization_status +wlan_serialization_enqueue_cmd(struct wlan_serialization_command *cmd, + uint8_t is_cmd_for_active_queue, + struct wlan_serialization_command_list **pcmd_list) +{ + enum wlan_serialization_status status = WLAN_SER_CMD_DENIED_UNSPECIFIED; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_psoc *psoc; + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + qdf_list_t *queue; + + /* Enqueue process + * 1) peek through command structure and see what is the command type + * 2) two main types of commands to process + * a) SCAN + * b) NON-SCAN + * 3) for each command there are separate command queues per pdev + * 4) pull pdev from vdev structure and get the command queue associated + * with that pdev and try to enqueue on those queue + * 
5) Thumb rule: + * a) There could be only 1 active non-scan command at a + * time including all total non-scan commands of all pdevs. + * + * example: pdev1 has 1 non-scan active command and + * pdev2 got 1 non-scan command then that command should go to + * pdev2's pending queue + * + * b) There could be only N number of scan commands at a time + * including all total scan commands of all pdevs + * + * example: Let's say N=8, + * pdev1's vdev1 has 5 scan command, pdev2's vdev1 has 3 + * scan commands, if we get scan request on vdev2 then it will go + * to pending queue of vdev2 as we reached max allowed scan active + * command. + */ + if (!cmd) { + serialization_err("NULL command"); + return status; + } + if (!cmd->cmd_cb) { + serialization_err("no cmd_cb for cmd type:%d, id: %d", + cmd->cmd_type, + cmd->cmd_id); + return status; + } + pdev = wlan_serialization_get_pdev_from_cmd(cmd); + if (pdev == NULL) { + serialization_err("invalid pdev"); + return status; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (psoc == NULL) { + serialization_err("invalid psoc"); + return status; + } + + /* get priv object by wlan_objmgr_vdev_get_comp_private_obj */ + ser_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_SERIALIZATION); + if (!ser_pdev_obj) { + serialization_err("Can't find ser_pdev_obj"); + return status; + } + + serialization_debug("command high_priority[%d] cmd_type[%d] cmd_id[%d]", + cmd->is_high_priority, cmd->cmd_type, cmd->cmd_id); + if (cmd->cmd_type < WLAN_SER_CMD_NONSCAN) { + if (is_cmd_for_active_queue) + queue = &ser_pdev_obj->active_scan_list; + else + queue = &ser_pdev_obj->pending_scan_list; + } else { + if (is_cmd_for_active_queue) + queue = &ser_pdev_obj->active_list; + else + queue = &ser_pdev_obj->pending_list; + } + + if (wlan_serialization_is_cmd_present_queue(cmd, + is_cmd_for_active_queue)) { + serialization_err("duplicate command, can't enqueue"); + return status; + } + + return 
wlan_serialization_add_cmd_to_given_queue(queue, cmd, psoc, + ser_pdev_obj, is_cmd_for_active_queue, pcmd_list); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_legacy_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_legacy_api.c new file mode 100644 index 0000000000000000000000000000000000000000..8fe7af6c8987a3df5298fcd2c369a214402b5312 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_legacy_api.c @@ -0,0 +1,400 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_legacy_api.c + * This file provides prototypes of the routines needed for the + * legacy mcl serialization to utilize the services provided by the + * serialization component. 
+ */ + +#include "wlan_serialization_legacy_api.h" +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_utils_i.h" +#include "wlan_objmgr_vdev_obj.h" + +static struct wlan_objmgr_pdev *wlan_serialization_get_first_pdev( + struct wlan_objmgr_psoc *psoc) +{ + struct wlan_objmgr_pdev *pdev; + uint8_t i = 0; + + if (!psoc) { + serialization_err("invalid psoc"); + return NULL; + } + for (i = 0; i < WLAN_UMAC_MAX_PDEVS; i++) { + pdev = wlan_objmgr_get_pdev_by_id(psoc, i, + WLAN_SERIALIZATION_ID); + if (pdev != NULL) + break; + } + + return pdev; +} + +static struct wlan_serialization_pdev_priv_obj * +wlan_serialization_get_pdev_priv_obj_using_psoc(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_objmgr_pdev *pdev = NULL; + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + + if (!psoc) { + serialization_err("invalid psoc"); + return NULL; + } + + pdev = wlan_serialization_get_first_pdev(psoc); + if (!pdev) { + serialization_err("invalid pdev"); + return NULL; + } + + ser_pdev_obj = wlan_serialization_get_pdev_priv_obj(pdev); + wlan_objmgr_pdev_release_ref(pdev, WLAN_SERIALIZATION_ID); + if (!ser_pdev_obj) { + serialization_err("invalid ser_pdev_obj"); + return NULL; + } + + return ser_pdev_obj; +} + +uint32_t wlan_serialization_get_active_list_count( + struct wlan_objmgr_psoc *psoc, + uint8_t is_cmd_from_active_scan_queue) +{ + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + qdf_list_t *queue; + uint32_t count; + + ser_pdev_obj = wlan_serialization_get_pdev_priv_obj_using_psoc(psoc); + if (!ser_pdev_obj) { + serialization_err("invalid ser_pdev_obj"); + return 0; + } + + wlan_serialization_acquire_lock(&ser_pdev_obj->pdev_ser_list_lock); + if (is_cmd_from_active_scan_queue) + queue = &ser_pdev_obj->active_scan_list; + else + queue = &ser_pdev_obj->active_list; + + count = qdf_list_size(queue); + wlan_serialization_release_lock(&ser_pdev_obj->pdev_ser_list_lock); + + return count; +} + +uint32_t wlan_serialization_get_pending_list_count( + 
struct wlan_objmgr_psoc *psoc, + uint8_t is_cmd_from_pending_scan_queue) +{ + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + qdf_list_t *queue; + uint32_t count = 0; + + ser_pdev_obj = wlan_serialization_get_pdev_priv_obj_using_psoc(psoc); + if (!ser_pdev_obj) { + serialization_err("invalid ser_pdev_obj"); + return 0; + } + + wlan_serialization_acquire_lock(&ser_pdev_obj->pdev_ser_list_lock); + if (is_cmd_from_pending_scan_queue) + queue = &ser_pdev_obj->pending_scan_list; + else + queue = &ser_pdev_obj->pending_list; + + count = qdf_list_size(queue); + wlan_serialization_release_lock(&ser_pdev_obj->pdev_ser_list_lock); + + return count; +} + +struct wlan_serialization_command* +wlan_serialization_peek_head_active_cmd_using_psoc( + struct wlan_objmgr_psoc *psoc, + uint8_t is_cmd_from_active_scan_queue) +{ + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + struct wlan_serialization_command_list *cmd_list = NULL; + struct wlan_serialization_command *cmd = NULL; + qdf_list_node_t *nnode = NULL; + qdf_list_t *queue; + + ser_pdev_obj = wlan_serialization_get_pdev_priv_obj_using_psoc(psoc); + if (!ser_pdev_obj) { + serialization_err("invalid ser_pdev_obj"); + return NULL; + } + + if (is_cmd_from_active_scan_queue) + queue = &ser_pdev_obj->active_scan_list; + else + queue = &ser_pdev_obj->active_list; + if (wlan_serialization_list_empty(queue, ser_pdev_obj)) { + serialization_err("Empty Queue"); + goto end; + } + + if (QDF_STATUS_SUCCESS != wlan_serialization_get_cmd_from_queue(queue, + &nnode, ser_pdev_obj)) { + serialization_err("Can't get command from queue"); + goto end; + } + + cmd_list = qdf_container_of(nnode, + struct wlan_serialization_command_list, node); + cmd = &cmd_list->cmd; + serialization_debug("cmd_type[%d], cmd_id[%d]", + cmd_list->cmd.cmd_type, cmd_list->cmd.cmd_id); + +end: + return cmd; +} + +struct wlan_serialization_command* +wlan_serialization_peek_head_pending_cmd_using_psoc( + struct wlan_objmgr_psoc *psoc, + uint8_t 
is_cmd_from_pending_scan_queue) +{ + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + struct wlan_serialization_command_list *cmd_list = NULL; + struct wlan_serialization_command *cmd = NULL; + qdf_list_node_t *nnode = NULL; + qdf_list_t *queue; + + ser_pdev_obj = wlan_serialization_get_pdev_priv_obj_using_psoc(psoc); + if (!ser_pdev_obj) { + serialization_err("invalid ser_pdev_obj"); + return NULL; + } + if (is_cmd_from_pending_scan_queue) + queue = &ser_pdev_obj->pending_scan_list; + else + queue = &ser_pdev_obj->pending_list; + if (wlan_serialization_list_empty(queue, ser_pdev_obj)) { + serialization_err("Empty Queue"); + goto end; + } + + if (QDF_STATUS_SUCCESS != wlan_serialization_get_cmd_from_queue(queue, + &nnode, ser_pdev_obj)) { + serialization_err("Can't get command from queue"); + goto end; + } + cmd_list = qdf_container_of(nnode, + struct wlan_serialization_command_list, node); + cmd = &cmd_list->cmd; + serialization_debug("cmd_type[%d] cmd_id[%d]matched", + cmd_list->cmd.cmd_type, cmd_list->cmd.cmd_id); + +end: + return cmd; +} + +static struct wlan_serialization_command* +wlan_serialization_get_list_next_node(qdf_list_t *queue, + struct wlan_serialization_command *cmd, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + struct wlan_serialization_command_list *cmd_list = NULL; + qdf_list_node_t *pnode = NULL, *nnode = NULL; + bool found = false; + uint32_t i = 0; + QDF_STATUS status; + struct wlan_serialization_command *ret_cmd = NULL; + + i = wlan_serialization_list_size(queue, ser_pdev_obj); + if (i == 0) { + serialization_err("Empty Queue"); + return NULL; + } + while (i--) { + if (!cmd_list) + status = wlan_serialization_peek_front(queue, &nnode, + ser_pdev_obj); + else + status = wlan_serialization_peek_next(queue, pnode, + &nnode, + ser_pdev_obj); + + if ((status != QDF_STATUS_SUCCESS) || found) + break; + + pnode = nnode; + cmd_list = qdf_container_of( + nnode, + struct wlan_serialization_command_list, + node); + if 
(wlan_serialization_match_cmd_id_type(nnode, cmd, + ser_pdev_obj) && + wlan_serialization_match_cmd_vdev(nnode, + cmd->vdev)) { + found = true; + } + nnode = NULL; + } + if (nnode && found) { + cmd_list = qdf_container_of(nnode, + struct wlan_serialization_command_list, node); + ret_cmd = &cmd_list->cmd; + } + if (!found) { + serialization_err("Can't locate next command"); + return NULL; + } + if (!nnode) { + serialization_debug("next node is empty, so fine"); + return NULL; + } + + return ret_cmd; +} + +struct wlan_serialization_command* +wlan_serialization_get_active_list_next_node_using_psoc( + struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *prev_cmd, + uint8_t is_cmd_for_active_scan_queue) +{ + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + qdf_list_t *queue; + + if (!prev_cmd) { + serialization_err("invalid prev_cmd"); + return NULL; + } + + ser_pdev_obj = wlan_serialization_get_pdev_priv_obj_using_psoc(psoc); + if (!ser_pdev_obj) { + serialization_err("invalid ser_pdev_obj"); + return NULL; + } + + if (is_cmd_for_active_scan_queue) + queue = &ser_pdev_obj->active_scan_list; + else + queue = &ser_pdev_obj->active_list; + + return wlan_serialization_get_list_next_node(queue, prev_cmd, + ser_pdev_obj); +} + +struct wlan_serialization_command* +wlan_serialization_get_pending_list_next_node_using_psoc( + struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *prev_cmd, + uint8_t is_cmd_for_pending_scan_queue) +{ + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + qdf_list_t *queue; + + if (!prev_cmd) { + serialization_err("invalid prev_cmd"); + return NULL; + } + + ser_pdev_obj = wlan_serialization_get_pdev_priv_obj_using_psoc(psoc); + if (!ser_pdev_obj) { + serialization_err("invalid ser_pdev_obj"); + return NULL; + } + if (is_cmd_for_pending_scan_queue) + queue = &ser_pdev_obj->pending_scan_list; + else + queue = &ser_pdev_obj->pending_list; + + return wlan_serialization_get_list_next_node(queue, prev_cmd, + 
ser_pdev_obj); +} + +void wlan_serialization_purge_cmd_list_by_vdev_id(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, bool purge_scan_active_queue, + bool purge_scan_pending_queue, + bool purge_nonscan_active_queue, + bool purge_nonscan_pending_queue, + bool purge_all_queues) +{ + struct wlan_objmgr_vdev *vdev; + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, vdev_id, + WLAN_SERIALIZATION_ID); + if (!vdev) { + serialization_err("Invalid vdev"); + return; + } + wlan_serialization_purge_cmd_list(psoc, vdev, purge_scan_active_queue, + purge_scan_pending_queue, + purge_nonscan_active_queue, + purge_nonscan_pending_queue, + purge_all_queues); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SERIALIZATION_ID); +} + +void wlan_serialization_purge_cmd_list(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev, + bool purge_scan_active_queue, + bool purge_scan_pending_queue, + bool purge_nonscan_active_queue, + bool purge_nonscan_pending_queue, + bool purge_all_queues) +{ + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + struct wlan_objmgr_pdev *pdev = NULL; + + if (!psoc) { + serialization_err("Invalid psoc"); + return; + } + ser_pdev_obj = wlan_serialization_get_pdev_priv_obj_using_psoc(psoc); + if (!ser_pdev_obj) { + serialization_err("Invalid ser_pdev_obj"); + return; + } + + pdev = wlan_serialization_get_first_pdev(psoc); + if (!pdev) { + serialization_err("Invalid pdev"); + return; + } + + if (purge_all_queues || purge_scan_active_queue) { + wlan_serialization_remove_all_cmd_from_queue( + &ser_pdev_obj->active_scan_list, ser_pdev_obj, + pdev, vdev, NULL, true); + } + if (purge_all_queues || purge_scan_pending_queue) { + wlan_serialization_remove_all_cmd_from_queue( + &ser_pdev_obj->pending_scan_list, ser_pdev_obj, + pdev, vdev, NULL, false); + } + if (purge_all_queues || purge_nonscan_active_queue) { + wlan_serialization_remove_all_cmd_from_queue( + &ser_pdev_obj->active_list, ser_pdev_obj, + pdev, vdev, NULL, true); + } + if (purge_all_queues || 
purge_nonscan_pending_queue) { + wlan_serialization_remove_all_cmd_from_queue( + &ser_pdev_obj->pending_list, ser_pdev_obj, + pdev, vdev, NULL, false); + } + wlan_objmgr_pdev_release_ref(pdev, WLAN_SERIALIZATION_ID); + + return; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_main.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_main.c new file mode 100644 index 0000000000000000000000000000000000000000..476e5d32d5d73cb78faa64a81376c4b12e941005 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_main.c @@ -0,0 +1,445 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_serialization_main.c + * This file defines the important functions pertinent to + * serialization to initialize and de-initialize the + * component. 
+ */ +#include "qdf_status.h" +#include "qdf_list.h" +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_global_obj.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_rules_i.h" +#include "wlan_serialization_utils_i.h" + +QDF_STATUS wlan_serialization_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + struct wlan_serialization_psoc_priv_obj *ser_soc_obj = + wlan_serialization_get_psoc_priv_obj(psoc); + + if (!ser_soc_obj) { + serialization_err("invalid ser_soc_obj"); + return QDF_STATUS_E_FAILURE; + } + + /* + * purge all serialization command if there are any pending to make + * sure memory and vdev ref are freed. + */ + wlan_serialization_purge_all_cmd(psoc); + /* clean up all timers before exiting */ + status = wlan_serialization_cleanup_all_timers(ser_soc_obj); + if (status != QDF_STATUS_SUCCESS) + serialization_err("ser cleanning up all timer failed"); + + qdf_mem_free(ser_soc_obj->timers); + ser_soc_obj->timers = NULL; + ser_soc_obj->max_active_cmds = 0; + + wlan_serialization_destroy_lock(&ser_soc_obj->timer_lock); + return status; +} + +QDF_STATUS wlan_serialization_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + uint8_t pdev_count; + struct wlan_serialization_psoc_priv_obj *ser_soc_obj = + wlan_serialization_get_psoc_priv_obj(psoc); + + if (!ser_soc_obj) { + serialization_err("invalid ser_soc_obj"); + return QDF_STATUS_E_FAILURE; + } + /* TODO:Get WLAN_SERIALIZATION_MAX_ACTIVE_SCAN_CMDS frm service ready */ + pdev_count = wlan_psoc_get_pdev_count(psoc); + ser_soc_obj->max_active_cmds = WLAN_SERIALIZATION_MAX_ACTIVE_SCAN_CMDS + + pdev_count; + + serialization_debug("max_active_cmds %d", ser_soc_obj->max_active_cmds); + ser_soc_obj->timers = + qdf_mem_malloc(sizeof(struct wlan_serialization_timer) * + ser_soc_obj->max_active_cmds); + if (NULL == ser_soc_obj->timers) { + serialization_alert("Mem alloc failed for ser timers"); + return QDF_STATUS_E_NOMEM; + } + + 
wlan_serialization_create_lock(&ser_soc_obj->timer_lock); + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_serialization_psoc_obj_create_notification() - PSOC obj create callback + * @psoc: PSOC object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization and + * when obj manager gets its turn to create the object, it would notify each + * component with the corresponding callback registered to inform the + * completion of the creation of the respective object. + * + * Return: QDF Status + */ +static QDF_STATUS wlan_serialization_psoc_obj_create_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list) +{ + struct wlan_serialization_psoc_priv_obj *soc_ser_obj; + QDF_STATUS status = QDF_STATUS_E_NOMEM; + + soc_ser_obj = + qdf_mem_malloc(sizeof(*soc_ser_obj)); + if (NULL == soc_ser_obj) { + serialization_alert("Mem alloc failed for ser psoc priv obj"); + return QDF_STATUS_E_NOMEM; + } + status = wlan_objmgr_psoc_component_obj_attach( + psoc, + WLAN_UMAC_COMP_SERIALIZATION, + soc_ser_obj, + QDF_STATUS_SUCCESS); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_mem_free(soc_ser_obj); + serialization_err("Obj attach failed"); + return status; + } + serialization_debug("ser psoc obj created"); + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_serialization_destroy_cmd_pool() - Destroy the global cmd pool + * @ser_pdev_obj: Serialization private pdev object + * + * Return: None + */ +static void wlan_serialization_destroy_cmd_pool( + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + + qdf_list_node_t *node = NULL; + struct wlan_serialization_command_list *cmd_list; + + while (!qdf_list_empty(&ser_pdev_obj->global_cmd_pool_list)) { + qdf_list_remove_front(&ser_pdev_obj->global_cmd_pool_list, + &node); + cmd_list = (struct wlan_serialization_command_list *)node; + serialization_debug("Node being freed from global pool %pK", + cmd_list); + qdf_mem_free(cmd_list); + + } + 
qdf_list_destroy(&ser_pdev_obj->global_cmd_pool_list); +} + +/** + * wlan_serialization_create_cmd_pool() - Create the global cmd pool + * @pdev: PDEV Object + * @ser_pdev_obj: Serialization private pdev object + * + * Global command pool of memory is created here. + * It is safe to allocate memory individually for each command rather than + * requesting for a huge chunk of memory at once. + * + * The individual command nodes allocated above will keep moving between + * the active, pending and global pool lists dynamically, but all the + * memory will be freed during driver unload only. + * + * Return: QDF Status + */ +static QDF_STATUS +wlan_serialization_create_cmd_pool(struct wlan_objmgr_pdev *pdev, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + struct wlan_serialization_command_list *cmd_list_ptr; + uint8_t i; + + qdf_list_create(&ser_pdev_obj->global_cmd_pool_list, + WLAN_SERIALIZATION_MAX_GLOBAL_POOL_CMDS); + for (i = 0; i < WLAN_SERIALIZATION_MAX_GLOBAL_POOL_CMDS; i++) { + cmd_list_ptr = qdf_mem_malloc(sizeof(*cmd_list_ptr)); + if (NULL == cmd_list_ptr) { + serialization_alert("Mem alloc failed for cmd node"); + wlan_serialization_destroy_cmd_pool(ser_pdev_obj); + return QDF_STATUS_E_NOMEM; + } + qdf_list_insert_back( + &ser_pdev_obj->global_cmd_pool_list, + &cmd_list_ptr->node); + cmd_list_ptr->cmd_in_use = 0; + serialization_debug("Created node at %pK and inserted to pool", + cmd_list_ptr); + } + + return QDF_STATUS_SUCCESS; +} + + +/** + * wlan_serialization_pdev_obj_create_notification() - PDEV obj create callback + * @pdev: PDEV object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization and + * when obj manager gets its turn to create the object, it would notify each + * component with the corresponding callback registered to inform the + * completion of the creation of the respective object. 
+ * + * Return: QDF Status + */ +static QDF_STATUS wlan_serialization_pdev_obj_create_notification( + struct wlan_objmgr_pdev *pdev, void *arg_list) +{ + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + QDF_STATUS status; + + ser_pdev_obj = + qdf_mem_malloc(sizeof(*ser_pdev_obj)); + if (NULL == ser_pdev_obj) { + serialization_alert("Mem alloc failed for ser pdev obj"); + return QDF_STATUS_E_NOMEM; + } + status = wlan_serialization_create_lock( + &ser_pdev_obj->pdev_ser_list_lock); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("Failed to create serialization lock"); + goto err_mem_free; + } + qdf_list_create(&ser_pdev_obj->active_list, + WLAN_SERIALIZATION_MAX_ACTIVE_CMDS); + qdf_list_create(&ser_pdev_obj->pending_list, + WLAN_SERIALIZATION_MAX_GLOBAL_POOL_CMDS); + qdf_list_create(&ser_pdev_obj->active_scan_list, + WLAN_SERIALIZATION_MAX_ACTIVE_SCAN_CMDS); + qdf_list_create(&ser_pdev_obj->pending_scan_list, + WLAN_SERIALIZATION_MAX_GLOBAL_POOL_CMDS); + status = wlan_serialization_create_cmd_pool(pdev, ser_pdev_obj); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("ser_pdev_obj failed status %d", status); + goto err_destroy_cmd_pool; + } + status = wlan_objmgr_pdev_component_obj_attach(pdev, + WLAN_UMAC_COMP_SERIALIZATION, ser_pdev_obj, + QDF_STATUS_SUCCESS); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("serialization pdev obj attach failed"); + goto err_destroy_cmd_pool; + } + + return QDF_STATUS_SUCCESS; + +err_destroy_cmd_pool: + wlan_serialization_destroy_cmd_pool(ser_pdev_obj); + qdf_list_destroy(&ser_pdev_obj->pending_scan_list); + qdf_list_destroy(&ser_pdev_obj->active_scan_list); + qdf_list_destroy(&ser_pdev_obj->pending_list); + qdf_list_destroy(&ser_pdev_obj->active_list); + wlan_serialization_destroy_lock(&ser_pdev_obj->pdev_ser_list_lock); + +err_mem_free: + qdf_mem_free(ser_pdev_obj); + + return status; +} + +/** + * wlan_serialization_psoc_obj_destroy_notification() - PSOC obj delete callback + * @psoc: PSOC 
object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization and + * when obj manager gets its turn to delete the object, it would notify each + * component with the corresponding callback registered to inform the + * completion of the deletion of the respective object. + * + * Return: QDF Status + */ +static QDF_STATUS wlan_serialization_psoc_obj_destroy_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list) +{ + QDF_STATUS status; + struct wlan_serialization_psoc_priv_obj *ser_soc_obj = + wlan_serialization_get_psoc_priv_obj(psoc); + + if (NULL == ser_soc_obj) { + serialization_err("invalid ser_soc_obj"); + return QDF_STATUS_E_FAULT; + } + status = wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_SERIALIZATION, + ser_soc_obj); + if (status != QDF_STATUS_SUCCESS) + serialization_err("ser psoc private obj detach failed"); + serialization_debug("ser psoc obj deleted with status %d", status); + qdf_mem_free(ser_soc_obj); + + return status; +} + +/** + * wlan_serialization_pdev_obj_destroy_notification() - PDEV obj delete callback + * @pdev: PDEV object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization and + * when obj manager gets its turn to delete the object, it would notify each + * component with the corresponding callback registered to inform the + * completion of the deletion of the respective object. 
+ * + * Return: QDF Status + */ +static QDF_STATUS wlan_serialization_pdev_obj_destroy_notification( + struct wlan_objmgr_pdev *pdev, void *arg_list) +{ + QDF_STATUS status; + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj = + wlan_serialization_get_pdev_priv_obj(pdev); + + if (!ser_pdev_obj) { + serialization_err("invalid ser_pdev_obj"); + return QDF_STATUS_E_FAULT; + } + status = wlan_objmgr_pdev_component_obj_detach(pdev, + WLAN_UMAC_COMP_SERIALIZATION, ser_pdev_obj); + wlan_serialization_destroy_list(ser_pdev_obj, + &ser_pdev_obj->active_list); + wlan_serialization_destroy_list(ser_pdev_obj, + &ser_pdev_obj->pending_list); + wlan_serialization_destroy_list(ser_pdev_obj, + &ser_pdev_obj->active_scan_list); + wlan_serialization_destroy_list(ser_pdev_obj, + &ser_pdev_obj->pending_scan_list); + wlan_serialization_destroy_cmd_pool(ser_pdev_obj); + serialization_debug("ser pdev obj detached with status %d", status); + status = wlan_serialization_destroy_lock( + &ser_pdev_obj->pdev_ser_list_lock); + if (status != QDF_STATUS_SUCCESS) + serialization_err("Failed to destroy serialization lock"); + qdf_mem_free(ser_pdev_obj); + + return status; +} + +QDF_STATUS wlan_serialization_init(void) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + status = wlan_objmgr_register_psoc_create_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_psoc_obj_create_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("Failed to reg soc ser obj create handler"); + goto err_psoc_create; + } + + status = wlan_objmgr_register_psoc_destroy_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_psoc_obj_destroy_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("Failed to reg soc ser obj delete handler"); + goto err_psoc_delete; + } + + status = wlan_objmgr_register_pdev_create_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_pdev_obj_create_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + 
serialization_err("Failed to reg pdev ser obj create handler"); + goto err_pdev_create; + } + + status = wlan_objmgr_register_pdev_destroy_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_pdev_obj_destroy_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("Failed to reg pdev ser obj delete handler"); + goto err_pdev_delete; + } + + serialization_debug("serialization handlers registered with obj mgr"); + + return QDF_STATUS_SUCCESS; + +err_pdev_delete: + wlan_objmgr_unregister_pdev_create_handler(WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_pdev_obj_create_notification, NULL); +err_pdev_create: + wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_psoc_obj_destroy_notification, NULL); +err_psoc_delete: + wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_psoc_obj_create_notification, NULL); +err_psoc_create: + return status; + +} + +QDF_STATUS wlan_serialization_deinit(void) +{ + QDF_STATUS status; + QDF_STATUS ret_status = QDF_STATUS_SUCCESS; + + status = wlan_objmgr_unregister_psoc_create_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_psoc_obj_create_notification, + NULL); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("unreg fail for psoc ser obj create notf:%d", + status); + ret_status = QDF_STATUS_E_FAILURE; + } + status = wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_psoc_obj_destroy_notification, + NULL); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("unreg fail for psoc ser obj destroy notf:%d", + status); + ret_status = QDF_STATUS_E_FAILURE; + } + + status = wlan_objmgr_unregister_pdev_create_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_pdev_obj_create_notification, + NULL); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("unreg fail for pdev ser obj create notf:%d", + status); + ret_status = 
QDF_STATUS_E_FAILURE; + } + + status = wlan_objmgr_unregister_pdev_destroy_handler( + WLAN_UMAC_COMP_SERIALIZATION, + wlan_serialization_pdev_obj_destroy_notification, + NULL); + if (status != QDF_STATUS_SUCCESS) { + serialization_err("unreg fail for pdev ser destroy notf:%d", + status); + ret_status = QDF_STATUS_E_FAILURE; + } + + serialization_alert("deregistered callbacks with obj mgr successfully"); + + return ret_status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_main_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_main_i.h new file mode 100644 index 0000000000000000000000000000000000000000..5a375acda0b24cd27750748c102e6e3f50d38049 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_main_i.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ +/** + * DOC: wlan_serialization_main.h + * This file contains all the prototype definitions necessary for the + * serialization component's internal functions + */ +#ifndef __WLAN_SERIALIZATION_MAIN_I_H +#define __WLAN_SERIALIZATION_MAIN_I_H +/* Include files */ +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_objmgr_pdev_obj.h" +#include "qdf_mc_timer.h" + +#define WLAN_SERIALIZATION_MAX_GLOBAL_POOL_CMDS 24 +#define WLAN_SERIALIZATION_MAX_ACTIVE_CMDS 1 +#define WLAN_SERIALIZATION_MAX_ACTIVE_SCAN_CMDS 8 + +#define serialization_log(level, args...) \ + QDF_TRACE(QDF_MODULE_ID_SERIALIZATION, level, ## args) +#define serialization_logfl(level, format, args...) \ + serialization_log(level, FL(format), ## args) + +#define serialization_alert(format, args...) \ + serialization_logfl(QDF_TRACE_LEVEL_FATAL, format, ## args) +#define serialization_err(format, args...) \ + serialization_logfl(QDF_TRACE_LEVEL_ERROR, format, ## args) +#define serialization_warn(format, args...) \ + serialization_logfl(QDF_TRACE_LEVEL_WARN, format, ## args) +#define serialization_info(format, args...) \ + serialization_logfl(QDF_TRACE_LEVEL_INFO, format, ## args) +#define serialization_debug(format, args...) \ + serialization_logfl(QDF_TRACE_LEVEL_DEBUG, format, ## args) +#define serialization_enter() \ + serialization_logfl(QDF_TRACE_LEVEL_DEBUG, "enter") +#define serialization_exit() serialization_logfl(QDF_TRACE_LEVEL_DEBUG, "exit") + +/** + * struct serialization_legacy_callback - to handle legacy serialization cb + * + * @serialization_purge_cmd_list: function ptr to be filled by serialization + * module + * + * Some of the legacy modules wants to call API to purge the commands in + * order to handle backward compatibility. 
+ */ +struct serialization_legacy_callback { + void (*serialization_purge_cmd_list) (struct wlan_objmgr_psoc *, + struct wlan_objmgr_vdev *, bool, bool, bool, bool, bool); +}; + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_rules.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_rules.c new file mode 100644 index 0000000000000000000000000000000000000000..97f3024ba8db1f129f5f7ebb87b8f2a71d3f3ef2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_rules.c @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "wlan_serialization_rules_i.h" + +bool +wlan_serialization_apply_scan_rules( + union wlan_serialization_rules_info *info, uint8_t comp_id) +{ + switch (comp_id) { + default: + return false; + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_rules_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_rules_i.h new file mode 100644 index 0000000000000000000000000000000000000000..5035ef27eb04e2b3b863e4bd3b3c535a7587253a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_rules_i.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_rules_i.h + * This file defines the prototypes for the rules related data + * pertinent to the serialization component. 
+ */ +#ifndef __WLAN_SERIALIZATION_RULES_I_H +#define __WLAN_SERIALIZATION_RULES_I_H + +#include +#include + +/** + * wlan_serialization_apply_scan_rules() - apply scan rules callback + * @info: rules info structure + * @comp_id: component Identifier + * + * This callback is registered with object manager during initialization and + * when serialization request is called by component, this callback handler + * applies rules depending on component. + * There will be many apply rules callback handlers in future + * + * Return: boolean + */ +bool +wlan_serialization_apply_scan_rules( + union wlan_serialization_rules_info *info, uint8_t comp_id); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utils.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utils.c new file mode 100644 index 0000000000000000000000000000000000000000..3cf29951b3aed9af0c0b40b564b582a2bd1a8f8e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utils.c @@ -0,0 +1,1055 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ +/** + * DOC: wlan_serialization_utils.c + * This file defines the utility helper functions for serialization component. + */ + +#include "wlan_serialization_utils_i.h" +#include "wlan_serialization_main_i.h" +#include "wlan_serialization_api.h" +#include "wlan_objmgr_vdev_obj.h" +#include "wlan_objmgr_pdev_obj.h" +#include "qdf_mc_timer.h" +#include "wlan_utility.h" +#include "wlan_scan_ucfg_api.h" +#ifdef CONFIG_MCL +#include "qdf_platform.h" +#endif + +QDF_STATUS +wlan_serialization_put_back_to_global_list(qdf_list_t *queue, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj, + struct wlan_serialization_command_list *cmd_list) +{ + QDF_STATUS status; + uint32_t cmd_id, cmd_type; + + if (!queue || !ser_pdev_obj || !cmd_list) { + serialization_err("input parameters are invalid"); + return QDF_STATUS_E_FAILURE; + } + /* + * if the command is already removed in other context, + * then it will be marked as inactive with the same + * below code. So, test before proceeding. + */ + if (!qdf_atomic_test_and_clear_bit(CMD_IS_ACTIVE, + &cmd_list->cmd_in_use)) { + serialization_debug("CMD is not active or being used"); + return QDF_STATUS_SUCCESS; + } + status = wlan_serialization_remove_node(queue, &cmd_list->node, + ser_pdev_obj); + if (QDF_STATUS_SUCCESS != status) { + serialization_err("can't remove cmd from queue"); + /* assert to catch any leaks */ + QDF_ASSERT(0); + return status; + } + cmd_id = cmd_list->cmd.cmd_id; + cmd_type = cmd_list->cmd.cmd_type; + qdf_mem_zero(&cmd_list->cmd, sizeof(struct wlan_serialization_command)); + status = wlan_serialization_insert_back( + &ser_pdev_obj->global_cmd_pool_list, + &cmd_list->node, ser_pdev_obj); + qdf_atomic_clear_bit(CMD_MARKED_FOR_DELETE, &cmd_list->cmd_in_use); + if (QDF_STATUS_SUCCESS != status) { + serialization_err("can't put command back to global pool"); + QDF_ASSERT(0); + } + serialization_debug("cmd_id-%d, cmd_type-%d", cmd_id, cmd_type); + + return status; +} + +struct wlan_objmgr_pdev* 
+wlan_serialization_get_pdev_from_cmd(struct wlan_serialization_command *cmd) +{ + struct wlan_objmgr_pdev *pdev = NULL; + + if (!cmd) { + serialization_err("invalid cmd"); + return pdev; + } + if (!cmd->vdev) { + serialization_err("invalid cmd->vdev"); + return pdev; + } + pdev = wlan_vdev_get_pdev(cmd->vdev); + + return pdev; +} + +QDF_STATUS wlan_serialization_get_cmd_from_queue(qdf_list_t *queue, + qdf_list_node_t **nnode, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + QDF_STATUS status; + qdf_list_node_t *pnode; + + if (!queue || !ser_pdev_obj) { + serialization_err("input parameters are invalid"); + return QDF_STATUS_E_FAILURE; + } + + pnode = *nnode; + if (!pnode) + status = wlan_serialization_peek_front(queue, nnode, + ser_pdev_obj); + else + status = wlan_serialization_peek_next(queue, pnode, nnode, + ser_pdev_obj); + + if (status != QDF_STATUS_SUCCESS) { + serialization_err("can't get next node from queue"); + } + + return status; +} + +/** + * wlan_serialization_timer_destroy() - destroys the timer + * @ser_timer: pointer to particular timer + * + * This API destroys the memory allocated by timer and assigns cmd member of + * that timer structure to NULL + * + * Return: QDF_STATUS + */ +static QDF_STATUS wlan_serialization_timer_destroy( + struct wlan_serialization_timer *ser_timer) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!ser_timer || !ser_timer->cmd) { + serialization_debug("Invalid ser_timer"); + return status; + } + status = qdf_mc_timer_destroy(&ser_timer->timer); + if (QDF_IS_STATUS_ERROR(status)) { + serialization_err("Failed to destroy timer for cmd_id[%d]", + ser_timer->cmd->cmd_id); + return status; + } + ser_timer->cmd = NULL; + + return status; +} + +#ifdef CONFIG_MCL +static void wlan_serialization_non_scan_timeout_action(void) +{ + qdf_trigger_self_recovery(); +} +#else +static void wlan_serialization_non_scan_timeout_action(void) +{ + QDF_BUG(0); +} +#endif + +/** + * 
wlan_serialization_generic_timer_callback() - timer callback when timer fire + * @arg: argument that timer passes to this callback + * + * All the timers in serialization module calls this callback when they fire, + * and this API in turn calls command specific timeout callback and remove + * timed-out command from active queue and move any pending command to active + * queue of same cmd_type. + * + * Return: none + */ +static void wlan_serialization_generic_timer_callback(void *arg) +{ + struct wlan_serialization_timer *timer = arg; + struct wlan_serialization_command *cmd = timer->cmd; + uint8_t vdev_id = WLAN_INVALID_VDEV_ID; + + if (!cmd) { + serialization_err("command not found"); + QDF_ASSERT(0); + return; + } + + if (cmd->vdev) + vdev_id = wlan_vdev_get_id(cmd->vdev); + + serialization_err("active cmd timeout for cmd_type[%d] vdev_id[%d]", + cmd->cmd_type, vdev_id); + + if (cmd->cmd_cb) + cmd->cmd_cb(cmd, WLAN_SER_CB_ACTIVE_CMD_TIMEOUT); + + if (cmd->cmd_type >= WLAN_SER_CMD_NONSCAN) + wlan_serialization_non_scan_timeout_action(); + /* + * dequeue cmd API will cleanup and destroy the timer. If it fails to + * dequeue command then we have to destroy the timer. It will also call + * cmd callback with WLAN_SER_CB_RELEASE_MEM_CMD to free the memory. 
+ */ + if (WLAN_SER_CMD_NOT_FOUND == wlan_serialization_dequeue_cmd(cmd, true)) + wlan_serialization_timer_destroy(timer); + if (cmd->cmd_cb) + cmd->cmd_cb(cmd, WLAN_SER_CB_RELEASE_MEM_CMD); +} + +/** + * wlan_serialization_stop_timer() - to stop particular timer + * @ser_timer: pointer to serialization timer + * + * This API stops the particular timer + * + * Return: QDF_STATUS + */ +static QDF_STATUS +wlan_serialization_stop_timer(struct wlan_serialization_timer *ser_timer) +{ + QDF_TIMER_STATE state; + QDF_STATUS status; + + state = qdf_mc_timer_get_current_state(&ser_timer->timer); + if (QDF_TIMER_STATE_RUNNING != state && + QDF_TIMER_STATE_STARTING != state) { + serialization_debug("nothing to stop"); + wlan_serialization_timer_destroy(ser_timer); + return QDF_STATUS_SUCCESS; + } + status = qdf_mc_timer_stop(&ser_timer->timer); + if (QDF_IS_STATUS_ERROR(status)) { + serialization_err("Failed to stop timer"); + return status; + } + wlan_serialization_timer_destroy(ser_timer); + status = QDF_STATUS_SUCCESS; + + return status; +} + +QDF_STATUS wlan_serialization_cleanup_all_timers( + struct wlan_serialization_psoc_priv_obj *psoc_ser_obj) +{ + struct wlan_serialization_timer *ser_timer; + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint32_t i = 0; + + if (!psoc_ser_obj) { + serialization_err("Invalid psoc_ser_obj"); + return QDF_STATUS_E_FAILURE; + } + + wlan_serialization_acquire_lock(&psoc_ser_obj->timer_lock); + for (i = 0; psoc_ser_obj->max_active_cmds > i; i++) { + ser_timer = &psoc_ser_obj->timers[i]; + if (!ser_timer->cmd) + continue; + status = wlan_serialization_stop_timer(ser_timer); + if (QDF_STATUS_SUCCESS != status) { + /* lets not break the loop but report error */ + serialization_err("some error in stopping timer"); + } + } + wlan_serialization_release_lock(&psoc_ser_obj->timer_lock); + + return status; +} + +QDF_STATUS +wlan_serialization_find_and_stop_timer(struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd) +{ + struct 
wlan_serialization_psoc_priv_obj *psoc_ser_obj; + struct wlan_serialization_timer *ser_timer; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + int i = 0; + + if (!psoc || !cmd) { + serialization_err("invalid param"); + return status; + } + + if ((cmd->cmd_timeout_duration == 0) && + (wlan_is_emulation_platform(wlan_psoc_get_nif_phy_version(psoc) + ))) { + serialization_err("[SCAN-EMULATION]: Not performing timer functions\n"); + return QDF_STATUS_SUCCESS; + } + + psoc_ser_obj = wlan_serialization_get_psoc_priv_obj(psoc); + wlan_serialization_acquire_lock(&psoc_ser_obj->timer_lock); + /* + * Here cmd_id and cmd_type are used to locate the timer being + * associated with command. For scan command, cmd_id is expected to + * be unique and For non-scan command, there should be only one active + * command per pdev + */ + for (i = 0; psoc_ser_obj->max_active_cmds > i; i++) { + ser_timer = &psoc_ser_obj->timers[i]; + if (!(ser_timer->cmd) || + (ser_timer->cmd->cmd_id != cmd->cmd_id) || + (ser_timer->cmd->cmd_type != cmd->cmd_type) || + (ser_timer->cmd->vdev != cmd->vdev)) + continue; + status = wlan_serialization_stop_timer(ser_timer); + status = QDF_STATUS_SUCCESS; + break; + } + wlan_serialization_release_lock(&psoc_ser_obj->timer_lock); + + if (QDF_IS_STATUS_SUCCESS(status)) + serialization_debug("Stopped timer for cmd_type %d cmd id %d", + cmd->cmd_type, cmd->cmd_id); + else + serialization_err("can't find timer for cmd_type %d cmd_id %d", + cmd->cmd_type, cmd->cmd_id); + return status; +} + +QDF_STATUS +wlan_serialization_find_and_start_timer(struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct wlan_serialization_psoc_priv_obj *psoc_ser_obj; + struct wlan_serialization_timer *ser_timer; + int i = 0; + + if (!psoc || !cmd) { + serialization_err("invalid param"); + return status; + } + + if ((cmd->cmd_timeout_duration == 0) && + (wlan_is_emulation_platform(wlan_psoc_get_nif_phy_version(psoc) + ))) 
{ + serialization_err("[SCAN-EMULATION]: Not performing timer functions\n"); + return QDF_STATUS_SUCCESS; + } + + + psoc_ser_obj = wlan_serialization_get_psoc_priv_obj(psoc); + + wlan_serialization_acquire_lock(&psoc_ser_obj->timer_lock); + for (i = 0; psoc_ser_obj->max_active_cmds > i; i++) { + /* Keep trying timer */ + ser_timer = &psoc_ser_obj->timers[i]; + if (ser_timer->cmd) + continue; + /* Remember timer is pointing to command */ + ser_timer->cmd = cmd; + status = QDF_STATUS_SUCCESS; + break; + } + wlan_serialization_release_lock(&psoc_ser_obj->timer_lock); + + if (QDF_IS_STATUS_SUCCESS(status)) { + status = qdf_mc_timer_init(&ser_timer->timer, QDF_TIMER_TYPE_SW, + wlan_serialization_generic_timer_callback, + ser_timer); + if (QDF_IS_STATUS_ERROR(status)) { + serialization_err("Failed to init timer cmdid [%d]", + cmd->cmd_id); + QDF_ASSERT(0); + return status; + } + status = qdf_mc_timer_start(&ser_timer->timer, + cmd->cmd_timeout_duration); + if (QDF_IS_STATUS_ERROR(status)) { + serialization_err("Failed to start timer cmdid [%d]", + cmd->cmd_id); + wlan_serialization_timer_destroy(ser_timer); + QDF_ASSERT(0); + return status; + } + serialization_debug("Started timer for cmd: type[%d] id[%d]", + cmd->cmd_type, cmd->cmd_id); + } else { + serialization_err("Failed to start timer for cmd: type[%d] id[%d]", + cmd->cmd_type, cmd->cmd_id); + } + + return status; +} + +/** + * wlan_serialization_active_scan_cmd_count_handler() - count active scan cmds + * @psoc: pointer to soc strucutre + * @obj : pointer to pdev object + * @arg: pointer to argument + * + * This API will be called while iterating each pdev object and it will count + * number of scan commands present in that pdev object's active queue. 
count + * will be updated in *arg + * + * Return: none + */ +static void +wlan_serialization_active_scan_cmd_count_handler(struct wlan_objmgr_psoc *psoc, + void *obj, void *arg) +{ + struct wlan_objmgr_pdev *pdev = obj; + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + uint32_t *count = arg; + + if (!pdev) { + serialization_err("invalid pdev"); + return; + } + + ser_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_SERIALIZATION); + *count += wlan_serialization_list_size(&ser_pdev_obj->active_scan_list, + ser_pdev_obj); +} + +/** + * wlan_serialization_is_active_scan_cmd_allowed() - find if scan cmd allowed + * @pdev: pointer to pdev object + * + * This API will be called to find out if active scan cmd is allowed. It has + * to iterate through all pdev to find out total number of active scan cmds. + * If total number of active scan cmds reach to allowed threshold then don't + * allow more scan cmd. + * + * Return: true or false + */ +static bool +wlan_serialization_is_active_scan_cmd_allowed(struct wlan_objmgr_pdev *pdev) +{ + uint32_t count = 0; + struct wlan_objmgr_psoc *psoc; + + if (!pdev) { + serialization_err("invalid pdev"); + return false; + } + + psoc = wlan_pdev_get_psoc(pdev); + + if (!psoc) { + serialization_err("invalid psoc"); + return false; + } + + wlan_objmgr_iterate_obj_list(psoc, WLAN_PDEV_OP, + wlan_serialization_active_scan_cmd_count_handler, + &count, 1, WLAN_SERIALIZATION_ID); + if (count < ucfg_scan_get_max_active_scans(psoc)) { + serialization_debug("count is [%d]", count); + return true; + } + + return false; +} + +/** + * wlan_serialization_is_active_nonscan_cmd_allowed() - find if cmd allowed + * @pdev: pointer to pdev object + * + * This API will be called to find out if non scan cmd is allowed. 
+ * + * Return: true or false + */ +static bool +wlan_serialization_is_active_nonscan_cmd_allowed(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + + if (!pdev) { + serialization_err("invalid pdev"); + return false; + } + + ser_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_SERIALIZATION); + + if (!ser_pdev_obj) { + serialization_err("invalid ser_pdev_obj"); + return false; + } + + if (wlan_serialization_list_empty(&ser_pdev_obj->active_list, + ser_pdev_obj)) + return true; + + return false; +} + +bool +wlan_serialization_is_active_cmd_allowed(struct wlan_serialization_command *cmd) +{ + struct wlan_objmgr_pdev *pdev; + + pdev = wlan_serialization_get_pdev_from_cmd(cmd); + if (!pdev) { + serialization_err("NULL pdev"); + return false; + } + + if (cmd->cmd_type < WLAN_SER_CMD_NONSCAN) + return wlan_serialization_is_active_scan_cmd_allowed(pdev); + else + return wlan_serialization_is_active_nonscan_cmd_allowed(pdev); +} + +bool +wlan_serialization_is_scan_cmd_allowed(struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_pdev_priv_obj + *ser_pdev_obj) +{ + uint32_t active_count, pending_count; + uint32_t max_scan_commands_allowed; + + if (!psoc || !ser_pdev_obj) { + serialization_err("invalid psoc or serialization object"); + return false; + } + + max_scan_commands_allowed = ucfg_scan_get_max_cmd_allowed(); + + active_count = + wlan_serialization_list_size(&ser_pdev_obj->active_scan_list, + ser_pdev_obj); + + pending_count = + wlan_serialization_list_size(&ser_pdev_obj->pending_scan_list, + ser_pdev_obj); + + if ((active_count + pending_count) >= max_scan_commands_allowed) { + serialization_debug("active scan cmds %d, pending scan cmds %d max allowed %d", + active_count, pending_count, + max_scan_commands_allowed); + return false; + } + + return true; +} + +QDF_STATUS wlan_serialization_validate_cmdtype( + enum wlan_serialization_cmd_type cmd_type) +{ + serialization_debug("validate 
cmd_type:%d", cmd_type); + + if (cmd_type < 0 || cmd_type >= WLAN_SER_CMD_MAX) { + serialization_err("Invalid cmd or comp passed"); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_serialization_validate_cmd( + enum wlan_umac_comp_id comp_id, + enum wlan_serialization_cmd_type cmd_type) +{ + serialization_debug("validate cmd_type:%d, comp_id:%d", + cmd_type, comp_id); + if (cmd_type < 0 || comp_id < 0 || + cmd_type >= WLAN_SER_CMD_MAX || + comp_id >= WLAN_UMAC_COMP_ID_MAX) { + serialization_err("Invalid cmd or comp passed"); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +static void wlan_serialization_release_list_cmds( + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj, + qdf_list_t *list) +{ + qdf_list_node_t *node = NULL; + + while (!wlan_serialization_list_empty(list, ser_pdev_obj)) { + wlan_serialization_remove_front(list, &node, ser_pdev_obj); + wlan_serialization_insert_back( + &ser_pdev_obj->global_cmd_pool_list, + node, ser_pdev_obj); + } + + return; +} + +void wlan_serialization_destroy_list( + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj, + qdf_list_t *list) +{ + wlan_serialization_release_list_cmds(ser_pdev_obj, list); + qdf_list_destroy(list); +} + +struct wlan_serialization_psoc_priv_obj *wlan_serialization_get_psoc_priv_obj( + struct wlan_objmgr_psoc *psoc) +{ + struct wlan_serialization_psoc_priv_obj *ser_soc_obj; + ser_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_SERIALIZATION); + + return ser_soc_obj; +} + +struct wlan_serialization_pdev_priv_obj *wlan_serialization_get_pdev_priv_obj( + struct wlan_objmgr_pdev *pdev) +{ + struct wlan_serialization_pdev_priv_obj *obj; + obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SERIALIZATION); + + return obj; +} + +struct wlan_serialization_psoc_priv_obj * +wlan_serialization_get_psoc_obj(struct wlan_serialization_command *cmd) +{ + struct wlan_serialization_psoc_priv_obj *ser_soc_obj = NULL; 
+ struct wlan_objmgr_psoc *psoc; + + if (!cmd->vdev) + return ser_soc_obj; + + psoc = wlan_vdev_get_psoc(cmd->vdev); + ser_soc_obj = wlan_serialization_get_psoc_priv_obj(psoc); + + return ser_soc_obj; +} + +bool wlan_serialization_is_cmd_in_vdev_list(struct wlan_objmgr_vdev *vdev, + qdf_list_t *queue) +{ + uint32_t queuelen; + qdf_list_node_t *nnode = NULL; + qdf_list_node_t *pnode; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj = + wlan_serialization_get_pdev_priv_obj(pdev); + QDF_STATUS status; + struct wlan_serialization_command_list *cmd_list = NULL; + bool match_found = false; + + wlan_serialization_acquire_lock(&ser_pdev_obj->pdev_ser_list_lock); + queuelen = qdf_list_size(queue); + if (!queuelen) { + wlan_serialization_release_lock( + &ser_pdev_obj->pdev_ser_list_lock); + serialization_debug("queue empty"); + return false; + } + + status = qdf_list_peek_front(queue, &nnode); + if (QDF_IS_STATUS_ERROR(status)) { + wlan_serialization_release_lock( + &ser_pdev_obj->pdev_ser_list_lock); + serialization_debug("failed to get first node"); + return false; + } + while (nnode) { + + cmd_list = qdf_container_of(nnode, + struct wlan_serialization_command_list, + node); + if (cmd_list->cmd.vdev == vdev) { + match_found = true; + break; + } + + pnode = nnode; + status = qdf_list_peek_next(queue, pnode, &nnode); + if (QDF_IS_STATUS_ERROR(status)) + break; + } + wlan_serialization_release_lock(&ser_pdev_obj->pdev_ser_list_lock); + + return match_found; +} + +bool wlan_serialization_is_cmd_in_pdev_list( + struct wlan_objmgr_pdev *pdev, + qdf_list_t *queue) +{ + uint32_t queuelen; + qdf_list_node_t *nnode = NULL; + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj = + wlan_serialization_get_pdev_priv_obj(pdev); + QDF_STATUS status; + + queuelen = wlan_serialization_list_size(queue, ser_pdev_obj); + if (!queuelen) { + serialization_debug("queue empty"); + return false; + } + + while (queuelen--) { + 
status = wlan_serialization_get_cmd_from_queue(queue, &nnode, + ser_pdev_obj); + if (status != QDF_STATUS_SUCCESS) + break; + if (wlan_serialization_match_cmd_pdev(nnode, pdev)) + return true; + } + + return false; +} + +enum wlan_serialization_cmd_status +wlan_serialization_is_cmd_in_active_pending(bool cmd_in_active, + bool cmd_in_pending) +{ + if (cmd_in_active && cmd_in_pending) + return WLAN_SER_CMDS_IN_ALL_LISTS; + else if (cmd_in_active) + return WLAN_SER_CMD_IN_ACTIVE_LIST; + else if (cmd_in_pending) + return WLAN_SER_CMD_IN_PENDING_LIST; + else + return WLAN_SER_CMD_NOT_FOUND; +} + +static bool wlan_serialization_is_cmd_present_in_given_queue(qdf_list_t *queue, + struct wlan_serialization_command *cmd, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + uint32_t qsize; + QDF_STATUS status; + struct wlan_serialization_command_list *cmd_list = NULL; + qdf_list_node_t *nnode = NULL, *pnode = NULL; + bool found = false; + + qsize = wlan_serialization_list_size(queue, ser_pdev_obj); + while (qsize--) { + if (!cmd_list) + status = wlan_serialization_peek_front(queue, &nnode, + ser_pdev_obj); + else + status = wlan_serialization_peek_next(queue, pnode, + &nnode, + ser_pdev_obj); + + if (status != QDF_STATUS_SUCCESS) + break; + + pnode = nnode; + cmd_list = qdf_container_of(nnode, + struct wlan_serialization_command_list, node); + if (wlan_serialization_match_cmd_id_type(nnode, cmd, + ser_pdev_obj) && + wlan_serialization_match_cmd_vdev(nnode, cmd->vdev)) { + found = true; + break; + } + nnode = NULL; + } + + return found; +} + +bool wlan_serialization_is_cmd_present_queue( + struct wlan_serialization_command *cmd, + uint8_t is_active_queue) +{ + qdf_list_t *queue; + struct wlan_objmgr_pdev *pdev; + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj; + + if (!cmd) { + serialization_err("invalid params"); + return false; + } + pdev = wlan_serialization_get_pdev_from_cmd(cmd); + if (!pdev) { + serialization_err("invalid pdev"); + return false; + } + 
ser_pdev_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_SERIALIZATION); + if (!ser_pdev_obj) { + serialization_err("invalid ser_pdev_obj"); + return false; + } + if (!is_active_queue) { + if (cmd->cmd_type < WLAN_SER_CMD_NONSCAN) + queue = &ser_pdev_obj->pending_scan_list; + else + queue = &ser_pdev_obj->pending_list; + } else { + if (cmd->cmd_type < WLAN_SER_CMD_NONSCAN) + queue = &ser_pdev_obj->active_scan_list; + else + queue = &ser_pdev_obj->active_list; + } + + return wlan_serialization_is_cmd_present_in_given_queue(queue, cmd, + ser_pdev_obj); +} + +bool wlan_serialization_list_empty( + qdf_list_t *queue, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + bool is_empty; + + wlan_serialization_acquire_lock(&ser_pdev_obj->pdev_ser_list_lock); + if (qdf_list_empty(queue)) + is_empty = true; + else + is_empty = false; + wlan_serialization_release_lock(&ser_pdev_obj->pdev_ser_list_lock); + + return is_empty; +} + +uint32_t wlan_serialization_list_size( + qdf_list_t *queue, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + uint32_t size; + + wlan_serialization_acquire_lock(&ser_pdev_obj->pdev_ser_list_lock); + size = qdf_list_size(queue); + wlan_serialization_release_lock(&ser_pdev_obj->pdev_ser_list_lock); + + return size; +} + +QDF_STATUS wlan_serialization_remove_front( + qdf_list_t *list, + qdf_list_node_t **node, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + QDF_STATUS status; + + wlan_serialization_acquire_lock(&ser_pdev_obj->pdev_ser_list_lock); + status = qdf_list_remove_front(list, node); + wlan_serialization_release_lock(&ser_pdev_obj->pdev_ser_list_lock); + + return status; +} + +QDF_STATUS wlan_serialization_remove_node( + qdf_list_t *list, + qdf_list_node_t *node, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + QDF_STATUS status; + + wlan_serialization_acquire_lock(&ser_pdev_obj->pdev_ser_list_lock); + status = qdf_list_remove_node(list, node); + 
wlan_serialization_release_lock(&ser_pdev_obj->pdev_ser_list_lock); + + return status; +} + +QDF_STATUS wlan_serialization_insert_front( + qdf_list_t *list, + qdf_list_node_t *node, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + QDF_STATUS status; + + wlan_serialization_acquire_lock(&ser_pdev_obj->pdev_ser_list_lock); + status = qdf_list_insert_front(list, node); + wlan_serialization_release_lock(&ser_pdev_obj->pdev_ser_list_lock); + + return status; +} + +QDF_STATUS wlan_serialization_insert_back( + qdf_list_t *list, + qdf_list_node_t *node, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + QDF_STATUS status; + + wlan_serialization_acquire_lock(&ser_pdev_obj->pdev_ser_list_lock); + status = qdf_list_insert_back(list, node); + wlan_serialization_release_lock(&ser_pdev_obj->pdev_ser_list_lock); + + return status; +} + +QDF_STATUS wlan_serialization_peek_front( + qdf_list_t *list, + qdf_list_node_t **node, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + QDF_STATUS status; + + wlan_serialization_acquire_lock(&ser_pdev_obj->pdev_ser_list_lock); + status = qdf_list_peek_front(list, node); + wlan_serialization_release_lock(&ser_pdev_obj->pdev_ser_list_lock); + + return status; +} + +QDF_STATUS wlan_serialization_peek_next( + qdf_list_t *list, + qdf_list_node_t *node1, qdf_list_node_t **node2, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + QDF_STATUS status; + + wlan_serialization_acquire_lock(&ser_pdev_obj->pdev_ser_list_lock); + status = qdf_list_peek_next(list, node1, node2); + wlan_serialization_release_lock(&ser_pdev_obj->pdev_ser_list_lock); + + return status; +} + +bool wlan_serialization_match_cmd_scan_id( + qdf_list_node_t *nnode, + struct wlan_serialization_command **cmd, + uint16_t scan_id, struct wlan_objmgr_vdev *vdev, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + struct wlan_serialization_command_list *cmd_list = NULL; + bool match_found = false; + + 
wlan_serialization_acquire_lock(&ser_pdev_obj->pdev_ser_list_lock); + cmd_list = qdf_container_of(nnode, + struct wlan_serialization_command_list, + node); + if ((cmd_list->cmd.cmd_id == scan_id) && + (cmd_list->cmd.vdev == vdev)) { + *cmd = &cmd_list->cmd; + match_found = true; + }; + wlan_serialization_release_lock(&ser_pdev_obj->pdev_ser_list_lock); + + return match_found; +} + +bool wlan_serialization_match_cmd_id_type( + qdf_list_node_t *nnode, + struct wlan_serialization_command *cmd, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj) +{ + struct wlan_serialization_command_list *cmd_list = NULL; + bool match_found = true; + + if (!cmd) + return false; + wlan_serialization_acquire_lock(&ser_pdev_obj->pdev_ser_list_lock); + cmd_list = qdf_container_of(nnode, + struct wlan_serialization_command_list, + node); + if ((cmd_list->cmd.cmd_id != cmd->cmd_id) || + (cmd_list->cmd.cmd_type != cmd->cmd_type)) { + match_found = false; + }; + wlan_serialization_release_lock(&ser_pdev_obj->pdev_ser_list_lock); + + return match_found; +} + +bool wlan_serialization_match_cmd_vdev(qdf_list_node_t *nnode, + struct wlan_objmgr_vdev *vdev) +{ + struct wlan_serialization_command_list *cmd_list = NULL; + bool match_found = false; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj = + wlan_serialization_get_pdev_priv_obj(pdev); + + wlan_serialization_acquire_lock(&ser_pdev_obj->pdev_ser_list_lock); + cmd_list = qdf_container_of(nnode, + struct wlan_serialization_command_list, + node); + if (cmd_list->cmd.vdev == vdev) + match_found = true; + wlan_serialization_release_lock(&ser_pdev_obj->pdev_ser_list_lock); + + return match_found; +} + +bool wlan_serialization_match_cmd_pdev(qdf_list_node_t *nnode, + struct wlan_objmgr_pdev *pdev) +{ + struct wlan_serialization_command_list *cmd_list = NULL; + bool match_found = false; + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj = + 
wlan_serialization_get_pdev_priv_obj(pdev); + struct wlan_objmgr_pdev *node_pdev = NULL; + + wlan_serialization_acquire_lock(&ser_pdev_obj->pdev_ser_list_lock); + cmd_list = qdf_container_of(nnode, + struct wlan_serialization_command_list, + node); + node_pdev = wlan_vdev_get_pdev(cmd_list->cmd.vdev); + if (node_pdev == pdev) + match_found = true; + wlan_serialization_release_lock(&ser_pdev_obj->pdev_ser_list_lock); + + return match_found; +} + +#ifdef WLAN_CMD_SERIALIZATION_LOCKING +QDF_STATUS +wlan_serialization_acquire_lock(qdf_spinlock_t *lock) +{ + qdf_spin_lock_bh(lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_serialization_release_lock(qdf_spinlock_t *lock) +{ + qdf_spin_unlock_bh(lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_serialization_create_lock(qdf_spinlock_t *lock) +{ + qdf_spinlock_create(lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_serialization_destroy_lock(qdf_spinlock_t *lock) +{ + qdf_spinlock_destroy(lock); + + return QDF_STATUS_SUCCESS; +} +#else +QDF_STATUS +wlan_serialization_acquire_lock(qdf_spinlock_t *lock) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_serialization_release_lock(qdf_spinlock_t *lock) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_serialization_create_lock(qdf_spinlock_t *lock) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_serialization_destroy_lock(qdf_spinlock_t *lock) +{ + return QDF_STATUS_SUCCESS; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utils_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utils_i.h new file mode 100644 index 0000000000000000000000000000000000000000..65b02b118df9789acc75671f3bbd180e169aaa82 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_utils_i.h @@ -0,0 +1,635 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wlan_serialization_utils_i.h + * This file defines the prototypes for the utility helper functions + * for the serialization component. + */ +#ifndef __WLAN_SERIALIZATION_UTILS_I_H +#define __WLAN_SERIALIZATION_UTILS_I_H +/* Include files */ +#include "qdf_status.h" +#include "qdf_list.h" +#include "qdf_mc_timer.h" +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_global_obj.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_serialization_rules_i.h" +#include "wlan_scan_ucfg_api.h" + +/* + * Below bit positions are used to identify if a + * serialization command is in use or marked for + * deletion. + * CMD_MARKED_FOR_DELETE - The command is about to be deleted + * CMD_IS_ACTIVE - The command is active and currently in use + */ +#define CMD_MARKED_FOR_DELETE 1 +#define CMD_IS_ACTIVE 2 +/** + * struct wlan_serialization_timer - Timer used for serialization + * @cmd: Cmd to which the timer is linked + * @timer: Timer associated with the command + * + * Timers are allocated statically during init, one each for the + * maximum active commands permitted in the system. Once a cmd is + * moved from pending list to active list, the timer is activated + * and once the cmd is completed, the timer is cancelled. 
Timer is + * also cancelled if the command is aborted + * + * The timers are maintained per psoc. A timer is associated to + * unique combination of pdev, cmd_type and cmd_id. + */ +struct wlan_serialization_timer { + struct wlan_serialization_command *cmd; + qdf_mc_timer_t timer; +}; + +/** + * struct wlan_serialization_command_list - List of commands to be serialized + * @node: Node identifier in the list + * @cmd: Command to be serialized + * @cmd_in_use: flag to check if the node/entry is logically active + */ +struct wlan_serialization_command_list { + qdf_list_node_t node; + struct wlan_serialization_command cmd; + unsigned long cmd_in_use; +}; + +/** + * struct wlan_serialization_pdev_priv_obj - pdev obj data for serialization + * @active_list: list to hold the non-scan commands currently being executed + * @pending_list: list to hold the non-scan commands currently pending + * @active_scan_list: list to hold the scan commands currently active + * @pending_scan_list: list to hold the scan commands currently pending + * @global_cmd_pool_list: list to hold the global buffers + * @pdev_ser_list_lock: A per pdev lock to protect the concurrent operations + * on the queues. + * + * Serialization component maintains linked lists to store the commands + * sent by other components to get serialized. All the lists are per + * pdev. The maximum number of active scans is determined by the firmware. + * There is only one non-scan active command per pdev at a time as per the + * current software architecture. cmd_ptr holds the memory allocated for + * each of the global cmd pool nodes and it is useful in freeing up these + * nodes when needed. 
 + */ +struct wlan_serialization_pdev_priv_obj { + qdf_list_t active_list; + qdf_list_t pending_list; + qdf_list_t active_scan_list; + qdf_list_t pending_scan_list; + qdf_list_t global_cmd_pool_list; + qdf_spinlock_t pdev_ser_list_lock; +}; + +/** + * struct wlan_serialization_psoc_priv_obj - psoc obj data for serialization + * @comp_info_cb - module level callback + * @apply_rules_cb - pointer to apply rules on the cmd + * @timers - Timers associated with the active commands + * @max_active_cmds - Maximum active commands allowed + * + * Serialization component takes a command as input and checks whether to + * allow/deny the command. It will use the module level callback registered + * by each component to fetch the information needed to apply the rules. + * Once the information is available, the rules callback registered for each + * command internally by serialization will be applied to determine the + * checkpoint for the command. If allowed, command will be put into active/ + * pending list and each active command is associated with a timer. + */ +struct wlan_serialization_psoc_priv_obj { + wlan_serialization_comp_info_cb comp_info_cb[ + WLAN_SER_CMD_MAX][WLAN_UMAC_COMP_ID_MAX]; + wlan_serialization_apply_rules_cb apply_rules_cb[WLAN_SER_CMD_MAX]; + struct wlan_serialization_timer *timers; + uint8_t max_active_cmds; + qdf_spinlock_t timer_lock; +}; + +/** + * wlan_serialization_put_back_to_global_list() - put back cmd in global pool + * @queue: queue from which cmd needs to be taken out + * @ser_pdev_obj: pdev private object + * @cmd_list: cmd which needs to be matched + * + * command will be taken off from the queue and will be put back to global + * pool of free command buffers. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS +wlan_serialization_put_back_to_global_list(qdf_list_t *queue, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj, + struct wlan_serialization_command_list *cmd_list); +/** + * wlan_serialization_move_pending_to_active() - to move pending command to + * active queue + * @cmd_type: cmd type to device to which queue the command needs to go + * @ser_pdev_obj: pointer to ser_pdev_obj + * + * Return: none + */ +void wlan_serialization_move_pending_to_active( + enum wlan_serialization_cmd_type cmd_type, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj); +/** + * wlan_serialization_get_pdev_from_cmd() - get pdev from provided cmd + * @cmd: pointer to actual command + * + * This API will get the pointer to pdev through checking type of cmd + * + * Return: pointer to pdev + */ +struct wlan_objmgr_pdev* +wlan_serialization_get_pdev_from_cmd(struct wlan_serialization_command *cmd); + +/** + * wlan_serialization_get_cmd_from_queue() - to extract command from given queue + * @queue: pointer to queue + * @nnode: next node to extract + * @ser_pdev_obj: Serialization PDEV object pointer + * + * This API will try to extract node from queue which is next to prev node. If + * no previous node is given then take out the front node of the queue. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_serialization_get_cmd_from_queue(qdf_list_t *queue, + qdf_list_node_t **nnode, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj); + +/** + * wlan_serialization_is_active_cmd_allowed() - check to see if command + * is allowed in active queue + * @pdev: pointer to pdev structure + * @cmd_type: type of command to check against + * + * Takes the command type and based on the type, it checks scan command queue + * or nonscan command queue to see if active command is allowed or no + * + * Return: true if allowed else false + */ +bool wlan_serialization_is_active_cmd_allowed( + struct wlan_serialization_command *cmd); + +/** + * wlan_serialization_is_scan_cmd_allowed() - check if the scan command is + * allowed to be queued + * @psoc: pointer to the PSOC object + * @ser_pdev_obj: pointer to the pdev serialization object + * + * This function checks if the total number of scan commands (active + pending) + * is less than the max number of scan commands allowed. + * + * Return: true if allowed else false + */ +bool +wlan_serialization_is_scan_cmd_allowed(struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_pdev_priv_obj + *ser_pdev_obj); + +/** + * wlan_serialization_cleanup_all_timers() - to clean-up all timers + * + * @psoc_ser_ob: pointer to serialization psoc private object + * + * This API is to cleanup all the timers. it can be used when serialization + * module is exiting. 
it will make sure that if timer is running then it will + * stop and destroys the timer + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_serialization_cleanup_all_timers( + struct wlan_serialization_psoc_priv_obj *psoc_ser_ob); + +/** + * wlan_serialization_find_and_remove_cmd() - to find cmd from queue and remove + * @cmd_info: pointer to command related information + * + * This api will find command from active queue and removes the command + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_serialization_find_and_remove_cmd( + struct wlan_serialization_queued_cmd_info *cmd_info); + +/** + * wlan_serialization_find_and_cancel_cmd() - to find cmd from queue and cancel + * @cmd_info: pointer to command related information + * + * This api will find command from active queue and pending queue and + * removes the command. If it is in active queue then it will notifies the + * requester that it is in active queue and from there it expects requester + * to send remove command + * + * Return: wlan_serialization_cmd_status + */ +enum wlan_serialization_cmd_status +wlan_serialization_find_and_cancel_cmd( + struct wlan_serialization_queued_cmd_info *cmd_info); +/** + * wlan_serialization_enqueue_cmd() - Enqueue the cmd to pending/active Queue + * @cmd: Command information + * @is_cmd_for_active_queue: whether command is for active queue + * @cmd_list: command which needs to be inserted in active queue + * Return: Status of the serialization request + */ +enum wlan_serialization_status +wlan_serialization_enqueue_cmd( + struct wlan_serialization_command *cmd, + uint8_t is_cmd_for_active_queue, + struct wlan_serialization_command_list **pcmd_list); + +/** + * wlan_serialization_dequeue_cmd() - dequeue the cmd to pending/active Queue + * @cmd: Command information + * @is_cmd_for_active_queue: whether command is for active queue + * + * Return: Status of the serialization request + */ +enum wlan_serialization_cmd_status +wlan_serialization_dequeue_cmd(struct 
wlan_serialization_command *cmd, + uint8_t is_cmd_for_active_queue); +/** + * wlan_serialization_find_and_stop_timer() - to find and stop the timer + * @psoc: pointer to psoc + * @cmd: pointer to actual command + * + * find the timer associated with command, stop it and destroy it + * + * Return: QDF_STATUS + */ +QDF_STATUS +wlan_serialization_find_and_stop_timer(struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd); +/** + * wlan_serialization_find_and_start_timer() - to find and start the timer + * @psoc: pointer to psoc + * @cmd: pointer to actual command + * + * find the free timer, initialize it, and start it + * + * Return: QDF_STATUS + */ +QDF_STATUS +wlan_serialization_find_and_start_timer(struct wlan_objmgr_psoc *psoc, + struct wlan_serialization_command *cmd); + +/** + * wlan_serialization_validate_cmd() - Validate the command + * @comp_id: Component ID + * @cmd_type: Command Type + * + * Return: QDF Status + */ +QDF_STATUS wlan_serialization_validate_cmd( + enum wlan_umac_comp_id comp_id, + enum wlan_serialization_cmd_type cmd_type); + +/** + * wlan_serialization_validate_cmdtype() - Validate the command type + * @cmd_type: Command Type + * + * Return: QDF Status + */ +QDF_STATUS wlan_serialization_validate_cmdtype( + enum wlan_serialization_cmd_type cmd_type); + + +/** + * wlan_serialization_destroy_list() - Release the cmds and destroy list + * @ser_pdev_obj: Serialization private pdev object + * @list: List to be destroyed + * + * Return: None + */ +void wlan_serialization_destroy_list( + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj, + qdf_list_t *list); + +/** + * wlan_serialization_get_psoc_priv_obj() - Return the component private obj + * @psoc: Pointer to the PSOC object + * + * Return: Serialization component's PSOC level private data object + */ +struct wlan_serialization_psoc_priv_obj *wlan_serialization_get_psoc_priv_obj( + struct wlan_objmgr_psoc *psoc); + +/** + * wlan_serialization_get_pdev_priv_obj() - Return the 
component private obj + * @pdev: Pointer to the PDEV object + * + * Return: Serialization component's PDEV level private data object + */ +struct wlan_serialization_pdev_priv_obj *wlan_serialization_get_pdev_priv_obj( + struct wlan_objmgr_pdev *pdev); + +/** + * wlan_serialization_get_psoc_obj() - Return the component private obj + * @cmd: Pointer to the serialization command + * + * Return: Serialization component's level private data object + */ +struct wlan_serialization_psoc_priv_obj * +wlan_serialization_get_psoc_obj(struct wlan_serialization_command *cmd); + +/** + * wlan_serialization_is_cmd_in_vdev_list() - Check Node present in VDEV list + * @vdev: Pointer to the VDEV object + * @queue: Pointer to the qdf_list_t + * + * Return: Boolean true or false + */ +bool +wlan_serialization_is_cmd_in_vdev_list( + struct wlan_objmgr_vdev *vdev, qdf_list_t *queue); + +/** + * wlan_serialization_is_cmd_in_pdev_list() - Check Node present in PDEV list + * @pdev: Pointer to the PDEV object + * @queue: Pointer to the qdf_list_t + * + * Return: Boolean true or false + */ +bool +wlan_serialization_is_cmd_in_pdev_list( + struct wlan_objmgr_pdev *pdev, qdf_list_t *queue); + +/** + * wlan_serialization_is_cmd_in_active_pending() - return cmd status + * active/pending queue + * @cmd_in_active: CMD in active list + * @cmd_in_pending: CMD in pending list + * + * Return: enum wlan_serialization_cmd_status + */ +enum wlan_serialization_cmd_status +wlan_serialization_is_cmd_in_active_pending(bool cmd_in_active, + bool cmd_in_pending); + +/** + * wlan_serialization_remove_all_cmd_from_queue() - Remove cmd which matches + * @queue: queue from where command needs to be removed + * @ser_pdev_obj: pointer to serialization object + * @pdev: pointer to pdev + * @vdev: pointer to vdev + * @cmd: pointer to cmd + * @is_active_queue: to check if command matching is for active queue + * + * This API will remove one or more commands which match the given parameters + * in terms of argument. 
For example, if user request all commands to removed + * which matches "vdev" then iterate through all commands, find out and remove + * command which matches vdev object. + * + * Return: enum wlan_serialization_cmd_status + */ +enum wlan_serialization_cmd_status +wlan_serialization_remove_all_cmd_from_queue(qdf_list_t *queue, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj, + struct wlan_objmgr_pdev *pdev, struct wlan_objmgr_vdev *vdev, + struct wlan_serialization_command *cmd, + uint8_t is_active_queue); +/** + * wlan_serialization_is_cmd_present_queue() - Check if same command + * is already present active or pending queue + * @cmd: pointer to command which we need to find + * @is_active_queue: flag to find the command in active or pending queue + * + * This API will check the given command is already present in active or + * pending queue based on flag + * If present then return true otherwise false + * + * Return: true or false + */ +bool wlan_serialization_is_cmd_present_queue( + struct wlan_serialization_command *cmd, + uint8_t is_active_queue); + +/** + * wlan_serialization_activate_cmd() - activate cmd in active queue + * @cmd_list: Command needs to be activated + * @ser_pdev_obj: Serialization private pdev object + * + * Return: None + */ +void wlan_serialization_activate_cmd( + struct wlan_serialization_command_list *cmd_list, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj); + +/** + * wlan_serialization_list_empty() - check if the list is empty + * @queue: Queue/List that needs to be checked for emptiness + * @ser_pdev_obj: Serialization private pdev object + * + * Return: true if list is empty and false otherwise + */ +bool wlan_serialization_list_empty( + qdf_list_t *queue, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj); + +/** + * wlan_serialization_list_size() - Find the size of the provided queue + * @queue: Queue/List for which the size/length is to be returned + * @ser_pdev_obj: Serialization private pdev object + * + * 
Return: size/length of the queue/list + */ +uint32_t wlan_serialization_list_size( + qdf_list_t *queue, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj); +/** + * wlan_serialization_acquire_lock() - to acquire lock for serialization module + * @lock: lock that is to be acquired + * + * This API will acquire lock for serialization module. Mutex or spinlock will + * be decided based on the context of the operation. + * + * Return: QDF_STATUS based on outcome of the operation + */ +QDF_STATUS +wlan_serialization_acquire_lock(qdf_spinlock_t *lock); + +/** + * wlan_serialization_release_lock() - to release lock for serialization module + * @lock: lock that is to be released + * + * This API will release lock for serialization module. Mutex or spinlock will + * be decided based on the context of the operation. + * + * Return: QDF_STATUS based on outcome of the operation + */ +QDF_STATUS +wlan_serialization_release_lock(qdf_spinlock_t *lock); + +/** + * wlan_serialization_create_lock() - to create lock for serialization module + * @lock: lock that is to be created + * + * This API will create a lock for serialization module. + * + * Return: QDF_STATUS based on outcome of the operation + */ +QDF_STATUS +wlan_serialization_create_lock(qdf_spinlock_t *lock); + +/** + * wlan_serialization_destroy_lock() - to destroy lock for serialization module + * @lock: lock that is to be destroyed + * + * This API will destroy a lock for serialization module. + * + * Return: QDF_STATUS based on outcome of the operation + */ +QDF_STATUS +wlan_serialization_destroy_lock(qdf_spinlock_t *lock); + +/** + * wlan_serialization_match_cmd_scan_id() - Check for a match on given nnode + * @nnode: The node on which the matching has to be done + * @cmd: Command that needs to be filled if there is a match + * @scan_id: Scan ID to be matched + * @vdev: VDEV object to be matched + * @ser_pdev_obj: Serialization PDEV Object pointer. 
+ * + * This API will check if the scan ID and VDEV of the given nnode are + * matching with the one's that are being passed to this function. + * + * Return: True if matched,false otherwise. + */ +bool wlan_serialization_match_cmd_scan_id( + qdf_list_node_t *nnode, + struct wlan_serialization_command **cmd, + uint16_t scan_id, struct wlan_objmgr_vdev *vdev, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj); +/** + * wlan_serialization_match_cmd_id_type() - Check for a match on given nnode + * @nnode: The node on which the matching has to be done + * @cmd: Command that needs to be matched + * @ser_pdev_obj: Serialization PDEV Object pointer. + * + * This API will check if the cmd ID and cmd type of the given nnode are + * matching with the one's that are being passed to this function. + * + * Return: True if matched,false otherwise. + */ +bool wlan_serialization_match_cmd_id_type( + qdf_list_node_t *nnode, + struct wlan_serialization_command *cmd, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj); +/** + * wlan_serialization_match_cmd_vdev() - Check for a match on given nnode + * @nnode: The node on which the matching has to be done + * @vdev: VDEV object that needs to be matched + * + * This API will check if the VDEV object of the given nnode are + * matching with the one's that are being passed to this function. + * + * Return: True if matched,false otherwise. + */ +bool wlan_serialization_match_cmd_vdev(qdf_list_node_t *nnode, + struct wlan_objmgr_vdev *vdev); +/** + * wlan_serialization_match_cmd_pdev() - Check for a match on given nnode + * @nnode: The node on which the matching has to be done + * @pdev: VDEV object that needs to be matched + * + * This API will check if the PDEV object of the given nnode are + * matching with the one's that are being passed to this function. + * + * Return: True if matched,false otherwise. 
+ */ +bool wlan_serialization_match_cmd_pdev(qdf_list_node_t *nnode, + struct wlan_objmgr_pdev *pdev); +/** + * wlan_serialization_remove_front() - Remove the front node of the list + * @list: List from which the node is to be removed + * @node: Pointer to store the node that is removed + * @ser_pdev_obj: Serialization PDEV Object pointer + * + * Return: QDF_STATUS Success or Failure + */ +QDF_STATUS wlan_serialization_remove_front( + qdf_list_t *list, + qdf_list_node_t **node, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj); +/** + * wlan_serialization_remove_node() - Remove the given node from the list + * @list: List from which the node is to be removed + * @node: Pointer to the node that is to be removed + * @ser_pdev_obj: Serialization PDEV Object pointer + * + * Return: QDF_STATUS Success or Failure + */ +QDF_STATUS wlan_serialization_remove_node( + qdf_list_t *list, + qdf_list_node_t *node, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj); +/** + * wlan_serialization_insert_front() - Insert a node into the front of the list + * @list: List to which the node is to be inserted + * @node: Pointer to the node that is to be inserted + * @ser_pdev_obj: Serialization PDEV Object pointer + * + * Return: QDF_STATUS Success or Failure + */ +QDF_STATUS wlan_serialization_insert_front( + qdf_list_t *list, + qdf_list_node_t *node, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj); +/** + * wlan_serialization_insert_back() - Insert a node into the back of the list + * @list: List to which the node is to be inserted + * @node: Pointer to the node that is to be inserted + * @ser_pdev_obj: Serialization PDEV Object pointer + * + * Return: QDF_STATUS Success or Failure + */ +QDF_STATUS wlan_serialization_insert_back( + qdf_list_t *list, + qdf_list_node_t *node, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj); +/** + * wlan_serialization_peek_front() - Peek the front node of the list + * @list: List on which the node is to be peeked + * @node: 
Pointer to the store the node that is being peeked + * @ser_pdev_obj: Serialization PDEV Object pointer + * + * Return: QDF_STATUS Success or Failure + */ +QDF_STATUS wlan_serialization_peek_front( + qdf_list_t *list, + qdf_list_node_t **node, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj); +/** + * wlan_serialization_peek_next() - Peek the next node of the list + * @list: List on which the node is to be peeked + * @node1: Input node which is previous to the node to be peeked + * @node2: Pointer to the store the node that is being peeked + * @ser_pdev_obj: Serialization PDEV Object pointer + * + * Return: QDF_STATUS Success or Failure + */ +QDF_STATUS wlan_serialization_peek_next( + qdf_list_t *list, + qdf_list_node_t *node1, + qdf_list_node_t **node2, + struct wlan_serialization_pdev_priv_obj *ser_pdev_obj); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/utils/inc/wlan_utility.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/utils/inc/wlan_utility.h new file mode 100644 index 0000000000000000000000000000000000000000..a702193a39987b2adc9d8959b3919a55d7350c73 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/utils/inc/wlan_utility.h @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Contains mandatory API from legacy + */ + +#ifndef _WLAN_UTILITY_H_ +#define _WLAN_UTILITY_H_ + +#include +#include +#include +#include + +/** + * struct wlan_find_vdev_filter - find vdev filter object. this can be extended + * @ifname: interface name of vdev + * @found_vdev: found vdev object matching one or more of above params + */ +struct wlan_find_vdev_filter { + char *ifname; + struct wlan_objmgr_vdev *found_vdev; +}; + +/** + * struct wlan_op_mode_peer_count- vdev connected peer count + * @opmode: QDF mode + * @peer_count: peer count + **/ +struct wlan_op_mode_peer_count { + enum QDF_OPMODE opmode; + uint16_t peer_count; +}; + +/** + * wlan_chan_to_freq() - converts channel to frequency + * @chan: channel number + * + * @return frequency of the channel + */ +uint32_t wlan_chan_to_freq(uint8_t chan); + +/** + * wlan_freq_to_chan() - converts frequency to channel + * @freq: frequency + * + * Return: channel of frequency + */ +uint8_t wlan_freq_to_chan(uint32_t freq); + +/** + * wlan_is_ie_valid() - Determine if an IE sequence is valid + * @ie: Pointer to the IE buffer + * @ie_len: Length of the IE buffer @ie + * + * This function validates that the IE sequence is valid by verifying + * that the sum of the lengths of the embedded elements match the + * length of the sequence. + * + * Note well that a 0-length IE sequence is considered valid. 
+ *
+ * Return: true if the IE sequence is valid, false if it is invalid
+ */
+bool wlan_is_ie_valid(const uint8_t *ie, size_t ie_len);
+
+/**
+ * wlan_get_ie_ptr_from_eid() - Find out ie from eid
+ * @eid: element id
+ * @ie: source ie address
+ * @ie_len: source ie length
+ *
+ * Return: ie address - success
+ *         NULL - failure
+ */
+const uint8_t *wlan_get_ie_ptr_from_eid(uint8_t eid,
+					const uint8_t *ie,
+					int ie_len);
+
+/**
+ * wlan_get_vendor_ie_ptr_from_oui() - Find out vendor ie
+ * @oui: oui buffer
+ * @oui_size: oui size
+ * @ie: source ie address
+ * @ie_len: source ie length
+ *
+ * This function find out vendor ie by pass source ie and vendor oui.
+ *
+ * Return: vendor ie address - success
+ *         NULL - failure
+ */
+const uint8_t *wlan_get_vendor_ie_ptr_from_oui(const uint8_t *oui,
+					       uint8_t oui_size,
+					       const uint8_t *ie,
+					       uint16_t ie_len);
+
+/**
+ * wlan_get_ext_ie_ptr_from_ext_id() - Find out ext ie
+ * @oui: oui buffer
+ * @oui_size: oui size
+ * @ie: source ie address
+ * @ie_len: source ie length
+ *
+ * This function find out ext ie from ext id (passed oui)
+ *
+ * Return: ext ie address - success
+ *         NULL - failure
+ */
+const uint8_t *wlan_get_ext_ie_ptr_from_ext_id(const uint8_t *oui,
+					       uint8_t oui_size,
+					       const uint8_t *ie,
+					       uint16_t ie_len);
+
+/**
+ * wlan_is_emulation_platform() - check if platform is emulation based
+ * @phy_version: psoc nif phy_version
+ *
+ * Return: boolean value based on platform type
+ */
+bool wlan_is_emulation_platform(uint32_t phy_version);
+
+/**
+ * wlan_get_pdev_id_from_vdev_id() - Helper func to derive pdev id from vdev_id
+ * @psoc: psoc object
+ * @vdev_id: vdev identifier
+ * @dbg_id: object manager debug id
+ *
+ * This function is used to derive the pdev id from vdev id for a psoc
+ *
+ * Return: pdev_id - +ve integer for success and WLAN_INVALID_PDEV_ID
+ *         for failure
+ */
+uint32_t wlan_get_pdev_id_from_vdev_id(struct wlan_objmgr_psoc *psoc,
+				       uint8_t vdev_id,
+				       wlan_objmgr_ref_dbgid dbg_id);
+
+/**
+ * wlan_util_get_vdev_by_ifname() - function to return vdev object from psoc
+ * matching given interface name
+ * @psoc: psoc object
+ * @ifname: interface name
+ * @ref_id: object manager ref id
+ *
+ * This function returns vdev object from psoc by interface name. If found this
+ * will also take reference with given ref_id
+ *
+ * Return: vdev object if found, NULL otherwise
+ */
+struct wlan_objmgr_vdev *wlan_util_get_vdev_by_ifname(
+				struct wlan_objmgr_psoc *psoc, char *ifname,
+				wlan_objmgr_ref_dbgid ref_id);
+
+/**
+ * wlan_util_vdev_get_if_name() - get vdev's interface name
+ * @vdev: VDEV object
+ *
+ * API to get vdev's interface name
+ *
+ * Return: vdev's interface name
+ */
+uint8_t *wlan_util_vdev_get_if_name(struct wlan_objmgr_vdev *vdev);
+
+/**
+ * wlan_vdev_is_up() - Check for vdev is in UP state
+ * @vdev: vdev pointer
+ *
+ * Return: true in case of vdev is in UP state
+ */
+bool wlan_vdev_is_up(struct wlan_objmgr_vdev *vdev);
+
+/**
+ * wlan_util_is_vap_active() - Check for vap active
+ * @pdev: pdev pointer
+ * @dbg_id: debug id for ref counting
+ *
+ * Return: QDF_STATUS_SUCCESS in case of vap active
+ */
+QDF_STATUS wlan_util_is_vap_active(struct wlan_objmgr_pdev *pdev,
+				   wlan_objmgr_ref_dbgid dbg_id);
+
+/**
+ * wlan_util_get_peer_count_for_mode() - This api gives vdev mode specific
+ * peer count
+ * @pdev: PDEV object
+ * @mode: Operation mode.
+ *
+ * Return: peer count for the given opmode
+ */
+uint16_t wlan_util_get_peer_count_for_mode(struct wlan_objmgr_pdev *pdev,
+					   enum QDF_OPMODE mode);
+
+#endif /* _WLAN_UTILITY_H_ */
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/utils/src/wlan_utility.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/utils/src/wlan_utility.c
new file mode 100644
index 0000000000000000000000000000000000000000..7914d86352367c572a2f648f444a2cf19e9b32f0
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/utils/src/wlan_utility.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */ + +/** + * DOC: This file contains definition for mandatory legacy API + */ + +#include "qdf_str.h" +#include "wlan_utility.h" +#include +#include "wlan_osif_priv.h" +#include +#include + +uint32_t wlan_chan_to_freq(uint8_t chan) +{ + /* ch 0 - ch 13 */ + if (chan < WLAN_24_GHZ_CHANNEL_14) + return WLAN_24_GHZ_BASE_FREQ + chan * WLAN_CHAN_SPACING_5MHZ; + else if (chan == WLAN_24_GHZ_CHANNEL_14) + return WLAN_CHAN_14_FREQ; + else if (chan < WLAN_24_GHZ_CHANNEL_27) + /* ch 15 - ch 26 */ + return WLAN_CHAN_15_FREQ + + (chan - WLAN_24_GHZ_CHANNEL_15) * WLAN_CHAN_SPACING_20MHZ; + else if (chan == WLAN_5_GHZ_CHANNEL_170) + return WLAN_CHAN_170_FREQ; + else + return WLAN_5_GHZ_BASE_FREQ + chan * WLAN_CHAN_SPACING_5MHZ; +} + +uint8_t wlan_freq_to_chan(uint32_t freq) +{ + uint8_t chan; + + if (freq > WLAN_24_GHZ_BASE_FREQ && freq < WLAN_CHAN_14_FREQ) + chan = ((freq - WLAN_24_GHZ_BASE_FREQ) / + WLAN_CHAN_SPACING_5MHZ); + else if (freq == WLAN_CHAN_14_FREQ) + chan = WLAN_24_GHZ_CHANNEL_14; + else if ((freq > WLAN_24_GHZ_BASE_FREQ) && + (freq < WLAN_5_GHZ_BASE_FREQ)) + chan = (((freq - WLAN_CHAN_15_FREQ) / + WLAN_CHAN_SPACING_20MHZ) + + WLAN_24_GHZ_CHANNEL_15); + else + chan = (freq - WLAN_5_GHZ_BASE_FREQ) / + WLAN_CHAN_SPACING_5MHZ; + + return chan; +} + +bool wlan_is_ie_valid(const uint8_t *ie, size_t ie_len) +{ + uint8_t elen; + + while (ie_len) { + if (ie_len < 2) + return false; + + elen = ie[1]; + ie_len -= 2; + ie += 2; + if (elen > ie_len) + return false; + + ie_len -= elen; + ie += elen; + } + + return true; +} + +static const uint8_t *wlan_get_ie_ptr_from_eid_n_oui(uint8_t eid, + const uint8_t *oui, + uint8_t oui_size, + const uint8_t *ie, + uint16_t ie_len) +{ + int32_t left = ie_len; + const uint8_t *ptr = ie; + uint8_t elem_id, elem_len; + + while (left >= 2) { + elem_id = ptr[0]; + elem_len = ptr[1]; + left -= 2; + + if (elem_len > left) + return NULL; + + if (eid == elem_id) { + /* if oui is not provide eid match is enough */ + if (!oui) + return ptr; + + 
/* + * if oui is provided and oui_size is more than left + * bytes, then we cannot have match + */ + if (oui_size > left) + return NULL; + + if (qdf_mem_cmp(&ptr[2], oui, oui_size) == 0) + return ptr; + } + + left -= elem_len; + ptr += (elem_len + 2); + } + + return NULL; +} + +const uint8_t *wlan_get_ie_ptr_from_eid(uint8_t eid, + const uint8_t *ie, + int ie_len) +{ + return wlan_get_ie_ptr_from_eid_n_oui(eid, NULL, 0, ie, ie_len); +} + +const uint8_t *wlan_get_vendor_ie_ptr_from_oui(const uint8_t *oui, + uint8_t oui_size, + const uint8_t *ie, + uint16_t ie_len) +{ + return wlan_get_ie_ptr_from_eid_n_oui(WLAN_MAC_EID_VENDOR, + oui, oui_size, ie, ie_len); +} + +const uint8_t *wlan_get_ext_ie_ptr_from_ext_id(const uint8_t *oui, + uint8_t oui_size, + const uint8_t *ie, + uint16_t ie_len) +{ + return wlan_get_ie_ptr_from_eid_n_oui(WLAN_MAC_EID_EXT, + oui, oui_size, ie, ie_len); +} + +bool wlan_is_emulation_platform(uint32_t phy_version) +{ + if ((phy_version == 0xABC0) || (phy_version == 0xABC1) || + (phy_version == 0xABC2) || (phy_version == 0xABC3) || + (phy_version == 0xFFFF) || (phy_version == 0xABCD)) + return true; + + return false; +} + +uint32_t wlan_get_pdev_id_from_vdev_id(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + wlan_objmgr_ref_dbgid dbg_id) +{ + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev = NULL; + uint32_t pdev_id = WLAN_INVALID_PDEV_ID; + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + vdev_id, dbg_id); + + if (vdev) { + pdev = wlan_vdev_get_pdev(vdev); + if (pdev) + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + wlan_objmgr_vdev_release_ref(vdev, dbg_id); + } + + return pdev_id; +} +qdf_export_symbol(wlan_get_pdev_id_from_vdev_id); + +static void wlan_util_get_vdev_by_ifname_cb(struct wlan_objmgr_psoc *psoc, + void *obj, void *arg) +{ + struct wlan_objmgr_vdev *vdev = obj; + struct wlan_find_vdev_filter *filter = arg; + + if (filter->found_vdev) + return; + + wlan_vdev_obj_lock(vdev); + if 
(!qdf_str_cmp(vdev->vdev_nif.osdev->wdev->netdev->name, + filter->ifname)) { + filter->found_vdev = vdev; + } + wlan_vdev_obj_unlock(vdev); +} + +struct wlan_objmgr_vdev *wlan_util_get_vdev_by_ifname( + struct wlan_objmgr_psoc *psoc, char *ifname, + wlan_objmgr_ref_dbgid ref_id) +{ + QDF_STATUS status; + struct wlan_find_vdev_filter filter = {0}; + + filter.ifname = ifname; + wlan_objmgr_iterate_obj_list(psoc, WLAN_VDEV_OP, + wlan_util_get_vdev_by_ifname_cb, + &filter, 0, ref_id); + + if (!filter.found_vdev) + return NULL; + + status = wlan_objmgr_vdev_try_get_ref(filter.found_vdev, ref_id); + if (QDF_IS_STATUS_ERROR(status)) + return NULL; + + return filter.found_vdev; +} + +/** + * wlan_util_vdev_get_if_name() - get vdev's interface name + * @vdev: VDEV object + * + * API to get vdev's interface name + * + * Return: + * @id: vdev's interface name + */ +uint8_t *wlan_util_vdev_get_if_name(struct wlan_objmgr_vdev *vdev) +{ + uint8_t *name; + struct vdev_osif_priv *osif_priv; + + wlan_vdev_obj_lock(vdev); + + osif_priv = wlan_vdev_get_ospriv(vdev); + if (!osif_priv) { + wlan_vdev_obj_unlock(vdev); + return NULL; + } + + if (!osif_priv->wdev) { + wlan_vdev_obj_unlock(vdev); + return NULL; + } + + name = osif_priv->wdev->netdev->name; + wlan_vdev_obj_unlock(vdev); + + return name; +} +qdf_export_symbol(wlan_util_vdev_get_if_name); + +static void wlan_vap_active(struct wlan_objmgr_pdev *pdev, + void *object, + void *arg) +{ + struct wlan_objmgr_vdev *vdev = (struct wlan_objmgr_vdev *)object; + uint8_t *flag = (uint8_t *)arg; + + wlan_vdev_obj_lock(vdev); + if ((wlan_vdev_mlme_get_state(vdev) == WLAN_VDEV_S_RUN) || + (wlan_vdev_mlme_get_state(vdev) == WLAN_VDEV_S_DFS_WAIT)) { + *flag = 1; + } + wlan_vdev_obj_unlock(vdev); +} + +bool wlan_vdev_is_up(struct wlan_objmgr_vdev *vdev) +{ + bool ret_val = false; + + wlan_vdev_obj_lock(vdev); + if (wlan_vdev_mlme_get_state(vdev) == WLAN_VDEV_S_RUN) + ret_val = true; + + wlan_vdev_obj_unlock(vdev); + + return ret_val; +} + 
+QDF_STATUS wlan_util_is_vap_active(struct wlan_objmgr_pdev *pdev, + wlan_objmgr_ref_dbgid dbg_id) +{ + uint8_t flag = 0; + + if (!pdev) + return QDF_STATUS_E_INVAL; + + wlan_objmgr_pdev_iterate_obj_list(pdev, + WLAN_VDEV_OP, + wlan_vap_active, + &flag, 0, dbg_id); + + if (flag == 1) + return QDF_STATUS_SUCCESS; + + return QDF_STATUS_E_INVAL; +} + +/** + * wlan_util_get_mode_specific_peer_count - This api gives vdev mode specific + * peer count` + * @pdev: PDEV object + * @object: vdev object + * @arg: argument passed by caller + * + * Return: void + */ +static void +wlan_util_get_mode_specific_peer_count(struct wlan_objmgr_pdev *pdev, + void *object, void *arg) +{ + struct wlan_objmgr_vdev *vdev = object; + uint16_t temp_count = 0; + struct wlan_op_mode_peer_count *count = arg; + + wlan_vdev_obj_lock(vdev); + if (wlan_vdev_mlme_get_opmode(vdev) == count->opmode) { + temp_count = wlan_vdev_get_peer_count(vdev); + /* Decrement the self peer count */ + if (temp_count > 1) + count->peer_count += (temp_count - 1); + } + wlan_vdev_obj_unlock(vdev); +} + +uint16_t wlan_util_get_peer_count_for_mode(struct wlan_objmgr_pdev *pdev, + enum QDF_OPMODE mode) +{ + struct wlan_op_mode_peer_count count; + + count.opmode = mode; + count.peer_count = 0; + wlan_objmgr_pdev_iterate_obj_list(pdev, WLAN_VDEV_OP, + wlan_util_get_mode_specific_peer_count, + &count, 0, WLAN_OBJMGR_ID); + + return count.peer_count; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_cmn_api_i.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_cmn_api_i.h new file mode 100644 index 0000000000000000000000000000000000000000..207704ee4e44621ce7e050f3c892d698675b8d97 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_cmn_api_i.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. 
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: wlan_cp_stats_cmn_api_i.h
+ *
+ * This header file declares APIs which have separate definitions for both mc
+ * and ic
+ */
+#ifndef __WLAN_CP_STATS_CMN_API_I_H__
+#define __WLAN_CP_STATS_CMN_API_I_H__
+#ifdef QCA_SUPPORT_CP_STATS
+#include "wlan_cp_stats_defs.h"
+
+/**
+ * wlan_cp_stats_psoc_cs_init() - common psoc obj initialization
+ * @psoc_cs: pointer to psoc cp stats object
+ *
+ * Return: QDF_STATUS - Success or Failure
+ */
+QDF_STATUS wlan_cp_stats_psoc_cs_init(struct psoc_cp_stats *psoc_cs);
+
+/**
+ * wlan_cp_stats_psoc_cs_deinit() - common psoc obj deinitialization
+ * @psoc_cs: pointer to psoc cp stats object
+ *
+ * Return: QDF_STATUS - Success or Failure
+ */
+QDF_STATUS wlan_cp_stats_psoc_cs_deinit(struct psoc_cp_stats *psoc_cs);
+
+/**
+ * wlan_cp_stats_pdev_cs_init() - common pdev obj initialization
+ * @pdev_cs: pointer to pdev cp stats object
+ *
+ * Return: QDF_STATUS - Success or Failure
+ */
+QDF_STATUS wlan_cp_stats_pdev_cs_init(struct pdev_cp_stats *pdev_cs);
+
+/**
+ * wlan_cp_stats_pdev_cs_deinit() - common pdev obj deinitialization
+ * @pdev_cs: pointer to pdev cp stats object
+ *
+ * Return: QDF_STATUS - Success or Failure
+ */
+QDF_STATUS wlan_cp_stats_pdev_cs_deinit(struct pdev_cp_stats *pdev_cs);
+
+/**
+ * wlan_cp_stats_vdev_cs_init() - common vdev obj initialization
+ * @vdev_cs: pointer to vdev cp stats object
+ *
+ * Return: QDF_STATUS - Success or Failure
+ */
+QDF_STATUS wlan_cp_stats_vdev_cs_init(struct vdev_cp_stats *vdev_cs);
+
+/**
+ * wlan_cp_stats_vdev_cs_deinit() - common vdev obj deinitialization
+ * @vdev_cs: pointer to vdev cp stats object
+ *
+ * Return: QDF_STATUS - Success or Failure
+ */
+QDF_STATUS wlan_cp_stats_vdev_cs_deinit(struct vdev_cp_stats *vdev_cs);
+
+/**
+ * wlan_cp_stats_peer_cs_init() - common peer obj initialization
+ * @peer_cs: pointer to peer cp stats object
+ *
+ * Return: QDF_STATUS - Success or Failure
+ */
+QDF_STATUS wlan_cp_stats_peer_cs_init(struct peer_cp_stats *peer_cs);
+
+/**
+ * wlan_cp_stats_peer_cs_deinit() - common peer obj deinitialization
+ * @peer_cs: pointer to peer cp stats object
+ *
+ * Return: QDF_STATUS - Success or Failure
+ */
+QDF_STATUS wlan_cp_stats_peer_cs_deinit(struct peer_cp_stats *peer_cs);
+
+#endif /* QCA_SUPPORT_CP_STATS */
+#endif /* __WLAN_CP_STATS_CMN_API_I_H__ */
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_cmn_defs.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_cmn_defs.h
new file mode 100644
index 0000000000000000000000000000000000000000..0e9d2afee4092275acf4cb59f9637eb40ec94145
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_cmn_defs.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: wlan_cp_stats_cmn_defs.h
+ *
+ * This header file maintains definitions for cp stats structures which are
+ * common between win and mcl
+ */
+
+#ifndef __WLAN_CP_STATS_CMN_DEFS_H__
+#define __WLAN_CP_STATS_CMN_DEFS_H__
+
+#endif /* __WLAN_CP_STATS_CMN_DEFS_H__ */
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_comp_handler.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_comp_handler.c
new file mode 100644
index 0000000000000000000000000000000000000000..e3fca004fea91b947b9a25138ce31b6dc7347cf9
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_comp_handler.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: wlan_cp_stats_comp_handler.c
+ *
+ * This file maintains definitions of APIs which handle attach/detach of other
+ * UMAC component specific cp stat objects to cp stats.
+ *
+ * Components calling the configure API should allocate their data structure
+ * while attaching and deallocate it while detaching; on detach the component
+ * is responsible for freeing the object it originally attached.
+ */
+#include "wlan_cp_stats_comp_handler.h"
+#include "wlan_cp_stats_defs.h"
+/*
+ * NOTE(review): the two include targets below were lost in extraction
+ * (angle-bracket text stripped) — restore from the original patch.
+ */
+#include
+#include
+
+/**
+ * wlan_cp_stats_psoc_comp_obj_config() - attach/detach a component's private
+ * stats object on the psoc cp stats object, under the psoc cp stats lock
+ * @psoc: psoc object
+ * @comp_id: cp stats component id (index into psoc_comp_priv_obj[])
+ * @cfg_state: WLAN_CP_STATS_OBJ_ATTACH or WLAN_CP_STATS_OBJ_DETACH
+ * @data: component private object; on detach it must equal the attached one
+ *
+ * Return: QDF_STATUS_SUCCESS, QDF_STATUS_E_EXISTS if the slot is already
+ *         occupied on attach, QDF_STATUS_E_INVAL otherwise
+ */
+static QDF_STATUS
+wlan_cp_stats_psoc_comp_obj_config
+(struct wlan_objmgr_psoc *psoc, enum wlan_cp_stats_comp_id comp_id,
+ enum wlan_cp_stats_cfg_state cfg_state, void *data)
+{
+	struct psoc_cp_stats *psoc_cs;
+
+	psoc_cs = wlan_cp_stats_get_psoc_stats_obj(psoc);
+	if (!psoc_cs) {
+		cp_stats_err("psoc cp stats object is null");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	wlan_cp_stats_psoc_obj_lock(psoc_cs);
+	if (cfg_state == WLAN_CP_STATS_OBJ_ATTACH) {
+		if (psoc_cs->psoc_comp_priv_obj[comp_id]) {
+			wlan_cp_stats_psoc_obj_unlock(psoc_cs);
+			return QDF_STATUS_E_EXISTS;
+		}
+		psoc_cs->psoc_comp_priv_obj[comp_id] = data;
+	} else if (cfg_state == WLAN_CP_STATS_OBJ_DETACH) {
+		if (psoc_cs->psoc_comp_priv_obj[comp_id] != data) {
+			wlan_cp_stats_psoc_obj_unlock(psoc_cs);
+			return QDF_STATUS_E_INVAL;
+		}
+		/* NOTE(review): assignment to the local 'data' parameter is a
+		 * dead store — it cannot pass the address back to the caller.
+		 */
+		data = psoc_cs->psoc_comp_priv_obj[comp_id];
+		psoc_cs->psoc_comp_priv_obj[comp_id] = NULL;
+	} else if (cfg_state == WLAN_CP_STATS_OBJ_INVALID) {
+		cp_stats_err("Invalid cp stats cfg_state");
+		wlan_cp_stats_psoc_obj_unlock(psoc_cs);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	wlan_cp_stats_psoc_obj_unlock(psoc_cs);
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * wlan_cp_stats_pdev_comp_obj_config() - attach/detach a component's private
+ * stats object on the pdev cp stats object; same contract as the psoc variant
+ * @pdev: pdev object
+ * @comp_id: cp stats component id
+ * @cfg_state: attach/detach state
+ * @data: component private object
+ *
+ * Return: QDF_STATUS_SUCCESS, QDF_STATUS_E_EXISTS or QDF_STATUS_E_INVAL
+ */
+static QDF_STATUS
+wlan_cp_stats_pdev_comp_obj_config
+(struct wlan_objmgr_pdev *pdev, enum wlan_cp_stats_comp_id comp_id,
+ enum wlan_cp_stats_cfg_state cfg_state, void *data)
+{
+	struct pdev_cp_stats *pdev_cs;
+
+	pdev_cs = wlan_cp_stats_get_pdev_stats_obj(pdev);
+	if (!pdev_cs) {
+		cp_stats_err("pdev cp stats object is null");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	wlan_cp_stats_pdev_obj_lock(pdev_cs);
+	if (cfg_state == WLAN_CP_STATS_OBJ_ATTACH) {
+		if (pdev_cs->pdev_comp_priv_obj[comp_id]) {
+			wlan_cp_stats_pdev_obj_unlock(pdev_cs);
+			return QDF_STATUS_E_EXISTS;
+		}
+		pdev_cs->pdev_comp_priv_obj[comp_id] = data;
+	} else if (cfg_state == WLAN_CP_STATS_OBJ_DETACH) {
+		if (pdev_cs->pdev_comp_priv_obj[comp_id] != data) {
+			wlan_cp_stats_pdev_obj_unlock(pdev_cs);
+			return QDF_STATUS_E_INVAL;
+		}
+		/* NOTE(review): dead store, see psoc variant */
+		data = pdev_cs->pdev_comp_priv_obj[comp_id];
+		pdev_cs->pdev_comp_priv_obj[comp_id] = NULL;
+	} else if (cfg_state == WLAN_CP_STATS_OBJ_INVALID) {
+		cp_stats_err("Invalid cp stats cfg_state");
+		wlan_cp_stats_pdev_obj_unlock(pdev_cs);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	wlan_cp_stats_pdev_obj_unlock(pdev_cs);
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * wlan_cp_stats_vdev_comp_obj_config() - attach/detach a component's private
+ * stats object on the vdev cp stats object; same contract as the psoc variant
+ * @vdev: vdev object
+ * @comp_id: cp stats component id
+ * @cfg_state: attach/detach state
+ * @data: component private object
+ *
+ * Return: QDF_STATUS_SUCCESS, QDF_STATUS_E_EXISTS or QDF_STATUS_E_INVAL
+ */
+static QDF_STATUS
+wlan_cp_stats_vdev_comp_obj_config
+(struct wlan_objmgr_vdev *vdev, enum wlan_cp_stats_comp_id comp_id,
+ enum wlan_cp_stats_cfg_state cfg_state, void *data)
+{
+	struct vdev_cp_stats *vdev_cs;
+
+	vdev_cs = wlan_cp_stats_get_vdev_stats_obj(vdev);
+	if (!vdev_cs) {
+		cp_stats_err("vdev cp stats object is null");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	wlan_cp_stats_vdev_obj_lock(vdev_cs);
+	if (cfg_state == WLAN_CP_STATS_OBJ_ATTACH) {
+		if (vdev_cs->vdev_comp_priv_obj[comp_id]) {
+			wlan_cp_stats_vdev_obj_unlock(vdev_cs);
+			return QDF_STATUS_E_EXISTS;
+		}
+		vdev_cs->vdev_comp_priv_obj[comp_id] = data;
+	} else if (cfg_state == WLAN_CP_STATS_OBJ_DETACH) {
+		if (vdev_cs->vdev_comp_priv_obj[comp_id] != data) {
+			wlan_cp_stats_vdev_obj_unlock(vdev_cs);
+			return QDF_STATUS_E_INVAL;
+		}
+		/* NOTE(review): dead store, see psoc variant */
+		data = vdev_cs->vdev_comp_priv_obj[comp_id];
+		vdev_cs->vdev_comp_priv_obj[comp_id] = NULL;
+	} else if (cfg_state == WLAN_CP_STATS_OBJ_INVALID) {
+		cp_stats_err("Invalid cp stats cfg_state");
+		wlan_cp_stats_vdev_obj_unlock(vdev_cs);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	wlan_cp_stats_vdev_obj_unlock(vdev_cs);
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * wlan_cp_stats_peer_comp_obj_config() - attach/detach a component's private
+ * stats object on the peer cp stats object; same contract as the psoc variant
+ * @peer: peer object
+ * @comp_id: cp stats component id
+ * @cfg_state: attach/detach state
+ * @data: component private object
+ *
+ * Return: QDF_STATUS_SUCCESS, QDF_STATUS_E_EXISTS or QDF_STATUS_E_INVAL
+ */
+static QDF_STATUS
+wlan_cp_stats_peer_comp_obj_config
+(struct wlan_objmgr_peer *peer, enum wlan_cp_stats_comp_id comp_id,
+ enum wlan_cp_stats_cfg_state cfg_state, void *data)
+{
+	struct peer_cp_stats *peer_cs;
+
+	peer_cs = wlan_cp_stats_get_peer_stats_obj(peer);
+	if (!peer_cs) {
+		cp_stats_err("peer cp stats object is null");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	wlan_cp_stats_peer_obj_lock(peer_cs);
+	if (cfg_state == WLAN_CP_STATS_OBJ_ATTACH) {
+		if (peer_cs->peer_comp_priv_obj[comp_id]) {
+			wlan_cp_stats_peer_obj_unlock(peer_cs);
+			return QDF_STATUS_E_EXISTS;
+		}
+		peer_cs->peer_comp_priv_obj[comp_id] = data;
+	} else if (cfg_state == WLAN_CP_STATS_OBJ_DETACH) {
+		if (peer_cs->peer_comp_priv_obj[comp_id] != data) {
+			wlan_cp_stats_peer_obj_unlock(peer_cs);
+			return QDF_STATUS_E_INVAL;
+		}
+		/* NOTE(review): dead store, see psoc variant */
+		data = peer_cs->peer_comp_priv_obj[comp_id];
+		peer_cs->peer_comp_priv_obj[comp_id] = NULL;
+	} else if (cfg_state == WLAN_CP_STATS_OBJ_INVALID) {
+		cp_stats_err("Invalid cp stats cfg_state");
+		wlan_cp_stats_peer_obj_unlock(peer_cs);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	wlan_cp_stats_peer_obj_unlock(peer_cs);
+	return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS
+wlan_cp_stats_comp_obj_config(enum wlan_objmgr_obj_type obj_type,
+			      enum wlan_cp_stats_cfg_state cfg_state,
+			      enum wlan_cp_stats_comp_id comp_id,
+			      void *cmn_obj, void *data)
+{
+	QDF_STATUS status;
+
+	if (!cmn_obj) {
+		cp_stats_err("Common object is NULL");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* component id is invalid */
+	if (comp_id >= WLAN_CP_STATS_MAX_COMPONENTS) {
+		cp_stats_err("Invalid component Id");
+		return QDF_STATUS_MAXCOMP_FAIL;
+	}
+
+	/* dispatch to the object-type specific config helper */
+	switch (obj_type) {
+	case WLAN_PSOC_OP:
+		status =
+			wlan_cp_stats_psoc_comp_obj_config(
+					(struct wlan_objmgr_psoc *)cmn_obj,
+					comp_id, cfg_state, data);
+		break;
+	case WLAN_PDEV_OP:
+		status =
+			wlan_cp_stats_pdev_comp_obj_config(
+					(struct wlan_objmgr_pdev *)cmn_obj,
+					comp_id, cfg_state, data);
+		break;
+	case WLAN_VDEV_OP:
+		status =
+			wlan_cp_stats_vdev_comp_obj_config(
+					(struct wlan_objmgr_vdev *)cmn_obj,
+					comp_id, cfg_state, data);
+		break;
+	case WLAN_PEER_OP:
+		status =
+			wlan_cp_stats_peer_comp_obj_config(
+					(struct wlan_objmgr_peer *)cmn_obj,
+					comp_id, cfg_state, data);
+		break;
+	default:
+		cp_stats_err("Invalid common object");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	return status;
+}
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_comp_handler.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_comp_handler.h
new file mode 100644
index 0000000000000000000000000000000000000000..02a63f53992499aee70831682a53e9a357653a7e
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_comp_handler.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: wlan_cp_stats_comp_handler.h
+ *
+ * This header file has API declarations required to attach/detach and
+ * enable/disable other UMAC component specific control plane statistics
+ * to cp stats component object
+ */
+
+#ifndef __WLAN_CP_STATS_COMP_HANDLER_H__
+#define __WLAN_CP_STATS_COMP_HANDLER_H__
+
+#ifdef QCA_SUPPORT_CP_STATS
+#include "wlan_cp_stats_defs.h"
+
+/**
+ * wlan_cp_stats_comp_obj_config() - attach/detach component specific stats
+ * callback function
+ * @obj_type: common object type
+ * @cfg_state: config state either to attach or detach
+ * @comp_id: cpstats component id
+ * @cmn_obj: pointer to common object
+ * @comp_priv_obj: pointer to component specific cp stats object
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error
+ */
+QDF_STATUS wlan_cp_stats_comp_obj_config(
+		enum wlan_objmgr_obj_type obj_type,
+		enum wlan_cp_stats_cfg_state cfg_state,
+		enum wlan_cp_stats_comp_id comp_id,
+		void *cmn_obj,
+		void *comp_priv_obj);
+
+#endif /* QCA_SUPPORT_CP_STATS */
+#endif /* __WLAN_CP_STATS_COMP_HANDLER_H__ */
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_defs.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_defs.h
new file mode 100644
index 0000000000000000000000000000000000000000..f57f285f0301da937d0c06f7e1d307ca50dc12b5
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_defs.h
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: wlan_cp_stats_defs.h
+ *
+ * This header file maintains core definitions of control plane statistics
+ * component
+ */
+
+#ifndef __WLAN_CP_STATS_DEFS_H__
+#define __WLAN_CP_STATS_DEFS_H__
+
+#ifdef QCA_SUPPORT_CP_STATS
+/*
+ * NOTE(review): the bare "#include" lines below lost their angle-bracket
+ * targets during extraction — restore from the original patch.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include "wlan_cp_stats_cmn_defs.h"
+#include
+
+/* noise floor reported by target when no measurement is available (dBm) */
+#define CP_STATS_TGT_NOISE_FLOOR_DBM (-96)
+
+/**
+ * struct psoc_cp_stats - defines cp stats at psoc object
+ * @psoc_obj: pointer to psoc
+ * @psoc_comp_priv_obj[]: component's private object pointers
+ * @psoc_cp_stats_lock: lock to protect object
+ * @cmn_stats: stats common for AP and STA devices
+ * @obj_stats: stats specific to AP or STA devices
+ * @legacy_stats_cb: callback to update the stats received from FW in
+ * asynchronous events.
+ */
+struct psoc_cp_stats {
+	struct wlan_objmgr_psoc *psoc_obj;
+	void *psoc_comp_priv_obj[WLAN_CP_STATS_MAX_COMPONENTS];
+	qdf_spinlock_t psoc_cp_stats_lock;
+	struct psoc_cmn_cp_stats *cmn_stats;
+	void *obj_stats;
+	void (*legacy_stats_cb)(void *stats);
+};
+
+/**
+ * struct pdev_cp_stats - defines cp stats at pdev object
+ * @pdev_obj: pointer to pdev
+ * @pdev_stats: pointer to ic/mc specific stats
+ * @pdev_comp_priv_obj[]: component's private object pointers
+ * @pdev_cp_stats_lock: lock to protect object
+ */
+struct pdev_cp_stats {
+	struct wlan_objmgr_pdev *pdev_obj;
+	void *pdev_stats;
+	void *pdev_comp_priv_obj[WLAN_CP_STATS_MAX_COMPONENTS];
+	qdf_spinlock_t pdev_cp_stats_lock;
+};
+
+/**
+ * struct vdev_cp_stats - defines cp stats at vdev object
+ * @vdev_obj: pointer to vdev
+ * @vdev_stats: pointer to ic/mc specific stats
+ * @vdev_comp_priv_obj[]: component's private object pointers
+ * @vdev_cp_stats_lock: lock to protect object
+ */
+struct vdev_cp_stats {
+	struct wlan_objmgr_vdev *vdev_obj;
+	void *vdev_stats;
+	void *vdev_comp_priv_obj[WLAN_CP_STATS_MAX_COMPONENTS];
+	qdf_spinlock_t vdev_cp_stats_lock;
+};
+
+/**
+ * struct peer_cp_stats - defines cp stats at peer object
+ * @peer_obj: pointer to peer
+ * @peer_stats: pointer to ic/mc specific stats
+ * @peer_adv_stats: pointer to peer adv stats
+ * @peer_comp_priv_obj[]: component's private object pointers
+ * @peer_cp_stats_lock: lock to protect object
+ */
+struct peer_cp_stats {
+	struct wlan_objmgr_peer *peer_obj;
+	void *peer_stats;
+	void *peer_adv_stats;
+	void *peer_comp_priv_obj[WLAN_CP_STATS_MAX_COMPONENTS];
+	qdf_spinlock_t peer_cp_stats_lock;
+};
+
+/**
+ * struct cp_stats_context - defines cp stats global context object
+ * @csc_lock: lock to protect object
+ * @psoc_obj: pointer to psoc
+ * @psoc_cs: pointer to cp stats at psoc
+ * @cp_stats_ctx_init: callback pointer to init cp stats global ctx
+ * @cp_stats_ctx_deinit: callback pointer to deinit cp stats global ctx
+ * @cp_stats_psoc_obj_init: callback pointer to init cp stats obj on psoc
+ * create
+ * @cp_stats_psoc_obj_deinit: callback pointer to deinit cp stats obj on psoc
+ * destroy
+ * @cp_stats_pdev_obj_init: callback pointer to init cp stats obj on pdev
+ * create
+ * @cp_stats_pdev_obj_deinit: callback pointer to deinit cp stats obj on pdev
+ * destroy
+ * @cp_stats_vdev_obj_init: callback pointer to init cp stats obj on vdev
+ * create
+ * @cp_stats_vdev_obj_deinit: callback pointer to deinit cp stats obj on vdev
+ * destroy
+ * @cp_stats_peer_obj_init: callback pointer to init cp stats obj on peer
+ * create
+ * @cp_stats_peer_obj_deinit: callback pointer to deinit cp stats obj on peer
+ * destroy
+ * @cp_stats_comp_obj_config: callback pointer to attach/detach other umac comp
+ * @cp_stats_open: callback pointer for cp stats on psoc open
+ * @cp_stats_close: callback pointer for cp stats on psoc close
+ * @cp_stats_enable: callback pointer for cp stats on psoc enable
+ * @cp_stats_disable: callback pointer for cp stats on psoc disable
+ */
+struct cp_stats_context {
+	qdf_spinlock_t csc_lock;
+	struct wlan_objmgr_psoc *psoc_obj;
+	struct psoc_cp_stats *psoc_cs;
+	QDF_STATUS (*cp_stats_ctx_init)(struct cp_stats_context *ctx);
+	QDF_STATUS (*cp_stats_ctx_deinit)(struct cp_stats_context *ctx);
+	QDF_STATUS (*cp_stats_psoc_obj_init)(struct psoc_cp_stats *psoc_cs);
+	QDF_STATUS (*cp_stats_psoc_obj_deinit)(struct psoc_cp_stats *psoc_cs);
+	QDF_STATUS (*cp_stats_pdev_obj_init)(struct pdev_cp_stats *pdev_cs);
+	QDF_STATUS (*cp_stats_pdev_obj_deinit)(struct pdev_cp_stats *pdev_cs);
+	QDF_STATUS (*cp_stats_vdev_obj_init)(struct vdev_cp_stats *vdev_cs);
+	QDF_STATUS (*cp_stats_vdev_obj_deinit)(struct vdev_cp_stats *vdev_cs);
+	QDF_STATUS (*cp_stats_peer_obj_init)(struct peer_cp_stats *peer_cs);
+	QDF_STATUS (*cp_stats_peer_obj_deinit)(struct peer_cp_stats *peer_cs);
+	QDF_STATUS (*cp_stats_comp_obj_config)(
+			enum wlan_objmgr_obj_type obj_type,
+			enum wlan_cp_stats_cfg_state cfg_state,
+			enum wlan_cp_stats_comp_id comp_id,
+			void *cmn_obj,
+			void *data);
+	QDF_STATUS (*cp_stats_open)(struct wlan_objmgr_psoc *psoc);
+	QDF_STATUS (*cp_stats_close)(struct wlan_objmgr_psoc *psoc);
+	QDF_STATUS (*cp_stats_enable)(struct wlan_objmgr_psoc *psoc);
+	QDF_STATUS (*cp_stats_disable)(struct wlan_objmgr_psoc *psoc);
+};
+
+/**
+ * wlan_cp_stats_psoc_obj_lock() - private API to acquire spinlock at psoc
+ * @psoc: pointer to psoc cp stats object
+ *
+ * Return: void
+ */
+static inline void wlan_cp_stats_psoc_obj_lock(struct psoc_cp_stats *psoc)
+{
+	qdf_spin_lock_bh(&psoc->psoc_cp_stats_lock);
+}
+
+/**
+ * wlan_cp_stats_psoc_obj_unlock() - private API to release spinlock at psoc
+ * @psoc: pointer to psoc cp stats object
+ *
+ * Return: void
+ */
+static inline void wlan_cp_stats_psoc_obj_unlock(struct psoc_cp_stats *psoc)
+{
+	qdf_spin_unlock_bh(&psoc->psoc_cp_stats_lock);
+}
+
+/**
+ * wlan_cp_stats_pdev_obj_lock() - private API to acquire spinlock at pdev
+ * @pdev: pointer to pdev cp stats object
+ *
+ * Return: void
+ */
+static inline void wlan_cp_stats_pdev_obj_lock(struct pdev_cp_stats *pdev)
+{
+	qdf_spin_lock_bh(&pdev->pdev_cp_stats_lock);
+}
+
+/**
+ * wlan_cp_stats_pdev_obj_unlock() - private api to release spinlock at pdev
+ * @pdev: pointer to pdev cp stats object
+ *
+ * Return: void
+ */
+static inline void wlan_cp_stats_pdev_obj_unlock(struct pdev_cp_stats *pdev)
+{
+	qdf_spin_unlock_bh(&pdev->pdev_cp_stats_lock);
+}
+
+/**
+ * wlan_cp_stats_vdev_obj_lock() - private api to acquire spinlock at vdev
+ * @vdev: pointer to vdev cp stats object
+ *
+ * Return: void
+ */
+static inline void wlan_cp_stats_vdev_obj_lock(struct vdev_cp_stats *vdev)
+{
+	qdf_spin_lock_bh(&vdev->vdev_cp_stats_lock);
+}
+
+/**
+ * wlan_cp_stats_vdev_obj_unlock() - private api to release spinlock at vdev
+ * @vdev: pointer to vdev cp stats object
+ *
+ * Return: void
+ */
+static inline void wlan_cp_stats_vdev_obj_unlock(struct vdev_cp_stats *vdev)
+{
+	
qdf_spin_unlock_bh(&vdev->vdev_cp_stats_lock); +} + +/** + * wlan_cp_stats_peer_obj_lock() - private api to acquire spinlock at peer + * @peer: pointer to peer cp stats object + * + * Return: void + */ +static inline void wlan_cp_stats_peer_obj_lock(struct peer_cp_stats *peer) +{ + qdf_spin_lock_bh(&peer->peer_cp_stats_lock); +} + +/** + * wlan_cp_stats_peer_obj_unlock() - private api to release spinlock at peer + * @peer: pointer to peer cp stats object + * + * Return: void + */ +static inline void wlan_cp_stats_peer_obj_unlock(struct peer_cp_stats *peer) +{ + qdf_spin_unlock_bh(&peer->peer_cp_stats_lock); +} + +/** + * wlan_cp_stats_get_psoc_stats_obj() - API to get psoc_cp_stats from psoc + * @psoc: Reference to psoc global object + * + * This API used to get psoc specific cp_stats object from global psoc + * reference. + * + * Return : Reference to psoc_cp_stats object on success or NULL on failure + */ +static inline +struct psoc_cp_stats *wlan_cp_stats_get_psoc_stats_obj(struct wlan_objmgr_psoc + *psoc) +{ + struct cp_stats_context *csc; + + if (!psoc) + return NULL; + + csc = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_CP_STATS); + + if (!csc) + return NULL; + + return csc->psoc_cs; +} + +/** + * wlan_cp_stats_get_pdev_stats_obj() - API to get pdev_cp_stats from pdev + * @pdev: Reference to pdev global object + * + * This API used to get pdev specific cp_stats object from global pdev + * reference. 
+ * + * Return : Reference to pdev_cp_stats object on success or NULL on failure + */ +static inline +struct pdev_cp_stats *wlan_cp_stats_get_pdev_stats_obj(struct wlan_objmgr_pdev + *pdev) +{ + struct pdev_cp_stats *pdev_cs = NULL; + + if (pdev) { + pdev_cs = wlan_objmgr_pdev_get_comp_private_obj + (pdev, WLAN_UMAC_COMP_CP_STATS); + } + + return pdev_cs; +} + +/** + * wlan_cp_stats_get_vdev_stats_obj() - API to get vdev_cp_stats from vdev + * @vdev : Reference to vdev global object + * + * This API used to get vdev specific cp_stats object from global vdev + * reference. + * + * Return : Reference to vdev_cp_stats object on success or NULL on failure + */ +static inline +struct vdev_cp_stats *wlan_cp_stats_get_vdev_stats_obj(struct wlan_objmgr_vdev + *vdev) +{ + struct vdev_cp_stats *vdev_cs = NULL; + + if (vdev) { + vdev_cs = wlan_objmgr_vdev_get_comp_private_obj + (vdev, WLAN_UMAC_COMP_CP_STATS); + } + + return vdev_cs; +} + +/** + * wlan_cp_stats_get_peer_stats_obj() - API to get peer_cp_stats from peer + * @peer: Reference to peer global object + * + * This API used to get peer specific cp_stats object from global peer + * reference. + * + * Return : Reference to peer_cp_stats object on success or NULL on failure + */ +static inline +struct peer_cp_stats *wlan_cp_stats_get_peer_stats_obj(struct wlan_objmgr_peer + *peer) +{ + struct peer_cp_stats *peer_cs = NULL; + + if (peer) { + peer_cs = wlan_objmgr_peer_get_comp_private_obj + (peer, WLAN_UMAC_COMP_CP_STATS); + } + + return peer_cs; +} + +/** + * wlan_cp_stats_get_pdev_from_vdev() - API to get pdev_cp_stats obj from vdev + * @vdev: Reference to vdev global object + * + * This API used to get pdev specific cp_stats object from global vdev + * reference. 
+ * + * Return: Reference to pdev_cp_stats object on success or NULL on failure + */ +static inline +struct pdev_cp_stats *wlan_cp_stats_get_pdev_from_vdev(struct wlan_objmgr_vdev + *vdev) +{ + struct wlan_objmgr_pdev *pdev; + struct pdev_cp_stats *pdev_cs = NULL; + + pdev = wlan_vdev_get_pdev(vdev); + if (pdev) { + pdev_cs = wlan_objmgr_pdev_get_comp_private_obj + (pdev, WLAN_UMAC_COMP_CP_STATS); + } + + return pdev_cs; +} + +/** + * wlan_cp_stats_ctx_get_from_pdev() - API to get cp_stats ctx obj from pdev + * @pdev: Reference to pdev global object + * + * This API used to get cp_stats context object from global pdev reference. + * + * Return: Reference to cp_stats_context object on success or NULL on failure + */ +static inline +struct cp_stats_context *wlan_cp_stats_ctx_get_from_pdev(struct wlan_objmgr_pdev + *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct cp_stats_context *csc = NULL; + + if (!pdev) + return NULL; + + psoc = wlan_pdev_get_psoc(pdev); + if (psoc) { + csc = wlan_objmgr_psoc_get_comp_private_obj + (psoc, WLAN_UMAC_COMP_CP_STATS); + } + return csc; +} + +/** + * wlan_cp_stats_ctx_get_from_vdev() - API to get cp_stats ctx obj from vdev + * @vdev: Reference to vdev global object + * + * This API used to get cp_stats context object from global vdev reference. + * + * Return: Reference to cp_stats_context object on success or NULL on failure + */ +static inline +struct cp_stats_context *wlan_cp_stats_ctx_get_from_vdev(struct wlan_objmgr_vdev + *vdev) +{ + struct wlan_objmgr_pdev *pdev; + + if (!vdev) + return NULL; + + pdev = wlan_vdev_get_pdev(vdev); + return wlan_cp_stats_ctx_get_from_pdev(pdev); +} + +/** + * wlan_cp_stats_ctx_get_from_peer() - API to get cp_stats ctx object from peer + * @peer: Reference to peer object + * + * This API used to get cp_stats context object from global peer reference. 
+ * + * Return: Reference to cp_stats_context object on success or NULL on failure + */ +static inline +struct cp_stats_context *wlan_cp_stats_ctx_get_from_peer(struct wlan_objmgr_peer + *peer) +{ + struct wlan_objmgr_vdev *vdev; + + vdev = wlan_peer_get_vdev(peer); + return wlan_cp_stats_ctx_get_from_vdev(vdev); +} + +/** + * wlan_cp_stats_get_comp_id() - API to get cp_stats component id from umac + * component id + * @comp_id: umac comp id + * + * Return: wlan_cp_stats_comp_id + */ +static inline enum wlan_cp_stats_comp_id +wlan_cp_stats_get_comp_id(enum wlan_umac_comp_id comp_id) +{ + enum wlan_cp_stats_comp_id cp_stats_comp_id = + WLAN_CP_STATS_MAX_COMPONENTS; + + if (comp_id == WLAN_UMAC_COMP_ATF) + cp_stats_comp_id = WLAN_CP_STATS_ATF; + + return cp_stats_comp_id; +} + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_DEFS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_obj_mgr_handler.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_obj_mgr_handler.c new file mode 100644 index 0000000000000000000000000000000000000000..dbe4ed930d6dbdd87886132891d879d6c01a265b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_obj_mgr_handler.c @@ -0,0 +1,411 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * Doc: wlan_cp_stats_om_handler.c + * + * This file provide definitions to APIs invoked on receiving common object + * repective create/destroy event notifications, which further + * (de)allocate cp specific objects and (de)attach to specific + * common object + */ +#include "wlan_cp_stats_obj_mgr_handler.h" +#include "wlan_cp_stats_defs.h" +#include "wlan_cp_stats_ol_api.h" +#include "wlan_cp_stats_defs.h" +#include +#include "wlan_cp_stats_utils_api.h" + +QDF_STATUS +wlan_cp_stats_psoc_obj_create_handler(struct wlan_objmgr_psoc *psoc, void *arg) +{ + WLAN_DEV_TYPE dev_type; + struct cp_stats_context *csc = NULL; + struct psoc_cp_stats *psoc_cs = NULL; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!psoc) { + cp_stats_err("PSOC is NULL"); + status = QDF_STATUS_E_INVAL; + goto wlan_cp_stats_psoc_obj_create_handler_return; + } + + csc = qdf_mem_malloc(sizeof(*csc)); + if (!csc) { + cp_stats_err("Failed to allocate cp_stats_context object"); + status = QDF_STATUS_E_NOMEM; + goto wlan_cp_stats_psoc_obj_create_handler_return; + } + + csc->psoc_obj = psoc; + dev_type = wlan_objmgr_psoc_get_dev_type(csc->psoc_obj); + if (dev_type == WLAN_DEV_INVALID) { + cp_stats_err("Failed to init cp stats ctx, bad device type"); + status = QDF_STATUS_E_INVAL; + goto wlan_cp_stats_psoc_obj_create_handler_return; + } else if (WLAN_DEV_DA == dev_type) { + csc->cp_stats_ctx_init = wlan_cp_stats_ctx_init_da; + csc->cp_stats_ctx_deinit = wlan_cp_stats_ctx_deinit_da; + } else if (WLAN_DEV_OL == dev_type) { + csc->cp_stats_ctx_init = wlan_cp_stats_ctx_init_ol; + csc->cp_stats_ctx_deinit = wlan_cp_stats_ctx_deinit_ol; + } + + if 
(QDF_STATUS_SUCCESS != csc->cp_stats_ctx_init(csc)) { + cp_stats_err("Failed to init global ctx call back handlers"); + goto wlan_cp_stats_psoc_obj_create_handler_return; + } + + psoc_cs = qdf_mem_malloc(sizeof(*psoc_cs)); + if (!psoc_cs) { + cp_stats_err("Failed to allocate psoc_cp_stats object"); + status = QDF_STATUS_E_NOMEM; + goto wlan_cp_stats_psoc_obj_create_handler_return; + } + + psoc_cs->psoc_obj = psoc; + csc->psoc_cs = psoc_cs; + if (csc->cp_stats_psoc_obj_init) { + if (QDF_STATUS_SUCCESS != + csc->cp_stats_psoc_obj_init(psoc_cs)) { + cp_stats_err("Failed to initialize psoc handlers"); + goto wlan_cp_stats_psoc_obj_create_handler_return; + } + } + + status = wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_UMAC_COMP_CP_STATS, + csc, + QDF_STATUS_SUCCESS); + +wlan_cp_stats_psoc_obj_create_handler_return: + if (QDF_IS_STATUS_ERROR(status)) { + if (csc) { + if (csc->cp_stats_psoc_obj_deinit && psoc_cs) + csc->cp_stats_psoc_obj_deinit(psoc_cs); + + if (csc->psoc_cs) { + qdf_mem_free(csc->psoc_cs); + csc->psoc_cs = NULL; + } + + if (csc->cp_stats_ctx_deinit) + csc->cp_stats_ctx_deinit(csc); + + qdf_mem_free(csc); + csc = NULL; + } + return status; + } + + cp_stats_debug("cp stats context attach at psoc"); + return status; +} + +QDF_STATUS +wlan_cp_stats_psoc_obj_destroy_handler(struct wlan_objmgr_psoc *psoc, void *arg) +{ + struct cp_stats_context *csc; + + if (!psoc) { + cp_stats_err("PSOC is NULL"); + return QDF_STATUS_E_NOMEM; + } + csc = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_CP_STATS); + if (!csc) { + cp_stats_err("cp_stats context is NULL!"); + return QDF_STATUS_E_INVAL; + } + + wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_CP_STATS, csc); + if (csc->cp_stats_psoc_obj_deinit) + csc->cp_stats_psoc_obj_deinit(csc->psoc_cs); + qdf_mem_free(csc->psoc_cs); + if (csc->cp_stats_ctx_deinit) + csc->cp_stats_ctx_deinit(csc); + qdf_mem_free(csc); + + cp_stats_debug("cp stats context dettached at psoc"); + return 
QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_cp_stats_pdev_obj_create_handler(struct wlan_objmgr_pdev *pdev, void *arg) +{ + struct cp_stats_context *csc = NULL; + struct pdev_cp_stats *pdev_cs = NULL; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!pdev) { + cp_stats_err("PDEV is NULL"); + status = QDF_STATUS_E_INVAL; + goto wlan_cp_stats_pdev_obj_create_handler_return; + } + + pdev_cs = qdf_mem_malloc(sizeof(*pdev_cs)); + if (!pdev_cs) { + cp_stats_err("Failed to allocate pdev_cp_stats object"); + status = QDF_STATUS_E_NOMEM; + goto wlan_cp_stats_pdev_obj_create_handler_return; + } + csc = wlan_cp_stats_ctx_get_from_pdev(pdev); + if (!csc) { + cp_stats_err("cp_stats context is NULL!"); + status = QDF_STATUS_E_INVAL; + goto wlan_cp_stats_pdev_obj_create_handler_return; + } + pdev_cs->pdev_obj = pdev; + if (csc->cp_stats_pdev_obj_init) { + if (QDF_STATUS_SUCCESS != + csc->cp_stats_pdev_obj_init(pdev_cs)) { + cp_stats_err("Failed to initialize pdev handlers"); + goto wlan_cp_stats_pdev_obj_create_handler_return; + } + } + + status = wlan_objmgr_pdev_component_obj_attach(pdev, + WLAN_UMAC_COMP_CP_STATS, + pdev_cs, + QDF_STATUS_SUCCESS); + + cp_stats_debug("pdev cp stats object attached"); +wlan_cp_stats_pdev_obj_create_handler_return: + if (QDF_IS_STATUS_ERROR(status)) { + if (csc) { + if (csc->cp_stats_pdev_obj_deinit) + csc->cp_stats_pdev_obj_deinit(pdev_cs); + } + + if (pdev_cs) + qdf_mem_free(pdev_cs); + } + + return status; +} + +QDF_STATUS +wlan_cp_stats_pdev_obj_destroy_handler(struct wlan_objmgr_pdev *pdev, void *arg) +{ + struct pdev_cp_stats *pdev_cs; + struct cp_stats_context *csc; + + if (!pdev) { + cp_stats_err("pdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + pdev_cs = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_CP_STATS); + if (!pdev_cs) { + cp_stats_err("pdev is NULL"); + return QDF_STATUS_E_INVAL; + } + csc = wlan_cp_stats_ctx_get_from_pdev(pdev); + if (!csc) { + cp_stats_err("cp_stats context is NULL!"); + return 
QDF_STATUS_E_INVAL; + } + + if (csc->cp_stats_pdev_obj_deinit) + csc->cp_stats_pdev_obj_deinit(pdev_cs); + + wlan_objmgr_pdev_component_obj_detach(pdev, WLAN_UMAC_COMP_CP_STATS, + pdev_cs); + + qdf_mem_free(pdev_cs); + cp_stats_debug("pdev cp stats object dettached"); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_cp_stats_vdev_obj_create_handler(struct wlan_objmgr_vdev *vdev, void *arg) +{ + struct cp_stats_context *csc = NULL; + struct vdev_cp_stats *vdev_cs = NULL; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!vdev) { + cp_stats_err("vdev is NULL"); + status = QDF_STATUS_E_INVAL; + goto wlan_cp_stats_vdev_obj_create_handler_return; + } + + vdev_cs = qdf_mem_malloc(sizeof(*vdev_cs)); + if (!vdev_cs) { + cp_stats_err("Failed to allocate vdev_cp_stats object"); + status = QDF_STATUS_E_NOMEM; + goto wlan_cp_stats_vdev_obj_create_handler_return; + } + csc = wlan_cp_stats_ctx_get_from_vdev(vdev); + if (!csc) { + cp_stats_err("cp_stats context is NULL!"); + status = QDF_STATUS_E_INVAL; + goto wlan_cp_stats_vdev_obj_create_handler_return; + } + vdev_cs->vdev_obj = vdev; + if (csc->cp_stats_vdev_obj_init) { + if (QDF_STATUS_SUCCESS != + csc->cp_stats_vdev_obj_init(vdev_cs)) { + cp_stats_err("Failed to initialize vdev handlers"); + goto wlan_cp_stats_vdev_obj_create_handler_return; + } + } + + status = wlan_objmgr_vdev_component_obj_attach(vdev, + WLAN_UMAC_COMP_CP_STATS, + vdev_cs, + QDF_STATUS_SUCCESS); + +wlan_cp_stats_vdev_obj_create_handler_return: + if (QDF_IS_STATUS_ERROR(status)) { + if (csc) { + if (csc->cp_stats_vdev_obj_deinit) + csc->cp_stats_vdev_obj_deinit(vdev_cs); + } + + if (vdev_cs) + qdf_mem_free(vdev_cs); + } + + cp_stats_debug("vdev cp stats object attach"); + return status; +} + +QDF_STATUS +wlan_cp_stats_vdev_obj_destroy_handler(struct wlan_objmgr_vdev *vdev, void *arg) +{ + struct vdev_cp_stats *vdev_cs; + struct cp_stats_context *csc; + + if (!vdev) { + cp_stats_err("vdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + vdev_cs = 
wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_CP_STATS); + if (!vdev_cs) { + cp_stats_err("vdev is NULL"); + return QDF_STATUS_E_INVAL; + } + csc = wlan_cp_stats_ctx_get_from_vdev(vdev); + if (!csc) { + cp_stats_err("cp_stats context is NULL!"); + return QDF_STATUS_E_INVAL; + } + + if (csc->cp_stats_vdev_obj_deinit) + csc->cp_stats_vdev_obj_deinit(vdev_cs); + + wlan_objmgr_vdev_component_obj_detach(vdev, WLAN_UMAC_COMP_CP_STATS, + vdev_cs); + + qdf_mem_free(vdev_cs); + cp_stats_debug("vdev cp stats object dettach"); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_cp_stats_peer_obj_create_handler(struct wlan_objmgr_peer *peer, void *arg) +{ + struct cp_stats_context *csc = NULL; + struct peer_cp_stats *peer_cs = NULL; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!peer) { + cp_stats_err("peer is NULL"); + status = QDF_STATUS_E_INVAL; + goto wlan_cp_stats_peer_obj_create_handler_return; + } + + peer_cs = qdf_mem_malloc(sizeof(*peer_cs)); + if (!peer_cs) { + cp_stats_err("Failed to allocate peer_cp_stats object"); + status = QDF_STATUS_E_NOMEM; + goto wlan_cp_stats_peer_obj_create_handler_return; + } + csc = wlan_cp_stats_ctx_get_from_peer(peer); + if (!csc) { + cp_stats_err("cp_stats context is NULL!"); + status = QDF_STATUS_E_INVAL; + goto wlan_cp_stats_peer_obj_create_handler_return; + } + peer_cs->peer_obj = peer; + if (csc->cp_stats_peer_obj_init) { + if (QDF_STATUS_SUCCESS != + csc->cp_stats_peer_obj_init(peer_cs)) { + cp_stats_err("Failed to initialize peer handlers"); + goto wlan_cp_stats_peer_obj_create_handler_return; + } + } + + status = wlan_objmgr_peer_component_obj_attach(peer, + WLAN_UMAC_COMP_CP_STATS, + peer_cs, + QDF_STATUS_SUCCESS); + +wlan_cp_stats_peer_obj_create_handler_return: + if (QDF_IS_STATUS_ERROR(status)) { + if (csc) { + if (csc->cp_stats_peer_obj_deinit) + csc->cp_stats_peer_obj_deinit(peer_cs); + } + + if (peer_cs) + qdf_mem_free(peer_cs); + } + + cp_stats_debug("peer cp stats object attach"); + return status; 
+} + +QDF_STATUS +wlan_cp_stats_peer_obj_destroy_handler(struct wlan_objmgr_peer *peer, void *arg) +{ + struct peer_cp_stats *peer_cs; + struct cp_stats_context *csc; + + if (!peer) { + cp_stats_err("peer is NULL"); + return QDF_STATUS_E_INVAL; + } + + peer_cs = wlan_objmgr_peer_get_comp_private_obj(peer, + WLAN_UMAC_COMP_CP_STATS); + if (!peer_cs) { + cp_stats_err("peer is NULL"); + return QDF_STATUS_E_INVAL; + } + csc = wlan_cp_stats_ctx_get_from_peer(peer); + if (!csc) { + cp_stats_err("cp_stats context is NULL!"); + return QDF_STATUS_E_INVAL; + } + + if (csc->cp_stats_peer_obj_deinit) + csc->cp_stats_peer_obj_deinit(peer_cs); + + wlan_objmgr_peer_component_obj_detach(peer, WLAN_UMAC_COMP_CP_STATS, + peer_cs); + + qdf_mem_free(peer_cs); + cp_stats_debug("peer cp stats object dettached"); + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_obj_mgr_handler.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_obj_mgr_handler.h new file mode 100644 index 0000000000000000000000000000000000000000..0f60f94b940e250461a07592102be1f56d01b2a3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_obj_mgr_handler.h @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_obj_mgr_handler.h + * + * This header file provide declarations for APIs to handle events from object + * manager for registered events from wlan_cp_stats_init() + */ + +#ifndef __WLAN_CP_STATS_OBJ_MGR_HANDLER_H__ +#define __WLAN_CP_STATS_OBJ_MGR_HANDLER_H__ + +#ifdef QCA_SUPPORT_CP_STATS +#include +#include +#include +#include +#include +#include + +#ifdef QCA_SUPPORT_CP_STATS_DA +#include "wlan_cp_stats_da_api.h" +#else +#include "wlan_cp_stats_defs.h" +#endif + +/** + * wlan_cp_stats_psoc_obj_create_handler() - psoc create notification handler + * callback function + * @psoc: pointer to psoc object + * @data: pointer to arg data + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_psoc_obj_create_handler( + struct wlan_objmgr_psoc *psoc, void *data); + +/** + * wlan_cp_stats_psoc_obj_destroy_handler() - psoc destroy notification handler + * callback function + * @psoc: pointer to psoc object + * @data: pointer to arg data + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_psoc_obj_destroy_handler( + struct wlan_objmgr_psoc *psoc, void *data); + +/** + * wlan_cp_stats_pdev_obj_create_handler() - Pdev create notification handler + * callback function + * @pdev: pointer to pdev object + * @data: pointer to arg data + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_pdev_obj_create_handler( + struct wlan_objmgr_pdev *pdev, void *data); + +/** + * wlan_cp_stats_pdev_obj_destroy_handler() - Pdev destroy notification handler + * callback function + * @pdev: pointer to pdev object + * @data: pointer to arg data + * + * 
Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_pdev_obj_destroy_handler( + struct wlan_objmgr_pdev *pdev, void *data); + +/** + * wlan_cp_stats_vdev_obj_create_handler() - vdev create notification handler + * callback function + * @vdev: pointer to vdev object + * @data: pointer to arg data + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_vdev_obj_create_handler( + struct wlan_objmgr_vdev *vdev, void *data); + +/** + * wlan_cp_stats_vdev_obj_destroy_handler() - vdev destroy notification handler + * callback function + * @vdev: pointer to vdev object + * @data: pointer to arg data + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_vdev_obj_destroy_handler( + struct wlan_objmgr_vdev *vdev, void *data); + +/** + * wlan_cp_stats_peer_obj_create_handler() - peer create notification handler + * callback function + * @peer: pointer to peer object + * @data: pointer to arg data + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_peer_obj_create_handler( + struct wlan_objmgr_peer *peer, void *data); + +/** + * wlan_cp_stats_peer_obj_destroy_handler() - peer destroy notification handler + * callback function + * @peer: pointer to peer object + * @data: pointer to arg data + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_cp_stats_peer_obj_destroy_handler( + struct wlan_objmgr_peer *peer, void *data); + +#ifndef QCA_SUPPORT_CP_STATS_DA +static inline +QDF_STATUS wlan_cp_stats_ctx_init_da(struct cp_stats_context *csc) +{ + return QDF_STATUS_SUCCESS; +} + +static inline +QDF_STATUS wlan_cp_stats_ctx_deinit_da(struct cp_stats_context *csc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_OBJ_MGR_HANDLER_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_ol_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_ol_api.c new file mode 
100644 index 0000000000000000000000000000000000000000..1f038310a5505d5279cadc1793b69766703fc99a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_ol_api.c @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cp_stats_ol_api.c + * + * This file provide definitions for following + * - (de)init cp stat global ctx obj + * - (de)init common specific ucfg handler + * - (de)register to WMI events for psoc open + */ +#include +#include "wlan_cp_stats_defs.h" +#include "wlan_cp_stats_ol_api.h" +#include "wlan_cp_stats_cmn_api_i.h" +#include +#include +#include + +QDF_STATUS wlan_cp_stats_psoc_obj_init_ol(struct psoc_cp_stats *psoc_cs) +{ + qdf_spinlock_create(&psoc_cs->psoc_cp_stats_lock); + wlan_cp_stats_psoc_cs_init(psoc_cs); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_psoc_obj_deinit_ol(struct psoc_cp_stats *psoc_cs) +{ + wlan_cp_stats_psoc_cs_deinit(psoc_cs); + qdf_spinlock_destroy(&psoc_cs->psoc_cp_stats_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_pdev_obj_init_ol(struct pdev_cp_stats *pdev_cs) +{ + qdf_spinlock_create(&pdev_cs->pdev_cp_stats_lock); + wlan_cp_stats_pdev_cs_init(pdev_cs); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_pdev_obj_deinit_ol(struct pdev_cp_stats *pdev_cs) +{ + wlan_cp_stats_pdev_cs_deinit(pdev_cs); + qdf_spinlock_destroy(&pdev_cs->pdev_cp_stats_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_vdev_obj_init_ol(struct vdev_cp_stats *vdev_cs) +{ + qdf_spinlock_create(&vdev_cs->vdev_cp_stats_lock); + wlan_cp_stats_vdev_cs_init(vdev_cs); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_vdev_obj_deinit_ol(struct vdev_cp_stats *vdev_cs) +{ + wlan_cp_stats_vdev_cs_deinit(vdev_cs); + qdf_spinlock_destroy(&vdev_cs->vdev_cp_stats_lock); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_peer_obj_init_ol(struct peer_cp_stats *peer_cs) +{ + qdf_spinlock_create(&peer_cs->peer_cp_stats_lock); + wlan_cp_stats_peer_cs_init(peer_cs); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_peer_obj_deinit_ol(struct peer_cp_stats *peer_cs) +{ + wlan_cp_stats_peer_cs_deinit(peer_cs); + qdf_spinlock_destroy(&peer_cs->peer_cp_stats_lock); + return 
QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_open_ol(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) { + cp_stats_err("PSOC is null!"); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_close_ol(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) { + cp_stats_err("PSOC is null!"); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_enable_ol(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_cp_stats_tx_ops *tx_ops; + + if (!psoc) { + cp_stats_err("PSOC is null!"); + return QDF_STATUS_E_INVAL; + } + + tx_ops = target_if_cp_stats_get_tx_ops(psoc); + if (!tx_ops) { + cp_stats_err("tx_ops is null!"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!tx_ops->cp_stats_attach) { + cp_stats_err("cp_stats_attach function ptr is null!"); + return QDF_STATUS_E_NULL_VALUE; + } + + tx_ops->cp_stats_attach(psoc); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_disable_ol(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_cp_stats_tx_ops *tx_ops; + + if (!psoc) { + cp_stats_err("PSOC is null!"); + return QDF_STATUS_E_INVAL; + } + + tx_ops = target_if_cp_stats_get_tx_ops(psoc); + if (!tx_ops) { + cp_stats_err("tx_ops is null!"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!tx_ops->cp_stats_detach) { + cp_stats_err("cp_stats_detach function ptr is null!"); + return QDF_STATUS_E_NULL_VALUE; + } + + tx_ops->cp_stats_detach(psoc); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_ctx_init_ol(struct cp_stats_context *csc) +{ + csc->cp_stats_open = wlan_cp_stats_open_ol; + csc->cp_stats_close = wlan_cp_stats_close_ol; + csc->cp_stats_enable = wlan_cp_stats_enable_ol; + csc->cp_stats_disable = wlan_cp_stats_disable_ol; + csc->cp_stats_psoc_obj_init = wlan_cp_stats_psoc_obj_init_ol; + csc->cp_stats_psoc_obj_deinit = wlan_cp_stats_psoc_obj_deinit_ol; + csc->cp_stats_pdev_obj_init = wlan_cp_stats_pdev_obj_init_ol; + csc->cp_stats_pdev_obj_deinit = 
wlan_cp_stats_pdev_obj_deinit_ol; + csc->cp_stats_vdev_obj_init = wlan_cp_stats_vdev_obj_init_ol; + csc->cp_stats_vdev_obj_deinit = wlan_cp_stats_vdev_obj_deinit_ol; + csc->cp_stats_peer_obj_init = wlan_cp_stats_peer_obj_init_ol; + csc->cp_stats_peer_obj_deinit = wlan_cp_stats_peer_obj_deinit_ol; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_ctx_deinit_ol(struct cp_stats_context *csc) +{ + csc->cp_stats_open = NULL; + csc->cp_stats_close = NULL; + csc->cp_stats_enable = NULL; + csc->cp_stats_disable = NULL; + csc->cp_stats_psoc_obj_init = NULL; + csc->cp_stats_psoc_obj_deinit = NULL; + csc->cp_stats_pdev_obj_init = NULL; + csc->cp_stats_pdev_obj_deinit = NULL; + csc->cp_stats_vdev_obj_init = NULL; + csc->cp_stats_vdev_obj_deinit = NULL; + csc->cp_stats_peer_obj_init = NULL; + csc->cp_stats_peer_obj_deinit = NULL; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_ol_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_ol_api.h new file mode 100644 index 0000000000000000000000000000000000000000..1b97c956a0172534670c81eb70a42174e2431955 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/core/src/wlan_cp_stats_ol_api.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_ol_api.h + * + * This header file provide API declarations required for cp stats global + * context specific to offload + */ + +#ifndef __WLAN_CP_STATS_OL_API_H__ +#define __WLAN_CP_STATS_OL_API_H__ + +#ifdef QCA_SUPPORT_CP_STATS +#include +#include "wlan_cp_stats_defs.h" +#include "wlan_cp_stats_ol_api.h" + +/** + * wlan_cp_stats_psoc_obj_init_ol() - private API to init psoc cp stats obj + * @psoc_cs: pointer to psoc cp stat object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_psoc_obj_init_ol(struct psoc_cp_stats *psoc_cs); + +/** + * wlan_cp_stats_psoc_obj_deinit_ol() - private API to deinit psoc cp stats obj + * @psoc_cs: pointer to psoc cp stat object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_psoc_obj_deinit_ol(struct psoc_cp_stats *psoc_cs); + +/** + * wlan_cp_stats_pdev_obj_init_ol() - private API to init pdev cp stats obj + * @pdev_cs: pointer to pdev cp stat object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_pdev_obj_init_ol(struct pdev_cp_stats *pdev_cs); + +/** + * wlan_cp_stats_pdev_obj_deinit_ol() - private API to deinit pdev cp stats obj + * @pdev_cs: pointer to pdev cp stat object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_pdev_obj_deinit_ol(struct pdev_cp_stats *pdev_cs); + +/** + * wlan_cp_stats_vdev_obj_init_ol() - private API to init vdev cp stats obj + * @vdev_cs: pointer to vdev cp stat object + * + * Return: QDF_STATUS_SUCCESS on success, 
QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_vdev_obj_init_ol(struct vdev_cp_stats *vdev_cs); + +/** + * wlan_cp_stats_vdev_obj_deinit_ol() - private API to deinit vdev cp stats obj + * @vdev_cs: pointer to vdev cp stat object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_vdev_obj_deinit_ol(struct vdev_cp_stats *vdev_cs); + +/** + * wlan_cp_stats_peer_obj_init_ol() - private API to init peer cp stats obj + * @peer_cs: pointer to peer cp stat object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_peer_obj_init_ol(struct peer_cp_stats *peer_cs); + +/** + * wlan_cp_stats_peer_obj_deinit_ol() - private API to deinit peer cp stats obj + * @peer_cs: pointer to peer cp stat object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_peer_obj_deinit_ol(struct peer_cp_stats *peer_cs); + +/** + * wlan_cp_stats_open_ol() - private API for psoc open + * @psoc: pointer to psoc object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_open_ol(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_cp_stats_close_ol() - private API for psoc close + * @psoc: pointer to psoc object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_close_ol(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_cp_stats_enable_ol() - private API for psoc enable + * @psoc: pointer to psoc object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_enable_ol(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_cp_stats_disable_ol() - private API for psoc disable + * @psoc: pointer to psoc object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_disable_ol(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_cp_stats_ctx_init_ol() - private API to 
initialize cp stat global ctx + * @csc: pointer to cp stats global context object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_ctx_init_ol(struct cp_stats_context *csc); + +/** + * wlan_cp_stats_ctx_deinit_ol() - private API to deinit cp stat global ctx + * @csc: pointer to cp stats global context object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_ctx_deinit_ol(struct cp_stats_context *csc); + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_OL_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_chan_info_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_chan_info_api.h new file mode 100644 index 0000000000000000000000000000000000000000..734e898036073a9748a78bf79f2523c4d1f93bcf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_chan_info_api.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cp_stats_chan_info_api.h + * + * This file provide structure definitions for ACS related control plane stats + */ + +#ifndef __WLAN_CP_STATS_CHAN_INFO_API_H__ +#define __WLAN_CP_STATS_CHAN_INFO_API_H__ + +#ifdef QCA_SUPPORT_CP_STATS + +#endif /* QCA_SUPPORT_CP_STATS*/ +#endif /* __WLAN_CP_STATS_CHAN_INFO_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_chan_info_defs.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_chan_info_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..48ac52ceb852d8d30d6ad476431762528f3f036f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_chan_info_defs.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cp_stats_chan_info_defs.h + * + * This file provide structure definitions for ACS related control plane stats + */ +#ifndef __WLAN_CP_STATS_CHAN_INFO_DEFS_H__ +#define __WLAN_CP_STATS_CHAN_INFO_DEFS_H__ + +#ifdef QCA_SUPPORT_CP_STATS + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_CHAN_INFO_DEFS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_acs_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_acs_api.h new file mode 100644 index 0000000000000000000000000000000000000000..e0b999d774d2625dfbe4d911f58f87af41629244 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_acs_api.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cp_stats_ic_acs_api.h + * + * This file provide declarations for APIs exposed for accessing ACS related + * control plane stats + */ + +#ifndef __WLAN_CP_STATS_IC_ACS_API_H__ +#define __WLAN_CP_STATS_IC_ACS_API_H__ +#ifdef QCA_SUPPORT_CP_STATS + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_IC_ACS_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_acs_defs.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_acs_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..f4dc9f411ad2de03fdab4af0d85a01c2857c95e9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_acs_defs.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cp_stats_acs_defs.h + * + * This header file declare APIs defines structure for + * common ACS functionality + */ + +#ifndef __WLAN_CP_STATS_ACS_DEFS_H__ +#define __WLAN_CP_STATS_ACS_DEFS_H__ + +#endif /* __WLAN_CP_STATS_ACS_DEFS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_atf_defs.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_atf_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..8292bd60d814f54bf70d91e3470eda6b81b151d7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_atf_defs.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cp_stats_atf_defs.h + * + * This header file provides structure definitions to ATF control plane stats + */ + +#ifndef __WLAN_CP_STATS_ATF_DEFS_H__ +#define __WLAN_CP_STATS_ATF_DEFS_H__ + +#ifdef QCA_SUPPORT_CP_STATS + +/** + * struct atf_peer_cp_stats - ATF statistics + * @vdev_id: vdev object identifier + * @tokens: tokens distributed by strictq/fairq + * @act_tokens: tokens available, after adjustment of excess + * consumed in prev cycle + * @total: total tokens distributed by strictq/fairq + * @contribution: tokens contributed by this node + * @tot_contribution: tokens contributed by all nodes + * @borrow: tokens borrowed by this node + * @unused: tokens not used + * @pkt_drop_nobuf: packets dropped as node is already holding + * its share of tx buffers + * @allowed_bufs: max tx buffers that this node can hold + * @max_num_buf_held: max tx buffers held by this node + * @min_num_buf_held: min tx buffers held by this node + * @num_tx_bufs: packets sent for this node + * @num_tx_bytes: bytes sent for this node + * @tokens_common: tokens distributed by strictq/fairq + * (for non-atf nodes) + * @act_tokens_common: tokens available, after adjustment of + * excess consumed in prev cycle (for non-atf nodes) + * @timestamp: time when stats are updated + * @weighted_unusedtokens_percent: weighted unused tokens percent + * @raw_tx_tokens: raw tokens + * @throughput: attainable throughput assuming 100% airtime + * @total_used_tokens: total of used tokens + */ +struct atf_peer_cp_stats { + uint8_t vdev_id; + uint32_t tokens; + uint32_t act_tokens; + uint32_t total; + uint32_t contribution; + uint32_t tot_contribution; + uint32_t borrow; + uint32_t unused; + uint32_t pkt_drop_nobuf; + uint16_t allowed_bufs; + uint16_t max_num_buf_held; + uint16_t min_num_buf_held; + uint16_t num_tx_bufs; + uint32_t num_tx_bytes; + uint32_t tokens_common; + uint32_t act_tokens_common; + uint32_t timestamp; + uint32_t weighted_unusedtokens_percent; + uint32_t 
raw_tx_tokens; + uint32_t throughput; + uint64_t total_used_tokens; +}; + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_ATF_DEFS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_dcs_defs.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_dcs_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..405ed9bfb7750f14a930380b74592873d22cabeb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_dcs_defs.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cp_stats_ic_dcs_defs.h + * + * This header file provides structure definitions for DCS control plane stats + */ + +#ifndef __WLAN_CP_STATS_IC_DCS_CHAN_STATS_H__ +#define __WLAN_CP_STATS_IC_DCS_CHAN_STATS_H__ + +#ifdef QCA_SUPPORT_CP_STATS + +/** + * struct pdev_dcs_chan_stats - DCS statistics + * @dcs_chan_nf: channel noise floor + * @dcs_total_chan_util: total channel utilization + * @dcs_tx_chan_util: tx channel utilization + * @dcs_rx_chan_util: rx channel utilization + * @dcs_self_bss_util: self BSS util + * @dcs_other_bss_util: other BSS util + * @dcs_wasted_chan_util: wasted chan util + * @dcs_unused_chan_util: unused chan util + * @dcs_ss_under_util: spatial stream under util + * @dcs_sec_20_util: secondary 20MHz util + * @dcs_sec_40_util: secondary 40MHz util + * @dcs_sec_80_util: secondary 80MHz util + * @dcs_ofdm_phyerr: tx ofdm errors + * @dcs_cck_phyerr: tx cck errors + */ +struct pdev_dcs_chan_stats { + uint32_t dcs_chan_nf; + uint32_t dcs_total_chan_util; + uint32_t dcs_tx_chan_util; + uint32_t dcs_rx_chan_util; + uint32_t dcs_self_bss_util; + uint32_t dcs_other_bss_util; + uint32_t dcs_wasted_chan_util; + uint32_t dcs_unused_chan_util; + uint32_t dcs_ss_under_util; + uint32_t dcs_sec_20_util; + uint32_t dcs_sec_40_util; + uint32_t dcs_sec_80_util; + uint32_t dcs_ofdm_phyerr; + uint32_t dcs_cck_phyerr; +}; + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_IC_DCS_CHAN_STATS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_defs.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..0335588e0dda97ffa3799b62bbaf3ff0932658d3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_defs.h @@ -0,0 +1,449 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_ic_defs.h + * + * This header file maintains structure definitions for cp stats specific to ic + */ + +#ifndef __WLAN_CP_STATS_IC_DEFS_H__ +#define __WLAN_CP_STATS_IC_DEFS_H__ + +#ifdef QCA_SUPPORT_CP_STATS + +/** + * struct pdev_rx_rssi - rx rssi information + * + * @rx_rssi_pri20: primary 20 rssi + * @rx_rssi_sec20: secondary 20 rssi + * @rx_rssi_sec40: secondary 40 rssi + * @rx_rssi_sec80: secondary 80 rssi + */ +struct pdev_rx_rssi { + uint8_t rx_rssi_pri20; + uint8_t rx_rssi_sec20; + uint8_t rx_rssi_sec40; + uint8_t rx_rssi_sec80; +}; + +/** + * struct pdev_hw_stats - pdev hw stats + * + * @tx_hw_retries: tx hw retries + * @tx_hw_failures: tx hw failures + */ +struct pdev_hw_stats { + uint64_t tx_hw_retries; + uint64_t tx_hw_failures; +}; + +/** + * struct pdev_80211_stats - control plane stats at pdev + * + * the same statistics were earlier maintained with a reference to + * ieee80211_mac_stats in vap structure, now the same structure will be + * used as interface structure with user space application + * make sure to align this structure with ieee80211_mac_stats + * + * @cs_tx_beacon: tx beacon + * @cs_be_nobuf: no skbuff available for beacon + * @cs_tx_buf_count: tx buf count + * 
@cs_tx_packets: tx packets + * @cs_rx_packets: rx packets + * @cs_tx_mgmt: tx mgmt + * @cs_tx_num_data: tx data + * @cs_rx_num_data: rx data + * @cs_rx_mgmt: rx mgmt + * @cs_rx_num_mgmt: rx num mgmt + * @cs_rx_num_ctl: rx num ctrl + * @cs_rx_ctrl: rx ctrl + * @cs_tx_ctrl: tx ctrl + * @cs_tx_rssi: tx rssi + * @cs_tx_mcs[]: tx mcs + * @cs_rx_mcs[]: rx mcs + * @cs_rx_rssi_comb: rx rssi comb + * @cs_rx_bytes: rx bytes + * @cs_tx_bytes: tx bytes + * @cs_tx_compaggr: tx comp aggr + * @cs_rx_aggr: rx aggr + * @cs_tx_bawadv: tx bad adv frames + * @cs_tx_compunaggr: tx comp unaggr frames + * @cs_rx_overrun: rx over run frames + * @cs_rx_crypt_err: rx crypt error count + * @cs_rx_mic_err: rx mic error count + * @cs_rx_crc_err: rx crc error count + * @cs_rx_phy_err: rx phy error count + * @cs_rx_ack_err: rx ack error count + * @cs_rx_rts_err: rx rts error count + * @cs_rx_rts_success: rx rts success count + * @cs_rx_fcs_err: rx fcs error count + * @cs_no_beacons: rx beacon + * @cs_mib_int_count: mib int count + * @cs_rx_looplimit_start: rx loop limit start + * @cs_rx_looplimit_end: rx loop limit end + * @cs_ap_stats_tx_cal_enable: ap stats tx cal enable status + * @cs_tgt_asserts: tgt assert count + * @cs_chan_nf: channel noise floor + * @cs_rx_last_msdu_unset_cnt: rx last msdu unset count + * @cs_chan_nf_sec80: channel noise floor secondary 80 + * @cs_wmi_tx_mgmt: wmi tx mgmt + * @cs_wmi_tx_mgmt_completions: wmi tx mgmt complete + * @cs_wmi_tx_mgmt_completion_err: wmi tx mgmt error + * @cs_peer_delete_req: peer del request + * @cs_peer_delete_resp: peer del response + * @cs_rx_mgmt_rssi_drop: rx mgmt rssi drop + * @cs_tx_retries: tx retries + * @cs_rx_data_bytes: rx data bytes + * @cs_tx_frame_count: tx frame count + * @cs_rx_frame_count: rx frame count + * @cs_rx_clear_count: rx clear count + * @cs_cycle_count: cycle count + * @cs_phy_err_count: phy error count + * @cs_chan_tx_pwr: channel tx power + */ +struct pdev_80211_stats { + uint64_t cs_tx_beacon; + uint32_t 
cs_be_nobuf; + uint32_t cs_tx_buf_count; + uint32_t cs_tx_packets; + uint32_t cs_rx_packets; + uint32_t cs_tx_mgmt; + uint32_t cs_tx_num_data; + uint32_t cs_rx_num_data; + uint32_t cs_rx_mgmt; + uint32_t cs_rx_num_mgmt; + uint32_t cs_rx_num_ctl; + uint64_t cs_rx_ctrl; + uint64_t cs_tx_ctrl; + uint32_t cs_tx_rssi; + uint32_t cs_tx_mcs[10]; + uint32_t cs_rx_mcs[10]; + uint32_t cs_rx_rssi_comb; + struct pdev_rx_rssi cs_rx_rssi_chain0; + struct pdev_rx_rssi cs_rx_rssi_chain1; + struct pdev_rx_rssi cs_rx_rssi_chain2; + struct pdev_rx_rssi cs_rx_rssi_chain3; + uint64_t cs_rx_bytes; + uint64_t cs_tx_bytes; + uint32_t cs_tx_compaggr; + uint32_t cs_rx_aggr; + uint32_t cs_tx_bawadv; + uint32_t cs_tx_compunaggr; + uint32_t cs_rx_overrun; + uint32_t cs_rx_crypt_err; + uint32_t cs_rx_mic_err; + uint32_t cs_rx_crc_err; + uint32_t cs_rx_phy_err; + uint32_t cs_rx_ack_err; + uint32_t cs_rx_rts_err; + uint32_t cs_rx_rts_success; + uint32_t cs_rx_fcs_err; + uint32_t cs_no_beacons; + uint32_t cs_mib_int_count; + uint32_t cs_rx_looplimit_start; + uint32_t cs_rx_looplimit_end; + uint8_t cs_ap_stats_tx_cal_enable; + uint8_t cs_self_bss_util; + uint8_t cs_obss_util; + uint32_t cs_tgt_asserts; + int16_t cs_chan_nf; + uint32_t cs_rx_last_msdu_unset_cnt; + int16_t cs_chan_nf_sec80; + uint64_t cs_wmi_tx_mgmt; + uint64_t cs_wmi_tx_mgmt_completions; + uint32_t cs_wmi_tx_mgmt_completion_err; + uint32_t cs_peer_delete_req; + uint32_t cs_peer_delete_resp; + uint32_t cs_rx_mgmt_rssi_drop; + uint32_t cs_tx_retries; + uint32_t cs_rx_data_bytes; + uint32_t cs_tx_frame_count; + uint32_t cs_rx_frame_count; + uint32_t cs_rx_clear_count; + uint32_t cs_cycle_count; + uint32_t cs_phy_err_count; + uint32_t cs_chan_tx_pwr; + + /* at places of copying required for scn-stats, copy till here only */ + struct pdev_hw_stats hw_stats; +}; + +/** + * struct pdev_ic_cp_stats - control plane stats specific to WIN at pdev + * @stats: 80211 stats + */ +struct pdev_ic_cp_stats { + struct pdev_80211_stats stats; +}; + 
+/** + * struct vdev_80211_stats - control plane 80211 stats at vdev + * + * the same statistics were earlier maintained with a reference to + * ieee80211_mac_stats in vap structure, now the same structure will be + * used as interface structure with user space application + * make sure to align this structure with ieee80211_mac_stats + * + * @cs_tx_bcn_swba: tx beacon + * @cs_tx_offchan_mgmt: tx offchan mgmt + * @cs_tx_offchan_data: tx offchan data + * @cs_tx_offchan_fail: tx offchan fail + * @cs_rx_wrongbss: rx from wrong bssid + * @cs_rx_wrongdir: rx wrong direction + * @cs_rx_not_assoc: rx discard cuz sta !assoc + * @cs_rx_no_privacy: rx wep but privacy off + * @cs_rx_mgt_discard: rx mgmt frames discard + * @cs_rx_ctl: rx control frames discard + * @cs_rx_rs_too_big: rx rate set truncated + * @cs_rx_elem_missing: rx required element missing + * @cs_rx_elem_too_big: rx elem too big + * @cs_rx_chan_err: rx chan err + * @cs_rx_node_alloc: rx frame dropped + * @cs_rx_ssid_mismatch: rx ssid mismatch + * @cs_rx_auth_unsupported: rx auth unsupported algo + * @cs_rx_auth_fail: rx auth fail + * @cs_rx_auth_countermeasures: rx auth discard cuz counter measures + * @cs_rx_assoc_bss: rx assoc from wrong bss + * @cs_rx_assoc_notauth: rx assoc w/o auth + * @cs_rx_assoc_cap_mismatch: rx assoc w/ cap mismatch + * @cs_rx_assoc_norate: rx assoc w/ no rate match + * @cs_rx_assoc_wpaie_err: rx assoc w/ WPA err + * @cs_rx_action: rx action frames + * @cs_rx_auth_err: rx auth errors + * @cs_tx_nodefkey: tx nodefkey cuz no defkey + * @cs_tx_noheadroom: tx failed no headroom space + * @cs_rx_nocipherctx: rx no cipher context key + * @cs_rx_acl: rx acl + * @cs_rx_nowds: rx 4-addr packets with no wds enabled + * @cs_tx_nonode: tx tx failed for lack of buf + * @cs_tx_unknown_mgt: tx unkonwn mgmt + * @cs_tx_cipher_err: tx cipher error + * @cs_node_timeout: node timeout + * @cs_crypto_nomem: no memory for crypto ctx + * @cs_crypto_tkip: tkip crypto done in s/w + * @cs_crypto_tkipenmic: 
tkip en-MIC done in s/w + * @cs_crypto_tkipcm: crypto tkip counter measures + * @cs_crypto_ccmp: crypto ccmp done in s/w + * @cs_crypto_wep: crypto wep done in s/w + * @cs_crypto_setkey_cipher: crypto set key cipher + * @cs_crypto_setkey_nokey: crypto set key no key index + * @cs_crypto_delkey: crypto driver key delete failed + * @cs_crypto_cipher_err: crypto cipher err + * @cs_crypto_attach_fail: crypto attach fail + * @cs_crypto_swfallback: crypto sw fallback + * @cs_crypto_keyfail: crypto key fail + * @cs_ibss_capmismatch: ibss cap mismatch + * @cs_ps_unassoc: ps unassoc + * @cs_ps_aid_err: ps aid err + * @cs_padding: padding + * @cs_invalid_macaddr_nodealloc_failcnt: invalid mac node alloc failures + * @cs_tx_bcn_succ_cnt:tx beacon success + * @cs_tx_bcn_outage_cnt: tx beacon outage + * @total_num_offchan_tx_mgmt: total number of offchan TX mgmt frames + * @total_num_offchan_tx_data: total number of offchan TX data frames + * @num_offchan_tx_failed: number of offchan TX frames failed + * @sta_xceed_rlim: no of connections refused after radio limit + * @sta_xceed_vlim: no of connections refused after vap limit + * @mlme_auth_attempt: no of 802.11 MLME Auth Attempt + * @mlme_auth_success: no of 802.11 MLME Auth Success + * @authorize_attempt: no of Authorization Attempt + * @authorize_success: no of Authorization successful + */ +struct vdev_80211_stats { + uint64_t cs_rx_wrongbss; + uint64_t cs_rx_wrongdir; + uint64_t cs_rx_mcast_echo; + uint64_t cs_rx_not_assoc; + uint64_t cs_rx_noprivacy; + uint64_t cs_rx_mgmt_discard; + uint64_t cs_rx_ctl; + uint64_t cs_rx_rs_too_big; + uint64_t cs_rx_elem_missing; + uint64_t cs_rx_elem_too_big; + uint64_t cs_rx_chan_err; + uint64_t cs_rx_node_alloc; + uint64_t cs_rx_ssid_mismatch; + uint64_t cs_rx_auth_unsupported; + uint64_t cs_rx_auth_fail; + uint64_t cs_rx_auth_countermeasures; + uint64_t cs_rx_assoc_bss; + uint64_t cs_rx_assoc_notauth; + uint64_t cs_rx_assoc_cap_mismatch; + uint64_t cs_rx_assoc_norate; + uint64_t 
cs_rx_assoc_wpaie_err; + uint64_t cs_rx_action; + uint64_t cs_rx_auth_err; + uint64_t cs_tx_nodefkey; + uint64_t cs_tx_noheadroom; + uint64_t cs_rx_acl; + uint64_t cs_rx_nowds; + uint64_t cs_tx_nobuf; + uint64_t cs_tx_nonode; + uint64_t cs_tx_cipher_err; + uint64_t cs_tx_not_ok; + uint64_t cs_tx_bcn_swba; + uint64_t cs_node_timeout; + uint64_t cs_crypto_nomem; + uint64_t cs_crypto_tkip; + uint64_t cs_crypto_tkipenmic; + uint64_t cs_crypto_tkipcm; + uint64_t cs_crypto_ccmp; + uint64_t cs_crypto_wep; + uint64_t cs_crypto_setkey_cipher; + uint64_t cs_crypto_setkey_nokey; + uint64_t cs_crypto_delkey; + uint64_t cs_crypto_cipher_err; + uint64_t cs_crypto_attach_fail; + uint64_t cs_crypto_swfallback; + uint64_t cs_crypto_keyfail; + uint64_t cs_ibss_capmismatch; + uint64_t cs_ps_unassoc; + uint64_t cs_ps_aid_err; + uint64_t cs_padding; + uint64_t cs_tx_offchan_mgmt; + uint64_t cs_tx_offchan_data; + uint64_t cs_tx_offchan_fail; + uint64_t cs_invalid_macaddr_nodealloc_fail; + uint64_t cs_tx_bcn_success; + uint64_t cs_tx_bcn_outage; + uint64_t cs_sta_xceed_rlim; + uint64_t cs_sta_xceed_vlim; + uint64_t cs_mlme_auth_attempt; + uint64_t cs_mlme_auth_success; + uint64_t cs_authorize_attempt; + uint64_t cs_authorize_success; +}; + +/** + * struct vdev_80211_mac_stats - control plane 80211 mac stats at vdev + * + * the same statistics were earlier maintained with a reference to + * ieee80211_mac_stats in vap structure, now the same structure will be + * used as interface structure with user space application + * make sure to align this structure with ieee80211_mac_stats + * + * @cs_rx_badkeyid: rx bad keyid + * @cs_rx_decryptok: rx decrypt success + * @cs_rx_wepfail: rx wep failures + * @cs_rx_tkipreplay: rx tkip replays + * @cs_rx_tkipformat: rx tkip format + * @cs_rx_tkipicv: rx tkip icv + * @cs_rx_ccmpreplay: rx ccmp replay + * @cs_rx_ccmpformat: rx ccmp format + * @cs_rx_ccmpmic: rx ccmp mic failures + * @cs_rx_wpireplay: rx wpi replay + * @cs_rx_wpimic: rx wpi mic failures + 
* @cs_rx_countermeasure: rx counter measures count + * @cs_retries: rx retries + * @cs_tx_mgmt: tx mgmt + * @cs_rx_mgmt: rx mgmt + */ +struct vdev_80211_mac_stats { + uint64_t cs_rx_badkeyid; + uint64_t cs_rx_decryptok; + uint64_t cs_rx_wepfail; + uint64_t cs_rx_tkipreplay; + uint64_t cs_rx_tkipformat; + uint64_t cs_rx_tkipicv; + uint64_t cs_rx_ccmpreplay; + uint64_t cs_rx_ccmpformat; + uint64_t cs_rx_ccmpmic; + uint64_t cs_rx_wpireplay; + uint64_t cs_rx_wpimic; + uint64_t cs_rx_countermeasure; + uint64_t cs_retries; + uint64_t cs_tx_mgmt; + uint64_t cs_rx_mgmt; +}; + +/** + * struct vdev_ic_cp_stats - control plane stats specific to WIN at vdev + * @stats: 80211 stats + * @ucast_stats: unicast stats + * @mcast_stats: multicast or broadcast stats + */ +struct vdev_ic_cp_stats { + struct vdev_80211_stats stats; + struct vdev_80211_mac_stats ucast_stats; + struct vdev_80211_mac_stats mcast_stats; +}; + +/** + * struct peer_ic_cp_stats - control plane stats specific to WIN at peer + * the same statistics were earlier maintained with a reference to + * ieee80211_nodestats in ni structure, now the same structure will be + * as interface structure with user space application + * make sure to align this structure with ieee80211_nodestats always + * + * @cs_rx_mgmt_rssi: rx mgmt rssi + * @cs_rx_mgmt: rx mgmt + * @cs_rx_noprivacy: rx no privacy + * @cs_rx_wepfail: rx wep failures + * @cs_rx_ccmpmic: rx ccmp mic failures + * @cs_rx_wpimic: rx wpi mic failures + * @cs_rx_tkipicv: rx tkip icv + * @cs_tx_mgmt: tx mgmt + * @cs_is_tx_not_ok: tx failures + * @cs_ps_discard: ps discard + * @cs_rx_mgmt_rate: rx mgmt rate + * @cs_tx_bytes_rate: tx rate + * @cs_tx_data_rate: tx data rate + * @cs_rx_bytes_rate: rx rate + * @cs_rx_data_rate: rx data rate + * @cs_tx_bytes_success_last: tx success count in last 1 sec + * @cs_tx_data_success_last: tx data success count in last 1 sec + * @cs_rx_bytes_last: rx rate + * @cs_rx_data_last: rx data rate + * @cs_psq_drops: psq drops + * 
@cs_tx_dropblock: tx dropblock + * @cs_tx_assoc: tx assoc success + * @cs_tx_assoc_fail: tx assoc failure + */ +struct peer_ic_cp_stats { + int8_t cs_rx_mgmt_rssi; + uint32_t cs_rx_mgmt; + uint32_t cs_rx_noprivacy; + uint32_t cs_rx_wepfail; + uint32_t cs_rx_ccmpmic; + uint32_t cs_rx_wpimic; + uint32_t cs_rx_tkipicv; + uint32_t cs_tx_mgmt; + uint32_t cs_is_tx_not_ok; + uint32_t cs_ps_discard; + uint32_t cs_rx_mgmt_rate; +#ifdef WLAN_ATH_SUPPORT_EXT_STAT + uint32_t cs_tx_bytes_rate; + uint32_t cs_tx_data_rate; + uint32_t cs_rx_bytes_rate; + uint32_t cs_rx_data_rate; + uint32_t cs_tx_bytes_success_last; + uint32_t cs_tx_data_success_last; + uint32_t cs_rx_bytes_last; + uint32_t cs_rx_data_last; +#endif + uint32_t cs_psq_drops; +#ifdef ATH_SUPPORT_IQUE + uint32_t cs_tx_dropblock; +#endif + uint32_t cs_tx_assoc; + uint32_t cs_tx_assoc_fail; +}; + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_IC_DEFS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..8a5ab6609d52f50681e00e4bb8ac277ee881a150 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_tgt_api.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_ic_tgt_api.h + * + * This header file provides API declarations required for southbound + * interaction specific to ic + */ +#ifndef __WLAN_CP_STATS_IC_TGT_API_H__ +#define __WLAN_CP_STATS_IC_TGT_API_H__ + +#ifdef QCA_SUPPORT_CP_STATS + +#include + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_IC_TGT_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..9ae156a81eee382d6566e79f524e883a8eda4694 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_ucfg_api.h @@ -0,0 +1,676 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cp_stats_ic_ucfg_api.h + * + */ + +#ifndef __WLAN_CP_STATS_IC_UCFG_API_H__ +#define __WLAN_CP_STATS_IC_UCFG_API_H__ + +#ifdef QCA_SUPPORT_CP_STATS +#include +#include +#include +#include "../../core/src/wlan_cp_stats_defs.h" +#ifdef WLAN_ATF_ENABLE +#include +#endif + +#define UCFG_PDEV_CP_STATS_SET_FUNCS(field) \ + static inline void \ + ucfg_pdev_cp_stats_##field##_inc(struct wlan_objmgr_pdev *_pdev, \ + uint64_t _val) \ + { \ + struct pdev_cp_stats *_pdev_cs = \ + wlan_cp_stats_get_pdev_stats_obj(_pdev); \ + if (_pdev_cs) { \ + struct pdev_ic_cp_stats *_pdev_ics = \ + _pdev_cs->pdev_stats; \ + if (_pdev_ics) { \ + _pdev_ics->stats.cs_##field += _val;\ + } \ + } \ + } \ + static inline void \ + ucfg_pdev_cp_stats_##field##_dec(struct wlan_objmgr_pdev *_pdev, \ + uint64_t _val) \ + { \ + struct pdev_cp_stats *_pdev_cs = \ + wlan_cp_stats_get_pdev_stats_obj(_pdev); \ + if (_pdev_cs) { \ + struct pdev_ic_cp_stats *_pdev_ics = \ + _pdev_cs->pdev_stats; \ + if (_pdev_ics) { \ + _pdev_ics->stats.cs_##field -= _val;\ + } \ + } \ + } \ + static inline void \ + ucfg_pdev_cp_stats_##field##_update(struct wlan_objmgr_pdev *_pdev, \ + uint64_t _val) \ + { \ + struct pdev_cp_stats *_pdev_cs = \ + wlan_cp_stats_get_pdev_stats_obj(_pdev); \ + if (_pdev_cs) { \ + struct pdev_ic_cp_stats *_pdev_ics = \ + _pdev_cs->pdev_stats; \ + if (_pdev_ics) { \ + _pdev_ics->stats.cs_##field = _val;\ + } \ + } \ + } + +UCFG_PDEV_CP_STATS_SET_FUNCS(tx_beacon); +UCFG_PDEV_CP_STATS_SET_FUNCS(be_nobuf); +UCFG_PDEV_CP_STATS_SET_FUNCS(tx_buf_count); +UCFG_PDEV_CP_STATS_SET_FUNCS(tx_packets); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_packets); +UCFG_PDEV_CP_STATS_SET_FUNCS(tx_mgmt); +UCFG_PDEV_CP_STATS_SET_FUNCS(tx_num_data); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_num_data); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_mgmt); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_num_mgmt); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_num_ctl); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_ctrl); +UCFG_PDEV_CP_STATS_SET_FUNCS(tx_ctrl); 
+UCFG_PDEV_CP_STATS_SET_FUNCS(tx_rssi); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_rssi_comb); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_bytes); +UCFG_PDEV_CP_STATS_SET_FUNCS(tx_bytes); +UCFG_PDEV_CP_STATS_SET_FUNCS(tx_compaggr); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_aggr); +UCFG_PDEV_CP_STATS_SET_FUNCS(tx_bawadv); +UCFG_PDEV_CP_STATS_SET_FUNCS(tx_compunaggr); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_overrun); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_crypt_err); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_mic_err); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_crc_err); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_phy_err); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_ack_err); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_rts_err); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_rts_success); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_fcs_err); +UCFG_PDEV_CP_STATS_SET_FUNCS(no_beacons); +UCFG_PDEV_CP_STATS_SET_FUNCS(mib_int_count); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_looplimit_start); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_looplimit_end); +UCFG_PDEV_CP_STATS_SET_FUNCS(ap_stats_tx_cal_enable); +UCFG_PDEV_CP_STATS_SET_FUNCS(tgt_asserts); +UCFG_PDEV_CP_STATS_SET_FUNCS(chan_nf); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_last_msdu_unset_cnt); +UCFG_PDEV_CP_STATS_SET_FUNCS(chan_nf_sec80); +UCFG_PDEV_CP_STATS_SET_FUNCS(wmi_tx_mgmt); +UCFG_PDEV_CP_STATS_SET_FUNCS(wmi_tx_mgmt_completions); +UCFG_PDEV_CP_STATS_SET_FUNCS(wmi_tx_mgmt_completion_err); +UCFG_PDEV_CP_STATS_SET_FUNCS(peer_delete_req); +UCFG_PDEV_CP_STATS_SET_FUNCS(peer_delete_resp); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_mgmt_rssi_drop); +UCFG_PDEV_CP_STATS_SET_FUNCS(tx_retries); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_data_bytes); +UCFG_PDEV_CP_STATS_SET_FUNCS(tx_frame_count); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_frame_count); +UCFG_PDEV_CP_STATS_SET_FUNCS(rx_clear_count); +UCFG_PDEV_CP_STATS_SET_FUNCS(cycle_count); +UCFG_PDEV_CP_STATS_SET_FUNCS(phy_err_count); +UCFG_PDEV_CP_STATS_SET_FUNCS(chan_tx_pwr); +UCFG_PDEV_CP_STATS_SET_FUNCS(self_bss_util); +UCFG_PDEV_CP_STATS_SET_FUNCS(obss_util); + +#define UCFG_PDEV_CP_STATS_GET_FUNCS(field) \ + static inline uint64_t \ + 
ucfg_pdev_cp_stats_##field##_get(struct wlan_objmgr_pdev *_pdev) \ + { \ + struct pdev_cp_stats *_pdev_cs = \ + wlan_cp_stats_get_pdev_stats_obj(_pdev); \ + struct pdev_ic_cp_stats *_pdev_ics; \ + if (_pdev_cs) { \ + _pdev_ics = _pdev_cs->pdev_stats; \ + if (_pdev_ics) \ + return _pdev_ics->stats.cs_##field; \ + } \ + return 0; \ + } + +UCFG_PDEV_CP_STATS_GET_FUNCS(ap_stats_tx_cal_enable); +UCFG_PDEV_CP_STATS_GET_FUNCS(wmi_tx_mgmt); +UCFG_PDEV_CP_STATS_GET_FUNCS(wmi_tx_mgmt_completions); +UCFG_PDEV_CP_STATS_GET_FUNCS(wmi_tx_mgmt_completion_err); +UCFG_PDEV_CP_STATS_GET_FUNCS(tgt_asserts); + +static inline void ucfg_pdev_cp_stats_reset(struct wlan_objmgr_pdev *_pdev) +{ + struct pdev_cp_stats *pdev_cps = NULL; + + pdev_cps = wlan_cp_stats_get_pdev_stats_obj(_pdev); + if (!pdev_cps) + return; + + qdf_mem_zero(pdev_cps->pdev_stats, sizeof(struct pdev_ic_cp_stats)); +} + +#define UCFG_VDEV_CP_STATS_SET_FUNCS(field) \ + static inline void \ + ucfg_vdev_cp_stats_##field##_inc(struct wlan_objmgr_vdev *_vdev, \ + uint64_t _val) \ + { \ + struct vdev_cp_stats *_vdev_cs = \ + wlan_cp_stats_get_vdev_stats_obj(_vdev); \ + if (_vdev_cs) { \ + struct vdev_ic_cp_stats *_vdev_ics = \ + _vdev_cs->vdev_stats; \ + if (_vdev_ics) { \ + _vdev_ics->stats.cs_##field += _val;\ + } \ + } \ + } \ + static inline void \ + ucfg_vdev_cp_stats_##field##_dec(struct wlan_objmgr_vdev *_vdev, \ + uint64_t _val) \ + { \ + struct vdev_cp_stats *_vdev_cs = \ + wlan_cp_stats_get_vdev_stats_obj(_vdev); \ + if (_vdev_cs) { \ + struct vdev_ic_cp_stats *_vdev_ics = \ + _vdev_cs->vdev_stats; \ + if (_vdev_ics) { \ + _vdev_ics->stats.cs_##field -= _val;\ + } \ + } \ + } \ + static inline void \ + ucfg_vdev_cp_stats_##field##_update(struct wlan_objmgr_vdev *_vdev, \ + uint64_t _val) \ + { \ + struct vdev_cp_stats *_vdev_cs = \ + wlan_cp_stats_get_vdev_stats_obj(_vdev); \ + if (_vdev_cs) { \ + struct vdev_ic_cp_stats *_vdev_ics = \ + _vdev_cs->vdev_stats; \ + if (_vdev_ics) { \ + _vdev_ics->stats.cs_##field = 
_val;\ + } \ + } \ + } + +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_wrongbss); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_wrongdir); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_not_assoc); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_noprivacy); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_mgmt_discard); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_ctl); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_rs_too_big); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_elem_missing); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_elem_too_big); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_chan_err); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_node_alloc); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_ssid_mismatch); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_auth_unsupported); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_auth_fail); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_auth_countermeasures); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_assoc_bss); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_assoc_notauth); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_assoc_cap_mismatch); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_assoc_norate); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_assoc_wpaie_err); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_action); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_auth_err); +UCFG_VDEV_CP_STATS_SET_FUNCS(tx_nodefkey); +UCFG_VDEV_CP_STATS_SET_FUNCS(tx_noheadroom); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_acl); +UCFG_VDEV_CP_STATS_SET_FUNCS(rx_nowds); +UCFG_VDEV_CP_STATS_SET_FUNCS(tx_nobuf); +UCFG_VDEV_CP_STATS_SET_FUNCS(tx_nonode); +UCFG_VDEV_CP_STATS_SET_FUNCS(tx_cipher_err); +UCFG_VDEV_CP_STATS_SET_FUNCS(tx_not_ok); +UCFG_VDEV_CP_STATS_SET_FUNCS(tx_bcn_swba); +UCFG_VDEV_CP_STATS_SET_FUNCS(node_timeout); +UCFG_VDEV_CP_STATS_SET_FUNCS(crypto_nomem); +UCFG_VDEV_CP_STATS_SET_FUNCS(crypto_tkip); +UCFG_VDEV_CP_STATS_SET_FUNCS(crypto_tkipenmic); +UCFG_VDEV_CP_STATS_SET_FUNCS(crypto_tkipcm); +UCFG_VDEV_CP_STATS_SET_FUNCS(crypto_ccmp); +UCFG_VDEV_CP_STATS_SET_FUNCS(crypto_wep); +UCFG_VDEV_CP_STATS_SET_FUNCS(crypto_setkey_cipher); +UCFG_VDEV_CP_STATS_SET_FUNCS(crypto_setkey_nokey); +UCFG_VDEV_CP_STATS_SET_FUNCS(crypto_delkey); +UCFG_VDEV_CP_STATS_SET_FUNCS(crypto_cipher_err); 
+UCFG_VDEV_CP_STATS_SET_FUNCS(crypto_attach_fail); +UCFG_VDEV_CP_STATS_SET_FUNCS(crypto_swfallback); +UCFG_VDEV_CP_STATS_SET_FUNCS(crypto_keyfail); +UCFG_VDEV_CP_STATS_SET_FUNCS(ibss_capmismatch); +UCFG_VDEV_CP_STATS_SET_FUNCS(ps_unassoc); +UCFG_VDEV_CP_STATS_SET_FUNCS(ps_aid_err); +UCFG_VDEV_CP_STATS_SET_FUNCS(tx_offchan_mgmt); +UCFG_VDEV_CP_STATS_SET_FUNCS(tx_offchan_data); +UCFG_VDEV_CP_STATS_SET_FUNCS(tx_offchan_fail); +UCFG_VDEV_CP_STATS_SET_FUNCS(invalid_macaddr_nodealloc_fail); +UCFG_VDEV_CP_STATS_SET_FUNCS(tx_bcn_success); +UCFG_VDEV_CP_STATS_SET_FUNCS(tx_bcn_outage); +UCFG_VDEV_CP_STATS_SET_FUNCS(sta_xceed_rlim); +UCFG_VDEV_CP_STATS_SET_FUNCS(sta_xceed_vlim); +UCFG_VDEV_CP_STATS_SET_FUNCS(mlme_auth_attempt); +UCFG_VDEV_CP_STATS_SET_FUNCS(mlme_auth_success); +UCFG_VDEV_CP_STATS_SET_FUNCS(authorize_attempt); +UCFG_VDEV_CP_STATS_SET_FUNCS(authorize_success); + +#define UCFG_VDEV_CP_STATS_GET_FUNCS(field) \ + static inline uint64_t \ + ucfg_vdev_cp_stats_##field##_get(struct wlan_objmgr_vdev *_vdev) { \ + struct vdev_cp_stats *_vdev_cs = \ + wlan_cp_stats_get_vdev_stats_obj(_vdev); \ + struct vdev_ic_cp_stats *_vdev_ics; \ + if (_vdev_cs) { \ + _vdev_ics = _vdev_cs->vdev_stats; \ + if (_vdev_ics) \ + return _vdev_ics->stats.cs_##field; \ + } \ + return 0; \ + } + +UCFG_VDEV_CP_STATS_GET_FUNCS(rx_wrongbss); +UCFG_VDEV_CP_STATS_GET_FUNCS(rx_wrongdir); +UCFG_VDEV_CP_STATS_GET_FUNCS(rx_ssid_mismatch); + +#define UCFG_VDEV_UCAST_CP_STATS_SET_FUNCS(field) \ + static inline void \ + ucfg_vdev_ucast_cp_stats_##field##_inc(struct wlan_objmgr_vdev *_vdev, \ + uint64_t _val) \ + { \ + struct vdev_cp_stats *_vdev_cs = \ + wlan_cp_stats_get_vdev_stats_obj(_vdev); \ + if (_vdev_cs) { \ + struct vdev_ic_cp_stats *_vdev_ics = \ + _vdev_cs->vdev_stats; \ + if (_vdev_ics) { \ + _vdev_ics->ucast_stats.cs_##field += _val;\ + } \ + } \ + } \ + static inline void \ + ucfg_vdev_ucast_cp_stats_##field##_dec(struct wlan_objmgr_vdev *_vdev, \ + uint64_t _val) \ + { \ + struct 
vdev_cp_stats *_vdev_cs = \ + wlan_cp_stats_get_vdev_stats_obj(_vdev); \ + if (_vdev_cs) { \ + struct vdev_ic_cp_stats *_vdev_ics = \ + _vdev_cs->vdev_stats; \ + if (_vdev_ics) { \ + _vdev_ics->ucast_stats.cs_##field -= _val;\ + } \ + } \ + } \ + static inline void ucfg_vdev_ucast_cp_stats_##field##_update( \ + struct wlan_objmgr_vdev *_vdev, uint64_t _val) \ + { \ + struct vdev_cp_stats *_vdev_cs = \ + wlan_cp_stats_get_vdev_stats_obj(_vdev); \ + if (_vdev_cs) { \ + struct vdev_ic_cp_stats *_vdev_ics = \ + _vdev_cs->vdev_stats; \ + if (_vdev_ics) { \ + _vdev_ics->ucast_stats.cs_##field = _val;\ + } \ + } \ + } + +UCFG_VDEV_UCAST_CP_STATS_SET_FUNCS(rx_badkeyid); +UCFG_VDEV_UCAST_CP_STATS_SET_FUNCS(rx_decryptok); +UCFG_VDEV_UCAST_CP_STATS_SET_FUNCS(rx_wepfail); +UCFG_VDEV_UCAST_CP_STATS_SET_FUNCS(rx_tkipicv); +UCFG_VDEV_UCAST_CP_STATS_SET_FUNCS(rx_tkipreplay); +UCFG_VDEV_UCAST_CP_STATS_SET_FUNCS(rx_tkipformat); +UCFG_VDEV_UCAST_CP_STATS_SET_FUNCS(rx_ccmpmic); +UCFG_VDEV_UCAST_CP_STATS_SET_FUNCS(rx_ccmpreplay); +UCFG_VDEV_UCAST_CP_STATS_SET_FUNCS(rx_ccmpformat); +UCFG_VDEV_UCAST_CP_STATS_SET_FUNCS(rx_wpimic); +UCFG_VDEV_UCAST_CP_STATS_SET_FUNCS(rx_wpireplay); +UCFG_VDEV_UCAST_CP_STATS_SET_FUNCS(rx_countermeasure); +UCFG_VDEV_UCAST_CP_STATS_SET_FUNCS(rx_mgmt); +UCFG_VDEV_UCAST_CP_STATS_SET_FUNCS(tx_mgmt); + +#define UCFG_VDEV_UCAST_CP_STATS_GET_FUNCS(field) \ + static inline uint64_t \ + ucfg_vdev_ucast_cp_stats_##field##_get(struct wlan_objmgr_vdev *_vdev) \ + { \ + struct vdev_cp_stats *_vdev_cs = \ + wlan_cp_stats_get_vdev_stats_obj(_vdev); \ + struct vdev_ic_cp_stats *_vdev_ics; \ + if (_vdev_cs) { \ + _vdev_ics = _vdev_cs->vdev_stats; \ + if (_vdev_ics) \ + return _vdev_ics->ucast_stats.cs_##field; \ + } \ + return 0; \ + } + +UCFG_VDEV_UCAST_CP_STATS_GET_FUNCS(rx_decryptok); +UCFG_VDEV_UCAST_CP_STATS_GET_FUNCS(rx_ccmpmic); +UCFG_VDEV_UCAST_CP_STATS_GET_FUNCS(rx_ccmpreplay); +UCFG_VDEV_UCAST_CP_STATS_GET_FUNCS(rx_wepfail); + +#define 
UCFG_VDEV_MCAST_CP_STATS_SET_FUNCS(field) \ + static inline void \ + ucfg_vdev_mcast_cp_stats_##field##_inc(struct wlan_objmgr_vdev *_vdev, \ + uint64_t _val) \ + { \ + struct vdev_cp_stats *_vdev_cs = \ + wlan_cp_stats_get_vdev_stats_obj(_vdev); \ + if (_vdev_cs) { \ + struct vdev_ic_cp_stats *_vdev_ics = \ + _vdev_cs->vdev_stats; \ + if (_vdev_ics) { \ + _vdev_ics->mcast_stats.cs_##field += _val;\ + } \ + } \ + } \ + static inline void \ + ucfg_vdev_mcast_cp_stats_##field##_dec(struct wlan_objmgr_vdev *_vdev, \ + uint64_t _val) \ + { \ + struct vdev_cp_stats *_vdev_cs = \ + wlan_cp_stats_get_vdev_stats_obj(_vdev); \ + if (_vdev_cs) { \ + struct vdev_ic_cp_stats *_vdev_ics = \ + _vdev_cs->vdev_stats; \ + if (_vdev_ics) { \ + _vdev_ics->mcast_stats.cs_##field -= _val;\ + } \ + } \ + } \ + static inline void ucfg_vdev_mcast_cp_stats_##field##_update( \ + struct wlan_objmgr_vdev *_vdev, uint64_t _val) \ + { \ + struct vdev_cp_stats *_vdev_cs = \ + wlan_cp_stats_get_vdev_stats_obj(_vdev); \ + if (_vdev_cs) { \ + struct vdev_ic_cp_stats *_vdev_ics = \ + _vdev_cs->vdev_stats; \ + if (_vdev_ics) { \ + _vdev_ics->mcast_stats.cs_##field = _val;\ + } \ + } \ + } + +UCFG_VDEV_MCAST_CP_STATS_SET_FUNCS(rx_badkeyid); +UCFG_VDEV_MCAST_CP_STATS_SET_FUNCS(rx_decryptok); +UCFG_VDEV_MCAST_CP_STATS_SET_FUNCS(rx_wepfail); +UCFG_VDEV_MCAST_CP_STATS_SET_FUNCS(rx_tkipicv); +UCFG_VDEV_MCAST_CP_STATS_SET_FUNCS(rx_tkipreplay); +UCFG_VDEV_MCAST_CP_STATS_SET_FUNCS(rx_tkipformat); +UCFG_VDEV_MCAST_CP_STATS_SET_FUNCS(rx_ccmpmic); +UCFG_VDEV_MCAST_CP_STATS_SET_FUNCS(rx_ccmpreplay); +UCFG_VDEV_MCAST_CP_STATS_SET_FUNCS(rx_ccmpformat); +UCFG_VDEV_MCAST_CP_STATS_SET_FUNCS(rx_wpimic); +UCFG_VDEV_MCAST_CP_STATS_SET_FUNCS(rx_wpireplay); +UCFG_VDEV_MCAST_CP_STATS_SET_FUNCS(rx_countermeasure); +UCFG_VDEV_MCAST_CP_STATS_SET_FUNCS(rx_mgmt); +UCFG_VDEV_MCAST_CP_STATS_SET_FUNCS(tx_mgmt); + +#define UCFG_VDEV_MCAST_CP_STATS_GET_FUNCS(field) \ + static inline uint64_t \ + 
ucfg_vdev_mcast_cp_stats_##field##_get(struct wlan_objmgr_vdev *_vdev) \ + { \ + struct vdev_cp_stats *_vdev_cs = \ + wlan_cp_stats_get_vdev_stats_obj(_vdev); \ + struct vdev_ic_cp_stats *_vdev_ics; \ + if (_vdev_cs) { \ + _vdev_ics = _vdev_cs->vdev_stats; \ + if (_vdev_ics) \ + return _vdev_ics->mcast_stats.cs_##field; \ + } \ + return 0; \ + } + +UCFG_VDEV_MCAST_CP_STATS_GET_FUNCS(rx_decryptok); +UCFG_VDEV_MCAST_CP_STATS_GET_FUNCS(rx_ccmpmic); +UCFG_VDEV_MCAST_CP_STATS_GET_FUNCS(rx_ccmpreplay); +UCFG_VDEV_MCAST_CP_STATS_GET_FUNCS(rx_wepfail); + +#define UCFG_PEER_CP_STATS_SET_FUNCS(field) \ + static inline void \ + ucfg_peer_cp_stats_##field##_inc(struct wlan_objmgr_peer *_peer, \ + uint32_t _val) \ + { \ + struct peer_cp_stats *_peer_cs = \ + wlan_cp_stats_get_peer_stats_obj(_peer); \ + if (_peer_cs) { \ + struct peer_ic_cp_stats *_peer_ics = \ + _peer_cs->peer_stats; \ + if (_peer_ics) { \ + _peer_ics->cs_##field += _val;\ + } \ + } \ + } \ + static inline void \ + ucfg_peer_cp_stats_##field##_dec(struct wlan_objmgr_peer *_peer, \ + uint32_t _val) \ + { \ + struct peer_cp_stats *_peer_cs = \ + wlan_cp_stats_get_peer_stats_obj(_peer); \ + if (_peer_cs) { \ + struct peer_ic_cp_stats *_peer_ics = \ + _peer_cs->peer_stats; \ + if (_peer_ics) { \ + _peer_ics->cs_##field -= _val;\ + } \ + } \ + } \ + static inline void \ + ucfg_peer_cp_stats_##field##_update(struct wlan_objmgr_peer *_peer, \ + uint32_t _val) \ + { \ + struct peer_cp_stats *_peer_cs = \ + wlan_cp_stats_get_peer_stats_obj(_peer); \ + if (_peer_cs) { \ + struct peer_ic_cp_stats *_peer_ics = \ + _peer_cs->peer_stats; \ + if (_peer_ics) { \ + _peer_ics->cs_##field = _val;\ + } \ + } \ + } + +UCFG_PEER_CP_STATS_SET_FUNCS(rx_mgmt); +UCFG_PEER_CP_STATS_SET_FUNCS(tx_mgmt); +UCFG_PEER_CP_STATS_SET_FUNCS(rx_mgmt_rate); +UCFG_PEER_CP_STATS_SET_FUNCS(is_tx_not_ok); +UCFG_PEER_CP_STATS_SET_FUNCS(rx_noprivacy); +UCFG_PEER_CP_STATS_SET_FUNCS(rx_wepfail); +UCFG_PEER_CP_STATS_SET_FUNCS(rx_tkipicv); 
+UCFG_PEER_CP_STATS_SET_FUNCS(rx_wpimic); +UCFG_PEER_CP_STATS_SET_FUNCS(rx_ccmpmic); +UCFG_PEER_CP_STATS_SET_FUNCS(ps_discard); +UCFG_PEER_CP_STATS_SET_FUNCS(psq_drops); +UCFG_PEER_CP_STATS_SET_FUNCS(tx_assoc); +UCFG_PEER_CP_STATS_SET_FUNCS(tx_assoc_fail); +#ifdef ATH_SUPPORT_IQUE +UCFG_PEER_CP_STATS_SET_FUNCS(tx_dropblock); +#endif +#ifdef WLAN_ATH_SUPPORT_EXT_STAT +UCFG_PEER_CP_STATS_SET_FUNCS(tx_bytes_rate); +UCFG_PEER_CP_STATS_SET_FUNCS(rx_bytes_rate); +UCFG_PEER_CP_STATS_SET_FUNCS(tx_data_rate); +UCFG_PEER_CP_STATS_SET_FUNCS(rx_data_rate); +UCFG_PEER_CP_STATS_SET_FUNCS(rx_bytes_last); +UCFG_PEER_CP_STATS_SET_FUNCS(rx_data_last); +UCFG_PEER_CP_STATS_SET_FUNCS(tx_bytes_success_last); +UCFG_PEER_CP_STATS_SET_FUNCS(tx_data_success_last); +#endif + +static inline +void ucfg_peer_cp_stats_rx_mgmt_rssi_update(struct wlan_objmgr_peer *peer, + int8_t rssi) +{ + struct peer_cp_stats *peer_cs; + struct peer_ic_cp_stats *peer_cps; + + if (!peer) + return; + + peer_cs = wlan_cp_stats_get_peer_stats_obj(peer); + if (!peer_cs) + return; + + peer_cps = peer_cs->peer_stats; + if (peer_cps) + peer_cps->cs_rx_mgmt_rssi = rssi; +} + +static inline +int8_t ucfg_peer_cp_stats_rx_mgmt_rssi_get(struct wlan_objmgr_peer *peer) +{ + struct peer_cp_stats *peer_cs; + struct peer_ic_cp_stats *peer_cps; + int8_t val = -1; + + if (!peer) + return val; + + peer_cs = wlan_cp_stats_get_peer_stats_obj(peer); + if (!peer_cs) + return val; + + peer_cps = peer_cs->peer_stats; + if (peer_cps) + val = peer_cps->cs_rx_mgmt_rssi; + + return val; +} + +#define UCFG_PEER_CP_STATS_GET_FUNCS(field) \ + static inline uint32_t \ + ucfg_peer_cp_stats_##field##_get(struct wlan_objmgr_peer *_peer) \ + { \ + struct peer_cp_stats *_peer_cs = \ + wlan_cp_stats_get_peer_stats_obj(_peer); \ + struct peer_ic_cp_stats *_peer_ics; \ + if (_peer_cs) { \ + _peer_ics = _peer_cs->peer_stats; \ + if (_peer_ics) \ + return _peer_ics->cs_##field; \ + } \ + return 0; \ + } + +UCFG_PEER_CP_STATS_GET_FUNCS(rx_mgmt_rate); +#ifdef 
ATH_SUPPORT_IQUE +UCFG_PEER_CP_STATS_GET_FUNCS(tx_dropblock); +#endif + +/** + * wlan_ucfg_get_peer_cp_stats() - ucfg API to get peer cp stats + * @peer_obj: pointer to peer object + * @peer_cps: pointer to peer cp stats object to populate + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_ucfg_get_peer_cp_stats(struct wlan_objmgr_peer *peer, + struct peer_ic_cp_stats *peer_cps); + +/** + * wlan_ucfg_get_vdev_cp_stats() - ucfg API to get vdev cp stats + * @vdev_obj: pointer to vdev object + * @vdev_cps: pointer to vdev cp stats object to populate + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_ucfg_get_vdev_cp_stats(struct wlan_objmgr_vdev *vdev, + struct vdev_ic_cp_stats *vdev_cps); + +/** + * wlan_ucfg_get_pdev_cp_stats_ref() - API to get reference to pdev cp stats + * @pdev_obj: pointer to pdev object + * + * Return: pdev_ic_cp_stats or NULL + */ +struct pdev_ic_cp_stats +*wlan_ucfg_get_pdev_cp_stats_ref(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_ucfg_get_pdev_cp_stats() - ucfg API to get pdev cp stats + * @pdev_obj: pointer to pdev object + * @pdev_cps: pointer to pdev cp stats object to populate + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_ucfg_get_pdev_cp_stats(struct wlan_objmgr_pdev *pdev, + struct pdev_ic_cp_stats *pdev_cps); + +/** + * wlan_ucfg_get_pdev_cp_stats() - ucfg API to get pdev hw stats + * @pdev_obj: pointer to pdev object + * @hw_stats: pointer to pdev hw cp stats to populate + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_ucfg_get_pdev_hw_cp_stats(struct wlan_objmgr_pdev *pdev, + struct pdev_hw_stats *hw_stats); + +/** + * wlan_ucfg_set_pdev_cp_stats() - ucfg API to set pdev hw stats + * @pdev_obj: pointer to pdev object + * @hw_stats: pointer to pdev hw cp stats + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_ucfg_set_pdev_hw_cp_stats(struct wlan_objmgr_pdev *pdev, + struct pdev_hw_stats *hw_stats); + +#ifdef WLAN_ATF_ENABLE 
+/** + * wlan_ucfg_get_atf_peer_cp_stats() - ucfg API to get ATF peer cp stats + * @peer_obj: pointer to peer object + * @atf_cps: pointer to atf peer cp stats object to populate + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS wlan_ucfg_get_atf_peer_cp_stats(struct wlan_objmgr_peer *peer, + struct atf_peer_cp_stats *atf_cps); + +/** + * wlan_ucfg_get_atf_peer_cp_stats_from_mac() - ucfg API to get ATF + * peer cp stats from peer mac address + * @vdev_obj: pointer to vdev object + * @mac: pointer to peer mac address + * @atf_cps: pointer to atf peer cp stats object to populate + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS +wlan_ucfg_get_atf_peer_cp_stats_from_mac(struct wlan_objmgr_vdev *vdev, + uint8_t *mac, + struct atf_peer_cp_stats *astats); + +#endif + +/** + * wlan_ucfg_get_dcs_chan_stats() - ucfg API to set dcs chan stats + * @pdev_obj: pointer to pdev object + * @dcs_chan_stats: pointer to dcs chan stats structure + * + * Return: QDF_STATUS - Success or Failure + */ +QDF_STATUS +wlan_ucfg_get_dcs_chan_stats(struct wlan_objmgr_pdev *pdev, + struct pdev_dcs_chan_stats *dcs_chan_stats); + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_IC_UCFG_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_utils_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_utils_api.h new file mode 100644 index 0000000000000000000000000000000000000000..8c737fab5949e61623fe52a3b56904a2bb7fda24 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ic_utils_api.h @@ -0,0 +1,386 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_ic_utils_api.h + * + * This header file provide definitions and declarations required for northbound + * specific to WIN + */ + +#ifndef __WLAN_CP_STATS_IC_UTILS_API_H__ +#define __WLAN_CP_STATS_IC_UTILS_API_H__ + +#ifdef QCA_SUPPORT_CP_STATS +#include "wlan_cp_stats_ic_ucfg_api.h" + +#define PEER_CP_STATS_SET_FUNCS(field) \ + static inline void \ + peer_cp_stats_##field##_inc(struct wlan_objmgr_peer *_peer, \ + uint64_t _val) \ + { \ + ucfg_peer_cp_stats_##field##_inc(_peer, _val); \ + } \ + static inline void \ + peer_cp_stats_##field##_dec(struct wlan_objmgr_peer *_peer, \ + uint64_t _val) \ + { \ + ucfg_peer_cp_stats_##field##_inc(_peer, _val); \ + } \ + static inline void \ + peer_cp_stats_##field##_update(struct wlan_objmgr_peer *_peer, \ + uint64_t _val) \ + { \ + ucfg_peer_cp_stats_##field##_update(_peer, _val); \ + } + +PEER_CP_STATS_SET_FUNCS(rx_mgmt); +PEER_CP_STATS_SET_FUNCS(tx_mgmt); +PEER_CP_STATS_SET_FUNCS(rx_mgmt_rate); +PEER_CP_STATS_SET_FUNCS(is_tx_not_ok); +PEER_CP_STATS_SET_FUNCS(rx_noprivacy); +PEER_CP_STATS_SET_FUNCS(rx_wepfail); +PEER_CP_STATS_SET_FUNCS(rx_tkipicv); +PEER_CP_STATS_SET_FUNCS(rx_wpimic); +PEER_CP_STATS_SET_FUNCS(rx_ccmpmic); +PEER_CP_STATS_SET_FUNCS(ps_discard); +PEER_CP_STATS_SET_FUNCS(psq_drops); +PEER_CP_STATS_SET_FUNCS(tx_assoc); +PEER_CP_STATS_SET_FUNCS(tx_assoc_fail); +#ifdef ATH_SUPPORT_IQUE +PEER_CP_STATS_SET_FUNCS(tx_dropblock); +#endif +#ifdef WLAN_ATH_SUPPORT_EXT_STAT 
PEER_CP_STATS_SET_FUNCS(tx_bytes_rate);
PEER_CP_STATS_SET_FUNCS(rx_bytes_rate);
PEER_CP_STATS_SET_FUNCS(tx_data_rate);
PEER_CP_STATS_SET_FUNCS(rx_data_rate);
PEER_CP_STATS_SET_FUNCS(rx_bytes_last);
PEER_CP_STATS_SET_FUNCS(rx_data_last);
PEER_CP_STATS_SET_FUNCS(tx_bytes_success_last);
PEER_CP_STATS_SET_FUNCS(tx_data_success_last);
#endif

/*
 * PEER_CP_STATS_GET_FUNCS(field) - generates peer_cp_stats_<field>_get(),
 * a thin wrapper returning the corresponding ucfg peer counter.
 */
#define PEER_CP_STATS_GET_FUNCS(field) \
	static inline uint64_t \
	peer_cp_stats_##field##_get(struct wlan_objmgr_peer *_peer) \
	{ \
		return ucfg_peer_cp_stats_##field##_get(_peer); \
	}

PEER_CP_STATS_GET_FUNCS(rx_mgmt_rate);
#ifdef ATH_SUPPORT_IQUE
PEER_CP_STATS_GET_FUNCS(tx_dropblock);
#endif

/* Forward to the ucfg layer to store the rx mgmt frame RSSI for @peer. */
static inline void
peer_cp_stats_rx_mgmt_rssi_update(struct wlan_objmgr_peer *peer,
				  int8_t rssi)
{
	ucfg_peer_cp_stats_rx_mgmt_rssi_update(peer, rssi);
}

/* Forward to the ucfg layer to fetch the stored rx mgmt frame RSSI. */
static inline int8_t
peer_cp_stats_rx_mgmt_rssi_get(struct wlan_objmgr_peer *peer)
{
	return ucfg_peer_cp_stats_rx_mgmt_rssi_get(peer);
}

/*
 * VDEV_UCAST_CP_STATS_SET_FUNCS(field) - generates inc/update wrappers
 * forwarding to the ucfg accessors of the per-vdev unicast counter
 * cs_<field>.
 */
#define VDEV_UCAST_CP_STATS_SET_FUNCS(field) \
	static inline void \
	vdev_ucast_cp_stats_##field##_inc(struct wlan_objmgr_vdev *_vdev, \
					  uint64_t _val) \
	{ \
		ucfg_vdev_ucast_cp_stats_##field##_inc(_vdev, _val); \
	} \
	static inline void \
	vdev_ucast_cp_stats_##field##_update(struct wlan_objmgr_vdev *_vdev, \
					     uint64_t _val) \
	{ \
		ucfg_vdev_ucast_cp_stats_##field##_update(_vdev, _val); \
	}

VDEV_UCAST_CP_STATS_SET_FUNCS(rx_badkeyid);
VDEV_UCAST_CP_STATS_SET_FUNCS(rx_decryptok);
VDEV_UCAST_CP_STATS_SET_FUNCS(rx_wepfail);
VDEV_UCAST_CP_STATS_SET_FUNCS(rx_tkipicv);
VDEV_UCAST_CP_STATS_SET_FUNCS(rx_tkipreplay);
VDEV_UCAST_CP_STATS_SET_FUNCS(rx_tkipformat);
VDEV_UCAST_CP_STATS_SET_FUNCS(rx_ccmpmic);
VDEV_UCAST_CP_STATS_SET_FUNCS(rx_ccmpreplay);
VDEV_UCAST_CP_STATS_SET_FUNCS(rx_ccmpformat);
VDEV_UCAST_CP_STATS_SET_FUNCS(rx_wpimic);
VDEV_UCAST_CP_STATS_SET_FUNCS(rx_wpireplay);
VDEV_UCAST_CP_STATS_SET_FUNCS(rx_countermeasure);
VDEV_UCAST_CP_STATS_SET_FUNCS(rx_mgmt);
+VDEV_UCAST_CP_STATS_SET_FUNCS(tx_mgmt); + +#define VDEV_UCAST_CP_STATS_GET_FUNCS(field) \ + static inline uint64_t \ + vdev_ucast_cp_stats_##field##_get(struct wlan_objmgr_vdev *_vdev) \ + { \ + return ucfg_vdev_ucast_cp_stats_##field##_get(_vdev); \ + } + +VDEV_UCAST_CP_STATS_GET_FUNCS(rx_wepfail); +VDEV_UCAST_CP_STATS_GET_FUNCS(rx_decryptok); +VDEV_UCAST_CP_STATS_GET_FUNCS(rx_ccmpmic); +VDEV_UCAST_CP_STATS_GET_FUNCS(rx_ccmpreplay); + +#define VDEV_MCAST_CP_STATS_SET_FUNCS(field) \ + static inline void \ + vdev_mcast_cp_stats_##field##_inc(struct wlan_objmgr_vdev *_vdev, \ + uint64_t _val) \ + { \ + ucfg_vdev_mcast_cp_stats_##field##_inc(_vdev, _val); \ + } \ + static inline void \ + vdev_mcast_cp_stats_##field##_update(struct wlan_objmgr_vdev *_vdev, \ + uint64_t _val) \ + { \ + ucfg_vdev_mcast_cp_stats_##field##_update(_vdev, _val); \ + } + +VDEV_MCAST_CP_STATS_SET_FUNCS(rx_badkeyid); +VDEV_MCAST_CP_STATS_SET_FUNCS(rx_decryptok); +VDEV_MCAST_CP_STATS_SET_FUNCS(rx_wepfail); +VDEV_MCAST_CP_STATS_SET_FUNCS(rx_tkipicv); +VDEV_MCAST_CP_STATS_SET_FUNCS(rx_tkipreplay); +VDEV_MCAST_CP_STATS_SET_FUNCS(rx_tkipformat); +VDEV_MCAST_CP_STATS_SET_FUNCS(rx_ccmpmic); +VDEV_MCAST_CP_STATS_SET_FUNCS(rx_ccmpreplay); +VDEV_MCAST_CP_STATS_SET_FUNCS(rx_ccmpformat); +VDEV_MCAST_CP_STATS_SET_FUNCS(rx_wpimic); +VDEV_MCAST_CP_STATS_SET_FUNCS(rx_wpireplay); +VDEV_MCAST_CP_STATS_SET_FUNCS(rx_countermeasure); +VDEV_MCAST_CP_STATS_SET_FUNCS(rx_mgmt); +VDEV_MCAST_CP_STATS_SET_FUNCS(tx_mgmt); + +#define VDEV_MCAST_CP_STATS_GET_FUNCS(field) \ + static inline uint64_t \ + vdev_mcast_cp_stats_##field##_get(struct wlan_objmgr_vdev *_vdev) \ + { \ + return ucfg_vdev_mcast_cp_stats_##field##_get(_vdev); \ + } + +VDEV_MCAST_CP_STATS_GET_FUNCS(rx_wepfail); +VDEV_MCAST_CP_STATS_GET_FUNCS(rx_decryptok); +VDEV_MCAST_CP_STATS_GET_FUNCS(rx_ccmpmic); +VDEV_MCAST_CP_STATS_GET_FUNCS(rx_ccmpreplay); + +#define VDEV_CP_STATS_SET_FUNCS(field) \ + static inline void \ + vdev_cp_stats_##field##_inc(struct 
wlan_objmgr_vdev *_vdev, \ + uint64_t _val) \ + { \ + ucfg_vdev_cp_stats_##field##_inc(_vdev, _val); \ + } \ + static inline void \ + vdev_cp_stats_##field##_update(struct wlan_objmgr_vdev *_vdev, \ + uint64_t _val) \ + { \ + ucfg_vdev_cp_stats_##field##_update(_vdev, _val); \ + } + +VDEV_CP_STATS_SET_FUNCS(rx_wrongbss); +VDEV_CP_STATS_SET_FUNCS(rx_wrongdir); +VDEV_CP_STATS_SET_FUNCS(rx_not_assoc); +VDEV_CP_STATS_SET_FUNCS(rx_noprivacy); +VDEV_CP_STATS_SET_FUNCS(rx_mgmt_discard); +VDEV_CP_STATS_SET_FUNCS(rx_ctl); +VDEV_CP_STATS_SET_FUNCS(rx_rs_too_big); +VDEV_CP_STATS_SET_FUNCS(rx_elem_missing); +VDEV_CP_STATS_SET_FUNCS(rx_elem_too_big); +VDEV_CP_STATS_SET_FUNCS(rx_chan_err); +VDEV_CP_STATS_SET_FUNCS(rx_node_alloc); +VDEV_CP_STATS_SET_FUNCS(rx_ssid_mismatch); +VDEV_CP_STATS_SET_FUNCS(rx_auth_unsupported); +VDEV_CP_STATS_SET_FUNCS(rx_auth_fail); +VDEV_CP_STATS_SET_FUNCS(rx_auth_countermeasures); +VDEV_CP_STATS_SET_FUNCS(rx_assoc_bss); +VDEV_CP_STATS_SET_FUNCS(rx_assoc_notauth); +VDEV_CP_STATS_SET_FUNCS(rx_assoc_cap_mismatch); +VDEV_CP_STATS_SET_FUNCS(rx_assoc_norate); +VDEV_CP_STATS_SET_FUNCS(rx_assoc_wpaie_err); +VDEV_CP_STATS_SET_FUNCS(rx_action); +VDEV_CP_STATS_SET_FUNCS(rx_auth_err); +VDEV_CP_STATS_SET_FUNCS(tx_nodefkey); +VDEV_CP_STATS_SET_FUNCS(tx_noheadroom); +VDEV_CP_STATS_SET_FUNCS(rx_acl); +VDEV_CP_STATS_SET_FUNCS(rx_nowds); +VDEV_CP_STATS_SET_FUNCS(tx_nobuf); +VDEV_CP_STATS_SET_FUNCS(tx_nonode); +VDEV_CP_STATS_SET_FUNCS(tx_cipher_err); +VDEV_CP_STATS_SET_FUNCS(tx_not_ok); +VDEV_CP_STATS_SET_FUNCS(tx_bcn_swba); +VDEV_CP_STATS_SET_FUNCS(node_timeout); +VDEV_CP_STATS_SET_FUNCS(crypto_nomem); +VDEV_CP_STATS_SET_FUNCS(crypto_tkip); +VDEV_CP_STATS_SET_FUNCS(crypto_tkipenmic); +VDEV_CP_STATS_SET_FUNCS(crypto_tkipcm); +VDEV_CP_STATS_SET_FUNCS(crypto_ccmp); +VDEV_CP_STATS_SET_FUNCS(crypto_wep); +VDEV_CP_STATS_SET_FUNCS(crypto_setkey_cipher); +VDEV_CP_STATS_SET_FUNCS(crypto_setkey_nokey); +VDEV_CP_STATS_SET_FUNCS(crypto_delkey); 
+VDEV_CP_STATS_SET_FUNCS(crypto_cipher_err); +VDEV_CP_STATS_SET_FUNCS(crypto_attach_fail); +VDEV_CP_STATS_SET_FUNCS(crypto_swfallback); +VDEV_CP_STATS_SET_FUNCS(crypto_keyfail); +VDEV_CP_STATS_SET_FUNCS(ibss_capmismatch); +VDEV_CP_STATS_SET_FUNCS(ps_unassoc); +VDEV_CP_STATS_SET_FUNCS(ps_aid_err); +VDEV_CP_STATS_SET_FUNCS(tx_offchan_mgmt); +VDEV_CP_STATS_SET_FUNCS(tx_offchan_data); +VDEV_CP_STATS_SET_FUNCS(tx_offchan_fail); +VDEV_CP_STATS_SET_FUNCS(invalid_macaddr_nodealloc_fail); +VDEV_CP_STATS_SET_FUNCS(tx_bcn_success); +VDEV_CP_STATS_SET_FUNCS(tx_bcn_outage); +VDEV_CP_STATS_SET_FUNCS(sta_xceed_rlim); +VDEV_CP_STATS_SET_FUNCS(sta_xceed_vlim); +VDEV_CP_STATS_SET_FUNCS(mlme_auth_attempt); +VDEV_CP_STATS_SET_FUNCS(mlme_auth_success); +VDEV_CP_STATS_SET_FUNCS(authorize_attempt); +VDEV_CP_STATS_SET_FUNCS(authorize_success); + +#define VDEV_CP_STATS_GET_FUNCS(field) \ + static inline uint64_t \ + vdev_cp_stats_##field##_get(struct wlan_objmgr_vdev *_vdev) \ + { \ + return ucfg_vdev_cp_stats_##field##_get(_vdev); \ + } + +VDEV_CP_STATS_GET_FUNCS(rx_wrongbss); +VDEV_CP_STATS_GET_FUNCS(rx_wrongdir); +VDEV_CP_STATS_GET_FUNCS(rx_ssid_mismatch); + +static inline void vdev_cp_stats_reset(struct wlan_objmgr_vdev *vdev) +{ + struct vdev_cp_stats *vdev_cps; + struct vdev_ic_cp_stats *vdev_cs; + + if (!vdev) + return; + + vdev_cps = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cps) + return; + + vdev_cs = vdev_cps->vdev_stats; + qdf_mem_set(vdev_cs, sizeof(struct vdev_ic_cp_stats), 0x0); +} + +#define PDEV_CP_STATS_SET_FUNCS(field) \ + static inline void \ + pdev_cp_stats_##field##_inc(struct wlan_objmgr_pdev *_pdev, \ + uint64_t _val) \ + { \ + ucfg_pdev_cp_stats_##field##_inc(_pdev, _val); \ + } \ + static inline void \ + pdev_cp_stats_##field##_update(struct wlan_objmgr_pdev *_pdev, \ + uint64_t _val) \ + { \ + ucfg_pdev_cp_stats_##field##_update(_pdev, _val); \ + } + +PDEV_CP_STATS_SET_FUNCS(tx_beacon); +PDEV_CP_STATS_SET_FUNCS(be_nobuf); 
+PDEV_CP_STATS_SET_FUNCS(tx_buf_count); +PDEV_CP_STATS_SET_FUNCS(tx_packets); +PDEV_CP_STATS_SET_FUNCS(rx_packets); +PDEV_CP_STATS_SET_FUNCS(tx_mgmt); +PDEV_CP_STATS_SET_FUNCS(tx_num_data); +PDEV_CP_STATS_SET_FUNCS(rx_num_data); +PDEV_CP_STATS_SET_FUNCS(rx_mgmt); +PDEV_CP_STATS_SET_FUNCS(rx_num_mgmt); +PDEV_CP_STATS_SET_FUNCS(rx_num_ctl); +PDEV_CP_STATS_SET_FUNCS(rx_ctrl); +PDEV_CP_STATS_SET_FUNCS(tx_ctrl); +PDEV_CP_STATS_SET_FUNCS(tx_rssi); +PDEV_CP_STATS_SET_FUNCS(rx_rssi_comb); +PDEV_CP_STATS_SET_FUNCS(rx_bytes); +PDEV_CP_STATS_SET_FUNCS(tx_bytes); +PDEV_CP_STATS_SET_FUNCS(tx_compaggr); +PDEV_CP_STATS_SET_FUNCS(rx_aggr); +PDEV_CP_STATS_SET_FUNCS(tx_bawadv); +PDEV_CP_STATS_SET_FUNCS(tx_compunaggr); +PDEV_CP_STATS_SET_FUNCS(rx_overrun); +PDEV_CP_STATS_SET_FUNCS(rx_crypt_err); +PDEV_CP_STATS_SET_FUNCS(rx_mic_err); +PDEV_CP_STATS_SET_FUNCS(rx_crc_err); +PDEV_CP_STATS_SET_FUNCS(rx_phy_err); +PDEV_CP_STATS_SET_FUNCS(rx_ack_err); +PDEV_CP_STATS_SET_FUNCS(rx_rts_err); +PDEV_CP_STATS_SET_FUNCS(rx_rts_success); +PDEV_CP_STATS_SET_FUNCS(rx_fcs_err); +PDEV_CP_STATS_SET_FUNCS(no_beacons); +PDEV_CP_STATS_SET_FUNCS(mib_int_count); +PDEV_CP_STATS_SET_FUNCS(rx_looplimit_start); +PDEV_CP_STATS_SET_FUNCS(rx_looplimit_end); +PDEV_CP_STATS_SET_FUNCS(ap_stats_tx_cal_enable); +PDEV_CP_STATS_SET_FUNCS(tgt_asserts); +PDEV_CP_STATS_SET_FUNCS(chan_nf); +PDEV_CP_STATS_SET_FUNCS(rx_last_msdu_unset_cnt); +PDEV_CP_STATS_SET_FUNCS(chan_nf_sec80); +PDEV_CP_STATS_SET_FUNCS(wmi_tx_mgmt); +PDEV_CP_STATS_SET_FUNCS(wmi_tx_mgmt_completions); +PDEV_CP_STATS_SET_FUNCS(wmi_tx_mgmt_completion_err); +PDEV_CP_STATS_SET_FUNCS(peer_delete_req); +PDEV_CP_STATS_SET_FUNCS(peer_delete_resp); +PDEV_CP_STATS_SET_FUNCS(rx_mgmt_rssi_drop); +PDEV_CP_STATS_SET_FUNCS(tx_retries); +PDEV_CP_STATS_SET_FUNCS(rx_data_bytes); +PDEV_CP_STATS_SET_FUNCS(tx_frame_count); +PDEV_CP_STATS_SET_FUNCS(rx_frame_count); +PDEV_CP_STATS_SET_FUNCS(rx_clear_count); +PDEV_CP_STATS_SET_FUNCS(cycle_count); 
+PDEV_CP_STATS_SET_FUNCS(phy_err_count); +PDEV_CP_STATS_SET_FUNCS(chan_tx_pwr); +PDEV_CP_STATS_SET_FUNCS(self_bss_util); +PDEV_CP_STATS_SET_FUNCS(obss_util); + +#define PDEV_CP_STATS_GET_FUNCS(field) \ + static inline uint64_t \ + pdev_cp_stats_##field##_get(struct wlan_objmgr_pdev *_pdev) \ + { \ + return ucfg_pdev_cp_stats_##field##_get(_pdev); \ + } + +PDEV_CP_STATS_GET_FUNCS(ap_stats_tx_cal_enable); +PDEV_CP_STATS_GET_FUNCS(wmi_tx_mgmt); +PDEV_CP_STATS_GET_FUNCS(wmi_tx_mgmt_completions); +PDEV_CP_STATS_GET_FUNCS(wmi_tx_mgmt_completion_err); +PDEV_CP_STATS_GET_FUNCS(tgt_asserts); + +static inline void pdev_cp_stats_reset(struct wlan_objmgr_pdev *pdev) +{ + ucfg_pdev_cp_stats_reset(pdev); +} + +/** + * wlan_get_pdev_cp_stats_ref() - API to reference to pdev cp stats object + * @pdev: pointer to pdev object + * + * Return: pdev_ic_cp_stats or NULL + */ +struct pdev_ic_cp_stats +*wlan_get_pdev_cp_stats_ref(struct wlan_objmgr_pdev *pdev); + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_IC_UTILS_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_defs.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..1ee5439b6b486127d7de47323afa1b3e80e15e7f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_defs.h @@ -0,0 +1,335 @@ +/* + * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_mc_defs.h + * + * This file provide definition for structure/enums/defines related to control + * path stats componenet + */ + +#ifndef __WLAN_CP_STATS_MC_DEFS_H__ +#define __WLAN_CP_STATS_MC_DEFS_H__ + +#ifdef CONFIG_MCL + +#include "wlan_cmn.h" +#include "qdf_event.h" + +#define MAX_NUM_CHAINS 2 + +#define IS_MSB_SET(__num) ((__num) & BIT(31)) +#define IS_LSB_SET(__num) ((__num) & BIT(0)) + +/** + * enum stats_req_type - enum indicating bit position of various stats type in + * request map + * @TYPE_CONNECTION_TX_POWER: tx power was requested + * @TYPE_STATION_STATS: station stats was requested + * @TYPE_PEER_STATS: peer stats was requested + */ +enum stats_req_type { + TYPE_CONNECTION_TX_POWER = 0, + TYPE_STATION_STATS, + TYPE_PEER_STATS, + TYPE_MAX, +}; + +/** + * enum tx_rate_info - tx rate flags + * @TX_RATE_LEGACY: Legacy rates + * @TX_RATE_HT20: HT20 rates + * @TX_RATE_HT40: HT40 rates + * @TX_RATE_SGI: Rate with Short guard interval + * @TX_RATE_LGI: Rate with Long guard interval + * @TX_RATE_VHT20: VHT 20 rates + * @TX_RATE_VHT40: VHT 40 rates + * @TX_RATE_VHT80: VHT 80 rates + */ +enum tx_rate_info { + TX_RATE_LEGACY = 0x1, + TX_RATE_HT20 = 0x2, + TX_RATE_HT40 = 0x4, + TX_RATE_SGI = 0x8, + TX_RATE_LGI = 0x10, + TX_RATE_VHT20 = 0x20, + TX_RATE_VHT40 = 0x40, + TX_RATE_VHT80 = 0x80, +}; + +/** + * struct wake_lock_stats - wake lock stats structure + * @ucast_wake_up_count: Unicast wakeup count + * @bcast_wake_up_count: Broadcast wakeup count + * @ipv4_mcast_wake_up_count: ipv4 multicast wakeup count + * @ipv6_mcast_wake_up_count: ipv6 multicast wakeup count + * @ipv6_mcast_ra_stats: ipv6 
multicast ra stats + * @ipv6_mcast_ns_stats: ipv6 multicast ns stats + * @ipv6_mcast_na_stats: ipv6 multicast na stats + * @icmpv4_count: ipv4 icmp packet count + * @icmpv6_count: ipv6 icmp packet count + * @rssi_breach_wake_up_count: rssi breach wakeup count + * @low_rssi_wake_up_count: low rssi wakeup count + * @gscan_wake_up_count: gscan wakeup count + * @pno_complete_wake_up_count: pno complete wakeup count + * @pno_match_wake_up_count: pno match wakeup count + * @oem_response_wake_up_count: oem response wakeup count + * @pwr_save_fail_detected: pwr save fail detected wakeup count + * @scan_11d 11d scan wakeup count + * @mgmt_assoc: association request management frame + * @mgmt_disassoc: disassociation management frame + * @mgmt_assoc_resp: association response management frame + * @mgmt_reassoc: reassociate request management frame + * @mgmt_reassoc_resp: reassociate response management frame + * @mgmt_auth: authentication managament frame + * @mgmt_deauth: deauthentication management frame + * @mgmt_action: action managament frame + */ +struct wake_lock_stats { + uint32_t ucast_wake_up_count; + uint32_t bcast_wake_up_count; + uint32_t ipv4_mcast_wake_up_count; + uint32_t ipv6_mcast_wake_up_count; + uint32_t ipv6_mcast_ra_stats; + uint32_t ipv6_mcast_ns_stats; + uint32_t ipv6_mcast_na_stats; + uint32_t icmpv4_count; + uint32_t icmpv6_count; + uint32_t rssi_breach_wake_up_count; + uint32_t low_rssi_wake_up_count; + uint32_t gscan_wake_up_count; + uint32_t pno_complete_wake_up_count; + uint32_t pno_match_wake_up_count; + uint32_t oem_response_wake_up_count; + uint32_t pwr_save_fail_detected; + uint32_t scan_11d; + uint32_t mgmt_assoc; + uint32_t mgmt_disassoc; + uint32_t mgmt_assoc_resp; + uint32_t mgmt_reassoc; + uint32_t mgmt_reassoc_resp; + uint32_t mgmt_auth; + uint32_t mgmt_deauth; + uint32_t mgmt_action; +}; + +struct stats_event; + +/** + * struct request_info: details of each request + * @cookie: identifier for os_if request + * @callback: callback to 
process os_if request when response comes. + * @vdev_id: vdev_id of request + * @pdev_id: pdev_id of request + * @peer_mac_addr: peer mac address + */ +struct request_info { + void *cookie; + union { + void (*get_tx_power_cb)(int tx_power, void *cookie); + void (*get_peer_rssi_cb)(struct stats_event *ev, void *cookie); + void (*get_station_stats_cb)(struct stats_event *ev, + void *cookie); + } u; + uint32_t vdev_id; + uint32_t pdev_id; + uint8_t peer_mac_addr[WLAN_MACADDR_LEN]; +}; + +/** + * struct pending_stats_requests: details of pending requests + * @type_map: map indicating type of outstanding requests + * @req: array of info for outstanding request of each type + */ +struct pending_stats_requests { + uint32_t type_map; + struct request_info req[TYPE_MAX]; +}; + +/** + * struct cca_stats - cca stats + * @congestion: the congestion percentage = (busy_time/total_time)*100 + * for the interval from when the vdev was started to the current time + * (or the time at which the vdev was stopped). 
+ */ +struct cca_stats { + uint32_t congestion; +}; + +/** + * struct psoc_mc_cp_stats: psoc specific stats + * @pending: details of pending requests + * @wow_unspecified_wake_up_count: number of non-wow related wake ups + */ +struct psoc_mc_cp_stats { + struct pending_stats_requests pending; + uint32_t wow_unspecified_wake_up_count; +}; + +/** + * struct pdev_mc_cp_stats: pdev specific stats + * @max_pwr: max tx power for vdev + */ +struct pdev_mc_cp_stats { + int32_t max_pwr; +}; + +/** + * struct summary_stats - summary stats + * @snr: snr of vdev + * @rssi: rssi of vdev + * @retry_cnt: retry count + * @multiple_retry_cnt: multiple_retry_cnt + * @tx_frm_cnt: num of tx frames + * @rx_frm_cnt: num of rx frames + * @frm_dup_cnt: duplicate frame count + * @fail_cnt: fail count + * @rts_fail_cnt: rts fail count + * @ack_fail_cnt: ack fail count + * @rts_succ_cnt: rts success count + * @rx_discard_cnt: rx frames discarded + * @rx_error_cnt: rx frames with error + */ +struct summary_stats { + uint32_t snr; + uint32_t rssi; + uint32_t retry_cnt[4]; + uint32_t multiple_retry_cnt[4]; + uint32_t tx_frm_cnt[4]; + uint32_t rx_frm_cnt; + uint32_t frm_dup_cnt; + uint32_t fail_cnt[4]; + uint32_t rts_fail_cnt; + uint32_t ack_fail_cnt; + uint32_t rts_succ_cnt; + uint32_t rx_discard_cnt; + uint32_t rx_error_cnt; +}; + +/** + * struct vdev_mc_cp_stats - vdev specific stats + * @wow_stats: wake_lock stats for vdev + * @cca: cca stats + * @tx_rate_flags: tx rate flags (enum tx_rate_info) + * @chain_rssi: chain rssi + * @vdev_summary_stats: vdev's summary stats + */ +struct vdev_mc_cp_stats { + struct wake_lock_stats wow_stats; + struct cca_stats cca; + uint32_t tx_rate_flags; + int8_t chain_rssi[MAX_NUM_CHAINS]; + struct summary_stats vdev_summary_stats; +}; + +/** + * struct peer_mc_cp_stats - peer specific stats + * @tx_rate: tx rate + * @rx_rate: rx rate + * @peer_rssi: rssi + * @peer_macaddr: mac address + */ +struct peer_mc_cp_stats { + uint32_t tx_rate; + uint32_t rx_rate; + 
uint32_t peer_rssi; + uint8_t peer_macaddr[WLAN_MACADDR_LEN]; +}; + +/** + * struct peer_adv_mc_cp_stats - peer specific adv stats + * @peer_macaddr: mac address + * @fcs_count: fcs count + * @rx_bytes: rx bytes + * @rx_count: rx count + */ +struct peer_adv_mc_cp_stats { + uint8_t peer_macaddr[WLAN_MACADDR_LEN]; + uint32_t fcs_count; + uint32_t rx_count; + uint64_t rx_bytes; +}; + +/** + * struct congestion_stats_event: congestion stats event param + * @vdev_id: vdev_id of the event + * @congestion: the congestion percentage + */ +struct congestion_stats_event { + uint8_t vdev_id; + uint32_t congestion; +}; + +/** + * struct summary_stats_event - summary_stats event param + * @vdev_id: vdev_id of the event + * @stats: summary stats + */ +struct summary_stats_event { + uint8_t vdev_id; + struct summary_stats stats; +}; + +/** + * struct chain_rssi_event - chain_rssi event param + * @vdev_id: vdev_id of the event + * @chain_rssi: chain_rssi + */ +struct chain_rssi_event { + uint8_t vdev_id; + int8_t chain_rssi[MAX_NUM_CHAINS]; +}; + +/** + * struct stats_event - parameters populated by stats event + * @num_pdev_stats: num pdev stats + * @pdev_stats: if populated array indicating pdev stats (index = pdev_id) + * @num_peer_stats: num peer stats + * @peer_stats: if populated array indicating peer stats + * @peer_adv_stats: if populated, indicates peer adv (extd2) stats + * @num_peer_adv_stats: number of peer adv (extd2) stats + * @cca_stats: if populated indicates congestion stats + * @num_summary_stats: number of summary stats + * @vdev_summary_stats: if populated indicates array of summary stats per vdev + * @num_chain_rssi_stats: number of chain rssi stats + * @vdev_chain_rssi: if populated indicates array of chain rssi per vdev + * @tx_rate: tx rate (kbps) + * @tx_rate_flags: tx rate flags, (enum tx_rate_info) + * @last_event: The LSB indicates if the event is the last event or not and the + * MSB indicates if this feature is supported by FW or not. 
+ */ +struct stats_event { + uint32_t num_pdev_stats; + struct pdev_mc_cp_stats *pdev_stats; + uint32_t num_peer_stats; + struct peer_mc_cp_stats *peer_stats; + uint32_t num_peer_adv_stats; + struct peer_adv_mc_cp_stats *peer_adv_stats; + struct congestion_stats_event *cca_stats; + uint32_t num_summary_stats; + struct summary_stats_event *vdev_summary_stats; + uint32_t num_chain_rssi_stats; + struct chain_rssi_event *vdev_chain_rssi; + uint32_t tx_rate; + uint32_t rx_rate; + enum tx_rate_info tx_rate_flags; + uint32_t last_event; +}; + +#endif /* CONFIG_MCL */ +#endif /* __WLAN_CP_STATS_MC_DEFS_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..5ee653bfb4bc15cbbd731ce35c48a95e7c3c8fb2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_tgt_api.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cp_stats_mc_tgt_api.h + * + * This header file provide with API declarations to interface with Southbound + */ +#ifndef __WLAN_CP_STATS_MC_TGT_API_H__ +#define __WLAN_CP_STATS_MC_TGT_API_H__ + +#ifdef QCA_SUPPORT_CP_STATS +#include "wlan_cp_stats_mc_defs.h" + +/** + * tgt_mc_cp_stats_process_stats_event(): API to process stats event + * @psoc: pointer to psoc object + * @event: event parameters + * + * Return: status of operation + */ +QDF_STATUS tgt_mc_cp_stats_process_stats_event(struct wlan_objmgr_psoc *psoc, + struct stats_event *event); + +/** + * tgt_send_mc_cp_stats_req(): API to send stats request to lmac + * @psoc: pointer to psoc object + * + * Return: status of operation + */ +QDF_STATUS tgt_send_mc_cp_stats_req(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type, + struct request_info *req); + +/** + * tgt_mc_cp_stats_inc_wake_lock_stats() : API to increment wake lock stats + * given the wake reason code + * @psoc: pointer to psoc object + * @reason: wake reason + * @stats: vdev wow stats to update + * @unspecified_wake_count: unspecified wake count to update + * + * Return : status of operation + */ +QDF_STATUS tgt_mc_cp_stats_inc_wake_lock_stats(struct wlan_objmgr_psoc *psoc, + uint32_t reason, struct wake_lock_stats *stats, + uint32_t *unspecified_wake_count); + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_MC_TGT_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..f417fc2cef7f201bd9b05bf54eb57774a9512dca --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_mc_ucfg_api.h @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_mc_ucfg_api.h + * + * This header file maintain API declaration required for northbound interaction + */ + +#ifndef __WLAN_CP_STATS_MC_UCFG_API_H__ +#define __WLAN_CP_STATS_MC_UCFG_API_H__ + +#ifdef QCA_SUPPORT_CP_STATS + +#include +#include +#include + +struct psoc_cp_stats; +struct vdev_cp_stats; + +/** + * ucfg_mc_cp_stats_get_psoc_wake_lock_stats() : API to get wake lock stats from + * psoc + * @psoc: pointer to psoc object + * @stats: stats object to populate + * + * Return : status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_get_psoc_wake_lock_stats( + struct wlan_objmgr_psoc *psoc, + struct wake_lock_stats *stats); + +/** + * ucfg_mc_cp_stats_get_vdev_wake_lock_stats() : API to get wake lock stats from + * vdev + * @vdev: pointer to vdev object + * @stats: stats object to populate + * + * Return : status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_get_vdev_wake_lock_stats( + struct wlan_objmgr_vdev *vdev, + struct wake_lock_stats *stats); + +/** + * ucfg_mc_cp_stats_inc_wake_lock_stats_by_protocol() : API to increment wake + * lock stats given the protocol of the packet that was received. 
+ * @psoc: pointer to psoc object + * @vdev_id: vdev_id for which the packet was received + * @protocol: protocol of the packet that was received + * + * Return : status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_inc_wake_lock_stats_by_protocol( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + enum qdf_proto_subtype protocol); + +/** + * ucfg_mc_cp_stats_inc_wake_lock_stats_by_protocol() : API to increment wake + * lock stats given destnation of packet that was received. + * @psoc: pointer to psoc object + * @dest_mac: destinamtion mac address of packet that was received + * + * Return : status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_inc_wake_lock_stats_by_dst_addr( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, uint8_t *dest_mac); + +/** + * ucfg_mc_cp_stats_inc_wake_lock_stats() : API to increment wake lock stats + * given wake reason. + * @psoc: pointer to psoc object + * @vdev_id: vdev_id on with WOW was received + * @reason: reason of WOW + * + * Return : status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_inc_wake_lock_stats(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + uint32_t reason); + +/** + * ucfg_mc_cp_stats_write_wow_stats() - Writes WOW stats to buffer + * @psoc: pointer to psoc object + * @buffer: The char buffer to write to + * @max_len: The maximum number of chars to write + * @ret: number of bytes written + * + * Return: status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_write_wow_stats( + struct wlan_objmgr_psoc *psoc, + char *buffer, uint16_t max_len, int *ret); + +/** + * ucfg_mc_cp_stats_send_tx_power_request() - API to send tx_power request to + * lmac + * @vdev: pointer to vdev object + * @type: request type + * + * Return: status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_send_stats_request(struct wlan_objmgr_vdev *vdev, + enum stats_req_type type, + struct request_info *info); + +/** + * ucfg_mc_cp_stats_get_tx_power() - API to fetch tx_power + * @vdev: pointer to vdev object + * @dbm: pointer to tx 
power in dbm + * + * Return: status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_get_tx_power(struct wlan_objmgr_vdev *vdev, + int *dbm); + +/** + * ucfg_mc_cp_stats_is_req_pending() - API to tell if given request is pending + * @psoc: pointer to psoc object + * @type: request type to check + * + * Return: true of request is pending, false otherwise + */ +bool ucfg_mc_cp_stats_is_req_pending(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type); + +/** + * ucfg_mc_cp_stats_set_pending_req() - API to set pending request + * @psoc: pointer to psoc object + * @type: request to update + * @req: value to update + * + * Return: status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_set_pending_req(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type, + struct request_info *req); + +/** + * ucfg_mc_cp_stats_reset_pending_req() - API to reset pending request + * @psoc: pointer to psoc object + * @type: request to update + * + * Return: status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_reset_pending_req(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type); + +/** + * ucfg_mc_cp_stats_get_pending_req() - API to get pending request + * @psoc: pointer to psoc object + * @type: request to update + * @info: buffer to populate + * + * Return: status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_get_pending_req(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type, + struct request_info *info); + +/** + * ucfg_mc_cp_stats_free_stats_resources() - API to free buffers within stats_event + * structure + * @ev: strcture whose buffer are to freed + * + * Return: none + */ +void ucfg_mc_cp_stats_free_stats_resources(struct stats_event *ev); + +/** + * ucfg_mc_cp_stats_cca_stats_get() - API to fetch cca stats + * @vdev: pointer to vdev object + * @cca_stats: pointer to cca info + * + * Return: status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_cca_stats_get(struct wlan_objmgr_vdev *vdev, + struct cca_stats *cca_stats); + +/** + * 
ucfg_mc_cp_stats_set_rate_flags() - API to set rate flags + * @vdev: pointer to vdev object + * @flags: value to set (enum tx_rate_info) + * + * Return: status of operation + */ +QDF_STATUS ucfg_mc_cp_stats_set_rate_flags(struct wlan_objmgr_vdev *vdev, + enum tx_rate_info flags); + +/** + * ucfg_mc_cp_stats_register_lost_link_info_cb() - API to register lost link + * info callback + * @psoc: pointer to psoc object + * @lost_link_cp_stats_info_cb: Lost link info callback to be registered + * + */ +void ucfg_mc_cp_stats_register_lost_link_info_cb( + struct wlan_objmgr_psoc *psoc, + void (*lost_link_cp_stats_info_cb)(void *stats_ev)); + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_MC_UCFG_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..94929afff981e06770fa9bc1e20c1c133ead6bdb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_tgt_api.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cp_stats_tgt_api.h + * + * This header file provide with API declarations to interface with Southbound + */ +#ifndef __WLAN_CP_STATS_TGT_API_H__ +#define __WLAN_CP_STATS_TGT_API_H__ +#include +#include + +#ifdef QCA_SUPPORT_CP_STATS +/** + * tgt_cp_stats_register_rx_ops(): API to register rx ops with lmac + * @rx_ops: rx ops struct + * + * Return: none + */ +void tgt_cp_stats_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops); +#else +static inline void tgt_cp_stats_register_rx_ops( + struct wlan_lmac_if_rx_ops *rx_ops) {} +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_TGT_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..e09eee385047abac46a3ed3ead440a169b374c95 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_ucfg_api.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cp_stats_ucfg_api.h + * + * This header file maintain API declaration required for northbound interaction + */ + +#ifndef __WLAN_CP_STATS_UCFG_API_H__ +#define __WLAN_CP_STATS_UCFG_API_H__ + +#ifdef QCA_SUPPORT_CP_STATS +#include +#include "../../core/src/wlan_cp_stats_defs.h" +#include "../../core/src/wlan_cp_stats_cmn_api_i.h" + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_UCFG_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_utils_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_utils_api.h new file mode 100644 index 0000000000000000000000000000000000000000..8bd51b1cdcb741ba5a3eea13b4a50a55be36944d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/inc/wlan_cp_stats_utils_api.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cp_stats_utils_api.h + * + * This header file provide declaration to public APIs exposed for other UMAC + * components to init/deinit, (de)register to required WMI events on + * soc enable/disable + */ + +#ifndef __WLAN_CP_STATS_UTILS_API_H__ +#define __WLAN_CP_STATS_UTILS_API_H__ + +#ifdef QCA_SUPPORT_CP_STATS +#include + +#define cp_stats_debug(args ...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_CP_STATS, ## args) +#define cp_stats_err(args ...) \ + QDF_TRACE_ERROR(QDF_MODULE_ID_CP_STATS, ## args) + +/** + * enum wlan_cp_stats_cfg_state - State of Object configuration to + * indicate whether object has to be attached/detached in cp stats + * @WLAN_CP_STATS_OBJ_DETACH: Object has to be detached + * @WLAN_CP_STATS_OBJ_ATTACH: Object has to be attached + * @WLAN_CP_STATS_OBJ_INVALID: Object is invalid + */ +enum wlan_cp_stats_cfg_state { + WLAN_CP_STATS_OBJ_DETACH = 0, + WLAN_CP_STATS_OBJ_ATTACH = 1, + WLAN_CP_STATS_OBJ_INVALID +}; + +/** + * enum wlan_cp_stats_comp_id - component id for other umac components + * @WLAN_CP_STATS_ATF: ATF component specific id + * @WLAN_CP_STATS_MAX_COMPONENTS : Max id of cp stats components + */ +enum wlan_cp_stats_comp_id { + WLAN_CP_STATS_ATF = 0, + WLAN_CP_STATS_MAX_COMPONENTS, +}; + +/** + * wlan_cp_stats_init(): API to init stats component + * + * This API is invoked from dispatcher init during all component init. + * This API will register all required handlers for psoc, pdev,vdev + * and peer object create/delete notification. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_init(void); + +/** + * wlan_cp_stats_init(): API to deinit stats component + * + * This API is invoked from dispatcher deinit during all component deinit. + * This API will unregister all required handlers for psoc, pdev,vdev + * and peer object create/delete notification. 
+ * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_deinit(void); + +/** + * wlan_cp_stats_open(): API to open cp stats component + * @psoc: pointer to psoc + * + * This API is invoked from dispatcher psoc open. + * This API will initialize psoc level cp stats object. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_open(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_cp_stats_close(): API to close cp stats component + * @psoc: pointer to psoc + * + * This API is invoked from dispatcher psoc close. + * This API will de-initialize psoc level cp stats object. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_close(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_cp_stats_enable(): API to enable cp stats component + * @psoc: pointer to psoc + * + * This API is invoked from dispatcher psoc enable. + * This API will register cp_stats WMI event handlers. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_enable(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_cp_stats_disable(): API to disable cp stats component + * @psoc: pointer to psoc + * + * This API is invoked from dispatcher psoc disable. + * This API will unregister cp_stats WMI event handlers. 
+ * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_disable(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_cp_stats_comp_obj_cfg() - public API to umac for + * attach/detach + * component specific stat obj to cp stats obj + * @obj_type: common object type + * @cfg_state: config state either to attach of detach + * @comp_id: umac component id + * @cmn_obj: pointer to common object + * @comp_priv_obj: pointer to component specific cp stats object + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wlan_cp_stats_comp_obj_cfg( + enum wlan_objmgr_obj_type obj_type, + enum wlan_cp_stats_cfg_state cfg_state, + enum wlan_umac_comp_id comp_id, + void *cmn_obj, + void *data); + +#endif /* QCA_SUPPORT_CP_STATS */ +#endif /* __WLAN_CP_STATS_UTILS_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_chan_info_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_chan_info_api.c new file mode 100644 index 0000000000000000000000000000000000000000..dc6a1909ebd3c84feab9390b8821a616c4dd695f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_chan_info_api.c @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_cp_stats_chan_info_api.c + * + * This header file declare APIs and defines structure for channel information + */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ic_acs_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ic_acs_api.c new file mode 100644 index 0000000000000000000000000000000000000000..7e93ba929a2675f89935c96398b564ebccd2d5f2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ic_acs_api.c @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cp_stats_ic_acs_api.c + * + * This file holds definition for APIs of ACS specific to ic + */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ic_dcs_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ic_dcs_api.c new file mode 100644 index 0000000000000000000000000000000000000000..2abd7338645789adb7a0e8dcf419b345ef18cf8f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ic_dcs_api.c @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cp_stats_ic_dcs_api.c + * + * This file provides definitions for APIs exposed to get and set DCS related + * control plane statistics + */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ic_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ic_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..164372897a98cfc0705786381580ea4a98c0fa76 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ic_tgt_api.c @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC:wlan_cp_stats_ic_tgt_api.c + * + * This file provide API definitions to update control plane statistics received + * from southbound interface + */ +#include +#include + +void tgt_cp_stats_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops) +{ +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ic_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ic_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..a8a8ca582dbda82bb363597d3b6f2e562cfce53d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ic_ucfg_api.c @@ -0,0 +1,348 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC:wlan_cp_stats_ic_ucfg_api.c + * + * This file provide APIs definition for registering cp stats cfg80211 command + * handlers + */ +#include +#include +#ifdef WLAN_ATF_ENABLE +#include +#endif +#include +#include +#include "../../core/src/wlan_cp_stats_cmn_api_i.h" +#include + +QDF_STATUS wlan_cp_stats_psoc_cs_init(struct psoc_cp_stats *psoc_cs) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_psoc_cs_deinit(struct psoc_cp_stats *psoc_cs) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_pdev_cs_init(struct pdev_cp_stats *pdev_cs) +{ + pdev_cs->pdev_stats = qdf_mem_malloc(sizeof(struct pdev_ic_cp_stats)); + if (!pdev_cs->pdev_stats) { + cp_stats_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_pdev_cs_deinit(struct pdev_cp_stats *pdev_cs) +{ + qdf_mem_free(pdev_cs->pdev_stats); + pdev_cs->pdev_stats = NULL; + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_vdev_cs_init(struct vdev_cp_stats *vdev_cs) +{ + vdev_cs->vdev_stats = qdf_mem_malloc(sizeof(struct vdev_ic_cp_stats)); + if (!vdev_cs->vdev_stats) { + cp_stats_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_vdev_cs_deinit(struct vdev_cp_stats *vdev_cs) +{ + qdf_mem_free(vdev_cs->vdev_stats); + vdev_cs->vdev_stats = NULL; + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_peer_cs_init(struct peer_cp_stats *peer_cs) +{ + peer_cs->peer_stats = qdf_mem_malloc(sizeof(struct peer_ic_cp_stats)); + if (!peer_cs->peer_stats) { + cp_stats_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_peer_cs_deinit(struct peer_cp_stats *peer_cs) +{ + qdf_mem_free(peer_cs->peer_stats); + peer_cs->peer_stats = NULL; + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_ucfg_get_peer_cp_stats(struct wlan_objmgr_peer *peer, + struct peer_ic_cp_stats *peer_cps) +{ + struct 
peer_cp_stats *peer_cs; + + if (!peer) { + cp_stats_err("Invalid input fields, peer obj is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (!peer_cps) { + cp_stats_err("Invalid input fields, peer cp obj is NULL"); + return QDF_STATUS_E_INVAL; + } + + peer_cs = wlan_cp_stats_get_peer_stats_obj(peer); + if (peer_cs && peer_cs->peer_stats) { + wlan_cp_stats_peer_obj_lock(peer_cs); + qdf_mem_copy(peer_cps, peer_cs->peer_stats, + sizeof(struct peer_ic_cp_stats)); + wlan_cp_stats_peer_obj_unlock(peer_cs); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wlan_ucfg_get_vdev_cp_stats(struct wlan_objmgr_vdev *vdev, + struct vdev_ic_cp_stats *vdev_cps) +{ + struct vdev_cp_stats *vdev_cs; + + if (!vdev) { + cp_stats_err("Invalid input, vdev obj is null"); + return QDF_STATUS_E_INVAL; + } + + if (!vdev_cps) { + cp_stats_err("Invalid input, vdev cp obj is null"); + return QDF_STATUS_E_INVAL; + } + + vdev_cs = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (vdev_cs && vdev_cs->vdev_stats) { + wlan_cp_stats_vdev_obj_lock(vdev_cs); + qdf_mem_copy(vdev_cps, vdev_cs->vdev_stats, + sizeof(*vdev_cps)); + wlan_cp_stats_vdev_obj_unlock(vdev_cs); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wlan_ucfg_get_pdev_cp_stats(struct wlan_objmgr_pdev *pdev, + struct pdev_ic_cp_stats *pdev_cps) +{ + struct pdev_cp_stats *pdev_cs; + + if (!pdev) { + cp_stats_err("Invalid input, pdev obj is null"); + return QDF_STATUS_E_INVAL; + } + + if (!pdev_cps) { + cp_stats_err("Invalid input, pdev cp obj is null"); + return QDF_STATUS_E_INVAL; + } + + pdev_cs = wlan_cp_stats_get_pdev_stats_obj(pdev); + if (pdev_cs && pdev_cs->pdev_stats) { + wlan_cp_stats_pdev_obj_lock(pdev_cs); + qdf_mem_copy(pdev_cps, pdev_cs->pdev_stats, + sizeof(*pdev_cps)); + wlan_cp_stats_pdev_obj_unlock(pdev_cs); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wlan_ucfg_get_pdev_hw_cp_stats(struct wlan_objmgr_pdev *pdev, + struct 
pdev_hw_stats *hw_stats) +{ + struct pdev_cp_stats *pdev_cs; + struct pdev_ic_cp_stats *pdev_cps; + + if (!pdev) { + cp_stats_err("Invalid input, pdev obj is null"); + return QDF_STATUS_E_INVAL; + } + + if (!hw_stats) { + cp_stats_err("Invalid input, pdev hw_stats is null"); + return QDF_STATUS_E_INVAL; + } + + pdev_cs = wlan_cp_stats_get_pdev_stats_obj(pdev); + if (pdev_cs && pdev_cs->pdev_stats) { + pdev_cps = pdev_cs->pdev_stats; + wlan_cp_stats_pdev_obj_lock(pdev_cs); + qdf_mem_copy(hw_stats, &pdev_cps->stats.hw_stats, + sizeof(*hw_stats)); + wlan_cp_stats_pdev_obj_unlock(pdev_cs); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wlan_ucfg_set_pdev_hw_cp_stats(struct wlan_objmgr_pdev *pdev, + struct pdev_hw_stats *hw_stats) +{ + struct pdev_cp_stats *pdev_cs; + struct pdev_ic_cp_stats *pdev_cps; + + if (!pdev) { + cp_stats_err("Invalid input, pdev obj is null"); + return QDF_STATUS_E_INVAL; + } + + if (!hw_stats) { + cp_stats_err("Invalid input, pdev hw_stats is null"); + return QDF_STATUS_E_INVAL; + } + + pdev_cs = wlan_cp_stats_get_pdev_stats_obj(pdev); + if (pdev_cs && pdev_cs->pdev_stats) { + pdev_cps = pdev_cs->pdev_stats; + wlan_cp_stats_pdev_obj_lock(pdev_cs); + qdf_mem_copy(&pdev_cps->stats.hw_stats, hw_stats, + sizeof(*hw_stats)); + wlan_cp_stats_pdev_obj_unlock(pdev_cs); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} + +struct pdev_ic_cp_stats +*wlan_ucfg_get_pdev_cp_stats_ref(struct wlan_objmgr_pdev *pdev) +{ + struct pdev_cp_stats *pdev_cs = NULL; + + if (!pdev) { + cp_stats_err("pdev is null"); + return NULL; + } + + pdev_cs = wlan_cp_stats_get_pdev_stats_obj(pdev); + if (pdev_cs && pdev_cs->pdev_stats) + return pdev_cs->pdev_stats; + + return NULL; +} + +struct pdev_ic_cp_stats +*wlan_get_pdev_cp_stats_ref(struct wlan_objmgr_pdev *pdev) +{ + return wlan_ucfg_get_pdev_cp_stats_ref(pdev); +} + +qdf_export_symbol(wlan_get_pdev_cp_stats_ref); + +#ifdef WLAN_ATF_ENABLE +QDF_STATUS 
+wlan_ucfg_get_atf_peer_cp_stats(struct wlan_objmgr_peer *peer, + struct atf_peer_cp_stats *atf_cps) +{ + struct peer_cp_stats *peer_cs; + + if (!peer) { + cp_stats_err("Invalid input, peer obj is null"); + return QDF_STATUS_E_INVAL; + } + + if (!atf_cps) { + cp_stats_err("Invalid input, ATF cp stats obj is null"); + return QDF_STATUS_E_INVAL; + } + + peer_cs = wlan_cp_stats_get_peer_stats_obj(peer); + if (peer_cs) { + if (peer_cs->peer_comp_priv_obj[WLAN_CP_STATS_ATF]) { + wlan_cp_stats_peer_obj_lock(peer_cs); + qdf_mem_copy(atf_cps, + peer_cs->peer_comp_priv_obj[WLAN_CP_STATS_ATF], + sizeof(*atf_cps)); + wlan_cp_stats_peer_obj_unlock(peer_cs); + return QDF_STATUS_SUCCESS; + } + } + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wlan_ucfg_get_atf_peer_cp_stats_from_mac(struct wlan_objmgr_vdev *vdev, + uint8_t *mac, + struct atf_peer_cp_stats *astats) +{ + struct wlan_objmgr_peer *peer; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + QDF_STATUS status; + + if (!vdev) { + cp_stats_err("vdev object is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (!mac) { + cp_stats_err("peer mac address is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (!astats) { + cp_stats_err("atf peer stats obj is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + cp_stats_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + cp_stats_err("pdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + peer = wlan_objmgr_get_peer(psoc, wlan_objmgr_pdev_get_pdev_id(pdev), + mac, WLAN_CP_STATS_ID); + if (!peer) { + cp_stats_err("peer is NULL"); + return QDF_STATUS_E_INVAL; + } + + status = wlan_ucfg_get_atf_peer_cp_stats(peer, astats); + wlan_objmgr_peer_release_ref(peer, WLAN_CP_STATS_ID); + + return status; +} +#endif + +QDF_STATUS +wlan_ucfg_get_dcs_chan_stats(struct wlan_objmgr_pdev *pdev, + struct pdev_dcs_chan_stats *dcs_chan_stats) +{ + return QDF_STATUS_E_INVAL; +} diff --git 
a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_mc_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_mc_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..085907f21f16e1c4ef22103129527af0bebe26c9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_mc_tgt_api.c @@ -0,0 +1,777 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC:wlan_cp_stats_mc_tgt_api.c + * + * This file provide API definitions to update control plane statistics received + * from southbound interface + */ + +#include "wlan_cp_stats_mc_defs.h" +#include "target_if_cp_stats.h" +#include "wlan_cp_stats_tgt_api.h" +#include "wlan_cp_stats_mc_tgt_api.h" +#include +#include +#include "../../core/src/wlan_cp_stats_defs.h" + +void tgt_cp_stats_register_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops) +{ + rx_ops->cp_stats_rx_ops.process_stats_event = + tgt_mc_cp_stats_process_stats_event; +} + +static void tgt_mc_cp_stats_extract_tx_power(struct wlan_objmgr_psoc *psoc, + struct stats_event *ev, + bool is_station_stats) +{ + int32_t max_pwr; + uint8_t pdev_id; + QDF_STATUS status; + struct wlan_objmgr_pdev *pdev; + struct request_info last_req = {0}; + struct wlan_objmgr_vdev *vdev = NULL; + struct pdev_mc_cp_stats *pdev_mc_stats; + struct pdev_cp_stats *pdev_cp_stats_priv; + + if (!ev->pdev_stats) { + cp_stats_err("no pdev stats"); + return; + } + + if (is_station_stats) + status = ucfg_mc_cp_stats_get_pending_req(psoc, + TYPE_STATION_STATS, &last_req); + else + status = ucfg_mc_cp_stats_get_pending_req(psoc, + TYPE_CONNECTION_TX_POWER, &last_req); + + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("ucfg_mc_cp_stats_get_pending_req failed"); + goto end; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, last_req.vdev_id, + WLAN_CP_STATS_ID); + if (!vdev) { + cp_stats_err("vdev is null"); + goto end; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + cp_stats_err("pdev is null"); + goto end; + } + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + if (pdev_id >= ev->num_pdev_stats) { + cp_stats_err("pdev_id: %d invalid", pdev_id); + goto end; + } + + pdev_cp_stats_priv = wlan_cp_stats_get_pdev_stats_obj(pdev); + if (!pdev_cp_stats_priv) { + cp_stats_err("pdev_cp_stats_priv is null"); + goto end; + } + + wlan_cp_stats_pdev_obj_lock(pdev_cp_stats_priv); + pdev_mc_stats = pdev_cp_stats_priv->pdev_stats; 
+ max_pwr = pdev_mc_stats->max_pwr = ev->pdev_stats[pdev_id].max_pwr; + wlan_cp_stats_pdev_obj_unlock(pdev_cp_stats_priv); + + if (is_station_stats) + goto end; + + ucfg_mc_cp_stats_reset_pending_req(psoc, TYPE_CONNECTION_TX_POWER); + if (last_req.u.get_tx_power_cb) + last_req.u.get_tx_power_cb(max_pwr, last_req.cookie); + +end: + if (vdev) + wlan_objmgr_vdev_release_ref(vdev, WLAN_CP_STATS_ID); +} + +static void peer_rssi_iterator(struct wlan_objmgr_pdev *pdev, + void *peer, void *arg) +{ + struct stats_event *ev; + struct peer_mc_cp_stats *peer_mc_stats; + struct peer_cp_stats *peer_cp_stats_priv; + + if (WLAN_PEER_SELF == wlan_peer_get_peer_type(peer)) { + cp_stats_err("ignore self peer: %pM", + wlan_peer_get_macaddr(peer)); + return; + } + + peer_cp_stats_priv = wlan_cp_stats_get_peer_stats_obj(peer); + if (!peer_cp_stats_priv) { + cp_stats_err("peer cp stats object is null"); + return; + } + + wlan_cp_stats_peer_obj_lock(peer_cp_stats_priv); + peer_mc_stats = peer_cp_stats_priv->peer_stats; + ev = arg; + ev->peer_stats[ev->num_peer_stats] = *peer_mc_stats; + ev->num_peer_stats++; + wlan_cp_stats_peer_obj_unlock(peer_cp_stats_priv); +} + +static void +tgt_mc_cp_stats_prepare_raw_peer_rssi(struct wlan_objmgr_psoc *psoc, + struct request_info *last_req) +{ + uint8_t *mac_addr; + uint16_t peer_count; + struct stats_event ev = {0}; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_peer *peer = NULL; + struct peer_mc_cp_stats *peer_mc_stats; + struct peer_cp_stats *peer_cp_stats_priv; + void (*get_peer_rssi_cb)(struct stats_event *ev, void *cookie); + + get_peer_rssi_cb = last_req->u.get_peer_rssi_cb; + if (!get_peer_rssi_cb) { + cp_stats_err("get_peer_rssi_cb is null"); + return; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, last_req->vdev_id, + WLAN_CP_STATS_ID); + if (!vdev) { + cp_stats_err("vdev is null"); + goto end; + } + + mac_addr = last_req->peer_mac_addr; + if (QDF_IS_ADDR_BROADCAST(mac_addr)) { + pdev = 
wlan_vdev_get_pdev(vdev); + peer_count = wlan_pdev_get_peer_count(pdev); + ev.peer_stats = qdf_mem_malloc(sizeof(*ev.peer_stats) * + peer_count); + if (!ev.peer_stats) { + cp_stats_err("malloc failed"); + goto end; + } + + wlan_objmgr_pdev_iterate_obj_list(pdev, WLAN_PEER_OP, + peer_rssi_iterator, &ev, + true, WLAN_CP_STATS_ID); + } else { + peer = wlan_objmgr_get_peer(psoc, last_req->pdev_id, + mac_addr, WLAN_CP_STATS_ID); + if (!peer) { + cp_stats_err("peer[%pM] is null", mac_addr); + goto end; + } + + peer_cp_stats_priv = wlan_cp_stats_get_peer_stats_obj(peer); + if (!peer_cp_stats_priv) { + cp_stats_err("peer cp stats object is null"); + goto end; + } + + ev.peer_stats = qdf_mem_malloc(sizeof(*ev.peer_stats)); + if (!ev.peer_stats) { + cp_stats_err("malloc failed"); + goto end; + } + + ev.num_peer_stats = 1; + wlan_cp_stats_peer_obj_lock(peer_cp_stats_priv); + peer_mc_stats = peer_cp_stats_priv->peer_stats; + *ev.peer_stats = *peer_mc_stats; + wlan_cp_stats_peer_obj_unlock(peer_cp_stats_priv); + } + +end: + if (ev.peer_stats) + get_peer_rssi_cb(&ev, last_req->cookie); + + ucfg_mc_cp_stats_free_stats_resources(&ev); + + if (vdev) + wlan_objmgr_vdev_release_ref(vdev, WLAN_CP_STATS_ID); + if (peer) + wlan_objmgr_peer_release_ref(peer, WLAN_CP_STATS_ID); +} + +static QDF_STATUS +tgt_mc_cp_stats_update_peer_adv_stats(struct wlan_objmgr_psoc *psoc, + struct peer_adv_mc_cp_stats + *peer_adv_stats, uint32_t size) +{ + uint8_t *peer_mac_addr; + struct wlan_objmgr_peer *peer; + struct peer_adv_mc_cp_stats *peer_adv_mc_stats; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct peer_cp_stats *peer_cp_stats_priv; + + if (!peer_adv_stats) + return QDF_STATUS_E_INVAL; + + peer_mac_addr = peer_adv_stats->peer_macaddr; + peer = wlan_objmgr_get_peer_by_mac(psoc, peer_mac_addr, + WLAN_CP_STATS_ID); + if (!peer) { + cp_stats_err("peer is null"); + return QDF_STATUS_E_EXISTS; + } + peer_cp_stats_priv = wlan_cp_stats_get_peer_stats_obj(peer); + if (!peer_cp_stats_priv) { + 
cp_stats_err("peer_cp_stats_priv is null"); + status = QDF_STATUS_E_EXISTS; + goto end; + } + wlan_cp_stats_peer_obj_lock(peer_cp_stats_priv); + peer_adv_mc_stats = peer_cp_stats_priv->peer_adv_stats; + + qdf_mem_copy(peer_adv_mc_stats->peer_macaddr, + peer_adv_stats->peer_macaddr, + WLAN_MACADDR_LEN); + if (peer_adv_stats->fcs_count) + peer_adv_mc_stats->fcs_count = peer_adv_stats->fcs_count; + if (peer_adv_stats->rx_bytes) + peer_adv_mc_stats->rx_bytes = peer_adv_stats->rx_bytes; + if (peer_adv_stats->rx_count) + peer_adv_mc_stats->rx_count = peer_adv_stats->rx_count; + wlan_cp_stats_peer_obj_unlock(peer_cp_stats_priv); + +end: + if (peer) + wlan_objmgr_peer_release_ref(peer, WLAN_CP_STATS_ID); + + return status; +} + +static QDF_STATUS +tgt_mc_cp_stats_update_peer_stats(struct wlan_objmgr_psoc *psoc, + struct peer_mc_cp_stats *peer_stats) +{ + uint8_t *peer_mac_addr; + struct wlan_objmgr_peer *peer; + struct peer_mc_cp_stats *peer_mc_stats; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct peer_cp_stats *peer_cp_stats_priv; + + if (!peer_stats) + return QDF_STATUS_E_INVAL; + + peer_mac_addr = peer_stats->peer_macaddr; + peer = wlan_objmgr_get_peer_by_mac(psoc, peer_mac_addr, + WLAN_CP_STATS_ID); + if (!peer) { + cp_stats_err("peer is null"); + return QDF_STATUS_E_EXISTS; + } + + peer_cp_stats_priv = wlan_cp_stats_get_peer_stats_obj(peer); + if (!peer_cp_stats_priv) { + cp_stats_err("peer_cp_stats_priv is null"); + status = QDF_STATUS_E_EXISTS; + goto end; + } + + wlan_cp_stats_peer_obj_lock(peer_cp_stats_priv); + peer_mc_stats = peer_cp_stats_priv->peer_stats; + qdf_mem_copy(peer_mc_stats->peer_macaddr, + peer_stats->peer_macaddr, + WLAN_MACADDR_LEN); + if (peer_stats->tx_rate) + peer_mc_stats->tx_rate = peer_stats->tx_rate; + if (peer_stats->rx_rate) + peer_mc_stats->rx_rate = peer_stats->rx_rate; + if (peer_stats->peer_rssi) + peer_mc_stats->peer_rssi = peer_stats->peer_rssi; + + cp_stats_debug("peer_mac=%pM, tx_rate=%u, rx_rate=%u, peer_rssi=%u", + 
peer_mc_stats->peer_macaddr, peer_mc_stats->tx_rate, + peer_mc_stats->rx_rate, peer_mc_stats->peer_rssi); + wlan_cp_stats_peer_obj_unlock(peer_cp_stats_priv); + +end: + if (peer) + wlan_objmgr_peer_release_ref(peer, WLAN_CP_STATS_ID); + + return status; +} + +static void tgt_mc_cp_stats_extract_peer_stats(struct wlan_objmgr_psoc *psoc, + struct stats_event *ev, + bool is_station_stats) +{ + uint32_t i; + QDF_STATUS status; + struct request_info last_req = {0}; + uint32_t selected; + + + if (is_station_stats) + status = ucfg_mc_cp_stats_get_pending_req(psoc, + TYPE_STATION_STATS, + &last_req); + else + status = ucfg_mc_cp_stats_get_pending_req(psoc, + TYPE_PEER_STATS, + &last_req); + + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("ucfg_mc_cp_stats_get_pending_req failed"); + return; + } + + if (!ev->peer_stats) { + cp_stats_debug("no peer stats"); + goto extd2_stats; + } + + selected = ev->num_peer_stats; + for (i = 0; i < ev->num_peer_stats; i++) { + status = tgt_mc_cp_stats_update_peer_stats(psoc, + &ev->peer_stats[i]); + if (!QDF_IS_ADDR_BROADCAST(last_req.peer_mac_addr) && + !qdf_mem_cmp(ev->peer_stats[i].peer_macaddr, + last_req.peer_mac_addr, + WLAN_MACADDR_LEN)) { + /* mac is specified, but failed to update the peer */ + if (QDF_IS_STATUS_ERROR(status)) + return; + + selected = i; + } + } + + /* no matched peer */ + if (!QDF_IS_ADDR_BROADCAST(last_req.peer_mac_addr) && + selected == ev->num_peer_stats) { + cp_stats_err("peer not found for stats"); + } + +extd2_stats: + + if (!ev->peer_adv_stats) { + cp_stats_err("no peer_extd2 stats"); + goto complete; + } + selected = ev->num_peer_adv_stats; + for (i = 0; i < ev->num_peer_adv_stats; i++) { + status = tgt_mc_cp_stats_update_peer_adv_stats( + psoc, &ev->peer_adv_stats[i], + ev->num_peer_adv_stats); + if (!QDF_IS_ADDR_BROADCAST(last_req.peer_mac_addr) && + !qdf_mem_cmp(ev->peer_adv_stats[i].peer_macaddr, + last_req.peer_mac_addr, + WLAN_MACADDR_LEN)) { + /* mac is specified, but failed to update the peer 
*/ + if (QDF_IS_STATUS_ERROR(status)) + return; + + selected = i; + } + } + + /* no matched peer */ + if (!QDF_IS_ADDR_BROADCAST(last_req.peer_mac_addr) && + selected == ev->num_peer_adv_stats) { + cp_stats_err("peer not found for extd stats"); + return; + } + +complete: + if (is_station_stats) + return; + + tgt_mc_cp_stats_prepare_raw_peer_rssi(psoc, &last_req); + ucfg_mc_cp_stats_reset_pending_req(psoc, TYPE_PEER_STATS); +} + +static void tgt_mc_cp_stats_extract_cca_stats(struct wlan_objmgr_psoc *psoc, + struct stats_event *ev) +{ + struct wlan_objmgr_vdev *vdev; + struct vdev_mc_cp_stats *vdev_mc_stats; + struct vdev_cp_stats *vdev_cp_stats_priv; + + if (!ev->cca_stats) + return; + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + ev->cca_stats->vdev_id, + WLAN_CP_STATS_ID); + if (!vdev) { + cp_stats_err("vdev is null"); + return; + } + + vdev_cp_stats_priv = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cp_stats_priv) { + cp_stats_err("vdev cp stats object is null"); + goto end; + } + + wlan_cp_stats_vdev_obj_lock(vdev_cp_stats_priv); + vdev_mc_stats = vdev_cp_stats_priv->vdev_stats; + vdev_mc_stats->cca.congestion = ev->cca_stats->congestion; + wlan_cp_stats_vdev_obj_unlock(vdev_cp_stats_priv); + +end: + wlan_objmgr_vdev_release_ref(vdev, WLAN_CP_STATS_ID); +} + +static void tgt_mc_cp_stats_extract_vdev_summary_stats( + struct wlan_objmgr_psoc *psoc, + struct stats_event *ev) +{ + uint8_t i; + QDF_STATUS status; + struct wlan_objmgr_peer *peer = NULL; + struct request_info last_req = {0}; + struct wlan_objmgr_vdev *vdev; + struct peer_mc_cp_stats *peer_mc_stats; + struct vdev_mc_cp_stats *vdev_mc_stats; + struct peer_cp_stats *peer_cp_stats_priv; + struct vdev_cp_stats *vdev_cp_stats_priv; + + if (!ev->vdev_summary_stats) { + cp_stats_err("no summary stats"); + return; + } + + status = ucfg_mc_cp_stats_get_pending_req(psoc, + TYPE_STATION_STATS, + &last_req); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("ucfg_mc_cp_stats_get_pending_req 
failed"); + return; + } + + for (i = 0; i < ev->num_summary_stats; i++) { + if (ev->vdev_summary_stats[i].vdev_id == last_req.vdev_id) + break; + } + + if (i == ev->num_summary_stats) { + cp_stats_err("vdev_id %d not found", last_req.vdev_id); + return; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, last_req.vdev_id, + WLAN_CP_STATS_ID); + if (!vdev) { + cp_stats_err("vdev is null"); + return; + } + + vdev_cp_stats_priv = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cp_stats_priv) { + cp_stats_err("vdev cp stats object is null"); + goto end; + } + + wlan_cp_stats_vdev_obj_lock(vdev_cp_stats_priv); + vdev_mc_stats = vdev_cp_stats_priv->vdev_stats; + qdf_mem_copy(&vdev_mc_stats->vdev_summary_stats, + &ev->vdev_summary_stats[i].stats, + sizeof(vdev_mc_stats->vdev_summary_stats)); + wlan_cp_stats_vdev_obj_unlock(vdev_cp_stats_priv); + + peer = wlan_objmgr_get_peer(psoc, last_req.pdev_id, + last_req.peer_mac_addr, WLAN_CP_STATS_ID); + if (!peer) { + cp_stats_err("peer is null %pM", last_req.peer_mac_addr); + goto end; + } + + peer_cp_stats_priv = wlan_cp_stats_get_peer_stats_obj(peer); + if (!peer_cp_stats_priv) { + cp_stats_err("peer cp stats object is null"); + goto end; + } + + wlan_cp_stats_peer_obj_lock(peer_cp_stats_priv); + peer_mc_stats = peer_cp_stats_priv->peer_stats; + peer_mc_stats->peer_rssi = ev->vdev_summary_stats[i].stats.rssi; + wlan_cp_stats_peer_obj_unlock(peer_cp_stats_priv); + +end: + if (peer) + wlan_objmgr_peer_release_ref(peer, WLAN_CP_STATS_ID); + wlan_objmgr_vdev_release_ref(vdev, WLAN_CP_STATS_ID); +} + +static void tgt_mc_cp_stats_extract_vdev_chain_rssi_stats( + struct wlan_objmgr_psoc *psoc, + struct stats_event *ev) +{ + uint8_t i, j; + QDF_STATUS status; + struct request_info last_req = {0}; + struct wlan_objmgr_vdev *vdev; + struct vdev_mc_cp_stats *vdev_mc_stats; + struct vdev_cp_stats *vdev_cp_stats_priv; + + if (!ev->vdev_chain_rssi) { + cp_stats_err("no vdev chain rssi stats"); + return; + } + + status = 
ucfg_mc_cp_stats_get_pending_req(psoc, + TYPE_STATION_STATS, + &last_req); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("ucfg_mc_cp_stats_get_pending_req failed"); + return; + } + + for (i = 0; i < ev->num_chain_rssi_stats; i++) { + if (ev->vdev_chain_rssi[i].vdev_id == last_req.vdev_id) + break; + } + + if (i == ev->num_chain_rssi_stats) { + cp_stats_err("vdev_id %d not found", last_req.vdev_id); + return; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, last_req.vdev_id, + WLAN_CP_STATS_ID); + if (!vdev) { + cp_stats_err("vdev is null"); + return; + } + + vdev_cp_stats_priv = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cp_stats_priv) { + cp_stats_err("vdev cp stats object is null"); + goto end; + } + + wlan_cp_stats_vdev_obj_lock(vdev_cp_stats_priv); + vdev_mc_stats = vdev_cp_stats_priv->vdev_stats; + for (j = 0; j < MAX_NUM_CHAINS; j++) { + vdev_mc_stats->chain_rssi[j] = + ev->vdev_chain_rssi[i].chain_rssi[j]; + } + wlan_cp_stats_vdev_obj_unlock(vdev_cp_stats_priv); + +end: + wlan_objmgr_vdev_release_ref(vdev, WLAN_CP_STATS_ID); +} + +static void +tgt_mc_cp_stats_prepare_n_send_raw_station_stats(struct wlan_objmgr_psoc *psoc, + struct request_info *last_req) +{ + /* station_stats to be given to userspace thread */ + struct stats_event info = {0}; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_peer *peer; + struct peer_mc_cp_stats *peer_mc_stats; + struct vdev_mc_cp_stats *vdev_mc_stats; + struct peer_cp_stats *peer_cp_stats_priv; + struct vdev_cp_stats *vdev_cp_stats_priv; + void (*get_station_stats_cb)(struct stats_event *info, void *cookie); + + get_station_stats_cb = last_req->u.get_station_stats_cb; + if (!get_station_stats_cb) { + cp_stats_err("callback is null"); + return; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, last_req->vdev_id, + WLAN_CP_STATS_ID); + if (!vdev) { + cp_stats_err("vdev object is null"); + return; + } + + peer = wlan_objmgr_get_peer(psoc, last_req->pdev_id, + last_req->peer_mac_addr, 
WLAN_CP_STATS_ID); + if (!peer) { + cp_stats_err("peer object is null"); + goto end; + } + + vdev_cp_stats_priv = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cp_stats_priv) { + cp_stats_err("vdev cp stats object is null"); + goto end; + } + + peer_cp_stats_priv = wlan_cp_stats_get_peer_stats_obj(peer); + if (!peer_cp_stats_priv) { + cp_stats_err("peer cp stats object is null"); + goto end; + } + + info.num_summary_stats = 1; + info.vdev_summary_stats = qdf_mem_malloc( + sizeof(*info.vdev_summary_stats)); + if (!info.vdev_summary_stats) + goto end; + + info.num_chain_rssi_stats = 1; + info.vdev_chain_rssi = qdf_mem_malloc(sizeof(*info.vdev_chain_rssi)); + if (!info.vdev_chain_rssi) + goto end; + + wlan_cp_stats_vdev_obj_lock(vdev_cp_stats_priv); + vdev_mc_stats = vdev_cp_stats_priv->vdev_stats; + info.vdev_summary_stats[0].vdev_id = last_req->vdev_id; + info.vdev_summary_stats[0].stats = vdev_mc_stats->vdev_summary_stats; + info.vdev_chain_rssi[0].vdev_id = last_req->vdev_id; + qdf_mem_copy(info.vdev_chain_rssi[0].chain_rssi, + vdev_mc_stats->chain_rssi, + sizeof(vdev_mc_stats->chain_rssi)); + info.tx_rate_flags = vdev_mc_stats->tx_rate_flags; + wlan_cp_stats_vdev_obj_unlock(vdev_cp_stats_priv); + + info.peer_adv_stats = qdf_mem_malloc(sizeof(*info.peer_adv_stats)); + if (!info.peer_adv_stats) + goto end; + + wlan_cp_stats_peer_obj_lock(peer_cp_stats_priv); + peer_mc_stats = peer_cp_stats_priv->peer_stats; + /* + * The linkspeed returned by fw is in kbps so convert + * it in units of 100kbps which is expected by UMAC + */ + info.tx_rate = peer_mc_stats->tx_rate / 100; + info.rx_rate = peer_mc_stats->rx_rate / 100; + + if (peer_cp_stats_priv->peer_adv_stats) { + info.num_peer_adv_stats = 1; + qdf_mem_copy(info.peer_adv_stats, + peer_cp_stats_priv->peer_adv_stats, + sizeof(*peer_cp_stats_priv->peer_adv_stats)); + } + + wlan_cp_stats_peer_obj_unlock(peer_cp_stats_priv); + +end: + if (info.vdev_summary_stats && info.vdev_chain_rssi) + get_station_stats_cb(&info,
last_req->cookie); + + ucfg_mc_cp_stats_free_stats_resources(&info); + + if (peer) + wlan_objmgr_peer_release_ref(peer, WLAN_CP_STATS_ID); + + wlan_objmgr_vdev_release_ref(vdev, WLAN_CP_STATS_ID); +} + +static void tgt_mc_cp_stats_extract_station_stats( + struct wlan_objmgr_psoc *psoc, + struct stats_event *ev) +{ + QDF_STATUS status; + bool is_last_event; + struct request_info last_req = {0}; + + if (IS_MSB_SET(ev->last_event)) + is_last_event = IS_LSB_SET(ev->last_event); + else + is_last_event = !!ev->peer_stats; + + status = ucfg_mc_cp_stats_get_pending_req(psoc, + TYPE_STATION_STATS, + &last_req); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("ucfg_mc_cp_stats_get_pending_req failed"); + return; + } + + tgt_mc_cp_stats_extract_tx_power(psoc, ev, true); + tgt_mc_cp_stats_extract_peer_stats(psoc, ev, true); + tgt_mc_cp_stats_extract_vdev_summary_stats(psoc, ev); + tgt_mc_cp_stats_extract_vdev_chain_rssi_stats(psoc, ev); + + /* + * PEER stats are the last stats sent for get_station statistics. + * reset type_map bit for station stats . 
+ */ + if (is_last_event) { + tgt_mc_cp_stats_prepare_n_send_raw_station_stats(psoc, + &last_req); + ucfg_mc_cp_stats_reset_pending_req(psoc, TYPE_STATION_STATS); + } +} + +static void tgt_mc_cp_send_lost_link_stats(struct wlan_objmgr_psoc *psoc, + struct stats_event *ev) +{ + struct psoc_cp_stats *psoc_cp_stats_priv; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (psoc_cp_stats_priv && psoc_cp_stats_priv->legacy_stats_cb) + psoc_cp_stats_priv->legacy_stats_cb(ev); +} + +QDF_STATUS tgt_mc_cp_stats_process_stats_event(struct wlan_objmgr_psoc *psoc, + struct stats_event *ev) +{ + if (ucfg_mc_cp_stats_is_req_pending(psoc, TYPE_CONNECTION_TX_POWER)) + tgt_mc_cp_stats_extract_tx_power(psoc, ev, false); + + if (ucfg_mc_cp_stats_is_req_pending(psoc, TYPE_PEER_STATS)) + tgt_mc_cp_stats_extract_peer_stats(psoc, ev, false); + + if (ucfg_mc_cp_stats_is_req_pending(psoc, TYPE_STATION_STATS)) + tgt_mc_cp_stats_extract_station_stats(psoc, ev); + + tgt_mc_cp_stats_extract_cca_stats(psoc, ev); + + tgt_mc_cp_send_lost_link_stats(psoc, ev); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_mc_cp_stats_inc_wake_lock_stats(struct wlan_objmgr_psoc *psoc, + uint32_t reason, + struct wake_lock_stats *stats, + uint32_t *unspecified_wake_count) +{ + struct wlan_lmac_if_cp_stats_tx_ops *tx_ops; + + tx_ops = target_if_cp_stats_get_tx_ops(psoc); + if (!tx_ops) + return QDF_STATUS_E_NULL_VALUE; + + tx_ops->inc_wake_lock_stats(reason, stats, unspecified_wake_count); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_send_mc_cp_stats_req(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type, + struct request_info *req) +{ + struct wlan_lmac_if_cp_stats_tx_ops *tx_ops; + + tx_ops = target_if_cp_stats_get_tx_ops(psoc); + if (!tx_ops || !tx_ops->send_req_stats) { + cp_stats_err("could not get tx_ops"); + return QDF_STATUS_E_NULL_VALUE; + } + + return tx_ops->send_req_stats(psoc, type, req); +} diff --git 
a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_mc_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_mc_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..7b5b1a25cd52b26b168b75ad5173a37b71bca1b2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_mc_ucfg_api.c @@ -0,0 +1,632 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cp_stats_mc_ucfg_api.c + * + * This file provide API definitions required for northbound interaction + */ + +#include +#include "wlan_cp_stats_mc_defs.h" +#include +#include +#include +#include "../../core/src/wlan_cp_stats_defs.h" +#include "../../core/src/wlan_cp_stats_defs.h" +#include "../../core/src/wlan_cp_stats_cmn_api_i.h" + +QDF_STATUS wlan_cp_stats_psoc_cs_init(struct psoc_cp_stats *psoc_cs) +{ + psoc_cs->obj_stats = qdf_mem_malloc(sizeof(struct psoc_mc_cp_stats)); + if (!psoc_cs->obj_stats) { + cp_stats_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_psoc_cs_deinit(struct psoc_cp_stats *psoc_cs) +{ + qdf_mem_free(psoc_cs->obj_stats); + psoc_cs->obj_stats = NULL; + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_vdev_cs_init(struct vdev_cp_stats *vdev_cs) +{ + vdev_cs->vdev_stats = qdf_mem_malloc(sizeof(struct vdev_mc_cp_stats)); + if (!vdev_cs->vdev_stats) { + cp_stats_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_vdev_cs_deinit(struct vdev_cp_stats *vdev_cs) +{ + qdf_mem_free(vdev_cs->vdev_stats); + vdev_cs->vdev_stats = NULL; + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_pdev_cs_init(struct pdev_cp_stats *pdev_cs) +{ + pdev_cs->pdev_stats = qdf_mem_malloc(sizeof(struct pdev_mc_cp_stats)); + if (!pdev_cs->pdev_stats) { + cp_stats_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_pdev_cs_deinit(struct pdev_cp_stats *pdev_cs) +{ + qdf_mem_free(pdev_cs->pdev_stats); + pdev_cs->pdev_stats = NULL; + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_peer_cs_init(struct peer_cp_stats *peer_cs) +{ + peer_cs->peer_stats = qdf_mem_malloc(sizeof(struct peer_mc_cp_stats)); + if (!peer_cs->peer_stats) { + cp_stats_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + + peer_cs->peer_adv_stats = 
qdf_mem_malloc(sizeof + (struct peer_adv_mc_cp_stats)); + if (!peer_cs->peer_adv_stats) { + cp_stats_err("malloc failed"); + qdf_mem_free(peer_cs->peer_stats); + return QDF_STATUS_E_NOMEM; + } + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_cp_stats_peer_cs_deinit(struct peer_cp_stats *peer_cs) +{ + qdf_mem_free(peer_cs->peer_adv_stats); + peer_cs->peer_adv_stats = NULL; + qdf_mem_free(peer_cs->peer_stats); + peer_cs->peer_stats = NULL; + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_mc_cp_stats_inc_wake_lock_stats_by_protocol( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + enum qdf_proto_subtype protocol) +{ + struct wlan_objmgr_vdev *vdev; + struct wake_lock_stats *stats; + struct vdev_mc_cp_stats *vdev_mc_stats; + struct vdev_cp_stats *vdev_cp_stats_priv; + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, vdev_id, + WLAN_CP_STATS_ID); + if (!vdev) { + cp_stats_err("vdev numm for vdev_id: %d", vdev_id); + return QDF_STATUS_E_NULL_VALUE; + } + + vdev_cp_stats_priv = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cp_stats_priv) { + cp_stats_err("vdev cp stats object is null"); + wlan_objmgr_vdev_release_ref(vdev, WLAN_CP_STATS_ID); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_cp_stats_vdev_obj_lock(vdev_cp_stats_priv); + vdev_mc_stats = vdev_cp_stats_priv->vdev_stats; + stats = &vdev_mc_stats->wow_stats; + switch (protocol) { + case QDF_PROTO_ICMP_RES: + stats->icmpv4_count++; + break; + case QDF_PROTO_ICMPV6_REQ: + case QDF_PROTO_ICMPV6_RES: + case QDF_PROTO_ICMPV6_RS: + stats->icmpv6_count++; + break; + case QDF_PROTO_ICMPV6_RA: + stats->icmpv6_count++; + stats->ipv6_mcast_ra_stats++; + break; + case QDF_PROTO_ICMPV6_NS: + stats->icmpv6_count++; + stats->ipv6_mcast_ns_stats++; + break; + case QDF_PROTO_ICMPV6_NA: + stats->icmpv6_count++; + stats->ipv6_mcast_na_stats++; + break; + default: + break; + } + + wlan_cp_stats_vdev_obj_unlock(vdev_cp_stats_priv); + wlan_objmgr_vdev_release_ref(vdev, WLAN_CP_STATS_ID); + return QDF_STATUS_SUCCESS; 
+} + +QDF_STATUS ucfg_mc_cp_stats_inc_wake_lock_stats_by_dst_addr( + struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, uint8_t *dest_mac) +{ + struct wlan_objmgr_vdev *vdev; + struct wake_lock_stats *stats; + struct vdev_mc_cp_stats *vdev_mc_stats; + struct vdev_cp_stats *vdev_cp_stats_priv; + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, vdev_id, + WLAN_CP_STATS_ID); + if (!vdev) { + cp_stats_err("vdev numm for vdev_id: %d", vdev_id); + return QDF_STATUS_E_NULL_VALUE; + } + + vdev_cp_stats_priv = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cp_stats_priv) { + cp_stats_err("vdev cp stats object is null"); + wlan_objmgr_vdev_release_ref(vdev, WLAN_CP_STATS_ID); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_cp_stats_vdev_obj_lock(vdev_cp_stats_priv); + vdev_mc_stats = vdev_cp_stats_priv->vdev_stats; + stats = &vdev_mc_stats->wow_stats; + switch (*dest_mac) { + case QDF_BCAST_MAC_ADDR: + stats->bcast_wake_up_count++; + break; + case QDF_MCAST_IPV4_MAC_ADDR: + stats->ipv4_mcast_wake_up_count++; + break; + case QDF_MCAST_IPV6_MAC_ADDR: + stats->ipv6_mcast_wake_up_count++; + break; + default: + stats->ucast_wake_up_count++; + break; + } + + wlan_cp_stats_vdev_obj_unlock(vdev_cp_stats_priv); + wlan_objmgr_vdev_release_ref(vdev, WLAN_CP_STATS_ID); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_mc_cp_stats_inc_wake_lock_stats(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id, + uint32_t reason) +{ + struct wake_lock_stats *stats; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct wlan_objmgr_vdev *vdev = NULL; + struct psoc_mc_cp_stats *psoc_mc_stats; + struct psoc_cp_stats *psoc_cp_stats_priv; + struct vdev_mc_cp_stats *vdev_mc_stats; + struct vdev_cp_stats *vdev_cp_stats_priv; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, vdev_id, + WLAN_CP_STATS_ID); + if (!vdev) { + 
cp_stats_err("vdev numm for vdev_id: %d", vdev_id); + return QDF_STATUS_E_NULL_VALUE; + } + + vdev_cp_stats_priv = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cp_stats_priv) { + cp_stats_err("vdev cp stats object is null"); + status = QDF_STATUS_E_NULL_VALUE; + goto release_vdev_ref; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + wlan_cp_stats_vdev_obj_lock(vdev_cp_stats_priv); + + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + vdev_mc_stats = vdev_cp_stats_priv->vdev_stats; + stats = &vdev_mc_stats->wow_stats; + status = tgt_mc_cp_stats_inc_wake_lock_stats(psoc, reason, stats, + &psoc_mc_stats->wow_unspecified_wake_up_count); + wlan_cp_stats_vdev_obj_unlock(vdev_cp_stats_priv); + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + +release_vdev_ref: + if (vdev) + wlan_objmgr_vdev_release_ref(vdev, WLAN_CP_STATS_ID); + + return status; +} + +/** + * vdev_iterator() - iterator function to collect wake_lock_stats from all vdev + * @psoc: pointer to psoc object + * @vdev: pointer to vdev object + * @arg: stats object pointer passed as arg + * + * Return - none + */ +static void vdev_iterator(struct wlan_objmgr_psoc *psoc, void *vdev, void *arg) +{ + struct wake_lock_stats *vdev_stats; + struct wake_lock_stats *stats = arg; + struct vdev_mc_cp_stats *vdev_mc_stats; + struct vdev_cp_stats *vdev_cp_stats_priv; + + vdev_cp_stats_priv = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cp_stats_priv) { + cp_stats_err("vdev cp stats object is null"); + return; + } + + wlan_cp_stats_vdev_obj_lock(vdev_cp_stats_priv); + vdev_mc_stats = vdev_cp_stats_priv->vdev_stats; + vdev_stats = &vdev_mc_stats->wow_stats; + stats->ucast_wake_up_count += vdev_stats->ucast_wake_up_count; + stats->bcast_wake_up_count += vdev_stats->bcast_wake_up_count; + stats->ipv4_mcast_wake_up_count += vdev_stats->ipv4_mcast_wake_up_count; + stats->ipv6_mcast_wake_up_count += vdev_stats->ipv6_mcast_wake_up_count; + stats->ipv6_mcast_ra_stats += vdev_stats->ipv6_mcast_ra_stats; + 
stats->ipv6_mcast_ns_stats += vdev_stats->ipv6_mcast_ns_stats; + stats->ipv6_mcast_na_stats += vdev_stats->ipv6_mcast_na_stats; + stats->icmpv4_count += vdev_stats->icmpv4_count; + stats->icmpv6_count += vdev_stats->icmpv6_count; + stats->rssi_breach_wake_up_count += + vdev_stats->rssi_breach_wake_up_count; + stats->low_rssi_wake_up_count += vdev_stats->low_rssi_wake_up_count; + stats->gscan_wake_up_count += vdev_stats->gscan_wake_up_count; + stats->pno_complete_wake_up_count += + vdev_stats->pno_complete_wake_up_count; + stats->pno_match_wake_up_count += vdev_stats->pno_match_wake_up_count; + stats->oem_response_wake_up_count += + vdev_stats->oem_response_wake_up_count; + stats->pwr_save_fail_detected += vdev_stats->pwr_save_fail_detected; + stats->scan_11d += vdev_stats->scan_11d; + wlan_cp_stats_vdev_obj_unlock(vdev_cp_stats_priv); +} + +QDF_STATUS ucfg_mc_cp_stats_get_psoc_wake_lock_stats( + struct wlan_objmgr_psoc *psoc, + struct wake_lock_stats *stats) +{ + struct psoc_cp_stats *psoc_cp_stats_priv; + struct psoc_mc_cp_stats *psoc_mc_stats; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + /* iterate through all vdevs, and get wow stats from vdev_cs object */ + wlan_objmgr_iterate_obj_list(psoc, WLAN_VDEV_OP, vdev_iterator, + stats, true, WLAN_CP_STATS_ID); + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_mc_cp_stats_get_vdev_wake_lock_stats( + struct wlan_objmgr_vdev *vdev, + struct wake_lock_stats *stats) +{ + struct vdev_cp_stats *vdev_cp_stats_priv; + struct vdev_mc_cp_stats *vdev_mc_stats; + + vdev_cp_stats_priv = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cp_stats_priv) { + cp_stats_err("vdev cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + 
wlan_cp_stats_vdev_obj_lock(vdev_cp_stats_priv); + vdev_mc_stats = vdev_cp_stats_priv->vdev_stats; + qdf_mem_copy(stats, &vdev_mc_stats->wow_stats, sizeof(*stats)); + wlan_cp_stats_vdev_obj_unlock(vdev_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_mc_cp_stats_write_wow_stats( + struct wlan_objmgr_psoc *psoc, + char *buffer, uint16_t max_len, int *ret) +{ + QDF_STATUS status; + uint32_t unspecified_wake_count; + struct wake_lock_stats wow_stats = {0}; + struct psoc_mc_cp_stats *psoc_mc_stats; + struct psoc_cp_stats *psoc_cp_stats_priv; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + /* get stats from psoc */ + status = ucfg_mc_cp_stats_get_psoc_wake_lock_stats(psoc, &wow_stats); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to get WoW stats"); + return status; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + unspecified_wake_count = psoc_mc_stats->wow_unspecified_wake_up_count; + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + *ret = qdf_scnprintf(buffer, max_len, + "WoW Wake Reasons\n" + "\tunspecified wake count: %u\n" + "\tunicast: %u\n" + "\tbroadcast: %u\n" + "\tIPv4 multicast: %u\n" + "\tIPv6 multicast: %u\n" + "\tIPv6 multicast RA: %u\n" + "\tIPv6 multicast NS: %u\n" + "\tIPv6 multicast NA: %u\n" + "\tICMPv4: %u\n" + "\tICMPv6: %u\n" + "\tRSSI Breach: %u\n" + "\tLow RSSI: %u\n" + "\tG-Scan: %u\n" + "\tPNO Complete: %u\n" + "\tPNO Match: %u\n" + "\tOEM rsp wake_count: %u\n" + "\twake count due to pwr_save_fail_detected: %u\n" + "\twake count due to 11d scan: %u\n", + unspecified_wake_count, + wow_stats.ucast_wake_up_count, + wow_stats.bcast_wake_up_count, + wow_stats.ipv4_mcast_wake_up_count, + wow_stats.ipv6_mcast_wake_up_count, + wow_stats.ipv6_mcast_ra_stats, + wow_stats.ipv6_mcast_ns_stats, + 
wow_stats.ipv6_mcast_na_stats, + wow_stats.icmpv4_count, + wow_stats.icmpv6_count, + wow_stats.rssi_breach_wake_up_count, + wow_stats.low_rssi_wake_up_count, + wow_stats.gscan_wake_up_count, + wow_stats.pno_complete_wake_up_count, + wow_stats.pno_match_wake_up_count, + wow_stats.oem_response_wake_up_count, + wow_stats.pwr_save_fail_detected, + wow_stats.scan_11d); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_mc_cp_stats_send_stats_request(struct wlan_objmgr_vdev *vdev, + enum stats_req_type type, + struct request_info *info) +{ + QDF_STATUS status; + + status = ucfg_mc_cp_stats_set_pending_req(wlan_vdev_get_psoc(vdev), + type, info); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("ucfg_mc_cp_stats_set_pending_req pdev failed: %d", + status); + return status; + } + + return tgt_send_mc_cp_stats_req(wlan_vdev_get_psoc(vdev), type, info); +} + +QDF_STATUS ucfg_mc_cp_stats_get_tx_power(struct wlan_objmgr_vdev *vdev, + int *dbm) +{ + struct wlan_objmgr_pdev *pdev; + struct pdev_mc_cp_stats *pdev_mc_stats; + struct pdev_cp_stats *pdev_cp_stats_priv; + + pdev = wlan_vdev_get_pdev(vdev); + pdev_cp_stats_priv = wlan_cp_stats_get_pdev_stats_obj(pdev); + if (!pdev_cp_stats_priv) { + cp_stats_err("pdev cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_cp_stats_pdev_obj_lock(pdev_cp_stats_priv); + pdev_mc_stats = pdev_cp_stats_priv->pdev_stats; + *dbm = pdev_mc_stats->max_pwr; + wlan_cp_stats_pdev_obj_unlock(pdev_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +bool ucfg_mc_cp_stats_is_req_pending(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type) +{ + uint32_t pending_req_map; + struct psoc_mc_cp_stats *psoc_mc_stats; + struct psoc_cp_stats *psoc_cp_stats_priv; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return false; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + 
pending_req_map = psoc_mc_stats->pending.type_map; + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + return (pending_req_map & (1 << type)); +} + +QDF_STATUS ucfg_mc_cp_stats_set_pending_req(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type, + struct request_info *req) +{ + struct psoc_mc_cp_stats *psoc_mc_stats; + struct psoc_cp_stats *psoc_cp_stats_priv; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (type >= TYPE_MAX) { + cp_stats_err("Invalid type index: %d", type); + return QDF_STATUS_E_INVAL; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + psoc_mc_stats->pending.type_map |= (1 << type); + psoc_mc_stats->pending.req[type] = *req; + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_mc_cp_stats_reset_pending_req(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type) +{ + struct psoc_mc_cp_stats *psoc_mc_stats; + struct psoc_cp_stats *psoc_cp_stats_priv; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (type >= TYPE_MAX) { + cp_stats_err("Invalid type index: %d", type); + return QDF_STATUS_E_INVAL; + } + + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + psoc_mc_stats->pending.type_map &= ~(1 << type); + qdf_mem_zero(&psoc_mc_stats->pending.req[type], + sizeof(psoc_mc_stats->pending.req[type])); + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_mc_cp_stats_get_pending_req(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type, + struct request_info *info) +{ + struct psoc_mc_cp_stats *psoc_mc_stats; + struct psoc_cp_stats *psoc_cp_stats_priv; + + 
psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (type >= TYPE_MAX) { + cp_stats_err("Invalid type index: %d", type); + return QDF_STATUS_E_INVAL; + } + wlan_cp_stats_psoc_obj_lock(psoc_cp_stats_priv); + psoc_mc_stats = psoc_cp_stats_priv->obj_stats; + *info = psoc_mc_stats->pending.req[type]; + wlan_cp_stats_psoc_obj_unlock(psoc_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +void ucfg_mc_cp_stats_free_stats_resources(struct stats_event *ev) +{ + if (!ev) + return; + + qdf_mem_free(ev->pdev_stats); + qdf_mem_free(ev->peer_adv_stats); + qdf_mem_free(ev->peer_stats); + qdf_mem_free(ev->cca_stats); + qdf_mem_free(ev->vdev_summary_stats); + qdf_mem_free(ev->vdev_chain_rssi); + qdf_mem_zero(ev, sizeof(*ev)); +} + +QDF_STATUS ucfg_mc_cp_stats_cca_stats_get(struct wlan_objmgr_vdev *vdev, + struct cca_stats *cca_stats) +{ + struct vdev_cp_stats *vdev_cp_stats_priv; + struct vdev_mc_cp_stats *vdev_mc_stats; + + vdev_cp_stats_priv = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cp_stats_priv) { + cp_stats_err("vdev cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_cp_stats_vdev_obj_lock(vdev_cp_stats_priv); + vdev_mc_stats = vdev_cp_stats_priv->vdev_stats; + cca_stats->congestion = vdev_mc_stats->cca.congestion; + wlan_cp_stats_vdev_obj_unlock(vdev_cp_stats_priv); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_mc_cp_stats_set_rate_flags(struct wlan_objmgr_vdev *vdev, + uint32_t flags) +{ + struct vdev_mc_cp_stats *vdev_mc_stats; + struct vdev_cp_stats *vdev_cp_stats_priv; + + vdev_cp_stats_priv = wlan_cp_stats_get_vdev_stats_obj(vdev); + if (!vdev_cp_stats_priv) { + cp_stats_err("vdev cp stats object is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wlan_cp_stats_vdev_obj_lock(vdev_cp_stats_priv); + vdev_mc_stats = vdev_cp_stats_priv->vdev_stats; + vdev_mc_stats->tx_rate_flags = flags; + 
wlan_cp_stats_vdev_obj_unlock(vdev_cp_stats_priv); + + return QDF_STATUS_SUCCESS; +} + +void ucfg_mc_cp_stats_register_lost_link_info_cb( + struct wlan_objmgr_psoc *psoc, + void (*lost_link_cp_stats_info_cb)(void *stats_ev)) +{ + struct psoc_cp_stats *psoc_cp_stats_priv; + + psoc_cp_stats_priv = wlan_cp_stats_get_psoc_stats_obj(psoc); + if (!psoc_cp_stats_priv) { + cp_stats_err("psoc cp stats object is null"); + return; + } + + psoc_cp_stats_priv->legacy_stats_cb = lost_link_cp_stats_info_cb; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..6fa1675024acb526ee772e1b42d16095cbf961e3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_ucfg_api.c @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cp_stats_ucfg_api.h + * + * This file provide API definitions required for northbound interaction + */ + +#ifndef __WLAN_CP_STATS_UCFG_API_H__ +#define __WLAN_CP_STATS_UCFG_API_H__ + +#endif /* __WLAN_CP_STATS_UCFG_API_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_utils_api.c b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_utils_api.c new file mode 100644 index 0000000000000000000000000000000000000000..89f4f4a0052d2097453757ff0b2ec1887ecdc5d5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/cp_stats/dispatcher/src/wlan_cp_stats_utils_api.c @@ -0,0 +1,351 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_cp_stats_utils_api.c + * + * This file provide public API definitions for other accessing other UMAC + * components + */ +#include "../../core/src/wlan_cp_stats_defs.h" +#include "../../core/src/wlan_cp_stats_obj_mgr_handler.h" +#include +#include + +QDF_STATUS wlan_cp_stats_init(void) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + status = wlan_objmgr_register_psoc_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_psoc_obj_create_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to register psoc create handler"); + goto wlan_cp_stats_psoc_init_fail1; + } + + status = wlan_objmgr_register_psoc_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_psoc_obj_destroy_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to register psoc destroy handler"); + goto wlan_cp_stats_psoc_init_fail2; + } + + status = wlan_objmgr_register_pdev_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_pdev_obj_create_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to register pdev create handler"); + goto wlan_cp_stats_pdev_init_fail1; + } + + status = wlan_objmgr_register_pdev_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_pdev_obj_destroy_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to register pdev destroy handler"); + goto wlan_cp_stats_pdev_init_fail2; + } + + status = wlan_objmgr_register_vdev_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_vdev_obj_create_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to register vdev create handler"); + goto wlan_cp_stats_vdev_init_fail1; + } + + status = wlan_objmgr_register_vdev_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_vdev_obj_destroy_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to register vdev destroy handler"); + goto wlan_cp_stats_vdev_init_fail2; + } + + status = 
wlan_objmgr_register_peer_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_peer_obj_create_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to register peer create handler"); + goto wlan_cp_stats_peer_init_fail1; + } + + status = wlan_objmgr_register_peer_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_peer_obj_destroy_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + cp_stats_err("Failed to register peer destroy handler"); + goto wlan_cp_stats_peer_init_fail2; + } + + return QDF_STATUS_SUCCESS; + +wlan_cp_stats_peer_init_fail2: + wlan_objmgr_unregister_peer_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_peer_obj_create_handler, + NULL); +wlan_cp_stats_peer_init_fail1: + wlan_objmgr_unregister_vdev_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_vdev_obj_destroy_handler, + NULL); +wlan_cp_stats_vdev_init_fail2: + wlan_objmgr_unregister_vdev_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_vdev_obj_create_handler, + NULL); +wlan_cp_stats_vdev_init_fail1: + wlan_objmgr_unregister_pdev_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_pdev_obj_destroy_handler, + NULL); +wlan_cp_stats_pdev_init_fail2: + wlan_objmgr_unregister_pdev_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_pdev_obj_create_handler, + NULL); +wlan_cp_stats_pdev_init_fail1: + wlan_objmgr_unregister_psoc_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_psoc_obj_destroy_handler, + NULL); +wlan_cp_stats_psoc_init_fail2: + wlan_objmgr_unregister_psoc_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_psoc_obj_create_handler, + NULL); +wlan_cp_stats_psoc_init_fail1: + return status; +} + +QDF_STATUS wlan_cp_stats_deinit(void) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + status = wlan_objmgr_unregister_psoc_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_psoc_obj_create_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) + cp_stats_err("Failed to unregister 
psoc create handler"); + + status = wlan_objmgr_unregister_psoc_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_psoc_obj_destroy_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) + cp_stats_err("Failed to unregister psoc destroy handler"); + + status = wlan_objmgr_unregister_pdev_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_pdev_obj_create_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) + cp_stats_err("Failed to unregister pdev create handler"); + + status = wlan_objmgr_unregister_pdev_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_pdev_obj_destroy_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) + cp_stats_err("Failed to unregister pdev destroy handler"); + + status = wlan_objmgr_unregister_vdev_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_vdev_obj_create_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) + cp_stats_err("Failed to unregister vdev create handler"); + + status = wlan_objmgr_unregister_vdev_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_vdev_obj_destroy_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) + cp_stats_err("Failed to unregister vdev destroy handler"); + + status = wlan_objmgr_unregister_peer_create_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_peer_obj_create_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) + cp_stats_err("Failed to unregister peer create handler"); + + status = wlan_objmgr_unregister_peer_destroy_handler + (WLAN_UMAC_COMP_CP_STATS, + wlan_cp_stats_peer_obj_destroy_handler, + NULL); + if (QDF_IS_STATUS_ERROR(status)) + cp_stats_err("Failed to unregister peer destroy handler"); + + return status; +} + +/* DA/OL specific call back initialization */ +QDF_STATUS wlan_cp_stats_open(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct cp_stats_context *csc; + + if (!psoc) { + cp_stats_err("PSOC is null!"); + return QDF_STATUS_E_INVAL; + } + csc = + wlan_objmgr_psoc_get_comp_private_obj(psoc, 
WLAN_UMAC_COMP_CP_STATS); + if (!csc) { + cp_stats_err("cp_stats_context is null!"); + return QDF_STATUS_E_FAILURE; + } + + if (csc->cp_stats_open) + status = csc->cp_stats_open(psoc); + + qdf_spinlock_create(&csc->csc_lock); + return status; +} + +QDF_STATUS wlan_cp_stats_close(struct wlan_objmgr_psoc *psoc) +{ + struct cp_stats_context *csc; + + if (!psoc) { + cp_stats_err("PSOC is null!"); + return QDF_STATUS_E_INVAL; + } + csc = + wlan_objmgr_psoc_get_comp_private_obj(psoc, WLAN_UMAC_COMP_CP_STATS); + if (csc && csc->cp_stats_close) { + csc->cp_stats_close(psoc); + qdf_spinlock_destroy(&csc->csc_lock); + } + + return QDF_STATUS_SUCCESS; +} + +/* WMI registrations stage */ +QDF_STATUS wlan_cp_stats_enable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct cp_stats_context *csc; + + if (!psoc) { + cp_stats_err("PSOC is null!"); + return QDF_STATUS_E_INVAL; + } + csc = + wlan_objmgr_psoc_get_comp_private_obj(psoc, WLAN_UMAC_COMP_CP_STATS); + if (!csc) { + cp_stats_err("cp_stats_context is null!"); + return QDF_STATUS_E_FAILURE; + } + + if (csc->cp_stats_enable) + status = csc->cp_stats_enable(psoc); + + return status; +} + +QDF_STATUS wlan_cp_stats_disable(struct wlan_objmgr_psoc *psoc) +{ + struct cp_stats_context *csc; + + if (!psoc) { + cp_stats_err("PSOC is null!\n"); + return QDF_STATUS_E_INVAL; + } + csc = + wlan_objmgr_psoc_get_comp_private_obj(psoc, WLAN_UMAC_COMP_CP_STATS); + if (csc && csc->cp_stats_disable) + csc->cp_stats_disable(psoc); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wlan_cp_stats_comp_obj_cfg(enum wlan_objmgr_obj_type obj_type, + enum wlan_cp_stats_cfg_state cfg_state, + enum wlan_umac_comp_id comp_id, + void *cmn_obj, void *data) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct cp_stats_context *csc; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_peer *peer; + enum wlan_cp_stats_comp_id cp_stats_comp_id; + + if 
(!cmn_obj) { + cp_stats_err("common object is null!"); + return QDF_STATUS_E_INVAL; + } + + cp_stats_comp_id = wlan_cp_stats_get_comp_id(comp_id); + if (cp_stats_comp_id >= WLAN_CP_STATS_MAX_COMPONENTS) { + cp_stats_err("Invalid UMAC id provided to cp_stats"); + return QDF_STATUS_E_INVAL; + } + + switch (obj_type) { + case WLAN_PSOC_OP: + psoc = (struct wlan_objmgr_psoc *)cmn_obj; + csc = + wlan_objmgr_psoc_get_comp_private_obj + (psoc, WLAN_UMAC_COMP_CP_STATS); + break; + case WLAN_PDEV_OP: + pdev = (struct wlan_objmgr_pdev *)cmn_obj; + csc = wlan_cp_stats_ctx_get_from_pdev(pdev); + break; + case WLAN_VDEV_OP: + vdev = (struct wlan_objmgr_vdev *)cmn_obj; + csc = wlan_cp_stats_ctx_get_from_vdev(vdev); + break; + case WLAN_PEER_OP: + peer = (struct wlan_objmgr_peer *)cmn_obj; + csc = wlan_cp_stats_ctx_get_from_peer(peer); + break; + default: + cp_stats_err("Invalid common object type"); + return QDF_STATUS_E_INVAL; + } + + if (!csc) { + cp_stats_err("cp_stats_context is null!"); + return QDF_STATUS_E_FAILURE; + } + + if (csc->cp_stats_comp_obj_config) + status = csc->cp_stats_comp_obj_config(obj_type, cfg_state, + cp_stats_comp_id, + cmn_obj, data); + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs.h new file mode 100644 index 0000000000000000000000000000000000000000..1fe760f97d17e01852383667a4921bfc42bf425a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs.h @@ -0,0 +1,2319 @@ +/* + * Copyright (c) 2013, 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2005-2006 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has main dfs structures. + */ + +#ifndef _DFS_H_ +#define _DFS_H_ + +#include /* QDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */ +#include /* QDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */ +#include /* qdf_nbuf_t, etc. */ +#include /* qdf_assert */ +#include /* qdf_spinlock */ +#include +#include + +#include +#include "dfs_structs.h" +#include "dfs_channel.h" +#include "dfs_ioctl_private.h" +#include /* For qdf_packed*/ +#include /* For STAILQ_ENTRY */ +#include +#include +#include + +/* File Line and Submodule String */ +#define FLSM(x, str) #str " : " FL(x) +/* Cast to dfs type */ +#define DC(x) ((struct wlan_dfs *)(x)) + +/** + * dfs_log: dfs logging using submodule MASKs and + * QDF trace level. + * The logging is controlled by two bitmasks: + * 1) submodule bitmask: sm + * 2) trace level masks: level + * + * @dfs: The dfs object pointer or NULL if dfs is not defined. + * @sm: Submodule BITMASK. + * @level: QDF trace level. + * @args...: Variable argument list. + * + * The submodule(sm) cannot be empty even if argument dfs is NULL. + * Else the macro will create a compilation error. + * One may provide WLAN_DEBUG_DFS_ALWAYS when the argument dfs is NULL. + * Example:- + * dfs_log(NULL, WLAN_DEBUG_DFS_ALWAYS, QDF_TRACE_LEVEL_INFO,"Error pulse"); + * + * Why DC(x) is required? 
+ * Since NULL is defined as ((void *)(0)), if the argument "dfs" + * in a call to the macro "dfs_log" is NULL + * then during compilation (NULL)->dfs_debug_mask will dereference + * a (void *) type, which is illegal. Therefore, we need + * the cast: (DC(dfs))->dfs_debug_mask. + * Example:- + * dfs_log(NULL, WLAN_DEBUG_DFS, QDF_TRACE_LEVEL_INFO,"dfs is NULL"); + */ +#define dfs_log(dfs, sm, level, args...) do { \ + if (((dfs) == NULL) || \ + ((sm) == WLAN_DEBUG_DFS_ALWAYS) || \ + ((sm) & ((DC(dfs))->dfs_debug_mask))) { \ + QDF_TRACE(QDF_MODULE_ID_DFS, level, ## args); \ + } \ +} while (0) + +#define dfs_logfl(dfs, level, sm, format, args...) \ + dfs_log(dfs, sm, level, FLSM(format, sm), ## args) + +#define dfs_alert(dfs, sm, format, args...) \ + dfs_logfl(dfs, QDF_TRACE_LEVEL_FATAL, sm, format, ## args) + +#define dfs_err(dfs, sm, format, args...) \ + dfs_logfl(dfs, QDF_TRACE_LEVEL_ERROR, sm, format, ## args) + +#define dfs_warn(dfs, sm, format, args...) \ + dfs_logfl(dfs, QDF_TRACE_LEVEL_WARN, sm, format, ## args) + +#define dfs_info(dfs, sm, format, args...) \ + dfs_logfl(dfs, QDF_TRACE_LEVEL_INFO, sm, format, ## args) + +#define dfs_debug(dfs, sm, format, args...) \ + dfs_logfl(dfs, QDF_TRACE_LEVEL_DEBUG, sm, format, ## args) + +#define DFS_MIN(a, b) ((a) < (b)?(a):(b)) +#define DFS_MAX(a, b) ((a) > (b)?(a) : (b)) +#define DFS_DIFF(a, b)(DFS_MAX(a, b) - DFS_MIN(a, b)) + +/** + * Maximum number of radar events to be processed in a single iteration. + * Allows soft watchdog to run. + */ +#define MAX_EVENTS 100 + +/** + * Constants to use for chirping detection. + * + * All are unconverted as HW reports them. + * + * XXX Are these constants with or without fast clock 5GHz operation? + * XXX Peregrine reports pulses in microseconds, not hardware clocks! + */ + +#define MAX_DUR_FOR_LOW_RSSI 4 + +/** + * Cascade has issue with reported duration especially when there is a + * crossover of chirp from one segment to another. 
It may report a value + * of duration that is well below 50us for a valid FCC type 5 chirping + * pulse. For now changing minimum duration as a work around. This will + * affect all chips but since we detect chirp with Merlin+, we may be OK + * for now. We need a more robust solution for this. + */ +#define MIN_BIN5_DUR_CAS 25 /* 50 * 1.25*/ +#define MIN_BIN5_DUR_MICROSEC_CAS 20 +#define MIN_BIN5_DUR 63 /* 50 * 1.25*/ +#define MIN_BIN5_DUR_MICROSEC 50 +#define MAYBE_BIN5_DUR 35 /* 28 * 1.25*/ +#define MAYBE_BIN5_DUR_MICROSEC 28 + +/* Conversion is already done using dfs->dur_multiplier */ +#define MAX_BIN5_DUR 145 /* use 145 for osprey */ +#define MAX_BIN5_DUR_MICROSEC 105 + +#define DFS_MARGIN_EQUAL(a, b, margin) ((DFS_DIFF(a, b)) <= margin) +#define DFS_MAX_STAGGERED_BURSTS 3 + +/** + * All filter thresholds in the radar filter tables are effective at a 50% + * channel loading. + */ +#define DFS_CHAN_LOADING_THRESH 50 +#define DFS_EXT_CHAN_LOADING_THRESH 30 +#define DFS_DEFAULT_PRI_MARGIN 6 +#define DFS_DEFAULT_FIXEDPATTERN_PRI_MARGIN 4 + +#define WLAN_DFSQ_LOCK(_dfs) qdf_spin_lock_bh(&(_dfs)->dfs_radarqlock) +#define WLAN_DFSQ_UNLOCK(_dfs) qdf_spin_unlock_bh(&(_dfs)->dfs_radarqlock) +#define WLAN_DFSQ_LOCK_CREATE(_dfs) qdf_spinlock_create( \ + &(_dfs)->dfs_radarqlock) +#define WLAN_DFSQ_LOCK_DESTROY(_dfs) qdf_spinlock_destroy( \ + &(_dfs)->dfs_radarqlock) + +#define WLAN_ARQ_LOCK(_dfs) qdf_spin_lock_bh(&(_dfs)->dfs_arqlock) +#define WLAN_ARQ_UNLOCK(_dfs) qdf_spin_unlock_bh(&(_dfs)->dfs_arqlock) +#define WLAN_ARQ_LOCK_CREATE(_dfs) qdf_spinlock_create(&(_dfs)->dfs_arqlock) +#define WLAN_ARQ_LOCK_DESTROY(_dfs) qdf_spinlock_destroy(&(_dfs)->dfs_arqlock) + +#define WLAN_DFSEVENTQ_LOCK(_dfs) qdf_spin_lock_bh(&(_dfs)->dfs_eventqlock) +#define WLAN_DFSEVENTQ_UNLOCK(_dfs) qdf_spin_unlock_bh( \ + &(_dfs)->dfs_eventqlock) +#define WLAN_DFSEVENTQ_LOCK_CREATE(_dfs) qdf_spinlock_create( \ + &(_dfs)->dfs_eventqlock) +#define WLAN_DFSEVENTQ_LOCK_DESTROY(_dfs) 
qdf_spinlock_destroy( \ + &(_dfs)->dfs_eventqlock) + +#define WLAN_DFSNOL_LOCK(_dfs) qdf_spin_lock_bh(&(_dfs)->dfs_nol_lock) +#define WLAN_DFSNOL_UNLOCK(_dfs) qdf_spin_unlock_bh(&(_dfs)->dfs_nol_lock) +#define WLAN_DFSNOL_LOCK_CREATE(_dfs) qdf_spinlock_create( \ + &(_dfs)->dfs_nol_lock) +#define WLAN_DFSNOL_LOCK_DESTROY(_dfs) qdf_spinlock_destroy( \ + &(_dfs)->dfs_nol_lock) + +#define PRECAC_LIST_LOCK(_dfs) qdf_spin_lock_irqsave( \ + &(_dfs)->dfs_precac_lock) +#define PRECAC_LIST_UNLOCK(_dfs) qdf_spin_unlock_irqrestore( \ + &(_dfs)->dfs_precac_lock) +#define PRECAC_LIST_LOCK_CREATE(_dfs) qdf_spinlock_create( \ + &(_dfs)->dfs_precac_lock) +#define PRECAC_LIST_LOCK_DESTROY(_dfs) qdf_spinlock_destroy( \ + &(_dfs)->dfs_precac_lock) + +/* Mask for time stamp from descriptor */ +#define DFS_TSMASK 0xFFFFFFFF +/* Shift for time stamp from descriptor */ +#define DFS_TSSHIFT 32 +/* 64 bit TSF wrap value */ +#define DFS_TSF_WRAP 0xFFFFFFFFFFFFFFFFULL +/* TS mask for 64 bit value */ +#define DFS_64BIT_TSFMASK 0x0000000000007FFFULL + +#define DFS_AR_RADAR_RSSI_THR 5 /* in dB */ +#define DFS_AR_RADAR_RESET_INT 1 /* in secs */ +#define DFS_AR_RADAR_MAX_HISTORY 500 +#define DFS_AR_REGION_WIDTH 128 +#define DFS_AR_RSSI_THRESH_STRONG_PKTS 17 /* in dB */ +#define DFS_AR_RSSI_DOUBLE_THRESHOLD 15 /* in dB */ +#define DFS_AR_MAX_NUM_ACK_REGIONS 9 +#define DFS_AR_ACK_DETECT_PAR_THRESH 20 +#define DFS_AR_PKT_COUNT_THRESH 20 + +#define DFS_MAX_DL_SIZE 64 +#define DFS_MAX_DL_MASK 0x3F + +#define DFS_NOL_TIME DFS_NOL_TIMEOUT_US +/* 30 minutes in usecs */ + +#define DFS_WAIT_TIME (60*1000000) /* 1 minute in usecs */ + +#define DFS_DISABLE_TIME (3*60*1000000) /* 3 minutes in usecs */ + +#define DFS_MAX_B5_SIZE 128 +#define DFS_MAX_B5_MASK 0x0000007F /* 128 */ + +/* Max number of overlapping filters */ +#define DFS_MAX_RADAR_OVERLAP 16 + +/* Max number of dfs events which can be q'd */ +#define DFS_MAX_EVENTS 1024 + +#define DFS_RADAR_EN 0x80000000 /* Radar detect is capable */ +#define 
DFS_AR_EN 0x40000000 /* AR detect is capable */ +/* Radar detect in second segment is capable */ +#define DFS_SECOND_SEGMENT_RADAR_EN 0x20000000 +#define DFS_MAX_RSSI_VALUE 0x7fffffff /* Max rssi value */ + +#define DFS_BIN_MAX_PULSES 60 /* max num of pulses in a burst */ +#define DFS_BIN5_PRI_LOWER_LIMIT 990 /* us */ + +/** + * To cover the single pulse burst case, change from 2010 us to + * 2010000 us. + */ + +/** + * This is reverted back to 2010 as larger value causes false + * bin5 detect (EV76432, EV76320) + */ +#define DFS_BIN5_PRI_HIGHER_LIMIT 2010 /* us */ + +#define DFS_BIN5_WIDTH_MARGIN 4 /* us */ +#define DFS_BIN5_RSSI_MARGIN 5 /* dBm */ + +/** + * Following threshold is not specified but should be + * okay statistically. + */ +#define DFS_BIN5_BRI_LOWER_LIMIT 300000 /* us */ +#define DFS_BIN5_BRI_UPPER_LIMIT 12000000 /* us */ + +/* Max number of pulses kept in buffer */ +#define DFS_MAX_PULSE_BUFFER_SIZE 1024 +#define DFS_MAX_PULSE_BUFFER_MASK 0x3ff + +#define DFS_FAST_CLOCK_MULTIPLIER (800/11) +#define DFS_NO_FAST_CLOCK_MULTIPLIER (80) +#define DFS_BIG_SIDX 10000 + +/* Min value of valid psidx diff */ +#define DFS_MIN_PSIDX_DIFF 4 +/* Max value of valid psidx diff */ +#define DFS_MAX_PSIDX_DIFF 16 + +/** + * Software use: channel interference used for AR as well as RADAR + * interference detection. + */ +#define CHANNEL_INTERFERENCE 0x01 + +#define CHANNEL_2GHZ 0x00080 /* 2 GHz spectrum channel. */ +#define CHANNEL_OFDM 0x00040 /* OFDM channel */ +#define CHANNEL_TURBO 0x00010 /* Turbo Channel */ +#define CHANNEL_108G (CHANNEL_2GHZ|CHANNEL_OFDM|CHANNEL_TURBO) + +/* qdf_packed - denotes structure is packed. 
*/ +#define qdf_packed __qdf_packed + +#define SEG_ID_PRIMARY 0 +#define SEG_ID_SECONDARY 1 + +/* MIN and MAX width for different regions */ +#define REG0_MIN_WIDTH 33 +#define REG0_MAX_WIDTH 38 +#define REG1_MIN_WIDTH 39 +#define REG1_MAX_WIDTH 44 +#define REG2_MIN_WIDTH 53 +#define REG2_MAX_WIDTH 58 +#define REG3_MIN_WIDTH 126 +#define REG3_MAX_WIDTH 140 +#define REG4_MIN_WIDTH 141 +#define REG4_MAX_WIDTH 160 +#define REG5_MIN_WIDTH 189 +#define REG5_MAX_WIDTH 210 +#define REG6_MIN_WIDTH 360 +#define REG6_MAX_WIDTH 380 +#define REG7_MIN_WIDTH 257 +#define REG7_MAX_WIDTH 270 +#define REG8_MIN_WIDTH 295 +#define REG8_MAX_WIDTH 302 + +#define OVER_SAMPLING_FREQ 44000 +#define SAMPLING_FREQ 40000 +#define HUNDRED 100 +#define NUM_BINS 128 +#define THOUSAND 1000 + +/* Check if the dfs current channel is 5.8GHz */ +#define DFS_CURCHAN_IS_58GHz(freq) \ + ((((freq) >= 5745) && ((freq) <= 5865)) ? true : false) + +/* ETSI11_WORLD regdmn pair id */ +#define ETSI11_WORLD_REGDMN_PAIR_ID 0x26 +#define ETSI12_WORLD_REGDMN_PAIR_ID 0x28 +#define ETSI13_WORLD_REGDMN_PAIR_ID 0x27 +#define ETSI14_WORLD_REGDMN_PAIR_ID 0x29 + +/* Array offset to ETSI legacy pulse */ +#define ETSI_LEGACY_PULSE_ARR_OFFSET 2 + +#define DFS_NOL_ADD_CHAN_LOCKED(dfs, freq, timeout) \ + do { \ + WLAN_DFSNOL_LOCK(dfs); \ + dfs_nol_addchan(dfs, freq, timeout); \ + WLAN_DFSNOL_UNLOCK(dfs); \ + } while (0) + +#define DFS_NOL_DELETE_CHAN_LOCKED(dfs, freq, chwidth) \ + do { \ + WLAN_DFSNOL_LOCK(dfs); \ + dfs_nol_delete(dfs, freq, chwidth); \ + WLAN_DFSNOL_UNLOCK(dfs); \ + } while (0) + +#define DFS_GET_NOL_LOCKED(dfs, dfs_nol, nchan) \ + do { \ + WLAN_DFSNOL_LOCK(dfs); \ + dfs_get_nol(dfs, dfs_nol, nchan); \ + WLAN_DFSNOL_UNLOCK(dfs); \ + } while (0) + +#define DFS_PRINT_NOL_LOCKED(dfs) \ + do { \ + WLAN_DFSNOL_LOCK(dfs); \ + dfs_print_nol(dfs); \ + WLAN_DFSNOL_UNLOCK(dfs); \ + } while (0) + +#define DFS_NOL_FREE_LIST_LOCKED(dfs) \ + do { \ + WLAN_DFSNOL_LOCK(dfs); \ + dfs_nol_free_list(dfs); \ + 
WLAN_DFSNOL_UNLOCK(dfs); \ + } while (0) + +/* Host sends the average parameters of the radar pulses and starts the status + * wait timer with this timeout. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +#define HOST_DFS_STATUS_WAIT_TIMER_MS 200 +#endif + +/** + * struct dfs_pulseparams - DFS pulse param structure. + * @p_time: Time for start of pulse in usecs. + * @p_dur: Duration of pulse in usecs. + * @p_rssi: RSSI of pulse. + * @p_seg_id: Segment id. + * @p_sidx: Sidx value. + * @p_delta_peak: Delta peak value. + * @p_psidx_diff: The difference in the FFT peak index between the short FFT + * and the first long FFT. + * @p_seq_num: Sequence number. + */ +struct dfs_pulseparams { + uint64_t p_time; + uint8_t p_dur; + uint8_t p_rssi; + uint8_t p_seg_id; + int16_t p_sidx; + int8_t p_delta_peak; + int16_t p_psidx_diff; + uint32_t p_seq_num; +} qdf_packed; + +/** + * struct dfs_pulseline - Pulseline structure. + * @pl_elems[]: array of pulses in delay line. + * @pl_firstelem: Index of the first element. + * @pl_lastelem: Index of the last element. + * @pl_numelems: Number of elements in the delay line. + */ +struct dfs_pulseline { + struct dfs_pulseparams pl_elems[DFS_MAX_PULSE_BUFFER_SIZE]; + uint32_t pl_firstelem; + uint32_t pl_lastelem; + uint32_t pl_numelems; +} qdf_packed; + +#define DFS_EVENT_CHECKCHIRP 0x01 /* Whether to check the chirp flag */ +#define DFS_EVENT_HW_CHIRP 0x02 /* hardware chirp */ +#define DFS_EVENT_SW_CHIRP 0x04 /* software chirp */ +/* Whether the event contains valid psidx diff value*/ +#define DFS_EVENT_VALID_PSIDX_DIFF 0x08 + +/* Use this only if the event has CHECKCHIRP set. */ +#define DFS_EVENT_ISCHIRP(e) \ + ((e)->re_flags & (DFS_EVENT_HW_CHIRP | DFS_EVENT_SW_CHIRP)) + +/** + * Check if the given event is to be rejected as not possibly + * a chirp. This means: + * (a) it's a hardware or software checked chirp, and + * (b) the HW/SW chirp bits are both 0. 
+ */ +#define DFS_EVENT_NOTCHIRP(e) \ + (((e)->re_flags & (DFS_EVENT_CHECKCHIRP)) && (!DFS_EVENT_ISCHIRP((e)))) + +/** + * struct dfs_event - DFS event structure. + * @re_full_ts: 64-bit full timestamp from interrupt time. + * @re_ts: Original 15 bit recv timestamp. + * @re_rssi: Rssi of radar event. + * @re_dur: Duration of radar pulse. + * @re_chanindex: Channel of event. + * @re_flags: Event flags. + * @re_freq: Centre frequency of event, KHz. + * @re_freq_lo: Lower bounds of frequency, KHz. + * @re_freq_hi: Upper bounds of frequency, KHz. + * @re_seg_id: HT80_80/HT160 use. + * @re_sidx: Seg index. + * @re_freq_offset_khz: Freq offset in KHz + * @re_peak_mag: Peak mag. + * @re_total_gain: Total gain. + * @re_mb_gain: Mb gain. + * @re_relpwr_db: Relpower in db. + * @re_delta_diff: Delta diff. + * @re_delta_peak: Delta peak. + * @re_psidx_diff: Psidx diff. + * @re_list: List of radar events. + */ +struct dfs_event { + uint64_t re_full_ts; + uint32_t re_ts; + uint8_t re_rssi; + uint8_t re_dur; + uint8_t re_chanindex; + uint8_t re_flags; + uint32_t re_freq; + uint32_t re_freq_lo; + uint32_t re_freq_hi; + uint8_t re_seg_id; + int re_sidx; + u_int re_freq_offset_khz; + int re_peak_mag; + int re_total_gain; + int re_mb_gain; + int re_relpwr_db; + uint8_t re_delta_diff; + int8_t re_delta_peak; + int16_t re_psidx_diff; + + STAILQ_ENTRY(dfs_event) re_list; +} qdf_packed; + +#define DFS_AR_MAX_ACK_RADAR_DUR 511 +#define DFS_AR_MAX_NUM_PEAKS 3 +#define DFS_AR_ARQ_SIZE 2048 /* 8K AR events for buffer size */ +#define DFS_AR_ARQ_SEQSIZE 2049 /* Sequence counter wrap for AR */ + +#define DFS_RADARQ_SIZE 512 /* 1K radar events for buffer size */ +#define DFS_RADARQ_SEQSIZE 513 /* Sequence counter wrap for radar */ +/* Number of radar channels we keep state for */ +#define DFS_NUM_RADAR_STATES 64 +/* Max number radar filters for each type */ +#define DFS_MAX_NUM_RADAR_FILTERS 10 +/* Number of different radar types */ +#define DFS_MAX_RADAR_TYPES 32 +/* Number of filter index 
table rows */ +#define DFS_NUM_FT_IDX_TBL_ROWS 256 + +/* RADAR filter pattern type 1*/ +#define WLAN_DFS_RF_PATTERN_TYPE_1 1 + +/** + * struct dfs_ar_state - DFS AR state structure. + * @ar_prevwidth: Previous width. + * @ar_phyerrcount[]: Phy error count. + * @ar_acksum: Acksum. + * @ar_packetthreshold: Thresh to determine traffic load. + * @ar_parthreshold: Thresh to determine peak. + * @ar_radarrssi: Rssi threshold for AR event. + * @ar_prevtimestamp: Prev time stamp. + * @ar_peaklist[]: Peak list. + */ +struct dfs_ar_state { + uint32_t ar_prevwidth; + uint32_t ar_phyerrcount[DFS_AR_MAX_ACK_RADAR_DUR]; + uint32_t ar_acksum; + uint32_t ar_packetthreshold; + uint32_t ar_parthreshold; + uint32_t ar_radarrssi; + uint16_t ar_prevtimestamp; + uint16_t ar_peaklist[DFS_AR_MAX_NUM_PEAKS]; +}; + +/** + * struct dfs_delayelem - Delay Element. + * @de_time: Current "filter" time for start of pulse in usecs. + * @de_dur: Duration of pulse in usecs. + * @de_rssi: Rssi of pulse in dB. + * @de_ts: Time stamp for this delay element. + * @de_seg_id: Segment id for HT80_80/HT160 use. + * @de_sidx: Sidx value. + * @de_delta_peak: Delta peak. + * @de_psidx_diff: Psidx diff. + * @de_seq_num: Sequence number. + */ +struct dfs_delayelem { + uint32_t de_time; + uint8_t de_dur; + uint8_t de_rssi; + uint64_t de_ts; + uint8_t de_seg_id; + int16_t de_sidx; + int8_t de_delta_peak; + int16_t de_psidx_diff; + uint32_t de_seq_num; +} qdf_packed; + +/** + * struct dfs_delayline - DFS Delay Line. + * @dl_elems[]: Array of pulses in delay line. + * @dl_last_ts: Last timestamp the delay line was used (in usecs). + * @dl_firstelem: Index of the first element. + * @dl_lastelem: Index of the last element. + * @dl_numelems: Number of elements in the delay line. + * The following is to handle fractional PRI pulses that can cause false + * detection. + * @dl_seq_num_start: Sequence number of first pulse that was part of + * threshold match. 
+ * @dl_seq_num_stop: Sequence number of last pulse that was part of threshold + * match. + * The following is required because the first pulse may or may not be in the + * delay line but we will find it in the pulse line using dl_seq_num_second's + * diff_ts value. + * @dl_seq_num_second: Sequence number of second pulse that was part of + * threshold match. + * @dl_search_pri: We need final search PRI to identify possible fractional + * PRI issue. + * @dl_min_sidx: Minimum sidx value of pulses used to match threshold. + * Used for sidx spread check. + * @dl_max_sidx: Maximum sidx value of pulses used to match threshold. + * Used for sidx spread check. + * @dl_delta_peak_match_count: Number of pulses in the delay line that had valid + * delta peak value. + * @dl_psidx_diff_match_count: Number of pulses in the delay line that had valid + * psidx diff value. + */ +struct dfs_delayline { + struct dfs_delayelem dl_elems[DFS_MAX_DL_SIZE]; + uint64_t dl_last_ts; + uint32_t dl_firstelem; + uint32_t dl_lastelem; + uint32_t dl_numelems; + uint32_t dl_seq_num_start; + uint32_t dl_seq_num_stop; + uint32_t dl_seq_num_second; + uint32_t dl_search_pri; + int16_t dl_min_sidx; + int8_t dl_max_sidx; + uint8_t dl_delta_peak_match_count; + uint8_t dl_psidx_diff_match_count; +} qdf_packed; + +/** + * struct dfs_filter - Dfs filter. + * @rf_dl: Delay line of pulses for this filter. + * @rf_numpulses: Number of pulses in the filter. + * @rf_minpri: Min pri to be considered for this filter. + * @rf_maxpri: Max pri to be considered for this filter. + * @rf_threshold: Match filter output threshold for radar detect. + * @rf_filterlen: Length (in usecs) of the filter. + * @rf_patterntype: Fixed or variable pattern type. + * @rf_fixed_pri_radar_pulse: indicates if it is a fixed pri pulse. + * @rf_mindur: Min duration for this radar filter. + * @rf_maxdur: Max duration for this radar filter. + * @rf_ignore_pri_window: Ignore pri window. 
+ * @rf_pulseid: Unique ID corresponding to the original filter ID. + * To reduce false detection, look at frequency spread. For now we will use + * sidx spread. But for HT160 frequency spread will be a better measure. + * @rf_sidx_spread: Maximum SIDX value spread in a matched sequence + * excluding FCC Bin 5. + * @rf_check_delta_peak: Minimum allowed delta_peak value for a pulse to be + * considered for this filter's match. + */ +struct dfs_filter { + struct dfs_delayline rf_dl; + uint32_t rf_numpulses; + uint32_t rf_minpri; + uint32_t rf_maxpri; + uint32_t rf_threshold; + uint32_t rf_filterlen; + uint32_t rf_patterntype; + uint32_t rf_fixed_pri_radar_pulse; + uint32_t rf_mindur; + uint32_t rf_maxdur; + uint32_t rf_ignore_pri_window; + uint32_t rf_pulseid; + uint16_t rf_sidx_spread; + int8_t rf_check_delta_peak; +} qdf_packed; + +/** + * struct dfs_filtertype - Structure of DFS Filter type. + * @ft_filters[]: Array of ptrs storing addresses for struct of dfs_filter. + * @ft_filterdur: Duration of pulse which specifies filter type. + * @ft_numfilters: Num filters of this type. + * @ft_last_ts: Last timestamp this filtertype was used (in usecs). + * @ft_mindur: Min pulse duration to be considered for this filter type. + * @ft_maxdur: Max pulse duration to be considered for this filter type. + * @ft_rssithresh: Min rssi to be considered for this filter type. + * @ft_numpulses: Num pulses in each filter of this type. + * @ft_patterntype: Fixed or variable pattern type. + * @ft_minpri: Min pri to be considered for this type. + * @ft_rssimargin: Rssi threshold margin. In Turbo Mode HW reports rssi 3dB + * lower than in non TURBO mode. This will offset that diff. 
+ */ +struct dfs_filtertype { + struct dfs_filter *ft_filters[DFS_MAX_NUM_RADAR_FILTERS]; + uint32_t ft_filterdur; + uint32_t ft_numfilters; + uint64_t ft_last_ts; + uint32_t ft_mindur; + uint32_t ft_maxdur; + uint32_t ft_rssithresh; + uint32_t ft_numpulses; + uint32_t ft_patterntype; + uint32_t ft_minpri; + uint32_t ft_rssimargin; +}; + +/** + * struct dfs_channel - Channel structure for dfs component. + * @dfs_ch_freq: Frequency in Mhz. + * @dfs_ch_flags: Channel flags. + * @dfs_ch_flagext: Extended channel flags. + * @dfs_ch_ieee: IEEE channel number. + * @dfs_ch_vhtop_ch_freq_seg1: Channel Center frequency. + * @dfs_ch_vhtop_ch_freq_seg2: Channel Center frequency applicable for 80+80MHz + * mode of operation. + */ +struct dfs_channel { + uint16_t dfs_ch_freq; + uint64_t dfs_ch_flags; + uint16_t dfs_ch_flagext; + uint8_t dfs_ch_ieee; + uint8_t dfs_ch_vhtop_ch_freq_seg1; + uint8_t dfs_ch_vhtop_ch_freq_seg2; +}; + +/** + * struct dfs_state - DFS state. + * @rs_chan: Channel info. + * @rs_chanindex: Channel index in radar structure. + * @rs_numradarevents: Number of radar events. + * @rs_param: Phy param. + */ +struct dfs_state { + struct dfs_channel rs_chan; + uint8_t rs_chanindex; + uint32_t rs_numradarevents; + struct wlan_dfs_phyerr_param rs_param; +}; + +#define DFS_NOL_TIMEOUT_S (30*60) /* 30 minutes in seconds */ +#define DFS_NOL_TIMEOUT_MS (DFS_NOL_TIMEOUT_S * 1000) +#define DFS_NOL_TIMEOUT_US (DFS_NOL_TIMEOUT_MS * 1000) + +/** + * struct dfs_nolelem - DFS NOL element. + * @nol_dfs Back pointer to dfs object. + * @nol_freq: Centre frequency. + * @nol_chwidth: Event width (MHz). + * @nol_start_ticks: NOL start time in OS ticks. + * @nol_timeout_ms: NOL timeout value in msec. + * @nol_timer: Per element NOL timer. + * @nol_next: Next element pointer. 
+ */ +struct dfs_nolelem { + TAILQ_ENTRY(dfs_nolelem) nolelem_list; + struct wlan_dfs *nol_dfs; + uint32_t nol_freq; + uint32_t nol_chwidth; + unsigned long nol_start_ticks; + uint32_t nol_timeout_ms; + qdf_timer_t nol_timer; + struct dfs_nolelem *nol_next; +} qdf_packed; + + +/** + * struct dfs_info - DFS Info. + * @rn_ftindex: Number of different types of radars. + * @rn_lastfull_ts: Last 64 bit timestamp from recv interrupt. + * @rn_last_ts: last 15 bit ts from recv descriptor. + * @rn_last_unique_ts: last unique 32 bit ts from recv descriptor. + * @rn_ts_prefix: Prefix to prepend to 15 bit recv ts. + * @rn_numbin5radars: Number of bin5 radar pulses to search for. + * @rn_fastdivGCval: Value of fast diversity gc limit from init file. + * @rn_minrssithresh: Min rssi for all radar types. + * @rn_maxpulsedur: Max pulse width in TSF ticks. + * @dfs_ext_chan_busy: Ext chan busy. + * @ext_chan_busy_ts: Ext chan busy time. + * @dfs_bin5_chirp_ts: Ext bin5 chirp time. + * @dfs_last_bin5_dur: Last bin5 duration. + */ +struct dfs_info { + uint32_t rn_ftindex; + uint64_t rn_lastfull_ts; + uint16_t rn_last_ts; + uint32_t rn_last_unique_ts; + uint64_t rn_ts_prefix; + uint32_t rn_numbin5radars; + uint32_t rn_fastdivGCval; + int32_t rn_minrssithresh; + uint32_t rn_maxpulsedur; + uint8_t dfs_ext_chan_busy; + uint64_t ext_chan_busy_ts; + uint64_t dfs_bin5_chirp_ts; + uint8_t dfs_last_bin5_dur; +} qdf_packed; + +/** + * struct dfs_bin5elem - BIN5 elements. + * @be_ts: Timestamp for the bin5 element. + * @be_rssi: Rssi for the bin5 element. + * @be_dur: Duration of bin5 element. + */ +struct dfs_bin5elem { + uint64_t be_ts; + uint32_t be_rssi; + uint32_t be_dur; +}; + +/** + * struct dfs_bin5radars - BIN5 radars. + * @br_elems[]: List of bin5 elems that fall within the time window. + * @br_firstelem: Index of the first element. + * @br_lastelem: Index of the last element. + * @br_numelems: Number of elements in the delay line. + * @br_pulse: Original info about bin5 pulse. 
+ */ +struct dfs_bin5radars { + struct dfs_bin5elem br_elems[DFS_MAX_B5_SIZE]; + uint32_t br_firstelem; + uint32_t br_lastelem; + uint32_t br_numelems; + struct dfs_bin5pulse br_pulse; +}; + +/** + * struct dfs_stats - DFS stats. + * @num_radar_detects: Total num. of radar detects. + * @num_seg_two_radar_detects: Total num. of radar detected in secondary segment + * @total_phy_errors: Total PHY errors. + * @owl_phy_errors: OWL PHY errors. + * @pri_phy_errors: Primary channel phy errors. + * @ext_phy_errors: Extension channel phy errors. + * @dc_phy_errors: DC PHY errors. + * @early_ext_phy_errors: Extension channel early radar found error. + * @bwinfo_errors: Bogus bandwidth info received in descriptor. + * @datalen_discards: data length at least three bytes of payload. + * @rssi_discards: RSSI is not accurate. + * @last_reset_tstamp: Last reset timestamp. + */ +struct dfs_stats { + uint32_t num_radar_detects; + uint32_t num_seg_two_radar_detects; + uint32_t total_phy_errors; + uint32_t owl_phy_errors; + uint32_t pri_phy_errors; + uint32_t ext_phy_errors; + uint32_t dc_phy_errors; + uint32_t early_ext_phy_errors; + uint32_t bwinfo_errors; + uint32_t datalen_discards; + uint32_t rssi_discards; + uint64_t last_reset_tstamp; +}; + +#define DFS_EVENT_LOG_SIZE 256 + +/** + * struct dfs_event_log - DFS event log. + * @ts: 64-bit full timestamp from interrupt time. + * @diff_ts: Diff timestamp. + * @rssi: Rssi of radar event. + * @dur: Duration of radar pulse. + * @is_chirp: Chirp flag. + * @seg_id: HT80_80/HT160 use. + * @sidx: Seg index. + * @freq_offset_khz: Freq offset in KHz + * @peak_mag: Peak mag. + * @total_gain: Total gain. + * @mb_gain: Mb gain. + * @relpwr_db: Relpower in db. + * @delta_diff: Delta diff. + * @delta_peak: Delta peak. + * @psidx_diff: Psidx diff. 
+ */ + +struct dfs_event_log { + uint64_t ts; + uint32_t diff_ts; + uint8_t rssi; + uint8_t dur; + int is_chirp; + uint8_t seg_id; + int sidx; + u_int freq_offset_khz; + int peak_mag; + int total_gain; + int mb_gain; + int relpwr_db; + uint8_t delta_diff; + int8_t delta_peak; + int16_t psidx_diff; +}; + +#define WLAN_DFS_RESET_TIME_S 7 +#define WLAN_DFS_WAIT (60 + WLAN_DFS_RESET_TIME_S) /* 60 seconds */ +#define WLAN_DFS_WAIT_MS ((WLAN_DFS_WAIT) * 1000) /*in MS*/ + +#define WLAN_DFS_WEATHER_CHANNEL_WAIT_MIN 10 /*10 minutes*/ +#define WLAN_DFS_WEATHER_CHANNEL_WAIT_S (WLAN_DFS_WEATHER_CHANNEL_WAIT_MIN * 60) +#define WLAN_DFS_WEATHER_CHANNEL_WAIT_MS \ + ((WLAN_DFS_WEATHER_CHANNEL_WAIT_S) * 1000) /*in MS*/ + +#define WLAN_DFS_WAIT_POLL_PERIOD 2 /* 2 seconds */ +#define WLAN_DFS_WAIT_POLL_PERIOD_MS \ + ((WLAN_DFS_WAIT_POLL_PERIOD) * 1000) /*in MS*/ + +#define DFS_DEBUG_TIMEOUT_S 30 /* debug timeout is 30 seconds */ +#define DFS_DEBUG_TIMEOUT_MS (DFS_DEBUG_TIMEOUT_S * 1000) + +#define RSSI_POSSIBLY_FALSE 50 +#define SEARCH_FFT_REPORT_PEAK_MAG_THRSH 40 + +/** + * struct wlan_dfs - The main dfs structure. + * @dfs_debug_mask: Current debug bitmask. + * @dfs_curchan_radindex: Current channel radar index. + * @dfs_extchan_radindex: Extension channel radar index. + * @dfsdomain: Current DFS domain. + * @dfs_proc_phyerr: Flags for Phy Errs to process. + * @dfs_eventq: Q of free dfs event objects. + * @dfs_eventqlock: Lock for free dfs event list. + * @dfs_radarq: Q of radar events. + * @dfs_radarqlock: Lock for dfs q. + * @dfs_arq: Q of AR events. + * @dfs_arqlock: Lock for AR q. + * @dfs_ar_state: AR state. + * @dfs_radar[]: Per-Channel Radar detector state. + * @dfs_radarf[]: One filter for each radar pulse type. + * @dfs_rinfo: State vars for radar processing. + * @dfs_b5radars: Array of bin5 radar events. + * @dfs_ftindextable: Map of radar durs to filter types. + * @dfs_nol: Non occupancy list for radar. + * @dfs_nol_count: How many items? 
+ * @dfs_defaultparams: Default phy params per radar state.
+ * @wlan_dfs_stats: DFS related stats.
+ * @pulses: Pulse history.
+ * @events: Events structure.
+ * @wlan_radar_tasksched: Radar task is scheduled.
+ * @wlan_dfswait: Waiting on channel for radar detect.
+ * @wlan_dfstest: Test timer in progress.
+ * @dfs_caps: Object of wlan_dfs_caps structure.
+ * @wlan_dfstest_ieeechan: IEEE chan num to return to after a dfs mute
+ * test.
+ * @wlan_dfs_cac_time: CAC period.
+ * @wlan_dfstesttime: Time to stay off chan during dfs test.
+ * @wlan_dfswaittimer: Dfs wait timer.
+ * @wlan_dfstesttimer: Dfs mute test timer.
+ * @wlan_dfs_debug_timer: Dfs debug timer.
+ * @dfs_second_segment_bangradar: Bangradar on second segment of
+ * VHT80_80/160.
+ * @is_radar_found_on_secondary_seg: Radar on second segment.
+ * @is_radar_during_precac: Radar found during precac.
+ * @dfs_precac_lock: Lock to protect precac lists.
+ * @dfs_precac_enable: Enable the precac.
+ * @dfs_precac_secondary_freq: Second segment freq for precac.
+ * @dfs_precac_primary_freq: Primary freq.
+ * @dfs_precac_timer_running: Precac timer running.
+ * @dfs_defer_precac_channel_change: Defer precac channel change.
+ * @dfs_pre_cac_timeout_channel_change: Channel change due to precac timeout.
+ * @wlan_dfs_task_timer: Dfs wait timer.
+ * @dur_multiplier: Duration multiplier.
+ * @wlan_dfs_isdfsregdomain: True when AP is in DFS domain.
+ * @wlan_dfs_false_rssi_thres: False RSSI Threshold.
+ * @wlan_dfs_peak_mag: Peak mag.
+ * @radar_log[]: Radar log.
+ * @dfs_event_log_count: Event log count.
+ * @dfs_event_log_on: Event log on.
+ * @dfs_phyerr_count: Same as number of PHY radar interrupts.
+ * @dfs_phyerr_reject_count: When TLV is supported, # of radar events
+ * ignored after TLV is parsed.
+ * @dfs_phyerr_queued_count: Number of radar events queued for matching
+ * the filters.
+ * @dfs_phyerr_freq_min: Phyerr min freq.
+ * @dfs_phyerr_freq_max: Phyerr max freq.
+ * @dfs_phyerr_w53_counter: Phyerr w53 counter. + * @dfs_pri_multiplier: Allow pulse if they are within multiple of + * PRI for the radar type. + * @wlan_dfs_nol_timeout: NOL timeout. + * @update_nol: Update NOL. + * @dfs_seq_num: Sequence number. + * @dfs_nol_event[]: NOL event. + * @dfs_nol_timer: NOL list processing. + * @dfs_nol_free_list: NOL free list. + * @dfs_nol_elem_free_work: The work queue to free an NOL element. + * @dfs_cac_timer: CAC timer. + * @dfs_cac_valid_timer: Ignore CAC when this timer is running. + * @dfs_cac_timeout_override: Overridden cac timeout. + * @dfs_enable: DFS Enable. + * @dfs_cac_timer_running: DFS CAC timer running. + * @dfs_ignore_dfs: Ignore DFS. + * @dfs_ignore_cac: Ignore CAC. + * @dfs_cac_valid: DFS CAC valid. + * @dfs_cac_valid_time: Time for which CAC will be valid and will + * not be re-done. + * @dfs_precac_timer: PRECAC timer. + * @dfs_precac_timeout_override: Overridden precac timeout. + * @dfs_num_precac_freqs: Number of PreCAC VHT80 frequencies. + * @dfs_precac_required_list: PreCAC required list. + * @dfs_precac_done_list: PreCAC done list. + * @dfs_precac_nol_list: PreCAC NOL List. + * @dfs_is_offload_enabled: Set if DFS offload enabled. + * @dfs_use_nol: Use the NOL when radar found(default: TRUE) + * @dfs_nol_lock: Lock to protect nol list. + * @tx_leakage_threshold: Tx leakage threshold for dfs. + * @dfs_use_nol_subchannel_marking: Use subchannel marking logic to add only + * radar affected subchannel instead of all + * bonding channels. + * @dfs_host_wait_timer: The timer that is started from host after + * sending the average radar parameters. + * Before this timeout host expects its dfs + * status from fw. + * @dfs_average_pri: Average pri value of the received radar + * pulses. + * @dfs_average_duration: Average duration of the received radar + * pulses. + * @dfs_average_sidx: Average sidx of the received radar pulses. + * @dfs_is_host_wait_running: Indicates if host dfs status wait timer is + * running. 
+ * @dfs_average_params_sent: Indicates if host has sent the average + * radar parameters. + * @dfs_no_res_from_fw: Indicates no response from fw. + * @dfs_spoof_check_failed: Indicates if the spoof check has failed. + * @dfs_spoof_test_done: Indicates if the sppof test is done. + * @dfs_seg_id: Segment ID of the radar hit channel. + * @dfs_status_timeout_override: Used to change the timeout value of + * dfs_host_wait_timer. + */ +struct wlan_dfs { + uint32_t dfs_debug_mask; + int16_t dfs_curchan_radindex; + int16_t dfs_extchan_radindex; + uint32_t dfsdomain; + uint32_t dfs_proc_phyerr; + + STAILQ_HEAD(, dfs_event) dfs_eventq; + qdf_spinlock_t dfs_eventqlock; + + STAILQ_HEAD(, dfs_event) dfs_radarq; + qdf_spinlock_t dfs_radarqlock; + + STAILQ_HEAD(, dfs_event) dfs_arq; + qdf_spinlock_t dfs_arqlock; + + struct dfs_ar_state dfs_ar_state; + struct dfs_state dfs_radar[DFS_NUM_RADAR_STATES]; + struct dfs_filtertype *dfs_radarf[DFS_MAX_RADAR_TYPES]; + struct dfs_info dfs_rinfo; + struct dfs_bin5radars *dfs_b5radars; + int8_t **dfs_ftindextable; + struct dfs_nolelem *dfs_nol; + int dfs_nol_count; + struct wlan_dfs_phyerr_param dfs_defaultparams; + struct dfs_stats wlan_dfs_stats; + struct dfs_pulseline *pulses; + struct dfs_event *events; + + uint32_t wlan_radar_tasksched:1, + wlan_dfswait:1, + wlan_dfstest:1; + struct wlan_dfs_caps dfs_caps; + uint8_t wlan_dfstest_ieeechan; + uint32_t wlan_dfs_cac_time; + uint32_t wlan_dfstesttime; + qdf_timer_t wlan_dfswaittimer; + qdf_timer_t wlan_dfstesttimer; + qdf_timer_t wlan_dfs_debug_timer; + uint8_t dfs_bangradar; + bool dfs_second_segment_bangradar; + bool is_radar_found_on_secondary_seg; + bool is_radar_during_precac; + qdf_spinlock_t dfs_precac_lock; + bool dfs_precac_enable; + uint8_t dfs_precac_secondary_freq; + uint8_t dfs_precac_primary_freq; + uint8_t dfs_precac_timer_running; + uint8_t dfs_defer_precac_channel_change; + uint8_t dfs_pre_cac_timeout_channel_change:1; + qdf_timer_t wlan_dfs_task_timer; + int 
dur_multiplier; + uint16_t wlan_dfs_isdfsregdomain; + int wlan_dfs_false_rssi_thres; + int wlan_dfs_peak_mag; + struct dfs_event_log radar_log[DFS_EVENT_LOG_SIZE]; + int dfs_event_log_count; + int dfs_event_log_on; + int dfs_phyerr_count; + int dfs_phyerr_reject_count; + int dfs_phyerr_queued_count; + int dfs_phyerr_freq_min; + int dfs_phyerr_freq_max; + int dfs_phyerr_w53_counter; + int dfs_pri_multiplier; + int wlan_dfs_nol_timeout; + bool update_nol; + uint32_t dfs_seq_num; + int dfs_nol_event[DFS_CHAN_MAX]; + qdf_timer_t dfs_nol_timer; + + TAILQ_HEAD(, dfs_nolelem) dfs_nol_free_list; + qdf_work_t dfs_nol_elem_free_work; + + qdf_timer_t dfs_cac_timer; + qdf_timer_t dfs_cac_valid_timer; + int dfs_cac_timeout_override; + uint8_t dfs_enable:1, + dfs_cac_timer_running:1, + dfs_ignore_dfs:1, + dfs_ignore_cac:1, + dfs_cac_valid:1; + uint32_t dfs_cac_valid_time; + qdf_timer_t dfs_precac_timer; + int dfs_precac_timeout_override; + uint8_t dfs_num_precac_freqs; + + TAILQ_HEAD(, dfs_precac_entry) dfs_precac_required_list; + TAILQ_HEAD(, dfs_precac_entry) dfs_precac_done_list; + TAILQ_HEAD(, dfs_precac_entry) dfs_precac_nol_list; + + struct dfs_channel *dfs_curchan; + struct wlan_objmgr_pdev *dfs_pdev_obj; + bool dfs_is_offload_enabled; + int dfs_use_nol; + qdf_spinlock_t dfs_nol_lock; + uint16_t tx_leakage_threshold; + bool dfs_use_nol_subchannel_marking; +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) + qdf_timer_t dfs_host_wait_timer; + uint32_t dfs_average_pri; + uint32_t dfs_average_duration; + uint32_t dfs_average_sidx; + uint8_t dfs_is_host_wait_running:1, + dfs_average_params_sent:1, + dfs_no_res_from_fw:1, + dfs_spoof_check_failed:1, + dfs_spoof_test_done:1; + uint8_t dfs_seg_id; + struct dfs_channel dfs_radar_found_chan; + int dfs_status_timeout_override; +#endif +}; + +/** + * struct dfs_soc_priv_obj - dfs private data + * @psoc: pointer to PSOC object information + * @pdev: pointer to PDEV object information + * 
@dfs_is_phyerr_filter_offload: For some chip like Rome indicates too many
+ * phyerr packets in a short time, which causes
+ * OS hang. If this field is configured as true,
+ * FW will do the pre-check, filter out some
+ * kinds of invalid phyerrors and indicate
+ * radar detection related information to host.
+ */
+struct dfs_soc_priv_obj {
+ struct wlan_objmgr_psoc *psoc;
+ struct wlan_objmgr_pdev *pdev;
+ bool dfs_is_phyerr_filter_offload;
+};
+
+/**
+ * enum DFS debug - This should match the table from if_ath.c.
+ * @WLAN_DEBUG_DFS: Minimal DFS debug.
+ * @WLAN_DEBUG_DFS1: Normal DFS debug.
+ * @WLAN_DEBUG_DFS2: Maximal DFS debug.
+ * @WLAN_DEBUG_DFS3: Matched filterID display.
+ * @WLAN_DEBUG_DFS_PHYERR: Phy error parsing.
+ * @WLAN_DEBUG_DFS_NOL: NOL related entries.
+ * @WLAN_DEBUG_DFS_PHYERR_SUM: PHY error summary.
+ * @WLAN_DEBUG_DFS_PHYERR_PKT: PHY error payload.
+ * @WLAN_DEBUG_DFS_BIN5: BIN5 checks.
+ * @WLAN_DEBUG_DFS_BIN5_FFT: BIN5 FFT check.
+ * @WLAN_DEBUG_DFS_BIN5_PULSE: BIN5 pulse check.
+ * @WLAN_DEBUG_DFS_FALSE_DET: False detection debug related prints.
+ * @WLAN_DEBUG_DFS_FALSE_DET2: Second level check to confirm positive
+ * detection.
+ * @WLAN_DEBUG_DFS_RANDOM_CHAN: Random channel selection.
+ */
+enum {
+ WLAN_DEBUG_DFS = 0x00000100,
+ WLAN_DEBUG_DFS1 = 0x00000200,
+ WLAN_DEBUG_DFS2 = 0x00000400,
+ WLAN_DEBUG_DFS3 = 0x00000800,
+ WLAN_DEBUG_DFS_PHYERR = 0x00001000,
+ WLAN_DEBUG_DFS_NOL = 0x00002000,
+ WLAN_DEBUG_DFS_PHYERR_SUM = 0x00004000,
+ WLAN_DEBUG_DFS_PHYERR_PKT = 0x00008000,
+ WLAN_DEBUG_DFS_BIN5 = 0x00010000,
+ WLAN_DEBUG_DFS_BIN5_FFT = 0x00020000,
+ WLAN_DEBUG_DFS_BIN5_PULSE = 0x00040000,
+ WLAN_DEBUG_DFS_FALSE_DET = 0x00080000,
+ WLAN_DEBUG_DFS_FALSE_DET2 = 0x00100000,
+ WLAN_DEBUG_DFS_RANDOM_CHAN = 0x00200000,
+ WLAN_DEBUG_DFS_MAX = 0x80000000,
+ WLAN_DEBUG_DFS_ALWAYS = WLAN_DEBUG_DFS_MAX
+};
+
+/**
+ * enum host dfs spoof check status.
+ * @HOST_DFS_STATUS_CHECK_PASSED: Host indicates RADAR detected and the FW
+ * confirms it to be spoof radar to host.
+ * @HOST_DFS_STATUS_CHECK_FAILED: Host doesn't indicate RADAR detected or spoof
+ * radar parameters by
+ * WMI_HOST_DFS_RADAR_FOUND_CMDID doesn't match.
+ * @HOST_DFS_STATUS_CHECK_HW_RADAR: Host indicates RADAR detected and the
+ * FW confirms it to be real HW radar to host.
+ */
+#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST)
+enum {
+ HOST_DFS_STATUS_CHECK_PASSED = 0,
+ HOST_DFS_STATUS_CHECK_FAILED = 1,
+ HOST_DFS_STATUS_CHECK_HW_RADAR = 2
+};
+#endif
+
+/**
+ * struct dfs_phy_err - DFS phy error.
+ * @fulltsf: 64-bit TSF as read from MAC.
+ * @is_pri: Detected on primary channel.
+ * @is_ext: Detected on extension channel.
+ * @is_dc: Detected at DC.
+ * @is_early: Early detect.
+ * @do_check_chirp: Whether to check hw_chirp/sw_chirp.
+ * @is_hw_chirp: Hardware-detected chirp.
+ * @is_sw_chirp: Software detected chirp.
+ * @rs_tstamp: 32 bit TSF from RX descriptor (event).
+ * @freq: Centre frequency of event - KHz.
+ * @freq_lo: Lower bounds of frequency - KHz.
+ * @freq_hi: Upper bounds of frequency - KHz.
+ * @rssi: Pulse RSSI.
+ * @dur: Pulse duration, raw (not uS).
+ * @seg_id: HT80_80/HT160 use.
+ * @sidx: Seg index.
+ * @freq_offset_khz: Freq offset in KHz.
+ * @peak_mag: Peak mag.
+ * @total_gain: Total gain.
+ * @mb_gain: Mb gain.
+ * @relpwr_db: Relpower in DB.
+ * @pulse_delta_diff: Pulse delta diff.
+ * @pulse_delta_peak: Pulse delta peak.
+ * @pulse_psidx_diff: Pulse psidx diff.
+ *
+ * Chirp notes!
+ *
+ * Pre-Sowl chips don't do FFT reports, so chirp pulses simply show up
+ * as long duration pulses.
+ *
+ * The bin5 checking code would simply look for a chirp pulse of the correct
+ * duration (within MIN_BIN5_DUR and MAX_BIN5_DUR) and add it to the "chirp"
+ * pattern.
+ *
+ * For Sowl and later, an FFT was done on longer duration frames.
If those + * frames looked like a chirp, their duration was adjusted to fall within + * the chirp duration limits. If the pulse failed the chirp test (it had + * no FFT data or the FFT didn't meet the chirping requirements) then the + * pulse duration was adjusted to be greater than MAX_BIN5_DUR, so it + * would always fail chirp detection. + * + * This is pretty horrible. + * + * The eventual goal for chirp handling is thus: + * + * 1)In case someone ever wants to do chirp detection with this code on + * chips that don't support chirp detection, you can still do it based + * on pulse duration. That's your problem to solve. + * + * 2)For chips that do hardware chirp detection or FFT, the "do_check_chirp" + * bit should be set. + * + * 3)Then, either is_hw_chirp or is_sw_chirp is set, indicating that + * the hardware or software post-processing of the chirp event found + * that indeed it was a chirp. + * + * 4)Finally, the bin5 code should just check whether the chirp bits are + * set and behave appropriately, falling back onto the duration checks + * if someone wishes to use this on older hardware (or with disabled + * FFTs, for whatever reason.) + * + * XXX TODO: + * + * 1)add duration in uS and raw duration, so the PHY error parsing + * code is responsible for doing the duration calculation; + * 2)add ts in raw and corrected, so the PHY error parsing + * code is responsible for doing the offsetting, not the radar + * event code. 
+ */ +struct dfs_phy_err { + uint64_t fulltsf; + uint32_t is_pri:1, + is_ext:1, + is_dc:1, + is_early:1, + do_check_chirp:1, + is_hw_chirp:1, + is_sw_chirp:1; + uint32_t rs_tstamp; + uint32_t freq; + uint32_t freq_lo; + uint32_t freq_hi; + uint8_t rssi; + uint8_t dur; + uint8_t seg_id; + int sidx; + u_int freq_offset_khz; + int peak_mag; + int total_gain; + int mb_gain; + int relpwr_db; + uint8_t pulse_delta_diff; + int8_t pulse_delta_peak; + int16_t pulse_psidx_diff; +}; + +/** + * struct rx_radar_status - Parsed radar status + * @raw_tsf: Raw tsf + * @tsf_offset: TSF offset. + * @rssi: RSSI. + * @pulse_duration: Pulse duration. + * @is_chirp: Is chirp. + * @delta_peak: Delta peak. + * @delta_diff: Delta diff. + * @sidx: Starting frequency. + * @freq_offset: Frequency offset. + * @agc_total_gain: AGC total gain. + * @agc_mb_gain: AGC MB gain. + */ +struct rx_radar_status { + uint32_t raw_tsf; + uint32_t tsf_offset; + int rssi; + int pulse_duration; + int is_chirp:1; + int delta_peak; + int delta_diff; + int sidx; + int freq_offset; /* in KHz */ + int agc_total_gain; + int agc_mb_gain; +}; + +/** + * struct rx_search_fft_report - FFT report. + * @total_gain_db: Total gain in Db. + * @base_pwr_db: Base power in Db. + * @fft_chn_idx: FFT channel index. + * @peak_sidx: Peak sidx. + * @relpwr_db: Real power in Db. + * @avgpwr_db: Average power in Db. + * @peak_mag: Peak Mag. + * @num_str_bins_ib: Num dtr BINs IB + * @seg_id: Segment ID + */ +struct rx_search_fft_report { + uint32_t total_gain_db; + uint32_t base_pwr_db; + int fft_chn_idx; + int peak_sidx; + int relpwr_db; + int avgpwr_db; + int peak_mag; + int num_str_bins_ib; + int seg_id; +}; + +/** + * dfs_process_radarevent() - process the radar event generated for a pulse. + * @dfs: Pointer to wlan_dfs structure. + * @chan: Current channel. 
+ * + * There is currently no way to specify that a radar event has occurred on + * a specific channel, so the current methodology is to mark both the pri + * and ext channels as being unavailable. This should be fixed for 802.11ac + * or we'll quickly run out of valid channels to use. + * + * If Radar found, this marks the channel (and the extension channel, if HT40) + * as having seen a radar event. It marks CHAN_INTERFERENCE and will add it to + * the local NOL implementation. This is only done for 'usenol=1', as the other + * two modes don't do radar notification or CAC/CSA/NOL; it just notes there + * was a radar. + */ +void dfs_process_radarevent(struct wlan_dfs *dfs, + struct dfs_channel *chan); + +/** + * dfs_nol_addchan() - Add channel to NOL. + * @dfs: Pointer to wlan_dfs structure. + * @freq: frequency to add to NOL. + * @dfs_nol_timeout: NOL timeout. + */ +void dfs_nol_addchan(struct wlan_dfs *dfs, + uint16_t freq, + uint32_t dfs_nol_timeout); + +/** + * dfs_get_nol() - Get NOL. + * @dfs: Pointer to wlan_dfs structure. + * @dfs_nol: Pointer to dfsreq_nolelem structure to save the channels from NOL. + * @nchan: Number of channels. + */ +void dfs_get_nol(struct wlan_dfs *dfs, + struct dfsreq_nolelem *dfs_nol, + int *nchan); + +/** + * dfs_set_nol() - Set NOL. + * @dfs: Pointer to wlan_dfs structure. + * @dfs_nol: Pointer to dfsreq_nolelem structure. + * @nchan: Number of channels. + */ +void dfs_set_nol(struct wlan_dfs *dfs, + struct dfsreq_nolelem *dfs_nol, + int nchan); + +/** + * dfs_nol_update() - NOL update + * @dfs: Pointer to wlan_dfs structure. + * + * Notify the driver/umac that it should update the channel radar/NOL flags + * based on the current NOL list. + */ +void dfs_nol_update(struct wlan_dfs *dfs); + +/** + * dfs_nol_timer_cleanup() - NOL timer cleanup. + * @dfs: Pointer to wlan_dfs structure. + * + * Cancels the NOL timer and frees the NOL elements. 
+ */
+void dfs_nol_timer_cleanup(struct wlan_dfs *dfs);
+
+/**
+ * dfs_nol_timer_free() - Free NOL timer.
+ * @dfs: Pointer to wlan_dfs structure.
+ */
+void dfs_nol_timer_free(struct wlan_dfs *dfs);
+
+/**
+ * dfs_nol_workqueue_cleanup() - Flushes NOL workqueue.
+ * @dfs: Pointer to wlan_dfs structure.
+ *
+ * Flushes the NOL workqueue.
+ */
+void dfs_nol_workqueue_cleanup(struct wlan_dfs *dfs);
+
+/**
+ * dfs_retain_bin5_burst_pattern() - Retain the BIN5 burst pattern.
+ * @dfs: Pointer to wlan_dfs structure.
+ * @diff_ts: Timestamp diff.
+ * @old_dur: Old duration.
+ */
+uint8_t dfs_retain_bin5_burst_pattern(struct wlan_dfs *dfs,
+ uint32_t diff_ts,
+ uint8_t old_dur);
+
+/**
+ * dfs_bin5_check_pulse() - BIN5 check pulse.
+ * @dfs: Pointer to wlan_dfs structure.
+ * @re: Pointer to dfs_event structure.
+ * @br: Pointer to dfs_bin5radars structure.
+ *
+ * Reject the pulse if:
+ * 1) It's outside the RSSI threshold;
+ * 2) It's outside the pulse duration;
+ * 3) It's been verified by HW/SW chirp checking
+ * and neither of those found a chirp.
+ */
+int dfs_bin5_check_pulse(struct wlan_dfs *dfs,
+ struct dfs_event *re,
+ struct dfs_bin5radars *br);
+
+/**
+ * dfs_bin5_addpulse() - BIN5 add pulse.
+ * @dfs: Pointer to wlan_dfs structure.
+ * @br: Pointer to dfs_bin5radars structure.
+ * @re: Pointer to dfs_event structure.
+ * @thists: Timestamp.
+ */
+int dfs_bin5_addpulse(struct wlan_dfs *dfs,
+ struct dfs_bin5radars *br,
+ struct dfs_event *re,
+ uint64_t thists);
+
+/**
+ * dfs_bin5_check() - BIN5 check.
+ * @dfs: Pointer to wlan_dfs structure.
+ *
+ * If the dfs structure is NULL (which should be illegal if everything is working
+ * properly), then signify that a bin5 radar was found.
+ */
+int dfs_bin5_check(struct wlan_dfs *dfs);
+
+/**
+ * dfs_check_chirping() - Check chirping.
+ * @dfs: Pointer to wlan_dfs structure.
+ * @buf: Phyerr buffer
+ * @datalen: Phyerr buf length
+ * @is_ctl: detected on primary channel.
+ * @is_ext: detected on extension channel.
+ * @slope: Slope + * @is_dc: DC found + * + * This examines the FFT data contained in the PHY error information to figure + * out whether the pulse is moving across frequencies. + */ +int dfs_check_chirping(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + int is_ctl, + int is_ext, + int *slope, + int *is_dc); + +/** + * dfs_get_random_bin5_dur() - Get random BIN5 duration. + * @dfs: Pointer to wlan_dfs structure. + * @tstamp: Timestamp. + * + * Chirping pulses may get cut off at DC and report lower durations. + * This function will compute a suitable random duration for each pulse. + * Duration must be between 50 and 100 us, but remember that in + * wlan_process_phyerr() which calls this function, we are dealing with the + * HW reported duration (unconverted). dfs_process_radarevent() will + * actually convert the duration into the correct value. + * This function doesn't take into account whether the hardware + * is operating in 5GHz fast clock mode or not. + * And this function doesn't take into account whether the hardware + * is peregrine or not. + */ +int dfs_get_random_bin5_dur(struct wlan_dfs *dfs, + uint64_t tstamp); + +/** + * dfs_print_delayline() - Prints delayline. + * @dfs: Pointer to wlan_dfs structure. + * @dl: Pointer to dfs_delayline structure. + */ +void dfs_print_delayline(struct wlan_dfs *dfs, + struct dfs_delayline *dl); + +/** + * dfs_print_nol() - Print NOL elements. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_print_nol(struct wlan_dfs *dfs); + +/** + * dfs_print_filter() - Prints the filter. + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + */ +void dfs_print_filter(struct wlan_dfs *dfs, + struct dfs_filter *rf); + +/** + * dfs_getchanstate() - Get chan state. + * @dfs: Pointer to wlan_dfs structure. 
+ * @index: To save the index of dfs_radar[]
+ * @ext_ch_flag: Extension channel flag.
+ */
+struct dfs_state *dfs_getchanstate(struct wlan_dfs *dfs,
+ uint8_t *index,
+ int ext_ch_flag);
+
+/**
+ * dfs_round() - DFS round.
+ * @val: Convert durations to TSF ticks.
+ *
+ * Return: TSF ticks.
+ */
+uint32_t dfs_round(int32_t val);
+
+/**
+ * dfs_reset_alldelaylines() - Reset alldelaylines.
+ * @dfs: Pointer to wlan_dfs structure.
+ */
+#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD)
+void dfs_reset_alldelaylines(struct wlan_dfs *dfs);
+#else
+static inline void dfs_reset_alldelaylines(struct wlan_dfs *dfs)
+{
+}
+#endif
+
+/**
+ * dfs_reset_delayline() - Clear only a single delay line.
+ * @dl: Pointer to dfs_delayline structure.
+ */
+void dfs_reset_delayline(struct dfs_delayline *dl);
+
+/**
+ * dfs_reset_filter_delaylines() - Reset filter delaylines.
+ * @dft: Pointer to dfs_filtertype structure.
+ */
+void dfs_reset_filter_delaylines(struct dfs_filtertype *dft);
+
+/**
+ * dfs_reset_radarq() - Reset radar queue.
+ * @dfs: Pointer to wlan_dfs structure.
+ */
+#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD)
+void dfs_reset_radarq(struct wlan_dfs *dfs);
+#else
+static inline void dfs_reset_radarq(struct wlan_dfs *dfs)
+{
+}
+#endif
+
+/**
+ * dfs_add_pulse() - Adds pulse to the queue.
+ * @dfs: Pointer to wlan_dfs structure.
+ * @rf: Pointer to dfs_filter structure.
+ * @re: Pointer to dfs_event structure.
+ * @deltaT: deltaT value.
+ * @this_ts: Last time stamp.
+ */
+void dfs_add_pulse(struct wlan_dfs *dfs,
+ struct dfs_filter *rf,
+ struct dfs_event *re,
+ uint32_t deltaT,
+ uint64_t this_ts);
+
+/**
+ * dfs_bin_check() - BIN check
+ * @dfs: Pointer to wlan_dfs structure.
+ * @rf: Pointer to dfs_filter structure.
+ * @deltaT: deltaT value.
+ * @dur: Duration.
+ * @ext_chan_flag: Extension channel flag.
+ */ +int dfs_bin_check(struct wlan_dfs *dfs, + struct dfs_filter *rf, + uint32_t deltaT, + uint32_t dur, + int ext_chan_flag); + +/** + * dfs_bin_pri_check() - BIN PRI check + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @dl: Pointer to dfs_delayline structure. + * @score: Primary score. + * @refpri: Current "filter" time for start of pulse in usecs. + * @refdur: Duration value. + * @ext_chan_flag: Extension channel flag. + * @fundamentalpri: Highest PRI. + */ +int dfs_bin_pri_check(struct wlan_dfs *dfs, + struct dfs_filter *rf, + struct dfs_delayline *dl, + uint32_t score, + uint32_t refpri, + uint32_t refdur, + int ext_chan_flag, + int fundamentalpri); + +/** + * dfs_staggered_check() - Detection implementation for staggered PRIs. + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @deltaT: Delta of the Timestamp. + * @width: Duration of radar pulse. + * + * Return: 1 on success and 0 on failure. + */ +int dfs_staggered_check(struct wlan_dfs *dfs, + struct dfs_filter *rf, + uint32_t deltaT, + uint32_t width); + +/** + * dfs_get_pri_margin() - Get Primary margin. + * @dfs: Pointer to wlan_dfs structure. + * @is_extchan_detect: Extension channel detect. + * @is_fixed_pattern: Fixed pattern. + * + * For the extension channel, if legacy traffic is present, we see a lot of + * false alarms, so make the PRI margin narrower depending on the busy % for + * the extension channel. + * + * Return: Returns pri_margin. + */ +int dfs_get_pri_margin(struct wlan_dfs *dfs, + int is_extchan_detect, + int is_fixed_pattern); + +/** + * dfs_get_filter_threshold() - Get filter threshold. + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @is_extchan_detect: Extension channel detect. + * + * For the extension channel, if legacy traffic is present, we see a lot of + * false alarms, so make the thresholds higher depending on the busy % for the + * extension channel. 
+ * + * Return: Returns threshold. + */ +int dfs_get_filter_threshold(struct wlan_dfs *dfs, + struct dfs_filter *rf, + int is_extchan_detect); + +/** + * dfs_process_ar_event() - Process the ar event. + * @dfs: Pointer to wlan_dfs structure. + * @chan: Current channel structure. + */ +void dfs_process_ar_event(struct wlan_dfs *dfs, + struct dfs_channel *chan); + +/** + * dfs_reset_ar() - resets the ar state. + * @dfs: pointer to wlan_dfs structure. + */ +void dfs_reset_ar(struct wlan_dfs *dfs); + +/** + * dfs_reset_arq() - resets the ar queue. + * @dfs: pointer to wlan_dfs structure. + */ +void dfs_reset_arq(struct wlan_dfs *dfs); + +/** + * dfs_is_radar_enabled() - check if radar detection is enabled. + * @dfs: Pointer to wlan_dfs structure. + * @ignore_dfs: if 1 then radar detection is disabled.. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_is_radar_enabled(struct wlan_dfs *dfs, + int *ignore_dfs); +#else +static inline void dfs_is_radar_enabled(struct wlan_dfs *dfs, + int *ignore_dfs) +{ +} +#endif + +/** + * dfs_process_phyerr_bb_tlv() - Parses the PHY error and populates the + * dfs_phy_err struct. + * @dfs: Pointer to wlan_dfs structure. + * @buf: Phyerr buffer + * @datalen: Phyerr buf len + * @rssi: RSSI + * @ext_rssi: Extension RSSI. + * @rs_tstamp: Time stamp. + * @fulltsf: TSF64. + * @e: Pointer to dfs_phy_err structure. + * + * Return: Returns 1. + */ +int dfs_process_phyerr_bb_tlv(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + uint8_t rssi, + uint8_t ext_rssi, + uint32_t rs_tstamp, + uint64_t fulltsf, + struct dfs_phy_err *e); + +/** + * dfs_reset() - DFS reset + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_reset(struct wlan_dfs *dfs); + +/** + * dfs_radar_enable() - Enables the radar. + * @dfs: Pointer to wlan_dfs structure. + * @no_cac: If no_cac is 0, it cancels the CAC. 
+ */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_radar_enable(struct wlan_dfs *dfs, + int no_cac, uint32_t opmode); +#else +static inline void dfs_radar_enable(struct wlan_dfs *dfs, + int no_cac, uint32_t opmode) +{ +} +#endif + +/** + * dfs_process_phyerr() - Process phyerr. + * @dfs: Pointer to wlan_dfs structure. + * @buf: Phyerr buffer. + * @datalen: phyerr buffer length. + * @r_rssi: RSSI. + * @r_ext_rssi: Extension channel RSSI. + * @r_rs_tstamp: Timestamp. + * @r_fulltsf: TSF64. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_process_phyerr(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + uint8_t r_rssi, + uint8_t r_ext_rssi, + uint32_t r_rs_tstamp, + uint64_t r_fulltsf); +#else +static inline void dfs_process_phyerr(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + uint8_t r_rssi, + uint8_t r_ext_rssi, + uint32_t r_rs_tstamp, + uint64_t r_fulltsf) +{ +} +#endif + +#ifdef QCA_MCL_DFS_SUPPORT +/** + * dfs_process_phyerr_filter_offload() - Process radar event. + * @dfs: Pointer to wlan_dfs structure. + * @wlan_radar_event: Pointer to radar_event_info structure. + * + * Return: None + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_process_phyerr_filter_offload(struct wlan_dfs *dfs, + struct radar_event_info *wlan_radar_event); +#else +static inline void dfs_process_phyerr_filter_offload( + struct wlan_dfs *dfs, + struct radar_event_info *wlan_radar_event) +{ +} +#endif +#endif + +/** + * dfs_get_radars() - Based on the chipset, calls init radar table functions. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_get_radars(struct wlan_dfs *dfs); +#else +static inline void dfs_get_radars(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_attach() - Wrapper function to allocate memory for wlan_dfs members. + * @dfs: Pointer to wlan_dfs structure. 
+ */ +int dfs_attach(struct wlan_dfs *dfs); + + +/** + * dfs_create_object() - Creates DFS object. + * @dfs: Pointer to wlan_dfs structure. + */ +int dfs_create_object(struct wlan_dfs **dfs); + +/** + * dfs_destroy_object() - Destroys the DFS object. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_destroy_object(struct wlan_dfs *dfs); + +/** + * dfs_detach() - Wrapper function to free dfs variables. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_detach(struct wlan_dfs *dfs); + +/** + * dfs_cac_valid_reset() - Cancels the dfs_cac_valid_timer timer. + * @dfs: Pointer to wlan_dfs structure. + * @prevchan_ieee: Prevchan number. + * @prevchan_flags: Prevchan flags. + */ +void dfs_cac_valid_reset(struct wlan_dfs *dfs, + uint8_t prevchan_ieee, + uint32_t prevchan_flags); + +/** + * dfs_cac_stop() - Clear the AP CAC timer. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_cac_stop(struct wlan_dfs *dfs); + +/** + * dfs_cancel_cac_timer() - Cancels the CAC timer. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_cancel_cac_timer(struct wlan_dfs *dfs); + +/** + * dfs_start_cac_timer() - Starts the CAC timer. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_start_cac_timer(struct wlan_dfs *dfs); + +/** + * dfs_set_update_nol_flag() - Sets update_nol flag. + * @dfs: Pointer to wlan_dfs structure. + * @val: update_nol flag. + */ +void dfs_set_update_nol_flag(struct wlan_dfs *dfs, + bool val); + +/** + * dfs_get_update_nol_flag() - Returns update_nol flag. + * @dfs: Pointer to wlan_dfs structure. + */ +bool dfs_get_update_nol_flag(struct wlan_dfs *dfs); + +/** + * dfs_get_use_nol() - Get usenol. + * @dfs: Pointer to wlan_dfs structure. + */ +int dfs_get_use_nol(struct wlan_dfs *dfs); + +/** + * dfs_get_nol_timeout() - Get NOL timeout. + * @dfs: Pointer to wlan_dfs structure. + */ +int dfs_get_nol_timeout(struct wlan_dfs *dfs); + +/** + * dfs_is_ap_cac_timer_running() - Returns the dfs cac timer. + * @dfs: Pointer to wlan_dfs structure. 
+ */ +int dfs_is_ap_cac_timer_running(struct wlan_dfs *dfs); + +/** + * dfs_control()- Used to process ioctls related to DFS. + * @dfs: Pointer to wlan_dfs structure. + * @id: Command type. + * @indata: Input buffer. + * @insize: size of the input buffer. + * @outdata: A buffer for the results. + * @outsize: Size of the output buffer. + */ +int dfs_control(struct wlan_dfs *dfs, + u_int id, + void *indata, + uint32_t insize, + void *outdata, + uint32_t *outsize); + +/** + * dfs_getnol() - Wrapper function for dfs_get_nol() + * @dfs: Pointer to wlan_dfs structure. + * @dfs_nolinfo: Pointer to dfsreq_nolinfo structure. + */ +void dfs_getnol(struct wlan_dfs *dfs, + void *dfs_nolinfo); + +/** + * dfs_get_override_cac_timeout() - Get override CAC timeout value. + * @dfs: Pointer to DFS object. + * @cac_timeout: Pointer to save the CAC timeout value. + */ +int dfs_get_override_cac_timeout(struct wlan_dfs *dfs, + int *cac_timeout); + +/** + * dfs_override_cac_timeout() - Override the default CAC timeout. + * @dfs: Pointer to DFS object. + * @cac_timeout: CAC timeout value. + */ +int dfs_override_cac_timeout(struct wlan_dfs *dfs, + int cac_timeout); + +/** + * dfs_clear_nolhistory() - unmarks WLAN_CHAN_CLR_HISTORY_RADAR flag for + * all the channels in dfs_ch_channels. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_clear_nolhistory(struct wlan_dfs *dfs); + +/** + * ol_if_dfs_configure() - Initialize the RADAR table for offload chipsets. + * @dfs: Pointer to wlan_dfs structure. + * + * This is called during a channel change or regulatory domain + * reset; in order to fetch the new configuration information and + * program the DFS pattern matching module. + * + * Eventually this should be split into "fetch config" (which can + * happen at regdomain selection time) and "configure DFS" (which + * can happen at channel config time) so as to minimise overheads + * when doing channel changes. However, this'll do for now. 
+ */ +void ol_if_dfs_configure(struct wlan_dfs *dfs); + +/** + * dfs_init_radar_filters() - Init Radar filters. + * @dfs: Pointer to wlan_dfs structure. + * @radar_info: Pointer to wlan_dfs_radar_tab_info structure. + */ +int dfs_init_radar_filters(struct wlan_dfs *dfs, + struct wlan_dfs_radar_tab_info *radar_info); + +/** + * dfs_get_radars_for_ar5212() - Initialize radar table for AR5212 chipsets. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_get_radars_for_ar5212(struct wlan_dfs *dfs); + +/** + * dfs_get_radars_for_ar5416() - Initialize radar table for AR5416 chipsets. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_get_radars_for_ar5416(struct wlan_dfs *dfs); + +/** + * dfs_get_radars_for_ar9300() - Initialize radar table for AR9300 chipsets. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_get_radars_for_ar9300(struct wlan_dfs *dfs); + +/** + * dfs_print_filters() - Print the filters. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_print_filters(struct wlan_dfs *dfs); + +/** + * dfs_clear_stats() - Clear stats. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_clear_stats(struct wlan_dfs *dfs); + +/** + * dfs_radar_disable() - Disables the radar. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +int dfs_radar_disable(struct wlan_dfs *dfs); +#else +static inline int dfs_radar_disable(struct wlan_dfs *dfs) +{ + return 0; +} +#endif + +/** + * dfs_get_debug_info() - Get debug info. + * @dfs: Pointer to wlan_dfs structure. + * @data: void pointer to the data to save dfs_proc_phyerr. + */ +int dfs_get_debug_info(struct wlan_dfs *dfs, + void *data); + +/** + * dfs_cac_timer_init() - Initialize cac timers. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_cac_timer_init(struct wlan_dfs *dfs); + +/** + * dfs_cac_attach() - Initialize dfs cac variables. + * @dfs: Pointer to wlan_dfs structure. 
+ */ +void dfs_cac_attach(struct wlan_dfs *dfs); + +/** + * dfs_cac_timer_reset() - Cancel dfs cac timers. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_cac_timer_reset(struct wlan_dfs *dfs); + +/** + * dfs_cac_timer_free() - Free dfs cac timers. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_cac_timer_free(struct wlan_dfs *dfs); + +/** + * dfs_nol_timer_init() - Initialize NOL timers. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_nol_timer_init(struct wlan_dfs *dfs); + +/** + * dfs_nol_attach() - Initialize NOL variables. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_nol_attach(struct wlan_dfs *dfs); + +/** + * dfs_nol_detach() - Detach NOL variables. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_nol_detach(struct wlan_dfs *dfs); + +/** + * dfs_print_nolhistory() - Print NOL history. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_print_nolhistory(struct wlan_dfs *dfs); + +/** + * dfs_stacac_stop() - Clear the STA CAC timer. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_stacac_stop(struct wlan_dfs *dfs); + +/** + * dfs_find_precac_secondary_vht80_chan() - Get a VHT80 channel with the + * precac primary center frequency. + * @dfs: Pointer to wlan_dfs structure. + * @chan: Pointer to dfs channel structure. + */ +void dfs_find_precac_secondary_vht80_chan(struct wlan_dfs *dfs, + struct dfs_channel *chan); + +/** + * dfs_phyerr_param_copy() - Function to copy src buf to dest buf. + * @dst: dest buf. + * @src: src buf. + */ +void dfs_phyerr_param_copy(struct wlan_dfs_phyerr_param *dst, + struct wlan_dfs_phyerr_param *src); + +/** + * dfs_get_thresholds() - Get the threshold value. + * @dfs: Pointer to wlan_dfs structure. + * @param: Pointer to wlan_dfs_phyerr_param structure. 
+ */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +int dfs_get_thresholds(struct wlan_dfs *dfs, + struct wlan_dfs_phyerr_param *param); +#else +static inline int dfs_get_thresholds(struct wlan_dfs *dfs, + struct wlan_dfs_phyerr_param *param) +{ + return 0; +} +#endif + +/** + * dfs_set_thresholds() - Sets the threshold value. + * @dfs: Pointer to wlan_dfs structure. + * @threshtype: DFS ioctl param type. + * @value: Threshold value. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +int dfs_set_thresholds(struct wlan_dfs *dfs, + const uint32_t threshtype, + const uint32_t value); +#else +static inline int dfs_set_thresholds(struct wlan_dfs *dfs, + const uint32_t threshtype, + const uint32_t value) +{ + return 0; +} +#endif + +/** + * dfs_set_current_channel() - Set DFS current channel. + * @dfs: Pointer to wlan_dfs structure. + * @dfs_ch_freq: Frequency in Mhz. + * @dfs_ch_flags: Channel flags. + * @dfs_ch_flagext: Extended channel flags. + * @dfs_ch_ieee: IEEE channel number. + * @dfs_ch_vhtop_ch_freq_seg1: Channel Center frequency1. + * @dfs_ch_vhtop_ch_freq_seg2: Channel Center frequency2. + */ +void dfs_set_current_channel(struct wlan_dfs *dfs, + uint16_t dfs_ch_freq, + uint64_t dfs_ch_flags, + uint16_t dfs_ch_flagext, + uint8_t dfs_ch_ieee, + uint8_t dfs_ch_vhtop_ch_freq_seg1, + uint8_t dfs_ch_vhtop_ch_freq_seg2); + +/** + * dfs_get_nol_chfreq_and_chwidth() - Get channel freq and width from NOL list. + * @dfs_nol: Pointer to NOL channel entry. + * @nol_chfreq: Pointer to save channel frequency. + * @nol_chwidth: Pointer to save channel width. + * @index: Index to dfs_nol list. + */ +void dfs_get_nol_chfreq_and_chwidth(struct dfsreq_nolelem *dfs_nol, + uint32_t *nol_chfreq, + uint32_t *nol_chwidth, + int index); + +/** + * dfs_process_phyerr_owl() - Process an Owl-style phy error. + * @dfs: Pointer to wlan_dfs structure. 
+ * @buf: Phyerr buffer
+ * @datalen: Phyerr buf len
+ * @rssi: RSSI
+ * @ext_rssi: Extension RSSI.
+ * @rs_tstamp: Time stamp.
+ * @fulltsf: TSF64.
+ * @e: Pointer to dfs_phy_err structure.
+ *
+ * Return: Returns 1.
+ */
+int dfs_process_phyerr_owl(struct wlan_dfs *dfs,
+		void *buf,
+		uint16_t datalen,
+		uint8_t rssi,
+		uint8_t ext_rssi,
+		uint32_t rs_tstamp,
+		uint64_t fulltsf,
+		struct dfs_phy_err *e);
+
+/**
+ * dfs_process_phyerr_sowl() - Process a Sowl/Howl style phy error.
+ * @dfs: Pointer to wlan_dfs structure.
+ * @buf: Phyerr buffer
+ * @datalen: Phyerr buf len
+ * @rssi: RSSI
+ * @ext_rssi: Extension RSSI.
+ * @rs_tstamp: Time stamp.
+ * @fulltsf: TSF64.
+ * @e: Pointer to dfs_phy_err structure.
+ *
+ * Return: Returns 1.
+ */
+int dfs_process_phyerr_sowl(struct wlan_dfs *dfs,
+		void *buf,
+		uint16_t datalen,
+		uint8_t rssi,
+		uint8_t ext_rssi,
+		uint32_t rs_tstamp,
+		uint64_t fulltsf,
+		struct dfs_phy_err *e);
+
+/**
+ * dfs_process_phyerr_merlin() - Process a Merlin/Osprey style phy error,
+ * filling in the given dfs_phy_err struct.
+ * @dfs: Pointer to wlan_dfs structure.
+ * @buf: Phyerr buffer
+ * @datalen: Phyerr buf len
+ * @rssi: RSSI
+ * @ext_rssi: Extension RSSI.
+ * @rs_tstamp: Time stamp.
+ * @fulltsf: TSF64.
+ * @e: Pointer to dfs_phy_err structure.
+ *
+ * Return: Returns 1.
+ */
+int dfs_process_phyerr_merlin(struct wlan_dfs *dfs,
+		void *buf,
+		uint16_t datalen,
+		uint8_t rssi,
+		uint8_t ext_rssi,
+		uint32_t rs_tstamp,
+		uint64_t fulltsf,
+		struct dfs_phy_err *e);
+
+/**
+ * __dfs_process_radarevent() - Continuation of process a radar event function.
+ * @dfs: Pointer to wlan_dfs structure.
+ * @ft: Pointer to dfs_filtertype structure.
+ * @re: Pointer to dfs_event structure.
+ * @this_ts: Timestamp.
+ *
+ * There is currently no way to specify that a radar event has occurred on
+ * a specific channel, so the current methodology is to mark both the pri
+ * and ext channels as being unavailable.
This should be fixed for 802.11ac
+ * or we'll quickly run out of valid channels to use.
+ *
+ * Return: None (void); results are reported via *found and *false_radar_found.
+ */
+void __dfs_process_radarevent(struct wlan_dfs *dfs,
+		struct dfs_filtertype *ft,
+		struct dfs_event *re,
+		uint64_t this_ts,
+		int *found,
+		int *false_radar_found);
+
+/**
+ * dfs_radar_found_action() - Radar found action
+ * @dfs: Pointer to wlan_dfs structure.
+ * @bangradar: true if radar is due to bangradar command.
+ * @seg_id: Segment id.
+ */
+void dfs_radar_found_action(struct wlan_dfs *dfs,
+		bool bangradar,
+		uint8_t seg_id);
+
+/**
+ * bin5_rules_check_internal() - This is an extension of dfs_bin5_check().
+ * @dfs: Pointer to wlan_dfs structure.
+ * @br: Pointer to dfs_bin5radars structure.
+ * @bursts: Bursts.
+ * @numevents: Number of events.
+ * @prev: prev index.
+ * @i: Index.
+ * @this: index to br_elems[]; @index: out param — TODO confirm semantics
+ */
+void bin5_rules_check_internal(struct wlan_dfs *dfs,
+		struct dfs_bin5radars *br,
+		uint32_t *bursts,
+		uint32_t *numevents,
+		uint32_t prev,
+		uint32_t i,
+		uint32_t this,
+		int *index);
+
+/**
+ * dfs_main_task_testtimer_init() - Initialize dfs task testtimer.
+ * @dfs: Pointer to wlan_dfs structure.
+ */
+void dfs_main_task_testtimer_init(struct wlan_dfs *dfs);
+
+/**
+ * dfs_stop() - Clear dfs timers.
+ * @dfs: Pointer to wlan_dfs structure.
+ */
+void dfs_stop(struct wlan_dfs *dfs);
+
+/**
+ * dfs_update_cur_chan_flags() - Update DFS channel flag and flagext.
+ * @dfs: Pointer to wlan_dfs structure.
+ * @flags: New channel flags
+ * @flagext: New Extended flags
+ */
+void dfs_update_cur_chan_flags(struct wlan_dfs *dfs,
+		uint64_t flags,
+		uint16_t flagext);
+
+/**
+ * dfs_radarevent_basic_sanity() - Check basic sanity of the radar event
+ * @dfs: Pointer to wlan_dfs structure.
+ * @chan: Current channel.
+ *
+ * Return: If a radar event found on NON-DFS channel return 0. Otherwise,
+ * return 1.
+ */ +int dfs_radarevent_basic_sanity(struct wlan_dfs *dfs, + struct dfs_channel *chan); + +/** + * wlan_psoc_get_dfs_txops() - Get dfs_tx_ops pointer + * @psoc: Pointer to psoc structure. + * + * Return: Pointer to dfs_tx_ops. + */ +struct wlan_lmac_if_dfs_tx_ops * +wlan_psoc_get_dfs_txops(struct wlan_objmgr_psoc *psoc); + +/** + * dfs_nol_free_list() - Free NOL elements. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_nol_free_list(struct wlan_dfs *dfs); + +/** + * dfs_second_segment_radar_disable() - Disables the second segment radar. + * @dfs: Pointer to wlan_dfs structure. + * + * This is called when AP detects the radar, to (potentially) disable + * the radar code. + * + * Return: returns 0. + */ +int dfs_second_segment_radar_disable(struct wlan_dfs *dfs); + +/** + * dfs_task_testtimer_reset() - stop dfs test timer. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_task_testtimer_reset(struct wlan_dfs *dfs); + +/** + * dfs_task_testtimer_free() - Free dfs test timer. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_task_testtimer_free(struct wlan_dfs *dfs); + +/** + * dfs_timer_free() - Free dfs timers. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_timer_free(struct wlan_dfs *dfs); +#endif /* _DFS_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_channel.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_channel.h new file mode 100644 index 0000000000000000000000000000000000000000..cb5a636c2fa04238bcd2f0f74eb09083b1c3eba0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_channel.h @@ -0,0 +1,294 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has channel related information. + */ + +#ifndef _DFS_CHANNEL_H_ +#define _DFS_CHANNEL_H_ + +/* Channel attributes */ + +/* OFDM channel */ +#define WLAN_CHAN_OFDM 0x0000000000000040 + +/* 2 GHz spectrum channel. */ +#define WLAN_CHAN_2GHZ 0x0000000000000080 + +/* 5 GHz spectrum channel */ +#define WLAN_CHAN_5GHZ 0x0000000000000100 + +/* Radar found on channel */ +#define WLAN_CHAN_DFS_RADAR 0x0000000000001000 + +/* HT 20 channel */ +#define WLAN_CHAN_HT20 0x0000000000010000 + +/* HT 40 with extension channel above */ +#define WLAN_CHAN_HT40PLUS 0x0000000000020000 + +/* HT 40 with extension channel below */ +#define WLAN_CHAN_HT40MINUS 0x0000000000040000 + +/* VHT 20 channel */ +#define WLAN_CHAN_VHT20 0x0000000000100000 + +/* VHT 40 with extension channel above */ +#define WLAN_CHAN_VHT40PLUS 0x0000000000200000 + +/* VHT 40 with extension channel below */ +#define WLAN_CHAN_VHT40MINUS 0x0000000000400000 + +/* VHT 80 channel */ +#define WLAN_CHAN_VHT80 0x0000000000800000 + +/* VHT 160 channel */ +#define WLAN_CHAN_VHT160 0x0000000004000000 + +/* VHT 80_80 channel */ +#define WLAN_CHAN_VHT80_80 0x0000000008000000 + +/* HE 20 channel */ +#define WLAN_CHAN_HE20 0x0000000010000000 + +/* HE 40 with extension channel above */ +#define WLAN_CHAN_HE40PLUS 0x0000000020000000 + +/* HE 40 with extension channel below */ +#define WLAN_CHAN_HE40MINUS 0x0000000040000000 + +/* HE 80 channel */ +#define WLAN_CHAN_HE80 0x0000000200000000 + 
+/* HE 160 channel */ +#define WLAN_CHAN_HE160 0x0000000400000000 + +/* HE 80_80 channel */ +#define WLAN_CHAN_HE80_80 0x0000000800000000 + +/* flagext */ +#define WLAN_CHAN_DFS_RADAR_FOUND 0x01 + +/* DFS required on channel */ +#define WLAN_CHAN_DFS 0x0002 + +/* DFS required on channel for 2nd band of 80+80*/ +#define WLAN_CHAN_DFS_CFREQ2 0x0004 + +/* if channel has been checked for DFS */ +#define WLAN_CHAN_DFS_CLEAR 0x0008 + +/* DFS radar history for slave device(STA mode) */ +#define WLAN_CHAN_HISTORY_RADAR 0x0100 + +/* DFS CAC valid for slave device(STA mode) */ +#define WLAN_CHAN_CAC_VALID 0x0200 + +#define WLAN_IS_CHAN_2GHZ(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_2GHZ) != 0) + +#define WLAN_IS_CHAN_5GHZ(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_5GHZ) != 0) + +#define WLAN_IS_CHAN_11N_HT40(_c) \ + (((_c)->dfs_ch_flags & (WLAN_CHAN_HT40PLUS | \ + WLAN_CHAN_HT40MINUS)) != 0) + +#define WLAN_IS_CHAN_11N_HT40PLUS(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_HT40PLUS) != 0) + +#define WLAN_IS_CHAN_11N_HT40MINUS(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_HT40MINUS) != 0) + +#define WLAN_CHAN_A \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_OFDM) + +#define WLAN_IS_CHAN_A(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_A) == WLAN_CHAN_A) + +#define WLAN_CHAN_11NA_HT20 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HT20) + +#define WLAN_CHAN_11NA_HT40PLUS \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HT40PLUS) + +#define WLAN_CHAN_11NA_HT40MINUS \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HT40MINUS) + +#define WLAN_IS_CHAN_11NA_HT20(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11NA_HT20) == \ + WLAN_CHAN_11NA_HT20) + +#define WLAN_IS_CHAN_11NA_HT40PLUS(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11NA_HT40PLUS) == \ + WLAN_CHAN_11NA_HT40PLUS) + +#define WLAN_IS_CHAN_11NA_HT40MINUS(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11NA_HT40MINUS) == \ + WLAN_CHAN_11NA_HT40MINUS) + +#define WLAN_CHAN_11AC_VHT20 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_VHT20) + +#define WLAN_CHAN_11AC_VHT40PLUS \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_VHT40PLUS) + +#define 
WLAN_CHAN_11AC_VHT40MINUS \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_VHT40MINUS) + +#define WLAN_CHAN_11AC_VHT80 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_VHT80) + +#define WLAN_CHAN_11AC_VHT160 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_VHT160) + +#define WLAN_CHAN_11AC_VHT80_80 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_VHT80_80) + +#define WLAN_IS_CHAN_11AC_VHT20(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AC_VHT20) == \ + WLAN_CHAN_11AC_VHT20) + +#define WLAN_IS_CHAN_11AC_VHT40(_c) \ + (((_c)->dfs_ch_flags & (WLAN_CHAN_VHT40PLUS | \ + WLAN_CHAN_VHT40MINUS)) != 0) + +#define WLAN_IS_CHAN_11AC_VHT40PLUS(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AC_VHT40PLUS) == \ + WLAN_CHAN_11AC_VHT40PLUS) + +#define WLAN_IS_CHAN_11AC_VHT40MINUS(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AC_VHT40MINUS) == \ + WLAN_CHAN_11AC_VHT40MINUS) + +#define WLAN_IS_CHAN_11AC_VHT80(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AC_VHT80) == \ + WLAN_CHAN_11AC_VHT80) + +#define WLAN_IS_CHAN_11AC_VHT160(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AC_VHT160) == \ + WLAN_CHAN_11AC_VHT160) + +#define WLAN_IS_CHAN_11AC_VHT80_80(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AC_VHT80_80) == \ + WLAN_CHAN_11AC_VHT80_80) + +#define WLAN_CHAN_11AXA_HE20 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HE20) + +#define WLAN_CHAN_11AXA_HE40PLUS \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HE40PLUS) + +#define WLAN_CHAN_11AXA_HE40MINUS \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HE40MINUS) + +#define WLAN_CHAN_11AXA_HE80 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HE80) + +#define WLAN_CHAN_11AXA_HE160 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HE160) + +#define WLAN_CHAN_11AXA_HE80_80 \ + (WLAN_CHAN_5GHZ | WLAN_CHAN_HE80_80) + +#define WLAN_IS_CHAN_11AXA_HE20(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AXA_HE20) == \ + WLAN_CHAN_11AXA_HE20) + +#define WLAN_IS_CHAN_11AXA_HE40PLUS(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AXA_HE40PLUS) == \ + WLAN_CHAN_11AXA_HE40PLUS) + +#define WLAN_IS_CHAN_11AXA_HE40MINUS(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AXA_HE40MINUS) == \ + WLAN_CHAN_11AXA_HE40MINUS) + +#define 
WLAN_IS_CHAN_11AXA_HE80(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AXA_HE80) == \ + WLAN_CHAN_11AXA_HE80) + +#define WLAN_IS_CHAN_11AXA_HE160(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AXA_HE160) == \ + WLAN_CHAN_11AXA_HE160) + +#define WLAN_IS_CHAN_11AXA_HE80_80(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_11AXA_HE80_80) == \ + WLAN_CHAN_11AXA_HE80_80) + +#define WLAN_IS_CHAN_DFS(_c) \ + (((_c)->dfs_ch_flagext & \ + (WLAN_CHAN_DFS | WLAN_CHAN_DFS_CLEAR)) == WLAN_CHAN_DFS) + +#define WLAN_IS_CHAN_DFS_CFREQ2(_c) \ + (((_c)->dfs_ch_flagext & \ + (WLAN_CHAN_DFS_CFREQ2|WLAN_CHAN_DFS_CLEAR)) == \ + WLAN_CHAN_DFS_CFREQ2) + +#define WLAN_IS_PRIMARY_OR_SECONDARY_CHAN_DFS(_c) \ + (WLAN_IS_CHAN_DFS(_c) || \ + ((WLAN_IS_CHAN_11AC_VHT160(_c) || \ + WLAN_IS_CHAN_11AC_VHT80_80(_c) || \ + WLAN_IS_CHAN_11AXA_HE160(_c) || \ + WLAN_IS_CHAN_11AXA_HE80_80(_c)) \ + && WLAN_IS_CHAN_DFS_CFREQ2(_c))) + +#define WLAN_IS_CHAN_RADAR(_c) \ + (((_c)->dfs_ch_flags & WLAN_CHAN_DFS_RADAR) == \ + WLAN_CHAN_DFS_RADAR) + +#define WLAN_IS_CHAN_HISTORY_RADAR(_c) \ + (((_c)->dfs_ch_flagext & WLAN_CHAN_HISTORY_RADAR) == \ + WLAN_CHAN_HISTORY_RADAR) + +#define WLAN_CHAN_CLR_HISTORY_RADAR(_c) \ + ((_c)->dfs_ch_flagext &= ~WLAN_CHAN_HISTORY_RADAR) + +#define WLAN_CHAN_ANY (-1) /* token for ``any channel'' */ + +#define WLAN_CHAN_ANYC \ + ((struct dfs_channel *) WLAN_CHAN_ANY) + +#define WLAN_IS_CHAN_MODE_20(_c) \ + (WLAN_IS_CHAN_A(_c) || \ + WLAN_IS_CHAN_11NA_HT20(_c) || \ + WLAN_IS_CHAN_11AC_VHT20(_c) || \ + WLAN_IS_CHAN_11AXA_HE20(_c)) + +#define WLAN_IS_CHAN_MODE_40(_c) \ + (WLAN_IS_CHAN_11AC_VHT40PLUS(_c) || \ + WLAN_IS_CHAN_11AC_VHT40MINUS(_c) || \ + WLAN_IS_CHAN_11NA_HT40PLUS(_c) || \ + WLAN_IS_CHAN_11NA_HT40MINUS(_c) || \ + WLAN_IS_CHAN_11AXA_HE40PLUS(_c) || \ + WLAN_IS_CHAN_11AXA_HE40MINUS(_c)) + +#define WLAN_IS_CHAN_MODE_80(_c) \ + (WLAN_IS_CHAN_11AC_VHT80(_c) || \ + WLAN_IS_CHAN_11AXA_HE80(_c)) + +#define WLAN_IS_CHAN_MODE_160(_c) \ + (WLAN_IS_CHAN_11AC_VHT160(_c) || \ + 
WLAN_IS_CHAN_11AXA_HE160(_c)) + +#define WLAN_IS_CHAN_MODE_80_80(_c) \ + (WLAN_IS_CHAN_11AC_VHT80_80(_c) || \ + WLAN_IS_CHAN_11AXA_HE80_80(_c)) + +#endif /* _DFS_CHANNEL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_direct_attach_radar.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_direct_attach_radar.h new file mode 100644 index 0000000000000000000000000000000000000000..283f0559611070588b0b63fc35ed34f53685437c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_direct_attach_radar.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: dfs_direct_attach_radar.h + * This file contains direct attach specific dfs interfaces + */ + +#ifndef _DFS_DIRECT_ATTACH_RADAR_H_ +#define _DFS_DIRECT_ATTACH_RADAR_H_ + +/** + * dfs_get_da_radars() - Initialize the RADAR table for DA. + * @dfs: Pointer to wlan_dfs structure. 
+ */ +#if defined(WLAN_DFS_DIRECT_ATTACH) +void dfs_get_da_radars(struct wlan_dfs *dfs); +#else +static inline void dfs_get_da_radars(struct wlan_dfs *dfs) +{ +} +#endif +#endif /* _DFS_DIRECT_ATTACH_RADAR_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_filter_init.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_filter_init.h new file mode 100644 index 0000000000000000000000000000000000000000..657a9c35246aff87b96ba3885fde036e1ea2b48e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_filter_init.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: umac/dfs/core/src/dfs_filter_init.h + * This file contains dfs interfaces + */ + +#ifndef _DFS_FILTER_INIT_H_ +#define _DFS_FILTER_INIT_H_ + +/** + * dfs_main_attach() - Allocates memory for wlan_dfs members. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD) +int dfs_main_attach(struct wlan_dfs *dfs); +#else +static inline int dfs_main_attach(struct wlan_dfs *dfs) +{ + return 0; +} +#endif + +/** + * dfs_main_detach() - Free dfs variables. + * @dfs: Pointer to wlan_dfs structure. 
+ */
+#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD)
+void dfs_main_detach(struct wlan_dfs *dfs);
+#else
+static inline void dfs_main_detach(struct wlan_dfs *dfs)
+{
+}
+#endif
+
+/**
+ * dfs_start_host_based_bangradar() - Mark as bangradar and start
+ * wlan_dfs_task_timer.
+ * @dfs: Pointer to wlan_dfs structure.
+ */
+#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD)
+int dfs_start_host_based_bangradar(struct wlan_dfs *dfs);
+#else
+static inline int dfs_start_host_based_bangradar(struct wlan_dfs *dfs)
+{
+	return 0;
+}
+#endif
+
+/**
+ * dfs_main_timer_reset() - Stop dfs timers.
+ * @dfs: Pointer to wlan_dfs structure.
+ */
+#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD)
+void dfs_main_timer_reset(struct wlan_dfs *dfs);
+#else
+static inline void dfs_main_timer_reset(struct wlan_dfs *dfs)
+{
+}
+#endif
+
+/**
+ * dfs_main_timer_free() - Free dfs timers.
+ * @dfs: Pointer to wlan_dfs structure.
+ */
+#if defined(WLAN_DFS_DIRECT_ATTACH) || defined(WLAN_DFS_PARTIAL_OFFLOAD)
+void dfs_main_timer_free(struct wlan_dfs *dfs);
+#else
+static inline void dfs_main_timer_free(struct wlan_dfs *dfs)
+{
+}
+#endif
+
+#endif /* _DFS_FILTER_INIT_H_ */
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_full_offload.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_full_offload.h
new file mode 100644
index 0000000000000000000000000000000000000000..e44be42acf542a87beaa99fd4220590df1314984
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_full_offload.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: dfs_full_offload.h + * This file contains full offload specific dfs interfaces + */ + +#ifndef _DFS_FULL_OFFLOAD_H_ +#define _DFS_FULL_OFFLOAD_H_ + +/** + * dfs_fill_emulate_bang_radar_test() - Update dfs unit test arguments and + * send bangradar command to firmware. + * @dfs: Pointer to wlan_dfs structure. + * @segid: Segment Identifier(Primary and Secondary) + * @dfs_unit_test: Pointer to Unit test command structure + * + * Return: If the event is received return 0. + */ +#if defined(WLAN_DFS_FULL_OFFLOAD) +int dfs_fill_emulate_bang_radar_test(struct wlan_dfs *dfs, + uint32_t segid, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test); +#else +static inline int dfs_fill_emulate_bang_radar_test(struct wlan_dfs *dfs, + uint32_t segid, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test) +{ + return 0; +} +#endif +#endif /* _DFS_FULL_OFFLOAD_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_internal.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..07ff4b1670e43a84a7d1e91e6ff5c017b8d868bf --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_internal.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2008 Atheros Communications, Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: The structurs and functions in this file are used only within DFS + * component. + */ + +#ifndef _DFS_INTERNAL_H_ +#define _DFS_INTERNAL_H_ + +#include +#include "dfs.h" + +/** + * enum DFS_DOMAIN - DFS domain + * @DFS_UNINIT_DOMAIN: Uninitialized domain + * @DFS_FCC_DOMAIN: FCC domain + * @DFS_ETSI_DOMAIN: ETSI domain + * @DFS_MKK4_DOMAIN: MKK domain + * @DFS_CN_DOMAIN: China domain + * @DFS_KR_DOMAIN: Korea domain + * @DFS_UNDEF_DOMAIN: Undefined domain + */ +enum DFS_DOMAIN { + DFS_UNINIT_DOMAIN = 0, + DFS_FCC_DOMAIN = 1, + DFS_ETSI_DOMAIN = 2, + DFS_MKK4_DOMAIN = 3, + DFS_CN_DOMAIN = 4, + DFS_KR_DOMAIN = 5, + DFS_UNDEF_DOMAIN +}; + +/* CAPABILITY: the device support STA DFS */ +#define WLAN_CEXT_STADFS 0x00000040 + +/** + * dfs_chan2freq() - Convert channel to frequency value. + * @chan: Pointer to dfs_channel structure. + * + * Return: Channel frequency. 
+ */ +uint16_t dfs_chan2freq(struct dfs_channel *chan); + +#endif /* _DFS_INTERNAL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_ioctl_private.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_ioctl_private.h new file mode 100644 index 0000000000000000000000000000000000000000..5609633be1070dee9b41deddcb0cb337633ea2b0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_ioctl_private.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2011, 2016-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2010, Atheros Communications Inc. + * All Rights Reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has dfs param copy functions. 
+ */ + +#ifndef _DFS_IOCTL_PRIVATE_H_ +#define _DFS_IOCTL_PRIVATE_H_ + + +static inline void +wlan_dfs_dfsparam_to_ioctlparam(struct wlan_dfs_phyerr_param *src, + struct dfs_ioctl_params *dst) +{ + dst->dfs_firpwr = src->pe_firpwr; + dst->dfs_rrssi = src->pe_rrssi; + dst->dfs_height = src->pe_height; + dst->dfs_prssi = src->pe_prssi; + dst->dfs_inband = src->pe_inband; + dst->dfs_relpwr = src->pe_relpwr; + dst->dfs_relstep = src->pe_relstep; + dst->dfs_maxlen = src->pe_maxlen; +} + +#endif /* _DFS_IOCTL_PRIVATE_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_partial_offload_radar.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_partial_offload_radar.h new file mode 100644 index 0000000000000000000000000000000000000000..92f7903da2f0c420090c5a247041e99cc2629ed4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_partial_offload_radar.h @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: dfs_partial_offload_radar.h + * This file contains partial offload specific dfs interfaces + */ + +#ifndef _DFS_PARTIAL_OFFLOAD_RADAR_H_ +#define _DFS_PARTIAL_OFFLOAD_RADAR_H_ + +/** + * dfs_get_po_radars() - Initialize the RADAR table for PO. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_get_po_radars(struct wlan_dfs *dfs); +#else +static inline void dfs_get_po_radars(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_send_avg_params_to_fw - send avg radar parameters to FW. + * @dfs: Pointer to wlan_dfs structure. + * @params: Pointer to dfs_radar_found_params. + * + * Return: None + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_send_avg_params_to_fw(struct wlan_dfs *dfs, + struct dfs_radar_found_params *params); +#else +static inline +void dfs_send_avg_params_to_fw(struct wlan_dfs *dfs, + struct dfs_radar_found_params *params) +{ +} +#endif + +/** + * dfs_host_wait_timer_init() - Initialize dfs host status wait timer. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_host_wait_timer_init(struct wlan_dfs *dfs); +#else +static inline void dfs_host_wait_timer_init(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_host_wait_timer_free() - Free dfs host status wait timer. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_host_wait_timer_free(struct wlan_dfs *dfs); +#else +static inline void dfs_host_wait_timer_free(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_set_override_status_timeout() - Change the dfs host status timeout. + * @dfs: Pointer to wlan_dfs structure. + * @status_timeout: timeout value. 
+ * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS dfs_set_override_status_timeout(struct wlan_dfs *dfs, + int status_timeout); +#else +static inline QDF_STATUS dfs_set_override_status_timeout(struct wlan_dfs *dfs, + int status_timeout) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * dfs_get_override_status_timeout() - Get the dfs host status timeout value. + * @dfs: Pointer to wlan_dfs structure. + * @status_timeout: Pointer to timeout value. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS dfs_get_override_status_timeout(struct wlan_dfs *dfs, + int *status_timeout); +#else +static inline +QDF_STATUS dfs_get_override_status_timeout(struct wlan_dfs *dfs, + int *status_timeout) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * dfs_radarfound_action_fcc() - The dfs action on radar detection by host for + * FCC domain. + * @dfs: Pointer to wlan_dfs structure. + * @seg_id: segment id. + * + * Return: None + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_radarfound_action_fcc(struct wlan_dfs *dfs, uint8_t seg_id); +#else +static inline void dfs_radarfound_action_fcc(struct wlan_dfs *dfs, + uint8_t seg_id) +{ +} +#endif + +/** + * dfs_host_wait_timer_reset() - Stop dfs host wait timer. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_host_wait_timer_reset(struct wlan_dfs *dfs); +#else +static inline void dfs_host_wait_timer_reset(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_remove_spoof_channel_from_nol() - Remove the spoofed radar hit channel + * from NOL. + * @dfs: Pointer to wlan_dfs structure. 
+ */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_remove_spoof_channel_from_nol(struct wlan_dfs *dfs); +#else +static inline void dfs_remove_spoof_channel_from_nol(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_reset_spoof_test() - reset the spoof test variables. + * @dfs: Pointer to wlan_dfs structure. + * + * Return: None. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_reset_spoof_test(struct wlan_dfs *dfs); +#else +static inline void dfs_reset_spoof_test(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_action_on_fw_radar_status_check() - The dfs action on host dfs + * confirmation by fw. + * @dfs: Pointer to wlan_dfs structure. + * @status: pointer to host dfs status. + * + * Return: None + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_action_on_fw_radar_status_check(struct wlan_dfs *dfs, + uint32_t *status); +#else +static inline void dfs_action_on_fw_radar_status_check(struct wlan_dfs *dfs, + uint32_t *status) +{ +} +#endif +#endif /* _DFS_PARTIAL_OFFLOAD_RADAR_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_phyerr_tlv.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_phyerr_tlv.h new file mode 100644 index 0000000000000000000000000000000000000000..0fad819bde94ff3131e571b958b5c0b46cae461a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_phyerr_tlv.h @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2012, 2016-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has Radar summary. + */ + +#ifndef _DFS_PHYERR_TLV_H_ +#define _DFS_PHYERR_TLV_H_ + +/* + * Register manipulation macros that expect bit field defines + * to follow the convention that an _S suffix is appended for + * a shift count, while the field mask has no suffix. + */ +#define SM(_v, _f) (((_v) << _f##_S) & _f) +#define MS(_v, _f) (((_v) & _f) >> _f##_S) + +/* The TLV dword is at the beginning of each TLV section. */ +#define TLV_REG 0x00 + +#define TLV_LEN 0x0000FFFF +#define TLV_LEN_S 0 + +#define TLV_SIG 0x00FF0000 +#define TLV_SIG_S 16 + +#define TLV_TAG 0xFF000000 +#define TLV_TAG_S 24 + +#define TAG_ID_SEARCH_FFT_REPORT 0xFB +#define TAG_ID_RADAR_PULSE_SUMMARY 0xF8 + +/* + * Radar pulse summary + * + TYPE=0xF8 (Radar pulse summary reprot) + * + SIG=0xBB (baseband PHY generated TLV components) + */ + +#define RADAR_REPORT_PULSE_REG_1 0x00 + +#define RADAR_REPORT_PULSE_IS_CHIRP 0x80000000 +#define RADAR_REPORT_PULSE_IS_CHIRP_S 31 + +#define RADAR_REPORT_PULSE_IS_MAX_WIDTH 0x40000000 +#define RADAR_REPORT_PULSE_IS_MAX_WIDTH_S 30 + +#define RADAR_REPORT_AGC_TOTAL_GAIN 0x3FF00000 +#define RADAR_REPORT_AGC_TOTAL_GAIN_S 20 + +#define RADAR_REPORT_PULSE_DELTA_DIFF 0x000F0000 +#define RADAR_REPORT_PULSE_DELTA_DIFF_S 16 + +#define RADAR_REPORT_PULSE_DELTA_PEAK 0x0000FC00 +#define RADAR_REPORT_PULSE_DELTA_PEAK_S 10 + +#define RADAR_REPORT_PULSE_SIDX 0x000003FF +#define RADAR_REPORT_PULSE_SIDX_S 0x0 + +#define RADAR_REPORT_PULSE_REG_2 0x01 + +#define RADAR_REPORT_PULSE_SRCH_FFT_A_VALID 0x80000000 +#define RADAR_REPORT_PULSE_SRCH_FFT_A_VALID_S 31 + +#define RADAR_REPORT_PULSE_AGC_MB_GAIN 0x7F000000 +#define 
RADAR_REPORT_PULSE_AGC_MB_GAIN_S 24 + +#define RADAR_REPORT_PULSE_SUBCHAN_MASK 0x00FF0000 +#define RADAR_REPORT_PULSE_SUBCHAN_MASK_S 16 + +#define RADAR_REPORT_PULSE_TSF_OFFSET 0x0000FF00 +#define RADAR_REPORT_PULSE_TSF_OFFSET_S 8 + +#define RADAR_REPORT_PULSE_DUR 0x000000FF +#define RADAR_REPORT_PULSE_DUR_S 0 + +#define SEARCH_FFT_REPORT_REG_1 0x00 + +#define SEARCH_FFT_REPORT_TOTAL_GAIN_DB 0xFF800000 +#define SEARCH_FFT_REPORT_TOTAL_GAIN_DB_S 23 + +#define SEARCH_FFT_REPORT_BASE_PWR_DB 0x007FC000 +#define SEARCH_FFT_REPORT_BASE_PWR_DB_S 14 + +#define SEARCH_FFT_REPORT_FFT_CHN_IDX 0x00003000 +#define SEARCH_FFT_REPORT_FFT_CHN_IDX_S 12 + +#define SEARCH_FFT_REPORT_PEAK_SIDX 0x00000FFF +#define SEARCH_FFT_REPORT_PEAK_SIDX_S 0 + +#define SEARCH_FFT_REPORT_REG_2 0x01 + +#define SEARCH_FFT_REPORT_RELPWR_DB 0xFC000000 +#define SEARCH_FFT_REPORT_RELPWR_DB_S 26 + +#define SEARCH_FFT_REPORT_AVGPWR_DB 0x03FC0000 +#define SEARCH_FFT_REPORT_AVGPWR_DB_S 18 + +#define SEARCH_FFT_REPORT_PEAK_MAG 0x0003FF00 +#define SEARCH_FFT_REPORT_PEAK_MAG_S 8 + +#define SEARCH_FFT_REPORT_NUM_STR_BINS_IB 0x000000FF +#define SEARCH_FFT_REPORT_NUM_STR_BINS_IB_S 0 + +#define SEARCH_FFT_REPORT_REG_3 0x02 + +#define SEARCH_FFT_REPORT_SEG_ID 0x00000001 +#define SEARCH_FFT_REPORT_SEG_ID_S 0 + +/* + * Although this code is now not parsing the whole frame (descriptor + * and all), the relevant fields are still useful information + * for anyone who is working on the PHY error part of DFS pattern + * matching. + * + * However, to understand _where_ these descriptors start, you + * should do some digging into the peregrine descriptor format. + * The 30 second version: each RX ring has a bitmap listing which + * descriptors are to be included, and then a set of offsets + * into the RX buffer for where each descriptor will be written. + * It's not like the 802.11n generation hardware which has + * a fixed descriptor format. 
+ */ + +/* RX_PPDU_START */ +#define RX_PPDU_START_LEN (10*4) +#define RX_PPDU_START_REG_4 0x0004 +#define RX_PPDU_START_RSSI_COMB 0x000000FF +#define RX_PPDU_START_RSSI_COMB_S 0 + +/* RX_PPDU_END */ +#define RX_PPDU_END_LEN (21*4) +#define RX_PPDU_END_REG_16 16 +#define RX_PPDU_END_TSF_TIMESTAMP 0xFFFFFFFF +#define RX_PPDU_END_TSF_TIMESTAMP_S 0 +#define RX_PPDU_END_REG_18 18 +#define RX_PPDU_END_PHY_ERR_CODE 0x0000FF00 +#define RX_PPDU_END_PHY_ERR_CODE_S 8 +#define RX_PPDU_END_PHY_ERR 0x00010000 +#define RX_PPDU_END_PHY_ERR_S 16 + +/* + * The RSSI values can have "special meanings". + * If rssi=50, it means that the peak detector triggered. + */ +#define RSSI_PEAK_DETECTOR_SAT 50 + +/* + * If rssi=25, it means that the ADC was saturated, but that only is + * valid when there is one ADC gain change. For short pulses this + * is true - you won't have time to do a gain change before the pulse + * goes away. But for longer pulses, ADC gain changes can occur, so + * you'll get a more accurate RSSI figure. + * + * For short pulses (and the definition of "short" still isn't clear + * at the time of writing) there isn't any real time to do a gain change + * (or two, or three..) in order to get an accurate estimation of signal + * sizing. Thus, RSSI will not be very accurate for short duration pulses. + * All you can really say for certain is that yes, there's a pulse that + * met the requirements of the pulse detector. + * + * For more information, see the 802.11ac Microarchitecture guide. + * (TODO: add a twiki reference.) 
+ */ + +#endif /* _DFS_PHYERR_TLV_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_process_radar_found_ind.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_process_radar_found_ind.h new file mode 100644 index 0000000000000000000000000000000000000000..b6ee1f48b3ca6e9ce0733d11f16fc8eeef2d1dfa --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_process_radar_found_ind.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: dfs_process_radar_found_ind.h + * This file provides prototypes of the routines needed for the + * external components to utilize the services provided by the + * DFS component. 
+ */ + +/* Number of channel marking offsets */ +#define DFS_NUM_FREQ_OFFSET 3 + +/* Lower channel from 20 Mhz center channel */ +#define DFS_20MHZ_LOWER_CHANNEL(_f) ((_f) - 20) +/* Upper channel from 20 Mhz center channel */ +#define DFS_20MHZ_UPPER_CHANNEL(_f) ((_f) + 20) +/* 1st lower channel from center channel of bandwidth 40/80/160Mhz */ +#define DFS_FIRST_LOWER_CHANNEL(_f) ((_f) - 10) +/* 2nd lower channel from center channel of bandwidth 40/80/160Mhz */ +#define DFS_SECOND_LOWER_CHANNEL(_f) ((_f) - 30) +/* 3rd lower channel from center channel of bandwidth 80/160Mhz */ +#define DFS_THIRD_LOWER_CHANNEL(_f) ((_f) - 50) +/* 1st upper channel from center channel of bandwidth 40/80/160Mhz */ +#define DFS_FIRST_UPPER_CHANNEL(_f) ((_f) + 10) +/* 2nd upper channel from center channel of bandwidth 40/80/160Mhz */ +#define DFS_SECOND_UPPER_CHANNEL(_f) ((_f) + 30) +/* 3rd upper channel from center channel of bandwidth 80/160Mhz */ +#define DFS_THIRD_UPPER_CHANNEL(_f) ((_f) + 50) + +/* 20 Mhz freq_offset lower */ +#define DFS_20MZ_OFFSET_LOWER (-10) +/* 20 Mhz freq_offset upper */ +#define DFS_20MZ_OFFSET_UPPER (10) +/* 40/80 Mhz freq_offset first lower */ +#define DFS_OFFSET_FIRST_LOWER (-20) +/* 40/80 Mhz freq_offset second lower */ +#define DFS_OFFSET_SECOND_LOWER (-40) +/* 40/80 Mhz freq_offset first upper */ +#define DFS_OFFSET_FIRST_UPPER (20) +/* 40/80 Mhz freq_offset second upper */ +#define DFS_OFFSET_SECOND_UPPER (40) + +/* Frequency offset to sidx */ +#define DFS_FREQ_OFFSET_TO_SIDX(_f) ((32 * (_f)) / 10) +/* sidx offset boundary */ +#define DFS_BOUNDARY_SIDX 32 +/* freq offset for chirp */ +#define DFS_CHIRP_OFFSET 10 +/* second segment freq offset */ +#define DFS_160MHZ_SECOND_SEG_OFFSET 40 + +/* Frequency offset indices */ +#define CENTER_CH 0 +#define LEFT_CH 1 +#define RIGHT_CH 2 + +/* Next channel number offset's from center channel number */ +#define DFS_5GHZ_NEXT_CHAN_OFFSET 2 +#define DFS_5GHZ_2ND_CHAN_OFFSET 6 +#define DFS_5GHZ_3RD_CHAN_OFFSET 10 
+#define DFS_5GHZ_4TH_CHAN_OFFSET 14 + +/* Max number of bonding channels in 160 MHz segment */ +#define NUM_CHANNELS_160MHZ 8 + +/** + * struct freqs_offsets - frequency and offset information + * @freq: channel frequency in mhz. + * @offset: offset from center frequency. + * + * Index 0 - Center channel affected by RADAR. + * Index 1 - Left of Center channel affected by RADAR. + * Index 2 - Right of Center channel affected by RADAR. + * + * This information is needed to find and mark radar infected + * channels in NOL and regulatory database. + */ +struct freqs_offsets { + uint32_t freq[DFS_NUM_FREQ_OFFSET]; + int32_t offset[DFS_NUM_FREQ_OFFSET]; +}; + +/** + * dfs_process_radar_found_indication() - Process radar found indication + * @dfs: Pointer to wlan_dfs structure. + * @radar_found: radar found info. + * + * Process radar found indication and update radar effected channel in NOL + * and regulatory. + * + * Return: None + */ +void dfs_process_radar_found_indication(struct wlan_dfs *dfs, + struct radar_found_info *radar_found); + +/** + * dfs_process_radar_ind() - Process radar indication event + * @dfs: Pointer to wlan_dfs structure. + * @radar_found: Pointer to radar_found_info structure. + * + * Wrapper function of dfs_process_radar_found_indication(). + * + * Return: QDF_STATUS + */ +QDF_STATUS dfs_process_radar_ind(struct wlan_dfs *dfs, + struct radar_found_info *radar_found); + +/** + * dfs_radarfound_action_generic() - The dfs action on radar detection by host + * for domains other than FCC. + * @dfs: Pointer to wlan_dfs structure. + * @seg_id: segment id. + * + * Return: None + */ +void dfs_radarfound_action_generic(struct wlan_dfs *dfs, uint8_t seg_id); + +/** + * dfs_get_bonding_channels() - Get bonding channels. + * @curchan: Pointer to dfs_channels to know width and primary channel. + * @segment_id: Segment id, useful for 80+80/160 MHz operating band. + * @channels: Pointer to save radar affected channels. + * + * Return: Number of channels. 
+ */ +uint8_t dfs_get_bonding_channels(struct dfs_channel *curchan, + uint32_t segment_id, + uint8_t *channels); diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_random_chan_sel.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_random_chan_sel.h new file mode 100644 index 0000000000000000000000000000000000000000..d1cf83bba1ab904f3a0c55b5da19052c76e8ff14 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_random_chan_sel.h @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/* dfs regions definitions */ +/* un-initialized region */ +#define DFS_UNINIT_REGION_VAL 0 + +/* FCC region */ +#define DFS_FCC_REGION_VAL 1 + +/* ETSI region */ +#define DFS_ETSI_REGION_VAL 2 + +/* MKK region */ +#define DFS_MKK_REGION_VAL 3 + +/* China region */ +#define DFS_CN_REGION_VAL 4 + +/* Korea region */ +#define DFS_KR_REGION_VAL 5 + +/* Undefined region */ +#define DFS_UNDEF_REGION_VAL 6 + +/* Channel width definitions */ +/* 20MHz channel width */ +#define DFS_CH_WIDTH_20MHZ 0 + +/* 40MHz channel width */ +#define DFS_CH_WIDTH_40MHZ 1 + +/* 80MHz channel width */ +#define DFS_CH_WIDTH_80MHZ 2 + +/* 160MHz channel width */ +#define DFS_CH_WIDTH_160MHZ 3 + +/* 80+80 non-contiguous */ +#define DFS_CH_WIDTH_80P80MHZ 4 + +/* 5MHz channel width */ +#define DFS_CH_WIDTH_5MHZ 5 + +/* 10MHz channel width */ +#define DFS_CH_WIDTH_10MHZ 6 + +/* Invalid channel width */ +#define DFS_CH_WIDTH_INVALID 7 + +/* Max channel width */ +#define DFS_CH_WIDTH_MAX 8 + +/* Next 5GHz channel number */ +#define DFS_80_NUM_SUB_CHANNEL 4 + +/* Next 5GHz channel number */ +#define DFS_NEXT_5GHZ_CHANNEL 4 + +/* Number of 20MHz channels in bitmap */ +#define DFS_MAX_20M_SUB_CH 8 + +/* Number of 80MHz channels in 5GHz band */ +#define DFS_MAX_80MHZ_BANDS 6 + +/* Start channel and center channel diff in 80Mhz */ +#define DFS_80MHZ_START_CENTER_CH_DIFF 6 + +/* Max number of channels */ +#define DFS_MAX_NUM_CHAN 128 + +/* Bitmap mask for 80MHz */ +#define DFS_80MHZ_MASK 0x0F + +/* Bitmap mask for 40MHz lower */ +#define DFS_40MHZ_MASK_L 0x03 + +/* Bitmap mask for 40MHz higher */ +#define DFS_40MHZ_MASK_H 0x0C + +/* Adjacent weather radar channel frequency */ +#define DFS_ADJACENT_WEATHER_RADAR_CHANNEL 5580 + +/* Adjacent weather radar channel number */ +#define DFS_ADJACENT_WEATHER_RADAR_CHANNEL_NUM 116 + +/* Max 2.4 GHz channel number */ +#define DFS_MAX_24GHZ_CHANNEL 14 + +/* Max valid channel number */ +#define MAX_CHANNEL_NUM 184 + +#ifdef 
WLAN_ENABLE_CHNL_MATRIX_RESTRICTION +#define DFS_TX_LEAKAGE_THRES 310 +#define DFS_TX_LEAKAGE_MAX 1000 +#define DFS_TX_LEAKAGE_MIN 200 + +/* + * This define is used to block additional channels + * based on the new data gathered on auto platforms + * and to differentiate the leakage data among different + * platforms. + */ + +#define DFS_TX_LEAKAGE_AUTO_MIN 210 +#endif + +#define DFS_IS_CHANNEL_WEATHER_RADAR(_f) (((_f) >= 5600) && ((_f) <= 5650)) +#define DFS_IS_CHAN_JAPAN_INDOOR(_ch) (((_ch) >= 36) && ((_ch) <= 64)) +#define DFS_IS_CHAN_JAPAN_OUTDOOR(_ch) (((_ch) >= 100) && ((_ch) <= 140)) + +/** + * struct chan_bonding_info - for holding channel bonding bitmap + * @chan_map: channel map + * @rsvd: reserved + * @start_chan: start channel + */ +struct chan_bonding_info { + uint8_t chan_map:4; + uint8_t rsvd:4; + uint8_t start_chan; +}; + +/** + * struct chan_bonding_bitmap - bitmap structure which represent + * all 5GHZ channels. + * @chan_bonding_set: channel bonding bitmap + */ +struct chan_bonding_bitmap { + struct chan_bonding_info chan_bonding_set[DFS_MAX_80MHZ_BANDS]; +}; + +#ifdef WLAN_ENABLE_CHNL_MATRIX_RESTRICTION +/** + * struct dfs_tx_leak_info - DFS leakage info + * @leak_chan: leak channel. + * @leak_lvl: tx leakage lvl. + */ +struct dfs_tx_leak_info { + uint8_t leak_chan; + uint32_t leak_lvl; +}; + +/** + * struct dfs_matrix_tx_leak_info - DFS leakage matrix info for dfs channel. + * @channel: channel to switch from + * @chan_matrix DFS leakage matrix info for given dfs channel. + */ +struct dfs_matrix_tx_leak_info { + uint8_t channel; + struct dfs_tx_leak_info chan_matrix[CHAN_ENUM_144 - CHAN_ENUM_36 + 1]; +}; +#endif + +/** + * dfs_mark_leaking_ch() - to mark channel leaking in to nol + * @dfs: dfs handler. 
+ * @ch_width: channel width + * @temp_ch_lst_sz: the target channel list + * @temp_ch_lst: the target channel list + * + * This function removes the channels from temp channel list that + * (if selected as target channel) will cause leakage in one of + * the NOL channels + * + * Return: QDF_STATUS + */ +QDF_STATUS dfs_mark_leaking_ch(struct wlan_dfs *dfs, + enum phy_ch_width ch_width, + uint8_t temp_ch_lst_sz, + uint8_t *temp_ch_lst); + +/** + * dfs_prepare_random_channel() - This function picks a random channel from + * the list of available channels. + * @dfs: dfs handler. + * @ch_list: channel list. + * @ch_count: Number of channels in given list. + * @flags: DFS_RANDOM_CH_FLAG_* + * @ch_wd: input channel width, used same variable to return new ch width. + * @cur_chan: current channel. + * @dfs_region: DFS region. + * @acs_info: acs channel range information. + * + * Function used to find random channel selection from a given list. + * First this function removes channels based on flags and then uses final + * list to find channel based on requested bandwidth, if requested bandwidth + * not available, it chooses next lower bandwidth and try. + * + * Return: channel number, else zero. + */ +uint8_t dfs_prepare_random_channel(struct wlan_dfs *dfs, + struct dfs_channel *ch_list, + uint32_t ch_count, + uint32_t flags, + uint8_t *ch_wd, + struct dfs_channel *cur_chan, + uint8_t dfs_region, + struct dfs_acs_info *acs_info); diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_structs.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_structs.h new file mode 100644 index 0000000000000000000000000000000000000000..0700b896c28bf945daa1a4e3a9207553f3f087ba --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_structs.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2011-2012, 2016-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has dfs capability, dfs pulse structures. + */ + +#ifndef _DFS_STRUCTS_H_ +#define _DFS_STRUCTS_H_ + +/** + * This represents the general case of the radar PHY configuration, + * across all chips. + * + * It's then up to each chip layer to translate to/from this + * (eg to HAL_PHYERR_PARAM for the HAL case.) + */ + +#define WLAN_DFS_PHYERR_PARAM_NOVAL 0xFFFF +#define WLAN_DFS_PHYERR_PARAM_ENABLE 0x8000 + +/** + * For the dfs_nol_clist_update() method - this is the + * update command. + */ +enum { + DFS_NOL_CLIST_CMD_NONE = 0x0, + DFS_NOL_CLIST_CMD_UPDATE = 0x1, +}; + +/** + * struct dfs_pulse - DFS pulses. + * @rp_numpulses: Num of pulses in radar burst. + * @rp_pulsedur: Duration of each pulse in usecs. + * @rp_pulsefreq: Frequency of pulses in burst. + * @rp_max_pulsefreq: Frequency of pulses in burst. + * @rp_patterntype: fixed or variable pattern type. + * @rp_pulsevar: Time variation of pulse duration for matched + * filter (single-sided) in usecs. + * @rp_threshold: Threshold for MF output to indicate radar match. + * @rp_mindur: Min pulse duration to be considered for this pulse + * type. + * @rp_maxdur: Min pulse duration to be considered for this pulse + * type. 
+ * @rp_rssithresh: Minimum rssi to be considered a radar pulse. + * @rp_meanoffset: Offset for timing adjustment. + * @rp_rssimargin: rssi threshold margin. In Turbo Mode HW reports + * rssi 3dBm. lower than in non TURBO mode. This + * will be used to offset that diff. + * @rp_ignore_pri_window: Ignore PRI window. + * @rp_sidx_spread: To reduce false detection use sidx spread. For HT160, + * for consistency, push all pulses at center of the + * channel to 80MHz ext when both segments are DFS. + * Maximum SIDX value spread in a matched sequence + * excluding FCC Bin 5. + * @rp_check_delta_peak: This is mainly used for ETSI Type 4 5MHz chirp pulses + * which HW cnanot identify. + * Reliably as chirping but can correctly characterize + * these with delta_peak non-zero. + * Is delta_peak check required for this filter. + * @rp_pulseid: Unique ID for identifying filter. + */ +struct dfs_pulse { + uint32_t rp_numpulses; + uint32_t rp_pulsedur; + uint32_t rp_pulsefreq; + uint32_t rp_max_pulsefreq; + uint32_t rp_patterntype; + uint32_t rp_pulsevar; + uint32_t rp_threshold; + uint32_t rp_mindur; + uint32_t rp_maxdur; + uint32_t rp_rssithresh; + uint32_t rp_meanoffset; + int32_t rp_rssimargin; + uint32_t rp_ignore_pri_window; + uint16_t rp_sidx_spread; + int8_t rp_check_delta_peak; + uint16_t rp_pulseid; +}; + +/** + * struct dfs_bin5pulse - DFS bin5 pulse. + * @b5_threshold: Number of bin5 pulses to indicate detection. + * @b5_mindur: Min duration for a bin5 pulse. + * @b5_maxdur: Max duration for a bin5 pulse. + * @b5_timewindow: Window over which to count bin5 pulses. + * @b5_rssithresh: Min rssi to be considered a pulse. + * @b5_rssimargin: rssi threshold margin. In Turbo Mode HW reports rssi 3dB + */ +struct dfs_bin5pulse { + uint32_t b5_threshold; + uint32_t b5_mindur; + uint32_t b5_maxdur; + uint32_t b5_timewindow; + uint32_t b5_rssithresh; + uint32_t b5_rssimargin; +}; + +/** + * wlan_dfs_phyerr_init_noval() - Fill wlan_dfs_phyerr_param with 0xFF. 
+ * @pe: Pointer to wlan_dfs_phyerr_param structure. + */ +static inline void wlan_dfs_phyerr_init_noval(struct wlan_dfs_phyerr_param *pe) +{ + pe->pe_firpwr = WLAN_DFS_PHYERR_PARAM_NOVAL; + pe->pe_rrssi = WLAN_DFS_PHYERR_PARAM_NOVAL; + pe->pe_height = WLAN_DFS_PHYERR_PARAM_NOVAL; + pe->pe_prssi = WLAN_DFS_PHYERR_PARAM_NOVAL; + pe->pe_inband = WLAN_DFS_PHYERR_PARAM_NOVAL; + pe->pe_relpwr = WLAN_DFS_PHYERR_PARAM_NOVAL; + pe->pe_relstep = WLAN_DFS_PHYERR_PARAM_NOVAL; + pe->pe_maxlen = WLAN_DFS_PHYERR_PARAM_NOVAL; +} + +/** + * struct wlan_dfs_radar_tab_info - Radar table information. + * @dfsdomain: DFS domain. + * @numradars: Number of radars. + * @dfs_radars: Pointer to dfs_pulse structure. + * @numb5radars: NUM5 radars. + * @b5pulses: BIN5 radars. + * @dfs_defaultparams: phyerr params. + */ +struct wlan_dfs_radar_tab_info { + uint32_t dfsdomain; + int numradars; + struct dfs_pulse *dfs_radars; + int numb5radars; + struct dfs_bin5pulse *b5pulses; + struct wlan_dfs_phyerr_param dfs_defaultparams; +}; + +#endif /* _DFS_STRUCTS_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_zero_cac.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_zero_cac.h new file mode 100644 index 0000000000000000000000000000000000000000..cf68f74a49caa61b968690dbfb0dd191714bd2ee --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/dfs_zero_cac.h @@ -0,0 +1,369 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * DOC: This file has Zero CAC DFS APIs. + */ + +#ifndef _DFS_ZERO_CAC_H_ +#define _DFS_ZERO_CAC_H_ + +#include "dfs.h" + +#define VHT160_IEEE_FREQ_DIFF 16 + +/** + * struct dfs_precac_entry - PreCAC entry. + * @pe_list: PreCAC entry. + * @vht80_freq: VHT80 freq. + * @precac_nol_timer: Per element precac NOL timer. + * @dfs: Pointer to wlan_dfs structure. + */ +struct dfs_precac_entry { + TAILQ_ENTRY(dfs_precac_entry) pe_list; + uint8_t vht80_freq; + qdf_timer_t precac_nol_timer; + struct wlan_dfs *dfs; +}; + +/** + * dfs_zero_cac_timer_init() - Initialize zero-cac timers + * @dfs: Pointer to DFS structure. + */ +void dfs_zero_cac_timer_init(struct wlan_dfs *dfs); + +/** + * dfs_print_precaclists() - Print precac list. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_print_precaclists(struct wlan_dfs *dfs); +#else +static inline void dfs_print_precaclists(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_reset_precac_lists() - Resets the precac lists. 
+ * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_reset_precac_lists(struct wlan_dfs *dfs); +#else +static inline void dfs_reset_precac_lists(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_reset_precaclists() - Clears and initializes precac_required_list, + * precac_done_list and precac_nol_list. + * + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_reset_precaclists(struct wlan_dfs *dfs); +#else +static inline void dfs_reset_precaclists(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_deinit_precac_list() - Clears the precac list. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_deinit_precac_list(struct wlan_dfs *dfs); + +/** + * dfs_zero_cac_detach() - Free zero_cac memory. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_zero_cac_detach(struct wlan_dfs *dfs); +#else +static inline void dfs_zero_cac_detach(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_init_precac_list() - Init precac list. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_init_precac_list(struct wlan_dfs *dfs); + +/** + * dfs_start_precac_timer() - Start precac timer. + * @dfs: Pointer to wlan_dfs structure. + * @precac_chan: Start the precac timer in this channel. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_start_precac_timer(struct wlan_dfs *dfs, + uint8_t precac_chan); +#else +static inline void dfs_start_precac_timer(struct wlan_dfs *dfs, + uint8_t precac_chan) +{ +} +#endif + +/** + * dfs_cancel_precac_timer() - Cancel the precac timer. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_cancel_precac_timer(struct wlan_dfs *dfs); +#else +static inline void dfs_cancel_precac_timer(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_zero_cac_attach() - Initialize dfs zerocac variables. + * @dfs: Pointer to DFS structure.
+ */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_zero_cac_attach(struct wlan_dfs *dfs); +#else +static inline void dfs_zero_cac_attach(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_zero_cac_reset() - Reset Zero cac DFS variables. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_zero_cac_reset(struct wlan_dfs *dfs); +#else +static inline void dfs_zero_cac_reset(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_zero_cac_timer_free() - Free Zero cac DFS variables. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_zero_cac_timer_free(struct wlan_dfs *dfs); +#else +static inline void dfs_zero_cac_timer_free(struct wlan_dfs *dfs) +{ +} +#endif + +/** + * dfs_is_precac_done() - Is precac done. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +bool dfs_is_precac_done(struct wlan_dfs *dfs); +#else +static inline bool dfs_is_precac_done(struct wlan_dfs *dfs) +{ + return false; +} +#endif + +/** + * dfs_get_freq_from_precac_required_list() - Get VHT80 freq from + * precac_required_list. + * @dfs: Pointer to wlan_dfs structure. + * @exclude_ieee_freq: Find a VHT80 freqency that is not equal to + * exclude_ieee_freq. + */ +uint8_t dfs_get_freq_from_precac_required_list(struct wlan_dfs *dfs, + uint8_t exclude_ieee_freq); + +/** + * dfs_override_precac_timeout() - Override the default precac timeout. + * @dfs: Pointer to wlan_dfs structure. + * @precac_timeout: Precac timeout value. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +int dfs_override_precac_timeout(struct wlan_dfs *dfs, + int precac_timeout); +#else +static inline int dfs_override_precac_timeout(struct wlan_dfs *dfs, + int precac_timeout) +{ + return 0; +} +#endif + +/** + * dfs_get_override_precac_timeout() - Get precac timeout. + * @dfs: Pointer wlan_dfs structure. + * @precac_timeout: Get precac timeout value in this variable. 
+ */
+#if defined(WLAN_DFS_PARTIAL_OFFLOAD)
+int dfs_get_override_precac_timeout(struct wlan_dfs *dfs,
+		int *precac_timeout);
+#else
+static inline int dfs_get_override_precac_timeout(struct wlan_dfs *dfs,
+		int *precac_timeout)
+{
+	return 0;
+}
+#endif
+
+/**
+ * dfs_find_vht80_chan_for_precac() - Find VHT80 channel for precac.
+ * @dfs: Pointer to wlan_dfs structure.
+ * @chan_mode: Channel mode.
+ * @ch_freq_seg1: Segment1 channel freq.
+ * @cfreq1: cfreq1.
+ * @cfreq2: cfreq2.
+ * @phy_mode: Precac phymode.
+ * @dfs_set_cfreq2: Precac cfreq2
+ * @set_agile: Agile mode flag.
+ *
+ * Zero-CAC-DFS algorithm:-
+ * Zero-CAC-DFS algorithm works in stealth mode.
+ * 1) When any channel change happens in VHT80 mode the algorithm
+ * changes the HW channel mode to VHT80_80/VHT160 mode and adds a
+ * new channel in the secondary VHT80 to perform precac and a
+ * precac timer is started. However the upper layer/UMAC is unaware
+ * of this change.
+ * 2) When the precac timer expires without being interrupted by
+ * any channel change the secondary VHT80 channel is moved from
+ * precac-required-list to precac-done-list.
+ * 3) If there is a radar detect at any time in any segment
+ * (segment-1 is primary VHT80 and segment-2 is VHT80)then the
+ * channel is searched in both precac-required-list and precac-done-list
+ * and moved to precac-nol-list.
+ * 4) Whenever channel change happens if the new channel is a DFS
+ * channel then precac-done-list is searched and if the channel is
+ * found in the precac-done-list then the CAC is skipped.
+ * 5) The precac expiry timer makes a vdev_restart(channel change
+ * with current-upper-layer-channel-mode which is VHT80). In channel
+ * change the algorithm tries to pick a new channel from the
+ * precac-required list. If none found then channel mode remains same.
+ * Which means when all the channels in precac-required-list are
+ * exhausted the VHT80_80/VHT160 comes back to VHT80 mode.
+ */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_find_vht80_chan_for_precac(struct wlan_dfs *dfs, + uint32_t chan_mode, + uint8_t ch_freq_seg1, + uint32_t *cfreq1, + uint32_t *cfreq2, + uint32_t *phy_mode, + bool *dfs_set_cfreq2, + bool *set_agile); +#else +static inline void dfs_find_vht80_chan_for_precac(struct wlan_dfs *dfs, + uint32_t chan_mode, + uint8_t ch_freq_seg1, + uint32_t *cfreq1, + uint32_t *cfreq2, + uint32_t *phy_mode, + bool *dfs_set_cfreq2, + bool *set_agile) +{ +} +#endif + +/** + * dfs_set_precac_enable() - Set precac enable flag. + * @dfs: Pointer to wlan_dfs structure. + * @value: input value for dfs_precac_enable flag. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_set_precac_enable(struct wlan_dfs *dfs, + uint32_t value); +#else +static inline void dfs_set_precac_enable(struct wlan_dfs *dfs, + uint32_t value) +{ +} +#endif + +/** + * dfs_get_precac_enable() - Get precac enable flag. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +uint32_t dfs_get_precac_enable(struct wlan_dfs *dfs); +#else +static inline uint32_t dfs_get_precac_enable(struct wlan_dfs *dfs) +{ + return 0; +} +#endif + +/** + * dfs_zero_cac_reset() - Reset Zero cac DFS variables. + * @dfs: Pointer to wlan_dfs structure. + */ +void dfs_zero_cac_reset(struct wlan_dfs *dfs); + +/** + * dfs_is_ht20_40_80_chan_in_precac_done_list() - Is precac done on a + * VHT20/40/80 channel. + *@dfs: Pointer to wlan_dfs structure. + */ +bool dfs_is_ht20_40_80_chan_in_precac_done_list(struct wlan_dfs *dfs); + +/** + * dfs_is_ht80_80_chan_in_precac_done_list() - Is precac done on a VHT80+80 + * channel. + *@dfs: Pointer to wlan_dfs structure. + */ +bool dfs_is_ht80_80_chan_in_precac_done_list(struct wlan_dfs *dfs); + +/** + * dfs_mark_precac_dfs() - Mark the precac channel as radar. + * @dfs: Pointer to wlan_dfs structure. 
+ */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +void dfs_mark_precac_dfs(struct wlan_dfs *dfs, + uint8_t is_radar_found_on_secondary_seg); +#else +static inline void dfs_mark_precac_dfs(struct wlan_dfs *dfs, + uint8_t is_radar_found_on_secondary_seg) +{ +} +#endif + +/** + * dfs_is_precac_timer_running() - Check whether precac timer is running. + * @dfs: Pointer to wlan_dfs structure. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) +bool dfs_is_precac_timer_running(struct wlan_dfs *dfs); +#else +static inline bool dfs_is_precac_timer_running(struct wlan_dfs *dfs) +{ + return false; +} +#endif +#endif /* _DFS_ZERO_CAC_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/ar5212_radar.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/ar5212_radar.c new file mode 100644 index 0000000000000000000000000000000000000000..e28e154400d5dede1614b25a0085026829ebf684 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/ar5212_radar.c @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2011, 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2005 Atheros Communications, Inc. + * Copyright (c) 2008-2010, Atheros Communications Inc. + * All Rights Reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: This file has the radar table for AR5212 chipset and function to + * initialize the radar table. + */ +#include "../dfs.h" +#include "../dfs_internal.h" +#include "wlan_dfs_utils_api.h" + +/* Default 5212/5312 radar phy parameters. */ +#define AR5212_DFS_FIRPWR -41 +#define AR5212_DFS_RRSSI 12 +#define AR5212_DFS_HEIGHT 20 +#define AR5212_DFS_PRSSI 22 +#define AR5212_DFS_INBAND 6 + +/** + * struct dfs_pulse ar5212_etsi_radars - ETSI radar pulse table for + * AR5212 chipset. + */ +struct dfs_pulse ar5212_etsi_radars[] = { + /* EN 302 502 frequency hopping pulse */ + /* PRF 3000, 1us duration, 9 pulses per burst */ + {9, 1, 3000, 3000, 1, 4, 5, 0, 1, 18, 0, 0, 1, 1000, 0, 40}, + /* PRF 4500, 20us duration, 9 pulses per burst */ + {9, 20, 4500, 4500, 1, 4, 5, 19, 21, 18, 0, 0, 1, 1000, 0, 41}, + + /* TYPE 1 */ + {10, 2, 750, 0, 24, 50, 0, 2, 22, 0, 3, 0, 0}, + + /* TYPE 2 */ + {7, 2, 200, 0, 24, 50, 0, 2, 22, 0, 3, 0, 1}, + {7, 2, 300, 0, 24, 50, 0, 2, 22, 0, 3, 0, 2}, + {7, 2, 500, 0, 24, 50, 0, 2, 22, 1, 3, 0, 3}, + {7, 2, 800, 0, 24, 50, 0, 2, 22, 1, 3, 0, 4}, + {7, 2, 1001, 0, 24, 50, 0, 2, 22, 0, 3, 0, 5}, + {7, 8, 200, 0, 24, 50, 6, 9, 22, 8, 3, 0, 6}, + {7, 8, 300, 0, 24, 50, 6, 9, 22, 8, 3, 0, 7}, + {7, 8, 502, 0, 24, 50, 6, 9, 22, 0, 3, 0, 8}, + {7, 8, 805, 0, 24, 50, 6, 9, 22, 0, 3, 0, 9}, + {7, 8, 1008, 0, 24, 50, 6, 9, 22, 0, 3, 0, 10}, + + /* TYPE 3 */ + {10, 14, 200, 0, 24, 50, 12, 15, 22, 14, 3, 0, 11}, + {10, 14, 300, 0, 24, 50, 12, 15, 22, 14, 3, 0, 12}, + {10, 14, 503, 0, 24, 50, 12, 15, 22, 2, 3, 0, 13}, + {10, 14, 809, 0, 24, 50, 12, 15, 22, 0, 3, 0, 14}, + {10, 14, 1014, 0, 24, 50, 12, 15, 22, 0, 3, 0, 15}, + {10, 18, 200, 0, 24, 50, 15, 19, 22, 18, 3, 0, 16}, + {10, 18, 301, 0, 24, 50, 15, 19, 22, 7, 3, 0, 17}, + {10, 18, 504, 0, 24, 50, 15, 19, 22, 2, 3, 0, 18}, + {10, 18, 811, 0, 24, 50, 15, 19, 22, 0, 3, 0, 19}, + {10, 18, 1018, 0, 24, 50, 15, 19, 22, 0, 3, 0, 20}, + + /* TYPE 4 */ + {10, 2, 1200, 0, 24, 50, 0, 2, 22, 0, 
3, 0, 21}, + {10, 2, 1500, 0, 24, 50, 0, 2, 22, 0, 3, 0, 22}, + {10, 2, 1600, 0, 24, 50, 0, 2, 22, 0, 3, 0, 23}, + {10, 8, 1212, 0, 24, 50, 6, 9, 22, 0, 3, 0, 24}, + {10, 8, 1517, 0, 24, 50, 6, 9, 22, 0, 3, 0, 25}, + {10, 8, 1620, 0, 24, 50, 6, 9, 22, 0, 3, 0, 26}, + {10, 14, 1221, 0, 24, 50, 12, 15, 22, 0, 3, 0, 27}, + {10, 14, 1531, 0, 24, 50, 12, 15, 22, 0, 3, 0, 28}, + {10, 14, 1636, 0, 24, 50, 12, 15, 22, 0, 3, 0, 29}, + {10, 18, 1226, 0, 24, 50, 15, 19, 22, 0, 3, 0, 30}, + {10, 18, 1540, 0, 24, 50, 15, 19, 22, 0, 3, 0, 31}, + {10, 18, 1647, 0, 24, 50, 15, 19, 22, 0, 3, 0, 32}, + + /* TYPE 5 */ + {17, 2, 2305, 0, 24, 50, 0, 2, 22, 0, 3, 0, 33}, + {17, 2, 3009, 0, 24, 50, 0, 2, 22, 0, 3, 0, 34}, + {17, 2, 3512, 0, 24, 50, 0, 2, 22, 0, 3, 0, 35}, + {17, 2, 4016, 0, 24, 50, 0, 2, 22, 0, 3, 0, 36}, + {17, 8, 2343, 0, 24, 50, 6, 9, 22, 0, 3, 0, 37}, + {17, 8, 3073, 0, 24, 50, 6, 9, 22, 0, 3, 0, 38}, + {17, 8, 3601, 0, 24, 50, 6, 9, 22, 0, 3, 0, 39}, + {17, 8, 4132, 0, 24, 50, 6, 9, 22, 0, 3, 0, 40}, + {17, 14, 2376, 0, 24, 50, 12, 15, 22, 0, 3, 0, 41}, + {17, 14, 3131, 0, 24, 50, 12, 15, 22, 0, 3, 0, 42}, + {17, 14, 3680, 0, 24, 50, 12, 15, 22, 0, 3, 0, 43}, + {17, 14, 4237, 0, 24, 50, 12, 15, 22, 0, 3, 0, 44}, + {17, 18, 2399, 0, 24, 50, 15, 19, 22, 0, 3, 0, 45}, + {17, 18, 3171, 0, 24, 50, 15, 19, 22, 0, 3, 0, 46}, + {17, 18, 3735, 0, 24, 50, 15, 19, 22, 0, 3, 0, 47}, + {17, 18, 4310, 0, 24, 50, 15, 19, 22, 0, 3, 0, 48}, + + /* TYPE 6 */ + {14, 22, 2096, 0, 24, 50, 21, 24, 22, 0, 3, 0, 49}, + {14, 22, 3222, 0, 24, 50, 21, 24, 22, 0, 3, 0, 50}, + {14, 22, 4405, 0, 24, 50, 21, 24, 22, 0, 3, 0, 51}, + {14, 32, 2146, 0, 24, 50, 30, 35, 22, 0, 3, 0, 52}, + {14, 32, 3340, 0, 24, 50, 30, 35, 22, 0, 3, 0, 53}, + {14, 32, 4629, 0, 24, 50, 30, 35, 22, 0, 3, 0, 54}, +}; + +/** + * struct dfs_pulse ar5212_fcc_radars - FCC radar pulse table for + * AR5212 chipset. 
+ */ +struct dfs_pulse ar5212_fcc_radars[] = { + /* following two filters are specific to Japan/MKK4 */ + {16, 2, 720, 6, 40, 0, 2, 18, 0, 3, 0, 30}, + {16, 3, 260, 6, 40, 0, 5, 18, 0, 3, 0, 31}, + + /* following filters are common to both FCC and JAPAN */ + {9, 2, 3003, 6, 50, 0, 2, 18, 0, 0, 0, 29}, + {16, 2, 700, 6, 35, 0, 2, 18, 0, 3, 0, 28}, + + {10, 3, 6666, 10, 90, 2, 3, 22, 0, 3, 0, 0}, + {10, 3, 5900, 10, 90, 2, 3, 22, 0, 3, 0, 1}, + {10, 3, 5200, 10, 90, 2, 3, 22, 0, 3, 0, 2}, + {10, 3, 4800, 10, 90, 2, 3, 22, 0, 3, 0, 3}, + {10, 3, 4400, 10, 90, 2, 3, 22, 0, 3, 0, 4}, + {10, 5, 6666, 50, 30, 3, 10, 22, 0, 3, 0, 5}, + {10, 5, 5900, 70, 30, 3, 10, 22, 0, 3, 0, 6}, + {10, 5, 5200, 70, 30, 3, 10, 22, 0, 3, 0, 7}, + {10, 5, 4800, 70, 30, 3, 10, 22, 0, 3, 0, 8}, + {10, 5, 4400, 50, 30, 3, 9, 22, 0, 3, 0, 9}, + + {8, 10, 5000, 100, 40, 7, 17, 22, 0, 3, 0, 10}, + {8, 10, 3000, 100, 40, 7, 17, 22, 0, 3, 0, 11}, + {8, 10, 2000, 40, 40, 9, 17, 22, 0, 3, 0, 12}, + {8, 14, 5000, 100, 40, 13, 16, 22, 0, 3, 0, 13}, + {8, 14, 3000, 100, 40, 13, 16, 22, 0, 3, 0, 14}, + {8, 14, 2000, 40, 40, 13, 16, 22, 0, 3, 0, 15}, + + {6, 10, 5000, 80, 40, 10, 15, 22, 0, 3, 0, 16}, + {6, 10, 3000, 80, 40, 10, 15, 22, 0, 3, 0, 17}, + {6, 10, 2000, 40, 40, 10, 15, 22, 0, 3, 0, 18}, + {6, 10, 5000, 80, 40, 10, 12, 22, 0, 3, 0, 19}, + {6, 10, 3000, 80, 40, 10, 12, 22, 0, 3, 0, 20}, + {6, 10, 2000, 40, 40, 10, 12, 22, 0, 3, 0, 21}, + + {6, 18, 5000, 80, 40, 16, 25, 22, 0, 3, 0, 22}, + {6, 18, 3000, 80, 40, 16, 25, 22, 0, 3, 0, 23}, + {6, 18, 2000, 40, 40, 16, 25, 22, 0, 3, 0, 24}, + + {6, 21, 5000, 80, 40, 12, 25, 22, 0, 3, 0, 25}, + {6, 21, 3000, 80, 40, 12, 25, 22, 0, 3, 0, 26}, + {6, 21, 2000, 40, 40, 12, 25, 22, 0, 3, 0, 27}, +}; + +/** + * struct dfs_bin5pulse ar5212_bin5pulses - BIN5 pulse for AR5212 chipset. 
+ */ +struct dfs_bin5pulse ar5212_bin5pulses[] = { + {5, 52, 100, 12, 22, 3}, +}; + +void dfs_get_radars_for_ar5212(struct wlan_dfs *dfs) +{ + struct wlan_dfs_radar_tab_info rinfo; + int dfsdomain = DFS_FCC_DOMAIN; + uint16_t ch_freq; + uint16_t regdmn; + + qdf_mem_zero(&rinfo, sizeof(rinfo)); + dfsdomain = utils_get_dfsdomain(dfs->dfs_pdev_obj); + + switch (dfsdomain) { + case DFS_FCC_DOMAIN: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "DFS_FCC_DOMAIN_5212"); + rinfo.dfsdomain = DFS_FCC_DOMAIN; + rinfo.dfs_radars = &ar5212_fcc_radars[2]; + rinfo.numradars = QDF_ARRAY_SIZE(ar5212_fcc_radars)-2; + rinfo.b5pulses = &ar5212_bin5pulses[0]; + rinfo.numb5radars = QDF_ARRAY_SIZE(ar5212_bin5pulses); + break; + case DFS_ETSI_DOMAIN: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "DFS_ETSI_DOMAIN_5412"); + rinfo.dfsdomain = DFS_ETSI_DOMAIN; + + ch_freq = dfs->dfs_curchan->dfs_ch_freq; + regdmn = utils_dfs_get_cur_rd(dfs->dfs_pdev_obj); + + if (((regdmn == ETSI11_WORLD_REGDMN_PAIR_ID) || + (regdmn == ETSI12_WORLD_REGDMN_PAIR_ID) || + (regdmn == ETSI13_WORLD_REGDMN_PAIR_ID) || + (regdmn == ETSI14_WORLD_REGDMN_PAIR_ID)) && + DFS_CURCHAN_IS_58GHz(ch_freq)) { + rinfo.dfs_radars = ar5212_etsi_radars; + rinfo.numradars = QDF_ARRAY_SIZE(ar5212_etsi_radars); + } else { + uint8_t offset = ETSI_LEGACY_PULSE_ARR_OFFSET; + + rinfo.dfs_radars = &ar5212_etsi_radars[offset]; + rinfo.numradars = + QDF_ARRAY_SIZE(ar5212_etsi_radars) - offset; + } + rinfo.b5pulses = &ar5212_bin5pulses[0]; + rinfo.numb5radars = QDF_ARRAY_SIZE(ar5212_bin5pulses); + break; + case DFS_MKK4_DOMAIN: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "DFS_MKK4_DOMAIN_5412"); + rinfo.dfsdomain = DFS_MKK4_DOMAIN; + rinfo.dfs_radars = &ar5212_fcc_radars[0]; + rinfo.numradars = QDF_ARRAY_SIZE(ar5212_fcc_radars); + rinfo.b5pulses = &ar5212_bin5pulses[0]; + rinfo.numb5radars = QDF_ARRAY_SIZE(ar5212_bin5pulses); + break; + default: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "No domain"); + return; + } + + rinfo.dfs_defaultparams.pe_firpwr = 
AR5212_DFS_FIRPWR; + rinfo.dfs_defaultparams.pe_rrssi = AR5212_DFS_RRSSI; + rinfo.dfs_defaultparams.pe_height = AR5212_DFS_HEIGHT; + rinfo.dfs_defaultparams.pe_prssi = AR5212_DFS_PRSSI; + rinfo.dfs_defaultparams.pe_inband = AR5212_DFS_INBAND; + + dfs_init_radar_filters(dfs, &rinfo); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/ar5416_radar.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/ar5416_radar.c new file mode 100644 index 0000000000000000000000000000000000000000..d6995a0ef815671d7d400453fb5fc80544198252 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/ar5416_radar.c @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2011, 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2005 Atheros Communications, Inc. + * Copyright (c) 2008-2010, Atheros Communications Inc. + * All Rights Reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has the radar table for AR5416 chipset and function to + * initialize the radar table. + */ + +#include "../dfs.h" +#include "../dfs_internal.h" +#include "wlan_dfs_utils_api.h" + +/* Default 5413/5416 radar phy parameters. 
*/ +#define AR5416_DFS_FIRPWR -33 +#define AR5416_DFS_RRSSI 20 +#define AR5416_DFS_HEIGHT 10 +#define AR5416_DFS_PRSSI 15 +#define AR5416_DFS_INBAND 15 +#define AR5416_DFS_RELPWR 8 +#define AR5416_DFS_RELSTEP 12 +#define AR5416_DFS_MAXLEN 255 + +/** + * struct dfs_pulse ar5416_etsi_radars - ETSI radar pulse table for + * AR5416 chipset. + */ +struct dfs_pulse ar5416_etsi_radars[] = { + + /* EN 302 502 frequency hopping pulse */ + /* PRF 3000, 1us duration, 9 pulses per burst */ + {9, 1, 3000, 3000, 1, 4, 5, 0, 1, 18, 0, 0, 1, 1000, 0, 40}, + /* PRF 4500, 20us duration, 9 pulses per burst */ + {9, 20, 4500, 4500, 1, 4, 5, 19, 21, 18, 0, 0, 1, 1000, 0, 41}, + + /* TYPE staggered pulse */ + /* 0.8-2us, 2-3 bursts,300-400 PRF, 10 pulses each */ + {20, 2, 300, 400, 2, 30, 4, 0, 2, 15, 0, 0, 0, 0, 0, 31}, + /* 0.8-2us, 2-3 bursts, 400-1200 PRF, 15 pulses each */ + {30, 2, 400, 1200, 2, 30, 7, 0, 2, 15, 0, 0, 0, 0, 0, 32}, + + /* constant PRF based */ + /* 0.8-5us, 200 300 PRF, 10 pulses */ + {10, 5, 200, 400, 0, 24, 5, 0, 8, 18, 0, 0, 0, 0, 0, 33}, + {10, 5, 400, 600, 0, 24, 5, 0, 8, 18, 0, 0, 0, 0, 0, 37}, + {10, 5, 600, 800, 0, 24, 5, 0, 8, 18, 0, 0, 0, 0, 0, 38}, + {10, 5, 800, 1000, 0, 24, 5, 0, 8, 18, 0, 0, 0, 0, 0, 39}, + + /* 0.8-15us, 200-1600 PRF, 15 pulses */ + {15, 15, 200, 1600, 0, 24, 6, 0, 18, 15, 0, 0, 0, 0, 0, 34}, + + /* 0.8-15us, 2300-4000 PRF, 25 pulses*/ + {25, 15, 2300, 4000, 0, 24, 8, 0, 18, 15, 0, 0, 0, 0, 0, 35}, + + /* 20-30us, 2000-4000 PRF, 20 pulses*/ + {20, 30, 2000, 4000, 0, 24, 8, 19, 33, 15, 0, 0, 0, 0, 0, 36}, +}; + +/** + * struct dfs_pulse ar5416_fcc_radars - FCC radar pulse table for + * AR5416 chipset. 
+ */ +struct dfs_pulse ar5416_fcc_radars[] = { + /* following two filters are specific to Japan/MKK4 */ + /* 1389 +/- 6 us */ + {18, 1, 720, 720, 0, 6, 6, 0, 1, 18, 0, 3, 0, 0, 0, 17}, + /* 4000 +/- 6 us */ + {18, 4, 250, 250, 0, 10, 5, 1, 6, 18, 0, 3, 0, 0, 0, 18}, + /* 3846 +/- 7 us */ + {18, 5, 260, 260, 0, 10, 6, 1, 6, 18, 0, 3, 0, 0, 0, 19}, + + /* following filters are common to both FCC and JAPAN */ + /* FCC TYPE 1 */ + {18, 1, 700, 700, 0, 6, 5, 0, 1, 18, 0, 3, 0, 0, 0, 0}, + {18, 1, 350, 350, 0, 6, 5, 0, 1, 18, 0, 3, 0, 0, 0, 0}, + + /* FCC TYPE 6 */ + {9, 1, 3003, 3003, 1, 7, 5, 0, 1, 18, 0, 0, 0, 0, 0, 1}, + + /* FCC TYPE 2 */ + {23, 5, 4347, 6666, 0, 18, 11, 0, 7, 20, 0, 3, 0, 0, 0, 2}, + + /* FCC TYPE 3 */ + {18, 10, 2000, 5000, 0, 23, 8, 6, 13, 20, 0, 3, 0, 0, 0, 5}, + + /* FCC TYPE 4 */ + {16, 15, 2000, 5000, 0, 25, 7, 11, 23, 20, 0, 3, 0, 0, 0, 11}, +}; + +/** + * struct dfs_bin5pulse ar5416_bin5pulses - BIN5 pulse for AR5416 chipset. + */ +struct dfs_bin5pulse ar5416_bin5pulses[] = { + {2, 28, 105, 12, 22, 5}, +}; + +void dfs_get_radars_for_ar5416(struct wlan_dfs *dfs) +{ + struct wlan_dfs_radar_tab_info rinfo; + int dfsdomain = DFS_FCC_DOMAIN; + uint16_t ch_freq; + uint16_t regdmn; + + qdf_mem_zero(&rinfo, sizeof(rinfo)); + dfsdomain = utils_get_dfsdomain(dfs->dfs_pdev_obj); + + switch (dfsdomain) { + case DFS_FCC_DOMAIN: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "DFS_FCC_DOMAIN_5416"); + rinfo.dfsdomain = DFS_FCC_DOMAIN; + rinfo.dfs_radars = &ar5416_fcc_radars[3]; + rinfo.numradars = QDF_ARRAY_SIZE(ar5416_fcc_radars)-3; + rinfo.b5pulses = &ar5416_bin5pulses[0]; + rinfo.numb5radars = QDF_ARRAY_SIZE(ar5416_bin5pulses); + break; + case DFS_ETSI_DOMAIN: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "DFS_ETSI_DOMAIN_5416"); + rinfo.dfsdomain = DFS_ETSI_DOMAIN; + + ch_freq = dfs->dfs_curchan->dfs_ch_freq; + regdmn = utils_dfs_get_cur_rd(dfs->dfs_pdev_obj); + + if (((regdmn == ETSI11_WORLD_REGDMN_PAIR_ID) || + (regdmn == ETSI12_WORLD_REGDMN_PAIR_ID) || + (regdmn 
== ETSI13_WORLD_REGDMN_PAIR_ID) || + (regdmn == ETSI14_WORLD_REGDMN_PAIR_ID)) && + DFS_CURCHAN_IS_58GHz(ch_freq)) { + rinfo.dfs_radars = ar5416_etsi_radars; + rinfo.numradars = QDF_ARRAY_SIZE(ar5416_etsi_radars); + } else { + uint8_t offset = ETSI_LEGACY_PULSE_ARR_OFFSET; + + rinfo.dfs_radars = &ar5416_etsi_radars[offset]; + rinfo.numradars = + QDF_ARRAY_SIZE(ar5416_etsi_radars) - offset; + } + + rinfo.b5pulses = &ar5416_bin5pulses[0]; + rinfo.numb5radars = QDF_ARRAY_SIZE(ar5416_bin5pulses); + break; + case DFS_MKK4_DOMAIN: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "DFS_MKK4_DOMAIN_5416"); + rinfo.dfsdomain = DFS_MKK4_DOMAIN; + rinfo.dfs_radars = &ar5416_fcc_radars[0]; + rinfo.numradars = QDF_ARRAY_SIZE(ar5416_fcc_radars); + rinfo.b5pulses = &ar5416_bin5pulses[0]; + rinfo.numb5radars = QDF_ARRAY_SIZE(ar5416_bin5pulses); + break; + default: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "no domain"); + return; + } + + rinfo.dfs_defaultparams.pe_firpwr = AR5416_DFS_FIRPWR; + rinfo.dfs_defaultparams.pe_rrssi = AR5416_DFS_RRSSI; + rinfo.dfs_defaultparams.pe_height = AR5416_DFS_HEIGHT; + rinfo.dfs_defaultparams.pe_prssi = AR5416_DFS_PRSSI; + rinfo.dfs_defaultparams.pe_inband = AR5416_DFS_INBAND; + rinfo.dfs_defaultparams.pe_relpwr = AR5416_DFS_RELPWR; + rinfo.dfs_defaultparams.pe_relstep = AR5416_DFS_RELSTEP; + rinfo.dfs_defaultparams.pe_maxlen = AR5416_DFS_MAXLEN; + + dfs_init_radar_filters(dfs, &rinfo); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/ar9300_radar.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/ar9300_radar.c new file mode 100644 index 0000000000000000000000000000000000000000..d33f92346d728707a3f2bc0d3afe40941f09e017 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/ar9300_radar.c @@ -0,0 +1,259 @@ +/* + * Copyright (c) 2011, 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2008-2010, Atheros Communications Inc. + * All Rights Reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has the radar table for AR9300 chipset and function to + * initialize the radar table. + */ + +#include "../dfs.h" +#include "../dfs_internal.h" +#include "wlan_dfs_utils_api.h" +#include "wlan_dfs_lmac_api.h" + +/* + * Default 5413/9300 radar phy parameters + * Values adjusted to fix EV76432/EV76320 + */ +#define AR9300_DFS_FIRPWR -28 +#define AR9300_DFS_RRSSI 0 +#define AR9300_DFS_HEIGHT 10 +#define AR9300_DFS_PRSSI 6 +#define AR9300_DFS_INBAND 8 +#define AR9300_DFS_RELPWR 8 +#define AR9300_DFS_RELSTEP 12 +#define AR9300_DFS_MAXLEN 255 +#define AR9300_DFS_PRSSI_CAC 10 + +/* + * Make sure that value matches value in ar9300_osprey_2p2_mac_core[][2] for + * register 0x1040 to 0x104c. + */ +#define AR9300_FCC_RADARS_FCC_OFFSET 4 + +/** + * struct dfs_pulse ar9300_etsi_radars - ETSI radar pulse table for + * AR9300 chipset. + * + * For short pulses, RSSI threshold should be smaller than Kquick-drop. + * The chip has only one chance to drop the gain which will be reported + * as the estimated RSSI. 
+ */ +struct dfs_pulse ar9300_etsi_radars[] = { + + /* EN 302 502 frequency hopping pulse */ + /* PRF 3000, 1us duration, 9 pulses per burst */ + {9, 1, 3000, 3000, 1, 4, 5, 0, 1, 18, 0, 0, 1, 1000, 0, 40}, + /* PRF 4500, 20us duration, 9 pulses per burst */ + {9, 20, 4500, 4500, 1, 4, 5, 19, 21, 18, 0, 0, 1, 1000, 0, 41}, + + /* TYPE staggered pulse */ + /* Type 5*/ + /* 0.8-2us, 2-3 bursts,300-400 PRF, 10 pulses each */ + {30, 2, 300, 400, 2, 30, 3, 0, 5, 15, 0, 0, 1, 0, 0, 31}, + /* Type 6 */ + /* 0.8-2us, 2-3 bursts, 400-1200 PRF, 15 pulses each */ + {30, 2, 400, 1200, 2, 30, 7, 0, 5, 15, 0, 0, 0, 0, 0, 32}, + + /* constant PRF based */ + /* Type 1 */ + /* 0.8-5us, 200 300 PRF, 10 pulses */ + {10, 5, 200, 400, 0, 24, 5, 0, 8, 15, 0, 0, 2, 0, 0, 33}, + {10, 5, 400, 600, 0, 24, 5, 0, 8, 15, 0, 0, 2, 0, 0, 37}, + {10, 5, 600, 800, 0, 24, 5, 0, 8, 15, 0, 0, 2, 0, 0, 38}, + {10, 5, 800, 1000, 0, 24, 5, 0, 8, 15, 0, 0, 2, 0, 0, 39}, + + /* Type 2 */ + /* 0.8-15us, 200-1600 PRF, 15 pulses */ + {15, 15, 200, 1600, 0, 24, 8, 0, 18, 24, 0, 0, 0, 0, 0, 34}, + + /* Type 3 */ + /* 0.8-15us, 2300-4000 PRF, 25 pulses*/ + {25, 15, 2300, 4000, 0, 24, 10, 0, 18, 24, 0, 0, 0, 0, 0, 35}, + + /* Type 4 */ + /* 20-30us, 2000-4000 PRF, 20 pulses*/ + {20, 30, 2000, 4000, 0, 24, 8, 19, 33, 24, 0, 0, 0, 0, 0, 36}, +}; + +/** + * struct dfs_pulse ar9300_fcc_radars - FCC radar pulse table for + * AR9300 chipset. + */ +struct dfs_pulse ar9300_fcc_radars[] = { + /* + * Format is as following: + * Numpulses pulsedur pulsefreq max_pulsefreq patterntype pulsevar + * threshold mindur maxdur rssithresh meanoffset rssimargin pulseid. 
+ */ + + /* following two filters are specific to Japan/MKK4 */ + /* 1389 +/- 6 us */ + {18, 1, 720, 720, 0, 6, 6, 0, 1, 18, 0, 3, 0, 0, 0, 17}, + /* 4000 +/- 6 us */ + {18, 4, 250, 250, 0, 10, 5, 1, 6, 18, 0, 3, 0, 0, 0, 18}, + /* 3846 +/- 7 us */ + {18, 5, 260, 260, 0, 10, 6, 1, 6, 18, 0, 3, 1, 0, 0, 19}, + /* 3846 +/- 7 us */ + {18, 5, 260, 260, 1, 10, 6, 1, 6, 18, 0, 3, 1, 0, 0, 20}, + + /* following filters are common to both FCC and JAPAN */ + + /* FCC TYPE 1 */ + {18, 1, 700, 700, 0, 6, 5, 0, 1, 18, 0, 3, 1, 0, 0, 8}, + {18, 1, 350, 350, 0, 6, 5, 0, 1, 18, 0, 3, 0, 0, 0, 0}, + + /* FCC TYPE 6 */ + {9, 1, 3003, 3003, 0, 7, 5, 0, 1, 18, 0, 0, 1, 0, 0, 1}, + + /* FCC TYPE 2 */ + {23, 5, 4347, 6666, 0, 18, 11, 0, 7, 22, 0, 3, 0, 0, 0, 2}, + + /* FCC TYPE 3 */ + {18, 10, 2000, 5000, 0, 23, 8, 6, 13, 22, 0, 3, 0, 0, 0, 5}, + + /* FCC TYPE 4 */ + {16, 15, 2000, 5000, 0, 25, 7, 11, 23, 22, 0, 3, 0, 0, 0, 11}, + + /* FCC NEW TYPE 1 */ + /* Search duration is numpulses*maxpri. + * The last theshold can be increased if false detects happen + */ + /* 518us to 938us pulses (min 56 pulses) */ + {57, 1, 1066, 1930, 0, 6, 20, 0, 1, 22, 0, 3, 0, 0, 0, 21}, + /* 938us to 2000 pulses (min 26 pulses) */ + {27, 1, 500, 1066, 0, 6, 13, 0, 1, 22, 0, 3, 0, 0, 0, 22}, + /* 2000 to 3067us pulses (min 17 pulses)*/ + {18, 1, 325, 500, 0, 6, 9, 0, 1, 22, 0, 3, 0, 0, 0, 23}, + +}; + +/** + * struct dfs_bin5pulse ar9300_bin5pulses - BIN5 pulse for AR9300 chipset. + */ +struct dfs_bin5pulse ar9300_bin5pulses[] = { + {2, 28, 105, 12, 22, 5}, +}; + +/** + * struct dfs_pulse ar9300_korea_radars - DFS pulses for KOREA domain. 
+ */ +struct dfs_pulse ar9300_korea_radars[] = { + /* Korea Type 1 */ + {18, 1, 700, 700, 0, 6, 5, 0, 1, 18, 0, 3, 1, 0, 0, 40}, + /* Korea Type 2 */ + {10, 1, 1800, 1800, 0, 6, 4, 0, 1, 18, 0, 3, 1, 0, 0, 41}, + /* Korea Type 3 */ + {70, 1, 330, 330, 0, 6, 20, 0, 2, 18, 0, 3, 1, 0, 0, 42}, + /* Korea Type 4 */ + {3, 1, 3003, 3003, 1, 7, 2, 0, 1, 18, 0, 0, 1, 0, 0, 43}, +}; + +void dfs_get_radars_for_ar9300(struct wlan_dfs *dfs) +{ + struct wlan_dfs_radar_tab_info rinfo; + int dfsdomain = DFS_FCC_DOMAIN; + uint16_t ch_freq; + uint16_t regdmn; + + qdf_mem_zero(&rinfo, sizeof(rinfo)); + dfsdomain = utils_get_dfsdomain(dfs->dfs_pdev_obj); + + switch (dfsdomain) { + case DFS_FCC_DOMAIN: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "DFS_FCC_DOMAIN_9300"); + rinfo.dfsdomain = DFS_FCC_DOMAIN; + rinfo.dfs_radars = + &ar9300_fcc_radars[AR9300_FCC_RADARS_FCC_OFFSET]; + rinfo.numradars = + (QDF_ARRAY_SIZE(ar9300_fcc_radars) - + AR9300_FCC_RADARS_FCC_OFFSET); + rinfo.b5pulses = &ar9300_bin5pulses[0]; + rinfo.numb5radars = QDF_ARRAY_SIZE(ar9300_bin5pulses); + break; + case DFS_ETSI_DOMAIN: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "DFS_ETSI_DOMAIN_9300"); + rinfo.dfsdomain = DFS_ETSI_DOMAIN; + + ch_freq = dfs->dfs_curchan->dfs_ch_freq; + regdmn = utils_dfs_get_cur_rd(dfs->dfs_pdev_obj); + + if (((regdmn == ETSI11_WORLD_REGDMN_PAIR_ID) || + (regdmn == ETSI12_WORLD_REGDMN_PAIR_ID) || + (regdmn == ETSI13_WORLD_REGDMN_PAIR_ID) || + (regdmn == ETSI14_WORLD_REGDMN_PAIR_ID)) && + DFS_CURCHAN_IS_58GHz(ch_freq)) { + rinfo.dfs_radars = ar9300_etsi_radars; + rinfo.numradars = QDF_ARRAY_SIZE(ar9300_etsi_radars); + } else { + uint8_t offset = ETSI_LEGACY_PULSE_ARR_OFFSET; + + rinfo.dfs_radars = &ar9300_etsi_radars[offset]; + rinfo.numradars = + QDF_ARRAY_SIZE(ar9300_etsi_radars) - offset; + } + + rinfo.b5pulses = &ar9300_bin5pulses[0]; + rinfo.numb5radars = QDF_ARRAY_SIZE(ar9300_bin5pulses); + break; + case DFS_KR_DOMAIN: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + 
"DFS_ETSI_DOMAIN_9300_Country_Korea"); + rinfo.dfsdomain = DFS_ETSI_DOMAIN; + rinfo.dfs_radars = &ar9300_korea_radars[0]; + rinfo.numradars = QDF_ARRAY_SIZE(ar9300_korea_radars); + rinfo.b5pulses = &ar9300_bin5pulses[0]; + rinfo.numb5radars = QDF_ARRAY_SIZE(ar9300_bin5pulses); + break; + case DFS_MKK4_DOMAIN: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "DFS_MKK4_DOMAIN_9300"); + rinfo.dfsdomain = DFS_MKK4_DOMAIN; + rinfo.dfs_radars = &ar9300_fcc_radars[0]; + rinfo.numradars = QDF_ARRAY_SIZE(ar9300_fcc_radars); + rinfo.b5pulses = &ar9300_bin5pulses[0]; + rinfo.numb5radars = QDF_ARRAY_SIZE(ar9300_bin5pulses); + break; + default: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "no domain"); + return; + } + + lmac_set_use_cac_prssi(dfs->dfs_pdev_obj); + + rinfo.dfs_defaultparams.pe_firpwr = AR9300_DFS_FIRPWR; + rinfo.dfs_defaultparams.pe_rrssi = AR9300_DFS_RRSSI; + rinfo.dfs_defaultparams.pe_height = AR9300_DFS_HEIGHT; + rinfo.dfs_defaultparams.pe_prssi = AR9300_DFS_PRSSI; + + /* + * We have an issue with PRSSI. + * For normal operation we use AR9300_DFS_PRSSI, which is set to 6. + * Please refer to EV91563, 94164. + * However, this causes problem during CAC as no radar is detected + * during that period with PRSSI=6. Only PRSSI= 10 seems to fix this. + * We use this flag to keep track of change in PRSSI. 
+ */ + rinfo.dfs_defaultparams.pe_inband = AR9300_DFS_INBAND; + rinfo.dfs_defaultparams.pe_relpwr = AR9300_DFS_RELPWR; + rinfo.dfs_defaultparams.pe_relstep = AR9300_DFS_RELSTEP; + rinfo.dfs_defaultparams.pe_maxlen = AR9300_DFS_MAXLEN; + + dfs_init_radar_filters(dfs, &rinfo); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_ar.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_ar.c new file mode 100644 index 0000000000000000000000000000000000000000..fb6c136af2a9fd122bc9cb13ac2f6a3285590670 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_ar.c @@ -0,0 +1,316 @@ +/* + * Copyright (c) 2013, 2016-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2010, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains now obsolete code which used to implement AR + * (Adaptive Radio) feature for older chipsets. + */ + +#include "../dfs.h" + +#define UPDATE_TOP_THREE_PEAKS(_histo, _peakPtrList, _currWidth) \ + do { \ + if ((_histo)[(_peakPtrList)[0]] < (_histo)[(_currWidth)]) { \ + (_peakPtrList)[2] = \ + (_currWidth != (_peakPtrList)[1]) ? 
\ + (_peakPtrList)[1] : (_peakPtrList)[2]; \ + (_peakPtrList)[1] = (_peakPtrList)[0]; \ + (_peakPtrList)[0] = (_currWidth); \ + } else if ((_currWidth != (_peakPtrList)[0]) \ + && ((_histo)[(_peakPtrList)[1]] < \ + (_histo)[(_currWidth)])) { \ + (_peakPtrList)[2] = (_peakPtrList)[1]; \ + (_peakPtrList)[1] = (_currWidth); \ + } else if ((_currWidth != (_peakPtrList)[1]) \ + && (_currWidth != (_peakPtrList)[0]) \ + && ((_histo)[(_peakPtrList)[2]] < \ + (_histo)[(_currWidth)])) { \ + (_peakPtrList)[2] = (_currWidth); \ + } \ + } while (0) + +void dfs_process_ar_event(struct wlan_dfs *dfs, + struct dfs_channel *chan) +{ + struct dfs_ar_state *ar; + struct dfs_event *re = NULL; + uint32_t sumpeak = 0, numpeaks = 0; + uint32_t rssi = 0, width = 0; + uint32_t origregionsum = 0, i = 0; + uint16_t thistimestamp; + int empty; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + ar = (struct dfs_ar_state *) &(dfs->dfs_ar_state); + WLAN_ARQ_LOCK(dfs); + empty = STAILQ_EMPTY(&(dfs->dfs_arq)); + WLAN_ARQ_UNLOCK(dfs); + while (!empty) { + WLAN_ARQ_LOCK(dfs); + re = STAILQ_FIRST(&(dfs->dfs_arq)); + if (re != NULL) + STAILQ_REMOVE_HEAD(&(dfs->dfs_arq), re_list); + WLAN_ARQ_UNLOCK(dfs); + if (!re) + return; + + thistimestamp = re->re_ts; + rssi = re->re_rssi; + width = re->re_dur; + + /* Return the dfs event to the free event list. */ + qdf_mem_zero(re, sizeof(struct dfs_event)); + WLAN_DFSEVENTQ_LOCK(dfs); + STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), re, re_list); + WLAN_DFSEVENTQ_UNLOCK(dfs); + + /* + * Determine if current radar is an extension of previous + * radar. + */ + if (ar->ar_prevwidth == 255) { + /* + * Tag on previous width for consideraion of low data + * rate ACKs. + */ + ar->ar_prevwidth += width; + width = (width == 255) ? 
255 : ar->ar_prevwidth;
+		} else if ((width == 255) &&
+				(ar->ar_prevwidth == 510 ||
+				 ar->ar_prevwidth == 765 ||
+				 ar->ar_prevwidth == 1020)) {
+			/*
+			 * Aggregate up to 5 consecutive max radar widths to
+			 * consider 11Mbps long preamble 1500-byte pkts.
+			 */
+			ar->ar_prevwidth += width;
+		} else if (ar->ar_prevwidth == 1275 && width != 255) {
+			/* Found 5th consecutive maxed out radar, reset history */
+			width += ar->ar_prevwidth;
+			ar->ar_prevwidth = 0;
+		} else if (ar->ar_prevwidth > 255) {
+			/*
+			 * Ignore if there are less than 5 consecutive maxed
+			 * out radars.
+			 */
+			ar->ar_prevwidth = width;
+			width = 255;
+		} else {
+			ar->ar_prevwidth = width;
+		}
+
+		/*
+		 * For ignoring noises with radar duration in ranges
+		 * of 3-30: AP4x. Region 7 - 5.5Mbps (long pre)
+		 * ACK = 270 = 216 us.
+		 */
+		if ((width >= 257 && width <= 278) ||
+				/*
+				 * Region 8 - 2Mbps (long pre)
+				 * ACK = 320 = 256us.
+				 */
+				(width >= 295 && width <= 325) ||
+				(width >= 1280 && width <= 1300)) {
+			uint16_t wraparoundadj = 0;
+			uint16_t base = (width >= 1280) ? 1275 : 255;
+
+			if (thistimestamp < ar->ar_prevtimestamp)
+				wraparoundadj = 32768;
+
+			if ((thistimestamp + wraparoundadj -
+					ar->ar_prevtimestamp) != (width - base))
+				width = 1;
+		}
+		if (width <= 10) {
+			WLAN_ARQ_LOCK(dfs);
+			empty = STAILQ_EMPTY(&(dfs->dfs_arq));
+			WLAN_ARQ_UNLOCK(dfs);
+			continue;
+		}
+
+		/*
+		 * Overloading the width=2 bin: Store a count of
+		 * radars w/max duration and high RSSI (not noise)
+		 */
+		if ((width == 255) && (rssi > DFS_AR_RSSI_THRESH_STRONG_PKTS))
+			width = 2;
+
+		/*
+		 * Overloading the width=3 bin:
+		 * Double and store a count of radars of duration that matches
+		 * 11Mbps (long preamble) TCP ACKs or 1500-byte data packets.
+		 */
+		if ((width >= 1280 && width <= 1300) ||
+				(width >= 318 && width <= 325)) {
+			width = 3;
+			ar->ar_phyerrcount[3] += 2;
+			ar->ar_acksum += 2;
+		}
+
+		/* Build histogram of radar duration. 
*/ + if (width > 0 && width <= 510) + ar->ar_phyerrcount[width]++; + else { + /* Invalid radar width, throw it away. */ + WLAN_ARQ_LOCK(dfs); + empty = STAILQ_EMPTY(&(dfs->dfs_arq)); + WLAN_ARQ_UNLOCK(dfs); + continue; + } + + /* Received radar of interest (i.e., signature match), + * proceed to check if there is enough neighboring + * traffic to drop out of Turbo. + */ + /* Region 0: 24Mbps ACK = 35 = 28us */ + if ((width >= REG0_MIN_WIDTH && width <= REG0_MAX_WIDTH) || + /* Region 1: 12Mbps ACK = 40 = 32us */ + (width >= REG1_MIN_WIDTH && width <= REG1_MAX_WIDTH) || + /* Region 2: 6Mbps ACK = 55 = 44us */ + (width >= REG2_MIN_WIDTH && width <= REG2_MAX_WIDTH) || + /* Region 3: 11Mbps ACK = 135 = 108us */ + (width >= REG3_MIN_WIDTH && width <= REG3_MAX_WIDTH) || + /* Region 4: 5.5Mbps ACK = 150 = 120us */ + (width >= REG4_MIN_WIDTH && width <= REG4_MAX_WIDTH) || + /* Region 5: 2Mbps ACK = 200 = 160us */ + (width >= REG5_MIN_WIDTH && width <= REG5_MAX_WIDTH) || + /* Region 6: 1Mbps ACK = 400 = 320us */ + (width >= REG6_MIN_WIDTH && width <= REG6_MAX_WIDTH) || + /* Region 7: 5.5Mbps (Long Pre) ACK = 270 = 216us. */ + (width >= REG7_MIN_WIDTH && width <= REG7_MAX_WIDTH) || + /* Region 8: 2Mbps (Long Pre) ACK = 320 = 256us. */ + (width >= REG8_MIN_WIDTH && width <= REG8_MAX_WIDTH) || + /* + * Ignoring Region 9 due to overlap with 255 which is + * same as board noise. + */ + /* Region 9: 11Mbps (Long Pre) ACK = 255 = 204us. */ + (width == 3)) { + ar->ar_acksum++; + /* + * Double the count for strong radars that match + * one of the ACK signatures. + */ + if (rssi > DFS_AR_RSSI_DOUBLE_THRESHOLD) { + ar->ar_phyerrcount[width]++; + ar->ar_acksum++; + } + UPDATE_TOP_THREE_PEAKS(ar->ar_phyerrcount, + ar->ar_peaklist, width); + /* Sum the counts of these peaks. 
*/
+			numpeaks = DFS_AR_MAX_NUM_PEAKS;
+			origregionsum = ar->ar_acksum;
+			for (i = 0; i < DFS_AR_MAX_NUM_PEAKS; i++) {
+				if (ar->ar_peaklist[i] > 0) {
+					if ((i == 0) &&
+						(ar->ar_peaklist[i] == 3) &&
+						(ar->ar_phyerrcount[3] <
+						 ar->ar_phyerrcount[2]) &&
+						(ar->ar_phyerrcount[3] > 6)) {
+						/*
+						 * If the top peak is one that
+						 * matches the 11Mbps long
+						 * preamble TCP Ack/1500-byte
+						 * data, include the count for
+						 * radars that have max duration
+						 * and high rssi (width = 2) to
+						 * boost the sum for the PAR
+						 * test that follows.
+						 */
+						sumpeak +=
+							(ar->ar_phyerrcount[2] +
+							 ar->ar_phyerrcount[3]);
+						ar->ar_acksum +=
+							(ar->ar_phyerrcount[2] +
+							 ar->ar_phyerrcount[3]);
+					} else {
+						sumpeak += ar->ar_phyerrcount[
+							ar->ar_peaklist[i]];
+					}
+				} else
+					numpeaks--;
+			}
+			/*
+			 * If sum of patterns matches exceeds packet threshold,
+			 * perform comparison between peak-to-avg ratio against
+			 * parThreshold.
+			 */
+			if ((ar->ar_acksum > ar->ar_packetthreshold) &&
+					((sumpeak * DFS_AR_REGION_WIDTH) >
+					 (ar->ar_parthreshold * numpeaks *
+					  ar->ar_acksum))) {
+				/*
+				 * Neighboring traffic detected, get out of
+				 * Turbo.
+				 */
+				chan->dfs_ch_flagext |= CHANNEL_INTERFERENCE;
+				qdf_mem_zero(ar->ar_peaklist,
+						sizeof(ar->ar_peaklist));
+				ar->ar_acksum = 0;
+				qdf_mem_zero(ar->ar_phyerrcount,
+						sizeof(ar->ar_phyerrcount));
+			} else {
+				/*
+				 * Reset sum of matches to discount the count
+				 * of strong radars with max duration. 
+ */ + ar->ar_acksum = origregionsum; + } + } + ar->ar_prevtimestamp = thistimestamp; + WLAN_ARQ_LOCK(dfs); + empty = STAILQ_EMPTY(&(dfs->dfs_arq)); + WLAN_ARQ_UNLOCK(dfs); + } +} + +void dfs_reset_ar(struct wlan_dfs *dfs) +{ + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + qdf_mem_zero(&dfs->dfs_ar_state, sizeof(dfs->dfs_ar_state)); + dfs->dfs_ar_state.ar_packetthreshold = DFS_AR_PKT_COUNT_THRESH; + dfs->dfs_ar_state.ar_parthreshold = DFS_AR_ACK_DETECT_PAR_THRESH; +} + +void dfs_reset_arq(struct wlan_dfs *dfs) +{ + struct dfs_event *event; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + WLAN_ARQ_LOCK(dfs); + WLAN_DFSEVENTQ_LOCK(dfs); + while (!STAILQ_EMPTY(&(dfs->dfs_arq))) { + event = STAILQ_FIRST(&(dfs->dfs_arq)); + STAILQ_REMOVE_HEAD(&(dfs->dfs_arq), re_list); + qdf_mem_zero(event, sizeof(struct dfs_event)); + STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), event, re_list); + } + WLAN_DFSEVENTQ_UNLOCK(dfs); + WLAN_ARQ_UNLOCK(dfs); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_bindetects.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_bindetects.c new file mode 100644 index 0000000000000000000000000000000000000000..ff2b64d5e3536cdda29bd068e7432537dff8d824 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_bindetects.c @@ -0,0 +1,969 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2010, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: DFS specs specify various types of radars to be detected. + * Each separate type is called a Bin and has different characteristics. + * This file contains the functionality to look at a group of pulses and + * to detect whether we have detected a valid radar waveform. To do that, + * it must match the group against each different Bin's characteristics. + */ + +#include "../dfs.h" + +/** + * dfs_find_first_index_within_window() - Find first index within window + * @pl: Pointer to dfs_pulseline structure. + * @index: Index to dfs pulse elements. + * @start_ts: Start timestamp. + * + * Return: Returns index. + */ +static inline uint32_t dfs_find_first_index_within_window( + struct dfs_pulseline *pl, + uint32_t index, + uint64_t start_ts) +{ + uint16_t i; + + /* Find the index of first element in our window of interest. */ + for (i = 0; i < pl->pl_numelems; i++) { + index = (index - 1) & DFS_MAX_PULSE_BUFFER_MASK; + if (pl->pl_elems[index].p_time >= start_ts) { + continue; + } else { + index = (index) & DFS_MAX_PULSE_BUFFER_MASK; + break; + } + } + + return index; +} + +/** + * dfs_ts_within_window() - Calculate pulses for timestamp within window + * @dfs: Pointer to wlan_dfs structure. + * @pl: Pointer to dfs_pulseline structure. + * @index: Index to dfs pulse elements. + * @dur: Pulse duration/width + * @numpulses: Number of pulses + * + * Return: Returns 1 if pulse count is incremented else returns 0. 
+ */ +static inline bool dfs_ts_within_window( + struct wlan_dfs *dfs, + struct dfs_pulseline *pl, + uint32_t *index, + uint32_t dur, + int *numpulses) +{ + uint32_t deltadur; + + deltadur = DFS_DIFF(pl->pl_elems[*index].p_dur, dur); + if ((pl->pl_elems[*index].p_dur == 1) || + ((dur != 1) && (deltadur <= 2))) { + (*numpulses)++; + dfs_debug(dfs, WLAN_DEBUG_DFS2, "numpulses %u", *numpulses); + return 1; + } + + return 0; +} + +/** + * dfs_ts_eq_prevts() - Calculate pulses for timestamp equals to prev event + * @dfs: Pointer to wlan_dfs structure. + * @pl: Pointer to dfs_pulseline structure. + * @index: Index to dfs pulse elements. + * @dur: Pulse duration/width + * @numpulses: Number of pulses + * + * Return: Returns 1 if pulse count is incremented else returns 0. + */ +static inline bool dfs_ts_eq_prevts( + struct wlan_dfs *dfs, + struct dfs_pulseline *pl, + uint64_t next_event_ts, + uint64_t event_ts, + uint32_t refpri, + uint32_t *index, + uint32_t dur, + int *numpulses) + +{ + uint32_t deltadur; + + if (((next_event_ts - event_ts) > refpri) || + ((next_event_ts - event_ts) == 0)) { + deltadur = DFS_DIFF(pl->pl_elems[*index].p_dur, dur); + if ((pl->pl_elems[*index].p_dur == 1) || + ((pl->pl_elems[*index].p_dur != 1) && + (deltadur <= 2))) { + (*numpulses)++; + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "zero PRI: numpulses %u", *numpulses); + return 1; + } + } + + return 0; +} + +/** + * dfs_pulses_within_window() - Calculate pulses within window + * @dfs: Pointer to wlan_dfs structure. + * @window_start: Start of the window. + * @window_end: End of the window. + * @index: Index to dfs pulse elements. + * @dur: Pulse duration/width. + * @refpri: reference PRI. + * + * Return: Returns 1 if pulse count is incremented else returns 0. 
+ */ +static inline int dfs_pulses_within_window( + struct wlan_dfs *dfs, + uint64_t window_start, + uint64_t window_end, + uint32_t *index, + uint32_t dur, + uint32_t refpri) +{ + int numpulses = 0; + uint32_t i; + struct dfs_pulseline *pl = dfs->pulses; + uint64_t event_ts, prev_event_ts, next_event_ts; + uint32_t next_index; + + for (i = 0; i < pl->pl_numelems; i++) { + prev_event_ts = pl->pl_elems[*index].p_time; + *index = (*index+1) & DFS_MAX_PULSE_BUFFER_MASK; + event_ts = pl->pl_elems[*index].p_time; + next_index = (*index+1) & DFS_MAX_PULSE_BUFFER_MASK; + next_event_ts = pl->pl_elems[next_index].p_time; + dfs_debug(dfs, WLAN_DEBUG_DFS2, "ts %u", + (uint32_t)event_ts); + + if ((event_ts <= window_end) && (event_ts >= window_start)) { + if (dfs_ts_within_window(dfs, pl, index, dur, + &numpulses)) + break; + } else if (event_ts > window_end) { + *index = (*index-1) & DFS_MAX_PULSE_BUFFER_MASK; + break; + } else if (event_ts == prev_event_ts) { + if (dfs_ts_eq_prevts(dfs, pl, next_event_ts, event_ts, + refpri, index, dur, &numpulses)) + break; + } + } + + return numpulses; +} + +/** + * dfs_count_pulses() - Count pulses + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @dur: Pulse duration/width. + * @ext_chan_flag : Ext channel flag. + * @primargin: Primary margin. + * @index: Index to dfs pulse elements. + * @refpri: reference PRI. + * @start_ts: Start timestamp. + * + * Return: Returns number of pulses within window. 
+ */ +static inline int dfs_count_pulses( + struct wlan_dfs *dfs, + struct dfs_filter *rf, + uint32_t dur, + int ext_chan_flag, + int primargin, + uint32_t index, + uint32_t refpri, + uint64_t start_ts) +{ + uint32_t n; + int numpulses = 0; + uint64_t window_start, window_end; + + for (n = 0; n <= rf->rf_numpulses; n++) { + window_start = (start_ts + (refpri*n))-(primargin+n); + window_end = window_start + 2*(primargin+n); + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "window_start %u window_end %u", + (uint32_t)window_start, (uint32_t)window_end); + numpulses += dfs_pulses_within_window(dfs, window_start, + window_end, &index, dur, refpri); + } + + return numpulses; +} + +/** + * dfs_bin_fixedpattern_check() - Fixed pattern check + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @dur: Pulse duration/width. + * @ext_chan_flag : Ext channel flag. + */ +static int dfs_bin_fixedpattern_check( + struct wlan_dfs *dfs, + struct dfs_filter *rf, + uint32_t dur, + int ext_chan_flag) +{ + struct dfs_pulseline *pl = dfs->pulses; + int primargin, numpulses, fil_thresh; + uint64_t start_ts, end_ts; + uint32_t last_index, first_index; + uint32_t refpri; + + refpri = (rf->rf_minpri + rf->rf_maxpri)/2; + last_index = pl->pl_lastelem; + end_ts = pl->pl_elems[last_index].p_time; + start_ts = end_ts - (refpri*rf->rf_numpulses); + + dfs_debug(dfs, WLAN_DEBUG_DFS3, + "lastelem ts=%llu start_ts=%llu, end_ts=%llu", + (unsigned long long)pl->pl_elems[last_index].p_time, + (unsigned long long)start_ts, + (unsigned long long) end_ts); + + first_index = dfs_find_first_index_within_window(pl, last_index, + start_ts); + + /* For fixed pattern types, rf->rf_patterntype=1. 
*/ + primargin = dfs_get_pri_margin(dfs, ext_chan_flag, + (rf->rf_patterntype == 1)); + + numpulses = dfs_count_pulses(dfs, rf, dur, ext_chan_flag, primargin, + first_index, refpri, start_ts); + + fil_thresh = dfs_get_filter_threshold(dfs, rf, ext_chan_flag); + + if (numpulses >= fil_thresh) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "FOUND filterID=%u numpulses=%d unadj thresh=%d", + rf->rf_pulseid, numpulses, rf->rf_threshold); + return 1; + } else { + return 0; + } +} + +void dfs_add_pulse( + struct wlan_dfs *dfs, + struct dfs_filter *rf, + struct dfs_event *re, + uint32_t deltaT, + uint64_t this_ts) +{ + uint32_t index, n, window; + struct dfs_delayline *dl; + + dl = &rf->rf_dl; + /* Circular buffer of size 2^n */ + index = (dl->dl_lastelem + 1) & DFS_MAX_DL_MASK; + if ((dl->dl_numelems) == DFS_MAX_DL_SIZE) + dl->dl_firstelem = (dl->dl_firstelem + 1) & DFS_MAX_DL_MASK; + else + dl->dl_numelems++; + dl->dl_lastelem = index; + dl->dl_elems[index].de_time = deltaT; + dl->dl_elems[index].de_ts = this_ts; + window = deltaT; + dl->dl_elems[index].de_dur = re->re_dur; + dl->dl_elems[index].de_rssi = re->re_rssi; + dl->dl_elems[index].de_seg_id = re->re_seg_id; + dl->dl_elems[index].de_sidx = re->re_sidx; + dl->dl_elems[index].de_delta_peak = re->re_delta_peak; + dl->dl_elems[index].de_psidx_diff = re->re_psidx_diff; + dl->dl_elems[index].de_seq_num = dfs->dfs_seq_num; + + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "adding: filter id %d, dur=%d, rssi=%d, ts=%llu", + rf->rf_pulseid, re->re_dur, + re->re_rssi, (unsigned long long int)this_ts); + + for (n = 0; n < dl->dl_numelems-1; n++) { + index = (index-1) & DFS_MAX_DL_MASK; + /* + * Calculate window based on full time stamp instead of deltaT + * deltaT (de_time) may result in incorrect window value + */ + window = (uint32_t) (this_ts - dl->dl_elems[index].de_ts); + + if (window > rf->rf_filterlen) { + dl->dl_firstelem = (index+1) & DFS_MAX_DL_MASK; + dl->dl_numelems = n+1; + } + } + dfs_debug(dfs, WLAN_DEBUG_DFS2, "dl firstElem = 
%d lastElem = %d", + dl->dl_firstelem, dl->dl_lastelem); +} + +/** + * dfs_find_lowestpri() - Find lowest PRI + * @dl: Pointer to dfs delayline. + * @lowpriindex: Low PRI index. + * @lowpri: Low PRI + */ +static inline void dfs_find_lowestpri( + struct dfs_delayline *dl, + uint32_t *lowpriindex, + uint32_t *lowpri) +{ + int delayindex; + uint32_t refpri; + uint32_t n; + + /* Find out the lowest pri. */ + for (n = 0; n < dl->dl_numelems; n++) { + delayindex = (dl->dl_firstelem + n) & DFS_MAX_DL_MASK; + refpri = dl->dl_elems[delayindex].de_time; + if (refpri == 0) { + continue; + } else if (refpri < *lowpri) { + *lowpri = dl->dl_elems[delayindex].de_time; + *lowpriindex = n; + } + } +} + +/** + * dfs_calculate_score() - Calculate score for the score index + * if PRI match is found + * @dl: Pointer to dfs delayline. + * @rf: Pointer to dfs_filter structure. + * @score: score array. + * @refpri: reference PRI. + * @primargin: PRI margin. + * @score_index: Score index. + */ +static inline void dfs_calculate_score( + struct dfs_delayline *dl, + struct dfs_filter *rf, + int *score, + uint32_t refpri, + uint32_t primargin, + uint32_t score_index) +{ + int pri_match = 0; + int dindex; + uint32_t searchpri, deltapri, deltapri_2, deltapri_3; + uint32_t i; + + for (i = 0; i < dl->dl_numelems; i++) { + dindex = (dl->dl_firstelem + i) & DFS_MAX_DL_MASK; + searchpri = dl->dl_elems[dindex].de_time; + deltapri = DFS_DIFF(searchpri, refpri); + deltapri_2 = DFS_DIFF(searchpri, 2*refpri); + deltapri_3 = DFS_DIFF(searchpri, 3*refpri); + if (rf->rf_ignore_pri_window == 2) + pri_match = ((deltapri < primargin) || + (deltapri_2 < primargin) || + (deltapri_3 < primargin)); + else + pri_match = (deltapri < primargin); + + if (pri_match) + score[score_index]++; + } +} + +/** + * dfs_find_priscores() - Find PRI score + * @dl: Pointer to dfs delayline. + * @rf: Pointer to dfs_filter structure. + * @score: score array. + * @primargin: PRI margin. 
+ */ +static void dfs_find_priscores( + struct dfs_delayline *dl, + struct dfs_filter *rf, + int *score, + uint32_t primargin) +{ + int delayindex; + uint32_t refpri; + uint32_t n; + + qdf_mem_zero(score, sizeof(int)*DFS_MAX_DL_SIZE); + + for (n = 0; n < dl->dl_numelems; n++) { + delayindex = (dl->dl_firstelem + n) & DFS_MAX_DL_MASK; + refpri = dl->dl_elems[delayindex].de_time; + if (refpri == 0) + continue; + if (refpri < rf->rf_maxpri) { + /* Use only valid PRI range for high score. */ + dfs_calculate_score(dl, rf, score, refpri, primargin, + n); + } else { + score[n] = 0; + } + + if (score[n] > rf->rf_threshold) { + /* + * We got the most possible candidate, + * no need to continue further. + */ + break; + } + } +} + +/** + * dfs_find_highscore() - Find PRI high score + * @dl: Pointer to dfs delayline. + * @score: score array. + * @highscore: High score. + * @highscoreindex: High score index. + */ +static inline void dfs_find_highscore( + struct dfs_delayline *dl, + int *score, + uint32_t *highscore, + uint32_t *highscoreindex) +{ + int delayindex, dindex; + uint32_t n; + + *highscore = 0; + *highscoreindex = 0; + + for (n = 0; n < dl->dl_numelems; n++) { + if (score[n] > *highscore) { + *highscore = score[n]; + *highscoreindex = n; + } else if (score[n] == *highscore) { + /* + * More than one pri has highscore take the least pri. + */ + delayindex = (dl->dl_firstelem + *highscoreindex) & + DFS_MAX_DL_MASK; + dindex = (dl->dl_firstelem + n) & DFS_MAX_DL_MASK; + if (dl->dl_elems[dindex].de_time <= + dl->dl_elems[delayindex].de_time) { + *highscoreindex = n; + } + } + } + + return; +} + +/** + * dfs_get_durmargin() - Find duration margin + * @rf: Pointer to dfs_filter structure. 
+ * @durmargin: Duration margin + */ +static inline void dfs_get_durmargin( + struct dfs_filter *rf, + uint32_t *durmargin) +{ +#define DUR_THRESH 10 +#define LOW_MARGIN 4 +#define HIGH_MARGIN 6 + + if (rf->rf_maxdur < DUR_THRESH) + *durmargin = LOW_MARGIN; + else + *durmargin = HIGH_MARGIN; + +#undef DUR_THRESH +#undef LOW_MARGIN +#undef HIGH_MARGIN +} + +/** + * dfs_handle_fixedpattern() - Handle Fixed pattern radar + * @dfs: Pointer to wlan_dfs structure. + * @dl: Pointer to dfs delayline. + * @rf: Pointer to dfs_filter structure. + * @dur: Pulse duration/width + * @ext_chan_flag : Ext channel flag. + */ +static inline int dfs_handle_fixedpattern( + struct wlan_dfs *dfs, + struct dfs_delayline *dl, + struct dfs_filter *rf, + uint32_t dur, + int ext_chan_flag) +{ + int found = 0; + + found = dfs_bin_fixedpattern_check(dfs, rf, dur, ext_chan_flag); + if (found) + dl->dl_numelems = 0; + + return found; +} + +/** + * dfs_bin_basic_sanity() - Sanity check + * @dl: Pointer to dfs delayline. + * @rf: Pointer to dfs_filter structure. + * @deltaT: Delta time. + */ +static inline int dfs_bin_basic_sanity( + struct dfs_delayline *dl, + struct dfs_filter *rf, + uint32_t *deltaT) +{ + if (dl->dl_numelems < (rf->rf_threshold-1)) + return 0; + + if (*deltaT > rf->rf_filterlen) + return 0; + + return 1; +} + +/** + * dfs_find_scoreindex() - Find score index + * @rf: Pointer to dfs_filter structure. + * @highscore: High score. + * @lowpriindex: Low PRI index. + * @highscoreindex: High score index. + * @scoreindex: score index. + */ +static inline void dfs_find_scoreindex( + struct dfs_filter *rf, + uint32_t highscore, + uint32_t lowpriindex, + uint32_t highscoreindex, + uint32_t *scoreindex) +{ + int lowprichk = 3; + + if (rf->rf_ignore_pri_window > 0) + lowprichk = (rf->rf_threshold >> 1)+1; + else + lowprichk = 3; + + if (highscore < lowprichk) + *scoreindex = lowpriindex; + else + *scoreindex = highscoreindex; +} + +/** + * dfs_find_refs() - Find reference values. 
+ * @dl: Pointer to dfs delayline. + * @rf: Pointer to dfs_filter structure. + * @scoreindex: score index. + * @refdur: Duration value. + * @refpri: Current "filter" time for start of pulse in usecs. + */ +static inline void dfs_find_refs( + struct dfs_delayline *dl, + struct dfs_filter *rf, + uint32_t scoreindex, + uint32_t *refdur, + uint32_t *refpri) +{ + int delayindex; + + delayindex = (dl->dl_firstelem + scoreindex) & DFS_MAX_DL_MASK; + *refdur = dl->dl_elems[delayindex].de_dur; + *refpri = dl->dl_elems[delayindex].de_time; + + if (rf->rf_fixed_pri_radar_pulse) + *refpri = (rf->rf_minpri + rf->rf_maxpri)/2; +} + +/** + * dfs_bin_success_print() - Debug print + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @ext_chan_flag: Extension channel flag. + * @numpulses: Number of pulses. + * @refpri: Current "filter" time for start of pulse in usecs. + * @refdur: Duration value. + * @primargin: PRI margin. + */ +static inline void dfs_bin_success_print( + struct wlan_dfs *dfs, + struct dfs_filter *rf, + int ext_chan_flag, + int numpulses, + uint32_t refpri, + uint32_t refdur, + uint32_t primargin) +{ + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "ext_flag=%d MATCH filter=%u numpulses=%u thresh=%u refdur=%d refpri=%d primargin=%d", + ext_chan_flag, rf->rf_pulseid, numpulses, + rf->rf_threshold, refdur, refpri, primargin); + dfs_print_delayline(dfs, &rf->rf_dl); + dfs_print_filter(dfs, rf); +} + +int dfs_bin_check( + struct wlan_dfs *dfs, + struct dfs_filter *rf, + uint32_t deltaT, + uint32_t width, + int ext_chan_flag) +{ + struct dfs_delayline *dl; + uint32_t refpri, refdur; + uint32_t highscoreindex; + uint32_t primargin, highscore; + int score[DFS_MAX_DL_SIZE], found = 0; + uint32_t scoreindex, lowpriindex = 0, lowpri = 0xffff; + int numpulses = 0; + int fil_thresh; + + dl = &rf->rf_dl; + if (!dfs_bin_basic_sanity(dl, rf, &deltaT)) + return 0; + + primargin = dfs_get_pri_margin(dfs, ext_chan_flag, + (rf->rf_patterntype == 1)); + + + if 
(rf->rf_patterntype == 1) + return dfs_handle_fixedpattern(dfs, dl, rf, width, + ext_chan_flag); + + dfs_find_lowestpri(dl, &lowpriindex, &lowpri); + + /* Find out the each delay element's pri score. */ + dfs_find_priscores(dl, rf, score, primargin); + + /* Find out the high scorer. */ + dfs_find_highscore(dl, score, &highscore, &highscoreindex); + + /* + * Find the average pri of pulses around the pri of highscore + * or the pulses around the lowest pri. + */ + dfs_find_scoreindex(rf, highscore, lowpriindex, highscoreindex, + &scoreindex); + + /* We got the possible pri, save its parameters as reference. */ + dfs_find_refs(dl, rf, scoreindex, &refdur, &refpri); + + numpulses = dfs_bin_pri_check(dfs, rf, dl, score[scoreindex], refpri, + refdur, ext_chan_flag, refpri); + + fil_thresh = dfs_get_filter_threshold(dfs, rf, ext_chan_flag); + + if (numpulses >= fil_thresh) { + found = 1; + dfs_bin_success_print(dfs, rf, ext_chan_flag, numpulses, + refpri, refdur, primargin); + } + + return found; +} + +/** + * dfs_update_min_and_max_sidx() - Calculate min and max sidx. + * @dl: Pointer to dfs_delayline structure. + * @delayindex: Delay index. + * @sidx_min: Sidx min. + * @sidx_max: Sidx max. + * @delta_peak_match_count: Delta peak match count. + * @psidx_diff_match_count: Psidx diff match count. + * @rf: Pointer to dfs_filter structure. 
+ */ +static inline void dfs_update_min_and_max_sidx( + struct dfs_delayline *dl, + int delayindex, + int32_t *sidx_min, + int32_t *sidx_max, + uint8_t *delta_peak_match_count, + uint8_t *psidx_diff_match_count, + struct dfs_filter *rf) +{ + /* update sidx min/max for false detection check later */ + if (*sidx_min > dl->dl_elems[delayindex].de_sidx) + *sidx_min = dl->dl_elems[delayindex].de_sidx; + + if (*sidx_max < dl->dl_elems[delayindex].de_sidx) + *sidx_max = dl->dl_elems[delayindex].de_sidx; + + if (rf->rf_check_delta_peak) { + if (dl->dl_elems[delayindex].de_delta_peak != 0) + (*delta_peak_match_count)++; + else if ((dl->dl_elems[delayindex].de_psidx_diff >= + DFS_MIN_PSIDX_DIFF) && + (dl->dl_elems[delayindex].de_psidx_diff <= + DFS_MAX_PSIDX_DIFF)) + (*psidx_diff_match_count)++; + } +} + +/** + * dfs_check_pulses_for_delta_variance() - Check pulses for delta variance. + * @rf: Pointer to dfs_filter structure. + * @numpulsetochk: Number of pulses to check. + * @delta_time_stamps: Delta time stamp. + * @fundamentalpri: Highest PRI. + * @primargin: Primary margin. + * @numpulses: Number of pulses. + * @delayindex: Delay index. + * @sidx_min: Sidx min. + * @sidx_max: Sidx max. + * @delta_peak_match_count: Delta peak match count. + * @psidx_diff_match_count: Psidx diff match count. + * @dl: Pointer to dfs_delayline structure. 
+ */
+static inline void dfs_check_pulses_for_delta_variance(
+		struct dfs_filter *rf,
+		int numpulsetochk,
+		uint32_t delta_time_stamps,
+		int fundamentalpri,
+		uint32_t primargin,
+		int *numpulses,
+		int delayindex,
+		int32_t *sidx_min,
+		int32_t *sidx_max,
+		uint8_t *delta_peak_match_count,
+		uint8_t *psidx_diff_match_count,
+		struct dfs_delayline *dl)
+{
+	uint32_t delta_ts_variance, j;
+
+	for (j = 0; j < numpulsetochk; j++) {
+		delta_ts_variance = DFS_DIFF(delta_time_stamps,
+				((j + 1) * fundamentalpri));
+		if (delta_ts_variance < (2 * (j + 1) * primargin)) {
+			dl->dl_seq_num_stop =
+				dl->dl_elems[delayindex].de_seq_num;
+			dfs_update_min_and_max_sidx(dl, delayindex,
+					sidx_min, sidx_max,
+					delta_peak_match_count,
+					psidx_diff_match_count,
+					rf);
+			(*numpulses)++;
+			if (rf->rf_ignore_pri_window > 0)
+				break;
+		}
+	}
+}
+
+/**
+ * dfs_count_the_other_delay_elements() - Counts the other delay elements.
+ * @dfs: Pointer to wlan_dfs structure.
+ * @rf: Pointer to dfs_filter structure.
+ * @dl: Pointer to dfs_delayline structure.
+ * @i: Index value.
+ * @refpri: Current "filter" time for start of pulse in usecs.
+ * @refdur: Duration value.
+ * @primargin: Primary margin.
+ * @durmargin: Duration margin.
+ * @numpulses: Number of pulses.
+ * @delta_peak_match_count: Pointer to delta_peak_match_count.
+ * @psidx_diff_match_count: Pointer to psidx_diff_match_count.
+ * @prev_good_timestamp: Previous good timestamp.
+ * @fundamentalpri: Highest PRI. 
+ */ +static void dfs_count_the_other_delay_elements( + struct wlan_dfs *dfs, + struct dfs_filter *rf, + struct dfs_delayline *dl, + uint32_t i, + uint32_t refpri, + uint32_t refdur, + uint32_t primargin, + uint32_t durmargin, + int *numpulses, + uint8_t *delta_peak_match_count, + uint8_t *psidx_diff_match_count, + uint32_t *prev_good_timestamp, + int fundamentalpri) +{ + int delayindex; + uint32_t searchpri, searchdur, deltadur, deltapri1, deltapri2; + uint32_t j = 0, delta_time_stamps, deltapri; + int dindex, primatch, numpulsetochk = 2; + int32_t sidx_min = DFS_BIG_SIDX; + int32_t sidx_max = -DFS_BIG_SIDX; + + delayindex = (dl->dl_firstelem + i) & DFS_MAX_DL_MASK; + searchpri = dl->dl_elems[delayindex].de_time; + if (searchpri == 0) { + /* + * This events PRI is zero, take it as a valid pulse + * but decrement next event's PRI by refpri. + */ + dindex = (delayindex + 1) & DFS_MAX_DL_MASK; + dl->dl_elems[dindex].de_time -= refpri; + searchpri = refpri; + } + searchdur = dl->dl_elems[delayindex].de_dur; + deltadur = DFS_DIFF(searchdur, refdur); + deltapri = DFS_DIFF(searchpri, refpri); + deltapri1 = DFS_DIFF(searchpri, refpri); + deltapri2 = DFS_DIFF(searchpri, 2 * refpri); + primatch = 0; + + if ((rf->rf_ignore_pri_window > 0) && (rf->rf_patterntype != 2)) { + for (j = 0; j < rf->rf_numpulses; j++) { + deltapri1 = DFS_DIFF(searchpri, (j + 1) * refpri); + if (deltapri1 < (2 * primargin)) { + primatch = 1; + break; + } + } + } else if ((deltapri1 < primargin) || (deltapri2 < primargin)) { + primatch = 1; + } + + if (primatch && (deltadur < durmargin)) { + if (*numpulses == 1) { + dl->dl_seq_num_second = + dl->dl_elems[delayindex].de_seq_num; + dfs_update_min_and_max_sidx(dl, delayindex, + &sidx_min, &sidx_max, + delta_peak_match_count, + psidx_diff_match_count, + rf); + (*numpulses)++; + } else { + delta_time_stamps = (dl->dl_elems[delayindex].de_ts - + *prev_good_timestamp); + if ((rf->rf_ignore_pri_window > 0)) { + numpulsetochk = rf->rf_numpulses; + if 
((rf->rf_patterntype == 2) && + (fundamentalpri < refpri + 100)) { + numpulsetochk = 4; + } + } else { + numpulsetochk = 4; + } + + dfs_check_pulses_for_delta_variance(rf, numpulsetochk, + delta_time_stamps, fundamentalpri, + primargin, numpulses, delayindex, + &sidx_min, &sidx_max, + delta_peak_match_count, + psidx_diff_match_count, + dl); + } + *prev_good_timestamp = dl->dl_elems[delayindex].de_ts; + dl->dl_search_pri = searchpri; + dl->dl_min_sidx = sidx_min; + dl->dl_max_sidx = sidx_max; + dl->dl_delta_peak_match_count = *delta_peak_match_count; + dl->dl_psidx_diff_match_count = *psidx_diff_match_count; + + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "rf->minpri=%d rf->maxpri=%d searchpri = %d index = %d numpulses = %d delta peak match count = %d psidx diff match count = %d deltapri=%d j=%d", + rf->rf_minpri, rf->rf_maxpri, searchpri, i, + *numpulses, *delta_peak_match_count, + *psidx_diff_match_count, deltapri, j); + } +} + +int dfs_bin_pri_check( + struct wlan_dfs *dfs, + struct dfs_filter *rf, + struct dfs_delayline *dl, + uint32_t score, + uint32_t refpri, + uint32_t refdur, + int ext_chan_flag, + int fundamentalpri) +{ + uint32_t searchpri, deltapri = 0; + uint32_t averagerefpri = 0, MatchCount = 0; + uint32_t prev_good_timestamp = 0; + int dindex; + uint32_t i, primargin, durmargin; +#ifdef WLAN_DEBUG + uint32_t highscore = score; + uint32_t highscoreindex = 0; +#endif + /* + * First pulse in the burst is most likely being filtered out based on + * maxfilterlen. + */ + int numpulses = 1; + uint8_t delta_peak_match_count = 1; + uint8_t psidx_diff_match_count = 1; + int priscorechk = 1; + + /* Use the adjusted PRI margin to reduce false alarms + * For non fixed pattern types, rf->rf_patterntype=0. 
+	 */
+	primargin = dfs_get_pri_margin(dfs, ext_chan_flag,
+			(rf->rf_patterntype == 1));
+
+	if ((refpri > rf->rf_maxpri) || (refpri < rf->rf_minpri)) {
+		numpulses = 0;
+		return numpulses;
+	}
+
+	dfs_get_durmargin(rf, &durmargin);
+
+	if ((!rf->rf_fixed_pri_radar_pulse)) {
+		if (rf->rf_ignore_pri_window == 1)
+			priscorechk = (rf->rf_threshold >> 1);
+		else
+			priscorechk = 1;
+
+		MatchCount = 0;
+		if (score > priscorechk) {
+			for (i = 0; i < dl->dl_numelems; i++) {
+				dindex = (dl->dl_firstelem + i) &
+					DFS_MAX_DL_MASK;
+				searchpri = dl->dl_elems[dindex].de_time;
+				deltapri = DFS_DIFF(searchpri, refpri);
+				if (deltapri < primargin) {
+					averagerefpri += searchpri;
+					MatchCount++;
+				}
+			}
+			if (rf->rf_patterntype != 2) {
+				if (MatchCount > 0)
+					refpri = (averagerefpri / MatchCount);
+			} else {
+				refpri = (averagerefpri / score);
+			}
+		}
+	}
+
+	/* Note: Following primultiple calculation should be done
+	 * once per filter during initialization stage (dfs_attach)
+	 * and stored in its array at least for fixed frequency
+	 * types like FCC Bin1 to save some CPU cycles.
+	 * multiplication, divide operators in the following code
+	 * are left as they are for readability hoping the compiler
+	 * will use left/right shifts wherever possible.
+	 */
+	dfs_debug(dfs, WLAN_DEBUG_DFS2,
+		"refpri = %d high score = %d index = %d numpulses = %d",
+		refpri, highscore, highscoreindex, numpulses);
+	/*
+	 * Count the other delay elements that have pri and dur with
+	 * in the acceptable range from the reference one. 
+ */ + for (i = 0; i < dl->dl_numelems; i++) + dfs_count_the_other_delay_elements(dfs, rf, dl, i, refpri, + refdur, primargin, durmargin, &numpulses, + &delta_peak_match_count, + &psidx_diff_match_count, + &prev_good_timestamp, fundamentalpri); + + return numpulses; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_debug.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_debug.c new file mode 100644 index 0000000000000000000000000000000000000000..5ea3f20598a01427b5f5771fb491f61ac8c4bceb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_debug.c @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2013, 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2010, Atheros Communications Inc. + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: It contains useful print functions that can be used for debug. + * Add all debug related functionality into this file. 
 */
#include "../dfs.h"
#include "wlan_dfs_lmac_api.h"

/* Dump every element of a delay line at WLAN_DEBUG_DFS2 verbosity. */
void dfs_print_delayline(struct wlan_dfs *dfs, struct dfs_delayline *dl)
{
	int i = 0, index;
	struct dfs_delayelem *de;

	index = dl->dl_firstelem;
	for (i = 0; i < dl->dl_numelems; i++) {
		de = &dl->dl_elems[index];
		dfs_debug(dfs, WLAN_DEBUG_DFS2,
			"Elem %u: ts=%llu diff_ts=%u (0x%x) dur=%u, seg_id=%d sidx=%d delta_peak=%d psidx_diff=%d seq_num=%d",
			i, de->de_ts, de->de_time, de->de_time,
			de->de_dur, de->de_seg_id, de->de_sidx,
			de->de_delta_peak, de->de_psidx_diff,
			de->de_seq_num);

		/* Circular buffer: mask keeps the index within the line. */
		index = (index + 1) & DFS_MAX_DL_MASK;
	}
}

/* Dump one radar filter's parameters at WLAN_DEBUG_DFS1 verbosity. */
void dfs_print_filter(struct wlan_dfs *dfs, struct dfs_filter *rf)
{
	dfs_debug(dfs, WLAN_DEBUG_DFS1,
		"filterID[%d] rf_numpulses=%u; rf->rf_minpri=%u; rf->rf_maxpri=%u; rf->rf_threshold=%u; rf->rf_filterlen=%u; rf->rf_mindur=%u; rf->rf_maxdur=%u",
		rf->rf_pulseid, rf->rf_numpulses, rf->rf_minpri, rf->rf_maxpri,
		rf->rf_threshold, rf->rf_filterlen, rf->rf_mindur,
		rf->rf_maxdur);
}

/**
 * dfs_print_filtertype() - Print the filtertype
 * @dfs: Pointer to wlan_dfs structure.
 * @ft: Pointer to dfs_filtertype structure.
 */
static void dfs_print_filtertype(
		struct wlan_dfs *dfs,
		struct dfs_filtertype *ft)
{
	uint32_t j;
	struct dfs_filter *rf;

	for (j = 0; j < ft->ft_numfilters; j++) {
		rf = ft->ft_filters[j];
		dfs_debug(dfs, WLAN_DEBUG_DFS2,
			"filter[%d] filterID = %d rf_numpulses=%u; rf->rf_minpri=%u; rf->rf_maxpri=%u; rf->rf_threshold=%u; rf->rf_filterlen=%u; rf->rf_mindur=%u; rf->rf_maxdur=%u",
			j, rf->rf_pulseid, rf->rf_numpulses,
			rf->rf_minpri, rf->rf_maxpri,
			rf->rf_threshold, rf->rf_filterlen,
			rf->rf_mindur, rf->rf_maxdur);
	}
}

/* Dump every populated radar filter type registered with @dfs. */
void dfs_print_filters(struct wlan_dfs *dfs)
{
	struct dfs_filtertype *ft = NULL;
	uint8_t i;

	if (!dfs) {
		dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL");
		return;
	}

	for (i = 0; i < DFS_MAX_RADAR_TYPES; i++) {
		if (dfs->dfs_radarf[i] != NULL) {
			ft = dfs->dfs_radarf[i];
			/* Skip empty or corrupt (oversize) filter tables. */
			if ((ft->ft_numfilters > DFS_MAX_NUM_RADAR_FILTERS) ||
					(!ft->ft_numfilters)) {
				continue;
			}
			dfs_debug(dfs, WLAN_DEBUG_DFS2,
				"===========ft->ft_numfilters = %u===========",
				ft->ft_numfilters);
			dfs_print_filtertype(dfs, ft);
		}
	}
}
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_direct_attach_radar.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_direct_attach_radar.c
new file mode 100644
index 0000000000000000000000000000000000000000..f295e5df133c184c46b2523a86ffee135458a143
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_direct_attach_radar.c
@@ -0,0 +1,107 @@
/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 * Copyright (c) 2011, Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has radar table and initialization function for Beeliner + * family of chipsets. + */ + +#include "../dfs.h" +#include "wlan_dfs_mlme_api.h" +#include "wlan_dfs_utils_api.h" +#include "wlan_dfs_lmac_api.h" +#include "../dfs_internal.h" + +void dfs_get_da_radars(struct wlan_dfs *dfs) +{ +#define AR5212_DEVID_IBM 0x1014 /* IBM minipci ID */ +#define AR5212_AR2413 0x001a /* AR2413 aka Griffin-lite */ +#define AR5212_AR2413 0x001a /* AR2413 aka Griffin-lite */ +#define AR5212_AR5413 0x001b /* Eagle */ +#define AR5212_AR5424 0x001c /* Condor (PCI express) */ +#define AR5212_DEVID_FF19 0xff19 /* PCI express */ +#define AR5212_AR2417 0x001d /* Nala, PCI */ +#define AR5212_DEVID 0x0013 /* Final ar5212 devid */ +#define AR5212_FPGA 0xf013 /* Emulation board */ +#define AR5212_DEFAULT 0x1113 /* No eeprom HW default */ + +#define AR5416_DEVID_PCI 0x0023 /* AR5416 PCI (CB/MB) (Owl)*/ +#define AR5416_DEVID_PCIE 0x0024 /* AR5416 PCI-E (XB) (Owl) */ +#define AR5416_DEVID_AR9160_PCI 0x0027 /* AR9160 PCI (Sowl) */ +#define AR5416_AR9100_DEVID 0x000b /* AR9100 (Howl) */ +#define AR5416_DEVID_AR9280_PCI 0x0029 /* PCI (Merlin) */ +#define AR5416_DEVID_AR9280_PCIE 0x002a /* PCIE (Merlin) */ +#define AR5416_DEVID_AR9285_PCIE 0x002b /* PCIE (Kite) */ +#define AR5416_DEVID_AR9285G_PCIE 0x002c /* PCIE (Kite G only) */ +#define AR5416_DEVID_AR9287_PCI 0x002d /* PCI (Kiwi) */ +#define AR5416_DEVID_AR9287_PCIE 0x002e /* PCIE (Kiwi) */ + +#define 
AR9300_DEVID_AR9380_PCIE 0x0030 /* PCIE (Osprey) */ +#define AR9300_DEVID_AR9340 0x0031 /* Wasp */ +#define AR9300_DEVID_AR9485_PCIE 0x0032 /* Poseidon */ +#define AR9300_DEVID_AR9580_PCIE 0x0033 /* Peacock */ +#define AR9300_DEVID_AR1111_PCIE 0x0037 /* AR1111 */ +#define AR9300_DEVID_AR946X_PCIE 0x0034 /* Jupiter: 2x2 DB + BT - AR9462 */ +#define AR9300_DEVID_AR955X 0x0039 /* Scorpion */ +#define AR9300_DEVID_AR953X 0x003d /* Honey Bee */ +#define AR9300_DEVID_AR956X 0x003f /* Dragonfly */ +#define AR9300_DEVID_AR956X_PCIE 0x0036 /* Aphrodite: 1x1 DB + BT - AR9564 */ +#define AR9300_DEVID_EMU_PCIE 0xabcd + + uint16_t devid = lmac_get_ah_devid(dfs->dfs_pdev_obj); + /* For DA */ + + switch (devid) { + case AR5212_DEVID_IBM: + case AR5212_AR2413: + case AR5212_AR5413: + case AR5212_AR5424: + case AR5212_DEVID_FF19: + devid = AR5212_DEVID; + case AR5212_AR2417: + case AR5212_DEVID: + case AR5212_FPGA: + case AR5212_DEFAULT: + dfs_get_radars_for_ar5212(dfs); + break; + case AR5416_DEVID_PCI: + case AR5416_DEVID_PCIE: + case AR5416_DEVID_AR9160_PCI: + case AR5416_AR9100_DEVID: + case AR5416_DEVID_AR9280_PCI: + case AR5416_DEVID_AR9280_PCIE: + case AR5416_DEVID_AR9285_PCIE: + case AR5416_DEVID_AR9285G_PCIE: + case AR5416_DEVID_AR9287_PCI: + case AR5416_DEVID_AR9287_PCIE: + dfs_get_radars_for_ar5416(dfs); + break; + case AR9300_DEVID_AR9380_PCIE: + case AR9300_DEVID_AR9340: + case AR9300_DEVID_AR9485_PCIE: + case AR9300_DEVID_AR9580_PCIE: + case AR9300_DEVID_AR1111_PCIE: + case AR9300_DEVID_AR946X_PCIE: + case AR9300_DEVID_AR955X: + case AR9300_DEVID_AR953X: + case AR9300_DEVID_AR956X: + case AR9300_DEVID_AR956X_PCIE: + case AR9300_DEVID_EMU_PCIE: + dfs_get_radars_for_ar9300(dfs); + break; + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_fcc_bin5.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_fcc_bin5.c new file mode 100644 index 0000000000000000000000000000000000000000..07c285897957ab7d8398f4fb6b38025b16282ac9 
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_fcc_bin5.c
@@ -0,0 +1,835 @@
/*
 * Copyright (c) 2013, 2016-2017 The Linux Foundation. All rights reserved.
 * Copyright (c) 2002-2010, Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: FCC Bin5 are special type of radars because they "chirp". Basically the
 * pulses move across the frequency band and are called chirping pulses.
 * dfs_check_chirping() actually examines the FFT data contained in the PHY
 * error information to figure out whether the pulse is moving across
 * frequencies.
 */

#include "../dfs.h"
#include "wlan_dfs_mlme_api.h"
#include "../dfs_channel.h"

/*
 * Accept or reject a single pulse as a Bin5 candidate based on the
 * chirp flags carried in the event and the duration/RSSI thresholds
 * configured in @br. Returns 1 to accept, 0 to reject.
 */
int dfs_bin5_check_pulse(struct wlan_dfs *dfs, struct dfs_event *re,
		struct dfs_bin5radars *br)
{
	int b5_rssithresh = br->br_pulse.b5_rssithresh;

	dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_PULSE,
		"re_dur=%d, rssi=%d, check_chirp=%d, hw_chirp=%d, sw_chirp=%d",
		(int)re->re_dur, (int)re->re_rssi,
		!!(re->re_flags & DFS_EVENT_CHECKCHIRP),
		!!(re->re_flags & DFS_EVENT_HW_CHIRP),
		!!(re->re_flags & DFS_EVENT_SW_CHIRP));

	/* If the SW/HW chirp detection says to fail the pulse,do so.
	 */
	if (DFS_EVENT_NOTCHIRP(re)) {
		dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5,
			"rejecting chirp: ts=%llu, dur=%d, rssi=%d checkchirp=%d, hwchirp=%d, swchirp=%d",
			(unsigned long long)re->re_full_ts,
			(int)re->re_dur, (int)re->re_rssi,
			!!(re->re_flags & DFS_EVENT_CHECKCHIRP),
			!!(re->re_flags & DFS_EVENT_HW_CHIRP),
			!!(re->re_flags & DFS_EVENT_SW_CHIRP));

		return 0;
	}

#define CHANNEL_TURBO 0x00010
	/* Adjust the filter threshold for rssi in non TURBO mode. */
	if (!(dfs->dfs_curchan->dfs_ch_flags & CHANNEL_TURBO))
		b5_rssithresh += br->br_pulse.b5_rssimargin;

	/* Check if the pulse is within duration and rssi thresholds. */
	if ((re->re_dur >= br->br_pulse.b5_mindur) &&
			(re->re_dur <= br->br_pulse.b5_maxdur) &&
			(re->re_rssi >= b5_rssithresh)) {
		dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5,
			"dur=%d, rssi=%d - adding!",
			(int)re->re_dur, (int)re->re_rssi);
		return 1;
	}

	dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5,
		"too low to be Bin5 pulse tsf=%llu, dur=%d, rssi=%d",
		(unsigned long long)re->re_full_ts,
		(int)re->re_dur, (int)re->re_rssi);

	return 0;
}

/*
 * Add a pulse to the Bin5 circular buffer if its inter-pulse spacing is
 * plausible, then age out entries older than the burst time window.
 * Returns 1 if the pulse was queued, 0 if it was dropped.
 */
int dfs_bin5_addpulse(struct wlan_dfs *dfs,
		struct dfs_bin5radars *br,
		struct dfs_event *re,
		uint64_t thists)
{
	uint32_t index, stop;
	uint64_t tsDelta;

	/*
	 * Check if this pulse is a valid pulse in terms of repetition,
	 * if not, return without adding it to the queue. PRI : Pulse
	 * Repitetion Interval.
	 * BRI : Burst Repitetion Interval.
	 */
	if (br->br_numelems != 0) {
		index = br->br_lastelem;
		tsDelta = thists - br->br_elems[index].be_ts;
		if ((tsDelta < DFS_BIN5_PRI_LOWER_LIMIT) ||
				((tsDelta > DFS_BIN5_PRI_HIGHER_LIMIT) &&
				 (tsDelta < DFS_BIN5_BRI_LOWER_LIMIT))) {
			return 0;
		}
	}

	/* Circular buffer of size 2^n.
	 */
	index = (br->br_lastelem + 1) & DFS_MAX_B5_MASK;
	br->br_lastelem = index;
	if (br->br_numelems == DFS_MAX_B5_SIZE)
		br->br_firstelem = (br->br_firstelem + 1) & DFS_MAX_B5_MASK;
	else
		br->br_numelems++;

	br->br_elems[index].be_ts = thists;
	br->br_elems[index].be_rssi = re->re_rssi;
	br->br_elems[index].be_dur = re->re_dur; /* This is in u-sec */
	/* Drop elements older than b5_timewindow relative to @thists. */
	stop = 0;
	index = br->br_firstelem;
	while ((!stop) && (br->br_numelems - 1) > 0) {
		if ((thists - br->br_elems[index].be_ts) >
				((uint64_t)br->br_pulse.b5_timewindow)) {
			br->br_numelems--;
			br->br_firstelem =
				(br->br_firstelem + 1) & DFS_MAX_B5_MASK;
			index = br->br_firstelem;
		} else {
			stop = 1;
		}
	}

	return 1;
}

/**
 * dfs_calculate_bursts_for_same_rssi() - Calculate bursts for same rssi.
 * @dfs: Pointer to wlan_dfs structure.
 * @br: Pointer to dfs_bin5radars structure.
 * @bursts: Bursts.
 * @numevents: Number of events.
 * @prev: prev index.
 * @this: index to br_elems[].
 * @index: index array.
 *
 * Counts a burst when the two pulses' RSSI values agree within
 * DFS_BIN5_RSSI_MARGIN, and records both element indexes for the later
 * width-variance check.
 */
static inline void dfs_calculate_bursts_for_same_rssi(
		struct wlan_dfs *dfs,
		struct dfs_bin5radars *br,
		uint32_t *bursts,
		uint32_t *numevents,
		uint32_t prev,
		uint32_t this,
		int *index)
{
	uint32_t rssi_diff;

	if (br->br_elems[this].be_rssi >= br->br_elems[prev].be_rssi)
		rssi_diff = (br->br_elems[this].be_rssi -
				br->br_elems[prev].be_rssi);
	else
		rssi_diff = (br->br_elems[prev].be_rssi -
				br->br_elems[this].be_rssi);

	if (rssi_diff <= DFS_BIN5_RSSI_MARGIN) {
		(*bursts)++;
		/*
		 * Save the indexes of this pair for later
		 * width variance check.
		 */
		if ((*numevents) >= 2) {
			/*
			 * Make sure the event is not duplicated, possible in
			 * a 3 pulse burst.
			 */
			if (index[(*numevents)-1] != prev)
				index[(*numevents)++] = prev;
		} else {
			index[(*numevents)++] = prev;
		}

		index[(*numevents)++] = this;
	} else {
		dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5,
			"Bin5 rssi_diff=%d", rssi_diff);
	}
}

/*
 * Apply the three Bin5 pairing rules (PRI window, pulse-width match,
 * RSSI match) to the pulse pair (@prev, @this) and update the burst and
 * event counters accordingly.
 */
void bin5_rules_check_internal(struct wlan_dfs *dfs,
		struct dfs_bin5radars *br,
		uint32_t *bursts,
		uint32_t *numevents,
		uint32_t prev,
		uint32_t i,
		uint32_t this,
		int *index)
{
	uint64_t pri = 0;
	uint32_t width_diff = 0;

	/* Rule 1: 1000 <= PRI <= 2000 + some margin. */
	if (br->br_elems[this].be_ts >= br->br_elems[prev].be_ts) {
		pri = br->br_elems[this].be_ts - br->br_elems[prev].be_ts;
	} else {
		/* Roll over case */
		pri = br->br_elems[this].be_ts;
	}
	dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5,
		" pri=%llu this.ts=%llu this.dur=%d this.rssi=%d prev.ts=%llu",
		(uint64_t)pri,
		(uint64_t) br->br_elems[this].be_ts,
		(int) br->br_elems[this].be_dur,
		(int) br->br_elems[this].be_rssi,
		(uint64_t)br->br_elems[prev].be_ts);

	if (((pri >= DFS_BIN5_PRI_LOWER_LIMIT) &&
			/*pri: pulse repitition interval in us. */
			(pri <= DFS_BIN5_PRI_HIGHER_LIMIT))) {
		/*
		 * Rule 2: pulse width of the pulses in the
		 * burst should be same (+/- margin).
		 */
		if (br->br_elems[this].be_dur >= br->br_elems[prev].be_dur) {
			width_diff = (br->br_elems[this].be_dur
					- br->br_elems[prev].be_dur);
		} else {
			width_diff = (br->br_elems[prev].be_dur
					- br->br_elems[this].be_dur);
		}

		if (width_diff <= DFS_BIN5_WIDTH_MARGIN)
			/*
			 * Rule 3: RSSI of the pulses in the
			 * burst should be same (+/- margin)
			 */
			dfs_calculate_bursts_for_same_rssi(dfs, br, bursts,
					numevents, prev, this, index);
		else
			dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5,
				"Bin5 width_diff=%d", width_diff);
	} else if ((pri >= DFS_BIN5_BRI_LOWER_LIMIT) &&
			(pri <= DFS_BIN5_BRI_UPPER_LIMIT)) {
		/* Check pulse width to make sure it is in range of bin 5.
		 */
		(*bursts)++;
	} else{
		dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5,
			"Bin5 PRI check fail pri=%llu", (uint64_t)pri);
	}
}

/*
 * Scan all Bin5 radar delay lines and decide whether a Bin5 radar is
 * present. Returns 1 on detection, 0 otherwise (also returns 1 if @dfs
 * is NULL, after logging the error).
 */
int dfs_bin5_check(struct wlan_dfs *dfs)
{
	struct dfs_bin5radars *br;
	uint32_t n = 0, i = 0, i1 = 0, this = 0, prev = 0;
	uint32_t bursts = 0;
#ifdef WLAN_DEBUG
	uint32_t total_diff = 0, average_diff = 0;
	uint32_t total_width = 0, average_width = 0;
#endif
	uint32_t numevents = 0;
	int index[DFS_MAX_B5_SIZE];

	if (!dfs) {
		dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL");
		return 1;
	}

	for (n = 0; n < dfs->dfs_rinfo.rn_numbin5radars; n++) {
		br = &(dfs->dfs_b5radars[n]);
		dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5, "Num elems = %d",
			br->br_numelems);

		/* Find a valid bin 5 pulse and use it as reference. */
		for (i1 = 0; i1 < br->br_numelems; i1++) {
			this = ((br->br_firstelem + i1) & DFS_MAX_B5_MASK);
			if ((br->br_elems[this].be_dur >= MIN_BIN5_DUR_MICROSEC)
				&& (br->br_elems[this].be_dur <=
					MAX_BIN5_DUR_MICROSEC)) {
				break;
			}
		}

		prev = this;
		for (i = i1 + 1; i < br->br_numelems; i++) {
			this = ((br->br_firstelem + i) & DFS_MAX_B5_MASK);
			/*
			 * First make sure it is a bin 5 pulse by checking
			 * the duration.
			 */
			if ((br->br_elems[this].be_dur < MIN_BIN5_DUR_MICROSEC)
				|| (br->br_elems[this].be_dur >
					MAX_BIN5_DUR_MICROSEC)) {
				continue;
			}
			bin5_rules_check_internal(dfs, br, &bursts, &numevents,
					prev, i, this, index);
			prev = this;
		}

		dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5,
			"bursts=%u numevents=%u", bursts, numevents);
		if (bursts >= br->br_pulse.b5_threshold) {
			/*
			 * Require the burst train to span at least 3 seconds
			 * (timestamps are in microseconds) before declaring.
			 */
			if ((br->br_elems[br->br_lastelem].be_ts -
					br->br_elems[br->br_firstelem].be_ts) <
					3000000)
				return 0;

			dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5,
				"bursts=%u numevents=%u total_width=%d average_width=%d total_diff=%d average_diff=%d",
				bursts, numevents, total_width,
				average_width, total_diff,
				average_diff);
			dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS,
				"bin 5 radar detected, bursts=%d",
				bursts);
			return 1;
		}
	}

	return 0;
}

/**
 * dfs_check_chirping_sowl() - Chirp detection for Sowl/Howl.
 * @dfs: Pointer to wlan_dfs structure.
 * @buf: Phyerr buffer.
 * @datalen: Phyerr buf length
 * @is_ctl: detected on primary channel.
 * @is_ext: detected on extension channel.
 * @slope: Slope
 * @is_dc: DC found
 *
 * Return: Return TRUE if chirping pulse, FALSE if not. Decision is made
 * based on processing the FFT data included with the PHY error.
 * Calculate the slope using the maximum bin index reported in
 * the FFT data. Calculate slope between FFT packet 0 and packet
 * n-1. Also calculate slope between packet 1 and packet n. If a
 * pulse is chirping, a slope of 5 and greater is seen.
 * Non-chirping pulses have slopes of 0, 1, 2 or 3.
 */
static int dfs_check_chirping_sowl(struct wlan_dfs *dfs,
		void *buf,
		uint16_t datalen,
		int is_ctl,
		int is_ext,
		int *slope,
		int *is_dc)
{
#define FFT_LEN 70
#define FFT_LOWER_BIN_MAX_INDEX_BYTE 66
#define FFT_UPPER_BIN_MAX_INDEX_BYTE 69
#define MIN_CHIRPING_SLOPE 4
	int is_chirp = 0;
	int p, num_fft_packets = 0;
	int ctl_slope = 0, ext_slope = 0;
	int ctl_high0 = 0, ctl_low0 = 0, ctl_slope0 = 0;
	int ext_high0 = 0, ext_low0 = 0, ext_slope0 = 0;
	int ctl_high1 = 0, ctl_low1 = 0, ctl_slope1 = 0;
	int ext_high1 = 0, ext_low1 = 0, ext_slope1 = 0;
	uint8_t *fft_data_ptr;

	*slope = 0;
	*is_dc = 0;
	num_fft_packets = datalen / FFT_LEN;
	fft_data_ptr = (uint8_t *)buf;

	/* DEBUG - Print relevant portions of the FFT data. */
	for (p = 0; p < num_fft_packets; p++) {
		dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT,
			"fft_data_ptr=0x%pK\t", fft_data_ptr);
		dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT,
			"[66]=%d [69]=%d",
			*(fft_data_ptr + FFT_LOWER_BIN_MAX_INDEX_BYTE) >> 2,
			*(fft_data_ptr + FFT_UPPER_BIN_MAX_INDEX_BYTE) >> 2);
		fft_data_ptr += FFT_LEN;
	}

	dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT,
		"datalen=%d num_fft_packets=%d", datalen, num_fft_packets);

	/*
	 * There is not enough FFT data to figure out whether the pulse
	 * is chirping or not.
	 */
	if (num_fft_packets < 4)
		return 0;

	fft_data_ptr = (uint8_t *)buf;

	if (is_ctl) {
		/* Max-bin index bytes carry the index in their top 6 bits. */
		fft_data_ptr = (uint8_t *)buf;
		ctl_low0 = *(fft_data_ptr + FFT_LOWER_BIN_MAX_INDEX_BYTE) >> 2;
		fft_data_ptr += FFT_LEN;
		ctl_low1 = *(fft_data_ptr + FFT_LOWER_BIN_MAX_INDEX_BYTE) >> 2;

		/* Last packet with first packet. */
		fft_data_ptr =
			(uint8_t *)buf + (FFT_LEN * (num_fft_packets - 1));
		ctl_high1 = *(fft_data_ptr + FFT_LOWER_BIN_MAX_INDEX_BYTE) >> 2;

		/* Second last packet with 0th packet.
		 */
		fft_data_ptr =
			(uint8_t *)buf + (FFT_LEN * (num_fft_packets - 2));
		ctl_high0 = *(fft_data_ptr + FFT_LOWER_BIN_MAX_INDEX_BYTE) >> 2;

		ctl_slope0 = ctl_high0 - ctl_low0;
		if (ctl_slope0 < 0)
			ctl_slope0 *= (-1);

		ctl_slope1 = ctl_high1 - ctl_low1;
		if (ctl_slope1 < 0)
			ctl_slope1 *= (-1);

		/* Use the larger of the two absolute slopes. */
		ctl_slope =
			((ctl_slope0 > ctl_slope1) ? ctl_slope0 : ctl_slope1);
		*slope = ctl_slope;

		dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT,
			"ctl_slope0=%d ctl_slope1=%d ctl_slope=%d",
			ctl_slope0, ctl_slope1, ctl_slope);
	} else if (is_ext) {
		fft_data_ptr = (uint8_t *)buf;
		ext_low0 = *(fft_data_ptr + FFT_UPPER_BIN_MAX_INDEX_BYTE) >> 2;

		fft_data_ptr += FFT_LEN;
		ext_low1 = *(fft_data_ptr + FFT_UPPER_BIN_MAX_INDEX_BYTE) >> 2;

		fft_data_ptr =
			(uint8_t *)buf + (FFT_LEN * (num_fft_packets - 1));
		ext_high1 = *(fft_data_ptr + FFT_UPPER_BIN_MAX_INDEX_BYTE) >> 2;
		fft_data_ptr =
			(uint8_t *)buf + (FFT_LEN * (num_fft_packets - 2));

		ext_high0 = *(fft_data_ptr + FFT_UPPER_BIN_MAX_INDEX_BYTE) >> 2;

		ext_slope0 = ext_high0 - ext_low0;
		if (ext_slope0 < 0)
			ext_slope0 *= (-1);

		ext_slope1 = ext_high1 - ext_low1;
		if (ext_slope1 < 0)
			ext_slope1 *= (-1);

		ext_slope = ((ext_slope0 > ext_slope1) ?
				ext_slope0 : ext_slope1);
		*slope = ext_slope;
		dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT | WLAN_DEBUG_DFS_BIN5,
			"ext_slope0=%d ext_slope1=%d ext_slope=%d",
			ext_slope0, ext_slope1, ext_slope);
	} else
		return 0;

	if ((ctl_slope >= MIN_CHIRPING_SLOPE) ||
			(ext_slope >= MIN_CHIRPING_SLOPE)) {
		is_chirp = 1;
		dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5 | WLAN_DEBUG_DFS_BIN5_FFT |
			WLAN_DEBUG_DFS_PHYERR_SUM, "is_chirp=%d is_dc=%d",
			is_chirp, *is_dc);
	}

	return is_chirp;

#undef FFT_LEN
#undef FFT_LOWER_BIN_MAX_INDEX_BYTE
#undef FFT_UPPER_BIN_MAX_INDEX_BYTE
#undef MIN_CHIRPING_SLOPE
}

/**
 * dfs_check_chirping_merlin() - Merlin (and Osprey, etc) chirp radar chirp
 * detection.
 * @dfs: Pointer to wlan_dfs structure.
 * @buf: Phyerr buffer
 * @datalen: Phyerr buf length
 * @is_ctl: detected on primary channel.
 * @is_ext: detected on extension channel.
 * @slope: Slope
 * @is_dc: DC found
 */
static int dfs_check_chirping_merlin(struct wlan_dfs *dfs,
		void *buf,
		uint16_t datalen,
		int is_ctl,
		int is_ext,
		int *slope,
		int *is_dc)
{
#define ABS_DIFF(_x, _y) ((int)_x > (int)_y ? (int)_x - (int)_y : \
	(int)_y - (int)_x)
#define ABS(_x) ((int)_x > 0 ? (int)_x : -(int)_x)
	/* This should be between 1 and 3. Default is 1. */
#define DELTA_STEP 1
	/* Number of Diffs to compute. valid range is 2-4. */
#define NUM_DIFFS 3
	/* Threshold for difference of delta peaks. */
#define MAX_DIFF 2
	/* Max. number of strong bins for narrow band. */
#define BIN_COUNT_MAX 6

	/* Dynamic 20/40 mode FFT packet format related definition. */
#define NUM_FFT_BYTES_HT40 70
#define NUM_BIN_BYTES_HT40 64
#define NUM_SUBCHAN_BINS_HT40 64
#define LOWER_INDEX_BYTE_HT40 66
#define UPPER_INDEX_BYTE_HT40 69
#define LOWER_WEIGHT_BYTE_HT40 64
#define UPPER_WEIGHT_BYTE_HT40 67
#define LOWER_MAG_BYTE_HT40 65
#define UPPER_MAG_BYTE_HT40 68

	/* Static 20 mode FFT packet format related definition.
	 */
#define NUM_FFT_BYTES_HT20 31
#define NUM_BIN_BYTES_HT20 28
#define NUM_SUBCHAN_BINS_HT20 56
#define LOWER_INDEX_BYTE_HT20 30
#define UPPER_INDEX_BYTE_HT20 30
#define LOWER_WEIGHT_BYTE_HT20 28
#define UPPER_WEIGHT_BYTE_HT20 28
#define LOWER_MAG_BYTE_HT20 29
#define UPPER_MAG_BYTE_HT20 29

	int num_fft_packets; /* number of FFT packets reported to software */
	int num_fft_bytes;
	int num_bin_bytes;
	int num_subchan_bins;
	int lower_index_byte;
	int upper_index_byte;
	int lower_weight_byte;
	int upper_weight_byte;
	int lower_mag_byte;
	int upper_mag_byte;
	int max_index_lower[DELTA_STEP + NUM_DIFFS];
	int max_index_upper[DELTA_STEP + NUM_DIFFS];
	int max_mag_lower[DELTA_STEP + NUM_DIFFS];
	int max_mag_upper[DELTA_STEP + NUM_DIFFS];
	int bin_wt_lower[DELTA_STEP + NUM_DIFFS];
	int bin_wt_upper[DELTA_STEP + NUM_DIFFS];
	int max_mag_sel[DELTA_STEP + NUM_DIFFS];
	int max_mag[DELTA_STEP + NUM_DIFFS];
	int max_index[DELTA_STEP + NUM_DIFFS];
	int max_d[] = {10, 19, 28};
	int min_d[] = {1, 2, 3};
	uint8_t *ptr; /* pointer to FFT data */
	int i;
	int fft_start;
	int chirp_found;
	int delta_peak[NUM_DIFFS];
	int j;
	int bin_count;
	int bw_mask;
	int delta_diff;
	int same_sign;
	int temp;

	if (WLAN_IS_CHAN_11N_HT40(dfs->dfs_curchan)) {
		num_fft_bytes = NUM_FFT_BYTES_HT40;
		num_bin_bytes = NUM_BIN_BYTES_HT40;
		num_subchan_bins = NUM_SUBCHAN_BINS_HT40;
		lower_index_byte = LOWER_INDEX_BYTE_HT40;
		upper_index_byte = UPPER_INDEX_BYTE_HT40;
		lower_weight_byte = LOWER_WEIGHT_BYTE_HT40;
		upper_weight_byte = UPPER_WEIGHT_BYTE_HT40;
		lower_mag_byte = LOWER_MAG_BYTE_HT40;
		upper_mag_byte = UPPER_MAG_BYTE_HT40;

		/* If we are in HT40MINUS then swap primary and extension.
		 */
		if (WLAN_IS_CHAN_11N_HT40MINUS(dfs->dfs_curchan)) {
			temp = is_ctl;
			is_ctl = is_ext;
			is_ext = temp;
		}
	} else {
		num_fft_bytes = NUM_FFT_BYTES_HT20;
		num_bin_bytes = NUM_BIN_BYTES_HT20;
		num_subchan_bins = NUM_SUBCHAN_BINS_HT20;
		lower_index_byte = LOWER_INDEX_BYTE_HT20;
		upper_index_byte = UPPER_INDEX_BYTE_HT20;
		lower_weight_byte = LOWER_WEIGHT_BYTE_HT20;
		upper_weight_byte = UPPER_WEIGHT_BYTE_HT20;
		lower_mag_byte = LOWER_MAG_BYTE_HT20;
		upper_mag_byte = UPPER_MAG_BYTE_HT20;
	}

	ptr = (uint8_t *)buf;
	/* Sanity check for FFT buffer. */
	if (!ptr || (datalen == 0)) {
		dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT,
			"FFT buffer pointer is null or size is 0");
		return 0;
	}

	num_fft_packets = (datalen - 3) / num_fft_bytes;
	if (num_fft_packets < (NUM_DIFFS + DELTA_STEP)) {
		dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT,
			"datalen = %d, num_fft_packets = %d, too few packets... (exiting)",
			datalen, num_fft_packets);
		return 0;
	}

	/* Skip a possible 2-byte prefix so packets are FFT-aligned. */
	if ((((datalen - 3) % num_fft_bytes) == 2) &&
			(datalen > num_fft_bytes)) {
		ptr += 2;
		datalen -= 2;
	}

	for (i = 0; i < (NUM_DIFFS + DELTA_STEP); i++) {
		fft_start = i * num_fft_bytes;
		bin_wt_lower[i] = ptr[fft_start + lower_weight_byte] & 0x3f;
		bin_wt_upper[i] = ptr[fft_start + upper_weight_byte] & 0x3f;
		max_index_lower[i] = ptr[fft_start + lower_index_byte] >> 2;
		max_index_upper[i] = (ptr[fft_start + upper_index_byte] >> 2) +
			num_subchan_bins;

		if (!WLAN_IS_CHAN_11N_HT40(dfs->dfs_curchan)) {
			/* For HT20 mode indices are 6 bit signed number. */
			max_index_lower[i] ^= 0x20;
			max_index_upper[i] = 0;
		}

		/*
		 * Reconstruct the maximum magnitude for each sub-channel.
		 * Also select and flag the max overall magnitude between
		 * the two sub-channels.
		 */

		max_mag_lower[i] =
			((ptr[fft_start + lower_index_byte] & 0x03) << 8) +
			ptr[fft_start + lower_mag_byte];
		max_mag_upper[i] =
			((ptr[fft_start + upper_index_byte] & 0x03) << 8) +
			ptr[fft_start + upper_mag_byte];
		bw_mask = ((bin_wt_lower[i] == 0) ? 0 : is_ctl) +
			(((bin_wt_upper[i] == 0) ? 0 : is_ext) << 1);

		/*
		 * Limit the max bin based on channel bandwidth
		 * If the upper sub-channel max index is stuck at '1',
		 * the signal is dominated * by residual DC
		 * (or carrier leak) and should be ignored.
		 */

		if (bw_mask == 1) {
			max_mag_sel[i] = 0;
			max_mag[i] = max_mag_lower[i];
			max_index[i] = max_index_lower[i];
		} else if (bw_mask == 2) {
			max_mag_sel[i] = 1;
			max_mag[i] = max_mag_upper[i];
			max_index[i] = max_index_upper[i];
		} else if (max_index_upper[i] == num_subchan_bins) {
			max_mag_sel[i] = 0; /* Ignore DC bin. */
			max_mag[i] = max_mag_lower[i];
			max_index[i] = max_index_lower[i];
		} else {
			if (max_mag_upper[i] > max_mag_lower[i]) {
				max_mag_sel[i] = 1;
				max_mag[i] = max_mag_upper[i];
				max_index[i] = max_index_upper[i];
			} else {
				max_mag_sel[i] = 0;
				max_mag[i] = max_mag_lower[i];
				max_index[i] = max_index_lower[i];
			}
		}
		dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT,
			"i=%d, max_index[i]=%d, max_index_lower[i]=%d, max_index_upper[i]=%d",
			i, max_index[i], max_index_lower[i],
			max_index_upper[i]);
	}

	chirp_found = 1;
	delta_diff = 0;
	same_sign = 1;

	/*
	 * delta_diff computation -- look for movement in peak.
	 * make sure that the chirp direction (i.e. sign) is
	 * always the same, i.e. sign of the two peaks should
	 * be same.
	 */
	for (i = 0; i < NUM_DIFFS; i++) {
		delta_peak[i] = max_index[i + DELTA_STEP] - max_index[i];
		if (i > 0) {
			delta_diff = delta_peak[i] - delta_peak[i-1];
			/*
			 * NOTE(review): sign agreement is tested via bit 7
			 * (& 0x80) — assumes delta_peak magnitudes fit in
			 * 8 bits; confirm against the index ranges above.
			 */
			same_sign = !((delta_peak[i] & 0x80) ^
				(delta_peak[i-1] & 0x80));
		}
		chirp_found &=
			(ABS(delta_peak[i]) >= min_d[DELTA_STEP - 1]) &&
			(ABS(delta_peak[i]) <= max_d[DELTA_STEP - 1]) &&
			same_sign && (ABS(delta_diff) <= MAX_DIFF);
		dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT,
			"i=%d, delta_peak[i]=%d, delta_diff=%d",
			i, delta_peak[i], delta_diff);
	}

	if (chirp_found) {
		dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT,
			"CHIRPING_BEFORE_STRONGBIN_YES");
	} else {
		dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT,
			"CHIRPING_BEFORE_STRONGBIN_NO");
	}

	/*
	 * Work around for potential hardware data corruption bug.
	 * Check for wide band signal by counting strong bins
	 * indicated by bitmap flags. This check is done if
	 * chirp_found is true. We do this as a final check to
	 * weed out corrupt FFTs bytes. This looks expensive but
	 * in most cases it will exit early.
	 */

	for (i = 0; (i < (NUM_DIFFS + DELTA_STEP)) &&
			(chirp_found == 1); i++) {
		bin_count = 0;
		/*
		 * Point to the start of the 1st byte of the selected
		 * sub-channel.
		 */
		fft_start = (i * num_fft_bytes) + (max_mag_sel[i] ?
				(num_subchan_bins >> 1) : 0);
		for (j = 0; j < (num_subchan_bins >> 1); j++) {
			/*
			 * If either bin is flagged "strong", accumulate
			 * the bin_count. It's not accurate, but good
			 * enough...
			 */
			bin_count += (ptr[fft_start + j] & 0x88) ? 1 : 0;
		}
		chirp_found &= (bin_count > BIN_COUNT_MAX) ?
0 : 1; + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT, + "i=%d, computed bin_count=%d", + i, bin_count); + } + + if (chirp_found) { + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT | + WLAN_DEBUG_DFS_PHYERR_SUM, + "CHIRPING_YES"); + } else { + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_FFT | + WLAN_DEBUG_DFS_PHYERR_SUM, + "CHIRPING_NO"); + } + + return chirp_found; +#undef ABS_DIFF +#undef ABS +#undef DELTA_STEP +#undef NUM_DIFFS +#undef MAX_DIFF +#undef BIN_COUNT_MAX + +#undef NUM_FFT_BYTES_HT40 +#undef NUM_BIN_BYTES_HT40 +#undef NUM_SUBCHAN_BINS_HT40 +#undef LOWER_INDEX_BYTE_HT40 +#undef UPPER_INDEX_BYTE_HT40 +#undef LOWER_WEIGHT_BYTE_HT40 +#undef UPPER_WEIGHT_BYTE_HT40 +#undef LOWER_MAG_BYTE_HT40 +#undef UPPER_MAG_BYTE_HT40 + +#undef NUM_FFT_BYTES_HT40 +#undef NUM_BIN_BYTES_HT40 +#undef NUM_SUBCHAN_BINS_HT40 +#undef LOWER_INDEX_BYTE_HT40 +#undef UPPER_INDEX_BYTE_HT40 +#undef LOWER_WEIGHT_BYTE_HT40 +#undef UPPER_WEIGHT_BYTE_HT40 +#undef LOWER_MAG_BYTE_HT40 +#undef UPPER_MAG_BYTE_HT40 +} + +int dfs_check_chirping(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + int is_ctl, + int is_ext, + int *slope, + int *is_dc) +{ + if (dfs->dfs_caps.wlan_dfs_use_enhancement) { + return dfs_check_chirping_merlin(dfs, buf, datalen, is_ctl, + is_ext, slope, is_dc); + } else { + return dfs_check_chirping_sowl(dfs, buf, datalen, is_ctl, + is_ext, slope, is_dc); + } +} + +uint8_t dfs_retain_bin5_burst_pattern(struct wlan_dfs *dfs, + uint32_t diff_ts, + uint8_t old_dur) +{ + /* + * Pulses may get split into 2 during chirping, this print + * is only to show that it happened, we do not handle this + * condition if we cannot detect the chirping. + */ + /* SPLIT pulses will have a time stamp difference of < 50 */ + if (diff_ts < 50) { + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5, + "SPLIT pulse diffTs=%u dur=%d (old_dur=%d)", + diff_ts, + dfs->dfs_rinfo.dfs_last_bin5_dur, old_dur); + } + + /* + * Check if this is the 2nd or 3rd pulse in the same burst, + * PRI will be between 1000 and 2000 us. 
+ */ + if (((diff_ts >= DFS_BIN5_PRI_LOWER_LIMIT) && + (diff_ts <= DFS_BIN5_PRI_HIGHER_LIMIT))) { + /* + * This pulse belongs to the same burst as the pulse before, + * so return the same random duration for it. + */ + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5, + "this pulse belongs to the same burst as before, give it same dur=%d (old_dur=%d)", + dfs->dfs_rinfo.dfs_last_bin5_dur, old_dur); + return dfs->dfs_rinfo.dfs_last_bin5_dur; + } + + /* This pulse does not belong to this burst, return unchanged duration*/ + return old_dur; +} + +int dfs_get_random_bin5_dur(struct wlan_dfs *dfs, + uint64_t tstamp) +{ + uint8_t new_dur = MIN_BIN5_DUR; + int range; + + get_random_bytes(&new_dur, sizeof(uint8_t)); + range = (MAX_BIN5_DUR - MIN_BIN5_DUR + 1); + new_dur %= range; + new_dur += MIN_BIN5_DUR; + + return new_dur; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_init.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_init.c new file mode 100644 index 0000000000000000000000000000000000000000..252883a5d5e23f0ec2a41af5fd0a1ed4677b4f1d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_init.c @@ -0,0 +1,408 @@ +/* + * Copyright (c) 2013, 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2010, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains initialization functions and functions that reset + * internal data structures. + */ + +#include "../dfs.h" +#include "wlan_dfs_lmac_api.h" + +/** + * dfs_reset_filtertype() - Reset filtertype. + * @ft: Pointer to dfs_filtertype structure. + */ +static inline void dfs_reset_filtertype( + struct dfs_filtertype *ft) +{ + int j; + struct dfs_filter *rf; + struct dfs_delayline *dl; + + for (j = 0; j < ft->ft_numfilters; j++) { + rf = ft->ft_filters[j]; + dl = &(rf->rf_dl); + if (dl != NULL) { + qdf_mem_zero(dl, sizeof(*dl)); + dl->dl_lastelem = (0xFFFFFFFF) & DFS_MAX_DL_MASK; + } + } +} + +void dfs_reset_alldelaylines(struct wlan_dfs *dfs) +{ + struct dfs_filtertype *ft = NULL; + struct dfs_pulseline *pl; + int i; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + pl = dfs->pulses; + + if (!pl) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "pl is NULL"); + return; + } + + /* Reset the pulse log. 
*/ + pl->pl_firstelem = pl->pl_numelems = 0; + pl->pl_lastelem = DFS_MAX_PULSE_BUFFER_MASK; + + for (i = 0; i < DFS_MAX_RADAR_TYPES; i++) { + if (dfs->dfs_radarf[i] != NULL) { + ft = dfs->dfs_radarf[i]; + dfs_reset_filtertype(ft); + } + } + + if (!(dfs->dfs_b5radars)) { + if (dfs->dfs_rinfo.rn_numbin5radars > 0) + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "null dfs_b5radars, numbin5radars=%d domain=%d", + dfs->dfs_rinfo.rn_numbin5radars, + dfs->dfsdomain); + return; + } + + for (i = 0; i < dfs->dfs_rinfo.rn_numbin5radars; i++) { + qdf_mem_zero(&(dfs->dfs_b5radars[i].br_elems[0]), + sizeof(struct dfs_bin5elem) * DFS_MAX_B5_SIZE); + dfs->dfs_b5radars[i].br_firstelem = 0; + dfs->dfs_b5radars[i].br_numelems = 0; + dfs->dfs_b5radars[i].br_lastelem = + (0xFFFFFFFF) & DFS_MAX_B5_MASK; + } +} + +void dfs_reset_delayline(struct dfs_delayline *dl) +{ + qdf_mem_zero(&(dl->dl_elems[0]), sizeof(dl->dl_elems)); + dl->dl_lastelem = (0xFFFFFFFF) & DFS_MAX_DL_MASK; +} + +void dfs_reset_filter_delaylines(struct dfs_filtertype *dft) +{ + struct dfs_filter *df; + int i; + + for (i = 0; i < DFS_MAX_NUM_RADAR_FILTERS; i++) { + df = dft->ft_filters[i]; + dfs_reset_delayline(&(df->rf_dl)); + } +} + +void dfs_reset_radarq(struct wlan_dfs *dfs) +{ + struct dfs_event *event; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + WLAN_DFSQ_LOCK(dfs); + WLAN_DFSEVENTQ_LOCK(dfs); + while (!STAILQ_EMPTY(&(dfs->dfs_radarq))) { + event = STAILQ_FIRST(&(dfs->dfs_radarq)); + STAILQ_REMOVE_HEAD(&(dfs->dfs_radarq), re_list); + qdf_mem_zero(event, sizeof(struct dfs_event)); + STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), event, re_list); + } + WLAN_DFSEVENTQ_UNLOCK(dfs); + WLAN_DFSQ_UNLOCK(dfs); +} + +/** + * dfs_fill_ft_index_table() - DFS fill ft index table. + * @dfs: Pointer to wlan_dfs structure. + * @i: Duration used as an index. + * + * Return: 1 if too many overlapping radar filters else 0. 
 */
static inline bool dfs_fill_ft_index_table(
		struct wlan_dfs *dfs,
		int i)
{
	uint32_t stop = 0, tableindex = 0;

	/* Find the first free slot in the overlap list for duration i. */
	while ((tableindex < DFS_MAX_RADAR_OVERLAP) && (!stop)) {
		if ((dfs->dfs_ftindextable[i])[tableindex] == -1)
			stop = 1;
		else
			tableindex++;
	}

	if (stop) {
		(dfs->dfs_ftindextable[i])[tableindex] =
			(int8_t)(dfs->dfs_rinfo.rn_ftindex);
	} else {
		dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "Too many overlapping radar filters");
		return 1;
	}

	return 0;
}

/**
 * dfs_fill_filter_type() - DFS fill filter type.
 * @dfs: Pointer to wlan_dfs structure.
 * @ft: Double pointer to dfs_filtertype structure.
 * @dfs_radars: Pointer to dfs_pulse structure.
 * @min_rssithresh: Minimum RSSI threshold (updated in place).
 * @max_pulsedur: Maximum pulse duration (updated in place).
 * @p: Index to dfs_pulse structure.
 *
 * Allocates the next free filter-type slot and seeds it from radar
 * pulse entry @p, widening the caller's RSSI/duration bounds.
 *
 * Return: 1 if too many overlapping radar filters else 0.
 */
static inline bool dfs_fill_filter_type(
		struct wlan_dfs *dfs,
		struct dfs_filtertype **ft,
		struct dfs_pulse *dfs_radars,
		int32_t *min_rssithresh,
		uint32_t *max_pulsedur,
		int p)
{
	int i;

	/* No filter of the appropriate dur was found. */
	if ((dfs->dfs_rinfo.rn_ftindex + 1) > DFS_MAX_RADAR_TYPES) {
		dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "Too many filter types");
		return 1;
	}
	(*ft) = dfs->dfs_radarf[dfs->dfs_rinfo.rn_ftindex];
	(*ft)->ft_numfilters = 0;
	(*ft)->ft_numpulses = dfs_radars[p].rp_numpulses;
	(*ft)->ft_patterntype = dfs_radars[p].rp_patterntype;
	(*ft)->ft_mindur = dfs_radars[p].rp_mindur;
	(*ft)->ft_maxdur = dfs_radars[p].rp_maxdur;
	(*ft)->ft_filterdur = dfs_radars[p].rp_pulsedur;
	(*ft)->ft_rssithresh = dfs_radars[p].rp_rssithresh;
	(*ft)->ft_rssimargin = dfs_radars[p].rp_rssimargin;
	/* Sentinel; real minimum PRI is folded in per filter later. */
	(*ft)->ft_minpri = 1000000;

	if ((*ft)->ft_rssithresh < *min_rssithresh)
		*min_rssithresh = (*ft)->ft_rssithresh;

	if ((*ft)->ft_maxdur > *max_pulsedur)
		*max_pulsedur = (*ft)->ft_maxdur;

	/* Register this filter type for every duration it covers. */
	for (i = (*ft)->ft_mindur; i <= (*ft)->ft_maxdur; i++) {
		if (dfs_fill_ft_index_table(dfs, i))
			return 1;
	}

	dfs->dfs_rinfo.rn_ftindex++;

	return 0;
}

int dfs_init_radar_filters(struct wlan_dfs *dfs,
		struct wlan_dfs_radar_tab_info *radar_info)
{
	struct dfs_filtertype *ft = NULL;
	struct dfs_filter *rf = NULL;
	struct dfs_pulse *dfs_radars;
	struct dfs_bin5pulse *b5pulses = NULL;
	uint32_t T, Tmax;
	int32_t min_rssithresh = DFS_MAX_RSSI_VALUE;
	uint32_t max_pulsedur = 0;
	int numpulses, p, n, i;
	int numradars = 0, numb5radars = 0;
	int retval;

	if (!dfs) {
		dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL");
		return 1;
	}

	dfs_debug(dfs, WLAN_DEBUG_DFS,
		"dfsdomain=%d, numradars=%d, numb5radars=%d",
		radar_info->dfsdomain,
		radar_info->numradars, radar_info->numb5radars);

	/* Clear up the dfs domain flag first. */
	dfs->wlan_dfs_isdfsregdomain = 0;

	/*
	 * If radar_info is NULL or dfsdomain is NULL, treat the
	 * rest of the radar configuration as suspect.
	 */
	if (!radar_info || radar_info->dfsdomain == 0) {
		dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "Unknown dfs domain %d",
				dfs->dfsdomain);
		/* Disable radar detection since we don't have a radar domain.*/
		dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN;
		dfs->dfs_proc_phyerr &= ~DFS_SECOND_SEGMENT_RADAR_EN;
		return 0;
	}

	dfs->dfsdomain = radar_info->dfsdomain;
	dfs_radars = radar_info->dfs_radars;
	numradars = radar_info->numradars;
	b5pulses = radar_info->b5pulses;
	numb5radars = radar_info->numb5radars;

	dfs->dfs_defaultparams = radar_info->dfs_defaultparams;

	dfs->wlan_dfs_isdfsregdomain = 1;
	dfs->dfs_rinfo.rn_ftindex = 0;
	/* Clear filter type table (one overlap list per duration 0..255). */
	for (n = 0; n < 256; n++) {
		for (i = 0; i < DFS_MAX_RADAR_OVERLAP; i++)
			(dfs->dfs_ftindextable[n])[i] = -1;
	}

	/* Now, initialize the radar filters. */
	for (p = 0; p < numradars; p++) {
		ft = NULL;
		/* Reuse an existing filter type with identical shape. */
		for (n = 0; n < dfs->dfs_rinfo.rn_ftindex; n++) {
			if ((dfs_radars[p].rp_pulsedur ==
				    dfs->dfs_radarf[n]->ft_filterdur) &&
				(dfs_radars[p].rp_numpulses ==
				 dfs->dfs_radarf[n]->ft_numpulses) &&
				(dfs_radars[p].rp_mindur ==
				 dfs->dfs_radarf[n]->ft_mindur) &&
				(dfs_radars[p].rp_maxdur ==
				 dfs->dfs_radarf[n]->ft_maxdur)) {
				ft = dfs->dfs_radarf[n];
				break;
			}
		}

		if (!ft) {
			retval = dfs_fill_filter_type(dfs, &ft, dfs_radars,
					&min_rssithresh, &max_pulsedur, p);
			if (retval == 1)
				goto bad4;
		}

		rf = ft->ft_filters[ft->ft_numfilters++];
		dfs_reset_delayline(&rf->rf_dl);
		numpulses = dfs_radars[p].rp_numpulses;

		rf->rf_numpulses = numpulses;
		rf->rf_patterntype = dfs_radars[p].rp_patterntype;
		rf->rf_sidx_spread = dfs_radars[p].rp_sidx_spread;
		rf->rf_check_delta_peak = dfs_radars[p].rp_check_delta_peak;
		rf->rf_pulseid = dfs_radars[p].rp_pulseid;
		rf->rf_mindur = dfs_radars[p].rp_mindur;
		rf->rf_maxdur = dfs_radars[p].rp_maxdur;
		rf->rf_numpulses = dfs_radars[p].rp_numpulses;
		rf->rf_ignore_pri_window = dfs_radars[p].rp_ignore_pri_window;
		/*
		 * Convert pulse frequency (x100 Hz) into a PRI window in
		 * hundreds of ns, then pad by the configured variance.
		 */
		T = (100000000 / dfs_radars[p].rp_max_pulsefreq) -
			100 * (dfs_radars[p].rp_meanoffset);
		rf->rf_minpri = dfs_round((int32_t)T -
			(100 * (dfs_radars[p].rp_pulsevar)));
		Tmax = (100000000 / dfs_radars[p].rp_pulsefreq) -
			100 * (dfs_radars[p].rp_meanoffset);
		rf->rf_maxpri = dfs_round((int32_t)Tmax +
			(100 * (dfs_radars[p].rp_pulsevar)));

		if (rf->rf_minpri < ft->ft_minpri)
			ft->ft_minpri = rf->rf_minpri;

		rf->rf_fixed_pri_radar_pulse = (
			dfs_radars[p].rp_max_pulsefreq ==
			dfs_radars[p].rp_pulsefreq) ? 1 : 0;
		rf->rf_threshold = dfs_radars[p].rp_threshold;
		rf->rf_filterlen = rf->rf_maxpri * rf->rf_numpulses;

		dfs_debug(dfs, WLAN_DEBUG_DFS2,
			"minprf = %d maxprf = %d pulsevar = %d thresh=%d",
			dfs_radars[p].rp_pulsefreq,
			dfs_radars[p].rp_max_pulsefreq,
			dfs_radars[p].rp_pulsevar,
			rf->rf_threshold);

		dfs_debug(dfs, WLAN_DEBUG_DFS2,
			"minpri = %d maxpri = %d filterlen = %d filterID = %d",
			rf->rf_minpri, rf->rf_maxpri,
			rf->rf_filterlen, rf->rf_pulseid);
	}

	dfs_print_filters(dfs);

	dfs->dfs_rinfo.rn_numbin5radars = numb5radars;
	/* Free any previous bin5 table before installing the new one. */
	if (dfs->dfs_b5radars) {
		qdf_mem_free(dfs->dfs_b5radars);
		dfs->dfs_b5radars = NULL;
	}

	if (numb5radars) {
		dfs->dfs_b5radars = (struct dfs_bin5radars *)qdf_mem_malloc(
			numb5radars * sizeof(struct dfs_bin5radars));
		/* Allocation failure leaves no bin5 state to work with. */
		if (!(dfs->dfs_b5radars)) {
			dfs_alert(dfs, WLAN_DEBUG_DFS_ALWAYS,
				"cannot allocate memory for bin5 radars");
			goto bad4;
		}
	}

	for (n = 0; n < numb5radars; n++) {
		dfs->dfs_b5radars[n].br_pulse = b5pulses[n];
		/* Time window arrives in seconds; store microseconds. */
		dfs->dfs_b5radars[n].br_pulse.b5_timewindow *= 1000000;
		if (dfs->dfs_b5radars[n].br_pulse.b5_rssithresh <
				min_rssithresh)
			min_rssithresh =
				dfs->dfs_b5radars[n].br_pulse.b5_rssithresh;

		if (dfs->dfs_b5radars[n].br_pulse.b5_maxdur > max_pulsedur)
			max_pulsedur = dfs->dfs_b5radars[n].br_pulse.b5_maxdur;
	}
	dfs_reset_alldelaylines(dfs);
	dfs_reset_radarq(dfs);
	dfs->dfs_curchan_radindex = -1;
	dfs->dfs_extchan_radindex = -1;
	dfs->dfs_rinfo.rn_minrssithresh = min_rssithresh;

	/* Convert durations to TSF ticks. */
	dfs->dfs_rinfo.rn_maxpulsedur =
		dfs_round((int32_t)((max_pulsedur * 100/80) * 100));
	/*
	 * Relax the max pulse duration a little bit due to inaccuracy
	 * caused by chirping.
	 */
	dfs->dfs_rinfo.rn_maxpulsedur = dfs->dfs_rinfo.rn_maxpulsedur + 20;

	dfs_debug(dfs, WLAN_DEBUG_DFS, "DFS min filter rssiThresh = %d",
		min_rssithresh);

	dfs_debug(dfs, WLAN_DEBUG_DFS, "DFS max pulse dur = %d ticks",
		dfs->dfs_rinfo.rn_maxpulsedur);

	return 0;

bad4:
	return 1;
}

void dfs_clear_stats(struct wlan_dfs *dfs)
{
	if (!dfs)
		return;

	/* Zero the counters and remember when they were last reset. */
	qdf_mem_zero(&dfs->wlan_dfs_stats, sizeof(struct dfs_stats));
	dfs->wlan_dfs_stats.last_reset_tstamp =
		lmac_get_tsf64(dfs->dfs_pdev_obj);
}
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file really does contain miscellaneous functions that didn't fit + * in anywhere else. + */ + +#include "../dfs.h" +#include "wlan_dfs_lmac_api.h" +#include "wlan_dfs_mlme_api.h" +#include "../dfs_internal.h" + +/** + * dfs_adjust_pri_per_chan_busy() - Calculates adjust_pri. + * @ext_chan_busy: Extension channel PRI. + * @pri_margin: Primary margin. + * + * Calculates the adjust_pri using ext_chan_busy, DFS_EXT_CHAN_LOADING_THRESH + * and pri_margin. + * + * Return: adjust_pri. + */ +static int dfs_adjust_pri_per_chan_busy(int ext_chan_busy, int pri_margin) +{ + int adjust_pri = 0; + + if (ext_chan_busy > DFS_EXT_CHAN_LOADING_THRESH) { + adjust_pri = ((ext_chan_busy - DFS_EXT_CHAN_LOADING_THRESH) * + (pri_margin)); + adjust_pri /= 100; + } + + return adjust_pri; +} + +/** + * dfs_adjust_thresh_per_chan_busy() - Calculates adjust_thresh. + * @ext_chan_busy: Extension channel PRI. + * @thresh: Threshold value. + * + * Calculates the adjust_thresh using ext_chan_busy, DFS_EXT_CHAN_LOADING_THRESH + * and thresh. + * + * Return: adjust_thresh. 
+ */ +static int dfs_adjust_thresh_per_chan_busy(int ext_chan_busy, int thresh) +{ + int adjust_thresh = 0; + + if (ext_chan_busy > DFS_EXT_CHAN_LOADING_THRESH) { + adjust_thresh = ((ext_chan_busy - DFS_EXT_CHAN_LOADING_THRESH) * + thresh); + adjust_thresh /= 100; + } + + return adjust_thresh; +} + +/** + * dfs_get_cached_ext_chan_busy() - Get cached ext chan busy. + * @dfs: Pointer to wlan_dfs structure. + * @ext_chan_busy: Extension channel PRI. + */ +static inline void dfs_get_cached_ext_chan_busy( + struct wlan_dfs *dfs, + int *ext_chan_busy) +{ + *ext_chan_busy = 0; + /* Check to see if the cached value of ext_chan_busy can be used. */ + + if (dfs->dfs_rinfo.dfs_ext_chan_busy && + (dfs->dfs_rinfo.rn_lastfull_ts < + dfs->dfs_rinfo.ext_chan_busy_ts)) { + *ext_chan_busy = dfs->dfs_rinfo.dfs_ext_chan_busy; + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "Use cached copy of ext_chan_busy extchanbusy=%d rn_lastfull_ts=%llu ext_chan_busy_ts=%llu", + *ext_chan_busy, + (uint64_t)dfs->dfs_rinfo.rn_lastfull_ts, + (uint64_t)dfs->dfs_rinfo.ext_chan_busy_ts); + } +} + +int dfs_get_pri_margin(struct wlan_dfs *dfs, + int is_extchan_detect, + int is_fixed_pattern) +{ + int adjust_pri = 0, ext_chan_busy = 0; + int pri_margin; + + if (is_fixed_pattern) + pri_margin = DFS_DEFAULT_FIXEDPATTERN_PRI_MARGIN; + else + pri_margin = DFS_DEFAULT_PRI_MARGIN; + + if (WLAN_IS_CHAN_11N_HT40(dfs->dfs_curchan)) { + ext_chan_busy = lmac_get_ext_busy(dfs->dfs_pdev_obj); + if (ext_chan_busy >= 0) { + dfs->dfs_rinfo.ext_chan_busy_ts = + lmac_get_tsf64(dfs->dfs_pdev_obj); + dfs->dfs_rinfo.dfs_ext_chan_busy = ext_chan_busy; + } else { + dfs_get_cached_ext_chan_busy(dfs, &ext_chan_busy); + } + adjust_pri = dfs_adjust_pri_per_chan_busy(ext_chan_busy, + pri_margin); + pri_margin -= adjust_pri; + } + + return pri_margin; +} + +int dfs_get_filter_threshold(struct wlan_dfs *dfs, + struct dfs_filter *rf, + int is_extchan_detect) +{ + int ext_chan_busy = 0; + int thresh, adjust_thresh = 0; + + thresh = 
rf->rf_threshold; + + if (WLAN_IS_CHAN_11N_HT40(dfs->dfs_curchan)) { + ext_chan_busy = lmac_get_ext_busy(dfs->dfs_pdev_obj); + if (ext_chan_busy >= 0) { + dfs->dfs_rinfo.ext_chan_busy_ts = + lmac_get_tsf64(dfs->dfs_pdev_obj); + dfs->dfs_rinfo.dfs_ext_chan_busy = ext_chan_busy; + } else { + dfs_get_cached_ext_chan_busy(dfs, &ext_chan_busy); + } + + adjust_thresh = + dfs_adjust_thresh_per_chan_busy(ext_chan_busy, thresh); + dfs_debug(dfs, WLAN_DEBUG_DFS2, + " filterID=%d extchanbusy=%d adjust_thresh=%d", + rf->rf_pulseid, ext_chan_busy, adjust_thresh); + + thresh += adjust_thresh; + } + + return thresh; +} + +uint32_t dfs_round(int32_t val) +{ + uint32_t ival, rem; + + if (val < 0) + return 0; + ival = val/100; + rem = val - (ival * 100); + if (rem < 50) + return ival; + else + return ival + 1; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_partial_offload_radar.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_partial_offload_radar.c new file mode 100644 index 0000000000000000000000000000000000000000..db3bd30cc365e34f98c736cec987a5d39ed88a9e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_partial_offload_radar.c @@ -0,0 +1,644 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2011, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
 IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: This file has radar table and initialization function for Beeliner
 * family of chipsets.
 */

#include "../dfs.h"
#include "wlan_dfs_mlme_api.h"
#include "wlan_dfs_utils_api.h"
#include "wlan_dfs_lmac_api.h"
#include "../dfs_internal.h"
#include "../dfs_partial_offload_radar.h"
#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST)
#include "../dfs_process_radar_found_ind.h"
#endif

/**
 * struct dfs_pulse dfs_fcc_radars - FCC radar table for Offload chipsets.
 */
static struct dfs_pulse dfs_fcc_radars[] = {
	/* FCC TYPE 1 */
	{18, 1, 700, 700, 0, 4, 5, 0, 1, 18, 0, 3, 1, 5, 0, 0},
	{18, 1, 350, 350, 0, 4, 5, 0, 1, 18, 0, 3, 0, 5, 0, 0},

	/* FCC TYPE 6 */
	{9, 1, 3003, 3003, 1, 7, 5, 0, 1, 18, 0, 0, 1, 1000, 0, 1},

	/* FCC TYPE 2 */
	{23, 5, 4347, 6666, 0, 4, 11, 0, 7, 22, 0, 3, 0, 5, 0, 2},

	/* FCC TYPE 3 */
	{18, 10, 2000, 5000, 0, 4, 8, 6, 13, 22, 0, 3, 0, 5, 0, 5},

	/* FCC TYPE 4 */
	{16, 15, 2000, 5000, 0, 4, 7, 11, 23, 22, 0, 3, 0, 5, 0, 11},

	/* FCC NEW TYPE 1 */
	/* 518us to 938us pulses (min 56 pulses) */
	{57, 1, 1066, 1930, 0, 4, 20, 0, 1, 22, 0, 3, 0, 5, 0, 21},

	/* 938us to 2000 pulses (min 26 pulses) */
	{27, 1, 500, 1066, 0, 4, 13, 0, 1, 22, 0, 3, 0, 5, 0, 22},

	/* 2000 to 3067us pulses (min 17 pulses) */
	{18, 1, 325, 500, 0, 4, 9, 0, 1, 22, 0, 3, 0, 5, 0, 23},
};

/**
 * struct dfs_pulse dfs_mkk4_radars - MKK4 radar table for Offload chipsets.
 */
static struct dfs_pulse dfs_mkk4_radars[] = {

	/* following two filters are specific to Japan/MKK4 */
	/* 1389 +/- 6 us */
	{18, 1, 720, 720, 0, 4, 6, 0, 1, 18, 0, 3, 0, 5, 0, 17},

	/* 4000 +/- 6 us */
	{18, 4, 250, 250, 0, 4, 5, 1, 6, 18, 0, 3, 0, 5, 0, 18},

	/* 3846 +/- 7 us */
	{18, 5, 260, 260, 0, 4, 6, 1, 6, 18, 0, 3, 1, 5, 0, 19},

	/* following filters are common to both FCC and JAPAN */

	/* FCC TYPE 1 */
	{18, 1, 700, 700, 0, 4, 5, 0, 1, 18, 0, 3, 1, 5, 0, 0},
	{18, 1, 350, 350, 0, 4, 5, 0, 1, 18, 0, 3, 0, 5, 0, 0},

	/* FCC TYPE 6 */
	{9, 1, 3003, 3003, 1, 7, 5, 0, 1, 18, 0, 0, 1, 1000, 0, 1},

	/* FCC TYPE 2 */
	{23, 5, 4347, 6666, 0, 4, 11, 0, 7, 22, 0, 3, 0, 5, 0, 2},

	/* FCC TYPE 3 */
	{18, 10, 2000, 5000, 0, 4, 8, 6, 13, 22, 0, 3, 0, 5, 0, 5},

	/* FCC TYPE 4 */
	{16, 15, 2000, 5000, 0, 4, 7, 11, 23, 22, 0, 3, 0, 5, 0, 11},
};

/**
 * struct dfs_bin5pulse dfs_fcc_bin5pulses - FCC BIN5 pulses for Offload
 *                      chipsets.
 */
static struct dfs_bin5pulse dfs_fcc_bin5pulses[] = {
	{6, 28, 105, 12, 18, 5},
};

/**
 * struct dfs_bin5pulse dfs_jpn_bin5pulses - JAPAN BIN5 pulses for Offload
 *                      chipsets.
 */
static struct dfs_bin5pulse dfs_jpn_bin5pulses[] = {
	{5, 28, 105, 12, 22, 5},
};

/**
 * dfs_bin5pulse dfs_fcc_bin5pulses_ar900b - FCC BIN5 pulses for AR9300
 *                      chipsets.
 *
 * WAR : IR 42631
 * Beeliner 2 is tested at -65dbm as opposed to -62 dbm.
 * For FCC/JPN chirping pulses, HW reports RSSI value that is lower by 2dbm
 * when we enable noise floor calibration. This is specially true for
 * frequencies that are greater than center frequency and in VHT80 mode.
 */

static struct dfs_bin5pulse dfs_fcc_bin5pulses_ar900b[] = {
	{5, 28, 105, 12, 20, 5},
};

/**
 * dfs_bin5pulse dfs_jpn_bin5pulses_ar900b - JAPAN BIN5 pulses for AR9300
 *                      chipsets.
 */
static struct dfs_bin5pulse dfs_jpn_bin5pulses_ar900b[] = {
	{5, 28, 105, 12, 20, 5},
};

/**
 * dfs_bin5pulse dfs_fcc_bin5pulses_qca9984 - FCC BIN5 pulses for QCA9984
 *                      chipsets.
 * WAR : IR-83400
 * Cascade is tested at -65dbm as opposed to -62 dbm.
 * For FCC/JPN chirping pulses, HW reports RSSI value that is significantly
 * lower at left edge especially in HT80_80 mode. Also, duration may be
 * significantly low. This can result in false detection and we may have to
 * raise the threshold.
 */
static struct dfs_bin5pulse dfs_fcc_bin5pulses_qca9984[] = {
	{5, 20, 105, 12, 20, 0},
};

/**
 * dfs_bin5pulse dfs_jpn_bin5pulses_qca9984 - JAPAN BIN5 pulses for QCA9984
 *                      chipsets.
 */
static struct dfs_bin5pulse dfs_jpn_bin5pulses_qca9984[] = {
	{5, 20, 105, 12, 20, 0},
};

/**
 * dfs_pulse dfs_etsi_radars - ETSI radar table.
 */
static struct dfs_pulse dfs_etsi_radars[] = {

	/* EN 302 502 frequency hopping pulse */
	/* PRF 3000, 1us duration, 9 pulses per burst */
	{9, 1, 3000, 3000, 1, 4, 5, 0, 1, 18, 0, 0, 1, 1000, 0, 40},
	/* PRF 4500, 20us duration, 9 pulses per burst */
	{9, 20, 4500, 4500, 1, 4, 5, 19, 21, 18, 0, 0, 1, 1000, 0, 41},

	/* TYPE staggered pulse */
	/* Type 5*/
	/* 0.8-2us, 2-3 bursts,300-400 PRF, 10 pulses each */
	{30, 2, 300, 400, 2, 30, 3, 0, 5, 15, 0, 0, 1, 5, 0, 31},
	/* Type 6 */
	/* 0.8-2us, 2-3 bursts, 400-1200 PRF, 15 pulses each */
	{30, 2, 400, 1200, 2, 30, 7, 0, 5, 15, 0, 0, 0, 5, 0, 32},

	/* constant PRF based */
	/* Type 1 */
	/* 0.8-5us, 200 300 PRF, 10 pulses */
	{10, 5, 200, 400, 0, 4, 5, 0, 8, 15, 0, 0, 2, 5, 0, 33},
	{10, 5, 400, 600, 0, 4, 5, 0, 8, 15, 0, 0, 2, 5, 0, 37},
	{10, 5, 600, 800, 0, 4, 5, 0, 8, 15, 0, 0, 2, 5, 0, 38},
	{10, 5, 800, 1000, 0, 4, 5, 0, 8, 15, 0, 0, 2, 5, 0, 39},
	/* {10, 5, 200, 1000, 0, 6, 5, 0, 8, 15, 0, 0, 2, 5, 33}, */

	/* Type 2 */
	/* 0.8-15us, 200-1600 PRF, 15 pulses */
	{15, 15, 200, 1600, 0, 4, 8, 0, 18, 24, 0, 0, 0, 5, 0, 34},

	/* Type 3 */
	/* 0.8-15us, 2300-4000 PRF, 25 pulses*/
	{25, 15, 2300, 4000, 0, 4, 10, 0, 18, 24, 0, 0, 0, 5, 0, 35},

	/* Type 4 */
	/* 20-30us, 2000-4000 PRF, 20 pulses*/
	{20, 30, 2000, 4000, 0, 4, 6, 19, 33, 24, 0, 0, 0, 24, 1, 36},
};

/**
 * dfs_pulse dfs_china_radars - CHINA radar table.
 *
 * NOTE(review): each entry here carries 14 initializers while the FCC/ETSI
 * tables carry 16; per C aggregate-initialization rules the remaining
 * struct dfs_pulse fields are zero-filled, which shifts where the trailing
 * identifier value (50..59) lands relative to the 16-field tables — confirm
 * the intended rp_pulseid mapping against the struct definition.
 */
static struct dfs_pulse dfs_china_radars[] = {

	/* TYPE staggered pulse */
	/* Type 5*/
	/* 0.8-2us, 2-3 bursts,300-400 PRF, 12 pulses each */
	{36, 2, 300, 400, 2, 30, 3, 0, 5, 15, 0, 0, 1, 51},
	/* Type 6 */
	/* 0.8-2us, 2-3 bursts, 400-1200 PRF, 16 pulses each */
	{48, 2, 400, 1200, 2, 30, 7, 0, 5, 15, 0, 0, 0, 52},

	/* constant PRF based */
	/* Type 1 */
	/* 0.5-5us, 200 1000 PRF, 12 pulses */
	{12, 5, 200, 400, 0, 24, 5, 0, 8, 15, 0, 0, 2, 53},
	{12, 5, 400, 600, 0, 24, 5, 0, 8, 15, 0, 0, 2, 57},
	{12, 5, 600, 800, 0, 24, 5, 0, 8, 15, 0, 0, 2, 58},
	{12, 5, 800, 1000, 0, 24, 5, 0, 8, 15, 0, 0, 2, 59},

	/* Type 2 */
	/* 0.5-15us, 200-1600 PRF, 16 pulses */
	{16, 15, 200, 1600, 0, 24, 8, 0, 18, 24, 0, 0, 0, 54},

	/* Type 3 */
	/* 0.5-30us, 2300-4000 PRF, 24 pulses*/
	{24, 15, 2300, 4000, 0, 24, 10, 0, 33, 24, 0, 0, 0, 55},

	/* Type 4 */
	/* 20-30us, 2000-4000 PRF, 20 pulses*/
	{20, 30, 2000, 4000, 0, 24, 6, 19, 33, 24, 0, 0, 0, 56},

	/* 1us, 1000 PRF, 20 pulses */
	/* 1000 us PRI */
	{20, 1, 1000, 1000, 0, 6, 6, 0, 1, 18, 0, 3, 0, 50},
};

/**
 * dfs_pulse dfs_korea_radars - KOREA radar table.
 */
static struct dfs_pulse dfs_korea_radars[] = {
	/* Korea Type 1 */
	{18, 1, 700, 700, 0, 4, 5, 0, 1, 18, 0, 3, 1, 5, 0, 40},

	/* Korea Type 2 */
	{10, 1, 1800, 1800, 0, 4, 4, 0, 1, 18, 0, 3, 1, 5, 0, 41},

	/* Korea Type 3 */
	{70, 1, 330, 330, 0, 4, 20, 0, 2, 18, 0, 3, 1, 5, 0, 42},

	/* Korea Type 4 */
	{3, 1, 3003, 3003, 1, 7, 2, 0, 1, 18, 0, 0, 1, 1000, 0, 43},
};

/* NOTE(review): name is misspelled ("THERSH" -> "THRESH"); kept as-is in
 * case the remainder of this file references it.
 */
#define RSSI_THERSH_AR900B 15

/**
 * dfs_assign_fcc_pulse_table() - Assign FCC pulse table
 * @rinfo: Pointer to wlan_dfs_radar_tab_info structure.
 * @target_type: Target type.
 * @tx_ops: target tx ops.
 *
 * Picks the per-chip bin5 pulse table to pair with the common FCC
 * radar table.
 */
static inline void dfs_assign_fcc_pulse_table(
		struct wlan_dfs_radar_tab_info *rinfo,
		uint32_t target_type,
		struct wlan_lmac_if_target_tx_ops *tx_ops)
{
	rinfo->dfs_radars = dfs_fcc_radars;
	rinfo->numradars = QDF_ARRAY_SIZE(dfs_fcc_radars);

	if (tx_ops->tgt_is_tgt_type_ar900b(target_type) ||
			tx_ops->tgt_is_tgt_type_ipq4019(target_type)) {
		rinfo->b5pulses = dfs_fcc_bin5pulses_ar900b;
		rinfo->numb5radars = QDF_ARRAY_SIZE(dfs_fcc_bin5pulses_ar900b);
	} else if (tx_ops->tgt_is_tgt_type_qca9984(target_type) ||
			tx_ops->tgt_is_tgt_type_qca9888(target_type)) {
		rinfo->b5pulses = dfs_fcc_bin5pulses_qca9984;
		rinfo->numb5radars =
			QDF_ARRAY_SIZE(dfs_fcc_bin5pulses_qca9984);
	} else {
		rinfo->b5pulses = dfs_fcc_bin5pulses;
		rinfo->numb5radars = QDF_ARRAY_SIZE(dfs_fcc_bin5pulses);
	}
}

void dfs_get_po_radars(struct wlan_dfs *dfs)
{
	struct wlan_dfs_radar_tab_info rinfo;
	struct wlan_objmgr_psoc *psoc;
	struct wlan_lmac_if_target_tx_ops *tx_ops;
	int i;
	uint32_t target_type;
	int dfsdomain = DFS_FCC_DOMAIN;
	uint16_t ch_freq;
	uint16_t regdmn;

	/* Fetch current radar patterns from the lmac */
	qdf_mem_zero(&rinfo, sizeof(rinfo));

	/*
	 * Look up the current DFS regulatory domain and decide
	 * which radar pulses to use.
	 */
	dfsdomain = utils_get_dfsdomain(dfs->dfs_pdev_obj);
	target_type = lmac_get_target_type(dfs->dfs_pdev_obj);

	psoc = wlan_pdev_get_psoc(dfs->dfs_pdev_obj);
	if (!psoc) {
		dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "psoc is NULL");
		return;
	}

	tx_ops = &(psoc->soc_cb.tx_ops.target_tx_ops);
	switch (dfsdomain) {
	case DFS_FCC_DOMAIN:
		dfs_debug(dfs, WLAN_DEBUG_DFS_ALWAYS, "FCC domain");
		rinfo.dfsdomain = DFS_FCC_DOMAIN;
		dfs_assign_fcc_pulse_table(&rinfo, target_type, tx_ops);
		break;
	case DFS_CN_DOMAIN:
		dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS,
			 "FCC domain -- Country China(156) override FCC radar pattern"
			 );
		rinfo.dfsdomain = DFS_FCC_DOMAIN;
		/*
		 * China uses a radar pattern that is similar to ETSI but it
		 * follows FCC in all other respect like transmit power, CCA
		 * threshold etc.
		 */
		rinfo.dfs_radars = dfs_china_radars;
		rinfo.numradars = QDF_ARRAY_SIZE(dfs_china_radars);
		rinfo.b5pulses = NULL;
		rinfo.numb5radars = 0;
		break;
	case DFS_ETSI_DOMAIN:
		dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "ETSI domain");
		rinfo.dfsdomain = DFS_ETSI_DOMAIN;

		ch_freq = dfs->dfs_curchan->dfs_ch_freq;
		regdmn = utils_dfs_get_cur_rd(dfs->dfs_pdev_obj);

		/*
		 * The EN 302 502 frequency-hopping entries at the head of
		 * the ETSI table only apply in the 5.8 GHz band for the
		 * listed world regdomain pairs; otherwise skip past them.
		 */
		if (((regdmn == ETSI11_WORLD_REGDMN_PAIR_ID) ||
		     (regdmn == ETSI12_WORLD_REGDMN_PAIR_ID) ||
		     (regdmn == ETSI13_WORLD_REGDMN_PAIR_ID) ||
		     (regdmn == ETSI14_WORLD_REGDMN_PAIR_ID)) &&
		    DFS_CURCHAN_IS_58GHz(ch_freq)) {
			rinfo.dfs_radars = dfs_etsi_radars;
			rinfo.numradars = QDF_ARRAY_SIZE(dfs_etsi_radars);
		} else {
			uint8_t offset = ETSI_LEGACY_PULSE_ARR_OFFSET;

			rinfo.dfs_radars = &dfs_etsi_radars[offset];
			rinfo.numradars =
				QDF_ARRAY_SIZE(dfs_etsi_radars) - offset;
		}
		rinfo.b5pulses = NULL;
		rinfo.numb5radars = 0;
		break;
	case DFS_KR_DOMAIN:
		dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS,
			 "ETSI domain -- Korea(412)");
		rinfo.dfsdomain = DFS_ETSI_DOMAIN;

		/*
		 * So far we have treated Korea as part of ETSI and did not
		 * support any radar patters specific to Korea other than
		 * standard ETSI radar patterns. Ideally we would want to
		 * treat Korea as a different domain. This is something that
		 * we will address in the future. However, for now override
		 * ETSI tables for Korea.
		 */
		rinfo.dfs_radars = dfs_korea_radars;
		rinfo.numradars = QDF_ARRAY_SIZE(dfs_korea_radars);
		rinfo.b5pulses = NULL;
		rinfo.numb5radars = 0;
		break;
	case DFS_MKK4_DOMAIN:
		dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "MKK4 domain");
		rinfo.dfsdomain = DFS_MKK4_DOMAIN;
		rinfo.dfs_radars = dfs_mkk4_radars;
		rinfo.numradars = QDF_ARRAY_SIZE(dfs_mkk4_radars);

		if (tx_ops->tgt_is_tgt_type_ar900b(target_type) ||
				tx_ops->tgt_is_tgt_type_ipq4019(target_type)) {
			rinfo.b5pulses = dfs_jpn_bin5pulses_ar900b;
			rinfo.numb5radars = QDF_ARRAY_SIZE(
					dfs_jpn_bin5pulses_ar900b);
		} else if (tx_ops->tgt_is_tgt_type_qca9984(target_type) ||
				tx_ops->tgt_is_tgt_type_qca9888(target_type)) {
			rinfo.b5pulses = dfs_jpn_bin5pulses_qca9984;
			rinfo.numb5radars = QDF_ARRAY_SIZE
				(dfs_jpn_bin5pulses_qca9984);
		} else {
			rinfo.b5pulses = dfs_jpn_bin5pulses;
			rinfo.numb5radars = QDF_ARRAY_SIZE(
					dfs_jpn_bin5pulses);
		}
		break;
	default:
		dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "UNINIT domain");
		rinfo.dfsdomain = DFS_UNINIT_DOMAIN;
		rinfo.dfs_radars = NULL;
		rinfo.numradars = 0;
		rinfo.b5pulses = NULL;
		rinfo.numb5radars = 0;
		break;
	}

	if (tx_ops->tgt_is_tgt_type_ar900b(target_type) ||
			tx_ops->tgt_is_tgt_type_ipq4019(target_type) ||
			tx_ops->tgt_is_tgt_type_qca9984(target_type) ||
			tx_ops->tgt_is_tgt_type_qca9888(target_type)) {
		/* Beeliner WAR: lower RSSI threshold to improve detection of
		 * certain radar types
		 */
		/* Cascade WAR:
		 * Cascade can report lower RSSI near the channel boundary than
		 * expected. It can also report significantly low RSSI at center
		 * (as low as 16) at center. So we are lowering threshold for
		 * all types of radar for Cascade.
		 * This may increase the possibility of false radar detection.
		 * IR -- 083703, 083398, 083387
		 */

		for (i = 0; i < rinfo.numradars; i++)
			rinfo.dfs_radars[i].rp_rssithresh = RSSI_THERSH_AR900B;
	}

	dfs_init_radar_filters(dfs, &rinfo);
}

#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST)
void dfs_send_avg_params_to_fw(struct wlan_dfs *dfs,
			       struct dfs_radar_found_params *params)
{
	/* Thin forwarding wrapper; the target layer does the real work. */
	tgt_dfs_send_avg_params_to_fw(dfs->dfs_pdev_obj, params);
}

/**
 * dfs_no_res_from_fw_task() - The timer function that is called if there is no
 * response from fw after sending the average radar pulse parameters.
 */
static os_timer_func(dfs_no_res_from_fw_task)
{
	struct wlan_dfs *dfs = NULL;

	OS_GET_TIMER_ARG(dfs, struct wlan_dfs *);

	if (!dfs) {
		dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL");
		return;
	}

	dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "Host wait timer expired");

	/* Treat the silence as "no result" and act on the radar locally. */
	dfs->dfs_is_host_wait_running = 0;
	dfs->dfs_no_res_from_fw = 1;
	dfs_radarfound_action_generic(dfs, dfs->dfs_seg_id);
	dfs->dfs_seg_id = 0;
}

void dfs_host_wait_timer_init(struct wlan_dfs *dfs)
{
	qdf_timer_init(NULL,
		       &(dfs->dfs_host_wait_timer),
		       dfs_no_res_from_fw_task,
		       (void *)(dfs),
		       QDF_TIMER_TYPE_WAKE_APPS);
	/* -1 means "use the default timeout" until explicitly overridden. */
	dfs->dfs_status_timeout_override = -1;
}

QDF_STATUS dfs_set_override_status_timeout(struct wlan_dfs *dfs,
					   int status_timeout)
{
	if (!dfs) {
		dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	dfs->dfs_status_timeout_override = status_timeout;

	dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS,
		 "Host wait status timeout is now %s : %d",
		 (status_timeout == -1) ?
"default" : "overridden", + status_timeout); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS dfs_get_override_status_timeout(struct wlan_dfs *dfs, + int *status_timeout) +{ + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + *status_timeout = dfs->dfs_status_timeout_override; + + return QDF_STATUS_SUCCESS; +} + +/** + * dfs_extract_radar_found_params() - Copy the contents of average radar + * parameters to dfs_radar_found_params parameter structure. + * + * @dfs: Pointer to wlan_dfs structure which contains the average radar + * parameters. + * @params: Pointer to dfs_radar_found_params structure. + */ +static +void dfs_extract_radar_found_params(struct wlan_dfs *dfs, + struct dfs_radar_found_params *params) +{ + qdf_mem_zero(params, sizeof(*params)); + params->pri_min = dfs->dfs_average_pri; + params->pri_max = dfs->dfs_average_pri; + params->duration_min = dfs->dfs_average_duration; + params->duration_max = dfs->dfs_average_duration; + params->sidx_min = dfs->dfs_average_sidx; + params->sidx_max = dfs->dfs_average_sidx; + + /* Bangradar will not populate any of these average + * parameters as pulse is not received. If these variables + * are not resetted here, these go as radar_found params + * for bangradar if bangradar is issued after real radar. + */ + dfs->dfs_average_sidx = 0; + dfs->dfs_average_duration = 0; + dfs->dfs_average_pri = 0; +} + +void dfs_radarfound_action_fcc(struct wlan_dfs *dfs, uint8_t seg_id) +{ + struct dfs_radar_found_params params; + + qdf_mem_copy(&dfs->dfs_radar_found_chan, dfs->dfs_curchan, + sizeof(dfs->dfs_radar_found_chan)); + dfs_extract_radar_found_params(dfs, ¶ms); + dfs_send_avg_params_to_fw(dfs, ¶ms); + dfs->dfs_is_host_wait_running = 1; + dfs->dfs_seg_id = seg_id; + qdf_timer_mod(&dfs->dfs_host_wait_timer, + (dfs->dfs_status_timeout_override == + -1) ? 
HOST_DFS_STATUS_WAIT_TIMER_MS : + dfs->dfs_status_timeout_override); +} + +void dfs_host_wait_timer_reset(struct wlan_dfs *dfs) +{ + dfs->dfs_is_host_wait_running = 0; + qdf_timer_sync_cancel(&dfs->dfs_host_wait_timer); +} + +/** + * dfs_action_on_spoof_success() - DFS action on spoof test pass + * @dfs: Pointer to DFS object + */ +static void dfs_action_on_spoof_success(struct wlan_dfs *dfs) +{ + dfs->dfs_spoof_test_done = 1; + if (dfs->dfs_radar_found_chan.dfs_ch_freq == + dfs->dfs_curchan->dfs_ch_freq) { + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "cac timer started for channel %d", + dfs->dfs_curchan->dfs_ch_ieee); + dfs_start_cac_timer(dfs); + } else{ + dfs_remove_spoof_channel_from_nol(dfs); + } +} + +void dfs_action_on_fw_radar_status_check(struct wlan_dfs *dfs, + uint32_t *status) +{ + struct wlan_objmgr_pdev *dfs_pdev; + int no_chans_avail = 0; + int error_flag = 0; + + dfs_host_wait_timer_reset(dfs); + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "Host DFS status = %d", + *status); + + dfs_pdev = dfs->dfs_pdev_obj; + if (!dfs_pdev) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_pdev_obj is NULL"); + return; + } + + switch (*status) { + case HOST_DFS_STATUS_CHECK_PASSED: + if (dfs->dfs_average_params_sent) + dfs_action_on_spoof_success(dfs); + else + error_flag = 1; + break; + case HOST_DFS_STATUS_CHECK_FAILED: + dfs->dfs_spoof_check_failed = 1; + no_chans_avail = + dfs_mlme_rebuild_chan_list_with_non_dfs_channels(dfs_pdev); + dfs_mlme_restart_vaps_with_non_dfs_chan(dfs_pdev, + no_chans_avail); + break; + case HOST_DFS_STATUS_CHECK_HW_RADAR: + if (dfs->dfs_average_params_sent) { + if (dfs->dfs_radar_found_chan.dfs_ch_freq == + dfs->dfs_curchan->dfs_ch_freq) { + dfs_radarfound_action_generic( + dfs, + dfs->dfs_seg_id); + } else { + /* Else of this case, no action is needed as + * dfs_action would have been done at timer + * expiry itself. 
+ */ + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "DFS Action already taken"); + } + } else { + error_flag = 1; + } + break; + default: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Status event mismatch:%d, Ignoring it", + *status); + } + + dfs->dfs_average_params_sent = 0; + qdf_mem_zero(&dfs->dfs_radar_found_chan, sizeof(struct dfs_channel)); + + if (error_flag == 1) { + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Received imroper response %d. Discarding it", + *status); + } +} + +void dfs_reset_spoof_test(struct wlan_dfs *dfs) +{ + dfs->dfs_spoof_test_done = 0; + dfs->dfs_spoof_check_failed = 0; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_phyerr_tlv.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_phyerr_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..6cb60691b19e34cb575525255cca8dfd0683fc38 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_phyerr_tlv.c @@ -0,0 +1,755 @@ +/* + * Copyright (c) 2012, 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains TLV frame processing functions. 
+ */ + +#include "../dfs.h" +#include "../dfs_channel.h" +#include "../dfs_phyerr_tlv.h" +#include "wlan_dfs_mlme_api.h" +#include "../dfs_internal.h" + +#define AGC_MB_GAIN_THRESH1 68 +#define AGC_OTHER_GAIN_THRESH1 40 +#define AGC_MB_GAIN_THRESH2 80 +#define AGC_OTHER_GAIN_THRESH2 60 +#define AGC_GAIN_RSSI_THRESH 25 + +/* + * Until "fastclk" is stored in the DFS configuration. + */ +#define PERE_IS_OVERSAMPLING(_dfs) \ + (_dfs->dfs_caps.wlan_chip_is_over_sampled ? 1 : 0) + +/** + * dfs_sign_extend_32() - Calculates extended 32bit value. + * @v: Value. + * @nb: Offset. + * + * Return: Returns Extend vale. + */ +static int32_t dfs_sign_extend_32(uint32_t v, int nb) +{ + uint32_t m = 1U << (nb - 1); + + /* Chop off high bits, just in case. */ + v &= v & ((1U << nb) - 1); + + /* Extend */ + return (v ^ m) - m; +} + +/** + * dfs_calc_freq_offset() - Calculate the frequency offset. + * @sindex: signed bin index. + * @is_oversampling: oversampling mode + * + * Calculate the frequency offset from the given signed bin index from the + * radar summary report. This takes the oversampling mode into account. + * For oversampling, each bin has resolution 44MHz/128. For non-oversampling, + * each bin has resolution 40MHz/128. It returns kHz - ie, 1000th's of MHz. + */ +static int dfs_calc_freq_offset(int sindex, int is_oversampling) +{ + if (is_oversampling) + return sindex * (44000 / 128); + else + return sindex * (40000 / 128); +} + +/** + * dfs_radar_summary_print() - Prints the Radar summary. + * @dfs: Pointer to wlan_dfs structure. + * @rsu: Pointer rx_radar_status structure. 
+ */ +static void dfs_radar_summary_print(struct wlan_dfs *dfs, + struct rx_radar_status *rsu) +{ + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " pulsedur=%d", rsu->pulse_duration); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " rssi=%d", rsu->rssi); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " ischirp=%d", rsu->is_chirp); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " sidx=%d", rsu->sidx); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " raw tsf=%d", rsu->raw_tsf); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " tsf_offset=%d", rsu->tsf_offset); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " cooked tsf=%d", rsu->raw_tsf - rsu->tsf_offset); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " frequency offset=%d.%d MHz (oversampling=%d)", + (int)(rsu->freq_offset / 1000), + (int)abs(rsu->freq_offset % 1000), + PERE_IS_OVERSAMPLING(dfs)); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " agc_total_gain=%d", rsu->agc_total_gain); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + " agc_mb_gain=%d", rsu->agc_mb_gain); +} + +/** + * dfs_radar_summary_parse() - Parse the radar summary frame. + * @dfs: pointer to wlan_dfs structure. + * @buf: Phyerr buffer. + * @len: Phyerr buflen. + * @rsu: Pointer to rx_radar_status structure. + * + * The frame contents _minus_ the TLV are passed in. + */ +static void dfs_radar_summary_parse(struct wlan_dfs *dfs, + const char *buf, + size_t len, + struct rx_radar_status *rsu) +{ + uint32_t rs[3]; + + /* Drop out if we have < 2 DWORDs available. */ + if (len < sizeof(rs)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR | + WLAN_DEBUG_DFS_PHYERR_SUM, + "len (%zu) < expected (%zu)!", len, sizeof(rs)); + } + + /* + * Since the TLVs may be unaligned for some reason + * we take a private copy into aligned memory. + * This enables us to use the HAL-like accessor macros + * into the DWORDs to access sub-DWORD fields. 
+ */ + qdf_mem_copy(rs, buf, sizeof(rs)); + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "two 32 bit values are: %08x %08x", rs[0], rs[1]); + + /* Populate the fields from the summary report. */ + rsu->tsf_offset = + MS(rs[RADAR_REPORT_PULSE_REG_2], RADAR_REPORT_PULSE_TSF_OFFSET); + rsu->pulse_duration = + MS(rs[RADAR_REPORT_PULSE_REG_2], RADAR_REPORT_PULSE_DUR); + rsu->is_chirp = + MS(rs[RADAR_REPORT_PULSE_REG_1], RADAR_REPORT_PULSE_IS_CHIRP); + rsu->sidx = dfs_sign_extend_32(MS(rs[RADAR_REPORT_PULSE_REG_1], + RADAR_REPORT_PULSE_SIDX), + 10); + rsu->freq_offset = + dfs_calc_freq_offset(rsu->sidx, PERE_IS_OVERSAMPLING(dfs)); + + /* These are only relevant if the pulse is a chirp. */ + rsu->delta_peak = dfs_sign_extend_32(MS(rs[RADAR_REPORT_PULSE_REG_1], + RADAR_REPORT_PULSE_DELTA_PEAK), 6); + rsu->delta_diff = + MS(rs[RADAR_REPORT_PULSE_REG_1], RADAR_REPORT_PULSE_DELTA_DIFF); + rsu->agc_total_gain = + MS(rs[RADAR_REPORT_PULSE_REG_1], RADAR_REPORT_AGC_TOTAL_GAIN); + rsu->agc_mb_gain = MS(rs[RADAR_REPORT_PULSE_REG_2], + RADAR_REPORT_PULSE_AGC_MB_GAIN); +} + +/** + * dfs_radar_fft_search_report_parse () - Parse FFT report. + * @dfs: pointer to wlan_dfs structure. + * @buf: Phyerr buffer. + * @len: Phyerr buflen. + * @rsu: Pointer to rx_radar_status structure. + */ +static void dfs_radar_fft_search_report_parse(struct wlan_dfs *dfs, + const char *buf, + size_t len, + struct rx_search_fft_report *rsfr) +{ + uint32_t rs[3]; + + /* Drop out if we have < 2 DWORDs available. */ + if (len < sizeof(rs)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR | + WLAN_DEBUG_DFS_PHYERR_SUM, + "len (%zu) < expected (%zu)!", len, sizeof(rs)); + } + + /* + * Since the TLVs may be unaligned for some reason we take a private + * copy into aligned memory. This enables us to use the HAL-like + * accessor macros into the DWORDs to access sub-DWORD fields. 
+ */ + qdf_mem_copy(rs, buf, sizeof(rs)); + + rsfr->total_gain_db = + MS(rs[SEARCH_FFT_REPORT_REG_1], SEARCH_FFT_REPORT_TOTAL_GAIN_DB); + + rsfr->base_pwr_db = + MS(rs[SEARCH_FFT_REPORT_REG_1], SEARCH_FFT_REPORT_BASE_PWR_DB); + + rsfr->fft_chn_idx = + MS(rs[SEARCH_FFT_REPORT_REG_1], SEARCH_FFT_REPORT_FFT_CHN_IDX); + + rsfr->peak_sidx = dfs_sign_extend_32(MS(rs[SEARCH_FFT_REPORT_REG_1], + SEARCH_FFT_REPORT_PEAK_SIDX), 12); + + rsfr->relpwr_db = + MS(rs[SEARCH_FFT_REPORT_REG_2], SEARCH_FFT_REPORT_RELPWR_DB); + + rsfr->avgpwr_db = + MS(rs[SEARCH_FFT_REPORT_REG_2], SEARCH_FFT_REPORT_AVGPWR_DB); + + rsfr->peak_mag = + MS(rs[SEARCH_FFT_REPORT_REG_2], SEARCH_FFT_REPORT_PEAK_MAG); + + rsfr->num_str_bins_ib = + MS(rs[SEARCH_FFT_REPORT_REG_2], SEARCH_FFT_REPORT_NUM_STR_BINS_IB); + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "two 32 bit values are: %08x %08x", rs[0], rs[1]); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->total_gain_db = %d", rsfr->total_gain_db); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->base_pwr_db = %d", rsfr->base_pwr_db); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->fft_chn_idx = %d", rsfr->fft_chn_idx); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->peak_sidx = %d", rsfr->peak_sidx); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->relpwr_db = %d", rsfr->relpwr_db); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->avgpwr_db = %d", rsfr->avgpwr_db); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->peak_mag = %d", rsfr->peak_mag); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->num_str_bins_ib = %d", rsfr->num_str_bins_ib); + + if (dfs->dfs_caps.wlan_chip_is_ht160) { + rsfr->seg_id = + MS(rs[SEARCH_FFT_REPORT_REG_3], SEARCH_FFT_REPORT_SEG_ID); + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "rsfr->seg_id = %d", rsfr->seg_id); + } +} + +/** + * dfs_check_for_false_detection() - Check for possible false detection on + * beeliner this may also work for Cascade but parameters + * (e.g. AGC_MB_GAIN_THRESH1) may be different for Cascade. 
+ * @dfs: pointer to wlan_dfs structure. + * @rs: pointer to rx_radar_status structure. + * @false_detect: Pointer to save false detect value. + * @rssi: RSSI. + */ +static inline void dfs_check_for_false_detection( + struct wlan_dfs *dfs, + struct rx_radar_status *rs, + bool *false_detect, + uint8_t rssi) +{ + bool is_ht160 = false; + bool is_false_detect = false; + + is_ht160 = dfs->dfs_caps.wlan_chip_is_ht160; + is_false_detect = dfs->dfs_caps.wlan_chip_is_false_detect; + + if ((dfs->dfs_caps.wlan_chip_is_over_sampled == 0) && + (is_ht160 == 0 && is_false_detect)) { + if ((rs->agc_mb_gain > AGC_MB_GAIN_THRESH1) && + ((rs->agc_total_gain - rs->agc_mb_gain) < + AGC_OTHER_GAIN_THRESH1)) { + *false_detect = true; + } + + if ((rs->agc_mb_gain > AGC_MB_GAIN_THRESH2) && + ((rs->agc_total_gain - rs->agc_mb_gain) > + AGC_OTHER_GAIN_THRESH2) && + (rssi > AGC_GAIN_RSSI_THRESH)) { + *false_detect = true; + } + } + + if (*false_detect) + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "setting false_detect to TRUE because of mb/total_gain/rssi, agc_mb_gain=%d, agc_total_gain=%d, rssi=%d", + rs->agc_mb_gain, rs->agc_total_gain, rssi); +} + +/** + * dfs_tlv_parse_frame () - Parse a Peregrine BB TLV frame. + * @dfs: pointer to wlan_dfs structure. + * @rs: pointer to rx_radar_status structure. + * @rsfr: Pointer to rx_search_fft_report structure. + * @buf: Phyerr buffer. + * @len: Phyerr buflen. + * @rssi: RSSI. + * @first_short_fft_peak_mag: first short FFT peak_mag. + * @psidx_diff: Pointer to psidx diff. + * + * This routine parses each TLV, prints out what's going on and calls an + * appropriate sub-function. Since the TLV format doesn't _specify_ all TLV + * components are DWORD aligned, we must treat them as not and access the + * fields appropriately. 
+ */ +static int dfs_tlv_parse_frame(struct wlan_dfs *dfs, + struct rx_radar_status *rs, + struct rx_search_fft_report *rsfr, + const char *buf, + size_t len, + uint8_t rssi, + int *first_short_fft_peak_mag, + int16_t *psidx_diff) +{ + int i = 0; + uint32_t tlv_hdr[1]; + bool false_detect = false; + /* total search FFT reports including short and long */ + int8_t sfr_count = 0; + int16_t first_short_fft_psidx = 0; + + *psidx_diff = 0; + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "total length = %zu bytes", len); + while ((i < len) && (false_detect == false)) { + /* Ensure we at least have four bytes. */ + if ((len - i) < sizeof(tlv_hdr)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR | + WLAN_DEBUG_DFS_PHYERR_SUM, + "ran out of bytes, len=%zu, i=%d", len, i); + return 0; + } + + /* + * Copy the offset into the header, so the DWORD style access + * macros can be used. + */ + qdf_mem_copy(&tlv_hdr, buf + i, sizeof(tlv_hdr)); + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "HDR: TLV SIG=0x%x, TAG=0x%x, LEN=%d bytes", + MS(tlv_hdr[TLV_REG], TLV_SIG), + MS(tlv_hdr[TLV_REG], TLV_TAG), + MS(tlv_hdr[TLV_REG], TLV_LEN)); + + /* + * Sanity check the length field is available in the remaining + * frame. Drop out if this isn't the case - we can't trust the + * rest of the TLV entries. + */ + if (MS(tlv_hdr[TLV_REG], TLV_LEN) + i >= len) { + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "TLV oversize: TLV LEN=%d, available=%zu, i=%d", + MS(tlv_hdr[TLV_REG], TLV_LEN), + len, i); + break; + } + + /* Skip the TLV header - one DWORD. */ + i += sizeof(tlv_hdr); + + /* Handle the payload. 
*/ + switch (MS(tlv_hdr[TLV_REG], TLV_SIG)) { + case TAG_ID_RADAR_PULSE_SUMMARY: /* Radar pulse summary */ + dfs_radar_summary_parse(dfs, buf + i, + MS(tlv_hdr[TLV_REG], TLV_LEN), rs); + + dfs_check_for_false_detection(dfs, rs, &false_detect, + rssi); + break; + case TAG_ID_SEARCH_FFT_REPORT: + sfr_count++; + dfs_radar_fft_search_report_parse(dfs, buf + i, + MS(tlv_hdr[TLV_REG], TLV_LEN), rsfr); + + /* we are interested in the first short FFT report's + * peak_mag for this value to be reliable, we must + * ensure that + * BB_srch_fft_ctrl_4.radar_fft_short_rpt_scl is set to + * 0. + */ + if (sfr_count == 1) { + *first_short_fft_peak_mag = rsfr->peak_mag; + first_short_fft_psidx = rsfr->peak_sidx; + } + + /* + * Check for possible false detection on Peregrine. + * we examine search FFT report and make the following + * assumption as per algorithms group's input: + * (1) There may be multiple TLV + * (2) We make false detection decision solely based on + * the first TLV + * (3) If the first TLV is a search FFT report then we + * check the peak_mag value. + * When RSSI is equal to dfs->wlan_dfs_false_rssi_thres + * (default 50) and peak_mag is less than + * 2 * dfs->wlan_dfs_peak_mag (default 40) we treat it + * as false detect. Please note that 50 is not a true + * RSSI estimate, but value indicated by HW for RF + * saturation event. + */ + if (PERE_IS_OVERSAMPLING(dfs) && + (sfr_count == 1) && + (rssi == dfs->wlan_dfs_false_rssi_thres) && + (rsfr->peak_mag < (2 * dfs->wlan_dfs_peak_mag)) + ) { + false_detect = true; + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "setting false_detect to TRUE because of false_rssi_thres"); + } + + /* + * The first FFT report indicated by (sfr_count == 1) + * should correspond to the first short FFT report from + * HW and the second FFT report indicated by + * (sfr_count == 2) should correspond to the first long + * FFT report from HW for the same pulse. 
The short and + * log FFT reports have a factor of 4 difference in + * resolution; hence the need to multiply by 4 when + * computing the psidx_diff. + */ + if (sfr_count == 2) + *psidx_diff = rsfr->peak_sidx - + 4 * first_short_fft_psidx; + + break; + default: + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "unknown entry, SIG=0x%02x", + MS(tlv_hdr[TLV_REG], TLV_SIG)); + } + + /* Skip the payload. */ + i += MS(tlv_hdr[TLV_REG], TLV_LEN); + } + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, "done"); + + return false_detect ? 0 : 1; +} + +/** + * dfs_tlv_calc_freq_info() - Calculate the channel centre in MHz. + * @dfs: pointer to wlan_dfs structure. + * @rs: pointer to rx_radar_status structure. + * + * Return: Returns the channel center. + */ +static int dfs_tlv_calc_freq_info(struct wlan_dfs *dfs, + struct rx_radar_status *rs) +{ + uint32_t chan_centre; + uint32_t chan_width; + int chan_offset; + + /* For now, just handle up to VHT80 correctly. */ + if (!dfs->dfs_curchan) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_curchan is null"); + return 0; + /* + * For now, the only 11ac channel with freq1/freq2 setup is + * VHT80. Should have a flag macro to check this! + */ + } else if (WLAN_IS_CHAN_11AC_VHT80(dfs->dfs_curchan)) { + /* + * 11AC, so cfreq1/cfreq2 are setup. + * If it's 80+80 this won't work - need to use seg + * appropriately! + */ + chan_centre = dfs_mlme_ieee2mhz(dfs->dfs_pdev_obj, + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg1, + dfs->dfs_curchan->dfs_ch_flags); + } else { + /* + * HT20/HT40. + * This is hard-coded - it should be 5 or 10 for half/quarter + * appropriately. + */ + chan_width = 20; + + /* Grab default channel centre. */ + chan_centre = dfs_chan2freq(dfs->dfs_curchan); + + /* Calculate offset based on HT40U/HT40D and VHT40U/VHT40D. 
*/ + if (WLAN_IS_CHAN_11N_HT40PLUS(dfs->dfs_curchan) || + dfs->dfs_curchan->dfs_ch_flags & + WLAN_CHAN_VHT40PLUS) + chan_offset = chan_width; + else if (WLAN_IS_CHAN_11N_HT40MINUS(dfs->dfs_curchan) || + dfs->dfs_curchan->dfs_ch_flags & + WLAN_CHAN_VHT40MINUS) + chan_offset = -chan_width; + else + chan_offset = 0; + + /* Calculate new _real_ channel centre. */ + chan_centre += (chan_offset / 2); + } + + /* Return ev_chan_centre in MHz. */ + return chan_centre; +} + +/** + * dfs_tlv_calc_event_freq_pulse() - Calculate the centre frequency and + * low/high range for a radar pulse event. + * @dfs: pointer to wlan_dfs structure. + * @rs: pointer to rx_radar_status structure. + * @freq_centre: center frequency + * @freq_lo: lower bounds of frequency. + * @freq_hi: upper bounds of frequency. + * + * XXX TODO: Handle half/quarter rates correctly! + * XXX TODO: handle VHT160 correctly! + * XXX TODO: handle VHT80+80 correctly! + * + * Return: Returns 1. + */ +static int dfs_tlv_calc_event_freq_pulse(struct wlan_dfs *dfs, + struct rx_radar_status *rs, + uint32_t *freq_centre, + uint32_t *freq_lo, + uint32_t *freq_hi) +{ + int chan_width; + int chan_centre; + + /* Fetch the channel centre frequency in MHz. */ + chan_centre = dfs_tlv_calc_freq_info(dfs, rs); + + /* Convert to KHz. */ + chan_centre *= 1000; + + /* + * XXX hard-code event width to be 2 * bin size for now; + * XXX this needs to take into account the core clock speed + * XXX for half/quarter rate mode. + */ + if (PERE_IS_OVERSAMPLING(dfs)) + chan_width = (44000 * 2 / 128); + else + chan_width = (40000 * 2 / 128); + + /* XXX adjust chan_width for half/quarter rate! */ + + /* Now we can do the math to figure out the correct channel range. 
*/ + (*freq_centre) = (uint32_t) (chan_centre + rs->freq_offset); + (*freq_lo) = (uint32_t) ((chan_centre + rs->freq_offset) - chan_width); + (*freq_hi) = (uint32_t) ((chan_centre + rs->freq_offset) + chan_width); + + return 1; +} + +/** + * dfs_tlv_calc_event_freq_chirp() - Calculate the event freq. + * @dfs: pointer to wlan_dfs structure. + * @rs: pointer to rx_radar_status structure. + * @freq_centre: center frequency + * @freq_lo: lower bounds of frequency. + * @freq_hi: upper bounds of frequency. + * + * The chirp bandwidth in KHz is defined as: + * totalBW(KHz) = delta_peak(mean) + * * [ (bin resolution in KHz) / (radar_fft_long_period in uS) ] + * * pulse_duration (us) + * The bin resolution depends upon oversampling. + * For now, we treat the radar_fft_long_period as a hard-coded 8uS. + * + * Return: Returns 1 + */ +static int dfs_tlv_calc_event_freq_chirp(struct wlan_dfs *dfs, + struct rx_radar_status *rs, + uint32_t *freq_centre, + uint32_t *freq_lo, + uint32_t *freq_hi) +{ + int32_t bin_resolution; /* KHz * 100 */ + int32_t radar_fft_long_period = 8; /* microseconds */ + int32_t delta_peak; + int32_t pulse_duration; + int32_t total_bw; + int32_t chan_centre; + int32_t freq_1, freq_2; + + /* + * KHz isn't enough resolution here! + * So treat it as deci-hertz (10Hz) and convert back to KHz later. 
+ */ + + if (PERE_IS_OVERSAMPLING(dfs)) + bin_resolution = (OVER_SAMPLING_FREQ * HUNDRED) / NUM_BINS; + else + bin_resolution = (SAMPLING_FREQ * HUNDRED) / NUM_BINS; + + delta_peak = rs->delta_peak; + pulse_duration = rs->pulse_duration; + + total_bw = delta_peak * (bin_resolution / radar_fft_long_period) * + pulse_duration; + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR | WLAN_DEBUG_DFS_PHYERR_SUM, + "delta_peak=%d, pulse_duration=%d, bin_resolution=%d.%dKHz, radar_fft_long_period=%d, total_bw=%d.%ldKHz", + delta_peak, pulse_duration, bin_resolution / THOUSAND, + bin_resolution % THOUSAND, radar_fft_long_period, + total_bw / HUNDRED, + (long)abs(total_bw % HUNDRED)); + + total_bw /= HUNDRED; /* back to KHz */ + /* Grab the channel centre frequency in MHz. */ + chan_centre = dfs_tlv_calc_freq_info(dfs, rs); + + /* Early abort! */ + if (chan_centre == 0) { + (*freq_centre) = 0; + return 0; + } + + /* Convert to KHz. */ + chan_centre *= THOUSAND; + + /* + * Sidx is the starting frequency; total_bw is a signed value and for + * negative chirps (ie, moving down in frequency rather than up) the end + * frequency may be less than the start frequency. + */ + if (total_bw > 0) { + freq_1 = chan_centre + rs->freq_offset; + freq_2 = chan_centre + rs->freq_offset + total_bw; + } else { + freq_1 = chan_centre + rs->freq_offset + total_bw; + freq_2 = chan_centre + rs->freq_offset; + } + + (*freq_lo) = (uint32_t)(freq_1); + (*freq_hi) = (uint32_t)(freq_2); + (*freq_centre) = (uint32_t) (freq_1 + (abs(total_bw) / 2)); + + return 1; +} + +/** + * dfs_tlv_calc_event_freq() - Calculate the centre and band edge frequencies + * of the given radar event. + * @dfs: Pointer to wlan_dfs structure. + * @rs: Pointer to rx_radar_status structure. + * @freq_centre: Center frequency + * @freq_lo: Lower bounds of frequency. + * @freq_hi: Upper bounds of frequency. 
+ */ +static int dfs_tlv_calc_event_freq(struct wlan_dfs *dfs, + struct rx_radar_status *rs, + uint32_t *freq_centre, + uint32_t *freq_lo, + uint32_t *freq_hi) +{ + if (rs->is_chirp) + return dfs_tlv_calc_event_freq_chirp(dfs, rs, freq_centre, + freq_lo, freq_hi); + else + return dfs_tlv_calc_event_freq_pulse(dfs, rs, freq_centre, + freq_lo, freq_hi); +} + +int dfs_process_phyerr_bb_tlv(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + uint8_t rssi, + uint8_t ext_rssi, + uint32_t rs_tstamp, + uint64_t fulltsf, + struct dfs_phy_err *e) +{ + struct rx_radar_status rs; + struct rx_search_fft_report rsfr; + int first_short_fft_peak_mag = 0; + int16_t psidx_diff; + + qdf_mem_zero(&rs, sizeof(rs)); + qdf_mem_zero(&rsfr, sizeof(rsfr)); + + /* + * Add the ppdu_start/ppdu_end fields given to us by the upper layers. + * The firmware gives us a summary set of parameters rather than the + * whole PPDU_START/PPDU_END descriptor contenst. + */ + rs.rssi = rssi; + rs.raw_tsf = rs_tstamp; + + /* Try parsing the TLV set. */ + if (!dfs_tlv_parse_frame(dfs, &rs, &rsfr, buf, datalen, rssi, + &first_short_fft_peak_mag, &psidx_diff)) + return 0; + + /* For debugging, print what we have parsed. */ + dfs_radar_summary_print(dfs, &rs); + + /* Populate dfs_phy_err from rs. */ + qdf_mem_set(e, sizeof(*e), 0); + e->rssi = rs.rssi; + e->dur = rs.pulse_duration; + e->is_pri = 1; /* Always PRI for now */ + e->is_ext = 0; + e->is_dc = 0; + e->is_early = 0; + + /* + * XXX TODO: add a "chirp detection enabled" capability or config bit + * somewhere, in case for some reason the hardware chirp detection AND + * FFTs are disabled. + * For now, assume this hardware always does chirp detection. 
+ */ + e->do_check_chirp = 1; + e->is_hw_chirp = !!(rs.is_chirp); + e->is_sw_chirp = 0; /* We don't yet do software chirp checking */ + + e->fulltsf = fulltsf; + e->rs_tstamp = rs.raw_tsf - rs.tsf_offset; + + /* XXX error check */ + (void)dfs_tlv_calc_event_freq(dfs, &rs, &e->freq, &e->freq_lo, + &e->freq_hi); + + e->seg_id = rsfr.seg_id; + e->sidx = rs.sidx; + e->freq_offset_khz = rs.freq_offset; + e->peak_mag = first_short_fft_peak_mag; + e->total_gain = rs.agc_total_gain; + e->mb_gain = rs.agc_mb_gain; + e->relpwr_db = rsfr.relpwr_db; + e->pulse_delta_peak = rs.delta_peak; + e->pulse_psidx_diff = psidx_diff; + e->pulse_delta_diff = rs.delta_diff; + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR_SUM, + "fbin=%d, freq=%d.%d MHz, raw tsf=%u, offset=%d, cooked tsf=%u, rssi=%d, dur=%d, is_chirp=%d, fulltsf=%llu, freq=%d.%d MHz, freq_lo=%d.%dMHz, freq_hi=%d.%d MHz", + rs.sidx, (int) (rs.freq_offset / 1000), + (int) abs(rs.freq_offset % 1000), rs.raw_tsf, rs.tsf_offset, + e->rs_tstamp, rs.rssi, rs.pulse_duration, (int)rs.is_chirp, + (unsigned long long) fulltsf, (int)e->freq / 1000, + (int) abs(e->freq) % 1000, (int)e->freq_lo / 1000, + (int) abs(e->freq_lo) % 1000, (int)e->freq_hi / 1000, + (int) abs(e->freq_hi) % 1000); + + dfs_debug(dfs, WLAN_DEBUG_DFS_FALSE_DET, + "ts=%u, dur=%d, rssi=%d, freq_offset=%d.%dMHz, is_chirp=%d, seg_id=%d, peak_mag=%d, total_gain=%d, mb_gain=%d, relpwr_db=%d, delta_peak=%d, delta_diff=%d, psidx_diff=%d", + e->rs_tstamp, rs.pulse_duration, rs.rssi, + (int)e->freq_offset_khz / 1000, + (int)abs(e->freq_offset_khz) % 1000, (int)rs.is_chirp, + rsfr.seg_id, rsfr.peak_mag, rs.agc_total_gain, rs.agc_mb_gain, + rsfr.relpwr_db, + rs.delta_peak, + rs.delta_diff, + psidx_diff); + + return 1; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_process_phyerr.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_process_phyerr.c new file mode 100644 index 
0000000000000000000000000000000000000000..bb4aff53302b4f260eac696ba54959834f7f7574 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_process_phyerr.c @@ -0,0 +1,995 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2010, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: For each radar pulse that the HW detects, a single radar PHY error is + * reported to the driver. This PHY error contains information like the RSSI, + * the pulse duration, the pulse location (primary/extension/DC) and possibly + * FFT data. + */ + +#include "../dfs.h" +#include "../dfs_zero_cac.h" +#include "../dfs_channel.h" +#include "wlan_dfs_mlme_api.h" +#include "../dfs_internal.h" + +/** + * dfs_get_event_freqwidth() - Get frequency width. + * @dfs: Pointer to wlan_dfs structure. + * + * Return: Return the frequency width for the current operating channel. + * This isn't the channel width - it's how wide the reported event may be. + * For HT20 this is 20MHz. For HT40 on Howl and later it'll still be 20MHz + * - the hardware returns either pri or ext channel. 
+ */ +static inline int dfs_get_event_freqwidth(struct wlan_dfs *dfs) +{ + /* Handle edge cases during startup/transition, shouldn't happen! */ + if (!dfs) + return 0; + + if (!dfs->dfs_curchan) + return 0; + + /* + * For now, assume 20MHz wide - but this is incorrect when operating in + * half/quarter mode! + */ + return 20; +} + +/** + * dfs_get_event_freqcentre() - Get event frequency centre. + * @dfs: Pointer to wlan_dfs structure. + * @is_pri: detected on primary channel. + * @is_ext: detected on extension channel. + * @is_dc: detected at DC. + * + * Return the centre frequency for the current operating channel and event. + * This is for post-Owl 11n chips which report pri/extension channel events. + */ +static inline uint16_t dfs_get_event_freqcentre(struct wlan_dfs *dfs, + int is_pri, + int is_ext, + int is_dc) +{ + int chan_offset = 0, chan_width; + + /* Handle edge cases during startup/transition, shouldn't happen! */ + if (!dfs) + return 0; + if (!dfs->dfs_curchan) + return 0; + + /* + * For wide channels, DC and ext frequencies need a bit of hand-holding + * based on whether it's an upper or lower channel. + */ + chan_width = dfs_get_event_freqwidth(dfs); + + if (WLAN_IS_CHAN_11N_HT40PLUS(dfs->dfs_curchan)) + chan_offset = chan_width; + else if (WLAN_IS_CHAN_11N_HT40MINUS(dfs->dfs_curchan)) + chan_offset = -chan_width; + else + chan_offset = 0; + + /* + * Check for DC events first - the sowl code may just set all the bits + * together. + */ + if (is_dc) { + /* XXX TODO: Should DC events be considered 40MHz wide here? */ + return dfs_chan2freq( + dfs->dfs_curchan) + (chan_offset / 2); + } + + /* + * For non-wide channels, the centre frequency is just dfs_ch_freq. + * The centre frequency for pri events is still dfs_ch_freq. 
+ */ + if (is_pri) + return dfs_chan2freq(dfs->dfs_curchan); + + if (is_ext) + return dfs_chan2freq(dfs->dfs_curchan) + chan_width; + + return dfs_chan2freq(dfs->dfs_curchan); +} + +int dfs_process_phyerr_owl(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + uint8_t rssi, + uint8_t ext_rssi, + uint32_t rs_tstamp, + uint64_t fulltsf, + struct dfs_phy_err *e) +{ + const char *cbuf = (const char *) buf; + uint8_t dur; + int event_width; + + dfs->wlan_dfs_stats.owl_phy_errors++; + + /* + * HW cannot detect extension channel radar so it only passes us primary + * channel radar data. + */ + if (datalen == 0) + dur = 0; + else + dur = ((uint8_t *) cbuf)[0]; + + /* This is a spurious event; toss. */ + if (rssi == 0 && dur == 0) { + dfs->wlan_dfs_stats.datalen_discards++; + return 0; + } + + /* Fill out dfs_phy_err with the information we have at hand. */ + qdf_mem_set(e, sizeof(*e), 0); + e->rssi = rssi; + e->dur = dur; + e->is_pri = 1; + e->is_ext = 0; + e->is_dc = 0; + e->is_early = 1; + e->fulltsf = fulltsf; + e->rs_tstamp = rs_tstamp; + + /* + * Owl only ever reports events on the primary channel. It doesn't + * even see events on the secondary channel. 
+ */ + event_width = dfs_get_event_freqwidth(dfs); + e->freq = dfs_get_event_freqcentre(dfs, 1, 0, 0) * 1000; + e->freq_lo = e->freq - (event_width / 2) * 1000; + e->freq_hi = e->freq + (event_width / 2) * 1000; + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR_SUM, + "rssi=%u dur=%u, freq=%d MHz, freq_lo=%d MHz, freq_hi=%d MHz", + rssi, dur, e->freq/1000, e->freq_lo/1000, + e->freq_hi / 1000); + + return 1; +} + +int dfs_process_phyerr_sowl(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + uint8_t rssi, + uint8_t ext_rssi, + uint32_t rs_tstamp, + uint64_t fulltsf, + struct dfs_phy_err *e) +{ +#define EXT_CH_RADAR_FOUND 0x02 +#define PRI_CH_RADAR_FOUND 0x01 +#define EXT_CH_RADAR_EARLY_FOUND 0x04 + const char *cbuf = (const char *)buf; + uint8_t dur = 0; + uint8_t pulse_bw_info, pulse_length_ext, pulse_length_pri; + int pri_found = 0, ext_found = 0; + int early_ext = 0; + int event_width; + + /* + * If radar can be detected on the extension channel, datalen zero + * pulses are bogus, discard them. + */ + if (!datalen) { + dfs->wlan_dfs_stats.datalen_discards++; + return 0; + } + + /* Ensure that we have at least three bytes of payload. */ + if (datalen < 3) { + dfs_debug(dfs, WLAN_DEBUG_DFS, + "short error frame (%d bytes)", datalen); + dfs->wlan_dfs_stats.datalen_discards++; + return 0; + } + + /* + * Fetch the payload directly - the compiler will happily generate + * byte-read instructions with a const char * cbuf pointer. + */ + pulse_length_pri = cbuf[datalen - 3]; + pulse_length_ext = cbuf[datalen - 2]; + pulse_bw_info = cbuf[datalen - 1]; + + /* + * Only the last 3 bits of the BW info are relevant, they indicate + * which channel the radar was detected in. + */ + pulse_bw_info &= 0x07; + + /* If pulse on DC, both primary and extension flags will be set */ + if (((pulse_bw_info & EXT_CH_RADAR_FOUND) && + (pulse_bw_info & PRI_CH_RADAR_FOUND))) { + /* + * Conducted testing, when pulse is on DC, both pri and ext + * durations are reported to be same. 
Radiated testing, when + * pulse is on DC, differentpri and ext durations are reported, + * so take the larger of the two. + */ + if (pulse_length_ext >= pulse_length_pri) { + dur = pulse_length_ext; + ext_found = 1; + } else { + dur = pulse_length_pri; + pri_found = 1; + } + dfs->wlan_dfs_stats.dc_phy_errors++; + } else { + if (pulse_bw_info & EXT_CH_RADAR_FOUND) { + dur = pulse_length_ext; + pri_found = 0; + ext_found = 1; + dfs->wlan_dfs_stats.ext_phy_errors++; + } + if (pulse_bw_info & PRI_CH_RADAR_FOUND) { + dur = pulse_length_pri; + pri_found = 1; + ext_found = 0; + dfs->wlan_dfs_stats.pri_phy_errors++; + } + if (pulse_bw_info & EXT_CH_RADAR_EARLY_FOUND) { + dur = pulse_length_ext; + pri_found = 0; + ext_found = 1; + early_ext = 1; + dfs->wlan_dfs_stats.early_ext_phy_errors++; + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "EARLY ext channel dur=%u rssi=%u datalen=%d", + dur, rssi, datalen); + } + if (!pulse_bw_info) { + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "ERROR channel dur=%u rssi=%u pulse_bw_info=0x%x datalen MOD 4 = %d", + dur, rssi, pulse_bw_info, (datalen & 0x3)); + /* + * Bogus bandwidth info received in descriptor, so + * ignore this PHY error. + */ + dfs->wlan_dfs_stats.bwinfo_errors++; + return 0; + } + } + + /* + * Always use combined RSSI reported, unless RSSI reported on + * extension is stronger. + */ + if ((ext_rssi > rssi) && (ext_rssi < 128)) + rssi = ext_rssi; + + /* Fill out the rssi/duration fields from above. */ + qdf_mem_set(e, sizeof(*e), 0); + e->rssi = rssi; + e->dur = dur; + e->is_pri = pri_found; + e->is_ext = ext_found; + e->is_dc = !!(((pulse_bw_info & EXT_CH_RADAR_FOUND) && + (pulse_bw_info & PRI_CH_RADAR_FOUND))); + e->is_early = early_ext; + e->fulltsf = fulltsf; + e->rs_tstamp = rs_tstamp; + + /* Sowl and later can report pri/ext events. 
*/ + event_width = dfs_get_event_freqwidth(dfs); + e->freq = dfs_get_event_freqcentre(dfs, e->is_pri, e->is_ext, + e->is_dc) * 1000; + e->freq_lo = e->freq - (event_width / 2) * 1000; + e->freq_hi = e->freq + (event_width / 2) * 1000; + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR_SUM, + "pulse_bw_info=0x%x pulse_length_ext=%u pulse_length_pri=%u rssi=%u ext_rssi=%u, freq=%d MHz, freq_lo=%d MHz, freq_hi=%d MHz", + pulse_bw_info, pulse_length_ext, pulse_length_pri, + rssi, ext_rssi, e->freq/1000, e->freq_lo/1000, e->freq_hi/1000); +#undef EXT_CH_RADAR_FOUND +#undef PRI_CH_RADAR_FOUND +#undef EXT_CH_RADAR_EARLY_FOUND + + return 1; +} + +int dfs_process_phyerr_merlin(struct wlan_dfs *dfs, + void *buf, + uint16_t datalen, + uint8_t rssi, + uint8_t ext_rssi, + uint32_t rs_tstamp, + uint64_t fulltsf, + struct dfs_phy_err *e) +{ + const char *cbuf = (const char *) buf; + uint8_t pulse_bw_info = 0; + + /* Process using the sowl code. */ + if (!dfs_process_phyerr_sowl(dfs, buf, datalen, rssi, ext_rssi, + rs_tstamp, fulltsf, e)) { + return 0; + } + + /* + * For osprey (and Merlin) bw_info has implication for selecting RSSI + * value. So re-fetch the bw_info field so the RSSI values can be + * appropriately overridden. + */ + pulse_bw_info = cbuf[datalen - 1]; + + switch (pulse_bw_info & 0x03) { + case 0x00: + /* No radar in ctrl or ext channel */ + rssi = 0; + break; + case 0x01: + /* Radar in ctrl channel */ + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "RAW RSSI: rssi=%u ext_rssi=%u", rssi, ext_rssi); + if (ext_rssi >= (rssi + 3)) { + /* + * Cannot use ctrl channel RSSI if extension channel is + * stronger. 
+ */ + rssi = 0; + } + break; + case 0x02: + /* Radar in extension channel */ + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "RAW RSSI: rssi=%u ext_rssi=%u", rssi, ext_rssi); + if (rssi >= (ext_rssi + 12)) { + /* + * Cannot use extension channel RSSI if control channel + * is stronger + */ + rssi = 0; + } else { + rssi = ext_rssi; + } + break; + case 0x03: + /* When both are present use stronger one */ + if (rssi < ext_rssi) + rssi = ext_rssi; + break; + } + + /* + * Override the rssi decision made by the sowl code. The rest of the + * fields (duration, timestamp, etc) are left untouched. + */ + e->rssi = rssi; + + return 1; +} + +/** + * dfs_dump_phyerr_contents() - Dump the phyerr contents. + * @d: Phyerr buffer. + * @len: Phyerr buf length. + */ + +static void dfs_dump_phyerr_contents(const char *d, int len) +{ + int i, n, bufsize = 64; + + /* + * This is statically sized for a 4-digit address + 16 * 2 digit data + * string. It's done so the printk() passed to the kernel is an entire + * line, so the kernel logging code will atomically print it. Otherwise + * we'll end up with interleaved lines with output from other kernel + * threads. + */ + char buf[64]; + + /* Initial conditions */ + buf[0] = '\n'; + n = 0; + + for (i = 0; i < len; i++) { + if (i % 16 == 0) + n += snprintf(buf + n, bufsize - n, "%04x: ", i); + + n += snprintf(buf + n, bufsize - n, "%02x ", d[i] & 0xff); + if (i % 16 == 15) { + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, "%s", buf); + n = 0; + buf[0] = '\0'; + } + } + + /* Print the final line if we didn't print it above. */ + if (n != 0) + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, "%s", buf); +} + +/** + * dfs_bump_up_bin5_pulse_dur() - Bump up to a random BIN 5 pulse duration. + * @dfs: Pointer to wlan_dfs structure. + * @e: Pointer to dfs_phy_err structure. + * @slope: Slope value. 
+ */ +static inline void dfs_bump_up_bin5_pulse_dur( + struct wlan_dfs *dfs, + struct dfs_phy_err *e, + int slope) +{ + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, "old dur %d slope =%d", + e->dur, slope); + + e->is_sw_chirp = 1; + /* bump up to a random bin5 pulse duration */ + if (e->dur < MIN_BIN5_DUR) + e->dur = dfs_get_random_bin5_dur(dfs, e->fulltsf); + + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, "new dur %d", e->dur); +} + +/** + * dfs_filter_short_pulses() - Filter short pulses. + * @dfs: Pointer to wlan_dfs structure. + * @e: Pointer to dfs_phy_err structure. + * @retval: Return value + * + * Rssi is not accurate for short pulses, so donot filter based on that for + * short duration pulses. + */ +static inline void dfs_filter_short_pulses( + struct wlan_dfs *dfs, + struct dfs_phy_err *e, + int *retval) +{ + if (dfs->dfs_caps.wlan_dfs_ext_chan_ok) { + if ((e->rssi < dfs->dfs_rinfo.rn_minrssithresh && + (e->dur > MAX_DUR_FOR_LOW_RSSI)) || + e->dur > (dfs->dfs_rinfo.rn_maxpulsedur)) { + dfs->wlan_dfs_stats.rssi_discards++; + *retval = 1; + } + } else if (e->rssi < dfs->dfs_rinfo.rn_minrssithresh || + e->dur > dfs->dfs_rinfo.rn_maxpulsedur) { + dfs->wlan_dfs_stats.rssi_discards++; + *retval = 1; + } + + if (*retval) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "%s pulse is discarded: dur=%d, maxpulsedur=%d, rssi=%d, minrssi=%d", + (dfs->dfs_caps.wlan_dfs_ext_chan_ok) ? + "Extension channel" : "", + e->dur, dfs->dfs_rinfo.rn_maxpulsedur, + e->rssi, dfs->dfs_rinfo.rn_minrssithresh); + } +} + +/** + * dfs_set_chan_index() - Set channel index. + * @dfs: Pointer to wlan_dfs structure. + * @e: Pointer to dfs_phy_err structure. + * @event: Pointer to dfs_event structure. 
+ */ +static inline void dfs_set_chan_index( + struct wlan_dfs *dfs, + struct dfs_phy_err *e, + struct dfs_event *event) +{ + if (e->is_pri) { + event->re_chanindex = dfs->dfs_curchan_radindex; + } else { + event->re_chanindex = dfs->dfs_extchan_radindex; + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "%s New extension channel event is added to queue", + (event->re_chanindex == -1) ? + "- phyerr on ext channel" : ""); + } +} + +/** + * dfs_is_second_seg_radar_disabled() - Check for second segment radar disabled. + * @dfs: Pointer to wlan_dfs structure. + * @seg_id: Segment id. + * + * Return: true if the second segment RADAR is enabled else false. + */ +static bool dfs_is_second_seg_radar_disabled( + struct wlan_dfs *dfs, int seg_id) +{ + if ((seg_id == SEG_ID_SECONDARY) && + !(dfs->dfs_proc_phyerr & DFS_SECOND_SEGMENT_RADAR_EN)) { + dfs_debug(dfs, WLAN_DEBUG_DFS3, + "Second segment radar detection is disabled"); + return true; + } + + return false; +} + +void dfs_process_phyerr(struct wlan_dfs *dfs, void *buf, uint16_t datalen, + uint8_t r_rssi, uint8_t r_ext_rssi, uint32_t r_rs_tstamp, + uint64_t r_fulltsf) +{ + struct dfs_event *event; + struct dfs_phy_err e; + int empty; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + if (dfs->dfs_ignore_dfs) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, "ignoring dfs"); + return; + } + + /* + * EV 129487: If radar detection is disabled, do not process PHY error + * data. + */ + + if (!(dfs->dfs_proc_phyerr & DFS_RADAR_EN)) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "DFS_RADAR_EN not set in dfs->dfs_proc_phyerr"); + return; + } + + /* + * The combined_rssi_ok support has been removed. This was only clear + * for Owl. + * XXX TODO: re-add this; it requires passing in the ctl/ext + * RSSI set from the RX status descriptor. + * XXX TODO : this may be done for us from the legacy phy error path in + * wlan_dev; please review that code. 
+ */ + + /* + * At this time we have a radar pulse that we need to examine and + * queue. But if dfs_process_radarevent already detected radar and set + * CHANNEL_INTERFERENCE flag then do not queue any more radar data. + * When we are in a new channel this flag will be clear and we will + * start queueing data for new channel. (EV74162) + */ + if (dfs->dfs_debug_mask & WLAN_DEBUG_DFS_PHYERR_PKT) + dfs_dump_phyerr_contents(buf, datalen); + + if (WLAN_IS_CHAN_RADAR(dfs->dfs_curchan)) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "Radar already found in the channel, do not queue radar data"); + return; + } + + dfs->wlan_dfs_stats.total_phy_errors++; + dfs_debug(dfs, WLAN_DEBUG_DFS2, "phyerr %d len %d", + dfs->wlan_dfs_stats.total_phy_errors, datalen); + + /* + * Hardware stores this as 8 bit signed value. we will cap it at 0 if it + * is a negative number. + */ + if (r_rssi & 0x80) + r_rssi = 0; + + if (r_ext_rssi & 0x80) + r_ext_rssi = 0; + + qdf_mem_set(&e, sizeof(e), 0); + + /* + * This is a bit evil - instead of just passing in the chip version, the + * existing code uses a set of HAL capability bits to determine what is + * possible. + * The way I'm decoding it is thus: + * + DFS enhancement? Merlin or later + * + DFS extension channel? Sowl or later. (Howl?) + * + otherwise, Owl (and legacy.) + */ + if (dfs->dfs_caps.wlan_chip_is_bb_tlv) { + if (dfs_process_phyerr_bb_tlv(dfs, buf, datalen, r_rssi, + r_ext_rssi, r_rs_tstamp, r_fulltsf, &e) == 0) { + dfs->dfs_phyerr_reject_count++; + return; + } + + if (dfs->dfs_phyerr_freq_min > e.freq) + dfs->dfs_phyerr_freq_min = e. freq; + + if (dfs->dfs_phyerr_freq_max < e.freq) + dfs->dfs_phyerr_freq_max = e. 
freq; + } else if (dfs->dfs_caps.wlan_dfs_use_enhancement) { + if (dfs_process_phyerr_merlin(dfs, buf, datalen, r_rssi, + r_ext_rssi, r_rs_tstamp, r_fulltsf, &e) == 0) + return; + } else if (dfs->dfs_caps.wlan_dfs_ext_chan_ok) { + if (dfs_process_phyerr_sowl(dfs, buf, datalen, r_rssi, + r_ext_rssi, r_rs_tstamp, r_fulltsf, &e) == 0) + return; + } else { + if (dfs_process_phyerr_owl(dfs, buf, datalen, r_rssi, + r_ext_rssi, r_rs_tstamp, r_fulltsf, &e) == 0) + return; + } + + /* + * If the hardware supports radar reporting on the extension channel + * it will supply FFT data for longer radar pulses. + * TLV chips don't go through this software check - the hardware + * check should be enough. If we want to do software checking + * later on then someone will have to craft an FFT parser + * suitable for the TLV FFT data format. + */ + if ((!dfs->dfs_caps.wlan_chip_is_bb_tlv) && + dfs->dfs_caps.wlan_dfs_ext_chan_ok) { + /* + * HW has a known issue with chirping pulses injected at or + * around DC in 40MHz mode. Such pulses are reported with much + * lower durations and SW then discards them because they do + * not fit the minimum bin5 pulse duration. To work around this + * issue, if a pulse is within a 10us range of the bin5 min + * duration, check if the pulse is chirping. If the pulse is + * chirping, bump up the duration to the minimum bin5 duration. + * This makes sure that a valid chirping pulse will not be + * discarded because of incorrect low duration. TBD - Is it + * possible to calculate the 'real' duration of the pulse using + * the slope of the FFT data? TBD - Use FFT data to + * differentiate between radar pulses and false PHY errors. + * This will let us reduce the number of false alarms seen. 
+ * BIN 5 chirping pulses are only for FCC or Japan MMK4 domain + */ + if (((dfs->dfsdomain == DFS_FCC_DOMAIN) || + (dfs->dfsdomain == DFS_MKK4_DOMAIN)) && + (e.dur >= MAYBE_BIN5_DUR) && (e.dur < MAX_BIN5_DUR)) { + int add_dur; + int slope = 0, dc_found = 0; + + /* + * Set the event chirping flags; as we're doing an + * actual chirp check. + */ + e.do_check_chirp = 1; + e.is_hw_chirp = 0; + e.is_sw_chirp = 0; + + /* + * dfs_check_chirping() expects is_pri and is_ext to + * be '1' for true and '0' for false for now, as the + * function itself uses these values in constructing + * things rather than testing them + */ + add_dur = dfs_check_chirping(dfs, buf, datalen, + (e.is_pri ? 1 : 0), + (e.is_ext ? 1 : 0), &slope, &dc_found); + if (add_dur) { + dfs_bump_up_bin5_pulse_dur(dfs, &e, slope); + } else { + /* Set the duration so that it is rejected. */ + e.is_sw_chirp = 0; + e.dur = MAX_BIN5_DUR + 100; + dfs_debug(dfs, WLAN_DEBUG_DFS_PHYERR, + "is_chirping = %d dur=%d", + add_dur, e.dur); + } + } else { + /* + * We have a pulse that is either bigger than + * MAX_BIN5_DUR or less than MAYBE_BIN5_DUR + */ + if ((dfs->dfsdomain == DFS_FCC_DOMAIN) || + (dfs->dfsdomain == DFS_MKK4_DOMAIN)) { + /* + * Would this result in very large pulses + * wrapping around to become short pulses? + */ + if (e.dur >= MAX_BIN5_DUR) { + /* + * Set the duration so that it is + * rejected. + */ + e.dur = MAX_BIN5_DUR + 50; + } + } + } + } + + /* + * Add the parsed, checked and filtered entry to the radar pulse + * event list. This is then checked by dfs_radar_processevent(). + * + * XXX TODO: some filtering is still done below this point - fix this! + */ + WLAN_DFSEVENTQ_LOCK(dfs); + empty = STAILQ_EMPTY(&(dfs->dfs_eventq)); + WLAN_DFSEVENTQ_UNLOCK(dfs); + if (empty) + return; + + /* + * If the channel is a turbo G channel, then the event is for the + * adaptive radio (AR) pattern matching rather than radar detection. 
+ */ + if ((dfs->dfs_curchan->dfs_ch_flags & CHANNEL_108G) == CHANNEL_108G) { + if (!(dfs->dfs_proc_phyerr & DFS_AR_EN)) { + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "DFS_AR_EN not enabled"); + return; + } + WLAN_DFSEVENTQ_LOCK(dfs); + event = STAILQ_FIRST(&(dfs->dfs_eventq)); + if (!event) { + WLAN_DFSEVENTQ_UNLOCK(dfs); + dfs_debug(dfs, WLAN_DEBUG_DFS, + "no more events space left"); + return; + } + STAILQ_REMOVE_HEAD(&(dfs->dfs_eventq), re_list); + WLAN_DFSEVENTQ_UNLOCK(dfs); + event->re_rssi = e.rssi; + event->re_dur = e.dur; + event->re_full_ts = e.fulltsf; + event->re_ts = (e.rs_tstamp) & DFS_TSMASK; + event->re_chanindex = dfs->dfs_curchan_radindex; + event->re_flags = 0; + + /* Handle chirp flags. */ + if (e.do_check_chirp) { + event->re_flags |= DFS_EVENT_CHECKCHIRP; + if (e.is_hw_chirp) + event->re_flags |= DFS_EVENT_HW_CHIRP; + if (e.is_sw_chirp) + event->re_flags |= DFS_EVENT_SW_CHIRP; + } + + WLAN_ARQ_LOCK(dfs); + STAILQ_INSERT_TAIL(&(dfs->dfs_arq), event, re_list); + WLAN_ARQ_UNLOCK(dfs); + } else { + if ((WLAN_IS_CHAN_DFS(dfs->dfs_curchan) || + ((WLAN_IS_CHAN_11AC_VHT160(dfs->dfs_curchan) || + WLAN_IS_CHAN_11AC_VHT80_80(dfs->dfs_curchan)) && + WLAN_IS_CHAN_DFS_CFREQ2(dfs->dfs_curchan))) || + (dfs_is_precac_timer_running(dfs))) { + + int retval = 0; + + if (!(dfs->dfs_proc_phyerr & DFS_RADAR_EN)) { + dfs_debug(dfs, WLAN_DEBUG_DFS3, + "DFS_RADAR_EN not enabled"); + return; + } + + dfs_filter_short_pulses(dfs, &e, &retval); + if (retval) + return; + + if (dfs_is_second_seg_radar_disabled(dfs, e.seg_id)) + return; + + /* Add the event to the list, if there's space. 
*/ + WLAN_DFSEVENTQ_LOCK(dfs); + event = STAILQ_FIRST(&(dfs->dfs_eventq)); + if (!event) { + WLAN_DFSEVENTQ_UNLOCK(dfs); + dfs_debug(dfs, WLAN_DEBUG_DFS, + "no more events space left"); + return; + } + STAILQ_REMOVE_HEAD(&(dfs->dfs_eventq), re_list); + WLAN_DFSEVENTQ_UNLOCK(dfs); + + dfs->dfs_phyerr_queued_count++; + dfs->dfs_phyerr_w53_counter++; + + event->re_dur = e.dur; + event->re_full_ts = e.fulltsf; + event->re_ts = (e.rs_tstamp) & DFS_TSMASK; + event->re_rssi = e.rssi; + + event->re_seg_id = e.seg_id; + event->re_sidx = e.sidx; + event->re_freq_offset_khz = e.freq_offset_khz; + event->re_peak_mag = e.peak_mag; + event->re_total_gain = e.total_gain; + event->re_mb_gain = e.mb_gain; + event->re_relpwr_db = e.relpwr_db; + event->re_delta_diff = e.pulse_delta_diff; + event->re_delta_peak = e.pulse_delta_peak; + event->re_psidx_diff = e.pulse_psidx_diff; + event->re_flags = 0; + event->re_flags |= DFS_EVENT_VALID_PSIDX_DIFF; + /* Handle chirp flags. */ + if (e.do_check_chirp) { + event->re_flags |= DFS_EVENT_CHECKCHIRP; + if (e.is_hw_chirp) + event->re_flags |= DFS_EVENT_HW_CHIRP; + if (e.is_sw_chirp) + event->re_flags |= DFS_EVENT_SW_CHIRP; + } + + /* Correctly set which channel is being reported on */ + dfs_set_chan_index(dfs, &e, event); + + WLAN_DFSQ_LOCK(dfs); + STAILQ_INSERT_TAIL(&(dfs->dfs_radarq), event, re_list); + WLAN_DFSQ_UNLOCK(dfs); + } + } + + /* + * Schedule the radar/AR task as appropriate. + * XXX isn't a lock needed for wlan_radar_tasksched? + */ + if (!STAILQ_EMPTY(&dfs->dfs_arq)) { + /* XXX shouldn't this be a task/timer too? 
*/ + dfs_process_ar_event(dfs, dfs->dfs_curchan); + } + if (!STAILQ_EMPTY(&dfs->dfs_radarq) && !dfs->wlan_radar_tasksched) { + dfs->wlan_radar_tasksched = 1; + qdf_timer_mod(&dfs->wlan_dfs_task_timer, 0); + } +#undef EXT_CH_RADAR_FOUND +#undef PRI_CH_RADAR_FOUND +#undef EXT_CH_RADAR_EARLY_FOUND +} + +#ifdef QCA_MCL_DFS_SUPPORT +void dfs_process_phyerr_filter_offload(struct wlan_dfs *dfs, + struct radar_event_info *wlan_radar_event) +{ + struct dfs_event *event; + int empty; + int do_check_chirp = 0; + int is_hw_chirp = 0; + int is_sw_chirp = 0; + int is_pri = 0; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + if (dfs->dfs_ignore_dfs) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, "ignoring dfs"); + return; + } + + if (!(dfs->dfs_proc_phyerr & DFS_RADAR_EN)) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "DFS_RADAR_EN not set in dfs->dfs_proc_phyerr"); + return; + } + + if (WLAN_IS_CHAN_RADAR(dfs->dfs_curchan)) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "Radar already found in the channel, do not queue radar data"); + return; + } + + dfs->wlan_dfs_stats.total_phy_errors++; + if (dfs->dfs_caps.wlan_chip_is_bb_tlv) { + do_check_chirp = 1; + is_pri = 1; + is_hw_chirp = wlan_radar_event->pulse_is_chirp; + + if ((uint32_t) dfs->dfs_phyerr_freq_min > + wlan_radar_event->pulse_center_freq) { + dfs->dfs_phyerr_freq_min = + (int)wlan_radar_event->pulse_center_freq; + } + + if (dfs->dfs_phyerr_freq_max < + (int)wlan_radar_event->pulse_center_freq) { + dfs->dfs_phyerr_freq_max = + (int)wlan_radar_event->pulse_center_freq; + } + } + + /* + * Now, add the parsed, checked and filtered + * radar phyerror event radar pulse event list. + * This event will then be processed by + * dfs_radar_processevent() to see if the pattern + * of pulses in radar pulse list match any radar + * singnature in the current regulatory domain. 
+ */ + + WLAN_DFSEVENTQ_LOCK(dfs); + empty = STAILQ_EMPTY(&(dfs->dfs_eventq)); + WLAN_DFSEVENTQ_UNLOCK(dfs); + if (empty) + return; + /* + * Add the event to the list, if there's space. + */ + WLAN_DFSEVENTQ_LOCK(dfs); + event = STAILQ_FIRST(&(dfs->dfs_eventq)); + if (!event) { + WLAN_DFSEVENTQ_UNLOCK(dfs); + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "%s: No more space left for queuing DFS Phyerror events", + __func__); + return; + } + STAILQ_REMOVE_HEAD(&(dfs->dfs_eventq), re_list); + WLAN_DFSEVENTQ_UNLOCK(dfs); + dfs->dfs_phyerr_queued_count++; + dfs->dfs_phyerr_w53_counter++; + event->re_dur = (uint8_t) wlan_radar_event->pulse_duration; + event->re_rssi = wlan_radar_event->rssi; + event->re_ts = wlan_radar_event->pulse_detect_ts & DFS_TSMASK; + event->re_full_ts = (((uint64_t) wlan_radar_event->upload_fullts_high) + << 32) | wlan_radar_event->upload_fullts_low; + + /* + * Index of peak magnitude + */ + event->re_sidx = wlan_radar_event->peak_sidx; + event->re_delta_diff = wlan_radar_event->delta_diff; + event->re_delta_peak = wlan_radar_event->delta_peak; + event->re_flags = 0; + if (wlan_radar_event->is_psidx_diff_valid) { + event->re_flags |= DFS_EVENT_VALID_PSIDX_DIFF; + event->re_psidx_diff = wlan_radar_event->psidx_diff; + } + + /* + * Handle chirp flags. 
+ */ + if (do_check_chirp) { + event->re_flags |= DFS_EVENT_CHECKCHIRP; + if (is_hw_chirp) + event->re_flags |= DFS_EVENT_HW_CHIRP; + if (is_sw_chirp) + event->re_flags |= DFS_EVENT_SW_CHIRP; + } + /* + * Correctly set which channel is being reported on + */ + if (is_pri) { + event->re_chanindex = (uint8_t) dfs->dfs_curchan_radindex; + } else { + if (dfs->dfs_extchan_radindex == -1) + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "%s phyerr on ext channel", __func__); + event->re_chanindex = (uint8_t) dfs->dfs_extchan_radindex; + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "%s:New extension channel event is added to queue", + __func__); + } + + WLAN_DFSQ_LOCK(dfs); + + STAILQ_INSERT_TAIL(&(dfs->dfs_radarq), event, re_list); + + empty = STAILQ_EMPTY(&dfs->dfs_radarq); + + WLAN_DFSQ_UNLOCK(dfs); + + if (!empty && !dfs->wlan_radar_tasksched) { + dfs->wlan_radar_tasksched = 1; + qdf_timer_mod(&dfs->wlan_dfs_task_timer, 0); + } +} +#endif + +void dfs_is_radar_enabled(struct wlan_dfs *dfs, int *ignore_dfs) +{ + *ignore_dfs = dfs->dfs_ignore_dfs; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_process_radarevent.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_process_radarevent.c new file mode 100644 index 0000000000000000000000000000000000000000..2996edf967dcf8d4bf7deba8fdc83fb70f8d1078 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_process_radarevent.c @@ -0,0 +1,1346 @@ +/* + * Copyright (c) 2013, 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2010, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This contains the functionality to process the radar event generated + * for a pulse. This will group together pulses and call various detection + * functions to figure out whether a valid radar has been detected. + */ + +#include "../dfs.h" +#include "../dfs_zero_cac.h" +#include "../dfs_channel.h" +#include "../dfs_internal.h" +#include "../dfs_process_radar_found_ind.h" +#include "wlan_dfs_utils_api.h" +#include "wlan_dfs_lmac_api.h" +#include "../dfs_partial_offload_radar.h" + +#define FREQ_5500_MHZ 5500 +#define FREQ_5500_MHZ 5500 + +#define DFS_MAX_FREQ_SPREAD (1375 * 1) +#define DFS_LARGE_PRI_MULTIPLIER 4 +#define DFS_W53_DEFAULT_PRI_MULTIPLIER 2 +#define DFS_INVALID_PRI_LIMIT 100 /* should we use 135? */ +#define DFS_BIG_SIDX 10000 + +#define FRAC_PRI_SCORE_ARRAY_SIZE 40 + +static char debug_dup[33]; +static int debug_dup_cnt; + +/** + * dfs_process_pulse_dur() - Process pulse duration. + * @dfs: Pointer to wlan_dfs structure. + * @re_dur: Duration. + * + * Convert the hardware provided duration to TSF ticks (usecs) taking the clock + * (fast or normal) into account. Legacy (pre-11n, Owl, Sowl, Howl) operate + * 5GHz using a 40MHz clock. Later 11n chips (Merlin, Osprey, etc) operate + * 5GHz using a 44MHz clock, so the reported pulse durations are different. + * Peregrine reports the pulse duration in microseconds regardless of the + * operating mode. (XXX TODO: verify this, obviously.) 
+ * + * The hardware returns the duration in a variety of formats, + * so it's converted from the hardware format to TSF (usec) + * values here. + * XXX TODO: this should really be done when the PHY error + * is processed, rather than way out here.. + * + * + * Return: Returns the duration. + */ +static inline uint8_t dfs_process_pulse_dur(struct wlan_dfs *dfs, + uint8_t re_dur) +{ + /* + * Short pulses are sometimes returned as having a duration of 0, + * so round those up to 1. + * XXX This holds true for BB TLV chips too, right? + */ + if (re_dur == 0) + return 1; + + /* + * For BB TLV chips, the hardware always returns microsecond pulse + * durations. + */ + if (dfs->dfs_caps.wlan_chip_is_bb_tlv) + return re_dur; + + /* + * This is for 11n and legacy chips, which may or may not use the 5GHz + * fast clock mode. + */ + /* Convert 0.8us durations to TSF ticks (usecs) */ + return (uint8_t)dfs_round((int32_t)((dfs->dur_multiplier)*re_dur)); +} + +/* + * dfs_print_radar_events() - Prints the Radar events. + * @dfs: Pointer to wlan_dfs structure. 
+ */ +static void dfs_print_radar_events(struct wlan_dfs *dfs) +{ + int i; + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "#Phyerr=%d, #false detect=%d, #queued=%d", + dfs->dfs_phyerr_count, dfs->dfs_phyerr_reject_count, + dfs->dfs_phyerr_queued_count); + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_phyerr_freq_min=%d, dfs_phyerr_freq_max=%d", + dfs->dfs_phyerr_freq_min, dfs->dfs_phyerr_freq_max); + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Total radar events detected=%d, entries in the radar queue follows:", + dfs->dfs_event_log_count); + + for (i = 0; (i < DFS_EVENT_LOG_SIZE) && (i < dfs->dfs_event_log_count); + i++) { + dfs_debug(dfs, WLAN_DEBUG_DFS, + "ts=%llu diff_ts=%u rssi=%u dur=%u, is_chirp=%d, seg_id=%d, sidx=%d, freq_offset=%d.%dMHz, peak_mag=%d, total_gain=%d, mb_gain=%d, relpwr_db=%d, delta_diff=%d, delta_peak=%d, psidx_diff=%d", + dfs->radar_log[i].ts, dfs->radar_log[i].diff_ts, + dfs->radar_log[i].rssi, dfs->radar_log[i].dur, + dfs->radar_log[i].is_chirp, dfs->radar_log[i].seg_id, + dfs->radar_log[i].sidx, + (int)dfs->radar_log[i].freq_offset_khz/1000, + (int)abs(dfs->radar_log[i].freq_offset_khz)%1000, + dfs->radar_log[i].peak_mag, + dfs->radar_log[i].total_gain, + dfs->radar_log[i].mb_gain, + dfs->radar_log[i].relpwr_db, + dfs->radar_log[i].delta_diff, + dfs->radar_log[i].delta_peak, + dfs->radar_log[i].psidx_diff); + } + dfs->dfs_event_log_count = 0; + dfs->dfs_phyerr_count = 0; + dfs->dfs_phyerr_reject_count = 0; + dfs->dfs_phyerr_queued_count = 0; + dfs->dfs_phyerr_freq_min = 0x7fffffff; + dfs->dfs_phyerr_freq_max = 0; +} + +/** + * dfs_confirm_radar() - This function checks for fractional PRI and jitter in + * sidx index to determine if the radar is real or not. + * @dfs: Pointer to dfs structure. + * @rf: Pointer to dfs_filter structure. + * @ext_chan_flag: ext chan flags. 
 */
static int dfs_confirm_radar(struct wlan_dfs *dfs,
		struct dfs_filter *rf,
		int ext_chan_flag)
{
	int i = 0;
	int index;
	struct dfs_delayline *dl = &rf->rf_dl;
	struct dfs_delayelem *de;
	uint64_t target_ts = 0;
	struct dfs_pulseline *pl;
	int start_index = 0, current_index, next_index;
	unsigned char scores[FRAC_PRI_SCORE_ARRAY_SIZE];
	uint32_t pri_margin;
	uint64_t this_diff_ts;
	uint32_t search_bin;

	unsigned char max_score = 0;
	int max_score_index = 0;

	pl = dfs->pulses;

	OS_MEMZERO(scores, sizeof(scores));
	/*
	 * scores[0] is the baseline: the number of matches at the full
	 * search PRI must already be rf_threshold, so seed it with that.
	 * scores[1..N] count matches at PRI/2, PRI/3, ... (fractional PRIs).
	 */
	scores[0] = rf->rf_threshold;

	pri_margin = dfs_get_pri_margin(dfs, ext_chan_flag,
			(rf->rf_patterntype == 1));

	/*
	 * Look for the entry that matches dl_seq_num_second.
	 * we need the time stamp and diff_ts from there.
	 */

	for (i = 0; i < dl->dl_numelems; i++) {
		index = (dl->dl_firstelem + i) & DFS_MAX_DL_MASK;
		de = &dl->dl_elems[index];
		if (dl->dl_seq_num_second == de->de_seq_num)
			target_ts = de->de_ts - de->de_time;
	}

	if (dfs->dfs_debug_mask & WLAN_DEBUG_DFS2) {
		dfs_print_delayline(dfs, &rf->rf_dl);

		/* print pulse line */
		dfs_debug(dfs, WLAN_DEBUG_DFS2,
				"%s: Pulse Line\n", __func__);
		for (i = 0; i < pl->pl_numelems; i++) {
			index = (pl->pl_firstelem + i) &
				DFS_MAX_PULSE_BUFFER_MASK;
			dfs_debug(dfs, WLAN_DEBUG_DFS2,
					"Elem %u: ts=%llu dur=%u, seq_num=%d, delta_peak=%d, psidx_diff=%d\n",
					i, pl->pl_elems[index].p_time,
					pl->pl_elems[index].p_dur,
					pl->pl_elems[index].p_seq_num,
					pl->pl_elems[index].p_delta_peak,
					pl->pl_elems[index].p_psidx_diff);
		}
	}

	/*
	 * Walk through the pulse line and find pulse with target_ts.
	 * Then continue until we find entry with seq_number dl_seq_num_stop.
	 */

	for (i = 0; i < pl->pl_numelems; i++) {
		index = (pl->pl_firstelem + i) & DFS_MAX_PULSE_BUFFER_MASK;
		if (pl->pl_elems[index].p_time == target_ts) {
			dl->dl_seq_num_start = pl->pl_elems[index].p_seq_num;
			start_index = index; /* save for future use */
		}
	}

	dfs_debug(dfs, WLAN_DEBUG_DFS2,
			"%s: target_ts=%llu, dl_seq_num_start=%d, dl_seq_num_second=%d, dl_seq_num_stop=%d\n",
			__func__, target_ts, dl->dl_seq_num_start,
			dl->dl_seq_num_second, dl->dl_seq_num_stop);

	/*
	 * Score every inter-pulse gap between dl_seq_num_start and
	 * dl_seq_num_stop against the candidate fractional PRIs.
	 * NOTE(review): if target_ts was never found above, start_index
	 * stays 0 and the walk starts from the head of the pulse buffer —
	 * presumably acceptable here; confirm against caller expectations.
	 */
	current_index = start_index;
	while (pl->pl_elems[current_index].p_seq_num < dl->dl_seq_num_stop) {
		next_index = (current_index + 1) & DFS_MAX_PULSE_BUFFER_MASK;
		this_diff_ts = pl->pl_elems[next_index].p_time -
			pl->pl_elems[current_index].p_time;

		/* Now update the score for this diff_ts */
		for (i = 1; i < FRAC_PRI_SCORE_ARRAY_SIZE; i++) {
			search_bin = dl->dl_search_pri / (i + 1);

			/*
			 * We do not give score to PRI that is lower than the
			 * limit.
			 */
			if (search_bin < DFS_INVALID_PRI_LIMIT)
				break;

			/*
			 * Increment the score if this_diff_ts belongs to this
			 * search_bin +/- margin.
			 */
			if ((this_diff_ts >= (search_bin - pri_margin)) &&
					(this_diff_ts <=
					 (search_bin + pri_margin))) {
				/*increment score */
				scores[i]++;
			}
		}
		current_index = next_index;
	}

	/* Find the best-scoring PRI bin (index 0 is the full search PRI). */
	for (i = 0; i < FRAC_PRI_SCORE_ARRAY_SIZE; i++)
		if (scores[i] > max_score) {
			max_score = scores[i];
			max_score_index = i;
		}

	/*
	 * If any fractional PRI out-scores the full PRI baseline, the
	 * pulse train is more consistent with a sub-multiple PRI, i.e.
	 * a false detect.
	 */
	if (max_score_index != 0) {
		dfs_debug(dfs, WLAN_DEBUG_DFS_ALWAYS,
				"Rejecting Radar since Fractional PRI detected: searchpri=%d, threshold=%d, fractional PRI=%d, Fractional PRI score=%d",
				dl->dl_search_pri, scores[0],
				dl->dl_search_pri/(max_score_index + 1),
				max_score);
		return 0;
	}


	/* Check for frequency spread */
	if (dl->dl_min_sidx > pl->pl_elems[start_index].p_sidx)
		dl->dl_min_sidx = pl->pl_elems[start_index].p_sidx;

	if (dl->dl_max_sidx < pl->pl_elems[start_index].p_sidx)
		dl->dl_max_sidx = pl->pl_elems[start_index].p_sidx;

	/* A real radar stays narrow in sidx; too wide a spread is a reject. */
	if ((dl->dl_max_sidx - dl->dl_min_sidx) > rf->rf_sidx_spread) {
		dfs_debug(dfs, WLAN_DEBUG_DFS_ALWAYS,
				"Rejecting Radar since frequency spread is too large : min_sidx=%d, max_sidx=%d, rf_sidx_spread=%d",
				dl->dl_min_sidx, dl->dl_max_sidx,
				rf->rf_sidx_spread);
		return 0;
	}

	/*
	 * Chirp validation: require enough combined delta_peak/psidx_diff
	 * matches when this filter demands the delta-peak check.
	 */
	if ((rf->rf_check_delta_peak) &&
			((dl->dl_delta_peak_match_count +
			  dl->dl_psidx_diff_match_count - 1) <
			 rf->rf_threshold)) {
		dfs_debug(dfs, WLAN_DEBUG_DFS_ALWAYS,
				"Rejecting Radar since delta peak values are invalid : dl_delta_peak_match_count=%d, dl_psidx_diff_match_count=%d, rf_threshold=%d",
				dl->dl_delta_peak_match_count,
				dl->dl_psidx_diff_match_count,
				rf->rf_threshold);
		return 0;
	}

	return 1;
}

/*
 * dfs_reject_on_pri() - Rejecting on individual filter based on min PRI .
 * @dfs: Pointer to wlan_dfs structure.
 * @rf: Pointer to dfs_filter structure.
 * @deltaT: deltaT value.
 * @this_ts: Timestamp.
+ */ +static inline bool dfs_reject_on_pri( + struct wlan_dfs *dfs, + struct dfs_filter *rf, + uint64_t deltaT, + uint64_t this_ts) +{ + if ((deltaT < rf->rf_minpri) && (deltaT != 0)) { + /* Second line of PRI filtering. */ + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "filterID %d : Rejecting on individual filter min PRI deltaT=%lld rf->rf_minpri=%u", + rf->rf_pulseid, (uint64_t)deltaT, + rf->rf_minpri); + return 1; + } + + if (rf->rf_ignore_pri_window > 0) { + if (deltaT < rf->rf_minpri) { + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "filterID %d : Rejecting on individual filter max PRI deltaT=%lld rf->rf_minpri=%u", + rf->rf_pulseid, (uint64_t)deltaT, + rf->rf_minpri); + /* But update the last time stamp. */ + rf->rf_dl.dl_last_ts = this_ts; + return 1; + } + } else { + /* + * The HW may miss some pulses especially with + * high channel loading. This is true for Japan + * W53 where channel loaoding is 50%. Also for + * ETSI where channel loading is 30% this can + * be an issue too. To take care of missing + * pulses, we introduce pri_margin multiplie. + * This is normally 2 but can be higher for W53. + */ + + if ((deltaT > (dfs->dfs_pri_multiplier * rf->rf_maxpri)) || + (deltaT < rf->rf_minpri)) { + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "filterID %d : Rejecting on individual filter max PRI deltaT=%lld rf->rf_minpri=%u", + rf->rf_pulseid, (uint64_t) deltaT, + rf->rf_minpri); + /* But update the last time stamp. */ + rf->rf_dl.dl_last_ts = this_ts; + return 1; + } + } + + return 0; +} + +/** + * dfs_confirm_radar_check() - Do additioal check to conirm radar except for + * the staggered, chirp FCC Bin 5, frequency hopping indicated by + * rf_patterntype == 1. + * @dfs: Pointer to wlan_dfs structure. + * @rf: Pointer to dfs_filter structure. + * @ext_chan_event_flag: Extension channel event flag + * @found: Pointer to radar found flag (return value). + * @false_radar_found: Pointer to false radar found (return value). 
 */

static inline void dfs_confirm_radar_check(
		struct wlan_dfs *dfs,
		struct dfs_filter *rf,
		int ext_chan_event_flag,
		int *found,
		int *false_radar_found)
{
	/* Pattern type 1 (chirp/hopping) filters skip the confirmation. */
	if (rf->rf_patterntype != 1) {
		*found = dfs_confirm_radar(dfs, rf, ext_chan_event_flag);
		/* A failed confirmation marks the whole event as false. */
		*false_radar_found = (*found == 1) ? 0 : 1;
	}
}

/**
 * __dfs_process_radarevent() - Run one radar event against every filter of a
 * filter type and report whether any filter matched (or proved the event
 * false).
 * @dfs: Pointer to wlan_dfs structure.
 * @ft: Filter type whose filters are walked.
 * @re: The radar event being processed.
 * @this_ts: 64-bit extended timestamp of the event.
 * @found: Set to 1 when a filter matches (return value).
 * @false_radar_found: Set to 1 when confirmation rejects the event
 *	(return value).
 */
void __dfs_process_radarevent(struct wlan_dfs *dfs,
		struct dfs_filtertype *ft,
		struct dfs_event *re,
		uint64_t this_ts,
		int *found,
		int *false_radar_found)
{
	int p;
	uint64_t deltaT = 0;
	int ext_chan_event_flag = 0;
	struct dfs_filter *rf = NULL;
	int8_t ori_rf_check_delta_peak = 0;

	/* Stop as soon as one filter matches or the event is proven false. */
	for (p = 0, *found = 0; (p < ft->ft_numfilters) &&
			(!(*found)) && !(*false_radar_found); p++) {
		rf = ft->ft_filters[p];
		if ((re->re_dur >= rf->rf_mindur) &&
				(re->re_dur <= rf->rf_maxdur)) {
			/* The above check is probably not necessary. */
			/* deltaT accounts for a TSF wrap since the last
			 * pulse seen by this filter's delay line.
			 */
			deltaT = (this_ts < rf->rf_dl.dl_last_ts) ?
				(int64_t)((DFS_TSF_WRAP - rf->rf_dl.dl_last_ts) +
						this_ts + 1) :
				this_ts - rf->rf_dl.dl_last_ts;

			if (dfs_reject_on_pri(dfs, rf, deltaT, this_ts))
				continue;

			dfs_add_pulse(dfs, rf, re, deltaT, this_ts);

			/*
			 * If this is an extension channel event, flag it for
			 * false alarm reduction.
			 */
			if (re->re_chanindex == dfs->dfs_extchan_radindex)
				ext_chan_event_flag = 1;

			if (rf->rf_patterntype == 2) {
				*found = dfs_staggered_check(dfs, rf,
						(uint32_t) deltaT, re->re_dur);
			} else {
				*found = dfs_bin_check(dfs, rf,
						(uint32_t) deltaT, re->re_dur,
						ext_chan_event_flag);

				if (*found) {
					/* Save the flag so it can be restored
					 * after the confirmation call below.
					 */
					ori_rf_check_delta_peak =
						rf->rf_check_delta_peak;
					/*
					 * If FW does not send valid psidx_diff
					 * Do not do chirp check.
					 */
					if (rf->rf_check_delta_peak &&
						(!(re->re_flags &
						DFS_EVENT_VALID_PSIDX_DIFF)))
						rf->rf_check_delta_peak = false;
					dfs_confirm_radar_check(dfs,
							rf, ext_chan_event_flag,
							found,
							false_radar_found);
					rf->rf_check_delta_peak =
						ori_rf_check_delta_peak;
				}
			}

			if (dfs->dfs_debug_mask & WLAN_DEBUG_DFS2)
				if (rf->rf_patterntype !=
						WLAN_DFS_RF_PATTERN_TYPE_1)
					dfs_print_delayline(dfs, &rf->rf_dl);

			rf->rf_dl.dl_last_ts = this_ts;
		}
	}

	if (*found) {
		dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS,
				"Found on channel minDur = %d, filterId = %d",
				ft->ft_mindur,
				rf != NULL ? rf->rf_pulseid : -1);
	}

	return;
}

/**
 * dfs_cal_average_radar_parameters() - Calculate the average radar parameters.
 * @dfs: Pointer to wlan_dfs structure.
 */
#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST)
static void dfs_cal_average_radar_parameters(struct wlan_dfs *dfs)
{
	int i, count = 0;
	u_int32_t total_pri = 0;
	u_int32_t total_duration = 0;
	u_int32_t total_sidx = 0;

	/* Calculating average PRI, Duration, SIDX from
	 * the 2nd pulse, ignoring the 1st pulse (radar_log[0]).
	 * This is because for the first pulse, the diff_ts will be
	 * (0 - current_ts) which will be a huge value.
	 * Average PRI computation will be wrong. FW returns a
	 * failure test result as PRI does not match their expected
	 * value.
	 */

	for (i = 1; (i < DFS_EVENT_LOG_SIZE) && (i < dfs->dfs_event_log_count);
			i++) {
		total_pri += dfs->radar_log[i].diff_ts;
		total_duration += dfs->radar_log[i].dur;
		total_sidx += dfs->radar_log[i].sidx;
		count++;
	}

	/* count == 0 when at most one pulse was logged; leave averages as-is. */
	if (count > 0) {
		dfs->dfs_average_pri = total_pri / count;
		dfs->dfs_average_duration = total_duration / count;
		dfs->dfs_average_sidx = total_sidx / count;

		dfs_info(dfs, WLAN_DEBUG_DFS2,
			 "Avg.PRI =%u, Avg.duration =%u Avg.sidx =%u",
			 dfs->dfs_average_pri,
			 dfs->dfs_average_duration,
			 dfs->dfs_average_sidx);
	}
}
#else
static void dfs_cal_average_radar_parameters(struct wlan_dfs *dfs)
{
}
#endif

/**
 * dfs_radarfound_reset_vars() - Reset dfs variables after radar found
 * @dfs: Pointer to wlan_dfs structure.
 * @rs: Pointer to dfs_state.
 * @chan: Current channel.
 * @seg_id: Segment id.
 */
static inline void dfs_radarfound_reset_vars(
		struct wlan_dfs *dfs,
		struct dfs_state *rs,
		struct dfs_channel *chan,
		uint8_t seg_id)
{
	struct dfs_channel *thischan;

	/*
	 * TODO: Instead of discarding the radar, create a workqueue
	 * if the channel change is happening through userspace and
	 * process the radar event once the channel change is completed.
	 */

	/* Collect stats */
	dfs->wlan_dfs_stats.num_radar_detects++;
	thischan = &rs->rs_chan;
	/* Remember a secondary-segment detect that occurred during preCAC. */
	if ((seg_id == SEG_ID_SECONDARY) &&
			(dfs_is_precac_timer_running(dfs)))
		dfs->is_radar_during_precac = 1;

	/*
	 * If event log is on then dump the radar event queue on
	 * filter match. This can be used to collect information
	 * on false radar detection.
	 */
	if (dfs->dfs_event_log_on) {
		dfs_cal_average_radar_parameters(dfs);
		dfs_print_radar_events(dfs);
	}

	/* Start clean for the next detection cycle. */
	dfs_reset_radarq(dfs);
	dfs_reset_alldelaylines(dfs);

	dfs_debug(dfs, WLAN_DEBUG_DFS1,
			"Primary channel freq = %u flags=0x%x",
			chan->dfs_ch_freq, chan->dfs_ch_flagext);

	if (chan->dfs_ch_freq != thischan->dfs_ch_freq)
		dfs_debug(dfs, WLAN_DEBUG_DFS1,
				"Ext channel freq = %u flags=0x%x",
				thischan->dfs_ch_freq,
				thischan->dfs_ch_flagext);

	dfs->dfs_phyerr_freq_min = 0x7fffffff;
	dfs->dfs_phyerr_freq_max = 0;
	dfs->dfs_phyerr_w53_counter = 0;

	if (seg_id == SEG_ID_SECONDARY) {
		dfs->wlan_dfs_stats.num_seg_two_radar_detects++;
		dfs->is_radar_found_on_secondary_seg = 1;
	}
}

/**
 * dfs_handle_bangradar - Handle the case of bangradar
 * @dfs: Pointer to wlan_dfs structure.
 * @chan: Current channel.
 * @rs: Pointer to dfs_state.
 * @seg_id: Segment id (set to SEG_ID_SECONDARY for a second-segment bang).
 * @retval: Set to 1 when a simulated radar should be acted upon.
 * Return: if bangradar was requested then return 1 (handled). Otherwise,
 * return 0.
 */
static inline int dfs_handle_bangradar(
	struct wlan_dfs *dfs,
	struct dfs_channel *chan,
	struct dfs_state **rs,
	uint8_t *seg_id,
	int *retval)
{

	if (dfs->dfs_bangradar) {
		/*
		 * Bangradar will always simulate radar found on the primary
		 * channel.
	 */
		*rs = &dfs->dfs_radar[dfs->dfs_curchan_radindex];
		dfs->dfs_bangradar = 0; /* Reset */
		dfs_debug(dfs, WLAN_DEBUG_DFS, "bangradar");
		*retval = 1;
		return 1;
	}

	if (dfs->dfs_second_segment_bangradar) {
		/*
		 * A second segment only exists while preCAC runs or on
		 * VHT160/VHT80+80 channels; otherwise ignore the request.
		 */
		if (dfs_is_precac_timer_running(dfs) ||
				WLAN_IS_CHAN_11AC_VHT160(chan) ||
				WLAN_IS_CHAN_11AC_VHT80_80(chan)) {
			dfs->is_radar_found_on_secondary_seg = 1;
			*rs = &dfs->dfs_radar[dfs->dfs_curchan_radindex];
			dfs_debug(dfs, WLAN_DEBUG_DFS,
					"second segment bangradar on cfreq = %u",
					dfs->dfs_precac_secondary_freq);
			*retval = 1;
			*seg_id = SEG_ID_SECONDARY;
		} else {
			dfs_debug(dfs, WLAN_DEBUG_DFS,
					"Do not process the second segment bangradar");
		}
		dfs->dfs_second_segment_bangradar = 0; /* Reset */
		return 1;
	}

	return 0;
}

/**
 * dfs_process_w53_pulses() - Process w53 pulses
 * @dfs: Pointer to wlan_dfs structure.
 *
 * For chips that support frequency information, we can relax PRI
 * restriction if the frequency spread is narrow.
 */
static inline void dfs_process_w53_pulses(
		struct wlan_dfs *dfs)
{
	/* Narrow spread: allow a larger PRI multiplier for W53 matching. */
	if ((dfs->dfs_phyerr_freq_max - dfs->dfs_phyerr_freq_min) <
			DFS_MAX_FREQ_SPREAD)
		dfs->dfs_pri_multiplier = DFS_LARGE_PRI_MULTIPLIER;

	dfs_debug(dfs, WLAN_DEBUG_DFS1,
			"w53_counter=%d, freq_max=%d, freq_min=%d, pri_multiplier=%d",
			dfs->dfs_phyerr_w53_counter,
			dfs->dfs_phyerr_freq_max, dfs->dfs_phyerr_freq_min,
			dfs->dfs_pri_multiplier);

	/* Restart frequency-spread tracking for the next batch. */
	dfs->dfs_phyerr_freq_min = 0x7fffffff;
	dfs->dfs_phyerr_freq_max = 0;
}

/**
 * dfs_handle_missing_pulses - Handle the case of missing pulses
 * @dfs: Pointer to wlan_dfs structure.
 * @chan: Current channel.
 *
 * The HW may miss some pulses especially with high channel loading.
 * This is true for Japan W53 where channel loading is 50%. Also
 * for ETSI where channel loading is 30% this can be an issue too.
 * To take care of missing pulses, we introduce pri_margin multiplier.
 * This is normally 2 but can be higher for W53.
+ * Return: If not enough pulses return 0. Otherwise, return 1. + */ +static inline int dfs_handle_missing_pulses( + struct wlan_dfs *dfs, + struct dfs_channel *chan) +{ + if ((dfs->dfsdomain == DFS_MKK4_DOMAIN) && + (dfs->dfs_caps.wlan_chip_is_bb_tlv) && + (chan->dfs_ch_freq < FREQ_5500_MHZ)) { + dfs->dfs_pri_multiplier = DFS_W53_DEFAULT_PRI_MULTIPLIER; + /* + * Do not process W53 pulses unless we have a minimum number + * of them. + */ + if (dfs->dfs_phyerr_w53_counter >= 5) + dfs_process_w53_pulses(dfs); + else + return 0; + } + + dfs_debug(dfs, WLAN_DEBUG_DFS1, "pri_multiplier=%d", + dfs->dfs_pri_multiplier); + + return 1; +} + +/** + * dfs_is_radarq_empty - check if radarq is empty + * @dfs: Pointer to wlan_dfs structure. + * @empty: Pointer to empty + */ +static inline void dfs_is_radarq_empty( + struct wlan_dfs *dfs, + int *empty) +{ + WLAN_DFSQ_LOCK(dfs); + *empty = STAILQ_EMPTY(&(dfs->dfs_radarq)); + WLAN_DFSQ_UNLOCK(dfs); +} + +/** + * dfs_remove_event_from_radarq - remove event from radarq + * @dfs: Pointer to wlan_dfs structure. + * @event: Double pointer to the event structure + */ +static inline void dfs_remove_event_from_radarq( + struct wlan_dfs *dfs, + struct dfs_event **event) +{ + WLAN_DFSQ_LOCK(dfs); + *event = STAILQ_FIRST(&(dfs->dfs_radarq)); + if (*event != NULL) + STAILQ_REMOVE_HEAD(&(dfs->dfs_radarq), re_list); + WLAN_DFSQ_UNLOCK(dfs); +} + +/** + * dfs_return_event_to_eventq - return event to eventq + * @dfs: Pointer to wlan_dfs structure. + * @event: Pointer to the event structure + */ +static inline void dfs_return_event_to_eventq( + struct wlan_dfs *dfs, + struct dfs_event *event) +{ + qdf_mem_zero(event, sizeof(struct dfs_event)); + WLAN_DFSEVENTQ_LOCK(dfs); + STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), event, re_list); + WLAN_DFSEVENTQ_UNLOCK(dfs); +} + +/** + * dfs_log_event - log dfs event + * @dfs: Pointer to wlan_dfs structure. 
+ * @re: Pointer to dfs_event re + * @this_ts: Current time stamp 64bit + * @diff_ts: Difference between 2 timestamps 32bit + * @index: Index value. + */ +static inline void dfs_log_event( + struct wlan_dfs *dfs, + struct dfs_event *re, + uint64_t this_ts, + uint32_t diff_ts, + uint32_t index) +{ + uint8_t i; + struct dfs_pulseline *pl = dfs->pulses; + + if (dfs->dfs_event_log_on) { + i = dfs->dfs_event_log_count % DFS_EVENT_LOG_SIZE; + dfs->radar_log[i].ts = this_ts; + dfs->radar_log[i].diff_ts = diff_ts; + dfs->radar_log[i].rssi = (*re).re_rssi; + dfs->radar_log[i].dur = (*re).re_dur; + dfs->radar_log[i].seg_id = (*re).re_seg_id; + dfs->radar_log[i].sidx = (*re).re_sidx; + dfs->radar_log[i].freq_offset_khz = + (*re).re_freq_offset_khz; + dfs->radar_log[i].peak_mag = (*re).re_peak_mag; + dfs->radar_log[i].total_gain = (*re).re_total_gain; + dfs->radar_log[i].mb_gain = (*re).re_mb_gain; + dfs->radar_log[i].relpwr_db = (*re).re_relpwr_db; + dfs->radar_log[i].delta_diff = (*re).re_delta_diff; + dfs->radar_log[i].delta_peak = (*re).re_delta_peak; + dfs->radar_log[i].psidx_diff = (*re).re_psidx_diff; + dfs->radar_log[i].is_chirp = DFS_EVENT_NOTCHIRP(re) ? + 0 : 1; + dfs->dfs_event_log_count++; + } + + dfs->dfs_seq_num++; + pl->pl_elems[index].p_seq_num = dfs->dfs_seq_num; +} + +/** + * dfs_check_if_nonbin5 - Check if radar, other than bin5, is found + * @dfs: Pointer to wlan_dfs structure. + * @re: Pointer to re (radar event) + * @rs: Double Pointer to rs (radar state) + * @this_ts: Current time stamp 64bit + * @diff_ts: Difference between 2 timestamps 32bit + * @found: Pointer to found. If radar found or not. + * @retval: Pointer to retval(return value). + * @false_radar_found: Pointer to false_radar_found(return value). 
 */
static inline void dfs_check_if_nonbin5(
		struct wlan_dfs *dfs,
		struct dfs_event *re,
		struct dfs_state **rs,
		uint64_t this_ts,
		uint32_t diff_ts,
		int *found,
		int *retval,
		int *false_radar_found)
{

	uint32_t tabledepth = 0;
	struct dfs_filtertype *ft;
	uint64_t deltaT;

	dfs_debug(dfs, WLAN_DEBUG_DFS1,
			" *** chan freq (%d): ts %llu dur %u rssi %u",
			(*rs)->rs_chan.dfs_ch_freq, (uint64_t)this_ts,
			(*re).re_dur, (*re).re_rssi);

	/*
	 * dfs_ftindextable[dur] lists (up to DFS_MAX_RADAR_OVERLAP, -1
	 * terminated) the filter types whose duration range covers this
	 * pulse; try each until one matches or the event is proven false.
	 */
	while ((tabledepth < DFS_MAX_RADAR_OVERLAP) &&
			((dfs->dfs_ftindextable[(*re).re_dur])[tabledepth] !=
			 -1) && (!*retval) && !(*false_radar_found)) {
		ft = dfs->dfs_radarf[((dfs->dfs_ftindextable[(*re).re_dur])
				[tabledepth])];
		dfs_debug(dfs, WLAN_DEBUG_DFS2,
				" ** RD (%d): ts %x dur %u rssi %u",
				(*rs)->rs_chan.dfs_ch_freq, (*re).re_ts,
				(*re).re_dur, (*re).re_rssi);

		/* Weak long pulses are rejected outright for this type. */
		if ((*re).re_rssi < ft->ft_rssithresh &&
				(*re).re_dur > MAX_DUR_FOR_LOW_RSSI) {
			dfs_debug(dfs, WLAN_DEBUG_DFS2,
					"Rejecting on rssi rssi=%u thresh=%u",
					(*re).re_rssi,
					ft->ft_rssithresh);
			tabledepth++;
			continue;
		}
		deltaT = this_ts - ft->ft_last_ts;
		dfs_debug(dfs, WLAN_DEBUG_DFS2,
				"deltaT = %lld (ts: 0x%llx) (last ts: 0x%llx)",
				(uint64_t)deltaT, (uint64_t)this_ts,
				(uint64_t)ft->ft_last_ts);

		if ((deltaT < ft->ft_minpri) && (deltaT != 0)) {
			/*
			 * This check is for the whole filter type.
			 * Individual filters will check this again.
			 * This is first line of filtering.
			 */
			dfs_debug(dfs, WLAN_DEBUG_DFS2,
					"Rejecting on pri pri=%lld minpri=%u",
					(uint64_t)deltaT, ft->ft_minpri);
			tabledepth++;
			continue;
		}

		/* Run the event through every filter of this type. */
		__dfs_process_radarevent(dfs, ft, re, this_ts, found,
				false_radar_found);

		ft->ft_last_ts = this_ts;
		*retval |= *found;
		tabledepth++;
	}
}

/**
 * dfs_check_each_b5radar() - Check each bin5 radar
 * @dfs: Pointer to wlan_dfs structure.
 * @re: Pointer to re(radar event).
 * @br: Pointer to dfs_bin5radars structure.
 * @this_ts: Current time stamp 64bit.
+ * @diff_ts: Difference between 2 timestamps 32bit. + * @found: Pointer to found. If radar found or not. + */ +static inline void dfs_check_each_b5radar( + struct wlan_dfs *dfs, + struct dfs_event *re, + struct dfs_bin5radars *br, + uint64_t this_ts, + uint32_t diff_ts, + int *found) +{ + if (dfs_bin5_check_pulse(dfs, re, br)) { + /* + * This is a valid Bin5 pulse, check if it belongs to a + * burst. + */ + (*re).re_dur = dfs_retain_bin5_burst_pattern(dfs, diff_ts, + (*re).re_dur); + /* + * Remember our computed duration for the next pulse in the + * burst (if needed). + */ + dfs->dfs_rinfo.dfs_bin5_chirp_ts = this_ts; + dfs->dfs_rinfo.dfs_last_bin5_dur = (*re).re_dur; + + if (dfs_bin5_addpulse(dfs, br, re, this_ts)) + *found |= dfs_bin5_check(dfs); + } else { + dfs_debug(dfs, WLAN_DEBUG_DFS_BIN5_PULSE, + "not a BIN5 pulse (dur=%d)", (*re).re_dur); + } +} + +/** + * dfs_check_if_bin5() - Check if bin5 radar is found + * @dfs: Pointer to wlan_dfs structure. + * @re: Pointer to re(radar event). + * @this_ts: Current time stamp 64bit. + * @diff_ts: Difference between 2 timestamps 32bit. + * @found: Pointer to found. If radar found or not. + */ +static inline void dfs_check_if_bin5( + struct wlan_dfs *dfs, + struct dfs_event *re, + uint64_t this_ts, + uint32_t diff_ts, + int *found) +{ + int p; + + /* BIN5 pulses are FCC and Japan specific. */ + if ((dfs->dfsdomain == DFS_FCC_DOMAIN) || + (dfs->dfsdomain == DFS_MKK4_DOMAIN)) { + for (p = 0; (p < dfs->dfs_rinfo.rn_numbin5radars) && (!*found); + p++) { + struct dfs_bin5radars *br; + + br = &(dfs->dfs_b5radars[p]); + dfs_check_each_b5radar(dfs, re, br, this_ts, diff_ts, + found); + } + } + + if (*found) + dfs_debug(dfs, WLAN_DEBUG_DFS, "Found bin5 radar"); +} + +/** + * dfs_skip_the_event() - Skip the Radar event + * @dfs: Pointer to wlan_dfs structure. + * @re: Pointer to re(radar event). + * @rs: Pointer to dfs_state. 
 */
static inline bool dfs_skip_the_event(
		struct wlan_dfs *dfs,
		struct dfs_event *re,
		struct dfs_state **rs)
{
	/* Resolve the radar state for the event's channel index; skip the
	 * event when the index is out of range.
	 */
	if ((*re).re_chanindex < DFS_NUM_RADAR_STATES)
		(*rs) = &dfs->dfs_radar[(*re).re_chanindex];
	else
		return 1;

	/* Skip events on channels already flagged with interference. */
	if ((*rs)->rs_chan.dfs_ch_flagext & CHANNEL_INTERFERENCE)
		return 1;

	return 0;
}

/**
 * dfs_check_ts_wrap() - dfs check for timestamp wrap.
 * @dfs: Pointer to wlan_dfs structure.
 * @re: Pointer to re(radar event).
 * @deltafull_ts: Deltafull ts.
 *
 * Return: Deltafull ts.
 */
static inline uint64_t dfs_check_ts_wrap(
		struct wlan_dfs *dfs,
		struct dfs_event *re,
		uint64_t deltafull_ts)
{
	/* Remove one full TSF wrap from the delta when it exceeds the
	 * distance from the last timestamp to this one across the wrap.
	 */
	if (deltafull_ts >
			((uint64_t)((DFS_TSMASK -
					dfs->dfs_rinfo.rn_last_ts) +
				1 + (*re).re_ts)))
		deltafull_ts -=
			(DFS_TSMASK - dfs->dfs_rinfo.rn_last_ts) +
			1 + (*re).re_ts;

	return deltafull_ts;
}

/**
 * dfs_calculate_ts_prefix() - Calculate deltafull ts value.
 * @dfs: Pointer to wlan_dfs structure.
 * @re: Pointer to re(radar event).
 */
static inline void dfs_calculate_ts_prefix(
		struct wlan_dfs *dfs,
		struct dfs_event *re)
{
	uint64_t deltafull_ts;

	if ((*re).re_ts <= dfs->dfs_rinfo.rn_last_ts) {
		/* The short (hardware) timestamp went backwards: at least
		 * one wrap happened, so advance the 64-bit prefix.
		 */
		dfs->dfs_rinfo.rn_ts_prefix += (((uint64_t) 1) << DFS_TSSHIFT);
		/* Now, see if it's been more than 1 wrap */
		deltafull_ts = (*re).re_full_ts - dfs->dfs_rinfo.rn_lastfull_ts;
		deltafull_ts = dfs_check_ts_wrap(dfs, re, deltafull_ts);
		deltafull_ts >>= DFS_TSSHIFT;

		if (deltafull_ts > 1)
			dfs->dfs_rinfo.rn_ts_prefix +=
				((deltafull_ts - 1) << DFS_TSSHIFT);
	} else {
		/* Timestamps are monotonic, but the full TSF may still show
		 * that whole wraps elapsed between events.
		 */
		deltafull_ts = (*re).re_full_ts -
			dfs->dfs_rinfo.rn_lastfull_ts;
		if (deltafull_ts > (uint64_t) DFS_TSMASK) {
			deltafull_ts >>= DFS_TSSHIFT;
			dfs->dfs_rinfo.rn_ts_prefix +=
				((deltafull_ts - 1) << DFS_TSSHIFT);
		}
	}
}

/**
 * dfs_calculate_timestamps() - Calculate various timestamps
 * @dfs: Pointer to wlan_dfs structure.
 * @re: Pointer to re(radar event)
 * @this_ts : Pointer to this_ts (this timestamp)
 */

static inline void dfs_calculate_timestamps(
		struct wlan_dfs *dfs,
		struct dfs_event *re,
		uint64_t *this_ts)
{
	if (dfs->dfs_rinfo.rn_lastfull_ts == 0) {
		/*
		 * Either not started, or 64-bit rollover exactly to
		 * zero Just prepend zeros to the 15-bit ts.
		 */
		dfs->dfs_rinfo.rn_ts_prefix = 0;
	} else {
		/* WAR 23031- patch duplicate ts on very short pulses.
		 * This patch has two problems in linux environment.
		 * 1)The time stamp created and hence PRI depends
		 * entirely on the latency. If the latency is high, it
		 * possibly can split two consecutive pulses in the
		 * same burst so far away (the same amount of latency)
		 * that make them look like they are from different
		 * bursts. It is observed to happen too often. It sure
		 * makes the detection fail.
		 * 2)Even if the latency is not that bad, it simply
		 * shifts the duplicate timestamps to a new duplicate
		 * timestamp based on how they are processed.
		 * This is not worse but not good either.
		 * Take this pulse as a good one and create a probable
		 * PRI later.
		 */
		/* NOTE(review): debug_dup/debug_dup_cnt are file-scope
		 * debug aids defined elsewhere in this file; they only
		 * record a '1'/'0' history of duplicate timestamps.
		 */
		if ((*re).re_dur == 0 && (*re).re_ts ==
				dfs->dfs_rinfo.rn_last_unique_ts) {
			debug_dup[debug_dup_cnt++] = '1';
			dfs_debug(dfs, WLAN_DEBUG_DFS1, "deltaT is 0");
		} else {
			dfs->dfs_rinfo.rn_last_unique_ts = (*re).re_ts;
			debug_dup[debug_dup_cnt++] = '0';
		}

		if (debug_dup_cnt >= 32)
			debug_dup_cnt = 0;

		dfs_calculate_ts_prefix(dfs, re);
	}

	/*
	 * At this stage rn_ts_prefix has either been blanked or
	 * calculated, so it's safe to use.
	 */
	*this_ts = dfs->dfs_rinfo.rn_ts_prefix | ((uint64_t) (*re).re_ts);
	dfs->dfs_rinfo.rn_lastfull_ts = (*re).re_full_ts;
	dfs->dfs_rinfo.rn_last_ts = (*re).re_ts;
}

/**
 * dfs_add_to_pulseline - Extract necessary items from dfs_event and
 * add it as pulse in the pulseline
 * @dfs: Pointer to wlan_dfs structure.
 * @re: Pointer to re(radar event)
 * @this_ts: Pointer to this_ts (this timestamp)
 * @test_ts: Previous 32-bit timestamp; updated to this pulse's timestamp.
 * @diff_ts: Diff ts.
 * @index: Pointer to get index value.
 */
static inline void dfs_add_to_pulseline(
		struct wlan_dfs *dfs,
		struct dfs_event *re,
		uint64_t *this_ts,
		uint32_t *test_ts,
		uint32_t *diff_ts,
		uint32_t *index)
{
	struct dfs_pulseline *pl;

	/*
	 * Calculate the start of the radar pulse.
	 *
	 * The TSF is stamped by the MAC upon reception of the event,
	 * which is (typically?) at the end of the event. But the
	 * pattern matching code expects the event timestamps to be at
	 * the start of the event. So to fake it, we subtract the pulse
	 * duration from the given TSF. This is done after the 64-bit
	 * timestamp has been calculated so long pulses correctly
	 * under-wrap the counter. Ie, if this was done on the 32
	 * (or 15!) bit TSF when the TSF value is closed to 0, it will
	 * underflow to 0xfffffXX, which would mess up the logical "OR"
	 * operation done above.
	 * This isn't valid for Peregrine as the hardware gives us the
	 * actual TSF offset of the radar event, not just the MAC TSF
	 * of the completed receive.
	 *
	 * XXX TODO: ensure that the TLV PHY error processing code will
	 * correctly calculate the TSF to be the start of the radar
	 * pulse.
	 *
	 * XXX TODO TODO: modify the TLV parsing code to subtract the
	 * duration from the TSF, based on the current fast clock value.
	 */
	if ((!dfs->dfs_caps.wlan_chip_is_bb_tlv) && (*re).re_dur != 1)
		*this_ts -= (*re).re_dur;

	pl = dfs->pulses;
	/* Save the pulse parameters in the pulse buffer(pulse line).
	 */
	*index = (pl->pl_lastelem + 1) & DFS_MAX_PULSE_BUFFER_MASK;

	/* When the ring is full, overwrite the oldest entry. */
	if (pl->pl_numelems == DFS_MAX_PULSE_BUFFER_SIZE)
		pl->pl_firstelem = (pl->pl_firstelem+1) &
			DFS_MAX_PULSE_BUFFER_MASK;
	else
		pl->pl_numelems++;

	pl->pl_lastelem = *index;
	pl->pl_elems[*index].p_time = *this_ts;
	pl->pl_elems[*index].p_dur = (*re).re_dur;
	pl->pl_elems[*index].p_rssi = (*re).re_rssi;
	pl->pl_elems[*index].p_sidx = (*re).re_sidx;
	pl->pl_elems[*index].p_delta_peak = (*re).re_delta_peak;
	pl->pl_elems[*index].p_psidx_diff = (*re).re_psidx_diff;
	/* diff_ts is the 32-bit gap to the previous pulse. */
	*diff_ts = (uint32_t)*this_ts - *test_ts;
	*test_ts = (uint32_t)*this_ts;

	dfs_debug(dfs, WLAN_DEBUG_DFS1,
			"ts%u %u %u diff %u pl->pl_lastelem.p_time=%llu",
			(uint32_t)*this_ts, (*re).re_dur,
			(*re).re_rssi, *diff_ts,
			(uint64_t)pl->pl_elems[*index].p_time);
}

/**
 * dfs_conditional_clear_delaylines - Clear delay lines to remove the
 * false pulses.
 * @dfs: Pointer to wlan_dfs structure.
 * @diff_ts: diff between timerstamps.
 * @this_ts: this timestamp value.
 * @re: Pointer to dfs_event structure.
 */
static inline void dfs_conditional_clear_delaylines(
		struct wlan_dfs *dfs,
		uint32_t diff_ts,
		uint64_t this_ts,
		struct dfs_event re)
{
	struct dfs_pulseline *pl = dfs->pulses;
	uint32_t index;

	/* If diff_ts is very small, we might be getting false pulse
	 * detects due to heavy interference. We might be getting
	 * spectral splatter from adjacent channel. In order to prevent
	 * false alarms we clear the delay-lines. This might impact
	 * positive detections under harsh environments, but helps with
	 * false detects.
	 */

	if (diff_ts < DFS_INVALID_PRI_LIMIT) {
		dfs->dfs_seq_num = 0;
		dfs_reset_alldelaylines(dfs);
		dfs_reset_radarq(dfs);

		/* Re-insert the current pulse as the first entry of the
		 * now-empty pulse line so detection can restart from it.
		 */
		index = (pl->pl_lastelem + 1) & DFS_MAX_PULSE_BUFFER_MASK;
		if (pl->pl_numelems == DFS_MAX_PULSE_BUFFER_SIZE)
			pl->pl_firstelem = (pl->pl_firstelem+1) &
				DFS_MAX_PULSE_BUFFER_MASK;
		else
			pl->pl_numelems++;

		pl->pl_lastelem = index;
		pl->pl_elems[index].p_time = this_ts;
		pl->pl_elems[index].p_dur = re.re_dur;
		pl->pl_elems[index].p_rssi = re.re_rssi;
		pl->pl_elems[index].p_sidx = re.re_sidx;
		pl->pl_elems[index].p_delta_peak = re.re_delta_peak;
		pl->pl_elems[index].p_psidx_diff = re.re_psidx_diff;
		dfs->dfs_seq_num++;
		pl->pl_elems[index].p_seq_num = dfs->dfs_seq_num;
	}
}

/**
 * dfs_process_each_radarevent - remove each event from the dfs radar queue
 * and process it.
 * @dfs: Pointer to wlan_dfs structure.
 * @chan: Pointer to DFS current channel.
 * @rs: Pointer to dfs_state structure.
 * @seg_id: segment id.
 * @retval: pointer to retval.
 * @false_radar_found: pointer to false radar found.
 *
 * Return: If radar found then return 1 else return 0.
 */
static inline int dfs_process_each_radarevent(
		struct wlan_dfs *dfs,
		struct dfs_channel *chan,
		struct dfs_state **rs,
		uint8_t *seg_id,
		int *retval,
		int *false_radar_found)
{
	struct dfs_event re, *event;
	int found, empty;
	int events_processed = 0;
	uint64_t this_ts;
	/* NOTE(review): function-scope statics carry the previous pulse's
	 * 32-bit timestamp across calls, which makes this function
	 * non-reentrant; presumably all callers run in one context —
	 * confirm before invoking from multiple threads.
	 */
	static uint32_t test_ts;
	static uint32_t diff_ts;
	uint32_t index;

	dfs_is_radarq_empty(dfs, &empty);

	/* Drain up to MAX_EVENTS queued events, stopping early on a
	 * confirmed radar or on a proven false detect.
	 */
	while ((!empty) && (!*retval) && !(*false_radar_found) &&
			(events_processed < MAX_EVENTS)) {
		dfs_remove_event_from_radarq(dfs, &event);
		if (!event) {
			empty = 1;
			break;
		}
		events_processed++;
		re = *event;

		/* Copy taken; recycle the event back to the free queue. */
		dfs_return_event_to_eventq(dfs, event);

		*seg_id = re.re_seg_id;
		found = 0;
		if (dfs_skip_the_event(dfs, &re, rs)) {
			dfs_is_radarq_empty(dfs, &empty);
			continue;
		}

		dfs_calculate_timestamps(dfs, &re, &this_ts);

		re.re_dur = dfs_process_pulse_dur(dfs, re.re_dur);

		dfs_add_to_pulseline(dfs, &re, &this_ts, &test_ts, &diff_ts,
				&index);

		dfs_log_event(dfs, &re, this_ts, diff_ts, index);

		dfs_conditional_clear_delaylines(dfs, diff_ts, this_ts, re);

		found = 0;
		dfs_check_if_bin5(dfs, &re, this_ts, diff_ts, &found);
		if (found) {
			*retval |= found;
			return 1;
		}

		dfs_check_if_nonbin5(dfs, &re, rs, this_ts, diff_ts, &found,
				retval, false_radar_found);

		dfs_is_radarq_empty(dfs, &empty);
	}

	return 0;
}

/**
 * dfs_false_radarfound_reset_vars () - Reset dfs variables after false radar
 * found.
 * @dfs: Pointer to wlan_dfs structure.
+ */ +static inline void dfs_false_radarfound_reset_vars( + struct wlan_dfs *dfs) +{ + dfs->dfs_seq_num = 0; + dfs_reset_radarq(dfs); + dfs_reset_alldelaylines(dfs); + dfs->dfs_phyerr_freq_min = 0x7fffffff; + dfs->dfs_phyerr_freq_max = 0; + dfs->dfs_phyerr_w53_counter = 0; +} + +void dfs_radarfound_action_generic(struct wlan_dfs *dfs, uint8_t seg_id) +{ + struct radar_found_info *radar_found; + + radar_found = qdf_mem_malloc(sizeof(*radar_found)); + if (!radar_found) { + dfs_alert(dfs, WLAN_DEBUG_DFS_ALWAYS, + "radar_found allocation failed"); + return; + } + + qdf_mem_zero(radar_found, sizeof(*radar_found)); + radar_found->segment_id = seg_id; + radar_found->pdev_id = + wlan_objmgr_pdev_get_pdev_id(dfs->dfs_pdev_obj); + + dfs_process_radar_ind(dfs, radar_found); + qdf_mem_free(radar_found); +} + +void dfs_radar_found_action(struct wlan_dfs *dfs, + bool bangradar, + uint8_t seg_id) +{ + /* If Host DFS confirmation is supported, save the curchan as + * radar found chan, send radar found indication along with + * average radar parameters to FW and start the host status + * wait timer. 
	 */
	if (!bangradar &&
	    (utils_get_dfsdomain(dfs->dfs_pdev_obj) == DFS_FCC_DOMAIN) &&
	    lmac_is_host_dfs_check_support_enabled(dfs->dfs_pdev_obj)) {
		dfs_radarfound_action_fcc(dfs, seg_id);
	} else {
		dfs_radarfound_action_generic(dfs, seg_id);
	}
}

/*
 * dfs_process_radarevent() - Top-level entry: drain the radar event queue
 * for @chan, handle simulated (bangradar) radars, and trigger the radar
 * found / false-detect actions.
 */
void dfs_process_radarevent(
	struct wlan_dfs *dfs,
	struct dfs_channel *chan)
{
	struct dfs_state *rs = NULL;
	uint8_t seg_id = 0;
	int retval = 0;
	int false_radar_found = 0;
	bool bangradar = false;

	if (!dfs_radarevent_basic_sanity(dfs, chan))
		return;
	/*
	 * TEST : Simulate radar bang, make sure we add the channel to NOL
	 * (bug 29968)
	 */
	if (dfs_handle_bangradar(dfs, chan, &rs, &seg_id, &retval)) {
		if (retval)
			bangradar = true;
		goto dfsfound;
	}

	/* Bail out when too few W53 pulses were collected to decide. */
	if (!dfs_handle_missing_pulses(dfs, chan))
		return;

	dfs_process_each_radarevent(dfs, chan, &rs, &seg_id, &retval,
			&false_radar_found);

dfsfound:
	if (retval) {
		dfs_radarfound_reset_vars(dfs, rs, chan, seg_id);
		dfs_radar_found_action(dfs, bangradar, seg_id);
	}

	if (false_radar_found)
		dfs_false_radarfound_reset_vars(dfs);
}
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_radar.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_radar.c
new file mode 100644
index 0000000000000000000000000000000000000000..d128dfe70945812e20702a99165a21881785462a
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_radar.c
@@ -0,0 +1,320 @@
/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 * Copyright (c) 2011, Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "../dfs.h" +#include "../dfs_zero_cac.h" +#include "wlan_dfs_mlme_api.h" +#include "wlan_dfs_lmac_api.h" +#include "../dfs_partial_offload_radar.h" +#include "../dfs_direct_attach_radar.h" +#include "../dfs_internal.h" + +void dfs_get_radars(struct wlan_dfs *dfs) +{ + struct wlan_objmgr_psoc *psoc; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + psoc = wlan_pdev_get_psoc(dfs->dfs_pdev_obj); + if (!psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "psoc is NULL"); + return; + } + + if (wlan_objmgr_psoc_get_dev_type(psoc) == WLAN_DEV_OL) { + /* For Partial offload */ + dfs_get_po_radars(dfs); + } else { + /* For Direct Attach (DA) */ + dfs_get_da_radars(dfs); + } +} + +int dfs_radar_disable(struct wlan_dfs *dfs) +{ + dfs->dfs_proc_phyerr &= ~DFS_AR_EN; + dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN; + + return 0; +} + +void dfs_phyerr_param_copy(struct wlan_dfs_phyerr_param *dst, + struct wlan_dfs_phyerr_param *src) +{ + qdf_mem_copy(dst, src, sizeof(*dst)); +} + +struct dfs_state *dfs_getchanstate(struct wlan_dfs *dfs, uint8_t *index, + int ext_chan_flag) +{ + struct dfs_state *rs = NULL; + struct dfs_channel *cmp_ch, cmp_ch1; + int i; + QDF_STATUS err; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return NULL; + } + cmp_ch = &cmp_ch1; + if (ext_chan_flag) { + err = dfs_mlme_get_extchan(dfs->dfs_pdev_obj, + &(cmp_ch->dfs_ch_freq), + &(cmp_ch->dfs_ch_flags), + &(cmp_ch->dfs_ch_flagext), + 
&(cmp_ch->dfs_ch_ieee), + &(cmp_ch->dfs_ch_vhtop_ch_freq_seg1), + &(cmp_ch->dfs_ch_vhtop_ch_freq_seg2)); + + if (err == QDF_STATUS_SUCCESS) { + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "Extension channel freq = %u flags=0x%x", + cmp_ch->dfs_ch_freq, + cmp_ch->dfs_ch_flagext); + } else + return NULL; + } else { + cmp_ch = dfs->dfs_curchan; + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "Primary channel freq = %u flags=0x%x", + cmp_ch->dfs_ch_freq, cmp_ch->dfs_ch_flagext); + } + + for (i = 0; i < DFS_NUM_RADAR_STATES; i++) { + if ((dfs->dfs_radar[i].rs_chan.dfs_ch_freq == + cmp_ch->dfs_ch_freq) && + (dfs->dfs_radar[i].rs_chan.dfs_ch_flags == + cmp_ch->dfs_ch_flags) + ) { + if (index != NULL) + *index = (uint8_t)i; + return &(dfs->dfs_radar[i]); + } + } + /* No existing channel found, look for first free channel state entry.*/ + for (i = 0; i < DFS_NUM_RADAR_STATES; i++) { + if (dfs->dfs_radar[i].rs_chan.dfs_ch_freq == 0) { + rs = &(dfs->dfs_radar[i]); + /* Found one, set channel info and default thresholds.*/ + rs->rs_chan = *cmp_ch; + + /* Copy the parameters from the default set. */ + dfs_phyerr_param_copy(&rs->rs_param, + &dfs->dfs_defaultparams); + + if (index != NULL) + *index = (uint8_t)i; + + return rs; + } + } + dfs_debug(dfs, WLAN_DEBUG_DFS2, "No more radar states left."); + + return NULL; +} + +void dfs_radar_enable(struct wlan_dfs *dfs, int no_cac, uint32_t opmode) +{ + int is_ext_ch; + int is_fastclk = 0; + struct dfs_channel *ext_ch, extchan; + QDF_STATUS err = QDF_STATUS_E_FAILURE; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + is_ext_ch = WLAN_IS_CHAN_11N_HT40(dfs->dfs_curchan); + lmac_dfs_disable(dfs->dfs_pdev_obj, no_cac); + /* + * In all modes, if the primary is DFS then we have to + * enable radar detection. In HT80_80, we can have + * primary non-DFS 80MHz with extension 80MHz DFS. 
+ */ + if ((WLAN_IS_CHAN_DFS(dfs->dfs_curchan) || + ((WLAN_IS_CHAN_11AC_VHT160(dfs->dfs_curchan) || + WLAN_IS_CHAN_11AC_VHT80_80(dfs->dfs_curchan)) + && + WLAN_IS_CHAN_DFS_CFREQ2(dfs->dfs_curchan))) || + (dfs_is_precac_timer_running(dfs))) { + struct dfs_state *rs_pri = NULL, *rs_ext = NULL; + uint8_t index_pri, index_ext; + + dfs->dfs_proc_phyerr |= DFS_AR_EN; + dfs->dfs_proc_phyerr |= DFS_RADAR_EN; + dfs->dfs_proc_phyerr |= DFS_SECOND_SEGMENT_RADAR_EN; + + ext_ch = &extchan; + if (is_ext_ch) + err = dfs_mlme_get_extchan(dfs->dfs_pdev_obj, + &(ext_ch->dfs_ch_freq), + &(ext_ch->dfs_ch_flags), + &(ext_ch->dfs_ch_flagext), + &(ext_ch->dfs_ch_ieee), + &(ext_ch->dfs_ch_vhtop_ch_freq_seg1), + &(ext_ch->dfs_ch_vhtop_ch_freq_seg2)); + + + dfs_reset_alldelaylines(dfs); + + rs_pri = dfs_getchanstate(dfs, &index_pri, 0); + if (err == QDF_STATUS_SUCCESS) + rs_ext = dfs_getchanstate(dfs, &index_ext, 1); + + if (rs_pri != NULL && ((err == QDF_STATUS_E_FAILURE) || + (rs_ext != NULL))) { + struct wlan_dfs_phyerr_param pe; + + qdf_mem_set(&pe, sizeof(pe), '\0'); + + if (index_pri != dfs->dfs_curchan_radindex) + dfs_reset_alldelaylines(dfs); + + dfs->dfs_curchan_radindex = (int16_t)index_pri; + + if (rs_ext) + dfs->dfs_extchan_radindex = (int16_t)index_ext; + + dfs_phyerr_param_copy(&pe, &rs_pri->rs_param); + dfs_debug(dfs, WLAN_DEBUG_DFS3, + "firpwr=%d, rssi=%d, height=%d, prssi=%d, inband=%d, relpwr=%d, relstep=%d, maxlen=%d", + pe.pe_firpwr, + pe.pe_rrssi, pe.pe_height, + pe.pe_prssi, pe.pe_inband, + pe.pe_relpwr, pe.pe_relstep, + pe.pe_maxlen); + + lmac_dfs_enable(dfs->dfs_pdev_obj, &is_fastclk, + &pe, dfs->dfsdomain); + dfs_debug(dfs, WLAN_DEBUG_DFS, + "Enabled radar detection on channel %d", + dfs->dfs_curchan->dfs_ch_freq); + + dfs->dur_multiplier = is_fastclk ? 
+ DFS_FAST_CLOCK_MULTIPLIER : + DFS_NO_FAST_CLOCK_MULTIPLIER; + + dfs_debug(dfs, WLAN_DEBUG_DFS3, + "duration multiplier is %d", + dfs->dur_multiplier); + } else + dfs_debug(dfs, WLAN_DEBUG_DFS, + "No more radar states left"); + } +} + +int dfs_set_thresholds(struct wlan_dfs *dfs, const uint32_t threshtype, + const uint32_t value) +{ + int16_t chanindex; + struct dfs_state *rs; + struct wlan_dfs_phyerr_param pe; + int is_fastclk = 0; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return 0; + } + + chanindex = dfs->dfs_curchan_radindex; + if ((chanindex < 0) || (chanindex >= DFS_NUM_RADAR_STATES)) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "%s: chanindex = %d, DFS_NUM_RADAR_STATES=%d\n", + __func__, + chanindex, + DFS_NUM_RADAR_STATES); + return 0; + } + + dfs_debug(dfs, WLAN_DEBUG_DFS, + "threshtype=%d, value=%d", threshtype, value); + + wlan_dfs_phyerr_init_noval(&pe); + + rs = &(dfs->dfs_radar[chanindex]); + switch (threshtype) { + case DFS_PARAM_FIRPWR: + rs->rs_param.pe_firpwr = (int32_t) value; + pe.pe_firpwr = value; + break; + case DFS_PARAM_RRSSI: + rs->rs_param.pe_rrssi = value; + pe.pe_rrssi = value; + break; + case DFS_PARAM_HEIGHT: + rs->rs_param.pe_height = value; + pe.pe_height = value; + break; + case DFS_PARAM_PRSSI: + rs->rs_param.pe_prssi = value; + pe.pe_prssi = value; + break; + case DFS_PARAM_INBAND: + rs->rs_param.pe_inband = value; + pe.pe_inband = value; + break; + /* 5413 specific */ + case DFS_PARAM_RELPWR: + rs->rs_param.pe_relpwr = value; + pe.pe_relpwr = value; + break; + case DFS_PARAM_RELSTEP: + rs->rs_param.pe_relstep = value; + pe.pe_relstep = value; + break; + case DFS_PARAM_MAXLEN: + rs->rs_param.pe_maxlen = value; + pe.pe_maxlen = value; + break; + default: + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "unknown threshtype (%d)", threshtype); + break; + } + + + /* + * The driver layer dfs_enable routine is tasked with translating + * values from the global format to the per-device (HAL, offload) + * format. 
+ */ + lmac_dfs_enable(dfs->dfs_pdev_obj, &is_fastclk, + &pe, dfs->dfsdomain); + + return 1; +} + +int dfs_get_thresholds(struct wlan_dfs *dfs, + struct wlan_dfs_phyerr_param *param) +{ + qdf_mem_zero(param, sizeof(*param)); + lmac_dfs_get_thresholds(dfs->dfs_pdev_obj, param); + + return 1; +} + +uint16_t dfs_chan2freq(struct dfs_channel *chan) +{ + if (!chan) + return 0; + + return chan == WLAN_CHAN_ANYC ? WLAN_CHAN_ANY : chan->dfs_ch_freq; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_staggered.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_staggered.c new file mode 100644 index 0000000000000000000000000000000000000000..1be33310ddded60889d9a135a2225cdeea3153d1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/filtering/dfs_staggered.c @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2013, 2016-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2010, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: ETSI 1.5.1 introduced new waveforms which use staggered PRIs within + * the same waveform. This file contains the detection implementation for + * these specific types of radars. 
This logic is different from the other + * detection because it must detect waveforms that may have 2 or more + * different PRIs (pulse repetition intervals). + */ + +#include "../dfs.h" + +/** + * dfs_is_pri_multiple() - Is PRI is multiple. + * @sample_pri: Sample PRI. + * @refpri: Reference PRI. + */ +static int dfs_is_pri_multiple(uint32_t sample_pri, uint32_t refpri) +{ +#define MAX_ALLOWED_MISSED 3 + int i; + + if (sample_pri < refpri || (!refpri)) + return 0; + + for (i = 1; i <= MAX_ALLOWED_MISSED; i++) { + if ((sample_pri%(i*refpri) <= 5)) + return 1; + } + + return 0; +#undef MAX_ALLOWED_MISSED +} + +/** + * dfs_is_unique_pri() - Check for the unique PRI. + * @highestpri: Highest PRI. + * @midpri: MID PRI. + * @lowestpri: Lowest PRI. + * @refpri: Reference PRI. + */ +static int dfs_is_unique_pri(uint32_t highestpri, uint32_t midpri, + uint32_t lowestpri, uint32_t refpri) +{ +#define DFS_STAGGERED_PRI_MARGIN_MIN 20 +#define DFS_STAGGERED_PRI_MARGIN_MAX 400 + if ((DFS_DIFF(lowestpri, refpri) >= DFS_STAGGERED_PRI_MARGIN_MIN) && + (DFS_DIFF(midpri, refpri) >= DFS_STAGGERED_PRI_MARGIN_MIN) && + (DFS_DIFF(highestpri, refpri) >= DFS_STAGGERED_PRI_MARGIN_MIN) + ) + return 1; + + if ((dfs_is_pri_multiple(refpri, highestpri)) || + (dfs_is_pri_multiple(refpri, lowestpri)) || + (dfs_is_pri_multiple(refpri, midpri))) + return 0; +#undef DFS_STAGGERED_PRI_MARGIN_MIN +#undef DFS_STAGGERED_PRI_MARGIN_MAX + + return 0; +} + +int dfs_staggered_check(struct wlan_dfs *dfs, struct dfs_filter *rf, + uint32_t deltaT, uint32_t width) +{ + uint32_t refpri, refdur, searchpri = 0, deltapri; + uint32_t n, i, primargin, durmargin; + int score[DFS_MAX_DL_SIZE], delayindex, dindex, found = 0; + struct dfs_delayline *dl; + uint32_t scoreindex, lowpriindex = 0, lowpri = 0xffff; + int higherthan, lowerthan, numscores; + int numpulseshigh = 0, numpulsesmid = 0, numpulsestemp = 0; + uint32_t lowestscore = 0, lowestscoreindex = 0, lowestpri = 0; + uint32_t midscore = 0, midscoreindex = 0, 
midpri = 0; + uint32_t highestscore = 0, highestscoreindex = 0, highestpri = 0; + + dl = &rf->rf_dl; + if (dl->dl_numelems < (rf->rf_threshold-1)) { + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "numelems %d < threshold for filter %d", + dl->dl_numelems, + rf->rf_pulseid); + return 0; + } + if (deltaT > rf->rf_filterlen) { + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "numelems %d < threshold for filter %d", + dl->dl_numelems, + rf->rf_pulseid); + return 0; + } + primargin = 6; + if (rf->rf_maxdur < 10) + durmargin = 4; + else + durmargin = 6; + + qdf_mem_zero(score, sizeof(int)*DFS_MAX_DL_SIZE); + /* Find out the lowest pri */ + for (n = 0; n < dl->dl_numelems; n++) { + delayindex = (dl->dl_firstelem + n) & DFS_MAX_DL_MASK; + refpri = dl->dl_elems[delayindex].de_time; + if (refpri == 0) { + continue; + } else if (refpri < lowpri) { + lowpri = dl->dl_elems[delayindex].de_time; + lowpriindex = n; + } + } + + /* Find out the each delay element's pri score */ + for (n = 0; n < dl->dl_numelems; n++) { + delayindex = (dl->dl_firstelem + n) & DFS_MAX_DL_MASK; + refpri = dl->dl_elems[delayindex].de_time; + if (refpri == 0) + continue; + + if ((refpri > rf->rf_maxpri) || (refpri < rf->rf_minpri)) { + score[n] = 0; + continue; + } + + for (i = 0; i < dl->dl_numelems; i++) { + dindex = (dl->dl_firstelem + i) & DFS_MAX_DL_MASK; + searchpri = dl->dl_elems[dindex].de_time; + deltapri = DFS_DIFF(searchpri, refpri); + if (deltapri < primargin) + score[n]++; + } + } + + for (n = 0; n < dl->dl_numelems; n++) { + delayindex = (dl->dl_firstelem + n) & DFS_MAX_DL_MASK; + refdur = dl->dl_elems[delayindex].de_time; + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "score[%d]=%d pri=%d", + n, score[n], refdur); + } + + /* Find out the 2 or 3 highest scorers */ + scoreindex = 0; + highestscore = 0; + highestscoreindex = 0; + highestpri = 0; numscores = 0; lowestscore = 0; + + for (n = 0; n < dl->dl_numelems; n++) { + higherthan = 0; + lowerthan = 0; + delayindex = (dl->dl_firstelem + n) & DFS_MAX_DL_MASK; + refpri = 
dl->dl_elems[delayindex].de_time; + + if (!dfs_is_unique_pri(highestpri, + midpri, + lowestpri, + refpri)) + continue; + + if (score[n] >= highestscore) { + lowestscore = midscore; + lowestpri = midpri; + lowestscoreindex = midscoreindex; + midscore = highestscore; + midpri = highestpri; + midscoreindex = highestscoreindex; + highestscore = score[n]; + highestpri = refpri; + highestscoreindex = n; + } else if (score[n] >= midscore) { + lowestscore = midscore; + lowestpri = midpri; + lowestscoreindex = midscoreindex; + midscore = score[n]; + midpri = refpri; + midscoreindex = n; + } else if (score[n] >= lowestscore) { + lowestscore = score[n]; + lowestpri = refpri; + lowestscoreindex = n; + } + } + + if (midscore == 0) + return 0; + + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "FINAL highestscore=%d highestscoreindex = %d highestpri = %d", + highestscore, highestscoreindex, highestpri); + + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "FINAL lowestscore=%d lowestscoreindex=%d lowpri=%d", + lowestscore, lowestscoreindex, lowestpri); + + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "FINAL midscore=%d midscoreindex=%d midpri=%d", + midscore, midscoreindex, midpri); + + delayindex = (dl->dl_firstelem + highestscoreindex) & DFS_MAX_DL_MASK; + refdur = dl->dl_elems[delayindex].de_dur; + refpri = dl->dl_elems[delayindex].de_time; + + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "highscoreindex=%d refdur=%d refpri=%d", + highestscoreindex, refdur, refpri); + + numpulsestemp = dfs_bin_pri_check(dfs, rf, dl, highestscore, refpri, + refdur, 0, highestpri); + numpulseshigh = numpulsestemp; + numpulsestemp = dfs_bin_pri_check(dfs, rf, dl, highestscore, refpri, + refdur, 0, highestpri + midpri); + if (numpulsestemp > numpulseshigh) + numpulseshigh = numpulsestemp; + + numpulsestemp = dfs_bin_pri_check(dfs, rf, dl, highestscore, refpri, + refdur, 0, highestpri + midpri + lowestpri); + if (numpulsestemp > numpulseshigh) + numpulseshigh = numpulsestemp; + + delayindex = (dl->dl_firstelem + midscoreindex) & DFS_MAX_DL_MASK; 
+ refdur = dl->dl_elems[delayindex].de_dur; + refpri = dl->dl_elems[delayindex].de_time; + dfs_debug(dfs, WLAN_DEBUG_DFS1, + "midscoreindex=%d refdur=%d refpri=%d", + midscoreindex, refdur, refpri); + + numpulsestemp = dfs_bin_pri_check(dfs, rf, dl, midscore, refpri, refdur, + 0, midpri); + numpulsesmid = numpulsestemp; + numpulsestemp = dfs_bin_pri_check(dfs, rf, dl, midscore, refpri, refdur, + 0, highestpri + midpri); + if (numpulsestemp > numpulsesmid) + numpulsesmid = numpulsestemp; + numpulsestemp = dfs_bin_pri_check(dfs, rf, dl, midscore, refpri, refdur, + 0, highestpri + midpri + lowestpri); + if (numpulsestemp > numpulsesmid) + numpulsesmid = numpulsestemp; + + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "numpulseshigh=%d, numpulsesmid=%d", + numpulseshigh, numpulsesmid); + + if ((numpulseshigh >= rf->rf_threshold) && + (numpulsesmid >= rf->rf_threshold)) { + found = 1; + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "MATCH filter=%u numpulseshigh=%u numpulsesmid= %u thresh=%u", + rf->rf_pulseid, numpulseshigh, + numpulsesmid, rf->rf_threshold); + } + + return found; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs.c new file mode 100644 index 0000000000000000000000000000000000000000..20a03cb0a6854940d661dbfb9b3c2d775e32d934 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs.c @@ -0,0 +1,633 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2006, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains the dfs_attach() and dfs_detach() functions as well + * as the dfs_control() function which is used to process ioctls related to DFS. + * For Linux/Mac, "radartool" is the command line tool that can be used to call + * various ioctls to set and get radar detection thresholds. + */ + +#include "../dfs_zero_cac.h" +#include "wlan_dfs_lmac_api.h" +#include "wlan_dfs_mlme_api.h" +#include "wlan_dfs_tgt_api.h" +#include "../dfs_internal.h" +#include "../dfs_filter_init.h" +#include "../dfs_full_offload.h" +#include "wlan_dfs_utils_api.h" +#include "../dfs_partial_offload_radar.h" + +#ifndef WLAN_DFS_STATIC_MEM_ALLOC +/* + * dfs_alloc_wlan_dfs() - allocate wlan_dfs buffer + * + * Return: buffer, null on failure. + */ +static inline struct wlan_dfs *dfs_alloc_wlan_dfs(void) +{ + return qdf_mem_malloc(sizeof(struct wlan_dfs)); +} + +/* + * dfs_free_wlan_dfs() - Free wlan_dfs buffer + * @dfs: wlan_dfs buffer pointer + * + * Return: None + */ +static inline void dfs_free_wlan_dfs(struct wlan_dfs *dfs) +{ + qdf_mem_free(dfs); +} + +/* + * dfs_alloc_dfs_curchan() - allocate dfs_channel buffer + * + * Return: buffer, null on failure. 
+ */ +static inline struct dfs_channel *dfs_alloc_dfs_curchan(void) +{ + return qdf_mem_malloc(sizeof(struct dfs_channel)); +} + +/* + * dfs_free_dfs_curchan() - Free dfs_channel buffer + * @dfs_curchan: dfs_channel buffer pointer + * + * Return: None + */ +static inline void dfs_free_dfs_curchan(struct dfs_channel *dfs_curchan) +{ + qdf_mem_free(dfs_curchan); +} + +#else + +/* Static buffers for DFS objects */ +static struct wlan_dfs global_dfs; +static struct dfs_channel global_dfs_curchan; + +static inline struct wlan_dfs *dfs_alloc_wlan_dfs(void) +{ + return &global_dfs; +} + +static inline void dfs_free_wlan_dfs(struct wlan_dfs *dfs) +{ +} + +static inline struct dfs_channel *dfs_alloc_dfs_curchan(void) +{ + return &global_dfs_curchan; +} + +static inline void dfs_free_dfs_curchan(struct dfs_channel *dfs_curchan) +{ +} +#endif + +/** + * dfs_testtimer_task() - Sends CSA in the current channel. + * + * When the user sets usenol to 0 and inject the RADAR, AP does not mark the + * channel as RADAR and does not add the channel to NOL. It sends the CSA in + * the current channel. + */ +static os_timer_func(dfs_testtimer_task) +{ + struct wlan_dfs *dfs = NULL; + + OS_GET_TIMER_ARG(dfs, struct wlan_dfs *); + dfs->wlan_dfstest = 0; + + /* + * Flip the channel back to the original channel. + * Make sure this is done properly with a CSA. 
+ */ + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "go back to channel %d", + dfs->wlan_dfstest_ieeechan); + dfs_mlme_start_csa(dfs->dfs_pdev_obj, + dfs->wlan_dfstest_ieeechan, + dfs->dfs_curchan->dfs_ch_freq, + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg2, + dfs->dfs_curchan->dfs_ch_flags); +} + +int dfs_get_debug_info(struct wlan_dfs *dfs, void *data) +{ + if (data) + *(uint32_t *)data = dfs->dfs_proc_phyerr; + + return (int)dfs->dfs_proc_phyerr; +} + +void dfs_main_task_testtimer_init(struct wlan_dfs *dfs) +{ + qdf_timer_init(NULL, + &(dfs->wlan_dfstesttimer), + dfs_testtimer_task, (void *)dfs, + QDF_TIMER_TYPE_WAKE_APPS); +} + +int dfs_create_object(struct wlan_dfs **dfs) +{ + *dfs = dfs_alloc_wlan_dfs(); + if (!(*dfs)) { + dfs_alert(NULL, WLAN_DEBUG_DFS_ALWAYS, + "wlan_dfs allocation failed"); + return 1; + } + + qdf_mem_zero(*dfs, sizeof(**dfs)); + + (*dfs)->dfs_curchan = dfs_alloc_dfs_curchan(); + if (!((*dfs)->dfs_curchan)) { + dfs_free_wlan_dfs(*dfs); + dfs_alert(*dfs, WLAN_DEBUG_DFS_ALWAYS, + "dfs_curchan allocation failed"); + return 1; + } + + return 0; +} + +int dfs_attach(struct wlan_dfs *dfs) +{ + int ret; + + if (!dfs->dfs_is_offload_enabled) { + ret = dfs_main_attach(dfs); + + /* + * For full offload we have a wmi handler registered to process + * a radar event from firmware in the event of a radar detect. + * So, init of timer, dfs_task is not required for + * full-offload. dfs_task timer is called in + * dfs_main_timer_init within dfs_main_attach for + * partial-offload in the event of radar detect. + */ + if (ret) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_main_attach failed"); + return ret; + } + } + dfs_cac_attach(dfs); + dfs_zero_cac_attach(dfs); + dfs_nol_attach(dfs); + + /* + * Init of timer ,dfs_testtimer_task is required by both partial + * and full offload, indicating test mode timer initialization for both. 
+ */ + dfs_main_task_testtimer_init(dfs); + return 0; +} + +void dfs_stop(struct wlan_dfs *dfs) +{ + dfs_nol_timer_cleanup(dfs); + dfs_nol_workqueue_cleanup(dfs); + dfs_clear_nolhistory(dfs); +} + +void dfs_task_testtimer_reset(struct wlan_dfs *dfs) +{ + if (dfs->wlan_dfstest) { + qdf_timer_sync_cancel(&dfs->wlan_dfstesttimer); + dfs->wlan_dfstest = 0; + } +} + +void dfs_task_testtimer_free(struct wlan_dfs *dfs) +{ + qdf_timer_free(&dfs->wlan_dfstesttimer); + dfs->wlan_dfstest = 0; +} + +void dfs_reset(struct wlan_dfs *dfs) +{ + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs_cac_timer_reset(dfs); + dfs_zero_cac_reset(dfs); + if (!dfs->dfs_is_offload_enabled) { + dfs_main_timer_reset(dfs); + dfs_host_wait_timer_reset(dfs); + dfs->dfs_event_log_count = 0; + } + dfs_task_testtimer_reset(dfs); +} + +void dfs_timer_free(struct wlan_dfs *dfs) +{ + dfs_cac_timer_free(dfs); + dfs_zero_cac_timer_free(dfs); + + if (!dfs->dfs_is_offload_enabled) { + dfs_main_timer_free(dfs); + dfs_host_wait_timer_free(dfs); + } + + dfs_task_testtimer_free(dfs); + dfs_nol_timer_free(dfs); +} + +void dfs_detach(struct wlan_dfs *dfs) +{ + dfs_timer_free(dfs); + if (!dfs->dfs_is_offload_enabled) + dfs_main_detach(dfs); + dfs_zero_cac_detach(dfs); + dfs_nol_detach(dfs); +} + +#ifndef WLAN_DFS_STATIC_MEM_ALLOC +void dfs_destroy_object(struct wlan_dfs *dfs) +{ + dfs_free_dfs_curchan(dfs->dfs_curchan); + dfs_free_wlan_dfs(dfs); +} +#else +void dfs_destroy_object(struct wlan_dfs *dfs) +{ +} +#endif + +int dfs_control(struct wlan_dfs *dfs, + u_int id, + void *indata, + uint32_t insize, + void *outdata, + uint32_t *outsize) +{ + struct wlan_dfs_phyerr_param peout; + struct dfs_ioctl_params *dfsparams; + int error = 0; +#ifdef WLAN_DEBUG + uint32_t val = 0; +#endif + struct dfsreq_nolinfo *nol; + uint32_t *data = NULL; + int i; + struct dfs_emulate_bang_radar_test_cmd dfs_unit_test; + + qdf_mem_zero(&dfs_unit_test, sizeof(dfs_unit_test)); + + if (!dfs) { + 
dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL");
+		goto bad;
+	}
+
+	switch (id) {
+	case DFS_SET_THRESH:
+		if (insize < sizeof(struct dfs_ioctl_params) || !indata) {
+			dfs_debug(dfs, WLAN_DEBUG_DFS1,
+					"insize = %d, expected = %zu bytes, indata = %pK",
+					insize,
+					sizeof(struct dfs_ioctl_params),
+					indata);
+			error = -EINVAL;
+			break;
+		}
+		dfsparams = (struct dfs_ioctl_params *)indata;
+		if (!dfs_set_thresholds(dfs, DFS_PARAM_FIRPWR,
+					dfsparams->dfs_firpwr))
+			error = -EINVAL;
+		if (!dfs_set_thresholds(dfs, DFS_PARAM_RRSSI,
+					dfsparams->dfs_rrssi))
+			error = -EINVAL;
+		if (!dfs_set_thresholds(dfs, DFS_PARAM_HEIGHT,
+					dfsparams->dfs_height))
+			error = -EINVAL;
+		if (!dfs_set_thresholds(dfs, DFS_PARAM_PRSSI,
+					dfsparams->dfs_prssi))
+			error = -EINVAL;
+		if (!dfs_set_thresholds(dfs, DFS_PARAM_INBAND,
+					dfsparams->dfs_inband))
+			error = -EINVAL;
+
+		/* 5413 specific. */
+		if (!dfs_set_thresholds(dfs, DFS_PARAM_RELPWR,
+					dfsparams->dfs_relpwr))
+			error = -EINVAL;
+		if (!dfs_set_thresholds(dfs, DFS_PARAM_RELSTEP,
+					dfsparams->dfs_relstep))
+			error = -EINVAL;
+		if (!dfs_set_thresholds(dfs, DFS_PARAM_MAXLEN,
+					dfsparams->dfs_maxlen))
+			error = -EINVAL;
+		break;
+	case DFS_GET_THRESH:
+		if (!outdata || !outsize ||
+				*outsize < sizeof(struct dfs_ioctl_params)) {
+			error = -EINVAL;
+			break;
+		}
+		*outsize = sizeof(struct dfs_ioctl_params);
+		dfsparams = (struct dfs_ioctl_params *) outdata;
+
+		/* Fetch the DFS thresholds using the internal representation */
+		(void) dfs_get_thresholds(dfs, &peout);
+
+		/* Convert them to the dfs IOCTL representation. 
*/ + wlan_dfs_dfsparam_to_ioctlparam(&peout, dfsparams); + break; + case DFS_RADARDETECTS: + if (!outdata || !outsize || *outsize < sizeof(uint32_t)) { + error = -EINVAL; + break; + } + *outsize = sizeof(uint32_t); + *((uint32_t *)outdata) = dfs->wlan_dfs_stats.num_radar_detects; + break; + case DFS_DISABLE_DETECT: + dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN; + dfs->dfs_proc_phyerr &= ~DFS_SECOND_SEGMENT_RADAR_EN; + dfs->dfs_ignore_dfs = 1; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "enable detects, ignore_dfs %d", + dfs->dfs_ignore_dfs ? 1:0); + break; + case DFS_ENABLE_DETECT: + dfs->dfs_proc_phyerr |= DFS_RADAR_EN; + dfs->dfs_proc_phyerr |= DFS_SECOND_SEGMENT_RADAR_EN; + dfs->dfs_ignore_dfs = 0; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS + , "enable detects, ignore_dfs %d", + dfs->dfs_ignore_dfs ? 1:0); + break; + case DFS_DISABLE_FFT: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "TODO disable FFT val=0x%x", val); + break; + case DFS_ENABLE_FFT: + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "TODO enable FFT val=0x%x", val); + break; + case DFS_SET_DEBUG_LEVEL: + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + dfs->dfs_debug_mask = *(uint32_t *)indata; + + /* Do not allow user to set the ALWAYS/MAX bit. + * It will be used internally by dfs print macro(s) + * to print messages when dfs is NULL. 
+ */ + dfs->dfs_debug_mask &= ~(WLAN_DEBUG_DFS_ALWAYS); + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "debug level now = 0x%x", dfs->dfs_debug_mask); + if (dfs->dfs_debug_mask & WLAN_DEBUG_DFS3) { + /* Enable debug Radar Event */ + dfs->dfs_event_log_on = 1; + } else if ((utils_get_dfsdomain(dfs->dfs_pdev_obj) == + DFS_FCC_DOMAIN) && + lmac_is_host_dfs_check_support_enabled(dfs->dfs_pdev_obj)) { + dfs->dfs_event_log_on = 1; + } else { + dfs->dfs_event_log_on = 0; + } + break; + case DFS_SET_FALSE_RSSI_THRES: + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + dfs->wlan_dfs_false_rssi_thres = *(uint32_t *)indata; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "false RSSI threshold now = 0x%x", + dfs->wlan_dfs_false_rssi_thres); + break; + case DFS_SET_PEAK_MAG: + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + dfs->wlan_dfs_peak_mag = *(uint32_t *)indata; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "peak_mag now = 0x%x", + dfs->wlan_dfs_peak_mag); + break; + case DFS_GET_CAC_VALID_TIME: + if (!outdata || !outsize || *outsize < sizeof(uint32_t)) { + error = -EINVAL; + break; + } + *outsize = sizeof(uint32_t); + *((uint32_t *)outdata) = dfs->dfs_cac_valid_time; + break; + case DFS_SET_CAC_VALID_TIME: + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + dfs->dfs_cac_valid_time = *(uint32_t *)indata; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "dfs timeout = %d", dfs->dfs_cac_valid_time); + break; + case DFS_IGNORE_CAC: + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + + if (*(uint32_t *)indata) + dfs->dfs_ignore_cac = 1; + else + dfs->dfs_ignore_cac = 0; + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "ignore cac = 0x%x", dfs->dfs_ignore_cac); + break; + case DFS_SET_NOL_TIMEOUT: + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + if (*(int *)indata) + dfs->wlan_dfs_nol_timeout = *(int *)indata; + else + dfs->wlan_dfs_nol_timeout = 
DFS_NOL_TIMEOUT_S; + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "nol timeout = %d sec", + dfs->wlan_dfs_nol_timeout); + break; + case DFS_MUTE_TIME: + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + data = (uint32_t *) indata; + dfs->wlan_dfstesttime = *data; + dfs->wlan_dfstesttime *= (1000); /* convert sec into ms */ + break; + case DFS_GET_USENOL: + if (!outdata || !outsize || *outsize < sizeof(uint32_t)) { + error = -EINVAL; + break; + } + *outsize = sizeof(uint32_t); + *((uint32_t *)outdata) = dfs->dfs_use_nol; + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "#Phyerr=%d, #false detect=%d, #queued=%d", + dfs->dfs_phyerr_count, + dfs->dfs_phyerr_reject_count, + dfs->dfs_phyerr_queued_count); + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "dfs_phyerr_freq_min=%d, dfs_phyerr_freq_max=%d", + dfs->dfs_phyerr_freq_min, + dfs->dfs_phyerr_freq_max); + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Total radar events detected=%d, entries in the radar queue follows:", + dfs->dfs_event_log_count); + + for (i = 0; (i < DFS_EVENT_LOG_SIZE) && + (i < dfs->dfs_event_log_count); i++) { +#define FREQ_OFFSET1 ((int)dfs->radar_log[i].freq_offset_khz / 1000) +#define FREQ_OFFSET2 ((int)abs(dfs->radar_log[i].freq_offset_khz) % 1000) + dfs_debug(dfs, WLAN_DEBUG_DFS, + "ts=%llu diff_ts=%u rssi=%u dur=%u, is_chirp=%d, seg_id=%d, sidx=%d, freq_offset=%d.%dMHz, peak_mag=%d, total_gain=%d, mb_gain=%d, relpwr_db=%d, delta_diff=%d, delta_peak=%d, psidx_diff=%d\n", + dfs->radar_log[i].ts, + dfs->radar_log[i].diff_ts, + dfs->radar_log[i].rssi, + dfs->radar_log[i].dur, + dfs->radar_log[i].is_chirp, + dfs->radar_log[i].seg_id, + dfs->radar_log[i].sidx, + FREQ_OFFSET1, + FREQ_OFFSET2, + dfs->radar_log[i].peak_mag, + dfs->radar_log[i].total_gain, + dfs->radar_log[i].mb_gain, + dfs->radar_log[i].relpwr_db, + dfs->radar_log[i].delta_diff, + dfs->radar_log[i].delta_peak, + dfs->radar_log[i].psidx_diff); + } + dfs->dfs_event_log_count = 0; + dfs->dfs_phyerr_count = 0; + 
dfs->dfs_phyerr_reject_count = 0; + dfs->dfs_phyerr_queued_count = 0; + dfs->dfs_phyerr_freq_min = 0x7fffffff; + dfs->dfs_phyerr_freq_max = 0; + break; + case DFS_SET_USENOL: + if (insize < sizeof(uint32_t) || !indata) { + error = -EINVAL; + break; + } + dfs->dfs_use_nol = *(uint32_t *)indata; + break; + case DFS_GET_NOL: + if (!outdata || !outsize || + *outsize < sizeof(struct dfsreq_nolinfo)) { + error = -EINVAL; + break; + } + *outsize = sizeof(struct dfsreq_nolinfo); + nol = (struct dfsreq_nolinfo *)outdata; + DFS_GET_NOL_LOCKED(dfs, + (struct dfsreq_nolelem *)nol->dfs_nol, + &nol->dfs_ch_nchans); + DFS_PRINT_NOL_LOCKED(dfs); + break; + case DFS_SET_NOL: + if (insize < sizeof(struct dfsreq_nolinfo) || !indata) { + error = -EINVAL; + break; + } + nol = (struct dfsreq_nolinfo *) indata; + dfs_set_nol(dfs, + (struct dfsreq_nolelem *)nol->dfs_nol, + nol->dfs_ch_nchans); + break; + case DFS_SHOW_NOL: + DFS_PRINT_NOL_LOCKED(dfs); + break; + case DFS_SHOW_NOLHISTORY: + dfs_print_nolhistory(dfs); + break; + case DFS_BANGRADAR: + if (dfs->dfs_is_offload_enabled) { + error = dfs_fill_emulate_bang_radar_test(dfs, + SEG_ID_PRIMARY, + &dfs_unit_test); + } else { + dfs->dfs_bangradar = 1; + error = dfs_start_host_based_bangradar(dfs); + } + break; + case DFS_SHOW_PRECAC_LISTS: + dfs_print_precaclists(dfs); + break; + case DFS_RESET_PRECAC_LISTS: + dfs_reset_precac_lists(dfs); + break; + case DFS_SECOND_SEGMENT_BANGRADAR: + if (dfs->dfs_is_offload_enabled) { + error = dfs_fill_emulate_bang_radar_test(dfs, + SEG_ID_SECONDARY, + &dfs_unit_test); + } else { + dfs->dfs_second_segment_bangradar = 1; + error = dfs_start_host_based_bangradar(dfs); + } + break; + default: + error = -EINVAL; + } + +bad: + return error; +} + +void dfs_set_current_channel(struct wlan_dfs *dfs, + uint16_t dfs_ch_freq, + uint64_t dfs_ch_flags, + uint16_t dfs_ch_flagext, + uint8_t dfs_ch_ieee, + uint8_t dfs_ch_vhtop_ch_freq_seg1, + uint8_t dfs_ch_vhtop_ch_freq_seg2) +{ + if (!dfs) { + dfs_err(dfs, 
WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs->dfs_curchan->dfs_ch_freq = dfs_ch_freq; + dfs->dfs_curchan->dfs_ch_flags = dfs_ch_flags; + dfs->dfs_curchan->dfs_ch_flagext = dfs_ch_flagext; + dfs->dfs_curchan->dfs_ch_ieee = dfs_ch_ieee; + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg1 = dfs_ch_vhtop_ch_freq_seg1; + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg2 = dfs_ch_vhtop_ch_freq_seg2; +} + +void dfs_update_cur_chan_flags(struct wlan_dfs *dfs, + uint64_t flags, + uint16_t flagext) +{ + dfs->dfs_curchan->dfs_ch_flags = flags; + dfs->dfs_curchan->dfs_ch_flagext = flagext; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_cac.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_cac.c new file mode 100644 index 0000000000000000000000000000000000000000..54d3ffe2fff2d9e35ccf8199aa815eb316e9357d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_cac.c @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * DOC: This file has the functions related to DFS CAC. + */ + +#include "../dfs_channel.h" +#include "../dfs_zero_cac.h" +#include "wlan_dfs_utils_api.h" +#include "wlan_dfs_mlme_api.h" +#include "../dfs_internal.h" + +#define IS_CHANNEL_WEATHER_RADAR(freq) ((freq >= 5600) && (freq <= 5650)) +#define ADJACENT_WEATHER_RADAR_CHANNEL 5580 +#define CH100_START_FREQ 5490 +#define CH100 100 + +int dfs_override_cac_timeout(struct wlan_dfs *dfs, int cac_timeout) +{ + if (!dfs) + return -EIO; + + dfs->dfs_cac_timeout_override = cac_timeout; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "CAC timeout is now %s %d", + (cac_timeout == -1) ? "default" : "overridden", + cac_timeout); + + return 0; +} + +int dfs_get_override_cac_timeout(struct wlan_dfs *dfs, int *cac_timeout) +{ + if (!dfs) + return -EIO; + + (*cac_timeout) = dfs->dfs_cac_timeout_override; + + return 0; +} + +void dfs_cac_valid_reset(struct wlan_dfs *dfs, + uint8_t prevchan_ieee, + uint32_t prevchan_flags) +{ + if (dfs->dfs_cac_valid_time) { + if ((prevchan_ieee != dfs->dfs_curchan->dfs_ch_ieee) || + (prevchan_flags != dfs->dfs_curchan->dfs_ch_flags)) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Cancelling timer & clearing cac_valid" + ); + qdf_timer_stop(&dfs->dfs_cac_valid_timer); + dfs->dfs_cac_valid = 0; + } + } +} + +/** + * dfs_cac_valid_timeout() - Timeout function for dfs_cac_valid_timer + * cac_valid bit will be reset in this function. 
+ */ +static os_timer_func(dfs_cac_valid_timeout) +{ + struct wlan_dfs *dfs = NULL; + + OS_GET_TIMER_ARG(dfs, struct wlan_dfs *); + dfs->dfs_cac_valid = 0; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, ": Timed out!!"); +} + +/** + * dfs_cac_timeout() - DFS cactimeout function. + * + * Sets dfs_cac_timer_running to 0 and dfs_cac_valid_timer. + */ +static os_timer_func(dfs_cac_timeout) +{ + struct wlan_dfs *dfs = NULL; + + OS_GET_TIMER_ARG(dfs, struct wlan_dfs *); + dfs->dfs_cac_timer_running = 0; + + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "cac expired, chan %d curr time %d", + dfs->dfs_curchan->dfs_ch_freq, + (qdf_system_ticks_to_msecs(qdf_system_ticks()) / 1000)); + /* + * When radar is detected during a CAC we are woken up prematurely to + * switch to a new channel. Check the channel to decide how to act. + */ + if (WLAN_IS_CHAN_RADAR(dfs->dfs_curchan)) { + dfs_mlme_mark_dfs(dfs->dfs_pdev_obj, + dfs->dfs_curchan->dfs_ch_ieee, + dfs->dfs_curchan->dfs_ch_freq, + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg2, + dfs->dfs_curchan->dfs_ch_flags); + dfs_debug(dfs, WLAN_DEBUG_DFS, + "CAC timer on channel %u (%u MHz) stopped due to radar", + dfs->dfs_curchan->dfs_ch_ieee, + dfs->dfs_curchan->dfs_ch_freq); + } else { + dfs_debug(dfs, WLAN_DEBUG_DFS, + "CAC timer on channel %u (%u MHz) expired; no radar detected", + dfs->dfs_curchan->dfs_ch_ieee, + dfs->dfs_curchan->dfs_ch_freq); + + /* On CAC completion, set the bit 'cac_valid'. + * CAC will not be re-done if this bit is reset. + * The flag will be reset when dfs_cac_valid_timer + * timesout. + */ + if (dfs->dfs_cac_valid_time) { + dfs->dfs_cac_valid = 1; + qdf_timer_mod(&dfs->dfs_cac_valid_timer, + dfs->dfs_cac_valid_time * 1000); + } + } + + /* Iterate over the nodes, processing the CAC completion event. 
*/ + dfs_mlme_proc_cac(dfs->dfs_pdev_obj, 0); + + /* Send a CAC timeout, VAP up event to user space */ + dfs_mlme_deliver_event_up_afrer_cac(dfs->dfs_pdev_obj); + + if (dfs->dfs_defer_precac_channel_change == 1) { + dfs_mlme_channel_change_by_precac(dfs->dfs_pdev_obj); + dfs->dfs_defer_precac_channel_change = 0; + } +} + +void dfs_cac_timer_init(struct wlan_dfs *dfs) +{ + qdf_timer_init(NULL, + &(dfs->dfs_cac_timer), + dfs_cac_timeout, + (void *)(dfs), + QDF_TIMER_TYPE_WAKE_APPS); + + qdf_timer_init(NULL, + &(dfs->dfs_cac_valid_timer), + dfs_cac_valid_timeout, + (void *)(dfs), + QDF_TIMER_TYPE_WAKE_APPS); +} + +void dfs_cac_attach(struct wlan_dfs *dfs) +{ + dfs->dfs_cac_timeout_override = -1; + dfs->wlan_dfs_cac_time = WLAN_DFS_WAIT_MS; + dfs_cac_timer_init(dfs); +} + +void dfs_cac_timer_reset(struct wlan_dfs *dfs) +{ + qdf_timer_stop(&dfs->dfs_cac_timer); + dfs_get_override_cac_timeout(dfs, + &(dfs->dfs_cac_timeout_override)); + +} + +void dfs_cac_timer_free(struct wlan_dfs *dfs) +{ + qdf_timer_free(&dfs->dfs_cac_timer); + + qdf_timer_free(&dfs->dfs_cac_valid_timer); + dfs->dfs_cac_valid = 0; +} + +int dfs_is_ap_cac_timer_running(struct wlan_dfs *dfs) +{ + return dfs->dfs_cac_timer_running; +} + +void dfs_start_cac_timer(struct wlan_dfs *dfs) +{ + qdf_timer_mod(&dfs->dfs_cac_timer, + dfs_mlme_get_cac_timeout(dfs->dfs_pdev_obj, + dfs->dfs_curchan->dfs_ch_freq, + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg2, + dfs->dfs_curchan->dfs_ch_flags) * 1000); +} + +void dfs_cancel_cac_timer(struct wlan_dfs *dfs) +{ + qdf_timer_stop(&dfs->dfs_cac_timer); +} + +void dfs_cac_stop(struct wlan_dfs *dfs) +{ + uint32_t phyerr; + + dfs_get_debug_info(dfs, (void *)&phyerr); + dfs_debug(dfs, WLAN_DEBUG_DFS, + "Stopping CAC Timer %d procphyerr 0x%08x", + dfs->dfs_curchan->dfs_ch_freq, phyerr); + qdf_timer_stop(&dfs->dfs_cac_timer); + dfs->dfs_cac_timer_running = 0; +} + +void dfs_stacac_stop(struct wlan_dfs *dfs) +{ + uint32_t phyerr; + + dfs_get_debug_info(dfs, (void *)&phyerr); + 
dfs_debug(dfs, WLAN_DEBUG_DFS, + "Stopping STA CAC Timer %d procphyerr 0x%08x", + dfs->dfs_curchan->dfs_ch_freq, phyerr); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_filter_init.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_filter_init.c new file mode 100644 index 0000000000000000000000000000000000000000..d867bba7b2627e0fdde012cc632f8282cf5a9c4b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_filter_init.c @@ -0,0 +1,425 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2006, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains the dfs_attach() and dfs_detach() functions as well + * as the dfs_control() function which is used to process ioctls related to DFS. + * For Linux/Mac, "radartool" is the command line tool that can be used to call + * various ioctls to set and get radar detection thresholds. 
+ */ + +#include "../dfs_zero_cac.h" +#include "wlan_dfs_lmac_api.h" +#include "wlan_dfs_mlme_api.h" +#include "wlan_dfs_tgt_api.h" +#include "../dfs_internal.h" +#include "../dfs_filter_init.h" +#include "../dfs_partial_offload_radar.h" + +#ifndef WLAN_DFS_STATIC_MEM_ALLOC +/* + * dfs_alloc_dfs_events() - allocate dfs events buffer + * + * Return: events buffer, null on failure. + */ +static inline struct dfs_event *dfs_alloc_dfs_events(void) +{ + return qdf_mem_malloc(sizeof(struct dfs_event) * DFS_MAX_EVENTS); +} + +/* + * dfs_free_dfs_events() - Free events buffer + * @events: Events buffer pointer + * + * Return: None + */ +static inline void dfs_free_dfs_events(struct dfs_event *events) +{ + qdf_mem_free(events); +} + +/* + * dfs_alloc_dfs_pulseline() - allocate buffer for dfs pulses + * + * Return: events buffer, null on failure. + */ +static inline struct dfs_pulseline *dfs_alloc_dfs_pulseline(void) +{ + return qdf_mem_malloc(sizeof(struct dfs_pulseline)); +} + +/* + * dfs_free_dfs_pulseline() - Free pulse buffer + * @pulses: Pulses buffer pointer + * + * Return: None + */ +static inline void dfs_free_dfs_pulseline(struct dfs_pulseline *pulses) +{ + qdf_mem_free(pulses); +} +#else +/* Static buffers for DFS objects */ +static struct dfs_event global_dfs_event[DFS_MAX_EVENTS]; +static struct dfs_pulseline global_dfs_pulseline; + +static inline struct dfs_event *dfs_alloc_dfs_events(void) +{ + return global_dfs_event; +} + +static inline void dfs_free_dfs_events(struct dfs_event *events) +{ +} + +static inline struct dfs_pulseline *dfs_alloc_dfs_pulseline(void) +{ + return &global_dfs_pulseline; +} + +static inline void dfs_free_dfs_pulseline(struct dfs_pulseline *pulses) +{ +} +#endif + +/* + * Channel switch announcement (CSA) + * usenol=1 (default) make CSA and switch to a new channel on radar detect + * usenol=0, make CSA with next channel same as current on radar detect + * usenol=2, no CSA and stay on the same channel on radar detect + */ + +/** + * 
dfs_task() - The timer function to process the radar pulses. + */ +static os_timer_func(dfs_task) +{ + struct wlan_dfs *dfs = NULL; + + OS_GET_TIMER_ARG(dfs, struct wlan_dfs *); + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs_process_radarevent(dfs, dfs->dfs_curchan); + + dfs->wlan_radar_tasksched = 0; +} + +/** + * dfs_main_task_timer_init() - Initialize dfs task timer. + * @dfs: Pointer to wlan_dfs structure. + */ +static void dfs_main_task_timer_init(struct wlan_dfs *dfs) +{ + qdf_timer_init(NULL, + &(dfs->wlan_dfs_task_timer), + dfs_task, + (void *)(dfs), + QDF_TIMER_TYPE_WAKE_APPS); +} + +/** + * dfs_free_filter() - free memory allocated for dfs ft_filters + * @radarf: pointer holding ft_filters. + * + * Return: None + */ +static void dfs_free_filter(struct dfs_filtertype *radarf) +{ + uint8_t i; + + for (i = 0; i < DFS_MAX_NUM_RADAR_FILTERS; i++) { + if (radarf->ft_filters[i]) { + qdf_mem_free(radarf->ft_filters[i]); + radarf->ft_filters[i] = NULL; + } + } +} + +/** + * dfs_alloc_mem_filter() - allocate memory for dfs ft_filters + * @radarf: pointer holding ft_filters. + * + * Return: QDF_STATUS + */ +static QDF_STATUS dfs_alloc_mem_filter(struct dfs_filtertype *radarf) +{ + uint8_t i; + + for (i = 0; i < DFS_MAX_NUM_RADAR_FILTERS; i++) { + radarf->ft_filters[i] = qdf_mem_malloc(sizeof(struct + dfs_filter)); + if (!radarf->ft_filters[i]) { + /* Free all the filter if malloc failed */ + dfs_free_filter(radarf); + return QDF_STATUS_E_FAILURE; + } + } + + return QDF_STATUS_SUCCESS; +} + +int dfs_main_attach(struct wlan_dfs *dfs) +{ + int i, n; + QDF_STATUS status; + struct wlan_dfs_radar_tab_info radar_info; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return 0; + } + + /* If ignore_dfs is set to 1 then Radar detection is disabled. */ + if (dfs->dfs_ignore_dfs) { + dfs_debug(dfs, WLAN_DEBUG_DFS1, "ignoring dfs"); + return 0; + } + + /* + * Zero out radar_info. 
It's possible that the attach function + * won't fetch an initial regulatory configuration; you really + * do want to ensure that the contents indicates there aren't + * any filters. + */ + qdf_mem_zero(&radar_info, sizeof(radar_info)); + + lmac_get_caps(dfs->dfs_pdev_obj, &(dfs->dfs_caps)); + + dfs_clear_stats(dfs); + dfs->dfs_event_log_on = 1; + dfs_debug(dfs, WLAN_DEBUG_DFS_ALWAYS, "event log enabled by default"); + + dfs->dfs_enable = 1; + + /*Verify : Passing NULL to qdf_timer_init().*/ + dfs_main_task_timer_init(dfs); + + dfs_host_wait_timer_init(dfs); + + WLAN_DFSQ_LOCK_CREATE(dfs); + STAILQ_INIT(&dfs->dfs_radarq); + WLAN_ARQ_LOCK_CREATE(dfs); + STAILQ_INIT(&dfs->dfs_arq); + STAILQ_INIT(&(dfs->dfs_eventq)); + WLAN_DFSEVENTQ_LOCK_CREATE(dfs); + + dfs->events = dfs_alloc_dfs_events(); + if (!(dfs->events)) { + dfs_alert(dfs, WLAN_DEBUG_DFS_ALWAYS, + "events allocation failed"); + return 1; + } + + for (i = 0; i < DFS_MAX_EVENTS; i++) + STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), &dfs->events[i], + re_list); + + dfs->pulses = dfs_alloc_dfs_pulseline(); + if (!(dfs->pulses)) { + dfs_free_dfs_events(dfs->events); + dfs->events = NULL; + dfs_alert(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Pulse buffer allocation failed"); + return 1; + } + + dfs->pulses->pl_lastelem = DFS_MAX_PULSE_BUFFER_MASK; + + /* Allocate memory for radar filters. */ + for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) { + dfs->dfs_radarf[n] = (struct dfs_filtertype *) + qdf_mem_malloc(sizeof(struct dfs_filtertype)); + if (!(dfs->dfs_radarf[n])) { + dfs_alert(dfs, WLAN_DEBUG_DFS_ALWAYS, + "cannot allocate memory for radar filter types"); + goto bad1; + } + qdf_mem_zero(dfs->dfs_radarf[n], + sizeof(struct dfs_filtertype)); + status = dfs_alloc_mem_filter(dfs->dfs_radarf[n]); + if (!QDF_IS_STATUS_SUCCESS(status)) { + dfs_alert(dfs, WLAN_DEBUG_DFS_ALWAYS, + "mem alloc for dfs_filter failed"); + goto bad1; + } + } + + /* Allocate memory for radar table. 
*/ + dfs->dfs_ftindextable = (int8_t **)qdf_mem_malloc( + DFS_NUM_FT_IDX_TBL_ROWS*sizeof(int8_t *)); + if (!(dfs->dfs_ftindextable)) { + dfs_alert(dfs, WLAN_DEBUG_DFS_ALWAYS, "Cannot allocate memory for radar table"); + goto bad1; + } + for (n = 0; n < DFS_NUM_FT_IDX_TBL_ROWS; n++) { + dfs->dfs_ftindextable[n] = qdf_mem_malloc( + DFS_MAX_RADAR_OVERLAP*sizeof(int8_t)); + if (!(dfs->dfs_ftindextable[n])) { + dfs_alert(dfs, WLAN_DEBUG_DFS_ALWAYS, + "cannot allocate memory for radar table entry"); + goto bad2; + } + } + + dfs->dfs_use_nol = 1; + + /* Init the cached extension channel busy for false alarm reduction */ + dfs->dfs_rinfo.ext_chan_busy_ts = lmac_get_tsf64(dfs->dfs_pdev_obj); + dfs->dfs_rinfo.dfs_ext_chan_busy = 0; + /* Init the Bin5 chirping related data */ + dfs->dfs_rinfo.dfs_bin5_chirp_ts = dfs->dfs_rinfo.ext_chan_busy_ts; + dfs->dfs_rinfo.dfs_last_bin5_dur = MAX_BIN5_DUR; + dfs->dfs_b5radars = NULL; + + /* + * If dfs_init_radar_filters() fails, we can abort here and + * reconfigure when the first valid channel + radar config + * is available. 
+ */ + if (dfs_init_radar_filters(dfs, &radar_info)) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "Radar Filter Intialization Failed"); + return 1; + } + + dfs->wlan_dfs_false_rssi_thres = RSSI_POSSIBLY_FALSE; + dfs->wlan_dfs_peak_mag = SEARCH_FFT_REPORT_PEAK_MAG_THRSH; + dfs->dfs_phyerr_freq_min = 0x7fffffff; + dfs->dfs_phyerr_freq_max = 0; + dfs->dfs_phyerr_queued_count = 0; + dfs->dfs_phyerr_w53_counter = 0; + dfs->dfs_pri_multiplier = 2; + dfs_get_radars(dfs); + + return 0; + +bad2: + qdf_mem_free(dfs->dfs_ftindextable); + dfs->dfs_ftindextable = NULL; +bad1: + for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) { + if (dfs->dfs_radarf[n] != NULL) { + dfs_free_filter(dfs->dfs_radarf[n]); + qdf_mem_free(dfs->dfs_radarf[n]); + dfs->dfs_radarf[n] = NULL; + } + } + if (dfs->pulses) { + dfs_free_dfs_pulseline(dfs->pulses); + dfs->pulses = NULL; + } + if (dfs->events) { + dfs_free_dfs_events(dfs->events); + dfs->events = NULL; + } + + return 1; +} + +void dfs_main_timer_reset(struct wlan_dfs *dfs) +{ + if (dfs->wlan_radar_tasksched) { + qdf_timer_sync_cancel(&dfs->wlan_dfs_task_timer); + dfs->wlan_radar_tasksched = 0; + } +} + +void dfs_main_timer_free(struct wlan_dfs *dfs) +{ + qdf_timer_free(&dfs->wlan_dfs_task_timer); + dfs->wlan_radar_tasksched = 0; +} + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_host_wait_timer_free(struct wlan_dfs *dfs) +{ + qdf_timer_free(&dfs->dfs_host_wait_timer); +} +#endif + +void dfs_main_detach(struct wlan_dfs *dfs) +{ + int n, empty; + + if (!dfs->dfs_enable) { + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "Already detached"); + return; + } + + dfs->dfs_enable = 0; + + dfs_reset_radarq(dfs); + dfs_reset_alldelaylines(dfs); + + if (dfs->pulses != NULL) { + dfs_free_dfs_pulseline(dfs->pulses); + dfs->pulses = NULL; + } + + for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) { + if (dfs->dfs_radarf[n] != NULL) { + dfs_free_filter(dfs->dfs_radarf[n]); + qdf_mem_free(dfs->dfs_radarf[n]); + dfs->dfs_radarf[n] = NULL; + } + } + + if 
(dfs->dfs_ftindextable != NULL) { + for (n = 0; n < DFS_NUM_FT_IDX_TBL_ROWS; n++) { + if (dfs->dfs_ftindextable[n] != NULL) { + qdf_mem_free(dfs->dfs_ftindextable[n]); + dfs->dfs_ftindextable[n] = NULL; + } + } + qdf_mem_free(dfs->dfs_ftindextable); + dfs->dfs_ftindextable = NULL; + dfs->wlan_dfs_isdfsregdomain = 0; + } + + if (dfs->dfs_b5radars != NULL) { + qdf_mem_free(dfs->dfs_b5radars); + dfs->dfs_b5radars = NULL; + } + + dfs_reset_ar(dfs); + + WLAN_ARQ_LOCK(dfs); + empty = STAILQ_EMPTY(&(dfs->dfs_arq)); + WLAN_ARQ_UNLOCK(dfs); + if (!empty) + dfs_reset_arq(dfs); + + if (dfs->events != NULL) { + dfs_free_dfs_events(dfs->events); + dfs->events = NULL; + } + + WLAN_DFSQ_LOCK_DESTROY(dfs); + WLAN_ARQ_LOCK_DESTROY(dfs); + WLAN_DFSEVENTQ_LOCK_DESTROY(dfs); +} + +int dfs_start_host_based_bangradar(struct wlan_dfs *dfs) +{ + dfs->wlan_radar_tasksched = 1; + qdf_timer_mod(&dfs->wlan_dfs_task_timer, 0); + + return 0; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_full_offload.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_full_offload.c new file mode 100644 index 0000000000000000000000000000000000000000..bf2397c7a2438303c5bc0bba0659ef80eefff3e4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_full_offload.c @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2002-2006, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains the dfs_fill_emulate_bang_radar_test() which is used + * to send command to firmware to emulate RADAR found event. + */ + +#include "../dfs_zero_cac.h" +#include "wlan_dfs_lmac_api.h" +#include "wlan_dfs_mlme_api.h" +#include "wlan_dfs_tgt_api.h" +#include "../dfs_internal.h" +#include "../dfs_full_offload.h" + +int dfs_fill_emulate_bang_radar_test(struct wlan_dfs *dfs, + uint32_t segid, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test) +{ + /* + * More parameters are to be added later indicating + * seg id, chirp and sidx values to be sent to fw. + */ + if (!(WLAN_IS_PRIMARY_OR_SECONDARY_CHAN_DFS(dfs->dfs_curchan))) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Ignore bangradar on a NON-DFS channel"); + return -EINVAL; + } + + dfs_unit_test->num_args = DFS_UNIT_TEST_NUM_ARGS; + dfs_unit_test->args[IDX_CMD_ID] = + DFS_PHYERR_OFFLOAD_TEST_SET_RADAR; + dfs_unit_test->args[IDX_PDEV_ID] = + wlan_objmgr_pdev_get_pdev_id(dfs->dfs_pdev_obj); + dfs_unit_test->args[IDX_SEG_ID] = segid; + + if (tgt_dfs_process_emulate_bang_radar_cmd(dfs->dfs_pdev_obj, + dfs_unit_test) == QDF_STATUS_E_FAILURE) { + return -EINVAL; + } + + return 0; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_nol.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_nol.c new file mode 100644 index 0000000000000000000000000000000000000000..3db80055212129b773529870ca89304d6e1dda51 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_nol.c @@ -0,0 +1,635 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2002-2010, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains NOL related functionality, NOL being the non + * occupancy list. After radar has been detected in a particular channel, + * the channel cannot be used for a period of 30 minutes which is called + * the non occupancy. The NOL is basically a list of all the channels that + * radar has been detected on. Each channel has a 30 minute timer associated + * with it. This file contains the functionality to add a channel to the NOL, + * the NOL timer function and the functionality to remove a channel from the + * NOL when its time is up. + */ + +#include "../dfs.h" +#include "../dfs_channel.h" +#include "../dfs_ioctl_private.h" +#include "../dfs_internal.h" +#include +#include +#include +#include +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +#include "../dfs_process_radar_found_ind.h" +#include "../dfs_partial_offload_radar.h" +#endif + +void dfs_set_update_nol_flag(struct wlan_dfs *dfs, bool val) +{ + dfs->update_nol = val; +} + +bool dfs_get_update_nol_flag(struct wlan_dfs *dfs) +{ + return dfs->update_nol; +} + +/** + * dfs_nol_timeout() - NOL timeout function. 
+ * + * Clears the WLAN_CHAN_DFS_RADAR_FOUND flag for the NOL timeout channel. + */ +static os_timer_func(dfs_nol_timeout) +{ + struct dfs_channel *c = NULL, lc; + unsigned long oldest, now; + struct wlan_dfs *dfs = NULL; + int i; + int nchans = 0; + + c = &lc; + + OS_GET_TIMER_ARG(dfs, struct wlan_dfs *); + dfs_mlme_get_dfs_ch_nchans(dfs->dfs_pdev_obj, &nchans); + + now = oldest = qdf_system_ticks(); + for (i = 0; i < nchans; i++) { + dfs_mlme_get_dfs_ch_channels(dfs->dfs_pdev_obj, + &(c->dfs_ch_freq), + &(c->dfs_ch_flags), + &(c->dfs_ch_flagext), + &(c->dfs_ch_ieee), + &(c->dfs_ch_vhtop_ch_freq_seg1), + &(c->dfs_ch_vhtop_ch_freq_seg2), + i); + if (WLAN_IS_CHAN_RADAR(c)) { + if (qdf_system_time_after_eq(now, + dfs->dfs_nol_event[i] + + dfs_get_nol_timeout(dfs))) { + c->dfs_ch_flagext &= + ~WLAN_CHAN_DFS_RADAR_FOUND; + if (c->dfs_ch_flags & + WLAN_CHAN_DFS_RADAR) { + /* + * NB: do this here so we get only one + * msg instead of one for every channel + * table entry. + */ + dfs_debug(dfs, WLAN_DEBUG_DFS, + "radar on channel %u (%u MHz) cleared after timeout", + + c->dfs_ch_ieee, + c->dfs_ch_freq); + } + } else if (dfs->dfs_nol_event[i] < oldest) + oldest = dfs->dfs_nol_event[i]; + } + } + if (oldest != now) { + /* Arrange to process next channel up for a status change. 
*/ + qdf_timer_mod(&dfs->dfs_nol_timer, + dfs_get_nol_timeout(dfs) - + qdf_system_ticks_to_msecs(qdf_system_ticks())); + } +} + +/** + * dfs_nol_elem_free_work_cb - Free NOL element + * + * Free the NOL element memory + */ +static void dfs_nol_elem_free_work_cb(void *context) +{ + struct wlan_dfs *dfs = (struct wlan_dfs *)context; + struct dfs_nolelem *tmp_nol_entry, *nol_entry; + + WLAN_DFSNOL_LOCK(dfs); + if (!TAILQ_EMPTY(&dfs->dfs_nol_free_list)) + TAILQ_FOREACH_SAFE(nol_entry, + &dfs->dfs_nol_free_list, + nolelem_list, + tmp_nol_entry) { + TAILQ_REMOVE(&dfs->dfs_nol_free_list, + nol_entry, nolelem_list); + qdf_timer_free(&nol_entry->nol_timer); + qdf_mem_free(nol_entry); + } + WLAN_DFSNOL_UNLOCK(dfs); +} + +void dfs_nol_timer_init(struct wlan_dfs *dfs) +{ + qdf_timer_init(NULL, + &(dfs->dfs_nol_timer), + dfs_nol_timeout, + (void *)(dfs), + QDF_TIMER_TYPE_WAKE_APPS); +} + +void dfs_nol_attach(struct wlan_dfs *dfs) +{ + dfs->wlan_dfs_nol_timeout = DFS_NOL_TIMEOUT_S; + dfs_nol_timer_init(dfs); + qdf_create_work(NULL, &dfs->dfs_nol_elem_free_work, + dfs_nol_elem_free_work_cb, dfs); + TAILQ_INIT(&dfs->dfs_nol_free_list); + dfs->dfs_use_nol = 1; + WLAN_DFSNOL_LOCK_CREATE(dfs); +} + +void dfs_nol_detach(struct wlan_dfs *dfs) +{ + dfs_nol_timer_cleanup(dfs); + qdf_flush_work(&dfs->dfs_nol_elem_free_work); + qdf_destroy_work(NULL, &dfs->dfs_nol_elem_free_work); + WLAN_DFSNOL_LOCK_DESTROY(dfs); +} + +void dfs_nol_timer_free(struct wlan_dfs *dfs) +{ + qdf_timer_free(&dfs->dfs_nol_timer); +} + +/** + * dfs_nol_delete() - Delete the given frequency/chwidth from the NOL. + * @dfs: Pointer to wlan_dfs structure. + * @delfreq: Freq to delete. + * @delchwidth: Channel width to delete. 
+ */ +static void dfs_nol_delete(struct wlan_dfs *dfs, + uint16_t delfreq, + uint16_t delchwidth) +{ + struct dfs_nolelem *nol, **prev_next; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs_debug(dfs, WLAN_DEBUG_DFS_NOL, + "remove channel=%d/%d MHz from NOL", + delfreq, delchwidth); + prev_next = &(dfs->dfs_nol); + nol = dfs->dfs_nol; + while (nol != NULL) { + if (nol->nol_freq == delfreq && + nol->nol_chwidth == delchwidth) { + *prev_next = nol->nol_next; + dfs_debug(dfs, WLAN_DEBUG_DFS_NOL, + "removing channel %d/%dMHz from NOL tstamp=%d", + nol->nol_freq, + nol->nol_chwidth, + (qdf_system_ticks_to_msecs + (qdf_system_ticks()) / 1000)); + TAILQ_INSERT_TAIL(&dfs->dfs_nol_free_list, + nol, nolelem_list); + nol = *prev_next; + + /* Update the NOL counter. */ + dfs->dfs_nol_count--; + + /* Be paranoid! */ + if (dfs->dfs_nol_count < 0) { + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, "dfs_nol_count < 0; eek!"); + dfs->dfs_nol_count = 0; + } + + } else { + prev_next = &(nol->nol_next); + nol = nol->nol_next; + } + } +} + +/** + * dfs_remove_from_nol() - Remove the freq from NOL list. + * + * When NOL times out, this function removes the channel from NOL list. + */ +static os_timer_func(dfs_remove_from_nol) +{ + struct dfs_nolelem *nol_arg; + struct wlan_dfs *dfs; + uint16_t delfreq; + uint16_t delchwidth; + uint8_t chan; + + OS_GET_TIMER_ARG(nol_arg, struct dfs_nolelem *); + + dfs = nol_arg->nol_dfs; + delfreq = nol_arg->nol_freq; + delchwidth = nol_arg->nol_chwidth; + + /* Delete the given NOL entry. */ + DFS_NOL_DELETE_CHAN_LOCKED(dfs, delfreq, delchwidth); + + /* Update the wireless stack with the new NOL. 
*/ + dfs_nol_update(dfs); + + dfs_mlme_nol_timeout_notification(dfs->dfs_pdev_obj); + chan = utils_dfs_freq_to_chan(delfreq); + dfs_debug(dfs, WLAN_DEBUG_DFS_NOL, + "remove channel %d from nol", chan); + utils_dfs_reg_update_nol_ch(dfs->dfs_pdev_obj, + &chan, 1, DFS_NOL_RESET); + utils_dfs_save_nol(dfs->dfs_pdev_obj); + + /* + * Free the NOL element in a thread. This is to avoid freeing the + * timer object from within timer callback function . The nol element + * contains the timer Object. + */ + qdf_sched_work(NULL, &dfs->dfs_nol_elem_free_work); +} + +void dfs_print_nol(struct wlan_dfs *dfs) +{ + struct dfs_nolelem *nol; +#ifdef WLAN_DEBUG + int i = 0; +#endif + uint32_t diff_ms, remaining_sec; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + nol = dfs->dfs_nol; + dfs_debug(dfs, WLAN_DEBUG_DFS_NOL, "NOL"); + while (nol != NULL) { + diff_ms = qdf_system_ticks_to_msecs(qdf_system_ticks() - + nol->nol_start_ticks); + diff_ms = (nol->nol_timeout_ms - diff_ms); + remaining_sec = diff_ms / 1000; /* Convert to seconds */ + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, + "nol:%d channel=%d MHz width=%d MHz time left=%u seconds nol starttick=%llu", + i++, nol->nol_freq, + nol->nol_chwidth, + remaining_sec, + (uint64_t)nol->nol_start_ticks); + nol = nol->nol_next; + } +} + +void dfs_print_nolhistory(struct wlan_dfs *dfs) +{ + struct dfs_channel *c, lc; + int i, j = 0; + int nchans = 0; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + c = &lc; + + dfs_mlme_get_dfs_ch_nchans(dfs->dfs_pdev_obj, &nchans); + for (i = 0; i < nchans; i++) { + dfs_mlme_get_dfs_ch_channels(dfs->dfs_pdev_obj, + &(c->dfs_ch_freq), + &(c->dfs_ch_flags), + &(c->dfs_ch_flagext), + &(c->dfs_ch_ieee), + &(c->dfs_ch_vhtop_ch_freq_seg1), + &(c->dfs_ch_vhtop_ch_freq_seg2), + i); + if (WLAN_IS_CHAN_HISTORY_RADAR(c)) { + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, + "nolhistory:%d channel=%d MHz Flags=%llx", + j, c->dfs_ch_freq, c->dfs_ch_flags); 
+ j++; + } + } +} + +void dfs_get_nol(struct wlan_dfs *dfs, + struct dfsreq_nolelem *dfs_nol, + int *nchan) +{ + struct dfs_nolelem *nol; + + *nchan = 0; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + nol = dfs->dfs_nol; + while (nol != NULL) { + dfs_nol[*nchan].nol_freq = nol->nol_freq; + dfs_nol[*nchan].nol_chwidth = nol->nol_chwidth; + dfs_nol[*nchan].nol_start_ticks = nol->nol_start_ticks; + dfs_nol[*nchan].nol_timeout_ms = nol->nol_timeout_ms; + ++(*nchan); + nol = nol->nol_next; + } +} + +void dfs_set_nol(struct wlan_dfs *dfs, + struct dfsreq_nolelem *dfs_nol, + int nchan) +{ +#define TIME_IN_MS 1000 + uint32_t nol_time_left_ms; + struct dfs_channel chan; + int i; + uint8_t chan_num; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + for (i = 0; i < nchan; i++) { + nol_time_left_ms = + qdf_system_ticks_to_msecs(qdf_system_ticks() - + dfs_nol[i].nol_start_ticks); + + if (nol_time_left_ms < dfs_nol[i].nol_timeout_ms) { + chan.dfs_ch_freq = dfs_nol[i].nol_freq; + chan.dfs_ch_flags = 0; + chan.dfs_ch_flagext = 0; + nol_time_left_ms = + (dfs_nol[i].nol_timeout_ms - nol_time_left_ms); + + DFS_NOL_ADD_CHAN_LOCKED(dfs, chan.dfs_ch_freq, + (nol_time_left_ms / TIME_IN_MS)); + chan_num = utils_dfs_freq_to_chan(chan.dfs_ch_freq); + utils_dfs_reg_update_nol_ch(dfs->dfs_pdev_obj, + &chan_num, 1, DFS_NOL_SET); + } + } +#undef TIME_IN_MS + dfs_nol_update(dfs); +} + +void dfs_nol_addchan(struct wlan_dfs *dfs, + uint16_t freq, + uint32_t dfs_nol_timeout) +{ +#define TIME_IN_MS 1000 +#define TIME_IN_US (TIME_IN_MS * 1000) + struct dfs_nolelem *nol, *elem, *prev; + /* For now, assume all events are 20MHz wide. 
*/ + int ch_width = 20; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + nol = dfs->dfs_nol; + prev = dfs->dfs_nol; + elem = NULL; + while (nol != NULL) { + if ((nol->nol_freq == freq) && + (nol->nol_chwidth == ch_width)) { + nol->nol_start_ticks = qdf_system_ticks(); + nol->nol_timeout_ms = dfs_nol_timeout * TIME_IN_MS; + + dfs_debug(dfs, WLAN_DEBUG_DFS_NOL, + "Update OS Ticks for NOL %d MHz / %d MHz", + nol->nol_freq, nol->nol_chwidth); + + qdf_timer_stop(&nol->nol_timer); + qdf_timer_mod(&nol->nol_timer, + dfs_nol_timeout * TIME_IN_MS); + return; + } + prev = nol; + nol = nol->nol_next; + } + + /* Add a new element to the NOL. */ + elem = (struct dfs_nolelem *)qdf_mem_malloc(sizeof(struct dfs_nolelem)); + if (!elem) + goto bad; + + qdf_mem_zero(elem, sizeof(*elem)); + elem->nol_dfs = dfs; + elem->nol_freq = freq; + elem->nol_chwidth = ch_width; + elem->nol_start_ticks = qdf_system_ticks(); + elem->nol_timeout_ms = dfs_nol_timeout*TIME_IN_MS; + elem->nol_next = NULL; + if (prev) { + prev->nol_next = elem; + } else { + /* This is the first element in the NOL. */ + dfs->dfs_nol = elem; + } + + qdf_timer_init(NULL, + &elem->nol_timer, dfs_remove_from_nol, + elem, QDF_TIMER_TYPE_WAKE_APPS); + qdf_timer_mod(&elem->nol_timer, dfs_nol_timeout * TIME_IN_MS); + + /* Update the NOL counter. 
*/ + dfs->dfs_nol_count++; + + dfs_debug(dfs, WLAN_DEBUG_DFS_NOL, + "new NOL channel %d MHz / %d MHz", + elem->nol_freq, elem->nol_chwidth); + return; + +bad: + dfs_debug(dfs, WLAN_DEBUG_DFS_NOL | WLAN_DEBUG_DFS, + "failed to allocate memory for nol entry"); + +#undef TIME_IN_MS +#undef TIME_IN_US +} + +void dfs_get_nol_chfreq_and_chwidth(struct dfsreq_nolelem *dfs_nol, + uint32_t *nol_chfreq, + uint32_t *nol_chwidth, + int index) +{ + if (!dfs_nol) + return; + + *nol_chfreq = dfs_nol[index].nol_freq; + *nol_chwidth = dfs_nol[index].nol_chwidth; +} + +void dfs_nol_update(struct wlan_dfs *dfs) +{ + struct dfsreq_nolelem *dfs_nol; + int nlen; + + /* + * Allocate enough entries to store the NOL. At least on Linux + * (don't ask why), if you allocate a 0 entry array, the + * returned pointer is 0x10. Make sure you're aware of this + * when you start debugging. + */ + dfs_nol = (struct dfsreq_nolelem *)qdf_mem_malloc( + sizeof(struct dfsreq_nolelem) * dfs->dfs_nol_count); + + if (!dfs_nol) { + /* + * XXX TODO: if this fails, just schedule a task to retry + * updating the NOL at a later stage. That way the NOL + * update _DOES_ happen - hopefully the failure was just + * temporary. + */ + dfs_alert(dfs, WLAN_DEBUG_DFS_ALWAYS, "failed to allocate NOL update memory!"); + return; + } + + DFS_GET_NOL_LOCKED(dfs, dfs_nol, &nlen); + + /* Be suitably paranoid for now. */ + if (nlen != dfs->dfs_nol_count) + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, "nlen (%d) != dfs->dfs_nol_count (%d)!", + nlen, dfs->dfs_nol_count); + + /* + * Call the driver layer to have it recalculate the NOL flags + * for each driver/umac channel. If the list is empty, pass + * NULL instead of dfs_nol. The operating system may have some + * special representation for "malloc a 0 byte memory region" + * - for example, Linux 2.6.38-13 (ubuntu) returns 0x10 rather + * than a valid allocation (and is likely not NULL so the + * pointer doesn't match NULL checks in any later code. 
+ */ + dfs_mlme_clist_update(dfs->dfs_pdev_obj, + (nlen > 0) ? dfs_nol : NULL, + nlen); + + qdf_mem_free(dfs_nol); +} + +void dfs_nol_free_list(struct wlan_dfs *dfs) +{ + struct dfs_nolelem *nol = dfs->dfs_nol, *prev; + + while (nol) { + prev = nol; + nol = nol->nol_next; + qdf_mem_free(prev); + /* Update the NOL counter. */ + dfs->dfs_nol_count--; + + if (dfs->dfs_nol_count < 0) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_nol_count < 0"); + ASSERT(0); + } + } + + dfs->dfs_nol = NULL; +} + +void dfs_nol_timer_cleanup(struct wlan_dfs *dfs) +{ + struct dfs_nolelem *nol; + + WLAN_DFSNOL_LOCK(dfs); + nol = dfs->dfs_nol; + while (nol) { + dfs->dfs_nol = nol->nol_next; + dfs->dfs_nol_count--; + /* + * Unlock is required so that when we sync with the + * nol_timeout timer we do not run into deadlock. + */ + WLAN_DFSNOL_UNLOCK(dfs); + qdf_timer_free(&nol->nol_timer); + WLAN_DFSNOL_LOCK(dfs); + + qdf_mem_free(nol); + nol = dfs->dfs_nol; + } + WLAN_DFSNOL_UNLOCK(dfs); +} + +void dfs_nol_workqueue_cleanup(struct wlan_dfs *dfs) +{ + qdf_flush_work(&dfs->dfs_nol_elem_free_work); +} + +int dfs_get_use_nol(struct wlan_dfs *dfs) +{ + return dfs->dfs_use_nol; +} + +int dfs_get_nol_timeout(struct wlan_dfs *dfs) +{ + return dfs->wlan_dfs_nol_timeout; +} + +void dfs_getnol(struct wlan_dfs *dfs, void *dfs_nolinfo) +{ + struct dfsreq_nolinfo *nolinfo = (struct dfsreq_nolinfo *)dfs_nolinfo; + + DFS_GET_NOL_LOCKED(dfs, nolinfo->dfs_nol, &(nolinfo->dfs_ch_nchans)); +} + +void dfs_clear_nolhistory(struct wlan_dfs *dfs) +{ + /* We should have a dfs_clear_nolhistory API from Regdomain. 
*/ + struct dfs_channel *c, lc; + int i; + int nchans = 0; + + c = &lc; + dfs_mlme_get_dfs_ch_nchans(dfs->dfs_pdev_obj, &nchans); + for (i = 0; i < nchans; i++) { + dfs_mlme_get_dfs_ch_channels(dfs->dfs_pdev_obj, + &(c->dfs_ch_freq), + &(c->dfs_ch_flags), + &(c->dfs_ch_flagext), + &(c->dfs_ch_ieee), + &(c->dfs_ch_vhtop_ch_freq_seg1), + &(c->dfs_ch_vhtop_ch_freq_seg2), + i); + WLAN_CHAN_CLR_HISTORY_RADAR(c); + } +} + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_remove_spoof_channel_from_nol(struct wlan_dfs *dfs) +{ + struct dfs_nolelem *nol; + uint8_t channels[NUM_CHANNELS_160MHZ]; + int i, nchans = 0; + + nchans = dfs_get_bonding_channels(&dfs->dfs_radar_found_chan, 0, + channels); + + WLAN_DFSNOL_LOCK(dfs); + for (i = 0; i < nchans && i < NUM_CHANNELS_160MHZ; i++) { + nol = dfs->dfs_nol; + while (nol) { + if (nol->nol_freq == (uint16_t)utils_dfs_chan_to_freq( + channels[i])) { + OS_SET_TIMER(&nol->nol_timer, 0); + break; + } + nol = nol->nol_next; + } + } + WLAN_DFSNOL_UNLOCK(dfs); + + utils_dfs_reg_update_nol_ch(dfs->dfs_pdev_obj, + channels, nchans, DFS_NOL_RESET); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_process_radar_found_ind.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_process_radar_found_ind.c new file mode 100644 index 0000000000000000000000000000000000000000..8819305e95fd5e0d3b8a8f793d53634e6cd55ca3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_process_radar_found_ind.c @@ -0,0 +1,566 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: API for processing radar found indication. + * + */ + +#include "../dfs.h" +#include "../dfs_zero_cac.h" +#include "../dfs_process_radar_found_ind.h" +#include +#include +#include "wlan_dfs_mlme_api.h" + +/** + * TODO: The code is not according to the following description needs + * modification and correction. Code always adds left and right channels to + * NOL even if it is not a chirp radar. + * + * A) If chirp radar starts at boundary and ends at boundary then three channels + * will be affected. + * freq_offset.freq[0] = fn (Center frequency) + * freq_offset.freq[1] = fn-1 (Left of center) + * freq_offset.freq[2] = fn+1 (Right of center) + * + * Three channels, ch(n-1), ch(n)and ch(n+1) will be added to NOL. + * + * Chirp start freq Chirp end freq + * | | + * | | + * V V + * _______________________________________________________________________ + * | center freq | center freq | center freq | + * | ch(n-1) | ch(n) | ch(n+1) | + * | | | | | | | + * | | | | | | | + * | | | | | | | + * fn-1 fn boundary fn+1 + * <-------- 20 Mhz ------> + * + * B) If chirp radar starts at one channel and continues up to another channel + * then two channels will be affected. + * freq_offset.freq[0] = fn + * freq_offset.freq[1] = 0 + * freq_offset.freq[2] = fn+1 + * + * Three channels, ch(n-1), ch(n)and ch(n+1) will be added to NOL. 
+ * + * Chirp start freq Chirp end freq + * | | + * | | + * V V + * _______________________________________________________________________ + * | center freq | center freq | center freq | + * | ch(n-1) | ch(n) | ch(n+1) | + * | | | | | | | + * | | | | | | | + * | | | | | | | + * fn-1 fn boundary fn+1 + * <-------- 20 Mhz ------> + * + * C) Radar found at boundary, two channels will be affected. + * freq_offset.freq[0] = fn + * freq_offset.freq[1] = 0 + * freq_offset.freq[2] = fn+1 + * + * Two channels, ch(n) and ch(n+1) will be added to NOL. + * + * dfs_freq_offset (radar found freq) + * | + * | + * V + * _______________________________________________________________________ + * | center freq | center freq | center freq | + * | ch(n-1) | ch(n) | ch(n+1) | + * | | | | | | | + * | | | | | | | + * | | | | | | | + * fn-1 fn boundary fn+1 + * <-------- 20 Mhz ------> + * + * + * D) Else only one channel will be affected. + * freq_offset.freq[0] = fn + * freq_offset.freq[1] = 0 + * freq_offset.freq[2] = 0 + * + * One channel ch(n) will be added to NOL. + * + * + * dfs_freq_offset (radar found freq) + * | + * | + * V + * _______________________________________________________________________ + * | center freq | center freq | center freq | + * | ch(n-1) | ch(n) | ch(n+1) | + * | | | | | | | + * | | | | | | | + * | | | | | | | + * fn-1 fn boundary fn+1 + * <-------- 20 Mhz ------> + */ + +/** + * dfs_radar_add_channel_list_to_nol()- Add given channels to nol + * @dfs: Pointer to wlan_dfs structure. + * @channels: Pointer to the channel list. + * @num_channels: Number of channels in the list. + * + * Add list of channels to nol, only if the channel is dfs. 
+ * + * Return: QDF_STATUS + */ +static QDF_STATUS dfs_radar_add_channel_list_to_nol(struct wlan_dfs *dfs, + uint8_t *channels, + uint8_t num_channels) +{ + int i; + uint8_t last_chan = 0; + uint8_t nollist[NUM_CHANNELS_160MHZ]; + uint8_t num_ch = 0; + + if (num_channels > NUM_CHANNELS_160MHZ) { + dfs_err(dfs, WLAN_DEBUG_DFS, + "Invalid num channels: %d", num_channels); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < num_channels; i++) { + if (channels[i] == 0 || + channels[i] == last_chan) + continue; + if (!utils_is_dfs_ch(dfs->dfs_pdev_obj, channels[i])) { + dfs_info(dfs, WLAN_DEBUG_DFS, "ch=%d is not dfs, skip", + channels[i]); + continue; + } + last_chan = channels[i]; + DFS_NOL_ADD_CHAN_LOCKED(dfs, + (uint16_t)utils_dfs_chan_to_freq(channels[i]), + dfs->wlan_dfs_nol_timeout); + nollist[num_ch++] = last_chan; + dfs_info(dfs, WLAN_DEBUG_DFS, "ch=%d Added to NOL", last_chan); + } + + if (!num_ch) { + dfs_err(dfs, WLAN_DEBUG_DFS, + "dfs channels not found in channel list"); + return QDF_STATUS_E_FAILURE; + } + + utils_dfs_reg_update_nol_ch(dfs->dfs_pdev_obj, + nollist, num_ch, DFS_NOL_SET); + dfs_nol_update(dfs); + utils_dfs_save_nol(dfs->dfs_pdev_obj); + + return QDF_STATUS_SUCCESS; +} + +/** + * dfs_radar_chan_for_80()- Find frequency offsets for 80MHz + * @freq_offset: freq offset + * @center_freq: center frequency + * + * Find frequency offsets for 80MHz + * + * Return: None + */ +static void dfs_radar_chan_for_80(struct freqs_offsets *freq_offset, + uint32_t center_freq) +{ + int i; + + for (i = 0; i < DFS_NUM_FREQ_OFFSET; i++) { + if (freq_offset->offset[i] < DFS_OFFSET_SECOND_LOWER) + freq_offset->freq[i] = + DFS_THIRD_LOWER_CHANNEL(center_freq); + else if ((freq_offset->offset[i] > DFS_OFFSET_SECOND_LOWER) && + (freq_offset->offset[i] < DFS_OFFSET_FIRST_LOWER)) + freq_offset->freq[i] = + DFS_SECOND_LOWER_CHANNEL(center_freq); + else if ((freq_offset->offset[i] > DFS_OFFSET_FIRST_LOWER) && + (freq_offset->offset[i] < 0)) + freq_offset->freq[i] = + 
DFS_FIRST_LOWER_CHANNEL(center_freq); + else if ((freq_offset->offset[i] > 0) && + (freq_offset->offset[i] < DFS_OFFSET_FIRST_UPPER)) + freq_offset->freq[i] = + DFS_FIRST_UPPER_CHANNEL(center_freq); + else if ((freq_offset->offset[i] > DFS_OFFSET_FIRST_UPPER) && + (freq_offset->offset[i] < DFS_OFFSET_SECOND_UPPER)) + freq_offset->freq[i] = + DFS_SECOND_UPPER_CHANNEL(center_freq); + else if (freq_offset->offset[i] > DFS_OFFSET_SECOND_UPPER) + freq_offset->freq[i] = + DFS_THIRD_UPPER_CHANNEL(center_freq); + } +} + +/** + * dfs_radar_chan_for_40()- Find frequency offsets for 40MHz + * @freq_offset: freq offset + * @center_freq: center frequency + * + * Find frequency offsets for 40MHz + * + * Return: None + */ +static void dfs_radar_chan_for_40(struct freqs_offsets *freq_offset, + uint32_t center_freq) +{ + int i; + + for (i = 0; i < DFS_NUM_FREQ_OFFSET; i++) { + if (freq_offset->offset[i] < DFS_OFFSET_FIRST_LOWER) + freq_offset->freq[i] = + DFS_SECOND_LOWER_CHANNEL(center_freq); + else if ((freq_offset->offset[i] > DFS_OFFSET_FIRST_LOWER) && + (freq_offset->offset[i] < 0)) + freq_offset->freq[i] = + DFS_FIRST_LOWER_CHANNEL(center_freq); + else if ((freq_offset->offset[i] > 0) && + (freq_offset->offset[i] < DFS_OFFSET_FIRST_UPPER)) + freq_offset->freq[i] = + DFS_FIRST_UPPER_CHANNEL(center_freq); + else if (freq_offset->offset[i] > DFS_OFFSET_FIRST_UPPER) + freq_offset->freq[i] = + DFS_SECOND_UPPER_CHANNEL(center_freq); + } +} + +/** + * dfs_radar_chan_for_20()- Find frequency offsets for 20MHz + * @freq_offset: freq offset + * @center_freq: center frequency + * + * Find frequency offsets for 20MHz + * + * Return: None + */ +static void dfs_radar_chan_for_20(struct freqs_offsets *freq_offset, + uint32_t center_freq) +{ + int i; + + for (i = 0; i < DFS_NUM_FREQ_OFFSET; i++) { + if (freq_offset->offset[i] <= DFS_20MZ_OFFSET_LOWER) + freq_offset->freq[i] = + DFS_20MHZ_LOWER_CHANNEL(center_freq); + else if ((freq_offset->offset[i] > DFS_20MZ_OFFSET_LOWER) && + 
(freq_offset->offset[i] < DFS_20MZ_OFFSET_UPPER)) + freq_offset->freq[i] = center_freq; + else if (freq_offset->offset[i] >= DFS_20MZ_OFFSET_UPPER) + freq_offset->freq[i] = + DFS_20MHZ_UPPER_CHANNEL(center_freq); + } +} + +/** + * dfs_find_radar_affected_subchans() - Finds radar affected sub channels. + * @dfs: Pointer to wlan_dfs structure. + * @radar_found: Pointer to radar_found structure. + * @channels: Pointer to save radar affected channels. + * + * Return: Number of channels. + */ +static uint8_t dfs_find_radar_affected_subchans(struct wlan_dfs *dfs, + struct radar_found_info + *radar_found, + uint8_t *channels) +{ + int i; + uint32_t freq_center, flag; + int32_t sidx; + struct dfs_channel *curchan = dfs->dfs_curchan; + struct freqs_offsets freq_offset; + + qdf_mem_set(&freq_offset, sizeof(freq_offset), 0); + flag = curchan->dfs_ch_flags; + + for (i = 0; i < DFS_NUM_FREQ_OFFSET; i++) + freq_offset.offset[i] = radar_found->freq_offset; + + sidx = DFS_FREQ_OFFSET_TO_SIDX(radar_found->freq_offset); + + if (!radar_found->segment_id) + freq_center = utils_dfs_chan_to_freq( + curchan->dfs_ch_vhtop_ch_freq_seg1); + else { + if (dfs_is_precac_timer_running(dfs)) { + freq_center = utils_dfs_chan_to_freq( + dfs->dfs_precac_secondary_freq); + } else { + freq_center = utils_dfs_chan_to_freq( + curchan->dfs_ch_vhtop_ch_freq_seg2); + if (flag & WLAN_CHAN_VHT160) + freq_center += DFS_160MHZ_SECOND_SEG_OFFSET; + } + } + + dfs_info(dfs, WLAN_DEBUG_DFS, + "seg=%d, sidx=%d, offset=%d, chirp=%d, flag=%d, f=%d", + radar_found->segment_id, sidx, + radar_found->freq_offset, radar_found->is_chirp, + flag, freq_center); + + if ((WLAN_IS_CHAN_A(curchan)) || + WLAN_IS_CHAN_MODE_20(curchan)) { + if (radar_found->is_chirp || + (sidx && !(abs(sidx) % DFS_BOUNDARY_SIDX))) { + freq_offset.offset[LEFT_CH] -= DFS_CHIRP_OFFSET; + freq_offset.offset[RIGHT_CH] += DFS_CHIRP_OFFSET; + } + dfs_radar_chan_for_20(&freq_offset, freq_center); + } else if (WLAN_IS_CHAN_MODE_40(curchan)) { + if 
(radar_found->is_chirp || !(abs(sidx) % DFS_BOUNDARY_SIDX)) { + freq_offset.offset[LEFT_CH] -= DFS_CHIRP_OFFSET; + freq_offset.offset[RIGHT_CH] += DFS_CHIRP_OFFSET; + } + dfs_radar_chan_for_40(&freq_offset, freq_center); + } else if (WLAN_IS_CHAN_MODE_80(curchan) || + WLAN_IS_CHAN_MODE_160(curchan) || + WLAN_IS_CHAN_MODE_80_80(curchan)) { + if (radar_found->is_chirp || !(abs(sidx) % DFS_BOUNDARY_SIDX)) { + freq_offset.offset[LEFT_CH] -= DFS_CHIRP_OFFSET; + freq_offset.offset[RIGHT_CH] += DFS_CHIRP_OFFSET; + } + dfs_radar_chan_for_80(&freq_offset, freq_center); + } else { + dfs_err(dfs, WLAN_DEBUG_DFS, + "channel flag=%d is invalid", flag); + return 0; + } + + for (i = 0; i < DFS_NUM_FREQ_OFFSET; i++) { + channels[i] = utils_dfs_freq_to_chan(freq_offset.freq[i]); + dfs_info(dfs, WLAN_DEBUG_DFS, "offset=%d, channel=%d", + i, channels[i]); + } + + return i; +} + +/** + * dfs_get_bonding_channels() - Get bonding channels. + * @curchan: Pointer to dfs_channels to know width and primary channel. + * @segment_id: Segment id, useful for 80+80/160 MHz operating band. + * @channels: Pointer to save radar affected channels. + * + * Return: Number of channels. 
+ */ +uint8_t dfs_get_bonding_channels(struct dfs_channel *curchan, + uint32_t segment_id, + uint8_t *channels) +{ + uint8_t center_chan; + uint8_t nchannels = 0; + + if (!segment_id) + center_chan = curchan->dfs_ch_vhtop_ch_freq_seg1; + else + center_chan = curchan->dfs_ch_vhtop_ch_freq_seg2; + + if (WLAN_IS_CHAN_MODE_20(curchan)) { + nchannels = 1; + channels[0] = center_chan; + } else if (WLAN_IS_CHAN_MODE_40(curchan)) { + nchannels = 2; + channels[0] = center_chan - DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[1] = center_chan + DFS_5GHZ_NEXT_CHAN_OFFSET; + } else if (WLAN_IS_CHAN_MODE_80(curchan) || + WLAN_IS_CHAN_MODE_80_80(curchan)) { + nchannels = 4; + channels[0] = center_chan - DFS_5GHZ_2ND_CHAN_OFFSET; + channels[1] = center_chan - DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[2] = center_chan + DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[3] = center_chan + DFS_5GHZ_2ND_CHAN_OFFSET; + } else if (WLAN_IS_CHAN_MODE_160(curchan)) { + nchannels = 8; + center_chan = curchan->dfs_ch_vhtop_ch_freq_seg2; + channels[0] = center_chan - DFS_5GHZ_4TH_CHAN_OFFSET; + channels[1] = center_chan - DFS_5GHZ_3RD_CHAN_OFFSET; + channels[2] = center_chan - DFS_5GHZ_2ND_CHAN_OFFSET; + channels[3] = center_chan - DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[4] = center_chan + DFS_5GHZ_NEXT_CHAN_OFFSET; + channels[5] = center_chan + DFS_5GHZ_2ND_CHAN_OFFSET; + channels[6] = center_chan + DFS_5GHZ_3RD_CHAN_OFFSET; + channels[7] = center_chan + DFS_5GHZ_4TH_CHAN_OFFSET; + } + + return nchannels; +} + +int dfs_radarevent_basic_sanity(struct wlan_dfs *dfs, + struct dfs_channel *chan) +{ + if (!(dfs->dfs_second_segment_bangradar || + dfs_is_precac_timer_running(dfs))) + if (!(WLAN_IS_PRIMARY_OR_SECONDARY_CHAN_DFS(chan))) { + dfs_debug(dfs, WLAN_DEBUG_DFS2, + "radar event on non-DFS chan"); + if (!(dfs->dfs_is_offload_enabled)) { + dfs_reset_radarq(dfs); + dfs_reset_alldelaylines(dfs); + dfs->dfs_bangradar = 0; + } + return 0; + } + + return 1; +} + +/** + * dfs_send_csa_to_current_chan() - Send CSA to current 
channel + * @dfs: Pointer to wlan_dfs structure. + * + * For the test mode(usenol = 0), don't do a CSA; but setup the test timer so + * we get a CSA _back_ to the current operating channel. + */ +static inline void dfs_send_csa_to_current_chan(struct wlan_dfs *dfs) +{ + qdf_timer_stop(&dfs->wlan_dfstesttimer); + dfs->wlan_dfstest = 1; + dfs->wlan_dfstest_ieeechan = dfs->dfs_curchan->dfs_ch_ieee; + dfs->wlan_dfstesttime = 1; /* 1ms */ + qdf_timer_mod(&dfs->wlan_dfstesttimer, dfs->wlan_dfstesttime); +} + +int dfs_second_segment_radar_disable(struct wlan_dfs *dfs) +{ + dfs->dfs_proc_phyerr &= ~DFS_SECOND_SEGMENT_RADAR_EN; + + return 0; +} + +QDF_STATUS dfs_process_radar_ind(struct wlan_dfs *dfs, + struct radar_found_info *radar_found) +{ + bool wait_for_csa = false; + uint8_t channels[NUM_CHANNELS_160MHZ]; + uint8_t num_channels; + QDF_STATUS status; + + if (!dfs->dfs_curchan) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs->dfs_curchan is NULL"); + return QDF_STATUS_E_FAILURE; + } + + /* Check if the current channel is a non DFS channel */ + if (!dfs_radarevent_basic_sanity(dfs, dfs->dfs_curchan)) { + dfs_err(dfs, WLAN_DEBUG_DFS, + "radar event on a non-DFS channel"); + return QDF_STATUS_E_FAILURE; + } + + if (radar_found->segment_id == SEG_ID_SECONDARY) + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Radar found on second segment VHT80 freq=%d MHz", + dfs->dfs_precac_secondary_freq); + else + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Radar found on channel=%d, freq=%d MHz", + dfs->dfs_curchan->dfs_ch_ieee, + dfs->dfs_curchan->dfs_ch_freq); + + if (!dfs->dfs_use_nol) { + dfs_send_csa_to_current_chan(dfs); + return QDF_STATUS_SUCCESS; + } + + if (dfs->dfs_use_nol_subchannel_marking) + num_channels = dfs_find_radar_affected_subchans(dfs, + radar_found, + channels); + else + num_channels = dfs_get_bonding_channels(dfs->dfs_curchan, + radar_found->segment_id, + channels); + + status = dfs_radar_add_channel_list_to_nol(dfs, channels, num_channels); + if 
(QDF_IS_STATUS_ERROR(status)) { + dfs_err(dfs, WLAN_DEBUG_DFS, + "radar event received on invalid channel"); + return status; + } + + /* + * If precac is running and the radar found in secondary + * VHT80 mark the channel as radar and add to NOL list. + * Otherwise random channel selection can choose this + * channel. + */ + dfs_debug(dfs, WLAN_DEBUG_DFS, + "found_on_second=%d is_pre=%d", + dfs->is_radar_found_on_secondary_seg, + dfs_is_precac_timer_running(dfs)); + + /* + * Even if radar found on primary, we need to move the channel + * from precac-required-list and precac-done-list to + * precac-nol-list. + */ + if (dfs->dfs_precac_enable) + dfs_mark_precac_dfs(dfs, dfs->is_radar_found_on_secondary_seg); + + if (!dfs->dfs_is_offload_enabled && + dfs->is_radar_found_on_secondary_seg) { + dfs_second_segment_radar_disable(dfs); + dfs->is_radar_found_on_secondary_seg = 0; + + if (dfs->is_radar_during_precac) { + dfs->is_radar_during_precac = 0; + return QDF_STATUS_SUCCESS; + } + } + + /* + * This calls into the umac DFS code, which sets the umac + * related radar flags and begins the channel change + * machinery. + * XXX TODO: the umac NOL code isn't used, but + * WLAN_CHAN_DFS_RADAR still gets set. Since the umac + * NOL code isn't used, that flag is never cleared. This + * needs to be fixed. See EV 105776. + */ + dfs_mlme_start_rcsa(dfs->dfs_pdev_obj, &wait_for_csa); + if (wait_for_csa) + return QDF_STATUS_SUCCESS; + + /* + * EV 129487 : We have detected radar in the channel, + * stop processing PHY error data as this can cause + * false detect in the new channel while channel + * change is in progress. 
+ */ + + if (!dfs->dfs_is_offload_enabled) { + dfs_radar_disable(dfs); + dfs_second_segment_radar_disable(dfs); + } + + dfs_mlme_mark_dfs(dfs->dfs_pdev_obj, + dfs->dfs_curchan->dfs_ch_ieee, + dfs->dfs_curchan->dfs_ch_freq, + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg2, + dfs->dfs_curchan->dfs_ch_flags); + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_random_chan_sel.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_random_chan_sel.c new file mode 100644 index 0000000000000000000000000000000000000000..08083063c88371f7314a68572fdd70ec0039f78d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_random_chan_sel.c @@ -0,0 +1,1505 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "../dfs.h" +#include "../dfs_random_chan_sel.h" +#include +#include + +#ifdef WLAN_ENABLE_CHNL_MATRIX_RESTRICTION +/* + * TODO: At present SAP Channel leakage matrix for ch 144 + * is not available from system's team. So to play it safe + * and avoid crash if channel 144 is request, in following + * matix channel 144 is added such that it will cause code + * to avoid selecting channel 144. 
+ * + * THESE ENTRIES SHOULD BE REPLACED WITH CORRECT VALUES AS + * PROVIDED BY SYSTEM'S TEAM. + */ + +/* channel tx leakage table - ht80 */ +struct dfs_matrix_tx_leak_info ht80_chan[] = { + {52, + {{36, 148}, {40, 199}, + {44, 193}, {48, 197}, + {52, DFS_TX_LEAKAGE_MIN}, {56, 153}, + {60, 137}, {64, 134}, + {100, 358}, {104, 350}, + {108, 404}, {112, 344}, + {116, 424}, {120, 429}, + {124, 437}, {128, 435}, + {132, DFS_TX_LEAKAGE_MAX}, {136, DFS_TX_LEAKAGE_MAX}, + {140, DFS_TX_LEAKAGE_MAX}, + {144, DFS_TX_LEAKAGE_MIN} + } }, + + + {56, + {{36, 171}, {40, 178}, + {44, 171}, {48, 178}, + {52, DFS_TX_LEAKAGE_MIN}, {56, DFS_TX_LEAKAGE_MIN}, + {60, DFS_TX_LEAKAGE_MIN}, {64, 280}, + {100, 351}, {104, 376}, + {108, 362}, {112, 362}, + {116, 403}, {120, 397}, + {124, DFS_TX_LEAKAGE_MAX}, {128, DFS_TX_LEAKAGE_MAX}, + {132, DFS_TX_LEAKAGE_MAX}, {136, DFS_TX_LEAKAGE_MAX}, + {140, DFS_TX_LEAKAGE_MAX}, + {144, DFS_TX_LEAKAGE_MIN} + } }, + + {60, + {{36, 156}, {40, 146}, + {44, DFS_TX_LEAKAGE_MIN}, {48, DFS_TX_LEAKAGE_MIN}, + {52, 180}, {56, DFS_TX_LEAKAGE_MIN}, + {60, DFS_TX_LEAKAGE_MIN}, {64, DFS_TX_LEAKAGE_MIN}, + {100, 376}, {104, 360}, + {108, DFS_TX_LEAKAGE_MAX}, {112, DFS_TX_LEAKAGE_MAX}, + {116, 395}, {120, 399}, + {124, DFS_TX_LEAKAGE_MAX}, {128, DFS_TX_LEAKAGE_MAX}, + {132, DFS_TX_LEAKAGE_MAX}, {136, DFS_TX_LEAKAGE_MAX}, + {140, DFS_TX_LEAKAGE_MAX}, + {144, DFS_TX_LEAKAGE_MIN} + } }, + + {64, + {{36, 217}, {40, 221}, + {44, DFS_TX_LEAKAGE_MIN}, {48, DFS_TX_LEAKAGE_MIN}, + {52, 176}, {56, 176}, + {60, DFS_TX_LEAKAGE_MIN}, {64, DFS_TX_LEAKAGE_MIN}, + {100, 384}, {104, 390}, + {108, DFS_TX_LEAKAGE_MAX}, {112, DFS_TX_LEAKAGE_MAX}, + {116, 375}, {120, 374}, + {124, DFS_TX_LEAKAGE_MAX}, {128, DFS_TX_LEAKAGE_MAX}, + {132, DFS_TX_LEAKAGE_MAX}, {136, DFS_TX_LEAKAGE_MAX}, + {140, DFS_TX_LEAKAGE_MAX}, + {144, DFS_TX_LEAKAGE_MIN} + } }, + + {100, + {{36, 357}, {40, 326}, + {44, 321}, {48, 326}, + {52, 378}, {56, 396}, + {60, DFS_TX_LEAKAGE_MAX}, {64, DFS_TX_LEAKAGE_MAX}, + {100, 
DFS_TX_LEAKAGE_MIN}, {104, DFS_TX_LEAKAGE_MIN}, + {108, 196}, {112, 116}, + {116, 166}, {120, DFS_TX_LEAKAGE_MIN}, + {124, DFS_TX_LEAKAGE_MIN}, {128, DFS_TX_LEAKAGE_MIN}, + {132, DFS_TX_LEAKAGE_MIN}, {136, DFS_TX_LEAKAGE_MIN}, + {140, DFS_TX_LEAKAGE_MIN}, + {144, DFS_TX_LEAKAGE_MIN} + } }, + + {104, + {{36, 325}, {40, 325}, + {44, 305}, {48, 352}, + {52, 411}, {56, 411}, + {60, DFS_TX_LEAKAGE_MAX}, {64, DFS_TX_LEAKAGE_MAX}, + {100, DFS_TX_LEAKAGE_MIN}, {104, DFS_TX_LEAKAGE_MIN}, + {108, DFS_TX_LEAKAGE_MIN}, {112, 460}, + {116, 198}, {120, DFS_TX_LEAKAGE_MIN}, + {124, DFS_TX_LEAKAGE_MIN}, {128, DFS_TX_LEAKAGE_MIN}, + {132, DFS_TX_LEAKAGE_MIN}, {136, DFS_TX_LEAKAGE_MIN}, + {140, DFS_TX_LEAKAGE_MIN}, + {144, DFS_TX_LEAKAGE_MIN} + } }, + + {108, + {{36, 304}, {40, 332}, + {44, 310}, {48, 335}, + {52, 431}, {56, 391}, + {60, DFS_TX_LEAKAGE_MAX}, {64, DFS_TX_LEAKAGE_MAX}, + {100, 280}, {104, DFS_TX_LEAKAGE_MIN}, + {108, DFS_TX_LEAKAGE_MIN}, {112, DFS_TX_LEAKAGE_MIN}, + {116, 185}, {120, DFS_TX_LEAKAGE_MIN}, + {124, DFS_TX_LEAKAGE_MIN}, {128, DFS_TX_LEAKAGE_MIN}, + {132, DFS_TX_LEAKAGE_MIN}, {136, DFS_TX_LEAKAGE_MIN}, + {140, DFS_TX_LEAKAGE_MIN}, + {144, DFS_TX_LEAKAGE_MIN} + } }, + + {112, + {{36, 327}, {40, 335}, + {44, 331}, {48, 345}, + {52, 367}, {56, 401}, + {60, DFS_TX_LEAKAGE_MAX}, {64, DFS_TX_LEAKAGE_MAX}, + {100, 131}, {104, 132}, + {108, DFS_TX_LEAKAGE_MIN}, {112, DFS_TX_LEAKAGE_MIN}, + {116, 189}, {120, DFS_TX_LEAKAGE_MIN}, + {124, DFS_TX_LEAKAGE_MIN}, {128, DFS_TX_LEAKAGE_MIN}, + {132, DFS_TX_LEAKAGE_MIN}, {136, DFS_TX_LEAKAGE_MIN}, + {140, DFS_TX_LEAKAGE_MIN}, + {144, DFS_TX_LEAKAGE_MIN} + } }, + + {116, + {{36, 384}, {40, 372}, + {44, 389}, {48, 396}, + {52, 348}, {56, 336}, + {60, DFS_TX_LEAKAGE_MAX}, {64, DFS_TX_LEAKAGE_MAX}, + {100, 172}, {104, 169}, + {108, DFS_TX_LEAKAGE_MIN}, {112, DFS_TX_LEAKAGE_MIN}, + {116, DFS_TX_LEAKAGE_MIN}, {120, DFS_TX_LEAKAGE_MIN}, + {124, DFS_TX_LEAKAGE_MIN}, {128, DFS_TX_LEAKAGE_MIN}, + {132, DFS_TX_LEAKAGE_MIN}, {136, 
DFS_TX_LEAKAGE_MIN}, + {140, DFS_TX_LEAKAGE_MIN}, + {144, DFS_TX_LEAKAGE_MIN} + } }, + + {120, + {{36, 395}, {40, 419}, + {44, 439}, {48, 407}, + {52, 321}, {56, 334}, + {60, DFS_TX_LEAKAGE_MAX}, {64, DFS_TX_LEAKAGE_MAX}, + {100, 134}, {104, 186}, + {108, DFS_TX_LEAKAGE_MIN}, {112, DFS_TX_LEAKAGE_MIN}, + {116, DFS_TX_LEAKAGE_MIN}, {120, DFS_TX_LEAKAGE_MIN}, + {124, DFS_TX_LEAKAGE_MIN}, {128, 159}, + {132, DFS_TX_LEAKAGE_MIN}, {136, DFS_TX_LEAKAGE_MIN}, + {140, DFS_TX_LEAKAGE_MIN}, + {144, DFS_TX_LEAKAGE_MIN} + } }, + + {124, + {{36, 469}, {40, 433}, + {44, 434}, {48, 435}, + {52, 332}, {56, 345}, + {60, DFS_TX_LEAKAGE_MAX}, {64, DFS_TX_LEAKAGE_MAX}, + {100, 146}, {104, 177}, + {108, DFS_TX_LEAKAGE_MIN}, {112, DFS_TX_LEAKAGE_MIN}, + {116, 350}, {120, DFS_TX_LEAKAGE_MIN}, + {124, DFS_TX_LEAKAGE_MIN}, {128, 138}, + {132, DFS_TX_LEAKAGE_MIN}, {136, DFS_TX_LEAKAGE_MIN}, + {140, DFS_TX_LEAKAGE_MIN}, + {144, DFS_TX_LEAKAGE_MIN} + } }, + + {128, + {{36, 408}, {40, 434}, + {44, 449}, {48, 444}, + {52, 341}, {56, 374}, + {60, DFS_TX_LEAKAGE_MAX}, {64, DFS_TX_LEAKAGE_MAX}, + {100, 205}, {104, 208}, + {108, DFS_TX_LEAKAGE_MIN}, {112, DFS_TX_LEAKAGE_MIN}, + {116, 142}, {120, DFS_TX_LEAKAGE_MIN}, + {124, DFS_TX_LEAKAGE_MIN}, {128, DFS_TX_LEAKAGE_MIN}, + {132, DFS_TX_LEAKAGE_MIN}, {136, DFS_TX_LEAKAGE_MIN}, + {140, DFS_TX_LEAKAGE_MIN}, + {144, DFS_TX_LEAKAGE_MIN} + } }, + + {132, + {{36, DFS_TX_LEAKAGE_MAX}, {40, DFS_TX_LEAKAGE_MAX}, + {44, DFS_TX_LEAKAGE_MAX}, {48, DFS_TX_LEAKAGE_MAX}, + {52, DFS_TX_LEAKAGE_MAX}, {56, DFS_TX_LEAKAGE_MAX}, + {60, DFS_TX_LEAKAGE_MIN}, {64, DFS_TX_LEAKAGE_MIN}, + {100, DFS_TX_LEAKAGE_MIN}, {104, DFS_TX_LEAKAGE_MIN}, + {108, DFS_TX_LEAKAGE_MIN}, {112, DFS_TX_LEAKAGE_MIN}, + {116, DFS_TX_LEAKAGE_MIN}, {120, DFS_TX_LEAKAGE_MIN}, + {124, DFS_TX_LEAKAGE_MIN}, {128, DFS_TX_LEAKAGE_MIN}, + {132, DFS_TX_LEAKAGE_MIN}, {136, DFS_TX_LEAKAGE_MIN}, + {140, DFS_TX_LEAKAGE_MIN}, + {144, DFS_TX_LEAKAGE_MIN} + } }, + + {136, + {{36, DFS_TX_LEAKAGE_MAX}, {40, 
DFS_TX_LEAKAGE_MAX},
    {44, DFS_TX_LEAKAGE_MAX}, {48, DFS_TX_LEAKAGE_MAX},
    {52, DFS_TX_LEAKAGE_MAX}, {56, DFS_TX_LEAKAGE_MAX},
    {60, DFS_TX_LEAKAGE_MIN}, {64, DFS_TX_LEAKAGE_MIN},
    {100, DFS_TX_LEAKAGE_MIN}, {104, DFS_TX_LEAKAGE_MIN},
    {108, DFS_TX_LEAKAGE_MIN}, {112, DFS_TX_LEAKAGE_MIN},
    {116, DFS_TX_LEAKAGE_MIN}, {120, DFS_TX_LEAKAGE_MIN},
    {124, DFS_TX_LEAKAGE_MIN}, {128, DFS_TX_LEAKAGE_MIN},
    {132, DFS_TX_LEAKAGE_MIN}, {136, DFS_TX_LEAKAGE_MIN},
    {140, DFS_TX_LEAKAGE_MIN},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {140,
    {{36, DFS_TX_LEAKAGE_MAX}, {40, DFS_TX_LEAKAGE_MAX},
    {44, DFS_TX_LEAKAGE_MAX}, {48, DFS_TX_LEAKAGE_MAX},
    {52, DFS_TX_LEAKAGE_MAX}, {56, DFS_TX_LEAKAGE_MAX},
    {60, DFS_TX_LEAKAGE_MIN}, {64, DFS_TX_LEAKAGE_MIN},
    {100, DFS_TX_LEAKAGE_MIN}, {104, DFS_TX_LEAKAGE_MIN},
    {108, DFS_TX_LEAKAGE_MIN}, {112, DFS_TX_LEAKAGE_MIN},
    {116, DFS_TX_LEAKAGE_MIN}, {120, DFS_TX_LEAKAGE_MIN},
    {124, DFS_TX_LEAKAGE_MIN}, {128, DFS_TX_LEAKAGE_MIN},
    {132, DFS_TX_LEAKAGE_MIN}, {136, DFS_TX_LEAKAGE_MIN},
    /*
     * NOTE(review): this row has 19 pairs; unlike every other row in
     * this table there is no {140, ...} self entry - confirm against
     * the leakage characterization data that the omission is intended.
     */
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {144,
    {{36, DFS_TX_LEAKAGE_MAX}, {40, DFS_TX_LEAKAGE_MAX},
    {44, DFS_TX_LEAKAGE_MAX}, {48, DFS_TX_LEAKAGE_MAX},
    {52, DFS_TX_LEAKAGE_MAX}, {56, DFS_TX_LEAKAGE_MAX},
    {60, DFS_TX_LEAKAGE_MIN}, {64, DFS_TX_LEAKAGE_MIN},
    {100, DFS_TX_LEAKAGE_MIN}, {104, DFS_TX_LEAKAGE_MIN},
    {108, DFS_TX_LEAKAGE_MIN}, {112, DFS_TX_LEAKAGE_MIN},
    {116, DFS_TX_LEAKAGE_MIN}, {120, DFS_TX_LEAKAGE_MIN},
    {124, DFS_TX_LEAKAGE_MIN}, {128, DFS_TX_LEAKAGE_MIN},
    {132, DFS_TX_LEAKAGE_MIN}, {136, DFS_TX_LEAKAGE_MIN},
    {144, DFS_TX_LEAKAGE_MIN}
    } },
};

/*
 * channel tx leakage table - ht40
 *
 * Each row is keyed by a (NOL) channel; the inner list gives, for every
 * candidate channel, the tx leakage level observed into the keyed channel
 * (see dfs_mark_leaking_ch(), which compares .leak_lvl against
 * dfs->tx_leakage_threshold).
 */
struct dfs_matrix_tx_leak_info ht40_chan[] = {
    {52,
    {{36, DFS_TX_LEAKAGE_AUTO_MIN}, {40, DFS_TX_LEAKAGE_AUTO_MIN},
    {44, 230}, {48, 230},
    {52, DFS_TX_LEAKAGE_MIN}, {56, DFS_TX_LEAKAGE_MIN},
    {60, DFS_TX_LEAKAGE_AUTO_MIN}, {64, DFS_TX_LEAKAGE_AUTO_MIN},
    {100, 625}, {104, 323},
    {108, 646}, {112, 646},
    {116, DFS_TX_LEAKAGE_MAX}, {120, DFS_TX_LEAKAGE_MAX},
    {124, DFS_TX_LEAKAGE_MAX}, {128, DFS_TX_LEAKAGE_MAX},
    {132, DFS_TX_LEAKAGE_MAX}, {136, DFS_TX_LEAKAGE_MAX},
    {140, DFS_TX_LEAKAGE_MAX},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {56,
    {{36, DFS_TX_LEAKAGE_AUTO_MIN}, {40, DFS_TX_LEAKAGE_AUTO_MIN},
    {44, DFS_TX_LEAKAGE_AUTO_MIN}, {48, DFS_TX_LEAKAGE_AUTO_MIN},
    {52, DFS_TX_LEAKAGE_MIN}, {56, DFS_TX_LEAKAGE_MIN},
    {60, DFS_TX_LEAKAGE_MIN}, {64, DFS_TX_LEAKAGE_MIN},
    {100, 611}, {104, 611},
    {108, 617}, {112, 617},
    {116, DFS_TX_LEAKAGE_MAX}, {120, DFS_TX_LEAKAGE_MAX},
    {124, DFS_TX_LEAKAGE_MAX}, {128, DFS_TX_LEAKAGE_MAX},
    {132, DFS_TX_LEAKAGE_MAX}, {136, DFS_TX_LEAKAGE_MAX},
    {140, DFS_TX_LEAKAGE_MAX},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {60,
    {{36, DFS_TX_LEAKAGE_AUTO_MIN}, {40, DFS_TX_LEAKAGE_AUTO_MIN},
    {44, DFS_TX_LEAKAGE_AUTO_MIN}, {48, DFS_TX_LEAKAGE_AUTO_MIN},
    {52, 190}, {56, 190},
    {60, DFS_TX_LEAKAGE_MIN}, {64, DFS_TX_LEAKAGE_MIN},
    {100, 608}, {104, 608},
    {108, 623}, {112, 623},
    {116, DFS_TX_LEAKAGE_MAX}, {120, DFS_TX_LEAKAGE_MAX},
    {124, DFS_TX_LEAKAGE_MAX}, {128, DFS_TX_LEAKAGE_MAX},
    {132, DFS_TX_LEAKAGE_MAX}, {136, DFS_TX_LEAKAGE_MAX},
    {140, DFS_TX_LEAKAGE_MAX},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {64,
    {{36, DFS_TX_LEAKAGE_AUTO_MIN}, {40, DFS_TX_LEAKAGE_AUTO_MIN},
    {44, DFS_TX_LEAKAGE_AUTO_MIN}, {48, DFS_TX_LEAKAGE_AUTO_MIN},
    {52, 295}, {56, 295},
    {60, DFS_TX_LEAKAGE_MIN}, {64, DFS_TX_LEAKAGE_MIN},
    {100, 594}, {104, 594},
    {108, 625}, {112, 625},
    {116, DFS_TX_LEAKAGE_MAX}, {120, DFS_TX_LEAKAGE_MAX},
    {124, DFS_TX_LEAKAGE_MAX}, {128, DFS_TX_LEAKAGE_MAX},
    {132, DFS_TX_LEAKAGE_MAX}, {136, DFS_TX_LEAKAGE_MAX},
    {140, DFS_TX_LEAKAGE_MAX},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {100,
    {{36, 618}, {40, 618},
    {44, 604}, {48, 604},
    {52, 596}, {56, 596},
    {60, 584}, {64, 584},
    {100, DFS_TX_LEAKAGE_MIN}, {104, DFS_TX_LEAKAGE_MIN},
    {108, 299}, {112, 299},
    {116, DFS_TX_LEAKAGE_AUTO_MIN}, {120, DFS_TX_LEAKAGE_AUTO_MIN},
    {124, DFS_TX_LEAKAGE_AUTO_MIN}, {128, DFS_TX_LEAKAGE_AUTO_MIN},
    {132, 538}, {136, 538},
    {140, 598},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {104,
    {{36, 636}, {40, 636},
    {44, 601}, {48, 601},
    {52, 616}, {56, 616},
    {60, 584}, {64, 584},
    {100, DFS_TX_LEAKAGE_MIN}, {104, DFS_TX_LEAKAGE_MIN},
    {108, DFS_TX_LEAKAGE_MIN}, {112, DFS_TX_LEAKAGE_MIN},
    {116, DFS_TX_LEAKAGE_AUTO_MIN}, {120, DFS_TX_LEAKAGE_AUTO_MIN},
    {124, DFS_TX_LEAKAGE_AUTO_MIN}, {128, DFS_TX_LEAKAGE_AUTO_MIN},
    {132, 553}, {136, 553},
    {140, 568},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {108,
    {{36, 600}, {40, 600},
    {44, 627}, {48, 627},
    {52, 611}, {56, 611},
    {60, 611}, {64, 611},
    {100, 214}, {104, 214},
    {108, DFS_TX_LEAKAGE_MIN}, {112, DFS_TX_LEAKAGE_MIN},
    {116, DFS_TX_LEAKAGE_AUTO_MIN}, {120, DFS_TX_LEAKAGE_AUTO_MIN},
    {124, DFS_TX_LEAKAGE_AUTO_MIN}, {128, DFS_TX_LEAKAGE_AUTO_MIN},
    {132, DFS_TX_LEAKAGE_AUTO_MIN}, {136, DFS_TX_LEAKAGE_AUTO_MIN},
    {140, 534},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {112,
    {{36, 645}, {40, 645},
    {44, 641}, {48, 641},
    {52, 618}, {56, 618},
    {60, 612}, {64, 612},
    {100, 293}, {104, 293},
    {108, DFS_TX_LEAKAGE_MIN}, {112, DFS_TX_LEAKAGE_MIN},
    {116, DFS_TX_LEAKAGE_MIN}, {120, DFS_TX_LEAKAGE_MIN},
    {124, DFS_TX_LEAKAGE_AUTO_MIN}, {128, DFS_TX_LEAKAGE_AUTO_MIN},
    {132, DFS_TX_LEAKAGE_AUTO_MIN}, {136, DFS_TX_LEAKAGE_AUTO_MIN},
    {140, 521},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {116,
    {{36, 661}, {40, 661},
    {44, 624}, {48, 624},
    {52, 634}, {56, 634},
    {60, 611}, {64, 611},
    {100, DFS_TX_LEAKAGE_AUTO_MIN}, {104, DFS_TX_LEAKAGE_AUTO_MIN},
    {108, 217}, {112, 217},
    {116, DFS_TX_LEAKAGE_MIN}, {120, DFS_TX_LEAKAGE_MIN},
    {124, DFS_TX_LEAKAGE_AUTO_MIN}, {128, DFS_TX_LEAKAGE_AUTO_MIN},
    {132, DFS_TX_LEAKAGE_AUTO_MIN}, {136, DFS_TX_LEAKAGE_AUTO_MIN},
    {140, DFS_TX_LEAKAGE_AUTO_MIN},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {120,
    {{36, 667}, {40, 667},
    {44, 645}, {48, 645},
    {52, 633}, {56, 633},
    {60, 619}, {64, 619},
    {100, DFS_TX_LEAKAGE_AUTO_MIN}, {104, DFS_TX_LEAKAGE_AUTO_MIN},
    {108, 291}, {112, 291},
    {116, DFS_TX_LEAKAGE_MIN}, {120, DFS_TX_LEAKAGE_MIN},
    {124, DFS_TX_LEAKAGE_MIN}, {128, DFS_TX_LEAKAGE_MIN},
    {132, DFS_TX_LEAKAGE_AUTO_MIN}, {136, DFS_TX_LEAKAGE_AUTO_MIN},
    {140, DFS_TX_LEAKAGE_AUTO_MIN},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {124,
    {{36, 676}, {40, 676},
    {44, 668}, {48, 668},
    {52, 595}, {56, 595},
    {60, 622}, {64, 622},
    {100, DFS_TX_LEAKAGE_AUTO_MIN}, {104, DFS_TX_LEAKAGE_AUTO_MIN},
    {108, DFS_TX_LEAKAGE_AUTO_MIN}, {112, DFS_TX_LEAKAGE_AUTO_MIN},
    {116, 225}, {120, 225},
    {124, DFS_TX_LEAKAGE_MIN}, {128, DFS_TX_LEAKAGE_MIN},
    {132, DFS_TX_LEAKAGE_AUTO_MIN}, {136, DFS_TX_LEAKAGE_AUTO_MIN},
    {140, DFS_TX_LEAKAGE_AUTO_MIN},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {128,
    {{36, 678}, {40, 678},
    {44, 664}, {48, 664},
    {52, 651}, {56, 651},
    {60, 643}, {64, 643},
    {100, DFS_TX_LEAKAGE_AUTO_MIN}, {104, DFS_TX_LEAKAGE_AUTO_MIN},
    {108, DFS_TX_LEAKAGE_AUTO_MIN}, {112, DFS_TX_LEAKAGE_AUTO_MIN},
    {116, 293}, {120, 293},
    {124, DFS_TX_LEAKAGE_MIN}, {128, DFS_TX_LEAKAGE_MIN},
    {132, DFS_TX_LEAKAGE_MIN}, {136, DFS_TX_LEAKAGE_MIN},
    {140, DFS_TX_LEAKAGE_AUTO_MIN},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {132,
    {{36, 689}, {40, 689},
    {44, 669}, {48, 669},
    {52, 662}, {56, 662},
    {60, 609}, {64, 609},
    {100, 538}, {104, 538},
    {108, DFS_TX_LEAKAGE_AUTO_MIN}, {112, DFS_TX_LEAKAGE_AUTO_MIN},
    {116, DFS_TX_LEAKAGE_AUTO_MIN}, {120, DFS_TX_LEAKAGE_AUTO_MIN},
    {124, 247}, {128, 247},
    {132, DFS_TX_LEAKAGE_MIN}, {136, DFS_TX_LEAKAGE_MIN},
    {140, DFS_TX_LEAKAGE_MIN},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {136,
    {{36, 703}, {40, 703},
    /*
     * NOTE(review): {48, DFS_TX_LEAKAGE_MIN} breaks the pairwise
     * pattern of this table (every other row gives channels 44/48 the
     * same level, here 688) - verify against characterization data.
     */
    {44, 688}, {48, DFS_TX_LEAKAGE_MIN},
    {52, 671}, {56, 671},
    {60, 658}, {64, 658},
    {100, 504}, {104, 504},
    {108, DFS_TX_LEAKAGE_AUTO_MIN}, {112, DFS_TX_LEAKAGE_AUTO_MIN},
    {116, DFS_TX_LEAKAGE_AUTO_MIN}, {120, DFS_TX_LEAKAGE_AUTO_MIN},
    {124, 289}, {128, 289},
    {132, DFS_TX_LEAKAGE_MIN}, {136, DFS_TX_LEAKAGE_MIN},
    {140, DFS_TX_LEAKAGE_MIN},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {140,
    {{36, 695}, {40, 695},
    {44, 684}, {48, 684},
    {52, 664}, {56, 664},
    {60, 658}, {64, 658},
    {100, 601}, {104, 601},
    {108, 545}, {112, 545},
    {116, DFS_TX_LEAKAGE_AUTO_MIN}, {120, DFS_TX_LEAKAGE_AUTO_MIN},
    {124, DFS_TX_LEAKAGE_AUTO_MIN}, {128, DFS_TX_LEAKAGE_AUTO_MIN},
    {132, 262}, {136, 262},
    {140, DFS_TX_LEAKAGE_MIN},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {144,
    {{36, 695}, {40, 695},
    {44, 684}, {48, 684},
    {52, 664}, {56, 664},
    {60, 658}, {64, 658},
    {100, 601}, {104, 601},
    {108, 545}, {112, 545},
    {116, DFS_TX_LEAKAGE_AUTO_MIN}, {120, DFS_TX_LEAKAGE_AUTO_MIN},
    {124, DFS_TX_LEAKAGE_AUTO_MIN}, {128, DFS_TX_LEAKAGE_AUTO_MIN},
    {132, 262}, {136, 262},
    {140, DFS_TX_LEAKAGE_MIN},
    {144, DFS_TX_LEAKAGE_MIN}
    } },
};

/*
 * channel tx leakage table - ht20
 * Same layout as ht40_chan above: row key is the NOL channel, inner
 * pairs are {candidate channel, leakage level into the keyed channel}.
 */
struct dfs_matrix_tx_leak_info ht20_chan[] = {
    {52,
    {{36, DFS_TX_LEAKAGE_AUTO_MIN}, {40, 286},
    {44, 225}, {48, 121},
    {52, DFS_TX_LEAKAGE_MIN}, {56, DFS_TX_LEAKAGE_MIN},
    {60, 300}, {64, DFS_TX_LEAKAGE_AUTO_MIN},
    {100, 637}, {104, DFS_TX_LEAKAGE_MAX},
    {108, DFS_TX_LEAKAGE_MAX}, {112, DFS_TX_LEAKAGE_MAX},
    {116, DFS_TX_LEAKAGE_MAX}, {120, DFS_TX_LEAKAGE_MAX},
    {124, DFS_TX_LEAKAGE_MAX}, {128, DFS_TX_LEAKAGE_MAX},
    {132, DFS_TX_LEAKAGE_MAX}, {136, DFS_TX_LEAKAGE_MAX},
    {140, DFS_TX_LEAKAGE_MAX},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {56,
    {{36, 468}, {40, DFS_TX_LEAKAGE_AUTO_MIN},
    {44, DFS_TX_LEAKAGE_AUTO_MIN}, {48, 206},
    {52, DFS_TX_LEAKAGE_MIN}, {56, DFS_TX_LEAKAGE_MIN},
    {60, DFS_TX_LEAKAGE_MIN}, {64, DFS_TX_LEAKAGE_MIN},
    {100, DFS_TX_LEAKAGE_MAX}, {104, DFS_TX_LEAKAGE_MAX},
    {108, DFS_TX_LEAKAGE_MAX}, {112, DFS_TX_LEAKAGE_MAX},
    {116, DFS_TX_LEAKAGE_MAX}, {120, DFS_TX_LEAKAGE_MAX},
    {124, DFS_TX_LEAKAGE_MAX}, {128, DFS_TX_LEAKAGE_MAX},
    {132, DFS_TX_LEAKAGE_MAX}, {136, DFS_TX_LEAKAGE_MAX},
    {140, DFS_TX_LEAKAGE_MAX},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {60,
    {{36, 507}, {40, 440},
    {44, DFS_TX_LEAKAGE_AUTO_MIN}, {48, 313},
    {52, DFS_TX_LEAKAGE_MIN}, {56, DFS_TX_LEAKAGE_MIN},
    {60, DFS_TX_LEAKAGE_MIN}, {64, DFS_TX_LEAKAGE_MIN},
    {100, DFS_TX_LEAKAGE_MAX}, {104, DFS_TX_LEAKAGE_MAX},
    {108, DFS_TX_LEAKAGE_MAX}, {112, DFS_TX_LEAKAGE_MAX},
    {116, DFS_TX_LEAKAGE_MAX}, {120, DFS_TX_LEAKAGE_MAX},
    {124, DFS_TX_LEAKAGE_MAX}, {128, DFS_TX_LEAKAGE_MAX},
    {132, DFS_TX_LEAKAGE_MAX}, {136, DFS_TX_LEAKAGE_MAX},
    {140, DFS_TX_LEAKAGE_MAX},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {64,
    {{36, 516}, {40, 520},
    {44, 506}, {48, DFS_TX_LEAKAGE_AUTO_MIN},
    {52, 301}, {56, 258},
    {60, DFS_TX_LEAKAGE_MIN}, {64, DFS_TX_LEAKAGE_MIN},
    {100, 620}, {104, 617},
    {108, DFS_TX_LEAKAGE_MAX}, {112, DFS_TX_LEAKAGE_MAX},
    {116, DFS_TX_LEAKAGE_MAX}, {120, DFS_TX_LEAKAGE_MAX},
    {124, DFS_TX_LEAKAGE_MAX}, {128, DFS_TX_LEAKAGE_MAX},
    {132, DFS_TX_LEAKAGE_MAX}, {136, DFS_TX_LEAKAGE_MAX},
    {140, DFS_TX_LEAKAGE_MAX},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {100,
    {{36, 616}, {40, 601},
    {44, 604}, {48, 589},
    {52, 612}, {56, 592},
    {60, 590}, {64, 582},
    {100, DFS_TX_LEAKAGE_MIN}, {104, 131},
    {108, DFS_TX_LEAKAGE_AUTO_MIN}, {112, DFS_TX_LEAKAGE_AUTO_MIN},
    {116, DFS_TX_LEAKAGE_AUTO_MIN}, {120, 522},
    {124, 571}, {128, 589},
    {132, 593}, {136, 598},
    {140, 594},
    {144, DFS_TX_LEAKAGE_MIN},
    } },

    {104,
    {{36, 622}, {40, 624},
    {44, 618}, {48, 610},
    {52, DFS_TX_LEAKAGE_MAX}, {56, DFS_TX_LEAKAGE_MAX},
    {60, DFS_TX_LEAKAGE_MAX}, {64, DFS_TX_LEAKAGE_MAX},
    {100, DFS_TX_LEAKAGE_MIN}, {104, DFS_TX_LEAKAGE_MIN},
    {108, DFS_TX_LEAKAGE_MIN}, {112, 463},
    {116, 483}, {120, 503},
    {124, 523}, {128, 565},
    {132, 570}, {136, 588},
    {140, 585},
    {144, DFS_TX_LEAKAGE_MIN},
    } },

    {108,
    {{36, 620}, {40, 638},
    {44, 611}, {48, 614},
    {52, DFS_TX_LEAKAGE_MAX}, {56, DFS_TX_LEAKAGE_MAX},
    {60, DFS_TX_LEAKAGE_MAX}, {64, DFS_TX_LEAKAGE_MAX},
    {100, 477}, {104, DFS_TX_LEAKAGE_MIN},
    {108, DFS_TX_LEAKAGE_MIN}, {112, DFS_TX_LEAKAGE_MIN},
    {116, 477}, {120, 497},
    {124, 517}, {128, 537},
    {132, 557}, {136, 577},
    {140, 603},
    {144, DFS_TX_LEAKAGE_MIN},
    } },

    {112,
    {{36, 636}, {40, 623},
    {44, 638}, {48, 628},
    {52, DFS_TX_LEAKAGE_MAX}, {56, DFS_TX_LEAKAGE_MAX},
    {60, DFS_TX_LEAKAGE_MAX}, {64, 606},
    {100, 501}, {104, 481},
    {108, DFS_TX_LEAKAGE_MIN}, {112, DFS_TX_LEAKAGE_MIN},
    {116, DFS_TX_LEAKAGE_MIN}, {120, 481},
    /*
     * NOTE(review): 421 breaks the otherwise monotonically increasing
     * 481/501/.../541 progression of this row - possible typo for 521;
     * confirm against characterization data.
     */
    {124, 501}, {128, 421},
    {132, 541}, {136, 561},
    {140, 583},
    {144, DFS_TX_LEAKAGE_MIN},
    } },

    {116,
    {{36, 646}, {40, 648},
    {44, 633}, {48, 634},
    {52, DFS_TX_LEAKAGE_MAX}, {56, DFS_TX_LEAKAGE_MAX},
    {60, 615}, {64, 594},
    {100, 575}, {104, 554},
    {108, 534}, {112, DFS_TX_LEAKAGE_MIN},
    {116, DFS_TX_LEAKAGE_MIN}, {120, DFS_TX_LEAKAGE_MIN},
    {124, DFS_TX_LEAKAGE_MIN}, {128, DFS_TX_LEAKAGE_MIN},
    {132, 534}, {136, 554},
    {140, 574},
    {144, DFS_TX_LEAKAGE_MIN},
    } },

    {120,
    {{36, 643}, {40, 649},
    {44, 654}, {48, 629},
    {52, DFS_TX_LEAKAGE_MAX}, {56, 621},
    {60, DFS_TX_LEAKAGE_MAX}, {64, DFS_TX_LEAKAGE_MAX},
    {100, 565}, {104, 545},
    {108, 525}, {112, 505},
    {116, DFS_TX_LEAKAGE_MIN}, {120, DFS_TX_LEAKAGE_MIN},
    {124, DFS_TX_LEAKAGE_MIN}, {128, 505},
    {132, 525}, {136, 545},
    {140, 565},
    {144, DFS_TX_LEAKAGE_MIN},
    } },

    {124,
    {{36, 638}, {40, 657},
    {44, 663}, {48, 649},
    {52, DFS_TX_LEAKAGE_MAX}, {56, DFS_TX_LEAKAGE_MAX},
    {60, DFS_TX_LEAKAGE_MAX}, {64, DFS_TX_LEAKAGE_MAX},
    {100, 581}, {104, 561},
    {108, 541}, {112, 521},
    {116, 499}, {120, DFS_TX_LEAKAGE_MIN},
    {124, DFS_TX_LEAKAGE_MIN}, {128, DFS_TX_LEAKAGE_MIN},
    {132, 499}, {136, 519},
    {140, 539},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {128,
    {{36, 651}, {40, 651},
    {44, 674}, {48, 640},
    {52, DFS_TX_LEAKAGE_MAX}, {56, DFS_TX_LEAKAGE_MAX},
    {60, DFS_TX_LEAKAGE_MAX}, {64, DFS_TX_LEAKAGE_MAX},
    {100, 603}, {104, 560},
    {108, 540}, {112, 520},
    {116, 499}, {120, 479},
    {124, DFS_TX_LEAKAGE_MIN}, {128, DFS_TX_LEAKAGE_MIN},
    {132, DFS_TX_LEAKAGE_MIN}, {136, 479},
    {140, 499},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {132,
    {{36, 643}, {40, 668},
    {44, 651}, {48, 657},
    {52, DFS_TX_LEAKAGE_MAX}, {56, DFS_TX_LEAKAGE_MAX},
    {60, DFS_TX_LEAKAGE_MAX}, {64, DFS_TX_LEAKAGE_MAX},
    {100, DFS_TX_LEAKAGE_MAX}, {104, 602},
    {108, 578}, {112, 570},
    {116, 550}, {120, 530},
    {124, 510}, {128, DFS_TX_LEAKAGE_MIN},
    {132, DFS_TX_LEAKAGE_MIN}, {136, DFS_TX_LEAKAGE_MIN},
    {140, 490},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {136,
    {{36, 654}, {40, 667},
    {44, 666}, {48, 642},
    {52, DFS_TX_LEAKAGE_MAX}, {56, DFS_TX_LEAKAGE_MAX},
    {60, DFS_TX_LEAKAGE_MAX}, {64, DFS_TX_LEAKAGE_MAX},
    {100, DFS_TX_LEAKAGE_MAX}, {104, DFS_TX_LEAKAGE_MAX},
    {108, DFS_TX_LEAKAGE_MAX}, {112, 596},
    {116, 555}, {120, 535},
    {124, 515}, {128, 495},
    {132, DFS_TX_LEAKAGE_MIN}, {136, DFS_TX_LEAKAGE_MIN},
    {140, DFS_TX_LEAKAGE_MIN},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {140,
    {{36, 679}, {40, 673},
    {44, 667}, {48, 656},
    {52, 634}, {56, 663},
    {60, 662}, {64, 660},
    {100, DFS_TX_LEAKAGE_MAX}, {104, DFS_TX_LEAKAGE_MAX},
    {108, DFS_TX_LEAKAGE_MAX}, {112, 590},
    {116, 573}, {120, 553},
    {124, 533}, {128, 513},
    {132, 490}, {136, DFS_TX_LEAKAGE_MIN},
    {140, DFS_TX_LEAKAGE_MIN},
    {144, DFS_TX_LEAKAGE_MIN}
    } },

    {144,
    {{36, 679}, {40, 673},
    {44, 667}, {48, 656},
    {52, 634}, {56, 663},
    {60, 662}, {64, 660},
    {100, DFS_TX_LEAKAGE_MAX}, {104, DFS_TX_LEAKAGE_MAX},
    {108, DFS_TX_LEAKAGE_MAX}, {112, 590},
    {116, 573}, {120, 553},
    {124, 533}, {128, 513},
    {132, 490}, {136, DFS_TX_LEAKAGE_MIN},
    {140, DFS_TX_LEAKAGE_MIN},
    {144, DFS_TX_LEAKAGE_MIN}
    } },
};

/*
 * dfs_find_target_channel_in_channel_matrix() - finds the leakage matrix
 * @ch_width: target channel width
 * @NOL_channel: the NOL channel whose leakage matrix is required
 * @pTarget_chnl_mtrx: pointer to target channel matrix returned.
 *
 * This function gives the leakage matrix for given NOL channel and ch_width
 *
 * Return: TRUE or FALSE
 */
static bool
dfs_find_target_channel_in_channel_matrix(enum phy_ch_width ch_width,
        uint8_t NOL_channel,
        struct dfs_tx_leak_info **pTarget_chnl_mtrx)
{
    struct dfs_tx_leak_info *target_chan_matrix = NULL;
    struct dfs_matrix_tx_leak_info *pchan_matrix = NULL;
    uint32_t nchan_matrix;
    int i = 0;

    /* Select the per-bandwidth leakage table to search. */
    switch (ch_width) {
    case CH_WIDTH_20MHZ:
        /* HT20 */
        pchan_matrix = ht20_chan;
        nchan_matrix = QDF_ARRAY_SIZE(ht20_chan);
        break;
    case CH_WIDTH_40MHZ:
        /* HT40 */
        pchan_matrix = ht40_chan;
        nchan_matrix = QDF_ARRAY_SIZE(ht40_chan);
        break;
    case CH_WIDTH_80MHZ:
        /* HT80 */
        pchan_matrix = ht80_chan;
        nchan_matrix = QDF_ARRAY_SIZE(ht80_chan);
        break;
    default:
        /* handle exception and fall back to HT20 table */
        pchan_matrix = ht20_chan;
        nchan_matrix = QDF_ARRAY_SIZE(ht20_chan);
        break;
    }

    for (i = 0; i < nchan_matrix; i++) {
        /* find the SAP channel to map the leakage matrix */
        if (NOL_channel == pchan_matrix[i].channel) {
            target_chan_matrix = pchan_matrix[i].chan_matrix;
            break;
        }
    }

    if (NULL == target_chan_matrix) {
        return false;
    } else {
        *pTarget_chnl_mtrx = target_chan_matrix;
        return true;
    }
}

/*
 * dfs_mark_leaking_ch() - remove, from the candidate list, every channel
 * whose tx leakage into any channel currently on the NOL is at or below
 * dfs->tx_leakage_threshold.  "Removal" is done by zeroing the entry in
 * @temp_ch_lst in place; callers treat 0 entries as holes.
 */
QDF_STATUS
dfs_mark_leaking_ch(struct wlan_dfs *dfs,
        enum phy_ch_width ch_width,
        uint8_t temp_ch_lst_sz,
        uint8_t *temp_ch_lst)
{
    struct dfs_tx_leak_info *target_chan_matrix = NULL;
    /* number of rows in one leakage matrix (channels 36..144) */
    uint32_t num_channel = (CHAN_ENUM_144 - CHAN_ENUM_36) + 1;
    uint32_t j = 0;
    uint32_t k = 0;
    uint8_t dfs_nol_channel;
    struct dfs_nolelem *nol;

    nol = dfs->dfs_nol;
    while (nol) {
        /* NOL entries are keyed by frequency, tables by IEEE channel
         * number - convert before the lookup.
         */
        dfs_nol_channel = wlan_freq_to_chan(nol->nol_freq);
        if (false == dfs_find_target_channel_in_channel_matrix(
                ch_width, dfs_nol_channel,
                &target_chan_matrix)) {
            /*
             * should never happen, we should always find a table
             * here, if we don't, need a fix here!
             */
            dfs_err(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
                "Couldn't find target channel matrix!");
            QDF_ASSERT(0);
            return QDF_STATUS_E_FAILURE;
        }
        /*
         * following is based on assumption that both temp_ch_lst
         * and target channel matrix are in increasing order of
         * ch_id
         */
        /* two-pointer merge: j walks the candidates, k the matrix */
        for (j = 0, k = 0; j < temp_ch_lst_sz && k < num_channel;) {
            if (temp_ch_lst[j] == 0) {
                j++;
                continue;
            }
            if (target_chan_matrix[k].leak_chan != temp_ch_lst[j]) {
                k++;
                continue;
            }
            /*
             * check leakage from candidate channel
             * to NOL channel
             */
            if (target_chan_matrix[k].leak_lvl <=
                    dfs->tx_leakage_threshold) {
                /*
                 * candidate channel will have
                 * bad leakage in NOL channel,
                 * remove from temp list
                 */
                dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
                    "dfs: channel: %d will have bad leakage due to channel: %d\n",
                    dfs_nol_channel, temp_ch_lst[j]);
                temp_ch_lst[j] = 0;
            }
            j++;
            k++;
        }
        nol = nol->nol_next;
    } /* end of loop that selects each NOL */

    return QDF_STATUS_SUCCESS;
}
#else
/* Stub used when tx-leakage filtering is compiled out: keep all channels. */
QDF_STATUS
dfs_mark_leaking_ch(struct wlan_dfs *dfs,
    enum phy_ch_width ch_width,
    uint8_t temp_ch_lst_sz,
    uint8_t *temp_ch_lst)
{
    return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dfs_populate_80mhz_available_channels()- Populate channels for 80MHz using
 * bitmap
 * @dfs: Pointer to DFS structure.
 * @bitmap: bitmap
 * @avail_chnl: prepared channel list
 *
 * Prepare 80MHz channels from the bitmap.
 *
 * Return: channel count
 */
static uint8_t dfs_populate_80mhz_available_channels(
        struct wlan_dfs *dfs,
        struct chan_bonding_bitmap *bitmap,
        uint8_t *avail_chnl)
{
    uint8_t i = 0;
    uint8_t chnl_count = 0;
    uint8_t start_chan = 0;

    for (i = 0; i < DFS_MAX_80MHZ_BANDS; i++) {
        start_chan = bitmap->chan_bonding_set[i].start_chan;
        /* emit the band's four 20MHz members only when the whole
         * 80MHz map matches (presumably all sub-channels usable -
         * confirm DFS_80MHZ_MASK definition).
         */
        if (bitmap->chan_bonding_set[i].chan_map ==
                DFS_80MHZ_MASK) {
            avail_chnl[chnl_count++] = start_chan +
                (DFS_NEXT_5GHZ_CHANNEL * 0);
            avail_chnl[chnl_count++] = start_chan +
                (DFS_NEXT_5GHZ_CHANNEL * 1);
            avail_chnl[chnl_count++] = start_chan +
                (DFS_NEXT_5GHZ_CHANNEL * 2);
            avail_chnl[chnl_count++] = start_chan +
                (DFS_NEXT_5GHZ_CHANNEL * 3);
        }
    }

    dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
        "channel count %d", chnl_count);

    return chnl_count;
}

/**
 * dfs_populate_40mhz_available_channels()- Populate channels for 40MHz using
 * bitmap
 * @dfs: Pointer to DFS structure.
 * @bitmap: bitmap
 * @avail_chnl: prepared channel list
 *
 * Prepare 40MHz channels from the bitmap.
 *
 * Return: channel count
 */
static uint8_t dfs_populate_40mhz_available_channels(
        struct wlan_dfs *dfs,
        struct chan_bonding_bitmap *bitmap,
        uint8_t *avail_chnl)
{
    uint8_t i = 0;
    uint8_t chnl_count = 0;
    uint8_t start_chan = 0;

    for (i = 0; i < DFS_MAX_80MHZ_BANDS; i++) {
        start_chan = bitmap->chan_bonding_set[i].start_chan;
        /* lower 40MHz half of the 80MHz band */
        if ((bitmap->chan_bonding_set[i].chan_map &
            DFS_40MHZ_MASK_L) == DFS_40MHZ_MASK_L) {
            avail_chnl[chnl_count++] = start_chan +
                (DFS_NEXT_5GHZ_CHANNEL * 0);
            avail_chnl[chnl_count++] = start_chan +
                (DFS_NEXT_5GHZ_CHANNEL * 1);
        }
        /* upper 40MHz half of the 80MHz band */
        if ((bitmap->chan_bonding_set[i].chan_map &
            DFS_40MHZ_MASK_H) == DFS_40MHZ_MASK_H) {
            avail_chnl[chnl_count++] = start_chan +
                (DFS_NEXT_5GHZ_CHANNEL * 2);
            avail_chnl[chnl_count++] = start_chan +
                (DFS_NEXT_5GHZ_CHANNEL * 3);
        }
    }

    dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
        "channel count %d", chnl_count);

    return chnl_count;
}

/**
 * dfs_populate_available_channels()- Populate channels based on width and
 * bitmap
 * @dfs: Pointer to DFS structure.
 * @bitmap: bitmap
 * @ch_width: channel width
 * @avail_chnl: prepared channel list
 *
 * Prepare channel list based on width and channel bitmap.
 *
 * Return: channel count
 */
static uint8_t dfs_populate_available_channels(
        struct wlan_dfs *dfs,
        struct chan_bonding_bitmap *bitmap,
        uint8_t ch_width,
        uint8_t *avail_chnl)
{
    switch (ch_width) {
    /* 160 and 80+80 both start from the 80MHz building blocks */
    case DFS_CH_WIDTH_160MHZ:
    case DFS_CH_WIDTH_80P80MHZ:
    case DFS_CH_WIDTH_80MHZ:
        return dfs_populate_80mhz_available_channels(
            dfs, bitmap, avail_chnl);
    case DFS_CH_WIDTH_40MHZ:
        return dfs_populate_40mhz_available_channels(
            dfs, bitmap, avail_chnl);
    default:
        dfs_err(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
            "Invalid ch_width %d", ch_width);
        break;
    }

    return 0;
}

/**
 * dfs_get_rand_from_lst()- Get random channel from a given channel list
 * @dfs: Pointer to DFS structure.
 * @ch_lst: channel list
 * @num_ch: number of channels
 *
 * Get random channel from given channel list.
 *
 * Return: channel number
 */
static uint8_t dfs_get_rand_from_lst(
        struct wlan_dfs *dfs,
        uint8_t *ch_lst,
        uint8_t num_ch)
{
    uint8_t i;
    uint32_t rand_byte = 0;

    if (!num_ch || !ch_lst) {
        dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS,
            "invalid param ch_lst %pK, num_ch = %d",
            ch_lst, num_ch);
        return 0;
    }

    /* one random byte plus the system tick count, reduced mod num_ch,
     * picks the index
     */
    get_random_bytes((uint8_t *)&rand_byte, 1);
    i = (rand_byte + qdf_mc_timer_get_system_ticks()) % num_ch;

    dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
        "random channel %d", ch_lst[i]);

    return ch_lst[i];
}

/**
 * dfs_random_channel_sel_set_bitmap()- Set channel bit in bitmap based
 * on given channel number
 * @dfs: Pointer to DFS structure.
 * @bitmap: bitmap
 * @channel: channel number
 *
 * Set channel bit in bitmap based on given channel number.
 *
 * Return: None
 */
static void dfs_random_channel_sel_set_bitmap(
        struct wlan_dfs *dfs,
        struct chan_bonding_bitmap *bitmap,
        uint8_t channel)
{
    int i = 0;
    int start_chan = 0;

    for (i = 0; i < DFS_MAX_80MHZ_BANDS; i++) {
        start_chan = bitmap->chan_bonding_set[i].start_chan;
        /* start_chan + 12 bounds the four 20MHz members of one 80MHz
         * band (channel numbers spaced 4 apart, e.g. 36/40/44/48)
         */
        if (channel >= start_chan && channel <= start_chan + 12) {
            bitmap->chan_bonding_set[i].chan_map |=
                (1 << ((channel - start_chan) /
                DFS_80_NUM_SUB_CHANNEL));
            return;
        }
    }

    dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
        "Channel=%d is not in the bitmap", channel);
}

/**
 * dfs_find_ch_with_fallback()- find random channel
 * @dfs: Pointer to DFS structure.
 * @ch_wd: channel width
 * @center_freq_seg1: center frequency of secondary segment.
 * @ch_lst: list of available channels.
 * @num_ch: number of channels in the list.
 *
 * Find random channel based on given channel width and channel list,
 * fallback to lower width if requested channel width not available.
 *
 * Return: channel number
 */
static uint8_t dfs_find_ch_with_fallback(
        struct wlan_dfs *dfs,
        uint8_t *ch_wd,
        uint8_t *center_freq_seg1,
        uint8_t *ch_lst,
        uint32_t num_ch)
{
    bool flag = false;
    uint32_t rand_byte = 0;
    struct chan_bonding_bitmap ch_map = { { {0} } };
    uint8_t count = 0, i, index = 0, final_cnt = 0, target_channel = 0;
    uint8_t primary_seg_start_ch = 0, sec_seg_ch = 0, new_160_start_ch = 0;
    uint8_t final_lst[DFS_MAX_NUM_CHAN] = {0};

    /* initialize ch_map for all 80 MHz bands: we have 6 80MHz bands */
    ch_map.chan_bonding_set[0].start_chan = 36;
    ch_map.chan_bonding_set[1].start_chan = 52;
    ch_map.chan_bonding_set[2].start_chan = 100;
    ch_map.chan_bonding_set[3].start_chan = 116;
    ch_map.chan_bonding_set[4].start_chan = 132;
    ch_map.chan_bonding_set[5].start_chan = 149;

    for (i = 0; i < num_ch; i++) {
        dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
            "channel = %d added to bitmap", ch_lst[i]);
        dfs_random_channel_sel_set_bitmap(dfs, &ch_map, ch_lst[i]);
    }

    /* populate available channel list from bitmap */
    final_cnt = dfs_populate_available_channels(dfs, &ch_map,
            *ch_wd, final_lst);

    /* If no valid ch bonding found, fallback */
    if (final_cnt == 0) {
        if ((*ch_wd == DFS_CH_WIDTH_160MHZ) ||
            (*ch_wd == DFS_CH_WIDTH_80P80MHZ) ||
            (*ch_wd == DFS_CH_WIDTH_80MHZ)) {
            dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
                "from [%d] to 40Mhz", *ch_wd);
            *ch_wd = DFS_CH_WIDTH_40MHZ;
        } else if (*ch_wd == DFS_CH_WIDTH_40MHZ) {
            dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
                "from 40Mhz to 20MHz");
            *ch_wd = DFS_CH_WIDTH_20MHZ;
        }
        return 0;
    }

    /* ch count should be > 8 to switch new channel in 160Mhz band */
    if (((*ch_wd == DFS_CH_WIDTH_160MHZ) ||
         (*ch_wd == DFS_CH_WIDTH_80P80MHZ)) &&
        (final_cnt < DFS_MAX_20M_SUB_CH)) {
        dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
            "from [%d] to 80Mhz", *ch_wd);
        *ch_wd = DFS_CH_WIDTH_80MHZ;
        return 0;
    }

    if (*ch_wd == DFS_CH_WIDTH_160MHZ) {
        /*
         * Only 2 blocks for 160Mhz bandwidth i.e 36-64 & 100-128
         * and all the channels in these blocks are continuous
         * and separated by 4 channel numbers (i.e. 20MHz apart).
         */
        /* look for a run of 8 consecutive 20MHz channels */
        for (i = 1; ((i < final_cnt)); i++) {
            if ((final_lst[i] - final_lst[i-1]) ==
                    DFS_NEXT_5GHZ_CHANNEL)
                count++;
            else
                count = 0;
            if (count == DFS_MAX_20M_SUB_CH - 1) {
                flag = true;
                new_160_start_ch = final_lst[i - count];
                break;
            }
        }
    } else if (*ch_wd == DFS_CH_WIDTH_80P80MHZ) {
        flag = true;
    }

    if ((flag == false) && (*ch_wd > DFS_CH_WIDTH_80MHZ)) {
        dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
            "from [%d] to 80Mhz", *ch_wd);
        *ch_wd = DFS_CH_WIDTH_80MHZ;
        return 0;
    }

    if (*ch_wd == DFS_CH_WIDTH_160MHZ) {
        /* pick one of the 8 20MHz members of the 160MHz run */
        get_random_bytes((uint8_t *)&rand_byte, 1);
        rand_byte = (rand_byte + qdf_mc_timer_get_system_ticks())
            % DFS_MAX_20M_SUB_CH;
        target_channel = new_160_start_ch + (rand_byte *
                DFS_80_NUM_SUB_CHANNEL);
    } else if (*ch_wd == DFS_CH_WIDTH_80P80MHZ) {
        get_random_bytes((uint8_t *)&rand_byte, 1);
        index = (rand_byte + qdf_mc_timer_get_system_ticks()) %
            final_cnt;
        target_channel = final_lst[index];
        /* round down to the start of the primary 80MHz block */
        index -= (index % DFS_80_NUM_SUB_CHANNEL);
        primary_seg_start_ch = final_lst[index];

        /* reset channels associate with primary 80Mhz */
        for (i = 0; i < DFS_80_NUM_SUB_CHANNEL; i++)
            final_lst[i + index] = 0;
        /* select and calculate center freq for secondary segment */
        for (i = 0; i < final_cnt / DFS_80_NUM_SUB_CHANNEL; i++) {
            if (final_lst[i * DFS_80_NUM_SUB_CHANNEL] &&
                (abs(primary_seg_start_ch -
                    final_lst[i * DFS_80_NUM_SUB_CHANNEL]) >
                 (DFS_MAX_20M_SUB_CH * 2))) {
                sec_seg_ch =
                    final_lst[i * DFS_80_NUM_SUB_CHANNEL] +
                    DFS_80MHZ_START_CENTER_CH_DIFF;
                break;
            }
        }

        /* no usable secondary segment: degrade to 160 or plain 80 */
        if (!sec_seg_ch && (final_cnt == DFS_MAX_20M_SUB_CH))
            *ch_wd = DFS_CH_WIDTH_160MHZ;
        else if (!sec_seg_ch)
            *ch_wd = DFS_CH_WIDTH_80MHZ;

        *center_freq_seg1 = sec_seg_ch;
        dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
            "Center frequency seg1 = %d", sec_seg_ch);
    } else {
        target_channel = dfs_get_rand_from_lst(dfs,
                final_lst, final_cnt);
    }
    dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
        "target channel = %d", target_channel);

    return target_channel;
}

/**
 * dfs_remove_cur_ch_from_list()- remove current operating channels
 * @ch_list: list of avilable channel list
 * @ch_cnt: number of channels.
 * @ch_wd: channel width.
 * @cur_chan: current channel.
 *
 * Remove current channels from list of available channels.
 *
 * Return: channel number
 */
static void dfs_remove_cur_ch_from_list(
    struct dfs_channel *ch_list,
    uint32_t *ch_cnt,
    uint8_t *ch_wd,
    struct dfs_channel *cur_chan)
{
    /* TODO */
    /* NOTE(review): unimplemented stub - the current operating channel
     * is NOT removed here; only the per-channel check inside
     * dfs_apply_rules() filters it today.
     */
    return;
}

/**
 * dfs_freq_is_in_nol()- check if given channel in nol list
 * @dfs: dfs handler
 * @freq: channel frequency.
 *
 * check if given channel in nol list.
 *
 * Return: true if channel in nol, false else
 */
static bool dfs_freq_is_in_nol(struct wlan_dfs *dfs, uint32_t freq)
{
    struct dfs_nolelem *nol;

    /* defensive: a null dfs is reported but treated as "not in NOL" */
    if (!dfs) {
        dfs_err(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, "null dfs");
        return false;
    }

    nol = dfs->dfs_nol;
    while (nol) {
        if (freq == nol->nol_freq) {
            dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
                "%d is in nol", freq);
            return true;
        }
        nol = nol->nol_next;
    }

    return false;
}

/**
 * dfs_apply_rules()- prepare channel list based on flags
 * @dfs: dfs handler
 * @flags: channel flags
 * @random_chan_list: output channel list
 * @random_chan_cnt: output channel count
 * @ch_list: input channel list
 * @ch_cnt: input channel count
 * @dfs_region: dfs region
 * @acs_info: acs channel range information
 *
 * prepare channel list based on flags
 *
 * Return: None
 */
static void dfs_apply_rules(struct wlan_dfs *dfs,
    uint32_t flags,
    uint8_t *random_chan_list,
    uint32_t *random_chan_cnt,
    struct dfs_channel *ch_list,
    uint32_t ch_cnt,
    uint8_t dfs_region,
    struct dfs_acs_info *acs_info)
{
    struct dfs_channel *chan;
    uint16_t flag_no_weather = 0;
    uint16_t
flag_no_lower_5g = 0;
    uint16_t flag_no_upper_5g = 0;
    uint16_t flag_no_dfs_chan = 0;
    uint16_t flag_no_2g_chan = 0;
    uint16_t flag_no_5g_chan = 0;
    int i;
    bool found = false;
    uint8_t j;

    dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, "flags %d", flags);
    /* weather-radar avoidance only applies in the ETSI region */
    flag_no_weather = (dfs_region == DFS_ETSI_REGION_VAL) ?
        flags & DFS_RANDOM_CH_FLAG_NO_WEATHER_CH : 0;

    /* Japan indoor/outdoor restrictions only apply in the MKK region */
    flag_no_lower_5g = (dfs_region == DFS_MKK_REGION_VAL) ?
        flags & DFS_RANDOM_CH_FLAG_NO_LOWER_5G_CH : 0;

    flag_no_upper_5g = (dfs_region == DFS_MKK_REGION_VAL) ?
        flags & DFS_RANDOM_CH_FLAG_NO_UPEER_5G_CH : 0;

    flag_no_dfs_chan = flags & DFS_RANDOM_CH_FLAG_NO_DFS_CH;
    flag_no_2g_chan = flags & DFS_RANDOM_CH_FLAG_NO_2GHZ_CH;
    flag_no_5g_chan = flags & DFS_RANDOM_CH_FLAG_NO_5GHZ_CH;

    /* every channel that survives all filters below is appended to
     * random_chan_list
     */
    for (i = 0; i < ch_cnt; i++) {
        chan = &ch_list[i];

        if ((chan->dfs_ch_ieee == 0) ||
            (chan->dfs_ch_ieee > MAX_CHANNEL_NUM)) {
            dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
                "invalid channel %d",
                chan->dfs_ch_ieee);
            continue;
        }

        if (flags & DFS_RANDOM_CH_FLAG_NO_CURR_OPE_CH) {
            /* TODO : Skip all HT20 channels in the given mode */
            if (chan->dfs_ch_ieee ==
                dfs->dfs_curchan->dfs_ch_ieee) {
                dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
                    "skip %d current operating channel\n",
                    chan->dfs_ch_ieee);
                continue;
            }
        }

        /* with ACS enabled, only channels inside the ACS range pass */
        if (acs_info && acs_info->acs_mode) {
            for (j = 0; j < acs_info->num_of_channel; j++) {
                if (acs_info->channel_list[j] ==
                    chan->dfs_ch_ieee) {
                    found = true;
                    break;
                }
            }

            if (!found) {
                dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
                    "skip ch %d not in acs range",
                    chan->dfs_ch_ieee);
                continue;
            }
            found = false;
        }

        if (flag_no_2g_chan &&
            chan->dfs_ch_ieee <= DFS_MAX_24GHZ_CHANNEL) {
            dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
                "skip 2.4 GHz channel=%d",
                chan->dfs_ch_ieee);
            continue;
        }

        if (flag_no_5g_chan &&
            chan->dfs_ch_ieee > DFS_MAX_24GHZ_CHANNEL) {
            dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
                "skip 5 GHz channel=%d",
                chan->dfs_ch_ieee);
            continue;
        }

        if (flag_no_weather) {
            if (DFS_IS_CHANNEL_WEATHER_RADAR(chan->dfs_ch_freq)) {
                dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
                    "skip weather channel=%d",
                    chan->dfs_ch_ieee);
                continue;
            }
        }

        if (flag_no_lower_5g &&
            DFS_IS_CHAN_JAPAN_INDOOR(chan->dfs_ch_ieee)) {
            dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
                "skip indoor channel=%d",
                chan->dfs_ch_ieee);
            continue;
        }

        if (flag_no_upper_5g &&
            DFS_IS_CHAN_JAPAN_OUTDOOR(chan->dfs_ch_ieee)) {
            dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
                "skip outdoor channel=%d",
                chan->dfs_ch_ieee);
            continue;
        }

        if (flag_no_dfs_chan &&
            (chan->dfs_ch_flagext & WLAN_CHAN_DFS)) {
            dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
                "skip dfs channel=%d",
                chan->dfs_ch_ieee);
            continue;
        }

        if (dfs_freq_is_in_nol(dfs, chan->dfs_ch_freq)) {
            dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
                "skip nol channel=%d",
                chan->dfs_ch_ieee);
            continue;
        }

        random_chan_list[*random_chan_cnt] = chan->dfs_ch_ieee;
        *random_chan_cnt += 1;
    }
}

/*
 * dfs_prepare_random_channel() - pick a random usable channel.
 *
 * Filters @ch_list through dfs_apply_rules(), removes candidates with
 * excessive tx leakage into NOL channels (dfs_mark_leaking_ch()), then
 * draws a random channel for the requested width, falling back to
 * narrower widths via dfs_find_ch_with_fallback() (*ch_wd is updated in
 * place).  Returns the chosen IEEE channel number, or 0 on failure.
 */
uint8_t dfs_prepare_random_channel(struct wlan_dfs *dfs,
    struct dfs_channel *ch_list,
    uint32_t ch_cnt,
    uint32_t flags,
    uint8_t *ch_wd,
    struct dfs_channel *cur_chan,
    uint8_t dfs_region,
    struct dfs_acs_info *acs_info)
{
    int i = 0;
    uint8_t final_cnt = 0;
    uint8_t target_ch = 0;
    uint8_t *random_chan_list = NULL;
    uint32_t random_chan_cnt = 0;
    uint16_t flag_no_weather = 0;
    uint8_t *leakage_adjusted_lst;
    uint8_t final_lst[NUM_CHANNELS] = {0};

    if (!ch_list || !ch_cnt) {
        dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
            "Invalid params %pK, ch_cnt=%d",
            ch_list, ch_cnt);
        return 0;
    }

    if (*ch_wd < DFS_CH_WIDTH_20MHZ || *ch_wd > DFS_CH_WIDTH_80P80MHZ) {
        dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
            "Invalid ch_wd %d", *ch_wd);
        return 0;
    }

    random_chan_list = qdf_mem_malloc(ch_cnt * sizeof(*random_chan_list));
    if (!random_chan_list) {
        dfs_alert(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
            "Memory allocation failed");
        return 0;
    }

    if (flags & DFS_RANDOM_CH_FLAG_NO_CURR_OPE_CH)
        dfs_remove_cur_ch_from_list(ch_list, &ch_cnt, ch_wd, cur_chan);

    dfs_apply_rules(dfs, flags, random_chan_list, &random_chan_cnt,
        ch_list, ch_cnt, dfs_region, acs_info);

    flag_no_weather = (dfs_region == DFS_ETSI_REGION_VAL) ?
        flags & DFS_RANDOM_CH_FLAG_NO_WEATHER_CH : 0;

    /* list adjusted after leakage has been marked */
    /* NOTE(review): if random_chan_cnt is 0 this is a zero-byte
     * allocation - confirm qdf_mem_malloc(0) semantics.
     */
    leakage_adjusted_lst = qdf_mem_malloc(random_chan_cnt);
    if (!leakage_adjusted_lst) {
        dfs_alert(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
            "Memory allocation failed");
        qdf_mem_free(random_chan_list);
        return 0;
    }

    /* retry loop: each pass re-marks leakage for the (possibly reduced)
     * width, then attempts a selection; fallback narrows *ch_wd.
     */
    do {
        qdf_mem_copy(leakage_adjusted_lst, random_chan_list,
            random_chan_cnt);
        if (QDF_IS_STATUS_ERROR(dfs_mark_leaking_ch(dfs, *ch_wd,
                random_chan_cnt,
                leakage_adjusted_lst))) {
            qdf_mem_free(random_chan_list);
            qdf_mem_free(leakage_adjusted_lst);
            return 0;
        }

        if (*ch_wd == DFS_CH_WIDTH_20MHZ) {
            /*
             * PASS: 3 - from leakage_adjusted_lst, prepare valid
             * ch list and use random number from that
             */
            for (i = 0; i < random_chan_cnt; i++) {
                if (leakage_adjusted_lst[i] == 0)
                    continue;
                dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN,
                    "dfs: Channel=%d added to available list",
                    leakage_adjusted_lst[i]);
                final_lst[final_cnt] = leakage_adjusted_lst[i];
                final_cnt++;
            }
            target_ch = dfs_get_rand_from_lst(
                dfs, final_lst, final_cnt);
            break;
        }

        target_ch = dfs_find_ch_with_fallback(dfs, ch_wd,
            &cur_chan->dfs_ch_vhtop_ch_freq_seg2,
            leakage_adjusted_lst,
            random_chan_cnt);

        /*
         * When flag_no_weather is set, avoid usage of Adjacent
         * weather radar channel in HT40 mode as extension channel
         * will be on 5600.
+ */ + if (flag_no_weather && + (target_ch == + DFS_ADJACENT_WEATHER_RADAR_CHANNEL_NUM) && + (*ch_wd == DFS_CH_WIDTH_40MHZ)) { + dfs_debug(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "skip weather adjacent ch=%d\n", + target_ch); + continue; + } + + if (target_ch) + break; + } while (true); + + qdf_mem_free(random_chan_list); + qdf_mem_free(leakage_adjusted_lst); + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, "target_ch = %d", target_ch); + + return target_ch; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_zero_cac.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_zero_cac.c new file mode 100644 index 0000000000000000000000000000000000000000..f2efc15ca938f528e3b7ca7a20a43064c4d428f9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/core/src/misc/dfs_zero_cac.c @@ -0,0 +1,945 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * DOC: This file has ZERO CAC DFS functions. + * Abstract:- Operation in a DFS channel requires CAC that adds additional + * delay as well as loss of connection even when CSA is used. ETSI allows + * pre-CAC, i.e. performing CAC at a convenient time and using that channel + * later. Once Pre-CAC is done in a channel, it is no longer required to + * perform a CAC in the channel before TX/RX as long as radar is not found in + * it or we reset or restart the device. + * + * Design:- + * The pre-CAC is done in a RADIO that has VHT80_80 capable radio where the + * primary and secondary HT80s can be programmed independently with two + * different HT80 channels. Three new lists are introduced to handle pre-CAC. + * The lists are: + * 1)Pre-CAC-required list + * 2)Pre-CAC-done list + * 3)Pre-CAC-NOL list + * At the beginning the Pre-CAC-required list is populated with the unique + * secondary HT80 frequencies of HT80_80 channels. Whenever a HT80 channel + * change happens we convert the HT80 channel change to a HT80_80 channel and + * the secondary HT80 is set to the first element(HT80 frequency) from the + * Pre-CAC-required list. Pre-CAC period is the same as the CAC period. After + * the pre-CAC period is over the pre-CAC timer expires and the HT80 frequency + * (the pre-CAC-required list element) is removed from the Pre-CAC-required + * list and inserted into the Pre-CAC-done list. 
While Pre-CAC timer is running + * if there is any RADAR detect then the current HT80 frequency is removed from + * the Pre-CAC-required list and inserted into the Pre-CAC-NOL list. Each + * element of pre-CAC-NOL times out individually after 30 minutes of its + * insertion. Pre-CAC-NOL timeout is just like the regular NOL timeout. Upon + * Pre-CAC-NOL expiry of an element (HT80 frequency), the element is removed + * from the Pre-CAC-NOL list and inserted into the Pre-CAC-required list. + * At any point of time if there is a channel change and the new channel is + * DFS, the Pre-CAC-done list is consulted to check if pre-CAC has been + * completed for the entire bandwidth of the new channel. If Pre-CAC has + * already been done for the entire bandwidth of the channel then regular CAC + * can be skipped(this is what is known as Zero wait DFS) if we are in ETSI + * domain. + * + * New RadarTool commands:- + * 1)radartool -i wifi[X] secondSegmentBangradar + * It simulates RADAR from the secondary HT80 when the + * secondary HT80 is doing pre-CAC. If secondary is not + * doing any pre-CAC then this command has no effect. + * 2)radartool -i wifi[X] showPreCACLists + * It shows all 3 pre-CAC Lists' contents. + * + * New iwpriv commands:- + * 1)iwpriv wifi[X] preCACEn 0/1 + * This command enables/disables the zero-cac-DFS. + * 2)iwpriv wifi[X] pCACTimeout + * Override the pCACTimeout. + * + * FAQ(Frequently Asked Questions):- + * 1) + * Question)We already have NOL list. Why do we need separate pre-CAC-NOL + * list? + * Answer) pre-CAC is done on an HT80 channel and the same HT80 channel is + * inserted into pre-CAC-NOL list after pre-CAC radar detection. NOL list + * contains HT20 channels. Since after pre-CAC-NOL expiry we need + * to move the HT80 channel from pre-CAC-NOL list to pre-CAC-required list + * it is very easy to remove the HT80 channel and insert it. 
Having + * a separate pre-CAC-NOL also provides some separation from the existing + * code and helps modularize. + */ + +#include "../dfs_zero_cac.h" +#include "wlan_dfs_lmac_api.h" +#include "wlan_dfs_mlme_api.h" +#include "wlan_dfs_utils_api.h" +#include "../dfs_internal.h" + +void dfs_zero_cac_reset(struct wlan_dfs *dfs) +{ + struct dfs_precac_entry *tmp_precac_entry, *precac_entry; + + dfs_get_override_precac_timeout(dfs, + &(dfs->dfs_precac_timeout_override)); + qdf_timer_sync_cancel(&dfs->dfs_precac_timer); + dfs->dfs_precac_primary_freq = 0; + dfs->dfs_precac_secondary_freq = 0; + + PRECAC_LIST_LOCK(dfs); + if (!TAILQ_EMPTY(&dfs->dfs_precac_nol_list)) + TAILQ_FOREACH_SAFE(precac_entry, + &dfs->dfs_precac_nol_list, + pe_list, + tmp_precac_entry) { + qdf_timer_free(&precac_entry->precac_nol_timer); + TAILQ_REMOVE(&dfs->dfs_precac_required_list, + precac_entry, pe_list); + qdf_mem_free(precac_entry); + } + PRECAC_LIST_UNLOCK(dfs); +} + +void dfs_zero_cac_timer_free(struct wlan_dfs *dfs) +{ + struct dfs_precac_entry *tmp_precac_entry, *precac_entry; + + dfs_get_override_precac_timeout(dfs, + &dfs->dfs_precac_timeout_override); + + qdf_timer_free(&dfs->dfs_precac_timer); + + dfs->dfs_precac_primary_freq = 0; + dfs->dfs_precac_secondary_freq = 0; + + PRECAC_LIST_LOCK(dfs); + if (!TAILQ_EMPTY(&dfs->dfs_precac_nol_list)) + TAILQ_FOREACH_SAFE(precac_entry, + &dfs->dfs_precac_nol_list, + pe_list, + tmp_precac_entry) { + qdf_timer_free(&precac_entry->precac_nol_timer); + TAILQ_REMOVE(&dfs->dfs_precac_required_list, + precac_entry, pe_list); + qdf_mem_free(precac_entry); + } + PRECAC_LIST_UNLOCK(dfs); +} + +int dfs_override_precac_timeout(struct wlan_dfs *dfs, int precac_timeout) +{ + if (!dfs) + return -EIO; + + dfs->dfs_precac_timeout_override = precac_timeout; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "PreCAC timeout is now %s (%d)", + (precac_timeout == -1) ? 
"default" : "overridden", + precac_timeout); + + return 0; +} + +int dfs_get_override_precac_timeout(struct wlan_dfs *dfs, int *precac_timeout) +{ + if (!dfs) + return -EIO; + + (*precac_timeout) = dfs->dfs_precac_timeout_override; + + return 0; +} + +#define VHT80_OFFSET 6 +#define IS_WITHIN_RANGE(_A, _B, _C) \ + (((_A) >= ((_B)-(_C))) && ((_A) <= ((_B)+(_C)))) + +bool dfs_is_ht20_40_80_chan_in_precac_done_list(struct wlan_dfs *dfs) +{ + struct dfs_precac_entry *precac_entry; + bool ret_val = 0; + + /* + * A is within B-C and B+C + * (B-C) <= A <= (B+C) + */ + PRECAC_LIST_LOCK(dfs); + if (!TAILQ_EMPTY(&dfs->dfs_precac_done_list)) + TAILQ_FOREACH(precac_entry, + &dfs->dfs_precac_done_list, + pe_list) { + /* Find if the VHT80 freq1 is in Pre-CAC done list */ + if (IS_WITHIN_RANGE(dfs->dfs_curchan->dfs_ch_ieee, + precac_entry->vht80_freq, + VHT80_OFFSET)) { + ret_val = 1; + break; + } + } + PRECAC_LIST_UNLOCK(dfs); + + dfs_debug(dfs, WLAN_DEBUG_DFS, "vht80_freq = %u ret_val = %d", + dfs->dfs_curchan->dfs_ch_ieee, ret_val); + + return ret_val; +} + +bool dfs_is_ht80_80_chan_in_precac_done_list(struct wlan_dfs *dfs) +{ + struct dfs_precac_entry *precac_entry; + bool ret_val = 0; + + PRECAC_LIST_LOCK(dfs); + if (!TAILQ_EMPTY(&dfs->dfs_precac_done_list)) { + bool primary_found = 0; + /* Check if primary is DFS then search */ + if (WLAN_IS_CHAN_DFS(dfs->dfs_curchan)) { + TAILQ_FOREACH(precac_entry, + &dfs->dfs_precac_done_list, + pe_list) { + if (dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg1 + == precac_entry->vht80_freq) { + primary_found = 1; + break; + } + } + } else { + primary_found = 1; + } + + /* Check if secondary DFS then search */ + if (WLAN_IS_CHAN_DFS_CFREQ2(dfs->dfs_curchan) && + primary_found) { + TAILQ_FOREACH(precac_entry, + &dfs->dfs_precac_done_list, + pe_list) { + if (dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg2 + == precac_entry->vht80_freq) { + /* Now secondary also found */ + ret_val = 1; + break; + } + } + } else { + if (primary_found) + ret_val = 1; + 
} + } + PRECAC_LIST_UNLOCK(dfs); + + dfs_debug(dfs, WLAN_DEBUG_DFS, + "freq_seg1 = %u freq_seq2 = %u ret_val = %d", + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg1, + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg2, + ret_val); + + return ret_val; +} + +bool dfs_is_precac_done(struct wlan_dfs *dfs) +{ + bool ret_val = 0; + + if (WLAN_IS_CHAN_11AC_VHT20(dfs->dfs_curchan) || + WLAN_IS_CHAN_11AC_VHT40(dfs->dfs_curchan) || + WLAN_IS_CHAN_11AC_VHT80(dfs->dfs_curchan)) { + ret_val = dfs_is_ht20_40_80_chan_in_precac_done_list(dfs); + } else if (WLAN_IS_CHAN_11AC_VHT80_80(dfs->dfs_curchan) || + WLAN_IS_CHAN_11AC_VHT160(dfs->dfs_curchan)) { + ret_val = dfs_is_ht80_80_chan_in_precac_done_list(dfs); + } + + dfs_debug(dfs, WLAN_DEBUG_DFS, "ret_val = %d", ret_val); + + return ret_val; +} + +#define VHT80_IEEE_FREQ_OFFSET 6 + +void dfs_mark_precac_dfs(struct wlan_dfs *dfs, + uint8_t is_radar_found_on_secondary_seg) +{ + struct dfs_precac_entry *precac_entry = NULL, *tmp_precac_entry = NULL; + uint8_t found = 0; + + dfs_debug(dfs, WLAN_DEBUG_DFS, + "is_radar_found_on_secondary_seg = %u secondary_freq = %u primary_freq = %u", + is_radar_found_on_secondary_seg, + dfs->dfs_precac_secondary_freq, + dfs->dfs_precac_primary_freq); + + /* + * Even if radar found on primary, we need to move the channel from + * precac-required-list and precac-done-list to precac-nol-list. + */ + PRECAC_LIST_LOCK(dfs); + if (!TAILQ_EMPTY(&dfs->dfs_precac_required_list)) { + TAILQ_FOREACH_SAFE(precac_entry, + &dfs->dfs_precac_required_list, + pe_list, + tmp_precac_entry) { + /* + * If on primary then use IS_WITHIN_RANGE else use + * equality directly. + */ + if (is_radar_found_on_secondary_seg ? 
+ (dfs->dfs_precac_secondary_freq == + precac_entry->vht80_freq) : IS_WITHIN_RANGE( + dfs->dfs_curchan->dfs_ch_ieee, + precac_entry->vht80_freq, + VHT80_IEEE_FREQ_OFFSET)) { + TAILQ_REMOVE(&dfs->dfs_precac_required_list, + precac_entry, pe_list); + + dfs_debug(dfs, WLAN_DEBUG_DFS, + "removing the freq = %u from required list and adding to NOL list", + precac_entry->vht80_freq); + TAILQ_INSERT_TAIL(&dfs->dfs_precac_nol_list, + precac_entry, pe_list); + qdf_timer_mod(&precac_entry->precac_nol_timer, + dfs_get_nol_timeout(dfs)*1000); + found = 1; + break; + } + } + } + + /* If not found in precac-required-list remove from precac-done-list */ + if (!found && !TAILQ_EMPTY(&dfs->dfs_precac_done_list)) { + TAILQ_FOREACH_SAFE(precac_entry, + &dfs->dfs_precac_done_list, + pe_list, + tmp_precac_entry) { + /* + * If on primary then use IS_WITHIN_RANGE else use + * equality directly. + */ + if (is_radar_found_on_secondary_seg ? + (dfs->dfs_precac_secondary_freq == + precac_entry->vht80_freq) : + IS_WITHIN_RANGE( + dfs->dfs_curchan->dfs_ch_ieee, + precac_entry->vht80_freq, 6)) { + TAILQ_REMOVE(&dfs->dfs_precac_done_list, + precac_entry, pe_list); + + dfs_debug(dfs, WLAN_DEBUG_DFS, + "removing the the freq = %u from done list and adding to NOL list", + precac_entry->vht80_freq); + TAILQ_INSERT_TAIL(&dfs->dfs_precac_nol_list, + precac_entry, pe_list); + qdf_timer_mod(&precac_entry->precac_nol_timer, + dfs_get_nol_timeout(dfs)*1000); + break; + } + } + } + PRECAC_LIST_UNLOCK(dfs); + + /* TODO xxx:- Need to lock the channel change */ + /* + * If radar Found on Primary no need to do restart VAP's channels since + * channel change will happen after RANDOM channel selection anyway. 
+ */ + + if (dfs->dfs_precac_timer_running) { + /* Cancel the PreCAC timer */ + qdf_timer_stop(&dfs->dfs_precac_timer); + dfs->dfs_precac_timer_running = 0; + + /* + * Change the channel + * case 1:- No VHT80 channel for precac is available so bring + * it back to VHT80 + * case 2:- pick a new VHT80 channel for precac + */ + if (is_radar_found_on_secondary_seg) { + if (dfs_is_ap_cac_timer_running(dfs)) { + dfs->dfs_defer_precac_channel_change = 1; + dfs_debug(dfs, WLAN_DEBUG_DFS, + "Primary CAC is running, defer the channel change" + ); + } else { + dfs_mlme_channel_change_by_precac( + dfs->dfs_pdev_obj); + } + } + } +} + +bool dfs_is_precac_timer_running(struct wlan_dfs *dfs) +{ + return dfs->dfs_precac_timer_running ? true : false; +} + +#define VHT80_IEEE_FREQ_OFFSET 6 +void dfs_find_precac_secondary_vht80_chan(struct wlan_dfs *dfs, + struct dfs_channel *chan) +{ + uint8_t first_primary_dfs_ch_ieee; + + first_primary_dfs_ch_ieee = + dfs->dfs_precac_secondary_freq - VHT80_IEEE_FREQ_OFFSET; + + dfs_mlme_find_dot11_channel(dfs->dfs_pdev_obj, + first_primary_dfs_ch_ieee, 0, + WLAN_PHYMODE_11AC_VHT80, + &(chan->dfs_ch_freq), + &(chan->dfs_ch_flags), + &(chan->dfs_ch_flagext), + &(chan->dfs_ch_ieee), + &(chan->dfs_ch_vhtop_ch_freq_seg1), + &(chan->dfs_ch_vhtop_ch_freq_seg2)); +} + +/** + * dfs_precac_timeout() - Precac timeout. + * + * Removes the channel from precac_required list and adds it to the + * precac_done_list. Triggers a precac channel change. 
+ */ +static os_timer_func(dfs_precac_timeout) +{ + struct dfs_precac_entry *precac_entry, *tmp_precac_entry; + struct wlan_dfs *dfs = NULL; + + OS_GET_TIMER_ARG(dfs, struct wlan_dfs *); + dfs->dfs_precac_timer_running = 0; + + /* + * Remove the VHT80 freq from the precac-required-list and add it to the + * precac-done-list + */ + + PRECAC_LIST_LOCK(dfs); + if (!TAILQ_EMPTY(&dfs->dfs_precac_required_list)) { + TAILQ_FOREACH_SAFE(precac_entry, + &dfs->dfs_precac_required_list, + pe_list, + tmp_precac_entry) { + if (dfs->dfs_precac_secondary_freq == + precac_entry->vht80_freq) { + TAILQ_REMOVE(&dfs->dfs_precac_required_list, + precac_entry, pe_list); + dfs_debug(dfs, WLAN_DEBUG_DFS, + "removing the the freq = %u from required list and adding to done list", + precac_entry->vht80_freq); + TAILQ_INSERT_TAIL(&dfs->dfs_precac_done_list, + precac_entry, pe_list); + break; + } + } + } + PRECAC_LIST_UNLOCK(dfs); + + dfs_debug(dfs, WLAN_DEBUG_DFS, + "Pre-cac expired, Precac Secondary chan %u curr time %d", + dfs->dfs_precac_secondary_freq, + (qdf_system_ticks_to_msecs(qdf_system_ticks()) / 1000)); + /* Do vdev restart so that we can change the secondary VHT80 channel. */ + + /* TODO xxx : Need to lock the channel change */ + dfs_mlme_channel_change_by_precac(dfs->dfs_pdev_obj); +} + +void dfs_zero_cac_timer_init(struct wlan_dfs *dfs) +{ + qdf_timer_init(NULL, + &(dfs->dfs_precac_timer), + dfs_precac_timeout, + (void *) dfs, + QDF_TIMER_TYPE_WAKE_APPS); +} + +void dfs_zero_cac_attach(struct wlan_dfs *dfs) +{ + dfs->dfs_precac_timeout_override = -1; + dfs_zero_cac_timer_init(dfs); + PRECAC_LIST_LOCK_CREATE(dfs); +} + +/** + * dfs_precac_nol_timeout() - NOL timeout for precac channel. + * + * Removes the VHT80 channel from precac nol list and adds it to precac required + * list. 
+ */ +static os_timer_func(dfs_precac_nol_timeout) +{ + struct dfs_precac_entry *precac_entry; + struct wlan_dfs *dfs = NULL; + + OS_GET_TIMER_ARG(precac_entry, struct dfs_precac_entry *); + dfs = (struct wlan_dfs *)precac_entry->dfs; + + PRECAC_LIST_LOCK(dfs); + if (!TAILQ_EMPTY(&dfs->dfs_precac_nol_list)) { + /* Move the channel from precac-NOL to precac-required-list */ + TAILQ_REMOVE(&dfs->dfs_precac_nol_list, precac_entry, pe_list); + dfs_debug(dfs, WLAN_DEBUG_DFS, + "removing the the freq = %u from PreCAC NOL-list and adding Precac-required list", + precac_entry->vht80_freq); + TAILQ_INSERT_TAIL(&dfs->dfs_precac_required_list, precac_entry, + pe_list); + } + PRECAC_LIST_UNLOCK(dfs); + + /* TODO xxx : Need to lock the channel change */ + /* Do a channel change */ + dfs_mlme_channel_change_by_precac(dfs->dfs_pdev_obj); +} + +void dfs_init_precac_list(struct wlan_dfs *dfs) +{ + u_int i; + uint8_t found; + struct dfs_precac_entry *tmp_precac_entry; + int nchans = 0; + + /* + * We need to prepare list of uniq VHT80 center frequencies. But at the + * beginning we do not know how many uniq frequencies are present. + * Therefore, we calculate the MAX size and allocate a temporary + * list/array. However we fill the temporary array with uniq frequencies + * and copy the uniq list of frequencies to the final list with exact + * size. 
+ */ + TAILQ_INIT(&dfs->dfs_precac_required_list); + TAILQ_INIT(&dfs->dfs_precac_done_list); + TAILQ_INIT(&dfs->dfs_precac_nol_list); + dfs_mlme_get_dfs_ch_nchans(dfs->dfs_pdev_obj, &nchans); + + PRECAC_LIST_LOCK(dfs); + /* Fill the precac-required-list with unique elements */ + for (i = 0; i < nchans; i++) { + struct dfs_channel *ichan = NULL, lc; + + ichan = &lc; + dfs_mlme_get_dfs_ch_channels(dfs->dfs_pdev_obj, + &(ichan->dfs_ch_freq), + &(ichan->dfs_ch_flags), + &(ichan->dfs_ch_flagext), + &(ichan->dfs_ch_ieee), + &(ichan->dfs_ch_vhtop_ch_freq_seg1), + &(ichan->dfs_ch_vhtop_ch_freq_seg2), + i); + + if (WLAN_IS_CHAN_11AC_VHT80(ichan) && + WLAN_IS_CHAN_DFS(ichan)) { + found = 0; + TAILQ_FOREACH(tmp_precac_entry, + &dfs->dfs_precac_required_list, + pe_list) { + if (tmp_precac_entry->vht80_freq == + ichan-> + dfs_ch_vhtop_ch_freq_seg1) { + found = 1; + break; + } + } + if (!found) { + struct dfs_precac_entry *precac_entry; + + precac_entry = qdf_mem_malloc( + sizeof(*precac_entry)); + if (!precac_entry) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "entry alloc fail for : %d", i); + continue; + } + precac_entry->vht80_freq = + ichan->dfs_ch_vhtop_ch_freq_seg1; + precac_entry->dfs = dfs; + + /* + * Initialize per entry timer. Shall be used + * when the entry moves to precac_nol_list. 
+ */ + qdf_timer_init(NULL, + &(precac_entry->precac_nol_timer), + dfs_precac_nol_timeout, + (void *) (precac_entry), + QDF_TIMER_TYPE_WAKE_APPS); + TAILQ_INSERT_TAIL( + &dfs->dfs_precac_required_list, + precac_entry, pe_list); + } + } + } + PRECAC_LIST_UNLOCK(dfs); + + dfs_debug(dfs, WLAN_DEBUG_DFS, + "Print the list of VHT80 frequencies from linked list"); + TAILQ_FOREACH(tmp_precac_entry, + &dfs->dfs_precac_required_list, + pe_list) + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "freq=%u", + tmp_precac_entry->vht80_freq); +} + +void dfs_deinit_precac_list(struct wlan_dfs *dfs) +{ + struct dfs_precac_entry *tmp_precac_entry, *precac_entry; + + dfs_debug(dfs, WLAN_DEBUG_DFS, + "Free the list of VHT80 frequencies from linked list(precac_required)" + ); + PRECAC_LIST_LOCK(dfs); + if (!TAILQ_EMPTY(&dfs->dfs_precac_required_list)) + TAILQ_FOREACH_SAFE(precac_entry, + &dfs->dfs_precac_required_list, + pe_list, tmp_precac_entry) { + TAILQ_REMOVE(&dfs->dfs_precac_required_list, + precac_entry, pe_list); + qdf_mem_free(precac_entry); + } + + dfs_debug(dfs, WLAN_DEBUG_DFS, + "Free the list of VHT80 frequencies from linked list(precac_done)" + ); + if (!TAILQ_EMPTY(&dfs->dfs_precac_done_list)) + TAILQ_FOREACH_SAFE(precac_entry, + &dfs->dfs_precac_done_list, + pe_list, tmp_precac_entry) { + TAILQ_REMOVE(&dfs->dfs_precac_done_list, + precac_entry, pe_list); + qdf_mem_free(precac_entry); + } + + dfs_debug(dfs, WLAN_DEBUG_DFS, + "Free the list of VHT80 frequencies from linked list(precac_nol)" + ); + if (!TAILQ_EMPTY(&dfs->dfs_precac_nol_list)) + TAILQ_FOREACH_SAFE(precac_entry, + &dfs->dfs_precac_nol_list, + pe_list, + tmp_precac_entry) { + qdf_timer_stop(&precac_entry->precac_nol_timer); + TAILQ_REMOVE(&dfs->dfs_precac_nol_list, + precac_entry, pe_list); + qdf_mem_free(precac_entry); + } + PRECAC_LIST_UNLOCK(dfs); + +} + +void dfs_zero_cac_detach(struct wlan_dfs *dfs) +{ + dfs_deinit_precac_list(dfs); + PRECAC_LIST_LOCK_DESTROY(dfs); +} + +uint8_t 
dfs_get_freq_from_precac_required_list(struct wlan_dfs *dfs, + uint8_t exclude_ieee_freq) +{ + struct dfs_precac_entry *precac_entry; + uint8_t ieee_freq = 0; + + dfs_debug(dfs, WLAN_DEBUG_DFS, "exclude_ieee_freq = %u", + exclude_ieee_freq); + + PRECAC_LIST_LOCK(dfs); + if (!TAILQ_EMPTY(&dfs->dfs_precac_required_list)) { + TAILQ_FOREACH(precac_entry, &dfs->dfs_precac_required_list, + pe_list) { + if (precac_entry->vht80_freq != exclude_ieee_freq) { + ieee_freq = precac_entry->vht80_freq; + break; + } + } + } + PRECAC_LIST_UNLOCK(dfs); + dfs_debug(dfs, WLAN_DEBUG_DFS, "ieee_freq = %u", ieee_freq); + + return ieee_freq; +} + +void dfs_cancel_precac_timer(struct wlan_dfs *dfs) +{ + qdf_timer_stop(&dfs->dfs_precac_timer); + dfs->dfs_precac_timer_running = 0; +} + +void dfs_start_precac_timer(struct wlan_dfs *dfs, uint8_t precac_chan) +{ + struct dfs_channel *ichan, lc; + uint8_t first_primary_dfs_ch_ieee; + int primary_cac_timeout; + int secondary_cac_timeout; + int precac_timeout; + +#define EXTRA_TIME_IN_SEC 5 + dfs->dfs_precac_timer_running = 1; + + /* + * Get the first primary ieee chan in the HT80 band and find the channel + * pointer. + */ + first_primary_dfs_ch_ieee = precac_chan - VHT80_IEEE_FREQ_OFFSET; + + primary_cac_timeout = dfs_mlme_get_cac_timeout(dfs->dfs_pdev_obj, + dfs->dfs_curchan->dfs_ch_freq, + dfs->dfs_curchan->dfs_ch_vhtop_ch_freq_seg2, + dfs->dfs_curchan->dfs_ch_flags); + + ichan = &lc; + dfs_mlme_find_dot11_channel(dfs->dfs_pdev_obj, + first_primary_dfs_ch_ieee, 0, + WLAN_PHYMODE_11AC_VHT80, + &(ichan->dfs_ch_freq), + &(ichan->dfs_ch_flags), + &(ichan->dfs_ch_flagext), + &(ichan->dfs_ch_ieee), + &(ichan->dfs_ch_vhtop_ch_freq_seg1), + &(ichan->dfs_ch_vhtop_ch_freq_seg2)); + + secondary_cac_timeout = (dfs->dfs_precac_timeout_override != -1) ? 
+ dfs->dfs_precac_timeout_override : + dfs_mlme_get_cac_timeout(dfs->dfs_pdev_obj, + ichan->dfs_ch_freq, + ichan->dfs_ch_vhtop_ch_freq_seg2, + ichan->dfs_ch_flags); + + /* + * EXTRA time is needed so that if CAC and PreCAC is running + * simultaneously, PreCAC expiry function may be called before CAC + * expiry and PreCAC expiry does a channel change (vdev_restart) the + * restart response calls CAC_start function(ieee80211_dfs_cac_start) + * which cancels any previous CAC timer and starts a new CAC again. + * So CAC expiry does not happen and moreover a new CAC is started. + * Therefore do not disturb the CAC by channel restart (vdev_restart). + */ + precac_timeout = QDF_MAX(primary_cac_timeout, secondary_cac_timeout) + + EXTRA_TIME_IN_SEC; + dfs_debug(dfs, WLAN_DEBUG_DFS, + "precactimeout = %d", (precac_timeout)*1000); + qdf_timer_mod(&dfs->dfs_precac_timer, (precac_timeout) * 1000); +} + +void dfs_print_precaclists(struct wlan_dfs *dfs) +{ + struct dfs_precac_entry *tmp_precac_entry; + + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + PRECAC_LIST_LOCK(dfs); + + /* Print the Pre-CAC required List */ + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Pre-cac-required list of VHT80 frequencies"); + TAILQ_FOREACH(tmp_precac_entry, + &dfs->dfs_precac_required_list, + pe_list) { + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "freq=%u", tmp_precac_entry->vht80_freq); + } + + /* Print the Pre-CAC done List */ + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Pre-cac-done list of VHT80 frequencies"); + TAILQ_FOREACH(tmp_precac_entry, + &dfs->dfs_precac_done_list, + pe_list) { + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "freq=%u", + tmp_precac_entry->vht80_freq); + } + + /* Print the Pre-CAC NOL List */ + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Pre-cac-NOL list of VHT80 frequencies"); + TAILQ_FOREACH(tmp_precac_entry, + &dfs->dfs_precac_nol_list, + pe_list) { + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "freq=%u", tmp_precac_entry->vht80_freq); + } + + 
 PRECAC_LIST_UNLOCK(dfs); +} + +void dfs_reset_precaclists(struct wlan_dfs *dfs) +{ + dfs_debug(dfs, WLAN_DEBUG_DFS, + "Reset precaclist of VHT80 frequencies"); + dfs_deinit_precac_list(dfs); + dfs_init_precac_list(dfs); +} + +void dfs_reset_precac_lists(struct wlan_dfs *dfs) +{ + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + dfs_reset_precaclists(dfs); +} + +void dfs_find_vht80_chan_for_precac(struct wlan_dfs *dfs, + uint32_t chan_mode, + uint8_t ch_freq_seg1, + uint32_t *cfreq1, + uint32_t *cfreq2, + uint32_t *phy_mode, + bool *dfs_set_cfreq2, + bool *set_agile) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_target_tx_ops *tx_ops; + uint32_t target_type; + + psoc = wlan_pdev_get_psoc(dfs->dfs_pdev_obj); + if (!psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "psoc is NULL"); + return; + } + + tx_ops = &(psoc->soc_cb.tx_ops.target_tx_ops); + target_type = lmac_get_target_type(dfs->dfs_pdev_obj); + + if (chan_mode == WLAN_PHYMODE_11AC_VHT80) { + /* + * If + * 1) The chip is CASCADE + * 2) The user phy_mode is VHT80 and + * 3) The user has enabled Pre-CAC and + * 4) The regdomain is ETSI + * then find a center frequency for the secondary VHT80 and + * Change the mode to VHT80_80 or VHT160 + */ + uint8_t ieee_freq; + + dfs_debug(dfs, WLAN_DEBUG_DFS, + "precac_secondary_freq = %u precac_running = %u", + dfs->dfs_precac_secondary_freq, + dfs->dfs_precac_timer_running); + + if (dfs->dfs_precac_enable && + tx_ops->tgt_is_tgt_type_qca9984(target_type) && + (utils_get_dfsdomain(dfs->dfs_pdev_obj) == + DFS_ETSI_DOMAIN)) { + /* + * If precac timer is running then do not change the + * secondary channel use the old secondary VHT80 + * channel. If precac timer is not running then try to + * find a new channel from precac-required-list. + */ + if (dfs->dfs_precac_timer_running) { + /* + * Primary and secondary VHT80 cannot be the + * same. 
Therefore exclude the primary + * frequency while getting new channel from + * precac-required-list. + */ + if (ch_freq_seg1 == + dfs->dfs_precac_secondary_freq) + ieee_freq = + dfs_get_freq_from_precac_required_list( + dfs, + ch_freq_seg1); + else + ieee_freq = + dfs->dfs_precac_secondary_freq; + } else + ieee_freq = + dfs_get_freq_from_precac_required_list( + dfs, ch_freq_seg1); + + if (ieee_freq) { + if (ieee_freq == (ch_freq_seg1 + + VHT160_IEEE_FREQ_DIFF)) { + /* + * Override the HW channel mode to + * VHT160 + */ + uint8_t ieee_160_cfreq; + + ieee_160_cfreq = + (ieee_freq + ch_freq_seg1)/2; + chan_mode = WLAN_PHYMODE_11AC_VHT160; + *cfreq1 = dfs_mlme_ieee2mhz( + dfs->dfs_pdev_obj, + ch_freq_seg1, + WLAN_CHAN_5GHZ); + *cfreq2 = dfs_mlme_ieee2mhz( + dfs->dfs_pdev_obj, + ieee_160_cfreq, + WLAN_CHAN_5GHZ); + } else { + /* + * Override the HW channel mode to + * VHT80_80. + */ + chan_mode = + WLAN_PHYMODE_11AC_VHT80_80; + *cfreq2 = dfs_mlme_ieee2mhz( + dfs->dfs_pdev_obj, + ieee_freq, + WLAN_CHAN_5GHZ); + } + *phy_mode = lmac_get_phymode_info( + dfs->dfs_pdev_obj, chan_mode); + *dfs_set_cfreq2 = true; + + /* + * Finally set the agile flag. + * When we want a full calibration of both + * primary VHT80 and secondary VHT80 the agile + * flag is set to FALSE else set to TRUE. When + * a channel is being set for the first time + * this flag must be FALSE because first time + * the entire channel must be calibrated. All + * subsequent times the flag must be set to TRUE + * if we are changing only the secondary VHT80. 
+ */ + if (dfs->dfs_precac_primary_freq == + ch_freq_seg1) + *set_agile = true; + else + *set_agile = false; + + dfs_debug(dfs, WLAN_DEBUG_DFS, + "cfreq1 = %u cfreq2 = %u ieee_freq = %u mode = %u set_agile = %d", + *cfreq1, *cfreq2, ieee_freq, + chan_mode, *set_agile); + + dfs->dfs_precac_secondary_freq = ieee_freq; + dfs->dfs_precac_primary_freq = ch_freq_seg1; + + /* Start the pre_cac_timer */ + dfs_start_precac_timer(dfs, + dfs->dfs_precac_secondary_freq); + } /* End of if(ieee_freq) */ + } /* End of if(dfs->dfs_precac_enable) */ + } +} + +void dfs_set_precac_enable(struct wlan_dfs *dfs, uint32_t value) +{ + dfs->dfs_precac_enable = value; +} + +uint32_t dfs_get_precac_enable(struct wlan_dfs *dfs) +{ + return dfs->dfs_precac_enable; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_init_deinit_api.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_init_deinit_api.h new file mode 100644 index 0000000000000000000000000000000000000000..4da0c94b85c74eb7910bee158352c810986b0352 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_init_deinit_api.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: This file init/deint functions for DFS module. + */ + +#ifndef _WLAN_DFS_INIT_DEINIT_API_H_ +#define _WLAN_DFS_INIT_DEINIT_API_H_ + +#include "wlan_dfs_ucfg_api.h" + +/** + * wlan_pdev_get_dfs_obj() - Get DFS object from PDEV. + * @pdev: Pointer to PDEV structure. + * @id: DFS component ID. + * @obj: Pointer to DFS object. + */ +struct wlan_dfs *wlan_pdev_get_dfs_obj(struct wlan_objmgr_pdev *pdev); + +/** + * register_dfs_callbacks() - Fill mlme pointers. + */ +void register_dfs_callbacks(void); + +/** + * dfs_init() - Init DFS module + */ +QDF_STATUS dfs_init(void); + +/** + * dfs_deinit() - Deinit DFS module. + */ +QDF_STATUS dfs_deinit(void); + +/** + * wlan_dfs_pdev_obj_create_notification() - DFS pdev object create handler. + * @pdev: Pointer to DFS pdev object. + */ +QDF_STATUS wlan_dfs_pdev_obj_create_notification(struct wlan_objmgr_pdev *pdev, + void *arg); + +/** + * wlan_dfs_pdev_obj_destroy_notification() - DFS pdev object delete handler. + * @pdev: Pointer to DFS pdev object. + */ +QDF_STATUS wlan_dfs_pdev_obj_destroy_notification(struct wlan_objmgr_pdev *pdev, + void *arg); + +/** + * wifi_dfs_psoc_enable() - handles registering dfs event handlers. + * @psoc: psoc object. + */ +QDF_STATUS wifi_dfs_psoc_enable(struct wlan_objmgr_psoc *psoc); + +/** + * wifi_dfs_psoc_disable() - handles deregistering dfs event handlers. + * @psoc: psoc object. + */ +QDF_STATUS wifi_dfs_psoc_disable(struct wlan_objmgr_psoc *psoc); + +#endif /* _WLAN_DFS_INIT_DEINIT_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_ioctl.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_ioctl.h new file mode 100644 index 0000000000000000000000000000000000000000..978dd2df9c52677deb108335a26a4a2c1a5b772f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_ioctl.h @@ -0,0 +1,210 @@ +/* + * Copyright (c) 2011, 2016-2017 The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2010, Atheros Communications Inc. + * All Rights Reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has dfs IOCTL Defines. + */ + +#ifndef _DFS_IOCTL_H_ +#define _DFS_IOCTL_H_ + +#define DFS_MUTE_TIME 1 +#define DFS_SET_THRESH 2 +#define DFS_GET_THRESH 3 +#define DFS_GET_USENOL 4 +#define DFS_SET_USENOL 5 +#define DFS_RADARDETECTS 6 +#define DFS_BANGRADAR 7 +#define DFS_SHOW_NOL 8 +#define DFS_DISABLE_DETECT 9 +#define DFS_ENABLE_DETECT 10 +#define DFS_DISABLE_FFT 11 +#define DFS_ENABLE_FFT 12 +#define DFS_SET_DEBUG_LEVEL 13 +#define DFS_GET_NOL 14 +#define DFS_SET_NOL 15 + +#define DFS_SET_FALSE_RSSI_THRES 16 +#define DFS_SET_PEAK_MAG 17 +#define DFS_IGNORE_CAC 18 +#define DFS_SET_NOL_TIMEOUT 19 +#define DFS_GET_CAC_VALID_TIME 20 +#define DFS_SET_CAC_VALID_TIME 21 +#define DFS_SHOW_NOLHISTORY 22 +#define DFS_SECOND_SEGMENT_BANGRADAR 23 +#define DFS_SHOW_PRECAC_LISTS 24 +#define DFS_RESET_PRECAC_LISTS 25 + +/* + * Spectral IOCTLs use DFS_LAST_IOCTL as the base. + * This must always be the last IOCTL in DFS and have + * the highest value. + */ +#define DFS_LAST_IOCTL 26 + +#ifndef DFS_CHAN_MAX +#define DFS_CHAN_MAX 1023 +#endif + +/** + * struct dfsreq_nolelem - NOL elements. + * @nol_freq: NOL channel frequency. 
+ * @nol_chwidth: NOL channel width. + * @nol_start_ticks: OS ticks when the NOL timer started. + * @nol_timeout_ms: Nol timeout value in msec. + */ + +struct dfsreq_nolelem { + uint16_t nol_freq; + uint16_t nol_chwidth; + unsigned long nol_start_ticks; + uint32_t nol_timeout_ms; +}; + +struct dfsreq_nolinfo { + uint32_t dfs_ch_nchans; + struct dfsreq_nolelem dfs_nol[DFS_CHAN_MAX]; +}; + +/* + * IOCTL parameter types + */ + +#define DFS_PARAM_FIRPWR 1 +#define DFS_PARAM_RRSSI 2 +#define DFS_PARAM_HEIGHT 3 +#define DFS_PARAM_PRSSI 4 +#define DFS_PARAM_INBAND 5 +/* 5413 specific parameters */ +#define DFS_PARAM_RELPWR 7 +#define DFS_PARAM_RELSTEP 8 +#define DFS_PARAM_MAXLEN 9 + +/** + * struct dfs_ioctl_params - DFS ioctl params. + * @dfs_firpwr: FIR pwr out threshold. + * @dfs_rrssi: Radar rssi thresh. + * @dfs_height: Pulse height thresh. + * @dfs_prssi: Pulse rssi thresh. + * @dfs_inband: Inband thresh. + * @dfs_relpwr: Pulse relative pwr thresh. + * @dfs_relstep: Pulse relative step thresh. + * @dfs_maxlen: Pulse max duration. 
+ */ +struct dfs_ioctl_params { + int32_t dfs_firpwr; + int32_t dfs_rrssi; + int32_t dfs_height; + int32_t dfs_prssi; + int32_t dfs_inband; + int32_t dfs_relpwr; + int32_t dfs_relstep; + int32_t dfs_maxlen; +}; + +#define DFS_IOCTL_PARAM_NOVAL 65535 +#define DFS_IOCTL_PARAM_ENABLE 0x8000 + +/* Random channel flags */ +/* Flag to exclude current operating channels */ +#define DFS_RANDOM_CH_FLAG_NO_CURR_OPE_CH 0x0001 /* 0000 0000 0000 0001 */ + +/* Flag to exclude weather channels */ +#define DFS_RANDOM_CH_FLAG_NO_WEATHER_CH 0x0002 /* 0000 0000 0000 0010 */ + +/* Flag to exclude indoor channels */ +#define DFS_RANDOM_CH_FLAG_NO_LOWER_5G_CH 0x0004 /* 0000 0000 0000 0100 */ + +/* Flag to exclude outdoor channels */ +#define DFS_RANDOM_CH_FLAG_NO_UPEER_5G_CH 0x0008 /* 0000 0000 0000 1000 */ + +/* Flag to exclude dfs channels */ +#define DFS_RANDOM_CH_FLAG_NO_DFS_CH 0x0010 /* 0000 0000 0001 0000 */ + +/* Flag to exclude all 5GHz channels */ +#define DFS_RANDOM_CH_FLAG_NO_5GHZ_CH 0x0020 /* 0000 0000 0010 0000 */ + +/* Flag to exclude all 2.4GHz channels */ +#define DFS_RANDOM_CH_FLAG_NO_2GHZ_CH 0x0040 /* 0000 0000 0100 0000 */ + +/** + * struct wlan_dfs_caps - DFS capability structure. + * @wlan_dfs_ext_chan_ok: Can radar be detected on the extension chan? + * @wlan_dfs_combined_rssi_ok: Can use combined radar RSSI? + * @wlan_dfs_use_enhancement: This flag is used to indicate if radar + * detection scheme should use enhanced chirping + * detection algorithm. This flag also determines + * if certain radar data should be discarded to + * minimize false detection of radar. + * @wlan_strong_signal_diversiry: Strong Signal fast diversity count. + * @wlan_chip_is_bb_tlv: Chip is BB TLV? + * @wlan_chip_is_over_sampled: Is Over sampled. + * @wlan_chip_is_ht160: IS VHT160? + * @wlan_chip_is_false_detect: Is False detected? 
+ * @wlan_fastdiv_val: Goes with wlan_strong_signal_diversiry: If we + * have fast diversity capability, read off + * Strong Signal fast diversity count set in the + * ini file, and store so we can restore the + * value when radar is disabled. + */ +struct wlan_dfs_caps { + uint32_t wlan_dfs_ext_chan_ok:1, + wlan_dfs_combined_rssi_ok:1, + wlan_dfs_use_enhancement:1, + wlan_strong_signal_diversiry:1, + wlan_chip_is_bb_tlv:1, + wlan_chip_is_over_sampled:1, + wlan_chip_is_ht160:1, + wlan_chip_is_false_detect:1; + uint32_t wlan_fastdiv_val; +}; + +/** + * struct wlan_dfs_phyerr_param - DFS Phyerr structure. + * @pe_firpwr: FIR pwr out threshold. + * @pe_rrssi: Radar rssi thresh. + * @pe_height: Pulse height thresh. + * @pe_prssi: Pulse rssi thresh. + * @pe_inband: Inband thresh. + * @pe_relpwr: Relative power threshold in 0.5dB steps. + * @pe_relstep: Pulse Relative step threshold in 0.5dB steps. + * @pe_maxlen: Max length of radar sign in 0.8us units. + * @pe_usefir128: Use the average in-band power measured over 128 cycles. + * @pe_blockradar: Enable to block radar check if pkt detect is done via OFDM + * weak signal detect or pkt is detected immediately after tx + * to rx transition. + * @pe_enmaxrssi: Enable to use the max rssi instead of the last rssi during + * fine gain changes for radar detection. 
+ */ +struct wlan_dfs_phyerr_param { + int32_t pe_firpwr; + int32_t pe_rrssi; + int32_t pe_height; + int32_t pe_prssi; + int32_t pe_inband; + uint32_t pe_relpwr; + uint32_t pe_relstep; + uint32_t pe_maxlen; + bool pe_usefir128; + bool pe_blockradar; + bool pe_enmaxrssi; +}; + +#endif /* _DFS_IOCTL_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_lmac_api.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_lmac_api.h new file mode 100644 index 0000000000000000000000000000000000000000..99b671e9159680a37b3a94712aa0dad03d681262 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_lmac_api.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: These APIs are used by DFS core functions to call lmac/offload + * functions. + */ + +#ifndef _WLAN_DFS_LMAC_API_H_ +#define _WLAN_DFS_LMAC_API_H_ + +#include +#include + +/** + * lmac_get_caps() - Get DFS capabilities. + * @pdev: Pointer to PDEV structure. + * @dfs_caps: Pointer to dfs_caps structure + */ +void lmac_get_caps(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_caps *dfs_caps); + +/** + * lmac_get_tsf64() - Get tsf64 value. 
+ * @pdev: Pointer to PDEV structure. + * + * Return: tsf64 timer value. + */ +uint64_t lmac_get_tsf64(struct wlan_objmgr_pdev *pdev); + +/** + * lmac_dfs_disable() - Disable DFS. + * @pdev: Pointer to PDEV structure. + * @no_cac: no_cac flag. + */ +void lmac_dfs_disable(struct wlan_objmgr_pdev *pdev, int no_cac); + +/** + * lmac_dfs_enable() - Enable DFS. + * @pdev: Pointer to PDEV structure. + * @is_fastclk: fastclk value. + * @param: Pointer to wlan_dfs_phyerr_param structure. + * @dfsdomain: DFS domain. + */ +void lmac_dfs_enable(struct wlan_objmgr_pdev *pdev, + int *is_fastclk, + struct wlan_dfs_phyerr_param *param, + int dfsdomain); + +/** + * lmac_dfs_get_thresholds() - Get thresholds. + * @pdev: Pointer to PDEV structure. + * @param: Pointer to wlan_dfs_phyerr_param structure. + */ +void lmac_dfs_get_thresholds(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_phyerr_param *param); + +/** + * lmac_get_ah_devid() - Get ah devid. + * @pdev: Pointer to PDEV structure. + */ +uint16_t lmac_get_ah_devid(struct wlan_objmgr_pdev *pdev); + +/** + * lmac_get_ext_busy() - Get ext_busy. + * @pdev: Pointer to PDEV structure. + */ +uint32_t lmac_get_ext_busy(struct wlan_objmgr_pdev *pdev); + +/** + * lmac_set_use_cac_prssi() - Set use_cac_prssi value. + * @pdev: Pointer to PDEV structure. + */ +void lmac_set_use_cac_prssi(struct wlan_objmgr_pdev *pdev); + +/** + * lmac_get_target_type() - Get target type. + * @pdev: Pointer to PDEV structure. + */ +uint32_t lmac_get_target_type(struct wlan_objmgr_pdev *pdev); + +/** + * lmac_get_phymode_info() - Get phymode info. + * @pdev: Pointer to PDEV structure. + */ +uint32_t lmac_get_phymode_info(struct wlan_objmgr_pdev *pdev, + uint32_t chan_mode); + +/** + * lmac_is_host_dfs_check_support_enabled() - Check if Host DFS confirmation + * feature is supported. + * @pdev: Pointer to PDEV structure. + * + * Return: true, host dfs check supported, else false. 
+ */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +bool lmac_is_host_dfs_check_support_enabled(struct wlan_objmgr_pdev *pdev); +#else +static inline bool lmac_is_host_dfs_check_support_enabled( + struct wlan_objmgr_pdev *pdev) +{ + return false; +} +#endif +#endif /* _WLAN_DFS_LMAC_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_mlme_api.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_mlme_api.h new file mode 100644 index 0000000000000000000000000000000000000000..ed7edc3ba09fcf4d21cc8a13ac5e2a52c3f95068 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_mlme_api.h @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: These APIs are used by DFS core functions to call mlme functions. + */ + +#ifndef _WLAN_DFS_MLME_API_H_ +#define _WLAN_DFS_MLME_API_H_ + +#include "wlan_dfs_ucfg_api.h" + +extern struct dfs_to_mlme global_dfs_to_mlme; + +/** + * dfs_mlme_start_rcsa() - Send RCSA to RootAP. + * @pdev: Pointer to DFS pdev object. + * @wait_for_csa: Wait for CSA from RootAP. 
+ */ +void dfs_mlme_start_rcsa(struct wlan_objmgr_pdev *pdev, + bool *wait_for_csa); + +/** + * dfs_mlme_mark_dfs() - Mark the channel in the channel list. + * @pdev: Pointer to DFS pdev object. + * @ieee: Channel number. + * @freq: Channel frequency. + * @vhtop_ch_freq_seg2: VHT80 Cfreq2. + * @flags: channel flags. + */ +void dfs_mlme_mark_dfs(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint16_t freq, + uint8_t vhtop_ch_freq_seg2, + uint64_t flags); + +/** + * dfs_mlme_start_csa() - Sends CSA in ieeeChan + * @pdev: Pointer to DFS pdev object. + * @ieee_chan: Channel number. + * @freq: Channel frequency. + * @cfreq2: HT80 cfreq2. + * @flags: channel flags. + */ +void dfs_mlme_start_csa(struct wlan_objmgr_pdev *pdev, + uint8_t ieee_chan, + uint16_t freq, + uint8_t cfreq2, + uint64_t flags); + +/** + * dfs_mlme_proc_cac() - Process the CAC completion event. + * @pdev: Pointer to DFS pdev object. + * @vdev_id: vdev id. + */ +void dfs_mlme_proc_cac(struct wlan_objmgr_pdev *pdev, uint32_t vdev_id); + +/** + * dfs_mlme_deliver_event_up_afrer_cac() - Send a CAC timeout, VAP up event to + * userspace. + * @pdev: Pointer to DFS pdev object. + */ +void dfs_mlme_deliver_event_up_afrer_cac(struct wlan_objmgr_pdev *pdev); + +/** + * dfs_mlme_get_dfs_ch_nchans() - Get number of channels in the channel list + * @pdev: Pointer to DFS pdev object. + * @nchans: Pointer to save the channel number. + */ +void dfs_mlme_get_dfs_ch_nchans(struct wlan_objmgr_pdev *pdev, int *nchans); + +/** + * dfs_mlme_get_extchan() - Get extension channel. + * @pdev: Pointer to DFS pdev object. + * @dfs_ch_freq: Frequency in Mhz. + * @dfs_ch_flags: Channel flags. + * @dfs_ch_flagext: Extended channel flags. + * @dfs_ch_ieee: IEEE channel number. + * @dfs_ch_vhtop_ch_freq_seg1: Channel Center frequency. + * @dfs_ch_vhtop_ch_freq_seg2: Channel Center frequency applicable for 80+80MHz + * mode of operation. 
+ */ +QDF_STATUS dfs_mlme_get_extchan(struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2); + +/** + * dfs_mlme_set_no_chans_available() - Set no_chans_available flag. + * @pdev: Pointer to DFS pdev object. + * @val: Set this value to no_chans_available flag. + */ +void dfs_mlme_set_no_chans_available(struct wlan_objmgr_pdev *pdev, + int val); + +/** + * dfs_mlme_ieee2mhz() - Get the frequency from channel number. + * @pdev: Pointer to DFS pdev object. + * @ieee: Channel number. + * @flag: Channel flag. + */ +int dfs_mlme_ieee2mhz(struct wlan_objmgr_pdev *pdev, + int ieee, + uint64_t flag); + +/** + * dfs_mlme_find_dot11_channel() - Get dot11 channel from ieee, cfreq2 and mode. + * @pdev: Pointer to DFS pdev object. + * @ieee: Channel number. + * @des_cfreq2: cfreq2 + * @mode: Phymode + * @dfs_ch_freq: Frequency in Mhz. + * @dfs_ch_flags: Channel flags. + * @dfs_ch_flagext: Extended channel flags. + * @dfs_ch_ieee: IEEE channel number. + * @dfs_ch_vhtop_ch_freq_seg1: Channel Center frequency. + * @dfs_ch_vhtop_ch_freq_seg2: Channel Center frequency applicable for 80+80MHz + * mode of operation. + */ +void dfs_mlme_find_dot11_channel(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint8_t des_cfreq2, + int mode, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2); + +/** + * dfs_mlme_get_dfs_ch_channels() - Get channel from channel list. + * @pdev: Pointer to DFS pdev object. + * @dfs_ch_freq: Frequency in Mhz. + * @dfs_ch_flags: Channel flags. + * @dfs_ch_flagext: Extended channel flags. + * @dfs_ch_ieee: IEEE channel number. + * @dfs_ch_vhtop_ch_freq_seg1: Channel Center frequency. + * @dfs_ch_vhtop_ch_freq_seg2: Channel Center frequency applicable for 80+80MHz + * mode of operation. 
+ * @index: Index into channel list. + */ +void dfs_mlme_get_dfs_ch_channels(struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2, + int index); + +/** + * dfs_mlme_dfs_ch_flags_ext() - Get extension channel flags. + * @pdev: Pointer to DFS pdev object. + */ +uint32_t dfs_mlme_dfs_ch_flags_ext(struct wlan_objmgr_pdev *pdev); + +/** + * dfs_mlme_channel_change_by_precac() - Channel change by PreCAC. + * @pdev: Pointer to DFS pdev object. + */ +void dfs_mlme_channel_change_by_precac(struct wlan_objmgr_pdev *pdev); + +/** + * dfs_mlme_nol_timeout_notification() - NOL timeout notification to userspace. + * @pdev: Pointer to DFS pdev object. + */ +void dfs_mlme_nol_timeout_notification(struct wlan_objmgr_pdev *pdev); + +/** + * dfs_mlme_clist_update() - Mark the channel as RADAR. + * @pdev: Pointer to DFS pdev object. + * @nollist: Pointer to NOL list. + * @nentries: Number of channels in the NOL list. + */ +void dfs_mlme_clist_update(struct wlan_objmgr_pdev *pdev, + void *nollist, + int nentries); + +/** + * dfs_mlme_get_cac_timeout() - Get cac_timeout. + * @pdev: Pointer to DFS pdev object. + * @dfs_ch_freq: Frequency in Mhz. + * @dfs_ch_vhtop_ch_freq_seg2: Channel Center frequency applicable for 80+80MHz + * mode of operation. + * @dfs_ch_flags: Channel flags. + */ +int dfs_mlme_get_cac_timeout(struct wlan_objmgr_pdev *pdev, + uint16_t dfs_ch_freq, + uint8_t dfs_ch_vhtop_ch_freq_seg2, + uint64_t dfs_ch_flags); + +/** + * dfs_mlme_rebuild_chan_list_with_non_dfs_channels() - Rebuild the channel list + * with only non DFS channels. + * @pdev: Pointer to DFS pdev object. + * + * return: On success return 1 or 0, else failure. 
+ */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +int dfs_mlme_rebuild_chan_list_with_non_dfs_channels( + struct wlan_objmgr_pdev *pdev); +#else +static inline int dfs_mlme_rebuild_chan_list_with_non_dfs_channels( + struct wlan_objmgr_pdev *pdev) +{ + return 0; +} +#endif + +/** + * dfs_mlme_restart_vaps_with_non_dfs_chan() - Restart vaps with non DFS + * channels + * @pdev: Pointer to DFS pdev object. + * @no_chans_avail: Indicates if no channel is available. + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +void dfs_mlme_restart_vaps_with_non_dfs_chan(struct wlan_objmgr_pdev *pdev, + int no_chans_avail); +#else +static inline +void dfs_mlme_restart_vaps_with_non_dfs_chan(struct wlan_objmgr_pdev *pdev, + int no_chans_avail) +{ +} +#endif +#endif /* _WLAN_DFS_MLME_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_public_struct.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_public_struct.h new file mode 100644 index 0000000000000000000000000000000000000000..dd582db2b7dba162789121355f224637eaec09d2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_public_struct.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_dfs_public_struct.h + * This file contains DFS data structures + */ + +#ifndef __WLAN_DFS_PUBLIC_STRUCT_H_ +#define __WLAN_DFS_PUBLIC_STRUCT_H_ + +/* TODO: This structure has many redundant variables, needs cleanup */ +/** + * struct radar_found_info - radar found info + * @pdev_id: pdev id. + * @detection_mode: 0 indicates RADAR detected, non-zero indicates debug mode. + * @freq_offset: frequency offset. + * @chan_width: channel width. + * @detector_id: detector id for full-offload. + * @segment_id: segment id (same as detector_id) for partial-offload. + * @timestamp: timestamp (Time when filter match is found in Firmware). + * @is_chirp: is chirp or not. + * @chan_freq: channel frequency (Primary channel frequency). + * @radar_freq: radar frequency (Is it same as '@chan_freq'?). + * @sidx: sidx value (same as freq_offset). + */ +struct radar_found_info { + uint32_t pdev_id; + uint32_t detection_mode; + int32_t freq_offset; + uint32_t chan_width; + uint32_t detector_id; + uint32_t segment_id; + uint32_t timestamp; + uint32_t is_chirp; + uint32_t chan_freq; + uint32_t radar_freq; + int32_t sidx; +}; + +/** + * struct dfs_acs_info - acs info, ch range + * @acs_mode: to enable/disable acs 1/0. + * @channel_list: channel list in acs config + * @num_of_channel: number of channel in ACS channel list + */ +struct dfs_acs_info { + uint8_t acs_mode; + uint8_t *channel_list; + uint8_t num_of_channel; +}; + +/** + * struct radar_event_info - radar event info. + * @pulse_is_chirp: flag to indicate if this pulse is chirp. + * @pulse_center_freq: the center frequency of the radar pulse detected, KHz. 
+ * @pulse_duration: the duration of the pulse in us.
+ * @rssi: RSSI recorded in the ppdu.
+ * @pulse_detect_ts: timestamp indicates the time when DFS pulse is detected.
+ * @upload_fullts_low: low 32 tsf timestamp get from MAC tsf timer indicates
+ * the time that the radar event uploading to host.
+ * @upload_fullts_high: high 32 tsf timestamp get from MAC tsf timer indicates
+ * the time that the radar event uploading to host.
+ * @peak_sidx: index of peak magnitude bin (signed)
+ * @pdev_id: pdev_id for identifying the MAC.
+ * @delta_diff: Delta diff value.
+ * @delta_peak: Delta peak value.
+ * @psidx_diff: Psidx diff value.
+ * @is_psidx_diff_valid: Does fw send valid psidx diff.
+ */
+struct radar_event_info {
+ uint8_t pulse_is_chirp;
+ uint32_t pulse_center_freq;
+ uint32_t pulse_duration;
+ uint8_t rssi;
+ uint32_t pulse_detect_ts;
+ uint32_t upload_fullts_low;
+ uint32_t upload_fullts_high;
+ int32_t peak_sidx;
+ uint8_t pdev_id;
+ uint8_t delta_diff;
+ int8_t delta_peak;
+ int8_t psidx_diff;
+ int8_t is_psidx_diff_valid;
+};
+
+/**
+ * struct dfs_user_config - user configuration required for DFS.
+ * @dfs_is_phyerr_filter_offload: flag to indicate DFS phyerr filtering offload.
+ */
+struct dfs_user_config {
+ bool dfs_is_phyerr_filter_offload;
+};
+
+/**
+ * struct dfs_radar_found_params - radar found parameters.
+ * @pri_min: Minimum PRI of detected radar pulse.
+ * @pri_max: Max PRI of detected radar pulse.
+ * @duration_min: Min duration of detected pulse in us.
+ * @duration_max: Max duration of detected pulse in us.
+ * @sidx_min: Min software index of detected radar pulse.
+ * @sidx_max: Max software index of detected radar pulse. 
+ */ +struct dfs_radar_found_params { + u_int32_t pri_min; + u_int32_t pri_max; + u_int32_t duration_min; + u_int32_t duration_max; + u_int32_t sidx_min; + u_int32_t sidx_max; +}; +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..963a5cdc8aef08ee5cd9cd47336bcc0af464e809 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_tgt_api.h @@ -0,0 +1,397 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has the DFS dispatcher API which is exposed to outside of DFS + * component. 
+ */ + +#ifndef _WLAN_DFS_TGT_API_H_ +#define _WLAN_DFS_TGT_API_H_ + +#include + +/* Max number arguments for DFS unit test command */ +#define DFS_MAX_NUM_UNIT_TEST_ARGS 3 + +/* Command id to send test radar to firmware */ +#define DFS_PHYERR_OFFLOAD_TEST_SET_RADAR 0 + +/* Number of arguments for DFS unit test command */ +#define DFS_UNIT_TEST_NUM_ARGS 3 + +/* Segment ID corresponding to primary segment */ +#define SEG_ID_PRIMARY 0 + +/* Segment ID corresponding to secondary segment */ +#define SEG_ID_SECONDARY 1 + +/* Index id pointing to command id value */ +#define IDX_CMD_ID 0 + +/* Index id pointing to pdev id value */ +#define IDX_PDEV_ID 1 + +/* Index pointing to segment id value */ +#define IDX_SEG_ID 2 + +/** + * struct dfs_emulate_bang_radar_test_cmd - Unit test command structure to send + * WMI command to firmware from host + * and simulate bangradar event. + * @vdev_id: vdev id + * @num_args: number of arguments + * @args: arguments + */ +struct dfs_emulate_bang_radar_test_cmd { + uint32_t vdev_id; + uint32_t num_args; + uint32_t args[DFS_MAX_NUM_UNIT_TEST_ARGS]; +}; + +extern struct dfs_to_mlme global_dfs_to_mlme; + +/** + * tgt_dfs_set_current_channel() - Fill dfs channel structure from + * dfs_channel structure. + * @pdev: Pointer to DFS pdev object. + * @dfs_ch_freq: Frequency in Mhz. + * @dfs_ch_flags: Channel flags. + * @dfs_ch_flagext: Extended channel flags. + * @dfs_ch_ieee: IEEE channel number. + * @dfs_ch_vhtop_ch_freq_seg1: Channel Center frequency1. + * @dfs_ch_vhtop_ch_freq_seg2: Channel Center frequency2. + */ +#ifdef DFS_COMPONENT_ENABLE +QDF_STATUS tgt_dfs_set_current_channel(struct wlan_objmgr_pdev *pdev, + uint16_t dfs_ch_freq, + uint64_t dfs_ch_flags, + uint16_t dfs_ch_flagext, + uint8_t dfs_ch_ieee, + uint8_t dfs_ch_vhtop_ch_freq_seg1, + uint8_t dfs_ch_vhtop_ch_freq_seg2); + +/** + * tgt_dfs_radar_enable() - Enables the radar. + * @pdev: Pointer to DFS pdev object. + * @no_cac: If no_cac is 0, it cancels the CAC. 
+ * + * This is called each time a channel change occurs, to (potentially) enable + * the radar code. + */ +QDF_STATUS tgt_dfs_radar_enable(struct wlan_objmgr_pdev *pdev, + int no_cac, uint32_t opmode); + +/** + * tgt_dfs_control()- Used to process ioctls related to DFS. + * @pdev: Pointer to DFS pdev object. + * @id: Command type. + * @indata: Input buffer. + * @insize: size of the input buffer. + * @outdata: A buffer for the results. + * @outsize: Size of the output buffer. + */ +QDF_STATUS tgt_dfs_control(struct wlan_objmgr_pdev *pdev, + u_int id, + void *indata, + uint32_t insize, + void *outdata, + uint32_t *outsize, + int *error); + +/** + * tgt_dfs_get_radars() - Based on the chipset, calls init radar table functions + * @pdev: Pointer to DFS pdev object. + * + * Wrapper function for dfs_get_radars(). This function called from + * outside of DFS component. + */ +QDF_STATUS tgt_dfs_get_radars(struct wlan_objmgr_pdev *pdev); + +/** + * tgt_dfs_process_radar_ind() - Process radar found indication. + * @pdev: Pointer to DFS pdev object. + * @radar_found: radar found info. + * + * Process radar found indication. + * + * Return QDF_STATUS. 
+ */ +QDF_STATUS tgt_dfs_process_radar_ind(struct wlan_objmgr_pdev *pdev, + struct radar_found_info *radar_found); +#else +static inline QDF_STATUS tgt_dfs_set_current_channel( + struct wlan_objmgr_pdev *pdev, + uint16_t dfs_ch_freq, + uint64_t dfs_ch_flags, + uint16_t dfs_ch_flagext, + uint8_t dfs_ch_ieee, + uint8_t dfs_ch_vhtop_ch_freq_seg1, + uint8_t dfs_ch_vhtop_ch_freq_seg2) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS tgt_dfs_radar_enable(struct wlan_objmgr_pdev *pdev, + int no_cac, uint32_t opmode) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS tgt_dfs_control(struct wlan_objmgr_pdev *pdev, + u_int id, + void *indata, + uint32_t insize, + void *outdata, + uint32_t *outsize, + int *error) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS tgt_dfs_get_radars(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS tgt_dfs_process_radar_ind( + struct wlan_objmgr_pdev *pdev, + struct radar_found_info *radar_found) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * tgt_dfs_process_phyerr() - Process phyerr. + * @pdev: Pointer to DFS pdev object. + * @buf: Phyerr buffer. + * @datalen: phyerr buffer length. + * @r_rssi: RSSI. + * @r_ext_rssi: Extension channel RSSI. + * @r_rs_tstamp: Timestamp. + * @r_fulltsf: TSF64. + * + * Wrapper function for dfs_process_phyerr(). This function called from + * outside of DFS component. + */ +QDF_STATUS tgt_dfs_process_phyerr(struct wlan_objmgr_pdev *pdev, + void *buf, + uint16_t datalen, + uint8_t r_rssi, + uint8_t r_ext_rssi, + uint32_t r_rs_tstamp, + uint64_t r_fulltsf); + +/** + * tgt_dfs_process_phyerr_filter_offload() - Process radar event. + * Wrapper function for dfs_process_phyerr_filter_offload(). This function + * called from outside of DFS component. + * @pdev: Pointer to DFS pdev object. + * @wlan_radar_event: pointer to radar_event_info. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_dfs_process_phyerr_filter_offload(struct wlan_objmgr_pdev *pdev, + struct radar_event_info *wlan_radar_event); + +/** + * tgt_dfs_is_phyerr_filter_offload() - Is phyerr filter offload. + * @psoc: Pointer to psoc object. + * @is_phyerr_filter_offload: Pointer to is_phyerr_filter_offload. + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_dfs_is_phyerr_filter_offload(struct wlan_objmgr_psoc *psoc, + bool *is_phyerr_filter_offload); + +/** + * tgt_dfs_destroy_object() - Destroys the DFS object. + * @pdev: Pointer to DFS pdev object. + * + * Wrapper function for dfs_destroy_object(). This function called from + * outside of DFS component. + */ +QDF_STATUS tgt_dfs_destroy_object(struct wlan_objmgr_pdev *pdev); + +#ifdef QCA_MCL_DFS_SUPPORT +/** + * tgt_dfs_set_tx_leakage_threshold() - set tx_leakage_threshold. + * @pdev: Pointer to DFS pdev object. + * @tx_leakage_threshold: tx leakage threshold for dfs. + * + * Return QDF_STATUS. + */ +QDF_STATUS tgt_dfs_set_tx_leakage_threshold(struct wlan_objmgr_pdev *pdev, + uint16_t tx_leakage_threshold); +#else +static inline QDF_STATUS tgt_dfs_set_tx_leakage_threshold + (struct wlan_objmgr_pdev *pdev, + uint16_t tx_leakage_threshold) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * tgt_dfs_is_precac_timer_running() - Check whether precac timer is running. + * @pdev: Pointer to DFS pdev object. + * @is_precac_timer_running: Pointer to save precac timer value. + * + * Wrapper function for dfs_is_precac_timer_running(). This function called from + * outside of DFS component. + */ +QDF_STATUS tgt_dfs_is_precac_timer_running(struct wlan_objmgr_pdev *pdev, + bool *is_precac_timer_running); + +/** + * utils_dfs_find_vht80_chan_for_precac() - Find VHT80 channel for precac. + * @pdev: Pointer to DFS pdev object. + * @chan_mode: Channel mode. + * @ch_freq_seg1: Segment1 channel freq. + * @cfreq1: cfreq1. + * @cfreq2: cfreq2. + * @phy_mode: Precac phymode. 
+ * @dfs_set_cfreq2: Precac cfreq2
+ * @set_agile: Agile mode flag.
+ *
+ * Wrapper function for dfs_find_vht80_chan_for_precac().
+ * This function called from outside of dfs component.
+ */
+QDF_STATUS tgt_dfs_find_vht80_chan_for_precac(struct wlan_objmgr_pdev *pdev,
+ uint32_t chan_mode,
+ uint8_t ch_freq_seg1,
+ uint32_t *cfreq1,
+ uint32_t *cfreq2,
+ uint32_t *phy_mode,
+ bool *dfs_set_cfreq2,
+ bool *set_agile);
+
+/**
+ * tgt_dfs_cac_complete() - Process cac complete indication.
+ * @pdev: Pointer to DFS pdev object.
+ * @vdev_id: vdev id.
+ *
+ * Process cac complete indication from firmware.
+ *
+ * Return QDF_STATUS.
+ */
+QDF_STATUS tgt_dfs_cac_complete(struct wlan_objmgr_pdev *pdev,
+ uint32_t vdev_id);
+
+/**
+ * tgt_dfs_reg_ev_handler() - Register dfs events.
+ * @psoc: Pointer to psoc.
+ *
+ * Register dfs events.
+ *
+ * Return: QDF_STATUS.
+ */
+QDF_STATUS tgt_dfs_reg_ev_handler(struct wlan_objmgr_psoc *psoc);
+
+/**
+ * tgt_dfs_stop() - Clear dfs timers.
+ * @pdev: Pointer to DFS pdev object.
+ */
+QDF_STATUS tgt_dfs_stop(struct wlan_objmgr_pdev *pdev);
+
+/**
+* tgt_dfs_process_emulate_bang_radar_cmd() - Process to emulate dfs bangradar
+* command.
+* @pdev: Pointer to DFS pdev object.
+*
+* Process to emulate dfs bangradar command.
+*
+* Return: QDF_STATUS.
+*/
+QDF_STATUS tgt_dfs_process_emulate_bang_radar_cmd(struct wlan_objmgr_pdev *pdev,
+ struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test);
+
+#ifdef QCA_MCL_DFS_SUPPORT
+/**
+ * tgt_dfs_set_phyerr_filter_offload() - config phyerr filter offload
+ * @pdev: Pointer to DFS pdev object. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_dfs_set_phyerr_filter_offload(struct wlan_objmgr_pdev *pdev); +#else +static inline QDF_STATUS tgt_dfs_set_phyerr_filter_offload + (struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +/** + * tgt_dfs_send_avg_params_to_fw() - send average radar parameters to fw. + * @pdev: Pointer to DFS pdev object. + * @params: Pointer to dfs radar average parameters. + * + * Return: QDF_STATUS + */ +QDF_STATUS +tgt_dfs_send_avg_params_to_fw(struct wlan_objmgr_pdev *pdev, + struct dfs_radar_found_params *params); +#endif + +/** + * tgt_dfs_action_on_status_from_fw() - trigger the action to be taken based on + * host dfs status received from fw. + * @pdev: Pointer to pdev object. + * @status: Pointer to the host dfs status received from fw. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS tgt_dfs_action_on_status_from_fw(struct wlan_objmgr_pdev *pdev, + uint32_t *status); +#else +static inline +QDF_STATUS tgt_dfs_action_on_status_from_fw(struct wlan_objmgr_pdev *pdev, + uint32_t *status) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * tgt_dfs_is_radar_enabled() - checks if radar detection is enabled. + * @pdev: Pointer to pdev object. + * @ignore_dfs: Pointer to check the value. If 1, radar detection is disabled. + */ +void tgt_dfs_is_radar_enabled(struct wlan_objmgr_pdev *pdev, int *ignore_dfs); + +/** + * tgt_dfs_reset_spoof_test() - reset the dfs spoof check variables + * @pdev: Pointer to pdev object. 
+ * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS tgt_dfs_reset_spoof_test(struct wlan_objmgr_pdev *pdev); +#else +static inline +QDF_STATUS tgt_dfs_reset_spoof_test(struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif /* _WLAN_DFS_TGT_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..a212763276aaa752ab65cacb249e8eca4218f23b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_ucfg_api.h @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has the DFS dispatcher API which is exposed to outside of DFS + * component. + */ + +#ifndef _WLAN_DFS_UCFG_API_H_ +#define _WLAN_DFS_UCFG_API_H_ + +#include +#include + +/** + * struct dfs_to_mlme - These are MLME function pointer used by DFS component. + * @pdev_component_obj_attach: Attach DFS object to PDEV. + * @pdev_component_obj_detach: Detach DFS object from PDEV. 
+ * @pdev_get_comp_private_obj: Get DFS object from PDEV. + * @dfs_start_rcsa: Send RCSA to RootAP. + * @mlme_mark_dfs: Calls dfs_action function. + * @mlme_start_csa: Sends CSA. + * @mlme_proc_cac: Process the CAC completion event. + * @mlme_deliver_event_up_afrer_cac: Send a CAC timeout, VAP up event to user + * space + * @mlme_get_dfs_ch_nchans: Get number of channels in the channel + * list. + * @mlme_get_extchan: Gets the extension channel. + * @mlme_set_no_chans_available: Sets no_chans_available flag. + * @mlme_ieee2mhz: Gets Channel freq from ieee number. + * @mlme_find_dot11_channel: Find dot11 channel. + * @mlme_get_dfs_ch_channels: Get the channel list. + * @mlme_dfs_ch_flags_ext: Gets channel extension flag. + * @mlme_channel_change_by_precac: Channel change triggered by PreCAC. + * @mlme_nol_timeout_notification: NOL timeout notification. + * @mlme_clist_update: Updates the channel list. + * @mlme_get_cac_timeout: Gets the CAC timeout. + * @mlme_rebuild_chan_list_with_non_dfs_channel: Rebuild channels with non-dfs + * channels. + * @mlme_restart_vaps_with_non_dfs_chan: Restart vaps with non-dfs channel. 
+ */ +struct dfs_to_mlme { + QDF_STATUS (*pdev_component_obj_attach)(struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj, + QDF_STATUS status); + QDF_STATUS (*pdev_component_obj_detach)(struct wlan_objmgr_pdev *pdev, + enum wlan_umac_comp_id id, + void *comp_priv_obj); + struct wlan_dfs *(*pdev_get_comp_private_obj)( + struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*dfs_start_rcsa)(struct wlan_objmgr_pdev *pdev, + bool *wait_for_csa); + QDF_STATUS (*mlme_mark_dfs)(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint16_t freq, + uint8_t vhtop_ch_freq_seg2, + uint64_t flags); + QDF_STATUS (*mlme_start_csa)(struct wlan_objmgr_pdev *pdev, + uint8_t ieee_chan, uint16_t freq, + uint8_t cfreq2, uint64_t flags); + QDF_STATUS (*mlme_proc_cac)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*mlme_deliver_event_up_afrer_cac)( + struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*mlme_get_dfs_ch_nchans)(struct wlan_objmgr_pdev *pdev, + int *nchans); + QDF_STATUS (*mlme_get_extchan)(struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2); + QDF_STATUS (*mlme_set_no_chans_available)(struct wlan_objmgr_pdev *pdev, + int val); + QDF_STATUS (*mlme_ieee2mhz)(struct wlan_objmgr_pdev *pdev, + int ieee, + uint64_t flag, + int *freq); + QDF_STATUS (*mlme_find_dot11_channel)(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint8_t des_cfreq2, + int mode, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2); + + QDF_STATUS (*mlme_get_dfs_ch_channels)(struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2, + int index); + QDF_STATUS 
(*mlme_dfs_ch_flags_ext)(struct wlan_objmgr_pdev *pdev, + uint16_t *flag_ext); + QDF_STATUS (*mlme_channel_change_by_precac)( + struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*mlme_nol_timeout_notification)( + struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*mlme_clist_update)(struct wlan_objmgr_pdev *pdev, + void *nollist, + int nentries); + QDF_STATUS (*mlme_get_cac_timeout)(struct wlan_objmgr_pdev *pdev, + uint16_t dfs_ch_freq, + uint8_t c_vhtop_ch_freq_seg2, + uint64_t dfs_ch_flags, + int *cac_timeout); +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) + QDF_STATUS (*mlme_rebuild_chan_list_with_non_dfs_channels) + (struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*mlme_restart_vaps_with_non_dfs_chan) + (struct wlan_objmgr_pdev *pdev, int no_chans_avail); +#endif +}; + +extern struct dfs_to_mlme global_dfs_to_mlme; + +/** + * wlan_dfs_pdev_obj_create_notification() - DFS pdev object create handler. + * @pdev: Pointer to DFS pdev object. + */ +QDF_STATUS wlan_dfs_pdev_obj_create_notification(struct wlan_objmgr_pdev *pdev, + void *arg); + +/** + * wlan_dfs_pdev_obj_destroy_notification() - DFS pdev object delete handler. + * @pdev: Pointer to DFS pdev object. + */ +QDF_STATUS wlan_dfs_pdev_obj_destroy_notification(struct wlan_objmgr_pdev *pdev, + void *arg); + +/** + * ucfg_dfs_is_ap_cac_timer_running() - Returns the dfs cac timer. + * @pdev: Pointer to DFS pdev object. + * @is_ap_cac_timer_running: Pointer to save dfs_cac_timer_running value. + * + * Wrapper function for dfs_is_ap_cac_timer_running(). + * This function called from outside of dfs component. + */ +QDF_STATUS ucfg_dfs_is_ap_cac_timer_running(struct wlan_objmgr_pdev *pdev, + int *is_ap_cac_timer_running); + +/** + * ucfg_dfs_getnol() - Wrapper function for dfs_get_nol() + * @pdev: Pointer to DFS pdev object. + * @dfs_nolinfo: Pointer to dfsreq_nolinfo structure. + * + * Wrapper function for dfs_getnol(). + * This function called from outside of dfs component. 
+ */ +QDF_STATUS ucfg_dfs_getnol(struct wlan_objmgr_pdev *pdev, void *dfs_nolinfo); + +/** + * ucfg_dfs_override_cac_timeout() - Override the default CAC timeout. + * @pdev: Pointer to DFS pdev object. + * @cac_timeout: CAC timeout value. + * + * Wrapper function for dfs_override_cac_timeout(). + * This function called from outside of dfs component. + */ +QDF_STATUS ucfg_dfs_override_cac_timeout(struct wlan_objmgr_pdev *pdev, + int cac_timeout, int *status); + +/** + * ucfg_dfs_get_override_cac_timeout() - Get override CAC timeout value. + * @pdev: Pointer to DFS pdev object. + * @cac_timeout: Pointer to save the CAC timeout value. + * + * Wrapper function for dfs_get_override_cac_timeout(). + * This function called from outside of dfs component. + */ +QDF_STATUS ucfg_dfs_get_override_cac_timeout(struct wlan_objmgr_pdev *pdev, + int *cac_timeout, int *status); + +/** + * ucfg_dfs_get_override_precac_timeout() - Get precac timeout. + * @pdev: Pointer to DFS pdev object. + * @precac_timeout: Get precac timeout value in this variable. + * + * Wrapper function for dfs_get_override_precac_timeout(). + * This function called from outside of dfs component. + */ +QDF_STATUS ucfg_dfs_get_override_precac_timeout(struct wlan_objmgr_pdev *pdev, + int *precac_timeout); + +/** + * ucfg_dfs_override_precac_timeout() - Override the default precac timeout. + * @pdev: Pointer to DFS pdev object. + * @precac_timeout: Precac timeout value. + * + * Wrapper function for dfs_override_precac_timeout(). + * This function called from outside of dfs component. + */ +QDF_STATUS ucfg_dfs_override_precac_timeout(struct wlan_objmgr_pdev *pdev, + int precac_timeout); + +/** + * ucfg_dfs_set_precac_enable() - Set precac enable flag. + * @pdev: Pointer to DFS pdev object. + * @value: input value for dfs_precac_enable flag. + * + * Wrapper function for dfs_set_precac_enable(). + * This function called from outside of dfs component. 
+ */ +QDF_STATUS ucfg_dfs_set_precac_enable(struct wlan_objmgr_pdev *pdev, + uint32_t value); + +/** + * ucfg_dfs_get_precac_enable() - Get precac enable flag. + * @pdev: Pointer to DFS pdev object. + * @buff: Pointer to save precac_enable value. + * + * Wrapper function for dfs_get_precac_enable(). + * This function called from outside of dfs component. + */ +QDF_STATUS ucfg_dfs_get_precac_enable(struct wlan_objmgr_pdev *pdev, int *buff); + +#ifdef QCA_MCL_DFS_SUPPORT +/** + * ucfg_dfs_update_config() - Update DFS user config. + * @psoc: Pointer to psoc. + * @req: DFS user config. + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_dfs_update_config(struct wlan_objmgr_psoc *psoc, + struct dfs_user_config *req); +#else +static inline QDF_STATUS ucfg_dfs_update_config(struct wlan_objmgr_psoc *psoc, + struct dfs_user_config *req) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * ucfg_dfs_set_override_status_timeout() - override the value of host dfs + * status wait timeout. + * @pdev: Pointer to DFS pdev object. + * @status_timeout: timeout value. + * + * Wrapper function for dfs_set_override_status_timeout(). + * This function called from outside of dfs component. + * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS ucfg_dfs_set_override_status_timeout(struct wlan_objmgr_pdev *pdev, + int status_timeout); +#else +static inline +QDF_STATUS ucfg_dfs_set_override_status_timeout(struct wlan_objmgr_pdev *pdev, + int status_timeout) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * ucfg_dfs_get_override_status_timeout() - Get the value of host dfs status + * wait timeout. + * @pdev: Pointer to DFS pdev object. + * @status_timeout: Pointer to save the timeout value. + * + * Wrapper function for dfs_get_override_status_timeout(). + * This function called from outside of dfs component. 
+ * + * Return: QDF_STATUS + */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS ucfg_dfs_get_override_status_timeout(struct wlan_objmgr_pdev *pdev, + int *status_timeout); +#else +static inline +QDF_STATUS ucfg_dfs_get_override_status_timeout(struct wlan_objmgr_pdev *pdev, + int *status_timeout) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif /* _WLAN_DFS_UCFG_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_utils_api.h b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_utils_api.h new file mode 100644 index 0000000000000000000000000000000000000000..36523f96d573357cd0c87fa5df3a323098535cfb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/inc/wlan_dfs_utils_api.h @@ -0,0 +1,500 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has the DFS dispatcher API which is exposed to outside of DFS + * component. 
+ */
+
+#ifndef _WLAN_DFS_UTILS_API_H_
+#define _WLAN_DFS_UTILS_API_H_
+
+#include "wlan_dfs_ucfg_api.h"
+#include "wlan_reg_services_api.h"
+
+/* Add channel to nol */
+#define DFS_NOL_SET 1
+
+/* Remove channel from nol */
+#define DFS_NOL_RESET 0
+
+/* Max nol channels */
+#define DFS_MAX_NOL_CHANNEL 80
+
+/* WLAN 2.4GHz start freq */
+#define DFS_24_GHZ_BASE_FREQ (2407)
+
+/* WLAN 5GHz start freq */
+#define DFS_5_GHZ_BASE_FREQ (5000)
+
+/* WLAN 2.4 GHz channel number 6 */
+#define DFS_24_GHZ_CHANNEL_6 (6)
+
+/* WLAN 2.4 GHz channel number 14 */
+#define DFS_24_GHZ_CHANNEL_14 (14)
+
+/* WLAN 2.4 GHz channel number 15 */
+#define DFS_24_GHZ_CHANNEL_15 (15)
+
+/* WLAN 2.4 GHz channel number 27 */
+#define DFS_24_GHZ_CHANNEL_27 (27)
+
+/* WLAN 5GHz channel number 170 */
+#define DFS_5_GHZ_CHANNEL_170 (170)
+
+/* WLAN 5MHz channel spacing */
+#define DFS_CHAN_SPACING_5MHZ (5)
+
+/* WLAN 20MHz channel spacing */
+#define DFS_CHAN_SPACING_20MHZ (20)
+
+/* WLAN 2.4GHz channel number 14 freq */
+#define DFS_CHAN_14_FREQ (2484)
+
+/* WLAN 2.4GHz channel number 15 freq */
+#define DFS_CHAN_15_FREQ (2512)
+
+/* WLAN 5GHz channel number 170 freq */
+#define DFS_CHAN_170_FREQ (5852)
+
+
+
+extern struct dfs_to_mlme global_dfs_to_mlme;
+
+/**
+ * utils_dfs_cac_valid_reset() - Cancels the dfs_cac_valid_timer timer.
+ * @pdev: Pointer to DFS pdev object.
+ * @prevchan_ieee: Prevchan number.
+ * @prevchan_flags: Prevchan flags.
+ *
+ * Wrapper function for dfs_cac_valid_reset(). This function called from
+ * outside of DFS component.
+ */
+
+QDF_STATUS utils_dfs_cac_valid_reset(struct wlan_objmgr_pdev *pdev,
+		uint8_t prevchan_ieee,
+		uint32_t prevchan_flags);
+
+/**
+ * utils_dfs_reset() - Reset DFS members.
+ * @pdev: Pointer to DFS pdev object.
+ */
+QDF_STATUS utils_dfs_reset(struct wlan_objmgr_pdev *pdev);
+
+/**
+ * utils_dfs_reset_precaclists() - Clears and initializes precac_required_list,
+ * precac_done_list and precac_nol_list.
+ * @pdev: Pointer to DFS pdev object.
+ * + * Wrapper function for dfs_reset_precaclists(). This function called from + * outside of DFS component. + */ +QDF_STATUS utils_dfs_reset_precaclists(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_cancel_precac_timer() - Cancel the precac timer. + * @pdev: Pointer to DFS pdev object. + * + * wrapper function for dfs_cancel_precac_timer(). this function called from + * outside of dfs component. + */ +QDF_STATUS utils_dfs_cancel_precac_timer(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_is_precac_done() - Is precac done. + * @pdev: Pointer to DFS pdev object. + * + * wrapper function for dfs_is_precac_done(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_is_precac_done(struct wlan_objmgr_pdev *pdev, + bool *is_precac_done); + +/** + * utils_dfs_cancel_cac_timer() - Cancels the CAC timer. + * @pdev: Pointer to DFS pdev object. + * + * wrapper function for dfs_cancel_cac_timer(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_cancel_cac_timer(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_start_cac_timer() - Starts the CAC timer. + * @pdev: Pointer to DFS pdev object. + * + * wrapper function for dfs_start_cac_timer(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_start_cac_timer(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_cac_stop() - Clear the AP CAC timer. + * @pdev: Pointer to DFS pdev object. + * + * wrapper function for dfs_cac_stop(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_cac_stop(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_stacac_stop() - Clear the STA CAC timer. + * @pdev: Pointer to DFS pdev object. + * + * wrapper function for dfs_stacac_stop(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_stacac_stop(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_get_usenol() - Returns use_nol flag. + * @pdev: Pointer to DFS pdev object. 
+ * @usenol: Pointer to usenol value. + * + * wrapper function for dfs_get_usenol(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_get_usenol(struct wlan_objmgr_pdev *pdev, + uint16_t *usenol); + +/** + * utils_dfs_radar_disable() - Disables the radar. + * @pdev: Pointer to DFS pdev object. + * + * wrapper function for dfs_radar_disable(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_radar_disable(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_set_update_nol_flag() - Sets update_nol flag. + * @pdev: Pointer to DFS pdev object. + * @val: update_nol flag. + * + * wrapper function for dfs_set_update_nol_flag(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_set_update_nol_flag(struct wlan_objmgr_pdev *pdev, + bool val); + +/** + * utils_dfs_get_update_nol_flag() - Returns update_nol flag. + * @pdev: Pointer to DFS pdev object. + * @nol_flag: Fill nol_flag in this variable. + * + * wrapper function for dfs_get_update_nol_flag(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_get_update_nol_flag(struct wlan_objmgr_pdev *pdev, + bool *nol_flag); + +/** + * utils_dfs_get_dfs_use_nol() - Get usenol. + * @pdev: Pointer to DFS pdev object. + * @dfs_use_nol: Pointer to dfs_use_nol. + * + * wrapper function for dfs_get_dfs_use_nol(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_get_dfs_use_nol(struct wlan_objmgr_pdev *pdev, + int *dfs_use_nol); + +/** + * utils_dfs_get_nol_timeout() - Get NOL timeout. + * @pdev: Pointer to DFS pdev object. + * @dfs_nol_timeout: Pointer to dfs_nol_timeout. + * + * wrapper function for dfs_get_nol_timeout(). this + * function called from outside of dfs component. + */ +QDF_STATUS utils_dfs_get_nol_timeout(struct wlan_objmgr_pdev *pdev, + int *dfs_nol_timeout); + +/** + * utils_dfs_nol_addchan() - Add channel to NOL. + * @pdev: Pointer to DFS pdev object. 
+ * @freq: Channel frequency to add to NOL.
+ * @dfs_nol_timeout: NOL timeout.
+ *
+ * wrapper function for dfs_nol_addchan(). this
+ * function called from outside of dfs component.
+ */
+QDF_STATUS utils_dfs_nol_addchan(struct wlan_objmgr_pdev *pdev,
+		uint16_t freq,
+		uint32_t dfs_nol_timeout);
+
+/**
+ * utils_dfs_nol_update() - NOL update
+ * @pdev: Pointer to DFS pdev object.
+ *
+ * wrapper function for dfs_nol_update(). this
+ * function called from outside of dfs component.
+ */
+QDF_STATUS utils_dfs_nol_update(struct wlan_objmgr_pdev *pdev);
+
+/**
+ * utils_dfs_second_segment_radar_disable() - Disables the second segment radar.
+ * @pdev: Pointer to DFS pdev object.
+ *
+ * This is called when AP detects the radar, to (potentially) disable
+ * the radar code.
+ */
+QDF_STATUS utils_dfs_second_segment_radar_disable(
+		struct wlan_objmgr_pdev *pdev);
+
+/**
+ * utils_dfs_is_ignore_dfs() - Get Ignore DFS value.
+ * @pdev: Pointer to DFS pdev object.
+ * @ignore_dfs: Fill ignore_dfs value in this variable.
+ */
+QDF_STATUS utils_dfs_is_ignore_dfs(struct wlan_objmgr_pdev *pdev,
+		bool *ignore_dfs);
+
+/**
+ * utils_dfs_is_cac_valid() - Gets the value of is_cac_valid.
+ * @pdev: Pointer to DFS pdev object.
+ * @is_cac_valid: Fill is_cac_valid in this variable.
+ */
+QDF_STATUS utils_dfs_is_cac_valid(struct wlan_objmgr_pdev *pdev,
+		bool *is_cac_valid);
+
+/**
+ * utils_dfs_is_ignore_cac() - Gets the value of is_ignore_cac.
+ * @pdev: Pointer to DFS pdev object.
+ * @ignore_cac: Fill ignore_cac value in this variable.
+ */
+QDF_STATUS utils_dfs_is_ignore_cac(struct wlan_objmgr_pdev *pdev,
+		bool *ignore_cac);
+
+/**
+ * utils_dfs_set_cac_timer_running() - Sets the cac timer running.
+ * @pdev: Pointer to DFS pdev object.
+ * @val: Set this value to dfs_cac_timer_running variable.
+ */
+QDF_STATUS utils_dfs_set_cac_timer_running(struct wlan_objmgr_pdev *pdev,
+		int val);
+
+/**
+ * utils_dfs_get_nol_chfreq_and_chwidth() - Gets the NOL channel frequency and
+ * channel width.
+ * @pdev: Pointer to DFS pdev object. + * @nollist: Pointer to NOL channel entry. + * @nol_chfreq: Pointer to save channel frequency. + * @nol_chwidth: Pointer to save channel width. + * @index: Index into nol list. + */ +QDF_STATUS utils_dfs_get_nol_chfreq_and_chwidth(struct wlan_objmgr_pdev *pdev, + void *nollist, + uint32_t *nol_chfreq, + uint32_t *nol_chwidth, + int index); + +/** + * utils_dfs_get_random_channel() - Get random channel. + * @pdev: Pointer to DFS pdev object. + * @flags: random channel selection flags. + * @ch_params: current channel params. + * @hw_mode: current operating mode. + * @target_chan: Pointer to target_chan. + * @acs_info: acs range info. + * + * wrapper function for get_random_chan(). this + * function called from outside of dfs component. + * + * Return: QDF_STATUS + */ +QDF_STATUS utils_dfs_get_random_channel(struct wlan_objmgr_pdev *pdev, + uint16_t flags, struct ch_params *ch_params, + uint32_t *hw_mode, uint8_t *target_chan, + struct dfs_acs_info *acs_info); + +/** + * utils_dfs_init_nol() - Initialize nol from platform driver. + * @pdev: pdev handler. + * + * Initialize nol from platform driver. + * + * Return: None + */ +#ifdef QCA_DFS_NOL_PLATFORM_DRV_SUPPORT +void utils_dfs_init_nol(struct wlan_objmgr_pdev *pdev); +#else +static inline void utils_dfs_init_nol(struct wlan_objmgr_pdev *pdev) +{ +} +#endif +/** + * utils_dfs_save_nol() - save nol list to platform driver. + * @pdev: pdev handler. + * + * Save nol list to platform driver. + * + * Return: None + */ +void utils_dfs_save_nol(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_print_nol_channels() - log nol channels. + * @pdev: pdev handler. + * + * log nol channels. + * + * Return: None + */ +#ifdef DFS_COMPONENT_ENABLE +void utils_dfs_print_nol_channels(struct wlan_objmgr_pdev *pdev); +#else +static inline void utils_dfs_print_nol_channels(struct wlan_objmgr_pdev *pdev) +{ +} +#endif + +/** + * utils_dfs_clear_nol_channels() - clear nol list. 
+ * @pdev: pdev handler. + * + * log nol channels. + * + * Return: None + */ +void utils_dfs_clear_nol_channels(struct wlan_objmgr_pdev *pdev); + +/** + * utils_is_dfs_ch() - is channel dfs. + * @pdev: pdev handler. + * + * is channel dfs. + * + * Return: True if channel dfs, else false. + */ +static inline bool utils_is_dfs_ch(struct wlan_objmgr_pdev *pdev, uint32_t chan) +{ + return wlan_reg_is_dfs_ch(pdev, chan); +} +/** + * utils_dfs_reg_update_nol_ch() - set nol channel + * + * @pdev: pdev ptr + * @ch_list: channel list to be returned + * @num_ch: number of channels + * @nol_ch: nol flag + * + * Return: void + */ +void utils_dfs_reg_update_nol_ch(struct wlan_objmgr_pdev *pdev, + uint8_t *ch_list, + uint8_t num_ch, + bool nol_ch); + +/** + * utils_dfs_freq_to_chan () - convert channel freq to channel number + * @freq: frequency + * + * Return: channel number + */ +uint8_t utils_dfs_freq_to_chan(uint32_t freq); + +/** + * utils_dfs_chan_to_freq () - convert channel number to frequency + * @chan: channel number + * + * Return: frequency + */ +#ifdef DFS_COMPONENT_ENABLE +uint32_t utils_dfs_chan_to_freq(uint8_t chan); +#else +static inline uint32_t utils_dfs_chan_to_freq(uint8_t chan) +{ + return 0; +} +#endif +/** + * utils_dfs_update_cur_chan_flags() - Update DFS channel flag and flagext. + * @pdev: Pointer to DFS pdev object. + * @flags: New channel flags + * @flagext: New Extended flags + * + * Return: QDF_STATUS + */ +QDF_STATUS utils_dfs_update_cur_chan_flags(struct wlan_objmgr_pdev *pdev, + uint64_t flags, + uint16_t flagext); + +#ifdef QCA_MCL_DFS_SUPPORT +/** + * utils_dfs_mark_leaking_ch() - to mark channel leaking in to nol + * @pdev: Pointer to pdev structure. 
+ * @ch_width: channel width + * @temp_ch_lst_sz: the target channel list + * @temp_ch_lst: the target channel list + * + * This function removes the channels from temp channel list that + * (if selected as target channel) will cause leakage in one of + * the NOL channels + * + * Return: QDF_STATUS + */ +QDF_STATUS utils_dfs_mark_leaking_ch(struct wlan_objmgr_pdev *pdev, + enum phy_ch_width ch_width, + uint8_t temp_ch_lst_sz, + uint8_t *temp_ch_lst); +#else +static inline QDF_STATUS utils_dfs_mark_leaking_ch + (struct wlan_objmgr_pdev *pdev, + enum phy_ch_width ch_width, + uint8_t temp_ch_lst_sz, + uint8_t *temp_ch_lst) +{ + return QDF_STATUS_SUCCESS; +} +#endif +/** + * utils_get_dfsdomain() - Get DFS domain. + * @pdev: Pointer to PDEV structure. + * + * Return: DFS domain. + */ +int utils_get_dfsdomain(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_get_cur_rd() - Get current regdomain. + * @pdev: pdev ptr + * + * Return: Regdomain pair id. + */ +uint16_t utils_dfs_get_cur_rd(struct wlan_objmgr_pdev *pdev); + +/** + * utils_dfs_is_spoof_check_failed() - get spoof check status. + * @pdev: pdev ptr + * @is_spoof_check_failed: pointer containing the status. + * + * Return: QDF_STATUS. 
+ */ +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS utils_dfs_is_spoof_check_failed(struct wlan_objmgr_pdev *pdev, + bool *is_spoof_check_failed); +#else +static inline +QDF_STATUS utils_dfs_is_spoof_check_failed(struct wlan_objmgr_pdev *pdev, + bool *is_spoof_check_failed) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif /* _WLAN_DFS_UTILS_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_init_deinit_api.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_init_deinit_api.c new file mode 100644 index 0000000000000000000000000000000000000000..b7623be4ec09628a70b305e65b9dfe3088806dfd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_init_deinit_api.c @@ -0,0 +1,489 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file init/deint functions for DFS module. 
+ */ + +#include "wlan_dfs_ucfg_api.h" +#include "wlan_dfs_tgt_api.h" +#include "wlan_dfs_utils_api.h" +#ifndef QCA_MCL_DFS_SUPPORT +#include "ieee80211_mlme_dfs_interface.h" +#endif +#include "wlan_objmgr_global_obj.h" +#include "wlan_dfs_init_deinit_api.h" +#include "wlan_dfs_lmac_api.h" +#include "../../core/src/dfs.h" +#include "a_types.h" +#include "wlan_serialization_api.h" +#include + +struct dfs_to_mlme global_dfs_to_mlme; + +struct wlan_dfs *wlan_pdev_get_dfs_obj(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + dfs = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_DFS); + + return dfs; +} + +#ifndef QCA_MCL_DFS_SUPPORT +void register_dfs_callbacks(void) +{ + struct dfs_to_mlme *tmp_dfs_to_mlme = &global_dfs_to_mlme; + + tmp_dfs_to_mlme->pdev_component_obj_attach = + wlan_objmgr_pdev_component_obj_attach; + tmp_dfs_to_mlme->pdev_component_obj_detach = + wlan_objmgr_pdev_component_obj_detach; + tmp_dfs_to_mlme->pdev_get_comp_private_obj = + wlan_pdev_get_dfs_obj; + + tmp_dfs_to_mlme->dfs_start_rcsa = mlme_dfs_start_rcsa; + tmp_dfs_to_mlme->mlme_mark_dfs = mlme_dfs_mark_dfs; + tmp_dfs_to_mlme->mlme_start_csa = mlme_dfs_start_csa; + tmp_dfs_to_mlme->mlme_proc_cac = mlme_dfs_proc_cac; + tmp_dfs_to_mlme->mlme_deliver_event_up_afrer_cac = + mlme_dfs_deliver_event_up_afrer_cac; + tmp_dfs_to_mlme->mlme_get_dfs_ch_nchans = mlme_dfs_get_dfs_ch_nchans; + tmp_dfs_to_mlme->mlme_get_extchan = mlme_dfs_get_extchan; + tmp_dfs_to_mlme->mlme_set_no_chans_available = + mlme_dfs_set_no_chans_available; + tmp_dfs_to_mlme->mlme_ieee2mhz = mlme_dfs_ieee2mhz; + tmp_dfs_to_mlme->mlme_find_dot11_channel = mlme_dfs_find_dot11_channel; + tmp_dfs_to_mlme->mlme_get_dfs_ch_channels = + mlme_dfs_get_dfs_ch_channels; + tmp_dfs_to_mlme->mlme_dfs_ch_flags_ext = mlme_dfs_dfs_ch_flags_ext; + tmp_dfs_to_mlme->mlme_channel_change_by_precac = + mlme_dfs_channel_change_by_precac; + tmp_dfs_to_mlme->mlme_nol_timeout_notification = + mlme_dfs_nol_timeout_notification; + 
tmp_dfs_to_mlme->mlme_clist_update = mlme_dfs_clist_update; + tmp_dfs_to_mlme->mlme_get_cac_timeout = mlme_dfs_get_cac_timeout; + tmp_dfs_to_mlme->mlme_rebuild_chan_list_with_non_dfs_channels = + mlme_dfs_rebuild_chan_list_with_non_dfs_channels; + tmp_dfs_to_mlme->mlme_restart_vaps_with_non_dfs_chan = + mlme_dfs_restart_vaps_with_non_dfs_chan; +} +#else +void register_dfs_callbacks(void) +{ + struct dfs_to_mlme *tmp_dfs_to_mlme = &global_dfs_to_mlme; + + tmp_dfs_to_mlme->pdev_component_obj_attach = + wlan_objmgr_pdev_component_obj_attach; + tmp_dfs_to_mlme->pdev_component_obj_detach = + wlan_objmgr_pdev_component_obj_detach; + tmp_dfs_to_mlme->pdev_get_comp_private_obj = + wlan_pdev_get_dfs_obj; +} +#endif + +/** + * dfs_psoc_obj_create_notification() - dfs psoc create notification handler + * @psoc: psoc object + * @arg_list: Argument list + * + * Return: QDF_STATUS + */ +static QDF_STATUS dfs_psoc_obj_create_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list) +{ + QDF_STATUS status; + struct dfs_soc_priv_obj *dfs_soc_obj; + + dfs_soc_obj = qdf_mem_malloc(sizeof(*dfs_soc_obj)); + if (!dfs_soc_obj) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to allocate memory for dfs object"); + return QDF_STATUS_E_NOMEM; + } + + dfs_soc_obj->psoc = psoc; + + status = wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_UMAC_COMP_DFS, + (void *)dfs_soc_obj, + QDF_STATUS_SUCCESS); + + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to attach psoc dfs component"); + qdf_mem_free(dfs_soc_obj); + return status; + } + + dfs_debug(NULL, WLAN_DEBUG_DFS1, + "DFS obj attach to psoc successfully"); + + return status; +} + +/** + * dfs_psoc_obj_destroy_notification() - dfs psoc destroy notification handler + * @psoc: psoc object + * @arg_list: Argument list + * + * Return: QDF_STATUS + */ +static QDF_STATUS dfs_psoc_obj_destroy_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list) +{ + QDF_STATUS status; + struct dfs_soc_priv_obj 
*dfs_soc_obj; + + dfs_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_DFS); + if (!dfs_soc_obj) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to get dfs obj in psoc"); + return QDF_STATUS_E_FAILURE; + } + + status = wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_DFS, + dfs_soc_obj); + + if (QDF_IS_STATUS_ERROR(status)) + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to detach psoc dfs component"); + + qdf_mem_free(dfs_soc_obj); + + return status; +} + +QDF_STATUS dfs_init(void) +{ + QDF_STATUS status; + + status = wlan_objmgr_register_psoc_create_handler(WLAN_UMAC_COMP_DFS, + dfs_psoc_obj_create_notification, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to register psoc create handler for dfs"); + goto err_psoc_create; + } + + status = wlan_objmgr_register_psoc_destroy_handler(WLAN_UMAC_COMP_DFS, + dfs_psoc_obj_destroy_notification, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to register psoc delete handler for dfs"); + goto err_psoc_delete; + } + + register_dfs_callbacks(); + + status = wlan_objmgr_register_pdev_create_handler(WLAN_UMAC_COMP_DFS, + wlan_dfs_pdev_obj_create_notification, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to register pdev create handler for dfs"); + goto err_pdev_create; + } + + status = wlan_objmgr_register_pdev_destroy_handler(WLAN_UMAC_COMP_DFS, + wlan_dfs_pdev_obj_destroy_notification, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to register pdev delete handler for dfs"); + goto err_pdev_delete; + } + + status = qdf_print_set_category_verbose(qdf_get_pidx(), + QDF_MODULE_ID_DFS, QDF_TRACE_LEVEL_INFO, true); + + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to set verbose for category"); + goto err_category_verbose; + } + + return QDF_STATUS_SUCCESS; 
+ +err_category_verbose: + wlan_objmgr_unregister_pdev_destroy_handler(WLAN_UMAC_COMP_DFS, + wlan_dfs_pdev_obj_destroy_notification, + NULL); +err_pdev_delete: + wlan_objmgr_unregister_pdev_create_handler(WLAN_UMAC_COMP_DFS, + wlan_dfs_pdev_obj_create_notification, + NULL); +err_pdev_create: + wlan_objmgr_unregister_psoc_destroy_handler(WLAN_UMAC_COMP_DFS, + dfs_psoc_obj_destroy_notification, + NULL); +err_psoc_delete: + wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_DFS, + dfs_psoc_obj_create_notification, + NULL); +err_psoc_create: + return status; +} + +QDF_STATUS dfs_deinit(void) +{ + QDF_STATUS status; + + status = wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_DFS, + dfs_psoc_obj_create_notification, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to deregister dfs psoc obj create"); + + status = wlan_objmgr_unregister_psoc_destroy_handler(WLAN_UMAC_COMP_DFS, + dfs_psoc_obj_destroy_notification, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to deregister dfs psoc obj destroy"); + + status = wlan_objmgr_unregister_pdev_create_handler(WLAN_UMAC_COMP_DFS, + wlan_dfs_pdev_obj_create_notification, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to deregister dfs pdev obj create"); + + status = wlan_objmgr_unregister_pdev_destroy_handler(WLAN_UMAC_COMP_DFS, + wlan_dfs_pdev_obj_destroy_notification, + NULL); + + if (QDF_IS_STATUS_ERROR(status)) + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to deregister dfs pdev obj destroy"); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_dfs_pdev_obj_create_notification(struct wlan_objmgr_pdev *pdev, + void *arg) +{ + struct wlan_dfs *dfs = NULL; + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + uint8_t pdev_id; + QDF_STATUS status; + bool is_5ghz = false; + + if (!pdev) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null pdev"); + 
return QDF_STATUS_E_FAILURE; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null psoc"); + return QDF_STATUS_E_FAILURE; + } + + dfs_tx_ops = wlan_psoc_get_dfs_txops(psoc); + if (!(dfs_tx_ops && dfs_tx_ops->dfs_is_pdev_5ghz)) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_tx_ops is null"); + return QDF_STATUS_E_FAILURE; + } + + status = dfs_tx_ops->dfs_is_pdev_5ghz(pdev, &is_5ghz); + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "Failed to get is_5ghz value"); + return QDF_STATUS_E_FAILURE; + } + + if (!is_5ghz) { + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Do not allocate DFS object for 2G, pdev_id = %d", + pdev_id); + return QDF_STATUS_SUCCESS; + } + + if (dfs_create_object(&dfs) == 1) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "failed to create object"); + return QDF_STATUS_E_FAILURE; + } + + status = global_dfs_to_mlme.pdev_component_obj_attach(pdev, + WLAN_UMAC_COMP_DFS, (void *)dfs, QDF_STATUS_SUCCESS); + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "obj attach failed"); + dfs_destroy_object(dfs); + return QDF_STATUS_E_FAILURE; + } + + dfs->dfs_pdev_obj = pdev; + + if (!dfs_tx_ops->dfs_is_tgt_offload) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "dfs_is_tgt_offload is null"); + dfs_destroy_object(dfs); + return QDF_STATUS_E_FAILURE; + } + + dfs->dfs_is_offload_enabled = dfs_tx_ops->dfs_is_tgt_offload(psoc); + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_offload %d", + dfs->dfs_is_offload_enabled); + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (dfs_attach(dfs) == 1) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_attch failed"); + dfs_destroy_object(dfs); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_dfs_pdev_obj_destroy_notification(struct wlan_objmgr_pdev *pdev, + void *arg) +{ + struct wlan_dfs *dfs = NULL; + + if (!pdev) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "PDEV is NULL"); + return 
QDF_STATUS_E_FAILURE; + } + + dfs = wlan_pdev_get_dfs_obj(pdev); + + /* DFS is NULL during unload. should we call this function before */ + if (dfs) { + global_dfs_to_mlme.pdev_component_obj_detach(pdev, + WLAN_UMAC_COMP_DFS, + (void *)dfs); + + dfs_detach(dfs); + dfs->dfs_pdev_obj = NULL; + dfs_destroy_object(dfs); + } + + return QDF_STATUS_SUCCESS; +} + +static void dfs_scan_serialization_comp_info_cb( + struct wlan_objmgr_vdev *vdev, + union wlan_serialization_rules_info *comp_info) +{ + struct wlan_dfs *dfs = NULL; + struct wlan_objmgr_pdev *pdev; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + struct wlan_objmgr_psoc *psoc; + bool is_5ghz = false; + QDF_STATUS status; + + if (!comp_info) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "comp_info is NULL"); + return; + } + + if (!vdev) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "vdev is NULL"); + return; + } + + pdev = wlan_vdev_get_pdev(vdev); + if (!pdev) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "pdev is NULL"); + return; + } + + comp_info->scan_info.is_cac_in_progress = false; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null psoc"); + return; + } + + dfs_tx_ops = wlan_psoc_get_dfs_txops(psoc); + if (!(dfs_tx_ops && dfs_tx_ops->dfs_is_pdev_5ghz)) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_tx_ops is null"); + return; + } + + status = dfs_tx_ops->dfs_is_pdev_5ghz(pdev, &is_5ghz); + if (QDF_IS_STATUS_ERROR(status)) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "Failed to get is_5ghz value"); + return; + } + + if (!is_5ghz) + return; + + dfs = wlan_pdev_get_dfs_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + if (dfs_is_ap_cac_timer_running(dfs)) + comp_info->scan_info.is_cac_in_progress = true; +} + +QDF_STATUS wifi_dfs_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + + status = tgt_dfs_reg_ev_handler(psoc); + if (status != QDF_STATUS_SUCCESS) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "tgt_dfs_reg_ev_handler 
failed"); + return QDF_STATUS_E_FAILURE; + } + + status = wlan_serialization_register_comp_info_cb(psoc, + WLAN_UMAC_COMP_DFS, + WLAN_SER_CMD_SCAN, + dfs_scan_serialization_comp_info_cb); + if (status != QDF_STATUS_SUCCESS) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "Serialize scan cmd register failed"); + return status; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wifi_dfs_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + + status = wlan_serialization_deregister_comp_info_cb(psoc, + WLAN_UMAC_COMP_DFS, + WLAN_SER_CMD_SCAN); + if (status != QDF_STATUS_SUCCESS) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "Serialize scan cmd deregister failed"); + return status; + } + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_lmac_api.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_lmac_api.c new file mode 100644 index 0000000000000000000000000000000000000000..bfa0b4ee4ae961cd4b910ad681e8eb1dbb13717b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_lmac_api.c @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: Functions to call lmac/offload functions from DFS component. + */ + +#include "wlan_dfs_lmac_api.h" +#include "../../core/src/dfs_internal.h" +#include + +void lmac_get_caps(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_caps *dfs_caps) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_get_caps) + dfs_tx_ops->dfs_get_caps(pdev, dfs_caps); +} + +uint64_t lmac_get_tsf64(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + uint64_t tsf64 = 0; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_gettsf64) + dfs_tx_ops->dfs_gettsf64(pdev, &tsf64); + + return tsf64; +} + +void lmac_dfs_disable(struct wlan_objmgr_pdev *pdev, int no_cac) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_disable) + dfs_tx_ops->dfs_disable(pdev, no_cac); +} + +void lmac_dfs_enable(struct wlan_objmgr_pdev *pdev, + int *is_fastclk, + struct wlan_dfs_phyerr_param *param, + int dfsdomain) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_enable) + dfs_tx_ops->dfs_enable(pdev, + is_fastclk, + param, + dfsdomain); +} + +void lmac_dfs_get_thresholds(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_phyerr_param *param) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_get_thresholds) + dfs_tx_ops->dfs_get_thresholds(pdev, param); +} + +uint16_t lmac_get_ah_devid(struct wlan_objmgr_pdev *pdev) +{ + 
struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + uint16_t devid = 0; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_get_ah_devid) + dfs_tx_ops->dfs_get_ah_devid(pdev, &devid); + + return devid; +} + +uint32_t lmac_get_ext_busy(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + uint32_t ext_chan_busy = 0; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_get_ext_busy) + dfs_tx_ops->dfs_get_ext_busy(pdev, &ext_chan_busy); + + return ext_chan_busy; +} + +void lmac_set_use_cac_prssi(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_set_use_cac_prssi) + dfs_tx_ops->dfs_set_use_cac_prssi(pdev); +} + +uint32_t lmac_get_target_type(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + uint32_t target_type = 0; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_get_target_type) + dfs_tx_ops->dfs_get_target_type(pdev, &target_type); + + return target_type; +} + +uint32_t lmac_get_phymode_info(struct wlan_objmgr_pdev *pdev, + uint32_t chan_mode) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + uint32_t mode_info = 0; + + psoc = wlan_pdev_get_psoc(pdev); + + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + /* since dfs never comes into 2G, hardcode is_2gvht_en flag to false */ + if (dfs_tx_ops->dfs_get_phymode_info) + dfs_tx_ops->dfs_get_phymode_info(pdev, chan_mode, &mode_info, + false); + + return mode_info; +} + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +bool lmac_is_host_dfs_check_support_enabled(struct 
wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + bool enabled = false; + + psoc = wlan_pdev_get_psoc(pdev); + dfs_tx_ops = &psoc->soc_cb.tx_ops.dfs_tx_ops; + + if (dfs_tx_ops->dfs_host_dfs_check_support) + dfs_tx_ops->dfs_host_dfs_check_support(pdev, &enabled); + + return enabled; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_mlme_api.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_mlme_api.c new file mode 100644 index 0000000000000000000000000000000000000000..dcecd950561fff69489a7b90804065f76363beea --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_mlme_api.c @@ -0,0 +1,311 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Functions to call mlme functions from DFS component. 
+ */ + +#include "wlan_dfs_mlme_api.h" +#include "wlan_objmgr_vdev_obj.h" +#include "wlan_objmgr_pdev_obj.h" +#include "../../core/src/dfs.h" +#include "scheduler_api.h" +#ifdef QCA_MCL_DFS_SUPPORT +#include "wni_api.h" +#endif + +void dfs_mlme_start_rcsa(struct wlan_objmgr_pdev *pdev, + bool *wait_for_csa) +{ + if (global_dfs_to_mlme.dfs_start_rcsa != NULL) + global_dfs_to_mlme.dfs_start_rcsa(pdev, wait_for_csa); +} + +#ifndef QCA_MCL_DFS_SUPPORT +void dfs_mlme_mark_dfs(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint16_t freq, + uint8_t vhtop_ch_freq_seg2, + uint64_t flags) +{ + if (global_dfs_to_mlme.mlme_mark_dfs != NULL) + global_dfs_to_mlme.mlme_mark_dfs(pdev, + ieee, + freq, + vhtop_ch_freq_seg2, + flags); +} +#else +static void dfs_send_radar_ind(struct wlan_objmgr_pdev *pdev, + void *object, + void *arg) +{ + struct scheduler_msg sme_msg = {0}; + uint8_t vdev_id = wlan_vdev_get_id((struct wlan_objmgr_vdev *)object); + + sme_msg.type = eWNI_SME_DFS_RADAR_FOUND; + sme_msg.bodyptr = NULL; + sme_msg.bodyval = vdev_id; + scheduler_post_message(QDF_MODULE_ID_DFS, + QDF_MODULE_ID_SME, + QDF_MODULE_ID_SME, &sme_msg); + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, "eWNI_SME_DFS_RADAR_FOUND pdev%d posted", + vdev_id); +} + +void dfs_mlme_mark_dfs(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint16_t freq, + uint8_t vhtop_ch_freq_seg2, + uint64_t flags) +{ + if (!pdev) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "null pdev"); + return; + } + + wlan_objmgr_pdev_iterate_obj_list(pdev, + WLAN_VDEV_OP, + dfs_send_radar_ind, + NULL, 0, WLAN_DFS_ID); +} +#endif + +#ifndef QCA_MCL_DFS_SUPPORT +void dfs_mlme_start_csa(struct wlan_objmgr_pdev *pdev, + uint8_t ieee_chan, uint16_t freq, + uint8_t cfreq2, uint64_t flags) +{ + if (global_dfs_to_mlme.mlme_start_csa != NULL) + global_dfs_to_mlme.mlme_start_csa(pdev, ieee_chan, freq, cfreq2, + flags); +} +#else +void dfs_mlme_start_csa(struct wlan_objmgr_pdev *pdev, + uint8_t ieee_chan, uint16_t freq, + uint8_t cfreq2, uint64_t 
flags) +{ + if (!pdev) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "null pdev"); + return; + } + + wlan_objmgr_pdev_iterate_obj_list(pdev, + WLAN_VDEV_OP, + dfs_send_radar_ind, + NULL, 0, WLAN_DFS_ID); +} +#endif + +#ifndef QCA_MCL_DFS_SUPPORT +void dfs_mlme_proc_cac(struct wlan_objmgr_pdev *pdev, uint32_t vdev_id) +{ + if (global_dfs_to_mlme.mlme_proc_cac != NULL) + global_dfs_to_mlme.mlme_proc_cac(pdev); +} +#else +void dfs_mlme_proc_cac(struct wlan_objmgr_pdev *pdev, uint32_t vdev_id) +{ + struct scheduler_msg sme_msg = {0}; + + sme_msg.type = eWNI_SME_DFS_CAC_COMPLETE; + sme_msg.bodyptr = NULL; + sme_msg.bodyval = vdev_id; + scheduler_post_message(QDF_MODULE_ID_DFS, + QDF_MODULE_ID_SME, + QDF_MODULE_ID_SME, &sme_msg); + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, "eWNI_SME_DFS_CAC_COMPLETE vdev%d posted", + vdev_id); +} +#endif + +void dfs_mlme_deliver_event_up_afrer_cac(struct wlan_objmgr_pdev *pdev) +{ + if (global_dfs_to_mlme.mlme_deliver_event_up_afrer_cac != NULL) + global_dfs_to_mlme.mlme_deliver_event_up_afrer_cac( + pdev); +} + +void dfs_mlme_get_dfs_ch_nchans(struct wlan_objmgr_pdev *pdev, + int *nchans) +{ + if (global_dfs_to_mlme.mlme_get_dfs_ch_nchans != NULL) + global_dfs_to_mlme.mlme_get_dfs_ch_nchans(pdev, + nchans); +} + +QDF_STATUS dfs_mlme_get_extchan(struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2) +{ + if (global_dfs_to_mlme.mlme_get_extchan != NULL) + return global_dfs_to_mlme.mlme_get_extchan(pdev, + dfs_ch_freq, + dfs_ch_flags, + dfs_ch_flagext, + dfs_ch_ieee, + dfs_ch_vhtop_ch_freq_seg1, + dfs_ch_vhtop_ch_freq_seg2); + + return QDF_STATUS_E_FAILURE; +} + +void dfs_mlme_set_no_chans_available(struct wlan_objmgr_pdev *pdev, + int val) +{ + if (global_dfs_to_mlme.mlme_set_no_chans_available != NULL) + global_dfs_to_mlme.mlme_set_no_chans_available( + pdev, + val); +} + +int 
dfs_mlme_ieee2mhz(struct wlan_objmgr_pdev *pdev, int ieee, uint64_t flag) +{ + int freq = 0; + + if (global_dfs_to_mlme.mlme_ieee2mhz != NULL) + global_dfs_to_mlme.mlme_ieee2mhz(pdev, + ieee, + flag, + &freq); + + return freq; +} + +void dfs_mlme_find_dot11_channel(struct wlan_objmgr_pdev *pdev, + uint8_t ieee, + uint8_t des_cfreq2, + int mode, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2) +{ + if (global_dfs_to_mlme.mlme_find_dot11_channel != NULL) + global_dfs_to_mlme.mlme_find_dot11_channel(pdev, + ieee, + des_cfreq2, + mode, + dfs_ch_freq, + dfs_ch_flags, + dfs_ch_flagext, + dfs_ch_ieee, + dfs_ch_vhtop_ch_freq_seg1, + dfs_ch_vhtop_ch_freq_seg2); +} + +void dfs_mlme_get_dfs_ch_channels(struct wlan_objmgr_pdev *pdev, + uint16_t *dfs_ch_freq, + uint64_t *dfs_ch_flags, + uint16_t *dfs_ch_flagext, + uint8_t *dfs_ch_ieee, + uint8_t *dfs_ch_vhtop_ch_freq_seg1, + uint8_t *dfs_ch_vhtop_ch_freq_seg2, + int index) +{ + if (global_dfs_to_mlme.mlme_get_dfs_ch_channels != NULL) + global_dfs_to_mlme.mlme_get_dfs_ch_channels(pdev, + dfs_ch_freq, + dfs_ch_flags, + dfs_ch_flagext, + dfs_ch_ieee, + dfs_ch_vhtop_ch_freq_seg1, + dfs_ch_vhtop_ch_freq_seg2, + index); +} + +uint32_t dfs_mlme_dfs_ch_flags_ext(struct wlan_objmgr_pdev *pdev) +{ + uint16_t flag_ext = 0; + + if (global_dfs_to_mlme.mlme_dfs_ch_flags_ext != NULL) + global_dfs_to_mlme.mlme_dfs_ch_flags_ext(pdev, + &flag_ext); + + return flag_ext; +} + +void dfs_mlme_channel_change_by_precac(struct wlan_objmgr_pdev *pdev) +{ + if (global_dfs_to_mlme.mlme_channel_change_by_precac != NULL) + global_dfs_to_mlme.mlme_channel_change_by_precac( + pdev); +} + +void dfs_mlme_nol_timeout_notification(struct wlan_objmgr_pdev *pdev) +{ + if (global_dfs_to_mlme.mlme_nol_timeout_notification != NULL) + global_dfs_to_mlme.mlme_nol_timeout_notification( + pdev); +} + +void dfs_mlme_clist_update(struct 
wlan_objmgr_pdev *pdev, + void *nollist, + int nentries) +{ + if (global_dfs_to_mlme.mlme_clist_update != NULL) + global_dfs_to_mlme.mlme_clist_update(pdev, + nollist, + nentries); +} + +int dfs_mlme_get_cac_timeout(struct wlan_objmgr_pdev *pdev, + uint16_t dfs_ch_freq, + uint8_t dfs_ch_vhtop_ch_freq_seg2, + uint64_t dfs_ch_flags) +{ + int cac_timeout = 0; + + if (global_dfs_to_mlme.mlme_get_cac_timeout != NULL) + global_dfs_to_mlme.mlme_get_cac_timeout(pdev, + dfs_ch_freq, + dfs_ch_vhtop_ch_freq_seg2, + dfs_ch_flags, + &cac_timeout); + + return cac_timeout; +} + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +int dfs_mlme_rebuild_chan_list_with_non_dfs_channels( + struct wlan_objmgr_pdev *pdev) +{ + if (!global_dfs_to_mlme.mlme_rebuild_chan_list_with_non_dfs_channels) + return 1; + + return global_dfs_to_mlme.mlme_rebuild_chan_list_with_non_dfs_channels( + pdev); +} + +void dfs_mlme_restart_vaps_with_non_dfs_chan(struct wlan_objmgr_pdev *pdev, + int no_chans_avail) +{ + if (!global_dfs_to_mlme.mlme_restart_vaps_with_non_dfs_chan) + return; + + global_dfs_to_mlme.mlme_restart_vaps_with_non_dfs_chan(pdev, + no_chans_avail); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..673cb1b0ddf4931808ac6f8ee75f685619e98938 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_tgt_api.c @@ -0,0 +1,538 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: This file has the DFS dispatcher API implementation which is exposed
+ * to outside of DFS component.
+ */
+#include <wlan_objmgr_pdev_obj.h>
+#include "wlan_dfs_tgt_api.h"
+#include "wlan_lmac_if_def.h"
+#include "wlan_lmac_if_api.h"
+#include "wlan_dfs_mlme_api.h"
+#include "../../core/src/dfs.h"
+#include "../../core/src/dfs_zero_cac.h"
+#include "../../core/src/dfs_process_radar_found_ind.h"
+#include <qdf_module.h>
+#include "../../core/src/dfs_partial_offload_radar.h"
+
+struct wlan_lmac_if_dfs_tx_ops *
+wlan_psoc_get_dfs_txops(struct wlan_objmgr_psoc *psoc)
+{
+	return &((psoc->soc_cb.tx_ops.dfs_tx_ops));
+}
+
+QDF_STATUS tgt_dfs_set_current_channel(struct wlan_objmgr_pdev *pdev,
+		uint16_t dfs_ch_freq,
+		uint64_t dfs_ch_flags,
+		uint16_t dfs_ch_flagext,
+		uint8_t dfs_ch_ieee,
+		uint8_t dfs_ch_vhtop_ch_freq_seg1,
+		uint8_t dfs_ch_vhtop_ch_freq_seg2)
+{
+	struct wlan_dfs *dfs;
+
+	dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev);
+	if (!dfs) {
+		dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS,  "dfs is NULL");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	dfs_set_current_channel(dfs,
+			dfs_ch_freq,
+			dfs_ch_flags,
+			dfs_ch_flagext,
+			dfs_ch_ieee,
+			dfs_ch_vhtop_ch_freq_seg1,
+			dfs_ch_vhtop_ch_freq_seg2);
+
+	return QDF_STATUS_SUCCESS;
+}
+qdf_export_symbol(tgt_dfs_set_current_channel);
+
+QDF_STATUS tgt_dfs_radar_enable(struct wlan_objmgr_pdev *pdev,
+		int no_cac, uint32_t opmode)
+{
+	struct wlan_dfs *dfs;
+	struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops;
+	struct wlan_objmgr_psoc
*psoc; + QDF_STATUS status; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!dfs->dfs_is_offload_enabled) { + dfs_radar_enable(dfs, no_cac, opmode); + return QDF_STATUS_SUCCESS; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + dfs_tx_ops = wlan_psoc_get_dfs_txops(psoc); + if (!dfs_tx_ops) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_tx_ops is null"); + return QDF_STATUS_E_FAILURE; + } + + status = dfs_tx_ops->dfs_send_offload_enable_cmd(pdev, true); + if (QDF_IS_STATUS_ERROR(status)) + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Failed to enable dfs offload, pdev_id: %d", + wlan_objmgr_pdev_get_pdev_id(pdev)); + + return status; +} +qdf_export_symbol(tgt_dfs_radar_enable); + +void tgt_dfs_is_radar_enabled(struct wlan_objmgr_pdev *pdev, int *ignore_dfs) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return; + } + + dfs_is_radar_enabled(dfs, ignore_dfs); +} + +qdf_export_symbol(tgt_dfs_is_radar_enabled); + +QDF_STATUS tgt_dfs_process_phyerr(struct wlan_objmgr_pdev *pdev, + void *buf, + uint16_t datalen, + uint8_t r_rssi, + uint8_t r_ext_rssi, + uint32_t r_rs_tstamp, + uint64_t r_fulltsf) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!dfs->dfs_is_offload_enabled) + dfs_process_phyerr(dfs, buf, datalen, r_rssi, + r_ext_rssi, r_rs_tstamp, r_fulltsf); + else + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Unexpect phyerror as DFS is offloaded, pdev_id: %d", + wlan_objmgr_pdev_get_pdev_id(pdev)); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_process_phyerr); + +#ifdef 
QCA_MCL_DFS_SUPPORT +QDF_STATUS tgt_dfs_process_phyerr_filter_offload(struct wlan_objmgr_pdev *pdev, + struct radar_event_info + *wlan_radar_event) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + if (!dfs->dfs_is_offload_enabled) + dfs_process_phyerr_filter_offload(dfs, wlan_radar_event); + else + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Unexpect phyerror as DFS is offloaded, pdev_id: %d", + wlan_objmgr_pdev_get_pdev_id(pdev)); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_process_phyerr_filter_offload); + +QDF_STATUS tgt_dfs_is_phyerr_filter_offload(struct wlan_objmgr_psoc *psoc, + bool *is_phyerr_filter_offload) +{ + struct dfs_soc_priv_obj *soc_obj; + + if (!psoc) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_DFS); + if (!soc_obj) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to get dfs psoc component"); + return QDF_STATUS_E_FAILURE; + } + + *is_phyerr_filter_offload = soc_obj->dfs_is_phyerr_filter_offload; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_is_phyerr_filter_offload); +#else +QDF_STATUS tgt_dfs_process_phyerr_filter_offload(struct wlan_objmgr_pdev *pdev, + struct radar_event_info + *wlan_radar_event) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_dfs_is_phyerr_filter_offload(struct wlan_objmgr_psoc *psoc, + bool *is_phyerr_filter_offload) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +QDF_STATUS tgt_dfs_is_precac_timer_running(struct wlan_objmgr_pdev *pdev, + bool *is_precac_timer_running) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + *is_precac_timer_running = dfs_is_precac_timer_running(dfs); + + 
return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_is_precac_timer_running); + +QDF_STATUS tgt_dfs_get_radars(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!dfs->dfs_is_offload_enabled) + dfs_get_radars(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_get_radars); + +QDF_STATUS tgt_dfs_destroy_object(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs_destroy_object(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_destroy_object); + +#ifdef QCA_MCL_DFS_SUPPORT +QDF_STATUS tgt_dfs_set_tx_leakage_threshold(struct wlan_objmgr_pdev *pdev, + uint16_t tx_leakage_threshold) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs->tx_leakage_threshold = tx_leakage_threshold; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_set_tx_leakage_threshold); +#endif + +QDF_STATUS tgt_dfs_control(struct wlan_objmgr_pdev *pdev, + u_int id, + void *indata, + uint32_t insize, + void *outdata, + uint32_t *outsize, + int *error) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + *error = dfs_control(dfs, id, indata, insize, outdata, outsize); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_control); + +QDF_STATUS tgt_dfs_find_vht80_chan_for_precac(struct wlan_objmgr_pdev *pdev, + uint32_t chan_mode, + uint8_t ch_freq_seg1, + uint32_t *cfreq1, + uint32_t *cfreq2, + uint32_t 
*phy_mode, + bool *dfs_set_cfreq2, + bool *set_agile) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs_find_vht80_chan_for_precac(dfs, + chan_mode, + ch_freq_seg1, + cfreq1, + cfreq2, + phy_mode, + dfs_set_cfreq2, + set_agile); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_find_vht80_chan_for_precac); + +QDF_STATUS tgt_dfs_process_radar_ind(struct wlan_objmgr_pdev *pdev, + struct radar_found_info *radar_found) +{ + struct wlan_dfs *dfs; + + if (!pdev) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "null pdev"); + return QDF_STATUS_E_FAILURE; + } + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is null"); + return QDF_STATUS_E_FAILURE; + } + + return dfs_process_radar_ind(dfs, radar_found); +} +qdf_export_symbol(tgt_dfs_process_radar_ind); + +#ifndef QCA_MCL_DFS_SUPPORT +QDF_STATUS tgt_dfs_cac_complete(struct wlan_objmgr_pdev *pdev, uint32_t vdev_id) +{ + return QDF_STATUS_SUCCESS; +} +#else +QDF_STATUS tgt_dfs_cac_complete(struct wlan_objmgr_pdev *pdev, uint32_t vdev_id) +{ + dfs_mlme_proc_cac(pdev, vdev_id); + + return QDF_STATUS_SUCCESS; +} +#endif +qdf_export_symbol(tgt_dfs_cac_complete); + +QDF_STATUS tgt_dfs_reg_ev_handler(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + if (!psoc) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "null psoc"); + return QDF_STATUS_E_FAILURE; + } + + dfs_tx_ops = wlan_psoc_get_dfs_txops(psoc); + if (!dfs_tx_ops) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "null dfs_tx_ops"); + return QDF_STATUS_E_FAILURE; + } + + if (dfs_tx_ops->dfs_reg_ev_handler) + return dfs_tx_ops->dfs_reg_ev_handler(psoc); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(tgt_dfs_reg_ev_handler); + +QDF_STATUS tgt_dfs_stop(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = 
global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs_stop(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(tgt_dfs_stop); + +QDF_STATUS tgt_dfs_process_emulate_bang_radar_cmd(struct wlan_objmgr_pdev *pdev, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + dfs_tx_ops = wlan_psoc_get_dfs_txops(psoc); + if (dfs_tx_ops && dfs_tx_ops->dfs_process_emulate_bang_radar_cmd) + return dfs_tx_ops->dfs_process_emulate_bang_radar_cmd(pdev, + dfs_unit_test); + else + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "dfs_tx_ops=%pK", dfs_tx_ops); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(tgt_dfs_process_emulate_bang_radar_cmd); + +#ifdef QCA_MCL_DFS_SUPPORT +QDF_STATUS tgt_dfs_set_phyerr_filter_offload(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + struct dfs_soc_priv_obj *soc_obj; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "psoc is null"); + return QDF_STATUS_E_FAILURE; + } + + soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_DFS); + if (!soc_obj) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to get dfs psoc component"); + return QDF_STATUS_E_FAILURE; + } + dfs_tx_ops = wlan_psoc_get_dfs_txops(psoc); + if (dfs_tx_ops && dfs_tx_ops->dfs_set_phyerr_filter_offload) + return dfs_tx_ops->dfs_set_phyerr_filter_offload(pdev, + soc_obj->dfs_is_phyerr_filter_offload); + else + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "dfs_tx_ops=%pK", dfs_tx_ops); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(tgt_dfs_set_phyerr_filter_offload); +#endif + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && 
defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS +tgt_dfs_send_avg_params_to_fw(struct wlan_objmgr_pdev *pdev, + struct dfs_radar_found_params *params) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_dfs_tx_ops *dfs_tx_ops; + struct wlan_dfs *dfs; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return status; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "psoc is null"); + return status; + } + + dfs_tx_ops = wlan_psoc_get_dfs_txops(psoc); + if (dfs_tx_ops && dfs_tx_ops->dfs_send_avg_radar_params_to_fw) + status = dfs_tx_ops->dfs_send_avg_radar_params_to_fw(pdev, + params); + + if (QDF_IS_STATUS_SUCCESS(status)) { + dfs->dfs_average_params_sent = 1; + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, + "Average radar parameters sent %d", + dfs->dfs_average_params_sent); + } + + return status; +} + +qdf_export_symbol(tgt_dfs_send_avg_params_to_fw); + +QDF_STATUS tgt_dfs_action_on_status_from_fw(struct wlan_objmgr_pdev *pdev, + uint32_t *status) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs_action_on_fw_radar_status_check(dfs, status); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(tgt_dfs_action_on_status_from_fw); + +QDF_STATUS tgt_dfs_reset_spoof_test(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs is NULL"); + return QDF_STATUS_E_FAILURE; + } + + dfs_reset_spoof_test(dfs); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(tgt_dfs_reset_spoof_test); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_ucfg_api.c 
b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..6e4ac9bf7f75372ed44ca62a2e0b34bbaab0fe44 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_ucfg_api.c @@ -0,0 +1,221 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has the DFS dispatcher API implementation which is exposed + * to outside of DFS component. 
+ */ + +#include "wlan_dfs_ucfg_api.h" +#include "../../core/src/dfs.h" +#include "../../core/src/dfs_zero_cac.h" +#include "../../core/src/dfs_partial_offload_radar.h" +#include + +QDF_STATUS ucfg_dfs_is_ap_cac_timer_running(struct wlan_objmgr_pdev *pdev, + int *is_ap_cac_timer_running) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *is_ap_cac_timer_running = dfs_is_ap_cac_timer_running(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_is_ap_cac_timer_running); + +QDF_STATUS ucfg_dfs_getnol(struct wlan_objmgr_pdev *pdev, + void *dfs_nolinfo) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_getnol(dfs, dfs_nolinfo); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_getnol); + +QDF_STATUS ucfg_dfs_override_cac_timeout(struct wlan_objmgr_pdev *pdev, + int cac_timeout, + int *status) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *status = dfs_override_cac_timeout(dfs, cac_timeout); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_override_cac_timeout); + +QDF_STATUS ucfg_dfs_get_override_cac_timeout(struct wlan_objmgr_pdev *pdev, + int *cac_timeout, + int *status) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *status = dfs_get_override_cac_timeout(dfs, cac_timeout); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_get_override_cac_timeout); + +QDF_STATUS ucfg_dfs_get_override_precac_timeout(struct wlan_objmgr_pdev *pdev, + int *precac_timeout) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_get_override_precac_timeout(dfs, precac_timeout); + + return QDF_STATUS_SUCCESS; 
+} +qdf_export_symbol(ucfg_dfs_get_override_precac_timeout); + +QDF_STATUS ucfg_dfs_override_precac_timeout(struct wlan_objmgr_pdev *pdev, + int precac_timeout) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_override_precac_timeout(dfs, precac_timeout); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_override_precac_timeout); + +QDF_STATUS ucfg_dfs_set_precac_enable(struct wlan_objmgr_pdev *pdev, + uint32_t value) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return QDF_STATUS_E_FAILURE; + } + + dfs_set_precac_enable(dfs, value); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_set_precac_enable); + +QDF_STATUS ucfg_dfs_get_precac_enable(struct wlan_objmgr_pdev *pdev, + int *buff) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return QDF_STATUS_E_FAILURE; + } + + *buff = dfs_get_precac_enable(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_get_precac_enable); + +#ifdef QCA_MCL_DFS_SUPPORT +QDF_STATUS ucfg_dfs_update_config(struct wlan_objmgr_psoc *psoc, + struct dfs_user_config *req) +{ + struct dfs_soc_priv_obj *soc_obj; + + if (!psoc || !req) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "psoc: 0x%pK, req: 0x%pK", psoc, req); + return QDF_STATUS_E_FAILURE; + } + + soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_DFS); + if (!soc_obj) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Failed to get dfs psoc component"); + return QDF_STATUS_E_FAILURE; + } + + soc_obj->dfs_is_phyerr_filter_offload = + req->dfs_is_phyerr_filter_offload; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(ucfg_dfs_update_config); +#endif + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) 
+QDF_STATUS ucfg_dfs_set_override_status_timeout(struct wlan_objmgr_pdev *pdev, + int status_timeout) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return QDF_STATUS_E_FAILURE; + } + + dfs_set_override_status_timeout(dfs, status_timeout); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(ucfg_dfs_set_override_status_timeout); + +QDF_STATUS ucfg_dfs_get_override_status_timeout(struct wlan_objmgr_pdev *pdev, + int *status_timeout) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return QDF_STATUS_E_FAILURE; + } + + dfs_get_override_status_timeout(dfs, status_timeout); + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(ucfg_dfs_get_override_status_timeout); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_utils_api.c b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_utils_api.c new file mode 100644 index 0000000000000000000000000000000000000000..d94b5cf2f7697180a142860dfebfa6726e1081e4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/dfs/dispatcher/src/wlan_dfs_utils_api.c @@ -0,0 +1,915 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file has the DFS dispatcher API implementation which is exposed + * to outside of DFS component. + */ +#include "wlan_dfs_utils_api.h" +#include "../../core/src/dfs.h" +#include "../../core/src/dfs_zero_cac.h" +#include +#include "../../core/src/dfs_random_chan_sel.h" +#ifdef QCA_DFS_USE_POLICY_MANAGER +#include "wlan_policy_mgr_api.h" +#endif +#ifdef QCA_DFS_NOL_PLATFORM_DRV_SUPPORT +#include +#endif +#include + +struct dfs_nol_info { + uint16_t num_chans; + struct dfsreq_nolelem dfs_nol[DFS_MAX_NOL_CHANNEL]; +}; + +QDF_STATUS utils_dfs_reset(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_reset(dfs); + dfs_nol_update(dfs); + dfs_reset_precaclists(dfs); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS utils_dfs_cac_valid_reset(struct wlan_objmgr_pdev *pdev, + uint8_t prevchan_ieee, + uint32_t prevchan_flags) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_cac_valid_reset(dfs, prevchan_ieee, prevchan_flags); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_cac_valid_reset); + +QDF_STATUS utils_dfs_reset_precaclists(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_reset_precaclists(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_reset_precaclists); + +QDF_STATUS utils_dfs_cancel_precac_timer(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs 
*dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_cancel_precac_timer(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_cancel_precac_timer); + +QDF_STATUS utils_dfs_is_precac_done(struct wlan_objmgr_pdev *pdev, + bool *is_precac_done) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *is_precac_done = dfs_is_precac_done(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_is_precac_done); + +QDF_STATUS utils_dfs_cancel_cac_timer(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_cancel_cac_timer(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_cancel_cac_timer); + +QDF_STATUS utils_dfs_start_cac_timer(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_start_cac_timer(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_start_cac_timer); + +QDF_STATUS utils_dfs_cac_stop(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_cac_stop(dfs); + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_cac_stop); + +QDF_STATUS utils_dfs_stacac_stop(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_stacac_stop(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_stacac_stop); + +QDF_STATUS utils_dfs_get_usenol(struct wlan_objmgr_pdev *pdev, uint16_t *usenol) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + 
return QDF_STATUS_E_FAILURE; + + *usenol = dfs_get_use_nol(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_get_usenol); + +QDF_STATUS utils_dfs_radar_disable(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_radar_disable(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_radar_disable); + +QDF_STATUS utils_dfs_set_update_nol_flag(struct wlan_objmgr_pdev *pdev, + bool val) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_set_update_nol_flag(dfs, val); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_set_update_nol_flag); + +QDF_STATUS utils_dfs_get_update_nol_flag(struct wlan_objmgr_pdev *pdev, + bool *nol_flag) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *nol_flag = dfs_get_update_nol_flag(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_get_update_nol_flag); + +QDF_STATUS utils_dfs_get_dfs_use_nol(struct wlan_objmgr_pdev *pdev, + int *dfs_use_nol) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *dfs_use_nol = dfs_get_use_nol(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_get_dfs_use_nol); + +QDF_STATUS utils_dfs_get_nol_timeout(struct wlan_objmgr_pdev *pdev, + int *dfs_nol_timeout) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *dfs_nol_timeout = dfs_get_nol_timeout(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_get_nol_timeout); + +QDF_STATUS utils_dfs_nol_addchan(struct wlan_objmgr_pdev *pdev, + uint16_t freq, + uint32_t dfs_nol_timeout) +{ + struct wlan_dfs *dfs; + + dfs = 
global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + DFS_NOL_ADD_CHAN_LOCKED(dfs, freq, dfs_nol_timeout); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_nol_addchan); + +QDF_STATUS utils_dfs_nol_update(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_nol_update(dfs); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_nol_update); + +QDF_STATUS utils_dfs_second_segment_radar_disable(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_second_segment_radar_disable(dfs); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS utils_dfs_is_ignore_dfs(struct wlan_objmgr_pdev *pdev, + bool *ignore_dfs) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *ignore_dfs = dfs->dfs_ignore_dfs; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_is_ignore_dfs); + +QDF_STATUS utils_dfs_is_cac_valid(struct wlan_objmgr_pdev *pdev, + bool *is_cac_valid) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *is_cac_valid = dfs->dfs_cac_valid; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_is_cac_valid); + +QDF_STATUS utils_dfs_is_ignore_cac(struct wlan_objmgr_pdev *pdev, + bool *ignore_cac) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + *ignore_cac = dfs->dfs_ignore_cac; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_is_ignore_cac); + +QDF_STATUS utils_dfs_set_cac_timer_running(struct wlan_objmgr_pdev *pdev, + int val) +{ + struct wlan_dfs *dfs; + + dfs = 
global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs->dfs_cac_timer_running = val; + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_set_cac_timer_running); + +QDF_STATUS utils_dfs_get_nol_chfreq_and_chwidth(struct wlan_objmgr_pdev *pdev, + void *nollist, + uint32_t *nol_chfreq, + uint32_t *nol_chwidth, + int index) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_get_nol_chfreq_and_chwidth(nollist, nol_chfreq, nol_chwidth, index); + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(utils_dfs_get_nol_chfreq_and_chwidth); + +QDF_STATUS utils_dfs_update_cur_chan_flags(struct wlan_objmgr_pdev *pdev, + uint64_t flags, + uint16_t flagext) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return QDF_STATUS_E_FAILURE; + + dfs_update_cur_chan_flags(dfs, flags, flagext); + + return QDF_STATUS_SUCCESS; +} + +static void utils_dfs_get_max_phy_mode(struct wlan_objmgr_pdev *pdev, + uint32_t *phy_mode) +{ + return; +} + +static void utils_dfs_get_max_sup_width(struct wlan_objmgr_pdev *pdev, + uint8_t *ch_width) +{ + return; +} + +/** + * utils_dfs_get_chan_list() - Get channel list from regdb based on current + * operating channel. + * @pdev: Pointer to DFS pdev object. + * @chan_list: Pointer to current channel list + * @num_chan: number of channels in the current channel list. 
+ */ +#ifndef QCA_DFS_USE_POLICY_MANAGER +static void utils_dfs_get_chan_list(struct wlan_objmgr_pdev *pdev, + struct dfs_channel *chan_list, uint32_t *num_chan) +{ + int i = 0, j = 0; + enum channel_state state; + struct regulatory_channel *cur_chan_list; + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) + return; + + cur_chan_list = qdf_mem_malloc(NUM_CHANNELS * + sizeof(struct regulatory_channel)); + if (!cur_chan_list) { + dfs_alert(dfs, WLAN_DEBUG_DFS_ALWAYS, "fail to alloc"); + *num_chan = 0; + return; + } + + if (wlan_reg_get_current_chan_list( + pdev, cur_chan_list) != QDF_STATUS_SUCCESS) { + *num_chan = 0; + dfs_alert(dfs, WLAN_DEBUG_DFS_ALWAYS, + "failed to get curr channel list"); + return; + } + + for (i = 0; i < NUM_CHANNELS; i++) { + state = cur_chan_list[i].state; + if (state == CHANNEL_STATE_DFS || + state == CHANNEL_STATE_ENABLE) { + chan_list[j].dfs_ch_ieee = cur_chan_list[i].chan_num; + chan_list[j].dfs_ch_freq = cur_chan_list[i].center_freq; + if (state == CHANNEL_STATE_DFS) + chan_list[j].dfs_ch_flagext = + WLAN_CHAN_DFS; + j++; + } + } + *num_chan = j; + qdf_mem_free(cur_chan_list); + + return; +} + +/** + * utils_dfs_get_channel_list() - Get channel list from regdb component based + * on current channel list. + * @pdev: Pointer to pdev structure. + * @chan_list: Pointer to regdb channel list. + * @num_chan: number of channels. + * + * Get regdb channel list based on dfs current channel. + * ex: When AP is operating in 5GHz channel, filter 2.4GHz and 4.9GHZ channels + * so that the random channel function does not select either 2.4GHz or 4.9GHz + * channel. 
+ */ +static void utils_dfs_get_channel_list(struct wlan_objmgr_pdev *pdev, + struct dfs_channel *chan_list, uint32_t *num_chan) +{ + struct dfs_channel *tmp_chan_list = NULL; + struct wlan_dfs *dfs; + bool is_curchan_5g; + bool is_curchan_24g; + bool is_curchan_49g; + uint32_t chan_num; + uint32_t center_freq; + uint16_t flagext; + int i, j = 0; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return; + } + + tmp_chan_list = qdf_mem_malloc(*num_chan * sizeof(*tmp_chan_list)); + if (!tmp_chan_list) { + dfs_alert(dfs, WLAN_DEBUG_DFS_ALWAYS, "mem alloc failed"); + return; + } + + utils_dfs_get_chan_list(pdev, tmp_chan_list, num_chan); + + chan_num = dfs->dfs_curchan->dfs_ch_ieee; + center_freq = dfs->dfs_curchan->dfs_ch_freq; + is_curchan_5g = WLAN_REG_IS_5GHZ_CH(chan_num); + is_curchan_24g = WLAN_REG_IS_24GHZ_CH(chan_num); + is_curchan_49g = WLAN_REG_IS_49GHZ_FREQ(center_freq); + + for (i = 0; i < *num_chan; i++) { + chan_num = tmp_chan_list[i].dfs_ch_ieee; + center_freq = tmp_chan_list[i].dfs_ch_freq; + flagext = tmp_chan_list[i].dfs_ch_flagext; + + if ((is_curchan_5g) && WLAN_REG_IS_5GHZ_CH(chan_num)) { + chan_list[j].dfs_ch_ieee = chan_num; + chan_list[j].dfs_ch_freq = center_freq; + chan_list[j].dfs_ch_flagext = flagext; + j++; + } else if ((is_curchan_24g) && + WLAN_REG_IS_24GHZ_CH(chan_num)) { + chan_list[j].dfs_ch_ieee = chan_num; + chan_list[j].dfs_ch_freq = center_freq; + j++; + } else if ((is_curchan_49g) && + WLAN_REG_IS_49GHZ_FREQ(center_freq)) { + chan_list[j].dfs_ch_ieee = chan_num; + chan_list[j].dfs_ch_freq = center_freq; + j++; + } + } + + *num_chan = j; + + qdf_mem_free(tmp_chan_list); +} + +#else + +static void utils_dfs_get_chan_list(struct wlan_objmgr_pdev *pdev, + struct dfs_channel *chan_list, uint32_t *num_chan) +{ + uint8_t pcl_ch[QDF_MAX_NUM_CHAN] = {0}; + uint8_t weight_list[QDF_MAX_NUM_CHAN] = {0}; + uint32_t len; + uint32_t weight_len; + int i; + struct 
wlan_objmgr_psoc *psoc; + uint32_t conn_count = 0; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + *num_chan = 0; + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, "null psoc"); + return; + } + + len = QDF_ARRAY_SIZE(pcl_ch); + weight_len = QDF_ARRAY_SIZE(weight_list); + conn_count = policy_mgr_mode_specific_connection_count( + psoc, PM_SAP_MODE, NULL); + if (0 == conn_count) + policy_mgr_get_pcl(psoc, PM_SAP_MODE, pcl_ch, + &len, weight_list, weight_len); + else + policy_mgr_get_pcl_for_existing_conn(psoc, PM_SAP_MODE, pcl_ch, + &len, weight_list, weight_len, true); + + if (*num_chan < len) { + dfs_err(NULL, WLAN_DEBUG_DFS_ALWAYS, + "Invalid len src=%d, dst=%d", + *num_chan, len); + *num_chan = 0; + return; + } + + for (i = 0; i < len; i++) { + chan_list[i].dfs_ch_ieee = pcl_ch[i]; + chan_list[i].dfs_ch_freq = + wlan_reg_chan_to_freq(pdev, pcl_ch[i]); + } + *num_chan = i; + dfs_info(NULL, WLAN_DEBUG_DFS_ALWAYS, "num channels %d", i); +} + +/** + * utils_dfs_get_channel_list() - Wrapper function to get channel list from + * regdb component. + * @pdev: Pointer to pdev structure. + * @chan_list: Pointer to regdb channel list. + * @num_chan: number of channels. 
+ */ +static void utils_dfs_get_channel_list(struct wlan_objmgr_pdev *pdev, + struct dfs_channel *chan_list, uint32_t *num_chan) +{ + utils_dfs_get_chan_list(pdev, chan_list, num_chan); +} +#endif + +QDF_STATUS utils_dfs_get_random_channel( + struct wlan_objmgr_pdev *pdev, + uint16_t flags, + struct ch_params *ch_params, + uint32_t *hw_mode, + uint8_t *target_chan, + struct dfs_acs_info *acs_info) +{ + uint32_t dfs_reg; + uint32_t num_chan = NUM_CHANNELS; + struct wlan_dfs *dfs = NULL; + struct wlan_objmgr_psoc *psoc; + struct dfs_channel *chan_list = NULL; + struct dfs_channel cur_chan; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + *target_chan = 0; + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null psoc"); + goto random_chan_error; + } + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + goto random_chan_error; + } + + wlan_reg_get_dfs_region(pdev, &dfs_reg); + chan_list = qdf_mem_malloc(num_chan * sizeof(*chan_list)); + if (!chan_list) { + dfs_alert(dfs, WLAN_DEBUG_DFS_ALWAYS, "mem alloc failed"); + goto random_chan_error; + } + + utils_dfs_get_channel_list(pdev, chan_list, &num_chan); + if (!num_chan) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "zero channels"); + goto random_chan_error; + } + + cur_chan.dfs_ch_vhtop_ch_freq_seg1 = ch_params->center_freq_seg0; + cur_chan.dfs_ch_vhtop_ch_freq_seg2 = ch_params->center_freq_seg1; + + if (!ch_params->ch_width) + utils_dfs_get_max_sup_width(pdev, + (uint8_t *)&ch_params->ch_width); + + *target_chan = dfs_prepare_random_channel(dfs, chan_list, + num_chan, flags, (uint8_t *)&ch_params->ch_width, + &cur_chan, (uint8_t)dfs_reg, acs_info); + + ch_params->center_freq_seg0 = cur_chan.dfs_ch_vhtop_ch_freq_seg1; + ch_params->center_freq_seg1 = cur_chan.dfs_ch_vhtop_ch_freq_seg2; + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "input width=%d", ch_params->ch_width); + + if (*target_chan) { + 
wlan_reg_set_channel_params(pdev, + *target_chan, 0, ch_params); + utils_dfs_get_max_phy_mode(pdev, hw_mode); + status = QDF_STATUS_SUCCESS; + } + + dfs_info(dfs, WLAN_DEBUG_DFS_RANDOM_CHAN, + "ch=%d, seg0=%d, seg1=%d, width=%d", + *target_chan, ch_params->center_freq_seg0, + ch_params->center_freq_seg1, ch_params->ch_width); + +random_chan_error: + qdf_mem_free(chan_list); + + return status; +} +qdf_export_symbol(utils_dfs_get_random_channel); + +#ifdef QCA_DFS_NOL_PLATFORM_DRV_SUPPORT +void utils_dfs_init_nol(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs; + struct wlan_objmgr_psoc *psoc; + qdf_device_t qdf_dev; + struct dfs_nol_info *dfs_nolinfo; + int len; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + psoc = wlan_pdev_get_psoc(pdev); + if (!dfs || !psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, + "dfs %pK, psoc %pK", dfs, psoc); + return; + } + + qdf_dev = psoc->soc_objmgr.qdf_dev; + if (!qdf_dev->dev) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null device"); + return; + } + + dfs_nolinfo = qdf_mem_malloc(sizeof(*dfs_nolinfo)); + if (!dfs_nolinfo) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_nolinfo alloc fail"); + return; + } + + qdf_mem_zero(dfs_nolinfo, sizeof(*dfs_nolinfo)); + len = pld_wlan_get_dfs_nol(qdf_dev->dev, (void *)dfs_nolinfo, + (uint16_t)sizeof(*dfs_nolinfo)); + if (len > 0) { + dfs_set_nol(dfs, dfs_nolinfo->dfs_nol, dfs_nolinfo->num_chans); + dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "nol channels in pld"); + DFS_PRINT_NOL_LOCKED(dfs); + } else { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "no nol in pld"); + } + qdf_mem_free(dfs_nolinfo); +} +#endif +qdf_export_symbol(utils_dfs_init_nol); + +#ifndef QCA_DFS_NOL_PLATFORM_DRV_SUPPORT +void utils_dfs_save_nol(struct wlan_objmgr_pdev *pdev) +{ +} +#else +void utils_dfs_save_nol(struct wlan_objmgr_pdev *pdev) +{ + struct dfs_nol_info *dfs_nolinfo; + struct wlan_dfs *dfs = NULL; + struct wlan_objmgr_psoc *psoc; + qdf_device_t qdf_dev; + int num_chans = 0; + + dfs = 
global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null psoc"); + return; + } + + qdf_dev = psoc->soc_objmgr.qdf_dev; + if (!qdf_dev->dev) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null device"); + return; + } + + dfs_nolinfo = qdf_mem_malloc(sizeof(*dfs_nolinfo)); + if (!dfs_nolinfo) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "dfs_nolinfo alloc fail"); + return; + } + + qdf_mem_zero(dfs_nolinfo, sizeof(*dfs_nolinfo)); + DFS_GET_NOL_LOCKED(dfs, dfs_nolinfo->dfs_nol, &num_chans); + if (num_chans > 0) { + + if (num_chans > DFS_MAX_NOL_CHANNEL) + dfs_nolinfo->num_chans = DFS_MAX_NOL_CHANNEL; + else + dfs_nolinfo->num_chans = num_chans; + + pld_wlan_set_dfs_nol(qdf_dev->dev, (void *)dfs_nolinfo, + (uint16_t)sizeof(*dfs_nolinfo)); + } + qdf_mem_free(dfs_nolinfo); +} +#endif +qdf_export_symbol(utils_dfs_save_nol); + +void utils_dfs_print_nol_channels(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs = NULL; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return; + } + + DFS_PRINT_NOL_LOCKED(dfs); +} +qdf_export_symbol(utils_dfs_print_nol_channels); + +void utils_dfs_clear_nol_channels(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_dfs *dfs = NULL; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return; + } + + /* First print list */ + DFS_PRINT_NOL_LOCKED(dfs); + + /* clear local cache first */ + dfs_nol_timer_cleanup(dfs); + dfs_nol_update(dfs); + + /* + * update platform driver nol list with local cache which is zero, + * cleared in above step, so this will clear list in platform driver. 
+ */ + utils_dfs_save_nol(pdev); +} +qdf_export_symbol(utils_dfs_clear_nol_channels); + +void utils_dfs_reg_update_nol_ch(struct wlan_objmgr_pdev *pdev, + uint8_t *ch_list, + uint8_t num_ch, + bool nol_ch) +{ + /* TODO : Need locking?*/ + wlan_reg_update_nol_ch(pdev, ch_list, num_ch, nol_ch); +} +qdf_export_symbol(utils_dfs_reg_update_nol_ch); + +uint8_t utils_dfs_freq_to_chan(uint32_t freq) +{ + uint8_t chan; + + if (freq == 0) + return 0; + + if (freq > DFS_24_GHZ_BASE_FREQ && freq < DFS_CHAN_14_FREQ) + chan = ((freq - DFS_24_GHZ_BASE_FREQ) / DFS_CHAN_SPACING_5MHZ); + else if (freq == DFS_CHAN_14_FREQ) + chan = DFS_24_GHZ_CHANNEL_14; + else if ((freq > DFS_24_GHZ_BASE_FREQ) && (freq < DFS_5_GHZ_BASE_FREQ)) + chan = (((freq - DFS_CHAN_15_FREQ) / DFS_CHAN_SPACING_20MHZ) + + DFS_24_GHZ_CHANNEL_15); + else + chan = (freq - DFS_5_GHZ_BASE_FREQ) / DFS_CHAN_SPACING_5MHZ; + + return chan; +} +qdf_export_symbol(utils_dfs_freq_to_chan); + +uint32_t utils_dfs_chan_to_freq(uint8_t chan) +{ + if (chan == 0) + return 0; + + if (chan < DFS_24_GHZ_CHANNEL_14) + return DFS_24_GHZ_BASE_FREQ + (chan * DFS_CHAN_SPACING_5MHZ); + else if (chan == DFS_24_GHZ_CHANNEL_14) + return DFS_CHAN_14_FREQ; + else if (chan < DFS_24_GHZ_CHANNEL_27) + return DFS_CHAN_15_FREQ + ((chan - DFS_24_GHZ_CHANNEL_15) * + DFS_CHAN_SPACING_20MHZ); + else if (chan == DFS_5_GHZ_CHANNEL_170) + return DFS_CHAN_170_FREQ; + else + return DFS_5_GHZ_BASE_FREQ + (chan * DFS_CHAN_SPACING_5MHZ); +} +qdf_export_symbol(utils_dfs_chan_to_freq); + +#ifdef QCA_MCL_DFS_SUPPORT +QDF_STATUS utils_dfs_mark_leaking_ch(struct wlan_objmgr_pdev *pdev, + enum phy_ch_width ch_width, + uint8_t temp_ch_lst_sz, + uint8_t *temp_ch_lst) +{ + struct wlan_dfs *dfs = NULL; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return QDF_STATUS_E_FAILURE; + } + + return dfs_mark_leaking_ch(dfs, ch_width, temp_ch_lst_sz, temp_ch_lst); +} 
+qdf_export_symbol(utils_dfs_mark_leaking_ch); +#endif + +int utils_get_dfsdomain(struct wlan_objmgr_pdev *pdev) +{ + enum dfs_reg dfsdomain; + + wlan_reg_get_dfs_region(pdev, &dfsdomain); + + return dfsdomain; +} + +uint16_t utils_dfs_get_cur_rd(struct wlan_objmgr_pdev *pdev) +{ + struct cur_regdmn_info cur_regdmn; + + wlan_reg_get_curr_regdomain(pdev, &cur_regdmn); + + return cur_regdmn.regdmn_pair_id; +} + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS utils_dfs_is_spoof_check_failed(struct wlan_objmgr_pdev *pdev, + bool *is_spoof_check_failed) +{ + struct wlan_dfs *dfs; + + dfs = global_dfs_to_mlme.pdev_get_comp_private_obj(pdev); + if (!dfs) { + dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "null dfs"); + return QDF_STATUS_E_FAILURE; + } + + *is_spoof_check_failed = dfs->dfs_spoof_check_failed; + + return QDF_STATUS_SUCCESS; +} + +qdf_export_symbol(utils_dfs_is_spoof_check_failed); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/inc/wlan_lmac_if_api.h b/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/inc/wlan_lmac_if_api.h new file mode 100644 index 0000000000000000000000000000000000000000..578a6fcaaf4ee0fd47e654ca0e96f4af6faa2738 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/inc/wlan_lmac_if_api.h @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_LMAC_IF_API_H_ +#define _WLAN_LMAC_IF_API_H_ + +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_objmgr_pdev_obj.h" + +/** + * wlan_lmac_if_umac_rx_ops_register() - UMAC rx handler register + * @rx_ops: Pointer to rx_ops structure to be populated + * + * Register umac RX callabacks which will be called by DA/OL/WMA/WMI + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_lmac_if_umac_rx_ops_register + (struct wlan_lmac_if_rx_ops *rx_ops); + +/** + * wlan_lmac_if_set_umac_txops_registration_cb() - tx registration + * callback assignment + * @dev_type: Dev type can be either Direct attach or Offload + * @handler: handler to be called for LMAC tx ops registration + * + * API to assign appropriate tx registration callback handler based on the + * device type(Offload or Direct attach) + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_lmac_if_set_umac_txops_registration_cb + (QDF_STATUS (*handler)(struct wlan_lmac_if_tx_ops *)); + + +/** + * wlan_lmac_if_get_mgmt_txrx_rx_ops() - retrieve the mgmt rx_ops + * @psoc: psoc context + * + * API to retrieve the mgmt rx_ops from the psoc context + * + * Return: mgmt_rx_ops pointer + */ +static inline struct wlan_lmac_if_mgmt_txrx_rx_ops * +wlan_lmac_if_get_mgmt_txrx_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) + return NULL; + + return &psoc->soc_cb.rx_ops.mgmt_txrx_rx_ops; +} + +/** + * wlan_lmac_if_get_dfs_rx_ops() - retrieve the dfs rx_ops + * @psoc: psoc context + * + * API to retrieve the dfs rx_ops from the psoc context + * + * Return: dfs_rx_ops pointer + */ +static inline 
struct wlan_lmac_if_dfs_rx_ops * +wlan_lmac_if_get_dfs_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) + return NULL; + + return &psoc->soc_cb.rx_ops.dfs_rx_ops; +} + +/** + * wlan_lmac_if_get_reg_rx_ops() - retrieve the reg rx_ops + * @psoc: psoc context + * + * API to retrieve the reg rx_ops from the psoc context + * + * Return: reg_rx_ops pointer + */ +static inline struct wlan_lmac_if_reg_rx_ops * +wlan_lmac_if_get_reg_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) + return NULL; + + return &psoc->soc_cb.rx_ops.reg_rx_ops; +} + +#ifdef WLAN_SUPPORT_GREEN_AP +/** + * wlan_lmac_if_get_green_ap_rx_ops() - retrieve the green ap rx_ops + * @psoc: psoc context + * + * API to retrieve the dfs rx_ops from the psoc context + * + * Return: green_ap_rx_ops pointer + */ +static inline struct wlan_lmac_if_green_ap_rx_ops * +wlan_lmac_if_get_green_ap_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + if (!psoc) + return NULL; + + return &psoc->soc_cb.rx_ops.green_ap_rx_ops; +} +#endif + +/** + * mgmt_txrx_get_nbuf() - retrieve nbuf from mgmt desc_id + * @pdev: pdev context + * @desc_id: mgmt desc_id + * + * API to retrieve the nbuf from mgmt desc_id + * + * Return: nbuf + */ +static inline qdf_nbuf_t +mgmt_txrx_get_nbuf(struct wlan_objmgr_pdev *pdev, uint32_t desc_id) +{ + struct wlan_lmac_if_mgmt_txrx_rx_ops *mgmt_rx_ops; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + mgmt_rx_ops = wlan_lmac_if_get_mgmt_txrx_rx_ops(psoc); + + if (mgmt_rx_ops && mgmt_rx_ops->mgmt_txrx_get_nbuf_from_desc_id) + return mgmt_rx_ops->mgmt_txrx_get_nbuf_from_desc_id(pdev, + desc_id); + + return NULL; +} + +/** + * mgmt_txrx_tx_completion_handler() - mgmt tx completion handler + * @pdev: pdev context + * @desc_id: mgmt desc_id + * @status: tx status + * @params: tx params + * + * API to handle the tx completion for mgmt frames + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static inline QDF_STATUS +mgmt_txrx_tx_completion_handler(struct wlan_objmgr_pdev 
*pdev, + uint32_t desc_id, uint32_t status, + void *params) +{ + struct wlan_lmac_if_mgmt_txrx_rx_ops *mgmt_rx_ops; + struct wlan_objmgr_psoc *psoc; + qdf_nbuf_t nbuf; + + psoc = wlan_pdev_get_psoc(pdev); + mgmt_rx_ops = wlan_lmac_if_get_mgmt_txrx_rx_ops(psoc); + + if (mgmt_rx_ops && mgmt_rx_ops->mgmt_tx_completion_handler) + return mgmt_rx_ops->mgmt_tx_completion_handler(pdev, desc_id, + status, params); + + nbuf = mgmt_txrx_get_nbuf(pdev, desc_id); + if (nbuf) + qdf_nbuf_free(nbuf); + + return QDF_STATUS_E_NULL_VALUE; +} + +/** + * mgmt_txrx_rx_handler() - mgmt rx frame handler + * @psoc: psoc context + * @nbuf: nbuf + * @params: rx params + * + * API to receive mgmt frames + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static inline QDF_STATUS +mgmt_txrx_rx_handler(struct wlan_objmgr_psoc *psoc, qdf_nbuf_t nbuf, + void *params) +{ + struct wlan_lmac_if_mgmt_txrx_rx_ops *mgmt_rx_ops; + + mgmt_rx_ops = wlan_lmac_if_get_mgmt_txrx_rx_ops(psoc); + + if (mgmt_rx_ops && mgmt_rx_ops->mgmt_rx_frame_handler) + return mgmt_rx_ops->mgmt_rx_frame_handler(psoc, nbuf, params); + + if (nbuf) + qdf_nbuf_free(nbuf); + + return QDF_STATUS_E_NULL_VALUE; +} + +/** + * mgmt_txrx_get_peer() - retrieve peer from mgmt desc_id + * @pdev: pdev context + * @desc_id: mgmt desc_id + * + * API to retrieve the peer from mgmt desc_id + * + * Return: objmgr peer pointer + */ +static inline struct wlan_objmgr_peer * +mgmt_txrx_get_peer(struct wlan_objmgr_pdev *pdev, uint32_t desc_id) +{ + struct wlan_lmac_if_mgmt_txrx_rx_ops *mgmt_rx_ops; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + mgmt_rx_ops = wlan_lmac_if_get_mgmt_txrx_rx_ops(psoc); + + if (mgmt_rx_ops && mgmt_rx_ops->mgmt_txrx_get_peer_from_desc_id) + return mgmt_rx_ops->mgmt_txrx_get_peer_from_desc_id(pdev, + desc_id); + + return NULL; +} + +/** + * mgmt_txrx_get_vdev_id() - retrieve vdev_id from mgmt desc_id + * @pdev: pdev context + * @desc_id: mgmt desc_id + * + * API to retrieve the vdev_id from 
mgmt desc_id + * + * Return: vdev_id + */ +static inline uint8_t +mgmt_txrx_get_vdev_id(struct wlan_objmgr_pdev *pdev, uint32_t desc_id) +{ + struct wlan_lmac_if_mgmt_txrx_rx_ops *mgmt_rx_ops; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + mgmt_rx_ops = wlan_lmac_if_get_mgmt_txrx_rx_ops(psoc); + + if (mgmt_rx_ops && mgmt_rx_ops->mgmt_txrx_get_vdev_id_from_desc_id) + return mgmt_rx_ops->mgmt_txrx_get_vdev_id_from_desc_id(pdev, + desc_id); + + return WLAN_UMAC_VDEV_ID_MAX; +} +/** + * mgmt_txrx_get_free_desc_count() - retrieve vdev_id from mgmt desc_id + * @pdev: pdev context + * + * API to get the free desc count mgmt desc pool + * + * Return: free_desc_count + */ +static inline uint32_t +mgmt_txrx_get_free_desc_count(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_lmac_if_mgmt_txrx_rx_ops *mgmt_rx_ops; + struct wlan_objmgr_psoc *psoc; + uint32_t free_desc_count = WLAN_INVALID_MGMT_DESC_COUNT; + + psoc = wlan_pdev_get_psoc(pdev); + mgmt_rx_ops = wlan_lmac_if_get_mgmt_txrx_rx_ops(psoc); + + if (mgmt_rx_ops && mgmt_rx_ops->mgmt_txrx_get_free_desc_pool_count) + free_desc_count = mgmt_rx_ops->mgmt_txrx_get_free_desc_pool_count( + pdev); + + return free_desc_count; +} +#endif /* _WLAN_LMAC_IF_API_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/inc/wlan_lmac_if_def.h b/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/inc/wlan_lmac_if_def.h new file mode 100644 index 0000000000000000000000000000000000000000..b07f8840050113d9572fc7aec0ebc3aea6ef8174 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/inc/wlan_lmac_if_def.h @@ -0,0 +1,1283 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WLAN_LMAC_IF_DEF_H_ +#define _WLAN_LMAC_IF_DEF_H_ + +#include +#include "qdf_status.h" +#include "wlan_objmgr_cmn.h" +#ifdef DFS_COMPONENT_ENABLE +#include +#endif +#include "wlan_mgmt_txrx_utils_api.h" +#include "wlan_scan_public_structs.h" + +#ifdef WLAN_ATF_ENABLE +#include "wlan_atf_utils_defs.h" +#endif +#ifdef QCA_SUPPORT_SON +#include +#endif +#ifdef WLAN_SA_API_ENABLE +#include "wlan_sa_api_utils_defs.h" +#endif +#ifdef WLAN_CONV_SPECTRAL_ENABLE +#include "wlan_spectral_public_structs.h" +#endif +#include + +#ifdef WLAN_CONV_CRYPTO_SUPPORTED +#include "wlan_crypto_global_def.h" +#endif + +#include +#include + +/* Number of dev type: Direct attach and Offload */ +#define MAX_DEV_TYPE 2 + +#ifdef WIFI_POS_CONVERGED +/* forward declarations */ +struct oem_data_req; +struct oem_data_rsp; +#endif /* WIFI_POS_CONVERGED */ + +#ifdef DIRECT_BUF_RX_ENABLE +/* forward declarations for direct buf rx */ +struct direct_buf_rx_data; +#endif + +struct scheduler_msg; + +#ifdef CONVERGED_TDLS_ENABLE +#include "wlan_tdls_public_structs.h" +#endif + +#ifdef QCA_SUPPORT_CP_STATS +#include +#endif /* QCA_SUPPORT_CP_STATS */ + +#ifdef QCA_SUPPORT_CP_STATS +/** + * struct wlan_lmac_if_cp_stats_tx_ops - defines southbound tx callbacks for + * control plane statistics component + * @cp_stats_attach: function pointer to register events from FW + * @cp_stats_detach: function pointer to unregister events from FW + */ +struct wlan_lmac_if_cp_stats_tx_ops { 
+ QDF_STATUS (*cp_stats_attach)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*cp_stats_detach)(struct wlan_objmgr_psoc *posc); +#ifdef CONFIG_MCL + void (*inc_wake_lock_stats)(uint32_t reason, + struct wake_lock_stats *stats, + uint32_t *unspecified_wake_count); + QDF_STATUS (*send_req_stats)(struct wlan_objmgr_psoc *psoc, + enum stats_req_type type, + struct request_info *req); +#endif +}; + +/** + * struct wlan_lmac_if_cp_stats_rx_ops - defines southbound rx callbacks for + * control plane statistics component + * @cp_stats_rx_event_handler: function pointer to rx FW events + */ +struct wlan_lmac_if_cp_stats_rx_ops { + QDF_STATUS (*cp_stats_rx_event_handler)(struct wlan_objmgr_vdev *vdev); +#ifdef CONFIG_MCL + QDF_STATUS (*process_stats_event)(struct wlan_objmgr_psoc *psoc, + struct stats_event *ev); +#endif +}; +#endif + +/** + * struct wlan_lmac_if_mgmt_txrx_tx_ops - structure of tx function + * pointers for mgmt txrx component + * @mgmt_tx_send: function pointer to transmit mgmt tx frame + * @beacon_send: function pointer to transmit beacon frame + * @fd_action_frame_send: function pointer to transmit FD action frame + * @tx_drain_nbuf_op: function pointer for any umac nbuf realted ops for + * pending mgmt frames cleanup + */ +struct wlan_lmac_if_mgmt_txrx_tx_ops { + QDF_STATUS (*mgmt_tx_send)(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t nbuf, u_int32_t desc_id, + void *mgmt_tx_params); + QDF_STATUS (*beacon_send)(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t nbuf); + QDF_STATUS (*fd_action_frame_send)(struct wlan_objmgr_vdev *vdev, + qdf_nbuf_t nbuf); + void (*tx_drain_nbuf_op)(struct wlan_objmgr_pdev *pdev, + qdf_nbuf_t nbuf); +}; + +/** + * struct wlan_lmac_if_scan_tx_ops - south bound tx function pointers for scan + * @scan_start: function to start scan + * @scan_cancel: function to cancel scan + * @pno_start: start pno scan + * @pno_stop: stop pno scan + * @scan_reg_ev_handler: function to register for scan events + * @scan_unreg_ev_handler: function to 
unregister for scan events + * + * scan module uses these functions to avail ol/da lmac services + */ +struct wlan_lmac_if_scan_tx_ops { + QDF_STATUS (*scan_start)(struct wlan_objmgr_pdev *pdev, + struct scan_start_request *req); + QDF_STATUS (*scan_cancel)(struct wlan_objmgr_pdev *pdev, + struct scan_cancel_param *req); + QDF_STATUS (*pno_start)(struct wlan_objmgr_psoc *psoc, + struct pno_scan_req_params *req); + QDF_STATUS (*pno_stop)(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id); + QDF_STATUS (*scan_reg_ev_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*scan_unreg_ev_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*set_chan_list)(struct wlan_objmgr_pdev *pdev, void *arg); +}; + +/** + * struct wlan_lmac_if_ftm_tx_ops - south bound tx function pointers for ftm + * @ftm_attach: function to register event handlers with FW + * @ftm_detach: function to de-register event handlers with FW + * @ftm_cmd_send: function to send FTM commands to FW + * + * ftm module uses these functions to avail ol/da lmac services + */ +struct wlan_lmac_if_ftm_tx_ops { + QDF_STATUS (*ftm_attach)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*ftm_detach)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*ftm_cmd_send)(struct wlan_objmgr_pdev *pdev, + uint8_t *buf, uint32_t len, uint8_t mac_id); +}; + + +struct wlan_lmac_if_mlme_tx_ops { + void (*scan_sta_power_events)(struct wlan_objmgr_pdev *pdev, + int event_type, int event_status); + void (*scan_connection_lost)(struct wlan_objmgr_pdev *pdev); + void (*scan_end)(struct wlan_objmgr_pdev *pdev); + uint32_t (*get_wifi_iface_id) (struct wlan_objmgr_pdev *pdev); +}; + +/** + * struct wlan_lmac_if_scan_rx_ops - south bound rx function pointers for scan + * @scan_ev_handler: scan event handler + * @scan_set_max_active_scans: set max active scans allowed + * + * lmac modules uses this API to post scan events to scan module + */ +struct wlan_lmac_if_scan_rx_ops { + QDF_STATUS (*scan_ev_handler)(struct 
wlan_objmgr_psoc *psoc, + struct scan_event_info *event_info); + QDF_STATUS (*scan_set_max_active_scans)(struct wlan_objmgr_psoc *psoc, + uint32_t max_active_scans); +}; + +#ifdef CONVERGED_P2P_ENABLE + +/* forward declarations for p2p tx ops */ +struct p2p_ps_config; +struct p2p_lo_start; +struct p2p_set_mac_filter; + +/** + * struct wlan_lmac_if_p2p_tx_ops - structure of tx function pointers + * for P2P component + * @set_ps: function pointer to set power save + * @lo_start: function pointer to start listen offload + * @lo_stop: function pointer to stop listen offload + * @set_noa: function pointer to disable/enable NOA + * @reg_lo_ev_handler: function pointer to register lo event handler + * @reg_noa_ev_handler: function pointer to register noa event handler + * @unreg_lo_ev_handler: function pointer to unregister lo event handler + * @unreg_noa_ev_handler:function pointer to unregister noa event handler + * @reg_mac_addr_rx_filter_handler: function pointer to register/unregister + * set mac addr status event callback. 
+ * @set_mac_addr_rx_filter_cmd: function pointer to set mac addr rx filter + */ +struct wlan_lmac_if_p2p_tx_ops { + QDF_STATUS (*set_ps)(struct wlan_objmgr_psoc *psoc, + struct p2p_ps_config *ps_config); + QDF_STATUS (*lo_start)(struct wlan_objmgr_psoc *psoc, + struct p2p_lo_start *lo_start); + QDF_STATUS (*lo_stop)(struct wlan_objmgr_psoc *psoc, + uint32_t vdev_id); + QDF_STATUS (*set_noa)(struct wlan_objmgr_psoc *psoc, + uint32_t vdev_id, bool disable_noa); + QDF_STATUS (*reg_lo_ev_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*reg_noa_ev_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*unreg_lo_ev_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*unreg_noa_ev_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*reg_mac_addr_rx_filter_handler)( + struct wlan_objmgr_psoc *psoc, bool reg); + QDF_STATUS (*set_mac_addr_rx_filter_cmd)( + struct wlan_objmgr_psoc *psoc, + struct p2p_set_mac_filter *param); +}; +#endif + +#ifdef WLAN_ATF_ENABLE + +/** + * struct wlan_lmac_if_atf_tx_ops - ATF specific tx function pointers + * @atf_update_peer_txtoken: Update peer Tx token + * @atf_set_enable_disable: Set atf enable/disable + * @atf_tokens_used: Get used atf tokens + * @atf_get_unused_txtoken: Get unused atf tokens + * @atf_peer_resume: Resume peer + * @atf_tokens_unassigned: Set unassigned atf tockens + * @atf_capable_peer: Set atf state change + * @atf_airtime_estimate: Get estimated airtime + * @atf_debug_peerstate: Get peer state + * @atf_enable_disable: Set atf peer stats enable/disable + * @atf_ssid_sched_policy: Set ssid schedule policy + * @atf_set: Set atf + * @atf_set_grouping: Set atf grouping + * @atf_send_peer_request: Send peer requests + * @atf_set_bwf: Set bandwidth fairness + * @atf_peer_buf_held: Get buffer held + * @atf_get_peer_airtime: Get peer airtime + * @atf_get_chbusyper: Get channel busy + * @atf_open: ATF open + * @atf_register_event_handler ATF register wmi event 
handlers + * @atf_unregister_event_handler ATF unregister wmi event handlers + */ +struct wlan_lmac_if_atf_tx_ops { + void (*atf_update_peer_txtoken)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + struct atf_stats *stats); + void (*atf_set_enable_disable)(struct wlan_objmgr_pdev *pdev, + uint8_t value); + uint8_t (*atf_tokens_used)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer); + void (*atf_get_unused_txtoken)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + int *unused_token); + void (*atf_peer_resume)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer); + void (*atf_tokens_unassigned)(struct wlan_objmgr_pdev *pdev, + uint32_t tokens_unassigned); + void (*atf_capable_peer)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + uint8_t val, uint8_t atfstate_change); + uint32_t (*atf_airtime_estimate)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + uint32_t tput, + uint32_t *possible_tput); + uint32_t (*atf_debug_peerstate)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer); + + int32_t (*atf_enable_disable)(struct wlan_objmgr_vdev *vdev, + uint8_t value); + int32_t (*atf_ssid_sched_policy)(struct wlan_objmgr_vdev *vdev, + uint8_t value); + int32_t (*atf_set)(struct wlan_objmgr_pdev *pdev, + struct pdev_atf_req *atf_req, + uint8_t atf_tput_based); + int32_t (*atf_set_grouping)(struct wlan_objmgr_pdev *pdev, + struct pdev_atf_ssid_group_req *atf_grp_req, + uint8_t atf_tput_based); + int32_t (*atf_send_peer_request)(struct wlan_objmgr_pdev *pdev, + struct pdev_atf_peer_ext_request *atfr, + uint8_t atf_tput_based); + int32_t (*atf_set_bwf)(struct wlan_objmgr_pdev *pdev, + struct pdev_bwf_req *bwf_req); + uint32_t (*atf_peer_buf_held)(struct wlan_objmgr_peer *peer); + uint32_t (*atf_get_peer_airtime)(struct wlan_objmgr_peer *peer); + uint32_t (*atf_get_chbusyper)(struct wlan_objmgr_pdev *pdev); + void (*atf_open)(struct wlan_objmgr_psoc *psoc); + void 
(*atf_register_event_handler)(struct wlan_objmgr_psoc *psoc); + void (*atf_unregister_event_handler)(struct wlan_objmgr_psoc *psoc); +}; +#endif + +#ifdef WLAN_SUPPORT_FILS +/** + * struct wlan_lmac_if_fd_tx_ops - FILS Discovery specific Tx function pointers + * @fd_vdev_config_fils: Enable and configure FILS Discovery + * @fd_register_event_handler: Register swfda WMI event handler + * @fd_unregister_event_handler: Un-register swfda WMI event handler + */ +struct wlan_lmac_if_fd_tx_ops { + QDF_STATUS (*fd_vdev_config_fils)(struct wlan_objmgr_vdev *vdev, + uint32_t fd_period); + void (*fd_register_event_handler)(struct wlan_objmgr_psoc *psoc); + void (*fd_unregister_event_handler)(struct wlan_objmgr_psoc *psoc); +}; +#endif + +#ifdef WLAN_SA_API_ENABLE + +/** + * struct wlan_lmac_if_sa_api_tx_ops - SA API specific tx function pointers + */ + +struct wlan_lmac_if_sa_api_tx_ops { + void (*sa_api_register_event_handler)(struct wlan_objmgr_psoc *psoc); + void (*sa_api_unregister_event_handler)(struct wlan_objmgr_psoc *posc); + void (*sa_api_enable_sa) (struct wlan_objmgr_pdev *pdev, + uint32_t enable, uint32_t mode, uint32_t rx_antenna); + void (*sa_api_set_rx_antenna) (struct wlan_objmgr_pdev *pdev, + uint32_t antenna); + void (*sa_api_set_tx_antenna) (struct wlan_objmgr_peer *peer, + uint32_t *antenna_array); + void (*sa_api_set_tx_default_antenna) (struct wlan_objmgr_pdev *pdev, + u_int32_t antenna); + void (*sa_api_set_training_info) (struct wlan_objmgr_peer *peer, + uint32_t *rate_array, + uint32_t *antenna_array, + uint32_t numpkts); + void (*sa_api_prepare_rateset)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + struct sa_rate_info *rate_info); + void (*sa_api_set_node_config_ops) (struct wlan_objmgr_peer *peer, + uint32_t cmd_id, uint16_t args_count, + u_int32_t args_arr[]); +}; + +#endif + +#ifdef WLAN_CONV_SPECTRAL_ENABLE +struct wmi_spectral_cmd_ops; +/** + * struct wlan_lmac_if_sptrl_tx_ops - Spectral south bound Tx operations + * 
@sptrlto_spectral_init: Initialize LMAC/target_if Spectral + * @sptrlto_spectral_deinit: De-initialize LMAC/target_if Spectral + * @sptrlto_set_spectral_config: Set Spectral configuration + * @sptrlto_get_spectral_config: Get Spectral configuration + * @sptrlto_start_spectral_scan: Start Spectral Scan + * @sptrlto_stop_spectral_scan: Stop Spectral Scan + * @sptrlto_is_spectral_active: Get whether Spectral is active + * @sptrlto_is_spectral_enabled: Get whether Spectral is enabled + * @sptrlto_set_icm_active: Set whether ICM is active or inactive + * @sptrlto_get_icm_active: Get whether ICM is active or inactive + * @sptrlto_get_nominal_nf: Get Nominal Noise Floor for the current + * frequency band + * @sptrlto_set_debug_level: Set Spectral debug level + * @sptrlto_get_debug_level: Get Spectral debug level + * @sptrlto_get_chaninfo: Get channel information + * @sptrlto_clear_chaninfo: Clear channel information + * @sptrlto_get_spectral_capinfo: Get Spectral capability information + * @sptrlto_get_spectral_diagstats: Get Spectral diagnostic statistics + * @sptrlto_register_netlink_cb: Register Spectral Netlink callbacks + * @sptrlto_use_nl_bcast: Get whether to use Netlink broadcast/unicast + * @sptrlto_deregister_netlink_cb: De-register Spectral Netlink callbacks + * @sptrlto_process_spectral_report: Process spectral report + **/ +struct wlan_lmac_if_sptrl_tx_ops { + void *(*sptrlto_pdev_spectral_init)(struct wlan_objmgr_pdev *pdev); + void (*sptrlto_pdev_spectral_deinit)(struct wlan_objmgr_pdev *pdev); + int (*sptrlto_set_spectral_config)(struct wlan_objmgr_pdev *pdev, + const u_int32_t threshtype, + const u_int32_t value); + void (*sptrlto_get_spectral_config)(struct wlan_objmgr_pdev *pdev, + struct spectral_config *sptrl_config + ); + int (*sptrlto_start_spectral_scan)(struct wlan_objmgr_pdev *pdev); + void (*sptrlto_stop_spectral_scan)(struct wlan_objmgr_pdev *pdev); + bool (*sptrlto_is_spectral_active)(struct wlan_objmgr_pdev *pdev); + bool 
(*sptrlto_is_spectral_enabled)(struct wlan_objmgr_pdev *pdev); + int (*sptrlto_set_debug_level)(struct wlan_objmgr_pdev *pdev, + u_int32_t debug_level); + u_int32_t (*sptrlto_get_debug_level)(struct wlan_objmgr_pdev *pdev); + void (*sptrlto_get_spectral_capinfo)(struct wlan_objmgr_pdev *pdev, + void *outdata); + void (*sptrlto_get_spectral_diagstats)(struct wlan_objmgr_pdev *pdev, + void *outdata); + void (*sptrlto_register_wmi_spectral_cmd_ops)( + struct wlan_objmgr_pdev *pdev, + struct wmi_spectral_cmd_ops *cmd_ops); + void (*sptrlto_register_netlink_cb)( + struct wlan_objmgr_pdev *pdev, + struct spectral_nl_cb *nl_cb); + bool (*sptrlto_use_nl_bcast)(struct wlan_objmgr_pdev *pdev); + void (*sptrlto_deregister_netlink_cb)(struct wlan_objmgr_pdev *pdev); + int (*sptrlto_process_spectral_report)( + struct wlan_objmgr_pdev *pdev, + void *payload); +}; +#endif /* WLAN_CONV_SPECTRAL_ENABLE */ + +#ifdef WIFI_POS_CONVERGED +/* + * struct wlan_lmac_if_wifi_pos_tx_ops - structure of firmware tx function + * pointers for wifi_pos component + * @data_req_tx: function pointer to send wifi_pos req to firmware + */ +struct wlan_lmac_if_wifi_pos_tx_ops { + QDF_STATUS (*data_req_tx)(struct wlan_objmgr_psoc *psoc, + struct oem_data_req *req); +}; +#endif + +#ifdef DIRECT_BUF_RX_ENABLE +/** + * struct wlan_lmac_if_direct_buf_rx_tx_ops - structire of direct buf rx txops + * @direct_buf_rx_module_register: Registration API callback for modules + * to register with direct buf rx framework + * @direct_buf_rx_register_events: Registration of WMI events for direct + * buffer rx framework + * @direct_buf_rx_unregister_events: Unregistraton of WMI events for direct + * buffer rx framework + */ +struct wlan_lmac_if_direct_buf_rx_tx_ops { + QDF_STATUS (*direct_buf_rx_module_register)( + struct wlan_objmgr_pdev *pdev, uint8_t mod_id, + int (*dbr_rsp_handler)(struct wlan_objmgr_pdev *pdev, + struct direct_buf_rx_data *dbr_data)); + QDF_STATUS (*direct_buf_rx_register_events)( + struct 
wlan_objmgr_psoc *psoc); + QDF_STATUS (*direct_buf_rx_unregister_events)( + struct wlan_objmgr_psoc *psoc); +}; +#endif + +#ifdef CONVERGED_TDLS_ENABLE +/* fwd declarations for tdls tx ops */ +struct tdls_info; +struct tdls_peer_update_state; +struct tdls_channel_switch_params; +struct sta_uapsd_trig_params; +/** + * struct wlan_lmac_if_tdls_tx_ops - south bound tx function pointers for tdls + * @update_fw_state: function to update tdls firmware state + * @update_peer_state: function to update tdls peer state + * @set_offchan_mode: function to set tdls offchannel mode + * @tdls_reg_ev_handler: function to register for tdls events + * @tdls_unreg_ev_handler: function to unregister for tdls events + * @tdls_set_uapsd: function to set upasdt trigger command + * + * tdls module uses these functions to avail ol/da lmac services + */ +struct wlan_lmac_if_tdls_tx_ops { + QDF_STATUS (*update_fw_state)(struct wlan_objmgr_psoc *psoc, + struct tdls_info *req); + QDF_STATUS (*update_peer_state)(struct wlan_objmgr_psoc *psoc, + struct tdls_peer_update_state *param); + QDF_STATUS (*set_offchan_mode)(struct wlan_objmgr_psoc *psoc, + struct tdls_channel_switch_params *param); + QDF_STATUS (*tdls_reg_ev_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*tdls_unreg_ev_handler) (struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*tdls_set_uapsd)(struct wlan_objmgr_psoc *psoc, + struct sta_uapsd_trig_params *params); +}; + +/* fwd declarations for tdls rx ops */ +struct tdls_event_info; +/** + * struct wlan_lmac_if_tdls_rx_ops - south bound rx function pointers for tdls + * @tdls_ev_handler: function to handler tdls event + * + * lmac modules uses this API to post scan events to tdls module + */ +struct wlan_lmac_if_tdls_rx_ops { + QDF_STATUS (*tdls_ev_handler)(struct wlan_objmgr_psoc *psoc, + struct tdls_event_info *info); +}; +#endif + +#ifdef WLAN_FEATURE_NAN_CONVERGENCE +/** + * struct wlan_lmac_if_nan_tx_ops - structure of firwware tx function + * 
pointers for nan component + * @data_req_tx: function pointer to send nan req to firmware + */ +struct wlan_lmac_if_nan_tx_ops { + QDF_STATUS (*nan_req_tx)(void *req, uint32_t req_id); +}; +#endif + +/** + * struct wlan_lmac_if_ftm_rx_ops - south bound rx function pointers for FTM + * @ftm_ev_handler: function to handle FTM event + * + * lmac modules uses this API to post FTM events to FTM module + */ +struct wlan_lmac_if_ftm_rx_ops { + QDF_STATUS (*ftm_ev_handler)(struct wlan_objmgr_pdev *pdev, + uint8_t *event_buf, uint32_t len); +}; + +/** + * struct wlan_lmac_reg_if_tx_ops - structure of tx function + * pointers for regulatory component + * @register_master_handler: pointer to register event handler + * @unregister_master_handler: pointer to unregister event handler + * @register_11d_new_cc_handler: pointer to register 11d cc event handler + * @unregister_11d_new_cc_handler: pointer to unregister 11d cc event handler + */ +struct wlan_lmac_if_reg_tx_ops { + QDF_STATUS (*register_master_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*unregister_master_handler)(struct wlan_objmgr_psoc *psoc, + void *arg); + + QDF_STATUS (*set_country_code)(struct wlan_objmgr_psoc *psoc, + void *arg); + QDF_STATUS (*fill_umac_legacy_chanlist)(struct wlan_objmgr_pdev *pdev, + struct regulatory_channel *cur_chan_list); + QDF_STATUS (*register_11d_new_cc_handler)( + struct wlan_objmgr_psoc *psoc, void *arg); + QDF_STATUS (*unregister_11d_new_cc_handler)( + struct wlan_objmgr_psoc *psoc, void *arg); + QDF_STATUS (*start_11d_scan)(struct wlan_objmgr_psoc *psoc, + struct reg_start_11d_scan_req *reg_start_11d_scan_req); + QDF_STATUS (*stop_11d_scan)(struct wlan_objmgr_psoc *psoc, + struct reg_stop_11d_scan_req *reg_stop_11d_scan_req); + bool (*is_there_serv_ready_extn)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*set_user_country_code)(struct wlan_objmgr_psoc *psoc, + uint8_t pdev_id, + struct cc_regdmn_s *rd); + QDF_STATUS (*set_country_failed)(struct 
wlan_objmgr_pdev *pdev); + QDF_STATUS (*register_ch_avoid_event_handler)( + struct wlan_objmgr_psoc *psoc, void *arg); + QDF_STATUS (*unregister_ch_avoid_event_handler)( + struct wlan_objmgr_psoc *psoc, void *arg); +}; + +/** + * struct wlan_lmac_if_dfs_tx_ops - Function pointer to call offload/lmac + * functions from DFS module. + * @dfs_enable: Enable DFS. + * @dfs_get_caps: Get DFS capabilities. + * @dfs_disable: Disable DFS + * @dfs_gettsf64: Get tsf64 value. + * @dfs_set_use_cac_prssi: Set use_cac_prssi value. + * @dfs_get_dfsdomain: Get DFS domain. + * @dfs_is_countryCode_CHINA: Check is country code CHINA. + * @dfs_get_thresholds: Get thresholds. + * @dfs_get_ext_busy: Get ext_busy. + * @dfs_get_target_type: Get target type. + * @dfs_is_countryCode_KOREA_ROC3: Check is county code Korea. + * @dfs_get_ah_devid: Get ah devid. + * @dfs_get_phymode_info: Get phymode info. + * @dfs_reg_ev_handler: Register dfs event handler. + * @dfs_process_emulate_bang_radar_cmd: Process emulate bang radar test command. + * @dfs_is_pdev_5ghz: Check if the given pdev is 5GHz. + * @dfs_set_phyerr_filter_offload: Config phyerr filter offload. + * @dfs_send_offload_enable_cmd: Send dfs offload enable command to fw. + * @dfs_host_dfs_check_support: To check Host DFS confirmation feature + * support. + * @dfs_send_avg_radar_params_to_fw: Send average radar parameters to FW. 
+ */ + +struct wlan_lmac_if_dfs_tx_ops { + QDF_STATUS (*dfs_enable)(struct wlan_objmgr_pdev *pdev, + int *is_fastclk, + struct wlan_dfs_phyerr_param *param, + uint32_t dfsdomain); + QDF_STATUS (*dfs_get_caps)(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_caps *dfs_caps); + QDF_STATUS (*dfs_disable)(struct wlan_objmgr_pdev *pdev, + int no_cac); + QDF_STATUS (*dfs_gettsf64)(struct wlan_objmgr_pdev *pdev, + uint64_t *tsf64); + QDF_STATUS (*dfs_set_use_cac_prssi)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*dfs_get_thresholds)(struct wlan_objmgr_pdev *pdev, + struct wlan_dfs_phyerr_param *param); + QDF_STATUS (*dfs_get_ext_busy)(struct wlan_objmgr_pdev *pdev, + int *dfs_ext_chan_busy); + QDF_STATUS (*dfs_get_target_type)(struct wlan_objmgr_pdev *pdev, + uint32_t *target_type); + QDF_STATUS (*dfs_get_ah_devid)(struct wlan_objmgr_pdev *pdev, + uint16_t *devid); + QDF_STATUS (*dfs_get_phymode_info)(struct wlan_objmgr_pdev *pdev, + uint32_t chan_mode, + uint32_t *mode_info, + bool is_2gvht_en); + QDF_STATUS (*dfs_reg_ev_handler)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*dfs_process_emulate_bang_radar_cmd)( + struct wlan_objmgr_pdev *pdev, + struct dfs_emulate_bang_radar_test_cmd *dfs_unit_test); + QDF_STATUS (*dfs_is_pdev_5ghz)(struct wlan_objmgr_pdev *pdev, + bool *is_5ghz); + QDF_STATUS (*dfs_set_phyerr_filter_offload)( + struct wlan_objmgr_pdev *pdev, + bool dfs_phyerr_filter_offload); + bool (*dfs_is_tgt_offload)(struct wlan_objmgr_psoc *psoc); + QDF_STATUS (*dfs_send_offload_enable_cmd)( + struct wlan_objmgr_pdev *pdev, + bool enable); + QDF_STATUS (*dfs_host_dfs_check_support)(struct wlan_objmgr_pdev *pdev, + bool *enabled); + QDF_STATUS (*dfs_send_avg_radar_params_to_fw)( + struct wlan_objmgr_pdev *pdev, + struct dfs_radar_found_params *params); +}; + +/** + * struct wlan_lmac_if_target_tx_ops - Function pointers to call target + * functions from other modules. + * @tgt_is_tgt_type_ar900b: To check AR900B target type. 
+ * @tgt_is_tgt_type_ipq4019: To check IPQ4019 target type. + * @tgt_is_tgt_type_qca9984: To check QCA9984 target type. + * @tgt_is_tgt_type_qca9888: To check QCA9888 target type. + * @tgt_get_tgt_type: Get target type + * @tgt_get_tgt_version: Get target version + * @tgt_get_tgt_revision: Get target revision + */ +struct wlan_lmac_if_target_tx_ops { + bool (*tgt_is_tgt_type_ar900b)(uint32_t); + bool (*tgt_is_tgt_type_ipq4019)(uint32_t); + bool (*tgt_is_tgt_type_qca9984)(uint32_t); + bool (*tgt_is_tgt_type_qca9888)(uint32_t); + uint32_t (*tgt_get_tgt_type)(struct wlan_objmgr_psoc *psoc); + uint32_t (*tgt_get_tgt_version)(struct wlan_objmgr_psoc *psoc); + uint32_t (*tgt_get_tgt_revision)(struct wlan_objmgr_psoc *psoc); +}; + +#ifdef WLAN_OFFCHAN_TXRX_ENABLE +/** + * struct wlan_lmac_if_offchan_txrx_ops - Function pointers to check target + * capabilities related to offchan txrx. + * @offchan_data_tid_support: To check if target supports separate tid for + * offchan data tx. + */ +struct wlan_lmac_if_offchan_txrx_ops { + bool (*offchan_data_tid_support)(struct wlan_objmgr_pdev *pdev); +}; +#endif + +#ifdef WLAN_SUPPORT_GREEN_AP +struct wlan_green_ap_egap_params; +/** + * struct wlan_lmac_if_green_ap_tx_ops - structure of tx function + * pointers for green ap component + * @enable_egap: function pointer to send enable egap indication to fw + * @ps_on_off_send: function pointer to send enable/disable green ap ps to fw + */ +struct wlan_lmac_if_green_ap_tx_ops { + QDF_STATUS (*enable_egap)(struct wlan_objmgr_pdev *pdev, + struct wlan_green_ap_egap_params *egap_params); + QDF_STATUS (*ps_on_off_send)(struct wlan_objmgr_pdev *pdev, + bool value, uint8_t pdev_id); + QDF_STATUS (*reset_dev)(struct wlan_objmgr_pdev *pdev); + uint16_t (*get_current_channel)(struct wlan_objmgr_pdev *pdev); + uint64_t (*get_current_channel_flags)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*get_capab)(struct wlan_objmgr_pdev *pdev); +}; +#endif + +/** + * struct wlan_lmac_if_tx_ops - south 
bound tx function pointers + * @mgmt_txrx_tx_ops: mgmt txrx tx ops + * @scan: scan tx ops + * @dfs_tx_ops: dfs tx ops. + * @green_ap_tx_ops: green_ap tx_ops + * @cp_stats_tx_ops: cp stats tx_ops + * + * Callback function tabled to be registered with umac. + * umac will use the functional table to send events/frames to lmac/wmi + */ + +struct wlan_lmac_if_tx_ops { + /* Components to declare function pointers required by the module + * in component specific structure. + * The component specific ops structure can be declared in this file + * only + */ + struct wlan_lmac_if_mgmt_txrx_tx_ops mgmt_txrx_tx_ops; + struct wlan_lmac_if_scan_tx_ops scan; +#ifdef CONVERGED_P2P_ENABLE + struct wlan_lmac_if_p2p_tx_ops p2p; +#endif +#ifdef QCA_SUPPORT_SON + struct wlan_lmac_if_son_tx_ops son_tx_ops; +#endif + +#ifdef WLAN_ATF_ENABLE + struct wlan_lmac_if_atf_tx_ops atf_tx_ops; +#endif +#ifdef QCA_SUPPORT_CP_STATS + struct wlan_lmac_if_cp_stats_tx_ops cp_stats_tx_ops; +#endif +#ifdef WLAN_SA_API_ENABLE + struct wlan_lmac_if_sa_api_tx_ops sa_api_tx_ops; +#endif + +#ifdef WLAN_CONV_SPECTRAL_ENABLE + struct wlan_lmac_if_sptrl_tx_ops sptrl_tx_ops; +#endif + +#ifdef WLAN_CONV_CRYPTO_SUPPORTED + struct wlan_lmac_if_crypto_tx_ops crypto_tx_ops; +#endif + +#ifdef WIFI_POS_CONVERGED + struct wlan_lmac_if_wifi_pos_tx_ops wifi_pos_tx_ops; +#endif +#ifdef WLAN_FEATURE_NAN_CONVERGENCE + struct wlan_lmac_if_nan_tx_ops nan_tx_ops; +#endif + struct wlan_lmac_if_reg_tx_ops reg_ops; + struct wlan_lmac_if_dfs_tx_ops dfs_tx_ops; + +#ifdef CONVERGED_TDLS_ENABLE + struct wlan_lmac_if_tdls_tx_ops tdls_tx_ops; +#endif + +#ifdef WLAN_SUPPORT_FILS + struct wlan_lmac_if_fd_tx_ops fd_tx_ops; +#endif + struct wlan_lmac_if_mlme_tx_ops mops; + struct wlan_lmac_if_target_tx_ops target_tx_ops; + +#ifdef WLAN_OFFCHAN_TXRX_ENABLE + struct wlan_lmac_if_offchan_txrx_ops offchan_txrx_ops; +#endif + +#ifdef DIRECT_BUF_RX_ENABLE + struct wlan_lmac_if_direct_buf_rx_tx_ops dbr_tx_ops; +#endif + +#ifdef 
WLAN_SUPPORT_GREEN_AP + struct wlan_lmac_if_green_ap_tx_ops green_ap_tx_ops; +#endif + + struct wlan_lmac_if_ftm_tx_ops ftm_tx_ops; +}; + +/** + * struct wlan_lmac_if_mgmt_txrx_rx_ops - structure of rx function + * pointers for mgmt txrx component + * @mgmt_tx_completion_handler: function pointer to give tx completions + * to mgmt txrx comp. + * @mgmt_rx_frame_handler: function pointer to give rx frame to mgmt txrx comp. + * @mgmt_txrx_get_nbuf_from_desc_id: function pointer to get nbuf from desc id + * @mgmt_txrx_get_peer_from_desc_id: function pointer to get peer from desc id + * @mgmt_txrx_get_vdev_id_from_desc_id: function pointer to get vdev id from + * desc id + */ +struct wlan_lmac_if_mgmt_txrx_rx_ops { + QDF_STATUS (*mgmt_tx_completion_handler)( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id, uint32_t status, + void *tx_compl_params); + QDF_STATUS (*mgmt_rx_frame_handler)( + struct wlan_objmgr_psoc *psoc, + qdf_nbuf_t buf, + struct mgmt_rx_event_params *mgmt_rx_params); + qdf_nbuf_t (*mgmt_txrx_get_nbuf_from_desc_id)( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id); + struct wlan_objmgr_peer * (*mgmt_txrx_get_peer_from_desc_id)( + struct wlan_objmgr_pdev *pdev, uint32_t desc_id); + uint8_t (*mgmt_txrx_get_vdev_id_from_desc_id)( + struct wlan_objmgr_pdev *pdev, + uint32_t desc_id); + uint32_t (*mgmt_txrx_get_free_desc_pool_count)( + struct wlan_objmgr_pdev *pdev); +}; + +struct wlan_lmac_if_reg_rx_ops { + QDF_STATUS (*master_list_handler)(struct cur_regulatory_info + *reg_info); + QDF_STATUS (*reg_11d_new_cc_handler)(struct wlan_objmgr_psoc *psoc, + struct reg_11d_new_country *reg_11d_new_cc); + QDF_STATUS (*reg_set_regdb_offloaded)(struct wlan_objmgr_psoc *psoc, + bool val); + QDF_STATUS (*reg_set_11d_offloaded)(struct wlan_objmgr_psoc *psoc, + bool val); + QDF_STATUS (*get_dfs_region)(struct wlan_objmgr_pdev *pdev, + enum dfs_reg *dfs_reg); + QDF_STATUS (*reg_ch_avoid_event_handler)(struct wlan_objmgr_psoc *psoc, + struct ch_avoid_ind_type 
*ch_avoid_ind); + uint32_t (*reg_freq_to_chan)(struct wlan_objmgr_pdev *pdev, + uint32_t freq); + QDF_STATUS (*reg_set_chan_144)(struct wlan_objmgr_pdev *pdev, + bool enable_ch_144); + bool (*reg_get_chan_144)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*reg_program_default_cc)(struct wlan_objmgr_pdev *pdev, + uint16_t regdmn); + QDF_STATUS (*reg_get_current_regdomain)(struct wlan_objmgr_pdev *pdev, + struct cur_regdmn_info *cur_regdmn); +}; + +#ifdef CONVERGED_P2P_ENABLE + +/* forward declarations for p2p rx ops */ +struct p2p_noa_info; +struct p2p_lo_event; +struct p2p_set_mac_filter_evt; + +/** + * struct wlan_lmac_if_p2p_rx_ops - structure of rx function pointers + * for P2P component + * @lo_ev_handler: function pointer to give listen offload event + * @noa_ev_handler: function pointer to give noa event + * @add_mac_addr_filter_evt_handler: function pointer to process add mac addr + * rx filter event + */ +struct wlan_lmac_if_p2p_rx_ops { + QDF_STATUS (*lo_ev_handler)(struct wlan_objmgr_psoc *psoc, + struct p2p_lo_event *event_info); + QDF_STATUS (*noa_ev_handler)(struct wlan_objmgr_psoc *psoc, + struct p2p_noa_info *event_info); + QDF_STATUS (*add_mac_addr_filter_evt_handler)( + struct wlan_objmgr_psoc *psoc, + struct p2p_set_mac_filter_evt *event_info); + +}; +#endif + +#ifdef WLAN_ATF_ENABLE + +/** + * struct wlan_lmac_if_atf_rx_ops - ATF south bound rx function pointers + * @atf_get_atf_commit: Get ATF commit state + * @atf_get_fmcap: Get firmware capability for ATF + * @atf_get_obss_scale: Get OBSS scale + * @atf_get_mode: Get mode of ATF + * @atf_get_msdu_desc: Get msdu desc for ATF + * @atf_get_max_vdevs: Get maximum vdevs for a Radio + * @atf_get_peers: Get number of peers for a radio + * @atf_get_tput_based: Get throughput based enabled/disabled + * @atf_get_logging: Get logging enabled/disabled + * @atf_get_txbuf_share: Get TxBuff share state + * @atf_get_txbuf_max: Get TxBuff MAX number + * @atf_get_txbuf_min: Get TxBuff MIN number + * 
@atf_get_ssidgroup: Get ssid group state + * @atf_get_tx_block_count: Get tx block count + * @atf_get_peer_blk_txtraffic: Get peer tx traffic block state + * @atf_get_vdev_blk_txtraffic: Get vdev tx traffic block state + * @atf_get_sched: Get ATF scheduled policy + * @atf_get_tx_tokens: Get Tx tokens + * @atf_get_shadow_tx_tokens: Get shadow tx tokens + * @atf_get_txtokens_common: Get common tx tokens + * @atf_get_shadow_alloted_tx_tokens: Get shadow alloted tx tokens + * @atf_get_peer_stats: Get atf peer stats + * @atf_get_token_allocated: Get atf token allocated + * @atf_get_token_utilized: Get atf token utilized + * @atf_set_sched: Set ATF schedule policy + * @atf_set_fmcap: Set firmware capability for ATF + * @atf_set_obss_scale: Set ATF obss scale + * @atf_set_mode: Set ATF mode + * @atf_set_msdu_desc: Set msdu desc + * @atf_set_max_vdevs: Set maximum vdevs number + * @atf_set_peers: Set peers number + * @atf_set_peer_stats: Set peer stats + * @atf_set_vdev_blk_txtraffic: Set Block/unblock vdev tx traffic + * @atf_set_peer_blk_txtraffic: Set Block/unblock peer tx traffic + * @atf_set_tx_block_count: Set tx block count + * @atf_set_token_allocated: Set atf token allocated + * @atf_set_token_utilized: Set atf token utilized + */ +struct wlan_lmac_if_atf_rx_ops { + uint8_t (*atf_get_atf_commit)(struct wlan_objmgr_pdev *pdev); + uint32_t (*atf_get_fmcap)(struct wlan_objmgr_psoc *psoc); + uint32_t (*atf_get_obss_scale)(struct wlan_objmgr_pdev *pdev); + uint32_t (*atf_get_mode)(struct wlan_objmgr_psoc *psoc); + uint32_t (*atf_get_msdu_desc)(struct wlan_objmgr_psoc *psoc); + uint32_t (*atf_get_max_vdevs)(struct wlan_objmgr_psoc *psoc); + uint32_t (*atf_get_peers)(struct wlan_objmgr_psoc *psoc); + uint32_t (*atf_get_tput_based)(struct wlan_objmgr_pdev *pdev); + uint32_t (*atf_get_logging)(struct wlan_objmgr_pdev *pdev); + uint8_t (*atf_get_txbuf_share)(struct wlan_objmgr_pdev *pdev); + uint16_t (*atf_get_txbuf_max)(struct wlan_objmgr_pdev *pdev); + uint16_t 
(*atf_get_txbuf_min)(struct wlan_objmgr_pdev *pdev); + uint32_t (*atf_get_ssidgroup)(struct wlan_objmgr_pdev *pdev); + uint32_t (*atf_get_tx_block_count)(struct wlan_objmgr_vdev *vdev); + uint8_t (*atf_get_peer_blk_txtraffic)(struct wlan_objmgr_peer *peer); + uint8_t (*atf_get_vdev_blk_txtraffic)(struct wlan_objmgr_vdev *vdev); + uint32_t (*atf_get_sched)(struct wlan_objmgr_pdev *pdev); + uint32_t (*atf_get_tx_tokens)(struct wlan_objmgr_peer *peer); + uint32_t (*atf_get_shadow_tx_tokens)(struct wlan_objmgr_peer *peer); + uint32_t (*atf_get_txtokens_common)(struct wlan_objmgr_pdev *pdev); + uint32_t (*atf_get_shadow_alloted_tx_tokens)( + struct wlan_objmgr_pdev *pdev); + void (*atf_get_peer_stats)(struct wlan_objmgr_peer *peer, + struct atf_stats *stats); + uint16_t (*atf_get_token_allocated)(struct wlan_objmgr_peer *peer); + uint16_t (*atf_get_token_utilized)(struct wlan_objmgr_peer *peer); + + void (*atf_set_sched)(struct wlan_objmgr_pdev *pdev, uint32_t value); + void (*atf_set_fmcap)(struct wlan_objmgr_psoc *psoc, uint32_t value); + void (*atf_set_obss_scale)(struct wlan_objmgr_pdev *pdev, + uint32_t value); + void (*atf_set_mode)(struct wlan_objmgr_psoc *psoc, uint8_t value); + void (*atf_set_msdu_desc)(struct wlan_objmgr_psoc *psoc, + uint32_t value); + void (*atf_set_max_vdevs)(struct wlan_objmgr_psoc *psoc, + uint32_t value); + void (*atf_set_peers)(struct wlan_objmgr_psoc *psoc, uint32_t value); + void (*atf_set_peer_stats)(struct wlan_objmgr_peer *peer, + struct atf_stats *stats); + void (*atf_set_vdev_blk_txtraffic)(struct wlan_objmgr_vdev *vdev, + uint8_t value); + void (*atf_set_peer_blk_txtraffic)(struct wlan_objmgr_peer *peer, + uint8_t value); + void (*atf_set_tx_block_count)(struct wlan_objmgr_vdev *vdev, + uint32_t count); + void (*atf_set_token_allocated)(struct wlan_objmgr_peer *peer, + uint16_t value); + void (*atf_set_token_utilized)(struct wlan_objmgr_peer *peer, + uint16_t value); +}; +#endif + +#ifdef WLAN_SUPPORT_FILS +/** + * struct 
wlan_lmac_if_fd_rx_ops - FILS Discovery specific Rx function pointers + * @fd_is_fils_enable: FILS enabled or not + * @fd_alloc: Allocate FD buffer + * @fd_stop: Stop and free deferred FD buffer + * @fd_free: Free FD frame buffer + * @fd_get_valid_fd_period: Get valid FD period + * @fd_swfda_handler: SWFDA event handler + */ +struct wlan_lmac_if_fd_rx_ops { + uint8_t (*fd_is_fils_enable)(struct wlan_objmgr_vdev *vdev); + void (*fd_alloc)(struct wlan_objmgr_vdev *vdev); + void (*fd_stop)(struct wlan_objmgr_vdev *vdev); + void (*fd_free)(struct wlan_objmgr_vdev *vdev); + uint32_t (*fd_get_valid_fd_period)(struct wlan_objmgr_vdev *vdev, + uint8_t *is_modified); + QDF_STATUS (*fd_swfda_handler)(struct wlan_objmgr_vdev *vdev); +}; +#endif + +#ifdef WLAN_SA_API_ENABLE + +/** + * struct wlan_lmac_if_sa_api_rx_ops - SA API south bound rx function pointers + */ +struct wlan_lmac_if_sa_api_rx_ops { + uint32_t (*sa_api_get_sa_supported)(struct wlan_objmgr_psoc *psoc); + uint32_t (*sa_api_get_validate_sw)(struct wlan_objmgr_psoc *psoc); + void (*sa_api_enable_sa)(struct wlan_objmgr_psoc *psoc, uint32_t value); + uint32_t (*sa_api_get_sa_enable)(struct wlan_objmgr_psoc *psoc); + void (*sa_api_peer_assoc_hanldler)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, struct sa_rate_cap *); + uint32_t (*sa_api_update_tx_feedback)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + struct sa_tx_feedback *feedback); + uint32_t (*sa_api_update_rx_feedback)(struct wlan_objmgr_pdev *pdev, + struct wlan_objmgr_peer *peer, + struct sa_rx_feedback *feedback); + uint32_t (*sa_api_ucfg_set_param)(struct wlan_objmgr_pdev *pdev, + char *val); + uint32_t (*sa_api_ucfg_get_param)(struct wlan_objmgr_pdev *pdev, + char *val); + uint32_t (*sa_api_is_tx_feedback_enabled) + (struct wlan_objmgr_pdev *pdev); + uint32_t (*sa_api_is_rx_feedback_enabled) + (struct wlan_objmgr_pdev *pdev); + uint32_t (*sa_api_convert_rate_2g)(uint32_t rate); + uint32_t 
(*sa_api_convert_rate_5g)(uint32_t rate); + uint32_t (*sa_api_get_sa_mode)(struct wlan_objmgr_pdev *pdev); + uint32_t (*sa_api_get_beacon_txantenna)(struct wlan_objmgr_pdev *pdev); + uint32_t (*sa_api_cwm_action)(struct wlan_objmgr_pdev *pdev); +}; +#endif + +#ifdef WLAN_CONV_SPECTRAL_ENABLE +/** + * struct wlan_lmac_if_sptrl_rx_ops - Spectral south bound Rx operations + * + * @sptrlro_get_target_handle: Get Spectral handle for target/LMAC private data + */ +struct wlan_lmac_if_sptrl_rx_ops { + void * (*sptrlro_get_target_handle)(struct wlan_objmgr_pdev *pdev); + int16_t (*sptrlro_vdev_get_chan_freq)(struct wlan_objmgr_vdev *vdev); + enum phy_ch_width (*sptrlro_vdev_get_ch_width)( + struct wlan_objmgr_vdev *vdev); + int (*sptrlro_vdev_get_sec20chan_freq_mhz)( + struct wlan_objmgr_vdev *vdev, + uint16_t *sec20chan_freq); +}; +#endif /* WLAN_CONV_SPECTRAL_ENABLE */ + +#ifdef WIFI_POS_CONVERGED +/** + * struct wlan_lmac_if_wifi_pos_rx_ops - structure of rx function + * pointers for wifi_pos component + * @oem_rsp_event_rx: callback for WMI_OEM_RESPONSE_EVENTID + */ +struct wlan_lmac_if_wifi_pos_rx_ops { + int (*oem_rsp_event_rx)(struct wlan_objmgr_psoc *psoc, + struct oem_data_rsp *oem_rsp); +}; +#endif + +#ifdef WLAN_FEATURE_NAN_CONVERGENCE +/** + * struct wlan_lmac_if_nan_rx_ops - structure of rx function + * pointers for nan component + * @nan_event_rx: function pointer to handle nan events + */ +struct wlan_lmac_if_nan_rx_ops { + QDF_STATUS (*nan_event_rx)(struct scheduler_msg *event); +}; +#endif + +/** + * struct wlan_lmac_if_dfs_rx_ops - Function pointers to call dfs functions + * from lmac/offload. + * @dfs_get_radars: Calls init radar table functions. + * @dfs_process_phyerr: Process phyerr. + * @dfs_destroy_object: Destroys the DFS object. + * @dfs_radar_enable: Enables the radar. + * @dfs_is_radar_enabled: Check if the radar is enabled. + * @dfs_control: Used to process ioctls related to DFS. 
+ * @dfs_is_precac_timer_running: Check whether precac timer is running. + * @dfs_find_vht80_chan_for_precac: Find VHT80 channel for precac. + * @dfs_cancel_precac_timer: Cancel the precac timer. + * @dfs_override_precac_timeout: Override the default precac timeout. + * @dfs_set_precac_enable: Set precac enable flag. + * @dfs_get_precac_enable: Get precac enable flag. + * @dfs_get_override_precac_timeout: Get precac timeout. + * @dfs_set_current_channel: Set DFS current channel. + * @dfs_process_radar_ind: Process radar found indication. + * @dfs_dfs_cac_complete_ind: Process cac complete indication. + * @dfs_stop: Clear dfs timers. + * @dfs_process_phyerr_filter_offload:Process radar event. + * @dfs_is_phyerr_filter_offload: Check whether phyerr filter is offload. + * @dfs_action_on_status: Trigger the action to be taken based on + * on host dfs status received from fw. + * @dfs_override_status_timeout: Override the value of host dfs status + * wait timeout. + * @dfs_get_override_status_timeout: Get the value of host dfs status wait + * timeout. + * @dfs_reset_spoof_test: Checks if radar detection is enabled. 
+ */ +struct wlan_lmac_if_dfs_rx_ops { + QDF_STATUS (*dfs_get_radars)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*dfs_process_phyerr)(struct wlan_objmgr_pdev *pdev, + void *buf, + uint16_t datalen, + uint8_t r_rssi, + uint8_t r_ext_rssi, + uint32_t r_rs_tstamp, + uint64_t r_fulltsf); + QDF_STATUS (*dfs_destroy_object)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*dfs_radar_enable)(struct wlan_objmgr_pdev *pdev, + int no_cac, + uint32_t opmode); + void (*dfs_is_radar_enabled)(struct wlan_objmgr_pdev *pdev, + int *ignore_dfs); + QDF_STATUS (*dfs_control)(struct wlan_objmgr_pdev *pdev, + u_int id, + void *indata, + uint32_t insize, + void *outdata, + uint32_t *outsize, + int *error); + QDF_STATUS (*dfs_is_precac_timer_running)(struct wlan_objmgr_pdev *pdev, + bool *is_precac_timer_running + ); + QDF_STATUS + (*dfs_find_vht80_chan_for_precac)(struct wlan_objmgr_pdev *pdev, + uint32_t chan_mode, + uint8_t ch_freq_seg1, + uint32_t *cfreq1, + uint32_t *cfreq2, + uint32_t *phy_mode, + bool *dfs_set_cfreq2, + bool *set_agile); + QDF_STATUS (*dfs_cancel_precac_timer)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*dfs_override_precac_timeout)( + struct wlan_objmgr_pdev *pdev, + int precac_timeout); + QDF_STATUS (*dfs_set_precac_enable)(struct wlan_objmgr_pdev *pdev, + uint32_t value); + QDF_STATUS (*dfs_get_precac_enable)(struct wlan_objmgr_pdev *pdev, + int *buff); + QDF_STATUS (*dfs_get_override_precac_timeout)( + struct wlan_objmgr_pdev *pdev, + int *precac_timeout); + QDF_STATUS (*dfs_set_current_channel)(struct wlan_objmgr_pdev *pdev, + uint16_t ic_freq, + uint64_t ic_flags, + uint16_t ic_flagext, + uint8_t ic_ieee, + uint8_t ic_vhtop_ch_freq_seg1, + uint8_t ic_vhtop_ch_freq_seg2); +#ifdef DFS_COMPONENT_ENABLE + QDF_STATUS (*dfs_process_radar_ind)(struct wlan_objmgr_pdev *pdev, + struct radar_found_info *radar_found); + QDF_STATUS (*dfs_dfs_cac_complete_ind)(struct wlan_objmgr_pdev *pdev, + uint32_t vdev_id); +#endif + QDF_STATUS (*dfs_stop)(struct wlan_objmgr_pdev 
*pdev); + QDF_STATUS (*dfs_process_phyerr_filter_offload)( + struct wlan_objmgr_pdev *pdev, + struct radar_event_info *wlan_radar_info); + QDF_STATUS (*dfs_is_phyerr_filter_offload)( + struct wlan_objmgr_psoc *psoc, + bool *is_phyerr_filter_offload); + QDF_STATUS (*dfs_action_on_status)(struct wlan_objmgr_pdev *pdev, + u_int32_t *dfs_status_check); + QDF_STATUS (*dfs_override_status_timeout)( + struct wlan_objmgr_pdev *pdev, + int status_timeout); + QDF_STATUS (*dfs_get_override_status_timeout)( + struct wlan_objmgr_pdev *pdev, + int *status_timeout); + QDF_STATUS (*dfs_reset_spoof_test)(struct wlan_objmgr_pdev *pdev); +}; + +struct wlan_lmac_if_mlme_rx_ops { + + void (*wlan_mlme_scan_start)(struct wlan_objmgr_pdev *pdev); + void (*wlan_mlme_register_pm_event_handler)( + struct wlan_objmgr_pdev *pdev, + uint8_t vdev_id); + void (*wlan_mlme_unregister_pm_event_handler)( + struct wlan_objmgr_pdev *pdev, + uint8_t vdev_id); + QDF_STATUS (*wlan_mlme_register_vdev_event_handler)( + struct wlan_objmgr_pdev *pdev, + uint8_t vdev_id); + QDF_STATUS (*wlan_mlme_unregister_vdev_event_handler)( + struct wlan_objmgr_pdev *pdev, + uint8_t vdev_id); + int (*wlan_mlme_send_probe_request)(struct wlan_objmgr_pdev *pdev, + uint8_t vdev_id, + u_int8_t *destination, + u_int8_t *bssid, + u_int8_t *ssid, + u_int32_t ssidlen, + u_int8_t *ie, + size_t len); + int (*wlan_mlme_resmgr_request_bsschan)(struct wlan_objmgr_pdev *pdev); + int (*wlan_mlme_resmgr_request_offchan)(struct wlan_objmgr_pdev *pdev, + u_int32_t freq, + u_int32_t flags, + u_int32_t estimated_offchannel_time); + int (*wlan_mlme_resmgr_active)(struct wlan_objmgr_pdev *pdev); + int (*wlan_mlme_get_cw_inter_found)(struct wlan_objmgr_pdev *pdev); + int (*wlan_mlme_set_home_channel)(struct wlan_objmgr_pdev *pdev, + uint8_t vdev_id); + int (*wlan_mlme_set_channel)(struct wlan_objmgr_pdev *pdev, + u_int32_t freq, + u_int32_t flags); + void (*wlan_mlme_start_record_stats)(struct wlan_objmgr_pdev *pdev); + void 
(*wlan_mlme_end_record_stats)(struct wlan_objmgr_pdev *pdev); + int (*wlan_mlme_get_enh_rpt_ind)(struct wlan_objmgr_pdev *pdev); + int (*wlan_mlme_pause)(struct wlan_objmgr_pdev *pdev); + void (*wlan_mlme_unpause)(struct wlan_objmgr_pdev *pdev); + int (*wlan_mlme_vdev_pause_control)(struct wlan_objmgr_pdev *pdev, + uint8_t vdev_id); + int (*wlan_mlme_sta_power_pause)( + struct wlan_objmgr_pdev *pdev, + uint8_t vdev_id, + u_int32_t timeout); + int (*wlan_mlme_sta_power_unpause)(struct wlan_objmgr_pdev *pdev, + uint8_t vdev_id); + int (*wlan_mlme_set_vdev_sleep)(struct wlan_objmgr_pdev *pdev, + uint8_t vdev_id); + int (*wlan_mlme_set_vdev_wakeup)(struct wlan_objmgr_pdev *pdev, + uint8_t vdev_id); + qdf_time_t (*wlan_mlme_get_traffic_indication_timestamp)( + struct wlan_objmgr_pdev *pdev); + int (*wlan_mlme_get_acs_in_progress)(struct wlan_objmgr_pdev *pdev, + uint8_t vdev_id); + void (*wlan_mlme_end_scan)(struct wlan_objmgr_pdev *pdev); +}; + +#ifdef WLAN_SUPPORT_GREEN_AP +struct wlan_lmac_if_green_ap_rx_ops { + bool (*is_ps_enabled)(struct wlan_objmgr_pdev *pdev); + bool (*is_dbg_print_enabled)(struct wlan_objmgr_pdev *pdev); + QDF_STATUS (*ps_get)(struct wlan_objmgr_pdev *pdev, uint8_t *value); + QDF_STATUS (*ps_set)(struct wlan_objmgr_pdev *pdev, uint8_t value); + void (*suspend_handle)(struct wlan_objmgr_pdev *pdev); +}; +#endif + +/** + * struct wlan_lmac_if_rx_ops - south bound rx function pointers + * @mgmt_txrx_rx_ops: mgmt txrx rx ops + * @scan: scan rx ops + * @dfs_rx_ops: dfs rx ops. + * @cp_stats_rx_ops: cp stats rx ops + * + * Callback function table to be registered with lmac/wmi. + * lmac will use the functional table to send events/frames to umac + */ +struct wlan_lmac_if_rx_ops { + /* Components to declare function pointers required by the module + * in component specific structure. 
+ * The component specific ops structure can be declared in this file + * only + */ + struct wlan_lmac_if_mgmt_txrx_rx_ops mgmt_txrx_rx_ops; + struct wlan_lmac_if_scan_rx_ops scan; +#ifdef CONVERGED_P2P_ENABLE + struct wlan_lmac_if_p2p_rx_ops p2p; +#endif + +#ifdef WLAN_ATF_ENABLE + struct wlan_lmac_if_atf_rx_ops atf_rx_ops; +#endif +#ifdef QCA_SUPPORT_CP_STATS + struct wlan_lmac_if_cp_stats_rx_ops cp_stats_rx_ops; +#endif +#ifdef WLAN_SA_API_ENABLE + struct wlan_lmac_if_sa_api_rx_ops sa_api_rx_ops; +#endif + +#ifdef WLAN_CONV_SPECTRAL_ENABLE + struct wlan_lmac_if_sptrl_rx_ops sptrl_rx_ops; +#endif + +#ifdef WLAN_CONV_CRYPTO_SUPPORTED + struct wlan_lmac_if_crypto_rx_ops crypto_rx_ops; +#endif +#ifdef WIFI_POS_CONVERGED + struct wlan_lmac_if_wifi_pos_rx_ops wifi_pos_rx_ops; +#endif +#ifdef WLAN_FEATURE_NAN_CONVERGENCE + struct wlan_lmac_if_nan_rx_ops nan_rx_ops; +#endif + struct wlan_lmac_if_reg_rx_ops reg_rx_ops; + struct wlan_lmac_if_dfs_rx_ops dfs_rx_ops; +#ifdef CONVERGED_TDLS_ENABLE + struct wlan_lmac_if_tdls_rx_ops tdls_rx_ops; +#endif + +#ifdef WLAN_SUPPORT_FILS + struct wlan_lmac_if_fd_rx_ops fd_rx_ops; +#endif + + struct wlan_lmac_if_mlme_rx_ops mops; + +#ifdef WLAN_SUPPORT_GREEN_AP + struct wlan_lmac_if_green_ap_rx_ops green_ap_rx_ops; +#endif + + struct wlan_lmac_if_ftm_rx_ops ftm_rx_ops; +}; + +/* Function pointer to call legacy tx_ops registration in OL/WMA. 
+ */ +extern QDF_STATUS (*wlan_lmac_if_umac_tx_ops_register) + (struct wlan_lmac_if_tx_ops *tx_ops); +#endif /* _WLAN_LMAC_IF_DEF_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/src/wlan_lmac_if.c b/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/src/wlan_lmac_if.c new file mode 100644 index 0000000000000000000000000000000000000000..c2e03de7724de4845b79bb46e717fac858d38b89 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/global_umac_dispatcher/lmac_if/src/wlan_lmac_if.c @@ -0,0 +1,499 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "qdf_mem.h" +#include +#include "wlan_lmac_if_def.h" +#include "wlan_lmac_if_api.h" +#include "wlan_mgmt_txrx_tgt_api.h" +#include "wlan_scan_tgt_api.h" +#include +#include +#ifdef WLAN_ATF_ENABLE +#include "wlan_atf_tgt_api.h" +#endif +#ifdef WLAN_SA_API_ENABLE +#include "wlan_sa_api_tgt_api.h" +#endif +#ifdef WIFI_POS_CONVERGED +#include "target_if_wifi_pos.h" +#endif /* WIFI_POS_CONVERGED */ +#ifdef WLAN_FEATURE_NAN_CONVERGENCE +#include "target_if_nan.h" +#endif /* WLAN_FEATURE_NAN_CONVERGENCE */ +#include "wlan_reg_tgt_api.h" +#ifdef CONVERGED_P2P_ENABLE +#include "wlan_p2p_tgt_api.h" +#endif +#ifdef CONVERGED_TDLS_ENABLE +#include "wlan_tdls_tgt_api.h" +#endif + +#ifdef WLAN_CONV_CRYPTO_SUPPORTED +#include "wlan_crypto_global_api.h" +#endif +#ifdef DFS_COMPONENT_ENABLE +#include +#include +#endif + +#ifdef WLAN_SUPPORT_GREEN_AP +#include +#include +#endif +#include + +#ifdef WLAN_SUPPORT_FILS +#include +#endif + +#ifdef QCA_SUPPORT_CP_STATS +#include +#endif /* QCA_SUPPORT_CP_STATS */ + +/* Function pointer for OL/WMA specific UMAC tx_ops + * registration. + */ +QDF_STATUS (*wlan_lmac_if_umac_tx_ops_register) + (struct wlan_lmac_if_tx_ops *tx_ops); +qdf_export_symbol(wlan_lmac_if_umac_tx_ops_register); + +#ifdef QCA_SUPPORT_CP_STATS +/** + * wlan_lmac_if_cp_stats_rx_ops_register() - API to register cp stats Rx Ops + * @rx_ops: pointer to lmac rx ops + * + * This API will be used to register function pointers for FW events + * + * Return: void + */ +static void +wlan_lmac_if_cp_stats_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + tgt_cp_stats_register_rx_ops(rx_ops); +} +#else +static void +wlan_lmac_if_cp_stats_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif /* QCA_SUPPORT_CP_STATS */ + +#ifdef WLAN_ATF_ENABLE +/** + * wlan_lmac_if_atf_rx_ops_register() - Function to register ATF RX ops. 
+ */ +static void +wlan_lmac_if_atf_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + struct wlan_lmac_if_atf_rx_ops *atf_rx_ops = &rx_ops->atf_rx_ops; + + /* ATF rx ops */ + atf_rx_ops->atf_get_atf_commit = tgt_atf_get_atf_commit; + atf_rx_ops->atf_get_fmcap = tgt_atf_get_fmcap; + atf_rx_ops->atf_get_obss_scale = tgt_atf_get_obss_scale; + atf_rx_ops->atf_get_mode = tgt_atf_get_mode; + atf_rx_ops->atf_get_msdu_desc = tgt_atf_get_msdu_desc; + atf_rx_ops->atf_get_max_vdevs = tgt_atf_get_max_vdevs; + atf_rx_ops->atf_get_peers = tgt_atf_get_peers; + atf_rx_ops->atf_get_tput_based = tgt_atf_get_tput_based; + atf_rx_ops->atf_get_logging = tgt_atf_get_logging; + atf_rx_ops->atf_get_txbuf_share = tgt_atf_get_txbuf_share; + atf_rx_ops->atf_get_txbuf_max = tgt_atf_get_txbuf_max; + atf_rx_ops->atf_get_txbuf_min = tgt_atf_get_txbuf_min; + atf_rx_ops->atf_get_ssidgroup = tgt_atf_get_ssidgroup; + atf_rx_ops->atf_get_tx_block_count = tgt_atf_get_tx_block_count; + atf_rx_ops->atf_get_peer_blk_txtraffic = tgt_atf_get_peer_blk_txtraffic; + atf_rx_ops->atf_get_vdev_blk_txtraffic = tgt_atf_get_vdev_blk_txtraffic; + atf_rx_ops->atf_get_sched = tgt_atf_get_sched; + atf_rx_ops->atf_get_tx_tokens = tgt_atf_get_tx_tokens; + atf_rx_ops->atf_get_shadow_tx_tokens = tgt_atf_get_shadow_tx_tokens; + atf_rx_ops->atf_get_shadow_alloted_tx_tokens = + tgt_atf_get_shadow_alloted_tx_tokens; + atf_rx_ops->atf_get_txtokens_common = tgt_atf_get_txtokens_common; + atf_rx_ops->atf_get_peer_stats = tgt_atf_get_peer_stats; + atf_rx_ops->atf_get_token_allocated = tgt_atf_get_token_allocated; + atf_rx_ops->atf_get_token_utilized = tgt_atf_get_token_utilized; + + atf_rx_ops->atf_set_sched = tgt_atf_set_sched; + atf_rx_ops->atf_set_fmcap = tgt_atf_set_fmcap; + atf_rx_ops->atf_set_obss_scale = tgt_atf_set_obss_scale; + atf_rx_ops->atf_set_mode = tgt_atf_set_mode; + atf_rx_ops->atf_set_msdu_desc = tgt_atf_set_msdu_desc; + atf_rx_ops->atf_set_max_vdevs = tgt_atf_set_max_vdevs; + atf_rx_ops->atf_set_peers = 
tgt_atf_set_peers; + atf_rx_ops->atf_set_peer_stats = tgt_atf_set_peer_stats; + atf_rx_ops->atf_set_vdev_blk_txtraffic = tgt_atf_set_vdev_blk_txtraffic; + atf_rx_ops->atf_set_peer_blk_txtraffic = tgt_atf_set_peer_blk_txtraffic; + atf_rx_ops->atf_set_tx_block_count = tgt_atf_set_tx_block_count; + atf_rx_ops->atf_set_token_allocated = tgt_atf_set_token_allocated; + atf_rx_ops->atf_set_token_utilized = tgt_atf_set_token_utilized; +} +#else +static void +wlan_lmac_if_atf_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif + +#ifdef WLAN_SUPPORT_FILS +static void +wlan_lmac_if_fd_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + struct wlan_lmac_if_fd_rx_ops *fd_rx_ops = &rx_ops->fd_rx_ops; + + fd_rx_ops->fd_is_fils_enable = tgt_fd_is_fils_enable; + fd_rx_ops->fd_alloc = tgt_fd_alloc; + fd_rx_ops->fd_stop = tgt_fd_stop; + fd_rx_ops->fd_free = tgt_fd_free; + fd_rx_ops->fd_get_valid_fd_period = tgt_fd_get_valid_fd_period; + fd_rx_ops->fd_swfda_handler = tgt_fd_swfda_handler; +} +#else +static void +wlan_lmac_if_fd_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif + +#ifdef WLAN_SA_API_ENABLE +/** + * wlan_lmac_if_sa_api_rx_ops_register() - Function to register SA_API RX ops. 
+ */ +static void +wlan_lmac_if_sa_api_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + struct wlan_lmac_if_sa_api_rx_ops *sa_api_rx_ops = &rx_ops->sa_api_rx_ops; + + /* SA API rx ops */ + sa_api_rx_ops->sa_api_get_sa_supported = tgt_sa_api_get_sa_supported; + sa_api_rx_ops->sa_api_get_validate_sw = tgt_sa_api_get_validate_sw; + sa_api_rx_ops->sa_api_enable_sa = tgt_sa_api_enable_sa; + sa_api_rx_ops->sa_api_get_sa_enable = tgt_sa_api_get_sa_enable; + + sa_api_rx_ops->sa_api_peer_assoc_hanldler = tgt_sa_api_peer_assoc_hanldler; + sa_api_rx_ops->sa_api_update_tx_feedback = tgt_sa_api_update_tx_feedback; + sa_api_rx_ops->sa_api_update_rx_feedback = tgt_sa_api_update_rx_feedback; + + sa_api_rx_ops->sa_api_ucfg_set_param = tgt_sa_api_ucfg_set_param; + sa_api_rx_ops->sa_api_ucfg_get_param = tgt_sa_api_ucfg_get_param; + + sa_api_rx_ops->sa_api_is_tx_feedback_enabled = tgt_sa_api_is_tx_feedback_enabled; + sa_api_rx_ops->sa_api_is_rx_feedback_enabled = tgt_sa_api_is_rx_feedback_enabled; + + sa_api_rx_ops->sa_api_convert_rate_2g = tgt_sa_api_convert_rate_2g; + sa_api_rx_ops->sa_api_convert_rate_5g = tgt_sa_api_convert_rate_5g; + sa_api_rx_ops->sa_api_get_sa_mode = tgt_sa_api_get_sa_mode; + + sa_api_rx_ops->sa_api_get_beacon_txantenna = tgt_sa_api_get_beacon_txantenna; + sa_api_rx_ops->sa_api_cwm_action = tgt_sa_api_cwm_action; +} +#else +static void +wlan_lmac_if_sa_api_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif + + +#ifdef WLAN_CONV_CRYPTO_SUPPORTED +static void +wlan_lmac_if_crypto_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + wlan_crypto_register_crypto_rx_ops(&rx_ops->crypto_rx_ops); +} +#else +static void +wlan_lmac_if_crypto_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif + +#ifdef WIFI_POS_CONVERGED +static void wlan_lmac_if_umac_rx_ops_register_wifi_pos( + struct wlan_lmac_if_rx_ops *rx_ops) +{ + target_if_wifi_pos_register_rx_ops(rx_ops); +} +#else +static void wlan_lmac_if_umac_rx_ops_register_wifi_pos( + 
struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif /* WIFI_POS_CONVERGED */ + +#ifdef WLAN_FEATURE_NAN_CONVERGENCE +static void wlan_lmac_if_register_nan_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops) +{ + target_if_nan_register_rx_ops(rx_ops); +} +#else +static void wlan_lmac_if_register_nan_rx_ops(struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif /* WLAN_FEATURE_NAN_CONVERGENCE */ + +static void wlan_lmac_if_umac_reg_rx_ops_register( + struct wlan_lmac_if_rx_ops *rx_ops) +{ + rx_ops->reg_rx_ops.master_list_handler = + tgt_reg_process_master_chan_list; + + rx_ops->reg_rx_ops.reg_11d_new_cc_handler = + tgt_reg_process_11d_new_country; + + rx_ops->reg_rx_ops.reg_set_regdb_offloaded = + tgt_reg_set_regdb_offloaded; + + rx_ops->reg_rx_ops.reg_set_11d_offloaded = + tgt_reg_set_11d_offloaded; + + rx_ops->reg_rx_ops.get_dfs_region = + wlan_reg_get_dfs_region; + + rx_ops->reg_rx_ops.reg_ch_avoid_event_handler = + tgt_reg_process_ch_avoid_event; + + rx_ops->reg_rx_ops.reg_freq_to_chan = + wlan_reg_freq_to_chan; + + rx_ops->reg_rx_ops.reg_set_chan_144 = + ucfg_reg_modify_chan_144; + + rx_ops->reg_rx_ops.reg_get_chan_144 = + ucfg_reg_get_en_chan_144; + + rx_ops->reg_rx_ops.reg_program_default_cc = + ucfg_reg_program_default_cc; + + rx_ops->reg_rx_ops.reg_get_current_regdomain = + wlan_reg_get_curr_regdomain; +} + +#ifdef CONVERGED_P2P_ENABLE +static void wlan_lmac_if_umac_rx_ops_register_p2p( + struct wlan_lmac_if_rx_ops *rx_ops) +{ + rx_ops->p2p.lo_ev_handler = tgt_p2p_lo_event_cb; + rx_ops->p2p.noa_ev_handler = tgt_p2p_noa_event_cb; + rx_ops->p2p.add_mac_addr_filter_evt_handler = + tgt_p2p_add_mac_addr_status_event_cb; +} +#else +static void wlan_lmac_if_umac_rx_ops_register_p2p( + struct wlan_lmac_if_rx_ops *rx_ops) +{ +} +#endif + +#ifdef DFS_COMPONENT_ENABLE +static QDF_STATUS +wlan_lmac_if_umac_dfs_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + struct wlan_lmac_if_dfs_rx_ops *dfs_rx_ops; + + dfs_rx_ops = &rx_ops->dfs_rx_ops; + + dfs_rx_ops->dfs_get_radars = 
tgt_dfs_get_radars; + dfs_rx_ops->dfs_process_phyerr = tgt_dfs_process_phyerr; + dfs_rx_ops->dfs_destroy_object = tgt_dfs_destroy_object; + dfs_rx_ops->dfs_radar_enable = tgt_dfs_radar_enable; + dfs_rx_ops->dfs_is_radar_enabled = tgt_dfs_is_radar_enabled; + dfs_rx_ops->dfs_control = tgt_dfs_control; + dfs_rx_ops->dfs_is_precac_timer_running = + tgt_dfs_is_precac_timer_running; + dfs_rx_ops->dfs_find_vht80_chan_for_precac = + tgt_dfs_find_vht80_chan_for_precac; + dfs_rx_ops->dfs_cancel_precac_timer = utils_dfs_cancel_precac_timer; + dfs_rx_ops->dfs_override_precac_timeout = + ucfg_dfs_override_precac_timeout; + dfs_rx_ops->dfs_set_precac_enable = ucfg_dfs_set_precac_enable; + dfs_rx_ops->dfs_get_precac_enable = ucfg_dfs_get_precac_enable; + dfs_rx_ops->dfs_get_override_precac_timeout = + ucfg_dfs_get_override_precac_timeout; + dfs_rx_ops->dfs_set_current_channel = tgt_dfs_set_current_channel; + dfs_rx_ops->dfs_process_radar_ind = tgt_dfs_process_radar_ind; + dfs_rx_ops->dfs_dfs_cac_complete_ind = tgt_dfs_cac_complete; + dfs_rx_ops->dfs_stop = tgt_dfs_stop; + dfs_rx_ops->dfs_process_phyerr_filter_offload = + tgt_dfs_process_phyerr_filter_offload; + dfs_rx_ops->dfs_is_phyerr_filter_offload = + tgt_dfs_is_phyerr_filter_offload; + dfs_rx_ops->dfs_action_on_status = tgt_dfs_action_on_status_from_fw; + dfs_rx_ops->dfs_override_status_timeout = + ucfg_dfs_set_override_status_timeout; + dfs_rx_ops->dfs_get_override_status_timeout = + ucfg_dfs_get_override_status_timeout; + dfs_rx_ops->dfs_reset_spoof_test = + tgt_dfs_reset_spoof_test; + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS +wlan_lmac_if_umac_dfs_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef CONVERGED_TDLS_ENABLE +static QDF_STATUS +wlan_lmac_if_umac_tdls_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + rx_ops->tdls_rx_ops.tdls_ev_handler = tgt_tdls_event_handler; + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS 
+wlan_lmac_if_umac_tdls_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef WLAN_SUPPORT_GREEN_AP +static QDF_STATUS +wlan_lmac_if_umac_green_ap_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + rx_ops->green_ap_rx_ops.is_ps_enabled = wlan_green_ap_is_ps_enabled; + rx_ops->green_ap_rx_ops.is_dbg_print_enabled = + ucfg_green_ap_get_debug_prints; + rx_ops->green_ap_rx_ops.ps_set = ucfg_green_ap_set_ps_config; + rx_ops->green_ap_rx_ops.ps_get = ucfg_green_ap_get_ps_config; + rx_ops->green_ap_rx_ops.suspend_handle = wlan_green_ap_suspend_handle; + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS +wlan_lmac_if_umac_green_ap_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +static QDF_STATUS +wlan_lmac_if_umac_ftm_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + struct wlan_lmac_if_ftm_rx_ops *ftm_rx_ops; + + ftm_rx_ops = &rx_ops->ftm_rx_ops; + + ftm_rx_ops->ftm_ev_handler = wlan_ftm_process_utf_event; + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_lmac_if_umac_rx_ops_register() - UMAC rx handler register + * @rx_ops: Pointer to rx_ops structure to be populated + * + * Register umac RX callabacks which will be called by DA/OL/WMA/WMI + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS +wlan_lmac_if_umac_rx_ops_register(struct wlan_lmac_if_rx_ops *rx_ops) +{ + /* Component specific public api's to be called to register + * respective callbacks + * Ex: rx_ops->fp = function; + */ + struct wlan_lmac_if_mgmt_txrx_rx_ops *mgmt_txrx_rx_ops; + + if (!rx_ops) { + qdf_print("%s: lmac if rx ops pointer is NULL", __func__); + return QDF_STATUS_E_INVAL; + } + + /* mgmt txrx rx ops */ + mgmt_txrx_rx_ops = &rx_ops->mgmt_txrx_rx_ops; + + mgmt_txrx_rx_ops->mgmt_tx_completion_handler = + tgt_mgmt_txrx_tx_completion_handler; + mgmt_txrx_rx_ops->mgmt_rx_frame_handler = + tgt_mgmt_txrx_rx_frame_handler; + 
mgmt_txrx_rx_ops->mgmt_txrx_get_nbuf_from_desc_id = + tgt_mgmt_txrx_get_nbuf_from_desc_id; + mgmt_txrx_rx_ops->mgmt_txrx_get_peer_from_desc_id = + tgt_mgmt_txrx_get_peer_from_desc_id; + mgmt_txrx_rx_ops->mgmt_txrx_get_vdev_id_from_desc_id = + tgt_mgmt_txrx_get_vdev_id_from_desc_id; + mgmt_txrx_rx_ops->mgmt_txrx_get_free_desc_pool_count = + tgt_mgmt_txrx_get_free_desc_pool_count; + + /* scan rx ops */ + rx_ops->scan.scan_ev_handler = tgt_scan_event_handler; + rx_ops->scan.scan_set_max_active_scans = tgt_scan_set_max_active_scans; + + wlan_lmac_if_atf_rx_ops_register(rx_ops); + + wlan_lmac_if_cp_stats_rx_ops_register(rx_ops); + + wlan_lmac_if_sa_api_rx_ops_register(rx_ops); + + wlan_lmac_if_crypto_rx_ops_register(rx_ops); + /* wifi_pos rx ops */ + wlan_lmac_if_umac_rx_ops_register_wifi_pos(rx_ops); + + /* tdls rx ops */ + wlan_lmac_if_umac_tdls_rx_ops_register(rx_ops); + + wlan_lmac_if_register_nan_rx_ops(rx_ops); + + wlan_lmac_if_umac_reg_rx_ops_register(rx_ops); + + /* p2p rx ops */ + wlan_lmac_if_umac_rx_ops_register_p2p(rx_ops); + + /* DFS rx_ops */ + wlan_lmac_if_umac_dfs_rx_ops_register(rx_ops); + + wlan_lmac_if_umac_green_ap_rx_ops_register(rx_ops); + + /* FTM rx_ops */ + wlan_lmac_if_umac_ftm_rx_ops_register(rx_ops); + + /* FILS Discovery */ + wlan_lmac_if_fd_rx_ops_register(rx_ops); + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_lmac_if_set_umac_txops_registration_cb() - tx registration + * callback assignment + * @dev_type: Dev type can be either Direct attach or Offload + * @handler: handler to be called for LMAC tx ops registration + * + * API to assign appropriate tx registration callback handler based on the + * device type(Offload or Direct attach) + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS wlan_lmac_if_set_umac_txops_registration_cb(QDF_STATUS (*handler) + (struct wlan_lmac_if_tx_ops *)) +{ + wlan_lmac_if_umac_tx_ops_register = handler; + return QDF_STATUS_SUCCESS; +} 
+qdf_export_symbol(wlan_lmac_if_set_umac_txops_registration_cb); + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/green_ap/core/src/wlan_green_ap_main.c b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/core/src/wlan_green_ap_main.c new file mode 100644 index 0000000000000000000000000000000000000000..aa0f7dec9bac5536e35f5e23c6aeb1e82ed1f055 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/core/src/wlan_green_ap_main.c @@ -0,0 +1,365 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains main green ap function definitions + */ + +#include "wlan_green_ap_main_i.h" + +/* + * wlan_green_ap_ant_ps_reset() - Reset function + * @green_ap - green ap context + * + * Reset fiunction, so that Antenna Mask can come into effect. 
+ * This applies for only few of the hardware chips + * + * Return: QDF_STATUS + */ +static QDF_STATUS wlan_green_ap_ant_ps_reset + (struct wlan_pdev_green_ap_ctx *green_ap_ctx) +{ + struct wlan_lmac_if_green_ap_tx_ops *green_ap_tx_ops; + struct wlan_objmgr_pdev *pdev; + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + pdev = green_ap_ctx->pdev; + + green_ap_tx_ops = wlan_psoc_get_green_ap_tx_ops(green_ap_ctx); + if (!green_ap_tx_ops) { + green_ap_err("green ap tx ops obtained are NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!green_ap_tx_ops->reset_dev) + return QDF_STATUS_SUCCESS; + + /* + * Add protection against green AP enabling interrupts + * when not valid or no VAPs exist + */ + if (wlan_util_is_vap_active(pdev, WLAN_GREEN_AP_ID) == QDF_STATUS_SUCCESS) + green_ap_tx_ops->reset_dev(pdev); + else + green_ap_err("Green AP tried to enable IRQs when invalid"); + + return QDF_STATUS_SUCCESS; +} + +struct wlan_lmac_if_green_ap_tx_ops * +wlan_psoc_get_green_ap_tx_ops(struct wlan_pdev_green_ap_ctx *green_ap_ctx) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev = green_ap_ctx->pdev; + + if (!pdev) { + green_ap_err("pdev context obtained is NULL"); + return NULL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + green_ap_err("pdev context obtained is NULL"); + return NULL; + } + + return &((psoc->soc_cb.tx_ops.green_ap_tx_ops)); +} + +bool wlan_is_egap_enabled(struct wlan_pdev_green_ap_ctx *green_ap_ctx) +{ + struct wlan_green_ap_egap_params *egap_params; + + if (!green_ap_ctx) { + green_ap_err("green ap context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + egap_params = &green_ap_ctx->egap_params; + + if (egap_params->fw_egap_support && + egap_params->host_enable_egap && + egap_params->egap_feature_flags) + return true; + return false; +} +qdf_export_symbol(wlan_is_egap_enabled); + +/** + * wlan_green_ap_ps_event_state_update() - Update PS state and event + * @pdev: 
pdev pointer + * @state: ps state + * @event: ps event + * + * @Return: Success or Failure + */ +static QDF_STATUS wlan_green_ap_ps_event_state_update( + struct wlan_pdev_green_ap_ctx *green_ap_ctx, + enum wlan_green_ap_ps_state state, + enum wlan_green_ap_ps_event event) +{ + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_ctx->ps_state = state; + green_ap_ctx->ps_event = event; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_green_ap_state_mc(struct wlan_pdev_green_ap_ctx *green_ap_ctx, + enum wlan_green_ap_ps_event event) +{ + struct wlan_lmac_if_green_ap_tx_ops *green_ap_tx_ops; + uint8_t pdev_id; + + /* + * Remove the assignments once channel info is available for + * converged component. + */ + uint16_t channel = 1; + uint32_t channel_flags = 1; + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!green_ap_ctx->pdev) { + green_ap_err("pdev obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + pdev_id = wlan_objmgr_pdev_get_pdev_id(green_ap_ctx->pdev); + + green_ap_tx_ops = wlan_psoc_get_green_ap_tx_ops(green_ap_ctx); + if (!green_ap_tx_ops) { + green_ap_err("green ap tx ops obtained are NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (!green_ap_tx_ops->ps_on_off_send) { + green_ap_err("tx op for sending enbale/disable green ap is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_spin_lock_bh(&green_ap_ctx->lock); + + if (green_ap_tx_ops->get_current_channel) + channel = green_ap_tx_ops->get_current_channel( + green_ap_ctx->pdev); + + if (green_ap_tx_ops->get_current_channel_flags) + channel_flags = green_ap_tx_ops->get_current_channel_flags( + green_ap_ctx->pdev); + + /* handle the green ap ps event */ + switch (event) { + case WLAN_GREEN_AP_ADD_STA_EVENT: + green_ap_ctx->num_nodes++; + break; + + case WLAN_GREEN_AP_DEL_STA_EVENT: + if (green_ap_ctx->num_nodes) + green_ap_ctx->num_nodes--; + break; + + case 
WLAN_GREEN_AP_PS_START_EVENT: + case WLAN_GREEN_AP_PS_STOP_EVENT: + case WLAN_GREEN_AP_PS_ON_EVENT: + case WLAN_GREEN_AP_PS_WAIT_EVENT: + break; + + default: + green_ap_err("Invalid event: %d", event); + break; + } + + green_ap_debug("Green-AP event: %d, state: %d, num_nodes: %d", + event, green_ap_ctx->ps_state, green_ap_ctx->num_nodes); + + /* Confirm that power save is enabled before doing state transitions */ + if (!green_ap_ctx->ps_enable) { + green_ap_debug("Green-AP is disabled"); + if (green_ap_ctx->ps_state == WLAN_GREEN_AP_PS_ON_STATE) { + if (green_ap_tx_ops->ps_on_off_send(green_ap_ctx->pdev, + false, pdev_id)) + green_ap_err("failed to set green ap mode"); + wlan_green_ap_ant_ps_reset(green_ap_ctx); + } + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + WLAN_GREEN_AP_PS_IDLE_STATE, + WLAN_GREEN_AP_PS_WAIT_EVENT); + goto done; + } + + /* handle the green ap ps state */ + switch (green_ap_ctx->ps_state) { + case WLAN_GREEN_AP_PS_IDLE_STATE: + if (green_ap_ctx->num_nodes) { + /* Active nodes present, Switchoff the power save */ + green_ap_info("Transition to OFF from IDLE"); + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + WLAN_GREEN_AP_PS_OFF_STATE, + WLAN_GREEN_AP_PS_WAIT_EVENT); + } else { + /* No Active nodes, get into power save */ + green_ap_info("Transition to WAIT from IDLE"); + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + WLAN_GREEN_AP_PS_WAIT_STATE, + WLAN_GREEN_AP_PS_WAIT_EVENT); + qdf_timer_start(&green_ap_ctx->ps_timer, + green_ap_ctx->ps_trans_time * 1000); + } + break; + + case WLAN_GREEN_AP_PS_OFF_STATE: + if (!green_ap_ctx->num_nodes) { + green_ap_info("Transition to WAIT from OFF"); + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + WLAN_GREEN_AP_PS_WAIT_STATE, + WLAN_GREEN_AP_PS_WAIT_EVENT); + qdf_timer_start(&green_ap_ctx->ps_timer, + green_ap_ctx->ps_trans_time * 1000); + } + break; + + case WLAN_GREEN_AP_PS_WAIT_STATE: + if (!green_ap_ctx->num_nodes) { + if ((channel == 0) || (channel_flags == 0)) { + /* 
+ * Stay in the current state and restart the + * timer to check later. + */ + qdf_timer_start(&green_ap_ctx->ps_timer, + green_ap_ctx->ps_on_time * 1000); + } else { + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + WLAN_GREEN_AP_PS_ON_STATE, + WLAN_GREEN_AP_PS_WAIT_EVENT); + + green_ap_info("Transition to ON from WAIT"); + green_ap_tx_ops->ps_on_off_send( + green_ap_ctx->pdev, true, pdev_id); + wlan_green_ap_ant_ps_reset(green_ap_ctx); + + if (green_ap_ctx->ps_on_time) + qdf_timer_start(&green_ap_ctx->ps_timer, + green_ap_ctx->ps_on_time * 1000); + } + } else { + green_ap_info("Transition to OFF from WAIT"); + qdf_timer_stop(&green_ap_ctx->ps_timer); + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + WLAN_GREEN_AP_PS_OFF_STATE, + WLAN_GREEN_AP_PS_WAIT_EVENT); + } + break; + + case WLAN_GREEN_AP_PS_ON_STATE: + if (green_ap_ctx->num_nodes) { + qdf_timer_stop(&green_ap_ctx->ps_timer); + if (green_ap_tx_ops->ps_on_off_send( + green_ap_ctx->pdev, false, pdev_id)) { + green_ap_err("Failed to set Green AP mode"); + goto done; + } + wlan_green_ap_ant_ps_reset(green_ap_ctx); + green_ap_info("Transition to OFF from ON\n"); + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + WLAN_GREEN_AP_PS_OFF_STATE, + WLAN_GREEN_AP_PS_WAIT_EVENT); + } else if ((green_ap_ctx->ps_event == + WLAN_GREEN_AP_PS_WAIT_EVENT) && + (green_ap_ctx->ps_on_time)) { + /* ps_on_time timeout, switch to ps wait */ + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + WLAN_GREEN_AP_PS_WAIT_STATE, + WLAN_GREEN_AP_PS_ON_EVENT); + + if (green_ap_tx_ops->ps_on_off_send( + green_ap_ctx->pdev, false, pdev_id)) { + green_ap_err("Failed to set Green AP mode"); + goto done; + } + + wlan_green_ap_ant_ps_reset(green_ap_ctx); + green_ap_info("Transition to WAIT from ON\n"); + qdf_timer_start(&green_ap_ctx->ps_timer, + green_ap_ctx->ps_trans_time * 1000); + } + break; + + default: + green_ap_err("invalid state %d", green_ap_ctx->ps_state); + wlan_green_ap_ps_event_state_update( + green_ap_ctx, + 
WLAN_GREEN_AP_PS_OFF_STATE, + WLAN_GREEN_AP_PS_WAIT_EVENT); + break; + } + +done: + qdf_spin_unlock_bh(&green_ap_ctx->lock); + return QDF_STATUS_SUCCESS; +} + +void wlan_green_ap_timer_fn(void *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + struct wlan_objmgr_pdev *pdev_ctx = (struct wlan_objmgr_pdev *)pdev; + + if (!pdev_ctx) { + green_ap_err("pdev context passed is NULL"); + return; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev_ctx, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return; + } + wlan_green_ap_state_mc(green_ap_ctx, green_ap_ctx->ps_event); +} + +void wlan_green_ap_check_mode(struct wlan_objmgr_pdev *pdev, + void *object, + void *arg) +{ + struct wlan_objmgr_vdev *vdev = (struct wlan_objmgr_vdev *)object; + uint8_t *flag = (uint8_t *)arg; + + wlan_vdev_obj_lock(vdev); + if (wlan_vdev_mlme_get_opmode(vdev) != QDF_SAP_MODE) + *flag = 1; + + wlan_vdev_obj_unlock(vdev); +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/green_ap/core/src/wlan_green_ap_main_i.h b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/core/src/wlan_green_ap_main_i.h new file mode 100644 index 0000000000000000000000000000000000000000..9a98cecb11288852c9144cdf1b391dde40e4edc9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/core/src/wlan_green_ap_main_i.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + +/** + * DOC: This file has main green ap structures. + */ + +#ifndef _WLAN_GREEN_AP_MAIN_I_H_ +#define _WLAN_GREEN_AP_MAIN_I_H_ + +#include +#include +#include +#include +#include +#include +#include "wlan_utility.h" +#include + +#define WLAN_GREEN_AP_PS_ON_TIME (0) +#define WLAN_GREEN_AP_PS_TRANS_TIME (20) + +#define green_ap_log(level, args...) \ + QDF_TRACE(QDF_MODULE_ID_GREEN_AP, level, ## args) +#define green_ap_logfl(level, format, args...) \ + green_ap_log(level, FL(format), ## args) + +#define green_ap_alert(format, args...) \ + green_ap_logfl(QDF_TRACE_LEVEL_FATAL, format, ## args) +#define green_ap_err(format, args...) \ + green_ap_logfl(QDF_TRACE_LEVEL_ERROR, format, ## args) +#define green_apwarn(format, args...) \ + green_ap_logfl(QDF_TRACE_LEVEL_WARN, format, ## args) +#define green_ap_notice(format, args...) \ + green_ap_logfl(QDF_TRACE_LEVEL_INFO, format, ## args) +#define green_ap_info(format, args...) \ + green_ap_logfl(QDF_TRACE_LEVEL_INFO_HIGH, format, ## args) +#define green_ap_debug(format, args...) 
\ + green_ap_logfl(QDF_TRACE_LEVEL_DEBUG, format, ## args) + +#define WLAN_GREEN_AP_PS_DISABLE 0 +#define WLAN_GREEN_AP_PS_ENABLE 1 +#define WLAN_GREEN_AP_PS_SUSPEND 2 +/** + * enum wlan_green_ap_ps_state - PS states + * @WLAN_GREEN_AP_PS_IDLE_STATE - Idle + * @WLAN_GREEN_AP_PS_OFF_STATE - Off + * @WLAN_GREEN_AP_PS_WAIT_STATE - Wait + * @WLAN_GREEN_AP_PS_ON_STATE - On + */ +enum wlan_green_ap_ps_state { + WLAN_GREEN_AP_PS_IDLE_STATE = 1, + WLAN_GREEN_AP_PS_OFF_STATE, + WLAN_GREEN_AP_PS_WAIT_STATE, + WLAN_GREEN_AP_PS_ON_STATE, +}; + +/** + * enum wlan_green_ap_ps_event - PS event + * @WLAN_GREEN_AP_PS_START_EVENT - Start + * @WLAN_GREEN_AP_PS_STOP_EVENT - Stop + * @WLAN_GREEN_AP_ADD_STA_EVENT - Sta assoc + * @WLAN_GREEN_AP_DEL_STA_EVENT - Sta disassoc + * @WLAN_GREEN_AP_PS_ON_EVENT - PS on + * @WLAN_GREEN_AP_PS_OFF_EVENT - PS off + */ +enum wlan_green_ap_ps_event { + WLAN_GREEN_AP_PS_START_EVENT = 1, + WLAN_GREEN_AP_PS_STOP_EVENT, + WLAN_GREEN_AP_ADD_STA_EVENT, + WLAN_GREEN_AP_DEL_STA_EVENT, + WLAN_GREEN_AP_PS_ON_EVENT, + WLAN_GREEN_AP_PS_WAIT_EVENT, +}; + +/** + * struct wlan_pdev_green_ap_ctx - green ap context + * @pdev - Pdev pointer + * @ps_enable - Enable PS + * @ps_on_time - PS on time, once enabled + * @ps_trans_time - PS transition time + * @num_nodes - Number of nodes associated to radio + * @ps_state - PS state + * @ps_event - PS event + * @ps_timer - Timer + * @lock: green ap spinlock + * @egap_params - Enhanced green ap params + */ +struct wlan_pdev_green_ap_ctx { + struct wlan_objmgr_pdev *pdev; + uint8_t ps_enable; + uint8_t ps_on_time; + uint32_t ps_trans_time; + uint32_t num_nodes; + enum wlan_green_ap_ps_state ps_state; + enum wlan_green_ap_ps_event ps_event; + qdf_timer_t ps_timer; + qdf_spinlock_t lock; + struct wlan_green_ap_egap_params egap_params; + bool dbg_enable; +}; + +/** + * wlan_psoc_get_green_ap_tx_ops() - Obtain green ap tx ops from green ap ctx + * @green_ap_ctx: green ap context + * + * @Return: green ap tx ops pointer + */ +struct 
wlan_lmac_if_green_ap_tx_ops * +wlan_psoc_get_green_ap_tx_ops(struct wlan_pdev_green_ap_ctx *green_ap_ctx); + +/** + * wlan_is_egap_enabled() - Get Enhance Green AP feature status + * @green_ap_ctx: green ap context + * + * Return: true if firmware, feature_flag and ini are all egap enabled + */ +bool wlan_is_egap_enabled(struct wlan_pdev_green_ap_ctx *green_ap_ctx); + +/** + * wlan_green_ap_state_mc() - Green ap state machine + * @green_ap_ctx: green ap context + * @event: ps event + * + * @Return: Success or Failure + */ +QDF_STATUS wlan_green_ap_state_mc(struct wlan_pdev_green_ap_ctx *green_ap_ctx, + enum wlan_green_ap_ps_event event); + +/** + * wlan_green_ap_timer_fn() - Green ap timer callback + * @pdev: pdev pointer + * + * @Return: None + */ +void wlan_green_ap_timer_fn(void *pdev); + +/** + * wlan_green_ap_check_mode() - Check for mode + * @pdev: pdev pointer + * @object: vdev object + * @arg: flag to be set + * + * Callback to check if all modes on radio are configured as AP + * + * @Return: None + */ +void wlan_green_ap_check_mode(struct wlan_objmgr_pdev *pdev, + void *object, + void *arg); +#endif /* _WLAN_GREEN_AP_MAIN_I_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/inc/wlan_green_ap_api.h b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/inc/wlan_green_ap_api.h new file mode 100644 index 0000000000000000000000000000000000000000..8e819ae81ef997938d4267ce3eb84ce64e711da7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/inc/wlan_green_ap_api.h @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Contains green ap north bound interface definitions + */ + +#ifndef _WLAN_GREEN_AP_API_H_ +#define _WLAN_GREEN_AP_API_H_ + +#include +#include +#include + +/** + * struct wlan_green_ap_egap_params - enhance green ap params + * @fw_egap_support: fw enhance green ap support + * @host_enable_egap: HOST enhance green ap support + * @egap_inactivity_time: inactivity time + * @egap_wait_time: wait time + * @egap_feature_flags: feature flags + */ +struct wlan_green_ap_egap_params { + bool fw_egap_support; + bool host_enable_egap; + uint32_t egap_inactivity_time; + uint32_t egap_wait_time; + uint32_t egap_feature_flags; +}; + +/** + * struct wlan_green_ap_egap_status_info - enhance green ap params + * @status: egap status + * @mac_id: mac id + * @tx_chainmask: tx chainmask + * @rx_chainmask: rx chainmask + */ +struct wlan_green_ap_egap_status_info { + uint32_t status; + uint32_t mac_id; + uint32_t tx_chainmask; + uint32_t rx_chainmask; +}; + +/** + * wlan_green_ap_init() - initialize green ap component + * + * Return: Success or Failure + */ +QDF_STATUS wlan_green_ap_init(void); + +/** + * wlan_green_ap_deinit() - De-initialize green ap component + * + * Return: Success or Failure + */ +QDF_STATUS wlan_green_ap_deinit(void); + +/** + * wlan_green_ap_start() - Start green ap + * @pdev: pdev pointer + * + * Call this function when the first SAP comes up + * + * Return: Success or Failure + */ +QDF_STATUS wlan_green_ap_start(struct 
wlan_objmgr_pdev *pdev); + +/** + * wlan_green_ap_stop() - Stop green ap + * @pdev: pdev pointer + * + * Call this function when the last SAP goes down + * + * Return: Success or Failure + */ +QDF_STATUS wlan_green_ap_stop(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_green_ap_add_sta() - On association + * @pdev: pdev pointer + * + * Call this function when new node is associated + * + * Return: Success or Failure + */ +QDF_STATUS wlan_green_ap_add_sta(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_green_ap_del_sta() - On disassociation + * @pdev: pdev pointer + * + * Call this function when new node is disassociated + * + * Return: Success or Failure + */ +QDF_STATUS wlan_green_ap_del_sta(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_green_ap_is_ps_enabled() - is power save enabled + * @pdev: pdev pointer + * + * Check if power save is enabled in FW + * + * Return: Success or Failure + */ +bool wlan_green_ap_is_ps_enabled(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_green_ap_suspend_handle() - handle driver suspend + * @pdev: pdev pointer + * + * Return: None + */ +void wlan_green_ap_suspend_handle(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_green_ap_get_capab() - get lmac capability + * @pdev: pdev pointer + * + * Return: Appropriate status + */ +QDF_STATUS wlan_green_ap_get_capab(struct wlan_objmgr_pdev *pdev); +#endif /* _WLAN_GREEN_AP_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/inc/wlan_green_ap_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/inc/wlan_green_ap_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..3d0a54ca5cb8ff8a2b06ace5cee8544ce2cdb7c1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/inc/wlan_green_ap_ucfg_api.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Contains green ap north bound interface definitions + */ + +#ifndef _WLAN_GREEN_AP_UCFG_API_H_ +#define _WLAN_GREEN_AP_UCFG_API_H_ + +#include +#include +#include +#include "wlan_utility.h" + +/** + * struct green_ap_user_cfg - green ap user cfg + * @host_enable_egap: HOST enhance green ap support + * @egap_inactivity_time: inactivity time + * @egap_wait_time: wait time + * @egap_feature_flags: feature flags + */ +struct green_ap_user_cfg { + bool host_enable_egap; + uint32_t egap_inactivity_time; + uint32_t egap_wait_time; + uint32_t egap_feature_flags; +}; + +/** + * ucfg_green_ap_update_user_config() - Updates user cfg for green ap + * @pdev: pdev pointer + * @green_ap_cfg: pointer to green ap user cfg structure + * + * Return: Success or Failure + */ +QDF_STATUS ucfg_green_ap_update_user_config( + struct wlan_objmgr_pdev *pdev, + struct green_ap_user_cfg *green_ap_cfg); + +/** + * ucfg_green_ap_enable_egap() - Enable enhanced green ap + * @pdev: pdev pointer + * + * Return: Success or Failure + */ +QDF_STATUS ucfg_green_ap_enable_egap(struct wlan_objmgr_pdev *pdev); + +/** + * ucfg_green_ap_set_ps_config() - Set ps value + * @pdev: pdev pointer + * @value - value to be set + * + * Return: Success or 
Failure + */ +QDF_STATUS ucfg_green_ap_set_ps_config(struct wlan_objmgr_pdev *pdev, + uint8_t value); +/** + * ucfg_green_ap_get_ps_config() - Check if ps is enabled or not + * @pdev: pdev pointer + * @ps_enable: pointer to ps enable config value + * + * Return: Success or Failure + */ +QDF_STATUS ucfg_green_ap_get_ps_config(struct wlan_objmgr_pdev *pdev, + uint8_t *ps_enable); + +/** + * ucfg_green_ap_set_transition_time() - Set transition time + * @pdev: pdev pointer + * @val: transition time + * + * This API sets custom transition time + * + * Return: Success or Failure + */ +QDF_STATUS ucfg_green_ap_set_transition_time(struct wlan_objmgr_pdev *pdev, + uint32_t val); + +/** + * ucfg_green_ap_get_transition_time() - Get transition time + * @pdev: pdev pointer + * @ps_trans_time: pointer to transition time + * + * This API gets transition time + * + * Return: Success or Failure + */ +QDF_STATUS ucfg_green_ap_get_transition_time(struct wlan_objmgr_pdev *pdev, + uint32_t *ps_trans_time); + +/** + * ucfg_green_ap_config() - Config green AP + * @pdev: pdev pointer + * + * Return: Success or Failure + */ +QDF_STATUS ucfg_green_ap_config(struct wlan_objmgr_pdev *pdev, uint8_t val); + +/** + * ucfg_green_ap_enable_debug_prints() - Enable debugs + * @pdev: pdev pointer + * + * Return: None + */ +void ucfg_green_ap_enable_debug_prints(struct wlan_objmgr_pdev *pdev, + uint32_t val); + +/** + * ucfg_green_ap_get_debug_prints() - Check if debug enabled + * @pdev: pdev pointer + * + * Return: Debug value + */ +bool ucfg_green_ap_get_debug_prints(struct wlan_objmgr_pdev *pdev); +#endif /* _WLAN_GREEN_AP_UCFG_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/src/wlan_green_ap_api.c b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/src/wlan_green_ap_api.c new file mode 100644 index 0000000000000000000000000000000000000000..2c407e2c0f3b793df0cd2f8a41fbc550f03c0029 --- /dev/null +++ 
b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/src/wlan_green_ap_api.c @@ -0,0 +1,397 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains green ap north bound interface definitions + */ +#include +#include <../../core/src/wlan_green_ap_main_i.h> +#include + +QDF_STATUS wlan_green_ap_get_capab( + struct wlan_objmgr_pdev *pdev) +{ + struct wlan_lmac_if_green_ap_tx_ops *green_ap_tx_ops; + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_GREEN_AP); + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + + green_ap_tx_ops = wlan_psoc_get_green_ap_tx_ops(green_ap_ctx); + if (!green_ap_tx_ops) { + green_ap_err("green ap tx ops obtained are NULL"); + return QDF_STATUS_E_EXISTS; + } + + if (green_ap_tx_ops->get_capab) + return green_ap_tx_ops->get_capab(pdev); + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_green_ap_pdev_obj_create_notification() - called from objmgr when pdev + * is created + * @pdev: pdev context + * @arg: argument + * + * This function gets called from object manager when pdev is being created and + * 
creates green ap context and attach it to objmgr. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS wlan_green_ap_pdev_obj_create_notification( + struct wlan_objmgr_pdev *pdev, void *arg) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = qdf_mem_malloc(sizeof(*green_ap_ctx)); + if (!green_ap_ctx) { + green_ap_err("Memory allocation for Green AP context failed!"); + return QDF_STATUS_E_NOMEM; + } + + green_ap_ctx->ps_state = WLAN_GREEN_AP_PS_IDLE_STATE; + green_ap_ctx->ps_event = WLAN_GREEN_AP_PS_WAIT_EVENT; + green_ap_ctx->num_nodes = 0; + green_ap_ctx->ps_on_time = WLAN_GREEN_AP_PS_ON_TIME; + green_ap_ctx->ps_trans_time = WLAN_GREEN_AP_PS_TRANS_TIME; + + green_ap_ctx->pdev = pdev; + + qdf_timer_init(NULL, &green_ap_ctx->ps_timer, + wlan_green_ap_timer_fn, + pdev, QDF_TIMER_TYPE_WAKE_APPS); + + qdf_spinlock_create(&green_ap_ctx->lock); + if (wlan_objmgr_pdev_component_obj_attach(pdev, + WLAN_UMAC_COMP_GREEN_AP, + green_ap_ctx, QDF_STATUS_SUCCESS) + != QDF_STATUS_SUCCESS) { + green_ap_err("Failed to attach green ap ctx in pdev ctx"); + status = QDF_STATUS_E_FAILURE; + goto err_pdev_attach; + } + + green_ap_info("Green AP creation successful, green ap ctx: %pK, pdev: %pK", + green_ap_ctx, pdev); + + return QDF_STATUS_SUCCESS; + +err_pdev_attach: + qdf_spinlock_destroy(&green_ap_ctx->lock); + qdf_timer_free(&green_ap_ctx->ps_timer); + qdf_mem_free(green_ap_ctx); + return status; +} + +/** + * wlan_green_ap_pdev_obj_destroy_notification() - called from objmgr when + * pdev is destroyed + * @pdev: pdev context + * @arg: argument + * + * This function gets called from object manager when pdev is being destroyed + * and deletes green ap context and detach it from objmgr. 
+ *
+ * Return: QDF_STATUS_SUCCESS - in case of success
+ */
+static QDF_STATUS wlan_green_ap_pdev_obj_destroy_notification(
+			struct wlan_objmgr_pdev *pdev, void *arg)
+{
+	struct wlan_pdev_green_ap_ctx *green_ap_ctx;
+
+	if (!pdev) {
+		green_ap_err("pdev context passed is NULL");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj(
+			pdev, WLAN_UMAC_COMP_GREEN_AP);
+	if (!green_ap_ctx) {
+		green_ap_err("green ap context is already NULL");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	green_ap_info("Deleting green ap pdev obj, green ap ctx: %pK, pdev: %pK",
+		      green_ap_ctx, pdev);
+
+	/* Detach from the pdev (not psoc) - component was attached in create */
+	if (wlan_objmgr_pdev_component_obj_detach(pdev,
+				WLAN_UMAC_COMP_GREEN_AP, green_ap_ctx) !=
+				QDF_STATUS_SUCCESS) {
+		green_ap_err("Failed to detach green ap ctx in pdev ctx");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	qdf_timer_free(&green_ap_ctx->ps_timer);
+	qdf_spinlock_destroy(&green_ap_ctx->lock);
+
+	qdf_mem_free(green_ap_ctx);
+	green_ap_info("green ap deletion successful, pdev: %pK", pdev);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS wlan_green_ap_init(void)
+{
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+
+	status = wlan_objmgr_register_pdev_create_handler(
+				WLAN_UMAC_COMP_GREEN_AP,
+				wlan_green_ap_pdev_obj_create_notification,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		green_ap_err("Failed to register green ap obj create handler");
+		goto err_pdev_create;
+	}
+
+	status = wlan_objmgr_register_pdev_destroy_handler(
+				WLAN_UMAC_COMP_GREEN_AP,
+				wlan_green_ap_pdev_obj_destroy_notification,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		green_ap_err("Failed to register green ap obj destroy handler");
+		goto err_pdev_delete;
+	}
+
+	green_ap_info("Successfully registered create and destroy handlers with objmgr");
+	return QDF_STATUS_SUCCESS;
+
+err_pdev_delete:
+	wlan_objmgr_unregister_pdev_create_handler(
+				WLAN_UMAC_COMP_GREEN_AP,
+				wlan_green_ap_pdev_obj_create_notification,
+				NULL);
+err_pdev_create:
+	return status;
+}
+
+QDF_STATUS 
wlan_green_ap_deinit(void) +{ + if (wlan_objmgr_unregister_pdev_create_handler( + WLAN_UMAC_COMP_GREEN_AP, + wlan_green_ap_pdev_obj_create_notification, + NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + if (wlan_objmgr_unregister_pdev_destroy_handler( + WLAN_UMAC_COMP_GREEN_AP, + wlan_green_ap_pdev_obj_destroy_notification, + NULL) + != QDF_STATUS_SUCCESS) { + return QDF_STATUS_E_FAILURE; + } + + green_ap_info("Successfully unregistered create and destroy handlers with objmgr"); + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wlan_green_ap_start(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_debug("Green AP start received"); + + /* Make sure the start function does not get called 2 times */ + qdf_spin_lock_bh(&green_ap_ctx->lock); + + if (wlan_is_egap_enabled(green_ap_ctx)) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + green_ap_debug("enhanced green ap support is enabled"); + return QDF_STATUS_SUCCESS; + } + + if (green_ap_ctx->ps_state == WLAN_GREEN_AP_PS_IDLE_STATE) { + if (green_ap_ctx->ps_enable) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + return wlan_green_ap_state_mc(green_ap_ctx, + WLAN_GREEN_AP_PS_START_EVENT); + } + } + + qdf_spin_unlock_bh(&green_ap_ctx->lock); + return QDF_STATUS_E_ALREADY; +} + +QDF_STATUS wlan_green_ap_stop(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return 
QDF_STATUS_E_FAILURE; + } + + green_ap_debug("Green AP stop received"); + + qdf_spin_lock_bh(&green_ap_ctx->lock); + if (wlan_is_egap_enabled(green_ap_ctx)) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + green_ap_debug("enhanced green ap support is enabled"); + return QDF_STATUS_SUCCESS; + } + + /* Delete the timer just to be sure */ + qdf_timer_stop(&green_ap_ctx->ps_timer); + + /* Disable the power save */ + green_ap_ctx->ps_enable = WLAN_GREEN_AP_PS_DISABLE; + + qdf_spin_unlock_bh(&green_ap_ctx->lock); + return wlan_green_ap_state_mc(green_ap_ctx, + WLAN_GREEN_AP_PS_STOP_EVENT); +} + +QDF_STATUS wlan_green_ap_add_sta(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_debug("Green AP add sta received"); + + qdf_spin_lock_bh(&green_ap_ctx->lock); + if (wlan_is_egap_enabled(green_ap_ctx)) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + green_ap_debug("enhanced green ap support is enabled"); + return QDF_STATUS_SUCCESS; + } + qdf_spin_unlock_bh(&green_ap_ctx->lock); + + return wlan_green_ap_state_mc(green_ap_ctx, + WLAN_GREEN_AP_ADD_STA_EVENT); +} + +QDF_STATUS wlan_green_ap_del_sta(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + green_ap_debug("Green AP del sta received"); + + qdf_spin_lock_bh(&green_ap_ctx->lock); + if (wlan_is_egap_enabled(green_ap_ctx)) { + 
qdf_spin_unlock_bh(&green_ap_ctx->lock); + green_ap_info("enhanced green ap support is enabled"); + return QDF_STATUS_SUCCESS; + } + qdf_spin_unlock_bh(&green_ap_ctx->lock); + + return wlan_green_ap_state_mc(green_ap_ctx, + WLAN_GREEN_AP_DEL_STA_EVENT); +} + +bool wlan_green_ap_is_ps_enabled(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if ((green_ap_ctx->ps_state == WLAN_GREEN_AP_PS_ON_STATE) && + (green_ap_ctx->ps_enable)) + return true; + + return false; + +} + +void wlan_green_ap_suspend_handle(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return; + } + + wlan_green_ap_stop(pdev); + + green_ap_ctx->ps_enable = WLAN_GREEN_AP_PS_SUSPEND; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/src/wlan_green_ap_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/src/wlan_green_ap_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..40f580cdcd193e9acdfac150c05b5916b0fdb740 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/green_ap/dispatcher/src/wlan_green_ap_ucfg_api.c @@ -0,0 +1,281 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains green ap north bound interface definitions + */ + +#include +#include +#include <../../core/src/wlan_green_ap_main_i.h> + +QDF_STATUS ucfg_green_ap_update_user_config( + struct wlan_objmgr_pdev *pdev, + struct green_ap_user_cfg *green_ap_cfg) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + struct wlan_green_ap_egap_params *egap_params; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_spin_lock_bh(&green_ap_ctx->lock); + egap_params = &green_ap_ctx->egap_params; + + egap_params->host_enable_egap = green_ap_cfg->host_enable_egap; + egap_params->egap_inactivity_time = green_ap_cfg->egap_inactivity_time; + egap_params->egap_wait_time = green_ap_cfg->egap_wait_time; + egap_params->egap_feature_flags = green_ap_cfg->egap_feature_flags; + qdf_spin_unlock_bh(&green_ap_ctx->lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_green_ap_enable_egap(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + 
struct wlan_lmac_if_green_ap_tx_ops *green_ap_tx_ops;
+
+	if (!pdev) {
+		green_ap_err("pdev context passed is NULL");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj(
+			pdev, WLAN_UMAC_COMP_GREEN_AP);
+	if (!green_ap_ctx) {
+		green_ap_err("green ap context obtained is NULL");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	green_ap_tx_ops = wlan_psoc_get_green_ap_tx_ops(green_ap_ctx);
+	if (!green_ap_tx_ops) {
+		green_ap_err("green ap tx ops obtained are NULL");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (!green_ap_tx_ops->enable_egap) {
+		green_ap_err("tx op for sending enable/disable green ap is NULL");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return green_ap_tx_ops->enable_egap(pdev, &green_ap_ctx->egap_params);
+}
+
+QDF_STATUS ucfg_green_ap_set_ps_config(struct wlan_objmgr_pdev *pdev,
+				       uint8_t value)
+{
+	struct wlan_pdev_green_ap_ctx *green_ap_ctx;
+
+	if (!pdev) {
+		green_ap_err("pdev context passed is NULL");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj(
+			pdev, WLAN_UMAC_COMP_GREEN_AP);
+	if (!green_ap_ctx) {
+		green_ap_err("green ap context obtained is NULL");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	/* egap (firmware-offloaded) mode owns the ps state; host config is
+	 * a no-op in that case.
+	 */
+	qdf_spin_lock_bh(&green_ap_ctx->lock);
+	if (wlan_is_egap_enabled(green_ap_ctx)) {
+		qdf_spin_unlock_bh(&green_ap_ctx->lock);
+		return QDF_STATUS_SUCCESS;
+	}
+
+	green_ap_ctx->ps_enable = value;
+	qdf_spin_unlock_bh(&green_ap_ctx->lock);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS ucfg_green_ap_get_ps_config(struct wlan_objmgr_pdev *pdev,
+				       uint8_t *ps_enable)
+{
+	struct wlan_pdev_green_ap_ctx *green_ap_ctx;
+
+	if (!pdev) {
+		green_ap_err("pdev context passed is NULL");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj(
+			pdev, WLAN_UMAC_COMP_GREEN_AP);
+
+	if (!green_ap_ctx) {
+		green_ap_err("green ap context obtained is NULL");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	qdf_spin_lock_bh(&green_ap_ctx->lock);
+	if 
(wlan_is_egap_enabled(green_ap_ctx)) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + return QDF_STATUS_SUCCESS; + } + + *ps_enable = green_ap_ctx->ps_enable; + qdf_spin_unlock_bh(&green_ap_ctx->lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_green_ap_set_transition_time(struct wlan_objmgr_pdev *pdev, + uint32_t val) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_spin_lock_bh(&green_ap_ctx->lock); + if (wlan_is_egap_enabled(green_ap_ctx)) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + return QDF_STATUS_SUCCESS; + } + + green_ap_ctx->ps_trans_time = val; + qdf_spin_unlock_bh(&green_ap_ctx->lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_green_ap_get_transition_time(struct wlan_objmgr_pdev *pdev, + uint32_t *ps_trans_time) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + if (!pdev) { + green_ap_err("pdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_spin_lock_bh(&green_ap_ctx->lock); + if (wlan_is_egap_enabled(green_ap_ctx)) { + qdf_spin_unlock_bh(&green_ap_ctx->lock); + return QDF_STATUS_SUCCESS; + } + + *ps_trans_time = green_ap_ctx->ps_trans_time; + qdf_spin_unlock_bh(&green_ap_ctx->lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_green_ap_config(struct wlan_objmgr_pdev *pdev, uint8_t val) +{ + + uint8_t flag; + + if (wlan_green_ap_get_capab(pdev) == QDF_STATUS_E_NOSUPPORT) { + green_ap_err("GreenAP not supported on radio\n"); + return QDF_STATUS_E_NOSUPPORT; + } + + if (val) { + struct 
wlan_pdev_green_ap_ctx *green_ap_ctx; + + wlan_objmgr_pdev_iterate_obj_list(pdev, + WLAN_VDEV_OP, + wlan_green_ap_check_mode, + &flag, 0, WLAN_GREEN_AP_ID); + if (flag == 1) { + green_ap_err("Radio not in AP mode." + "Feature not supported"); + return QDF_STATUS_E_NOSUPPORT; + } + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_GREEN_AP); + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return QDF_STATUS_E_NOSUPPORT; + } + + ucfg_green_ap_set_ps_config(pdev, val); + + if (wlan_util_is_vap_active(pdev, WLAN_GREEN_AP_ID) == + QDF_STATUS_SUCCESS) + wlan_green_ap_start(pdev); + } else { + wlan_green_ap_stop(pdev); + } + + return QDF_STATUS_SUCCESS; +} + +void ucfg_green_ap_enable_debug_prints(struct wlan_objmgr_pdev *pdev, + uint32_t val) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return; + } + + green_ap_ctx->dbg_enable = val; +} + +bool ucfg_green_ap_get_debug_prints(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_pdev_green_ap_ctx *green_ap_ctx; + + green_ap_ctx = wlan_objmgr_pdev_get_comp_private_obj( + pdev, WLAN_UMAC_COMP_GREEN_AP); + + if (!green_ap_ctx) { + green_ap_err("green ap context obtained is NULL"); + return false; + } + + return green_ap_ctx->dbg_enable; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/nan/core/inc/nan_public_structs.h b/drivers/staging/qca-wifi-host-cmn/umac/nan/core/inc/nan_public_structs.h new file mode 100644 index 0000000000000000000000000000000000000000..7f65d9ee33e936b1ae48e109e27ac8c958d25013 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/nan/core/inc/nan_public_structs.h @@ -0,0 +1,641 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: contains nan definitions exposed to other modules + */ + +#ifdef WLAN_FEATURE_NAN_CONVERGENCE +#ifndef _NAN_PUBLIC_STRUCTS_H_ +#define _NAN_PUBLIC_STRUCTS_H_ + +#include "qdf_types.h" +#include "qdf_status.h" + +struct wlan_objmgr_psoc; +struct wlan_objmgr_vdev; + +#define IFACE_NAME_SIZE 64 +#define NDP_QOS_INFO_LEN 255 +#define NDP_APP_INFO_LEN 255 +#define NDP_PMK_LEN 32 +#define NDP_SCID_BUF_LEN 256 +#define NDP_NUM_INSTANCE_ID 255 +#define NAN_MAX_SERVICE_NAME_LEN 255 +#define NAN_PASSPHRASE_MIN_LEN 8 +#define NAN_PASSPHRASE_MAX_LEN 63 +#define NAN_CH_INFO_MAX_CHANNELS 4 + +/** + * enum nan_datapath_msg_type - NDP msg type + * @NAN_DATAPATH_INF_CREATE_REQ: ndi create request + * @NAN_DATAPATH_INF_CREATE_RSP: ndi create response + * @NAN_DATAPATH_INF_DELETE_REQ: ndi delete request + * @NAN_DATAPATH_INF_DELETE_RSP: ndi delete response + * @NDP_INITIATOR_REQ: ndp initiator request + * @NDP_INITIATOR_RSP: ndp initiator response + * @NDP_RESPONDER_REQ: ndp responder request + * @NDP_RESPONDER_RSP: ndp responder response + * @NDP_END_REQ: ndp end request + * @NDP_END_RSP: ndp end response + * @NDP_INDICATION: ndp indication + * @NDP_CONFIRM: ndp confirm + * @NDP_END_IND: ndp end indication + * @NDP_NEW_PEER: 
ndp new peer created + * @NDP_PEER_DEPARTED: ndp peer departed/deleted + * @NDP_SCHEDULE_UPDATE: ndp schedule update + */ +enum nan_datapath_msg_type { + NAN_DATAPATH_INF_CREATE_REQ = 0, + NAN_DATAPATH_INF_CREATE_RSP = 1, + NAN_DATAPATH_INF_DELETE_REQ = 2, + NAN_DATAPATH_INF_DELETE_RSP = 3, + NDP_INITIATOR_REQ = 4, + NDP_INITIATOR_RSP = 5, + NDP_RESPONDER_REQ = 6, + NDP_RESPONDER_RSP = 7, + NDP_END_REQ = 8, + NDP_END_RSP = 9, + NDP_INDICATION = 10, + NDP_CONFIRM = 11, + NDP_END_IND = 12, + NDP_NEW_PEER = 13, + NDP_PEER_DEPARTED = 14, + NDP_SCHEDULE_UPDATE = 15, +}; + +/** + * enum nan_datapath_status_type - NDP status type + * @NAN_DATAPATH_RSP_STATUS_SUCCESS: request was successful + * @NAN_DATAPATH_RSP_STATUS_ERROR: request failed + */ +enum nan_datapath_status_type { + NAN_DATAPATH_RSP_STATUS_SUCCESS = 0x00, + NAN_DATAPATH_RSP_STATUS_ERROR = 0x01, +}; + +/** + * enum nan_datapath_reason_code - NDP command rsp reason code value + * @NDP_UNSUPPORTED_CONCURRENCY: Will be used in unsupported concurrency cases + * @NDP_NAN_DATA_IFACE_CREATE_FAILED: ndi create failed + * @NDP_NAN_DATA_IFACE_DELETE_FAILED: ndi delete failed + * @NDP_DATA_INITIATOR_REQ_FAILED: data initiator request failed + * @NDP_DATA_RESPONDER_REQ_FAILED: data responder request failed + * @NDP_INVALID_SERVICE_INSTANCE_ID: invalid service instance id + * @NDP_INVALID_NDP_INSTANCE_ID: invalid ndp instance id + * @NDP_INVALID_RSP_CODE: invalid response code in ndp responder request + * @NDP_INVALID_APP_INFO_LEN: invalid app info length + * @NDP_NMF_REQ_FAIL: OTA nan mgmt frame failure for data request + * @NDP_NMF_RSP_FAIL: OTA nan mgmt frame failure for data response + * @NDP_NMF_CNF_FAIL: OTA nan mgmt frame failure for confirm + * @NDP_END_FAILED: ndp end failed + * @NDP_NMF_END_REQ_FAIL: OTA nan mgmt frame failure for data end + * @NDP_VENDOR_SPECIFIC_ERROR: other vendor specific failures + */ +enum nan_datapath_reason_code { + NAN_DATAPATH_UNSUPPORTED_CONCURRENCY = 9000, + 
NAN_DATAPATH_NAN_DATA_IFACE_CREATE_FAILED = 9001, + NAN_DATAPATH_NAN_DATA_IFACE_DELETE_FAILED = 9002, + NAN_DATAPATH_DATA_INITIATOR_REQ_FAILED = 9003, + NAN_DATAPATH_DATA_RESPONDER_REQ_FAILED = 9004, + NAN_DATAPATH_INVALID_SERVICE_INSTANCE_ID = 9005, + NAN_DATAPATH_INVALID_NDP_INSTANCE_ID = 9006, + NAN_DATAPATH_INVALID_RSP_CODE = 9007, + NAN_DATAPATH_INVALID_APP_INFO_LEN = 9008, + NAN_DATAPATH_NMF_REQ_FAIL = 9009, + NAN_DATAPATH_NMF_RSP_FAIL = 9010, + NAN_DATAPATH_NMF_CNF_FAIL = 9011, + NAN_DATAPATH_END_FAILED = 9012, + NAN_DATAPATH_NMF_END_REQ_FAIL = 9013, + /* 9500 onwards vendor specific error codes */ + NAN_DATAPATH_VENDOR_SPECIFIC_ERROR = 9500, +}; + +/** + * enum nan_datapath_response_code - responder's response code to nan data path + * request + * @NAN_DATAPATH_RESPONSE_ACCEPT: ndp request accepted + * @NAN_DATAPATH_RESPONSE_REJECT: ndp request rejected + * @NAN_DATAPATH_RESPONSE_DEFER: ndp request deferred until later (response to + * follow any time later) + * + */ +enum nan_datapath_response_code { + NAN_DATAPATH_RESPONSE_ACCEPT = 0, + NAN_DATAPATH_RESPONSE_REJECT = 1, + NAN_DATAPATH_RESPONSE_DEFER = 2, +}; + +/** + * enum nan_datapath_accept_policy - nan data path accept policy + * @NAN_DATAPATH_ACCEPT_POLICY_NONE: the framework will decide the policy + * @NAN_DATAPATH_ACCEPT_POLICY_ALL: accept policy offloaded to fw + * + */ +enum nan_datapath_accept_policy { + NAN_DATAPATH_ACCEPT_POLICY_NONE = 0, + NAN_DATAPATH_ACCEPT_POLICY_ALL = 1, +}; + +/** + * enum nan_datapath_self_role - nan data path role + * @NAN_DATAPATH_ROLE_INITIATOR: initiator of nan data path request + * @NAN_DATAPATH_ROLE_RESPONDER: responder to nan data path request + * + */ +enum nan_datapath_self_role { + NAN_DATAPATH_ROLE_INITIATOR = 0, + NAN_DATAPATH_ROLE_RESPONDER = 1, +}; + +/** + * enum nan_datapath_end_type - NDP end type + * @NAN_DATAPATH_END_TYPE_UNSPECIFIED: type is unspecified + * @NAN_DATAPATH_END_TYPE_PEER_UNAVAILABLE: type is peer unavailable + * 
@NAN_DATAPATH_END_TYPE_OTA_FRAME: NDP end frame received from peer
+ *
+ */
+enum nan_datapath_end_type {
+	NAN_DATAPATH_END_TYPE_UNSPECIFIED = 0x00,
+	NAN_DATAPATH_END_TYPE_PEER_UNAVAILABLE = 0x01,
+	NAN_DATAPATH_END_TYPE_OTA_FRAME = 0x02,
+};
+
+/**
+ * enum nan_datapath_end_reason_code - NDP end reason code
+ * @NAN_DATAPATH_END_REASON_UNSPECIFIED: reason is unspecified
+ * @NAN_DATAPATH_END_REASON_INACTIVITY: reason is peer inactivity
+ * @NAN_DATAPATH_END_REASON_PEER_DATA_END: data end indication received from
+ * peer
+ *
+ */
+enum nan_datapath_end_reason_code {
+	NAN_DATAPATH_END_REASON_UNSPECIFIED = 0x00,
+	NAN_DATAPATH_END_REASON_INACTIVITY = 0x01,
+	NAN_DATAPATH_END_REASON_PEER_DATA_END = 0x02,
+};
+
+/**
+ * enum nan_datapath_state - NAN datapath states
+ * @NAN_DATA_INVALID_STATE: no datapath state / invalid
+ * @NAN_DATA_NDI_CREATING_STATE: NDI create is in progress
+ * @NAN_DATA_NDI_CREATED_STATE: NDI successfully created
+ * @NAN_DATA_NDI_DELETING_STATE: NDI delete is in progress
+ * @NAN_DATA_NDI_DELETED_STATE: NDI successfully deleted
+ * @NAN_DATA_PEER_CREATE_STATE: Peer create is in progress
+ * @NAN_DATA_PEER_DELETE_STATE: Peer delete is in progress
+ * @NAN_DATA_CONNECTING_STATE: Data connection in progress
+ * @NAN_DATA_CONNECTED_STATE: Data connection successful
+ * @NAN_DATA_END_STATE: NDP end is in progress
+ * @NAN_DATA_DISCONNECTED_STATE: NDP is in disconnected state
+ */
+enum nan_datapath_state {
+	NAN_DATA_INVALID_STATE = -1,
+	NAN_DATA_NDI_CREATING_STATE = 0,
+	NAN_DATA_NDI_CREATED_STATE = 1,
+	NAN_DATA_NDI_DELETING_STATE = 2,
+	NAN_DATA_NDI_DELETED_STATE = 3,
+	NAN_DATA_PEER_CREATE_STATE = 4,
+	NAN_DATA_PEER_DELETE_STATE = 5,
+	NAN_DATA_CONNECTING_STATE = 6,
+	NAN_DATA_CONNECTED_STATE = 7,
+	NAN_DATA_END_STATE = 8,
+	NAN_DATA_DISCONNECTED_STATE = 9,
+};
+
+/**
+ * struct nan_datapath_app_info - application info shared during ndp setup
+ * @ndp_app_info_len: ndp app info length
+ * @ndp_app_info: variable length application information
+ *
+ */
+struct nan_datapath_app_info {
+	uint32_t 
ndp_app_info_len;
+	uint8_t ndp_app_info[NDP_APP_INFO_LEN];
+};
+
+/**
+ * struct nan_datapath_cfg - ndp configuration
+ * @ndp_cfg_len: ndp configuration length
+ * @ndp_cfg: variable length ndp configuration
+ *
+ */
+struct nan_datapath_cfg {
+	uint32_t ndp_cfg_len;
+	uint8_t ndp_cfg[NDP_QOS_INFO_LEN];
+};
+
+/**
+ * struct nan_datapath_pmk - structure to hold pairwise master key
+ * @pmk_len: length of pairwise master key
+ * @pmk: buffer containing pairwise master key
+ *
+ */
+struct nan_datapath_pmk {
+	uint32_t pmk_len;
+	uint8_t pmk[NDP_PMK_LEN];
+};
+
+/**
+ * struct nan_datapath_scid - structure to hold security context identifier
+ * @scid_len: length of scid
+ * @scid: scid
+ *
+ */
+struct nan_datapath_scid {
+	uint32_t scid_len;
+	uint8_t scid[NDP_SCID_BUF_LEN];
+};
+
+/**
+ * struct ndp_passphrase - structure to hold passphrase
+ * @passphrase_len: length of passphrase
+ * @passphrase: buffer containing passphrase
+ *
+ */
+struct ndp_passphrase {
+	uint32_t passphrase_len;
+	uint8_t passphrase[NAN_PASSPHRASE_MAX_LEN];
+};
+
+/**
+ * struct ndp_service_name - structure to hold service_name
+ * @service_name_len: length of service_name
+ * @service_name: buffer containing service_name
+ *
+ */
+struct ndp_service_name {
+	uint32_t service_name_len;
+	uint8_t service_name[NAN_MAX_SERVICE_NAME_LEN];
+};
+
+/**
+ * struct peer_nan_datapath_map - mapping of NDP instances to peer to VDEV
+ * @vdev_id: session id of the interface over which ndp is being created
+ * @peer_ndi_mac_addr: peer NDI mac address
+ * @num_active_ndp_sessions: number of active NDP sessions on the peer
+ * @type: NDP end indication type
+ * @reason_code: NDP end indication reason code
+ * @ndp_instance_id: NDP instance ID
+ *
+ */
+struct peer_nan_datapath_map {
+	uint32_t vdev_id;
+	struct qdf_mac_addr peer_ndi_mac_addr;
+	uint32_t num_active_ndp_sessions;
+	enum nan_datapath_end_type type;
+	enum nan_datapath_end_reason_code reason_code;
+	uint32_t ndp_instance_id;
+};
+
+/**
+ * 
struct nan_datapath_channel_info - ndp channel and channel bandwidth + * @channel: channel freq in mhz of the ndp connection + * @ch_width: channel width (wmi_channel_width) of the ndp connection + * @nss: nss used for ndp connection + * + */ +struct nan_datapath_channel_info { + uint32_t channel; + uint32_t ch_width; + uint32_t nss; +}; + +#define NAN_CH_INFO_MAX_LEN \ + (NAN_CH_INFO_MAX_CHANNELS * sizeof(struct nan_datapath_channel_info)) + +/** + * struct nan_datapath_inf_create_req - ndi create request params + * @transaction_id: unique identifier + * @iface_name: interface name + * + */ +struct nan_datapath_inf_create_req { + uint32_t transaction_id; + char iface_name[IFACE_NAME_SIZE]; +}; + +/* + * struct nan_datapath_inf_create_rsp - ndi create response params + * @status: request status + * @reason: reason if any + * + */ +struct nan_datapath_inf_create_rsp { + uint32_t status; + uint32_t reason; + uint8_t sta_id; +}; + +/** + * struct nan_datapath_inf_delete_rsp - ndi delete response params + * @status: request status + * @reason: reason if any + * + */ +struct nan_datapath_inf_delete_rsp { + uint32_t status; + uint32_t reason; +}; + +/** + * struct nan_datapath_initiator_req - ndp initiator request params + * @vdev: pointer to vdev object + * @transaction_id: unique identifier + * @channel: suggested channel for ndp creation + * @channel_cfg: channel config, 0=no channel, 1=optional, 2=mandatory + * @service_instance_id: Service identifier + * @peer_discovery_mac_addr: Peer's discovery mac address + * @self_ndi_mac_addr: self NDI mac address + * @ndp_config: ndp configuration params + * @ndp_info: ndp application info + * @ncs_sk_type: indicates NCS_SK_128 or NCS_SK_256 + * @pmk: pairwise master key + * @passphrase: passphrase + * @service_name: service name + * @is_ipv6_addr_present: indicates if following ipv6 address is valid + * @ipv6_addr: ipv6 address address used by ndp + */ +struct nan_datapath_initiator_req { + struct wlan_objmgr_vdev *vdev; + 
uint32_t transaction_id; + uint32_t channel; + uint32_t channel_cfg; + uint32_t service_instance_id; + uint32_t ncs_sk_type; + struct qdf_mac_addr peer_discovery_mac_addr; + struct qdf_mac_addr self_ndi_mac_addr; + struct nan_datapath_cfg ndp_config; + struct nan_datapath_app_info ndp_info; + struct nan_datapath_pmk pmk; + struct ndp_passphrase passphrase; + struct ndp_service_name service_name; + bool is_ipv6_addr_present; + uint8_t ipv6_addr[QDF_IPV6_ADDR_SIZE]; +}; + +/** + * struct nan_datapath_initiator_rsp - response event from FW + * @vdev: pointer to vdev object + * @transaction_id: unique identifier + * @ndp_instance_id: locally created NDP instance ID + * @status: status of the ndp request + * @reason: reason for failure if any + * + */ +struct nan_datapath_initiator_rsp { + struct wlan_objmgr_vdev *vdev; + uint32_t transaction_id; + uint32_t ndp_instance_id; + uint32_t status; + uint32_t reason; +}; + +/** + * struct nan_datapath_responder_req - responder's response to ndp create + * request + * @vdev: pointer to vdev object + * @transaction_id: unique identifier + * @ndp_instance_id: locally created NDP instance ID + * @ndp_rsp: response to the ndp create request + * @ndp_config: ndp configuration params + * @ndp_info: ndp application info + * @pmk: pairwise master key + * @ncs_sk_type: indicates NCS_SK_128 or NCS_SK_256 + * @passphrase: passphrase + * @service_name: service name + * @is_ipv6_addr_present: indicates if following ipv6 address is valid + * @ipv6_addr: ipv6 address address used by ndp + * @is_port_present: indicates if following port is valid + * @port: port specified by for this NDP + * @is_protocol_present: indicates if following protocol is valid + * @protocol: protocol used by this NDP + * + */ +struct nan_datapath_responder_req { + struct wlan_objmgr_vdev *vdev; + uint32_t transaction_id; + uint32_t ndp_instance_id; + enum nan_datapath_response_code ndp_rsp; + struct nan_datapath_cfg ndp_config; + struct nan_datapath_app_info 
ndp_info; + struct nan_datapath_pmk pmk; + uint32_t ncs_sk_type; + struct ndp_passphrase passphrase; + struct ndp_service_name service_name; + bool is_ipv6_addr_present; + uint8_t ipv6_addr[QDF_IPV6_ADDR_SIZE]; + bool is_port_present; + uint16_t port; + bool is_protocol_present; + uint8_t protocol; +}; + +/** + * struct nan_datapath_responder_rsp - response to responder's request + * @vdev: pointer to vdev object + * @transaction_id: unique identifier + * @status: command status + * @reason: reason for failure if any + * @peer_mac_addr: Peer's mac address + * @create_peer: Flag to indicate to create peer + */ +struct nan_datapath_responder_rsp { + struct wlan_objmgr_vdev *vdev; + uint32_t transaction_id; + uint32_t status; + uint32_t reason; + struct qdf_mac_addr peer_mac_addr; + bool create_peer; +}; + +/** + * struct nan_datapath_end_req - ndp end request + * @vdev: pointer to vdev object + * @transaction_id: unique transaction identifier + * @num_ndp_instances: number of ndp instances to be terminated + * @ndp_ids: array of ndp_instance_id to be terminated + * + */ +struct nan_datapath_end_req { + struct wlan_objmgr_vdev *vdev; + uint32_t transaction_id; + uint32_t num_ndp_instances; + uint32_t ndp_ids[NDP_NUM_INSTANCE_ID]; +}; + +/** + * struct nan_datapath_end_rsp_event - firmware response to ndp end request + * @vdev: pointer to vdev object + * @transaction_id: unique identifier for the request + * @status: status of operation + * @reason: reason(opaque to host driver) + * + */ +struct nan_datapath_end_rsp_event { + struct wlan_objmgr_vdev *vdev; + uint32_t transaction_id; + uint32_t status; + uint32_t reason; +}; + +/** + * struct nan_datapath_end_indication_event - ndp termination notification from + * FW + * @vdev: pointer to vdev object + * @num_ndp_ids: number of NDP ids + * @ndp_map: mapping of NDP instances to peer and vdev + * + */ +struct nan_datapath_end_indication_event { + struct wlan_objmgr_vdev *vdev; + uint32_t num_ndp_ids; + struct 
peer_nan_datapath_map ndp_map[]; +}; + +/** + * struct nan_datapath_confirm_event - ndp confirmation event from FW + * @vdev: pointer to vdev object + * @ndp_instance_id: ndp instance id for which confirm is being generated + * @reason_code : reason code(opaque to driver) + * @num_active_ndps_on_peer: number of ndp instances on peer + * @peer_ndi_mac_addr: peer NDI mac address + * @rsp_code: ndp response code + * @num_channels: num channels + * @ch: channel info struct array + * @ndp_info: ndp application info + * @is_ipv6_addr_present: indicates if following ipv6 address is valid + * @ipv6_addr: ipv6 address address used by ndp + * @is_port_present: indicates if following port is valid + * @port: port specified by for this NDP + * @is_protocol_present: indicates if following protocol is valid + * @protocol: protocol used by this NDP + * + */ +struct nan_datapath_confirm_event { + struct wlan_objmgr_vdev *vdev; + uint32_t ndp_instance_id; + uint32_t reason_code; + uint32_t num_active_ndps_on_peer; + struct qdf_mac_addr peer_ndi_mac_addr; + enum nan_datapath_response_code rsp_code; + uint32_t num_channels; + struct nan_datapath_channel_info ch[NAN_CH_INFO_MAX_CHANNELS]; + struct nan_datapath_app_info ndp_info; + bool is_ipv6_addr_present; + uint8_t ipv6_addr[QDF_IPV6_ADDR_SIZE]; + bool is_port_present; + uint16_t port; + bool is_protocol_present; + uint8_t protocol; +}; + +/** + * struct nan_datapath_indication_event - create ndp indication on the responder + * @vdev: pointer to vdev object + * @service_instance_id: Service identifier + * @peer_discovery_mac_addr: Peer's discovery mac address + * @peer_mac_addr: Peer's NDI mac address + * @ndp_initiator_mac_addr: NDI mac address of the peer initiating NDP + * @ndp_instance_id: locally created NDP instance ID + * @role: self role for NDP + * @ndp_accept_policy: accept policy configured by the upper layer + * @ndp_config: ndp configuration params + * @ndp_info: ndp application info + * @ncs_sk_type: indicates 
NCS_SK_128 or NCS_SK_256 + * @scid: security context identifier + * @is_ipv6_addr_present: indicates if following ipv6 address is valid + * @ipv6_addr: ipv6 address address used by ndp + * + */ +struct nan_datapath_indication_event { + struct wlan_objmgr_vdev *vdev; + uint32_t service_instance_id; + struct qdf_mac_addr peer_discovery_mac_addr; + struct qdf_mac_addr peer_mac_addr; + uint32_t ndp_instance_id; + enum nan_datapath_self_role role; + enum nan_datapath_accept_policy policy; + struct nan_datapath_cfg ndp_config; + struct nan_datapath_app_info ndp_info; + uint32_t ncs_sk_type; + struct nan_datapath_scid scid; + bool is_ipv6_addr_present; + uint8_t ipv6_addr[QDF_IPV6_ADDR_SIZE]; +}; + +/** + * struct nan_datapath_peer_ind - ndp peer indication + * @peer_mac_addr: peer mac address + * @sta_id: station id + * + */ +struct nan_datapath_peer_ind { + struct qdf_mac_addr peer_mac_addr; + uint16_t sta_id; +}; + +/** + * struct nan_datapath_sch_update_event - ndp schedule update indication + * @vdev: vdev schedule update was received + * @peer_addr: peer for which schedule update was received + * @flags: reason for sch update (opaque to driver) + * @num_channels: num of channels + * @num_ndp_instances: num of ndp instances + * @ch: channel info array + * @ndp_instances: array of ndp instances + * + */ +struct nan_datapath_sch_update_event { + struct wlan_objmgr_vdev *vdev; + struct qdf_mac_addr peer_addr; + uint32_t flags; + uint32_t num_channels; + uint32_t num_ndp_instances; + struct nan_datapath_channel_info ch[NAN_CH_INFO_MAX_CHANNELS]; + uint32_t ndp_instances[NDP_NUM_INSTANCE_ID]; +}; + +/** + * struct nan_callbacks - struct containing callback to non-converged driver + * + */ +struct nan_callbacks { + /* callback to os_if layer from umac */ + void (*os_if_event_handler)(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev, + uint32_t type, void *msg); + + int (*ndi_open)(char *iface_name); + int (*ndi_start)(char *iface_name, uint16_t); + void 
(*ndi_close)(uint8_t); + int (*ndi_delete)(uint8_t, char *iface_name, uint16_t transaction_id); + void (*drv_ndi_create_rsp_handler)(uint8_t, + struct nan_datapath_inf_create_rsp *); + void (*drv_ndi_delete_rsp_handler)(uint8_t); + + int (*new_peer_ind)(uint8_t, uint16_t, struct qdf_mac_addr *, bool); + int (*get_peer_idx)(uint8_t, struct qdf_mac_addr *); + QDF_STATUS (*add_ndi_peer)(uint32_t, struct qdf_mac_addr); + + void (*peer_departed_ind)(uint8_t, uint16_t, struct qdf_mac_addr *, + bool); + void (*ndp_delete_peers)(struct peer_nan_datapath_map*, uint8_t); + void (*delete_peers_by_addr)(uint8_t, struct qdf_mac_addr); +}; + +#endif +#endif /* WLAN_FEATURE_NAN_CONVERGENCE */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/nan/core/inc/wlan_nan_api.h b/drivers/staging/qca-wifi-host-cmn/umac/nan/core/inc/wlan_nan_api.h new file mode 100644 index 0000000000000000000000000000000000000000..1498f23c37e1aac0a71026f66fe29cb40cf3b0d2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/nan/core/inc/wlan_nan_api.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */
+
+/**
+ * DOC: contains nan definitions exposed to other modules
+ */
+
+#ifndef _WLAN_NAN_API_H_
+#define _WLAN_NAN_API_H_
+
+#include "qdf_status.h"
+
+struct wlan_objmgr_psoc;
+
+/**
+ * nan_init: initializes NAN component, called by dispatcher init
+ *
+ * Return: status of operation
+ */
+QDF_STATUS nan_init(void);
+
+/**
+ * nan_deinit: de-initializes NAN component, called by dispatcher deinit
+ *
+ * Return: status of operation
+ */
+QDF_STATUS nan_deinit(void);
+
+/**
+ * nan_psoc_enable: psoc enable API for NAN component
+ * @psoc: pointer to PSOC
+ *
+ * Return: status of operation
+ */
+QDF_STATUS nan_psoc_enable(struct wlan_objmgr_psoc *psoc);
+
+/**
+ * nan_psoc_disable: psoc disable API for NAN component
+ * @psoc: pointer to PSOC
+ *
+ * Return: status of operation
+ */
+QDF_STATUS nan_psoc_disable(struct wlan_objmgr_psoc *psoc);
+
+#endif
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/nan/core/src/nan_api.c b/drivers/staging/qca-wifi-host-cmn/umac/nan/core/src/nan_api.c
new file mode 100644
index 0000000000000000000000000000000000000000..1e388110c4d155078f3d2e12c7a2cbd96b39e09c
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/nan/core/src/nan_api.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: contains nan public API function definitions + */ + +#include "nan_main_i.h" +#include "wlan_nan_api.h" +#include "target_if_nan.h" +#include "nan_public_structs.h" +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_global_obj.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_objmgr_pdev_obj.h" +#include "wlan_objmgr_vdev_obj.h" + +static QDF_STATUS nan_psoc_obj_created_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct nan_psoc_priv_obj *nan_obj; + + nan_debug("nan_psoc_create_notif called"); + nan_obj = qdf_mem_malloc(sizeof(*nan_obj)); + if (!nan_obj) { + nan_alert("malloc failed for nan prv obj"); + return QDF_STATUS_E_NOMEM; + } + + qdf_spinlock_create(&nan_obj->lock); + status = wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_UMAC_COMP_NAN, nan_obj, + QDF_STATUS_SUCCESS); + if (QDF_IS_STATUS_ERROR(status)) { + nan_alert("obj attach with psoc failed"); + goto nan_psoc_notif_failed; + } + + return QDF_STATUS_SUCCESS; + +nan_psoc_notif_failed: + + qdf_spinlock_destroy(&nan_obj->lock); + qdf_mem_free(nan_obj); + return status; +} + +static QDF_STATUS nan_psoc_obj_destroyed_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct nan_psoc_priv_obj *nan_obj = nan_get_psoc_priv_obj(psoc); + + nan_debug("nan_psoc_delete_notif called"); + if (!nan_obj) { + nan_err("nan_obj is NULL"); + return QDF_STATUS_E_FAULT; + } + + status = wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_NAN, nan_obj); + if (QDF_IS_STATUS_ERROR(status)) + nan_err("nan_obj detach failed"); + + 
nan_debug("nan_obj deleted with status %d", status); + qdf_spinlock_destroy(&nan_obj->lock); + qdf_mem_free(nan_obj); + + return status; +} + +static QDF_STATUS nan_vdev_obj_created_notification( + struct wlan_objmgr_vdev *vdev, void *arg_list) +{ + struct nan_vdev_priv_obj *nan_obj; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + nan_debug("nan_vdev_create_notif called"); + if (wlan_vdev_mlme_get_opmode(vdev) != QDF_NDI_MODE) { + nan_debug("not a ndi vdev. do nothing"); + return QDF_STATUS_SUCCESS; + } + + nan_obj = qdf_mem_malloc(sizeof(*nan_obj)); + if (!nan_obj) { + nan_err("malloc failed for nan prv obj"); + return QDF_STATUS_E_NOMEM; + } + + qdf_spinlock_create(&nan_obj->lock); + status = wlan_objmgr_vdev_component_obj_attach(vdev, + WLAN_UMAC_COMP_NAN, (void *)nan_obj, + QDF_STATUS_SUCCESS); + if (QDF_IS_STATUS_ERROR(status)) { + nan_alert("obj attach with vdev failed"); + goto nan_vdev_notif_failed; + } + + return QDF_STATUS_SUCCESS; + +nan_vdev_notif_failed: + + qdf_spinlock_destroy(&nan_obj->lock); + qdf_mem_free(nan_obj); + return status; +} + +static QDF_STATUS nan_vdev_obj_destroyed_notification( + struct wlan_objmgr_vdev *vdev, void *arg_list) +{ + struct nan_vdev_priv_obj *nan_obj; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + nan_debug("nan_vdev_delete_notif called"); + if (wlan_vdev_mlme_get_opmode(vdev) != QDF_NDI_MODE) { + nan_debug("not a ndi vdev. do nothing"); + return QDF_STATUS_SUCCESS; + } + + nan_obj = nan_get_vdev_priv_obj(vdev); + if (!nan_obj) { + nan_err("nan_obj is NULL"); + return QDF_STATUS_E_FAULT; + } + + status = wlan_objmgr_vdev_component_obj_detach(vdev, + WLAN_UMAC_COMP_NAN, nan_obj); + if (QDF_IS_STATUS_ERROR(status)) + nan_err("nan_obj detach failed"); + + nan_debug("nan_obj deleted with status %d", status); + qdf_spinlock_destroy(&nan_obj->lock); + qdf_mem_free(nan_obj); + + return status; +} + +QDF_STATUS nan_init(void) +{ + QDF_STATUS status; + + /* register psoc create handler functions. 
*/ + status = wlan_objmgr_register_psoc_create_handler( + WLAN_UMAC_COMP_NAN, + nan_psoc_obj_created_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + nan_err("wlan_objmgr_register_psoc_create_handler failed"); + return status; + } + + /* register psoc delete handler functions. */ + status = wlan_objmgr_register_psoc_destroy_handler( + WLAN_UMAC_COMP_NAN, + nan_psoc_obj_destroyed_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + nan_err("wlan_objmgr_register_psoc_destroy_handler failed"); + nan_deinit(); + return status; + } + + /* register vdev create handler functions. */ + status = wlan_objmgr_register_vdev_create_handler( + WLAN_UMAC_COMP_NAN, + nan_vdev_obj_created_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + nan_err("wlan_objmgr_register_psoc_create_handler failed"); + nan_deinit(); + return status; + } + + /* register vdev delete handler functions. */ + status = wlan_objmgr_register_vdev_destroy_handler( + WLAN_UMAC_COMP_NAN, + nan_vdev_obj_destroyed_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + nan_err("wlan_objmgr_register_psoc_destroy_handler failed"); + nan_deinit(); + return status; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS nan_deinit(void) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS, status; + + /* register psoc create handler functions. */ + status = wlan_objmgr_unregister_psoc_create_handler( + WLAN_UMAC_COMP_NAN, + nan_psoc_obj_created_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + nan_err("wlan_objmgr_unregister_psoc_create_handler failed"); + ret = status; + } + + /* register vdev create handler functions. */ + status = wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_NAN, + nan_psoc_obj_destroyed_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + nan_err("wlan_objmgr_deregister_psoc_destroy_handler failed"); + ret = status; + } + + /* de-register vdev create handler functions. 
*/ + status = wlan_objmgr_unregister_vdev_create_handler( + WLAN_UMAC_COMP_NAN, + nan_vdev_obj_created_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + nan_err("wlan_objmgr_unregister_psoc_create_handler failed"); + ret = status; + } + + /* de-register vdev delete handler functions. */ + status = wlan_objmgr_unregister_vdev_destroy_handler( + WLAN_UMAC_COMP_NAN, + nan_vdev_obj_destroyed_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + nan_err("wlan_objmgr_deregister_psoc_destroy_handler failed"); + ret = status; + } + + return ret; +} + +QDF_STATUS nan_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status = target_if_nan_register_events(psoc); + + if (QDF_IS_STATUS_ERROR(status)) + nan_err("target_if_nan_register_events failed"); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS nan_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status = target_if_nan_deregister_events(psoc); + + if (QDF_IS_STATUS_ERROR(status)) + nan_err("target_if_nan_deregister_events failed"); + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/nan/core/src/nan_main.c b/drivers/staging/qca-wifi-host-cmn/umac/nan/core/src/nan_main.c new file mode 100644 index 0000000000000000000000000000000000000000..e759d307c3989bca25ceda1db1cbb377c3cdfed7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/nan/core/src/nan_main.c @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: contains core nan function definitions + */ + +#include "nan_main_i.h" +#include "nan_ucfg_api.h" +#include "wlan_nan_api.h" +#include "target_if_nan.h" +#include "scheduler_api.h" +#include "wlan_serialization_api.h" +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_global_obj.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_objmgr_pdev_obj.h" +#include "wlan_objmgr_vdev_obj.h" + +struct nan_vdev_priv_obj *nan_get_vdev_priv_obj( + struct wlan_objmgr_vdev *vdev) +{ + struct nan_vdev_priv_obj *obj; + + if (!vdev) { + nan_err("vdev is null"); + return NULL; + } + obj = wlan_objmgr_vdev_get_comp_private_obj(vdev, WLAN_UMAC_COMP_NAN); + + return obj; +} + +struct nan_psoc_priv_obj *nan_get_psoc_priv_obj( + struct wlan_objmgr_psoc *psoc) +{ + struct nan_psoc_priv_obj *obj; + + if (!psoc) { + nan_err("psoc is null"); + return NULL; + } + obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, WLAN_UMAC_COMP_NAN); + + return obj; +} + +void nan_release_cmd(void *in_req, uint32_t cmdtype) +{ + struct wlan_objmgr_vdev *vdev = NULL; + + if (!in_req) + return; + + switch (cmdtype) { + case WLAN_SER_CMD_NDP_INIT_REQ: { + struct nan_datapath_initiator_req *req = in_req; + vdev = req->vdev; + break; + } + case WLAN_SER_CMD_NDP_RESP_REQ: { + struct nan_datapath_responder_req *req = in_req; + vdev = req->vdev; + break; + } + case WLAN_SER_CMD_NDP_DATA_END_INIT_REQ: { + struct nan_datapath_end_req *req = in_req; + vdev = req->vdev; + break; + } + default: + nan_err("invalid req type: %d", cmdtype); + break; + } + + if (vdev) + wlan_objmgr_vdev_release_ref(vdev, WLAN_NAN_ID); + else + nan_err("vdev is null"); + + 
qdf_mem_free(in_req);
+}
+
+/**
+ * nan_req_incomplete: serialization callback for a request that was cancelled
+ * or timed out before completing
+ * @req: the queued NAN request (opaque)
+ * @cmdtype: serialization command type of @req
+ *
+ * Intentionally a stub today; single hook point for a future userspace
+ * notification.
+ */
+static void nan_req_incomplete(void *req, uint32_t cmdtype)
+{
+	/* send msg to userspace if needed that cmd got incomplete */
+}
+
+/**
+ * nan_req_activated: serialization callback invoked when a queued NAN request
+ * becomes the active command; forwards it to firmware via the NAN tx ops
+ * @in_req: the queued request; its concrete type is derived from @cmdtype
+ * @cmdtype: serialization command type identifying the request struct
+ */
+static void nan_req_activated(void *in_req, uint32_t cmdtype)
+{
+	uint32_t req_type;
+	struct wlan_objmgr_psoc *psoc;
+	struct wlan_objmgr_vdev *vdev;
+	struct wlan_lmac_if_nan_tx_ops *tx_ops;
+
+	/* map serialization cmd type to the FW request type and pick up vdev */
+	switch (cmdtype) {
+	case WLAN_SER_CMD_NDP_INIT_REQ: {
+		struct nan_datapath_initiator_req *req = in_req;
+		vdev = req->vdev;
+		req_type = NDP_INITIATOR_REQ;
+		break;
+	}
+	case WLAN_SER_CMD_NDP_RESP_REQ: {
+		struct nan_datapath_responder_req *req = in_req;
+		vdev = req->vdev;
+		req_type = NDP_RESPONDER_REQ;
+		break;
+	}
+	case WLAN_SER_CMD_NDP_DATA_END_INIT_REQ: {
+		struct nan_datapath_end_req *req = in_req;
+		vdev = req->vdev;
+		req_type = NDP_END_REQ;
+		break;
+	}
+	default:
+		nan_alert("in correct cmdtype: %d", cmdtype);
+		return;
+	}
+
+	if (!vdev) {
+		nan_alert("vdev is null");
+		return;
+	}
+
+	psoc = wlan_vdev_get_psoc(vdev);
+	if (!psoc) {
+		nan_alert("psoc is null");
+		return;
+	}
+
+	tx_ops = target_if_nan_get_tx_ops(psoc);
+	if (!tx_ops) {
+		nan_alert("tx_ops is null");
+		return;
+	}
+
+	/* send ndp_initiator_req/responder_req/end_req to FW */
+	tx_ops->nan_req_tx(in_req, req_type);
+}
+
+/**
+ * nan_serialized_cb: dispatcher for serialization state changes of queued NAN
+ * commands (activate / cancel / timeout / release)
+ * @ser_cmd: serialization command carrying the NAN request in @umac_cmd
+ * @reason: which serialization event occurred
+ *
+ * Return: QDF_STATUS_E_NULL_VALUE on a null command, success otherwise
+ */
+static QDF_STATUS nan_serialized_cb(struct wlan_serialization_command *ser_cmd,
+				    enum wlan_serialization_cb_reason reason)
+{
+	void *req;
+
+	if (!ser_cmd || !ser_cmd->umac_cmd) {
+		nan_alert("cmd or umac_cmd is null");
+		return QDF_STATUS_E_NULL_VALUE;
+	}
+	req = ser_cmd->umac_cmd;
+
+	switch (reason) {
+	case WLAN_SER_CB_ACTIVATE_CMD:
+		nan_req_activated(req, ser_cmd->cmd_type);
+		break;
+	case WLAN_SER_CB_CANCEL_CMD:
+	case WLAN_SER_CB_ACTIVE_CMD_TIMEOUT:
+		nan_req_incomplete(req, ser_cmd->cmd_type);
+		break;
+	case WLAN_SER_CB_RELEASE_MEM_CMD:
+		/* ownership of req ends here; nan_release_cmd frees it */
+		nan_release_cmd(req, ser_cmd->cmd_type);
+		break;
+	default:
+		/* Do nothing but logging */
+		nan_alert("invalid serialized cb reason: %d", reason);
+		break;
+	}
+
return QDF_STATUS_SUCCESS; +} + +QDF_STATUS nan_scheduled_msg_handler(struct scheduler_msg *msg) +{ + enum wlan_serialization_status status = 0; + struct wlan_serialization_command cmd = {0}; + + if (!msg || !msg->bodyptr) { + nan_alert("msg or bodyptr is null"); + return QDF_STATUS_E_NULL_VALUE; + } + switch (msg->type) { + case NDP_INITIATOR_REQ: { + struct nan_datapath_initiator_req *req = msg->bodyptr; + cmd.cmd_type = WLAN_SER_CMD_NDP_INIT_REQ; + cmd.vdev = req->vdev; + break; + } + case NDP_RESPONDER_REQ: { + struct nan_datapath_responder_req *req = msg->bodyptr; + cmd.cmd_type = WLAN_SER_CMD_NDP_RESP_REQ; + cmd.vdev = req->vdev; + break; + } + case NDP_END_REQ: { + struct nan_datapath_end_req *req = msg->bodyptr; + cmd.cmd_type = WLAN_SER_CMD_NDP_DATA_END_INIT_REQ; + cmd.vdev = req->vdev; + break; + } + default: + nan_err("wrong request type: %d", msg->type); + return QDF_STATUS_E_INVAL; + } + + /* TBD - support more than one req of same type or avoid */ + cmd.cmd_id = 0; + cmd.cmd_cb = nan_serialized_cb; + cmd.umac_cmd = msg->bodyptr; + cmd.source = WLAN_UMAC_COMP_NAN; + cmd.is_high_priority = false; + cmd.cmd_timeout_duration = 30000 /* 30 sec for now. 
TBD */; + nan_debug("cmd_type: %d", cmd.cmd_type); + + status = wlan_serialization_request(&cmd); + /* following is TBD */ + if (status != WLAN_SER_CMD_ACTIVE && status != WLAN_SER_CMD_PENDING) { + nan_err("unable to serialize command"); + wlan_objmgr_vdev_release_ref(cmd.vdev, WLAN_NAN_ID); + return QDF_STATUS_E_INVAL; + } + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS nan_handle_confirm( + struct nan_datapath_confirm_event *confirm) +{ + uint8_t vdev_id; + struct wlan_objmgr_psoc *psoc; + struct nan_psoc_priv_obj *psoc_nan_obj; + + vdev_id = wlan_vdev_get_id(confirm->vdev); + psoc = wlan_vdev_get_psoc(confirm->vdev); + if (!psoc) { + nan_err("psoc is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + psoc_nan_obj = nan_get_psoc_priv_obj(psoc); + if (!psoc_nan_obj) { + nan_err("psoc_nan_obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (confirm->rsp_code != NAN_DATAPATH_RESPONSE_ACCEPT && + confirm->num_active_ndps_on_peer == 0) { + /* + * This peer was created at ndp_indication but + * confirm failed, so it needs to be deleted + */ + nan_err("NDP confirm with reject and no active ndp sessions. 
deleting peer: "QDF_MAC_ADDR_STR" on vdev_id: %d", + QDF_MAC_ADDR_ARRAY(confirm->peer_ndi_mac_addr.bytes), + vdev_id); + psoc_nan_obj->cb_obj.delete_peers_by_addr(vdev_id, + confirm->peer_ndi_mac_addr); + } + psoc_nan_obj->cb_obj.os_if_event_handler(psoc, confirm->vdev, + NDP_CONFIRM, confirm); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS nan_handle_initiator_rsp( + struct nan_datapath_initiator_rsp *rsp, + struct wlan_objmgr_vdev **vdev) +{ + struct wlan_objmgr_psoc *psoc; + struct nan_psoc_priv_obj *psoc_nan_obj; + + *vdev = rsp->vdev; + psoc = wlan_vdev_get_psoc(rsp->vdev); + if (!psoc) { + nan_err("psoc is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + psoc_nan_obj = nan_get_psoc_priv_obj(psoc); + if (!psoc_nan_obj) { + nan_err("psoc_nan_obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + psoc_nan_obj->cb_obj.os_if_event_handler(psoc, rsp->vdev, + NDP_INITIATOR_RSP, rsp); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS nan_handle_ndp_ind( + struct nan_datapath_indication_event *ndp_ind) +{ + uint8_t vdev_id; + struct wlan_objmgr_psoc *psoc; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct nan_psoc_priv_obj *psoc_nan_obj; + + vdev_id = wlan_vdev_get_id(ndp_ind->vdev); + psoc = wlan_vdev_get_psoc(ndp_ind->vdev); + if (!psoc) { + nan_err("psoc is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + psoc_nan_obj = nan_get_psoc_priv_obj(psoc); + if (!psoc_nan_obj) { + nan_err("psoc_nan_obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + nan_debug("role: %d, vdev: %d, csid: %d, peer_mac_addr " + QDF_MAC_ADDR_STR, + ndp_ind->role, vdev_id, ndp_ind->ncs_sk_type, + QDF_MAC_ADDR_ARRAY(ndp_ind->peer_mac_addr.bytes)); + + if ((ndp_ind->role == NAN_DATAPATH_ROLE_INITIATOR) || + ((NAN_DATAPATH_ROLE_RESPONDER == ndp_ind->role) && + (NAN_DATAPATH_ACCEPT_POLICY_ALL == ndp_ind->policy))) { + status = psoc_nan_obj->cb_obj.add_ndi_peer(vdev_id, + ndp_ind->peer_mac_addr); + if (QDF_IS_STATUS_ERROR(status)) { + nan_err("Couldn't add ndi peer, ndp_role: 
%d", + ndp_ind->role); + return status; + } + } + psoc_nan_obj->cb_obj.os_if_event_handler(psoc, ndp_ind->vdev, + NDP_INDICATION, ndp_ind); + + return status; +} + +static QDF_STATUS nan_handle_responder_rsp( + struct nan_datapath_responder_rsp *rsp, + struct wlan_objmgr_vdev **vdev) +{ + struct wlan_objmgr_psoc *psoc; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct nan_psoc_priv_obj *psoc_nan_obj; + + *vdev = rsp->vdev; + psoc = wlan_vdev_get_psoc(rsp->vdev); + if (!psoc) { + nan_err("psoc is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + psoc_nan_obj = nan_get_psoc_priv_obj(psoc); + if (!psoc_nan_obj) { + nan_err("psoc_nan_obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (QDF_IS_STATUS_SUCCESS(rsp->status) && rsp->create_peer == true) { + status = psoc_nan_obj->cb_obj.add_ndi_peer( + wlan_vdev_get_id(rsp->vdev), + rsp->peer_mac_addr); + if (QDF_IS_STATUS_ERROR(status)) { + nan_err("Couldn't add ndi peer"); + rsp->status = QDF_STATUS_E_FAILURE; + } + } + psoc_nan_obj->cb_obj.os_if_event_handler(psoc, rsp->vdev, + NDP_RESPONDER_RSP, rsp); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS nan_handle_ndp_end_rsp( + struct nan_datapath_end_rsp_event *rsp, + struct wlan_objmgr_vdev **vdev) +{ + struct wlan_objmgr_psoc *psoc; + struct nan_psoc_priv_obj *psoc_nan_obj; + + *vdev = rsp->vdev; + psoc = wlan_vdev_get_psoc(rsp->vdev); + if (!psoc) { + nan_err("psoc is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + psoc_nan_obj = nan_get_psoc_priv_obj(psoc); + if (!psoc_nan_obj) { + nan_err("psoc_nan_obj is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + psoc_nan_obj->cb_obj.os_if_event_handler(psoc, rsp->vdev, + NDP_END_RSP, rsp); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS nan_handle_end_ind( + struct nan_datapath_end_indication_event *ind) +{ + struct wlan_objmgr_psoc *psoc; + struct nan_psoc_priv_obj *psoc_nan_obj; + + psoc = wlan_vdev_get_psoc(ind->vdev); + if (!psoc) { + nan_err("psoc is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } 
+ + psoc_nan_obj = nan_get_psoc_priv_obj(psoc); + if (!psoc_nan_obj) { + nan_err("psoc_nan_obj is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + psoc_nan_obj->cb_obj.ndp_delete_peers(ind->ndp_map, ind->num_ndp_ids); + psoc_nan_obj->cb_obj.os_if_event_handler(psoc, ind->vdev, + NDP_END_IND, ind); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS nan_handle_schedule_update( + struct nan_datapath_sch_update_event *ind) +{ + struct wlan_objmgr_psoc *psoc; + struct nan_psoc_priv_obj *psoc_nan_obj; + + psoc = wlan_vdev_get_psoc(ind->vdev); + if (!psoc) { + nan_err("psoc is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + psoc_nan_obj = nan_get_psoc_priv_obj(psoc); + if (!psoc_nan_obj) { + nan_err("psoc_nan_obj is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + psoc_nan_obj->cb_obj.os_if_event_handler(psoc, ind->vdev, + NDP_SCHEDULE_UPDATE, ind); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS nan_event_handler(struct scheduler_msg *pe_msg) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct wlan_serialization_queued_cmd_info cmd; + + cmd.requestor = WLAN_UMAC_COMP_NAN; + cmd.cmd_id = 0; + cmd.req_type = WLAN_SER_CANCEL_NON_SCAN_CMD; + cmd.queue_type = WLAN_SERIALIZATION_ACTIVE_QUEUE; + + if (!pe_msg->bodyptr) { + nan_err("msg body is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + switch (pe_msg->type) { + case NDP_CONFIRM: { + nan_handle_confirm(pe_msg->bodyptr); + break; + } + case NDP_INITIATOR_RSP: { + nan_handle_initiator_rsp(pe_msg->bodyptr, &cmd.vdev); + cmd.cmd_type = WLAN_SER_CMD_NDP_INIT_REQ; + wlan_serialization_remove_cmd(&cmd); + break; + } + case NDP_INDICATION: { + nan_handle_ndp_ind(pe_msg->bodyptr); + break; + } + case NDP_RESPONDER_RSP: + nan_handle_responder_rsp(pe_msg->bodyptr, &cmd.vdev); + cmd.cmd_type = WLAN_SER_CMD_NDP_RESP_REQ; + wlan_serialization_remove_cmd(&cmd); + break; + case NDP_END_RSP: + nan_handle_ndp_end_rsp(pe_msg->bodyptr, &cmd.vdev); + cmd.cmd_type = WLAN_SER_CMD_NDP_DATA_END_INIT_REQ; + 
wlan_serialization_remove_cmd(&cmd); + break; + case NDP_END_IND: + nan_handle_end_ind(pe_msg->bodyptr); + break; + case NDP_SCHEDULE_UPDATE: + nan_handle_schedule_update(pe_msg->bodyptr); + break; + default: + nan_alert("Unhandled NDP event: %d", pe_msg->type); + status = QDF_STATUS_E_NOSUPPORT; + break; + } + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/nan/core/src/nan_main_i.h b/drivers/staging/qca-wifi-host-cmn/umac/nan/core/src/nan_main_i.h new file mode 100644 index 0000000000000000000000000000000000000000..7d2d770ce37ca4bef68d7af1919110ffac2928e2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/nan/core/src/nan_main_i.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: contains declaration of common utility APIs and private structs to be + * used in NAN modules + */ + +#ifndef _WLAN_NAN_MAIN_I_H_ +#define _WLAN_NAN_MAIN_I_H_ + +#include "qdf_types.h" +#include "qdf_status.h" +#include "nan_public_structs.h" +#include "wlan_objmgr_cmn.h" + +struct wlan_objmgr_vdev; +struct wlan_objmgr_psoc; +struct scheduler_msg; + +#define nan_log(level, args...) 
\ + QDF_TRACE(QDF_MODULE_ID_NAN, level, ## args) +#define nan_logfl(level, format, args...) \ + nan_log(level, FL(format), ## args) + +#define nan_alert(format, args...) \ + nan_logfl(QDF_TRACE_LEVEL_FATAL, format, ## args) +#define nan_err(format, args...) \ + nan_logfl(QDF_TRACE_LEVEL_ERROR, format, ## args) +#define nan_warn(format, args...) \ + nan_logfl(QDF_TRACE_LEVEL_WARN, format, ## args) +#define nan_notice(format, args...) \ + nan_logfl(QDF_TRACE_LEVEL_INFO, format, ## args) +#define nan_info(format, args...) \ + nan_logfl(QDF_TRACE_LEVEL_INFO_HIGH, format, ## args) +#define nan_debug(format, args...) \ + nan_logfl(QDF_TRACE_LEVEL_DEBUG, format, ## args) + +#ifndef MAX_PEERS +#define MAX_PEERS 32 +#endif + +/** + * struct nan_psoc_priv_obj - nan private psoc obj + * @lock: lock to be acquired before reading or writing to object + * @cb_obj: struct contaning callback pointers + */ +struct nan_psoc_priv_obj { + qdf_spinlock_t lock; + struct nan_callbacks cb_obj; +}; + +/** + * struct nan_vdev_priv_obj - nan private vdev obj + * @lock: lock to be acquired before reading or writing to object + * @state: Current state of NDP + * @active_ndp_sessions: active ndp sessions per adapter + * @active_ndp_peers: number of active ndp peers + * @ndp_create_transaction_id: transaction id for create req + * @ndp_delete_transaction_id: transaction id for delete req + * @ndi_delete_rsp_reason: reason code for ndi_delete rsp + * @ndi_delete_rsp_status: status for ndi_delete rsp + */ +struct nan_vdev_priv_obj { + qdf_spinlock_t lock; + enum nan_datapath_state state; + /* idx in following array should follow conn_info.peerMacAddress */ + uint32_t active_ndp_sessions[MAX_PEERS]; + uint32_t active_ndp_peers; + uint16_t ndp_create_transaction_id; + uint16_t ndp_delete_transaction_id; + uint32_t ndi_delete_rsp_reason; + uint32_t ndi_delete_rsp_status; +}; + +/** + * nan_get_vdev_priv_obj: get NAN priv object from vdev object + * @vdev: pointer to vdev object + * + * Return: 
pointer to NAN vdev private object + */ +struct nan_vdev_priv_obj *nan_get_vdev_priv_obj(struct wlan_objmgr_vdev *vdev); + +/** + * nan_get_psoc_priv_obj: get NAN priv object from psoc object + * @psoc: pointer to psoc object + * + * Return: pointer to NAN psoc private object + */ +struct nan_psoc_priv_obj *nan_get_psoc_priv_obj(struct wlan_objmgr_psoc *psoc); + +/** + * nan_release_cmd: frees resources for NAN command. + * @in_req: pointer to msg buffer to be freed + * @req_type: type of request + * + * Return: None + */ +void nan_release_cmd(void *in_req, uint32_t req_type); + +/** + * nan_scheduled_msg_handler: callback pointer to be called when scheduler + * starts executing enqueued NAN command. + * @msg: pointer to msg + * + * Return: status of operation + */ +QDF_STATUS nan_scheduled_msg_handler(struct scheduler_msg *msg); + +/* + * nan_event_handler: function to process events from firmware + * @msg: message received from lmac + * + * Return: status of operation + */ +QDF_STATUS nan_event_handler(struct scheduler_msg *msg); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/nan/core/src/nan_utils.c b/drivers/staging/qca-wifi-host-cmn/umac/nan/core/src/nan_utils.c new file mode 100644 index 0000000000000000000000000000000000000000..69b8daf1875edb2299cb40e3e9b25faa5be6ef87 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/nan/core/src/nan_utils.c @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: contains nan utility functions + */ + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/nan/dispatcher/inc/nan_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/nan/dispatcher/inc/nan_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..2c24a483760c30b7efcc676119e0e41d1ca31c2e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/nan/dispatcher/inc/nan_ucfg_api.h @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: contains interface prototypes for OS_IF layer + */ + +#ifndef _NAN_UCFG_API_H_ +#define _NAN_UCFG_API_H_ + +#include "qdf_types.h" +#include "qdf_status.h" +#include "wlan_objmgr_cmn.h" + +struct nan_callbacks; +struct wlan_objmgr_vdev; +struct wlan_objmgr_psoc; +struct wlan_objmgr_vdev; +struct nan_callbacks; + +/** + * ucfg_nan_set_ndi_state: set ndi state + * @vdev: pointer to vdev object + * @state: value to set + * + * Return: status of operation + */ +QDF_STATUS ucfg_nan_set_ndi_state(struct wlan_objmgr_vdev *vdev, + uint32_t state); + +/** + * ucfg_nan_get_ndi_state: get ndi state from vdev obj + * @vdev: pointer to vdev object + * + * Return: ndi state + */ +enum nan_datapath_state ucfg_nan_get_ndi_state(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_nan_set_active_peers: set active ndi peer + * @vdev: pointer to vdev object + * @val: value to set + * + * Return: status of operation + */ +QDF_STATUS ucfg_nan_set_active_peers(struct wlan_objmgr_vdev *vdev, + uint32_t val); + +/** + * ucfg_nan_get_active_peers: get active ndi peer from vdev obj + * @vdev: pointer to vdev object + * + * Return: active ndi peer + */ +uint32_t ucfg_nan_get_active_peers(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_nan_set_active_ndp_sessions: set active ndp sessions + * @vdev: pointer to vdev object + * + * Return: status of operation + */ +QDF_STATUS ucfg_nan_set_active_ndp_sessions(struct wlan_objmgr_vdev *vdev, + uint32_t val, uint8_t idx); + +/** + * ucfg_nan_get_active_ndp_sessions: get active ndp sessions from vdev obj + * @vdev: pointer to vdev object + * + * Return: pointer to NAN psoc private object + */ +uint32_t ucfg_nan_get_active_ndp_sessions(struct wlan_objmgr_vdev *vdev, + uint8_t idx); + +/** + * ucfg_nan_set_ndp_create_transaction_id: set ndp create transaction id + * @vdev: pointer to vdev object + * @val: value to set + * + * Return: status of operation + */ +QDF_STATUS ucfg_nan_set_ndp_create_transaction_id(struct wlan_objmgr_vdev 
*vdev, + uint16_t val); + +/** + * ucfg_nan_get_ndp_create_transaction_id: get ndp create transaction id + * vdev obj + * @vdev: pointer to vdev object + * + * Return: ndp create transaction_id + */ +uint16_t ucfg_nan_get_ndp_create_transaction_id(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_nan_set_ndp_delete_transaction_id: set ndp delete transaction id + * @vdev: pointer to vdev object + * @val: value to set + * + * Return: status of operation + */ +QDF_STATUS ucfg_nan_set_ndp_delete_transaction_id(struct wlan_objmgr_vdev *vdev, + uint16_t val); + +/** + * ucfg_nan_get_ndp_delete_transaction_id: get ndp delete transaction id from + * vdev obj + * @vdev: pointer to vdev object + * + * Return: ndp delete transaction_id + */ +uint16_t ucfg_nan_get_ndp_delete_transaction_id(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_nan_set_ndi_delete_rsp_reason: set ndi delete response reason + * @vdev: pointer to vdev object + * @val: value to set + * + * Return: status of operation + */ +QDF_STATUS ucfg_nan_set_ndi_delete_rsp_reason(struct wlan_objmgr_vdev *vdev, + uint32_t val); + +/** + * ucfg_nan_get_ndi_delete_rsp_reason: get ndi delete response reason from vdev + * obj + * @vdev: pointer to vdev object + * + * Return: ndi delete rsp reason + */ +uint32_t ucfg_nan_get_ndi_delete_rsp_reason(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_nan_set_ndi_delete_rsp_status: set ndi delete response reason + * @vdev: pointer to vdev object + * @val: value to set + * + * Return: status of operation + */ +QDF_STATUS ucfg_nan_set_ndi_delete_rsp_status(struct wlan_objmgr_vdev *vdev, + uint32_t val); + +/** + * ucfg_nan_get_ndi_delete_rsp_status: get ndi delete response status from vdev + * obj + * @vdev: pointer to vdev object + * + * Return: ndi delete rsp status + */ +uint32_t ucfg_nan_get_ndi_delete_rsp_status(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_nan_get_callbacks: ucfg API to return callbacks + * @psoc: pointer to psoc object + * @cb_obj: callback struct to populate 
+ * + * Return: callback struct on success, NULL otherwise + */ +QDF_STATUS ucfg_nan_get_callbacks(struct wlan_objmgr_psoc *psoc, + struct nan_callbacks *cb_obj); + +/** + * ucfg_nan_req_processor: ucfg API to be called from HDD/OS_IF to + * process nan datapath initiator request from userspace + * @vdev: nan vdev pointer + * @in_req: NDP request + * @psoc: pointer to psoc object + * @req_type: type of request + * + * Return: status of operation + */ +QDF_STATUS ucfg_nan_req_processor(struct wlan_objmgr_vdev *vdev, + void *in_req, uint32_t req_type); + +/** + * ucfg_nan_event_handler: ucfg API to be called from legacy code to + * post events to os_if/hdd layer + * @psoc: pointer to psoc object + * @vdev: pointer to vdev object + * @type: message type + * @msg: msg buffer + * + * Return: None + */ +void ucfg_nan_event_handler(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev, + uint32_t type, void *msg); + +/** + * ucfg_nan_register_hdd_callbacks: ucfg API to set hdd callbacks + * @psoc: pointer to psoc object + * @cb_obj: structs containing callbacks + * @os_if_event_handler: os if event handler callback + * + * Return: status of operation + */ +int ucfg_nan_register_hdd_callbacks(struct wlan_objmgr_psoc *psoc, + struct nan_callbacks *cb_obj, + void (os_if_event_handler)( + struct wlan_objmgr_psoc *, + struct wlan_objmgr_vdev *, + uint32_t, void *)); + +/* + * ucfg_nan_register_lim_callbacks: ucfg API to set lim callbacks + * @psoc: pointer to psoc object + * @cb_obj: structs containing callbacks + * + * Return: status of operation + */ +int ucfg_nan_register_lim_callbacks(struct wlan_objmgr_psoc *psoc, + struct nan_callbacks *cb_obj); + +/** + * ucfg_nan_get_callbacks: ucfg API to return callbacks + * @psoc: pointer to psoc object + * @cb_obj: callback struct to populate + * + * Return: callback struct on success, NULL otherwise + */ +QDF_STATUS ucfg_nan_get_callbacks(struct wlan_objmgr_psoc *psoc, + struct nan_callbacks *cb_obj); + +#endif /* 
_NAN_UCFG_API_H_ */ + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/nan/dispatcher/src/nan_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/nan/dispatcher/src/nan_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..78dca19ce228e2fefd64c7580f625775c26d2e3d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/nan/dispatcher/src/nan_ucfg_api.c @@ -0,0 +1,412 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: contains interface definitions for OS_IF layer + */ + +#include "nan_ucfg_api.h" +#include "nan_public_structs.h" +#include "../../core/src/nan_main_i.h" +#include "scheduler_api.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_objmgr_pdev_obj.h" +#include "wlan_objmgr_vdev_obj.h" + +struct wlan_objmgr_psoc; +struct wlan_objmgr_vdev; + +inline QDF_STATUS ucfg_nan_set_ndi_state(struct wlan_objmgr_vdev *vdev, + uint32_t state) +{ + struct nan_vdev_priv_obj *priv_obj = nan_get_vdev_priv_obj(vdev); + + if (!priv_obj) { + nan_err("priv_obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + qdf_spin_lock_bh(&priv_obj->lock); + priv_obj->state = state; + qdf_spin_unlock_bh(&priv_obj->lock); + + return QDF_STATUS_SUCCESS; +} + +inline enum nan_datapath_state ucfg_nan_get_ndi_state( + struct wlan_objmgr_vdev *vdev) +{ + enum nan_datapath_state val; + struct nan_vdev_priv_obj *priv_obj = nan_get_vdev_priv_obj(vdev); + + if (!priv_obj) { + nan_err("priv_obj is null"); + return NAN_DATA_INVALID_STATE; + } + + qdf_spin_lock_bh(&priv_obj->lock); + val = priv_obj->state; + qdf_spin_unlock_bh(&priv_obj->lock); + + return val; +} + +inline QDF_STATUS ucfg_nan_set_active_peers(struct wlan_objmgr_vdev *vdev, + uint32_t val) +{ + struct nan_vdev_priv_obj *priv_obj = nan_get_vdev_priv_obj(vdev); + + if (!priv_obj) { + nan_err("priv_obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + qdf_spin_lock_bh(&priv_obj->lock); + priv_obj->active_ndp_peers = val; + qdf_spin_unlock_bh(&priv_obj->lock); + + return QDF_STATUS_SUCCESS; +} + +inline uint32_t ucfg_nan_get_active_peers(struct wlan_objmgr_vdev *vdev) +{ + uint32_t val; + struct nan_vdev_priv_obj *priv_obj = nan_get_vdev_priv_obj(vdev); + + if (!priv_obj) { + nan_err("priv_obj is null"); + return 0; + } + + qdf_spin_lock_bh(&priv_obj->lock); + val = priv_obj->active_ndp_peers; + qdf_spin_unlock_bh(&priv_obj->lock); + + return val; +} + +inline QDF_STATUS ucfg_nan_set_active_ndp_sessions( + struct 
wlan_objmgr_vdev *vdev, uint32_t val, uint8_t idx) +{ + struct nan_vdev_priv_obj *priv_obj = nan_get_vdev_priv_obj(vdev); + + if (!priv_obj) { + nan_err("priv_obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (idx >= MAX_PEERS) { + nan_err("peer_idx(%d), MAX(%d)", + idx, MAX_PEERS); + return QDF_STATUS_E_NULL_VALUE; + } + + qdf_spin_lock_bh(&priv_obj->lock); + priv_obj->active_ndp_sessions[idx] = val; + qdf_spin_unlock_bh(&priv_obj->lock); + + return QDF_STATUS_SUCCESS; +} + +inline uint32_t ucfg_nan_get_active_ndp_sessions(struct wlan_objmgr_vdev *vdev, + uint8_t idx) +{ + uint32_t val; + struct nan_vdev_priv_obj *priv_obj = nan_get_vdev_priv_obj(vdev); + + if (!priv_obj) { + nan_err("priv_obj is null"); + return 0; + } + + if (idx >= MAX_PEERS) { + nan_err("peer_idx(%d), MAX(%d)", + idx, MAX_PEERS); + return 0; + } + + qdf_spin_lock_bh(&priv_obj->lock); + val = priv_obj->active_ndp_sessions[idx]; + qdf_spin_unlock_bh(&priv_obj->lock); + + return val; +} + +inline QDF_STATUS ucfg_nan_set_ndp_create_transaction_id( + struct wlan_objmgr_vdev *vdev, uint16_t val) +{ + struct nan_vdev_priv_obj *priv_obj = nan_get_vdev_priv_obj(vdev); + + if (!priv_obj) { + nan_err("priv_obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + qdf_spin_lock_bh(&priv_obj->lock); + priv_obj->ndp_create_transaction_id = val; + qdf_spin_unlock_bh(&priv_obj->lock); + + return QDF_STATUS_SUCCESS; +} + +inline uint16_t ucfg_nan_get_ndp_create_transaction_id( + struct wlan_objmgr_vdev *vdev) +{ + uint16_t val; + struct nan_vdev_priv_obj *priv_obj = nan_get_vdev_priv_obj(vdev); + + if (!priv_obj) { + nan_err("priv_obj is null"); + return 0; + } + + qdf_spin_lock_bh(&priv_obj->lock); + val = priv_obj->ndp_create_transaction_id; + qdf_spin_unlock_bh(&priv_obj->lock); + + return val; +} + +inline QDF_STATUS ucfg_nan_set_ndp_delete_transaction_id( + struct wlan_objmgr_vdev *vdev, uint16_t val) +{ + struct nan_vdev_priv_obj *priv_obj = nan_get_vdev_priv_obj(vdev); + + if (!priv_obj) { + 
nan_err("priv_obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + qdf_spin_lock_bh(&priv_obj->lock); + priv_obj->ndp_delete_transaction_id = val; + qdf_spin_unlock_bh(&priv_obj->lock); + + return QDF_STATUS_SUCCESS; +} + +inline uint16_t ucfg_nan_get_ndp_delete_transaction_id( + struct wlan_objmgr_vdev *vdev) +{ + uint16_t val; + struct nan_vdev_priv_obj *priv_obj = nan_get_vdev_priv_obj(vdev); + + if (!priv_obj) { + nan_err("priv_obj is null"); + return 0; + } + + qdf_spin_lock_bh(&priv_obj->lock); + val = priv_obj->ndp_delete_transaction_id; + qdf_spin_unlock_bh(&priv_obj->lock); + + return val; +} + +inline QDF_STATUS ucfg_nan_set_ndi_delete_rsp_reason( + struct wlan_objmgr_vdev *vdev, uint32_t val) +{ + struct nan_vdev_priv_obj *priv_obj = nan_get_vdev_priv_obj(vdev); + + if (!priv_obj) { + nan_err("priv_obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + qdf_spin_lock_bh(&priv_obj->lock); + priv_obj->ndi_delete_rsp_reason = val; + qdf_spin_unlock_bh(&priv_obj->lock); + + return QDF_STATUS_SUCCESS; +} + +inline uint32_t ucfg_nan_get_ndi_delete_rsp_reason( + struct wlan_objmgr_vdev *vdev) +{ + uint32_t val; + struct nan_vdev_priv_obj *priv_obj = nan_get_vdev_priv_obj(vdev); + + if (!priv_obj) { + nan_err("priv_obj is null"); + return 0; + } + + qdf_spin_lock_bh(&priv_obj->lock); + val = priv_obj->ndi_delete_rsp_reason; + qdf_spin_unlock_bh(&priv_obj->lock); + + return val; +} + +inline QDF_STATUS ucfg_nan_set_ndi_delete_rsp_status( + struct wlan_objmgr_vdev *vdev, uint32_t val) +{ + struct nan_vdev_priv_obj *priv_obj = nan_get_vdev_priv_obj(vdev); + + if (!priv_obj) { + nan_err("priv_obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + qdf_spin_lock_bh(&priv_obj->lock); + priv_obj->ndi_delete_rsp_status = val; + qdf_spin_unlock_bh(&priv_obj->lock); + + return QDF_STATUS_SUCCESS; +} + +inline uint32_t ucfg_nan_get_ndi_delete_rsp_status( + struct wlan_objmgr_vdev *vdev) +{ + uint32_t val; + struct nan_vdev_priv_obj *priv_obj = 
nan_get_vdev_priv_obj(vdev); + + if (!priv_obj) { + nan_err("priv_obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + qdf_spin_lock_bh(&priv_obj->lock); + val = priv_obj->ndi_delete_rsp_status; + qdf_spin_unlock_bh(&priv_obj->lock); + + return val; +} + +inline QDF_STATUS ucfg_nan_get_callbacks(struct wlan_objmgr_psoc *psoc, + struct nan_callbacks *cb_obj) +{ + struct nan_psoc_priv_obj *psoc_obj = nan_get_psoc_priv_obj(psoc); + + if (!psoc_obj) { + nan_err("nan psoc priv object is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + qdf_spin_lock_bh(&psoc_obj->lock); + qdf_mem_copy(cb_obj, &psoc_obj->cb_obj, sizeof(*cb_obj)); + qdf_spin_unlock_bh(&psoc_obj->lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_nan_req_processor(struct wlan_objmgr_vdev *vdev, + void *in_req, uint32_t req_type) +{ + uint32_t len; + QDF_STATUS status; + struct scheduler_msg msg = {0}; + + if (!in_req) { + nan_alert("req is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + switch (req_type) { + case NDP_INITIATOR_REQ: + len = sizeof(struct nan_datapath_initiator_req); + break; + case NDP_RESPONDER_REQ: + len = sizeof(struct nan_datapath_responder_req); + break; + case NDP_END_REQ: + len = sizeof(struct nan_datapath_end_req); + break; + default: + nan_err("in correct message req type: %d", req_type); + return QDF_STATUS_E_INVAL; + } + + msg.bodyptr = qdf_mem_malloc(len); + if (!msg.bodyptr) { + nan_err("malloc failed"); + return QDF_STATUS_E_NOMEM; + } + qdf_mem_copy(msg.bodyptr, in_req, len); + msg.type = req_type; + msg.callback = nan_scheduled_msg_handler; + status = scheduler_post_message(QDF_MODULE_ID_HDD, + QDF_MODULE_ID_NAN, + QDF_MODULE_ID_OS_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + nan_err("failed to post msg to NAN component, status: %d", + status); + } + + return status; +} + +void ucfg_nan_event_handler(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev, + uint32_t type, void *msg) +{ + struct nan_psoc_priv_obj *psoc_obj = 
nan_get_psoc_priv_obj(psoc); + + if (!psoc_obj) { + nan_err("nan psoc priv object is NULL"); + return; + } + + psoc_obj->cb_obj.os_if_event_handler(psoc, vdev, type, msg); +} + +int ucfg_nan_register_hdd_callbacks(struct wlan_objmgr_psoc *psoc, + struct nan_callbacks *cb_obj, + void (os_if_event_handler)( + struct wlan_objmgr_psoc *, + struct wlan_objmgr_vdev *, + uint32_t, void *)) +{ + struct nan_psoc_priv_obj *psoc_obj = nan_get_psoc_priv_obj(psoc); + + if (!psoc_obj) { + nan_err("nan psoc priv object is NULL"); + return -EINVAL; + } + + psoc_obj->cb_obj.os_if_event_handler = os_if_event_handler; + + psoc_obj->cb_obj.ndi_open = cb_obj->ndi_open; + psoc_obj->cb_obj.ndi_start = cb_obj->ndi_start; + psoc_obj->cb_obj.ndi_delete = cb_obj->ndi_delete; + psoc_obj->cb_obj.ndi_close = cb_obj->ndi_close; + psoc_obj->cb_obj.drv_ndi_create_rsp_handler = + cb_obj->drv_ndi_create_rsp_handler; + psoc_obj->cb_obj.drv_ndi_delete_rsp_handler = + cb_obj->drv_ndi_delete_rsp_handler; + + psoc_obj->cb_obj.get_peer_idx = cb_obj->get_peer_idx; + psoc_obj->cb_obj.new_peer_ind = cb_obj->new_peer_ind; + psoc_obj->cb_obj.peer_departed_ind = cb_obj->peer_departed_ind; + + return 0; +} + +int ucfg_nan_register_lim_callbacks(struct wlan_objmgr_psoc *psoc, + struct nan_callbacks *cb_obj) +{ + struct nan_psoc_priv_obj *psoc_obj = nan_get_psoc_priv_obj(psoc); + + if (!psoc_obj) { + nan_err("nan psoc priv object is NULL"); + return -EINVAL; + } + + psoc_obj->cb_obj.add_ndi_peer = cb_obj->add_ndi_peer; + psoc_obj->cb_obj.ndp_delete_peers = cb_obj->ndp_delete_peers; + psoc_obj->cb_obj.delete_peers_by_addr = cb_obj->delete_peers_by_addr; + + return 0; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_main.c b/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_main.c new file mode 100644 index 0000000000000000000000000000000000000000..0b082942da251fa9b57b7c4bf12dbc1c7eea92a4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_main.c @@ 
-0,0 +1,1416 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains main P2P function definitions + */ + +#include +#include +#include +#include +#include +#include +#include +#include "wlan_p2p_public_struct.h" +#include "wlan_p2p_ucfg_api.h" +#include "wlan_p2p_tgt_api.h" +#include "wlan_p2p_main.h" +#include "wlan_p2p_roc.h" +#include "wlan_p2p_off_chan_tx.h" + +/** + * p2p_get_cmd_type_str() - parse cmd to string + * @cmd_type: P2P cmd type + * + * This function parse P2P cmd to string. 
+ * + * Return: command string + */ +#ifdef WLAN_DEBUG +static char *p2p_get_cmd_type_str(enum p2p_cmd_type cmd_type) +{ + switch (cmd_type) { + case P2P_ROC_REQ: + return "P2P roc request"; + case P2P_CANCEL_ROC_REQ: + return "P2P cancel roc request"; + case P2P_MGMT_TX: + return "P2P mgmt tx request"; + case P2P_MGMT_TX_CANCEL: + return "P2P cancel mgmt tx request"; + case P2P_CLEANUP_ROC: + return "P2P cleanup roc"; + case P2P_CLEANUP_TX: + return "P2P cleanup tx"; + case P2P_SET_RANDOM_MAC: + return "P2P set random mac"; + default: + return "Invalid P2P command"; + } +} + +/** + * p2p_get_event_type_str() - parase event to string + * @event_type: P2P event type + * + * This function parse P2P event to string. + * + * Return: event string + */ +static char *p2p_get_event_type_str(enum p2p_event_type event_type) +{ + switch (event_type) { + case P2P_EVENT_SCAN_EVENT: + return "P2P scan event"; + case P2P_EVENT_MGMT_TX_ACK_CNF: + return "P2P mgmt tx ack event"; + case P2P_EVENT_RX_MGMT: + return "P2P mgmt rx event"; + case P2P_EVENT_LO_STOPPED: + return "P2P lo stop event"; + case P2P_EVENT_NOA: + return "P2P noa event"; + case P2P_EVENT_ADD_MAC_RSP: + return "P2P add mac filter resp event"; + default: + return "Invalid P2P event"; + } +} +#endif + +/** + * p2p_psoc_obj_create_notification() - Function to allocate per P2P + * soc private object + * @soc: soc context + * @data: Pointer to data + * + * This function gets called from object manager when psoc is being + * created and creates p2p soc context. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_psoc_obj_create_notification( + struct wlan_objmgr_psoc *soc, void *data) +{ + struct p2p_soc_priv_obj *p2p_soc_obj; + QDF_STATUS status; + + if (!soc) { + p2p_err("psoc context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = qdf_mem_malloc(sizeof(*p2p_soc_obj)); + if (!p2p_soc_obj) { + p2p_err("Failed to allocate p2p soc private object"); + return QDF_STATUS_E_NOMEM; + } + + p2p_soc_obj->soc = soc; + + status = wlan_objmgr_psoc_component_obj_attach(soc, + WLAN_UMAC_COMP_P2P, p2p_soc_obj, + QDF_STATUS_SUCCESS); + if (status != QDF_STATUS_SUCCESS) { + qdf_mem_free(p2p_soc_obj); + p2p_err("Failed to attach p2p component, %d", status); + return status; + } + + p2p_debug("p2p soc object create successful, %pK", p2p_soc_obj); + + return QDF_STATUS_SUCCESS; +} + +/** + * p2p_psoc_obj_destroy_notification() - Free soc private object + * @soc: soc context + * @data: Pointer to data + * + * This function gets called from object manager when psoc is being + * deleted and delete p2p soc context. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_psoc_obj_destroy_notification( + struct wlan_objmgr_psoc *soc, void *data) +{ + struct p2p_soc_priv_obj *p2p_soc_obj; + QDF_STATUS status; + + if (!soc) { + p2p_err("psoc context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(soc, + WLAN_UMAC_COMP_P2P); + if (!p2p_soc_obj) { + p2p_err("p2p soc private object is NULL"); + return QDF_STATUS_E_FAILURE; + } + + p2p_soc_obj->soc = NULL; + + status = wlan_objmgr_psoc_component_obj_detach(soc, + WLAN_UMAC_COMP_P2P, p2p_soc_obj); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("Failed to detach p2p component, %d", status); + return status; + } + + p2p_debug("destroy p2p soc object, %pK", p2p_soc_obj); + + qdf_mem_free(p2p_soc_obj); + + return QDF_STATUS_SUCCESS; +} + +/** + * p2p_vdev_obj_create_notification() - Allocate per p2p vdev object + * @vdev: vdev context + * @data: Pointer to data + * + * This function gets called from object manager when vdev is being + * created and creates p2p vdev context. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_vdev_obj_create_notification( + struct wlan_objmgr_vdev *vdev, void *data) +{ + struct p2p_vdev_priv_obj *p2p_vdev_obj; + QDF_STATUS status; + enum QDF_OPMODE mode; + + if (!vdev) { + p2p_err("vdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + mode = wlan_vdev_mlme_get_opmode(vdev); + p2p_debug("vdev mode:%d", mode); + if (mode != QDF_P2P_GO_MODE && + mode != QDF_STA_MODE && + mode != QDF_P2P_CLIENT_MODE && + mode != QDF_P2P_DEVICE_MODE) { + p2p_debug("won't create p2p vdev private object for mode %d", + mode); + return QDF_STATUS_SUCCESS; + } + + p2p_vdev_obj = + qdf_mem_malloc(sizeof(*p2p_vdev_obj)); + if (!p2p_vdev_obj) { + p2p_err("Failed to allocate p2p vdev object"); + return QDF_STATUS_E_NOMEM; + } + + p2p_vdev_obj->vdev = vdev; + p2p_vdev_obj->noa_status = true; + p2p_vdev_obj->non_p2p_peer_count = 0; + p2p_init_random_mac_vdev(p2p_vdev_obj); + + status = wlan_objmgr_vdev_component_obj_attach(vdev, + WLAN_UMAC_COMP_P2P, p2p_vdev_obj, + QDF_STATUS_SUCCESS); + if (status != QDF_STATUS_SUCCESS) { + p2p_deinit_random_mac_vdev(p2p_vdev_obj); + qdf_mem_free(p2p_vdev_obj); + p2p_err("Failed to attach p2p component to vdev, %d", + status); + return status; + } + + p2p_debug("p2p vdev object create successful, %pK", p2p_vdev_obj); + + return QDF_STATUS_SUCCESS; +} + +/** + * p2p_vdev_obj_destroy_notification() - Free per P2P vdev object + * @vdev: vdev context + * @data: Pointer to data + * + * This function gets called from object manager when vdev is being + * deleted and delete p2p vdev context. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_vdev_obj_destroy_notification( + struct wlan_objmgr_vdev *vdev, void *data) +{ + struct p2p_vdev_priv_obj *p2p_vdev_obj; + QDF_STATUS status; + enum QDF_OPMODE mode; + + if (!vdev) { + p2p_err("vdev context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + mode = wlan_vdev_mlme_get_opmode(vdev); + p2p_debug("vdev mode:%d", mode); + if (mode != QDF_P2P_GO_MODE && + mode != QDF_STA_MODE && + mode != QDF_P2P_CLIENT_MODE && + mode != QDF_P2P_DEVICE_MODE){ + p2p_debug("no p2p vdev private object for mode %d", mode); + return QDF_STATUS_SUCCESS; + } + + p2p_vdev_obj = wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_P2P); + if (!p2p_vdev_obj) { + p2p_debug("p2p vdev object is NULL"); + return QDF_STATUS_SUCCESS; + } + p2p_deinit_random_mac_vdev(p2p_vdev_obj); + + p2p_vdev_obj->vdev = NULL; + + status = wlan_objmgr_vdev_component_obj_detach(vdev, + WLAN_UMAC_COMP_P2P, p2p_vdev_obj); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("Failed to detach p2p component, %d", status); + return status; + } + + p2p_debug("destroy p2p vdev object, p2p vdev obj:%pK, noa info:%pK", + p2p_vdev_obj, p2p_vdev_obj->noa_info); + + if (p2p_vdev_obj->noa_info) + qdf_mem_free(p2p_vdev_obj->noa_info); + + qdf_mem_free(p2p_vdev_obj); + + return QDF_STATUS_SUCCESS; +} + +/** + * p2p_peer_obj_create_notification() - manages peer details per vdev + * @peer: peer object + * @arg: Pointer to private argument - NULL + * + * This function gets called from object manager when peer is being + * created. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_peer_obj_create_notification( + struct wlan_objmgr_peer *peer, void *arg) +{ + struct wlan_objmgr_vdev *vdev; + struct p2p_vdev_priv_obj *p2p_vdev_obj; + enum QDF_OPMODE mode; + enum wlan_peer_type peer_type; + + if (!peer) { + p2p_err("peer context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + vdev = wlan_peer_get_vdev(peer); + mode = wlan_vdev_mlme_get_opmode(vdev); + if (mode != QDF_P2P_GO_MODE) + return QDF_STATUS_SUCCESS; + + p2p_vdev_obj = wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_P2P); + peer_type = wlan_peer_get_peer_type(peer); + if ((peer_type == WLAN_PEER_STA) && p2p_vdev_obj) { + + mode = wlan_vdev_mlme_get_opmode(vdev); + if (mode == QDF_P2P_GO_MODE) { + p2p_vdev_obj->non_p2p_peer_count++; + p2p_debug("Non P2P peer count: %d", + p2p_vdev_obj->non_p2p_peer_count); + } + } + p2p_debug("p2p peer object create successful"); + + return QDF_STATUS_SUCCESS; +} + +/** + * p2p_peer_obj_destroy_notification() - clears peer details per vdev + * @peer: peer object + * @arg: Pointer to private argument - NULL + * + * This function gets called from object manager when peer is being + * destroyed. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_peer_obj_destroy_notification( + struct wlan_objmgr_peer *peer, void *arg) +{ + struct wlan_objmgr_vdev *vdev; + struct p2p_vdev_priv_obj *p2p_vdev_obj; + struct wlan_objmgr_psoc *psoc; + enum QDF_OPMODE mode; + enum wlan_peer_type peer_type; + uint8_t vdev_id; + + if (!peer) { + p2p_err("peer context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + vdev = wlan_peer_get_vdev(peer); + mode = wlan_vdev_mlme_get_opmode(vdev); + if (mode != QDF_P2P_GO_MODE) + return QDF_STATUS_SUCCESS; + + p2p_vdev_obj = wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_P2P); + psoc = wlan_vdev_get_psoc(vdev); + if (!p2p_vdev_obj || !psoc) { + p2p_debug("p2p_vdev_obj:%pK psoc:%pK", p2p_vdev_obj, psoc); + return QDF_STATUS_E_INVAL; + } + + mode = wlan_vdev_mlme_get_opmode(vdev); + + peer_type = wlan_peer_get_peer_type(peer); + + if ((peer_type == WLAN_PEER_STA) && (mode == QDF_P2P_GO_MODE)) { + + p2p_vdev_obj->non_p2p_peer_count--; + + if (!p2p_vdev_obj->non_p2p_peer_count && + (p2p_vdev_obj->noa_status == false)) { + + vdev_id = wlan_vdev_get_id(vdev); + + if (ucfg_p2p_set_noa(psoc, vdev_id, + false) == QDF_STATUS_SUCCESS) + p2p_vdev_obj->noa_status = true; + else + p2p_vdev_obj->noa_status = false; + + p2p_debug("Non p2p peer disconnected from GO,NOA status: %d.", + p2p_vdev_obj->noa_status); + } + p2p_debug("Non P2P peer count: %d", + p2p_vdev_obj->non_p2p_peer_count); + } + p2p_debug("p2p peer object destroy successful"); + + return QDF_STATUS_SUCCESS; +} + +/** + * p2p_send_noa_to_pe() - send noa information to pe + * @noa_info: vdev context + * + * This function sends noa information to pe since MCL layer need noa + * event. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_send_noa_to_pe(struct p2p_noa_info *noa_info) +{ + struct p2p_noa_attr *noa_attr; + struct scheduler_msg msg = {0}; + QDF_STATUS status; + + if (!noa_info) { + p2p_err("noa info is null"); + return QDF_STATUS_E_INVAL; + } + + noa_attr = qdf_mem_malloc(sizeof(*noa_attr)); + if (!noa_attr) { + p2p_err("Failed to allocate memory for tSirP2PNoaAttr"); + return QDF_STATUS_E_NOMEM; + } + + noa_attr->index = noa_info->index; + noa_attr->opps_ps = noa_info->opps_ps; + noa_attr->ct_win = noa_info->ct_window; + if (!noa_info->num_desc) { + p2p_debug("Zero noa descriptors"); + } else { + p2p_debug("%d noa descriptors", noa_info->num_desc); + + noa_attr->noa1_count = + noa_info->noa_desc[0].type_count; + noa_attr->noa1_duration = + noa_info->noa_desc[0].duration; + noa_attr->noa1_interval = + noa_info->noa_desc[0].interval; + noa_attr->noa1_start_time = + noa_info->noa_desc[0].start_time; + if (noa_info->num_desc > 1) { + noa_attr->noa2_count = + noa_info->noa_desc[1].type_count; + noa_attr->noa2_duration = + noa_info->noa_desc[1].duration; + noa_attr->noa2_interval = + noa_info->noa_desc[1].interval; + noa_attr->noa2_start_time = + noa_info->noa_desc[1].start_time; + } + } + + p2p_debug("Sending P2P_NOA_ATTR_IND to pe"); + + msg.type = P2P_NOA_ATTR_IND; + msg.bodyval = 0; + msg.bodyptr = noa_attr; + status = scheduler_post_message(QDF_MODULE_ID_P2P, + QDF_MODULE_ID_P2P, + QDF_MODULE_ID_PE, + &msg); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_mem_free(noa_attr); + p2p_err("post msg fail:%d", status); + } + + return status; +} + +/** + * process_peer_for_noa() - disable NoA + * @vdev: vdev object + * @psoc: soc object + * @peer: peer object + * + * This function disables NoA + * + * + * Return: QDF_STATUS + */ +static QDF_STATUS process_peer_for_noa(struct wlan_objmgr_vdev *vdev, + struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer) +{ + struct p2p_vdev_priv_obj *p2p_vdev_obj = NULL; 
+ enum QDF_OPMODE mode; + enum wlan_peer_type peer_type; + bool disable_noa; + uint8_t vdev_id; + + if (!vdev || !psoc || !peer) { + p2p_err("vdev:%pK psoc:%pK peer:%pK", vdev, psoc, peer); + return QDF_STATUS_E_INVAL; + } + p2p_vdev_obj = wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_P2P); + if (!p2p_vdev_obj) { + p2p_err("p2p_vdev_obj:%pK", p2p_vdev_obj); + return QDF_STATUS_E_INVAL; + } + mode = wlan_vdev_mlme_get_opmode(vdev); + + peer_type = wlan_peer_get_peer_type(peer); + + disable_noa = ((mode == QDF_P2P_GO_MODE) + && p2p_vdev_obj->non_p2p_peer_count + && p2p_vdev_obj->noa_status); + + if (disable_noa && (peer_type == WLAN_PEER_STA)) { + + vdev_id = wlan_vdev_get_id(vdev); + + if (ucfg_p2p_set_noa(psoc, vdev_id, + true) == QDF_STATUS_SUCCESS) { + p2p_vdev_obj->noa_status = false; + } else { + p2p_vdev_obj->noa_status = true; + } + p2p_debug("NoA status: %d", p2p_vdev_obj->noa_status); + } + p2p_debug("process_peer_for_noa"); + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_FEATURE_P2P_DEBUG +/** + * wlan_p2p_init_connection_status() - init connection status + * @p2p_soc_obj: pointer to p2p psoc object + * + * This function initial p2p connection status. 
+ *
+ * Return: None
+ */
+static void wlan_p2p_init_connection_status(
+	struct p2p_soc_priv_obj *p2p_soc_obj)
+{
+	if (!p2p_soc_obj) {
+		p2p_err("invalid p2p soc obj");
+		return;
+	}
+
+	p2p_soc_obj->connection_status = P2P_NOT_ACTIVE;
+}
+#else
+static void wlan_p2p_init_connection_status(
+	struct p2p_soc_priv_obj *p2p_soc_obj)
+{
+}
+#endif /* WLAN_FEATURE_P2P_DEBUG */
+
+QDF_STATUS p2p_component_init(void)
+{
+	QDF_STATUS status;
+
+	status = wlan_objmgr_register_psoc_create_handler(
+				WLAN_UMAC_COMP_P2P,
+				p2p_psoc_obj_create_notification,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		p2p_err("Failed to register p2p obj create handler");
+		goto err_reg_psoc_create;
+	}
+
+	status = wlan_objmgr_register_psoc_destroy_handler(
+				WLAN_UMAC_COMP_P2P,
+				p2p_psoc_obj_destroy_notification,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		p2p_err("Failed to register p2p obj delete handler");
+		goto err_reg_psoc_delete;
+	}
+
+	status = wlan_objmgr_register_vdev_create_handler(
+				WLAN_UMAC_COMP_P2P,
+				p2p_vdev_obj_create_notification,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		p2p_err("Failed to register p2p vdev create handler");
+		goto err_reg_vdev_create;
+	}
+
+	status = wlan_objmgr_register_vdev_destroy_handler(
+				WLAN_UMAC_COMP_P2P,
+				p2p_vdev_obj_destroy_notification,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		p2p_err("Failed to register p2p vdev delete handler");
+		goto err_reg_vdev_delete;
+	}
+
+	status = wlan_objmgr_register_peer_create_handler(
+				WLAN_UMAC_COMP_P2P,
+				p2p_peer_obj_create_notification,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		p2p_err("Failed to register p2p peer create handler");
+		goto err_reg_peer_create;
+	}
+
+	status = wlan_objmgr_register_peer_destroy_handler(
+				WLAN_UMAC_COMP_P2P,
+				p2p_peer_obj_destroy_notification,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		p2p_err("Failed to register p2p peer destroy handler");
+		goto err_reg_peer_destroy;
+	}
+
+	p2p_debug("Register p2p obj handler successful");
+
+	return QDF_STATUS_SUCCESS;
+err_reg_peer_destroy:
+	wlan_objmgr_unregister_peer_create_handler(WLAN_UMAC_COMP_P2P,
+			p2p_peer_obj_create_notification, NULL);
+err_reg_peer_create:
+	wlan_objmgr_unregister_vdev_destroy_handler(WLAN_UMAC_COMP_P2P,
+			p2p_vdev_obj_destroy_notification, NULL);
+err_reg_vdev_delete:
+	wlan_objmgr_unregister_vdev_create_handler(WLAN_UMAC_COMP_P2P,
+			p2p_vdev_obj_create_notification, NULL);
+err_reg_vdev_create:
+	wlan_objmgr_unregister_psoc_destroy_handler(WLAN_UMAC_COMP_P2P,
+			p2p_psoc_obj_destroy_notification, NULL);
+err_reg_psoc_delete:
+	wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_P2P,
+			p2p_psoc_obj_create_notification, NULL);
+err_reg_psoc_create:
+	return status;
+}
+
+QDF_STATUS p2p_component_deinit(void)
+{
+	QDF_STATUS status;
+	QDF_STATUS ret_status = QDF_STATUS_SUCCESS;
+
+	/* Mirror p2p_component_init(): the peer create/destroy handlers
+	 * are registered there too, so they must be unregistered here as
+	 * well; previously they were leaked, leaving stale callbacks
+	 * registered after component deinit.
+	 */
+	status = wlan_objmgr_unregister_peer_create_handler(
+				WLAN_UMAC_COMP_P2P,
+				p2p_peer_obj_create_notification,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		p2p_err("Failed to unregister p2p peer create handler, %d",
+			status);
+		ret_status = status;
+	}
+
+	status = wlan_objmgr_unregister_peer_destroy_handler(
+				WLAN_UMAC_COMP_P2P,
+				p2p_peer_obj_destroy_notification,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		p2p_err("Failed to unregister p2p peer destroy handler, %d",
+			status);
+		ret_status = status;
+	}
+
+	status = wlan_objmgr_unregister_vdev_create_handler(
+				WLAN_UMAC_COMP_P2P,
+				p2p_vdev_obj_create_notification,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		p2p_err("Failed to unregister p2p vdev create handler, %d",
+			status);
+		ret_status = status;
+	}
+
+	status = wlan_objmgr_unregister_vdev_destroy_handler(
+				WLAN_UMAC_COMP_P2P,
+				p2p_vdev_obj_destroy_notification,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		p2p_err("Failed to unregister p2p vdev delete handler, %d",
+			status);
+		ret_status = status;
+	}
+
+	status = wlan_objmgr_unregister_psoc_create_handler(
+				WLAN_UMAC_COMP_P2P,
+				p2p_psoc_obj_create_notification,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		p2p_err("Failed to unregister p2p obj create handler, %d",
+			status);
+		ret_status = status;
+	}
+
+	status = wlan_objmgr_unregister_psoc_destroy_handler(
+				WLAN_UMAC_COMP_P2P,
+				p2p_psoc_obj_destroy_notification,
+				NULL);
+	if (status != QDF_STATUS_SUCCESS) {
+		p2p_err("Failed to unregister p2p obj delete handler, %d",
+			status);
+		ret_status = status;
+	}
+
+	p2p_debug("Unregister p2p obj handler complete");
+
+	return ret_status;
+}
+
+QDF_STATUS
p2p_psoc_object_open(struct wlan_objmgr_psoc *soc) +{ + QDF_STATUS status; + struct p2p_soc_priv_obj *p2p_soc_obj; + + if (!soc) { + p2p_err("psoc context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(soc, + WLAN_UMAC_COMP_P2P); + if (!p2p_soc_obj) { + p2p_err("p2p soc priviate object is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_list_create(&p2p_soc_obj->roc_q, MAX_QUEUE_LENGTH); + qdf_list_create(&p2p_soc_obj->tx_q_roc, MAX_QUEUE_LENGTH); + qdf_list_create(&p2p_soc_obj->tx_q_ack, MAX_QUEUE_LENGTH); + + status = qdf_event_create(&p2p_soc_obj->cancel_roc_done); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("failed to create cancel roc done event"); + goto fail_cancel_roc; + } + + status = qdf_event_create(&p2p_soc_obj->cleanup_roc_done); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("failed to create cleanup roc done event"); + goto fail_cleanup_roc; + } + + status = qdf_event_create(&p2p_soc_obj->cleanup_tx_done); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("failed to create cleanup roc done event"); + goto fail_cleanup_tx; + } + + qdf_runtime_lock_init(&p2p_soc_obj->roc_runtime_lock); + p2p_soc_obj->cur_roc_vdev_id = P2P_INVALID_VDEV_ID; + qdf_idr_create(&p2p_soc_obj->p2p_idr); + + p2p_debug("p2p psoc object open successful"); + + return QDF_STATUS_SUCCESS; + +fail_cleanup_tx: + qdf_event_destroy(&p2p_soc_obj->cleanup_roc_done); + +fail_cleanup_roc: + qdf_event_destroy(&p2p_soc_obj->cancel_roc_done); + +fail_cancel_roc: + qdf_list_destroy(&p2p_soc_obj->tx_q_ack); + qdf_list_destroy(&p2p_soc_obj->tx_q_roc); + qdf_list_destroy(&p2p_soc_obj->roc_q); + + return status; +} + +QDF_STATUS p2p_psoc_object_close(struct wlan_objmgr_psoc *soc) +{ + struct p2p_soc_priv_obj *p2p_soc_obj; + + if (!soc) { + p2p_err("psoc context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(soc, + WLAN_UMAC_COMP_P2P); + if (!p2p_soc_obj) { + p2p_err("p2p soc 
object is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_idr_destroy(&p2p_soc_obj->p2p_idr); + qdf_runtime_lock_deinit(&p2p_soc_obj->roc_runtime_lock); + qdf_event_destroy(&p2p_soc_obj->cleanup_tx_done); + qdf_event_destroy(&p2p_soc_obj->cleanup_roc_done); + qdf_event_destroy(&p2p_soc_obj->cancel_roc_done); + qdf_list_destroy(&p2p_soc_obj->tx_q_ack); + qdf_list_destroy(&p2p_soc_obj->tx_q_roc); + qdf_list_destroy(&p2p_soc_obj->roc_q); + + p2p_debug("p2p psoc object close successful"); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS p2p_psoc_start(struct wlan_objmgr_psoc *soc, + struct p2p_start_param *req) +{ + struct p2p_soc_priv_obj *p2p_soc_obj; + struct p2p_start_param *start_param; + + if (!soc) { + p2p_err("psoc context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(soc, + WLAN_UMAC_COMP_P2P); + if (!p2p_soc_obj) { + p2p_err("P2P soc object is NULL"); + return QDF_STATUS_E_FAILURE; + } + + start_param = qdf_mem_malloc(sizeof(*start_param)); + if (!start_param) { + p2p_err("Failed to allocate start params"); + return QDF_STATUS_E_NOMEM; + } + start_param->rx_cb = req->rx_cb; + start_param->rx_cb_data = req->rx_cb_data; + start_param->event_cb = req->event_cb; + start_param->event_cb_data = req->event_cb_data; + start_param->tx_cnf_cb = req->tx_cnf_cb; + start_param->tx_cnf_cb_data = req->tx_cnf_cb_data; + start_param->lo_event_cb = req->lo_event_cb; + start_param->lo_event_cb_data = req->lo_event_cb_data; + p2p_soc_obj->start_param = start_param; + + wlan_p2p_init_connection_status(p2p_soc_obj); + + /* register p2p lo stop and noa event */ + tgt_p2p_register_lo_ev_handler(soc); + tgt_p2p_register_noa_ev_handler(soc); + tgt_p2p_register_macaddr_rx_filter_evt_handler(soc, true); + + /* register scan request id */ + p2p_soc_obj->scan_req_id = ucfg_scan_register_requester( + soc, P2P_MODULE_NAME, tgt_p2p_scan_event_cb, + p2p_soc_obj); + + /* register rx action frame */ + p2p_mgmt_rx_action_ops(soc, 
true); + + p2p_debug("p2p psoc start successful, scan request id:%d", + p2p_soc_obj->scan_req_id); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS p2p_psoc_stop(struct wlan_objmgr_psoc *soc) +{ + struct p2p_soc_priv_obj *p2p_soc_obj; + struct p2p_start_param *start_param; + + if (!soc) { + p2p_err("psoc context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(soc, + WLAN_UMAC_COMP_P2P); + if (!p2p_soc_obj) { + p2p_err("P2P soc object is NULL"); + return QDF_STATUS_E_FAILURE; + } + + start_param = p2p_soc_obj->start_param; + p2p_soc_obj->start_param = NULL; + if (!start_param) { + p2p_err("start parameters is NULL"); + return QDF_STATUS_E_FAILURE; + } + + /* unregister rx action frame */ + p2p_mgmt_rx_action_ops(soc, false); + + /* clean up queue of p2p psoc private object */ + p2p_cleanup_tx_sync(p2p_soc_obj, NULL); + p2p_cleanup_roc_sync(p2p_soc_obj, NULL); + + /* unrgister scan request id*/ + ucfg_scan_unregister_requester(soc, p2p_soc_obj->scan_req_id); + + /* unregister p2p lo stop and noa event */ + tgt_p2p_register_macaddr_rx_filter_evt_handler(soc, false); + tgt_p2p_unregister_lo_ev_handler(soc); + tgt_p2p_unregister_noa_ev_handler(soc); + + start_param->rx_cb = NULL; + start_param->rx_cb_data = NULL; + start_param->event_cb = NULL; + start_param->event_cb_data = NULL; + start_param->tx_cnf_cb = NULL; + start_param->tx_cnf_cb_data = NULL; + qdf_mem_free(start_param); + + p2p_debug("p2p psoc stop successful"); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS p2p_process_cmd(struct scheduler_msg *msg) +{ + QDF_STATUS status; + + p2p_debug("msg type %d, %s", msg->type, + p2p_get_cmd_type_str(msg->type)); + + if (!(msg->bodyptr)) { + p2p_err("Invalid message body"); + return QDF_STATUS_E_INVAL; + } + switch (msg->type) { + case P2P_ROC_REQ: + status = p2p_process_roc_req( + (struct p2p_roc_context *) + msg->bodyptr); + break; + case P2P_CANCEL_ROC_REQ: + status = p2p_process_cancel_roc_req( + (struct 
cancel_roc_context *) + msg->bodyptr); + qdf_mem_free(msg->bodyptr); + break; + case P2P_MGMT_TX: + status = p2p_process_mgmt_tx( + (struct tx_action_context *) + msg->bodyptr); + break; + case P2P_MGMT_TX_CANCEL: + status = p2p_process_mgmt_tx_cancel( + (struct cancel_roc_context *) + msg->bodyptr); + qdf_mem_free(msg->bodyptr); + break; + case P2P_CLEANUP_ROC: + status = p2p_process_cleanup_roc_queue( + (struct p2p_cleanup_param *) + msg->bodyptr); + qdf_mem_free(msg->bodyptr); + break; + case P2P_CLEANUP_TX: + status = p2p_process_cleanup_tx_queue( + (struct p2p_cleanup_param *) + msg->bodyptr); + qdf_mem_free(msg->bodyptr); + break; + case P2P_SET_RANDOM_MAC: + status = p2p_process_set_rand_mac(msg->bodyptr); + qdf_mem_free(msg->bodyptr); + break; + + default: + p2p_err("drop unexpected message received %d", + msg->type); + status = QDF_STATUS_E_INVAL; + break; + } + + return status; +} + +QDF_STATUS p2p_process_evt(struct scheduler_msg *msg) +{ + QDF_STATUS status; + + p2p_debug("msg type %d, %s", msg->type, + p2p_get_event_type_str(msg->type)); + + if (!(msg->bodyptr)) { + p2p_err("Invalid message body"); + return QDF_STATUS_E_INVAL; + } + + switch (msg->type) { + case P2P_EVENT_MGMT_TX_ACK_CNF: + status = p2p_process_mgmt_tx_ack_cnf( + (struct p2p_tx_conf_event *) + msg->bodyptr); + break; + case P2P_EVENT_RX_MGMT: + status = p2p_process_rx_mgmt( + (struct p2p_rx_mgmt_event *) + msg->bodyptr); + break; + case P2P_EVENT_LO_STOPPED: + status = p2p_process_lo_stop( + (struct p2p_lo_stop_event *) + msg->bodyptr); + break; + case P2P_EVENT_NOA: + status = p2p_process_noa( + (struct p2p_noa_event *) + msg->bodyptr); + break; + case P2P_EVENT_ADD_MAC_RSP: + status = p2p_process_set_rand_mac_rsp( + (struct p2p_mac_filter_rsp *) + msg->bodyptr); + break; + default: + p2p_err("Drop unexpected message received %d", + msg->type); + status = QDF_STATUS_E_INVAL; + break; + } + + qdf_mem_free(msg->bodyptr); + msg->bodyptr = NULL; + + return status; +} + +QDF_STATUS 
p2p_msg_flush_callback(struct scheduler_msg *msg) +{ + struct tx_action_context *tx_action; + + if (!msg || !(msg->bodyptr)) { + p2p_err("invalid msg"); + return QDF_STATUS_E_INVAL; + } + + p2p_debug("flush msg, type:%d", msg->type); + switch (msg->type) { + case P2P_MGMT_TX: + tx_action = (struct tx_action_context *)msg->bodyptr; + qdf_mem_free(tx_action->buf); + qdf_mem_free(tx_action); + break; + default: + qdf_mem_free(msg->bodyptr); + break; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS p2p_event_flush_callback(struct scheduler_msg *msg) +{ + struct p2p_noa_event *noa_event; + struct p2p_rx_mgmt_event *rx_mgmt_event; + struct p2p_tx_conf_event *tx_conf_event; + struct p2p_lo_stop_event *lo_stop_event; + + if (!msg || !(msg->bodyptr)) { + p2p_err("invalid msg"); + return QDF_STATUS_E_INVAL; + } + + p2p_debug("flush event, type:%d", msg->type); + switch (msg->type) { + case P2P_EVENT_NOA: + noa_event = (struct p2p_noa_event *)msg->bodyptr; + qdf_mem_free(noa_event->noa_info); + qdf_mem_free(noa_event); + break; + case P2P_EVENT_RX_MGMT: + rx_mgmt_event = (struct p2p_rx_mgmt_event *)msg->bodyptr; + qdf_mem_free(rx_mgmt_event->rx_mgmt); + qdf_mem_free(rx_mgmt_event); + break; + case P2P_EVENT_MGMT_TX_ACK_CNF: + tx_conf_event = (struct p2p_tx_conf_event *)msg->bodyptr; + qdf_mem_free(tx_conf_event); + qdf_nbuf_free(tx_conf_event->nbuf); + break; + case P2P_EVENT_LO_STOPPED: + lo_stop_event = (struct p2p_lo_stop_event *)msg->bodyptr; + qdf_mem_free(lo_stop_event->lo_event); + qdf_mem_free(lo_stop_event); + break; + default: + qdf_mem_free(msg->bodyptr); + break; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS p2p_process_lo_stop( + struct p2p_lo_stop_event *lo_stop_event) +{ + struct p2p_lo_event *lo_evt; + struct p2p_soc_priv_obj *p2p_soc_obj; + struct p2p_start_param *start_param; + + if (!lo_stop_event) { + p2p_err("invalid lo stop event"); + return QDF_STATUS_E_INVAL; + } + + lo_evt = lo_stop_event->lo_event; + if (!lo_evt) { + p2p_err("invalid lo 
event"); + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = lo_stop_event->p2p_soc_obj; + + p2p_debug("vdev_id %d, reason %d", + lo_evt->vdev_id, lo_evt->reason_code); + + if (!p2p_soc_obj || !(p2p_soc_obj->start_param)) { + p2p_err("Invalid p2p soc object or start parameters"); + qdf_mem_free(lo_evt); + return QDF_STATUS_E_INVAL; + } + start_param = p2p_soc_obj->start_param; + if (start_param->lo_event_cb) + start_param->lo_event_cb( + start_param->lo_event_cb_data, lo_evt); + else + p2p_err("Invalid p2p soc obj or hdd lo event callback"); + + qdf_mem_free(lo_evt); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS p2p_process_noa(struct p2p_noa_event *noa_event) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct p2p_noa_info *noa_info; + struct p2p_vdev_priv_obj *p2p_vdev_obj; + struct p2p_soc_priv_obj *p2p_soc_obj; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_psoc *psoc; + enum QDF_OPMODE mode; + + if (!noa_event) { + p2p_err("invalid noa event"); + return QDF_STATUS_E_INVAL; + } + noa_info = noa_event->noa_info; + p2p_soc_obj = noa_event->p2p_soc_obj; + psoc = p2p_soc_obj->soc; + + p2p_debug("psoc:%pK, index:%d, opps_ps:%d, ct_window:%d, num_desc:%d, vdev_id:%d", + psoc, noa_info->index, noa_info->opps_ps, + noa_info->ct_window, noa_info->num_desc, + noa_info->vdev_id); + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + noa_info->vdev_id, WLAN_P2P_ID); + if (!vdev) { + p2p_err("vdev obj is NULL"); + qdf_mem_free(noa_event->noa_info); + return QDF_STATUS_E_INVAL; + } + + mode = wlan_vdev_mlme_get_opmode(vdev); + p2p_debug("vdev mode:%d", mode); + if (mode != QDF_P2P_GO_MODE) { + p2p_err("invalid p2p vdev mode:%d", mode); + status = QDF_STATUS_E_INVAL; + goto fail; + } + + /* must send noa to pe since of limitation*/ + p2p_send_noa_to_pe(noa_info); + + p2p_vdev_obj = wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_P2P); + if (!(p2p_vdev_obj->noa_info)) { + p2p_vdev_obj->noa_info = + qdf_mem_malloc(sizeof(struct p2p_noa_info)); + if 
(!(p2p_vdev_obj->noa_info)) { + p2p_err("Failed to allocate p2p noa info"); + status = QDF_STATUS_E_NOMEM; + goto fail; + } + } + qdf_mem_copy(p2p_vdev_obj->noa_info, noa_info, + sizeof(struct p2p_noa_info)); +fail: + qdf_mem_free(noa_event->noa_info); + wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID); + + return status; +} + +void p2p_peer_authorized(struct wlan_objmgr_vdev *vdev, uint8_t *mac_addr) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_peer *peer; + uint8_t pdev_id; + + if (!vdev) { + p2p_err("vdev:%pK", vdev); + return; + } + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + p2p_err("psoc:%pK", psoc); + return; + } + pdev_id = wlan_objmgr_pdev_get_pdev_id(wlan_vdev_get_pdev(vdev)); + peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr, WLAN_P2P_ID); + if (!peer) { + p2p_debug("peer info not found"); + return; + } + status = process_peer_for_noa(vdev, psoc, peer); + wlan_objmgr_peer_release_ref(peer, WLAN_P2P_ID); + + if (status != QDF_STATUS_SUCCESS) { + p2p_err("status:%u", status); + return; + } + p2p_debug("peer is authorized"); +} + +#ifdef WLAN_FEATURE_P2P_DEBUG +static struct p2p_soc_priv_obj * +get_p2p_soc_obj_by_vdev(struct wlan_objmgr_vdev *vdev) +{ + struct p2p_soc_priv_obj *p2p_soc_obj; + struct wlan_objmgr_psoc *soc; + + if (!vdev) { + p2p_err("vdev context passed is NULL"); + return NULL; + } + + soc = wlan_vdev_get_psoc(vdev); + if (!soc) { + p2p_err("soc context is NULL"); + return NULL; + } + + p2p_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(soc, + WLAN_UMAC_COMP_P2P); + if (!p2p_soc_obj) + p2p_err("P2P soc context is NULL"); + + return p2p_soc_obj; +} + +QDF_STATUS p2p_status_scan(struct wlan_objmgr_vdev *vdev) +{ + struct p2p_soc_priv_obj *p2p_soc_obj; + enum QDF_OPMODE mode; + + p2p_soc_obj = get_p2p_soc_obj_by_vdev(vdev); + if (!p2p_soc_obj) { + p2p_err("P2P soc context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + mode = wlan_vdev_mlme_get_opmode(vdev); + if (mode != QDF_P2P_CLIENT_MODE && + 
mode != QDF_P2P_DEVICE_MODE) { + p2p_debug("this is not P2P CLIENT or DEVICE, mode:%d", + mode); + return QDF_STATUS_SUCCESS; + } + + p2p_debug("connection status:%d", p2p_soc_obj->connection_status); + switch (p2p_soc_obj->connection_status) { + case P2P_GO_NEG_COMPLETED: + case P2P_GO_NEG_PROCESS: + p2p_soc_obj->connection_status = + P2P_CLIENT_CONNECTING_STATE_1; + p2p_debug("[P2P State] Changing state from Go nego completed to Connection is started"); + p2p_debug("P2P Scanning is started for 8way Handshake"); + break; + case P2P_CLIENT_DISCONNECTED_STATE: + p2p_soc_obj->connection_status = + P2P_CLIENT_CONNECTING_STATE_2; + p2p_debug("[P2P State] Changing state from Disconnected state to Connection is started"); + p2p_debug("P2P Scanning is started for 4way Handshake"); + break; + default: + break; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS p2p_status_connect(struct wlan_objmgr_vdev *vdev) +{ + struct p2p_soc_priv_obj *p2p_soc_obj; + enum QDF_OPMODE mode; + + p2p_soc_obj = get_p2p_soc_obj_by_vdev(vdev); + if (!p2p_soc_obj) { + p2p_err("P2P soc context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + mode = wlan_vdev_mlme_get_opmode(vdev); + if (mode != QDF_P2P_CLIENT_MODE) { + p2p_debug("this is not P2P CLIENT, mode:%d", mode); + return QDF_STATUS_SUCCESS; + } + + p2p_debug("connection status:%d", p2p_soc_obj->connection_status); + switch (p2p_soc_obj->connection_status) { + case P2P_CLIENT_CONNECTING_STATE_1: + p2p_soc_obj->connection_status = + P2P_CLIENT_CONNECTED_STATE_1; + p2p_debug("[P2P State] Changing state from Connecting state to Connected State for 8-way Handshake"); + break; + case P2P_CLIENT_DISCONNECTED_STATE: + p2p_debug("No scan before 4-way handshake"); + /* + * Fall thru since no scan before 4-way handshake and + * won't enter state P2P_CLIENT_CONNECTING_STATE_2: + */ + case P2P_CLIENT_CONNECTING_STATE_2: + p2p_soc_obj->connection_status = + P2P_CLIENT_COMPLETED_STATE; + p2p_debug("[P2P State] Changing state from Connecting state to 
P2P Client Connection Completed"); + break; + default: + break; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS p2p_status_disconnect(struct wlan_objmgr_vdev *vdev) +{ + struct p2p_soc_priv_obj *p2p_soc_obj; + enum QDF_OPMODE mode; + + p2p_soc_obj = get_p2p_soc_obj_by_vdev(vdev); + if (!p2p_soc_obj) { + p2p_err("P2P soc context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + mode = wlan_vdev_mlme_get_opmode(vdev); + if (mode != QDF_P2P_CLIENT_MODE) { + p2p_debug("this is not P2P CLIENT, mode:%d", mode); + return QDF_STATUS_SUCCESS; + } + + p2p_debug("connection status:%d", p2p_soc_obj->connection_status); + switch (p2p_soc_obj->connection_status) { + case P2P_CLIENT_CONNECTED_STATE_1: + p2p_soc_obj->connection_status = + P2P_CLIENT_DISCONNECTED_STATE; + p2p_debug("[P2P State] 8 way Handshake completed and moved to disconnected state"); + break; + case P2P_CLIENT_COMPLETED_STATE: + p2p_soc_obj->connection_status = P2P_NOT_ACTIVE; + p2p_debug("[P2P State] P2P Client is removed and moved to inactive state"); + break; + default: + break; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS p2p_status_start_bss(struct wlan_objmgr_vdev *vdev) +{ + struct p2p_soc_priv_obj *p2p_soc_obj; + enum QDF_OPMODE mode; + + p2p_soc_obj = get_p2p_soc_obj_by_vdev(vdev); + if (!p2p_soc_obj) { + p2p_err("P2P soc context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + mode = wlan_vdev_mlme_get_opmode(vdev); + if (mode != QDF_P2P_GO_MODE) { + p2p_debug("this is not P2P GO, mode:%d", mode); + return QDF_STATUS_SUCCESS; + } + + p2p_debug("connection status:%d", p2p_soc_obj->connection_status); + switch (p2p_soc_obj->connection_status) { + case P2P_GO_NEG_COMPLETED: + p2p_soc_obj->connection_status = + P2P_GO_COMPLETED_STATE; + p2p_debug("[P2P State] From Go nego completed to Non-autonomous Group started"); + break; + case P2P_NOT_ACTIVE: + p2p_soc_obj->connection_status = + P2P_GO_COMPLETED_STATE; + p2p_debug("[P2P State] From Inactive to Autonomous Group started"); + break; + 
default: + break; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS p2p_status_stop_bss(struct wlan_objmgr_vdev *vdev) +{ + struct p2p_soc_priv_obj *p2p_soc_obj; + enum QDF_OPMODE mode; + + p2p_soc_obj = get_p2p_soc_obj_by_vdev(vdev); + if (!p2p_soc_obj) { + p2p_err("P2P soc context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + mode = wlan_vdev_mlme_get_opmode(vdev); + if (mode != QDF_P2P_GO_MODE) { + p2p_debug("this is not P2P GO, mode:%d", mode); + return QDF_STATUS_SUCCESS; + } + + p2p_debug("connection status:%d", p2p_soc_obj->connection_status); + if (p2p_soc_obj->connection_status == P2P_GO_COMPLETED_STATE) { + p2p_soc_obj->connection_status = P2P_NOT_ACTIVE; + p2p_debug("[P2P State] From GO completed to Inactive state GO got removed"); + } + + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_FEATURE_P2P_DEBUG */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_main.h b/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_main.h new file mode 100644 index 0000000000000000000000000000000000000000..f1d62df14cb3445c9a3d8e99a6017417de966968 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_main.h @@ -0,0 +1,543 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: Defines main P2P functions & structures
 */

#ifndef _WLAN_P2P_MAIN_H_
#define _WLAN_P2P_MAIN_H_

/*
 * NOTE(review): the angle-bracket targets of the includes below were lost
 * when this patch was extracted (everything between '<' and '>' stripped).
 * Restore the original qdf/objmgr/scan header names from upstream before
 * building -- verify against the original patch.
 */
#include
#include
#include
#include
#include
#include

/* Depth bound for the pending roc/tx request queues below */
#define MAX_QUEUE_LENGTH 20
#define P2P_NOA_ATTR_IND 0x1090
#define P2P_MODULE_NAME "P2P"
/* Sentinel: no vdev currently associated (vdev ids are small integers) */
#define P2P_INVALID_VDEV_ID 0xFFFFFFFF
/* Size of the per-vdev random-MAC filter table */
#define MAX_RANDOM_MAC_ADDRS 4

/* Component-scoped logging wrappers over QDF_TRACE */
#define p2p_log(level, args...) \
	QDF_TRACE(QDF_MODULE_ID_P2P, level, ## args)
#define p2p_logl(level, format, args...) \
	p2p_log(level, FL(format), ## args)

#define p2p_debug(format, args ...) \
	p2p_logl(QDF_TRACE_LEVEL_DEBUG, format, ## args)
#define p2p_info(format, args ...) \
	p2p_logl(QDF_TRACE_LEVEL_INFO, format, ## args)
#define p2p_warn(format, args ...) \
	p2p_logl(QDF_TRACE_LEVEL_WARN, format, ## args)
#define p2p_err(format, args ...) \
	p2p_logl(QDF_TRACE_LEVEL_ERROR, format, ## args)
#define p2p_alert(format, args ...) \
	p2p_logl(QDF_TRACE_LEVEL_FATAL, format, ## args)
/* Rate-limited debug trace (no FL() wrapping, unlike the macros above) */
#define p2p_debug_rl(params...) \
	QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_P2P, params)

struct scheduler_msg;
struct p2p_tx_cnf;
struct p2p_rx_mgmt_frame;
struct p2p_lo_event;
struct p2p_start_param;
struct p2p_noa_info;
struct tx_action_context;

/**
 * enum p2p_cmd_type - P2P request type
 * @P2P_ROC_REQ: P2P roc request
 * @P2P_CANCEL_ROC_REQ: Cancel P2P roc request
 * @P2P_MGMT_TX: P2P tx action frame request
 * @P2P_MGMT_TX_CANCEL: Cancel tx action frame request
 * @P2P_CLEANUP_ROC: Cleanup roc queue
 * @P2P_CLEANUP_TX: Cleanup tx mgmt queue
 * @P2P_SET_RANDOM_MAC: Set Random MAC addr filter request
 */
enum p2p_cmd_type {
	P2P_ROC_REQ = 0,
	P2P_CANCEL_ROC_REQ,
	P2P_MGMT_TX,
	P2P_MGMT_TX_CANCEL,
	P2P_CLEANUP_ROC,
	P2P_CLEANUP_TX,
	P2P_SET_RANDOM_MAC,
};

/**
 * enum p2p_event_type - P2P event type
 * @P2P_EVENT_SCAN_EVENT: P2P scan event
 * @P2P_EVENT_MGMT_TX_ACK_CNF: P2P mgmt tx confirm frame
 * @P2P_EVENT_RX_MGMT: P2P rx mgmt frame
 * @P2P_EVENT_LO_STOPPED: P2P listen offload stopped event
 * @P2P_EVENT_NOA: P2P noa event
 * @P2P_EVENT_ADD_MAC_RSP: Set Random MAC addr event
 */
enum p2p_event_type {
	P2P_EVENT_SCAN_EVENT = 0,
	P2P_EVENT_MGMT_TX_ACK_CNF,
	P2P_EVENT_RX_MGMT,
	P2P_EVENT_LO_STOPPED,
	P2P_EVENT_NOA,
	P2P_EVENT_ADD_MAC_RSP,
};

/**
 * struct p2p_tx_conf_event - p2p tx confirm event
 * @p2p_soc_obj: p2p soc private object
 * @nbuf: tx frame buffer address (network buffer)
 * @status: tx status
 */
struct p2p_tx_conf_event {
	struct p2p_soc_priv_obj *p2p_soc_obj;
	qdf_nbuf_t nbuf;
	uint32_t status;
};

/**
 * struct p2p_rx_mgmt_event - p2p rx mgmt frame event
 * @p2p_soc_obj: p2p soc private object
 * @rx_mgmt: p2p rx mgmt frame structure
 */
struct p2p_rx_mgmt_event {
	struct p2p_soc_priv_obj *p2p_soc_obj;
	struct p2p_rx_mgmt_frame *rx_mgmt;
};

/**
 * struct p2p_lo_stop_event - p2p listen offload stop event
 * @p2p_soc_obj: p2p soc private object
 * @lo_event: p2p lo stop structure
 */
struct p2p_lo_stop_event {
	struct p2p_soc_priv_obj *p2p_soc_obj;
	struct p2p_lo_event *lo_event;
};

/**
 * struct p2p_noa_event - p2p noa event
 * @p2p_soc_obj: p2p soc private object
 * @noa_info: p2p noa information structure
 */
struct p2p_noa_event {
	struct p2p_soc_priv_obj *p2p_soc_obj;
	struct p2p_noa_info *noa_info;
};

/**
 * struct p2p_mac_filter_rsp - p2p set mac filter response
 * @p2p_soc_obj: p2p soc private object
 * @vdev_id: vdev id
 * @status: successfully(1) or not (0)
 */
struct p2p_mac_filter_rsp {
	struct p2p_soc_priv_obj *p2p_soc_obj;
	uint32_t vdev_id;
	uint32_t status;
};

#ifdef WLAN_FEATURE_P2P_DEBUG
/**
 * enum p2p_connection_status - p2p connection status
 * @P2P_NOT_ACTIVE: P2P not active status
 * @P2P_GO_NEG_PROCESS: P2P GO negotiation in process
 * @P2P_GO_NEG_COMPLETED: P2P GO negotiation complete
 * @P2P_CLIENT_CONNECTING_STATE_1: P2P client connecting state 1
 * @P2P_GO_COMPLETED_STATE: P2P GO complete state
 * @P2P_CLIENT_CONNECTED_STATE_1: P2P client connected state 1
 * @P2P_CLIENT_DISCONNECTED_STATE: P2P client disconnected state
 * @P2P_CLIENT_CONNECTING_STATE_2: P2P client connecting state 2
 * @P2P_CLIENT_COMPLETED_STATE: P2P client complete state
 */
enum p2p_connection_status {
	P2P_NOT_ACTIVE,
	P2P_GO_NEG_PROCESS,
	P2P_GO_NEG_COMPLETED,
	P2P_CLIENT_CONNECTING_STATE_1,
	P2P_GO_COMPLETED_STATE,
	P2P_CLIENT_CONNECTED_STATE_1,
	P2P_CLIENT_DISCONNECTED_STATE,
	P2P_CLIENT_CONNECTING_STATE_2,
	P2P_CLIENT_COMPLETED_STATE
};
#endif

/**
 * struct p2p_soc_priv_obj - Per SoC p2p private object
 * @soc: Pointer to SoC context
 * @roc_q: Queue for pending roc requests
 * @tx_q_roc: Queue for tx frames waiting for RoC
 * @tx_q_ack: Queue for tx frames waiting for ack
 * @scan_req_id: Scan requestor id
 * @start_param: Start parameters, include callbacks and user
 * data to HDD
 * @cancel_roc_done: Cancel roc done event
 * @cleanup_roc_done: Cleanup roc done event
 * @cleanup_tx_done: Cleanup tx done event
 * @roc_runtime_lock: Runtime lock for roc request
 * @p2p_cb: Callbacks to protocol stack
 * @cur_roc_vdev_id: Vdev id of current roc
 * @p2p_idr: p2p idr
 * @connection_status: Global P2P connection status
 */
struct p2p_soc_priv_obj {
	struct wlan_objmgr_psoc *soc;
	qdf_list_t roc_q;
	qdf_list_t tx_q_roc;
	qdf_list_t tx_q_ack;
	wlan_scan_requester scan_req_id;
	struct p2p_start_param *start_param;
	qdf_event_t cancel_roc_done;
	qdf_event_t cleanup_roc_done;
	qdf_event_t cleanup_tx_done;
	qdf_runtime_lock_t roc_runtime_lock;
	struct p2p_protocol_callbacks p2p_cb;
	uint32_t cur_roc_vdev_id;
	qdf_idr p2p_idr;
#ifdef WLAN_FEATURE_P2P_DEBUG
	enum p2p_connection_status connection_status;
#endif
};

/**
 * struct action_frame_cookie - Action frame cookie item in cookie list
 * @cookie_node: qdf_list_node
 * @cookie: Cookie value
 */
struct action_frame_cookie {
	qdf_list_node_t cookie_node;
	uint64_t cookie;
};

/**
 * struct action_frame_random_mac - Action Frame random mac addr &
 * related attrs
 * @p2p_vdev_obj: p2p vdev private obj ptr
 * @in_use: Checks whether random mac is in use
 * @addr: Contains random mac addr
 * @freq: Channel frequency
 * @clear_timer: timer to clear random mac filter
 * @cookie_list: List of cookies tied with random mac
 */
struct action_frame_random_mac {
	struct p2p_vdev_priv_obj *p2p_vdev_obj;
	bool in_use;
	uint8_t addr[QDF_MAC_ADDR_SIZE];
	uint32_t freq;
	qdf_mc_timer_t clear_timer;
	qdf_list_t cookie_list;
};

/**
 * p2p_request_mgr_callback_t() - callback to process set mac filter result
 * @result: bool
 * @context: callback context.
 *
 * Return: void
 */
typedef void (*p2p_request_mgr_callback_t)(bool result, void *context);

/**
 * struct random_mac_priv - request private data struct
 * @result: result of request.
 */
struct random_mac_priv {
	bool result;
};

/**
 * struct p2p_set_mac_filter_req - set mac addr filter cmd data structure
 * @soc: soc object
 * @vdev_id: vdev id
 * @mac: mac address to be set
 * @freq: frequency
 * @set: set or clear
 * @cb: callback func to be called when the request completion
 * @req_cookie: cookie to be used when request completed
 */
struct p2p_set_mac_filter_req {
	struct wlan_objmgr_psoc *soc;
	uint32_t vdev_id;
	uint8_t mac[QDF_MAC_ADDR_SIZE];
	uint32_t freq;
	bool set;
	p2p_request_mgr_callback_t cb;
	void *req_cookie;
};

/**
 * struct p2p_vdev_priv_obj - Per vdev p2p private object
 * @vdev: Pointer to vdev context
 * @noa_info: NoA information
 * @noa_status: NoA status i.e. Enabled / Disabled (TRUE/FALSE)
 * @non_p2p_peer_count: Number of legacy stations connected to this GO
 * @random_mac_lock: lock for random_mac list
 * @random_mac: active random mac filter lists
 * @pending_req: pending set mac filter request.
 */
struct p2p_vdev_priv_obj {
	struct wlan_objmgr_vdev *vdev;
	struct p2p_noa_info *noa_info;
	bool noa_status;
	uint16_t non_p2p_peer_count;

	/* random address management for management action frames */
	qdf_spinlock_t random_mac_lock;
	struct action_frame_random_mac random_mac[MAX_RANDOM_MAC_ADDRS];
	struct p2p_set_mac_filter_req pending_req;
};

/**
 * struct p2p_noa_attr - p2p noa attribute
 * @rsvd1: reserved bits 1
 * @ct_win: ct window in TUs
 * @opps_ps: opps ps state of the AP
 * @index: identifies instance of NOA su element
 * @rsvd2: reserved bits 2
 * @noa1_count: interval count of noa1
 * @noa1_duration: absent period duration of noa1
 * @noa1_interval: absent period interval of noa1
 * @noa1_start_time: 32 bit tsf time of noa1
 * @rsvd3: reserved bits 3
 * @noa2_count: interval count of noa2
 * @noa2_duration: absent period duration of noa2
 * @noa2_interval: absent period interval of noa2
 * @noa2_start_time: 32 bit tsf time of noa2
 */
struct p2p_noa_attr {
	uint32_t rsvd1:16;
	uint32_t ct_win:7;
	uint32_t opps_ps:1;
	uint32_t index:8;
	uint32_t rsvd2:24;
	uint32_t noa1_count:8;
	uint32_t noa1_duration;
	uint32_t noa1_interval;
	uint32_t noa1_start_time;
	uint32_t rsvd3:24;
	uint32_t noa2_count:8;
	uint32_t noa2_duration;
	uint32_t noa2_interval;
	uint32_t noa2_start_time;
};

/**
 * p2p_component_init() - P2P component initialization
 *
 * This function registers psoc/vdev create/delete handler.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS p2p_component_init(void);

/**
 * p2p_component_deinit() - P2P component de-init
 *
 * This function deregisters psoc/vdev create/delete handler.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS p2p_component_deinit(void);

/**
 * p2p_psoc_object_open() - Open P2P component
 * @soc: soc context
 *
 * This function initialize p2p psoc object
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS p2p_psoc_object_open(struct wlan_objmgr_psoc *soc);

/**
 * p2p_psoc_object_close() - Close P2P component
 * @soc: soc context
 *
 * This function de-init p2p psoc object.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS p2p_psoc_object_close(struct wlan_objmgr_psoc *soc);

/**
 * p2p_psoc_start() - Start P2P component
 * @soc: soc context
 * @req: P2P start parameters
 *
 * This function sets up layer call back in p2p psoc object
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS p2p_psoc_start(struct wlan_objmgr_psoc *soc,
	struct p2p_start_param *req);

/**
 * p2p_psoc_stop() - Stop P2P component
 * @soc: soc context
 *
 * This function clears up layer call back in p2p psoc object.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS p2p_psoc_stop(struct wlan_objmgr_psoc *soc);

/**
 * p2p_process_cmd() - Process P2P messages in OS interface queue
 * @msg: message information
 *
 * This function is main handler for P2P messages in OS interface
 * queue, it gets called by message scheduler.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS p2p_process_cmd(struct scheduler_msg *msg);

/**
 * p2p_process_evt() - Process P2P messages in target interface queue
 * @msg: message information
 *
 * This function is main handler for P2P messages in target interface
 * queue, it gets called by message scheduler.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS p2p_process_evt(struct scheduler_msg *msg);

/**
 * p2p_msg_flush_callback() - Callback used to flush P2P messages
 * @msg: message information
 *
 * This callback will be called when scheduler flush some of P2P messages.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS p2p_msg_flush_callback(struct scheduler_msg *msg);

/**
 * p2p_event_flush_callback() - Callback used to flush P2P events
 * @msg: event information
 *
 * This callback will be called when scheduler flush some of P2P events.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS p2p_event_flush_callback(struct scheduler_msg *msg);

/**
 * p2p_process_lo_stop() - Process lo stop event
 * @lo_stop_event: listen offload stop event information
 *
 * This function handles listen offload stop event and deliver this
 * event to HDD layer by registered callback.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS p2p_process_lo_stop(
	struct p2p_lo_stop_event *lo_stop_event);

/**
 * p2p_process_noa() - Process noa event
 * @noa_event: noa event information
 *
 * This function handles noa event and save noa information in p2p
 * vdev object.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS p2p_process_noa(struct p2p_noa_event *noa_event);

#ifdef WLAN_FEATURE_P2P_DEBUG
/**
 * p2p_status_scan() - Update P2P connection status
 * @vdev: vdev context
 *
 * This function updates P2P connection status when scanning
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS p2p_status_scan(struct wlan_objmgr_vdev *vdev);

/**
 * p2p_status_connect() - Update P2P connection status
 * @vdev: vdev context
 *
 * This function updates P2P connection status when connecting.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS p2p_status_connect(struct wlan_objmgr_vdev *vdev);

/**
 * p2p_status_disconnect() - Update P2P connection status
 * @vdev: vdev context
 *
 * This function updates P2P connection status when disconnecting.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS p2p_status_disconnect(struct wlan_objmgr_vdev *vdev);

/**
 * p2p_status_start_bss() - Update P2P connection status
 * @vdev: vdev context
 *
 * This function updates P2P connection status when starting BSS.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS p2p_status_start_bss(struct wlan_objmgr_vdev *vdev);

/**
 * p2p_status_stop_bss() - Update P2P connection status
 * @vdev: vdev context
 *
 * This function updates P2P connection status when stopping BSS.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
QDF_STATUS p2p_status_stop_bss(struct wlan_objmgr_vdev *vdev);
#else
/* No-op stubs keep callers unconditional when P2P debug is compiled out */
static inline QDF_STATUS p2p_status_scan(struct wlan_objmgr_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS p2p_status_connect(struct wlan_objmgr_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS p2p_status_disconnect(struct wlan_objmgr_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS p2p_status_start_bss(struct wlan_objmgr_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS p2p_status_stop_bss(struct wlan_objmgr_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_FEATURE_P2P_DEBUG */
#endif /* _WLAN_P2P_MAIN_H_ */
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_off_chan_tx.c b/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_off_chan_tx.c
new file mode 100644
index 0000000000000000000000000000000000000000..7937565aa6467e161d3fa8dc44a0fefb00c024f4
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_off_chan_tx.c
@@ -0,0 +1,3021 @@
/*
 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: This file contains off channel tx API definitions
 */

/*
 * NOTE(review): the angle-bracket include targets below were lost during
 * patch extraction; restore the qdf/scheduler/objmgr header names from
 * upstream before building.
 */
#include
#include
#include
#include
#include
#include
#include "wlan_p2p_public_struct.h"
#include "wlan_p2p_tgt_api.h"
#include "wlan_p2p_ucfg_api.h"
#include "wlan_p2p_roc.h"
#include "wlan_p2p_main.h"
#include "wlan_p2p_off_chan_tx.h"
#include "wlan_osif_request_manager.h"
#include

/**
 * p2p_psoc_get_tx_ops() - get p2p tx ops
 * @psoc: psoc object
 *
 * This function returns p2p tx ops callbacks.
 *
 * Return: wlan_lmac_if_p2p_tx_ops
 */
static inline struct wlan_lmac_if_p2p_tx_ops *
p2p_psoc_get_tx_ops(struct wlan_objmgr_psoc *psoc)
{
	return &psoc->soc_cb.tx_ops.p2p;
}

/**
 * p2p_tx_context_check_valid() - check tx action context
 * @tx_ctx: tx context
 *
 * This function check if tx action context and parameters are valid.
 * It validates the full chain tx_ctx -> p2p_soc_obj -> soc plus the
 * frame buffer, so later code may dereference them without re-checking.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
static QDF_STATUS p2p_tx_context_check_valid(struct tx_action_context *tx_ctx)
{
	struct wlan_objmgr_psoc *psoc;
	struct p2p_soc_priv_obj *p2p_soc_obj;

	if (!tx_ctx) {
		p2p_err("null tx action context");
		return QDF_STATUS_E_INVAL;
	}

	p2p_soc_obj = tx_ctx->p2p_soc_obj;
	if (!p2p_soc_obj) {
		p2p_err("null p2p soc private object");
		return QDF_STATUS_E_INVAL;
	}

	psoc = p2p_soc_obj->soc;
	if (!psoc) {
		p2p_err("null p2p soc object");
		return QDF_STATUS_E_INVAL;
	}

	if (!tx_ctx->buf) {
		p2p_err("null tx buffer");
		return QDF_STATUS_E_INVAL;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * p2p_vdev_check_valid() - check vdev and vdev mode
 * @tx_ctx: tx context
 *
 * This function check if vdev and vdev mode are valid. It will drop
 * probe response in sta mode.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
static QDF_STATUS p2p_vdev_check_valid(struct tx_action_context *tx_ctx)
{
	enum QDF_OPMODE mode;
	struct wlan_objmgr_vdev *vdev;
	struct wlan_objmgr_psoc *psoc;
	struct p2p_soc_priv_obj *p2p_soc_obj;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	p2p_soc_obj = tx_ctx->p2p_soc_obj;
	psoc = p2p_soc_obj->soc;
	/* takes a reference; released before every return below */
	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc,
		tx_ctx->vdev_id, WLAN_P2P_ID);
	if (!vdev) {
		p2p_err("null vdev object");
		return QDF_STATUS_E_INVAL;
	}

	mode = wlan_vdev_mlme_get_opmode(vdev);
	p2p_debug("vdev mode:%d", mode);

	/* drop probe response for sta, go, sap */
	if ((mode == QDF_STA_MODE ||
		mode == QDF_SAP_MODE ||
		mode == QDF_P2P_GO_MODE) &&
		tx_ctx->frame_info.sub_type == P2P_MGMT_PROBE_RSP) {
		p2p_debug("drop probe response, mode:%d", mode);
		status = QDF_STATUS_E_FAILURE;
	}

	wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID);

	return status;
}

/**
 * p2p_check_and_update_channel() - check and update tx channel
 * @tx_ctx: tx context
 *
 * This function checks and updates tx channel if channel is 0 in tx context.
 * It will update channel to current roc channel if vdev mode is
 * P2P DEVICE/CLIENT/GO.
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_check_and_update_channel(struct tx_action_context *tx_ctx) +{ + enum QDF_OPMODE mode; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_psoc *psoc; + struct p2p_soc_priv_obj *p2p_soc_obj; + struct p2p_roc_context *curr_roc_ctx; + + if (!tx_ctx || tx_ctx->chan) { + p2p_err("NULL tx ctx or channel valid"); + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = tx_ctx->p2p_soc_obj; + psoc = p2p_soc_obj->soc; + vdev = wlan_objmgr_get_vdev_by_id_from_psoc( + psoc, tx_ctx->vdev_id, WLAN_P2P_ID); + if (!vdev) { + p2p_err("null vdev object"); + return QDF_STATUS_E_INVAL; + } + + mode = wlan_vdev_mlme_get_opmode(vdev); + curr_roc_ctx = p2p_find_current_roc_ctx(p2p_soc_obj); + + if (curr_roc_ctx && + (mode == QDF_P2P_DEVICE_MODE || + mode == QDF_P2P_CLIENT_MODE || + mode == QDF_P2P_GO_MODE)) + tx_ctx->chan = curr_roc_ctx->chan; + + wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID); + + return QDF_STATUS_SUCCESS; +} + +/** + * p2p_get_p2pie_ptr() - get the pointer to p2p ie + * @ie: source ie + * @ie_len: source ie length + * + * This function finds out p2p ie by p2p oui and return the pointer. + * + * Return: pointer to p2p ie + */ +static const uint8_t *p2p_get_p2pie_ptr(const uint8_t *ie, uint16_t ie_len) +{ + return wlan_get_vendor_ie_ptr_from_oui(P2P_OUI, + P2P_OUI_SIZE, ie, ie_len); +} + +/** + * p2p_get_p2pie_from_probe_rsp() - get the pointer to p2p ie from + * probe response + * @tx_ctx: tx context + * + * This function finds out p2p ie and return the pointer if it is a + * probe response frame. 
+ * + * Return: pointer to p2p ie + */ +static const uint8_t *p2p_get_p2pie_from_probe_rsp( + struct tx_action_context *tx_ctx) +{ + const uint8_t *ie; + const uint8_t *p2p_ie; + const uint8_t *tmp_p2p_ie = NULL; + uint16_t ie_len; + + if (tx_ctx->buf_len <= PROBE_RSP_IE_OFFSET) { + p2p_err("Invalid header len for probe response"); + return NULL; + } + + ie = tx_ctx->buf + PROBE_RSP_IE_OFFSET; + ie_len = tx_ctx->buf_len - PROBE_RSP_IE_OFFSET; + p2p_ie = p2p_get_p2pie_ptr(ie, ie_len); + while ((p2p_ie) && + (P2P_MAX_IE_LENGTH == p2p_ie[1])) { + ie_len = tx_ctx->buf_len - (p2p_ie - tx_ctx->buf); + if (ie_len > 2) { + ie = p2p_ie + P2P_MAX_IE_LENGTH + 2; + tmp_p2p_ie = p2p_get_p2pie_ptr(ie, ie_len); + } + + if (tmp_p2p_ie) { + p2p_ie = tmp_p2p_ie; + tmp_p2p_ie = NULL; + } else { + break; + } + } + + return p2p_ie; +} + +/** + * p2p_get_presence_noa_attr() - get the pointer to noa attr + * @pies: source ie + * @length: source ie length + * + * This function finds out noa attr by noa eid and return the pointer. + * + * Return: pointer to noa attr + */ +static const uint8_t *p2p_get_presence_noa_attr(const uint8_t *pies, int length) +{ + int left = length; + const uint8_t *ptr = pies; + uint8_t elem_id; + uint16_t elem_len; + + p2p_debug("pies:%pK, length:%d", pies, length); + + while (left >= 3) { + elem_id = ptr[0]; + elem_len = ((uint16_t) ptr[1]) | (ptr[2] << 8); + + left -= 3; + if (elem_len > left) { + p2p_err("****Invalid IEs, elem_len=%d left=%d*****", + elem_len, left); + return NULL; + } + if (elem_id == P2P_NOA_ATTR) + return ptr; + + left -= elem_len; + ptr += (elem_len + 3); + } + + return NULL; +} + +/** + * p2p_get_noa_attr_stream_in_mult_p2p_ies() - get the pointer to noa + * attr from multi p2p ie + * @noa_stream: noa stream + * @noa_len: noa stream length + * @overflow_len: overflow length + * + * This function finds out noa attr from multi p2p ies. 
 *
 * Return: noa length
 */
static uint8_t p2p_get_noa_attr_stream_in_mult_p2p_ies(uint8_t *noa_stream,
	uint8_t noa_len, uint8_t overflow_len)
{
	uint8_t overflow_p2p_stream[P2P_MAX_NOA_ATTR_LEN];

	p2p_debug("noa_stream:%pK, noa_len:%d, overflow_len:%d",
		noa_stream, noa_len, overflow_len);
	/*
	 * Split the NoA stream at (noa_len - overflow_len): the overflowing
	 * tail is saved, a fresh vendor-IE header (EID + len + P2P OUI) is
	 * written at the split point, and the tail is copied back after it,
	 * producing a second, continuation P2P IE in place.
	 */
	if ((noa_len <= (P2P_MAX_NOA_ATTR_LEN + P2P_IE_HEADER_LEN)) &&
		(noa_len >= overflow_len) &&
		(overflow_len <= P2P_MAX_NOA_ATTR_LEN)) {
		qdf_mem_copy(overflow_p2p_stream,
			noa_stream + noa_len - overflow_len,
			overflow_len);
		noa_stream[noa_len - overflow_len] =
			P2P_EID_VENDOR;
		noa_stream[noa_len - overflow_len + 1] =
			overflow_len + P2P_OUI_SIZE;
		qdf_mem_copy(noa_stream + noa_len - overflow_len + 2,
			P2P_OUI, P2P_OUI_SIZE);
		qdf_mem_copy(noa_stream + noa_len + 2 + P2P_OUI_SIZE -
			overflow_len, overflow_p2p_stream,
			overflow_len);
	}

	/* stream grew by one IE header regardless of the guarded copy above */
	return noa_len + P2P_IE_HEADER_LEN;
}

/**
 * p2p_get_vdev_noa_info() - get vdev noa information
 * @tx_ctx: tx context
 *
 * This function gets vdev noa information. Only a P2P GO vdev carries
 * NoA state; any other mode returns NULL.
 *
 * Return: pointer to noa information, or NULL
 */
static struct p2p_noa_info *p2p_get_vdev_noa_info(
	struct tx_action_context *tx_ctx)
{
	struct p2p_vdev_priv_obj *p2p_vdev_obj;
	struct p2p_soc_priv_obj *p2p_soc_obj;
	struct wlan_objmgr_vdev *vdev;
	struct wlan_objmgr_psoc *psoc;
	enum QDF_OPMODE mode;
	struct p2p_noa_info *noa_info = NULL;

	p2p_soc_obj = tx_ctx->p2p_soc_obj;
	psoc = p2p_soc_obj->soc;
	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc,
		tx_ctx->vdev_id, WLAN_P2P_ID);
	if (!vdev) {
		p2p_err("vdev obj is NULL");
		return NULL;
	}

	mode = wlan_vdev_mlme_get_opmode(vdev);
	p2p_debug("vdev mode:%d", mode);
	if (mode != QDF_P2P_GO_MODE) {
		p2p_debug("invalid p2p vdev mode:%d", mode);
		goto fail;
	}

	p2p_vdev_obj = wlan_objmgr_vdev_get_comp_private_obj(vdev,
			WLAN_UMAC_COMP_P2P);

	if (!p2p_vdev_obj || !(p2p_vdev_obj->noa_info)) {
		p2p_debug("null noa info");
		goto fail;
	}

	noa_info = p2p_vdev_obj->noa_info;

fail:
	/* single release point for the reference taken above */
	wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID);

	return noa_info;
}

/**
 * p2p_get_noa_attr_stream() - get noa stream from p2p vdev object
 * @tx_ctx: tx context
 * @pnoa_stream: pointer to noa stream
 *
 * This function finds out noa stream from p2p vdev object and
 * serializes it as a P2P NoA attribute: id byte, 2-byte length
 * (back-filled at the end), index, CTWindow/OppPS byte, then one
 * 13-byte descriptor per active NoA set.
 *
 * Return: noa stream length (0 when there is nothing to advertise)
 */
static uint8_t p2p_get_noa_attr_stream(
	struct tx_action_context *tx_ctx, uint8_t *pnoa_stream)
{
	struct p2p_noa_info *noa_info;
	struct noa_descriptor *noa_desc_0;
	struct noa_descriptor *noa_desc_1;
	uint8_t *pbody = pnoa_stream;
	uint8_t len = 0;

	noa_info = p2p_get_vdev_noa_info(tx_ctx);
	if (!noa_info) {
		p2p_debug("not valid noa information");
		return 0;
	}

	noa_desc_0 = &(noa_info->noa_desc[0]);
	noa_desc_1 = &(noa_info->noa_desc[1]);
	if ((!(noa_desc_0->duration)) &&
		(!(noa_desc_1->duration)) &&
		(!noa_info->opps_ps)) {
		p2p_debug("opps ps and duration are 0");
		return 0;
	}

	pbody[0] = P2P_NOA_ATTR;
	/* pbody[1]/pbody[2] hold the attribute length, back-filled below */
	pbody[3] = noa_info->index;
	pbody[4] = noa_info->ct_window | (noa_info->opps_ps << 7);
	len = 5;
	pbody += len;

	/*
	 * NOTE(review): the 32-bit stores below are unaligned little-endian
	 * writes through casted pointers; this matches host byte order and
	 * assumes the platform tolerates unaligned access -- confirm for
	 * the supported targets.
	 */
	if (noa_desc_0->duration) {
		*pbody = noa_desc_0->type_count;
		pbody += 1;
		len += 1;

		*((uint32_t *) (pbody)) = noa_desc_0->duration;
		pbody += sizeof(uint32_t);
		len += 4;

		*((uint32_t *) (pbody)) = noa_desc_0->interval;
		pbody += sizeof(uint32_t);
		len += 4;

		*((uint32_t *) (pbody)) = noa_desc_0->start_time;
		pbody += sizeof(uint32_t);
		len += 4;
	}

	if (noa_desc_1->duration) {
		*pbody = noa_desc_1->type_count;
		pbody += 1;
		len += 1;

		*((uint32_t *) (pbody)) = noa_desc_1->duration;
		pbody += sizeof(uint32_t);
		len += 4;

		*((uint32_t *) (pbody)) = noa_desc_1->interval;
		pbody += sizeof(uint32_t);
		len += 4;

		*((uint32_t *) (pbody)) = noa_desc_1->start_time;
		pbody += sizeof(uint32_t);
		len += 4;
	}

	pbody = pnoa_stream + 1;
	/* one byte for Attr and 2 bytes for length */
	*((uint16_t *) (pbody)) = len - 3;

	return len;
}

/**
 * p2p_update_noa_stream() - update noa stream
 * @tx_ctx: tx context
 * @p2p_ie: pointer to p2p ie
 * @noa_attr: pointer to noa attr
 * @total_len: pointer to total length of ie
 * @noa_stream: buffer receiving the serialized noa attribute
 *
 * This function updates noa stream. When an old NoA attribute is
 * present it is accounted out of the IE/buffer lengths first; if the
 * refreshed P2P IE would exceed 255 bytes the NoA stream is split
 * into a continuation vendor IE.
 *
 * Return: noa stream length (0 when no noa attr could be built)
 */
static uint16_t p2p_update_noa_stream(struct tx_action_context *tx_ctx,
	uint8_t *p2p_ie, const uint8_t *noa_attr, uint32_t *total_len,
	uint8_t *noa_stream)
{
	uint16_t noa_len;
	uint16_t overflow_len;
	uint8_t orig_len;
	uint32_t nbytes_copy;
	uint32_t buf_len = *total_len;

	noa_len = p2p_get_noa_attr_stream(tx_ctx, noa_stream);
	/* NOTE(review): noa_len is unsigned, so "<= 0" only matches 0 */
	if (noa_len <= 0) {
		p2p_debug("do not find out noa attr");
		return 0;
	}

	orig_len = p2p_ie[1];
	if (noa_attr) {
		/* subtract the existing attr (id + 2-byte len + payload) */
		noa_len = noa_attr[1] | (noa_attr[2] << 8);
		orig_len -= (noa_len + 1 + 2);
		buf_len -= (noa_len + 1 + 2);
		p2p_ie[1] = orig_len;
	}

	if ((p2p_ie[1] + noa_len) > P2P_MAX_IE_LENGTH) {
		overflow_len = p2p_ie[1] + noa_len -
				P2P_MAX_IE_LENGTH;
		noa_len = p2p_get_noa_attr_stream_in_mult_p2p_ies(
				noa_stream, noa_len, overflow_len);
		p2p_ie[1] = P2P_MAX_IE_LENGTH;
	} else {
		/* increment the length of P2P IE */
		p2p_ie[1] += noa_len;
	}

	*total_len = buf_len;
	nbytes_copy = (p2p_ie + orig_len + 2) - tx_ctx->buf;

	p2p_debug("noa_len=%d orig_len=%d p2p_ie=%pK buf_len=%d nbytes copy=%d ",
		noa_len, orig_len, p2p_ie, buf_len, nbytes_copy);

	return noa_len;
}

/**
 * p2p_set_ht_caps() - set ht capability
 * @tx_ctx: tx context
 * @num_bytes: number bytes
 *
 * This function sets ht capability.
 * NOTE(review): intentionally an empty placeholder in this version --
 * no HT caps are inserted into the tx frame.
 *
 * Return: None
 */
static void p2p_set_ht_caps(struct tx_action_context *tx_ctx,
	uint32_t num_bytes)
{
}

/**
 * p2p_populate_mac_header() - update sequence number
 * @tx_ctx: tx context
 *
 * This function updates sequence number of this mgmt frame. The peer
 * supplying the sequence counter is looked up by RA, then TA, then --
 * for randomized-MAC tx -- by the vdev's own MAC address.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
static QDF_STATUS p2p_populate_mac_header(
	struct tx_action_context *tx_ctx)
{
	struct wlan_seq_ctl *seq_ctl;
	struct wlan_frame_hdr *wh;
	struct wlan_objmgr_peer *peer;
	struct wlan_objmgr_psoc *psoc;
	void *mac_addr;
	uint16_t seq_num;
	uint8_t pdev_id;
	struct wlan_objmgr_vdev *vdev;

	psoc = tx_ctx->p2p_soc_obj->soc;

	wh = (struct wlan_frame_hdr *)tx_ctx->buf;
	mac_addr = wh->i_addr1;
	pdev_id = wlan_get_pdev_id_from_vdev_id(psoc, tx_ctx->vdev_id,
						WLAN_P2P_ID);
	peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr, WLAN_P2P_ID);
	if (!peer) {
		/* fall back to the transmitter address */
		mac_addr = wh->i_addr2;
		peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr,
					    WLAN_P2P_ID);
	}
	if (!peer && tx_ctx->rand_mac_tx) {
		/* randomized TA will not match a peer; use the vdev MAC */
		vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc,
							    tx_ctx->vdev_id,
							    WLAN_P2P_ID);
		if (vdev) {
			mac_addr = wlan_vdev_mlme_get_macaddr(vdev);
			peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr,
						    WLAN_P2P_ID);
			wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID);
		}
	}
	if (!peer) {
		p2p_err("no valid peer");
		return QDF_STATUS_E_INVAL;
	}
	seq_num = (uint16_t)wlan_peer_mlme_get_next_seq_num(peer);
	seq_ctl = (struct wlan_seq_ctl *)(tx_ctx->buf +
			WLAN_SEQ_CTL_OFFSET);
	seq_ctl->seq_num_lo = (seq_num & WLAN_LOW_SEQ_NUM_MASK);
	seq_ctl->seq_num_hi = ((seq_num & WLAN_HIGH_SEQ_NUM_MASK) >>
				WLAN_HIGH_SEQ_NUM_OFFSET);

	wlan_objmgr_peer_release_ref(peer, WLAN_P2P_ID);

	return QDF_STATUS_SUCCESS;
}

/**
 * p2p_get_frame_type_str() - parse frame type to string
 * @frame_info: frame information
 *
 * This function parse frame type to string.
+ * + * Return: command string + */ +#ifdef WLAN_DEBUG +static char *p2p_get_frame_type_str(struct p2p_frame_info *frame_info) +{ + if (frame_info->type == P2P_FRAME_NOT_SUPPORT) + return "Not support frame"; + + if (frame_info->sub_type == P2P_MGMT_NOT_SUPPORT) + return "Not support sub frame"; + + switch (frame_info->sub_type) { + case P2P_MGMT_PROBE_REQ: + return "P2P roc request"; + case P2P_MGMT_PROBE_RSP: + return "P2P cancel roc request"; + case P2P_MGMT_ACTION: + break; + default: + return "Invalid P2P command"; + } + + if (frame_info->action_type == P2P_ACTION_PRESENCE_REQ) + return "P2P action presence request"; + if (frame_info->action_type == P2P_ACTION_PRESENCE_RSP) + return "P2P action presence response"; + + switch (frame_info->public_action_type) { + case P2P_PUBLIC_ACTION_NEG_REQ: + return "GO negotiation request frame"; + case P2P_PUBLIC_ACTION_NEG_RSP: + return "GO negotiation response frame"; + case P2P_PUBLIC_ACTION_NEG_CNF: + return "GO negotiation confirm frame"; + case P2P_PUBLIC_ACTION_INVIT_REQ: + return "P2P invitation request"; + case P2P_PUBLIC_ACTION_INVIT_RSP: + return "P2P invitation response"; + case P2P_PUBLIC_ACTION_DEV_DIS_REQ: + return "Device discoverability request"; + case P2P_PUBLIC_ACTION_DEV_DIS_RSP: + return "Device discoverability response"; + case P2P_PUBLIC_ACTION_PROV_DIS_REQ: + return "Provision discovery request"; + case P2P_PUBLIC_ACTION_PROV_DIS_RSP: + return "Provision discovery response"; + case P2P_PUBLIC_ACTION_GAS_INIT_REQ: + return "GAS init request"; + case P2P_PUBLIC_ACTION_GAS_INIT_RSP: + return "GAS init response"; + case P2P_PUBLIC_ACTION_GAS_COMB_REQ: + return "GAS come back request"; + case P2P_PUBLIC_ACTION_GAS_COMB_RSP: + return "GAS come back response"; + default: + return "Not support action frame"; + } +} +#endif + +/** + * p2p_init_frame_info() - init frame information structure + * @frame_info: pointer to frame information + * + * This function init frame information structure. 
+ * + * Return: None + */ +static void p2p_init_frame_info(struct p2p_frame_info *frame_info) +{ + frame_info->type = P2P_FRAME_NOT_SUPPORT; + frame_info->sub_type = P2P_MGMT_NOT_SUPPORT; + frame_info->public_action_type = + P2P_PUBLIC_ACTION_NOT_SUPPORT; + frame_info->action_type = P2P_ACTION_NOT_SUPPORT; +} + +/** + * p2p_get_frame_info() - get frame information from packet + * @data_buf: data buffer address + * @length: buffer length + * @frame_info: frame information + * + * This function gets frame information from packet. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_get_frame_info(uint8_t *data_buf, uint32_t length, + struct p2p_frame_info *frame_info) +{ + uint8_t type; + uint8_t sub_type; + uint8_t action_type; + uint8_t *buf = data_buf; + + p2p_init_frame_info(frame_info); + + if (length < P2P_ACTION_OFFSET + 1) { + p2p_err("invalid p2p mgmt hdr len"); + return QDF_STATUS_E_INVAL; + } + + type = P2P_GET_TYPE_FRM_FC(buf[0]); + sub_type = P2P_GET_SUBTYPE_FRM_FC(buf[0]); + if (type != P2P_FRAME_MGMT) { + p2p_err("just support mgmt frame"); + return QDF_STATUS_E_FAILURE; + } + + frame_info->type = P2P_FRAME_MGMT; + + if (sub_type == P2P_MGMT_PROBE_RSP) { + frame_info->sub_type = P2P_MGMT_PROBE_RSP; + p2p_debug("Probe Response"); + return QDF_STATUS_SUCCESS; + } + + if (sub_type == P2P_MGMT_PROBE_REQ) { + frame_info->sub_type = P2P_MGMT_PROBE_REQ; + p2p_debug("Probe Request"); + return QDF_STATUS_SUCCESS; + } + + if (sub_type != P2P_MGMT_ACTION) { + p2p_debug("not support sub type"); + return QDF_STATUS_E_FAILURE; + } + + frame_info->sub_type = P2P_MGMT_ACTION; + buf += P2P_ACTION_OFFSET; + if (length > P2P_PUBLIC_ACTION_FRAME_TYPE_OFFSET && + buf[0] == P2P_PUBLIC_ACTION_FRAME && + buf[1] == P2P_PUBLIC_ACTION_VENDOR_SPECIFIC && + !qdf_mem_cmp(&buf[2], P2P_OUI, P2P_OUI_SIZE)) { + buf = data_buf + + P2P_PUBLIC_ACTION_FRAME_TYPE_OFFSET; + action_type = buf[0]; + if (action_type > P2P_PUBLIC_ACTION_PROV_DIS_RSP) + 
frame_info->public_action_type = + P2P_PUBLIC_ACTION_NOT_SUPPORT; + else + frame_info->public_action_type = action_type; + } else if (length > P2P_ACTION_FRAME_TYPE_OFFSET && + buf[0] == P2P_ACTION_VENDOR_SPECIFIC_CATEGORY && + !qdf_mem_cmp(&buf[1], P2P_OUI, P2P_OUI_SIZE)) { + buf = data_buf + + P2P_ACTION_FRAME_TYPE_OFFSET; + action_type = buf[0]; + if (action_type == P2P_ACTION_PRESENCE_REQ) + frame_info->action_type = + P2P_ACTION_PRESENCE_REQ; + if (action_type == P2P_ACTION_PRESENCE_RSP) + frame_info->action_type = + P2P_ACTION_PRESENCE_RSP; + } else { + p2p_debug("this is not vendor specific p2p action frame"); + return QDF_STATUS_SUCCESS; + } + + p2p_debug("%s", p2p_get_frame_type_str(frame_info)); + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_FEATURE_P2P_DEBUG +/** + * p2p_tx_update_connection_status() - Update P2P connection status + * with tx frame + * @p2p_soc_obj: P2P soc private object + * @tx_frame_info: frame information + * @mac_to: Pointer to dest MAC address + * + * This function updates P2P connection status with tx frame. 
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
static QDF_STATUS p2p_tx_update_connection_status(
	struct p2p_soc_priv_obj *p2p_soc_obj,
	struct p2p_frame_info *tx_frame_info,
	uint8_t *mac_to)
{
	if (!p2p_soc_obj || !tx_frame_info || !mac_to) {
		p2p_err("invalid p2p_soc_obj:%pK or tx_frame_info:%pK or mac_to:%pK",
			p2p_soc_obj, tx_frame_info, mac_to);
		return QDF_STATUS_E_INVAL;
	}

	if (tx_frame_info->public_action_type !=
		P2P_PUBLIC_ACTION_NOT_SUPPORT)
		p2p_info("%s ---> OTA to " QDF_MAC_ADDR_STR,
			p2p_get_frame_type_str(tx_frame_info),
			QDF_MAC_ADDR_ARRAY(mac_to));

	/* state machine: PD req starts GO nego, nego-confirm completes it */
	if ((tx_frame_info->public_action_type ==
		P2P_PUBLIC_ACTION_PROV_DIS_REQ) &&
		(p2p_soc_obj->connection_status == P2P_NOT_ACTIVE)) {
		p2p_soc_obj->connection_status = P2P_GO_NEG_PROCESS;
		p2p_info("[P2P State]Inactive state to GO negotiation progress state");
	} else if ((tx_frame_info->public_action_type ==
		P2P_PUBLIC_ACTION_NEG_CNF) &&
		(p2p_soc_obj->connection_status ==
		P2P_GO_NEG_PROCESS)) {
		p2p_soc_obj->connection_status = P2P_GO_NEG_COMPLETED;
		p2p_info("[P2P State]GO nego progress to GO nego completed state");
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * p2p_rx_update_connection_status() - Update P2P connection status
 * with rx frame
 * @p2p_soc_obj: P2P soc private object
 * @rx_frame_info: frame information
 * @mac_from: Pointer to source MAC address
 *
 * This function updates P2P connection status with rx frame.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
static QDF_STATUS p2p_rx_update_connection_status(
	struct p2p_soc_priv_obj *p2p_soc_obj,
	struct p2p_frame_info *rx_frame_info,
	uint8_t *mac_from)
{
	if (!p2p_soc_obj || !rx_frame_info || !mac_from) {
		p2p_err("invalid p2p_soc_obj:%pK or rx_frame_info:%pK, mac_from:%pK",
			p2p_soc_obj, rx_frame_info, mac_from);
		return QDF_STATUS_E_INVAL;
	}

	if (rx_frame_info->public_action_type !=
		P2P_PUBLIC_ACTION_NOT_SUPPORT)
		p2p_info("%s <--- OTA from " QDF_MAC_ADDR_STR,
			p2p_get_frame_type_str(rx_frame_info),
			QDF_MAC_ADDR_ARRAY(mac_from));

	if ((rx_frame_info->public_action_type ==
		P2P_PUBLIC_ACTION_PROV_DIS_REQ) &&
		(p2p_soc_obj->connection_status == P2P_NOT_ACTIVE)) {
		p2p_soc_obj->connection_status = P2P_GO_NEG_PROCESS;
		p2p_info("[P2P State]Inactive state to GO negotiation progress state");
	} else if ((rx_frame_info->public_action_type ==
		P2P_PUBLIC_ACTION_NEG_CNF) &&
		(p2p_soc_obj->connection_status ==
		P2P_GO_NEG_PROCESS)) {
		p2p_soc_obj->connection_status = P2P_GO_NEG_COMPLETED;
		p2p_info("[P2P State]GO negotiation progress to GO negotiation completed state");
	} else if ((rx_frame_info->public_action_type ==
		P2P_PUBLIC_ACTION_INVIT_REQ) &&
		(p2p_soc_obj->connection_status == P2P_NOT_ACTIVE)) {
		/* invitation while inactive implies an autonomous GO */
		p2p_soc_obj->connection_status = P2P_GO_NEG_COMPLETED;
		p2p_info("[P2P State]Inactive state to GO negotiation completed state Autonomous GO formation");
	}

	return QDF_STATUS_SUCCESS;
}
#else
/* No-op stubs when P2P debug state tracking is compiled out */
static QDF_STATUS p2p_tx_update_connection_status(
	struct p2p_soc_priv_obj *p2p_soc_obj,
	struct p2p_frame_info *tx_frame_info,
	uint8_t *mac_to)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS p2p_rx_update_connection_status(
	struct p2p_soc_priv_obj *p2p_soc_obj,
	struct p2p_frame_info *rx_frame_info,
	uint8_t *mac_from)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * p2p_packet_alloc() - allocate qdf nbuf
 * @size: buffer size
 * @data: pointer to qdf nbuf
data point + * @ppPacket: pointer to qdf nbuf point + * + * This function allocates qdf nbuf. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_packet_alloc(uint16_t size, void **data, + qdf_nbuf_t *ppPacket) +{ + QDF_STATUS status = QDF_STATUS_E_FAILURE; + qdf_nbuf_t nbuf; + + nbuf = qdf_nbuf_alloc(NULL, + roundup(size + P2P_TX_PKT_MIN_HEADROOM, 4), + P2P_TX_PKT_MIN_HEADROOM, sizeof(uint32_t), + false); + + if (nbuf != NULL) { + qdf_nbuf_put_tail(nbuf, size); + qdf_nbuf_set_protocol(nbuf, ETH_P_CONTROL); + *ppPacket = nbuf; + *data = qdf_nbuf_data(nbuf); + qdf_mem_set(*data, size, 0); + status = QDF_STATUS_SUCCESS; + } + + return status; +} + +/** + * p2p_send_tx_conf() - send tx confirm + * @tx_ctx: tx context + * @status: tx status + * + * This function send tx confirm to osif + * + * Return: QDF_STATUS_SUCCESS - pointer to tx context + */ +static QDF_STATUS p2p_send_tx_conf(struct tx_action_context *tx_ctx, + bool status) +{ + struct p2p_tx_cnf tx_cnf; + struct p2p_soc_priv_obj *p2p_soc_obj; + struct p2p_start_param *start_param; + + p2p_soc_obj = tx_ctx->p2p_soc_obj; + + if (!p2p_soc_obj || !(p2p_soc_obj->start_param)) { + p2p_err("Invalid p2p soc object or start parameters"); + return QDF_STATUS_E_INVAL; + } + + start_param = p2p_soc_obj->start_param; + if (!(start_param->tx_cnf_cb)) { + p2p_err("no tx confirm callback"); + return QDF_STATUS_E_INVAL; + } + + if (tx_ctx->no_ack) + tx_cnf.action_cookie = 0; + else + tx_cnf.action_cookie = (uint64_t)tx_ctx->id; + + tx_cnf.vdev_id = tx_ctx->vdev_id; + tx_cnf.buf = tx_ctx->buf; + tx_cnf.buf_len = tx_ctx->buf_len; + tx_cnf.status = status ? 
0 : 1; + + p2p_debug("soc:%pK, vdev_id:%d, action_cookie:%llx, len:%d, status:%d, buf:%pK", + p2p_soc_obj->soc, tx_cnf.vdev_id, + tx_cnf.action_cookie, tx_cnf.buf_len, + tx_cnf.status, tx_cnf.buf); + + p2p_rand_mac_tx_done(p2p_soc_obj->soc, tx_ctx); + + start_param->tx_cnf_cb(start_param->tx_cnf_cb_data, &tx_cnf); + + return QDF_STATUS_SUCCESS; +} + +/** + * p2p_mgmt_tx() - call mgmt tx api + * @tx_ctx: tx context + * @buf_len: buffer length + * @packet: pointer to qdf nbuf + * @frame: pointer to qdf nbuf data + * + * This function call mgmt tx api to tx this action frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_mgmt_tx(struct tx_action_context *tx_ctx, + uint32_t buf_len, qdf_nbuf_t packet, uint8_t *frame) +{ + QDF_STATUS status; + mgmt_tx_download_comp_cb tx_comp_cb; + mgmt_ota_comp_cb tx_ota_comp_cb; + struct wlan_frame_hdr *wh; + struct wlan_objmgr_peer *peer; + struct wmi_mgmt_params mgmt_param = { 0 }; + struct wlan_objmgr_psoc *psoc; + void *mac_addr; + uint8_t pdev_id; + struct wlan_objmgr_vdev *vdev; + uint16_t chanfreq = 0; + + psoc = tx_ctx->p2p_soc_obj->soc; + mgmt_param.tx_frame = packet; + mgmt_param.frm_len = buf_len; + mgmt_param.vdev_id = tx_ctx->vdev_id; + mgmt_param.pdata = frame; + if (tx_ctx->chan) + chanfreq = (uint16_t)wlan_chan_to_freq(tx_ctx->chan); + mgmt_param.chanfreq = chanfreq; + + mgmt_param.qdf_ctx = wlan_psoc_get_qdf_dev(psoc); + if (!(mgmt_param.qdf_ctx)) { + p2p_err("qdf ctx is null"); + return QDF_STATUS_E_INVAL; + } + + wh = (struct wlan_frame_hdr *)frame; + mac_addr = wh->i_addr1; + pdev_id = wlan_get_pdev_id_from_vdev_id(psoc, tx_ctx->vdev_id, + WLAN_P2P_ID); + peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr, WLAN_P2P_ID); + if (!peer) { + mac_addr = wh->i_addr2; + peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr, + WLAN_P2P_ID); + } + if (!peer && tx_ctx->rand_mac_tx) { + vdev = wlan_objmgr_get_vdev_by_id_from_psoc( + psoc, tx_ctx->vdev_id, WLAN_P2P_ID); + if (vdev) { + 
mac_addr = wlan_vdev_mlme_get_macaddr(vdev); + peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr, + WLAN_P2P_ID); + wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID); + } + } + + if (!peer) { + p2p_err("no valid peer"); + return QDF_STATUS_E_INVAL; + } + + if (tx_ctx->no_ack) { + tx_comp_cb = tgt_p2p_mgmt_download_comp_cb; + tx_ota_comp_cb = NULL; + } else { + tx_comp_cb = NULL; + tx_ota_comp_cb = tgt_p2p_mgmt_ota_comp_cb; + } + + p2p_debug("length:%d, vdev_id:%d, chanfreq:%d, no_ack:%d", + mgmt_param.frm_len, mgmt_param.vdev_id, + mgmt_param.chanfreq, tx_ctx->no_ack); + + tx_ctx->nbuf = packet; + + status = wlan_mgmt_txrx_mgmt_frame_tx(peer, tx_ctx->p2p_soc_obj, + packet, tx_comp_cb, tx_ota_comp_cb, + WLAN_UMAC_COMP_P2P, &mgmt_param); + + wlan_objmgr_peer_release_ref(peer, WLAN_P2P_ID); + + return status; +} + +/** + * p2p_roc_req_for_tx_action() - new a roc request for tx + * @tx_ctx: tx context + * + * This function new a roc request for tx and call roc api to process + * this new roc request. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_roc_req_for_tx_action( + struct tx_action_context *tx_ctx) +{ + struct p2p_soc_priv_obj *p2p_soc_obj; + struct p2p_roc_context *roc_ctx; + QDF_STATUS status; + + roc_ctx = qdf_mem_malloc(sizeof(struct p2p_roc_context)); + if (!roc_ctx) { + p2p_err("Failed to allocate p2p roc context"); + return QDF_STATUS_E_NOMEM; + } + + p2p_soc_obj = tx_ctx->p2p_soc_obj; + roc_ctx->p2p_soc_obj = p2p_soc_obj; + roc_ctx->vdev_id = tx_ctx->vdev_id; + roc_ctx->chan = tx_ctx->chan; + roc_ctx->duration = tx_ctx->duration; + roc_ctx->roc_state = ROC_STATE_IDLE; + roc_ctx->roc_type = OFF_CHANNEL_TX; + roc_ctx->tx_ctx = tx_ctx; + roc_ctx->id = tx_ctx->id; + tx_ctx->roc_cookie = (uintptr_t)roc_ctx; + + p2p_debug("create roc request for off channel tx, tx ctx:%pK, roc ctx:%pK", + tx_ctx, roc_ctx); + + status = p2p_process_roc_req(roc_ctx); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("request roc for tx action frrame fail"); + return status; + } + + status = qdf_list_insert_back(&p2p_soc_obj->tx_q_roc, + &tx_ctx->node); + if (status != QDF_STATUS_SUCCESS) + p2p_err("Failed to insert off chan tx context to wait roc req queue"); + + return status; +} + +/** + * p2p_find_tx_ctx() - find tx context by cookie + * @p2p_soc_obj: p2p soc object + * @cookie: cookie to this p2p tx context + * @is_roc_q: it is in waiting for roc queue + * @is_ack_q: it is in waiting for ack queue + * + * This function finds out tx context by cookie. 
+ * + * Return: pointer to tx context + */ +static struct tx_action_context *p2p_find_tx_ctx( + struct p2p_soc_priv_obj *p2p_soc_obj, uint64_t cookie, + bool *is_roc_q, bool *is_ack_q) +{ + struct tx_action_context *cur_tx_ctx; + qdf_list_node_t *p_node; + QDF_STATUS status; + *is_roc_q = false; + *is_ack_q = false; + + p2p_debug("Start to find tx ctx, p2p soc_obj:%pK, cookie:%llx", + p2p_soc_obj, cookie); + + status = qdf_list_peek_front(&p2p_soc_obj->tx_q_roc, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + cur_tx_ctx = qdf_container_of(p_node, + struct tx_action_context, node); + if ((uintptr_t) cur_tx_ctx == cookie) { + *is_roc_q = true; + p2p_debug("find tx ctx, cookie:%llx", cookie); + return cur_tx_ctx; + } + status = qdf_list_peek_next(&p2p_soc_obj->tx_q_roc, + p_node, &p_node); + } + + status = qdf_list_peek_front(&p2p_soc_obj->tx_q_ack, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + cur_tx_ctx = qdf_container_of(p_node, + struct tx_action_context, node); + if ((uintptr_t) cur_tx_ctx == cookie) { + *is_ack_q = true; + p2p_debug("find tx ctx, cookie:%llx", cookie); + return cur_tx_ctx; + } + status = qdf_list_peek_next(&p2p_soc_obj->tx_q_ack, + p_node, &p_node); + } + + return NULL; +} + +/** + * p2p_find_tx_ctx_by_roc() - find tx context by roc + * @p2p_soc_obj: p2p soc object + * @cookie: cookie to roc context + * + * This function finds out tx context by roc context. 
+ * + * Return: pointer to tx context + */ +static struct tx_action_context *p2p_find_tx_ctx_by_roc( + struct p2p_soc_priv_obj *p2p_soc_obj, uint64_t cookie) +{ + struct tx_action_context *cur_tx_ctx; + qdf_list_node_t *p_node; + QDF_STATUS status; + + p2p_debug("Start to find tx ctx, p2p soc_obj:%pK, cookie:%llx", + p2p_soc_obj, cookie); + + status = qdf_list_peek_front(&p2p_soc_obj->tx_q_roc, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + cur_tx_ctx = qdf_container_of(p_node, + struct tx_action_context, node); + if (cur_tx_ctx->roc_cookie == cookie) { + p2p_debug("find tx ctx, cookie:%llx", cookie); + return cur_tx_ctx; + } + status = qdf_list_peek_next(&p2p_soc_obj->tx_q_roc, + p_node, &p_node); + } + + return NULL; +} + +/** + * p2p_move_tx_context_to_ack_queue() - move tx context to tx_q_ack + * @tx_ctx: tx context + * + * This function moves tx context to waiting for ack queue. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_move_tx_context_to_ack_queue( + struct tx_action_context *tx_ctx) +{ + bool is_roc_q = false; + bool is_ack_q = false; + struct p2p_soc_priv_obj *p2p_soc_obj = tx_ctx->p2p_soc_obj; + struct tx_action_context *cur_tx_ctx; + QDF_STATUS status; + + p2p_debug("move tx context to wait for roc queue, %pK", tx_ctx); + + cur_tx_ctx = p2p_find_tx_ctx(p2p_soc_obj, (uintptr_t)tx_ctx, + &is_roc_q, &is_ack_q); + if (cur_tx_ctx) { + if (is_roc_q) { + p2p_debug("find in wait for roc queue"); + status = qdf_list_remove_node( + &p2p_soc_obj->tx_q_roc, + (qdf_list_node_t *)tx_ctx); + if (status != QDF_STATUS_SUCCESS) + p2p_err("Failed to remove off chan tx context from wait roc req queue"); + } + + if (is_ack_q) { + p2p_debug("Already in waiting for ack queue"); + return QDF_STATUS_SUCCESS; + } + } + + status = qdf_list_insert_back( + &p2p_soc_obj->tx_q_ack, + &tx_ctx->node); + if (status != QDF_STATUS_SUCCESS) + p2p_err("Failed to insert off chan tx context to wait ack req queue"); + p2p_debug("insert tx context 
to wait for ack queue, status:%d", + status); + + return status; +} + +/** + * p2p_extend_roc_timer() - extend roc timer + * @p2p_soc_obj: p2p soc private object + * @frame_info: pointer to frame information + * + * This function extends roc timer for some of p2p public action frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_extend_roc_timer( + struct p2p_soc_priv_obj *p2p_soc_obj, + struct p2p_frame_info *frame_info) +{ + struct p2p_roc_context *curr_roc_ctx; + uint32_t extend_time; + + curr_roc_ctx = p2p_find_current_roc_ctx(p2p_soc_obj); + if (!curr_roc_ctx) { + p2p_debug("no running roc request currently"); + return QDF_STATUS_SUCCESS; + } + + if (!frame_info) { + p2p_err("invalid frame information"); + return QDF_STATUS_E_INVAL; + } + + switch (frame_info->public_action_type) { + case P2P_PUBLIC_ACTION_NEG_REQ: + case P2P_PUBLIC_ACTION_NEG_RSP: + extend_time = 2 * P2P_ACTION_FRAME_DEFAULT_WAIT; + break; + case P2P_PUBLIC_ACTION_INVIT_REQ: + case P2P_PUBLIC_ACTION_DEV_DIS_REQ: + extend_time = P2P_ACTION_FRAME_DEFAULT_WAIT; + break; + default: + extend_time = 0; + break; + } + + if (extend_time) { + p2p_debug("extend roc timer, duration:%d", extend_time); + curr_roc_ctx->duration = extend_time; + return p2p_restart_roc_timer(curr_roc_ctx); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * p2p_adjust_tx_wait() - adjust tx wait + * @tx_ctx: tx context + * + * This function adjust wait time of this tx context + * + * Return: None + */ +static void p2p_adjust_tx_wait(struct tx_action_context *tx_ctx) +{ + struct p2p_frame_info *frame_info; + + frame_info = &(tx_ctx->frame_info); + switch (frame_info->public_action_type) { + case P2P_PUBLIC_ACTION_NEG_RSP: + case P2P_PUBLIC_ACTION_PROV_DIS_RSP: + tx_ctx->duration += P2P_ACTION_FRAME_RSP_WAIT; + break; + case P2P_PUBLIC_ACTION_NEG_CNF: + case P2P_PUBLIC_ACTION_INVIT_RSP: + tx_ctx->duration += P2P_ACTION_FRAME_ACK_WAIT; + break; + default: + break; + } +} + +/** + * 
p2p_remove_tx_context() - remove tx ctx from queue + * @tx_ctx: tx context + * + * This function remove tx context from waiting for roc queue or + * waiting for ack queue. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_remove_tx_context( + struct tx_action_context *tx_ctx) +{ + bool is_roc_q = false; + bool is_ack_q = false; + struct tx_action_context *cur_tx_ctx; + uint64_t cookie = (uintptr_t)tx_ctx; + struct p2p_soc_priv_obj *p2p_soc_obj = tx_ctx->p2p_soc_obj; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + p2p_debug("tx context:%pK", tx_ctx); + + cur_tx_ctx = p2p_find_tx_ctx(p2p_soc_obj, cookie, &is_roc_q, + &is_ack_q); + + /* for not off channel tx case, won't find from queue */ + if (!cur_tx_ctx) { + p2p_debug("Do not find tx context from queue"); + goto end; + } + + if (is_roc_q) { + status = qdf_list_remove_node( + &p2p_soc_obj->tx_q_roc, + (qdf_list_node_t *)cur_tx_ctx); + if (status != QDF_STATUS_SUCCESS) + p2p_err("Failed to tx context from wait roc req queue"); + } + + if (is_ack_q) { + status = qdf_list_remove_node( + &p2p_soc_obj->tx_q_ack, + (qdf_list_node_t *)cur_tx_ctx); + if (status != QDF_STATUS_SUCCESS) + p2p_err("Failed to tx context from wait ack req queue"); + } + +end: + if (!tx_ctx->roc_cookie) + qdf_idr_remove(&p2p_soc_obj->p2p_idr, tx_ctx->id); + qdf_mem_free(tx_ctx->buf); + qdf_mem_free(tx_ctx); + + return status; +} + +/** + * p2p_tx_timeout() - Callback for tx timeout + * @pdata: pointer to tx context + * + * This function is callback for tx time out. 
+ * + * Return: None + */ +static void p2p_tx_timeout(void *pdata) +{ + struct tx_action_context *tx_ctx = pdata; + + p2p_info("pdata:%pK", pdata); + + if (!tx_ctx || !(tx_ctx->p2p_soc_obj)) { + p2p_err("invalid tx context or p2p soc object"); + return; + } + + qdf_mc_timer_destroy(&tx_ctx->tx_timer); + p2p_send_tx_conf(tx_ctx, false); + p2p_remove_tx_context(tx_ctx); +} + +/** + * p2p_enable_tx_timer() - enable tx timer + * @tx_ctx: tx context + * + * This function enable tx timer for action frame required ota tx. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_enable_tx_timer(struct tx_action_context *tx_ctx) +{ + QDF_STATUS status; + + p2p_debug("tx context:%pK", tx_ctx); + + status = qdf_mc_timer_init(&tx_ctx->tx_timer, + QDF_TIMER_TYPE_SW, p2p_tx_timeout, + tx_ctx); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("failed to init tx timer"); + return status; + } + + status = qdf_mc_timer_start(&tx_ctx->tx_timer, + P2P_ACTION_FRAME_TX_TIMEOUT); + if (status != QDF_STATUS_SUCCESS) + p2p_err("tx timer start failed"); + + return status; +} + +/** + * p2p_disable_tx_timer() - disable tx timer + * @tx_ctx: tx context + * + * This function disable tx timer for action frame required ota tx. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_disable_tx_timer(struct tx_action_context *tx_ctx) +{ + QDF_STATUS status; + + p2p_debug("tx context:%pK", tx_ctx); + + status = qdf_mc_timer_stop(&tx_ctx->tx_timer); + if (status != QDF_STATUS_SUCCESS) + p2p_err("Failed to stop tx timer, status:%d", status); + + status = qdf_mc_timer_destroy(&tx_ctx->tx_timer); + if (status != QDF_STATUS_SUCCESS) + p2p_err("Failed to destroy tx timer, status:%d", status); + + return status; +} + +/** + * is_rmf_mgmt_action_frame() - check RMF action frame by category + * @action_category: action frame actegory + * + * This function check the frame is robust mgmt action frame or not + * + * Return: true - if category is robust mgmt type + */ +static bool is_rmf_mgmt_action_frame(uint8_t action_category) +{ + switch (action_category) { + case ACTION_CATEGORY_SPECTRUM_MGMT: + case ACTION_CATEGORY_QOS: + case ACTION_CATEGORY_DLS: + case ACTION_CATEGORY_BACK: + case ACTION_CATEGORY_RRM: + case ACTION_FAST_BSS_TRNST: + case ACTION_CATEGORY_SA_QUERY: + case ACTION_CATEGORY_PROTECTED_DUAL_OF_PUBLIC_ACTION: + case ACTION_CATEGORY_WNM: + case ACTION_CATEGORY_MESH_ACTION: + case ACTION_CATEGORY_MULTIHOP_ACTION: + case ACTION_CATEGORY_DMG: + case ACTION_CATEGORY_FST: + case ACTION_CATEGORY_VENDOR_SPECIFIC_PROTECTED: + return true; + default: + break; + } + return false; +} + +/** + * p2p_populate_rmf_field() - populate unicast rmf frame + * @tx_ctx: tx_action_context + * @size: input size of frame, and output new size + * @ppbuf: input frame ptr, and output new frame + * @ppkt: input pkt, output new pkt. + * + * This function allocates new pkt for rmf frame. The + * new frame has extra space for ccmp field. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_populate_rmf_field(struct tx_action_context *tx_ctx, + uint32_t *size, uint8_t **ppbuf, qdf_nbuf_t *ppkt) +{ + struct wlan_frame_hdr *wh, *rmf_wh; + struct action_frm_hdr *action_hdr; + QDF_STATUS status = QDF_STATUS_SUCCESS; + qdf_nbuf_t pkt = NULL; + uint8_t *frame; + uint32_t frame_len; + struct p2p_soc_priv_obj *p2p_soc_obj; + + p2p_soc_obj = tx_ctx->p2p_soc_obj; + + if (tx_ctx->frame_info.sub_type != P2P_MGMT_ACTION || + !p2p_soc_obj->p2p_cb.is_mgmt_protected) + return QDF_STATUS_SUCCESS; + if (*size < (sizeof(struct wlan_frame_hdr) + + sizeof(struct action_frm_hdr))) { + return QDF_STATUS_E_INVAL; + } + + wh = (struct wlan_frame_hdr *)(*ppbuf); + action_hdr = (struct action_frm_hdr *)(*ppbuf + sizeof(*wh)); + + if (!is_rmf_mgmt_action_frame(action_hdr->action_category)) { + p2p_debug("non rmf act frame 0x%x cat %x", + tx_ctx->frame_info.sub_type, + action_hdr->action_category); + return QDF_STATUS_SUCCESS; + } + + if (!p2p_soc_obj->p2p_cb.is_mgmt_protected( + tx_ctx->vdev_id, wh->i_addr1)) { + p2p_debug("non rmf connection vdev %d "QDF_MAC_ADDR_STR, + tx_ctx->vdev_id, QDF_MAC_ADDR_ARRAY(wh->i_addr1)); + return QDF_STATUS_SUCCESS; + } + if (!qdf_is_macaddr_group((struct qdf_mac_addr *)wh->i_addr1) && + !qdf_is_macaddr_broadcast((struct qdf_mac_addr *)wh->i_addr1)) { + uint8_t mic_len, mic_hdr_len, pdev_id; + + pdev_id = + wlan_get_pdev_id_from_vdev_id(tx_ctx->p2p_soc_obj->soc, + tx_ctx->vdev_id, + WLAN_P2P_ID); + status = mlme_get_peer_mic_len(p2p_soc_obj->soc, pdev_id, + wh->i_addr1, &mic_len, + &mic_hdr_len); + if (QDF_IS_STATUS_ERROR(status)) { + p2p_err("Failed to get peer mic length."); + return status; + } + + frame_len = *size + mic_hdr_len + mic_len; + status = p2p_packet_alloc((uint16_t)frame_len, (void **)&frame, + &pkt); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("Failed to allocate %d bytes for rmf frame.", + frame_len); + return QDF_STATUS_E_NOMEM; + } + + 
qdf_mem_copy(frame, wh, sizeof(*wh)); + qdf_mem_copy(frame + sizeof(*wh) + mic_hdr_len, + *ppbuf + sizeof(*wh), + *size - sizeof(*wh)); + rmf_wh = (struct wlan_frame_hdr *)frame; + (rmf_wh)->i_fc[1] |= IEEE80211_FC1_WEP; + p2p_debug("set protection 0x%x cat %d "QDF_MAC_ADDR_STR, + tx_ctx->frame_info.sub_type, + action_hdr->action_category, + QDF_MAC_ADDR_ARRAY(wh->i_addr1)); + + qdf_nbuf_free(*ppkt); + *ppbuf = frame; + *ppkt = pkt; + *size = frame_len; + } + + return status; +} + +/** + * p2p_execute_tx_action_frame() - execute tx action frame + * @tx_ctx: tx context + * + * This function modify p2p ie and tx this action frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_execute_tx_action_frame( + struct tx_action_context *tx_ctx) +{ + uint8_t *frame; + qdf_nbuf_t packet; + QDF_STATUS status; + uint8_t noa_len = 0; + uint8_t noa_stream[P2P_NOA_STREAM_ARR_SIZE]; + uint8_t orig_len = 0; + const uint8_t *ie; + uint8_t ie_len; + uint8_t *p2p_ie = NULL; + const uint8_t *presence_noa_attr = NULL; + uint32_t nbytes_copy; + uint32_t buf_len = tx_ctx->buf_len; + struct p2p_frame_info *frame_info; + + frame_info = &(tx_ctx->frame_info); + if (frame_info->sub_type == P2P_MGMT_PROBE_RSP) { + p2p_ie = (uint8_t *)p2p_get_p2pie_from_probe_rsp(tx_ctx); + } else if (frame_info->action_type == + P2P_ACTION_PRESENCE_RSP) { + ie = tx_ctx->buf + + P2P_PUBLIC_ACTION_FRAME_TYPE_OFFSET; + ie_len = tx_ctx->buf_len - + P2P_PUBLIC_ACTION_FRAME_TYPE_OFFSET; + p2p_ie = (uint8_t *)p2p_get_p2pie_ptr(ie, ie_len); + if (p2p_ie) { + /* extract the presence of NoA attribute inside + * P2P IE */ + ie = p2p_ie + P2P_IE_HEADER_LEN; + ie_len = p2p_ie[1]; + presence_noa_attr = p2p_get_presence_noa_attr( + ie, ie_len); + } + } + + if ((frame_info->sub_type != P2P_MGMT_NOT_SUPPORT) && + p2p_ie) { + orig_len = p2p_ie[1]; + noa_len = p2p_update_noa_stream(tx_ctx, p2p_ie, + presence_noa_attr, &buf_len, + noa_stream); + buf_len += noa_len; + } + + if 
(frame_info->sub_type == P2P_MGMT_PROBE_RSP) + p2p_set_ht_caps(tx_ctx, buf_len); + + /* Ok-- try to allocate some memory: */ + status = p2p_packet_alloc((uint16_t) buf_len, (void **)&frame, + &packet); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("Failed to allocate %d bytes for a Probe Request.", + buf_len); + return status; + } + + /* + * Add sequence number to action frames + * Frames are handed over in .11 format by supplicant already + */ + p2p_populate_mac_header(tx_ctx); + + if ((noa_len > 0) && p2p_ie + && (noa_len < (P2P_MAX_NOA_ATTR_LEN + + P2P_IE_HEADER_LEN))) { + /* Add 2 bytes for length and Arribute field */ + nbytes_copy = (p2p_ie + orig_len + 2) - tx_ctx->buf; + qdf_mem_copy(frame, tx_ctx->buf, nbytes_copy); + qdf_mem_copy((frame + nbytes_copy), noa_stream, + noa_len); + qdf_mem_copy((frame + nbytes_copy + noa_len), + tx_ctx->buf + nbytes_copy, + buf_len - nbytes_copy - noa_len); + } else { + qdf_mem_copy(frame, tx_ctx->buf, buf_len); + } + + status = p2p_populate_rmf_field(tx_ctx, &buf_len, &frame, &packet); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("failed to populate rmf frame"); + qdf_nbuf_free(packet); + return status; + } + status = p2p_mgmt_tx(tx_ctx, buf_len, packet, frame); + if (status == QDF_STATUS_SUCCESS) { + if (tx_ctx->no_ack) { + p2p_send_tx_conf(tx_ctx, true); + p2p_remove_tx_context(tx_ctx); + } else { + p2p_enable_tx_timer(tx_ctx); + p2p_move_tx_context_to_ack_queue(tx_ctx); + } + } else { + p2p_err("failed to tx mgmt frame"); + qdf_nbuf_free(packet); + } + + return status; +} + +struct tx_action_context *p2p_find_tx_ctx_by_nbuf( + struct p2p_soc_priv_obj *p2p_soc_obj, void *nbuf) +{ + struct tx_action_context *cur_tx_ctx; + qdf_list_node_t *p_node; + QDF_STATUS status; + + if (!p2p_soc_obj) { + p2p_err("invalid p2p soc object"); + return NULL; + } + + status = qdf_list_peek_front(&p2p_soc_obj->tx_q_ack, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + cur_tx_ctx = + qdf_container_of(p_node, struct tx_action_context, 
node); + if (cur_tx_ctx->nbuf == nbuf) { + p2p_debug("find tx ctx, nbuf:%pK", nbuf); + status = qdf_mc_timer_stop(&cur_tx_ctx->tx_timer); + if (status != QDF_STATUS_SUCCESS) + p2p_err("Failed to stop tx timer, status:%d", + status); + return cur_tx_ctx; + } + status = qdf_list_peek_next(&p2p_soc_obj->tx_q_ack, + p_node, &p_node); + } + + return NULL; +} + +void p2p_dump_tx_queue(struct p2p_soc_priv_obj *p2p_soc_obj) +{ + struct tx_action_context *tx_ctx; + qdf_list_node_t *p_node; + QDF_STATUS status; + + p2p_debug("dump tx queue wait for roc, p2p soc obj:%pK, size:%d", + p2p_soc_obj, qdf_list_size(&p2p_soc_obj->tx_q_roc)); + + status = qdf_list_peek_front(&p2p_soc_obj->tx_q_roc, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + tx_ctx = qdf_container_of(p_node, + struct tx_action_context, node); + p2p_debug("p2p soc object:%pK, tx ctx:%pK, vdev_id:%d, scan_id:%d, roc_cookie:%llx, chan:%d, buf:%pK, len:%d, off_chan:%d, cck:%d, ack:%d, duration:%d", + p2p_soc_obj, tx_ctx, + tx_ctx->vdev_id, tx_ctx->scan_id, + tx_ctx->roc_cookie, tx_ctx->chan, + tx_ctx->buf, tx_ctx->buf_len, + tx_ctx->off_chan, tx_ctx->no_cck, + tx_ctx->no_ack, tx_ctx->duration); + + status = qdf_list_peek_next(&p2p_soc_obj->tx_q_roc, + p_node, &p_node); + } + + p2p_debug("dump tx queue wait for ack, size:%d", + qdf_list_size(&p2p_soc_obj->tx_q_ack)); + status = qdf_list_peek_front(&p2p_soc_obj->tx_q_ack, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + tx_ctx = qdf_container_of(p_node, + struct tx_action_context, node); + p2p_debug("p2p soc object:%pK, tx_ctx:%pK, vdev_id:%d, scan_id:%d, roc_cookie:%llx, chan:%d, buf:%pK, len:%d, off_chan:%d, cck:%d, ack:%d, duration:%d", + p2p_soc_obj, tx_ctx, + tx_ctx->vdev_id, tx_ctx->scan_id, + tx_ctx->roc_cookie, tx_ctx->chan, + tx_ctx->buf, tx_ctx->buf_len, + tx_ctx->off_chan, tx_ctx->no_cck, + tx_ctx->no_ack, tx_ctx->duration); + + status = qdf_list_peek_next(&p2p_soc_obj->tx_q_ack, + p_node, &p_node); + } +} + +QDF_STATUS 
p2p_ready_to_tx_frame(struct p2p_soc_priv_obj *p2p_soc_obj, + uint64_t cookie) +{ + struct tx_action_context *cur_tx_ctx; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + cur_tx_ctx = p2p_find_tx_ctx_by_roc(p2p_soc_obj, cookie); + + while (cur_tx_ctx) { + p2p_debug("tx_ctx:%pK", cur_tx_ctx); + status = p2p_execute_tx_action_frame(cur_tx_ctx); + if (status != QDF_STATUS_SUCCESS) { + p2p_send_tx_conf(cur_tx_ctx, false); + p2p_remove_tx_context(cur_tx_ctx); + } + cur_tx_ctx = p2p_find_tx_ctx_by_roc(p2p_soc_obj, cookie); + } + + return status; +} + +QDF_STATUS p2p_cleanup_tx_sync( + struct p2p_soc_priv_obj *p2p_soc_obj, + struct wlan_objmgr_vdev *vdev) +{ + struct scheduler_msg msg = {0}; + struct p2p_cleanup_param *param; + QDF_STATUS status; + uint32_t vdev_id; + + if (!p2p_soc_obj) { + p2p_err("p2p soc context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + p2p_debug("p2p_soc_obj:%pK, vdev:%pK", p2p_soc_obj, vdev); + param = qdf_mem_malloc(sizeof(*param)); + if (!param) { + p2p_err("failed to allocate cleanup param"); + return QDF_STATUS_E_NOMEM; + } + + param->p2p_soc_obj = p2p_soc_obj; + if (vdev) + vdev_id = (uint32_t)wlan_vdev_get_id(vdev); + else + vdev_id = P2P_INVALID_VDEV_ID; + param->vdev_id = vdev_id; + qdf_event_reset(&p2p_soc_obj->cleanup_tx_done); + msg.type = P2P_CLEANUP_TX; + msg.bodyptr = param; + msg.callback = p2p_process_cmd; + status = scheduler_post_message(QDF_MODULE_ID_P2P, + QDF_MODULE_ID_P2P, + QDF_MODULE_ID_OS_IF, &msg); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("failed to post message"); + qdf_mem_free(param); + return status; + } + + status = qdf_wait_single_event( + &p2p_soc_obj->cleanup_tx_done, + P2P_WAIT_CLEANUP_ROC); + + if (status != QDF_STATUS_SUCCESS) + p2p_err("wait for cleanup tx timeout, %d", status); + + return status; +} + +QDF_STATUS p2p_process_cleanup_tx_queue(struct p2p_cleanup_param *param) +{ + struct tx_action_context *curr_tx_ctx; + qdf_list_node_t *p_node; + struct p2p_soc_priv_obj *p2p_soc_obj; + uint32_t 
vdev_id; + QDF_STATUS status, ret; + + if (!param || !(param->p2p_soc_obj)) { + p2p_err("Invalid cleanup param"); + return QDF_STATUS_E_FAILURE; + } + + p2p_soc_obj = param->p2p_soc_obj; + vdev_id = param->vdev_id; + + p2p_debug("clean up tx queue wait for roc, size:%d, vdev_id:%d", + qdf_list_size(&p2p_soc_obj->tx_q_roc), vdev_id); + + status = qdf_list_peek_front(&p2p_soc_obj->tx_q_roc, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + curr_tx_ctx = qdf_container_of(p_node, + struct tx_action_context, node); + status = qdf_list_peek_next(&p2p_soc_obj->tx_q_roc, + p_node, &p_node); + if ((vdev_id == P2P_INVALID_VDEV_ID) || + (vdev_id == curr_tx_ctx->vdev_id)) { + ret = qdf_list_remove_node(&p2p_soc_obj->tx_q_roc, + &curr_tx_ctx->node); + if (ret == QDF_STATUS_SUCCESS) { + p2p_send_tx_conf(curr_tx_ctx, false); + qdf_mem_free(curr_tx_ctx->buf); + qdf_mem_free(curr_tx_ctx); + } else + p2p_err("remove %pK from roc_q fail", + curr_tx_ctx); + } + } + + p2p_debug("clean up tx queue wait for ack, size:%d", + qdf_list_size(&p2p_soc_obj->tx_q_ack)); + + status = qdf_list_peek_front(&p2p_soc_obj->tx_q_ack, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + curr_tx_ctx = qdf_container_of(p_node, + struct tx_action_context, node); + status = qdf_list_peek_next(&p2p_soc_obj->tx_q_ack, + p_node, &p_node); + if ((vdev_id == P2P_INVALID_VDEV_ID) || + (vdev_id == curr_tx_ctx->vdev_id)) { + ret = qdf_list_remove_node(&p2p_soc_obj->tx_q_ack, + &curr_tx_ctx->node); + if (ret == QDF_STATUS_SUCCESS) { + p2p_disable_tx_timer(curr_tx_ctx); + p2p_send_tx_conf(curr_tx_ctx, false); + qdf_mem_free(curr_tx_ctx->buf); + qdf_mem_free(curr_tx_ctx); + } else + p2p_err("remove %pK from roc_q fail", + curr_tx_ctx); + } + } + + qdf_event_set(&p2p_soc_obj->cleanup_tx_done); + + return QDF_STATUS_SUCCESS; +} + +bool p2p_check_random_mac(struct wlan_objmgr_psoc *soc, uint32_t vdev_id, + uint8_t *random_mac_addr) +{ + uint32_t i = 0; + struct p2p_vdev_priv_obj *p2p_vdev_obj; + struct 
wlan_objmgr_vdev *vdev; + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(soc, vdev_id, WLAN_P2P_ID); + if (!vdev) { + p2p_debug("vdev is null"); + return false; + } + + p2p_vdev_obj = wlan_objmgr_vdev_get_comp_private_obj( + vdev, WLAN_UMAC_COMP_P2P); + if (!p2p_vdev_obj) { + wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID); + p2p_debug("p2p vdev object is NULL"); + return false; + } + + qdf_spin_lock(&p2p_vdev_obj->random_mac_lock); + for (i = 0; i < MAX_RANDOM_MAC_ADDRS; i++) { + if ((p2p_vdev_obj->random_mac[i].in_use) && + (!qdf_mem_cmp(p2p_vdev_obj->random_mac[i].addr, + random_mac_addr, QDF_MAC_ADDR_SIZE))) { + qdf_spin_unlock(&p2p_vdev_obj->random_mac_lock); + wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID); + return true; + } + } + qdf_spin_unlock(&p2p_vdev_obj->random_mac_lock); + wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID); + + return false; +} + +/** + * find_action_frame_cookie() - Checks for action cookie in cookie list + * @cookie_list: List of cookies + * @rnd_cookie: Cookie to be searched + * + * Return: If search is successful return pointer to action_frame_cookie + * object in which cookie item is encapsulated. + */ +static struct action_frame_cookie * +find_action_frame_cookie(qdf_list_t *cookie_list, uint64_t rnd_cookie) +{ + struct action_frame_cookie *action_cookie; + + qdf_list_for_each(cookie_list, action_cookie, cookie_node) { + if (action_cookie->cookie == rnd_cookie) + return action_cookie; + } + + return NULL; +} + +/** + * allocate_action_frame_cookie() - Allocate and add action cookie to + * given list + * @cookie_list: List of cookies + * @rnd_cookie: Cookie to be added + * + * Return: If allocation and addition is successful return pointer to + * action_frame_cookie object in which cookie item is encapsulated. 
+ */ +static struct action_frame_cookie * +allocate_action_frame_cookie(qdf_list_t *cookie_list, uint64_t rnd_cookie) +{ + struct action_frame_cookie *action_cookie; + + action_cookie = qdf_mem_malloc(sizeof(*action_cookie)); + if (!action_cookie) + return NULL; + + action_cookie->cookie = rnd_cookie; + qdf_list_insert_front(cookie_list, &action_cookie->cookie_node); + + return action_cookie; +} + +/** + * delete_action_frame_cookie() - Delete the cookie from given list + * @cookie_list: List of cookies + * @action_cookie: Cookie to be deleted + * + * This function deletes the cookie item from given list and corresponding + * object in which it is encapsulated. + * + * Return: None + */ +static void +delete_action_frame_cookie(qdf_list_t *cookie_list, + struct action_frame_cookie *action_cookie) +{ + qdf_list_remove_node(cookie_list, &action_cookie->cookie_node); + qdf_mem_free(action_cookie); +} + +/** + * append_action_frame_cookie() - Append action cookie to given list + * @cookie_list: List of cookies + * @rnd_cookie: Cookie to be append + * + * This is a wrapper function which invokes allocate_action_frame_cookie + * if the cookie to be added is not duplicate + * + * Return: true - for successful case + * false - failed. 
+ */ +static bool +append_action_frame_cookie(qdf_list_t *cookie_list, uint64_t rnd_cookie) +{ + struct action_frame_cookie *action_cookie; + + /* + * There should be no mac entry with empty cookie list, + * check and ignore if duplicate + */ + action_cookie = find_action_frame_cookie(cookie_list, rnd_cookie); + if (action_cookie) + /* random mac address is already programmed */ + return true; + + /* insert new cookie in cookie list */ + action_cookie = allocate_action_frame_cookie(cookie_list, rnd_cookie); + if (!action_cookie) + return false; + + return true; +} + +/** + * p2p_add_random_mac() - add or append random mac to given vdev rand mac list + * @soc: soc object + * @vdev_id: vdev id + * @mac: mac addr to be added or append + * @freq: frequency + * @rnd_cookie: random mac mgmt tx cookie + * + * This function will add or append the mac addr entry to vdev random mac list. + * Once the mac addr filter is not needed, it can be removed by + * p2p_del_random_mac. + * + * Return: QDF_STATUS_E_EXISTS - append to existing list + * QDF_STATUS_SUCCESS - add a new entry. + * other : failed to add the mac address entry. 
 */
static QDF_STATUS
p2p_add_random_mac(struct wlan_objmgr_psoc *soc, uint32_t vdev_id,
		   uint8_t *mac, uint32_t freq, uint64_t rnd_cookie)
{
	uint32_t i;
	uint32_t first_unused = MAX_RANDOM_MAC_ADDRS;
	struct action_frame_cookie *action_cookie;
	int32_t append_ret;
	struct p2p_vdev_priv_obj *p2p_vdev_obj;
	struct wlan_objmgr_vdev *vdev;

	p2p_debug("random_mac:vdev %d mac_addr:%pM rnd_cookie=%llu freq = %u",
		  vdev_id, mac, rnd_cookie, freq);

	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(soc, vdev_id, WLAN_P2P_ID);
	if (!vdev) {
		p2p_debug("vdev is null");

		return QDF_STATUS_E_INVAL;
	}

	p2p_vdev_obj = wlan_objmgr_vdev_get_comp_private_obj(
			vdev, WLAN_UMAC_COMP_P2P);
	if (!p2p_vdev_obj) {
		p2p_debug("random_mac:p2p vdev object is NULL");
		wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID);

		return QDF_STATUS_E_INVAL;
	}

	qdf_spin_lock(&p2p_vdev_obj->random_mac_lock);
	/*
	 * Following loop checks whether random mac entry is already
	 * present, if present get the index of matched entry else
	 * get the first unused slot to store this new random mac
	 */
	for (i = 0; i < MAX_RANDOM_MAC_ADDRS; i++) {
		if (!p2p_vdev_obj->random_mac[i].in_use) {
			if (first_unused == MAX_RANDOM_MAC_ADDRS)
				first_unused = i;
			continue;
		}

		if (!qdf_mem_cmp(p2p_vdev_obj->random_mac[i].addr, mac,
				 QDF_MAC_ADDR_SIZE))
			break;
	}

	/* Existing entry: only append the new cookie reference to it */
	if (i != MAX_RANDOM_MAC_ADDRS) {
		append_ret = append_action_frame_cookie(
				&p2p_vdev_obj->random_mac[i].cookie_list,
				rnd_cookie);
		qdf_spin_unlock(&p2p_vdev_obj->random_mac_lock);
		wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID);
		p2p_debug("random_mac:append %d vdev %d freq %d %pM rnd_cookie %llu",
			  append_ret, vdev_id, freq, mac, rnd_cookie);
		if (!append_ret) {
			p2p_debug("random_mac:failed to append rnd_cookie");
			return QDF_STATUS_E_NOMEM;
		}

		return QDF_STATUS_E_EXISTS;
	}

	if (first_unused == MAX_RANDOM_MAC_ADDRS) {
		qdf_spin_unlock(&p2p_vdev_obj->random_mac_lock);

		wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID);
		p2p_debug("random_mac:Reached the limit of Max random addresses");

		return QDF_STATUS_E_RESOURCES;
	}

	/* get the first unused buf and store new random mac */
	i = first_unused;

	action_cookie = allocate_action_frame_cookie(
			&p2p_vdev_obj->random_mac[i].cookie_list,
			rnd_cookie);
	if (!action_cookie) {
		qdf_spin_unlock(&p2p_vdev_obj->random_mac_lock);
		wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID);
		p2p_err("random_mac:failed to alloc rnd cookie");

		return QDF_STATUS_E_NOMEM;
	}
	qdf_mem_copy(p2p_vdev_obj->random_mac[i].addr, mac, QDF_MAC_ADDR_SIZE);
	p2p_vdev_obj->random_mac[i].in_use = true;
	p2p_vdev_obj->random_mac[i].freq = freq;
	qdf_spin_unlock(&p2p_vdev_obj->random_mac_lock);
	wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID);
	p2p_debug("random_mac:add vdev %d freq %d %pM rnd_cookie %llu",
		  vdev_id, freq, mac, rnd_cookie);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
p2p_del_random_mac(struct wlan_objmgr_psoc *soc, uint32_t vdev_id,
		   uint64_t rnd_cookie, uint32_t duration)
{
	uint32_t i;
	struct action_frame_cookie *action_cookie;
	struct p2p_vdev_priv_obj *p2p_vdev_obj;
	struct wlan_objmgr_vdev *vdev;

	p2p_debug("random_mac:vdev %d cookie %llu duration %d", vdev_id,
		  rnd_cookie, duration);
	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(soc, vdev_id,
						    WLAN_P2P_ID);
	if (!vdev) {
		p2p_debug("vdev is null");
		return QDF_STATUS_E_INVAL;
	}
	p2p_vdev_obj = wlan_objmgr_vdev_get_comp_private_obj(
			vdev, WLAN_UMAC_COMP_P2P);
	if (!p2p_vdev_obj) {
		wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID);
		p2p_debug("p2p vdev object is NULL");
		return QDF_STATUS_E_INVAL;
	}

	qdf_spin_lock(&p2p_vdev_obj->random_mac_lock);
	for (i = 0; i < MAX_RANDOM_MAC_ADDRS; i++) {
		struct action_frame_random_mac *random_mac;

		random_mac = &p2p_vdev_obj->random_mac[i];
		if (!random_mac->in_use)
			continue;

		action_cookie = find_action_frame_cookie(
				&random_mac->cookie_list, rnd_cookie);
		if (!action_cookie)
			continue;

		delete_action_frame_cookie(
			&random_mac->cookie_list,
			action_cookie);

		/*
		 * Last reference gone: arm the clear timer so the rx
		 * filter is removed after @duration.  The lock is
		 * dropped around the timer calls — presumably because
		 * the qdf_mc_timer APIs may not be invoked under a
		 * spinlock; NOTE(review): confirm the slot cannot be
		 * reused in this window.
		 */
		if (qdf_list_empty(&random_mac->cookie_list)) {
			qdf_spin_unlock(&p2p_vdev_obj->random_mac_lock);
			if (qdf_mc_timer_get_current_state(
					&random_mac->clear_timer) ==
			    QDF_TIMER_STATE_RUNNING)
				qdf_mc_timer_stop(&random_mac->clear_timer);
			qdf_mc_timer_start(&random_mac->clear_timer, duration);

			qdf_spin_lock(&p2p_vdev_obj->random_mac_lock);
			p2p_debug("random_mac:noref on vdev %d addr %pM",
				  vdev_id, random_mac->addr);
		}
		break;
	}
	qdf_spin_unlock(&p2p_vdev_obj->random_mac_lock);
	wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID);

	return QDF_STATUS_SUCCESS;
}

void p2p_del_all_rand_mac_vdev(struct wlan_objmgr_vdev *vdev)
{
	int32_t i;
	uint32_t freq;
	uint8_t addr[QDF_MAC_ADDR_SIZE];
	struct p2p_vdev_priv_obj *p2p_vdev_obj;

	if (!vdev)
		return;
	p2p_vdev_obj = wlan_objmgr_vdev_get_comp_private_obj(
			vdev, WLAN_UMAC_COMP_P2P);
	if (!p2p_vdev_obj)
		return;

	qdf_spin_lock(&p2p_vdev_obj->random_mac_lock);
	for (i = 0; i < MAX_RANDOM_MAC_ADDRS; i++) {
		struct action_frame_cookie *action_cookie;
		struct action_frame_cookie *action_cookie_next;

		if (!p2p_vdev_obj->random_mac[i].in_use)
			continue;

		/* empty the list and clear random addr */
		qdf_list_for_each_del(&p2p_vdev_obj->random_mac[i].cookie_list,
				      action_cookie, action_cookie_next,
				      cookie_node) {
			qdf_list_remove_node(
				&p2p_vdev_obj->random_mac[i].cookie_list,
				&action_cookie->cookie_node);
			qdf_mem_free(action_cookie);
		}

		p2p_vdev_obj->random_mac[i].in_use = false;
		freq = p2p_vdev_obj->random_mac[i].freq;
		qdf_mem_copy(addr, p2p_vdev_obj->random_mac[i].addr,
			     QDF_MAC_ADDR_SIZE);
		/*
		 * Copy addr/freq out first, then drop the lock so the
		 * timer stop and the target clear command run unlocked.
		 */
		qdf_spin_unlock(&p2p_vdev_obj->random_mac_lock);
		qdf_mc_timer_stop(&p2p_vdev_obj->random_mac[i].clear_timer);
		p2p_clear_mac_filter(wlan_vdev_get_psoc(vdev),
				     wlan_vdev_get_id(vdev), addr, freq);

		p2p_debug("random_mac:delall vdev %d freq %d addr %pM",
			  wlan_vdev_get_id(vdev), freq, addr);

		qdf_spin_lock(&p2p_vdev_obj->random_mac_lock);
	}
	qdf_spin_unlock(&p2p_vdev_obj->random_mac_lock);
}

/* Object-manager iteration callback: purge random macs of one vdev */
static void
p2p_del_rand_mac_vdev_enum_handler(struct wlan_objmgr_psoc *psoc,
				   void *obj, void *arg)
{
	struct wlan_objmgr_vdev *vdev = obj;

	if (!vdev) {
		p2p_err("random_mac:invalid vdev");
		return;
	}

	if (!p2p_is_vdev_support_rand_mac(vdev))
		return;

	p2p_del_all_rand_mac_vdev(vdev);
}

void p2p_del_all_rand_mac_soc(struct wlan_objmgr_psoc *soc)
{
	if (!soc) {
		p2p_err("random_mac:soc object is NULL");
		return;
	}

	wlan_objmgr_iterate_obj_list(soc, WLAN_VDEV_OP,
				     p2p_del_rand_mac_vdev_enum_handler,
				     NULL, 0, WLAN_P2P_ID);
}

/**
 * p2p_is_random_mac() - check mac addr is random mac for vdev
 * @soc: soc object
 * @vdev_id: vdev id
 * @mac: mac addr to be added or append
 *
 * This function will check the source mac addr same as vdev's mac addr or not.
 * If not same, then the source mac addr should be random mac addr.
 *
 * Return: true if mac is random mac, otherwise false
 */
static bool
p2p_is_random_mac(struct wlan_objmgr_psoc *soc, uint32_t vdev_id, uint8_t *mac)
{
	bool ret = false;
	struct wlan_objmgr_vdev *vdev;

	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(soc, vdev_id, WLAN_P2P_ID);
	if (!vdev) {
		p2p_debug("random_mac:vdev is null");
		return false;
	}

	/* Any address differing from the vdev's own mac counts as random */
	if (qdf_mem_cmp(wlan_vdev_mlme_get_macaddr(vdev),
			mac, QDF_MAC_ADDR_SIZE))
		ret = true;
	wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID);

	return ret;
}

/* Firmware response callback: wake the waiter in p2p_set_rand_mac() */
static void p2p_set_mac_filter_callback(bool result, void *context)
{
	struct osif_request *request;
	struct random_mac_priv *priv;

	p2p_debug("random_mac:set random mac filter result %d", result);
	request = osif_request_get(context);
	if (!request) {
		/* Waiter already timed out and dropped the request */
		p2p_err("random_mac:invalid response");
		return;
	}

	priv = osif_request_priv(request);
	priv->result = result;

	osif_request_complete(request);
	osif_request_put(request);
}

QDF_STATUS p2p_process_set_rand_mac_rsp(struct p2p_mac_filter_rsp *resp)
{
	struct wlan_objmgr_psoc *soc;
	struct p2p_vdev_priv_obj *p2p_vdev_obj;
	struct wlan_objmgr_vdev *vdev;

	if (!resp || !resp->p2p_soc_obj || !resp->p2p_soc_obj->soc) {
		p2p_debug("random_mac:set_filter_req is null");
		return QDF_STATUS_E_INVAL;
	}
	p2p_debug("random_mac:process rsp on vdev %d status %d", resp->vdev_id,
		  resp->status);
	soc = resp->p2p_soc_obj->soc;
	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(soc, resp->vdev_id,
						    WLAN_P2P_ID);
	if (!vdev) {
		p2p_debug("random_mac:vdev is null vdev %d", resp->vdev_id);
		return QDF_STATUS_E_INVAL;
	}

	p2p_vdev_obj = wlan_objmgr_vdev_get_comp_private_obj(
			vdev, WLAN_UMAC_COMP_P2P);
	if (!p2p_vdev_obj) {
		wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID);
		p2p_debug("random_mac:p2p_vdev_obj is null vdev %d",
			  resp->vdev_id);
		return QDF_STATUS_E_INVAL;
	}
	/* pending_req.soc doubles as the "request outstanding" flag */
	if (!p2p_vdev_obj->pending_req.soc) {
		wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID);

		p2p_debug("random_mac:no pending set req for vdev %d",
			  resp->vdev_id);
		return QDF_STATUS_E_INVAL;
	}

	p2p_debug("random_mac:get pending req on vdev %d set %d mac filter %pM freq %d",
		  p2p_vdev_obj->pending_req.vdev_id,
		  p2p_vdev_obj->pending_req.set, p2p_vdev_obj->pending_req.mac,
		  p2p_vdev_obj->pending_req.freq);
	if (p2p_vdev_obj->pending_req.cb)
		p2p_vdev_obj->pending_req.cb(
			!!resp->status, p2p_vdev_obj->pending_req.req_cookie);

	/* Clear the pending slot so the next set request may proceed */
	qdf_mem_zero(&p2p_vdev_obj->pending_req,
		     sizeof(p2p_vdev_obj->pending_req));
	wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
p2p_process_set_rand_mac(struct p2p_set_mac_filter_req *set_filter_req)
{
	struct wlan_objmgr_psoc *soc;
	struct wlan_lmac_if_p2p_tx_ops *p2p_ops;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct p2p_set_mac_filter param;
	struct p2p_vdev_priv_obj *p2p_vdev_obj;
	struct wlan_objmgr_vdev *vdev;

	if (!set_filter_req || !set_filter_req->soc) {
		p2p_debug("random_mac:set_filter_req is null");
		return QDF_STATUS_E_INVAL;
	}
	p2p_debug("random_mac:vdev %d set %d mac filter %pM freq %d",
		  set_filter_req->vdev_id, set_filter_req->set,
		  set_filter_req->mac, set_filter_req->freq);

	soc = set_filter_req->soc;
	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(
			soc, set_filter_req->vdev_id, WLAN_P2P_ID);
	if (!vdev) {
		p2p_debug("random_mac:vdev is null vdev %d",
			  set_filter_req->vdev_id);
		goto get_vdev_failed;
	}
	p2p_vdev_obj = wlan_objmgr_vdev_get_comp_private_obj(
			vdev, WLAN_UMAC_COMP_P2P);
	if (!p2p_vdev_obj) {
		p2p_debug("random_mac:p2p_vdev_obj is null vdev %d",
			  set_filter_req->vdev_id);
		goto get_p2p_obj_failed;
	}
	/* Only one outstanding filter request per vdev is allowed */
	if (p2p_vdev_obj->pending_req.soc) {
		p2p_debug("random_mac:Busy on vdev %d set %d mac filter %pM freq %d",
			  p2p_vdev_obj->pending_req.vdev_id,
			  p2p_vdev_obj->pending_req.set,
			  p2p_vdev_obj->pending_req.mac,
			  p2p_vdev_obj->pending_req.freq);
		goto get_p2p_obj_failed;
	}

	p2p_ops = p2p_psoc_get_tx_ops(soc);
	if (p2p_ops && p2p_ops->set_mac_addr_rx_filter_cmd) {
		qdf_mem_zero(&param, sizeof(param));
		param.vdev_id = set_filter_req->vdev_id;
		qdf_mem_copy(param.mac, set_filter_req->mac,
			     QDF_MAC_ADDR_SIZE);
		param.freq = set_filter_req->freq;
		param.set = set_filter_req->set;
		status = p2p_ops->set_mac_addr_rx_filter_cmd(soc, &param);
		/*
		 * Record the pending set request so the firmware response
		 * (p2p_process_set_rand_mac_rsp) can complete it; clear
		 * requests do not wait for a response.
		 */
		if (status == QDF_STATUS_SUCCESS && set_filter_req->set)
			qdf_mem_copy(&p2p_vdev_obj->pending_req,
				     set_filter_req, sizeof(*set_filter_req));
		p2p_debug("random_mac:p2p set mac addr rx filter, status:%d",
			  status);
	}

get_p2p_obj_failed:
	wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID);

get_vdev_failed:
	/* On any failure notify the waiter immediately with result=false */
	if (status != QDF_STATUS_SUCCESS &&
	    set_filter_req->cb)
		set_filter_req->cb(false, set_filter_req->req_cookie);

	return status;
}

/**
 * p2p_set_mac_filter() - send set mac addr filter cmd
 * @soc: soc
 * @vdev_id: vdev id
 * @mac: mac addr
 * @freq: freq
 * @set: set or clear
 * @cb: callback func to be called when the request completed.
 * @req_cookie: cookie to be returned
 *
 * This function send set random mac addr filter command to p2p component
 * msg core
 *
 * Return: QDF_STATUS_SUCCESS - if sent successfully.
 *         otherwise : failed.
+ */ +static QDF_STATUS +p2p_set_mac_filter(struct wlan_objmgr_psoc *soc, uint32_t vdev_id, + uint8_t *mac, uint32_t freq, bool set, + p2p_request_mgr_callback_t cb, void *req_cookie) +{ + struct p2p_set_mac_filter_req *set_filter_req; + struct scheduler_msg msg = {0}; + QDF_STATUS status; + + p2p_debug("random_mac:vdev %d freq %d set %d %pM", + vdev_id, freq, set, mac); + + set_filter_req = qdf_mem_malloc(sizeof(*set_filter_req)); + if (!set_filter_req) + return QDF_STATUS_E_NOMEM; + + set_filter_req->soc = soc; + set_filter_req->vdev_id = vdev_id; + set_filter_req->freq = freq; + qdf_mem_copy(set_filter_req->mac, mac, QDF_MAC_ADDR_SIZE); + set_filter_req->set = set; + set_filter_req->cb = cb; + set_filter_req->req_cookie = req_cookie; + + msg.type = P2P_SET_RANDOM_MAC; + msg.bodyptr = set_filter_req; + msg.callback = p2p_process_cmd; + status = scheduler_post_msg(QDF_MODULE_ID_OS_IF, &msg); + if (status != QDF_STATUS_SUCCESS) + qdf_mem_free(set_filter_req); + + return status; +} + +QDF_STATUS +p2p_clear_mac_filter(struct wlan_objmgr_psoc *soc, uint32_t vdev_id, + uint8_t *mac, uint32_t freq) +{ + return p2p_set_mac_filter(soc, vdev_id, mac, freq, false, NULL, NULL); +} + +bool +p2p_is_vdev_support_rand_mac(struct wlan_objmgr_vdev *vdev) +{ + enum QDF_OPMODE mode; + + mode = wlan_vdev_mlme_get_opmode(vdev); + if (mode == QDF_STA_MODE || + mode == QDF_P2P_CLIENT_MODE || + mode == QDF_P2P_DEVICE_MODE) + return true; + return false; +} + +/** + * p2p_is_vdev_support_rand_mac_by_id() - check vdev type support random mac + * mgmt tx or not + * @soc: soc obj + * @vdev_id: vdev id + * + * Return: true: support random mac mgmt tx + * false: not support random mac mgmt tx. 
+ */ +static bool +p2p_is_vdev_support_rand_mac_by_id(struct wlan_objmgr_psoc *soc, + uint32_t vdev_id) +{ + struct wlan_objmgr_vdev *vdev; + bool ret = false; + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(soc, vdev_id, WLAN_P2P_ID); + if (!vdev) + return false; + ret = p2p_is_vdev_support_rand_mac(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID); + + return ret; +} + +/** + * p2p_set_rand_mac() - set random mac address rx filter + * @soc: soc + * @vdev_id: vdev id + * @mac: mac addr + * @freq: freq + * @rnd_cookie: cookie to be returned + * + * This function will post msg to p2p core to set random mac addr rx filter. + * It will wait the respone and return the result to caller. + * + * Return: true: set successfully + * false: failed + */ +static bool +p2p_set_rand_mac(struct wlan_objmgr_psoc *soc, uint32_t vdev_id, + uint8_t *mac, uint32_t freq, uint64_t rnd_cookie) +{ + bool ret = false; + int err; + QDF_STATUS status; + struct osif_request *request; + static const struct osif_request_params params = { + .priv_size = sizeof(struct random_mac_priv), + .timeout_ms = WLAN_WAIT_TIME_SET_RND, + }; + void *req_cookie; + struct random_mac_priv *priv; + + request = osif_request_alloc(¶ms); + if (!request) { + p2p_err("Request allocation failure"); + return false; + } + + req_cookie = osif_request_cookie(request); + + status = p2p_set_mac_filter(soc, vdev_id, mac, freq, true, + p2p_set_mac_filter_callback, req_cookie); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("random_mac:set mac fitler failure %d", status); + } else { + err = osif_request_wait_for_response(request); + if (err) { + p2p_err("random_mac:timeout for set mac fitler %d", + err); + } else { + priv = osif_request_priv(request); + ret = priv->result; + p2p_debug("random_mac:vdev %d freq %d result %d %pM rnd_cookie %llu", + vdev_id, freq, priv->result, mac, rnd_cookie); + } + } + osif_request_put(request); + + return ret; +} + +/** + * p2p_request_random_mac() - request random mac mgmt tx + * 
@soc: soc + * @vdev_id: vdev id + * @mac: mac addr + * @freq: freq + * @rnd_cookie: cookie to be returned + * @duration: duration of tx timeout + * + * This function will add/append the random mac addr filter entry to vdev. + * If it is new added entry, it will request to set filter in target. + * + * Return: QDF_STATUS_SUCCESS: request successfully + * other: failed + */ +static QDF_STATUS +p2p_request_random_mac(struct wlan_objmgr_psoc *soc, uint32_t vdev_id, + uint8_t *mac, uint32_t freq, uint64_t rnd_cookie, + uint32_t duration) +{ + QDF_STATUS status; + + status = p2p_add_random_mac(soc, vdev_id, mac, freq, rnd_cookie); + if (status == QDF_STATUS_E_EXISTS) + return QDF_STATUS_SUCCESS; + else if (status != QDF_STATUS_SUCCESS) + return status; + + if (!p2p_set_rand_mac(soc, vdev_id, mac, freq, rnd_cookie)) + status = p2p_del_random_mac(soc, vdev_id, rnd_cookie, + duration); + + return status; +} + +void p2p_rand_mac_tx(struct tx_action_context *tx_action) +{ + struct wlan_objmgr_psoc *soc; + QDF_STATUS status; + + if (!tx_action || !tx_action->p2p_soc_obj || + !tx_action->p2p_soc_obj->soc) + return; + soc = tx_action->p2p_soc_obj->soc; + + if (!tx_action->no_ack && tx_action->chan && + tx_action->buf_len > MIN_MAC_HEADER_LEN && + p2p_is_vdev_support_rand_mac_by_id(soc, tx_action->vdev_id) && + p2p_is_random_mac(soc, tx_action->vdev_id, + &tx_action->buf[SRC_MAC_ADDR_OFFSET])) { + status = p2p_request_random_mac( + soc, tx_action->vdev_id, + &tx_action->buf[SRC_MAC_ADDR_OFFSET], + wlan_chan_to_freq(tx_action->chan), + tx_action->id, + tx_action->duration); + if (status == QDF_STATUS_SUCCESS) + tx_action->rand_mac_tx = true; + else + tx_action->rand_mac_tx = false; + } +} + +void +p2p_rand_mac_tx_done(struct wlan_objmgr_psoc *soc, + struct tx_action_context *tx_ctx) +{ + if (!tx_ctx || !tx_ctx->rand_mac_tx || !soc) + return; + + p2p_del_random_mac(soc, tx_ctx->vdev_id, tx_ctx->id, tx_ctx->duration); +} + +/** + * p2p_mac_clear_timeout() - clear random mac filter 
timeout + * @context: timer context + * + * This function will clear the mac addr rx filter in target if no + * reference to it. + * + * Return: void + */ +static void p2p_mac_clear_timeout(void *context) +{ + struct action_frame_random_mac *random_mac = context; + struct p2p_vdev_priv_obj *p2p_vdev_obj; + uint32_t freq; + uint8_t addr[QDF_MAC_ADDR_SIZE]; + uint32_t vdev_id; + bool clear = false; + + if (!random_mac || !random_mac->p2p_vdev_obj) { + p2p_err("invalid context for mac_clear timeout"); + return; + } + p2p_vdev_obj = random_mac->p2p_vdev_obj; + if (!p2p_vdev_obj || !p2p_vdev_obj->vdev) + return; + + qdf_spin_lock(&p2p_vdev_obj->random_mac_lock); + if (qdf_list_empty(&random_mac->cookie_list)) { + random_mac->in_use = false; + clear = true; + } + freq = random_mac->freq; + qdf_mem_copy(addr, random_mac->addr, QDF_MAC_ADDR_SIZE); + qdf_spin_unlock(&p2p_vdev_obj->random_mac_lock); + + vdev_id = wlan_vdev_get_id(p2p_vdev_obj->vdev); + p2p_debug("random_mac:clear timeout vdev %d %pM freq %d clr %d", + vdev_id, addr, freq, clear); + if (clear) + p2p_clear_mac_filter(wlan_vdev_get_psoc(p2p_vdev_obj->vdev), + vdev_id, addr, freq); +} + +void p2p_init_random_mac_vdev(struct p2p_vdev_priv_obj *p2p_vdev_obj) +{ + int32_t i; + + qdf_spinlock_create(&p2p_vdev_obj->random_mac_lock); + for (i = 0; i < MAX_RANDOM_MAC_ADDRS; i++) { + qdf_mem_zero(&p2p_vdev_obj->random_mac[i], + sizeof(struct action_frame_random_mac)); + p2p_vdev_obj->random_mac[i].in_use = false; + p2p_vdev_obj->random_mac[i].p2p_vdev_obj = p2p_vdev_obj; + qdf_list_create(&p2p_vdev_obj->random_mac[i].cookie_list, 0); + qdf_mc_timer_init(&p2p_vdev_obj->random_mac[i].clear_timer, + QDF_TIMER_TYPE_SW, p2p_mac_clear_timeout, + &p2p_vdev_obj->random_mac[i]); + } +} + +void p2p_deinit_random_mac_vdev(struct p2p_vdev_priv_obj *p2p_vdev_obj) +{ + int32_t i; + + p2p_del_all_rand_mac_vdev(p2p_vdev_obj->vdev); + for (i = 0; i < MAX_RANDOM_MAC_ADDRS; i++) { + 
qdf_mc_timer_destroy(&p2p_vdev_obj->random_mac[i].clear_timer); + qdf_list_destroy(&p2p_vdev_obj->random_mac[i].cookie_list); + } + qdf_spinlock_destroy(&p2p_vdev_obj->random_mac_lock); +} + +QDF_STATUS p2p_process_mgmt_tx(struct tx_action_context *tx_ctx) +{ + struct p2p_soc_priv_obj *p2p_soc_obj; + struct p2p_roc_context *curr_roc_ctx; + uint8_t *mac_to; + QDF_STATUS status; + + status = p2p_tx_context_check_valid(tx_ctx); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("invalid tx action context"); + if (tx_ctx) { + if (tx_ctx->buf) { + p2p_send_tx_conf(tx_ctx, false); + qdf_mem_free(tx_ctx->buf); + } + qdf_mem_free(tx_ctx); + } + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = tx_ctx->p2p_soc_obj; + + p2p_debug("soc:%pK, tx_ctx:%pK, vdev_id:%d, scan_id:%d, roc_cookie:%llx, chan:%d, buf:%pK, len:%d, off_chan:%d, cck:%d, ack:%d, duration:%d", + p2p_soc_obj->soc, tx_ctx, tx_ctx->vdev_id, + tx_ctx->scan_id, tx_ctx->roc_cookie, tx_ctx->chan, + tx_ctx->buf, tx_ctx->buf_len, tx_ctx->off_chan, + tx_ctx->no_cck, tx_ctx->no_ack, tx_ctx->duration); + + status = p2p_get_frame_info(tx_ctx->buf, tx_ctx->buf_len, + &(tx_ctx->frame_info)); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("unsupport frame"); + status = QDF_STATUS_E_INVAL; + goto fail; + } + + /* update P2P connection status with tx frame info */ + mac_to = &(tx_ctx->buf[DST_MAC_ADDR_OFFSET]); + p2p_tx_update_connection_status(p2p_soc_obj, + &(tx_ctx->frame_info), mac_to); + + status = p2p_vdev_check_valid(tx_ctx); + if (status != QDF_STATUS_SUCCESS) { + p2p_debug("invalid vdev or vdev mode"); + status = QDF_STATUS_E_INVAL; + goto fail; + } + + /* Do not wait for ack for probe response */ + if (tx_ctx->frame_info.sub_type == P2P_MGMT_PROBE_RSP && + !(tx_ctx->no_ack)) { + p2p_debug("Force set no ack to 1"); + tx_ctx->no_ack = 1; + } + + if (!tx_ctx->off_chan || !tx_ctx->chan) { + if (!tx_ctx->chan) + p2p_check_and_update_channel(tx_ctx); + status = p2p_execute_tx_action_frame(tx_ctx); + if (status != 
QDF_STATUS_SUCCESS) { + p2p_err("execute tx fail"); + goto fail; + } else + return QDF_STATUS_SUCCESS; + } + + /* For off channel tx case */ + curr_roc_ctx = p2p_find_current_roc_ctx(p2p_soc_obj); + if (curr_roc_ctx && (curr_roc_ctx->chan == tx_ctx->chan)) { + if ((curr_roc_ctx->roc_state == ROC_STATE_REQUESTED) || + (curr_roc_ctx->roc_state == ROC_STATE_STARTED)) { + tx_ctx->roc_cookie = (uintptr_t)curr_roc_ctx; + status = qdf_list_insert_back( + &p2p_soc_obj->tx_q_roc, + &tx_ctx->node); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("Failed to insert off chan tx context to wait roc req queue"); + goto fail; + } else + return QDF_STATUS_SUCCESS; + } else if (curr_roc_ctx->roc_state == ROC_STATE_ON_CHAN) { + p2p_adjust_tx_wait(tx_ctx); + status = p2p_restart_roc_timer(curr_roc_ctx); + curr_roc_ctx->tx_ctx = tx_ctx; + if (status != QDF_STATUS_SUCCESS) { + p2p_err("restart roc timer fail"); + goto fail; + } + status = p2p_execute_tx_action_frame(tx_ctx); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("execute tx fail"); + goto fail; + } else + return QDF_STATUS_SUCCESS; + } + } + + curr_roc_ctx = p2p_find_roc_by_chan(p2p_soc_obj, tx_ctx->chan); + if (curr_roc_ctx && (curr_roc_ctx->roc_state == ROC_STATE_IDLE)) { + tx_ctx->roc_cookie = (uintptr_t)curr_roc_ctx; + status = qdf_list_insert_back( + &p2p_soc_obj->tx_q_roc, + &tx_ctx->node); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("Failed to insert off chan tx context to wait roc req queue"); + goto fail; + } else { + return QDF_STATUS_SUCCESS; + } + } + + status = p2p_roc_req_for_tx_action(tx_ctx); + if (status != QDF_STATUS_SUCCESS) { + p2p_err("Failed to request roc before off chan tx"); + goto fail; + } + + return QDF_STATUS_SUCCESS; + +fail: + p2p_send_tx_conf(tx_ctx, false); + qdf_idr_remove(&p2p_soc_obj->p2p_idr, tx_ctx->id); + qdf_mem_free(tx_ctx->buf); + qdf_mem_free(tx_ctx); + + return status; +} + +QDF_STATUS p2p_process_mgmt_tx_cancel( + struct cancel_roc_context *cancel_tx) +{ + bool is_roc_q = false; 
+ bool is_ack_q = false; + struct tx_action_context *cur_tx_ctx; + struct p2p_roc_context *cur_roc_ctx; + struct cancel_roc_context cancel_roc; + + if (!cancel_tx || !(cancel_tx->cookie)) { + p2p_info("invalid cancel info"); + return QDF_STATUS_SUCCESS; + } + + p2p_debug("cookie:0x%llx", cancel_tx->cookie); + + cur_tx_ctx = p2p_find_tx_ctx(cancel_tx->p2p_soc_obj, + cancel_tx->cookie, &is_roc_q, &is_ack_q); + if (cur_tx_ctx) { + if (is_roc_q) { + cancel_roc.p2p_soc_obj = + cancel_tx->p2p_soc_obj; + cancel_roc.cookie = + cur_tx_ctx->roc_cookie; + return p2p_process_cancel_roc_req(&cancel_roc); + } + if (is_ack_q) { + /*Has tx action frame, waiting for ack*/ + p2p_debug("Waiting for ack, cookie %llx", + cancel_tx->cookie); + } + } else { + p2p_debug("Failed to find tx ctx by cookie, cookie %llx", + cancel_tx->cookie); + + cur_roc_ctx = p2p_find_roc_by_tx_ctx(cancel_tx->p2p_soc_obj, + cancel_tx->cookie); + if (cur_roc_ctx) { + p2p_debug("tx ctx:%llx, roc:%pK", + cancel_tx->cookie, cur_roc_ctx); + cancel_roc.p2p_soc_obj = + cancel_tx->p2p_soc_obj; + cancel_roc.cookie = (uintptr_t) cur_roc_ctx; + return p2p_process_cancel_roc_req(&cancel_roc); + } + + p2p_debug("Failed to find roc by tx ctx"); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS p2p_process_mgmt_tx_ack_cnf( + struct p2p_tx_conf_event *tx_cnf_event) +{ + struct p2p_tx_cnf tx_cnf; + struct tx_action_context *tx_ctx; + struct p2p_soc_priv_obj *p2p_soc_obj; + struct p2p_start_param *start_param; + + p2p_soc_obj = tx_cnf_event->p2p_soc_obj; + + if (!p2p_soc_obj || !(p2p_soc_obj->start_param)) { + qdf_nbuf_free(tx_cnf_event->nbuf); + p2p_err("Invalid p2p soc object or start parameters"); + return QDF_STATUS_E_INVAL; + } + + tx_ctx = p2p_find_tx_ctx_by_nbuf(p2p_soc_obj, tx_cnf_event->nbuf); + qdf_nbuf_free(tx_cnf_event->nbuf); + if (!tx_ctx) { + p2p_err("can't find tx_ctx, tx ack comes late"); + return QDF_STATUS_SUCCESS; + } + + tx_cnf.vdev_id = tx_ctx->vdev_id; + 
tx_cnf.action_cookie = (uint64_t)tx_ctx->id; + tx_cnf.buf = tx_ctx->buf; + tx_cnf.buf_len = tx_ctx->buf_len; + tx_cnf.status = tx_cnf_event->status; + + p2p_debug("soc:%pK, vdev_id:%d, action_cookie:%llx, len:%d, status:%d, buf:%pK", + p2p_soc_obj->soc, tx_cnf.vdev_id, + tx_cnf.action_cookie, tx_cnf.buf_len, + tx_cnf.status, tx_cnf.buf); + + p2p_rand_mac_tx_done(p2p_soc_obj->soc, tx_ctx); + + /* disable tx timer */ + p2p_disable_tx_timer(tx_ctx); + + start_param = p2p_soc_obj->start_param; + if (start_param->tx_cnf_cb) + start_param->tx_cnf_cb(start_param->tx_cnf_cb_data, + &tx_cnf); + else + p2p_debug("Got tx conf, but no valid up layer callback"); + + p2p_remove_tx_context(tx_ctx); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS p2p_process_rx_mgmt( + struct p2p_rx_mgmt_event *rx_mgmt_event) +{ + struct p2p_rx_mgmt_frame *rx_mgmt; + struct p2p_soc_priv_obj *p2p_soc_obj; + struct p2p_start_param *start_param; + struct p2p_frame_info frame_info; + uint8_t *mac_from; + + p2p_soc_obj = rx_mgmt_event->p2p_soc_obj; + rx_mgmt = rx_mgmt_event->rx_mgmt; + + if (!p2p_soc_obj || !(p2p_soc_obj->start_param)) { + p2p_err("Invalid psoc object or start parameters"); + qdf_mem_free(rx_mgmt); + return QDF_STATUS_E_INVAL; + } + + p2p_debug("soc:%pK, frame_len:%d, rx_chan:%d, vdev_id:%d, frm_type:%d, rx_rssi:%d, buf:%pK", + p2p_soc_obj->soc, rx_mgmt->frame_len, + rx_mgmt->rx_chan, rx_mgmt->vdev_id, rx_mgmt->frm_type, + rx_mgmt->rx_rssi, rx_mgmt->buf); + + if (rx_mgmt->frm_type == MGMT_ACTION_VENDOR_SPECIFIC) { + p2p_get_frame_info(rx_mgmt->buf, rx_mgmt->frame_len, + &frame_info); + + /* update P2P connection status with rx frame info */ + mac_from = &(rx_mgmt->buf[SRC_MAC_ADDR_OFFSET]); + p2p_rx_update_connection_status(p2p_soc_obj, + &frame_info, mac_from); + + p2p_debug("action_sub_type %u, action_type %d", + frame_info.public_action_type, + frame_info.action_type); + + if ((frame_info.public_action_type == + P2P_PUBLIC_ACTION_NOT_SUPPORT) && + (frame_info.action_type == + 
P2P_ACTION_NOT_SUPPORT)) { + p2p_debug("non-p2p frame, drop it"); + qdf_mem_free(rx_mgmt); + return QDF_STATUS_SUCCESS; + } else { + p2p_debug("p2p frame, extend roc accordingly"); + p2p_extend_roc_timer(p2p_soc_obj, &frame_info); + } + } + + if (rx_mgmt->frm_type == MGMT_ACTION_CATEGORY_VENDOR_SPECIFIC) + p2p_get_frame_info(rx_mgmt->buf, rx_mgmt->frame_len, + &frame_info); + + start_param = p2p_soc_obj->start_param; + if (start_param->rx_cb) + start_param->rx_cb(start_param->rx_cb_data, rx_mgmt); + else + p2p_debug("rx mgmt, but no valid up layer callback"); + + qdf_mem_free(rx_mgmt); + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_off_chan_tx.h b/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_off_chan_tx.h new file mode 100644 index 0000000000000000000000000000000000000000..367ecd632587d1eb25d0c0ab4bd2958b9f68fd22 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_off_chan_tx.h @@ -0,0 +1,439 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
 */

/**
 * DOC: Defines off channel tx API & structures
 */

#ifndef _WLAN_P2P_OFF_CHAN_TX_H_
#define _WLAN_P2P_OFF_CHAN_TX_H_

/*
 * NOTE(review): the include targets below were lost in extraction
 * (the bracketed file names are missing) — presumably the qdf list,
 * timer and nbuf headers; restore the <...> names before building.
 */
#include
#include
#include

#define P2P_EID_VENDOR 0xdd
#define P2P_ACTION_VENDOR_SPECIFIC_CATEGORY 0x7F
#define P2P_PUBLIC_ACTION_FRAME 0x4
#define P2P_MAC_MGMT_ACTION 0xD
#define P2P_PUBLIC_ACTION_VENDOR_SPECIFIC 0x9
#define P2P_NOA_ATTR 0xC

#define P2P_MAX_NOA_ATTR_LEN 31
#define P2P_IE_HEADER_LEN 6
#define P2P_MAX_IE_LENGTH 255
/* Byte offsets into the 802.11 mgmt frame (24-byte header + fields) */
#define P2P_ACTION_OFFSET 24
#define P2P_PUBLIC_ACTION_FRAME_TYPE_OFFSET 30
#define P2P_ACTION_FRAME_TYPE_OFFSET 29
#define PROBE_RSP_IE_OFFSET 36

#define P2P_TX_PKT_MIN_HEADROOM (64)

/* WFA P2P OUI (50:6F:9A) + OUI type 0x09 */
#define P2P_OUI "\x50\x6f\x9a\x09"
#define P2P_OUI_SIZE 4

/* Timeouts in ms for response wait, ack wait and tx completion */
#define P2P_ACTION_FRAME_RSP_WAIT 500
#define P2P_ACTION_FRAME_ACK_WAIT 300
#define P2P_ACTION_FRAME_TX_TIMEOUT 2000

#define DST_MAC_ADDR_OFFSET 4
#define SRC_MAC_ADDR_OFFSET (DST_MAC_ADDR_OFFSET + QDF_MAC_ADDR_SIZE)

#define P2P_NOA_STREAM_ARR_SIZE (P2P_MAX_NOA_ATTR_LEN + (2 * P2P_IE_HEADER_LEN))

/* Extract type/subtype bit-fields from the frame control byte */
#define P2P_GET_TYPE_FRM_FC(__fc__)	(((__fc__) & 0x0F) >> 2)
#define P2P_GET_SUBTYPE_FRM_FC(__fc__)	(((__fc__) & 0xF0) >> 4)

/* Wait (ms) for the random mac rx filter set response */
#define WLAN_WAIT_TIME_SET_RND 100

struct p2p_soc_priv_obj;
struct cancel_roc_context;
struct p2p_tx_conf_event;
struct p2p_rx_mgmt_event;

/**
 * enum p2p_frame_type - frame type
 * @P2P_FRAME_MGMT: mgmt frame
 * @P2P_FRAME_NOT_SUPPORT: not support frame type
 */
enum p2p_frame_type {
	P2P_FRAME_MGMT = 0,
	P2P_FRAME_NOT_SUPPORT,
};

/**
 * enum p2p_frame_sub_type - frame sub type
 * @P2P_MGMT_PROBE_REQ: probe request frame
 * @P2P_MGMT_PROBE_RSP: probe response frame
 * @P2P_MGMT_ACTION: action frame
 * @P2P_MGMT_NOT_SUPPORT: not support sub frame type
 */
enum p2p_frame_sub_type {
	P2P_MGMT_PROBE_REQ = 4,
	P2P_MGMT_PROBE_RSP,
	P2P_MGMT_ACTION = 13,
	P2P_MGMT_NOT_SUPPORT,
};

/**
 * enum p2p_public_action_type - public action frame type
 * @P2P_PUBLIC_ACTION_NEG_REQ: go 
negotiation request frame + * @P2P_PUBLIC_ACTION_NEG_RSP: go negotiation response frame + * @P2P_PUBLIC_ACTION_NEG_CNF: go negotiation confirm frame + * @P2P_PUBLIC_ACTION_INVIT_REQ: p2p invitation request frame + * @P2P_PUBLIC_ACTION_INVIT_RSP: p2p invitation response frame + * @P2P_PUBLIC_ACTION_DEV_DIS_REQ: device discoverability request + * @P2P_PUBLIC_ACTION_DEV_DIS_RSP: device discoverability response + * @P2P_PUBLIC_ACTION_PROV_DIS_REQ: provision discovery request + * @P2P_PUBLIC_ACTION_PROV_DIS_RSP: provision discovery response + * @P2P_PUBLIC_ACTION_GAS_INIT_REQ: gas initial request, + * @P2P_PUBLIC_ACTION_GAS_INIT_RSP: gas initial response + * @P2P_PUBLIC_ACTION_GAS_COMB_REQ: gas comeback request + * @P2P_PUBLIC_ACTION_GAS_COMB_RSP: gas comeback response + * @P2P_PUBLIC_ACTION_NOT_SUPPORT: not support p2p public action frame + */ +enum p2p_public_action_type { + P2P_PUBLIC_ACTION_NEG_REQ = 0, + P2P_PUBLIC_ACTION_NEG_RSP, + P2P_PUBLIC_ACTION_NEG_CNF, + P2P_PUBLIC_ACTION_INVIT_REQ, + P2P_PUBLIC_ACTION_INVIT_RSP, + P2P_PUBLIC_ACTION_DEV_DIS_REQ, + P2P_PUBLIC_ACTION_DEV_DIS_RSP, + P2P_PUBLIC_ACTION_PROV_DIS_REQ, + P2P_PUBLIC_ACTION_PROV_DIS_RSP, + P2P_PUBLIC_ACTION_GAS_INIT_REQ = 10, + P2P_PUBLIC_ACTION_GAS_INIT_RSP, + P2P_PUBLIC_ACTION_GAS_COMB_REQ, + P2P_PUBLIC_ACTION_GAS_COMB_RSP, + P2P_PUBLIC_ACTION_NOT_SUPPORT, +}; + +/** + * enum p2p_action_type - p2p action frame type + * @P2P_ACTION_PRESENCE_REQ: presence request frame + * @P2P_ACTION_PRESENCE_RSP: presence response frame + * @P2P_ACTION_NOT_SUPPORT: not support action frame type + */ +enum p2p_action_type { + P2P_ACTION_PRESENCE_REQ = 1, + P2P_ACTION_PRESENCE_RSP = 2, + P2P_ACTION_NOT_SUPPORT, +}; + +struct p2p_frame_info { + enum p2p_frame_type type; + enum p2p_frame_sub_type sub_type; + enum p2p_public_action_type public_action_type; + enum p2p_action_type action_type; +}; + +/** + * struct tx_action_context - tx action frame context + * @node: Node for next element in the list + * @p2p_soc_obj: 
Pointer to SoC global p2p private object + * @vdev_id: Vdev id on which this request has come + * @scan_id: Scan id given by scan component for this roc req + * @roc_cookie: Cookie for remain on channel request + * @id: Identifier of this tx context + * @chan: Chan for which this tx has been requested + * @buf: tx buffer + * @buf_len: Length of tx buffer + * @off_chan: Is this off channel tx + * @no_cck: Required cck or not + * @no_ack: Required ack or not + * @duration: Duration for the RoC + * @tx_timer: RoC timer + * @frame_info: Frame type information + */ +struct tx_action_context { + qdf_list_node_t node; + struct p2p_soc_priv_obj *p2p_soc_obj; + int vdev_id; + int scan_id; + uint64_t roc_cookie; + int32_t id; + uint8_t chan; + uint8_t *buf; + int buf_len; + bool off_chan; + bool no_cck; + bool no_ack; + bool rand_mac_tx; + uint32_t duration; + qdf_mc_timer_t tx_timer; + struct p2p_frame_info frame_info; + qdf_nbuf_t nbuf; +}; + +/** + * p2p_rand_mac_tx_done() - process random mac mgmt tx done + * @soc: soc + * @tx_ctx: tx context + * + * This function will remove the random mac addr filter reference. + * + * Return: void + */ +void +p2p_rand_mac_tx_done(struct wlan_objmgr_psoc *soc, + struct tx_action_context *tx_ctx); + +/** + * p2p_clear_mac_filter() - send clear mac addr filter cmd + * @soc: soc + * @vdev_id: vdev id + * @mac: mac addr + * @freq: freq + * + * This function send clear random mac addr filter command to p2p component + * msg core + * + * Return: QDF_STATUS_SUCCESS - if sent successfully. + * otherwise: failed. + */ +QDF_STATUS +p2p_clear_mac_filter(struct wlan_objmgr_psoc *soc, uint32_t vdev_id, + uint8_t *mac, uint32_t freq); + +/** + * p2p_is_vdev_support_rand_mac() - check vdev type support random mac mgmt + * tx or not + * @vdev: vdev object + * + * Return: true: support random mac mgmt tx + * false: not support random mac mgmt tx. 
+ */ +bool +p2p_is_vdev_support_rand_mac(struct wlan_objmgr_vdev *vdev); + +/** + * p2p_dump_tx_queue() - dump tx queue + * @p2p_soc_obj: p2p soc private object + * + * This function dumps tx queue and output details about tx context in + * queue. + * + * Return: None + */ +void p2p_dump_tx_queue(struct p2p_soc_priv_obj *p2p_soc_obj); + +/** + * p2p_ready_to_tx_frame() - dump tx queue + * @p2p_soc_obj: p2p soc private object + * @cookie: cookie is pointer to roc + * + * This function find out the tx context in wait for roc queue and tx + * this frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS p2p_ready_to_tx_frame(struct p2p_soc_priv_obj *p2p_soc_obj, + uint64_t cookie); + +/** + * p2p_cleanup_tx_sync() - Cleanup tx queue + * @p2p_soc_obj: p2p psoc private object + * @vdev: vdev object + * + * This function cleanup tx context in queue until cancellation done. + * To avoid deadlock, don't call from scheduler thread. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS p2p_cleanup_tx_sync( + struct p2p_soc_priv_obj *p2p_soc_obj, + struct wlan_objmgr_vdev *vdev); + +/** + * p2p_process_cleanup_tx_queue() - process the message to cleanup tx + * @param: pointer to cleanup parameters + * + * This function cleanup wait for roc queue and wait for ack queue. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS p2p_process_cleanup_tx_queue( + struct p2p_cleanup_param *param); + +/** + * p2p_process_mgmt_tx() - Process mgmt frame tx request + * @tx_ctx: tx context + * + * This function handles mgmt frame tx request. It will call API from + * mgmt txrx component. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS p2p_process_mgmt_tx(struct tx_action_context *tx_ctx); + +/** + * p2p_process_mgmt_tx_cancel() - Process cancel mgmt frame tx request + * @cancel_tx: cancel tx context + * + * This function cancel mgmt frame tx request by cookie. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS p2p_process_mgmt_tx_cancel( + struct cancel_roc_context *cancel_tx); + +/** + * p2p_process_mgmt_tx_ack_cnf() - Process tx ack event + * @tx_cnf_event: tx confirmation event information + * + * This function mgmt frame tx confirmation. It will deliver this + * event to up layer + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS p2p_process_mgmt_tx_ack_cnf( + struct p2p_tx_conf_event *tx_cnf_event); + +/** + * p2p_process_rx_mgmt() - Process rx mgmt frame event + * @rx_mgmt_event: rx mgmt frame event information + * + * This function mgmt frame rx mgmt frame event. It will deliver this + * event to up layer + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS p2p_process_rx_mgmt( + struct p2p_rx_mgmt_event *rx_mgmt_event); + +/** + * p2p_find_tx_ctx_by_nbuf() - find tx context by nbuf + * @p2p_soc_obj: p2p soc object + * @nbuf: pointer to nbuf + * + * This function finds out tx context by nbuf. + * + * Return: pointer to tx context + */ +struct tx_action_context *p2p_find_tx_ctx_by_nbuf( + struct p2p_soc_priv_obj *p2p_soc_obj, void *nbuf); + +#define P2P_80211_FRM_SA_OFFSET 10 + +/** + * p2p_del_random_mac() - del mac fitler from given vdev rand mac list + * @soc: soc object + * @vdev_id: vdev id + * @rnd_cookie: random mac mgmt tx cookie + * @duration: timeout value to flush the addr in target. + * + * This function will del the mac addr filter from vdev random mac addr list. + * If there is no reference to mac addr, it will set a clear timer to flush it + * in target finally. + * + * Return: QDF_STATUS_SUCCESS - del successfully. + * other : failed to del the mac address entry. 
+ */ +QDF_STATUS +p2p_del_random_mac(struct wlan_objmgr_psoc *soc, uint32_t vdev_id, + uint64_t rnd_cookie, uint32_t duration); + +/** + * p2p_check_random_mac() - check random mac addr or not + * @soc: soc context + * @vdev_id: vdev id + * @random_mac_addr: mac addr to be checked + * + * This function check the input addr is random mac addr or not for vdev. + * + * Return: true if addr is random mac address else false. + */ +bool p2p_check_random_mac(struct wlan_objmgr_psoc *soc, uint32_t vdev_id, + uint8_t *random_mac_addr); + +/** + * p2p_process_set_rand_mac() - process the set random mac command + * @set_filter_req: request data + * + * This function will process the set mac addr filter command. + * + * Return: QDF_STATUS_SUCCESS: if process successfully + * other: failed. + */ +QDF_STATUS p2p_process_set_rand_mac( + struct p2p_set_mac_filter_req *set_filter_req); + +/** + * p2p_process_set_rand_mac_rsp() - process the set random mac response + * @resp: response date + * + * This function will process the set mac addr filter event. + * + * Return: QDF_STATUS_SUCCESS: if process successfully + * other: failed. + */ +QDF_STATUS p2p_process_set_rand_mac_rsp(struct p2p_mac_filter_rsp *resp); + +/** + * p2p_del_all_rand_mac_vdev() - del all random mac filter in vdev + * @vdev: vdev object + * + * This function will del all random mac filter in vdev + * + * Return: void + */ +void p2p_del_all_rand_mac_vdev(struct wlan_objmgr_vdev *vdev); + +/** + * p2p_del_all_rand_mac_soc() - del all random mac filter in soc + * @soc: soc object + * + * This function will del all random mac filter in all vdev of soc + * + * Return: void + */ +void p2p_del_all_rand_mac_soc(struct wlan_objmgr_psoc *soc); + +/** + * p2p_rand_mac_tx() - handle random mac mgmt tx + * @tx_action: tx action context + * + * This function will check whether need to set random mac tx filter for a + * given mgmt tx request and do the mac addr filter process as needed. 
+ * + * Return: void + */ +void p2p_rand_mac_tx(struct tx_action_context *tx_action); + +/** + * p2p_init_random_mac_vdev() - Init random mac data for vdev + * @p2p_vdev_obj: p2p vdev private object + * + * This function will init the per vdev random mac data structure. + * + * Return: void + */ +void p2p_init_random_mac_vdev(struct p2p_vdev_priv_obj *p2p_vdev_obj); + +/** + * p2p_deinit_random_mac_vdev() - Init random mac data for vdev + * @p2p_vdev_obj: p2p vdev private object + * + * This function will deinit the per vdev random mac data structure. + * + * Return: void + */ +void p2p_deinit_random_mac_vdev(struct p2p_vdev_priv_obj *p2p_vdev_obj); + +#endif /* _WLAN_P2P_OFF_CHAN_TX_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_roc.c b/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_roc.c new file mode 100644 index 0000000000000000000000000000000000000000..13423ca302deef80451ac1ca8375203e46695fff --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_roc.c @@ -0,0 +1,932 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
 */

/**
 * DOC: This file contains RoC API definitions
 */

/* NOTE(review): the ten bare #include lines below lost their <header>
 * targets during extraction - restore from upstream before building.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "wlan_p2p_public_struct.h"
#include "wlan_p2p_tgt_api.h"
#include "wlan_p2p_ucfg_api.h"
#include "wlan_p2p_roc.h"
#include "wlan_p2p_main.h"
#include "wlan_p2p_off_chan_tx.h"

/**
 * p2p_mgmt_rx_ops() - register or unregister rx callback
 * @psoc: psoc object
 * @isregister: register if true, unregister if false
 *
 * This function registers or unregisters rx callback (for probe
 * request frames only) to mgmt txrx component.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
static QDF_STATUS p2p_mgmt_rx_ops(struct wlan_objmgr_psoc *psoc,
	bool isregister)
{
	struct mgmt_txrx_mgmt_frame_cb_info frm_cb_info;
	QDF_STATUS status;

	p2p_debug("psoc:%pK, is register rx:%d", psoc, isregister);

	frm_cb_info.frm_type = MGMT_PROBE_REQ;
	frm_cb_info.mgmt_rx_cb = tgt_p2p_mgmt_frame_rx_cb;

	if (isregister)
		status = wlan_mgmt_txrx_register_rx_cb(psoc,
				WLAN_UMAC_COMP_P2P, &frm_cb_info, 1);
	else
		status = wlan_mgmt_txrx_deregister_rx_cb(psoc,
				WLAN_UMAC_COMP_P2P, &frm_cb_info, 1);

	return status;
}

/**
 * p2p_scan_start() - Start scan
 * @roc_ctx: remain on channel request
 *
 * This function trigger a start scan request (P2P listen) to the scan
 * component on the roc channel.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
static QDF_STATUS p2p_scan_start(struct p2p_roc_context *roc_ctx)
{
	QDF_STATUS status;
	struct scan_start_request *req;
	struct wlan_objmgr_vdev *vdev;
	struct p2p_soc_priv_obj *p2p_soc_obj = roc_ctx->p2p_soc_obj;
	uint32_t go_num;

	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(
			p2p_soc_obj->soc, roc_ctx->vdev_id,
			WLAN_P2P_ID);
	if (!vdev) {
		p2p_err("vdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	req = qdf_mem_malloc(sizeof(*req));
	if (!req) {
		p2p_err("failed to alloc scan start request");
		status = QDF_STATUS_E_NOMEM;
		goto fail;
	}

	ucfg_scan_init_default_params(vdev, req);

	roc_ctx->scan_id = ucfg_scan_get_scan_id(p2p_soc_obj->soc);
	req->vdev = vdev;
	req->scan_req.scan_id = roc_ctx->scan_id;
	req->scan_req.p2p_scan_type = SCAN_P2P_LISTEN;
	req->scan_req.scan_req_id = p2p_soc_obj->scan_req_id;
	req->scan_req.chan_list.num_chan = 1;
	req->scan_req.chan_list.chan[0].freq = wlan_chan_to_freq(roc_ctx->chan);
	/* listen duration maps to the passive dwell on the roc channel */
	req->scan_req.dwell_time_passive = roc_ctx->duration;
	req->scan_req.dwell_time_active = 0;
	req->scan_req.scan_priority = SCAN_PRIORITY_HIGH;
	req->scan_req.num_bssid = 1;
	qdf_set_macaddr_broadcast(&req->scan_req.bssid_list[0]);

	if (req->scan_req.dwell_time_passive < P2P_MAX_ROC_DURATION) {
		go_num = policy_mgr_mode_specific_connection_count(
				p2p_soc_obj->soc, PM_P2P_GO_MODE, NULL);
		p2p_debug("present go number:%d", go_num);
		/* scale the dwell up depending on whether a GO exists */
		if (go_num)
			req->scan_req.dwell_time_passive *=
					P2P_ROC_DURATION_MULTI_GO_PRESENT;
		else
			req->scan_req.dwell_time_passive *=
					P2P_ROC_DURATION_MULTI_GO_ABSENT;
		/* this is to protect too huge value if some customers
		 * give a higher value from supplicant
		 */
		if (req->scan_req.dwell_time_passive > P2P_MAX_ROC_DURATION)
			req->scan_req.dwell_time_passive = P2P_MAX_ROC_DURATION;
	}
	p2p_debug("FW requested roc duration is:%d for chan: %d",
		  req->scan_req.dwell_time_passive, roc_ctx->chan);

	status = ucfg_scan_start(req);

	p2p_debug("start scan, scan req id:%d, scan id:%d, status:%d",
		p2p_soc_obj->scan_req_id, roc_ctx->scan_id, status);
fail:
	wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID);

	return status;
}

/**
 * p2p_scan_abort() - Abort scan
 * @roc_ctx: remain on channel request
 *
 * This function trigger an abort scan request to scan component to
 * cancel the listen (roc) identified by @roc_ctx->scan_id.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
static QDF_STATUS p2p_scan_abort(struct p2p_roc_context *roc_ctx)
{
	QDF_STATUS status;
	struct scan_cancel_request *req;
	struct wlan_objmgr_vdev *vdev;
	struct p2p_soc_priv_obj *p2p_soc_obj = roc_ctx->p2p_soc_obj;

	p2p_debug("abort scan, scan req id:%d, scan id:%d",
		p2p_soc_obj->scan_req_id, roc_ctx->scan_id);

	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(
			p2p_soc_obj->soc, roc_ctx->vdev_id,
			WLAN_P2P_ID);
	if (!vdev) {
		p2p_err("vdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	req = qdf_mem_malloc(sizeof(*req));
	if (!req) {
		p2p_err("failed to alloc scan cancel request");
		status = QDF_STATUS_E_NOMEM;
		goto fail;
	}

	req->vdev = vdev;
	req->cancel_req.requester = p2p_soc_obj->scan_req_id;
	req->cancel_req.scan_id = roc_ctx->scan_id;
	req->cancel_req.vdev_id = roc_ctx->vdev_id;
	req->cancel_req.req_type = WLAN_SCAN_CANCEL_SINGLE;

	qdf_mtrace(QDF_MODULE_ID_P2P, QDF_MODULE_ID_SCAN,
		   req->cancel_req.req_type,
		   req->vdev->vdev_objmgr.vdev_id, req->cancel_req.scan_id);
	status = ucfg_scan_cancel(req);

	p2p_debug("abort scan, scan req id:%d, scan id:%d, status:%d",
		p2p_soc_obj->scan_req_id, roc_ctx->scan_id, status);
fail:
	wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID);

	return status;
}

/**
 * p2p_send_roc_event() - Send roc event
 * @roc_ctx: remain on channel request
 * @evt: roc event information
 *
 * This function send out roc event to up layer.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
static QDF_STATUS p2p_send_roc_event(
	struct p2p_roc_context *roc_ctx, enum p2p_roc_event evt)
{
	struct p2p_soc_priv_obj *p2p_soc_obj;
	struct p2p_event p2p_evt;
	struct p2p_start_param *start_param;

	p2p_soc_obj = roc_ctx->p2p_soc_obj;
	if (!p2p_soc_obj || !(p2p_soc_obj->start_param)) {
		p2p_err("Invalid p2p soc object or start parameters");
		return QDF_STATUS_E_INVAL;
	}
	start_param = p2p_soc_obj->start_param;
	if (!(start_param->event_cb)) {
		p2p_err("Invalid p2p event callback to up layer");
		return QDF_STATUS_E_INVAL;
	}

	/* the roc context address doubles as the cookie seen by callers */
	p2p_evt.vdev_id = roc_ctx->vdev_id;
	p2p_evt.roc_event = evt;
	p2p_evt.cookie = (uint64_t)roc_ctx->id;
	p2p_evt.chan = roc_ctx->chan;
	p2p_evt.duration = roc_ctx->duration;

	p2p_debug("p2p soc_obj:%pK, roc_ctx:%pK, vdev_id:%d, roc_event:"
		"%d, cookie:%llx, chan:%d, duration:%d", p2p_soc_obj,
		roc_ctx, p2p_evt.vdev_id, p2p_evt.roc_event,
		p2p_evt.cookie, p2p_evt.chan, p2p_evt.duration);

	start_param->event_cb(start_param->event_cb_data, &p2p_evt);

	return QDF_STATUS_SUCCESS;
}

/**
 * p2p_destroy_roc_ctx() - destroy roc ctx
 * @roc_ctx: remain on channel request
 * @up_layer_event: if send uplayer event
 * @in_roc_queue: if roc context in roc queue
 *
 * This function destroy roc context: optionally notifies the up layer,
 * optionally unlinks the context from the roc queue, releases its idr
 * id, and frees the memory.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
static QDF_STATUS p2p_destroy_roc_ctx(struct p2p_roc_context *roc_ctx,
	bool up_layer_event, bool in_roc_queue)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct p2p_soc_priv_obj *p2p_soc_obj = roc_ctx->p2p_soc_obj;

	p2p_debug("p2p_soc_obj:%pK, roc_ctx:%pK, up_layer_event:%d, in_roc_queue:%d",
		p2p_soc_obj, roc_ctx, up_layer_event, in_roc_queue);

	if (up_layer_event) {
		/* synthesize READY first so the up layer always sees the
		 * ready/completed pair in order
		 */
		if (roc_ctx->roc_state < ROC_STATE_ON_CHAN)
			p2p_send_roc_event(roc_ctx, ROC_EVENT_READY_ON_CHAN);
		p2p_send_roc_event(roc_ctx, ROC_EVENT_COMPLETED);
	}

	if (in_roc_queue) {
		status = qdf_list_remove_node(&p2p_soc_obj->roc_q,
				(qdf_list_node_t *)roc_ctx);
		if (QDF_STATUS_SUCCESS != status)
			p2p_err("Failed to remove roc req, status %d", status);
	}

	qdf_idr_remove(&p2p_soc_obj->p2p_idr, roc_ctx->id);
	qdf_mem_free(roc_ctx);

	return status;
}

/**
 * p2p_execute_cancel_roc_req() - Execute cancel roc request
 * @roc_ctx: remain on channel request
 *
 * This function stop roc timer, abort scan and unregister mgmt rx
 * callback.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
static QDF_STATUS p2p_execute_cancel_roc_req(
	struct p2p_roc_context *roc_ctx)
{
	QDF_STATUS status;
	struct p2p_soc_priv_obj *p2p_soc_obj = roc_ctx->p2p_soc_obj;

	p2p_debug("p2p soc obj:%pK, roc ctx:%pK, vdev_id:%d, scan_id:%d, tx ctx:%pK, chan:%d, phy_mode:%d, duration:%d, roc_type:%d, roc_state:%d",
		p2p_soc_obj, roc_ctx, roc_ctx->vdev_id,
		roc_ctx->scan_id, roc_ctx->tx_ctx, roc_ctx->chan,
		roc_ctx->phy_mode, roc_ctx->duration,
		roc_ctx->roc_type, roc_ctx->roc_state);

	roc_ctx->roc_state = ROC_STATE_CANCEL_IN_PROG;
	qdf_event_reset(&p2p_soc_obj->cancel_roc_done);
	status = qdf_mc_timer_stop_sync(&roc_ctx->roc_timer);
	if (status != QDF_STATUS_SUCCESS)
		p2p_err("Failed to stop roc timer, roc %pK", roc_ctx);

	status = p2p_scan_abort(roc_ctx);
	if (status != QDF_STATUS_SUCCESS) {
		/* abort failed: tear everything down here since no scan
		 * complete event will come to do it for us
		 */
		p2p_err("Failed to abort scan, status:%d, destroy roc %pK",
			status, roc_ctx);
		qdf_mc_timer_destroy(&roc_ctx->roc_timer);
		p2p_mgmt_rx_ops(p2p_soc_obj->soc, false);
		p2p_destroy_roc_ctx(roc_ctx, true, true);
		qdf_event_set(&p2p_soc_obj->cancel_roc_done);
		return status;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * p2p_roc_timeout() - Callback for roc timeout
 * @pdata: pointer to p2p soc private object
 *
 * This function is callback for roc time out: it cancels the currently
 * active roc unless a cancellation is already in progress.
 *
 * Return: None
 */
static void p2p_roc_timeout(void *pdata)
{
	struct p2p_roc_context *roc_ctx;
	struct p2p_soc_priv_obj *p2p_soc_obj;

	p2p_debug("p2p soc obj:%pK", pdata);

	p2p_soc_obj = pdata;
	if (!p2p_soc_obj) {
		p2p_err("Invalid p2p soc object");
		return;
	}

	roc_ctx = p2p_find_current_roc_ctx(p2p_soc_obj);
	if (!roc_ctx) {
		p2p_debug("No P2P roc is pending");
		return;
	}

	p2p_debug("p2p soc obj:%pK, roc ctx:%pK, vdev_id:%d, scan_id:%d, tx ctx:%pK, chan:%d, phy_mode:%d, duration:%d, roc_type:%d, roc_state:%d",
		roc_ctx->p2p_soc_obj, roc_ctx, roc_ctx->vdev_id,
		roc_ctx->scan_id, roc_ctx->tx_ctx, roc_ctx->chan,
		roc_ctx->phy_mode, roc_ctx->duration,
		roc_ctx->roc_type, roc_ctx->roc_state);

	if (roc_ctx->roc_state == ROC_STATE_CANCEL_IN_PROG) {
		p2p_err("Cancellation already in progress");
		return;
	}
	p2p_execute_cancel_roc_req(roc_ctx);
}

/**
 * p2p_execute_roc_req() - Execute roc request
 * @roc_ctx: remain on channel request
 *
 * This function init roc timer, start scan and register mgmt rx
 * callback.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
static QDF_STATUS p2p_execute_roc_req(struct p2p_roc_context *roc_ctx)
{
	QDF_STATUS status;
	struct p2p_soc_priv_obj *p2p_soc_obj = roc_ctx->p2p_soc_obj;

	p2p_debug("p2p soc obj:%pK, roc ctx:%pK, vdev_id:%d, scan_id:%d, tx ctx:%pK, chan:%d, phy_mode:%d, duration:%d, roc_type:%d, roc_state:%d",
		p2p_soc_obj, roc_ctx, roc_ctx->vdev_id,
		roc_ctx->scan_id, roc_ctx->tx_ctx, roc_ctx->chan,
		roc_ctx->phy_mode, roc_ctx->duration,
		roc_ctx->roc_type, roc_ctx->roc_state);

	/* prevent runtime suspend; re-allowed on scan complete or failure */
	qdf_runtime_pm_prevent_suspend(&p2p_soc_obj->roc_runtime_lock);

	status = qdf_mc_timer_init(&roc_ctx->roc_timer,
			QDF_TIMER_TYPE_SW, p2p_roc_timeout,
			p2p_soc_obj);
	if (status != QDF_STATUS_SUCCESS) {
		p2p_err("failed to init roc timer, status:%d", status);
		goto fail;
	}

	roc_ctx->roc_state = ROC_STATE_REQUESTED;
	status = p2p_scan_start(roc_ctx);
	if (status != QDF_STATUS_SUCCESS) {
		qdf_mc_timer_destroy(&roc_ctx->roc_timer);
		p2p_err("Failed to start scan, status:%d", status);
		goto fail;
	}

fail:
	/* fallthrough on success: label only acts on error status */
	if (status != QDF_STATUS_SUCCESS) {
		p2p_destroy_roc_ctx(roc_ctx, true, true);
		qdf_runtime_pm_allow_suspend(
			&p2p_soc_obj->roc_runtime_lock);
		return status;
	}

	p2p_soc_obj->cur_roc_vdev_id = roc_ctx->vdev_id;
	status = p2p_mgmt_rx_ops(p2p_soc_obj->soc, true);
	if (status != QDF_STATUS_SUCCESS)
		p2p_err("Failed to register mgmt rx callback, status:%d",
			status);

	return status;
}

/**
 * p2p_find_roc_ctx() - Find out roc context by cookie
 * @p2p_soc_obj: p2p psoc private object
 * @cookie: cookie is the key to find out roc context
 *
 * This function find out roc context by cookie (the roc context
 * address) from p2p psoc private object
 *
 * Return: Pointer to roc context - success
 *         NULL - failure
 */
static struct p2p_roc_context *p2p_find_roc_ctx(
	struct p2p_soc_priv_obj *p2p_soc_obj, uint64_t cookie)
{
	struct p2p_roc_context *curr_roc_ctx;
	qdf_list_node_t *p_node;
	QDF_STATUS status;

	p2p_debug("p2p soc obj:%pK, cookie:%llx", p2p_soc_obj, cookie);

	status = qdf_list_peek_front(&p2p_soc_obj->roc_q, &p_node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		curr_roc_ctx = qdf_container_of(p_node,
					struct p2p_roc_context, node);
		if ((uintptr_t) curr_roc_ctx == cookie)
			return curr_roc_ctx;
		status = qdf_list_peek_next(&p2p_soc_obj->roc_q,
						p_node, &p_node);
	}

	return NULL;
}

/**
 * p2p_process_scan_start_evt() - Process scan start event
 * @roc_ctx: remain on channel request
 *
 * This function process scan start event: marks the roc as started.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
static QDF_STATUS p2p_process_scan_start_evt(
	struct p2p_roc_context *roc_ctx)
{
	roc_ctx->roc_state = ROC_STATE_STARTED;
	p2p_debug("scan started, roc ctx:%pK, scan id:%d",
		roc_ctx, roc_ctx->scan_id);

	return QDF_STATUS_SUCCESS;
}

/**
 * p2p_process_ready_on_channel_evt() - Process ready on channel event
 * @roc_ctx: remain on channel request
 *
 * This function process ready on channel event. Starts roc timer.
 * Indicates this event to up layer if this is user request roc. Sends
 * mgmt frame if this is off channel rx roc.
 *
 * Return: QDF_STATUS_SUCCESS - in case of success
 */
static QDF_STATUS p2p_process_ready_on_channel_evt(
	struct p2p_roc_context *roc_ctx)
{
	uint64_t cookie;
	struct p2p_soc_priv_obj *p2p_soc_obj;
	QDF_STATUS status;

	p2p_soc_obj = roc_ctx->p2p_soc_obj;
	roc_ctx->roc_state = ROC_STATE_ON_CHAN;

	p2p_debug("p2p soc obj:%pK, roc ctx:%pK, vdev_id:%d, scan_id:%d, tx ctx:%pK, chan:%d, phy_mode:%d, duration:%d, roc_type:%d, roc_state:%d",
		p2p_soc_obj, roc_ctx, roc_ctx->vdev_id,
		roc_ctx->scan_id, roc_ctx->tx_ctx, roc_ctx->chan,
		roc_ctx->phy_mode, roc_ctx->duration,
		roc_ctx->roc_type, roc_ctx->roc_state);

	status = qdf_mc_timer_start(&roc_ctx->roc_timer,
		(roc_ctx->duration + P2P_EVENT_PROPAGATE_TIME));
	if (status != QDF_STATUS_SUCCESS)
		p2p_err("Remain on Channel timer start failed");
	if (roc_ctx->roc_type == USER_REQUESTED) {
		p2p_debug("user required roc, send roc event");
		status = p2p_send_roc_event(roc_ctx,
				ROC_EVENT_READY_ON_CHAN);
	}

	cookie = (uintptr_t)roc_ctx;
	/* ready to tx frame */
	p2p_ready_to_tx_frame(p2p_soc_obj, cookie);

	return status;
}

/**
 * p2p_process_scan_complete_evt() - Process scan complete event
 * @roc_ctx: remain on channel request
 *
 * This function process scan complete event.
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +static QDF_STATUS p2p_process_scan_complete_evt( + struct p2p_roc_context *roc_ctx) +{ + QDF_STATUS status; + qdf_list_node_t *next_node; + uint32_t size; + struct p2p_soc_priv_obj *p2p_soc_obj = roc_ctx->p2p_soc_obj; + + p2p_debug("p2p soc obj:%pK, roc ctx:%pK, vdev_id:%d, scan_id:%d, tx ctx:%pK, chan:%d, phy_mode:%d, duration:%d, roc_type:%d, roc_state:%d", + p2p_soc_obj, roc_ctx, roc_ctx->vdev_id, + roc_ctx->scan_id, roc_ctx->tx_ctx, roc_ctx->chan, + roc_ctx->phy_mode, roc_ctx->duration, + roc_ctx->roc_type, roc_ctx->roc_state); + + /* allow runtime suspend */ + qdf_runtime_pm_allow_suspend(&p2p_soc_obj->roc_runtime_lock); + + status = qdf_mc_timer_stop_sync(&roc_ctx->roc_timer); + if (QDF_IS_STATUS_ERROR(status)) + p2p_err("Failed to stop roc timer"); + + status = qdf_mc_timer_destroy(&roc_ctx->roc_timer); + if (status != QDF_STATUS_SUCCESS) + p2p_err("Failed to destroy roc timer"); + + status = p2p_mgmt_rx_ops(p2p_soc_obj->soc, false); + p2p_soc_obj->cur_roc_vdev_id = P2P_INVALID_VDEV_ID; + if (status != QDF_STATUS_SUCCESS) + p2p_err("Failed to deregister mgmt rx callback"); + + if (roc_ctx->roc_type == USER_REQUESTED) + status = p2p_send_roc_event(roc_ctx, + ROC_EVENT_COMPLETED); + + p2p_destroy_roc_ctx(roc_ctx, false, true); + qdf_event_set(&p2p_soc_obj->cancel_roc_done); + + size = qdf_list_size(&p2p_soc_obj->roc_q); + p2p_debug("P2P roc queue size is %d", status); + if (size > 0) { + status = qdf_list_peek_front(&p2p_soc_obj->roc_q, + &next_node); + if (QDF_STATUS_SUCCESS != status) { + p2p_err("Failed to peek roc req from front, status %d", + status); + return status; + } + roc_ctx = qdf_container_of(next_node, + struct p2p_roc_context, node); + status = p2p_execute_roc_req(roc_ctx); + } + return status; +} + +QDF_STATUS p2p_mgmt_rx_action_ops(struct wlan_objmgr_psoc *psoc, + bool isregister) +{ + struct mgmt_txrx_mgmt_frame_cb_info frm_cb_info[2]; + QDF_STATUS status; + + p2p_debug("psoc:%pK, 
is register rx:%d", psoc, isregister); + + frm_cb_info[0].frm_type = MGMT_ACTION_VENDOR_SPECIFIC; + frm_cb_info[0].mgmt_rx_cb = tgt_p2p_mgmt_frame_rx_cb; + frm_cb_info[1].frm_type = MGMT_ACTION_CATEGORY_VENDOR_SPECIFIC; + frm_cb_info[1].mgmt_rx_cb = tgt_p2p_mgmt_frame_rx_cb; + + if (isregister) + status = wlan_mgmt_txrx_register_rx_cb(psoc, + WLAN_UMAC_COMP_P2P, frm_cb_info, 2); + else + status = wlan_mgmt_txrx_deregister_rx_cb(psoc, + WLAN_UMAC_COMP_P2P, frm_cb_info, 2); + + return status; +} + +struct p2p_roc_context *p2p_find_current_roc_ctx( + struct p2p_soc_priv_obj *p2p_soc_obj) +{ + struct p2p_roc_context *roc_ctx; + qdf_list_node_t *p_node; + QDF_STATUS status; + + status = qdf_list_peek_front(&p2p_soc_obj->roc_q, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + roc_ctx = qdf_container_of(p_node, + struct p2p_roc_context, node); + if (roc_ctx->roc_state != ROC_STATE_IDLE) { + p2p_debug("p2p soc obj:%pK, roc ctx:%pK, vdev_id" + ":%d, scan_id:%d, tx ctx:%pK, chan:" + "%d, phy_mode:%d, duration:%d, " + "roc_type:%d, roc_state:%d", + roc_ctx->p2p_soc_obj, roc_ctx, + roc_ctx->vdev_id, roc_ctx->scan_id, + roc_ctx->tx_ctx, roc_ctx->chan, + roc_ctx->phy_mode, roc_ctx->duration, + roc_ctx->roc_type, roc_ctx->roc_state); + + return roc_ctx; + } + status = qdf_list_peek_next(&p2p_soc_obj->roc_q, + p_node, &p_node); + } + + return NULL; +} + +struct p2p_roc_context *p2p_find_roc_by_tx_ctx( + struct p2p_soc_priv_obj *p2p_soc_obj, uint64_t cookie) +{ + struct p2p_roc_context *curr_roc_ctx; + qdf_list_node_t *p_node; + QDF_STATUS status; + + p2p_debug("p2p soc obj:%pK, cookie:%llx", p2p_soc_obj, cookie); + + status = qdf_list_peek_front(&p2p_soc_obj->roc_q, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + curr_roc_ctx = qdf_container_of(p_node, + struct p2p_roc_context, node); + if ((uintptr_t) curr_roc_ctx->tx_ctx == cookie) + return curr_roc_ctx; + status = qdf_list_peek_next(&p2p_soc_obj->roc_q, + p_node, &p_node); + } + + return NULL; +} + +struct 
p2p_roc_context *p2p_find_roc_by_chan(
	struct p2p_soc_priv_obj *p2p_soc_obj, uint8_t chan)
{
	struct p2p_roc_context *roc_ctx;
	qdf_list_node_t *p_node;
	QDF_STATUS status;

	status = qdf_list_peek_front(&p2p_soc_obj->roc_q, &p_node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		roc_ctx = qdf_container_of(p_node,
					   struct p2p_roc_context,
					   node);
		if (roc_ctx->chan == chan) {
			p2p_debug("p2p soc obj:%pK, roc ctx:%pK, vdev_id:%d, scan_id:%d, tx ctx:%pK, chan:%d, phy_mode:%d, duration:%d, roc_type:%d, roc_state:%d",
				  roc_ctx->p2p_soc_obj, roc_ctx,
				  roc_ctx->vdev_id, roc_ctx->scan_id,
				  roc_ctx->tx_ctx, roc_ctx->chan,
				  roc_ctx->phy_mode, roc_ctx->duration,
				  roc_ctx->roc_type, roc_ctx->roc_state);

			return roc_ctx;
		}
		status = qdf_list_peek_next(&p2p_soc_obj->roc_q,
					    p_node, &p_node);
	}

	return NULL;
}

QDF_STATUS p2p_restart_roc_timer(struct p2p_roc_context *roc_ctx)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;

	/* only restart if the timer is currently running */
	if (QDF_TIMER_STATE_RUNNING ==
		qdf_mc_timer_get_current_state(&roc_ctx->roc_timer)) {
		p2p_debug("roc timer is running");
		status = qdf_mc_timer_stop(&roc_ctx->roc_timer);
		if (status != QDF_STATUS_SUCCESS) {
			p2p_err("Failed to stop roc timer");
			return status;
		}

		status = qdf_mc_timer_start(&roc_ctx->roc_timer,
						roc_ctx->duration);
		if (status != QDF_STATUS_SUCCESS)
			p2p_err("Remain on Channel timer start failed");
	}

	return status;
}

QDF_STATUS p2p_cleanup_roc_sync(
	struct p2p_soc_priv_obj *p2p_soc_obj,
	struct wlan_objmgr_vdev *vdev)
{
	struct scheduler_msg msg = {0};
	struct p2p_cleanup_param *param;
	QDF_STATUS status;
	uint32_t vdev_id;

	if (!p2p_soc_obj) {
		p2p_err("p2p soc context is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	p2p_debug("p2p_soc_obj:%pK, vdev:%pK", p2p_soc_obj, vdev);
	param = qdf_mem_malloc(sizeof(*param));
	if (!param) {
		p2p_err("failed to allocate cleanup param");
		return QDF_STATUS_E_NOMEM;
	}

	/* NULL vdev means "clean up roc for every vdev" */
	param->p2p_soc_obj = p2p_soc_obj;
	if (vdev)
		vdev_id = (uint32_t)wlan_vdev_get_id(vdev);
	else
		vdev_id = P2P_INVALID_VDEV_ID;
	param->vdev_id = vdev_id;
	qdf_event_reset(&p2p_soc_obj->cleanup_roc_done);
	msg.type = P2P_CLEANUP_ROC;
	msg.bodyptr = param;
	msg.callback = p2p_process_cmd;
	status = scheduler_post_message(QDF_MODULE_ID_P2P,
					QDF_MODULE_ID_P2P,
					QDF_MODULE_ID_OS_IF, &msg);
	if (status != QDF_STATUS_SUCCESS) {
		p2p_err("failed to post message");
		qdf_mem_free(param);
		return status;
	}

	/* block here until the scheduler-thread handler signals done;
	 * this is why this API must not be called from the scheduler
	 * thread itself
	 */
	status = qdf_wait_single_event(
			&p2p_soc_obj->cleanup_roc_done,
			P2P_WAIT_CLEANUP_ROC);

	if (status != QDF_STATUS_SUCCESS)
		p2p_err("wait for cleanup roc timeout, %d", status);

	return status;
}

QDF_STATUS p2p_process_cleanup_roc_queue(
	struct p2p_cleanup_param *param)
{
	uint32_t vdev_id;
	QDF_STATUS status, ret;
	struct p2p_roc_context *roc_ctx;
	qdf_list_node_t *p_node;
	struct p2p_soc_priv_obj *p2p_soc_obj;

	if (!param || !(param->p2p_soc_obj)) {
		p2p_err("Invalid cleanup param");
		return QDF_STATUS_E_FAILURE;
	}

	p2p_soc_obj = param->p2p_soc_obj;
	vdev_id = param->vdev_id;

	/* pass 1: free idle roc requests that never started */
	p2p_debug("clean up idle roc request, roc queue size:%d, vdev id:%d",
		  qdf_list_size(&p2p_soc_obj->roc_q), vdev_id);
	status = qdf_list_peek_front(&p2p_soc_obj->roc_q, &p_node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		roc_ctx = qdf_container_of(p_node,
				struct p2p_roc_context, node);

		p2p_debug("p2p soc obj:%pK, roc ctx:%pK, vdev_id:%d, scan_id:%d, tx ctx:%pK, chan:%d, phy_mode:%d, duration:%d, roc_type:%d, roc_state:%d",
			  roc_ctx->p2p_soc_obj, roc_ctx,
			  roc_ctx->vdev_id, roc_ctx->scan_id,
			  roc_ctx->tx_ctx, roc_ctx->chan,
			  roc_ctx->phy_mode, roc_ctx->duration,
			  roc_ctx->roc_type, roc_ctx->roc_state);
		/* advance before possibly unlinking the current node */
		status = qdf_list_peek_next(&p2p_soc_obj->roc_q,
						p_node, &p_node);
		if ((roc_ctx->roc_state == ROC_STATE_IDLE) &&
		    ((vdev_id == P2P_INVALID_VDEV_ID) ||
		     (vdev_id == roc_ctx->vdev_id))) {
			ret = qdf_list_remove_node(
					&p2p_soc_obj->roc_q,
					(qdf_list_node_t *)roc_ctx);
			if (ret == QDF_STATUS_SUCCESS)
				qdf_mem_free(roc_ctx);
			else
				p2p_err("Failed to remove roc ctx from queue");
		}
	}

	/* pass 2: cancel started roc requests and wait for each to finish */
	p2p_debug("clean up started roc request, roc queue size:%d",
		  qdf_list_size(&p2p_soc_obj->roc_q));
	status = qdf_list_peek_front(&p2p_soc_obj->roc_q, &p_node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		roc_ctx = qdf_container_of(p_node,
				struct p2p_roc_context, node);

		p2p_debug("p2p soc obj:%pK, roc ctx:%pK, vdev_id:%d, scan_id:%d, tx ctx:%pK, chan:%d, phy_mode:%d, duration:%d, roc_type:%d, roc_state:%d",
			  roc_ctx->p2p_soc_obj, roc_ctx, roc_ctx->vdev_id,
			  roc_ctx->scan_id, roc_ctx->tx_ctx, roc_ctx->chan,
			  roc_ctx->phy_mode, roc_ctx->duration,
			  roc_ctx->roc_type, roc_ctx->roc_state);

		status = qdf_list_peek_next(&p2p_soc_obj->roc_q,
						p_node, &p_node);
		if ((roc_ctx->roc_state != ROC_STATE_IDLE) &&
		    ((vdev_id == P2P_INVALID_VDEV_ID) ||
		     (vdev_id == roc_ctx->vdev_id))) {
			if (roc_ctx->roc_state !=
			    ROC_STATE_CANCEL_IN_PROG)
				p2p_execute_cancel_roc_req(roc_ctx);

			ret = qdf_wait_single_event(
				&p2p_soc_obj->cancel_roc_done,
				P2P_WAIT_CANCEL_ROC);
			p2p_debug("RoC cancellation done, return:%d", ret);
		}
	}

	qdf_event_set(&p2p_soc_obj->cleanup_roc_done);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS p2p_process_roc_req(struct p2p_roc_context *roc_ctx)
{
	struct p2p_soc_priv_obj *p2p_soc_obj;
	struct p2p_roc_context *curr_roc_ctx;
	QDF_STATUS status;
	uint32_t size;

	p2p_soc_obj = roc_ctx->p2p_soc_obj;

	p2p_debug("p2p soc obj:%pK, roc ctx:%pK, vdev_id:%d, scan_id:%d, tx_ctx:%pK, chan:%d, phy_mode:%d, duration:%d, roc_type:%d, roc_state:%d",
		  p2p_soc_obj, roc_ctx, roc_ctx->vdev_id,
		  roc_ctx->scan_id, roc_ctx->tx_ctx, roc_ctx->chan,
		  roc_ctx->phy_mode, roc_ctx->duration,
		  roc_ctx->roc_type, roc_ctx->roc_state);

	status = qdf_list_insert_back(&p2p_soc_obj->roc_q,
			&roc_ctx->node);
	if (QDF_STATUS_SUCCESS != status) {
		p2p_destroy_roc_ctx(roc_ctx, true, false);
		p2p_debug("Failed to insert roc req, status
%d", status); + return status; + } + + size = qdf_list_size(&p2p_soc_obj->roc_q); + if (size == 1) { + status = p2p_execute_roc_req(roc_ctx); + } else if (size > 1) { + curr_roc_ctx = p2p_find_current_roc_ctx(p2p_soc_obj); + /*TODO, to handle extend roc */ + } + + return status; +} + +QDF_STATUS p2p_process_cancel_roc_req( + struct cancel_roc_context *cancel_roc_ctx) +{ + struct p2p_soc_priv_obj *p2p_soc_obj; + struct p2p_roc_context *curr_roc_ctx; + QDF_STATUS status; + + p2p_soc_obj = cancel_roc_ctx->p2p_soc_obj; + curr_roc_ctx = p2p_find_roc_ctx(p2p_soc_obj, + cancel_roc_ctx->cookie); + + p2p_debug("p2p soc obj:%pK, cookie:%llx, roc ctx:%pK", + p2p_soc_obj, cancel_roc_ctx->cookie, curr_roc_ctx); + + if (!curr_roc_ctx) { + p2p_debug("Failed to find roc req by cookie, cookie %llx", + cancel_roc_ctx->cookie); + return QDF_STATUS_E_INVAL; + } + + if (curr_roc_ctx->roc_state == ROC_STATE_IDLE) { + status = p2p_destroy_roc_ctx(curr_roc_ctx, true, true); + } else if (curr_roc_ctx->roc_state == + ROC_STATE_CANCEL_IN_PROG) { + p2p_debug("Receive cancel roc req when roc req is canceling, cookie %llx", + cancel_roc_ctx->cookie); + status = QDF_STATUS_SUCCESS; + } else { + status = p2p_execute_cancel_roc_req(curr_roc_ctx); + } + + return status; +} + +void p2p_scan_event_cb(struct wlan_objmgr_vdev *vdev, + struct scan_event *event, void *arg) +{ + struct p2p_soc_priv_obj *p2p_soc_obj; + struct p2p_roc_context *curr_roc_ctx; + + p2p_debug("soc:%pK, scan event:%d", arg, event->type); + + p2p_soc_obj = (struct p2p_soc_priv_obj *)arg; + if (!p2p_soc_obj) { + p2p_err("Invalid P2P context"); + return; + } + + curr_roc_ctx = p2p_find_current_roc_ctx(p2p_soc_obj); + if (!curr_roc_ctx) { + p2p_err("Failed to find valid P2P roc context"); + return; + } + + qdf_mtrace(QDF_MODULE_ID_SCAN, QDF_MODULE_ID_P2P, event->type, + event->vdev_id, event->scan_id); + switch (event->type) { + case SCAN_EVENT_TYPE_STARTED: + p2p_process_scan_start_evt(curr_roc_ctx); + break; + case 
SCAN_EVENT_TYPE_FOREIGN_CHANNEL: + p2p_process_ready_on_channel_evt(curr_roc_ctx); + break; + case SCAN_EVENT_TYPE_COMPLETED: + case SCAN_EVENT_TYPE_DEQUEUED: + case SCAN_EVENT_TYPE_START_FAILED: + p2p_process_scan_complete_evt(curr_roc_ctx); + break; + default: + p2p_debug("drop scan event, %d", event->type); + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_roc.h b/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_roc.h new file mode 100644 index 0000000000000000000000000000000000000000..5e49b1497647956ad09f8cfcecc37e8605a7fd3f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/p2p/core/src/wlan_p2p_roc.h @@ -0,0 +1,247 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: Defines RoC API & structures + */ + +#ifndef _WLAN_P2P_ROC_H_ +#define _WLAN_P2P_ROC_H_ + +#include +#include +#include + +#define P2P_EVENT_PROPAGATE_TIME 10 +#define P2P_WAIT_CANCEL_ROC 1000 +#define P2P_WAIT_CLEANUP_ROC 2000 +#define P2P_MAX_ROC_DURATION 1500 + +#define P2P_ROC_DURATION_MULTI_GO_PRESENT 6 +#define P2P_ROC_DURATION_MULTI_GO_ABSENT 10 +#define P2P_ACTION_FRAME_DEFAULT_WAIT 200 + +struct wlan_objmgr_vdev; +struct scan_event; + +/** + * enum roc_type - user requested or off channel tx + * @USER_REQUESTED: Requested by supplicant + * @OFF_CHANNEL_TX: Issued internally for off channel tx + */ +enum roc_type { + USER_REQUESTED, + OFF_CHANNEL_TX, +}; + +/** + * enum roc_state - P2P RoC state + * @ROC_STATE_IDLE: RoC not yet started or completed + * @ROC_STATE_REQUESTED: Sent scan command to scan manager + * @ROC_STATE_STARTED: Got started event from scan manager + * @ROC_STATE_ON_CHAN: Got foreign channel event from SCM + * @ROC_STATE_CANCEL_IN_PROG: Requested abort scan to SCM + * @ROC_STATE_INVALID: We should not come to this state + */ +enum roc_state { + ROC_STATE_IDLE = 0, + ROC_STATE_REQUESTED, + ROC_STATE_STARTED, + ROC_STATE_ON_CHAN, + ROC_STATE_CANCEL_IN_PROG, + ROC_STATE_INVALID, +}; + +/** + * struct p2p_roc_context - RoC request context + * @node: Node for next element in the list + * @p2p_soc_obj: Pointer to SoC global p2p private object + * @vdev_id: Vdev id on which this request has come + * @scan_id: Scan id given by scan component for this roc req + * @tx_ctx: TX context if this ROC is for tx MGMT + * @chan: Chan for which this RoC has been requested + * @phy_mode: PHY mode + * @duration: Duration for the RoC + * @roc_type: RoC type User requested or internal + * @roc_timer: RoC timer + * @roc_state: Roc state + * @id: identifier of roc + */ +struct p2p_roc_context { + qdf_list_node_t node; + struct p2p_soc_priv_obj *p2p_soc_obj; + uint32_t vdev_id; + uint32_t scan_id; + void *tx_ctx; + uint8_t chan; + uint8_t 
phy_mode; + uint32_t duration; + enum roc_type roc_type; + qdf_mc_timer_t roc_timer; + enum roc_state roc_state; + int32_t id; +}; + +/** + * struct cancel_roc_context - p2p cancel roc context + * @p2p_soc_obj: Pointer to SoC global p2p private object + * @cookie: Cookie which is given by supplicant + */ +struct cancel_roc_context { + struct p2p_soc_priv_obj *p2p_soc_obj; + uint64_t cookie; +}; + +/** + * struct p2p_cleanup_param - p2p cleanup parameters + * @p2p_soc_obj: Pointer to SoC global p2p private object + * @vdev_id: vdev id + */ +struct p2p_cleanup_param { + struct p2p_soc_priv_obj *p2p_soc_obj; + uint32_t vdev_id; +}; + +/** + * p2p_mgmt_rx_action_ops() - register or unregister rx action callback + * @psoc: psoc object + * @isregister: register if true, unregister if false + * + * This function registers or unregisters rx action frame callback to + * mgmt txrx component. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS p2p_mgmt_rx_action_ops(struct wlan_objmgr_psoc *psoc, + bool isregister); + +/** + * p2p_find_current_roc_ctx() - Find out roc context in progressing + * @p2p_soc_obj: p2p psoc private object + * + * This function finds out roc context in progressing from p2p psoc + * private object + * + * Return: Pointer to roc context - success + * NULL - failure + */ +struct p2p_roc_context *p2p_find_current_roc_ctx( + struct p2p_soc_priv_obj *p2p_soc_obj); + +/** + * p2p_find_roc_by_tx_ctx() - Find out roc context by tx context + * @p2p_soc_obj: p2p psoc private object + * @cookie: cookie is the key to find out roc context + * + * This function finds out roc context by tx context from p2p psoc + * private object + * + * Return: Pointer to roc context - success + * NULL - failure + */ +struct p2p_roc_context *p2p_find_roc_by_tx_ctx( + struct p2p_soc_priv_obj *p2p_soc_obj, uint64_t cookie); + +/** + * p2p_find_roc_by_chan() - Find out roc context by channel + * @p2p_soc_obj: p2p psoc private object + * @chan: channel of the ROC + 
* + * This function finds out roc context by channel from p2p psoc + * private object + * + * Return: Pointer to roc context - success + * NULL - failure + */ +struct p2p_roc_context *p2p_find_roc_by_chan( + struct p2p_soc_priv_obj *p2p_soc_obj, uint8_t chan); + +/** + * p2p_restart_roc_timer() - Restarts roc timer + * @roc_ctx: remain on channel context + * + * This function restarts roc timer with updated duration. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS p2p_restart_roc_timer(struct p2p_roc_context *roc_ctx); + +/** + * p2p_cleanup_roc_sync() - Cleanup roc context in queue + * @p2p_soc_obj: p2p psoc private object + * @vdev: vdev object + * + * This function cleanup roc context in queue, include the roc + * context in progressing until cancellation done. To avoid deadlock, + * don't call from scheduler thread. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS p2p_cleanup_roc_sync( + struct p2p_soc_priv_obj *p2p_soc_obj, + struct wlan_objmgr_vdev *vdev); + +/** + * p2p_process_cleanup_roc_queue() - process the message to cleanup roc + * @param: pointer to cleanup parameters + * + * This function process the message to cleanup roc context in queue, + * include the roc context in progressing. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS p2p_process_cleanup_roc_queue( + struct p2p_cleanup_param *param); + +/** + * p2p_process_roc_req() - Process roc request + * @roc_ctx: roc request context + * + * This function handles roc request. It will call API from scan/mgmt + * txrx component. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS p2p_process_roc_req(struct p2p_roc_context *roc_ctx); + +/** + * p2p_process_cancel_roc_req() - Process cancel roc request + * @cancel_roc_ctx: cancel roc request context + * + * This function cancel roc request by cookie. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS p2p_process_cancel_roc_req( + struct cancel_roc_context *cancel_roc_ctx); + +/** + * p2p_scan_event_cb() - Process scan event + * @vdev: vdev associated to this scan event + * @event: event information + * @arg: registered arguments + * + * This function handles P2P scan event and deliver P2P event to HDD + * layer by registered callback. + * + * Return: None + */ +void p2p_scan_event_cb(struct wlan_objmgr_vdev *vdev, + struct scan_event *event, void *arg); + +#endif /* _WLAN_P2P_ROC_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/p2p/dispatcher/inc/wlan_p2p_public_struct.h b/drivers/staging/qca-wifi-host-cmn/umac/p2p/dispatcher/inc/wlan_p2p_public_struct.h new file mode 100644 index 0000000000000000000000000000000000000000..6da5d223c2737fc14cb6967315d6801a848d224f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/p2p/dispatcher/inc/wlan_p2p_public_struct.h @@ -0,0 +1,250 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
DOC: Contains p2p public data structure definitions
has been requested + * @wait: Duration for the RoC + * @len: Length of tx buffer + * @no_cck: Required cck or not + * @dont_wait_for_ack: Wait for ack or not + * @off_chan: Off channel tx or not + * @buf: TX buffer + */ +struct p2p_mgmt_tx { + uint32_t vdev_id; + uint32_t chan; + uint32_t wait; + uint32_t len; + uint32_t no_cck; + uint32_t dont_wait_for_ack; + uint32_t off_chan; + const uint8_t *buf; +}; + +/** + * struct p2p_set_mac_filter + * @vdev_id: Vdev id + * @mac: mac addr + * @freq: frequency + * @set: set or clear + */ +struct p2p_set_mac_filter { + uint32_t vdev_id; + uint8_t mac[QDF_MAC_ADDR_SIZE]; + uint32_t freq; + bool set; +}; + +/** + * struct p2p_set_mac_filter_evt + * @vdev_id: Vdev id + * @status: target reported result of set mac addr filter + */ +struct p2p_set_mac_filter_evt { + uint32_t vdev_id; + uint32_t status; +}; + +/** + * struct p2p_ps_config + * @vdev_id: Vdev id + * @opp_ps: Opportunistic power save + * @ct_window: CT window + * @count: Count + * @duration: Duration + * @interval: Interval + * @single_noa_duration: Single shot noa duration + * @ps_selection: power save selection + */ +struct p2p_ps_config { + uint32_t vdev_id; + uint32_t opp_ps; + uint32_t ct_window; + uint32_t count; + uint32_t duration; + uint32_t interval; + uint32_t single_noa_duration; + uint32_t ps_selection; +}; + +/** + * struct p2p_lo_start - p2p listen offload start + * @vdev_id: Vdev id + * @ctl_flags: Control flag + * @freq: P2P listen frequency + * @period: Listen offload period + * @interval: Listen offload interval + * @count: Number listen offload intervals + * @dev_types_len: Device types length + * @probe_resp_len: Probe response template length + * @device_types: Device types + * @probe_resp_tmplt: Probe response template + */ +struct p2p_lo_start { + uint32_t vdev_id; + uint32_t ctl_flags; + uint32_t freq; + uint32_t period; + uint32_t interval; + uint32_t count; + uint32_t dev_types_len; + uint32_t probe_resp_len; + uint8_t *device_types; + 
uint8_t *probe_resp_tmplt; +}; + +/** + * struct p2p_lo_event + * @vdev_id: vdev id + * @reason_code: reason code + */ +struct p2p_lo_event { + uint32_t vdev_id; + uint32_t reason_code; +}; + +/** + * struct noa_descriptor - noa descriptor + * @type_count: 255: continuous schedule, 0: reserved + * @duration: Absent period duration in micro seconds + * @interval: Absent period interval in micro seconds + * @start_time: 32 bit tsf time when in starts + */ +struct noa_descriptor { + uint32_t type_count; + uint32_t duration; + uint32_t interval; + uint32_t start_time; +}; + +/** + * struct p2p_noa_info - p2p noa information + * @index: identifies instance of NOA su element + * @opps_ps: opps ps state of the AP + * @ct_window: ct window in TUs + * @vdev_id: vdev id + * @num_descriptors: number of NOA descriptors + * @noa_desc: noa descriptors + */ +struct p2p_noa_info { + uint32_t index; + uint32_t opps_ps; + uint32_t ct_window; + uint32_t vdev_id; + uint32_t num_desc; + struct noa_descriptor noa_desc[P2P_MAX_NOA_DESC]; +}; + +/** + * struct p2p_protocol_callbacks - callback to non-converged driver + * @is_mgmt_protected: func to get 11w mgmt protection status + */ +struct p2p_protocol_callbacks { + bool (*is_mgmt_protected)(uint32_t vdev_id, const uint8_t *peer_addr); +}; + +#endif /* _WLAN_P2P_PUBLIC_STRUCT_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/p2p/dispatcher/inc/wlan_p2p_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/umac/p2p/dispatcher/inc/wlan_p2p_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..74b79bd8c04518a5787d3f28c810468d94ba5cc9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/p2p/dispatcher/inc/wlan_p2p_tgt_api.h @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Contains p2p south bound interface definitions + */ + +#ifndef _WLAN_P2P_TGT_API_H_ +#define _WLAN_P2P_TGT_API_H_ + +#include +#include + +struct scan_event; +struct wlan_objmgr_psoc; +struct wlan_objmgr_peer; +struct p2p_noa_info; +struct p2p_lo_event; +struct mgmt_rx_event_params; +enum mgmt_frame_type; + +/** + * tgt_p2p_register_lo_ev_handler() - register lo event + * @psoc: soc object + * + * p2p tgt api to register listen offload event handler. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS tgt_p2p_register_lo_ev_handler( + struct wlan_objmgr_psoc *psoc); + +/** + * tgt_p2p_register_noa_ev_handler() - register noa event + * @psoc: soc object + * + * p2p tgt api to register noa event handler. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS tgt_p2p_register_noa_ev_handler( + struct wlan_objmgr_psoc *psoc); + +/** + * tgt_p2p_register_macaddr_rx_filter_evt_handler() - register add mac rx + * filter status event + * @psoc: soc object + * @register: register or unregister + * + * p2p tgt api to register add mac rx filter status event + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS tgt_p2p_register_macaddr_rx_filter_evt_handler( + struct wlan_objmgr_psoc *psoc, bool register); + +/** + * tgt_p2p_unregister_lo_ev_handler() - unregister lo event + * @psoc: soc object + * + * p2p tgt api to unregister listen offload event handler. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS tgt_p2p_unregister_lo_ev_handler( + struct wlan_objmgr_psoc *psoc); + +/** + * tgt_p2p_unregister_noa_ev_handler() - unregister noa event + * @psoc: soc object + * + * p2p tgt api to unregister noa event handler. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS tgt_p2p_unregister_noa_ev_handler( + struct wlan_objmgr_psoc *psoc); + +/** + * tgt_p2p_scan_event_cb() - Callback for scan event + * @vdev: vdev object + * @event: event information + * @arg: registered arguments + * + * This function gets called from scan component when getting P2P + * scan event. + * + * Return: None + */ +void tgt_p2p_scan_event_cb(struct wlan_objmgr_vdev *vdev, + struct scan_event *event, void *arg); + +/** + * tgt_p2p_mgmt_download_comp_cb() - Callback for mgmt frame tx + * complete + * @context: tx context + * @buf: buffer address + * @free: need to free or not + * + * This function gets called from mgmt tx/rx component when mgmt + * frame tx complete. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS tgt_p2p_mgmt_download_comp_cb(void *context, + qdf_nbuf_t buf, bool free); + +/** + * tgt_p2p_mgmt_ota_comp_cb() - Callback for mgmt frame tx ack + * @context: tx context + * @buf: buffer address + * @status: tx status + * @tx_compl_params: tx complete parameters + * + * This function gets called from mgmt tx/rx component when getting + * mgmt frame tx ack. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS tgt_p2p_mgmt_ota_comp_cb(void *context, qdf_nbuf_t buf, + uint32_t status, void *tx_compl_params); + +/** + * tgt_p2p_mgmt_frame_rx_cb() - Callback for rx mgmt frame + * @psoc: soc context + * @peer: peer context + * @buf: rx buffer + * @mgmt_rx_params: mgmt rx parameters + * @frm_type: frame type + * + * This function gets called from mgmt tx/rx component when rx mgmt + * frame. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS tgt_p2p_mgmt_frame_rx_cb(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer, qdf_nbuf_t buf, + struct mgmt_rx_event_params *mgmt_rx_params, + enum mgmt_frame_type frm_type); +/** + * tgt_p2p_noa_event_cb() - Callback for noa event + * @psoc: soc object + * @event_info: noa event information + * + * This function gets called from target interface. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS tgt_p2p_noa_event_cb(struct wlan_objmgr_psoc *psoc, + struct p2p_noa_info *event_info); + +/** + * tgt_p2p_lo_event_cb() - Listen offload stop request + * @psoc: soc object + * @event_info: lo stop event buffer + * + * This function gets called from target interface. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS tgt_p2p_lo_event_cb(struct wlan_objmgr_psoc *psoc, + struct p2p_lo_event *event_info); + +/** + * tgt_p2p_add_mac_addr_status_event_cb() - Callback for set mac addr filter evt + * @psoc: soc object + * @event_info: event information type of p2p_set_mac_filter_evt + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS +tgt_p2p_add_mac_addr_status_event_cb( + struct wlan_objmgr_psoc *psoc, + struct p2p_set_mac_filter_evt *event_info); +#endif /* _WLAN_P2P_TGT_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/p2p/dispatcher/inc/wlan_p2p_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/p2p/dispatcher/inc/wlan_p2p_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..60f5b79a8159b1e083f9efdf8aff8bf747191aea --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/p2p/dispatcher/inc/wlan_p2p_ucfg_api.h @@ -0,0 +1,407 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: Contains p2p north bound interface definitions + */ + +#ifndef _WLAN_P2P_UCFG_API_H_ +#define _WLAN_P2P_UCFG_API_H_ + +#include + +struct wlan_objmgr_psoc; +struct p2p_roc_req; +struct p2p_event; +struct p2p_rx_mgmt_frame; +struct p2p_tx_cnf; +struct p2p_mgmt_tx; +struct p2p_ps_config; +struct p2p_lo_start; +struct p2p_lo_event; + +/** + * p2p_rx_callback() - Callback for rx mgmt frame + * @user_data: user data associated to this rx mgmt frame. + * @rx_frame: RX mgmt frame + * + * This callback will be used to give rx frames to hdd. + * + * Return: None + */ +typedef void (*p2p_rx_callback)(void *user_data, + struct p2p_rx_mgmt_frame *rx_frame); + +/** + * p2p_action_tx_cnf_callback() - Callback for tx confirmation + * @user_data: user data associated to this tx confirmation + * @tx_cnf: tx confirmation information + * + * This callback will be used to give tx mgmt frame confirmation to + * hdd. + * + * Return: None + */ +typedef void (*p2p_action_tx_cnf_callback)(void *user_data, + struct p2p_tx_cnf *tx_cnf); + +/** + * p2p_lo_event_callback() - Callback for listen offload event + * @user_data: user data associated to this lo event + * @p2p_lo_event: listen offload event information + * + * This callback will be used to give listen offload event to hdd. + * + * Return: None + */ +typedef void (*p2p_lo_event_callback)(void *user_data, + struct p2p_lo_event *p2p_lo_event); + +/** + * p2p_event_callback() - Callback for P2P event + * @user_data: user data associated to this p2p event + * @p2p_event: p2p event information + * + * This callback will be used to give p2p event to hdd. + * + * Return: None + */ +typedef void (*p2p_event_callback)(void *user_data, + struct p2p_event *p2p_event); + +/** + * struct p2p_start_param - p2p soc start parameters. Below callbacks + * will be registered by the HDD + * @rx_callback: Function pointer to hdd rx callback. 
This + * function will be used to give rx frames to hdd + * @rx_cb_data: RX callback user data + * @event_cb: Founction pointer to hdd p2p event callback. + * This function will be used to give p2p event + * to hdd + * @event_cb_data: Pointer to p2p event callback user data + * @tx_cnf_cb: Function pointer to hdd tx confirm callback. + * This function will be used to give tx confirm + * to hdd + * @tx_cnf_cb_data: Pointer to p2p tx confirm callback user data + * @lo_event_cb: Founction pointer to p2p listen offload + * callback. This function will be used to give + * listen offload stopped event to hdd + * @lo_event_cb_data: Pointer to p2p listen offload callback user data + */ +struct p2p_start_param { + p2p_rx_callback rx_cb; + void *rx_cb_data; + p2p_event_callback event_cb; + void *event_cb_data; + p2p_action_tx_cnf_callback tx_cnf_cb; + void *tx_cnf_cb_data; + p2p_lo_event_callback lo_event_cb; + void *lo_event_cb_data; +}; + +/** + * ucfg_p2p_init() - P2P component initialization + * + * This function gets called when dispatcher initializing. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_init(void); + +/** + * ucfg_p2p_deinit() - P2P component de-init + * + * This function gets called when dispatcher de-init. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_deinit(void); + +/** + * ucfg_p2p_psoc_open() - Open P2P component + * @soc: soc context + * + * This function gets called when dispatcher opening. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_psoc_open(struct wlan_objmgr_psoc *soc); + +/** + * ucfg_p2p_psoc_close() - Close P2P component + * @soc: soc context + * + * This function gets called when dispatcher closing. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_psoc_close(struct wlan_objmgr_psoc *soc); + +/** + * ucfg_p2p_psoc_start() - Start P2P component + * @soc: soc context + * @req: P2P start parameters + * + * This function gets called when up layer starting up. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_psoc_start(struct wlan_objmgr_psoc *soc, + struct p2p_start_param *req); + +/** + * ucfg_p2p_psoc_stop() - Stop P2P component + * @soc: soc context + * + * This function gets called when up layer exit. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_psoc_stop(struct wlan_objmgr_psoc *soc); + +/** + * ucfg_p2p_roc_req() - Roc request + * @soc: soc context + * @roc_req: Roc request parameters + * @cookie: return cookie to caller + * + * This function delivers roc request to P2P component. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_roc_req(struct wlan_objmgr_psoc *soc, + struct p2p_roc_req *roc_req, uint64_t *cookie); + +/** + * ucfg_p2p_roc_cancel_req() - Cancel roc request + * @soc: soc context + * @cookie: Find out the roc request by cookie + * + * This function delivers cancel roc request to P2P component. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_roc_cancel_req(struct wlan_objmgr_psoc *soc, + uint64_t cookie); + +/** + * ucfg_p2p_cleanup_roc_by_vdev() - Cleanup roc request by vdev + * @vdev: pointer to vdev object + * + * This function call P2P API to cleanup roc request by vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_cleanup_roc_by_vdev(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_p2p_cleanup_roc_by_poc() - Cleanup roc request by psoc + * @psoc: pointer to psoc object + * + * This function call P2P API to cleanup roc request by psoc + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_cleanup_roc_by_psoc(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_p2p_cleanup_tx_by_vdev() - Cleanup tx request by vdev + * @vdev: pointer to vdev object + * + * This function call P2P API to cleanup tx action frame request by vdev + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_cleanup_tx_by_vdev(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_p2p_cleanup_tx_by_poc() - Cleanup tx request by psoc + * @psoc: pointer to psoc object + * + * This function call P2P API to cleanup tx action frame request by psoc + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_cleanup_tx_by_psoc(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_p2p_mgmt_tx() - Mgmt frame tx request + * @soc: soc context + * @mgmt_frm: TX mgmt frame parameters + * @cookie: Return the cookie to caller + * + * This function delivers mgmt frame tx request to P2P component. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_mgmt_tx(struct wlan_objmgr_psoc *soc, + struct p2p_mgmt_tx *mgmt_frm, uint64_t *cookie); + +/** + * ucfg_p2p_mgmt_tx_cancel() - Cancel mgmt frame tx request + * @soc: soc context + * @vdev: vdev object + * @cookie: Find out the mgmt tx request by cookie + * + * This function delivers cancel mgmt frame tx request request to P2P + * component. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_mgmt_tx_cancel(struct wlan_objmgr_psoc *soc, + struct wlan_objmgr_vdev *vdev, uint64_t cookie); + +/** + * ucfg_p2p_set_ps() - P2P set power save + * @soc: soc context + * @ps_config: power save configure + * + * This function delivers p2p power save request to P2P component. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_set_ps(struct wlan_objmgr_psoc *soc, + struct p2p_ps_config *ps_config); + +/** + * ucfg_p2p_lo_start() - Listen offload start request + * @soc: soc context + * @p2p_lo_start: lo start parameters + * + * This function delivers listen offload start request to P2P + * component. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_lo_start(struct wlan_objmgr_psoc *soc, + struct p2p_lo_start *p2p_lo_start); + +/** + * ucfg_p2p_lo_stop() - Listen offload stop request + * @soc: soc context + * @vdev_id: vdev id + * + * This function delivers listen offload stop request to P2P component. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_lo_stop(struct wlan_objmgr_psoc *soc, + uint32_t vdev_id); + +/** + * p2p_peer_authorized() - Process peer authorized event + * @vdev: vdev structure to which peer is associated + * @mac_addr: peer mac address + * + * This function handles disables noa whenever a legacy station + * complete 4-way handshake after association. 
+ * + * Return: void + */ +void p2p_peer_authorized(struct wlan_objmgr_vdev *vdev, uint8_t *mac_addr); + +/** + * ucfg_p2p_set_noa() - Disable/Enable NOA + * @soc: soc context + * @vdev_id: vdev id + * @disable_noa: TRUE - Disable NoA, FALSE - Enable NoA + * + * This function send wmi command to enable / disable NoA. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_set_noa(struct wlan_objmgr_psoc *soc, + uint32_t vdev_id, bool disable_noa); + +/** + * ucfg_p2p_check_random_mac() - check random mac addr or not + * @soc: soc context + * @vdev_id: vdev id + * @random_mac_addr: mac addr to be checked + * + * This function check the input addr is random mac addr or not for vdev. + * + * Return: true if addr is random mac address else false. + */ +bool ucfg_p2p_check_random_mac(struct wlan_objmgr_psoc *soc, uint32_t vdev_id, + uint8_t *random_mac_addr); + +/** + * ucfg_p2p_register_callbacks() - register p2p callbacks + * @soc: soc context + * @cb_obj: p2p_protocol_callbacks struct + * + * This function registers lim callbacks to p2p components to provide + * protocol information. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_register_callbacks(struct wlan_objmgr_psoc *soc, + struct p2p_protocol_callbacks *cb_obj); + +/** + * ucfg_p2p_status_scan() - Show P2P connection status when scanning + * @vdev: vdev context + * + * This function shows P2P connection status when scanning. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_status_scan(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_p2p_status_connect() - Update P2P connection status + * @vdev: vdev context + * + * Updates P2P connection status by up layer when connecting. 
+ * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_status_connect(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_p2p_status_disconnect() - Update P2P connection status + * @vdev: vdev context + * + * Updates P2P connection status by up layer when disconnecting. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_status_disconnect(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_p2p_status_start_bss() - Update P2P connection status + * @vdev: vdev context + * + * Updates P2P connection status by up layer when starting bss. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_status_start_bss(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_p2p_status_stop_bss() - Update P2P connection status + * @vdev: vdev context + * + * Updates P2P connection status by up layer when stopping bss. + * + * Return: QDF_STATUS_SUCCESS - in case of success + */ +QDF_STATUS ucfg_p2p_status_stop_bss(struct wlan_objmgr_vdev *vdev); + +#endif /* _WLAN_P2P_UCFG_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/p2p/dispatcher/src/wlan_p2p_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/umac/p2p/dispatcher/src/wlan_p2p_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..d1a530ae52aef8e0cb9f6e4bd36e598c10834b34 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/p2p/dispatcher/src/wlan_p2p_tgt_api.c @@ -0,0 +1,427 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains p2p south bound interface definitions + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "wlan_p2p_tgt_api.h" +#include "wlan_p2p_public_struct.h" +#include "../../core/src/wlan_p2p_main.h" +#include "../../core/src/wlan_p2p_roc.h" +#include "../../core/src/wlan_p2p_off_chan_tx.h" + +#define IEEE80211_FC0_TYPE_MASK 0x0c +#define P2P_NOISE_FLOOR_DBM_DEFAULT (-96) + +static inline struct wlan_lmac_if_p2p_tx_ops * +wlan_psoc_get_p2p_tx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &(psoc->soc_cb.tx_ops.p2p); +} + +QDF_STATUS tgt_p2p_register_lo_ev_handler( + struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_p2p_tx_ops *p2p_ops; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + p2p_ops = wlan_psoc_get_p2p_tx_ops(psoc); + if (p2p_ops && p2p_ops->reg_lo_ev_handler) { + status = p2p_ops->reg_lo_ev_handler(psoc, NULL); + p2p_debug("register lo event, status:%d", status); + } + + return status; +} + +QDF_STATUS tgt_p2p_register_noa_ev_handler( + struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_p2p_tx_ops *p2p_ops; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + p2p_ops = wlan_psoc_get_p2p_tx_ops(psoc); + if (p2p_ops && p2p_ops->reg_noa_ev_handler) { + status = p2p_ops->reg_noa_ev_handler(psoc, NULL); + p2p_debug("register noa event, status:%d", status); + } + + return status; +} + +QDF_STATUS +tgt_p2p_add_mac_addr_status_event_cb(struct wlan_objmgr_psoc *psoc, + struct p2p_set_mac_filter_evt *event_info) +{ + struct p2p_mac_filter_rsp *mac_filter_rsp; + struct scheduler_msg msg = {0}; + struct p2p_soc_priv_obj *p2p_soc_obj; + QDF_STATUS status; 
+ + if (!psoc) { + p2p_err("random_mac:psoc context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + if (!event_info) { + p2p_err("random_mac:invalid event_info"); + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = wlan_objmgr_psoc_get_comp_private_obj( + psoc, WLAN_UMAC_COMP_P2P); + if (!p2p_soc_obj) { + p2p_err("random_mac:p2p soc object is NULL"); + return QDF_STATUS_E_INVAL; + } + + mac_filter_rsp = qdf_mem_malloc(sizeof(*mac_filter_rsp)); + if (!mac_filter_rsp) { + p2p_err("random_mac:Failed to allocate mac_filter_rsp"); + return QDF_STATUS_E_NOMEM; + } + + mac_filter_rsp->p2p_soc_obj = p2p_soc_obj; + mac_filter_rsp->vdev_id = event_info->vdev_id; + mac_filter_rsp->status = event_info->status; + + msg.type = P2P_EVENT_ADD_MAC_RSP; + msg.bodyptr = mac_filter_rsp; + msg.callback = p2p_process_evt; + status = scheduler_post_msg(QDF_MODULE_ID_TARGET_IF, &msg); + if (status != QDF_STATUS_SUCCESS) + qdf_mem_free(mac_filter_rsp); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_p2p_register_macaddr_rx_filter_evt_handler( + struct wlan_objmgr_psoc *psoc, bool reg) +{ + struct wlan_lmac_if_p2p_tx_ops *p2p_ops; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + p2p_ops = wlan_psoc_get_p2p_tx_ops(psoc); + if (p2p_ops && p2p_ops->reg_mac_addr_rx_filter_handler) { + status = p2p_ops->reg_mac_addr_rx_filter_handler(psoc, reg); + p2p_debug("register mac addr rx filter event, register %d status:%d", + reg, status); + } + + return status; +} + +QDF_STATUS tgt_p2p_unregister_lo_ev_handler( + struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_p2p_tx_ops *p2p_ops; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + p2p_ops = wlan_psoc_get_p2p_tx_ops(psoc); + if (p2p_ops && p2p_ops->unreg_lo_ev_handler) { + status = p2p_ops->unreg_lo_ev_handler(psoc, NULL); + p2p_debug("unregister lo event, status:%d", status); + } + + return status; +} + +QDF_STATUS tgt_p2p_unregister_noa_ev_handler( + struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_p2p_tx_ops *p2p_ops; + QDF_STATUS 
status = QDF_STATUS_E_FAILURE; + + p2p_ops = wlan_psoc_get_p2p_tx_ops(psoc); + if (p2p_ops && p2p_ops->unreg_noa_ev_handler) { + status = p2p_ops->unreg_noa_ev_handler(psoc, NULL); + p2p_debug("unregister noa event, status:%d", status); + } + + return status; +} + +void tgt_p2p_scan_event_cb(struct wlan_objmgr_vdev *vdev, + struct scan_event *event, void *arg) +{ + p2p_scan_event_cb(vdev, event, arg); +} + +QDF_STATUS tgt_p2p_mgmt_download_comp_cb(void *context, + qdf_nbuf_t buf, bool free) +{ + p2p_debug("conext:%pK, buf:%pK, free:%d", context, + qdf_nbuf_data(buf), free); + + qdf_nbuf_free(buf); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_p2p_mgmt_ota_comp_cb(void *context, qdf_nbuf_t buf, + uint32_t status, void *tx_compl_params) +{ + struct p2p_tx_conf_event *tx_conf_event; + struct scheduler_msg msg = {0}; + QDF_STATUS ret; + + p2p_debug("context:%pK, buf:%pK, status:%d, tx complete params:%pK", + context, buf, status, tx_compl_params); + + if (!context) { + p2p_err("invalid context"); + qdf_nbuf_free(buf); + return QDF_STATUS_E_INVAL; + } + + tx_conf_event = qdf_mem_malloc(sizeof(*tx_conf_event)); + if (!tx_conf_event) { + p2p_err("Failed to allocate tx cnf event"); + qdf_nbuf_free(buf); + return QDF_STATUS_E_NOMEM; + } + + tx_conf_event->status = status; + tx_conf_event->nbuf = buf; + tx_conf_event->p2p_soc_obj = (struct p2p_soc_priv_obj *)context; + msg.type = P2P_EVENT_MGMT_TX_ACK_CNF; + msg.bodyptr = tx_conf_event; + msg.callback = p2p_process_evt; + msg.flush_callback = p2p_event_flush_callback; + ret = scheduler_post_message(QDF_MODULE_ID_P2P, + QDF_MODULE_ID_P2P, + QDF_MODULE_ID_TARGET_IF, + &msg); + if (QDF_IS_STATUS_ERROR(ret)) { + qdf_mem_free(tx_conf_event); + qdf_nbuf_free(buf); + p2p_err("post msg fail:%d", status); + } + + return ret; +} + +QDF_STATUS tgt_p2p_mgmt_frame_rx_cb(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer, qdf_nbuf_t buf, + struct mgmt_rx_event_params *mgmt_rx_params, + enum mgmt_frame_type frm_type) +{ + 
struct p2p_rx_mgmt_frame *rx_mgmt; + struct p2p_rx_mgmt_event *rx_mgmt_event; + struct p2p_soc_priv_obj *p2p_soc_obj; + struct scheduler_msg msg = {0}; + struct wlan_objmgr_vdev *vdev; + uint32_t vdev_id; + uint8_t *pdata; + QDF_STATUS status; + + p2p_debug("psoc:%pK, peer:%pK, type:%d", psoc, peer, frm_type); + + if (!mgmt_rx_params) { + p2p_err("mgmt rx params is NULL"); + qdf_nbuf_free(buf); + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_P2P); + if (!p2p_soc_obj) { + p2p_err("p2p ctx is NULL, drop this frame"); + qdf_nbuf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + if (!peer) { + if (p2p_soc_obj->cur_roc_vdev_id == P2P_INVALID_VDEV_ID) { + p2p_err("vdev id of current roc invalid"); + qdf_nbuf_free(buf); + return QDF_STATUS_E_FAILURE; + } else { + vdev_id = p2p_soc_obj->cur_roc_vdev_id; + } + } else { + vdev = wlan_peer_get_vdev(peer); + if (!vdev) { + p2p_err("vdev is NULL in peer, drop this frame"); + qdf_nbuf_free(buf); + return QDF_STATUS_E_FAILURE; + } + vdev_id = wlan_vdev_get_id(vdev); + } + + rx_mgmt_event = qdf_mem_malloc_atomic(sizeof(*rx_mgmt_event)); + if (!rx_mgmt_event) { + p2p_debug_rl("Failed to allocate rx mgmt event"); + qdf_nbuf_free(buf); + return QDF_STATUS_E_NOMEM; + } + + rx_mgmt = qdf_mem_malloc_atomic(sizeof(*rx_mgmt) + + mgmt_rx_params->buf_len); + if (!rx_mgmt) { + p2p_debug_rl("Failed to allocate rx mgmt frame"); + qdf_nbuf_free(buf); + return QDF_STATUS_E_NOMEM; + } + + pdata = (uint8_t *)qdf_nbuf_data(buf); + rx_mgmt->frame_len = mgmt_rx_params->buf_len; + rx_mgmt->rx_chan = mgmt_rx_params->channel; + rx_mgmt->vdev_id = vdev_id; + rx_mgmt->frm_type = frm_type; + rx_mgmt->rx_rssi = mgmt_rx_params->snr + + P2P_NOISE_FLOOR_DBM_DEFAULT; + rx_mgmt_event->rx_mgmt = rx_mgmt; + rx_mgmt_event->p2p_soc_obj = p2p_soc_obj; + qdf_mem_copy(rx_mgmt->buf, pdata, mgmt_rx_params->buf_len); + msg.type = P2P_EVENT_RX_MGMT; + msg.bodyptr = rx_mgmt_event; + msg.callback = 
p2p_process_evt;
	msg.flush_callback = p2p_event_flush_callback;
	status = scheduler_post_message(QDF_MODULE_ID_P2P,
					QDF_MODULE_ID_P2P,
					QDF_MODULE_ID_TARGET_IF,
					&msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		/* Post failed: the scheduler never took ownership, so free
		 * both the frame copy and the event wrapper here. */
		qdf_mem_free(rx_mgmt_event->rx_mgmt);
		qdf_mem_free(rx_mgmt_event);
		p2p_err("post msg fail:%d", status);
	}
	/* Frame contents were copied into rx_mgmt above, so the nbuf is
	 * released on every path. */
	qdf_nbuf_free(buf);

	return status;
}

/**
 * tgt_p2p_noa_event_cb() - NoA (notice of absence) event callback
 * @psoc: soc object
 * @event_info: NoA information; ownership is taken by this function —
 *              it is either handed to the posted event or freed on
 *              every error path (except the !event_info case).
 *
 * Posts a P2P_EVENT_NOA message to the P2P component.
 *
 * Return: scheduler post result, or an error on validation/allocation
 *         failure.
 */
QDF_STATUS tgt_p2p_noa_event_cb(struct wlan_objmgr_psoc *psoc,
	struct p2p_noa_info *event_info)
{
	struct p2p_noa_event *noa_event;
	struct scheduler_msg msg = {0};
	struct p2p_soc_priv_obj *p2p_soc_obj;
	QDF_STATUS status;

	p2p_debug("soc:%pK, event_info:%pK", psoc, event_info);

	if (!psoc) {
		p2p_err("psoc context passed is NULL");
		if (event_info)
			qdf_mem_free(event_info);
		return QDF_STATUS_E_INVAL;
	}

	p2p_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc,
			WLAN_UMAC_COMP_P2P);
	if (!p2p_soc_obj) {
		p2p_err("p2p soc object is NULL");
		if (event_info)
			qdf_mem_free(event_info);
		return QDF_STATUS_E_INVAL;
	}

	if (!event_info) {
		p2p_err("invalid noa event information");
		return QDF_STATUS_E_INVAL;
	}

	noa_event = qdf_mem_malloc(sizeof(*noa_event));
	if (!noa_event) {
		p2p_err("Failed to allocate p2p noa event");
		qdf_mem_free(event_info);
		return QDF_STATUS_E_NOMEM;
	}

	noa_event->p2p_soc_obj = p2p_soc_obj;
	noa_event->noa_info = event_info;
	msg.type = P2P_EVENT_NOA;
	msg.bodyptr = noa_event;
	msg.callback = p2p_process_evt;
	msg.flush_callback = p2p_event_flush_callback;
	status = scheduler_post_message(QDF_MODULE_ID_P2P,
					QDF_MODULE_ID_P2P,
					QDF_MODULE_ID_TARGET_IF,
					&msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		/* Post failed: event_info (== noa_event->noa_info) and the
		 * wrapper are freed here, matching the ownership contract. */
		qdf_mem_free(noa_event->noa_info);
		qdf_mem_free(noa_event);
		p2p_err("post msg fail:%d", status);
	}

	return status;
}

/* tgt_p2p_lo_event_cb(): listen-offload stop event callback; same
 * ownership pattern as tgt_p2p_noa_event_cb() above. */
QDF_STATUS tgt_p2p_lo_event_cb(struct wlan_objmgr_psoc *psoc,
	struct p2p_lo_event *event_info)
{
	struct p2p_lo_stop_event *lo_stop_event;
	struct scheduler_msg msg = {0};
	struct
p2p_soc_priv_obj *p2p_soc_obj; + QDF_STATUS status; + + p2p_debug("soc:%pK, event_info:%pK", psoc, event_info); + + if (!psoc) { + p2p_err("psoc context passed is NULL"); + if (event_info) + qdf_mem_free(event_info); + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_P2P); + if (!p2p_soc_obj) { + p2p_err("p2p soc object is NULL"); + if (event_info) + qdf_mem_free(event_info); + return QDF_STATUS_E_INVAL; + } + + if (!event_info) { + p2p_err("invalid lo stop event information"); + return QDF_STATUS_E_INVAL; + } + + lo_stop_event = qdf_mem_malloc(sizeof(*lo_stop_event)); + if (!lo_stop_event) { + p2p_err("Failed to allocate p2p lo stop event"); + qdf_mem_free(event_info); + return QDF_STATUS_E_NOMEM; + } + + lo_stop_event->p2p_soc_obj = p2p_soc_obj; + lo_stop_event->lo_event = event_info; + msg.type = P2P_EVENT_LO_STOPPED; + msg.bodyptr = lo_stop_event; + msg.callback = p2p_process_evt; + msg.flush_callback = p2p_event_flush_callback; + status = scheduler_post_msg(QDF_MODULE_ID_TARGET_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_mem_free(lo_stop_event->lo_event); + qdf_mem_free(lo_stop_event); + p2p_err("post msg fail:%d", status); + } + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/p2p/dispatcher/src/wlan_p2p_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/p2p/dispatcher/src/wlan_p2p_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..2cefa13dd5a6592cf4726acf07c4a8ccc235aef1 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/p2p/dispatcher/src/wlan_p2p_ucfg_api.c @@ -0,0 +1,650 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: This file contains p2p north bound interface definitions + */ + +#include +#include +#include +#include +#include "wlan_p2p_public_struct.h" +#include "wlan_p2p_ucfg_api.h" +#include "../../core/src/wlan_p2p_main.h" +#include "../../core/src/wlan_p2p_roc.h" +#include "../../core/src/wlan_p2p_off_chan_tx.h" + +static inline struct wlan_lmac_if_p2p_tx_ops * +ucfg_p2p_psoc_get_tx_ops(struct wlan_objmgr_psoc *psoc) +{ + return &(psoc->soc_cb.tx_ops.p2p); +} + +/** + * is_p2p_ps_allowed() - If P2P power save is allowed or not + * @vdev: vdev object + * @id: umac component id + * + * This function returns TRUE if P2P power-save is allowed + * else returns FALSE. 
+ * + * Return: bool + */ +static bool is_p2p_ps_allowed(struct wlan_objmgr_vdev *vdev, + enum wlan_umac_comp_id id) +{ + struct p2p_vdev_priv_obj *p2p_vdev_obj; + uint8_t is_p2pgo = 0; + + if (!vdev) { + p2p_err("vdev:%pK", vdev); + return true; + } + p2p_vdev_obj = wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_P2P); + + if (wlan_vdev_mlme_get_opmode(vdev) == QDF_P2P_GO_MODE) + is_p2pgo = 1; + + if (!p2p_vdev_obj || !is_p2pgo) { + p2p_err("p2p_vdev_obj:%pK is_p2pgo:%u", + p2p_vdev_obj, is_p2pgo); + return false; + } + if (p2p_vdev_obj->non_p2p_peer_count && + p2p_vdev_obj->noa_status == false) { + p2p_debug("non_p2p_peer_count: %u, noa_status: %d", + p2p_vdev_obj->non_p2p_peer_count, + p2p_vdev_obj->noa_status); + return false; + } + + return true; +} + +QDF_STATUS ucfg_p2p_init(void) +{ + return p2p_component_init(); +} + +QDF_STATUS ucfg_p2p_deinit(void) +{ + return p2p_component_deinit(); +} + +QDF_STATUS ucfg_p2p_psoc_open(struct wlan_objmgr_psoc *soc) +{ + return p2p_psoc_object_open(soc); +} + +QDF_STATUS ucfg_p2p_psoc_close(struct wlan_objmgr_psoc *soc) +{ + return p2p_psoc_object_close(soc); +} + +QDF_STATUS ucfg_p2p_psoc_start(struct wlan_objmgr_psoc *soc, + struct p2p_start_param *req) +{ + return p2p_psoc_start(soc, req); +} + +QDF_STATUS ucfg_p2p_psoc_stop(struct wlan_objmgr_psoc *soc) +{ + return p2p_psoc_stop(soc); +} + +QDF_STATUS ucfg_p2p_roc_req(struct wlan_objmgr_psoc *soc, + struct p2p_roc_req *roc_req, uint64_t *cookie) +{ + struct scheduler_msg msg = {0}; + struct p2p_soc_priv_obj *p2p_soc_obj; + struct p2p_roc_context *roc_ctx; + QDF_STATUS status; + int32_t id; + + p2p_debug("soc:%pK, vdev_id:%d, chan:%d, phy_mode:%d, duration:%d", + soc, roc_req->vdev_id, roc_req->chan, + roc_req->phy_mode, roc_req->duration); + + if (!soc) { + p2p_err("psoc context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(soc, + WLAN_UMAC_COMP_P2P); + if (!p2p_soc_obj) { + p2p_err("P2P soc 
object is NULL"); + return QDF_STATUS_E_FAILURE; + } + + roc_ctx = qdf_mem_malloc(sizeof(*roc_ctx)); + if (!roc_ctx) { + p2p_err("failed to allocate p2p roc context"); + return QDF_STATUS_E_NOMEM; + } + + status = qdf_idr_alloc(&p2p_soc_obj->p2p_idr, roc_ctx, &id); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_mem_free(roc_ctx); + p2p_err("failed to alloc idr, status %d", status); + return status; + } + + *cookie = (uint64_t)id; + roc_ctx->p2p_soc_obj = p2p_soc_obj; + roc_ctx->vdev_id = roc_req->vdev_id; + roc_ctx->chan = roc_req->chan; + roc_ctx->phy_mode = roc_req->phy_mode; + roc_ctx->duration = roc_req->duration; + roc_ctx->roc_state = ROC_STATE_IDLE; + roc_ctx->roc_type = USER_REQUESTED; + roc_ctx->id = id; + msg.type = P2P_ROC_REQ; + msg.bodyptr = roc_ctx; + msg.callback = p2p_process_cmd; + status = scheduler_post_message(QDF_MODULE_ID_HDD, + QDF_MODULE_ID_P2P, + QDF_MODULE_ID_OS_IF, + &msg); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_mem_free(roc_ctx); + qdf_idr_remove(&p2p_soc_obj->p2p_idr, id); + p2p_err("post msg fail:%d", status); + } + p2p_debug("cookie = 0x%llx", *cookie); + + return status; +} + +QDF_STATUS ucfg_p2p_roc_cancel_req(struct wlan_objmgr_psoc *soc, + uint64_t cookie) +{ + struct scheduler_msg msg = {0}; + struct p2p_soc_priv_obj *p2p_soc_obj; + struct cancel_roc_context *cancel_roc; + void *roc_ctx = NULL; + QDF_STATUS status; + + p2p_debug("soc:%pK, cookie:0x%llx", soc, cookie); + + if (!soc) { + p2p_err("psoc context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(soc, + WLAN_UMAC_COMP_P2P); + if (!p2p_soc_obj) { + p2p_err("p2p soc context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + status = qdf_idr_find(&p2p_soc_obj->p2p_idr, + cookie, &roc_ctx); + if (QDF_IS_STATUS_ERROR(status)) { + p2p_err("invalid id"); + return QDF_STATUS_E_INVAL; + } + + cancel_roc = qdf_mem_malloc(sizeof(*cancel_roc)); + if (!cancel_roc) { + p2p_err("failed to allocate cancel p2p roc"); + return 
QDF_STATUS_E_NOMEM; + } + + cancel_roc->p2p_soc_obj = p2p_soc_obj; + cancel_roc->cookie = (uintptr_t)roc_ctx; + msg.type = P2P_CANCEL_ROC_REQ; + msg.bodyptr = cancel_roc; + msg.callback = p2p_process_cmd; + status = scheduler_post_message(QDF_MODULE_ID_HDD, + QDF_MODULE_ID_P2P, + QDF_MODULE_ID_OS_IF, + &msg); + + if (QDF_IS_STATUS_ERROR(status)) { + qdf_mem_free(cancel_roc); + p2p_err("post msg fail:%d", status); + } + + return status; +} + +QDF_STATUS ucfg_p2p_cleanup_roc_by_vdev(struct wlan_objmgr_vdev *vdev) +{ + struct p2p_soc_priv_obj *p2p_soc_obj; + struct wlan_objmgr_psoc *psoc; + + p2p_debug("vdev:%pK", vdev); + + if (!vdev) { + p2p_err("null vdev"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + p2p_err("null psoc"); + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_P2P); + if (!p2p_soc_obj) { + p2p_err("p2p soc context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + return p2p_cleanup_roc_sync(p2p_soc_obj, vdev); +} + +QDF_STATUS ucfg_p2p_cleanup_roc_by_psoc(struct wlan_objmgr_psoc *psoc) +{ + struct p2p_soc_priv_obj *obj; + + if (!psoc) { + p2p_err("null psoc"); + return QDF_STATUS_E_INVAL; + } + + obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, WLAN_UMAC_COMP_P2P); + if (!obj) { + p2p_err("null p2p soc obj"); + return QDF_STATUS_E_FAILURE; + } + + return p2p_cleanup_roc_sync(obj, NULL); +} + +QDF_STATUS ucfg_p2p_cleanup_tx_by_vdev(struct wlan_objmgr_vdev *vdev) +{ + struct p2p_soc_priv_obj *obj; + struct wlan_objmgr_psoc *psoc; + + if (!vdev) { + p2p_err("null vdev"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + p2p_err("null psoc"); + return QDF_STATUS_E_INVAL; + } + + obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, WLAN_UMAC_COMP_P2P); + if (!obj) { + p2p_err("null p2p soc obj"); + return QDF_STATUS_E_FAILURE; + } + p2p_del_all_rand_mac_vdev(vdev); + + return p2p_cleanup_tx_sync(obj, vdev); +} + 
+QDF_STATUS ucfg_p2p_cleanup_tx_by_psoc(struct wlan_objmgr_psoc *psoc) +{ + struct p2p_soc_priv_obj *obj; + + if (!psoc) { + p2p_err("null psoc"); + return QDF_STATUS_E_INVAL; + } + + obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, WLAN_UMAC_COMP_P2P); + if (!obj) { + p2p_err("null p2p soc obj"); + return QDF_STATUS_E_FAILURE; + } + p2p_del_all_rand_mac_soc(psoc); + + return p2p_cleanup_tx_sync(obj, NULL); +} + +QDF_STATUS ucfg_p2p_mgmt_tx(struct wlan_objmgr_psoc *soc, + struct p2p_mgmt_tx *mgmt_frm, uint64_t *cookie) +{ + struct scheduler_msg msg = {0}; + struct p2p_soc_priv_obj *p2p_soc_obj; + struct tx_action_context *tx_action; + QDF_STATUS status; + int32_t id; + + p2p_debug("soc:%pK, vdev_id:%d, chan:%d, wait:%d, buf_len:%d, cck:%d, no ack:%d, off chan:%d", + soc, mgmt_frm->vdev_id, mgmt_frm->chan, + mgmt_frm->wait, mgmt_frm->len, mgmt_frm->no_cck, + mgmt_frm->dont_wait_for_ack, mgmt_frm->off_chan); + + if (!soc) { + p2p_err("psoc context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(soc, + WLAN_UMAC_COMP_P2P); + if (!p2p_soc_obj) { + p2p_err("P2P soc context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + tx_action = qdf_mem_malloc(sizeof(*tx_action)); + if (!tx_action) { + p2p_err("Failed to allocate tx action context"); + return QDF_STATUS_E_NOMEM; + } + + /* return cookie just for ota ack frames */ + if (mgmt_frm->dont_wait_for_ack) + id = 0; + else { + status = qdf_idr_alloc(&p2p_soc_obj->p2p_idr, + tx_action, &id); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_mem_free(tx_action); + p2p_err("failed to alloc idr, status :%d", status); + return status; + } + } + + *cookie = (uint64_t)id; + tx_action->p2p_soc_obj = p2p_soc_obj; + tx_action->vdev_id = mgmt_frm->vdev_id; + tx_action->chan = mgmt_frm->chan; + tx_action->duration = mgmt_frm->wait; + tx_action->buf_len = mgmt_frm->len; + tx_action->no_cck = mgmt_frm->no_cck; + tx_action->no_ack = mgmt_frm->dont_wait_for_ack; + tx_action->off_chan = 
mgmt_frm->off_chan; + tx_action->buf = qdf_mem_malloc(tx_action->buf_len); + if (!(tx_action->buf)) { + p2p_err("Failed to allocate buffer for action frame"); + qdf_mem_free(tx_action); + return QDF_STATUS_E_NOMEM; + } + qdf_mem_copy(tx_action->buf, mgmt_frm->buf, tx_action->buf_len); + tx_action->nbuf = NULL; + tx_action->id = id; + + p2p_rand_mac_tx(tx_action); + + msg.type = P2P_MGMT_TX; + msg.bodyptr = tx_action; + msg.callback = p2p_process_cmd; + msg.flush_callback = p2p_msg_flush_callback; + status = scheduler_post_message(QDF_MODULE_ID_HDD, + QDF_MODULE_ID_P2P, + QDF_MODULE_ID_OS_IF, + &msg); + if (QDF_IS_STATUS_ERROR(status)) { + if (id) + qdf_idr_remove(&p2p_soc_obj->p2p_idr, id); + qdf_mem_free(tx_action->buf); + qdf_mem_free(tx_action); + p2p_err("post msg fail:%d", status); + } + + return status; +} + +QDF_STATUS ucfg_p2p_mgmt_tx_cancel(struct wlan_objmgr_psoc *soc, + struct wlan_objmgr_vdev *vdev, uint64_t cookie) +{ + struct scheduler_msg msg = {0}; + struct p2p_soc_priv_obj *p2p_soc_obj; + struct cancel_roc_context *cancel_tx; + void *tx_ctx; + QDF_STATUS status; + + p2p_debug("soc:%pK, cookie:0x%llx", soc, cookie); + + if (!soc) { + p2p_err("psoc context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(soc, + WLAN_UMAC_COMP_P2P); + if (!p2p_soc_obj) { + p2p_err("p2p soc context is NULL"); + return QDF_STATUS_E_FAILURE; + } + + status = qdf_idr_find(&p2p_soc_obj->p2p_idr, + (int32_t)cookie, &tx_ctx); + if (QDF_IS_STATUS_ERROR(status)) { + p2p_debug("invalid id"); + return QDF_STATUS_E_INVAL; + } + p2p_del_random_mac(soc, wlan_vdev_get_id(vdev), cookie, 20); + + cancel_tx = qdf_mem_malloc(sizeof(*cancel_tx)); + if (!cancel_tx) { + p2p_err("Failed to allocate cancel p2p roc"); + return QDF_STATUS_E_NOMEM; + } + + cancel_tx->p2p_soc_obj = p2p_soc_obj; + cancel_tx->cookie = (uintptr_t)tx_ctx; + msg.type = P2P_MGMT_TX_CANCEL; + msg.bodyptr = cancel_tx; + msg.callback = p2p_process_cmd; + status = 
scheduler_post_message(QDF_MODULE_ID_HDD, + QDF_MODULE_ID_P2P, + QDF_MODULE_ID_OS_IF, + &msg); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_mem_free(cancel_tx); + p2p_err("post msg fail: %d", status); + } + + return status; +} + +bool ucfg_p2p_check_random_mac(struct wlan_objmgr_psoc *soc, uint32_t vdev_id, + uint8_t *random_mac_addr) +{ + return p2p_check_random_mac(soc, vdev_id, random_mac_addr); +} + +QDF_STATUS ucfg_p2p_set_ps(struct wlan_objmgr_psoc *soc, + struct p2p_ps_config *ps_config) +{ + struct wlan_lmac_if_p2p_tx_ops *p2p_ops; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + uint16_t obj_id; + struct wlan_objmgr_vdev *vdev; + struct p2p_ps_config go_ps_config; + + p2p_debug("soc:%pK, vdev_id:%d, opp_ps:%d, ct_window:%d, count:%d, duration:%d, duration:%d, ps_selection:%d", + soc, ps_config->vdev_id, ps_config->opp_ps, + ps_config->ct_window, ps_config->count, + ps_config->duration, ps_config->single_noa_duration, + ps_config->ps_selection); + + if (!soc) { + p2p_err("psoc context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + for (obj_id = 0; obj_id < WLAN_UMAC_PSOC_MAX_VDEVS; obj_id++) { + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(soc, obj_id, + WLAN_P2P_ID); + if (vdev) { + if (is_p2p_ps_allowed(vdev, WLAN_UMAC_COMP_P2P)) { + wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID); + break; + } + wlan_objmgr_vdev_release_ref(vdev, WLAN_P2P_ID); + p2p_debug("skip p2p set ps vdev %d, NoA is disabled as legacy STA is connected to GO.", + obj_id); + } + } + if (obj_id >= WLAN_UMAC_PSOC_MAX_VDEVS) { + p2p_debug("No GO found!"); + return QDF_STATUS_E_INVAL; + } + go_ps_config = *ps_config; + go_ps_config.vdev_id = obj_id; + + p2p_ops = ucfg_p2p_psoc_get_tx_ops(soc); + if (p2p_ops->set_ps) { + status = p2p_ops->set_ps(soc, &go_ps_config); + p2p_debug("p2p set ps vdev %d, status:%d", obj_id, status); + } + + return status; +} + +QDF_STATUS ucfg_p2p_lo_start(struct wlan_objmgr_psoc *soc, + struct p2p_lo_start *p2p_lo_start) +{ + struct wlan_lmac_if_p2p_tx_ops 
*p2p_ops; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + p2p_debug("soc:%pK, vdev_id:%d, ctl_flags:%d, freq:%d, period:%d, interval:%d, count:%d, dev_types_len:%d, probe_resp_len:%d, device_types:%pK, probe_resp_tmplt:%pK", + soc, p2p_lo_start->vdev_id, p2p_lo_start->ctl_flags, + p2p_lo_start->freq, p2p_lo_start->period, + p2p_lo_start->interval, p2p_lo_start->count, + p2p_lo_start->dev_types_len, p2p_lo_start->probe_resp_len, + p2p_lo_start->device_types, p2p_lo_start->probe_resp_tmplt); + + if (!soc) { + p2p_err("psoc context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + p2p_ops = ucfg_p2p_psoc_get_tx_ops(soc); + if (p2p_ops->lo_start) { + status = p2p_ops->lo_start(soc, p2p_lo_start); + p2p_debug("p2p lo start, status:%d", status); + } + + return status; +} + +QDF_STATUS ucfg_p2p_lo_stop(struct wlan_objmgr_psoc *soc, + uint32_t vdev_id) +{ + struct wlan_lmac_if_p2p_tx_ops *p2p_ops; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + p2p_debug("soc:%pK, vdev_id:%d", soc, vdev_id); + + if (!soc) { + p2p_err("psoc context passed is NULL"); + return QDF_STATUS_E_INVAL; + } + + p2p_ops = ucfg_p2p_psoc_get_tx_ops(soc); + if (p2p_ops->lo_stop) { + status = p2p_ops->lo_stop(soc, vdev_id); + p2p_debug("p2p lo stop, status:%d", status); + } + + return status; +} + +QDF_STATUS ucfg_p2p_set_noa(struct wlan_objmgr_psoc *soc, + uint32_t vdev_id, bool disable_noa) +{ + struct wlan_lmac_if_p2p_tx_ops *p2p_ops; + QDF_STATUS status = QDF_STATUS_E_INVAL; + + p2p_ops = ucfg_p2p_psoc_get_tx_ops(soc); + if (p2p_ops->set_noa) { + status = p2p_ops->set_noa(soc, vdev_id, disable_noa); + p2p_debug("p2p set noa, status:%d", status); + } + + return status; +} + +QDF_STATUS ucfg_p2p_register_callbacks(struct wlan_objmgr_psoc *soc, + struct p2p_protocol_callbacks *cb_obj) +{ + struct p2p_soc_priv_obj *p2p_soc_obj; + + if (!soc || !cb_obj) { + p2p_err("psoc %pM cb_obj %pM context passed is NULL", soc, + cb_obj); + return QDF_STATUS_E_INVAL; + } + + p2p_soc_obj = 
wlan_objmgr_psoc_get_comp_private_obj(soc, + WLAN_UMAC_COMP_P2P); + if (!p2p_soc_obj) { + p2p_err("p2p soc private object is NULL"); + return QDF_STATUS_E_FAILURE; + } + p2p_soc_obj->p2p_cb = *cb_obj; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_p2p_status_scan(struct wlan_objmgr_vdev *vdev) +{ + if (!vdev) { + p2p_err("vdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + return p2p_status_scan(vdev); +} + +QDF_STATUS ucfg_p2p_status_connect(struct wlan_objmgr_vdev *vdev) +{ + if (!vdev) { + p2p_err("vdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + return p2p_status_connect(vdev); +} + +QDF_STATUS ucfg_p2p_status_disconnect(struct wlan_objmgr_vdev *vdev) +{ + if (!vdev) { + p2p_err("vdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + return p2p_status_disconnect(vdev); +} + +QDF_STATUS ucfg_p2p_status_start_bss(struct wlan_objmgr_vdev *vdev) +{ + if (!vdev) { + p2p_err("vdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + return p2p_status_start_bss(vdev); +} + +QDF_STATUS ucfg_p2p_status_stop_bss(struct wlan_objmgr_vdev *vdev) +{ + if (!vdev) { + p2p_err("vdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + return p2p_status_stop_bss(vdev); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db.c new file mode 100644 index 0000000000000000000000000000000000000000..081a835889e168ef6d7f9a21fa6de391b4884fe3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db.c @@ -0,0 +1,1384 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_db.c + * This file implements QCA regulatory database. + * Current implementation conforms to database version 27. + */ + +#include <qdf_types.h> +#include <qdf_trace.h> +#include "reg_db.h" + +enum country_code { + CTRY_AFGHANISTAN = 4, + CTRY_ALBANIA = 8, + CTRY_ALGERIA = 12, + CTRY_AMERICAN_SAMOA = 16, + CTRY_ANGUILLA = 660, + CTRY_ARGENTINA = 32, + CTRY_ARMENIA = 51, + CTRY_MYANMAR = 104, + CTRY_ARUBA = 533, + CTRY_AUSTRALIA = 36, + CTRY_AUSTRIA = 40, + CTRY_AZERBAIJAN = 31, + CTRY_BAHAMAS = 44, + CTRY_BAHRAIN = 48, + CTRY_BANGLADESH = 50, + CTRY_BARBADOS = 52, + CTRY_BELARUS = 112, + CTRY_BELGIUM = 56, + CTRY_BELIZE = 84, + CTRY_BERMUDA = 60, + CTRY_BHUTAN = 64, + CTRY_BOLIVIA = 68, + CTRY_BOSNIA_HERZ = 70, + CTRY_BRAZIL = 76, + CTRY_BRUNEI_DARUSSALAM = 96, + CTRY_BULGARIA = 100, + CTRY_BURKINA_FASO = 854, + CTRY_CAMBODIA = 116, + CTRY_CANADA = 124, + CTRY_CAYMAN_ISLANDS = 136, + CTRY_CENTRAL_AFRICA_REPUBLIC = 140, + CTRY_CHAD = 148, + CTRY_CHILE = 152, + CTRY_CHINA = 156, + CTRY_CHRISTMAS_ISLAND = 162, + CTRY_COLOMBIA = 170, + CTRY_COSTA_RICA = 188, + CTRY_COTE_DIVOIRE = 384, + CTRY_CROATIA = 191, + CTRY_CYPRUS = 196, + CTRY_CZECH = 203, + CTRY_DENMARK = 208, + CTRY_DOMINICA = 212, + CTRY_DOMINICAN_REPUBLIC = 214, + CTRY_ECUADOR = 218, + CTRY_EGYPT = 818, + CTRY_EL_SALVADOR = 222, + CTRY_ESTONIA = 233, + CTRY_ETHIOPIA = 231, + CTRY_FINLAND = 246, + CTRY_FRANCE = 250, + CTRY_FRENCH_GUIANA = 254, + CTRY_FRENCH_POLYNESIA = 258, + CTRY_GEORGIA = 268, + 
CTRY_GERMANY = 276, + CTRY_GHANA = 288, + CTRY_GIBRALTAR = 292, + CTRY_GREECE = 300, + CTRY_GREENLAND = 304, + CTRY_GRENADA = 308, + CTRY_GUADELOUPE = 312, + CTRY_GUAM = 316, + CTRY_GUATEMALA = 320, + CTRY_GUYANA = 328, + CTRY_HAITI = 332, + CTRY_HONDURAS = 340, + CTRY_HONG_KONG = 344, + CTRY_HUNGARY = 348, + CTRY_ICELAND = 352, + CTRY_INDIA = 356, + CTRY_INDONESIA = 360, + CTRY_IRAQ = 368, + CTRY_IRELAND = 372, + CTRY_ISRAEL = 376, + CTRY_ITALY = 380, + CTRY_JAMAICA = 388, + CTRY_JORDAN = 400, + CTRY_KAZAKHSTAN = 398, + CTRY_KENYA = 404, + CTRY_KOREA_ROC = 410, + CTRY_KUWAIT = 414, + CTRY_LATVIA = 428, + CTRY_LEBANON = 422, + CTRY_LESOTHO = 426, + CTRY_LIECHTENSTEIN = 438, + CTRY_LITHUANIA = 440, + CTRY_LUXEMBOURG = 442, + CTRY_MACAU = 446, + CTRY_MACEDONIA = 807, + CTRY_MALAWI = 454, + CTRY_MALAYSIA = 458, + CTRY_MALDIVES = 462, + CTRY_MALTA = 470, + CTRY_MARSHALL_ISLANDS = 584, + CTRY_MARTINIQUE = 474, + CTRY_MAURITANIA = 478, + CTRY_MAURITIUS = 480, + CTRY_MAYOTTE = 175, + CTRY_MEXICO = 484, + CTRY_MICRONESIA = 583, + CTRY_MOLDOVA = 498, + CTRY_MONACO = 492, + CTRY_MONGOLIA = 496, + CTRY_MONTENEGRO = 499, + CTRY_MOROCCO = 504, + CTRY_NAMIBIA = 516, + CTRY_NEPAL = 524, + CTRY_NETHERLANDS = 528, + CTRY_NETHERLANDS_ANTILLES = 530, + CTRY_NEW_ZEALAND = 554, + CTRY_NIGERIA = 566, + CTRY_NORTHERN_MARIANA_ISLANDS = 580, + CTRY_NICARAGUA = 558, + CTRY_NORWAY = 578, + CTRY_OMAN = 512, + CTRY_PAKISTAN = 586, + CTRY_PALAU = 585, + CTRY_PANAMA = 591, + CTRY_PAPUA_NEW_GUINEA = 598, + CTRY_PARAGUAY = 600, + CTRY_PERU = 604, + CTRY_PHILIPPINES = 608, + CTRY_POLAND = 616, + CTRY_PORTUGAL = 620, + CTRY_PUERTO_RICO = 630, + CTRY_QATAR = 634, + CTRY_REUNION = 638, + CTRY_ROMANIA = 642, + CTRY_RUSSIA = 643, + CTRY_RWANDA = 646, + CTRY_SAINT_BARTHELEMY = 652, + CTRY_SAINT_KITTS_AND_NEVIS = 659, + CTRY_SAINT_LUCIA = 662, + CTRY_SAINT_MARTIN = 663, + CTRY_SAINT_PIERRE_AND_MIQUELON = 666, + CTRY_SAINT_VINCENT_AND_THE_GRENADIENS = 670, + CTRY_SAMOA = 882, + CTRY_SAUDI_ARABIA = 682, + 
CTRY_SENEGAL = 686, + CTRY_SERBIA = 688, + CTRY_SINGAPORE = 702, + CTRY_SLOVAKIA = 703, + CTRY_SLOVENIA = 705, + CTRY_SOUTH_AFRICA = 710, + CTRY_SPAIN = 724, + CTRY_SURINAME = 740, + CTRY_SRI_LANKA = 144, + CTRY_SWEDEN = 752, + CTRY_SWITZERLAND = 756, + CTRY_TAIWAN = 158, + CTRY_TANZANIA = 834, + CTRY_THAILAND = 764, + CTRY_TOGO = 768, + CTRY_TRINIDAD_Y_TOBAGO = 780, + CTRY_TUNISIA = 788, + CTRY_TURKEY = 792, + CTRY_TURKS_AND_CAICOS = 796, + CTRY_UGANDA = 800, + CTRY_UKRAINE = 804, + CTRY_UAE = 784, + CTRY_UNITED_KINGDOM = 826, + CTRY_UNITED_STATES = 840, + CTRY_URUGUAY = 858, + CTRY_UZBEKISTAN = 860, + CTRY_VANUATU = 548, + CTRY_VENEZUELA = 862, + CTRY_VIET_NAM = 704, + CTRY_VIRGIN_ISLANDS = 850, + CTRY_WALLIS_AND_FUTUNA = 876, + CTRY_YEMEN = 887, + CTRY_ZIMBABWE = 716, + CTRY_JAPAN = 392, + CTRY_JAPAN15 = 4015, + CTRY_XA = 4100, +}; + +enum reg_domain { + NULL1_WORLD = 0x03, + + FCC1_FCCA = 0x10, + FCC1_WORLD = 0x11, + FCC2_FCCA = 0x20, + FCC2_WORLD = 0x21, + FCC2_ETSIC = 0x22, + FCC3_FCCA = 0x3A, + FCC3_WORLD = 0x3B, + FCC3_ETSIC = 0x3F, + FCC4_FCCA = 0x12, + FCC5_FCCA = 0x13, + FCC6_WORLD = 0x23, + FCC6_FCCA = 0x14, + FCC8_FCCA = 0x16, + FCC9_FCCA = 0x17, + FCC10_FCCA = 0x18, + FCC11_WORLD = 0x19, + FCC13_WORLD = 0xE4, + FCC14_FCCB = 0xE6, + + ETSI1_WORLD = 0x37, + ETSI3_WORLD = 0x36, + ETSI4_WORLD = 0x30, + ETSI8_WORLD = 0x3D, + ETSI9_WORLD = 0x3E, + ETSI10_WORLD = 0x24, + ETSI10_FCCA = 0x25, + ETSI11_WORLD = 0x26, + ETSI12_WORLD = 0x28, + ETSI13_WORLD = 0x27, + ETSI14_WORLD = 0x29, + ETSI15_WORLD = 0x31, + + APL1_WORLD = 0x52, + APL1_ETSIC = 0x55, + APL2_WORLD = 0x45, + APL2_ETSIC = 0x56, + APL4_WORLD = 0x42, + APL6_WORLD = 0x5B, + APL8_WORLD = 0x5D, + APL9_WORLD = 0x5E, + APL10_WORLD = 0x5F, + APL11_FCCA = 0x4F, + APL12_WORLD = 0x51, + APL13_WORLD = 0x5A, + APL14_WORLD = 0x57, + APL15_WORLD = 0x59, + APL16_WORLD = 0x70, + APL17_ETSID = 0xE0, + APL19_ETSIC = 0x71, + APL20_WORLD = 0xE5, + APL23_WORLD = 0xE3, + + MKK3_MKKC = 0x82, + MKK5_MKKA = 0x99, + 
MKK5_MKKC = 0x88, + MKK11_MKKC = 0xD7, + MKK16_MKKC = 0xDF, + + WORLD_60 = 0x60, + WORLD_61 = 0x61, + WORLD_62 = 0x62, + WORLD_63 = 0x63, + WORLD_65 = 0x65, + WORLD_64 = 0x64, + WORLD_66 = 0x66, + WORLD_69 = 0x69, + WORLD_67 = 0x67, + WORLD_68 = 0x68, + WORLD_6A = 0x6A, + WORLD_6C = 0x6C, +}; + +#ifndef CONFIG_MCL_REGDB +const struct country_code_to_reg_domain g_all_countries[] = { + {CTRY_AFGHANISTAN, ETSI1_WORLD, "AF", 40, 160, 0}, + {CTRY_ALBANIA, ETSI1_WORLD, "AL", 40, 160, 0}, + {CTRY_ALGERIA, APL13_WORLD, "DZ", 40, 160, 0}, + {CTRY_AMERICAN_SAMOA, FCC3_FCCA, "AS", 40, 160, 0}, + {CTRY_ANGUILLA, ETSI1_WORLD, "AI", 40, 160, 0}, + {CTRY_ARGENTINA, APL16_WORLD, "AR", 40, 160, 0}, + {CTRY_ARMENIA, ETSI4_WORLD, "AM", 40, 20, 0}, + {CTRY_ARUBA, ETSI1_WORLD, "AW", 40, 160, 0}, + {CTRY_AUSTRALIA, FCC6_WORLD, "AU", 40, 160, 0}, + {CTRY_AUSTRIA, ETSI1_WORLD, "AT", 40, 160, 0}, + {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ", 40, 160, 0}, + {CTRY_BAHAMAS, FCC3_WORLD, "BS", 40, 160, 0}, + {CTRY_BAHRAIN, APL15_WORLD, "BH", 40, 20, 0}, + {CTRY_BANGLADESH, APL1_WORLD, "BD", 40, 160, 0}, + {CTRY_BARBADOS, FCC2_WORLD, "BB", 40, 160, 0}, + {CTRY_BELARUS, ETSI1_WORLD, "BY", 40, 160, 0}, + {CTRY_BELGIUM, ETSI1_WORLD, "BE", 40, 160, 0}, + {CTRY_BELIZE, ETSI8_WORLD, "BZ", 40, 160, 0}, + {CTRY_BERMUDA, FCC3_FCCA, "BM", 40, 160, 0}, + {CTRY_BHUTAN, ETSI1_WORLD, "BT", 40, 160, 0}, + {CTRY_BOLIVIA, APL8_WORLD, "BO", 40, 160, 0}, + {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA", 40, 160, 0}, + {CTRY_BRAZIL, FCC3_ETSIC, "BR", 40, 160, 0}, + {CTRY_BRUNEI_DARUSSALAM, APL6_WORLD, "BN", 40, 160, 0}, + {CTRY_BULGARIA, ETSI1_WORLD, "BG", 40, 160, 0}, + {CTRY_BURKINA_FASO, FCC3_WORLD, "BF", 40, 160, 0}, + {CTRY_CAMBODIA, ETSI1_WORLD, "KH", 40, 160, 0}, + {CTRY_CANADA, FCC6_FCCA, "CA", 40, 160, 0}, + {CTRY_CAYMAN_ISLANDS, FCC3_WORLD, "KY", 40, 160, 0}, + {CTRY_CENTRAL_AFRICA_REPUBLIC, FCC3_WORLD, "CF", 40, 40, 0}, + {CTRY_CHAD, ETSI1_WORLD, "TD", 40, 160, 0}, + {CTRY_CHILE, FCC13_WORLD, "CL", 40, 160, 0}, + 
{CTRY_CHINA, APL14_WORLD, "CN", 40, 160, 0}, + {CTRY_CHRISTMAS_ISLAND, FCC3_WORLD, "CX", 40, 160, 0}, + {CTRY_COLOMBIA, FCC3_WORLD, "CO", 40, 160, 0}, + {CTRY_COSTA_RICA, FCC3_WORLD, "CR", 40, 160, 0}, + {CTRY_COTE_DIVOIRE, FCC3_WORLD, "CI", 40, 160, 0}, + {CTRY_CROATIA, ETSI1_WORLD, "HR", 40, 160, 0}, + {CTRY_CYPRUS, ETSI1_WORLD, "CY", 40, 160, 0}, + {CTRY_CZECH, ETSI1_WORLD, "CZ", 40, 160, 0}, + {CTRY_DENMARK, ETSI1_WORLD, "DK", 40, 160, 0}, + {CTRY_DOMINICA, FCC1_FCCA, "DM", 40, 160, 0}, + {CTRY_DOMINICAN_REPUBLIC, FCC1_FCCA, "DO", 40, 160, 0}, + {CTRY_ECUADOR, FCC3_WORLD, "EC", 40, 20, 0}, + {CTRY_EGYPT, ETSI3_WORLD, "EG", 40, 160, 0}, + {CTRY_EL_SALVADOR, FCC1_WORLD, "SV", 40, 20, 0}, + {CTRY_ESTONIA, ETSI1_WORLD, "EE", 40, 160, 0}, + {CTRY_ETHIOPIA, ETSI1_WORLD, "ET", 40, 160, 0}, + {CTRY_FINLAND, ETSI1_WORLD, "FI", 40, 160, 0}, + {CTRY_FRANCE, ETSI1_WORLD, "FR", 40, 160, 0}, + {CTRY_FRENCH_GUIANA, ETSI1_WORLD, "GF", 40, 160, 0}, + {CTRY_FRENCH_POLYNESIA, ETSI1_WORLD, "PF", 40, 160, 0}, + {CTRY_GEORGIA, ETSI4_WORLD, "GE", 40, 160, 0}, + {CTRY_GERMANY, ETSI1_WORLD, "DE", 40, 160, 0}, + {CTRY_GHANA, FCC3_WORLD, "GH", 40, 160, 0}, + {CTRY_GIBRALTAR, ETSI1_WORLD, "GI", 40, 160, 0}, + {CTRY_GREECE, ETSI1_WORLD, "GR", 40, 160, 0}, + {CTRY_GREENLAND, ETSI1_WORLD, "GL", 40, 160, 0}, + {CTRY_GRENADA, FCC3_FCCA, "GD", 40, 160, 0}, + {CTRY_GUADELOUPE, ETSI1_WORLD, "GP", 40, 160, 0}, + {CTRY_GUAM, FCC3_FCCA, "GU", 40, 160, 0}, + {CTRY_GUATEMALA, ETSI1_WORLD, "GT", 40, 160, 0}, + {CTRY_GUYANA, APL1_ETSIC, "GY", 40, 160, 0}, + {CTRY_HAITI, FCC3_FCCA, "HT", 40, 160, 0}, + {CTRY_HONDURAS, FCC3_WORLD, "HN", 40, 160, 0}, + {CTRY_HONG_KONG, FCC3_WORLD, "HK", 40, 160, 0}, + {CTRY_HUNGARY, ETSI1_WORLD, "HU", 40, 160, 0}, + {CTRY_ICELAND, ETSI1_WORLD, "IS", 40, 160, 0}, + {CTRY_INDIA, APL19_ETSIC, "IN", 40, 160, 0}, + {CTRY_INDONESIA, APL2_ETSIC, "ID", 40, 20, 0}, + {CTRY_IRAQ, ETSI1_WORLD, "IQ", 40, 160, 0}, + {CTRY_IRELAND, ETSI1_WORLD, "IE", 40, 160, 0}, + {CTRY_ISRAEL, 
ETSI3_WORLD, "IL", 40, 160, 0}, + {CTRY_ITALY, ETSI1_WORLD, "IT", 40, 160, 0}, + {CTRY_JAMAICA, FCC13_WORLD, "JM", 40, 160, 0}, + {CTRY_JORDAN, APL4_WORLD, "JO", 40, 160, 0}, + {CTRY_KAZAKHSTAN, NULL1_WORLD, "KZ", 40, 0, 0}, + {CTRY_KENYA, APL12_WORLD, "KE", 40, 160, 0}, + {CTRY_KOREA_ROC, APL9_WORLD, "KR", 40, 160, 0}, + {CTRY_KUWAIT, ETSI3_WORLD, "KW", 40, 160, 0}, + {CTRY_LATVIA, ETSI1_WORLD, "LV", 40, 160, 0}, + {CTRY_LEBANON, FCC3_WORLD, "LB", 40, 160, 0}, + {CTRY_LESOTHO, ETSI1_WORLD, "LS", 40, 160, 0}, + {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI", 40, 160, 0}, + {CTRY_LITHUANIA, ETSI1_WORLD, "LT", 40, 160, 0}, + {CTRY_LUXEMBOURG, ETSI1_WORLD, "LU", 40, 160, 0}, + {CTRY_MACAU, FCC3_WORLD, "MO", 40, 160, 0}, + {CTRY_MACEDONIA, ETSI1_WORLD, "MK", 40, 160, 0}, + {CTRY_MALAWI, ETSI1_WORLD, "MW", 40, 160, 0}, + {CTRY_MALAYSIA, FCC11_WORLD, "MY", 40, 160, 0}, + {CTRY_MALDIVES, APL6_WORLD, "MV", 40, 160, 0}, + {CTRY_MALTA, ETSI1_WORLD, "MT", 40, 160, 0}, + {CTRY_MARSHALL_ISLANDS, FCC3_FCCA, "MH", 40, 160, 0}, + {CTRY_MARTINIQUE, ETSI1_WORLD, "MQ", 40, 160, 0}, + {CTRY_MAURITANIA, ETSI1_WORLD, "MR", 40, 160, 0}, + {CTRY_MAURITIUS, ETSI1_WORLD, "MU", 40, 160, 0}, + {CTRY_MAYOTTE, ETSI1_WORLD, "YT", 40, 160, 0}, + {CTRY_MEXICO, FCC3_ETSIC, "MX", 40, 160, 0}, + {CTRY_MICRONESIA, FCC3_FCCA, "FM", 40, 160, 0}, + {CTRY_MOLDOVA, ETSI1_WORLD, "MD", 40, 160, 0}, + {CTRY_MONACO, ETSI1_WORLD, "MC", 40, 160, 0}, + {CTRY_MONGOLIA, FCC3_WORLD, "MN", 40, 160, 0}, + {CTRY_MONTENEGRO, ETSI1_WORLD, "ME", 40, 160, 0}, + {CTRY_MOROCCO, ETSI3_WORLD, "MA", 40, 160, 0}, + {CTRY_MYANMAR, APL1_WORLD, "MM", 40, 160, 0}, + {CTRY_NAMIBIA, APL20_WORLD, "NA", 40, 160, 0}, + {CTRY_NEPAL, APL23_WORLD, "NP", 40, 160, 0}, + {CTRY_NETHERLANDS, ETSI1_WORLD, "NL", 40, 160, 0}, + {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN", 40, 160, 0}, + {CTRY_NEW_ZEALAND, FCC3_ETSIC, "NZ", 40, 160, 0}, + {CTRY_NIGERIA, APL8_WORLD, "NG", 40, 160, 0}, + {CTRY_NORTHERN_MARIANA_ISLANDS, FCC3_FCCA, "MP", 40, 160, 0}, + 
{CTRY_NICARAGUA, FCC3_FCCA, "NI", 40, 160, 0}, + {CTRY_NORWAY, ETSI1_WORLD, "NO", 40, 160, 0}, + {CTRY_OMAN, ETSI1_WORLD, "OM", 40, 160, 0}, + {CTRY_PAKISTAN, APL1_ETSIC, "PK", 40, 160, 0}, + {CTRY_PALAU, FCC3_FCCA, "PW", 40, 160, 0}, + {CTRY_PANAMA, FCC14_FCCB, "PA", 40, 160, 0}, + {CTRY_PAPUA_NEW_GUINEA, FCC3_WORLD, "PG", 40, 160, 0}, + {CTRY_PARAGUAY, FCC3_WORLD, "PY", 40, 160, 0}, + {CTRY_PERU, FCC3_WORLD, "PE", 40, 160, 0}, + {CTRY_PHILIPPINES, FCC3_WORLD, "PH", 40, 160, 0}, + {CTRY_POLAND, ETSI1_WORLD, "PL", 40, 160, 0}, + {CTRY_PORTUGAL, ETSI1_WORLD, "PT", 40, 160, 0}, + {CTRY_PUERTO_RICO, FCC3_FCCA, "PR", 40, 160, 0}, + {CTRY_QATAR, ETSI14_WORLD, "QA", 40, 160, 0}, + {CTRY_REUNION, ETSI1_WORLD, "RE", 40, 160, 0}, + {CTRY_ROMANIA, ETSI1_WORLD, "RO", 40, 160, 0}, + {CTRY_RUSSIA, ETSI8_WORLD, "RU", 40, 160, 0}, + {CTRY_RWANDA, FCC3_WORLD, "RW", 40, 160, 0}, + {CTRY_SAINT_BARTHELEMY, ETSI1_WORLD, "BL", 40, 160, 0}, + {CTRY_SAINT_KITTS_AND_NEVIS, APL10_WORLD, "KN", 40, 160, 0}, + {CTRY_SAINT_LUCIA, APL10_WORLD, "LC", 40, 160, 0}, + {CTRY_SAINT_MARTIN, ETSI1_WORLD, "MF", 40, 160, 0}, + {CTRY_SAINT_PIERRE_AND_MIQUELON, ETSI1_WORLD, "PM", 40, 160, 0}, + {CTRY_SAINT_VINCENT_AND_THE_GRENADIENS, ETSI1_WORLD, + "VC" , 40, 160, 0}, + {CTRY_SAMOA, ETSI1_WORLD, "WS", 40, 40, 0}, + {CTRY_SAUDI_ARABIA, ETSI15_WORLD, "SA", 40, 160, 0}, + {CTRY_SENEGAL, FCC13_WORLD, "SN", 40, 160, 0}, + {CTRY_SERBIA, ETSI1_WORLD, "RS", 40, 160, 0}, + {CTRY_SINGAPORE, FCC3_WORLD, "SG", 40, 160, 0}, + {CTRY_SLOVAKIA, ETSI1_WORLD, "SK", 40, 160, 0}, + {CTRY_SLOVENIA, ETSI1_WORLD, "SI", 40, 160, 0}, + {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA", 40, 160, 0}, + {CTRY_SPAIN, ETSI1_WORLD, "ES", 40, 160, 0}, + {CTRY_SURINAME, ETSI1_WORLD, "SR", 40, 160, 0}, + {CTRY_SRI_LANKA, FCC3_WORLD, "LK", 40, 20, 0}, + {CTRY_SWEDEN, ETSI1_WORLD, "SE", 40, 160, 0}, + {CTRY_SWITZERLAND, ETSI1_WORLD, "CH", 40, 160, 0}, + {CTRY_TAIWAN, FCC3_FCCA, "TW", 40, 160, 0}, + {CTRY_TANZANIA, APL1_WORLD, "TZ", 40, 160, 0}, + 
{CTRY_THAILAND, FCC3_WORLD, "TH", 40, 160, 0}, + {CTRY_TOGO, ETSI1_WORLD, "TG", 40, 40, 0}, + {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT", 40, 160, 0}, + {CTRY_TUNISIA, ETSI3_WORLD, "TN", 40, 160, 0}, + {CTRY_TURKEY, ETSI1_WORLD, "TR", 40, 160, 0}, + {CTRY_TURKS_AND_CAICOS, FCC3_WORLD, "TC", 40, 160, 0}, + {CTRY_UGANDA, FCC3_WORLD, "UG", 40, 160, 0}, + {CTRY_UKRAINE, ETSI9_WORLD, "UA", 40, 160, 0}, + {CTRY_UAE, FCC3_WORLD, "AE", 40, 160, 0}, + {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB", 40, 160, 0}, + {CTRY_UNITED_STATES, FCC8_FCCA, "US", 40, 160, 0}, + {CTRY_URUGUAY, FCC2_WORLD, "UY", 40, 160, 0}, + {CTRY_UZBEKISTAN, ETSI3_WORLD, "UZ", 40, 160, 0}, + {CTRY_VANUATU, FCC3_WORLD, "VU", 40, 160, 0}, + {CTRY_VENEZUELA, FCC2_ETSIC, "VE", 40, 160, 0}, + {CTRY_VIET_NAM, FCC3_WORLD, "VN", 40, 80, 0}, + {CTRY_VIRGIN_ISLANDS, FCC3_FCCA, "VI", 40, 160, 0}, + {CTRY_WALLIS_AND_FUTUNA, ETSI1_WORLD, "WF", 40, 160, 0}, + {CTRY_YEMEN, NULL1_WORLD, "YE", 40, 0, 0}, + {CTRY_ZIMBABWE, ETSI1_WORLD, "ZW", 40, 160, 0}, + {CTRY_JAPAN, MKK5_MKKC, "JP", 40, 160, 0}, + {CTRY_JAPAN15, MKK5_MKKC, "JP", 40, 160, 0}, +}; +#else +#ifdef WLAN_FEATURE_DSRC +const struct country_code_to_reg_domain g_all_countries[] = { + {CTRY_AFGHANISTAN, ETSI1_WORLD, "AF", 40, 160, 0}, + {CTRY_ALBANIA, ETSI13_WORLD, "AL", 40, 160, 0}, + {CTRY_ALGERIA, APL13_WORLD, "DZ", 40, 160, 0}, + {CTRY_AMERICAN_SAMOA, FCC3_FCCA, "AS", 40, 160, 0}, + {CTRY_ANGUILLA, ETSI1_WORLD, "AI", 40, 160, 0}, + {CTRY_ARGENTINA, APL17_ETSID, "AR", 40, 160, 0}, + {CTRY_ARMENIA, ETSI4_WORLD, "AM", 40, 20, 0}, + {CTRY_ARUBA, ETSI1_WORLD, "AW", 40, 160, 0}, + {CTRY_AUSTRALIA, FCC6_WORLD, "AU", 40, 160, 0}, + {CTRY_AUSTRIA, ETSI10_WORLD, "AT", 40, 160, 0}, + {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ", 40, 160, 0}, + {CTRY_BAHAMAS, FCC3_WORLD, "BS", 40, 160, 0}, + {CTRY_BAHRAIN, APL15_WORLD, "BH", 40, 20, 0}, + {CTRY_BANGLADESH, APL1_WORLD, "BD", 40, 160, 0}, + {CTRY_BARBADOS, FCC2_WORLD, "BB", 40, 160, 0}, + {CTRY_BELARUS, ETSI1_WORLD, "BY", 40, 160, 0}, + 
{CTRY_BELGIUM, ETSI10_WORLD, "BE", 40, 160, 0}, + {CTRY_BELIZE, ETSI8_WORLD, "BZ", 40, 160, 0}, + {CTRY_BERMUDA, FCC3_FCCA, "BM", 40, 160, 0}, + {CTRY_BHUTAN, ETSI1_WORLD, "BT", 40, 160, 0}, + {CTRY_BOLIVIA, APL8_WORLD, "BO", 40, 160, 0}, + {CTRY_BOSNIA_HERZ, ETSI13_WORLD, "BA", 40, 160, 0}, + {CTRY_BRAZIL, FCC3_ETSIC, "BR", 40, 160, 0}, + {CTRY_BRUNEI_DARUSSALAM, APL6_WORLD, "BN", 40, 160, 0}, + {CTRY_BULGARIA, ETSI10_WORLD, "BG", 40, 160, 0}, + {CTRY_BURKINA_FASO, FCC3_WORLD, "BF", 40, 160, 0}, + {CTRY_CAMBODIA, ETSI1_WORLD, "KH", 40, 160, 0}, + {CTRY_CANADA, FCC3_FCCA, "CA", 40, 160, 0}, + {CTRY_CAYMAN_ISLANDS, FCC3_WORLD, "KY", 40, 160, 0}, + {CTRY_CENTRAL_AFRICA_REPUBLIC, FCC3_WORLD, "CF", 40, 40, 0}, + {CTRY_CHAD, ETSI1_WORLD, "TD", 40, 160, 0}, + {CTRY_CHILE, FCC13_WORLD, "CL", 40, 160, 0}, + {CTRY_CHINA, APL14_WORLD, "CN", 40, 160, 0}, + {CTRY_CHRISTMAS_ISLAND, FCC3_WORLD, "CX", 40, 160, 0}, + {CTRY_COLOMBIA, FCC3_WORLD, "CO", 40, 160, 0}, + {CTRY_COSTA_RICA, FCC3_WORLD, "CR", 40, 160, 0}, + {CTRY_COTE_DIVOIRE, FCC3_WORLD, "CI", 40, 160, 0}, + {CTRY_CROATIA, ETSI10_WORLD, "HR", 40, 160, 0}, + {CTRY_CYPRUS, ETSI10_WORLD, "CY", 40, 160, 0}, + {CTRY_CZECH, ETSI10_WORLD, "CZ", 40, 160, 0}, + {CTRY_DENMARK, ETSI10_WORLD, "DK", 40, 160, 0}, + {CTRY_DOMINICA, FCC2_FCCA, "DM", 40, 160, 0}, + {CTRY_DOMINICAN_REPUBLIC, FCC2_FCCA, "DO", 40, 160, 0}, + {CTRY_ECUADOR, FCC3_WORLD, "EC", 40, 20, 0}, + {CTRY_EGYPT, ETSI3_WORLD, "EG", 40, 160, 0}, + {CTRY_EL_SALVADOR, FCC2_WORLD, "SV", 40, 20, 0}, + {CTRY_ESTONIA, ETSI10_WORLD, "EE", 40, 160, 0}, + {CTRY_ETHIOPIA, ETSI1_WORLD, "ET", 40, 160, 0}, + {CTRY_FINLAND, ETSI10_WORLD, "FI", 40, 160, 0}, + {CTRY_FRANCE, ETSI10_WORLD, "FR", 40, 160, 0}, + {CTRY_FRENCH_GUIANA, ETSI10_WORLD, "GF", 40, 160, 0}, + {CTRY_FRENCH_POLYNESIA, ETSI10_WORLD, "PF", 40, 160, 0}, + {CTRY_GEORGIA, ETSI4_WORLD, "GE", 40, 160, 0}, + {CTRY_GERMANY, ETSI10_WORLD, "DE", 40, 160, 0}, + {CTRY_GHANA, FCC3_WORLD, "GH", 40, 160, 0}, + {CTRY_GIBRALTAR, 
ETSI10_WORLD, "GI", 40, 160, 0}, + {CTRY_GREECE, ETSI10_WORLD, "GR", 40, 160, 0}, + {CTRY_GREENLAND, ETSI10_WORLD, "GL", 40, 160, 0}, + {CTRY_GRENADA, FCC3_FCCA, "GD", 40, 160, 0}, + {CTRY_GUADELOUPE, ETSI1_WORLD, "GP", 40, 160, 0}, + {CTRY_GUAM, FCC10_FCCA, "GU", 40, 160, 0}, + {CTRY_GUATEMALA, ETSI1_WORLD, "GT", 40, 160, 0}, + {CTRY_GUYANA, APL1_ETSIC, "GY", 40, 160, 0}, + {CTRY_HAITI, FCC3_FCCA, "HT", 40, 160, 0}, + {CTRY_HONDURAS, FCC13_WORLD, "HN", 40, 160, 0}, + {CTRY_HONG_KONG, FCC3_WORLD, "HK", 40, 160, 0}, + {CTRY_HUNGARY, ETSI10_WORLD, "HU", 40, 160, 0}, + {CTRY_ICELAND, ETSI10_WORLD, "IS", 40, 160, 0}, + {CTRY_INDIA, APL15_WORLD, "IN", 40, 160, 0}, + {CTRY_INDONESIA, APL2_ETSIC, "ID", 40, 20, 0}, + {CTRY_IRAQ, ETSI1_WORLD, "IQ", 40, 160, 0}, + {CTRY_IRELAND, ETSI10_WORLD, "IE", 40, 160, 0}, + {CTRY_ISRAEL, ETSI3_WORLD, "IL", 40, 160, 0}, + {CTRY_ITALY, ETSI10_WORLD, "IT", 40, 160, 0}, + {CTRY_JAMAICA, FCC13_WORLD, "JM", 40, 160, 0}, + {CTRY_JORDAN, APL4_WORLD, "JO", 40, 160, 0}, + {CTRY_KAZAKHSTAN, NULL1_WORLD, "KZ", 40, 0, 0}, + {CTRY_KENYA, APL12_WORLD, "KE", 40, 160, 0}, + {CTRY_KOREA_ROC, APL9_WORLD, "KR", 40, 160, 0}, + {CTRY_KUWAIT, ETSI3_WORLD, "KW", 40, 160, 0}, + {CTRY_LATVIA, ETSI10_WORLD, "LV", 40, 160, 0}, + {CTRY_LEBANON, FCC3_WORLD, "LB", 40, 160, 0}, + {CTRY_LESOTHO, ETSI1_WORLD, "LS", 40, 160, 0}, + {CTRY_LIECHTENSTEIN, ETSI10_WORLD, "LI", 40, 160, 0}, + {CTRY_LITHUANIA, ETSI10_WORLD, "LT", 40, 160, 0}, + {CTRY_LUXEMBOURG, ETSI10_WORLD, "LU", 40, 160, 0}, + {CTRY_MACAU, FCC3_WORLD, "MO", 40, 160, 0}, + {CTRY_MACEDONIA, ETSI13_WORLD, "MK", 40, 160, 0}, + {CTRY_MALAWI, ETSI1_WORLD, "MW", 40, 160, 0}, + {CTRY_MALAYSIA, FCC11_WORLD, "MY", 40, 160, 0}, + {CTRY_MALDIVES, APL6_WORLD, "MV", 40, 160, 0}, + {CTRY_MALTA, ETSI10_WORLD, "MT", 40, 160, 0}, + {CTRY_MARSHALL_ISLANDS, FCC3_FCCA, "MH", 40, 160, 0}, + {CTRY_MARTINIQUE, ETSI10_WORLD, "MQ", 40, 160, 0}, + {CTRY_MAURITANIA, ETSI1_WORLD, "MR", 40, 160, 0}, + {CTRY_MAURITIUS, ETSI13_WORLD, "MU", 
40, 160, 0}, + {CTRY_MAYOTTE, ETSI1_WORLD, "YT", 40, 160, 0}, + {CTRY_MEXICO, FCC3_ETSIC, "MX", 40, 160, 0}, + {CTRY_MICRONESIA, FCC3_FCCA, "FM", 40, 160, 0}, + {CTRY_MOLDOVA, ETSI13_WORLD, "MD", 40, 160, 0}, + {CTRY_MONACO, ETSI10_WORLD, "MC", 40, 160, 0}, + {CTRY_MONGOLIA, FCC3_WORLD, "MN", 40, 160, 0}, + {CTRY_MONTENEGRO, ETSI10_WORLD, "ME", 40, 160, 0}, + {CTRY_MOROCCO, ETSI3_WORLD, "MA", 40, 160, 0}, + {CTRY_MYANMAR, APL1_WORLD, "MM", 40, 160, 0}, + {CTRY_NAMIBIA, APL20_WORLD, "NA", 40, 160, 0}, + {CTRY_NEPAL, APL23_WORLD, "NP", 40, 160, 0}, + {CTRY_NETHERLANDS, ETSI10_WORLD, "NL", 40, 160, 0}, + {CTRY_NETHERLANDS_ANTILLES, ETSI10_WORLD, "AN", 40, 160, 0}, + {CTRY_NEW_ZEALAND, FCC3_ETSIC, "NZ", 40, 160, 0}, + {CTRY_NIGERIA, APL8_WORLD, "NG", 40, 160, 0}, + {CTRY_NORTHERN_MARIANA_ISLANDS, FCC10_FCCA, "MP", 40, 160, 0}, + {CTRY_NICARAGUA, FCC3_FCCA, "NI", 40, 160, 0}, + {CTRY_NORWAY, ETSI10_WORLD, "NO", 40, 160, 0}, + {CTRY_OMAN, ETSI1_WORLD, "OM", 40, 160, 0}, + {CTRY_PAKISTAN, APL1_ETSIC, "PK", 40, 160, 0}, + {CTRY_PALAU, FCC3_FCCA, "PW", 40, 160, 0}, + {CTRY_PANAMA, FCC14_FCCB, "PA", 40, 160, 0}, + {CTRY_PAPUA_NEW_GUINEA, FCC3_WORLD, "PG", 40, 160, 0}, + {CTRY_PARAGUAY, FCC3_WORLD, "PY", 40, 160, 0}, + {CTRY_PERU, FCC3_WORLD, "PE", 40, 160, 0}, + {CTRY_PHILIPPINES, FCC3_WORLD, "PH", 40, 160, 0}, + {CTRY_POLAND, ETSI10_WORLD, "PL", 40, 160, 0}, + {CTRY_PORTUGAL, ETSI10_WORLD, "PT", 40, 160, 0}, + {CTRY_PUERTO_RICO, FCC10_FCCA, "PR", 40, 160, 0}, + {CTRY_QATAR, ETSI14_WORLD, "QA", 40, 160, 0}, + {CTRY_REUNION, ETSI1_WORLD, "RE", 40, 160, 0}, + {CTRY_ROMANIA, ETSI10_WORLD, "RO", 40, 160, 0}, + {CTRY_RUSSIA, ETSI8_WORLD, "RU", 40, 160, 0}, + {CTRY_RWANDA, FCC3_WORLD, "RW", 40, 160, 0}, + {CTRY_SAINT_BARTHELEMY, ETSI1_WORLD, "BL", 40, 160, 0}, + {CTRY_SAINT_KITTS_AND_NEVIS, APL10_WORLD, "KN", 40, 160, 0}, + {CTRY_SAINT_LUCIA, APL10_WORLD, "LC", 40, 160, 0}, + {CTRY_SAINT_MARTIN, ETSI1_WORLD, "MF", 40, 160, 0}, + {CTRY_SAINT_PIERRE_AND_MIQUELON, ETSI13_WORLD, "PM", 
40, 160, 0}, + {CTRY_SAINT_VINCENT_AND_THE_GRENADIENS, ETSI13_WORLD, "VC", + 40, 160, 0}, + {CTRY_SAMOA, ETSI1_WORLD, "WS", 40, 40, 0}, + {CTRY_SAUDI_ARABIA, ETSI15_WORLD, "SA", 40, 160, 0}, + {CTRY_SENEGAL, FCC13_WORLD, "SN", 40, 160, 0}, + {CTRY_SERBIA, ETSI13_WORLD, "RS", 40, 160, 0}, + {CTRY_SINGAPORE, FCC3_WORLD, "SG", 40, 160, 0}, + {CTRY_SLOVAKIA, ETSI10_WORLD, "SK", 40, 160, 0}, + {CTRY_SLOVENIA, ETSI10_WORLD, "SI", 40, 160, 0}, + {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA", 40, 160, 0}, + {CTRY_SPAIN, ETSI10_WORLD, "ES", 40, 160, 0}, + {CTRY_SURINAME, ETSI1_WORLD, "SR", 40, 160, 0}, + {CTRY_SRI_LANKA, FCC3_WORLD, "LK", 40, 20, 0}, + {CTRY_SWEDEN, ETSI10_WORLD, "SE", 40, 160, 0}, + {CTRY_SWITZERLAND, ETSI10_WORLD, "CH", 40, 160, 0}, + {CTRY_TAIWAN, FCC3_FCCA, "TW", 40, 160, 0}, + {CTRY_TANZANIA, APL1_WORLD, "TZ", 40, 160, 0}, + {CTRY_THAILAND, FCC3_WORLD, "TH", 40, 160, 0}, + {CTRY_TOGO, ETSI1_WORLD, "TG", 40, 40, 0}, + {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT", 40, 160, 0}, + {CTRY_TUNISIA, ETSI3_WORLD, "TN", 40, 160, 0}, + {CTRY_TURKEY, ETSI13_WORLD, "TR", 40, 160, 0}, + {CTRY_TURKS_AND_CAICOS, FCC3_WORLD, "TC", 40, 160, 0}, + {CTRY_UGANDA, FCC3_WORLD, "UG", 40, 160, 0}, + {CTRY_UKRAINE, ETSI9_WORLD, "UA", 40, 160, 0}, + {CTRY_UAE, FCC3_WORLD, "AE", 40, 160, 0}, + {CTRY_UNITED_KINGDOM, ETSI10_WORLD, "GB", 40, 160, 0}, + {CTRY_UNITED_STATES, FCC10_FCCA, "US", 40, 160, 0}, + {CTRY_URUGUAY, FCC2_WORLD, "UY", 40, 160, 0}, + {CTRY_UZBEKISTAN, ETSI3_WORLD, "UZ", 40, 160, 0}, + {CTRY_VANUATU, FCC3_WORLD, "VU", 40, 160, 0}, + {CTRY_VENEZUELA, FCC2_ETSIC, "VE", 40, 160, 0}, + {CTRY_VIET_NAM, FCC3_WORLD, "VN", 40, 80, 0}, + {CTRY_VIRGIN_ISLANDS, FCC10_FCCA, "VI", 40, 160, 0}, + {CTRY_WALLIS_AND_FUTUNA, ETSI1_WORLD, "WF", 40, 160, 0}, + {CTRY_YEMEN, NULL1_WORLD, "YE", 40, 0, 0}, + {CTRY_ZIMBABWE, ETSI1_WORLD, "ZW", 40, 160, 0}, + {CTRY_JAPAN, MKK5_MKKC, "JP", 40, 160, 0}, + {CTRY_XA, MKK5_MKKA, "XA", 40, 160, 0}, +}; +#else +const struct country_code_to_reg_domain 
g_all_countries[] = { + {CTRY_AFGHANISTAN, ETSI1_WORLD, "AF", 40, 160, 0}, + {CTRY_ALBANIA, ETSI13_WORLD, "AL", 40, 160, 0}, + {CTRY_ALGERIA, APL13_WORLD, "DZ", 40, 160, 0}, + {CTRY_AMERICAN_SAMOA, FCC3_FCCA, "AS", 40, 160, 0}, + {CTRY_ANGUILLA, ETSI1_WORLD, "AI", 40, 160, 0}, + {CTRY_ARGENTINA, APL17_ETSID, "AR", 40, 160, 0}, + {CTRY_ARMENIA, ETSI4_WORLD, "AM", 40, 20, 0}, + {CTRY_ARUBA, ETSI1_WORLD, "AW", 40, 160, 0}, + {CTRY_AUSTRALIA, FCC6_WORLD, "AU", 40, 160, 0}, + {CTRY_AUSTRIA, ETSI13_WORLD, "AT", 40, 160, 0}, + {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ", 40, 160, 0}, + {CTRY_BAHAMAS, FCC3_WORLD, "BS", 40, 160, 0}, + {CTRY_BAHRAIN, APL15_WORLD, "BH", 40, 20, 0}, + {CTRY_BANGLADESH, APL1_WORLD, "BD", 40, 160, 0}, + {CTRY_BARBADOS, FCC2_WORLD, "BB", 40, 160, 0}, + {CTRY_BELARUS, ETSI1_WORLD, "BY", 40, 160, 0}, + {CTRY_BELGIUM, ETSI13_WORLD, "BE", 40, 160, 0}, + {CTRY_BELIZE, ETSI8_WORLD, "BZ", 40, 160, 0}, + {CTRY_BERMUDA, FCC3_FCCA, "BM", 40, 160, 0}, + {CTRY_BHUTAN, ETSI1_WORLD, "BT", 40, 160, 0}, + {CTRY_BOLIVIA, APL8_WORLD, "BO", 40, 160, 0}, + {CTRY_BOSNIA_HERZ, ETSI13_WORLD, "BA", 40, 160, 0}, + {CTRY_BRAZIL, FCC3_ETSIC, "BR", 40, 160, 0}, + {CTRY_BRUNEI_DARUSSALAM, APL6_WORLD, "BN", 40, 160, 0}, + {CTRY_BULGARIA, ETSI13_WORLD, "BG", 40, 160, 0}, + {CTRY_BURKINA_FASO, FCC3_WORLD, "BF", 40, 160, 0}, + {CTRY_CAMBODIA, ETSI1_WORLD, "KH", 40, 160, 0}, + {CTRY_CANADA, FCC3_FCCA, "CA", 40, 160, 0}, + {CTRY_CAYMAN_ISLANDS, FCC3_WORLD, "KY", 40, 160, 0}, + {CTRY_CENTRAL_AFRICA_REPUBLIC, FCC3_WORLD, "CF", 40, 40, 0}, + {CTRY_CHAD, ETSI1_WORLD, "TD", 40, 160, 0}, + {CTRY_CHILE, FCC13_WORLD, "CL", 40, 160, 0}, + {CTRY_CHINA, APL14_WORLD, "CN", 40, 160, 0}, + {CTRY_CHRISTMAS_ISLAND, FCC3_WORLD, "CX", 40, 160, 0}, + {CTRY_COLOMBIA, FCC3_WORLD, "CO", 40, 160, 0}, + {CTRY_COSTA_RICA, FCC3_WORLD, "CR", 40, 160, 0}, + {CTRY_COTE_DIVOIRE, FCC3_WORLD, "CI", 40, 160, 0}, + {CTRY_CROATIA, ETSI13_WORLD, "HR", 40, 160, 0}, + {CTRY_CYPRUS, ETSI13_WORLD, "CY", 40, 160, 0}, + 
{CTRY_CZECH, ETSI13_WORLD, "CZ", 40, 160, 0}, + {CTRY_DENMARK, ETSI13_WORLD, "DK", 40, 160, 0}, + {CTRY_DOMINICA, FCC2_FCCA, "DM", 40, 160, 0}, + {CTRY_DOMINICAN_REPUBLIC, FCC2_FCCA, "DO", 40, 160, 0}, + {CTRY_ECUADOR, FCC3_WORLD, "EC", 40, 20, 0}, + {CTRY_EGYPT, ETSI3_WORLD, "EG", 40, 160, 0}, + {CTRY_EL_SALVADOR, FCC2_WORLD, "SV", 40, 20, 0}, + {CTRY_ESTONIA, ETSI13_WORLD, "EE", 40, 160, 0}, + {CTRY_ETHIOPIA, ETSI1_WORLD, "ET", 40, 160, 0}, + {CTRY_FINLAND, ETSI13_WORLD, "FI", 40, 160, 0}, + {CTRY_FRANCE, ETSI13_WORLD, "FR", 40, 160, 0}, + {CTRY_FRENCH_GUIANA, ETSI13_WORLD, "GF", 40, 160, 0}, + {CTRY_FRENCH_POLYNESIA, ETSI13_WORLD, "PF", 40, 160, 0}, + {CTRY_GEORGIA, ETSI4_WORLD, "GE", 40, 160, 0}, + {CTRY_GERMANY, ETSI13_WORLD, "DE", 40, 160, 0}, + {CTRY_GHANA, FCC3_WORLD, "GH", 40, 160, 0}, + {CTRY_GIBRALTAR, ETSI1_WORLD, "GI", 40, 160, 0}, + {CTRY_GREECE, ETSI13_WORLD, "GR", 40, 160, 0}, + {CTRY_GREENLAND, ETSI1_WORLD, "GL", 40, 160, 0}, + {CTRY_GRENADA, FCC3_FCCA, "GD", 40, 160, 0}, + {CTRY_GUADELOUPE, ETSI1_WORLD, "GP", 40, 160, 0}, + {CTRY_GUAM, FCC3_FCCA, "GU", 40, 160, 0}, + {CTRY_GUATEMALA, ETSI1_WORLD, "GT", 40, 160, 0}, + {CTRY_GUYANA, APL1_ETSIC, "GY", 40, 160, 0}, + {CTRY_HAITI, FCC3_FCCA, "HT", 40, 160, 0}, + {CTRY_HONDURAS, FCC13_WORLD, "HN", 40, 160, 0}, + {CTRY_HONG_KONG, FCC3_WORLD, "HK", 40, 160, 0}, + {CTRY_HUNGARY, ETSI13_WORLD, "HU", 40, 160, 0}, + {CTRY_ICELAND, ETSI13_WORLD, "IS", 40, 160, 0}, + {CTRY_INDIA, APL19_ETSIC, "IN", 40, 160, 0}, + {CTRY_INDONESIA, APL2_ETSIC, "ID", 40, 20, 0}, + {CTRY_IRAQ, ETSI1_WORLD, "IQ", 40, 160, 0}, + {CTRY_IRELAND, ETSI13_WORLD, "IE", 40, 160, 0}, + {CTRY_ISRAEL, ETSI3_WORLD, "IL", 40, 160, 0}, + {CTRY_ITALY, ETSI13_WORLD, "IT", 40, 160, 0}, + {CTRY_JAMAICA, FCC13_WORLD, "JM", 40, 160, 0}, + {CTRY_JORDAN, APL4_WORLD, "JO", 40, 160, 0}, + {CTRY_KAZAKHSTAN, NULL1_WORLD, "KZ", 40, 0, 0}, + {CTRY_KENYA, APL12_WORLD, "KE", 40, 160, 0}, + {CTRY_KOREA_ROC, APL9_WORLD, "KR", 40, 160, 0}, + {CTRY_KUWAIT, 
ETSI3_WORLD, "KW", 40, 160, 0}, + {CTRY_LATVIA, ETSI13_WORLD, "LV", 40, 160, 0}, + {CTRY_LEBANON, FCC3_WORLD, "LB", 40, 160, 0}, + {CTRY_LESOTHO, ETSI1_WORLD, "LS", 40, 160, 0}, + {CTRY_LIECHTENSTEIN, ETSI13_WORLD, "LI", 40, 160, 0}, + {CTRY_LITHUANIA, ETSI13_WORLD, "LT", 40, 160, 0}, + {CTRY_LUXEMBOURG, ETSI13_WORLD, "LU", 40, 160, 0}, + {CTRY_MACAU, FCC3_WORLD, "MO", 40, 160, 0}, + {CTRY_MACEDONIA, ETSI13_WORLD, "MK", 40, 160, 0}, + {CTRY_MALAWI, ETSI1_WORLD, "MW", 40, 160, 0}, + {CTRY_MALAYSIA, FCC11_WORLD, "MY", 40, 160, 0}, + {CTRY_MALDIVES, APL6_WORLD, "MV", 40, 160, 0}, + {CTRY_MALTA, ETSI13_WORLD, "MT", 40, 160, 0}, + {CTRY_MARSHALL_ISLANDS, FCC3_FCCA, "MH", 40, 160, 0}, + {CTRY_MARTINIQUE, ETSI13_WORLD, "MQ", 40, 160, 0}, + {CTRY_MAURITANIA, ETSI1_WORLD, "MR", 40, 160, 0}, + {CTRY_MAURITIUS, ETSI13_WORLD, "MU", 40, 160, 0}, + {CTRY_MAYOTTE, ETSI1_WORLD, "YT", 40, 160, 0}, + {CTRY_MEXICO, FCC3_ETSIC, "MX", 40, 160, 0}, + {CTRY_MICRONESIA, FCC3_FCCA, "FM", 40, 160, 0}, + {CTRY_MOLDOVA, ETSI13_WORLD, "MD", 40, 160, 0}, + {CTRY_MONACO, ETSI13_WORLD, "MC", 40, 160, 0}, + {CTRY_MONGOLIA, FCC3_WORLD, "MN", 40, 160, 0}, + {CTRY_MONTENEGRO, ETSI13_WORLD, "ME", 40, 160, 0}, + {CTRY_MOROCCO, ETSI3_WORLD, "MA", 40, 160, 0}, + {CTRY_MYANMAR, APL1_WORLD, "MM", 40, 160, 0}, + {CTRY_NAMIBIA, APL20_WORLD, "NA", 40, 160, 0}, + {CTRY_NEPAL, APL23_WORLD, "NP", 40, 160, 0}, + {CTRY_NETHERLANDS, ETSI13_WORLD, "NL", 40, 160, 0}, + {CTRY_NETHERLANDS_ANTILLES, ETSI13_WORLD, "AN", 40, 160, 0}, + {CTRY_NEW_ZEALAND, FCC3_ETSIC, "NZ", 40, 160, 0}, + {CTRY_NIGERIA, APL8_WORLD, "NG", 40, 160, 0}, + {CTRY_NORTHERN_MARIANA_ISLANDS, FCC3_FCCA, "MP", 40, 160, 0}, + {CTRY_NICARAGUA, FCC3_FCCA, "NI", 40, 160, 0}, + {CTRY_NORWAY, ETSI13_WORLD, "NO", 40, 160, 0}, + {CTRY_OMAN, ETSI1_WORLD, "OM", 40, 160, 0}, + {CTRY_PAKISTAN, APL1_ETSIC, "PK", 40, 160, 0}, + {CTRY_PALAU, FCC3_FCCA, "PW", 40, 160, 0}, + {CTRY_PANAMA, FCC14_FCCB, "PA", 40, 160, 0}, + {CTRY_PAPUA_NEW_GUINEA, FCC3_WORLD, "PG", 40, 
160, 0}, + {CTRY_PARAGUAY, FCC3_WORLD, "PY", 40, 160, 0}, + {CTRY_PERU, FCC3_WORLD, "PE", 40, 160, 0}, + {CTRY_PHILIPPINES, FCC3_WORLD, "PH", 40, 160, 0}, + {CTRY_POLAND, ETSI13_WORLD, "PL", 40, 160, 0}, + {CTRY_PORTUGAL, ETSI13_WORLD, "PT", 40, 160, 0}, + {CTRY_PUERTO_RICO, FCC3_FCCA, "PR", 40, 160, 0}, + {CTRY_QATAR, ETSI14_WORLD, "QA", 40, 160, 0}, + {CTRY_REUNION, ETSI1_WORLD, "RE", 40, 160, 0}, + {CTRY_ROMANIA, ETSI13_WORLD, "RO", 40, 160, 0}, + {CTRY_RUSSIA, ETSI8_WORLD, "RU", 40, 160, 0}, + {CTRY_RWANDA, FCC3_WORLD, "RW", 40, 160, 0}, + {CTRY_SAINT_BARTHELEMY, ETSI1_WORLD, "BL", 40, 160, 0}, + {CTRY_SAINT_KITTS_AND_NEVIS, APL10_WORLD, "KN", 40, 160, 0}, + {CTRY_SAINT_LUCIA, APL10_WORLD, "LC", 40, 160, 0}, + {CTRY_SAINT_MARTIN, ETSI1_WORLD, "MF", 40, 160, 0}, + {CTRY_SAINT_PIERRE_AND_MIQUELON, ETSI13_WORLD, "PM", 40, 160, 0}, + {CTRY_SAINT_VINCENT_AND_THE_GRENADIENS, ETSI13_WORLD, "VC", + 40, 160, 0}, + {CTRY_SAMOA, ETSI1_WORLD, "WS", 40, 40, 0}, + {CTRY_SAUDI_ARABIA, ETSI15_WORLD, "SA", 40, 160, 0}, + {CTRY_SENEGAL, FCC13_WORLD, "SN", 40, 160, 0}, + {CTRY_SERBIA, ETSI13_WORLD, "RS", 40, 160, 0}, + {CTRY_SINGAPORE, FCC3_WORLD, "SG", 40, 160, 0}, + {CTRY_SLOVAKIA, ETSI13_WORLD, "SK", 40, 160, 0}, + {CTRY_SLOVENIA, ETSI13_WORLD, "SI", 40, 160, 0}, + {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA", 40, 160, 0}, + {CTRY_SPAIN, ETSI13_WORLD, "ES", 40, 160, 0}, + {CTRY_SURINAME, ETSI1_WORLD, "SR", 40, 160, 0}, + {CTRY_SRI_LANKA, FCC3_WORLD, "LK", 40, 20, 0}, + {CTRY_SWEDEN, ETSI13_WORLD, "SE", 40, 160, 0}, + {CTRY_SWITZERLAND, ETSI13_WORLD, "CH", 40, 160, 0}, + {CTRY_TAIWAN, FCC3_FCCA, "TW", 40, 160, 0}, + {CTRY_TANZANIA, APL1_WORLD, "TZ", 40, 160, 0}, + {CTRY_THAILAND, FCC3_WORLD, "TH", 40, 160, 0}, + {CTRY_TOGO, ETSI1_WORLD, "TG", 40, 40, 0}, + {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT", 40, 160, 0}, + {CTRY_TUNISIA, ETSI3_WORLD, "TN", 40, 160, 0}, + {CTRY_TURKEY, ETSI13_WORLD, "TR", 40, 160, 0}, + {CTRY_TURKS_AND_CAICOS, FCC3_WORLD, "TC", 40, 160, 0}, + {CTRY_UGANDA, 
FCC3_WORLD, "UG", 40, 160, 0}, + {CTRY_UKRAINE, ETSI9_WORLD, "UA", 40, 160, 0}, + {CTRY_UAE, FCC3_WORLD, "AE", 40, 160, 0}, + {CTRY_UNITED_KINGDOM, ETSI13_WORLD, "GB", 40, 160, 0}, + {CTRY_UNITED_STATES, FCC3_FCCA, "US", 40, 160, 0}, + {CTRY_URUGUAY, FCC2_WORLD, "UY", 40, 160, 0}, + {CTRY_UZBEKISTAN, ETSI3_WORLD, "UZ", 40, 160, 0}, + {CTRY_VANUATU, FCC3_WORLD, "VU", 40, 160, 0}, + {CTRY_VENEZUELA, FCC2_ETSIC, "VE", 40, 160, 0}, + {CTRY_VIET_NAM, FCC3_WORLD, "VN", 40, 80, 0}, + {CTRY_VIRGIN_ISLANDS, FCC3_FCCA, "VI", 40, 160, 0}, + {CTRY_WALLIS_AND_FUTUNA, ETSI1_WORLD, "WF", 40, 160, 0}, + {CTRY_YEMEN, NULL1_WORLD, "YE", 40, 0, 0}, + {CTRY_ZIMBABWE, ETSI1_WORLD, "ZW", 40, 160, 0}, + {CTRY_JAPAN, MKK5_MKKC, "JP", 40, 160, 0}, + {CTRY_XA, MKK5_MKKA, "XA", 40, 160, 0}, +}; +#endif +#endif + +enum reg_domains_2g { + FCCA, + FCCB, + WORLD, + MKKA, + MKKC, + ETSIC, + ETSID, + WORLD_2G_1, + WORLD_2G_2, + WORLD_2G_3, +}; + +enum reg_domains_5g { + NULL1, + FCC1, + FCC2, + FCC3, + FCC4, + FCC5, + FCC6, + FCC8, + FCC10, + FCC11, + FCC13, + FCC14, + ETSI1, + ETSI3, + ETSI4, + ETSI8, + ETSI9, + ETSI10, + ETSI11, + ETSI12, + ETSI13, + ETSI14, + ETSI15, + APL1, + APL2, + APL4, + APL6, + APL8, + APL9, + APL10, + APL11, + APL12, + APL13, + APL14, + APL15, + APL16, + APL17, + APL19, + APL20, + APL23, + MKK3, + MKK4, + MKK5, + MKK9, + MKK10, + MKK11, + MKK16, + WORLD_5G_1, + WORLD_5G_2, +}; + + +const struct reg_domain_pair g_reg_dmn_pairs[] = { + {NULL1_WORLD, NULL1, WORLD}, + + {FCC1_FCCA, FCC1, FCCA}, + {FCC1_WORLD, FCC1, WORLD}, + {FCC2_FCCA, FCC2, FCCA}, + {FCC2_WORLD, FCC2, WORLD}, + {FCC2_ETSIC, FCC2, ETSIC}, + {FCC3_FCCA, FCC3, FCCA}, + {FCC3_WORLD, FCC3, WORLD}, + {FCC3_ETSIC, FCC3, ETSIC}, + {FCC4_FCCA, FCC4, FCCA}, + {FCC5_FCCA, FCC5, FCCA}, + {FCC6_WORLD, FCC6, WORLD}, + {FCC6_FCCA, FCC6, FCCA}, + {FCC8_FCCA, FCC8, FCCA}, + {FCC11_WORLD, FCC11, WORLD}, + {FCC13_WORLD, FCC13, WORLD}, + {FCC14_FCCB, FCC14, FCCB}, + + {ETSI1_WORLD, ETSI1, WORLD}, + {ETSI3_WORLD, ETSI3, 
WORLD}, + {ETSI4_WORLD, ETSI4, WORLD}, + {ETSI8_WORLD, ETSI8, WORLD}, + {ETSI9_WORLD, ETSI9, WORLD}, + {ETSI10_WORLD, ETSI10, WORLD}, + {ETSI10_FCCA, ETSI10, FCCA}, + {ETSI11_WORLD, ETSI11, WORLD}, + {ETSI12_WORLD, ETSI12, WORLD}, + {ETSI13_WORLD, ETSI13, WORLD}, + {ETSI14_WORLD, ETSI14, WORLD}, + {ETSI15_WORLD, ETSI15, WORLD}, + + {APL1_WORLD, APL1, WORLD}, + {APL1_ETSIC, APL1, ETSIC}, + {APL2_WORLD, APL2, WORLD}, + {APL2_ETSIC, APL2, ETSIC}, + {APL4_WORLD, APL4, WORLD}, + {APL6_WORLD, APL6, WORLD}, + {APL8_WORLD, APL8, WORLD}, + {APL9_WORLD, APL9, WORLD}, + {APL10_WORLD, APL10, WORLD}, + {APL11_FCCA, APL11, FCCA}, + {APL12_WORLD, APL12, WORLD}, + {APL13_WORLD, APL13, WORLD}, + {APL14_WORLD, APL14, WORLD}, + {APL15_WORLD, APL15, WORLD}, + {APL16_WORLD, APL16, WORLD}, + {APL17_ETSID, APL17, ETSID}, + {APL19_ETSIC, APL19, ETSIC}, + {APL20_WORLD, APL20, WORLD}, + {APL23_WORLD, APL23, WORLD}, + + {MKK3_MKKC, MKK3, MKKC}, + {MKK5_MKKA, MKK5, MKKA}, + {MKK5_MKKC, MKK5, MKKC}, + {MKK11_MKKC, MKK11, MKKC}, + {MKK16_MKKC, MKK16, MKKC}, + + {WORLD_60, WORLD_5G_2, WORLD_2G_3}, + {WORLD_61, WORLD_5G_2, WORLD_2G_3}, + {WORLD_62, WORLD_5G_2, WORLD_2G_3}, + {WORLD_63, WORLD_5G_1, WORLD_2G_2}, + {WORLD_65, WORLD_5G_1, WORLD_2G_2}, + {WORLD_64, WORLD_5G_1, WORLD_2G_1}, + {WORLD_66, WORLD_5G_2, WORLD_2G_1}, + {WORLD_69, WORLD_5G_2, WORLD_2G_1}, + {WORLD_67, WORLD_5G_2, WORLD_2G_2}, + {WORLD_68, WORLD_5G_2, WORLD_2G_2}, + {WORLD_6A, WORLD_5G_2, WORLD_2G_2}, + {WORLD_6C, WORLD_5G_2, WORLD_2G_2}, +}; + +enum reg_rules_2g { + + CHAN_1_11_1, + CHAN_1_11_2, + CHAN_1_11_3, + CHAN_1_13_1, + CHAN_1_13_2, + CHAN_1_13_3, + CHAN_1_13_4, + CHAN_12_12_1, + CHAN_12_13_1, + CHAN_14_1, + CHAN_14_2, +}; + +const struct regulatory_rule reg_rules_2g[] = { + + [CHAN_1_11_1] = {2402, 2472, 40, 30, 0}, + [CHAN_1_11_2] = {2402, 2472, 40, 20, 0}, + [CHAN_1_11_3] = {2402, 2472, 40, 36, 0}, + [CHAN_1_13_1] = {2402, 2482, 40, 20, 0}, + [CHAN_1_13_2] = {2402, 2482, 40, 30, 0}, + [CHAN_1_13_3] = {2402, 2482, 
40, 36, 0}, + [CHAN_1_13_4] = {2402, 2482, 40, 23, 0}, + [CHAN_12_12_1] = {2457, 2477, 20, 20, REGULATORY_CHAN_NO_IR}, + [CHAN_12_13_1] = {2457, 2482, 20, 20, REGULATORY_CHAN_NO_IR}, + [CHAN_14_1] = {2474, 2494, 20, 23, REGULATORY_CHAN_NO_OFDM}, + [CHAN_14_2] = {2474, 2494, 20, 20, + REGULATORY_CHAN_NO_OFDM | REGULATORY_CHAN_NO_IR}, +}; + + +const struct regdomain regdomains_2g[] = { + + [FCCA] = {CTL_FCC, DFS_UNINIT_REG, 0, 6, 1, {CHAN_1_11_1} }, + [FCCB] = {CTL_FCC, DFS_UNINIT_REG, 0, 6, 1, {CHAN_1_11_3} }, + [WORLD] = {CTL_ETSI, DFS_UNINIT_REG, 0, 0, 1, {CHAN_1_13_1} }, + [MKKA] = {CTL_MKK, DFS_UNINIT_REG, 0, 0, 2, {CHAN_1_13_4, + CHAN_14_1} }, + [MKKC] = {CTL_MKK, DFS_UNINIT_REG, 0, 0, 1, {CHAN_1_13_4} }, + [ETSIC] = {CTL_ETSI, DFS_UNINIT_REG, 0, 0, 1, {CHAN_1_13_2} }, + [ETSID] = {CTL_ETSI, DFS_UNINIT_REG, 0, 0, 1, {CHAN_1_13_3} }, + [WORLD_2G_1] = {CTL_NONE, DFS_UNINIT_REG, 0, 0, 1, {CHAN_1_11_2} }, + [WORLD_2G_2] = {CTL_NONE, DFS_UNINIT_REG, 0, 0, 2, + {CHAN_1_11_2, CHAN_12_13_1} }, + [WORLD_2G_3] = {CTL_NONE, DFS_UNINIT_REG, 0, 0, 2, + {CHAN_1_11_2, CHAN_12_12_1} }, +}; + + +enum reg_rules_5g { + + CHAN_4910_4990_1, + CHAN_4940_4990_1, + CHAN_5030_5090_1, + CHAN_5170_5250_1, + CHAN_5170_5250_2, + CHAN_5170_5250_3, + CHAN_5170_5250_4, + CHAN_5170_5250_5, + CHAN_5170_5250_6, + CHAN_5170_5250_7, + CHAN_5170_5250_8, + CHAN_5170_5250_9, + CHAN_5170_5330_1, + CHAN_5170_5330_2, + CHAN_5250_5330_1, + CHAN_5250_5330_2, + CHAN_5250_5330_3, + CHAN_5250_5330_4, + CHAN_5250_5330_5, + CHAN_5250_5330_6, + CHAN_5250_5330_7, + CHAN_5250_5330_8, + CHAN_5250_5330_9, + CHAN_5250_5330_10, + CHAN_5250_5330_11, + CHAN_5250_5330_12, + CHAN_5250_5330_13, + CHAN_5250_5330_14, + CHAN_5490_5730_1, + CHAN_5490_5730_2, + CHAN_5490_5730_3, + CHAN_5490_5730_4, + CHAN_5490_5730_5, + CHAN_5490_5730_6, + CHAN_5490_5730_7, + CHAN_5490_5710_1, + CHAN_5490_5710_2, + CHAN_5490_5710_3, + CHAN_5490_5710_4, + CHAN_5490_5710_5, + CHAN_5490_5710_6, + CHAN_5490_5710_7, + CHAN_5490_5590_1, + 
CHAN_5490_5590_2, + CHAN_5490_5590_3, + CHAN_5490_5570_1, + CHAN_5490_5650_2, + CHAN_5490_5670_1, + CHAN_5490_5670_2, + CHAN_5490_5630_1, + CHAN_5650_5730_1, + CHAN_5650_5730_2, + CHAN_5650_5730_3, + CHAN_5735_5835_1, + CHAN_5735_5835_2, + CHAN_5735_5835_3, + CHAN_5735_5835_4, + CHAN_5735_5835_5, + CHAN_5735_5835_6, + CHAN_5735_5835_7, + CHAN_5735_5835_8, + CHAN_5735_5875_1, + CHAN_5735_5875_2, + CHAN_5735_5875_3, + CHAN_5735_5875_4, + CHAN_5735_5875_5, + CHAN_5735_5815_1, + CHAN_5735_5815_2, + CHAN_5735_5775_1, + CHAN_5835_5855_1, + CHAN_5855_5875_1, + CHAN_5850_5925_1, + CHAN_5850_5925_2, +}; + +const struct regulatory_rule reg_rules_5g[] = { + + [CHAN_4910_4990_1] = {4910, 4990, 20, 20, 0}, + [CHAN_4940_4990_1] = {4940, 4990, 20, 33, 0}, + [CHAN_5030_5090_1] = {5030, 5090, 20, 20, 0}, + [CHAN_5170_5250_1] = {5170, 5250, 80, 17, 0}, + [CHAN_5170_5250_2] = {5170, 5250, 80, 23, 0}, + [CHAN_5170_5250_3] = {5170, 5250, 80, 20, 0}, + [CHAN_5170_5250_4] = {5170, 5250, 80, 30, 0}, + [CHAN_5170_5250_5] = {5170, 5250, 80, 24, 0}, + [CHAN_5170_5250_6] = {5170, 5250, 80, 18, 0}, + [CHAN_5170_5250_7] = {5170, 5250, 80, 20, REGULATORY_CHAN_INDOOR_ONLY}, + [CHAN_5170_5250_8] = {5170, 5250, 80, 23, REGULATORY_CHAN_INDOOR_ONLY}, + [CHAN_5170_5250_9] = {5170, 5250, 40, 30, 0}, + [CHAN_5170_5330_1] = {5170, 5330, 160, 20, REGULATORY_CHAN_NO_IR}, + [CHAN_5170_5330_2] = {5170, 5330, 160, 24, 0}, + [CHAN_5250_5330_1] = {5250, 5330, 80, 23, REGULATORY_CHAN_RADAR}, + [CHAN_5250_5330_2] = {5250, 5330, 80, 20, REGULATORY_CHAN_RADAR}, + [CHAN_5250_5330_3] = {5250, 5330, 80, 18, REGULATORY_CHAN_RADAR}, + [CHAN_5250_5330_4] = {5250, 5330, 80, 30, REGULATORY_CHAN_RADAR}, + [CHAN_5250_5330_5] = {5250, 5330, 80, 23, 0}, + [CHAN_5250_5330_6] = {5250, 5330, 80, 30, 0}, + [CHAN_5250_5330_7] = {5250, 5330, 80, 24, REGULATORY_CHAN_RADAR}, + [CHAN_5250_5330_8] = {5250, 5330, 80, 36, 0}, + [CHAN_5250_5330_9] = {5250, 5330, 80, 20, 0}, + [CHAN_5250_5330_10] = {5250, 5330, 80, 24, 0}, + 
[CHAN_5250_5330_11] = {5250, 5330, 80, 20, REGULATORY_CHAN_INDOOR_ONLY}, + [CHAN_5250_5330_12] = {5250, 5330, 80, 23, REGULATORY_CHAN_RADAR | + REGULATORY_CHAN_INDOOR_ONLY}, + [CHAN_5250_5330_13] = {5250, 5330, 40, 30, REGULATORY_CHAN_RADAR}, + [CHAN_5250_5330_14] = {5250, 5330, 80, 20, REGULATORY_CHAN_RADAR | + REGULATORY_CHAN_INDOOR_ONLY}, + [CHAN_5490_5730_1] = {5490, 5730, 160, 24, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5730_2] = {5490, 5730, 160, 20, REGULATORY_CHAN_NO_IR}, + [CHAN_5490_5730_3] = {5490, 5730, 160, 30, 0}, + [CHAN_5490_5730_4] = {5490, 5730, 160, 24, 0}, + [CHAN_5490_5730_5] = {5490, 5730, 160, 30, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5730_6] = {5490, 5730, 160, 23, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5730_7] = {5490, 5730, 160, 20, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5710_1] = {5490, 5710, 160, 30, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5710_2] = {5490, 5710, 160, 20, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5710_3] = {5490, 5710, 160, 27, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5710_4] = {5490, 5710, 40, 30, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5710_5] = {5490, 5710, 160, 24, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5710_6] = {5490, 5710, 160, 26, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5710_7] = {5490, 5710, 160, 23, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5590_1] = {5490, 5590, 80, 24, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5590_2] = {5490, 5590, 80, 30, 0}, + [CHAN_5490_5590_3] = {5490, 5590, 80, 36, 0}, + [CHAN_5490_5570_1] = {5490, 5570, 80, 30, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5650_2] = {5490, 5650, 160, 24, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5670_1] = {5490, 5670, 160, 20, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5670_2] = {5490, 5670, 160, 23, REGULATORY_CHAN_RADAR}, + [CHAN_5490_5630_1] = {5490, 5630, 80, 30, REGULATORY_CHAN_RADAR}, + [CHAN_5650_5730_1] = {5650, 5730, 80, 24, REGULATORY_CHAN_RADAR}, + [CHAN_5650_5730_2] = {5650, 5730, 80, 30, 0}, + [CHAN_5650_5730_3] = {5650, 5730, 80, 36, 0}, + [CHAN_5735_5835_1] = {5735, 5835, 80, 23, 0}, + 
[CHAN_5735_5835_2] = {5735, 5835, 80, 30, 0}, + [CHAN_5735_5835_3] = {5735, 5835, 80, 20, 0}, + [CHAN_5735_5835_4] = {5735, 5835, 80, 33, 0}, + [CHAN_5735_5835_5] = {5735, 5835, 80, 20, REGULATORY_CHAN_NO_IR}, + [CHAN_5735_5835_6] = {5735, 5835, 80, 24, 0}, + [CHAN_5735_5835_7] = {5735, 5835, 80, 36, 0}, + [CHAN_5735_5835_8] = {5735, 5835, 80, 23, REGULATORY_CHAN_RADAR}, + [CHAN_5735_5875_1] = {5735, 5875, 20, 27, REGULATORY_CHAN_RADAR}, + [CHAN_5735_5875_2] = {5735, 5875, 20, 30, 0}, + [CHAN_5735_5875_3] = {5735, 5875, 80, 30, 0}, + [CHAN_5735_5875_4] = {5735, 5875, 80, 14, 0}, + [CHAN_5735_5875_5] = {5735, 5875, 80, 20, REGULATORY_CHAN_RADAR}, + [CHAN_5735_5815_1] = {5735, 5815, 80, 30, 0}, + [CHAN_5735_5815_2] = {5735, 5815, 80, 30, REGULATORY_CHAN_RADAR}, + [CHAN_5735_5775_1] = {5735, 5775, 40, 23, 0}, + [CHAN_5835_5855_1] = {5835, 5855, 20, 30, 0}, + [CHAN_5855_5875_1] = {5855, 5875, 20, 30, 0}, + [CHAN_5850_5925_1] = {5850, 5925, 20, 24, 0}, + [CHAN_5850_5925_2] = {5850, 5925, 20, 30, 0}, +}; + + +const struct regdomain regdomains_5g[] = { + + [FCC1] = {CTL_FCC, DFS_FCC_REG, 2, 6, 3, {CHAN_5170_5250_1, + CHAN_5250_5330_1, + CHAN_5735_5835_2} }, + + [FCC2] = {CTL_FCC, DFS_FCC_REG, 2, 6, 3, {CHAN_5170_5250_2, + CHAN_5250_5330_1, + CHAN_5735_5835_2} }, + + [FCC3] = {CTL_FCC, DFS_FCC_REG, 2, 6, 4, {CHAN_5170_5250_5, + CHAN_5250_5330_7, + CHAN_5490_5730_1, + CHAN_5735_5835_2} }, + + [FCC4] = {CTL_FCC, DFS_FCC_REG, 2, 6, 1, {CHAN_4940_4990_1} }, + + [FCC5] = {CTL_FCC, DFS_UNINIT_REG, 2, 6, 2, {CHAN_5170_5250_4, + CHAN_5735_5835_2} }, + + [FCC6] = {CTL_FCC, DFS_FCC_REG, 2, 6, 5, {CHAN_5170_5250_5, + CHAN_5250_5330_7, + CHAN_5490_5590_1, + CHAN_5650_5730_1, + CHAN_5735_5835_2} }, + + [FCC8] = {CTL_FCC, DFS_FCC_REG, 2, 6, 4, {CHAN_5170_5250_4, + CHAN_5250_5330_7, + CHAN_5490_5730_1, + CHAN_5735_5835_2} }, + + [FCC10] = {CTL_FCC, DFS_FCC_REG, 2, 0, 5, {CHAN_5170_5250_4, + CHAN_5250_5330_7, + CHAN_5490_5730_1, + CHAN_5735_5835_2, + CHAN_5850_5925_1} }, + + [FCC11] = 
{CTL_FCC, DFS_FCC_REG, 2, 6, 4, {CHAN_5170_5250_5, + CHAN_5250_5330_7, + CHAN_5490_5650_2, + CHAN_5735_5835_6} }, + + [FCC13] = {CTL_FCC, DFS_UNINIT_REG, 2, 0, 4, {CHAN_5170_5330_2, + CHAN_5250_5330_10, + CHAN_5490_5730_4, + CHAN_5735_5835_2} }, + + [FCC14] = {CTL_FCC, DFS_UNINIT_REG, 2, 0, 4, {CHAN_5170_5250_4, + CHAN_5250_5330_10, + CHAN_5490_5730_4, + CHAN_5735_5835_2} }, + + [ETSI1] = {CTL_ETSI, DFS_ETSI_REG, 2, 0, 3, {CHAN_5170_5250_8, + CHAN_5250_5330_12, + CHAN_5490_5710_1} }, + + [ETSI3] = {CTL_ETSI, DFS_ETSI_REG, 5, 0, 2, {CHAN_5170_5250_2, + CHAN_5250_5330_1} }, + + [ETSI4] = {CTL_ETSI, DFS_ETSI_REG, 2, 0, 2, {CHAN_5170_5250_6, + CHAN_5250_5330_3} }, + + [ETSI8] = {CTL_ETSI, DFS_UNINIT_REG, 20, 0, 4, {CHAN_5170_5250_2, + CHAN_5250_5330_5, + CHAN_5490_5730_3, + CHAN_5735_5835_2} }, + + [ETSI9] = {CTL_ETSI, DFS_ETSI_REG, 20, 0, 4, {CHAN_5170_5250_2, + CHAN_5250_5330_1, + CHAN_5490_5710_5, + CHAN_5735_5835_6} }, + + [ETSI10] = {CTL_ETSI, DFS_ETSI_REG, 10, 0, 4, {CHAN_5170_5250_7, + CHAN_5250_5330_14, + CHAN_5490_5710_3, + CHAN_5850_5925_2} }, + + [ETSI11] = {CTL_ETSI, DFS_ETSI_REG, 10, 0, 4, {CHAN_5170_5250_7, + CHAN_5250_5330_14, + CHAN_5490_5710_3, + CHAN_5735_5875_1} }, + + [ETSI12] = {CTL_ETSI, DFS_ETSI_REG, 2, 0, 4, {CHAN_5170_5250_7, + CHAN_5250_5330_14, + CHAN_5490_5730_6, + CHAN_5735_5835_8} }, + + [ETSI13] = {CTL_ETSI, DFS_ETSI_REG, 2, 0, 4, {CHAN_5170_5250_8, + CHAN_5250_5330_12, + CHAN_5490_5730_5, + CHAN_5735_5875_4} }, + + [ETSI14] = {CTL_ETSI, DFS_ETSI_REG, 2, 0, 4, {CHAN_5170_5250_2, + CHAN_5250_5330_1, + CHAN_5490_5730_7, + CHAN_5735_5875_5} }, + + [ETSI15] = {CTL_ETSI, DFS_ETSI_REG, 2, 0, 4, {CHAN_5170_5250_2, + CHAN_5250_5330_1, + CHAN_5490_5730_5, + CHAN_5735_5815_2} }, + + [APL1] = {CTL_ETSI, DFS_UNINIT_REG, 2, 0, 1, {CHAN_5735_5835_2} }, + + [APL2] = {CTL_ETSI, DFS_UNINIT_REG, 2, 0, 1, {CHAN_5735_5815_1} }, + + [APL4] = {CTL_ETSI, DFS_UNINIT_REG, 2, 0, 2, {CHAN_5170_5250_2, + CHAN_5735_5835_1} }, + + [APL6] = {CTL_ETSI, DFS_ETSI_REG, 2, 
0, 3, {CHAN_5170_5250_3, + CHAN_5250_5330_2, + CHAN_5735_5835_3} }, + + [APL8] = {CTL_FCC, DFS_ETSI_REG, 2, 0, 2, {CHAN_5250_5330_4, + CHAN_5735_5835_2} }, + + [APL9] = {CTL_ETSI, DFS_KR_REG, 2, 6, 4, {CHAN_5170_5250_2, + CHAN_5250_5330_1, + CHAN_5490_5730_5, + CHAN_5735_5835_2} }, + + [APL10] = {CTL_ETSI, DFS_FCC_REG, 2, 6, 4, {CHAN_5170_5250_2, + CHAN_5250_5330_4, + CHAN_5490_5710_1, + CHAN_5735_5815_1} }, + + [APL11] = { CTL_ETSI, DFS_ETSI_REG, 2, 0, 4, {CHAN_5170_5250_9, + CHAN_5250_5330_13, + CHAN_5490_5710_4, + CHAN_5735_5875_2} }, + + [APL12] = {CTL_ETSI, DFS_ETSI_REG, 2, 0, 3, {CHAN_5170_5250_2, + CHAN_5490_5570_1, + CHAN_5735_5775_1} }, + + [APL13] = {CTL_ETSI, DFS_ETSI_REG, 2, 0, 3, {CHAN_5170_5250_2, + CHAN_5250_5330_1, + CHAN_5490_5670_2} }, + + [APL14] = {CTL_FCC, DFS_CN_REG, 2, 0, 3, {CHAN_5170_5250_2, + CHAN_5250_5330_1, + CHAN_5735_5835_4} }, + + [APL15] = {CTL_FCC, DFS_UNINIT_REG, 2, 0, 3, {CHAN_5170_5250_2, + CHAN_5250_5330_5, + CHAN_5735_5835_4} }, + + [APL16] = {CTL_FCC, DFS_UNINIT_REG, 2, 0, 5, {CHAN_5170_5250_1, + CHAN_5250_5330_6, + CHAN_5490_5590_2, + CHAN_5650_5730_2, + CHAN_5735_5835_2} }, + + [APL17] = {CTL_FCC, DFS_UNINIT_REG, 2, 0, 5, {CHAN_5170_5250_2, + CHAN_5250_5330_8, + CHAN_5490_5590_3, + CHAN_5650_5730_3, + CHAN_5735_5835_7} }, + + [APL19] = {CTL_FCC, DFS_ETSI_REG, 2, 0, 4, {CHAN_5170_5250_4, + CHAN_5250_5330_7, + CHAN_5490_5730_1, + CHAN_5735_5875_3} }, + + [APL20] = {CTL_ETSI, DFS_ETSI_REG, 2, 0, 4, {CHAN_5170_5250_8, + CHAN_5250_5330_12, + CHAN_5490_5730_5, + CHAN_5735_5835_4} }, + + [APL23] = {CTL_ETSI, DFS_UNINIT_REG, 2, 0, 3, {CHAN_5170_5250_7, + CHAN_5250_5330_11, + CHAN_5735_5835_3} }, + + [MKK3] = {CTL_MKK, DFS_UNINIT_REG, 2, 0, 1, {CHAN_5170_5250_3} }, + + [MKK5] = {CTL_MKK, DFS_MKK_REG, 2, 0, 3, {CHAN_5170_5250_2, + CHAN_5250_5330_1, + CHAN_5490_5710_7} }, + + [MKK11] = {CTL_MKK, DFS_MKK_REG, 2, 0, 5, {CHAN_4910_4990_1, + CHAN_5170_5250_2, + CHAN_5030_5090_1, + CHAN_5250_5330_1, + CHAN_5490_5710_7} }, + + [MKK16] = 
{CTL_MKK, DFS_MKK_REG, 2, 0, 1, {CHAN_5490_5710_6} },

	/* Generic "world" 5 GHz domains: every referenced rule carries
	 * REGULATORY_CHAN_NO_IR, i.e. passive use only until a real
	 * country is known.
	 */
	[WORLD_5G_1] = {CTL_NONE, DFS_UNINIT_REG, 2, 0, 2,
			{CHAN_5170_5330_1,
			 CHAN_5735_5835_5} },

	[WORLD_5G_2] = {CTL_NONE, DFS_UNINIT_REG, 2, 0, 3,
			{CHAN_5170_5330_1,
			 CHAN_5490_5730_2,
			 CHAN_5735_5835_5} },
};

/**
 * reg_get_num_countries() - Report the size of the g_all_countries table
 * @num_countries: out-parameter filled with the entry count
 *
 * Return: QDF_STATUS_SUCCESS (always succeeds)
 */
QDF_STATUS reg_get_num_countries(int *num_countries)
{
	*num_countries = QDF_ARRAY_SIZE(g_all_countries);

	return QDF_STATUS_SUCCESS;
}

/**
 * reg_get_num_reg_dmn_pairs() - Report the size of the g_reg_dmn_pairs table
 * @num_reg_dmn: out-parameter filled with the entry count
 *
 * Return: QDF_STATUS_SUCCESS (always succeeds)
 */
QDF_STATUS reg_get_num_reg_dmn_pairs(int *num_reg_dmn)
{
	*num_reg_dmn = QDF_ARRAY_SIZE(g_reg_dmn_pairs);

	return QDF_STATUS_SUCCESS;
}

/**
 * reg_get_default_country() - Return the compile-time default country
 * @default_country: out-parameter set to CTRY_UNITED_STATES
 *
 * Return: QDF_STATUS_SUCCESS (always succeeds)
 */
QDF_STATUS reg_get_default_country(uint16_t *default_country)
{
	*default_country = CTRY_UNITED_STATES;

	return QDF_STATUS_SUCCESS;
}

/* True when the given 5 GHz regdomain id is the ETSI13 domain */
bool reg_etsi13_regdmn(uint8_t reg_dmn)
{
	return reg_dmn == ETSI13;
}
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db.h
new file mode 100644
index 0000000000000000000000000000000000000000..1563bdbfba7c2f905d81bdd0e5c2651e3039c91c
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db.h
@@ -0,0 +1,169 @@
/*
 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: reg_db.h
 * This file contains regulatory component data structures
 */

#ifndef __REG_DB_H
#define __REG_DB_H

/* Alpha2 code for world reg domain */
#define REG_WORLD_ALPHA2 "00"

/* Channel behaviour flags, carried in struct regulatory_rule::flags */
#define REGULATORY_CHAN_DISABLED (1<<0)
#define REGULATORY_CHAN_NO_IR (1<<1)
#define REGULATORY_CHAN_RADAR (1<<3)
#define REGULATORY_CHAN_NO_OFDM (1<<6)
#define REGULATORY_CHAN_INDOOR_ONLY (1<<9)

/* Channel bandwidth restriction flags */
#define REGULATORY_CHAN_NO_HT40 (1<<4)
#define REGULATORY_CHAN_NO_80MHZ (1<<7)
#define REGULATORY_CHAN_NO_160MHZ (1<<8)
#define REGULATORY_CHAN_NO_20MHZ (1<<11)
#define REGULATORY_CHAN_NO_10MHZ (1<<12)

/*
 * Disallowed-phymode flags, used in
 * struct country_code_to_reg_domain::phymode_bitmap
 */
#define REGULATORY_PHYMODE_NO11A (1<<0)
#define REGULATORY_PHYMODE_NO11B (1<<1)
#define REGULATORY_PHYMODE_NO11G (1<<2)
#define REGULATORY_CHAN_NO11N (1<<3)
#define REGULATORY_PHYMODE_NO11AC (1<<4)
#define REGULATORY_PHYMODE_NO11AX (1<<5)

/* Capacity of struct regdomain::reg_rule_id */
#define MAX_REG_RULES 10
#define REG_ALPHA2_LEN 2

/**
 * enum dfs_reg - DFS region
 * @DFS_UNINIT_REG: un-initialized region
 * @DFS_FCC_REG: FCC region
 * @DFS_ETSI_REG: ETSI region
 * @DFS_MKK_REG: MKK region
 * @DFS_CN_REG: China region
 * @DFS_KR_REG: Korea region
 * @DFS_UNDEF_REG: Undefined region
 */
enum dfs_reg {
	DFS_UNINIT_REG = 0,
	DFS_FCC_REG = 1,
	DFS_ETSI_REG = 2,
	DFS_MKK_REG = 3,
	DFS_CN_REG = 4,
	DFS_KR_REG = 5,
	DFS_UNDEF_REG = 0xFFFF,
};

/**
 * struct regulatory_rule
 * @start_freq: start frequency
 * @end_freq: end frequency
 * @max_bw: maximum bandwidth
 * @reg_power: regulatory power
 * @flags: regulatory flags (REGULATORY_CHAN_* bits above)
 */
struct regulatory_rule {
	uint16_t start_freq;
	uint16_t end_freq;
	uint16_t max_bw;
	uint8_t reg_power;
	uint16_t flags;
};

/**
 * struct regdomain
 * @ctl_val: CTL value
 * @dfs_region: dfs region
 * @min_bw: minimum bandwidth
 * @ant_gain: antenna gain
 * @num_reg_rules: number of regulatory rules
 * @reg_rule_id: regulatory rule index
 */
struct regdomain {
	uint8_t ctl_val;
	enum dfs_reg dfs_region;
	uint16_t min_bw;
	uint8_t ant_gain;
	uint8_t num_reg_rules;
	uint8_t reg_rule_id[MAX_REG_RULES];
};

/**
 * struct country_code_to_reg_domain
 * @country_code: country code
 * @reg_dmn_pair_id: reg domainpair id
 * @alpha2: iso-3166 alpha2
 * @max_bw_2g: maximum 2g bandwidth
 * @max_bw_5g: maximum 5g bandwidth
 * @phymode_bitmap: phymodes not supported
 */
struct country_code_to_reg_domain {
	uint16_t country_code;
	uint16_t reg_dmn_pair_id;
	uint8_t alpha2[REG_ALPHA2_LEN + 1];
	uint16_t max_bw_2g;
	uint16_t max_bw_5g;
	uint16_t phymode_bitmap;
};

/**
 * struct reg_domain_pair
 * @reg_dmn_pair_id: reg domainpiar value
 * @dmn_id_5g: 5g reg domain value
 * @dmn_id_2g: 2g regdomain value
 */
struct reg_domain_pair {
	uint16_t reg_dmn_pair_id;
	uint8_t dmn_id_5g;
	uint8_t dmn_id_2g;
};

/**
 * enum ctl_value - CTL value
 * @CTL_FCC: CTL FCC
 * @CTL_MKK: CTL MKK
 * @CTL_ETSI: CTL ETSI
 * @CTL_KOR: CTL KOR
 * @CTL_CHN: CTL CHINA
 * @CTL_USER_DEF: CTL USER_DEF
 * @CTL_NONE: CTL NONE
 */
enum ctl_value {
	CTL_FCC = 0x10,
	CTL_ETSI = 0x30,
	CTL_MKK = 0x40,
	CTL_KOR = 0x50,
	CTL_CHN = 0x60,
	CTL_USER_DEF = 0x70,
	CTL_NONE = 0xff
};

/* Size of the g_all_countries table; always returns QDF_STATUS_SUCCESS */
QDF_STATUS reg_get_num_countries(int *num_countries);

/* Size of the g_reg_dmn_pairs table; always returns QDF_STATUS_SUCCESS */
QDF_STATUS reg_get_num_reg_dmn_pairs(int *num_reg_dmn);

/* Compile-time default country (CTRY_UNITED_STATES) */
QDF_STATUS reg_get_default_country(uint16_t *default_country);

/**
 * reg_etsi13_regdmn () - Checks if the reg domain is ETSI13 or not
 * @reg_dmn: reg domain
 *
 * Return: true or false
 */
bool reg_etsi13_regdmn(uint8_t reg_dmn);
#endif
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db_parser.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db_parser.c
new file mode 100644
index 0000000000000000000000000000000000000000..ff23a0e5f963f5c89c380402d7d0f463a4421cf6
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db_parser.c
@@ -0,0 +1,390 @@
/*
 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
+ * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_db_parser.c + * This file provides regulatory data base parser functions. + */ + +#include +#include "reg_db_parser.h" +#include +#include "reg_priv.h" +#include "reg_services.h" + +QDF_STATUS reg_is_country_code_valid(uint8_t *alpha2) +{ + uint16_t i; + int num_countries; + + reg_get_num_countries(&num_countries); + + for (i = 0; i < num_countries; i++) { + if ((g_all_countries[i].alpha2[0] == alpha2[0]) && + (g_all_countries[i].alpha2[1] == alpha2[1])) + return QDF_STATUS_SUCCESS; + else + continue; + } + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS reg_regrules_assign(uint8_t dmn_id_2g, + uint8_t dmn_id_5g, + uint8_t ant_gain_2g, + uint8_t ant_gain_5g, + struct cur_regulatory_info *reg_info) + +{ + uint8_t k; + uint8_t rule_index; + struct cur_reg_rule *r_r_2g = reg_info->reg_rules_2g_ptr; + struct cur_reg_rule *r_r_5g = reg_info->reg_rules_5g_ptr; + + for (k = 0; k < reg_info->num_2g_reg_rules; k++) { + rule_index = regdomains_2g[dmn_id_2g].reg_rule_id[k]; + r_r_2g->start_freq = reg_rules_2g[rule_index].start_freq; + r_r_2g->end_freq = reg_rules_2g[rule_index].end_freq; + r_r_2g->max_bw = reg_rules_2g[rule_index].max_bw; + r_r_2g->reg_power = reg_rules_2g[rule_index].reg_power; + 
r_r_2g->flags = reg_rules_2g[rule_index].flags; + r_r_2g->ant_gain = ant_gain_2g; + r_r_2g++; + } + + for (k = 0; k < reg_info->num_5g_reg_rules; k++) { + rule_index = regdomains_5g[dmn_id_5g].reg_rule_id[k]; + r_r_5g->start_freq = reg_rules_5g[rule_index].start_freq; + r_r_5g->end_freq = reg_rules_5g[rule_index].end_freq; + r_r_5g->max_bw = reg_rules_5g[rule_index].max_bw; + r_r_5g->reg_power = reg_rules_5g[rule_index].reg_power; + r_r_5g->flags = reg_rules_5g[rule_index].flags; + r_r_5g->ant_gain = ant_gain_5g; + r_r_5g++; + } + + if ((r_r_2g == reg_info->reg_rules_2g_ptr) && + (r_r_5g == reg_info->reg_rules_5g_ptr)) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_get_rdpair_from_country_iso(uint8_t *alpha2, + uint16_t *country_index, + uint16_t *regdmn_pair) +{ + uint16_t i, j; + int num_countries; + int num_reg_dmn; + + reg_get_num_countries(&num_countries); + reg_get_num_reg_dmn_pairs(&num_reg_dmn); + + for (i = 0; i < num_countries; i++) { + if ((g_all_countries[i].alpha2[0] == alpha2[0]) && + (g_all_countries[i].alpha2[1] == alpha2[1])) + break; + } + + if (i == num_countries) { + *country_index = -1; + return QDF_STATUS_E_FAILURE; + } + + for (j = 0; j < num_reg_dmn; j++) { + if (g_reg_dmn_pairs[j].reg_dmn_pair_id == + g_all_countries[i].reg_dmn_pair_id) + break; + } + + if (j == num_reg_dmn) { + *regdmn_pair = -1; + return QDF_STATUS_E_FAILURE; + } + + *country_index = i; + *regdmn_pair = j; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_get_rdpair_from_regdmn_id(uint16_t regdmn_id, + uint16_t *regdmn_pair) +{ + uint16_t j; + int num_reg_dmn; + + reg_get_num_reg_dmn_pairs(&num_reg_dmn); + + for (j = 0; j < num_reg_dmn; j++) { + if (g_reg_dmn_pairs[j].reg_dmn_pair_id == regdmn_id) + break; + } + + if (j == num_reg_dmn) { + *regdmn_pair = -1; + return QDF_STATUS_E_FAILURE; + } + + *regdmn_pair = j; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_get_rdpair_from_country_code(uint16_t cc, + uint16_t *country_index, + 
uint16_t *regdmn_pair) +{ + uint16_t i, j; + int num_countries; + int num_reg_dmn; + + reg_get_num_countries(&num_countries); + reg_get_num_reg_dmn_pairs(&num_reg_dmn); + + for (i = 0; i < num_countries; i++) { + if (g_all_countries[i].country_code == cc) + break; + } + + if (i == num_countries) { + *country_index = -1; + return QDF_STATUS_E_FAILURE; + } + + for (j = 0; j < num_reg_dmn; j++) { + if (g_reg_dmn_pairs[j].reg_dmn_pair_id == + g_all_countries[i].reg_dmn_pair_id) + break; + } + + if (j == num_reg_dmn) { + *regdmn_pair = -1; + return QDF_STATUS_E_FAILURE; + } + + *country_index = i; + *regdmn_pair = j; + + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS reg_get_reginfo_form_country_code_and_regdmn_pair( + struct cur_regulatory_info *reg_info, + uint16_t country_index, + uint16_t regdmn_pair) +{ + uint8_t rule_size_2g, rule_size_5g; + uint8_t dmn_id_5g, dmn_id_2g; + uint8_t ant_gain_2g, ant_gain_5g; + QDF_STATUS err; + + dmn_id_5g = g_reg_dmn_pairs[regdmn_pair].dmn_id_5g; + dmn_id_2g = g_reg_dmn_pairs[regdmn_pair].dmn_id_2g; + + rule_size_2g = QDF_ARRAY_SIZE(regdomains_2g[dmn_id_2g].reg_rule_id); + rule_size_5g = QDF_ARRAY_SIZE(regdomains_5g[dmn_id_5g].reg_rule_id); + + if (((rule_size_2g + rule_size_5g) >= + regdomains_2g[dmn_id_2g].num_reg_rules + + regdomains_5g[dmn_id_5g].num_reg_rules)) { + + qdf_mem_copy(reg_info->alpha2, + g_all_countries[country_index].alpha2, + sizeof(g_all_countries[country_index].alpha2)); + + reg_info->ctry_code = + g_all_countries[country_index].country_code; + reg_info->reg_dmn_pair = + g_reg_dmn_pairs[regdmn_pair].reg_dmn_pair_id; + reg_info->dfs_region = regdomains_5g[dmn_id_5g].dfs_region; + reg_info->phybitmap = + g_all_countries[country_index].phymode_bitmap; + + reg_info->max_bw_2g = g_all_countries[country_index].max_bw_2g; + reg_info->max_bw_5g = g_all_countries[country_index].max_bw_5g; + + reg_info->min_bw_2g = regdomains_2g[dmn_id_2g].min_bw; + reg_info->min_bw_5g = regdomains_5g[dmn_id_5g].min_bw; + + 
ant_gain_2g = regdomains_2g[dmn_id_2g].ant_gain; + ant_gain_5g = regdomains_5g[dmn_id_5g].ant_gain; + + reg_info->num_2g_reg_rules = + regdomains_2g[dmn_id_2g].num_reg_rules; + reg_info->num_5g_reg_rules = + regdomains_5g[dmn_id_5g].num_reg_rules; + + reg_info->reg_rules_2g_ptr = (struct cur_reg_rule *) + qdf_mem_malloc((reg_info->num_2g_reg_rules) * + sizeof(struct cur_reg_rule)); + reg_info->reg_rules_5g_ptr = (struct cur_reg_rule *) + qdf_mem_malloc((reg_info->num_5g_reg_rules) * + sizeof(struct cur_reg_rule)); + + err = reg_regrules_assign(dmn_id_2g, dmn_id_5g, + ant_gain_2g, ant_gain_5g, reg_info); + + if (err == QDF_STATUS_E_FAILURE) { + reg_err("%s : No rule found for country index = %d regdmn_pair = %d\n", + __func__, country_index, regdmn_pair); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; + } else if (!(((rule_size_2g + rule_size_5g) >= + regdomains_2g[dmn_id_2g].num_reg_rules + + regdomains_5g[dmn_id_5g].num_reg_rules))) + return QDF_STATUS_E_NOMEM; + + return QDF_STATUS_SUCCESS; +} + +#ifdef CONFIG_MCL_REGDB +/** + * reg_update_alpha2_from_domain() - Get country alpha2 code from reg domain + * @reg_info: pointer to hold alpha2 code + * + * This function is used to populate alpha2 of @reg_info with: + * (a) "00" (REG_WORLD_ALPHA2) for WORLD domain and + * (b) alpha2 of first country matching with non WORLD domain. 
+ * + * Return: None + */ +static void +reg_update_alpha2_from_domain(struct cur_regulatory_info *reg_info) +{ + uint16_t i; + int num_countries; + + if (reg_is_world_ctry_code(reg_info->reg_dmn_pair)) { + qdf_mem_copy(reg_info->alpha2, REG_WORLD_ALPHA2, + sizeof(reg_info->alpha2)); + return; + } + + reg_get_num_countries(&num_countries); + for (i = 0; i < (uint16_t)num_countries; i++) + if (g_all_countries[i].reg_dmn_pair_id == reg_info->reg_dmn_pair) + break; + + if (i == (uint16_t)num_countries) + return; + + qdf_mem_copy(reg_info->alpha2, g_all_countries[i].alpha2, + sizeof(g_all_countries[i].alpha2)); + reg_info->ctry_code = g_all_countries[i].country_code; +} +#else +static inline void +reg_update_alpha2_from_domain(struct cur_regulatory_info *reg_info) +{ +} +#endif + +static inline QDF_STATUS reg_get_reginfo_form_regdmn_pair( + struct cur_regulatory_info *reg_info, + uint16_t regdmn_pair) +{ + uint8_t rule_size_2g, rule_size_5g; + uint8_t dmn_id_5g, dmn_id_2g; + uint8_t ant_gain_2g, ant_gain_5g; + QDF_STATUS err; + + dmn_id_5g = g_reg_dmn_pairs[regdmn_pair].dmn_id_5g; + dmn_id_2g = g_reg_dmn_pairs[regdmn_pair].dmn_id_2g; + + rule_size_2g = QDF_ARRAY_SIZE(regdomains_2g[dmn_id_2g].reg_rule_id); + rule_size_5g = QDF_ARRAY_SIZE(regdomains_5g[dmn_id_5g].reg_rule_id); + + if (((rule_size_2g + rule_size_5g) >= + regdomains_2g[dmn_id_2g].num_reg_rules + + regdomains_5g[dmn_id_5g].num_reg_rules)) { + + qdf_mem_zero(reg_info->alpha2, sizeof(reg_info->alpha2)); + + reg_info->reg_dmn_pair = + g_reg_dmn_pairs[regdmn_pair].reg_dmn_pair_id; + reg_info->ctry_code = 0; + + reg_update_alpha2_from_domain(reg_info); + + reg_info->dfs_region = regdomains_5g[dmn_id_5g].dfs_region; + reg_info->phybitmap = 0; + + reg_info->max_bw_2g = 40; + reg_info->max_bw_5g = 160; + + reg_info->min_bw_2g = regdomains_2g[dmn_id_2g].min_bw; + reg_info->min_bw_5g = regdomains_5g[dmn_id_5g].min_bw; + + ant_gain_2g = regdomains_2g[dmn_id_2g].ant_gain; + ant_gain_5g = 
regdomains_5g[dmn_id_5g].ant_gain; + + reg_info->num_2g_reg_rules = + regdomains_2g[dmn_id_2g].num_reg_rules; + reg_info->num_5g_reg_rules = + regdomains_5g[dmn_id_5g].num_reg_rules; + + reg_info->reg_rules_2g_ptr = (struct cur_reg_rule *) + qdf_mem_malloc((reg_info->num_2g_reg_rules) * + sizeof(struct cur_reg_rule)); + reg_info->reg_rules_5g_ptr = (struct cur_reg_rule *) + qdf_mem_malloc((reg_info->num_5g_reg_rules) * + sizeof(struct cur_reg_rule)); + + err = reg_regrules_assign(dmn_id_2g, dmn_id_5g, + ant_gain_2g, ant_gain_5g, reg_info); + if (err == QDF_STATUS_E_FAILURE) { + reg_err("%s : No rule found for regdmn_pair = %d\n", + __func__, regdmn_pair); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; + } else if (!(((rule_size_2g + rule_size_5g) >= + regdomains_2g[dmn_id_2g].num_reg_rules + + regdomains_5g[dmn_id_5g].num_reg_rules))) + return QDF_STATUS_E_NOMEM; + + return QDF_STATUS_SUCCESS; +} + +/* Given a country code the function finds current regulatory information */ +QDF_STATUS reg_get_cur_reginfo(struct cur_regulatory_info *reg_info, + uint16_t country_index, + uint16_t regdmn_pair) +{ + if ((country_index != (uint16_t)(-1)) && + (regdmn_pair != (uint16_t)(-1))) + return reg_get_reginfo_form_country_code_and_regdmn_pair( + reg_info, + country_index, + regdmn_pair); + else if (regdmn_pair != (uint16_t)(-1)) + return reg_get_reginfo_form_regdmn_pair( + reg_info, + regdmn_pair); + else + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db_parser.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..ef4731441f3b4776419c086813396b40d0933533 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_db_parser.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
+ * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_db.h + * This file contains regulatory data base parser function declarations + */ + +#include + +extern const struct country_code_to_reg_domain g_all_countries[]; +extern const struct reg_domain_pair g_reg_dmn_pairs[]; +extern const struct regulatory_rule reg_rules_2g[]; +extern const struct regdomain regdomains_2g[]; +extern const struct regulatory_rule reg_rules_5g[]; +extern const struct regdomain regdomains_5g[]; + +QDF_STATUS reg_is_country_code_valid(uint8_t *alpha2); + +QDF_STATUS reg_regrules_assign(uint8_t dmn_id_2g, + uint8_t dmn_id_5g, + uint8_t ant_gain_2g, + uint8_t ant_gain_5g, + struct cur_regulatory_info *reg_info); + +QDF_STATUS reg_get_cur_reginfo(struct cur_regulatory_info *reg_info, + uint16_t country_index, + uint16_t regdmn_pair); + +QDF_STATUS reg_get_rdpair_from_country_iso(uint8_t *alpha, + uint16_t *country_index, + uint16_t *regdmn_pair); + +QDF_STATUS reg_get_rdpair_from_country_code(uint16_t cc, + uint16_t *country_index, + uint16_t *regdmn_pair); + +QDF_STATUS reg_get_rdpair_from_regdmn_id(uint16_t regdmn_id, + uint16_t *regdmn_pair); diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_priv.h 
b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_priv.h new file mode 100644 index 0000000000000000000000000000000000000000..e6d6e9e6eba9cf79c48846a2ac300f2fc051cbff --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_priv.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_priv.h + * This file contains regulatory component private data structures. + */ + +#ifndef __REG_PRIV_H +#define __REG_PRIV_H + +#include "reg_db.h" +#include "reg_services.h" + +#define reg_log(level, args...) \ + QDF_TRACE(QDF_MODULE_ID_REGULATORY, level, ## args) +#define reg_logfl(level, format, args...) reg_log(level, FL(format), ## args) +#define reg_alert(format, args...) \ + reg_logfl(QDF_TRACE_LEVEL_FATAL, format, ## args) +#define reg_err(format, args...) \ + reg_logfl(QDF_TRACE_LEVEL_ERROR, format, ## args) +#define reg_warn(format, args...) \ + reg_logfl(QDF_TRACE_LEVEL_WARN, format, ## args) +#define reg_notice(format, args...) \ + reg_logfl(QDF_TRACE_LEVEL_INFO, format, ## args) +#define reg_info(format, args...) \ + reg_logfl(QDF_TRACE_LEVEL_INFO_HIGH, format, ## args) +#define reg_debug(format, args...) 
\ + reg_logfl(QDF_TRACE_LEVEL_DEBUG, format, ## args) +#define reg_debug_rl(params...) \ + QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_REGULATORY, params) + + +/** + * struct wlan_regulatory_psoc_priv_obj - wlan regulatory psoc private object + * @new_user_ctry_pending: In this array, element[phy_id] is true if any user + * country update is pending for pdev (phy_id), used in case of MCL. + * @new_init_ctry_pending: In this array, element[phy_id] is true if any user + * country update is pending for pdev (phy_id), used in case of WIN. + * @new_11d_ctry_pending: In this array, element[phy_id] is true if any 11d + * country update is pending for pdev (phy_id). + * @world_country_pending: In this array, element[phy_id] is true if any world + * country update is pending for pdev (phy_id). + * @def_pdev_id: Default pdev id, used in case of MCL + */ +struct wlan_regulatory_psoc_priv_obj { + struct mas_chan_params mas_chan_params[PSOC_MAX_PHY_REG_CAP]; + bool offload_enabled; + uint8_t num_phy; + char cur_country[REG_ALPHA2_LEN + 1]; + char def_country[REG_ALPHA2_LEN + 1]; + uint16_t def_country_code; + uint16_t def_region_domain; + enum country_src cc_src; + struct wlan_objmgr_psoc *psoc_ptr; + bool new_user_ctry_pending[PSOC_MAX_PHY_REG_CAP]; + bool new_init_ctry_pending[PSOC_MAX_PHY_REG_CAP]; + bool new_11d_ctry_pending[PSOC_MAX_PHY_REG_CAP]; + bool world_country_pending[PSOC_MAX_PHY_REG_CAP]; + bool dfs_enabled; + enum band_info band_capability; + bool indoor_chan_enabled; + bool enable_11d_supp_original; + bool enable_11d_supp; + bool is_11d_offloaded; + uint8_t vdev_id_for_11d_scan; + uint8_t master_vdev_cnt; + uint8_t vdev_cnt_11d; + uint32_t scan_11d_interval; + uint8_t vdev_ids_11d[MAX_STA_VDEV_CNT]; + bool user_ctry_priority; + bool user_ctry_set; + struct chan_change_cbk_entry cbk_list[REG_MAX_CHAN_CHANGE_CBKS]; + uint8_t num_chan_change_cbks; + uint8_t ch_avoid_ind; + struct unsafe_ch_list unsafe_chan_list; + struct ch_avoid_ind_type avoid_freq_list; + enum 
restart_beaconing_on_ch_avoid_rule restart_beaconing; + struct wlan_psoc_host_hal_reg_capabilities_ext + reg_cap[PSOC_MAX_PHY_REG_CAP]; + bool force_ssc_disable_indoor_channel; + bool enable_srd_chan_in_master_mode; + bool enable_11d_in_world_mode; + int8_t def_pdev_id; + qdf_spinlock_t cbk_list_lock; +}; + +struct wlan_regulatory_pdev_priv_obj { + struct regulatory_channel cur_chan_list[NUM_CHANNELS]; + struct regulatory_channel mas_chan_list[NUM_CHANNELS]; +#ifdef DISABLE_CHANNEL_LIST + struct regulatory_channel cache_disable_chan_list[NUM_CHANNELS]; + uint32_t num_cache_channels; + bool disable_cached_channels; +#endif + char default_country[REG_ALPHA2_LEN + 1]; + uint16_t def_region_domain; + uint16_t def_country_code; + char current_country[REG_ALPHA2_LEN + 1]; + uint16_t reg_dmn_pair; + uint16_t ctry_code; + enum dfs_reg dfs_region; + uint32_t phybitmap; + struct wlan_objmgr_pdev *pdev_ptr; + uint32_t range_2g_low; + uint32_t range_2g_high; + uint32_t range_5g_low; + uint32_t range_5g_high; + bool dfs_enabled; + bool set_fcc_channel; + enum band_info band_capability; + bool indoor_chan_enabled; + bool en_chan_144; + uint32_t wireless_modes; + struct ch_avoid_ind_type freq_avoid_list; + bool force_ssc_disable_indoor_channel; + bool sap_state; + struct reg_rule_info reg_rules; + qdf_spinlock_t reg_rules_lock; +}; + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_services.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_services.c new file mode 100644 index 0000000000000000000000000000000000000000..a5c9f6128c5ceecdc004211130260b7b584c71e8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_services.c @@ -0,0 +1,5055 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_services.c + * This file defines regulatory component service functions + */ + +#include "reg_services.h" +#include "reg_priv.h" +#include "reg_db_parser.h" +#include +#include + +#define CHAN_12_CENT_FREQ 2467 +#define MAX_PWR_FCC_CHAN_12 8 +#define CHAN_13_CENT_FREQ 2472 +#define MAX_PWR_FCC_CHAN_13 2 +#define CHAN_144_CENT_FREQ 5720 +#define DEFAULT_WORLD_REGDMN 0x60 + +#define IS_VALID_PSOC_REG_OBJ(psoc_priv_obj) (NULL != psoc_priv_obj) +#define IS_VALID_PDEV_REG_OBJ(pdev_priv_obj) (NULL != pdev_priv_obj) + +const struct chan_map *channel_map; + +const struct bonded_channel bonded_chan_40mhz_list[] = { + {36, 40}, + {44, 48}, + {52, 56}, + {60, 64}, + {100, 104}, + {108, 112}, + {116, 120}, + {124, 128}, + {132, 136}, + {140, 144}, + {149, 153}, + {157, 161}, + {165, 169} +}; + +const struct bonded_channel bonded_chan_80mhz_list[] = { + {36, 48}, + {52, 64}, + {100, 112}, + {116, 128}, + {132, 144}, + {149, 161} +}; + +const struct bonded_channel bonded_chan_160mhz_list[] = { + {36, 64}, + {100, 128} +}; + +const enum phy_ch_width get_next_lower_bw[] = { + [CH_WIDTH_80P80MHZ] = CH_WIDTH_160MHZ, + [CH_WIDTH_160MHZ] = CH_WIDTH_80MHZ, + [CH_WIDTH_80MHZ] = CH_WIDTH_40MHZ, + [CH_WIDTH_40MHZ] = 
CH_WIDTH_20MHZ, + [CH_WIDTH_20MHZ] = CH_WIDTH_10MHZ, + [CH_WIDTH_10MHZ] = CH_WIDTH_5MHZ, + [CH_WIDTH_5MHZ] = CH_WIDTH_INVALID +}; + +#ifdef CONFIG_LEGACY_CHAN_ENUM +static const struct chan_map channel_map_old[NUM_CHANNELS] = { + [CHAN_ENUM_1] = {2412, 1, 2, 40}, + [CHAN_ENUM_2] = {2417, 2, 2, 40}, + [CHAN_ENUM_3] = {2422, 3, 2, 40}, + [CHAN_ENUM_4] = {2427, 4, 2, 40}, + [CHAN_ENUM_5] = {2432, 5, 2, 40}, + [CHAN_ENUM_6] = {2437, 6, 2, 40}, + [CHAN_ENUM_7] = {2442, 7, 2, 40}, + [CHAN_ENUM_8] = {2447, 8, 2, 40}, + [CHAN_ENUM_9] = {2452, 9, 2, 40}, + [CHAN_ENUM_10] = {2457, 10, 2, 40}, + [CHAN_ENUM_11] = {2462, 11, 2, 40}, + [CHAN_ENUM_12] = {2467, 12, 2, 40}, + [CHAN_ENUM_13] = {2472, 13, 2, 40}, + [CHAN_ENUM_14] = {2484, 14, 2, 40}, + + [CHAN_ENUM_36] = {5180, 36, 2, 160}, + [CHAN_ENUM_40] = {5200, 40, 2, 160}, + [CHAN_ENUM_44] = {5220, 44, 2, 160}, + [CHAN_ENUM_48] = {5240, 48, 2, 160}, + [CHAN_ENUM_52] = {5260, 52, 2, 160}, + [CHAN_ENUM_56] = {5280, 56, 2, 160}, + [CHAN_ENUM_60] = {5300, 60, 2, 160}, + [CHAN_ENUM_64] = {5320, 64, 2, 160}, + + [CHAN_ENUM_100] = {5500, 100, 2, 160}, + [CHAN_ENUM_104] = {5520, 104, 2, 160}, + [CHAN_ENUM_108] = {5540, 108, 2, 160}, + [CHAN_ENUM_112] = {5560, 112, 2, 160}, + [CHAN_ENUM_116] = {5580, 116, 2, 160}, + [CHAN_ENUM_120] = {5600, 120, 2, 160}, + [CHAN_ENUM_124] = {5620, 124, 2, 160}, + [CHAN_ENUM_128] = {5640, 128, 2, 160}, + [CHAN_ENUM_132] = {5660, 132, 2, 160}, + [CHAN_ENUM_136] = {5680, 136, 2, 160}, + [CHAN_ENUM_140] = {5700, 140, 2, 160}, + [CHAN_ENUM_144] = {5720, 144, 2, 160}, + + [CHAN_ENUM_149] = {5745, 149, 2, 160}, + [CHAN_ENUM_153] = {5765, 153, 2, 160}, + [CHAN_ENUM_157] = {5785, 157, 2, 160}, + [CHAN_ENUM_161] = {5805, 161, 2, 160}, + [CHAN_ENUM_165] = {5825, 165, 2, 160}, +#ifndef WLAN_FEATURE_DSRC + [CHAN_ENUM_169] = {5845, 169, 2, 40}, + [CHAN_ENUM_173] = {5865, 173, 2, 20}, +#else + [CHAN_ENUM_170] = {5852, 170, 2, 20}, + [CHAN_ENUM_171] = {5855, 171, 2, 20}, + [CHAN_ENUM_172] = {5860, 172, 2, 20}, + 
[CHAN_ENUM_173] = {5865, 173, 2, 20}, + [CHAN_ENUM_174] = {5870, 174, 2, 20}, + [CHAN_ENUM_175] = {5875, 175, 2, 20}, + [CHAN_ENUM_176] = {5880, 176, 2, 20}, + [CHAN_ENUM_177] = {5885, 177, 2, 20}, + [CHAN_ENUM_178] = {5890, 178, 2, 20}, + [CHAN_ENUM_179] = {5895, 179, 2, 20}, + [CHAN_ENUM_180] = {5900, 180, 2, 20}, + [CHAN_ENUM_181] = {5905, 181, 2, 20}, + [CHAN_ENUM_182] = {5910, 182, 2, 20}, + [CHAN_ENUM_183] = {5915, 183, 2, 20}, + [CHAN_ENUM_184] = {5920, 184, 2, 20}, +#endif +}; + +#else +static const struct chan_map channel_map_us[NUM_CHANNELS] = { + [CHAN_ENUM_2412] = {2412, 1, 20, 40}, + [CHAN_ENUM_2417] = {2417, 2, 20, 40}, + [CHAN_ENUM_2422] = {2422, 3, 20, 40}, + [CHAN_ENUM_2427] = {2427, 4, 20, 40}, + [CHAN_ENUM_2432] = {2432, 5, 20, 40}, + [CHAN_ENUM_2437] = {2437, 6, 20, 40}, + [CHAN_ENUM_2442] = {2442, 7, 20, 40}, + [CHAN_ENUM_2447] = {2447, 8, 20, 40}, + [CHAN_ENUM_2452] = {2452, 9, 20, 40}, + [CHAN_ENUM_2457] = {2457, 10, 20, 40}, + [CHAN_ENUM_2462] = {2462, 11, 20, 40}, + [CHAN_ENUM_2467] = {2467, 12, 20, 40}, + [CHAN_ENUM_2472] = {2472, 13, 20, 40}, + [CHAN_ENUM_2484] = {2484, 14, 20, 20}, + + [CHAN_ENUM_4912] = {4912, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4915] = {4915, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4917] = {4917, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4920] = {4920, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4922] = {4922, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4925] = {4925, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4927] = {4927, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4932] = {4932, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4935] = {4935, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4937] = {4937, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4940] = {4940, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4942] = {4942, 1, 5, 5}, + [CHAN_ENUM_4945] = {4945, 11, 10, 10}, + [CHAN_ENUM_4947] = {4947, 2, 5, 5}, + [CHAN_ENUM_4950] = {4950, 20, 10, 20}, + [CHAN_ENUM_4952] = {4952, 3, 5, 5}, + [CHAN_ENUM_4955] = {4955, 21, 10, 20}, + 
[CHAN_ENUM_4957] = {4957, 4, 5, 5}, + [CHAN_ENUM_4960] = {4960, 22, 10, 20}, + [CHAN_ENUM_4962] = {4962, 5, 5, 5}, + [CHAN_ENUM_4965] = {4965, 23, 10, 20}, + [CHAN_ENUM_4967] = {4967, 6, 5, 5}, + [CHAN_ENUM_4970] = {4970, 24, 10, 20}, + [CHAN_ENUM_4972] = {4972, 7, 5, 5}, + [CHAN_ENUM_4975] = {4975, 25, 10, 20}, + [CHAN_ENUM_4977] = {4977, 8, 5, 5}, + [CHAN_ENUM_4980] = {4980, 26, 10, 20}, + [CHAN_ENUM_4982] = {4982, 9, 5, 5}, + [CHAN_ENUM_4985] = {4985, 19, 10, 10}, + [CHAN_ENUM_4987] = {4987, 10, 5, 5}, + [CHAN_ENUM_5032] = {5032, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5035] = {5035, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5037] = {5037, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5040] = {5040, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5042] = {5042, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5045] = {5045, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5047] = {5047, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5052] = {5052, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5055] = {5055, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5057] = {5057, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5060] = {5060, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5080] = {5080, INVALID_CHANNEL_NUM, 2, 20}, + + [CHAN_ENUM_5180] = {5180, 36, 2, 160}, + [CHAN_ENUM_5200] = {5200, 40, 2, 160}, + [CHAN_ENUM_5220] = {5220, 44, 2, 160}, + [CHAN_ENUM_5240] = {5240, 48, 2, 160}, + [CHAN_ENUM_5260] = {5260, 52, 2, 160}, + [CHAN_ENUM_5280] = {5280, 56, 2, 160}, + [CHAN_ENUM_5300] = {5300, 60, 2, 160}, + [CHAN_ENUM_5320] = {5320, 64, 2, 160}, + [CHAN_ENUM_5500] = {5500, 100, 2, 160}, + [CHAN_ENUM_5520] = {5520, 104, 2, 160}, + [CHAN_ENUM_5540] = {5540, 108, 2, 160}, + [CHAN_ENUM_5560] = {5560, 112, 2, 160}, + [CHAN_ENUM_5580] = {5580, 116, 2, 160}, + [CHAN_ENUM_5600] = {5600, 120, 2, 160}, + [CHAN_ENUM_5620] = {5620, 124, 2, 160}, + [CHAN_ENUM_5640] = {5640, 128, 2, 160}, + [CHAN_ENUM_5660] = {5660, 132, 2, 160}, + [CHAN_ENUM_5680] = {5680, 136, 2, 160}, + [CHAN_ENUM_5700] = {5700, 140, 2, 160}, + 
[CHAN_ENUM_5720] = {5720, 144, 2, 160}, + [CHAN_ENUM_5745] = {5745, 149, 2, 160}, + [CHAN_ENUM_5765] = {5765, 153, 2, 160}, + [CHAN_ENUM_5785] = {5785, 157, 2, 160}, + [CHAN_ENUM_5805] = {5805, 161, 2, 160}, + [CHAN_ENUM_5825] = {5825, 165, 2, 160}, + [CHAN_ENUM_5845] = {5845, 169, 2, 160}, + [CHAN_ENUM_5850] = {5850, 170, 2, 160}, + [CHAN_ENUM_5855] = {5855, 171, 2, 160}, + [CHAN_ENUM_5860] = {5860, 172, 2, 160}, + [CHAN_ENUM_5865] = {5865, 173, 2, 160}, + [CHAN_ENUM_5870] = {5870, 174, 2, 160}, + [CHAN_ENUM_5875] = {5875, 175, 2, 160}, + [CHAN_ENUM_5880] = {5880, 176, 2, 160}, + [CHAN_ENUM_5885] = {5885, 177, 2, 160}, + [CHAN_ENUM_5890] = {5890, 178, 2, 160}, + [CHAN_ENUM_5895] = {5895, 179, 2, 160}, + [CHAN_ENUM_5900] = {5900, 180, 2, 160}, + [CHAN_ENUM_5905] = {5905, 181, 2, 160}, + [CHAN_ENUM_5910] = {5910, 182, 2, 160}, + [CHAN_ENUM_5915] = {5915, 183, 2, 160}, + [CHAN_ENUM_5920] = {5920, 184, 2, 160}, +}; + +static const struct chan_map channel_map_eu[NUM_CHANNELS] = { + [CHAN_ENUM_2412] = {2412, 1, 20, 40}, + [CHAN_ENUM_2417] = {2417, 2, 20, 40}, + [CHAN_ENUM_2422] = {2422, 3, 20, 40}, + [CHAN_ENUM_2427] = {2427, 4, 20, 40}, + [CHAN_ENUM_2432] = {2432, 5, 20, 40}, + [CHAN_ENUM_2437] = {2437, 6, 20, 40}, + [CHAN_ENUM_2442] = {2442, 7, 20, 40}, + [CHAN_ENUM_2447] = {2447, 8, 20, 40}, + [CHAN_ENUM_2452] = {2452, 9, 20, 40}, + [CHAN_ENUM_2457] = {2457, 10, 20, 40}, + [CHAN_ENUM_2462] = {2462, 11, 20, 40}, + [CHAN_ENUM_2467] = {2467, 12, 20, 40}, + [CHAN_ENUM_2472] = {2472, 13, 20, 40}, + [CHAN_ENUM_2484] = {2484, 14, 20, 20}, + + [CHAN_ENUM_4912] = {4912, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4915] = {4915, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4917] = {4917, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4920] = {4920, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4922] = {4922, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4925] = {4925, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4927] = {4927, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4932] = {4932, 
INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4935] = {4935, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4937] = {4937, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4940] = {4940, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4942] = {4942, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4945] = {4945, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4947] = {4947, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4950] = {4950, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4952] = {4952, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4955] = {4955, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4957] = {4957, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4960] = {4960, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4962] = {4962, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4965] = {4965, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4967] = {4967, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4970] = {4970, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4972] = {4972, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4975] = {4975, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4977] = {4977, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4980] = {4980, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4982] = {4982, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4985] = {4985, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4987] = {4987, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5032] = {5032, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5035] = {5035, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5037] = {5037, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5040] = {5040, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5042] = {5042, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5045] = {5045, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5047] = {5047, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5052] = {5052, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5055] = {5055, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5057] = {5057, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5060] = {5060, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5080] = {5080, INVALID_CHANNEL_NUM, 2, 20}, + + [CHAN_ENUM_5180] = {5180, 
36, 2, 160}, + [CHAN_ENUM_5200] = {5200, 40, 2, 160}, + [CHAN_ENUM_5220] = {5220, 44, 2, 160}, + [CHAN_ENUM_5240] = {5240, 48, 2, 160}, + [CHAN_ENUM_5260] = {5260, 52, 2, 160}, + [CHAN_ENUM_5280] = {5280, 56, 2, 160}, + [CHAN_ENUM_5300] = {5300, 60, 2, 160}, + [CHAN_ENUM_5320] = {5320, 64, 2, 160}, + [CHAN_ENUM_5500] = {5500, 100, 2, 160}, + [CHAN_ENUM_5520] = {5520, 104, 2, 160}, + [CHAN_ENUM_5540] = {5540, 108, 2, 160}, + [CHAN_ENUM_5560] = {5560, 112, 2, 160}, + [CHAN_ENUM_5580] = {5580, 116, 2, 160}, + [CHAN_ENUM_5600] = {5600, 120, 2, 160}, + [CHAN_ENUM_5620] = {5620, 124, 2, 160}, + [CHAN_ENUM_5640] = {5640, 128, 2, 160}, + [CHAN_ENUM_5660] = {5660, 132, 2, 160}, + [CHAN_ENUM_5680] = {5680, 136, 2, 160}, + [CHAN_ENUM_5700] = {5700, 140, 2, 160}, + [CHAN_ENUM_5720] = {5720, 144, 2, 160}, + [CHAN_ENUM_5745] = {5745, 149, 2, 160}, + [CHAN_ENUM_5765] = {5765, 153, 2, 160}, + [CHAN_ENUM_5785] = {5785, 157, 2, 160}, + [CHAN_ENUM_5805] = {5805, 161, 2, 160}, + [CHAN_ENUM_5825] = {5825, 165, 2, 160}, + [CHAN_ENUM_5845] = {5845, 169, 2, 160}, + [CHAN_ENUM_5850] = {5850, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5855] = {5855, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5860] = {5860, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5865] = {5865, 173, 2, 160}, + [CHAN_ENUM_5870] = {5870, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5875] = {5875, 175, 2, 160}, + [CHAN_ENUM_5880] = {5880, 176, 2, 160}, + [CHAN_ENUM_5885] = {5885, 177, 2, 160}, + [CHAN_ENUM_5890] = {5890, 178, 2, 160}, + [CHAN_ENUM_5895] = {5895, 179, 2, 160}, + [CHAN_ENUM_5900] = {5900, 180, 2, 160}, + [CHAN_ENUM_5905] = {5905, 181, 2, 160}, + [CHAN_ENUM_5910] = {5910, 182, 2, 160}, + [CHAN_ENUM_5915] = {5915, 183, 2, 160}, + [CHAN_ENUM_5920] = {5920, 184, 2, 160}, +}; + +static const struct chan_map channel_map_jp[NUM_CHANNELS] = { + [CHAN_ENUM_2412] = {2412, 1, 20, 40}, + [CHAN_ENUM_2417] = {2417, 2, 20, 40}, + [CHAN_ENUM_2422] = {2422, 3, 20, 40}, + [CHAN_ENUM_2427] = {2427, 4, 20, 40}, + [CHAN_ENUM_2432] = 
{2432, 5, 20, 40}, + [CHAN_ENUM_2437] = {2437, 6, 20, 40}, + [CHAN_ENUM_2442] = {2442, 7, 20, 40}, + [CHAN_ENUM_2447] = {2447, 8, 20, 40}, + [CHAN_ENUM_2452] = {2452, 9, 20, 40}, + [CHAN_ENUM_2457] = {2457, 10, 20, 40}, + [CHAN_ENUM_2462] = {2462, 11, 20, 40}, + [CHAN_ENUM_2467] = {2467, 12, 20, 40}, + [CHAN_ENUM_2472] = {2472, 13, 20, 40}, + [CHAN_ENUM_2484] = {2484, 14, 20, 20}, + + [CHAN_ENUM_4912] = {4912, 182, 5, 5}, + [CHAN_ENUM_4915] = {4915, 183, 10, 10}, + [CHAN_ENUM_4917] = {4917, 183, 5, 5}, + [CHAN_ENUM_4920] = {4920, 184, 10, 20}, + [CHAN_ENUM_4922] = {4922, 184, 5, 5}, + [CHAN_ENUM_4925] = {4925, 185, 10, 10}, + [CHAN_ENUM_4927] = {4927, 185, 5, 5}, + [CHAN_ENUM_4932] = {4932, 186, 5, 5}, + [CHAN_ENUM_4935] = {4935, 187, 10, 10}, + [CHAN_ENUM_4937] = {4937, 187, 5, 5}, + [CHAN_ENUM_4940] = {4940, 188, 10, 20}, + [CHAN_ENUM_4942] = {4942, 188, 5, 5}, + [CHAN_ENUM_4945] = {4945, 189, 10, 10}, + [CHAN_ENUM_4947] = {4947, 189, 5, 5}, + [CHAN_ENUM_4950] = {4950, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4952] = {4952, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4955] = {4955, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4957] = {4957, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4960] = {4960, 192, 20, 20}, + [CHAN_ENUM_4962] = {4962, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4965] = {4965, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4967] = {4967, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4970] = {4970, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4972] = {4972, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4975] = {4975, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4977] = {4977, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4980] = {4980, 196, 20, 20}, + [CHAN_ENUM_4982] = {4982, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4985] = {4985, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4987] = {4987, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5032] = {5032, 6, 5, 5}, + [CHAN_ENUM_5035] = {5035, 7, 10, 10}, + [CHAN_ENUM_5037] = {5037, 7, 5, 5}, + [CHAN_ENUM_5040] = {5040, 8, 10, 20}, + 
[CHAN_ENUM_5042] = {5042, 8, 5, 5}, + [CHAN_ENUM_5045] = {5045, 9, 10, 10}, + [CHAN_ENUM_5047] = {5047, 9, 5, 5}, + [CHAN_ENUM_5052] = {5052, 10, 5, 5}, + [CHAN_ENUM_5055] = {5055, 11, 10, 10}, + [CHAN_ENUM_5057] = {5057, 11, 5, 5}, + [CHAN_ENUM_5060] = {5060, 12, 20, 20}, + [CHAN_ENUM_5080] = {5080, 16, 20, 20}, + + [CHAN_ENUM_5180] = {5180, 36, 2, 160}, + [CHAN_ENUM_5200] = {5200, 40, 2, 160}, + [CHAN_ENUM_5220] = {5220, 44, 2, 160}, + [CHAN_ENUM_5240] = {5240, 48, 2, 160}, + [CHAN_ENUM_5260] = {5260, 52, 2, 160}, + [CHAN_ENUM_5280] = {5280, 56, 2, 160}, + [CHAN_ENUM_5300] = {5300, 60, 2, 160}, + [CHAN_ENUM_5320] = {5320, 64, 2, 160}, + [CHAN_ENUM_5500] = {5500, 100, 2, 160}, + [CHAN_ENUM_5520] = {5520, 104, 2, 160}, + [CHAN_ENUM_5540] = {5540, 108, 2, 160}, + [CHAN_ENUM_5560] = {5560, 112, 2, 160}, + [CHAN_ENUM_5580] = {5580, 116, 2, 160}, + [CHAN_ENUM_5600] = {5600, 120, 2, 160}, + [CHAN_ENUM_5620] = {5620, 124, 2, 160}, + [CHAN_ENUM_5640] = {5640, 128, 2, 160}, + [CHAN_ENUM_5660] = {5660, 132, 2, 160}, + [CHAN_ENUM_5680] = {5680, 136, 2, 160}, + [CHAN_ENUM_5700] = {5700, 140, 2, 160}, + [CHAN_ENUM_5720] = {5720, 144, 2, 160}, + [CHAN_ENUM_5745] = {5745, 149, 2, 160}, + [CHAN_ENUM_5765] = {5765, 153, 2, 160}, + [CHAN_ENUM_5785] = {5785, 157, 2, 160}, + [CHAN_ENUM_5805] = {5805, 161, 2, 160}, + [CHAN_ENUM_5825] = {5825, 165, 2, 160}, + [CHAN_ENUM_5845] = {5845, 169, 2, 160}, + [CHAN_ENUM_5850] = {5850, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5855] = {5855, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5860] = {5860, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5865] = {5865, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5870] = {5870, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5875] = {5875, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5880] = {5880, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5885] = {5885, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5890] = {5890, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5895] = {5895, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5900] 
= {5900, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5905] = {5905, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5910] = {5910, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5915] = {5915, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5920] = {5920, INVALID_CHANNEL_NUM, 2, 160}, +}; + +static const struct chan_map channel_map_global[NUM_CHANNELS] = { + [CHAN_ENUM_2412] = {2412, 1, 20, 40}, + [CHAN_ENUM_2417] = {2417, 2, 20, 40}, + [CHAN_ENUM_2422] = {2422, 3, 20, 40}, + [CHAN_ENUM_2427] = {2427, 4, 20, 40}, + [CHAN_ENUM_2432] = {2432, 5, 20, 40}, + [CHAN_ENUM_2437] = {2437, 6, 20, 40}, + [CHAN_ENUM_2442] = {2442, 7, 20, 40}, + [CHAN_ENUM_2447] = {2447, 8, 20, 40}, + [CHAN_ENUM_2452] = {2452, 9, 20, 40}, + [CHAN_ENUM_2457] = {2457, 10, 20, 40}, + [CHAN_ENUM_2462] = {2462, 11, 20, 40}, + [CHAN_ENUM_2467] = {2467, 12, 20, 40}, + [CHAN_ENUM_2472] = {2472, 13, 20, 40}, + [CHAN_ENUM_2484] = {2484, 14, 20, 20}, + + [CHAN_ENUM_4912] = {4912, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4915] = {4915, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4917] = {4917, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4920] = {4920, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4922] = {4922, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4925] = {4925, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4927] = {4927, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4932] = {4932, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4935] = {4935, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4937] = {4937, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4940] = {4940, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4942] = {4942, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4945] = {4945, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4947] = {4947, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4950] = {4950, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4952] = {4952, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4955] = {4955, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4957] = {4957, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4960] = {4960, INVALID_CHANNEL_NUM, 2, 20}, + 
[CHAN_ENUM_4962] = {4962, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4965] = {4965, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4967] = {4967, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4970] = {4970, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4972] = {4972, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4975] = {4975, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4977] = {4977, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4980] = {4980, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4982] = {4982, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4985] = {4985, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4987] = {4987, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5032] = {5032, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5035] = {5035, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5037] = {5037, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5040] = {5040, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5042] = {5042, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5045] = {5045, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5047] = {5047, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5052] = {5052, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5055] = {5055, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5057] = {5057, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5060] = {5060, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5080] = {5080, INVALID_CHANNEL_NUM, 2, 20}, + + [CHAN_ENUM_5180] = {5180, 36, 2, 160}, + [CHAN_ENUM_5200] = {5200, 40, 2, 160}, + [CHAN_ENUM_5220] = {5220, 44, 2, 160}, + [CHAN_ENUM_5240] = {5240, 48, 2, 160}, + [CHAN_ENUM_5260] = {5260, 52, 2, 160}, + [CHAN_ENUM_5280] = {5280, 56, 2, 160}, + [CHAN_ENUM_5300] = {5300, 60, 2, 160}, + [CHAN_ENUM_5320] = {5320, 64, 2, 160}, + [CHAN_ENUM_5500] = {5500, 100, 2, 160}, + [CHAN_ENUM_5520] = {5520, 104, 2, 160}, + [CHAN_ENUM_5540] = {5540, 108, 2, 160}, + [CHAN_ENUM_5560] = {5560, 112, 2, 160}, + [CHAN_ENUM_5580] = {5580, 116, 2, 160}, + [CHAN_ENUM_5600] = {5600, 120, 2, 160}, + [CHAN_ENUM_5620] = {5620, 124, 2, 160}, + [CHAN_ENUM_5640] = {5640, 128, 2, 160}, + [CHAN_ENUM_5660] = 
{5660, 132, 2, 160}, + [CHAN_ENUM_5680] = {5680, 136, 2, 160}, + [CHAN_ENUM_5700] = {5700, 140, 2, 160}, + [CHAN_ENUM_5720] = {5720, 144, 2, 160}, + [CHAN_ENUM_5745] = {5745, 149, 2, 160}, + [CHAN_ENUM_5765] = {5765, 153, 2, 160}, + [CHAN_ENUM_5785] = {5785, 157, 2, 160}, + [CHAN_ENUM_5805] = {5805, 161, 2, 160}, + [CHAN_ENUM_5825] = {5825, 165, 2, 160}, + [CHAN_ENUM_5845] = {5845, 169, 2, 160}, + [CHAN_ENUM_5850] = {5850, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5855] = {5855, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5860] = {5860, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5865] = {5865, 173, 2, 160}, + [CHAN_ENUM_5870] = {5870, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5875] = {5875, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5880] = {5880, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5885] = {5885, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5890] = {5890, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5895] = {5895, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5900] = {5900, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5905] = {5905, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5910] = {5910, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5915] = {5915, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5920] = {5920, INVALID_CHANNEL_NUM, 2, 160}, +}; + +static const struct chan_map channel_map_china[NUM_CHANNELS] = { + [CHAN_ENUM_2412] = {2412, 1, 20, 40}, + [CHAN_ENUM_2417] = {2417, 2, 20, 40}, + [CHAN_ENUM_2422] = {2422, 3, 20, 40}, + [CHAN_ENUM_2427] = {2427, 4, 20, 40}, + [CHAN_ENUM_2432] = {2432, 5, 20, 40}, + [CHAN_ENUM_2437] = {2437, 6, 20, 40}, + [CHAN_ENUM_2442] = {2442, 7, 20, 40}, + [CHAN_ENUM_2447] = {2447, 8, 20, 40}, + [CHAN_ENUM_2452] = {2452, 9, 20, 40}, + [CHAN_ENUM_2457] = {2457, 10, 20, 40}, + [CHAN_ENUM_2462] = {2462, 11, 20, 40}, + [CHAN_ENUM_2467] = {2467, 12, 20, 40}, + [CHAN_ENUM_2472] = {2472, 13, 20, 40}, + [CHAN_ENUM_2484] = {2484, 14, 20, 20}, + + [CHAN_ENUM_4912] = {4912, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4915] = {4915, INVALID_CHANNEL_NUM, 
2, 20}, + [CHAN_ENUM_4917] = {4917, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4920] = {4920, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4922] = {4922, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4925] = {4925, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4927] = {4927, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4932] = {4932, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4935] = {4935, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4937] = {4937, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4940] = {4940, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4942] = {4942, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4945] = {4945, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4947] = {4947, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4950] = {4950, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4952] = {4952, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4955] = {4955, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4957] = {4957, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4960] = {4960, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4962] = {4962, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4965] = {4965, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4967] = {4967, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4970] = {4970, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4972] = {4972, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4975] = {4975, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4977] = {4977, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4980] = {4980, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4982] = {4982, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4985] = {4985, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_4987] = {4987, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5032] = {5032, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5035] = {5035, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5037] = {5037, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5040] = {5040, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5042] = {5042, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5045] = {5045, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5047] = {5047, INVALID_CHANNEL_NUM, 2, 
20}, + [CHAN_ENUM_5052] = {5052, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5055] = {5055, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5057] = {5057, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5060] = {5060, INVALID_CHANNEL_NUM, 2, 20}, + [CHAN_ENUM_5080] = {5080, INVALID_CHANNEL_NUM, 2, 20}, + + [CHAN_ENUM_5180] = {5180, 36, 2, 160}, + [CHAN_ENUM_5200] = {5200, 40, 2, 160}, + [CHAN_ENUM_5220] = {5220, 44, 2, 160}, + [CHAN_ENUM_5240] = {5240, 48, 2, 160}, + [CHAN_ENUM_5260] = {5260, 52, 2, 160}, + [CHAN_ENUM_5280] = {5280, 56, 2, 160}, + [CHAN_ENUM_5300] = {5300, 60, 2, 160}, + [CHAN_ENUM_5320] = {5320, 64, 2, 160}, + [CHAN_ENUM_5500] = {5500, 100, 2, 160}, + [CHAN_ENUM_5520] = {5520, 104, 2, 160}, + [CHAN_ENUM_5540] = {5540, 108, 2, 160}, + [CHAN_ENUM_5560] = {5560, 112, 2, 160}, + [CHAN_ENUM_5580] = {5580, 116, 2, 160}, + [CHAN_ENUM_5600] = {5600, 120, 2, 160}, + [CHAN_ENUM_5620] = {5620, 124, 2, 160}, + [CHAN_ENUM_5640] = {5640, 128, 2, 160}, + [CHAN_ENUM_5660] = {5660, 132, 2, 160}, + [CHAN_ENUM_5680] = {5680, 136, 2, 160}, + [CHAN_ENUM_5700] = {5700, 140, 2, 160}, + [CHAN_ENUM_5720] = {5720, 144, 2, 160}, + [CHAN_ENUM_5745] = {5745, 149, 2, 160}, + [CHAN_ENUM_5765] = {5765, 153, 2, 160}, + [CHAN_ENUM_5785] = {5785, 157, 2, 160}, + [CHAN_ENUM_5805] = {5805, 161, 2, 160}, + [CHAN_ENUM_5825] = {5825, 165, 2, 160}, + [CHAN_ENUM_5845] = {5845, 169, 2, 160}, + [CHAN_ENUM_5850] = {5850, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5855] = {5855, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5860] = {5860, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5865] = {5865, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5870] = {5870, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5875] = {5875, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5880] = {5880, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5885] = {5885, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5890] = {5890, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5895] = {5895, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5900] = {5900, 
INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5905] = {5905, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5910] = {5910, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5915] = {5915, INVALID_CHANNEL_NUM, 2, 160}, + [CHAN_ENUM_5920] = {5920, INVALID_CHANNEL_NUM, 2, 160}, +}; +#endif + +static struct reg_dmn_supp_op_classes reg_dmn_curr_supp_opp_classes = { 0 }; + +static const struct reg_dmn_op_class_map_t global_op_class[] = { + {81, 25, BW20, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13} }, + {82, 25, BW20, {14} }, + {83, 40, BW40_LOW_PRIMARY, {1, 2, 3, 4, 5, 6, 7, 8, 9} }, + {84, 40, BW40_HIGH_PRIMARY, {5, 6, 7, 8, 9, 10, 11, 12, 13} }, + {115, 20, BW20, {36, 40, 44, 48} }, + {116, 40, BW40_LOW_PRIMARY, {36, 44} }, + {117, 40, BW40_HIGH_PRIMARY, {40, 48} }, + {118, 20, BW20, {52, 56, 60, 64} }, + {119, 40, BW40_LOW_PRIMARY, {52, 60} }, + {120, 40, BW40_HIGH_PRIMARY, {56, 64} }, + {121, 20, BW20, + {100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140} }, + {122, 40, BW40_LOW_PRIMARY, {100, 108, 116, 124, 132} }, + {123, 40, BW40_HIGH_PRIMARY, {104, 112, 120, 128, 136} }, + {125, 20, BW20, {149, 153, 157, 161, 165, 169} }, + {126, 40, BW40_LOW_PRIMARY, {149, 157} }, + {127, 40, BW40_HIGH_PRIMARY, {153, 161} }, + {128, 80, BW80, {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, + 112, 116, 120, 124, 128, 132, 136, 140, 144, + 149, 153, 157, 161} }, + {0, 0, 0, {0} }, +}; + +static const struct reg_dmn_op_class_map_t us_op_class[] = { + {1, 20, BW20, {36, 40, 44, 48} }, + {2, 20, BW20, {52, 56, 60, 64} }, + {4, 20, BW20, {100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, + 144} }, + {5, 20, BW20, {149, 153, 157, 161, 165} }, + {12, 25, BW20, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11} }, + {22, 40, BW40_LOW_PRIMARY, {36, 44} }, + {23, 40, BW40_LOW_PRIMARY, {52, 60} }, + {24, 40, BW40_LOW_PRIMARY, {100, 108, 116, 124, 132} }, + {26, 40, BW40_LOW_PRIMARY, {149, 157} }, + {27, 40, BW40_HIGH_PRIMARY, {40, 48} }, + {28, 40, BW40_HIGH_PRIMARY, {56, 64} }, + {29, 40, BW40_HIGH_PRIMARY, 
{104, 112, 120, 128, 136} }, + {31, 40, BW40_HIGH_PRIMARY, {153, 161} }, + {32, 40, BW40_LOW_PRIMARY, {1, 2, 3, 4, 5, 6, 7} }, + {33, 40, BW40_HIGH_PRIMARY, {5, 6, 7, 8, 9, 10, 11} }, + {128, 80, BW80, {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, + 112, 116, 120, 124, 128, 132, 136, 140, 144, + 149, 153, 157, 161} }, + {0, 0, 0, {0} }, +}; + +static const struct reg_dmn_op_class_map_t euro_op_class[] = { + {1, 20, BW20, {36, 40, 44, 48} }, + {2, 20, BW20, {52, 56, 60, 64} }, + {3, 20, BW20, {100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140} }, + {4, 25, BW20, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13} }, + {5, 40, BW40_LOW_PRIMARY, {36, 44} }, + {6, 40, BW40_LOW_PRIMARY, {52, 60} }, + {7, 40, BW40_LOW_PRIMARY, {100, 108, 116, 124, 132} }, + {8, 40, BW40_HIGH_PRIMARY, {40, 48} }, + {9, 40, BW40_HIGH_PRIMARY, {56, 64} }, + {10, 40, BW40_HIGH_PRIMARY, {104, 112, 120, 128, 136} }, + {11, 40, BW40_LOW_PRIMARY, {1, 2, 3, 4, 5, 6, 7, 8, 9} }, + {12, 40, BW40_HIGH_PRIMARY, {5, 6, 7, 8, 9, 10, 11, 12, 13} }, + {17, 20, BW20, {149, 153, 157, 161, 165, 169} }, + {128, 80, BW80, {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, + 116, 120, 124, 128} }, + {0, 0, 0, {0} }, +}; + +static const struct reg_dmn_op_class_map_t japan_op_class[] = { + {1, 20, BW20, {36, 40, 44, 48} }, + {30, 25, BW20, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13} }, + {31, 25, BW20, {14} }, + {32, 20, BW20, {52, 56, 60, 64} }, + {34, 20, BW20, + {100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140} }, + {36, 40, BW40_LOW_PRIMARY, {36, 44} }, + {37, 40, BW40_LOW_PRIMARY, {52, 60} }, + {39, 40, BW40_LOW_PRIMARY, {100, 108, 116, 124, 132} }, + {41, 40, BW40_HIGH_PRIMARY, {40, 48} }, + {42, 40, BW40_HIGH_PRIMARY, {56, 64} }, + {44, 40, BW40_HIGH_PRIMARY, {104, 112, 120, 128, 136} }, + {128, 80, BW80, {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, + 116, 120, 124, 128} }, + {0, 0, 0, {0} }, +}; + +struct wlan_regulatory_psoc_priv_obj *reg_get_psoc_obj( + struct wlan_objmgr_psoc *psoc) +{ + 
struct wlan_regulatory_psoc_priv_obj *soc_reg; + + if (!psoc) { + reg_alert("psoc is NULL"); + return NULL; + } + soc_reg = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + + return soc_reg; +} + +/** + * reg_get_pdev_obj() - Provides the reg component object pointer + * @pdev: pointer to pdev object. + * + * Return: reg component object pointer + */ +static struct wlan_regulatory_pdev_priv_obj *reg_get_pdev_obj( + struct wlan_objmgr_pdev *pdev) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_reg; + + if (!pdev) { + reg_alert("pdev is NULL"); + return NULL; + } + pdev_reg = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_REGULATORY); + + return pdev_reg; +} +/** + * reg_get_bw_value() - give bandwidth value in MHz + * @bw: bandwidth enum + * + * Return: uint16_t + */ +uint16_t reg_get_bw_value(enum phy_ch_width bw) +{ + switch (bw) { + case CH_WIDTH_20MHZ: + return 20; + case CH_WIDTH_40MHZ: + return 40; + case CH_WIDTH_80MHZ: + return 80; + case CH_WIDTH_160MHZ: + return 160; + case CH_WIDTH_80P80MHZ: + return 160; + case CH_WIDTH_INVALID: + return 0; + case CH_WIDTH_5MHZ: + return 5; + case CH_WIDTH_10MHZ: + return 10; + case CH_WIDTH_MAX: + return 160; + default: + return 0; + } +} + +/** + * reg_get_channel_list_with_power() - Provides the channel list with power + * @ch_list: pointer to the channel list. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS reg_get_channel_list_with_power(struct wlan_objmgr_pdev *pdev, + struct channel_power *ch_list, + uint8_t *num_chan) +{ + int i, count; + struct regulatory_channel *reg_channels; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + if (!num_chan || !ch_list) { + reg_err("chan_list or num_ch is NULL"); + return QDF_STATUS_E_FAILURE; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + /* set the current channel list */ + reg_channels = pdev_priv_obj->cur_chan_list; + + for (i = 0, count = 0; i < NUM_CHANNELS; i++) { + if (reg_channels[i].state && + reg_channels[i].state != REGULATORY_CHAN_DISABLED) { + ch_list[count].chan_num = + reg_channels[i].chan_num; + ch_list[count++].tx_power = + reg_channels[i].tx_power; + } + } + + *num_chan = count; + + return QDF_STATUS_SUCCESS; +} + +enum channel_enum reg_get_chan_enum(uint32_t chan_num) +{ + uint32_t count; + + for (count = 0; count < NUM_CHANNELS; count++) + if (channel_map[count].chan_num == chan_num) + return count; + + reg_debug_rl("invalid channel %d", chan_num); + + return INVALID_CHANNEL; +} +/** + * reg_get_channel_state() - Get channel state from regulatory + * @ch: channel number. 
+ * + * Return: channel state + */ +enum channel_state reg_get_channel_state(struct wlan_objmgr_pdev *pdev, + uint32_t ch) +{ + enum channel_enum ch_idx; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + ch_idx = reg_get_chan_enum(ch); + + if (INVALID_CHANNEL == ch_idx) + return CHANNEL_STATE_INVALID; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg obj is NULL"); + return CHANNEL_STATE_INVALID; + } + + return pdev_priv_obj->cur_chan_list[ch_idx].state; +} + +bool reg_chan_has_dfs_attribute(struct wlan_objmgr_pdev *pdev, uint32_t ch) +{ + enum channel_enum ch_idx; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + ch_idx = reg_get_chan_enum(ch); + + if (ch_idx == INVALID_CHANNEL) + return false; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg obj is NULL"); + return false; + } + if (pdev_priv_obj->cur_chan_list[ch_idx].chan_flags & + REGULATORY_CHAN_RADAR) + return true; + + return false; +} + +/** + * reg_get_5g_bonded_chan_array() - get ptr to bonded channel + * @oper_chan: operating channel number + * @bonded_chan_ar: bonded channel array + * @bonded_chan_ptr_ptr: bonded channel ptr ptr + * + * Return: bonded channel state + */ +static enum channel_state reg_get_5g_bonded_chan_array( + struct wlan_objmgr_pdev *pdev, + uint32_t oper_chan, + const struct bonded_channel bonded_chan_ar[], + uint16_t array_size, + const struct bonded_channel **bonded_chan_ptr_ptr) +{ + int i; + uint8_t chan_num; + const struct bonded_channel *bonded_chan_ptr = NULL; + enum channel_state chan_state = CHANNEL_STATE_INVALID; + enum channel_state temp_chan_state; + + for (i = 0; i < array_size; i++) { + if ((oper_chan >= bonded_chan_ar[i].start_ch) && + (oper_chan <= bonded_chan_ar[i].end_ch)) { + bonded_chan_ptr = &(bonded_chan_ar[i]); + break; + } + } + + if (NULL == bonded_chan_ptr) + return chan_state; + + *bonded_chan_ptr_ptr = 
bonded_chan_ptr; + chan_num = bonded_chan_ptr->start_ch; + while (chan_num <= bonded_chan_ptr->end_ch) { + temp_chan_state = reg_get_channel_state(pdev, chan_num); + if (temp_chan_state < chan_state) + chan_state = temp_chan_state; + chan_num = chan_num + 4; + } + + return chan_state; +} + +/** + * reg_get_5g_bonded_channel() - get the 5G bonded channel state + * @chan_num: channel number + * @ch_width: channel width + * @bonded_chan_ptr_ptr: bonded channel ptr ptr + * + * Return: channel state + */ +static enum channel_state reg_get_5g_bonded_channel( + struct wlan_objmgr_pdev *pdev, uint32_t chan_num, + enum phy_ch_width ch_width, + const struct bonded_channel **bonded_chan_ptr_ptr) +{ + if (CH_WIDTH_80P80MHZ == ch_width) + return reg_get_5g_bonded_chan_array(pdev, chan_num, + bonded_chan_80mhz_list, + QDF_ARRAY_SIZE(bonded_chan_80mhz_list), + bonded_chan_ptr_ptr); + else if (CH_WIDTH_160MHZ == ch_width) + return reg_get_5g_bonded_chan_array(pdev, chan_num, + bonded_chan_160mhz_list, + QDF_ARRAY_SIZE(bonded_chan_160mhz_list), + bonded_chan_ptr_ptr); + else if (CH_WIDTH_80MHZ == ch_width) + return reg_get_5g_bonded_chan_array(pdev, chan_num, + bonded_chan_80mhz_list, + QDF_ARRAY_SIZE(bonded_chan_80mhz_list), + bonded_chan_ptr_ptr); + else if (CH_WIDTH_40MHZ == ch_width) + return reg_get_5g_bonded_chan_array(pdev, chan_num, + bonded_chan_40mhz_list, + QDF_ARRAY_SIZE(bonded_chan_40mhz_list), + bonded_chan_ptr_ptr); + else + return reg_get_channel_state(pdev, chan_num); +} +/** + * reg_get_5g_bonded_channel_state() - Get channel state for 5G bonded channel + * @ch: channel number. 
+ * @bw: channel band width + * + * Return: channel state + */ +enum channel_state reg_get_5g_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, + uint8_t ch, enum phy_ch_width bw) +{ + enum channel_enum ch_indx; + enum channel_state chan_state; + struct regulatory_channel *reg_channels; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + bool bw_enabled = false; + const struct bonded_channel *bonded_chan_ptr = NULL; + + if (CH_WIDTH_80P80MHZ < bw) { + reg_err("bw passed is not good"); + return CHANNEL_STATE_INVALID; + } + + chan_state = reg_get_5g_bonded_channel(pdev, ch, bw, &bonded_chan_ptr); + + if ((CHANNEL_STATE_INVALID == chan_state) || + (CHANNEL_STATE_DISABLE == chan_state)) + return chan_state; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg obj is NULL"); + return CHANNEL_STATE_INVALID; + } + reg_channels = pdev_priv_obj->cur_chan_list; + + ch_indx = reg_get_chan_enum(ch); + if (INVALID_CHANNEL == ch_indx) + return CHANNEL_STATE_INVALID; + if (CH_WIDTH_5MHZ == bw) + bw_enabled = true; + else if (CH_WIDTH_10MHZ == bw) + bw_enabled = (reg_channels[ch_indx].min_bw <= 10) && + (reg_channels[ch_indx].max_bw >= 10); + else if (CH_WIDTH_20MHZ == bw) + bw_enabled = (reg_channels[ch_indx].min_bw <= 20) && + (reg_channels[ch_indx].max_bw >= 20); + else if (CH_WIDTH_40MHZ == bw) + bw_enabled = (reg_channels[ch_indx].min_bw <= 40) && + (reg_channels[ch_indx].max_bw >= 40); + else if (CH_WIDTH_80MHZ == bw) + bw_enabled = (reg_channels[ch_indx].min_bw <= 80) && + (reg_channels[ch_indx].max_bw >= 80); + else if (CH_WIDTH_160MHZ == bw) + bw_enabled = (reg_channels[ch_indx].min_bw <= 160) && + (reg_channels[ch_indx].max_bw >= 160); + else if (CH_WIDTH_80P80MHZ == bw) + bw_enabled = (reg_channels[ch_indx].min_bw <= 80) && + (reg_channels[ch_indx].max_bw >= 80); + + if (bw_enabled) + return chan_state; + else + return CHANNEL_STATE_DISABLE; +} + +/** + * reg_get_2g_bonded_channel_state() - Get 
channel state for 2G bonded channel + @oper_ch: operating channel number. + @sec_ch: secondary channel number. + @bw: channel band width + * + * Return: channel state + */ +enum channel_state reg_get_2g_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, + uint8_t oper_ch, uint8_t sec_ch, + enum phy_ch_width bw) +{ + enum channel_enum chan_idx; + enum channel_state chan_state; + struct regulatory_channel *reg_channels; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + bool bw_enabled = false; + enum channel_state chan_state2 = CHANNEL_STATE_INVALID; + + if (CH_WIDTH_40MHZ < bw) + return CHANNEL_STATE_INVALID; + + if (CH_WIDTH_40MHZ == bw) { + /* secondary must be exactly one 20 MHz step from primary */ + if ((sec_ch + 4 != oper_ch) && + (oper_ch + 4 != sec_ch)) + return CHANNEL_STATE_INVALID; + chan_state2 = reg_get_channel_state(pdev, sec_ch); + if (CHANNEL_STATE_INVALID == chan_state2) + return chan_state2; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return CHANNEL_STATE_INVALID; + } + + reg_channels = pdev_priv_obj->cur_chan_list; + + /* combined state is the more restrictive of primary/secondary */ + chan_state = reg_get_channel_state(pdev, oper_ch); + if (chan_state2 < chan_state) + chan_state = chan_state2; + + if ((CHANNEL_STATE_INVALID == chan_state) || + (CHANNEL_STATE_DISABLE == chan_state)) + return chan_state; + + chan_idx = reg_get_chan_enum(oper_ch); + if (INVALID_CHANNEL == chan_idx) + return CHANNEL_STATE_INVALID; + if (CH_WIDTH_5MHZ == bw) + bw_enabled = true; + else if (CH_WIDTH_10MHZ == bw) + bw_enabled = (reg_channels[chan_idx].min_bw <= 10) && + (reg_channels[chan_idx].max_bw >= 10); + else if (CH_WIDTH_20MHZ == bw) + bw_enabled = (reg_channels[chan_idx].min_bw <= 20) && + (reg_channels[chan_idx].max_bw >= 20); + else if (CH_WIDTH_40MHZ == bw) + bw_enabled = (reg_channels[chan_idx].min_bw <= 40) && + (reg_channels[chan_idx].max_bw >= 40); + + if (bw_enabled) + return chan_state; + else + return CHANNEL_STATE_DISABLE; +} + +static enum channel_state reg_combine_channel_states( + enum 
channel_state chan_state1, + enum channel_state chan_state2) +{ + if ((CHANNEL_STATE_INVALID == chan_state1) || + (CHANNEL_STATE_INVALID == chan_state2)) + return CHANNEL_STATE_INVALID; + else + return min(chan_state1, chan_state2); +} + +/** + * reg_set_5g_channel_params () - Sets channel parameteres for given bandwidth + * @ch: channel number. + * @ch_params: pointer to the channel parameters. + * + * Return: None + */ +static void reg_set_5g_channel_params(struct wlan_objmgr_pdev *pdev, + uint8_t ch, + struct ch_params *ch_params) +{ + /* + * Set channel parameters like center frequency for a bonded channel + * state. Also return the maximum bandwidth supported by the channel. + */ + + enum phy_ch_width next_lower_bw; + enum channel_state chan_state = CHANNEL_STATE_ENABLE; + enum channel_state chan_state2 = CHANNEL_STATE_ENABLE; + const struct bonded_channel *bonded_chan_ptr = NULL; + const struct bonded_channel *bonded_chan_ptr2 = NULL; + + if (NULL == ch_params) { + reg_err("ch_params is NULL"); + return; + } + + if (CH_WIDTH_MAX <= ch_params->ch_width) { + if (0 != ch_params->center_freq_seg1) + ch_params->ch_width = CH_WIDTH_80P80MHZ; + else + ch_params->ch_width = CH_WIDTH_160MHZ; + } + next_lower_bw = ch_params->ch_width; + + while (ch_params->ch_width != CH_WIDTH_INVALID) { + ch_params->ch_width = next_lower_bw; + next_lower_bw = get_next_lower_bw[ch_params->ch_width]; + bonded_chan_ptr = NULL; + bonded_chan_ptr2 = NULL; + chan_state = reg_get_5g_bonded_channel(pdev, ch, + ch_params->ch_width, &bonded_chan_ptr); + + chan_state = reg_get_5g_bonded_channel_state(pdev, ch, + ch_params->ch_width); + + if (CH_WIDTH_80P80MHZ == ch_params->ch_width) { + chan_state2 = reg_get_5g_bonded_channel_state(pdev, + ch_params->center_freq_seg1 - 2, + CH_WIDTH_80MHZ); + + chan_state = reg_combine_channel_states(chan_state, + chan_state2); + } + + if ((CHANNEL_STATE_ENABLE != chan_state) && + (CHANNEL_STATE_DFS != chan_state)) + continue; + if (CH_WIDTH_20MHZ >= 
ch_params->ch_width) { + ch_params->sec_ch_offset = NO_SEC_CH; + ch_params->center_freq_seg0 = ch; + break; + } else if (CH_WIDTH_40MHZ <= ch_params->ch_width) { + reg_get_5g_bonded_chan_array(pdev, ch, + bonded_chan_40mhz_list, + QDF_ARRAY_SIZE(bonded_chan_40mhz_list), + &bonded_chan_ptr2); + if (!bonded_chan_ptr || !bonded_chan_ptr2) + continue; + if (ch == bonded_chan_ptr2->start_ch) + ch_params->sec_ch_offset = LOW_PRIMARY_CH; + else + ch_params->sec_ch_offset = HIGH_PRIMARY_CH; + + ch_params->center_freq_seg0 = + (bonded_chan_ptr->start_ch + + bonded_chan_ptr->end_ch)/2; + break; + } + } + + if (CH_WIDTH_160MHZ == ch_params->ch_width) { + ch_params->center_freq_seg1 = ch_params->center_freq_seg0; + chan_state = reg_get_5g_bonded_channel(pdev, ch, + CH_WIDTH_80MHZ, &bonded_chan_ptr); + if (bonded_chan_ptr) + ch_params->center_freq_seg0 = + (bonded_chan_ptr->start_ch + + bonded_chan_ptr->end_ch)/2; + } + + /* Overwrite center_freq_seg1 to 0 for non 160 and 80+80 width */ + if (!(ch_params->ch_width == CH_WIDTH_160MHZ || + ch_params->ch_width == CH_WIDTH_80P80MHZ)) + ch_params->center_freq_seg1 = 0; + + reg_debug("ch %d ch_wd %d freq0 %d freq1 %d", ch, + ch_params->ch_width, ch_params->center_freq_seg0, + ch_params->center_freq_seg1); +} + +/** + * reg_set_2g_channel_params() - set the 2.4G bonded channel parameters + * @oper_ch: operating channel + * @ch_params: channel parameters + * @sec_ch_2g: 2.4G secondary channel + * + * Return: void + */ +static void reg_set_2g_channel_params(struct wlan_objmgr_pdev *pdev, + uint16_t oper_ch, struct ch_params *ch_params, + uint16_t sec_ch_2g) +{ + enum channel_state chan_state = CHANNEL_STATE_ENABLE; + + if (CH_WIDTH_MAX <= ch_params->ch_width) + ch_params->ch_width = CH_WIDTH_40MHZ; + if ((reg_get_bw_value(ch_params->ch_width) > 20) && !sec_ch_2g) { + if (oper_ch >= 1 && oper_ch <= 5) + sec_ch_2g = oper_ch + 4; + else if (oper_ch >= 6 && oper_ch <= 13) + sec_ch_2g = oper_ch - 4; + } + + while (ch_params->ch_width != 
CH_WIDTH_INVALID) { + chan_state = reg_get_2g_bonded_channel_state(pdev, oper_ch, + sec_ch_2g, + ch_params->ch_width); + if (CHANNEL_STATE_ENABLE == chan_state) { + if (CH_WIDTH_40MHZ == ch_params->ch_width) { + if (oper_ch < sec_ch_2g) + ch_params->sec_ch_offset = + LOW_PRIMARY_CH; + else + ch_params->sec_ch_offset = + HIGH_PRIMARY_CH; + ch_params->center_freq_seg0 = + (oper_ch + sec_ch_2g)/2; + } else { + ch_params->sec_ch_offset = NO_SEC_CH; + ch_params->center_freq_seg0 = oper_ch; + } + break; + } + + ch_params->ch_width = get_next_lower_bw[ch_params->ch_width]; + } + /* Overwrite center_freq_seg1 to 0 for 2.4 Ghz */ + ch_params->center_freq_seg1 = 0; +} + +/** + * reg_set_channel_params () - Sets channel parameteres for given bandwidth + * @ch: channel number. + * @ch_params: pointer to the channel parameters. + * + * Return: None + */ +void reg_set_channel_params(struct wlan_objmgr_pdev *pdev, + uint8_t ch, uint8_t sec_ch_2g, + struct ch_params *ch_params) +{ + if (REG_IS_5GHZ_CH(ch)) + reg_set_5g_channel_params(pdev, ch, ch_params); + else if (REG_IS_24GHZ_CH(ch)) + reg_set_2g_channel_params(pdev, ch, ch_params, + sec_ch_2g); +} + +QDF_STATUS reg_get_curr_band(struct wlan_objmgr_pdev *pdev, + enum band_info *band) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_reg; + + pdev_reg = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_reg)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + *band = pdev_reg->band_capability; + + return QDF_STATUS_SUCCESS; +} + +bool reg_is_world_ctry_code(uint16_t ctry_code) +{ + if ((ctry_code & 0xFFF0) == DEFAULT_WORLD_REGDMN) + return true; + + return false; +} + +QDF_STATUS reg_read_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country_code) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_reg; + + if (!country_code) { + reg_err("country_code is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_reg = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_reg)) { + 
reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_copy(country_code, + psoc_reg->def_country, + REG_ALPHA2_LEN + 1); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_read_current_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country_code) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_reg; + + if (!country_code) { + reg_err("country_code is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_reg = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_reg)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_copy(country_code, + psoc_reg->cur_country, + REG_ALPHA2_LEN + 1); + + return QDF_STATUS_SUCCESS; +} +/** + * reg_set_default_country() - Read the default country for the regdomain + * @country: country code. + * + * Return: QDF_STATUS + */ +QDF_STATUS reg_set_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_reg; + + if (!country) { + reg_err("country is NULL"); + return QDF_STATUS_E_INVAL; + } + psoc_reg = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_reg)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + reg_info("setting default_country: %s", country); + + qdf_mem_copy(psoc_reg->def_country, + country, REG_ALPHA2_LEN + 1); + + return QDF_STATUS_SUCCESS; +} + +bool reg_is_world_alpha2(uint8_t *alpha2) +{ + if ((alpha2[0] == '0') && (alpha2[1] == '0')) + return true; + + return false; +} + +bool reg_is_us_alpha2(uint8_t *alpha2) +{ + if ((alpha2[0] == 'U') && (alpha2[1] == 'S')) + return true; + + return false; +} + +QDF_STATUS reg_set_country(struct wlan_objmgr_pdev *pdev, + uint8_t *country) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_reg; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_lmac_if_reg_tx_ops *tx_ops; + struct set_country cc; + struct wlan_objmgr_psoc *psoc; + struct cc_regdmn_s rd; + uint8_t pdev_id; + + if (!pdev) { + reg_err("pdev is 
NULL"); + return QDF_STATUS_E_INVAL; + } + + if (!country) { + reg_err("country code is NULL"); + return QDF_STATUS_E_INVAL; + } + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + psoc = wlan_pdev_get_psoc(pdev); + + psoc_reg = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_reg)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (!qdf_mem_cmp(psoc_reg->cur_country, + country, REG_ALPHA2_LEN)) { + reg_err("country is not different"); + return QDF_STATUS_SUCCESS; + } + + reg_debug("programming new country:%s to firmware", country); + + qdf_mem_copy(cc.country, country, REG_ALPHA2_LEN + 1); + cc.pdev_id = pdev_id; + + if (!psoc_reg->offload_enabled && !reg_is_world_alpha2(country)) { + QDF_STATUS status; + + status = reg_is_country_code_valid(country); + if (!QDF_IS_STATUS_SUCCESS(status)) { + reg_err("Unable to set country code: %s\n", country); + reg_err("Restoring to world domain"); + qdf_mem_copy(cc.country, REG_WORLD_ALPHA2, + REG_ALPHA2_LEN + 1); + } + } + + if (reg_is_world_alpha2(cc.country)) + psoc_reg->world_country_pending[pdev_id] = true; + else + psoc_reg->new_user_ctry_pending[pdev_id] = true; + + if (psoc_reg->offload_enabled) { + tx_ops = reg_get_psoc_tx_ops(psoc); + if (tx_ops->set_country_code) { + tx_ops->set_country_code(psoc, &cc); + } else { + reg_err("country set fw handler not present"); + psoc_reg->new_user_ctry_pending[pdev_id] = false; + return QDF_STATUS_E_FAULT; + } + } else { + if (reg_is_world_alpha2(cc.country)) { + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg component pdev priv is NULL"); + psoc_reg->world_country_pending[pdev_id] = + false; + return QDF_STATUS_E_INVAL; + } + if (reg_is_world_ctry_code( + pdev_priv_obj->def_region_domain)) + rd.cc.regdmn_id = + pdev_priv_obj->def_region_domain; + else + rd.cc.regdmn_id = DEFAULT_WORLD_REGDMN; + rd.flags = REGDMN_IS_SET; + } else { + qdf_mem_copy(rd.cc.alpha, cc.country, + 
REG_ALPHA2_LEN + 1); + rd.flags = ALPHA_IS_SET; + } + + reg_program_chan_list(pdev, &rd); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_set_11d_country(struct wlan_objmgr_pdev *pdev, + uint8_t *country) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_reg; + struct set_country country_code; + struct wlan_objmgr_psoc *psoc; + struct cc_regdmn_s rd; + QDF_STATUS status; + uint8_t pdev_id; + + if (!country) { + reg_err("country code is NULL"); + return QDF_STATUS_E_INVAL; + } + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + psoc = wlan_pdev_get_psoc(pdev); + psoc_reg = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_reg)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (!qdf_mem_cmp(psoc_reg->cur_country, + country, REG_ALPHA2_LEN)) { + reg_debug("country is not different"); + return QDF_STATUS_SUCCESS; + } + + reg_info("programming new 11d country:%c%c to firmware", + country[0], country[1]); + + qdf_mem_copy(country_code.country, + country, REG_ALPHA2_LEN + 1); + country_code.pdev_id = pdev_id; + + psoc_reg->new_11d_ctry_pending[pdev_id] = true; + + if (psoc_reg->offload_enabled) { + reg_err("reg offload, 11d offload too!"); + status = QDF_STATUS_E_FAULT; + } else { + qdf_mem_copy(rd.cc.alpha, country, REG_ALPHA2_LEN + 1); + rd.flags = ALPHA_IS_SET; + reg_program_chan_list(pdev, &rd); + status = QDF_STATUS_SUCCESS; + } + + return status; +} + +QDF_STATUS reg_reset_country(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_reg; + + psoc_reg = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_reg)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + reg_info("re-setting user country to default"); + qdf_mem_copy(psoc_reg->cur_country, + psoc_reg->def_country, + REG_ALPHA2_LEN + 1); + reg_debug("set cur_country %.2s", psoc_reg->cur_country); + return QDF_STATUS_SUCCESS; +} + +/** + * reg_get_current_dfs_region () - Get the current dfs region + * 
@dfs_reg: pointer to dfs region + * + * Return: None + */ +void reg_get_current_dfs_region(struct wlan_objmgr_pdev *pdev, + enum dfs_reg *dfs_reg) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg component pdev priv is NULL"); + return; + } + + *dfs_reg = pdev_priv_obj->dfs_region; +} + +#ifdef CONFIG_LEGACY_CHAN_ENUM +static void reg_init_channel_map(enum dfs_reg dfs_region) +{ + channel_map = channel_map_old; +} +#else +static void reg_init_channel_map(enum dfs_reg dfs_region) +{ + switch (dfs_region) { + case DFS_UNINIT_REG: + case DFS_UNDEF_REG: + channel_map = channel_map_global; + break; + case DFS_FCC_REG: + channel_map = channel_map_us; + break; + case DFS_ETSI_REG: + channel_map = channel_map_eu; + break; + case DFS_MKK_REG: + channel_map = channel_map_jp; + break; + case DFS_CN_REG: + channel_map = channel_map_china; + break; + case DFS_KR_REG: + channel_map = channel_map_eu; + break; + } +} +#endif + + +/** + * reg_set_dfs_region () - Set the current dfs region + * @dfs_reg: pointer to dfs region + * + * Return: None + */ +void reg_set_dfs_region(struct wlan_objmgr_pdev *pdev, + enum dfs_reg dfs_reg) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("psoc reg component is NULL"); + return; + } + + pdev_priv_obj->dfs_region = dfs_reg; + + reg_init_channel_map(dfs_reg); +} + +QDF_STATUS reg_get_domain_from_country_code(v_REGDOMAIN_t *reg_domain_ptr, + const uint8_t *country_alpha2, + enum country_src source) +{ + if (NULL == reg_domain_ptr) { + reg_err("Invalid reg domain pointer"); + return QDF_STATUS_E_FAULT; + } + + *reg_domain_ptr = 0; + + if (NULL == country_alpha2) { + reg_err("Country code array is NULL"); + return QDF_STATUS_E_FAULT; + } + + return QDF_STATUS_SUCCESS; +} + +uint32_t reg_get_channel_reg_power(struct 
wlan_objmgr_pdev *pdev, + uint32_t chan_num) +{ + enum channel_enum chan_enum; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct regulatory_channel *reg_channels; + + chan_enum = reg_get_chan_enum(chan_num); + + if (chan_enum == INVALID_CHANNEL) { + reg_err("channel is invalid"); + return QDF_STATUS_E_FAILURE; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + reg_channels = pdev_priv_obj->cur_chan_list; + + return reg_channels[chan_enum].tx_power; +} + +uint32_t reg_get_channel_freq(struct wlan_objmgr_pdev *pdev, + uint32_t chan_num) +{ + enum channel_enum chan_enum; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct regulatory_channel *reg_channels; + + chan_enum = reg_get_chan_enum(chan_num); + + if (chan_enum == INVALID_CHANNEL) + return CHANNEL_STATE_INVALID; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + reg_channels = pdev_priv_obj->cur_chan_list; + + return reg_channels[chan_enum].center_freq; +} + + +bool reg_is_dfs_ch(struct wlan_objmgr_pdev *pdev, + uint32_t chan) +{ + enum channel_state ch_state; + + ch_state = reg_get_channel_state(pdev, chan); + + return ch_state == CHANNEL_STATE_DFS; +} + +bool reg_is_passive_or_disable_ch(struct wlan_objmgr_pdev *pdev, + uint32_t chan) +{ + enum channel_state ch_state; + + ch_state = reg_get_channel_state(pdev, chan); + + return (ch_state == CHANNEL_STATE_DFS) || + (ch_state == CHANNEL_STATE_DISABLE); +} + +#ifdef WLAN_FEATURE_DSRC +bool reg_is_dsrc_chan(struct wlan_objmgr_pdev *pdev, uint32_t chan) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + uint32_t freq = 0; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return false; + } + + if 
(!REG_IS_5GHZ_CH(chan)) + return false; + + freq = reg_chan_to_freq(pdev, chan); + + if (!(freq >= REG_DSRC_START_FREQ && freq <= REG_DSRC_END_FREQ)) + return false; + + return true; +} + +#else + +bool reg_is_etsi13_regdmn(struct wlan_objmgr_pdev *pdev) +{ + struct cur_regdmn_info cur_reg_dmn; + QDF_STATUS status; + + status = reg_get_curr_regdomain(pdev, &cur_reg_dmn); + if (QDF_STATUS_SUCCESS != status) { + reg_err("Failed to get reg domain"); + return false; + } + + return reg_etsi13_regdmn(cur_reg_dmn.dmn_id_5g); +} + +bool reg_is_etsi13_srd_chan(struct wlan_objmgr_pdev *pdev, uint32_t chan) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + uint32_t freq = 0; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return false; + } + + if (!REG_IS_5GHZ_CH(chan)) + return false; + + freq = reg_chan_to_freq(pdev, chan); + + if (!(freq >= REG_ETSI13_SRD_START_FREQ && + freq <= REG_ETSI13_SRD_END_FREQ)) + return false; + + return reg_is_etsi13_regdmn(pdev); +} + +bool reg_is_etsi13_srd_chan_allowed_master_mode(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + if (!pdev) { + reg_alert("pdev is NULL"); + return true; + } + psoc = wlan_pdev_get_psoc(pdev); + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_alert("psoc reg component is NULL"); + return true; + } + + return psoc_priv_obj->enable_srd_chan_in_master_mode && + reg_is_etsi13_regdmn(pdev); +} +#endif + +uint32_t reg_freq_to_chan(struct wlan_objmgr_pdev *pdev, + uint32_t freq) +{ + uint32_t count; + struct regulatory_channel *chan_list; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + chan_list = pdev_priv_obj->cur_chan_list; 
+ + for (count = 0; count < NUM_CHANNELS; count++) + if (chan_list[count].center_freq == freq) + return chan_list[count].chan_num; + + reg_err("invalid frequency %d", freq); + + return 0; +} + +uint32_t reg_chan_to_freq(struct wlan_objmgr_pdev *pdev, + uint32_t chan_num) +{ + uint32_t count; + struct regulatory_channel *chan_list; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + chan_list = pdev_priv_obj->cur_chan_list; + + for (count = 0; count < NUM_CHANNELS; count++) + if (chan_list[count].chan_num == chan_num) { + if (reg_chan_in_range(chan_list, + pdev_priv_obj->range_2g_low, + pdev_priv_obj->range_2g_high, + pdev_priv_obj->range_5g_low, + pdev_priv_obj->range_5g_high, + count)) { + return chan_list[count].center_freq; + } + } + + reg_debug_rl("invalid channel %d", chan_num); + + return 0; +} + +#ifndef CONFIG_LEGACY_CHAN_ENUM +bool reg_chan_is_49ghz(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num) +{ + uint32_t freq = 0; + + freq = reg_chan_to_freq(pdev, chan_num); + + return REG_IS_49GHZ_FREQ(freq) ? 
true : false; +} +#else +bool reg_chan_is_49ghz(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num) +{ + return false; +} +#endif + +enum band_info reg_chan_to_band(uint32_t chan_num) +{ + if (chan_num <= 14) + return BAND_2G; + + return BAND_5G; +} + +uint16_t reg_dmn_get_chanwidth_from_opclass(uint8_t *country, + uint8_t channel, + uint8_t opclass) +{ + const struct reg_dmn_op_class_map_t *class; + uint16_t i; + + if (!qdf_mem_cmp(country, "US", 2)) + class = us_op_class; + else if (!qdf_mem_cmp(country, "EU", 2)) + class = euro_op_class; + else if (!qdf_mem_cmp(country, "JP", 2)) + class = japan_op_class; + else + class = global_op_class; + + while (class->op_class) { + if (opclass == class->op_class) { + for (i = 0; + (i < REG_MAX_CHANNELS_PER_OPERATING_CLASS && + class->channels[i]); + i++) { + if (channel == class->channels[i]) + return class->ch_spacing; + } + } + class++; + } + + return 0; +} + + +uint16_t reg_dmn_get_opclass_from_channel(uint8_t *country, + uint8_t channel, + uint8_t offset) +{ + const struct reg_dmn_op_class_map_t *class = NULL; + uint16_t i = 0; + + if (!qdf_mem_cmp(country, "US", 2)) + class = us_op_class; + else if (!qdf_mem_cmp(country, "EU", 2)) + class = euro_op_class; + else if (!qdf_mem_cmp(country, "JP", 2)) + class = japan_op_class; + else + class = global_op_class; + + while (class->op_class) { + if ((offset == class->offset) || (offset == BWALL)) { + for (i = 0; + (i < REG_MAX_CHANNELS_PER_OPERATING_CLASS && + class->channels[i]); i++) { + if (channel == class->channels[i]) + return class->op_class; + } + } + class++; + } + + return 0; +} + +uint16_t reg_dmn_set_curr_opclasses(uint8_t num_classes, + uint8_t *class) +{ + uint8_t i; + + if (REG_MAX_SUPP_OPER_CLASSES < num_classes) { + reg_err("invalid num classes %d", num_classes); + return 0; + } + + for (i = 0; i < num_classes; i++) + reg_dmn_curr_supp_opp_classes.classes[i] = class[i]; + + reg_dmn_curr_supp_opp_classes.num_classes = num_classes; + + return 0; +} + +uint16_t 
reg_dmn_get_curr_opclasses(uint8_t *num_classes, + uint8_t *class) +{ + uint8_t i; + + if (!num_classes || !class) { + reg_err("either num_classes or class is null"); + return 0; + } + + for (i = 0; i < reg_dmn_curr_supp_opp_classes.num_classes; i++) + class[i] = reg_dmn_curr_supp_opp_classes.classes[i]; + + *num_classes = reg_dmn_curr_supp_opp_classes.num_classes; + + return 0; +} + + +static void reg_fill_channel_info(enum channel_enum chan_enum, + struct cur_reg_rule *reg_rule, + struct regulatory_channel *master_list, + uint16_t min_bw) +{ + + master_list[chan_enum].chan_flags &= + ~REGULATORY_CHAN_DISABLED; + + master_list[chan_enum].tx_power = reg_rule->reg_power; + master_list[chan_enum].ant_gain = reg_rule->ant_gain; + master_list[chan_enum].state = CHANNEL_STATE_ENABLE; + + if (reg_rule->flags & REGULATORY_CHAN_NO_IR) { + + master_list[chan_enum].chan_flags |= + REGULATORY_CHAN_NO_IR; + + master_list[chan_enum].state = + CHANNEL_STATE_DFS; + } + + if (reg_rule->flags & REGULATORY_CHAN_RADAR) { + master_list[chan_enum].chan_flags |= + REGULATORY_CHAN_RADAR; + + master_list[chan_enum].state = + CHANNEL_STATE_DFS; + } + + if (reg_rule->flags & REGULATORY_CHAN_INDOOR_ONLY) + master_list[chan_enum].chan_flags |= + REGULATORY_CHAN_INDOOR_ONLY; + + if (reg_rule->flags & REGULATORY_CHAN_NO_OFDM) + master_list[chan_enum].chan_flags |= + REGULATORY_CHAN_NO_OFDM; + + master_list[chan_enum].min_bw = min_bw; + if (20 == master_list[chan_enum].max_bw) + master_list[chan_enum].max_bw = reg_rule->max_bw; +} + + +static void reg_populate_band_channels(enum channel_enum start_chan, + enum channel_enum end_chan, + struct cur_reg_rule *rule_start_ptr, + uint32_t num_reg_rules, + uint16_t min_reg_bw, + struct regulatory_channel *mas_chan_list) +{ + struct cur_reg_rule *found_rule_ptr; + struct cur_reg_rule *cur_rule_ptr; + struct regulatory_channel; + enum channel_enum chan_enum; + uint32_t rule_num, bw; + uint16_t max_bw; + uint16_t min_bw; + + for (chan_enum = start_chan; 
chan_enum <= end_chan; chan_enum++) { + found_rule_ptr = NULL; + + max_bw = QDF_MIN((uint16_t)20, channel_map[chan_enum].max_bw); + min_bw = QDF_MAX(min_reg_bw, channel_map[chan_enum].min_bw); + + if (channel_map[chan_enum].chan_num == INVALID_CHANNEL_NUM) + continue; + + for (bw = max_bw; bw >= min_bw; bw = bw/2) { + for (rule_num = 0, cur_rule_ptr = + rule_start_ptr; + rule_num < num_reg_rules; + cur_rule_ptr++, rule_num++) { + + if ((cur_rule_ptr->start_freq <= + mas_chan_list[chan_enum].center_freq - + bw/2) && + (cur_rule_ptr->end_freq >= + mas_chan_list[chan_enum].center_freq + + bw/2) && (min_bw <= bw)) { + found_rule_ptr = cur_rule_ptr; + break; + } + } + if (found_rule_ptr) + break; + } + + if (found_rule_ptr) { + mas_chan_list[chan_enum].max_bw = bw; + reg_fill_channel_info(chan_enum, found_rule_ptr, + mas_chan_list, min_bw); + /* Disable 2.4 Ghz channels that dont have 20 mhz bw */ + if (start_chan == MIN_24GHZ_CHANNEL && + 20 > mas_chan_list[chan_enum].max_bw) { + mas_chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + mas_chan_list[chan_enum].state = + REGULATORY_CHAN_DISABLED; + } + } + } +} + +static void reg_update_max_bw_per_rule(uint32_t num_reg_rules, + struct cur_reg_rule *reg_rule_start, + uint16_t max_bw) +{ + uint32_t count; + + for (count = 0; count < num_reg_rules; count++) + reg_rule_start[count].max_bw = + min(reg_rule_start[count].max_bw, max_bw); +} + +static void reg_do_auto_bw_correction(uint32_t num_reg_rules, + struct cur_reg_rule *reg_rule_ptr, + uint16_t max_bw) +{ + uint32_t count; + uint16_t new_bw; + + for (count = 0; count < num_reg_rules - 1; count++) { + if ((reg_rule_ptr[count].end_freq == + reg_rule_ptr[count+1].start_freq) && + ((reg_rule_ptr[count].max_bw + reg_rule_ptr[count+1].max_bw) + <= max_bw)) { + new_bw = reg_rule_ptr[count].max_bw + + reg_rule_ptr[count+1].max_bw; + reg_rule_ptr[count].max_bw = new_bw; + reg_rule_ptr[count+1].max_bw = new_bw; + } + } +} + +static void 
reg_modify_chan_list_for_dfs_channels(struct regulatory_channel + *chan_list, + bool dfs_enabled) +{ + enum channel_enum chan_enum; + + if (dfs_enabled) + return; + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + if (CHANNEL_STATE_DFS == chan_list[chan_enum].state) { + chan_list[chan_enum].state = + CHANNEL_STATE_DISABLE; + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + } + } +} + +static void reg_modify_chan_list_for_indoor_channels( + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj) +{ + enum channel_enum chan_enum; + struct regulatory_channel *chan_list = pdev_priv_obj->cur_chan_list; + + if (!pdev_priv_obj->indoor_chan_enabled) { + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + if (REGULATORY_CHAN_INDOOR_ONLY & + chan_list[chan_enum].chan_flags) { + chan_list[chan_enum].state = + CHANNEL_STATE_DFS; + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_NO_IR; + } + } + } + + if (pdev_priv_obj->force_ssc_disable_indoor_channel && + pdev_priv_obj->sap_state) { + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + if (REGULATORY_CHAN_INDOOR_ONLY & + chan_list[chan_enum].chan_flags) { + chan_list[chan_enum].state = + CHANNEL_STATE_DISABLE; + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + } + } + } +} + +#ifdef DISABLE_CHANNEL_LIST +static void reg_modify_chan_list_for_cached_channels( + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj) +{ + uint32_t i, j, num_cache_channels = pdev_priv_obj->num_cache_channels; + struct regulatory_channel *chan_list = pdev_priv_obj->cur_chan_list; + struct regulatory_channel *cache_chan_list = + pdev_priv_obj->cache_disable_chan_list; + + if (!num_cache_channels) + return; + + if (pdev_priv_obj->disable_cached_channels) { + for (i = 0; i < num_cache_channels; i++) + for (j = 0; j < NUM_CHANNELS; j++) + if (cache_chan_list[i].chan_num == + chan_list[j].chan_num) { + chan_list[j].state = + CHANNEL_STATE_DISABLE; + chan_list[j].chan_flags |= + 
REGULATORY_CHAN_DISABLED; + } + } +} +#else +static void reg_modify_chan_list_for_cached_channels( + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj) +{ +} +#endif + +static void reg_modify_chan_list_for_band(struct regulatory_channel *chan_list, + enum band_info band_val) +{ + enum channel_enum chan_enum; + + if (BAND_2G == band_val) { + for (chan_enum = MIN_5GHZ_CHANNEL; + chan_enum <= MAX_5GHZ_CHANNEL; + chan_enum++) { + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + chan_list[chan_enum].state = CHANNEL_STATE_DISABLE; + } + } + + if (BAND_5G == band_val) { + for (chan_enum = MIN_24GHZ_CHANNEL; + chan_enum <= MAX_24GHZ_CHANNEL; + chan_enum++) { + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + chan_list[chan_enum].state = CHANNEL_STATE_DISABLE; + } + } +} + +static void reg_modify_chan_list_for_fcc_channel(struct regulatory_channel + *chan_list, + bool set_fcc_channel) +{ + enum channel_enum chan_enum; + + if (set_fcc_channel) { + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + if (chan_list[chan_enum].center_freq == + CHAN_12_CENT_FREQ) + chan_list[chan_enum].tx_power = + MAX_PWR_FCC_CHAN_12; + if (chan_list[chan_enum].center_freq == + CHAN_13_CENT_FREQ) + chan_list[chan_enum].tx_power = + MAX_PWR_FCC_CHAN_13; + } + } +} + +static void reg_modify_chan_list_for_chan_144(struct regulatory_channel + *chan_list, + bool en_chan_144) +{ + enum channel_enum chan_enum; + + if (en_chan_144) + return; + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + if (chan_list[chan_enum].center_freq == CHAN_144_CENT_FREQ) { + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + chan_list[chan_enum].state = + CHANNEL_STATE_DISABLE; + } + } +} + +static void +reg_modify_chan_list_for_nol_list(struct regulatory_channel *chan_list) +{ + enum channel_enum chan_enum; + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; + chan_enum++) { + if (chan_list[chan_enum].nol_chan) { + chan_list[chan_enum].state = + 
CHANNEL_STATE_DISABLE; + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + } + } +} + +/** + * reg_find_low_limit_chan_enum() - Find low limit 2G and 5G channel enums. + * @chan_list: Pointer to regulatory channel list. + * @low_freq: low limit frequency. + * @low_limit: pointer to output low limit enum. + * + * Return: None + */ +static void reg_find_low_limit_chan_enum(struct regulatory_channel *chan_list, + uint32_t low_freq, + uint32_t *low_limit) +{ + enum channel_enum chan_enum; + uint16_t min_bw; + uint16_t max_bw; + uint32_t center_freq; + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + min_bw = chan_list[chan_enum].min_bw; + max_bw = chan_list[chan_enum].max_bw; + center_freq = chan_list[chan_enum].center_freq; + + if ((center_freq - min_bw/2) >= low_freq) { + if ((center_freq - max_bw/2) < low_freq) { + if (max_bw <= 20) + max_bw = ((center_freq - + low_freq) * 2); + if (max_bw < min_bw) + max_bw = min_bw; + chan_list[chan_enum].max_bw = max_bw; + } + *low_limit = chan_enum; + break; + } + } +} + +/** + * reg_find_high_limit_chan_enum() - Find high limit 2G and 5G channel enums. + * @chan_list: Pointer to regulatory channel list. + * @high_freq: high limit frequency. + * @high_limit: pointer to output high limit enum. 
+ * + * Return: None + */ +static void reg_find_high_limit_chan_enum(struct regulatory_channel *chan_list, + uint32_t high_freq, + uint32_t *high_limit) +{ + enum channel_enum chan_enum; + uint16_t min_bw; + uint16_t max_bw; + uint32_t center_freq; + + for (chan_enum = NUM_CHANNELS - 1; chan_enum >= 0; chan_enum--) { + min_bw = chan_list[chan_enum].min_bw; + max_bw = chan_list[chan_enum].max_bw; + center_freq = chan_list[chan_enum].center_freq; + + if (center_freq + min_bw/2 <= high_freq) { + if ((center_freq + max_bw/2) > high_freq) { + if (max_bw <= 20) + max_bw = ((high_freq - + center_freq) * 2); + if (max_bw < min_bw) + max_bw = min_bw; + chan_list[chan_enum].max_bw = max_bw; + } + *high_limit = chan_enum; + break; + } + if (chan_enum == 0) + break; + } +} + +/** + * reg_modify_chan_list_for_freq_range() - Modify channel list for the given low + * and high frequency range. + * @chan_list: Pointer to regulatory channel list. + * @low_freq_2g: Low frequency 2G. + * @high_freq_2g: High frequency 2G. + * @low_freq_5g: Low frequency 5G. + * @high_freq_5g: High frequency 5G. 
+ * + * Return: None + */ +static void +reg_modify_chan_list_for_freq_range(struct regulatory_channel *chan_list, + uint32_t low_freq_2g, + uint32_t high_freq_2g, + uint32_t low_freq_5g, + uint32_t high_freq_5g) +{ + uint32_t low_limit_2g = NUM_CHANNELS; + uint32_t high_limit_2g = NUM_CHANNELS; + uint32_t low_limit_5g = NUM_CHANNELS; + uint32_t high_limit_5g = NUM_CHANNELS; + enum channel_enum chan_enum; + bool chan_in_range; + + reg_find_low_limit_chan_enum(chan_list, low_freq_2g, &low_limit_2g); + reg_find_low_limit_chan_enum(chan_list, low_freq_5g, &low_limit_5g); + reg_find_high_limit_chan_enum(chan_list, high_freq_2g, &high_limit_2g); + reg_find_high_limit_chan_enum(chan_list, high_freq_5g, &high_limit_5g); + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + chan_in_range = false; + if ((low_limit_2g <= chan_enum) && + (high_limit_2g >= chan_enum) && + (low_limit_2g != NUM_CHANNELS) && + (high_limit_2g != NUM_CHANNELS)) + chan_in_range = true; + if ((low_limit_5g <= chan_enum) && + (high_limit_5g >= chan_enum) && + (low_limit_5g != NUM_CHANNELS) && + (high_limit_5g != NUM_CHANNELS)) + chan_in_range = true; + if (!chan_in_range) { + chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + chan_list[chan_enum].state = + CHANNEL_STATE_DISABLE; + } + } +} + +bool reg_chan_in_range(struct regulatory_channel *chan_list, + uint32_t low_freq_2g, + uint32_t high_freq_2g, + uint32_t low_freq_5g, + uint32_t high_freq_5g, + enum channel_enum ch_enum) +{ + uint32_t low_limit_2g = NUM_CHANNELS; + uint32_t high_limit_2g = NUM_CHANNELS; + uint32_t low_limit_5g = NUM_CHANNELS; + uint32_t high_limit_5g = NUM_CHANNELS; + bool chan_in_range; + enum channel_enum chan_enum; + uint16_t min_bw; + uint32_t center_freq; + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + min_bw = chan_list[chan_enum].min_bw; + center_freq = chan_list[chan_enum].center_freq; + + if ((center_freq - min_bw/2) >= low_freq_2g) { + low_limit_2g = chan_enum; + break; + } 
+ } + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) { + min_bw = chan_list[chan_enum].min_bw; + center_freq = chan_list[chan_enum].center_freq; + + if ((center_freq - min_bw/2) >= low_freq_5g) { + low_limit_5g = chan_enum; + break; + } + } + + for (chan_enum = NUM_CHANNELS - 1; chan_enum >= 0; chan_enum--) { + min_bw = chan_list[chan_enum].min_bw; + center_freq = chan_list[chan_enum].center_freq; + + if (center_freq + min_bw/2 <= high_freq_2g) { + high_limit_2g = chan_enum; + break; + } + if (chan_enum == 0) + break; + } + + for (chan_enum = NUM_CHANNELS - 1; chan_enum >= 0; chan_enum--) { + min_bw = chan_list[chan_enum].min_bw; + center_freq = chan_list[chan_enum].center_freq; + + if (center_freq + min_bw/2 <= high_freq_5g) { + high_limit_5g = chan_enum; + break; + } + if (chan_enum == 0) + break; + } + + chan_in_range = false; + if ((low_limit_2g <= ch_enum) && + (high_limit_2g >= ch_enum) && + (low_limit_2g != NUM_CHANNELS) && + (high_limit_2g != NUM_CHANNELS)) + chan_in_range = true; + if ((low_limit_5g <= ch_enum) && + (high_limit_5g >= ch_enum) && + (low_limit_5g != NUM_CHANNELS) && + (high_limit_5g != NUM_CHANNELS)) + chan_in_range = true; + + if (chan_in_range) + return true; + else + return false; +} + +static void reg_init_pdev_mas_chan_list(struct wlan_regulatory_pdev_priv_obj + *pdev_priv_obj, + struct mas_chan_params + *mas_chan_params) +{ + qdf_mem_copy(pdev_priv_obj->mas_chan_list, + mas_chan_params->mas_chan_list, + NUM_CHANNELS * sizeof(struct regulatory_channel)); + + pdev_priv_obj->dfs_region = mas_chan_params->dfs_region; + + pdev_priv_obj->phybitmap = mas_chan_params->phybitmap; + + pdev_priv_obj->reg_dmn_pair = mas_chan_params->reg_dmn_pair; + pdev_priv_obj->ctry_code = mas_chan_params->ctry_code; + + pdev_priv_obj->def_region_domain = mas_chan_params->reg_dmn_pair; + pdev_priv_obj->def_country_code = mas_chan_params->ctry_code; + + qdf_mem_copy(pdev_priv_obj->default_country, + mas_chan_params->default_country, REG_ALPHA2_LEN + 
1); + + qdf_mem_copy(pdev_priv_obj->current_country, + mas_chan_params->current_country, REG_ALPHA2_LEN + 1); +} + + +static void reg_compute_pdev_current_chan_list( + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj) +{ + qdf_mem_copy(pdev_priv_obj->cur_chan_list, + pdev_priv_obj->mas_chan_list, + NUM_CHANNELS * sizeof(struct regulatory_channel)); + + reg_modify_chan_list_for_freq_range(pdev_priv_obj->cur_chan_list, + pdev_priv_obj->range_2g_low, + pdev_priv_obj->range_2g_high, + pdev_priv_obj->range_5g_low, + pdev_priv_obj->range_5g_high); + + reg_modify_chan_list_for_band(pdev_priv_obj->cur_chan_list, + pdev_priv_obj->band_capability); + + reg_modify_chan_list_for_dfs_channels(pdev_priv_obj->cur_chan_list, + pdev_priv_obj->dfs_enabled); + + reg_modify_chan_list_for_nol_list(pdev_priv_obj->cur_chan_list); + + reg_modify_chan_list_for_indoor_channels(pdev_priv_obj); + + reg_modify_chan_list_for_fcc_channel(pdev_priv_obj->cur_chan_list, + pdev_priv_obj->set_fcc_channel); + + reg_modify_chan_list_for_chan_144(pdev_priv_obj->cur_chan_list, + pdev_priv_obj->en_chan_144); + + reg_modify_chan_list_for_cached_channels(pdev_priv_obj); +} + +static void reg_call_chan_change_cbks(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + struct chan_change_cbk_entry *cbk_list; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct regulatory_channel *cur_chan_list; + uint32_t ctr; + struct avoid_freq_ind_data *avoid_freq_ind = NULL; + reg_chan_change_callback callback; + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_alert("psoc reg component is NULL"); + return; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_alert("pdev reg component is NULL"); + return; + } + + cur_chan_list = qdf_mem_malloc(NUM_CHANNELS * sizeof(*cur_chan_list)); + if (NULL == cur_chan_list) { + reg_alert("Mem alloc failed for 
current channel list"); + return; + } + + qdf_mem_copy(cur_chan_list, + pdev_priv_obj->cur_chan_list, + NUM_CHANNELS * + sizeof(struct regulatory_channel)); + + if (psoc_priv_obj->ch_avoid_ind) { + avoid_freq_ind = qdf_mem_malloc(sizeof(*avoid_freq_ind)); + if (!avoid_freq_ind) { + reg_alert("Mem alloc failed for avoid freq ind"); + goto skip_ch_avoid_ind; + } + qdf_mem_copy(&avoid_freq_ind->freq_list, + &psoc_priv_obj->avoid_freq_list, + sizeof(struct ch_avoid_ind_type)); + qdf_mem_copy(&avoid_freq_ind->chan_list, + &psoc_priv_obj->unsafe_chan_list, + sizeof(struct unsafe_ch_list)); + psoc_priv_obj->ch_avoid_ind = false; + } + +skip_ch_avoid_ind: + cbk_list = psoc_priv_obj->cbk_list; + + for (ctr = 0; ctr < REG_MAX_CHAN_CHANGE_CBKS; ctr++) { + callback = NULL; + qdf_spin_lock_bh(&psoc_priv_obj->cbk_list_lock); + if (cbk_list[ctr].cbk != NULL) + callback = cbk_list[ctr].cbk; + qdf_spin_unlock_bh(&psoc_priv_obj->cbk_list_lock); + if (callback != NULL) + callback(psoc, pdev, cur_chan_list, avoid_freq_ind, + cbk_list[ctr].arg); + } + qdf_mem_free(cur_chan_list); + if (avoid_freq_ind) + qdf_mem_free(avoid_freq_ind); +} + +static struct reg_sched_payload +*reg_alloc_and_fill_payload(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + struct reg_sched_payload *payload; + + payload = qdf_mem_malloc(sizeof(*payload)); + if (payload != NULL) { + payload->psoc = psoc; + payload->pdev = pdev; + } + + return payload; +} + +#ifdef CONFIG_MCL +static QDF_STATUS reg_chan_change_flush_cbk_sb(struct scheduler_msg *msg) +{ + struct reg_sched_payload *load = msg->bodyptr; + struct wlan_objmgr_psoc *psoc = load->psoc; + struct wlan_objmgr_pdev *pdev = load->pdev; + + wlan_objmgr_pdev_release_ref(pdev, WLAN_REGULATORY_SB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); + qdf_mem_free(load); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS reg_sched_chan_change_cbks_sb(struct scheduler_msg *msg) +{ + struct reg_sched_payload *load = msg->bodyptr; 
+ struct wlan_objmgr_psoc *psoc = load->psoc; + struct wlan_objmgr_pdev *pdev = load->pdev; + + reg_call_chan_change_cbks(psoc, pdev); + + wlan_objmgr_pdev_release_ref(pdev, WLAN_REGULATORY_SB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); + qdf_mem_free(load); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS reg_chan_change_flush_cbk_nb(struct scheduler_msg *msg) +{ + struct reg_sched_payload *load = msg->bodyptr; + struct wlan_objmgr_psoc *psoc = load->psoc; + struct wlan_objmgr_pdev *pdev = load->pdev; + + wlan_objmgr_pdev_release_ref(pdev, WLAN_REGULATORY_NB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_NB_ID); + qdf_mem_free(load); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS reg_sched_chan_change_cbks_nb(struct scheduler_msg *msg) +{ + struct reg_sched_payload *load = msg->bodyptr; + struct wlan_objmgr_psoc *psoc = load->psoc; + struct wlan_objmgr_pdev *pdev = load->pdev; + + reg_call_chan_change_cbks(psoc, pdev); + + wlan_objmgr_pdev_release_ref(pdev, WLAN_REGULATORY_NB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_NB_ID); + qdf_mem_free(load); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_send_scheduler_msg_sb(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + struct scheduler_msg msg = {0}; + struct reg_sched_payload *payload; + QDF_STATUS status; + + status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_REGULATORY_SB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + reg_err("error taking psoc ref cnt"); + return status; + } + + status = wlan_objmgr_pdev_try_get_ref(pdev, WLAN_REGULATORY_SB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); + reg_err("error taking pdev ref cnt"); + return status; + } + + payload = reg_alloc_and_fill_payload(psoc, pdev); + if (payload == NULL) { + reg_err("payload memory alloc failed"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_REGULATORY_SB_ID); + wlan_objmgr_psoc_release_ref(psoc, 
WLAN_REGULATORY_SB_ID); + return QDF_STATUS_E_NOMEM; + } + + msg.bodyptr = payload; + msg.callback = reg_sched_chan_change_cbks_sb; + msg.flush_callback = reg_chan_change_flush_cbk_sb; + + status = scheduler_post_message(QDF_MODULE_ID_REGULATORY, + QDF_MODULE_ID_REGULATORY, + QDF_MODULE_ID_TARGET_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + wlan_objmgr_pdev_release_ref(pdev, WLAN_REGULATORY_SB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); + reg_err("scheduler msg posting failed"); + qdf_mem_free(payload); + } + + return status; +} + +static QDF_STATUS reg_send_scheduler_msg_nb(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + struct scheduler_msg msg = {0}; + struct reg_sched_payload *payload; + QDF_STATUS status; + + status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_REGULATORY_NB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + reg_err("error taking psoc ref cnt"); + return status; + } + + status = wlan_objmgr_pdev_try_get_ref(pdev, WLAN_REGULATORY_NB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_NB_ID); + reg_err("error taking pdev ref cnt"); + return status; + } + + payload = reg_alloc_and_fill_payload(psoc, pdev); + if (payload == NULL) { + reg_err("payload memory alloc failed"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_REGULATORY_NB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_NB_ID); + return QDF_STATUS_E_NOMEM; + } + msg.bodyptr = payload; + msg.callback = reg_sched_chan_change_cbks_nb; + msg.flush_callback = reg_chan_change_flush_cbk_nb; + + status = scheduler_post_message(QDF_MODULE_ID_REGULATORY, + QDF_MODULE_ID_REGULATORY, + QDF_MODULE_ID_OS_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + wlan_objmgr_pdev_release_ref(pdev, WLAN_REGULATORY_NB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_NB_ID); + reg_err("scheduler msg posting failed"); + qdf_mem_free(payload); + } + + return status; +} + +static QDF_STATUS reg_send_11d_flush_cbk(struct 
scheduler_msg *msg) +{ + struct wlan_objmgr_psoc *psoc = msg->bodyptr; + + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS reg_send_11d_msg_cbk(struct scheduler_msg *msg) +{ + struct wlan_objmgr_psoc *psoc = msg->bodyptr; + struct wlan_lmac_if_reg_tx_ops *tx_ops; + struct reg_start_11d_scan_req start_req; + struct reg_stop_11d_scan_req stop_req; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + tx_ops = reg_get_psoc_tx_ops(psoc); + + psoc_priv_obj = (struct wlan_regulatory_psoc_priv_obj *) + wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (!psoc_priv_obj) { + reg_err("psoc priv obj is NULL"); + goto end; + } + + if (psoc_priv_obj->vdev_id_for_11d_scan == INVALID_VDEV_ID) { + psoc_priv_obj->enable_11d_supp = false; + reg_err("No valid vdev for 11d scan command"); + goto end; + } + + if (psoc_priv_obj->enable_11d_supp) { + start_req.vdev_id = psoc_priv_obj->vdev_id_for_11d_scan; + start_req.scan_period_msec = psoc_priv_obj->scan_11d_interval; + start_req.start_interval_msec = 0; + reg_debug("sending start msg"); + tx_ops->start_11d_scan(psoc, &start_req); + } else { + stop_req.vdev_id = psoc_priv_obj->vdev_id_for_11d_scan; + reg_debug("sending stop msg"); + tx_ops->stop_11d_scan(psoc, &stop_req); + } + +end: + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS reg_sched_11d_msg(struct wlan_objmgr_psoc *psoc) +{ + struct scheduler_msg msg = {0}; + QDF_STATUS status; + + status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_REGULATORY_SB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + reg_err("error taking psoc ref cnt"); + return status; + } + + msg.bodyptr = psoc; + msg.callback = reg_send_11d_msg_cbk; + msg.flush_callback = reg_send_11d_flush_cbk; + + status = scheduler_post_message(QDF_MODULE_ID_REGULATORY, + QDF_MODULE_ID_REGULATORY, + QDF_MODULE_ID_TARGET_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + 
wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); + reg_err("scheduler msg posting failed"); + } + + return status; +} +#else +QDF_STATUS reg_send_scheduler_msg_sb(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS reg_send_scheduler_msg_nb(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS reg_sched_11d_msg(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +void reg_reset_reg_rules(struct reg_rule_info *reg_rules) +{ + qdf_mem_zero(reg_rules, sizeof(*reg_rules)); +} + +static void reg_save_reg_rules_to_pdev(struct reg_rule_info *psoc_reg_rules, + struct wlan_regulatory_pdev_priv_obj + *pdev_priv_obj) +{ + uint32_t reg_rule_len; + struct reg_rule_info *pdev_reg_rules; + + qdf_spin_lock_bh(&pdev_priv_obj->reg_rules_lock); + + pdev_reg_rules = &pdev_priv_obj->reg_rules; + reg_reset_reg_rules(pdev_reg_rules); + + pdev_reg_rules->num_of_reg_rules = psoc_reg_rules->num_of_reg_rules; + if (!pdev_reg_rules->num_of_reg_rules) { + qdf_spin_unlock_bh(&pdev_priv_obj->reg_rules_lock); + reg_err("no reg rules in psoc"); + return; + } + + reg_rule_len = pdev_reg_rules->num_of_reg_rules * + sizeof(struct cur_reg_rule); + qdf_mem_copy(pdev_reg_rules->reg_rules, + psoc_reg_rules->reg_rules, + reg_rule_len); + + qdf_mem_copy(pdev_reg_rules->alpha2, pdev_priv_obj->current_country, + REG_ALPHA2_LEN + 1); + pdev_reg_rules->dfs_region = pdev_priv_obj->dfs_region; + + qdf_spin_unlock_bh(&pdev_priv_obj->reg_rules_lock); +} + +static void reg_propagate_mas_chan_list_to_pdev(struct wlan_objmgr_psoc *psoc, + void *object, void *arg) +{ + struct wlan_objmgr_pdev *pdev = (struct wlan_objmgr_pdev *)object; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + enum direction *dir = arg; + uint32_t pdev_id; + struct wlan_lmac_if_reg_tx_ops *reg_tx_ops; + struct 
reg_rule_info *psoc_reg_rules; + + psoc_priv_obj = (struct wlan_regulatory_psoc_priv_obj *) + wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == psoc_priv_obj) { + reg_err("psoc priv obj is NULL"); + return; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return; + } + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + reg_init_pdev_mas_chan_list(pdev_priv_obj, + &psoc_priv_obj->mas_chan_params[pdev_id]); + psoc_reg_rules = &psoc_priv_obj->mas_chan_params[pdev_id].reg_rules; + reg_save_reg_rules_to_pdev(psoc_reg_rules, pdev_priv_obj); + reg_compute_pdev_current_chan_list(pdev_priv_obj); + + reg_tx_ops = reg_get_psoc_tx_ops(psoc); + if (reg_tx_ops->fill_umac_legacy_chanlist) { + reg_tx_ops->fill_umac_legacy_chanlist(pdev, + pdev_priv_obj->cur_chan_list); + } else { + if (*dir == NORTHBOUND) + reg_send_scheduler_msg_nb(psoc, pdev); + else + reg_send_scheduler_msg_sb(psoc, pdev); + } +} + +static void reg_run_11d_state_machine(struct wlan_objmgr_psoc *psoc) +{ + bool temp_11d_support; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + bool world_mode; + + psoc_priv_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return; + } + + if (psoc_priv_obj->vdev_id_for_11d_scan == INVALID_VDEV_ID) { + psoc_priv_obj->enable_11d_supp = false; + reg_err("No valid vdev for 11d scan command"); + return; + } + + world_mode = reg_is_world_alpha2(psoc_priv_obj->cur_country); + + temp_11d_support = psoc_priv_obj->enable_11d_supp; + if ((psoc_priv_obj->enable_11d_in_world_mode) && (world_mode)) + psoc_priv_obj->enable_11d_supp = true; + else if (((psoc_priv_obj->user_ctry_set) && + (psoc_priv_obj->user_ctry_priority)) || + (psoc_priv_obj->master_vdev_cnt)) + psoc_priv_obj->enable_11d_supp = false; + else + psoc_priv_obj->enable_11d_supp = + 
psoc_priv_obj->enable_11d_supp_original; + + reg_debug("inside 11d state machine"); + if ((temp_11d_support != psoc_priv_obj->enable_11d_supp) && + (psoc_priv_obj->is_11d_offloaded)) { + reg_sched_11d_msg(psoc); + } +} + +void reg_reset_ctry_pending_hints(struct wlan_regulatory_psoc_priv_obj + *soc_reg) +{ + uint8_t ctr; + + if (!soc_reg->offload_enabled) + return; + + for (ctr = 0; ctr < PSOC_MAX_PHY_REG_CAP; ctr++) { + soc_reg->new_user_ctry_pending[ctr] = false; + soc_reg->new_init_ctry_pending[ctr] = false; + soc_reg->new_11d_ctry_pending[ctr] = false; + soc_reg->world_country_pending[ctr] = false; + } +} + +/** + * reg_set_curr_country() - Set current country update + * @soc_reg: regulatory private object + * @regulat_info: regulatory info from firmware + * @tx_ops: send operations for regulatory component + * + * During SSR or restart of wlan modules after interface change timer phase, + * this function is used to send the recent user/11d country code to firmware. + * + * Return: QDF_STATUS_SUCCESS if correct country is configured + * else return failure + * error code. + */ +static QDF_STATUS reg_set_curr_country( + struct wlan_regulatory_psoc_priv_obj *soc_reg, + struct cur_regulatory_info *regulat_info, + struct wlan_lmac_if_reg_tx_ops *tx_ops) +{ + struct wlan_objmgr_psoc *psoc = regulat_info->psoc; + uint8_t pdev_id; + struct set_country country_code; + QDF_STATUS status; + + /* + * During SSR/WLAN restart ignore master channel list + * for all events and in the last event handling if + * current country and default country is different, send the last + * configured (soc_reg->cur_country) country. 
+ */ + if ((regulat_info->num_phy != regulat_info->phy_id + 1) || + (!qdf_mem_cmp(soc_reg->cur_country, regulat_info->alpha2, + REG_ALPHA2_LEN))) + return QDF_STATUS_SUCCESS; + + pdev_id = soc_reg->def_pdev_id; + if (soc_reg->cc_src == SOURCE_USERSPACE) + soc_reg->new_user_ctry_pending[pdev_id] = true; + else if (soc_reg->cc_src == SOURCE_11D) + soc_reg->new_11d_ctry_pending[pdev_id] = true; + else + soc_reg->world_country_pending[pdev_id] = true; + + qdf_mem_zero(&country_code, sizeof(country_code)); + qdf_mem_copy(country_code.country, soc_reg->cur_country, + sizeof(soc_reg->cur_country)); + country_code.pdev_id = pdev_id; + + if (!tx_ops || !tx_ops->set_country_code) { + reg_err("No regulatory tx_ops for set_country_code"); + status = QDF_STATUS_E_FAULT; + goto error; + } + + status = tx_ops->set_country_code(psoc, &country_code); + if (QDF_IS_STATUS_ERROR(status)) { + reg_err("Failed to send country code to firmware"); + goto error; + } + + reg_debug("Target CC: %.2s, Restore to Previous CC: %.2s", + regulat_info->alpha2, soc_reg->cur_country); + + return status; + +error: + reg_reset_ctry_pending_hints(soc_reg); + + return status; +} + +/** + * reg_ignore_default_country() - Ignore default country update + * @soc_reg: regulatory private object + * @regulat_info: regulatory info from firmware + * + * During SSR or restart of wlan modules after interface change timer phase, + * this function is used to ignore default country code from firmware. + * + * Return: If default country needs to be ignored return true else false. 
+ */ +static bool +reg_ignore_default_country(struct wlan_regulatory_psoc_priv_obj *soc_reg, + struct cur_regulatory_info *regulat_info) +{ + uint8_t pdev_id; + + if (!soc_reg->offload_enabled) + return false; + + if (soc_reg->cc_src == SOURCE_UNKNOWN) + return false; + + pdev_id = regulat_info->phy_id; + + if (soc_reg->new_user_ctry_pending[pdev_id] || + soc_reg->new_init_ctry_pending[pdev_id] || + soc_reg->new_11d_ctry_pending[pdev_id] || + soc_reg->world_country_pending[pdev_id]) + return false; + + return true; +} + +QDF_STATUS reg_process_master_chan_list(struct cur_regulatory_info + *regulat_info) +{ + struct wlan_regulatory_psoc_priv_obj *soc_reg; + uint32_t num_2g_reg_rules, num_5g_reg_rules; + struct cur_reg_rule *reg_rule_2g, *reg_rule_5g; + uint16_t min_bw_2g, max_bw_2g, min_bw_5g, max_bw_5g; + struct regulatory_channel *mas_chan_list; + struct wlan_objmgr_psoc *psoc; + enum channel_enum chan_enum; + wlan_objmgr_ref_dbgid dbg_id; + enum direction dir; + uint8_t phy_id; + struct wlan_objmgr_pdev *pdev; + struct wlan_lmac_if_reg_tx_ops *tx_ops; + struct reg_rule_info *reg_rules; + QDF_STATUS status; + + psoc = regulat_info->psoc; + soc_reg = reg_get_psoc_obj(psoc); + + if (!IS_VALID_PSOC_REG_OBJ(soc_reg)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_FAILURE; + } + + tx_ops = reg_get_psoc_tx_ops(psoc); + phy_id = regulat_info->phy_id; + + if (reg_ignore_default_country(soc_reg, regulat_info)) { + status = reg_set_curr_country(soc_reg, regulat_info, tx_ops); + if (QDF_IS_STATUS_SUCCESS(status)) { + reg_debug("WLAN restart - Ignore default CC for phy_id: %u", + phy_id); + return QDF_STATUS_SUCCESS; + } + } + + reg_debug("process reg master chan list"); + + if (soc_reg->offload_enabled) { + dbg_id = WLAN_REGULATORY_NB_ID; + dir = NORTHBOUND; + } else { + dbg_id = WLAN_REGULATORY_SB_ID; + dir = SOUTHBOUND; + } + + if (regulat_info->status_code != REG_SET_CC_STATUS_PASS) { + reg_err("Setting country code failed, status code is %d", + 
regulat_info->status_code); + + pdev = wlan_objmgr_get_pdev_by_id(psoc, phy_id, dbg_id); + if (!pdev) { + reg_err("pdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (tx_ops->set_country_failed) + tx_ops->set_country_failed(pdev); + + wlan_objmgr_pdev_release_ref(pdev, dbg_id); + + if (regulat_info->status_code != REG_CURRENT_ALPHA2_NOT_FOUND) + return QDF_STATUS_E_FAILURE; + + soc_reg->new_user_ctry_pending[phy_id] = false; + soc_reg->new_11d_ctry_pending[phy_id] = false; + soc_reg->world_country_pending[phy_id] = true; + } + + mas_chan_list = soc_reg->mas_chan_params[phy_id].mas_chan_list; + + reg_init_channel_map(regulat_info->dfs_region); + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; + chan_enum++) { + mas_chan_list[chan_enum].chan_num = + channel_map[chan_enum].chan_num; + mas_chan_list[chan_enum].center_freq = + channel_map[chan_enum].center_freq; + mas_chan_list[chan_enum].chan_flags = + REGULATORY_CHAN_DISABLED; + mas_chan_list[chan_enum].state = + CHANNEL_STATE_DISABLE; + mas_chan_list[chan_enum].nol_chan = false; + } + + soc_reg->num_phy = regulat_info->num_phy; + soc_reg->mas_chan_params[phy_id].phybitmap = + regulat_info->phybitmap; + soc_reg->mas_chan_params[phy_id].dfs_region = + regulat_info->dfs_region; + soc_reg->mas_chan_params[phy_id].ctry_code = + regulat_info->ctry_code; + soc_reg->mas_chan_params[phy_id].reg_dmn_pair = + regulat_info->reg_dmn_pair; + qdf_mem_copy(soc_reg->mas_chan_params[phy_id].current_country, + regulat_info->alpha2, + REG_ALPHA2_LEN + 1); + qdf_mem_copy(soc_reg->cur_country, + regulat_info->alpha2, + REG_ALPHA2_LEN + 1); + reg_debug("set cur_country %.2s", soc_reg->cur_country); + + min_bw_2g = regulat_info->min_bw_2g; + max_bw_2g = regulat_info->max_bw_2g; + reg_rule_2g = regulat_info->reg_rules_2g_ptr; + num_2g_reg_rules = regulat_info->num_2g_reg_rules; + reg_update_max_bw_per_rule(num_2g_reg_rules, + reg_rule_2g, max_bw_2g); + + min_bw_5g = regulat_info->min_bw_5g; + max_bw_5g = regulat_info->max_bw_5g; + 
reg_rule_5g = regulat_info->reg_rules_5g_ptr; + num_5g_reg_rules = regulat_info->num_5g_reg_rules; + reg_update_max_bw_per_rule(num_5g_reg_rules, + reg_rule_5g, max_bw_5g); + + reg_rules = &soc_reg->mas_chan_params[phy_id].reg_rules; + reg_reset_reg_rules(reg_rules); + + reg_rules->num_of_reg_rules = num_5g_reg_rules + num_2g_reg_rules; + if (reg_rules->num_of_reg_rules > MAX_REG_RULES) { + reg_err("number of reg rules exceeds limit"); + return QDF_STATUS_E_FAILURE; + } + + if (reg_rules->num_of_reg_rules) { + if (num_2g_reg_rules) + qdf_mem_copy(reg_rules->reg_rules, + reg_rule_2g, num_2g_reg_rules * + sizeof(struct cur_reg_rule)); + if (num_5g_reg_rules) + qdf_mem_copy(reg_rules->reg_rules + + num_2g_reg_rules, reg_rule_5g, + num_5g_reg_rules * + sizeof(struct cur_reg_rule)); + } + + if (num_5g_reg_rules != 0) + reg_do_auto_bw_correction(num_5g_reg_rules, + reg_rule_5g, max_bw_5g); + + if (num_2g_reg_rules != 0) + reg_populate_band_channels(MIN_24GHZ_CHANNEL, MAX_24GHZ_CHANNEL, + reg_rule_2g, num_2g_reg_rules, + min_bw_2g, mas_chan_list); + + if (num_5g_reg_rules != 0) + reg_populate_band_channels(MIN_5GHZ_CHANNEL, MAX_5GHZ_CHANNEL, + reg_rule_5g, num_5g_reg_rules, + min_bw_5g, mas_chan_list); + + if (num_5g_reg_rules != 0) + reg_populate_band_channels(MIN_49GHZ_CHANNEL, + MAX_49GHZ_CHANNEL, + reg_rule_5g, num_5g_reg_rules, + min_bw_5g, mas_chan_list); + + if (soc_reg->new_user_ctry_pending[phy_id]) { + soc_reg->new_user_ctry_pending[phy_id] = false; + soc_reg->cc_src = SOURCE_USERSPACE; + soc_reg->user_ctry_set = true; + reg_debug("new user country is set"); + reg_run_11d_state_machine(psoc); + } else if (soc_reg->new_init_ctry_pending[phy_id]) { + soc_reg->new_init_ctry_pending[phy_id] = false; + soc_reg->cc_src = SOURCE_USERSPACE; + reg_debug("new init country is set"); + } else if (soc_reg->new_11d_ctry_pending[phy_id]) { + soc_reg->new_11d_ctry_pending[phy_id] = false; + soc_reg->cc_src = SOURCE_11D; + soc_reg->user_ctry_set = false; + 
reg_run_11d_state_machine(psoc); + } else if (soc_reg->world_country_pending[phy_id]) { + soc_reg->world_country_pending[phy_id] = false; + soc_reg->cc_src = SOURCE_CORE; + soc_reg->user_ctry_set = false; + reg_run_11d_state_machine(psoc); + } else { + if (soc_reg->cc_src == SOURCE_UNKNOWN && + soc_reg->num_phy == phy_id + 1) + soc_reg->cc_src = SOURCE_DRIVER; + + qdf_mem_copy(soc_reg->mas_chan_params[phy_id].default_country, + regulat_info->alpha2, + REG_ALPHA2_LEN + 1); + + soc_reg->mas_chan_params[phy_id].def_country_code = + regulat_info->ctry_code; + soc_reg->mas_chan_params[phy_id].def_region_domain = + regulat_info->reg_dmn_pair; + + if (soc_reg->cc_src == SOURCE_DRIVER) { + qdf_mem_copy(soc_reg->def_country, + regulat_info->alpha2, + REG_ALPHA2_LEN + 1); + + soc_reg->def_country_code = regulat_info->ctry_code; + soc_reg->def_region_domain = + regulat_info->reg_dmn_pair; + + if (reg_is_world_alpha2(regulat_info->alpha2)) { + soc_reg->cc_src = SOURCE_CORE; + reg_run_11d_state_machine(psoc); + } + } + } + + pdev = wlan_objmgr_get_pdev_by_id(psoc, phy_id, dbg_id); + if (pdev != NULL) { + reg_propagate_mas_chan_list_to_pdev(psoc, pdev, &dir); + wlan_objmgr_pdev_release_ref(pdev, dbg_id); + reg_reset_reg_rules(reg_rules); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * wlan_regulatory_psoc_obj_created_notification() - PSOC obj create callback + * @psoc: PSOC object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization to + * get notified when the object is created. 
+ * + * Return: Success or Failure + */ +QDF_STATUS wlan_regulatory_psoc_obj_created_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list) +{ + struct wlan_regulatory_psoc_priv_obj *soc_reg_obj; + struct regulatory_channel *mas_chan_list; + enum channel_enum chan_enum; + QDF_STATUS status; + uint8_t i; + uint8_t pdev_cnt; + + soc_reg_obj = qdf_mem_malloc(sizeof(*soc_reg_obj)); + if (NULL == soc_reg_obj) { + reg_alert("Mem alloc failed for reg psoc priv obj"); + return QDF_STATUS_E_NOMEM; + } + + soc_reg_obj->offload_enabled = false; + soc_reg_obj->psoc_ptr = psoc; + soc_reg_obj->dfs_enabled = true; + soc_reg_obj->band_capability = BAND_ALL; + soc_reg_obj->enable_11d_supp = false; + soc_reg_obj->indoor_chan_enabled = true; + soc_reg_obj->force_ssc_disable_indoor_channel = false; + soc_reg_obj->master_vdev_cnt = 0; + soc_reg_obj->vdev_cnt_11d = 0; + soc_reg_obj->vdev_id_for_11d_scan = INVALID_VDEV_ID; + soc_reg_obj->restart_beaconing = CH_AVOID_RULE_RESTART; + soc_reg_obj->enable_srd_chan_in_master_mode = false; + soc_reg_obj->enable_11d_in_world_mode = false; + soc_reg_obj->def_pdev_id = -1; + + for (i = 0; i < MAX_STA_VDEV_CNT; i++) + soc_reg_obj->vdev_ids_11d[i] = INVALID_VDEV_ID; + + qdf_spinlock_create(&soc_reg_obj->cbk_list_lock); + + for (pdev_cnt = 0; pdev_cnt < PSOC_MAX_PHY_REG_CAP; pdev_cnt++) { + mas_chan_list = + soc_reg_obj->mas_chan_params[pdev_cnt].mas_chan_list; + + for (chan_enum = 0; chan_enum < NUM_CHANNELS; + chan_enum++) { + mas_chan_list[chan_enum].chan_flags |= + REGULATORY_CHAN_DISABLED; + mas_chan_list[chan_enum].state = + CHANNEL_STATE_DISABLE; + mas_chan_list[chan_enum].nol_chan = false; + } + } + + status = wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_UMAC_COMP_REGULATORY, soc_reg_obj, + QDF_STATUS_SUCCESS); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_spinlock_destroy(&soc_reg_obj->cbk_list_lock); + qdf_mem_free(soc_reg_obj); + reg_err("Obj attach failed"); + return status; + } + + reg_debug("reg psoc obj created with status 
%d", status); + + return status; +} + +/** + * wlan_regulatory_psoc_obj_destroyed_notification() - PSOC obj delete callback + * @psoc: PSOC object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization to + * get notified when the object is deleted. + * + * Return: Success or Failure + */ +QDF_STATUS wlan_regulatory_psoc_obj_destroyed_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list) +{ + QDF_STATUS status; + struct wlan_regulatory_psoc_priv_obj *soc_reg; + + soc_reg = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == soc_reg) { + reg_err("reg psoc private obj is NULL"); + return QDF_STATUS_E_FAULT; + } + + soc_reg->psoc_ptr = NULL; + qdf_spinlock_destroy(&soc_reg->cbk_list_lock); + + status = wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_REGULATORY, + soc_reg); + + if (status != QDF_STATUS_SUCCESS) + reg_err("soc_reg private obj detach failed"); + + reg_debug("reg psoc obj detached with status %d", status); + + qdf_mem_free(soc_reg); + + return status; +} + +QDF_STATUS reg_set_band(struct wlan_objmgr_pdev *pdev, + enum band_info band) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_objmgr_psoc *psoc; + QDF_STATUS status; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (pdev_priv_obj->band_capability == band) { + reg_info("band is already set to %d", band); + return QDF_STATUS_SUCCESS; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + reg_info("setting band_info: %d", band); + pdev_priv_obj->band_capability 
= band; + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + + status = reg_send_scheduler_msg_sb(psoc, pdev); + + return status; +} + +#ifdef DISABLE_CHANNEL_LIST +QDF_STATUS reg_restore_cached_channels(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_objmgr_psoc *psoc; + QDF_STATUS status; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + pdev_priv_obj->disable_cached_channels = false; + reg_compute_pdev_current_chan_list(pdev_priv_obj); + status = reg_send_scheduler_msg_sb(psoc, pdev); + return status; +} + +QDF_STATUS reg_cache_channel_state(struct wlan_objmgr_pdev *pdev, + uint32_t *channel_list, + uint32_t num_channels) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_objmgr_psoc *psoc; + uint8_t i, j; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + if (pdev_priv_obj->num_cache_channels > 0) { + pdev_priv_obj->num_cache_channels = 0; + qdf_mem_set(&pdev_priv_obj->cache_disable_chan_list, + sizeof(pdev_priv_obj->cache_disable_chan_list), 0); + } + + for (i = 0; i < num_channels; i++) { + for (j 
= 0; j < NUM_CHANNELS; j++) { + if (channel_list[i] == pdev_priv_obj-> + cur_chan_list[j].chan_num) { + pdev_priv_obj-> + cache_disable_chan_list[i].chan_num = + channel_list[i]; + pdev_priv_obj-> + cache_disable_chan_list[i].state = + pdev_priv_obj->cur_chan_list[j].state; + pdev_priv_obj-> + cache_disable_chan_list[i].chan_flags = + pdev_priv_obj-> + cur_chan_list[j].chan_flags; + } + } + } + pdev_priv_obj->num_cache_channels = num_channels; + + return QDF_STATUS_SUCCESS; +} + +static void set_disable_channel_state( + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj) +{ + pdev_priv_obj->disable_cached_channels = pdev_priv_obj->sap_state; +} +#else +static void set_disable_channel_state( + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj) +{ +} +#endif + +QDF_STATUS reg_notify_sap_event(struct wlan_objmgr_pdev *pdev, + bool sap_state) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_objmgr_psoc *psoc; + QDF_STATUS status; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + reg_info("sap_state: %d", sap_state); + + if (pdev_priv_obj->sap_state == sap_state) + return QDF_STATUS_SUCCESS; + + pdev_priv_obj->sap_state = sap_state; + set_disable_channel_state(pdev_priv_obj); + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + status = reg_send_scheduler_msg_sb(psoc, pdev); + + return status; +} + +QDF_STATUS reg_set_fcc_constraint(struct wlan_objmgr_pdev *pdev, + bool fcc_constraint) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; 
+ struct wlan_objmgr_psoc *psoc; + QDF_STATUS status; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (pdev_priv_obj->set_fcc_channel == fcc_constraint) { + reg_info("fcc_constraint is already set to %d", fcc_constraint); + return QDF_STATUS_SUCCESS; + } + + reg_info("setting set_fcc_channel: %d", fcc_constraint); + pdev_priv_obj->set_fcc_channel = fcc_constraint; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + + status = reg_send_scheduler_msg_sb(psoc, pdev); + + return status; +} + +bool reg_get_fcc_constraint(struct wlan_objmgr_pdev *pdev, uint32_t freq) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return false; + } + + if (freq != CHAN_12_CENT_FREQ && freq != CHAN_13_CENT_FREQ) + return false; + + if (!pdev_priv_obj->set_fcc_channel) + return false; + + return true; +} + +QDF_STATUS reg_enable_dfs_channels(struct wlan_objmgr_pdev *pdev, + bool enable) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_objmgr_psoc *psoc; + QDF_STATUS status; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (pdev_priv_obj->dfs_enabled == enable) { + reg_info("dfs_enabled is already set to %d", enable); + return QDF_STATUS_SUCCESS; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return 
QDF_STATUS_E_INVAL; + } + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + reg_info("setting dfs_enabled: %d", enable); + + pdev_priv_obj->dfs_enabled = enable; + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + + status = reg_send_scheduler_msg_sb(psoc, pdev); + + return status; +} + +/** + * wlan_regulatory_pdev_obj_created_notification() - PDEV obj create callback + * @pdev: pdev object + * @arg_list: Variable argument list + * + * This callback is registered with object manager during initialization to + * get notified when the pdev object is created. + * + * Return: Success or Failure + */ +QDF_STATUS wlan_regulatory_pdev_obj_created_notification( + struct wlan_objmgr_pdev *pdev, void *arg_list) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap_ptr; + struct wlan_objmgr_psoc *parent_psoc; + uint32_t pdev_id; + uint32_t cnt; + uint32_t range_2g_low, range_2g_high; + uint32_t range_5g_low, range_5g_high; + QDF_STATUS status; + struct reg_rule_info *psoc_reg_rules; + + pdev_priv_obj = qdf_mem_malloc(sizeof(*pdev_priv_obj)); + if (NULL == pdev_priv_obj) { + reg_alert("Mem alloc failed for pdev priv obj"); + return QDF_STATUS_E_NOMEM; + } + + parent_psoc = wlan_pdev_get_psoc(pdev); + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + psoc_priv_obj = (struct wlan_regulatory_psoc_priv_obj *) + wlan_objmgr_psoc_get_comp_private_obj(parent_psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + qdf_mem_free(pdev_priv_obj); + return QDF_STATUS_E_FAULT; + } + + if (psoc_priv_obj->def_pdev_id == -1) + psoc_priv_obj->def_pdev_id = pdev_id; + else + reg_err("reg cannot handle more than one pdev"); + + pdev_priv_obj->pdev_ptr = pdev; + pdev_priv_obj->dfs_enabled = 
psoc_priv_obj->dfs_enabled; + pdev_priv_obj->set_fcc_channel = false; + pdev_priv_obj->band_capability = psoc_priv_obj->band_capability; + pdev_priv_obj->indoor_chan_enabled = + psoc_priv_obj->indoor_chan_enabled; + pdev_priv_obj->en_chan_144 = true; + + qdf_spinlock_create(&pdev_priv_obj->reg_rules_lock); + + reg_cap_ptr = psoc_priv_obj->reg_cap; + pdev_priv_obj->force_ssc_disable_indoor_channel = + psoc_priv_obj->force_ssc_disable_indoor_channel; + + for (cnt = 0; cnt < PSOC_MAX_PHY_REG_CAP; cnt++) { + if (reg_cap_ptr == NULL) { + qdf_mem_free(pdev_priv_obj); + reg_err(" reg cap ptr is NULL"); + return QDF_STATUS_E_FAULT; + } + + if (reg_cap_ptr->phy_id == pdev_id) + break; + reg_cap_ptr++; + } + + if (cnt == PSOC_MAX_PHY_REG_CAP) { + qdf_mem_free(pdev_priv_obj); + reg_err("extended capabilities not found for pdev"); + return QDF_STATUS_E_FAULT; + } + + range_2g_low = reg_cap_ptr->low_2ghz_chan; + range_2g_high = reg_cap_ptr->high_2ghz_chan; + range_5g_low = reg_cap_ptr->low_5ghz_chan; + range_5g_high = reg_cap_ptr->high_5ghz_chan; + + pdev_priv_obj->range_2g_low = range_2g_low; + pdev_priv_obj->range_2g_high = range_2g_high; + pdev_priv_obj->range_5g_low = range_5g_low; + pdev_priv_obj->range_5g_high = range_5g_high; + pdev_priv_obj->wireless_modes = reg_cap_ptr->wireless_modes; + + reg_init_pdev_mas_chan_list(pdev_priv_obj, + &psoc_priv_obj->mas_chan_params[pdev_id]); + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + + psoc_reg_rules = &psoc_priv_obj->mas_chan_params[pdev_id].reg_rules; + reg_save_reg_rules_to_pdev(psoc_reg_rules, pdev_priv_obj); + + status = wlan_objmgr_pdev_component_obj_attach(pdev, + WLAN_UMAC_COMP_REGULATORY, + pdev_priv_obj, + QDF_STATUS_SUCCESS); + + if (QDF_IS_STATUS_ERROR(status)) { + reg_err("Obj attach failed"); + qdf_mem_free(pdev_priv_obj); + return status; + } + reg_debug("reg pdev obj created with status %d", status); + + return status; +} + +QDF_STATUS wlan_regulatory_pdev_obj_destroyed_notification( + struct 
wlan_objmgr_pdev *pdev, void *arg_list) +{ + QDF_STATUS status; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev private obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + pdev_priv_obj->pdev_ptr = NULL; + + status = wlan_objmgr_pdev_component_obj_detach(pdev, + WLAN_UMAC_COMP_REGULATORY, + pdev_priv_obj); + + if (status != QDF_STATUS_SUCCESS) + reg_err("reg pdev private obj detach failed"); + + reg_debug("reg pdev obj deleted with status %d", status); + + qdf_spin_lock_bh(&pdev_priv_obj->reg_rules_lock); + reg_reset_reg_rules(&pdev_priv_obj->reg_rules); + qdf_spin_unlock_bh(&pdev_priv_obj->reg_rules_lock); + + qdf_spinlock_destroy(&pdev_priv_obj->reg_rules_lock); + + qdf_mem_free(pdev_priv_obj); + + return status; +} + +/** + * reg_11d_vdev_created_update() - vdev obj create callback + * @vdev: vdev pointer + * + * updates 11d state when a vdev is created. + * + * Return: Success or Failure + */ +QDF_STATUS reg_11d_vdev_created_update(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_objmgr_pdev *parent_pdev; + struct wlan_objmgr_psoc *parent_psoc; + uint32_t vdev_id; + enum QDF_OPMODE op_mode; + uint8_t i; + + op_mode = wlan_vdev_mlme_get_opmode(vdev); + + parent_pdev = wlan_vdev_get_pdev(vdev); + parent_psoc = wlan_pdev_get_psoc(parent_pdev); + + psoc_priv_obj = (struct wlan_regulatory_psoc_priv_obj *) + wlan_objmgr_psoc_get_comp_private_obj(parent_psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return QDF_STATUS_E_FAULT; + } + + if ((op_mode == QDF_STA_MODE) || + (op_mode == QDF_P2P_DEVICE_MODE) || + (op_mode == QDF_P2P_CLIENT_MODE)) { + vdev_id = wlan_vdev_get_id(vdev); + if (!psoc_priv_obj->vdev_cnt_11d) { + psoc_priv_obj->vdev_id_for_11d_scan = vdev_id; + reg_debug("running 11d state machine, opmode %d", + op_mode); + 
reg_run_11d_state_machine(parent_psoc); + } + + for (i = 0; i < MAX_STA_VDEV_CNT; i++) { + if (psoc_priv_obj->vdev_ids_11d[i] == + INVALID_VDEV_ID) { + psoc_priv_obj->vdev_ids_11d[i] = vdev_id; + break; + } + } + psoc_priv_obj->vdev_cnt_11d++; + } + + if ((op_mode == QDF_P2P_GO_MODE) || + (op_mode == QDF_SAP_MODE)) { + reg_debug("running 11d state machine, opmode %d", op_mode); + psoc_priv_obj->master_vdev_cnt++; + reg_run_11d_state_machine(parent_psoc); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * reg_11d_vdev_delete_update() - update 11d state upon vdev delete + * @vdev: vdev pointer + * + * Return: Success or Failure + */ +QDF_STATUS reg_11d_vdev_delete_update(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_objmgr_pdev *parent_pdev; + struct wlan_objmgr_psoc *parent_psoc; + enum QDF_OPMODE op_mode; + uint32_t vdev_id; + uint8_t i; + + if (!vdev) { + reg_err("vdev is NULL"); + return QDF_STATUS_E_INVAL; + } + op_mode = wlan_vdev_mlme_get_opmode(vdev); + + parent_pdev = wlan_vdev_get_pdev(vdev); + parent_psoc = wlan_pdev_get_psoc(parent_pdev); + + psoc_priv_obj = (struct wlan_regulatory_psoc_priv_obj *) + wlan_objmgr_psoc_get_comp_private_obj(parent_psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return QDF_STATUS_E_FAULT; + } + + if ((op_mode == QDF_P2P_GO_MODE) || + (op_mode == QDF_SAP_MODE)) { + psoc_priv_obj->master_vdev_cnt--; + reg_debug("run 11d state machine, deleted opmode %d", + op_mode); + reg_run_11d_state_machine(parent_psoc); + return QDF_STATUS_SUCCESS; + } + + if ((op_mode == QDF_STA_MODE) || + (op_mode == QDF_P2P_DEVICE_MODE) || + (op_mode == QDF_P2P_CLIENT_MODE)) { + vdev_id = wlan_vdev_get_id(vdev); + for (i = 0; i < MAX_STA_VDEV_CNT; i++) { + if (psoc_priv_obj->vdev_ids_11d[i] == vdev_id) { + psoc_priv_obj->vdev_ids_11d[i] = + INVALID_VDEV_ID; + psoc_priv_obj->vdev_cnt_11d--; + break; + } + } + + if 
(psoc_priv_obj->vdev_id_for_11d_scan != vdev_id) + return QDF_STATUS_SUCCESS; + + if (!psoc_priv_obj->vdev_cnt_11d) { + psoc_priv_obj->vdev_id_for_11d_scan = INVALID_VDEV_ID; + psoc_priv_obj->enable_11d_supp = false; + return QDF_STATUS_SUCCESS; + } + + for (i = 0; i < MAX_STA_VDEV_CNT; i++) { + if (psoc_priv_obj->vdev_ids_11d[i] == + INVALID_VDEV_ID) + continue; + psoc_priv_obj->vdev_id_for_11d_scan = + psoc_priv_obj->vdev_ids_11d[i]; + psoc_priv_obj->enable_11d_supp = false; + reg_debug("running 11d state machine, vdev %d", + psoc_priv_obj->vdev_id_for_11d_scan); + reg_run_11d_state_machine(parent_psoc); + break; + } + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_get_current_chan_list(struct wlan_objmgr_pdev *pdev, + struct regulatory_channel *chan_list) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev private obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_mem_copy(chan_list, pdev_priv_obj->cur_chan_list, + NUM_CHANNELS * sizeof(struct regulatory_channel)); + + return QDF_STATUS_SUCCESS; +} + +/** + * reg_update_nol_ch () - Updates NOL channels in current channel list + * @pdev: pointer to pdev object + * @ch_list: pointer to NOL channel list + * @num_ch: No.of channels in list + * @update_nol: set/reset the NOL status + * + * Return: None + */ +void reg_update_nol_ch(struct wlan_objmgr_pdev *pdev, + uint8_t *chan_list, + uint8_t num_chan, + bool nol_chan) +{ + enum channel_enum chan_enum; + struct regulatory_channel *mas_chan_list; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + uint16_t i; + + if (!num_chan || !chan_list) { + reg_err("chan_list or num_ch is NULL"); + return; + } + + pdev_priv_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == pdev_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return; + } + + mas_chan_list = pdev_priv_obj->mas_chan_list; + 
for (i = 0; i < num_chan; i++) { + chan_enum = reg_get_chan_enum(chan_list[i]); + if (chan_enum == INVALID_CHANNEL) { + reg_err("Invalid ch in nol list, chan %d", + chan_list[i]); + continue; + } + mas_chan_list[chan_enum].nol_chan = nol_chan; + } + + reg_compute_pdev_current_chan_list(pdev_priv_obj); +} + +static void reg_change_pdev_for_config(struct wlan_objmgr_psoc *psoc, + void *object, void *arg) +{ + struct wlan_objmgr_pdev *pdev = (struct wlan_objmgr_pdev *)object; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + psoc_priv_obj = (struct wlan_regulatory_psoc_priv_obj *) + wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == psoc_priv_obj) { + reg_err("psoc priv obj is NULL"); + return; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev private obj is NULL"); + return; + } + + pdev_priv_obj->dfs_enabled = + psoc_priv_obj->dfs_enabled; + pdev_priv_obj->indoor_chan_enabled = + psoc_priv_obj->indoor_chan_enabled; + pdev_priv_obj->force_ssc_disable_indoor_channel = + psoc_priv_obj->force_ssc_disable_indoor_channel; + pdev_priv_obj->band_capability = psoc_priv_obj->band_capability; + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + + reg_send_scheduler_msg_sb(psoc, pdev); +} + +QDF_STATUS reg_set_config_vars(struct wlan_objmgr_psoc *psoc, + struct reg_config_vars config_vars) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + QDF_STATUS status; + + psoc_priv_obj = (struct wlan_regulatory_psoc_priv_obj *) + wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == psoc_priv_obj) { + reg_err("psoc priv obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + psoc_priv_obj->enable_11d_supp_original = + config_vars.enable_11d_support; + psoc_priv_obj->scan_11d_interval = + config_vars.scan_11d_interval; + psoc_priv_obj->user_ctry_priority = + 
config_vars.userspace_ctry_priority; + psoc_priv_obj->dfs_enabled = + config_vars.dfs_enabled; + psoc_priv_obj->indoor_chan_enabled = + config_vars.indoor_chan_enabled; + psoc_priv_obj->force_ssc_disable_indoor_channel = + config_vars.force_ssc_disable_indoor_channel; + psoc_priv_obj->band_capability = config_vars.band_capability; + psoc_priv_obj->restart_beaconing = config_vars.restart_beaconing; + psoc_priv_obj->enable_srd_chan_in_master_mode = + config_vars.enable_srd_chan_in_master_mode; + psoc_priv_obj->enable_11d_in_world_mode = + config_vars.enable_11d_in_world_mode; + + status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_REGULATORY_SB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + reg_err("error taking psoc ref cnt"); + return status; + } + status = wlan_objmgr_iterate_obj_list(psoc, WLAN_PDEV_OP, + reg_change_pdev_for_config, + NULL, 1, WLAN_REGULATORY_SB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); + + return status; +} + +bool reg_is_disable_ch(struct wlan_objmgr_pdev *pdev, uint32_t chan) +{ + enum channel_state ch_state; + + ch_state = reg_get_channel_state(pdev, chan); + + return ch_state == CHANNEL_STATE_DISABLE; +} + +bool reg_is_regdb_offloaded(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + psoc_priv_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return false; + } + + return psoc_priv_obj->offload_enabled; +} + +void reg_program_mas_chan_list(struct wlan_objmgr_psoc *psoc, + struct regulatory_channel *reg_channels, + uint8_t *alpha2, + enum dfs_reg dfs_region) +{ + + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + QDF_STATUS status; + uint32_t count; + enum direction dir; + uint32_t pdev_cnt; + + psoc_priv_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return; + } 
+ + qdf_mem_copy(psoc_priv_obj->cur_country, alpha2, + REG_ALPHA2_LEN); + reg_debug("set cur_country %.2s", psoc_priv_obj->cur_country); + for (count = 0; count < NUM_CHANNELS; count++) { + reg_channels[count].chan_num = + channel_map[count].chan_num; + reg_channels[count].center_freq = + channel_map[count].center_freq; + reg_channels[count].nol_chan = false; + } + + for (pdev_cnt = 0; pdev_cnt < PSOC_MAX_PHY_REG_CAP; pdev_cnt++) { + qdf_mem_copy(psoc_priv_obj->mas_chan_params[pdev_cnt]. + mas_chan_list, + reg_channels, + NUM_CHANNELS * sizeof(struct regulatory_channel)); + + psoc_priv_obj->mas_chan_params[pdev_cnt].dfs_region = + dfs_region; + } + + dir = SOUTHBOUND; + status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_REGULATORY_SB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + reg_err("error taking psoc ref cnt"); + return; + } + status = wlan_objmgr_iterate_obj_list(psoc, WLAN_PDEV_OP, + reg_propagate_mas_chan_list_to_pdev, + &dir, 1, WLAN_REGULATORY_SB_ID); + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_SB_ID); +} + +void reg_register_chan_change_callback(struct wlan_objmgr_psoc *psoc, + reg_chan_change_callback cbk, + void *arg) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + uint32_t count; + + + psoc_priv_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return; + } + + qdf_spin_lock_bh(&psoc_priv_obj->cbk_list_lock); + for (count = 0; count < REG_MAX_CHAN_CHANGE_CBKS; count++) + if (psoc_priv_obj->cbk_list[count].cbk == NULL) { + psoc_priv_obj->cbk_list[count].cbk = cbk; + psoc_priv_obj->cbk_list[count].arg = arg; + psoc_priv_obj->num_chan_change_cbks++; + break; + } + qdf_spin_unlock_bh(&psoc_priv_obj->cbk_list_lock); + + if (count == REG_MAX_CHAN_CHANGE_CBKS) + reg_err("callback list is full, could not add the cbk"); +} + +void reg_unregister_chan_change_callback(struct wlan_objmgr_psoc *psoc, + reg_chan_change_callback cbk) +{ + 
struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + uint32_t count; + + psoc_priv_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return; + } + + qdf_spin_lock_bh(&psoc_priv_obj->cbk_list_lock); + for (count = 0; count < REG_MAX_CHAN_CHANGE_CBKS; count++) + if (psoc_priv_obj->cbk_list[count].cbk == cbk) { + psoc_priv_obj->cbk_list[count].cbk = NULL; + psoc_priv_obj->num_chan_change_cbks--; + break; + } + qdf_spin_unlock_bh(&psoc_priv_obj->cbk_list_lock); + + if (count == REG_MAX_CHAN_CHANGE_CBKS) + reg_err("callback not found in the list"); +} + +enum country_src reg_get_cc_and_src(struct wlan_objmgr_psoc *psoc, + uint8_t *alpha2) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + psoc_priv_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return SOURCE_UNKNOWN; + } + + qdf_mem_copy(alpha2, psoc_priv_obj->cur_country, + REG_ALPHA2_LEN + 1); + + return psoc_priv_obj->cc_src; +} + +QDF_STATUS reg_program_default_cc(struct wlan_objmgr_pdev *pdev, + uint16_t regdmn) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct cur_regulatory_info *reg_info; + uint16_t cc = -1; + uint16_t country_index = -1, regdmn_pair = -1; + struct wlan_objmgr_psoc *psoc; + QDF_STATUS err; + + pdev_priv_obj = (struct wlan_regulatory_pdev_priv_obj *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == pdev_priv_obj) { + reg_err("reg soc is NULL"); + return QDF_STATUS_E_FAILURE; + } + + reg_info = (struct cur_regulatory_info *)qdf_mem_malloc + (sizeof(struct cur_regulatory_info)); + if (reg_info == NULL) { + reg_err("reg info is NULL"); + return QDF_STATUS_E_NOMEM; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + reg_info->psoc = psoc; + 
reg_info->phy_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + if (regdmn == 0) { + reg_get_default_country(&regdmn); + regdmn |= COUNTRY_ERD_FLAG; + } + + if (regdmn & COUNTRY_ERD_FLAG) { + cc = regdmn & ~COUNTRY_ERD_FLAG; + + reg_get_rdpair_from_country_code(cc, + &country_index, + &regdmn_pair); + + err = reg_get_cur_reginfo(reg_info, country_index, regdmn_pair); + if (err == QDF_STATUS_E_FAILURE) { + reg_err("%s : Unable to set country code\n", __func__); + qdf_mem_free(reg_info->reg_rules_2g_ptr); + qdf_mem_free(reg_info->reg_rules_5g_ptr); + qdf_mem_free(reg_info); + return QDF_STATUS_E_FAILURE; + } + + pdev_priv_obj->ctry_code = cc; + + } else { + reg_get_rdpair_from_regdmn_id(regdmn, + &regdmn_pair); + + err = reg_get_cur_reginfo(reg_info, country_index, regdmn_pair); + if (err == QDF_STATUS_E_FAILURE) { + reg_err("%s : Unable to set country code\n", __func__); + qdf_mem_free(reg_info->reg_rules_2g_ptr); + qdf_mem_free(reg_info->reg_rules_5g_ptr); + qdf_mem_free(reg_info); + return QDF_STATUS_E_FAILURE; + } + + pdev_priv_obj->reg_dmn_pair = regdmn; + } + + reg_info->offload_enabled = false; + reg_process_master_chan_list(reg_info); + + qdf_mem_free(reg_info->reg_rules_2g_ptr); + qdf_mem_free(reg_info->reg_rules_5g_ptr); + qdf_mem_free(reg_info); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_get_regd_rules(struct wlan_objmgr_pdev *pdev, + struct reg_rule_info *reg_rules) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + if (!pdev) { + reg_err("pdev is NULL"); + return QDF_STATUS_E_FAILURE; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!pdev_priv_obj) { + reg_err("pdev priv obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + qdf_spin_lock_bh(&pdev_priv_obj->reg_rules_lock); + qdf_mem_copy(reg_rules, &pdev_priv_obj->reg_rules, + sizeof(struct reg_rule_info)); + qdf_spin_unlock_bh(&pdev_priv_obj->reg_rules_lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_program_chan_list(struct wlan_objmgr_pdev *pdev, + struct cc_regdmn_s *rd) +{ + 
struct cur_regulatory_info *reg_info; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + uint16_t country_index = -1, regdmn_pair = -1; + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_reg_tx_ops *tx_ops; + struct wlan_regulatory_psoc_priv_obj *soc_reg; + uint8_t pdev_id; + QDF_STATUS err; + + pdev_priv_obj = (struct wlan_regulatory_pdev_priv_obj *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == pdev_priv_obj) { + reg_err(" pdev priv obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + soc_reg = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(soc_reg)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (soc_reg->offload_enabled) { + if ((rd->flags == ALPHA_IS_SET) && (rd->cc.alpha[2] == 'O')) + pdev_priv_obj->indoor_chan_enabled = false; + else + pdev_priv_obj->indoor_chan_enabled = true; + + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + tx_ops = reg_get_psoc_tx_ops(psoc); + if (tx_ops->set_user_country_code) { + soc_reg->new_init_ctry_pending[pdev_id] = true; + return tx_ops->set_user_country_code(psoc, pdev_id, rd); + } + + return QDF_STATUS_E_FAILURE; + } + + reg_info = (struct cur_regulatory_info *)qdf_mem_malloc + (sizeof(struct cur_regulatory_info)); + if (reg_info == NULL) { + reg_err("reg info is NULL"); + return QDF_STATUS_E_NOMEM; + } + + reg_info->psoc = psoc; + reg_info->phy_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + if (rd->flags == CC_IS_SET) { + reg_get_rdpair_from_country_code(rd->cc.country_code, + &country_index, + &regdmn_pair); + } else if (rd->flags == ALPHA_IS_SET) { + reg_get_rdpair_from_country_iso(rd->cc.alpha, + &country_index, + &regdmn_pair); + } else if (rd->flags == REGDMN_IS_SET) { + reg_get_rdpair_from_regdmn_id(rd->cc.regdmn_id, + &regdmn_pair); + } + + err = reg_get_cur_reginfo(reg_info, country_index, regdmn_pair); + if (err == 
QDF_STATUS_E_FAILURE) { + reg_err("%s : Unable to set country code\n", __func__); + qdf_mem_free(reg_info->reg_rules_2g_ptr); + qdf_mem_free(reg_info->reg_rules_5g_ptr); + qdf_mem_free(reg_info); + return QDF_STATUS_E_FAILURE; + } + + reg_info->offload_enabled = false; + reg_process_master_chan_list(reg_info); + + qdf_mem_free(reg_info->reg_rules_2g_ptr); + qdf_mem_free(reg_info->reg_rules_5g_ptr); + qdf_mem_free(reg_info); + + return QDF_STATUS_SUCCESS; +} + +bool reg_is_11d_scan_inprogress(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + psoc_priv_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return false; + } + + return psoc_priv_obj->enable_11d_supp; +} + +QDF_STATUS reg_get_current_cc(struct wlan_objmgr_pdev *pdev, + struct cc_regdmn_s *rd) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = (struct wlan_regulatory_pdev_priv_obj *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == pdev_priv_obj) { + reg_err("reg pdev priv is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (rd->flags == CC_IS_SET) { + rd->cc.country_code = pdev_priv_obj->ctry_code; + } else if (rd->flags == ALPHA_IS_SET) { + qdf_mem_copy(rd->cc.alpha, pdev_priv_obj->current_country, + sizeof(rd->cc.alpha)); + } else if (rd->flags == REGDMN_IS_SET) { + rd->cc.regdmn_id = pdev_priv_obj->reg_dmn_pair; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS reg_process_ch_avoid_freq(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + enum channel_enum ch_loop; + enum channel_enum start_ch_idx; + enum channel_enum end_ch_idx; + uint16_t start_channel; + uint16_t end_channel; + uint32_t i; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct ch_avoid_freq_type *range; + + psoc_priv_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + 
WLAN_UMAC_COMP_REGULATORY); + + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < psoc_priv_obj->avoid_freq_list.ch_avoid_range_cnt; + i++) { + if (psoc_priv_obj->unsafe_chan_list.ch_cnt >= NUM_CHANNELS) { + reg_warn("LTE Coex unsafe channel list full"); + break; + } + + start_ch_idx = INVALID_CHANNEL; + end_ch_idx = INVALID_CHANNEL; + range = &psoc_priv_obj->avoid_freq_list.avoid_freq_range[i]; + + start_channel = reg_freq_to_chan(pdev, range->start_freq); + end_channel = reg_freq_to_chan(pdev, range->end_freq); + reg_debug("start: freq %d, ch %d, end: freq %d, ch %d", + range->start_freq, start_channel, range->end_freq, + end_channel); + + /* do not process frequency bands that are not mapped to + * predefined channels + */ + if (start_channel == 0 || end_channel == 0) + continue; + + for (ch_loop = 0; ch_loop < NUM_CHANNELS; + ch_loop++) { + if (REG_CH_TO_FREQ(ch_loop) >= range->start_freq) { + start_ch_idx = ch_loop; + break; + } + } + for (ch_loop = 0; ch_loop < NUM_CHANNELS; + ch_loop++) { + if (REG_CH_TO_FREQ(ch_loop) >= range->end_freq) { + end_ch_idx = ch_loop; + if (REG_CH_TO_FREQ(ch_loop) > range->end_freq) + end_ch_idx--; + break; + } + } + + if (start_ch_idx == INVALID_CHANNEL || + end_ch_idx == INVALID_CHANNEL) + continue; + + for (ch_loop = start_ch_idx; ch_loop <= end_ch_idx; + ch_loop++) { + psoc_priv_obj->unsafe_chan_list.ch_list[ + psoc_priv_obj->unsafe_chan_list.ch_cnt++] = + REG_CH_NUM(ch_loop); + if (psoc_priv_obj->unsafe_chan_list.ch_cnt >= + NUM_CHANNELS) { + reg_warn("LTECoex unsafe ch list full"); + break; + } + } + } + + reg_debug("number of unsafe channels is %d ", + psoc_priv_obj->unsafe_chan_list.ch_cnt); + + if (!psoc_priv_obj->unsafe_chan_list.ch_cnt) { + reg_debug("No valid ch are present in avoid freq event"); + return QDF_STATUS_SUCCESS; + } + + for (ch_loop = 0; ch_loop < psoc_priv_obj->unsafe_chan_list.ch_cnt; + ch_loop++) { + if (ch_loop >= NUM_CHANNELS) + 
break; + reg_debug("channel %d is not safe", + psoc_priv_obj->unsafe_chan_list. + ch_list[ch_loop]); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * reg_update_unsafe_ch () - Updates unsafe channels in current channel list + * @pdev: pointer to pdev object + * @ch_avoid_list: pointer to unsafe channel list + * + * Return: None + */ +static void reg_update_unsafe_ch(struct wlan_objmgr_psoc *psoc, + void *object, void *arg) +{ + struct wlan_objmgr_pdev *pdev = (struct wlan_objmgr_pdev *)object; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + QDF_STATUS status; + + psoc_priv_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return; + } + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("reg pdev priv obj is NULL"); + return; + } + + if (psoc_priv_obj->ch_avoid_ind) { + status = reg_process_ch_avoid_freq(psoc, pdev); + if (QDF_IS_STATUS_ERROR(status)) + psoc_priv_obj->ch_avoid_ind = false; + } + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + status = reg_send_scheduler_msg_nb(psoc, pdev); + + if (QDF_IS_STATUS_ERROR(status)) + reg_err("channel change msg schedule failed"); + +} + +QDF_STATUS reg_process_ch_avoid_event(struct wlan_objmgr_psoc *psoc, + struct ch_avoid_ind_type *ch_avoid_event) +{ + uint32_t i; + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + QDF_STATUS status; + + psoc_priv_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + /* Make unsafe channel list */ + reg_debug("band count %d", ch_avoid_event->ch_avoid_range_cnt); + + /* generate vendor specific event */ + qdf_mem_zero((void *)&psoc_priv_obj->avoid_freq_list, + sizeof(struct ch_avoid_ind_type)); + qdf_mem_zero((void 
*)&psoc_priv_obj->unsafe_chan_list, + sizeof(struct unsafe_ch_list)); + + for (i = 0; i < ch_avoid_event->ch_avoid_range_cnt; i++) { + if ((CH_AVOID_RULE_RESTART_24G_ONLY == + psoc_priv_obj->restart_beaconing) && + REG_IS_5GHZ_FREQ(ch_avoid_event-> + avoid_freq_range[i].start_freq)) { + reg_debug("skipping 5Ghz LTE Coex unsafe channel range"); + continue; + } + psoc_priv_obj->avoid_freq_list.avoid_freq_range[i].start_freq = + ch_avoid_event->avoid_freq_range[i].start_freq; + psoc_priv_obj->avoid_freq_list.avoid_freq_range[i].end_freq = + ch_avoid_event->avoid_freq_range[i].end_freq; + } + psoc_priv_obj->avoid_freq_list.ch_avoid_range_cnt = + ch_avoid_event->ch_avoid_range_cnt; + + psoc_priv_obj->ch_avoid_ind = true; + + status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_REGULATORY_NB_ID); + + if (QDF_IS_STATUS_ERROR(status)) { + reg_err("error taking psoc ref cnt"); + return status; + } + + status = wlan_objmgr_iterate_obj_list(psoc, WLAN_PDEV_OP, + reg_update_unsafe_ch, NULL, 1, + WLAN_REGULATORY_NB_ID); + + wlan_objmgr_psoc_release_ref(psoc, WLAN_REGULATORY_NB_ID); + + return status; +} + +QDF_STATUS reg_save_new_11d_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_lmac_if_reg_tx_ops *tx_ops; + struct set_country country_code; + uint8_t pdev_id; + + psoc_priv_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (!psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + + return QDF_STATUS_E_FAILURE; + } + + pdev_id = psoc_priv_obj->def_pdev_id; + psoc_priv_obj->new_11d_ctry_pending[pdev_id] = true; + qdf_mem_copy(country_code.country, country, REG_ALPHA2_LEN + 1); + country_code.pdev_id = pdev_id; + + if (psoc_priv_obj->offload_enabled) { + tx_ops = reg_get_psoc_tx_ops(psoc); + if (tx_ops->set_country_code) { + tx_ops->set_country_code(psoc, &country_code); + } else { + reg_err("country set handler is not present"); + 
psoc_priv_obj->new_11d_ctry_pending[pdev_id] = false; + return QDF_STATUS_E_FAULT; + } + } + + return QDF_STATUS_SUCCESS; +} + +bool reg_11d_original_enabled_on_host(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + psoc_priv_obj = + wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + return (psoc_priv_obj->enable_11d_supp_original && + !psoc_priv_obj->is_11d_offloaded); +} + +bool reg_11d_enabled_on_host(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + + psoc_priv_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == psoc_priv_obj) { + reg_err("reg psoc private obj is NULL"); + return QDF_STATUS_E_FAILURE; + } + + return (psoc_priv_obj->enable_11d_supp && + !psoc_priv_obj->is_11d_offloaded); +} + +QDF_STATUS reg_set_regdb_offloaded(struct wlan_objmgr_psoc *psoc, + bool val) +{ + struct wlan_regulatory_psoc_priv_obj *soc_reg; + + soc_reg = reg_get_psoc_obj(psoc); + + if (!IS_VALID_PSOC_REG_OBJ(soc_reg)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_FAILURE; + } + + soc_reg->offload_enabled = val; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_set_11d_offloaded(struct wlan_objmgr_psoc *psoc, + bool val) +{ + struct wlan_regulatory_psoc_priv_obj *soc_reg; + + soc_reg = reg_get_psoc_obj(psoc); + + if (!IS_VALID_PSOC_REG_OBJ(soc_reg)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_FAILURE; + } + + soc_reg->is_11d_offloaded = val; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_get_curr_regdomain(struct wlan_objmgr_pdev *pdev, + struct cur_regdmn_info *cur_regdmn) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_regulatory_psoc_priv_obj *soc_reg; + uint16_t index; + int num_reg_dmn; + uint8_t phy_id; + + psoc = wlan_pdev_get_psoc(pdev); + soc_reg = 
reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(soc_reg)) { + reg_err("soc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + phy_id = wlan_objmgr_pdev_get_pdev_id(pdev); + cur_regdmn->regdmn_pair_id = + soc_reg->mas_chan_params[phy_id].reg_dmn_pair; + + reg_get_num_reg_dmn_pairs(&num_reg_dmn); + for (index = 0; index < num_reg_dmn; index++) { + if (g_reg_dmn_pairs[index].reg_dmn_pair_id == + cur_regdmn->regdmn_pair_id) + break; + } + + if (index == num_reg_dmn) { + reg_err("invalid regdomain"); + return QDF_STATUS_E_FAILURE; + } + + cur_regdmn->dmn_id_2g = g_reg_dmn_pairs[index].dmn_id_2g; + cur_regdmn->dmn_id_5g = g_reg_dmn_pairs[index].dmn_id_5g; + cur_regdmn->ctl_2g = regdomains_2g[cur_regdmn->dmn_id_2g].ctl_val; + cur_regdmn->ctl_5g = regdomains_5g[cur_regdmn->dmn_id_5g].ctl_val; + cur_regdmn->dfs_region = + regdomains_5g[cur_regdmn->dmn_id_5g].dfs_region; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS reg_modify_chan_144(struct wlan_objmgr_pdev *pdev, + bool enable_ch_144) +{ + struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj; + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + struct wlan_objmgr_psoc *psoc; + struct wlan_lmac_if_reg_tx_ops *reg_tx_ops; + QDF_STATUS status; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (pdev_priv_obj->en_chan_144 == enable_ch_144) { + reg_info("chan 144 is already %d", enable_ch_144); + return QDF_STATUS_SUCCESS; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + reg_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc_priv_obj = reg_get_psoc_obj(psoc); + if (!IS_VALID_PSOC_REG_OBJ(psoc_priv_obj)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_INVAL; + } + + reg_debug("setting chan 144: %d", enable_ch_144); + pdev_priv_obj->en_chan_144 = enable_ch_144; + + reg_compute_pdev_current_chan_list(pdev_priv_obj); + + reg_tx_ops = 
reg_get_psoc_tx_ops(psoc); + if (reg_tx_ops->fill_umac_legacy_chanlist) + reg_tx_ops->fill_umac_legacy_chanlist(pdev, + pdev_priv_obj->cur_chan_list); + + status = reg_send_scheduler_msg_sb(psoc, pdev); + + return status; + +} + +bool reg_get_en_chan_144(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = reg_get_pdev_obj(pdev); + if (!IS_VALID_PDEV_REG_OBJ(pdev_priv_obj)) { + reg_err("pdev reg component is NULL"); + return false; + } + + return pdev_priv_obj->en_chan_144; +} + +struct wlan_psoc_host_hal_reg_capabilities_ext *reg_get_hal_reg_cap( + struct wlan_objmgr_psoc *psoc) +{ + struct wlan_regulatory_psoc_priv_obj *soc_reg; + + soc_reg = reg_get_psoc_obj(psoc); + + if (!IS_VALID_PSOC_REG_OBJ(soc_reg)) { + reg_err("psoc reg component is NULL"); + return NULL; + } + + return soc_reg->reg_cap; +} + +QDF_STATUS reg_set_hal_reg_cap(struct wlan_objmgr_psoc *psoc, + struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap, + uint16_t phy_cnt) +{ + struct wlan_regulatory_psoc_priv_obj *soc_reg; + + soc_reg = reg_get_psoc_obj(psoc); + + if (!IS_VALID_PSOC_REG_OBJ(soc_reg)) { + reg_err("psoc reg component is NULL"); + return QDF_STATUS_E_FAILURE; + } + + if (phy_cnt > PSOC_MAX_PHY_REG_CAP) { + reg_err("phy cnt:%d is more than %d", phy_cnt, + PSOC_MAX_PHY_REG_CAP); + return QDF_STATUS_E_FAILURE; + } + + qdf_mem_copy(soc_reg->reg_cap, reg_cap, + phy_cnt * + sizeof(struct wlan_psoc_host_hal_reg_capabilities_ext)); + + return QDF_STATUS_SUCCESS; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_services.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_services.h new file mode 100644 index 0000000000000000000000000000000000000000..0a84d5a3c9795a41c894ef0759fd55b2e6bb47b4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/core/src/reg_services.h @@ -0,0 +1,689 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
+ * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: reg_services.h + * This file provides prototypes of the regulatory component + * service functions + */ + +#ifndef __REG_SERVICES_H_ +#define __REG_SERVICES_H_ + +#include +#include +#include +#include +#include +#include +#include +#include "reg_db.h" +#include + +#define REG_MIN_24GHZ_CH_NUM channel_map[MIN_24GHZ_CHANNEL].chan_num +#define REG_MAX_24GHZ_CH_NUM channel_map[MAX_24GHZ_CHANNEL].chan_num +#define REG_MIN_5GHZ_CH_NUM channel_map[MIN_5GHZ_CHANNEL].chan_num +#define REG_MAX_5GHZ_CH_NUM channel_map[MAX_5GHZ_CHANNEL].chan_num + +#ifdef WLAN_FEATURE_DSRC +#define REG_DSRC_START_FREQ channel_map[MIN_DSRC_CHANNEL].center_freq +#define REG_DSRC_END_FREQ channel_map[MAX_DSRC_CHANNEL].center_freq +#endif + +#define REG_ETSI13_SRD_START_FREQ 5745 +#define REG_ETSI13_SRD_END_FREQ 5865 + +#define REG_IS_24GHZ_CH(chan_num) \ + ((chan_num >= REG_MIN_24GHZ_CH_NUM) && \ + (chan_num <= REG_MAX_24GHZ_CH_NUM)) + +#define REG_MIN_24GHZ_CH_FREQ channel_map[MIN_24GHZ_CHANNEL].center_freq +#define REG_MAX_24GHZ_CH_FREQ channel_map[MAX_24GHZ_CHANNEL].center_freq + +#define REG_IS_24GHZ_CH_FREQ(freq) \ + ((freq >= REG_MIN_24GHZ_CH_FREQ) && \ + (freq <= REG_MAX_24GHZ_CH_FREQ)) + +#ifndef CONFIG_LEGACY_CHAN_ENUM 
#define REG_MIN_49GHZ_CH_FREQ channel_map[MIN_49GHZ_CHANNEL].center_freq
#define REG_MAX_49GHZ_CH_FREQ channel_map[MAX_49GHZ_CHANNEL].center_freq

/* True when @freq (MHz) lies within the 4.9 GHz band */
#define REG_IS_49GHZ_FREQ(freq) \
	((freq >= REG_MIN_49GHZ_CH_FREQ) && \
	(freq <= REG_MAX_49GHZ_CH_FREQ))
#endif /* NOTE(review): closes a conditional opened before this hunk */

/* True when @chan_num is a 5 GHz channel number */
#define REG_IS_5GHZ_CH(chan_num) \
	((chan_num >= REG_MIN_5GHZ_CH_NUM) && \
	(chan_num <= REG_MAX_5GHZ_CH_NUM))

/* True when @freq (MHz) lies within the 5 GHz band */
#define REG_IS_5GHZ_FREQ(freq) \
	((freq >= channel_map[MIN_5GHZ_CHANNEL].center_freq) && \
	(freq <= channel_map[MAX_5GHZ_CHANNEL].center_freq))

/* Map a channel_enum index to its channel number / center frequency */
#define REG_CH_NUM(ch_enum) channel_map[ch_enum].chan_num
#define REG_CH_TO_FREQ(ch_enum) channel_map[ch_enum].center_freq

/*
 * True when the center-frequency separation between channel numbers
 * @curchan and @newchan exceeds REG_SBS_SEPARATION_THRESHOLD.
 */
#define REG_IS_CHANNEL_VALID_5G_SBS(curchan, newchan) \
	(curchan > newchan ? \
	REG_CH_TO_FREQ(reg_get_chan_enum(curchan)) \
	- REG_CH_TO_FREQ(reg_get_chan_enum(newchan)) \
	> REG_SBS_SEPARATION_THRESHOLD : \
	REG_CH_TO_FREQ(reg_get_chan_enum(newchan)) \
	- REG_CH_TO_FREQ(reg_get_chan_enum(curchan)) \
	> REG_SBS_SEPARATION_THRESHOLD)

/* NULL checks for the psoc/pdev private regulatory objects */
#define IS_VALID_PSOC_REG_OBJ(psoc_priv_obj) (NULL != psoc_priv_obj)
#define IS_VALID_PDEV_REG_OBJ(pdev_priv_obj) (NULL != pdev_priv_obj)

/* EEPROM setting is a country code */
#define COUNTRY_ERD_FLAG 0x8000

/**
 * reg_is_world_ctry_code() - Check if the given country code is WORLD regdomain
 * @ctry_code: Country code value.
 *
 * Return: If country code is WORLD regdomain return true else false
 */
bool reg_is_world_ctry_code(uint16_t ctry_code);

/* Channel map table currently in use */
extern const struct chan_map *channel_map;

/* Map a channel number to its channel_enum index */
enum channel_enum reg_get_chan_enum(uint32_t chan_num);

QDF_STATUS reg_get_channel_list_with_power(struct wlan_objmgr_pdev *pdev,
					   struct channel_power *ch_list,
					   uint8_t *num_chan);

enum channel_state reg_get_channel_state(struct wlan_objmgr_pdev *pdev,
					 uint32_t ch);

/**
 * reg_chan_has_dfs_attribute() - check whether the channel has the dfs
 * attribute or not
 * @pdev: pdev ptr
 * @ch: channel number.
 *
 * This API gets the channel's initial dfs attribute flag from the regdomain
 *
 * Return: true if chan is dfs, otherwise false
 */
bool reg_chan_has_dfs_attribute(struct wlan_objmgr_pdev *pdev, uint32_t ch);

enum channel_state reg_get_5g_bonded_channel_state(struct wlan_objmgr_pdev
						   *pdev,
						   uint8_t ch,
						   enum phy_ch_width bw);

enum channel_state reg_get_2g_bonded_channel_state(struct wlan_objmgr_pdev
						   *pdev,
						   uint8_t oper_ch,
						   uint8_t sec_ch,
						   enum phy_ch_width bw);

void reg_set_channel_params(struct wlan_objmgr_pdev *pdev,
			    uint8_t ch, uint8_t sec_ch_2g,
			    struct ch_params *ch_params);

/**
 * reg_set_band() - Sets the band information for the PDEV
 * @pdev: The physical dev to set the band for
 * @band: The set band parameters to configure for the physical device
 *
 * Return: QDF_STATUS
 */
QDF_STATUS reg_set_band(struct wlan_objmgr_pdev *pdev, enum band_info band);

/**
 * reg_restore_cached_channels() - Restore the cached state of the channels
 * @pdev: The physical dev to restore the cached channel state for
 *
 * Return: QDF_STATUS
 */
#ifdef DISABLE_CHANNEL_LIST
QDF_STATUS reg_restore_cached_channels(struct wlan_objmgr_pdev *pdev);
#else
static inline
QDF_STATUS reg_restore_cached_channels(struct wlan_objmgr_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * reg_cache_channel_state() - Cache the current state of the channels
 * @pdev: The physical dev to cache the channels for
 * @channel_list: List of the channels for which states needs to be cached
 * @num_channels: Number of channels in the list
 *
 * Return: QDF_STATUS
 */
#ifdef DISABLE_CHANNEL_LIST
QDF_STATUS reg_cache_channel_state(struct wlan_objmgr_pdev *pdev,
				   uint32_t *channel_list,
				   uint32_t num_channels);
#else
static inline
QDF_STATUS reg_cache_channel_state(struct wlan_objmgr_pdev *pdev,
				   uint32_t *channel_list,
				   uint32_t num_channels)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * reg_notify_sap_event() - Notify regulatory domain for sap event
 * @pdev: The physical dev to notify the sap event for
 * @sap_state: true for sap start else false
 *
 * Return: QDF_STATUS
 */
QDF_STATUS reg_notify_sap_event(struct wlan_objmgr_pdev *pdev,
			bool sap_state);
/**
 * reg_set_fcc_constraint() - Apply fcc constraints on channels 12/13
 * @pdev: The physical dev to set the band for
 * @fcc_constraint: true to apply the constraint, false to remove it
 *
 * This function reduces the transmit power on channels 12 and 13, to comply
 * with FCC regulations in the USA.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS reg_set_fcc_constraint(struct wlan_objmgr_pdev *pdev,
				  bool fcc_constraint);

/**
 * reg_get_fcc_constraint() - Check FCC constraint on given frequency
 * @pdev: physical dev to get
 * @freq: frequency to be checked
 *
 * Return: If FCC constraint is applied on given frequency return true
 * else return false.
 */
bool reg_get_fcc_constraint(struct wlan_objmgr_pdev *pdev, uint32_t freq);

/**
 * reg_read_default_country() - Get the default regulatory country
 * @psoc: The physical SoC to get default country from
 * @country_code: the buffer to populate the country code into
 *
 * Return: QDF_STATUS
 */
QDF_STATUS reg_read_default_country(struct wlan_objmgr_psoc *psoc,
				    uint8_t *country_code);

/**
 * reg_read_current_country() - Get the current regulatory country
 * @psoc: The physical SoC to get current country from
 * @country_code: the buffer to populate the country code into
 *
 * Return: QDF_STATUS
 */
QDF_STATUS reg_read_current_country(struct wlan_objmgr_psoc *psoc,
				    uint8_t *country_code);

/**
 * reg_set_default_country() - Set the default regulatory country
 * @psoc: The physical SoC to set default country for
 * @country: The country information to configure
 *
 * Return: QDF_STATUS
 */
QDF_STATUS reg_set_default_country(struct wlan_objmgr_psoc *psoc,
				   uint8_t *country);

/**
 * reg_is_world_alpha2 - is reg world mode
 * @alpha2: country code pointer
 *
 * Return: true or false
 */
bool reg_is_world_alpha2(uint8_t *alpha2);

/**
 * reg_is_us_alpha2 - is US country code
 * @alpha2: country code pointer
 *
 * Return: true or false
 */
bool reg_is_us_alpha2(uint8_t *alpha2);

/**
 * reg_set_country() - Set the current regulatory country
 * @pdev: pdev device for country information
 * @country: country value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS reg_set_country(struct wlan_objmgr_pdev *pdev, uint8_t *country);

/**
 * reg_set_11d_country() - Set the 11d regulatory country
 * @pdev: pdev device for country information
 * @country: country value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS reg_set_11d_country(struct wlan_objmgr_pdev *pdev, uint8_t *country);

/**
 * reg_reset_country() - Reset the regulatory country to default
 * @psoc: The physical SoC to reset country for
 *
 * Return: QDF_STATUS
 */
QDF_STATUS reg_reset_country(struct wlan_objmgr_psoc *psoc);

/**
 * reg_enable_dfs_channels() - Enable the use of DFS channels
 * @pdev: The physical dev to enable/disable DFS channels for
 * @enable: true to enable DFS channels, false to disable them
 *
 * Return: QDF_STATUS
 */
QDF_STATUS reg_enable_dfs_channels(struct wlan_objmgr_pdev *pdev, bool enable);


void reg_get_current_dfs_region(struct wlan_objmgr_pdev *pdev,
				enum dfs_reg *dfs_reg);

uint32_t reg_get_channel_reg_power(struct wlan_objmgr_pdev *pdev,
				   uint32_t chan_num);

uint32_t reg_get_channel_freq(struct wlan_objmgr_pdev *pdev,
			      uint32_t chan_num);


uint16_t reg_get_bw_value(enum phy_ch_width bw);

void reg_set_dfs_region(struct wlan_objmgr_pdev *pdev,
			enum dfs_reg dfs_reg);

QDF_STATUS reg_get_domain_from_country_code(v_REGDOMAIN_t *reg_domain_ptr,
					    const uint8_t *country_alpha2,
					    enum country_src source);

enum band_info reg_chan_to_band(uint32_t chan_num);

uint16_t reg_dmn_get_chanwidth_from_opclass(uint8_t *country,
					    uint8_t channel,
					    uint8_t opclass);

uint16_t reg_dmn_get_opclass_from_channel(uint8_t *country,
					  uint8_t channel,
					  uint8_t offset);

uint16_t reg_dmn_set_curr_opclasses(uint8_t num_classes, uint8_t *class);

uint16_t reg_dmn_get_curr_opclasses(uint8_t *num_classes, uint8_t *class);


QDF_STATUS reg_process_master_chan_list(struct cur_regulatory_info *reg_info);

QDF_STATUS wlan_regulatory_psoc_obj_created_notification(
	struct wlan_objmgr_psoc *psoc,
	void *arg_list);

QDF_STATUS wlan_regulatory_psoc_obj_destroyed_notification(
	struct wlan_objmgr_psoc *psoc,
	void *arg_list);

QDF_STATUS wlan_regulatory_pdev_obj_created_notification(
	struct wlan_objmgr_pdev *pdev,
	void *arg_list);

QDF_STATUS wlan_regulatory_pdev_obj_destroyed_notification(
	struct wlan_objmgr_pdev *pdev,
	void *arg_list);

/* Fetch the regulatory tx ops registered with the psoc */
static inline struct wlan_lmac_if_reg_tx_ops *
reg_get_psoc_tx_ops(struct wlan_objmgr_psoc *psoc)
{
	return &((psoc->soc_cb.tx_ops.reg_ops));
}

QDF_STATUS reg_get_current_chan_list(struct wlan_objmgr_pdev *pdev,
				     struct regulatory_channel
				     *chan_list);

QDF_STATUS reg_program_chan_list(struct wlan_objmgr_pdev *pdev,
				 struct cc_regdmn_s *rd);

void reg_update_nol_ch(struct wlan_objmgr_pdev *pdev, uint8_t *ch_list,
		       uint8_t num_ch, bool nol_ch);

/**
 * reg_is_dfs_ch () - Checks the channel state for DFS
 * @pdev: pdev ptr
 * @chan: channel
 *
 * Return: true or false
 */
bool reg_is_dfs_ch(struct wlan_objmgr_pdev *pdev, uint32_t chan);

#ifdef WLAN_FEATURE_DSRC
/**
 * reg_is_dsrc_chan () - Checks the channel for DSRC or not
 * @pdev: pdev ptr
 * @chan: channel
 *
 * Return: true or false
 */
bool reg_is_dsrc_chan(struct wlan_objmgr_pdev *pdev, uint32_t chan);

/* ETSI13 SRD checks are inline stubs when DSRC support is compiled in */
static inline bool reg_is_etsi13_srd_chan(struct wlan_objmgr_pdev *pdev,
					  uint32_t chan)
{
	return false;
}

static inline bool reg_is_etsi13_regdmn(struct wlan_objmgr_pdev *pdev)
{
	return false;
}

static inline bool
reg_is_etsi13_srd_chan_allowed_master_mode(struct wlan_objmgr_pdev *pdev)
{
	return true;
}
#else
/**
 * reg_is_etsi13_regdmn () - Checks if the current reg domain is ETSI13 or not
 * @pdev: pdev ptr
 *
 * Return: true or false
 */
bool reg_is_etsi13_regdmn(struct wlan_objmgr_pdev *pdev);

/**
 * reg_is_etsi13_srd_chan () - Checks the channel for ETSI13 srd ch or not
 * @pdev: pdev ptr
 * @chan: channel
 *
 * Return: true or false
 */
bool reg_is_etsi13_srd_chan(struct wlan_objmgr_pdev *pdev, uint32_t chan);

/**
 * reg_is_etsi13_srd_chan_allowed_master_mode() - Checks if regdmn is ETSI13
 * and SRD channels are allowed in master mode or not.
 * @pdev: pdev ptr
 *
 * Return: true or false
 */
bool reg_is_etsi13_srd_chan_allowed_master_mode(struct wlan_objmgr_pdev *pdev);

/* DSRC channels are inline stubs when WLAN_FEATURE_DSRC is disabled */
static inline bool reg_is_dsrc_chan(struct wlan_objmgr_pdev *pdev,
				    uint32_t chan)
{
	return false;
}
#endif

bool reg_is_passive_or_disable_ch(struct wlan_objmgr_pdev *pdev,
				  uint32_t chan);

bool reg_is_disable_ch(struct wlan_objmgr_pdev *pdev, uint32_t chan);

uint32_t reg_freq_to_chan(struct wlan_objmgr_pdev *pdev, uint32_t freq);

uint32_t reg_chan_to_freq(struct wlan_objmgr_pdev *pdev, uint32_t chan_num);

/**
 * reg_chan_is_49ghz() - Check if the input channel number is 4.9GHz
 * @pdev: Pdev pointer
 * @chan_num: Input channel number
 *
 * Return: true if the channel is 4.9GHz else false.
 */
bool reg_chan_is_49ghz(struct wlan_objmgr_pdev *pdev,
		uint8_t chan_num);

/**
 * reg_set_config_vars () - set configuration variables
 * @psoc: psoc ptr
 * @config_vars: configuration struct
 *
 * Return: QDF_STATUS
 */
QDF_STATUS reg_set_config_vars(struct wlan_objmgr_psoc *psoc,
			       struct reg_config_vars config_vars);

bool reg_is_regdb_offloaded(struct wlan_objmgr_psoc *psoc);

void reg_program_mas_chan_list(struct wlan_objmgr_psoc *psoc,
			       struct regulatory_channel *reg_channels,
			       uint8_t *alpha2,
			       enum dfs_reg dfs_region);

/**
 * reg_get_regd_rules() - provides the reg domain rules info
 * @pdev: pdev pointer
 * @reg_rules: regulatory rules
 *
 * Return: QDF_STATUS
 */
QDF_STATUS reg_get_regd_rules(struct wlan_objmgr_pdev *pdev,
			      struct reg_rule_info *reg_rules);

/**
 * reg_reset_reg_rules() - reset the given reg domain rules structure
 * @reg_rules: reg rules pointer
 *
 * Return: None
 */
void reg_reset_reg_rules(struct reg_rule_info *reg_rules);

QDF_STATUS reg_program_default_cc(struct wlan_objmgr_pdev *pdev,
				  uint16_t regdmn);

QDF_STATUS reg_get_current_cc(struct wlan_objmgr_pdev *pdev,
			      struct cc_regdmn_s *rd);

QDF_STATUS reg_get_curr_band(struct wlan_objmgr_pdev *pdev,
			     enum band_info *band);

/* Callback invoked whenever the current channel list changes */
typedef void (*reg_chan_change_callback)(struct wlan_objmgr_psoc *psoc,
					 struct wlan_objmgr_pdev *pdev,
					 struct regulatory_channel *chan_list,
					 struct avoid_freq_ind_data *avoid_freq_ind,
					 void *arg);

void reg_register_chan_change_callback(struct wlan_objmgr_psoc *psoc,
				       reg_chan_change_callback cbk,
				       void *arg);

void reg_unregister_chan_change_callback(struct wlan_objmgr_psoc *psoc,
					 reg_chan_change_callback cbk);


/* One registered channel-change callback and its opaque argument */
struct chan_change_cbk_entry {
	reg_chan_change_callback cbk;
	void *arg;
};

bool reg_is_11d_scan_inprogress(struct wlan_objmgr_psoc *psoc);

enum country_src reg_get_cc_and_src(struct wlan_objmgr_psoc *psoc,
				    uint8_t *alpha2);

/**
 * reg_save_new_11d_country() - Save the 11d new country
 * @psoc: psoc for country information
 * @country: country value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS reg_save_new_11d_country(struct wlan_objmgr_psoc *psoc,
				    uint8_t *country);

/**
 * reg_11d_original_enabled_on_host() - whether 11d original enabled on host
 * @psoc: psoc ptr
 *
 * Return: bool
 */
bool reg_11d_original_enabled_on_host(struct wlan_objmgr_psoc *psoc);

/**
 * reg_11d_enabled_on_host() - know whether 11d enabled on host
 * @psoc: psoc ptr
 *
 * Return: bool
 */
bool reg_11d_enabled_on_host(struct wlan_objmgr_psoc *psoc);

/**
 * reg_11d_vdev_delete_update() - update 11d state upon vdev delete
 * @vdev: vdev pointer
 *
 * Return: Success or Failure
 */
QDF_STATUS reg_11d_vdev_delete_update(struct wlan_objmgr_vdev *vdev);

/**
 * reg_11d_vdev_created_update() - update 11d state upon vdev create
 * @vdev: vdev pointer
 *
 * Return: Success or Failure
 */
QDF_STATUS reg_11d_vdev_created_update(struct wlan_objmgr_vdev *vdev);

/**
 * reg_get_psoc_obj() - Provides the reg component object pointer
 * @psoc: pointer to psoc object.
 *
 * Return: reg component object pointer
 */
struct wlan_regulatory_psoc_priv_obj *reg_get_psoc_obj(
	struct wlan_objmgr_psoc *psoc);

/**
 * reg_reset_ctry_pending_hints() - Reset all country pending hints
 * @soc_reg: regulatory private object
 *
 * Return: None
 */
void
reg_reset_ctry_pending_hints(struct wlan_regulatory_psoc_priv_obj *soc_reg);

/**
 * reg_set_regdb_offloaded() - set/clear regulatory offloaded flag
 * @psoc: psoc pointer
 * @val: value to set the flag to
 *
 * Return: Success or Failure
 */
QDF_STATUS reg_set_regdb_offloaded(struct wlan_objmgr_psoc *psoc,
				   bool val);

/**
 * reg_set_11d_offloaded() - set/clear 11d offloaded flag
 * @psoc: psoc pointer
 * @val: value to set the flag to
 *
 * Return: Success or Failure
 */
QDF_STATUS reg_set_11d_offloaded(struct wlan_objmgr_psoc *psoc,
				 bool val);

/**
 * reg_get_curr_regdomain() - Get current regdomain in use
 * @pdev: pdev pointer
 * @cur_regdmn: Current regdomain info
 *
 * Return: QDF status
 */
QDF_STATUS reg_get_curr_regdomain(struct wlan_objmgr_pdev *pdev,
				  struct cur_regdmn_info *cur_regdmn);

/**
 * reg_modify_chan_144() - Enable/Disable channel 144
 * @pdev: pdev pointer
 * @en_chan_144: flag to disable/enable channel 144
 *
 * Return: Success or Failure
 */
QDF_STATUS reg_modify_chan_144(struct wlan_objmgr_pdev *pdev,
			       bool en_chan_144);

/**
 * reg_get_en_chan_144() - get en_chan_144 flag value
 * @pdev: pdev pointer
 *
 * Return: en_chan_144 flag value
 */
bool reg_get_en_chan_144(struct wlan_objmgr_pdev *pdev);

/**
 * reg_process_ch_avoid_event() - Process channel avoid event
 * @psoc: psoc for country information
 * @ch_avoid_event: channel avoid event buffer
 *
 * Return: QDF_STATUS
 */
QDF_STATUS reg_process_ch_avoid_event(struct wlan_objmgr_psoc *psoc,
				      struct ch_avoid_ind_type *ch_avoid_event);

/**
 * reg_send_scheduler_msg_sb() - Start scheduler to call list of callbacks
 * registered whenever current chan list changes.
 * @psoc: Pointer to PSOC structure.
 * @pdev: Pointer to PDEV structure.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS reg_send_scheduler_msg_sb(struct wlan_objmgr_psoc *psoc,
				     struct wlan_objmgr_pdev *pdev);

/**
 * reg_get_hal_reg_cap() - Get HAL REG capabilities
 * @psoc: psoc for country information
 *
 * Return: hal reg cap pointer
 */
struct wlan_psoc_host_hal_reg_capabilities_ext *reg_get_hal_reg_cap(
		struct wlan_objmgr_psoc *psoc);

/**
 * reg_set_hal_reg_cap() - Set HAL REG capabilities
 * @psoc: psoc for country information
 * @reg_cap: Regulatory caps pointer
 * @phy_cnt: number of phy
 *
 * Return: QDF_STATUS
 */
QDF_STATUS reg_set_hal_reg_cap(struct wlan_objmgr_psoc *psoc,
		struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap,
		uint16_t phy_cnt);

/**
 * reg_chan_in_range() - Check if the given channel is in pdev's channel range
 * @chan_list: Pointer to regulatory channel list.
 * @low_freq_2g: Low frequency 2G.
 * @high_freq_2g: High frequency 2G.
 * @low_freq_5g: Low frequency 5G.
 * @high_freq_5g: High frequency 5G.
 * @ch_enum: Channel enum.
 *
 * Return: true if ch_enum is within pdev's channel range, else false.
 */
bool reg_chan_in_range(struct regulatory_channel *chan_list,
		       uint32_t low_freq_2g,
		       uint32_t high_freq_2g,
		       uint32_t low_freq_5g,
		       uint32_t high_freq_5g,
		       enum channel_enum ch_enum);

#endif
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/reg_services_public_struct.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/reg_services_public_struct.h
new file mode 100644
index 0000000000000000000000000000000000000000..a4ab5ac4a9642c0f9f16e303428178c8032ede5e
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/reg_services_public_struct.h
@@ -0,0 +1,1020 @@
/*
 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: reg_services_public_struct.h
 * This file contains regulatory data structures
 */

#ifndef __REG_SERVICES_PUBLIC_STRUCT_H_
#define __REG_SERVICES_PUBLIC_STRUCT_H_

#include "../../core/src/reg_db.h"

/* Center-frequency separation (MHz) used by REG_IS_CHANNEL_VALID_5G_SBS */
#define REG_SBS_SEPARATION_THRESHOLD 100
#define REG_MAX_CHANNELS_PER_OPERATING_CLASS 25
#define REG_MAX_SUPP_OPER_CLASSES 32
#define REG_MAX_CHAN_CHANGE_CBKS 30
#define MAX_STA_VDEV_CNT 4
#define INVALID_VDEV_ID 0xFF
#define INVALID_CHANNEL_NUM 0xBAD
#define CH_AVOID_MAX_RANGE 4

#ifdef CONFIG_LEGACY_CHAN_ENUM

/**
 * enum channel_enum - channel enumeration
 * @CHAN_ENUM_1: channel number 1
 * @CHAN_ENUM_2: channel number 2
 * @CHAN_ENUM_3: channel number 3
 * @CHAN_ENUM_4: channel number 4
 * @CHAN_ENUM_5: channel number 5
 * @CHAN_ENUM_6: channel number 6
 * @CHAN_ENUM_7: channel number 7
 * @CHAN_ENUM_8: channel number 8
 * @CHAN_ENUM_9: channel number 9
 * @CHAN_ENUM_10: channel number 10
 * @CHAN_ENUM_11: channel number 11
 * @CHAN_ENUM_12: channel number 12
 * @CHAN_ENUM_13: channel number 13
 * @CHAN_ENUM_14: channel number 14
 * @CHAN_ENUM_183: channel number 183
 * @CHAN_ENUM_184: channel number 184
 * @CHAN_ENUM_185: channel number 185
 * @CHAN_ENUM_187: channel number 187
 * @CHAN_ENUM_188: channel number 188
 * @CHAN_ENUM_189: channel number 189
 * @CHAN_ENUM_192: channel number 192
 * @CHAN_ENUM_196: channel number 196
 * @CHAN_ENUM_36: channel number 36
 * @CHAN_ENUM_40: channel number 40
 * @CHAN_ENUM_44: channel number 44
 * @CHAN_ENUM_48: channel number 48
 * @CHAN_ENUM_52: channel number 52
 * @CHAN_ENUM_56: channel number 56
 * @CHAN_ENUM_60: channel number 60
 * @CHAN_ENUM_64: channel number 64
 * @CHAN_ENUM_100: channel number 100
 * @CHAN_ENUM_104: channel number 104
 * @CHAN_ENUM_108: channel number 108
 * @CHAN_ENUM_112: channel number 112
 * @CHAN_ENUM_116: channel number 116
 * @CHAN_ENUM_120: channel number 120
 * @CHAN_ENUM_124: channel number 124
 * @CHAN_ENUM_128: channel number 128
 * @CHAN_ENUM_132: channel number 132
 * @CHAN_ENUM_136: channel number 136
 * @CHAN_ENUM_140: channel number 140
 * @CHAN_ENUM_144: channel number 144
 * @CHAN_ENUM_149: channel number 149
 * @CHAN_ENUM_153: channel number 153
 * @CHAN_ENUM_157: channel number 157
 * @CHAN_ENUM_161: channel number 161
 * @CHAN_ENUM_165: channel number 165
 * @CHAN_ENUM_169: channel number 169
 * @CHAN_ENUM_170: channel number 170
 * @CHAN_ENUM_171: channel number 171
 * @CHAN_ENUM_172: channel number 172
 * @CHAN_ENUM_173: channel number 173
 * @CHAN_ENUM_174: channel number 174
 * @CHAN_ENUM_175: channel number 175
 * @CHAN_ENUM_176: channel number 176
 * @CHAN_ENUM_177: channel number 177
 * @CHAN_ENUM_178: channel number 178
 * @CHAN_ENUM_179: channel number 179
 * @CHAN_ENUM_180: channel number 180
 * @CHAN_ENUM_181: channel number 181
 * @CHAN_ENUM_182: channel number 182
 * @CHAN_ENUM_183: channel number 183
 * @CHAN_ENUM_184: channel number 184
 */

#ifdef WLAN_FEATURE_DSRC
enum channel_enum {
	CHAN_ENUM_1,
	CHAN_ENUM_2,
	CHAN_ENUM_3,
	CHAN_ENUM_4,
	CHAN_ENUM_5,
	CHAN_ENUM_6,
	CHAN_ENUM_7,
	CHAN_ENUM_8,
	CHAN_ENUM_9,
	CHAN_ENUM_10,
	CHAN_ENUM_11,
	CHAN_ENUM_12,
	CHAN_ENUM_13,
	CHAN_ENUM_14,

	CHAN_ENUM_36,
	CHAN_ENUM_40,
	CHAN_ENUM_44,
	CHAN_ENUM_48,
	CHAN_ENUM_52,
	CHAN_ENUM_56,
	CHAN_ENUM_60,
	CHAN_ENUM_64,

	CHAN_ENUM_100,
	CHAN_ENUM_104,
	CHAN_ENUM_108,
	CHAN_ENUM_112,
	CHAN_ENUM_116,
	CHAN_ENUM_120,
	CHAN_ENUM_124,
	CHAN_ENUM_128,
	CHAN_ENUM_132,
	CHAN_ENUM_136,
	CHAN_ENUM_140,
	CHAN_ENUM_144,

	CHAN_ENUM_149,
	CHAN_ENUM_153,
	CHAN_ENUM_157,
	CHAN_ENUM_161,
	CHAN_ENUM_165,

	CHAN_ENUM_170,
	CHAN_ENUM_171,
	CHAN_ENUM_172,
	CHAN_ENUM_173,
	CHAN_ENUM_174,
	CHAN_ENUM_175,
	CHAN_ENUM_176,
	CHAN_ENUM_177,
	CHAN_ENUM_178,
	CHAN_ENUM_179,
	CHAN_ENUM_180,
	CHAN_ENUM_181,
	CHAN_ENUM_182,
	CHAN_ENUM_183,
	CHAN_ENUM_184,

	NUM_CHANNELS,

	MIN_24GHZ_CHANNEL = CHAN_ENUM_1,
	MAX_24GHZ_CHANNEL = CHAN_ENUM_14,
	NUM_24GHZ_CHANNELS = (MAX_24GHZ_CHANNEL - MIN_24GHZ_CHANNEL + 1),

	/*
	 * No 4.9 GHz channels in this map: min > max yields an
	 * intentionally empty range (NUM_49GHZ_CHANNELS == 0).
	 */
	MIN_49GHZ_CHANNEL = INVALID_CHANNEL_NUM,
	MAX_49GHZ_CHANNEL = INVALID_CHANNEL_NUM - 1,
	NUM_49GHZ_CHANNELS = MAX_49GHZ_CHANNEL - MIN_49GHZ_CHANNEL + 1,

	MIN_5GHZ_CHANNEL = CHAN_ENUM_36,
	MAX_5GHZ_CHANNEL = CHAN_ENUM_184,
	NUM_5GHZ_CHANNELS = (MAX_5GHZ_CHANNEL - MIN_5GHZ_CHANNEL + 1),

	MIN_DSRC_CHANNEL = CHAN_ENUM_170,
	MAX_DSRC_CHANNEL = CHAN_ENUM_184,
	NUM_DSRC_CHANNELS = (MAX_DSRC_CHANNEL - MIN_DSRC_CHANNEL + 1),

	INVALID_CHANNEL = 0xBAD,
};

#else
enum channel_enum {
	CHAN_ENUM_1,
	CHAN_ENUM_2,
	CHAN_ENUM_3,
	CHAN_ENUM_4,
	CHAN_ENUM_5,
	CHAN_ENUM_6,
	CHAN_ENUM_7,
	CHAN_ENUM_8,
	CHAN_ENUM_9,
	CHAN_ENUM_10,
	CHAN_ENUM_11,
	CHAN_ENUM_12,
	CHAN_ENUM_13,
	CHAN_ENUM_14,

	CHAN_ENUM_36,
	CHAN_ENUM_40,
	CHAN_ENUM_44,
	CHAN_ENUM_48,
	CHAN_ENUM_52,
	CHAN_ENUM_56,
	CHAN_ENUM_60,
	CHAN_ENUM_64,

	CHAN_ENUM_100,
	CHAN_ENUM_104,
	CHAN_ENUM_108,
	CHAN_ENUM_112,
	CHAN_ENUM_116,
	CHAN_ENUM_120,
	CHAN_ENUM_124,
	CHAN_ENUM_128,
	CHAN_ENUM_132,
	CHAN_ENUM_136,
	CHAN_ENUM_140,
	CHAN_ENUM_144,

	CHAN_ENUM_149,
	CHAN_ENUM_153,
	CHAN_ENUM_157,
	CHAN_ENUM_161,
	CHAN_ENUM_165,
	CHAN_ENUM_169,
	CHAN_ENUM_173,

	NUM_CHANNELS,

	MIN_24GHZ_CHANNEL = CHAN_ENUM_1,
	MAX_24GHZ_CHANNEL = CHAN_ENUM_14,
	NUM_24GHZ_CHANNELS = (MAX_24GHZ_CHANNEL - MIN_24GHZ_CHANNEL + 1),

	/*
	 * No 4.9 GHz channels in this map: min > max yields an
	 * intentionally empty range (NUM_49GHZ_CHANNELS == 0).
	 */
	MIN_49GHZ_CHANNEL = INVALID_CHANNEL_NUM,
	MAX_49GHZ_CHANNEL = INVALID_CHANNEL_NUM - 1,
	NUM_49GHZ_CHANNELS = MAX_49GHZ_CHANNEL - MIN_49GHZ_CHANNEL + 1,

	MIN_5GHZ_CHANNEL = CHAN_ENUM_36,

	MAX_5GHZ_CHANNEL = CHAN_ENUM_173,

	NUM_5GHZ_CHANNELS = (MAX_5GHZ_CHANNEL - MIN_5GHZ_CHANNEL + 1),
	INVALID_CHANNEL = 0xBAD,
};
#endif /* WLAN_FEATURE_DSRC */

#else /* CONFIG_LEGACY_CHAN_ENUM */
/**
 * enum channel_enum - channel enumeration
 * @CHAN_ENUM_2412: channel with freq 2412
 * @CHAN_ENUM_2417: channel with freq 2417
 * @CHAN_ENUM_2422: channel with freq 2422
 * @CHAN_ENUM_2427: channel with freq 2427
 * @CHAN_ENUM_2432: channel with freq 2432
 * @CHAN_ENUM_2437: channel with freq 2437
 * @CHAN_ENUM_2442: channel with freq 2442
 * @CHAN_ENUM_2447: channel with freq 2447
 * @CHAN_ENUM_2452: channel with freq 2452
 * @CHAN_ENUM_2457: channel with freq 2457
 * @CHAN_ENUM_2462: channel with freq 2462
 * @CHAN_ENUM_2467: channel with freq 2467
 * @CHAN_ENUM_2472: channel with freq 2472
 * @CHAN_ENUM_2484: channel with freq 2484
 * @CHAN_ENUM_4912: channel with freq 4912
 * @CHAN_ENUM_4915: channel with freq 4915
 * @CHAN_ENUM_4917: channel with freq 4917
 * @CHAN_ENUM_4920: channel with freq 4920
 * @CHAN_ENUM_4922: channel with freq 4922
 * @CHAN_ENUM_4925: channel with freq 4925
 * @CHAN_ENUM_4927: channel with freq 4927
 * @CHAN_ENUM_4932: channel with freq 4932
 * @CHAN_ENUM_4935: channel with freq 4935
 * @CHAN_ENUM_4937: channel with freq 4937
 * @CHAN_ENUM_4940: channel with freq 4940
 * @CHAN_ENUM_4942: channel with freq 4942
 * @CHAN_ENUM_4945: channel with freq 4945
 * @CHAN_ENUM_4947: channel with freq 4947
 * @CHAN_ENUM_4950: channel with freq 4950
 * @CHAN_ENUM_4952: channel with freq 4952
 * @CHAN_ENUM_4955: channel with freq 4955
 * @CHAN_ENUM_4957: channel with freq 4957
 * @CHAN_ENUM_4960: channel with freq 4960
 * @CHAN_ENUM_4962: channel with freq 4962
 * @CHAN_ENUM_4965: channel with freq 4965
 * @CHAN_ENUM_4967: channel with freq 4967
 * @CHAN_ENUM_4970: channel with freq 4970
 * @CHAN_ENUM_4972: channel with freq 4972
 * @CHAN_ENUM_4975: channel with freq 4975
 * @CHAN_ENUM_4977: channel with freq 4977
 * @CHAN_ENUM_4980: channel with freq 4980
 * @CHAN_ENUM_4982: channel with freq 4982
 * @CHAN_ENUM_4985: channel with freq 4985
 * @CHAN_ENUM_4987: channel with freq 4987
 * @CHAN_ENUM_5032: channel with freq 5032
 * @CHAN_ENUM_5035: channel with freq 5035
 * @CHAN_ENUM_5037: channel with freq 5037
 * @CHAN_ENUM_5040: channel with freq 5040
 * @CHAN_ENUM_5042: channel with freq 5042
 * @CHAN_ENUM_5045: channel with freq 5045
 * @CHAN_ENUM_5047: channel with freq 5047
 * @CHAN_ENUM_5052: channel with freq 5052
 * @CHAN_ENUM_5055: channel with freq 5055
 * @CHAN_ENUM_5057: channel with freq 5057
 * @CHAN_ENUM_5060: channel with freq 5060
 * @CHAN_ENUM_5080: channel with freq 5080
 * @CHAN_ENUM_5180: channel with freq 5180
 * @CHAN_ENUM_5200: channel with freq 5200
 * @CHAN_ENUM_5220: channel with freq 5220
 * @CHAN_ENUM_5240: channel with freq 5240
 * @CHAN_ENUM_5260: channel with freq 5260
 * @CHAN_ENUM_5280: channel with freq 5280
 * @CHAN_ENUM_5300: channel with freq 5300
 * @CHAN_ENUM_5320: channel with freq 5320
 * @CHAN_ENUM_5500: channel with freq 5500
 * @CHAN_ENUM_5520: channel with freq 5520
 * @CHAN_ENUM_5540: channel with freq 5540
 * @CHAN_ENUM_5560: channel with freq 5560
 * @CHAN_ENUM_5580: channel with freq 5580
 * @CHAN_ENUM_5600: channel with freq 5600
 * @CHAN_ENUM_5620: channel with freq 5620
 * @CHAN_ENUM_5640: channel with freq 5640
 * @CHAN_ENUM_5660: channel with freq 5660
 * @CHAN_ENUM_5680: channel with freq 5680
 * @CHAN_ENUM_5700: channel with freq 5700
 * @CHAN_ENUM_5720: channel with freq 5720
 * @CHAN_ENUM_5745: channel with freq 5745
 * @CHAN_ENUM_5765: channel with freq 5765
 * @CHAN_ENUM_5785: channel with freq 5785
 * @CHAN_ENUM_5805: channel with freq 5805
 * @CHAN_ENUM_5825: channel with freq 5825
 * @CHAN_ENUM_5845: channel with freq 5845
 * @CHAN_ENUM_5850: channel with freq 5850
 * @CHAN_ENUM_5855: channel with freq 5855
 * @CHAN_ENUM_5860: channel with freq 5860
 * @CHAN_ENUM_5865: channel with freq 5865
 * @CHAN_ENUM_5870: channel with freq 5870
 * @CHAN_ENUM_5875: channel with freq 5875
 * @CHAN_ENUM_5880: channel with freq 5880
 * @CHAN_ENUM_5885: channel with freq 5885
 * @CHAN_ENUM_5890: channel with freq 5890
 * @CHAN_ENUM_5895: channel with freq 5895
 * @CHAN_ENUM_5900: channel with freq 5900
 * @CHAN_ENUM_5905: channel with freq 5905
 * @CHAN_ENUM_5910: channel with freq 5910
 * @CHAN_ENUM_5915: channel with freq 5915
 * @CHAN_ENUM_5920: channel with freq 5920
 */
enum channel_enum {
	CHAN_ENUM_2412,
	CHAN_ENUM_2417,
	CHAN_ENUM_2422,
	CHAN_ENUM_2427,
	CHAN_ENUM_2432,
	CHAN_ENUM_2437,
	CHAN_ENUM_2442,
	CHAN_ENUM_2447,
	CHAN_ENUM_2452,
	CHAN_ENUM_2457,
	CHAN_ENUM_2462,
	CHAN_ENUM_2467,
	CHAN_ENUM_2472,
	CHAN_ENUM_2484,

	CHAN_ENUM_4912,
	CHAN_ENUM_4915,
	CHAN_ENUM_4917,
	CHAN_ENUM_4920,
	CHAN_ENUM_4922,
	CHAN_ENUM_4925,
	CHAN_ENUM_4927,
	CHAN_ENUM_4932,
	CHAN_ENUM_4935,
	CHAN_ENUM_4937,
	CHAN_ENUM_4940,
	CHAN_ENUM_4942,
	CHAN_ENUM_4945,
	CHAN_ENUM_4947,
	CHAN_ENUM_4950,
	CHAN_ENUM_4952,
	CHAN_ENUM_4955,
	CHAN_ENUM_4957,
	CHAN_ENUM_4960,
	CHAN_ENUM_4962,
	CHAN_ENUM_4965,
	CHAN_ENUM_4967,
	CHAN_ENUM_4970,
	CHAN_ENUM_4972,
	CHAN_ENUM_4975,
	CHAN_ENUM_4977,
	CHAN_ENUM_4980,
	CHAN_ENUM_4982,
	CHAN_ENUM_4985,
	CHAN_ENUM_4987,
	CHAN_ENUM_5032,
	CHAN_ENUM_5035,
	CHAN_ENUM_5037,
	CHAN_ENUM_5040,
	CHAN_ENUM_5042,
	CHAN_ENUM_5045,
	CHAN_ENUM_5047,
	CHAN_ENUM_5052,
	CHAN_ENUM_5055,
	CHAN_ENUM_5057,
	CHAN_ENUM_5060,
	CHAN_ENUM_5080,

	CHAN_ENUM_5180,
	CHAN_ENUM_5200,
	CHAN_ENUM_5220,
	CHAN_ENUM_5240,
	CHAN_ENUM_5260,
	CHAN_ENUM_5280,
	CHAN_ENUM_5300,
	CHAN_ENUM_5320,
	CHAN_ENUM_5500,
	CHAN_ENUM_5520,
	CHAN_ENUM_5540,
	CHAN_ENUM_5560,
	CHAN_ENUM_5580,
	CHAN_ENUM_5600,
	CHAN_ENUM_5620,
	CHAN_ENUM_5640,
	CHAN_ENUM_5660,
	CHAN_ENUM_5680,
	CHAN_ENUM_5700,
	CHAN_ENUM_5720,
	CHAN_ENUM_5745,
	CHAN_ENUM_5765,
	CHAN_ENUM_5785,
	CHAN_ENUM_5805,
	CHAN_ENUM_5825,
	CHAN_ENUM_5845,

	CHAN_ENUM_5850,
	CHAN_ENUM_5855,
	CHAN_ENUM_5860,
	CHAN_ENUM_5865,
	CHAN_ENUM_5870,
	CHAN_ENUM_5875,
	CHAN_ENUM_5880,
	CHAN_ENUM_5885,
	CHAN_ENUM_5890,
	CHAN_ENUM_5895,
	CHAN_ENUM_5900,
	CHAN_ENUM_5905,
	CHAN_ENUM_5910,
	CHAN_ENUM_5915,
	CHAN_ENUM_5920,

	NUM_CHANNELS,

	MIN_24GHZ_CHANNEL = CHAN_ENUM_2412,
	MAX_24GHZ_CHANNEL = CHAN_ENUM_2484,
	NUM_24GHZ_CHANNELS = (MAX_24GHZ_CHANNEL - MIN_24GHZ_CHANNEL + 1),

	MIN_49GHZ_CHANNEL = CHAN_ENUM_4912,
	MAX_49GHZ_CHANNEL = CHAN_ENUM_5080,
	NUM_49GHZ_CHANNELS = (MAX_49GHZ_CHANNEL - MIN_49GHZ_CHANNEL + 1),

	MIN_5GHZ_CHANNEL = CHAN_ENUM_5180,
	MAX_5GHZ_CHANNEL = CHAN_ENUM_5920,
	NUM_5GHZ_CHANNELS = (MAX_5GHZ_CHANNEL - MIN_5GHZ_CHANNEL + 1),

	MIN_DSRC_CHANNEL = CHAN_ENUM_5850,
	MAX_DSRC_CHANNEL = CHAN_ENUM_5920,
	NUM_DSRC_CHANNELS = (MAX_DSRC_CHANNEL - MIN_DSRC_CHANNEL + 1),

	INVALID_CHANNEL = 0xBAD,
};
#endif

/**
 * enum channel_state - channel state
 * @CHANNEL_STATE_DISABLE: disabled state
 * @CHANNEL_STATE_PASSIVE: passive state
 * @CHANNEL_STATE_DFS: dfs state
 * @CHANNEL_STATE_ENABLE: enabled state
 * @CHANNEL_STATE_INVALID: invalid state
 */
enum channel_state {
	CHANNEL_STATE_DISABLE,
	CHANNEL_STATE_PASSIVE,
	CHANNEL_STATE_DFS,
	CHANNEL_STATE_ENABLE,
	CHANNEL_STATE_INVALID,
};

/**
 * enum reg_domain: reg domain
 * @REGDOMAIN_FCC: FCC domain
 * @REGDOMAIN_ETSI: ETSI domain
 * @REGDOMAIN_JAPAN: JAPAN domain
 * @REGDOMAIN_WORLD: WORLD domain
 * @REGDOMAIN_COUNT: Max domain
 */
typedef enum {
	REGDOMAIN_FCC,
	REGDOMAIN_ETSI,
	REGDOMAIN_JAPAN,
	REGDOMAIN_WORLD,
	REGDOMAIN_COUNT
} v_REGDOMAIN_t;


/**
 * enum phy_ch_width - channel width
 * @CH_WIDTH_20MHZ: 20 mhz width
 * @CH_WIDTH_40MHZ: 40 mhz width
 * @CH_WIDTH_80MHZ: 80 mhz width
 * @CH_WIDTH_160MHZ: 160 mhz width
 * @CH_WIDTH_80P80MHZ: 80+80 mhz width
 * @CH_WIDTH_5MHZ: 5 mhz width
 * @CH_WIDTH_10MHZ: 10 mhz width
 * @CH_WIDTH_INVALID: invalid width
 * @CH_WIDTH_MAX: max possible width
 */
enum phy_ch_width {
	CH_WIDTH_20MHZ = 0,
	CH_WIDTH_40MHZ,
	CH_WIDTH_80MHZ,
	CH_WIDTH_160MHZ,
	CH_WIDTH_80P80MHZ,
	CH_WIDTH_5MHZ,
	CH_WIDTH_10MHZ,
	CH_WIDTH_INVALID,
	CH_WIDTH_MAX
};

/**
 * struct ch_params - channel parameters
 * @ch_width: channel width
 * @sec_ch_offset: secondary channel offset
 * @center_freq_seg0: center freq for segment 0
 * @center_freq_seg1: center freq for segment 1
 */
struct ch_params {
	enum phy_ch_width ch_width;
	uint8_t sec_ch_offset;
	uint8_t center_freq_seg0;
	uint8_t center_freq_seg1;
};

/**
 * struct channel_power - per-channel tx power
 * @chan_num: channel number
 * @tx_power: TX power
 */
struct channel_power {
	uint32_t chan_num;
	uint32_t tx_power;
};

/**
 * enum offset_t: channel offset
 * @BW20: 20 mhz channel
 * @BW40_LOW_PRIMARY: lower channel in 40 mhz
 * @BW40_HIGH_PRIMARY: higher channel in 40 mhz
 * @BW80: 80 mhz channel
 * @BWALL: unknown bandwidth
 * @BW_INVALID: invalid offset
 */
enum offset_t {
	BW20 = 0,
	BW40_LOW_PRIMARY = 1,
	BW40_HIGH_PRIMARY = 3,
	BW80,
	BWALL,
	BW_INVALID = 0xFF
};

/**
 * struct reg_dmn_op_class_map_t: operating class
 * @op_class: operating class number
 * @ch_spacing: channel spacing
 * @offset: offset
 * @channels: channel set
 */
struct reg_dmn_op_class_map_t {
	uint8_t op_class;
	uint8_t ch_spacing;
	enum offset_t offset;
	uint8_t channels[REG_MAX_CHANNELS_PER_OPERATING_CLASS];
};

/**
 * struct reg_dmn_supp_op_classes: operating classes
 * @num_classes: number of classes
 * @classes: classes
 */
struct reg_dmn_supp_op_classes {
	uint8_t num_classes;
	uint8_t classes[REG_MAX_SUPP_OPER_CLASSES];
};

/**
 * struct reg_start_11d_scan_req: start 11d scan request
 * @vdev_id: vdev id
 * @scan_period_msec: scan duration in milli-seconds
 * @start_interval_msec: offset duration to start the scan in milli-seconds
 */
struct reg_start_11d_scan_req {
	uint8_t vdev_id;
	uint32_t scan_period_msec;
	uint32_t start_interval_msec;
};

/**
 * struct reg_stop_11d_scan_req: stop 11d scan request
 * @vdev_id: vdev id
 */
struct reg_stop_11d_scan_req {
	uint8_t vdev_id;
};

/**
 * struct reg_11d_new_country: regulatory 11d new country code
 * @alpha2: new 11d alpha2
 */
struct reg_11d_new_country {
	uint8_t alpha2[REG_ALPHA2_LEN + 1];
};

/**
 * enum country_src: country source
 * @SOURCE_UNKNOWN: source unknown
 * @SOURCE_QUERY: source query
 * @SOURCE_CORE: source regulatory core
 * @SOURCE_DRIVER: source driver
 * @SOURCE_USERSPACE: source userspace
 * @SOURCE_11D: source 11D
 */
enum country_src {
	SOURCE_UNKNOWN,
	SOURCE_QUERY,
	SOURCE_CORE,
	SOURCE_DRIVER,
	SOURCE_USERSPACE,
	SOURCE_11D
};

/**
 * struct regulatory_channel - regulatory channel entry
 * @center_freq: center frequency
 * @chan_num: channel number
 * @state: channel state
 * @chan_flags: channel flags
 * @tx_power: TX powers
 * @min_bw: min bandwidth
 * @max_bw: max bandwidth
 * @ant_gain: antenna gain
 * @nol_chan: whether channel is nol
 */
struct regulatory_channel {
	uint32_t center_freq;
	uint32_t chan_num;
	enum channel_state state;
	uint32_t chan_flags;
	uint32_t tx_power;
	uint16_t min_bw;
	uint16_t max_bw;
	uint8_t ant_gain;
	bool nol_chan;
};


/**
 * struct regulatory: regulatory information
 * @reg_domain: regulatory domain pair
 * @eeprom_rd_ext: eeprom value
 * @country_code: current country in integer
 * @alpha2: current alpha2
 * @ctl_2g: 2G CTL value
 * @ctl_5g: 5G CTL value
 * @regpair: pointer to regulatory pair
 * @cc_src: country code src
 * @reg_flags: kernel regulatory flags
 */
struct regulatory {
	uint32_t reg_domain;
	uint32_t eeprom_rd_ext;
	uint16_t country_code;
	uint8_t alpha2[REG_ALPHA2_LEN + 1];
	uint8_t ctl_2g;
	uint8_t ctl_5g;
	const void *regpair;
	enum country_src cc_src;
	uint32_t reg_flags;
};

/**
 * struct chan_map - channel map entry
 * @center_freq: center freq in mhz
 * @chan_num: channel number
 * @min_bw: min bw
 * @max_bw: max bw
 */
struct chan_map {
	uint32_t center_freq;
	uint32_t chan_num;
	uint16_t min_bw;
	uint16_t max_bw;
};

/**
 * struct bonded_channel - bonded channel pair
 * @start_ch: start channel
 * @end_ch: end channel
 */
struct bonded_channel {
	uint16_t start_ch;
	uint16_t end_ch;
};

/**
 * struct set_country - set country request
 * @country: country alpha2 string
 * @pdev_id: id of the pdev the request applies to
 */
struct set_country {
	uint8_t country[REG_ALPHA2_LEN + 1];
	uint8_t pdev_id;
};
/**
 * enum ht_sec_ch_offset - HT secondary channel offset
 * @NO_SEC_CH: no secondary
 * @LOW_PRIMARY_CH: low primary
 * @HIGH_PRIMARY_CH: high primary
 */
enum ht_sec_ch_offset {
	NO_SEC_CH = 0,
	LOW_PRIMARY_CH = 1,
	HIGH_PRIMARY_CH = 3,
};

/* Result codes for a country-code set operation */
enum cc_setting_code {
	REG_SET_CC_STATUS_PASS = 0,
	REG_CURRENT_ALPHA2_NOT_FOUND = 1,
	REG_INIT_ALPHA2_NOT_FOUND = 2,
	REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
	REG_SET_CC_STATUS_NO_MEMORY = 4,
	REG_SET_CC_STATUS_FAIL = 5,
};

/**
 * struct cur_reg_rule - current regulatory rule
 * @start_freq: start frequency
 * @end_freq: end frequency
 * @max_bw: maximum bandwidth
 * @reg_power: regulatory power
 * @ant_gain: antenna gain
 * @flags: regulatory flags
 */
struct cur_reg_rule {
	uint16_t start_freq;
	uint16_t end_freq;
	uint16_t max_bw;
	uint8_t reg_power;
	uint8_t ant_gain;
	uint16_t flags;
};

/**
 * struct cur_regulatory_info
 * @psoc: psoc ptr
 * @status_code: status value
 * @num_phy: number of phy
 * @phy_id: phy id
 * @reg_dmn_pair: reg domain pair
 * @ctry_code: country code
 * @alpha2: country alpha2
 * @offload_enabled: offload enabled
 * @dfs_reg: dfs region
 * @phybitmap: phy bit map
 * @min_bw_2g: minimum 2G bw
 * @max_bw_2g: maximum 2G bw
 * @min_bw_5g: minimum 5G bw
+ * @max_bw_5g: maximum 5G bw + * @num_2g_reg_rules: number 2G reg rules + * @num_5g_reg_rules: number 5G reg rules + * @reg_rules_2g_ptr: ptr to 2G reg rules + * @reg_rules_5g_ptr: ptr to 5G reg rules + */ +struct cur_regulatory_info { + struct wlan_objmgr_psoc *psoc; + enum cc_setting_code status_code; + uint8_t num_phy; + uint8_t phy_id; + uint16_t reg_dmn_pair; + uint16_t ctry_code; + uint8_t alpha2[REG_ALPHA2_LEN + 1]; + bool offload_enabled; + enum dfs_reg dfs_region; + uint32_t phybitmap; + uint32_t min_bw_2g; + uint32_t max_bw_2g; + uint32_t min_bw_5g; + uint32_t max_bw_5g; + uint32_t num_2g_reg_rules; + uint32_t num_5g_reg_rules; + struct cur_reg_rule *reg_rules_2g_ptr; + struct cur_reg_rule *reg_rules_5g_ptr; +}; + +/** + * struct reg_rule_info + * @alpha2: alpha2 of reg rules + * @dfs_region: dfs region + * @num_of_reg_rules: number of reg rules + * @reg_rules: regulatory rules array + */ +struct reg_rule_info { + uint8_t alpha2[REG_ALPHA2_LEN + 1]; + enum dfs_reg dfs_region; + uint8_t num_of_reg_rules; + struct cur_reg_rule reg_rules[MAX_REG_RULES]; +}; + +/** + * enum band_info + * @BAND_ALL:all bands + * @BAND_2G: 2G band + * @BAND_5G: 5G band + * @BAND_UNKNOWN: Unsupported band + */ +enum band_info { + BAND_ALL, + BAND_2G, + BAND_5G, + BAND_UNKNOWN +}; + +/** + * enum restart_beaconing_on_ch_avoid_rule: control the beaconing entity to + * move away from active LTE channels + * @CH_AVOID_RULE_DO_NOT_RESTART: Do not move from active LTE + * channels + * @CH_AVOID_RULE_RESTART: Move from active LTE channels + * @CH_AVOID_RULE_RESTART_24G_ONLY: move from 2.4G active LTE + * channels only + */ +enum restart_beaconing_on_ch_avoid_rule { + CH_AVOID_RULE_DO_NOT_RESTART, + CH_AVOID_RULE_RESTART, + CH_AVOID_RULE_RESTART_24G_ONLY, +}; + +/** + * struct reg_config_vars + * @enable_11d_support: enable 11d support + * @scan_11d_interval: 11d scan interval in ms + * @userspace_ctry_priority: user priority + * @band_capability: band capability + * @dfs_disable: dfs 
disabled + * @indoor_channel_support: indoor channel support + * @force_ssc_disable_indoor_channel: Disable indoor channel on sap start + * @restart_beaconing: control the beaconing entity to move + * away from active LTE channels + * @enable_srd_chan_in_master_mode: SRD channel support in master mode + * @enable_11d_in_world_mode: enable 11d in world mode + */ +struct reg_config_vars { + uint32_t enable_11d_support; + uint32_t scan_11d_interval; + uint32_t userspace_ctry_priority; + enum band_info band_capability; + uint32_t dfs_enabled; + uint32_t indoor_chan_enabled; + uint32_t force_ssc_disable_indoor_channel; + enum restart_beaconing_on_ch_avoid_rule restart_beaconing; + bool enable_srd_chan_in_master_mode; + bool enable_11d_in_world_mode; +}; + +/** + * struct reg_freq_range + * @low_freq: low frequency + * @high_freq: high frequency + */ +struct reg_freq_range { + uint32_t low_freq; + uint32_t high_freq; +}; + +/** + * struct reg_sched_payload + * @psoc: psoc ptr + * @pdev: pdev ptr + */ +struct reg_sched_payload { + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; +}; + +/** + * enum direction + * @NORTHBOUND: northbound + * @SOUTHBOUND: southbound + */ +enum direction { + NORTHBOUND, + SOUTHBOUND, +}; + +/** + * struct mas_chan_params + * @dfs_region: dfs region + * @phybitmap: phybitmap + * @mas_chan_list: master chan list + * @default_country: default country + * @current_country: current country + * @def_region_domain: default reg domain + * @def_country_code: default country code + * @reg_dmn_pair: reg domain pair + * @ctry_code: country code + * @reg_rules: regulatory rules + */ +struct mas_chan_params { + enum dfs_reg dfs_region; + uint32_t phybitmap; + struct regulatory_channel mas_chan_list[NUM_CHANNELS]; + char default_country[REG_ALPHA2_LEN + 1]; + char current_country[REG_ALPHA2_LEN + 1]; + uint16_t def_region_domain; + uint16_t def_country_code; + uint16_t reg_dmn_pair; + uint16_t ctry_code; + struct reg_rule_info reg_rules; +}; 
+ +/** + * enum cc_regdmn_flag: Regdomain flags + * @INVALID: Invalid flag + * @CC_IS_SET: Country code is set + * @REGDMN_IS_SET: Regdomain ID is set + * @ALPHA_IS_SET: Country ISO is set + */ +enum cc_regdmn_flag { + INVALID_CC, + CC_IS_SET, + REGDMN_IS_SET, + ALPHA_IS_SET, +}; + +/** + * struct cc_regdmn_s: User country code or regdomain + * @country_code: Country code + * @regdmn_id: Regdomain pair ID + * @alpha: Country ISO + * @flags: Regdomain flags + */ +struct cc_regdmn_s { + union { + uint16_t country_code; + uint16_t regdmn_id; + uint8_t alpha[REG_ALPHA2_LEN + 1]; + } cc; + uint8_t flags; +}; + +/** + * struct cur_regdmn_info: Current regulatory info + * @regdmn_pair_id: Current regdomain pair ID + * @dmn_id_2g: 2GHz regdomain ID + * @dmn_id_5g: 5GHz regdomain ID + * @ctl_2g: 2GHz CTL value + * @ctl_5g: 5GHzCTL value + * @dfs_region: dfs region + */ +struct cur_regdmn_info { + uint16_t regdmn_pair_id; + uint16_t dmn_id_2g; + uint16_t dmn_id_5g; + uint8_t ctl_2g; + uint8_t ctl_5g; + uint8_t dfs_region; +}; + +/** + * struct ch_avoid_freq_type + * @start_freq: start freq + * @end_freq: end freq + */ +struct ch_avoid_freq_type { + uint32_t start_freq; + uint32_t end_freq; +}; + +/** + * struct ch_avoid_ind_type + * @ch_avoid_range_cnt: count + * @avoid_freq_range: avoid freq range array + */ +struct ch_avoid_ind_type { + uint32_t ch_avoid_range_cnt; + struct ch_avoid_freq_type avoid_freq_range[CH_AVOID_MAX_RANGE]; +}; + +/** + * struct unsafe_ch_list + * @ch_cnt: no.of channels + * @ch_list: channel list + */ +struct unsafe_ch_list { + uint16_t ch_cnt; + uint16_t ch_list[NUM_CHANNELS]; +}; + +/** + * struct avoid_freq_ind_data + * @freq_list: frequency list + * @chan_list: channel list + */ +struct avoid_freq_ind_data { + struct ch_avoid_ind_type freq_list; + struct unsafe_ch_list chan_list; +}; + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_services_api.h 
b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_services_api.h new file mode 100644 index 0000000000000000000000000000000000000000..10a873c06cc0aab9665de143782dc44795fb63ea --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_services_api.h @@ -0,0 +1,568 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_reg_services_api.h + * This file provides prototypes of the routines needed for the + * external components to utilize the services provided by the + * regulatory component. 
+ */ + +#ifndef __WLAN_REG_SERVICES_API_H +#define __WLAN_REG_SERVICES_API_H + +#include "../../core/src/reg_services.h" +#include + + +#define WLAN_REG_MIN_24GHZ_CH_NUM REG_MIN_24GHZ_CH_NUM +#define WLAN_REG_MAX_24GHZ_CH_NUM REG_MAX_24GHZ_CH_NUM +#define WLAN_REG_MIN_5GHZ_CH_NUM REG_MIN_5GHZ_CH_NUM +#define WLAN_REG_MAX_5GHZ_CH_NUM REG_MAX_5GHZ_CH_NUM + +#define WLAN_REG_IS_24GHZ_CH(chan) REG_IS_24GHZ_CH(chan) +#define WLAN_REG_IS_5GHZ_CH(chan) REG_IS_5GHZ_CH(chan) + +#define WLAN_REG_IS_24GHZ_CH_FREQ(freq) REG_IS_24GHZ_CH_FREQ(freq) +#define WLAN_REG_IS_5GHZ_CH_FREQ(freq) REG_IS_5GHZ_FREQ(freq) + +#ifndef CONFIG_LEGACY_CHAN_ENUM +#define WLAN_REG_IS_49GHZ_FREQ(freq) REG_IS_49GHZ_FREQ(freq) +#endif + +#define WLAN_REG_CH_NUM(ch_enum) REG_CH_NUM(ch_enum) +#define WLAN_REG_CH_TO_FREQ(ch_enum) REG_CH_TO_FREQ(ch_enum) + +#define WLAN_REG_IS_SAME_BAND_CHANNELS(chan_num1, chan_num2) \ + (chan_num1 && chan_num2 && \ + (WLAN_REG_IS_5GHZ_CH(chan_num1) == WLAN_REG_IS_5GHZ_CH(chan_num2))) + + +#define WLAN_REG_IS_CHANNEL_VALID_5G_SBS(curchan, newchan) \ + (curchan > newchan ? \ + REG_CH_TO_FREQ(reg_get_chan_enum(curchan)) \ + - REG_CH_TO_FREQ(reg_get_chan_enum(newchan)) \ + > REG_SBS_SEPARATION_THRESHOLD : \ + REG_CH_TO_FREQ(reg_get_chan_enum(newchan)) \ + - REG_CH_TO_FREQ(reg_get_chan_enum(curchan)) \ + > REG_SBS_SEPARATION_THRESHOLD) + +#define WLAN_REG_INVALID_CHANNEL_ID +#define WLAN_REG_GET_24_END_CHAN_NUM 14 + +#define WLAN_REG_CHAN_TO_BAND(chan_num) reg_chan_to_band(chan_num) + +/** + * wlan_reg_get_channel_list_with_power() - Provide the channel list with power + * @ch_list: pointer to the channel list. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_reg_get_channel_list_with_power(struct wlan_objmgr_pdev *pdev, + struct channel_power *ch_list, + uint8_t *num_chan); + +/** + * wlan_reg_read_default_country() - Read the default country for the regdomain + * @country: pointer to the country code. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_reg_read_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country); + +/** + * wlan_reg_get_fcc_constraint() - Check FCC constraint on given frequency + * @pdev: physical dev to get + * @freq: frequency to be checked + * + * Return: If FCC constraint is on applied given frequency return true + * else return false. + */ +bool wlan_reg_get_fcc_constraint(struct wlan_objmgr_pdev *pdev, uint32_t freq); + +/** + * wlan_reg_read_current_country() - Read the current country for the regdomain + * @country: pointer to the country code. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_reg_read_current_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country); + +/** + * wlan_reg_get_channel_state() - Get channel state from regulatory + * @ch: channel number. + * + * Return: channel state + */ +enum channel_state wlan_reg_get_channel_state(struct wlan_objmgr_pdev *pdev, + uint32_t ch); + +/** + * wlan_reg_chan_has_dfs_attribute() - check channel has dfs attribute flag + * @ch: channel number. + * + * This API get chan initial dfs attribute from regdomain + * + * Return: true if chan is dfs, otherwise false + */ +bool +wlan_reg_chan_has_dfs_attribute(struct wlan_objmgr_pdev *pdev, uint32_t ch); + +/** + * wlan_reg_get_5g_bonded_channel_state() - Get 5G bonded channel state + * @pdev: The physical dev to program country code or regdomain + * @ch: channel number. + * @bw: channel band width + * + * Return: channel state + */ +enum channel_state wlan_reg_get_5g_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, uint8_t ch, + enum phy_ch_width bw); + +/** + * wlan_reg_get_2g_bonded_channel_state() - Get 2G bonded channel state + * @pdev: The physical dev to program country code or regdomain + * @ch: channel number. + * @sec_ch: Secondary channel. 
+ * @bw: channel band width + * + * Return: channel state + */ +enum channel_state wlan_reg_get_2g_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, uint8_t ch, + uint8_t sec_ch, enum phy_ch_width bw); + +/** + * wlan_reg_set_channel_params () - Sets channel parameters for given bandwidth + * @pdev: The physical dev to program country code or regdomain + * @ch: channel number. + * @sec_ch_2g: Secondary channel. + * @ch_params: pointer to the channel parameters. + * + * Return: None + */ +void wlan_reg_set_channel_params(struct wlan_objmgr_pdev *pdev, uint8_t ch, + uint8_t sec_ch_2g, + struct ch_params *ch_params); + +/** + * wlan_reg_get_dfs_region () - Get the current dfs region + * @dfs_reg: pointer to dfs region + * + * Return: Status + */ +QDF_STATUS wlan_reg_get_dfs_region(struct wlan_objmgr_pdev *pdev, + enum dfs_reg *dfs_reg); + +/** + * wlan_reg_get_channel_reg_power() - Provide the channel regulatory power + * @chan_num: channel number + * + * Return: int + */ +uint32_t wlan_reg_get_channel_reg_power(struct wlan_objmgr_pdev *pdev, + uint32_t chan_num); + +/** + * wlan_reg_get_channel_freq() - provide the channel center freq + * @chan_num: channel number + * + * Return: int + */ +uint32_t wlan_reg_get_channel_freq(struct wlan_objmgr_pdev *pdev, + uint32_t chan_num); + +/** + * wlan_reg_get_current_chan_list() - provide the pdev current channel list + * @pdev: pdev pointer + * @chan_list: channel list pointer + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_reg_get_current_chan_list(struct wlan_objmgr_pdev *pdev, + struct regulatory_channel *chan_list); +/** + * wlan_reg_get_bonded_channel_state() - get bonded channel state + * @pdev: pdev ptr + * @ch: channel number + * @bw: channel width + * @sec_ch: secondary channel + * + * Return: enum channel_state + */ +enum channel_state wlan_reg_get_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, uint8_t ch, + enum phy_ch_width bw, uint8_t sec_ch); + +/** + * wlan_reg_set_dfs_region() - set the dfs 
region + * @pdev: pdev ptr + * @dfs_reg: dfs region + * + * Return: void + */ +void wlan_reg_set_dfs_region(struct wlan_objmgr_pdev *pdev, + enum dfs_reg dfs_reg); + +/** + * wlan_reg_get_bw_value() - provide the channel center freq + * @chan_num: chennal number + * + * Return: int + */ +uint16_t wlan_reg_get_bw_value(enum phy_ch_width bw); + +/** + * wlan_reg_get_domain_from_country_code() - provide the channel center freq + * @reg_domain_ptr: regulatory domain ptr + * @country_alpha2: country alpha2 + * @source: alpha2 source + * + * Return: int + */ +QDF_STATUS wlan_reg_get_domain_from_country_code(v_REGDOMAIN_t *reg_domain_ptr, + const uint8_t *country_alpha2, + enum country_src source); + +/** + * wlan_reg_dmn_get_opclass_from_channel() - provide the channel center freq + * @country: country alpha2 + * @channel: channel number + * @offset: offset + * + * Return: int + */ +uint16_t wlan_reg_dmn_get_opclass_from_channel(uint8_t *country, + uint8_t channel, + uint8_t offset); + +/** + * wlan_reg_dmn_get_chanwidth_from_opclass() - get channel width from + * operating class + * @country: country alpha2 + * @channel: channel number + * @opclass: operating class + * + * Return: int + */ +uint16_t wlan_reg_dmn_get_chanwidth_from_opclass(uint8_t *country, + uint8_t channel, + uint8_t opclass); +/** + * wlan_reg_dmn_set_curr_opclasses() - set operating class + * @num_classes: number of classes + * @class: operating class + * + * Return: int + */ +uint16_t wlan_reg_dmn_set_curr_opclasses(uint8_t num_classes, + uint8_t *class); + +/** + * wlan_reg_dmn_get_curr_opclasses() - get current oper classes + * @num_classes: number of classes + * @class: operating class + * + * Return: int + */ +uint16_t wlan_reg_dmn_get_curr_opclasses(uint8_t *num_classes, + uint8_t *class); + + +/** + * wlan_regulatory_init() - init regulatory component + * + * Return: Success or Failure + */ +QDF_STATUS wlan_regulatory_init(void); + +/** + * wlan_regulatory_deinit() - deinit regulatory 
component + * + * Return: Success or Failure + */ +QDF_STATUS wlan_regulatory_deinit(void); + +/** + * regulatory_psoc_open() - open regulatory component + * + * Return: Success or Failure + */ +QDF_STATUS regulatory_psoc_open(struct wlan_objmgr_psoc *psoc); + + +/** + * regulatory_psoc_close() - close regulatory component + * + * Return: Success or Failure + */ +QDF_STATUS regulatory_psoc_close(struct wlan_objmgr_psoc *psoc); + +/** + * regulatory_pdev_open() - Open regulatory component + * @pdev: Pointer to pdev structure. + * + * Return: Success or Failure + */ +QDF_STATUS regulatory_pdev_open(struct wlan_objmgr_pdev *pdev); + +/** + * regulatory_pdev_close() - Close regulatory component + * @pdev: Pointer to pdev structure. + * + * Return: Success or Failure + */ +QDF_STATUS regulatory_pdev_close(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_reg_update_nol_ch () - set nol channel + * @pdev: pdev ptr + * @ch_list: channel list to be returned + * @num_ch: number of channels + * @nol_ch: nol flag + * + * Return: void + */ +void wlan_reg_update_nol_ch(struct wlan_objmgr_pdev *pdev, + uint8_t *ch_list, + uint8_t num_ch, + bool nol_ch); + +/** + * wlan_reg_is_dfs_ch () - Checks the channel state for DFS + * @pdev: pdev ptr + * @chan: channel + * + * Return: true or false + */ +bool wlan_reg_is_dfs_ch(struct wlan_objmgr_pdev *pdev, uint32_t chan); + +/** + * wlan_reg_is_dsrc_chan () - Checks if the channel is dsrc channel or not + * @pdev: pdev ptr + * @chan_num: channel + * + * Return: true or false + */ +bool wlan_reg_is_dsrc_chan(struct wlan_objmgr_pdev *pdev, uint8_t chan_num); + +/** + * wlan_reg_is_etsi13_srd_chan () - Checks if the ch is ETSI13 srd ch or not + * @pdev: pdev ptr + * @chan_num: channel + * + * Return: true or false + */ +bool wlan_reg_is_etsi13_srd_chan(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num); + +/** + * wlan_reg_is_etsi13_regdmn() - Checks if current reg domain is ETSI13 or not + * @pdev: pdev ptr + * + * Return: true or false + */ 
+bool wlan_reg_is_etsi13_regdmn(struct wlan_objmgr_pdev *pdev); + +/** + * wlan_reg_is_etsi13_srd_chan_allowed_master_mode() - Checks if regdmn is + * ETSI13 and SRD channels are allowed in master mode or not. + * + * @pdev: pdev ptr + * + * Return: true or false + */ +bool wlan_reg_is_etsi13_srd_chan_allowed_master_mode(struct wlan_objmgr_pdev + *pdev); + +/** + * wlan_reg_is_passive_or_disable_ch () - Checks chan state for passive + * and disabled + * @pdev: pdev ptr + * @chan: channel + * + * Return: true or false + */ +bool wlan_reg_is_passive_or_disable_ch(struct wlan_objmgr_pdev *pdev, + uint32_t chan); + +/** + * wlan_reg_is_disable_ch () - Checks chan state for disabled + * @pdev: pdev ptr + * @chan: channel + * + * Return: true or false + */ +bool wlan_reg_is_disable_ch(struct wlan_objmgr_pdev *pdev, uint32_t chan); + +/** + * wlan_reg_freq_to_chan () - convert channel freq to channel number + * @pdev: The physical dev to set current country for + * @freq: frequency + * + * Return: true or false + */ +uint32_t wlan_reg_freq_to_chan(struct wlan_objmgr_pdev *pdev, + uint32_t freq); + +/** + * wlan_reg_chan_to_freq () - convert channel number to frequency + * @chan: channel number + * + * Return: true or false + */ +uint32_t wlan_reg_chan_to_freq(struct wlan_objmgr_pdev *pdev, + uint32_t chan); +/** + * wlan_reg_is_world() - reg is world mode + * @country: The country information + * + * Return: true or false + */ +bool wlan_reg_is_world(uint8_t *country); + +/** + * wlan_reg_is_us() - reg is us country + * @country: The country information + * + * Return: true or false + */ +bool wlan_reg_is_us(uint8_t *country); + +/** + * wlan_reg_chan_is_49ghz() - Check if the input channel number is 4.9GHz + * @pdev: Pdev pointer + * @chan_num: Input channel number + * + * Return: true if the channel is 4.9GHz else false. 
+ */ + +bool wlan_reg_chan_is_49ghz(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num); + +/** + * wlan_reg_set_country() - Set the current regulatory country + * @pdev: The physical dev to set current country for + * @country: The country information to configure + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_reg_set_country(struct wlan_objmgr_pdev *pdev, + uint8_t *country); + +/** + * wlan_reg_set_11d_country() - Set the 11d regulatory country + * @pdev: The physical dev to set current country for + * @country: The country information to configure + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_reg_set_11d_country(struct wlan_objmgr_pdev *pdev, + uint8_t *country); + +/** + * wlan_reg_register_chan_change_callback () - add chan change cbk + * @psoc: psoc ptr + * @cbk: callback + * @arg: argument + * + * Return: true or false + */ +void wlan_reg_register_chan_change_callback(struct wlan_objmgr_psoc *psoc, + reg_chan_change_callback cbk, + void *arg); + +/** + * wlan_reg_unregister_chan_change_callback () - remove chan change cbk + * @psoc: psoc ptr + * @cbk:callback + * + * Return: true or false + */ +void wlan_reg_unregister_chan_change_callback(struct wlan_objmgr_psoc *psoc, + reg_chan_change_callback cbk); + +/** + * wlan_reg_11d_original_enabled_on_host() - 11d original enabled don host + * @psoc: psoc ptr + * + * Return: bool + */ +bool wlan_reg_11d_original_enabled_on_host(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_reg_11d_enabled_on_host() - 11d enabled don host + * @psoc: psoc ptr + * + * Return: bool + */ +bool wlan_reg_11d_enabled_on_host(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_reg_get_chip_mode() - get supported chip mode + * @pdev: pdev pointer + * @chip_mode: chip mode + * + * Return: QDF STATUS + */ +QDF_STATUS wlan_reg_get_chip_mode(struct wlan_objmgr_pdev *pdev, + uint32_t *chip_mode); + +/** + * wlan_reg_is_11d_scan_inprogress() - checks 11d scan status + * @psoc: psoc ptr + * + * Return: bool + */ +bool 
wlan_reg_is_11d_scan_inprogress(struct wlan_objmgr_psoc *psoc); +/** + * wlan_reg_get_freq_range() - Get 2GHz and 5GHz frequency range + * @pdev: pdev pointer + * @low_2g: low 2GHz frequency range + * @high_2g: high 2GHz frequency range + * @low_5g: low 5GHz frequency range + * @high_5g: high 5GHz frequency range + * + * Return: QDF status + */ +QDF_STATUS wlan_reg_get_freq_range(struct wlan_objmgr_pdev *pdev, + uint32_t *low_2g, + uint32_t *high_2g, + uint32_t *low_5g, + uint32_t *high_5g); +/** + * wlan_reg_get_tx_ops () - get regulatory tx ops + * @psoc: psoc ptr + * + */ +struct wlan_lmac_if_reg_tx_ops * +wlan_reg_get_tx_ops(struct wlan_objmgr_psoc *psoc); + +/** + * wlan_reg_get_curr_regdomain() - Get current regdomain in use + * @pdev: pdev pointer + * @cur_regdmn: Current regdomain info + * + * Return: QDF status + */ +QDF_STATUS wlan_reg_get_curr_regdomain(struct wlan_objmgr_pdev *pdev, + struct cur_regdmn_info *cur_regdmn); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..6338430d2cafa58e00ed8733032fb5e69575ec26 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_tgt_api.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_reg_tgt_api.h + * This file provides prototypes of the regulatory component target + * interface routines + */ + +#ifndef __WLAN_REG_TGT_API_H +#define __WLAN_REG_TGT_API_H + +#include +#include +#include + +QDF_STATUS tgt_reg_process_master_chan_list(struct cur_regulatory_info + *reg_info); + +/** + * tgt_reg_process_11d_new_country() - process new 11d country event + * @psoc: pointer to psoc + * @reg_11d_new_cc: new 11d country pointer + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_reg_process_11d_new_country(struct wlan_objmgr_psoc *psoc, + struct reg_11d_new_country *reg_11d_new_cc); + +/** + * tgt_reg_set_regdb_offloaded() - set/clear regulatory offloaded flag + * + * @psoc: psoc pointer + * Return: Success or Failure + */ +QDF_STATUS tgt_reg_set_regdb_offloaded(struct wlan_objmgr_psoc *psoc, + bool val); + +/** + * tgt_reg_set_11d_offloaded() - set/clear 11d offloaded flag + * + * @psoc: psoc pointer + * Return: Success or Failure + */ +QDF_STATUS tgt_reg_set_11d_offloaded(struct wlan_objmgr_psoc *psoc, + bool val); +/** + * tgt_reg_process_ch_avoid_event() - process new ch avoid event + * @psoc: pointer to psoc + * @ch_avoid_evnt: channel avoid event + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_reg_process_ch_avoid_event(struct wlan_objmgr_psoc *psoc, + struct ch_avoid_ind_type *ch_avoid_evnt); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..fa097dfbb66fa2b273832297213dd2b09e582313 --- 
/dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/inc/wlan_reg_ucfg_api.h @@ -0,0 +1,348 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_reg_ucfg_api.h + * This file provides prototypes of the regulatory component user + * config interface routines + */ + +#ifndef __WLAN_REG_UCFG_API_H +#define __WLAN_REG_UCFG_API_H + +#include +#include +#include "../../core/src/reg_services.h" +#include + +typedef QDF_STATUS (*reg_event_cb)(void *status_struct); + +/** + * ucfg_reg_set_band() - Sets the band information for the PDEV + * @pdev: The physical pdev to set the band for + * @band: The set band parameter to configure for the pysical device + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_band(struct wlan_objmgr_pdev *pdev, + enum band_info band); + +/** + * ucfg_reg_notify_sap_event() - Notify regulatory domain for sap event + * @pdev: The physical dev to set the band for + * @sap_state: true for sap start else false + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_notify_sap_event(struct wlan_objmgr_pdev *pdev, + bool sap_state); + +/** + * ucfg_reg_cache_channel_state() - Cache the current state of the channles + * @pdev: The 
physical dev to cache the channels for + * @channel_list: List of the channels for which states needs to be cached + * @num_channels: Number of channels in the list + * + * Return: QDF_STATUS + */ +#ifdef DISABLE_CHANNEL_LIST +void ucfg_reg_cache_channel_state(struct wlan_objmgr_pdev *pdev, + uint32_t *channel_list, + uint32_t num_channels); +#else +static inline +void ucfg_reg_cache_channel_state(struct wlan_objmgr_pdev *pdev, + uint32_t *channel_list, + uint32_t num_channels) +{ +} +#endif + +/** + * ucfg_reg_restore_cached_channels() - Cache the current state of the channles + * @pdev: The physical dev to cache the channels for + * + * Return: QDF_STATUS + */ +#ifdef DISABLE_CHANNEL_LIST +void ucfg_reg_restore_cached_channels(struct wlan_objmgr_pdev *pdev); +#else +static inline +void ucfg_reg_restore_cached_channels(struct wlan_objmgr_pdev *pdev) +{ +} +#endif + +/** + * ucfg_reg_set_fcc_constraint() - apply fcc constraints on channels 12/13 + * @pdev: The physical pdev to reduce tx power for + * + * This function adjusts the transmit power on channels 12 and 13, to comply + * with FCC regulations in the USA. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_fcc_constraint(struct wlan_objmgr_pdev *pdev, + bool fcc_constraint); + +/** + * ucfg_reg_get_default_country() - Get the default regulatory country + * @psoc: The physical SoC to get default country from + * @country_code: the buffer to populate the country code into + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_get_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country_code); + +/** + * ucfg_reg_get_current_country() - Get the current regulatory country + * @psoc: The physical SoC to get current country from + * @country_code: the buffer to populate the country code into + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_get_current_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country_code); +/** + * ucfg_reg_set_default_country() - Set the default regulatory country + * @psoc: The physical SoC to set default country for + * @country_code: The country information to configure + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country_code); + +/** + * ucfg_reg_set_country() - Set the current regulatory country + * @pdev: The physical dev to set current country for + * @country_code: The country information to configure + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_country(struct wlan_objmgr_pdev *dev, + uint8_t *country_code); + +/** + * ucfg_reg_reset_country() - Reset the regulatory country to default + * @psoc: The physical SoC to reset country for + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_reset_country(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_reg_get_curr_band() - Get the current band capability + * @pdev: The physical dev to get default country from + * @band: buffer to populate the band into + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_get_curr_band(struct wlan_objmgr_pdev *pdev, + enum band_info *band); +/** + * ucfg_reg_enable_dfs_channels() - Enable the use of DFS channels + 
* @pdev: The physical dev to enable DFS channels for + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_enable_dfs_channels(struct wlan_objmgr_pdev *pdev, + bool dfs_enable); + +QDF_STATUS ucfg_reg_register_event_handler(uint8_t vdev_id, reg_event_cb cb, + void *arg); +QDF_STATUS ucfg_reg_unregister_event_handler(uint8_t vdev_id, reg_event_cb cb, + void *arg); +QDF_STATUS ucfg_reg_init_handler(uint8_t pdev_id); + +QDF_STATUS ucfg_reg_program_default_cc(struct wlan_objmgr_pdev *pdev, + uint16_t regdmn); + +/** + * ucfg_reg_program_cc() - Program user country code or regdomain + * @pdev: The physical dev to program country code or regdomain + * @rd: User country code or regdomain + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_program_cc(struct wlan_objmgr_pdev *pdev, + struct cc_regdmn_s *rd); + +/** + * ucfg_reg_get_current_cc() - get current country code or regdomain + * @pdev: The physical dev to program country code or regdomain + * @rd: Pointer to country code or regdomain + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_get_current_cc(struct wlan_objmgr_pdev *pdev, + struct cc_regdmn_s *rd); + +/** + * ucfg_reg_set_config_vars () - Set the config vars in reg component + * @psoc: psoc ptr + * @config_vars: config variables structure + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_config_vars(struct wlan_objmgr_psoc *psoc, + struct reg_config_vars config_vars); + +/** + * ucfg_reg_get_current_chan_list () - get current channel list + * @pdev: pdev ptr + * @chan_list: channel list + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_get_current_chan_list(struct wlan_objmgr_pdev *pdev, + struct regulatory_channel *chan_list); + +/** + * ucfg_reg_modify_chan_144() - Enable/Disable channel 144 + * @pdev: pdev pointer + * @enable_chan_144: flag to disable/enable channel 144 + * + * Return: Success or Failure + */ +QDF_STATUS ucfg_reg_modify_chan_144(struct wlan_objmgr_pdev *pdev, + bool enable_ch_144); + +/** + * ucfg_reg_get_en_chan_144() - get 
en_chan_144 flag value + * @pdev: pdev pointer + * + * Return: en_chan_144 flag value + */ +bool ucfg_reg_get_en_chan_144(struct wlan_objmgr_pdev *pdev); + +/** + * ucfg_reg_is_regdb_offloaded () - is regulatory database offloaded + * @psoc: psoc ptr + * + * Return: bool + */ +bool ucfg_reg_is_regdb_offloaded(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_reg_program_mas_chan_list () - program master channel list + * @psoc: psoc ptr + * @reg_channels: regulatory channels + * @alpha2: country code + * @dfs_region: dfs region + * + * Return: void + */ +void ucfg_reg_program_mas_chan_list(struct wlan_objmgr_psoc *psoc, + struct regulatory_channel *reg_channels, + uint8_t *alpha2, + enum dfs_reg dfs_region); + +/** + * ucfg_reg_get_regd_rules() - provides the reg domain rules info pointer + * @pdev: pdev ptr + * @reg_rules: regulatory rules + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_get_regd_rules(struct wlan_objmgr_pdev *pdev, + struct reg_rule_info *reg_rules); + +/** + * ucfg_reg_register_chan_change_callback () - add chan change cbk + * @psoc: psoc ptr + * @cbk: callback + * @arg: argument + * + * Return: void + */ +void ucfg_reg_register_chan_change_callback(struct wlan_objmgr_psoc *psoc, + reg_chan_change_callback cbk, + void *arg); + +/** + * ucfg_reg_unregister_chan_change_callback () - remove chan change cbk + * @psoc: psoc ptr + * @cbk: callback + * + * Return: void + */ +void ucfg_reg_unregister_chan_change_callback(struct wlan_objmgr_psoc *psoc, + reg_chan_change_callback cbk); + +/** + * ucfg_reg_get_cc_and_src () - get country code and src + * @psoc: psoc ptr + * @alpha2: country code alpha2 + * + * Return: void + */ +enum country_src ucfg_reg_get_cc_and_src(struct wlan_objmgr_psoc *psoc, + uint8_t *alpha2); + +/** + * ucfg_reg_unit_simulate_ch_avoid () - fake a ch avoid event + * @psoc: psoc ptr + * @ch_avoid: ch_avoid_ind_type ranges + * + * This function inject a ch_avoid event for unit test sap chan switch. 
+ * + * Return: void + */ +void ucfg_reg_unit_simulate_ch_avoid(struct wlan_objmgr_psoc *psoc, + struct ch_avoid_ind_type *ch_avoid); + +/** + * ucfg_reg_11d_vdev_delete_update() - update vdev delete to regulatory + * @vdev: vdev ptr + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_11d_vdev_delete_update(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_reg_11d_vdev_created_update() - update vdev create to regulatory + * @vdev: vdev ptr + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_11d_vdev_created_update(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_reg_get_hal_reg_cap() - return hal reg cap + * @psoc: psoc ptr + * + * Return: ptr to wlan_psoc_host_hal_reg_capabilities_ext + */ +struct wlan_psoc_host_hal_reg_capabilities_ext *ucfg_reg_get_hal_reg_cap( + struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_reg_set_hal_reg_cap() - update hal reg cap + * @psoc: psoc ptr + * @reg_cap: Regulatory cap array + * @phy_cnt: Number of phy + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_hal_reg_cap(struct wlan_objmgr_psoc *psoc, + struct wlan_psoc_host_hal_reg_capabilities_ext *reg_cap, + uint16_t phy_cnt); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_services_api.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_services_api.c new file mode 100644 index 0000000000000000000000000000000000000000..edbb781e5632ee74f18dabd86bb8a316a23f8e14 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_services_api.c @@ -0,0 +1,602 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * @file wlan_reg_services_api.c + * @brief contains regulatory service functions + */ + + +#include +#include +#include +#include "../../core/src/reg_services.h" +#include "../../core/src/reg_priv.h" +#include "../../core/src/reg_db_parser.h" + +/** + * wlan_reg_get_channel_list_with_power() - Provide the channel list with power + * @ch_list: pointer to the channel list. + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_reg_get_channel_list_with_power(struct wlan_objmgr_pdev *pdev, + struct channel_power *ch_list, + uint8_t *num_chan) +{ + /* + * Update the channel list with channel information with power. + */ + return reg_get_channel_list_with_power(pdev, ch_list, num_chan); +} + +/** + * wlan_reg_read_default_country() - Read the default country for the regdomain + * @country: pointer to the country code. + * + * Return: None + */ +QDF_STATUS wlan_reg_read_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country) +{ + /* + * Get the default country information + */ + return reg_read_default_country(psoc, country); +} + +QDF_STATUS wlan_reg_read_current_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country) +{ + /* + * Get the current country information + */ + return reg_read_current_country(psoc, country); +} + +/** + * wlan_reg_get_channel_state() - Get channel state from regulatory + * @ch: channel number. 
+ * + * Return: channel state + */ +enum channel_state wlan_reg_get_channel_state(struct wlan_objmgr_pdev *pdev, + uint32_t ch) +{ + /* + * Get channel state from regulatory + */ + return reg_get_channel_state(pdev, ch); +} + +bool +wlan_reg_chan_has_dfs_attribute(struct wlan_objmgr_pdev *pdev, uint32_t ch) +{ + return reg_chan_has_dfs_attribute(pdev, ch); +} + +/** + * wlan_reg_get_5g_bonded_channel_state() - Get 5G bonded channel state + * @ch: channel number. + * @bw: channel band width + * + * Return: channel state + */ +enum channel_state wlan_reg_get_5g_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, uint8_t ch, + enum phy_ch_width bw) +{ + /* + * Get channel state from regulatory + */ + return reg_get_5g_bonded_channel_state(pdev, ch, bw); +} + +/** + * wlan_reg_get_2g_bonded_channel_state() - Get 2G bonded channel state + * @ch: channel number. + * @bw: channel band width + * + * Return: channel state + */ +enum channel_state wlan_reg_get_2g_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, uint8_t ch, + uint8_t sec_ch, enum phy_ch_width bw) +{ + /* + * Get channel state from regulatory + */ + return reg_get_2g_bonded_channel_state(pdev, ch, sec_ch, bw); +} + +/** + * wlan_reg_set_channel_params() - Sets channel parameteres for given bandwidth + * @ch: channel number. + * @ch_params: pointer to the channel parameters. + * + * Return: None + */ +void wlan_reg_set_channel_params(struct wlan_objmgr_pdev *pdev, uint8_t ch, + uint8_t sec_ch_2g, + struct ch_params *ch_params) +{ + /* + * Set channel parameters like center frequency for a bonded channel + * state. Also return the maximum bandwidth supported by the channel. 
+ */ + reg_set_channel_params(pdev, ch, sec_ch_2g, ch_params); +} + +/** + * wlan_reg_get_dfs_region () - Get the current dfs region + * @dfs_reg: pointer to dfs region + * + * Return: Status + */ +QDF_STATUS wlan_reg_get_dfs_region(struct wlan_objmgr_pdev *pdev, + enum dfs_reg *dfs_reg) +{ + /* + * Get the current dfs region + */ + reg_get_current_dfs_region(pdev, dfs_reg); + + return QDF_STATUS_SUCCESS; +} + +uint32_t wlan_reg_get_channel_reg_power(struct wlan_objmgr_pdev *pdev, + uint32_t chan_num) +{ + return reg_get_channel_reg_power(pdev, chan_num); +} + +/** + * wlan_reg_get_channel_freq() - get regulatory power for channel + * @chan_num: channel number + * + * Return: int + */ +uint32_t wlan_reg_get_channel_freq(struct wlan_objmgr_pdev *pdev, + uint32_t chan_num) +{ + return reg_get_channel_freq(pdev, chan_num); +} + +QDF_STATUS wlan_reg_get_current_chan_list(struct wlan_objmgr_pdev *pdev, + struct regulatory_channel *chan_list) +{ + return reg_get_current_chan_list(pdev, chan_list); +} + +/** + * wlan_reg_get_bw_value() - give bandwidth value + * bw: bandwidth enum + * + * Return: uint16_t + */ +uint16_t wlan_reg_get_bw_value(enum phy_ch_width bw) +{ + return reg_get_bw_value(bw); +} + +/** + * wlan_reg_get_bonded_channel_state() - Get 2G bonded channel state + * @ch: channel number. 
+ * @bw: channel band width + * + * Return: channel state + */ +enum channel_state wlan_reg_get_bonded_channel_state( + struct wlan_objmgr_pdev *pdev, uint8_t ch, + enum phy_ch_width bw, uint8_t sec_ch) +{ + if (WLAN_REG_IS_24GHZ_CH(ch)) + return reg_get_2g_bonded_channel_state(pdev, ch, + sec_ch, bw); + else + return reg_get_5g_bonded_channel_state(pdev, ch, + bw); +} + +/** + * wlan_reg_set_dfs_region () - Get the current dfs region + * @dfs_reg: pointer to dfs region + * + * Return: None + */ +void wlan_reg_set_dfs_region(struct wlan_objmgr_pdev *pdev, + enum dfs_reg dfs_reg) +{ + reg_set_dfs_region(pdev, dfs_reg); +} + +QDF_STATUS wlan_reg_get_domain_from_country_code(v_REGDOMAIN_t *reg_domain_ptr, + const uint8_t *country_alpha2, enum country_src source) +{ + + return reg_get_domain_from_country_code(reg_domain_ptr, + country_alpha2, source); +} + + +uint16_t wlan_reg_dmn_get_opclass_from_channel(uint8_t *country, + uint8_t channel, + uint8_t offset) +{ + return reg_dmn_get_opclass_from_channel(country, channel, + offset); +} + +uint16_t wlan_reg_dmn_get_chanwidth_from_opclass(uint8_t *country, + uint8_t channel, + uint8_t opclass) +{ + return reg_dmn_get_chanwidth_from_opclass(country, channel, + opclass); +} + +uint16_t wlan_reg_dmn_set_curr_opclasses(uint8_t num_classes, + uint8_t *class) +{ + return reg_dmn_set_curr_opclasses(num_classes, class); +} + +uint16_t wlan_reg_dmn_get_curr_opclasses(uint8_t *num_classes, + uint8_t *class) +{ + return reg_dmn_get_curr_opclasses(num_classes, class); +} + +QDF_STATUS wlan_regulatory_init(void) +{ + QDF_STATUS status; + + status = wlan_objmgr_register_psoc_create_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_psoc_obj_created_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + reg_err("failed to register reg psoc obj create handler"); + return status; + } + + status = wlan_objmgr_register_psoc_destroy_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_psoc_obj_destroyed_notification, NULL); + 
if (status != QDF_STATUS_SUCCESS) { + reg_err("failed to register reg psoc obj create handler"); + goto unreg_psoc_create; + } + + status = wlan_objmgr_register_pdev_create_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_pdev_obj_created_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + reg_err("failed to register reg psoc obj create handler"); + goto unreg_psoc_destroy; + } + + status = wlan_objmgr_register_pdev_destroy_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_pdev_obj_destroyed_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + reg_err("failed to register reg psoc obj create handler"); + goto unreg_pdev_create; + } + + reg_debug("regulatory handlers registered with obj mgr"); + + return status; + +unreg_pdev_create: + status = wlan_objmgr_unregister_pdev_create_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_pdev_obj_created_notification, + NULL); + +unreg_psoc_destroy: + status = wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_psoc_obj_destroyed_notification, + NULL); + +unreg_psoc_create: + status = wlan_objmgr_unregister_psoc_create_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_psoc_obj_created_notification, + NULL); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wlan_regulatory_deinit(void) +{ + QDF_STATUS status, ret_status = QDF_STATUS_SUCCESS; + + status = wlan_objmgr_unregister_pdev_destroy_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_pdev_obj_destroyed_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + reg_err("failed to unregister reg pdev obj destroy handler"); + ret_status = status; + } + + status = wlan_objmgr_unregister_pdev_create_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_pdev_obj_created_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + reg_err("failed to unregister reg pdev obj create handler"); + ret_status = status; + } + + status = wlan_objmgr_unregister_psoc_destroy_handler( + 
WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_psoc_obj_destroyed_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + reg_err("failed to unregister reg psoc obj destroy handler"); + ret_status = status; + } + + status = wlan_objmgr_unregister_psoc_create_handler( + WLAN_UMAC_COMP_REGULATORY, + wlan_regulatory_psoc_obj_created_notification, NULL); + if (status != QDF_STATUS_SUCCESS) { + reg_err("failed to unregister reg psoc obj create handler"); + ret_status = status; + } + + reg_debug("deregistered callbacks with obj mgr"); + + return ret_status; +} + +QDF_STATUS regulatory_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_reg_tx_ops *tx_ops; + + tx_ops = reg_get_psoc_tx_ops(psoc); + if (tx_ops->register_master_handler) + tx_ops->register_master_handler(psoc, NULL); + if (tx_ops->register_11d_new_cc_handler) + tx_ops->register_11d_new_cc_handler(psoc, NULL); + if (tx_ops->register_ch_avoid_event_handler) + tx_ops->register_ch_avoid_event_handler(psoc, NULL); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS regulatory_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_reg_tx_ops *tx_ops; + + tx_ops = reg_get_psoc_tx_ops(psoc); + if (tx_ops->unregister_11d_new_cc_handler) + tx_ops->unregister_11d_new_cc_handler(psoc, NULL); + if (tx_ops->unregister_master_handler) + tx_ops->unregister_master_handler(psoc, NULL); + if (tx_ops->unregister_ch_avoid_event_handler) + tx_ops->unregister_ch_avoid_event_handler(psoc, NULL); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS regulatory_pdev_open(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *parent_psoc; + QDF_STATUS status; + + parent_psoc = wlan_pdev_get_psoc(pdev); + + status = reg_send_scheduler_msg_sb(parent_psoc, pdev); + + if (QDF_IS_STATUS_ERROR(status)) + reg_err("scheduler send msg failed"); + + return status; +} + +QDF_STATUS regulatory_pdev_close(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + struct wlan_regulatory_psoc_priv_obj *soc_reg; + + 
psoc = wlan_pdev_get_psoc(pdev); + soc_reg = reg_get_psoc_obj(psoc); + if (!soc_reg) { + reg_err("reg psoc private obj is NULL"); + return QDF_STATUS_E_FAULT; + } + + reg_reset_ctry_pending_hints(soc_reg); + + return QDF_STATUS_SUCCESS; +} + +void wlan_reg_update_nol_ch(struct wlan_objmgr_pdev *pdev, uint8_t *ch_list, + uint8_t num_ch, bool nol_ch) +{ + reg_update_nol_ch(pdev, ch_list, num_ch, nol_ch); +} + +bool wlan_reg_is_dfs_ch(struct wlan_objmgr_pdev *pdev, + uint32_t chan) +{ + return reg_is_dfs_ch(pdev, chan); +} + +bool wlan_reg_is_passive_or_disable_ch(struct wlan_objmgr_pdev *pdev, + uint32_t chan) +{ + return reg_is_passive_or_disable_ch(pdev, chan); +} + +bool wlan_reg_is_disable_ch(struct wlan_objmgr_pdev *pdev, + uint32_t chan) +{ + return reg_is_disable_ch(pdev, chan); +} + +uint32_t wlan_reg_freq_to_chan(struct wlan_objmgr_pdev *pdev, + uint32_t freq) +{ + return reg_freq_to_chan(pdev, freq); +} + +uint32_t wlan_reg_chan_to_freq(struct wlan_objmgr_pdev *pdev, + uint32_t chan_num) +{ + return reg_chan_to_freq(pdev, chan_num); +} + +bool wlan_reg_chan_is_49ghz(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num) +{ + return reg_chan_is_49ghz(pdev, chan_num); +} + +QDF_STATUS wlan_reg_set_country(struct wlan_objmgr_pdev *pdev, + uint8_t *country) +{ + return reg_set_country(pdev, country); +} + +QDF_STATUS wlan_reg_set_11d_country(struct wlan_objmgr_pdev *pdev, + uint8_t *country) +{ + return reg_set_11d_country(pdev, country); +} + +bool wlan_reg_is_world(uint8_t *country) +{ + return reg_is_world_alpha2(country); +} + +bool wlan_reg_is_us(uint8_t *country) +{ + return reg_is_us_alpha2(country); +} + +void wlan_reg_register_chan_change_callback(struct wlan_objmgr_psoc *psoc, + reg_chan_change_callback cbk, + void *arg) +{ + reg_register_chan_change_callback(psoc, cbk, arg); + +} + +void wlan_reg_unregister_chan_change_callback(struct wlan_objmgr_psoc *psoc, + reg_chan_change_callback cbk) +{ + reg_unregister_chan_change_callback(psoc, cbk); +} + +bool 
wlan_reg_11d_original_enabled_on_host(struct wlan_objmgr_psoc *psoc) +{ + return reg_11d_original_enabled_on_host(psoc); +} + +bool wlan_reg_11d_enabled_on_host(struct wlan_objmgr_psoc *psoc) +{ + return reg_11d_enabled_on_host(psoc); +} + +bool wlan_reg_is_dsrc_chan(struct wlan_objmgr_pdev *pdev, uint8_t chan_num) +{ + return reg_is_dsrc_chan(pdev, chan_num); +} + +bool wlan_reg_is_etsi13_srd_chan(struct wlan_objmgr_pdev *pdev, + uint8_t chan_num) +{ + return reg_is_etsi13_srd_chan(pdev, chan_num); +} + +bool wlan_reg_is_etsi13_regdmn(struct wlan_objmgr_pdev *pdev) +{ + return reg_is_etsi13_regdmn(pdev); +} + +bool wlan_reg_is_etsi13_srd_chan_allowed_master_mode(struct wlan_objmgr_pdev + *pdev) +{ + return reg_is_etsi13_srd_chan_allowed_master_mode(pdev); +} + +bool wlan_reg_get_fcc_constraint(struct wlan_objmgr_pdev *pdev, uint32_t freq) +{ + return reg_get_fcc_constraint(pdev, freq); +} + +QDF_STATUS wlan_reg_get_chip_mode(struct wlan_objmgr_pdev *pdev, + uint32_t *chip_mode) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == pdev_priv_obj) { + reg_err("reg pdev private obj is NULL"); + return QDF_STATUS_E_FAULT; + } + + *chip_mode = pdev_priv_obj->wireless_modes; + + return QDF_STATUS_SUCCESS; +} + +bool wlan_reg_is_11d_scan_inprogress(struct wlan_objmgr_psoc *psoc) +{ + return reg_is_11d_scan_inprogress(psoc); +} + +QDF_STATUS wlan_reg_get_freq_range(struct wlan_objmgr_pdev *pdev, + uint32_t *low_2g, + uint32_t *high_2g, + uint32_t *low_5g, + uint32_t *high_5g) +{ + struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj; + + pdev_priv_obj = wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_REGULATORY); + + if (NULL == pdev_priv_obj) { + reg_err("reg pdev private obj is NULL"); + return QDF_STATUS_E_FAULT; + } + + *low_2g = pdev_priv_obj->range_2g_low; + *high_2g = pdev_priv_obj->range_2g_high; + *low_5g = pdev_priv_obj->range_5g_low; + 
*high_5g = pdev_priv_obj->range_5g_high; + + return QDF_STATUS_SUCCESS; +} + +struct wlan_lmac_if_reg_tx_ops * +wlan_reg_get_tx_ops(struct wlan_objmgr_psoc *psoc) +{ + return reg_get_psoc_tx_ops(psoc); +} + +QDF_STATUS wlan_reg_get_curr_regdomain(struct wlan_objmgr_pdev *pdev, + struct cur_regdmn_info *cur_regdmn) +{ + return reg_get_curr_regdomain(pdev, cur_regdmn); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..0c03d4af3b269b265fd825c62643a8efbb5aa4b7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_tgt_api.c @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + + /** + * @file wlan_req_tgt_api.c + * @brief contains regulatory target interface definations + */ + +#include +#include +#include +#include "../../core/src/reg_services.h" + +/** + * tgt_process_master_chan_list() - process master channel list + * @reg_info: regulatory info + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_reg_process_master_chan_list(struct cur_regulatory_info + *reg_info) +{ + return reg_process_master_chan_list(reg_info); +} + +QDF_STATUS tgt_reg_process_11d_new_country(struct wlan_objmgr_psoc *psoc, + struct reg_11d_new_country *reg_11d_new_cc) +{ + return reg_save_new_11d_country(psoc, reg_11d_new_cc->alpha2); +} + +QDF_STATUS tgt_reg_set_regdb_offloaded(struct wlan_objmgr_psoc *psoc, + bool val) +{ + return reg_set_regdb_offloaded(psoc, val); +} + +QDF_STATUS tgt_reg_set_11d_offloaded(struct wlan_objmgr_psoc *psoc, + bool val) +{ + return reg_set_11d_offloaded(psoc, val); +} + +QDF_STATUS tgt_reg_process_ch_avoid_event(struct wlan_objmgr_psoc *psoc, + struct ch_avoid_ind_type *ch_avoid_evnt) +{ + return reg_process_ch_avoid_event(psoc, ch_avoid_evnt); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..0663f04253dea75b6c4cd052b579c38dbf341544 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/regulatory/dispatcher/src/wlan_reg_ucfg_api.c @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + + /** + * @file wlan_req_ucfg_api.c + * @brief contains regulatory user config interface definations + */ + +#include +#include "../../core/src/reg_services.h" +#include + +QDF_STATUS ucfg_reg_register_event_handler(uint8_t vdev_id, reg_event_cb cb, + void *arg) +{ + /* Register a event cb handler */ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_reg_unregister_event_handler(uint8_t vdev_id, reg_event_cb cb, + void *arg) +{ + /* unregister a event cb handler */ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_reg_init_handler(uint8_t pdev_id) +{ + /* regulatory initialization handler */ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_reg_get_current_chan_list(struct wlan_objmgr_pdev *pdev, + struct regulatory_channel *chan_list) +{ + return reg_get_current_chan_list(pdev, chan_list); +} + +QDF_STATUS ucfg_reg_modify_chan_144(struct wlan_objmgr_pdev *pdev, + bool enable_ch_144) +{ + return reg_modify_chan_144(pdev, enable_ch_144); +} + +bool ucfg_reg_get_en_chan_144(struct wlan_objmgr_pdev *pdev) +{ + return reg_get_en_chan_144(pdev); +} + +QDF_STATUS ucfg_reg_set_config_vars(struct wlan_objmgr_psoc *psoc, + struct reg_config_vars config_vars) +{ + return reg_set_config_vars(psoc, config_vars); +} + +bool ucfg_reg_is_regdb_offloaded(struct wlan_objmgr_psoc *psoc) +{ + return reg_is_regdb_offloaded(psoc); +} + +void ucfg_reg_program_mas_chan_list(struct wlan_objmgr_psoc *psoc, + struct regulatory_channel *reg_channels, + uint8_t *alpha2, + 
enum dfs_reg dfs_region) +{ + reg_program_mas_chan_list(psoc, reg_channels, alpha2, dfs_region); +} + +QDF_STATUS ucfg_reg_get_regd_rules(struct wlan_objmgr_pdev *pdev, + struct reg_rule_info *reg_rules) +{ + return reg_get_regd_rules(pdev, reg_rules); +} + +QDF_STATUS ucfg_reg_program_default_cc(struct wlan_objmgr_pdev *pdev, + uint16_t regdmn) +{ + return reg_program_default_cc(pdev, regdmn); +} + +QDF_STATUS ucfg_reg_program_cc(struct wlan_objmgr_pdev *pdev, + struct cc_regdmn_s *rd) +{ + return reg_program_chan_list(pdev, rd); +} + +QDF_STATUS ucfg_reg_get_current_cc(struct wlan_objmgr_pdev *pdev, + struct cc_regdmn_s *rd) +{ + return reg_get_current_cc(pdev, rd); +} + +/** + * ucfg_reg_set_band() - Sets the band information for the PDEV + * @pdev: The physical pdev to set the band for + * @band: The set band parameter to configure for the pysical device + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_band(struct wlan_objmgr_pdev *pdev, + enum band_info band) +{ + return reg_set_band(pdev, band); +} + +/** + * ucfg_reg_notify_sap_event() - Notify regulatory domain for sap event + * @pdev: The physical dev to set the band for + * @sap_state: true for sap start else false + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_notify_sap_event(struct wlan_objmgr_pdev *pdev, + bool sap_state) +{ + return reg_notify_sap_event(pdev, sap_state); +} + +#ifdef DISABLE_CHANNEL_LIST +/** + * ucfg_reg_cache_channel_state() - Cache the current state of the channles + * @pdev: The physical dev to cache the channels for + * @channel_list: List of the channels for which states needs to be cached + * @num_channels: Number of channels in the list + * + */ +void ucfg_reg_cache_channel_state(struct wlan_objmgr_pdev *pdev, + uint32_t *channel_list, uint32_t num_channels) +{ + reg_cache_channel_state(pdev, channel_list, num_channels); +} + +/** + * ucfg_reg_restore_cached_channels() - Cache the current state of the channles + * @pdev: The physical dev to cache the channels 
for + */ +void ucfg_reg_restore_cached_channels(struct wlan_objmgr_pdev *pdev) +{ + reg_restore_cached_channels(pdev); +} +#endif + +/** + * ucfg_reg_set_fcc_constraint() - apply fcc constraints on channels 12/13 + * @pdev: The physical pdev to reduce tx power for + * + * This function adjusts the transmit power on channels 12 and 13, to comply + * with FCC regulations in the USA. + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_fcc_constraint(struct wlan_objmgr_pdev *pdev, + bool fcc_constraint) +{ + return reg_set_fcc_constraint(pdev, fcc_constraint); +} + + +/** + * ucfg_reg_get_default_country() - Get the default regulatory country + * @psoc: The physical SoC to get default country from + * @country_code: the buffer to populate the country code into + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_get_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country_code) +{ + return reg_read_default_country(psoc, country_code); +} + +QDF_STATUS ucfg_reg_get_current_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country_code) +{ + return reg_read_current_country(psoc, country_code); +} +/** + * ucfg_reg_set_default_country() - Set the default regulatory country + * @psoc: The physical SoC to set default country for + * @country: The country information to configure + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_default_country(struct wlan_objmgr_psoc *psoc, + uint8_t *country) +{ + return reg_set_default_country(psoc, country); +} + +/** + * ucfg_reg_set_country() - Set the current regulatory country + * @pdev: The physical dev to set current country for + * @country: The country information to configure + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_set_country(struct wlan_objmgr_pdev *pdev, + uint8_t *country) +{ + return reg_set_country(pdev, country); +} + +/** + * ucfg_reg_reset_country() - Reset the regulatory country to default + * @psoc: The physical SoC to reset country for + * + * Return: QDF_STATUS + */ +QDF_STATUS 
ucfg_reg_reset_country(struct wlan_objmgr_psoc *psoc) +{ + return reg_reset_country(psoc); +} + +/** + * ucfg_reg_enable_dfs_channels() - Enable the use of DFS channels + * @pdev: The physical dev to enable DFS channels for + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_reg_enable_dfs_channels(struct wlan_objmgr_pdev *pdev, + bool dfs_enable) +{ + return reg_enable_dfs_channels(pdev, dfs_enable); +} + +QDF_STATUS ucfg_reg_get_curr_band(struct wlan_objmgr_pdev *pdev, + enum band_info *band) +{ + return reg_get_curr_band(pdev, band); + +} + +void ucfg_reg_register_chan_change_callback(struct wlan_objmgr_psoc *psoc, + reg_chan_change_callback cbk, + void *arg) +{ + reg_register_chan_change_callback(psoc, cbk, arg); +} + +void ucfg_reg_unregister_chan_change_callback(struct wlan_objmgr_psoc *psoc, + reg_chan_change_callback cbk) +{ + reg_unregister_chan_change_callback(psoc, cbk); +} + +enum country_src ucfg_reg_get_cc_and_src(struct wlan_objmgr_psoc *psoc, + uint8_t *alpha2) +{ + return reg_get_cc_and_src(psoc, alpha2); +} + +void ucfg_reg_unit_simulate_ch_avoid(struct wlan_objmgr_psoc *psoc, + struct ch_avoid_ind_type *ch_avoid) +{ + reg_process_ch_avoid_event(psoc, ch_avoid); +} + +QDF_STATUS ucfg_reg_11d_vdev_delete_update(struct wlan_objmgr_vdev *vdev) +{ + return reg_11d_vdev_delete_update(vdev); +} + +QDF_STATUS ucfg_reg_11d_vdev_created_update(struct wlan_objmgr_vdev *vdev) +{ + return reg_11d_vdev_created_update(vdev); +} + +struct wlan_psoc_host_hal_reg_capabilities_ext *ucfg_reg_get_hal_reg_cap( + struct wlan_objmgr_psoc *psoc) +{ + return reg_get_hal_reg_cap(psoc); +} +qdf_export_symbol(ucfg_reg_get_hal_reg_cap); + +QDF_STATUS ucfg_reg_set_hal_reg_cap(struct wlan_objmgr_psoc *psoc, + struct wlan_psoc_host_hal_reg_capabilities_ext *hal_reg_cap, + uint16_t phy_cnt) + +{ + return reg_set_hal_reg_cap(psoc, hal_reg_cap, phy_cnt); +} +qdf_export_symbol(ucfg_reg_set_hal_reg_cap); diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_11d.c 
b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_11d.c new file mode 100644 index 0000000000000000000000000000000000000000..dd1f48cb1cc68c54fa9b3b30b1261fdf3f41dae5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_11d.c @@ -0,0 +1,352 @@ +/* + * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/* + * DOC: contains scan 11d api and functionality + */ +#include +#include +#include +#include +#include +#include +#include "wlan_scan_main.h" +#include "wlan_scan_11d.h" +#include "wlan_reg_services_api.h" +#include "wlan_reg_ucfg_api.h" + +/** + * wlan_pdevid_get_cc_db() - private API to get cc db from pdev id + * @psoc: psoc object + * @pdev_id: pdev id + * + * Return: cc db for the pdev id + */ +static struct scan_country_code_db * +wlan_pdevid_get_cc_db(struct wlan_objmgr_psoc *psoc, uint8_t pdev_id) +{ + struct wlan_scan_obj *scan_obj; + + if (pdev_id > WLAN_UMAC_MAX_PDEVS) { + scm_err("invalid pdev_id %d", pdev_id); + return NULL; + } + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return NULL; + + return &scan_obj->cc_db[pdev_id]; +} + +/** + * wlan_pdev_get_cc_db() - private API to get cc db from pdev + * @psoc: psoc object + * @pdev: Pdev object + * + * Return: cc db for the pdev + */ +static struct scan_country_code_db * +wlan_pdev_get_cc_db(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + uint8_t pdev_id; + + if (!pdev) { + scm_err("pdev is NULL"); + return NULL; + } + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + return wlan_pdevid_get_cc_db(psoc, pdev_id); +} + +/** + * scm_11d_elected_country_algo_fcc - private api to get cc per fcc algo + * @cc_db: scan country code db + * + * Return: true or false + */ +static bool +scm_11d_elected_country_algo_fcc(struct scan_country_code_db *cc_db) +{ + uint8_t i; + uint8_t country_idx; + uint16_t max_votes; + bool found = false; + + if (!cc_db->num_country_codes) { + scm_err("No AP with 11d Country code is present in scan list"); + return false; + } + + max_votes = cc_db->votes[0].votes; + if (wlan_reg_is_us(cc_db->votes[0].cc)) { + found = true; + country_idx = 0; + goto algo_done; + } else if (max_votes >= MIN_11D_AP_COUNT) { + found = true; + country_idx = 0; + } + + for (i = 1; i < cc_db->num_country_codes; i++) { + if (wlan_reg_is_us(cc_db->votes[i].cc)) { 
+ found = true; + country_idx = i; + goto algo_done; + } + + if ((max_votes < cc_db->votes[i].votes) && + (cc_db->votes[i].votes >= MIN_11D_AP_COUNT)) { + scm_debug("Votes for Country %c%c : %d", + cc_db->votes[i].cc[0], + cc_db->votes[i].cc[1], + cc_db->votes[i].votes); + max_votes = cc_db->votes[i].votes; + country_idx = i; + found = true; + } + } + +algo_done: + if (found) { + qdf_mem_copy(cc_db->elected_cc, + cc_db->votes[country_idx].cc, + REG_ALPHA2_LEN + 1); + + scm_debug("Selected Country is %c%c With count %d", + cc_db->votes[country_idx].cc[0], + cc_db->votes[country_idx].cc[1], + cc_db->votes[country_idx].votes); + } + + return found; +} + +/** + * scm_11d_elected_country_info - private api to get cc + * @cc_db: scan country code db + * + * Return: true or false + */ +static bool +scm_11d_elected_country_info(struct scan_country_code_db *cc_db) +{ + uint8_t i, j = 0; + /* votes counter is uint16_t in scan_country_code_votes; a uint8_t + * accumulator would truncate counts above 255. + */ + uint16_t max_votes; + + if (!cc_db->num_country_codes) { + scm_err("No AP with 11d Country code is present in scan list"); + return false; + } + + max_votes = cc_db->votes[0].votes; + + for (i = 1; i < cc_db->num_country_codes; i++) { + /* + * If we have a tie for max votes for 2 different country codes, + * pick random. 
+ */ + if (max_votes < cc_db->votes[i].votes) { + scm_debug("Votes for Country %c%c : %d", + cc_db->votes[i].cc[0], + cc_db->votes[i].cc[1], + cc_db->votes[i].votes); + + max_votes = cc_db->votes[i].votes; + j = i; + } + } + + qdf_mem_copy(cc_db->elected_cc, cc_db->votes[j].cc, + REG_ALPHA2_LEN + 1); + + scm_debug("Selected Country is %c%c With count %d", + cc_db->votes[j].cc[0], + cc_db->votes[j].cc[1], + cc_db->votes[j].votes); + + return true; +} + +/** + * scm_11d_set_country_code - private api to set cc per 11d learning + * @pdev: pdev object + * @elected_cc: elected country code + * @current_cc: current country code + * + * Return: true or false + */ +static bool +scm_11d_set_country_code(struct wlan_objmgr_pdev *pdev, + uint8_t *elected_cc, uint8_t *current_cc) +{ + scm_debug("elected country %c%c, current country %c%c", + elected_cc[0], elected_cc[1], current_cc[0], current_cc[1]); + + if (!qdf_mem_cmp(elected_cc, current_cc, REG_ALPHA2_LEN + 1)) + return true; + + wlan_reg_set_11d_country(pdev, elected_cc); + return true; +} + +/** + * scm_11d_reset_cc_db - reset the country code db + * @cc_db: the pointer of country code db + * + * Return: void + */ +static void scm_11d_reset_cc_db(struct scan_country_code_db *cc_db) +{ + qdf_mem_zero(cc_db->votes, sizeof(cc_db->votes)); + qdf_mem_zero(cc_db->elected_cc, sizeof(cc_db->elected_cc)); + cc_db->num_country_codes = 0; +} + +QDF_STATUS scm_11d_cc_db_init(struct wlan_objmgr_psoc *psoc) +{ + struct scan_country_code_db *cc_db; + struct wlan_scan_obj *scan_obj; + + if (!psoc) { + scm_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("scan_obj is NULL"); + return QDF_STATUS_E_INVAL; + } + + cc_db = (struct scan_country_code_db *)qdf_mem_malloc_atomic( + sizeof(struct scan_country_code_db) * WLAN_UMAC_MAX_PDEVS); + if (!cc_db) { + scm_err("alloc country code db error"); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_zero(cc_db, + 
sizeof(struct scan_country_code_db) * + WLAN_UMAC_MAX_PDEVS); + + scan_obj->cc_db = cc_db; + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scm_11d_cc_db_deinit(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + if (!psoc) { + scm_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("scan_obj is NULL"); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_free(scan_obj->cc_db); + return QDF_STATUS_SUCCESS; +} + +void scm_11d_handle_country_info(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_entry) +{ + uint8_t i; + bool match = false; + uint8_t num_country_codes; + struct scan_country_code_db *cc_db; + struct wlan_country_ie *cc_ie; + + cc_ie = util_scan_entry_country(scan_entry); + if (!cc_ie) + return; + + cc_db = wlan_pdev_get_cc_db(psoc, pdev); + if (!cc_db) + return; + + /* just to be sure, convert to UPPER case here */ + for (i = 0; i < 3; i++) + cc_ie->cc[i] = qdf_toupper(cc_ie->cc[i]); + + num_country_codes = cc_db->num_country_codes; + for (i = 0; i < num_country_codes; i++) { + match = !qdf_mem_cmp(cc_db->votes[i].cc, cc_ie->cc, + REG_ALPHA2_LEN); + if (match) + break; + } + + if (match) { + cc_db->votes[i].votes++; + return; + } + + if (num_country_codes >= SCAN_MAX_NUM_COUNTRY_CODE) { + scm_debug("country code db already full: %d", + num_country_codes); + return; + } + + /* add country code to end of the list */ + qdf_mem_copy(cc_db->votes[num_country_codes].cc, cc_ie->cc, + REG_ALPHA2_LEN + 1); + cc_db->votes[num_country_codes].votes = 1; + cc_db->num_country_codes++; +} + +void scm_11d_decide_country_code(struct wlan_objmgr_vdev *vdev) +{ + uint8_t current_cc[REG_ALPHA2_LEN + 1]; + bool found; + struct scan_country_code_db *cc_db; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + struct wlan_objmgr_psoc *psoc = wlan_pdev_get_psoc(pdev); + + if (!wlan_reg_11d_enabled_on_host(psoc)) + return; + + if 
(SOURCE_UNKNOWN == ucfg_reg_get_cc_and_src(psoc, current_cc)) { + scm_err("fail to get current country code"); + return; + } + + cc_db = wlan_pdev_get_cc_db(psoc, pdev); + if (!cc_db) { + scm_err("scan_db is NULL"); + return; + } + + if (wlan_reg_is_us(current_cc) || wlan_reg_is_world(current_cc)) + found = scm_11d_elected_country_algo_fcc(cc_db); + else + found = scm_11d_elected_country_info(cc_db); + + if (found) + scm_11d_set_country_code(pdev, cc_db->elected_cc, + current_cc); + scm_11d_reset_cc_db(cc_db); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_11d.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_11d.h new file mode 100644 index 0000000000000000000000000000000000000000..9ec31b7fc068bd7c73c2608b7b19aba9f82e142b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_11d.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/* + * DOC: contains scan 11d entry api + */ + +#ifndef _WLAN_SCAN_11D_H_ +#define _WLAN_SCAN_11D_H_ + +#define SCAN_MAX_NUM_COUNTRY_CODE 100 +#define MIN_11D_AP_COUNT 3 + +/** + * struct scan_country_code_votes - votes to country code mapping structure + * @votes: votes + * @cc: country code + */ +struct scan_country_code_votes { + uint16_t votes; + uint8_t cc[REG_ALPHA2_LEN + 1]; +}; + +/** + * struct scan_country_code_db - country code data base definition + * @elected_cc: elected country code + * @num_country_codes: number of country codes encountered + * @votes: votes to country code mapping array + */ +struct scan_country_code_db { + uint8_t elected_cc[REG_ALPHA2_LEN + 1]; + uint8_t num_country_codes; + struct scan_country_code_votes votes[SCAN_MAX_NUM_COUNTRY_CODE]; +}; + +/** + * scm_11d_cc_db_init() - API to init 11d country code db + * @psoc: psoc object + * + * Initialize the country code database. + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_11d_cc_db_init(struct wlan_objmgr_psoc *psoc); + +/** + * scm_11d_cc_db_deinit() - API to deinit 11d country code db + * @psoc: psoc object + * + * free the country code database. + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_11d_cc_db_deinit(struct wlan_objmgr_psoc *psoc); + +/** + * scm_11d_handle_country_info() - API to handle 11d country info + * @psoc: psoc object + * @pdev: pdev object + * @scan_entry: the pointer to scan entry + * + * Update the country code database per the country code from country IE. + * + * Return: void + */ +void scm_11d_handle_country_info(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_entry); + +/** + * scm_11d_decide_country_code() - API to decide the country code per 11d + * @vdev: vdev object + * + * Decide which country will be elected from the country database. If one + * candidate country is found, then it sets the country code. 
+ * + * Return: void + */ +void scm_11d_decide_country_code(struct wlan_objmgr_vdev *vdev); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_bss_score.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_bss_score.c new file mode 100644 index 0000000000000000000000000000000000000000..6d7be6112791efa315d9e3d798203f7099e97dc0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_bss_score.c @@ -0,0 +1,842 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ +/* + * DOC: contains scan bss scoring logic + */ + +#include +#include "wlan_scan_main.h" +#include "wlan_scan_cache_db_i.h" +#ifdef WLAN_POLICY_MGR_ENABLE +#include "wlan_policy_mgr_api.h" +#endif +#include "wlan_reg_services_api.h" + +#define SCM_20MHZ_BW_INDEX 0 +#define SCM_40MHZ_BW_INDEX 1 +#define SCM_80MHZ_BW_INDEX 2 +#define SCM_160MHZ_BW_INDEX 3 +#define SCM_MAX_BW_INDEX 4 + +#define SCM_NSS_1x1_INDEX 0 +#define SCM_NSS_2x2_INDEX 1 +#define SCM_NSS_3x3_INDEX 2 +#define SCM_NSS_4x4_INDEX 3 +#define SCM_MAX_NSS_INDEX 4 + +#define SCM_BAND_2G_INDEX 0 +#define SCM_BAND_5G_INDEX 1 +/* 2 and 3 are reserved */ +#define SCM_MAX_BAND_INDEX 4 + +#define SCM_SCORE_INDEX_0 0 +#define SCM_SCORE_INDEX_3 3 +#define SCM_SCORE_INDEX_7 7 +#define SCM_SCORE_OFFSET_INDEX_7_4 4 +#define SCM_SCORE_INDEX_11 11 +#define SCM_SCORE_OFFSET_INDEX_11_8 8 +#define SCM_SCORE_MAX_INDEX 15 +#define SCM_SCORE_OFFSET_INDEX_15_12 12 + +#define SCM_MAX_OCE_WAN_DL_CAP 16 + +#define SCM_MAX_CHANNEL_WEIGHT 100 +#define SCM_MAX_CHANNEL_UTILIZATION 100 +#define SCM_MAX_ESTIMATED_AIR_TIME_FRACTION 255 +#define MAX_AP_LOAD 255 + +#define SCM_MAX_WEIGHT_OF_PCL_CHANNELS 255 +#define SCM_PCL_GROUPS_WEIGHT_DIFFERENCE 20 + +bool scm_is_better_bss(struct scan_default_params *params, + struct scan_cache_entry *bss1, + struct scan_cache_entry *bss2) +{ + if (bss1->bss_score > bss2->bss_score) + return true; + else if (bss1->bss_score == bss2->bss_score) + if (bss1->rssi_raw > bss2->rssi_raw) + return true; + + return false; +} + +/** + * scm_limit_max_per_index_score() -check if per index score does not exceed + * 100% (0x64). 
If it exceed make it 100% + * + * @per_index_score: per_index_score as input + * + * Return: per_index_score within the max limit + */ +static uint32_t scm_limit_max_per_index_score(uint32_t per_index_score) +{ + uint8_t i, score; + + for (i = 0; i < MAX_INDEX_PER_INI; i++) { + score = WLAN_GET_SCORE_PERCENTAGE(per_index_score, i); + if (score > MAX_INDEX_SCORE) + WLAN_SET_SCORE_PERCENTAGE(per_index_score, + MAX_INDEX_SCORE, i); + } + + return per_index_score; +} + +void scm_validate_scoring_config(struct scoring_config *score_cfg) +{ + int total_weight; + + total_weight = score_cfg->weight_cfg.rssi_weightage + + score_cfg->weight_cfg.ht_caps_weightage + + score_cfg->weight_cfg.vht_caps_weightage + + score_cfg->weight_cfg.chan_width_weightage + + score_cfg->weight_cfg.chan_band_weightage + + score_cfg->weight_cfg.nss_weightage + + score_cfg->weight_cfg.beamforming_cap_weightage + + score_cfg->weight_cfg.pcl_weightage + + score_cfg->weight_cfg.channel_congestion_weightage + + score_cfg->weight_cfg.oce_wan_weightage; + + if (total_weight > BEST_CANDIDATE_MAX_WEIGHT) { + + scm_err("total weight is greater than %d fallback to default values", + BEST_CANDIDATE_MAX_WEIGHT); + + score_cfg->weight_cfg.rssi_weightage = RSSI_WEIGHTAGE; + score_cfg->weight_cfg.ht_caps_weightage = + HT_CAPABILITY_WEIGHTAGE; + score_cfg->weight_cfg.vht_caps_weightage = VHT_CAP_WEIGHTAGE; + score_cfg->weight_cfg.chan_width_weightage = + CHAN_WIDTH_WEIGHTAGE; + score_cfg->weight_cfg.chan_band_weightage = + CHAN_BAND_WEIGHTAGE; + score_cfg->weight_cfg.nss_weightage = NSS_WEIGHTAGE; + score_cfg->weight_cfg.beamforming_cap_weightage = + BEAMFORMING_CAP_WEIGHTAGE; + score_cfg->weight_cfg.pcl_weightage = PCL_WEIGHT; + score_cfg->weight_cfg.channel_congestion_weightage = + CHANNEL_CONGESTION_WEIGHTAGE; + score_cfg->weight_cfg.oce_wan_weightage = OCE_WAN_WEIGHTAGE; + } + + score_cfg->bandwidth_weight_per_index = + scm_limit_max_per_index_score( + score_cfg->bandwidth_weight_per_index); + 
score_cfg->nss_weight_per_index = + scm_limit_max_per_index_score(score_cfg->nss_weight_per_index); + score_cfg->band_weight_per_index = + scm_limit_max_per_index_score(score_cfg->band_weight_per_index); + + + score_cfg->esp_qbss_scoring.score_pcnt3_to_0 = + scm_limit_max_per_index_score( + score_cfg->esp_qbss_scoring.score_pcnt3_to_0); + score_cfg->esp_qbss_scoring.score_pcnt7_to_4 = + scm_limit_max_per_index_score( + score_cfg->esp_qbss_scoring.score_pcnt7_to_4); + score_cfg->esp_qbss_scoring.score_pcnt11_to_8 = + scm_limit_max_per_index_score( + score_cfg->esp_qbss_scoring.score_pcnt11_to_8); + score_cfg->esp_qbss_scoring.score_pcnt15_to_12 = + scm_limit_max_per_index_score( + score_cfg->esp_qbss_scoring.score_pcnt15_to_12); + + score_cfg->oce_wan_scoring.score_pcnt3_to_0 = + scm_limit_max_per_index_score( + score_cfg->oce_wan_scoring.score_pcnt3_to_0); + score_cfg->oce_wan_scoring.score_pcnt7_to_4 = + scm_limit_max_per_index_score( + score_cfg->oce_wan_scoring.score_pcnt7_to_4); + score_cfg->oce_wan_scoring.score_pcnt11_to_8 = + scm_limit_max_per_index_score( + score_cfg->oce_wan_scoring.score_pcnt11_to_8); + score_cfg->oce_wan_scoring.score_pcnt15_to_12 = + scm_limit_max_per_index_score( + score_cfg->oce_wan_scoring.score_pcnt15_to_12); + +} + +/** + * scm_get_rssi_pcnt_for_slot () - calculate rssi % score based on the slot + * index between the high rssi and low rssi threshold + * @high_rssi_threshold: High rssi of the window + * @low_rssi_threshold: low rssi of the window + * @high_rssi_pcnt: % score for the high rssi + * @low_rssi_pcnt: %score for the low rssi + * @bucket_size: bucket size of the window + * @bss_rssi: Input rssi for which value need to be calculated + * + * Return : rssi pct to use for the given rssi + */ +static inline +int8_t scm_get_rssi_pcnt_for_slot(int32_t high_rssi_threshold, + int32_t low_rssi_threshold, uint32_t high_rssi_pcnt, + uint32_t low_rssi_pcnt, uint32_t bucket_size, int8_t bss_rssi) +{ + int8_t slot_index, slot_size, 
rssi_diff, num_slot, rssi_pcnt; + + num_slot = ((high_rssi_threshold - + low_rssi_threshold) / bucket_size) + 1; + slot_size = ((high_rssi_pcnt - low_rssi_pcnt) + + (num_slot / 2)) / (num_slot); + rssi_diff = high_rssi_threshold - bss_rssi; + slot_index = (rssi_diff / bucket_size) + 1; + rssi_pcnt = high_rssi_pcnt - (slot_size * slot_index); + if (rssi_pcnt < low_rssi_pcnt) + rssi_pcnt = low_rssi_pcnt; + + scm_debug("Window %d -> %d pcnt range %d -> %d bucket_size %d bss_rssi %d num_slot %d slot_size %d rssi_diff %d slot_index %d rssi_pcnt %d", + high_rssi_threshold, low_rssi_threshold, high_rssi_pcnt, + low_rssi_pcnt, bucket_size, bss_rssi, num_slot, slot_size, + rssi_diff, slot_index, rssi_pcnt); + + return rssi_pcnt; +} + +/** + * scm_calculate_rssi_score () - Calculate RSSI score based on AP RSSI + * @score_param: rssi score params + * @rssi: rssi of the AP + * @rssi_weightage: rssi_weightage out of total weightage + * + * Return : rssi score + */ +static int32_t scm_calculate_rssi_score( + struct rssi_cfg_score *score_param, + int32_t rssi, uint8_t rssi_weightage) +{ + int8_t rssi_pcnt; + int32_t total_rssi_score; + int32_t best_rssi_threshold; + int32_t good_rssi_threshold; + int32_t bad_rssi_threshold; + uint32_t good_rssi_pcnt; + uint32_t bad_rssi_pcnt; + uint32_t good_bucket_size; + uint32_t bad_bucket_size; + + best_rssi_threshold = score_param->best_rssi_threshold*(-1); + good_rssi_threshold = score_param->good_rssi_threshold*(-1); + bad_rssi_threshold = score_param->bad_rssi_threshold*(-1); + good_rssi_pcnt = score_param->good_rssi_pcnt; + bad_rssi_pcnt = score_param->bad_rssi_pcnt; + good_bucket_size = score_param->good_rssi_bucket_size; + bad_bucket_size = score_param->bad_rssi_bucket_size; + + total_rssi_score = (BEST_CANDIDATE_MAX_WEIGHT * rssi_weightage); + + /* + * If RSSI is better than the best rssi threshold then it return full + * score. 
+ */ + if (rssi > best_rssi_threshold) + return total_rssi_score; + /* + * If RSSI is less or equal to bad rssi threshold then it return + * least score. + */ + if (rssi <= bad_rssi_threshold) + return (total_rssi_score * bad_rssi_pcnt) / 100; + + /* RSSI lies between best to good rssi threshold */ + if (rssi > good_rssi_threshold) + rssi_pcnt = scm_get_rssi_pcnt_for_slot(best_rssi_threshold, + good_rssi_threshold, 100, good_rssi_pcnt, + good_bucket_size, rssi); + else + rssi_pcnt = scm_get_rssi_pcnt_for_slot(good_rssi_threshold, + bad_rssi_threshold, good_rssi_pcnt, + bad_rssi_pcnt, bad_bucket_size, + rssi); + + return (total_rssi_score * rssi_pcnt) / 100; + +} + +/** + * scm_calculate_pcl_score () - Calculate PCL score based on PCL weightage + * @pcl_chan_weight: pcl weight of BSS channel + * @pcl_weightage: PCL _weightage out of total weightage + * + * Return : pcl score + */ +static int32_t scm_calculate_pcl_score(int pcl_chan_weight, + uint8_t pcl_weightage) +{ + int32_t pcl_score = 0; + int32_t temp_pcl_chan_weight = 0; + + if (pcl_chan_weight) { + temp_pcl_chan_weight = + (SCM_MAX_WEIGHT_OF_PCL_CHANNELS - pcl_chan_weight); + temp_pcl_chan_weight = qdf_do_div(temp_pcl_chan_weight, + SCM_PCL_GROUPS_WEIGHT_DIFFERENCE); + pcl_score = pcl_weightage - temp_pcl_chan_weight; + if (pcl_score < 0) + pcl_score = 0; + } + return pcl_score * BEST_CANDIDATE_MAX_WEIGHT; + +} + +/** + * scm_rssi_is_same_bucket () - check if both rssi fall in same bucket + * @rssi_top_thresh: high rssi threshold of the the window + * @low_rssi_threshold: low rssi of the window + * @rssi_ref1: rssi ref one + * @rssi_ref2: rssi ref two + * @bucket_size: bucket size of the window + * + * Return : true if both fall in same window + */ +static inline bool scm_rssi_is_same_bucket(int8_t rssi_top_thresh, + int8_t rssi_ref1, int8_t rssi_ref2, int8_t bucket_size) +{ + int8_t rssi_diff1 = 0; + int8_t rssi_diff2 = 0; + + rssi_diff1 = rssi_top_thresh - rssi_ref1; + rssi_diff2 = rssi_top_thresh - 
rssi_ref2; + + return (rssi_diff1 / bucket_size) == (rssi_diff2 / bucket_size); +} + +/** + * scm_roam_calculate_prorated_pcnt_by_rssi () - Calculate prorated RSSI score + * based on AP RSSI. This will be used to determine HT VHT score + * @score_param: rssi score params + * @rssi: bss rssi + * @rssi_weightage: rssi_weightage out of total weightage + * + * If rssi is greater than good threshold return 100, if less than bad return 0, + * if between good and bad, return prorated rssi score for the index. + * + * Return : rssi prorated score + */ +static int8_t scm_roam_calculate_prorated_pcnt_by_rssi( + struct rssi_cfg_score *score_param, + int32_t rssi, uint8_t rssi_weightage) +{ + int32_t good_rssi_threshold; + int32_t bad_rssi_threshold; + int8_t rssi_pref_5g_rssi_thresh; + bool same_bucket; + + good_rssi_threshold = score_param->good_rssi_threshold * (-1); + bad_rssi_threshold = score_param->bad_rssi_threshold * (-1); + rssi_pref_5g_rssi_thresh = score_param->rssi_pref_5g_rssi_thresh * (-1); + + /* If RSSI is greater than good rssi return full weight */ + if (rssi > good_rssi_threshold) + return BEST_CANDIDATE_MAX_WEIGHT; + + same_bucket = scm_rssi_is_same_bucket(good_rssi_threshold, + rssi, rssi_pref_5g_rssi_thresh, + score_param->bad_rssi_bucket_size); + if (same_bucket || (rssi < rssi_pref_5g_rssi_thresh)) + return 0; + /* If RSSI is less or equal to bad rssi threshold then it return 0 */ + if (rssi <= bad_rssi_threshold) + return 0; + + /* If RSSI is between good and bad threshold */ + return scm_get_rssi_pcnt_for_slot(good_rssi_threshold, + bad_rssi_threshold, + score_param->good_rssi_pcnt, + score_param->bad_rssi_pcnt, + score_param->bad_rssi_bucket_size, + rssi); +} + +/** + * scm_calculate_bandwidth_score () - Calculate BW score + * @entry: scan entry + * @score_config: scoring config + * @prorated_pct: prorated % to return dependent on RSSI + * + * Return : bw score + */ +static int32_t scm_calculate_bandwidth_score( + struct scan_cache_entry *entry, + 
struct scoring_config *score_config, uint8_t prorated_pct) +{ + uint32_t score; + int32_t bw_weight_per_idx; + uint8_t cbmode = 0; + uint8_t ch_width_index; + bool is_vht = false; + + bw_weight_per_idx = score_config->bandwidth_weight_per_index; + + if (WLAN_CHAN_IS_2GHZ(entry->channel.chan_idx)) { + cbmode = score_config->cb_mode_24G; + if (score_config->vht_24G_cap) + is_vht = true; + } else if (score_config->vht_cap) { + is_vht = true; + cbmode = score_config->cb_mode_5G; + } + + if (entry->phy_mode == WLAN_PHYMODE_11AC_VHT80_80 || + entry->phy_mode == WLAN_PHYMODE_11AC_VHT160) + ch_width_index = SCM_160MHZ_BW_INDEX; + else if (entry->phy_mode == WLAN_PHYMODE_11AC_VHT80) + ch_width_index = SCM_80MHZ_BW_INDEX; + else if (entry->phy_mode == WLAN_PHYMODE_11NA_HT40PLUS || + entry->phy_mode == WLAN_PHYMODE_11NA_HT40MINUS || + entry->phy_mode == WLAN_PHYMODE_11NG_HT40PLUS || + entry->phy_mode == WLAN_PHYMODE_11NG_HT40MINUS || + entry->phy_mode == WLAN_PHYMODE_11NG_HT40 || + entry->phy_mode == WLAN_PHYMODE_11NA_HT40 || + entry->phy_mode == WLAN_PHYMODE_11AC_VHT40PLUS || + entry->phy_mode == WLAN_PHYMODE_11AC_VHT40MINUS || + entry->phy_mode == WLAN_PHYMODE_11AC_VHT40) + ch_width_index = SCM_40MHZ_BW_INDEX; + else + ch_width_index = SCM_20MHZ_BW_INDEX; + + + if (!score_config->ht_cap && ch_width_index > SCM_20MHZ_BW_INDEX) + ch_width_index = SCM_20MHZ_BW_INDEX; + + if (!is_vht && ch_width_index > SCM_40MHZ_BW_INDEX) + ch_width_index = SCM_40MHZ_BW_INDEX; + + if (cbmode && ch_width_index > SCM_20MHZ_BW_INDEX) + score = WLAN_GET_SCORE_PERCENTAGE(bw_weight_per_idx, + ch_width_index); + else + score = WLAN_GET_SCORE_PERCENTAGE(bw_weight_per_idx, + SCM_20MHZ_BW_INDEX); + + return (prorated_pct * score * + score_config->weight_cfg.chan_width_weightage) / + BEST_CANDIDATE_MAX_WEIGHT; +} + +/** + * scm_get_score_for_index () - get score for the given index + * @index: index for which we need the score + * @weightage: weigtage for the param + * @score: per slot score + * + * 
Return : score for the index + */ +static int32_t scm_get_score_for_index(uint8_t index, + uint8_t weightage, struct per_slot_scoring *score) +{ + if (index <= SCM_SCORE_INDEX_3) + return weightage * WLAN_GET_SCORE_PERCENTAGE( + score->score_pcnt3_to_0, + index); + else if (index <= SCM_SCORE_INDEX_7) + return weightage * WLAN_GET_SCORE_PERCENTAGE( + score->score_pcnt7_to_4, + index - SCM_SCORE_OFFSET_INDEX_7_4); + else if (index <= SCM_SCORE_INDEX_11) + return weightage * WLAN_GET_SCORE_PERCENTAGE( + score->score_pcnt11_to_8, + index - SCM_SCORE_OFFSET_INDEX_11_8); + else + return weightage * WLAN_GET_SCORE_PERCENTAGE( + score->score_pcnt15_to_12, + index - SCM_SCORE_OFFSET_INDEX_15_12); +} + +/** + * scm_calculate_congestion_score () - Calculate congestion score + * @entry: bss information + * @score_params: bss score params + * + * Return : congestion score + */ +static int32_t scm_calculate_congestion_score( + struct scan_cache_entry *entry, + struct scoring_config *score_params) +{ + uint32_t ap_load = 0; + uint32_t est_air_time_percentage = 0; + uint32_t congestion = 0; + uint32_t window_size; + uint8_t index; + int32_t good_rssi_threshold; + + if (!score_params->esp_qbss_scoring.num_slot) + return 0; + + if (score_params->esp_qbss_scoring.num_slot > + SCM_SCORE_MAX_INDEX) + score_params->esp_qbss_scoring.num_slot = + SCM_SCORE_MAX_INDEX; + + good_rssi_threshold = + score_params->rssi_score.good_rssi_threshold * (-1); + + /* For bad zone rssi get score from last index */ + if (entry->rssi_raw <= good_rssi_threshold) + return scm_get_score_for_index( + score_params->esp_qbss_scoring.num_slot, + score_params->weight_cfg. 
+ channel_congestion_weightage, + &score_params->esp_qbss_scoring); + + if (entry->air_time_fraction) { + /* Convert 0-255 range to percentage */ + est_air_time_percentage = + entry->air_time_fraction * + SCM_MAX_CHANNEL_WEIGHT; + est_air_time_percentage = + qdf_do_div(est_air_time_percentage, + SCM_MAX_ESTIMATED_AIR_TIME_FRACTION); + /* + * Calculate channel congestion from estimated air time + * fraction. + */ + congestion = SCM_MAX_CHANNEL_UTILIZATION - + est_air_time_percentage; + } else if (entry->qbss_chan_load) { + ap_load = (entry->qbss_chan_load * + BEST_CANDIDATE_MAX_WEIGHT); + /* + * Calculate ap_load in % from qbss channel load from + * 0-255 range + */ + congestion = qdf_do_div(ap_load, MAX_AP_LOAD); + } else { + return score_params->weight_cfg.channel_congestion_weightage * + WLAN_GET_SCORE_PERCENTAGE( + score_params->esp_qbss_scoring.score_pcnt3_to_0, + SCM_SCORE_INDEX_0); + } + + window_size = BEST_CANDIDATE_MAX_WEIGHT / + score_params->esp_qbss_scoring.num_slot; + + /* Desired values are from 1 to 15, as 0 is for not present. so do +1 */ + index = qdf_do_div(congestion, window_size) + 1; + + if (index > score_params->esp_qbss_scoring.num_slot) + index = score_params->esp_qbss_scoring.num_slot; + + return scm_get_score_for_index(index, score_params->weight_cfg. 
+ channel_congestion_weightage, + &score_params->esp_qbss_scoring); +} + +/** + * scm_calculate_nss_score () - Calculate congestion score + * @psoc: psoc ptr + * @score_config: scoring config + * @ap_nss: ap nss + * @prorated_pct: prorated % to return dependent on RSSI + * + * Return : nss score + */ +static int32_t scm_calculate_nss_score(struct wlan_objmgr_psoc *psoc, + struct scoring_config *score_config, uint8_t ap_nss, + uint8_t prorated_pct, uint32_t sta_nss) +{ + uint8_t nss; + uint8_t score_pct; + + nss = ap_nss; + if (sta_nss < nss) + nss = sta_nss; + + if (nss == 4) + score_pct = WLAN_GET_SCORE_PERCENTAGE( + score_config->nss_weight_per_index, + SCM_NSS_4x4_INDEX); + else if (nss == 3) + score_pct = WLAN_GET_SCORE_PERCENTAGE( + score_config->nss_weight_per_index, + SCM_NSS_3x3_INDEX); + else if (nss == 2) + score_pct = WLAN_GET_SCORE_PERCENTAGE( + score_config->nss_weight_per_index, + SCM_NSS_2x2_INDEX); + else + score_pct = WLAN_GET_SCORE_PERCENTAGE( + score_config->nss_weight_per_index, + SCM_NSS_1x1_INDEX); + + return (score_config->weight_cfg.nss_weightage * score_pct * + prorated_pct) / BEST_CANDIDATE_MAX_WEIGHT; +} + +/** + * scm_calculate_oce_wan_score () - Calculate oce wan score + * @entry: bss information + * @score_params: bss score params + * + * Return : oce wan score + */ +static int32_t scm_calculate_oce_wan_score( + struct scan_cache_entry *entry, + struct scoring_config *score_params) +{ + uint32_t window_size; + uint8_t index; + struct oce_reduced_wan_metrics wan_metrics; + uint8_t *mbo_oce_ie; + + if (!score_params->oce_wan_scoring.num_slot) + return 0; + + if (score_params->oce_wan_scoring.num_slot > + SCM_SCORE_MAX_INDEX) + score_params->oce_wan_scoring.num_slot = + SCM_SCORE_MAX_INDEX; + + window_size = SCM_SCORE_MAX_INDEX/ + score_params->oce_wan_scoring.num_slot; + mbo_oce_ie = util_scan_entry_mbo_oce(entry); + if (wlan_parse_oce_reduced_wan_metrics_ie(mbo_oce_ie, + &wan_metrics)) { + scm_err("downlink_av_cap %d", 
wan_metrics.downlink_av_cap); + /* if capacity is 0 return 0 score */ + if (!wan_metrics.downlink_av_cap) + return 0; + /* Desired values are from 1 to WLAN_SCORE_MAX_INDEX */ + index = qdf_do_div(wan_metrics.downlink_av_cap, + window_size); + } else { + index = SCM_SCORE_INDEX_0; + } + + if (index > score_params->oce_wan_scoring.num_slot) + index = score_params->oce_wan_scoring.num_slot; + + return scm_get_score_for_index(index, + score_params->weight_cfg.oce_wan_weightage, + &score_params->oce_wan_scoring); +} + +#ifdef WLAN_POLICY_MGR_ENABLE + +static uint32_t scm_get_sta_nss(struct wlan_objmgr_psoc *psoc, + uint8_t bss_channel, + uint8_t vdev_nss_2g, + uint8_t vdev_nss_5g) +{ + /* + * If station support nss as 2*2 but AP support NSS as 1*1, + * this AP will be given half weight compare to AP which are having + * NSS as 2*2. + */ + + if (policy_mgr_is_chnl_in_diff_band(psoc, bss_channel) && + policy_mgr_is_hw_dbs_capable(psoc) && + !(policy_mgr_is_hw_dbs_2x2_capable(psoc))) + return 1; + + return (WLAN_REG_IS_24GHZ_CH(bss_channel) ? + vdev_nss_2g : + vdev_nss_5g); +} +#else +static uint32_t scm_get_sta_nss(struct wlan_objmgr_psoc *psoc, + uint8_t bss_channel, + uint8_t vdev_nss_2g, + uint8_t vdev_nss_5g) +{ + return (WLAN_REG_IS_24GHZ_CH(bss_channel) ? 
+ vdev_nss_2g : + vdev_nss_5g); +} +#endif + +int scm_calculate_bss_score(struct wlan_objmgr_psoc *psoc, + struct scan_default_params *params, + struct scan_cache_entry *entry, + int pcl_chan_weight) +{ + int32_t score = 0; + int32_t rssi_score = 0; + int32_t pcl_score = 0; + int32_t ht_score = 0; + int32_t vht_score = 0; + int32_t he_score = 0; + int32_t bandwidth_score = 0; + int32_t beamformee_score = 0; + int32_t band_score = 0; + int32_t nss_score = 0; + int32_t congestion_score = 0; + int32_t oce_wan_score = 0; + uint8_t prorated_pcnt; + bool is_vht = false; + int8_t good_rssi_threshold; + int8_t rssi_pref_5g_rssi_thresh; + bool same_bucket = false; + bool ap_su_beam_former = false; + struct wlan_ie_vhtcaps *vht_cap; + struct scoring_config *score_config; + struct weight_config *weight_config; + struct wlan_scan_obj *scan_obj; + uint32_t sta_nss; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("scan_obj is NULL"); + return 0; + } + + score_config = &scan_obj->scan_def.score_config; + weight_config = &score_config->weight_cfg; + + rssi_score = scm_calculate_rssi_score(&score_config->rssi_score, + entry->rssi_raw, weight_config->rssi_weightage); + score += rssi_score; + + pcl_score = scm_calculate_pcl_score(pcl_chan_weight, + weight_config->pcl_weightage); + score += pcl_score; + + prorated_pcnt = scm_roam_calculate_prorated_pcnt_by_rssi( + &score_config->rssi_score, entry->rssi_raw, + weight_config->rssi_weightage); + /* If device and AP supports HT caps, extra 10% score will be added */ + if (score_config->ht_cap && entry->ie_list.htcap) + ht_score = prorated_pcnt * + weight_config->ht_caps_weightage; + score += ht_score; + + if (WLAN_CHAN_IS_2GHZ(entry->channel.chan_idx)) { + if (score_config->vht_24G_cap) + is_vht = true; + } else if (score_config->vht_cap) { + is_vht = true; + } + /* + * If device and AP supports VHT caps, Extra 6% score will + * be added to score + */ + if (is_vht && entry->ie_list.vhtcap) + vht_score = 
prorated_pcnt * + weight_config->vht_caps_weightage; + score += vht_score; + + if (score_config->he_cap && entry->ie_list.hecap) + he_score = prorated_pcnt * + weight_config->he_caps_weightage; + score += he_score; + + bandwidth_score = scm_calculate_bandwidth_score(entry, score_config, + prorated_pcnt); + score += bandwidth_score; + + good_rssi_threshold = + score_config->rssi_score.good_rssi_threshold * (-1); + rssi_pref_5g_rssi_thresh = + score_config->rssi_score.rssi_pref_5g_rssi_thresh * (-1); + if (entry->rssi_raw < good_rssi_threshold) + same_bucket = scm_rssi_is_same_bucket(good_rssi_threshold, + entry->rssi_raw, rssi_pref_5g_rssi_thresh, + score_config->rssi_score.bad_rssi_bucket_size); + + vht_cap = (struct wlan_ie_vhtcaps *) util_scan_entry_vhtcap(entry); + if (vht_cap && vht_cap->su_beam_former) + ap_su_beam_former = true; + if (is_vht && ap_su_beam_former && + (entry->rssi_raw > rssi_pref_5g_rssi_thresh) && !same_bucket) + beamformee_score = BEST_CANDIDATE_MAX_WEIGHT * + weight_config->beamforming_cap_weightage; + score += beamformee_score; + + /* + * If AP is on 5Ghz channel , extra weigtage is added to BSS score. + * if RSSI is greater tha 5g rssi threshold or fall in same bucket. + * else give weigtage to 2.4 GH. 
+ */ + if ((entry->rssi_raw > rssi_pref_5g_rssi_thresh) && !same_bucket) { + if (WLAN_CHAN_IS_5GHZ(entry->channel.chan_idx)) + band_score = weight_config->chan_band_weightage * + WLAN_GET_SCORE_PERCENTAGE( + score_config->band_weight_per_index, + SCM_BAND_5G_INDEX); + } else if (WLAN_CHAN_IS_2GHZ(entry->channel.chan_idx)) { + band_score = weight_config->chan_band_weightage * + WLAN_GET_SCORE_PERCENTAGE( + score_config->band_weight_per_index, + SCM_BAND_2G_INDEX); + } + score += band_score; + + congestion_score = scm_calculate_congestion_score(entry, score_config); + score += congestion_score; + + sta_nss = scm_get_sta_nss(psoc, entry->channel.chan_idx, + score_config->vdev_nss_24g, + score_config->vdev_nss_5g); + + /* + * If station support nss as 2*2 but AP support NSS as 1*1, + * this AP will be given half weight compare to AP which are having + * NSS as 2*2. + */ + nss_score = scm_calculate_nss_score(psoc, score_config, entry->nss, + prorated_pcnt, sta_nss); + score += nss_score; + + oce_wan_score = scm_calculate_oce_wan_score(entry, score_config); + score += oce_wan_score; + + scm_debug("Self Cap: HT %d VHT %d HE %d VHT_24Ghz %d BF cap %d cb_mode_24g %d cb_mode_5G %d NSS %d", + score_config->ht_cap, score_config->vht_cap, + score_config->he_cap, score_config->vht_24G_cap, + score_config->beamformee_cap, score_config->cb_mode_24G, + score_config->cb_mode_5G, sta_nss); + + scm_debug("Candidate (BSSID: %pM Chan %d) Cap:: rssi=%d HT=%d VHT=%d HE %d su beamformer %d phymode=%d air time fraction %d qbss load %d NSS %d", + entry->bssid.bytes, entry->channel.chan_idx, + entry->rssi_raw, util_scan_entry_htcap(entry) ? 1 : 0, + util_scan_entry_vhtcap(entry) ? 1 : 0, + util_scan_entry_hecap(entry) ? 
1 : 0, ap_su_beam_former, + entry->phy_mode, entry->air_time_fraction, + entry->qbss_chan_load, entry->nss); + + scm_debug("Candidate Scores : prorated_pcnt %d rssi %d pcl %d ht %d vht %d he %d beamformee %d bw %d band %d congestion %d nss %d oce wan %d TOTAL score %d", + prorated_pcnt, rssi_score, pcl_score, ht_score, vht_score, + he_score, beamformee_score, bandwidth_score, band_score, + congestion_score, nss_score, oce_wan_score, score); + + entry->bss_score = score; + return score; +} + +bool scm_get_pcl_weight_of_channel(int channel_id, + struct scan_filter *filter, + int *pcl_chan_weight, + uint8_t *weight_list) +{ + int i; + bool found = false; + + if (!filter) + return found; + + for (i = 0; i < filter->num_of_pcl_channels; i++) { + if (filter->pcl_channel_list[i] == channel_id) { + *pcl_chan_weight = filter->pcl_weight_list[i]; + found = true; + break; + } + } + return found; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db.c new file mode 100644 index 0000000000000000000000000000000000000000..cc5a1f974de89839e85f24caf0d8f76903be43f3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db.c @@ -0,0 +1,1504 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan cache api and functionality + * The Scan entries are protected by scan_db_lock. Holding the lock + * for whole scan operation during get/flush scan results may take + * more than 5 ms and thus ref count is used along with scan_db_lock. + * Below are the operation on scan cache entry: + * - While adding new node to the entry scan_db_lock is taken and ref_cnt + * is initialized and incremented. Also the cookie will be set to valid value. + * - The ref count incremented during adding new node should be decremented only + * by a delete operation on the node. But there can be multiple concurrent + * delete operations on a node from different threads which may lead to ref + * count being decremented multiple time and freeing the node even if node + * is in use. So to maintain atomicity between multiple delete operations + * on a same node from different threads, a cookie is used to check if node is + * logically deleted or not. A delete operation will set the cookie to 0 + * making it invalid. So if the 2nd thread find the cookie as invalid it will + * not try to delete and decrement the ref count of the node again. + * - This Cookie is also used to check if node is valid while iterating through + * the scan cache to avoid duplicate entries. + * - Once ref_cnt become 0, i.e. it is logically deleted and no thread is using + * it the node is physically deleted from the scan cache. + * - While reading the node the ref_cnt should be incremented. Once reading + * operation is done ref_cnt is decremented. 
 */
/*
 * NOTE(review): the six system include targets below were lost in
 * extraction (the <...> operands were stripped) — restore them from the
 * original file; they are likely qdf_status/qdf_atomic/qdf_list/qdf_mem
 * style headers, but verify against the upstream source.
 */
#include
#include
#include
#include
#include
#include
#include "wlan_scan_main.h"
#include "wlan_scan_cache_db_i.h"
#include "wlan_reg_services_api.h"
#include "wlan_reg_ucfg_api.h"

/**
 * scm_del_scan_node() - API to remove scan node from the list
 * @list: hash list
 * @scan_node: node to be removed
 *
 * This should be called while holding scan_db_lock.
 * Frees both the cached entry and the node wrapper on successful unlink.
 *
 * Return: void
 */
static void scm_del_scan_node(qdf_list_t *list,
	struct scan_cache_node *scan_node)
{
	QDF_STATUS status;

	status = qdf_list_remove_node(list, &scan_node->node);
	/* Only free if the node was actually unlinked from the list */
	if (QDF_IS_STATUS_SUCCESS(status)) {
		util_scan_free_cache_entry(scan_node->entry);
		qdf_mem_free(scan_node);
	}
}

/**
 * scm_del_scan_node_from_db() - API to del the scan entry
 * @scan_db: scan database
 * @scan_node: node to be removed from the database
 *
 * API to flush the scan entry. This should be called while
 * holding scan_db_lock.
 *
 * Return: QDF status.
 */
static QDF_STATUS scm_del_scan_node_from_db(struct scan_dbs *scan_db,
	struct scan_cache_node *scan_node)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint8_t hash_idx;

	if (!scan_node)
		return QDF_STATUS_E_INVAL;

	/* Hash bucket is derived from the BSSID of the cached entry */
	hash_idx = SCAN_GET_HASH(scan_node->entry->bssid.bytes);
	scm_del_scan_node(&scan_db->scan_hash_tbl[hash_idx], scan_node);
	scan_db->num_entries--;

	return status;
}

/**
 * scm_scan_entry_get_ref() - api to increase ref count of scan entry
 * @scan_node: scan node
 *
 * Return: void
 */
static void scm_scan_entry_get_ref(struct scan_cache_node *scan_node)
{
	if (scan_node == NULL) {
		scm_err("scan_node is NULL");
		QDF_ASSERT(0);
		return;
	}
	qdf_atomic_inc(&scan_node->ref_cnt);
}

/**
 * scm_scan_entry_put_ref() - Api to decrease ref count of scan entry
 * and free it when the count reaches 0
 * @scan_db: scan database
 * @scan_node: scan node
 * @lock_needed: true if scan_db_lock must be taken here, false when the
 *               caller already holds it
 *
 * Return: void
 */
static void scm_scan_entry_put_ref(struct scan_dbs *scan_db,
	struct scan_cache_node *scan_node, bool lock_needed)
{

	if (!scan_node) {
		scm_err("scan_node is NULL");
		QDF_ASSERT(0);
		return;
	}

	if (lock_needed)
		qdf_spin_lock_bh(&scan_db->scan_db_lock);

	/* A zero count here means an unbalanced put — flag it loudly */
	if (!qdf_atomic_read(&scan_node->ref_cnt)) {
		if (lock_needed)
			qdf_spin_unlock_bh(&scan_db->scan_db_lock);
		scm_err("scan_node ref cnt is 0");
		QDF_ASSERT(0);
		return;
	}

	/* Decrement ref count, free scan_node, if ref count == 0 */
	if (qdf_atomic_dec_and_test(&scan_node->ref_cnt))
		scm_del_scan_node_from_db(scan_db, scan_node);

	if (lock_needed)
		qdf_spin_unlock_bh(&scan_db->scan_db_lock);
}

/**
 * scm_scan_entry_del() - API to logically delete a scan node
 * @scan_db: data base
 * @scan_node: node to be deleted
 *
 * Call must be protected by scan_db->scan_db_lock.
 * Invalidates the cookie so concurrent deleters do not drop the
 * initial reference twice (see the cookie scheme described in the
 * DOC comment at the top of this file).
 *
 * Return: void
 */

static void scm_scan_entry_del(struct scan_dbs *scan_db,
	struct scan_cache_node *scan_node)
{
	if (!scan_node) {
		scm_err("scan node is NULL");
		QDF_ASSERT(0);
		return;
	}

	/* Another thread already logically deleted this node */
	if (scan_node->cookie != SCAN_NODE_ACTIVE_COOKIE) {
		scm_debug("node is already deleted");
		return;
	}
	/* Seems node is already deleted */
	if (!qdf_atomic_read(&scan_node->ref_cnt)) {
		scm_debug("node is already deleted ref 0");
		return;
	}
	/* Mark logically deleted before dropping the list's reference */
	scan_node->cookie = 0;

	scm_scan_entry_put_ref(scan_db, scan_node, false);
}

/**
 * scm_add_scan_node() - API to add scan node
 * @scan_db: data base
 * @scan_node: node to be added
 * @dup_node: node before which new node to be added
 * if it's not NULL, otherwise add node to tail
 *
 * Call must be protected by scan_db->scan_db_lock.
 * Initializes the ref count, marks the cookie active and takes the
 * list's own reference on the node.
 *
 * Return: void
 */
static void scm_add_scan_node(struct scan_dbs *scan_db,
	struct scan_cache_node *scan_node,
	struct scan_cache_node *dup_node)
{
	uint8_t hash_idx;

	hash_idx =
		SCAN_GET_HASH(scan_node->entry->bssid.bytes);

	qdf_atomic_init(&scan_node->ref_cnt);
	scan_node->cookie = SCAN_NODE_ACTIVE_COOKIE;
	/* This get is the list's reference, released only by a delete */
	scm_scan_entry_get_ref(scan_node);
	if (!dup_node)
qdf_list_insert_back(&scan_db->scan_hash_tbl[hash_idx], + &scan_node->node); + else + qdf_list_insert_before(&scan_db->scan_hash_tbl[hash_idx], + &scan_node->node, &dup_node->node); + + scan_db->num_entries++; +} + + +/** + * scm_get_next_valid_node() - API get the next valid scan node from + * the list + * @list: hash list + * @cur_node: current node pointer + * + * API to get next active node from the list. If cur_node is NULL + * it will return first node of the list. + * Call must be protected by scan_db->scan_db_lock + * + * Return: next scan node + */ +static qdf_list_node_t * +scm_get_next_valid_node(qdf_list_t *list, + qdf_list_node_t *cur_node) +{ + qdf_list_node_t *next_node = NULL; + qdf_list_node_t *temp_node = NULL; + struct scan_cache_node *scan_node; + + if (cur_node) + qdf_list_peek_next(list, cur_node, &next_node); + else + qdf_list_peek_front(list, &next_node); + + while (next_node) { + scan_node = qdf_container_of(next_node, + struct scan_cache_node, node); + if (scan_node->cookie == SCAN_NODE_ACTIVE_COOKIE) + return next_node; + /* + * If node is not valid check for next entry + * to get next valid node. + */ + qdf_list_peek_next(list, next_node, &temp_node); + next_node = temp_node; + temp_node = NULL; + } + + return next_node; +} + +/** + * scm_get_next_node() - API get the next scan node from + * the list + * @scan_db: scan data base + * @list: hash list + * @cur_node: current node pointer + * + * API get the next node from the list. 
If cur_node is NULL
 * it will return first node of the list.
 * Takes a reference on the returned node and releases the reference
 * held on @cur_node, so callers can iterate without holding the lock.
 *
 * Return: next scan cache node
 */
static struct scan_cache_node *
scm_get_next_node(struct scan_dbs *scan_db,
	qdf_list_t *list, struct scan_cache_node *cur_node)
{
	struct scan_cache_node *next_node = NULL;
	qdf_list_node_t *next_list = NULL;

	qdf_spin_lock_bh(&scan_db->scan_db_lock);
	if (cur_node) {
		next_list = scm_get_next_valid_node(list, &cur_node->node);
		/* Decrement the ref count of the previous node */
		scm_scan_entry_put_ref(scan_db,
			cur_node, false);
	} else {
		next_list = scm_get_next_valid_node(list, NULL);
	}
	/* Increase the ref count of the obtained node */
	if (next_list) {
		next_node = qdf_container_of(next_list,
			struct scan_cache_node, node);
		scm_scan_entry_get_ref(next_node);
	}
	qdf_spin_unlock_bh(&scan_db->scan_db_lock);

	return next_node;
}

/**
 * scm_check_and_age_out() - check and age out the old entries
 * @scan_db: scan db
 * @node: node to check for age out
 * @scan_aging_time: scan cache aging time in ms
 *
 * Logically deletes @node (under scan_db_lock) when its entry is older
 * than @scan_aging_time.
 *
 * Return: void
 */
static void scm_check_and_age_out(struct scan_dbs *scan_db,
	struct scan_cache_node *node,
	uint32_t scan_aging_time)
{
	if (util_scan_entry_age(node->entry) >=
	    scan_aging_time) {
		scm_debug("Aging out BSSID: %pM with age %d ms",
			node->entry->bssid.bytes,
			util_scan_entry_age(node->entry));
		qdf_spin_lock_bh(&scan_db->scan_db_lock);
		scm_scan_entry_del(scan_db, node);
		qdf_spin_unlock_bh(&scan_db->scan_db_lock);
	}
}

/**
 * scm_bss_is_connected() - check if the cached BSS is the associated one
 * @entry: scan cache entry
 *
 * Return: true if the entry's mlme state says it is associated
 */
static bool scm_bss_is_connected(struct scan_cache_entry *entry)
{
	if (entry->mlme_info.assoc_state == SCAN_ENTRY_CON_STATE_ASSOC)
		return true;
	return false;
}
/*
 * scm_age_out_entries() - walk every hash bucket and age out stale,
 * non-connected entries using the psoc's configured cache aging time.
 */
void scm_age_out_entries(struct wlan_objmgr_psoc *psoc,
	struct scan_dbs *scan_db)
{
	int i;
	struct scan_cache_node *cur_node = NULL;
	struct scan_cache_node *next_node = NULL;
	struct scan_default_params *def_param;

	def_param = wlan_scan_psoc_get_def_params(psoc);
	if (!def_param) {
scm_err("wlan_scan_psoc_get_def_params failed"); + return; + } + + for (i = 0 ; i < SCAN_HASH_SIZE; i++) { + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], NULL); + while (cur_node) { + if (!scm_bss_is_connected(cur_node->entry)) + scm_check_and_age_out(scan_db, cur_node, + def_param->scan_cache_aging_time); + next_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], cur_node); + cur_node = next_node; + next_node = NULL; + } + } +} + +/** + * scm_flush_oldest_entry() - Iterate over scan db and flust out the + * oldest entry + * @scan_db: scan db from which oldest entry needs to be flushed + * + * Return: QDF_STATUS + */ +static QDF_STATUS scm_flush_oldest_entry(struct scan_dbs *scan_db) +{ + int i; + struct scan_cache_node *oldest_node = NULL; + struct scan_cache_node *cur_node; + + for (i = 0 ; i < SCAN_HASH_SIZE; i++) { + /* Get the first valid node for the hash */ + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], + NULL); + /* Iterate scan db and flush out oldest node + * take ref_cnt for oldest_node + */ + + while (cur_node) { + if (!oldest_node || + (util_scan_entry_age(oldest_node->entry) < + util_scan_entry_age(cur_node->entry))) { + if (oldest_node) + scm_scan_entry_put_ref(scan_db, + oldest_node, + true); + oldest_node = cur_node; + scm_scan_entry_get_ref(oldest_node); + } + + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], + cur_node); + }; + } + + if (oldest_node) { + scm_debug("Flush oldest BSSID: %pM with age %d ms", + oldest_node->entry->bssid.bytes, + util_scan_entry_age(oldest_node->entry)); + /* Release ref_cnt taken for oldest_node and delete it */ + qdf_spin_lock_bh(&scan_db->scan_db_lock); + scm_scan_entry_del(scan_db, oldest_node); + scm_scan_entry_put_ref(scan_db, oldest_node, false); + qdf_spin_unlock_bh(&scan_db->scan_db_lock); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * scm_update_alt_wcn_ie() - update the alternate WCN IE + * @from: copy from + * @dst: copy to + * 
+ * Return: void + */ +static void scm_update_alt_wcn_ie(struct scan_cache_entry *from, + struct scan_cache_entry *dst) +{ + uint32_t alt_wcn_ie_len; + + if (from->frm_subtype == dst->frm_subtype) + return; + + if (!from->ie_list.wcn && !dst->ie_list.wcn) + return; + + /* Existing WCN IE is empty. */ + if (!from->ie_list.wcn) + return; + + alt_wcn_ie_len = 2 + from->ie_list.wcn[1]; + if (alt_wcn_ie_len > WLAN_MAX_IE_LEN + 2) { + scm_err("invalid IE len"); + return; + } + + if (!dst->alt_wcn_ie.ptr) { + /* allocate this additional buffer for alternate WCN IE */ + dst->alt_wcn_ie.ptr = + qdf_mem_malloc_atomic(WLAN_MAX_IE_LEN + 2); + if (!dst->alt_wcn_ie.ptr) { + scm_err("failed to allocate memory"); + return; + } + } + qdf_mem_copy(dst->alt_wcn_ie.ptr, + from->ie_list.wcn, alt_wcn_ie_len); + dst->alt_wcn_ie.len = alt_wcn_ie_len; +} + +/** + * scm_update_mlme_info() - update mlme info + * @src: source scan entry + * @dest: destination scan entry + * + * Return: void + */ +static inline void +scm_update_mlme_info(struct scan_cache_entry *src, + struct scan_cache_entry *dest) +{ + qdf_mem_copy(&dest->mlme_info, &src->mlme_info, + sizeof(struct mlme_info)); +} + +/** + * scm_copy_info_from_dup_entry() - copy duplicate node info + * to new scan entry + * @pdev: pdev ptr + * @scan_obj: scan obj ptr + * @scan_db: scan database + * @scan_params: new entry to be added + * @scan_node: duplicate entry + * + * Copy duplicate node info to new entry. 
+ * + * Return: void + */ +static void +scm_copy_info_from_dup_entry(struct wlan_objmgr_pdev *pdev, + struct wlan_scan_obj *scan_obj, + struct scan_dbs *scan_db, + struct scan_cache_entry *scan_params, + struct scan_cache_node *scan_node) +{ + struct scan_cache_entry *scan_entry; + uint64_t time_gap; + + scan_entry = scan_node->entry; + + /* Update probe resp entry as well if AP is in hidden mode */ + if (scan_params->frm_subtype == MGMT_SUBTYPE_PROBE_RESP && + scan_entry->is_hidden_ssid) + scan_params->is_hidden_ssid = true; + + /* + * If AP changed its beacon from not having an SSID to showing it the + * kernel will drop the entry asumming that something is wrong with AP. + * This can result in connection failure while updating the bss during + * connection. So flush the hidden entry from kernel before indicating + * the new entry. + */ + if (scan_entry->is_hidden_ssid && + scan_params->frm_subtype == MGMT_SUBTYPE_BEACON && + !util_scan_is_null_ssid(&scan_params->ssid)) { + if (scan_obj->cb.unlink_bss) { + scm_debug("Hidden AP %pM switch to non-hidden SSID, So unlink the entry", + scan_entry->bssid.bytes); + scan_obj->cb.unlink_bss(pdev, scan_entry); + } + } + + /* If old entry have the ssid but new entry does not */ + if (util_scan_is_null_ssid(&scan_params->ssid) && + scan_entry->ssid.length) { + /* + * New entry has a hidden SSID and old one has the SSID. + * Add the entry by using the ssid of the old entry + * only if diff of saved SSID time and current time is + * less than HIDDEN_SSID_TIME time. + * This will avoid issues in case AP changes its SSID + * while remain hidden. 
+ */ + time_gap = + qdf_mc_timer_get_system_time() - + scan_entry->hidden_ssid_timestamp; + if (time_gap <= HIDDEN_SSID_TIME) { + scan_params->hidden_ssid_timestamp = + scan_entry->hidden_ssid_timestamp; + scan_params->ssid.length = + scan_entry->ssid.length; + qdf_mem_copy(scan_params->ssid.ssid, + scan_entry->ssid.ssid, + scan_entry->ssid.length); + } + } + + /* + * Due to Rx sensitivity issue, sometime beacons are seen on adjacent + * channel so workaround in software is needed. If DS params or HT info + * are present driver can get proper channel info from these IEs and set + * channel_mismatch so that the older RSSI values are used in new entry. + * + * For the cases where DS params and HT info is not present, driver + * needs to check below conditions to get proper channel and set + * channel_mismatch so that the older RSSI values are used in new entry: + * -- The old entry channel and new entry channel are not same + * -- RSSI is less than -80, this indicate that the signal has leaked + * in adjacent channel. + */ + if ((scan_params->frm_subtype == MGMT_SUBTYPE_BEACON) && + !util_scan_entry_htinfo(scan_params) && + !util_scan_entry_ds_param(scan_params) && + (scan_params->channel.chan_idx != scan_entry->channel.chan_idx) && + (scan_params->rssi_raw < ADJACENT_CHANNEL_RSSI_THRESHOLD)) { + scan_params->channel.chan_idx = scan_entry->channel.chan_idx; + scan_params->channel_mismatch = true; + } + + /* Use old value for rssi if beacon was heard on adjacent channel. */ + if (scan_params->channel_mismatch) { + scan_params->rssi_raw = scan_entry->rssi_raw; + scan_params->avg_rssi = scan_entry->avg_rssi; + scan_params->rssi_timestamp = + scan_entry->rssi_timestamp; + } else { + /* If elapsed time since last rssi update for this + * entry is smaller than a thresold, calculate a + * running average of the RSSI values. + * Otherwise new frames RSSI is more representive + * of the signal strength. 
+ */ + time_gap = + scan_params->scan_entry_time - + scan_entry->rssi_timestamp; + if (time_gap > WLAN_RSSI_AVERAGING_TIME) + scan_params->avg_rssi = + WLAN_RSSI_IN(scan_params->rssi_raw); + else { + /* Copy previous average rssi to new entry */ + scan_params->avg_rssi = scan_entry->avg_rssi; + /* Average with previous samples */ + WLAN_RSSI_LPF(scan_params->avg_rssi, + scan_params->rssi_raw); + } + + scan_params->rssi_timestamp = scan_params->scan_entry_time; + } + + /* copy wsn ie from scan_entry to scan_params*/ + scm_update_alt_wcn_ie(scan_entry, scan_params); + + /* copy mlme info from scan_entry to scan_params*/ + scm_update_mlme_info(scan_entry, scan_params); +} + +/** + * scm_find_duplicate() - find duplicate entry, + * if present, add input scan entry before it and delete + * duplicate entry. otherwise add entry to tail + * @pdev: pdev ptr + * @scan_obj: scan obj ptr + * @scan_db: scan db + * @entry: input scan cache entry + * @dup_node: node before which new entry to be added + * + * ref_cnt is taken for dup_node, caller should release ref taken + * if returns true. 
+ * + * Return: bool + */ +static bool +scm_find_duplicate(struct wlan_objmgr_pdev *pdev, + struct wlan_scan_obj *scan_obj, + struct scan_dbs *scan_db, + struct scan_cache_entry *entry, + struct scan_cache_node **dup_node) +{ + uint8_t hash_idx; + struct scan_cache_node *cur_node; + struct scan_cache_node *next_node = NULL; + + hash_idx = SCAN_GET_HASH(entry->bssid.bytes); + + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[hash_idx], + NULL); + + while (cur_node) { + if (util_is_scan_entry_match(entry, + cur_node->entry)) { + scm_copy_info_from_dup_entry(pdev, scan_obj, scan_db, + entry, cur_node); + *dup_node = cur_node; + return true; + } + next_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[hash_idx], cur_node); + cur_node = next_node; + next_node = NULL; + } + + return false; +} + +/** + * scm_add_update_entry() - add or update scan entry + * @psoc: psoc ptr + * @pdev: pdev pointer + * @scan_params: new received entry + * + * Return: QDF_STATUS + */ +static QDF_STATUS scm_add_update_entry(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev, struct scan_cache_entry *scan_params) +{ + struct scan_cache_node *dup_node = NULL; + struct scan_cache_node *scan_node = NULL; + bool is_dup_found = false; + QDF_STATUS status; + struct scan_dbs *scan_db; + struct wlan_scan_obj *scan_obj; + + scan_db = wlan_pdev_get_scan_db(psoc, pdev); + if (!scan_db) { + scm_err("scan_db is NULL"); + return QDF_STATUS_E_INVAL; + } + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("scan_obj is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (scan_params->frm_subtype == + MGMT_SUBTYPE_PROBE_RESP && + !scan_params->ie_list.ssid) + scm_debug("Probe resp doesn't contain SSID"); + + + if (scan_params->ie_list.csa || + scan_params->ie_list.xcsa || + scan_params->ie_list.cswrp) + scm_debug("CSA IE present for BSSID: %pM", + scan_params->bssid.bytes); + + is_dup_found = scm_find_duplicate(pdev, scan_obj, scan_db, scan_params, + 
&dup_node); + + if (scan_obj->cb.inform_beacon) + scan_obj->cb.inform_beacon(pdev, scan_params); + + if (scan_db->num_entries >= MAX_SCAN_CACHE_SIZE) { + status = scm_flush_oldest_entry(scan_db); + if (QDF_IS_STATUS_ERROR(status)) { + /* release ref taken for dup node */ + if (is_dup_found) + scm_scan_entry_put_ref(scan_db, dup_node, true); + return status; + } + } + + scan_node = qdf_mem_malloc(sizeof(*scan_node)); + if (!scan_node) { + /* release ref taken for dup node */ + if (is_dup_found) + scm_scan_entry_put_ref(scan_db, dup_node, true); + return QDF_STATUS_E_NOMEM; + } + + scan_node->entry = scan_params; + qdf_spin_lock_bh(&scan_db->scan_db_lock); + scm_add_scan_node(scan_db, scan_node, dup_node); + + if (is_dup_found) { + /* release ref taken for dup node and delete it */ + scm_scan_entry_del(scan_db, dup_node); + scm_scan_entry_put_ref(scan_db, dup_node, false); + } + qdf_spin_unlock_bh(&scan_db->scan_db_lock); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scm_handle_bcn_probe(struct scheduler_msg *msg) +{ + struct scan_bcn_probe_event *bcn; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev = NULL; + struct scan_cache_entry *scan_entry; + struct wlan_scan_obj *scan_obj; + qdf_list_t *scan_list = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + uint32_t list_count, i; + qdf_list_node_t *next_node = NULL; + struct scan_cache_node *scan_node; + + bcn = msg->bodyptr; + if (!bcn) { + scm_err("bcn is NULL"); + return QDF_STATUS_E_INVAL; + } + if (!bcn->rx_data) { + scm_err("rx_data iS NULL"); + status = QDF_STATUS_E_INVAL; + goto free_nbuf; + } + if (!bcn->buf) { + scm_err("buf is NULL"); + status = QDF_STATUS_E_INVAL; + goto free_nbuf; + } + + psoc = bcn->psoc; + pdev = wlan_objmgr_get_pdev_by_id(psoc, + bcn->rx_data->pdev_id, WLAN_SCAN_ID); + if (!pdev) { + scm_err("pdev is NULL"); + status = QDF_STATUS_E_INVAL; + goto free_nbuf; + } + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("scan_obj is NULL"); + status = 
QDF_STATUS_E_INVAL; + goto free_nbuf; + } + + if (qdf_nbuf_len(bcn->buf) <= + (sizeof(struct wlan_frame_hdr) + + offsetof(struct wlan_bcn_frame, ie))) { + scm_debug("invalid beacon/probe length"); + status = QDF_STATUS_E_INVAL; + goto free_nbuf; + } + + scan_list = + util_scan_unpack_beacon_frame(pdev, qdf_nbuf_data(bcn->buf), + qdf_nbuf_len(bcn->buf), bcn->frm_type, + bcn->rx_data); + if (!scan_list || qdf_list_empty(scan_list)) { + scm_debug("failed to unpack frame"); + status = QDF_STATUS_E_INVAL; + goto free_nbuf; + } + + list_count = qdf_list_size(scan_list); + for (i = 0; i < list_count; i++) { + status = qdf_list_remove_front(scan_list, &next_node); + if (QDF_IS_STATUS_ERROR(status) || next_node == NULL) { + scm_debug("failed to unpack frame"); + status = QDF_STATUS_E_INVAL; + goto free_nbuf; + } + + scan_node = qdf_container_of(next_node, + struct scan_cache_node, node); + + scan_entry = scan_node->entry; + + if (scan_obj->drop_bcn_on_chan_mismatch && + scan_entry->channel_mismatch) { + scm_debug("Drop frame, as channel mismatch Received for from BSSID: %pM Seq Num: %d", + scan_entry->bssid.bytes, + scan_entry->seq_num); + util_scan_free_cache_entry(scan_entry); + qdf_mem_free(scan_node); + continue; + } + + scm_nofl_debug("Received %s from BSSID: %pM tsf_delta = %u Seq Num: %d ssid:%.*s, rssi: %d channel %d", + (bcn->frm_type == MGMT_SUBTYPE_PROBE_RESP) ? 
+ "Probe Rsp" : "Beacon", scan_entry->bssid.bytes, + scan_entry->tsf_delta, scan_entry->seq_num, + scan_entry->ssid.length, scan_entry->ssid.ssid, + scan_entry->rssi_raw, + scan_entry->channel.chan_idx); + + if (scan_obj->cb.update_beacon) + scan_obj->cb.update_beacon(pdev, scan_entry); + + if (wlan_reg_11d_enabled_on_host(psoc)) + scm_11d_handle_country_info(psoc, pdev, scan_entry); + + status = scm_add_update_entry(psoc, pdev, scan_entry); + if (QDF_IS_STATUS_ERROR(status)) { + scm_debug("failed to add entry for BSSID: %pM Seq Num: %d", + scan_entry->bssid.bytes, + scan_entry->seq_num); + util_scan_free_cache_entry(scan_entry); + qdf_mem_free(scan_node); + continue; + } + + qdf_mem_free(scan_node); + } + +free_nbuf: + if (scan_list) + qdf_mem_free(scan_list); + if (bcn->psoc) + wlan_objmgr_psoc_release_ref(bcn->psoc, WLAN_SCAN_ID); + if (pdev) + wlan_objmgr_pdev_release_ref(pdev, WLAN_SCAN_ID); + if (bcn->rx_data) + qdf_mem_free(bcn->rx_data); + if (bcn->buf) + qdf_nbuf_free(bcn->buf); + qdf_mem_free(bcn); + + return status; +} + +/** + * scm_list_insert_sorted() - add the entries in scan_list in sorted way + * @psoc: psoc ptr + * @filter: scan filter + * @scan_node: node entry to be inserted + * @scan_list: Temp scan list + * + * Add the entries in scan_list in sorted way considering + * cap_val and prefer val. The node is copy of original scan entry and + * thus no lock is required. 
+ * + * Return: void + */ +static void scm_list_insert_sorted(struct wlan_objmgr_psoc *psoc, + struct scan_filter *filter, + struct scan_cache_node *scan_node, + qdf_list_t *scan_list) +{ + struct scan_cache_node *cur_node; + qdf_list_node_t *cur_lst = NULL, *next_lst = NULL; + struct scan_default_params *params; + int pcl_chan_weight = 0; + + params = wlan_scan_psoc_get_def_params(psoc); + if (!params) { + scm_err("wlan_scan_psoc_get_def_params failed"); + return; + } + + if (filter->num_of_pcl_channels > 0 && + (scan_node->entry->rssi_raw > SCM_PCL_RSSI_THRESHOLD)) { + if (scm_get_pcl_weight_of_channel( + scan_node->entry->channel.chan_idx, + filter, &pcl_chan_weight, + filter->pcl_weight_list)) { + scm_debug("pcl channel %d pcl_chan_weight %d", + scan_node->entry->channel.chan_idx, + pcl_chan_weight); + } + } + if (params->is_bssid_hint_priority && + !qdf_mem_cmp(filter->bssid_hint.bytes, + scan_node->entry->bssid.bytes, + QDF_MAC_ADDR_SIZE)) + scan_node->entry->bss_score = BEST_CANDIDATE_MAX_BSS_SCORE; + else + scm_calculate_bss_score(psoc, params, + scan_node->entry, pcl_chan_weight); + + if (qdf_list_empty(scan_list)) { + qdf_list_insert_front(scan_list, &scan_node->node); + return; + } + + qdf_list_peek_front(scan_list, &cur_lst); + + while (cur_lst) { + cur_node = qdf_container_of(cur_lst, + struct scan_cache_node, node); + if (scm_is_better_bss(params, + scan_node->entry, cur_node->entry)) { + qdf_list_insert_before(scan_list, + &scan_node->node, + &cur_node->node); + break; + } + qdf_list_peek_next(scan_list, + cur_lst, &next_lst); + cur_lst = next_lst; + next_lst = NULL; + } + + if (!cur_lst) + qdf_list_insert_back(scan_list, + &scan_node->node); + +} + +/** + * scm_scan_apply_filter_get_entry() - apply filter and get the + * scan entry + * @psoc: psoc pointer + * @db_entry: scan entry + * @filter: filter to be applied + * @scan_list: scan list to which entry is added + * + * Return: QDF_STATUS + */ +static QDF_STATUS 
+scm_scan_apply_filter_get_entry(struct wlan_objmgr_psoc *psoc, + struct scan_cache_entry *db_entry, + struct scan_filter *filter, + qdf_list_t *scan_list) +{ + struct scan_cache_node *scan_node = NULL; + struct security_info security = {0}; + bool match; + + if (!filter) + match = true; + else + match = scm_filter_match(psoc, db_entry, + filter, &security); + + if (!match) + return QDF_STATUS_SUCCESS; + + scan_node = qdf_mem_malloc_atomic(sizeof(*scan_node)); + if (!scan_node) + return QDF_STATUS_E_NOMEM; + + scan_node->entry = + util_scan_copy_cache_entry(db_entry); + + if (!scan_node->entry) { + qdf_mem_free(scan_node); + return QDF_STATUS_E_NOMEM; + } + + qdf_mem_copy(&scan_node->entry->neg_sec_info, + &security, sizeof(scan_node->entry->neg_sec_info)); + + if (!filter || !filter->bss_scoring_required) + qdf_list_insert_front(scan_list, + &scan_node->node); + else + scm_list_insert_sorted(psoc, filter, scan_node, scan_list); + + return QDF_STATUS_SUCCESS; +} + +/** + * scm_get_results() - Iterate and get scan results + * @psoc: psoc ptr + * @scan_db: scan db + * @filter: filter to be applied + * @scan_list: scan list to which entry is added + * + * Return: void + */ +static void scm_get_results(struct wlan_objmgr_psoc *psoc, + struct scan_dbs *scan_db, struct scan_filter *filter, + qdf_list_t *scan_list) +{ + int i, count; + struct scan_cache_node *cur_node; + struct scan_cache_node *next_node = NULL; + + for (i = 0 ; i < SCAN_HASH_SIZE; i++) { + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], NULL); + count = qdf_list_size(&scan_db->scan_hash_tbl[i]); + if (!count) + continue; + while (cur_node) { + scm_scan_apply_filter_get_entry(psoc, + cur_node->entry, filter, scan_list); + next_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], cur_node); + cur_node = next_node; + } + } +} + +QDF_STATUS scm_purge_scan_results(qdf_list_t *scan_list) +{ + QDF_STATUS status; + struct scan_cache_node *cur_node; + qdf_list_node_t *cur_lst = 
NULL, *next_lst = NULL; + + if (!scan_list) { + scm_err("scan_result is NULL"); + return QDF_STATUS_E_INVAL; + } + + status = qdf_list_peek_front(scan_list, &cur_lst); + + while (cur_lst) { + qdf_list_peek_next( + scan_list, cur_lst, &next_lst); + cur_node = qdf_container_of(cur_lst, + struct scan_cache_node, node); + status = qdf_list_remove_node(scan_list, + cur_lst); + if (QDF_IS_STATUS_SUCCESS(status)) { + util_scan_free_cache_entry(cur_node->entry); + qdf_mem_free(cur_node); + } + cur_lst = next_lst; + next_lst = NULL; + } + + qdf_list_destroy(scan_list); + qdf_mem_free(scan_list); + + return status; +} + +qdf_list_t *scm_get_scan_result(struct wlan_objmgr_pdev *pdev, + struct scan_filter *filter) +{ + struct wlan_objmgr_psoc *psoc; + struct scan_dbs *scan_db; + qdf_list_t *tmp_list; + + if (!pdev) { + scm_err("pdev is NULL"); + return NULL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + scm_err("psoc is NULL"); + return NULL; + } + + scan_db = wlan_pdev_get_scan_db(psoc, pdev); + if (!scan_db) { + scm_err("scan_db is NULL"); + return NULL; + } + + tmp_list = qdf_mem_malloc_atomic(sizeof(*tmp_list)); + if (!tmp_list) { + scm_err("failed tp allocate scan_result"); + return NULL; + } + qdf_list_create(tmp_list, + MAX_SCAN_CACHE_SIZE); + scm_age_out_entries(psoc, scan_db); + scm_get_results(psoc, scan_db, filter, tmp_list); + + return tmp_list; +} + +/** + * scm_iterate_db_and_call_func() - iterate and call the func + * @scan_db: scan db + * @func: func to be called + * @arg: func arg + * + * Return: QDF_STATUS + */ +static QDF_STATUS +scm_iterate_db_and_call_func(struct scan_dbs *scan_db, + scan_iterator_func func, void *arg) +{ + int i; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct scan_cache_node *cur_node; + struct scan_cache_node *next_node = NULL; + + if (!func) + return QDF_STATUS_E_INVAL; + + for (i = 0 ; i < SCAN_HASH_SIZE; i++) { + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], NULL); + while (cur_node) { + status = 
func(arg, cur_node->entry); + if (QDF_IS_STATUS_ERROR(status)) { + scm_scan_entry_put_ref(scan_db, + cur_node, true); + return status; + } + next_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], cur_node); + cur_node = next_node; + } + } + + return status; +} + +QDF_STATUS +scm_iterate_scan_db(struct wlan_objmgr_pdev *pdev, + scan_iterator_func func, void *arg) +{ + struct wlan_objmgr_psoc *psoc; + struct scan_dbs *scan_db; + QDF_STATUS status; + + if (!func) { + scm_err("func is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (!pdev) { + scm_err("pdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + scm_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + scan_db = wlan_pdev_get_scan_db(psoc, pdev); + if (!scan_db) { + scm_err("scan_db is NULL"); + return QDF_STATUS_E_INVAL; + } + + scm_age_out_entries(psoc, scan_db); + status = scm_iterate_db_and_call_func(scan_db, func, arg); + + return status; +} + +/** + * scm_scan_apply_filter_flush_entry() -flush scan entries depending + * on filter + * @psoc: psoc ptr + * @scan_db: scan db + * @db_node: node on which filters are applied + * @filter: filter to be applied + * + * Return: QDF_STATUS + */ +static QDF_STATUS +scm_scan_apply_filter_flush_entry(struct wlan_objmgr_psoc *psoc, + struct scan_dbs *scan_db, + struct scan_cache_node *db_node, + struct scan_filter *filter) +{ + struct security_info security = {0}; + bool match; + + if (!filter) + match = true; + else + match = scm_filter_match(psoc, db_node->entry, + filter, &security); + + if (!match) + return QDF_STATUS_SUCCESS; + + qdf_spin_lock_bh(&scan_db->scan_db_lock); + scm_scan_entry_del(scan_db, db_node); + qdf_spin_unlock_bh(&scan_db->scan_db_lock); + + return QDF_STATUS_SUCCESS; +} + +/** + * scm_flush_scan_entries() - API to flush scan entries depending on filters + * @psoc: psoc ptr + * @scan_db: scan db + * @filter: filter + * + * Return: void + */ +static void scm_flush_scan_entries(struct 
wlan_objmgr_psoc *psoc, + struct scan_dbs *scan_db, + struct scan_filter *filter) +{ + int i; + struct scan_cache_node *cur_node; + struct scan_cache_node *next_node = NULL; + + for (i = 0 ; i < SCAN_HASH_SIZE; i++) { + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], NULL); + while (cur_node) { + scm_scan_apply_filter_flush_entry(psoc, scan_db, + cur_node, filter); + next_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], cur_node); + cur_node = next_node; + } + } +} + +QDF_STATUS scm_flush_results(struct wlan_objmgr_pdev *pdev, + struct scan_filter *filter) +{ + struct wlan_objmgr_psoc *psoc; + struct scan_dbs *scan_db; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!pdev) { + scm_err("pdev is NULL"); + return QDF_STATUS_E_INVAL; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + scm_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + scan_db = wlan_pdev_get_scan_db(psoc, pdev); + if (!scan_db) { + scm_err("scan_db is NULL"); + return QDF_STATUS_E_INVAL; + } + + scm_flush_scan_entries(psoc, scan_db, filter); + + return status; +} + +/** + * scm_filter_channels() - Remove entries not belonging to channel list + * @scan_db: scan db + * @db_node: node on which filters are applied + * @chan_list: valid channel list + * @num_chan: number of channels + * + * Return: QDF_STATUS + */ +static void scm_filter_channels(struct scan_dbs *scan_db, + struct scan_cache_node *db_node, + uint8_t *chan_list, uint32_t num_chan) +{ + int i; + bool match = false; + + for (i = 0; i < num_chan; i++) { + if (chan_list[i] == + util_scan_entry_channel_num(db_node->entry)) { + match = true; + break; + } + } + + if (!match) { + qdf_spin_lock_bh(&scan_db->scan_db_lock); + scm_scan_entry_del(scan_db, db_node); + qdf_spin_unlock_bh(&scan_db->scan_db_lock); + } +} + +void scm_filter_valid_channel(struct wlan_objmgr_pdev *pdev, + uint8_t *chan_list, uint32_t num_chan) +{ + int i; + struct wlan_objmgr_psoc *psoc; + struct scan_dbs *scan_db; + struct 
scan_cache_node *cur_node; + struct scan_cache_node *next_node = NULL; + + scm_debug("num_chan = %d", num_chan); + + if (!pdev) { + scm_err("pdev is NULL"); + return; + } + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + scm_err("psoc is NULL"); + return; + } + + scan_db = wlan_pdev_get_scan_db(psoc, pdev); + if (!scan_db) { + scm_err("scan_db is NULL"); + return; + } + + for (i = 0 ; i < SCAN_HASH_SIZE; i++) { + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], NULL); + while (cur_node) { + scm_filter_channels(scan_db, + cur_node, chan_list, num_chan); + next_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[i], cur_node); + cur_node = next_node; + } + } +} + +QDF_STATUS scm_scan_register_bcn_cb(struct wlan_objmgr_psoc *psoc, + update_beacon_cb cb, enum scan_cb_type type) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("scan obj is NULL"); + return QDF_STATUS_E_INVAL; + } + switch (type) { + case SCAN_CB_TYPE_INFORM_BCN: + scan_obj->cb.inform_beacon = cb; + break; + case SCAN_CB_TYPE_UPDATE_BCN: + scan_obj->cb.update_beacon = cb; + break; + case SCAN_CB_TYPE_UNLINK_BSS: + scan_obj->cb.unlink_bss = cb; + break; + default: + scm_err("invalid cb type %d", type); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scm_db_init(struct wlan_objmgr_psoc *psoc) +{ + int i, j; + struct scan_dbs *scan_db; + + if (!psoc) { + scm_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + + /* Initialize the scan database per pdev */ + for (i = 0; i < WLAN_UMAC_MAX_PDEVS; i++) { + scan_db = wlan_pdevid_get_scan_db(psoc, i); + if (!scan_db) { + scm_err("scan_db is NULL %d", i); + continue; + } + scan_db->num_entries = 0; + qdf_spinlock_create(&scan_db->scan_db_lock); + for (j = 0; j < SCAN_HASH_SIZE; j++) + qdf_list_create(&scan_db->scan_hash_tbl[j], + MAX_SCAN_CACHE_SIZE); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scm_db_deinit(struct wlan_objmgr_psoc *psoc) +{ + int i, j; + 
struct scan_dbs *scan_db; + + if (!psoc) { + scm_err("scan obj is NULL"); + return QDF_STATUS_E_INVAL; + } + + /* Initialize the scan database per pdev */ + for (i = 0; i < WLAN_UMAC_MAX_PDEVS; i++) { + scan_db = wlan_pdevid_get_scan_db(psoc, i); + if (!scan_db) { + scm_err("scan_db is NULL %d", i); + continue; + } + + scm_flush_scan_entries(psoc, scan_db, NULL); + for (j = 0; j < SCAN_HASH_SIZE; j++) + qdf_list_destroy(&scan_db->scan_hash_tbl[j]); + qdf_spinlock_destroy(&scan_db->scan_db_lock); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS scm_update_scan_mlme_info(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *entry) +{ + uint8_t hash_idx; + struct scan_dbs *scan_db; + struct scan_cache_node *cur_node; + struct scan_cache_node *next_node = NULL; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + scm_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + scan_db = wlan_pdev_get_scan_db(psoc, pdev); + if (!scan_db) { + scm_err("scan_db is NULL"); + return QDF_STATUS_E_INVAL; + } + + hash_idx = SCAN_GET_HASH(entry->bssid.bytes); + + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[hash_idx], NULL); + + while (cur_node) { + if (util_is_scan_entry_match(entry, + cur_node->entry)) { + /* Acquire db lock to prevent simultaneous update */ + qdf_spin_lock_bh(&scan_db->scan_db_lock); + scm_update_mlme_info(entry, cur_node->entry); + qdf_spin_unlock_bh(&scan_db->scan_db_lock); + scm_scan_entry_put_ref(scan_db, + cur_node, true); + return QDF_STATUS_SUCCESS; + } + next_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[hash_idx], cur_node); + cur_node = next_node; + } + + return QDF_STATUS_E_INVAL; +} + +QDF_STATUS scm_scan_update_mlme_by_bssinfo(struct wlan_objmgr_pdev *pdev, + struct bss_info *bss_info, struct mlme_info *mlme) +{ + uint8_t hash_idx; + struct scan_dbs *scan_db; + struct scan_cache_node *cur_node; + struct scan_cache_node *next_node = NULL; + struct wlan_objmgr_psoc *psoc; + struct 
scan_cache_entry *entry; + + psoc = wlan_pdev_get_psoc(pdev); + if (!psoc) { + scm_err("psoc is NULL"); + return QDF_STATUS_E_INVAL; + } + scan_db = wlan_pdev_get_scan_db(psoc, pdev); + if (!scan_db) { + scm_err("scan_db is NULL"); + return QDF_STATUS_E_INVAL; + } + + hash_idx = SCAN_GET_HASH(bss_info->bssid.bytes); + cur_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[hash_idx], NULL); + while (cur_node) { + entry = cur_node->entry; + if (qdf_is_macaddr_equal(&bss_info->bssid, &entry->bssid) && + (util_is_ssid_match(&bss_info->ssid, &entry->ssid)) && + (bss_info->chan == entry->channel.chan_idx)) { + /* Acquire db lock to prevent simultaneous update */ + qdf_spin_lock_bh(&scan_db->scan_db_lock); + qdf_mem_copy(&entry->mlme_info, mlme, + sizeof(struct mlme_info)); + scm_scan_entry_put_ref(scan_db, + cur_node, false); + qdf_spin_unlock_bh(&scan_db->scan_db_lock); + return QDF_STATUS_SUCCESS; + } + next_node = scm_get_next_node(scan_db, + &scan_db->scan_hash_tbl[hash_idx], cur_node); + cur_node = next_node; + } + + return QDF_STATUS_E_INVAL; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db.h new file mode 100644 index 0000000000000000000000000000000000000000..6955beee39fe566985292418b88786f2e1530d23 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db.h @@ -0,0 +1,210 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan cache entry api + */ + +#ifndef _WLAN_SCAN_CACHE_DB_H_ +#define _WLAN_SCAN_CACHE_DB_H_ + +#include +#include +#include +#include +#include + +#define SCAN_HASH_SIZE 64 +#define SCAN_GET_HASH(addr) \ + (((const uint8_t *)(addr))[QDF_MAC_ADDR_SIZE - 1] % SCAN_HASH_SIZE) + +#define SCM_PCL_RSSI_THRESHOLD -75 +#define BEST_CANDIDATE_MAX_BSS_SCORE 10000 + +#define ADJACENT_CHANNEL_RSSI_THRESHOLD -80 + +/** + * struct scan_dbs - scan cache data base definition + * @num_entries: number of scan entries + * @scan_hash_tbl: link list of bssid hashed scan cache entries for a pdev + */ +struct scan_dbs { + uint32_t num_entries; + qdf_spinlock_t scan_db_lock; + qdf_list_t scan_hash_tbl[SCAN_HASH_SIZE]; +}; + +/** + * struct scan_bcn_probe_event - beacon/probe info + * @frm_type: frame type + * @rx_data: mgmt rx data + * @psoc: psoc pointer + * @buf: rx frame + */ +struct scan_bcn_probe_event { + uint32_t frm_type; + struct mgmt_rx_event_params *rx_data; + struct wlan_objmgr_psoc *psoc; + qdf_nbuf_t buf; +}; + +/** + * scm_handle_bcn_probe() - Process beacon and probe rsp + * @bcn: beacon info; + * + * API to handle the beacon/probe resp + * + * Return: QDF status. + */ +QDF_STATUS scm_handle_bcn_probe(struct scheduler_msg *msg); + +/** + * scm_age_out_entries() - Age out entries older than aging time + * @psoc: psoc pointer + * @scan_db: scan database + * + * Return: void. 
+ */ +void scm_age_out_entries(struct wlan_objmgr_psoc *psoc, + struct scan_dbs *scan_db); + +/** + * scm_get_scan_result() - fetches scan result + * @pdev: pdev info + * @filter: Filters + * + * This function fetches scan result + * + * Return: scan list + */ +qdf_list_t *scm_get_scan_result(struct wlan_objmgr_pdev *pdev, + struct scan_filter *filter); + +/** + * scm_purge_scan_results() - purge the scan list + * @scan_result: scan list to be purged + * + * This function purge the temp scan list + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_purge_scan_results(qdf_list_t *scan_result); + +/** + * scm_update_scan_mlme_info() - updates scan entry with mlme data + * @pdev: pdev object + * @scan_entry: source scan entry to read mlme info + * + * This function updates scan db with scan_entry->mlme_info + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_update_scan_mlme_info(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_entry); + +/** + * scm_flush_results() - flush scan entries matching the filter + * @pdev: pdev object + * @filter: filter to flush the scan entries + * + * Flush scan entries matching the filter. + * + * Return: QDF status. + */ +QDF_STATUS scm_flush_results(struct wlan_objmgr_pdev *pdev, + struct scan_filter *filter); + +/** + * scm_filter_valid_channel() - The Public API to filter scan result + * based on valid channel list + * @pdev: pdev object + * @chan_list: valid channel list + * @num_chan: number of valid channels + * + * The Public API to filter scan result + * based on valid channel list. + * + * Return: void. + */ +void scm_filter_valid_channel(struct wlan_objmgr_pdev *pdev, + uint8_t *chan_list, uint32_t num_chan); + +/** + * scm_iterate_scan_db() - function to iterate scan table + * @pdev: pdev object + * @func: iterator function pointer + * @arg: argument to be passed to func() + * + * API, this API iterates scan table and invokes func + * on each scan entry by passing scan entry and arg.
+ * + * Return: QDF_STATUS + */ +QDF_STATUS +scm_iterate_scan_db(struct wlan_objmgr_pdev *pdev, + scan_iterator_func func, void *arg); + +/** + * scm_scan_register_bcn_cb() - API to register api to indicate bcn/probe + * as soon as they are received + * @psoc: psoc object + * @cb: callback to be registered + * @type: Type of callback to be registered + * + * Return: enum scm_scan_status + */ +QDF_STATUS scm_scan_register_bcn_cb(struct wlan_objmgr_psoc *psoc, + update_beacon_cb cb, enum scan_cb_type type); + +/** + * scm_db_init() - API to init scan db + * @psoc: psoc + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_db_init(struct wlan_objmgr_psoc *psoc); + +/** + * scm_db_deinit() - API to deinit scan db + * @psoc: psoc + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_db_deinit(struct wlan_objmgr_psoc *psoc); + +/** + * scm_validate_scoring_config() - validate score config + * @score_cfg: config to be validated + * + * Return: void + */ +void scm_validate_scoring_config( + struct scoring_config *score_cfg); + +/** + * scm_scan_update_mlme_by_bssinfo() - updates scan entry with mlme data + * @pdev: pdev object + * @bss_info: BSS information + * + * This function updates scan db with scan_entry->mlme_info + * + * Return: QDF_STATUS + */ +QDF_STATUS scm_scan_update_mlme_by_bssinfo(struct wlan_objmgr_pdev *pdev, + struct bss_info *bss_info, struct mlme_info *mlme); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db_i.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db_i.h new file mode 100644 index 0000000000000000000000000000000000000000..d0c91c4e2e1594bd4cd660a42fa474a2d814f899 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_cache_db_i.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan internal api + */ + +#ifndef _WLAN_SCAN_CACHE_DB_I_H_ +#define _WLAN_SCAN_CACHE_DB_I_H_ + +/** + * scm_filter_match() - private API to check if entry is match to filter + * psoc: psoc ptr; + * @db_entry: db entry + * @filter: filter + * @security: negotiated security if match is found + * + * Return: true if entry match filter + */ +bool scm_filter_match(struct wlan_objmgr_psoc *psoc, + struct scan_cache_entry *db_entry, + struct scan_filter *filter, + struct security_info *security); + +/** + * scm_is_better_bss() - Is bss1 better than bss2 + * @params: scan params + * @bss1: Pointer to the first BSS. + * @bss2: Pointer to the second BSS. + * + * This routine helps in determining the preference value + * of a particular BSS in the scan result which is further + * used in the sorting logic of the final candidate AP's. + * + * Return: true, if bss1 is better than bss2 + * false, if bss2 is better than bss1. 
+ */ +bool scm_is_better_bss(struct scan_default_params *params, + struct scan_cache_entry *bss1, + struct scan_cache_entry *bss2); + +/** + * scm_calculate_bss_score() - calculate BSS score used to get + * the preference + * @psoc: psoc ptr; + * @params: scan params + * @entry: scan entry for which score needs to be calculated + * @pcl_chan_weight: weight for pcl channel + * + * Return: scan db for the pdev id + */ +int scm_calculate_bss_score( + struct wlan_objmgr_psoc *psoc, + struct scan_default_params *params, + struct scan_cache_entry *entry, + int pcl_chan_weight); + +/** + * wlan_pdevid_get_scan_db() - private API to get scan db from pdev id + * @psoc: psoc object + * @pdev_id: Pdev_id + * Return: scan db for the pdev id + */ +static inline struct scan_dbs * +wlan_pdevid_get_scan_db(struct wlan_objmgr_psoc *psoc, uint8_t pdev_id) +{ + struct wlan_scan_obj *scan_obj = NULL; + + if (pdev_id > WLAN_UMAC_MAX_PDEVS) { + scm_err("invalid pdev_id %d", pdev_id); + return NULL; + } + scan_obj = wlan_psoc_get_scan_obj(psoc); + + if (!scan_obj) + return NULL; + + return &(scan_obj->scan_db[pdev_id]); +} + +/** + * wlan_pdev_get_scan_db() - private API to get scan db from pdev + * @psoc: psoc object + * @pdev: Pdev + * + * Return: scan db for the pdev + */ +static inline struct scan_dbs * +wlan_pdev_get_scan_db(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_pdev *pdev) +{ + uint8_t pdev_id; + + if (!pdev) { + scm_err("pdev is NULL"); + return NULL; + } + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + + return wlan_pdevid_get_scan_db(psoc, pdev_id); +} + +/** + * scm_get_pcl_weight_of_channel() - Get PCL weight if channel is present in pcl + * @channel_id: channel of bss + * @filter: filter + * @pcl_chan_weight: Get PCL weight for corresponding channel + * @weight_list: Weight list for all the pcl channels. 
+ * + * Get pcl_chan_weight if provided channel is present in pcl list + * + * Return: true or false + */ +bool scm_get_pcl_weight_of_channel(int channel_id, + struct scan_filter *filter, + int *pcl_chan_weight, + uint8_t *weight_list); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_filter.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_filter.c new file mode 100644 index 0000000000000000000000000000000000000000..a32d2c200df03a6faf999d2b14321ddbc8be044e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_filter.c @@ -0,0 +1,1219 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/* + * DOC: contains scan cache filter logic + */ + +#include +#include "wlan_scan_main.h" +#include "wlan_scan_cache_db_i.h" + +/** + * scm_is_open_security() - Check if scan entry support open security + * @filter: scan filter + * @db_entry: db entry + * @security: matched security. 
+ * + * Return: true if open security else false + */ +static bool scm_is_open_security(struct scan_filter *filter, + struct scan_cache_entry *db_entry, + struct security_info *security) +{ + bool match = false; + int i; + + if (db_entry->cap_info.wlan_caps.privacy) + return false; + + /* Check MC cipher and Auth type requested. */ + for (i = 0; i < filter->num_of_mc_enc_type; i++) { + if (WLAN_ENCRYPT_TYPE_NONE == + filter->mc_enc_type[i]) { + security->mc_enc = + filter->mc_enc_type[i]; + match = true; + break; + } + } + if (!match && filter->num_of_mc_enc_type) + return match; + + match = false; + /* Check Auth list. It should contain AuthOpen. */ + for (i = 0; i < filter->num_of_auth; i++) { + if ((WLAN_AUTH_TYPE_OPEN_SYSTEM == + filter->auth_type[i]) || + (WLAN_AUTH_TYPE_AUTOSWITCH == + filter->auth_type[i])) { + security->auth_type = + WLAN_AUTH_TYPE_OPEN_SYSTEM; + match = true; + break; + } + } + + return match; +} + +/** + * scm_is_cipher_match() - Check if cipher match the cipher list + * @cipher_list: cipher list to match + * @num_cipher: number of cipher in cipher list + * @cipher_to_match: cipher to found in cipher list + * + * Return: true if open security else false + */ +static bool scm_is_cipher_match( + uint32_t *cipher_list, + uint16_t num_cipher, uint32_t cipher_to_match) +{ + int i; + bool match = false; + + for (i = 0; i < num_cipher ; i++) { + match = (cipher_list[i] == cipher_to_match); + if (match) + break; + } + + return match; +} + +/** + * scm_get_cipher_suite_type() - get cypher suite type from enc type + * @enc: enc type + * + * Return: cypher suite type + */ +static uint8_t scm_get_cipher_suite_type(enum wlan_enc_type enc) +{ + uint8_t cipher_type; + + switch (enc) { + case WLAN_ENCRYPT_TYPE_WEP40: + case WLAN_ENCRYPT_TYPE_WEP40_STATICKEY: + cipher_type = WLAN_CSE_WEP40; + break; + case WLAN_ENCRYPT_TYPE_WEP104: + case WLAN_ENCRYPT_TYPE_WEP104_STATICKEY: + cipher_type = WLAN_CSE_WEP104; + break; + case WLAN_ENCRYPT_TYPE_TKIP: + 
cipher_type = WLAN_CSE_TKIP; + break; + case WLAN_ENCRYPT_TYPE_AES: + cipher_type = WLAN_CSE_CCMP; + break; + case WLAN_ENCRYPT_TYPE_AES_GCMP: + cipher_type = WLAN_CSE_GCMP_128; + break; + case WLAN_ENCRYPT_TYPE_AES_GCMP_256: + cipher_type = WLAN_CSE_GCMP_256; + break; + case WLAN_ENCRYPT_TYPE_NONE: + cipher_type = WLAN_CSE_NONE; + break; + case WLAN_ENCRYPT_TYPE_WPI: + cipher_type = WLAN_WAI_CERT_OR_SMS4; + break; + default: + cipher_type = WLAN_CSE_RESERVED; + break; + } + + return cipher_type; +} + +/** + * scm_is_wep_security() - Check if scan entry support WEP security + * @filter: scan filter + * @db_entry: db entry + * @security: matched security. + * + * Return: true if WEP security else false + */ +static bool scm_is_wep_security(struct scan_filter *filter, + struct scan_cache_entry *db_entry, + struct security_info *security) +{ + int i; + QDF_STATUS status; + bool match = false; + enum wlan_auth_type neg_auth = WLAN_AUTH_TYPE_OPEN_SYSTEM; + enum wlan_enc_type neg_mccipher = WLAN_ENCRYPT_TYPE_NONE; + + if (!security) + return false; + + /* If privacy bit is not set, consider no match */ + if (!db_entry->cap_info.wlan_caps.privacy) + return false; + + for (i = 0; i < filter->num_of_mc_enc_type; i++) { + switch (filter->mc_enc_type[i]) { + case WLAN_ENCRYPT_TYPE_WEP40_STATICKEY: + case WLAN_ENCRYPT_TYPE_WEP104_STATICKEY: + case WLAN_ENCRYPT_TYPE_WEP40: + case WLAN_ENCRYPT_TYPE_WEP104: + /* + * Multicast list may contain WEP40/WEP104. + * Check whether it matches UC. 
+ */ + if (security->uc_enc == + filter->mc_enc_type[i]) { + match = true; + neg_mccipher = + filter->mc_enc_type[i]; + } + break; + default: + match = false; + break; + } + if (match) + break; + } + + if (!match) + return match; + + for (i = 0; i < filter->num_of_auth; i++) { + switch (filter->auth_type[i]) { + case WLAN_AUTH_TYPE_OPEN_SYSTEM: + case WLAN_AUTH_TYPE_SHARED_KEY: + case WLAN_AUTH_TYPE_AUTOSWITCH: + match = true; + neg_auth = filter->auth_type[i]; + break; + default: + match = false; + } + if (match) + break; + } + + if (!match) + return match; + + /* + * In case of WPA / WPA2, check whether it supports WEP as well. + * Prepare the encryption type for WPA/WPA2 functions + */ + if (security->uc_enc == WLAN_ENCRYPT_TYPE_WEP40_STATICKEY) + security->uc_enc = WLAN_ENCRYPT_TYPE_WEP40; + else if (security->uc_enc == WLAN_ENCRYPT_TYPE_WEP104) + security->uc_enc = WLAN_ENCRYPT_TYPE_WEP104; + + /* else we can use the encryption type directly */ + if (util_scan_entry_wpa(db_entry)) { + struct wlan_wpa_ie wpa = {0}; + uint8_t cipher_type; + + cipher_type = + scm_get_cipher_suite_type(security->uc_enc); + status = wlan_parse_wpa_ie(util_scan_entry_wpa(db_entry), &wpa); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("failed to parse WPA IE, status %d", status); + scm_hex_dump(QDF_TRACE_LEVEL_DEBUG, + util_scan_entry_wpa(db_entry), + util_scan_get_wpa_len(db_entry)); + return false; + } + + match = scm_is_cipher_match(&wpa.mc_cipher, + 1, WLAN_WPA_SEL(cipher_type)); + } + if (!match && util_scan_entry_rsn(db_entry)) { + struct wlan_rsn_ie rsn = {0}; + uint8_t cipher_type; + + cipher_type = + scm_get_cipher_suite_type(security->uc_enc); + status = wlan_parse_rsn_ie(util_scan_entry_rsn(db_entry), &rsn); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("failed to parse RSN IE, status %d", status); + scm_hex_dump(QDF_TRACE_LEVEL_DEBUG, + util_scan_entry_rsn(db_entry), + util_scan_get_rsn_len(db_entry)); + return false; + } + match = 
scm_is_cipher_match(&rsn.gp_cipher_suite, + 1, WLAN_RSN_SEL(cipher_type)); + } + + + if (match) { + security->auth_type = neg_auth; + security->mc_enc = neg_mccipher; + } + + return match; +} + +/** + * scm_check_pmf_match() - Check PMF security of entry match filter + * @filter: scan filter + * @db_entry: ap entry + * @rsn: rsn IE of the scan entry + * + * Return: true if PMF security match else false + */ +static bool +scm_check_pmf_match(struct scan_filter *filter, + struct scan_cache_entry *db_entry, + struct wlan_rsn_ie *rsn) +{ + enum wlan_pmf_cap ap_pmf_cap = WLAN_PMF_DISABLED; + bool match = true; + + if (rsn->cap & RSN_CAP_MFP_CAPABLE) + ap_pmf_cap = WLAN_PMF_CAPABLE; + if (rsn->cap & RSN_CAP_MFP_REQUIRED) + ap_pmf_cap = WLAN_PMF_REQUIRED; + + if ((filter->pmf_cap == WLAN_PMF_REQUIRED) && + (ap_pmf_cap == WLAN_PMF_DISABLED)) + match = false; + else if ((filter->pmf_cap == WLAN_PMF_DISABLED) && + (ap_pmf_cap == WLAN_PMF_REQUIRED)) + match = false; + + if (!match) + scm_debug("%pM : PMF cap didn't match (filter %d AP %d)", + db_entry->bssid.bytes, filter->pmf_cap, + ap_pmf_cap); + + return match; +} + +/** + * scm_is_rsn_mcast_cipher_match() - match the rsn mcast cipher type with AP's + * mcast cipher + * @rsn: AP's RSNE + * @filter: scan filter + * @neg_mccipher: negotiated mc cipher if matched. + * + * Return: true if mc cipher is negotiated + */ +static bool +scm_is_rsn_mcast_cipher_match(struct wlan_rsn_ie *rsn, + struct scan_filter *filter, enum wlan_enc_type *neg_mccipher) +{ + int i; + bool match; + uint8_t cipher_type; + + if (!rsn || !neg_mccipher || !filter) + return false; + + for (i = 0; i < filter->num_of_mc_enc_type; i++) { + + if (filter->mc_enc_type[i] == WLAN_ENCRYPT_TYPE_ANY) { + /* Try the more secured ones first. 
*/ + /* Check GCMP_256 first */ + cipher_type = WLAN_CSE_GCMP_256; + match = scm_is_cipher_match(&rsn->gp_cipher_suite, 1, + WLAN_RSN_SEL(cipher_type)); + if (match) { + *neg_mccipher = WLAN_ENCRYPT_TYPE_AES_GCMP_256; + return true; + } + /* Check GCMP */ + cipher_type = WLAN_CSE_GCMP_128; + match = scm_is_cipher_match(&rsn->gp_cipher_suite, 1, + WLAN_RSN_SEL(cipher_type)); + if (match) { + *neg_mccipher = WLAN_ENCRYPT_TYPE_AES_GCMP; + return true; + } + /* Check AES */ + cipher_type = WLAN_CSE_CCMP; + match = scm_is_cipher_match(&rsn->gp_cipher_suite, 1, + WLAN_RSN_SEL(cipher_type)); + if (match) { + *neg_mccipher = WLAN_ENCRYPT_TYPE_AES; + return true; + } + /* Check TKIP */ + cipher_type = WLAN_CSE_TKIP; + match = scm_is_cipher_match(&rsn->gp_cipher_suite, 1, + WLAN_RSN_SEL(cipher_type)); + if (match) { + *neg_mccipher = WLAN_ENCRYPT_TYPE_TKIP; + return true; + } + } else { + cipher_type = + scm_get_cipher_suite_type(filter->mc_enc_type[i]); + match = scm_is_cipher_match(&rsn->gp_cipher_suite, 1, + WLAN_RSN_SEL(cipher_type)); + if (match) { + *neg_mccipher = filter->mc_enc_type[i]; + return true; + } + } + } + + return false; +} + +/** + * scm_is_rsn_security() - Check if scan entry support RSN security + * @filter: scan filter + * @db_entry: db entry + * @security: matched security. 
+ * + * Return: true if RSN security else false + */ +static bool scm_is_rsn_security(struct scan_filter *filter, + struct scan_cache_entry *db_entry, + struct security_info *security) +{ + int i; + uint8_t cipher_type; + bool match_any_akm, match = false; + enum wlan_auth_type neg_auth = WLAN_NUM_OF_SUPPORT_AUTH_TYPE; + enum wlan_enc_type neg_mccipher = WLAN_ENCRYPT_TYPE_NONE; + struct wlan_rsn_ie rsn = {0}; + QDF_STATUS status; + + if (!security) + return false; + if (!util_scan_entry_rsn(db_entry)) { + scm_debug("%pM : doesn't have RSN IE", db_entry->bssid.bytes); + return false; + } + status = wlan_parse_rsn_ie(util_scan_entry_rsn(db_entry), &rsn); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("failed to parse RSN IE, status %d", status); + scm_hex_dump(QDF_TRACE_LEVEL_DEBUG, + util_scan_entry_rsn(db_entry), + util_scan_get_rsn_len(db_entry)); + return false; + } + + cipher_type = + scm_get_cipher_suite_type(security->uc_enc); + match = scm_is_cipher_match(rsn.pwise_cipher_suites, + rsn.pwise_cipher_count, WLAN_RSN_SEL(cipher_type)); + if (!match) { + scm_debug("%pM : pairwise cipher didn't match", + db_entry->bssid.bytes); + return false; + } + + match = scm_is_rsn_mcast_cipher_match(&rsn, filter, &neg_mccipher); + if (!match) { + scm_debug("%pM : mcast cipher didn't match", + db_entry->bssid.bytes); + return false; + } + + /* Initializing with false as it has true value already */ + match = false; + for (i = 0; i < filter->num_of_auth; i++) { + + if (filter->auth_type[i] == WLAN_AUTH_TYPE_ANY) + match_any_akm = true; + else + match_any_akm = false; + /* + * Ciphers are supported, Match authentication algorithm and + * pick first matching authtype. 
+ */ + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_FILS_FT_SHA384))) { + if (match_any_akm || (WLAN_AUTH_TYPE_FT_FILS_SHA384 == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_FT_FILS_SHA384; + match = true; + break; + } + } + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_FILS_FT_SHA256))) { + if (match_any_akm || (WLAN_AUTH_TYPE_FT_FILS_SHA256 == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_FT_FILS_SHA256; + match = true; + break; + } + } + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_FILS_SHA384))) { + if (match_any_akm || (WLAN_AUTH_TYPE_FILS_SHA384 == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_FILS_SHA384; + match = true; + break; + } + } + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_FILS_SHA256))) { + if (match_any_akm || (WLAN_AUTH_TYPE_FILS_SHA256 == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_FILS_SHA256; + match = true; + break; + } + } + + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_SAE))) { + if (match_any_akm || (WLAN_AUTH_TYPE_SAE == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_SAE; + match = true; + break; + } + } + + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, WLAN_RSN_DPP_AKM)) { + if (match_any_akm || (WLAN_AUTH_TYPE_DPP_RSN == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_DPP_RSN; + match = true; + break; + } + } + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_OSEN_AKM)) { + if (match_any_akm || + WLAN_AUTH_TYPE_OSEN == filter->auth_type[i]) { + neg_auth = WLAN_AUTH_TYPE_OSEN; + match = true; + break; + } + } + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_OWE))) { + if (match_any_akm || (WLAN_AUTH_TYPE_OWE == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_OWE; + match = true; + break; + 
} + } + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_FT_IEEE8021X))) { + if (match_any_akm || (WLAN_AUTH_TYPE_FT_RSN == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_FT_RSN; + match = true; + break; + } + } + + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_FT_PSK))) { + if (match_any_akm || (WLAN_AUTH_TYPE_FT_RSN_PSK == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_FT_RSN_PSK; + match = true; + break; + } + } + /* ESE only supports 802.1X. No PSK. */ + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_CCKM_AKM)) { + if (match_any_akm || (WLAN_AUTH_TYPE_CCKM_RSN == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_CCKM_RSN; + match = true; + break; + } + } + /* RSN */ + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_IEEE8021X))) { + if (match_any_akm || (WLAN_AUTH_TYPE_RSN == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_RSN; + match = true; + break; + } + } + /* TKIP */ + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_PSK))) { + if (match_any_akm || (WLAN_AUTH_TYPE_RSN_PSK == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_RSN_PSK; + match = true; + break; + } + } + /* SHA256 */ + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_SHA256_PSK))) { + if (match_any_akm || (WLAN_AUTH_TYPE_RSN_PSK_SHA256 == + filter->auth_type[i])) { + neg_auth = + WLAN_AUTH_TYPE_RSN_PSK_SHA256; + match = true; + break; + } + } + /* 8021X SHA256 */ + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_SHA256_IEEE8021X))) { + if (match_any_akm || (WLAN_AUTH_TYPE_RSN_8021X_SHA256 == + filter->auth_type[i])) { + neg_auth = + WLAN_AUTH_TYPE_RSN_8021X_SHA256; + match = true; + break; + } + } + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_SUITEB_EAP_SHA256))) 
{ + if (match_any_akm || + (WLAN_AUTH_TYPE_SUITEB_EAP_SHA256 == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_SUITEB_EAP_SHA256; + match = true; + break; + } + } + if (scm_is_cipher_match(rsn.akm_suites, + rsn.akm_suite_count, + WLAN_RSN_SEL(WLAN_AKM_SUITEB_EAP_SHA384))) { + if (match_any_akm || + (WLAN_AUTH_TYPE_SUITEB_EAP_SHA384 == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_SUITEB_EAP_SHA384; + match = true; + break; + } + } + } + + if (!match) { + scm_debug("%pM : akm suites didn't match", + db_entry->bssid.bytes); + return false; + } + + if (!filter->ignore_pmf_cap) + match = scm_check_pmf_match(filter, db_entry, &rsn); + + if (match) { + security->auth_type = neg_auth; + security->mc_enc = neg_mccipher; + } + + return match; +} + +/** + * scm_is_wpa_mcast_cipher_match() - match the wpa mcast cipher type with AP's + * mcast cipher + * @wpa: AP's WPA IE + * @filter: scan filter + * @neg_mccipher: negotiated mc cipher if matched. + * + * Return: true if mc cipher is negotiated + */ +static bool +scm_is_wpa_mcast_cipher_match(struct wlan_wpa_ie *wpa, + struct scan_filter *filter, enum wlan_enc_type *neg_mccipher) +{ + int i; + bool match; + uint8_t cipher_type; + + if (!wpa || !neg_mccipher || !filter) + return false; + + for (i = 0; i < filter->num_of_mc_enc_type; i++) { + + if (filter->mc_enc_type[i] == WLAN_ENCRYPT_TYPE_ANY) { + /* Try the more secured ones first. 
*/ + + /* Check AES */ + cipher_type = WLAN_CSE_CCMP; + match = scm_is_cipher_match(&wpa->mc_cipher, 1, + WLAN_WPA_SEL(cipher_type)); + if (match) { + *neg_mccipher = WLAN_ENCRYPT_TYPE_AES; + return true; + } + /* Check TKIP */ + cipher_type = WLAN_CSE_TKIP; + match = scm_is_cipher_match(&wpa->mc_cipher, 1, + WLAN_WPA_SEL(cipher_type)); + if (match) { + *neg_mccipher = WLAN_ENCRYPT_TYPE_TKIP; + return true; + } + } else { + cipher_type = + scm_get_cipher_suite_type(filter->mc_enc_type[i]); + match = scm_is_cipher_match(&wpa->mc_cipher, 1, + WLAN_WPA_SEL(cipher_type)); + if (match) { + *neg_mccipher = filter->mc_enc_type[i]; + return true; + } + } + } + + return false; +} + +/** + * scm_is_wpa_security() - Check if scan entry support WPA security + * @filter: scan filter + * @db_entry: db entry + * @security: matched security. + * + * Return: true if WPA security else false + */ +static bool scm_is_wpa_security(struct scan_filter *filter, + struct scan_cache_entry *db_entry, + struct security_info *security) +{ + int i; + QDF_STATUS status; + uint8_t cipher_type; + bool match_any_akm, match = false; + enum wlan_auth_type neg_auth = WLAN_NUM_OF_SUPPORT_AUTH_TYPE; + enum wlan_enc_type neg_mccipher = WLAN_ENCRYPT_TYPE_NONE; + struct wlan_wpa_ie wpa = {0}; + + if (!security) + return false; + if (!util_scan_entry_wpa(db_entry)) { + scm_debug("%pM : AP doesn't have WPA IE", + db_entry->bssid.bytes); + return false; + } + + status = wlan_parse_wpa_ie(util_scan_entry_wpa(db_entry), &wpa); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("failed to parse WPA IE, status %d", status); + scm_hex_dump(QDF_TRACE_LEVEL_DEBUG, + util_scan_entry_wpa(db_entry), + util_scan_get_wpa_len(db_entry)); + return false; + } + + cipher_type = + scm_get_cipher_suite_type(security->uc_enc); + match = scm_is_cipher_match(wpa.uc_ciphers, + wpa.uc_cipher_count, WLAN_WPA_SEL(cipher_type)); + if (!match) { + scm_debug("%pM : unicase cipher didn't match", + db_entry->bssid.bytes); + return false; + } 
+ + match = scm_is_wpa_mcast_cipher_match(&wpa, filter, &neg_mccipher); + if (!match) { + scm_debug("%pM : mcast cipher didn't match", + db_entry->bssid.bytes); + return false; + } + + /* Initializing with false as it has true value already */ + match = false; + for (i = 0; i < filter->num_of_auth; i++) { + + if (filter->auth_type[i] == WLAN_AUTH_TYPE_ANY) + match_any_akm = true; + else + match_any_akm = false; + /* + * Ciphers are supported, Match authentication algorithm and + * pick first matching authtype. + */ + /**/ + if (scm_is_cipher_match(wpa.auth_suites, + wpa.auth_suite_count, + WLAN_WPA_SEL(WLAN_AKM_IEEE8021X))) { + if (match_any_akm || (WLAN_AUTH_TYPE_WPA == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_WPA; + match = true; + break; + } + } + if (scm_is_cipher_match(wpa.auth_suites, + wpa.auth_suite_count, + WLAN_WPA_SEL(WLAN_AKM_PSK))) { + if (match_any_akm || (WLAN_AUTH_TYPE_WPA_PSK == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_WPA_PSK; + match = true; + break; + } + } + if (scm_is_cipher_match(wpa.auth_suites, + wpa.auth_suite_count, + WLAN_WPA_CCKM_AKM)) { + if (match_any_akm || (WLAN_AUTH_TYPE_CCKM_WPA == + filter->auth_type[i])) { + neg_auth = WLAN_AUTH_TYPE_CCKM_WPA; + match = true; + break; + } + } + } + + if (!match) + scm_debug("%pM : akm didn't match", db_entry->bssid.bytes); + + if (match) { + security->auth_type = neg_auth; + security->mc_enc = neg_mccipher; + } + + return match; +} + +/** + * scm_is_wapi_security() - Check if scan entry support WAPI security + * @filter: scan filter + * @db_entry: db entry + * @security: matched security. 
+ * + * Return: true if WAPI security else false + */ +static bool scm_is_wapi_security(struct scan_filter *filter, + struct scan_cache_entry *db_entry, + struct security_info *security) +{ + int i; + uint8_t cipher_type; + bool match = false; + enum wlan_auth_type neg_auth = WLAN_NUM_OF_SUPPORT_AUTH_TYPE; + enum wlan_enc_type neg_mccipher = WLAN_ENCRYPT_TYPE_NONE; + struct wlan_wapi_ie wapi = {0}; + + if (!security) + return false; + if (!util_scan_entry_wapi(db_entry)) { + scm_debug("%pM : mcast cipher didn't match", + db_entry->bssid.bytes); + return false; + } + + wlan_parse_wapi_ie( + util_scan_entry_wapi(db_entry), &wapi); + + cipher_type = + scm_get_cipher_suite_type(security->uc_enc); + match = scm_is_cipher_match(wapi.uc_cipher_suites, + wapi.uc_cipher_count, WLAN_WAPI_SEL(cipher_type)); + if (!match) { + scm_debug("%pM : unicast cipher didn't match", + db_entry->bssid.bytes); + return false; + } + + for (i = 0; i < filter->num_of_mc_enc_type; i++) { + cipher_type = + scm_get_cipher_suite_type( + filter->mc_enc_type[i]); + match = scm_is_cipher_match(&wapi.mc_cipher_suite, + 1, WLAN_WAPI_SEL(cipher_type)); + if (match) + break; + } + if (!match) { + scm_debug("%pM : mcast cipher didn't match", + db_entry->bssid.bytes); + return false; + } + neg_mccipher = filter->mc_enc_type[i]; + + if (scm_is_cipher_match(wapi.akm_suites, + wapi.akm_suite_count, + WLAN_WAPI_SEL(WLAN_WAI_CERT_OR_SMS4))) { + neg_auth = + WLAN_AUTH_TYPE_WAPI_WAI_CERTIFICATE; + } else if (scm_is_cipher_match(wapi.akm_suites, + wapi.akm_suite_count, WLAN_WAPI_SEL(WLAN_WAI_PSK))) { + neg_auth = WLAN_AUTH_TYPE_WAPI_WAI_PSK; + } else { + scm_debug("%pM : akm is not supported", + db_entry->bssid.bytes); + return false; + } + + match = false; + for (i = 0; i < filter->num_of_auth; i++) { + if (filter->auth_type[i] == neg_auth) { + match = true; + break; + } + } + + if (!match) + scm_debug("%pM : akm suite didn't match", + db_entry->bssid.bytes); + if (match) { + security->auth_type = neg_auth; + 
security->mc_enc = neg_mccipher; + } + + return match; +} + +/** + * scm_is_def_security() - Check if any security in filter match + * @filter: scan filter + * @db_entry: db entry + * @security: matched security. + * + * Return: true if any security else false + */ +static bool scm_is_def_security(struct scan_filter *filter, + struct scan_cache_entry *db_entry, + struct security_info *security) +{ + + /* It is allowed to match anything. Try the more secured ones first. */ + /* Check GCMP_256 first */ + security->uc_enc = WLAN_ENCRYPT_TYPE_AES_GCMP_256; + if (scm_is_rsn_security(filter, db_entry, security)) + return true; + + /* Check GCMP */ + security->uc_enc = WLAN_ENCRYPT_TYPE_AES_GCMP; + if (scm_is_rsn_security(filter, db_entry, security)) + return true; + + /* Check AES */ + security->uc_enc = WLAN_ENCRYPT_TYPE_AES; + if (scm_is_rsn_security(filter, db_entry, security)) + return true; + if (scm_is_wpa_security(filter, db_entry, security)) + return true; + + /* Check TKIP */ + security->uc_enc = WLAN_ENCRYPT_TYPE_TKIP; + if (scm_is_rsn_security(filter, db_entry, security)) + return true; + if (scm_is_wpa_security(filter, db_entry, security)) + return true; + + /* Check AES */ + security->uc_enc = WLAN_ENCRYPT_TYPE_AES; + if (scm_is_wpa_security(filter, db_entry, security)) + return true; + + /* Check TKIP */ + security->uc_enc = WLAN_ENCRYPT_TYPE_TKIP; + if (scm_is_wpa_security(filter, db_entry, security)) + return true; + + /* Check WAPI */ + security->uc_enc = WLAN_ENCRYPT_TYPE_WPI; + if (scm_is_wapi_security(filter, db_entry, security)) + return true; + + security->uc_enc = WLAN_ENCRYPT_TYPE_WEP104; + if (scm_is_wep_security(filter, db_entry, security)) + return true; + security->uc_enc = WLAN_ENCRYPT_TYPE_WEP40; + if (scm_is_wep_security(filter, db_entry, security)) + return true; + security->uc_enc = WLAN_ENCRYPT_TYPE_WEP104_STATICKEY; + if (scm_is_wep_security(filter, db_entry, security)) + return true; + security->uc_enc = 
WLAN_ENCRYPT_TYPE_WEP40_STATICKEY; + if (scm_is_wep_security(filter, db_entry, security)) + return true; + + /* It must be open and no enc */ + if (db_entry->cap_info.wlan_caps.privacy) + return false; + + security->auth_type = WLAN_AUTH_TYPE_OPEN_SYSTEM; + security->mc_enc = WLAN_ENCRYPT_TYPE_NONE; + security->uc_enc = WLAN_ENCRYPT_TYPE_NONE; + + return true; +} + +/** + * scm_is_fils_config_match() - Check if FILS config matches + * @filter: scan filter + * @db_entry: db entry + * + * Return: true if FILS config matches else false + */ +static bool scm_is_fils_config_match(struct scan_filter *filter, + struct scan_cache_entry *db_entry) +{ + int i; + struct fils_indication_ie *indication_ie; + uint8_t *data; + + if (!filter->fils_scan_filter.realm_check) + return true; + + if (!db_entry->ie_list.fils_indication) + return false; + + + indication_ie = + (struct fils_indication_ie *) db_entry->ie_list.fils_indication; + + data = indication_ie->variable_data; + if (indication_ie->is_cache_id_present) + data += CACHE_IDENTIFIER_LEN; + + if (indication_ie->is_hessid_present) + data += HESSID_LEN; + + for (i = 1; i <= indication_ie->realm_identifiers_cnt; i++) { + if (!qdf_mem_cmp(filter->fils_scan_filter.fils_realm, + data, REAM_HASH_LEN)) + return true; + /* Max realm count reached */ + if (indication_ie->realm_identifiers_cnt == i) + break; + else + data = data + REAM_HASH_LEN; + } + + return false; +} + +/** + * scm_is_security_match() - Check if security in filter match + * @filter: scan filter + * @db_entry: db entry + * @security: matched security. 
+ * + * Return: true if security match else false + */ +static bool scm_is_security_match(struct scan_filter *filter, + struct scan_cache_entry *db_entry, + struct security_info *security) +{ + int i; + bool match = false; + struct security_info local_security = {0}; + + if (!filter->num_of_enc_type) + return true; + + for (i = 0; (i < filter->num_of_enc_type) && + !match; i++) { + + local_security.uc_enc = + filter->enc_type[i]; + + switch (filter->enc_type[i]) { + case WLAN_ENCRYPT_TYPE_NONE: + match = scm_is_open_security(filter, + db_entry, &local_security); + break; + case WLAN_ENCRYPT_TYPE_WEP40_STATICKEY: + case WLAN_ENCRYPT_TYPE_WEP104_STATICKEY: + case WLAN_ENCRYPT_TYPE_WEP40: + case WLAN_ENCRYPT_TYPE_WEP104: + match = scm_is_wep_security(filter, + db_entry, &local_security); + break; + case WLAN_ENCRYPT_TYPE_TKIP: + case WLAN_ENCRYPT_TYPE_AES: + case WLAN_ENCRYPT_TYPE_AES_GCMP: + case WLAN_ENCRYPT_TYPE_AES_GCMP_256: + /* First check if there is a RSN match */ + match = scm_is_rsn_security(filter, + db_entry, &local_security); + /* If not RSN, then check WPA match */ + if (!match) + match = scm_is_wpa_security(filter, + db_entry, &local_security); + break; + case WLAN_ENCRYPT_TYPE_WPI:/* WAPI */ + match = scm_is_wapi_security(filter, + db_entry, &local_security); + break; + case WLAN_ENCRYPT_TYPE_ANY: + default: + match = scm_is_def_security(filter, + db_entry, &local_security); + break; + } + } + + if (match && security) + qdf_mem_copy(security, + &local_security, sizeof(*security)); + + return match; +} + +bool scm_filter_match(struct wlan_objmgr_psoc *psoc, + struct scan_cache_entry *db_entry, + struct scan_filter *filter, + struct security_info *security) +{ + int i; + bool match = false; + struct roam_filter_params *roam_params; + struct scan_default_params *def_param; + struct wlan_country_ie *cc_ie; + + def_param = wlan_scan_psoc_get_def_params(psoc); + if (!def_param) + return false; + + roam_params = &def_param->roam_params; + + if 
(filter->p2p_results && !db_entry->is_p2p) + return false; + + for (i = 0; i < roam_params->num_bssid_avoid_list; i++) { + if (qdf_is_macaddr_equal(&roam_params->bssid_avoid_list[i], + &db_entry->bssid)) { + scm_debug("%pM : Ignore as its blacklisted", + db_entry->bssid.bytes); + return false; + } + } + + match = false; + if (db_entry->ssid.length) { + for (i = 0; i < filter->num_of_ssid; i++) { + if (util_is_ssid_match(&filter->ssid_list[i], + &db_entry->ssid)) { + match = true; + break; + } + } + } + /* + * In OWE transition mode, ssid is hidden. And supplicant does not issue + * scan with specific ssid prior to connect as in other hidden ssid + * cases. Add explicit check to allow OWE when ssid is hidden. + */ + if (!match && util_scan_entry_is_hidden_ap(db_entry)) { + for (i = 0; i < filter->num_of_auth; i++) { + if (filter->auth_type[i] == WLAN_AUTH_TYPE_OWE) { + match = true; + break; + } + } + } + if (!match && filter->num_of_ssid) + return false; + + match = false; + /* TO do Fill p2p MAC*/ + for (i = 0; i < filter->num_of_bssid; i++) { + if (util_is_bssid_match(&filter->bssid_list[i], + &db_entry->bssid)) { + match = true; + break; + } + /* TODO match p2p mac */ + } + if (!match && filter->num_of_bssid) + return false; + + match = false; + for (i = 0; i < filter->num_of_channels; i++) { + if (!filter->channel_list[i] || ( + (filter->channel_list[i] == + db_entry->channel.chan_idx))) { + match = true; + break; + } + } + + if (!match && filter->num_of_channels) + return false; + + if (filter->rrm_measurement_filter) + return true; + + /* TODO match phyMode */ + + if (!filter->ignore_auth_enc_type && + !scm_is_security_match(filter, + db_entry, security)) { + scm_debug("%pM : Ignore as security profile didn't match", + db_entry->bssid.bytes); + return false; + } + + if (!util_is_bss_type_match(filter->bss_type, + db_entry->cap_info)) { + scm_debug("%pM : Ignore as bss type didn't match cap_info %x bss_type %d", + db_entry->bssid.bytes, 
db_entry->cap_info.value, + filter->bss_type); + return false; + } + + /* TODO match rate set */ + + if (filter->only_wmm_ap && + !db_entry->ie_list.wmeinfo && + !db_entry->ie_list.wmeparam) { + scm_debug("%pM : Ignore as required wmeinfo and wme params not present", + db_entry->bssid.bytes); + return false; + } + + /* Match realm */ + if (!scm_is_fils_config_match(filter, db_entry)) { + scm_debug("%pM :Ignore as fils config didn't match", + db_entry->bssid.bytes); + return false; + } + + cc_ie = util_scan_entry_country(db_entry); + if (!util_country_code_match(filter->country, cc_ie)) { + scm_debug("%pM : Ignore as country %.*s didn't match", + db_entry->bssid.bytes, 2, filter->country); + return false; + } + + if (!util_mdie_match(filter->mobility_domain, + (struct rsn_mdie *)db_entry->ie_list.mdie)) { + scm_debug("%pM : Ignore as mdie didn't match", + db_entry->bssid.bytes); + return false; + } + + return true; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_main.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_main.c new file mode 100644 index 0000000000000000000000000000000000000000..90de6c8aa72d12ad4f8f6d1ffd4583d0c453006b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_main.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains core scan function definitions + */ +#include +#include +#include "wlan_scan_main.h" + +QDF_STATUS wlan_scan_psoc_created_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list) +{ + struct wlan_scan_obj *scan_obj; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + scan_obj = qdf_mem_malloc_atomic(sizeof(struct wlan_scan_obj)); + if (scan_obj == NULL) { + scm_err("Failed to allocate memory"); + return QDF_STATUS_E_NOMEM; + } + + /* Attach scan private date to psoc */ + status = wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_UMAC_COMP_SCAN, (void *)scan_obj, + QDF_STATUS_SUCCESS); + if (QDF_IS_STATUS_ERROR(status)) + scm_err("Failed to attach psoc scan component"); + else + scm_debug("Scan object attach to psoc successful"); + + return status; +} + +QDF_STATUS wlan_scan_psoc_destroyed_notification( + struct wlan_objmgr_psoc *psoc, + void *arg_list) +{ + void *scan_obj = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + scan_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_SCAN); + + if (!scan_obj) { + scm_err("Failed to detach scan in psoc ctx"); + return QDF_STATUS_E_FAILURE; + } + + status = wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_SCAN, scan_obj); + if (QDF_IS_STATUS_ERROR(status)) + scm_err("Failed to detach psoc scan component"); + + qdf_mem_free(scan_obj); + + return status; +} + +QDF_STATUS wlan_scan_vdev_created_notification(struct wlan_objmgr_vdev *vdev, + void *arg_list) +{ + struct scan_vdev_obj *scan_vdev_obj; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + scan_vdev_obj = qdf_mem_malloc_atomic(sizeof(struct scan_vdev_obj)); + if (scan_vdev_obj == 
NULL) { + scm_err("Failed to allocate memory"); + return QDF_STATUS_E_NOMEM; + } + + /* Attach scan private date to vdev */ + status = wlan_objmgr_vdev_component_obj_attach(vdev, + WLAN_UMAC_COMP_SCAN, (void *)scan_vdev_obj, + QDF_STATUS_SUCCESS); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("Failed to attach vdev scan component"); + qdf_mem_free(scan_vdev_obj); + } else { + scm_debug("vdev scan object attach successful"); + } + + return status; +} + +QDF_STATUS wlan_scan_vdev_destroyed_notification( + struct wlan_objmgr_vdev *vdev, + void *arg_list) +{ + void *scan_vdev_obj = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + scan_vdev_obj = wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_SCAN); + + if (!scan_vdev_obj) { + scm_err("Failed to detach scan in vdev ctx"); + return QDF_STATUS_E_FAILURE; + } + + status = wlan_objmgr_vdev_component_obj_detach(vdev, + WLAN_UMAC_COMP_SCAN, scan_vdev_obj); + if (QDF_IS_STATUS_ERROR(status)) + scm_err("Failed to detach vdev scan component"); + + qdf_mem_free(scan_vdev_obj); + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_main.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_main.h new file mode 100644 index 0000000000000000000000000000000000000000..a7c2b05210946c66affe0ac011231e028ce4e380 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_main.h @@ -0,0 +1,734 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan init/deinit public api + */ + +#ifndef _WLAN_SCAN_MAIN_API_H_ +#define _WLAN_SCAN_MAIN_API_H_ + +#include +#include +#include +#include +#include +#include "wlan_scan_cache_db.h" +#include "wlan_scan_11d.h" + +#define scm_log(level, args...) \ + QDF_TRACE(QDF_MODULE_ID_SCAN, level, ## args) +#define scm_logfl(level, format, args...) \ + scm_log(level, FL(format), ## args) +#define scm_alert(format, args...) \ + scm_logfl(QDF_TRACE_LEVEL_FATAL, format, ## args) +#define scm_err(format, args...) \ + scm_logfl(QDF_TRACE_LEVEL_ERROR, format, ## args) +#define scm_warn(format, args...) \ + scm_logfl(QDF_TRACE_LEVEL_WARN, format, ## args) +#define scm_notice(format, args...) \ + scm_logfl(QDF_TRACE_LEVEL_INFO, format, ## args) +#define scm_info(format, args...) \ + scm_logfl(QDF_TRACE_LEVEL_INFO_HIGH, format, ## args) +#define scm_debug(format, args...) \ + scm_logfl(QDF_TRACE_LEVEL_DEBUG, format, ## args) +/* Rate Limited Logs */ +#define scm_alert_rl(params...) \ + QDF_TRACE_FATAL_RL(QDF_MODULE_ID_SCAN, params) +#define scm_err_rl(params...) \ + QDF_TRACE_ERROR_RL(QDF_MODULE_ID_SCAN, params) +#define scm_warn_rl(params...) \ + QDF_TRACE_WARN_RL(QDF_MODULE_ID_SCAN, params) +#define scm_info_rl(params...) \ + QDF_TRACE_INFO_RL(QDF_MODULE_ID_SCAN, params) +#define scm_debug_rl(params...) \ + QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_SCAN, params) + +#define scm_nofl_alert(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_SCAN, params) +#define scm_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_SCAN, params) +#define scm_nofl_warn(params...) 
\ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_SCAN, params) +#define scm_nofl_info(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_SCAN, params) +#define scm_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_SCAN, params) + +#define scm_hex_dump(level, data, buf_len) \ + qdf_trace_hex_dump(QDF_MODULE_ID_SCAN, level, data, buf_len) + + +#define MAX_SCAN_EVENT_HANDLERS_PER_PDEV 100 +#define WLAN_MAX_MODULE_NAME 40 +#define WLAN_MAX_REQUESTORS 200 +#define WLAN_SCAN_ID_MASK 0x00000FFF +#define WLAN_HOST_SCAN_REQ_ID_PREFIX 0x0000A000 +#define SCAN_NPROBES_DEFAULT 2 +#define WLAN_P2P_SOCIAL_CHANNELS 3 + +#define SCAN_BURST_SCAN_MAX_NUM_OFFCHANNELS (3) +#define SCAN_SCAN_IDLE_TIME_DEFAULT (25) +#define SCAN_3PORT_CONC_SCAN_MAX_BURST_DURATION (25) +#define SCAN_CTS_DURATION_MS_MAX (32) +#define SCAN_ROAM_SCAN_CHANNEL_SWITCH_TIME (4) +#define SCAN_DWELL_TIME_PROBE_TIME_MAP_SIZE (11) +#define SCAN_GO_MIN_ACTIVE_SCAN_BURST_DURATION (40) +#define SCAN_GO_MAX_ACTIVE_SCAN_BURST_DURATION (240) +#define SCAN_P2P_SCAN_MAX_BURST_DURATION (240) +#define SCAN_GO_BURST_SCAN_MAX_NUM_OFFCHANNELS (6) + +/** + * struct probe_time_dwell_time - probe time, dwell time map + * @dwell_time: dwell time + * @probe_time: repeat probe time + */ +struct probe_time_dwell_time { + uint8_t dwell_time; + uint8_t probe_time; +}; + +/* + * For the requestor id: + * bit 0~12 is used for real requestor id. + * bit 13~15 is used for requestor prefix. + * bit 16~19 is used by specific user to aware it is issued by himself. + * bit 20~31 is reserved. 
+ */ +#define WLAN_SCAN_REQUESTER_ID_PREFIX 0x0000A000 +#define WLAN_SCAN_REQUESTER_ID_MASK 0x00001FFF + +#define SCM_NUM_RSSI_CAT 15 +#define SCAN_STA_MIRACAST_MCC_REST_TIME 400 + +#ifdef CONFIG_MCL +#define MAX_SCAN_CACHE_SIZE 300 +#define SCAN_ACTIVE_DWELL_TIME 40 +#define SCAN_PASSIVE_DWELL_TIME 110 +#define SCAN_MAX_REST_TIME 0 +#define SCAN_MIN_REST_TIME 0 +#define SCAN_BURST_DURATION 0 +#define SCAN_CONC_ACTIVE_DWELL_TIME 20 +#define SCAN_CONC_PASSIVE_DWELL_TIME 100 +#define SCAN_CONC_IDLE_TIME 25 +#define SCAN_CONC_MAX_REST_TIME 20 +#define SCAN_CONC_MIN_REST_TIME 10 +#define SCAN_REPEAT_PROBE_TIME 20 +#define SCAN_PROBE_SPACING_TIME 0 +#define SCAN_PROBE_DELAY 0 +#define SCAN_MAX_SCAN_TIME 30000 +#define SCAN_NUM_PROBES 2 +#define SCAN_NETWORK_IDLE_TIMEOUT 0 +#define HIDDEN_SSID_TIME (1*60*1000) +#define SCAN_CHAN_STATS_EVENT_ENAB (false) +#define MAX_SCAN_COMMANDS 8 +#else +#define MAX_SCAN_CACHE_SIZE 1024 +#define SCAN_ACTIVE_DWELL_TIME 105 +#define SCAN_PASSIVE_DWELL_TIME 300 +#define SCAN_MAX_REST_TIME 0 +#define SCAN_MIN_REST_TIME 50 +#define SCAN_BURST_DURATION 0 +#define SCAN_CONC_ACTIVE_DWELL_TIME 0 +#define SCAN_CONC_PASSIVE_DWELL_TIME 0 +#define SCAN_CONC_IDLE_TIME 0 +#define SCAN_CONC_MAX_REST_TIME 0 +#define SCAN_CONC_MIN_REST_TIME 0 +#define SCAN_REPEAT_PROBE_TIME 50 +#define SCAN_PROBE_SPACING_TIME 0 +#define SCAN_PROBE_DELAY 0 +#define SCAN_MAX_SCAN_TIME 50000 +#define SCAN_NUM_PROBES 0 +#define SCAN_NETWORK_IDLE_TIMEOUT 200 +#define HIDDEN_SSID_TIME (0xFFFFFFFF) +#define SCAN_CHAN_STATS_EVENT_ENAB (true) +#define MAX_SCAN_COMMANDS 24 +#endif + +#define SCAN_TIMEOUT_GRACE_PERIOD 10 +/* scan age time in millisec */ +#ifdef QCA_WIFI_NAPIER_EMULATION +#define SCAN_CACHE_AGING_TIME (90 * 1000) +#else +#define SCAN_CACHE_AGING_TIME (30 * 1000) +#endif +#define SCAN_MAX_BSS_PDEV 100 +#define SCAN_PRIORITY SCAN_PRIORITY_LOW + +/* DBS Scan policy selection ext flags */ +#define SCAN_FLAG_EXT_DBS_SCAN_POLICY_MASK 0x00000003 +#define 
SCAN_FLAG_EXT_DBS_SCAN_POLICY_BIT 0 +#define SCAN_DBS_POLICY_DEFAULT 0x0 +#define SCAN_DBS_POLICY_FORCE_NONDBS 0x1 +#define SCAN_DBS_POLICY_IGNORE_DUTY 0x2 +#define SCAN_DBS_POLICY_MAX 0x3 +/* Minimum number of channels for enabling DBS Scan */ +#define SCAN_MIN_CHAN_DBS_SCAN_THRESHOLD 8 +/* + * Enable Reception of Public Action frame with this flag + */ +#define SCAN_FLAG_EXT_FILTER_PUBLIC_ACTION_FRAME 0x4 + +/* Passive dwell time if bt_a2dp is enabled. Time in msecs*/ +#define PASSIVE_DWELL_TIME_BT_A2DP_ENABLED 28 + +/** + * struct cb_handler - defines scan event handler + * call back function and arguments + * @func: handler function pointer + * @arg: argument to handler function + */ +struct cb_handler { + scan_event_handler func; + void *arg; +}; + +/** + * struct pdev_scan_ev_handler - pdev scan event handlers + * @cb_handler: array of registered scan handlers + */ +struct pdev_scan_ev_handler { + uint32_t handler_cnt; + struct cb_handler cb_handlers[MAX_SCAN_EVENT_HANDLERS_PER_PDEV]; +}; + +/** + * struct global_scan_ev_handlers - per pdev registered scan event handlers + * @pdev_scan_ev_handler: per pdev registered scan event handlers + */ +struct global_scan_ev_handlers { + struct pdev_scan_ev_handler pdev_ev_handlers[WLAN_UMAC_MAX_PDEVS]; +}; + +/** + * struct scan_requester_info - defines scan requester id + * and event handler mapping + * @requester: requester ID allocated + * @module: module name of requester + * @ev_handler: event handlerto be invoked + */ +struct scan_requester_info { + wlan_scan_requester requester; + uint8_t module[WLAN_MAX_MODULE_NAME]; + struct cb_handler ev_handler; +}; + +/** + * struct pdev_scan_info - defines per pdev scan info + * @wide_band_scan: wide band scan capability + * @last_scan_time: time of last scan start on this pdev + * @custom_chan_list: scan only these channels + */ +struct pdev_scan_info { + bool wide_band_scan; + qdf_time_t last_scan_time; + struct chan_list custom_chan_list; +}; + +/** + * struct 
scan_vdev_obj - scan vdev obj + * @pno_match_evt_received: pno match received + * @pno_in_progress: pno in progress + * @is_vdev_delete_in_progress: flag to indicate if vdev del is in progress + * @first_scan_done: Whether its the first scan or not for this particular vdev. + */ +struct scan_vdev_obj { + bool pno_match_evt_received; + bool pno_in_progress; + bool is_vdev_delete_in_progress; + bool first_scan_done; +}; + +/** + * struct pno_def_config - def configuration for PNO + * @channel_prediction: config PNO channel prediction feature status + * @top_k_num_of_channels: def top K number of channels are used for tanimoto + * distance calculation. + * @stationary_thresh: def threshold val to determine that STA is stationary. + * @scan_timer_repeat_value: PNO scan timer repeat value + * @slow_scan_multiplier: PNO slow scan timer multiplier + * @dfs_chnl_scan_enable: Enable dfs channel PNO scan + * @pnoscan_adaptive_dwell_mode: def adaptive dwelltime mode for pno scan + * @channel_prediction_full_scan: def periodic timer upon which full scan needs + * to be triggered. + * @pno_wake_lock: pno wake lock + * @pno_cb: callback to call on PNO completion + * @mawc_params: Configuration parameters for NLO MAWC. + */ +struct pno_def_config { + bool channel_prediction; + uint8_t top_k_num_of_channels; + uint8_t stationary_thresh; + uint32_t scan_timer_repeat_value; + uint32_t slow_scan_multiplier; + bool dfs_chnl_scan_enabled; + enum scan_dwelltime_adaptive_mode adaptive_dwell_mode; + uint32_t channel_prediction_full_scan; + qdf_wake_lock_t pno_wake_lock; + struct cb_handler pno_cb; + struct nlo_mawc_params mawc_params; +}; + + +/** + * struct scan_default_params - default scan parameters to be used + * @active_dwell: default active dwell time + * @allow_dfs_chan_in_first_scan: first scan should contain dfs channels or not. + * @allow_dfs_chan_in_scan: Scan DFS channels or not. + * @skip_dfs_chan_in_p2p_search: Skip dfs channels in p2p search. 
+ * @use_wake_lock_in_user_scan: if wake lock will be acquired during user scan + * @active_dwell_2g: default active dwell time for 2G channels, if it's not zero + * @passive_dwell:default passive dwell time + * @max_rest_time: default max rest time + * @sta_miracast_mcc_rest_time: max rest time for miracast and mcc + * @min_rest_time: default min rest time + * @idle_time: default idle time + * @conc_active_dwell: default concurrent active dwell time + * @conc_passive_dwell: default concurrent passive dwell time + * @conc_max_rest_time: default concurrent max rest time + * @conc_min_rest_time: default concurrent min rest time + * @conc_idle_time: default concurrent idle time + * @repeat_probe_time: default repeat probe time + * @probe_spacing_time: default probe spacing time + * @probe_delay: default probe delay + * @burst_duration: default burst duration + * @max_scan_time: default max scan time + * @num_probes: default maximum number of probes to sent + * @cache_aging_time: default scan cache aging time + * @prefer_5ghz: Prefer 5ghz AP over 2.4Ghz AP + * @select_5gh_margin: Prefer connecting to 5G AP even if + * its RSSI is lower by select_5gh_margin dbm than 2.4G AP. + * applicable if prefer_5ghz is set. 
+ * @is_bssid_hint_priority: True if bssid_hint is given priority + * @enable_mac_spoofing: enable mac address spoof in scan + * @bss_prefer_val: bss prefer value for the RSSI category + * @rssi_cat: RSSI category + * @max_bss_per_pdev: maximum number of bss entries to be maintained per pdev + * @max_active_scans_allowed: maximum number of active parallel scan allowed + * per psoc + * @scan_priority: default scan priority + * @adaptive_dwell_time_mode: adaptive dwell mode with connection + * @adaptive_dwell_time_mode_nc: adaptive dwell mode without connection + * @honour_nl_scan_policy_flags: honour nl80211 scan policy flags + * @scan_f_passive: passively scan all channels including active channels + * @scan_f_bcast_probe: add wild card ssid prbreq even if ssid_list is specified + * @scan_f_cck_rates: add cck rates to rates/xrates ie in prb req + * @scan_f_ofdm_rates: add ofdm rates to rates/xrates ie in prb req + * @scan_f_chan_stat_evnt: enable indication of chan load and noise floor + * @scan_f_filter_prb_req: filter Probe request frames + * @scan_f_bypass_dfs_chn: when set, do not scan DFS channels + * @scan_f_continue_on_err: continue scan even if certain errors have occurred + * @scan_f_offchan_mgmt_tx: allow mgmt transmission during off channel scan + * @scan_f_offchan_data_tx: allow data transmission during off channel scan + * @scan_f_promisc_mode: scan with promiscuous mode + * @scan_f_capture_phy_err: enable capture ppdu with phy errors + * @scan_f_strict_passive_pch: do passive scan on passive channels + * @scan_f_half_rate: enable HALF (10MHz) rate support + * @scan_f_quarter_rate: set Quarter (5MHz) rate support + * @scan_f_force_active_dfs_chn: allow to send probe req on DFS channel + * @scan_f_add_tpc_ie_in_probe: add TPC ie in probe req frame + * @scan_f_add_ds_ie_in_probe: add DS ie in probe req frame + * @scan_f_add_spoofed_mac_in_probe: use random mac address for TA in probe + * @scan_f_add_rand_seq_in_probe: use random sequence number in
probe + * @scan_f_en_ie_whitelist_in_probe: enable ie whitelist in probe + * @scan_f_forced: force scan even in presence of data traffic + * @scan_f_2ghz: scan 2.4 GHz channels + * @scan_f_5ghz: scan 5 GHz channels + * @scan_f_wide_band: scan in 40 MHz or higher bandwidth + * @scan_flags: variable to read and set scan_f_* flags in one shot + * can be used to dump all scan_f_* flags for debug + * @scan_ev_started: notify scan started event + * @scan_ev_completed: notify scan completed event + * @scan_ev_bss_chan: notify bss chan event + * @scan_ev_foreign_chan: notify foreign chan event + * @scan_ev_dequeued: notify scan request dequed event + * @scan_ev_preempted: notify scan preempted event + * @scan_ev_start_failed: notify scan start failed event + * @scan_ev_restarted: notify scan restarted event + * @scan_ev_foreign_chn_exit: notify foreign chan exit event + * @scan_ev_invalid: notify invalid scan request event + * @scan_ev_gpio_timeout: notify gpio timeout event + * @scan_ev_suspended: notify scan suspend event + * @scan_ev_resumed: notify scan resumed event + * @scan_events: variable to read and set scan_ev_* flags in one shot + * can be used to dump all scan_ev_* flags for debug + * @roam_params: roam related params + */ +struct scan_default_params { + uint32_t active_dwell; + bool allow_dfs_chan_in_first_scan; + bool allow_dfs_chan_in_scan; + bool skip_dfs_chan_in_p2p_search; + bool use_wake_lock_in_user_scan; + uint32_t active_dwell_2g; + uint32_t passive_dwell; + uint32_t max_rest_time; + uint32_t sta_miracast_mcc_rest_time; + uint32_t min_rest_time; + uint32_t idle_time; + uint32_t conc_active_dwell; + uint32_t conc_passive_dwell; + uint32_t conc_max_rest_time; + uint32_t conc_min_rest_time; + uint32_t conc_idle_time; + uint32_t repeat_probe_time; + uint32_t probe_spacing_time; + uint32_t probe_delay; + uint32_t burst_duration; + uint32_t max_scan_time; + uint32_t num_probes; + uint32_t scan_cache_aging_time; + uint32_t prefer_5ghz; + uint32_t 
select_5ghz_margin; + bool enable_mac_spoofing; + bool is_bssid_hint_priority; + uint32_t usr_cfg_probe_rpt_time; + uint32_t usr_cfg_num_probes; + /* each RSSI category has one value */ + uint32_t bss_prefer_val[SCM_NUM_RSSI_CAT]; + int rssi_cat[SCM_NUM_RSSI_CAT]; + uint16_t max_bss_per_pdev; + uint32_t max_active_scans_allowed; + uint8_t sta_scan_burst_duration; + uint8_t p2p_scan_burst_duration; + uint8_t go_scan_burst_duration; + uint8_t ap_scan_burst_duration; + enum scan_priority scan_priority; + enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode; + enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode_nc; + bool honour_nl_scan_policy_flags; + union { + struct { + uint32_t scan_f_passive:1, + scan_f_bcast_probe:1, + scan_f_cck_rates:1, + scan_f_ofdm_rates:1, + scan_f_chan_stat_evnt:1, + scan_f_filter_prb_req:1, + scan_f_bypass_dfs_chn:1, + scan_f_continue_on_err:1, + scan_f_offchan_mgmt_tx:1, + scan_f_offchan_data_tx:1, + scan_f_promisc_mode:1, + scan_f_capture_phy_err:1, + scan_f_strict_passive_pch:1, + scan_f_half_rate:1, + scan_f_quarter_rate:1, + scan_f_force_active_dfs_chn:1, + scan_f_add_tpc_ie_in_probe:1, + scan_f_add_ds_ie_in_probe:1, + scan_f_add_spoofed_mac_in_probe:1, + scan_f_add_rand_seq_in_probe:1, + scan_f_en_ie_whitelist_in_probe:1, + scan_f_forced:1, + scan_f_2ghz:1, + scan_f_5ghz:1, + scan_f_wide_band:1; + }; + uint32_t scan_flags; + }; + union { + struct { + uint32_t scan_ev_started:1, + scan_ev_completed:1, + scan_ev_bss_chan:1, + scan_ev_foreign_chan:1, + scan_ev_dequeued:1, + scan_ev_preempted:1, + scan_ev_start_failed:1, + scan_ev_restarted:1, + scan_ev_foreign_chn_exit:1, + scan_ev_invalid:1, + scan_ev_gpio_timeout:1, + scan_ev_suspended:1, + scan_ev_resumed:1; + }; + uint32_t scan_events; + }; + struct roam_filter_params roam_params; + struct scoring_config score_config; +}; + +/** + * struct scan_cb - nif/sif function callbacks + * @inform_beacon: cb to indicate frame to OS + * @update_beacon: cb to indicate frame to MLME 
+ * @unlink_bss: cb to unlink bss from kernel cache + */ +struct scan_cb { + update_beacon_cb inform_beacon; + update_beacon_cb update_beacon; + update_beacon_cb unlink_bss; + /* Define nif/sif function callbacks here */ +}; + +/** + * struct wlan_scan_obj - scan object definition + * @enable_scan: if scan is enabled + * @scan_db: scan cache data base + * @cc_db: pointer of country code data base + * @lock: spin lock + * @scan_def: default scan parameters + * @cb: nif/sif function callbacks + * @requesters: requester allocation pool + * @scan_ids: last allocated scan id + * @global_evhandlers: registered scan event handlers + * @pdev_info: pointer to pdev info + * @pno_cfg: default pno configuration + * @ie_whitelist: default ie whitelist attrs + * @bt_a2dp_enabled: if bt a2dp is enabled + * @miracast_enabled: miracast enabled + * @disable_timeout: command timeout disabled + * @drop_bcn_on_chan_mismatch: drop bcn if channel mismatch + * @scan_start_request_buff: buffer used to pass + * scan config to event handlers + */ +struct wlan_scan_obj { + bool enable_scan; + qdf_spinlock_t lock; + qdf_atomic_t scan_ids; + struct scan_dbs scan_db[WLAN_UMAC_MAX_PDEVS]; + struct scan_country_code_db *cc_db; + struct scan_default_params scan_def; + struct scan_cb cb; + struct scan_requester_info requesters[WLAN_MAX_REQUESTORS]; + struct global_scan_ev_handlers global_evhandlers; + struct pdev_scan_info pdev_info[WLAN_UMAC_MAX_PDEVS]; + struct pno_def_config pno_cfg; + struct probe_req_whitelist_attr ie_whitelist; + bool bt_a2dp_enabled; + bool miracast_enabled; + bool disable_timeout; + bool drop_bcn_on_chan_mismatch; + struct scan_start_request scan_start_request_buff; +}; + +/** + * wlan_psoc_get_scan_obj() - private API to get scan object from psoc + * @psoc: psoc object + * + * Return: scan object + */ +static inline struct wlan_scan_obj * +wlan_psoc_get_scan_obj(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = (struct wlan_scan_obj *) + 
wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_SCAN); + + return scan_obj; +} + +/** + * wlan_pdev_get_scan_obj() - private API to get scan object from pdev + * @psoc: pdev object + * + * Return: scan object + */ +static inline struct wlan_scan_obj * +wlan_pdev_get_scan_obj(struct wlan_objmgr_pdev *pdev) +{ + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_pdev_get_psoc(pdev); + + return wlan_psoc_get_scan_obj(psoc); +} + +/** + * wlan_vdev_get_scan_obj() - private API to get scan object from vdev + * @psoc: vdev object + * + * Return: scan object + */ +static inline struct wlan_scan_obj * +wlan_vdev_get_scan_obj(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev *pdev; + + pdev = wlan_vdev_get_pdev(vdev); + + return wlan_pdev_get_scan_obj(pdev); +} + +/** + * wlan_get_vdev_scan_obj() - private API to get scan object vdev + * @vdev: vdev object + * + * Return: scan object + */ +static inline struct scan_vdev_obj * +wlan_get_vdev_scan_obj(struct wlan_objmgr_vdev *vdev) +{ + struct scan_vdev_obj *scan_vdev_obj; + + scan_vdev_obj = (struct scan_vdev_obj *) + wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_SCAN); + + return scan_vdev_obj; +} + +/** + * wlan_scan_vdev_get_pdev_id() - private API to get pdev id from vdev object + * @vdev: vdev object + * + * Return: parent pdev id + */ +static inline uint8_t +wlan_scan_vdev_get_pdev_id(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev *pdev; + + pdev = wlan_vdev_get_pdev(vdev); + + return wlan_objmgr_pdev_get_pdev_id(pdev); +} + +/** + * wlan_pdev_get_pdev_scan_ev_handlers() - private API to get + * pdev scan event handlers + * @vdev: pdev object + * + * Return: pdev_scan_ev_handler object + */ +static inline struct pdev_scan_ev_handler* +wlan_pdev_get_pdev_scan_ev_handlers(struct wlan_objmgr_pdev *pdev) +{ + uint8_t pdevid; + struct wlan_scan_obj *scan = NULL; + + if (!pdev) + goto err; + + pdevid = wlan_objmgr_pdev_get_pdev_id(pdev); + scan = wlan_pdev_get_scan_obj(pdev); + 
if (!scan) + goto err; + + return &scan->global_evhandlers.pdev_ev_handlers[pdevid]; + +err: + scm_err("NULL pointer, pdev: 0x%pK, scan_obj: 0x%pK", + pdev, scan); + return NULL; +} + +/** + * wlan_vdev_get_pdev_scan_ev_handlers() - private API to get + * pdev scan event handlers + * @vdev: vdev object + * + * Return: pdev_scan_ev_handler object + */ +static inline struct pdev_scan_ev_handler* +wlan_vdev_get_pdev_scan_ev_handlers(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_pdev *pdev; + + pdev = wlan_vdev_get_pdev(vdev); + + return wlan_pdev_get_pdev_scan_ev_handlers(pdev); +} + +/** + * wlan_scan_psoc_get_def_params() - private API to get scan defaults + * @psoc: psoc object + * + * Return: scan defaults + */ +static inline struct scan_default_params* +wlan_scan_psoc_get_def_params(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan = NULL; + + if (!psoc) { + scm_err("null psoc"); + return NULL; + } + scan = wlan_psoc_get_scan_obj(psoc); + + if (!scan) + return NULL; + + return &scan->scan_def; +} + +/** + * wlan_vdev_get_def_scan_params() - private API to get scan defaults + * @vdev: vdev object + * + * Return: scan defaults + */ +static inline struct scan_default_params* +wlan_vdev_get_def_scan_params(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + if (!vdev) { + scm_err("null vdev"); + return NULL; + } + psoc = wlan_vdev_get_psoc(vdev); + + return wlan_scan_psoc_get_def_params(psoc); +} + +/** + * wlan_scan_psoc_created_notification() - scan psoc create handler + * @psoc: psoc object + * @arg_list: Argument list + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_scan_psoc_created_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list); + +/** + * wlan_scan_psoc_deleted_notification() - scan psoc delete handler + * @psoc: psoc object + * @arg_list: Argument list + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_scan_psoc_destroyed_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list); + +/** + * 
wlan_scan_vdev_created_notification() - scan psoc create handler + * @vdev: vdev object + * @arg_list: Argument list + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_scan_vdev_created_notification(struct wlan_objmgr_vdev *vdev, + void *arg_list); + +/** + * wlan_scan_vdev_destroyed_notification() - scan psoc delete handler + * @vdev: vdev object + * @arg_list: Argument list + * + * Return: QDF_STATUS + */ +QDF_STATUS wlan_scan_vdev_destroyed_notification(struct wlan_objmgr_vdev *vdev, + void *arg_list); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_manager.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_manager.c new file mode 100644 index 0000000000000000000000000000000000000000..4961adff2192043ccb78685d2e40657ac512da55 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_manager.c @@ -0,0 +1,873 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/* + * DOC: contains scan manager functionality + */ + +#include +#include +#include +#include "wlan_scan_main.h" +#include "wlan_scan_manager.h" +#include "wlan_utility.h" +#ifdef FEATURE_WLAN_SCAN_PNO +#include +#endif + +QDF_STATUS +scm_scan_free_scan_request_mem(struct scan_start_request *req) +{ + void *ie; + + if (!req) { + scm_err("null request"); + QDF_ASSERT(0); + return QDF_STATUS_E_FAILURE; + } + scm_debug("freed scan request: 0x%pK, scan_id: %d, requester: %d", + req, req->scan_req.scan_id, req->scan_req.scan_req_id); + /* Free vendor(extra) ie */ + ie = req->scan_req.extraie.ptr; + if (ie) { + req->scan_req.extraie.ptr = NULL; + req->scan_req.extraie.len = 0; + qdf_mem_free(ie); + } + + /* Free htcap ie */ + ie = req->scan_req.htcap.ptr; + if (ie) { + req->scan_req.htcap.len = 0; + req->scan_req.htcap.ptr = NULL; + qdf_mem_free(ie); + } + + /* Free vhtcap ie */ + ie = req->scan_req.vhtcap.ptr; + if (ie) { + req->scan_req.vhtcap.len = 0; + req->scan_req.vhtcap.ptr = NULL; + qdf_mem_free(ie); + } + /* free scan_start_request memory */ + qdf_mem_free(req); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +scm_scan_get_pdev_global_event_handlers(struct scan_event_listeners *listeners, + struct pdev_scan_ev_handler *pdev_ev_handler) +{ + uint32_t i; + struct cb_handler *cb_handlers = &(pdev_ev_handler->cb_handlers[0]); + + for (i = 0; i < MAX_SCAN_EVENT_HANDLERS_PER_PDEV; i++, cb_handlers++) { + if ((cb_handlers->func) && + (listeners->count < MAX_SCAN_EVENT_LISTENERS)) { + listeners->cb[listeners->count].func = + cb_handlers->func; + listeners->cb[listeners->count].arg = + cb_handlers->arg; + listeners->count++; + } + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +scm_scan_get_requester_event_handler(struct scan_event_listeners *listeners, + struct scan_requester_info *requesters, + wlan_scan_requester requester_id) +{ + uint32_t idx; + struct cb_handler *ev_handler; + + idx = requester_id & WLAN_SCAN_REQUESTER_ID_PREFIX; + if (idx 
!= WLAN_SCAN_REQUESTER_ID_PREFIX) + return QDF_STATUS_SUCCESS; + + idx = requester_id & WLAN_SCAN_REQUESTER_ID_MASK; + if (idx < WLAN_MAX_REQUESTORS) { + ev_handler = &(requesters[idx].ev_handler); + if (ev_handler->func) { + if (listeners->count < MAX_SCAN_EVENT_LISTENERS) { + listeners->cb[listeners->count].func = + ev_handler->func; + listeners->cb[listeners->count].arg = + ev_handler->arg; + listeners->count++; + } + } + return QDF_STATUS_SUCCESS; + } else { + scm_err("invalid requester id"); + return QDF_STATUS_E_INVAL; + } + +} + +static void scm_scan_post_event(struct wlan_objmgr_vdev *vdev, + struct scan_event *event) +{ + uint32_t i = 0; + struct wlan_scan_obj *scan; + struct pdev_scan_ev_handler *pdev_ev_handler; + struct cb_handler *cb_handlers; + struct scan_requester_info *requesters; + struct scan_event_listeners *listeners; + + if (!vdev || !event) { + scm_err("vdev: 0x%pK, event: 0x%pK", vdev, event); + return; + } + if (!event->requester) { + scm_err("invalid requester id"); + QDF_ASSERT(0); + } + scan = wlan_vdev_get_scan_obj(vdev); + pdev_ev_handler = wlan_vdev_get_pdev_scan_ev_handlers(vdev); + if (!pdev_ev_handler) + return; + cb_handlers = &(pdev_ev_handler->cb_handlers[0]); + requesters = scan->requesters; + + scm_debug("vdev: %d, type: %d, reason: %d, freq: %d, req: %d, scanid: %d", + event->vdev_id, event->type, event->reason, event->chan_freq, + event->requester, event->scan_id); + + listeners = qdf_mem_malloc_atomic(sizeof(*listeners)); + if (!listeners) { + scm_warn("couldn't allocate listeners list"); + return; + } + + /* initialize number of listeners */ + listeners->count = 0; + + /* + * Initiator of scan request decides which all scan events + * he is interested in and FW will send only those scan events + * to host driver. + * All the events received by scan module will be notified + * to all registered handlers. 
+ */ + + qdf_spin_lock_bh(&scan->lock); + /* find all global scan event handlers on this pdev */ + scm_scan_get_pdev_global_event_handlers(listeners, pdev_ev_handler); + /* find owner who triggered this scan request */ + scm_scan_get_requester_event_handler(listeners, requesters, + event->requester); + qdf_spin_unlock_bh(&scan->lock); + + /* notify all interested handlers */ + for (i = 0; i < listeners->count; i++) { + scm_debug("func: 0x%pK, arg: 0x%pK", + listeners->cb[i].func, listeners->cb[i].arg); + listeners->cb[i].func(vdev, event, listeners->cb[i].arg); + } + qdf_mem_free(listeners); +} + +static QDF_STATUS +scm_release_serialization_command(struct wlan_objmgr_vdev *vdev, + uint32_t scan_id) +{ + struct wlan_serialization_queued_cmd_info cmd = {0}; + + cmd.requestor = WLAN_UMAC_COMP_SCAN; + cmd.cmd_type = WLAN_SER_CMD_SCAN; + cmd.cmd_id = scan_id; + cmd.req_type = WLAN_SER_CANCEL_SINGLE_SCAN; + cmd.vdev = vdev; + cmd.queue_type = WLAN_SERIALIZATION_ACTIVE_QUEUE; + + /* Inform serialization for command completion */ + wlan_serialization_remove_cmd(&cmd); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +scm_post_internal_scan_complete_event(struct scan_start_request *req, + enum scan_completion_reason reason) +{ + struct scan_event event = {0, }; + + /* prepare internal scan complete event */ + event.type = SCAN_EVENT_TYPE_COMPLETED; + event.reason = reason; + event.chan_freq = 0; /* Invalid frequency */ + event.vdev_id = req->scan_req.vdev_id; + event.requester = req->scan_req.scan_req_id; + event.scan_id = req->scan_req.scan_id; + /* Fill scan_start_request used to trigger this scan */ + event.scan_start_req = req; + /* post scan event to registered handlers */ + scm_scan_post_event(req->vdev, &event); + + return QDF_STATUS_SUCCESS; +} + +static inline struct pdev_scan_info * +scm_scan_get_pdev_priv_info(uint8_t pdev_id, struct wlan_scan_obj *scan_obj) +{ + return &scan_obj->pdev_info[pdev_id]; +} + +static QDF_STATUS 
+scm_update_last_scan_time(struct scan_start_request *req) +{ + uint8_t pdev_id; + struct wlan_scan_obj *scan_obj; + struct pdev_scan_info *pdev_scan_info; + + scan_obj = wlan_vdev_get_scan_obj(req->vdev); + pdev_id = wlan_scan_vdev_get_pdev_id(req->vdev); + pdev_scan_info = scm_scan_get_pdev_priv_info(pdev_id, scan_obj); + /* update last scan start time */ + pdev_scan_info->last_scan_time = qdf_system_ticks(); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +scm_activate_scan_request(struct scan_start_request *req) +{ + QDF_STATUS status; + + status = tgt_scan_start(req); + if (status != QDF_STATUS_SUCCESS) { + scm_err("tgt_scan_start failed, status: %d", status); + /* scan could not be started and hence + * we will not receive any completions. + * post scan cancelled + */ + scm_post_internal_scan_complete_event(req, + SCAN_REASON_CANCELLED); + return status; + } + /* save last scan start time */ + status = scm_update_last_scan_time(req); + + return status; +} + +static QDF_STATUS +scm_cancel_scan_request(struct scan_start_request *req) +{ + struct scan_cancel_request cancel_req = {0, }; + QDF_STATUS status; + + cancel_req.vdev = req->vdev; + cancel_req.cancel_req.scan_id = req->scan_req.scan_id; + cancel_req.cancel_req.requester = req->scan_req.scan_req_id; + cancel_req.cancel_req.req_type = WLAN_SCAN_CANCEL_SINGLE; + cancel_req.cancel_req.vdev_id = req->scan_req.vdev_id; + /* send scan cancel to fw */ + status = tgt_scan_cancel(&cancel_req); + if (status != QDF_STATUS_SUCCESS) + scm_err("tgt_scan_cancel failed: status: %d, scanid: %d", + status, req->scan_req.scan_id); + /* notify event handler about scan cancellation */ + scm_post_internal_scan_complete_event(req, SCAN_REASON_CANCELLED); + + return status; +} + +static QDF_STATUS +scm_scan_serialize_callback(struct wlan_serialization_command *cmd, + enum wlan_serialization_cb_reason reason) +{ + struct scan_start_request *req; + QDF_STATUS status; + + if (!cmd) { + scm_err("cmd is NULL, reason: %d", 
reason); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!cmd->umac_cmd) { + scm_err("cmd->umac_cmd is NULL , reason: %d", reason); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + + req = cmd->umac_cmd; + scm_debug("reason:%d, reqid:%d, scanid:%d, vdevid:%d, vdev:0x%pK", + reason, req->scan_req.scan_req_id, req->scan_req.scan_id, + req->scan_req.vdev_id, req->vdev); + + if (!req->vdev) { + scm_err("NULL vdev. req:0x%pK, reason:%d\n", req, reason); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + + qdf_mtrace(QDF_MODULE_ID_SERIALIZATION, QDF_MODULE_ID_SCAN, reason, + req->scan_req.vdev_id, req->scan_req.scan_id); + + switch (reason) { + case WLAN_SER_CB_ACTIVATE_CMD: + /* command moved to active list + * modify the params if required for concurency case. + */ + status = scm_activate_scan_request(req); + break; + + case WLAN_SER_CB_CANCEL_CMD: + /* command removed from pending list. + * notify registered scan event handlers with + * status completed and reason cancelled. + */ + status = scm_post_internal_scan_complete_event(req, + SCAN_REASON_CANCELLED); + break; + + case WLAN_SER_CB_ACTIVE_CMD_TIMEOUT: + /* active command timed out. + * prepare internal scan cancel request + */ + status = scm_cancel_scan_request(req); + break; + + case WLAN_SER_CB_RELEASE_MEM_CMD: + /* command successfully completed. 
+ * Release vdev reference and free scan_start_request memory + */ + cmd->umac_cmd = NULL; + wlan_objmgr_vdev_release_ref(req->vdev, WLAN_SCAN_ID); + status = scm_scan_free_scan_request_mem(req); + break; + + default: + /* Do nothing but logging */ + QDF_ASSERT(0); + status = QDF_STATUS_E_INVAL; + break; + } + + return status; +} + +QDF_STATUS +scm_scan_start_req(struct scheduler_msg *msg) +{ + struct wlan_serialization_command cmd = {0, }; + enum wlan_serialization_status ser_cmd_status; + struct scan_start_request *req = NULL; + struct wlan_scan_obj *scan_obj; + struct scan_vdev_obj *scan_vdev_priv_obj; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!msg) { + scm_err("msg received is NULL"); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + if (!msg->bodyptr) { + scm_err("bodyptr is NULL"); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + + req = msg->bodyptr; + scan_obj = wlan_vdev_get_scan_obj(req->vdev); + if (!scan_obj) { + scm_debug("Couldn't find scan object"); + status = QDF_STATUS_E_NULL_VALUE; + goto err; + } + + if (!scan_obj->enable_scan) { + scm_err("scan disabled, rejecting the scan req"); + status = QDF_STATUS_E_NULL_VALUE; + goto err; + } + + scan_vdev_priv_obj = wlan_get_vdev_scan_obj(req->vdev); + if (!scan_vdev_priv_obj) { + scm_debug("Couldn't find scan priv object"); + status = QDF_STATUS_E_NULL_VALUE; + goto err; + } + + if (scan_vdev_priv_obj->is_vdev_delete_in_progress) { + scm_err("Can't allow scan on vdev_id:%d", + wlan_vdev_get_id(req->vdev)); + status = QDF_STATUS_E_NULL_VALUE; + goto err; + } + + cmd.cmd_type = WLAN_SER_CMD_SCAN; + cmd.cmd_id = req->scan_req.scan_id; + cmd.cmd_cb = scm_scan_serialize_callback; + cmd.umac_cmd = req; + cmd.source = WLAN_UMAC_COMP_SCAN; + cmd.is_high_priority = false; + cmd.cmd_timeout_duration = req->scan_req.max_scan_time + + SCAN_TIMEOUT_GRACE_PERIOD; + cmd.vdev = req->vdev; + + if (scan_obj->disable_timeout) + cmd.cmd_timeout_duration = 0; + + scm_debug("req: 0x%pK, reqid: %d, 
scanid: %d, vdevid: %d", + req, req->scan_req.scan_req_id, req->scan_req.scan_id, + req->scan_req.vdev_id); + + qdf_mtrace(QDF_MODULE_ID_SCAN, QDF_MODULE_ID_SERIALIZATION, + WLAN_SER_CMD_SCAN, req->vdev->vdev_objmgr.vdev_id, + req->scan_req.scan_id); + + ser_cmd_status = wlan_serialization_request(&cmd); + scm_debug("wlan_serialization_request status:%d", ser_cmd_status); + + switch (ser_cmd_status) { + case WLAN_SER_CMD_PENDING: + /* command moved to pending list.Do nothing */ + break; + case WLAN_SER_CMD_ACTIVE: + /* command moved to active list. Do nothing */ + break; + case WLAN_SER_CMD_DENIED_LIST_FULL: + case WLAN_SER_CMD_DENIED_RULES_FAILED: + case WLAN_SER_CMD_DENIED_UNSPECIFIED: + goto err; + default: + QDF_ASSERT(0); + status = QDF_STATUS_E_INVAL; + goto err; + } + + return status; +err: + /* + * notify registered scan event handlers + * about internal error + */ + scm_post_internal_scan_complete_event(req, + SCAN_REASON_INTERNAL_FAILURE); + /* + * cmd can't be serviced. + * release vdev reference and free scan_start_request memory + */ + if (req) { + wlan_objmgr_vdev_release_ref(req->vdev, WLAN_SCAN_ID); + scm_scan_free_scan_request_mem(req); + } + + return status; +} + +static inline enum wlan_serialization_cancel_type +get_serialization_cancel_type(enum scan_cancel_req_type type) +{ + enum wlan_serialization_cancel_type serialization_type; + + switch (type) { + case WLAN_SCAN_CANCEL_SINGLE: + serialization_type = WLAN_SER_CANCEL_SINGLE_SCAN; + break; + case WLAN_SCAN_CANCEL_VDEV_ALL: + serialization_type = WLAN_SER_CANCEL_VDEV_SCANS; + break; + case WLAN_SCAN_CANCEL_PDEV_ALL: + serialization_type = WLAN_SER_CANCEL_PDEV_SCANS; + break; + default: + QDF_ASSERT(0); + scm_warn("invalid scan_cancel_req_type: %d", type); + serialization_type = WLAN_SER_CANCEL_PDEV_SCANS; + break; + } + + return serialization_type; +} + +QDF_STATUS +scm_scan_cancel_req(struct scheduler_msg *msg) +{ + struct wlan_serialization_queued_cmd_info cmd = {0,}; + struct 
wlan_serialization_command ser_cmd = {0,}; + enum wlan_serialization_cmd_status ser_cmd_status; + struct scan_cancel_request *req; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!msg) { + scm_err("msg received is NULL"); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + if (!msg->bodyptr) { + scm_err("Bodyptr is NULL"); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + + req = msg->bodyptr; + /* + * If requester wants to wait for target scan cancel event + * instead of internally generated cancel event, just check + * which queue this scan request belongs to and send scan + * cancel request to FW accordingly. + * Else generate internal scan cancel event and notify + * handlers and free scan request resources. + */ + if (req->wait_tgt_cancel && + (req->cancel_req.req_type == WLAN_SCAN_CANCEL_SINGLE)) { + ser_cmd.cmd_type = WLAN_SER_CMD_SCAN; + ser_cmd.cmd_id = req->cancel_req.scan_id; + ser_cmd.cmd_cb = NULL; + ser_cmd.umac_cmd = NULL; + ser_cmd.source = WLAN_UMAC_COMP_SCAN; + ser_cmd.is_high_priority = false; + ser_cmd.vdev = req->vdev; + if (wlan_serialization_is_cmd_present_in_active_queue(NULL, &ser_cmd)) + ser_cmd_status = WLAN_SER_CMD_IN_ACTIVE_LIST; + else if (wlan_serialization_is_cmd_present_in_pending_queue(NULL, &ser_cmd)) + ser_cmd_status = WLAN_SER_CMD_IN_PENDING_LIST; + else + ser_cmd_status = WLAN_SER_CMD_NOT_FOUND; + } else { + cmd.requestor = 0; + cmd.cmd_type = WLAN_SER_CMD_SCAN; + cmd.cmd_id = req->cancel_req.scan_id; + cmd.vdev = req->vdev; + cmd.queue_type = WLAN_SERIALIZATION_ACTIVE_QUEUE | + WLAN_SERIALIZATION_PENDING_QUEUE; + cmd.req_type = get_serialization_cancel_type(req->cancel_req.req_type); + + ser_cmd_status = wlan_serialization_cancel_request(&cmd); + } + + scm_debug("status: %d, reqid: %d, scanid: %d, vdevid: %d, type: %d", + ser_cmd_status, req->cancel_req.requester, + req->cancel_req.scan_id, req->cancel_req.vdev_id, + req->cancel_req.req_type); + + switch (ser_cmd_status) { + case WLAN_SER_CMD_IN_PENDING_LIST: + 
		/* Command never reached serialization; nothing to send to fw */
		/* do nothing */
		break;
	case WLAN_SER_CMD_IN_ACTIVE_LIST:
	case WLAN_SER_CMDS_IN_ALL_LISTS:
		/* send wmi scan cancel to fw */
		status = tgt_scan_cancel(req);
		break;
	case WLAN_SER_CMD_NOT_FOUND:
		/* do nothing */
		break;
	default:
		QDF_ASSERT(0);
		status = QDF_STATUS_E_INVAL;
		break;
	}

	/* Release vdev reference and scan cancel request
	 * processing is complete
	 */
	wlan_objmgr_vdev_release_ref(req->vdev, WLAN_SCAN_ID);
	/* Free cancel request memory */
	qdf_mem_free(req);

	return status;
}

#ifdef FEATURE_WLAN_SCAN_PNO
/**
 * scm_pno_event_handler() - handle PNO (NLO) scan events
 * @vdev: vdev on which the event arrived
 * @event: scan event received from target
 *
 * Manages the PNO wakelock across a match/complete event pair and
 * invokes the registered PNO callback (if any) with the psoc lock
 * dropped. A COMPLETE without a preceding MATCH is ignored.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
scm_pno_event_handler(struct wlan_objmgr_vdev *vdev,
	struct scan_event *event)
{
	struct scan_vdev_obj *scan_vdev_obj;
	struct wlan_scan_obj *scan_psoc_obj;
	scan_event_handler pno_cb;
	void *cb_arg;

	scan_vdev_obj = wlan_get_vdev_scan_obj(vdev);
	scan_psoc_obj = wlan_vdev_get_scan_obj(vdev);
	if (!scan_vdev_obj || !scan_psoc_obj) {
		scm_err("null scan_vdev_obj %pK scan_obj %pK",
			scan_vdev_obj, scan_psoc_obj);
		return QDF_STATUS_E_INVAL;
	}

	switch (event->type) {
	case SCAN_EVENT_TYPE_NLO_COMPLETE:
		/* Ignore a COMPLETE that was not preceded by a MATCH */
		if (!scan_vdev_obj->pno_match_evt_received)
			return QDF_STATUS_SUCCESS;
		/* Swap the MATCH-time hold for a short post-complete
		 * timeout so the host can process the result before
		 * suspending again.
		 */
		qdf_wake_lock_release(&scan_psoc_obj->pno_cfg.pno_wake_lock,
				      WIFI_POWER_EVENT_WAKELOCK_PNO);
		qdf_wake_lock_timeout_acquire(
			&scan_psoc_obj->pno_cfg.pno_wake_lock,
			SCAN_PNO_SCAN_COMPLETE_WAKE_LOCK_TIMEOUT);
		scan_vdev_obj->pno_match_evt_received = false;
		break;
	case SCAN_EVENT_TYPE_NLO_MATCH:
		scan_vdev_obj->pno_match_evt_received = true;
		qdf_wake_lock_timeout_acquire(
			&scan_psoc_obj->pno_cfg.pno_wake_lock,
			SCAN_PNO_MATCH_WAKE_LOCK_TIMEOUT);
		/* Callback is only invoked on COMPLETE, not on MATCH */
		return QDF_STATUS_SUCCESS;
	default:
		return QDF_STATUS_E_INVAL;
	}
	/* Snapshot callback under lock; call it outside the lock */
	qdf_spin_lock_bh(&scan_psoc_obj->lock);
	pno_cb = scan_psoc_obj->pno_cfg.pno_cb.func;
	cb_arg = scan_psoc_obj->pno_cfg.pno_cb.arg;
	qdf_spin_unlock_bh(&scan_psoc_obj->lock);

	if (pno_cb)
		pno_cb(vdev, event, cb_arg);

	return QDF_STATUS_SUCCESS;
}
#else
/* Stub when PNO support is compiled out */
static QDF_STATUS
scm_pno_event_handler(struct wlan_objmgr_vdev *vdev,
	struct scan_event *event)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * scm_scan_update_scan_event() - update scan event
 * @scan: scan object
 * @event: scan event
 * @scan_start_req: scan_start_req used for triggering scan
 *
 * Copies the originating scan request into the psoc-level pass-back
 * buffer (with the borrowed IE pointers cleared, since their memory
 * is owned by the queued request) and points @event at that buffer.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
scm_scan_update_scan_event(struct wlan_scan_obj *scan,
		struct scan_event *event,
		struct scan_start_request *scan_start_req)
{
	if (!event)
		return QDF_STATUS_E_NULL_VALUE;

	if (!scan || !scan_start_req) {
		event->scan_start_req = NULL;
		return QDF_STATUS_E_NULL_VALUE;
	}
	/* copy scan start request to pass back buffer */
	qdf_mem_copy(&scan->scan_start_request_buff, scan_start_req,
			sizeof(struct scan_start_request));
	/* reset all pointers: the copy must not alias IE buffers owned
	 * by the original request
	 */
	scan->scan_start_request_buff.scan_req.extraie.ptr = NULL;
	scan->scan_start_request_buff.scan_req.extraie.len = 0;
	scan->scan_start_request_buff.scan_req.htcap.ptr = NULL;
	scan->scan_start_request_buff.scan_req.htcap.len = 0;
	scan->scan_start_request_buff.scan_req.vhtcap.ptr = NULL;
	scan->scan_start_request_buff.scan_req.vhtcap.len = 0;

	event->scan_start_req = &scan->scan_start_request_buff;

	return QDF_STATUS_SUCCESS;
}

/**
 * scm_scan_event_handler() - core scan event handler from tgt interface
 * @msg: scheduler message carrying a struct scan_event_info in bodyptr
 *
 * Dispatches NLO events directly, validates all other events against
 * the serialization active queue (stale events for completed/cancelled
 * scans are dropped), releases the serialization command on terminal
 * events, and notifies registered listeners. Always frees the event
 * info and drops the vdev reference taken by the poster.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
scm_scan_event_handler(struct scheduler_msg *msg)
{
	struct wlan_objmgr_vdev *vdev;
	struct scan_event *event;
	struct scan_event_info *event_info;
	struct wlan_serialization_command cmd = {0,};
	struct wlan_serialization_command *queued_cmd;
	struct scan_start_request *scan_start_req;
	struct wlan_scan_obj *scan;

	if (!msg) {
		scm_err("NULL msg received ");
		QDF_ASSERT(0);
		return QDF_STATUS_E_NULL_VALUE;
	}
	if (!msg->bodyptr) {
		scm_err("NULL scan event received");
		QDF_ASSERT(0);
		return QDF_STATUS_E_NULL_VALUE;
	}

	event_info = msg->bodyptr;
	vdev = event_info->vdev;
	event = &(event_info->event);

	scm_debug("vdevid:%d, type:%d, reason:%d, freq:%d, reqstr:%d, scanid:%d",
		event->vdev_id, event->type, event->reason, event->chan_freq,
		event->requester, event->scan_id);
	/*
	 * NLO requests are never queued, so post NLO events
	 * without checking for their presence in active queue.
	 */
	switch (event->type) {
	case SCAN_EVENT_TYPE_NLO_COMPLETE:
	case SCAN_EVENT_TYPE_NLO_MATCH:
		scm_pno_event_handler(vdev, event);
		goto exit;
	default:
		break;
	}

	cmd.cmd_type = WLAN_SER_CMD_SCAN;
	cmd.cmd_id = event->scan_id;
	cmd.cmd_cb = NULL;
	cmd.umac_cmd = NULL;
	cmd.source = WLAN_UMAC_COMP_SCAN;
	cmd.is_high_priority = false;
	cmd.vdev = vdev;
	if (!wlan_serialization_is_cmd_present_in_active_queue(NULL, &cmd)) {
		/*
		 * We received scan event for an already completed/cancelled
		 * scan request. Drop this event.
		 */
		scm_debug("Received scan event while request not in active queue");
		goto exit;
	}

	/* Fill scan_start_request used to trigger this scan */
	queued_cmd = wlan_serialization_get_scan_cmd_using_scan_id(
			wlan_vdev_get_psoc(vdev), wlan_vdev_get_id(vdev),
			event->scan_id, true);

	if (!queued_cmd) {
		scm_err("NULL queued_cmd");
		goto exit;
	}
	if (!queued_cmd->umac_cmd) {
		scm_err("NULL umac_cmd");
		goto exit;
	}
	scan_start_req = queued_cmd->umac_cmd;

	/* Event must belong to the requester that queued this command */
	if (scan_start_req->scan_req.scan_req_id != event->requester) {
		scm_err("req ID mismatch, scan_req_id:%d, event_req_id:%d",
				scan_start_req->scan_req.scan_req_id,
				event->requester);
		goto exit;
	}

	scan = wlan_vdev_get_scan_obj(vdev);
	if (scan)
		scm_scan_update_scan_event(scan, event, scan_start_req);

	switch (event->type) {
	case SCAN_EVENT_TYPE_COMPLETED:
		if (event->reason == SCAN_REASON_COMPLETED)
			scm_11d_decide_country_code(vdev);
		/* fall through to release the command */
	case SCAN_EVENT_TYPE_START_FAILED:
	case SCAN_EVENT_TYPE_DEQUEUED:
		scm_release_serialization_command(vdev, event->scan_id);
		break;
	default:
		break;
	}

	/* Notify all interested parties */
	scm_scan_post_event(vdev, event);

exit:
	/* free event info memory */
	qdf_mem_free(event_info);
	wlan_objmgr_vdev_release_ref(vdev, WLAN_SCAN_ID);

	return QDF_STATUS_SUCCESS;
}

/**
 * scm_scan_event_flush_callback() - drop a queued scan event
 * @msg: scheduler message carrying a struct scan_event_info
 *
 * Invoked when the scheduler thread flushes its queue on shutdown;
 * releases the vdev reference and frees the event payload.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS scm_scan_event_flush_callback(struct scheduler_msg *msg)
{
	struct wlan_objmgr_vdev *vdev;
	struct scan_event_info *event_info;

	if (!msg || !msg->bodyptr) {
		scm_err("msg or msg->bodyptr is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	event_info = msg->bodyptr;
	vdev = event_info->vdev;

	/* free event info memory */
	qdf_mem_free(event_info);
	wlan_objmgr_vdev_release_ref(vdev, WLAN_SCAN_ID);

	return QDF_STATUS_SUCCESS;
}

/**
 * scm_bcn_probe_flush_callback() - drop a queued beacon/probe response
 * @msg: scheduler message carrying a struct scan_bcn_probe_event
 *
 * Releases the psoc reference and frees the rx metadata, network
 * buffer and the event wrapper itself.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS scm_bcn_probe_flush_callback(struct scheduler_msg *msg)
{
	struct scan_bcn_probe_event *bcn;

	bcn = msg->bodyptr;

	if (!bcn) {
		scm_err("bcn is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}
	if (bcn->psoc)
		wlan_objmgr_psoc_release_ref(bcn->psoc, WLAN_SCAN_ID);
	if (bcn->rx_data)
		qdf_mem_free(bcn->rx_data);
	if (bcn->buf)
		qdf_nbuf_free(bcn->buf);
	qdf_mem_free(bcn);

	return QDF_STATUS_SUCCESS;
}

/**
 * scm_scan_start_flush_callback() - drop a queued scan start request
 * @msg: scheduler message carrying a struct scan_start_request
 *
 * Posts a CANCELLED completion to the requester before releasing the
 * vdev reference and the request memory, so callers waiting on the
 * scan are unblocked.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS scm_scan_start_flush_callback(struct scheduler_msg *msg)
{
	struct scan_start_request *req;

	if (!msg || !msg->bodyptr) {
		scm_err("msg or msg->bodyptr is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	req = msg->bodyptr;
	scm_post_internal_scan_complete_event(req, SCAN_REASON_CANCELLED);
	wlan_objmgr_vdev_release_ref(req->vdev, WLAN_SCAN_ID);
	scm_scan_free_scan_request_mem(req);

	return QDF_STATUS_SUCCESS;
}

/**
 * scm_scan_cancel_flush_callback() - drop a queued scan cancel request
 * @msg: scheduler message carrying a struct scan_cancel_request
 *
 * Releases the vdev reference and frees the cancel request.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS scm_scan_cancel_flush_callback(struct scheduler_msg *msg)
{
	struct scan_cancel_request *req;

	if (!msg || !msg->bodyptr) {
		scm_err("msg or msg->bodyptr is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	req = msg->bodyptr;
	wlan_objmgr_vdev_release_ref(req->vdev, WLAN_SCAN_ID);
	/* Free cancel request memory */
	qdf_mem_free(req);

	return QDF_STATUS_SUCCESS;
}
/*
 * Copyright (c) 2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * DOC: Defines internal scan manager api
 * Core routines which deal with starting a scan,
 * serializing scan requests, scan cancellation, scan completion,
 * scan event processing.
 */

#ifndef _WLAN_SCAN_MANAGER_API_H_
#define _WLAN_SCAN_MANAGER_API_H_

#include "wlan_scan_main.h"

/*
 * Maximum numbers of callback functions that may be invoked
 * for a particular scan event.
 */
#define MAX_SCAN_EVENT_LISTENERS (MAX_SCAN_EVENT_HANDLERS_PER_PDEV + 1)

/**
 * struct scan_event_listeners - listeners interested in a particular scan event
 * @count: number of listeners
 * @cb: callback handlers
 */
struct scan_event_listeners {
	uint32_t count;
	struct cb_handler cb[MAX_SCAN_EVENT_LISTENERS];
};

/**
 * scm_scan_start_req() - scan start req core api
 * @msg: scheduler message object containing start scan req params
 *
 * The API to start a scan
 *
 * Return: QDF_STATUS
 */
QDF_STATUS scm_scan_start_req(struct scheduler_msg *msg);

/**
 * scm_scan_cancel_req() - scan cancel req core api
 * @msg: scheduler message object containing stop scan params
 *
 * The API to cancel a scan
 *
 * Return: QDF_STATUS
 */
QDF_STATUS scm_scan_cancel_req(struct scheduler_msg *msg);


/**
 * scm_scan_event_handler() - core scan event handler from tgt interface
 * @msg: scheduler message object containing scan event
 *
 * This function calls registered event handlers of various modules
 *
 * Return: QDF_STATUS
 */
QDF_STATUS scm_scan_event_handler(struct scheduler_msg *msg);

/**
 * scm_scan_free_scan_request_mem() - Free scan request memory
 * @req: scan_start_request object
 *
 * Return: QDF_STATUS
 */
QDF_STATUS scm_scan_free_scan_request_mem(struct scan_start_request *req);

/**
 * scm_scan_event_flush_callback() - flush scan event
 * @msg: scheduler message object containing scan event
 *
 * This function call is invoked when scheduler thread is going down
 *
 * Return: QDF_STATUS
 */
QDF_STATUS scm_scan_event_flush_callback(struct scheduler_msg *msg);

/**
 * scm_bcn_probe_flush_callback() - flush beacon/probe response
 * @msg: scheduler message object containing scan event
 *
 * This function call is invoked when scheduler thread is going down
 *
 * Return: QDF_STATUS
 */
QDF_STATUS scm_bcn_probe_flush_callback(struct scheduler_msg *msg);

/**
 * scm_scan_start_flush_callback() - flush scan start request
 * @msg: scheduler message object containing scan event
 *
 * This function call is invoked when scheduler thread is going down
 *
 * Return: QDF_STATUS
 */
QDF_STATUS scm_scan_start_flush_callback(struct scheduler_msg *msg);

/**
 * scm_scan_cancel_flush_callback() - flush scan cancel request
 * @msg: scheduler message object containing scan event
 *
 * This function call is invoked when scheduler thread is going down
 *
 * Return: QDF_STATUS
 */
QDF_STATUS scm_scan_cancel_flush_callback(struct scheduler_msg *msg);

#endif /* _WLAN_SCAN_MANAGER_API_H_ */

/*
 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
+ */ + +/* + * DOC: contains scan structure definations + */ + +#ifndef _WLAN_SCAN_STRUCTS_H_ +#define _WLAN_SCAN_STRUCTS_H_ +#include +#include +#include +#include +#include +#include + +typedef uint16_t wlan_scan_requester; +typedef uint32_t wlan_scan_id; + +#define WLAN_SCAN_MAX_NUM_SSID 16 +#define WLAN_SCAN_MAX_NUM_BSSID 4 +#define WLAN_SCAN_MAX_NUM_CHANNELS 68 + +#define SCM_CANCEL_SCAN_WAIT_TIME 50 +#define SCM_CANCEL_SCAN_WAIT_ITERATION 600 + +#define INVAL_SCAN_ID 0xFFFFFFFF +#define INVAL_VDEV_ID 0xFFFFFFFF +#define INVAL_PDEV_ID 0xFFFFFFFF + +#define USER_SCAN_REQUESTOR_ID 0xA0000 +#define PREAUTH_REQUESTOR_ID 0xC0000 + +#define BURST_SCAN_MAX_NUM_OFFCHANNELS 3 +#define P2P_SCAN_MAX_BURST_DURATION 180 +/* Increase dwell time for P2P search in ms */ +#define P2P_SEARCH_DWELL_TIME_INC 20 + +#define PROBE_REQ_BITMAP_LEN 8 +#define MAX_PROBE_REQ_OUIS 16 + +#define RSSI_WEIGHTAGE 20 +#define HT_CAPABILITY_WEIGHTAGE 2 +#define VHT_CAP_WEIGHTAGE 1 +#define HE_CAP_WEIGHTAGE 2 +#define CHAN_WIDTH_WEIGHTAGE 17 +#define CHAN_BAND_WEIGHTAGE 2 +#define NSS_WEIGHTAGE 16 +#define BEAMFORMING_CAP_WEIGHTAGE 2 +#define PCL_WEIGHT 10 +#define CHANNEL_CONGESTION_WEIGHTAGE 5 +#define OCE_WAN_WEIGHTAGE 0 +#define BEST_CANDIDATE_MAX_WEIGHT 100 +#define MAX_INDEX_SCORE 100 +#define MAX_INDEX_PER_INI 4 + +#ifdef CONFIG_MCL +#define MAX_BCN_PROBE_IN_SCAN_QUEUE 150 +#else +#define MAX_BCN_PROBE_IN_SCAN_QUEUE 2000 +#endif + +#define WLAN_GET_BITS(_val, _index, _num_bits) \ + (((_val) >> (_index)) & ((1 << (_num_bits)) - 1)) + +#define WLAN_SET_BITS(_var, _index, _num_bits, _val) do { \ + (_var) &= ~(((1 << (_num_bits)) - 1) << (_index)); \ + (_var) |= (((_val) & ((1 << (_num_bits)) - 1)) << (_index)); \ + } while (0) + +#define WLAN_GET_SCORE_PERCENTAGE(value32, bw_index) \ + WLAN_GET_BITS(value32, (8 * (bw_index)), 8) +#define WLAN_SET_SCORE_PERCENTAGE(value32, score_pcnt, bw_index) \ + WLAN_SET_BITS(value32, (8 * (bw_index)), 8, score_pcnt) + +/* forward declaration */ +struct 
wlan_objmgr_vdev; +struct wlan_objmgr_pdev; +struct wlan_objmgr_psoc; + +/** + * struct channel_info - BSS channel information + * @chan_idx: current operating channel index + * @cfreq0: channel frequency index0 + * @cfreq1: channel frequency index1 + * @priv: channel private information + */ +struct channel_info { + uint8_t chan_idx; + uint8_t cfreq0; + uint8_t cfreq1; + void *priv; +}; + +/** + * struct element_info - defines length of a memory block and memory block + * @len: length of memory block + * @ptr: memory block pointer + */ +struct element_info { + uint32_t len; + uint8_t *ptr; +}; + +/** + * struct ie_list - pointers to various IEs + * @tim: pointer to tim ie + * @country: pointer to country ie + * @ssid: pointer to ssid ie + * @rates: pointer to supported rates ie + * @xrates: pointer to extended supported rate ie + * @ds_param: pointer to ds params + * @csa: pointer to csa ie + * @xcsa: pointer to extended csa ie + * @wpa: pointer to wpa ie + * @wcn: pointer to wcn ie + * @rsn: pointer to rsn ie + * @wps: pointer to wps ie + * @wmeinfo: pointer to wmeinfo ie + * @wmeparam: pointer to wmeparam ie + * @quiet: pointer to quiet ie + * @htcap: pointer to htcap ie + * @htinfo: pointer to htinfo ie + * @athcaps: pointer to athcaps ie + * @athextcaps: pointer to extended athcaps ie + * @sfa: pointer to sfa ie + * @vendor: pointer to vendor ie + * @qbssload: pointer to qbssload ie + * @wapi: pointer to wapi ie + * @p2p: pointer to p2p ie + * @alt_wcn: pointer to alternate wcn ie + * @extcaps: pointer to extended caps ie + * @ibssdfs: pointer to ibssdfs ie + * @sonadv: pointer to wifi son ie + * @vhtcap: pointer to vhtcap ie + * @vhtop: pointer to vhtop ie + * @opmode: pointer to opmode ie + * @cswrp: pointer to channel switch announcement wrapper ie + * @widebw: pointer to wide band channel switch sub ie + * @txpwrenvlp: pointer to tx power envelop sub ie + * @srp: pointer to spatial reuse parameter sub extended ie + * @fils_indication: pointer to FILS 
indication ie + * @esp: pointer to ESP indication ie + * @mbo_oce: pointer to mbo/oce indication ie + */ +struct ie_list { + uint8_t *tim; + uint8_t *country; + uint8_t *ssid; + uint8_t *rates; + uint8_t *xrates; + uint8_t *ds_param; + uint8_t *csa; + uint8_t *xcsa; + uint8_t *wpa; + uint8_t *wcn; + uint8_t *rsn; + uint8_t *wps; + uint8_t *wmeinfo; + uint8_t *wmeparam; + uint8_t *quiet; + uint8_t *htcap; + uint8_t *htinfo; + uint8_t *athcaps; + uint8_t *athextcaps; + uint8_t *sfa; + uint8_t *vendor; + uint8_t *qbssload; + uint8_t *wapi; + uint8_t *p2p; + uint8_t *alt_wcn; + uint8_t *extcaps; + uint8_t *ibssdfs; + uint8_t *sonadv; + uint8_t *vhtcap; + uint8_t *vhtop; + uint8_t *opmode; + uint8_t *cswrp; + uint8_t *widebw; + uint8_t *txpwrenvlp; + uint8_t *bwnss_map; + uint8_t *secchanoff; + uint8_t *mdie; + uint8_t *hecap; + uint8_t *heop; + uint8_t *srp; + uint8_t *fils_indication; + uint8_t *esp; + uint8_t *mbo_oce; + uint8_t *muedca; +}; + +enum scan_entry_connection_state { + SCAN_ENTRY_CON_STATE_NONE, + SCAN_ENTRY_CON_STATE_AUTH, + SCAN_ENTRY_CON_STATE_ASSOC +}; + +/** + * struct mlme_info - mlme specific info + * temporarily maintained in scan cache for backward compatibility. + * must be removed as part of umac convergence. 
+ * @bad_ap_time: time when this ap was marked bad + * @status: status + * @rank: rank + * @utility: utility + * @assoc_state: association state + * @chanload: channel load + */ +struct mlme_info { + qdf_time_t bad_ap_time; + uint32_t status; + uint32_t rank; + uint32_t utility; + uint32_t assoc_state; + uint32_t chanload; +}; + +/** + * struct bss_info - information required to uniquely define a bss + * @chan: bss operating primary channel index + * @ssid: ssid of bss + * @bssid: bssid of bss + */ +struct bss_info { + uint8_t chan; + struct wlan_ssid ssid; + struct qdf_mac_addr bssid; +}; + +#define SCAN_NODE_ACTIVE_COOKIE 0x1248F842 +/** + * struct scan_cache_node - Scan cache entry node + * @node: node pointers + * @ref_cnt: ref count if in use + * @cookie: cookie to check if entry is logically active + * @entry: scan entry pointer + */ +struct scan_cache_node { + qdf_list_node_t node; + qdf_atomic_t ref_cnt; + uint32_t cookie; + struct scan_cache_entry *entry; +}; + +struct security_info { + enum wlan_enc_type uc_enc; + enum wlan_enc_type mc_enc; + enum wlan_auth_type auth_type; +}; + +/** + * struct scan_cache_entry: structure containing scan entry + * @frm_subtype: updated from beacon/probe + * @bssid: bssid + * @mac_addr: mac address + * @ssid: ssid + * @is_hidden_ssid: is AP having hidden ssid. 
+ * @seq_num: sequence number + * @phy_mode: Phy mode of the AP + * @avg_rssi: Average RSSI fof the AP + * @rssi_raw: The rssi of the last beacon/probe received + * @bcn_int: Beacon interval of the AP + * @cap_info: Capability of the AP + * @tsf_info: TSF info + * @erp: erp info + * @dtim_period: dtime period + * @air_time_fraction: Air time fraction from ESP param + * @qbss_chan_load: Qbss channel load + * @nss: supported NSS information + * @is_p2p_ssid: is P2P entry + * @scan_entry_time: boottime in microsec when last beacon/probe is received + * @rssi_timestamp: boottime in microsec when RSSI was updated + * @hidden_ssid_timestamp: boottime in microsec when hidden + * ssid was received + * @channel: channel info on which AP is present + * @channel_mismatch: if channel received in metadata + * doesnot match the one in beacon + * @tsf_delta: TSF delta + * @bss_score: bss score calculated on basis of RSSI/caps etc. + * @neg_sec_info: negotiated security info + * @per_chain_snr: per chain SNR value received. + * boottime_ns: boottime in ns. 
+ * @rrm_parent_tsf: RRM parent tsf + * @mlme_info: Mlme info, this will be updated by MLME for the scan entry + * @alt_wcn_ie: alternate WCN IE + * @ie_list: IE list pointers + * @raw_frame: contain raw frame and the length of the raw frame + */ +struct scan_cache_entry { + uint8_t frm_subtype; + struct qdf_mac_addr bssid; + struct qdf_mac_addr mac_addr; + struct wlan_ssid ssid; + bool is_hidden_ssid; + uint16_t seq_num; + enum wlan_phymode phy_mode; + int32_t avg_rssi; + int8_t rssi_raw; + uint16_t bcn_int; + union wlan_capability cap_info; + union { + uint8_t data[8]; + uint64_t tsf; + } tsf_info; + uint8_t erp; + uint8_t dtim_period; + uint8_t air_time_fraction; + uint8_t qbss_chan_load; + uint8_t nss; + bool is_p2p; + qdf_time_t scan_entry_time; + qdf_time_t rssi_timestamp; + qdf_time_t hidden_ssid_timestamp; + struct channel_info channel; + bool channel_mismatch; + struct mlme_info mlme_info; + uint32_t tsf_delta; + uint32_t bss_score; + struct security_info neg_sec_info; + uint8_t per_chain_snr[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA]; + uint64_t boottime_ns; + uint32_t rrm_parent_tsf; + struct element_info alt_wcn_ie; + struct ie_list ie_list; + struct element_info raw_frame; +}; + +#define MAX_FAVORED_BSSID 16 +#define MAX_AVOID_LIST_BSSID 16 +#define MAX_ALLOWED_SSID_LIST 4 + +/** + * struct roam_filter_params - Structure holding roaming parameters + * @num_bssid_avoid_list: The number of BSSID's that we should + * avoid connecting to. It is like a + * blacklist of BSSID's. + * also for roaming apart from the connected one's + * @bssid_avoid_list: Blacklist SSID's + * + * This structure holds all the key parameters related to + * initial connection and also roaming connections. 
+ */ +struct roam_filter_params { + uint32_t num_bssid_avoid_list; + /* Variable params list */ + struct qdf_mac_addr bssid_avoid_list[MAX_AVOID_LIST_BSSID]; +}; + +/** + * struct weight_config - weight params to calculate best candidate + * @rssi_weightage: RSSI weightage + * @ht_caps_weightage: HT caps weightage + * @vht_caps_weightage: VHT caps weightage + * @he_caps_weightage: HE caps weightage + * @chan_width_weightage: Channel width weightage + * @chan_band_weightage: Channel band weightage + * @nss_weightage: NSS weightage + * @beamforming_cap_weightage: Beamforming caps weightage + * @pcl_weightage: PCL weightage + * @channel_congestion_weightage: channel congestion weightage + * @oce_wan_weightage: OCE WAN metrics weightage + */ +struct weight_config { + uint8_t rssi_weightage; + uint8_t ht_caps_weightage; + uint8_t vht_caps_weightage; + uint8_t he_caps_weightage; + uint8_t chan_width_weightage; + uint8_t chan_band_weightage; + uint8_t nss_weightage; + uint8_t beamforming_cap_weightage; + uint8_t pcl_weightage; + uint8_t channel_congestion_weightage; + uint8_t oce_wan_weightage; +}; + +/** + * struct rssi_cfg_score - rssi related params for scoring logic + * @best_rssi_threshold: RSSI weightage + * @good_rssi_threshold: HT caps weightage + * @bad_rssi_threshold: VHT caps weightage + * @good_rssi_pcnt: HE caps weightage + * @bad_rssi_pcnt: Channel width weightage + * @good_rssi_bucket_size: Channel band weightage + * @bad_rssi_bucket_size: NSS weightage + * @rssi_pref_5g_rssi_thresh: Beamforming caps weightage + */ +struct rssi_cfg_score { + uint32_t best_rssi_threshold; + uint32_t good_rssi_threshold; + uint32_t bad_rssi_threshold; + uint32_t good_rssi_pcnt; + uint32_t bad_rssi_pcnt; + uint32_t good_rssi_bucket_size; + uint32_t bad_rssi_bucket_size; + uint32_t rssi_pref_5g_rssi_thresh; +}; + +/** + * struct per_slot_scoring - define % score for differents slots for a + * scoring param. + * num_slot: number of slots in which the param will be divided. 
+ * Max 15. index 0 is used for 'not_present. Num_slot will + * equally divide 100. e.g, if num_slot = 4 slot 0 = 0-25%, slot + * 1 = 26-50% slot 2 = 51-75%, slot 3 = 76-100% + * score_pcnt3_to_0: Conatins score percentage for slot 0-3 + * BITS 0-7 :- the scoring pcnt when not present + * BITS 8-15 :- SLOT_1 + * BITS 16-23 :- SLOT_2 + * BITS 24-31 :- SLOT_3 + * score_pcnt7_to_4: Conatins score percentage for slot 4-7 + * BITS 0-7 :- SLOT_4 + * BITS 8-15 :- SLOT_5 + * BITS 16-23 :- SLOT_6 + * BITS 24-31 :- SLOT_7 + * score_pcnt11_to_8: Conatins score percentage for slot 8-11 + * BITS 0-7 :- SLOT_8 + * BITS 8-15 :- SLOT_9 + * BITS 16-23 :- SLOT_10 + * BITS 24-31 :- SLOT_11 + * score_pcnt15_to_12: Conatins score percentage for slot 12-15 + * BITS 0-7 :- SLOT_12 + * BITS 8-15 :- SLOT_13 + * BITS 16-23 :- SLOT_14 + * BITS 24-31 :- SLOT_15 + */ +struct per_slot_scoring { + uint32_t num_slot; + uint32_t score_pcnt3_to_0; + uint32_t score_pcnt7_to_4; + uint32_t score_pcnt11_to_8; + uint32_t score_pcnt15_to_12; +}; + +/** + * struct scoring_config - Scoring related configuration + * @weight_cfg: weigtage config for config + * @rssi_score: Rssi related config for scoring config + * @esp_qbss_scoring: esp and qbss related scoring config + * @oce_wan_scoring: oce related scoring config + * @bandwidth_weight_per_index: BW wight per index + * @nss_weight_per_index: nss weight per index + * @band_weight_per_index: band weight per index + * @cb_mode_24G: cb mode supprted for 2.4Ghz + * @cb_mode_5G: cb mode supprted for 5Ghz + * @nss: Number of NSS the device support + * @ht_cap: If dev is configured as HT capable + * @vht_cap:If dev is configured as VHT capable + * @he_cap: If dev is configured as HE capable + * @vht_24G_cap:If dev is configured as VHT capable for 2.4Ghz + * @beamformee_cap:If dev is configured as BF capable + */ +struct scoring_config { + struct weight_config weight_cfg; + struct rssi_cfg_score rssi_score; + struct per_slot_scoring esp_qbss_scoring; + struct 
per_slot_scoring oce_wan_scoring; + uint32_t bandwidth_weight_per_index; + uint32_t nss_weight_per_index; + uint32_t band_weight_per_index; + uint8_t cb_mode_24G; + uint8_t cb_mode_5G; + uint8_t vdev_nss_24g; + uint8_t vdev_nss_5g; + uint8_t ht_cap:1, + vht_cap:1, + he_cap:1, + vht_24G_cap:1, + beamformee_cap:1; +}; + +#define WLAN_SCAN_FILTER_NUM_SSID 5 +#define WLAN_SCAN_FILTER_NUM_BSSID 5 + +#define REAM_HASH_LEN 2 +#define CACHE_IDENTIFIER_LEN 2 +#define HESSID_LEN 6 + +/** + * struct fils_filter_info: FILS info present in scan filter + * @realm_check: whether realm check is required + * @fils_realm: realm hash value + * @security_type: type of security supported + */ +struct fils_filter_info { + bool realm_check; + uint8_t fils_realm[REAM_HASH_LEN]; + uint8_t security_type; +}; + +/** + * @bss_scoring_required :- flag to bypass scoring filtered results + * @age_threshold: If set return entry which are newer than the age_threshold + * @p2p_results: If only p2p entries is required + * @rrm_measurement_filter: For measurement reports.if set, only SSID, BSSID + * and channel is considered for filtering. 
+ * @num_of_bssid: number of bssid passed + * @num_of_ssid: number of ssid + * @num_of_channels: number of channels + * @num_of_auth: number of auth types + * @num_of_enc_type: number of unicast enc type + * @num_of_mc_enc_type: number of multicast enc type + * @pmf_cap: Pmf capability + * @ignore_pmf_cap: Ignore pmf capability match + * @num_of_pcl_channels: number of pcl channels + * @bss_type: bss type BSS/IBSS etc + * @dot11_mode: operating modes 0 mean any + * 11a , 11g, 11n , 11ac , 11b etc + * @band: to get specific band 2.4G, 5G or 4.9 G + * @rssi_threshold: AP having RSSI greater than + * rssi threasholed (ignored if set 0) + * @only_wmm_ap: If only Qos AP is needed + * @ignore_auth_enc_type: Ignore enc type if + * this is set (For WPS/OSEN connection) + * @mobility_domain: Mobility domain for 11r + * @country[3]: Ap with specific country code + * @bssid_list: bssid list + * @ssid_list: ssid list + * @channel_list: channel list + * @auth_type: auth type list + * @enc_type: unicast enc type list + * @mc_enc_type: multicast cast enc type list + * @pcl_channel_list: PCL channel list + * @fils_scan_filter: FILS info + * @pcl_weight_list: PCL Weight list + * @bssid_hint: Mac address of bssid_hint + */ +struct scan_filter { + bool bss_scoring_required; + uint32_t age_threshold; + uint32_t p2p_results; + uint32_t rrm_measurement_filter; + uint32_t num_of_bssid; + uint32_t num_of_ssid; + uint32_t num_of_channels; + uint32_t num_of_auth; + uint32_t num_of_enc_type; + uint32_t num_of_mc_enc_type; + enum wlan_pmf_cap pmf_cap; + bool ignore_pmf_cap; + uint32_t num_of_pcl_channels; + enum wlan_bss_type bss_type; + enum wlan_phymode dot11_mode; + enum wlan_band band; + uint32_t rssi_threshold; + uint32_t only_wmm_ap; + uint32_t ignore_auth_enc_type; + uint32_t mobility_domain; + /* Variable params list */ + uint8_t country[3]; + struct qdf_mac_addr bssid_list[WLAN_SCAN_FILTER_NUM_BSSID]; + struct wlan_ssid ssid_list[WLAN_SCAN_FILTER_NUM_SSID]; + uint8_t 
channel_list[QDF_MAX_NUM_CHAN]; + enum wlan_auth_type auth_type[WLAN_NUM_OF_SUPPORT_AUTH_TYPE]; + enum wlan_enc_type enc_type[WLAN_NUM_OF_ENCRYPT_TYPE]; + enum wlan_enc_type mc_enc_type[WLAN_NUM_OF_ENCRYPT_TYPE]; + uint8_t pcl_channel_list[QDF_MAX_NUM_CHAN]; + struct fils_filter_info fils_scan_filter; + uint8_t pcl_weight_list[QDF_MAX_NUM_CHAN]; + struct qdf_mac_addr bssid_hint; +}; + + +/** + * enum scan_priority - scan priority definitions + * @SCAN_PRIORITY_VERY_LOW: very low priority + * @SCAN_PRIORITY_LOW: low scan priority + * @SCAN_PRIORITY_MEDIUM: medium priority + * @SCAN_PRIORITY_HIGH: high priority + * @SCAN_PRIORITY_VERY_HIGH: very high priority + * @SCAN_PRIORITY_COUNT: number of priorities supported + */ +enum scan_priority { + SCAN_PRIORITY_VERY_LOW, + SCAN_PRIORITY_LOW, + SCAN_PRIORITY_MEDIUM, + SCAN_PRIORITY_HIGH, + SCAN_PRIORITY_VERY_HIGH, + SCAN_PRIORITY_COUNT, +}; + + +/** + * enum scan_type - type of scan + * @SCAN_TYPE_BACKGROUND: background scan + * @SCAN_TYPE_FOREGROUND: foregrounc scan + * @SCAN_TYPE_SPECTRAL: spectral scan + * @SCAN_TYPE_REPEATER_BACKGROUND: background scan in repeater + * @SCAN_TYPE_REPEATER_EXT_BACKGROUND: background scan in extended repeater + * @SCAN_TYPE_RADIO_MEASUREMENTS: redio measurement + * @SCAN_TYPE_COUNT: number of scan types supported + */ +enum scan_type { + SCAN_TYPE_BACKGROUND, + SCAN_TYPE_FOREGROUND, + SCAN_TYPE_SPECTRAL, + SCAN_TYPE_REPEATER_BACKGROUND, + SCAN_TYPE_REPEATER_EXT_BACKGROUND, + SCAN_TYPE_RADIO_MEASUREMENTS, + SCAN_TYPE_COUNT, +}; + +/** + * enum scan_phy_mode - phymode used for scan + * @SCAN_PHY_MODE_11A: 11a mode + * @SCAN_PHY_MODE_11G: 11g mode + * @SCAN_PHY_MODE_11B: 11b mode + * @SCAN_PHY_MODE_11GONLY: 11g only mode + * @SCAN_PHY_MODE_11NA_HT20: 11na ht20 mode + * @SCAN_PHY_MODE_11NG_HT20: 11ng ht20 mode + * @SCAN_PHY_MODE_11NA_HT40: 11na ht40 mode + * @SCAN_PHY_MODE_11NG_HT40: 11ng ht40 mode + * @SCAN_PHY_MODE_11AC_VHT20: 11ac vht20 mode + * @SCAN_PHY_MODE_11AC_VHT40: 11ac vht40 mode 
+ * @SCAN_PHY_MODE_11AC_VHT80: 11ac vht80 mode + * @SCAN_PHY_MODE_11AC_VHT20_2G: 2GHz 11ac vht20 mode + * @SCAN_PHY_MODE_11AC_VHT40_2G: 2GHz 11ac vht40 mode + * @SCAN_PHY_MODE_11AC_VHT80_2G: 2GHz 11ac vht80 mode + * @SCAN_PHY_MODE_11AC_VHT80_80: 11ac vht 80+80 mode + * @SCAN_PHY_MODE_11AC_VHT160: 11ac vht160 mode + * @SCAN_PHY_MODE_11AX_HE20: 11ax he20 mode + * @SCAN_PHY_MODE_11AX_HE40: 11ax he40 mode + * @SCAN_PHY_MODE_11AX_HE80: 11ax he80 mode + * @SCAN_PHY_MODE_11AX_HE80_80: 11ax he80+80 mode + * @SCAN_PHY_MODE_11AX_HE160: 11ax he160 mode + * @SCAN_PHY_MODE_11AX_HE20_2G: 2GHz 11ax he20 mode + * @SCAN_PHY_MODE_11AX_HE40_2G: 2GHz 11ax he40 mode + * @SCAN_PHY_MODE_11AX_HE80_2G: 2GHz 11ax he80 mode + * @SCAN_PHY_MODE_UNKNOWN: unknown phy mode + * @SCAN_PHY_MODE_MAX: max valid phymode + */ +enum scan_phy_mode { + SCAN_PHY_MODE_11A = 0, + SCAN_PHY_MODE_11G = 1, + SCAN_PHY_MODE_11B = 2, + SCAN_PHY_MODE_11GONLY = 3, + SCAN_PHY_MODE_11NA_HT20 = 4, + SCAN_PHY_MODE_11NG_HT20 = 5, + SCAN_PHY_MODE_11NA_HT40 = 6, + SCAN_PHY_MODE_11NG_HT40 = 7, + SCAN_PHY_MODE_11AC_VHT20 = 8, + SCAN_PHY_MODE_11AC_VHT40 = 9, + SCAN_PHY_MODE_11AC_VHT80 = 10, + SCAN_PHY_MODE_11AC_VHT20_2G = 11, + SCAN_PHY_MODE_11AC_VHT40_2G = 12, + SCAN_PHY_MODE_11AC_VHT80_2G = 13, + SCAN_PHY_MODE_11AC_VHT80_80 = 14, + SCAN_PHY_MODE_11AC_VHT160 = 15, + SCAN_PHY_MODE_11AX_HE20 = 16, + SCAN_PHY_MODE_11AX_HE40 = 17, + SCAN_PHY_MODE_11AX_HE80 = 18, + SCAN_PHY_MODE_11AX_HE80_80 = 19, + SCAN_PHY_MODE_11AX_HE160 = 20, + SCAN_PHY_MODE_11AX_HE20_2G = 21, + SCAN_PHY_MODE_11AX_HE40_2G = 22, + SCAN_PHY_MODE_11AX_HE80_2G = 23, + SCAN_PHY_MODE_UNKNOWN = 24, + SCAN_PHY_MODE_MAX = 24 +}; + +/** + * struct scan_extra_params_legacy + * extra parameters required for legacy DA scan module + * @scan_type: type of scan + * @min_dwell_active: min active dwell time + * @min_dwell_passive: min passive dwell time + * @init_rest_time: init rest time for enhanced independent repeater + */ +struct scan_extra_params_legacy { + enum scan_type 
scan_type; + uint32_t min_dwell_active; + uint32_t min_dwell_passive; + uint32_t init_rest_time; +}; + +/** + * enum scan_dwelltime_adaptive_mode: dwelltime_mode + * @SCAN_DWELL_MODE_DEFAULT: Use firmware default mode + * @SCAN_DWELL_MODE_CONSERVATIVE: Conservative adaptive mode + * @SCAN_DWELL_MODE_MODERATE: Moderate adaptive mode + * @SCAN_DWELL_MODE_AGGRESSIVE: Aggressive adaptive mode + * @SCAN_DWELL_MODE_STATIC: static adaptive mode + */ +enum scan_dwelltime_adaptive_mode { + SCAN_DWELL_MODE_DEFAULT = 0, + SCAN_DWELL_MODE_CONSERVATIVE = 1, + SCAN_DWELL_MODE_MODERATE = 2, + SCAN_DWELL_MODE_AGGRESSIVE = 3, + SCAN_DWELL_MODE_STATIC = 4 +}; + +/** + * struct scan_random_attr - holds scan randomization attrs + * @randomize: set to true for scan randomization + * @mac_addr: mac addr to be randomized + * @mac_mask: used to represent bits in mac_addr for randomization + */ +struct scan_random_attr { + bool randomize; + uint8_t mac_addr[QDF_MAC_ADDR_SIZE]; + uint8_t mac_mask[QDF_MAC_ADDR_SIZE]; +}; + +/** + * struct probe_req_whitelist_attr - holds probe req ie whitelist attrs + * @white_list: enable/disable whitelist + * @ie_bitmap: bitmap of IEs to be enabled + * @num_vendor_oui: number of vendor OUIs + * @voui: vendor oui buffer + */ +struct probe_req_whitelist_attr { + bool white_list; + uint32_t ie_bitmap[PROBE_REQ_BITMAP_LEN]; + uint32_t num_vendor_oui; + uint32_t voui[MAX_PROBE_REQ_OUIS]; +}; + +/** + * struct chan_info - channel information + * @freq: frequency to scan + * @phymode: phymode in which @frequency should be scanned + */ +struct chan_info { + uint32_t freq; + uint32_t phymode; +}; + +/** + * struct chan_list - list of frequencies to be scanned + * and their phymode + * @num_chan: number of channels to scan + * @chan: channel parameters used for this scan + */ +struct chan_list { + uint32_t num_chan; + struct chan_info chan[WLAN_SCAN_MAX_NUM_CHANNELS]; +}; + +/** + * enum scan_type: scan type + * @SCAN_NON_P2P_DEFAULT: Def scan + * @SCAN_P2P_SEARCH: 
P2P Search + * @SCAN_P2P_LISTEN: P2P listed + */ +enum p2p_scan_type { + SCAN_NON_P2P_DEFAULT = 0, + SCAN_P2P_SEARCH = 1, + SCAN_P2P_LISTEN = 2, +}; + +/** + * struct scan_req_params - start scan request parameter + * @scan_id: scan id + * @scan_req_id: scan requester id + * @vdev_id: vdev id where scan was originated + * @pdev_id: pdev id of parent pdev + * @scan_priority: scan priority + * @scan_ev_started: notify scan started event + * @scan_ev_completed: notify scan completed event + * @scan_ev_bss_chan: notify bss chan event + * @scan_ev_foreign_chan: notify foreign chan event + * @scan_ev_dequeued: notify scan request dequed event + * @scan_ev_preempted: notify scan preempted event + * @scan_ev_start_failed: notify scan start failed event + * @scan_ev_restarted: notify scan restarted event + * @scan_ev_foreign_chn_exit: notify foreign chan exit event + * @scan_ev_invalid: notify invalid scan request event + * @scan_ev_gpio_timeout: notify gpio timeout event + * @scan_ev_suspended: notify scan suspend event + * @scan_ev_resumed: notify scan resumed event + * @scan_events: variable to read and set scan_ev_* flags in one shot + * can be used to dump all scan_ev_* flags for debug + * @dwell_time_active: active dwell time + * @dwell_time_active_2g: active dwell time for 2G channels, if it's not zero + * @dwell_time_passive: passive dwell time + * @min_rest_time: min rest time + * @max_rest_time: max rest time + * @repeat_probe_time: repeat probe time + * @probe_spacing_time: probe spacing time + * @idle_time: idle time + * @max_scan_time: max scan time + * @probe_delay: probe delay + * @scan_f_passive: passively scan all channels including active channels + * @scan_f_bcast_probe: add wild card ssid prbreq even if ssid_list is specified + * @scan_f_cck_rates: add cck rates to rates/xrates ie in prb req + * @scan_f_ofdm_rates: add ofdm rates to rates/xrates ie in prb req + * @scan_f_chan_stat_evnt: enable indication of chan load and noise floor + * 
@scan_f_filter_prb_req: filter Probe request frames + * @scan_f_bypass_dfs_chn: when set, do not scan DFS channels + * @scan_f_continue_on_err:continue scan even if few certain erros have occurred + * @scan_f_offchan_mgmt_tx: allow mgmt transmission during off channel scan + * @scan_f_offchan_data_tx: allow data transmission during off channel scan + * @scan_f_promisc_mode: scan with promiscuous mode + * @scan_f_capture_phy_err: enable capture ppdu with phy errrors + * @scan_f_strict_passive_pch: do passive scan on passive channels + * @scan_f_half_rate: enable HALF (10MHz) rate support + * @scan_f_quarter_rate: set Quarter (5MHz) rate support + * @scan_f_force_active_dfs_chn: allow to send probe req on DFS channel + * @scan_f_add_tpc_ie_in_probe: add TPC ie in probe req frame + * @scan_f_add_ds_ie_in_probe: add DS ie in probe req frame + * @scan_f_add_spoofed_mac_in_probe: use random mac address for TA in probe + * @scan_f_add_rand_seq_in_probe: use random sequence number in probe + * @scan_f_en_ie_whitelist_in_probe: enable ie whitelist in probe + * @scan_f_forced: force scan even in presence of data traffic + * @scan_f_2ghz: scan 2.4 GHz channels + * @scan_f_5ghz: scan 5 GHz channels + * @scan_f_wide_band: scan in 40 MHz or higher bandwidth + * @scan_flags: variable to read and set scan_f_* flags in one shot + * can be used to dump all scan_f_* flags for debug + * @burst_duration: burst duration + * @num_bssid: no of bssid + * @num_ssids: no of ssid + * @n_probes: no of probe + * @chan_list: channel list + * @ssid: ssid list + * @bssid_list: Lisst of bssid to scan + * @scan_random: scan randomization params + * @ie_whitelist: probe req IE whitelist attrs + * @extraie: list of optional/vendor specific ie's to be added in probe requests + * @htcap: htcap ie + * @vhtcap: vhtcap ie + * @scan_ctrl_flags_ext: scan control flag extended + */ + +struct scan_req_params { + uint32_t scan_id; + uint32_t scan_req_id; + uint32_t vdev_id; + uint32_t pdev_id; + enum 
scan_priority scan_priority; + enum p2p_scan_type p2p_scan_type; + union { + struct { + uint32_t scan_ev_started:1, + scan_ev_completed:1, + scan_ev_bss_chan:1, + scan_ev_foreign_chan:1, + scan_ev_dequeued:1, + scan_ev_preempted:1, + scan_ev_start_failed:1, + scan_ev_restarted:1, + scan_ev_foreign_chn_exit:1, + scan_ev_invalid:1, + scan_ev_gpio_timeout:1, + scan_ev_suspended:1, + scan_ev_resumed:1; + }; + uint32_t scan_events; + }; + uint32_t dwell_time_active; + uint32_t dwell_time_active_2g; + uint32_t dwell_time_passive; + uint32_t min_rest_time; + uint32_t max_rest_time; + uint32_t repeat_probe_time; + uint32_t probe_spacing_time; + uint32_t idle_time; + uint32_t max_scan_time; + uint32_t probe_delay; + union { + struct { + uint32_t scan_f_passive:1, + scan_f_bcast_probe:1, + scan_f_cck_rates:1, + scan_f_ofdm_rates:1, + scan_f_chan_stat_evnt:1, + scan_f_filter_prb_req:1, + scan_f_bypass_dfs_chn:1, + scan_f_continue_on_err:1, + scan_f_offchan_mgmt_tx:1, + scan_f_offchan_data_tx:1, + scan_f_promisc_mode:1, + scan_f_capture_phy_err:1, + scan_f_strict_passive_pch:1, + scan_f_half_rate:1, + scan_f_quarter_rate:1, + scan_f_force_active_dfs_chn:1, + scan_f_add_tpc_ie_in_probe:1, + scan_f_add_ds_ie_in_probe:1, + scan_f_add_spoofed_mac_in_probe:1, + scan_f_add_rand_seq_in_probe:1, + scan_f_en_ie_whitelist_in_probe:1, + scan_f_forced:1, + scan_f_2ghz:1, + scan_f_5ghz:1, + scan_f_wide_band:1; + }; + uint32_t scan_flags; + }; + union { + struct { + uint32_t scan_policy_high_accuracy:1, + scan_policy_low_span:1, + scan_policy_low_power:1; + }; + uint32_t scan_policy_type; + }; + + enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode; + uint32_t burst_duration; + uint32_t num_bssid; + uint32_t num_ssids; + uint32_t n_probes; + struct chan_list chan_list; + struct wlan_ssid ssid[WLAN_SCAN_MAX_NUM_SSID]; + struct qdf_mac_addr bssid_list[WLAN_SCAN_MAX_NUM_BSSID]; + struct scan_random_attr scan_random; + struct probe_req_whitelist_attr ie_whitelist; + struct element_info 
extraie; + struct element_info htcap; + struct element_info vhtcap; + uint32_t scan_ctrl_flags_ext; +}; + +/** + * struct scan_start_request - scan request config + * @vdev: vdev + * @legacy_params: extra parameters required for legacy DA arch + * @scan_req: common scan start request parameters + */ +struct scan_start_request { + struct wlan_objmgr_vdev *vdev; + struct scan_extra_params_legacy legacy_params; + struct scan_req_params scan_req; +}; + +/** + * enum scan_cancel_type - type specifiers for cancel scan request + * @WLAN_SCAN_CANCEL_SINGLE: cancel particular scan specified by scan_id + * @WLAN_SCAN_CANCEL_VAP_ALL: cancel all scans running on a particular vdevid + * WLAN_SCAN_CANCEL_PDEV_ALL: cancel all scans running on parent pdev of vdevid + */ +enum scan_cancel_req_type { + WLAN_SCAN_CANCEL_SINGLE = 1, + WLAN_SCAN_CANCEL_VDEV_ALL, + WLAN_SCAN_CANCEL_PDEV_ALL, +}; + +/** + * struct scan_cancel_param - stop scan cmd parameter + * @requester: scan requester + * @scan_id: scan id + * @req_type: scan request type + * @vdev_id: vdev id + * @pdev_id: pdev id of parent pdev + */ +struct scan_cancel_param { + uint32_t requester; + uint32_t scan_id; + enum scan_cancel_req_type req_type; + uint32_t vdev_id; + uint32_t pdev_id; +}; + +/** + * struct scan_cancel_request - stop scan cmd + * @vdev: vdev object + * @wait_tgt_cancel: wait for target to cancel scan + * @cancel_req: stop scan cmd parameter + */ +struct scan_cancel_request { + /* Extra parameters consumed by scan module or serialization */ + struct wlan_objmgr_vdev *vdev; + bool wait_tgt_cancel; + /* Actual scan cancel request parameters */ + struct scan_cancel_param cancel_req; +}; + +/** + * enum scan_event_type - scan event types + * @SCAN_EVENT_TYPE_STARTED: scan started + * @SCAN_EVENT_TYPE_COMPLETED: scan completed + * @SCAN_EVENT_TYPE_BSS_CHANNEL: HW came back to home channel + * @SCAN_EVENT_TYPE_FOREIGN_CHANNEL: HW moved to foreign channel + * @SCAN_EVENT_TYPE_DEQUEUED: scan request dequeued + * 
@SCAN_EVENT_TYPE_PREEMPTED: scan got preempted + * @SCAN_EVENT_TYPE_START_FAILED: couldn't start scan + * @SCAN_EVENT_TYPE_RESTARTED: scan restarted + * @SCAN_EVENT_TYPE_FOREIGN_CHANNEL_EXIT: HW exited foreign channel + * @SCAN_EVENT_TYPE_SUSPENDED: scan got suspended + * @SCAN_EVENT_TYPE_RESUMED: scan resumed + * @SCAN_EVENT_TYPE_NLO_COMPLETE: NLO completed + * @SCAN_EVENT_TYPE_NLO_MATCH: NLO match event + * @SCAN_EVENT_TYPE_INVALID: invalid request + * @SCAN_EVENT_TYPE_GPIO_TIMEOUT: gpio timeout + * @SCAN_EVENT_TYPE_RADIO_MEASUREMENT_START: radio measurement start + * @SCAN_EVENT_TYPE_RADIO_MEASUREMENT_END: radio measurement end + * @SCAN_EVENT_TYPE_BSSID_MATCH: bssid match found + * @SCAN_EVENT_TYPE_FOREIGN_CHANNEL_GET_NF: foreign channel noise floor + * @SCAN_EVENT_TYPE_MAX: marker for invalid event + */ +enum scan_event_type { + SCAN_EVENT_TYPE_STARTED, + SCAN_EVENT_TYPE_COMPLETED, + SCAN_EVENT_TYPE_BSS_CHANNEL, + SCAN_EVENT_TYPE_FOREIGN_CHANNEL, + SCAN_EVENT_TYPE_DEQUEUED, + SCAN_EVENT_TYPE_PREEMPTED, + SCAN_EVENT_TYPE_START_FAILED, + SCAN_EVENT_TYPE_RESTARTED, + SCAN_EVENT_TYPE_FOREIGN_CHANNEL_EXIT, + SCAN_EVENT_TYPE_SUSPENDED, + SCAN_EVENT_TYPE_RESUMED, + SCAN_EVENT_TYPE_NLO_COMPLETE, + SCAN_EVENT_TYPE_NLO_MATCH, + SCAN_EVENT_TYPE_INVALID, + SCAN_EVENT_TYPE_GPIO_TIMEOUT, + SCAN_EVENT_TYPE_RADIO_MEASUREMENT_START, + SCAN_EVENT_TYPE_RADIO_MEASUREMENT_END, + SCAN_EVENT_TYPE_BSSID_MATCH, + SCAN_EVENT_TYPE_FOREIGN_CHANNEL_GET_NF, + SCAN_EVENT_TYPE_MAX, +}; + +/** + * enum scan_completion_reason - scan completion reason + * @SCAN_REASON_NONE: un specified reason + * @SCAN_REASON_COMPLETED: scan successfully completed + * @SCAN_REASON_CANCELLED: scan got cancelled + * @SCAN_REASON_PREEMPTED: scan got preempted + * @SCAN_REASON_TIMEDOUT: couldnt complete within specified time + * @SCAN_REASON_INTERNAL_FAILURE: cancelled because of some failure + * @SCAN_REASON_SUSPENDED: scan suspended + * @SCAN_REASON_RUN_FAILED: run failed + * @SCAN_REASON_TERMINATION_FUNCTION: 
termination function + * @SCAN_REASON_MAX_OFFCHAN_RETRIES: max retries exceeded thresold + * @SCAN_REASON_MAX: invalid completion reason marker + */ +enum scan_completion_reason { + SCAN_REASON_NONE, + SCAN_REASON_COMPLETED, + SCAN_REASON_CANCELLED, + SCAN_REASON_PREEMPTED, + SCAN_REASON_TIMEDOUT, + SCAN_REASON_INTERNAL_FAILURE, + SCAN_REASON_SUSPENDED, + SCAN_REASON_RUN_FAILED, + SCAN_REASON_TERMINATION_FUNCTION, + SCAN_REASON_MAX_OFFCHAN_RETRIES, + SCAN_REASON_MAX, +}; + +/** + * struct scan_event - scan event definition + * @vdev_id: vdev where scan was run + * @type: type of scan event + * @reason: completion reason + * @chan_freq: channel centre frequency + * @requester: requester id + * @scan_id: scan id + * @timestamp: timestamp in microsec recorded by target for the scan event + * @scan_start_req: scan request object used to start this scan + */ +struct scan_event { + uint32_t vdev_id; + enum scan_event_type type; + enum scan_completion_reason reason; + uint32_t chan_freq; + uint32_t requester; + uint32_t scan_id; + uint32_t timestamp; + struct scan_start_request *scan_start_req; +}; + +/** + * struct scan_event_info - scan event information + * @vdev: vdev object + * @event: scan event + */ +struct scan_event_info { + struct wlan_objmgr_vdev *vdev; + struct scan_event event; +}; + +/** + * enum scm_scan_status - scan status + * @SCAN_NOT_IN_PROGRESS: Neither active nor pending scan in progress + * @SCAN_IS_ACTIVE: scan request is present only in active list + * @SCAN_IS_PENDING: scan request is present only in pending list + * @SCAN_IS_ACTIVE_AND_PENDING: scan request is present in active + * and pending both lists + */ +enum scm_scan_status { + SCAN_NOT_IN_PROGRESS = 0, /* Must be 0 */ + SCAN_IS_ACTIVE, + SCAN_IS_PENDING, + SCAN_IS_ACTIVE_AND_PENDING, +}; + +/** + * scan_event_handler() - function prototype of scan event handlers + * @vdev: vdev object + * @event: scan event + * @arg: argument + * + * PROTO TYPE, scan event handler call back function 
prototype + * + * @Return: void + */ +typedef void (*scan_event_handler) (struct wlan_objmgr_vdev *vdev, + struct scan_event *event, void *arg); + +/** + * enum scan_cb_type - update beacon cb type + * @SCAN_CB_TYPE_INFORM_BCN: Calback to indicate beacon to OS + * @SCAN_CB_TYPE_UPDATE_BCN: Calback to indicate beacon + * @SCAN_CB_TYPE_UNLINK_BSS: cb to unlink bss entry + * to MLME and update MLME info + * + */ +enum scan_cb_type { + SCAN_CB_TYPE_INFORM_BCN, + SCAN_CB_TYPE_UPDATE_BCN, + SCAN_CB_TYPE_UNLINK_BSS, +}; + +/* Set PNO */ +#define SCAN_PNO_MAX_PLAN_REQUEST 2 +#define SCAN_PNO_MAX_NETW_CHANNELS_EX 60 +#define SCAN_PNO_MAX_SUPP_NETWORKS 16 +#define SCAN_PNO_DEF_SLOW_SCAN_MULTIPLIER 6 +#define SCAN_PNO_DEF_SCAN_TIMER_REPEAT 20 +#define SCAN_PNO_MATCH_WAKE_LOCK_TIMEOUT (5 * 1000) /* in msec */ +#ifdef CONFIG_SLUB_DEBUG_ON +#define SCAN_PNO_SCAN_COMPLETE_WAKE_LOCK_TIMEOUT (2 * 1000) /* in msec */ +#else +#define SCAN_PNO_SCAN_COMPLETE_WAKE_LOCK_TIMEOUT (1 * 1000) /* in msec */ +#endif /* CONFIG_SLUB_DEBUG_ON */ + +#define SCAN_PNO_CHANNEL_PREDICTION 0 +#define SCAN_TOP_K_NUM_OF_CHANNELS 3 +#define SCAN_STATIONARY_THRESHOLD 10 +#define SCAN_CHANNEL_PREDICTION_FULL_SCAN_MS 60000 +#define SCAN_ADAPTIVE_PNOSCAN_DWELL_MODE 0 +#define SCAN_MAWC_NLO_ENABLED 1 +#define SCAN_MAWC_NLO_EXP_BACKOFF_RATIO 3 +#define SCAN_MAWC_NLO_INIT_SCAN_INTERVAL 10000 +#define SCAN_MAWC_NLO_MAX_SCAN_INTERVAL 60000 + + +/** + * enum ssid_bc_type - SSID broadcast type + * @SSID_BC_TYPE_UNKNOWN: Broadcast unknown + * @SSID_BC_TYPE_NORMAL: Broadcast normal + * @SSID_BC_TYPE_HIDDEN: Broadcast hidden + */ +enum ssid_bc_type { + SSID_BC_TYPE_UNKNOWN = 0, + SSID_BC_TYPE_NORMAL = 1, + SSID_BC_TYPE_HIDDEN = 2, +}; + +/** + * struct pno_nw_type - pno nw type + * @ssid: ssid + * @authentication: authentication type + * @encryption: encryption type + * @bcastNetwType: broadcast nw type + * @ucChannelCount: uc channel count + * @aChannels: pno channel + * @rssiThreshold: rssi threshold + */ +struct 
pno_nw_type { + struct wlan_ssid ssid; + uint32_t authentication; + uint32_t encryption; + uint32_t bc_new_type; + uint8_t channel_cnt; + uint32_t channels[SCAN_PNO_MAX_NETW_CHANNELS_EX]; + int32_t rssi_thresh; +}; + +/** + * struct connected_pno_band_rssi_pref - BSS preference based on band + * and RSSI + * @band: band preference + * @rssi_pref: RSSI preference + */ +struct cpno_band_rssi_pref { + int8_t band; + int8_t rssi; +}; + +/** + * struct nlo_mawc_params - Motion Aided Wireless Connectivity based + * Network List Offload configuration + * @vdev_id: VDEV ID on which the configuration needs to be applied + * @enable: flag to enable or disable + * @exp_backoff_ratio: ratio of exponential backoff + * @init_scan_interval: initial scan interval(msec) + * @max_scan_interval: max scan interval(msec) + */ +struct nlo_mawc_params { + uint8_t vdev_id; + bool enable; + uint32_t exp_backoff_ratio; + uint32_t init_scan_interval; + uint32_t max_scan_interval; +}; + +/** + * struct pno_scan_req_params - PNO Scan request structure + * @networks_cnt: Number of networks + * @do_passive_scan: Flag to request passive scan to fw + * @vdev_id: vdev id + * @fast_scan_period: Fast Scan period + * @slow_scan_period: Slow scan period + * @delay_start_time: delay in seconds to use before starting the first scan + * @fast_scan_max_cycles: Fast scan max cycles + * @scan_backoff_multiplier: multiply fast scan period by this after max cycles + * @pno_channel_prediction: PNO channel prediction feature status + * @uint32_t active_dwell_time: active dwell time + * @uint32_t passive_dwell_time: passive dwell time + * @top_k_num_of_channels: top K number of channels are used for tanimoto + * distance calculation. + * @stationary_thresh: threshold value to determine that the STA is stationary. + * @adaptive_dwell_mode: adaptive dwelltime mode for pno scan + * @channel_prediction_full_scan: periodic timer upon which a full scan needs + * to be triggered. 
+ * @networks_list: Preferred network list + * @scan_random: scan randomization params + * @ie_whitelist: probe req IE whitelist attrs + * @relative_rssi_set: Flag to check whether realtive_rssi is set or not + * @relative_rssi: Relative rssi threshold, used for connected pno + * @band_rssi_pref: Band and RSSI preference that can be given to one BSS + * over the other BSS + * + * E.g. + * { fast_scan_period=120, fast_scan_max_cycles=2, + * slow_scan_period=1800, scan_backoff_multiplier=2 } + * Result: 120s x2, 240s x2, 480s x2, 960s x2, 1800s xN + * @mawc_params: Configuration parameters for NLO MAWC. + */ +struct pno_scan_req_params { + uint32_t networks_cnt; + bool do_passive_scan; + uint32_t vdev_id; + uint32_t fast_scan_period; + uint32_t slow_scan_period; + uint32_t delay_start_time; + uint32_t fast_scan_max_cycles; + uint8_t scan_backoff_multiplier; + uint32_t active_dwell_time; + uint32_t passive_dwell_time; + uint32_t pno_channel_prediction; + uint32_t top_k_num_of_channels; + uint32_t stationary_thresh; + enum scan_dwelltime_adaptive_mode adaptive_dwell_mode; + uint32_t channel_prediction_full_scan; + struct pno_nw_type networks_list[SCAN_PNO_MAX_SUPP_NETWORKS]; + struct scan_random_attr scan_random; + struct probe_req_whitelist_attr ie_whitelist; + bool relative_rssi_set; + int8_t relative_rssi; + struct cpno_band_rssi_pref band_rssi_pref; + struct nlo_mawc_params mawc_params; +}; + +/** + * struct pno_user_cfg - user configuration required for PNO + * @channel_prediction: config PNO channel prediction feature status + * @top_k_num_of_channels: def top K number of channels are used for tanimoto + * distance calculation. + * @stationary_thresh: def threshold val to determine that STA is stationary. 
+ * @scan_timer_repeat_value: PNO scan timer repeat value + * @slow_scan_multiplier: PNO slow scan timer multiplier + * @dfs_chnl_scan_enable: Enable dfs channel PNO scan + * @pnoscan_adaptive_dwell_mode: def adaptive dwelltime mode for pno scan + * @channel_prediction_full_scan: def periodic timer upon which full scan needs + * to be triggered. + * @mawc_params: Configuration parameters for NLO MAWC. + */ +struct pno_user_cfg { + bool channel_prediction; + uint8_t top_k_num_of_channels; + uint8_t stationary_thresh; + uint32_t scan_timer_repeat_value; + uint32_t slow_scan_multiplier; + bool dfs_chnl_scan_enabled; + enum scan_dwelltime_adaptive_mode adaptive_dwell_mode; + uint32_t channel_prediction_full_scan; + struct nlo_mawc_params mawc_params; +}; + +/** + * struct scan_user_cfg - user configuration required for for scan + * @allow_dfs_chan_in_first_scan: first scan should contain dfs channels or not. + * @allow_dfs_chan_in_scan: Scan DFS channels or not. + * @skip_dfs_chan_in_p2p_search: Skip DFS channels in P2P search. + * @use_wake_lock_in_user_scan: if wake lock will be acquired during user scan + * @active_dwell: default active dwell time + * @active_dwell_2g: default active dwell time for 2G channels + * @passive_dwell:default passive dwell time + * @conc_active_dwell: default concurrent active dwell time + * @conc_passive_dwell: default concurrent passive dwell time + * @conc_max_rest_time: default concurrent max rest time + * @conc_min_rest_time: default concurrent min rest time + * @conc_idle_time: default concurrent idle time + * @scan_cache_aging_time: default scan cache aging time + * @is_snr_monitoring_enabled: whether snr monitoring enabled or not + * @prefer_5ghz: Prefer 5ghz AP over 2.4Ghz AP + * @select_5gh_margin: Prefer connecting to 5G AP even if + * its RSSI is lower by select_5gh_margin dbm than 2.4G AP. + * applicable if prefer_5ghz is set. 
+ * @scan_bucket_threshold: first scan bucket + * threshold to the mentioned value and all the AP's which + * have RSSI under this threshold will fall under this + * bucket + * @rssi_cat_gap: set rssi category gap + * @scan_dwell_time_mode: Adaptive dweltime mode + * @scan_dwell_time_mode_nc: Adaptive dweltime mode without connection + * @honour_nl_scan_policy_flags: honour nl80211 scan policy flags + * @pno_cfg: Pno related config params + * @ie_whitelist: probe req IE whitelist attrs + * @is_bssid_hint_priority: True if bssid_hint is priority + * @enable_mac_spoofing: enable mac address spoof in scan + * @sta_miracast_mcc_rest_time: sta miracast mcc rest time + * @score_config: scoring logic configuration + */ +struct scan_user_cfg { + bool allow_dfs_chan_in_first_scan; + bool allow_dfs_chan_in_scan; + bool skip_dfs_chan_in_p2p_search; + bool use_wake_lock_in_user_scan; + uint32_t active_dwell; + uint32_t active_dwell_2g; + uint32_t passive_dwell; + uint32_t conc_active_dwell; + uint32_t conc_passive_dwell; + uint32_t conc_max_rest_time; + uint32_t conc_min_rest_time; + uint32_t conc_idle_time; + uint32_t scan_cache_aging_time; + bool is_snr_monitoring_enabled; + uint32_t prefer_5ghz; + uint32_t select_5ghz_margin; + int32_t scan_bucket_threshold; + uint32_t rssi_cat_gap; + enum scan_dwelltime_adaptive_mode scan_dwell_time_mode; + enum scan_dwelltime_adaptive_mode scan_dwell_time_mode_nc; + bool honour_nl_scan_policy_flags; + struct pno_user_cfg pno_cfg; + struct probe_req_whitelist_attr ie_whitelist; + uint32_t usr_cfg_probe_rpt_time; + uint32_t usr_cfg_num_probes; + bool is_bssid_hint_priority; + bool enable_mac_spoofing; + uint32_t sta_miracast_mcc_rest_time; + uint8_t sta_scan_burst_duration; + uint8_t p2p_scan_burst_duration; + uint8_t go_scan_burst_duration; + uint8_t ap_scan_burst_duration; + struct scoring_config score_config; +}; + +/** + * update_beacon_cb() - cb to inform/update beacon + * @psoc: psoc pointer + * @scan_params: scan entry to 
inform/update + * + * @Return: void + */ +typedef void (*update_beacon_cb) (struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_entry); + +/** + * scan_iterator_func() - function prototype of scan iterator function + * @scan_entry: scan entry object + * @arg: extra argument + * + * PROTO TYPE, scan iterator function prototype + * + * @Return: QDF_STATUS + */ +typedef QDF_STATUS (*scan_iterator_func) (void *arg, + struct scan_cache_entry *scan_entry); + +/** + * enum scan_priority - scan priority definitions + * @SCAN_CFG_DISABLE_SCAN_COMMAND_TIMEOUT: disable scan command timeout + * @SCAN_CFG_DROP_BCN_ON_CHANNEL_MISMATCH: config to drop beacon/probe + * response frames if received channel and IE channels do not match + */ +enum scan_config { + SCAN_CFG_DISABLE_SCAN_COMMAND_TIMEOUT, + SCAN_CFG_DROP_BCN_ON_CHANNEL_MISMATCH, +}; +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..23127e13ecaae4f29cb8a07f407b23a11e1e33c0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_tgt_api.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan south bound interface definitions + */ + +#ifndef _WLAN_SCAN_TGT_API_H_ +#define _WLAN_SCAN_TGT_API_H_ + +#include +#include +#include +#include +#include +#include + +/** + * tgt_scan_bcn_probe_rx_callback() - The callbeack registered to tx/rx module + * @psoc: psoc context + * @peer: peer + * @buf: frame buf + * @params: rx event params + * @frm_type: frame type + * + * The callbeack registered to tx/rx module and is called when beacon + * or probe resp is recived. This will post a msg to target_if queue. + * + * Return: success or error code. + */ +QDF_STATUS tgt_scan_bcn_probe_rx_callback(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer, qdf_nbuf_t buf, + struct mgmt_rx_event_params *rx_param, + enum mgmt_frame_type frm_type); + +/** + * tgt_scan_event_handler() - The callbeack registered to WMI for scan events + * @psoc: psoc handle + * @event_info: event info + * + * The callbeack registered to WMI for scan events and is called + * event for scan is received. This will post a msg to target_if queue. + * + * Return: 0 for success or error code. + */ +QDF_STATUS +tgt_scan_event_handler(struct wlan_objmgr_psoc *psoc, + struct scan_event_info *event_info); + +#ifdef FEATURE_WLAN_SCAN_PNO + +/** + * tgt_scan_pno_start() - invoke lmac send PNO start req + * @vdev: vdev pointer + * @req: pno req params + * + * Return: 0 for success or error code. 
+ */ +QDF_STATUS tgt_scan_pno_start(struct wlan_objmgr_vdev *vdev, + struct pno_scan_req_params *req); + +/** + * tgt_scan_pno_stop() - invoke lmac send PNO stop req + * @vdev: vdev pointer + * @vdev_id: pno req params + * + * Return: 0 for success or error code. + */ +QDF_STATUS tgt_scan_pno_stop(struct wlan_objmgr_vdev *vdev, + uint8_t vdev_id); + +#endif + +/** + * tgt_scan_start() - invoke lmac scan start + * @req: scan request object + * + * This API invokes lmac API function to start scan + * + * Return: QDF_STATUS_SUCCESS for success or error code. + */ +QDF_STATUS +tgt_scan_start(struct scan_start_request *req); + + +/** + * tgt_scan_cancel() - invoke lmac scan cancel + * @req: scan request object + * + * This API invokes lmac API function to cancel scan + * + * Return: QDF_STATUS_SUCCESS for success or error code. + */ +QDF_STATUS +tgt_scan_cancel(struct scan_cancel_request *req); + +/** + * tgt_scan_register_ev_handler() - invoke lmac register scan event handler + * @psoc: psoc object + * + * This API invokes lmac API function to register for scan events + * + * Return: QDF_STATUS_SUCCESS for success or error code. + */ +QDF_STATUS +tgt_scan_register_ev_handler(struct wlan_objmgr_psoc *psoc); + +/** + * tgt_scan_unregister_ev_handler() - invoke lmac unregister scan event handler + * @psoc: psoc object + * + * This API invokes lmac API function to unregister for scan events + * + * Return: QDF_STATUS_SUCCESS for success or error code. 
+ */ +QDF_STATUS +tgt_scan_unregister_ev_handler(struct wlan_objmgr_psoc *psoc); + +/** + * tgt_scan_set_max_active_scans() - lmac handler to set max active scans + * @psoc: psoc object + * @max_active_scans: maximum active scans allowed on underlying psoc + * + * Return: QDF_STATUS + */ +QDF_STATUS +tgt_scan_set_max_active_scans(struct wlan_objmgr_psoc *psoc, + uint32_t max_active_scans); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..ac87f2f61b7a1b15f8dd9f12857d8689e964ec21 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_ucfg_api.h @@ -0,0 +1,659 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/* + * DOC: contains scan north bound interface api + */ + +#ifndef _WLAN_SCAN_UCFG_API_H_ +#define _WLAN_SCAN_UCFG_API_H_ + +#include +#include +#include +#include +#include + +/** + * ucfg_scan_register_requester() - assigns requester ID to caller and + * registers scan event call back handler + * @psoc: psoc object + * @module_name:name of requester module + * @event_cb: event callback function pointer + * @arg: argument to @event_cb + * + * API, allows other components to allocate requester id + * Normally used by modules at init time to register their callback + * and get one requester id. @event_cb will be invoked for + * all scan events whose requester id matches with @requester. + * + * Return: assigned non zero requester id for success + * zero (0) for failure + */ +wlan_scan_requester +ucfg_scan_register_requester(struct wlan_objmgr_psoc *psoc, + uint8_t *module_name, scan_event_handler event_cb, void *arg); + +/** + * ucfg_scan_unregister_requester() -reclaims previously allocated requester ID + * @psoc: psoc object + * @requester: requester ID to reclaim. + * + * API, reclaims previously allocated requester id by + * ucfg_scan_get_req_id_reg_cb() + * + * Return: void + */ +void ucfg_scan_unregister_requester(struct wlan_objmgr_psoc *psoc, + wlan_scan_requester requester); + + +/** + * ucfg_get_scan_requester_name()- returns module name of requester ID owner + * @psoc: psoc object + * @requester: requester ID + * + * API, returns module name of requester id owner + * + * Return: pointer to module name or "unknown" if requester id not found. 
+ */ +uint8_t *ucfg_get_scan_requester_name(struct wlan_objmgr_psoc *psoc, + wlan_scan_requester requester); + + + +/** + * ucfg_scan_get_scan_id() - allocates scan ID + * @psoc: psoc object + * + * API, allocates a new scan id for caller + * + * Return: newly allocated scan ID + */ +wlan_scan_id +ucfg_scan_get_scan_id(struct wlan_objmgr_psoc *psoc); + +#ifdef FEATURE_WLAN_SCAN_PNO +/** + * ucfg_scan_pno_start() - Public API to start PNO + * @vdev: vdev pointer + * @req: pno req params + * + * Return: 0 for success or error code. + */ +QDF_STATUS ucfg_scan_pno_start(struct wlan_objmgr_vdev *vdev, +struct pno_scan_req_params *req); + +/** + * ucfg_scan_pno_stop() - Public API to stop PNO + * @vdev: vdev pointer + * @req: pno req params + * + * Return: 0 for success or error code. + */ +QDF_STATUS ucfg_scan_pno_stop(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_scan_get_pno_in_progress() - Public API to check if pno is in progress + * @vdev: vdev pointer + * + * Return: true if pno in progress else false. + */ +bool ucfg_scan_get_pno_in_progress(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_scan_get_pno_match() - Public API to check if pno matched + * @vdev: vdev pointer + * + * Return: true if pno matched else false. 
+ */ +bool ucfg_scan_get_pno_match(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_scan_register_pno_cb() - register pno cb + * @psoc: psoc object + * @event_cb: callback function pointer + * @arg: argument to @event_cb + * + * Return: QDF_STATUS + */ +QDF_STATUS +ucfg_scan_register_pno_cb(struct wlan_objmgr_psoc *psoc, + scan_event_handler event_cb, void *arg); + +/** + * ucfg_scan_get_pno_def_params() - get the defaults pno params + * @vdev: vdev object + * @req: pno request object + * + * Return: QDF_STATUS_SUCCESS or error code + */ +QDF_STATUS +ucfg_scan_get_pno_def_params(struct wlan_objmgr_vdev *vdev, + struct pno_scan_req_params *req); + +/** + * ucfg_scan_is_dfs_chnl_scan_enabled() - Check if PNO dfs channel scan support + * is enabled + * @psoc: pointer to psoc object + * + * Return: dfs_chnl_scan_enabled flag + */ +bool ucfg_scan_is_dfs_chnl_scan_enabled(struct wlan_objmgr_psoc *psoc); + +/** +* ucfg_scan_get_scan_timer_repeat_value() - API to get PNO scan timer repeat +* value +* @psoc: pointer to psoc object +* +* Return: scan_timer_repeat_value +*/ +uint32_t ucfg_scan_get_scan_timer_repeat_value(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_get_slow_scan_multiplier() - API to get PNO slow scan multiplier + * value + * @psoc: pointer to psoc object + * + * Return: slow_scan_multiplier value + */ +uint32_t ucfg_scan_get_slow_scan_multiplier(struct wlan_objmgr_psoc *psoc); +#else + +static inline bool +ucfg_scan_get_pno_in_progress(struct wlan_objmgr_vdev *vdev) +{ + return false; +} + +static inline bool +ucfg_scan_get_pno_match(struct wlan_objmgr_vdev *vdev) +{ + return false; +} +#endif /* FEATURE_WLAN_SCAN_PNO */ +/** + * ucfg_scan_start() - Public API to start a scan + * @req: start scan req params + * + * The Public API to start a scan. Post a msg to target_if queue + * + * Return: 0 for success or error code. 
+ */ +QDF_STATUS +ucfg_scan_start(struct scan_start_request *req); + +/** + * ucfg_scan_set_enable() - Public API to disable/enable scans + * @psoc: psoc on which scans need to be disabled + * @enable: enable scan if true disable is false + * + * Return: QDF_STATUS. + */ +QDF_STATUS ucfg_scan_set_enable(struct wlan_objmgr_psoc *psoc, bool enable); + +/** + * ucfg_scan_get_enable() - Public API to get if scan is enabled or disabled + * @psoc: psoc on which scans status need to be checked + * + * Return: true if enabled else false. + */ +bool ucfg_scan_get_enable(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_set_miracast() - Public API to disable/enable miracast flag + * @psoc: psoc pointer + * @enable: enable miracast if true disable is false + * + * Return: QDF_STATUS. + */ +QDF_STATUS ucfg_scan_set_miracast( + struct wlan_objmgr_psoc *psoc, bool enable); + +/** + * ucfg_scan_set_global_config() - Public API to set global scan config + * @psoc: psoc context + * @config: config to set + * @val: new config value + * + * Return: QDF_STATUS. + */ +QDF_STATUS +ucfg_scan_set_global_config(struct wlan_objmgr_psoc *psoc, + enum scan_config config, uint32_t val); + +/** + * ucfg_scan_get_global_config() - Public API to get global scan config + * @psoc: psoc context + * @config: config to set + * @val: uint32* to hold returned config value + * + * Return: QDF_STATUS. + */ +QDF_STATUS +ucfg_scan_get_global_config(struct wlan_objmgr_psoc *psoc, + enum scan_config config, uint32_t *val); + +/** + * ucfg_scan_set_wide_band_scan() - Public API to disable/enable wide band scan + * @pdev: psoc on which scans need to be disabled + * @enable: enable wide band scan if @enable is true, disable otherwise + * + * Return: QDF_STATUS. 
+ */ +QDF_STATUS ucfg_scan_set_wide_band_scan( + struct wlan_objmgr_pdev *pdev, bool enable); + +/** + * ucfg_scan_get_wide_band_scan() - Public API to check if + * wide band scan is enabled or disabled + * @pdev: psoc on which scans status need to be checked + * + * Return: true if enabled else false. + */ +bool ucfg_scan_get_wide_band_scan(struct wlan_objmgr_pdev *pdev); + +/** + * ucfg_scan_set_custom_scan_chan_list() - Public API to restrict scan + * to few pre configured channels + * @pdev: psoc on which scans need to be disabled + * @chan_list: list of channels to scan if set + * + * Return: QDF_STATUS. + */ +QDF_STATUS ucfg_scan_set_custom_scan_chan_list( + struct wlan_objmgr_pdev *pdev, struct chan_list *chan_list); + +/** + * ucfg_scan_cancel() - Public API to stop a scan + * @req: stop scan request params + * + * The Public API to stop a scan. Post a msg to target_if queue + * + * Return: 0 for success or error code. + */ +QDF_STATUS +ucfg_scan_cancel(struct scan_cancel_request *req); + +/** + * ucfg_scan_cancel_sync() - Public API to stop a scan and wait + * till all scan are completed + * @req: stop scan request params + * + * The Public API to stop a scan and wait + * till all scan are completed + * + * Return: 0 for success or error code. 
+ */ +QDF_STATUS +ucfg_scan_cancel_sync(struct scan_cancel_request *req); + +/** + * ucfg_scan_get_result() - The Public API to get scan results + * @pdev: pdev info + * @filter: Filters + * + * This function fetches scan result + * + * Return: scan list pointer + */ +qdf_list_t *ucfg_scan_get_result(struct wlan_objmgr_pdev *pdev, + struct scan_filter *filter); + +/** + * ucfg_scan_purge_results() - purge the scan list + * @scan_list: scan list to be purged + * + * This function purge the temp scan list + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scan_purge_results(qdf_list_t *scan_list); + +/** + * ucfg_scan_flush_results() - The Public API to flush scan result + * @pdev: pdev object + * @filter: filter to flush the scan entries + * + * The Public API to flush scan result. + * + * Return: 0 for success or error code. + */ +QDF_STATUS ucfg_scan_flush_results(struct wlan_objmgr_pdev *pdev, + struct scan_filter *filter); + +/** + * ucfg_scan_filter_valid_channel() - The Public API to filter scan result + * based on valid channel list + * @pdev: pdev object + * @chan_list: valid channel list + * @num_chan: number of valid channels + * + * The Public API to to filter scan result + * based on valid channel list. + * + * Return: void. + */ +void ucfg_scan_filter_valid_channel(struct wlan_objmgr_pdev *pdev, + uint8_t *chan_list, uint32_t num_chan); + +/** + * ucfg_scan_db_iterate() - function to iterate scan table + * @pdev: pdev object + * @func: iterator function pointer + * @arg: argument to be passed to func() + * + * API, this API iterates scan table and invokes func + * on each scan enetry by passing scan entry and arg. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS +ucfg_scan_db_iterate(struct wlan_objmgr_pdev *pdev, + scan_iterator_func func, void *arg); + +/** + * ucfg_scan_update_mlme_by_bssinfo() - The Public API to update mlme + * info in the scan entry + * @pdev: pdev object + * @bssid: bssid info to find the matching scan entry + * @mlme_info: mlme info to be updated. + * + * The Public API to update mlme info in the scan entry. + * Post a msg to target_if queue + * + * Return: 0 for success or error code. + */ +QDF_STATUS +ucfg_scan_update_mlme_by_bssinfo(struct wlan_objmgr_pdev *pdev, + struct bss_info *bss_info, + struct mlme_info *mlme_info); + +/** + * ucfg_scan_register_event_handler() - The Public API to register + * an event cb handler + * @pdev: pdev object + * @event_cb: callback function to register + * @arg: component specific priv argument to @event_cb callback function + * + * The Public API to register a event cb handler. This cb is called whenever + * any scan event is received on @pdev. + * + * Return: 0 for success or error code. + */ + +QDF_STATUS +ucfg_scan_register_event_handler(struct wlan_objmgr_pdev *pdev, + scan_event_handler event_cb, void *arg); + +/** + * ucfg_scan_unregister_event_handler() - Public API to unregister + * event cb handler + * @pdev: pdev object + * @event_cb: callback function to unregister + * @arg: component specific priv argument to @event_cb callback function + * + * Unregister a event cb handler. cb and arg will be used to + * find the calback. 
+ * + * Return: void + */ + +void +ucfg_scan_unregister_event_handler(struct wlan_objmgr_pdev *pdev, + scan_event_handler event_cb, void *arg); + +/** + * ucfg_scan_init_default_params() - get the defaults scan params + * @vdev: vdev object + * @req: scan request object + * + * get the defaults scan params + * + * Return: QDF_STATUS_SUCCESS or error code + */ +QDF_STATUS +ucfg_scan_init_default_params(struct wlan_objmgr_vdev *vdev, + struct scan_start_request *req); + +/** + * ucfg_scan_init_ssid_params() - initialize scan request ssid list + * + * @scan_req: scan request object + * @num_ssid: number of ssid's in ssid list + * @ssid_list: ssid list + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS +ucfg_scan_init_ssid_params(struct scan_start_request *scan_req, + uint32_t num_ssid, struct wlan_ssid *ssid_list); + +/** + * ucfg_scan_init_bssid_params() - initialize scan request bssid list + * @scan_req: scan request object + * @num_ssid: number of bssid's in bssid list + * @bssid_list: bssid list + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS +ucfg_scan_init_bssid_params(struct scan_start_request *scan_req, + uint32_t num_ssid, struct qdf_mac_addr *bssid_list); + +/** + * ucfg_scan_init_chanlist_params() - initialize scan request channel list + * @scan_req: scan request object + * @num_chans: number of channels in channel list + * @chan_list: channel list + * @phymode: phymode in which scan shall be done + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS +ucfg_scan_init_chanlist_params(struct scan_start_request *scan_req, + uint32_t num_chans, uint32_t *chan_list, uint32_t *phymode); + +/** + * ucfg_scan_get_vdev_status() - API to check vdev scan status + * @vdev: vdev object + * + * Return: enum scm_scan_status + */ +enum scm_scan_status +ucfg_scan_get_vdev_status(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_scan_get_pdev_status() - API to check pdev scan status + * @pdev: vdev 
object + * + * Return: enum scm_scan_status + */ +enum scm_scan_status +ucfg_scan_get_pdev_status(struct wlan_objmgr_pdev *pdev); + +/** + * ucfg_scan_register_bcn_cb() - API to register api + * to inform/update bcn/probe as soon as they are received + * @pdev: psoc + * @cb: callback to be registered + * @type: Type of callback to be registered + * + * Return: enum scm_scan_status + */ +QDF_STATUS ucfg_scan_register_bcn_cb(struct wlan_objmgr_psoc *psoc, + update_beacon_cb cb, enum scan_cb_type type); + +/* + * ucfg_scan_update_user_config() - Update scan cache user config + * @psoc: psoc + * @scan_cfg: scan user config + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scan_update_user_config(struct wlan_objmgr_psoc *psoc, + struct scan_user_cfg *scan_cfg); + +/** + * ucfg_scan_update_roam_params() - Store/Update the roam params + * @psoc: psoc + * @roam_params: roam params + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scan_update_roam_params(struct wlan_objmgr_psoc *psoc, + struct roam_filter_params *roam_params); + +/* + * ucfg_scan_init() - Scan module initialization API + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scan_init(void); + +/** + * ucfg_scan_deinit() - Scan module deinitialization API + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scan_deinit(void); + +/** + * ucfg_scan_psoc_enable() - Scan module enable API + * @psoc: psoc object + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scan_psoc_enable(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_psoc_enable() - Scan module disable API + * @psoc: psoc object + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scan_psoc_disable(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_psoc_open() - Scan module psoc open API + * @psoc: psoc object + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scan_psoc_open(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_psoc_close() - Scan module psoc close API + * @psoc: psoc object + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_scan_psoc_close(struct 
wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_get_max_active_scans() - API to get max active scans + * supported on this psoc + * @psoc: psoc object + * + * Return: uint32_t + */ +uint32_t ucfg_scan_get_max_active_scans(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_ie_whitelist_enabled() - Checks for IE whitelisting enable + * @psoc: pointer to psoc object + * @vdev: pointer to vdev + * + * This function is used to check whether IE whitelisting is enabled or not + * + * Return: If enabled returns true else returns false + */ +bool ucfg_ie_whitelist_enabled(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_copy_ie_whitelist_attrs() - Populate probe req IE whitelist attrs + * @psoc: pointer to psoc object + * @ie_whitelist: output parameter to hold ie whitelist attrs + * + * If IE whitelisting is enabled then invoke this function to copy + * IE whitelisting attrs from wlan scan object + * + * Return: true - successful copy + * false - copy failed + */ +bool ucfg_copy_ie_whitelist_attrs(struct wlan_objmgr_psoc *psoc, + struct probe_req_whitelist_attr *ie_whitelist); + +/** + * ucfg_scan_set_bt_activity() - API to set bt activity + * @psoc: pointer to psoc object + * @bt_a2dp_active: bt activiy value + * + * Return: None + */ +void ucfg_scan_set_bt_activity(struct wlan_objmgr_psoc *psoc, + bool bt_a2dp_active); +/** + * ucfg_scan_get_bt_activity() - API to get bt activity + * @psoc: pointer to psoc object + * + * Return: true if enabled else false. 
+ */ +bool ucfg_scan_get_bt_activity(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_set_vdev_del_in_progress() - API to mark vdev delete in progress + * @vdev: pointer to vdev object + * + * Return: none + */ +void ucfg_scan_set_vdev_del_in_progress(struct wlan_objmgr_vdev *vdev); +/** + * ucfg_scan_clear_vdev_del_in_progress() - API to reset vdev delete in progress + * @vdev: pointer to vdev object + * + * Return: none + */ +void ucfg_scan_clear_vdev_del_in_progress(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_scan_cfg_honour_nl_scan_policy_flags() - API to get nl scan policy + * flags honoured. + * @psoc: pointer to psoc object + * + * Return: nl scan flags is honoured or not + */ +bool ucfg_scan_cfg_honour_nl_scan_policy_flags(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_wake_lock_in_user_scan() - API to determine if wake lock in user + * scan is used. + * @psoc: pointer to psoc object + * + * Return: true if wake lock in user scan is required + */ +bool ucfg_scan_wake_lock_in_user_scan(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_scan_get_max_cmd_allowed() - API To get max no. of scan commands allowed + * + * Return: Max Scan commands allowed count + */ +uint32_t ucfg_scan_get_max_cmd_allowed(void); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_utils_api.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_utils_api.h new file mode 100644 index 0000000000000000000000000000000000000000..52b570d5dd33fd1d2586cf0393fab55aa75cea14 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_utils_api.h @@ -0,0 +1,1515 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: contains scan public utility functions + */ + +#ifndef _WLAN_SCAN_UTILS_H_ +#define _WLAN_SCAN_UTILS_H_ + +#include +#include +#include +#include +#include +#include +#include + +#define ASCII_SPACE_CHARACTER 32 + +/** + * util_is_scan_entry_match() - func to check if both scan entry + * are from same AP + * @entry1: scan entry 1 + * @entry2: scan entry 2 + * + * match the two scan entries + * + * Return: true if entry match else false. + */ +bool util_is_scan_entry_match( + struct scan_cache_entry *entry1, + struct scan_cache_entry *entry2); + +/** + * util_scan_unpack_beacon_frame() - func to unpack beacon frame to scan entry + * @pdev: pdev pointer + * @frame: beacon/probe frame + * @frame_len: beacon frame len + * @frm_subtype: beacon or probe + * @rx_param: rx meta data + * + * get the defaults scan params + * + * Return: unpacked list of scan entries. 
+ */ +qdf_list_t *util_scan_unpack_beacon_frame( + struct wlan_objmgr_pdev *pdev, + uint8_t *frame, qdf_size_t frame_len, uint32_t frm_subtype, + struct mgmt_rx_event_params *rx_param); + +/** + * util_scan_get_ev_type_name() - converts enum event to printable string + * @event: event of type scan_event_type + * + * API, converts enum event to printable character string + * + * Return: pointer to printable string + */ +const char *util_scan_get_ev_type_name(enum scan_event_type event); + +/** + * util_scan_get_ev_reason_name() - converts enum reason to printable string + * @reason enum of scan completion reason + * + * API, converts enum event to printable character string + * + * Return: pointer to printable string + */ +const char *util_scan_get_ev_reason_name(enum scan_completion_reason reason); + +/** + * util_scan_entry_macaddr() - function to read transmitter address + * @scan_entry: scan entry + * + * API, function to read transmitter address of scan entry + * + * Return: pointer to mac address + */ +static inline uint8_t* +util_scan_entry_macaddr(struct scan_cache_entry *scan_entry) +{ + return &(scan_entry->mac_addr.bytes[0]); +} + +/** + * util_scan_entry_bssid() - function to read bssid + * @scan_entry: scan entry + * + * API, function to read bssid of scan entry + * + * Return: pointer to mac address + */ +static inline uint8_t* +util_scan_entry_bssid(struct scan_cache_entry *scan_entry) +{ + return &(scan_entry->bssid.bytes[0]); +} + +/** + * util_scan_entry_capinfo() - function to read capibility info + * @scan_entry: scan entry + * + * API, function to read capibility info of scan entry + * + * Return: capability info + */ +static inline union wlan_capability +util_scan_entry_capinfo(struct scan_cache_entry *scan_entry) +{ + return scan_entry->cap_info; +} + +/** + * util_scan_entry_beacon_interval() - function to read beacon interval + * @scan_entry: scan entry + * + * API, function to read beacon interval of scan entry + * + * Return: beacon 
interval + */ +static inline uint16_t +util_scan_entry_beacon_interval(struct scan_cache_entry *scan_entry) +{ + return scan_entry->bcn_int; +} + +/** + * util_scan_entry_sequence_number() - function to read sequence number + * @scan_entry: scan entry + * + * API, function to read sequence number of scan entry + * + * Return: sequence number + */ +static inline uint16_t +util_scan_entry_sequence_number(struct scan_cache_entry *scan_entry) +{ + return scan_entry->seq_num; +} + +/** + * util_scan_entry_tsf() - function to read tsf + * @scan_entry: scan entry + * + * API, function to read tsf of scan entry + * + * Return: tsf + */ +static inline uint8_t* +util_scan_entry_tsf(struct scan_cache_entry *scan_entry) +{ + return scan_entry->tsf_info.data; +} + +/** + * util_scan_entry_reset_timestamp() - function to reset bcn receive timestamp + * @scan_entry: scan entry + * + * API, function to reset bcn receive timestamp of scan entry + * + * Return: void + */ +static inline void +util_scan_entry_reset_timestamp(struct scan_cache_entry *scan_entry) +{ + scan_entry->scan_entry_time = 0; +} + +/* + * Macros used for RSSI calculation. + */ +#define WLAN_RSSI_AVERAGING_TIME (5 * 1000) /* 5 seconds */ + +#define WLAN_RSSI_EP_MULTIPLIER (1<<7) /* pow2 to optimize out * and / */ + +#define WLAN_RSSI_LPF_LEN 10 +#define WLAN_RSSI_DUMMY_MARKER 0x127 + +#define WLAN_EP_MUL(x, mul) ((x) * (mul)) + +#define WLAN_EP_RND(x, mul) ((((x)%(mul)) >= ((mul)/2)) ?\ + ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) + +#define WLAN_RSSI_GET(x) WLAN_EP_RND(x, WLAN_RSSI_EP_MULTIPLIER) + +#define RSSI_LPF_THRESHOLD -20 + + +#define WLAN_RSSI_OUT(x) (((x) != WLAN_RSSI_DUMMY_MARKER) ? \ + (WLAN_EP_RND((x), WLAN_RSSI_EP_MULTIPLIER)) : WLAN_RSSI_DUMMY_MARKER) + + +#define WLAN_RSSI_IN(x) (WLAN_EP_MUL((x), WLAN_RSSI_EP_MULTIPLIER)) + +#define WLAN_LPF_RSSI(x, y, len) \ + ((x != WLAN_RSSI_DUMMY_MARKER) ? 
((((x) << 3) + (y) - (x)) >> 3) : (y)) + +#define WLAN_RSSI_LPF(x, y) do { \ + if ((y) >= RSSI_LPF_THRESHOLD) \ + x = WLAN_LPF_RSSI((x), WLAN_RSSI_IN((y)), WLAN_RSSI_LPF_LEN); \ + } while (0) + +#define WLAN_ABS_RSSI_LPF(x, y) do { \ + if ((y) >= (RSSI_LPF_THRESHOLD + WLAN_DEFAULT_NOISE_FLOOR)) \ + x = WLAN_LPF_RSSI((x), WLAN_RSSI_IN((y)), WLAN_RSSI_LPF_LEN); \ + } while (0) + +/** + * util_scan_entry_rssi() - function to read rssi of scan entry + * @scan_entry: scan entry + * + * API, function to read rssi value of scan entry + * + * Return: rssi + */ +static inline uint8_t +util_scan_entry_rssi(struct scan_cache_entry *scan_entry) +{ + uint32_t rssi = WLAN_RSSI_OUT(scan_entry->avg_rssi); + /* + * An entry is in the BSS list means we've received at least one beacon + * from the corresponding AP, so the rssi must be initialized. + * + * If the RSSI is not initialized, return 0 (i.e. RSSI == Noise Floor). + * Once se_avgrssi field has been initialized, ATH_RSSI_OUT always + * returns values that fit in an 8-bit variable + * (RSSI values are typically 0-90). + */ + return (rssi >= WLAN_RSSI_DUMMY_MARKER) ? 
0 : (uint8_t) rssi; +} + +/** + * util_scan_entry_phymode() - function to read phymode of scan entry + * @scan_entry: scan entry + * + * API, function to read phymode of scan entry + * + * Return: phymode + */ +static inline enum wlan_phymode +util_scan_entry_phymode(struct scan_cache_entry *scan_entry) +{ + return scan_entry->phy_mode; +} + +/** + * util_is_ssid_match() - to check if ssid match + * @ssid1: ssid 1 + * @ssid2: ssid 2 + * + * Return: true if ssid match + */ +static inline bool +util_is_ssid_match(struct wlan_ssid *ssid1, + struct wlan_ssid *ssid2) +{ + if (ssid1->length != ssid2->length) + return false; + + if (!qdf_mem_cmp(ssid1->ssid, + ssid2->ssid, ssid1->length)) + return true; + + return false; +} + +/** + * util_is_bssid_match() - to check if bssid match + * @bssid1: bssid 1 + * @bssid2: bssid 2 + * + * Return: true if bssid match + */ +static inline bool util_is_bssid_match(struct qdf_mac_addr *bssid1, + struct qdf_mac_addr *bssid2) +{ + + if (qdf_is_macaddr_zero(bssid1) || + qdf_is_macaddr_broadcast(bssid1)) + return true; + + if (qdf_is_macaddr_equal(bssid1, bssid2)) + return true; + + return false; +} + +/** + * util_is_bss_type_match() - to check if bss type + * @bss_type: bss type + * @cap: capability + * + * Return: true if bss type match + */ +static inline bool util_is_bss_type_match(enum wlan_bss_type bss_type, + union wlan_capability cap) +{ + bool match = true; + + switch (bss_type) { + case WLAN_TYPE_ANY: + break; + case WLAN_TYPE_IBSS: + if (!cap.wlan_caps.ibss) + match = false; + break; + case WLAN_TYPE_BSS: + if (!cap.wlan_caps.ess) + match = false; + break; + default: + match = false; + } + + return match; +} + +/** + * util_country_code_match() - to check if country match + * @country: country code pointer + * @country_ie: country IE in beacon + * + * Return: true if country match + */ +static inline bool util_country_code_match(uint8_t *country, + struct wlan_country_ie *cc) +{ + if (!country || !country[0]) + return true; + 
+ if (!cc) + return false; + + if (cc->cc[0] == country[0] && + cc->cc[1] == country[1]) + return true; + + return false; +} + +/** + * util_mdie_match() - to check if mdie match + * @mobility_domain: mobility domain + * @mdie: mobility domain ie + * + * Return: true if country match + */ +static inline bool util_mdie_match(uint16_t mobility_domain, + struct rsn_mdie *mdie) +{ + uint16_t md; + + if (!mobility_domain) + return true; + + if (!mdie) + return false; + + md = + (mdie->mobility_domain[1] << 8) | + mdie->mobility_domain[0]; + + if (md == mobility_domain) + return true; + + return false; +} + +/** + * util_scan_entry_ssid() - function to read ssid of scan entry + * @scan_entry: scan entry + * + * API, function to read ssid of scan entry + * + * Return: ssid + */ +static inline struct wlan_ssid* +util_scan_entry_ssid(struct scan_cache_entry *scan_entry) +{ + return &(scan_entry->ssid); +} + +/** + * util_scan_entry_dtimperiod() - function to read dtim period of scan entry + * @scan_entry: scan entry + * + * API, function to read dtim period of scan entry + * + * Return: dtim period + */ +static inline uint8_t +util_scan_entry_dtimperiod(struct scan_cache_entry *scan_entry) +{ + return scan_entry->dtim_period; +} + +/** + * util_scan_entry_tim() - function to read tim ie of scan entry + * @scan_entry: scan entry + * + * API, function to read tim ie of scan entry + * + * Return: timie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_tim(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.tim; +} + +/** + * util_scan_entry_beacon_frame() - function to read full beacon or + * probe resp frame + * @scan_entry: scan entry + * + * API, function to read full beacon or probe resp frame including frame header + * + * Return: beacon/probe resp frame + */ +static inline struct element_info +util_scan_entry_beacon_frame(struct scan_cache_entry *scan_entry) +{ + /* util_scan_entry_beacon_data */ + return scan_entry->raw_frame; 
+} + +/** + * util_scan_entry_ie_data() - function to read tagged IEs + * @scan_entry: scan entry + * + * API, function to read beacon/probe response frames starting from tagged IEs + * (excluding frame header and fixed parameters) + * + * Return: tagged IES of beacon/probe resp frame + */ +static inline uint8_t* +util_scan_entry_ie_data(struct scan_cache_entry *scan_entry) +{ + struct element_info bcn_frm; + uint8_t *ie_data = NULL; + + bcn_frm = util_scan_entry_beacon_frame(scan_entry); + ie_data = (uint8_t *) (bcn_frm.ptr + + sizeof(struct wlan_frame_hdr) + + offsetof(struct wlan_bcn_frame, ie)); + return ie_data; +} + +/** + * util_scan_entry_ie_len() - function to read length of all tagged IEs + * @scan_entry: scan entry + * + * API, function to read length of all tagged IEs + * + * Return: length of all tagged IEs + */ +static inline uint16_t +util_scan_entry_ie_len(struct scan_cache_entry *scan_entry) +{ + struct element_info bcn_frm; + uint16_t ie_len = 0; + + bcn_frm = util_scan_entry_beacon_frame(scan_entry); + ie_len = (uint16_t) (bcn_frm.len - + sizeof(struct wlan_frame_hdr) - + offsetof(struct wlan_bcn_frame, ie)); + return ie_len; +} + +/** + * util_scan_entry_frame_len() - function to frame length + * @scan_entry: scan entry + * + * API, function to read frame length + * + * Return: frame length + */ +static inline uint32_t +util_scan_entry_frame_len(struct scan_cache_entry *scan_entry) +{ + return scan_entry->raw_frame.len; +} + +/** + * util_scan_entry_frame_ptr() - function to get frame ptr + * @scan_entry: scan entry + * + * API, function to read frame ptr + * + * Return: frame ptr + */ +static inline uint8_t* +util_scan_entry_frame_ptr(struct scan_cache_entry *scan_entry) +{ + return scan_entry->raw_frame.ptr; +} + +/** + * util_scan_entry_copy_ie_data() - function to get a copy of all tagged IEs + * @scan_entry: scan entry + * + * API, function to get a copy of all tagged IEs in passed memory + * + * Return: QDF_STATUS_SUCCESS if tagged IEs 
copied successfully + * QDF_STATUS_E_NOMEM if passed memory/length can't hold all tagged IEs + */ +static inline QDF_STATUS +util_scan_entry_copy_ie_data(struct scan_cache_entry *scan_entry, + uint8_t *iebuf, uint16_t *ie_len) +{ + u_int8_t *buff; + u_int16_t buff_len; + + /* iebuf can be NULL, ie_len must be a valid pointer. */ + QDF_ASSERT(ie_len != NULL); + if (!ie_len) + return QDF_STATUS_E_NULL_VALUE; + + buff = util_scan_entry_ie_data(scan_entry); + buff_len = util_scan_entry_ie_len(scan_entry); + /* + * If caller passed a buffer, check the length to make sure + * it's large enough. + * If no buffer is passed, just return the length of the IE blob. + */ + if (iebuf != NULL) { + if (*ie_len >= buff_len) { + qdf_mem_copy(iebuf, buff, buff_len); + *ie_len = buff_len; + return QDF_STATUS_SUCCESS; + } + } + + *ie_len = buff_len; + return QDF_STATUS_E_NOMEM; +} + +/** + * util_scan_free_cache_entry() - function to free scan + * cache entry + * @scan_entry: scan entry + * + * API, function to free scan cache entry + * + * Return: void + */ +static inline void +util_scan_free_cache_entry(struct scan_cache_entry *scan_entry) +{ + if (!scan_entry) + return; + if (scan_entry->alt_wcn_ie.ptr) + qdf_mem_free(scan_entry->alt_wcn_ie.ptr); + if (scan_entry->raw_frame.ptr) + qdf_mem_free(scan_entry->raw_frame.ptr); + qdf_mem_free(scan_entry); +} + +#define conv_ptr(_address, _base1, _base2) \ + ((_address != NULL) ? 
(((u_int8_t *) (_address) - \ + (u_int8_t *) (_base1)) + (u_int8_t *) (_base2)) : NULL) + +/** + * util_scan_copy_beacon_data() - copy beacon and update ie ptrs + * cache entry + * @new_entry: new scan entry + * @scan_entry: entry from where data is copied + * + * API, function to copy beacon and update ie ptrs + * + * Return: QDF_STATUS + */ +static inline QDF_STATUS +util_scan_copy_beacon_data(struct scan_cache_entry *new_entry, + struct scan_cache_entry *scan_entry) +{ + u_int8_t *new_ptr, *old_ptr; + struct ie_list *ie_lst; + + new_entry->raw_frame.ptr = + qdf_mem_malloc_atomic(scan_entry->raw_frame.len); + if (!new_entry->raw_frame.ptr) + return QDF_STATUS_E_NOMEM; + + qdf_mem_copy(new_entry->raw_frame.ptr, + scan_entry->raw_frame.ptr, + scan_entry->raw_frame.len); + new_entry->raw_frame.len = scan_entry->raw_frame.len; + new_ptr = new_entry->raw_frame.ptr; + old_ptr = scan_entry->raw_frame.ptr; + + new_entry->ie_list = scan_entry->ie_list; + + ie_lst = &new_entry->ie_list; + + /* New info_element needs also be added in ieee80211_parse_beacon */ + ie_lst->tim = conv_ptr(ie_lst->tim, old_ptr, new_ptr); + ie_lst->country = conv_ptr(ie_lst->country, old_ptr, new_ptr); + ie_lst->ssid = conv_ptr(ie_lst->ssid, old_ptr, new_ptr); + ie_lst->rates = conv_ptr(ie_lst->rates, old_ptr, new_ptr); + ie_lst->xrates = conv_ptr(ie_lst->xrates, old_ptr, new_ptr); + ie_lst->ds_param = conv_ptr(ie_lst->ds_param, old_ptr, new_ptr); + ie_lst->csa = conv_ptr(ie_lst->csa, old_ptr, new_ptr); + ie_lst->xcsa = conv_ptr(ie_lst->xcsa, old_ptr, new_ptr); + ie_lst->secchanoff = conv_ptr(ie_lst->secchanoff, old_ptr, new_ptr); + ie_lst->wpa = conv_ptr(ie_lst->wpa, old_ptr, new_ptr); + ie_lst->wcn = conv_ptr(ie_lst->wcn, old_ptr, new_ptr); + ie_lst->rsn = conv_ptr(ie_lst->rsn, old_ptr, new_ptr); + ie_lst->wps = conv_ptr(ie_lst->wps, old_ptr, new_ptr); + ie_lst->wmeinfo = conv_ptr(ie_lst->wmeinfo, old_ptr, new_ptr); + ie_lst->wmeparam = conv_ptr(ie_lst->wmeparam, old_ptr, new_ptr); + 
ie_lst->quiet = conv_ptr(ie_lst->quiet, old_ptr, new_ptr); + ie_lst->htcap = conv_ptr(ie_lst->htcap, old_ptr, new_ptr); + ie_lst->htinfo = conv_ptr(ie_lst->htinfo, old_ptr, new_ptr); + ie_lst->athcaps = conv_ptr(ie_lst->athcaps, old_ptr, new_ptr); + ie_lst->athextcaps = conv_ptr(ie_lst->athextcaps, old_ptr, new_ptr); + ie_lst->sfa = conv_ptr(ie_lst->sfa, old_ptr, new_ptr); + ie_lst->vendor = conv_ptr(ie_lst->vendor, old_ptr, new_ptr); + ie_lst->qbssload = conv_ptr(ie_lst->qbssload, old_ptr, new_ptr); + ie_lst->wapi = conv_ptr(ie_lst->wapi, old_ptr, new_ptr); + ie_lst->p2p = conv_ptr(ie_lst->p2p, old_ptr, new_ptr); + ie_lst->alt_wcn = conv_ptr(ie_lst->alt_wcn, old_ptr, new_ptr); + ie_lst->extcaps = conv_ptr(ie_lst->extcaps, old_ptr, new_ptr); + ie_lst->ibssdfs = conv_ptr(ie_lst->ibssdfs, old_ptr, new_ptr); + ie_lst->sonadv = conv_ptr(ie_lst->sonadv, old_ptr, new_ptr); + ie_lst->vhtcap = conv_ptr(ie_lst->vhtcap, old_ptr, new_ptr); + ie_lst->vhtop = conv_ptr(ie_lst->vhtop, old_ptr, new_ptr); + ie_lst->opmode = conv_ptr(ie_lst->opmode, old_ptr, new_ptr); + ie_lst->cswrp = conv_ptr(ie_lst->cswrp, old_ptr, new_ptr); + ie_lst->widebw = conv_ptr(ie_lst->widebw, old_ptr, new_ptr); + ie_lst->txpwrenvlp = conv_ptr(ie_lst->txpwrenvlp, old_ptr, new_ptr); + ie_lst->bwnss_map = conv_ptr(ie_lst->bwnss_map, old_ptr, new_ptr); + ie_lst->mdie = conv_ptr(ie_lst->mdie, old_ptr, new_ptr); + ie_lst->hecap = conv_ptr(ie_lst->hecap, old_ptr, new_ptr); + ie_lst->heop = conv_ptr(ie_lst->heop, old_ptr, new_ptr); + ie_lst->fils_indication = conv_ptr(ie_lst->fils_indication, + old_ptr, new_ptr); + ie_lst->esp = conv_ptr(ie_lst->esp, old_ptr, new_ptr); + ie_lst->mbo_oce = conv_ptr(ie_lst->mbo_oce, old_ptr, new_ptr); + + return QDF_STATUS_SUCCESS; +} +/** + * util_scan_copy_cache_entry() - function to create a copy + * of scan cache entry + * @scan_entry: scan entry + * + * API, function to create a copy of scan cache entry + * + * Return: copy of scan_entry + */ +static inline struct 
scan_cache_entry * +util_scan_copy_cache_entry(struct scan_cache_entry *scan_entry) +{ + struct scan_cache_entry *new_entry; + QDF_STATUS status; + + if (!scan_entry) + return NULL; + + new_entry = + qdf_mem_malloc_atomic(sizeof(*scan_entry)); + if (!new_entry) + return NULL; + + qdf_mem_copy(new_entry, + scan_entry, sizeof(*scan_entry)); + + if (scan_entry->alt_wcn_ie.ptr) { + new_entry->alt_wcn_ie.ptr = + qdf_mem_malloc_atomic(scan_entry->alt_wcn_ie.len); + if (!new_entry->alt_wcn_ie.ptr) { + qdf_mem_free(new_entry); + return NULL; + } + qdf_mem_copy(new_entry->alt_wcn_ie.ptr, + scan_entry->alt_wcn_ie.ptr, + scan_entry->alt_wcn_ie.len); + new_entry->alt_wcn_ie.len = + scan_entry->alt_wcn_ie.len; + } + + status = util_scan_copy_beacon_data(new_entry, scan_entry); + if (QDF_IS_STATUS_ERROR(status)) { + util_scan_free_cache_entry(new_entry); + return NULL; + } + + return new_entry; +} + +/** + * util_scan_entry_channel() - function to read channel info + * @scan_entry: scan entry + * + * API, function to read channel info + * + * Return: channel info + */ +static inline struct channel_info* +util_scan_entry_channel(struct scan_cache_entry *scan_entry) +{ + return &(scan_entry->channel); +} + +/** + * util_scan_entry_channel_num() - function to read channel number + * @scan_entry: scan entry + * + * API, function to read channel number + * + * Return: channel number + */ +static inline uint8_t +util_scan_entry_channel_num(struct scan_cache_entry *scan_entry) +{ + return scan_entry->channel.chan_idx; +} + +/** + * util_scan_entry_erpinfo() - function to read erp info + * @scan_entry: scan entry + * + * API, function to read erp info + * + * Return: erp info + */ +static inline uint8_t +util_scan_entry_erpinfo(struct scan_cache_entry *scan_entry) +{ + return scan_entry->erp; +} + +/** + * util_scan_entry_rates() - function to read supported rates IE + * @scan_entry: scan entry + * + * API, function to read supported rates IE + * + * Return: basic ratesie or NULL if ie 
is not present + */ +static inline uint8_t* +util_scan_entry_rates(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.rates; +} + +/** + * util_scan_entry_xrates()- function to read extended supported rates IE + * @scan_entry: scan entry + * + * API, function to read extended supported rates IE + * + * Return: extended supported ratesie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_xrates(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.xrates; +} + +/** + * util_scan_entry_rsn()- function to read rsn IE + * @scan_entry: scan entry + * + * API, function to read rsn IE + * + * Return: rsnie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_rsn(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.rsn; +} + +/** + * util_scan_get_rsn_len()- function to read rsn IE length if present + * @scan_entry: scan entry + * + * API, function to read rsn length if present + * + * Return: rsnie length + */ +static inline uint8_t +util_scan_get_rsn_len(struct scan_cache_entry *scan_entry) +{ + if (scan_entry && scan_entry->ie_list.rsn) + return scan_entry->ie_list.rsn[1] + 2; + else + return 0; +} + + +/** + * util_scan_entry_wpa() - function to read wpa IE + * @scan_entry: scan entry + * + * API, function to read wpa IE + * + * Return: wpaie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_wpa(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.wpa; +} + +/** + * util_scan_get_wpa_len()- function to read wpa IE length if present + * @scan_entry: scan entry + * + * API, function to read wpa ie length if present + * + * Return: wpa ie length + */ +static inline uint8_t +util_scan_get_wpa_len(struct scan_cache_entry *scan_entry) +{ + if (scan_entry && scan_entry->ie_list.wpa) + return scan_entry->ie_list.wpa[1] + 2; + else + return 0; +} + + +/** + * util_scan_entry_wapi() - function to read wapi IE + * @scan_entry: scan entry + * + * 
API, function to read wapi IE + * + * Return: wapiie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_wapi(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.wapi; +} + +/** + * util_scan_entry_wps() - function to read wps IE + * @scan_entry: scan entry + * + * API, function to read wps IE + * + * Return: wpsie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_wps(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.wps; +} + +/** + * util_scan_entry_sfa() - function to read sfa IE + * @scan_entry: scan entry + * + * API, function to read sfa IE + * + * Return: sfaie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_sfa(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.sfa; +} + +/** + * util_scan_entry_ds_param() - function to read ds params + * @scan_entry: scan entry + * + * API, function to read ds params + * + * Return: ds params or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_ds_param(struct scan_cache_entry *scan_entry) +{ + if (scan_entry) + return scan_entry->ie_list.ds_param; + else + return NULL; +} + +/** + * util_scan_entry_csa() - function to read csa IE + * @scan_entry: scan entry + * + * API, function to read csa IE + * + * Return: csaie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_csa(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.csa; +} + +/** + * util_scan_entry_xcsa() - function to read extended csa IE + * @scan_entry: scan entry + * + * API, function to read extended csa IE + * + * Return: extended csaie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_xcsa(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.xcsa; +} + +/** + * util_scan_entry_htinfo() - function to read htinfo IE + * @scan_entry: scan entry + * + * API, function to read htinfo IE + * + * Return: htinfoie or NULL if ie is not 
present + */ +static inline uint8_t* +util_scan_entry_htinfo(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.htinfo; +} + + +/** + * util_scan_entry_htcap() - function to read htcap IE + * @scan_entry: scan entry + * + * API, function to read htcap IE + * + * Return: htcapie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_htcap(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.htcap; +} + +/** + * util_scan_entry_vhtcap() - function to read vhtcap IE + * @scan_entry: scan entry + * + * API, function to read vhtcap IE + * + * Return: vhtcapie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_vhtcap(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.vhtcap; +} + +/** + * util_scan_entry_vhtop() - function to read vhtop IE + * @scan_entry: scan entry + * + * API, function to read vhtop IE + * + * Return: vhtopie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_vhtop(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.vhtop; +} + +/** + * util_scan_entry_quiet() - function to read quiet IE + * @scan_entry: scan entry + * + * API, function to read quiet IE + * + * Return: quietie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_quiet(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.quiet; +} + +/** + * util_scan_entry_qbssload() - function to read qbss load IE + * @scan_entry: scan entry + * + * API, function to read qbss load IE + * + * Return: qbss loadie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_qbssload(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.qbssload; +} + +/** + * util_scan_entry_vendor() - function to read vendor IE + * @scan_entry: scan entry + * + * API, function to read vendor IE + * + * Return: vendorie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_vendor(struct 
scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.vendor; +} + +/** + * util_scan_entry_country() - function to read country IE + * @scan_entry: scan entry + * + * API, function to read country IE + * + * Return: countryie or NULL if ie is not present + */ +static inline struct wlan_country_ie* +util_scan_entry_country(struct scan_cache_entry *scan_entry) +{ + return (struct wlan_country_ie *)scan_entry->ie_list.country; +} + +/** + * util_scan_entry_copy_country() - function to copy country name + * @scan_entry: scan entry + * @cntry: out buffer + * + * API, function to copy country name code string in given memory @centry + * + * Return: QDF_STATUS_SUCCESS if successfully copied country name + * QDF_STATUS_E_INVAL if passed buffer is null + * QDF_STATUS_E_NOMEM if scan entry dont have country IE + */ +static inline QDF_STATUS +util_scan_entry_copy_country(struct scan_cache_entry *scan_entry, + uint8_t *cntry) +{ + struct wlan_country_ie *country_ie; + + if (!cntry) + return QDF_STATUS_E_INVAL; + + country_ie = util_scan_entry_country(scan_entry); + + if (!country_ie) + return QDF_STATUS_E_NOMEM; + + qdf_mem_copy(cntry, country_ie->cc, 3); + + return QDF_STATUS_SUCCESS; +} + +/** + * util_scan_entry_wmeinfo() - function to read wme info ie + * @scan_entry: scan entry + * + * API, function to read wme info ie + * + * Return: wme infoie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_wmeinfo(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.wmeinfo; +} + +/** + * util_scan_entry_wmeparam() - function to read wme param ie + * @scan_entry: scan entry + * + * API, function to read wme param ie + * + * Return: wme paramie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_wmeparam(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.wmeparam; +} + +/** + * util_scan_entry_age() - function to read age of scan entry + * @scan_entry: scan entry + * + * API, function to read 
age of scan entry + * + * Return: age in ms + */ +static inline uint32_t +util_scan_entry_age(struct scan_cache_entry *scan_entry) +{ + unsigned long ts = scan_entry->scan_entry_time; + + return qdf_mc_timer_get_system_time() - ts; +} + +/** + * util_scan_mlme_info() - function to read mlme info struct + * @scan_entry: scan entry + * + * API, function to read mlme info struct + * + * Return: mlme info + */ +static inline struct mlme_info* +util_scan_mlme_info(struct scan_cache_entry *scan_entry) +{ + return &scan_entry->mlme_info; +} + +/** + * util_scan_entry_bss_type() - function to read bss type + * @scan_entry: scan entry + * + * API, function to read bss type + * + * Return: bss type + */ +static inline enum wlan_bss_type +util_scan_entry_bss_type(struct scan_cache_entry *scan_entry) +{ + if (scan_entry->cap_info.value & WLAN_CAPINFO_ESS) + return WLAN_TYPE_BSS; + else if (scan_entry->cap_info.value & WLAN_CAPINFO_IBSS) + return WLAN_TYPE_IBSS; + else + return WLAN_TYPE_ANY; +} + +/** + * util_scan_entry_privacy() - function to check if privacy is enebled + * @scan_entry: scan entry + * + * API, function to check if privacy is enebled + * + * Return: true if privacy is enabled, false other wise + */ +static inline bool +util_scan_entry_privacy(struct scan_cache_entry *scan_entry) +{ + return (scan_entry->cap_info.value & + WLAN_CAPINFO_PRIVACY) ? 
true : false; +} + +/** + * util_scan_entry_athcaps() - function to read ath caps vendor ie + * @scan_entry: scan entry + * + * API, function to read ath caps vendor ie + * + * Return: ath caps vendorie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_athcaps(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.athcaps; +} + +/** + * util_scan_entry_athextcaps() - function to read ath extcaps vendor ie + * @scan_entry: scan entry + * + * API, function to read ath extcaps vendor ie + * + * Return: ath extcaps vendorie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_athextcaps(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.athextcaps; +} + +/** + * util_scan_entry_bwnss_map() - function to read bwnss_map ie + * @scan_entry: scan entry + * + * API, function to read bwnss_map ie + * + * Return: bwnss_map ie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_bwnss_map(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.bwnss_map; +} + +/** + * util_scan_entry_sonie() - function to read son ie + * @scan_entry: scan entry + * + * API, function to read son ie + * + * Return: son ie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_sonie(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.sonadv; +} + +/** + * util_scan_entry_widebw() - function to read wide band chan switch sub elem ie + * @scan_entry: scan entry + * + * API, function to read wide band chan switch sub elem ie + * + * Return: wide band chan switch sub elem or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_widebw(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.widebw; +} + +/** + * util_scan_entry_secchanoff() - function to read secondary channel offset ie + * @scan_entry: scan entry + * + * API, function to read secondary channel offset ie + * + * Return: secondary channel offset element or 
NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_secchanoff(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.secchanoff; +} + +/** + * util_scan_entry_cswrp() - function to read channel switch wrapper ie + * @scan_entry: scan entry + * + * API, function to read channel switch wrapper ie + * + * Return: channel switch wrapper element or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_cswrp(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.cswrp; +} + +/** + * util_scan_entry_omn() - function to read operating mode notification ie + * @scan_entry: scan entry + * + * API, function to read operating mode notification + * + * Return: operating mode notification element or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_omn(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.opmode; +} + +/** + * util_scan_entry_extcaps() - function to read extcap ie + * @scan_entry: scan entry + * + * API, function to read extcap ie + * + * Return: extcap element or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_extcaps(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.extcaps; +} + +/** + * util_scan_entry_athcaps() - function to read ath caps vendor ie + * @scan_entry: scan entry + * + * API, function to read ath caps vendor ie + * + * Return: ath caps vendorie or NULL if ie is not present + */ +static inline struct mlme_info* +util_scan_entry_mlme_info(struct scan_cache_entry *scan_entry) +{ + return &(scan_entry->mlme_info); +} + +/** + * util_scan_entry_hecap() - function to read he caps vendor ie + * @scan_entry: scan entry + * + * API, function to read he caps vendor ie + * + * Return: he caps vendorie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_hecap(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.hecap; +} + + +/** + * util_scan_entry_heop() - function to 
read heop vendor ie + * @scan_entry: scan entry + * + * API, function to read heop vendor ie + * + * Return, heop vendorie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_heop(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.heop; +} + +/** + * util_scan_entry_muedca() - function to read MU-EDCA IE + * @scan_entry: scan entry + * + * API, function to read MU-EDCA IE + * + * Return, MUEDCA IE or NULL if IE is not present + */ +static inline uint8_t* +util_scan_entry_muedca(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.muedca; +} + +/** + * util_scan_entry_spatial_reuse_parameter() - function to read spatial reuse + * parameter ie + * @scan_entry: scan entry + * + * API, function to read scan_entry reuse parameter ie + * + * Return, spatial reuse parameter ie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_spatial_reuse_parameter(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.srp; +} + +/** + * util_scan_entry_fils_indication() - function to read FILS indication ie + * @scan_entry: scan entry + * + * API, function to read FILS indication ie + * + * Return, FILS indication ie or NULL if ie is not present + */ +static inline uint8_t* +util_scan_entry_fils_indication(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.fils_indication; +} + +/** + * util_get_last_scan_time() - function to get last scan time on this pdev + * @vdev: vdev object + * + * API, function to read last scan time on this pdev + * + * Return: qdf_time_t + */ +qdf_time_t +util_get_last_scan_time(struct wlan_objmgr_vdev *vdev); + +/** + * util_scan_entry_update_mlme_info() - function to update mlme info + * @scan_entry: scan entry object + * + * API, function to update mlme info in scan DB + * + * Return: QDF_STATUS + */ +QDF_STATUS +util_scan_entry_update_mlme_info(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_entry); + +/** + * 
util_scan_is_hidden_ssid() - function to check if ssid is hidden + * @ssid: struct ie_ssid object + * + * API, function to check if ssid is hidden + * + * Return: true if ap is hidden, false otherwise + */ +bool +util_scan_is_hidden_ssid(struct ie_ssid *ssid); + +/** + * util_scan_entry_is_hidden_ap() - function to check if ap is hidden + * @scan_entry: scan entry + * + * API, function to check if ap is hidden + * + * Return: true if ap is hidden, false otherwise + */ +static inline bool +util_scan_entry_is_hidden_ap(struct scan_cache_entry *scan_entry) +{ + return util_scan_is_hidden_ssid( + (struct ie_ssid *)scan_entry->ie_list.ssid); +} + +/** + * util_scan_entry_espinfo() - function to read ESP info + * @scan_entry: scan entry + * + * API, function to read ESP info + * + * Return: erp info + */ +static inline uint8_t * +util_scan_entry_esp_info(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.esp; +} + +/** + * util_scan_entry_mbo_oce() - function to read MBO/OCE ie + * @scan_entry: scan entry + * + * API, function to read MBO/OCE ie + * + * Return: MBO/OCE ie + */ +static inline uint8_t * +util_scan_entry_mbo_oce(struct scan_cache_entry *scan_entry) +{ + return scan_entry->ie_list.mbo_oce; +} + +/** + * util_scan_scm_chan_to_band() - function to tell band for channel number + * @chan: Channel number + * + * Return: Band information as per channel + */ +enum wlan_band util_scan_scm_chan_to_band(uint32_t chan); + +/** + * util_scan_scm_freq_to_band() - API to get band from frequency + * @freq: Channel frequency + * + * Return: Band information as per frequency + */ +enum wlan_band util_scan_scm_freq_to_band(uint16_t freq); + +/** + * util_is_scan_completed() - function to get scan complete status + * @event: scan event + * @success: true if scan complete success, false otherwise + * + * API, function to get the scan result + * + * Return: true if scan complete, false otherwise + */ +bool util_is_scan_completed(struct scan_event *event, bool 
*success); + +/** + * util_scan_is_null_ssid() - to check for NULL ssid + * @ssid: ssid + * + * Return: true if NULL ssid else false + */ +static inline bool util_scan_is_null_ssid(struct wlan_ssid *ssid) +{ + uint32_t ssid_length; + uint8_t *ssid_str; + + if (ssid->length == 0) + return true; + + /* Consider 0 or space for hidden SSID */ + if (0 == ssid->ssid[0]) + return true; + + ssid_length = ssid->length; + ssid_str = ssid->ssid; + + while (ssid_length) { + if (*ssid_str != ASCII_SPACE_CHARACTER && + *ssid_str) + break; + ssid_str++; + ssid_length--; + } + + if (ssid_length == 0) + return true; + + return false; +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..009ee387ce86fbbeb07069057a76cf67a407780a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_tgt_api.c @@ -0,0 +1,357 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/* + * DOC: contains scan south bound interface definitions + */ + +#include +#include +#include "../../core/src/wlan_scan_main.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include <../../core/src/wlan_scan_manager.h> + +static inline struct wlan_lmac_if_scan_tx_ops * +wlan_psoc_get_scan_txops(struct wlan_objmgr_psoc *psoc) +{ + return &((psoc->soc_cb.tx_ops.scan)); +} + +static inline struct wlan_lmac_if_scan_tx_ops * +wlan_vdev_get_scan_txops(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + scm_err("NULL psoc"); + return NULL; + } + + return wlan_psoc_get_scan_txops(psoc); +} + +static inline struct wlan_lmac_if_scan_rx_ops * +wlan_vdev_get_scan_rxops(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc *psoc = NULL; + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + scm_err("NULL psoc"); + return NULL; + } + + return &((psoc->soc_cb.rx_ops.scan)); +} + +#ifdef FEATURE_WLAN_SCAN_PNO + +QDF_STATUS tgt_scan_pno_start(struct wlan_objmgr_vdev *vdev, + struct pno_scan_req_params *req) +{ + struct wlan_lmac_if_scan_tx_ops *scan_ops; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_vdev_get_psoc(vdev); + + if (!psoc) { + scm_err("NULL PSOC"); + return QDF_STATUS_E_FAILURE; + } + scan_ops = wlan_psoc_get_scan_txops(psoc); + if (!scan_ops) { + scm_err("NULL scan_ops"); + return QDF_STATUS_E_FAILURE; + } + /* invoke wmi_unified_pno_start_cmd() */ + QDF_ASSERT(scan_ops->pno_start); + if (scan_ops->pno_start) + return scan_ops->pno_start(psoc, req); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_scan_pno_stop(struct wlan_objmgr_vdev *vdev, + uint8_t vdev_id) +{ + struct wlan_lmac_if_scan_tx_ops *scan_ops; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_vdev_get_psoc(vdev); + + if (!psoc) { + scm_err("NULL PSOC"); + return QDF_STATUS_E_FAILURE; + } + scan_ops = wlan_psoc_get_scan_txops(psoc); + if (!scan_ops) { + scm_err("NULL 
scan_ops"); + return QDF_STATUS_E_FAILURE; + } + /* invoke wmi_unified_pno_stop_cmd() */ + QDF_ASSERT(scan_ops->pno_stop); + if (scan_ops->pno_stop) + return scan_ops->pno_stop(psoc, vdev_id); + + return QDF_STATUS_SUCCESS; +} +#endif + +QDF_STATUS +tgt_scan_start(struct scan_start_request *req) +{ + struct wlan_lmac_if_scan_tx_ops *scan_ops; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev = req->vdev; + + if (!vdev) { + scm_err("vdev is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + psoc = wlan_vdev_get_psoc(vdev); + pdev = wlan_vdev_get_pdev(vdev); + if (!psoc || !pdev) { + scm_err("psoc: 0x%pK or pdev: 0x%pK is NULL", psoc, pdev); + return QDF_STATUS_E_NULL_VALUE; + } + + scan_ops = wlan_psoc_get_scan_txops(psoc); + /* invoke wmi_unified_scan_start_cmd_send() */ + QDF_ASSERT(scan_ops->scan_start); + if (scan_ops->scan_start) + return scan_ops->scan_start(pdev, req); + else + return QDF_STATUS_SUCCESS; +} + + +QDF_STATUS +tgt_scan_cancel(struct scan_cancel_request *req) +{ + struct wlan_lmac_if_scan_tx_ops *scan_ops; + struct wlan_objmgr_psoc *psoc; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev = req->vdev; + + if (!vdev) { + scm_err("vdev is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + psoc = wlan_vdev_get_psoc(vdev); + pdev = wlan_vdev_get_pdev(vdev); + if (!psoc || !pdev) { + scm_err("psoc: 0x%pK or pdev: 0x%pK is NULL", psoc, pdev); + return QDF_STATUS_E_NULL_VALUE; + } + scan_ops = wlan_psoc_get_scan_txops(psoc); + /* invoke wmi_unified_scan_stop_cmd_send() */ + QDF_ASSERT(scan_ops->scan_cancel); + if (scan_ops->scan_cancel) + return scan_ops->scan_cancel(pdev, &req->cancel_req); + else + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +tgt_scan_register_ev_handler(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_scan_tx_ops *scan_ops = NULL; + + scan_ops = wlan_psoc_get_scan_txops(psoc); + /* invoke wmi_unified_register_event_handler() + * since event id, handler function and 
context is + * already known to offload lmac, passing NULL as argument. + * DA can pass necessary arguments by clubing then into + * some structure. + */ + QDF_ASSERT(scan_ops->scan_reg_ev_handler); + if (scan_ops->scan_reg_ev_handler) + return scan_ops->scan_reg_ev_handler(psoc, NULL); + else + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +tgt_scan_unregister_ev_handler(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_scan_tx_ops *scan_ops = NULL; + + scan_ops = wlan_psoc_get_scan_txops(psoc); + /* invoke wmi_unified_register_event_handler() + * since event id, handler function and context is + * already known to offload lmac, passing NULL as argument. + * DA can pass necessary arguments by clubing then into + * some structure. + */ + QDF_ASSERT(scan_ops->scan_unreg_ev_handler); + if (scan_ops->scan_unreg_ev_handler) + return scan_ops->scan_unreg_ev_handler(psoc, NULL); + else + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +tgt_scan_event_handler(struct wlan_objmgr_psoc *psoc, + struct scan_event_info *event_info) +{ + struct scheduler_msg msg = {0}; + struct scan_event *event = &event_info->event; + uint8_t vdev_id = event->vdev_id; + QDF_STATUS status; + + if (!psoc || !event_info) { + scm_err("psoc: 0x%pK, event_info: 0x%pK", psoc, event_info); + return QDF_STATUS_E_NULL_VALUE; + } + scm_debug("vdev: %d, type: %d, reason: %d, freq: %d, req: %d, scanid: %d", + vdev_id, event->type, event->reason, event->chan_freq, + event->requester, event->scan_id); + + event_info->vdev = + wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + vdev_id, WLAN_SCAN_ID); + if (!event_info->vdev) { + scm_err("null vdev, vdev_id: %d, psoc: 0x%pK", vdev_id, psoc); + return QDF_STATUS_E_INVAL; + } + msg.bodyptr = event_info; + msg.callback = scm_scan_event_handler; + msg.flush_callback = scm_scan_event_flush_callback; + + status = scheduler_post_message(QDF_MODULE_ID_SCAN, + QDF_MODULE_ID_SCAN, + QDF_MODULE_ID_SCAN, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + 
wlan_objmgr_vdev_release_ref(event_info->vdev, WLAN_SCAN_ID); + } + + return status; +} + +QDF_STATUS tgt_scan_bcn_probe_rx_callback(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer, qdf_nbuf_t buf, + struct mgmt_rx_event_params *rx_param, + enum mgmt_frame_type frm_type) +{ + struct scheduler_msg msg = {0}; + struct scan_bcn_probe_event *bcn = NULL; + QDF_STATUS status; + uint32_t scan_queue_size = 0; + + if ((frm_type != MGMT_PROBE_RESP) && + (frm_type != MGMT_BEACON)) { + scm_err("frame is not beacon or probe resp"); + status = QDF_STATUS_E_INVAL; + goto free; + } + bcn = qdf_mem_malloc_atomic(sizeof(*bcn)); + + if (!bcn) { + scm_debug_rl("Failed to allocate memory for bcn"); + status = QDF_STATUS_E_NOMEM; + goto free; + } + bcn->rx_data = + qdf_mem_malloc_atomic(sizeof(*rx_param)); + if (!bcn->rx_data) { + scm_debug_rl("Failed to allocate memory for rx_data"); + status = QDF_STATUS_E_NOMEM; + goto free; + } + + if (frm_type == MGMT_PROBE_RESP) + bcn->frm_type = MGMT_SUBTYPE_PROBE_RESP; + else + bcn->frm_type = MGMT_SUBTYPE_BEACON; + + /* Check if the beacon/probe frame can be posted in the scan queue */ + status = scheduler_get_queue_size(QDF_MODULE_ID_SCAN, &scan_queue_size); + if (!QDF_IS_STATUS_SUCCESS(status) || + scan_queue_size > MAX_BCN_PROBE_IN_SCAN_QUEUE) { + scm_debug_rl("Dropping beacon/probe frame, queue size %d", + scan_queue_size); + status = QDF_STATUS_E_FAILURE; + goto free; + } + + status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_SCAN_ID); + if (QDF_IS_STATUS_ERROR(status)) { + scm_info("unable to get reference"); + goto free; + } + + bcn->psoc = psoc; + bcn->buf = buf; + qdf_mem_copy(bcn->rx_data, rx_param, sizeof(*rx_param)); + + msg.bodyptr = bcn; + msg.callback = scm_handle_bcn_probe; + msg.flush_callback = scm_bcn_probe_flush_callback; + + status = scheduler_post_message(QDF_MODULE_ID_SCAN, + QDF_MODULE_ID_SCAN, + QDF_MODULE_ID_SCAN, &msg); + + if (QDF_IS_STATUS_SUCCESS(status)) + return status; + + 
wlan_objmgr_psoc_release_ref(psoc, WLAN_SCAN_ID); + scm_err("failed to post to QDF_MODULE_ID_SCAN"); + +free: + if (bcn && bcn->rx_data) + qdf_mem_free(bcn->rx_data); + if (bcn) + qdf_mem_free(bcn); + if (buf) + qdf_nbuf_free(buf); + + return status; +} + +QDF_STATUS +tgt_scan_set_max_active_scans(struct wlan_objmgr_psoc *psoc, + uint32_t max_active_scans) +{ + struct scan_default_params *scan_params = NULL; + + if (!psoc) { + scm_err("null psoc"); + return QDF_STATUS_E_NULL_VALUE; + } + + scan_params = wlan_scan_psoc_get_def_params(psoc); + if (!scan_params) { + scm_err("wlan_scan_psoc_get_def_params returned NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + scan_params->max_active_scans_allowed = max_active_scans; + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..d9f130687d52148fe0bde4290355d735930a9cf4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_ucfg_api.c @@ -0,0 +1,2492 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/* + * DOC: contains scan north bound interface definitions + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../../core/src/wlan_scan_main.h" +#include "../../core/src/wlan_scan_manager.h" +#include "../../core/src/wlan_scan_cache_db.h" +#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD +#include +#endif +#ifdef WLAN_POLICY_MGR_ENABLE +#include +#include +#endif + +QDF_STATUS ucfg_scan_register_bcn_cb(struct wlan_objmgr_psoc *psoc, + update_beacon_cb cb, enum scan_cb_type type) +{ + return scm_scan_register_bcn_cb(psoc, cb, type); +} + +qdf_list_t *ucfg_scan_get_result(struct wlan_objmgr_pdev *pdev, + struct scan_filter *filter) +{ + return scm_get_scan_result(pdev, filter); +} + +QDF_STATUS ucfg_scan_db_iterate(struct wlan_objmgr_pdev *pdev, + scan_iterator_func func, void *arg) +{ + return scm_iterate_scan_db(pdev, func, arg); +} + +QDF_STATUS ucfg_scan_purge_results(qdf_list_t *scan_list) +{ + return scm_purge_scan_results(scan_list); +} + +QDF_STATUS ucfg_scan_flush_results(struct wlan_objmgr_pdev *pdev, + struct scan_filter *filter) +{ + return scm_flush_results(pdev, filter); +} + +void ucfg_scan_filter_valid_channel(struct wlan_objmgr_pdev *pdev, + uint8_t *chan_list, uint32_t num_chan) +{ + scm_filter_valid_channel(pdev, chan_list, num_chan); +} + +QDF_STATUS ucfg_scan_init(void) +{ + QDF_STATUS status; + + status = wlan_objmgr_register_psoc_create_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_psoc_created_notification, NULL); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("Failed to register psoc create handler"); + goto fail_create_psoc; + } + + status = wlan_objmgr_register_psoc_destroy_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_psoc_destroyed_notification, NULL); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("Failed to create psoc delete handler"); + goto fail_psoc_destroy; + } + scm_debug("scan psoc create and delete handler registered with objmgr"); + + status = 
wlan_objmgr_register_vdev_create_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_vdev_created_notification, NULL); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("Failed to register vdev create handler"); + goto fail_pdev_create; + } + + status = wlan_objmgr_register_vdev_destroy_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_vdev_destroyed_notification, NULL); + if (QDF_IS_STATUS_SUCCESS(status)) { + scm_debug("scan vdev create and delete handler registered with objmgr"); + return QDF_STATUS_SUCCESS; + } + + scm_err("Failed to destroy vdev delete handler"); + wlan_objmgr_unregister_vdev_create_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_vdev_created_notification, NULL); +fail_pdev_create: + wlan_objmgr_unregister_psoc_destroy_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_psoc_destroyed_notification, NULL); +fail_psoc_destroy: + wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_psoc_created_notification, NULL); +fail_create_psoc: + return status; +} + +QDF_STATUS ucfg_scan_deinit(void) +{ + QDF_STATUS status; + + status = wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_psoc_created_notification, NULL); + if (status != QDF_STATUS_SUCCESS) + scm_err("Failed to unregister psoc create handler"); + + status = wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_SCAN, + wlan_scan_psoc_destroyed_notification, NULL); + if (status != QDF_STATUS_SUCCESS) + scm_err("Failed to unregister psoc delete handler"); + + status = wlan_objmgr_unregister_vdev_create_handler(WLAN_UMAC_COMP_SCAN, + wlan_scan_vdev_created_notification, NULL); + if (status != QDF_STATUS_SUCCESS) + scm_err("Failed to unregister vdev create handler"); + + status = wlan_objmgr_unregister_vdev_destroy_handler( + WLAN_UMAC_COMP_SCAN, + wlan_scan_vdev_destroyed_notification, NULL); + if (status != QDF_STATUS_SUCCESS) + scm_err("Failed to unregister vdev delete handler"); + + return status; +} + +#ifdef FEATURE_WLAN_SCAN_PNO + +QDF_STATUS ucfg_scan_pno_start(struct 
wlan_objmgr_vdev *vdev, + struct pno_scan_req_params *req) +{ + struct scan_vdev_obj *scan_vdev_obj; + QDF_STATUS status; + + scan_vdev_obj = wlan_get_vdev_scan_obj(vdev); + if (!scan_vdev_obj) { + scm_err("null scan_vdev_obj"); + return QDF_STATUS_E_INVAL; + } + if (scan_vdev_obj->pno_in_progress) { + scm_err("pno already in progress"); + return QDF_STATUS_E_ALREADY; + } + + status = tgt_scan_pno_start(vdev, req); + if (QDF_IS_STATUS_ERROR(status)) + scm_err("pno start failed"); + else + scan_vdev_obj->pno_in_progress = true; + + return status; +} + +QDF_STATUS ucfg_scan_pno_stop(struct wlan_objmgr_vdev *vdev) +{ + struct scan_vdev_obj *scan_vdev_obj; + QDF_STATUS status; + + scan_vdev_obj = wlan_get_vdev_scan_obj(vdev); + if (!scan_vdev_obj) { + scm_err("null scan_vdev_obj"); + return QDF_STATUS_E_INVAL; + } + if (!scan_vdev_obj->pno_in_progress) { + scm_debug("pno already stopped"); + return QDF_STATUS_E_ALREADY; + } + + status = tgt_scan_pno_stop(vdev, wlan_vdev_get_id(vdev)); + if (QDF_IS_STATUS_ERROR(status)) + scm_err("pno start failed"); + else + scan_vdev_obj->pno_in_progress = false; + + return status; +} + +bool ucfg_scan_get_pno_in_progress(struct wlan_objmgr_vdev *vdev) +{ + struct scan_vdev_obj *scan_vdev_obj; + + scan_vdev_obj = wlan_get_vdev_scan_obj(vdev); + if (!scan_vdev_obj) { + scm_err("null scan_vdev_obj"); + return false; + } + + return scan_vdev_obj->pno_in_progress; +} + +bool ucfg_scan_get_pno_match(struct wlan_objmgr_vdev *vdev) +{ + struct scan_vdev_obj *scan_vdev_obj; + + scan_vdev_obj = wlan_get_vdev_scan_obj(vdev); + if (!scan_vdev_obj) { + scm_err("null scan_vdev_obj"); + return false; + } + + return scan_vdev_obj->pno_match_evt_received; +} + +static QDF_STATUS +wlan_pno_global_init(struct pno_def_config *pno_def) +{ + struct nlo_mawc_params *mawc_cfg; + + qdf_wake_lock_create(&pno_def->pno_wake_lock, "wlan_pno_wl"); + mawc_cfg = &pno_def->mawc_params; + pno_def->channel_prediction = SCAN_PNO_CHANNEL_PREDICTION; + 
pno_def->top_k_num_of_channels = SCAN_TOP_K_NUM_OF_CHANNELS; + pno_def->stationary_thresh = SCAN_STATIONARY_THRESHOLD; + pno_def->channel_prediction_full_scan = + SCAN_CHANNEL_PREDICTION_FULL_SCAN_MS; + pno_def->scan_timer_repeat_value = SCAN_PNO_DEF_SCAN_TIMER_REPEAT; + pno_def->slow_scan_multiplier = SCAN_PNO_DEF_SLOW_SCAN_MULTIPLIER; + pno_def->dfs_chnl_scan_enabled = true; + pno_def->adaptive_dwell_mode = SCAN_ADAPTIVE_PNOSCAN_DWELL_MODE; + mawc_cfg->enable = SCAN_MAWC_NLO_ENABLED; + mawc_cfg->exp_backoff_ratio = SCAN_MAWC_NLO_EXP_BACKOFF_RATIO; + mawc_cfg->init_scan_interval = SCAN_MAWC_NLO_INIT_SCAN_INTERVAL; + mawc_cfg->max_scan_interval = SCAN_MAWC_NLO_MAX_SCAN_INTERVAL; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +wlan_pno_global_deinit(struct pno_def_config *pno_def) +{ + qdf_wake_lock_destroy(&pno_def->pno_wake_lock); + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_POLICY_MGR_ENABLE +/* + * ucfg_scan_update_pno_dwell_time() - update active and passive dwell time + * depending on active concurrency modes + * @vdev: vdev object pointer + * @req: scan request + * + * Return: void + */ +static void ucfg_scan_update_pno_dwell_time(struct wlan_objmgr_vdev *vdev, + struct pno_scan_req_params *req, struct scan_default_params *scan_def) +{ + bool sap_or_p2p_present; + struct wlan_objmgr_psoc *psoc; + + psoc = wlan_vdev_get_psoc(vdev); + + if (!psoc) + return; + + sap_or_p2p_present = policy_mgr_mode_specific_connection_count( + psoc, PM_SAP_MODE, NULL) || + policy_mgr_mode_specific_connection_count( + psoc, PM_P2P_GO_MODE, NULL) || + policy_mgr_mode_specific_connection_count( + psoc, PM_P2P_CLIENT_MODE, NULL); + + if (sap_or_p2p_present) { + req->active_dwell_time = scan_def->conc_active_dwell; + req->passive_dwell_time = scan_def->conc_passive_dwell; + } + +} +#else +static inline void ucfg_scan_update_pno_dwell_time(struct wlan_objmgr_vdev *vdev, + struct pno_scan_req_params *req, struct scan_default_params *scan_def){} +#endif + +QDF_STATUS 
+ucfg_scan_get_pno_def_params(struct wlan_objmgr_vdev *vdev, + struct pno_scan_req_params *req) +{ + struct scan_default_params *scan_def; + struct wlan_scan_obj *scan; + struct pno_def_config *pno_def; + + if (!vdev || !req) { + scm_err("vdev: 0x%pK, req: 0x%pK", + vdev, req); + return QDF_STATUS_E_INVAL; + } + + scan = wlan_vdev_get_scan_obj(vdev); + if (!scan) { + scm_err("scan is NULL"); + return QDF_STATUS_E_INVAL; + } + scan_def = wlan_vdev_get_def_scan_params(vdev); + if (!scan_def) { + scm_err("wlan_vdev_get_def_scan_params returned NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + pno_def = &scan->pno_cfg; + req->active_dwell_time = scan_def->active_dwell; + req->passive_dwell_time = scan_def->passive_dwell; + req->scan_random.randomize = scan_def->enable_mac_spoofing; + + /* + * Update active and passive dwell time depending + * upon the present active concurrency mode + */ + ucfg_scan_update_pno_dwell_time(vdev, req, scan_def); + req->adaptive_dwell_mode = pno_def->adaptive_dwell_mode; + req->pno_channel_prediction = pno_def->channel_prediction; + req->top_k_num_of_channels = pno_def->top_k_num_of_channels; + req->stationary_thresh = pno_def->stationary_thresh; + req->channel_prediction_full_scan = + pno_def->channel_prediction_full_scan; + req->mawc_params.vdev_id = wlan_vdev_get_id(vdev); + qdf_mem_copy(&req->mawc_params, &pno_def->mawc_params, + sizeof(req->mawc_params)); + + return QDF_STATUS_SUCCESS; +} + +bool ucfg_scan_is_dfs_chnl_scan_enabled(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("NULL scan obj"); + return true; + } + + return scan_obj->pno_cfg.dfs_chnl_scan_enabled; +} + +uint32_t ucfg_scan_get_scan_timer_repeat_value(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("NULL scan obj"); + return SCAN_PNO_DEF_SCAN_TIMER_REPEAT; + } + + return 
scan_obj->pno_cfg.scan_timer_repeat_value; +} + +uint32_t ucfg_scan_get_slow_scan_multiplier(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("NULL scan obj"); + return SCAN_PNO_DEF_SLOW_SCAN_MULTIPLIER; + } + + return scan_obj->pno_cfg.slow_scan_multiplier; +} + +static QDF_STATUS ucfg_scan_update_pno_config(struct pno_def_config *pno, + struct pno_user_cfg *pno_cfg) +{ + pno->channel_prediction = pno_cfg->channel_prediction; + pno->top_k_num_of_channels = pno_cfg->top_k_num_of_channels; + pno->stationary_thresh = pno_cfg->stationary_thresh; + pno->scan_timer_repeat_value = pno_cfg->scan_timer_repeat_value; + pno->slow_scan_multiplier = pno_cfg->slow_scan_multiplier; + pno->dfs_chnl_scan_enabled = pno_cfg->dfs_chnl_scan_enabled; + pno->adaptive_dwell_mode = pno_cfg->adaptive_dwell_mode; + pno->channel_prediction_full_scan = + pno_cfg->channel_prediction_full_scan; + qdf_mem_copy(&pno->mawc_params, &pno_cfg->mawc_params, + sizeof(pno->mawc_params)); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +ucfg_scan_register_pno_cb(struct wlan_objmgr_psoc *psoc, + scan_event_handler event_cb, void *arg) +{ + struct wlan_scan_obj *scan; + + if (!psoc) { + scm_err("null psoc"); + return QDF_STATUS_E_INVAL; + } + scan = wlan_psoc_get_scan_obj(psoc); + qdf_spin_lock_bh(&scan->lock); + scan->pno_cfg.pno_cb.func = event_cb; + scan->pno_cfg.pno_cb.arg = arg; + qdf_spin_unlock_bh(&scan->lock); + scm_debug("event_cb: 0x%pK, arg: 0x%pK", event_cb, arg); + + return QDF_STATUS_SUCCESS; +} + +#else + +static inline QDF_STATUS +wlan_pno_global_init(struct pno_def_config *pno_def) +{ + return QDF_STATUS_SUCCESS; +} +static inline QDF_STATUS +wlan_pno_global_deinit(struct pno_def_config *pno_def) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS +ucfg_scan_update_pno_config(struct pno_def_config *pno, + struct pno_user_cfg *pno_cfg) +{ + return QDF_STATUS_SUCCESS; +} + +#endif + 
+#ifdef WLAN_POLICY_MGR_ENABLE +/** + * ucfg_scan_update_dbs_scan_ctrl_ext_flag() - update dbs scan ctrl flags + * @req: pointer to scan request + * + * This function sets scan_ctrl_flags_ext value depending on the type of + * scan and the channel lists. + * + * Non-DBS scan is requested if any of the below case is met: + * 1. HW is DBS incapable + * 2. A high accuracy scan request is sent by kernel. + * + * DBS scan is enabled for these conditions: + * 1. A low power or low span scan request is sent by kernel. + * For remaining cases DBS is enabled by default. + * Return: void + */ +static void +ucfg_scan_update_dbs_scan_ctrl_ext_flag(struct scan_start_request *req) +{ + struct wlan_objmgr_psoc *psoc; + uint32_t scan_dbs_policy = SCAN_DBS_POLICY_DEFAULT; + + psoc = wlan_vdev_get_psoc(req->vdev); + + if (!policy_mgr_is_hw_dbs_capable(psoc)) { + scm_debug("dbs disabled, going for non-dbs scan"); + scan_dbs_policy = SCAN_DBS_POLICY_FORCE_NONDBS; + goto end; + } + + if (!ucfg_scan_cfg_honour_nl_scan_policy_flags(psoc)) { + scm_debug("nl scan policy flags not honoured, goto end"); + goto end; + } + + if (req->scan_req.scan_policy_high_accuracy) { + scm_debug("high accuracy scan received, going for non-dbs scan"); + scan_dbs_policy = SCAN_DBS_POLICY_FORCE_NONDBS; + goto end; + } + if ((req->scan_req.scan_policy_low_power) || + (req->scan_req.scan_policy_low_span)) { + scm_debug("low power/span scan received, going for dbs scan"); + scan_dbs_policy = SCAN_DBS_POLICY_IGNORE_DUTY; + goto end; + } + +end: + req->scan_req.scan_ctrl_flags_ext |= + ((scan_dbs_policy << SCAN_FLAG_EXT_DBS_SCAN_POLICY_BIT) + & SCAN_FLAG_EXT_DBS_SCAN_POLICY_MASK); + scm_debug("scan_ctrl_flags_ext: 0x%x", + req->scan_req.scan_ctrl_flags_ext); +} + +/** + * ucfg_update_passive_dwell_time() - update dwell passive time + * @vdev: vdev object + * @req: scan request + * + * Return: None + */ +static void +ucfg_update_passive_dwell_time(struct wlan_objmgr_vdev *vdev, + struct scan_start_request *req) +{ 
	struct wlan_objmgr_psoc *psoc;

	psoc = wlan_vdev_get_psoc(vdev);
	if (!psoc)
		return;

	/* Lengthen passive dwell when a 2.4 GHz STA coexists with BT A2DP
	 * on non-DBS hardware (single radio shared between scan and BT).
	 */
	if (policy_mgr_is_sta_connected_2g(psoc) &&
			!policy_mgr_is_hw_dbs_capable(psoc) &&
			ucfg_scan_get_bt_activity(psoc))
		req->scan_req.dwell_time_passive =
			PASSIVE_DWELL_TIME_BT_A2DP_ENABLED;
}

/* {repeat_probe_time, probe_time} pairs indexed by number of SSIDs scanned;
 * consumed by the concurrency-parameter update path below.
 */
static const struct probe_time_dwell_time
	scan_probe_time_dwell_time_map[SCAN_DWELL_TIME_PROBE_TIME_MAP_SIZE] = {
	{28, 11},             /* 0 SSID */
	{28, 20},             /* 1 SSID */
	{28, 20},             /* 2 SSID */
	{28, 20},             /* 3 SSID */
	{28, 20},             /* 4 SSID */
	{28, 20},             /* 5 SSID */
	{28, 20},             /* 6 SSID */
	{28, 11},             /* 7 SSID */
	{28, 11},             /* 8 SSID */
	{28, 11},             /* 9 SSID */
	{28, 8}               /* 10 SSID */
};

/**
 * ucfg_scan_get_burst_duration() - get burst duration depending on max chan
 * and miracast.
 * @max_ch_time: max channel time
 * @miracast_enabled: if miracast is enabled
 *
 * NOTE(review): assumes max_ch_time > 0 — it is used as a divisor below;
 * confirm callers never pass 0.
 *
 * Return: burst_duration
 */
static inline
int ucfg_scan_get_burst_duration(int max_ch_time,
		bool miracast_enabled)
{
	int burst_duration = 0;

	if (miracast_enabled) {
		/*
		 * When miracast is running, burst
		 * duration needs to be minimum to avoid
		 * any stutter or glitch in miracast
		 * during station scan
		 */
		if (max_ch_time <= SCAN_GO_MIN_ACTIVE_SCAN_BURST_DURATION)
			burst_duration = max_ch_time;
		else
			burst_duration = SCAN_GO_MIN_ACTIVE_SCAN_BURST_DURATION;
	} else {
		/*
		 * If miracast is not running, accommodate max
		 * stations to make the scans faster
		 */
		burst_duration = SCAN_GO_BURST_SCAN_MAX_NUM_OFFCHANNELS *
			max_ch_time;

		if (burst_duration > SCAN_GO_MAX_ACTIVE_SCAN_BURST_DURATION) {
			uint8_t channels = SCAN_P2P_SCAN_MAX_BURST_DURATION /
				max_ch_time;

			if (channels)
				burst_duration = channels * max_ch_time;
			else
				burst_duration =
					SCAN_GO_MAX_ACTIVE_SCAN_BURST_DURATION;
		}
	}
	return burst_duration;
}

/**
 * ucfg_scan_req_update_concurrency_params() - update scan req params
 * depending on concurrent mode present.
+ * @vdev: vdev object pointer + * @req: scan request + * @scan_obj: scan object + * + * Return: void + */ +static void ucfg_scan_req_update_concurrency_params( + struct wlan_objmgr_vdev *vdev, struct scan_start_request *req, + struct wlan_scan_obj *scan_obj) +{ + bool ap_present, go_present, sta_active, p2p_cli_present, ndi_present; + struct wlan_objmgr_psoc *psoc; + uint16_t sap_peer_count = 0; + uint16_t go_peer_count = 0; + struct wlan_objmgr_pdev *pdev; + + pdev = wlan_vdev_get_pdev(vdev); + psoc = wlan_vdev_get_psoc(vdev); + + if (!psoc || !pdev) + return; + ap_present = policy_mgr_mode_specific_connection_count( + psoc, PM_SAP_MODE, NULL); + go_present = policy_mgr_mode_specific_connection_count( + psoc, PM_P2P_GO_MODE, NULL); + p2p_cli_present = policy_mgr_mode_specific_connection_count( + psoc, PM_P2P_CLIENT_MODE, NULL); + sta_active = policy_mgr_mode_specific_connection_count( + psoc, PM_STA_MODE, NULL); + ndi_present = policy_mgr_mode_specific_connection_count( + psoc, PM_NDI_MODE, NULL); + if (ap_present) + sap_peer_count = + wlan_util_get_peer_count_for_mode(pdev, QDF_SAP_MODE); + if (go_present) + go_peer_count = + wlan_util_get_peer_count_for_mode(pdev, QDF_P2P_GO_MODE); + if (policy_mgr_get_connection_count(psoc)) { + if (req->scan_req.scan_f_passive) + req->scan_req.dwell_time_passive = + scan_obj->scan_def.conc_passive_dwell; + else + req->scan_req.dwell_time_active = + scan_obj->scan_def.conc_active_dwell; + req->scan_req.max_rest_time = + scan_obj->scan_def.conc_max_rest_time; + req->scan_req.min_rest_time = + scan_obj->scan_def.conc_min_rest_time; + req->scan_req.idle_time = scan_obj->scan_def.conc_idle_time; + } + + if (!wlan_vdev_is_up(req->vdev)) + req->scan_req.adaptive_dwell_time_mode = + scan_obj->scan_def.adaptive_dwell_time_mode_nc; + /* + * If AP/GO is active and has clients connectedset min rest time same + * as max rest time, so that firmware spends more time on home channel + * which will increase the probability of sending beacon 
at TBTT + */ + if ((ap_present && sap_peer_count) || + (go_present && go_peer_count)) { + req->scan_req.dwell_time_active_2g = 0; + req->scan_req.min_rest_time = req->scan_req.max_rest_time; + } + + /* + * If scan req for SAP (ACS Sacn) use dwell_time_active_def as dwell + * time for 2g channels instead of dwell_time_active_2g + */ + if (vdev->vdev_mlme.vdev_opmode == QDF_SAP_MODE) { + req->scan_req.dwell_time_active_2g = 0; + } + + if (req->scan_req.p2p_scan_type == SCAN_NON_P2P_DEFAULT) { + /* + * Decide burst_duration and dwell_time_active based on + * what type of devices are active. + */ + do { + if (ap_present && go_present && sta_active) { + if (req->scan_req.dwell_time_active <= + SCAN_3PORT_CONC_SCAN_MAX_BURST_DURATION) + req->scan_req.burst_duration = + req->scan_req.dwell_time_active; + else + req->scan_req.burst_duration = + SCAN_3PORT_CONC_SCAN_MAX_BURST_DURATION; + + break; + } + + if (scan_obj->miracast_enabled && + policy_mgr_is_mcc_in_24G(psoc)) + req->scan_req.max_rest_time = + scan_obj->scan_def.sta_miracast_mcc_rest_time; + + if (go_present) { + /* + * Background scan while GO is sending beacons. + * Every off-channel transition has overhead of + * 2 beacon intervals for NOA. Maximize number + * of channels in every transition by using + * burst scan. + */ + if (scan_obj->scan_def.go_scan_burst_duration) + req->scan_req.burst_duration = + scan_obj-> + scan_def.go_scan_burst_duration; + else + req->scan_req.burst_duration = + ucfg_scan_get_burst_duration( + req->scan_req. + dwell_time_active, + scan_obj-> + miracast_enabled); + break; + } + if ((sta_active || p2p_cli_present)) { + if (scan_obj->scan_def.sta_scan_burst_duration) + req->scan_req.burst_duration = + scan_obj->scan_def. 
+ sta_scan_burst_duration; + break; + } + + if (ndi_present) { + req->scan_req.burst_duration = + ucfg_scan_get_burst_duration( + req->scan_req.dwell_time_active, + scan_obj->miracast_enabled); + break; + } + } while (0); + + if (ap_present) { + uint8_t ssid_num; + ssid_num = req->scan_req.num_ssids * + req->scan_req.num_bssid; + req->scan_req.repeat_probe_time = + scan_probe_time_dwell_time_map[ + QDF_MIN(ssid_num, + SCAN_DWELL_TIME_PROBE_TIME_MAP_SIZE + - 1)].probe_time; + req->scan_req.n_probes = + (req->scan_req.repeat_probe_time > 0) ? + req->scan_req.dwell_time_active / + req->scan_req.repeat_probe_time : 0; + } + } + + if (ap_present) { + uint8_t ap_chan; + struct wlan_objmgr_pdev *pdev = wlan_vdev_get_pdev(vdev); + + ap_chan = policy_mgr_get_channel(psoc, PM_SAP_MODE, NULL); + /* + * P2P/STA scan while SoftAP is sending beacons. + * Max duration of CTS2self is 32 ms, which limits the + * dwell time. If DBS is supported and if SAP is on 2G channel + * then keep passive dwell time default. 
+ */ + if (sap_peer_count) { + req->scan_req.dwell_time_active = + QDF_MIN(req->scan_req.dwell_time_active, + (SCAN_CTS_DURATION_MS_MAX - + SCAN_ROAM_SCAN_CHANNEL_SWITCH_TIME)); + if (!policy_mgr_is_hw_dbs_capable(psoc) || + (policy_mgr_is_hw_dbs_capable(psoc) && + WLAN_CHAN_IS_5GHZ(ap_chan))) { + req->scan_req.dwell_time_passive = + req->scan_req.dwell_time_active; + } + } + if (scan_obj->scan_def.ap_scan_burst_duration) { + req->scan_req.burst_duration = + scan_obj->scan_def.ap_scan_burst_duration; + } else { + req->scan_req.burst_duration = 0; + if (utils_is_dfs_ch(pdev, ap_chan)) + req->scan_req.burst_duration = + SCAN_BURST_SCAN_MAX_NUM_OFFCHANNELS * + req->scan_req.dwell_time_active; + } + } +} + +#else +static inline void ucfg_scan_req_update_concurrency_params( + struct wlan_objmgr_vdev *vdev, struct scan_start_request *req, + struct wlan_scan_obj *scan_obj) +{ +} +static inline void +ucfg_update_passive_dwell_time(struct wlan_objmgr_vdev *vdev, + struct scan_start_request *req) {} +static inline void +ucfg_scan_update_dbs_scan_ctrl_ext_flag( + struct scan_start_request *req) {} +#endif + +QDF_STATUS +ucfg_scan_set_custom_scan_chan_list(struct wlan_objmgr_pdev *pdev, + struct chan_list *chan_list) +{ + uint8_t pdev_id; + struct wlan_scan_obj *scan_obj; + + if (!pdev || !chan_list) { + scm_warn("pdev: 0x%pK, chan_list: 0x%pK", pdev, chan_list); + return QDF_STATUS_E_NULL_VALUE; + } + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + scan_obj = wlan_pdev_get_scan_obj(pdev); + + qdf_mem_copy(&scan_obj->pdev_info[pdev_id].custom_chan_list, + chan_list, sizeof(*chan_list)); + + return QDF_STATUS_SUCCESS; +} + +/** + * ucfg_update_channel_list() - update scan req params depending on dfs inis + * and initial scan request. 
+ * @req: scan request + * @scan_obj: scan object + * + * Return: void + */ +static void +ucfg_update_channel_list(struct scan_start_request *req, + struct wlan_scan_obj *scan_obj) +{ + uint8_t i; + uint8_t num_scan_channels = 0; + struct scan_vdev_obj *scan_vdev_obj; + struct wlan_objmgr_pdev *pdev; + bool first_scan_done = true; + bool p2p_search = false; + + pdev = wlan_vdev_get_pdev(req->vdev); + + scan_vdev_obj = wlan_get_vdev_scan_obj(req->vdev); + if (!scan_vdev_obj) { + scm_err("null scan_vdev_obj"); + return; + } + + if (!scan_vdev_obj->first_scan_done) { + first_scan_done = false; + scan_vdev_obj->first_scan_done = true; + } + + if (req->scan_req.p2p_scan_type == SCAN_P2P_SEARCH) + p2p_search = true; + /* + * No need to update channels if req is single channel* ie ROC, + * Preauth or a single channel scan etc. + */ + if (req->scan_req.chan_list.num_chan == 1) + return; + + /* do this only for STA and P2P-CLI mode */ + if ((!(wlan_vdev_mlme_get_opmode(req->vdev) == QDF_STA_MODE) && + !(wlan_vdev_mlme_get_opmode(req->vdev) == QDF_P2P_CLIENT_MODE)) && + !p2p_search) + return; + + if ((scan_obj->scan_def.allow_dfs_chan_in_scan && + (scan_obj->scan_def.allow_dfs_chan_in_first_scan || + first_scan_done)) && + !(scan_obj->scan_def.skip_dfs_chan_in_p2p_search && p2p_search)) + return; + + for (i = 0; i < req->scan_req.chan_list.num_chan; i++) { + if (wlan_reg_is_dfs_ch(pdev, wlan_reg_freq_to_chan(pdev, + req->scan_req.chan_list. + chan[i].freq))) + continue; + req->scan_req.chan_list.chan[num_scan_channels++] = + req->scan_req.chan_list.chan[i]; + } + req->scan_req.chan_list.num_chan = num_scan_channels; +} + +/** + * ucfg_scan_req_update_params() - update scan req params depending on modes + * and scan type. 
+ * @vdev: vdev object pointer + * @req: scan request + * @scan_obj: scan object + * + * Return: void + */ +static void +ucfg_scan_req_update_params(struct wlan_objmgr_vdev *vdev, + struct scan_start_request *req, struct wlan_scan_obj *scan_obj) +{ + struct chan_list *custom_chan_list; + struct wlan_objmgr_pdev *pdev; + uint8_t pdev_id; + + /* Ensure correct number of probes are sent on active channel */ + if (!req->scan_req.repeat_probe_time) + req->scan_req.repeat_probe_time = + req->scan_req.dwell_time_active / SCAN_NPROBES_DEFAULT; + + if (req->scan_req.scan_f_passive) + req->scan_req.scan_ctrl_flags_ext |= + SCAN_FLAG_EXT_FILTER_PUBLIC_ACTION_FRAME; + + if (!req->scan_req.n_probes) + req->scan_req.n_probes = (req->scan_req.repeat_probe_time > 0) ? + req->scan_req.dwell_time_active / + req->scan_req.repeat_probe_time : 0; + + if (req->scan_req.p2p_scan_type == SCAN_NON_P2P_DEFAULT) { + req->scan_req.scan_f_cck_rates = true; + if (!req->scan_req.num_ssids) + req->scan_req.scan_f_bcast_probe = true; + req->scan_req.scan_f_add_ds_ie_in_probe = true; + req->scan_req.scan_f_filter_prb_req = true; + req->scan_req.scan_f_add_tpc_ie_in_probe = true; + } else { + req->scan_req.adaptive_dwell_time_mode = SCAN_DWELL_MODE_STATIC; + req->scan_req.dwell_time_active_2g = 0; + if (req->scan_req.p2p_scan_type == SCAN_P2P_LISTEN) { + req->scan_req.repeat_probe_time = 0; + } else { + req->scan_req.scan_f_filter_prb_req = true; + if (!req->scan_req.num_ssids) + req->scan_req.scan_f_bcast_probe = true; + + req->scan_req.dwell_time_active += + P2P_SEARCH_DWELL_TIME_INC; + /* + * 3 channels with default max dwell time 40 ms. + * Cap limit will be set by + * P2P_SCAN_MAX_BURST_DURATION. Burst duration + * should be such that no channel is scanned less + * than the dwell time in normal scenarios. 
+ */ + if (req->scan_req.chan_list.num_chan == + WLAN_P2P_SOCIAL_CHANNELS && + !scan_obj->miracast_enabled) + req->scan_req.repeat_probe_time = + req->scan_req.dwell_time_active / 5; + else + req->scan_req.repeat_probe_time = + req->scan_req.dwell_time_active / 3; + + if (scan_obj->scan_def.p2p_scan_burst_duration) { + req->scan_req.burst_duration = + scan_obj->scan_def. + p2p_scan_burst_duration; + } else { + req->scan_req.burst_duration = + BURST_SCAN_MAX_NUM_OFFCHANNELS * + req->scan_req.dwell_time_active; + if (req->scan_req.burst_duration > + P2P_SCAN_MAX_BURST_DURATION) { + uint8_t channels = + P2P_SCAN_MAX_BURST_DURATION / + req->scan_req.dwell_time_active; + if (channels) + req->scan_req.burst_duration = + channels * + req->scan_req.dwell_time_active; + else + req->scan_req.burst_duration = + P2P_SCAN_MAX_BURST_DURATION; + } + } + + req->scan_req.scan_ev_bss_chan = false; + } + } + + if (!req->scan_req.scan_f_passive) + ucfg_update_passive_dwell_time(vdev, req); + ucfg_scan_update_dbs_scan_ctrl_ext_flag(req); + + /* + * No need to update conncurrency parmas if req is passive scan on + * single channel ie ROC, Preauth etc + */ + if (!(req->scan_req.scan_f_passive && + req->scan_req.chan_list.num_chan == 1)) + ucfg_scan_req_update_concurrency_params(vdev, req, scan_obj); + + /* Set wide band flag if enabled. This will cause + * phymode TLV being sent to FW. + */ + pdev = wlan_vdev_get_pdev(vdev); + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + if (ucfg_scan_get_wide_band_scan(pdev)) + req->scan_req.scan_f_wide_band = true; + else + req->scan_req.scan_f_wide_band = false; + + /* Overwrite scan channles with custom scan channel + * list if configured. 
+ */ + custom_chan_list = &scan_obj->pdev_info[pdev_id].custom_chan_list; + if (custom_chan_list->num_chan) + qdf_mem_copy(&req->scan_req.chan_list, custom_chan_list, + sizeof(struct chan_list)); + else if (!req->scan_req.chan_list.num_chan) + ucfg_scan_init_chanlist_params(req, 0, NULL, NULL); + + ucfg_update_channel_list(req, scan_obj); + scm_debug("dwell time: active %d, passive %d, repeat_probe_time %d " + "n_probes %d flags_ext %x, wide_bw_scan: %d", + req->scan_req.dwell_time_active, + req->scan_req.dwell_time_passive, + req->scan_req.repeat_probe_time, req->scan_req.n_probes, + req->scan_req.scan_ctrl_flags_ext, + req->scan_req.scan_f_wide_band); +} + +QDF_STATUS +ucfg_scan_start(struct scan_start_request *req) +{ + struct scheduler_msg msg = {0}; + QDF_STATUS status; + struct wlan_scan_obj *scan_obj; + struct wlan_objmgr_pdev *pdev; + uint8_t idx; + + if (!req || !req->vdev) { + scm_err("req or vdev within req is NULL"); + if (req) + scm_scan_free_scan_request_mem(req); + return QDF_STATUS_E_NULL_VALUE; + } + + pdev = wlan_vdev_get_pdev(req->vdev); + if (!pdev) { + scm_err("Failed to get pdev object"); + scm_scan_free_scan_request_mem(req); + return QDF_STATUS_E_NULL_VALUE; + } + + scan_obj = wlan_pdev_get_scan_obj(pdev); + if (!scan_obj) { + scm_err("Failed to get scan object"); + scm_scan_free_scan_request_mem(req); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!scan_obj->enable_scan) { + scm_err("scan disabled, rejecting the scan req"); + scm_scan_free_scan_request_mem(req); + return QDF_STATUS_E_AGAIN; + } + + scm_debug("reqid: %d, scanid: %d, vdevid: %d", + req->scan_req.scan_req_id, req->scan_req.scan_id, + req->scan_req.vdev_id); + + ucfg_scan_req_update_params(req->vdev, req, scan_obj); + + if (!req->scan_req.chan_list.num_chan) { + scm_err("0 channel to scan, reject scan"); + scm_scan_free_scan_request_mem(req); + return QDF_STATUS_E_NULL_VALUE; + } + + /* Try to get vdev reference. Return if reference could + * not be taken. 
Reference will be released once scan + * request handling completes along with free of @req. + */ + status = wlan_objmgr_vdev_try_get_ref(req->vdev, WLAN_SCAN_ID); + if (QDF_IS_STATUS_ERROR(status)) { + scm_info("unable to get reference"); + scm_scan_free_scan_request_mem(req); + return status; + } + + scm_info("request to scan %d channels", + req->scan_req.chan_list.num_chan); + for (idx = 0; idx < req->scan_req.chan_list.num_chan; idx++) + scm_debug("chan[%d]: freq:%d, phymode:%d", idx, + req->scan_req.chan_list.chan[idx].freq, + req->scan_req.chan_list.chan[idx].phymode); + + msg.bodyptr = req; + msg.callback = scm_scan_start_req; + msg.flush_callback = scm_scan_start_flush_callback; + + status = scheduler_post_message(QDF_MODULE_ID_OS_IF, + QDF_MODULE_ID_SCAN, + QDF_MODULE_ID_OS_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + wlan_objmgr_vdev_release_ref(req->vdev, WLAN_SCAN_ID); + scm_err("failed to post to QDF_MODULE_ID_OS_IF"); + scm_scan_free_scan_request_mem(req); + } + + return status; +} + +QDF_STATUS ucfg_scan_set_enable(struct wlan_objmgr_psoc *psoc, bool enable) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return QDF_STATUS_E_NULL_VALUE; + } + scan_obj->enable_scan = enable; + scm_debug("set enable_scan to %d", scan_obj->enable_scan); + + return QDF_STATUS_SUCCESS; +} + +bool ucfg_scan_get_enable(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return false; + } + return scan_obj->enable_scan; +} + +QDF_STATUS ucfg_scan_set_miracast( + struct wlan_objmgr_psoc *psoc, bool enable) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return QDF_STATUS_E_NULL_VALUE; + } + scan_obj->miracast_enabled = enable; + scm_debug("set miracast_enable to 
%d", scan_obj->miracast_enabled); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +ucfg_scan_set_wide_band_scan(struct wlan_objmgr_pdev *pdev, bool enable) +{ + uint8_t pdev_id; + struct wlan_scan_obj *scan_obj; + + if (!pdev) { + scm_warn("null vdev"); + return QDF_STATUS_E_NULL_VALUE; + } + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + scan_obj = wlan_pdev_get_scan_obj(pdev); + if (!scan_obj) + return QDF_STATUS_E_FAILURE; + + scm_debug("set wide_band_scan to %d", enable); + scan_obj->pdev_info[pdev_id].wide_band_scan = enable; + + return QDF_STATUS_SUCCESS; +} + +bool ucfg_scan_get_wide_band_scan(struct wlan_objmgr_pdev *pdev) +{ + uint8_t pdev_id; + struct wlan_scan_obj *scan_obj; + + if (!pdev) { + scm_warn("null vdev"); + return QDF_STATUS_E_NULL_VALUE; + } + pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + scan_obj = wlan_pdev_get_scan_obj(pdev); + if (!scan_obj) + return QDF_STATUS_E_FAILURE; + + return scan_obj->pdev_info[pdev_id].wide_band_scan; +} + +QDF_STATUS +ucfg_scan_cancel(struct scan_cancel_request *req) +{ + struct scheduler_msg msg = {0}; + QDF_STATUS status; + + if (!req || !req->vdev) { + scm_err("req or vdev within req is NULL"); + if (req) + qdf_mem_free(req); + return QDF_STATUS_E_NULL_VALUE; + } + scm_debug("reqid: %d, scanid: %d, vdevid: %d, type: %d", + req->cancel_req.requester, req->cancel_req.scan_id, + req->cancel_req.vdev_id, req->cancel_req.req_type); + + status = wlan_objmgr_vdev_try_get_ref(req->vdev, WLAN_SCAN_ID); + if (QDF_IS_STATUS_ERROR(status)) { + scm_info("Failed to get vdev ref; status:%d", status); + goto req_free; + } + + msg.bodyptr = req; + msg.callback = scm_scan_cancel_req; + msg.flush_callback = scm_scan_cancel_flush_callback; + + status = scheduler_post_message(QDF_MODULE_ID_OS_IF, + QDF_MODULE_ID_SCAN, + QDF_MODULE_ID_OS_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("failed to post to QDF_MODULE_ID_OS_IF"); + goto vdev_put; + } + + return QDF_STATUS_SUCCESS; + +vdev_put: + 
wlan_objmgr_vdev_release_ref(req->vdev, WLAN_SCAN_ID); + +req_free: + qdf_mem_free(req); + + return status; +} + +QDF_STATUS +ucfg_scan_cancel_sync(struct scan_cancel_request *req) +{ + QDF_STATUS status; + bool cancel_vdev = false, cancel_pdev = false; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_pdev *pdev; + uint32_t max_wait_iterations = SCM_CANCEL_SCAN_WAIT_ITERATION; + qdf_event_t cancel_scan_event; + + if (!req || !req->vdev) { + scm_err("req or vdev within req is NULL"); + if (req) + qdf_mem_free(req); + return QDF_STATUS_E_NULL_VALUE; + } + + if (req->cancel_req.req_type == + WLAN_SCAN_CANCEL_PDEV_ALL) + cancel_pdev = true; + else if (req->cancel_req.req_type == + WLAN_SCAN_CANCEL_VDEV_ALL) + cancel_vdev = true; + + vdev = req->vdev; + status = ucfg_scan_cancel(req); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("failed to post to QDF_MODULE_ID_OS_IF"); + return status; + } + + memset(&cancel_scan_event, 0, sizeof(cancel_scan_event)); + /* + * If cancel req is to cancel all scan of pdev or vdev + * wait until all scan of pdev or vdev get cancelled + */ + qdf_event_create(&cancel_scan_event); + qdf_event_reset(&cancel_scan_event); + + if (cancel_pdev) { + pdev = wlan_vdev_get_pdev(vdev); + while ((ucfg_scan_get_pdev_status(pdev) != + SCAN_NOT_IN_PROGRESS) && max_wait_iterations) { + scm_debug("wait for all pdev scan to get complete"); + qdf_wait_single_event(&cancel_scan_event, + qdf_system_msecs_to_ticks( + SCM_CANCEL_SCAN_WAIT_TIME)); + max_wait_iterations--; + } + } else if (cancel_vdev) { + while ((ucfg_scan_get_vdev_status(vdev) != + SCAN_NOT_IN_PROGRESS) && max_wait_iterations) { + scm_debug("wait for all vdev scan to get complete"); + qdf_wait_single_event(&cancel_scan_event, + qdf_system_msecs_to_ticks( + SCM_CANCEL_SCAN_WAIT_TIME)); + max_wait_iterations--; + } + } + + qdf_event_destroy(&cancel_scan_event); + + if (!max_wait_iterations) { + scm_err("Failed to wait for scans to get complete"); + return QDF_STATUS_E_TIMEOUT; + } + + return 
status; +} + +wlan_scan_requester +ucfg_scan_register_requester(struct wlan_objmgr_psoc *psoc, + uint8_t *name, scan_event_handler event_cb, void *arg) +{ + int i, j; + struct wlan_scan_obj *scan; + struct scan_requester_info *requesters; + wlan_scan_requester requester = {0}; + + if (!psoc) { + scm_err("null psoc"); + return 0; + } + scan = wlan_psoc_get_scan_obj(psoc); + requesters = scan->requesters; + qdf_spin_lock_bh(&scan->lock); + for (i = 0; i < WLAN_MAX_REQUESTORS; ++i) { + if (requesters[i].requester == 0) { + requesters[i].requester = + WLAN_SCAN_REQUESTER_ID_PREFIX | i; + j = 0; + while (name[j] && (j < (WLAN_MAX_MODULE_NAME - 1))) { + requesters[i].module[j] = name[j]; + ++j; + } + requesters[i].module[j] = 0; + requesters[i].ev_handler.func = event_cb; + requesters[i].ev_handler.arg = arg; + requester = requesters[i].requester; + break; + } + } + qdf_spin_unlock_bh(&scan->lock); + scm_debug("module: %s, event_cb: 0x%pK, arg: 0x%pK, reqid: %d", + name, event_cb, arg, requester); + + return requester; +} + +void +ucfg_scan_unregister_requester(struct wlan_objmgr_psoc *psoc, + wlan_scan_requester requester) +{ + int idx; + struct wlan_scan_obj *scan; + struct scan_requester_info *requesters; + + idx = requester & WLAN_SCAN_REQUESTER_ID_PREFIX; + if (idx != WLAN_SCAN_REQUESTER_ID_PREFIX) { + scm_err("prefix didn't match for requester id %d", requester); + return; + } + + idx = requester & WLAN_SCAN_REQUESTER_ID_MASK; + if (idx >= WLAN_MAX_REQUESTORS) { + scm_err("requester id %d greater than max value", requester); + return; + } + + if (!psoc) { + scm_err("null psoc"); + return; + } + scan = wlan_psoc_get_scan_obj(psoc); + requesters = scan->requesters; + scm_debug("reqid: %d", requester); + + qdf_spin_lock_bh(&scan->lock); + requesters[idx].requester = 0; + requesters[idx].module[0] = 0; + requesters[idx].ev_handler.func = NULL; + requesters[idx].ev_handler.arg = NULL; + qdf_spin_unlock_bh(&scan->lock); +} + +uint8_t* +ucfg_get_scan_requester_name(struct 
wlan_objmgr_psoc *psoc, + wlan_scan_requester requester) +{ + int idx = requester & WLAN_SCAN_REQUESTER_ID_MASK; + struct wlan_scan_obj *scan; + struct scan_requester_info *requesters; + + if (!psoc) { + scm_err("null psoc"); + return "null"; + } + scan = wlan_psoc_get_scan_obj(psoc); + requesters = scan->requesters; + + if ((idx < WLAN_MAX_REQUESTORS) && + (requesters[idx].requester == requester)) { + return requesters[idx].module; + } + + return (uint8_t *)"unknown"; +} + +wlan_scan_id +ucfg_scan_get_scan_id(struct wlan_objmgr_psoc *psoc) +{ + wlan_scan_id id; + struct wlan_scan_obj *scan; + + if (!psoc) { + QDF_ASSERT(0); + scm_err("null psoc"); + return 0; + } + scan = wlan_psoc_get_scan_obj(psoc); + + id = qdf_atomic_inc_return(&scan->scan_ids); + id = id & WLAN_SCAN_ID_MASK; + /* Mark this scan request as triggered by host + * by setting WLAN_HOST_SCAN_REQ_ID_PREFIX flag. + */ + id = id | WLAN_HOST_SCAN_REQ_ID_PREFIX; + scm_debug("scan_id: 0x%x", id); + + return id; +} + +static QDF_STATUS +scm_add_scan_event_handler(struct pdev_scan_ev_handler *pdev_ev_handler, + scan_event_handler event_cb, void *arg) +{ + struct cb_handler *cb_handler; + uint32_t handler_cnt = pdev_ev_handler->handler_cnt; + + /* Assign next available slot to this registration request */ + cb_handler = &(pdev_ev_handler->cb_handlers[handler_cnt]); + cb_handler->func = event_cb; + cb_handler->arg = arg; + pdev_ev_handler->handler_cnt++; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +ucfg_scan_register_event_handler(struct wlan_objmgr_pdev *pdev, + scan_event_handler event_cb, void *arg) +{ + uint32_t idx; + struct wlan_scan_obj *scan; + struct pdev_scan_ev_handler *pdev_ev_handler; + struct cb_handler *cb_handler; + + /* scan event handler call back can't be NULL */ + if (!pdev || !event_cb) { + scm_err("pdev: %pK, event_cb: %pK", pdev, event_cb); + return QDF_STATUS_E_NULL_VALUE; + } + + scm_debug("pdev: %pK, event_cb: %pK, arg: %pK\n", pdev, event_cb, arg); + + scan = 
wlan_pdev_get_scan_obj(pdev); + pdev_ev_handler = wlan_pdev_get_pdev_scan_ev_handlers(pdev); + if (!pdev_ev_handler) { + scm_err("null pdev_ev_handler"); + return QDF_STATUS_E_NULL_VALUE; + } + cb_handler = &(pdev_ev_handler->cb_handlers[0]); + + qdf_spin_lock_bh(&scan->lock); + /* Ensure its not a duplicate registration request */ + for (idx = 0; idx < MAX_SCAN_EVENT_HANDLERS_PER_PDEV; + idx++, cb_handler++) { + if ((cb_handler->func == event_cb) && + (cb_handler->arg == arg)) { + qdf_spin_unlock_bh(&scan->lock); + scm_debug("func: %pK, arg: %pK already exists", + event_cb, arg); + return QDF_STATUS_SUCCESS; + } + } + + QDF_ASSERT(pdev_ev_handler->handler_cnt < + MAX_SCAN_EVENT_HANDLERS_PER_PDEV); + + if (pdev_ev_handler->handler_cnt >= MAX_SCAN_EVENT_HANDLERS_PER_PDEV) { + qdf_spin_unlock_bh(&scan->lock); + scm_warn("No more registrations possible"); + return QDF_STATUS_E_NOMEM; + } + + scm_add_scan_event_handler(pdev_ev_handler, event_cb, arg); + qdf_spin_unlock_bh(&scan->lock); + + scm_debug("event_cb: 0x%pK, arg: 0x%pK", event_cb, arg); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +wlan_scan_global_init(struct wlan_scan_obj *scan_obj) +{ + scan_obj->enable_scan = true; + scan_obj->drop_bcn_on_chan_mismatch = true; + scan_obj->disable_timeout = false; + scan_obj->scan_def.active_dwell = SCAN_ACTIVE_DWELL_TIME; + scan_obj->scan_def.passive_dwell = SCAN_PASSIVE_DWELL_TIME; + /* the ini is disallow DFS channel scan if ini is 1, so negate that */ + scan_obj->scan_def.allow_dfs_chan_in_first_scan = true; + scan_obj->scan_def.allow_dfs_chan_in_scan = true; + scan_obj->scan_def.use_wake_lock_in_user_scan = false; + scan_obj->scan_def.max_rest_time = SCAN_MAX_REST_TIME; + scan_obj->scan_def.sta_miracast_mcc_rest_time = + SCAN_STA_MIRACAST_MCC_REST_TIME; + scan_obj->scan_def.min_rest_time = SCAN_MIN_REST_TIME; + scan_obj->scan_def.conc_active_dwell = SCAN_CONC_ACTIVE_DWELL_TIME; + scan_obj->scan_def.conc_passive_dwell = SCAN_CONC_PASSIVE_DWELL_TIME; + 
scan_obj->scan_def.conc_max_rest_time = SCAN_CONC_MAX_REST_TIME; + scan_obj->scan_def.conc_min_rest_time = SCAN_CONC_MIN_REST_TIME; + scan_obj->scan_def.honour_nl_scan_policy_flags = true; + scan_obj->scan_def.conc_idle_time = SCAN_CONC_IDLE_TIME; + scan_obj->scan_def.repeat_probe_time = SCAN_REPEAT_PROBE_TIME; + scan_obj->scan_def.probe_spacing_time = SCAN_PROBE_SPACING_TIME; + scan_obj->scan_def.probe_delay = SCAN_PROBE_DELAY; + scan_obj->scan_def.burst_duration = SCAN_BURST_DURATION; + scan_obj->scan_def.max_scan_time = SCAN_MAX_SCAN_TIME; + scan_obj->scan_def.num_probes = SCAN_NUM_PROBES; + scan_obj->scan_def.scan_cache_aging_time = SCAN_CACHE_AGING_TIME; + scan_obj->scan_def.max_bss_per_pdev = SCAN_MAX_BSS_PDEV; + scan_obj->scan_def.scan_priority = SCAN_PRIORITY; + scan_obj->scan_def.idle_time = SCAN_NETWORK_IDLE_TIMEOUT; + scan_obj->scan_def.adaptive_dwell_time_mode = SCAN_DWELL_MODE_DEFAULT; + scan_obj->scan_def.adaptive_dwell_time_mode_nc = + SCAN_DWELL_MODE_DEFAULT; + /* init burst durations */ + scan_obj->scan_def.sta_scan_burst_duration = 0; + scan_obj->scan_def.p2p_scan_burst_duration = 0; + scan_obj->scan_def.go_scan_burst_duration = 0; + scan_obj->scan_def.ap_scan_burst_duration = 0; + /* scan contrl flags */ + scan_obj->scan_def.scan_f_passive = true; + scan_obj->scan_def.scan_f_ofdm_rates = true; + scan_obj->scan_def.scan_f_2ghz = true; + scan_obj->scan_def.scan_f_5ghz = true; + scan_obj->scan_def.scan_f_chan_stat_evnt = SCAN_CHAN_STATS_EVENT_ENAB; + /* scan event flags */ + scan_obj->scan_def.scan_ev_started = true; + scan_obj->scan_def.scan_ev_completed = true; + scan_obj->scan_def.scan_ev_bss_chan = true; + scan_obj->scan_def.scan_ev_foreign_chan = true; + scan_obj->scan_def.scan_ev_foreign_chn_exit = true; + scan_obj->scan_def.scan_ev_dequeued = true; + scan_obj->scan_def.scan_ev_preempted = true; + scan_obj->scan_def.scan_ev_start_failed = true; + scan_obj->scan_def.scan_ev_restarted = true; + /* init scan id seed */ + 
qdf_atomic_init(&scan_obj->scan_ids); + + return wlan_pno_global_init(&scan_obj->pno_cfg); +} + +static QDF_STATUS +scm_remove_scan_event_handler(struct pdev_scan_ev_handler *pdev_ev_handler, + struct cb_handler *entry) +{ + struct cb_handler *last_entry; + uint32_t handler_cnt = pdev_ev_handler->handler_cnt; + + /* Replace event handler being deleted + * with the last one in the list. + */ + last_entry = &(pdev_ev_handler->cb_handlers[handler_cnt - 1]); + entry->func = last_entry->func; + entry->arg = last_entry->arg; + + /* Clear our last entry */ + last_entry->func = NULL; + last_entry->arg = NULL; + pdev_ev_handler->handler_cnt--; + + return QDF_STATUS_SUCCESS; +} + +void +ucfg_scan_unregister_event_handler(struct wlan_objmgr_pdev *pdev, + scan_event_handler event_cb, void *arg) +{ + uint8_t found = false; + uint32_t idx; + uint32_t handler_cnt; + struct wlan_scan_obj *scan; + struct cb_handler *cb_handler; + struct pdev_scan_ev_handler *pdev_ev_handler; + + scm_debug("pdev: %pK, event_cb: 0x%pK, arg: 0x%pK", pdev, event_cb, + arg); + if (!pdev) { + scm_err("null pdev"); + return; + } + scan = wlan_pdev_get_scan_obj(pdev); + pdev_ev_handler = wlan_pdev_get_pdev_scan_ev_handlers(pdev); + if (!pdev_ev_handler) + return; + + cb_handler = &(pdev_ev_handler->cb_handlers[0]); + + qdf_spin_lock_bh(&scan->lock); + handler_cnt = pdev_ev_handler->handler_cnt; + if (!handler_cnt) { + qdf_spin_unlock_bh(&scan->lock); + scm_info("No event handlers registered"); + return; + } + + for (idx = 0; idx < MAX_SCAN_EVENT_HANDLERS_PER_PDEV; + idx++, cb_handler++) { + if ((cb_handler->func == event_cb) && + (cb_handler->arg == arg)) { + /* Event handler found, remove it + * from event handler list. + */ + found = true; + scm_remove_scan_event_handler(pdev_ev_handler, + cb_handler); + handler_cnt--; + break; + } + } + qdf_spin_unlock_bh(&scan->lock); + + scm_debug("event handler %s, remaining handlers: %d", + (found ? 
"removed" : "not found"), handler_cnt); +} + +QDF_STATUS +ucfg_scan_init_default_params(struct wlan_objmgr_vdev *vdev, + struct scan_start_request *req) +{ + struct scan_default_params *def; + + if (!vdev | !req) { + scm_err("vdev: 0x%pK, req: 0x%pK", vdev, req); + return QDF_STATUS_E_INVAL; + } + def = wlan_vdev_get_def_scan_params(vdev); + if (!def) { + scm_err("wlan_vdev_get_def_scan_params returned NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + + /* Zero out everything and explicitly set fields as required */ + qdf_mem_zero(req, sizeof(*req)); + + req->vdev = vdev; + req->scan_req.vdev_id = wlan_vdev_get_id(vdev); + req->scan_req.p2p_scan_type = SCAN_NON_P2P_DEFAULT; + req->scan_req.scan_priority = def->scan_priority; + req->scan_req.dwell_time_active = def->active_dwell; + req->scan_req.dwell_time_active_2g = def->active_dwell_2g; + req->scan_req.dwell_time_passive = def->passive_dwell; + req->scan_req.min_rest_time = def->min_rest_time; + req->scan_req.max_rest_time = def->max_rest_time; + req->scan_req.repeat_probe_time = def->repeat_probe_time; + req->scan_req.probe_spacing_time = def->probe_spacing_time; + req->scan_req.idle_time = def->idle_time; + req->scan_req.max_scan_time = def->max_scan_time; + req->scan_req.probe_delay = def->probe_delay; + req->scan_req.burst_duration = def->burst_duration; + req->scan_req.n_probes = def->num_probes; + req->scan_req.adaptive_dwell_time_mode = + def->adaptive_dwell_time_mode; + req->scan_req.scan_flags = def->scan_flags; + req->scan_req.scan_events = def->scan_events; + req->scan_req.scan_random.randomize = def->enable_mac_spoofing; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +ucfg_scan_init_ssid_params(struct scan_start_request *req, + uint32_t num_ssid, struct wlan_ssid *ssid_list) +{ + uint32_t max_ssid = sizeof(req->scan_req.ssid) / + sizeof(req->scan_req.ssid[0]); + + if (!req) { + scm_err("null request"); + return QDF_STATUS_E_NULL_VALUE; + } + if (!num_ssid) { + /* empty channel list provided */ + 
req->scan_req.num_ssids = 0; + qdf_mem_zero(&req->scan_req.ssid[0], + sizeof(req->scan_req.ssid)); + return QDF_STATUS_SUCCESS; + } + if (!ssid_list) { + scm_err("null ssid_list while num_ssid: %d", num_ssid); + return QDF_STATUS_E_NULL_VALUE; + } + if (num_ssid > max_ssid) { + /* got a big list. alert and continue */ + scm_warn("overflow: received %d, max supported : %d", + num_ssid, max_ssid); + return QDF_STATUS_E_E2BIG; + } + + if (max_ssid > num_ssid) + max_ssid = num_ssid; + + req->scan_req.num_ssids = max_ssid; + qdf_mem_copy(&req->scan_req.ssid[0], ssid_list, + (req->scan_req.num_ssids * sizeof(req->scan_req.ssid[0]))); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +ucfg_scan_init_bssid_params(struct scan_start_request *req, + uint32_t num_bssid, struct qdf_mac_addr *bssid_list) +{ + uint32_t max_bssid = sizeof(req->scan_req.bssid_list) / + sizeof(req->scan_req.bssid_list[0]); + + if (!req) { + scm_err("null request"); + return QDF_STATUS_E_NULL_VALUE; + } + if (!num_bssid) { + /* empty channel list provided */ + req->scan_req.num_bssid = 0; + qdf_mem_zero(&req->scan_req.bssid_list[0], + sizeof(req->scan_req.bssid_list)); + return QDF_STATUS_SUCCESS; + } + if (!bssid_list) { + scm_err("null bssid_list while num_bssid: %d", num_bssid); + return QDF_STATUS_E_NULL_VALUE; + } + if (num_bssid > max_bssid) { + /* got a big list. alert and continue */ + scm_warn("overflow: received %d, max supported : %d", + num_bssid, max_bssid); + return QDF_STATUS_E_E2BIG; + } + + if (max_bssid > num_bssid) + max_bssid = num_bssid; + + req->scan_req.num_bssid = max_bssid; + qdf_mem_copy(&req->scan_req.bssid_list[0], bssid_list, + req->scan_req.num_bssid * sizeof(req->scan_req.bssid_list[0])); + + return QDF_STATUS_SUCCESS; +} + +/** + * is_chan_enabled_for_scan() - helper API to check if a frequency + * is allowed to scan. 
+ * @reg_chan: regulatory_channel object + * @low_2g: lower 2.4 GHz frequency thresold + * @high_2g: upper 2.4 GHz frequency thresold + * @low_5g: lower 5 GHz frequency thresold + * @high_5g: upper 5 GHz frequency thresold + * + * Return: true if scan is allowed. false otherwise. + */ +static bool +is_chan_enabled_for_scan(struct regulatory_channel *reg_chan, + uint32_t low_2g, uint32_t high_2g, uint32_t low_5g, + uint32_t high_5g) +{ + if (reg_chan->state == CHANNEL_STATE_DISABLE) + return false; + if (reg_chan->nol_chan) + return false; + /* 2 GHz channel */ + if ((util_scan_scm_chan_to_band(reg_chan->chan_num) == + WLAN_BAND_2_4_GHZ) && + ((reg_chan->center_freq < low_2g) || + (reg_chan->center_freq > high_2g))) + return false; + else if ((util_scan_scm_chan_to_band(reg_chan->chan_num) == + WLAN_BAND_5_GHZ) && + ((reg_chan->center_freq < low_5g) || + (reg_chan->center_freq > high_5g))) + return false; + + return true; +} + +QDF_STATUS +ucfg_scan_init_chanlist_params(struct scan_start_request *req, + uint32_t num_chans, uint32_t *chan_list, uint32_t *phymode) +{ + uint32_t idx; + QDF_STATUS status; + struct regulatory_channel *reg_chan_list = NULL; + uint32_t low_2g, high_2g, low_5g, high_5g; + struct wlan_objmgr_pdev *pdev = NULL; + uint32_t *scan_freqs = NULL; + uint32_t max_chans = sizeof(req->scan_req.chan_list.chan) / + sizeof(req->scan_req.chan_list.chan[0]); + if (!req) { + scm_err("null request"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (req->vdev) + pdev = wlan_vdev_get_pdev(req->vdev); + /* + * If 0 channels are provided for scan and + * wide band scan is enabled, scan all 20 mhz + * available channels. This is required as FW + * scans all channel/phy mode combinations + * provided in scan channel list if 0 chans are + * provided in scan request causing scan to take + * too much time to complete. 
+ */ + if (pdev && !num_chans) { + reg_chan_list = qdf_mem_malloc_atomic(NUM_CHANNELS * + sizeof(struct regulatory_channel)); + if (!reg_chan_list) { + scm_err("Couldn't allocate reg_chan_list memory"); + status = QDF_STATUS_E_NOMEM; + goto end; + } + scan_freqs = + qdf_mem_malloc_atomic(sizeof(uint32_t) * max_chans); + if (!scan_freqs) { + scm_err("Couldn't allocate scan_freqs memory"); + status = QDF_STATUS_E_NOMEM; + goto end; + } + status = ucfg_reg_get_current_chan_list(pdev, reg_chan_list); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("Couldn't get current chan list"); + goto end; + } + status = wlan_reg_get_freq_range(pdev, &low_2g, + &high_2g, &low_5g, &high_5g); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("Couldn't get frequency range"); + goto end; + } + + for (idx = 0, num_chans = 0; + (idx < NUM_CHANNELS && num_chans < max_chans); idx++) + if (is_chan_enabled_for_scan(®_chan_list[idx], + low_2g, high_2g, low_5g, high_5g)) + scan_freqs[num_chans++] = + reg_chan_list[idx].center_freq; + + chan_list = scan_freqs; + } + + if (!num_chans) { + /* empty channel list provided */ + qdf_mem_zero(&req->scan_req.chan_list, + sizeof(req->scan_req.chan_list)); + req->scan_req.chan_list.num_chan = 0; + status = QDF_STATUS_SUCCESS; + goto end; + } + if (!chan_list) { + scm_err("null chan_list while num_chans: %d", num_chans); + status = QDF_STATUS_E_NULL_VALUE; + goto end; + } + + if (num_chans > max_chans) { + /* got a big list. alert and fail */ + scm_warn("overflow: received %d, max supported : %d", + num_chans, max_chans); + status = QDF_STATUS_E_E2BIG; + goto end; + } + + req->scan_req.chan_list.num_chan = num_chans; + for (idx = 0; idx < num_chans; idx++) { + req->scan_req.chan_list.chan[idx].freq = + (chan_list[idx] > WLAN_24_GHZ_BASE_FREQ) ? 
+ chan_list[idx] : + wlan_reg_chan_to_freq(pdev, chan_list[idx]); + if (phymode) + req->scan_req.chan_list.chan[idx].phymode = + phymode[idx]; + else if (req->scan_req.chan_list.chan[idx].freq <= + WLAN_CHAN_15_FREQ) + req->scan_req.chan_list.chan[idx].phymode = + SCAN_PHY_MODE_11G; + else + req->scan_req.chan_list.chan[idx].phymode = + SCAN_PHY_MODE_11A; + + scm_debug("chan[%d]: freq:%d, phymode:%d", idx, + req->scan_req.chan_list.chan[idx].freq, + req->scan_req.chan_list.chan[idx].phymode); + } + +end: + if (scan_freqs) + qdf_mem_free(scan_freqs); + + if (reg_chan_list) + qdf_mem_free(reg_chan_list); + + return QDF_STATUS_SUCCESS; +} + +static inline enum scm_scan_status +get_scan_status_from_serialization_status( + enum wlan_serialization_cmd_status status) +{ + enum scm_scan_status scan_status; + + switch (status) { + case WLAN_SER_CMD_IN_PENDING_LIST: + scan_status = SCAN_IS_PENDING; + break; + case WLAN_SER_CMD_IN_ACTIVE_LIST: + scan_status = SCAN_IS_ACTIVE; + break; + case WLAN_SER_CMDS_IN_ALL_LISTS: + scan_status = SCAN_IS_ACTIVE_AND_PENDING; + break; + case WLAN_SER_CMD_NOT_FOUND: + scan_status = SCAN_NOT_IN_PROGRESS; + break; + default: + scm_warn("invalid serialization status %d", status); + QDF_ASSERT(0); + scan_status = SCAN_NOT_IN_PROGRESS; + break; + } + + return scan_status; +} + +enum scm_scan_status +ucfg_scan_get_vdev_status(struct wlan_objmgr_vdev *vdev) +{ + enum wlan_serialization_cmd_status status; + + if (!vdev) { + scm_err("null vdev"); + return SCAN_NOT_IN_PROGRESS; + } + status = wlan_serialization_vdev_scan_status(vdev); + + return get_scan_status_from_serialization_status(status); +} + +enum scm_scan_status +ucfg_scan_get_pdev_status(struct wlan_objmgr_pdev *pdev) +{ + enum wlan_serialization_cmd_status status; + + if (!pdev) { + scm_err("null pdev"); + return SCAN_NOT_IN_PROGRESS; + } + status = wlan_serialization_pdev_scan_status(pdev); + + return get_scan_status_from_serialization_status(status); +} + +static void 
+ucfg_scan_register_unregister_bcn_cb(struct wlan_objmgr_psoc *psoc, + bool enable) +{ + QDF_STATUS status; + struct mgmt_txrx_mgmt_frame_cb_info cb_info[2]; + + cb_info[0].frm_type = MGMT_PROBE_RESP; + cb_info[0].mgmt_rx_cb = tgt_scan_bcn_probe_rx_callback; + cb_info[1].frm_type = MGMT_BEACON; + cb_info[1].mgmt_rx_cb = tgt_scan_bcn_probe_rx_callback; + + if (enable) + status = wlan_mgmt_txrx_register_rx_cb(psoc, + WLAN_UMAC_COMP_SCAN, cb_info, 2); + else + status = wlan_mgmt_txrx_deregister_rx_cb(psoc, + WLAN_UMAC_COMP_SCAN, cb_info, 2); + if (status != QDF_STATUS_SUCCESS) + scm_err("%s the Handle with MGMT TXRX layer has failed", + enable ? "Registering" : "Deregistering"); +} + +static void ucfg_scan_assign_rssi_category(struct scan_default_params *params, + int32_t best_ap_rssi, uint32_t cat_offset) +{ + int i; + + scm_debug("best AP RSSI:%d, cat offset: %d", best_ap_rssi, cat_offset); + if (cat_offset) + for (i = 0; i < SCM_NUM_RSSI_CAT; i++) { + params->rssi_cat[SCM_NUM_RSSI_CAT - i - 1] = + (best_ap_rssi - + params->select_5ghz_margin - + (int)(i * cat_offset)); + params->bss_prefer_val[i] = i; + } +} + +QDF_STATUS ucfg_scan_update_user_config(struct wlan_objmgr_psoc *psoc, + struct scan_user_cfg *scan_cfg) +{ + struct wlan_scan_obj *scan_obj; + struct scan_default_params *scan_def; + + if (!psoc) { + scm_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (scan_obj == NULL) { + scm_err("Failed to get scan object"); + return QDF_STATUS_E_FAILURE; + } + + scan_def = &scan_obj->scan_def; + scan_def->allow_dfs_chan_in_first_scan = + scan_cfg->allow_dfs_chan_in_first_scan; + scan_def->allow_dfs_chan_in_scan = scan_cfg->allow_dfs_chan_in_scan; + scan_def->use_wake_lock_in_user_scan = + scan_cfg->use_wake_lock_in_user_scan; + scan_def->active_dwell = scan_cfg->active_dwell; + scan_def->active_dwell_2g = scan_cfg->active_dwell_2g; + scan_def->passive_dwell = scan_cfg->passive_dwell; + scan_def->conc_active_dwell = 
scan_cfg->conc_active_dwell; + scan_def->conc_passive_dwell = scan_cfg->conc_passive_dwell; + scan_def->conc_max_rest_time = scan_cfg->conc_max_rest_time; + scan_def->conc_min_rest_time = scan_cfg->conc_min_rest_time; + scan_def->conc_idle_time = scan_cfg->conc_idle_time; + scan_def->scan_cache_aging_time = scan_cfg->scan_cache_aging_time; + scan_def->prefer_5ghz = scan_cfg->prefer_5ghz; + scan_def->select_5ghz_margin = scan_cfg->select_5ghz_margin; + scan_def->adaptive_dwell_time_mode = scan_cfg->scan_dwell_time_mode; + scan_def->adaptive_dwell_time_mode_nc = + scan_cfg->scan_dwell_time_mode_nc; + scan_def->honour_nl_scan_policy_flags = + scan_cfg->honour_nl_scan_policy_flags; + scan_def->scan_f_chan_stat_evnt = scan_cfg->is_snr_monitoring_enabled; + scan_obj->ie_whitelist = scan_cfg->ie_whitelist; + scan_def->repeat_probe_time = scan_cfg->usr_cfg_probe_rpt_time; + scan_def->num_probes = scan_cfg->usr_cfg_num_probes; + scan_def->is_bssid_hint_priority = scan_cfg->is_bssid_hint_priority; + scan_def->enable_mac_spoofing = scan_cfg->enable_mac_spoofing; + scan_def->sta_miracast_mcc_rest_time = + scan_cfg->sta_miracast_mcc_rest_time; + scan_def->sta_scan_burst_duration = scan_cfg->sta_scan_burst_duration; + scan_def->p2p_scan_burst_duration = scan_cfg->p2p_scan_burst_duration; + scan_def->go_scan_burst_duration = scan_cfg->go_scan_burst_duration; + scan_def->ap_scan_burst_duration = scan_cfg->ap_scan_burst_duration; + scan_def->skip_dfs_chan_in_p2p_search = + scan_cfg->skip_dfs_chan_in_p2p_search; + ucfg_scan_assign_rssi_category(scan_def, + scan_cfg->scan_bucket_threshold, + scan_cfg->rssi_cat_gap); + + ucfg_scan_update_pno_config(&scan_obj->pno_cfg, + &scan_cfg->pno_cfg); + + qdf_mem_copy(&scan_def->score_config, &scan_cfg->score_config, + sizeof(struct scoring_config)); + scm_validate_scoring_config(&scan_def->score_config); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_scan_update_roam_params(struct wlan_objmgr_psoc *psoc, + struct roam_filter_params 
*roam_params) +{ + struct scan_default_params *scan_def; + + if (!psoc) { + scm_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + scan_def = wlan_scan_psoc_get_def_params(psoc); + if (!scan_def) { + scm_err("Failed to get scan object"); + return QDF_STATUS_E_FAILURE; + } + + qdf_mem_copy(&scan_def->roam_params, roam_params, + sizeof(struct roam_filter_params)); + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD +static QDF_STATUS +ucfg_scan_cancel_pdev_scan(struct wlan_objmgr_pdev *pdev) +{ + struct scan_cancel_request *req; + QDF_STATUS status; + struct wlan_objmgr_vdev *vdev; + + req = qdf_mem_malloc_atomic(sizeof(*req)); + if (!req) { + scm_err("Failed to allocate memory"); + return QDF_STATUS_E_NOMEM; + } + + vdev = wlan_objmgr_pdev_get_first_vdev(pdev, WLAN_SCAN_ID); + if (!vdev) { + scm_err("Failed to get vdev"); + qdf_mem_free(req); + return QDF_STATUS_E_INVAL; + } + req->vdev = vdev; + req->cancel_req.scan_id = INVAL_SCAN_ID; + req->cancel_req.pdev_id = wlan_objmgr_pdev_get_pdev_id(pdev); + req->cancel_req.vdev_id = INVAL_VDEV_ID; + req->cancel_req.req_type = WLAN_SCAN_CANCEL_PDEV_ALL; + status = ucfg_scan_cancel_sync(req); + if (QDF_IS_STATUS_ERROR(status)) + scm_err("Cancel scan request failed"); + wlan_objmgr_vdev_release_ref(vdev, WLAN_SCAN_ID); + + return status; +} + +static QDF_STATUS +ucfg_scan_suspend_handler(struct wlan_objmgr_psoc *psoc, void *arg) +{ + struct wlan_objmgr_pdev *pdev = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + int i; + + ucfg_scan_set_enable(psoc, false); + /* Check all pdev */ + for (i = 0; i < WLAN_UMAC_MAX_PDEVS; i++) { + pdev = wlan_objmgr_get_pdev_by_id(psoc, i, WLAN_SCAN_ID); + if (!pdev) + continue; + if (ucfg_scan_get_pdev_status(pdev) != + SCAN_NOT_IN_PROGRESS) + status = ucfg_scan_cancel_pdev_scan(pdev); + wlan_objmgr_pdev_release_ref(pdev, WLAN_SCAN_ID); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("failed to cancel scan for pdev_id %d", i); + return status; + } + } + + return 
QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +ucfg_scan_resume_handler(struct wlan_objmgr_psoc *psoc, void *arg) +{ + ucfg_scan_set_enable(psoc, true); + return QDF_STATUS_SUCCESS; +} + +static inline void +ucfg_scan_register_pmo_handler(void) +{ + pmo_register_suspend_handler(WLAN_UMAC_COMP_SCAN, + ucfg_scan_suspend_handler, NULL); + pmo_register_resume_handler(WLAN_UMAC_COMP_SCAN, + ucfg_scan_resume_handler, NULL); +} + +static inline void +ucfg_scan_unregister_pmo_handler(void) +{ + pmo_unregister_suspend_handler(WLAN_UMAC_COMP_SCAN, + ucfg_scan_suspend_handler); + pmo_unregister_resume_handler(WLAN_UMAC_COMP_SCAN, + ucfg_scan_resume_handler); +} + +#else +static inline void +ucfg_scan_register_pmo_handler(void) +{ +} + +static inline void +ucfg_scan_unregister_pmo_handler(void) +{ +} +#endif + +QDF_STATUS +ucfg_scan_psoc_open(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scm_debug("psoc open: 0x%pK", psoc); + if (!psoc) { + scm_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (scan_obj == NULL) { + scm_err("Failed to get scan object"); + return QDF_STATUS_E_FAILURE; + } + /* Initialize the scan Globals */ + wlan_scan_global_init(scan_obj); + qdf_spinlock_create(&scan_obj->lock); + ucfg_scan_register_pmo_handler(); + scm_db_init(psoc); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +ucfg_scan_psoc_close(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scm_debug("psoc close: 0x%pK", psoc); + if (!psoc) { + scm_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + scm_db_deinit(psoc); + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (scan_obj == NULL) { + scm_err("Failed to get scan object"); + return QDF_STATUS_E_FAILURE; + } + ucfg_scan_unregister_pmo_handler(); + qdf_spinlock_destroy(&scan_obj->lock); + wlan_pno_global_deinit(&scan_obj->pno_cfg); + + return QDF_STATUS_SUCCESS; +} + +static bool scm_serialization_scan_rules_cb( + union 
wlan_serialization_rules_info *comp_info, + uint8_t comp_id) +{ + switch (comp_id) { + case WLAN_UMAC_COMP_TDLS: + if (comp_info->scan_info.is_tdls_in_progress) { + scm_debug("Cancel scan. Tdls in progress"); + return false; + } + break; + case WLAN_UMAC_COMP_DFS: + if (comp_info->scan_info.is_cac_in_progress) { + scm_debug("Cancel scan. CAC in progress"); + return false; + } + break; + default: + scm_debug("not handled comp_id %d", comp_id); + break; + } + + return true; +} + +QDF_STATUS +ucfg_scan_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + + scm_debug("psoc enable: 0x%pK", psoc); + if (!psoc) { + scm_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + /* Subscribe for scan events from lmac layesr */ + status = tgt_scan_register_ev_handler(psoc); + QDF_ASSERT(status == QDF_STATUS_SUCCESS); + if (wlan_reg_11d_original_enabled_on_host(psoc)) + scm_11d_cc_db_init(psoc); + ucfg_scan_register_unregister_bcn_cb(psoc, true); + status = wlan_serialization_register_apply_rules_cb(psoc, + WLAN_SER_CMD_SCAN, + scm_serialization_scan_rules_cb); + QDF_ASSERT(status == QDF_STATUS_SUCCESS); + return status; +} + +QDF_STATUS +ucfg_scan_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status; + + scm_debug("psoc disable: 0x%pK", psoc); + if (!psoc) { + scm_err("null psoc"); + return QDF_STATUS_E_FAILURE; + } + /* Unsubscribe for scan events from lmac layesr */ + status = tgt_scan_unregister_ev_handler(psoc); + QDF_ASSERT(status == QDF_STATUS_SUCCESS); + ucfg_scan_register_unregister_bcn_cb(psoc, false); + if (wlan_reg_11d_original_enabled_on_host(psoc)) + scm_11d_cc_db_deinit(psoc); + + return status; +} + +uint32_t +ucfg_scan_get_max_active_scans(struct wlan_objmgr_psoc *psoc) +{ + struct scan_default_params *scan_params = NULL; + + if (!psoc) { + scm_err("null psoc"); + return 0; + } + scan_params = wlan_scan_psoc_get_def_params(psoc); + if (!scan_params) { + scm_err("Failed to get scan object"); + return 0; + } + + return 
scan_params->max_active_scans_allowed; +} + +bool ucfg_copy_ie_whitelist_attrs(struct wlan_objmgr_psoc *psoc, + struct probe_req_whitelist_attr *ie_whitelist) +{ + struct wlan_scan_obj *scan_obj = NULL; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return false; + + qdf_mem_copy(ie_whitelist, &scan_obj->ie_whitelist, + sizeof(*ie_whitelist)); + + return true; +} + +bool ucfg_ie_whitelist_enabled(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_vdev *vdev) +{ + struct wlan_scan_obj *scan_obj = NULL; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return false; + + if ((wlan_vdev_mlme_get_opmode(vdev) != QDF_STA_MODE) || + wlan_vdev_is_up(vdev)) + return false; + + if (!scan_obj->ie_whitelist.white_list) + return false; + + return true; +} + +void ucfg_scan_set_bt_activity(struct wlan_objmgr_psoc *psoc, + bool bt_a2dp_active) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return; + } + scan_obj->bt_a2dp_enabled = bt_a2dp_active; +} + +bool ucfg_scan_get_bt_activity(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object"); + return false; + } + + return scan_obj->bt_a2dp_enabled; +} + +void ucfg_scan_set_vdev_del_in_progress(struct wlan_objmgr_vdev *vdev) +{ + struct scan_vdev_obj *scan_vdev_obj; + + if (!vdev) { + scm_err("invalid vdev"); + return; + } + scan_vdev_obj = wlan_get_vdev_scan_obj(vdev); + if (!scan_vdev_obj) { + scm_err("null scan_vdev_obj"); + return; + } + scan_vdev_obj->is_vdev_delete_in_progress = true; +} + +void ucfg_scan_clear_vdev_del_in_progress(struct wlan_objmgr_vdev *vdev) +{ + struct scan_vdev_obj *scan_vdev_obj; + + if (!vdev) { + scm_err("invalid vdev"); + return; + } + scan_vdev_obj = wlan_get_vdev_scan_obj(vdev); + if (!scan_vdev_obj) { + scm_err("null scan_vdev_obj"); + return; + } + 
scan_vdev_obj->is_vdev_delete_in_progress = false; +} + +bool ucfg_scan_cfg_honour_nl_scan_policy_flags(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return false; + + return scan_obj->scan_def.honour_nl_scan_policy_flags; +} + +bool ucfg_scan_wake_lock_in_user_scan(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return false; + + return scan_obj->scan_def.use_wake_lock_in_user_scan; +} + +QDF_STATUS +ucfg_scan_set_global_config(struct wlan_objmgr_psoc *psoc, + enum scan_config config, uint32_t val) +{ + struct wlan_scan_obj *scan_obj; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) { + scm_err("Failed to get scan object config:%d, val:%d", + config, val); + return QDF_STATUS_E_INVAL; + } + switch (config) { + case SCAN_CFG_DISABLE_SCAN_COMMAND_TIMEOUT: + scan_obj->disable_timeout = !!val; + break; + case SCAN_CFG_DROP_BCN_ON_CHANNEL_MISMATCH: + scan_obj->drop_bcn_on_chan_mismatch = !!val; + break; + + default: + status = QDF_STATUS_E_INVAL; + break; + } + + return status; +} + +QDF_STATUS ucfg_scan_update_mlme_by_bssinfo(struct wlan_objmgr_pdev *pdev, + struct bss_info *bss_info, struct mlme_info *mlme) +{ + QDF_STATUS status; + + status = scm_scan_update_mlme_by_bssinfo(pdev, bss_info, mlme); + + return status; +} + +QDF_STATUS +ucfg_scan_get_global_config(struct wlan_objmgr_psoc *psoc, + enum scan_config config, uint32_t *val) +{ + struct wlan_scan_obj *scan_obj; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj || !val) { + scm_err("scan object:%pK config:%d, val:0x%pK", + scan_obj, config, val); + return QDF_STATUS_E_INVAL; + } + switch (config) { + case SCAN_CFG_DISABLE_SCAN_COMMAND_TIMEOUT: + *val = scan_obj->disable_timeout; + break; + case SCAN_CFG_DROP_BCN_ON_CHANNEL_MISMATCH: + 
*val = scan_obj->drop_bcn_on_chan_mismatch; + break; + + default: + status = QDF_STATUS_E_INVAL; + break; + } + + return status; +} + +uint32_t ucfg_scan_get_max_cmd_allowed(void) +{ + return MAX_SCAN_COMMANDS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_utils_api.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_utils_api.c new file mode 100644 index 0000000000000000000000000000000000000000..a6a031889691400fa8d26c935aee2bfe6143a815 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_utils_api.c @@ -0,0 +1,1012 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/* + * DOC: Defines scan utility functions + */ + +#include +#include +#include +#include <../../core/src/wlan_scan_cache_db.h> +#include <../../core/src/wlan_scan_main.h> +#include + +const char* +util_scan_get_ev_type_name(enum scan_event_type type) +{ + static const char * const event_name[] = { + [SCAN_EVENT_TYPE_STARTED] = "STARTED", + [SCAN_EVENT_TYPE_COMPLETED] = "COMPLETED", + [SCAN_EVENT_TYPE_BSS_CHANNEL] = "HOME_CHANNEL", + [SCAN_EVENT_TYPE_FOREIGN_CHANNEL] = "FOREIGN_CHANNEL", + [SCAN_EVENT_TYPE_DEQUEUED] = "DEQUEUED", + [SCAN_EVENT_TYPE_PREEMPTED] = "PREEMPTED", + [SCAN_EVENT_TYPE_START_FAILED] = "START_FAILED", + [SCAN_EVENT_TYPE_RESTARTED] = "RESTARTED", + [SCAN_EVENT_TYPE_FOREIGN_CHANNEL_EXIT] = "FOREIGN_CHANNEL_EXIT", + [SCAN_EVENT_TYPE_SUSPENDED] = "SUSPENDED", + [SCAN_EVENT_TYPE_RESUMED] = "RESUMED", + [SCAN_EVENT_TYPE_NLO_COMPLETE] = "NLO_COMPLETE", + [SCAN_EVENT_TYPE_NLO_MATCH] = "NLO_MATCH", + [SCAN_EVENT_TYPE_INVALID] = "INVALID", + [SCAN_EVENT_TYPE_GPIO_TIMEOUT] = "GPIO_TIMEOUT", + [SCAN_EVENT_TYPE_RADIO_MEASUREMENT_START] = + "RADIO_MEASUREMENT_START", + [SCAN_EVENT_TYPE_RADIO_MEASUREMENT_END] = + "RADIO_MEASUREMENT_END", + [SCAN_EVENT_TYPE_BSSID_MATCH] = "BSSID_MATCH", + [SCAN_EVENT_TYPE_FOREIGN_CHANNEL_GET_NF] = + "FOREIGN_CHANNEL_GET_NF", + }; + + if (type >= SCAN_EVENT_TYPE_MAX) + return "UNKNOWN"; + + return event_name[type]; +} + + +const char* +util_scan_get_ev_reason_name(enum scan_completion_reason reason) +{ + static const char * const reason_name[] = { + [SCAN_REASON_NONE] = "NONE", + [SCAN_REASON_COMPLETED] = "COMPLETED", + [SCAN_REASON_CANCELLED] = "CANCELLED", + [SCAN_REASON_PREEMPTED] = "PREEMPTED", + [SCAN_REASON_TIMEDOUT] = "TIMEDOUT", + [SCAN_REASON_INTERNAL_FAILURE] = "INTERNAL_FAILURE", + [SCAN_REASON_SUSPENDED] = "SUSPENDED", + [SCAN_REASON_RUN_FAILED] = "RUN_FAILED", + [SCAN_REASON_TERMINATION_FUNCTION] = "TERMINATION_FUNCTION", + [SCAN_REASON_MAX_OFFCHAN_RETRIES] = "MAX_OFFCHAN_RETRIES", + }; + + if (reason >= 
SCAN_REASON_MAX) + return "UNKNOWN"; + + return reason_name[reason]; +} + +qdf_time_t +util_get_last_scan_time(struct wlan_objmgr_vdev *vdev) +{ + uint8_t pdev_id; + struct wlan_scan_obj *scan_obj; + + if (!vdev) { + scm_warn("null vdev"); + QDF_ASSERT(0); + return 0; + } + pdev_id = wlan_scan_vdev_get_pdev_id(vdev); + scan_obj = wlan_vdev_get_scan_obj(vdev); + + if (scan_obj) + return scan_obj->pdev_info[pdev_id].last_scan_time; + else + return 0; +} + +enum wlan_band util_scan_scm_chan_to_band(uint32_t chan) +{ + if (WLAN_CHAN_IS_2GHZ(chan)) + return WLAN_BAND_2_4_GHZ; + + return WLAN_BAND_5_GHZ; +} + +enum wlan_band util_scan_scm_freq_to_band(uint16_t freq) +{ + if (WLAN_REG_IS_24GHZ_CH_FREQ(freq)) + return WLAN_BAND_2_4_GHZ; + + return WLAN_BAND_5_GHZ; +} + +bool util_is_scan_entry_match( + struct scan_cache_entry *entry1, + struct scan_cache_entry *entry2) +{ + + if (entry1->cap_info.wlan_caps.ess != + entry2->cap_info.wlan_caps.ess) + return false; + + if (entry1->cap_info.wlan_caps.ess && + !qdf_mem_cmp(entry1->bssid.bytes, + entry2->bssid.bytes, QDF_MAC_ADDR_SIZE)) { + /* Check for BSS */ + if (util_is_ssid_match(&entry1->ssid, &entry2->ssid) || + util_scan_is_null_ssid(&entry1->ssid) || + util_scan_is_null_ssid(&entry2->ssid)) + return true; + } else if (entry1->cap_info.wlan_caps.ibss && + (entry1->channel.chan_idx == + entry2->channel.chan_idx)) { + /* + * Same channel cannot have same SSID for + * different IBSS, so no need to check BSSID + */ + if (util_is_ssid_match( + &entry1->ssid, &entry2->ssid)) + return true; + } else if (!entry1->cap_info.wlan_caps.ibss && + !entry1->cap_info.wlan_caps.ess && + !qdf_mem_cmp(entry1->bssid.bytes, + entry2->bssid.bytes, QDF_MAC_ADDR_SIZE)) { + /* In case of P2P devices, ess and ibss will be set to zero */ + return true; + } + + return false; +} + +static bool util_is_pureg_rate(uint8_t *rates, uint8_t nrates) +{ + static const uint8_t g_rates[] = {12, 18, 24, 36, 48, 72, 96, 108}; + bool pureg = false; + uint8_t i, 
j; + + for (i = 0; i < nrates; i++) { + for (j = 0; j < QDF_ARRAY_SIZE(g_rates); j++) { + if (WLAN_RV(rates[i]) == g_rates[j]) { + pureg = true; + break; + } + } + if (pureg) + break; + } + + return pureg; +} +static enum wlan_phymode +util_scan_get_phymode_5g(struct scan_cache_entry *scan_params) +{ + enum wlan_phymode phymode = WLAN_PHYMODE_AUTO; + uint16_t ht_cap = 0; + struct htcap_cmn_ie *htcap; + struct wlan_ie_htinfo_cmn *htinfo; + struct wlan_ie_vhtop *vhtop; + + htcap = (struct htcap_cmn_ie *) + util_scan_entry_htcap(scan_params); + htinfo = (struct wlan_ie_htinfo_cmn *) + util_scan_entry_htinfo(scan_params); + vhtop = (struct wlan_ie_vhtop *) + util_scan_entry_vhtop(scan_params); + + if (!(htcap && htinfo)) + return WLAN_PHYMODE_11A; + + if (htcap) + ht_cap = le16toh(htcap->hc_cap); + + if (util_scan_entry_vhtcap(scan_params) && vhtop) { + switch (vhtop->vht_op_chwidth) { + case WLAN_VHTOP_CHWIDTH_2040: + if ((ht_cap & WLAN_HTCAP_C_CHWIDTH40) && + (htinfo->hi_extchoff == + WLAN_HTINFO_EXTOFFSET_ABOVE)) + phymode = WLAN_PHYMODE_11AC_VHT40PLUS; + else if ((ht_cap & WLAN_HTCAP_C_CHWIDTH40) && + (htinfo->hi_extchoff == + WLAN_HTINFO_EXTOFFSET_BELOW)) + phymode = WLAN_PHYMODE_11AC_VHT40MINUS; + else + phymode = WLAN_PHYMODE_11AC_VHT20; + break; + case WLAN_VHTOP_CHWIDTH_80: + if (WLAN_IS_REVSIG_VHT80_80(vhtop)) + phymode = WLAN_PHYMODE_11AC_VHT80_80; + else if (WLAN_IS_REVSIG_VHT160(vhtop)) + phymode = WLAN_PHYMODE_11AC_VHT160; + else + phymode = WLAN_PHYMODE_11AC_VHT80; + break; + case WLAN_VHTOP_CHWIDTH_160: + phymode = WLAN_PHYMODE_11AC_VHT160; + break; + case WLAN_VHTOP_CHWIDTH_80_80: + phymode = WLAN_PHYMODE_11AC_VHT80_80; + break; + default: + scm_err("bad channel: %d", + vhtop->vht_op_chwidth); + break; + } + } else if ((ht_cap & WLAN_HTCAP_C_CHWIDTH40) && + (htinfo->hi_extchoff == WLAN_HTINFO_EXTOFFSET_ABOVE)) + phymode = WLAN_PHYMODE_11NA_HT40PLUS; + else if ((ht_cap & WLAN_HTCAP_C_CHWIDTH40) && + (htinfo->hi_extchoff == WLAN_HTINFO_EXTOFFSET_BELOW)) 
+ phymode = WLAN_PHYMODE_11NA_HT40MINUS; + else + phymode = WLAN_PHYMODE_11NA_HT20; + + return phymode; +} + +static enum wlan_phymode +util_scan_get_phymode_2g(struct scan_cache_entry *scan_params) +{ + enum wlan_phymode phymode = WLAN_PHYMODE_AUTO; + uint16_t ht_cap = 0; + struct htcap_cmn_ie *htcap; + struct wlan_ie_htinfo_cmn *htinfo; + struct wlan_ie_vhtop *vhtop; + + htcap = (struct htcap_cmn_ie *) + util_scan_entry_htcap(scan_params); + htinfo = (struct wlan_ie_htinfo_cmn *) + util_scan_entry_htinfo(scan_params); + vhtop = (struct wlan_ie_vhtop *) + util_scan_entry_vhtop(scan_params); + + if (htcap) + ht_cap = le16toh(htcap->hc_cap); + + if (htcap && htinfo) { + if ((ht_cap & WLAN_HTCAP_C_CHWIDTH40) && + (htinfo->hi_extchoff == WLAN_HTINFO_EXTOFFSET_ABOVE)) + phymode = WLAN_PHYMODE_11NG_HT40PLUS; + else if ((ht_cap & WLAN_HTCAP_C_CHWIDTH40) && + (htinfo->hi_extchoff == WLAN_HTINFO_EXTOFFSET_BELOW)) + phymode = WLAN_PHYMODE_11NG_HT40MINUS; + else + phymode = WLAN_PHYMODE_11NG_HT20; + } else if (util_scan_entry_xrates(scan_params)) { + /* only 11G stations will have more than 8 rates */ + phymode = WLAN_PHYMODE_11G; + } else { + /* Some mischievous g-only APs do not set extended rates */ + if (util_scan_entry_rates(scan_params)) { + if (util_is_pureg_rate(&scan_params->ie_list.rates[2], + scan_params->ie_list.rates[1])) + phymode = WLAN_PHYMODE_11G; + else + phymode = WLAN_PHYMODE_11B; + } else { + phymode = WLAN_PHYMODE_11B; + } + } + + return phymode; +} + +static QDF_STATUS +util_scan_parse_chan_switch_wrapper_ie(struct scan_cache_entry *scan_params, + struct ie_header *sub_ie, qdf_size_t sub_ie_len) +{ + /* Walk through to check nothing is malformed */ + while (sub_ie_len >= sizeof(struct ie_header)) { + /* At least one more header is present */ + sub_ie_len -= sizeof(struct ie_header); + + if (sub_ie->ie_len == 0) { + sub_ie += 1; + continue; + } + if (sub_ie_len < sub_ie->ie_len) { + scm_err("Incomplete corrupted IE:%x", + WLAN_ELEMID_CHAN_SWITCH_WRAP); 
+ return QDF_STATUS_E_INVAL; + } + switch (sub_ie->ie_id) { + case WLAN_ELEMID_COUNTRY: + scan_params->ie_list.country = (uint8_t *)sub_ie; + break; + case WLAN_ELEMID_WIDE_BAND_CHAN_SWITCH: + scan_params->ie_list.widebw = (uint8_t *)sub_ie; + break; + case WLAN_ELEMID_VHT_TX_PWR_ENVLP: + scan_params->ie_list.txpwrenvlp = (uint8_t *)sub_ie; + break; + } + /* Consume sub info element */ + sub_ie_len -= sub_ie->ie_len; + /* go to next Sub IE */ + sub_ie = (struct ie_header *) + (((uint8_t *) sub_ie) + + sizeof(struct ie_header) + sub_ie->ie_len); + } + + return QDF_STATUS_SUCCESS; +} + +bool +util_scan_is_hidden_ssid(struct ie_ssid *ssid) +{ + uint8_t i; + + /* + * We flag this as Hidden SSID if the Length is 0 + * or the SSID only contains 0's + */ + if (!ssid || !ssid->ssid_len) + return true; + + for (i = 0; i < ssid->ssid_len; i++) + if (ssid->ssid[i] != 0) + return false; + + /* All 0's */ + return true; +} + +static QDF_STATUS +util_scan_parse_extn_ie(struct scan_cache_entry *scan_params, + struct ie_header *ie) +{ + struct extn_ie_header *extn_ie = (struct extn_ie_header *) ie; + + switch (extn_ie->ie_extn_id) { + case WLAN_EXTN_ELEMID_SRP: + scan_params->ie_list.srp = (uint8_t *)ie; + break; + case WLAN_EXTN_ELEMID_HECAP: + scan_params->ie_list.hecap = (uint8_t *)ie; + break; + case WLAN_EXTN_ELEMID_HEOP: + scan_params->ie_list.heop = (uint8_t *)ie; + break; + case WLAN_EXTN_ELEMID_ESP: + scan_params->ie_list.esp = (uint8_t *)ie; + break; + case WLAN_EXTN_ELEMID_MUEDCA: + scan_params->ie_list.muedca = (uint8_t *)ie; + break; + default: + break; + } + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +util_scan_parse_vendor_ie(struct scan_cache_entry *scan_params, + struct ie_header *ie) +{ + if (scan_params->ie_list.vendor == NULL) + scan_params->ie_list.vendor = (uint8_t *)ie; + + if (is_wpa_oui((uint8_t *)ie)) { + scan_params->ie_list.wpa = (uint8_t *)ie; + } else if (is_wps_oui((uint8_t *)ie)) { + scan_params->ie_list.wps = (uint8_t *)ie; + /* WCN IE 
should be a subset of WPS IE */ + if (is_wcn_oui((uint8_t *)ie)) + scan_params->ie_list.wcn = (uint8_t *)ie; + } else if (is_wme_param((uint8_t *)ie)) { + scan_params->ie_list.wmeparam = (uint8_t *)ie; + } else if (is_wme_info((uint8_t *)ie)) { + scan_params->ie_list.wmeinfo = (uint8_t *)ie; + } else if (is_atheros_oui((uint8_t *)ie)) { + scan_params->ie_list.athcaps = (uint8_t *)ie; + } else if (is_atheros_extcap_oui((uint8_t *)ie)) { + scan_params->ie_list.athextcaps = (uint8_t *)ie; + } else if (is_sfa_oui((uint8_t *)ie)) { + scan_params->ie_list.sfa = (uint8_t *)ie; + } else if (is_p2p_oui((uint8_t *)ie)) { + scan_params->ie_list.p2p = (uint8_t *)ie; + } else if (is_qca_son_oui((uint8_t *)ie, + QCA_OUI_WHC_AP_INFO_SUBTYPE)) { + scan_params->ie_list.sonadv = (uint8_t *)ie; + } else if (is_ht_cap((uint8_t *)ie)) { + /* we only care if there isn't already an HT IE (ANA) */ + if (scan_params->ie_list.htcap == NULL) { + if (ie->ie_len != (WLAN_VENDOR_HT_IE_OFFSET_LEN + + sizeof(struct htcap_cmn_ie))) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.htcap = + (uint8_t *)&(((struct wlan_vendor_ie_htcap *)ie)->ie); + } + } else if (is_ht_info((uint8_t *)ie)) { + /* we only care if there isn't already an HT IE (ANA) */ + if (scan_params->ie_list.htinfo == NULL) { + if (ie->ie_len != WLAN_VENDOR_HT_IE_OFFSET_LEN + + sizeof(struct wlan_ie_htinfo_cmn)) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.htinfo = + (uint8_t *)&(((struct wlan_vendor_ie_htinfo *) + ie)->hi_ie); + } + } else if (is_interop_vht((uint8_t *)ie) && + !(scan_params->ie_list.vhtcap)) { + uint8_t *vendor_ie = (uint8_t *)(ie); + + if (ie->ie_len < ((WLAN_VENDOR_VHTCAP_IE_OFFSET + + sizeof(struct wlan_ie_vhtcaps)) - + sizeof(struct ie_header))) + return QDF_STATUS_E_INVAL; + vendor_ie = ((uint8_t *)(ie)) + WLAN_VENDOR_VHTCAP_IE_OFFSET; + if (vendor_ie[1] != (sizeof(struct wlan_ie_vhtcaps)) - + sizeof(struct ie_header)) + return QDF_STATUS_E_INVAL; + /* location where Interop Vht Cap IE and VHT OP IE 
Present */ + scan_params->ie_list.vhtcap = (((uint8_t *)(ie)) + + WLAN_VENDOR_VHTCAP_IE_OFFSET); + if (ie->ie_len > ((WLAN_VENDOR_VHTCAP_IE_OFFSET + + sizeof(struct wlan_ie_vhtcaps)) - + sizeof(struct ie_header))) { + if (ie->ie_len < ((WLAN_VENDOR_VHTOP_IE_OFFSET + + sizeof(struct wlan_ie_vhtop)) - + sizeof(struct ie_header))) + return QDF_STATUS_E_INVAL; + vendor_ie = ((uint8_t *)(ie)) + + WLAN_VENDOR_VHTOP_IE_OFFSET; + if (vendor_ie[1] != (sizeof(struct wlan_ie_vhtop) - + sizeof(struct ie_header))) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.vhtop = (((uint8_t *)(ie)) + + WLAN_VENDOR_VHTOP_IE_OFFSET); + } + } else if (is_bwnss_oui((uint8_t *)ie)) { + /* + * Bandwidth-NSS map has sub-type & version. + * hence copy data just after version byte + */ + scan_params->ie_list.bwnss_map = (((uint8_t *)ie) + 8); + } else if (is_mbo_oce_oui((uint8_t *)ie)) { + scan_params->ie_list.mbo_oce = (uint8_t *)ie; + } + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +util_scan_populate_bcn_ie_list(struct scan_cache_entry *scan_params) +{ + struct ie_header *ie, *sub_ie; + uint32_t ie_len, sub_ie_len; + QDF_STATUS status; + + ie_len = util_scan_entry_ie_len(scan_params); + ie = (struct ie_header *) + util_scan_entry_ie_data(scan_params); + + while (ie_len >= sizeof(struct ie_header)) { + ie_len -= sizeof(struct ie_header); + + if (!ie->ie_len) { + ie += 1; + continue; + } + + if (ie_len < ie->ie_len) { + scm_debug("Incomplete corrupted IE:%x", + ie->ie_id); + return QDF_STATUS_E_INVAL; + } + + switch (ie->ie_id) { + case WLAN_ELEMID_SSID: + if (ie->ie_len > (sizeof(struct ie_ssid) - + sizeof(struct ie_header))) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.ssid = (uint8_t *)ie; + break; + case WLAN_ELEMID_RATES: + if (ie->ie_len > WLAN_SUPPORTED_RATES_IE_MAX_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.rates = (uint8_t *)ie; + break; + case WLAN_ELEMID_DSPARMS: + if (ie->ie_len != WLAN_DS_PARAM_IE_MAX_LEN) + return QDF_STATUS_E_INVAL; + 
scan_params->ie_list.ds_param = (uint8_t *)ie; + scan_params->channel.chan_idx = + ((struct ds_ie *)ie)->cur_chan; + break; + case WLAN_ELEMID_TIM: + if (ie->ie_len < WLAN_TIM_IE_MIN_LENGTH) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.tim = (uint8_t *)ie; + scan_params->dtim_period = + ((struct wlan_tim_ie *)ie)->tim_period; + break; + case WLAN_ELEMID_COUNTRY: + if (ie->ie_len < WLAN_COUNTRY_IE_MIN_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.country = (uint8_t *)ie; + break; + case WLAN_ELEMID_QBSS_LOAD: + if (ie->ie_len != sizeof(struct qbss_load_ie) - + sizeof(struct ie_header)) { + /* + * Expected QBSS IE length is 5Bytes; For some + * old cisco AP, QBSS IE length is 4Bytes, which + * doesn't match with latest spec, So ignore + * QBSS IE in such case. + */ + break; + } + scan_params->ie_list.qbssload = (uint8_t *)ie; + break; + case WLAN_ELEMID_CHANSWITCHANN: + if (ie->ie_len != WLAN_CSA_IE_MAX_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.csa = (uint8_t *)ie; + break; + case WLAN_ELEMID_IBSSDFS: + if (ie->ie_len < WLAN_IBSSDFS_IE_MIN_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.ibssdfs = (uint8_t *)ie; + break; + case WLAN_ELEMID_QUIET: + if (ie->ie_len != WLAN_QUIET_IE_MAX_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.quiet = (uint8_t *)ie; + break; + case WLAN_ELEMID_ERP: + if (ie->ie_len != (sizeof(struct erp_ie) - + sizeof(struct ie_header))) + return QDF_STATUS_E_INVAL; + scan_params->erp = ((struct erp_ie *)ie)->value; + break; + case WLAN_ELEMID_HTCAP_ANA: + if (ie->ie_len != sizeof(struct htcap_cmn_ie)) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.htcap = + (uint8_t *)&(((struct htcap_ie *)ie)->ie); + break; + case WLAN_ELEMID_RSN: + if (ie->ie_len < WLAN_RSN_IE_MIN_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.rsn = (uint8_t *)ie; + break; + case WLAN_ELEMID_XRATES: + scan_params->ie_list.xrates = (uint8_t *)ie; + break; + case WLAN_ELEMID_EXTCHANSWITCHANN: + if (ie->ie_len != 
WLAN_XCSA_IE_MAX_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.xcsa = (uint8_t *)ie; + break; + case WLAN_ELEMID_SECCHANOFFSET: + if (ie->ie_len != WLAN_SECCHANOFF_IE_MAX_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.secchanoff = (uint8_t *)ie; + break; + case WLAN_ELEMID_HTINFO_ANA: + if (ie->ie_len != sizeof(struct wlan_ie_htinfo_cmn)) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.htinfo = + (uint8_t *)&(((struct wlan_ie_htinfo *) ie)->hi_ie); + scan_params->channel.chan_idx = + ((struct wlan_ie_htinfo_cmn *) + (scan_params->ie_list.htinfo))->hi_ctrlchannel; + break; + case WLAN_ELEMID_WAPI: + if (ie->ie_len < WLAN_WAPI_IE_MIN_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.wapi = (uint8_t *)ie; + break; + case WLAN_ELEMID_XCAPS: + if (ie->ie_len > WLAN_EXTCAP_IE_MAX_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.extcaps = (uint8_t *)ie; + break; + case WLAN_ELEMID_VHTCAP: + if (ie->ie_len != (sizeof(struct wlan_ie_vhtcaps) - + sizeof(struct ie_header))) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.vhtcap = (uint8_t *)ie; + break; + case WLAN_ELEMID_VHTOP: + if (ie->ie_len != (sizeof(struct wlan_ie_vhtop) - + sizeof(struct ie_header))) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.vhtop = (uint8_t *)ie; + break; + case WLAN_ELEMID_OP_MODE_NOTIFY: + if (ie->ie_len != WLAN_OPMODE_IE_MAX_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.opmode = (uint8_t *)ie; + break; + case WLAN_ELEMID_MOBILITY_DOMAIN: + if (ie->ie_len != WLAN_MOBILITY_DOMAIN_IE_MAX_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.mdie = (uint8_t *)ie; + break; + case WLAN_ELEMID_VENDOR: + status = util_scan_parse_vendor_ie(scan_params, + ie); + if (QDF_IS_STATUS_ERROR(status)) + return status; + break; + case WLAN_ELEMID_CHAN_SWITCH_WRAP: + scan_params->ie_list.cswrp = (uint8_t *)ie; + /* Go to next sub IE */ + sub_ie = (struct ie_header *) + (((uint8_t *)ie) + sizeof(struct ie_header)); + sub_ie_len = ie->ie_len; + status 
= + util_scan_parse_chan_switch_wrapper_ie( + scan_params, sub_ie, sub_ie_len); + if (QDF_IS_STATUS_ERROR(status)) { + scm_err("failed to parse chan_switch_wrapper_ie"); + return status; + } + break; + case WLAN_ELEMID_FILS_INDICATION: + if (ie->ie_len < WLAN_FILS_INDICATION_IE_MIN_LEN) + return QDF_STATUS_E_INVAL; + scan_params->ie_list.fils_indication = (uint8_t *)ie; + break; + case WLAN_ELEMID_EXTN_ELEM: + status = util_scan_parse_extn_ie(scan_params, ie); + if (QDF_IS_STATUS_ERROR(status)) + return status; + break; + default: + break; + } + + /* Consume info element */ + ie_len -= ie->ie_len; + /* Go to next IE */ + ie = (struct ie_header *) + (((uint8_t *) ie) + + sizeof(struct ie_header) + + ie->ie_len); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * util_scan_update_esp_data: update ESP params from beacon/probe response + * @esp_information: pointer to wlan_esp_information + * @scan_entry: new received entry + * + * The Estimated Service Parameters element is + * used by a AP to provide information to another STA which + * can then use the information as input to an algorithm to + * generate an estimate of throughput between the two STAs. + * The ESP Information List field contains from 1 to 4 ESP + * Information fields(each field 24 bits), each corresponding + * to an access category for which estimated service parameters + * information is provided. 
+ * + * Return: None + */ +static void util_scan_update_esp_data(struct wlan_esp_ie *esp_information, + struct scan_cache_entry *scan_entry) +{ + + uint8_t *data; + int i = 0; + uint64_t total_elements; + struct wlan_esp_info *esp_info; + struct wlan_esp_ie *esp_ie; + + esp_ie = (struct wlan_esp_ie *) + util_scan_entry_esp_info(scan_entry); + + total_elements = esp_ie->esp_len; + data = (uint8_t *)esp_ie + 3; + do_div(total_elements, ESP_INFORMATION_LIST_LENGTH); + + if (total_elements > MAX_ESP_INFORMATION_FIELD) { + scm_err("No of Air time fractions are greater than supported"); + return; + } + + for (i = 0; i < total_elements; i++) { + esp_info = (struct wlan_esp_info *)data; + if (esp_info->access_category == ESP_AC_BK) { + qdf_mem_copy(&esp_information->esp_info_AC_BK, + data, 3); + data = data + ESP_INFORMATION_LIST_LENGTH; + continue; + } + if (esp_info->access_category == ESP_AC_BE) { + qdf_mem_copy(&esp_information->esp_info_AC_BE, + data, 3); + data = data + ESP_INFORMATION_LIST_LENGTH; + continue; + } + if (esp_info->access_category == ESP_AC_VI) { + qdf_mem_copy(&esp_information->esp_info_AC_VI, + data, 3); + data = data + ESP_INFORMATION_LIST_LENGTH; + continue; + } + if (esp_info->access_category == ESP_AC_VO) { + qdf_mem_copy(&esp_information->esp_info_AC_VO, + data, 3); + data = data + ESP_INFORMATION_LIST_LENGTH; + break; + } + } +} + +/** + * util_scan_scm_update_bss_with_esp_data: calculate estimated air time + * fraction + * @scan_entry: new received entry + * + * This function processes all Access category ESP params and provides + * best effort air time fraction. 
+ * If best effort is not available, it will choose VI, VO and BK in sequence + * + */ +static void util_scan_scm_update_bss_with_esp_data( + struct scan_cache_entry *scan_entry) +{ + uint8_t air_time_fraction = 0; + struct wlan_esp_ie esp_information; + + if (!scan_entry->ie_list.esp) + return; + + util_scan_update_esp_data(&esp_information, scan_entry); + + /* + * If the ESP metric is transmitting multiple airtime fractions, then + * follow the sequence AC_BE, AC_VI, AC_VO, AC_BK and pick whichever is + * the first one available + */ + if (esp_information.esp_info_AC_BE.access_category + == ESP_AC_BE) + air_time_fraction = + esp_information.esp_info_AC_BE. + estimated_air_fraction; + else if (esp_information.esp_info_AC_VI.access_category + == ESP_AC_VI) + air_time_fraction = + esp_information.esp_info_AC_VI. + estimated_air_fraction; + else if (esp_information.esp_info_AC_VO.access_category + == ESP_AC_VO) + air_time_fraction = + esp_information.esp_info_AC_VO. + estimated_air_fraction; + else if (esp_information.esp_info_AC_BK.access_category + == ESP_AC_BK) + air_time_fraction = + esp_information.esp_info_AC_BK. 
+ estimated_air_fraction; + scan_entry->air_time_fraction = air_time_fraction; +} + +/** + * util_scan_scm_calc_nss_supported_by_ap() - finds out nss from AP + * @scan_entry: new received entry + * + * Return: number of nss advertised by AP + */ +static int util_scan_scm_calc_nss_supported_by_ap( + struct scan_cache_entry *scan_params) +{ + struct htcap_cmn_ie *htcap; + struct wlan_ie_vhtcaps *vhtcaps; + uint8_t rx_mcs_map; + + htcap = (struct htcap_cmn_ie *) + util_scan_entry_htcap(scan_params); + vhtcaps = (struct wlan_ie_vhtcaps *) + util_scan_entry_vhtcap(scan_params); + if (vhtcaps) { + rx_mcs_map = vhtcaps->rx_mcs_map; + if ((rx_mcs_map & 0xC0) != 0xC0) + return 4; + + if ((rx_mcs_map & 0x30) != 0x30) + return 3; + + if ((rx_mcs_map & 0x0C) != 0x0C) + return 2; + } else if (htcap) { + if (htcap->mcsset[3]) + return 4; + + if (htcap->mcsset[2]) + return 3; + + if (htcap->mcsset[1]) + return 2; + + } + return 1; +} + +qdf_list_t * +util_scan_unpack_beacon_frame(struct wlan_objmgr_pdev *pdev, uint8_t *frame, + qdf_size_t frame_len, uint32_t frm_subtype, + struct mgmt_rx_event_params *rx_param) +{ + struct wlan_frame_hdr *hdr; + struct wlan_bcn_frame *bcn; + QDF_STATUS status; + struct ie_ssid *ssid; + struct scan_cache_entry *scan_entry; + struct qbss_load_ie *qbss_load; + qdf_list_t *scan_list; + struct scan_cache_node *scan_node; + + scan_list = qdf_mem_malloc_atomic(sizeof(*scan_list)); + if (!scan_list) { + scm_err("failed to allocate scan_list"); + return NULL; + } + qdf_list_create(scan_list, MAX_SCAN_CACHE_SIZE); + + scan_entry = qdf_mem_malloc_atomic(sizeof(*scan_entry)); + if (!scan_entry) { + scm_err("failed to allocate memory for scan_entry"); + qdf_mem_free(scan_list); + return NULL; + } + scan_entry->raw_frame.ptr = + qdf_mem_malloc_atomic(frame_len); + if (!scan_entry->raw_frame.ptr) { + scm_err("failed to allocate memory for frame"); + qdf_mem_free(scan_entry); + qdf_mem_free(scan_list); + return NULL; + } + + bcn = (struct wlan_bcn_frame *) + 
(frame + sizeof(*hdr)); + hdr = (struct wlan_frame_hdr *)frame; + + /* update timestamp in nanoseconds needed by kernel layers */ + scan_entry->boottime_ns = qdf_get_bootbased_boottime_ns(); + + scan_entry->frm_subtype = frm_subtype; + qdf_mem_copy(scan_entry->bssid.bytes, + hdr->i_addr3, QDF_MAC_ADDR_SIZE); + /* Src addr */ + qdf_mem_copy(scan_entry->mac_addr.bytes, + hdr->i_addr2, QDF_MAC_ADDR_SIZE); + scan_entry->seq_num = + (le16toh(*(uint16_t *)hdr->i_seq) >> WLAN_SEQ_SEQ_SHIFT); + + scan_entry->rssi_raw = rx_param->rssi; + scan_entry->avg_rssi = WLAN_RSSI_IN(scan_entry->rssi_raw); + scan_entry->tsf_delta = rx_param->tsf_delta; + + /* Copy per chain rssi to scan entry */ + qdf_mem_copy(scan_entry->per_chain_snr, rx_param->rssi_ctl, + WLAN_MGMT_TXRX_HOST_MAX_ANTENNA); + + /* store jiffies */ + scan_entry->rrm_parent_tsf = (u_int32_t) qdf_system_ticks(); + + scan_entry->bcn_int = le16toh(bcn->beacon_interval); + + /* + * In case the beacon doesn't have a + * valid beacon interval, fall back to the default + */ + if (!scan_entry->bcn_int) + scan_entry->bcn_int = 100; + scan_entry->cap_info.value = le16toh(bcn->capability.value); + qdf_mem_copy(scan_entry->tsf_info.data, + bcn->timestamp, 8); + scan_entry->erp = ERP_NON_ERP_PRESENT; + + scan_entry->scan_entry_time = + qdf_mc_timer_get_system_time(); + + scan_entry->raw_frame.len = frame_len; + qdf_mem_copy(scan_entry->raw_frame.ptr, + frame, frame_len); + status = util_scan_populate_bcn_ie_list(scan_entry); + if (QDF_IS_STATUS_ERROR(status)) { + scm_debug("failed to parse beacon IE"); + qdf_mem_free(scan_entry->raw_frame.ptr); + qdf_mem_free(scan_entry); + qdf_mem_free(scan_list); + return NULL; + } + + ssid = (struct ie_ssid *) + scan_entry->ie_list.ssid; + + if (ssid && (ssid->ssid_len > WLAN_SSID_MAX_LEN)) { + qdf_mem_free(scan_entry->raw_frame.ptr); + qdf_mem_free(scan_entry); + qdf_mem_free(scan_list); + return NULL; + } + + if (scan_entry->ie_list.p2p) + scan_entry->is_p2p = true; + + /* If no channel info is present in 
beacon use meta channel */ + if (!scan_entry->channel.chan_idx) { + scan_entry->channel.chan_idx = + rx_param->channel; + } else if (rx_param->channel != + scan_entry->channel.chan_idx) { + if (!wlan_reg_chan_is_49ghz(pdev, scan_entry->channel.chan_idx)) + scan_entry->channel_mismatch = true; + } + + if (util_scan_is_hidden_ssid(ssid)) { + scan_entry->ie_list.ssid = NULL; + scan_entry->is_hidden_ssid = true; + } else { + qdf_mem_copy(scan_entry->ssid.ssid, + ssid->ssid, ssid->ssid_len); + scan_entry->ssid.length = ssid->ssid_len; + scan_entry->hidden_ssid_timestamp = + scan_entry->scan_entry_time; + } + + if (WLAN_CHAN_IS_5GHZ(scan_entry->channel.chan_idx)) + scan_entry->phy_mode = util_scan_get_phymode_5g(scan_entry); + else + scan_entry->phy_mode = util_scan_get_phymode_2g(scan_entry); + + scan_entry->nss = util_scan_scm_calc_nss_supported_by_ap(scan_entry); + util_scan_scm_update_bss_with_esp_data(scan_entry); + qbss_load = (struct qbss_load_ie *) + util_scan_entry_qbssload(scan_entry); + if (qbss_load) + scan_entry->qbss_chan_load = qbss_load->qbss_chan_load; + + scan_node = qdf_mem_malloc_atomic(sizeof(*scan_node)); + if (!scan_node) { + qdf_mem_free(scan_entry->raw_frame.ptr); + qdf_mem_free(scan_entry); + qdf_mem_free(scan_list); + return NULL; + } + + scan_node->entry = scan_entry; + qdf_list_insert_front(scan_list, &scan_node->node); + + /* TODO calculate channel struct */ + return scan_list; +} + +QDF_STATUS +util_scan_entry_update_mlme_info(struct wlan_objmgr_pdev *pdev, + struct scan_cache_entry *scan_entry) +{ + + if (!pdev || !scan_entry) { + scm_err("pdev 0x%pK, scan_entry: 0x%pK", pdev, scan_entry); + return QDF_STATUS_E_INVAL; + } + + return scm_update_scan_mlme_info(pdev, scan_entry); +} + +bool util_is_scan_completed(struct scan_event *event, bool *success) +{ + if ((event->type == SCAN_EVENT_TYPE_COMPLETED) || + (event->type == SCAN_EVENT_TYPE_DEQUEUED) || + (event->type == SCAN_EVENT_TYPE_START_FAILED)) { + if ((event->type == 
SCAN_EVENT_TYPE_COMPLETED) && + (event->reason == SCAN_REASON_COMPLETED)) + *success = true; + else + *success = false; + + return true; + } + + *success = false; + return false; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_cmds_process.c b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_cmds_process.c new file mode 100644 index 0000000000000000000000000000000000000000..ce76dc6705b66b22efb77cdbe749bab17ce982ed --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_cmds_process.c @@ -0,0 +1,2434 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_tdls_cmds_process.c + * + * TDLS north bound commands implementation + */ +#include +#include +#include "wlan_tdls_main.h" +#include "wlan_tdls_peer.h" +#include "wlan_tdls_ct.h" +#include "wlan_tdls_mgmt.h" +#include "wlan_tdls_cmds_process.h" +#include "wlan_tdls_tgt_api.h" +#include "wlan_policy_mgr_api.h" + +static uint16_t tdls_get_connected_peer(struct tdls_soc_priv_obj *soc_obj) +{ + return soc_obj->connected_peer_count; +} + +/** + * tdls_decrement_peer_count() - decrement connected TDLS peer counter + * @soc_obj: TDLS soc object + * + * Used in scheduler thread context, no lock needed. + * + * Return: None. + */ +void tdls_decrement_peer_count(struct tdls_soc_priv_obj *soc_obj) +{ + if (soc_obj->connected_peer_count) + soc_obj->connected_peer_count--; + + tdls_debug("Connected peer count %d", soc_obj->connected_peer_count); +} + +/** + * tdls_increment_peer_count() - increment connected TDLS peer counter + * @soc_obj: TDLS soc object + * + * Used in scheduler thread context, no lock needed. + * + * Return: None. 
 */
static void tdls_increment_peer_count(struct tdls_soc_priv_obj *soc_obj)
{
	soc_obj->connected_peer_count++;
	tdls_debug("Connected peer count %d", soc_obj->connected_peer_count);
}

/**
 * tdls_validate_current_mode() - check current TDLS mode
 * @soc_obj: TDLS soc object
 *
 * Return: QDF_STATUS_SUCCESS if TDLS enabled, other for disabled
 */
static QDF_STATUS tdls_validate_current_mode(struct tdls_soc_priv_obj *soc_obj)
{
	if (soc_obj->tdls_current_mode == TDLS_SUPPORT_DISABLED ||
	    soc_obj->tdls_current_mode == TDLS_SUPPORT_SUSPENDED) {
		tdls_err("TDLS mode disabled OR not enabled, current mode %d",
			 soc_obj->tdls_current_mode);
		return QDF_STATUS_E_NOSUPPORT;
	}
	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_DEBUG
/**
 * tdls_get_ser_cmd_str() - map a serialization command type to a log string
 * @type: serialization command type
 *
 * NOTE(review): this helper is compiled only under WLAN_DEBUG but is called
 * unconditionally from tdls_release_serialization_command() below — confirm
 * that tdls_debug() compiles its arguments out when WLAN_DEBUG is not set,
 * otherwise non-debug builds would fail to link.
 *
 * Return: printable name of the command type
 */
static char *tdls_get_ser_cmd_str(enum wlan_serialization_cmd_type type)
{
	switch (type) {
	case WLAN_SER_CMD_TDLS_ADD_PEER:
		return "TDLS_ADD_PEER_CMD";
	case WLAN_SER_CMD_TDLS_DEL_PEER:
		return "TDLS_DEL_PEER_CMD";
	case WLAN_SER_CMD_TDLS_SEND_MGMT:
		return "TDLS_SEND_MGMT_CMD";
	default:
		return "UNKNOWN";
	}
}
#endif

void
tdls_release_serialization_command(struct wlan_objmgr_vdev *vdev,
				   enum wlan_serialization_cmd_type type)
{
	struct wlan_serialization_queued_cmd_info cmd = {0};

	cmd.cmd_type = type;
	cmd.cmd_id = 0;
	cmd.vdev = vdev;

	tdls_debug("release %s", tdls_get_ser_cmd_str(type));
	/* Inform serialization for command completion */
	wlan_serialization_remove_cmd(&cmd);
}

/**
 * tdls_pe_add_peer() - send TDLS add peer request to PE
 * @req: TDL add peer request
 *
 * Builds a tdls_add_sta_req (TDLS_OPER_ADD) with the BSS peer's MAC as bssid
 * and posts it to PE through the scheduler. On any failure the request
 * buffer is freed here; on success ownership passes to the PE message queue.
 *
 * Return: QDF_STATUS_SUCCESS for success; other values if failed
 */
static QDF_STATUS tdls_pe_add_peer(struct tdls_add_peer_request *req)
{
	struct tdls_add_sta_req *addstareq;
	struct wlan_objmgr_vdev *vdev;
	struct wlan_objmgr_peer *peer;
	struct tdls_soc_priv_obj *soc_obj;
	struct scheduler_msg msg = {0,};
	QDF_STATUS status;

	addstareq = qdf_mem_malloc(sizeof(*addstareq));
	if (!addstareq) {
		tdls_err("allocate failed");
		return QDF_STATUS_E_NOMEM;
	}
	vdev = req->vdev;
	soc_obj = wlan_vdev_get_tdls_soc_obj(vdev);
	if (!soc_obj) {
		tdls_err("NULL tdls soc object");
		status = QDF_STATUS_E_INVAL;
		goto error;
	}

	addstareq->tdls_oper = TDLS_OPER_ADD;
	addstareq->transaction_id = 0;

	addstareq->session_id = wlan_vdev_get_id(vdev);
	peer = wlan_vdev_get_bsspeer(vdev);
	if (!peer) {
		tdls_err("bss peer is NULL");
		status = QDF_STATUS_E_INVAL;
		goto error;
	}
	status = wlan_objmgr_peer_try_get_ref(peer, WLAN_TDLS_NB_ID);
	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("can't get bss peer");
		goto error;
	}
	/* Copy BSSID under the peer lock, then drop the temporary ref */
	wlan_peer_obj_lock(peer);
	qdf_mem_copy(addstareq->bssid.bytes,
		     wlan_peer_get_macaddr(peer), QDF_MAC_ADDR_SIZE);
	wlan_peer_obj_unlock(peer);
	wlan_objmgr_peer_release_ref(peer, WLAN_TDLS_NB_ID);
	qdf_mem_copy(addstareq->peermac.bytes, req->add_peer_req.peer_addr,
		     QDF_MAC_ADDR_SIZE);

	tdls_debug("for " QDF_MAC_ADDR_STR,
		   QDF_MAC_ADDR_ARRAY(addstareq->peermac.bytes));
	msg.type = soc_obj->tdls_add_sta_req;
	msg.bodyptr = addstareq;
	status = scheduler_post_message(QDF_MODULE_ID_TDLS,
					QDF_MODULE_ID_PE,
					QDF_MODULE_ID_PE, &msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("fail to post pe msg to add peer");
		goto error;
	}
	return status;
error:
	qdf_mem_free(addstareq);
	return status;
}

/**
 * tdls_pe_del_peer() - send TDLS delete peer request to PE
 * @req: TDLS delete peer request
 *
 * Mirrors tdls_pe_add_peer(): fills a tdls_del_sta_req and posts it to PE.
 * On failure the message buffer is freed here.
 *
 * Return: QDF_STATUS_SUCCESS for success; other values if failed
 */
QDF_STATUS tdls_pe_del_peer(struct tdls_del_peer_request *req)
{
	struct tdls_del_sta_req *delstareq;
	struct wlan_objmgr_vdev *vdev;
	struct wlan_objmgr_peer *peer;
	struct tdls_soc_priv_obj *soc_obj;
	struct scheduler_msg msg = {0,};
	QDF_STATUS status;

	delstareq = qdf_mem_malloc(sizeof(*delstareq));
	if (!delstareq) {
		tdls_err("allocate failed");
		return QDF_STATUS_E_NOMEM;
	}
	vdev = req->vdev;
	soc_obj = wlan_vdev_get_tdls_soc_obj(vdev);
	if (!soc_obj) {
		tdls_err("NULL tdls soc object");
		status = QDF_STATUS_E_INVAL;
		goto error;
	}

	delstareq->transaction_id = 0;

	delstareq->session_id = wlan_vdev_get_id(vdev);
	peer = wlan_vdev_get_bsspeer(vdev);
	if (!peer) {
		tdls_err("bss peer is NULL");
		status = QDF_STATUS_E_INVAL;
		goto error;
	}
	status = wlan_objmgr_peer_try_get_ref(peer, WLAN_TDLS_NB_ID);
	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("can't get bss peer");
		goto error;
	}
	wlan_peer_obj_lock(peer);
	qdf_mem_copy(delstareq->bssid.bytes,
		     wlan_peer_get_macaddr(peer), QDF_MAC_ADDR_SIZE);
	wlan_peer_obj_unlock(peer);
	wlan_objmgr_peer_release_ref(peer, WLAN_TDLS_NB_ID);
	qdf_mem_copy(delstareq->peermac.bytes, req->del_peer_req.peer_addr,
		     QDF_MAC_ADDR_SIZE);

	tdls_debug("for " QDF_MAC_ADDR_STR,
		   QDF_MAC_ADDR_ARRAY(delstareq->peermac.bytes));
	msg.type = soc_obj->tdls_del_sta_req;
	msg.bodyptr = delstareq;
	status = scheduler_post_message(QDF_MODULE_ID_TDLS,
					QDF_MODULE_ID_PE,
					QDF_MODULE_ID_PE, &msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("fail to post pe msg to del peer");
		goto error;
	}
	return status;
error:
	qdf_mem_free(delstareq);
	return status;
}

/**
 * tdls_pe_update_peer() - send TDLS update peer request to PE
 * @req: TDLS update peer request
 *
 * Same message type as add (tdls_add_sta_req) but with TDLS_OPER_UPDATE and
 * the peer's capability/HT/VHT/rates information copied in.
 *
 * Return: QDF_STATUS_SUCCESS for success; other values if failed
 */
static QDF_STATUS tdls_pe_update_peer(struct tdls_update_peer_request *req)
{
	struct tdls_add_sta_req *addstareq;
	struct wlan_objmgr_vdev *vdev;
	struct wlan_objmgr_peer *peer;
	struct tdls_soc_priv_obj *soc_obj;
	struct scheduler_msg msg = {0,};
	struct tdls_update_peer_params *update_peer;
	QDF_STATUS status;

	addstareq = qdf_mem_malloc(sizeof(*addstareq));
	if (!addstareq) {
		tdls_err("allocate failed");
		return QDF_STATUS_E_NOMEM;
	}
	vdev = req->vdev;
	soc_obj = wlan_vdev_get_tdls_soc_obj(vdev);
	if (!soc_obj) {
		tdls_err("NULL tdls soc object");
		status = QDF_STATUS_E_INVAL;
		goto error;
	}
	update_peer = &req->update_peer_req;

	addstareq->tdls_oper = TDLS_OPER_UPDATE;
	addstareq->transaction_id = 0;

	addstareq->session_id = wlan_vdev_get_id(vdev);
	peer = wlan_vdev_get_bsspeer(vdev);
	if (!peer) {
		tdls_err("bss peer is NULL");
		status = QDF_STATUS_E_INVAL;
		goto error;
	}
	status = wlan_objmgr_peer_try_get_ref(peer, WLAN_TDLS_NB_ID);
	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("can't get bss peer");
		goto error;
	}
	wlan_peer_obj_lock(peer);
	qdf_mem_copy(addstareq->bssid.bytes,
		     wlan_peer_get_macaddr(peer), QDF_MAC_ADDR_SIZE);
	wlan_peer_obj_unlock(peer);
	wlan_objmgr_peer_release_ref(peer, WLAN_TDLS_NB_ID);
	qdf_mem_copy(addstareq->peermac.bytes, update_peer->peer_addr,
		     QDF_MAC_ADDR_SIZE);
	addstareq->capability = update_peer->capability;
	addstareq->uapsd_queues = update_peer->uapsd_queues;
	addstareq->max_sp = update_peer->max_sp;

	qdf_mem_copy(addstareq->extn_capability,
		     update_peer->extn_capability, WLAN_MAC_MAX_EXTN_CAP);
	addstareq->htcap_present = update_peer->htcap_present;
	qdf_mem_copy(&addstareq->ht_cap,
		     &update_peer->ht_cap,
		     sizeof(update_peer->ht_cap));
	addstareq->vhtcap_present = update_peer->vhtcap_present;
	qdf_mem_copy(&addstareq->vht_cap,
		     &update_peer->vht_cap,
		     sizeof(update_peer->vht_cap));
	addstareq->supported_rates_length = update_peer->supported_rates_len;
	qdf_mem_copy(&addstareq->supported_rates,
		     update_peer->supported_rates,
		     update_peer->supported_rates_len);
	tdls_debug("for " QDF_MAC_ADDR_STR,
		   QDF_MAC_ADDR_ARRAY(addstareq->peermac.bytes));

	msg.type = soc_obj->tdls_add_sta_req;
	msg.bodyptr = addstareq;
	status = scheduler_post_message(QDF_MODULE_ID_TDLS,
					QDF_MODULE_ID_PE,
					QDF_MODULE_ID_PE, &msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("fail to post pe msg to update peer");
		goto error;
	}
	return status;
error:
	qdf_mem_free(addstareq);
	return status;
}

/**
 * tdls_internal_add_peer_rsp() - report an add-peer result to the OS layer
 * @req: TDLS add peer request
 * @status: status to report in the indication
 *
 * Used on internal failure paths (command denied/cancelled/timed out) to
 * deliver a TDLS_EVENT_ADD_PEER indication without going through PE.
 *
 * Return: QDF_STATUS_SUCCESS, or an error if the vdev ref can't be taken
 */
static QDF_STATUS
tdls_internal_add_peer_rsp(struct tdls_add_peer_request *req,
			   QDF_STATUS status)
{
	struct tdls_soc_priv_obj *soc_obj;
	struct wlan_objmgr_vdev *vdev;
	struct tdls_osif_indication ind;
	QDF_STATUS ret;

	if (!req || !req->vdev) {
		tdls_err("req: %pK", req);
		return QDF_STATUS_E_INVAL;
	}
	vdev = req->vdev;
	ret = wlan_objmgr_vdev_try_get_ref(vdev, WLAN_TDLS_SB_ID);
	if (QDF_IS_STATUS_ERROR(ret)) {
		tdls_err("can't get vdev object");
		return ret;
	}

	soc_obj = wlan_vdev_get_tdls_soc_obj(vdev);
	if (soc_obj && soc_obj->tdls_event_cb) {
		ind.vdev = vdev;
		ind.status = status;
		soc_obj->tdls_event_cb(soc_obj->tdls_evt_cb_data,
				       TDLS_EVENT_ADD_PEER, &ind);
	}
	wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_SB_ID);

	return QDF_STATUS_SUCCESS;
}

/**
 * tdls_internal_update_peer_rsp() - report an update-peer result to the OS
 * @req: TDLS update peer request
 * @status: status to report in the indication
 *
 * NOTE(review): deliberately raises TDLS_EVENT_ADD_PEER (not a separate
 * update event) — update shares the add-peer notification path; confirm
 * against the osif handler.
 *
 * Return: QDF_STATUS_SUCCESS, or an error if the vdev ref can't be taken
 */
static QDF_STATUS
tdls_internal_update_peer_rsp(struct tdls_update_peer_request *req,
			      QDF_STATUS status)
{
	struct tdls_soc_priv_obj *soc_obj;
	struct tdls_osif_indication ind;
	struct wlan_objmgr_vdev *vdev;
	QDF_STATUS ret;

	if (!req || !req->vdev) {
		tdls_err("req: %pK", req);
		return QDF_STATUS_E_INVAL;
	}
	vdev = req->vdev;
	ret = wlan_objmgr_vdev_try_get_ref(vdev, WLAN_TDLS_SB_ID);
	if (QDF_IS_STATUS_ERROR(ret)) {
		tdls_err("can't get vdev object");
		return ret;
	}

	soc_obj = wlan_vdev_get_tdls_soc_obj(vdev);
	if (soc_obj && soc_obj->tdls_event_cb) {
		ind.vdev = vdev;
		ind.status = status;
		soc_obj->tdls_event_cb(soc_obj->tdls_evt_cb_data,
				       TDLS_EVENT_ADD_PEER, &ind);
	}
	wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_SB_ID);

	return QDF_STATUS_SUCCESS;
}

/**
 * tdls_internal_del_peer_rsp() - report a del-peer result to the OS layer
 * @req: TDLS delete-peer (oper) request
 *
 * NOTE(review): unlike the add/update variants, ind.status is never set
 * before the TDLS_EVENT_DEL_PEER callback — confirm the handler ignores it.
 *
 * Return: QDF_STATUS_SUCCESS, or an error if the vdev ref can't be taken
 */
static QDF_STATUS tdls_internal_del_peer_rsp(struct tdls_oper_request *req)
{
	struct tdls_soc_priv_obj *soc_obj;
	struct tdls_osif_indication ind;
	struct wlan_objmgr_vdev *vdev;
	QDF_STATUS status;

	if (!req || !req->vdev) {
		tdls_err("req: %pK", req);
		return QDF_STATUS_E_INVAL;
	}
	vdev = req->vdev;
	status = wlan_objmgr_vdev_try_get_ref(vdev, WLAN_TDLS_NB_ID);
	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("can't get vdev object");
		return status;
	}

	soc_obj = wlan_vdev_get_tdls_soc_obj(req->vdev);
	if (soc_obj && soc_obj->tdls_event_cb) {
		ind.vdev = req->vdev;
		soc_obj->tdls_event_cb(soc_obj->tdls_evt_cb_data,
				       TDLS_EVENT_DEL_PEER, &ind);
	}
	wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID);

	return QDF_STATUS_SUCCESS;
}

/**
 * tdls_activate_add_peer() - serialization-active handler for add peer
 * @req: TDLS add peer request
 *
 * Validates TDLS mode, peer existence/state and the connected-peer limit,
 * then moves the peer to TDLS_LINK_CONNECTING and forwards the request to
 * PE. On any rejection the link is reset to idle (where appropriate) and a
 * TDLS_EVENT_ADD_PEER failure indication is raised.
 *
 * NOTE(review): all non-PE paths return QDF_STATUS_E_PERM, even when the
 * local status was set to SUCCESS (peer already valid) — confirm callers
 * rely only on the indication, not this return value.
 *
 * Return: QDF_STATUS_SUCCESS if forwarded to PE; QDF_STATUS_E_PERM or
 *         QDF_STATUS_E_NULL_VALUE otherwise
 */
static QDF_STATUS tdls_activate_add_peer(struct tdls_add_peer_request *req)
{
	QDF_STATUS status;
	struct tdls_soc_priv_obj *soc_obj;
	struct tdls_vdev_priv_obj *vdev_obj;
	struct tdls_peer *peer;
	uint16_t curr_tdls_peers;
	const uint8_t *mac;
	struct tdls_osif_indication ind;

	if (!req->vdev) {
		tdls_err("vdev null when add tdls peer");
		QDF_ASSERT(0);
		return QDF_STATUS_E_NULL_VALUE;
	}

	mac = req->add_peer_req.peer_addr;
	soc_obj = wlan_vdev_get_tdls_soc_obj(req->vdev);
	vdev_obj = wlan_vdev_get_tdls_vdev_obj(req->vdev);

	if (!soc_obj || !vdev_obj) {
		tdls_err("soc_obj: %pK, vdev_obj: %pK", soc_obj, vdev_obj);
		return QDF_STATUS_E_INVAL;
	}
	status = tdls_validate_current_mode(soc_obj);
	if (QDF_IS_STATUS_ERROR(status))
		goto addrsp;

	peer = tdls_get_peer(vdev_obj, mac);
	if (!peer) {
		tdls_err("peer: " QDF_MAC_ADDR_STR " not exist. invalid",
			 QDF_MAC_ADDR_ARRAY(mac));
		status = QDF_STATUS_E_INVAL;
		goto addrsp;
	}

	/* in add station, we accept existing valid sta_id if there is */
	if ((peer->link_status > TDLS_LINK_CONNECTING) ||
	    (TDLS_STA_INDEX_CHECK((peer->sta_id)))) {
		tdls_notice("link_status %d sta_id %d add peer ignored",
			    peer->link_status, peer->sta_id);
		status = QDF_STATUS_SUCCESS;
		goto addrsp;
	}

	/* when others are on-going, we want to change link_status to idle */
	if (tdls_is_progress(vdev_obj, mac, true)) {
		tdls_notice(QDF_MAC_ADDR_STR " TDLS setuping. Req declined.",
			    QDF_MAC_ADDR_ARRAY(mac));
		status = QDF_STATUS_E_PERM;
		goto setlink;
	}

	/* first to check if we reached to maximum supported TDLS peer. */
	curr_tdls_peers = tdls_get_connected_peer(soc_obj);
	if (soc_obj->max_num_tdls_sta <= curr_tdls_peers) {
		tdls_err(QDF_MAC_ADDR_STR
			 " Request declined. Current %d, Max allowed %d.",
			 QDF_MAC_ADDR_ARRAY(mac), curr_tdls_peers,
			 soc_obj->max_num_tdls_sta);
		status = QDF_STATUS_E_PERM;
		goto setlink;
	}

	tdls_set_peer_link_status(peer,
				  TDLS_LINK_CONNECTING, TDLS_LINK_SUCCESS);

	status = tdls_pe_add_peer(req);
	if (QDF_IS_STATUS_ERROR(status))
		goto setlink;

	return QDF_STATUS_SUCCESS;

setlink:
	tdls_set_link_status(vdev_obj, mac, TDLS_LINK_IDLE,
			     TDLS_LINK_UNSPECIFIED);
addrsp:
	if (soc_obj->tdls_event_cb) {
		ind.status = status;
		ind.vdev = req->vdev;
		soc_obj->tdls_event_cb(soc_obj->tdls_evt_cb_data,
				       TDLS_EVENT_ADD_PEER, &ind);
	}

	return QDF_STATUS_E_PERM;
}

/**
 * tdls_add_peer_serialize_callback() - serialization callback for add peer
 * @cmd: serialization command carrying the tdls_add_peer_request
 * @reason: serialization state transition being reported
 *
 * Dispatches on the serialization reason: activates the request, reports
 * failure on cancel/timeout, and releases the request memory plus the vdev
 * reference when serialization is done with the command.
 *
 * Return: QDF_STATUS per the handled transition
 */
static QDF_STATUS
tdls_add_peer_serialize_callback(struct wlan_serialization_command *cmd,
				 enum wlan_serialization_cb_reason reason)
{
	struct tdls_add_peer_request *req;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!cmd || !cmd->umac_cmd) {
		tdls_err("cmd: %pK, reason: %d", cmd, reason);
		return QDF_STATUS_E_NULL_VALUE;
	}

	req = cmd->umac_cmd;
	tdls_debug("reason: %d, req %pK", reason, req);

	switch (reason) {
	case WLAN_SER_CB_ACTIVATE_CMD:
		/* command moved to active list
		 */
		status = tdls_activate_add_peer(req);
		break;

	case WLAN_SER_CB_CANCEL_CMD:
		/* command removed from pending list.
		 * notify os interface the status
		 */
		status = tdls_internal_add_peer_rsp(req, QDF_STATUS_E_FAILURE);
		break;

	case WLAN_SER_CB_ACTIVE_CMD_TIMEOUT:
		/* active command time out. */
		status = tdls_internal_add_peer_rsp(req, QDF_STATUS_E_FAILURE);
		break;

	case WLAN_SER_CB_RELEASE_MEM_CMD:
		/* command successfully completed.
		 * release memory & vdev reference count
		 */
		wlan_objmgr_vdev_release_ref(req->vdev, WLAN_TDLS_NB_ID);
		qdf_mem_free(req);
		break;

	default:
		/* Do nothing but logging */
		QDF_ASSERT(0);
		status = QDF_STATUS_E_INVAL;
		break;
	}

	return status;
}

/**
 * tdls_reset_nss() - track NSS-switch teardown progress
 * @tdls_soc: TDLS soc object
 * @action_code: TDLS action frame code; only TDLS_TEARDOWN is relevant
 *
 * Decrements the outstanding-teardown counter during an NSS switch and,
 * once it reaches zero, either clears the switch state (1x1->2x2 done) or
 * marks teardown complete while the switch itself is still in progress.
 */
void tdls_reset_nss(struct tdls_soc_priv_obj *tdls_soc,
		    uint8_t action_code)
{
	if (!tdls_soc)
		return;

	if (TDLS_TEARDOWN != action_code ||
	    !tdls_soc->tdls_nss_switch_in_progress)
		return;

	if (tdls_soc->tdls_teardown_peers_cnt != 0)
		tdls_soc->tdls_teardown_peers_cnt--;
	if (tdls_soc->tdls_teardown_peers_cnt == 0) {
		if (tdls_soc->tdls_nss_transition_mode ==
		    TDLS_NSS_TRANSITION_S_1x1_to_2x2) {
			/* TDLS NSS switch is fully completed, so
			 * reset the flags.
			 */
			tdls_notice("TDLS NSS switch is fully completed");
			tdls_soc->tdls_nss_switch_in_progress = false;
			tdls_soc->tdls_nss_teardown_complete = false;
		} else {
			/* TDLS NSS switch is not yet completed, but
			 * tdls teardown is completed for all the
			 * peers.
			 */
			tdls_notice("teardown done & NSS switch in progress");
			tdls_soc->tdls_nss_teardown_complete = true;
		}
		tdls_soc->tdls_nss_transition_mode =
			TDLS_NSS_TRANSITION_S_UNKNOWN;
	}

}

/**
 * tdls_set_cap() - set TDLS capability type
 * @tdls_vdev: tdls vdev object
 * @mac: peer mac address
 * @cap: TDLS capability type
 *
 * Return: 0 if successful or negative errno otherwise
 */
int tdls_set_cap(struct tdls_vdev_priv_obj *tdls_vdev, const uint8_t *mac,
		 enum tdls_peer_capab cap)
{
	struct tdls_peer *curr_peer;

	curr_peer = tdls_get_peer(tdls_vdev, mac);
	if (curr_peer == NULL) {
		tdls_err("curr_peer is NULL");
		return -EINVAL;
	}

	curr_peer->tdls_support = cap;
	return 0;
}

/**
 * tdls_validate_setup_frames() - reject setup frames at the peer limit
 * @tdls_soc: TDLS soc object
 * @tdls_validate: frame-validation request (action code, peer, status_code)
 *
 * Called when the connected-peer count has reached the maximum. A setup
 * REQUEST is rejected outright; any other setup frame gets status_code set
 * so a failure response can still be sent to the peer.
 *
 * NOTE(review): status_code is assigned QDF_STATUS_E_RESOURCES, i.e. a QDF
 * status rather than an 802.11 status code — confirm the consumer expects
 * this.
 *
 * Return: -EINVAL for a setup request, -EPERM otherwise
 */
static int tdls_validate_setup_frames(struct tdls_soc_priv_obj *tdls_soc,
				      struct tdls_validate_action_req *tdls_validate)
{
	/* supplicant still sends tdls_mgmt(SETUP_REQ)
	 * even after we return error code at
	 * 'add_station()'. Hence we have this check
	 * again in addition to add_station(). Anyway,
	 * there is no harm to double-check.
	 */
	if (TDLS_SETUP_REQUEST == tdls_validate->action_code) {
		tdls_err(QDF_MAC_ADDR_STR " TDLS Max peer already connected. action (%d) declined. Num of peers (%d), Max allowed (%d).",
			 QDF_MAC_ADDR_ARRAY(tdls_validate->peer_mac),
			 tdls_validate->action_code,
			 tdls_soc->connected_peer_count,
			 tdls_soc->max_num_tdls_sta);
		return -EINVAL;
	}
	/* maximum reached. tweak to send
	 * error code to peer and return error
	 * code to supplicant
	 */
	tdls_validate->status_code = QDF_STATUS_E_RESOURCES;
	tdls_err(QDF_MAC_ADDR_STR " TDLS Max peer already connected, send response status (%d). Num of peers (%d), Max allowed (%d).",
		 QDF_MAC_ADDR_ARRAY(tdls_validate->peer_mac),
		 tdls_validate->action_code,
		 tdls_soc->connected_peer_count,
		 tdls_soc->max_num_tdls_sta);

	return -EPERM;
}

/**
 * tdls_validate_mgmt_request() - validate a TDLS mgmt-frame TX request
 * @tdls_mgmt_req: action frame request (vdev, chk_frame, AC selection)
 *
 * Gate checks before a TDLS action frame may be sent: vdev up and
 * authenticated, TDLS allowed/enabled (teardown frames are exempt from the
 * mode checks), no NSS switch in flight, no concurrent setup for setup
 * actions, peer-limit handling for setup req/resp, and responder-flag
 * derivation for teardown. Also selects the access category via the WMM
 * callback (falls back to BK when ACM is set on VI).
 *
 * NOTE(review): on the peer-limit path the negative errno from
 * tdls_validate_setup_frames() transits through a QDF_STATUS local before
 * being returned as int — values are preserved but the typing is muddled.
 *
 * Return: 0 when the frame may be sent, negative errno otherwise
 */
int tdls_validate_mgmt_request(struct tdls_action_frame_request *tdls_mgmt_req)
{
	struct tdls_vdev_priv_obj *tdls_vdev;
	struct tdls_soc_priv_obj *tdls_soc;
	struct tdls_peer *curr_peer;
	struct tdls_peer *temp_peer;
	QDF_STATUS status;
	uint8_t vdev_id;

	struct wlan_objmgr_vdev *vdev = tdls_mgmt_req->vdev;
	struct tdls_validate_action_req *tdls_validate =
		&tdls_mgmt_req->chk_frame;

	if (QDF_STATUS_SUCCESS != tdls_get_vdev_objects(vdev,
							&tdls_vdev,
							&tdls_soc))
		return -ENOTSUPP;

	/*
	 * STA or P2P client should be connected and authenticated before
	 * sending any TDLS frames
	 */
	if (!wlan_vdev_is_up(vdev) ||
	    !tdls_is_vdev_authenticated(vdev)) {
		tdls_err("STA is not connected or not authenticated.");
		return -EAGAIN;
	}

	/* other than teardown frame, mgmt frames are not sent if disabled */
	if (TDLS_TEARDOWN != tdls_validate->action_code) {
		if (!tdls_check_is_tdls_allowed(vdev)) {
			tdls_err("TDLS not allowed, reject MGMT, action = %d",
				 tdls_validate->action_code);
			return -EPERM;
		}
		/* if tdls_mode is disabled, then decline the peer's request */
		if (TDLS_SUPPORT_DISABLED == tdls_soc->tdls_current_mode ||
		    TDLS_SUPPORT_SUSPENDED == tdls_soc->tdls_current_mode) {
			tdls_notice(QDF_MAC_ADDR_STR
				    " TDLS mode is disabled. action %d declined.",
				    QDF_MAC_ADDR_ARRAY(tdls_validate->peer_mac),
				    tdls_validate->action_code);
			return -ENOTSUPP;
		}
		if (tdls_soc->tdls_nss_switch_in_progress) {
			tdls_err("nss switch in progress, action %d declined "
				 QDF_MAC_ADDR_STR,
				 tdls_validate->action_code,
				 QDF_MAC_ADDR_ARRAY(tdls_validate->peer_mac));
			return -EAGAIN;
		}
	}

	if (TDLS_IS_SETUP_ACTION(tdls_validate->action_code)) {
		if (NULL != tdls_is_progress(tdls_vdev,
					     tdls_validate->peer_mac, true)) {
			tdls_err("setup is ongoing. action %d declined for "
				 QDF_MAC_ADDR_STR,
				 tdls_validate->action_code,
				 QDF_MAC_ADDR_ARRAY(tdls_validate->peer_mac));
			return -EPERM;
		}
	}

	/* call hdd_wmm_is_acm_allowed() */
	vdev_id = wlan_vdev_get_id(vdev);
	if (!tdls_soc->tdls_wmm_cb(vdev_id)) {
		tdls_debug("admission ctrl set to VI, send the frame with least AC (BK) for action %d",
			   tdls_validate->action_code);
		tdls_mgmt_req->use_default_ac = false;
	} else {
		tdls_mgmt_req->use_default_ac = true;
	}

	if (TDLS_SETUP_REQUEST == tdls_validate->action_code ||
	    TDLS_SETUP_RESPONSE == tdls_validate->action_code) {
		if (tdls_soc->max_num_tdls_sta <=
		    tdls_soc->connected_peer_count) {
			status = tdls_validate_setup_frames(tdls_soc,
							    tdls_validate);
			if (QDF_STATUS_SUCCESS != status)
				return status;
			/* fall through to send setup resp
			 * with failure status code
			 */
		} else {
			curr_peer =
				tdls_find_peer(tdls_vdev,
					       tdls_validate->peer_mac);
			if (curr_peer) {
				if (TDLS_IS_LINK_CONNECTED(curr_peer)) {
					tdls_err(QDF_MAC_ADDR_STR " already connected action %d declined.",
						 QDF_MAC_ADDR_ARRAY(
						 tdls_validate->peer_mac),
						 tdls_validate->action_code);

					return -EPERM;
				}
			}
		}
	}

	tdls_notice("tdls_mgmt" QDF_MAC_ADDR_STR " action %d, dialog_token %d status %d, len = %zu",
		    QDF_MAC_ADDR_ARRAY(tdls_validate->peer_mac),
		    tdls_validate->action_code, tdls_validate->dialog_token,
		    tdls_validate->status_code, tdls_validate->len);

	/* Except for teardown, the responder field is unused; default to 0 */
	tdls_validate->responder = 0;
	if (TDLS_TEARDOWN == tdls_validate->action_code) {
		temp_peer = tdls_find_peer(tdls_vdev, tdls_validate->peer_mac);
		if (!temp_peer) {
			tdls_err(QDF_MAC_ADDR_STR " peer doesn't exist",
				 QDF_MAC_ADDR_ARRAY(
				 tdls_validate->peer_mac));
			return -EPERM;
		}

		if (TDLS_IS_LINK_CONNECTED(temp_peer))
			tdls_validate->responder = temp_peer->is_responder;
		else {
			tdls_err(QDF_MAC_ADDR_STR " peer doesn't exist or not connected %d dialog_token %d status %d, tdls_validate->len = %zu",
				 QDF_MAC_ADDR_ARRAY(tdls_validate->peer_mac),
				 temp_peer->link_status,
				 tdls_validate->dialog_token,
				 tdls_validate->status_code,
				 tdls_validate->len);
			return -EPERM;
		}
	}

	/* For explicit trigger of DIS_REQ come out of BMPS for
	 * successfully receiving DIS_RSP from peer.
	 */
	if ((TDLS_SETUP_RESPONSE == tdls_validate->action_code) ||
	    (TDLS_SETUP_CONFIRM == tdls_validate->action_code) ||
	    (TDLS_DISCOVERY_RESPONSE == tdls_validate->action_code) ||
	    (TDLS_DISCOVERY_REQUEST == tdls_validate->action_code)) {
		/* Fw will take care if PS offload is enabled. */
		if (TDLS_DISCOVERY_REQUEST != tdls_validate->action_code)
			tdls_set_cap(tdls_vdev, tdls_validate->peer_mac,
				     TDLS_CAP_SUPPORTED);
	}
	return 0;
}

/**
 * tdls_process_add_peer() - queue an add-peer request to serialization
 * @req: TDLS add peer request (ownership transfers on success)
 *
 * Wraps the request in a WLAN_SER_CMD_TDLS_ADD_PEER serialization command.
 * If serialization accepts (pending or active) the request memory is owned
 * by the serialize callback; on denial or invalid input a failure
 * indication is sent and the request is freed here.
 *
 * Return: QDF_STATUS_SUCCESS, or an error status on denial/invalid input
 */
QDF_STATUS tdls_process_add_peer(struct tdls_add_peer_request *req)
{
	struct wlan_serialization_command cmd = {0,};
	enum wlan_serialization_status ser_cmd_status;
	struct wlan_objmgr_vdev *vdev;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!req || !req->vdev) {
		tdls_err("req: %pK", req);
		status = QDF_STATUS_E_INVAL;
		goto error;
	}
	vdev = req->vdev;
	cmd.cmd_type = WLAN_SER_CMD_TDLS_ADD_PEER;
	cmd.cmd_id = 0;
	cmd.cmd_cb = tdls_add_peer_serialize_callback;
	cmd.umac_cmd = req;
	cmd.source = WLAN_UMAC_COMP_TDLS;
	cmd.is_high_priority = false;
	cmd.cmd_timeout_duration = TDLS_DEFAULT_SERIALIZE_CMD_TIMEOUT;
	cmd.vdev = vdev;

	ser_cmd_status = wlan_serialization_request(&cmd);
	tdls_debug("req: 0x%pK wlan_serialization_request status:%d", req,
		   ser_cmd_status);

	switch (ser_cmd_status) {
	case WLAN_SER_CMD_PENDING:
		/* command moved to pending list. Do nothing */
		break;
	case WLAN_SER_CMD_ACTIVE:
		/* command moved to active list. Do nothing */
		break;
	case WLAN_SER_CMD_DENIED_LIST_FULL:
	case WLAN_SER_CMD_DENIED_RULES_FAILED:
	case WLAN_SER_CMD_DENIED_UNSPECIFIED:
		/* notify os interface about internal error*/
		status = tdls_internal_add_peer_rsp(req, QDF_STATUS_E_FAILURE);
		wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID);
		/* cmd can't be serviced.
		 * release tdls_add_peer_request memory
		 */
		qdf_mem_free(req);
		break;
	default:
		QDF_ASSERT(0);
		status = QDF_STATUS_E_INVAL;
		goto error;
	}

	return status;
error:
	status = tdls_internal_add_peer_rsp(req, QDF_STATUS_E_FAILURE);
	qdf_mem_free(req);
	return status;
}

/**
 * tdls_activate_update_peer() - serialization-active handler for update peer
 * @req: TDLS update peer request
 *
 * Validates TDLS mode, that the peer exists with a valid sta_id and is not
 * tearing down or already connected, enforces the peer limit, records the
 * peer's spatial streams and capabilities, then forwards to PE. Rejections
 * raise a TDLS_EVENT_ADD_PEER indication (update shares the add path).
 *
 * NOTE(review): like tdls_activate_add_peer(), every non-PE path returns
 * QDF_STATUS_E_PERM regardless of the indicated status.
 *
 * Return: QDF_STATUS_SUCCESS if forwarded to PE; error status otherwise
 */
static QDF_STATUS
tdls_activate_update_peer(struct tdls_update_peer_request *req)
{
	QDF_STATUS status;
	struct tdls_soc_priv_obj *soc_obj;
	struct tdls_vdev_priv_obj *vdev_obj;
	struct wlan_objmgr_vdev *vdev;
	struct tdls_peer *curr_peer;
	uint16_t curr_tdls_peers;
	const uint8_t *mac;
	struct tdls_update_peer_params *update_peer;
	struct tdls_osif_indication ind;

	if (!req->vdev) {
		tdls_err("vdev object NULL when add TDLS peer");
		QDF_ASSERT(0);
		return QDF_STATUS_E_NULL_VALUE;
	}

	mac = req->update_peer_req.peer_addr;
	vdev = req->vdev;
	soc_obj = wlan_vdev_get_tdls_soc_obj(vdev);
	vdev_obj = wlan_vdev_get_tdls_vdev_obj(vdev);
	if (!soc_obj || !vdev_obj) {
		tdls_err("soc_obj: %pK, vdev_obj: %pK", soc_obj, vdev_obj);
		return QDF_STATUS_E_INVAL;
	}

	status = tdls_validate_current_mode(soc_obj);
	if (QDF_IS_STATUS_ERROR(status))
		goto updatersp;

	curr_peer = tdls_get_peer(vdev_obj, mac);
	if (!curr_peer) {
		tdls_err(QDF_MAC_ADDR_STR " not exist. return invalid",
			 QDF_MAC_ADDR_ARRAY(mac));
		status = QDF_STATUS_E_INVAL;
		goto updatersp;
	}

	/* in change station, we accept only when sta_id is valid */
	if (curr_peer->link_status == TDLS_LINK_TEARING ||
	    !(TDLS_STA_INDEX_CHECK(curr_peer->sta_id))) {
		tdls_err(QDF_MAC_ADDR_STR " link %d. sta %d. update peer rejected",
			 QDF_MAC_ADDR_ARRAY(mac), curr_peer->link_status,
			 curr_peer->sta_id);
		status = QDF_STATUS_E_PERM;
		goto updatersp;
	}

	if (curr_peer->link_status == TDLS_LINK_CONNECTED &&
	    TDLS_STA_INDEX_CHECK(curr_peer->sta_id)) {
		tdls_err(QDF_MAC_ADDR_STR " link %d. sta %d. update peer is igonored as tdls state is already connected ",
			 QDF_MAC_ADDR_ARRAY(mac), curr_peer->link_status,
			 curr_peer->sta_id);
		status = QDF_STATUS_SUCCESS;
		goto updatersp;
	}

	/* when others are on-going, we want to change link_status to idle */
	if (tdls_is_progress(vdev_obj, mac, true)) {
		tdls_notice(QDF_MAC_ADDR_STR " TDLS setuping. Req declined.",
			    QDF_MAC_ADDR_ARRAY(mac));
		status = QDF_STATUS_E_PERM;
		goto setlink;
	}

	curr_tdls_peers = tdls_get_connected_peer(soc_obj);
	if (soc_obj->max_num_tdls_sta <= curr_tdls_peers) {
		tdls_err(QDF_MAC_ADDR_STR
			 " Request declined. Current: %d, Max allowed: %d.",
			 QDF_MAC_ADDR_ARRAY(mac), curr_tdls_peers,
			 soc_obj->max_num_tdls_sta);
		status = QDF_STATUS_E_PERM;
		goto setlink;
	}
	update_peer = &req->update_peer_req;

	if (update_peer->htcap_present)
		curr_peer->spatial_streams = update_peer->ht_cap.mcsset[1];

	tdls_set_peer_caps(vdev_obj, mac, &req->update_peer_req);
	status = tdls_pe_update_peer(req);
	if (QDF_IS_STATUS_ERROR(status))
		goto setlink;

	return QDF_STATUS_SUCCESS;

setlink:
	tdls_set_link_status(vdev_obj, mac, TDLS_LINK_IDLE,
			     TDLS_LINK_UNSPECIFIED);
updatersp:
	if (soc_obj->tdls_event_cb) {
		ind.status = status;
		ind.vdev = vdev;
		soc_obj->tdls_event_cb(soc_obj->tdls_evt_cb_data,
				       TDLS_EVENT_ADD_PEER, &ind);
	}

	return QDF_STATUS_E_PERM;
}

/**
 * tdls_update_peer_serialize_callback() - serialization callback for update
 * @cmd: serialization command carrying the tdls_update_peer_request
 * @reason: serialization state transition being reported
 *
 * Same structure as tdls_add_peer_serialize_callback(): activate on
 * ACTIVATE, fail-indicate on cancel/timeout, free request and drop the vdev
 * reference on RELEASE_MEM.
 *
 * Return: QDF_STATUS per the handled transition
 */
static QDF_STATUS
tdls_update_peer_serialize_callback(struct wlan_serialization_command *cmd,
				    enum wlan_serialization_cb_reason reason)
{
	struct tdls_update_peer_request *req;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!cmd || !cmd->umac_cmd) {
		tdls_err("cmd: %pK, reason: %d", cmd, reason);
		return QDF_STATUS_E_NULL_VALUE;
	}

	req = cmd->umac_cmd;
	tdls_debug("reason: %d, req %pK", reason, req);

	switch (reason) {
	case WLAN_SER_CB_ACTIVATE_CMD:
		/* command moved to active list
		 */
		status = tdls_activate_update_peer(req);
		break;

	case WLAN_SER_CB_CANCEL_CMD:
		/* command removed from pending list.
		 * notify os interface the status
		 */
		status = tdls_internal_update_peer_rsp(req,
						       QDF_STATUS_E_FAILURE);
		break;

	case WLAN_SER_CB_ACTIVE_CMD_TIMEOUT:
		/* active command time out. */
		status = tdls_internal_update_peer_rsp(req,
						       QDF_STATUS_E_FAILURE);
		break;

	case WLAN_SER_CB_RELEASE_MEM_CMD:
		/* command successfully completed.
		 * release memory & release reference count
		 */
		wlan_objmgr_vdev_release_ref(req->vdev, WLAN_TDLS_NB_ID);
		qdf_mem_free(req);
		break;

	default:
		/* Do nothing but logging */
		QDF_ASSERT(0);
		status = QDF_STATUS_E_INVAL;
		break;
	}

	return status;
}

/**
 * tdls_process_update_peer() - queue an update-peer request to serialization
 * @req: TDLS update peer request (ownership transfers on success)
 *
 * NOTE(review): intentionally reuses WLAN_SER_CMD_TDLS_ADD_PEER as the
 * serialization command type (update rides the add-peer command slot) —
 * confirm this matches the release path, which also releases ADD_PEER.
 *
 * Return: QDF_STATUS_SUCCESS, or an error status on denial/invalid input
 */
QDF_STATUS tdls_process_update_peer(struct tdls_update_peer_request *req)
{
	struct wlan_serialization_command cmd = {0,};
	enum wlan_serialization_status ser_cmd_status;
	struct wlan_objmgr_vdev *vdev;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!req || !req->vdev) {
		tdls_err("req: %pK", req);
		status = QDF_STATUS_E_FAILURE;
		goto error;
	}

	vdev = req->vdev;
	cmd.cmd_type = WLAN_SER_CMD_TDLS_ADD_PEER;
	cmd.cmd_id = 0;
	cmd.cmd_cb = tdls_update_peer_serialize_callback;
	cmd.umac_cmd = req;
	cmd.source = WLAN_UMAC_COMP_TDLS;
	cmd.is_high_priority = false;
	cmd.cmd_timeout_duration = TDLS_DEFAULT_SERIALIZE_CMD_TIMEOUT;
	cmd.vdev = req->vdev;

	ser_cmd_status = wlan_serialization_request(&cmd);
	tdls_debug("req: 0x%pK wlan_serialization_request status:%d", req,
		   ser_cmd_status);

	switch (ser_cmd_status) {
	case WLAN_SER_CMD_PENDING:
		/* command moved to pending list. Do nothing */
		break;
	case WLAN_SER_CMD_ACTIVE:
		/* command moved to active list. Do nothing */
		break;
	case WLAN_SER_CMD_DENIED_LIST_FULL:
	case WLAN_SER_CMD_DENIED_RULES_FAILED:
	case WLAN_SER_CMD_DENIED_UNSPECIFIED:
		/* notify os interface about internal error*/
		status = tdls_internal_update_peer_rsp(req,
						       QDF_STATUS_E_FAILURE);
		wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID);
		/* cmd can't be serviced.
		 * release tdls_update_peer_request memory
		 */
		qdf_mem_free(req);
		break;
	default:
		QDF_ASSERT(0);
		status = QDF_STATUS_E_INVAL;
		break;
	}

	return status;
error:
	status = tdls_internal_update_peer_rsp(req, QDF_STATUS_E_FAILURE);
	qdf_mem_free(req);
	return status;
}

/**
 * tdls_activate_del_peer() - serialization-active handler for delete peer
 * @req: TDLS delete-peer (oper) request
 *
 * Adapts the oper request into a tdls_del_peer_request and forwards to PE.
 *
 * Return: result of tdls_pe_del_peer()
 */
static QDF_STATUS tdls_activate_del_peer(struct tdls_oper_request *req)
{
	struct tdls_del_peer_request request = {0,};

	request.vdev = req->vdev;
	request.del_peer_req.peer_addr = req->peer_addr;

	return tdls_pe_del_peer(&request);
}

/**
 * tdls_del_peer_serialize_callback() - serialization callback for del peer
 * @cmd: serialization command carrying the tdls_oper_request
 * @reason: serialization state transition being reported
 *
 * Return: QDF_STATUS per the handled transition
 */
static QDF_STATUS
tdls_del_peer_serialize_callback(struct wlan_serialization_command *cmd,
				 enum wlan_serialization_cb_reason reason)
{
	struct tdls_oper_request *req;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!cmd || !cmd->umac_cmd) {
		tdls_err("cmd: %pK, reason: %d", cmd, reason);
		return QDF_STATUS_E_NULL_VALUE;
	}

	req = cmd->umac_cmd;
	tdls_debug("reason: %d, req %pK", reason, req);

	switch (reason) {
	case WLAN_SER_CB_ACTIVATE_CMD:
		/* command moved to active list
		 */
		status = tdls_activate_del_peer(req);
		break;

	case WLAN_SER_CB_CANCEL_CMD:
		/* command removed from pending list.
		 * notify os interface the status
		 */
		status = tdls_internal_del_peer_rsp(req);
		break;

	case WLAN_SER_CB_ACTIVE_CMD_TIMEOUT:
		/* active command time out. */
		status = tdls_internal_del_peer_rsp(req);
		break;

	case WLAN_SER_CB_RELEASE_MEM_CMD:
		/* command successfully completed.
		 * release memory & vdev reference count
		 */
		wlan_objmgr_vdev_release_ref(req->vdev, WLAN_TDLS_NB_ID);
		qdf_mem_free(req);
		break;

	default:
		/* Do nothing but logging */
		QDF_ASSERT(0);
		status = QDF_STATUS_E_INVAL;
		break;
	}

	return status;
}

/**
 * tdls_process_del_peer() - queue a delete-peer request to serialization
 * @req: TDLS delete-peer (oper) request (ownership transfers on success)
 *
 * Verifies the peer exists, notifies the datapath via tdls_dp_vdev_update,
 * then queues a WLAN_SER_CMD_TDLS_DEL_PEER command. On denial or error the
 * vdev reference (taken in ucfg_tdls_oper) is dropped and req freed here.
 *
 * Return: QDF_STATUS_SUCCESS, or an error status
 */
QDF_STATUS tdls_process_del_peer(struct tdls_oper_request *req)
{
	struct wlan_serialization_command cmd = {0,};
	enum wlan_serialization_status ser_cmd_status;
	struct wlan_objmgr_vdev *vdev;
	struct tdls_vdev_priv_obj *vdev_obj;
	struct tdls_soc_priv_obj *soc_obj;
	uint8_t *mac;
	struct tdls_peer *peer;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!req || !req->vdev) {
		tdls_err("req: %pK", req);
		status = QDF_STATUS_E_INVAL;
		goto free_req;
	}

	vdev = req->vdev;

	/* vdev reference cnt is acquired in ucfg_tdls_oper */
	vdev_obj = wlan_vdev_get_tdls_vdev_obj(vdev);
	soc_obj = wlan_vdev_get_tdls_soc_obj(vdev);

	if (!vdev_obj || !soc_obj) {
		tdls_err("tdls vdev_obj: %pK soc_obj: %pK", vdev_obj, soc_obj);
		status = QDF_STATUS_E_NULL_VALUE;
		goto error;
	}

	mac = req->peer_addr;
	peer = tdls_find_peer(vdev_obj, mac);
	if (!peer) {
		tdls_err(QDF_MAC_ADDR_STR
			 " not found, ignore NL80211_TDLS_ENABLE_LINK",
			 QDF_MAC_ADDR_ARRAY(mac));
		status = QDF_STATUS_E_INVAL;
		goto error;
	}

	if (soc_obj->tdls_dp_vdev_update)
		soc_obj->tdls_dp_vdev_update(&soc_obj->soc,
					     peer->sta_id,
					     soc_obj->tdls_update_dp_vdev_flags,
					     false);

	cmd.cmd_type = WLAN_SER_CMD_TDLS_DEL_PEER;
	cmd.cmd_id = 0;
	cmd.cmd_cb = tdls_del_peer_serialize_callback;
	cmd.umac_cmd = req;
	cmd.source = WLAN_UMAC_COMP_TDLS;
	cmd.is_high_priority = false;
	cmd.cmd_timeout_duration = TDLS_DEFAULT_SERIALIZE_CMD_TIMEOUT;
	cmd.vdev = vdev;

	ser_cmd_status = wlan_serialization_request(&cmd);
	tdls_debug("req: 0x%pK wlan_serialization_request status:%d", req,
		   ser_cmd_status);

	switch (ser_cmd_status) {
	case WLAN_SER_CMD_PENDING:
		/* command moved to pending list. Do nothing */
		break;
	case WLAN_SER_CMD_ACTIVE:
		/* command moved to active list. Do nothing */
		break;
	case WLAN_SER_CMD_DENIED_LIST_FULL:
	case WLAN_SER_CMD_DENIED_RULES_FAILED:
	case WLAN_SER_CMD_DENIED_UNSPECIFIED:
		/* notify os interface about internal error*/
		status = tdls_internal_del_peer_rsp(req);
		wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID);
		/* cmd can't be serviced.
		 * release tdls_oper_request memory
		 */
		qdf_mem_free(req);
		break;
	default:
		QDF_ASSERT(0);
		status = QDF_STATUS_E_INVAL;
		break;
	}

	return status;
error:
	status = tdls_internal_del_peer_rsp(req);
	wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID);
free_req:
	qdf_mem_free(req);
	return status;
}

/**
 * tdls_update_peer_rsp() - handle response for update TDLS peer
 * @rsp: TDLS add peer response (freed here)
 *
 * Releases the serialization command for the vdev identified by the
 * response, then forwards the PE status code to the OS layer via
 * TDLS_EVENT_ADD_PEER.
 *
 * NOTE(review): on the invalid-vdev path ind.vdev is NULL when the callback
 * fires — confirm the handler tolerates that.
 *
 * Return: QDF_STATUS_SUCCESS for success; other values if failed
 */
static QDF_STATUS tdls_update_peer_rsp(struct tdls_add_sta_rsp *rsp)
{
	struct wlan_objmgr_vdev *vdev;
	struct wlan_objmgr_psoc *psoc;
	struct tdls_soc_priv_obj *soc_obj;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct tdls_osif_indication ind;

	psoc = rsp->psoc;
	if (!psoc) {
		tdls_err("psoc is NULL");
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}
	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, rsp->session_id,
						    WLAN_TDLS_SB_ID);
	if (!vdev) {
		tdls_err("invalid vdev: %d", rsp->session_id);
		status = QDF_STATUS_E_INVAL;
		goto error;
	}

	tdls_release_serialization_command(vdev, WLAN_SER_CMD_TDLS_ADD_PEER);
	wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_SB_ID);
error:
	soc_obj = wlan_psoc_get_tdls_soc_obj(psoc);
	if (soc_obj && soc_obj->tdls_event_cb) {
		ind.status = rsp->status_code;
		ind.vdev = vdev;
		soc_obj->tdls_event_cb(soc_obj->tdls_evt_cb_data,
				       TDLS_EVENT_ADD_PEER, &ind);
	}
	qdf_mem_free(rsp);

	return status;
}

/**
 * tdls_process_send_mgmt_rsp() - handle response for send mgmt
 * @rsp: TDLS send mgmt response (freed here)
 *
 * Releases the SEND_MGMT serialization command; on a non-success status
 * code raises TDLS_EVENT_MGMT_TX_ACK_CNF with a failure status.
 *
 * Return: QDF_STATUS_SUCCESS for success; other values if failed
 */
QDF_STATUS tdls_process_send_mgmt_rsp(struct tdls_send_mgmt_rsp *rsp)
{
	struct wlan_objmgr_vdev *vdev;
	struct wlan_objmgr_psoc *psoc;
	struct tdls_vdev_priv_obj *tdls_vdev;
	struct tdls_soc_priv_obj *tdls_soc = NULL;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct tdls_osif_indication ind;

	psoc = rsp->psoc;
	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, rsp->session_id,
						    WLAN_TDLS_SB_ID);
	if (!vdev) {
		tdls_err("invalid vdev");
		status = QDF_STATUS_E_INVAL;
		qdf_mem_free(rsp);
		return status;
	}
	tdls_soc = wlan_psoc_get_tdls_soc_obj(psoc);
	tdls_vdev = wlan_vdev_get_tdls_vdev_obj(vdev);
	if (!tdls_soc || !tdls_vdev) {
		tdls_err("soc object:%pK, vdev object:%pK", tdls_soc, tdls_vdev);
		status = QDF_STATUS_E_FAILURE;
	}

	tdls_release_serialization_command(vdev, WLAN_SER_CMD_TDLS_SEND_MGMT);

	if (legacy_result_success == rsp->status_code)
		goto free_rsp;
	tdls_err("send mgmt failed. status code(=%d)", rsp->status_code);
	status = QDF_STATUS_E_FAILURE;

	if (tdls_soc && tdls_soc->tdls_event_cb) {
		ind.vdev = vdev;
		ind.status = status;
		tdls_soc->tdls_event_cb(tdls_soc->tdls_evt_cb_data,
					TDLS_EVENT_MGMT_TX_ACK_CNF, &ind);
	}

free_rsp:
	qdf_mem_free(rsp);
	wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_SB_ID);
	return status;
}

/**
 * tdls_send_mgmt_tx_completion() - process tx completion
 * @tx_complete: TDLS mgmt completion info (freed here)
 *
 * Forwards the firmware tx-complete status to the OS layer via
 * TDLS_EVENT_MGMT_TX_ACK_CNF.
 *
 * Return: QDF_STATUS_SUCCESS for success; other values if failed
 */
QDF_STATUS tdls_send_mgmt_tx_completion(
			struct tdls_mgmt_tx_completion_ind *tx_complete)
{
	struct wlan_objmgr_vdev *vdev;
	struct wlan_objmgr_psoc *psoc;
	struct tdls_vdev_priv_obj *tdls_vdev;
	struct tdls_soc_priv_obj *tdls_soc = NULL;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct tdls_osif_indication ind;

	psoc = tx_complete->psoc;
	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc,
						    tx_complete->session_id,
						    WLAN_TDLS_SB_ID);

	if (!vdev) {
		tdls_err("invalid vdev");
		status = QDF_STATUS_E_INVAL;
		goto free_tx_complete;
	}

	tdls_soc = wlan_psoc_get_tdls_soc_obj(psoc);
	tdls_vdev = wlan_vdev_get_tdls_vdev_obj(vdev);

	if (!tdls_soc || !tdls_vdev) {
		tdls_err("soc object:%pK, vdev object:%pK", tdls_soc, tdls_vdev);
		status = QDF_STATUS_E_FAILURE;
	}

	if (tdls_soc && tdls_soc->tdls_event_cb) {
		ind.vdev = vdev;
		ind.status = tx_complete->tx_complete_status;
		tdls_soc->tdls_event_cb(tdls_soc->tdls_evt_cb_data,
					TDLS_EVENT_MGMT_TX_ACK_CNF, &ind);
	}

	wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_SB_ID);
free_tx_complete:
	qdf_mem_free(tx_complete);
	return status;
}

/**
 * tdls_add_peer_rsp() - handle response for add TDLS peer
 * @rsp: TDLS add peer response (freed here)
 *
 * On PE success, records the new peer in the first free tdls_conn_info slot
 * and stores its sta_id on the peer object; always releases the ADD_PEER
 * serialization command and raises TDLS_EVENT_ADD_PEER with the PE status.
 *
 * Return: QDF_STATUS_SUCCESS for success; other values if failed
 */
static QDF_STATUS tdls_add_peer_rsp(struct tdls_add_sta_rsp *rsp)
{
	uint8_t sta_idx;
	struct wlan_objmgr_vdev *vdev;
	struct wlan_objmgr_psoc *psoc;
	struct tdls_vdev_priv_obj *vdev_obj;
	struct tdls_soc_priv_obj *soc_obj = NULL;
	struct tdls_conn_info *conn_rec;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct tdls_osif_indication ind;

	psoc = rsp->psoc;
	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, rsp->session_id,
						    WLAN_TDLS_SB_ID);
	if (!vdev) {
		tdls_err("invalid vdev: %d", rsp->session_id);
		status = QDF_STATUS_E_INVAL;
		goto error;
	}
	soc_obj = wlan_psoc_get_tdls_soc_obj(psoc);
	vdev_obj = wlan_vdev_get_tdls_vdev_obj(vdev);
	if (!soc_obj || !vdev_obj) {
		tdls_err("soc object:%pK, vdev object:%pK", soc_obj, vdev_obj);
		status = QDF_STATUS_E_FAILURE;
		goto cmddone;
	}
	if (rsp->status_code) {
		tdls_err("add sta failed. status code(=%d)", rsp->status_code);
		status = QDF_STATUS_E_FAILURE;
	} else {
		/* claim the first free connection-record slot for this peer */
		conn_rec = soc_obj->tdls_conn_info;
		for (sta_idx = 0; sta_idx < soc_obj->max_num_tdls_sta;
		     sta_idx++) {
			if (INVALID_TDLS_PEER_ID == conn_rec[sta_idx].sta_id) {
				conn_rec[sta_idx].session_id = rsp->session_id;
				conn_rec[sta_idx].sta_id = rsp->sta_id;
				conn_rec[sta_idx].index = sta_idx;
				qdf_copy_macaddr(&conn_rec[sta_idx].peer_mac,
						 &rsp->peermac);
				tdls_warn("TDLS: STA IDX at %d is %d of mac "
					  QDF_MAC_ADDR_STR, sta_idx,
					  rsp->sta_id, QDF_MAC_ADDR_ARRAY
					  (rsp->peermac.bytes));
				break;
			}
		}

		if (sta_idx < soc_obj->max_num_tdls_sta) {
			status = tdls_set_sta_id(vdev_obj, rsp->peermac.bytes,
						 rsp->sta_id);
			if (QDF_IS_STATUS_ERROR(status)) {
				tdls_err("set staid failed");
				status = QDF_STATUS_E_FAILURE;
			}
		} else {
			status = QDF_STATUS_E_FAILURE;
		}
	}

cmddone:
	tdls_release_serialization_command(vdev, WLAN_SER_CMD_TDLS_ADD_PEER);
	wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_SB_ID);
error:
	if (soc_obj && soc_obj->tdls_event_cb) {
		ind.vdev = vdev;
		ind.status = rsp->status_code;
		soc_obj->tdls_event_cb(soc_obj->tdls_evt_cb_data,
				       TDLS_EVENT_ADD_PEER, &ind);
	}
	qdf_mem_free(rsp);

	return status;
}

+QDF_STATUS tdls_process_add_peer_rsp(struct tdls_add_sta_rsp *rsp) +{ + tdls_debug("peer oper %d", rsp->tdls_oper); + + if (rsp->tdls_oper == TDLS_OPER_ADD) + return tdls_add_peer_rsp(rsp); + else if (rsp->tdls_oper == TDLS_OPER_UPDATE) + return tdls_update_peer_rsp(rsp); + + return QDF_STATUS_E_INVAL; +} + +QDF_STATUS tdls_process_del_peer_rsp(struct tdls_del_sta_rsp *rsp) +{ + uint8_t sta_idx, id; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + struct wlan_objmgr_vdev *vdev; + struct wlan_objmgr_psoc *psoc; + struct tdls_vdev_priv_obj *vdev_obj; + struct tdls_soc_priv_obj *soc_obj = NULL; + struct tdls_conn_info *conn_rec; + struct tdls_peer *curr_peer = NULL; + const uint8_t *macaddr; + struct tdls_osif_indication ind; + + tdls_debug("del peer rsp: vdev %d peer " QDF_MAC_ADDR_STR, + rsp->session_id, QDF_MAC_ADDR_ARRAY(rsp->peermac.bytes)); + psoc = rsp->psoc; + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, rsp->session_id, + WLAN_TDLS_SB_ID); + if (!vdev) { + tdls_err("invalid vdev: %d", rsp->session_id); + status = QDF_STATUS_E_INVAL; + goto error; + } + soc_obj = wlan_psoc_get_tdls_soc_obj(psoc); + vdev_obj = wlan_vdev_get_tdls_vdev_obj(vdev); + if (!soc_obj || !vdev_obj) { + tdls_err("soc object:%pK, vdev object:%pK", soc_obj, vdev_obj); + status = QDF_STATUS_E_FAILURE; + goto cmddone; + } + + conn_rec = soc_obj->tdls_conn_info; + for (sta_idx = 0; sta_idx < soc_obj->max_num_tdls_sta; sta_idx++) { + if (conn_rec[sta_idx].session_id != rsp->session_id || + conn_rec[sta_idx].sta_id != rsp->sta_id) + continue; + + macaddr = rsp->peermac.bytes; + tdls_warn("TDLS: del STA IDX = %x", rsp->sta_id); + curr_peer = tdls_find_peer(vdev_obj, macaddr); + if (curr_peer) { + tdls_debug(QDF_MAC_ADDR_STR " status is %d", + QDF_MAC_ADDR_ARRAY(macaddr), + curr_peer->link_status); + + id = wlan_vdev_get_id(vdev); + + if (TDLS_IS_LINK_CONNECTED(curr_peer)) { + soc_obj->tdls_dereg_peer( + soc_obj->tdls_peer_context, + id, curr_peer->sta_id); + 
tdls_decrement_peer_count(soc_obj); + } else if (TDLS_LINK_CONNECTING == + curr_peer->link_status) { + soc_obj->tdls_dereg_peer( + soc_obj->tdls_peer_context, + id, curr_peer->sta_id); + } + } + tdls_reset_peer(vdev_obj, macaddr); + conn_rec[sta_idx].sta_id = INVALID_TDLS_PEER_ID; + conn_rec[sta_idx].session_id = 0xff; + conn_rec[sta_idx].index = INVALID_TDLS_PEER_INDEX; + qdf_mem_zero(&conn_rec[sta_idx].peer_mac, + QDF_MAC_ADDR_SIZE); + + status = QDF_STATUS_SUCCESS; + break; + } + macaddr = rsp->peermac.bytes; + if (!curr_peer) { + curr_peer = tdls_find_peer(vdev_obj, macaddr); + + if (curr_peer) + tdls_set_peer_link_status(curr_peer, TDLS_LINK_IDLE, + (curr_peer->link_status == + TDLS_LINK_TEARING) ? + TDLS_LINK_UNSPECIFIED : + TDLS_LINK_DROPPED_BY_REMOTE); + } + +cmddone: + tdls_release_serialization_command(vdev, WLAN_SER_CMD_TDLS_DEL_PEER); + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_SB_ID); +error: + + if (soc_obj && soc_obj->tdls_event_cb) { + ind.vdev = vdev; + soc_obj->tdls_event_cb(soc_obj->tdls_evt_cb_data, + TDLS_EVENT_DEL_PEER, &ind); + } + qdf_mem_free(rsp); + + return status; +} + +static QDF_STATUS +tdls_wma_update_peer_state(struct tdls_soc_priv_obj *soc_obj, + struct tdls_peer_update_state *peer_state) +{ + struct scheduler_msg msg = {0,}; + QDF_STATUS status; + + tdls_debug("update TDLS peer " QDF_MAC_ADDR_STR " vdev %d, state %d", + QDF_MAC_ADDR_ARRAY(peer_state->peer_macaddr), + peer_state->vdev_id, peer_state->peer_state); + msg.type = soc_obj->tdls_update_peer_state; + msg.reserved = 0; + msg.bodyptr = peer_state; + + status = scheduler_post_message(QDF_MODULE_ID_TDLS, + QDF_MODULE_ID_WMA, + QDF_MODULE_ID_WMA, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("scheduler_post_msg failed"); + status = QDF_STATUS_E_FAILURE; + } + + return status; +} + +static QDF_STATUS +tdls_update_uapsd(struct wlan_objmgr_psoc *psoc, struct wlan_objmgr_vdev *vdev, + uint8_t sta_id, uint32_t srvc_int, uint32_t sus_int, + uint8_t dir, uint8_t psb, 
uint32_t delay_interval) +{ + uint8_t i; + static const uint8_t ac[AC_PRIORITY_NUM] = {UAPSD_AC_VO, UAPSD_AC_VI, + UAPSD_AC_BK, UAPSD_AC_BE}; + static const uint8_t tid[AC_PRIORITY_NUM] = {7, 5, 2, 3}; + uint32_t vdev_id; + + struct sta_uapsd_params tdls_uapsd_params; + struct sta_uapsd_trig_params tdls_trig_params; + struct wlan_objmgr_peer *bsspeer; + uint8_t macaddr[QDF_MAC_ADDR_SIZE]; + QDF_STATUS status; + + if (!psb) { + tdls_debug("No need to configure auto trigger:psb is 0"); + return QDF_STATUS_SUCCESS; + } + vdev_id = wlan_vdev_get_id(vdev); + bsspeer = wlan_vdev_get_bsspeer(vdev); + if (!bsspeer) { + tdls_err("bss peer is NULL"); + return QDF_STATUS_E_FAILURE; + } + wlan_vdev_obj_lock(vdev); + qdf_mem_copy(macaddr, + wlan_peer_get_macaddr(bsspeer), QDF_MAC_ADDR_SIZE); + wlan_vdev_obj_unlock(vdev); + + tdls_debug("TDLS uapsd id %d, srvc %d, sus %d, dir %d psb %d delay %d", + sta_id, srvc_int, sus_int, dir, psb, delay_interval); + for (i = 0; i < AC_PRIORITY_NUM; i++) { + tdls_uapsd_params.wmm_ac = ac[i]; + tdls_uapsd_params.user_priority = tid[i]; + tdls_uapsd_params.service_interval = srvc_int; + tdls_uapsd_params.delay_interval = delay_interval; + tdls_uapsd_params.suspend_interval = sus_int; + + tdls_trig_params.vdevid = vdev_id; + tdls_trig_params.num_ac = 1; + tdls_trig_params.auto_triggerparam = &tdls_uapsd_params; + + qdf_mem_copy(tdls_trig_params.peer_addr, + macaddr, QDF_MAC_ADDR_SIZE); + status = tgt_tdls_set_uapsd(psoc, &tdls_trig_params); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("Failed to set uapsd for vdev %d, status %d", + vdev_id, status); + } + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tdls_process_enable_link(struct tdls_oper_request *req) +{ + struct tdls_peer *peer; + struct tdls_vdev_priv_obj *vdev_obj; + struct tdls_soc_priv_obj *soc_obj; + struct wlan_objmgr_vdev *vdev; + uint8_t *mac; + struct tdls_peer_update_state *peer_update_param; + QDF_STATUS status; + uint32_t feature; + uint8_t id; + + vdev = req->vdev; + if 
(!vdev) { + tdls_err("NULL vdev object"); + qdf_mem_free(req); + return QDF_STATUS_E_NULL_VALUE; + } + + /* vdev reference cnt is acquired in ucfg_tdls_oper */ + vdev_obj = wlan_vdev_get_tdls_vdev_obj(vdev); + soc_obj = wlan_vdev_get_tdls_soc_obj(vdev); + + if (!vdev_obj || !soc_obj) { + tdls_err("tdls vdev_obj: %pK soc_obj: %pK", vdev_obj, soc_obj); + status = QDF_STATUS_E_NULL_VALUE; + goto error; + } + + mac = req->peer_addr; + peer = tdls_find_peer(vdev_obj, mac); + if (!peer) { + tdls_err(QDF_MAC_ADDR_STR + " not found, ignore NL80211_TDLS_ENABLE_LINK", + QDF_MAC_ADDR_ARRAY(mac)); + status = QDF_STATUS_E_INVAL; + goto error; + } + + tdls_debug("enable link for peer " QDF_MAC_ADDR_STR " link state %d", + QDF_MAC_ADDR_ARRAY(mac), peer->link_status); + if (!TDLS_STA_INDEX_CHECK(peer->sta_id)) { + tdls_err("invalid sta idx %u for " QDF_MAC_ADDR_STR, + peer->sta_id, QDF_MAC_ADDR_ARRAY(mac)); + status = QDF_STATUS_E_INVAL; + goto error; + } + + peer->tdls_support = TDLS_CAP_SUPPORTED; + if (TDLS_LINK_CONNECTED != peer->link_status) + tdls_set_peer_link_status(peer, TDLS_LINK_CONNECTED, + TDLS_LINK_SUCCESS); + + id = wlan_vdev_get_id(vdev); + status = soc_obj->tdls_reg_peer(soc_obj->tdls_peer_context, + id, mac, peer->sta_id, + peer->qos); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("TDLS register peer fail, status %d", status); + goto error; + } + + peer_update_param = qdf_mem_malloc(sizeof(*peer_update_param)); + if (!peer_update_param) { + tdls_err("memory allocation failed"); + status = QDF_STATUS_E_NOMEM; + goto error; + } + + tdls_extract_peer_state_param(peer_update_param, peer); + + status = tdls_wma_update_peer_state(soc_obj, peer_update_param); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_mem_free(peer_update_param); + status = QDF_STATUS_E_PERM; + goto error; + } + + tdls_increment_peer_count(soc_obj); + feature = soc_obj->tdls_configs.tdls_feature_flags; + + if (soc_obj->tdls_dp_vdev_update) + soc_obj->tdls_dp_vdev_update(&soc_obj->soc, + peer->sta_id, 
+ soc_obj->tdls_update_dp_vdev_flags, + ((peer->link_status == + TDLS_LINK_CONNECTED) ? true : false)); + + tdls_debug("TDLS buffer sta: %d, uapsd_mask %d", + TDLS_IS_BUFFER_STA_ENABLED(feature), + soc_obj->tdls_configs.tdls_uapsd_mask); + + if (TDLS_IS_BUFFER_STA_ENABLED(feature) || + soc_obj->tdls_configs.tdls_uapsd_mask) + tdls_update_uapsd(soc_obj->soc, + vdev, peer->sta_id, 0, 0, BI_DIR, 1, + soc_obj->tdls_configs.delayed_trig_framint); +error: + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID); + qdf_mem_free(req); + + return status; +} + +/** + * tdls_config_force_peer() - configure an externally controllable TDLS peer + * @req: TDLS operation request + * + * This is not the tdls_process_cmd function. No need to acquire the reference + * count, release reference count and free the request, the caller handle it + * correctly. + * + * Return: QDF_STATUS_SUCCESS if success; other values if failed + */ +static QDF_STATUS tdls_config_force_peer( + struct tdls_oper_config_force_peer_request *req) +{ + struct tdls_peer *peer; + struct tdls_soc_priv_obj *soc_obj; + struct tdls_vdev_priv_obj *vdev_obj; + struct wlan_objmgr_pdev *pdev; + struct wlan_objmgr_vdev *vdev; + const uint8_t *macaddr; + uint32_t feature; + QDF_STATUS status; + struct tdls_peer_update_state *peer_update_param; + + macaddr = req->peer_addr; + tdls_debug("NL80211_TDLS_SETUP for " QDF_MAC_ADDR_STR, + QDF_MAC_ADDR_ARRAY(macaddr)); + + vdev = req->vdev; + pdev = wlan_vdev_get_pdev(vdev); + vdev_obj = wlan_vdev_get_tdls_vdev_obj(vdev); + soc_obj = wlan_vdev_get_tdls_soc_obj(vdev); + if (!pdev || !vdev_obj || !soc_obj) { + tdls_err("pdev: %pK, vdev_obj: %pK, soc_obj: %pK", + pdev, vdev_obj, soc_obj); + return QDF_STATUS_E_INVAL; + } + + feature = soc_obj->tdls_configs.tdls_feature_flags; + if (!TDLS_IS_EXTERNAL_CONTROL_ENABLED(feature) || + !TDLS_IS_IMPLICIT_TRIG_ENABLED(feature)) { + tdls_err("TDLS ext ctrl or Imp Trig not enabled, %x", feature); + return QDF_STATUS_E_NOSUPPORT; + } + + 
peer_update_param = qdf_mem_malloc(sizeof(*peer_update_param)); + if (!peer_update_param) { + tdls_err("memory allocation failed"); + return QDF_STATUS_E_NOMEM; + } + + peer = tdls_get_peer(vdev_obj, macaddr); + if (!peer) { + tdls_err("peer " QDF_MAC_ADDR_STR " does not exist", + QDF_MAC_ADDR_ARRAY(macaddr)); + status = QDF_STATUS_E_NULL_VALUE; + goto error; + } + status = tdls_set_force_peer(vdev_obj, macaddr, true); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("set force peer failed"); + goto error; + } + + /* Update the peer mac to firmware, so firmware could update the + * connection table + */ + peer_update_param->vdev_id = wlan_vdev_get_id(vdev); + qdf_mem_copy(peer_update_param->peer_macaddr, + macaddr, QDF_MAC_ADDR_SIZE); + peer_update_param->peer_state = TDLS_PEER_ADD_MAC_ADDR; + + status = tdls_wma_update_peer_state(soc_obj, peer_update_param); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("update peer state failed"); + goto error; + } + + soc_obj->tdls_external_peer_count++; + + /* Validate if off channel is DFS channel */ + if (wlan_reg_is_dfs_ch(pdev, req->chan)) { + tdls_err("Resetting TDLS off-channel from %d to %d", + req->chan, WLAN_TDLS_PREFERRED_OFF_CHANNEL_NUM_DEF); + req->chan = WLAN_TDLS_PREFERRED_OFF_CHANNEL_NUM_DEF; + } + tdls_set_extctrl_param(peer, req->chan, req->max_latency, req->op_class, + req->min_bandwidth); + + tdls_set_callback(peer, req->callback); + + tdls_set_ct_mode(soc_obj->soc); + if (soc_obj->enable_tdls_connection_tracker) + tdls_implicit_enable(vdev_obj); + + return status; +error: + qdf_mem_free(peer_update_param); + return status; +} + +/** + * tdls_process_setup_peer() - process configure an externally + * controllable TDLS peer + * @req: TDLS operation request + * + * Return: QDF_STATUS_SUCCESS if success; other values if failed + */ +QDF_STATUS tdls_process_setup_peer(struct tdls_oper_request *req) +{ + struct tdls_oper_config_force_peer_request peer_req; + struct tdls_soc_priv_obj *soc_obj; + struct 
wlan_objmgr_vdev *vdev; + QDF_STATUS status; + + tdls_debug("Configure external TDLS peer " QDF_MAC_ADDR_STR, + QDF_MAC_ADDR_ARRAY(req->peer_addr)); + + /* reference cnt is acquired in ucfg_tdls_oper */ + vdev = req->vdev; + if (!vdev) { + tdls_err("NULL vdev object"); + status = QDF_STATUS_E_NULL_VALUE; + goto freereq; + } + + qdf_mem_zero(&peer_req, sizeof(peer_req)); + peer_req.vdev = vdev; + qdf_mem_copy(peer_req.peer_addr, req->peer_addr, QDF_MAC_ADDR_SIZE); + + soc_obj = wlan_vdev_get_tdls_soc_obj(vdev); + if (!soc_obj) { + tdls_err("NULL soc object"); + status = QDF_STATUS_E_INVAL; + goto error; + } + + peer_req.chan = soc_obj->tdls_configs.tdls_pre_off_chan_num; + + status = tdls_config_force_peer(&peer_req); +error: + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID); +freereq: + qdf_mem_free(req); + + return status; +} + +QDF_STATUS tdls_process_remove_force_peer(struct tdls_oper_request *req) +{ + struct tdls_peer *peer; + struct tdls_soc_priv_obj *soc_obj; + struct tdls_vdev_priv_obj *vdev_obj; + struct wlan_objmgr_vdev *vdev; + const uint8_t *macaddr; + uint32_t feature; + QDF_STATUS status; + struct tdls_peer_update_state *peer_update_param; + struct tdls_osif_indication ind; + + macaddr = req->peer_addr; + tdls_debug("NL80211_TDLS_TEARDOWN for " QDF_MAC_ADDR_STR, + QDF_MAC_ADDR_ARRAY(macaddr)); + + vdev = req->vdev; + if (!vdev) { + tdls_err("NULL vdev object"); + qdf_mem_free(req); + return QDF_STATUS_E_NULL_VALUE; + } + + /* reference cnt is acquired in ucfg_tdls_oper */ + vdev_obj = wlan_vdev_get_tdls_vdev_obj(req->vdev); + soc_obj = wlan_vdev_get_tdls_soc_obj(req->vdev); + if (!soc_obj || !vdev_obj) { + tdls_err("soc_obj: %pK, vdev_obj: %pK", soc_obj, vdev_obj); + status = QDF_STATUS_E_INVAL; + goto error; + } + + feature = soc_obj->tdls_configs.tdls_feature_flags; + if (!TDLS_IS_EXTERNAL_CONTROL_ENABLED(feature) || + !TDLS_IS_IMPLICIT_TRIG_ENABLED(feature)) { + tdls_err("TDLS ext ctrl or Imp Trig not enabled, %x", feature); + status = 
QDF_STATUS_E_NOSUPPORT; + goto error; + } + + peer = tdls_find_peer(vdev_obj, macaddr); + if (!peer) { + tdls_err("peer matching " QDF_MAC_ADDR_STR " not found", + QDF_MAC_ADDR_ARRAY(macaddr)); + status = QDF_STATUS_E_NULL_VALUE; + goto error; + } + + tdls_set_peer_link_status(peer, TDLS_LINK_TEARING, + TDLS_LINK_UNSPECIFIED); + + if (soc_obj->tdls_dp_vdev_update) + soc_obj->tdls_dp_vdev_update(&soc_obj->soc, + peer->sta_id, + soc_obj->tdls_update_dp_vdev_flags, + false); + + if (soc_obj->tdls_event_cb) { + qdf_mem_copy(ind.peer_mac, macaddr, QDF_MAC_ADDR_SIZE); + ind.vdev = vdev; + ind.reason = TDLS_TEARDOWN_PEER_UNSPEC_REASON; + soc_obj->tdls_event_cb(soc_obj->tdls_evt_cb_data, + TDLS_EVENT_TEARDOWN_REQ, &ind); + } + + status = tdls_set_force_peer(vdev_obj, macaddr, false); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("set force peer failed"); + status = QDF_STATUS_E_INVAL; + goto error; + } + + if (soc_obj->tdls_external_peer_count) + soc_obj->tdls_external_peer_count--; + + tdls_set_callback(peer, NULL); + peer_update_param = qdf_mem_malloc(sizeof(*peer_update_param)); + if (!peer_update_param) { + tdls_err("memory allocation failed"); + status = QDF_STATUS_E_NOMEM; + goto error; + } + + peer_update_param->vdev_id = wlan_vdev_get_id(vdev); + qdf_mem_copy(peer_update_param->peer_macaddr, + macaddr, QDF_MAC_ADDR_SIZE); + peer_update_param->peer_state = TDLS_PEER_REMOVE_MAC_ADDR; + status = tdls_wma_update_peer_state(soc_obj, peer_update_param); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_mem_free(peer_update_param); + goto error; + } + tdls_set_ct_mode(soc_obj->soc); + if (!soc_obj->enable_tdls_connection_tracker) + tdls_implicit_disable(vdev_obj); + +error: + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID); + qdf_mem_free(req); + + return status; +} + +#ifdef WLAN_DEBUG +static const char *tdls_evt_to_str(enum tdls_event_msg_type type) +{ + switch (type) { + case TDLS_SHOULD_DISCOVER: + return "SHOULD_DISCOVER"; + case TDLS_SHOULD_TEARDOWN: + return 
"SHOULD_TEARDOWN"; + case TDLS_PEER_DISCONNECTED: + return "SHOULD_PEER_DISCONNECTED"; + case TDLS_CONNECTION_TRACKER_NOTIFY: + return "CONNECTION_TRACKER_NOTIFICATION"; + default: + return "INVALID_TYPE"; + } +} +#endif + +QDF_STATUS tdls_process_should_discover(struct wlan_objmgr_vdev *vdev, + struct tdls_event_info *evt) +{ + struct tdls_soc_priv_obj *soc_obj; + struct tdls_vdev_priv_obj *vdev_obj; + struct tdls_peer *curr_peer; + uint32_t feature; + uint16_t type; + + /*TODO ignore this if any concurrency detected*/ + soc_obj = wlan_vdev_get_tdls_soc_obj(vdev); + vdev_obj = wlan_vdev_get_tdls_vdev_obj(vdev); + type = evt->message_type; + + tdls_debug("TDLS %s: " QDF_MAC_ADDR_STR "reason %d", + tdls_evt_to_str(type), + QDF_MAC_ADDR_ARRAY(evt->peermac.bytes), + evt->peer_reason); + if (!soc_obj || !vdev_obj) { + tdls_err("soc_obj: %pK, vdev_obj: %pK, ignore %s", + soc_obj, vdev_obj, tdls_evt_to_str(type)); + return QDF_STATUS_E_NULL_VALUE; + } + if (soc_obj->tdls_nss_switch_in_progress) { + tdls_err("TDLS antenna switching, ignore %s", + tdls_evt_to_str(type)); + return QDF_STATUS_SUCCESS; + } + + curr_peer = tdls_get_peer(vdev_obj, evt->peermac.bytes); + if (!curr_peer) { + tdls_notice("curr_peer is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (TDLS_LINK_CONNECTED == curr_peer->link_status) { + tdls_err("TDLS link status is connected, ignore"); + return QDF_STATUS_SUCCESS; + } + + feature = soc_obj->tdls_configs.tdls_feature_flags; + if (TDLS_IS_EXTERNAL_CONTROL_ENABLED(feature) && + !curr_peer->is_forced_peer) { + tdls_debug("curr_peer is not forced, ignore %s", + tdls_evt_to_str(type)); + return QDF_STATUS_SUCCESS; + } + + tdls_debug("initiate TDLS setup on %s, ext: %d, force: %d, reason: %d", + tdls_evt_to_str(type), + TDLS_IS_EXTERNAL_CONTROL_ENABLED(feature), + curr_peer->is_forced_peer, evt->peer_reason); + vdev_obj->curr_candidate = curr_peer; + tdls_implicit_send_discovery_request(vdev_obj); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS 
tdls_process_should_teardown(struct wlan_objmgr_vdev *vdev, + struct tdls_event_info *evt) +{ + struct tdls_soc_priv_obj *soc_obj; + struct tdls_vdev_priv_obj *vdev_obj; + struct tdls_peer *curr_peer; + uint32_t reason; + uint16_t type; + + type = evt->message_type; + soc_obj = wlan_vdev_get_tdls_soc_obj(vdev); + vdev_obj = wlan_vdev_get_tdls_vdev_obj(vdev); + + tdls_debug("TDLS %s: " QDF_MAC_ADDR_STR "reason %d", + tdls_evt_to_str(type), + QDF_MAC_ADDR_ARRAY(evt->peermac.bytes), evt->peer_reason); + + if (!soc_obj || !vdev_obj) { + tdls_err("soc_obj: %pK, vdev_obj: %pK, ignore %s", + soc_obj, vdev_obj, tdls_evt_to_str(type)); + return QDF_STATUS_E_NULL_VALUE; + } + + curr_peer = tdls_find_peer(vdev_obj, evt->peermac.bytes); + if (!curr_peer) { + tdls_notice("curr_peer is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + reason = evt->peer_reason; + if (TDLS_LINK_CONNECTED == curr_peer->link_status) { + tdls_err("%s reason: %d for" QDF_MAC_ADDR_STR, + tdls_evt_to_str(type), evt->peer_reason, + QDF_MAC_ADDR_ARRAY(evt->peermac.bytes)); + if (reason == TDLS_TEARDOWN_RSSI || + reason == TDLS_DISCONNECTED_PEER_DELETE || + reason == TDLS_TEARDOWN_PTR_TIMEOUT || + reason == TDLS_TEARDOWN_NO_RSP) + reason = TDLS_TEARDOWN_PEER_UNREACHABLE; + else + reason = TDLS_TEARDOWN_PEER_UNSPEC_REASON; + + tdls_indicate_teardown(vdev_obj, curr_peer, reason); + } else { + tdls_err("TDLS link is not connected, ignore %s", + tdls_evt_to_str(type)); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tdls_process_connection_tracker_notify(struct wlan_objmgr_vdev *vdev, + struct tdls_event_info *evt) +{ + struct tdls_soc_priv_obj *soc_obj; + struct tdls_vdev_priv_obj *vdev_obj; + uint16_t type; + + type = evt->message_type; + soc_obj = wlan_vdev_get_tdls_soc_obj(vdev); + vdev_obj = wlan_vdev_get_tdls_vdev_obj(vdev); + + if (!soc_obj || !vdev_obj) { + tdls_err("soc_obj: %pK, vdev_obj: %pK, ignore %s", + soc_obj, vdev_obj, tdls_evt_to_str(type)); + return QDF_STATUS_E_NULL_VALUE; + } + + 
/*TODO connection tracker update*/ + return QDF_STATUS_SUCCESS; +} + +/** + * tdls_process_set_responder() - Set/clear TDLS peer's responder role + * @set_req: set responder request + * + * Return: 0 for success or -EINVAL otherwise + */ +static +int tdls_process_set_responder(struct tdls_set_responder_req *set_req) +{ + struct tdls_peer *curr_peer; + struct tdls_vdev_priv_obj *tdls_vdev; + + tdls_vdev = wlan_vdev_get_tdls_vdev_obj(set_req->vdev); + if (!tdls_vdev) { + tdls_err("tdls vdev obj is NULL"); + return -EINVAL; + } + curr_peer = tdls_get_peer(tdls_vdev, set_req->peer_mac); + if (curr_peer == NULL) { + tdls_err("curr_peer is NULL"); + return -EINVAL; + } + + curr_peer->is_responder = set_req->responder; + return 0; +} + + +/** + * tdls_set_responder() - Set/clear TDLS peer's responder role + * @set_req: set responder request + * + * Return: 0 for success or -EINVAL otherwise + */ +int tdls_set_responder(struct tdls_set_responder_req *set_req) +{ + QDF_STATUS status; + + if (!set_req || !set_req->vdev) { + tdls_err("Invalid input params %pK", set_req); + return -EINVAL; + } + + status = wlan_objmgr_vdev_try_get_ref(set_req->vdev, WLAN_TDLS_NB_ID); + if (QDF_STATUS_SUCCESS != status) { + tdls_err("vdev object is deleted"); + return -EINVAL; + } + + status = tdls_process_set_responder(set_req); + + wlan_objmgr_vdev_release_ref(set_req->vdev, WLAN_TDLS_NB_ID); + qdf_mem_free(set_req); + return status; +} + +static int tdls_teardown_links(struct tdls_soc_priv_obj *soc_obj, uint32_t mode) +{ + uint8_t staidx; + struct tdls_peer *curr_peer; + struct tdls_conn_info *conn_rec; + int ret = 0; + + conn_rec = soc_obj->tdls_conn_info; + for (staidx = 0; staidx < soc_obj->max_num_tdls_sta; staidx++) { + if (conn_rec[staidx].sta_id == INVALID_TDLS_PEER_ID) + continue; + + curr_peer = tdls_find_all_peer(soc_obj, + conn_rec[staidx].peer_mac.bytes); + if (!curr_peer) + continue; + + /* if supported only 1x1, skip it */ + if (curr_peer->spatial_streams == HW_MODE_SS_1x1) + 
continue; + + tdls_debug("Indicate TDLS teardown (staId %d)", + curr_peer->sta_id); + tdls_indicate_teardown(curr_peer->vdev_priv, curr_peer, + TDLS_TEARDOWN_PEER_UNSPEC_REASON); + + soc_obj->tdls_teardown_peers_cnt++; + } + + if (soc_obj->tdls_teardown_peers_cnt >= 1) { + soc_obj->tdls_nss_switch_in_progress = true; + tdls_debug("TDLS peers to be torn down = %d", + soc_obj->tdls_teardown_peers_cnt); + + /* set the antenna switch transition mode */ + if (mode == HW_MODE_SS_1x1) { + soc_obj->tdls_nss_transition_mode = + TDLS_NSS_TRANSITION_S_2x2_to_1x1; + ret = -EAGAIN; + } else { + soc_obj->tdls_nss_transition_mode = + TDLS_NSS_TRANSITION_S_1x1_to_2x2; + ret = 0; + } + tdls_debug("TDLS teardown for antenna switch operation starts"); + } + + return ret; +} + +QDF_STATUS tdls_process_antenna_switch(struct tdls_antenna_switch_request *req) +{ + QDF_STATUS status; + struct tdls_soc_priv_obj *soc_obj; + struct tdls_vdev_priv_obj *vdev_obj; + struct wlan_objmgr_vdev *vdev = NULL; + uint32_t vdev_nss; + int ant_switch_state = 0; + uint32_t vdev_id; + enum QDF_OPMODE opmode; + uint8_t channel; + struct tdls_osif_indication ind; + + if (!req) { + tdls_err("null req"); + return QDF_STATUS_E_INVAL; + } + + vdev = req->vdev; + if (!vdev) { + tdls_err("null vdev"); + qdf_mem_free(req); + return QDF_STATUS_E_INVAL; + } + + status = tdls_get_vdev_objects(vdev, &vdev_obj, &soc_obj); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("can't get vdev_obj & soc_obj"); + goto get_obj_err; + } + + if (soc_obj->connected_peer_count == 0) + goto ant_sw_done; + + if (soc_obj->tdls_nss_switch_in_progress) { + if (!soc_obj->tdls_nss_teardown_complete) { + tdls_err("TDLS antenna switch is in progress"); + goto ant_sw_in_progress; + } else { + goto ant_sw_done; + } + } + + vdev_id = wlan_vdev_get_id(vdev); + opmode = wlan_vdev_mlme_get_opmode(vdev); + channel = policy_mgr_get_channel(soc_obj->soc, + policy_mgr_convert_device_mode_to_qdf_type(opmode), + &vdev_id); + + /* Check supported nss for 
TDLS, if is 1x1, no need to teardown links */ + if (WLAN_REG_IS_24GHZ_CH(channel)) + vdev_nss = soc_obj->tdls_configs.tdls_vdev_nss_2g; + else + vdev_nss = soc_obj->tdls_configs.tdls_vdev_nss_5g; + + if (vdev_nss == HW_MODE_SS_1x1) { + tdls_debug("Supported NSS is 1x1, no need to teardown TDLS links"); + goto ant_sw_done; + } + + if (tdls_teardown_links(soc_obj, req->mode) == 0) + goto ant_sw_done; + +ant_sw_in_progress: + ant_switch_state = -EAGAIN; +ant_sw_done: + if (soc_obj->tdls_event_cb) { + ind.vdev = vdev; + ind.status = ant_switch_state; + soc_obj->tdls_event_cb(soc_obj->tdls_evt_cb_data, + TDLS_EVENT_ANTENNA_SWITCH, &ind); + } + + if (soc_obj->tdls_nss_switch_in_progress && + soc_obj->tdls_nss_teardown_complete) { + soc_obj->tdls_nss_switch_in_progress = false; + soc_obj->tdls_nss_teardown_complete = false; + } + tdls_debug("tdls_nss_switch_in_progress: %d tdls_nss_teardown_complete: %d", + soc_obj->tdls_nss_switch_in_progress, + soc_obj->tdls_nss_teardown_complete); + +get_obj_err: + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID); + qdf_mem_free(req); + + return status; +} + +QDF_STATUS tdls_antenna_switch_flush_callback(struct scheduler_msg *msg) +{ + struct tdls_antenna_switch_request *req; + + if (!msg || !msg->bodyptr) { + tdls_err("msg: 0x%pK", msg); + return QDF_STATUS_E_NULL_VALUE; + } + req = msg->bodyptr; + wlan_objmgr_vdev_release_ref(req->vdev, WLAN_TDLS_NB_ID); + qdf_mem_free(req); + + return QDF_STATUS_SUCCESS; +} + +void wlan_tdls_offchan_parms_callback(struct wlan_objmgr_vdev *vdev) +{ + if (!vdev) { + tdls_err("vdev is NULL"); + return; + } + + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID); +} + +int tdls_process_set_offchannel(struct tdls_set_offchannel *req) +{ + int status; + struct tdls_vdev_priv_obj *tdls_vdev_obj; + struct tdls_soc_priv_obj *tdls_soc_obj; + + if (tdls_get_vdev_objects(req->vdev, &tdls_vdev_obj, &tdls_soc_obj) != + QDF_STATUS_SUCCESS) { + status = -ENOTSUPP; + goto free; + } + + tdls_debug("TDLS offchannel 
to be configured %d", req->offchannel); + + if (req->offchannel) + status = tdls_set_tdls_offchannel(tdls_soc_obj, + req->offchannel); + else + status = -ENOTSUPP; + +free: + + if (req->callback) + req->callback(req->vdev); + qdf_mem_free(req); + + return status; +} + +int tdls_process_set_offchan_mode(struct tdls_set_offchanmode *req) +{ + int status; + + tdls_debug("TDLS offchan mode to be configured %d", req->offchan_mode); + status = tdls_set_tdls_offchannelmode(req->vdev, req->offchan_mode); + + if (req->callback) + req->callback(req->vdev); + qdf_mem_free(req); + + return status; +} + +int tdls_process_set_secoffchanneloffset( + struct tdls_set_secoffchanneloffset *req) +{ + int status; + struct tdls_vdev_priv_obj *tdls_vdev_obj; + struct tdls_soc_priv_obj *tdls_soc_obj; + + if (tdls_get_vdev_objects(req->vdev, &tdls_vdev_obj, &tdls_soc_obj) != + QDF_STATUS_SUCCESS) { + status = -ENOTSUPP; + goto free; + } + + tdls_debug("TDLS offchannel offset to be configured %d", + req->offchan_offset); + status = tdls_set_tdls_secoffchanneloffset(tdls_soc_obj, + req->offchan_offset); + +free: + + if (req->callback) + req->callback(req->vdev); + qdf_mem_free(req); + + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_cmds_process.h b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_cmds_process.h new file mode 100644 index 0000000000000000000000000000000000000000..bebf1b099f540bc67f700a247d498edde3f6e94f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_cmds_process.h @@ -0,0 +1,433 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_tdls_cmds_process.h + * + * TDLS north bound commands include file + */ + +#ifndef _WLAN_TDLS_CMDS_PROCESS_H_ +#define _WLAN_TDLS_CMDS_PROCESS_H_ + +#define TDLS_IS_SETUP_ACTION(action) \ + ((TDLS_SETUP_REQUEST <= action) && \ + (TDLS_SETUP_CONFIRM >= action)) + + +/** + * enum tdls_add_oper - add peer type + * @TDLS_OPER_NONE: none + * @TDLS_OPER_ADD: add new peer + * @TDLS_OPER_UPDATE: used to update peer + */ +enum tdls_add_oper { + TDLS_OPER_NONE, + TDLS_OPER_ADD, + TDLS_OPER_UPDATE +}; + +/** + * enum legacy_result_code - defined to comply with tSirResultCodes, need refine + * when mlme converged. 
+ * @legacy_result_success: success + * @legacy_result_max: max result value + */ +enum legacy_result_code { + legacy_result_success, + legacy_result_max = 0x7FFFFFFF +}; + +/** + * struct tdls_send_mgmt_rsp - TDLS Response struct PE --> TDLS module + * same as struct tSirSmeRsp + * @message_type: message type eWNI_SME_TDLS_SEND_MGMT_RSP + * @length: message length + * @session_id: session id + * @transaction_id: transaction id + * @status_code: status code as tSirResultCodes + * @psoc: soc object + */ +struct tdls_send_mgmt_rsp { + uint16_t message_type; + uint16_t length; + uint8_t session_id; + uint16_t transaction_id; + enum legacy_result_code status_code; + struct wlan_objmgr_psoc *psoc; +}; + +/** + * struct tdls_mgmt_tx_completion_ind - TDLS TX completion PE --> TDLS module + * same as struct sSirMgmtTxCompletionInd + * @message_type: message type eWNI_SME_MGMT_FRM_TX_COMPLETION_IND + * @length: message length + * @session_id: session id + * @tx_complete_status: tx complete status + * @psoc: soc object + */ +struct tdls_mgmt_tx_completion_ind { + uint16_t message_type; + uint16_t length; + uint8_t session_id; /* Session ID */ + uint32_t tx_complete_status; + struct wlan_objmgr_psoc *psoc; +}; + +/** + * struct tdls_add_sta_req - TDLS request struct TDLS module --> PE + * same as struct tSirTdlsAddStaReq; + * @message_type: eWNI_SME_TDLS_ADD_STA_REQ + * @length: message length + * @session_id: session id + * @transaction_id: transaction id for cmd + * @bssid: bssid + * @tdls_oper: add peer type + * @peermac: MAC address for TDLS peer + * @capability: mac capability as sSirMacCapabilityInfo + * @extn_capability: extent capability + * @supported_rates_length: rates length + * @supported_rates: supported rates + * @htcap_present: ht capability present + * @ht_cap: ht capability + * @vhtcap_present: vht capability present + * @vht_cap: vht capability + * @uapsd_queues: uapsd queue as sSirMacQosInfoStation + * @max_sp: maximum service period + */ +struct 
tdls_add_sta_req { + uint16_t message_type; + uint16_t length; + uint8_t session_id; + uint16_t transaction_id; + struct qdf_mac_addr bssid; + enum tdls_add_oper tdls_oper; + struct qdf_mac_addr peermac; + uint16_t capability; + uint8_t extn_capability[WLAN_MAC_MAX_EXTN_CAP]; + uint8_t supported_rates_length; + uint8_t supported_rates[WLAN_MAC_MAX_SUPP_RATES]; + uint8_t htcap_present; + struct htcap_cmn_ie ht_cap; + uint8_t vhtcap_present; + struct vhtcap vht_cap; + uint8_t uapsd_queues; + uint8_t max_sp; +}; + +/** + * struct tdls_add_sta_rsp - TDLS Response struct PE --> TDLS module + * same as struct sSirTdlsAddStaRsp + * @message_type: message type eWNI_SME_TDLS_ADD_STA_RSP + * @length: message length + * @status_code: status code as tSirResultCodes + * @peermac: MAC address of the TDLS peer + * @session_id: session id + * @sta_id: sta id + * @sta_type: sta type + * @tdls_oper: add peer type + * @psoc: soc object + */ +struct tdls_add_sta_rsp { + uint16_t message_type; + uint16_t length; + QDF_STATUS status_code; + struct qdf_mac_addr peermac; + uint8_t session_id; + uint16_t sta_id; + uint16_t sta_type; + enum tdls_add_oper tdls_oper; + struct wlan_objmgr_psoc *psoc; +}; + +/** + * struct tdls_del_sta_req - TDLS Request struct TDLS module --> PE + * same as sSirTdlsDelStaReq + * @message_type: message type eWNI_SME_TDLS_DEL_STA_REQ + * @length: message length + * @session_id: session id + * @transaction_id: transaction id for cmd + * @bssid: bssid + * @peermac: MAC address of the TDLS peer + */ +struct tdls_del_sta_req { + uint16_t message_type; + uint16_t length; + uint8_t session_id; + uint16_t transaction_id; + struct qdf_mac_addr bssid; + struct qdf_mac_addr peermac; +}; + +/** + * struct tdls_del_sta_rsp - TDLS Response struct PE --> TDLS module + * same as sSirTdlsDelStaRsp + * @message_type: message type eWNI_SME_TDLS_DEL_STA_RSP + * @length: message length + * @session_id: session id + * @status_code: status code as tSirResultCodes + * @peermac: MAC 
address of the TDLS peer + * @sta_id: sta id + * @psoc: soc object + */ +struct tdls_del_sta_rsp { + uint16_t message_type; + uint16_t length; + uint8_t session_id; + QDF_STATUS status_code; + struct qdf_mac_addr peermac; + uint16_t sta_id; + struct wlan_objmgr_psoc *psoc; +}; + +/** + * tdls_process_add_peer() - add TDLS peer + * @req: TDLS add peer request + * + * Return: QDF_STATUS_SUCCESS if success; other value if failed + */ +QDF_STATUS tdls_process_add_peer(struct tdls_add_peer_request *req); + +/** + * tdls_process_del_peer() - del TDLS peer + * @req: TDLS del peer request + * + * Return: QDF_STATUS_SUCCESS if success; other value if failed + */ +QDF_STATUS tdls_process_del_peer(struct tdls_oper_request *req); + +/** + * tdls_process_enable_link() - enable TDLS link + * @req: TDLS enable link request + * + * Return: QDF_STATUS_SUCCESS if success; other value if failed + */ +QDF_STATUS tdls_process_enable_link(struct tdls_oper_request *req); + +/** + * tdls_process_setup_peer() - process configure an externally + * controllable TDLS peer + * @req: TDLS configure force peer request + * + * Return: QDF_STATUS_SUCCESS if success; other values if failed + */ +QDF_STATUS tdls_process_setup_peer(struct tdls_oper_request *req); + +/** + * tdls_process_remove_force_peer() - process remove an externally controllable + * TDLS peer + * @req: TDLS operation request + * + * Return: QDF_STATUS_SUCCESS if success; other values if failed + */ +QDF_STATUS tdls_process_remove_force_peer(struct tdls_oper_request *req); + +/** + * tdls_process_update_peer() - update TDLS peer + * @req: TDLS update peer request + * + * Return: QDF_STATUS_SUCCESS if success; other value if failed + */ +QDF_STATUS tdls_process_update_peer(struct tdls_update_peer_request *req); + +/** + * tdls_process_antenna_switch() - handle TDLS antenna switch + * @req: TDLS antenna switch request + * + * Rely on callback to indicate the antenna switch state to caller. 
+ *
+ * Return: QDF_STATUS_SUCCESS if success; other value if failed.
+ */
+QDF_STATUS tdls_process_antenna_switch(struct tdls_antenna_switch_request *req);
+
+/**
+ * tdls_antenna_switch_flush_callback() - flush TDLS antenna switch request
+ * @msg: scheduler message contains tdls antenna switch event
+ *
+ * This function call is invoked when scheduler thread is going down
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS tdls_antenna_switch_flush_callback(struct scheduler_msg *msg);
+
+/**
+ * tdls_pe_del_peer() - send TDLS delete peer request to PE
+ * @req: TDLS delete peer request
+ *
+ * Return: QDF status
+ */
+QDF_STATUS tdls_pe_del_peer(struct tdls_del_peer_request *req);
+
+/**
+ * tdls_process_add_peer_rsp() - handle response for add or update TDLS peer
+ * @rsp: TDLS add peer response
+ *
+ * Return: QDF status
+ */
+QDF_STATUS tdls_process_add_peer_rsp(struct tdls_add_sta_rsp *rsp);
+
+/**
+ * tdls_reset_nss() - reset tdls nss parameters
+ * @tdls_soc: TDLS soc object
+ * @action_code: action code
+ *
+ * Return: None
+ */
+void tdls_reset_nss(struct tdls_soc_priv_obj *tdls_soc,
+		    uint8_t action_code);
+
+/**
+ * tdls_release_serialization_command() - TDLS wrapper to
+ * release serialization command.
+ * @vdev: Object manager vdev
+ * @type: command to release.
+ *
+ * Return: None
+ */
+
+void
+tdls_release_serialization_command(struct wlan_objmgr_vdev *vdev,
+				   enum wlan_serialization_cmd_type type);
+
+/**
+ * tdls_set_cap() - set TDLS capability type
+ * @tdls_vdev: tdls vdev object
+ * @mac: peer mac address
+ * @cap: TDLS capability type
+ *
+ * Return: 0 if successful or negative errno otherwise
+ */
+int tdls_set_cap(struct tdls_vdev_priv_obj *tdls_vdev, const uint8_t *mac,
+		 enum tdls_peer_capab cap);
+
+/**
+ * tdls_process_send_mgmt_rsp() - handle response for send mgmt
+ * @rsp: TDLS send mgmt response
+ *
+ * Return: QDF_STATUS_SUCCESS for success; other values if failed
+ */
+QDF_STATUS tdls_process_send_mgmt_rsp(struct tdls_send_mgmt_rsp *rsp);
+
+/**
+ * tdls_send_mgmt_tx_completion() - process tx completion
+ * @tx_complete: TDLS mgmt completion info
+ *
+ * Return: QDF_STATUS_SUCCESS for success; other values if failed
+ */
+QDF_STATUS tdls_send_mgmt_tx_completion(
+	struct tdls_mgmt_tx_completion_ind *tx_complete);
+
+/**
+ * tdls_process_del_peer_rsp() - handle response for delete TDLS peer
+ * @rsp: TDLS delete peer response
+ *
+ * Return: QDF status
+ */
+QDF_STATUS tdls_process_del_peer_rsp(struct tdls_del_sta_rsp *rsp);
+
+/**
+ * tdls_process_should_discover() - handle tdls should_discover event
+ * @vdev: vdev object
+ * @evt: event info
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS tdls_process_should_discover(struct wlan_objmgr_vdev *vdev,
+					struct tdls_event_info *evt);
+
+/**
+ * tdls_process_should_teardown() - handle tdls should_teardown event
+ * @vdev: vdev object
+ * @evt: event info
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS tdls_process_should_teardown(struct wlan_objmgr_vdev *vdev,
+					struct tdls_event_info *evt);
+
+/**
+ * tdls_process_connection_tracker_notify() - handle tdls connect tracker notify
+ * @vdev: vdev object
+ * @evt: event info
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS tdls_process_connection_tracker_notify(struct wlan_objmgr_vdev *vdev,
+					struct tdls_event_info *evt);
+
+/**
+ * tdls_validate_mgmt_request() - validate mgmt request
+ * @tdls_mgmt_req: action frame request
+ *
+ * Return: 0 for success or -EINVAL otherwise
+ */
+int tdls_validate_mgmt_request(struct tdls_action_frame_request *tdls_mgmt_req);
+
+/**
+ * tdls_set_responder() - Set/clear TDLS peer's responder role
+ * @set_req: set responder request
+ *
+ * Return: 0 for success or -EINVAL otherwise
+ */
+int tdls_set_responder(struct tdls_set_responder_req *set_req);
+
+/**
+ * tdls_decrement_peer_count() - decrement connected TDLS peer counter
+ * @soc_obj: TDLS soc object
+ *
+ * Used in scheduler thread context, no lock needed.
+ *
+ * Return: None.
+ */
+void tdls_decrement_peer_count(struct tdls_soc_priv_obj *soc_obj);
+
+/**
+ * wlan_tdls_offchan_parms_callback() - Callback to release ref count
+ * @vdev: vdev object
+ *
+ * Return: none
+ */
+void wlan_tdls_offchan_parms_callback(struct wlan_objmgr_vdev *vdev);
+
+/**
+ * tdls_process_set_offchannel() - Handle set offchannel request for TDLS
+ * @req: TDLS set offchannel request
+ *
+ * Return: int status
+ */
+int tdls_process_set_offchannel(struct tdls_set_offchannel *req);
+
+/**
+ * tdls_process_set_offchan_mode() - Handle set offchan mode request for TDLS
+ * @req: TDLS set offchannel mode request
+ *
+ * Return: int status
+ */
+int tdls_process_set_offchan_mode(struct tdls_set_offchanmode *req);
+
+/**
+ * tdls_process_set_secoffchanneloffset() - Handle set sec offchannel
+ * offset request for TDLS
+ * @req: TDLS set secoffchannel offchannel request
+ *
+ * Return: int status
+ */
+int tdls_process_set_secoffchanneloffset(
+		struct tdls_set_secoffchanneloffset *req);
+
+#endif
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_ct.c b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_ct.c
new file mode 100644
index 0000000000000000000000000000000000000000..2f3179718e5dc2e34328f33e160b647545472aa7
--- /dev/null
+++
b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_ct.c @@ -0,0 +1,1302 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_tdls_ct.c + * + * TDLS connection tracker function definitions + */ + +#include "wlan_tdls_main.h" +#include "wlan_tdls_peer.h" +#include "wlan_tdls_ct.h" +#include "wlan_tdls_cmds_process.h" + +bool tdls_is_vdev_authenticated(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_peer *peer; + bool is_authenticated = false; + + peer = wlan_vdev_get_bsspeer(vdev); + + if (!peer) { + tdls_err("peer is null"); + return false; + } + + is_authenticated = wlan_peer_mlme_get_auth_state(peer); + + return is_authenticated; +} + +/** + * tdls_peer_reset_discovery_processed() - reset discovery status + * @tdls_vdev: TDLS vdev object + * + * This function resets discovery processing bit for all TDLS peers + * + * Caller has to take the lock before calling this function + * + * Return: 0 + */ +static int32_t tdls_peer_reset_discovery_processed( + struct tdls_vdev_priv_obj *tdls_vdev) +{ + int i; + qdf_list_t *head; + qdf_list_node_t *p_node; + struct tdls_peer *peer; + QDF_STATUS status; + + tdls_vdev->discovery_peer_cnt = 0; + + for (i = 0; i < 
WLAN_TDLS_PEER_LIST_SIZE; i++) { + head = &tdls_vdev->peer_list[i]; + status = qdf_list_peek_front(head, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + peer = qdf_container_of(p_node, struct tdls_peer, node); + peer->discovery_processed = 0; + status = qdf_list_peek_next(head, p_node, &p_node); + } + } + + return 0; +} + +void tdls_discovery_timeout_peer_cb(void *user_data) +{ + int i; + qdf_list_t *head; + qdf_list_node_t *p_node; + struct tdls_peer *peer; + QDF_STATUS status; + struct tdls_vdev_priv_obj *tdls_vdev; + + if (!user_data) { + tdls_err("discovery time out data is null"); + return; + } + + tdls_vdev = (struct tdls_vdev_priv_obj *) user_data; + + for (i = 0; i < WLAN_TDLS_PEER_LIST_SIZE; i++) { + head = &tdls_vdev->peer_list[i]; + status = qdf_list_peek_front(head, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + peer = qdf_container_of(p_node, struct tdls_peer, + node); + if (TDLS_LINK_DISCOVERING != peer->link_status) { + status = qdf_list_peek_next(head, p_node, + &p_node); + continue; + } + tdls_debug(QDF_MAC_ADDR_STR " to idle state", + QDF_MAC_ADDR_ARRAY(peer->peer_mac.bytes)); + tdls_set_peer_link_status(peer, + TDLS_LINK_IDLE, + TDLS_LINK_NOT_SUPPORTED); + } + } + tdls_vdev->discovery_sent_cnt = 0; + + /* add tdls power save prohibited */ + + return; +} + +/** + * tdls_reset_tx_rx() - reset tx/rx counters for all tdls peers + * @tdls_vdev: TDLS vdev object + * + * Caller has to take the TDLS lock before calling this function + * + * Return: Void + */ +static void tdls_reset_tx_rx(struct tdls_vdev_priv_obj *tdls_vdev) +{ + int i; + qdf_list_t *head; + qdf_list_node_t *p_node; + struct tdls_peer *peer; + QDF_STATUS status; + + for (i = 0; i < WLAN_TDLS_PEER_LIST_SIZE; i++) { + head = &tdls_vdev->peer_list[i]; + status = qdf_list_peek_front(head, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + peer = qdf_container_of(p_node, struct tdls_peer, node); + peer->tx_pkt = 0; + peer->rx_pkt = 0; + status = qdf_list_peek_next(head, 
p_node, &p_node); + } + } + return; +} + +void tdls_implicit_disable(struct tdls_vdev_priv_obj *tdls_vdev) +{ + tdls_debug("Disable Implicit TDLS"); + tdls_timers_stop(tdls_vdev); +} + +/** + * tdls_implicit_enable() - enable implicit tdls triggering + * @tdls_vdev: TDLS vdev + * + * Return: Void + */ +void tdls_implicit_enable(struct tdls_vdev_priv_obj *tdls_vdev) +{ + tdls_debug("Enable Implicit TDLS"); + if (!tdls_vdev) + return; + + tdls_peer_reset_discovery_processed(tdls_vdev); + tdls_reset_tx_rx(tdls_vdev); + /* TODO check whether tdls power save prohibited */ + + /* Restart the connection tracker timer */ + tdls_timer_restart(tdls_vdev->vdev, &tdls_vdev->peer_update_timer, + tdls_vdev->threshold_config.tx_period_t); +} + +/** + * tdls_ct_sampling_tx_rx() - collect tx/rx traffic sample + * @tdls_vdev_obj: tdls vdev object + * @tdls_soc_obj: tdls soc object + * + * Function to update data traffic information in tdls connection + * tracker data structure for connection tracker operation + * + * Return: None + */ +static void tdls_ct_sampling_tx_rx(struct tdls_vdev_priv_obj *tdls_vdev, + struct tdls_soc_priv_obj *tdls_soc) +{ + struct tdls_peer *curr_peer; + uint8_t mac[QDF_MAC_ADDR_SIZE]; + uint8_t mac_cnt; + uint8_t mac_entries; + struct tdls_conn_tracker_mac_table mac_table[WLAN_TDLS_CT_TABLE_SIZE]; + + qdf_spin_lock_bh(&tdls_soc->tdls_ct_spinlock); + + if (0 == tdls_vdev->valid_mac_entries) { + qdf_spin_unlock_bh(&tdls_soc->tdls_ct_spinlock); + return; + } + + mac_entries = QDF_MIN(tdls_vdev->valid_mac_entries, + WLAN_TDLS_CT_TABLE_SIZE); + + qdf_mem_copy(mac_table, tdls_vdev->ct_peer_table, + (sizeof(struct tdls_conn_tracker_mac_table)) * mac_entries); + + qdf_mem_set(tdls_vdev->ct_peer_table, + (sizeof(struct tdls_conn_tracker_mac_table)) * mac_entries, 0); + + tdls_vdev->valid_mac_entries = 0; + + qdf_spin_unlock_bh(&tdls_soc->tdls_ct_spinlock); + + for (mac_cnt = 0; mac_cnt < mac_entries; mac_cnt++) { + qdf_mem_copy(mac, 
mac_table[mac_cnt].mac_address.bytes, + QDF_MAC_ADDR_SIZE); + curr_peer = tdls_get_peer(tdls_vdev, mac); + if (NULL != curr_peer) { + curr_peer->tx_pkt = + mac_table[mac_cnt].tx_packet_cnt; + curr_peer->rx_pkt = + mac_table[mac_cnt].rx_packet_cnt; + } + } +} + +void tdls_update_rx_pkt_cnt(struct wlan_objmgr_vdev *vdev, + struct qdf_mac_addr *mac_addr, + struct qdf_mac_addr *dest_mac_addr) +{ + struct tdls_vdev_priv_obj *tdls_vdev_obj; + struct tdls_soc_priv_obj *tdls_soc_obj; + uint8_t mac_cnt; + uint8_t valid_mac_entries; + struct tdls_conn_tracker_mac_table *mac_table; + + if (QDF_STATUS_SUCCESS != tdls_get_vdev_objects(vdev, &tdls_vdev_obj, + &tdls_soc_obj)) + return; + + if (!tdls_soc_obj->enable_tdls_connection_tracker) + return; + + if (qdf_is_macaddr_group(mac_addr)) + return; + + if (qdf_is_macaddr_group(dest_mac_addr)) + return; + + if (qdf_mem_cmp(vdev->vdev_mlme.macaddr, mac_addr, + QDF_MAC_ADDR_SIZE) == 0) + return; + + qdf_spin_lock_bh(&tdls_soc_obj->tdls_ct_spinlock); + valid_mac_entries = tdls_vdev_obj->valid_mac_entries; + mac_table = tdls_vdev_obj->ct_peer_table; + + for (mac_cnt = 0; mac_cnt < valid_mac_entries; mac_cnt++) { + if (qdf_mem_cmp(mac_table[mac_cnt].mac_address.bytes, + mac_addr, QDF_MAC_ADDR_SIZE) == 0) { + mac_table[mac_cnt].rx_packet_cnt++; + goto rx_cnt_return; + } + } + + /* If we have more than 8 peers within 30 mins. 
we will + * stop tracking till the old entries are removed + */ + if (mac_cnt < WLAN_TDLS_CT_TABLE_SIZE) { + qdf_mem_copy(mac_table[mac_cnt].mac_address.bytes, + mac_addr, QDF_MAC_ADDR_SIZE); + tdls_vdev_obj->valid_mac_entries = mac_cnt+1; + mac_table[mac_cnt].rx_packet_cnt = 1; + } + +rx_cnt_return: + qdf_spin_unlock_bh(&tdls_soc_obj->tdls_ct_spinlock); + return; +} + +void tdls_update_tx_pkt_cnt(struct wlan_objmgr_vdev *vdev, + struct qdf_mac_addr *mac_addr) +{ + struct tdls_vdev_priv_obj *tdls_vdev_obj; + struct tdls_soc_priv_obj *tdls_soc_obj; + uint8_t mac_cnt; + uint8_t valid_mac_entries; + struct tdls_conn_tracker_mac_table *mac_table; + + if (QDF_STATUS_SUCCESS != tdls_get_vdev_objects(vdev, &tdls_vdev_obj, + &tdls_soc_obj)) + return; + + if (!tdls_soc_obj->enable_tdls_connection_tracker) + return; + + if (qdf_is_macaddr_group(mac_addr)) + return; + + if (qdf_mem_cmp(vdev->vdev_mlme.macaddr, mac_addr, + QDF_MAC_ADDR_SIZE) == 0) + return; + + qdf_spin_lock_bh(&tdls_soc_obj->tdls_ct_spinlock); + mac_table = tdls_vdev_obj->ct_peer_table; + + valid_mac_entries = tdls_vdev_obj->valid_mac_entries; + + for (mac_cnt = 0; mac_cnt < valid_mac_entries; mac_cnt++) { + if (qdf_mem_cmp(mac_table[mac_cnt].mac_address.bytes, + mac_addr, QDF_MAC_ADDR_SIZE) == 0) { + mac_table[mac_cnt].tx_packet_cnt++; + goto tx_cnt_return; + } + } + + /* If we have more than 8 peers within 30 mins. 
we will + * stop tracking till the old entries are removed + */ + if (mac_cnt < WLAN_TDLS_CT_TABLE_SIZE) { + qdf_mem_copy(mac_table[mac_cnt].mac_address.bytes, + mac_addr, QDF_MAC_ADDR_SIZE); + mac_table[mac_cnt].tx_packet_cnt = 1; + tdls_vdev_obj->valid_mac_entries++; + } + +tx_cnt_return: + qdf_spin_unlock_bh(&tdls_soc_obj->tdls_ct_spinlock); + return; +} + +void tdls_implicit_send_discovery_request( + struct tdls_vdev_priv_obj *tdls_vdev_obj) +{ + struct tdls_peer *curr_peer; + struct tdls_peer *temp_peer; + struct tdls_soc_priv_obj *tdls_psoc; + struct tdls_osif_indication tdls_ind; + + if (NULL == tdls_vdev_obj) { + tdls_notice("tdls_vdev_obj is NULL"); + return; + } + + tdls_psoc = wlan_vdev_get_tdls_soc_obj(tdls_vdev_obj->vdev); + + if (NULL == tdls_psoc) { + tdls_notice("tdls_psoc_obj is NULL"); + return; + } + + curr_peer = tdls_vdev_obj->curr_candidate; + + if (NULL == curr_peer) { + tdls_err("curr_peer is NULL"); + return; + } + + /* This function is called in mutex_lock */ + temp_peer = tdls_is_progress(tdls_vdev_obj, NULL, 0); + if (NULL != temp_peer) { + tdls_notice(QDF_MAC_ADDR_STR " ongoing. 
pre_setup ignored", + QDF_MAC_ADDR_ARRAY(temp_peer->peer_mac.bytes)); + goto done; + } + + if (TDLS_CAP_UNKNOWN != curr_peer->tdls_support) + tdls_set_peer_link_status(curr_peer, + TDLS_LINK_DISCOVERING, + TDLS_LINK_SUCCESS); + + qdf_mem_copy(tdls_ind.peer_mac, curr_peer->peer_mac.bytes, + QDF_MAC_ADDR_SIZE); + + tdls_ind.vdev = tdls_vdev_obj->vdev; + + tdls_debug("Implicit TDLS, Send Discovery request event"); + + tdls_psoc->tdls_event_cb(tdls_psoc->tdls_evt_cb_data, + TDLS_EVENT_DISCOVERY_REQ, &tdls_ind); + + tdls_vdev_obj->discovery_sent_cnt++; + + tdls_timer_restart(tdls_vdev_obj->vdev, + &tdls_vdev_obj->peer_discovery_timer, + tdls_vdev_obj->threshold_config.tx_period_t - + TDLS_DISCOVERY_TIMEOUT_ERE_UPDATE); + + tdls_debug("discovery count %u timeout %u msec", + tdls_vdev_obj->discovery_sent_cnt, + tdls_vdev_obj->threshold_config.tx_period_t - + TDLS_DISCOVERY_TIMEOUT_ERE_UPDATE); +done: + tdls_vdev_obj->curr_candidate = NULL; + tdls_vdev_obj->magic = 0; + return; +} + +int tdls_recv_discovery_resp(struct tdls_vdev_priv_obj *tdls_vdev, + const uint8_t *mac) +{ + struct tdls_peer *curr_peer; + struct tdls_soc_priv_obj *tdls_soc; + struct tdls_osif_indication indication; + struct tdls_config_params *tdls_cfg; + int status = 0; + + if (!tdls_vdev) + return -EINVAL; + + tdls_soc = wlan_vdev_get_tdls_soc_obj(tdls_vdev->vdev); + if (NULL == tdls_soc) { + tdls_err("tdls soc is NULL"); + return -EINVAL; + } + + curr_peer = tdls_get_peer(tdls_vdev, mac); + if (NULL == curr_peer) { + tdls_err("curr_peer is NULL"); + return -EINVAL; + } + + if (tdls_vdev->discovery_sent_cnt) + tdls_vdev->discovery_sent_cnt--; + + if (0 == tdls_vdev->discovery_sent_cnt) + qdf_mc_timer_stop(&tdls_vdev->peer_discovery_timer); + + tdls_debug("Discovery(%u) Response from " QDF_MAC_ADDR_STR + " link_status %d", tdls_vdev->discovery_sent_cnt, + QDF_MAC_ADDR_ARRAY(curr_peer->peer_mac.bytes), + curr_peer->link_status); + + tdls_cfg = &tdls_vdev->threshold_config; + if (TDLS_LINK_DISCOVERING == 
curr_peer->link_status) { + /* Since we are here, it means Throughput threshold is + * already met. Make sure RSSI threshold is also met + * before setting up TDLS link. + */ + if ((int32_t) curr_peer->rssi > + (int32_t) tdls_cfg->rssi_trigger_threshold) { + tdls_set_peer_link_status(curr_peer, + TDLS_LINK_DISCOVERED, + TDLS_LINK_SUCCESS); + tdls_debug("Rssi Threshold met: " QDF_MAC_ADDR_STR + " rssi = %d threshold= %d", + QDF_MAC_ADDR_ARRAY(curr_peer->peer_mac.bytes), + curr_peer->rssi, + tdls_cfg->rssi_trigger_threshold); + + qdf_mem_copy(indication.peer_mac, mac, + QDF_MAC_ADDR_SIZE); + + indication.vdev = tdls_vdev->vdev; + + tdls_soc->tdls_event_cb(tdls_soc->tdls_evt_cb_data, + TDLS_EVENT_SETUP_REQ, + &indication); + } else { + tdls_debug("Rssi Threshold not met: " QDF_MAC_ADDR_STR + " rssi = %d threshold = %d ", + QDF_MAC_ADDR_ARRAY(curr_peer->peer_mac.bytes), + curr_peer->rssi, + tdls_cfg->rssi_trigger_threshold); + + tdls_set_peer_link_status(curr_peer, + TDLS_LINK_IDLE, + TDLS_LINK_UNSPECIFIED); + + /* if RSSI threshold is not met then allow + * further discovery attempts by decrementing + * count for the last attempt + */ + if (curr_peer->discovery_attempt) + curr_peer->discovery_attempt--; + } + } + + curr_peer->tdls_support = TDLS_CAP_SUPPORTED; + + return status; +} + +void tdls_indicate_teardown(struct tdls_vdev_priv_obj *tdls_vdev, + struct tdls_peer *curr_peer, + uint16_t reason) +{ + struct tdls_soc_priv_obj *tdls_soc; + struct tdls_osif_indication indication; + + if (!tdls_vdev || !curr_peer) { + tdls_err("tdls_vdev: %pK, curr_peer: %pK", + tdls_vdev, curr_peer); + return; + } + + tdls_soc = wlan_vdev_get_tdls_soc_obj(tdls_vdev->vdev); + if (!tdls_soc) { + tdls_err("tdls_soc: %pK", tdls_soc); + return; + } + + if (TDLS_LINK_CONNECTED != curr_peer->link_status) + return; + + tdls_set_peer_link_status(curr_peer, + TDLS_LINK_TEARING, + TDLS_LINK_UNSPECIFIED); + tdls_notice("Teardown reason %d", reason); + + if (tdls_soc->tdls_dp_vdev_update) + 
tdls_soc->tdls_dp_vdev_update(&tdls_soc->soc, + curr_peer->sta_id, + tdls_soc->tdls_update_dp_vdev_flags, + false); + + indication.reason = reason; + indication.vdev = tdls_vdev->vdev; + qdf_mem_copy(indication.peer_mac, curr_peer->peer_mac.bytes, + QDF_MAC_ADDR_SIZE); + + if (tdls_soc->tdls_event_cb) + tdls_soc->tdls_event_cb(tdls_soc->tdls_evt_cb_data, + TDLS_EVENT_TEARDOWN_REQ, &indication); +} + +/** + * tdls_get_conn_info() - get the tdls connection information. + * @tdls_soc: tdls soc object + * @idx: sta id + * + * Function to check tdls sta index + * + * Return: tdls connection information + */ +static struct tdls_conn_info * +tdls_get_conn_info(struct tdls_soc_priv_obj *tdls_soc, uint8_t idx) +{ + uint8_t sta_idx; + + /* check if there is available index for this new TDLS STA */ + for (sta_idx = 0; sta_idx < WLAN_TDLS_STA_MAX_NUM; sta_idx++) { + if (idx == tdls_soc->tdls_conn_info[sta_idx].sta_id) { + tdls_debug("tdls peer with sta_idx %u exists", idx); + tdls_soc->tdls_conn_info[sta_idx].index = sta_idx; + return &tdls_soc->tdls_conn_info[sta_idx]; + } + } + + tdls_err("tdls peer with staIdx %u not exists", idx); + return NULL; +} + +static void +tdls_ct_process_idle_handler(struct wlan_objmgr_vdev *vdev, + struct tdls_conn_info *tdls_info) +{ + struct tdls_peer *curr_peer; + struct tdls_vdev_priv_obj *tdls_vdev_obj; + struct tdls_soc_priv_obj *tdls_soc_obj; + + if (QDF_STATUS_SUCCESS != tdls_get_vdev_objects(vdev, &tdls_vdev_obj, + &tdls_soc_obj)) + return; + + if (INVALID_TDLS_PEER_ID == tdls_info->sta_id) { + tdls_err("peer (staidx %u) doesn't exists", tdls_info->sta_id); + return; + } + + curr_peer = tdls_find_peer(tdls_vdev_obj, + (u8 *) &tdls_info->peer_mac.bytes[0]); + + if (NULL == curr_peer) { + tdls_err("Invalid tdls idle timer expired"); + return; + } + + tdls_debug(QDF_MAC_ADDR_STR + " tx_pkt: %d, rx_pkt: %d, idle_packet_n: %d", + QDF_MAC_ADDR_ARRAY(curr_peer->peer_mac.bytes), + curr_peer->tx_pkt, + curr_peer->rx_pkt, + 
tdls_vdev_obj->threshold_config.idle_packet_n); + + /* Check tx/rx statistics on this tdls link for recent activities and + * then decide whether to tear down the link or keep it. + */ + if ((curr_peer->tx_pkt >= + tdls_vdev_obj->threshold_config.idle_packet_n) || + (curr_peer->rx_pkt >= + tdls_vdev_obj->threshold_config.idle_packet_n)) { + /* this tdls link got back to normal, so keep it */ + tdls_debug("tdls link to " QDF_MAC_ADDR_STR + " back to normal, will stay", + QDF_MAC_ADDR_ARRAY(curr_peer->peer_mac.bytes)); + } else { + /* this tdls link needs to get torn down */ + tdls_notice("trigger tdls link to "QDF_MAC_ADDR_STR" down", + QDF_MAC_ADDR_ARRAY(curr_peer->peer_mac.bytes)); + tdls_indicate_teardown(tdls_vdev_obj, + curr_peer, + TDLS_TEARDOWN_PEER_UNSPEC_REASON); + } + + return; +} + +void tdls_ct_idle_handler(void *user_data) +{ + struct wlan_objmgr_vdev *vdev; + struct tdls_conn_info *tdls_info; + struct tdls_soc_priv_obj *tdls_soc_obj; + uint32_t idx; + + tdls_info = (struct tdls_conn_info *)user_data; + if (!tdls_info) + return; + + idx = tdls_info->index; + if (tdls_info->index == INVALID_TDLS_PEER_INDEX) + return; + + tdls_soc_obj = qdf_container_of(tdls_info, struct tdls_soc_priv_obj, + tdls_conn_info[idx]); + + vdev = tdls_get_vdev(tdls_soc_obj->soc, WLAN_TDLS_NB_ID); + if (!vdev) { + tdls_err("Unable to fetch the vdev"); + return; + } + + tdls_ct_process_idle_handler(vdev, tdls_info); + wlan_objmgr_vdev_release_ref(vdev, + WLAN_TDLS_NB_ID); +} + + +/** + * tdls_ct_process_idle_and_discovery() - process the traffic data + * @curr_peer: tdls peer needs to be examined + * @tdls_vdev_obj: tdls vdev object + * @tdls_soc_obj: tdls soc object + * + * Function to check the peer traffic data in idle link and tdls + * discovering link + * + * Return: None + */ +static void +tdls_ct_process_idle_and_discovery(struct tdls_peer *curr_peer, + struct tdls_vdev_priv_obj *tdls_vdev_obj, + struct tdls_soc_priv_obj *tdls_soc_obj) +{ + uint16_t valid_peers; + + 
valid_peers = tdls_soc_obj->connected_peer_count; + + if ((curr_peer->tx_pkt + curr_peer->rx_pkt) >= + tdls_vdev_obj->threshold_config.tx_packet_n) { + if (WLAN_TDLS_STA_MAX_NUM > valid_peers) { + tdls_notice("Tput trigger TDLS pre-setup"); + tdls_vdev_obj->curr_candidate = curr_peer; + tdls_implicit_send_discovery_request(tdls_vdev_obj); + } else { + tdls_notice("Maximum peers connected already! %d", + valid_peers); + } + } +} + +/** + * tdls_ct_process_connected_link() - process the traffic + * @curr_peer: tdls peer needs to be examined + * @tdls_vdev_obj: tdls vdev + * @tdls_soc_obj: tdls soc context + * + * Function to check the peer traffic data in active STA + * session + * + * Return: None + */ +static void tdls_ct_process_connected_link( + struct tdls_peer *curr_peer, + struct tdls_vdev_priv_obj *tdls_vdev, + struct tdls_soc_priv_obj *tdls_soc) +{ + + if ((int32_t)curr_peer->rssi < + (int32_t)tdls_vdev->threshold_config.rssi_teardown_threshold) { + tdls_warn("Tear down - low RSSI: " QDF_MAC_ADDR_STR "!", + QDF_MAC_ADDR_ARRAY(curr_peer->peer_mac.bytes)); + tdls_indicate_teardown(tdls_vdev, + curr_peer, + TDLS_TEARDOWN_PEER_UNSPEC_REASON); + return; + } + + /* Only teardown based on non zero idle packet threshold, to address + * a use case where this threshold does not get consider for TEAR DOWN + */ + if ((0 != tdls_vdev->threshold_config.idle_packet_n) && + ((curr_peer->tx_pkt < + tdls_vdev->threshold_config.idle_packet_n) && + (curr_peer->rx_pkt < + tdls_vdev->threshold_config.idle_packet_n))) { + if (!curr_peer->is_peer_idle_timer_initialised) { + uint8_t sta_id = (uint8_t)curr_peer->sta_id; + struct tdls_conn_info *tdls_info; + tdls_info = tdls_get_conn_info(tdls_soc, sta_id); + qdf_mc_timer_init(&curr_peer->peer_idle_timer, + QDF_TIMER_TYPE_SW, + tdls_ct_idle_handler, + (void *)tdls_info); + curr_peer->is_peer_idle_timer_initialised = true; + } + if (QDF_TIMER_STATE_RUNNING != + curr_peer->peer_idle_timer.state) { + tdls_warn("Tx/Rx Idle timer start: " 
+ QDF_MAC_ADDR_STR "!", + QDF_MAC_ADDR_ARRAY(curr_peer->peer_mac.bytes)); + tdls_timer_restart(tdls_vdev->vdev, + &curr_peer->peer_idle_timer, + tdls_vdev->threshold_config.idle_timeout_t); + } + } else if (QDF_TIMER_STATE_RUNNING == + curr_peer->peer_idle_timer.state) { + tdls_warn("Tx/Rx Idle timer stop: " QDF_MAC_ADDR_STR "!", + QDF_MAC_ADDR_ARRAY(curr_peer->peer_mac.bytes)); + qdf_mc_timer_stop(&curr_peer->peer_idle_timer); + } +} + +/** + * tdls_ct_process_cap_supported() - process TDLS supported peer. + * @curr_peer: tdls peer needs to be examined + * @tdls_vdev_obj: tdls vdev context + * @tdls_soc_obj: tdls soc context + * + * Function to check the peer traffic data for tdls supported peer + * + * Return: None + */ +static void tdls_ct_process_cap_supported(struct tdls_peer *curr_peer, + struct tdls_vdev_priv_obj *tdls_vdev, + struct tdls_soc_priv_obj *tdls_soc_obj) +{ + tdls_debug("tx %d rx %d thr.pkt %d/idle %d rssi %d thr.trig %d/tear %d", + curr_peer->tx_pkt, curr_peer->rx_pkt, + tdls_vdev->threshold_config.tx_packet_n, + tdls_vdev->threshold_config.idle_packet_n, + curr_peer->rssi, + tdls_vdev->threshold_config.rssi_trigger_threshold, + tdls_vdev->threshold_config.rssi_teardown_threshold); + + switch (curr_peer->link_status) { + case TDLS_LINK_IDLE: + case TDLS_LINK_DISCOVERING: + if (TDLS_IS_EXTERNAL_CONTROL_ENABLED( + tdls_soc_obj->tdls_configs.tdls_feature_flags) && + (!curr_peer->is_forced_peer)) + break; + tdls_ct_process_idle_and_discovery(curr_peer, tdls_vdev, + tdls_soc_obj); + break; + case TDLS_LINK_CONNECTED: + tdls_ct_process_connected_link(curr_peer, tdls_vdev, + tdls_soc_obj); + break; + default: + break; + } +} + +/** + * tdls_ct_process_cap_unknown() - process unknown peer + * @curr_peer: tdls peer needs to be examined + * @tdls_vdev_obj: tdls vdev object + * @tdls_soc_obj: tdls soc object + * + * Function check the peer traffic data , when tdls capability is unknown + * + * Return: None + */ +static void 
tdls_ct_process_cap_unknown(struct tdls_peer *curr_peer, + struct tdls_vdev_priv_obj *tdls_vdev, + struct tdls_soc_priv_obj *tdlsa_soc) +{ + if (TDLS_IS_EXTERNAL_CONTROL_ENABLED( + tdlsa_soc->tdls_configs.tdls_feature_flags) && + (!curr_peer->is_forced_peer)) + return; + + tdls_debug("threshold tx pkt = %d peer tx_pkt = %d & rx_pkt = %d ", + tdls_vdev->threshold_config.tx_packet_n, curr_peer->tx_pkt, + curr_peer->rx_pkt); + + if (!TDLS_IS_LINK_CONNECTED(curr_peer) && + ((curr_peer->tx_pkt + curr_peer->rx_pkt) >= + tdls_vdev->threshold_config.tx_packet_n)) { + /* Ignore discovery attempt if External Control is enabled, that + * is, peer is forced. In that case, continue discovery attempt + * regardless attempt count + */ + tdls_debug("TDLS UNKNOWN pre discover "); + if (curr_peer->is_forced_peer || + curr_peer->discovery_attempt++ < + tdls_vdev->threshold_config.discovery_tries_n) { + tdls_debug("TDLS UNKNOWN discover "); + tdls_vdev->curr_candidate = curr_peer; + tdls_implicit_send_discovery_request(tdls_vdev); + } else { + curr_peer->tdls_support = TDLS_CAP_NOT_SUPPORTED; + tdls_set_peer_link_status( + curr_peer, + TDLS_LINK_IDLE, + TDLS_LINK_NOT_SUPPORTED); + } + } +} + +/** + * tdls_ct_process_peers() - process the peer + * @curr_peer: tdls peer needs to be examined + * @tdls_vdev_obj: tdls vdev object + * @tdls_soc_obj: tdls soc object + * + * This function check the peer capability and process the metadata from + * the peer + * + * Return: None + */ +static void tdls_ct_process_peers(struct tdls_peer *curr_peer, + struct tdls_vdev_priv_obj *tdls_vdev_obj, + struct tdls_soc_priv_obj *tdls_soc_obj) +{ + tdls_debug(QDF_MAC_ADDR_STR " link_status %d tdls_support %d", + QDF_MAC_ADDR_ARRAY(curr_peer->peer_mac.bytes), + curr_peer->link_status, curr_peer->tdls_support); + + switch (curr_peer->tdls_support) { + case TDLS_CAP_SUPPORTED: + tdls_ct_process_cap_supported(curr_peer, tdls_vdev_obj, + tdls_soc_obj); + break; + + case TDLS_CAP_UNKNOWN: + 
		tdls_ct_process_cap_unknown(curr_peer, tdls_vdev_obj,
					    tdls_soc_obj);
		break;
	default:
		break;
	}

}

/**
 * tdls_ct_process_handler() - run one connection-tracker pass on a vdev
 * @vdev: vdev object
 *
 * Samples tx/rx traffic, examines every peer in the hash lists, then
 * re-arms the periodic peer-update timer.
 *
 * Return: None
 */
static void tdls_ct_process_handler(struct wlan_objmgr_vdev *vdev)
{
	int i;
	qdf_list_t *head;
	qdf_list_node_t *list_node;
	struct tdls_peer *curr_peer;
	QDF_STATUS status;
	struct tdls_vdev_priv_obj *tdls_vdev_obj;
	struct tdls_soc_priv_obj *tdls_soc_obj;

	if (QDF_STATUS_SUCCESS != tdls_get_vdev_objects(vdev, &tdls_vdev_obj,
							&tdls_soc_obj))
		return;

	/* If any concurrency is detected */
	if (!tdls_soc_obj->enable_tdls_connection_tracker) {
		tdls_notice("Connection tracker is disabled");
		return;
	}

	/* Update tx rx traffic sample in tdls data structures */
	tdls_ct_sampling_tx_rx(tdls_vdev_obj, tdls_soc_obj);

	for (i = 0; i < WLAN_TDLS_PEER_LIST_SIZE; i++) {
		head = &tdls_vdev_obj->peer_list[i];
		status = qdf_list_peek_front(head, &list_node);
		while (QDF_IS_STATUS_SUCCESS(status)) {
			curr_peer = qdf_container_of(list_node,
						     struct tdls_peer, node);
			tdls_ct_process_peers(curr_peer, tdls_vdev_obj,
					      tdls_soc_obj);
			/* Reset the per-interval sample counters */
			curr_peer->tx_pkt = 0;
			curr_peer->rx_pkt = 0;
			status = qdf_list_peek_next(head,
						    list_node, &list_node);
		}
	}

	tdls_timer_restart(tdls_vdev_obj->vdev,
			   &tdls_vdev_obj->peer_update_timer,
			   tdls_vdev_obj->threshold_config.tx_period_t);

}

/* Connection-tracker timer callback; holds a vdev reference for the pass */
void tdls_ct_handler(void *user_data)
{
	struct wlan_objmgr_vdev *vdev;

	if (!user_data)
		return;

	vdev = (struct wlan_objmgr_vdev *)user_data;

	if (QDF_STATUS_SUCCESS != wlan_objmgr_vdev_try_get_ref(vdev,
							WLAN_TDLS_NB_ID))
		return;

	tdls_ct_process_handler(vdev);

	wlan_objmgr_vdev_release_ref(vdev,
				     WLAN_TDLS_NB_ID);
}

/* Validate and store the preferred TDLS off-channel number */
int tdls_set_tdls_offchannel(struct tdls_soc_priv_obj *tdls_soc,
			     int offchannel)
{
	uint32_t tdls_feature_flags;

	tdls_feature_flags = tdls_soc->tdls_configs.tdls_feature_flags;

	if (TDLS_IS_OFF_CHANNEL_ENABLED(tdls_feature_flags) &&
	    (TDLS_SUPPORT_EXP_TRIG_ONLY == tdls_soc->tdls_current_mode ||
	     TDLS_SUPPORT_IMP_MODE == tdls_soc->tdls_current_mode ||
	     TDLS_SUPPORT_EXT_CONTROL == tdls_soc->tdls_current_mode)) {
		if (offchannel < TDLS_PREFERRED_OFF_CHANNEL_NUM_MIN ||
		    offchannel > TDLS_PREFERRED_OFF_CHANNEL_NUM_MAX) {
			tdls_err("Invalid tdls off channel %u", offchannel);
			return -EINVAL;
		}
	} else {
		tdls_err("Either TDLS or TDLS Off-channel is not enabled");
		return -ENOTSUPP;
	}
	tdls_notice("change tdls off channel from %d to %d",
		    tdls_soc->tdls_off_channel, offchannel);
	tdls_soc->tdls_off_channel = offchannel;
	return 0;
}

/* Translate a secondary off-channel offset into a bandwidth-offset value */
int tdls_set_tdls_secoffchanneloffset(struct tdls_soc_priv_obj *tdls_soc,
				      int offchanoffset)
{
	uint32_t tdls_feature_flags;

	tdls_feature_flags = tdls_soc->tdls_configs.tdls_feature_flags;

	if (!TDLS_IS_OFF_CHANNEL_ENABLED(tdls_feature_flags) ||
	    TDLS_SUPPORT_SUSPENDED >= tdls_soc->tdls_current_mode) {
		tdls_err("Either TDLS or TDLS Off-channel is not enabled");
		return -ENOTSUPP;
	}

	tdls_soc->tdls_channel_offset = BW_INVALID;

	switch (offchanoffset) {
	case TDLS_SEC_OFFCHAN_OFFSET_0:
		tdls_soc->tdls_channel_offset = BW20;
		break;
	case TDLS_SEC_OFFCHAN_OFFSET_40PLUS:
		tdls_soc->tdls_channel_offset = BW40_LOW_PRIMARY;
		break;
	case TDLS_SEC_OFFCHAN_OFFSET_40MINUS:
		/* NOTE(review): maps to the same BW40_LOW_PRIMARY as the
		 * 40PLUS case above — verify this is intended and not meant
		 * to be the high-primary 40 MHz variant.
		 */
		tdls_soc->tdls_channel_offset = BW40_LOW_PRIMARY;
		break;
	case TDLS_SEC_OFFCHAN_OFFSET_80:
		tdls_soc->tdls_channel_offset = BW80;
		break;
	case TDLS_SEC_OFFCHAN_OFFSET_160:
		tdls_soc->tdls_channel_offset = BWALL;
		break;
	default:
		tdls_err("Invalid tdls secondary off channel offset %d",
			 offchanoffset);
		return -EINVAL;
	} /* end switch */

	tdls_notice("change tdls secondary off channel offset to 0x%x",
		    tdls_soc->tdls_channel_offset);
	return 0;
}

/* Enable/disable TDLS off-channel switching towards firmware */
int tdls_set_tdls_offchannelmode(struct wlan_objmgr_vdev *vdev,
				 int offchanmode)
{
	struct tdls_peer *conn_peer = NULL;
	struct tdls_channel_switch_params chan_switch_params;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	int ret_value = 0;
	struct
	       tdls_vdev_priv_obj *tdls_vdev;
	struct tdls_soc_priv_obj *tdls_soc;
	uint32_t tdls_feature_flags;


	status = tdls_get_vdev_objects(vdev, &tdls_vdev, &tdls_soc);

	if (status != QDF_STATUS_SUCCESS)
		return -EINVAL;


	if (offchanmode < ENABLE_CHANSWITCH ||
	    offchanmode > DISABLE_CHANSWITCH) {
		tdls_err("Invalid tdls off channel mode %d", offchanmode);
		return -EINVAL;
	}

	/* Off-channel request is only meaningful while associated */
	if (!wlan_vdev_is_up(vdev)) {
		tdls_err("tdls off channel req in not associated state %d",
			 offchanmode);
		return -EPERM;
	}

	tdls_feature_flags = tdls_soc->tdls_configs.tdls_feature_flags;
	if (!TDLS_IS_OFF_CHANNEL_ENABLED(tdls_feature_flags) ||
	    TDLS_SUPPORT_SUSPENDED >= tdls_soc->tdls_current_mode) {
		tdls_err("Either TDLS or TDLS Off-channel is not enabled");
		return -ENOTSUPP;
	}

	conn_peer = tdls_find_first_connected_peer(tdls_vdev);
	if (NULL == conn_peer) {
		tdls_err("No TDLS Connected Peer");
		return -EPERM;
	}

	tdls_notice("TDLS Channel Switch in swmode=%d tdls_off_channel %d offchanoffset %d",
		    offchanmode, tdls_soc->tdls_off_channel,
		    tdls_soc->tdls_channel_offset);

	switch (offchanmode) {
	case ENABLE_CHANSWITCH:
		/* Both channel and offset must have been configured before */
		if (tdls_soc->tdls_off_channel &&
		    tdls_soc->tdls_channel_offset != BW_INVALID) {
			chan_switch_params.tdls_off_ch =
				tdls_soc->tdls_off_channel;
			chan_switch_params.tdls_off_ch_bw_offset =
				tdls_soc->tdls_channel_offset;
			chan_switch_params.oper_class =
				tdls_find_opclass(tdls_soc->soc,
					chan_switch_params.tdls_off_ch,
					chan_switch_params.tdls_off_ch_bw_offset);
		} else {
			tdls_err("TDLS off-channel parameters are not set yet!!!");
			return -EINVAL;

		}
		break;
	case DISABLE_CHANSWITCH:
		chan_switch_params.tdls_off_ch = 0;
		chan_switch_params.tdls_off_ch_bw_offset = 0;
		chan_switch_params.oper_class = 0;
		break;
	default:
		tdls_err("Incorrect Parameters mode: %d tdls_off_channel: %d offchanoffset: %d",
			 offchanmode, tdls_soc->tdls_off_channel,
			 tdls_soc->tdls_channel_offset);
		return -EINVAL;
	} /* end
	     switch */

	chan_switch_params.vdev_id = tdls_vdev->session_id;
	chan_switch_params.tdls_sw_mode = offchanmode;
	chan_switch_params.is_responder =
		conn_peer->is_responder;
	qdf_mem_copy(&chan_switch_params.peer_mac_addr,
		     &conn_peer->peer_mac.bytes,
		     QDF_MAC_ADDR_SIZE);
	tdls_notice("Peer " QDF_MAC_ADDR_STR " vdevId: %d, off channel: %d, offset: %d, mode: %d, is_responder: %d",
		    QDF_MAC_ADDR_ARRAY(chan_switch_params.peer_mac_addr),
		    chan_switch_params.vdev_id,
		    chan_switch_params.tdls_off_ch,
		    chan_switch_params.tdls_off_ch_bw_offset,
		    chan_switch_params.tdls_sw_mode,
		    chan_switch_params.is_responder);

	status = tdls_set_offchan_mode(tdls_soc->soc,
				       &chan_switch_params);

	if (status != QDF_STATUS_SUCCESS) {
		tdls_err("Failed to send channel switch request to wmi");
		return -EINVAL;
	}

	tdls_soc->tdls_fw_off_chan_mode = offchanmode;

	if (ENABLE_CHANSWITCH == offchanmode) {
		/* Remember the channel/class actually sent to firmware */
		conn_peer = tdls_find_first_connected_peer(tdls_vdev);
		if (NULL == conn_peer) {
			tdls_err("No TDLS Connected Peer");
			return -EPERM;
		}
		conn_peer->pref_off_chan_num =
			chan_switch_params.tdls_off_ch;
		conn_peer->op_class_for_pref_off_chan =
			chan_switch_params.oper_class;
	}

	return ret_value;
}

/* Scheduler flush callback: drop the queued delete-all-peers message body */
static QDF_STATUS tdls_delete_all_tdls_peers_flush_cb(struct scheduler_msg *msg)
{
	if (msg && msg->bodyptr) {
		qdf_mem_free(msg->bodyptr);
		msg->bodyptr = NULL;
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * tdls_delete_all_tdls_peers(): send request to delete tdls peers
 * @vdev: vdev object
 * @tdls_soc: tdls soc object
 *
 * This function sends request to lim to delete tdls peers
 *
 * Return: QDF_STATUS
 */
QDF_STATUS tdls_delete_all_tdls_peers(struct wlan_objmgr_vdev *vdev,
				      struct tdls_soc_priv_obj *tdls_soc)
{
	struct wlan_objmgr_peer *peer;
	struct tdls_del_all_tdls_peers *del_msg;
	struct scheduler_msg msg = {0};
	QDF_STATUS status;

	peer = wlan_vdev_get_bsspeer(vdev);
	if (!peer)
		return QDF_STATUS_E_FAILURE;
	if
	   (QDF_STATUS_SUCCESS !=
	    wlan_objmgr_peer_try_get_ref(peer, WLAN_TDLS_SB_ID))
		return QDF_STATUS_E_FAILURE;

	del_msg = qdf_mem_malloc(sizeof(*del_msg));
	if (!del_msg) {
		tdls_err("memory alloc failed");
		wlan_objmgr_peer_release_ref(peer, WLAN_TDLS_SB_ID);
		return QDF_STATUS_E_FAILURE;
	}

	qdf_mem_copy(del_msg->bssid.bytes,
		     wlan_peer_get_macaddr(peer), QDF_MAC_ADDR_SIZE);

	del_msg->msg_type = tdls_soc->tdls_del_all_peers;
	del_msg->msg_len = (uint16_t) sizeof(*del_msg);

	/* Send the request to PE. */
	qdf_mem_zero(&msg, sizeof(msg));

	tdls_debug("sending delete all peers req to PE ");

	msg.type = del_msg->msg_type;
	msg.bodyptr = del_msg;
	msg.flush_callback = tdls_delete_all_tdls_peers_flush_cb;

	status = scheduler_post_message(QDF_MODULE_ID_TDLS,
					QDF_MODULE_ID_PE,
					QDF_MODULE_ID_PE, &msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		/* Message was not queued; free the body ourselves */
		tdls_err("post delete all peer req failed, status %d", status);
		qdf_mem_free(del_msg);
	}

	wlan_objmgr_peer_release_ref(peer, WLAN_TDLS_SB_ID);
	return status;
}

void tdls_disable_offchan_and_teardown_links(
				struct wlan_objmgr_vdev *vdev)
{
	uint16_t connected_tdls_peers = 0;
	uint8_t staidx;
	struct tdls_peer *curr_peer = NULL;
	struct tdls_vdev_priv_obj *tdls_vdev;
	struct tdls_soc_priv_obj *tdls_soc;
	QDF_STATUS status;
	uint8_t vdev_id;
	bool tdls_in_progress = false;

	status = tdls_get_vdev_objects(vdev, &tdls_vdev, &tdls_soc);
	if (QDF_STATUS_SUCCESS != status) {
		tdls_err("tdls objects are NULL ");
		return;
	}

	if (TDLS_SUPPORT_SUSPENDED >= tdls_soc->tdls_current_mode) {
		tdls_notice("TDLS mode %d is disabled OR not suspended now",
			    tdls_soc->tdls_current_mode);
		return;
	}

	connected_tdls_peers = tdls_soc->connected_peer_count;
	if (tdls_is_progress(tdls_vdev, NULL, 0))
		tdls_in_progress = true;

	if (!(connected_tdls_peers || tdls_in_progress)) {
		tdls_notice("No TDLS connected/progress peers to delete");
		vdev_id = vdev->vdev_objmgr.vdev_id;
		if
		   (tdls_soc->set_state_info.set_state_cnt > 0) {
			tdls_debug("Disable the tdls in FW as second interface is coming up");
			tdls_send_update_to_fw(tdls_vdev, tdls_soc, true,
					       true, false, vdev_id);
		}
		return;
	}

	/* TDLS is not supported in case of concurrency.
	 * Disable TDLS Offchannel in FW to avoid more
	 * than two concurrent channels and generate TDLS
	 * teardown indication to supplicant.
	 * Below function Finds the first connected peer and
	 * disables TDLS offchannel for that peer.
	 * FW enables TDLS offchannel only when there is
	 * one TDLS peer. When there are more than one TDLS peer,
	 * there will not be TDLS offchannel in FW.
	 * So to avoid sending multiple request to FW, for now,
	 * just invoke offchannel mode functions only once
	 */
	tdls_set_tdls_offchannel(tdls_soc,
			tdls_soc->tdls_configs.tdls_pre_off_chan_num);
	tdls_set_tdls_secoffchanneloffset(tdls_soc,
			TDLS_SEC_OFFCHAN_OFFSET_40PLUS);
	tdls_set_tdls_offchannelmode(vdev, DISABLE_CHANSWITCH);

	/* Send Msg to PE for deleting all the TDLS peers */
	tdls_delete_all_tdls_peers(vdev, tdls_soc);

	for (staidx = 0; staidx < tdls_soc->max_num_tdls_sta;
							staidx++) {
		if (tdls_soc->tdls_conn_info[staidx].sta_id
						== INVALID_TDLS_PEER_ID)
			continue;

		curr_peer = tdls_find_all_peer(tdls_soc,
			tdls_soc->tdls_conn_info[staidx].peer_mac.bytes);
		if (!curr_peer)
			continue;

		tdls_notice("indicate TDLS teardown (staId %d)",
			    curr_peer->sta_id);

		/* Indicate teardown to supplicant */
		tdls_indicate_teardown(tdls_vdev,
				       curr_peer,
				       TDLS_TEARDOWN_PEER_UNSPEC_REASON);

		/*
		 * Del Sta happened already as part of tdls_delete_all_tdls_peers
		 * Hence clear tdls vdev data structure.
+ */ + tdls_reset_peer(tdls_vdev, curr_peer->peer_mac.bytes); + + if (tdls_soc->tdls_dereg_peer) + tdls_soc->tdls_dereg_peer( + tdls_soc->tdls_peer_context, + wlan_vdev_get_id(vdev), + curr_peer->sta_id); + tdls_decrement_peer_count(tdls_soc); + tdls_soc->tdls_conn_info[staidx].sta_id = INVALID_TDLS_PEER_ID; + tdls_soc->tdls_conn_info[staidx].index = + INVALID_TDLS_PEER_INDEX; + tdls_soc->tdls_conn_info[staidx].session_id = 255; + + qdf_mem_zero(&tdls_soc->tdls_conn_info[staidx].peer_mac, + sizeof(struct qdf_mac_addr)); + } +} + +void tdls_teardown_connections(struct wlan_objmgr_psoc *psoc) +{ + struct tdls_osif_indication indication; + struct tdls_soc_priv_obj *tdls_soc; + struct wlan_objmgr_vdev *tdls_vdev; + + tdls_soc = wlan_psoc_get_tdls_soc_obj(psoc); + if (!tdls_soc) + return; + + /* Get the tdls specific vdev and clear the links */ + tdls_vdev = tdls_get_vdev(psoc, WLAN_TDLS_SB_ID); + if (!tdls_vdev) { + tdls_err("Unable to get the tdls vdev"); + return; + } + + tdls_disable_offchan_and_teardown_links(tdls_vdev); + indication.vdev = tdls_vdev; + + if (tdls_soc->tdls_event_cb) + tdls_soc->tdls_event_cb(tdls_soc->tdls_evt_cb_data, + TDLS_EVENT_TEARDOWN_LINKS_DONE, + &indication); + + wlan_objmgr_vdev_release_ref(tdls_vdev, WLAN_TDLS_SB_ID); +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_ct.h b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_ct.h new file mode 100644 index 0000000000000000000000000000000000000000..b7e7d8e56983dcc9a7568fd5c65da046149ff36d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_ct.h @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_tdls_ct.h + * + * TDLS connection tracker declarations + */ + +#ifndef _WLAN_TDLS_CT_H_ +#define _WLAN_TDLS_CT_H_ + + /* + * Before UpdateTimer expires, we want to timeout discovery response + * should not be more than 2000. + */ +#define TDLS_DISCOVERY_TIMEOUT_ERE_UPDATE 1000 + +#define TDLS_PREFERRED_OFF_CHANNEL_NUM_MIN 1 +#define TDLS_PREFERRED_OFF_CHANNEL_NUM_MAX 165 +#define TDLS_PREFERRED_OFF_CHANNEL_NUM_DEFAULT 36 + +/** + * tdls_implicit_enable() - enable implicit tdls triggering + * @tdls_vdev: TDLS vdev + * + * Return: Void + */ +void tdls_implicit_enable(struct tdls_vdev_priv_obj *tdls_vdev); + +/** + * tdls_update_rx_pkt_cnt() - Update rx packet count + * @vdev: vdev object manager + * @mac_addr: mac address of the data + * @dest_mac_addr: dest mac address of the data + * + * Increase the rx packet count, if the sender is not bssid and the packet is + * not broadcast and multicast packet + * + * This sampling information will be used in TDLS connection tracker + * + * This function expected to be called in an atomic context so blocking APIs + * not allowed + * + * Return: None + */ +void tdls_update_rx_pkt_cnt(struct wlan_objmgr_vdev *vdev, + struct qdf_mac_addr *mac_addr, + struct qdf_mac_addr *dest_mac_addr); + +/** + * tdls_update_tx_pkt_cnt() - update tx packet + * @vdev: vdev object + * @mac_addr: mac address of the data + * + * Increase the tx packet count, if the sender is not bssid and the packet is + * not 
broadcast and multicast packet + * + * This sampling information will be used in TDLS connection tracker + * + * This function expected to be called in an atomic context so blocking APIs + * not allowed + * + * Return: None + */ +void tdls_update_tx_pkt_cnt(struct wlan_objmgr_vdev *vdev, + struct qdf_mac_addr *mac_addr); + +/** + * wlan_hdd_tdls_implicit_send_discovery_request() - send discovery request + * @tdls_vdev_obj: tdls vdev object + * + * Return: None + */ +void tdls_implicit_send_discovery_request( + struct tdls_vdev_priv_obj *tdls_vdev_obj); + +/** + * tdls_recv_discovery_resp() - handling of tdls discovery response + * @soc: object manager + * @mac: mac address of peer from which the response was received + * + * Return: 0 for success or negative errno otherwise + */ +int tdls_recv_discovery_resp(struct tdls_vdev_priv_obj *tdls_vdev, + const uint8_t *mac); + +/** + * tdls_indicate_teardown() - indicate teardown to upper layer + * @tdls_vdev: tdls vdev object + * @curr_peer: teardown peer + * @reason: teardown reason + * + * Return: Void + */ +void tdls_indicate_teardown(struct tdls_vdev_priv_obj *tdls_vdev, + struct tdls_peer *curr_peer, + uint16_t reason); + +/** + * tdls_ct_handler() - TDLS connection tracker handler + * @user_data: user data from timer + * + * tdls connection tracker timer starts, when the STA connected to AP + * and it's scan the traffic between two STA peers and make TDLS + * connection and teardown, based on the traffic threshold + * + * Return: None + */ +void tdls_ct_handler(void *user_data); + +/** + * tdls_ct_idle_handler() - Check tdls idle traffic + * @user_data: data from tdls idle timer + * + * Function to check the tdls idle traffic and make a decision about + * tdls teardown + * + * Return: None + */ +void tdls_ct_idle_handler(void *user_data); + +/** + * tdls_discovery_timeout_peer_cb() - tdls discovery timeout callback + * @userData: tdls vdev + * + * Return: None + */ +void tdls_discovery_timeout_peer_cb(void 
*user_data);

/**
 * tdls_implicit_disable() - disable implicit tdls triggering
 * @tdls_vdev: TDLS vdev object
 *
 * Return: Void
 */
void tdls_implicit_disable(struct tdls_vdev_priv_obj *tdls_vdev);

/**
 * tdls_is_vdev_authenticated() - check the vdev authentication state
 * @vdev: vdev object
 *
 * Return: true or false
 */
bool tdls_is_vdev_authenticated(struct wlan_objmgr_vdev *vdev);

/**
 * tdls_teardown_connections() - teardown and delete all the tdls peers
 * @psoc: psoc object
 *
 * Return: None
 */
void tdls_teardown_connections(struct wlan_objmgr_psoc *psoc);

/**
 * tdls_disable_offchan_and_teardown_links - Disable offchannel
 * and teardown TDLS links
 * @vdev: vdev object
 *
 * Return: None
 */
void tdls_disable_offchan_and_teardown_links(
				struct wlan_objmgr_vdev *vdev);

/**
 * tdls_delete_all_tdls_peers(): send request to delete tdls peers
 * @vdev: vdev object
 * @tdls_soc: tdls soc object
 *
 * This function sends request to lim to delete tdls peers
 *
 * Return: QDF_STATUS
 */
QDF_STATUS tdls_delete_all_tdls_peers(struct wlan_objmgr_vdev *vdev,
				      struct tdls_soc_priv_obj *tdls_soc);

/**
 * tdls_set_tdls_offchannel() - set tdls off-channel number
 * @tdls_soc: tdls soc object
 * @offchannel: tdls off-channel number
 *
 * This function sets tdls off-channel number
 *
 * Return: 0 on success; negative errno otherwise
 */
int tdls_set_tdls_offchannel(struct tdls_soc_priv_obj *tdls_soc,
			     int offchannel);

/**
 * tdls_set_tdls_offchannelmode() - set tdls off-channel mode
 * @vdev: vdev object
 * @offchanmode: tdls off-channel mode
 *
 * This function sets tdls off-channel mode
 *
 * Return: 0 on success; negative errno otherwise
 */

int tdls_set_tdls_offchannelmode(struct wlan_objmgr_vdev *vdev,
				 int offchanmode);

/**
 * tdls_set_tdls_secoffchanneloffset() - set secondary tdls off-channel offset
 * @tdls_soc: tdls soc object
 * 
@offchanoffset: tdls off-channel offset + * + * This function sets secondary tdls off-channel offset + * + * Return: 0 on success; negative errno otherwise + */ + +int tdls_set_tdls_secoffchanneloffset(struct tdls_soc_priv_obj *tdls_soc, + int offchanoffset); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_main.c b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_main.c new file mode 100644 index 0000000000000000000000000000000000000000..65e685a67ec95647c3c1c7ccffaede61d70bba47 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_main.c @@ -0,0 +1,1675 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_tdls_main.c + * + * TDLS core function definitions + */ + +#include "wlan_tdls_main.h" +#include "wlan_tdls_cmds_process.h" +#include "wlan_tdls_peer.h" +#include "wlan_tdls_ct.h" +#include "wlan_tdls_mgmt.h" +#include "wlan_tdls_tgt_api.h" +#include "wlan_policy_mgr_public_struct.h" +#include "wlan_policy_mgr_api.h" +#include "wlan_scan_ucfg_api.h" + + +/* Global tdls soc pvt object + * this is useful for some functions which does not receive either vdev or psoc + * objects. 
 */
static struct tdls_soc_priv_obj *tdls_soc_global;

QDF_STATUS tdls_psoc_obj_create_notification(struct wlan_objmgr_psoc *psoc,
					     void *arg_list)
{
	QDF_STATUS status;
	struct tdls_soc_priv_obj *tdls_soc_obj;

	tdls_soc_obj = qdf_mem_malloc(sizeof(*tdls_soc_obj));
	if (!tdls_soc_obj) {
		tdls_err("Failed to allocate memory for tdls object");
		return QDF_STATUS_E_NOMEM;
	}

	tdls_soc_obj->soc = psoc;

	/* NOTE(review): 4th argument passes QDF_STATUS_SUCCESS as the
	 * attach priority value — confirm this matches the objmgr API's
	 * expectation.
	 */
	status = wlan_objmgr_psoc_component_obj_attach(psoc,
						       WLAN_UMAC_COMP_TDLS,
						       (void *)tdls_soc_obj,
						       QDF_STATUS_SUCCESS);

	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("Failed to attach psoc tdls component");
		qdf_mem_free(tdls_soc_obj);
		return status;
	}

	/* Cache globally for callers that have neither vdev nor psoc */
	tdls_soc_global = tdls_soc_obj;
	tdls_notice("TDLS obj attach to psoc successfully");

	return status;
}

QDF_STATUS tdls_psoc_obj_destroy_notification(struct wlan_objmgr_psoc *psoc,
					      void *arg_list)
{
	QDF_STATUS status;
	struct tdls_soc_priv_obj *tdls_soc_obj;

	tdls_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc,
							     WLAN_UMAC_COMP_TDLS);
	if (!tdls_soc_obj) {
		tdls_err("Failed to get tdls obj in psoc");
		return QDF_STATUS_E_FAILURE;
	}

	status = wlan_objmgr_psoc_component_obj_detach(psoc,
						       WLAN_UMAC_COMP_TDLS,
						       tdls_soc_obj);

	if (QDF_IS_STATUS_ERROR(status))
		tdls_err("Failed to detach psoc tdls component");
	/* Object is freed even when detach reports an error */
	qdf_mem_free(tdls_soc_obj);

	return status;
}

/* Copy user/ini thresholds into the vdev object and create its timers/lists */
static QDF_STATUS tdls_vdev_init(struct tdls_vdev_priv_obj *vdev_obj)
{
	uint8_t i;
	struct tdls_config_params *config;
	struct tdls_user_config *user_config;
	struct tdls_soc_priv_obj *soc_obj;

	soc_obj = wlan_vdev_get_tdls_soc_obj(vdev_obj->vdev);
	if (!soc_obj) {
		tdls_err("tdls soc obj NULL");
		return QDF_STATUS_E_FAILURE;
	}

	config = &vdev_obj->threshold_config;
	user_config = &soc_obj->tdls_configs;
	config->tx_period_t = user_config->tdls_tx_states_period;
	config->tx_packet_n = user_config->tdls_tx_pkt_threshold;
	config->discovery_tries_n =
		user_config->tdls_max_discovery_attempt;
	config->idle_timeout_t = user_config->tdls_idle_timeout;
	config->idle_packet_n = user_config->tdls_idle_pkt_threshold;
	config->rssi_trigger_threshold =
		user_config->tdls_rssi_trigger_threshold;
	config->rssi_teardown_threshold =
		user_config->tdls_rssi_teardown_threshold;
	config->rssi_delta = user_config->tdls_rssi_delta;

	for (i = 0; i < WLAN_TDLS_PEER_LIST_SIZE; i++) {
		qdf_list_create(&vdev_obj->peer_list[i],
				WLAN_TDLS_PEER_SUB_LIST_SIZE);
	}
	qdf_mc_timer_init(&vdev_obj->peer_update_timer, QDF_TIMER_TYPE_SW,
			  tdls_ct_handler, vdev_obj->vdev);
	qdf_mc_timer_init(&vdev_obj->peer_discovery_timer, QDF_TIMER_TYPE_SW,
			  tdls_discovery_timeout_peer_cb, vdev_obj);

	return QDF_STATUS_SUCCESS;
}

/* Stop and destroy the vdev timers, then release peer bookkeeping */
static void tdls_vdev_deinit(struct tdls_vdev_priv_obj *vdev_obj)
{
	qdf_mc_timer_stop(&vdev_obj->peer_update_timer);
	qdf_mc_timer_stop(&vdev_obj->peer_discovery_timer);

	qdf_mc_timer_destroy(&vdev_obj->peer_update_timer);
	qdf_mc_timer_destroy(&vdev_obj->peer_discovery_timer);

	tdls_peer_idle_timers_destroy(vdev_obj);
	tdls_free_peer_list(vdev_obj);
}

QDF_STATUS tdls_vdev_obj_create_notification(struct wlan_objmgr_vdev *vdev,
					     void *arg)
{
	QDF_STATUS status;
	struct tdls_vdev_priv_obj *tdls_vdev_obj;
	struct wlan_objmgr_pdev *pdev;
	struct tdls_soc_priv_obj *tdls_soc_obj;
	uint32_t tdls_feature_flags;

	tdls_debug("tdls vdev mode %d", wlan_vdev_mlme_get_opmode(vdev));
	/* TDLS only applies to STA and P2P-client vdevs */
	if (wlan_vdev_mlme_get_opmode(vdev) != QDF_STA_MODE &&
	    wlan_vdev_mlme_get_opmode(vdev) != QDF_P2P_CLIENT_MODE)
		return QDF_STATUS_SUCCESS;

	tdls_soc_obj = wlan_vdev_get_tdls_soc_obj(vdev);
	if (!tdls_soc_obj) {
		tdls_err("get soc by vdev failed");
		return QDF_STATUS_E_NOMEM;
	}

	tdls_feature_flags = tdls_soc_obj->tdls_configs.tdls_feature_flags;
	if (!TDLS_IS_ENABLED(tdls_feature_flags)) {
		tdls_debug("disabled in ini");
		return QDF_STATUS_E_NOSUPPORT;
	}

	if (tdls_soc_obj->tdls_osif_init_cb) {
status = tdls_soc_obj->tdls_osif_init_cb(vdev); + if (QDF_IS_STATUS_ERROR(status)) + return status; + } + + /* TODO: Add concurrency check */ + + tdls_vdev_obj = qdf_mem_malloc(sizeof(*tdls_vdev_obj)); + if (!tdls_vdev_obj) { + tdls_err("Failed to allocate memory for tdls vdev object"); + status = QDF_STATUS_E_NOMEM; + goto err; + } + + status = wlan_objmgr_vdev_component_obj_attach(vdev, + WLAN_UMAC_COMP_TDLS, + (void *)tdls_vdev_obj, + QDF_STATUS_SUCCESS); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("Failed to attach vdev tdls component"); + goto err; + } + tdls_vdev_obj->vdev = vdev; + status = tdls_vdev_init(tdls_vdev_obj); + if (QDF_IS_STATUS_ERROR(status)) + goto err; + + pdev = wlan_vdev_get_pdev(vdev); + + status = ucfg_scan_register_event_handler(pdev, + tdls_scan_complete_event_handler, + tdls_soc_obj); + + if (QDF_STATUS_SUCCESS != status) { + tdls_err("scan event register failed "); + tdls_vdev_deinit(tdls_vdev_obj); + goto err; + } + + tdls_debug("tdls object attach to vdev successfully"); + return status; +err: + if (tdls_soc_obj->tdls_osif_deinit_cb) + tdls_soc_obj->tdls_osif_deinit_cb(vdev); + if (tdls_vdev_obj) { + qdf_mem_free(tdls_vdev_obj); + tdls_vdev_obj = NULL; + } + return status; +} + +QDF_STATUS tdls_vdev_obj_destroy_notification(struct wlan_objmgr_vdev *vdev, + void *arg) +{ + QDF_STATUS status; + void *tdls_vdev_obj; + struct tdls_soc_priv_obj *tdls_soc_obj; + uint32_t tdls_feature_flags; + + tdls_debug("tdls vdev mode %d", wlan_vdev_mlme_get_opmode(vdev)); + if (wlan_vdev_mlme_get_opmode(vdev) != QDF_STA_MODE && + wlan_vdev_mlme_get_opmode(vdev) != QDF_P2P_CLIENT_MODE) + return QDF_STATUS_SUCCESS; + + tdls_soc_obj = wlan_vdev_get_tdls_soc_obj(vdev); + if (!tdls_soc_obj) { + tdls_err("get soc by vdev failed"); + return QDF_STATUS_E_NOMEM; + } + + tdls_feature_flags = tdls_soc_obj->tdls_configs.tdls_feature_flags; + if (!TDLS_IS_ENABLED(tdls_feature_flags)) { + tdls_debug("disabled in ini"); + return QDF_STATUS_E_NOSUPPORT; + } + + 
	tdls_vdev_obj = wlan_objmgr_vdev_get_comp_private_obj(vdev,
							      WLAN_UMAC_COMP_TDLS);
	if (!tdls_vdev_obj) {
		tdls_err("Failed to get tdls vdev object");
		return QDF_STATUS_E_FAILURE;
	}

	status = wlan_objmgr_vdev_component_obj_detach(vdev,
						       WLAN_UMAC_COMP_TDLS,
						       tdls_vdev_obj);
	if (QDF_IS_STATUS_ERROR(status))
		tdls_err("Failed to detach vdev tdls component");

	tdls_vdev_deinit(tdls_vdev_obj);
	qdf_mem_free(tdls_vdev_obj);

	if (tdls_soc_obj->tdls_osif_deinit_cb)
		tdls_soc_obj->tdls_osif_deinit_cb(vdev);

	return status;
}

/**
 * tdls_process_reset_all_peers() - Reset all tdls peers
 * @vdev: vdev object
 *
 * This function is called to reset all tdls peers and
 * notify upper layers of teardown indication
 *
 * Return: QDF_STATUS
 */

static QDF_STATUS tdls_process_reset_all_peers(struct wlan_objmgr_vdev *vdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint8_t staidx;
	struct tdls_peer *curr_peer = NULL;
	struct tdls_vdev_priv_obj *tdls_vdev;
	struct tdls_soc_priv_obj *tdls_soc;
	uint8_t reset_session_id;

	status = tdls_get_vdev_objects(vdev, &tdls_vdev, &tdls_soc);
	if (QDF_STATUS_SUCCESS != status) {
		tdls_err("tdls objects are NULL ");
		return status;
	}

	if (!tdls_soc->connected_peer_count) {
		tdls_debug("No tdls connected peers");
		return status;
	}

	/* Only peers that belong to this vdev's session are reset */
	reset_session_id = tdls_vdev->session_id;
	for (staidx = 0; staidx < tdls_soc->max_num_tdls_sta;
							staidx++) {
		if (tdls_soc->tdls_conn_info[staidx].sta_id
						== INVALID_TDLS_PEER_ID)
			continue;
		if (tdls_soc->tdls_conn_info[staidx].session_id !=
		    reset_session_id)
			continue;

		curr_peer =
		tdls_find_all_peer(tdls_soc,
				   tdls_soc->tdls_conn_info[staidx].
				   peer_mac.bytes);
		if (!curr_peer)
			continue;

		tdls_notice("indicate TDLS teardown (staId %d)",
			    curr_peer->sta_id);

		/* Indicate teardown to supplicant */
		tdls_indicate_teardown(tdls_vdev,
				       curr_peer,
				       TDLS_TEARDOWN_PEER_UNSPEC_REASON);

		tdls_reset_peer(tdls_vdev, curr_peer->peer_mac.bytes);

		if (tdls_soc->tdls_dereg_peer)
			tdls_soc->tdls_dereg_peer(
					tdls_soc->tdls_peer_context,
					wlan_vdev_get_id(vdev),
					curr_peer->sta_id);
		tdls_decrement_peer_count(tdls_soc);
		/* Invalidate the connection-table slot */
		tdls_soc->tdls_conn_info[staidx].sta_id = INVALID_TDLS_PEER_ID;
		tdls_soc->tdls_conn_info[staidx].session_id = 255;
		tdls_soc->tdls_conn_info[staidx].index =
					INVALID_TDLS_PEER_INDEX;
		qdf_mem_zero(&tdls_soc->tdls_conn_info[staidx].peer_mac,
			     sizeof(struct qdf_mac_addr));
	}
	return status;
}

/**
 * tdls_reset_all_peers() - Reset all tdls peers
 * @delete_all_peers_ind: Delete all peers indication
 *
 * This function is called to reset all tdls peers and
 * notify upper layers of teardown indication
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS tdls_reset_all_peers(
		struct tdls_delete_all_peers_params *delete_all_peers_ind)
{
	QDF_STATUS status;

	if (!delete_all_peers_ind || !delete_all_peers_ind->vdev) {
		tdls_err("invalid param");
		return QDF_STATUS_E_INVAL;
	}

	status = tdls_process_reset_all_peers(delete_all_peers_ind->vdev);

	if (delete_all_peers_ind->callback)
		delete_all_peers_ind->callback(delete_all_peers_ind->vdev);

	/* Indication structure is owned (and freed) here */
	qdf_mem_free(delete_all_peers_ind);
	return status;
}

/* Scheduler entry point: dispatch a TDLS command message to its handler */
QDF_STATUS tdls_process_cmd(struct scheduler_msg *msg)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!msg || !msg->bodyptr) {
		tdls_err("msg: 0x%pK", msg);
		QDF_ASSERT(0);
		return QDF_STATUS_E_NULL_VALUE;
	}
	tdls_debug("TDLS process command: %d", msg->type);

	switch (msg->type) {
	case TDLS_CMD_TX_ACTION:
		tdls_process_mgmt_req(msg->bodyptr);
		break;
	case TDLS_CMD_ADD_STA:
		tdls_process_add_peer(msg->bodyptr);
		break;
	case
	     TDLS_CMD_CHANGE_STA:
		tdls_process_update_peer(msg->bodyptr);
		break;
	case TDLS_CMD_ENABLE_LINK:
		tdls_process_enable_link(msg->bodyptr);
		break;
	case TDLS_CMD_DISABLE_LINK:
		tdls_process_del_peer(msg->bodyptr);
		break;
	case TDLS_CMD_CONFIG_FORCE_PEER:
		tdls_process_setup_peer(msg->bodyptr);
		break;
	case TDLS_CMD_REMOVE_FORCE_PEER:
		tdls_process_remove_force_peer(msg->bodyptr);
		break;
	case TDLS_CMD_STATS_UPDATE:
		break;
	case TDLS_CMD_CONFIG_UPDATE:
		break;
	case TDLS_CMD_SET_RESPONDER:
		tdls_set_responder(msg->bodyptr);
		break;
	case TDLS_CMD_SCAN_DONE:
		tdls_scan_done_callback(msg->bodyptr);
		break;
	case TDLS_NOTIFY_STA_CONNECTION:
		tdls_notify_sta_connect(msg->bodyptr);
		break;
	case TDLS_NOTIFY_STA_DISCONNECTION:
		tdls_notify_sta_disconnect(msg->bodyptr);
		break;
	case TDLS_CMD_SET_TDLS_MODE:
		tdls_set_operation_mode(msg->bodyptr);
		break;
	case TDLS_CMD_SESSION_DECREMENT:
		tdls_process_decrement_active_session(msg->bodyptr);
		/*Fall through to take decision on connection tracker.*/
	case TDLS_CMD_SESSION_INCREMENT:
		tdls_process_policy_mgr_notification(msg->bodyptr);
		break;
	case TDLS_CMD_TEARDOWN_LINKS:
		tdls_teardown_connections(msg->bodyptr);
		break;
	case TDLS_NOTIFY_RESET_ADAPTERS:
		tdls_notify_reset_adapter(msg->bodyptr);
		break;
	case TDLS_CMD_ANTENNA_SWITCH:
		tdls_process_antenna_switch(msg->bodyptr);
		break;
	case TDLS_CMD_GET_ALL_PEERS:
		tdls_get_all_peers_from_list(msg->bodyptr);
		break;
	case TDLS_CMD_SET_OFFCHANNEL:
		tdls_process_set_offchannel(msg->bodyptr);
		break;
	case TDLS_CMD_SET_OFFCHANMODE:
		tdls_process_set_offchan_mode(msg->bodyptr);
		break;
	case TDLS_CMD_SET_SECOFFCHANOFFSET:
		tdls_process_set_secoffchanneloffset(msg->bodyptr);
		break;
	case TDLS_DELETE_ALL_PEERS_INDICATION:
		tdls_reset_all_peers(msg->bodyptr);
		break;
	default:
		break;
	}

	return status;
}

/* Scheduler entry point: dispatch a southbound TDLS event to its handler */
QDF_STATUS tdls_process_evt(struct scheduler_msg *msg)
{
	struct wlan_objmgr_vdev *vdev;
	struct
tdls_event_notify *notify; + struct tdls_event_info *event; + + if (!msg || !msg->bodyptr) { + tdls_err("msg is not valid: %pK", msg); + return QDF_STATUS_E_NULL_VALUE; + } + notify = msg->bodyptr; + vdev = notify->vdev; + if (!vdev) { + tdls_err("NULL vdev object"); + qdf_mem_free(notify); + return QDF_STATUS_E_NULL_VALUE; + } + event = ¬ify->event; + + tdls_debug("evt type: %d", event->message_type); + switch (event->message_type) { + case TDLS_SHOULD_DISCOVER: + tdls_process_should_discover(vdev, event); + break; + case TDLS_SHOULD_TEARDOWN: + case TDLS_PEER_DISCONNECTED: + tdls_process_should_teardown(vdev, event); + break; + case TDLS_CONNECTION_TRACKER_NOTIFY: + tdls_process_connection_tracker_notify(vdev, event); + break; + default: + break; + } + + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_SB_ID); + qdf_mem_free(notify); + + return QDF_STATUS_SUCCESS; +} + +void tdls_timer_restart(struct wlan_objmgr_vdev *vdev, + qdf_mc_timer_t *timer, + uint32_t expiration_time) +{ + qdf_mc_timer_start(timer, expiration_time); +} + +/** + * wlan_hdd_tdls_monitor_timers_stop() - stop all monitoring timers + * @hdd_tdls_ctx: TDLS context + * + * Return: none + */ +static void tdls_monitor_timers_stop(struct tdls_vdev_priv_obj *tdls_vdev) +{ + qdf_mc_timer_stop(&tdls_vdev->peer_discovery_timer); +} + +/** + * tdls_peer_idle_timers_stop() - stop peer idle timers + * @tdls_vdev: TDLS vdev object + * + * Loop through the idle peer list and stop their timers + * + * Return: None + */ +static void tdls_peer_idle_timers_stop(struct tdls_vdev_priv_obj *tdls_vdev) +{ + int i; + qdf_list_t *head; + qdf_list_node_t *p_node; + struct tdls_peer *curr_peer; + QDF_STATUS status; + + tdls_vdev->discovery_peer_cnt = 0; + + for (i = 0; i < WLAN_TDLS_PEER_LIST_SIZE; i++) { + head = &tdls_vdev->peer_list[i]; + status = qdf_list_peek_front(head, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + curr_peer = qdf_container_of(p_node, struct tdls_peer, + node); + if 
(curr_peer->is_peer_idle_timer_initialised)
				qdf_mc_timer_stop(&curr_peer->peer_idle_timer);
			status = qdf_list_peek_next(head, p_node, &p_node);
		}
	}

}

/**
 * tdls_ct_timers_stop() - stop tdls connection tracker timers
 * @tdls_vdev: TDLS vdev
 *
 * Return: None
 */
static void tdls_ct_timers_stop(struct tdls_vdev_priv_obj *tdls_vdev)
{
	qdf_mc_timer_stop(&tdls_vdev->peer_update_timer);
	tdls_peer_idle_timers_stop(tdls_vdev);
}

/**
 * tdls_timers_stop() - stop all the tdls timers running
 * @tdls_vdev: TDLS vdev
 *
 * Return: none
 */
void tdls_timers_stop(struct tdls_vdev_priv_obj *tdls_vdev)
{
	tdls_monitor_timers_stop(tdls_vdev);
	tdls_ct_timers_stop(tdls_vdev);
}

/**
 * tdls_get_vdev_objects() - fetch the TDLS private vdev and soc objects
 * @vdev: object-manager vdev
 * @tdls_vdev_obj: out-param filled with the TDLS private vdev object
 * @tdls_soc_obj: out-param filled with the TDLS private soc object
 *
 * Succeeds only for STA and P2P-client vdevs; any NULL object or other
 * opmode is rejected.
 *
 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_FAILURE
 */
QDF_STATUS tdls_get_vdev_objects(struct wlan_objmgr_vdev *vdev,
				 struct tdls_vdev_priv_obj **tdls_vdev_obj,
				 struct tdls_soc_priv_obj **tdls_soc_obj)
{
	enum QDF_OPMODE device_mode;

	if (NULL == vdev)
		return QDF_STATUS_E_FAILURE;

	*tdls_vdev_obj = wlan_vdev_get_tdls_vdev_obj(vdev);
	if (NULL == (*tdls_vdev_obj))
		return QDF_STATUS_E_FAILURE;

	*tdls_soc_obj = wlan_vdev_get_tdls_soc_obj(vdev);
	if (NULL == (*tdls_soc_obj))
		return QDF_STATUS_E_FAILURE;

	device_mode = wlan_vdev_mlme_get_opmode(vdev);

	/* TDLS is only meaningful on STA / P2P-client interfaces */
	if (device_mode != QDF_STA_MODE &&
	    device_mode != QDF_P2P_CLIENT_MODE)
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

/**
 * tdls_state_param_setting_dump() - print tdls state & parameters to send to fw
 * @info: tdls setting to be sent to fw
 *
 * Return: void
 */
static void tdls_state_param_setting_dump(struct tdls_info *info)
{
	if (!info)
		return;

	tdls_debug("Setting tdls state and param in fw: vdev_id: %d, tdls_state: %d, notification_interval_ms: %d, tx_discovery_threshold: %d, tx_teardown_threshold: %d, rssi_teardown_threshold: %d, rssi_delta: %d, tdls_options: 0x%x, peer_traffic_ind_window: %d, peer_traffic_response_timeout: %d, puapsd_mask: 0x%x, puapsd_inactivity_time: %d, puapsd_rx_frame_threshold: %d, teardown_notification_ms: %d, tdls_peer_kickout_threshold: %d",
		   info->vdev_id,
		   info->tdls_state,
		   info->notification_interval_ms,
		   info->tx_discovery_threshold,
		   info->tx_teardown_threshold,
		   info->rssi_teardown_threshold,
		   info->rssi_delta,
		   info->tdls_options,
		   info->peer_traffic_ind_window,
		   info->peer_traffic_response_timeout,
		   info->puapsd_mask,
		   info->puapsd_inactivity_time,
		   info->puapsd_rx_frame_threshold,
		   info->teardown_notification_ms,
		   info->tdls_peer_kickout_threshold);

}

/*
 * Forward the off-channel-mode request to the target-interface layer.
 * Any non-success target status is normalized to QDF_STATUS_E_FAILURE.
 */
QDF_STATUS tdls_set_offchan_mode(struct wlan_objmgr_psoc *psoc,
				 struct tdls_channel_switch_params *param)
{
	QDF_STATUS status;

	/* wmi_unified_set_tdls_offchan_mode_cmd() will be called directly */
	status = tgt_tdls_set_offchan_mode(psoc, param);

	if (!QDF_IS_STATUS_SUCCESS(status))
		status = QDF_STATUS_E_FAILURE;

	return status;
}

/**
 * tdls_update_fw_tdls_state() - update tdls status info
 * @tdls_soc_obj: TDLS soc object
 * @tdls_info_to_fw: TDLS state info to update in f/w.
 *
 * send message to WMA to set TDLS state in f/w
 *
 * Return: QDF_STATUS.
 */
static
QDF_STATUS tdls_update_fw_tdls_state(struct tdls_soc_priv_obj *tdls_soc_obj,
				     struct tdls_info *tdls_info_to_fw)
{
	QDF_STATUS status;

	/* wmi_unified_update_fw_tdls_state_cmd() will be called directly */
	status = tgt_tdls_set_fw_state(tdls_soc_obj->soc, tdls_info_to_fw);

	if (!QDF_IS_STATUS_SUCCESS(status))
		status = QDF_STATUS_E_FAILURE;

	return status;
}

/*
 * Returns true only when exactly one connection exists on the soc; any
 * concurrency disallows TDLS. Takes and releases its own northbound
 * vdev reference, so it is safe to call with an unreferenced vdev.
 */
bool tdls_check_is_tdls_allowed(struct wlan_objmgr_vdev *vdev)
{
	struct tdls_vdev_priv_obj *tdls_vdev_obj;
	struct tdls_soc_priv_obj *tdls_soc_obj;
	bool state = false;

	if (QDF_STATUS_SUCCESS != wlan_objmgr_vdev_try_get_ref(vdev,
							WLAN_TDLS_NB_ID))
		return state;

	if (QDF_STATUS_SUCCESS != tdls_get_vdev_objects(vdev, &tdls_vdev_obj,
							&tdls_soc_obj)) {
		wlan_objmgr_vdev_release_ref(vdev,
					     WLAN_TDLS_NB_ID);
		return state;
	}

	if (policy_mgr_get_connection_count(tdls_soc_obj->soc) == 1)
		state = true;
	else
		tdls_warn("Concurrent sessions are running or TDLS disabled");
	/* If any concurrency is detected */
	/* print session information */
	wlan_objmgr_vdev_release_ref(vdev,
				     WLAN_TDLS_NB_ID);
	return state;
}

/**
 * tdls_set_ct_mode() - Set the tdls connection tracker mode
 * @psoc: wlan psoc object
 *
 * This routine is called to set the tdls connection tracker operation status
 *
 * Return: NONE
 */
void tdls_set_ct_mode(struct wlan_objmgr_psoc *psoc)
{
	bool state = false;
	struct tdls_soc_priv_obj *tdls_soc_obj;

	tdls_soc_obj = wlan_psoc_get_tdls_soc_obj(psoc);
	if (NULL == tdls_soc_obj)
		return;

	/* If any concurrency is detected, skip tdls pkt tracker */
	if (policy_mgr_get_connection_count(psoc) > 1) {
		state = false;
		goto set_state;
	}

	if (TDLS_SUPPORT_DISABLED == tdls_soc_obj->tdls_current_mode ||
	    TDLS_SUPPORT_SUSPENDED == tdls_soc_obj->tdls_current_mode ||
	    !TDLS_IS_IMPLICIT_TRIG_ENABLED(
			tdls_soc_obj->tdls_configs.tdls_feature_flags)) {
		state = false;
		goto set_state;
	} else if (policy_mgr_mode_specific_connection_count(psoc,
							     PM_STA_MODE,
							     NULL) == 1) {
		state = true;
	} else if (policy_mgr_mode_specific_connection_count(psoc,
							     PM_P2P_CLIENT_MODE,
							     NULL) == 1) {
		state = true;
	} else {
		state = false;
		goto set_state;
	}

	/* In case of TDLS external control, peer should be added
	 * by the user space to start connection tracker.
	 */
	if (TDLS_IS_EXTERNAL_CONTROL_ENABLED(
			tdls_soc_obj->tdls_configs.tdls_feature_flags)) {
		if (tdls_soc_obj->tdls_external_peer_count)
			state = true;
		else
			state = false;
	}

set_state:
	tdls_soc_obj->enable_tdls_connection_tracker = state;

	tdls_debug("enable_tdls_connection_tracker %d",
		   tdls_soc_obj->enable_tdls_connection_tracker);
}

/*
 * Re-evaluate the connection-tracker mode after a policy-manager
 * connection count change and, when the tracker is enabled, kick the
 * implicit-trigger machinery on the (single) TDLS-capable vdev.
 */
QDF_STATUS
tdls_process_policy_mgr_notification(struct wlan_objmgr_psoc *psoc)
{
	struct tdls_vdev_priv_obj *tdls_priv_vdev;
	struct wlan_objmgr_vdev *tdls_obj_vdev;
	struct tdls_soc_priv_obj *tdls_priv_soc;

	if (!psoc) {
		tdls_err("psoc: %pK", psoc);
		return QDF_STATUS_E_NULL_VALUE;
	}
	tdls_obj_vdev = tdls_get_vdev(psoc, WLAN_TDLS_NB_ID);
	tdls_debug("enter ");
	tdls_set_ct_mode(psoc);
	if (tdls_obj_vdev && (tdls_get_vdev_objects(tdls_obj_vdev,
	    &tdls_priv_vdev, &tdls_priv_soc) == QDF_STATUS_SUCCESS) &&
	    tdls_priv_soc->enable_tdls_connection_tracker)
		tdls_implicit_enable(tdls_priv_vdev);

	if (tdls_obj_vdev)
		wlan_objmgr_vdev_release_ref(tdls_obj_vdev, WLAN_TDLS_NB_ID);

	tdls_debug("exit ");
	return QDF_STATUS_SUCCESS;
}

/*
 * A concurrent session went away; if exactly one TDLS-capable
 * interface remains, re-enable TDLS in firmware and host for it.
 */
QDF_STATUS
tdls_process_decrement_active_session(struct wlan_objmgr_psoc *psoc)
{
	struct tdls_soc_priv_obj *tdls_priv_soc;
	struct tdls_vdev_priv_obj *tdls_priv_vdev;
	struct wlan_objmgr_vdev *tdls_obj_vdev;
	uint8_t vdev_id;

	tdls_debug("Enter");
	if (!psoc)
		return QDF_STATUS_E_NULL_VALUE;

	tdls_obj_vdev = tdls_get_vdev(psoc, WLAN_TDLS_NB_ID);
	if (tdls_obj_vdev) {
		tdls_debug("Enable TDLS in FW and host as only one active sta/p2p_cli interface is present");
		vdev_id =
wlan_vdev_get_id(tdls_obj_vdev);
		if (tdls_get_vdev_objects(tdls_obj_vdev, &tdls_priv_vdev,
					  &tdls_priv_soc) == QDF_STATUS_SUCCESS)
			tdls_send_update_to_fw(tdls_priv_vdev, tdls_priv_soc,
					       false, false, true, vdev_id);
		wlan_objmgr_vdev_release_ref(tdls_obj_vdev, WLAN_TDLS_NB_ID);
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * tdls_get_vdev() - Get tdls specific vdev object manager
 * @psoc: wlan psoc object manager
 * @dbg_id: debug id
 *
 * If TDLS possible, return the corresponding vdev
 * to enable TDLS in the system.
 *
 * Return: vdev manager pointer or NULL.
 */
struct wlan_objmgr_vdev *tdls_get_vdev(struct wlan_objmgr_psoc *psoc,
				       wlan_objmgr_ref_dbgid dbg_id)
{
	uint32_t vdev_id;

	/* TDLS needs the connection to be the only one on this soc */
	if (policy_mgr_get_connection_count(psoc) > 1)
		return NULL;

	/* prefer the STA vdev, fall back to the P2P-client vdev */
	vdev_id = policy_mgr_mode_specific_vdev_id(psoc, PM_STA_MODE);

	if (WLAN_INVALID_VDEV_ID != vdev_id)
		return wlan_objmgr_get_vdev_by_id_from_psoc(psoc,
							    vdev_id,
							    dbg_id);

	vdev_id = policy_mgr_mode_specific_vdev_id(psoc, PM_P2P_CLIENT_MODE);

	if (WLAN_INVALID_VDEV_ID != vdev_id)
		return wlan_objmgr_get_vdev_by_id_from_psoc(psoc,
							    vdev_id,
							    dbg_id);

	return NULL;
}

/*
 * Scheduler flush callback: release resources owned by a still-queued
 * TDLS message when the queue is flushed before delivery. Only
 * TDLS_NOTIFY_STA_DISCONNECTION carries an allocation plus a vdev
 * reference; the other listed types carry non-owned pointers.
 */
static QDF_STATUS tdls_post_msg_flush_cb(struct scheduler_msg *msg)
{
	void *ptr = msg->bodyptr;
	struct wlan_objmgr_vdev *vdev = NULL;

	switch (msg->type) {
	case TDLS_NOTIFY_STA_DISCONNECTION:
		vdev = ((struct tdls_sta_notify_params *)ptr)->vdev;
		wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID);
		qdf_mem_free(ptr);
		break;

	case TDLS_CMD_SCAN_DONE:
	case TDLS_CMD_SESSION_INCREMENT:
	case TDLS_CMD_SESSION_DECREMENT:
		break;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * tdls_process_session_update() - update session count information
 * @psoc: soc object
 * @cmd_type: TDLS command type posted to the TDLS queue
 *
 * update the session information in connection tracker
 *
 * Return: None
 */
static void tdls_process_session_update(struct wlan_objmgr_psoc *psoc,
				 enum tdls_command_type cmd_type)
{
	struct scheduler_msg msg = {0};
	QDF_STATUS status;

	msg.bodyptr = psoc;
	msg.callback = tdls_process_cmd;
	msg.flush_callback = tdls_post_msg_flush_cb;
	msg.type = (uint16_t)cmd_type;

	status = scheduler_post_message(QDF_MODULE_ID_TDLS,
					QDF_MODULE_ID_TDLS,
					QDF_MODULE_ID_OS_IF, &msg);
	if (QDF_IS_STATUS_ERROR(status))
		tdls_alert("message post failed ");
}

void tdls_notify_increment_session(struct wlan_objmgr_psoc *psoc)
{
	tdls_process_session_update(psoc, TDLS_CMD_SESSION_INCREMENT);
}

void tdls_notify_decrement_session(struct wlan_objmgr_psoc *psoc)
{
	tdls_process_session_update(psoc, TDLS_CMD_SESSION_DECREMENT);
}

/*
 * Build a tdls_info from the vdev threshold config and soc ini config
 * and push it to firmware. set_state_info.set_state_cnt tracks whether
 * firmware is currently enabled (non-zero) so redundant requests in the
 * same direction are dropped early.
 */
void tdls_send_update_to_fw(struct tdls_vdev_priv_obj *tdls_vdev_obj,
			    struct tdls_soc_priv_obj *tdls_soc_obj,
			    bool tdls_prohibited,
			    bool tdls_chan_swit_prohibited,
			    bool sta_connect_event,
			    uint8_t session_id)
{
	struct tdls_info *tdls_info_to_fw;
	struct tdls_config_params *threshold_params;
	uint32_t tdls_feature_flags;
	QDF_STATUS status;
	uint8_t set_state_cnt;

	tdls_debug("Enter");
	tdls_feature_flags = tdls_soc_obj->tdls_configs.tdls_feature_flags;
	if (!TDLS_IS_ENABLED(tdls_feature_flags)) {
		tdls_debug("TDLS mode is not enabled");
		return;
	}

	set_state_cnt = tdls_soc_obj->set_state_info.set_state_cnt;
	if ((set_state_cnt == 0 && !sta_connect_event) ||
	    (set_state_cnt && sta_connect_event)) {
		tdls_debug("FW TDLS state is already in requested state");
		return;
	}

	/* If AP or caller indicated TDLS Prohibited then disable tdls mode */
	if (sta_connect_event) {
		if (tdls_prohibited) {
			tdls_soc_obj->tdls_current_mode =
					TDLS_SUPPORT_DISABLED;
		} else {
			tdls_debug("TDLS feature flags from ini %d ",
				   tdls_feature_flags);
			if (!TDLS_IS_IMPLICIT_TRIG_ENABLED(tdls_feature_flags))
				tdls_soc_obj->tdls_current_mode =
					TDLS_SUPPORT_EXP_TRIG_ONLY;
			else if (TDLS_IS_EXTERNAL_CONTROL_ENABLED(
					tdls_feature_flags))
				tdls_soc_obj->tdls_current_mode =
					TDLS_SUPPORT_EXT_CONTROL;
			else
				tdls_soc_obj->tdls_current_mode =
					TDLS_SUPPORT_IMP_MODE;
		}
	} else {
		tdls_soc_obj->tdls_current_mode =
				TDLS_SUPPORT_DISABLED;
	}

	tdls_info_to_fw = qdf_mem_malloc(sizeof(struct tdls_info));

	if (!tdls_info_to_fw) {
		tdls_err("memory allocation failed for tdlsParams");
		QDF_ASSERT(0);
		return;
	}

	threshold_params = &tdls_vdev_obj->threshold_config;

	tdls_info_to_fw->notification_interval_ms =
		threshold_params->tx_period_t;
	tdls_info_to_fw->tx_discovery_threshold =
		threshold_params->tx_packet_n;
	tdls_info_to_fw->tx_teardown_threshold =
		threshold_params->idle_packet_n;
	tdls_info_to_fw->rssi_teardown_threshold =
		threshold_params->rssi_teardown_threshold;
	tdls_info_to_fw->rssi_delta = threshold_params->rssi_delta;
	tdls_info_to_fw->vdev_id = session_id;

	/* record the session id in vdev context */
	tdls_vdev_obj->session_id = session_id;
	tdls_info_to_fw->tdls_state = tdls_soc_obj->tdls_current_mode;
	tdls_info_to_fw->tdls_options = 0;

	/* Do not enable TDLS offchannel, if AP prohibited TDLS
	 * channel switch
	 */
	if (TDLS_IS_OFF_CHANNEL_ENABLED(tdls_feature_flags) &&
	    (!tdls_chan_swit_prohibited))
		tdls_info_to_fw->tdls_options = ENA_TDLS_OFFCHAN;

	if (TDLS_IS_BUFFER_STA_ENABLED(tdls_feature_flags))
		tdls_info_to_fw->tdls_options |= ENA_TDLS_BUFFER_STA;
	if (TDLS_IS_SLEEP_STA_ENABLED(tdls_feature_flags))
		tdls_info_to_fw->tdls_options |= ENA_TDLS_SLEEP_STA;


	tdls_info_to_fw->peer_traffic_ind_window =
		tdls_soc_obj->tdls_configs.tdls_uapsd_pti_window;
	tdls_info_to_fw->peer_traffic_response_timeout =
		tdls_soc_obj->tdls_configs.tdls_uapsd_ptr_timeout;
	tdls_info_to_fw->puapsd_mask =
		tdls_soc_obj->tdls_configs.tdls_uapsd_mask;
	tdls_info_to_fw->puapsd_inactivity_time =
		tdls_soc_obj->tdls_configs.tdls_uapsd_inactivity_time;
	tdls_info_to_fw->puapsd_rx_frame_threshold =
		tdls_soc_obj->tdls_configs.tdls_rx_pkt_threshold;
	tdls_info_to_fw->teardown_notification_ms =
		tdls_soc_obj->tdls_configs.tdls_idle_timeout;
	tdls_info_to_fw->tdls_peer_kickout_threshold =
		tdls_soc_obj->tdls_configs.tdls_peer_kickout_threshold;

	tdls_state_param_setting_dump(tdls_info_to_fw);

	status = tdls_update_fw_tdls_state(tdls_soc_obj, tdls_info_to_fw);
	if (QDF_STATUS_SUCCESS != status)
		goto done;

	if (sta_connect_event) {
		tdls_soc_obj->set_state_info.set_state_cnt++;
		tdls_soc_obj->set_state_info.vdev_id = session_id;
	} else {
		tdls_soc_obj->set_state_info.set_state_cnt--;
	}

	tdls_debug("TDLS Set state cnt %d",
		   tdls_soc_obj->set_state_info.set_state_cnt);
done:
	qdf_mem_free(tdls_info_to_fw);
	return;
}

/*
 * Association handler: push the (possibly AP-prohibited) TDLS state to
 * firmware and start the connection tracker if applicable. Concurrency
 * short-circuits with success since TDLS simply stays off.
 */
static QDF_STATUS
tdls_process_sta_connect(struct tdls_sta_notify_params *notify)
{
	struct tdls_vdev_priv_obj *tdls_vdev_obj;
	struct tdls_soc_priv_obj *tdls_soc_obj;

	if (QDF_STATUS_SUCCESS != tdls_get_vdev_objects(notify->vdev,
							&tdls_vdev_obj,
							&tdls_soc_obj))
		return QDF_STATUS_E_INVAL;


	tdls_debug("Check and update TDLS state");

	if (policy_mgr_get_connection_count(tdls_soc_obj->soc) > 1) {
		tdls_debug("Concurrent sessions exist, TDLS can't be enabled");
		return QDF_STATUS_SUCCESS;
	}

	/* Association event */
	if (!tdls_soc_obj->tdls_disable_in_progress) {
		tdls_send_update_to_fw(tdls_vdev_obj,
				       tdls_soc_obj,
				       notify->tdls_prohibited,
				       notify->tdls_chan_swit_prohibited,
				       true,
				       notify->session_id);
	}

	/* check and set the connection tracker */
	tdls_set_ct_mode(tdls_soc_obj->soc);
	if (tdls_soc_obj->enable_tdls_connection_tracker)
		tdls_implicit_enable(tdls_vdev_obj);

	return QDF_STATUS_SUCCESS;
}

/* Message-queue entry point; owns and frees @notify and its vdev ref */
QDF_STATUS tdls_notify_sta_connect(struct tdls_sta_notify_params *notify)
{
	QDF_STATUS status;

	if (!notify || !notify->vdev) {
		tdls_err("invalid param");
		return QDF_STATUS_E_INVAL;
	}

	status = tdls_process_sta_connect(notify);

	wlan_objmgr_vdev_release_ref(notify->vdev, WLAN_TDLS_NB_ID);
	qdf_mem_free(notify);

	return status;
}

/*
 * Disassociation handler: disable TDLS for the leaving interface, then
 * check whether another (now sole) STA/P2P-client interface can take
 * over, and finally re-evaluate the connection tracker state.
 */
static QDF_STATUS
tdls_process_sta_disconnect(struct tdls_sta_notify_params *notify)
{
	struct tdls_vdev_priv_obj *tdls_vdev_obj;
	struct tdls_vdev_priv_obj *curr_tdls_vdev;
	struct tdls_soc_priv_obj *tdls_soc_obj;
	struct tdls_soc_priv_obj *curr_tdls_soc;
	struct wlan_objmgr_vdev *temp_vdev = NULL;
	uint8_t vdev_id;


	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (QDF_STATUS_SUCCESS != tdls_get_vdev_objects(notify->vdev,
							&tdls_vdev_obj,
							&tdls_soc_obj))
		return QDF_STATUS_E_INVAL;

	/* if the disconnect comes from user space, we have to delete all the
	 * tdls peers before sending the set state cmd.
	 */
	if (notify->user_disconnect)
		return tdls_delete_all_tdls_peers(notify->vdev, tdls_soc_obj);

	tdls_debug("Check and update TDLS state");

	curr_tdls_vdev = tdls_vdev_obj;
	curr_tdls_soc = tdls_soc_obj;

	/* Disassociation event */
	if (!tdls_soc_obj->tdls_disable_in_progress)
		tdls_send_update_to_fw(tdls_vdev_obj, tdls_soc_obj, false,
				       false, false, notify->session_id);

	/* If concurrency is not marked, then we have to
	 * check, whether TDLS could be enabled in the
	 * system after this disassoc event.
	 */
	if (!notify->lfr_roam && !tdls_soc_obj->tdls_disable_in_progress) {
		temp_vdev = tdls_get_vdev(tdls_soc_obj->soc, WLAN_TDLS_NB_ID);
		if (NULL != temp_vdev) {
			status = tdls_get_vdev_objects(temp_vdev,
						       &tdls_vdev_obj,
						       &tdls_soc_obj);
			vdev_id = wlan_vdev_get_id(temp_vdev);
			if (QDF_STATUS_SUCCESS == status) {
				tdls_send_update_to_fw(tdls_vdev_obj,
						       tdls_soc_obj,
						       false,
						       false,
						       true,
						       vdev_id);
				curr_tdls_vdev = tdls_vdev_obj;
				curr_tdls_soc = tdls_soc_obj;
			}
		}
	}

	/* Check and set the connection tracker and implicit timers */
	tdls_set_ct_mode(curr_tdls_soc->soc);
	if (curr_tdls_soc->enable_tdls_connection_tracker)
		tdls_implicit_enable(curr_tdls_vdev);
	else
		tdls_implicit_disable(curr_tdls_vdev);

	/* release the vdev ref , if temp vdev was acquired */
	if (temp_vdev)
		wlan_objmgr_vdev_release_ref(temp_vdev,
					     WLAN_TDLS_NB_ID);

	return status;
}

/* Message-queue entry point; owns and frees @notify and its vdev ref */
QDF_STATUS tdls_notify_sta_disconnect(struct tdls_sta_notify_params *notify)
{
	QDF_STATUS status;

	if (!notify || !notify->vdev) {
		tdls_err("invalid param");
		return QDF_STATUS_E_INVAL;
	}

	status = tdls_process_sta_disconnect(notify);

	wlan_objmgr_vdev_release_ref(notify->vdev, WLAN_TDLS_NB_ID);
	qdf_mem_free(notify);

	return status;
}

/* Stop every TDLS timer on the vdev as part of an adapter reset */
static void tdls_process_reset_adapter(struct wlan_objmgr_vdev *vdev)
{
	struct tdls_vdev_priv_obj *tdls_vdev;

	tdls_vdev = wlan_vdev_get_tdls_vdev_obj(vdev);
	if (!tdls_vdev)
		return;
	tdls_timers_stop(tdls_vdev);
}

/*
 * Format the peer table into the caller-supplied buffer.
 * Return: number of bytes written into get_tdls_peers->buf.
 */
static int __tdls_get_all_peers_from_list(
			struct tdls_get_all_peers *get_tdls_peers)
{
	int i;
	int len, init_len;
	qdf_list_t *head;
	qdf_list_node_t *p_node;
	struct tdls_peer *curr_peer;
	char *buf;
	int buf_len;
	struct tdls_vdev_priv_obj *tdls_vdev;
	QDF_STATUS status;

	tdls_notice("Enter ");

	buf = get_tdls_peers->buf;
	buf_len = get_tdls_peers->buf_len;

	if (!wlan_vdev_is_up(get_tdls_peers->vdev)) {
		len = qdf_scnprintf(buf, buf_len,
				    "\nSTA is not
associated\n"); + return len; + } + + tdls_vdev = wlan_vdev_get_tdls_vdev_obj(get_tdls_peers->vdev); + + if (!tdls_vdev) { + len = qdf_scnprintf(buf, buf_len, "TDLS not enabled\n"); + return len; + } + + init_len = buf_len; + len = qdf_scnprintf(buf, buf_len, + "\n%-18s%-3s%-4s%-3s%-5s\n", + "MAC", "Id", "cap", "up", "RSSI"); + buf += len; + buf_len -= len; + len = qdf_scnprintf(buf, buf_len, + "---------------------------------\n"); + buf += len; + buf_len -= len; + + for (i = 0; i < WLAN_TDLS_PEER_LIST_SIZE; i++) { + head = &tdls_vdev->peer_list[i]; + status = qdf_list_peek_front(head, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + curr_peer = qdf_container_of(p_node, + struct tdls_peer, node); + if (buf_len < 32 + 1) + break; + len = qdf_scnprintf(buf, buf_len, + QDF_MAC_ADDR_STR "%3d%4s%3s%5d\n", + QDF_MAC_ADDR_ARRAY(curr_peer->peer_mac.bytes), + curr_peer->sta_id, + (curr_peer->tdls_support == + TDLS_CAP_SUPPORTED) ? "Y" : "N", + TDLS_IS_LINK_CONNECTED(curr_peer) ? "Y" : + "N", curr_peer->rssi); + buf += len; + buf_len -= len; + status = qdf_list_peek_next(head, p_node, &p_node); + } + } + + tdls_notice("Exit "); + return init_len - buf_len; +} + +void tdls_get_all_peers_from_list( + struct tdls_get_all_peers *get_tdls_peers) +{ + int32_t len; + struct tdls_soc_priv_obj *tdls_soc_obj; + struct tdls_osif_indication indication; + + if (!get_tdls_peers->vdev) + qdf_mem_free(get_tdls_peers); + + len = __tdls_get_all_peers_from_list(get_tdls_peers); + + indication.status = len; + indication.vdev = get_tdls_peers->vdev; + + tdls_soc_obj = wlan_vdev_get_tdls_soc_obj(get_tdls_peers->vdev); + if (tdls_soc_obj && tdls_soc_obj->tdls_event_cb) + tdls_soc_obj->tdls_event_cb(tdls_soc_obj->tdls_evt_cb_data, + TDLS_EVENT_USER_CMD, &indication); + + qdf_mem_free(get_tdls_peers); +} + +void tdls_notify_reset_adapter(struct wlan_objmgr_vdev *vdev) +{ + if (!vdev) { + QDF_ASSERT(0); + return; + } + + if (QDF_STATUS_SUCCESS != wlan_objmgr_vdev_try_get_ref(vdev, + 
WLAN_TDLS_NB_ID))
		return;

	tdls_process_reset_adapter(vdev);
	wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID);
}

/**
 * tdls_peers_deleted_notification() - notify TDLS of peer deletion on a vdev
 * @psoc: soc object
 * @vdev_id: id of the vdev whose peers were deleted
 *
 * Posts a TDLS_NOTIFY_STA_DISCONNECTION message (marked as an LFR roam,
 * i.e. not a user disconnect) to the TDLS queue. The vdev reference
 * taken here is released either by the message handler or by the flush
 * callback.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS tdls_peers_deleted_notification(struct wlan_objmgr_psoc *psoc,
					   uint8_t vdev_id)
{
	struct scheduler_msg msg = {0, };
	struct tdls_sta_notify_params *notify;
	QDF_STATUS status;
	struct wlan_objmgr_vdev *vdev;

	notify = qdf_mem_malloc(sizeof(*notify));
	if (!notify) {
		tdls_err("memory allocation failed !!!");
		return QDF_STATUS_E_NULL_VALUE;
	}

	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc,
						    vdev_id,
						    WLAN_TDLS_NB_ID);

	if (!vdev) {
		tdls_err("vdev not exist for the vdev id %d",
			 vdev_id);
		qdf_mem_free(notify);
		return QDF_STATUS_E_INVAL;
	}

	notify->lfr_roam = true;
	notify->tdls_chan_swit_prohibited = false;
	notify->tdls_prohibited = false;
	notify->session_id = vdev_id;
	notify->vdev = vdev;
	notify->user_disconnect = false;

	msg.bodyptr = notify;
	msg.callback = tdls_process_cmd;
	msg.flush_callback = tdls_post_msg_flush_cb;
	msg.type = TDLS_NOTIFY_STA_DISCONNECTION;

	status = scheduler_post_message(QDF_MODULE_ID_TDLS,
					QDF_MODULE_ID_TDLS,
					QDF_MODULE_ID_OS_IF, &msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		/* post failed: undo the ref and the allocation here */
		wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID);
		qdf_mem_free(notify);
		tdls_alert("message post failed ");

		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * tdls_delete_all_peers_indication() - queue a delete-all-peers request
 * @delete_peers_ind: request parameters; copied, caller keeps ownership
 *
 * Return: QDF_STATUS
 */
QDF_STATUS tdls_delete_all_peers_indication(
		struct tdls_delete_all_peers_params *delete_peers_ind)
{
	struct scheduler_msg msg = {0, };
	struct tdls_delete_all_peers_params *indication;
	QDF_STATUS status;

	indication = qdf_mem_malloc(sizeof(*indication));
	if (!indication) {
		tdls_err("memory allocation failed !!!");
		return QDF_STATUS_E_NULL_VALUE;
	}

	*indication = *delete_peers_ind;

	/*
	 * NOTE(review): no flush_callback is set on this message, so
	 * 'indication' appears to leak if the scheduler queue is flushed
	 * before delivery - confirm against scheduler flush semantics.
	 */
	msg.bodyptr = indication;
	msg.callback = tdls_process_cmd;
	msg.type = TDLS_DELETE_ALL_PEERS_INDICATION;

	status = scheduler_post_message(QDF_MODULE_ID_TDLS,
					QDF_MODULE_ID_TDLS,
					QDF_MODULE_ID_OS_IF, &msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		qdf_mem_free(indication);
		tdls_alert("message post failed ");
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * tdls_set_mode_in_vdev() - set TDLS mode
 * @tdls_vdev: tdls vdev object
 * @tdls_soc: tdls soc object
 * @tdls_mode: TDLS mode
 * @source: TDLS disable source enum values
 *
 * Return: Void
 */
static void tdls_set_mode_in_vdev(struct tdls_vdev_priv_obj *tdls_vdev,
				  struct tdls_soc_priv_obj *tdls_soc,
				  enum tdls_feature_mode tdls_mode,
				  enum tdls_disable_sources source)
{
	if (!tdls_vdev)
		return;
	tdls_debug("enter tdls mode is %d", tdls_mode);

	if (TDLS_SUPPORT_IMP_MODE == tdls_mode ||
	    TDLS_SUPPORT_EXT_CONTROL == tdls_mode) {
		clear_bit((unsigned long)source,
			  &tdls_soc->tdls_source_bitmap);
		/*
		 * Check if any TDLS source bit is set and if
		 * bitmap is not zero then we should not
		 * enable TDLS
		 */
		if (tdls_soc->tdls_source_bitmap) {
			tdls_notice("Don't enable TDLS, source bitmap: %lu",
				    tdls_soc->tdls_source_bitmap);
			return;
		}
		tdls_implicit_enable(tdls_vdev);
		/* tdls implicit mode is enabled, so
		 * enable the connection tracker
		 */
		tdls_soc->enable_tdls_connection_tracker =
			true;
	} else if (TDLS_SUPPORT_DISABLED == tdls_mode) {
		set_bit((unsigned long)source,
			&tdls_soc->tdls_source_bitmap);
		tdls_implicit_disable(tdls_vdev);
		/* If tdls implicit mode is disabled, then
		 * stop the connection tracker.
		 */
		tdls_soc->enable_tdls_connection_tracker =
			false;
	} else if (TDLS_SUPPORT_EXP_TRIG_ONLY ==
		   tdls_mode) {
		clear_bit((unsigned long)source,
			  &tdls_soc->tdls_source_bitmap);
		tdls_implicit_disable(tdls_vdev);
		/* If tdls implicit mode is disabled, then
		 * stop the connection tracker.
+ */ + tdls_soc->enable_tdls_connection_tracker = + false; + + /* + * Check if any TDLS source bit is set and if + * bitmap is not zero then we should not + * enable TDLS + */ + if (tdls_soc->tdls_source_bitmap) + return; + } + tdls_debug("exit "); + +} + +/** + * tdls_set_current_mode() - set TDLS mode + * @tdls_soc: tdls soc object + * @tdls_mode: TDLS mode + * @update_last: indicate to record the last tdls mode + * @source: TDLS disable source enum values + * + * Return: Void + */ +static void tdls_set_current_mode(struct tdls_soc_priv_obj *tdls_soc, + enum tdls_feature_mode tdls_mode, + bool update_last, + enum tdls_disable_sources source) +{ + struct wlan_objmgr_vdev *vdev; + struct tdls_vdev_priv_obj *tdls_vdev; + + if (!tdls_soc) + return; + + tdls_debug("mode %d", (int)tdls_mode); + + if (update_last) + tdls_soc->tdls_last_mode = tdls_mode; + + if (tdls_soc->tdls_current_mode == tdls_mode) { + tdls_debug("already in mode %d", tdls_mode); + + switch (tdls_mode) { + /* TDLS is already enabled hence clear source mask, return */ + case TDLS_SUPPORT_IMP_MODE: + case TDLS_SUPPORT_EXP_TRIG_ONLY: + case TDLS_SUPPORT_EXT_CONTROL: + clear_bit((unsigned long)source, + &tdls_soc->tdls_source_bitmap); + tdls_debug("clear source mask:%d", source); + return; + /* TDLS is already disabled hence set source mask, return */ + case TDLS_SUPPORT_DISABLED: + set_bit((unsigned long)source, + &tdls_soc->tdls_source_bitmap); + tdls_debug("set source mask:%d", source); + return; + default: + return; + } + } + + /* get sta vdev */ + vdev = wlan_objmgr_get_vdev_by_opmode_from_psoc(tdls_soc->soc, + QDF_STA_MODE, + WLAN_TDLS_NB_ID); + if (NULL != vdev) { + tdls_debug("set mode in tdls vdev "); + tdls_vdev = wlan_vdev_get_tdls_vdev_obj(vdev); + if (!tdls_vdev) + tdls_set_mode_in_vdev(tdls_vdev, tdls_soc, + tdls_mode, source); + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID); + } + + /* get p2p client vdev */ + vdev = wlan_objmgr_get_vdev_by_opmode_from_psoc(tdls_soc->soc, + 
QDF_P2P_CLIENT_MODE, + WLAN_TDLS_NB_ID); + if (NULL != vdev) { + tdls_debug("set mode in tdls vdev "); + tdls_vdev = wlan_vdev_get_tdls_vdev_obj(vdev); + if (!tdls_vdev) + tdls_set_mode_in_vdev(tdls_vdev, tdls_soc, + tdls_mode, source); + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID); + } + + if (!update_last) + tdls_soc->tdls_last_mode = tdls_soc->tdls_current_mode; + + tdls_soc->tdls_current_mode = tdls_mode; + +} + +QDF_STATUS tdls_set_operation_mode(struct tdls_set_mode_params *tdls_set_mode) +{ + struct tdls_soc_priv_obj *tdls_soc; + struct tdls_vdev_priv_obj *tdls_vdev; + QDF_STATUS status; + + if (!tdls_set_mode || !tdls_set_mode->vdev) + return QDF_STATUS_E_INVAL; + + status = tdls_get_vdev_objects(tdls_set_mode->vdev, + &tdls_vdev, &tdls_soc); + + if (QDF_IS_STATUS_ERROR(status)) + goto release_mode_ref; + + tdls_set_current_mode(tdls_soc, + tdls_set_mode->tdls_mode, + tdls_set_mode->update_last, + tdls_set_mode->source); + +release_mode_ref: + wlan_objmgr_vdev_release_ref(tdls_set_mode->vdev, WLAN_TDLS_NB_ID); + qdf_mem_free(tdls_set_mode); + return status; +} + +/** + * wlan_hdd_tdls_scan_done_callback() - callback for tdls scan done event + * @pAdapter: HDD adapter + * + * Return: Void + */ +void tdls_scan_done_callback(struct tdls_soc_priv_obj *tdls_soc) +{ + if (!tdls_soc) + return; + + if (TDLS_SUPPORT_DISABLED == tdls_soc->tdls_current_mode) { + tdls_debug("TDLS mode is disabled OR not enabled"); + return; + } + + /* if tdls was enabled before scan, re-enable tdls mode */ + if (TDLS_SUPPORT_IMP_MODE == tdls_soc->tdls_last_mode || + TDLS_SUPPORT_EXT_CONTROL == tdls_soc->tdls_last_mode || + TDLS_SUPPORT_EXP_TRIG_ONLY == tdls_soc->tdls_last_mode) { + tdls_debug("revert tdls mode %d", + tdls_soc->tdls_last_mode); + + tdls_set_current_mode(tdls_soc, tdls_soc->tdls_last_mode, + false, + TDLS_SET_MODE_SOURCE_SCAN); + } +} + +/** + * tdls_post_scan_done_msg() - post scan done message to tdls cmd queue + * @tdls_soc: tdls soc object + * + * Return: 
QDF_STATUS_SUCCESS or QDF_STATUS_E_NULL_VALUE + */ +static QDF_STATUS tdls_post_scan_done_msg(struct tdls_soc_priv_obj *tdls_soc) +{ + struct scheduler_msg msg = {0, }; + + if (!tdls_soc) { + tdls_err("tdls_soc: %pK ", tdls_soc); + return QDF_STATUS_E_NULL_VALUE; + } + + msg.bodyptr = tdls_soc; + msg.callback = tdls_process_cmd; + msg.flush_callback = tdls_post_msg_flush_cb; + msg.type = TDLS_CMD_SCAN_DONE; + + return scheduler_post_message(QDF_MODULE_ID_TDLS, + QDF_MODULE_ID_TDLS, + QDF_MODULE_ID_OS_IF, &msg); +} + +void tdls_scan_complete_event_handler(struct wlan_objmgr_vdev *vdev, + struct scan_event *event, + void *arg) +{ + enum QDF_OPMODE device_mode; + struct tdls_soc_priv_obj *tdls_soc; + + if (!vdev || !event || !arg) + return; + + if (SCAN_EVENT_TYPE_COMPLETED != event->type) + return; + + device_mode = wlan_vdev_mlme_get_opmode(vdev); + + if (device_mode != QDF_STA_MODE && + device_mode != QDF_P2P_CLIENT_MODE) + return; + tdls_soc = (struct tdls_soc_priv_obj *) arg; + tdls_post_scan_done_msg(tdls_soc); +} + +QDF_STATUS tdls_scan_callback(struct tdls_soc_priv_obj *tdls_soc) +{ + struct tdls_vdev_priv_obj *tdls_vdev; + struct wlan_objmgr_vdev *vdev; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + /* if tdls is not enabled, then continue scan */ + if (TDLS_SUPPORT_DISABLED == tdls_soc->tdls_current_mode) + return status; + + /* Get the vdev based on vdev operating mode*/ + vdev = tdls_get_vdev(tdls_soc->soc, WLAN_TDLS_NB_ID); + if (!vdev) + return status; + + tdls_vdev = wlan_vdev_get_tdls_vdev_obj(vdev); + if (!tdls_vdev) + goto return_success; + + if (tdls_is_progress(tdls_vdev, NULL, 0)) { + if (tdls_soc->scan_reject_count++ >= TDLS_SCAN_REJECT_MAX) { + tdls_notice("Allow this scan req. as already max no of scan's are rejected"); + tdls_soc->scan_reject_count = 0; + status = QDF_STATUS_SUCCESS; + } else { + tdls_warn("tdls in progress. 
scan rejected %d", + tdls_soc->scan_reject_count); + status = QDF_STATUS_E_BUSY; + } + } +return_success: + wlan_objmgr_vdev_release_ref(vdev, + WLAN_TDLS_NB_ID); + return status; +} + +void tdls_scan_serialization_comp_info_cb(struct wlan_objmgr_vdev *vdev, + union wlan_serialization_rules_info *comp_info) +{ + struct tdls_soc_priv_obj *tdls_soc; + QDF_STATUS status; + if (!comp_info) + return; + + tdls_soc = tdls_soc_global; + comp_info->scan_info.is_tdls_in_progress = false; + status = tdls_scan_callback(tdls_soc); + if (QDF_STATUS_E_BUSY == status) + comp_info->scan_info.is_tdls_in_progress = true; +} + + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_main.h b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_main.h new file mode 100644 index 0000000000000000000000000000000000000000..6cfacfa80fd735382031bb0cee75d9a4417bf961 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_main.h @@ -0,0 +1,752 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_tdls_main.h + * + * TDLS core function declaration + */ + +#if !defined(_WLAN_TDLS_MAIN_H_) +#define _WLAN_TDLS_MAIN_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "wlan_serialization_api.h" +#include "wlan_utility.h" + +/* Bit mask flag for tdls_option to FW */ +#define ENA_TDLS_OFFCHAN (1 << 0) /* TDLS Off Channel support */ +#define ENA_TDLS_BUFFER_STA (1 << 1) /* TDLS Buffer STA support */ +#define ENA_TDLS_SLEEP_STA (1 << 2) /* TDLS Sleep STA support */ + +#define BW_20_OFFSET_BIT 0 +#define BW_40_OFFSET_BIT 1 +#define BW_80_OFFSET_BIT 2 +#define BW_160_OFFSET_BIT 3 + +#define TDLS_SEC_OFFCHAN_OFFSET_0 0 +#define TDLS_SEC_OFFCHAN_OFFSET_40PLUS 40 +#define TDLS_SEC_OFFCHAN_OFFSET_40MINUS (-40) +#define TDLS_SEC_OFFCHAN_OFFSET_80 80 +#define TDLS_SEC_OFFCHAN_OFFSET_160 160 +/* + * Before UpdateTimer expires, we want to timeout discovery response + * should not be more than 2000. + */ +#define TDLS_DISCOVERY_TIMEOUT_BEFORE_UPDATE 1000 +#define TDLS_SCAN_REJECT_MAX 5 + +#define tdls_log(level, args...) \ + QDF_TRACE(QDF_MODULE_ID_TDLS, level, ## args) +#define tdls_logfl(level, format, args...) \ + tdls_log(level, FL(format), ## args) + +#define tdls_debug(format, args...) \ + tdls_logfl(QDF_TRACE_LEVEL_DEBUG, format, ## args) +#define tdls_notice(format, args...) \ + tdls_logfl(QDF_TRACE_LEVEL_INFO, format, ## args) +#define tdls_warn(format, args...) \ + tdls_logfl(QDF_TRACE_LEVEL_WARN, format, ## args) +#define tdls_err(format, args...) \ + tdls_logfl(QDF_TRACE_LEVEL_ERROR, format, ## args) +#define tdls_alert(format, args...) \ + tdls_logfl(QDF_TRACE_LEVEL_FATAL, format, ## args) +#define tdls_debug_rl(params...) 
\ + QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_TDLS, params) + +#define TDLS_IS_LINK_CONNECTED(peer) \ + ((TDLS_LINK_CONNECTED == (peer)->link_status) || \ + (TDLS_LINK_TEARING == (peer)->link_status)) + +#define SET_BIT(value, mask) ((value) |= (1 << (mask))) +#define CLEAR_BIT(value, mask) ((value) &= ~(1 << (mask))) +#define CHECK_BIT(value, mask) ((value) & (1 << (mask))) +/** + * struct tdls_conn_info - TDLS connection record + * @session_id: session id + * @sta_id: sta id + * @index: index to store array offset. + * @peer_mac: peer address + */ +struct tdls_conn_info { + uint8_t session_id; + uint8_t sta_id; + uint8_t index; + struct qdf_mac_addr peer_mac; +}; + +/** + * enum tdls_nss_transition_state - TDLS NSS transition states + * @TDLS_NSS_TRANSITION_UNKNOWN: default state + * @TDLS_NSS_TRANSITION_2x2_to_1x1: transition from 2x2 to 1x1 stream + * @TDLS_NSS_TRANSITION_1x1_to_2x2: transition from 1x1 to 2x2 stream + */ +enum tdls_nss_transition_state { + TDLS_NSS_TRANSITION_S_UNKNOWN = 0, + TDLS_NSS_TRANSITION_S_2x2_to_1x1, + TDLS_NSS_TRANSITION_S_1x1_to_2x2, +}; + +/** + * struct tdls_conn_tracker_mac_table - connection tracker peer table + * @mac_address: peer mac address + * @tx_packet_cnt: number of tx pkts + * @rx_packet_cnt: number of rx pkts + * @peer_timestamp_ms: time stamp of latest peer traffic + */ +struct tdls_conn_tracker_mac_table { + struct qdf_mac_addr mac_address; + uint32_t tx_packet_cnt; + uint32_t rx_packet_cnt; + uint32_t peer_timestamp_ms; +}; + +/** + * struct tdls_set_state_db - to record set tdls state command, we need to + * set correct tdls state to firmware: + * 1. enable tdls in firmware before tdls connection; + * 2. disable tdls if concurrency happen, before disable tdls, all active peer + * should be deleted in firmware. 
+ * + * @set_state_cnt: tdls set state count + * @vdev_id: vdev id of last set state command + */ +struct tdls_set_state_info { + uint8_t set_state_cnt; + uint8_t vdev_id; +}; + +/** + * struct tdls_psoc_priv_ctx - tdls context + * @soc: objmgr psoc + * @tdls_current_mode: current tdls mode + * @tdls_last_mode: last tdls mode + * @scan_reject_count: number of times scan rejected due to TDLS + * @tdls_source_bitmap: bit map to set/reset TDLS by different sources + * @tdls_conn_info: this tdls_conn_info can be removed and we can use peer type + * of peer object to get the active tdls peers + * @tdls_configs: tdls user configure + * @max_num_tdls_sta: maximum TDLS station number allowed upon runtime condition + * @connected_peer_count: tdls peer connected count + * @tdls_off_channel: tdls off channel number + * @tdls_channel_offset: tdls channel offset + * @tdls_fw_off_chan_mode: tdls fw off channel mode + * @enable_tdls_connection_tracker: enable tdls connection tracker + * @tdls_external_peer_count: external tdls peer count + * @tdls_nss_switch_in_progress: tdls antenna switch in progress + * @tdls_nss_teardown_complete: tdls tear down complete + * @tdls_nss_transition_mode: tdls nss transition mode + * @tdls_teardown_peers_cnt: tdls tear down peer count + * @set_state_info: set tdls state info + * @tdls_event_cb: tdls event callback + * @tdls_evt_cb_data: tdls event user data + * @tdls_peer_context: userdata for register/deregister TDLS peer + * @tdls_reg_peer: register tdls peer with datapath + * @tdls_dereg_peer: deregister tdls peer from datapath + * @tx_q_ack: queue for tx frames waiting for ack + * @tdls_con_cap: tdls concurrency support + * @tdls_send_mgmt_req: store eWNI_SME_TDLS_SEND_MGMT_REQ value + * @tdls_add_sta_req: store eWNI_SME_TDLS_ADD_STA_REQ value + * @tdls_del_sta_req: store eWNI_SME_TDLS_DEL_STA_REQ value + * @tdls_update_peer_state: store WMA_UPDATE_TDLS_PEER_STATE value + * @tdls_del_all_peers:store eWNI_SME_DEL_ALL_TDLS_PEERS + * 
@tdls_update_dp_vdev_flags store CDP_UPDATE_TDLS_FLAGS + * @tdls_idle_peer_data: provide information about idle peer + * @tdls_ct_spinlock: connection tracker spin lock + * @tdls_osif_init_cb: Callback to initialize the tdls private + * @tdls_osif_deinit_cb: Callback to deinitialize the tdls private + */ +struct tdls_soc_priv_obj { + struct wlan_objmgr_psoc *soc; + enum tdls_feature_mode tdls_current_mode; + enum tdls_feature_mode tdls_last_mode; + int scan_reject_count; + unsigned long tdls_source_bitmap; + struct tdls_conn_info tdls_conn_info[WLAN_TDLS_STA_MAX_NUM]; + struct tdls_user_config tdls_configs; + uint16_t max_num_tdls_sta; + uint16_t connected_peer_count; + uint8_t tdls_off_channel; + uint16_t tdls_channel_offset; + int32_t tdls_fw_off_chan_mode; + bool enable_tdls_connection_tracker; + uint8_t tdls_external_peer_count; + bool tdls_nss_switch_in_progress; + bool tdls_nss_teardown_complete; + bool tdls_disable_in_progress; + enum tdls_nss_transition_state tdls_nss_transition_mode; + int32_t tdls_teardown_peers_cnt; + struct tdls_set_state_info set_state_info; + tdls_rx_callback tdls_rx_cb; + void *tdls_rx_cb_data; + tdls_wmm_check tdls_wmm_cb; + void *tdls_wmm_cb_data; + tdls_evt_callback tdls_event_cb; + void *tdls_evt_cb_data; + void *tdls_peer_context; + tdls_register_peer_callback tdls_reg_peer; + tdls_deregister_peer_callback tdls_dereg_peer; + tdls_dp_vdev_update_flags_callback tdls_dp_vdev_update; + qdf_list_t tx_q_ack; + enum tdls_conc_cap tdls_con_cap; + uint16_t tdls_send_mgmt_req; + uint16_t tdls_add_sta_req; + uint16_t tdls_del_sta_req; + uint16_t tdls_update_peer_state; + uint16_t tdls_del_all_peers; + uint32_t tdls_update_dp_vdev_flags; + qdf_spinlock_t tdls_ct_spinlock; + tdls_vdev_init_cb tdls_osif_init_cb; + tdls_vdev_deinit_cb tdls_osif_deinit_cb; +}; + +/** + * struct tdls_vdev_priv_obj - tdls private vdev object + * @vdev: vdev objmgr object + * @peer_list: tdls peer list on this vdev + * @peer_update_timer: connection tracker timer 
+ * @peer_discovery_timer: peer discovery timer
+ * @threshold_config: threshold config
+ * @discovery_peer_cnt: discovery peer count
+ * @discovery_sent_cnt: discovery sent count
+ * @ap_rssi: ap rssi
+ * @curr_candidate: current candidate
+ * @ct_peer_table: linear mac address table for counting the packets
+ * @valid_mac_entries: number of valid mac entries in @ct_peer_table
+ * @magic: magic
+ * @tx_queue: tx frame queue
+ */
+struct tdls_vdev_priv_obj {
+	struct wlan_objmgr_vdev *vdev;
+	qdf_list_t peer_list[WLAN_TDLS_PEER_LIST_SIZE];
+	qdf_mc_timer_t peer_update_timer;
+	qdf_mc_timer_t peer_discovery_timer;
+	struct tdls_config_params threshold_config;
+	int32_t discovery_peer_cnt;
+	uint32_t discovery_sent_cnt;
+	int8_t ap_rssi;
+	struct tdls_peer *curr_candidate;
+	struct tdls_conn_tracker_mac_table
+			ct_peer_table[WLAN_TDLS_CT_TABLE_SIZE];
+	uint8_t valid_mac_entries;
+	uint32_t magic;
+	uint8_t session_id;
+	qdf_list_t tx_queue;
+};
+
+/**
+ * struct tdls_peer_mlme_info - tdls peer mlme info
+ **/
+struct tdls_peer_mlme_info {
+};
+
+/**
+ * struct tdls_peer - tdls peer data
+ * @node: node
+ * @vdev_priv: tdls vdev priv obj
+ * @peer_mac: peer mac address
+ * @sta_id: station identifier
+ * @rssi: rssi
+ * @tdls_support: tdls support
+ * @link_status: tdls link status
+ * @is_responder: is responder
+ * @discovery_processed: discovery processed
+ * @discovery_attempt: discovery attempt
+ * @tx_pkt: tx packet
+ * @rx_pkt: rx packet
+ * @uapsd_queues: uapsd queues
+ * @max_sp: max sp
+ * @buf_sta_capable: is buffer sta
+ * @off_channel_capable: is offchannel supported flag
+ * @supported_channels_len: supported channels length
+ * @supported_channels: supported channels
+ * @supported_oper_classes_len: supported operation classes length
+ * @supported_oper_classes: supported operation classes
+ * @is_forced_peer: is forced peer
+ * @op_class_for_pref_off_chan: op class for preferred off channel
+ * @pref_off_chan_num: preferred off channel number
+ * 
@op_class_for_pref_off_chan_is_set: op class for preferred off channel set + * @peer_idle_timer: time to check idle traffic in tdls peers + * @is_peer_idle_timer_initialised: Flag to check idle timer init + * @spatial_streams: Number of TX/RX spatial streams for TDLS + * @reason: reason + * @state_change_notification: state change notification + * @qos: QOS capability of TDLS link + */ +struct tdls_peer { + qdf_list_node_t node; + struct tdls_vdev_priv_obj *vdev_priv; + struct qdf_mac_addr peer_mac; + uint16_t sta_id; + int8_t rssi; + enum tdls_peer_capab tdls_support; + enum tdls_link_state link_status; + uint8_t is_responder; + uint8_t discovery_processed; + uint16_t discovery_attempt; + uint16_t tx_pkt; + uint16_t rx_pkt; + uint8_t uapsd_queues; + uint8_t max_sp; + uint8_t buf_sta_capable; + uint8_t off_channel_capable; + uint8_t supported_channels_len; + uint8_t supported_channels[WLAN_MAC_MAX_SUPP_CHANNELS]; + uint8_t supported_oper_classes_len; + uint8_t supported_oper_classes[WLAN_MAX_SUPP_OPER_CLASSES]; + bool is_forced_peer; + uint8_t op_class_for_pref_off_chan; + uint8_t pref_off_chan_num; + uint8_t op_class_for_pref_off_chan_is_set; + qdf_mc_timer_t peer_idle_timer; + bool is_peer_idle_timer_initialised; + uint8_t spatial_streams; + enum tdls_link_state_reason reason; + tdls_state_change_callback state_change_notification; + uint8_t qos; + struct tdls_peer_mlme_info *tdls_info; +}; + +/** + * struct tdls_os_if_event - TDLS os event info + * @type: type of event + * @info: pointer to event information + */ +struct tdls_os_if_event { + uint32_t type; + void *info; +}; + +/** + * enum tdls_os_if_notification - TDLS notification from OS IF + * @TDLS_NOTIFY_STA_SESSION_INCREMENT: sta session count incremented + * @TDLS_NOTIFY_STA_SESSION_DECREMENT: sta session count decremented + */ +enum tdls_os_if_notification { + TDLS_NOTIFY_STA_SESSION_INCREMENT, + TDLS_NOTIFY_STA_SESSION_DECREMENT +}; +/** + * wlan_vdev_get_tdls_soc_obj - private API to get tdls soc 
object from vdev + * @vdev: vdev object + * + * Return: tdls soc object + */ +static inline struct tdls_soc_priv_obj * +wlan_vdev_get_tdls_soc_obj(struct wlan_objmgr_vdev *vdev) +{ + struct wlan_objmgr_psoc *psoc; + struct tdls_soc_priv_obj *soc_obj; + + if (!vdev) { + tdls_err("NULL vdev"); + return NULL; + } + + psoc = wlan_vdev_get_psoc(vdev); + if (!psoc) { + tdls_err("can't get psoc"); + return NULL; + } + + soc_obj = (struct tdls_soc_priv_obj *) + wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_TDLS); + + return soc_obj; +} + +/** + * wlan_psoc_get_tdls_soc_obj - private API to get tdls soc object from psoc + * @psoc: psoc object + * + * Return: tdls soc object + */ +static inline struct tdls_soc_priv_obj * +wlan_psoc_get_tdls_soc_obj(struct wlan_objmgr_psoc *psoc) +{ + struct tdls_soc_priv_obj *soc_obj; + if (!psoc) { + tdls_err("NULL psoc"); + return NULL; + } + soc_obj = (struct tdls_soc_priv_obj *) + wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_TDLS); + + return soc_obj; +} + +/** + * wlan_vdev_get_tdls_vdev_obj - private API to get tdls vdev object from vdev + * @vdev: vdev object + * + * Return: tdls vdev object + */ +static inline struct tdls_vdev_priv_obj * +wlan_vdev_get_tdls_vdev_obj(struct wlan_objmgr_vdev *vdev) +{ + struct tdls_vdev_priv_obj *vdev_obj; + + if (!vdev) { + tdls_err("NULL vdev"); + return NULL; + } + + vdev_obj = (struct tdls_vdev_priv_obj *) + wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_TDLS); + + return vdev_obj; +} + +/** + * tdls_set_link_status - tdls set link status + * @vdev: vdev object + * @mac: mac address of tdls peer + * @link_state: tdls link state + * @link_reason: reason + */ +void tdls_set_link_status(struct tdls_vdev_priv_obj *vdev, + const uint8_t *mac, + enum tdls_link_state link_state, + enum tdls_link_state_reason link_reason); +/** + * tdls_psoc_obj_create_notification() - tdls psoc create notification handler + * @psoc: psoc object + * @arg_list: Argument list + * 
+ * Return: QDF_STATUS + */ +QDF_STATUS tdls_psoc_obj_create_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list); + +/** + * tdls_psoc_obj_destroy_notification() - tdls psoc destroy notification handler + * @psoc: psoc object + * @arg_list: Argument list + * + * Return: QDF_STATUS + */ +QDF_STATUS tdls_psoc_obj_destroy_notification(struct wlan_objmgr_psoc *psoc, + void *arg_list); + +/** + * tdls_vdev_obj_create_notification() - tdls vdev create notification handler + * @vdev: vdev object + * @arg_list: Argument list + * + * Return: QDF_STATUS + */ +QDF_STATUS tdls_vdev_obj_create_notification(struct wlan_objmgr_vdev *vdev, + void *arg_list); + +/** + * tdls_vdev_obj_destroy_notification() - tdls vdev destroy notification handler + * @vdev: vdev object + * @arg_list: Argument list + * + * Return: QDF_STATUS + */ +QDF_STATUS tdls_vdev_obj_destroy_notification(struct wlan_objmgr_vdev *vdev, + void *arg_list); + +/** + * tdls_process_cmd() - tdls main command process function + * @msg: scheduler msg + * + * Return: QDF_STATUS + */ +QDF_STATUS tdls_process_cmd(struct scheduler_msg *msg); + +/** + * tdls_process_evt() - tdls main event process function + * @msg: scheduler msg + * + * Return: QDF_STATUS + */ +QDF_STATUS tdls_process_evt(struct scheduler_msg *msg); + +/** + * tdls_timer_restart() - restart TDLS timer + * @vdev: VDEV object manager + * @timer: timer to restart + * @expiration_time: new expiration time to set for the timer + * + * Return: Void + */ +void tdls_timer_restart(struct wlan_objmgr_vdev *vdev, + qdf_mc_timer_t *timer, + uint32_t expiration_time); + +/** + * wlan_hdd_tdls_timers_stop() - stop all the tdls timers running + * @tdls_vdev: TDLS vdev + * + * Return: none + */ +void tdls_timers_stop(struct tdls_vdev_priv_obj *tdls_vdev); + +/** + * tdls_get_vdev_objects() - Get TDLS private objects + * @vdev: VDEV object manager + * @tdls_vdev_obj: tdls vdev object + * @tdls_soc_obj: tdls soc object + * + * Return: QDF_STATUS + */ +QDF_STATUS 
tdls_get_vdev_objects(struct wlan_objmgr_vdev *vdev,
+				   struct tdls_vdev_priv_obj **tdls_vdev_obj,
+				   struct tdls_soc_priv_obj **tdls_soc_obj);
+
+/**
+ * tdls_set_ct_mode() - Set the tdls connection tracker mode
+ * @psoc: psoc object
+ *
+ * This routine is called to set the tdls connection tracker operation status
+ *
+ * Return: NONE
+ */
+void tdls_set_ct_mode(struct wlan_objmgr_psoc *psoc);
+
+/**
+ * tdls_set_operation_mode() - set tdls operating mode
+ * @tdls_set_mode: tdls mode set params
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS tdls_set_operation_mode(struct tdls_set_mode_params *tdls_set_mode);
+
+/**
+ * tdls_notify_sta_connect() - Update tdls state for every
+ * connect event.
+ * @notify: sta connect params
+ *
+ * After every connect event in the system, check whether TDLS
+ * can be enabled in the system. If TDLS can be enabled, update the
+ * TDLS state as needed.
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS tdls_notify_sta_connect(struct tdls_sta_notify_params *notify);
+
+/**
+ * tdls_notify_sta_disconnect() - Update tdls state for every
+ * disconnect event.
+ * @notify: sta disconnect params
+ *
+ * After every disconnect event in the system, check whether TDLS
+ * can be disabled/enabled in the system and update the
+ * TDLS state as needed.
+ * + * Return: QDF_STATUS + */ +QDF_STATUS tdls_notify_sta_disconnect(struct tdls_sta_notify_params *notify); + + +/** + * tdls_get_all_peers_from_list() - get all the tdls peers from the list + * @get_tdls_peers: get_tdls_peers object + * + * Return: None + */ +void tdls_get_all_peers_from_list( + struct tdls_get_all_peers *get_tdls_peers); + +/** + * tdls_notify_reset_adapter() - notify reset adapter + * @vdev: vdev object + * + * Notify TDLS about the adapter reset + * + * Return: None + */ +void tdls_notify_reset_adapter(struct wlan_objmgr_vdev *vdev); + +/** + * tdls_peers_deleted_notification() - peer delete notification + * @psoc: soc object + * @vdev_id: vdev id + * + * Legacy lim layer will delete tdls peers for roaming and heart beat failures + * and notify the component about the delete event to update the tdls. + * state. + * + * Return: QDF_STATUS + */ +QDF_STATUS tdls_peers_deleted_notification(struct wlan_objmgr_psoc *psoc, + uint8_t vdev_id); + +/** + * tdls_notify_decrement_session() - Notify the session decrement + * @psoc: psoc object manager + * + * Policy manager notify TDLS about session decrement + * + * Return: None + */ +void tdls_notify_decrement_session(struct wlan_objmgr_psoc *psoc); + +/** + * tdls_send_update_to_fw - update tdls status info + * @tdls_vdev_obj: tdls vdev private object. + * @tdls_prohibited: indicates whether tdls is prohibited. + * @tdls_chan_swit_prohibited: indicates whether tdls channel switch + * is prohibited. + * @sta_connect_event: indicate sta connect or disconnect event + * @session_id: session id + * + * Normally an AP does not influence TDLS connection between STAs + * associated to it. But AP may set bits for TDLS Prohibited or + * TDLS Channel Switch Prohibited in Extended Capability IE in + * Assoc/Re-assoc response to STA. So after STA is connected to + * an AP, call this function to update TDLS status as per those + * bits set in Ext Cap IE in received Assoc/Re-assoc response + * from AP. 
+ * + * Return: None. + */ +void tdls_send_update_to_fw(struct tdls_vdev_priv_obj *tdls_vdev_obj, + struct tdls_soc_priv_obj *tdls_soc_obj, + bool tdls_prohibited, + bool tdls_chan_swit_prohibited, + bool sta_connect_event, + uint8_t session_id); + +/** + * tdls_notify_increment_session() - Notify the session increment + * @psoc: psoc object manager + * + * Policy manager notify TDLS about session increment + * + * Return: None + */ +void tdls_notify_increment_session(struct wlan_objmgr_psoc *psoc); + +/** + * tdls_check_is_tdls_allowed() - check is tdls allowed or not + * @vdev: vdev object + * + * Function determines the whether TDLS allowed in the system + * + * Return: true or false + */ +bool tdls_check_is_tdls_allowed(struct wlan_objmgr_vdev *vdev); + +/** + * tdls_get_vdev() - Get tdls specific vdev object manager + * @psoc: wlan psoc object manager + * @dbg_id: debug id + * + * If TDLS possible, return the corresponding vdev + * to enable TDLS in the system. + * + * Return: vdev manager pointer or NULL. + */ +struct wlan_objmgr_vdev *tdls_get_vdev(struct wlan_objmgr_psoc *psoc, + wlan_objmgr_ref_dbgid dbg_id); + +/** + * tdls_process_policy_mgr_notification() - process policy manager notification + * @psoc: soc object manager + * + * Return: QDF_STATUS + */ +QDF_STATUS +tdls_process_policy_mgr_notification(struct wlan_objmgr_psoc *psoc); + +/** + * tdls_process_decrement_active_session() - process policy manager decrement + * sessions. 
+ * @psoc: soc object
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS
+tdls_process_decrement_active_session(struct wlan_objmgr_psoc *psoc);
+
+/**
+ * tdls_scan_complete_event_handler() - scan complete event handler for tdls
+ * @vdev: vdev object
+ * @event: scan event
+ * @arg: tdls soc object
+ *
+ * Return: None
+ */
+void tdls_scan_complete_event_handler(struct wlan_objmgr_vdev *vdev,
+				      struct scan_event *event,
+				      void *arg);
+
+/**
+ * tdls_scan_callback() - callback for TDLS scan operation
+ * @tdls_soc: tdls soc pvt object
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS tdls_scan_callback(struct tdls_soc_priv_obj *tdls_soc);
+
+/**
+ * tdls_scan_done_callback() - callback for tdls scan done event
+ * @tdls_soc: tdls soc object
+ *
+ * Return: Void
+ */
+void tdls_scan_done_callback(struct tdls_soc_priv_obj *tdls_soc);
+
+/**
+ * tdls_scan_serialization_comp_info_cb() - callback for scan start
+ * @vdev: VDEV on which the scan command is being processed
+ * @comp_info: serialize rules info
+ *
+ * Return: None. Sets @comp_info->scan_info.is_tdls_in_progress to true
+ * when a TDLS operation is in progress, which blocks the scan
+ */
+void tdls_scan_serialization_comp_info_cb(struct wlan_objmgr_vdev *vdev,
+		union wlan_serialization_rules_info *comp_info);
+
+/**
+ * tdls_set_offchan_mode() - update tdls status info
+ * @psoc: soc object
+ * @param: channel switch params
+ *
+ * send message to WMI to set TDLS off channel in f/w
+ *
+ * Return: QDF_STATUS.
+ */
+QDF_STATUS tdls_set_offchan_mode(struct wlan_objmgr_psoc *psoc,
+				 struct tdls_channel_switch_params *param);
+
+/**
+ * tdls_delete_all_peers_indication() - update tdls status info
+ * @delete_peers_ind: Delete peers indication params
+ *
+ * Notify tdls component to cleanup all peers
+ *
+ * Return: QDF_STATUS.
+ */ + +QDF_STATUS tdls_delete_all_peers_indication( + struct tdls_delete_all_peers_params *delete_peers_ind); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_mgmt.c b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_mgmt.c new file mode 100644 index 0000000000000000000000000000000000000000..7f96a59d5276ff3b17a269175e4245ec55f53139 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_mgmt.c @@ -0,0 +1,447 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_tdls_mgmt.c + * + * TDLS management frames implementation + */ + +#include "wlan_tdls_main.h" +#include "wlan_tdls_tgt_api.h" +#include +#include "wlan_mgmt_txrx_utils_api.h" +#include "wlan_tdls_peer.h" +#include "wlan_tdls_ct.h" +#include "wlan_tdls_cmds_process.h" +#include "wlan_tdls_mgmt.h" + +static +const char *const tdls_action_frames_type[] = { "TDLS Setup Request", + "TDLS Setup Response", + "TDLS Setup Confirm", + "TDLS Teardown", + "TDLS Peer Traffic Indication", + "TDLS Channel Switch Request", + "TDLS Channel Switch Response", + "TDLS Peer PSM Request", + "TDLS Peer PSM Response", + "TDLS Peer Traffic Response", + "TDLS Discovery Request"}; + +/** + * tdls_set_rssi() - Set TDLS RSSI on peer given by mac + * @tdls_vdev: tdls vdev object + * @mac: MAC address of Peer + * @rx_rssi: rssi value + * + * Set RSSI on TDSL peer + * + * Return: 0 for success or -EINVAL otherwise + */ +static int tdls_set_rssi(struct tdls_vdev_priv_obj *tdls_vdev, + const uint8_t *mac, + int8_t rx_rssi) +{ + struct tdls_peer *curr_peer; + + curr_peer = tdls_find_peer(tdls_vdev, mac); + if (curr_peer == NULL) { + tdls_err("curr_peer is NULL"); + return -EINVAL; + } + + curr_peer->rssi = rx_rssi; + + return 0; +} + +/** + * tdls_process_rx_mgmt() - process tdls rx mgmt frames + * @rx_mgmt_event: tdls rx mgmt event + * @tdls_vdev: tdls vdev object + * + * Return: QDF_STATUS + */ +static QDF_STATUS tdls_process_rx_mgmt( + struct tdls_rx_mgmt_event *rx_mgmt_event, + struct tdls_vdev_priv_obj *tdls_vdev) +{ + struct tdls_rx_mgmt_frame *rx_mgmt; + struct tdls_soc_priv_obj *tdls_soc_obj; + uint8_t *mac; + enum tdls_actioncode action_frame_type; + + if (!rx_mgmt_event) + return QDF_STATUS_E_INVAL; + + tdls_soc_obj = rx_mgmt_event->tdls_soc_obj; + rx_mgmt = rx_mgmt_event->rx_mgmt; + + if (!tdls_soc_obj || !rx_mgmt) { + tdls_err("invalid psoc object or rx mgmt"); + return QDF_STATUS_E_INVAL; + } + + tdls_debug("soc:%pK, frame_len:%d, rx_chan:%d, vdev_id:%d, 
frm_type:%d, rx_rssi:%d, buf:%pK", + tdls_soc_obj->soc, rx_mgmt->frame_len, + rx_mgmt->rx_chan, rx_mgmt->vdev_id, rx_mgmt->frm_type, + rx_mgmt->rx_rssi, rx_mgmt->buf); + + if (rx_mgmt->buf[TDLS_PUBLIC_ACTION_FRAME_OFFSET + 1] == + TDLS_PUBLIC_ACTION_DISC_RESP) { + mac = &rx_mgmt->buf[TDLS_80211_PEER_ADDR_OFFSET]; + tdls_notice("[TDLS] TDLS Discovery Response," + QDF_MAC_ADDR_STR " RSSI[%d] <--- OTA", + QDF_MAC_ADDR_ARRAY(mac), rx_mgmt->rx_rssi); + tdls_recv_discovery_resp(tdls_vdev, mac); + tdls_set_rssi(tdls_vdev, mac, rx_mgmt->rx_rssi); + } + + if (rx_mgmt->buf[TDLS_PUBLIC_ACTION_FRAME_OFFSET] == + TDLS_ACTION_FRAME) { + action_frame_type = + rx_mgmt->buf[TDLS_PUBLIC_ACTION_FRAME_OFFSET + 1]; + if (action_frame_type >= TDLS_ACTION_FRAME_TYPE_MAX) { + tdls_debug("[TDLS] unknown[%d] <--- OTA", + action_frame_type); + } else { + tdls_notice("[TDLS] %s <--- OTA", + tdls_action_frames_type[action_frame_type]); + } + } + + /* tdls_soc_obj->tdls_rx_cb ==> wlan_cfg80211_tdls_rx_callback() */ + if (tdls_soc_obj && tdls_soc_obj->tdls_rx_cb) + tdls_soc_obj->tdls_rx_cb(tdls_soc_obj->tdls_rx_cb_data, + rx_mgmt); + else + tdls_debug("rx mgmt, but no valid up layer callback"); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tdls_process_rx_frame(struct scheduler_msg *msg) +{ + struct wlan_objmgr_vdev *vdev; + struct tdls_rx_mgmt_event *tdls_rx; + struct tdls_vdev_priv_obj *tdls_vdev; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!(msg->bodyptr)) { + tdls_err("invalid message body"); + return QDF_STATUS_E_INVAL; + } + + tdls_rx = (struct tdls_rx_mgmt_event *) msg->bodyptr; + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(tdls_rx->tdls_soc_obj->soc, + tdls_rx->rx_mgmt->vdev_id, WLAN_TDLS_NB_ID); + + if (vdev) { + tdls_debug("tdls rx mgmt frame received"); + tdls_vdev = wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_TDLS); + if (tdls_vdev) + status = tdls_process_rx_mgmt(tdls_rx, tdls_vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID); + } + + 
qdf_mem_free(tdls_rx->rx_mgmt); + qdf_mem_free(msg->bodyptr); + msg->bodyptr = NULL; + + return status; +} + +QDF_STATUS tdls_mgmt_rx_ops(struct wlan_objmgr_psoc *psoc, + bool isregister) +{ + struct mgmt_txrx_mgmt_frame_cb_info frm_cb_info; + QDF_STATUS status; + int num_of_entries; + + tdls_debug("psoc:%pK, is register rx:%d", psoc, isregister); + + frm_cb_info.frm_type = MGMT_ACTION_TDLS_DISCRESP; + frm_cb_info.mgmt_rx_cb = tgt_tdls_mgmt_frame_rx_cb; + num_of_entries = 1; + + if (isregister) + status = wlan_mgmt_txrx_register_rx_cb(psoc, + WLAN_UMAC_COMP_TDLS, &frm_cb_info, + num_of_entries); + else + status = wlan_mgmt_txrx_deregister_rx_cb(psoc, + WLAN_UMAC_COMP_TDLS, &frm_cb_info, + num_of_entries); + + return status; +} + +static QDF_STATUS +tdls_internal_send_mgmt_tx_done(struct tdls_action_frame_request *req, + QDF_STATUS status) +{ + struct tdls_soc_priv_obj *tdls_soc_obj; + struct tdls_osif_indication indication; + + if (!req || !req->vdev) + return QDF_STATUS_E_NULL_VALUE; + + indication.status = status; + indication.vdev = req->vdev; + tdls_soc_obj = wlan_vdev_get_tdls_soc_obj(req->vdev); + if (tdls_soc_obj && tdls_soc_obj->tdls_event_cb) + tdls_soc_obj->tdls_event_cb(tdls_soc_obj->tdls_evt_cb_data, + TDLS_EVENT_MGMT_TX_ACK_CNF, &indication); + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS tdls_activate_send_mgmt_request_flush_cb( + struct scheduler_msg *msg) +{ + struct tdls_send_mgmt_request *tdls_mgmt_req; + + tdls_mgmt_req = msg->bodyptr; + + qdf_mem_free(tdls_mgmt_req); + msg->bodyptr = NULL; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS tdls_activate_send_mgmt_request( + struct tdls_action_frame_request *action_req) +{ + struct wlan_objmgr_peer *peer; + struct tdls_soc_priv_obj *tdls_soc_obj; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct scheduler_msg msg = {0}; + struct tdls_send_mgmt_request *tdls_mgmt_req; + + if (!action_req || !action_req->vdev) + return QDF_STATUS_E_NULL_VALUE; + + tdls_soc_obj = 
wlan_vdev_get_tdls_soc_obj(action_req->vdev); + if (!tdls_soc_obj) { + status = QDF_STATUS_E_NULL_VALUE; + goto release_cmd; + } + + tdls_mgmt_req = qdf_mem_malloc(sizeof(struct tdls_send_mgmt_request) + + action_req->tdls_mgmt.len); + if (NULL == tdls_mgmt_req) { + status = QDF_STATUS_E_NOMEM; + tdls_err("mem alloc failed "); + QDF_ASSERT(0); + goto release_cmd; + } + + tdls_debug("session_id %d " + "tdls_mgmt.dialog %d " + "tdls_mgmt.frame_type %d " + "tdls_mgmt.status_code %d " + "tdls_mgmt.responder %d " + "tdls_mgmt.peer_capability %d", + action_req->session_id, + action_req->tdls_mgmt.dialog, + action_req->tdls_mgmt.frame_type, + action_req->tdls_mgmt.status_code, + action_req->tdls_mgmt.responder, + action_req->tdls_mgmt.peer_capability); + + tdls_mgmt_req->session_id = action_req->session_id; + /* Using dialog as transactionId. This can be used to + * match response with request + */ + tdls_mgmt_req->transaction_id = action_req->tdls_mgmt.dialog; + tdls_mgmt_req->req_type = action_req->tdls_mgmt.frame_type; + tdls_mgmt_req->dialog = action_req->tdls_mgmt.dialog; + tdls_mgmt_req->status_code = action_req->tdls_mgmt.status_code; + tdls_mgmt_req->responder = action_req->tdls_mgmt.responder; + tdls_mgmt_req->peer_capability = action_req->tdls_mgmt.peer_capability; + + peer = wlan_vdev_get_bsspeer(action_req->vdev); + + status = wlan_objmgr_peer_try_get_ref(peer, WLAN_TDLS_SB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_mem_free(tdls_mgmt_req); + goto release_cmd; + } + + qdf_mem_copy(tdls_mgmt_req->bssid.bytes, + wlan_peer_get_macaddr(peer), QDF_MAC_ADDR_SIZE); + + qdf_mem_copy(tdls_mgmt_req->peer_mac.bytes, + action_req->tdls_mgmt.peer_mac.bytes, QDF_MAC_ADDR_SIZE); + + if (action_req->tdls_mgmt.len) { + qdf_mem_copy(tdls_mgmt_req->add_ie, action_req->tdls_mgmt.buf, + action_req->tdls_mgmt.len); + } + + tdls_mgmt_req->length = sizeof(struct tdls_send_mgmt_request) + + action_req->tdls_mgmt.len; + if (action_req->use_default_ac) + tdls_mgmt_req->ac = 
WIFI_AC_VI; + else + tdls_mgmt_req->ac = WIFI_AC_BK; + + /* Send the request to PE. */ + qdf_mem_zero(&msg, sizeof(msg)); + + tdls_debug("sending TDLS Mgmt Frame req to PE "); + tdls_mgmt_req->message_type = tdls_soc_obj->tdls_send_mgmt_req; + + msg.type = tdls_soc_obj->tdls_send_mgmt_req; + msg.bodyptr = tdls_mgmt_req; + msg.flush_callback = tdls_activate_send_mgmt_request_flush_cb; + + status = scheduler_post_message(QDF_MODULE_ID_TDLS, + QDF_MODULE_ID_TDLS, + QDF_MODULE_ID_PE, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("failed to post msg, status %d", status); + qdf_mem_free(tdls_mgmt_req); + } + + wlan_objmgr_peer_release_ref(peer, WLAN_TDLS_SB_ID); + +release_cmd: + /*update tdls nss infornation based on action code */ + tdls_reset_nss(tdls_soc_obj, action_req->chk_frame.action_code); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_internal_send_mgmt_tx_done(action_req, status); + tdls_release_serialization_command(action_req->vdev, + WLAN_SER_CMD_TDLS_SEND_MGMT); + } + + return status; +} + +static QDF_STATUS +tdls_send_mgmt_serialize_callback(struct wlan_serialization_command *cmd, + enum wlan_serialization_cb_reason reason) +{ + struct tdls_action_frame_request *req; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!cmd || !cmd->umac_cmd) { + tdls_err("invalid params cmd: %pK, ", cmd); + return QDF_STATUS_E_NULL_VALUE; + } + req = cmd->umac_cmd; + + tdls_debug("reason: %d, vdev_id: %d", + reason, req->vdev_id); + + switch (reason) { + case WLAN_SER_CB_ACTIVATE_CMD: + /* command moved to active list */ + status = tdls_activate_send_mgmt_request(req); + break; + + case WLAN_SER_CB_CANCEL_CMD: + case WLAN_SER_CB_ACTIVE_CMD_TIMEOUT: + /* command removed from pending list. + * notify status complete with failure + */ + status = tdls_internal_send_mgmt_tx_done(req, + QDF_STATUS_E_FAILURE); + break; + + case WLAN_SER_CB_RELEASE_MEM_CMD: + /* command successfully completed. 
+ * release tdls_action_frame_request memory + */ + wlan_objmgr_vdev_release_ref(req->vdev, WLAN_TDLS_NB_ID); + qdf_mem_free(req); + break; + + default: + /* Do nothing but logging */ + QDF_ASSERT(0); + status = QDF_STATUS_E_INVAL; + break; + } + + return status; +} + +QDF_STATUS tdls_process_mgmt_req( + struct tdls_action_frame_request *tdls_mgmt_req) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct wlan_serialization_command cmd = {0, }; + enum wlan_serialization_status ser_cmd_status; + + /* If connected and in Infra. Only then allow this */ + status = tdls_validate_mgmt_request(tdls_mgmt_req); + if (status != QDF_STATUS_SUCCESS) { + status = tdls_internal_send_mgmt_tx_done(tdls_mgmt_req, + status); + goto error_mgmt; + } + + /* update the responder, status code information + * after the cmd validation + */ + tdls_mgmt_req->tdls_mgmt.responder = + !tdls_mgmt_req->chk_frame.responder; + tdls_mgmt_req->tdls_mgmt.status_code = + tdls_mgmt_req->chk_frame.status_code; + + cmd.cmd_type = WLAN_SER_CMD_TDLS_SEND_MGMT; + /* Cmd Id not applicable for non scan cmds */ + cmd.cmd_id = 0; + cmd.cmd_cb = tdls_send_mgmt_serialize_callback; + cmd.umac_cmd = tdls_mgmt_req; + cmd.source = WLAN_UMAC_COMP_TDLS; + cmd.is_high_priority = false; + cmd.cmd_timeout_duration = TDLS_DEFAULT_SERIALIZE_CMD_TIMEOUT; + + cmd.vdev = tdls_mgmt_req->vdev; + + ser_cmd_status = wlan_serialization_request(&cmd); + tdls_debug("wlan_serialization_request status:%d", ser_cmd_status); + + switch (ser_cmd_status) { + case WLAN_SER_CMD_PENDING: + /* command moved to pending list.Do nothing */ + break; + case WLAN_SER_CMD_ACTIVE: + /* command moved to active list. 
Do nothing */ + break; + case WLAN_SER_CMD_DENIED_LIST_FULL: + case WLAN_SER_CMD_DENIED_RULES_FAILED: + case WLAN_SER_CMD_DENIED_UNSPECIFIED: + status = QDF_STATUS_E_FAILURE; + goto error_mgmt; + default: + QDF_ASSERT(0); + status = QDF_STATUS_E_INVAL; + goto error_mgmt; + } + return status; + +error_mgmt: + wlan_objmgr_vdev_release_ref(tdls_mgmt_req->vdev, WLAN_TDLS_NB_ID); + qdf_mem_free(tdls_mgmt_req); + return status; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_mgmt.h b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_mgmt.h new file mode 100644 index 0000000000000000000000000000000000000000..ce6f9307988f3ddcab97956d8aa31bb158297217 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_mgmt.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_tdls_mgmt.h + * + * TDLS management frames include file + */ + +#ifndef _WLAN_TDLS_MGMT_H_ +#define _WLAN_TDLS_MGMT_H_ + +#define TDLS_PUBLIC_ACTION_FRAME_OFFSET 24 +#define TDLS_PUBLIC_ACTION_FRAME 4 +#define TDLS_PUBLIC_ACTION_DISC_RESP 14 +#define TDLS_ACTION_FRAME 12 +#define TDLS_80211_PEER_ADDR_OFFSET (TDLS_PUBLIC_ACTION_FRAME + \ + QDF_MAC_ADDR_SIZE) +#define TDLS_ACTION_FRAME_TYPE_MAX 11 + +/** + * struct tdls_rx_mgmt_event - tdls rx mgmt frame event + * @tdls_soc_obj: tdls soc private object + * @rx_mgmt: tdls rx mgmt frame structure + */ +struct tdls_rx_mgmt_event { + struct tdls_soc_priv_obj *tdls_soc_obj; + struct tdls_rx_mgmt_frame *rx_mgmt; +}; + +/** + * struct tdls_send_mgmt_request - tdls management request + * @message_type: type of pe message + * @length: length of the frame. + * @session_id: session id + * @transaction_id: transaction ID for cmd + * @req_type: type of action frame + * @dialog: dialog token used in the frame. + * @status_code: status to be included in the frame. + * @responder: tdls request type + * @peer_capability: peer capability information + * @bssid: bssid + * @peer_mac: mac address of the peer + * @add_ie: additional ie's to be included + */ +struct tdls_send_mgmt_request { + uint16_t message_type; + uint16_t length; + uint8_t session_id; + uint16_t transaction_id; + uint8_t req_type; + uint8_t dialog; + uint16_t status_code; + uint8_t responder; + uint32_t peer_capability; + struct qdf_mac_addr bssid; + struct qdf_mac_addr peer_mac; + enum wifi_traffic_ac ac; + /* Variable length. Don't add any field after this. 
*/ + uint8_t add_ie[1]; +}; + +/** + * tdls_process_mgmt_req() - send a TDLS mgmt request to serialize module + * @tdls_mgmt_req: tdls management request + * + * TDLS request API, called from cfg80211 to send a TDLS frame in + * serialized manner to PE + * + *Return: QDF_STATUS + */ +QDF_STATUS tdls_process_mgmt_req( + struct tdls_action_frame_request *tdls_mgmt_req); + +/** + * tdls_mgmt_rx_ops() - register or unregister rx callback + * @psoc: psoc object + * @isregister: register if true, unregister if false + * + * This function registers or unregisters rx callback to mgmt txrx + * component. + * + * Return: QDF_STATUS + */ +QDF_STATUS tdls_mgmt_rx_ops(struct wlan_objmgr_psoc *psoc, + bool isregister); + +/** + * tdls_process_rx_frame() - process tdls rx frames + * @msg: scheduler msg + * + * Return: QDF_STATUS + */ +QDF_STATUS tdls_process_rx_frame(struct scheduler_msg *msg); +#endif + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_peer.c b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_peer.c new file mode 100644 index 0000000000000000000000000000000000000000..3f8a132e0e870c08bb67cd19d2ab0ad874e23e35 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_peer.c @@ -0,0 +1,819 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_tdls_peer.c + * + * TDLS peer basic operations + */ +#include "wlan_tdls_main.h" +#include "wlan_tdls_peer.h" +#include +#include +#include + +static uint8_t calculate_hash_key(const uint8_t *macaddr) +{ + uint8_t i, key; + + for (i = 0, key = 0; i < 6; i++) + key ^= macaddr[i]; + + return key % WLAN_TDLS_PEER_LIST_SIZE; +} + +struct tdls_peer *tdls_find_peer(struct tdls_vdev_priv_obj *vdev_obj, + const uint8_t *macaddr) +{ + uint8_t key; + QDF_STATUS status; + struct tdls_peer *peer; + qdf_list_t *head; + qdf_list_node_t *p_node; + + key = calculate_hash_key(macaddr); + head = &vdev_obj->peer_list[key]; + + status = qdf_list_peek_front(head, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + peer = qdf_container_of(p_node, struct tdls_peer, node); + if (WLAN_ADDR_EQ(&peer->peer_mac, macaddr) + == QDF_STATUS_SUCCESS) { + return peer; + } + status = qdf_list_peek_next(head, p_node, &p_node); + } + + tdls_debug("no tdls peer " QDF_MAC_ADDR_STR, + QDF_MAC_ADDR_ARRAY(macaddr)); + return NULL; +} + +/** + * tdls_find_peer_handler() - helper function for tdls_find_all_peer + * @psoc: soc object + * @obj: vdev object + * @arg: used to keep search peer parameters + * + * Return: None. 
+ */ +static void +tdls_find_peer_handler(struct wlan_objmgr_psoc *psoc, void *obj, void *arg) +{ + struct wlan_objmgr_vdev *vdev = obj; + struct tdls_search_peer_param *tdls_param = arg; + struct tdls_vdev_priv_obj *vdev_obj; + + if (tdls_param->peer) + return; + + if (!vdev) { + tdls_err("invalid vdev"); + return; + } + + if (wlan_vdev_mlme_get_opmode(vdev) != QDF_STA_MODE && + wlan_vdev_mlme_get_opmode(vdev) != QDF_P2P_CLIENT_MODE) + return; + + vdev_obj = wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_TDLS); + if (!vdev_obj) + return; + + tdls_param->peer = tdls_find_peer(vdev_obj, tdls_param->macaddr); +} + +struct tdls_peer * +tdls_find_all_peer(struct tdls_soc_priv_obj *soc_obj, const uint8_t *macaddr) +{ + struct tdls_search_peer_param tdls_search_param; + struct wlan_objmgr_psoc *psoc; + + if (!soc_obj) { + tdls_err("tdls soc object is NULL"); + return NULL; + } + + psoc = soc_obj->soc; + if (!psoc) { + tdls_err("psoc is NULL"); + return NULL; + } + tdls_search_param.macaddr = macaddr; + tdls_search_param.peer = NULL; + + wlan_objmgr_iterate_obj_list(psoc, WLAN_VDEV_OP, + tdls_find_peer_handler, + &tdls_search_param, 0, WLAN_TDLS_NB_ID); + + return tdls_search_param.peer; +} + +uint8_t tdls_find_opclass(struct wlan_objmgr_psoc *psoc, uint8_t channel, + uint8_t bw_offset) +{ + char country[REG_ALPHA2_LEN + 1]; + QDF_STATUS status; + + if (!psoc) { + tdls_err("psoc is NULL"); + return 0; + } + + status = wlan_reg_read_default_country(psoc, country); + if (QDF_IS_STATUS_ERROR(status)) + return 0; + + return wlan_reg_dmn_get_opclass_from_channel(country, channel, + bw_offset); +} + +/** + * tdls_add_peer() - add TDLS peer in TDLS vdev object + * @vdev_obj: TDLS vdev object + * @macaddr: MAC address of peer + * + * Allocate memory for the new peer, and add it to hash table. + * + * Return: new added TDLS peer, NULL if failed. 
+ */ +static struct tdls_peer *tdls_add_peer(struct tdls_vdev_priv_obj *vdev_obj, + const uint8_t *macaddr) +{ + struct tdls_peer *peer; + struct tdls_soc_priv_obj *soc_obj; + uint8_t key = 0; + qdf_list_t *head; + + peer = qdf_mem_malloc(sizeof(*peer)); + if (!peer) { + tdls_err("add tdls peer malloc memory failed!"); + return NULL; + } + + soc_obj = wlan_vdev_get_tdls_soc_obj(vdev_obj->vdev); + if (!soc_obj) { + tdls_err("NULL tdls soc object"); + return NULL; + } + + key = calculate_hash_key(macaddr); + head = &vdev_obj->peer_list[key]; + + qdf_mem_copy(&peer->peer_mac, macaddr, sizeof(peer->peer_mac)); + peer->vdev_priv = vdev_obj; + + peer->pref_off_chan_num = + soc_obj->tdls_configs.tdls_pre_off_chan_num; + peer->op_class_for_pref_off_chan = + tdls_find_opclass(soc_obj->soc, + peer->pref_off_chan_num, + soc_obj->tdls_configs.tdls_pre_off_chan_bw); + peer->sta_id = INVALID_TDLS_PEER_ID; + + qdf_list_insert_back(head, &peer->node); + + tdls_debug("add tdls peer: " QDF_MAC_ADDR_STR, + QDF_MAC_ADDR_ARRAY(macaddr)); + return peer; +} + +struct tdls_peer *tdls_get_peer(struct tdls_vdev_priv_obj *vdev_obj, + const uint8_t *macaddr) +{ + struct tdls_peer *peer; + + peer = tdls_find_peer(vdev_obj, macaddr); + if (!peer) + peer = tdls_add_peer(vdev_obj, macaddr); + + return peer; +} + +static struct tdls_peer * +tdls_find_progress_peer_in_list(qdf_list_t *head, + const uint8_t *macaddr, uint8_t skip_self) +{ + QDF_STATUS status; + struct tdls_peer *peer; + qdf_list_node_t *p_node; + + status = qdf_list_peek_front(head, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + peer = qdf_container_of(p_node, struct tdls_peer, node); + if (skip_self && macaddr && + WLAN_ADDR_EQ(&peer->peer_mac, macaddr) + == QDF_STATUS_SUCCESS) { + status = qdf_list_peek_next(head, p_node, &p_node); + continue; + } else if (TDLS_LINK_CONNECTING == peer->link_status) { + tdls_debug(QDF_MAC_ADDR_STR " TDLS_LINK_CONNECTING", + QDF_MAC_ADDR_ARRAY(peer->peer_mac.bytes)); + return peer; + } + 
status = qdf_list_peek_next(head, p_node, &p_node); + } + + return NULL; +} + +/** + * tdls_find_progress_peer() - find the peer with ongoing TDLS progress + * on present vdev + * @vdev_obj: TDLS vdev object + * @macaddr: MAC address of peer, if NULL check for all the peer list + * @skip_self: If true, skip this macaddr. Otherwise, check all the peer list. + * if macaddr is NULL, this argument is ignored, and check for all + * the peer list. + * + * Return: Pointer to tdls_peer if TDLS is ongoing. Otherwise return NULL. + */ +static struct tdls_peer * +tdls_find_progress_peer(struct tdls_vdev_priv_obj *vdev_obj, + const uint8_t *macaddr, uint8_t skip_self) +{ + uint8_t i; + struct tdls_peer *peer; + qdf_list_t *head; + + if (!vdev_obj) { + tdls_err("invalid tdls vdev object"); + return NULL; + } + + for (i = 0; i < WLAN_TDLS_PEER_LIST_SIZE; i++) { + head = &vdev_obj->peer_list[i]; + + peer = tdls_find_progress_peer_in_list(head, macaddr, + skip_self); + if (peer) + return peer; + } + + return NULL; +} + +/** + * tdls_find_progress_peer_handler() - helper function for tdls_is_progress + * @psoc: soc object + * @obj: vdev object + * @arg: used to keep search peer parameters + * + * Return: None. 
+ */ +static void +tdls_find_progress_peer_handler(struct wlan_objmgr_psoc *psoc, + void *obj, void *arg) +{ + struct wlan_objmgr_vdev *vdev = obj; + struct tdls_search_progress_param *tdls_progress = arg; + struct tdls_vdev_priv_obj *vdev_obj; + + if (tdls_progress->peer) + return; + + if (!vdev) { + tdls_err("invalid vdev"); + return; + } + + if (wlan_vdev_mlme_get_opmode(vdev) != QDF_STA_MODE && + wlan_vdev_mlme_get_opmode(vdev) != QDF_P2P_CLIENT_MODE) + return; + + vdev_obj = wlan_objmgr_vdev_get_comp_private_obj(vdev, + WLAN_UMAC_COMP_TDLS); + + tdls_progress->peer = tdls_find_progress_peer(vdev_obj, + tdls_progress->macaddr, + tdls_progress->skip_self); +} + +struct tdls_peer *tdls_is_progress(struct tdls_vdev_priv_obj *vdev_obj, + const uint8_t *macaddr, uint8_t skip_self) +{ + struct tdls_search_progress_param tdls_progress; + struct wlan_objmgr_psoc *psoc; + + if (!vdev_obj) { + tdls_err("invalid tdls vdev object"); + return NULL; + } + + psoc = wlan_vdev_get_psoc(vdev_obj->vdev); + if (!psoc) { + tdls_err("invalid psoc"); + return NULL; + } + tdls_progress.macaddr = macaddr; + tdls_progress.skip_self = skip_self; + tdls_progress.peer = NULL; + + wlan_objmgr_iterate_obj_list(psoc, WLAN_VDEV_OP, + tdls_find_progress_peer_handler, + &tdls_progress, 0, WLAN_TDLS_NB_ID); + + return tdls_progress.peer; +} + +struct tdls_peer * +tdls_find_first_connected_peer(struct tdls_vdev_priv_obj *vdev_obj) +{ + uint16_t i; + struct tdls_peer *peer; + qdf_list_t *head; + qdf_list_node_t *p_node; + QDF_STATUS status; + + if (!vdev_obj) { + tdls_err("invalid tdls vdev object"); + return NULL; + } + + for (i = 0; i < WLAN_TDLS_PEER_LIST_SIZE; i++) { + head = &vdev_obj->peer_list[i]; + + status = qdf_list_peek_front(head, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + peer = qdf_container_of(p_node, struct tdls_peer, node); + + if (peer && TDLS_LINK_CONNECTED == peer->link_status) { + tdls_debug(QDF_MAC_ADDR_STR + " TDLS_LINK_CONNECTED", + QDF_MAC_ADDR_ARRAY( + 
peer->peer_mac.bytes)); + return peer; + } + status = qdf_list_peek_next(head, p_node, &p_node); + } + } + + return NULL; +} + +/** + * tdls_determine_channel_opclass() - determine channel and opclass + * @soc_obj: TDLS soc object + * @vdev_obj: TDLS vdev object + * @peer: TDLS peer + * @channel: pointer to channel + * @opclass: pointer to opclass + * + * Function determines the channel and operating class + * + * Return: None. + */ +static void tdls_determine_channel_opclass(struct tdls_soc_priv_obj *soc_obj, + struct tdls_vdev_priv_obj *vdev_obj, + struct tdls_peer *peer, + uint32_t *channel, uint32_t *opclass) +{ + uint32_t vdev_id; + enum QDF_OPMODE opmode; + /* + * If tdls offchannel is not enabled then we provide base channel + * and in that case pass opclass as 0 since opclass is mainly needed + * for offchannel cases. + */ + if (!(TDLS_IS_OFF_CHANNEL_ENABLED( + soc_obj->tdls_configs.tdls_feature_flags)) || + soc_obj->tdls_fw_off_chan_mode != ENABLE_CHANSWITCH) { + vdev_id = wlan_vdev_get_id(vdev_obj->vdev); + opmode = wlan_vdev_mlme_get_opmode(vdev_obj->vdev); + + *channel = policy_mgr_get_channel(soc_obj->soc, + policy_mgr_convert_device_mode_to_qdf_type(opmode), + &vdev_id); + *opclass = 0; + } else { + *channel = peer->pref_off_chan_num; + *opclass = peer->op_class_for_pref_off_chan; + } + tdls_debug("channel:%d opclass:%d", *channel, *opclass); +} + +/** + * tdls_get_wifi_hal_state() - get TDLS wifi hal state on current peer + * @peer: TDLS peer + * @state: output parameter to store the TDLS wifi hal state + * @reason: output parameter to store the reason of the current peer + * + * Return: None. 
+ */ +static void tdls_get_wifi_hal_state(struct tdls_peer *peer, uint32_t *state, + int32_t *reason) +{ + struct wlan_objmgr_vdev *vdev; + struct tdls_soc_priv_obj *soc_obj; + + vdev = peer->vdev_priv->vdev; + soc_obj = wlan_vdev_get_tdls_soc_obj(vdev); + if (!soc_obj) { + tdls_err("can't get tdls object"); + return; + } + + *reason = peer->reason; + + switch (peer->link_status) { + case TDLS_LINK_IDLE: + case TDLS_LINK_DISCOVERED: + case TDLS_LINK_DISCOVERING: + case TDLS_LINK_CONNECTING: + *state = QCA_WIFI_HAL_TDLS_S_ENABLED; + break; + case TDLS_LINK_CONNECTED: + if ((TDLS_IS_OFF_CHANNEL_ENABLED( + soc_obj->tdls_configs.tdls_feature_flags)) && + (soc_obj->tdls_fw_off_chan_mode == ENABLE_CHANSWITCH)) + *state = QCA_WIFI_HAL_TDLS_S_ESTABLISHED_OFF_CHANNEL; + else + *state = QCA_WIFI_HAL_TDLS_S_ENABLED; + break; + case TDLS_LINK_TEARING: + *state = QCA_WIFI_HAL_TDLS_S_DROPPED; + break; + } +} + +/** + * tdls_extract_peer_state_param() - extract peer update params from TDLS peer + * @peer_param: output peer update params + * @peer: TDLS peer + * + * This is used when enable TDLS link + * + * Return: None. 
+ */ +void tdls_extract_peer_state_param(struct tdls_peer_update_state *peer_param, + struct tdls_peer *peer) +{ + uint16_t i, num; + struct tdls_vdev_priv_obj *vdev_obj; + struct tdls_soc_priv_obj *soc_obj; + enum channel_state ch_state; + struct wlan_objmgr_pdev *pdev; + uint8_t chan_id; + + vdev_obj = peer->vdev_priv; + soc_obj = wlan_vdev_get_tdls_soc_obj(vdev_obj->vdev); + pdev = wlan_vdev_get_pdev(vdev_obj->vdev); + if (!soc_obj || !pdev) { + tdls_err("soc_obj: %pK, pdev: %pK", soc_obj, pdev); + return; + } + + qdf_mem_zero(peer_param, sizeof(*peer_param)); + peer_param->vdev_id = wlan_vdev_get_id(vdev_obj->vdev); + + qdf_mem_copy(peer_param->peer_macaddr, + peer->peer_mac.bytes, QDF_MAC_ADDR_SIZE); + peer_param->peer_state = TDLS_PEER_STATE_CONNCTED; + peer_param->peer_cap.is_peer_responder = peer->is_responder; + peer_param->peer_cap.peer_uapsd_queue = peer->uapsd_queues; + peer_param->peer_cap.peer_max_sp = peer->max_sp; + peer_param->peer_cap.peer_buff_sta_support = peer->buf_sta_capable; + peer_param->peer_cap.peer_off_chan_support = + peer->off_channel_capable; + peer_param->peer_cap.peer_curr_operclass = 0; + peer_param->peer_cap.self_curr_operclass = 0; + peer_param->peer_cap.peer_chanlen = peer->supported_channels_len; + peer_param->peer_cap.pref_off_channum = peer->pref_off_chan_num; + peer_param->peer_cap.pref_off_chan_bandwidth = + soc_obj->tdls_configs.tdls_pre_off_chan_bw; + peer_param->peer_cap.opclass_for_prefoffchan = + peer->op_class_for_pref_off_chan; + + if (wlan_reg_is_dfs_ch(pdev, peer_param->peer_cap.pref_off_channum)) { + tdls_err("Resetting TDLS off-channel from %d to %d", + peer_param->peer_cap.pref_off_channum, + WLAN_TDLS_PREFERRED_OFF_CHANNEL_NUM_DEF); + peer_param->peer_cap.pref_off_channum = + WLAN_TDLS_PREFERRED_OFF_CHANNEL_NUM_DEF; + } + + num = 0; + for (i = 0; i < peer->supported_channels_len; i++) { + chan_id = peer->supported_channels[i]; + ch_state = wlan_reg_get_channel_state(pdev, chan_id); + + if (CHANNEL_STATE_INVALID 
!= ch_state && + CHANNEL_STATE_DFS != ch_state && + !wlan_reg_is_dsrc_chan(pdev, chan_id)) { + peer_param->peer_cap.peer_chan[num].chan_id = chan_id; + peer_param->peer_cap.peer_chan[num].pwr = + wlan_reg_get_channel_reg_power(pdev, chan_id); + peer_param->peer_cap.peer_chan[num].dfs_set = false; + num++; + } + } + + peer_param->peer_cap.peer_oper_classlen = + peer->supported_oper_classes_len; + for (i = 0; i < peer->supported_oper_classes_len; i++) + peer_param->peer_cap.peer_oper_class[i] = + peer->supported_oper_classes[i]; +} + +/** + * tdls_set_link_status() - set link statue for TDLS peer + * @vdev_obj: TDLS vdev object + * @mac: MAC address of current TDLS peer + * @link_status: link status + * @link_reason: reason with link status + * + * Return: None. + */ +void tdls_set_link_status(struct tdls_vdev_priv_obj *vdev_obj, + const uint8_t *mac, + enum tdls_link_state link_status, + enum tdls_link_state_reason link_reason) +{ + uint32_t state = 0; + int32_t res = 0; + uint32_t op_class = 0; + uint32_t channel = 0; + struct tdls_peer *peer; + struct tdls_soc_priv_obj *soc_obj; + + peer = tdls_find_peer(vdev_obj, mac); + if (!peer) { + tdls_err("peer is NULL, can't set link status %d, reason %d", + link_status, link_reason); + return; + } + + peer->link_status = link_status; + + if (link_status >= TDLS_LINK_DISCOVERED) + peer->discovery_attempt = 0; + + if (peer->is_forced_peer && peer->state_change_notification) { + peer->reason = link_reason; + + soc_obj = wlan_vdev_get_tdls_soc_obj(vdev_obj->vdev); + if (!soc_obj) { + tdls_err("NULL psoc object"); + return; + } + + tdls_determine_channel_opclass(soc_obj, vdev_obj, + peer, &channel, &op_class); + tdls_get_wifi_hal_state(peer, &state, &res); + peer->state_change_notification(mac, op_class, channel, + state, res, soc_obj->soc); + } +} + +void tdls_set_peer_link_status(struct tdls_peer *peer, + enum tdls_link_state link_status, + enum tdls_link_state_reason link_reason) +{ + uint32_t state = 0; + int32_t res = 0; 
+ uint32_t op_class = 0; + uint32_t channel = 0; + struct tdls_soc_priv_obj *soc_obj; + struct tdls_vdev_priv_obj *vdev_obj; + + peer->link_status = link_status; + + if (link_status >= TDLS_LINK_DISCOVERED) + peer->discovery_attempt = 0; + + if (peer->is_forced_peer && peer->state_change_notification) { + peer->reason = link_reason; + + vdev_obj = peer->vdev_priv; + soc_obj = wlan_vdev_get_tdls_soc_obj(vdev_obj->vdev); + if (!soc_obj) { + tdls_err("NULL psoc object"); + return; + } + + tdls_determine_channel_opclass(soc_obj, vdev_obj, + peer, &channel, &op_class); + tdls_get_wifi_hal_state(peer, &state, &res); + peer->state_change_notification(peer->peer_mac.bytes, + op_class, channel, state, + res, soc_obj->soc); + } +} + +void tdls_set_peer_caps(struct tdls_vdev_priv_obj *vdev_obj, + const uint8_t *macaddr, + struct tdls_update_peer_params *req_info) +{ + uint8_t is_buffer_sta = 0; + uint8_t is_off_channel_supported = 0; + uint8_t is_qos_wmm_sta = 0; + struct tdls_soc_priv_obj *soc_obj; + struct tdls_peer *curr_peer; + uint32_t feature; + + soc_obj = wlan_vdev_get_tdls_soc_obj(vdev_obj->vdev); + if (!soc_obj) { + tdls_err("NULL psoc object"); + return; + } + + curr_peer = tdls_find_peer(vdev_obj, macaddr); + if (!curr_peer) { + tdls_err("NULL tdls peer"); + return; + } + + feature = soc_obj->tdls_configs.tdls_feature_flags; + if ((1 << 4) & req_info->extn_capability[3]) + is_buffer_sta = 1; + + if ((1 << 6) & req_info->extn_capability[3]) + is_off_channel_supported = 1; + + if (TDLS_IS_WMM_ENABLED(feature) && req_info->is_qos_wmm_sta) + is_qos_wmm_sta = 1; + + curr_peer->uapsd_queues = req_info->uapsd_queues; + curr_peer->max_sp = req_info->max_sp; + curr_peer->buf_sta_capable = is_buffer_sta; + curr_peer->off_channel_capable = is_off_channel_supported; + + qdf_mem_copy(curr_peer->supported_channels, + req_info->supported_channels, + req_info->supported_channels_len); + + curr_peer->supported_channels_len = req_info->supported_channels_len; + + 
qdf_mem_copy(curr_peer->supported_oper_classes, + req_info->supported_oper_classes, + req_info->supported_oper_classes_len); + + curr_peer->supported_oper_classes_len = + req_info->supported_oper_classes_len; + + curr_peer->qos = is_qos_wmm_sta; +} + +QDF_STATUS tdls_set_sta_id(struct tdls_vdev_priv_obj *vdev_obj, + const uint8_t *macaddr, uint8_t sta_id) +{ + struct tdls_peer *peer; + + peer = tdls_find_peer(vdev_obj, macaddr); + if (!peer) { + tdls_err("peer is NULL"); + return QDF_STATUS_E_FAILURE; + } + + peer->sta_id = sta_id; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tdls_set_force_peer(struct tdls_vdev_priv_obj *vdev_obj, + const uint8_t *macaddr, bool forcepeer) +{ + struct tdls_peer *peer; + + peer = tdls_find_peer(vdev_obj, macaddr); + if (!peer) { + tdls_err("peer is NULL"); + return QDF_STATUS_E_FAILURE; + } + peer->is_forced_peer = forcepeer; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tdls_set_callback(struct tdls_peer *peer, + tdls_state_change_callback callback) +{ + if (!peer) { + tdls_err("peer is NULL"); + return QDF_STATUS_E_FAILURE; + } + peer->state_change_notification = callback; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tdls_set_extctrl_param(struct tdls_peer *peer, uint32_t chan, + uint32_t max_latency, uint32_t op_class, + uint32_t min_bandwidth) +{ + if (!peer) { + tdls_err("peer is NULL"); + return QDF_STATUS_E_FAILURE; + } + peer->op_class_for_pref_off_chan = (uint8_t)op_class; + peer->pref_off_chan_num = (uint8_t)chan; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tdls_reset_peer(struct tdls_vdev_priv_obj *vdev_obj, + const uint8_t *macaddr) +{ + struct tdls_soc_priv_obj *soc_obj; + struct tdls_peer *curr_peer; + struct tdls_user_config *config; + + soc_obj = wlan_vdev_get_tdls_soc_obj(vdev_obj->vdev); + if (!soc_obj) { + tdls_err("NULL psoc object"); + return QDF_STATUS_E_FAILURE; + } + + curr_peer = tdls_find_peer(vdev_obj, macaddr); + if (!curr_peer) { + tdls_err("NULL tdls peer"); + return QDF_STATUS_E_FAILURE; + 
} + + if (!curr_peer->is_forced_peer) { + config = &soc_obj->tdls_configs; + curr_peer->pref_off_chan_num = config->tdls_pre_off_chan_num; + curr_peer->op_class_for_pref_off_chan = + tdls_find_opclass(soc_obj->soc, + curr_peer->pref_off_chan_num, + config->tdls_pre_off_chan_bw); + } + + tdls_set_peer_link_status(curr_peer, TDLS_LINK_IDLE, + TDLS_LINK_UNSPECIFIED); + curr_peer->sta_id = INVALID_TDLS_PEER_ID; + + return QDF_STATUS_SUCCESS; +} + +void tdls_peer_idle_timers_destroy(struct tdls_vdev_priv_obj *vdev_obj) +{ + uint16_t i; + struct tdls_peer *peer; + qdf_list_t *head; + qdf_list_node_t *p_node; + QDF_STATUS status; + + if (!vdev_obj) { + tdls_err("NULL tdls vdev object"); + return; + } + + for (i = 0; i < WLAN_TDLS_PEER_LIST_SIZE; i++) { + head = &vdev_obj->peer_list[i]; + + status = qdf_list_peek_front(head, &p_node); + while (QDF_IS_STATUS_SUCCESS(status)) { + peer = qdf_container_of(p_node, struct tdls_peer, node); + if (peer && peer->is_peer_idle_timer_initialised) { + tdls_debug(QDF_MAC_ADDR_STR + ": destroy idle timer ", + QDF_MAC_ADDR_ARRAY( + peer->peer_mac.bytes)); + qdf_mc_timer_stop(&peer->peer_idle_timer); + qdf_mc_timer_destroy(&peer->peer_idle_timer); + } + status = qdf_list_peek_next(head, p_node, &p_node); + } + } +} + +void tdls_free_peer_list(struct tdls_vdev_priv_obj *vdev_obj) +{ + uint16_t i; + struct tdls_peer *peer; + qdf_list_t *head; + qdf_list_node_t *p_node; + + if (!vdev_obj) { + tdls_err("NULL tdls vdev object"); + return; + } + + for (i = 0; i < WLAN_TDLS_PEER_LIST_SIZE; i++) { + head = &vdev_obj->peer_list[i]; + + while (QDF_IS_STATUS_SUCCESS( + qdf_list_remove_front(head, &p_node))) { + peer = qdf_container_of(p_node, struct tdls_peer, node); + qdf_mem_free(peer); + } + qdf_list_destroy(head); + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_peer.h b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_peer.h new file mode 100644 index 
0000000000000000000000000000000000000000..41ffa8096f54598ebd11de42e09853afe48976f7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_peer.h @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_tdls_peer.h + * + * TDLS peer function declaration + */ + +#if !defined(_WLAN_TDLS_PEER_H_) +#define _WLAN_TDLS_PEER_H_ + +/** + * struct tdls_search_peer_param - used to search TDLS peer + * @macaddr: MAC address of peer + * @peer: pointer to the found peer + */ +struct tdls_search_peer_param { + const uint8_t *macaddr; + struct tdls_peer *peer; +}; + +/** + * struct tdls_progress_param - used to search progress TDLS peer + * @skip_self: skip self peer + * @macaddr: MAC address of peer + * @peer: pointer to the found peer + */ +struct tdls_search_progress_param { + uint8_t skip_self; + const uint8_t *macaddr; + struct tdls_peer *peer; +}; + +/** + * tdls_get_peer() - find or add an TDLS peer in TDLS vdev object + * @vdev_obj: TDLS vdev object + * @macaddr: MAC address of peer + * + * Search the TDLS peer in the hash table and create a new one if not found. + * + * Return: Pointer to tdls_peer, NULL if failed. 
+ */ +struct tdls_peer *tdls_get_peer(struct tdls_vdev_priv_obj *vdev_obj, + const uint8_t *macaddr); + +/** + * tdls_find_peer() - find TDLS peer in TDLS vdev object + * @vdev_obj: TDLS vdev object + * @macaddr: MAC address of peer + * + * This is in scheduler thread context, no lock required. + * + * Return: If peer is found, then it returns pointer to tdls_peer; + * otherwise, it returns NULL. + */ +struct tdls_peer *tdls_find_peer(struct tdls_vdev_priv_obj *vdev_obj, + const uint8_t *macaddr); + +/** + * tdls_find_all_peer() - find peer matching the input MACaddr in soc range + * @soc_obj: TDLS soc object + * @macaddr: MAC address of TDLS peer + * + * This is in scheduler thread context, no lock required. + * + * Return: TDLS peer if a matching is detected; NULL otherwise + */ +struct tdls_peer * +tdls_find_all_peer(struct tdls_soc_priv_obj *soc_obj, const uint8_t *macaddr); + +/** + * tdls_find_opclass() - find the operating class for the given channel + * @psoc: soc object + * @channel: channel number + * @bw_offset: offset to bandwidth + * + * This is in scheduler thread context, no lock required. + * + * Return: Operating class + */ +uint8_t tdls_find_opclass(struct wlan_objmgr_psoc *psoc, + uint8_t channel, + uint8_t bw_offset); + +/** + * tdls_find_first_connected_peer() - find the 1st connected tdls peer from vdev + * @vdev_obj: tdls vdev object + * + * This function searches for the 1st connected TDLS peer + * + * Return: The 1st connected TDLS peer if found; NULL otherwise + */ +struct tdls_peer * +tdls_find_first_connected_peer(struct tdls_vdev_priv_obj *vdev_obj); + +/** + * tdls_is_progress() - find the peer with ongoing TDLS progress on present psoc + * @vdev_obj: TDLS vdev object + * @macaddr: MAC address of the peer + * @skip_self: if 1, skip checking self. If 0, search include self + * + * This is used in scheduler thread context, no lock required. 
+ * + * Return: TDLS peer if found; NULL otherwise + */ +struct tdls_peer *tdls_is_progress(struct tdls_vdev_priv_obj *vdev_obj, + const uint8_t *macaddr, uint8_t skip_self); + +/** + * tdls_extract_peer_state_param() - extract peer update params from TDLS peer + * @peer_param: output peer update params + * @peer: TDLS peer + * + * This is used when enable TDLS link + * + * Return: None. + */ +void tdls_extract_peer_state_param(struct tdls_peer_update_state *peer_param, + struct tdls_peer *peer); + +/** + * tdls_set_peer_link_status() - set link status for TDLS peer + * @peer: TDLS peer + * @link_state: link state + * @link_reason: reason with link status + * + * This is in scheduler thread context, no lock required. + * + * Return: None. + */ +void tdls_set_peer_link_status(struct tdls_peer *peer, + enum tdls_link_state link_state, + enum tdls_link_state_reason link_reason); + +/** + * tdls_set_peer_caps() - set capability for TDLS peer + * @vdev_obj: TDLS vdev object + * @macaddr: MAC address for the TDLS peer + * @req_info: parameters to update peer capability + * + * This is in scheduler thread context, no lock required. + * + * Return: None. + */ +void tdls_set_peer_caps(struct tdls_vdev_priv_obj *vdev_obj, + const uint8_t *macaddr, + struct tdls_update_peer_params *req_info); + +/** + * tdls_set_sta_id() - set station ID on a TDLS peer + * @vdev_obj: TDLS vdev object + * @macaddr: MAC address of the TDLS peer + * @sta_id: station ID + * + * Return: QDF_STATUS_SUCCESS if success; other values if failed + */ +QDF_STATUS tdls_set_sta_id(struct tdls_vdev_priv_obj *vdev_obj, + const uint8_t *macaddr, uint8_t sta_id); + +/** + * tdls_set_force_peer() - set/clear is_forced_peer flag on peer + * @vdev_obj: TDLS vdev object + * @macaddr: MAC address of TDLS peer + * @forcepeer: value used to set is_forced_peer flag + * + * This is used in scheduler thread context, no lock required. 
+ * + * Return: QDF_STATUS_SUCCESS if success; other values if failed + */ +QDF_STATUS tdls_set_force_peer(struct tdls_vdev_priv_obj *vdev_obj, + const uint8_t *macaddr, bool forcepeer); + +/** + * tdls_set_callback() - set state change callback on current TDLS peer + * @peer: TDLS peer + * @callback: state change callback + * + * This is used in scheduler thread context, no lock required. + * + * Return: QDF_STATUS_SUCCESS if success; other values if failed + */ +QDF_STATUS tdls_set_callback(struct tdls_peer *peer, + tdls_state_change_callback callback); + +/** + * tdls_set_extctrl_param() - set external control parameter on TDLS peer + * @peer: TDLS peer + * @chan: channel + * @max_latency: maximum latency + * @op_class: operation class + * @min_bandwidth: minimal bandwidth + * + * This is used in scheduler thread context, no lock required. + * + * Return: QDF_STATUS_SUCCESS if success; other values if failed + */ +QDF_STATUS tdls_set_extctrl_param(struct tdls_peer *peer, uint32_t chan, + uint32_t max_latency, uint32_t op_class, + uint32_t min_bandwidth); + +/** + * tdls_reset_peer() - reset TDLS peer identified by MAC address + * @vdev_obj: TDLS vdev object + * @mac: MAC address of the peer + * + * Return: QDF_STATUS_SUCCESS if success; other values if failed + */ +QDF_STATUS tdls_reset_peer(struct tdls_vdev_priv_obj *vdev_obj, + const uint8_t *mac); + +/** + * tdls_peer_idle_timers_destroy() - destroy peer idle timers + * @vdev_obj: TDLS vdev object + * + * Loop through the idle peer list and destroy their timers + * + * Return: None + */ +void tdls_peer_idle_timers_destroy(struct tdls_vdev_priv_obj *vdev_obj); + +/** + * tdls_free_peer_list() - free TDLS peer list + * @vdev_obj: TDLS vdev object + * + * Free all the tdls peers + * + * Return: None + */ +void tdls_free_peer_list(struct tdls_vdev_priv_obj *vdev_obj); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_txrx.c 
b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_txrx.c new file mode 100644 index 0000000000000000000000000000000000000000..6ded57c974f8b268666fcf6abfbc9aa457afa549 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_txrx.c @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_tdls_txrx.c + * + * TDLS txrx function definitions + */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_txrx.h b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_txrx.h new file mode 100644 index 0000000000000000000000000000000000000000..b001ffe745992a340bc03d431fbbb3dce69a546c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/tdls/core/src/wlan_tdls_txrx.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_tdls_txrx.h + * + * TDLS txrx api declaration + */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/inc/wlan_tdls_public_structs.h b/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/inc/wlan_tdls_public_structs.h new file mode 100644 index 0000000000000000000000000000000000000000..2dc80875c1f56ab114ccad50dc950ba26013b6aa --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/inc/wlan_tdls_public_structs.h @@ -0,0 +1,1164 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_tdls_public_structs.h + * + * TDLS public structure definations + */ + +#ifndef _WLAN_TDLS_STRUCTS_H_ +#define _WLAN_TDLS_STRUCTS_H_ +#include +#include +#include +#include +#include + + +#define WLAN_TDLS_STA_MAX_NUM 8 +#define WLAN_TDLS_STA_P_UAPSD_OFFCHAN_MAX_NUM 1 +#define WLAN_TDLS_PEER_LIST_SIZE 16 +#define WLAN_TDLS_CT_TABLE_SIZE 8 +#define WLAN_TDLS_PEER_SUB_LIST_SIZE 10 +#define WLAN_MAC_MAX_EXTN_CAP 8 +#define WLAN_MAC_MAX_SUPP_CHANNELS 100 +#define WLAN_MAC_WMI_MAX_SUPP_CHANNELS 128 +#define WLAN_MAX_SUPP_OPER_CLASSES 32 +#define WLAN_MAC_MAX_SUPP_RATES 32 +#define WLAN_CHANNEL_14 14 +#define ENABLE_CHANSWITCH 1 +#define DISABLE_CHANSWITCH 2 +#define WLAN_TDLS_PREFERRED_OFF_CHANNEL_NUM_MIN 1 +#define WLAN_TDLS_PREFERRED_OFF_CHANNEL_NUM_MAX 165 +#define WLAN_TDLS_PREFERRED_OFF_CHANNEL_NUM_DEF 36 + +#define AC_PRIORITY_NUM 4 + +/* default tdls serialize timeout is set to 10 secs */ +#define TDLS_DEFAULT_SERIALIZE_CMD_TIMEOUT 10000 + +/** Maximum time(ms) to wait for tdls add sta to complete **/ +#define WAIT_TIME_TDLS_ADD_STA (TDLS_DEFAULT_SERIALIZE_CMD_TIMEOUT + 1000) + +/** Maximum time(ms) to wait for tdls del sta to complete **/ +#define WAIT_TIME_TDLS_DEL_STA (TDLS_DEFAULT_SERIALIZE_CMD_TIMEOUT + 1000) + +/** Maximum time(ms) to wait for Link Establish Req to complete **/ +#define WAIT_TIME_TDLS_LINK_ESTABLISH_REQ 1500 + +/** Maximum time(ms) to wait for tdls mgmt to complete **/ +#define WAIT_TIME_FOR_TDLS_MGMT 11000 + +/** Maximum time(ms) to wait for tdls mgmt to complete **/ +#define WAIT_TIME_FOR_TDLS_USER_CMD 11000 + +/** Maximum waittime for TDLS teardown links **/ +#define WAIT_TIME_FOR_TDLS_TEARDOWN_LINKS 10000 + +/** Maximum waittime for TDLS antenna switch **/ +#define WAIT_TIME_FOR_TDLS_ANTENNA_SWITCH 1000 + +#define TDLS_TEARDOWN_PEER_UNREACHABLE 25 +#define TDLS_TEARDOWN_PEER_UNSPEC_REASON 26 + +#define INVALID_TDLS_PEER_ID 0xFF +#define INVALID_TDLS_PEER_INDEX 0xFF + +#define TDLS_STA_INDEX_CHECK(sta_id) \ + 
(((sta_id) >= 0) && ((sta_id) < 0xFF)) +/** + * enum tdls_conc_cap - tdls concurrency support + * @TDLS_SUPPORTED_ONLY_ON_STA: only support sta tdls + * @TDLS_SUPPORTED_ONLY_ON_P2P_CLIENT: only support p2p client tdls + */ +enum tdls_conc_cap { + TDLS_SUPPORTED_ONLY_ON_STA = 0, + TDLS_SUPPORTED_ONLY_ON_P2P_CLIENT, +}; + +/** + * enum tdls_peer_capab - tdls capability type + * @TDLS_CAP_NOT_SUPPORTED: tdls not supported + * @TDLS_CAP_UNKNOWN: unknown capability + * @TDLS_CAP_SUPPORTED: tdls capability supported + */ +enum tdls_peer_capab { + TDLS_CAP_NOT_SUPPORTED = -1, + TDLS_CAP_UNKNOWN = 0, + TDLS_CAP_SUPPORTED = 1, +}; + +/** + * enum tdls_peer_state - tdls peer state + * @TDLS_PEER_STATE_PEERING: tdls connection in progress + * @TDLS_PEER_STATE_CONNCTED: tdls peer is connected + * @TDLS_PEER_STATE_TEARDOWN: tdls peer is tear down + * @TDLS_PEER_ADD_MAC_ADDR: add peer mac into connection table + * @TDLS_PEER_REMOVE_MAC_ADDR: remove peer mac from connection table + */ +enum tdls_peer_state { + TDLS_PEER_STATE_PEERING, + TDLS_PEER_STATE_CONNCTED, + TDLS_PEER_STATE_TEARDOWN, + TDLS_PEER_ADD_MAC_ADDR, + TDLS_PEER_REMOVE_MAC_ADDR +}; + +/** + * enum tdls_link_state - tdls link state + * @TDLS_LINK_IDLE: tdls link idle + * @TDLS_LINK_DISCOVERING: tdls link discovering + * @TDLS_LINK_DISCOVERED: tdls link discovered + * @TDLS_LINK_CONNECTING: tdls link connecting + * @TDLS_LINK_CONNECTED: tdls link connected + * @TDLS_LINK_TEARING: tdls link tearing + */ +enum tdls_link_state { + TDLS_LINK_IDLE = 0, + TDLS_LINK_DISCOVERING, + TDLS_LINK_DISCOVERED, + TDLS_LINK_CONNECTING, + TDLS_LINK_CONNECTED, + TDLS_LINK_TEARING, +}; + +/** + * enum tdls_link_state_reason - tdls link reason + * @TDLS_LINK_SUCCESS: Success + * @TDLS_LINK_UNSPECIFIED: Unspecified reason + * @TDLS_LINK_NOT_SUPPORTED: Remote side doesn't support TDLS + * @TDLS_LINK_UNSUPPORTED_BAND: Remote side doesn't support this band + * @TDLS_LINK_NOT_BENEFICIAL: Going to AP is better than direct + * 
@TDLS_LINK_DROPPED_BY_REMOTE: Remote side doesn't want it anymore + */ +enum tdls_link_state_reason { + TDLS_LINK_SUCCESS, + TDLS_LINK_UNSPECIFIED = -1, + TDLS_LINK_NOT_SUPPORTED = -2, + TDLS_LINK_UNSUPPORTED_BAND = -3, + TDLS_LINK_NOT_BENEFICIAL = -4, + TDLS_LINK_DROPPED_BY_REMOTE = -5, +}; + +/** + * enum tdls_feature_mode - TDLS support mode + * @TDLS_SUPPORT_DISABLED: Disabled in ini or FW + * @TDLS_SUPPORT_SUSPENDED: TDLS supported by ini and FW, but disabled + * temporarily due to off-channel operations or due to other reasons + * @TDLS_SUPPORT_EXP_TRIG_ONLY: Explicit trigger mode + * @TDLS_SUPPORT_IMP_MODE: Implicit mode + * @TDLS_SUPPORT_EXT_CONTROL: External control mode + */ +enum tdls_feature_mode { + TDLS_SUPPORT_DISABLED = 0, + TDLS_SUPPORT_SUSPENDED, + TDLS_SUPPORT_EXP_TRIG_ONLY, + TDLS_SUPPORT_IMP_MODE, + TDLS_SUPPORT_EXT_CONTROL, +}; + +/** + * enum tdls_command_type - TDLS command type + * @TDLS_CMD_TX_ACTION: send tdls action frame + * @TDLS_CMD_ADD_STA: add tdls peer + * @TDLS_CMD_CHANGE_STA: change tdls peer + * @TDLS_CMD_ENABLE_LINK: enable tdls link + * @TDLS_CMD_DISABLE_LINK: disable tdls link + * @TDLS_CMD_CONFIG_FORCE_PEER: config external peer + * @TDLS_CMD_REMOVE_FORCE_PEER: remove external peer + * @TDLS_CMD_STATS_UPDATE: update tdls stats + * @TDLS_CMD_CONFIG_UPDATE: config tdls + * @TDLS_CMD_SCAN_DONE: scon done event + * @TDLS_CMD_SET_RESPONDER: responder event + * @TDLS_NOTIFY_STA_CONNECTION: notify sta connection + * @TDLS_NOTIFY_STA_DISCONNECTION: notify sta disconnection + * @TDLS_CMD_SET_TDLS_MODE: set the tdls mode + * @TDLS_CMD_SESSION_INCREMENT: notify session increment + * @TDLS_CMD_SESSION_DECREMENT: notify session decrement + * @TDLS_CMD_TEARDOWN_LINKS: notify teardown + * @TDLS_NOTIFY_RESET_ADAPTERS: notify adapter reset + * @TDLS_CMD_GET_ALL_PEERS: get all the tdls peers from the list + * @TDLS_CMD_ANTENNA_SWITCH: dynamic tdls antenna switch + * @TDLS_CMD_SET_OFFCHANNEL: tdls offchannel + * @TDLS_CMD_SET_OFFCHANMODE: tdls 
offchannel mode + * @TDLS_CMD_SET_SECOFFCHANOFFSET: tdls secondary offchannel offset + * @TDLS_DELETE_ALL_PEERS_INDICATION: tdls delete all peers indication + */ +enum tdls_command_type { + TDLS_CMD_TX_ACTION = 1, + TDLS_CMD_ADD_STA, + TDLS_CMD_CHANGE_STA, + TDLS_CMD_ENABLE_LINK, + TDLS_CMD_DISABLE_LINK, + TDLS_CMD_CONFIG_FORCE_PEER, + TDLS_CMD_REMOVE_FORCE_PEER, + TDLS_CMD_STATS_UPDATE, + TDLS_CMD_CONFIG_UPDATE, + TDLS_CMD_SCAN_DONE, + TDLS_CMD_SET_RESPONDER, + TDLS_NOTIFY_STA_CONNECTION, + TDLS_NOTIFY_STA_DISCONNECTION, + TDLS_CMD_SET_TDLS_MODE, + TDLS_CMD_SESSION_INCREMENT, + TDLS_CMD_SESSION_DECREMENT, + TDLS_CMD_TEARDOWN_LINKS, + TDLS_NOTIFY_RESET_ADAPTERS, + TDLS_CMD_GET_ALL_PEERS, + TDLS_CMD_ANTENNA_SWITCH, + TDLS_CMD_SET_OFFCHANNEL, + TDLS_CMD_SET_OFFCHANMODE, + TDLS_CMD_SET_SECOFFCHANOFFSET, + TDLS_DELETE_ALL_PEERS_INDICATION +}; + +/** + * enum tdls_event_type - TDLS event type + * @TDLS_EVENT_VDEV_STATE_CHANGE: umac connect/disconnect event + * @TDLS_EVENT_MGMT_TX_ACK_CNF: tx tdls frame ack event + * @TDLS_EVENT_RX_MGMT: rx discovery response frame + * @TDLS_EVENT_ADD_PEER: add peer or update peer + * @TDLS_EVENT_DEL_PEER: delete peer + * @TDLS_EVENT_DISCOVERY_REQ: dicovery request + * @TDLS_EVENT_TEARDOWN_REQ: teardown request + * @TDLS_EVENT_SETUP_REQ: setup request + * @TDLS_EVENT_TEARDOWN_LINKS_DONE: teardown completion event + * @TDLS_EVENT_USER_CMD: tdls user command + * @TDLS_EVENT_ANTENNA_SWITCH: antenna switch event + */ +enum tdls_event_type { + TDLS_EVENT_VDEV_STATE_CHANGE = 0, + TDLS_EVENT_MGMT_TX_ACK_CNF, + TDLS_EVENT_RX_MGMT, + TDLS_EVENT_ADD_PEER, + TDLS_EVENT_DEL_PEER, + TDLS_EVENT_DISCOVERY_REQ, + TDLS_EVENT_TEARDOWN_REQ, + TDLS_EVENT_SETUP_REQ, + TDLS_EVENT_TEARDOWN_LINKS_DONE, + TDLS_EVENT_USER_CMD, + TDLS_EVENT_ANTENNA_SWITCH, +}; + +/** + * enum tdls_state_t - tdls state + * @QCA_WIFI_HAL_TDLS_DISABLED: TDLS is not enabled, or is disabled now + * @QCA_WIFI_HAL_TDLS_ENABLED: TDLS is enabled, but not yet tried + * 
@QCA_WIFI_HAL_TDLS_ESTABLISHED: Direct link is established + * @QCA_WIFI_HAL_TDLS_ESTABLISHED_OFF_CHANNEL: Direct link established using MCC + * @QCA_WIFI_HAL_TDLS_DROPPED: Direct link was established, but is now dropped + * @QCA_WIFI_HAL_TDLS_FAILED: Direct link failed + */ +enum tdls_state_t { + QCA_WIFI_HAL_TDLS_S_DISABLED = 1, + QCA_WIFI_HAL_TDLS_S_ENABLED, + QCA_WIFI_HAL_TDLS_S_ESTABLISHED, + QCA_WIFI_HAL_TDLS_S_ESTABLISHED_OFF_CHANNEL, + QCA_WIFI_HAL_TDLS_S_DROPPED, + QCA_WIFI_HAL_TDLS_S_FAILED, +}; + +/** + * enum tdls_off_chan_mode - mode for WMI_TDLS_SET_OFFCHAN_MODE_CMDID + * @TDLS_ENABLE_OFFCHANNEL: enable off channel + * @TDLS_DISABLE_OFFCHANNEL: disable off channel + */ +enum tdls_off_chan_mode { + TDLS_ENABLE_OFFCHANNEL, + TDLS_DISABLE_OFFCHANNEL +}; + +/** + * enum tdls_event_msg_type - TDLS event message type + * @TDLS_SHOULD_DISCOVER: should do discover for peer (based on tx bytes per + * second > tx_discover threshold) + * @TDLS_SHOULD_TEARDOWN: recommend teardown the link for peer due to tx bytes + * per second below tx_teardown_threshold + * @TDLS_PEER_DISCONNECTED: tdls peer disconnected + * @TDLS_CONNECTION_TRACKER_NOTIFY: TDLS/BT role change notification for + * connection tracker + */ +enum tdls_event_msg_type { + TDLS_SHOULD_DISCOVER = 0, + TDLS_SHOULD_TEARDOWN, + TDLS_PEER_DISCONNECTED, + TDLS_CONNECTION_TRACKER_NOTIFY +}; + +/** + * enum tdls_event_reason - TDLS event reason + * @TDLS_TEARDOWN_TX: tdls teardown recommended due to low transmits + * @TDLS_TEARDOWN_RSSI: tdls link tear down recommended due to poor RSSI + * @TDLS_TEARDOWN_SCAN: tdls link tear down recommended due to offchannel scan + * @TDLS_TEARDOWN_PTR_TIMEOUT: tdls peer disconnected due to PTR timeout + * @TDLS_TEARDOWN_BAD_PTR: tdls peer disconnected due wrong PTR format + * @TDLS_TEARDOWN_NO_RSP: tdls peer not responding + * @TDLS_DISCONNECTED_PEER_DELETE: tdls peer disconnected due to peer deletion + * @TDLS_PEER_ENTER_BUF_STA: tdls entered buffer STA role, TDLS 
connection + * tracker needs to handle this + * @TDLS_PEER_EXIT_BUF_STA: tdls exited buffer STA role, TDLS connection tracker + * needs to handle this + * @TDLS_ENTER_BT_BUSY: BT entered busy mode, TDLS connection tracker needs to + * handle this + * @TDLS_EXIT_BT_BUSY: BT exited busy mode, TDLS connection tracker needs to + * handle this + * @TDLS_SCAN_STARTED: TDLS module received a scan start event, TDLS connection + * tracker needs to handle this + * @TDLS_SCAN_COMPLETED: TDLS module received a scan complete event, TDLS + * connection tracker needs to handle this + */ +enum tdls_event_reason { + TDLS_TEARDOWN_TX, + TDLS_TEARDOWN_RSSI, + TDLS_TEARDOWN_SCAN, + TDLS_TEARDOWN_PTR_TIMEOUT, + TDLS_TEARDOWN_BAD_PTR, + TDLS_TEARDOWN_NO_RSP, + TDLS_DISCONNECTED_PEER_DELETE, + TDLS_PEER_ENTER_BUF_STA, + TDLS_PEER_EXIT_BUF_STA, + TDLS_ENTER_BT_BUSY, + TDLS_EXIT_BT_BUSY, + TDLS_SCAN_STARTED, + TDLS_SCAN_COMPLETED, +}; + +/** + * enum tdls_disable_sources - TDLS disable sources + * @TDLS_SET_MODE_SOURCE_USER: disable from user + * @TDLS_SET_MODE_SOURCE_SCAN: disable during scan + * @TDLS_SET_MODE_SOURCE_OFFCHANNEL: disable during offchannel + * @TDLS_SET_MODE_SOURCE_BTC: disable during bluetooth + * @TDLS_SET_MODE_SOURCE_P2P: disable during p2p + */ +enum tdls_disable_sources { + TDLS_SET_MODE_SOURCE_USER = 0, + TDLS_SET_MODE_SOURCE_SCAN, + TDLS_SET_MODE_SOURCE_OFFCHANNEL, + TDLS_SET_MODE_SOURCE_BTC, + TDLS_SET_MODE_SOURCE_P2P, +}; + +/** + * struct tdls_osif_indication - tdls indication to os if layer + * @vdev: vdev object + * @reason: used with teardown indication + * @peer_mac: MAC address of the TDLS peer + */ +struct tdls_osif_indication { + struct wlan_objmgr_vdev *vdev; + uint16_t reason; + uint8_t peer_mac[QDF_MAC_ADDR_SIZE]; + QDF_STATUS status; +}; + +/** + * struct tx_frame - tx frame + * @buf: frame buffer + * @buf_len: buffer length + * @tx_timer: tx send timer + */ +struct tx_frame { + uint8_t *buf; + size_t buf_len; + qdf_timer_t tx_timer; +}; + +/** + * enum 
tdls_feature_bit + * @TDLS_FEATURE_OFF_CHANNEL: tdls off channel + * @TDLS_FEATURE_WMM: tdls wmm + * @TDLS_FEATURE_BUFFER_STA: tdls buffer sta + * @TDLS_FEATURE_SLEEP_STA: tdls sleep sta feature + * @TDLS_FEATURE_SCAN: tdls scan + * @TDLS_FEATURE_ENABLE: tdls enabled + * @TDLS_FEAUTRE_IMPLICIT_TRIGGER: tdls implicit trigger + * @TDLS_FEATURE_EXTERNAL_CONTROL: tdls external control + */ +enum tdls_feature_bit { + TDLS_FEATURE_OFF_CHANNEL, + TDLS_FEATURE_WMM, + TDLS_FEATURE_BUFFER_STA, + TDLS_FEATURE_SLEEP_STA, + TDLS_FEATURE_SCAN, + TDLS_FEATURE_ENABLE, + TDLS_FEAUTRE_IMPLICIT_TRIGGER, + TDLS_FEATURE_EXTERNAL_CONTROL +}; + +#define TDLS_IS_OFF_CHANNEL_ENABLED(flags) \ + CHECK_BIT(flags, TDLS_FEATURE_OFF_CHANNEL) +#define TDLS_IS_WMM_ENABLED(flags) \ + CHECK_BIT(flags, TDLS_FEATURE_WMM) +#define TDLS_IS_BUFFER_STA_ENABLED(flags) \ + CHECK_BIT(flags, TDLS_FEATURE_BUFFER_STA) +#define TDLS_IS_SLEEP_STA_ENABLED(flags) \ + CHECK_BIT(flags, TDLS_FEATURE_SLEEP_STA) +#define TDLS_IS_SCAN_ENABLED(flags) \ + CHECK_BIT(flags, TDLS_FEATURE_SCAN) +#define TDLS_IS_ENABLED(flags) \ + CHECK_BIT(flags, TDLS_FEATURE_ENABLE) +#define TDLS_IS_IMPLICIT_TRIG_ENABLED(flags) \ + CHECK_BIT(flags, TDLS_FEAUTRE_IMPLICIT_TRIGGER) +#define TDLS_IS_EXTERNAL_CONTROL_ENABLED(flags) \ + CHECK_BIT(flags, TDLS_FEATURE_EXTERNAL_CONTROL) + +/** + * struct tdls_user_config - TDLS user configuration + * @tdls_tx_states_period: tdls tx states period + * @tdls_tx_pkt_threshold: tdls tx packets threshold + * @tdls_rx_pkt_threshold: tdls rx packets threshold + * @tdls_max_discovery_attempt: tdls discovery max times + * @tdls_idle_timeout: tdls idle timeout + * @tdls_idle_pkt_threshold: tdls idle packets threshold + * @tdls_rssi_trigger_threshold: tdls rssi trigger threshold + * @tdls_rssi_teardown_threshold: tdls rssi tear down threshold + * @tdls_rssi_delta: tdls rssi delta + * @tdls_uapsd_mask: tdls uapsd mask + * @tdls_uapsd_inactivity_time: tdls uapsd inactivity time + * @tdls_uapsd_pti_window: tdls peer 
traffic indication window + * @tdls_uapsd_ptr_timeout: tdls peer response timeout + * @tdls_feature_flags: tdls feature flags + * @tdls_pre_off_chan_num: tdls off channel number + * @tdls_pre_off_chan_bw: tdls off channel bandwidth + * @tdls_peer_kickout_threshold: sta kickout threshold for tdls peer + * @delayed_trig_framint: delayed trigger frame interval + * @tdls_vdev_nss_2g: tdls NSS setting for 2G band + * @tdls_vdev_nss_5g: tdls NSS setting for 5G band + */ +struct tdls_user_config { + uint32_t tdls_tx_states_period; + uint32_t tdls_tx_pkt_threshold; + uint32_t tdls_rx_pkt_threshold; + uint32_t tdls_max_discovery_attempt; + uint32_t tdls_idle_timeout; + uint32_t tdls_idle_pkt_threshold; + uint32_t tdls_rssi_trigger_threshold; + uint32_t tdls_rssi_teardown_threshold; + uint32_t tdls_rssi_delta; + uint32_t tdls_uapsd_mask; + uint32_t tdls_uapsd_inactivity_time; + uint32_t tdls_uapsd_pti_window; + uint32_t tdls_uapsd_ptr_timeout; + uint32_t tdls_feature_flags; + uint32_t tdls_pre_off_chan_num; + uint32_t tdls_pre_off_chan_bw; + uint32_t tdls_peer_kickout_threshold; + uint32_t delayed_trig_framint; + uint8_t tdls_vdev_nss_2g; + uint8_t tdls_vdev_nss_5g; +}; + +/** + * struct tdls_config_params - tdls configure paramets + * @tdls: tdls support mode + * @tx_period_t: tdls tx stats period + * @tx_packet_n: tdls tx packets number threshold + * @discovery_tries_n: tdls max discovery attempt count + * @idle_timeout_t: tdls idle time timeout + * @idle_packet_n: tdls idle pkt threshold + * @rssi_trigger_threshold: tdls rssi trigger threshold, checked before setup + * @rssi_teardown_threshold: tdls rssi teardown threshold + * @rssi_delta: rssi delta + */ +struct tdls_config_params { + uint32_t tdls; + uint32_t tx_period_t; + uint32_t tx_packet_n; + uint32_t discovery_tries_n; + uint32_t idle_timeout_t; + uint32_t idle_packet_n; + int32_t rssi_trigger_threshold; + int32_t rssi_teardown_threshold; + int32_t rssi_delta; +}; + +/** + * struct tdls_tx_cnf: tdls tx ack + * 
@vdev_id: vdev id + * @action_cookie: frame cookie + * @buf: frame buf + * @buf_len: buffer length + * @status: tx send status + */ +struct tdls_tx_cnf { + int vdev_id; + uint64_t action_cookie; + void *buf; + size_t buf_len; + int status; +}; + +/** + * struct tdls_rx_mgmt_frame - rx mgmt frame structure + * @frame_len: frame length + * @rx_chan: rx channel + * @vdev_id: vdev id + * @frm_type: frame type + * @rx_rssi: rx rssi + * @buf: buffer address + */ +struct tdls_rx_mgmt_frame { + uint32_t frame_len; + uint32_t rx_chan; + uint32_t vdev_id; + uint32_t frm_type; + uint32_t rx_rssi; + uint8_t buf[1]; +}; + +/** + * tdls_rx_callback() - Callback for rx mgmt frame + * @user_data: user data associated to this rx mgmt frame. + * @rx_frame: RX mgmt frame + * + * This callback will be used to give rx frames to hdd. + * + * Return: None + */ +typedef void (*tdls_rx_callback)(void *user_data, + struct tdls_rx_mgmt_frame *rx_frame); + +/** + * tdls_wmm_check() - Callback for wmm info + * @psoc: psoc object + * + * This callback will be used to check wmm information + * + * Return: true or false + */ +typedef bool (*tdls_wmm_check)(uint8_t vdev_id); + + +/* This callback is used to report state change of peer to wpa_supplicant */ +typedef int (*tdls_state_change_callback)(const uint8_t *mac, + uint32_t opclass, + uint32_t channel, + uint32_t state, + int32_t reason, void *ctx); + +/* This callback is used to report events to os_if layer */ +typedef void (*tdls_evt_callback) (void *data, + enum tdls_event_type ev_type, + struct tdls_osif_indication *event); + +/* This callback is used to register TDLS peer with the datapath */ +typedef QDF_STATUS (*tdls_register_peer_callback)(void *userdata, + uint32_t vdev_id, + const uint8_t *mac, + uint16_t stat_id, + uint8_t qos); + +/* This callback is used to deregister TDLS peer from the datapath */ +typedef QDF_STATUS (*tdls_deregister_peer_callback)(void *userdata, + uint32_t vdev_id, + uint8_t sta_id); + +/* This callback is 
used to update datapath vdev flags */ +typedef QDF_STATUS +(*tdls_dp_vdev_update_flags_callback)(void *cbk_data, + uint8_t sta_id, + uint32_t vdev_param, + bool is_link_up); + +/* This callback is to release vdev ref for tdls offchan param related msg */ +typedef void (*tdls_offchan_parms_callback)(struct wlan_objmgr_vdev *vdev); + +/* This callback is to release vdev ref for tdls_delete_all_peers_ + * callback related msg. + */ +typedef void (*tdls_delete_all_peers_callback)(struct wlan_objmgr_vdev *vdev); + +/** + * tdls_vdev_init_cb() - Callback for initializing the tdls private structure + * @vdev: vdev object + * + * This callback will be used to create the vdev private object and store + * in os_priv. + * + * Return: QDF_STATUS + */ +typedef QDF_STATUS (*tdls_vdev_init_cb)(struct wlan_objmgr_vdev *vdev); + +/** + * tdls_vdev_deinit_cb() - Callback for deinitializing the tdls private struct + * @vdev: vdev object + * + * This callback will be used to destroy the vdev private object. 
+ * + * Return: None + */ +typedef void (*tdls_vdev_deinit_cb)(struct wlan_objmgr_vdev *vdev); + +/** + * struct tdls_start_params - tdls start params + * @config: tdls user config + * @tdls_send_mgmt_req: pass eWNI_SME_TDLS_SEND_MGMT_REQ value + * @tdls_add_sta_req: pass eWNI_SME_TDLS_ADD_STA_REQ value + * @tdls_del_sta_req: pass eWNI_SME_TDLS_DEL_STA_REQ value + * @tdls_update_peer_state: pass WMA_UPDATE_TDLS_PEER_STATE value + * @tdls_del_all_peers: pass eWNI_SME_DEL_ALL_TDLS_PEERS + * @tdls_update_dp_vdev_flags: pass CDP_UPDATE_TDLS_FLAGS + * @tdls_event_cb: tdls event callback + * @tdls_evt_cb_data: tdls event data + * @tdls_peer_context: userdata for register/deregister TDLS peer + * @tdls_reg_peer: register tdls peer with datapath + * @tdls_dereg_peer: deregister tdls peer from datapath + * @tdls_dp_vdev_update: update vdev flags in datapath + * @tdls_osif_init_cb: callback to initialize the tdls priv + * @tdls_osif_deinit_cb: callback to deinitialize the tdls priv + */ +struct tdls_start_params { + struct tdls_user_config config; + uint16_t tdls_send_mgmt_req; + uint16_t tdls_add_sta_req; + uint16_t tdls_del_sta_req; + uint16_t tdls_update_peer_state; + uint16_t tdls_del_all_peers; + uint32_t tdls_update_dp_vdev_flags; + tdls_rx_callback tdls_rx_cb; + void *tdls_rx_cb_data; + tdls_wmm_check tdls_wmm_cb; + void *tdls_wmm_cb_data; + tdls_evt_callback tdls_event_cb; + void *tdls_evt_cb_data; + void *tdls_peer_context; + tdls_register_peer_callback tdls_reg_peer; + tdls_deregister_peer_callback tdls_dereg_peer; + tdls_dp_vdev_update_flags_callback tdls_dp_vdev_update; + tdls_vdev_init_cb tdls_osif_init_cb; + tdls_vdev_deinit_cb tdls_osif_deinit_cb; +}; + +/** + * struct tdls_add_peer_params - add peer request parameter + * @peer_addr: peer mac addr + * @peer_type: peer type + * @vdev_id: vdev id + */ +struct tdls_add_peer_params { + uint8_t peer_addr[QDF_MAC_ADDR_SIZE]; + uint32_t peer_type; + uint32_t vdev_id; +}; + +/** + * struct tdls_add_peer_request - peer 
add request + * @vdev: vdev + * @add_peer_req: add peer request parameters + */ +struct tdls_add_peer_request { + struct wlan_objmgr_vdev *vdev; + struct tdls_add_peer_params add_peer_req; +}; + +/** + * struct tdls_del_peer_params - delete peer request parameter + * @peer_addr: peer mac addr + * @peer_type: peer type + * @vdev_id: vdev id + */ +struct tdls_del_peer_params { + const uint8_t *peer_addr; + uint32_t peer_type; + uint32_t vdev_id; +}; + +/** + * struct tdls_del_peer_request - peer delete request + * @vdev: vdev + * @del_peer_req: delete peer request parameters + */ +struct tdls_del_peer_request { + struct wlan_objmgr_vdev *vdev; + struct tdls_del_peer_params del_peer_req; +}; + +/** + * struct vhtmcsinfo - VHT MCS information + * @rx_mcs_map: RX MCS map 2 bits for each stream, total 8 streams + * @rx_highest: Indicates highest long GI VHT PPDU data rate + * STA can receive. Rate expressed in units of 1 Mbps. + * If this field is 0 this value should not be used to + * consider the highest RX data rate supported. + * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams + * @tx_highest: Indicates highest long GI VHT PPDU data rate + * STA can transmit. Rate expressed in units of 1 Mbps. + * If this field is 0 this value should not be used to + * consider the highest TX data rate supported. 
+ */ +struct vhtmcsinfo { + uint16_t rx_mcs_map; + uint16_t rx_highest; + uint16_t tx_mcs_map; + uint16_t tx_highest; +}; + +/** + * struct vhtcap - VHT capabilities + * + * This structure is the "VHT capabilities element" as + * described in 802.11ac D3.0 8.4.2.160 + * @vht_cap_info: VHT capability info + * @supp_mcs: VHT MCS supported rates + */ +struct vhtcap { + uint32_t vht_capinfo; + struct vhtmcsinfo supp_mcs; +}; + +struct tdls_update_peer_params { + uint8_t peer_addr[QDF_MAC_ADDR_SIZE]; + uint32_t peer_type; + uint32_t vdev_id; + uint16_t capability; + uint8_t extn_capability[WLAN_MAC_MAX_EXTN_CAP]; + uint8_t supported_rates_len; + uint8_t supported_rates[WLAN_MAC_MAX_SUPP_RATES]; + uint8_t htcap_present; + struct htcap_cmn_ie ht_cap; + uint8_t vhtcap_present; + struct vhtcap vht_cap; + uint8_t uapsd_queues; + uint8_t max_sp; + uint8_t supported_channels_len; + uint8_t supported_channels[WLAN_MAC_MAX_SUPP_CHANNELS]; + uint8_t supported_oper_classes_len; + uint8_t supported_oper_classes[WLAN_MAX_SUPP_OPER_CLASSES]; + bool is_qos_wmm_sta; +}; + +struct tdls_update_peer_request { + struct wlan_objmgr_vdev *vdev; + struct tdls_update_peer_params update_peer_req; +}; + +/** + * struct tdls_oper_request - tdls operation request + * @vdev: vdev object + * @peer_addr: MAC address of the TDLS peer + */ +struct tdls_oper_request { + struct wlan_objmgr_vdev *vdev; + uint8_t peer_addr[QDF_MAC_ADDR_SIZE]; +}; + +/** + * struct tdls_oper_config_force_peer_request - tdls enable force peer request + * @vdev: vdev object + * @peer_addr: MAC address of the TDLS peer + * @chan: channel + * @max_latency: maximum latency + * @op_class: operation class + * @min_bandwidth: minimal bandwidth + * @callback: state change callback + */ +struct tdls_oper_config_force_peer_request { + struct wlan_objmgr_vdev *vdev; + uint8_t peer_addr[QDF_MAC_ADDR_SIZE]; + uint32_t chan; + uint32_t max_latency; + uint32_t op_class; + uint32_t min_bandwidth; + tdls_state_change_callback callback; +}; + 
+/** + * struct tdls_info - tdls info + * + * @vdev_id: vdev id + * @tdls_state: tdls state + * @notification_interval_ms: notification interval in ms + * @tx_discovery_threshold: tx discovery threshold + * @tx_teardown_threshold: tx teardown threshold + * @rssi_teardown_threshold: rx teardown threshold + * @rssi_delta: rssi delta + * @tdls_options: tdls options + * @peer_traffic_ind_window: peer traffic indication window + * @peer_traffic_response_timeout: peer traffic response timeout + * @puapsd_mask: puapsd mask + * @puapsd_inactivity_time: puapsd inactivity time + * @puapsd_rx_frame_threshold: puapsd rx frame threshold + * @teardown_notification_ms: tdls teardown notification interval + * @tdls_peer_kickout_threshold: tdls packets threshold + * for peer kickout operation + */ +struct tdls_info { + uint32_t vdev_id; + uint32_t tdls_state; + uint32_t notification_interval_ms; + uint32_t tx_discovery_threshold; + uint32_t tx_teardown_threshold; + int32_t rssi_teardown_threshold; + int32_t rssi_delta; + uint32_t tdls_options; + uint32_t peer_traffic_ind_window; + uint32_t peer_traffic_response_timeout; + uint32_t puapsd_mask; + uint32_t puapsd_inactivity_time; + uint32_t puapsd_rx_frame_threshold; + uint32_t teardown_notification_ms; + uint32_t tdls_peer_kickout_threshold; +}; + +/** + * struct tdls_ch_params - channel parameters + * @chan_id: ID of the channel + * @pwr: power level + * @dfs_set: is dfs supported or not + * @half_rate: is the channel operating at 10MHz + * @quarter_rate: is the channel operating at 5MHz + */ +struct tdls_ch_params { + uint8_t chan_id; + uint8_t pwr; + bool dfs_set; + bool half_rate; + bool quarter_rate; +}; + +/** + * struct tdls_peer_params - TDLS peer capablities parameters + * @is_peer_responder: is peer responder or not + * @peer_uapsd_queue: peer uapsd queue + * @peer_max_sp: peer max SP value + * @peer_buff_sta_support: peer buffer sta supported or not + * @peer_off_chan_support: peer offchannel support + * 
@peer_curr_operclass: peer current operating class + * @self_curr_operclass: self current operating class + * @peer_chanlen: peer channel length + * @peer_chan: peer channel list + * @peer_oper_classlen: peer operating class length + * @peer_oper_class: peer operating class + * @pref_off_channum: peer offchannel number + * @pref_off_chan_bandwidth: peer offchannel bandwidth + * @opclass_for_prefoffchan: operating class for offchannel + */ +struct tdls_peer_params { + uint8_t is_peer_responder; + uint8_t peer_uapsd_queue; + uint8_t peer_max_sp; + uint8_t peer_buff_sta_support; + uint8_t peer_off_chan_support; + uint8_t peer_curr_operclass; + uint8_t self_curr_operclass; + uint8_t peer_chanlen; + struct tdls_ch_params peer_chan[WLAN_MAC_WMI_MAX_SUPP_CHANNELS]; + uint8_t peer_oper_classlen; + uint8_t peer_oper_class[WLAN_MAX_SUPP_OPER_CLASSES]; + uint8_t pref_off_channum; + uint8_t pref_off_chan_bandwidth; + uint8_t opclass_for_prefoffchan; +}; + +/** + * struct tdls_peer_update_state - TDLS peer state parameters + * @vdev_id: vdev id + * @peer_macaddr: peer mac address + * @peer_cap: peer capability + * @resp_reqd: response needed + */ +struct tdls_peer_update_state { + uint32_t vdev_id; + uint8_t peer_macaddr[QDF_MAC_ADDR_SIZE]; + uint32_t peer_state; + struct tdls_peer_params peer_cap; + bool resp_reqd; +}; + +/** + * struct tdls_channel_switch_params - channel switch parameter structure + * @vdev_id: vdev ID + * @peer_mac_addr: Peer mac address + * @tdls_off_ch_bw_offset: Target off-channel bandwidth offset + * @tdls_off_ch: Target Off Channel + * @oper_class: Operating class for target channel + * @is_responder: Responder or initiator + */ +struct tdls_channel_switch_params { + uint32_t vdev_id; + uint8_t peer_mac_addr[QDF_MAC_ADDR_SIZE]; + uint16_t tdls_off_ch_bw_offset; + uint8_t tdls_off_ch; + uint8_t tdls_sw_mode; + uint8_t oper_class; + uint8_t is_responder; +}; + +/** + * enum uapsd_access_cat - U-APSD Access Categories + * @UAPSD_AC_BE: best effort + * 
@UAPSD_AC_BK: background
+ * @UAPSD_AC_VI: video
+ * @UAPSD_AC_VO: voice
+ */
+enum uapsd_access_cat {
+	UAPSD_AC_BE,
+	UAPSD_AC_BK,
+	UAPSD_AC_VI,
+	UAPSD_AC_VO
+};
+
+/**
+ * enum tspec_dir_type - TSPEC Direction type
+ * @TX_DIR: uplink
+ * @RX_DIR: downlink
+ * @BI_DIR: bidirectional
+ */
+enum tspec_dir_type {
+	TX_DIR = 0,
+	RX_DIR = 1,
+	BI_DIR = 2,
+};
+
+/**
+ * struct sta_uapsd_params - uapsd auto trig params
+ * @wmm_ac: WMM access category from 0 to 3
+ * @user_priority: User priority to use in trigger frames
+ * @service_interval: service interval
+ * @suspend_interval: suspend interval
+ * @delay_interval: delay interval
+ */
+struct sta_uapsd_params {
+	uint32_t wmm_ac;
+	uint32_t user_priority;
+	uint32_t service_interval;
+	uint32_t suspend_interval;
+	uint32_t delay_interval;
+};
+
+/**
+ * struct sta_uapsd_trig_params - uapsd trigger parameter
+ * @vdevid: vdev id
+ * @peer_addr: peer address
+ * @auto_triggerparam: trigger parameters
+ * @num_ac: number of access categories
+ */
+struct sta_uapsd_trig_params {
+	uint32_t vdevid;
+	uint8_t peer_addr[QDF_MAC_ADDR_SIZE];
+	struct sta_uapsd_params *auto_triggerparam;
+	uint32_t num_ac;
+};
+
+/**
+ * struct tdls_event_info - firmware tdls event
+ * @vdev_id: vdev id
+ * @peermac: peer mac address
+ * @message_type: message type
+ * @peer_reason: reason
+ */
+struct tdls_event_info {
+	uint8_t vdev_id;
+	struct qdf_mac_addr peermac;
+	uint16_t message_type;
+	uint32_t peer_reason;
+};
+
+/**
+ * struct tdls_event_notify - tdls event notify
+ * @vdev: vdev object
+ * @event: tdls event
+ */
+struct tdls_event_notify {
+	struct wlan_objmgr_vdev *vdev;
+	struct tdls_event_info event;
+};
+
+/**
+ * struct tdls_send_mgmt - TDLS send mgmt frame request
+ * @peer_mac: peer's mac address
+ * @frame_type: Type of TDLS mgmt frame to be sent
+ * @dialog: dialog token used in the frame. 
+ * @status_code: status to be included in the frame
+ * @responder: Tdls request type
+ * @peer_capability: peer capabilities
+ * @len: length of additional IEs
+ * @buf: additional IEs to be included
+ */
+struct tdls_send_mgmt {
+	struct qdf_mac_addr peer_mac;
+	uint8_t frame_type;
+	uint8_t dialog;
+	uint16_t status_code;
+	uint8_t responder;
+	uint32_t peer_capability;
+	uint8_t len;
+	/* Variable length, do not add anything after this */
+	uint8_t buf[];
+};
+
+/**
+ * struct tdls_validate_action_req - tdls validate mgmt request
+ * @action_code: action code
+ * @peer_mac: peer mac address
+ * @dialog_token: dialog code
+ * @status_code: status code to add
+ * @len: len of the frame
+ * @responder: whether to respond or not
+ */
+struct tdls_validate_action_req {
+	uint8_t action_code;
+	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
+	uint8_t dialog_token;
+	uint8_t status_code;
+	size_t len;
+	int responder;
+};
+
+/**
+ * struct tdls_get_all_peers - get all peers from the list
+ * @vdev: vdev object
+ * @buf: output string buffer to hold the peer info
+ * @buf_len: the size of output string buffer
+ */
+struct tdls_get_all_peers {
+	struct wlan_objmgr_vdev *vdev;
+	char *buf;
+	int buf_len;
+};
+
+/**
+ * struct tdls_action_frame_request - tdls send mgmt request
+ * @vdev: vdev object
+ * @chk_frame: This struct is used to validate mgmt frame
+ * @session_id: session id
+ * @vdev_id: vdev id
+ * @cmd_buf: cmd buffer
+ * @len: length of the frame
+ * @use_default_ac: access category
+ * @tdls_mgmt: tdls management
+ */
+struct tdls_action_frame_request {
+	struct wlan_objmgr_vdev *vdev;
+	struct tdls_validate_action_req chk_frame;
+	uint8_t session_id;
+	uint8_t vdev_id;
+	const uint8_t *cmd_buf;
+	uint8_t len;
+	bool use_default_ac;
+	/* Variable length, do not add anything after this */
+	struct tdls_send_mgmt tdls_mgmt;
+};
+
+/**
+ * struct tdls_set_responder_req - tdls set responder in peer
+ * @vdev: vdev object
+ * @peer_mac: peer mac address
+ * @responder: 
whether to respond or not + */ +struct tdls_set_responder_req { + struct wlan_objmgr_vdev *vdev; + uint8_t peer_mac[QDF_MAC_ADDR_SIZE]; + uint8_t responder; +}; + +/** + * struct tdls_sta_notify_params - STA connection notify info + * @vdev: vdev object + * @tdls_prohibited: peer mac addr + * @tdls_chan_swit_prohibited: peer type + * @lfr_roam: is trigger due to lfr + * @session_id: session id + */ +struct tdls_sta_notify_params { + struct wlan_objmgr_vdev *vdev; + bool tdls_prohibited; + bool tdls_chan_swit_prohibited; + bool lfr_roam; + bool user_disconnect; + uint8_t session_id; +}; + +/** + * struct tdls_delete_all_peers_params - TDLS set mode params + * @vdev: vdev object + * @callback: callback to release vdev ref + */ +struct tdls_delete_all_peers_params { + struct wlan_objmgr_vdev *vdev; + tdls_delete_all_peers_callback callback; +}; + +/** + * struct tdls_set_mode_params - TDLS set mode params + * @vdev: vdev object + * @tdls_mode: tdls mode to set + * @update_last: inform to update last tdls mode + * @source: mode change requester + */ +struct tdls_set_mode_params { + struct wlan_objmgr_vdev *vdev; + enum tdls_feature_mode tdls_mode; + bool update_last; + enum tdls_disable_sources source; +}; + +/** + * struct tdls_del_all_tdls_peers - delete all tdls peers + * @msg_type: type of message + * @msg_len: length of message + * @bssid: bssid of peer device + */ +struct tdls_del_all_tdls_peers { + uint16_t msg_type; + uint16_t msg_len; + struct qdf_mac_addr bssid; +}; + +/** + * struct tdls_antenna_switch_request - TDLS antenna switch request + * @vdev: vdev object + * @mode: antenna mode, 1x1 or 2x2 + */ +struct tdls_antenna_switch_request { + struct wlan_objmgr_vdev *vdev; + uint32_t mode; +}; + +/** + * struct tdls_set_offchannel - TDLS set offchannel + * @offchannel: Updated tdls offchannel value. 
+ */ +struct tdls_set_offchannel { + struct wlan_objmgr_vdev *vdev; + uint16_t offchannel; + tdls_offchan_parms_callback callback; +}; + +/** + * struct tdls_set_offchan_mode - TDLS set offchannel mode + * @offchan_mode: Updated tdls offchannel mode value. + */ +struct tdls_set_offchanmode { + struct wlan_objmgr_vdev *vdev; + uint8_t offchan_mode; + tdls_offchan_parms_callback callback; +}; + +/** + * struct tdls_set_offchan_offset - TDLS set offchannel mode + * @offchan_offset: Offchan offset value. + */ +struct tdls_set_secoffchanneloffset { + struct wlan_objmgr_vdev *vdev; + int offchan_offset; + tdls_offchan_parms_callback callback; +}; + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/inc/wlan_tdls_tgt_api.h b/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/inc/wlan_tdls_tgt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..14a19006b9974be2f3976fd5f674d8bb3bda4fe9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/inc/wlan_tdls_tgt_api.h @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_tdls_tgt_api.h + * + * TDLS south bound interface declaration + */ + +#ifndef _WLAN_TDLS_TGT_API_H_ +#define _WLAN_TDLS_TGT_API_H_ +#include +#include "../../core/src/wlan_tdls_main.h" + +/** + * tgt_tdls_set_fw_state() - invoke lmac tdls update fw + * @psoc: soc object + * @tdls_param: update tdls state parameters + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_tdls_set_fw_state(struct wlan_objmgr_psoc *psoc, + struct tdls_info *tdls_param); + +/** + * tgt_tdls_set_peer_state() - invoke lmac tdls update peer state + * @psoc: soc object + * @peer_param: update tdls peer parameters + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_tdls_set_peer_state(struct wlan_objmgr_psoc *psoc, + struct tdls_peer_update_state *peer_param); + +/** + * tgt_tdls_set_offchan_mode() - invoke lmac tdls set off-channel mode + * @psoc: soc object + * @param: set tdls off channel parameters + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_tdls_set_offchan_mode(struct wlan_objmgr_psoc *psoc, + struct tdls_channel_switch_params *param); + +/** + * tgt_tdls_set_uapsd()- invoke lamc tdls set uapsd function + * @psoc: soc object + * @params: uapsd parameters + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_tdls_set_uapsd(struct wlan_objmgr_psoc *psoc, + struct sta_uapsd_trig_params *params); + +/** + * tgt_tdls_send_mgmt_rsp() - process tdls mgmt response + * @pmsg: sheduler msg + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_tdls_send_mgmt_rsp(struct scheduler_msg *pmsg); + +/** + * tgt_tdls_send_mgmt_tx_completion() -process tx completion message + * @pmsg: sheduler msg + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_tdls_send_mgmt_tx_completion(struct scheduler_msg *pmsg); + +/** + * tgt_tdls_del_peer_rsp() - handle TDLS del peer response + * @pmsg: sheduler msg + * + * Return: QDF_STATUS + */ +QDF_STATUS tgt_tdls_del_peer_rsp(struct scheduler_msg *pmsg); + +/** + * tgt_tdls_add_peer_rsp() - handle TDLS add peer response + * @pmsg: sheduler msg + * + * Return: 
QDF_STATUS + */ +QDF_STATUS tgt_tdls_add_peer_rsp(struct scheduler_msg *pmsg); + +/** + * tgt_tdls_register_ev_handler() - invoke lmac register tdls event handler + * @psoc: soc object + * + * Return: QDF_STATUS_SUCCESS for success or error code. + */ +QDF_STATUS tgt_tdls_register_ev_handler(struct wlan_objmgr_psoc *psoc); + +/** + * tgt_tdls_unregister_ev_handler() - invoke lmac unregister tdls event handler + * @psoc: soc object + * + * Return: QDF_STATUS_SUCCESS for success or error code. + */ +QDF_STATUS tgt_tdls_unregister_ev_handler(struct wlan_objmgr_psoc *psoc); + +/** + * tgt_tdls_event_handler() - The callback registered to WMI for tdls events + * @psoc: psoc object + * @info: tdls event info + * + * The callback is registered by tgt as tdls rx ops handler. + * + * Return: 0 for success or err code. + */ +QDF_STATUS +tgt_tdls_event_handler(struct wlan_objmgr_psoc *psoc, + struct tdls_event_info *info); + +/** + * tgt_tdls_mgmt_frame_rx_cb() - callback for rx mgmt frame + * @psoc: soc context + * @peer: peer context + * @buf: rx buffer + * @mgmt_rx_params: mgmt rx parameters + * @frm_type: frame type + * + * This function gets called from mgmt tx/rx component when rx mgmt + * received. + * + * Return: QDF_STATUS_SUCCESS + */ +QDF_STATUS tgt_tdls_mgmt_frame_rx_cb(struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer, qdf_nbuf_t buf, + struct mgmt_rx_event_params *mgmt_rx_params, + enum mgmt_frame_type frm_type); + +/** + * tgt_tdls_peers_deleted_notification()- notification from legacy lim + * @psoc: soc object + * @session_id: session id + * + * This function called from legacy lim to notify tdls peer deletion + * + * Return: None + */ +void tgt_tdls_peers_deleted_notification(struct wlan_objmgr_psoc *psoc, + uint32_t session_id); + +/** + * tgt_tdls_delete_all_peers_indication()- Indication to tdls component + * @psoc: soc object + * @session_id: session id + * + * This function called from legacy lim to tdls component to delete tdls peers. 
+ * + * Return: None + */ +void tgt_tdls_delete_all_peers_indication(struct wlan_objmgr_psoc *psoc, + uint32_t session_id); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/inc/wlan_tdls_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/inc/wlan_tdls_ucfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..a80e9c3c0b296ea7f3c74dc91d5d4af1085bb32e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/inc/wlan_tdls_ucfg_api.h @@ -0,0 +1,279 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */
+
+/**
+ * DOC: wlan_tdls_ucfg_api.h
+ *
+ * TDLS north bound interface declaration
+ */
+
+#if !defined(_WLAN_TDLS_UCFG_API_H_)
+#define _WLAN_TDLS_UCFG_API_H_
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+/**
+ * ucfg_tdls_init() - TDLS module initialization API
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS ucfg_tdls_init(void);
+
+/**
+ * ucfg_tdls_deinit() - TDLS module deinitialization API
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS ucfg_tdls_deinit(void);
+
+/**
+ * ucfg_tdls_psoc_open() - TDLS module psoc open API
+ * @psoc: psoc object
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS ucfg_tdls_psoc_open(struct wlan_objmgr_psoc *psoc);
+
+/**
+ * ucfg_tdls_psoc_close() - TDLS module psoc close API
+ * @psoc: psoc object
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS ucfg_tdls_psoc_close(struct wlan_objmgr_psoc *psoc);
+
+/**
+ * ucfg_tdls_update_config() - TDLS module start
+ * @psoc: psoc object
+ * @req: tdls start parameters
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS ucfg_tdls_update_config(struct wlan_objmgr_psoc *psoc,
+				   struct tdls_start_params *req);
+
+/**
+ * ucfg_tdls_psoc_enable() - TDLS module enable API
+ * @psoc: psoc object
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS ucfg_tdls_psoc_enable(struct wlan_objmgr_psoc *psoc);
+
+/**
+ * ucfg_tdls_psoc_disable() - TDLS module disable API
+ * @psoc: psoc object
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS ucfg_tdls_psoc_disable(struct wlan_objmgr_psoc *psoc);
+
+/**
+ * ucfg_tdls_add_peer() - handle TDLS add peer
+ * @vdev: vdev object
+ * @add_peer_req: add peer request parameters
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS ucfg_tdls_add_peer(struct wlan_objmgr_vdev *vdev,
+			      struct tdls_add_peer_params *add_peer_req);
+
+/**
+ * ucfg_tdls_update_peer() - handle TDLS update peer
+ * @vdev: vdev object
+ * @update_peer: update TDLS request parameters
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS ucfg_tdls_update_peer(struct wlan_objmgr_vdev *vdev,
+				 struct tdls_update_peer_params *update_peer); 
+ +/** + * ucfg_tdls_oper() - handle TDLS oper functions + * @vdev: vdev object + * @macaddr: MAC address of TDLS peer + * @cmd: oper cmd + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_tdls_oper(struct wlan_objmgr_vdev *vdev, + const uint8_t *macaddr, enum tdls_command_type cmd); + +/** + * ucfg_tdls_get_all_peers() - get all tdls peers from the list + * @vdev: vdev object + * @buf: output buffer + * @buflen: length of written data + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_tdls_get_all_peers(struct wlan_objmgr_vdev *vdev, + char *buf, int buflen); + +/** + * ucfg_tdls_send_mgmt_frame() - send TDLS mgmt frame + * @mgmt_req: pointer to TDLS action frame request struct + * + * This will TDLS action frames to peer + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_tdls_send_mgmt_frame( + struct tdls_action_frame_request *mgmt_req); + +/** + * ucfg_tdls_responder() - set responder in TDLS peer + * @msg_req: responder msg + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_tdls_responder(struct tdls_set_responder_req *msg_req); + +/** + * ucfg_tdls_teardown_links() - teardown all TDLS links + * @psoc: psoc object manager + * + * Return: None + */ +QDF_STATUS ucfg_tdls_teardown_links(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_tdls_notify_reset_adapter() - notify reset adapter + * @vdev: vdev object manager + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_tdls_notify_reset_adapter(struct wlan_objmgr_vdev *vdev); + +/** + * ucfg_tdls_notify_sta_connect() - notify sta connect + * @notify_info: sta notification info + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_tdls_notify_sta_connect( + struct tdls_sta_notify_params *notify_info); + +/** + * ucfg_tdls_notify_sta_disconnect() - notify sta disconnect + * @notify_info: sta notification info + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_tdls_notify_sta_disconnect( + struct tdls_sta_notify_params *notify_info); + +/** + * ucfg_tdls_set_operating_mode() - set operating mode + * @set_mode_params: set mode params + * 
+ * Return: QDF_STATUS + */ +QDF_STATUS ucfg_tdls_set_operating_mode( + struct tdls_set_mode_params *set_mode_params); + +/** + * ucfg_tdls_update_rx_pkt_cnt() - update rx pkt count + * @vdev: tdls vdev object + * @mac_addr: peer mac address + * @dest_mac_addr: dest mac address + * + * Return: None + */ +void ucfg_tdls_update_rx_pkt_cnt(struct wlan_objmgr_vdev *vdev, + struct qdf_mac_addr *mac_addr, + struct qdf_mac_addr *dest_mac_addr); + +/** + * ucfg_tdls_update_tx_pkt_cnt() - update tx pkt count + * @vdev: tdls vdev object + * @mac_addr: peer mac address + * + * Return: None + */ +void ucfg_tdls_update_tx_pkt_cnt(struct wlan_objmgr_vdev *vdev, + struct qdf_mac_addr *mac_addr); + +/** + * ucfg_tdls_antenna_switch() - tdls antenna switch + * @vdev: tdls vdev object + * @mode: antenna mode + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_tdls_antenna_switch(struct wlan_objmgr_vdev *vdev, + uint32_t mode); + +/** + * ucfg_set_tdls_offchannel() - Handle TDLS set offchannel + * @vdev: vdev object + * @offchannel: updated offchannel + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_set_tdls_offchannel(struct wlan_objmgr_vdev *vdev, + int offchannel); + +/** + * ucfg_set_tdls_offchan_mode() - Handle TDLS set offchannel mode + * @vdev: vdev object + * @offchanmode: updated off-channel mode + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_set_tdls_offchan_mode(struct wlan_objmgr_vdev *vdev, + int offchanmode); + +/** + * ucfg_set_tdls_secoffchanneloffset() - Handle TDLS set offchannel offset + * @vdev: vdev object + * @offchanoffset: tdls off-channel offset + * + * Return: QDF_STATUS + */ +QDF_STATUS ucfg_set_tdls_secoffchanneloffset(struct wlan_objmgr_vdev *vdev, + int offchanoffset); + +/** + * ucfg_tdls_notify_connect_failure() - This api is called if STA/P2P + * connection fails on one iface and to enable/disable TDLS on the other + * STA/P2P iface which is already connected. 
+ * @psoc: psoc object + * + * Return: void + */ +void ucfg_tdls_notify_connect_failure(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_get_tdls_vdev() - Ucfg api to get tdls specific vdev object + * @psoc: wlan psoc object manager + * @dbg_id: debug id + * + * If TDLS is enabled on any vdev then return the corresponding vdev. + * + * This api increases the ref count of the returned vdev. + * Return: vdev manager pointer or NULL. + */ +struct wlan_objmgr_vdev *ucfg_get_tdls_vdev(struct wlan_objmgr_psoc *psoc, + wlan_objmgr_ref_dbgid dbg_id); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/src/wlan_tdls_tgt_api.c b/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/src/wlan_tdls_tgt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..911899e8b4b12ddf3394b50fcde0db68680cf901 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/src/wlan_tdls_tgt_api.c @@ -0,0 +1,424 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_tdls_tgt_api.c + * + * TDLS south bound interface definitions + */ + +#include "qdf_status.h" +#include +#include "../../core/src/wlan_tdls_main.h" +#include "../../core/src/wlan_tdls_cmds_process.h" +#include "../../core/src/wlan_tdls_mgmt.h" + +static inline struct wlan_lmac_if_tdls_tx_ops * +wlan_psoc_get_tdls_txops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.tx_ops.tdls_tx_ops; +} + +static inline struct wlan_lmac_if_tdls_rx_ops * +wlan_psoc_get_tdls_rxops(struct wlan_objmgr_psoc *psoc) +{ + return &psoc->soc_cb.rx_ops.tdls_rx_ops; +} + +QDF_STATUS tgt_tdls_set_fw_state(struct wlan_objmgr_psoc *psoc, + struct tdls_info *tdls_param) +{ + struct wlan_lmac_if_tdls_tx_ops *tdls_ops = NULL; + + tdls_ops = wlan_psoc_get_tdls_txops(psoc); + if (tdls_ops && tdls_ops->update_fw_state) + return tdls_ops->update_fw_state(psoc, tdls_param); + else + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_tdls_set_peer_state(struct wlan_objmgr_psoc *psoc, + struct tdls_peer_update_state *peer_param) +{ + struct wlan_lmac_if_tdls_tx_ops *tdls_ops = NULL; + + tdls_ops = wlan_psoc_get_tdls_txops(psoc); + if (tdls_ops && tdls_ops->update_peer_state) + return tdls_ops->update_peer_state(psoc, peer_param); + else + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_tdls_set_offchan_mode(struct wlan_objmgr_psoc *psoc, + struct tdls_channel_switch_params *param) +{ + struct wlan_lmac_if_tdls_tx_ops *tdls_ops = NULL; + + tdls_ops = wlan_psoc_get_tdls_txops(psoc); + if (tdls_ops && tdls_ops->set_offchan_mode) + return tdls_ops->set_offchan_mode(psoc, param); + else + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_tdls_set_uapsd(struct wlan_objmgr_psoc *psoc, + struct sta_uapsd_trig_params *params) +{ + struct wlan_lmac_if_tdls_tx_ops *tdls_ops = NULL; + + tdls_ops = wlan_psoc_get_tdls_txops(psoc); + if (tdls_ops && tdls_ops->tdls_set_uapsd) + return tdls_ops->tdls_set_uapsd(psoc, params); + else + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS 
tgt_tdls_send_mgmt_tx_completion(struct scheduler_msg *pmsg) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!pmsg || !pmsg->bodyptr) { + tdls_err("msg: 0x%pK", pmsg); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + + status = tdls_send_mgmt_tx_completion(pmsg->bodyptr); + + return status; +} + +QDF_STATUS tgt_tdls_send_mgmt_rsp(struct scheduler_msg *pmsg) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!pmsg || !pmsg->bodyptr) { + tdls_err("msg: 0x%pK", pmsg); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + + status = tdls_process_send_mgmt_rsp(pmsg->bodyptr); + + return status; +} + +QDF_STATUS tgt_tdls_add_peer_rsp(struct scheduler_msg *pmsg) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!pmsg || !pmsg->bodyptr) { + tdls_err("msg: 0x%pK", pmsg); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + + status = tdls_process_add_peer_rsp(pmsg->bodyptr); + + return status; +} + +QDF_STATUS tgt_tdls_del_peer_rsp(struct scheduler_msg *pmsg) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!pmsg || !pmsg->bodyptr) { + tdls_err("msg: 0x%pK", pmsg); + QDF_ASSERT(0); + return QDF_STATUS_E_NULL_VALUE; + } + + status = tdls_process_del_peer_rsp(pmsg->bodyptr); + + return status; +} + +QDF_STATUS tgt_tdls_register_ev_handler(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_tdls_tx_ops *tdls_ops = NULL; + + tdls_ops = wlan_psoc_get_tdls_txops(psoc); + if (tdls_ops && tdls_ops->tdls_reg_ev_handler) + return tdls_ops->tdls_reg_ev_handler(psoc, NULL); + else + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS tgt_tdls_unregister_ev_handler(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_lmac_if_tdls_tx_ops *tdls_ops = NULL; + + tdls_ops = wlan_psoc_get_tdls_txops(psoc); + if (tdls_ops->tdls_unreg_ev_handler) + return tdls_ops->tdls_unreg_ev_handler(psoc, NULL); + else + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS tgt_tdls_event_flush_cb(struct scheduler_msg *msg) +{ + struct tdls_event_notify *notify; + + notify = msg->bodyptr; + 
if (notify && notify->vdev) { + wlan_objmgr_vdev_release_ref(notify->vdev, WLAN_TDLS_SB_ID); + qdf_mem_free(notify); + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +tgt_tdls_event_handler(struct wlan_objmgr_psoc *psoc, + struct tdls_event_info *info) +{ + struct scheduler_msg msg = {0,}; + struct tdls_event_notify *notify; + uint8_t vdev_id; + QDF_STATUS status; + + if (!psoc || !info) { + tdls_err("psoc: 0x%pK, info: 0x%pK", psoc, info); + return QDF_STATUS_E_NULL_VALUE; + } + tdls_debug("vdev: %d, type: %d, reason: %d" QDF_MAC_ADDR_STR, + info->vdev_id, info->message_type, info->peer_reason, + QDF_MAC_ADDR_ARRAY(info->peermac.bytes)); + notify = qdf_mem_malloc(sizeof(*notify)); + if (!notify) { + tdls_err("mem allocate fail"); + return QDF_STATUS_E_NOMEM; + } + + vdev_id = info->vdev_id; + notify->vdev = + wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + vdev_id, WLAN_TDLS_SB_ID); + if (!notify->vdev) { + tdls_err("null vdev, vdev_id: %d, psoc: 0x%pK", vdev_id, psoc); + return QDF_STATUS_E_INVAL; + } + qdf_mem_copy(¬ify->event, info, sizeof(*info)); + + msg.bodyptr = notify; + msg.callback = tdls_process_evt; + msg.flush_callback = tgt_tdls_event_flush_cb; + + status = scheduler_post_message(QDF_MODULE_ID_TDLS, + QDF_MODULE_ID_TDLS, + QDF_MODULE_ID_TARGET_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("can't post msg to handle tdls event"); + wlan_objmgr_vdev_release_ref(notify->vdev, WLAN_TDLS_SB_ID); + qdf_mem_free(notify); + } + + return status; +} + +static QDF_STATUS tgt_tdls_mgmt_frame_rx_flush_cb(struct scheduler_msg *msg) +{ + struct tdls_rx_mgmt_event *rx_mgmt_event; + + rx_mgmt_event = msg->bodyptr; + + if (rx_mgmt_event) { + if (rx_mgmt_event->rx_mgmt) + qdf_mem_free(rx_mgmt_event->rx_mgmt); + + qdf_mem_free(rx_mgmt_event); + } + msg->bodyptr = NULL; + + return QDF_STATUS_SUCCESS; +} + +static +QDF_STATUS tgt_tdls_mgmt_frame_process_rx_cb( + struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer, + qdf_nbuf_t buf, + struct 
mgmt_rx_event_params *mgmt_rx_params, + enum mgmt_frame_type frm_type) +{ + struct tdls_rx_mgmt_frame *rx_mgmt; + struct tdls_rx_mgmt_event *rx_mgmt_event; + struct tdls_soc_priv_obj *tdls_soc_obj; + struct scheduler_msg msg = {0}; + struct wlan_objmgr_vdev *vdev; + uint32_t vdev_id; + uint8_t *pdata; + QDF_STATUS status; + + tdls_soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_TDLS); + if (!tdls_soc_obj) { + tdls_err("tdls ctx is NULL, drop this frame"); + return QDF_STATUS_E_FAILURE; + } + + if (!peer) { + vdev = tdls_get_vdev(psoc, WLAN_TDLS_SB_ID); + if (!vdev) { + tdls_err("current tdls vdev is null, can't get vdev id"); + return QDF_STATUS_E_FAILURE; + } + vdev_id = wlan_vdev_get_id(vdev); + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_SB_ID); + } else { + vdev = wlan_peer_get_vdev(peer); + if (!vdev) { + tdls_err("vdev is NULL in peer, drop this frame"); + return QDF_STATUS_E_FAILURE; + } + vdev_id = wlan_vdev_get_id(vdev); + } + + rx_mgmt_event = qdf_mem_malloc_atomic(sizeof(*rx_mgmt_event)); + if (!rx_mgmt_event) { + tdls_debug_rl("Failed to allocate rx mgmt event"); + return QDF_STATUS_E_NOMEM; + } + + rx_mgmt = qdf_mem_malloc_atomic(sizeof(*rx_mgmt) + + mgmt_rx_params->buf_len); + if (!rx_mgmt) { + tdls_debug_rl("Failed to allocate rx mgmt frame"); + qdf_mem_free(rx_mgmt_event); + return QDF_STATUS_E_NOMEM; + } + + pdata = (uint8_t *)qdf_nbuf_data(buf); + rx_mgmt->frame_len = mgmt_rx_params->buf_len; + rx_mgmt->rx_chan = mgmt_rx_params->channel; + rx_mgmt->vdev_id = vdev_id; + rx_mgmt->frm_type = frm_type; + rx_mgmt->rx_rssi = mgmt_rx_params->rssi; + + rx_mgmt_event->rx_mgmt = rx_mgmt; + rx_mgmt_event->tdls_soc_obj = tdls_soc_obj; + qdf_mem_copy(rx_mgmt->buf, pdata, mgmt_rx_params->buf_len); + msg.type = TDLS_EVENT_RX_MGMT; + msg.bodyptr = rx_mgmt_event; + msg.callback = tdls_process_rx_frame; + msg.flush_callback = tgt_tdls_mgmt_frame_rx_flush_cb; + status = scheduler_post_message(QDF_MODULE_ID_TDLS, + QDF_MODULE_ID_TDLS, + 
QDF_MODULE_ID_TARGET_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_mem_free(rx_mgmt); + qdf_mem_free(rx_mgmt_event); + } + + qdf_nbuf_free(buf); + + return status; +} + +QDF_STATUS tgt_tdls_mgmt_frame_rx_cb( + struct wlan_objmgr_psoc *psoc, + struct wlan_objmgr_peer *peer, + qdf_nbuf_t buf, + struct mgmt_rx_event_params *mgmt_rx_params, + enum mgmt_frame_type frm_type) +{ + QDF_STATUS status; + + tdls_debug("psoc:%pK, peer:%pK, type:%d", psoc, peer, frm_type); + + + if (!buf) { + tdls_err("rx frame buff is null buf:%pK", buf); + return QDF_STATUS_E_INVAL; + } + + if (!mgmt_rx_params || !psoc) { + tdls_err("input is NULL mgmt_rx_params:%pK psoc:%pK, peer:%pK", + mgmt_rx_params, psoc, peer); + status = QDF_STATUS_E_INVAL; + goto release_nbuf; + } + + status = wlan_objmgr_peer_try_get_ref(peer, WLAN_TDLS_SB_ID); + if (QDF_STATUS_SUCCESS != status) + goto release_nbuf; + + status = tgt_tdls_mgmt_frame_process_rx_cb(psoc, peer, buf, + mgmt_rx_params, frm_type); + + wlan_objmgr_peer_release_ref(peer, WLAN_TDLS_SB_ID); + + if (QDF_STATUS_SUCCESS != status) +release_nbuf: + qdf_nbuf_free(buf); + return status; +} + +void tgt_tdls_peers_deleted_notification(struct wlan_objmgr_psoc *psoc, + uint32_t session_id) +{ + tdls_peers_deleted_notification(psoc, session_id); +} + +/** + * tgt_tdls_delete_all_peers_ind_callback()- Callback to call from + * TDLS component + * @psoc: soc object + * @session_id: session id + * + * This function release the obj mgr vdev ref + * + * Return: None + */ +static void tgt_tdls_delete_all_peers_ind_callback( + struct wlan_objmgr_vdev *vdev) +{ + if (!vdev) { + tdls_err("vdev is NULL"); + return; + } + + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_SB_ID); +} + +void tgt_tdls_delete_all_peers_indication(struct wlan_objmgr_psoc *psoc, + uint32_t session_id) +{ + struct wlan_objmgr_vdev *vdev; + struct tdls_delete_all_peers_params delete_peers_ind; + QDF_STATUS status; + + vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, + session_id, + 
WLAN_TDLS_SB_ID); + + if (!vdev) { + tdls_err("vdev not exist for the session id %d", + session_id); + return; + } + + delete_peers_ind.vdev = vdev; + delete_peers_ind.callback = tgt_tdls_delete_all_peers_ind_callback; + status = tdls_delete_all_peers_indication(&delete_peers_ind); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("tdls_delete_all_peers_indication failed"); + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_SB_ID); + } +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/src/wlan_tdls_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/src/wlan_tdls_ucfg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..fd4689003b18b8b3ded800fd1a018819d08e8634 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/src/wlan_tdls_ucfg_api.c @@ -0,0 +1,1020 @@ +/* + * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wlan_tdls_ucfg_api.c + * + * TDLS north bound interface definitions + */ + +#include +#include +#include "../../core/src/wlan_tdls_main.h" +#include "../../core/src/wlan_tdls_cmds_process.h" +#include "../../core/src/wlan_tdls_ct.h" +#include "../../core/src/wlan_tdls_mgmt.h" +#include +#include +#include "wlan_policy_mgr_api.h" +#include "wlan_scan_ucfg_api.h" + +QDF_STATUS ucfg_tdls_init(void) +{ + QDF_STATUS status; + + tdls_notice("tdls module dispatcher init"); + status = wlan_objmgr_register_psoc_create_handler(WLAN_UMAC_COMP_TDLS, + tdls_psoc_obj_create_notification, NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("Failed to register psoc create handler for tdls"); + return status; + } + + status = wlan_objmgr_register_psoc_destroy_handler(WLAN_UMAC_COMP_TDLS, + tdls_psoc_obj_destroy_notification, NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("Failed to register psoc delete handler for tdls"); + goto fail_delete_psoc; + } + + status = wlan_objmgr_register_vdev_create_handler(WLAN_UMAC_COMP_TDLS, + tdls_vdev_obj_create_notification, NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("Failed to register vdev create handler for tdls"); + goto fail_create_vdev; + } + + status = wlan_objmgr_register_vdev_destroy_handler(WLAN_UMAC_COMP_TDLS, + tdls_vdev_obj_destroy_notification, NULL); + + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("Failed to register vdev create handler for tdls"); + goto fail_delete_vdev; + } + tdls_notice("tdls module dispatcher init done"); + + return status; +fail_delete_vdev: + wlan_objmgr_unregister_vdev_create_handler(WLAN_UMAC_COMP_TDLS, + tdls_vdev_obj_create_notification, NULL); + +fail_create_vdev: + wlan_objmgr_unregister_psoc_destroy_handler(WLAN_UMAC_COMP_TDLS, + tdls_psoc_obj_destroy_notification, NULL); + +fail_delete_psoc: + wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_TDLS, + tdls_psoc_obj_create_notification, NULL); + + return status; +} + +QDF_STATUS 
ucfg_tdls_deinit(void)
{
	QDF_STATUS ret;

	tdls_notice("tdls module dispatcher deinit");
	ret = wlan_objmgr_unregister_psoc_create_handler(WLAN_UMAC_COMP_TDLS,
		tdls_psoc_obj_create_notification, NULL);
	if (QDF_IS_STATUS_ERROR(ret))
		tdls_err("Failed to unregister psoc create handler");

	ret = wlan_objmgr_unregister_psoc_destroy_handler(WLAN_UMAC_COMP_TDLS,
		tdls_psoc_obj_destroy_notification, NULL);
	if (QDF_IS_STATUS_ERROR(ret))
		tdls_err("Failed to unregister psoc delete handler");

	ret = wlan_objmgr_unregister_vdev_create_handler(WLAN_UMAC_COMP_TDLS,
		tdls_vdev_obj_create_notification, NULL);
	if (QDF_IS_STATUS_ERROR(ret))
		tdls_err("Failed to unregister vdev create handler");

	ret = wlan_objmgr_unregister_vdev_destroy_handler(WLAN_UMAC_COMP_TDLS,
		tdls_vdev_obj_destroy_notification, NULL);

	if (QDF_IS_STATUS_ERROR(ret))
		tdls_err("Failed to unregister vdev delete handler");

	/* NOTE: only the status of the LAST unregister call is returned;
	 * earlier failures are logged but not propagated */
	return ret;
}

/* reset per-psoc TDLS bookkeeping and create the connection-tracker lock */
static QDF_STATUS tdls_global_init(struct tdls_soc_priv_obj *soc_obj)
{

	soc_obj->connected_peer_count = 0;
	soc_obj->tdls_nss_switch_in_progress = false;
	soc_obj->tdls_teardown_peers_cnt = 0;
	soc_obj->tdls_nss_teardown_complete = false;
	soc_obj->tdls_nss_transition_mode = TDLS_NSS_TRANSITION_S_UNKNOWN;
	soc_obj->enable_tdls_connection_tracker = false;
	soc_obj->tdls_external_peer_count = 0;
	soc_obj->tdls_disable_in_progress = false;

	qdf_spinlock_create(&soc_obj->tdls_ct_spinlock);

	return QDF_STATUS_SUCCESS;
}

/* counterpart of tdls_global_init(): destroys the connection-tracker lock */
static QDF_STATUS tdls_global_deinit(struct tdls_soc_priv_obj *soc_obj)
{
	qdf_spinlock_destroy(&soc_obj->tdls_ct_spinlock);
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS ucfg_tdls_psoc_open(struct wlan_objmgr_psoc *psoc)
{
	QDF_STATUS status;
	struct tdls_soc_priv_obj *soc_obj;

	tdls_debug("tdls psoc open");
	soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc,
							WLAN_UMAC_COMP_TDLS);
	if (!soc_obj) {
		tdls_err("Failed to get tdls psoc component");
		return QDF_STATUS_E_FAILURE;
	}

	status
	       = tdls_global_init(soc_obj);

	return status;
}

/*
 * ucfg_tdls_update_config() - copy start-time configuration and legacy
 * PE/WMA/datapath callbacks from @req into the TDLS soc private object and
 * register the policy-manager session counters.
 */
QDF_STATUS ucfg_tdls_update_config(struct wlan_objmgr_psoc *psoc,
				   struct tdls_start_params *req)
{
	struct tdls_soc_priv_obj *soc_obj;
	uint32_t tdls_feature_flags;
	struct policy_mgr_tdls_cbacks tdls_pm_call_backs;
	uint8_t sta_idx;

	tdls_debug("tdls update config ");
	if (!psoc || !req) {
		tdls_err("psoc: 0x%pK, req: 0x%pK", psoc, req);
		return QDF_STATUS_E_FAILURE;
	}

	soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc,
							WLAN_UMAC_COMP_TDLS);
	if (!soc_obj) {
		tdls_err("Failed to get tdls psoc component");
		return QDF_STATUS_E_FAILURE;
	}

	soc_obj->tdls_rx_cb = req->tdls_rx_cb;
	soc_obj->tdls_rx_cb_data = req->tdls_rx_cb_data;

	soc_obj->tdls_wmm_cb = req->tdls_wmm_cb;
	soc_obj->tdls_wmm_cb_data = req->tdls_wmm_cb_data;

	soc_obj->tdls_event_cb = req->tdls_event_cb;
	soc_obj->tdls_evt_cb_data = req->tdls_evt_cb_data;

	/* Save callbacks to register/deregister TDLS sta with datapath */
	soc_obj->tdls_reg_peer = req->tdls_reg_peer;
	soc_obj->tdls_dereg_peer = req->tdls_dereg_peer;
	soc_obj->tdls_peer_context = req->tdls_peer_context;

	/* Save legacy PE/WMA commands in TDLS soc object */
	soc_obj->tdls_send_mgmt_req = req->tdls_send_mgmt_req;
	soc_obj->tdls_add_sta_req = req->tdls_add_sta_req;
	soc_obj->tdls_del_sta_req = req->tdls_del_sta_req;
	soc_obj->tdls_update_peer_state = req->tdls_update_peer_state;
	soc_obj->tdls_del_all_peers = req->tdls_del_all_peers;
	soc_obj->tdls_update_dp_vdev_flags = req->tdls_update_dp_vdev_flags;
	soc_obj->tdls_dp_vdev_update = req->tdls_dp_vdev_update;
	soc_obj->tdls_osif_init_cb = req->tdls_osif_init_cb;
	soc_obj->tdls_osif_deinit_cb = req->tdls_osif_deinit_cb;
	tdls_pm_call_backs.tdls_notify_increment_session =
		tdls_notify_increment_session;

	tdls_pm_call_backs.tdls_notify_decrement_session =
		tdls_notify_decrement_session;
	if (QDF_STATUS_SUCCESS != policy_mgr_register_tdls_cb(
			psoc, &tdls_pm_call_backs)) {
		tdls_err("policy manager callback registration failed ");
		return QDF_STATUS_E_FAILURE;
	}

	/* Update TDLS user config */
	qdf_mem_copy(&soc_obj->tdls_configs, &req->config, sizeof(req->config));
	tdls_feature_flags = soc_obj->tdls_configs.tdls_feature_flags;

	/* derive the operating mode from the feature flags */
	if (!TDLS_IS_IMPLICIT_TRIG_ENABLED(tdls_feature_flags))
		soc_obj->tdls_current_mode = TDLS_SUPPORT_EXP_TRIG_ONLY;
	else if (TDLS_IS_EXTERNAL_CONTROL_ENABLED(tdls_feature_flags))
		soc_obj->tdls_current_mode = TDLS_SUPPORT_EXT_CONTROL;
	else
		soc_obj->tdls_current_mode = TDLS_SUPPORT_IMP_MODE;

	soc_obj->tdls_last_mode = soc_obj->tdls_current_mode;

	if (TDLS_IS_BUFFER_STA_ENABLED(tdls_feature_flags) ||
	    TDLS_IS_SLEEP_STA_ENABLED(tdls_feature_flags) ||
	    TDLS_IS_OFF_CHANNEL_ENABLED(tdls_feature_flags))
		soc_obj->max_num_tdls_sta =
			WLAN_TDLS_STA_P_UAPSD_OFFCHAN_MAX_NUM;
	else
		soc_obj->max_num_tdls_sta = WLAN_TDLS_STA_MAX_NUM;

	/* mark every connection-table slot invalid;
	 * 255 presumably means "no session" -- TODO confirm sentinel */
	for (sta_idx = 0; sta_idx < soc_obj->max_num_tdls_sta; sta_idx++) {
		soc_obj->tdls_conn_info[sta_idx].sta_id = INVALID_TDLS_PEER_ID;
		soc_obj->tdls_conn_info[sta_idx].index =
			INVALID_TDLS_PEER_INDEX;
		soc_obj->tdls_conn_info[sta_idx].session_id = 255;
		qdf_mem_zero(&soc_obj->tdls_conn_info[sta_idx].peer_mac,
			     QDF_MAC_ADDR_SIZE);
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * ucfg_tdls_psoc_enable() - register WMI event handlers, the serialization
 * component-info callback for scan commands, and the mgmt tx/rx callbacks.
 */
QDF_STATUS ucfg_tdls_psoc_enable(struct wlan_objmgr_psoc *psoc)
{
	QDF_STATUS status;

	tdls_notice("psoc tdls enable: 0x%pK", psoc);
	if (!psoc) {
		tdls_err("NULL psoc");
		return QDF_STATUS_E_FAILURE;
	}

	status = tgt_tdls_register_ev_handler(psoc);

	if (status != QDF_STATUS_SUCCESS)
		return status;

	status = wlan_serialization_register_comp_info_cb(psoc,
			WLAN_UMAC_COMP_TDLS,
			WLAN_SER_CMD_SCAN,
			tdls_scan_serialization_comp_info_cb);
	if (QDF_STATUS_SUCCESS != status) {
		tdls_err("Serialize scan cmd register failed ");
		return status;
	}

	/* register callbacks with tx/rx mgmt */
	status = tdls_mgmt_rx_ops(psoc, true);
	if (status != QDF_STATUS_SUCCESS)
		tdls_err("Failed to register mgmt rx callback, status:%d",
			 status);
	return status;
}

/*
 * ucfg_tdls_psoc_disable() - undo ucfg_tdls_psoc_enable(): unregister the
 * WMI event handlers and mgmt rx callback (failures only logged), then
 * clear the event callback from the soc private object.
 */
QDF_STATUS ucfg_tdls_psoc_disable(struct wlan_objmgr_psoc *psoc)
{
	QDF_STATUS status;
	struct tdls_soc_priv_obj *soc_obj = NULL;

	tdls_notice("psoc tdls disable: 0x%pK", psoc);
	if (!psoc) {
		tdls_err("NULL psoc");
		return QDF_STATUS_E_FAILURE;
	}

	status = tgt_tdls_unregister_ev_handler(psoc);
	if (QDF_IS_STATUS_ERROR(status))
		tdls_err("Failed to unregister tdls event handler");

	status = tdls_mgmt_rx_ops(psoc, false);
	if (QDF_IS_STATUS_ERROR(status))
		tdls_err("Failed to unregister mgmt rx callback");

	soc_obj = wlan_objmgr_psoc_get_comp_private_obj(psoc,
							WLAN_UMAC_COMP_TDLS);
	if (!soc_obj) {
		tdls_err("Failed to get tdls psoc component");
		return QDF_STATUS_E_FAILURE;
	}

	soc_obj->tdls_event_cb = NULL;
	soc_obj->tdls_evt_cb_data = NULL;

	return status;
}

/* counterpart of ucfg_tdls_psoc_open(): tears down global TDLS state */
QDF_STATUS ucfg_tdls_psoc_close(struct wlan_objmgr_psoc *psoc)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct tdls_soc_priv_obj *tdls_soc;

	tdls_notice("tdls psoc close");
	tdls_soc = wlan_objmgr_psoc_get_comp_private_obj(psoc,
							WLAN_UMAC_COMP_TDLS);
	if (!tdls_soc) {
		tdls_err("Failed to get tdls psoc component");
		return QDF_STATUS_E_FAILURE;
	}

	status = tdls_global_deinit(tdls_soc);

	return status;
}

/*
 * ucfg_tdls_add_peer() - post a TDLS_CMD_ADD_STA message to the scheduler.
 * Takes a WLAN_TDLS_NB_ID vdev ref that is released by the message
 * consumer, or here on any failure.
 */
QDF_STATUS ucfg_tdls_add_peer(struct wlan_objmgr_vdev *vdev,
			      struct tdls_add_peer_params *add_peer_req)
{
	struct scheduler_msg msg = {0, };
	struct tdls_add_peer_request *req;
	QDF_STATUS status;

	if (!vdev || !add_peer_req) {
		tdls_err("vdev: %pK, req %pK", vdev, add_peer_req);
		return QDF_STATUS_E_NULL_VALUE;
	}
	tdls_debug("vdevid: %d, peertype: %d",
		   add_peer_req->vdev_id, add_peer_req->peer_type);

	status = wlan_objmgr_vdev_try_get_ref(vdev, WLAN_TDLS_NB_ID);
	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("can't get vdev");
		return status;
	}

	req = qdf_mem_malloc(sizeof(*req));
	if (!req) {
		tdls_err("mem allocate
fail");
		status = QDF_STATUS_E_NOMEM;
		goto dec_ref;
	}

	qdf_mem_copy(&req->add_peer_req, add_peer_req, sizeof(*add_peer_req));
	req->vdev = vdev;

	msg.bodyptr = req;
	msg.callback = tdls_process_cmd;
	msg.type = TDLS_CMD_ADD_STA;
	status = scheduler_post_message(QDF_MODULE_ID_HDD,
					QDF_MODULE_ID_TDLS,
					QDF_MODULE_ID_OS_IF, &msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("post add peer msg fail");
		qdf_mem_free(req);
		goto dec_ref;
	}

	return status;
dec_ref:
	/* failure path: drop the vdev ref taken above */
	wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID);
	return status;
}

/*
 * ucfg_tdls_update_peer() - post a TDLS_CMD_CHANGE_STA message.  Same
 * vdev-ref ownership rules as ucfg_tdls_add_peer().
 */
QDF_STATUS ucfg_tdls_update_peer(struct wlan_objmgr_vdev *vdev,
				 struct tdls_update_peer_params *update_peer)
{
	struct scheduler_msg msg = {0,};
	struct tdls_update_peer_request *req;
	QDF_STATUS status;

	if (!vdev || !update_peer) {
		tdls_err("vdev: %pK, update_peer: %pK", vdev, update_peer);
		return QDF_STATUS_E_NULL_VALUE;
	}

	tdls_debug("vdev_id: %d, peertype: %d",
		   update_peer->vdev_id, update_peer->peer_type);
	status = wlan_objmgr_vdev_try_get_ref(vdev, WLAN_TDLS_NB_ID);
	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("can't get vdev");
		return status;
	}
	req = qdf_mem_malloc(sizeof(*req));
	if (!req) {
		tdls_err("mem allocate fail");
		status = QDF_STATUS_E_NOMEM;
		goto dec_ref;
	}
	qdf_mem_copy(&req->update_peer_req, update_peer, sizeof(*update_peer));
	req->vdev = vdev;

	msg.bodyptr = req;
	msg.callback = tdls_process_cmd;
	msg.type = TDLS_CMD_CHANGE_STA;
	status = scheduler_post_message(QDF_MODULE_ID_HDD,
					QDF_MODULE_ID_TDLS,
					QDF_MODULE_ID_OS_IF, &msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("post update peer msg fail");
		qdf_mem_free(req);
		goto dec_ref;
	}

	return status;
dec_ref:
	wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID);
	return status;
}

#ifdef WLAN_DEBUG
/* map a TDLS command type to a printable name for log messages */
static char *tdls_get_oper_str(enum tdls_command_type cmd_type)
{
	switch (cmd_type) {
	case TDLS_CMD_ENABLE_LINK:
		return "Enable_TDLS_LINK";
	case
	     TDLS_CMD_DISABLE_LINK:
		return "DISABLE_TDLS_LINK";
	case TDLS_CMD_REMOVE_FORCE_PEER:
		return "REMOVE_FORCE_PEER";
	case TDLS_CMD_CONFIG_FORCE_PEER:
		return "CONFIG_FORCE_PEER";
	default:
		return "ERR:UNKNOWN OPER";
	}
}
#endif

/*
 * ucfg_tdls_oper() - post a link-oper command (@cmd) for @macaddr.
 * NOTE(review): tdls_get_oper_str() is only defined under WLAN_DEBUG but
 * is referenced below; presumably the tdls_debug/tdls_err macros compile
 * their arguments out when WLAN_DEBUG is unset -- confirm.
 */
QDF_STATUS ucfg_tdls_oper(struct wlan_objmgr_vdev *vdev,
			  const uint8_t *macaddr, enum tdls_command_type cmd)
{
	struct scheduler_msg msg = {0,};
	struct tdls_oper_request *req;
	QDF_STATUS status;

	if (!vdev || !macaddr) {
		tdls_err("vdev: %pK, mac %pK", vdev, macaddr);
		return QDF_STATUS_E_NULL_VALUE;
	}

	tdls_debug("%s for peer " QDF_MAC_ADDR_STR,
		   tdls_get_oper_str(cmd),
		   QDF_MAC_ADDR_ARRAY(macaddr));

	req = qdf_mem_malloc(sizeof(*req));
	if (!req) {
		tdls_err("%s: mem allocate fail", tdls_get_oper_str(cmd));
		return QDF_STATUS_E_NOMEM;
	}

	status = wlan_objmgr_vdev_try_get_ref(vdev, WLAN_TDLS_NB_ID);
	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("can't get vdev");
		goto error;
	}

	qdf_mem_copy(req->peer_addr, macaddr, QDF_MAC_ADDR_SIZE);
	req->vdev = vdev;

	msg.bodyptr = req;
	msg.callback = tdls_process_cmd;
	msg.type = cmd;
	status = scheduler_post_message(QDF_MODULE_ID_HDD,
					QDF_MODULE_ID_TDLS,
					QDF_MODULE_ID_OS_IF, &msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("post msg for %s fail", tdls_get_oper_str(cmd));
		goto dec_ref;
	}

	return status;
dec_ref:
	wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID);
error:
	qdf_mem_free(req);
	return status;
}

/*
 * ucfg_tdls_get_all_peers() - post TDLS_CMD_GET_ALL_PEERS; the consumer
 * fills @buf (up to @buflen) with the current peer list.
 */
QDF_STATUS ucfg_tdls_get_all_peers(struct wlan_objmgr_vdev *vdev,
				   char *buf, int buflen)
{
	struct scheduler_msg msg = {0, };
	struct tdls_get_all_peers *tdls_peers;
	QDF_STATUS status;

	tdls_peers = qdf_mem_malloc(sizeof(*tdls_peers));

	if (!tdls_peers) {
		tdls_err("mem allocate fail");
		return QDF_STATUS_E_NOMEM;
	}

	tdls_peers->vdev = vdev;
	tdls_peers->buf_len = buflen;
	tdls_peers->buf = buf;

	msg.bodyptr = tdls_peers;
	msg.callback = tdls_process_cmd;
	msg.type =
TDLS_CMD_GET_ALL_PEERS; + status = scheduler_post_message(QDF_MODULE_ID_HDD, + QDF_MODULE_ID_TDLS, + QDF_MODULE_ID_OS_IF, &msg); + + if (status != QDF_STATUS_SUCCESS) + qdf_mem_free(tdls_peers); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS tdls_send_mgmt_frame_flush_callback(struct scheduler_msg *msg) +{ + struct tdls_action_frame_request *req; + + if (!msg || !msg->bodyptr) { + tdls_err("msg or msg->bodyptr is NULL"); + return QDF_STATUS_E_NULL_VALUE; + } + req = msg->bodyptr; + if (req->vdev) + wlan_objmgr_vdev_release_ref(req->vdev, WLAN_TDLS_NB_ID); + + qdf_mem_free(req); + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS ucfg_tdls_post_msg_flush_cb(struct scheduler_msg *msg) +{ + void *ptr = msg->bodyptr; + struct wlan_objmgr_vdev *vdev = NULL; + + switch (msg->type) { + case TDLS_CMD_TEARDOWN_LINKS: + case TDLS_NOTIFY_RESET_ADAPTERS: + ptr = NULL; + break; + case TDLS_NOTIFY_STA_CONNECTION: + vdev = ((struct tdls_sta_notify_params *)ptr)->vdev; + break; + case TDLS_NOTIFY_STA_DISCONNECTION: + vdev = ((struct tdls_sta_notify_params *)ptr)->vdev; + break; + case TDLS_CMD_SET_TDLS_MODE: + vdev = ((struct tdls_set_mode_params *)ptr)->vdev; + break; + case TDLS_CMD_TX_ACTION: + case TDLS_CMD_SET_RESPONDER: + break; + } + + if (vdev) + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID); + + if (ptr) + qdf_mem_free(ptr); + + msg->bodyptr = NULL; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_tdls_send_mgmt_frame( + struct tdls_action_frame_request *req) +{ + struct scheduler_msg msg = {0, }; + struct tdls_action_frame_request *mgmt_req; + QDF_STATUS status; + + if (!req || !req->vdev) { + tdls_err("Invalid mgmt req params %pK", req); + return QDF_STATUS_E_NULL_VALUE; + } + + mgmt_req = qdf_mem_malloc(sizeof(*mgmt_req) + + req->len); + if (!mgmt_req) { + tdls_err("mem allocate fail"); + return QDF_STATUS_E_NOMEM; + } + + qdf_mem_copy(mgmt_req, req, sizeof(*req)); + + /*populate the additional IE's */ + if ((0 != req->len) && (NULL != req->cmd_buf)) 
	{
		qdf_mem_copy(mgmt_req->tdls_mgmt.buf, req->cmd_buf,
			     req->len);
		mgmt_req->tdls_mgmt.len = req->len;
	} else {
		mgmt_req->tdls_mgmt.len = 0;
	}

	tdls_debug("vdev id: %d, session id : %d", mgmt_req->vdev_id,
		   mgmt_req->session_id);
	status = wlan_objmgr_vdev_try_get_ref(req->vdev, WLAN_TDLS_NB_ID);

	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("Unable to get vdev reference for tdls module");
		goto mem_free;
	}

	msg.bodyptr = mgmt_req;
	msg.callback = tdls_process_cmd;
	msg.flush_callback = tdls_send_mgmt_frame_flush_callback;
	msg.type = TDLS_CMD_TX_ACTION;

	status = scheduler_post_msg(QDF_MODULE_ID_OS_IF, &msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("Failed to post the mgmt tx cmd to scheduler thread");
		goto release_ref;
	}

	return status;

release_ref:
	wlan_objmgr_vdev_release_ref(req->vdev, WLAN_TDLS_NB_ID);
mem_free:
	qdf_mem_free(mgmt_req);
	return status;
}

/*
 * ucfg_tdls_responder() - post TDLS_CMD_SET_RESPONDER with a private copy
 * of @req.  No vdev ref is taken here; the flush callback treats
 * SET_RESPONDER payloads as plain allocations.
 */
QDF_STATUS ucfg_tdls_responder(struct tdls_set_responder_req *req)
{
	struct scheduler_msg msg = {0, };
	struct tdls_set_responder_req *msg_req;
	QDF_STATUS status;

	if (!req || !req->vdev) {
		tdls_err("invalid input %pK", req);
		return QDF_STATUS_E_NULL_VALUE;
	}

	msg_req = qdf_mem_malloc(sizeof(*msg_req));
	if (!msg_req)
		return QDF_STATUS_E_NULL_VALUE;

	msg_req->responder = req->responder;
	msg_req->vdev = req->vdev;
	qdf_mem_copy(msg_req->peer_mac, req->peer_mac, QDF_MAC_ADDR_SIZE);

	msg.bodyptr = msg_req;
	msg.callback = tdls_process_cmd;
	msg.flush_callback = ucfg_tdls_post_msg_flush_cb;
	msg.type = TDLS_CMD_SET_RESPONDER;
	status = scheduler_post_message(QDF_MODULE_ID_HDD,
					QDF_MODULE_ID_TDLS,
					QDF_MODULE_ID_OS_IF, &msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("failed to post msg, status %d", status);
		qdf_mem_free(msg_req);
	}

	return status;
}

/* post TDLS_CMD_TEARDOWN_LINKS; bodyptr is the psoc (not freed on flush) */
QDF_STATUS ucfg_tdls_teardown_links(struct wlan_objmgr_psoc *psoc)
{
	QDF_STATUS status;
	struct scheduler_msg msg = {0, };

	tdls_debug("Enter ");

	msg.bodyptr = psoc;
	msg.callback = tdls_process_cmd;
	msg.flush_callback = ucfg_tdls_post_msg_flush_cb;
	msg.type = TDLS_CMD_TEARDOWN_LINKS;
	status = scheduler_post_message(QDF_MODULE_ID_HDD,
					QDF_MODULE_ID_TDLS,
					QDF_MODULE_ID_OS_IF, &msg);

	tdls_debug("Exit ");
	return status;
}

/* post TDLS_NOTIFY_RESET_ADAPTERS; bodyptr is the vdev (not freed on flush) */
QDF_STATUS ucfg_tdls_notify_reset_adapter(struct wlan_objmgr_vdev *vdev)
{
	QDF_STATUS status;
	struct scheduler_msg msg = {0, };

	if (!vdev) {
		tdls_err("vdev is NULL ");
		return QDF_STATUS_E_NULL_VALUE;
	}
	tdls_debug("Enter ");
	msg.bodyptr = vdev;
	msg.callback = tdls_process_cmd;
	msg.flush_callback = ucfg_tdls_post_msg_flush_cb;
	msg.type = TDLS_NOTIFY_RESET_ADAPTERS;
	status = scheduler_post_message(QDF_MODULE_ID_HDD,
					QDF_MODULE_ID_TDLS,
					QDF_MODULE_ID_OS_IF, &msg);
	return status;
}

/*
 * ucfg_tdls_notify_sta_connect() - queue TDLS_NOTIFY_STA_CONNECTION.
 * NOTE(review): this releases a WLAN_TDLS_NB_ID vdev ref on failure paths,
 * implying the CALLER acquired it and ownership transfers here -- confirm
 * against the osif caller.
 */
QDF_STATUS ucfg_tdls_notify_sta_connect(
			struct tdls_sta_notify_params *notify_info)
{
	struct scheduler_msg msg = {0, };
	struct tdls_sta_notify_params *notify;
	QDF_STATUS status;

	if (!notify_info || !notify_info->vdev) {
		tdls_err("notify_info %pK", notify_info);
		return QDF_STATUS_E_NULL_VALUE;
	}
	tdls_debug("Enter ");

	notify = qdf_mem_malloc(sizeof(*notify));
	if (!notify) {
		wlan_objmgr_vdev_release_ref(notify_info->vdev,
					     WLAN_TDLS_NB_ID);
		return QDF_STATUS_E_NULL_VALUE;
	}

	*notify = *notify_info;

	msg.bodyptr = notify;
	msg.callback = tdls_process_cmd;
	msg.type = TDLS_NOTIFY_STA_CONNECTION;
	msg.flush_callback = ucfg_tdls_post_msg_flush_cb;
	status = scheduler_post_message(QDF_MODULE_ID_HDD,
					QDF_MODULE_ID_TDLS,
					QDF_MODULE_ID_TARGET_IF, &msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("failed to post message, status %d", status);
		wlan_objmgr_vdev_release_ref(notify->vdev, WLAN_TDLS_NB_ID);
		qdf_mem_free(notify);
	}

	tdls_debug("Exit ");
	return status;
}

QDF_STATUS ucfg_tdls_notify_sta_disconnect(
			struct tdls_sta_notify_params *notify_info)
{
	struct scheduler_msg msg = {0, };
	struct
tdls_sta_notify_params *notify; + QDF_STATUS status; + + if (!notify_info || !notify_info->vdev) { + tdls_err("notify_info %pK", notify_info); + return QDF_STATUS_E_NULL_VALUE; + } + + tdls_debug("Enter "); + + notify = qdf_mem_malloc(sizeof(*notify)); + if (!notify) + return QDF_STATUS_E_NULL_VALUE; + + *notify = *notify_info; + + msg.bodyptr = notify; + msg.callback = tdls_process_cmd; + msg.type = TDLS_NOTIFY_STA_DISCONNECTION; + msg.flush_callback = ucfg_tdls_post_msg_flush_cb; + status = scheduler_post_message(QDF_MODULE_ID_HDD, + QDF_MODULE_ID_TDLS, + QDF_MODULE_ID_TARGET_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("failed to post message, status %d", status); + wlan_objmgr_vdev_release_ref(notify->vdev, WLAN_TDLS_NB_ID); + qdf_mem_free(notify); + } + + tdls_debug("Exit "); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS ucfg_tdls_set_operating_mode( + struct tdls_set_mode_params *set_mode_params) +{ + struct scheduler_msg msg = {0, }; + struct tdls_set_mode_params *set_mode; + QDF_STATUS status; + + if (!set_mode_params || !set_mode_params->vdev) { + tdls_err("set_mode_params %pK", set_mode_params); + return QDF_STATUS_E_NULL_VALUE; + } + + tdls_debug("Enter "); + + set_mode = qdf_mem_malloc(sizeof(*set_mode)); + if (!set_mode) { + tdls_err("memory allocate fail"); + return QDF_STATUS_E_NULL_VALUE; + } + + status = wlan_objmgr_vdev_try_get_ref(set_mode->vdev, WLAN_TDLS_NB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("failed to get vdev ref"); + qdf_mem_free(set_mode); + return status; + } + + set_mode->source = set_mode_params->source; + set_mode->tdls_mode = set_mode_params->tdls_mode; + set_mode->update_last = set_mode_params->update_last; + set_mode->vdev = set_mode_params->vdev; + + msg.bodyptr = set_mode; + msg.callback = tdls_process_cmd; + msg.type = TDLS_CMD_SET_TDLS_MODE; + msg.flush_callback = ucfg_tdls_post_msg_flush_cb; + status = scheduler_post_message(QDF_MODULE_ID_HDD, + QDF_MODULE_ID_TDLS, + QDF_MODULE_ID_OS_IF, &msg); 
+ if (QDF_IS_STATUS_ERROR(status)) { + wlan_objmgr_vdev_release_ref(set_mode->vdev, WLAN_TDLS_NB_ID); + qdf_mem_free(set_mode); + } + + tdls_debug("Exit "); + + return QDF_STATUS_SUCCESS; +} + +void ucfg_tdls_update_rx_pkt_cnt(struct wlan_objmgr_vdev *vdev, + struct qdf_mac_addr *mac_addr, + struct qdf_mac_addr *dest_mac_addr) +{ + tdls_update_rx_pkt_cnt(vdev, mac_addr, dest_mac_addr); +} + +void ucfg_tdls_update_tx_pkt_cnt(struct wlan_objmgr_vdev *vdev, + struct qdf_mac_addr *mac_addr) +{ + tdls_update_tx_pkt_cnt(vdev, mac_addr); + +} + +QDF_STATUS ucfg_tdls_antenna_switch(struct wlan_objmgr_vdev *vdev, + uint32_t mode) +{ + QDF_STATUS status; + struct tdls_antenna_switch_request *req; + struct scheduler_msg msg = {0, }; + + req = qdf_mem_malloc(sizeof(*req)); + if (!req) { + tdls_err("mem allocate fail"); + return QDF_STATUS_E_NOMEM; + } + + status = wlan_objmgr_vdev_try_get_ref(vdev, WLAN_TDLS_NB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("can't get vdev"); + goto error; + } + + req->vdev = vdev; + req->mode = mode; + + msg.bodyptr = req; + msg.callback = tdls_process_cmd; + msg.flush_callback = tdls_antenna_switch_flush_callback; + msg.type = TDLS_CMD_ANTENNA_SWITCH; + status = scheduler_post_message(QDF_MODULE_ID_HDD, + QDF_MODULE_ID_TDLS, + QDF_MODULE_ID_OS_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("post antenna switch msg fail"); + goto dec_ref; + } + + return status; + +dec_ref: + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID); +error: + qdf_mem_free(req); + return status; +} + +QDF_STATUS ucfg_set_tdls_offchannel(struct wlan_objmgr_vdev *vdev, + int offchannel) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct scheduler_msg msg = {0, }; + struct tdls_set_offchannel *req; + + req = qdf_mem_malloc(sizeof(*req)); + if (!req) { + tdls_err("mem allocate fail"); + return QDF_STATUS_E_NOMEM; + } + + status = wlan_objmgr_vdev_try_get_ref(vdev, WLAN_TDLS_NB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("can't get vdev"); + 
goto free; + } + + req->offchannel = offchannel; + req->vdev = vdev; + req->callback = wlan_tdls_offchan_parms_callback; + msg.bodyptr = req; + msg.callback = tdls_process_cmd; + msg.type = TDLS_CMD_SET_OFFCHANNEL; + status = scheduler_post_msg(QDF_MODULE_ID_OS_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("post set tdls offchannel msg fail"); + goto dec_ref; + } + + return status; + +dec_ref: + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID); + +free: + qdf_mem_free(req); + return status; +} + +QDF_STATUS ucfg_set_tdls_offchan_mode(struct wlan_objmgr_vdev *vdev, + int offchanmode) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct scheduler_msg msg = {0, }; + struct tdls_set_offchanmode *req; + + req = qdf_mem_malloc(sizeof(*req)); + if (!req) { + tdls_err("mem allocate fail"); + return QDF_STATUS_E_NOMEM; + } + + status = wlan_objmgr_vdev_try_get_ref(vdev, WLAN_TDLS_NB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("can't get vdev"); + goto free; + } + + req->offchan_mode = offchanmode; + req->vdev = vdev; + req->callback = wlan_tdls_offchan_parms_callback; + msg.bodyptr = req; + msg.callback = tdls_process_cmd; + msg.type = TDLS_CMD_SET_OFFCHANMODE; + status = scheduler_post_msg(QDF_MODULE_ID_OS_IF, &msg); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("post set offchanmode msg fail"); + goto dec_ref; + } + + return status; + +dec_ref: + wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID); + +free: + qdf_mem_free(req); + return status; +} + +QDF_STATUS ucfg_set_tdls_secoffchanneloffset(struct wlan_objmgr_vdev *vdev, + int offchanoffset) +{ + int status = QDF_STATUS_SUCCESS; + struct scheduler_msg msg = {0, }; + struct tdls_set_secoffchanneloffset *req; + + req = qdf_mem_malloc(sizeof(*req)); + if (!req) { + tdls_err("mem allocate fail"); + return QDF_STATUS_E_NOMEM; + } + + status = wlan_objmgr_vdev_try_get_ref(vdev, WLAN_TDLS_NB_ID); + if (QDF_IS_STATUS_ERROR(status)) { + tdls_err("can't get vdev"); + goto free; + } + + 
	req->offchan_offset = offchanoffset;
	req->vdev = vdev;
	req->callback = wlan_tdls_offchan_parms_callback;
	msg.bodyptr = req;
	msg.callback = tdls_process_cmd;
	msg.type = TDLS_CMD_SET_SECOFFCHANOFFSET;
	status = scheduler_post_msg(QDF_MODULE_ID_OS_IF, &msg);
	if (QDF_IS_STATUS_ERROR(status)) {
		tdls_err("post set secoffchan offset msg fail");
		goto dec_ref;
	}
	return status;

dec_ref:
	wlan_objmgr_vdev_release_ref(vdev, WLAN_TDLS_NB_ID);

free:
	qdf_mem_free(req);
	return status;
}

/* decrement the policy-mgr TDLS session count after a failed connect */
void ucfg_tdls_notify_connect_failure(struct wlan_objmgr_psoc *psoc)
{
	return tdls_notify_decrement_session(psoc);
}

/* fetch the vdev the TDLS component is operating on, with @dbg_id ref */
struct wlan_objmgr_vdev *ucfg_get_tdls_vdev(struct wlan_objmgr_psoc *psoc,
					    wlan_objmgr_ref_dbgid dbg_id)
{
	return tdls_get_vdev(psoc, dbg_id);
}
diff --git a/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/src/wlan_tdls_utils_api.c b/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/src/wlan_tdls_utils_api.c
new file mode 100644
index 0000000000000000000000000000000000000000..2e9b2ad43ecd12b8d4da10e0a4765920dc5fdd50
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/umac/tdls/dispatcher/src/wlan_tdls_utils_api.c
@@ -0,0 +1,23 @@
/*
 * Copyright (c) 2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wlan_tdls_utils_api.c + * + * TDLS utility functions definitions + */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/inc/wifi_pos_api.h b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/inc/wifi_pos_api.h new file mode 100644 index 0000000000000000000000000000000000000000..022f0c8d32681887d2c2550f81027a3ce120f1d7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/inc/wifi_pos_api.h @@ -0,0 +1,406 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
 */

/**
 * DOC: wifi_pos_api.h
 * This file declares public APIs of wifi positioning component
 */
#ifndef _WIFI_POS_API_H_
#define _WIFI_POS_API_H_

/* Include files */
#include "qdf_types.h"
#include "qdf_status.h"
#include "qdf_trace.h"

/* forward reference */
struct wlan_objmgr_psoc;
struct wifi_pos_driver_caps;

/**
 * struct wifi_pos_field - wifi positioning field element
 * @id: RTT field id
 * @offset: data offset in field info buffer
 * @length: length of related data in field info buffer
 */
struct wifi_pos_field {
	uint32_t id;
	uint32_t offset;
	uint32_t length;
};

/**
 * struct wifi_pos_field_info - wifi positioning field info buffer
 * @count: number of @wifi_pos_field elements
 * @fields: buffer to hold @wifi_pos_field elements
 */
struct wifi_pos_field_info {
	uint32_t count;
	/* legacy one-element trailing array (pre-C99 idiom); presumably
	 * @count entries are allocated past the struct -- kept as-is since
	 * this is a public ABI */
	struct wifi_pos_field fields[1];
};

#ifdef WIFI_POS_CONVERGED
/**
 * enum oem_err_msg - err msg returned to user space
 * @OEM_ERR_NULL_CONTEXT: NULL context
 * @OEM_ERR_APP_NOT_REGISTERED: OEM App is not registered
 * @OEM_ERR_INVALID_SIGNATURE: Invalid signature
 * @OEM_ERR_NULL_MESSAGE_HEADER: Invalid message header
 * @OEM_ERR_INVALID_MESSAGE_TYPE: Invalid message type
 * @OEM_ERR_INVALID_MESSAGE_LENGTH: Invalid length in message body
 */
enum oem_err_msg {
	OEM_ERR_NULL_CONTEXT = 1,
	OEM_ERR_APP_NOT_REGISTERED,
	OEM_ERR_INVALID_SIGNATURE,
	OEM_ERR_NULL_MESSAGE_HEADER,
	OEM_ERR_INVALID_MESSAGE_TYPE,
	OEM_ERR_INVALID_MESSAGE_LENGTH
};

/* this struct is needed since MLME is not converged yet */
struct wifi_pos_ch_info {
	uint8_t chan_id;
	uint32_t mhz;
	uint32_t band_center_freq1;
	uint32_t band_center_freq2;
	uint32_t info;
	uint32_t reg_info_1;
	uint32_t reg_info_2;
	uint8_t nss;
	uint32_t rate_flags;
	uint8_t sec_ch_offset;
	uint32_t ch_width;
};

/**
 * typedef wifi_pos_ch_info_rsp - Channel information
 * @chan_id: channel id
 * @reserved0: reserved for padding and future use
 * @mhz:
	primary 20 MHz channel frequency in mhz
 * @band_center_freq1: Center frequency 1 in MHz
 * @band_center_freq2: Center frequency 2 in MHz, valid only for 11ac
 *	VHT 80+80 mode
 * @info: channel info
 * @reg_info_1: regulatory information field 1 which contains min power,
 *	max power, reg power and reg class id
 * @reg_info_2: regulatory information field 2 which contains antennamax
 */
struct qdf_packed wifi_pos_ch_info_rsp {
	uint32_t chan_id;
	uint32_t reserved0;
	uint32_t mhz;
	uint32_t band_center_freq1;
	uint32_t band_center_freq2;
	uint32_t info;
	uint32_t reg_info_1;
	uint32_t reg_info_2;
};

/**
 * struct wmi_pos_peer_status_info - Status information for a given peer
 * @peer_mac_addr: peer mac address
 * @peer_status: peer status: 1: CONNECTED, 2: DISCONNECTED
 * @vdev_id: vdev_id for the peer mac
 * @peer_capability: peer capability: 0: RTT/RTT2, 1: RTT3. Default is 0
 * @reserved0: reserved0
 * @peer_chan_info: channel info on which peer is connected
 */
struct qdf_packed wmi_pos_peer_status_info {
	uint8_t peer_mac_addr[ETH_ALEN];
	uint8_t peer_status;
	uint8_t vdev_id;
	uint32_t peer_capability;
	uint32_t reserved0;
	struct wifi_pos_ch_info_rsp peer_chan_info;
};

/**
 * struct wifi_pos_req_msg - wifi pos request struct
 * @msg_type: message type
 * @pid: process id
 * @buf: request buffer
 * @buf_len: request buffer length
 * @field_info_buf: buffer containing field info
 * @field_info_buf_len: length of field info buffer
 *
 */
struct wifi_pos_req_msg {
	uint32_t msg_type;
	uint32_t pid;
	uint8_t *buf;
	uint32_t buf_len;
	struct wifi_pos_field_info *field_info_buf;
	uint32_t field_info_buf_len;
};

/**
 * ucfg_wifi_pos_process_req: ucfg API to be called from HDD/OS_IF to process a
 * wifi_pos request from userspace
 * @psoc: pointer to psoc object
 * @req: wifi_pos request msg
 * @send_rsp_cb: callback pointer required to send msg to userspace
 *
 * Return: status of operation
 */
+QDF_STATUS ucfg_wifi_pos_process_req(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req, + void (*send_rsp_cb)(uint32_t, uint32_t, uint32_t, uint8_t *)); + +/** + * wifi_pos_init: initializes WIFI POS component, called by dispatcher init + * + * Return: status of operation + */ +QDF_STATUS wifi_pos_init(void); + +/** + * wifi_pos_deinit: de-initializes WIFI POS component, called by dispatcher init + * + * Return: status of operation + */ +QDF_STATUS wifi_pos_deinit(void); + +/** + * wifi_pos_psoc_enable: psoc enable API for wifi positioning component + * @psoc: pointer to PSOC + * + * Return: status of operation + */ +QDF_STATUS wifi_pos_psoc_enable(struct wlan_objmgr_psoc *psoc); + +/** + * wifi_pos_psoc_disable: psoc disable API for wifi positioning component + * @psoc: pointer to PSOC + * + * Return: status of operation + */ +QDF_STATUS wifi_pos_psoc_disable(struct wlan_objmgr_psoc *psoc); + +/** + * wifi_pos_set_oem_target_type: public API to set param in wifi_pos private + * object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_oem_target_type(struct wlan_objmgr_psoc *psoc, uint32_t val); + +/** + * wifi_pos_set_oem_fw_version: public API to set param in wifi_pos private + * object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_oem_fw_version(struct wlan_objmgr_psoc *psoc, uint32_t val); + +/** + * wifi_pos_set_drv_ver_major: public API to set param in wifi_pos private + * object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_drv_ver_major(struct wlan_objmgr_psoc *psoc, uint8_t val); + +/** + * wifi_pos_set_drv_ver_minor: public API to set param in wifi_pos private + * object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_drv_ver_minor(struct wlan_objmgr_psoc *psoc, uint8_t val); + +/** + * wifi_pos_set_drv_ver_patch: public API to set param in wifi_pos 
private + * object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_drv_ver_patch(struct wlan_objmgr_psoc *psoc, uint8_t val); + +/** + * wifi_pos_set_drv_ver_build: public API to set param in wifi_pos private + * object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_drv_ver_build(struct wlan_objmgr_psoc *psoc, uint8_t val); + +/** + * wifi_pos_set_dwell_time_min: public API to set param in wifi_pos private + * object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_dwell_time_min(struct wlan_objmgr_psoc *psoc, uint16_t val); + +/** + * wifi_pos_set_dwell_time_max: public API to set param in wifi_pos private + * object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_dwell_time_max(struct wlan_objmgr_psoc *psoc, uint16_t val); + +/** + * wifi_pos_set_current_dwell_time_min: public API to set param in wifi_pos + * private object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_current_dwell_time_min(struct wlan_objmgr_psoc *psoc, + uint16_t val); + +/** + * wifi_pos_set_current_dwell_time_max: public API to set param in wifi_pos + * private object + * @psoc: pointer to PSOC + * @val: value to set + * + * Return: None + */ +void wifi_pos_set_current_dwell_time_max(struct wlan_objmgr_psoc *psoc, + uint16_t val); + +/** + * wifi_pos_populate_caps() - populate oem capabilities + * @psoc: psoc object + * @caps: pointer to populate the capabilities + * + * Return: error code + */ +QDF_STATUS wifi_pos_populate_caps(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_driver_caps *caps); + +/** + * ucfg_wifi_pos_get_ftm_cap: API to get fine timing measurement caps + * @psoc: psoc object + * + * Return: FTM value + */ +uint32_t ucfg_wifi_pos_get_ftm_cap(struct wlan_objmgr_psoc *psoc); + +/** + * ucfg_wifi_pos_set_ftm_cap: API to set fine timing 
measurement caps + * @psoc: psoc object + * @val: value to set + * + * Return: None + */ +void ucfg_wifi_pos_set_ftm_cap(struct wlan_objmgr_psoc *psoc, uint32_t val); + +/** + * wifi_pos_get_app_pid: returns oem app pid. + * @psoc: pointer to psoc object + * + * Return: oem app pid + */ +uint32_t wifi_pos_get_app_pid(struct wlan_objmgr_psoc *psoc); + +/** + * wifi_pos_is_app_registered: indicates if oem app is registered. + * @psoc: pointer to psoc object + * + * Return: true if app is registered, false otherwise + */ +bool wifi_pos_is_app_registered(struct wlan_objmgr_psoc *psoc); + +/** + * wifi_pos_get_psoc: API to get global PSOC object + * + * Since request from userspace is not associated with any vdev/pdev/psoc, this + * API is used to get global psoc object. + * Return: global psoc object. + */ +struct wlan_objmgr_psoc *wifi_pos_get_psoc(void); + +#else +static inline QDF_STATUS wifi_pos_init(void) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS wifi_pos_deinit(void) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS wifi_pos_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} + +static inline QDF_STATUS wifi_pos_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#if defined(WLAN_FEATURE_CIF_CFR) && defined(WIFI_POS_CONVERGED) +/** + * wifi_pos_init_cir_cfr_rings: API to set DMA ring cap in wifi pos psoc private + * object + * @psoc: pointer to psoc object + * @hal_soc: hal soc pointer + * @num_mac: number of macs + * @buf: buffer containing dma ring cap + * + * Return: status of operation. 
+ */ +QDF_STATUS wifi_pos_init_cir_cfr_rings(struct wlan_objmgr_psoc *psoc, + void *hal_soc, uint8_t num_mac, void *buf); +#else +static inline QDF_STATUS wifi_pos_init_cir_cfr_rings( + struct wlan_objmgr_psoc *psoc, + void *hal_soc, uint8_t num_mac, void *buf) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * wifi_pos_register_get_phy_mode_cb: API to register callback to get + * current PHY mode + * @psoc: pointer to psoc object + * @handler: callback to be registered + * + * Return: QDF_STATUS_SUCCESS in case of success, error codes in + * case of failure + */ +QDF_STATUS wifi_pos_register_get_phy_mode_cb( + struct wlan_objmgr_psoc *psoc, + void (*handler)(uint8_t, uint32_t, uint32_t *)); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_api.c b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_api.c new file mode 100644 index 0000000000000000000000000000000000000000..b465b2a1fccd6a5fe3c16ed1379ba0d7058f1aef --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_api.c @@ -0,0 +1,334 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wifi_pos_api.c + * This file defines the APIs wifi_pos component. 
+ */ + +#include "wifi_pos_api.h" +#include "wifi_pos_utils_i.h" +#include "wifi_pos_main_i.h" +#include "os_if_wifi_pos.h" +#include "target_if_wifi_pos.h" +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_global_obj.h" +#include "wlan_objmgr_psoc_obj.h" + +QDF_STATUS wifi_pos_init(void) +{ + QDF_STATUS status; + + wifi_pos_lock_init(); + + /* register psoc create handler functions. */ + status = wlan_objmgr_register_psoc_create_handler( + WLAN_UMAC_COMP_WIFI_POS, + wifi_pos_psoc_obj_created_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + wifi_pos_err("register_psoc_create_handler failed, status: %d", + status); + return status; + } + + /* register psoc delete handler functions. */ + status = wlan_objmgr_register_psoc_destroy_handler( + WLAN_UMAC_COMP_WIFI_POS, + wifi_pos_psoc_obj_destroyed_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + wifi_pos_err("register_psoc_destroy_handler failed, status: %d", + status); + } + + return status; +} + +QDF_STATUS wifi_pos_deinit(void) +{ + QDF_STATUS status; + + /* deregister psoc create handler functions. */ + status = wlan_objmgr_unregister_psoc_create_handler( + WLAN_UMAC_COMP_WIFI_POS, + wifi_pos_psoc_obj_created_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + wifi_pos_err("unregister_psoc_create_handler failed, status: %d", + status); + return status; + } + + /* deregister psoc delete handler functions. 
*/ + status = wlan_objmgr_unregister_psoc_destroy_handler( + WLAN_UMAC_COMP_WIFI_POS, + wifi_pos_psoc_obj_destroyed_notification, + NULL); + if (QDF_IS_STATUS_ERROR(status)) { + wifi_pos_err("unregister_psoc_destroy_handler failed, status: %d", + status); + } + + wifi_pos_lock_deinit(); + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wifi_pos_psoc_enable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status = target_if_wifi_pos_register_events(psoc); + + if (QDF_IS_STATUS_ERROR(status)) + wifi_pos_err("target_if_wifi_pos_register_events failed"); + + return status; +} + +QDF_STATUS wifi_pos_psoc_disable(struct wlan_objmgr_psoc *psoc) +{ + QDF_STATUS status = target_if_wifi_pos_deregister_events(psoc); + + if (QDF_IS_STATUS_ERROR(status)) + wifi_pos_err("target_if_wifi_pos_deregister_events failed"); + + return QDF_STATUS_SUCCESS; +} + +void wifi_pos_set_oem_target_type(struct wlan_objmgr_psoc *psoc, uint32_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->oem_target_type = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + +void wifi_pos_set_oem_fw_version(struct wlan_objmgr_psoc *psoc, uint32_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->oem_fw_version = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + +void wifi_pos_set_drv_ver_major(struct wlan_objmgr_psoc *psoc, uint8_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->driver_version.major = 
val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + +void wifi_pos_set_drv_ver_minor(struct wlan_objmgr_psoc *psoc, uint8_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->driver_version.minor = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + +void wifi_pos_set_drv_ver_patch(struct wlan_objmgr_psoc *psoc, uint8_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->driver_version.patch = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + +void wifi_pos_set_drv_ver_build(struct wlan_objmgr_psoc *psoc, uint8_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->driver_version.build = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + +void wifi_pos_set_dwell_time_min(struct wlan_objmgr_psoc *psoc, uint16_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->allowed_dwell_time_min = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} +void wifi_pos_set_dwell_time_max(struct wlan_objmgr_psoc *psoc, uint16_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + 
wifi_pos_psoc->allowed_dwell_time_max = val;
+	qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock);
+}
+
+void wifi_pos_set_current_dwell_time_max(struct wlan_objmgr_psoc *psoc,
+					 uint16_t val)
+{
+	struct wifi_pos_psoc_priv_obj *wifi_pos_psoc =
+			wifi_pos_get_psoc_priv_obj(psoc);
+
+	if (!wifi_pos_psoc) {
+		wifi_pos_err("wifi_pos priv obj is null");
+		return;
+	}
+
+	qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock);
+	wifi_pos_psoc->current_dwell_time_max = val;
+	qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock);
+}
+
+void wifi_pos_set_current_dwell_time_min(struct wlan_objmgr_psoc *psoc,
+					 uint16_t val)
+{
+	struct wifi_pos_psoc_priv_obj *wifi_pos_psoc =
+			wifi_pos_get_psoc_priv_obj(psoc);
+
+	if (!wifi_pos_psoc) {
+		wifi_pos_err("wifi_pos priv obj is null");
+		return;
+	}
+
+	qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock);
+	wifi_pos_psoc->current_dwell_time_min = val;
+	qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock);
+}
+
+uint32_t wifi_pos_get_app_pid(struct wlan_objmgr_psoc *psoc)
+{
+	uint32_t app_pid;
+	struct wifi_pos_psoc_priv_obj *wifi_pos_psoc =
+		wifi_pos_get_psoc_priv_obj(psoc);
+
+	if (!wifi_pos_psoc) {
+		wifi_pos_err("wifi_pos priv obj is null");
+		return 0;
+	}
+
+	qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock);
+	app_pid = wifi_pos_psoc->app_pid;
+	qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock);
+
+	return app_pid;
+
+}
+
+bool wifi_pos_is_app_registered(struct wlan_objmgr_psoc *psoc)
+{
+	bool is_app_registered;
+	struct wifi_pos_psoc_priv_obj *wifi_pos_psoc =
+		wifi_pos_get_psoc_priv_obj(psoc);
+
+	if (!wifi_pos_psoc) {
+		wifi_pos_err("wifi_pos priv obj is null");
+		return false;
+	}
+
+	qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock);
+	is_app_registered = wifi_pos_psoc->is_app_registered;
+	qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock);
+
+	return is_app_registered;
+}
+
+#ifdef WLAN_FEATURE_CIF_CFR
+QDF_STATUS wifi_pos_init_cir_cfr_rings(struct wlan_objmgr_psoc *psoc,
+				       void *hal_soc, uint8_t num_mac, void *buf)
+{
+	return 
target_if_wifi_pos_init_cir_cfr_rings(psoc, hal_soc, + num_mac, buf); +} +#endif + +QDF_STATUS wifi_pos_register_get_phy_mode_cb( + struct wlan_objmgr_psoc *psoc, + void (*handler)(uint8_t, uint32_t, uint32_t *)) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc; + + if (!psoc) { + wifi_pos_err("psoc is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + if (!handler) { + wifi_pos_err("Null callback"); + return QDF_STATUS_E_NULL_VALUE; + } + wifi_pos_psoc = wifi_pos_get_psoc_priv_obj(psoc); + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + wifi_pos_psoc->wifi_pos_get_phy_mode = handler; + + return QDF_STATUS_SUCCESS; +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_main.c b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_main.c new file mode 100644 index 0000000000000000000000000000000000000000..73d2f4e0052d0c7689e2d80ae1db81d1436a2af0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_main.c @@ -0,0 +1,707 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wifi_pos_main.c + * This file defines the important functions pertinent to + * wifi positioning to initialize and de-initialize the component. + */ +#include "target_if_wifi_pos.h" +#include "wifi_pos_oem_interface_i.h" +#include "wifi_pos_utils_i.h" +#include "wifi_pos_api.h" +#include "wifi_pos_main_i.h" +#include "wifi_pos_ucfg_i.h" +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_global_obj.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_objmgr_pdev_obj.h" +#include "wlan_objmgr_vdev_obj.h" +#include "wlan_ptt_sock_svc.h" + +#include "wlan_reg_services_api.h" +/* forward declartion */ +struct regulatory_channel; + +#define REG_SET_CHANNEL_REG_POWER(reg_info_1, val) do { \ + reg_info_1 &= 0xff00ffff; \ + reg_info_1 |= ((val & 0xff) << 16); \ +} while (0) + +/* max tx power is in 1 dBm units */ +#define REG_SET_CHANNEL_MAX_TX_POWER(reg_info_2, val) do { \ + reg_info_2 &= 0xffff00ff; \ + reg_info_2 |= ((val & 0xff) << 8); \ +} while (0) + +/* channel info consists of 6 bits of channel mode */ + +#define REG_SET_CHANNEL_MODE(reg_channel, val) do { \ + (reg_channel)->info &= 0xffffffc0; \ + (reg_channel)->info |= (val); \ +} while (0) + +/* + * obj mgr api to iterate over vdevs does not provide a direct array or vdevs, + * rather takes a callback that is called for every vdev. wifi pos needs to + * store device mode and vdev id of all active vdevs and provide this info to + * user space as part of APP registration response. due to this, vdev_idx is + * used to identify how many vdevs have been populated by obj manager API. 
+ */ +static uint32_t vdev_idx; + +/** + * wifi_pos_get_tlv_support: indicates if firmware supports TLV wifi pos msg + * @psoc: psoc object + * + * Return: status of operation + */ +static bool wifi_pos_get_tlv_support(struct wlan_objmgr_psoc *psoc) +{ + /* this is TBD */ + return true; +} + +static QDF_STATUS wifi_pos_process_data_req(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req) +{ + uint8_t idx; + uint32_t sub_type = 0; + uint32_t channel_mhz = 0; + void *pdev_id = NULL; + uint32_t offset; + struct oem_data_req data_req; + struct wlan_lmac_if_wifi_pos_tx_ops *tx_ops; + + wifi_pos_debug("Received data req pid(%d), len(%d)", + req->pid, req->buf_len); + + /* look for fields */ + if (req->field_info_buf) + for (idx = 0; idx < req->field_info_buf->count; idx++) { + offset = req->field_info_buf->fields[idx].offset; + /* + * replace following reads with read_api based on + * length + */ + if (req->field_info_buf->fields[idx].id == + WMIRTT_FIELD_ID_oem_data_sub_type) { + sub_type = *((uint32_t *)&req->buf[offset]); + continue; + } + + if (req->field_info_buf->fields[idx].id == + WMIRTT_FIELD_ID_channel_mhz) { + channel_mhz = *((uint32_t *)&req->buf[offset]); + continue; + } + + if (req->field_info_buf->fields[idx].id == + WMIRTT_FIELD_ID_pdev) { + pdev_id = &req->buf[offset]; + continue; + } + } + + switch (sub_type) { + case TARGET_OEM_CAPABILITY_REQ: + /* TBD */ + break; + case TARGET_OEM_CONFIGURE_LCR: + /* TBD */ + break; + case TARGET_OEM_CONFIGURE_LCI: + /* TBD */ + break; + case TARGET_OEM_MEASUREMENT_REQ: + /* TBD */ + break; + case TARGET_OEM_CONFIGURE_FTMRR: + /* TBD */ + break; + case TARGET_OEM_CONFIGURE_WRU: + /* TBD */ + break; + default: + wifi_pos_debug("invalid sub type or not passed"); + /* + * this is legacy MCL operation. pass whole msg to firmware as + * it is. 
+ */ + tx_ops = target_if_wifi_pos_get_txops(psoc); + if (!tx_ops) { + wifi_pos_err("tx ops null"); + return QDF_STATUS_E_INVAL; + } + data_req.data_len = req->buf_len; + data_req.data = req->buf; + tx_ops->data_req_tx(psoc, &data_req); + break; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS wifi_pos_process_set_cap_req(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req) +{ + int error_code; + struct wifi_pos_psoc_priv_obj *wifi_pos_obj = + wifi_pos_get_psoc_priv_obj(psoc); + struct wifi_pos_user_defined_caps *caps = + (struct wifi_pos_user_defined_caps *)req->buf; + + if (!wifi_pos_obj) { + wifi_pos_err("wifi_pos priv obj is null"); + return QDF_STATUS_E_INVAL; + } + + wifi_pos_debug("Received set cap req pid(%d), len(%d)", + req->pid, req->buf_len); + + wifi_pos_obj->ftm_rr = caps->ftm_rr; + wifi_pos_obj->lci_capability = caps->lci_capability; + error_code = qdf_status_to_os_return(QDF_STATUS_SUCCESS); + wifi_pos_obj->wifi_pos_send_rsp(wifi_pos_obj->app_pid, + ANI_MSG_SET_OEM_CAP_RSP, + sizeof(error_code), + (uint8_t *)&error_code); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS wifi_pos_process_get_cap_req(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req) +{ + struct wifi_pos_oem_get_cap_rsp cap_rsp = { { {0} } }; + struct wifi_pos_psoc_priv_obj *wifi_pos_obj = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_obj) { + wifi_pos_err("wifi_pos priv obj is null"); + return QDF_STATUS_E_INVAL; + } + + wifi_pos_debug("Received get cap req pid(%d), len(%d)", + req->pid, req->buf_len); + + wifi_pos_populate_caps(psoc, &cap_rsp.driver_cap); + cap_rsp.user_defined_cap.ftm_rr = wifi_pos_obj->ftm_rr; + cap_rsp.user_defined_cap.lci_capability = wifi_pos_obj->lci_capability; + wifi_pos_obj->wifi_pos_send_rsp(wifi_pos_obj->app_pid, + ANI_MSG_GET_OEM_CAP_RSP, + sizeof(cap_rsp), + (uint8_t *)&cap_rsp); + + return QDF_STATUS_SUCCESS; +} + +static void wifi_update_channel_bw_info(struct wlan_objmgr_psoc *psoc, + struct 
wlan_objmgr_pdev *pdev, + uint16_t chan, + struct wifi_pos_ch_info_rsp *chan_info) +{ + struct ch_params ch_params = {0}; + uint16_t sec_ch_2g = 0; + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + uint32_t phy_mode; + + if (!wifi_pos_psoc) { + wifi_pos_err("wifi_pos priv obj is null"); + return; + } + + /* Passing CH_WIDTH_MAX will give the max bandwidth supported */ + ch_params.ch_width = CH_WIDTH_MAX; + + wlan_reg_set_channel_params(pdev, chan, sec_ch_2g, &ch_params); + if (ch_params.center_freq_seg0) + chan_info->band_center_freq1 = + wlan_reg_get_channel_freq(pdev, + ch_params.center_freq_seg0); + + wifi_pos_psoc->wifi_pos_get_phy_mode(chan, ch_params.ch_width, + &phy_mode); + + REG_SET_CHANNEL_MODE(chan_info, phy_mode); +} + +static void wifi_pos_get_reg_info(struct wlan_objmgr_pdev *pdev, + uint32_t chan_num, uint32_t *reg_info_1, + uint32_t *reg_info_2) +{ + uint32_t reg_power = wlan_reg_get_channel_reg_power(pdev, chan_num); + + *reg_info_1 = 0; + *reg_info_2 = 0; + + REG_SET_CHANNEL_REG_POWER(*reg_info_1, reg_power); + REG_SET_CHANNEL_MAX_TX_POWER(*reg_info_2, reg_power); +} + +/** + * wifi_pos_get_valid_channels: Get the list of valid channels from the + * given channel list + * @channels: Channel list to be validated + * @num_ch: NUmber of channels in the channel list to be validated + * @valid_channel_list: Pointer to valid channel list + * + * Return: Number of valid channels in the given list + */ + +static uint32_t wifi_pos_get_valid_channels(uint8_t *channels, uint32_t num_ch, + uint8_t *valid_channel_list) { + uint32_t i, num_valid_channels = 0; + + for (i = 0; i < num_ch; i++) { + if (INVALID_CHANNEL == reg_get_chan_enum(channels[i])) + continue; + valid_channel_list[num_valid_channels++] = channels[i]; + } + return num_valid_channels; +} + +static QDF_STATUS wifi_pos_process_ch_info_req(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req) +{ + uint8_t idx; + uint8_t *buf; + uint32_t len; + uint32_t 
reg_info_1; + uint32_t reg_info_2; + uint8_t *channels = req->buf; + struct wlan_objmgr_pdev *pdev; + uint32_t num_ch = req->buf_len; + uint8_t valid_channel_list[NUM_CHANNELS]; + uint32_t num_valid_channels; + struct wifi_pos_ch_info_rsp *ch_info; + struct wifi_pos_psoc_priv_obj *wifi_pos_obj = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_obj) { + wifi_pos_err("wifi_pos priv obj is null"); + return QDF_STATUS_E_INVAL; + } + + wifi_pos_debug("Received ch info req pid(%d), len(%d)", + req->pid, req->buf_len); + + /* get first pdev since we need that only for freq and dfs state */ + pdev = wlan_objmgr_get_pdev_by_id(psoc, 0, WLAN_WIFI_POS_CORE_ID); + if (!pdev) { + wifi_pos_err("pdev get API failed"); + return QDF_STATUS_E_INVAL; + } + if (num_ch > NUM_CHANNELS) { + wifi_pos_err("Invalid number of channels"); + return QDF_STATUS_E_INVAL; + } + num_valid_channels = wifi_pos_get_valid_channels(channels, num_ch, + valid_channel_list); + + len = sizeof(uint8_t) + sizeof(struct wifi_pos_ch_info_rsp) * + num_valid_channels; + buf = qdf_mem_malloc(len); + if (!buf) { + wifi_pos_alert("malloc failed"); + wlan_objmgr_pdev_release_ref(pdev, WLAN_WIFI_POS_CORE_ID); + return QDF_STATUS_E_NOMEM; + } + + qdf_mem_zero(buf, len); + + /* First byte of message body will have num of channels */ + buf[0] = num_valid_channels; + ch_info = (struct wifi_pos_ch_info_rsp *)&buf[1]; + for (idx = 0; idx < num_valid_channels; idx++) { + ch_info[idx].chan_id = valid_channel_list[idx]; + wifi_pos_get_reg_info(pdev, ch_info[idx].chan_id, + ®_info_1, ®_info_2); + ch_info[idx].reserved0 = 0; + ch_info[idx].mhz = wlan_reg_get_channel_freq( + pdev, + valid_channel_list[idx]); + ch_info[idx].band_center_freq1 = ch_info[idx].mhz; + ch_info[idx].band_center_freq2 = 0; + ch_info[idx].info = 0; + if (wlan_reg_is_dfs_ch(pdev, valid_channel_list[idx])) + WIFI_POS_SET_DFS(ch_info[idx].info); + + wifi_update_channel_bw_info(psoc, pdev, + ch_info[idx].chan_id, + &ch_info[idx]); + + 
ch_info[idx].reg_info_1 = reg_info_1; + ch_info[idx].reg_info_2 = reg_info_2; + } + + wifi_pos_obj->wifi_pos_send_rsp(wifi_pos_obj->app_pid, + ANI_MSG_CHANNEL_INFO_RSP, + len, buf); + qdf_mem_free(buf); + wlan_objmgr_pdev_release_ref(pdev, WLAN_WIFI_POS_CORE_ID); + + return QDF_STATUS_SUCCESS; +} + +static void wifi_pos_vdev_iterator(struct wlan_objmgr_psoc *psoc, + void *vdev, void *arg) +{ + struct app_reg_rsp_vdev_info *vdev_info = arg; + + vdev_info[vdev_idx].dev_mode = wlan_vdev_mlme_get_opmode(vdev); + vdev_info[vdev_idx].vdev_id = wlan_vdev_get_id(vdev); + vdev_idx++; +} + +static QDF_STATUS wifi_pos_process_app_reg_req(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req) +{ + QDF_STATUS ret = QDF_STATUS_SUCCESS; + uint8_t err = 0; + uint32_t rsp_len; + char *sign_str = NULL; + struct wifi_app_reg_rsp *app_reg_rsp; + struct app_reg_rsp_vdev_info vdevs_info[WLAN_UMAC_PSOC_MAX_VDEVS] + = { { 0 } }; + struct wifi_pos_psoc_priv_obj *wifi_pos_obj = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_obj) { + wifi_pos_err("wifi_pos priv obj is null"); + return QDF_STATUS_E_INVAL; + } + + wifi_pos_err("Received App Req Req pid(%d), len(%d)", + req->pid, req->buf_len); + + sign_str = (char *)req->buf; + /* Registration request is only allowed for QTI Application */ + if ((OEM_APP_SIGNATURE_LEN != req->buf_len) || + (strncmp(sign_str, OEM_APP_SIGNATURE_STR, + OEM_APP_SIGNATURE_LEN))) { + wifi_pos_err("Invalid signature pid(%d)", req->pid); + ret = QDF_STATUS_E_PERM; + err = OEM_ERR_INVALID_SIGNATURE; + goto app_reg_failed; + } + + wifi_pos_debug("Valid App Req Req from pid(%d)", req->pid); + wifi_pos_obj->is_app_registered = true; + wifi_pos_obj->app_pid = req->pid; + + vdev_idx = 0; + wlan_objmgr_iterate_obj_list(psoc, WLAN_VDEV_OP, + wifi_pos_vdev_iterator, + vdevs_info, true, WLAN_WIFI_POS_CORE_ID); + rsp_len = (sizeof(struct app_reg_rsp_vdev_info) * vdev_idx) + + sizeof(uint8_t); + app_reg_rsp = qdf_mem_malloc(rsp_len); + if (!app_reg_rsp) { + 
wifi_pos_alert("malloc failed"); + ret = QDF_STATUS_E_NOMEM; + err = OEM_ERR_NULL_CONTEXT; + goto app_reg_failed; + } + + app_reg_rsp->num_inf = vdev_idx; + qdf_mem_copy(&app_reg_rsp->vdevs, vdevs_info, + sizeof(struct app_reg_rsp_vdev_info) * vdev_idx); + if (!vdev_idx) + wifi_pos_debug("no active vdev"); + + vdev_idx = 0; + wifi_pos_obj->wifi_pos_send_rsp(req->pid, ANI_MSG_APP_REG_RSP, + rsp_len, (uint8_t *)app_reg_rsp); + + qdf_mem_free(app_reg_rsp); + return ret; + +app_reg_failed: + + wifi_pos_obj->wifi_pos_send_rsp(req->pid, ANI_MSG_OEM_ERROR, + sizeof(err), &err); + return ret; +} + +/** + * wifi_pos_tlv_callback: wifi pos msg handler registered for TLV type req + * @wmi_msg: wmi type request msg + * + * Return: status of operation + */ +static QDF_STATUS wifi_pos_tlv_callback(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req) +{ + wifi_pos_debug("enter: msg_type: %d", req->msg_type); + switch (req->msg_type) { + case ANI_MSG_APP_REG_REQ: + return wifi_pos_process_app_reg_req(psoc, req); + case ANI_MSG_OEM_DATA_REQ: + return wifi_pos_process_data_req(psoc, req); + case ANI_MSG_CHANNEL_INFO_REQ: + return wifi_pos_process_ch_info_req(psoc, req); + case ANI_MSG_SET_OEM_CAP_REQ: + return wifi_pos_process_set_cap_req(psoc, req); + case ANI_MSG_GET_OEM_CAP_REQ: + return wifi_pos_process_get_cap_req(psoc, req); + default: + wifi_pos_err("invalid request type"); + break; + } + return 0; +} + +/** + * wifi_pos_non_tlv_callback: wifi pos msg handler registered for non-TLV + * type req + * @wmi_msg: wmi type request msg + * + * Return: status of operation + */ +static QDF_STATUS wifi_pos_non_tlv_callback(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req) +{ + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wifi_pos_psoc_obj_created_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list) +{ + QDF_STATUS status; + struct wifi_pos_psoc_priv_obj *wifi_pos_obj; + + /* + * this is for WIN, if they have multiple psoc, we dont want to create + * 
multiple priv object. Since there is just one LOWI app registered to + * one driver, avoid 2nd private object with another psoc. + */ + if (wifi_pos_get_psoc()) { + wifi_pos_debug("global psoc obj already set. do not allocate another psoc private object"); + return QDF_STATUS_SUCCESS; + } else { + wifi_pos_debug("setting global pos object"); + wifi_pos_set_psoc(psoc); + } + + /* initialize wifi-pos psoc priv object */ + wifi_pos_obj = qdf_mem_malloc(sizeof(*wifi_pos_obj)); + if (!wifi_pos_obj) { + wifi_pos_alert("Mem alloc failed for wifi pos psoc priv obj"); + wifi_pos_clear_psoc(); + return QDF_STATUS_E_NOMEM; + } + + qdf_spinlock_create(&wifi_pos_obj->wifi_pos_lock); + /* Register TLV or non-TLV callbacks depending on target fw version */ + if (wifi_pos_get_tlv_support(psoc)) + wifi_pos_obj->wifi_pos_req_handler = wifi_pos_tlv_callback; + else + wifi_pos_obj->wifi_pos_req_handler = wifi_pos_non_tlv_callback; + + /* + * MGMT Rx is not handled in this phase since wifi pos only uses few + * measurement subtypes under RRM_RADIO_MEASURE_REQ. Rest of them are + * used for 80211k. That part is not yet converged and still follows + * legacy MGMT Rx to work. Action frame in new TXRX can be registered + * at per ACTION Frame type granularity only. + */ + + status = wlan_objmgr_psoc_component_obj_attach(psoc, + WLAN_UMAC_COMP_WIFI_POS, + wifi_pos_obj, + QDF_STATUS_SUCCESS); + + if (QDF_IS_STATUS_ERROR(status)) { + wifi_pos_err("obj attach with psoc failed with status: %d", + status); + qdf_spinlock_destroy(&wifi_pos_obj->wifi_pos_lock); + qdf_mem_free(wifi_pos_obj); + wifi_pos_clear_psoc(); + } + + return status; +} + +QDF_STATUS wifi_pos_psoc_obj_destroyed_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list) +{ + QDF_STATUS status; + struct wifi_pos_psoc_priv_obj *wifi_pos_obj = NULL; + + if (wifi_pos_get_psoc() == psoc) { + wifi_pos_debug("deregistering wifi_pos_psoc object"); + wifi_pos_clear_psoc(); + } else { + wifi_pos_warn("un-related PSOC closed. 
do nothing"); + return QDF_STATUS_SUCCESS; + } + + wifi_pos_obj = wifi_pos_get_psoc_priv_obj(psoc); + if (!wifi_pos_obj) { + wifi_pos_err("wifi_pos_obj is NULL"); + return QDF_STATUS_E_FAULT; + } + + target_if_wifi_pos_deinit_dma_rings(psoc); + + status = wlan_objmgr_psoc_component_obj_detach(psoc, + WLAN_UMAC_COMP_WIFI_POS, + wifi_pos_obj); + if (status != QDF_STATUS_SUCCESS) + wifi_pos_err("wifi_pos_obj detach failed"); + + wifi_pos_debug("wifi_pos_obj deleted with status %d", status); + qdf_spinlock_destroy(&wifi_pos_obj->wifi_pos_lock); + qdf_mem_free(wifi_pos_obj); + + return status; +} + +int wifi_pos_oem_rsp_handler(struct wlan_objmgr_psoc *psoc, + struct oem_data_rsp *oem_rsp) +{ + uint32_t len; + uint8_t *data; + uint32_t app_pid; + struct wifi_pos_psoc_priv_obj *priv = + wifi_pos_get_psoc_priv_obj(psoc); + void (*wifi_pos_send_rsp)(uint32_t, uint32_t, uint32_t, uint8_t *); + + if (!priv) { + wifi_pos_err("private object is NULL"); + return -EINVAL; + } + + qdf_spin_lock_bh(&priv->wifi_pos_lock); + app_pid = priv->app_pid; + wifi_pos_send_rsp = priv->wifi_pos_send_rsp; + qdf_spin_unlock_bh(&priv->wifi_pos_lock); + + len = oem_rsp->rsp_len_1 + oem_rsp->rsp_len_2 + oem_rsp->dma_len; + if (oem_rsp->rsp_len_1 > OEM_DATA_RSP_SIZE || + oem_rsp->rsp_len_2 > OEM_DATA_RSP_SIZE) { + wifi_pos_err("invalid length of Oem Data response"); + return -EINVAL; + } + + if (!wifi_pos_send_rsp) { + wifi_pos_err("invalid response handler"); + return -EINVAL; + } + + wifi_pos_debug("oem data rsp, len: %d to pid: %d", len, app_pid); + + if (oem_rsp->rsp_len_2 + oem_rsp->dma_len) { + /* stitch togther the msg data_1 + CIR/CFR + data_2 */ + data = qdf_mem_malloc(len); + if (!data) { + wifi_pos_err("malloc failed"); + return -ENOMEM; + } + qdf_mem_copy(data, oem_rsp->data_1, oem_rsp->rsp_len_1); + qdf_mem_copy(&data[oem_rsp->rsp_len_1], + oem_rsp->vaddr, oem_rsp->dma_len); + qdf_mem_copy(&data[oem_rsp->rsp_len_1 + oem_rsp->dma_len], + oem_rsp->data_2, oem_rsp->rsp_len_2); + + 
wifi_pos_send_rsp(app_pid, ANI_MSG_OEM_DATA_RSP, len, data); + qdf_mem_free(data); + } else { + wifi_pos_send_rsp(app_pid, ANI_MSG_OEM_DATA_RSP, + oem_rsp->rsp_len_1, oem_rsp->data_1); + } + + return 0; +} + +static void wifi_pos_pdev_iterator(struct wlan_objmgr_psoc *psoc, + void *obj, void *arg) +{ + uint32_t i; + QDF_STATUS status; + struct wlan_objmgr_pdev *pdev = obj; + struct regulatory_channel *psoc_ch_lst = arg; + struct regulatory_channel pdev_ch_lst[NUM_CHANNELS]; + + status = wlan_reg_get_current_chan_list(pdev, pdev_ch_lst); + if (QDF_IS_STATUS_ERROR(status)) { + wifi_pos_err("wlan_reg_get_current_chan_list_by_range failed"); + return; + } + + for (i = 0; i < NUM_CHANNELS; i++) { + if (pdev_ch_lst[i].state != CHANNEL_STATE_DISABLE && + pdev_ch_lst[i].state != CHANNEL_STATE_INVALID) + psoc_ch_lst[i] = pdev_ch_lst[i]; + } +} + +static void wifi_pos_get_ch_info(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_driver_caps *caps) +{ + uint32_t i, num_ch = 0; + struct regulatory_channel ch_lst[NUM_CHANNELS] = {{0}}; + + wlan_objmgr_iterate_obj_list(psoc, WLAN_PDEV_OP, + wifi_pos_pdev_iterator, + ch_lst, true, WLAN_WIFI_POS_CORE_ID); + + for (i = 0; i < NUM_CHANNELS && num_ch < OEM_CAP_MAX_NUM_CHANNELS; + i++) { + if (ch_lst[i].state != CHANNEL_STATE_DISABLE && + ch_lst[i].state != CHANNEL_STATE_INVALID) { + num_ch++; + caps->channel_list[i] = ch_lst[i].chan_num; + } + } + + caps->num_channels = num_ch; + wifi_pos_err("num channels: %d", num_ch); +} + +QDF_STATUS wifi_pos_populate_caps(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_driver_caps *caps) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_obj = + wifi_pos_get_psoc_priv_obj(psoc); + + wifi_pos_debug("Enter"); + if (!wifi_pos_obj) { + wifi_pos_err("wifi_pos_obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + strlcpy(caps->oem_target_signature, + OEM_TARGET_SIGNATURE, + OEM_TARGET_SIGNATURE_LEN); + caps->oem_target_type = wifi_pos_obj->oem_target_type; + caps->oem_fw_version = 
wifi_pos_obj->oem_fw_version; + caps->driver_version.major = wifi_pos_obj->driver_version.major; + caps->driver_version.minor = wifi_pos_obj->driver_version.minor; + caps->driver_version.patch = wifi_pos_obj->driver_version.patch; + caps->driver_version.build = wifi_pos_obj->driver_version.build; + caps->allowed_dwell_time_min = wifi_pos_obj->allowed_dwell_time_min; + caps->allowed_dwell_time_max = wifi_pos_obj->allowed_dwell_time_max; + caps->curr_dwell_time_min = wifi_pos_obj->current_dwell_time_min; + caps->curr_dwell_time_max = wifi_pos_obj->current_dwell_time_max; + caps->supported_bands = wlan_objmgr_psoc_get_band_capability(psoc); + wifi_pos_get_ch_info(psoc, caps); + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_main_i.h b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_main_i.h new file mode 100644 index 0000000000000000000000000000000000000000..1d423103d5f3dbd22fb20453c40d3a7714d5a427 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_main_i.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: wifi_pos_main_i.h + * This file prototyps the important functions pertinent to wifi positioning + * component. + */ + +#ifndef _WIFI_POS_MAIN_H_ +#define _WIFI_POS_MAIN_H_ + +/* forward reference */ +struct wlan_objmgr_psoc; + +/** + * wifi_pos_psoc_obj_created_notification: callback registered to be called when + * psoc object is created. + * @psoc: pointer to psoc object just created + * @arg_list: argument list + * + * This function will: + * create WIFI POS psoc object and attach to psoc + * register TLV vs nonTLV callbacks + * Return: status of operation + */ +QDF_STATUS wifi_pos_psoc_obj_created_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list); + +/** + * wifi_pos_psoc_obj_destroyed_notification: callback registered to be called + * when psoc object is destroyed. + * @psoc: pointer to psoc object just about to be destroyed + * @arg_list: argument list + * + * This function will: + * detach WIFI POS from psoc object and free + * Return: status of operation + */ +QDF_STATUS wifi_pos_psoc_obj_destroyed_notification( + struct wlan_objmgr_psoc *psoc, void *arg_list); + +/** + * wifi_pos_oem_rsp_handler: lmac rx ops registered + * @psoc: pointer to psoc object + * @oem_rsp: response from firmware + * + * Return: status of operation + */ +int wifi_pos_oem_rsp_handler(struct wlan_objmgr_psoc *psoc, + struct oem_data_rsp *oem_rsp); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_oem_interface_i.h b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_oem_interface_i.h new file mode 100644 index 0000000000000000000000000000000000000000..53ace2d270d650376667f4afa928f02632946c4e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_oem_interface_i.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wifi_pos_oem_interface.h + * This file defines the interface between host driver and userspace layer. + */ +#ifndef _WIFI_POS_OEM_INTERFACE_H_ +#define _WIFI_POS_OEM_INTERFACE_H_ + +/* Include files */ +#include "qdf_types.h" +#include "qdf_status.h" +#include "wlan_objmgr_cmn.h" + +#define TARGET_OEM_CAPABILITY_REQ 0x01 +#define TARGET_OEM_CAPABILITY_RSP 0x02 +#define TARGET_OEM_MEASUREMENT_REQ 0x03 +#define TARGET_OEM_MEASUREMENT_RSP 0x04 +#define TARGET_OEM_ERROR_REPORT_RSP 0x05 +#define TARGET_OEM_NAN_MEAS_REQ 0x06 +#define TARGET_OEM_NAN_MEAS_RSP 0x07 +#define TARGET_OEM_NAN_PEER_INFO 0x08 +#define TARGET_OEM_CONFIGURE_LCR 0x09 +#define TARGET_OEM_CONFIGURE_LCI 0x0A +#define TARGET_OEM_CONFIGURE_WRU 0x80 +#define TARGET_OEM_CONFIGURE_FTMRR 0x81 + +#define WIFI_POS_FLAG_DFS 10 +#define WIFI_POS_SET_DFS(info) (info |= (1 << WIFI_POS_FLAG_DFS)) + +/** + * enum WMIRTT_FIELD_ID - identifies which field is being specified + * @WMIRTT_FIELD_ID_oem_data_sub_type: oem data req sub type + * @WMIRTT_FIELD_ID_channel_mhz: channel mhz info + * @WMIRTT_FIELD_ID_pdev: pdev info + */ +enum WMIRTT_FIELD_ID { + WMIRTT_FIELD_ID_oem_data_sub_type, + WMIRTT_FIELD_ID_channel_mhz, + WMIRTT_FIELD_ID_pdev, +}; + +#endif diff 
--git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_ucfg.c b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_ucfg.c new file mode 100644 index 0000000000000000000000000000000000000000..5808ec8baf792802381a81164d605b5f2ae32a1a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_ucfg.c @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * This file defines the important dispatcher APIs pertinent to + * wifi positioning. 
+ */ +#include "wifi_pos_utils_i.h" +#include "wifi_pos_api.h" +#include "wifi_pos_ucfg_i.h" +#include "wlan_ptt_sock_svc.h" + +QDF_STATUS ucfg_wifi_pos_process_req(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req, + void (*send_rsp_cb)(uint32_t, uint32_t, uint32_t, uint8_t *)) +{ + uint8_t err; + uint32_t app_pid; + QDF_STATUS status; + bool is_app_registered; + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc_obj = + wifi_pos_get_psoc_priv_obj(psoc); + + wifi_pos_debug("enter"); + + if (!wifi_pos_psoc_obj) { + wifi_pos_err("wifi_pos_psoc_obj is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + qdf_spin_lock_bh(&wifi_pos_psoc_obj->wifi_pos_lock); + wifi_pos_psoc_obj->wifi_pos_send_rsp = send_rsp_cb; + is_app_registered = wifi_pos_psoc_obj->is_app_registered; + app_pid = wifi_pos_psoc_obj->app_pid; + if (!wifi_pos_psoc_obj->wifi_pos_req_handler) { + wifi_pos_err("wifi_pos_psoc_obj->wifi_pos_req_handler is null"); + err = OEM_ERR_NULL_CONTEXT; + send_rsp_cb(app_pid, ANI_MSG_OEM_ERROR, sizeof(err), &err); + status = QDF_STATUS_E_NULL_VALUE; + goto unlock_and_exit; + } + + if (req->msg_type != ANI_MSG_APP_REG_REQ && + (!is_app_registered || app_pid != req->pid)) { + wifi_pos_err("requesting app is not registered, app_registered: %d, requesting pid: %d, stored pid: %d", + is_app_registered, req->pid, app_pid); + err = OEM_ERR_APP_NOT_REGISTERED; + send_rsp_cb(app_pid, ANI_MSG_OEM_ERROR, sizeof(err), &err); + status = QDF_STATUS_E_INVAL; + goto unlock_and_exit; + } + + status = wifi_pos_psoc_obj->wifi_pos_req_handler(psoc, req); + +unlock_and_exit: + qdf_spin_unlock_bh(&wifi_pos_psoc_obj->wifi_pos_lock); + return status; +} + + +uint32_t ucfg_wifi_pos_get_ftm_cap(struct wlan_objmgr_psoc *psoc) +{ + uint32_t val = 0; + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_alert("unable to get wifi_pos psoc obj"); + return val; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + val = 
wifi_pos_psoc->fine_time_meas_cap; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); + + return val; +} + +void ucfg_wifi_pos_set_ftm_cap(struct wlan_objmgr_psoc *psoc, uint32_t val) +{ + struct wifi_pos_psoc_priv_obj *wifi_pos_psoc = + wifi_pos_get_psoc_priv_obj(psoc); + + if (!wifi_pos_psoc) { + wifi_pos_alert("unable to get wifi_pos psoc obj"); + return; + } + + qdf_spin_lock_bh(&wifi_pos_psoc->wifi_pos_lock); + wifi_pos_psoc->fine_time_meas_cap = val; + qdf_spin_unlock_bh(&wifi_pos_psoc->wifi_pos_lock); +} + diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_ucfg_i.h b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_ucfg_i.h new file mode 100644 index 0000000000000000000000000000000000000000..a5dd1f4f878f389e04b965ec339e01ff9676507e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_ucfg_i.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wifi_pos_ucfg_i.h + * This file prototyps the important functions pertinent to wifi positioning + * component. 
+ */ + +#ifndef _WIFI_POS_UCFG_H_ +#define _WIFI_POS_UCFG_H_ + +#include "qdf_types.h" +#include "qdf_status.h" + +struct wlan_objmgr_psoc; +struct wifi_pos_req_msg; + +/** + * ucfg_wifi_pos_process_req: ucfg API to be called from HDD/OS_IF to process a + * wifi_pos request from userspace + * @psoc: pointer to psoc object + * @req: wifi_pos request msg + * @send_rsp_cb: callback pointer required to send msg to userspace + * + * Return: status of operation + */ +QDF_STATUS ucfg_wifi_pos_process_req(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req, + void (*send_rsp_cb)(uint32_t, uint32_t, uint32_t, uint8_t *)); + +#endif /* _WIFI_POS_UCFG_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_utils.c b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_utils.c new file mode 100644 index 0000000000000000000000000000000000000000..ce7a9adac411e4015067abcf30a371894ab5767f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_utils.c @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: wifi_pos_utils.c + * This file defines the utility helper functions for wifi_pos component. 
+ */ + +#include "qdf_types.h" +#include "wlan_objmgr_cmn.h" +#include "wlan_objmgr_global_obj.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wifi_pos_utils_i.h" + +/* lock to protect use of psoc global pointer variable */ +static qdf_spinlock_t psoc_ptr_lock; + +/* + * WIFI pos command are not associated with any pdev/psoc/vdev, so the callback + * registered with GENL socket does not receive any pdev/pdev/vdev object. + * Since PSOC is top most object, it was decided to keep WIFI POS private obj + * within PSOC and hence, this module need to hang on to the first PSOC that + * was created for all its internal usage. + */ +static struct wlan_objmgr_psoc *wifi_pos_psoc_obj; + +void wifi_pos_lock_init(void) +{ + qdf_spinlock_create(&psoc_ptr_lock); +} + +void wifi_pos_lock_deinit(void) +{ + qdf_spinlock_destroy(&psoc_ptr_lock); +} + +struct wlan_objmgr_psoc *wifi_pos_get_psoc(void) +{ + struct wlan_objmgr_psoc *tmp; + + qdf_spin_lock_bh(&psoc_ptr_lock); + tmp = wifi_pos_psoc_obj; + qdf_spin_unlock_bh(&psoc_ptr_lock); + + return tmp; +} + +void wifi_pos_set_psoc(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_objmgr_psoc *tmp; + + qdf_spin_lock_bh(&psoc_ptr_lock); + tmp = wifi_pos_psoc_obj; + if (!wifi_pos_psoc_obj) + wifi_pos_psoc_obj = psoc; + qdf_spin_unlock_bh(&psoc_ptr_lock); + + if (tmp) + wifi_pos_warn("global psoc obj already set"); +} + +void wifi_pos_clear_psoc(void) +{ + struct wlan_objmgr_psoc *tmp; + + qdf_spin_lock_bh(&psoc_ptr_lock); + tmp = wifi_pos_psoc_obj; + if (wifi_pos_psoc_obj) + wifi_pos_psoc_obj = NULL; + qdf_spin_unlock_bh(&psoc_ptr_lock); + + if (!tmp) + wifi_pos_warn("global psoc obj already cleared"); +} + +/** + * wifi_pos_get_psoc_priv_obj: returns wifi_pos priv object within psoc + * @psoc: pointer to psoc object + * + * Return: wifi_pos_psoc_priv_obj + */ +struct wifi_pos_psoc_priv_obj *wifi_pos_get_psoc_priv_obj( + struct wlan_objmgr_psoc *psoc) +{ + struct wifi_pos_psoc_priv_obj *obj; + + obj = 
wlan_objmgr_psoc_get_comp_private_obj(psoc, + WLAN_UMAC_COMP_WIFI_POS); + + return obj; +} diff --git a/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_utils_i.h b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_utils_i.h new file mode 100644 index 0000000000000000000000000000000000000000..7172cbe9c5c2e1b9e6a709096e435295c1ed64b6 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/umac/wifi_pos/src/wifi_pos_utils_i.h @@ -0,0 +1,383 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wifi_pos_utils_i.h + * This file defines the prototypes for the utility helper functions + * for the wifi_pos component. + */ + +#ifdef WIFI_POS_CONVERGED +#ifndef _WIFI_POS_UTILS_H_ +#define _WIFI_POS_UTILS_H_ +/* Include files */ +#include "qdf_types.h" +#include "qdf_status.h" +#include "ol_defines.h" +#include "qdf_trace.h" + +struct wlan_objmgr_psoc; +struct wifi_pos_req_msg; + +#define wifi_pos_log(level, args...) \ + QDF_TRACE(QDF_MODULE_ID_WIFIPOS, level, ## args) +#define wifi_pos_logfl(level, format, args...) \ + wifi_pos_log(level, FL(format), ## args) + +#define wifi_pos_alert(format, args...) 
\ + wifi_pos_logfl(QDF_TRACE_LEVEL_FATAL, format, ## args) +#define wifi_pos_err(format, args...) \ + wifi_pos_logfl(QDF_TRACE_LEVEL_ERROR, format, ## args) +#define wifi_pos_warn(format, args...) \ + wifi_pos_logfl(QDF_TRACE_LEVEL_WARN, format, ## args) +#define wifi_pos_notice(format, args...) \ + wifi_pos_logfl(QDF_TRACE_LEVEL_INFO, format, ## args) +#define wifi_pos_debug(format, args...) \ + wifi_pos_logfl(QDF_TRACE_LEVEL_DEBUG, format, ## args) + +#define OEM_APP_SIGNATURE_LEN 16 +#define OEM_APP_SIGNATURE_STR "QUALCOMM-OEM-APP" + +#define OEM_TARGET_SIGNATURE_LEN 8 +#define OEM_TARGET_SIGNATURE "QUALCOMM" + +#define OEM_CAP_MAX_NUM_CHANNELS 128 + +#ifndef OEM_DATA_RSP_SIZE +#define OEM_DATA_RSP_SIZE 1724 +#endif + +/** + * struct app_reg_rsp_vdev_info - vdev info struct + * @dev_mode: device mode + * @vdev_id: vdev id + * + */ +struct qdf_packed app_reg_rsp_vdev_info { + uint8_t dev_mode; + uint8_t vdev_id; +}; + +/** + * struct wifi_app_reg_rsp - app registration response struct + * @num_inf: number of interfaces active + * @vdevs: array indicating all active vdev's information + * + */ +struct qdf_packed wifi_app_reg_rsp { + uint8_t num_inf; + struct app_reg_rsp_vdev_info vdevs[1]; +}; + +/** + * struct oem_data_req - data request to be sent to firmware + * @data_len: len of data + * @data: buffer containing data + * + */ +struct oem_data_req { + uint32_t data_len; + uint8_t *data; +}; + +/** + * struct oem_data_rsp - response from firmware to data request sent earlier + * @rsp_len_1: len of data_1 + * @data_1: first part of payload + * @rsp_len_2: len of data_2 + * @data_2: second part of payload + * @dma_len: len of DMAed data + * @vaddr: virtual address of DMA data start + * + */ +struct oem_data_rsp { + uint32_t rsp_len_1; + uint8_t *data_1; + uint32_t rsp_len_2; + uint8_t *data_2; + uint32_t dma_len; + void *vaddr; +}; + +/** + * struct wifi_pos_driver_version - Driver version identifier (w.x.y.z) + * @major: Version ID major number + * @minor: 
Version ID minor number + * @patch: Version ID patch number + * @build: Version ID build number + */ +struct qdf_packed wifi_pos_driver_version { + uint8_t major; + uint8_t minor; + uint8_t patch; + uint8_t build; +}; + +/** + * struct wifi_pos_driver_caps - OEM Data Capabilities + * @oem_target_signature: Signature of chipset vendor, e.g. QUALCOMM + * @oem_target_type: Chip type + * @oem_fw_version: Firmware version + * @driver_version: Host software version + * @allowed_dwell_time_min: Channel dwell time - allowed minimum + * @allowed_dwell_time_max: Channel dwell time - allowed maximum + * @curr_dwell_time_min: Channel dwell time - current minimim + * @curr_dwell_time_max: Channel dwell time - current maximum + * @supported_bands: Supported bands, 2.4G or 5G Hz + * @num_channels: Num of channels IDs to follow + * @channel_list: List of channel IDs + */ +struct qdf_packed wifi_pos_driver_caps { + uint8_t oem_target_signature[OEM_TARGET_SIGNATURE_LEN]; + uint32_t oem_target_type; + uint32_t oem_fw_version; + struct wifi_pos_driver_version driver_version; + uint16_t allowed_dwell_time_min; + uint16_t allowed_dwell_time_max; + uint16_t curr_dwell_time_min; + uint16_t curr_dwell_time_max; + uint16_t supported_bands; + uint16_t num_channels; + uint8_t channel_list[OEM_CAP_MAX_NUM_CHANNELS]; +}; + +/** + * struct wifi_pos_user_defined_caps - OEM capability to be exchanged between + * host and userspace + * @ftm_rr: FTM range report capability bit + * @lci_capability: LCI capability bit + * @reserved1: reserved + * @reserved2: reserved + */ +struct wifi_pos_user_defined_caps { + uint32_t ftm_rr:1; + uint32_t lci_capability:1; + uint32_t reserved1:30; + uint32_t reserved2; +}; + +/** + * struct wifi_pos_oem_get_cap_rsp - capabilities set by userspace and target. 
+ * @driver_cap: target capabilities + * @user_defined_cap: capabilities set by userspace via set request + */ +struct qdf_packed wifi_pos_oem_get_cap_rsp { + struct wifi_pos_driver_caps driver_cap; + struct wifi_pos_user_defined_caps user_defined_cap; +}; + +/** + * struct wifi_pos_dma_rings_cap - capabilities requested by firmware. + * @pdev_id: pdev_id or mac_id of ring + * @min_num_ptr: minimum depth of ring required + * @min_buf_size: minimum size of each buffer + * @min_buf_align: minimum allignment of buffer memory + */ +struct wifi_pos_dma_rings_cap { + uint32_t pdev_id; + uint32_t min_num_ptr; + uint32_t min_buf_size; + uint32_t min_buf_align; +}; + +/** + * struct wifi_pos_dma_buf_info - buffer info struct containing phy to virtual + * mapping. + * @cookie: this identifies location of DMA buffer in pool array + * @paddr: aligned physical address as exchanged with firmware + * @vaddr: virtual address - unaligned. this helps in freeing later + * @offset: offset of aligned address from unaligned + */ +struct wifi_pos_dma_buf_info { + uint32_t cookie; + void *paddr; + void *vaddr; + uint8_t offset; +}; + +/** + * struct wifi_pos_dma_rings_cfg - DMA ring parameters to be programmed to FW. 
+ * @pdev_id: pdev_id of ring + * @num_ptr: depth of ring + * @base_paddr_unaligned: base physical addr unaligned + * @base_vaddr_unaligned: base virtual addr unaligned + * @base_paddr_aligned: base physical addr aligned + * @base_vaddr_aligned: base virtual addr unaligned + * @head_idx_addr: head index addr + * @tail_idx_addr: tail index addr + * @srng: hal srng + */ +struct wifi_pos_dma_rings_cfg { + uint32_t pdev_id; + uint32_t num_ptr; + uint32_t ring_alloc_size; + void *base_paddr_unaligned; + void *base_vaddr_unaligned; + void *base_paddr_aligned; + void *base_vaddr_aligned; + void *head_idx_addr; + void *tail_idx_addr; + void *srng; +}; + +/** + * struct wifi_pos_psoc_priv_obj - psoc obj data for wifi_pos + * @app_pid: pid of app registered to host driver + * @is_app_registered: indicates if app is registered + * @fine_time_meas_cap: FTM cap for different roles, reflection of ini + * @ftm_rr: configured value of FTM Ranging Request capability + * @lci_capability: configured value of LCI capability + * @rsvd: reserved + * @oem_target_type + * @oem_target_type: oem target type, populated from HDD + * @oem_fw_version: firmware version, populated from HDD + * @driver_version: driver version, populated from HDD + * @allowed_dwell_time_min: allowed dwell time min, populated from HDD + * @allowed_dwell_time_max: allowed dwell time max, populated from HDD + * @current_dwell_time_min: current dwell time min, populated from HDD + * @current_dwell_time_max: current dwell time max, populated from HDD + * @hal_soc: hal_soc + * @num_rings: DMA ring cap requested by firmware + * @dma_cap: dma cap as read from service ready ext event + * @dma_cfg: DMA ring cfg to be programmed to firmware + * @dma_buf_pool: DMA buffer pools maintained at host: this will be 2-D array + * where with num_rows = number of rings num_elements in each row = ring depth + * @wifi_pos_lock: lock to access wifi pos priv object + * @wifi_pos_req_handler: function pointer to handle TLV or non-TLV + * 
@wifi_pos_send_rsp: function pointer to send msg to userspace APP + * + * wifi pos request messages + * <----- fine_time_meas_cap (in bits) -----> + *+----------+-----+-----+------+------+-------+-------+-----+-----+ + *| 8-31 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + *+----------+-----+-----+------+------+-------+-------+-----+-----+ + *| reserved | SAP | SAP |P2P-GO|P2P-GO|P2P-CLI|P2P-CLI| STA | STA | + *| |resp |init |resp |init |resp |init |resp |init | + *+----------+-----+-----+------+------+-------+-------+-----+-----+ + * resp - responder role; init- initiator role + * + */ +struct wifi_pos_psoc_priv_obj { + uint32_t app_pid; + bool is_app_registered; + uint32_t fine_time_meas_cap; + uint32_t ftm_rr:1; + uint32_t lci_capability:1; + uint32_t rsvd:30; + + uint32_t oem_target_type; + uint32_t oem_fw_version; + struct wifi_pos_driver_version driver_version; + uint16_t allowed_dwell_time_min; + uint16_t allowed_dwell_time_max; + uint16_t current_dwell_time_min; + uint16_t current_dwell_time_max; + + void *hal_soc; + uint8_t num_rings; + struct wifi_pos_dma_rings_cap *dma_cap; + struct wifi_pos_dma_rings_cfg *dma_cfg; + struct wifi_pos_dma_buf_info **dma_buf_pool; + + qdf_spinlock_t wifi_pos_lock; + QDF_STATUS (*wifi_pos_req_handler)(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_req_msg *req); + void (*wifi_pos_send_rsp)(uint32_t, uint32_t, uint32_t, uint8_t *); + void (*wifi_pos_get_phy_mode)(uint8_t, uint32_t, uint32_t *); +}; + +/** + * wifi_pos_get_psoc_priv_obj: API to get wifi_psoc private object + * @psoc: pointer to psoc object + * + * Return: psoc private object on success, NULL otherwise + */ +struct wifi_pos_psoc_priv_obj *wifi_pos_get_psoc_priv_obj( + struct wlan_objmgr_psoc *psoc); + +/** + * wifi_pos_lock_init: API to init lock used protect use of psoc global pointer + * variable + * + * Return: none. 
+ */ +void wifi_pos_lock_init(void); + +/** + * wifi_pos_lock_deinit: API to deinit lock used protect use of psoc global + * pointer variable + * + * Return: none. + */ +void wifi_pos_lock_deinit(void); + +/** + * wifi_pos_set_psoc: API to set global PSOC object + * @psoc: pointer to psoc object + * + * Since request from userspace is not associated with any vdev/pdev/psoc, this + * API is used to set global psoc object. + * + * Return: none. + */ +void wifi_pos_set_psoc(struct wlan_objmgr_psoc *psoc); + +/** + * wifi_pos_get_psoc: API to get global PSOC object + * + * Since request from userspace is not associated with any vdev/pdev/psoc, this + * API is used to get global psoc object. + * Return: global psoc object. + */ +struct wlan_objmgr_psoc *wifi_pos_get_psoc(void); + +/** + * wifi_pos_get_psoc: API to clear global PSOC object + * + * Return: none. + */ +void wifi_pos_clear_psoc(void); + +/** + * wifi_pos_populate_caps: API to get OEM caps + * @psoc: psoc object + * @caps: capabilities buffer to populate + * + * Return: status of operation. + */ +QDF_STATUS wifi_pos_populate_caps(struct wlan_objmgr_psoc *psoc, + struct wifi_pos_driver_caps *caps); + +/** + * wifi_pos_get_app_pid: returns oem app pid. + * @psoc: pointer to psoc object + * + * Return: oem app pid + */ +uint32_t wifi_pos_get_app_pid(struct wlan_objmgr_psoc *psoc); + +/** + * wifi_pos_is_app_registered: indicates if oem app is registered. 
+ * @psoc: pointer to psoc object + * + * Return: true if app is registered, false otherwise + */ +bool wifi_pos_is_app_registered(struct wlan_objmgr_psoc *psoc); + +#endif /* _WIFI_POS_UTILS_H_ */ +#endif /* WIFI_POS_CONVERGED */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/epping/inc/epping_internal.h b/drivers/staging/qca-wifi-host-cmn/utils/epping/inc/epping_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..bf058fe3c156d84c262d1f25ccf9e24584d8d164 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/epping/inc/epping_internal.h @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef EPPING_INTERNAL_H +#define EPPING_INTERNAL_H +/**=========================================================================== + + \file epping_internal.h + + \brief Linux epping internal head file + + ==========================================================================*/ + +/*--------------------------------------------------------------------------- + Include files + -------------------------------------------------------------------------*/ + +#include +#include +#include +#include +#include +#if defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK) +#include +#endif +#include "htc_api.h" +#include "htc_packet.h" +#include "epping_test.h" +#include +#include +#include + +#define EPPING_LOG_MASK (1< +#include + +/* epping_main signatures */ +#ifdef WLAN_FEATURE_EPPING +int epping_open(void); +void epping_close(void); +void epping_disable(void); +int epping_enable(struct device *parent_dev); +#else +static inline int epping_open(void) +{ + return QDF_STATUS_E_INVAL; +} + +static inline int epping_enable(struct device *parent_dev) +{ + return QDF_STATUS_E_INVAL; +} + +static inline void epping_close(void) {} +static inline void epping_disable(void) {} +#endif +#endif /* end #ifndef EPPING_MAIN_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_helper.c b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_helper.c new file mode 100644 index 0000000000000000000000000000000000000000..dbbfc4d9db76efc9207dae7222185158288ff948 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_helper.c @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/*======================================================================== + + \file epping_main.c + + \brief WLAN End Point Ping test tool implementation + + ========================================================================*/ + +/*-------------------------------------------------------------------------- + Include Files + ------------------------------------------------------------------------*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "epping_main.h" +#include "epping_internal.h" + +int epping_cookie_init(epping_context_t *pEpping_ctx) +{ + uint32_t i, j; + + pEpping_ctx->cookie_list = NULL; + pEpping_ctx->cookie_count = 0; + for (i = 0; i < MAX_COOKIE_SLOTS_NUM; i++) { + pEpping_ctx->s_cookie_mem[i] = + qdf_mem_malloc(sizeof(struct epping_cookie) * + MAX_COOKIE_SLOT_SIZE); + if (pEpping_ctx->s_cookie_mem[i] == NULL) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: no mem for cookie (idx = %d)", __func__, + i); + goto error; + } + } + qdf_spinlock_create(&pEpping_ctx->cookie_lock); + + for (i = 0; i < MAX_COOKIE_SLOTS_NUM; i++) { + struct epping_cookie *cookie_mem = pEpping_ctx->s_cookie_mem[i]; + for (j = 0; j < MAX_COOKIE_SLOT_SIZE; j++) { + epping_free_cookie(pEpping_ctx, &cookie_mem[j]); + } + } + return 0; +error: + for (i = 0; i < MAX_COOKIE_SLOTS_NUM; i++) { + if (pEpping_ctx->s_cookie_mem[i]) { + 
qdf_mem_free(pEpping_ctx->s_cookie_mem[i]); + pEpping_ctx->s_cookie_mem[i] = NULL; + } + } + return -ENOMEM; +} + +/* cleanup cookie queue */ +void epping_cookie_cleanup(epping_context_t *pEpping_ctx) +{ + int i; + qdf_spin_lock_bh(&pEpping_ctx->cookie_lock); + pEpping_ctx->cookie_list = NULL; + pEpping_ctx->cookie_count = 0; + qdf_spin_unlock_bh(&pEpping_ctx->cookie_lock); + for (i = 0; i < MAX_COOKIE_SLOTS_NUM; i++) { + if (pEpping_ctx->s_cookie_mem[i]) { + qdf_mem_free(pEpping_ctx->s_cookie_mem[i]); + pEpping_ctx->s_cookie_mem[i] = NULL; + } + } +} + +void epping_free_cookie(epping_context_t *pEpping_ctx, + struct epping_cookie *cookie) +{ + qdf_spin_lock_bh(&pEpping_ctx->cookie_lock); + cookie->next = pEpping_ctx->cookie_list; + pEpping_ctx->cookie_list = cookie; + pEpping_ctx->cookie_count++; + qdf_spin_unlock_bh(&pEpping_ctx->cookie_lock); +} + +struct epping_cookie *epping_alloc_cookie(epping_context_t *pEpping_ctx) +{ + struct epping_cookie *cookie; + + qdf_spin_lock_bh(&pEpping_ctx->cookie_lock); + cookie = pEpping_ctx->cookie_list; + if (cookie != NULL) { + pEpping_ctx->cookie_list = cookie->next; + pEpping_ctx->cookie_count--; + } + qdf_spin_unlock_bh(&pEpping_ctx->cookie_lock); + return cookie; +} + +void epping_get_dummy_mac_addr(tSirMacAddr macAddr) +{ + macAddr[0] = 69; /* E */ + macAddr[1] = 80; /* P */ + macAddr[2] = 80; /* P */ + macAddr[3] = 73; /* I */ + macAddr[4] = 78; /* N */ + macAddr[5] = 71; /* G */ +} + +void epping_hex_dump(void *data, int buf_len, const char *str) +{ + char *buf = (char *)data; + int i; + + printk("%s: E, %s\n", __func__, str); + for (i = 0; (i + 7) < buf_len; i += 8) { + printk("%02x %02x %02x %02x %02x %02x %02x %02x\n", + buf[i], + buf[i + 1], + buf[i + 2], + buf[i + 3], + buf[i + 4], buf[i + 5], buf[i + 6], buf[i + 7]); + } + + /* Dump the bytes in the last line */ + for (; i < buf_len; i++) { + printk("%02x ", buf[i]); + } + printk("\n%s: X %s\n", __func__, str); +} + +void *epping_get_qdf_ctx(void) +{ + 
qdf_device_t *qdf_ctx; + + qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE); + return qdf_ctx; +} + +void epping_log_packet(epping_adapter_t *adapter, + EPPING_HEADER *eppingHdr, int ret, const char *str) +{ + if (eppingHdr->Cmd_h & EPPING_LOG_MASK) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: cmd = %d, seqNo = %u, flag = 0x%x, ret = %d, " + "txCount = %lu, txDrop = %lu, txBytes = %lu," + "rxCount = %lu, rxDrop = %lu, rxBytes = %lu\n", + str, eppingHdr->Cmd_h, eppingHdr->SeqNo, + eppingHdr->CmdFlags_h, ret, + adapter->stats.tx_packets, + adapter->stats.tx_dropped, + adapter->stats.tx_bytes, + adapter->stats.rx_packets, + adapter->stats.rx_dropped, + adapter->stats.rx_bytes); + } +} + +void epping_log_stats(epping_adapter_t *adapter, const char *str) +{ + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: txCount = %lu, txDrop = %lu, tx_bytes = %lu, " + "rxCount = %lu, rxDrop = %lu, rx_bytes = %lu, tx_acks = %u\n", + str, + adapter->stats.tx_packets, + adapter->stats.tx_dropped, + adapter->stats.tx_bytes, + adapter->stats.rx_packets, + adapter->stats.rx_dropped, + adapter->stats.rx_bytes, + adapter->pEpping_ctx->total_tx_acks); +} + +void epping_set_kperf_flag(epping_adapter_t *adapter, + HTC_ENDPOINT_ID eid, uint8_t kperf_flag) +{ + adapter->pEpping_ctx->kperf_num_rx_recv[eid] = 0; + adapter->pEpping_ctx->kperf_num_tx_acks[eid] = 0; +} diff --git a/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_main.c b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_main.c new file mode 100644 index 0000000000000000000000000000000000000000..11d71faa2760792d814bfaf1ab6d688a02ada00f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_main.c @@ -0,0 +1,347 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/*======================================================================== + + \file epping_main.c + + \brief WLAN End Point Ping test tool implementation + + ========================================================================*/ + +/*-------------------------------------------------------------------------- + Include Files + ------------------------------------------------------------------------*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "bmi.h" +#include "ol_fw.h" +#include "ol_if_athvar.h" +#include "hif.h" +#include "epping_main.h" +#include "epping_internal.h" +#include "wlan_policy_mgr_api.h" + +#ifdef TIMER_MANAGER +#define TIMER_MANAGER_STR " +TIMER_MANAGER" +#else +#define TIMER_MANAGER_STR "" +#endif + +#ifdef MEMORY_DEBUG +#define MEMORY_DEBUG_STR " +MEMORY_DEBUG" +#else +#define MEMORY_DEBUG_STR "" +#endif + +#ifdef HIF_SDIO +#define WLAN_WAIT_TIME_WLANSTART 10000 +#else +#define WLAN_WAIT_TIME_WLANSTART 2000 +#endif + +#ifdef WLAN_FEATURE_EPPING +static struct epping_context *g_epping_ctx; + +/** + * epping_open(): End point ping driver open Function + * + * This function is called by HDD to open epping module + * + * + * 
return - 0 for success, negative for failure + */ +int epping_open(void) +{ + EPPING_LOG(QDF_TRACE_LEVEL_INFO_HIGH, "%s: Enter", __func__); + + g_epping_ctx = qdf_mem_malloc(sizeof(*g_epping_ctx)); + + if (g_epping_ctx == NULL) { + EPPING_LOG(QDF_TRACE_LEVEL_ERROR, + "%s: cannot alloc epping context", __func__); + return -ENOMEM; + } + + g_epping_ctx->con_mode = cds_get_conparam(); + return 0; +} + +/** + * epping_disable(): End point ping driver disable Function + * + * This is the driver disable function - called by HDD to + * disable epping module + * + * return: none + */ +void epping_disable(void) +{ + epping_context_t *pEpping_ctx; + struct hif_opaque_softc *hif_ctx; + HTC_HANDLE htc_handle; + + pEpping_ctx = g_epping_ctx; + if (pEpping_ctx == NULL) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: error: pEpping_ctx = NULL", __func__); + return; + } + + if (pEpping_ctx->epping_adapter) { + epping_destroy_adapter(pEpping_ctx->epping_adapter); + pEpping_ctx->epping_adapter = NULL; + } + + hif_ctx = cds_get_context(QDF_MODULE_ID_HIF); + if (hif_ctx == NULL) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: error: hif_ctx = NULL", __func__); + return; + } + hif_disable_isr(hif_ctx); + hif_reset_soc(hif_ctx); + + htc_handle = cds_get_context(QDF_MODULE_ID_HTC); + if (htc_handle == NULL) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: error: htc_handle = NULL", __func__); + return; + } + htc_stop(htc_handle); + epping_cookie_cleanup(pEpping_ctx); + htc_destroy(htc_handle); +} + +/** + * epping_close(): End point ping driver close Function + * + * This is the driver close function - called by HDD to close epping module + * + * return: none + */ +void epping_close(void) +{ + epping_context_t *to_free; + + + if (g_epping_ctx == NULL) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: error: g_epping_ctx = NULL", __func__); + return; + } + + to_free = g_epping_ctx; + g_epping_ctx = NULL; + qdf_mem_free(to_free); +} + +/** + * epping_target_suspend_acknowledge() - process wow ack/nack 
from fw + * @context: htc_init_info->context + * @wow_nack: true when wow is rejected + */ +static void epping_target_suspend_acknowledge(void *context, bool wow_nack) +{ + if (NULL == g_epping_ctx) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: epping_ctx is NULL", __func__); + return; + } + /* EPPING_TODO: do we need wow_nack? */ + g_epping_ctx->wow_nack = wow_nack; +} + +/** + * epping_update_ol_config - API to update ol configuration parameters + * + * Return: void + */ +static void epping_update_ol_config(void) +{ + struct ol_config_info cfg; + struct ol_context *ol_ctx = cds_get_context(QDF_MODULE_ID_BMI); + + if (!ol_ctx) + return; + + cfg.enable_self_recovery = 0; + cfg.enable_uart_print = 0; + cfg.enable_fw_log = 0; + cfg.enable_ramdump_collection = 0; + cfg.enable_lpass_support = 0; + + ol_init_ini_config(ol_ctx, &cfg); +} +/** + * epping_enable(): End point ping driver enable Function + * + * This is the driver enable function - called by HDD to enable + * epping module + * + * return - 0 : success, negative: error + */ +int epping_enable(struct device *parent_dev) +{ + int ret = 0; + epping_context_t *pEpping_ctx = NULL; + struct cds_context *p_cds_context = NULL; + qdf_device_t qdf_ctx; + struct htc_init_info htcInfo; + struct hif_opaque_softc *scn; + tSirMacAddr adapter_macAddr; + struct hif_target_info *tgt_info; + struct ol_context *ol_ctx; + + EPPING_LOG(QDF_TRACE_LEVEL_INFO_HIGH, "%s: Enter", __func__); + + p_cds_context = cds_get_global_context(); + + if (p_cds_context == NULL) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: Failed cds_get_global_context", __func__); + ret = -1; + return ret; + } + + pEpping_ctx = g_epping_ctx; + if (pEpping_ctx == NULL) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: Failed to get pEpping_ctx", __func__); + ret = -1; + return ret; + } + pEpping_ctx->parent_dev = (void *)parent_dev; + epping_get_dummy_mac_addr(adapter_macAddr); + + /* Initialize the timer module */ + qdf_timer_module_init(); + + scn = 
cds_get_context(QDF_MODULE_ID_HIF); + if (!scn) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL, + "%s: scn is null!", __func__); + return A_ERROR; + } + + tgt_info = hif_get_target_info_handle(scn); + + /* store target type and target version info in hdd ctx */ + pEpping_ctx->target_type = tgt_info->target_type; + + ol_ctx = cds_get_context(QDF_MODULE_ID_BMI); + if (!ol_ctx) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL, + "%s: ol_ctx is NULL", __func__); + return A_ERROR; + } + + epping_update_ol_config(); +#ifndef FEATURE_BMI_2 + /* Initialize BMI and Download firmware */ + if (bmi_download_firmware(ol_ctx)) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL, + "%s: BMI failed to download target", __func__); + bmi_cleanup(ol_ctx); + return A_ERROR; + } +#endif + EPPING_LOG(QDF_TRACE_LEVEL_INFO_HIGH, + "%s: bmi_download_firmware done", __func__); + + htcInfo.pContext = ol_ctx; + htcInfo.TargetFailure = ol_target_failure; + htcInfo.TargetSendSuspendComplete = epping_target_suspend_acknowledge; + qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE); + + /* Create HTC */ + p_cds_context->htc_ctx = htc_create(scn, &htcInfo, qdf_ctx, + cds_get_conparam()); + if (!p_cds_context->htc_ctx) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL, + "%s: Failed to Create HTC", __func__); + bmi_cleanup(ol_ctx); + return A_ERROR; + } + pEpping_ctx->HTCHandle = + cds_get_context(QDF_MODULE_ID_HTC); + if (pEpping_ctx->HTCHandle == NULL) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: HTCHandle is NULL", __func__); + return A_ERROR; + } + + if (bmi_done(ol_ctx)) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: Failed to complete BMI phase", __func__); + goto error_end; + } + + /* start HIF */ + if (htc_wait_target(pEpping_ctx->HTCHandle) != QDF_STATUS_SUCCESS) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: htc_wait_target error", __func__); + goto error_end; + } + EPPING_LOG(QDF_TRACE_LEVEL_INFO_HIGH, "%s: HTC ready", __func__); + + ret = 
epping_connect_service(pEpping_ctx); + if (ret != 0) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: htc_wait_targetdone", __func__); + goto error_end; + } + if (htc_start(pEpping_ctx->HTCHandle) != QDF_STATUS_SUCCESS) { + goto error_end; + } + EPPING_LOG(QDF_TRACE_LEVEL_INFO_HIGH, "%s: HTC started", __func__); + + /* init the tx cookie resource */ + ret = epping_cookie_init(pEpping_ctx); + if (ret == 0) { + pEpping_ctx->epping_adapter = epping_add_adapter(pEpping_ctx, + adapter_macAddr, + QDF_STA_MODE); + } + if (ret < 0 || pEpping_ctx->epping_adapter == NULL) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: epping_add_adaptererror error", __func__); + htc_stop(pEpping_ctx->HTCHandle); + epping_cookie_cleanup(pEpping_ctx); + goto error_end; + } + + EPPING_LOG(QDF_TRACE_LEVEL_INFO_HIGH, "%s: Exit", __func__); + return ret; + +error_end: + htc_destroy(p_cds_context->htc_ctx); + p_cds_context->htc_ctx = NULL; + bmi_cleanup(ol_ctx); + return A_ERROR; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_rx.c b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_rx.c new file mode 100644 index 0000000000000000000000000000000000000000..22d08f43569748a4085a7571b3911a8b64ec40e0 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_rx.c @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/*======================================================================== + + \file epping_rx.c + + \brief WLAN End Point Ping test tool implementation + + ========================================================================*/ + +/*-------------------------------------------------------------------------- + Include Files + ------------------------------------------------------------------------*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "epping_main.h" +#include "epping_internal.h" +#include "epping_test.h" +#include + +#define AR6000_MAX_RX_BUFFERS 16 +#define AR6000_BUFFER_SIZE 1664 +#define AR6000_MIN_HEAD_ROOM 64 + +static bool enb_rx_dump; + +#ifdef HIF_SDIO +void epping_refill(void *ctx, HTC_ENDPOINT_ID Endpoint) +{ + epping_context_t *pEpping_ctx = (epping_context_t *) ctx; + void *osBuf; + int RxBuffers; + int buffersToRefill; + HTC_PACKET *pPacket; + HTC_PACKET_QUEUE queue; + + buffersToRefill = (int)AR6000_MAX_RX_BUFFERS - + htc_get_num_recv_buffers(pEpping_ctx->HTCHandle, Endpoint); + + if (buffersToRefill <= 0) { + /* fast return, nothing to fill */ + return; + } + + INIT_HTC_PACKET_QUEUE(&queue); + + EPPING_LOG(QDF_TRACE_LEVEL_INFO, + "%s: providing htc with %d buffers at eid=%d\n", + __func__, buffersToRefill, Endpoint); + + for (RxBuffers = 0; RxBuffers < buffersToRefill; RxBuffers++) { + osBuf = qdf_nbuf_alloc(NULL, AR6000_BUFFER_SIZE, + AR6000_MIN_HEAD_ROOM, 4, false); + if (NULL == osBuf) { + break; + } + /* the HTC packet wrapper is at the head of the reserved area + * in the skb */ + pPacket = (HTC_PACKET *) 
(A_NETBUF_HEAD(osBuf)); + /* set re-fill info */ + SET_HTC_PACKET_INFO_RX_REFILL(pPacket, osBuf, + qdf_nbuf_data(osBuf), + AR6000_BUFFER_SIZE, Endpoint); + SET_HTC_PACKET_NET_BUF_CONTEXT(pPacket, osBuf); + /* add to queue */ + HTC_PACKET_ENQUEUE(&queue, pPacket); + } + + if (!HTC_QUEUE_EMPTY(&queue)) { + /* add packets */ + htc_add_receive_pkt_multiple(pEpping_ctx->HTCHandle, &queue); + } +} +#endif /* HIF_SDIO */ + +void epping_rx(void *ctx, HTC_PACKET *pPacket) +{ + epping_context_t *pEpping_ctx = (epping_context_t *) ctx; + epping_adapter_t *adapter = pEpping_ctx->epping_adapter; + struct net_device *dev = adapter->dev; + QDF_STATUS status = pPacket->Status; +#ifdef WLAN_DEBUG + HTC_ENDPOINT_ID eid = pPacket->Endpoint; +#endif + struct sk_buff *pktSkb = (struct sk_buff *)pPacket->pPktContext; + + EPPING_LOG(QDF_TRACE_LEVEL_INFO, + "%s: adapter = 0x%pK eid=%d, skb=0x%pK, data=0x%pK, len=0x%x status:%d", + __func__, adapter, eid, pktSkb, pPacket->pBuffer, + pPacket->ActualLength, status); + + if (status != QDF_STATUS_SUCCESS) { + if (status != QDF_STATUS_E_CANCELED) { + printk("%s: RX ERR (%d)\n", __func__, status); + } + qdf_nbuf_free(pktSkb); + return; + } + + /* deliver to up layer */ + if (pktSkb) { + if (EPPING_ALIGNMENT_PAD > 0) { + A_NETBUF_PULL(pktSkb, EPPING_ALIGNMENT_PAD); + } + if (enb_rx_dump) + epping_hex_dump((void *)qdf_nbuf_data(pktSkb), + pktSkb->len, __func__); + pktSkb->dev = dev; + if ((pktSkb->dev->flags & IFF_UP) == IFF_UP) { + pktSkb->protocol = eth_type_trans(pktSkb, pktSkb->dev); + ++adapter->stats.rx_packets; + adapter->stats.rx_bytes += pktSkb->len; + qdf_net_buf_debug_release_skb(pktSkb); + if (hdd_napi_enabled(HDD_NAPI_ANY)) + netif_receive_skb(pktSkb); + else + netif_rx_ni(pktSkb); + if ((adapter->stats.rx_packets % + EPPING_STATS_LOG_COUNT) == 0) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: total_rx_pkts = %lu", + __func__, + adapter->stats.rx_packets); + } + } else { + ++adapter->stats.rx_dropped; + qdf_nbuf_free(pktSkb); + } + } +} 
diff --git a/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_tx.c b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_tx.c new file mode 100644 index 0000000000000000000000000000000000000000..48bc507c223f9916fcdeaae37084384c8e78555a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_tx.c @@ -0,0 +1,393 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/*======================================================================== + + \file epping_tx.c + + \brief WLAN End Point Ping test tool implementation + + ========================================================================*/ + +/*-------------------------------------------------------------------------- + Include Files + ------------------------------------------------------------------------*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "epping_main.h" +#include "epping_internal.h" +#include "epping_test.h" + +#define TX_RETRY_TIMEOUT_IN_MS 1 + +static bool enb_tx_dump; + +void epping_tx_dup_pkt(epping_adapter_t *adapter, + HTC_ENDPOINT_ID eid, qdf_nbuf_t skb) +{ + struct epping_cookie *cookie = NULL; + int skb_len, ret; + qdf_nbuf_t new_skb; + + cookie = epping_alloc_cookie(adapter->pEpping_ctx); + if (cookie == NULL) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: epping_alloc_cookie returns no resource\n", + __func__); + return; + } + new_skb = qdf_nbuf_copy(skb); + if (!new_skb) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: qdf_nbuf_copy returns no resource\n", __func__); + epping_free_cookie(adapter->pEpping_ctx, cookie); + return; + } + SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt, + cookie, qdf_nbuf_data(skb), + qdf_nbuf_len(new_skb), eid, 0); + SET_HTC_PACKET_NET_BUF_CONTEXT(&cookie->HtcPkt, new_skb); + skb_len = (int)qdf_nbuf_len(new_skb); + /* send the packet */ + ret = htc_send_pkt(adapter->pEpping_ctx->HTCHandle, &cookie->HtcPkt); + if (ret != QDF_STATUS_SUCCESS) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: htc_send_pkt failed, ret = %d\n", __func__, ret); + epping_free_cookie(adapter->pEpping_ctx, cookie); + qdf_nbuf_free(new_skb); + return; + } + adapter->stats.tx_bytes += skb_len; + ++adapter->stats.tx_packets; + if (((adapter->stats.tx_packets + + adapter->stats.tx_dropped) % EPPING_STATS_LOG_COUNT) == 0 && + (adapter->stats.tx_packets || adapter->stats.tx_dropped)) { 
+ epping_log_stats(adapter, __func__); + } +} + +static int epping_tx_send_int(qdf_nbuf_t skb, epping_adapter_t *adapter) +{ + EPPING_HEADER *eppingHdr = (EPPING_HEADER *) qdf_nbuf_data(skb); + HTC_ENDPOINT_ID eid = ENDPOINT_UNUSED; + struct epping_cookie *cookie = NULL; + uint8_t ac = 0; + QDF_STATUS ret = QDF_STATUS_SUCCESS; + int skb_len; + EPPING_HEADER tmpHdr = *eppingHdr; + + /* allocate resource for this packet */ + cookie = epping_alloc_cookie(adapter->pEpping_ctx); + /* no resource */ + if (cookie == NULL) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: epping_alloc_cookie returns no resource\n", + __func__); + return A_ERROR; + } + + if (enb_tx_dump) + epping_hex_dump((void *)eppingHdr, skb->len, __func__); + /* + * a quirk of linux, the payload of the frame is 32-bit aligned and thus + * the addition of the HTC header will mis-align the start of the HTC + * frame, so we add some padding which will be stripped off in the target + */ + if (EPPING_ALIGNMENT_PAD > 0) { + A_NETBUF_PUSH(skb, EPPING_ALIGNMENT_PAD); + } + /* prepare ep/HTC information */ + ac = eppingHdr->StreamNo_h; + eid = adapter->pEpping_ctx->EppingEndpoint[ac]; + if (eid < 0 || eid >= EPPING_MAX_NUM_EPIDS) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: invalid eid = %d, ac = %d\n", __func__, eid, + ac); + return A_ERROR; + } + if (tmpHdr.Cmd_h == EPPING_CMD_RESET_RECV_CNT || + tmpHdr.Cmd_h == EPPING_CMD_CONT_RX_START) { + epping_set_kperf_flag(adapter, eid, tmpHdr.CmdBuffer_t[0]); + } + SET_HTC_PACKET_INFO_TX(&cookie->HtcPkt, + cookie, qdf_nbuf_data(skb), qdf_nbuf_len(skb), + eid, 0); + SET_HTC_PACKET_NET_BUF_CONTEXT(&cookie->HtcPkt, skb); + skb_len = skb->len; + /* send the packet */ + ret = htc_send_pkt(adapter->pEpping_ctx->HTCHandle, &cookie->HtcPkt); + epping_log_packet(adapter, &tmpHdr, ret, __func__); + if (ret != QDF_STATUS_SUCCESS) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: htc_send_pkt failed, status = %d\n", __func__, + ret); + epping_free_cookie(adapter->pEpping_ctx, cookie); + 
return A_ERROR; + } + adapter->stats.tx_bytes += skb_len; + ++adapter->stats.tx_packets; + if (((adapter->stats.tx_packets + + adapter->stats.tx_dropped) % EPPING_STATS_LOG_COUNT) == 0 && + (adapter->stats.tx_packets || adapter->stats.tx_dropped)) { + epping_log_stats(adapter, __func__); + } + + return 0; +} + +void epping_tx_timer_expire(epping_adapter_t *adapter) +{ + qdf_nbuf_t nodrop_skb; + + EPPING_LOG(QDF_TRACE_LEVEL_INFO, "%s: queue len: %d\n", __func__, + qdf_nbuf_queue_len(&adapter->nodrop_queue)); + + if (!qdf_nbuf_queue_len(&adapter->nodrop_queue)) { + /* nodrop queue is empty so no need to arm timer */ + adapter->epping_timer_state = EPPING_TX_TIMER_STOPPED; + return; + } + + /* try to flush nodrop queue */ + while ((nodrop_skb = qdf_nbuf_queue_remove(&adapter->nodrop_queue))) { + htc_set_nodrop_pkt(adapter->pEpping_ctx->HTCHandle, true); + if (epping_tx_send_int(nodrop_skb, adapter)) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: nodrop: %pK xmit fail in timer\n", + __func__, nodrop_skb); + /* fail to xmit so put the nodrop packet to the nodrop queue */ + qdf_nbuf_queue_insert_head(&adapter->nodrop_queue, + nodrop_skb); + break; + } else { + htc_set_nodrop_pkt(adapter->pEpping_ctx->HTCHandle, false); + EPPING_LOG(QDF_TRACE_LEVEL_INFO, + "%s: nodrop: %pK xmit ok in timer\n", + __func__, nodrop_skb); + } + } + + /* if nodrop queue is not empty, continue to arm timer */ + if (nodrop_skb) { + qdf_spin_lock_bh(&adapter->data_lock); + /* if nodrop queue is not empty, continue to arm timer */ + if (adapter->epping_timer_state != EPPING_TX_TIMER_RUNNING) { + adapter->epping_timer_state = EPPING_TX_TIMER_RUNNING; + qdf_timer_mod(&adapter->epping_timer, + TX_RETRY_TIMEOUT_IN_MS); + } + qdf_spin_unlock_bh(&adapter->data_lock); + } else { + adapter->epping_timer_state = EPPING_TX_TIMER_STOPPED; + } +} + +int epping_tx_send(qdf_nbuf_t skb, epping_adapter_t *adapter) +{ + qdf_nbuf_t nodrop_skb; + EPPING_HEADER *eppingHdr; + uint8_t ac = 0; + + eppingHdr = 
(EPPING_HEADER *) qdf_nbuf_data(skb); + + if (!IS_EPPING_PACKET(eppingHdr)) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: Recived non endpoint ping packets\n", __func__); + /* no packet to send, cleanup */ + qdf_nbuf_free(skb); + return -ENOMEM; + } + + /* the stream ID is mapped to an access class */ + ac = eppingHdr->StreamNo_h; + /* hard coded two ep ids */ + if (ac != 0 && ac != 1) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: ac %d is not mapped to mboxping service\n", + __func__, ac); + qdf_nbuf_free(skb); + return -ENOMEM; + } + + /* + * some EPPING packets cannot be dropped no matter what access class + * it was sent on. A special care has been taken: + * 1. when there is no TX resource, queue the control packets to + * a special queue + * 2. when there is TX resource, send the queued control packets first + * and then other packets + * 3. a timer launches to check if there is queued control packets and + * flush them + */ + + /* check the nodrop queue first */ + while ((nodrop_skb = qdf_nbuf_queue_remove(&adapter->nodrop_queue))) { + htc_set_nodrop_pkt(adapter->pEpping_ctx->HTCHandle, true); + if (epping_tx_send_int(nodrop_skb, adapter)) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: nodrop: %pK xmit fail\n", __func__, + nodrop_skb); + /* fail to xmit so put the nodrop packet to the nodrop queue */ + qdf_nbuf_queue_insert_head(&adapter->nodrop_queue, + nodrop_skb); + /* no cookie so free the current skb */ + goto tx_fail; + } else { + htc_set_nodrop_pkt(adapter->pEpping_ctx->HTCHandle, false); + EPPING_LOG(QDF_TRACE_LEVEL_INFO, + "%s: nodrop: %pK xmit ok\n", __func__, + nodrop_skb); + } + } + + /* send the original packet */ + if (epping_tx_send_int(skb, adapter)) + goto tx_fail; + + return 0; + +tx_fail: + if (!IS_EPING_PACKET_NO_DROP(eppingHdr)) { + /* allow to drop the skb so drop it */ + qdf_nbuf_free(skb); + ++adapter->stats.tx_dropped; + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: Tx skb %pK dropped, stats.tx_dropped = %ld\n", + __func__, skb, 
adapter->stats.tx_dropped); + return -ENOMEM; + } else { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: nodrop: %pK queued\n", __func__, skb); + qdf_nbuf_queue_add(&adapter->nodrop_queue, skb); + qdf_spin_lock_bh(&adapter->data_lock); + if (adapter->epping_timer_state != EPPING_TX_TIMER_RUNNING) { + adapter->epping_timer_state = EPPING_TX_TIMER_RUNNING; + qdf_timer_mod(&adapter->epping_timer, + TX_RETRY_TIMEOUT_IN_MS); + } + qdf_spin_unlock_bh(&adapter->data_lock); + } + + return 0; +} + +#ifdef HIF_SDIO +enum htc_send_full_action epping_tx_queue_full(void *Context, + HTC_PACKET *pPacket) +{ + /* + * Call netif_stop_queue frequently will impact the mboxping tx t-put. + * Return HTC_SEND_FULL_KEEP directly in epping_tx_queue_full to avoid. + */ + return HTC_SEND_FULL_KEEP; +} +#endif /* HIF_SDIO */ +void epping_tx_complete(void *ctx, HTC_PACKET *htc_pkt) +{ + epping_context_t *pEpping_ctx = (epping_context_t *) ctx; + epping_adapter_t *adapter = pEpping_ctx->epping_adapter; + struct net_device *dev = adapter->dev; + QDF_STATUS status; + HTC_ENDPOINT_ID eid; + qdf_nbuf_t pktSkb; + struct epping_cookie *cookie; + A_BOOL flushing = false; + qdf_nbuf_queue_t skb_queue; + + if (htc_pkt == NULL) + return; + + qdf_nbuf_queue_init(&skb_queue); + + qdf_spin_lock_bh(&adapter->data_lock); + + status = htc_pkt->Status; + eid = htc_pkt->Endpoint; + pktSkb = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt); + cookie = htc_pkt->pPktContext; + + if (!pktSkb) { + EPPING_LOG(QDF_TRACE_LEVEL_ERROR, + "%s: NULL skb from hc packet", __func__); + QDF_BUG(0); + } else { + if (htc_pkt->pBuffer != qdf_nbuf_data(pktSkb)) { + EPPING_LOG(QDF_TRACE_LEVEL_ERROR, + "%s: htc_pkt buffer not equal to skb->data", + __func__); + QDF_BUG(0); + } + /* add this to the list, use faster non-lock API */ + qdf_nbuf_queue_add(&skb_queue, pktSkb); + + if (QDF_IS_STATUS_SUCCESS(status)) { + if (htc_pkt->ActualLength != + qdf_nbuf_len(pktSkb)) { + EPPING_LOG(QDF_TRACE_LEVEL_ERROR, + "%s: htc_pkt length not equal to 
skb->len", + __func__); + QDF_BUG(0); + } + } + } + + EPPING_LOG(QDF_TRACE_LEVEL_INFO, + "%s skb=%pK data=%pK len=0x%x eid=%d ", + __func__, pktSkb, htc_pkt->pBuffer, + htc_pkt->ActualLength, eid); + + if (QDF_IS_STATUS_ERROR(status)) { + if (status == QDF_STATUS_E_CANCELED) { + /* a packet was flushed */ + flushing = true; + } + if (status != QDF_STATUS_E_RESOURCES) { + printk("%s() -TX ERROR, status: 0x%x\n", + __func__, status); + } + } else { + EPPING_LOG(QDF_TRACE_LEVEL_INFO, "%s: OK\n", __func__); + flushing = false; + } + + epping_free_cookie(adapter->pEpping_ctx, cookie); + qdf_spin_unlock_bh(&adapter->data_lock); + + /* free all skbs in our local list */ + while (qdf_nbuf_queue_len(&skb_queue)) { + /* use non-lock version */ + pktSkb = qdf_nbuf_queue_remove(&skb_queue); + if (pktSkb == NULL) + break; + qdf_nbuf_tx_free(pktSkb, QDF_NBUF_PKT_ERROR); + pEpping_ctx->total_tx_acks++; + } + + if (!flushing) { + netif_wake_queue(dev); + } +} diff --git a/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_txrx.c b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_txrx.c new file mode 100644 index 0000000000000000000000000000000000000000..fd71a433e0b15646bc76a8647f17c20830d01c09 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/epping/src/epping_txrx.c @@ -0,0 +1,463 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/*======================================================================== + + \file epping_txrx.c + + \brief WLAN End Point Ping test tool implementation + + ========================================================================*/ + +/*-------------------------------------------------------------------------- + Include Files + ------------------------------------------------------------------------*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "epping_main.h" +#include "epping_internal.h" + +static int epping_start_adapter(epping_adapter_t *adapter); +static void epping_stop_adapter(epping_adapter_t *adapter); + +static void epping_timer_expire(void *data) +{ + struct net_device *dev = (struct net_device *)data; + epping_adapter_t *adapter; + + if (dev == NULL) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: netdev = NULL", __func__); + return; + } + + adapter = netdev_priv(dev); + if (adapter == NULL) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: adapter = NULL", __func__); + return; + } + adapter->epping_timer_state = EPPING_TX_TIMER_STOPPED; + epping_tx_timer_expire(adapter); +} + +static int epping_ndev_open(struct net_device *dev) +{ + epping_adapter_t *adapter; + int ret = 0; + + adapter = netdev_priv(dev); + epping_start_adapter(adapter); + return ret; +} + +static int epping_ndev_stop(struct net_device *dev) +{ + epping_adapter_t *adapter; + int ret = 0; + + adapter = netdev_priv(dev); + if (NULL == adapter) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: EPPING adapter context is Null", __func__); + ret = -ENODEV; + goto end; + } + 
epping_stop_adapter(adapter); +end: + return ret; +} + +static void epping_ndev_uninit(struct net_device *dev) +{ + epping_adapter_t *adapter; + + adapter = netdev_priv(dev); + if (NULL == adapter) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: EPPING adapter context is Null", __func__); + goto end; + } + epping_stop_adapter(adapter); +end: + return; +} + +static void epping_tx_queue_timeout(struct net_device *dev) +{ + epping_adapter_t *adapter; + + adapter = netdev_priv(dev); + if (NULL == adapter) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: EPPING adapter context is Null", __func__); + goto end; + } + + EPPING_LOG(QDF_TRACE_LEVEL_ERROR, + "%s: Transmission timeout occurred, adapter->started= %d", + __func__, adapter->started); + + /* Getting here implies we disabled the TX queues + * for too long. Since this is epping + * (not because of disassociation or low resource scenarios), + * try to restart the queue + */ + if (adapter->started) + netif_wake_queue(dev); +end: + return; + +} + +static netdev_tx_t epping_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + epping_adapter_t *adapter; + int ret = 0; + + adapter = netdev_priv(dev); + if (NULL == adapter) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: EPPING adapter context is Null", __func__); + kfree_skb(skb); + ret = -ENODEV; + goto end; + } + ret = epping_tx_send(skb, adapter); +end: + return NETDEV_TX_OK; +} + +static struct net_device_stats *epping_get_stats(struct net_device *dev) +{ + epping_adapter_t *adapter = netdev_priv(dev); + + if (NULL == adapter) { + EPPING_LOG(QDF_TRACE_LEVEL_ERROR, "%s: adapter = NULL", + __func__); + return NULL; + } + + return &adapter->stats; +} + +static int epping_ndev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + epping_adapter_t *adapter; + int ret = 0; + + adapter = netdev_priv(dev); + if (NULL == adapter) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: EPPING adapter context is Null", __func__); + ret = -ENODEV; + goto end; + } + if (dev != 
adapter->dev) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: HDD adapter/dev inconsistency", __func__); + ret = -ENODEV; + goto end; + } + + if ((!ifr) || (!ifr->ifr_data)) { + ret = -EINVAL; + goto end; + } + + switch (cmd) { + case (SIOCDEVPRIVATE + 1): + EPPING_LOG(QDF_TRACE_LEVEL_ERROR, + "%s: do not support ioctl %d (SIOCDEVPRIVATE + 1)", + __func__, cmd); + break; + default: + EPPING_LOG(QDF_TRACE_LEVEL_ERROR, "%s: unknown ioctl %d", + __func__, cmd); + ret = -EINVAL; + break; + } + +end: + return ret; +} + +static int epping_set_mac_address(struct net_device *dev, void *addr) +{ + epping_adapter_t *adapter = netdev_priv(dev); + struct sockaddr *psta_mac_addr = addr; + qdf_mem_copy(&adapter->macAddressCurrent, + psta_mac_addr->sa_data, ETH_ALEN); + qdf_mem_copy(dev->dev_addr, psta_mac_addr->sa_data, ETH_ALEN); + return 0; +} + +static void epping_stop_adapter(epping_adapter_t *adapter) +{ + qdf_device_t qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE); + + if (!qdf_ctx) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: qdf_ctx is NULL\n", __func__); + return; + } + + if (adapter && adapter->started) { + EPPING_LOG(LOG1, FL("Disabling queues")); + netif_tx_disable(adapter->dev); + netif_carrier_off(adapter->dev); + adapter->started = false; + pld_request_bus_bandwidth(qdf_ctx->dev, + PLD_BUS_WIDTH_LOW); + } +} + +static int epping_start_adapter(epping_adapter_t *adapter) +{ + qdf_device_t qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE); + + if (!qdf_ctx) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: qdf_ctx is NULL", __func__); + return -EINVAL; + } + + if (!adapter) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: adapter= NULL\n", __func__); + return -EINVAL; + } + if (!adapter->started) { + pld_request_bus_bandwidth(qdf_ctx->dev, + PLD_BUS_WIDTH_HIGH); + netif_carrier_on(adapter->dev); + EPPING_LOG(LOG1, FL("Enabling queues")); + netif_tx_start_all_queues(adapter->dev); + adapter->started = true; + } else { + EPPING_LOG(QDF_TRACE_LEVEL_WARN, + "%s: adapter %pK 
already started\n", __func__, + adapter); + } + return 0; +} + +static int epping_register_adapter(epping_adapter_t *adapter) +{ + int ret = 0; + + ret = register_netdev(adapter->dev); + if (ret != 0) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: unable to register device\n", + adapter->dev->name); + } else { + adapter->registered = true; + } + return ret; +} + +static void epping_unregister_adapter(epping_adapter_t *adapter) +{ + if (adapter) { + epping_stop_adapter(adapter); + if (adapter->registered) { + unregister_netdev(adapter->dev); + adapter->registered = false; + } + } else { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: adapter = NULL, unable to unregister device\n", + __func__); + } +} + +void epping_destroy_adapter(epping_adapter_t *adapter) +{ + struct net_device *dev = NULL; + epping_context_t *pEpping_ctx; + + if (!adapter || !adapter->pEpping_ctx) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: adapter = NULL\n", __func__); + return; + } + + dev = adapter->dev; + pEpping_ctx = adapter->pEpping_ctx; + epping_unregister_adapter(adapter); + + qdf_spinlock_destroy(&adapter->data_lock); + qdf_timer_free(&adapter->epping_timer); + adapter->epping_timer_state = EPPING_TX_TIMER_STOPPED; + + while (qdf_nbuf_queue_len(&adapter->nodrop_queue)) { + qdf_nbuf_t tmp_nbuf = NULL; + tmp_nbuf = qdf_nbuf_queue_remove(&adapter->nodrop_queue); + if (tmp_nbuf) + qdf_nbuf_free(tmp_nbuf); + } + + free_netdev(dev); + if (!pEpping_ctx) + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: pEpping_ctx = NULL\n", __func__); + else + pEpping_ctx->epping_adapter = NULL; +} + +static struct net_device_ops epping_drv_ops = { + .ndo_open = epping_ndev_open, + .ndo_stop = epping_ndev_stop, + .ndo_uninit = epping_ndev_uninit, + .ndo_start_xmit = epping_hard_start_xmit, + .ndo_tx_timeout = epping_tx_queue_timeout, + .ndo_get_stats = epping_get_stats, + .ndo_do_ioctl = epping_ndev_ioctl, + .ndo_set_mac_address = epping_set_mac_address, + .ndo_select_queue = NULL, +}; + +#define EPPING_TX_QUEUE_MAX_LEN 
128 /* need to be power of 2 */ + +epping_adapter_t *epping_add_adapter(epping_context_t *pEpping_ctx, + tSirMacAddr macAddr, + enum QDF_OPMODE device_mode) +{ + struct net_device *dev; + epping_adapter_t *adapter; + + dev = alloc_netdev(sizeof(epping_adapter_t), "wifi%d", +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) + NET_NAME_UNKNOWN, +#endif + ether_setup); + if (dev == NULL) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "%s: Cannot allocate epping_adapter_t\n", __func__); + return NULL; + } + + adapter = netdev_priv(dev); + qdf_mem_zero(adapter, sizeof(*adapter)); + adapter->dev = dev; + adapter->pEpping_ctx = pEpping_ctx; + adapter->device_mode = device_mode; /* station, SAP, etc */ + qdf_mem_copy(dev->dev_addr, (void *)macAddr, sizeof(tSirMacAddr)); + qdf_mem_copy(adapter->macAddressCurrent.bytes, + macAddr, sizeof(tSirMacAddr)); + qdf_spinlock_create(&adapter->data_lock); + qdf_nbuf_queue_init(&adapter->nodrop_queue); + adapter->epping_timer_state = EPPING_TX_TIMER_STOPPED; + qdf_timer_init(epping_get_qdf_ctx(), &adapter->epping_timer, + epping_timer_expire, dev, QDF_TIMER_TYPE_SW); + dev->type = ARPHRD_IEEE80211; + dev->netdev_ops = &epping_drv_ops; + dev->watchdog_timeo = 5 * HZ; /* XXX */ + dev->tx_queue_len = EPPING_TXBUF - 1; /* 1 for mgmt frame */ + if (epping_register_adapter(adapter) == 0) { + EPPING_LOG(LOG1, FL("Disabling queues")); + netif_tx_disable(dev); + netif_carrier_off(dev); + return adapter; + } else { + epping_destroy_adapter(adapter); + return NULL; + } +} + +int epping_connect_service(epping_context_t *pEpping_ctx) +{ + int status, i; + struct htc_service_connect_req connect; + struct htc_service_connect_resp response; + + qdf_mem_zero(&connect, sizeof(connect)); + qdf_mem_zero(&response, sizeof(response)); + + /* these fields are the same for all service endpoints */ + connect.EpCallbacks.pContext = pEpping_ctx; + connect.EpCallbacks.EpTxCompleteMultiple = NULL; + connect.EpCallbacks.EpRecv = epping_rx; + /* epping_tx_complete use 
Multiple version */ + connect.EpCallbacks.EpTxComplete = epping_tx_complete; + connect.MaxSendQueueDepth = 64; + +#ifdef HIF_SDIO + connect.EpCallbacks.EpRecvRefill = epping_refill; + connect.EpCallbacks.EpSendFull = + epping_tx_queue_full /* ar6000_tx_queue_full */; +#elif defined(HIF_USB) || defined(HIF_PCI) + connect.EpCallbacks.EpRecvRefill = NULL /* provided by HIF */; + connect.EpCallbacks.EpSendFull = NULL /* provided by HIF */; + /* disable flow control for hw flow control */ + connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL; +#endif + + /* connect to service */ + connect.service_id = WMI_DATA_BE_SVC; + status = htc_connect_service(pEpping_ctx->HTCHandle, &connect, &response); + if (status != EOK) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "Failed to connect to Endpoint Ping BE service status:%d\n", + status); + return status; + } else { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "eppingtest BE endpoint:%d\n", response.Endpoint); + } + pEpping_ctx->EppingEndpoint[0] = response.Endpoint; + +#if defined(HIF_PCI) || defined(HIF_USB) + connect.service_id = WMI_DATA_BK_SVC; + status = htc_connect_service(pEpping_ctx->HTCHandle, &connect, &response); + if (status != EOK) { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "Failed to connect to Endpoint Ping BK service status:%d\n", + status); + return status; + } else { + EPPING_LOG(QDF_TRACE_LEVEL_FATAL, + "eppingtest BK endpoint:%d\n", response.Endpoint); + } + pEpping_ctx->EppingEndpoint[1] = response.Endpoint; + /* Since we do not create other two SVC use BK endpoint + * for rest ACs (2, 3) */ + for (i = 2; i < EPPING_MAX_NUM_EPIDS; i++) { + pEpping_ctx->EppingEndpoint[i] = response.Endpoint; + } +#else + /* we only use one endpoint for high latenance bus. 
+ * Map all AC's EPIDs to the same endpoint ID returned by HTC */ + for (i = 0; i < EPPING_MAX_NUM_EPIDS; i++) { + pEpping_ctx->EppingEndpoint[i] = response.Endpoint; + } +#endif + return 0; +} diff --git a/drivers/staging/qca-wifi-host-cmn/utils/fwlog/dbglog_host.c b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/dbglog_host.c new file mode 100644 index 0000000000000000000000000000000000000000..6e226fe63a9b28d4c0d2b51a74177a454454a0c4 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/dbglog_host.c @@ -0,0 +1,4571 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/* Host Debug log implementation */ + +#include "athdefs.h" +#include "a_types.h" +#include "dbglog_host.h" +#include "wmi.h" +#include "wmi_unified_api.h" +#include "wma.h" +#include "ol_defines.h" +#include +#include "host_diag_core_event.h" +#include "qwlan_version.h" +#include +#include +#include + +#ifdef WLAN_OPEN_SOURCE +#include +#endif /* WLAN_OPEN_SOURCE */ +#include "wmi_unified_priv.h" + +#ifdef CNSS_GENL +#include +#include "wlan_cfg80211.h" +#endif + +#ifdef MULTI_IF_NAME +#define CLD_DEBUGFS_DIR "cld" MULTI_IF_NAME +#else + +#define CLD_DEBUGFS_DIR "cld" +#endif +#define DEBUGFS_BLOCK_NAME "dbglog_block" + +#define ATH_MODULE_NAME fwlog +#include +#define FWLOG_DEBUG ATH_DEBUG_MAKE_MODULE_MASK(0) + +#ifdef WLAN_DEBUG + +static int get_version; +static int gprint_limiter; +static bool tgt_assert_enable; + +static ATH_DEBUG_MASK_DESCRIPTION g_fwlog_debug_description[] = { + {FWLOG_DEBUG, "fwlog"}, +}; + +ATH_DEBUG_INSTANTIATE_MODULE_VAR(fwlog, + "fwlog", + "Firmware Debug Log", + ATH_DEBUG_MASK_DEFAULTS | ATH_DEBUG_INFO | + ATH_DEBUG_ERR, + ATH_DEBUG_DESCRIPTION_COUNT + (g_fwlog_debug_description), + g_fwlog_debug_description); +#endif + +module_dbg_print mod_print[WLAN_MODULE_ID_MAX]; + +uint32_t dbglog_process_type = DBGLOG_PROCESS_NET_RAW; + +static const char *dbglog_get_module_str(uint32_t module_id) +{ + switch (module_id) { + case WLAN_MODULE_INF: + return "INF"; + case WLAN_MODULE_WMI: + return "WMI"; + case WLAN_MODULE_STA_PWRSAVE: + return "STA PS"; + case WLAN_MODULE_WHAL: + return "WHAL"; + case WLAN_MODULE_COEX: + return "COEX"; + case WLAN_MODULE_ROAM: + return "ROAM"; + case WLAN_MODULE_RESMGR_CHAN_MANAGER: + return "CHANMGR"; + case WLAN_MODULE_RESMGR: + return "RESMGR"; + case WLAN_MODULE_VDEV_MGR: + return "VDEV"; + case WLAN_MODULE_SCAN: + return "SCAN"; + case WLAN_MODULE_RATECTRL: + return "RC"; + case WLAN_MODULE_AP_PWRSAVE: + return "AP PS"; + case WLAN_MODULE_BLOCKACK: + return "BA"; + case WLAN_MODULE_MGMT_TXRX: + 
return "MGMT"; + case WLAN_MODULE_DATA_TXRX: + return "DATA"; + case WLAN_MODULE_HTT: + return "HTT"; + case WLAN_MODULE_HOST: + return "HOST"; + case WLAN_MODULE_BEACON: + return "BEACON"; + case WLAN_MODULE_OFFLOAD: + return "OFFLOAD"; + case WLAN_MODULE_WAL: + return "WAL"; + case WAL_MODULE_DE: + return "DE"; + case WLAN_MODULE_PCIELP: + return "PCIELP"; + case WLAN_MODULE_RTT: + return "RTT"; + case WLAN_MODULE_DCS: + return "DCS"; + case WLAN_MODULE_CACHEMGR: + return "CACHEMGR"; + case WLAN_MODULE_ANI: + return "ANI"; + case WLAN_MODULE_TEST: + return "TESTPOINT"; + case WLAN_MODULE_STA_SMPS: + return "STA_SMPS"; + case WLAN_MODULE_TDLS: + return "TDLS"; + case WLAN_MODULE_P2P: + return "P2P"; + case WLAN_MODULE_WOW: + return "WoW"; + case WLAN_MODULE_IBSS_PWRSAVE: + return "IBSS PS"; + case WLAN_MODULE_EXTSCAN: + return "ExtScan"; + case WLAN_MODULE_UNIT_TEST: + return "UNIT_TEST"; + case WLAN_MODULE_MLME: + return "MLME"; + case WLAN_MODULE_SUPPL: + return "SUPPLICANT"; + default: + return "UNKNOWN"; + } +} + +char *DBG_MSG_ARR[WLAN_MODULE_ID_MAX][MAX_DBG_MSGS] = { + { + "INF_MSG_START", + "INF_ASSERTION_FAILED", + "INF_TARGET_ID", + "INF_MSG_END" + }, + { + "WMI_DBGID_DEFINITION_START", + "WMI_CMD_RX_XTND_PKT_TOO_SHORT", + "WMI_EXTENDED_CMD_NOT_HANDLED", + "WMI_CMD_RX_PKT_TOO_SHORT", + "WMI_CALLING_WMI_EXTENSION_FN", + "WMI_CMD_NOT_HANDLED", + "WMI_IN_SYNC", + "WMI_TARGET_WMI_SYNC_CMD", + "WMI_SET_SNR_THRESHOLD_PARAMS", + "WMI_SET_RSSI_THRESHOLD_PARAMS", + "WMI_SET_LQ_THRESHOLD_PARAMS", + "WMI_TARGET_CREATE_PSTREAM_CMD", + "WMI_WI_DTM_INUSE", + "WMI_TARGET_DELETE_PSTREAM_CMD", + "WMI_TARGET_IMPLICIT_DELETE_PSTREAM_CMD", + "WMI_TARGET_GET_BIT_RATE_CMD", + "WMI_GET_RATE_MASK_CMD_FIX_RATE_MASK_IS", + "WMI_TARGET_GET_AVAILABLE_CHANNELS_CMD", + "WMI_TARGET_GET_TX_PWR_CMD", + "WMI_FREE_EVBUF_WMIBUF", + "WMI_FREE_EVBUF_DATABUF", + "WMI_FREE_EVBUF_BADFLAG", + "WMI_HTC_RX_ERROR_DATA_PACKET", + "WMI_HTC_RX_SYNC_PAUSING_FOR_MBOX", + 
"WMI_INCORRECT_WMI_DATA_HDR_DROPPING_PKT", + "WMI_SENDING_READY_EVENT", + "WMI_SETPOWER_MDOE_TO_MAXPERF", + "WMI_SETPOWER_MDOE_TO_REC", + "WMI_BSSINFO_EVENT_FROM", + "WMI_TARGET_GET_STATS_CMD", + "WMI_SENDING_SCAN_COMPLETE_EVENT", + "WMI_SENDING_RSSI_INDB_THRESHOLD_EVENT ", + "WMI_SENDING_RSSI_INDBM_THRESHOLD_EVENT", + "WMI_SENDING_LINK_QUALITY_THRESHOLD_EVENT", + "WMI_SENDING_ERROR_REPORT_EVENT", + "WMI_SENDING_CAC_EVENT", + "WMI_TARGET_GET_ROAM_TABLE_CMD", + "WMI_TARGET_GET_ROAM_DATA_CMD", + "WMI_SENDING_GPIO_INTR_EVENT", + "WMI_SENDING_GPIO_ACK_EVENT", + "WMI_SENDING_GPIO_DATA_EVENT", + "WMI_CMD_RX", + "WMI_CMD_RX_XTND", + "WMI_EVENT_SEND", + "WMI_EVENT_SEND_XTND", + "WMI_CMD_PARAMS_DUMP_START", + "WMI_CMD_PARAMS_DUMP_END", + "WMI_CMD_PARAMS", + "WMI_EVENT_ALLOC_FAILURE", + "WMI_DBGID_DCS_PARAM_CMD", + "WMI_SEND_EVENT_WRONG_TLV", + "WMI_SEND_EVENT_NO_TLV_DEF", + "WMI_DBGID_DEFNITION_END", + }, + { + "PS_STA_DEFINITION_START", + "PS_STA_PM_ARB_REQUEST", + "PS_STA_DELIVER_EVENT", + "PS_STA_PSPOLL_SEQ_DONE", + "PS_STA_COEX_MODE", + "PS_STA_PSPOLL_ALLOW", + "PS_STA_SET_PARAM", + "PS_STA_SPECPOLL_TIMER_STARTED", + "PS_STA_SPECPOLL_TIMER_STOPPED", + }, + { + "WHAL_DBGID_DEFINITION_START", + "WHAL_ERROR_ANI_CONTROL", + "WHAL_ERROR_CHIP_TEST1", + "WHAL_ERROR_CHIP_TEST2", + "WHAL_ERROR_EEPROM_CHECKSUM", + "WHAL_ERROR_EEPROM_MACADDR", + "WHAL_ERROR_INTERRUPT_HIU", + "WHAL_ERROR_KEYCACHE_RESET", + "WHAL_ERROR_KEYCACHE_SET", + "WHAL_ERROR_KEYCACHE_TYPE", + "WHAL_ERROR_KEYCACHE_TKIPENTRY", + "WHAL_ERROR_KEYCACHE_WEPLENGTH", + "WHAL_ERROR_PHY_INVALID_CHANNEL", + "WHAL_ERROR_POWER_AWAKE", + "WHAL_ERROR_POWER_SET", + "WHAL_ERROR_RECV_STOPDMA", + "WHAL_ERROR_RECV_STOPPCU", + "WHAL_ERROR_RESET_CHANNF1", + "WHAL_ERROR_RESET_CHANNF2", + "WHAL_ERROR_RESET_PM", + "WHAL_ERROR_RESET_OFFSETCAL", + "WHAL_ERROR_RESET_RFGRANT", + "WHAL_ERROR_RESET_RXFRAME", + "WHAL_ERROR_RESET_STOPDMA", + "WHAL_ERROR_RESET_ERRID", + "WHAL_ERROR_RESET_ADCDCCAL1", + "WHAL_ERROR_RESET_ADCDCCAL2", + 
"WHAL_ERROR_RESET_TXIQCAL", + "WHAL_ERROR_RESET_RXIQCAL", + "WHAL_ERROR_RESET_CARRIERLEAK", + "WHAL_ERROR_XMIT_COMPUTE", + "WHAL_ERROR_XMIT_NOQUEUE", + "WHAL_ERROR_XMIT_ACTIVEQUEUE", + "WHAL_ERROR_XMIT_BADTYPE", + "WHAL_ERROR_XMIT_STOPDMA", + "WHAL_ERROR_INTERRUPT_BB_PANIC", + "WHAL_ERROR_PAPRD_MAXGAIN_ABOVE_WINDOW", + "WHAL_ERROR_QCU_HW_PAUSE_MISMATCH", + "WHAL_DBGID_DEFINITION_END", + }, + { + "COEX_DEBUGID_START", + "BTCOEX_DBG_MCI_1", + "BTCOEX_DBG_MCI_2", + "BTCOEX_DBG_MCI_3", + "BTCOEX_DBG_MCI_4", + "BTCOEX_DBG_MCI_5", + "BTCOEX_DBG_MCI_6", + "BTCOEX_DBG_MCI_7", + "BTCOEX_DBG_MCI_8", + "BTCOEX_DBG_MCI_9", + "BTCOEX_DBG_MCI_10", + "COEX_WAL_BTCOEX_INIT", + "COEX_WAL_PAUSE", + "COEX_WAL_RESUME", + "COEX_UPDATE_AFH", + "COEX_HWQ_EMPTY_CB", + "COEX_MCI_TIMER_HANDLER", + "COEX_MCI_RECOVER", + "ERROR_COEX_MCI_ISR", + "ERROR_COEX_MCI_GPM", + "COEX_ProfileType", + "COEX_LinkID", + "COEX_LinkState", + "COEX_LinkRole", + "COEX_LinkRate", + "COEX_VoiceType", + "COEX_TInterval", + "COEX_WRetrx", + "COEX_Attempts", + "COEX_PerformanceState", + "COEX_LinkType", + "COEX_RX_MCI_GPM_VERSION_QUERY", + "COEX_RX_MCI_GPM_VERSION_RESPONSE", + "COEX_RX_MCI_GPM_STATUS_QUERY", + "COEX_STATE_WLAN_VDEV_DOWN", + "COEX_STATE_WLAN_VDEV_START", + "COEX_STATE_WLAN_VDEV_CONNECTED", + "COEX_STATE_WLAN_VDEV_SCAN_STARTED", + "COEX_STATE_WLAN_VDEV_SCAN_END", + "COEX_STATE_WLAN_DEFAULT", + "COEX_CHANNEL_CHANGE", + "COEX_POWER_CHANGE", + "COEX_CONFIG_MGR", + "COEX_TX_MCI_GPM_BT_CAL_REQ", + "COEX_TX_MCI_GPM_BT_CAL_GRANT", + "COEX_TX_MCI_GPM_BT_CAL_DONE", + "COEX_TX_MCI_GPM_WLAN_CAL_REQ", + "COEX_TX_MCI_GPM_WLAN_CAL_GRANT", + "COEX_TX_MCI_GPM_WLAN_CAL_DONE", + "COEX_TX_MCI_GPM_BT_DEBUG", + "COEX_TX_MCI_GPM_VERSION_QUERY", + "COEX_TX_MCI_GPM_VERSION_RESPONSE", + "COEX_TX_MCI_GPM_STATUS_QUERY", + "COEX_TX_MCI_GPM_HALT_BT_GPM", + "COEX_TX_MCI_GPM_WLAN_CHANNELS", + "COEX_TX_MCI_GPM_BT_PROFILE_INFO", + "COEX_TX_MCI_GPM_BT_STATUS_UPDATE", + "COEX_TX_MCI_GPM_BT_UPDATE_FLAGS", + "COEX_TX_MCI_GPM_UNKNOWN", + 
"COEX_TX_MCI_SYS_WAKING", + "COEX_TX_MCI_LNA_TAKE", + "COEX_TX_MCI_LNA_TRANS", + "COEX_TX_MCI_SYS_SLEEPING", + "COEX_TX_MCI_REQ_WAKE", + "COEX_TX_MCI_REMOTE_RESET", + "COEX_TX_MCI_TYPE_UNKNOWN", + "COEX_WHAL_MCI_RESET", + "COEX_POLL_BT_CAL_DONE_TIMEOUT", + "COEX_WHAL_PAUSE", + "COEX_RX_MCI_GPM_BT_CAL_REQ", + "COEX_RX_MCI_GPM_BT_CAL_DONE", + "COEX_RX_MCI_GPM_BT_CAL_GRANT", + "COEX_WLAN_CAL_START", + "COEX_WLAN_CAL_RESULT", + "COEX_BtMciState", + "COEX_BtCalState", + "COEX_WlanCalState", + "COEX_RxReqWakeCount", + "COEX_RxRemoteResetCount", + "COEX_RESTART_CAL", + "COEX_SENDMSG_QUEUE", + "COEX_RESETSEQ_LNAINFO_TIMEOUT", + "COEX_MCI_ISR_IntRaw", + "COEX_MCI_ISR_Int1Raw", + "COEX_MCI_ISR_RxMsgRaw", + "COEX_WHAL_COEX_RESET", + "COEX_WAL_COEX_INIT", + "COEX_TXRX_CNT_LIMIT_ISR", + "COEX_CH_BUSY", + "COEX_REASSESS_WLAN_STATE", + "COEX_BTCOEX_WLAN_STATE_UPDATE", + "COEX_BT_NUM_OF_PROFILES", + "COEX_BT_NUM_OF_HID_PROFILES", + "COEX_BT_NUM_OF_ACL_PROFILES", + "COEX_BT_NUM_OF_HI_ACL_PROFILES", + "COEX_BT_NUM_OF_VOICE_PROFILES", + "COEX_WLAN_AGGR_LIMIT", + "COEX_BT_LOW_PRIO_BUDGET", + "COEX_BT_HI_PRIO_BUDGET", + "COEX_BT_IDLE_TIME", + "COEX_SET_COEX_WEIGHT", + "COEX_WLAN_WEIGHT_GROUP", + "COEX_BT_WEIGHT_GROUP", + "COEX_BT_INTERVAL_ALLOC", + "COEX_BT_SCHEME", + "COEX_BT_MGR", + "COEX_BT_SM_ERROR", + "COEX_SYSTEM_UPDATE", + "COEX_LOW_PRIO_LIMIT", + "COEX_HI_PRIO_LIMIT", + "COEX_BT_INTERVAL_START", + "COEX_WLAN_INTERVAL_START", + "COEX_NON_LINK_BUDGET", + "COEX_CONTENTION_MSG", + "COEX_SET_NSS", + "COEX_SELF_GEN_MASK", + "COEX_PROFILE_ERROR", + "COEX_WLAN_INIT", + "COEX_BEACON_MISS", + "COEX_BEACON_OK", + "COEX_BTCOEX_SCAN_ACTIVITY", + "COEX_SCAN_ACTIVITY", + "COEX_FORCE_QUIETTIME", + "COEX_BT_MGR_QUIETTIME", + "COEX_BT_INACTIVITY_TRIGGER", + "COEX_BT_INACTIVITY_REPORTED", + "COEX_TX_MCI_GPM_WLAN_PRIO", + "COEX_TX_MCI_GPM_BT_PAUSE_PROFILE", + "COEX_TX_MCI_GPM_WLAN_SET_ACL_INACTIVITY", + "COEX_RX_MCI_GPM_BT_ACL_INACTIVITY_REPORT", + "COEX_GENERIC_ERROR", + "COEX_RX_RATE_THRESHOLD", 
+ "COEX_RSSI", + "COEX_WLAN_VDEV_NOTIF_START", /* 133 */ + "COEX_WLAN_VDEV_NOTIF_UP", /* 134 */ + "COEX_WLAN_VDEV_NOTIF_DOWN", /* 135 */ + "COEX_WLAN_VDEV_NOTIF_STOP", /* 136 */ + "COEX_WLAN_VDEV_NOTIF_ADD_PEER", /* 137 */ + "COEX_WLAN_VDEV_NOTIF_DELETE_PEER", /* 138 */ + "COEX_WLAN_VDEV_NOTIF_CONNECTED_PEER", /* 139 */ + "COEX_WLAN_VDEV_NOTIF_PAUSE", /* 140 */ + "COEX_WLAN_VDEV_NOTIF_UNPAUSED", /* 141 */ + "COEX_STATE_WLAN_VDEV_PEER_ADD", /* 142 */ + "COEX_STATE_WLAN_VDEV_CONNECTED_PEER", /* 143 */ + "COEX_STATE_WLAN_VDEV_DELETE_PEER", /* 144 */ + "COEX_STATE_WLAN_VDEV_PAUSE", /* 145 */ + "COEX_STATE_WLAN_VDEV_UNPAUSED", /* 146 */ + "COEX_SCAN_CALLBACK", /* 147 */ + "COEX_RC_SET_CHAINMASK", /* 148 */ + "COEX_TX_MCI_GPM_WLAN_SET_BT_RXSS_THRES", /* 149 */ + "COEX_TX_MCI_GPM_BT_RXSS_THRES_QUERY", /* 150 */ + "COEX_BT_RXSS_THRES", /* 151 */ + "COEX_BT_PROFILE_ADD_RMV", /* 152 */ + "COEX_BT_SCHED_INFO", /* 153 */ + "COEX_TRF_MGMT", /* 154 */ + "COEX_SCHED_START", /* 155 */ + "COEX_SCHED_RESULT", /* 156 */ + "COEX_SCHED_ERROR", /* 157 */ + "COEX_SCHED_PRE_OP", /* 158 */ + "COEX_SCHED_POST_OP", /* 159 */ + "COEX_RX_RATE", /* 160 */ + "COEX_ACK_PRIORITY", /* 161 */ + "COEX_STATE_WLAN_VDEV_UP", /* 162 */ + "COEX_STATE_WLAN_VDEV_PEER_UPDATE", /* 163 */ + "COEX_STATE_WLAN_VDEV_STOP", /* 164 */ + "COEX_WLAN_PAUSE_PEER", /* 165 */ + "COEX_WLAN_UNPAUSE_PEER", /* 166 */ + "COEX_WLAN_PAUSE_INTERVAL_START", /* 167 */ + "COEX_WLAN_POSTPAUSE_INTERVAL_START", /* 168 */ + "COEX_TRF_FREERUN", /* 169 */ + "COEX_TRF_SHAPE_PM", /* 170 */ + "COEX_TRF_SHAPE_PSP", /* 171 */ + "COEX_TRF_SHAPE_S_CTS", /* 172 */ + "COEX_CHAIN_CONFIG", /* 173 */ + "COEX_SYSTEM_MONITOR", /* 174 */ + "COEX_SINGLECHAIN_INIT", /* 175 */ + "COEX_MULTICHAIN_INIT", /* 176 */ + "COEX_SINGLECHAIN_DBG_1", /* 177 */ + "COEX_SINGLECHAIN_DBG_2", /* 178 */ + "COEX_SINGLECHAIN_DBG_3", /* 179 */ + "COEX_MULTICHAIN_DBG_1", /* 180 */ + "COEX_MULTICHAIN_DBG_2", /* 181 */ + "COEX_MULTICHAIN_DBG_3", /* 182 */ + "COEX_PSP_TX_CB", /* 
183 */ + "COEX_PSP_RX_CB", /* 184 */ + "COEX_PSP_STAT_1", /* 185 */ + "COEX_PSP_SPEC_POLL", /* 186 */ + "COEX_PSP_READY_STATE", /* 187 */ + "COEX_PSP_TX_STATUS_STATE", /* 188 */ + "COEX_PSP_RX_STATUS_STATE_1", /* 189 */ + "COEX_PSP_NOT_READY_STATE", /* 190 */ + "COEX_PSP_DISABLED_STATE", /* 191 */ + "COEX_PSP_ENABLED_STATE", /* 192 */ + "COEX_PSP_SEND_PSPOLL", /* 193 */ + "COEX_PSP_MGR_ENTER", /* 194 */ + "COEX_PSP_MGR_RESULT", /* 195 */ + "COEX_PSP_NONWLAN_INTERVAL", /* 196 */ + "COEX_PSP_STAT_2", /* 197 */ + "COEX_PSP_RX_STATUS_STATE_2", /* 198 */ + "COEX_PSP_ERROR", /* 199 */ + "COEX_T2BT", /* 200 */ + "COEX_BT_DURATION", /* 201 */ + "COEX_TX_MCI_GPM_WLAN_SCHED_INFO_TRIG", /* 202 */ + "COEX_TX_MCI_GPM_WLAN_SCHED_INFO_TRIG_RSP", /* 203 */ + "COEX_TX_MCI_GPM_SCAN_OP", /* 204 */ + "COEX_TX_MCI_GPM_BT_PAUSE_GPM_TX", /* 205 */ + "COEX_CTS2S_SEND", /* 206 */ + "COEX_CTS2S_RESULT", /* 207 */ + "COEX_ENTER_OCS", /* 208 */ + "COEX_EXIT_OCS", /* 209 */ + "COEX_UPDATE_OCS", /* 210 */ + "COEX_STATUS_OCS", /* 211 */ + "COEX_STATS_BT", /* 212 */ + "COEX_MWS_WLAN_INIT", + "COEX_MWS_WBTMR_SYNC", + "COEX_MWS_TYPE2_RX", + "COEX_MWS_TYPE2_TX", + "COEX_MWS_WLAN_CHAVD", + "COEX_MWS_WLAN_CHAVD_INSERT", + "COEX_MWS_WLAN_CHAVD_MERGE", + "COEX_MWS_WLAN_CHAVD_RPT", + "COEX_MWS_CP_MSG_SEND", + "COEX_MWS_CP_ESCAPE", + "COEX_MWS_CP_UNFRAME", + "COEX_MWS_CP_SYNC_UPDATE", + "COEX_MWS_CP_SYNC", + "COEX_MWS_CP_WLAN_STATE_IND", + "COEX_MWS_CP_SYNCRESP_TIMEOUT", + "COEX_MWS_SCHEME_UPDATE", + "COEX_MWS_WLAN_EVENT", + "COEX_MWS_UART_UNESCAPE", + "COEX_MWS_UART_ENCODE_SEND", + "COEX_MWS_UART_RECV_DECODE", + "COEX_MWS_UL_HDL", + "COEX_MWS_REMOTE_EVENT", + "COEX_MWS_OTHER", + "COEX_MWS_ERROR", + "COEX_MWS_ANT_DIVERSITY", /* 237 */ + "COEX_P2P_GO", + "COEX_P2P_CLIENT", + "COEX_SCC_1", + "COEX_SCC_2", + "COEX_MCC_1", + "COEX_MCC_2", + "COEX_TRF_SHAPE_NOA", + "COEX_NOA_ONESHOT", + "COEX_NOA_PERIODIC", + "COEX_LE_1", + "COEX_LE_2", + "COEX_ANT_1", + "COEX_ANT_2", + "COEX_ENTER_NOA", + "COEX_EXIT_NOA", + 
"COEX_BT_SCAN_PROTECT", /* 253 */ + "COEX_DEBUG_ID_END" /* 254 */ + }, + { + "ROAM_DBGID_DEFINITION_START", + "ROAM_MODULE_INIT", + "ROAM_DEV_START", + "ROAM_CONFIG_RSSI_THRESH", + "ROAM_CONFIG_SCAN_PERIOD", + "ROAM_CONFIG_AP_PROFILE", + "ROAM_CONFIG_CHAN_LIST", + "ROAM_CONFIG_SCAN_PARAMS", + "ROAM_CONFIG_RSSI_CHANGE", + "ROAM_SCAN_TIMER_START", + "ROAM_SCAN_TIMER_EXPIRE", + "ROAM_SCAN_TIMER_STOP", + "ROAM_SCAN_STARTED", + "ROAM_SCAN_COMPLETE", + "ROAM_SCAN_CANCELLED", + "ROAM_CANDIDATE_FOUND", + "ROAM_RSSI_ACTIVE_SCAN", + "ROAM_RSSI_ACTIVE_ROAM", + "ROAM_RSSI_GOOD", + "ROAM_BMISS_FIRST_RECV", + "ROAM_DEV_STOP", + "ROAM_FW_OFFLOAD_ENABLE", + "ROAM_CANDIDATE_SSID_MATCH", + "ROAM_CANDIDATE_SECURITY_MATCH", + "ROAM_LOW_RSSI_INTERRUPT", + "ROAM_HIGH_RSSI_INTERRUPT", + "ROAM_SCAN_REQUESTED", + "ROAM_BETTER_CANDIDATE_FOUND", + "ROAM_BETTER_AP_EVENT", + "ROAM_CANCEL_LOW_PRIO_SCAN", + "ROAM_FINAL_BMISS_RECVD", + "ROAM_CONFIG_SCAN_MODE", + "ROAM_BMISS_FINAL_SCAN_ENABLE", + "ROAM_SUITABLE_AP_EVENT", + "ROAM_RSN_IE_PARSE_ERROR", + "ROAM_WPA_IE_PARSE_ERROR", + "ROAM_SCAN_CMD_FROM_HOST", + "ROAM_HO_SORT_CANDIDATE", + "ROAM_HO_SAVE_CANDIDATE", + "ROAM_HO_GET_CANDIDATE", + "ROAM_HO_OFFLOAD_SET_PARAM", + "ROAM_HO_SM", + "ROAM_HO_HTT_SAVED", + "ROAM_HO_SYNC_START", + "ROAM_HO_START", + "ROAM_HO_COMPLETE", + "ROAM_HO_STOP", + "ROAM_HO_HTT_FORWARD", + "ROAM_DBGID_DEFINITION_END" + }, + { + "RESMGR_CHMGR_DEFINITION_START", + "RESMGR_CHMGR_PAUSE_COMPLETE", + "RESMGR_CHMGR_CHANNEL_CHANGE", + "RESMGR_CHMGR_RESUME_COMPLETE", + "RESMGR_CHMGR_VDEV_PAUSE", + "RESMGR_CHMGR_VDEV_UNPAUSE", + "RESMGR_CHMGR_CTS2S_TX_COMP", + "RESMGR_CHMGR_CFEND_TX_COMP", + "RESMGR_CHMGR_DEFINITION_END" + }, + { + "RESMGR_DEFINITION_START", + "RESMGR_OCS_ALLOCRAM_SIZE", + "RESMGR_OCS_RESOURCES", + "RESMGR_LINK_CREATE", + "RESMGR_LINK_DELETE", + "RESMGR_OCS_CHREQ_CREATE", + "RESMGR_OCS_CHREQ_DELETE", + "RESMGR_OCS_CHREQ_START", + "RESMGR_OCS_CHREQ_STOP", + "RESMGR_OCS_SCHEDULER_INVOKED", + "RESMGR_OCS_CHREQ_GRANT", 
+ "RESMGR_OCS_CHREQ_COMPLETE", + "RESMGR_OCS_NEXT_TSFTIME", + "RESMGR_OCS_TSF_TIMEOUT_US", + "RESMGR_OCS_CURR_CAT_WINDOW", + "RESMGR_OCS_CURR_CAT_WINDOW_REQ", + "RESMGR_OCS_CURR_CAT_WINDOW_TIMESLOT", + "RESMGR_OCS_CHREQ_RESTART", + "RESMGR_OCS_CLEANUP_CH_ALLOCATORS", + "RESMGR_OCS_PURGE_CHREQ", + "RESMGR_OCS_CH_ALLOCATOR_FREE", + "RESMGR_OCS_RECOMPUTE_SCHEDULE", + "RESMGR_OCS_NEW_CAT_WINDOW_REQ", + "RESMGR_OCS_NEW_CAT_WINDOW_TIMESLOT", + "RESMGR_OCS_CUR_CH_ALLOC", + "RESMGR_OCS_WIN_CH_ALLOC", + "RESMGR_OCS_SCHED_CH_CHANGE", + "RESMGR_OCS_CONSTRUCT_CAT_WIN", + "RESMGR_OCS_CHREQ_PREEMPTED", + "RESMGR_OCS_CH_SWITCH_REQ", + "RESMGR_OCS_CHANNEL_SWITCHED", + "RESMGR_OCS_CLEANUP_STALE_REQS", + "RESMGR_OCS_CHREQ_UPDATE", + "RESMGR_OCS_REG_NOA_NOTIF", + "RESMGR_OCS_DEREG_NOA_NOTIF", + "RESMGR_OCS_GEN_PERIODIC_NOA", + "RESMGR_OCS_RECAL_QUOTAS", + "RESMGR_OCS_GRANTED_QUOTA_STATS", + "RESMGR_OCS_ALLOCATED_QUOTA_STATS", + "RESMGR_OCS_REQ_QUOTA_STATS", + "RESMGR_OCS_TRACKING_TIME_FIRED", + "RESMGR_VC_ARBITRATE_ATTRIBUTES", + "RESMGR_OCS_LATENCY_STRICT_TIME_SLOT", + "RESMGR_OCS_CURR_TSF", + "RESMGR_OCS_QUOTA_REM", + "RESMGR_OCS_LATENCY_CASE_NO", + "RESMGR_OCS_WIN_CAT_DUR", + "RESMGR_VC_UPDATE_CUR_VC", + "RESMGR_VC_REG_UNREG_LINK", + "RESMGR_VC_PRINT_LINK", + "RESMGR_OCS_MISS_TOLERANCE", + "RESMGR_DYN_SCH_ALLOCRAM_SIZE", + "RESMGR_DYN_SCH_ENABLE", + "RESMGR_DYN_SCH_ACTIVE", + "RESMGR_DYN_SCH_CH_STATS_START", + "RESMGR_DYN_SCH_CH_SX_STATS", + "RESMGR_DYN_SCH_TOT_UTIL_PER", + "RESMGR_DYN_SCH_HOME_CH_QUOTA", + "RESMGR_OCS_REG_RECAL_QUOTA_NOTIF", + "RESMGR_OCS_DEREG_RECAL_QUOTA_NOTIF", + "RESMGR_DEFINITION_END" + }, + { + "VDEV_MGR_DEBID_DEFINITION_START", /* vdev Mgr */ + "VDEV_MGR_FIRST_BEACON_MISS_DETECTED", + "VDEV_MGR_FINAL_BEACON_MISS_DETECTED", + "VDEV_MGR_BEACON_IN_SYNC", + "VDEV_MGR_AP_KEEPALIVE_IDLE", + "VDEV_MGR_AP_KEEPALIVE_INACTIVE", + "VDEV_MGR_AP_KEEPALIVE_UNRESPONSIVE", + "VDEV_MGR_AP_TBTT_CONFIG", + "VDEV_MGR_FIRST_BCN_RECEIVED", + "VDEV_MGR_VDEV_START", + 
"VDEV_MGR_VDEV_UP", + "VDEV_MGR_PEER_AUTHORIZED", + "VDEV_MGR_OCS_HP_LP_REQ_POSTED", + "VDEV_MGR_VDEV_START_OCS_HP_REQ_COMPLETE", + "VDEV_MGR_VDEV_START_OCS_HP_REQ_STOP", + "VDEV_MGR_HP_START_TIME", + "VDEV_MGR_VDEV_PAUSE_DELAY_UPDATE", + "VDEV_MGR_VDEV_PAUSE_FAIL", + "VDEV_MGR_GEN_PERIODIC_NOA", + "VDEV_MGR_OFF_CHAN_GO_CH_REQ_SETUP", + "VDEV_MGR_DEFINITION_END", + }, + { + "SCAN_START_COMMAND_FAILED", /* scan */ + "SCAN_STOP_COMMAND_FAILED", + "SCAN_EVENT_SEND_FAILED", + "SCAN_ENGINE_START", + "SCAN_ENGINE_CANCEL_COMMAND", + "SCAN_ENGINE_STOP_DUE_TO_TIMEOUT", + "SCAN_EVENT_SEND_TO_HOST", + "SCAN_FWLOG_EVENT_ADD", + "SCAN_FWLOG_EVENT_REM", + "SCAN_FWLOG_EVENT_PREEMPTED", + "SCAN_FWLOG_EVENT_RESTARTED", + "SCAN_FWLOG_EVENT_COMPLETED", + }, + { + "RATECTRL_DBGID_DEFINITION_START", /* Rate ctrl */ + "RATECTRL_DBGID_ASSOC", + "RATECTRL_DBGID_NSS_CHANGE", + "RATECTRL_DBGID_CHAINMASK_ERR", + "RATECTRL_DBGID_UNEXPECTED_FRAME", + "RATECTRL_DBGID_WAL_RCQUERY", + "RATECTRL_DBGID_WAL_RCUPDATE", + "RATECTRL_DBGID_GTX_UPDATE", + "RATECTRL_DBGID_DEFINITION_END" + }, + { + "AP_PS_DBGID_DEFINITION_START", + "AP_PS_DBGID_UPDATE_TIM", + "AP_PS_DBGID_PEER_STATE_CHANGE", + "AP_PS_DBGID_PSPOLL", + "AP_PS_DBGID_PEER_CREATE", + "AP_PS_DBGID_PEER_DELETE", + "AP_PS_DBGID_VDEV_CREATE", + "AP_PS_DBGID_VDEV_DELETE", + "AP_PS_DBGID_SYNC_TIM", + "AP_PS_DBGID_NEXT_RESPONSE", + "AP_PS_DBGID_START_SP", + "AP_PS_DBGID_COMPLETED_EOSP", + "AP_PS_DBGID_TRIGGER", + "AP_PS_DBGID_DUPLICATE_TRIGGER", + "AP_PS_DBGID_UAPSD_RESPONSE", + "AP_PS_DBGID_SEND_COMPLETE", + "AP_PS_DBGID_SEND_N_COMPLETE", + "AP_PS_DBGID_DETECT_OUT_OF_SYNC_STA", + "AP_PS_DBGID_DELIVER_CAB", + }, + { + "" /* Block Ack */ + }, + /* Mgmt TxRx */ + { + "MGMT_TXRX_DBGID_DEFINITION_START", + "MGMT_TXRX_FORWARD_TO_HOST", + "MGMT_TXRX_DBGID_DEFINITION_END", + }, + { /* Data TxRx */ + "DATA_TXRX_DBGID_DEFINITION_START", + "DATA_TXRX_DBGID_RX_DATA_SEQ_LEN_INFO", + "DATA_TXRX_DBGID_DEFINITION_END", + }, + {"" /* HTT */ + }, + {"" /* HOST */ + 
}, + {"" /* BEACON */ + "BEACON_EVENT_SWBA_SEND_FAILED", + "BEACON_EVENT_EARLY_RX_BMISS_STATUS", + "BEACON_EVENT_EARLY_RX_SLEEP_SLOP", + "BEACON_EVENT_EARLY_RX_CONT_BMISS_TIMEOUT", + "BEACON_EVENT_EARLY_RX_PAUSE_SKIP_BCN_NUM", + "BEACON_EVENT_EARLY_RX_CLK_DRIFT", + "BEACON_EVENT_EARLY_RX_AP_DRIFT", + "BEACON_EVENT_EARLY_RX_BCN_TYPE",}, + { /* Offload Mgr */ + "OFFLOAD_MGR_DBGID_DEFINITION_START", + "OFFLOADMGR_REGISTER_OFFLOAD", + "OFFLOADMGR_DEREGISTER_OFFLOAD", + "OFFLOADMGR_NO_REG_DATA_HANDLERS", + "OFFLOADMGR_NO_REG_EVENT_HANDLERS", + "OFFLOADMGR_REG_OFFLOAD_FAILED", + "OFFLOADMGR_DBGID_DEFINITION_END", + }, + { + "WAL_DBGID_DEFINITION_START", + "WAL_DBGID_FAST_WAKE_REQUEST", + "WAL_DBGID_FAST_WAKE_RELEASE", + "WAL_DBGID_SET_POWER_STATE", + "WAL_DBGID_MISSING", + "WAL_DBGID_CHANNEL_CHANGE_FORCE_RESET", + "WAL_DBGID_CHANNEL_CHANGE", + "WAL_DBGID_VDEV_START", + "WAL_DBGID_VDEV_STOP", + "WAL_DBGID_VDEV_UP", + "WAL_DBGID_VDEV_DOWN", + "WAL_DBGID_SW_WDOG_RESET", + "WAL_DBGID_TX_SCH_REGISTER_TIDQ", + "WAL_DBGID_TX_SCH_UNREGISTER_TIDQ", + "WAL_DBGID_TX_SCH_TICKLE_TIDQ", + "WAL_DBGID_XCESS_FAILURES", + "WAL_DBGID_AST_ADD_WDS_ENTRY", + "WAL_DBGID_AST_DEL_WDS_ENTRY", + "WAL_DBGID_AST_WDS_ENTRY_PEER_CHG", + "WAL_DBGID_AST_WDS_SRC_LEARN_FAIL", + "WAL_DBGID_STA_KICKOUT", + "WAL_DBGID_BAR_TX_FAIL", + "WAL_DBGID_BAR_ALLOC_FAIL", + "WAL_DBGID_LOCAL_DATA_TX_FAIL", + "WAL_DBGID_SECURITY_PM4_QUEUED", + "WAL_DBGID_SECURITY_GM1_QUEUED", + "WAL_DBGID_SECURITY_PM4_SENT", + "WAL_DBGID_SECURITY_ALLOW_DATA", + "WAL_DBGID_SECURITY_UCAST_KEY_SET", + "WAL_DBGID_SECURITY_MCAST_KEY_SET", + "WAL_DBGID_SECURITY_ENCR_EN", + "WAL_DBGID_BB_WDOG_TRIGGERED", + "WAL_DBGID_RX_LOCAL_BUFS_LWM", + "WAL_DBGID_RX_LOCAL_DROP_LARGE_MGMT", + "WAL_DBGID_VHT_ILLEGAL_RATE_PHY_ERR_DETECTED", + "WAL_DBGID_DEV_RESET", + "WAL_DBGID_TX_BA_SETUP", + "WAL_DBGID_RX_BA_SETUP", + "WAL_DBGID_DEV_TX_TIMEOUT", + "WAL_DBGID_DEV_RX_TIMEOUT", + "WAL_DBGID_STA_VDEV_XRETRY", + "WAL_DBGID_DCS", + "WAL_DBGID_MGMT_TX_FAIL", + 
"WAL_DBGID_SET_M4_SENT_MANUALLY", + "WAL_DBGID_PROCESS_4_WAY_HANDSHAKE", + "WAL_DBGID_WAL_CHANNEL_CHANGE_START", + "WAL_DBGID_WAL_CHANNEL_CHANGE_COMPLETE", + "WAL_DBGID_WHAL_CHANNEL_CHANGE_START", + "WAL_DBGID_WHAL_CHANNEL_CHANGE_COMPLETE", + "WAL_DBGID_TX_MGMT_DESCID_SEQ_TYPE_LEN", + "WAL_DBGID_TX_DATA_MSDUID_SEQ_TYPE_LEN", + "WAL_DBGID_TX_DISCARD", + "WAL_DBGID_TX_MGMT_COMP_DESCID_STATUS", + "WAL_DBGID_TX_DATA_COMP_MSDUID_STATUS", + "WAL_DBGID_RESET_PCU_CYCLE_CNT", + "WAL_DBGID_SETUP_RSSI_INTERRUPTS", + "WAL_DBGID_BRSSI_CONFIG", + "WAL_DBGID_CURRENT_BRSSI_AVE", + "WAL_DBGID_BCN_TX_COMP", + "WAL_DBGID_SET_HW_CHAINMASK", + "WAL_DBGID_SET_HW_CHAINMASK_TXRX_STOP_FAIL", + "WAL_DBGID_GET_HW_CHAINMASK", + "WAL_DBGID_SMPS_DISABLE", + "WAL_DBGID_SMPS_ENABLE_HW_CNTRL", + "WAL_DBGID_SMPS_SWSEL_CHAINMASK", + "WAL_DBGID_DEFINITION_END", + }, + { + "" /* DE */ + }, + { + "" /* pcie lp */ + }, + { + /* RTT */ + "RTT_CALL_FLOW", + "RTT_REQ_SUB_TYPE", + "RTT_MEAS_REQ_HEAD", + "RTT_MEAS_REQ_BODY", + "", + "", + "RTT_INIT_GLOBAL_STATE", + "", + "RTT_REPORT", + "", + "RTT_ERROR_REPORT", + "RTT_TIMER_STOP", + "RTT_SEND_TM_FRAME", + "RTT_V3_RESP_CNT", + "RTT_V3_RESP_FINISH", + "RTT_CHANNEL_SWITCH_REQ", + "RTT_CHANNEL_SWITCH_GRANT", + "RTT_CHANNEL_SWITCH_COMPLETE", + "RTT_CHANNEL_SWITCH_PREEMPT", + "RTT_CHANNEL_SWITCH_STOP", + "RTT_TIMER_START", + }, + { /* RESOURCE */ + "RESOURCE_DBGID_DEFINITION_START", + "RESOURCE_PEER_ALLOC", + "RESOURCE_PEER_FREE", + "RESOURCE_PEER_ALLOC_WAL_PEER", + "RESOURCE_PEER_NBRHOOD_MGMT_ALLOC", + "RESOURCE_PEER_NBRHOOD_MGMT_INFO,RESOURCE_DBGID_DEFINITION_END", + }, + { /* DCS */ + "WLAN_DCS_DBGID_INIT", + "WLAN_DCS_DBGID_WMI_CWINT", + "WLAN_DCS_DBGID_TIMER", + "WLAN_DCS_DBGID_CMDG", + "WLAN_DCS_DBGID_CMDS", + "WLAN_DCS_DBGID_DINIT" + }, + { /* CACHEMGR */ + "" + }, + { /* ANI */ + "ANI_DBGID_POLL", + "ANI_DBGID_CONTROL", + "ANI_DBGID_OFDM_PARAMS", + "ANI_DBGID_CCK_PARAMS", + "ANI_DBGID_RESET", + "ANI_DBGID_RESTART", + "ANI_DBGID_OFDM_LEVEL", + 
"ANI_DBGID_CCK_LEVEL", + "ANI_DBGID_FIRSTEP", + "ANI_DBGID_CYCPWR", + "ANI_DBGID_MRC_CCK", + "ANI_DBGID_SELF_CORR_LOW", + "ANI_DBGID_ENABLE", + "ANI_DBGID_CURRENT_LEVEL", + "ANI_DBGID_POLL_PERIOD", + "ANI_DBGID_LISTEN_PERIOD", + "ANI_DBGID_OFDM_LEVEL_CFG", + "ANI_DBGID_CCK_LEVEL_CFG" + }, + { + "P2P_DBGID_DEFINITION_START", + "P2P_DEV_REGISTER", + "P2P_HANDLE_NOA", + "P2P_UPDATE_SCHEDULE_OPPS", + "P2P_UPDATE_SCHEDULE", + "P2P_UPDATE_START_TIME", + "P2P_UPDATE_START_TIME_DIFF_TSF32", + "P2P_UPDATE_START_TIME_FINAL", + "P2P_SETUP_SCHEDULE_TIMER", + "P2P_PROCESS_SCHEDULE_AFTER_CALC", + "P2P_PROCESS_SCHEDULE_STARTED_TIMER", + "P2P_CALC_SCHEDULES_FIRST_CALL_ALL_NEXT_EVENT", + "P2P_CALC_SCHEDULES_FIRST_VALUE", + "P2P_CALC_SCHEDULES_EARLIEST_NEXT_EVENT", + "P2P_CALC_SCHEDULES_SANITY_COUNT", + "P2P_CALC_SCHEDULES_CALL_ALL_NEXT_EVENT_FROM_WHILE_LOOP", + "P2P_CALC_SCHEDULES_TIMEOUT_1", + "P2P_CALC_SCHEDULES_TIMEOUT_2", + "P2P_FIND_ALL_NEXT_EVENTS_REQ_EXPIRED", + "P2P_FIND_ALL_NEXT_EVENTS_REQ_ACTIVE", + "P2P_FIND_NEXT_EVENT_REQ_NOT_STARTED", + "P2P_FIND_NEXT_EVENT_REQ_COMPLETE_NON_PERIODIC", + "P2P_FIND_NEXT_EVENT_IN_MID_OF_NOA", + "P2P_FIND_NEXT_EVENT_REQ_COMPLETE", + "P2P_SCHEDULE_TIMEOUT", + "P2P_CALC_SCHEDULES_ENTER", + "P2P_PROCESS_SCHEDULE_ENTER", + "P2P_FIND_ALL_NEXT_EVENTS_INDIVIDUAL_REQ_AFTER_CHANGE", + "P2P_FIND_ALL_NEXT_EVENTS_INDIVIDUAL_REQ_BEFORE_CHANGE", + "P2P_FIND_ALL_NEXT_EVENTS_ENTER", + "P2P_FIND_NEXT_EVENT_ENTER", + "P2P_NOA_GO_PRESENT", + "P2P_NOA_GO_ABSENT", + "P2P_GO_NOA_NOTIF", + "P2P_GO_TBTT_OFFSET", + "P2P_GO_GET_NOA_INFO", + "P2P_GO_ADD_ONE_SHOT_NOA", + "P2P_GO_GET_NOA_IE", + "P2P_GO_BCN_TX_COMP", + "P2P_DBGID_DEFINITION_END", + }, + { + "CSA_DBGID_DEFINITION_START", + "CSA_OFFLOAD_POOL_INIT", + "CSA_OFFLOAD_REGISTER_VDEV", + "CSA_OFFLOAD_DEREGISTER_VDEV", + "CSA_DEREGISTER_VDEV_ERROR", + "CSA_OFFLOAD_BEACON_RECEIVED", + "CSA_OFFLOAD_BEACON_CSA_RECV", + "CSA_OFFLOAD_CSA_RECV_ERROR_IE", + "CSA_OFFLOAD_CSA_TIMER_ERROR", + "CSA_OFFLOAD_CSA_TIMER_EXP", 
+ "CSA_OFFLOAD_WMI_EVENT_ERROR", + "CSA_OFFLOAD_WMI_EVENT_SENT", + "CSA_OFFLOAD_WMI_CHANSWITCH_RECV", + "CSA_DBGID_DEFINITION_END", + }, + { /* NLO offload */ + "" + }, + { + "WLAN_CHATTER_DBGID_DEFINITION_START", + "WLAN_CHATTER_ENTER", + "WLAN_CHATTER_EXIT", + "WLAN_CHATTER_FILTER_HIT", + "WLAN_CHATTER_FILTER_MISS", + "WLAN_CHATTER_FILTER_FULL", + "WLAN_CHATTER_FILTER_TM_ADJ", + "WLAN_CHATTER_BUFFER_FULL", + "WLAN_CHATTER_TIMEOUT", + "WLAN_CHATTER_DBGID_DEFINITION_END", + }, + { + "WOW_DBGID_DEFINITION_START", + "WOW_ENABLE_CMDID", + "WOW_RECV_DATA_PKT", + "WOW_WAKE_HOST_DATA", + "WOW_RECV_MGMT", + "WOW_WAKE_HOST_MGMT", + "WOW_RECV_EVENT", + "WOW_WAKE_HOST_EVENT", + "WOW_INIT", + "WOW_RECV_MAGIC_PKT", + "WOW_RECV_BITMAP_PATTERN", + "WOW_AP_VDEV_DISALLOW", + "WOW_STA_VDEV_DISALLOW", + "WOW_P2PGO_VDEV_DISALLOW", + "WOW_NS_OFLD_ENABLE", + "WOW_ARP_OFLD_ENABLE", + "WOW_NS_ARP_OFLD_DISABLE", + "WOW_NS_RECEIVED", + "WOW_NS_REPLIED", + "WOW_ARP_RECEIVED", + "WOW_ARP_REPLIED", + "WOW_DBGID_DEFINITION_END", + }, + { /* WAL VDEV */ + "" + }, + { /* WAL PDEV */ + "" + }, + { /* TEST */ + "TP_CHANGE_CHANNEL", + "TP_LOCAL_SEND", + }, + { /* STA SMPS */ + "STA_SMPS_DBGID_DEFINITION_START", + "STA_SMPS_DBGID_CREATE_PDEV_INSTANCE", + "STA_SMPS_DBGID_CREATE_VIRTUAL_CHAN_INSTANCE", + "STA_SMPS_DBGID_DELETE_VIRTUAL_CHAN_INSTANCE", + "STA_SMPS_DBGID_CREATE_STA_INSTANCE", + "STA_SMPS_DBGID_DELETE_STA_INSTANCE", + "STA_SMPS_DBGID_VIRTUAL_CHAN_SMPS_START", + "STA_SMPS_DBGID_VIRTUAL_CHAN_SMPS_STOP", + "STA_SMPS_DBGID_SEND_SMPS_ACTION_FRAME", + "STA_SMPS_DBGID_HOST_FORCED_MODE", + "STA_SMPS_DBGID_FW_FORCED_MODE", + "STA_SMPS_DBGID_RSSI_THRESHOLD_CROSSED", + "STA_SMPS_DBGID_SMPS_ACTION_FRAME_COMPLETION", + "STA_SMPS_DBGID_DTIM_EBT_EVENT_CHMASK_UPDATE", + "STA_SMPS_DBGID_DTIM_CHMASK_UPDATE", + "STA_SMPS_DBGID_DTIM_BEACON_EVENT_CHMASK_UPDATE", + "STA_SMPS_DBGID_DTIM_POWER_STATE_CHANGE", + "STA_SMPS_DBGID_DTIM_CHMASK_UPDATE_SLEEP", + "STA_SMPS_DBGID_DTIM_CHMASK_UPDATE_AWAKE", + 
"SMPS_DBGID_DEFINITION_END", + }, + { /* SWBMISS */ + "SWBMISS_DBGID_DEFINITION_START", + "SWBMISS_ENABLED", + "SWBMISS_DISABLED", + "SWBMISS_DBGID_DEFINITION_END", + }, + { /* WMMAC */ + "" + }, + { /* TDLS */ + "TDLS_DBGID_DEFINITION_START", + "TDLS_DBGID_VDEV_CREATE", + "TDLS_DBGID_VDEV_DELETE", + "TDLS_DBGID_ENABLED_PASSIVE", + "TDLS_DBGID_ENABLED_ACTIVE", + "TDLS_DBGID_DISABLED", + "TDLS_DBGID_CONNTRACK_TIMER", + "TDLS_DBGID_WAL_SET", + "TDLS_DBGID_WAL_GET", + "TDLS_DBGID_WAL_PEER_UPDATE_SET", + "TDLS_DBGID_WAL_PEER_UPDATE_EVT", + "TDLS_DBGID_WAL_VDEV_CREATE", + "TDLS_DBGID_WAL_VDEV_DELETE", + "TDLS_DBGID_WLAN_EVENT", + "TDLS_DBGID_WLAN_PEER_UPDATE_SET", + "TDLS_DBGID_PEER_EVT_DRP_THRESH", + "TDLS_DBGID_PEER_EVT_DRP_RATE", + "TDLS_DBGID_PEER_EVT_DRP_RSSI", + "TDLS_DBGID_PEER_EVT_DISCOVER", + "TDLS_DBGID_PEER_EVT_DELETE", + "TDLS_DBGID_PEER_CAP_UPDATE", + "TDLS_DBGID_UAPSD_SEND_PTI_FRAME", + "TDLS_DBGID_UAPSD_SEND_PTI_FRAME2PEER", + "TDLS_DBGID_UAPSD_START_PTR_TIMER", + "TDLS_DBGID_UAPSD_CANCEL_PTR_TIMER", + "TDLS_DBGID_UAPSD_PTR_TIMER_TIMEOUT", + "TDLS_DBGID_UAPSD_STA_PS_EVENT_HANDLER", + "TDLS_DBGID_UAPSD_PEER_EVENT_HANDLER", + "TDLS_DBGID_UAPSD_PS_DEFAULT_SETTINGS", + "TDLS_DBGID_UAPSD_GENERIC", + }, + { /* HB */ + "WLAN_HB_DBGID_DEFINITION_START", + "WLAN_HB_DBGID_INIT", + "WLAN_HB_DBGID_TCP_GET_TXBUF_FAIL", + "WLAN_HB_DBGID_TCP_SEND_FAIL", + "WLAN_HB_DBGID_BSS_PEER_NULL", + "WLAN_HB_DBGID_UDP_GET_TXBUF_FAIL", + "WLAN_HB_DBGID_UDP_SEND_FAIL", + "WLAN_HB_DBGID_WMI_CMD_INVALID_PARAM", + "WLAN_HB_DBGID_WMI_CMD_INVALID_OP", + "WLAN_HB_DBGID_WOW_NOT_ENTERED", + "WLAN_HB_DBGID_ALLOC_SESS_FAIL", + "WLAN_HB_DBGID_CTX_NULL", + "WLAN_HB_DBGID_CHKSUM_ERR", + "WLAN_HB_DBGID_UDP_TX", + "WLAN_HB_DBGID_TCP_TX", + "WLAN_HB_DBGID_DEFINITION_END", + }, + { /* TXBF */ + "TXBFEE_DBGID_START", + "TXBFEE_DBGID_NDPA_RECEIVED", + "TXBFEE_DBGID_HOST_CONFIG_TXBFEE_TYPE", + "TXBFER_DBGID_SEND_NDPA", + "TXBFER_DBGID_GET_NDPA_BUF_FAIL", + "TXBFER_DBGID_SEND_NDPA_FAIL", + 
"TXBFER_DBGID_GET_NDP_BUF_FAIL", + "TXBFER_DBGID_SEND_NDP_FAIL", + "TXBFER_DBGID_GET_BRPOLL_BUF_FAIL", + "TXBFER_DBGID_SEND_BRPOLL_FAIL", + "TXBFER_DBGID_HOST_CONFIG_CMDID", + "TXBFEE_DBGID_HOST_CONFIG_CMDID", + "TXBFEE_DBGID_ENABLED_ENABLED_UPLOAD_H", + "TXBFEE_DBGID_UPLOADH_CV_TAG", + "TXBFEE_DBGID_UPLOADH_H_TAG", + "TXBFEE_DBGID_CAPTUREH_RECEIVED", + "TXBFEE_DBGID_PACKET_IS_STEERED", + "TXBFEE_UPLOADH_EVENT_ALLOC_MEM_FAIL", + "TXBFEE_DBGID_END", + }, + { /*BATCH SCAN */ + }, + { /*THERMAL MGR */ + "THERMAL_MGR_DBGID_DEFINITION_START", + "THERMAL_MGR_NEW_THRESH", + "THERMAL_MGR_THRESH_CROSSED", + "THERMAL_MGR_DBGID_DEFINITION END", + }, + { /* WLAN_MODULE_PHYERR_DFS */ + "" + }, + { + /* WLAN_MODULE_RMC */ + "RMC_DBGID_DEFINITION_START", + "RMC_CREATE_INSTANCE", + "RMC_DELETE_INSTANCE", + "RMC_LDR_SEL", + "RMC_NO_LDR", + "RMC_LDR_NOT_SEL", + "RMC_LDR_INF_SENT", + "RMC_PEER_ADD", + "RMC_PEER_DELETE", + "RMC_PEER_UNKNOWN", + "RMC_SET_MODE", + "RMC_SET_ACTION_PERIOD", + "RMC_ACRION_FRAME_RX", + "RMC_DBGID_DEFINITION_END", + }, + { + /* WLAN_MODULE_STATS */ + "WLAN_STATS_DBGID_DEFINITION_START", + "WLAN_STATS_DBGID_EST_LINKSPEED_VDEV_EN_DIS", + "WLAN_STATS_DBGID_EST_LINKSPEED_CHAN_TIME_START", + "WLAN_STATS_DBGID_EST_LINKSPEED_CHAN_TIME_END", + "WLAN_STATS_DBGID_EST_LINKSPEED_CALC", + "WLAN_STATS_DBGID_EST_LINKSPEED_UPDATE_HOME_CHAN", + "WLAN_STATS_DBGID_DEFINITION_END", + }, + { + /* WLAN_MODULE_NAN */ + }, + { + /* WLAN_MODULE_IBSS_PWRSAVE */ + "IBSS_PS_DBGID_DEFINITION_START", + "IBSS_PS_DBGID_PEER_CREATE", + "IBSS_PS_DBGID_PEER_DELETE", + "IBSS_PS_DBGID_VDEV_CREATE", + "IBSS_PS_DBGID_VDEV_DELETE", + "IBSS_PS_DBGID_VDEV_EVENT", + "IBSS_PS_DBGID_PEER_EVENT", + "IBSS_PS_DBGID_DELIVER_CAB", + "IBSS_PS_DBGID_DELIVER_UC_DATA", + "IBSS_PS_DBGID_DELIVER_UC_DATA_ERROR", + "IBSS_PS_DBGID_UC_INACTIVITY_TMR_RESTART", + "IBSS_PS_DBGID_MC_INACTIVITY_TMR_RESTART", + "IBSS_PS_DBGID_NULL_TX_COMPLETION", + "IBSS_PS_DBGID_ATIM_TIMER_START", + "IBSS_PS_DBGID_UC_ATIM_SEND", + 
"IBSS_PS_DBGID_BC_ATIM_SEND", + "IBSS_PS_DBGID_UC_TIMEOUT", + "IBSS_PS_DBGID_PWR_COLLAPSE_ALLOWED", + "IBSS_PS_DBGID_PWR_COLLAPSE_NOT_ALLOWED", + "IBSS_PS_DBGID_SET_PARAM", + "IBSS_PS_DBGID_HOST_TX_PAUSE", + "IBSS_PS_DBGID_HOST_TX_UNPAUSE", + "IBSS_PS_DBGID_PS_DESC_BIN_HWM", + "IBSS_PS_DBGID_PS_DESC_BIN_LWM", + "IBSS_PS_DBGID_PS_KICKOUT_PEER", + "IBSS_PS_DBGID_SET_PEER_PARAM", + "IBSS_PS_DBGID_BCN_ATIM_WIN_MISMATCH", + "IBSS_PS_DBGID_RX_CHAINMASK_CHANGE", + }, + { + /* HIF UART Interface DBGIDs */ + "HIF_UART_DBGID_START", + "HIF_UART_DBGID_POWER_STATE", + "HIF_UART_DBGID_TXRX_FLOW", + "HIF_UART_DBGID_TXRX_CTRL_CHAR", + "HIF_UART_DBGID_TXRX_BUF_DUMP", + }, + { + /* LPI */ + "" + }, + { + /* EXTSCAN DBGIDs */ + "EXTSCAN_START", + "EXTSCAN_STOP", + "EXTSCAN_CLEAR_ENTRY_CONTENT", + "EXTSCAN_GET_FREE_ENTRY_SUCCESS", + "EXTSCAN_GET_FREE_ENTRY_INCONSISTENT", + "EXTSCAN_GET_FREE_ENTRY_NO_MORE_ENTRIES", + "EXTSCAN_CREATE_ENTRY_SUCCESS", + "EXTSCAN_CREATE_ENTRY_ERROR", + "EXTSCAN_SEARCH_SCAN_ENTRY_QUEUE", + "EXTSCAN_SEARCH_SCAN_ENTRY_KEY_FOUND", + "EXTSCAN_SEARCH_SCAN_ENTRY_KEY_NOT_FOUND", + "EXTSCAN_ADD_ENTRY", + "EXTSCAN_BUCKET_SEND_OPERATION_EVENT", + "EXTSCAN_BUCKET_SEND_OPERATION_EVENT_FAILED", + "EXTSCAN_BUCKET_START_SCAN_CYCLE", + "EXTSCAN_BUCKET_PERIODIC_TIMER", + "EXTSCAN_SEND_START_STOP_EVENT", + "EXTSCAN_NOTIFY_WLAN_CHANGE", + "EXTSCAN_NOTIFY_WLAN_HOTLIST_MATCH", + "EXTSCAN_MAIN_RECEIVED_FRAME", + "EXTSCAN_MAIN_NO_SSID_IE", + "EXTSCAN_MAIN_MALFORMED_FRAME", + "EXTSCAN_FIND_BSSID_BY_REFERENCE", + "EXTSCAN_FIND_BSSID_BY_REFERENCE_ERROR", + "EXTSCAN_NOTIFY_TABLE_USAGE", + "EXTSCAN_FOUND_RSSI_ENTRY", + "EXTSCAN_BSSID_FOUND_RSSI_SAMPLE", + "EXTSCAN_BSSID_ADDED_RSSI_SAMPLE", + "EXTSCAN_BSSID_REPLACED_RSSI_SAMPLE", + "EXTSCAN_BSSID_TRANSFER_CURRENT_SAMPLES", + "EXTSCAN_BUCKET_PROCESS_SCAN_EVENT", + "EXTSCAN_BUCKET_CANNOT_FIND_BUCKET", + "EXTSCAN_START_SCAN_REQUEST_FAILED", + "EXTSCAN_BUCKET_STOP_CURRENT_SCANS", + "EXTSCAN_BUCKET_SCAN_STOP_REQUEST", + 
"EXTSCAN_BUCKET_PERIODIC_TIMER_ERROR", + "EXTSCAN_BUCKET_START_OPERATION", + "EXTSCAN_START_INTERNAL_ERROR", + "EXTSCAN_NOTIFY_HOTLIST_MATCH", + "EXTSCAN_CONFIG_HOTLIST_TABLE", + "EXTSCAN_CONFIG_WLAN_CHANGE_TABLE", + }, + { /* UNIT_TEST */ + "UNIT_TEST_GEN", + }, + { /* MLME */ + "MLME_DEBUG_CMN", + "MLME_IF", + "MLME_AUTH", + "MLME_REASSOC", + "MLME_DEAUTH", + "MLME_DISASSOC", + "MLME_ROAM", + "MLME_RETRY", + "MLME_TIMER", + "MLME_FRMPARSE", + }, + { /*SUPPLICANT */ + "SUPPL_INIT", + "SUPPL_RECV_EAPOL", + "SUPPL_RECV_EAPOL_TIMEOUT", + "SUPPL_SEND_EAPOL", + "SUPPL_MIC_MISMATCH", + "SUPPL_FINISH", + }, +}; + +int dbglog_module_log_enable(wmi_unified_t wmi_handle, uint32_t mod_id, + bool isenable) +{ + uint32_t val = 0; + + if (mod_id > WLAN_MODULE_ID_MAX) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("dbglog_module_log_enable: Invalid module id %d\n", + mod_id)); + return -EINVAL; + } + + WMI_DBGLOG_SET_MODULE_ID(val, mod_id); + if (isenable) { + /* set it to global module level */ + WMI_DBGLOG_SET_LOG_LEVEL(val, DBGLOG_INFO); + } else { + /* set it to ERROR level */ + WMI_DBGLOG_SET_LOG_LEVEL(val, DBGLOG_ERR); + } + wma_config_debug_module_cmd(wmi_handle, WMI_DEBUG_LOG_PARAM_LOG_LEVEL, + val, NULL, 0); + + return 0; +} + +int dbglog_vap_log_enable(wmi_unified_t wmi_handle, uint16_t vap_id, + bool isenable) +{ + if (vap_id > DBGLOG_MAX_VDEVID) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("dbglog_vap_log_enable:Invalid vap_id %d\n", + vap_id)); + return -EINVAL; + } + + wma_config_debug_module_cmd(wmi_handle, + isenable ? 
WMI_DEBUG_LOG_PARAM_VDEV_ENABLE : + WMI_DEBUG_LOG_PARAM_VDEV_DISABLE, vap_id, + NULL, 0); + + return 0; +} + +/* Set one global log level, applied to every module via + * WMI_DEBUG_LOG_MODULE_ALL. Returns 0 or -EINVAL on a bad level. + */ +int dbglog_set_log_lvl(wmi_unified_t wmi_handle, DBGLOG_LOG_LVL log_lvl) +{ + uint32_t val = 0; /* packed module-id | log-level word */ + + if (log_lvl > DBGLOG_LVL_MAX) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("dbglog_set_log_lvl:Invalid log level %d\n", + log_lvl)); + return -EINVAL; + } + + WMI_DBGLOG_SET_MODULE_ID(val, WMI_DEBUG_LOG_MODULE_ALL); + WMI_DBGLOG_SET_LOG_LEVEL(val, log_lvl); + wma_config_debug_module_cmd(wmi_handle, WMI_DEBUG_LOG_PARAM_LOG_LEVEL, + val, NULL, 0); + + return 0; +} + +/* Forward an already-packed module/level word to firmware unchanged. + * NOTE(review): mod_log_lvl is not range-checked here, unlike + * dbglog_set_log_lvl() above -- confirm callers validate it. + */ +int dbglog_set_mod_log_lvl(wmi_unified_t wmi_handle, uint32_t mod_log_lvl) +{ + /* set the global module level to log_lvl */ + wma_config_debug_module_cmd(wmi_handle, WMI_DEBUG_LOG_PARAM_LOG_LEVEL, + mod_log_lvl, NULL, 0); + + return 0; +} + +/* Tell firmware which vdevs may emit dbglog (one bit per vdev id) */ +void +dbglog_set_vap_enable_bitmap(wmi_unified_t wmi_handle, + uint32_t vap_enable_bitmap) +{ + wma_config_debug_module_cmd(wmi_handle, + WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP, + vap_enable_bitmap, NULL, 0); +} + +/* Tell firmware which modules may emit dbglog at the given level */ +void +dbglog_set_mod_enable_bitmap(wmi_unified_t wmi_handle, uint32_t log_level, + uint32_t *mod_enable_bitmap, uint32_t bitmap_len) +{ + wma_config_debug_module_cmd(wmi_handle, + WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP, + log_level, mod_enable_bitmap, bitmap_len); +} + +/* Enable FW log reporting (all vdevs, all modules) or disable it by + * clearing both bitmaps. Always returns 0. + */ +int dbglog_report_enable(wmi_unified_t wmi_handle, bool isenable) +{ + int bitmap[2] = { 0 }; + + if (isenable) { + /* set the vap enable bitmap */ + dbglog_set_vap_enable_bitmap(wmi_handle, 0xFFFF); + bitmap[0] = 0xFFFFFFFF; + bitmap[1] = 0x1F; + /* set the module level bitmap */ + dbglog_set_mod_enable_bitmap(wmi_handle, 0x0, bitmap, 2); + } else { + /* bitmap[] is still all-zero here, so both bitmaps are cleared */ + dbglog_set_vap_enable_bitmap(wmi_handle, bitmap[0]); + dbglog_set_mod_enable_bitmap(wmi_handle, DBGLOG_LVL_MAX, bitmap, + 2); + } + return 0; +} + +/* Map (module id, debug id) to its message string from DBG_MSG_ARR; + * falls back to a formatted "UNKNOWN m:id" string. + * NOTE(review): the fallback uses a shared static buffer -- not re-entrant. + */ +static char *dbglog_get_msg(uint32_t moduleid, uint32_t debugid) +{ + static char unknown_str[64]; + + if (moduleid < WLAN_MODULE_ID_MAX && debugid < MAX_DBG_MSGS) { + char *str = 
DBG_MSG_ARR[moduleid][debugid]; + if (str && str[0] != '\0') + return str; + } + + snprintf(unknown_str, sizeof(unknown_str), + "UNKNOWN %u:%u", moduleid, debugid); + + return unknown_str; +} + +static +void dbglog_printf(uint32_t timestamp, uint16_t vap_id, const char *fmt, ...) +{ + char buf[128]; + va_list ap; + + if (vap_id < DBGLOG_MAX_VDEVID) { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + (DBGLOG_PRINT_PREFIX "[%u] vap-%u ", timestamp, + vap_id)); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + (DBGLOG_PRINT_PREFIX "[%u] ", timestamp)); + } + + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s\n", buf)); +} + +static void +dbglog_printf_no_line_break(uint32_t timestamp, + uint16_t vap_id, const char *fmt, ...) +{ + char buf[128]; + va_list ap; + + if (vap_id < DBGLOG_MAX_VDEVID) { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + (DBGLOG_PRINT_PREFIX "[%u] vap-%u ", timestamp, + vap_id)); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + (DBGLOG_PRINT_PREFIX "[%u] ", timestamp)); + } + + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s", buf)); +} + +#define USE_NUMERIC 0 + +static A_BOOL +dbglog_default_print_handler(uint32_t mod_id, uint16_t vap_id, uint32_t dbg_id, + uint32_t timestamp, uint16_t numargs, + uint32_t *args) +{ + int i; + + if (vap_id < DBGLOG_MAX_VDEVID) { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + (DBGLOG_PRINT_PREFIX "[%u] vap-%u %s ( ", + timestamp, vap_id, dbglog_get_msg(mod_id, + dbg_id))); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + (DBGLOG_PRINT_PREFIX "[%u] %s ( ", timestamp, + dbglog_get_msg(mod_id, dbg_id))); + } + + for (i = 0; i < numargs; i++) { +#if USE_NUMERIC + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%u", args[i])); +#else + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%#x", args[i])); +#endif + if ((i + 1) < numargs) { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (", ")); + } + } + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, (" )\n")); + + return true; +} + +#define 
DBGLOG_PARSE_ARGS_STRING_LENGTH (DBGLOG_NUM_ARGS_MAX * 11 + 10) +static int dbglog_print_raw_data(uint32_t *buffer, uint32_t length) +{ + uint32_t timestamp; + uint32_t debugid; + uint32_t moduleid; + uint16_t numargs, curArgs; + uint32_t count = 0, totalWriteLen, writeLen; + char parseArgsString[DBGLOG_PARSE_ARGS_STRING_LENGTH]; + char *dbgidString; + + while ((count + 1) < length) { + + debugid = DBGLOG_GET_DBGID(buffer[count + 1]); + moduleid = DBGLOG_GET_MODULEID(buffer[count + 1]); + numargs = DBGLOG_GET_NUMARGS(buffer[count + 1]); + timestamp = DBGLOG_GET_TIME_STAMP(buffer[count]); + + if (moduleid < WLAN_MODULE_ID_MAX && debugid < MAX_DBG_MSGS + && numargs <= DBGLOG_NUM_ARGS_MAX) { + + OS_MEMZERO(parseArgsString, sizeof(parseArgsString)); + totalWriteLen = 0; + + if (!numargs || (count + numargs + 2 > length)) + goto skip_args_processing; + + for (curArgs = 0; curArgs < numargs; curArgs++) { + /* + * Using sprintf_s instead of sprintf, + * to avoid length overflow + */ + writeLen = + snprintf(parseArgsString + totalWriteLen, + DBGLOG_PARSE_ARGS_STRING_LENGTH - + totalWriteLen, "%x ", + buffer[count + 2 + curArgs]); + totalWriteLen += writeLen; + } +skip_args_processing: + if (debugid < MAX_DBG_MSGS) { + dbgidString = DBG_MSG_ARR[moduleid][debugid]; + if (dbgidString != NULL) { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("fw:%s(%x %x):%s\n", + dbgidString, timestamp, + buffer[count + 1], + parseArgsString)); + } else { + /* host need sync with FW id */ + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("fw:%s:m:%x,id:%x(%x %x):%s\n", + "UNKNOWN", moduleid, + debugid, timestamp, + buffer[count + 1], + parseArgsString)); + } + } else if (debugid == + DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG) { + /* specific debugid */ + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("fw:%s:m:%x,id:%x(%x %x):%s\n", + "DBGLOG_SM_MSG", moduleid, + debugid, timestamp, + buffer[count + 1], + parseArgsString)); + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("fw:%s:m:%x,id:%x(%x %x):%s\n", + "UNKNOWN", moduleid, 
debugid, + timestamp, buffer[count + 1], + parseArgsString)); + } + } + + /* 32 bit Time stamp + 32 bit Dbg header */ + count += numargs + 2; + } + + return 0; + +} + +#ifdef WLAN_OPEN_SOURCE +static int +dbglog_debugfs_raw_data(wmi_unified_t wmi_handle, const uint8_t *buf, + uint32_t length, uint32_t dropped) +{ + struct fwdebug *fwlog = (struct fwdebug *)&wmi_handle->dbglog; + struct dbglog_slot *slot; + struct sk_buff *skb; + size_t slot_len; + + if (WARN_ON(length > ATH6KL_FWLOG_PAYLOAD_SIZE)) + return -ENODEV; + + slot_len = sizeof(*slot) + ATH6KL_FWLOG_PAYLOAD_SIZE; + + skb = alloc_skb(slot_len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + slot = (struct dbglog_slot *)skb_put(skb, slot_len); + slot->diag_type = (uint32_t) DIAG_TYPE_FW_DEBUG_MSG; + slot->timestamp = cpu_to_le32(jiffies); + slot->length = cpu_to_le32(length); + slot->dropped = cpu_to_le32(dropped); + memcpy(slot->payload, buf, length); + + /* Need to pad each record to fixed length ATH6KL_FWLOG_PAYLOAD_SIZE */ + memset(slot->payload + length, 0, ATH6KL_FWLOG_PAYLOAD_SIZE - length); + + spin_lock(&fwlog->fwlog_queue.lock); + + __skb_queue_tail(&fwlog->fwlog_queue, skb); + + complete(&fwlog->fwlog_completion); + + /* drop oldest entries */ + while (skb_queue_len(&fwlog->fwlog_queue) > ATH6KL_FWLOG_MAX_ENTRIES) { + skb = __skb_dequeue(&fwlog->fwlog_queue); + kfree_skb(skb); + } + + spin_unlock(&fwlog->fwlog_queue.lock); + + return true; +} +#endif /* WLAN_OPEN_SOURCE */ + +/** + * nl_srv_bcast_fw_logs() - Wrapper func to send bcast msgs to FW logs mcast grp + * @skb: sk buffer pointer + * + * Sends the bcast message to FW logs multicast group with generic nl socket + * if CNSS_GENL is enabled. Else, use the legacy netlink socket to send. 
 + * + * Return: zero on success, error code otherwise + */ +static int nl_srv_bcast_fw_logs(struct sk_buff *skb) +{ +#ifdef CNSS_GENL + return nl_srv_bcast(skb, CLD80211_MCGRP_FW_LOGS, WLAN_NL_MSG_CNSS_DIAG); +#else + return nl_srv_bcast(skb); +#endif +} + +/** + * send_fw_diag_nl_data - pack the data from fw diag event handler + * @buffer: buffer of diag event + * @len: length of the diag event + * @event_type: the event type (NOTE(review): not referenced in the body + * below -- confirm whether it should be carried in the message) + * + * Broadcasts one diag record, prefixed with the radio index, to the + * cnss-diag netlink listeners. Sends only when multicast logging is on. + * + * return: 0 if sent successfully, otherwise error code + */ +static int send_fw_diag_nl_data(const uint8_t *buffer, uint32_t len, + uint32_t event_type) +{ + struct sk_buff *skb_out; + struct nlmsghdr *nlh; + int res = 0; + tAniNlHdr *wnl; + int radio; + int msg_len; + + if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE)) + return -ENODEV; + + if (nl_srv_is_initialized() != 0) /* netlink service not up yet */ + return -EIO; + + radio = cds_get_radio_index(); + if (radio == -EINVAL) + return -EIO; + + if (cds_is_multicast_logging()) { + msg_len = len + sizeof(radio); /* payload plus leading radio index */ + skb_out = nlmsg_new(msg_len, GFP_KERNEL); + if (!skb_out) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Failed to allocate new skb\n")); + return -ENOMEM; + } + nlh = nlmsg_put(skb_out, 0, 0, WLAN_NL_MSG_CNSS_DIAG, msg_len, + 0); + if (!nlh) { + kfree_skb(skb_out); + return -EMSGSIZE; + } + wnl = (tAniNlHdr *)nlh; + wnl->radio = radio; + + /* data buffer offset from nlmsg_hdr + sizeof(int) radio */ + memcpy(nlmsg_data(nlh) + sizeof(radio), buffer, len); + + res = nl_srv_bcast_fw_logs(skb_out); + if ((res < 0) && (res != -ESRCH)) { /* -ESRCH = no receivers, not logged */ + /* NOTE(review): assumes nl_srv_bcast_fw_logs() consumed + * skb_out even on failure -- confirm no skb leak here. + */ + AR_DEBUG_PRINTF(ATH_DEBUG_RSVD1, + ("%s: nl_srv_bcast_fw_logs failed 0x%x\n", + __func__, res)); + return res; + } + } + return res; +} + +/** + * process_fw_diag_event_data() - process diag events and fw messages + * @datap: data to be processed + * @num_data: number of data chunks + * + * return: success + */ +static int +process_fw_diag_event_data(uint8_t *datap, uint32_t num_data) +{ + uint32_t diag_type; + uint32_t nl_data_len; /* diag hdr + payload */ + uint32_t diag_data_len; /* each fw 
diag payload */ + struct wlan_diag_data *diag_data; + + while (num_data > 0) { + diag_data = (struct wlan_diag_data *)datap; + diag_type = WLAN_DIAG_0_TYPE_GET(diag_data->word0); + diag_data_len = WLAN_DIAG_0_LEN_GET(diag_data->word0); + /* Length of diag struct and len of payload */ + nl_data_len = sizeof(struct wlan_diag_data) + diag_data_len; + if (nl_data_len > num_data) { + /* record claims more bytes than remain: stop parsing */ + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("processed all the messages\n")); + return 0; + } + + switch (diag_type) { + case DIAG_TYPE_FW_EVENT: + /* NOTE(review): returning here forwards only the first + * record; the advance code below never runs for this + * type, despite the "Move to the next event" comment -- + * confirm this is intended. + */ + return send_fw_diag_nl_data(datap, nl_data_len, + diag_type); + break; /* unreachable after return */ + case DIAG_TYPE_FW_LOG: + return send_fw_diag_nl_data(datap, nl_data_len, + diag_type); + break; /* unreachable after return */ + } + /* Move to the next event and send to cnss-diag */ + datap += nl_data_len; + num_data -= nl_data_len; + } + + return 0; +} + +/* Wrap one fw-diag blob of length len in a struct dbglog_slot (padded to + * ATH6KL_FWLOG_PAYLOAD_SIZE), tag it with cmd, and broadcast it on the + * cnss-diag netlink channel. + * NOTE(review): returns A_ERROR on alloc failure but negative errno on + * the other failure paths -- mixed error conventions. + */ +static int +send_diag_netlink_data(const uint8_t *buffer, uint32_t len, uint32_t cmd) +{ + struct sk_buff *skb_out; + struct nlmsghdr *nlh; + int res = 0; + struct dbglog_slot *slot; + size_t slot_len; + tAniNlHdr *wnl; + int radio; + + if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE)) + return -ENODEV; + + if (nl_srv_is_initialized() != 0) + return -EIO; + + radio = cds_get_radio_index(); + if (radio == -EINVAL) + return -EIO; + + if (cds_is_multicast_logging()) { + slot_len = sizeof(*slot) + ATH6KL_FWLOG_PAYLOAD_SIZE + + sizeof(radio); + + skb_out = nlmsg_new(slot_len, GFP_KERNEL); + if (!skb_out) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Failed to allocate new skb\n")); + return A_ERROR; + } + + nlh = nlmsg_put(skb_out, 0, 0, WLAN_NL_MSG_CNSS_DIAG, + slot_len, 0); + if (!nlh) { + kfree_skb(skb_out); + return -EMSGSIZE; + } + wnl = (tAniNlHdr *)nlh; + wnl->radio = radio; + /* data buffer offset from: nlmsg_hdr + sizeof(int) radio */ + slot = (struct dbglog_slot *) (nlmsg_data(nlh) + sizeof(radio)); + slot->diag_type = cmd; + slot->timestamp = cpu_to_le32(jiffies); + slot->length = cpu_to_le32(len); + /* Version mapped to get_version here */ +#ifdef WLAN_DEBUG + 
slot->dropped = get_version; +#endif + memcpy(slot->payload, buffer, len); + + /* Need to pad each record to fixed length + * ATH6KL_FWLOG_PAYLOAD_SIZE + */ + memset(slot->payload + len, 0, ATH6KL_FWLOG_PAYLOAD_SIZE - len); + + res = nl_srv_bcast_fw_logs(skb_out); + if ((res < 0) && (res != -ESRCH)) { + AR_DEBUG_PRINTF(ATH_DEBUG_RSVD1, + ("%s: nl_srv_bcast_fw_logs failed 0x%x\n", + __func__, res)); + return res; + } + } + return res; +} + +static int +dbglog_process_netlink_data(wmi_unified_t wmi_handle, const uint8_t *buffer, + uint32_t len, uint32_t dropped) +{ + struct sk_buff *skb_out; + struct nlmsghdr *nlh; + int res = 0; + struct dbglog_slot *slot; + size_t slot_len; + tAniNlHdr *wnl; + int radio; + + if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE)) + return -ENODEV; + + if (nl_srv_is_initialized() != 0) + return -EIO; + + radio = cds_get_radio_index(); + if (radio == -EINVAL) + return -EIO; + + if (cds_is_multicast_logging()) { + slot_len = sizeof(*slot) + ATH6KL_FWLOG_PAYLOAD_SIZE + + sizeof(radio); + + skb_out = nlmsg_new(slot_len, GFP_KERNEL); + if (!skb_out) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Failed to allocate new skb\n")); + return A_ERROR; + } + + nlh = nlmsg_put(skb_out, 0, 0, WLAN_NL_MSG_CNSS_DIAG, + slot_len, 0); + if (!nlh) { + kfree_skb(skb_out); + return -EMSGSIZE; + } + wnl = (tAniNlHdr *)nlh; + wnl->radio = radio; + /* data buffer offset from: nlmsg_hdr + sizeof(int) radio */ + slot = (struct dbglog_slot *) (nlmsg_data(nlh) + sizeof(radio)); + slot->diag_type = (uint32_t) DIAG_TYPE_FW_DEBUG_MSG; + slot->timestamp = cpu_to_le32(jiffies); + slot->length = cpu_to_le32(len); + slot->dropped = cpu_to_le32(dropped); + memcpy(slot->payload, buffer, len); + + /* Need to pad each record to fixed length + * ATH6KL_FWLOG_PAYLOAD_SIZE + */ + memset(slot->payload + len, 0, ATH6KL_FWLOG_PAYLOAD_SIZE - len); + + res = nl_srv_bcast_fw_logs(skb_out); + if ((res < 0) && (res != -ESRCH)) { + AR_DEBUG_PRINTF(ATH_DEBUG_RSVD1, + ("%s: nl_srv_bcast_fw_logs failed 
0x%x\n", + __func__, res)); + return res; + } + } + return res; +} + +/* + * WMI diag data event handler, this function invoked as a CB + * when there DIAG_EVENT, DIAG_MSG, DIAG_DBG to be + * forwarded from the FW. This is the new implementation for + * replacement of fw_dbg and dbg messages + */ + +static int diag_fw_handler(ol_scn_t scn, uint8_t *data, uint32_t datalen) +{ + + tp_wma_handle wma = (tp_wma_handle) scn; + WMI_DIAG_EVENTID_param_tlvs *param_buf; + uint8_t *datap; + uint32_t len = 0; +#ifdef WLAN_DEBUG + uint32_t *buffer; +#endif + + if (!wma) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("NULL Pointer assigned\n")); + return A_ERROR; + } + /* when fw asser occurs,host can't use TLV format. */ + if (wma->is_fw_assert) { + datap = data; + len = datalen; + wma->is_fw_assert = 0; + } else { + param_buf = (WMI_DIAG_EVENTID_param_tlvs *) data; + if (!param_buf) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Get NULL point message from FW\n")); + return A_ERROR; + } + + datap = param_buf->bufp; + len = param_buf->num_bufp; + +#ifdef WLAN_DEBUG + if (!get_version) { + if (len < 2*(sizeof(uint32_t))) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("len is less than expected\n")); + return A_ERROR; + } + buffer = (uint32_t *) datap; + buffer++; /* skip offset */ + if (WLAN_DIAG_TYPE_CONFIG == DIAG_GET_TYPE(*buffer)) { + if (len < 3*(sizeof(uint32_t))) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("len is less than expected\n")); + return A_ERROR; + } + buffer++; /* skip */ + if (DIAG_VERSION_INFO == DIAG_GET_ID(*buffer)) { + if (len < 4*(sizeof(uint32_t))) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("len is less than expected\n")); + return A_ERROR; + } + buffer++; /* skip */ + /* get payload */ + get_version = *buffer; + } + } + } +#endif + } + if (dbglog_process_type == DBGLOG_PROCESS_PRINT_RAW) { +#ifdef WLAN_DEBUG + if (!gprint_limiter) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("NOT Supported only supports net link socket\n")); + gprint_limiter = true; + } +#endif + return 0; + } + + if 
(dbglog_process_type == DBGLOG_PROCESS_NET_RAW) { + return send_diag_netlink_data((uint8_t *) datap, + len, DIAG_TYPE_FW_MSG); + } +#ifdef WLAN_OPEN_SOURCE + if (dbglog_process_type == DBGLOG_PROCESS_POOL_RAW) { +#ifdef WLAN_DEBUG + if (!gprint_limiter) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("NOT Supported only supports net link socket\n")); + gprint_limiter = true; + } +#endif + return 0; + } +#endif /* WLAN_OPEN_SOURCE */ +#ifdef WLAN_DEBUG + if (!gprint_limiter) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("NOT Supported only supports net link socket\n")); + gprint_limiter = true; + } +#endif + /* Always returns zero */ + return 0; +} + +/* + * WMI diag data event handler, this function invoked as a CB + * when there DIAG_DATA to be forwarded from the FW. + */ +static int +fw_diag_data_event_handler(ol_scn_t scn, uint8_t *data, uint32_t datalen) +{ + + WMI_DIAG_DATA_CONTAINER_EVENTID_param_tlvs *param_buf; + uint8_t *datap; + uint32_t num_data; /* Total events */ + + param_buf = (WMI_DIAG_DATA_CONTAINER_EVENTID_param_tlvs *) data; + if (!param_buf) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Got NULL point message from FW\n")); + return A_ERROR; + } + + num_data = param_buf->num_bufp; + + datap = (uint8_t *) param_buf->bufp; + + return process_fw_diag_event_data(datap, num_data); +} + +int dbglog_parse_debug_logs(ol_scn_t scn, uint8_t *data, uint32_t datalen) +{ + tp_wma_handle wma = (tp_wma_handle) scn; + uint32_t count; + uint32_t *buffer; + uint32_t timestamp; + uint32_t debugid; + uint32_t moduleid; + uint16_t vapid; + uint16_t numargs; + qdf_size_t length; + uint32_t dropped; + WMI_DEBUG_MESG_EVENTID_param_tlvs *param_buf; + uint8_t *datap; + uint32_t len; + + if (!wma) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("NULL Pointer assigned\n")); + return A_ERROR; + } + /*when fw asser occurs,host can't use TLV format. 
*/ + if (wma->is_fw_assert) { + datap = data; + len = datalen; + wma->is_fw_assert = 0; + } else { + param_buf = (WMI_DEBUG_MESG_EVENTID_param_tlvs *) data; + if (!param_buf) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("Get NULL point message from FW\n")); + return A_ERROR; + } + + datap = param_buf->bufp; + len = param_buf->num_bufp; + } + + if (len < sizeof(dropped)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Invalid length\n")); + return A_ERROR; + } + + dropped = *((uint32_t *) datap); + if (dropped > 0) { + AR_DEBUG_PRINTF(ATH_DEBUG_TRC, + ("%d log buffers are dropped\n", dropped)); + } + datap += sizeof(dropped); + len -= sizeof(dropped); + + count = 0; + buffer = (uint32_t *) datap; + length = (len >> 2); + + if (dbglog_process_type == DBGLOG_PROCESS_PRINT_RAW) + return dbglog_print_raw_data(buffer, length); + + if (dbglog_process_type == DBGLOG_PROCESS_NET_RAW) { + return dbglog_process_netlink_data((wmi_unified_t) wma-> + wmi_handle, + (uint8_t *) buffer, + len, dropped); + } +#ifdef WLAN_OPEN_SOURCE + if (dbglog_process_type == DBGLOG_PROCESS_POOL_RAW) { + return dbglog_debugfs_raw_data((wmi_unified_t) wma->wmi_handle, + (uint8_t *) buffer, len, + dropped); + } +#endif /* WLAN_OPEN_SOURCE */ + + while ((count + 2) < length) { + timestamp = DBGLOG_GET_TIME_STAMP(buffer[count]); + debugid = DBGLOG_GET_DBGID(buffer[count + 1]); + moduleid = DBGLOG_GET_MODULEID(buffer[count + 1]); + vapid = DBGLOG_GET_VDEVID(buffer[count + 1]); + numargs = DBGLOG_GET_NUMARGS(buffer[count + 1]); + + if ((count + 2 + numargs) > length) + return A_OK; + + if (moduleid >= WLAN_MODULE_ID_MAX) + return A_OK; + + if (mod_print[moduleid] == NULL) { + /* + * No module specific log registered + * use the default handler + */ + dbglog_default_print_handler(moduleid, vapid, debugid, + timestamp, numargs, + (((uint32_t *) buffer) + + 2 + count)); + } else { + if (!(mod_print[moduleid](moduleid, vapid, debugid, + timestamp, numargs, + (((uint32_t *) buffer) + + 2 + count)))) { + /* + * The message is 
not handled + * by the module specific handler + */ + dbglog_default_print_handler(moduleid, vapid, + debugid, timestamp, + numargs, + (((uint32_t *) + buffer) + 2 + + count)); + + } + } + + /* 32 bit Time stamp + 32 bit Dbg header */ + count += numargs + 2; + } + /* Always returns zero */ + return A_OK; +} + +void dbglog_reg_modprint(uint32_t mod_id, module_dbg_print printfn) +{ + if (!mod_print[mod_id]) { + mod_print[mod_id] = printfn; + } else { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("module print is already registered for this module %d\n", + mod_id)); + } +} + +static void +dbglog_sm_print(uint32_t timestamp, + uint16_t vap_id, + uint16_t numargs, + uint32_t *args, + const char *module_prefix, + const char *const states[], uint32_t num_states, + const char *const events[], uint32_t num_events) +{ + uint8_t type, arg1, arg2, arg3; + uint32_t extra, extra2, extra3; + + if (numargs != 4) + return; + + type = (args[0] >> 24) & 0xff; + arg1 = (args[0] >> 16) & 0xff; + arg2 = (args[0] >> 8) & 0xff; + arg3 = (args[0] >> 0) & 0xff; + + extra = args[1]; + extra2 = args[2]; + extra3 = args[3]; + + switch (type) { + case 0: /* state transition */ + if (arg1 < num_states && arg2 < num_states) { + dbglog_printf(timestamp, vap_id, + "%s: %s => %s (%#x, %#x, %#x)", + module_prefix, states[arg1], states[arg2], + extra, extra2, extra3); + } else { + dbglog_printf(timestamp, vap_id, + "%s: %u => %u (%#x, %#x, %#x)", + module_prefix, arg1, arg2, extra, extra2, + extra3); + } + break; + case 1: /* dispatch event */ + if (arg1 < num_states && arg2 < num_events) { + dbglog_printf(timestamp, vap_id, + "%s: %s < %s (%#x, %#x, %#x)", + module_prefix, states[arg1], events[arg2], + extra, extra2, extra3); + } else { + dbglog_printf(timestamp, vap_id, + "%s: %u < %u (%#x, %#x, %#x)", + module_prefix, arg1, arg2, extra, extra2, + extra3); + } + break; + case 2: /* warning */ + switch (arg1) { + case 0: /* unhandled event */ + if (arg2 < num_states && arg3 < num_events) { + 
dbglog_printf(timestamp, vap_id, + "%s: unhandled event %s in state %s (%#x, %#x, %#x)", + module_prefix, events[arg3], + states[arg2], extra, extra2, + extra3); + } else { + dbglog_printf(timestamp, vap_id, + "%s: unhandled event %u in state %u (%#x, %#x, %#x)", + module_prefix, arg3, arg2, extra, + extra2, extra3); + } + break; + default: + break; + + } + break; + } +} + +static A_BOOL +dbglog_sta_powersave_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + static const char *const states[] = { + "IDLE", + "ACTIVE", + "SLEEP_TXQ_FLUSH", + "SLEEP_TX_SENT", + "PAUSE", + "SLEEP_DOZE", + "SLEEP_AWAKE", + "ACTIVE_TXQ_FLUSH", + "ACTIVE_TX_SENT", + "PAUSE_TXQ_FLUSH", + "PAUSE_TX_SENT", + "IDLE_TXQ_FLUSH", + "IDLE_TX_SENT", + }; + + static const char *const events[] = { + "START", + "STOP", + "PAUSE", + "UNPAUSE", + "TIM", + "DTIM", + "SEND_COMPLETE", + "PRE_SEND", + "RX", + "HWQ_EMPTY", + "PAUSE_TIMEOUT", + "TXRX_INACTIVITY_TIMEOUT", + "PSPOLL_TIMEOUT", + "UAPSD_TIMEOUT", + "DELAYED_SLEEP_TIMEOUT", + "SEND_N_COMPLETE", + "TIDQ_PAUSE_COMPLETE", + "SEND_PSPOLL", + "SEND_SPEC_PSPOLL", + }; + + switch (dbg_id) { + case DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG: + dbglog_sm_print(timestamp, vap_id, numargs, args, "STA PS", + states, QDF_ARRAY_SIZE(states), events, + QDF_ARRAY_SIZE(events)); + break; + case PS_STA_PM_ARB_REQUEST: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "PM ARB request flags=%x, last_time=%x %s: %s", + args[1], args[2], + dbglog_get_module_str(args[0]), + args[3] ? "SLEEP" : "WAKE"); + } + break; + case PS_STA_DELIVER_EVENT: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, "STA PS: %s %s", + (args[0] == 0 ? "PAUSE_COMPLETE" : + (args[0] == 1 ? "UNPAUSE_COMPLETE" : + (args[0] == 2 ? "SLEEP" : + (args[0] == + 3 ? "AWAKE" : "UNKNOWN")))), + (args[1] == + 0 ? "SUCCESS" : (args[1] == + 1 ? "TXQ_FLUSH_TIMEOUT" + : (args[1] == + 2 ? 
"NO_ACK" + : (args[1] == + 3 ? + "RX_LEAK_TIMEOUT" + : (args[1] == + 4 ? + "PSPOLL_UAPSD_BUSY_TIMEOUT" + : + "UNKNOWN")))))); + } + break; + case PS_STA_PSPOLL_SEQ_DONE: + if (numargs == 5) { + dbglog_printf(timestamp, vap_id, + "STA PS poll: queue=%u comp=%u rsp=%u rsp_dur=%u fc=%x qos=%x %s", + args[0], args[1], args[2], args[3], + (args[4] >> 16) & 0xffff, + (args[4] >> 8) & 0xff, + (args[4] & 0xff) == + 0 ? "SUCCESS" : (args[4] & 0xff) == + 1 ? "NO_ACK" : (args[4] & 0xff) == + 2 ? "DROPPED" : (args[4] & 0xff) == + 3 ? "FILTERED" : (args[4] & 0xff) == + 4 ? "RSP_TIMEOUT" : "UNKNOWN"); + } + break; + case PS_STA_COEX_MODE: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, "STA PS COEX MODE %s", + args[0] ? "ENABLED" : "DISABLED"); + } + break; + case PS_STA_PSPOLL_ALLOW: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "STA PS-Poll %s flags=%x time=%u", + args[0] ? "ALLOW" : "DISALLOW", args[1], + args[2]); + } + break; + case PS_STA_SET_PARAM: + if (numargs == 2) { + struct { + char *name; + int is_time_param; + } params[] = { + { + "MAX_SLEEP_ATTEMPTS", 0 + }, { + "DELAYED_SLEEP", 1 + }, { + "TXRX_INACTIVITY", 1 + }, { + "MAX_TX_BEFORE_WAKE", 0 + }, { + "UAPSD_TIMEOUT", 1 + }, { + "UAPSD_CONFIG", 0 + }, { + "PSPOLL_RESPONSE_TIMEOUT", 1 + }, { + "MAX_PSPOLL_BEFORE_WAKE", 0 + }, { + "RX_WAKE_POLICY", 0 + }, { + "DELAYED_PAUSE_RX_LEAK", 1 + }, { + "TXRX_INACTIVITY_BLOCKED_RETRY", 1 + }, { + "SPEC_WAKE_INTERVAL", 1 + }, { + "MAX_SPEC_NODATA_PSPOLL", 0 + }, { + "ESTIMATED_PSPOLL_RESP_TIME", 1 + }, { + "QPOWER_MAX_PSPOLL_BEFORE_WAKE", 0 + }, { + "QPOWER_ENABLE", 0 + }, + }; + uint32_t param = args[0]; + uint32_t value = args[1]; + + if (param < QDF_ARRAY_SIZE(params)) { + if (params[param].is_time_param) { + dbglog_printf(timestamp, vap_id, + "STA PS SET_PARAM %s => %u (us)", + params[param].name, + value); + } else { + dbglog_printf(timestamp, vap_id, + "STA PS SET_PARAM %s => %#x", + params[param].name, + value); + } + } else { + 
dbglog_printf(timestamp, vap_id, + "STA PS SET_PARAM %x => %#x", + param, value); + } + } + break; + case PS_STA_SPECPOLL_TIMER_STARTED: + dbglog_printf(timestamp, vap_id, + "SPEC Poll Timer Started: Beacon time Remaining:%d wakeup interval:%d", + args[0], args[1]); + break; + case PS_STA_SPECPOLL_TIMER_STOPPED: + dbglog_printf(timestamp, vap_id, "SPEC Poll Timer Stopped"); + break; + default: + return false; + } + + return true; +} + +/* IBSS PS sub modules */ +enum wlan_ibss_ps_sub_module { + WLAN_IBSS_PS_SUB_MODULE_IBSS_NW_SM = 0, + WLAN_IBSS_PS_SUB_MODULE_IBSS_SELF_PS = 1, + WLAN_IBSS_PS_SUB_MODULE_IBSS_PEER_PS = 2, + WLAN_IBSS_PS_SUB_MODULE_MAX = 3, +}; + +#define WLAN_IBSS_PS_SUB_MODULE_OFFSET 0x1E + +static A_BOOL +dbglog_ibss_powersave_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + static const char *const nw_states[] = { + "WAIT_FOR_TBTT", + "ATIM_WINDOW_PRE_BCN", + "ATIM_WINDOW_POST_BCN", + "OUT_OF_ATIM_WINDOW", + "PAUSE_PENDING", + "PAUSED", + }; + + static const char *const ps_states[] = { + "ACTIVE", + "SLEEP_TX_SEND", + "SLEEP_DOZE_PAUSE_PENDING", + "SLEEP_DOZE", + "SLEEP_AWAKE", + "ACTIVE_TX_SEND", + "PAUSE_TX_SEND", + "PAUSED", + }; + + static const char *const peer_ps_states[] = { + "ACTIVE", + "SLEEP_AWAKE", + "SLEEP_DOZE", + "PS_UNKNOWN", + }; + + static const char *const events[] = { + "START", + "STOP", + "SWBA", + "TBTT", + "TX_BCN_CMP", + "SEND_COMPLETE", + "SEND_N_COMPLETE", + "PRE_SEND", + "RX", + "UC_INACTIVITY_TIMEOUT", + "BC_INACTIVITY_TIMEOUT", + "ATIM_WINDOW_BEGIN", + "ATIM_WINDOW_END", + "HWQ_EMPTY", + "UC_ATIM_RCVD", + "TRAFFIC_EXCHANGE_DONE", + "POWER_SAVE_STATE_CHANGE", + "NEW_PEER_JOIN", + "IBSS_VDEV_PAUSE_REQUEST", + "IBSS_VDEV_PAUSE_RESPONSE", + "IBSS_VDEV_PAUSE_TIMEOUT", + "IBSS_VDEV_UNPAUSE_REQUEST", + "PS_STATE_CHANGE", + }; + + enum wlan_ibss_ps_sub_module sub_module; + + switch (dbg_id) { + case DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG: + 
sub_module = (args[1] >> WLAN_IBSS_PS_SUB_MODULE_OFFSET) & 0x3; + switch (sub_module) { + case WLAN_IBSS_PS_SUB_MODULE_IBSS_NW_SM: + dbglog_sm_print(timestamp, vap_id, numargs, args, + "IBSS PS NW", nw_states, + QDF_ARRAY_SIZE(nw_states), events, + QDF_ARRAY_SIZE(events)); + break; + case WLAN_IBSS_PS_SUB_MODULE_IBSS_SELF_PS: + dbglog_sm_print(timestamp, vap_id, numargs, args, + "IBSS PS Self", ps_states, + QDF_ARRAY_SIZE(ps_states), events, + QDF_ARRAY_SIZE(events)); + break; + case WLAN_IBSS_PS_SUB_MODULE_IBSS_PEER_PS: + dbglog_sm_print(timestamp, vap_id, numargs, args, + "IBSS PS Peer", peer_ps_states, + QDF_ARRAY_SIZE(peer_ps_states), events, + QDF_ARRAY_SIZE(events)); + break; + default: + break; + } + break; + case IBSS_PS_DBGID_PEER_CREATE: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: peer alloc failed for peer ID:%u", + args[0]); + } else if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: create peer ID=%u", args[0]); + } + break; + case IBSS_PS_DBGID_PEER_DELETE: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: delete peer ID=%u num_peers:%d num_sleeping_peers:%d ps_enabled_for_this_peer:%d", + args[0], args[1], args[2], args[3]); + } + break; + case IBSS_PS_DBGID_VDEV_CREATE: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: vdev alloc failed", args[0]); + } else if (numargs == 0) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: vdev created"); + } + break; + case IBSS_PS_DBGID_VDEV_DELETE: + dbglog_printf(timestamp, vap_id, "IBSS PS: vdev deleted"); + break; + + case IBSS_PS_DBGID_VDEV_EVENT: + if (numargs == 1) { + if (args[0] == 5) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: vdev event for peer add"); + } else if (args[0] == 7) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: vdev event for peer delete"); + } else { + dbglog_printf(timestamp, vap_id, + "IBSS PS: vdev event %u", + args[0]); + } + } + break; + + case IBSS_PS_DBGID_PEER_EVENT: + if (numargs == 4) { + if 
(args[0] == 0xFFFF) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: pre_send for peer:%u peer_type:%u sm_event_mask:%0x", + args[1], args[3], args[2]); + } else if (args[0] == 0x20000) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: send_complete for peer:%u peer_type:%u sm_event_mask:%0x", + args[1], args[3], args[2]); + } else if (args[0] == 0x10) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: send_n_complete for peer:%u peer_type:%u sm_event_mask:%0x", + args[1], args[3], args[2]); + } else if (args[0] == 0x40) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: rx event for peer:%u peer_type:%u sm_event_mask:%0x", + args[1], args[3], args[2]); + } else if (args[0] == 0x4) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: hw_q_empty for peer:%u peer_type:%u sm_event_mask:%0x", + args[1], args[3], args[2]); + } + } + break; + + case IBSS_PS_DBGID_DELIVER_CAB: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Deliver CAB n_mpdu:%d send_flags:%0x tid_cur:%d q_depth_for_other_tid:%d", + args[0], args[1], args[2], args[3]); + } + break; + + case IBSS_PS_DBGID_DELIVER_UC_DATA: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Deliver UC data peer:%d tid:%d n_mpdu:%d send_flags:%0x", + args[0], args[1], args[2], args[3]); + } + break; + + case IBSS_PS_DBGID_DELIVER_UC_DATA_ERROR: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Deliver UC data error peer:%d tid:%d allowed_tidmask:%0x, pending_tidmap:%0x", + args[0], args[1], args[2], args[3]); + } + break; + + case IBSS_PS_DBGID_UC_INACTIVITY_TMR_RESTART: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: UC timer restart peer:%d timer_val:%0x", + args[0], args[1]); + } + break; + + case IBSS_PS_DBGID_MC_INACTIVITY_TMR_RESTART: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: MC timer restart timer_val:%0x", + args[0]); + } + break; + + case IBSS_PS_DBGID_NULL_TX_COMPLETION: + if (numargs == 3) { + dbglog_printf(timestamp, 
vap_id, + "IBSS PS: null tx completion peer:%d tx_completion_status:%d flags:%0x", + args[0], args[1], args[2]); + } + break; + + case IBSS_PS_DBGID_ATIM_TIMER_START: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: ATIM timer start tsf:%0x %0x tbtt:%0x %0x", + args[0], args[1], args[2], args[3]); + } + break; + + case IBSS_PS_DBGID_UC_ATIM_SEND: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Send ATIM to peer:%d", args[1]); + } else if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: no peers to send UC ATIM", + args[1]); + } + break; + + case IBSS_PS_DBGID_BC_ATIM_SEND: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: MC Data, num_of_peers:%d bc_atim_sent:%d", + args[1], args[0]); + } + break; + + case IBSS_PS_DBGID_UC_TIMEOUT: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: UC timeout for peer:%d send_null:%d", + args[0], args[1]); + } + break; + + case IBSS_PS_DBGID_PWR_COLLAPSE_ALLOWED: + dbglog_printf(timestamp, vap_id, + "IBSS PS: allow power collapse"); + break; + + case IBSS_PS_DBGID_PWR_COLLAPSE_NOT_ALLOWED: + if (numargs == 0) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: power collapse not allowed by INI"); + } else if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: power collapse not allowed since peer id:%d is not PS capable", + args[0]); + } else if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: power collapse not allowed - no peers in NW"); + } else if (numargs == 3) { + if (args[0] == 2) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: power collapse not allowed, non-zero qdepth %d %d", + args[1], args[2]); + } else if (args[0] == 3) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: power collapse not allowed by peer:%d peer_flags:%0x", + args[1], args[2]); + } + } else if (numargs == 5) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: power collapse not allowed by state m/c nw_cur_state:%d nw_next_state:%d 
ps_cur_state:%d flags:%0x", + args[1], args[2], args[3], args[4]); + } + break; + + case IBSS_PS_DBGID_SET_PARAM: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Set Param ID:%0x Value:%0x", + args[0], args[1]); + } + break; + + case IBSS_PS_DBGID_HOST_TX_PAUSE: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Pausing host, vdev_map:%0x", + args[0]); + } + break; + + case IBSS_PS_DBGID_HOST_TX_UNPAUSE: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Unpausing host, vdev_map:%0x", + args[0]); + } + break; + case IBSS_PS_DBGID_PS_DESC_BIN_LWM: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: LWM, vdev_map:%0x", args[0]); + } + break; + + case IBSS_PS_DBGID_PS_DESC_BIN_HWM: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: HWM, vdev_map:%0x", args[0]); + } + break; + + case IBSS_PS_DBGID_PS_KICKOUT_PEER: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Kickout peer id:%d atim_fail_cnt:%d status:%d", + args[0], args[1], args[2]); + } + break; + + case IBSS_PS_DBGID_SET_PEER_PARAM: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Set Peer Id:%d Param ID:%0x Value:%0x", + args[0], args[1], args[2]); + } + break; + + case IBSS_PS_DBGID_BCN_ATIM_WIN_MISMATCH: + if (numargs == 4) { + if (args[0] == 0xDEAD) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: ATIM window length mismatch, our's:%d, peer id:%d, peer's:%d", + args[1], args[2], args[3]); + } else if (args[0] == 0xBEEF) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Peer ATIM window length changed, peer id:%d, peer recorded atim window:%d new atim window:%d", + args[1], args[2], args[3]); + } + } + break; + + case IBSS_PS_DBGID_RX_CHAINMASK_CHANGE: + if (numargs == 2) { + if (args[1] == 0x1) { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Voting for low power chainmask from :%d", + args[0]); + } else { + dbglog_printf(timestamp, vap_id, + "IBSS PS: Voting for high power 
chainmask from :%d", + args[0]); + } + } + break; + + default: + return false; + } + + return true; +} + +static +A_BOOL dbglog_ratectrl_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + switch (dbg_id) { + case RATECTRL_DBGID_ASSOC: + dbglog_printf(timestamp, vap_id, + "RATE: ChainMask %d, phymode %d, ni_flags 0x%08x, vht_mcs_set 0x%04x, ht_mcs_set 0x%04x", + args[0], args[1], args[2], args[3], args[4]); + break; + case RATECTRL_DBGID_NSS_CHANGE: + dbglog_printf(timestamp, vap_id, "RATE: NEW NSS %d\n", args[0]); + break; + case RATECTRL_DBGID_CHAINMASK_ERR: + dbglog_printf(timestamp, vap_id, + "RATE: Chainmask ERR %d %d %d\n", args[0], + args[1], args[2]); + break; + case RATECTRL_DBGID_UNEXPECTED_FRAME: + dbglog_printf(timestamp, vap_id, + "RATE: WARN1: rate %d flags 0x%08x\n", args[0], + args[1]); + break; + case RATECTRL_DBGID_WAL_RCQUERY: + dbglog_printf(timestamp, vap_id, + "ratectrl_dbgid_wal_rcquery [rix1 %d rix2 %d rix3 %d proberix %d ppduflag 0x%x] ", + args[0], args[1], args[2], args[3], args[4]); + break; + case RATECTRL_DBGID_WAL_RCUPDATE: + dbglog_printf(timestamp, vap_id, + "ratectrl_dbgid_wal_rcupdate [numelems %d ppduflag 0x%x] ", + args[0], args[1]); + break; + case RATECTRL_DBGID_GTX_UPDATE: + { + switch (args[0]) { + case 255: + dbglog_printf(timestamp, vap_id, + "GtxInitPwrCfg [bw[last %d|cur %d] rtcode 0x%x tpc %d tpc_init_pwr_cfg %d] ", + args[1] >> 8, args[1] & 0xff, + args[2], args[3], args[4]); + break; + case 254: + dbglog_printf(timestamp, vap_id, + "gtx_cfg_addr [RTMask0@0x%x PERThreshold@0x%x gtxTPCMin@0x%x userGtxMask@0x%x] ", + args[1], args[2], args[3], + args[4]); + break; + default: + dbglog_printf(timestamp, vap_id, + "gtx_update [act %d bw %d rix 0x%x tpc %d per %d lastrssi %d] ", + args[0], args[1], args[2], + args[3], args[4], args[5]); + } + } + break; + } + return true; +} + +static +A_BOOL dbglog_ani_print_handler(uint32_t mod_id, + uint16_t 
vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + switch (dbg_id) { + case ANI_DBGID_ENABLE: + dbglog_printf(timestamp, vap_id, "ANI Enable: %d", args[0]); + break; + case ANI_DBGID_POLL: + dbglog_printf(timestamp, vap_id, + "ANI POLLING: AccumListenTime %d ListenTime %d ofdmphyerr %d cckphyerr %d", + args[0], args[1], args[2], args[3]); + break; + case ANI_DBGID_RESTART: + dbglog_printf(timestamp, vap_id, "ANI Restart"); + break; + case ANI_DBGID_CURRENT_LEVEL: + dbglog_printf(timestamp, vap_id, + "ANI CURRENT LEVEL ofdm level %d cck level %d", + args[0], args[1]); + break; + case ANI_DBGID_OFDM_LEVEL: + dbglog_printf(timestamp, vap_id, + "ANI UPDATE ofdm level %d firstep %d firstep_low %d cycpwr_thr %d self_corr_low %d", + args[0], args[1], args[2], args[3], args[4]); + break; + case ANI_DBGID_CCK_LEVEL: + dbglog_printf(timestamp, vap_id, + "ANI UPDATE cck level %d firstep %d firstep_low %d mrc_cck %d", + args[0], args[1], args[2], args[3]); + break; + case ANI_DBGID_CONTROL: + dbglog_printf(timestamp, vap_id, + "ANI CONTROL ofdmlevel %d ccklevel %d\n", + args[0]); + + break; + case ANI_DBGID_OFDM_PARAMS: + dbglog_printf(timestamp, vap_id, + "ANI ofdm_control firstep %d cycpwr %d\n", + args[0], args[1]); + break; + case ANI_DBGID_CCK_PARAMS: + dbglog_printf(timestamp, vap_id, + "ANI cck_control mrc_cck %d barker_threshold %d\n", + args[0], args[1]); + break; + case ANI_DBGID_RESET: + dbglog_printf(timestamp, vap_id, + "ANI resetting resetflag %d resetCause %8x channel index %d", + args[0], args[1], args[2]); + break; + case ANI_DBGID_SELF_CORR_LOW: + dbglog_printf(timestamp, vap_id, "ANI self_corr_low %d", + args[0]); + break; + case ANI_DBGID_FIRSTEP: + dbglog_printf(timestamp, vap_id, + "ANI firstep %d firstep_low %d", args[0], + args[1]); + break; + case ANI_DBGID_MRC_CCK: + dbglog_printf(timestamp, vap_id, "ANI mrc_cck %d", args[0]); + break; + case ANI_DBGID_CYCPWR: + dbglog_printf(timestamp, vap_id, "ANI cypwr_thresh 
%d", + args[0]); + break; + case ANI_DBGID_POLL_PERIOD: + dbglog_printf(timestamp, vap_id, + "ANI Configure poll period to %d", args[0]); + break; + case ANI_DBGID_LISTEN_PERIOD: + dbglog_printf(timestamp, vap_id, + "ANI Configure listen period to %d", args[0]); + break; + case ANI_DBGID_OFDM_LEVEL_CFG: + dbglog_printf(timestamp, vap_id, + "ANI Configure ofdm level to %d", args[0]); + break; + case ANI_DBGID_CCK_LEVEL_CFG: + dbglog_printf(timestamp, vap_id, + "ANI Configure cck level to %d", args[0]); + break; + default: + dbglog_printf(timestamp, vap_id, "ANI arg1 %d arg2 %d arg3 %d", + args[0], args[1], args[2]); + break; + } + return true; +} + +static A_BOOL +dbglog_ap_powersave_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + switch (dbg_id) { + case AP_PS_DBGID_UPDATE_TIM: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "AP PS: TIM update AID=%u %s", + args[0], args[1] ? "set" : "clear"); + } + break; + case AP_PS_DBGID_PEER_STATE_CHANGE: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u power save %s", + args[0], + args[1] ? 
"enabled" : "disabled"); + } + break; + case AP_PS_DBGID_PSPOLL: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u pspoll response tid=%u flags=%x", + args[0], args[1], args[2]); + } + break; + case AP_PS_DBGID_PEER_CREATE: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "AP PS: create peer AID=%u", args[0]); + } + break; + case AP_PS_DBGID_PEER_DELETE: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "AP PS: delete peer AID=%u", args[0]); + } + break; + case AP_PS_DBGID_VDEV_CREATE: + dbglog_printf(timestamp, vap_id, "AP PS: vdev create"); + break; + case AP_PS_DBGID_VDEV_DELETE: + dbglog_printf(timestamp, vap_id, "AP PS: vdev delete"); + break; + case AP_PS_DBGID_SYNC_TIM: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u advertised=%#x buffered=%#x", + args[0], args[1], args[2]); + } + break; + case AP_PS_DBGID_NEXT_RESPONSE: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u select next response %s%s%s", + args[0], args[1] ? "(usp active) " : "", + args[2] ? "(pending usp) " : "", + args[3] ? "(pending poll response)" : ""); + } + break; + case AP_PS_DBGID_START_SP: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u START SP tsf=%#x (%u)", + args[0], args[1], args[2]); + } + break; + case AP_PS_DBGID_COMPLETED_EOSP: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u EOSP eosp_tsf=%#x trigger_tsf=%#x", + args[0], args[1], args[2]); + } + break; + case AP_PS_DBGID_TRIGGER: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u TRIGGER tsf=%#x %s%s", + args[0], args[1], + args[2] ? "(usp active) " : "", + args[3] ? 
"(send_n in progress)" : ""); + } + break; + case AP_PS_DBGID_DUPLICATE_TRIGGER: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u DUP TRIGGER tsf=%#x seq=%u ac=%u", + args[0], args[1], args[2], args[3]); + } + break; + case AP_PS_DBGID_UAPSD_RESPONSE: + if (numargs == 5) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u UAPSD response tid=%u, n_mpdu=%u flags=%#x max_sp=%u current_sp=%u", + args[0], args[1], args[2], args[3], + (args[4] >> 16) & 0xffff, + args[4] & 0xffff); + } + break; + case AP_PS_DBGID_SEND_COMPLETE: + if (numargs == 5) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u SEND_COMPLETE fc=%#x qos=%#x %s%s", + args[0], args[1], args[2], + args[3] ? "(usp active) " : "", + args[4] ? "(pending poll response)" : ""); + } + break; + case AP_PS_DBGID_SEND_N_COMPLETE: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u SEND_N_COMPLETE %s%s", + args[0], + args[1] ? "(usp active) " : "", + args[2] ? "(pending poll response)" : ""); + } + break; + case AP_PS_DBGID_DETECT_OUT_OF_SYNC_STA: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "AP PS: AID=%u detected out-of-sync now=%u tx_waiting=%u txq_depth=%u", + args[0], args[1], args[2], args[3]); + } + break; + case AP_PS_DBGID_DELIVER_CAB: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "AP PS: CAB %s n_mpdus=%u, flags=%x, extra=%u", + (args[0] == 17) ? 
"MGMT" : "DATA", + args[1], args[2], args[3]); + } + break; + default: + return false; + } + + return true; +} + +static A_BOOL +dbglog_wal_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, uint16_t numargs, uint32_t *args) +{ + static const char *const states[] = { + "ACTIVE", + "WAIT", + "WAIT_FILTER", + "PAUSE", + "PAUSE_SEND_N", + "BLOCK", + }; + + static const char *const events[] = { + "PAUSE", + "PAUSE_FILTER", + "UNPAUSE", + + "BLOCK", + "BLOCK_FILTER", + "UNBLOCK", + + "HWQ_EMPTY", + "ALLOW_N", + }; + +#define WAL_VDEV_TYPE(type) \ + (type == 0 ? "AP" : \ + (type == 1 ? "STA" : \ + (type == 2 ? "IBSS" : \ + (type == 2 ? "MONITOR" : \ + "UNKNOWN")))) + +#define WAL_SLEEP_STATE(state) \ + (state == 1 ? "NETWORK SLEEP" : \ + (state == 2 ? "AWAKE" : \ + (state == 3 ? "SYSTEM SLEEP" : \ + "UNKNOWN"))) + + switch (dbg_id) { + case DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG: + dbglog_sm_print(timestamp, vap_id, numargs, args, "TID PAUSE", + states, QDF_ARRAY_SIZE(states), events, + QDF_ARRAY_SIZE(events)); + break; + case WAL_DBGID_SET_POWER_STATE: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "WAL %s => %s, req_count=%u", + WAL_SLEEP_STATE(args[0]), + WAL_SLEEP_STATE(args[1]), args[2]); + } + break; + case WAL_DBGID_CHANNEL_CHANGE_FORCE_RESET: + if (numargs == 4) { + dbglog_printf(timestamp, vap_id, + "WAL channel change (force reset) freq=%u, flags=%u mode=%u rx_ok=%u tx_ok=%u", + args[0] & 0x0000ffff, + (args[0] & 0xffff0000) >> 16, args[1], + args[2], args[3]); + } + break; + case WAL_DBGID_CHANNEL_CHANGE: + if (numargs == 2) { + dbglog_printf(timestamp, vap_id, + "WAL channel change freq=%u, mode=%u flags=%u rx_ok=1 tx_ok=1", + args[0] & 0x0000ffff, + (args[0] & 0xffff0000) >> 16, args[1]); + } + break; + case WAL_DBGID_VDEV_START: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, "WAL %s vdev started", + WAL_VDEV_TYPE(args[0])); + } + break; + case WAL_DBGID_VDEV_STOP: + dbglog_printf(timestamp, 
vap_id, "WAL %s vdev stopped", + WAL_VDEV_TYPE(args[0])); + break; + case WAL_DBGID_VDEV_UP: + dbglog_printf(timestamp, vap_id, "WAL %s vdev up, count=%u", + WAL_VDEV_TYPE(args[0]), args[1]); + break; + case WAL_DBGID_VDEV_DOWN: + dbglog_printf(timestamp, vap_id, "WAL %s vdev down, count=%u", + WAL_VDEV_TYPE(args[0]), args[1]); + break; + case WAL_DBGID_TX_MGMT_DESCID_SEQ_TYPE_LEN: + dbglog_printf(timestamp, vap_id, + "WAL Tx Mgmt frame desc_id=0x%x, seq=0x%x, type=0x%x, len=0x%x islocal=0x%x", + args[0], args[1], args[2], + (args[3] & 0xffff0000) >> 16, + args[3] & 0x0000ffff); + break; + case WAL_DBGID_TX_MGMT_COMP_DESCID_STATUS: + dbglog_printf(timestamp, vap_id, + "WAL Tx Mgmt frame completion desc_id=0x%x, status=0x%x, islocal=0x%x", + args[0], args[1], args[2]); + break; + case WAL_DBGID_TX_DATA_MSDUID_SEQ_TYPE_LEN: + dbglog_printf(timestamp, vap_id, + "WAL Tx Data frame msdu_id=0x%x, seq=0x%x, type=0x%x, len=0x%x", + args[0], args[1], args[2], args[3]); + break; + case WAL_DBGID_TX_DATA_COMP_MSDUID_STATUS: + dbglog_printf(timestamp, vap_id, + "WAL Tx Data frame completion desc_id=0x%x, status=0x%x, seq=0x%x", + args[0], args[1], args[2]); + break; + case WAL_DBGID_RESET_PCU_CYCLE_CNT: + dbglog_printf(timestamp, vap_id, + "WAL PCU cycle counter value at reset:%x", + args[0]); + break; + case WAL_DBGID_TX_DISCARD: + dbglog_printf(timestamp, vap_id, + "WAL Tx enqueue discard msdu_id=0x%x", args[0]); + break; + case WAL_DBGID_SET_HW_CHAINMASK: + dbglog_printf(timestamp, vap_id, + "WAL_DBGID_SET_HW_CHAINMASK pdev=%d, txchain=0x%x, rxchain=0x%x", + args[0], args[1], args[2]); + break; + case WAL_DBGID_SET_HW_CHAINMASK_TXRX_STOP_FAIL: + dbglog_printf(timestamp, vap_id, + "WAL_DBGID_SET_HW_CHAINMASK_TXRX_STOP_FAIL rxstop=%d, txstop=%d", + args[0], args[1]); + break; + case WAL_DBGID_GET_HW_CHAINMASK: + dbglog_printf(timestamp, vap_id, "WAL_DBGID_GET_HW_CHAINMASK " + "txchain=0x%x, rxchain=0x%x", args[0], args[1]); + break; + case WAL_DBGID_SMPS_DISABLE: + 
dbglog_printf(timestamp, vap_id, "WAL_DBGID_SMPS_DISABLE"); + break; + case WAL_DBGID_SMPS_ENABLE_HW_CNTRL: + dbglog_printf(timestamp, vap_id, + "WAL_DBGID_SMPS_ENABLE_HW_CNTRL low_pwr_mask=0x%x, high_pwr_mask=0x%x", + args[0], args[1]); + break; + case WAL_DBGID_SMPS_SWSEL_CHAINMASK: + dbglog_printf(timestamp, vap_id, + "WAL_DBGID_SMPS_SWSEL_CHAINMASK low_pwr=0x%x, chain_mask=0x%x", + args[0], args[1]); + break; + default: + return false; + } + + return true; +} + +static A_BOOL +dbglog_scan_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, uint16_t numargs, uint32_t *args) +{ + static const char *const states[] = { + "IDLE", + "BSSCHAN", + "WAIT_FOREIGN_CHAN", + "FOREIGN_CHANNEL", + "TERMINATING" + }; + + static const char *const events[] = { + "REQ", + "STOP", + "BSSCHAN", + "FOREIGN_CHAN", + "CHECK_ACTIVITY", + "REST_TIME_EXPIRE", + "DWELL_TIME_EXPIRE", + "PROBE_TIME_EXPIRE", + }; + + switch (dbg_id) { + case DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG: + dbglog_sm_print(timestamp, vap_id, numargs, args, "SCAN", + states, QDF_ARRAY_SIZE(states), events, + QDF_ARRAY_SIZE(events)); + break; + default: + return false; + } + + return true; +} + +static +A_BOOL dbglog_coex_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + uint8_t i; + char *dbg_id_str; + + static const char *const wlan_rx_xput_status[] = { + "WLAN_XPUT_NORMAL", + "WLAN_XPUT_UNDER_THRESH", + "WLAN_XPUT_CRITICAL", + "WLAN_XPUT_RECOVERY_TIMEOUT", + }; + + static const char *const coex_sched_req[] = { + "SCHED_REQ_NEXT", + "SCHED_REQ_BT", + "SCHED_REQ_WLAN", + "SCHED_REQ_POSTPAUSE", + "SCHED_REQ_UNPAUSE", + }; + + static const char *const coex_sched_type[] = { + "SCHED_NONE", + "SCHED_WLAN", + "SCHED_BT", + "SCHED_WLAN_PAUSE", + "SCHED_WLAN_POSTPAUSE", + "SCHED_WLAN_UNPAUSE", + "COEX_SCHED_MWS", + }; + + static const char *const coex_trf_mgmt_type[] = { + "TRF_MGMT_FREERUN", + 
"TRF_MGMT_SHAPE_PM", + "TRF_MGMT_SHAPE_PSP", + "TRF_MGMT_SHAPE_S_CTS", + "TRF_MGMT_SHAPE_OCS", + "TRF_MGMT_SHAPE_FIXED_TIME", + "TRF_MGMT_SHAPE_NOA", + "TRF_MGMT_SHAPE_OCS_CRITICAL", + "TRF_MGMT_NONE", + }; + + static const char *const coex_system_status[] = { + "ALL_OFF", + "BTCOEX_NOT_REQD", + "WLAN_IS_IDLE", + "EXECUTE_SCHEME", + "BT_FULL_CONCURRENCY", + "WLAN_SLEEPING", + "WLAN_IS_PAUSED", + "WAIT_FOR_NEXT_ACTION", + "SOC_WAKE", + }; + + static const char *const wlan_rssi_type[] = { + "LOW_RSSI", + "MID_RSSI", + "HI_RSSI", + "INVALID_RSSI", + }; + + static const char *const coex_bt_scheme[] = { + "IDLE_CTRL", + "ACTIVE_ASYNC_CTRL", + "PASSIVE_SYNC_CTRL", + "ACTIVE_SYNC_CTRL", + "DEFAULT_CTRL", + "CONCURRENCY_CTRL", + }; + + static const char *const wal_peer_rx_rate_stats_event_sent[] = { + "PR_RX_EVT_SENT_NONE", + "PR_RX_EVT_SENT_LOWER", + "PR_RX_EVT_SENT_UPPER", + }; + + static const char *const wlan_psp_stimulus[] = { + "ENTRY", + "EXIT", + "PS_READY", + "PS_NOT_READY", + "RX_MORE_DATA_RCVD", + "RX_NO_MORE_DATA_RCVD", + "TX_DATA_COMPLT", + "TX_COMPLT", + "TIM_SET", + "REQ", + "DONE_SUCCESS", + "DONE_NO_PS_POLL_ACK", + "DONE_RESPONSE_TMO", + "DONE_DROPPED", + "DONE_FILTERED", + "WLAN_START", + "NONWLAN_START", + "NONWLAN_INTVL_UPDATE", + "NULL_TX", + "NULL_TX_COMPLT", + "BMISS_FIRST", + "NULL_TX_FAIL", + "RX_NO_MORE_DATA_DATAFRM", + }; + + static const char *const coex_pspoll_state[] = { + "STATE_DISABLED", + "STATE_NOT_READY", + "STATE_ENABLED", + "STATE_READY", + "STATE_TX_STATUS", + "STATE_RX_STATUS", + }; + + static const char *const coex_scheduler_interval[] = { + "COEX_SCHED_NONWLAN_INT", + "COEX_SCHED_WLAN_INT", + }; + + static const char *const wlan_weight[] = { + "BT_COEX_BASE", + "BT_COEX_LOW", + "BT_COEX_MID", + "BT_COEX_MID_NONSYNC", + "BT_COEX_HI_NONVOICE", + "BT_COEX_HI", + "BT_COEX_CRITICAL", + }; + + static const char *const wlan_power_state[] = { + "SLEEP", + "AWAKE", + "FULL_SLEEP", + }; + + static const char *const coex_psp_error_type[] = { 
+ "DISABLED_STATE", + "VDEV_NULL", + "COEX_PSP_ENTRY", + "ZERO_INTERVAL", + "COEX_PSP_EXIT", + "READY_DISABLED", + "READY_NOT_DISABLED", + "POLL_PKT_DROPPED", + "SET_TIMER_PARAM", + }; + + static const char *const wlan_phymode[] = { + "A", + "G", + "B", + "G_ONLY", + "NA_HT20", + "NG_HT20", + "NA_HT40", + "NG_HT40", + "AC_VHT20", + "AC_VHT40", + "AC_VHT80", + "AC_VHT20_2G", + "AC_VHT40_2G", + "AC_VHT80_2G", + "UNKNOWN", + }; + + static const char *const wlan_curr_band[] = { + "2G", + "5G", + }; + + dbg_id_str = dbglog_get_msg(mod_id, dbg_id); + + switch (dbg_id) { + case COEX_SYSTEM_UPDATE: + if (numargs == 1 && args[0] < 9) { + dbglog_printf(timestamp, vap_id, "%s: %s", dbg_id_str, + coex_system_status[args[0]]); + } else if (numargs >= 5 && args[0] < 9 && args[2] < 9) { + dbglog_printf(timestamp, vap_id, + "%s: %s, WlanSysState(0x%x), %s, NumChains(%u), AggrLimit(%u)", + dbg_id_str, coex_system_status[args[0]], + args[1], coex_trf_mgmt_type[args[2]], + args[3], args[4]); + } else { + return false; + } + break; + case COEX_SCHED_START: + if (numargs >= 5 && args[0] < 5 && args[2] < 9 && args[3] < 4 + && args[4] < 4) { + if (args[1] == 0xffffffff) { + dbglog_printf(timestamp, vap_id, + "%s: %s, DETERMINE_DURATION, %s, %s, %s", + dbg_id_str, + coex_sched_req[args[0]], + coex_trf_mgmt_type[args[2]], + wlan_rx_xput_status[args[3]], + wlan_rssi_type[args[4]]); + } else { + dbglog_printf(timestamp, vap_id, + "%s: %s, IntvlDur(%u), %s, %s, %s", + dbg_id_str, + coex_sched_req[args[0]], args[1], + coex_trf_mgmt_type[args[2]], + wlan_rx_xput_status[args[3]], + wlan_rssi_type[args[4]]); + } + } else { + return false; + } + break; + case COEX_SCHED_RESULT: + if (numargs >= 5 && args[0] < 5 && args[1] < 9 && args[2] < 9) { + dbglog_printf(timestamp, vap_id, + "%s: %s, %s, %s, CoexMgrPolicy(%u), IdleOverride(%u)", + dbg_id_str, coex_sched_req[args[0]], + coex_trf_mgmt_type[args[1]], + coex_trf_mgmt_type[args[2]], args[3], + args[4]); + } else { + return false; + } + break; + 
case COEX_BT_SCHEME: + if (numargs >= 1 && args[0] < 6) { + dbglog_printf(timestamp, vap_id, "%s: %s", dbg_id_str, + coex_bt_scheme[args[0]]); + } else { + return false; + } + break; + case COEX_TRF_FREERUN: + if (numargs >= 5 && args[0] < 7) { + dbglog_printf(timestamp, vap_id, + "%s: %s, AllocatedBtIntvls(%u), BtIntvlCnt(%u), AllocatedWlanIntvls(%u), WlanIntvlCnt(%u)", + dbg_id_str, coex_sched_type[args[0]], + args[1], args[2], args[3], args[4]); + } else { + return false; + } + break; + case COEX_TRF_SHAPE_PM: /* used by ocs now */ + if (numargs >= 3) { + dbglog_printf(timestamp, vap_id, + "%s: IntvlLength(%u), BtDuration(%u), WlanDuration(%u)", + dbg_id_str, args[0], args[1], args[2]); + } else { + return false; + } + break; + case COEX_SYSTEM_MONITOR: + if (numargs >= 5 && args[1] < 4 && args[4] < 4) { + dbglog_printf(timestamp, vap_id, + "%s: WlanRxCritical(%u), %s, MinDirectRxRate(%u), MonitorActiveNum(%u), %s", + dbg_id_str, args[0], + wlan_rx_xput_status[args[1]], args[2], + args[3], wlan_rssi_type[args[4]]); + } else { + return false; + } + break; + case COEX_RX_RATE: + if (numargs >= 5 && args[4] < 3) { + dbglog_printf(timestamp, vap_id, + "%s: NumUnderThreshPeers(%u), MinDirectRate(%u), LastRateSample(%u), DeltaT(%u), %s", + dbg_id_str, args[0], args[1], args[2], + args[3], + wal_peer_rx_rate_stats_event_sent[args + [4]]); + } else { + return false; + } + break; + case COEX_WLAN_INTERVAL_START: + if (numargs >= 5) { + dbglog_printf(timestamp, vap_id, + "%s: WlanIntvlCnt(%u), Duration(%u), Weight(%u), BaseIdleOverride(%u), WeightMat[0](0x%x)", + dbg_id_str, args[0], args[1], args[2], + args[3], args[4]); + } else { + return false; + } + break; + case COEX_WLAN_POSTPAUSE_INTERVAL_START: + if (numargs >= 4) { + dbglog_printf(timestamp, vap_id, + "%s: WlanPostPauseIntvlCnt(%u), XputMonitorActiveNum(%u), Duration(%u), Weight(%u)", + dbg_id_str, args[0], args[1], args[2], + args[3]); + } else { + return false; + } + break; + case COEX_BT_INTERVAL_START: + if 
(numargs >= 5) { + dbglog_printf(timestamp, vap_id, + "%s: BtIntvlCnt(%u), Duration(%u), Weight(%u), BaseIdleOverride(%u), WeightMat[0](0x%x), ", + dbg_id_str, args[0], args[1], args[2], + args[3], args[4]); + } else { + return false; + } + break; + case COEX_POWER_CHANGE: + if (numargs >= 3 && args[1] < 3 && args[2] < 3) { + dbglog_printf(timestamp, vap_id, + "%s: Event(0x%x) %s->%s", dbg_id_str, + args[0], wlan_power_state[args[1]], + wlan_power_state[args[2]]); + } else { + return false; + } + break; + case COEX_CHANNEL_CHANGE: + if (numargs >= 5 && args[3] < 2 && args[4] < 15) { + dbglog_printf(timestamp, vap_id, + "%s: %uMhz->%uMhz, WlanSysState(0x%x), CurrBand(%s), PhyMode(%s)", + dbg_id_str, args[0], args[1], args[2], + wlan_curr_band[args[3]], + wlan_phymode[args[4]]); + } else { + return false; + } + break; + case COEX_PSP_MGR_ENTER: + if (numargs >= 5 && args[0] < 23 && + args[1] < 6 && args[3] < 2) { + dbglog_printf(timestamp, vap_id, + "%s: %s, %s, PsPollAvg(%u), %s, CurrT(%u)", + dbg_id_str, wlan_psp_stimulus[args[0]], + coex_pspoll_state[args[1]], args[2], + coex_scheduler_interval[args[3]], + args[4]); + } else { + return false; + } + break; + /* Translate following into decimal */ + case COEX_SINGLECHAIN_DBG_1: + case COEX_SINGLECHAIN_DBG_2: + case COEX_SINGLECHAIN_DBG_3: + case COEX_MULTICHAIN_DBG_1: + case COEX_MULTICHAIN_DBG_2: + case COEX_MULTICHAIN_DBG_3: + case BTCOEX_DBG_MCI_1: + case BTCOEX_DBG_MCI_2: + case BTCOEX_DBG_MCI_3: + case BTCOEX_DBG_MCI_4: + case BTCOEX_DBG_MCI_5: + case BTCOEX_DBG_MCI_6: + case BTCOEX_DBG_MCI_7: + case BTCOEX_DBG_MCI_8: + case BTCOEX_DBG_MCI_9: + case BTCOEX_DBG_MCI_10: + + if (numargs > 0) { + dbglog_printf_no_line_break(timestamp, vap_id, "%s: %u", + dbg_id_str, args[0]); + for (i = 1; i < numargs; i++) + printk("%u", args[i]); + printk("\n"); + } else { + return false; + } + break; + case COEX_LinkID: + if (numargs >= 4) { + if (args[0]) { /* Add profile */ + dbglog_printf(timestamp, vap_id, + "%s Alloc: 
LocalID(%u), RemoteID(%u), MinFreeLocalID(%u)", + dbg_id_str, args[1], args[2], + args[3]); + } else { /* Remove profile */ + dbglog_printf(timestamp, vap_id, + "%s Dealloc: LocalID(%u), RemoteID(%u), MinFreeLocalID(%u)", + dbg_id_str, args[1], args[2], + args[3]); + } + } else { + return false; + } + break; + case COEX_PSP_MGR_RESULT: + if (numargs >= 5 && args[0] < 6) { + dbglog_printf(timestamp, vap_id, + "%s: %s, PsPollAvg(%u), EstimationOverrun(%u), EstimationUnderun(%u), NotReadyErr(%u)", + dbg_id_str, coex_pspoll_state[args[0]], + args[1], args[2], args[3], args[4]); + } else { + return false; + } + break; + case COEX_TRF_SHAPE_PSP: + if (numargs >= 5 && args[0] < 7 && args[1] < 7) { + dbglog_printf(timestamp, vap_id, + "%s: %s, %s, Dur(%u), BtTriggerRecvd(%u), PspWlanCritical(%u)", + dbg_id_str, coex_sched_type[args[0]], + wlan_weight[args[1]], args[2], args[3], + args[4]); + } else { + return false; + } + break; + case COEX_PSP_SPEC_POLL: + if (numargs >= 5) { + dbglog_printf(timestamp, vap_id, + "%s: PsPollSpecEna(%u), Count(%u), NextTS(%u), AllowSpecPsPollTx(%u), Intvl(%u)", + dbg_id_str, args[0], args[1], args[2], + args[3], args[4]); + } else { + return false; + } + break; + case COEX_PSP_READY_STATE: + if (numargs >= 5) { + dbglog_printf(timestamp, vap_id, + "%s: T2NonWlan(%u), CoexSchedulerEndTS(%u), MoreData(%u), PSPRespExpectedTS(%u), NonWlanIdleT(%u)", + dbg_id_str, args[0], args[1], args[2], + args[3], args[4]); + } else { + return false; + } + break; + case COEX_PSP_NONWLAN_INTERVAL: + if (numargs >= 4) { + dbglog_printf(timestamp, vap_id, + "%s: NonWlanBaseIntvl(%u), NonWlanIdleT(%u), PSPSpecIntvl(%u), ApRespTimeout(%u)", + dbg_id_str, args[0], args[1], args[2], + args[3]); + } else { + return false; + } + break; + case COEX_PSP_ERROR: + if (numargs >= 1 && args[0] < 9) { + dbglog_printf_no_line_break(timestamp, vap_id, "%s: %s", + dbg_id_str, + coex_psp_error_type[args + [0]]); + for (i = 1; i < numargs; i++) { + 
AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + (", %u", args[i])); + } + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("\n")); + } else { + return false; + } + break; + case COEX_PSP_STAT_1: + if (numargs >= 5) { + dbglog_printf(timestamp, vap_id, + "%s: ApResp0(%u), ApResp1(%u), ApResp2(%u), ApResp3(%u), ApResp4(%u)", + dbg_id_str, args[0], args[1], args[2], + args[3], args[4]); + } else { + return false; + } + break; + case COEX_PSP_STAT_2: + if (numargs >= 5) { + dbglog_printf(timestamp, vap_id, + "%s: DataPt(%u), Max(%u), NextApRespIndex(%u), NumOfValidDataPts(%u), PsPollAvg(%u)", + dbg_id_str, args[0], args[1], args[2], + args[3], args[4]); + } else { + return false; + } + break; + case COEX_PSP_RX_STATUS_STATE_1: + if (numargs >= 5) { + if (args[2]) { + dbglog_printf(timestamp, vap_id, + "%s: RsExpectedTS(%u), RespActualTS(%u), Overrun, RsOverrunT(%u), RsRxDur(%u)", + dbg_id_str, args[0], args[1], + args[3], args[4]); + } else { + dbglog_printf(timestamp, vap_id, + "%s: RsExpectedTS(%u), RespActualTS(%u), Underrun, RsUnderrunT(%u), RsRxDur(%u)", + dbg_id_str, args[0], args[1], + args[3], args[4]); + } + } else { + return false; + } + break; + default: + return false; + } + + return true; +} + +static A_BOOL +dbglog_beacon_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + static const char *const states[] = { + "INIT", + "ADJUST_START", + "ADJUSTING", + "ADJUST_HOLD", + }; + + static const char *const events[] = { + "ADJUST_START", + "ADJUST_RESTART", + "ADJUST_STOP", + "ADJUST_PAUSE", + "ADJUST_UNPAUSE", + "ADJUST_INC_SLOP_STEP", + "ADJUST_HOLD", + "ADJUST_HOLD_TIME_OUT", + }; + + switch (dbg_id) { + case DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG: + dbglog_sm_print(timestamp, vap_id, numargs, args, "EARLY_RX", + states, QDF_ARRAY_SIZE(states), events, + QDF_ARRAY_SIZE(events)); + break; + case BEACON_EVENT_EARLY_RX_BMISS_STATUS: + if (numargs == 3) { + dbglog_printf(timestamp, vap_id, + "early_rx bmiss 
status:rcv=%d total=%d miss=%d", + args[0], args[1], args[2]); + } + break; + case BEACON_EVENT_EARLY_RX_SLEEP_SLOP: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "early_rx update sleep_slop:%d", args[0]); + } + break; + case BEACON_EVENT_EARLY_RX_CONT_BMISS_TIMEOUT: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "early_rx cont bmiss timeout,update sleep_slop:%d", + args[0]); + } + break; + case BEACON_EVENT_EARLY_RX_PAUSE_SKIP_BCN_NUM: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "early_rx skip bcn num:%d", args[0]); + } + break; + case BEACON_EVENT_EARLY_RX_CLK_DRIFT: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "early_rx clk drift:%d", args[0]); + } + break; + case BEACON_EVENT_EARLY_RX_AP_DRIFT: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "early_rx ap drift:%d", args[0]); + } + break; + case BEACON_EVENT_EARLY_RX_BCN_TYPE: + if (numargs == 1) { + dbglog_printf(timestamp, vap_id, + "early_rx bcn type:%d", args[0]); + } + break; + default: + return false; + } + + return true; +} + +static A_BOOL +dbglog_data_txrx_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + switch (dbg_id) { + case DATA_TXRX_DBGID_RX_DATA_SEQ_LEN_INFO: + dbglog_printf(timestamp, vap_id, + "DATA RX seq=0x%x, len=0x%x, stored=0x%x, duperr=0x%x", + args[0], args[1], (args[2] & 0xffff0000) >> 16, + args[2] & 0x0000ffff); + break; + default: + return false; + } + + return true; +} + +static +A_BOOL dbglog_smps_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + static const char *const states[] = { + "S_INACTIVE", + "S_STATIC", + "S_DYNAMIC", + "S_STALLED", + "S_INACTIVE_WAIT", + "S_STATIC_WAIT", + "S_DYNAMIC_WAIT", + }; + + static const char *const events[] = { + "E_STOP", + "E_STOP_COMPL", + "E_START", + "E_STATIC", + "E_STATIC_COMPL", + "E_DYNAMIC", + 
"E_DYNAMIC_COMPL", + "E_STALL", + "E_RSSI_ABOVE_THRESH", + "E_RSSI_BELOW_THRESH", + "E_FORCED_NONE", + }; + switch (dbg_id) { + case DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG: + dbglog_sm_print(timestamp, vap_id, numargs, args, "STA_SMPS SM", + states, QDF_ARRAY_SIZE(states), events, + QDF_ARRAY_SIZE(events)); + break; + case STA_SMPS_DBGID_CREATE_PDEV_INSTANCE: + dbglog_printf(timestamp, vap_id, "STA_SMPS Create PDEV ctx %#x", + args[0]); + break; + case STA_SMPS_DBGID_CREATE_VIRTUAL_CHAN_INSTANCE: + dbglog_printf(timestamp, vap_id, + "STA_SMPS Create Virtual Chan ctx %#x", args[0]); + break; + case STA_SMPS_DBGID_DELETE_VIRTUAL_CHAN_INSTANCE: + dbglog_printf(timestamp, vap_id, + "STA_SMPS Delete Virtual Chan ctx %#x", args[0]); + break; + case STA_SMPS_DBGID_CREATE_STA_INSTANCE: + dbglog_printf(timestamp, vap_id, "STA_SMPS Create STA ctx %#x", + args[0]); + break; + case STA_SMPS_DBGID_DELETE_STA_INSTANCE: + dbglog_printf(timestamp, vap_id, "STA_SMPS Delete STA ctx %#x", + args[0]); + break; + case STA_SMPS_DBGID_VIRTUAL_CHAN_SMPS_START: + break; + case STA_SMPS_DBGID_VIRTUAL_CHAN_SMPS_STOP: + break; + case STA_SMPS_DBGID_SEND_SMPS_ACTION_FRAME: + dbglog_printf(timestamp, vap_id, + "STA_SMPS STA %#x Signal SMPS mode as %s; cb_flags %#x", + args[0], + (args[1] == + 0 ? "DISABLED" : (args[1] == + 0x1 ? "STATIC" : (args[1] == + 0x3 ? 
+ "DYNAMIC" : + "UNKNOWN"))), + args[2]); + break; + case STA_SMPS_DBGID_DTIM_EBT_EVENT_CHMASK_UPDATE: + dbglog_printf(timestamp, vap_id, + "STA_SMPS_DBGID_DTIM_EBT_EVENT_CHMASK_UPDATE"); + break; + case STA_SMPS_DBGID_DTIM_CHMASK_UPDATE: + dbglog_printf(timestamp, vap_id, + "STA_SMPS_DBGID_DTIM_CHMASK_UPDATE tx_mask %#x rx_mask %#x arb_dtim_mask %#x", + args[0], args[1], args[2]); + break; + case STA_SMPS_DBGID_DTIM_BEACON_EVENT_CHMASK_UPDATE: + dbglog_printf(timestamp, vap_id, + "STA_SMPS_DBGID_DTIM_BEACON_EVENT_CHMASK_UPDATE"); + break; + case STA_SMPS_DBGID_DTIM_POWER_STATE_CHANGE: + dbglog_printf(timestamp, vap_id, + "STA_SMPS_DBGID_DTIM_POWER_STATE_CHANGE cur_pwr_state %s new_pwr_state %s", + (args[0] == + 0x1 ? "SLEEP" : (args[0] == + 0x2 ? "AWAKE" : (args[0] == + 0x3 ? + "FULL_SLEEP" : + "UNKNOWN"))), + (args[1] == + 0x1 ? "SLEEP" : (args[1] == + 0x2 ? "AWAKE" : (args[1] == + 0x3 ? + "FULL_SLEEP" : + "UNKNOWN")))); + break; + case STA_SMPS_DBGID_DTIM_CHMASK_UPDATE_SLEEP: + dbglog_printf(timestamp, vap_id, + "STA_SMPS_DBGID_DTIM_CHMASK_UPDATE_SLEEP tx_mask %#x rx_mask %#x orig_rx %#x dtim_rx %#x", + args[0], args[1], args[2], args[3]); + break; + case STA_SMPS_DBGID_DTIM_CHMASK_UPDATE_AWAKE: + dbglog_printf(timestamp, vap_id, + "STA_SMPS_DBGID_DTIM_CHMASK_UPDATE_AWAKE tx_mask %#x rx_mask %#x orig_rx %#x", + args[0], args[1], args[2]); + break; + default: + dbglog_printf(timestamp, vap_id, "STA_SMPS: UNKNOWN DBGID!"); + return false; + } + + return true; +} + +static A_BOOL +dbglog_p2p_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, uint16_t numargs, uint32_t *args) +{ + static const char *const states[] = { + "ACTIVE", + "DOZE", + "TX_BCN", + "CTWIN", + "OPPPS", + }; + + static const char *const events[] = { + "ONESHOT_NOA", + "CTWINDOW", + "PERIODIC_NOA", + "IDLE", + "NOA_CHANGED", + "TBTT", + "TX_BCN_CMP", + "OPPPS_OK", + "OPPPS_CHANGED", + }; + + switch (dbg_id) { + case 
DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG: + dbglog_sm_print(timestamp, vap_id, numargs, args, "P2P GO PS", + states, QDF_ARRAY_SIZE(states), events, + QDF_ARRAY_SIZE(events)); + break; + default: + return false; + } + + return true; +} + +static A_BOOL +dbglog_pcielp_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, + uint16_t numargs, uint32_t *args) +{ + static const char *const states[] = { + "STOP", + "TX", + "RX", + "SLEEP", + "SUSPEND", + }; + + static const char *const events[] = { + "VDEV_UP", + "ALL_VDEV_DOWN", + "AWAKE", + "SLEEP", + "TX_ACTIVITY", + "TX_INACTIVITY", + "TX_AC_CHANGE", + "SUSPEND", + "RESUME", + }; + + switch (dbg_id) { + case DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG: + dbglog_sm_print(timestamp, vap_id, numargs, args, "PCIELP", + states, QDF_ARRAY_SIZE(states), events, + QDF_ARRAY_SIZE(events)); + break; + default: + return false; + } + + return true; +} + +#ifdef WLAN_OPEN_SOURCE +static int dbglog_block_open(struct inode *inode, struct file *file) +{ + struct fwdebug *fwlog = inode->i_private; + + if (fwlog->fwlog_open) + return -EBUSY; + + fwlog->fwlog_open = true; + + file->private_data = inode->i_private; + return 0; +} + +static int dbglog_block_release(struct inode *inode, struct file *file) +{ + struct fwdebug *fwlog = inode->i_private; + + fwlog->fwlog_open = false; + + return 0; +} + +static ssize_t dbglog_block_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct fwdebug *fwlog = file->private_data; + struct sk_buff *skb; + ssize_t ret_cnt; + size_t len = 0, not_copied; + char *buf; + int ret; + + buf = vzalloc(count); + if (!buf) + return -ENOMEM; + + spin_lock_bh(&fwlog->fwlog_queue.lock); + + if (skb_queue_len(&fwlog->fwlog_queue) == 0) { + /* we must init under queue lock */ + init_completion(&fwlog->fwlog_completion); + + spin_unlock_bh(&fwlog->fwlog_queue.lock); + + ret = + wait_for_completion_interruptible(&fwlog->fwlog_completion); + if 
(ret == -ERESTARTSYS) { + vfree(buf); + return ret; + } + + spin_lock_bh(&fwlog->fwlog_queue.lock); + } + + while ((skb = __skb_dequeue(&fwlog->fwlog_queue))) { + if (skb->len > count - len) { + /* not enough space, put skb back and leave */ + __skb_queue_head(&fwlog->fwlog_queue, skb); + break; + } + + memcpy(buf + len, skb->data, skb->len); + len += skb->len; + + kfree_skb(skb); + } + + spin_unlock_bh(&fwlog->fwlog_queue.lock); + + /* FIXME: what to do if len == 0? */ + not_copied = copy_to_user(user_buf, buf, len); + if (not_copied != 0) { + ret_cnt = -EFAULT; + goto out; + } + + *ppos = *ppos + len; + + ret_cnt = len; + +out: + vfree(buf); + + return ret_cnt; +} + +static const struct file_operations fops_dbglog_block = { + .open = dbglog_block_open, + .release = dbglog_block_release, + .read = dbglog_block_read, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +#ifdef WLAN_DEBUGFS + +static void dbglog_debugfs_init(wmi_unified_t wmi_handle) +{ + + wmi_handle->debugfs_phy = debugfs_create_dir(CLD_DEBUGFS_DIR, NULL); + if (!wmi_handle->debugfs_phy) { + qdf_print("Failed to create WMI debug fs"); + return; + } + + debugfs_create_file(DEBUGFS_BLOCK_NAME, 0400, + wmi_handle->debugfs_phy, &wmi_handle->dbglog, + &fops_dbglog_block); + + return; +} + +static void dbglog_debugfs_remove(wmi_unified_t wmi_handle) +{ + debugfs_remove_recursive(wmi_handle->debugfs_phy); +} + +#else + +static void dbglog_debugfs_init(wmi_unified_t wmi_handle) +{ +} + +static void dbglog_debugfs_remove(wmi_unified_t wmi_handle) +{ +} + +#endif /* End of WLAN_DEBUGFS */ + +#endif /* WLAN_OPEN_SOURCE */ + +/** + * cnss_diag_handle_crash_inject() - API to handle crash inject command + * @slot: pointer to struct dbglog_slot + * + * API to handle CNSS diag crash inject command + * + * Return: None + */ +static void cnss_diag_handle_crash_inject(struct dbglog_slot *slot) +{ + switch (slot->diag_type) { + case DIAG_TYPE_CRASH_INJECT: + if (slot->length != 2) { + 
AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("crash_inject cmd error\n")); + return; + } + + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("%s : DIAG_TYPE_CRASH_INJECT: %d %d\n", + __func__, slot->payload[0], + slot->payload[1])); +#ifdef WLAN_DEBUG + if (!tgt_assert_enable) { + AR_DEBUG_PRINTF(ATH_DEBUG_INFO, + ("%s: tgt Assert Disabled\n", + __func__)); + return; + } +#endif + wma_cli_set2_command(0, (int)GEN_PARAM_CRASH_INJECT, + slot->payload[0], + slot->payload[1], GEN_CMD); + break; + default: + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown cmd[%d] error\n", + slot->diag_type)); + break; + } +} + +#ifdef CNSS_GENL +/** + * cnss_diag_cmd_handler() - API to handle CNSS diag command + * @data: Data received + * @data_len: length of the data received + * @ctx: Pointer to stored context + * @pid: Process ID + * + * API to handle CNSS diag commands from user space + * + * Return: None + */ +static void cnss_diag_cmd_handler(const void *data, int data_len, + void *ctx, int pid) +{ + struct dbglog_slot *slot = NULL; + struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_MAX + 1]; + + /* + * audit note: it is ok to pass a NULL policy here since a + * length check on the data is added later already + */ + if (wlan_cfg80211_nla_parse(tb, CLD80211_ATTR_MAX, + data, data_len, NULL)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: nla parse fails\n", + __func__)); + return; + } + + if (!tb[CLD80211_ATTR_DATA]) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: attr VENDOR_DATA fails\n", + __func__)); + return; + } + + if (nla_len(tb[CLD80211_ATTR_DATA]) != sizeof(struct dbglog_slot)) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: attr length check fails\n", + __func__)); + return; + } + slot = (struct dbglog_slot *)nla_data(tb[CLD80211_ATTR_DATA]); + + if (!slot) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: data NULL\n", __func__)); + return; + } + + cnss_diag_handle_crash_inject(slot); + return; +} + +int cnss_diag_activate_service(void) +{ + register_cld_cmd_cb(WLAN_NL_MSG_CNSS_DIAG, cnss_diag_cmd_handler, NULL); + return 0; +} + +int 
cnss_diag_deactivate_service(void) +{ + deregister_cld_cmd_cb(WLAN_NL_MSG_CNSS_DIAG); + return 0; +} + +#else + +/** + * brief cnss_diag_msg_callback() - Call back invoked by netlink service + * + * This function gets invoked by netlink service when a message is recevied + * from the cnss-diag application in user-space. + * + * param - + * - skb - skb with netlink message + * + * return - 0 for success, non zero for failure + */ +static int cnss_diag_msg_callback(struct sk_buff *skb) +{ + struct nlmsghdr *nlh; + uint8_t *msg; + + nlh = (struct nlmsghdr *)skb->data; + if (!nlh) { + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("%s: Netlink header null\n", __func__)); + return A_ERROR; + } + + msg = NLMSG_DATA(nlh); + cnss_diag_handle_crash_inject((struct dbglog_slot *)msg); + + return 0; +} + +int cnss_diag_activate_service(void) +{ + int ret; + + /* + * Register the msg handler for msgs addressed to WLAN_NL_MSG_OEM + */ + ret = nl_srv_register(WLAN_NL_MSG_CNSS_DIAG, cnss_diag_msg_callback); + if (ret) + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("CNSS-DIAG Registration failed")); + + return ret; +} + +int cnss_diag_deactivate_service(void) +{ + int ret; + + /* + * Deregister the msg handler for msgs addressed to + * WLAN_NL_MSG_CNSS_DIAG + */ + ret = nl_srv_unregister(WLAN_NL_MSG_CNSS_DIAG, cnss_diag_msg_callback); + if (ret) + AR_DEBUG_PRINTF(ATH_DEBUG_ERR, + ("CNSS-DIAG Registration failed")); + + return ret; +} +#endif + +static A_BOOL +dbglog_wow_print_handler(uint32_t mod_id, + uint16_t vap_id, + uint32_t dbg_id, + uint32_t timestamp, uint16_t numargs, uint32_t *args) +{ + + switch (dbg_id) { + case WOW_NS_OFLD_ENABLE: + if (4 == numargs) { + dbglog_printf(timestamp, vap_id, + "Enable NS offload, for sender %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", + *(uint8_t *) &args[0], + *((uint8_t *) &args[0] + 1), + *((uint8_t *) &args[0] + 2), + *((uint8_t *) &args[0] + 3), + *(uint8_t *) &args[1], + *((uint8_t *) &args[1] + 1), + *((uint8_t *) &args[1] + 2), 
+ *((uint8_t *) &args[1] + 3), + *(uint8_t *) &args[2], + *((uint8_t *) &args[2] + 1), + *((uint8_t *) &args[2] + 2), + *((uint8_t *) &args[2] + 3), + *(uint8_t *) &args[3], + *((uint8_t *) &args[3] + 1), + *((uint8_t *) &args[3] + 2), + *((uint8_t *) &args[3] + 3)); + } else { + return false; + } + break; + case WOW_ARP_OFLD_ENABLE: + if (1 == numargs) { + dbglog_printf(timestamp, vap_id, + "Enable ARP offload, for sender %d.%d.%d.%d", + *(uint8_t *) args, + *((uint8_t *) args + 1), + *((uint8_t *) args + 2), + *((uint8_t *) args + 3)); + } else { + return false; + } + break; + case WOW_NS_ARP_OFLD_DISABLE: + if (0 == numargs) { + dbglog_printf(timestamp, vap_id, + "disable NS/ARP offload"); + } else { + return false; + } + break; + case WOW_NS_RECEIVED: + if (4 == numargs) { + dbglog_printf(timestamp, vap_id, + "NS requested from %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", + *(uint8_t *) &args[0], + *((uint8_t *) &args[0] + 1), + *((uint8_t *) &args[0] + 2), + *((uint8_t *) &args[0] + 3), + *(uint8_t *) &args[1], + *((uint8_t *) &args[1] + 1), + *((uint8_t *) &args[1] + 2), + *((uint8_t *) &args[1] + 3), + *(uint8_t *) &args[2], + *((uint8_t *) &args[2] + 1), + *((uint8_t *) &args[2] + 2), + *((uint8_t *) &args[2] + 3), + *(uint8_t *) &args[3], + *((uint8_t *) &args[3] + 1), + *((uint8_t *) &args[3] + 2), + *((uint8_t *) &args[3] + 3)); + } else { + return false; + } + break; + case WOW_NS_REPLIED: + if (4 == numargs) { + dbglog_printf(timestamp, vap_id, + "NS replied to %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", + *(uint8_t *) &args[0], + *((uint8_t *) &args[0] + 1), + *((uint8_t *) &args[0] + 2), + *((uint8_t *) &args[0] + 3), + *(uint8_t *) &args[1], + *((uint8_t *) &args[1] + 1), + *((uint8_t *) &args[1] + 2), + *((uint8_t *) &args[1] + 3), + *(uint8_t *) &args[2], + *((uint8_t *) &args[2] + 1), + *((uint8_t *) &args[2] + 2), + *((uint8_t *) &args[2] + 3), + *(uint8_t *) &args[3], + *((uint8_t *) 
&args[3] + 1), + *((uint8_t *) &args[3] + 2), + *((uint8_t *) &args[3] + 3)); + } else { + return false; + } + break; + case WOW_ARP_RECEIVED: + if (1 == numargs) { + dbglog_printf(timestamp, vap_id, + "ARP requested from %d.%d.%d.%d", + *(uint8_t *) args, + *((uint8_t *) args + 1), + *((uint8_t *) args + 2), + *((uint8_t *) args + 3)); + } else { + return false; + } + break; + break; + case WOW_ARP_REPLIED: + if (1 == numargs) { + dbglog_printf(timestamp, vap_id, + "ARP replied to %d.%d.%d.%d", + *(uint8_t *) args, + *((uint8_t *) args + 1), + *((uint8_t *) args + 2), + *((uint8_t *) args + 3)); + } else { + return false; + } + break; + default: + return false; + } + + return true; +} + +int dbglog_parser_type_init(wmi_unified_t wmi_handle, int type) +{ + if (type >= DBGLOG_PROCESS_MAX) + return A_ERROR; + + dbglog_process_type = type; +#ifdef WLAN_DEBUG + gprint_limiter = false; +#endif + + return A_OK; +} + +int dbglog_init(wmi_unified_t wmi_handle) +{ + int res = 0; + + OS_MEMSET(mod_print, 0, sizeof(mod_print)); + + dbglog_reg_modprint(WLAN_MODULE_STA_PWRSAVE, + dbglog_sta_powersave_print_handler); + dbglog_reg_modprint(WLAN_MODULE_AP_PWRSAVE, + dbglog_ap_powersave_print_handler); + dbglog_reg_modprint(WLAN_MODULE_WAL, dbglog_wal_print_handler); + dbglog_reg_modprint(WLAN_MODULE_SCAN, dbglog_scan_print_handler); + dbglog_reg_modprint(WLAN_MODULE_RATECTRL, + dbglog_ratectrl_print_handler); + dbglog_reg_modprint(WLAN_MODULE_ANI, dbglog_ani_print_handler); + dbglog_reg_modprint(WLAN_MODULE_COEX, dbglog_coex_print_handler); + dbglog_reg_modprint(WLAN_MODULE_BEACON, dbglog_beacon_print_handler); + dbglog_reg_modprint(WLAN_MODULE_WOW, dbglog_wow_print_handler); + dbglog_reg_modprint(WLAN_MODULE_DATA_TXRX, + dbglog_data_txrx_print_handler); + dbglog_reg_modprint(WLAN_MODULE_STA_SMPS, dbglog_smps_print_handler); + dbglog_reg_modprint(WLAN_MODULE_P2P, dbglog_p2p_print_handler); + dbglog_reg_modprint(WLAN_MODULE_PCIELP, dbglog_pcielp_print_handler); + 
dbglog_reg_modprint(WLAN_MODULE_IBSS_PWRSAVE, + dbglog_ibss_powersave_print_handler); +#ifdef WLAN_DEBUG + tgt_assert_enable = wmi_handle->tgt_force_assert_enable; +#endif + + /* Register handler for F3 or debug messages */ + res = + wmi_unified_register_event_handler(wmi_handle, + wmi_dbg_msg_event_id, + dbglog_parse_debug_logs, + WMA_RX_WORK_CTX); + if (res != 0) + return res; + + /* Register handler for FW diag events */ + res = wmi_unified_register_event_handler(wmi_handle, + wmi_diag_container_event_id, + fw_diag_data_event_handler, + WMA_RX_WORK_CTX); + if (res != 0) + return res; + + /* Register handler for new FW diag Event, LOG, MSG combined */ + res = wmi_unified_register_event_handler(wmi_handle, wmi_diag_event_id, + diag_fw_handler, + WMA_RX_WORK_CTX); + if (res != 0) + return res; + +#ifdef WLAN_OPEN_SOURCE + /* Initialize the fw debug log queue */ + skb_queue_head_init(&wmi_handle->dbglog.fwlog_queue); + init_completion(&wmi_handle->dbglog.fwlog_completion); + + /* Initialize debugfs */ + dbglog_debugfs_init(wmi_handle); +#endif /* WLAN_OPEN_SOURCE */ + + return res; +} + +int dbglog_deinit(wmi_unified_t wmi_handle) +{ + int res = 0; + +#ifdef WLAN_OPEN_SOURCE + /* DeInitialize the fw debug log queue */ + skb_queue_purge(&wmi_handle->dbglog.fwlog_queue); + complete(&wmi_handle->dbglog.fwlog_completion); + + /* Deinitialize the debugfs */ + dbglog_debugfs_remove(wmi_handle); +#endif /* WLAN_OPEN_SOURCE */ +#ifdef WLAN_DEBUG + tgt_assert_enable = 0; +#endif + res = + wmi_unified_unregister_event_handler(wmi_handle, + wmi_dbg_msg_event_id); + if (res != 0) + return res; + + return res; +} diff --git a/drivers/staging/qca-wifi-host-cmn/utils/fwlog/dbglog_host.h b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/dbglog_host.h new file mode 100644 index 0000000000000000000000000000000000000000..9606689a57417d5ee5dfc3156c0c04a90057cbb8 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/dbglog_host.h @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2011, 
2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DBGLOG_HOST_H_ +#define _DBGLOG_HOST_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "dbglog_common.h" +#include "wmi_unified_param.h" + +#define DIAG_FWID_OFFSET 24 +#define DIAG_FWID_MASK 0xFF000000 /* Bit 24-31 */ + +#define DIAG_TIMESTAMP_OFFSET 0 +#define DIAG_TIMESTAMP_MASK 0x00FFFFFF /* Bit 0-23 */ + +#define DIAG_ID_OFFSET 16 +#define DIAG_ID_MASK 0xFFFF0000 /* Bit 16-31 */ + +#define DIAG_VDEVID_OFFSET 11 +#define DIAG_VDEVID_MASK 0x0000F800 /* Bit 11-15 */ +#define DIAG_VDEVID_NUM_MAX 16 + +#define DIAG_VDEVLEVEL_OFFSET 8 +#define DIAG_VDEVLEVEL_MASK 0x00000700 /* Bit 8-10 */ + +#define DIAG_PAYLEN_OFFSET 0 +#define DIAG_PAYLEN_MASK 0x000000FF /* Bit 0-7 */ + +#define DIAG_PAYLEN_OFFSET16 0 +#define DIAG_PAYLEN_MASK16 0x0000FFFF /* Bit 0-16 */ + +#define DIAG_GET_TYPE(arg) \ + ((arg & DIAG_FWID_MASK) >> DIAG_FWID_OFFSET) + +#define DIAG_GET_TIME_STAMP(arg) \ + ((arg & DIAG_TIMESTAMP_MASK) >> DIAG_TIMESTAMP_OFFSET) + +#define DIAG_GET_ID(arg) \ + ((arg & DIAG_ID_MASK) >> DIAG_ID_OFFSET) + +#define DIAG_GET_VDEVID(arg) \ + ((arg & DIAG_VDEVID_MASK) >> DIAG_VDEVID_OFFSET) + +#define DIAG_GET_VDEVLEVEL(arg) \ + ((arg & DIAG_VDEVLEVEL_MASK) >> 
DIAG_VDEVLEVEL_OFFSET) + +#define DIAG_GET_PAYLEN(arg) \ + ((arg & DIAG_PAYLEN_MASK) >> DIAG_PAYLEN_OFFSET) + +#define DIAG_GET_PAYLEN16(arg) \ + ((arg & DIAG_PAYLEN_MASK16) >> DIAG_PAYLEN_OFFSET16) + +#ifdef FEATURE_FW_LOG_PARSING +/* + * set the dbglog parser type + */int +dbglog_parser_type_init(wmi_unified_t wmi_handle, int type); + +/** dbglog_init - Registers a WMI event handle for WMI_DBGMSG_EVENT + * @brief wmi_handle - handle to wmi module + */ +int +dbglog_init(wmi_unified_t wmi_handle); + +/** dbglog_deinit - UnRegisters a WMI event handle for WMI_DBGMSG_EVENT + * @brief wmi_handle - handle to wmi module + */ +int +dbglog_deinit(wmi_unified_t wmi_handle); + +/** set the size of the report size + * @brief wmi_handle - handle to Wmi module + * @brief size - Report size + */ +int +dbglog_set_report_size(wmi_unified_t wmi_handle, uint16_t size); + +/** Set the resolution for time stamp + * @brief wmi_handle - handle to Wmi module + * @brief tsr - time stamp resolution + */ +int +dbglog_set_timestamp_resolution(wmi_unified_t wmi_handle, + uint16_t tsr); + +/** Enable reporting. If it is set to false then Target won't deliver + * any debug information + */ +int +dbglog_report_enable(wmi_unified_t wmi_handle, A_BOOL isenable); + +#ifdef CONFIG_MCL +/* + * enum DBGLOG_LOG_LVL is not converged between WIN and MCL. + * So this function declaration needs to be disabled from WIN side. + */ + +/** Set the log level + * @brief DBGLOG_INFO - Information lowest log level + * @brief DBGLOG_WARNING + * @brief DBGLOG_ERROR - default log level + */ +int +dbglog_set_log_lvl(wmi_unified_t wmi_handle, DBGLOG_LOG_LVL log_lvl); +#endif + +/* + * set the debug log level for a given module + * mod_id_lvl : the format is more user friendly. + * module_id = mod_id_lvl/10; + * log_level = mod_id_lvl%10; + * example : mod_id_lvl is 153. then module id is 15 and log level is 3. 
+ * this format allows user to pass a single value + * (which is the most convenient way for most of the OSs) + * to be passed from user to the driver. + */ +int +dbglog_set_mod_log_lvl(wmi_unified_t wmi_handle, uint32_t mod_id_lvl); + +/** Enable/Disable the logging for VAP */ +int +dbglog_vap_log_enable(wmi_unified_t wmi_handle, uint16_t vap_id, + A_BOOL isenable); +/** Enable/Disable logging for Module */ +int +dbglog_module_log_enable(wmi_unified_t wmi_handle, uint32_t mod_id, + A_BOOL isenable); + +/** set vap enable bitmap */ +void +dbglog_set_vap_enable_bitmap(wmi_unified_t wmi_handle, + uint32_t vap_enable_bitmap); + +/** set log level for all the modules specified in the bitmap. + * for all other modules with 0 in the bitmap (or) outside the bitmap, + * the log level will be reset to DBGLOG_ERR. + */ +void +dbglog_set_mod_enable_bitmap(wmi_unified_t wmi_handle, + uint32_t log_level, + uint32_t *mod_enable_bitmap, + uint32_t bitmap_len); + +int +dbglog_parse_debug_logs(ol_scn_t scn, u_int8_t *datap, + u_int32_t len); + +/** + * cnss_diag_activate_service() - API to register CNSS diag cmd handler + * + * API to register the handler for the NL message received from cnss_diag + * application. + * + * Return: 0 + */ +int cnss_diag_activate_service(void); + +/** + * cnss_diag_deactivate_service() - API to deregister CNSS diag cmd handler + * + * API to deregister the handler for the NL message received from cnss_diag + * application. 
+ * + * Return: 0 + */ +int cnss_diag_deactivate_service(void); + +#else +static inline int +dbglog_parser_type_init(wmi_unified_t wmi_handle, int type) +{ + return A_OK; +} + +static inline int +dbglog_init(wmi_unified_t wmi_handle) +{ + return A_OK; +} + +static inline int +dbglog_deinit(wmi_unified_t wmi_handle) +{ + return A_OK; +} + +static inline int +dbglog_report_enable(wmi_unified_t wmi_handle, A_BOOL isenable) +{ + return A_OK; +} + +#ifdef CONFIG_MCL +static inline int +dbglog_set_log_lvl(wmi_unified_t wmi_handle, DBGLOG_LOG_LVL log_lvl) +{ + return A_OK; +} +#endif + +static inline int cnss_diag_activate_service(void) +{ + return A_OK; +} + +static inline int cnss_diag_deactivate_service(void) +{ + return A_OK; +} + +static inline int +dbglog_module_log_enable(wmi_unified_t wmi_handle, uint32_t mod_id, + A_BOOL isenable) +{ + return A_OK; +} + +static inline int +dbglog_vap_log_enable(wmi_unified_t wmi_handle, uint16_t vap_id, + A_BOOL isenable) +{ + return A_OK; +} + +static inline int +dbglog_set_mod_log_lvl(wmi_unified_t wmi_handle, uint32_t mod_id_lvl) +{ + return A_OK; +} +#endif /* FEATURE_FW_LOG_PARSING */ + +#ifdef __cplusplus +} +#endif + +#endif /* _DBGLOG_HOST_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/fwlog/fw_dbglog_api.c b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/fw_dbglog_api.c new file mode 100644 index 0000000000000000000000000000000000000000..ee07234874dfb46ce932deddd16ae383d1bf91bb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/fw_dbglog_api.c @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "fw_dbglog_api.h" +#include "fw_dbglog_priv.h" + +static inline struct dbglog_info *handle2info( + struct common_dbglog_handle *dbg_handle) +{ + return (struct dbglog_info *)dbg_handle; +} + +void fwdbg_set_log_lvl(struct common_dbglog_handle *dbg_handle, ol_scn_t scn, + uint32_t log_lvl) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_set_log_lvl) + dbg_info->ops->dbglog_set_log_lvl(scn, log_lvl); + +} + +int fwdbg_fw_handler(struct common_dbglog_handle *dbg_handle, ol_scn_t soc, + uint8_t *data, uint32_t datalen) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_fw_handler) + return dbg_info->ops->dbglog_fw_handler(soc, data, datalen); + + return 0; +} + +int fwdbg_parse_debug_logs(struct common_dbglog_handle *dbg_handle, + const char *name, uint8_t *datap, uint16_t len, void *context) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_parse_debug_logs) + return dbg_info->ops->dbglog_parse_debug_logs(name, + datap, len, context); + + return 0; +} + +void fwdbg_ratelimit_set(struct common_dbglog_handle *dbg_handle, + uint32_t burst_limit) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_ratelimit_set) + dbg_info->ops->dbglog_ratelimit_set(burst_limit); + +} + +void fwdbg_vap_log_enable(struct common_dbglog_handle *dbg_handle, ol_scn_t scn, + uint16_t vap_id, bool isenable) 
+{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_vap_log_enable) + dbg_info->ops->dbglog_vap_log_enable(scn, vap_id, + isenable); + +} + +void fwdbg_set_timestamp_resolution(struct common_dbglog_handle *dbg_handle, + ol_scn_t scn, uint16_t tsr) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_set_timestamp_resolution) + dbg_info->ops->dbglog_set_timestamp_resolution(scn, tsr); + +} + +void fwdbg_reporting_enable(struct common_dbglog_handle *dbg_handle, + ol_scn_t scn, bool isenable) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_reporting_enable) + dbg_info->ops->dbglog_reporting_enable(scn, isenable); + +} + +void fwdbg_module_log_enable(struct common_dbglog_handle *dbg_handle, + ol_scn_t scn, uint32_t mod_id, bool isenable) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_module_log_enable) + dbg_info->ops->dbglog_module_log_enable(scn, mod_id, + isenable); + +} + +void fwdbg_init(struct common_dbglog_handle *dbg_handle, void *soc) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_init) + dbg_info->ops->dbglog_init(soc); + +} + +void fwdbg_free(struct common_dbglog_handle *dbg_handle, void *soc) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_free) + dbg_info->ops->dbglog_free(soc); + +} + +void fwdbg_set_report_size(struct common_dbglog_handle *dbg_handle, + ol_scn_t scn, uint16_t size) +{ + struct dbglog_info *dbg_info = handle2info(dbg_handle); + + if (dbg_info->ops->dbglog_set_report_size) + dbg_info->ops->dbglog_set_report_size(scn, size); + +} + diff --git a/drivers/staging/qca-wifi-host-cmn/utils/fwlog/inc/fw_dbglog_api.h b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/inc/fw_dbglog_api.h new file mode 100644 index 0000000000000000000000000000000000000000..b649efeb35c0e852c90cfc1b127633c409da455a --- 
/dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/inc/fw_dbglog_api.h @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _FW_DBGLOG_API_H_ +#define _FW_DBGLOG_API_H_ + +#include "target_if.h" + +/** + * fwdbg_set_log_lvl() - API to set debug log level + * @dbg_handle: Debug module handle + * @scn: scn handle + * @log_lvl: value of log level + * + * Send wmi configuration command to set debug log level. + * + * Return: None + */ +void fwdbg_set_log_lvl(struct common_dbglog_handle *dbg_handle, ol_scn_t scn, + uint32_t log_lvl); + +/** + * fwdbg_fw_handler() - Firmware handler interface + * @dbg_handle: Debug module handle + * @sc: soc handle + * @data: Reference to command data + * @datalen: length of data + * + * Return: 0 success + */ +int fwdbg_fw_handler(struct common_dbglog_handle *dbg_handle, ol_scn_t sc, + uint8_t *data, uint32_t datalen); + +/** + * fwdbg_parse_debug_logs() - API to parse firmware debug logs + * @dbg_handle: Debug module handle + * @name: device name + * @datap: Reference to log data + * @len: length of data + * @context: log context + * + * API parse firmware debug log messages and prints to console. 
+ * + * Return: 0 success + */ +int fwdbg_parse_debug_logs(struct common_dbglog_handle *dbg_handle, + const char *name, uint8_t *datap, + uint16_t len, void *context); + +/** + * fwdbg_ratelimit_set() - API to set rate limit + * @dbg_handle: Debug module handle + * @burst_limit: burst limit + * + * Return: None + */ +void fwdbg_ratelimit_set(struct common_dbglog_handle *dbg_handle, + uint32_t burst_limit); + +/** + * fwdbg_vap_log_enable() - API to Enable/Disable the logging for VAP + * @dbg_handle: Debug module handle + * @scn: scn handle + * @vap_id: VAP id + * @isenable: Enable/disable + * + * API allows to enable or disable debuglogs at VAP level. It encodes wmi + * config command based on VAP id and sends wmi command to firmware to + * enable/disable debuglog. + * + * Return: None + */ +void fwdbg_vap_log_enable(struct common_dbglog_handle *dbg_handle, ol_scn_t scn, + uint16_t vap_id, bool isenable); + +/** + * fwdbg_set_timestamp_resolution - Set the resolution for time stamp + * @dbg_handle: Debug module handle + * @scn: scn handle + * @tsr: time stamp resolution + * + * Set the resolution for time stamp in debug logs. It encodes wmi + * config command to desired timestamp resolution and sends wmi command to + * firmware. + * + * Return: None + */ +void fwdbg_set_timestamp_resolution(struct common_dbglog_handle *dbg_handle, + ol_scn_t scn, uint16_t tsr); + +/** + * fwdbg_reporting_enable() - Enable reporting. + * @dbg_handle: Debug module handle + * @scn: scn handle + * @isenable: Enable/disable + * + * API to enable debug information reporting. It encodes wmi config command + * to enable reporting. If set to false then Target wont deliver any debug + * information. + * + * Return: None + */ +void fwdbg_reporting_enable(struct common_dbglog_handle *dbg_handle, + ol_scn_t scn, bool isenable); + +/** + * fwdbg_module_log_enable() - Enable/Disable logging for Module. 
+ * @dbg_handle: Debug module handle + * @scn: scn handle + * @mod_id: Module id + * @isenable: Enable/disable + * + * API allows to enable or disable debuglogs per module. It encodes wmi + * config command based on module id and sends wmi command to firmware to + * enable/disable debuglog for that module. + * + * Return: None + */ +void fwdbg_module_log_enable(struct common_dbglog_handle *dbg_handle, + ol_scn_t scn, uint32_t mod_id, bool isenable); + +/** + * fwdbg_init() - Initialize debuglog. + * @dbg_handle: Debug module handle + * @soc: soc handle + * + * It initializes debuglog print function for set of modules and + * initializes WMI event handler for debuglog message event. + * + * Return: None + */ +void fwdbg_init(struct common_dbglog_handle *dbg_handle, void *soc); + +/** + * fwdbg_free() - Free debug handler. + * @dbg_handle: Debug module handle + * @soc: soc handle + * + * Return: None + */ +void fwdbg_free(struct common_dbglog_handle *dbg_handle, void *soc); + +/** + * fwdbg_set_report_size() - set the size of the report size + * @dbg_handle: Debug module handle + * @scn: soc handler + * @size: Report size + * + * Set the debug log report size. It encodes wmi config command to + * desired report size and sends wmi command to firmware. + * + * Return: None + */ +void fwdbg_set_report_size(struct common_dbglog_handle *dbg_handle, + ol_scn_t scn, uint16_t size); + +#endif /* _FW_DBGLOG_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/fwlog/inc/fw_dbglog_priv.h b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/inc/fw_dbglog_priv.h new file mode 100644 index 0000000000000000000000000000000000000000..3820432b80c02785924c5281f373ba82bb55f0f3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/fwlog/inc/fw_dbglog_priv.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains the API definitions for the Unified Wireless + * Module Interface (WMI). + */ +#ifndef _FW_DBGLOG_PRIV_H_ +#define _FW_DBGLOG_PRIV_H_ + +#include + +struct dbglog_ops { + +void (*dbglog_set_log_lvl)(ol_scn_t scn, uint32_t log_lvl); +int (*dbglog_fw_handler)(ol_scn_t soc, uint8_t *data, uint32_t datalen); +int (*dbglog_parse_debug_logs)(const char *name, + u_int8_t *datap, uint16_t len, void *context); +void (*dbglog_ratelimit_set)(uint32_t burst_limit); +void (*dbglog_vap_log_enable)(ol_scn_t soc, uint16_t vap_id, + bool isenable); +void (*dbglog_set_timestamp_resolution)(ol_scn_t soc, uint16_t tsr); +void (*dbglog_reporting_enable)(ol_scn_t soc, bool isenable); +void (*dbglog_module_log_enable)(ol_scn_t scn, + uint32_t mod_id, bool isenable); +void (*dbglog_init)(void *scn); +void (*dbglog_set_report_size)(ol_scn_t scn, uint16_t size); +void (*dbglog_free)(void *soc); + +}; + +struct dbglog_info { + struct dbglog_ops *ops; +}; +#endif /*_FW_DBGLOG_PRIV_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_core_event.h b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_core_event.h new file mode 100644 index 
0000000000000000000000000000000000000000..d820056ca8fbf012d372d96e261a1aa3660a87b9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_core_event.h @@ -0,0 +1,995 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#if !defined(__HOST_DIAG_CORE_EVENT_H) +#define __HOST_DIAG_CORE_EVENT_H + +/**========================================================================= + + \file host_diag_core_event.h + + \brief WLAN UTIL host DIAG Events + + Definitions for DIAG Events + + ========================================================================*/ + +/* $Header$ */ + +/*-------------------------------------------------------------------------- + Include Files + ------------------------------------------------------------------------*/ +#include "qdf_types.h" +#include "i_host_diag_core_event.h" + +/*-------------------------------------------------------------------------- + Preprocessor definitions and constants + ------------------------------------------------------------------------*/ +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#define WAKE_LOCK_NAME_LEN 80 +#define RSN_OUI_SIZE 4 + +/** + * enum wifi_frm_type: type of frame + * + * @MGMT: Indicates management frames + * @CTRL: Indicates control frames + * @DATA: Indicates data frames + */ +enum wifi_frm_type { + MGMT = 0x00, + CTRL = 0x01, + DATA = 0x02, +}; + +/* + * enum mgmt_frm_subtype: sub types of mgmt frames + * + * @ASSOC_REQ: association request frame + * @ASSOC_RESP: association response frame + * @REASSOC_REQ: reassociation request frame + * @REASSOC_RESP: reassociation response frame + * @PROBE_REQ: probe request frame + * @PROBE_RESP: probe response frame + * @BEACON: beacon frame + * @ATIM: ATIM frame + * @DISASSOC: disassociation frame + * @AUTH: authentication frame + * @DEAUTH: deauth frame + * @ACTION: action frame + * @ACTION_NO_ACK: action no ack frame + */ +enum mgmt_frm_subtype { + ASSOC_REQ = 0x00, + ASSOC_RESP = 0x01, + REASSOC_REQ = 0x02, + REASSOC_RESP = 0x03, + PROBE_REQ = 0x04, + PROBE_RESP = 0x05, + BEACON = 0x08, + ATIM = 0x09, + DISASSOC = 0x0a, + AUTH = 0x0b, + DEAUTH = 0x0c, + ACTION = 0x0d, + ACTION_NO_ACK = 0x0e, +}; + +/** + * enum mgmt_auth_type: type
of authentication + * + * @AUTH_OPEN: no security applied + * @AUTH_SHARED: WEP type of auth + * @AUTH_WPA_EAP: WPA1 EAP based auth + * @AUTH_WPA_PSK: WPA1 PSK based auth + * @AUTH_WPA2_EAP: WPA2 EAP based auth + * @AUTH_WPA2_PSK: WPA2 PSK based auth + * @AUTH_WAPI_CERT: WAPI CERT based auth + * @AUTH_WAPI_PSK: WAPI PSK based auth + */ +enum mgmt_auth_type { + AUTH_OPEN = 0x00, + AUTH_SHARED = 0x01, + AUTH_WPA_EAP = 0x02, + AUTH_WPA_PSK = 0x03, + AUTH_WPA2_EAP = 0x04, + AUTH_WPA2_PSK = 0x05, + AUTH_WAPI_CERT = 0x06, + AUTH_WAPI_PSK = 0x07, + AUTH_MAX = 0xff, +}; + +/** + * enum mgmt_encrypt_type: type of encryption + * + * @ENC_MODE_OPEN: no encryption applied + * @ENC_MODE_WEP40: WEP 40 bits encryption + * @ENC_MODE_WEP104: WEP 104 bits encryption + * @ENC_MODE_TKIP: TKIP based encryption + * @ENC_MODE_AES: AES based encryption + * @ENC_MODE_AES_GCMP: AES with GCMP encryption + * @ENC_MODE_AES_GCMP_256: AES with 256 bit GCMP encryption + * @ENC_MODE_SMS4: WAPI based SMS4 encryption + */ +enum mgmt_encrypt_type { + ENC_MODE_OPEN = 0x00, + ENC_MODE_WEP40 = 0x01, + ENC_MODE_WEP104 = 0x02, + ENC_MODE_TKIP = 0x03, + ENC_MODE_AES = 0x04, + ENC_MODE_AES_GCMP = 0x05, + ENC_MODE_AES_GCMP_256 = 0x06, + ENC_MODE_SMS4 = 0x07, + ENC_MODE_MAX = 0x0f, +}; + +/** + * enum mgmt_ch_width: channel width of connection + * + * @BW_20MHZ: 20 MHz of channel bonding + * @BW_40MHZ: 40 MHz of channel bonding + * @BW_80MHZ: 80 MHz of channel bonding + * @BW_160MHZ: 160 MHz of channel bonding + * @BW_80P80MHZ: 80 + 80 MHz of channel bonding + * @BW_5MHZ: 5 MHz of channel bonding + * @BW_10MHZ: 10 MHz of channel bonding + */ +enum mgmt_ch_width { + BW_20MHZ = 0x00, + BW_40MHZ = 0x01, + BW_80MHZ = 0x02, + BW_160MHZ = 0x03, + BW_80P80MHZ = 0x04, + BW_5MHZ = 0x05, + BW_10MHZ = 0x06, + BW_MAX = 0xff, +}; + +/** + * enum mgmt_dot11_mode: 80211 mode of operation + * + * @DOT11_MODE_ABG: 802.11-ABG mix mode + * @DOT11_MODE_11A: 802.11-A mode + * @DOT11_MODE_11B: 802.11-B mode + * @DOT11_MODE_11G: 
802.11-G mode + * @DOT11_MODE_11N: 802.11-N mode + * @DOT11_MODE_11AC: 802.11-AC mode + * @DOT11_MODE_11G_ONLY: 802.11-G only mode + * @DOT11_MODE_11N_ONLY: 802.11-N only mode + * @DOT11_MODE_11AC_ONLY: 802.11-AC only mode + * @DOT11_MODE_AUTO: 802.11 auto mode + * @DOT11_MODE_11AX: 802.11-AX mode + * @DOT11_MODE_11AX_ONLY: 802.11-AX only mode + */ +enum mgmt_dot11_mode { + DOT11_MODE_ABG = 0x00, + DOT11_MODE_11A = 0x01, + DOT11_MODE_11B = 0x02, + DOT11_MODE_11G = 0x03, + DOT11_MODE_11N = 0x04, + DOT11_MODE_11AC = 0x05, + DOT11_MODE_11G_ONLY = 0x06, + DOT11_MODE_11N_ONLY = 0x07, + DOT11_MODE_11AC_ONLY = 0x08, + DOT11_MODE_AUTO = 0x09, + DOT11_MODE_11AX = 0x0a, + DOT11_MODE_11AX_ONLY = 0x0b, + DOT11_MODE_MAX = 0xff, +}; + +/** + * enum mgmt_bss_type: persona type + * + * @STA_PERSONA: STA mode + * @SAP_PERSONA: SAP mode + * @P2P_CLIENT_PERSONA: P2P cli mode + * @P2P_GO_PERSONA: P2P go mode + * @FTM_PERSONA: FTM mode + * @IBSS_PERSONA: IBSS mode + * @MONITOR_PERSONA: monitor mode + * @P2P_DEVICE_PERSONA: P2P device mode + * @OCB_PERSONA: OCB mode + * @EPPING_PERSONA: epping mode + * @QVIT_PERSONA: QVIT mode + * @NDI_PERSONA: NDI mode + * @WDS_PERSONA: WDS mode + * @BTAMP_PERSONA: BT amp mode + * @AHDEMO_PERSONA: AH demo mode + */ +enum mgmt_bss_type { + STA_PERSONA = 0x00, + SAP_PERSONA = 0x01, + P2P_CLIENT_PERSONA = 0x02, + P2P_GO_PERSONA = 0x03, + FTM_PERSONA = 0x04, + IBSS_PERSONA = 0x05, + MONITOR_PERSONA = 0x06, + P2P_DEVICE_PERSONA = 0x07, + OCB_PERSONA = 0x08, + EPPING_PERSONA = 0x09, + QVIT_PERSONA = 0x0a, + NDI_PERSONA = 0x0b, + WDS_PERSONA = 0x0c, + BTAMP_PERSONA = 0x0d, + AHDEMO_PERSONA = 0x0e, + MAX_PERSONA = 0xff, +}; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_SECURITY + ------------------------------------------------------------------------*/ +typedef struct { + uint8_t eventId; + uint8_t authMode; + uint8_t encryptionModeUnicast; + uint8_t encryptionModeMulticast; + uint8_t pmkIDMatch; + 
uint8_t bssid[6]; + uint8_t keyId; + uint8_t status; +} host_event_wlan_security_payload_type; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_STATUS_V2 + ------------------------------------------------------------------------*/ +typedef struct { + uint8_t eventId; + uint8_t ssid[32]; + uint8_t bssType; + uint8_t rssi; + uint8_t channel; + uint8_t qosCapability; + uint8_t authType; + uint8_t encryptionType; + uint8_t reason; + uint8_t reasonDisconnect; +} host_event_wlan_status_payload_type; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_HANDOFF + ------------------------------------------------------------------------*/ +typedef struct { + uint8_t eventId; + uint8_t currentApBssid[6]; + uint8_t currentApRssi; + uint8_t candidateApBssid[6]; + uint8_t candidateApRssi; +} host_event_wlan_handoff_payload_type; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_VCC + ------------------------------------------------------------------------*/ +typedef struct { + uint8_t eventId; + uint8_t rssi; + uint8_t txPer; + uint8_t rxPer; + int linkQuality; +} host_event_wlan_vcc_payload_type; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_QOS + ------------------------------------------------------------------------*/ +typedef struct { + uint8_t eventId; + uint8_t reasonCode; +} host_event_wlan_qos_payload_type; + +/** + * host_event_wlan_connection_stats: to capture connection details + * + * @rssi: RSSI signal strength of connected AP, units in dbM + * @ssid_len: length of SSID + * @ssid: SSID of AP where STA is connected + * @bssid: bssid of AP where STA is connected + * @operating_channel: channel on which AP is connected + * @qos_capability: QoS is enabled or no + * @chnl_bw: channel BW of connection, units in MHz + * Range: enum mgmt_ch_width + * @dot11mode: 
802.11 mode of current connection + * Range: enum mgmt_dot11_mode + * @bss_type: type of the BSS whether AP/IBSS/P2PGO + * Range: enum mgmt_bss_type bss_type + * @auth_type: type of authentication for connected AP + * Range: enum mgmt_auth_type + * @encryption_type: type of encryption applied + * Range: enum mgmt_encrypt_type + * @reserved1: reserved for future use + * @est_link_speed: link speed of connection, units in Mbps + * @result_code: result code of connection success or failure + * @reason_code: if failed then what is the reason + */ +struct host_event_wlan_connection_stats { + int8_t rssi; + uint8_t ssid_len; + char ssid[32]; + uint8_t bssid[6]; + uint8_t operating_channel; + uint8_t qos_capability; + uint8_t chnl_bw; + uint8_t dot11mode; + uint8_t bss_type; + uint8_t auth_type; + uint8_t encryption_type; + uint8_t reserved1; + uint32_t est_link_speed; + uint16_t result_code; + uint16_t reason_code; +} qdf_packed; + +/*------------------------------------------------------------------------- + Event ID: EVENT_WLAN_PE + ------------------------------------------------------------------------*/ +typedef struct { + char bssid[6]; + uint16_t event_type; + uint16_t sme_state; + uint16_t mlm_state; + uint16_t status; + uint16_t reason_code; +} host_event_wlan_pe_payload_type; + +/** + * host_event_wlan_mgmt_payload_type: To capture TX/RX mgmt frames' payload + * + * @mgmt_type: type of frames, value: enum wifi_frm_type + * @mgmt_subtype: subtype of mgmt frame, value: enum mgmt_frm_subtype + * @operating_channel: operating channel of AP + * @ssid_len: length of SSID, max 32 bytes long as per standard + * @ssid: SSID of connected AP + * @self_mac_addr: mac address of self interface + * @bssid: BSSID for which frame is received + * @result_code: result code TX/RX OTA delivery + * @reason_code: reason code given in TX/RX frame + */ +struct host_event_wlan_mgmt_payload_type { + uint8_t mgmt_type; + uint8_t mgmt_subtype; + uint8_t operating_channel; + uint8_t 
ssid_len;
	char ssid[32];
	char self_mac_addr[6];
	char bssid[6];
	uint16_t result_code;
	uint16_t reason_code;
} qdf_packed;

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_ADD_BLOCK_ACK_SUCCESS
   ------------------------------------------------------------------------*/
/*
 * Payload logged when a block-ack (ADDBA) session is added successfully.
 * @ucBaPeerMac: MAC address of the block-ack peer
 * @ucBaTid: TID of the block-ack session
 * @ucBaBufferSize: block-ack buffer size
 * @usBaSSN: starting sequence number
 * @fInitiator: non-zero if the local side initiated the session
 */
typedef struct {
	char ucBaPeerMac[6];
	uint8_t ucBaTid;
	uint8_t ucBaBufferSize;
	uint16_t usBaSSN;
	uint8_t fInitiator;
} host_event_wlan_add_block_ack_success_payload_type;

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_ADD_BLOCK_ACK_FAILED
   ------------------------------------------------------------------------*/
/*
 * Payload logged when adding a block-ack (ADDBA) session fails.
 * @ucBaPeerMac: MAC address of the block-ack peer
 * @ucBaTid: TID of the block-ack session
 * @ucReasonCode: failure reason code
 * @fInitiator: non-zero if the local side initiated the session
 */
typedef struct {
	char ucBaPeerMac[6];
	uint8_t ucBaTid;
	uint8_t ucReasonCode;
	uint8_t fInitiator;
} host_event_wlan_add_block_ack_failed_payload_type;

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_DELETE_BLOCK_ACK_SUCCESS
   ------------------------------------------------------------------------*/
/*
 * Payload logged when a block-ack session is deleted successfully.
 * @ucBaPeerMac: MAC address of the block-ack peer
 * @ucBaTid: TID of the deleted session
 * @ucDeleteReasonCode: reason for the deletion
 */
typedef struct {
	char ucBaPeerMac[6];
	uint8_t ucBaTid;
	uint8_t ucDeleteReasonCode;
} host_event_wlan_add_block_ack_deleted_payload_type;

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_DELETE_BLOCK_ACK_FAILED
   ------------------------------------------------------------------------*/
/*
 * Payload logged when deleting a block-ack session fails.
 * @ucBaPeerMac: MAC address of the block-ack peer
 * @ucBaTid: TID of the session
 * @ucDeleteReasonCode: reason the delete was attempted
 * @ucFailReasonCode: reason the delete failed
 */
typedef struct {
	char ucBaPeerMac[6];
	uint8_t ucBaTid;
	uint8_t ucDeleteReasonCode;
	uint8_t ucFailReasonCode;
} host_event_wlan_add_block_ack_delete_failed_payload_type;

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_BSS_PROTECTION
   ------------------------------------------------------------------------*/
/*
 * Payload logged for BSS protection mode changes.
 * @event_type: protection event sub-type
 * @prot_type: protection mechanism type
 */
typedef struct {
	uint8_t event_type;
	uint8_t prot_type;
} host_event_wlan_bss_prot_payload_type;

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_BRINGUP_STATUS
   ------------------------------------------------------------------------*/
/*
 * Payload logged for WLAN bring-up status.
 * @wlanStatus: bring-up status code
 * @driverVersion: driver version string (not guaranteed NUL-terminated
 *                 if the version fills all 10 bytes)
 */
typedef struct {
	uint16_t wlanStatus;
	char driverVersion[10];
} host_event_wlan_bringup_status_payload_type;

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_POWERSAVE_WOW
   ------------------------------------------------------------------------*/
/*
 * Payload logged for Wake-on-Wireless (WoW) powersave events.
 * @event_subtype: WoW event sub-type
 * @wow_type: type of WoW configured
 * @wow_magic_pattern: magic pattern bytes (MAC-address sized)
 * @wow_del_ptrn_id: id of a deleted wakeup pattern
 * @wow_wakeup_cause: cause of the wakeup
 * @wow_wakeup_cause_pbm_ptrn_id: pattern id that caused a
 *                                pattern-byte-match wakeup
 */
typedef struct {
	uint8_t event_subtype;
	uint8_t wow_type;
	uint8_t wow_magic_pattern[6];
	uint8_t wow_del_ptrn_id;
	uint8_t wow_wakeup_cause;
	uint8_t wow_wakeup_cause_pbm_ptrn_id;
} host_event_wlan_powersave_wow_payload_type;

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_POWERSAVE_WOW_STATS
   ------------------------------------------------------------------------*/
/**
 * host_event_wlan_powersave_wow_stats - Structure holding wow stats information
 * @wow_ucast_wake_up_count: wow unicast packet wakeup count
 * @wow_bcast_wake_up_count: wow broadcast packet wakeup count
 * @wow_ipv4_mcast_wake_up_count: wow ipv4 multicast packet wakeup count
 * @wow_ipv6_mcast_wake_up_count: wow ipv6 multicast packet wakeup count
 * @wow_ipv6_mcast_ra_stats: wow ipv6 multicast router advertisement
 *                           packet wakeup count
 * @wow_ipv6_mcast_ns_stats: wow ipv6 multicast Neighbor Solicitation
 *                           packet wakeup count
 * @wow_ipv6_mcast_na_stats: wow ipv6 multicast address space
 *                           packet wakeup count
 * @wow_pno_match_wake_up_count: wow preferred network offload match
 *                               packet wakeup count
 * @wow_pno_complete_wake_up_count: wow preferred network offload complete
 *                                  packet wakeup count
 * @wow_gscan_wake_up_count: wow external scan packet wakeup count
 * @wow_low_rssi_wake_up_count: wow low rssi packet wakeup count
 * @wow_rssi_breach_wake_up_count: wow rssi breach packet wakeup count
 * @wow_icmpv4_count: wow icmpv4 packet count
 * @wow_icmpv6_count: wow icmpv6 packet count
 * @wow_oem_response_wake_up_count: wow oem response packet wakeup count
 *
 * This structure contains the wow stats information related to diag event.
 * The trailing Reserved_* fields pad the record for future use.
 */
struct host_event_wlan_powersave_wow_stats {
	uint32_t wow_ucast_wake_up_count;
	uint32_t wow_bcast_wake_up_count;
	uint32_t wow_ipv4_mcast_wake_up_count;
	uint32_t wow_ipv6_mcast_wake_up_count;
	uint32_t wow_ipv6_mcast_ra_stats;
	uint32_t wow_ipv6_mcast_ns_stats;
	uint32_t wow_ipv6_mcast_na_stats;
	uint32_t wow_pno_match_wake_up_count;
	uint32_t wow_pno_complete_wake_up_count;
	uint32_t wow_gscan_wake_up_count;
	uint32_t wow_low_rssi_wake_up_count;
	uint32_t wow_rssi_breach_wake_up_count;
	uint32_t wow_icmpv4_count;
	uint32_t wow_icmpv6_count;
	uint32_t wow_oem_response_wake_up_count;
	uint32_t Reserved_1;
	uint32_t Reserved_2;
	uint32_t Reserved_3;
	uint32_t Reserved_4;
};

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_BTC
   ------------------------------------------------------------------------*/
/*
 * Payload logged for Bluetooth coexistence (BTC) events.
 * @eventId: BTC event sub-type
 * @btAddr: Bluetooth device address
 * @connHandle: BT connection handle
 * @connStatus: BT connection status
 * @linkType: BT link type
 * @scoInterval: SCO link interval
 * @scoWindow: SCO link window
 * @retransWindow: retransmission window
 * @mode: coexistence mode
 */
typedef struct {
	uint8_t eventId;
	uint8_t btAddr[6];
	uint16_t connHandle;
	uint8_t connStatus;
	uint8_t linkType;
	uint8_t scoInterval;
	uint8_t scoWindow;
	uint8_t retransWindow;
	uint8_t mode;
} host_event_wlan_btc_type;

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_EAPOL
   ------------------------------------------------------------------------*/
/**
 * struct host_event_wlan_eapol - Structure holding the eapol information
 * @event_sub_type: 0-Transmitted, 1-Received
 * @eapol_packet_type: 0 - EAP Start, 1 - EAPOL Start, 2 - EAPOL Logoff
 *                     3 - EAPOL Key, 4 - EAPOL Encapsulated Alert
 * @eapol_key_info: This field from the driver is in big endian format.
 *                  So, the mask 0x8013 can be used to extract the
 *                  message type. After masking, the values corresponding
 *                  to messages 1/2/3/4 are given below:
 *                  Msg. 1	0x8000
 *                  Msg. 2	0x0001
 *                  Msg. 3	0x8013
 *                  Msg. 4	0x0003
 * @eapol_rate: Rate at which the frame is received
 * @dest_addr: Destination address
 * @src_addr: Source address
 *
 * This structure contains the EAPOL information related to logging
 */
struct host_event_wlan_eapol {
	uint8_t event_sub_type;
	uint8_t eapol_packet_type;
	uint16_t eapol_key_info;
	uint16_t eapol_rate;
	uint8_t dest_addr[6];
	uint8_t src_addr[6];
};

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_LOW_RESOURCE_FAILURE
   ------------------------------------------------------------------------*/
/**
 * struct host_event_wlan_low_resource_failure - Structure holding the
 * low resource failure information
 * @event_sub_type: Gives further information about reason for
 *                  low resource condition
 *
 * This structure will hold the low resource failure information
 */
struct host_event_wlan_low_resource_failure {
	uint8_t event_sub_type;
};

/**
 * enum resource_failure_type - Reason for low resource failure condition
 * @WIFI_EVENT_MEMORY_FAILURE: Memory failure
 *
 * This enum has the reason codes why the low resource situation is observed
 */
enum resource_failure_type {
	WIFI_EVENT_MEMORY_FAILURE,
};

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_RSN_INFO
   -------------------------------------------------------------------------
 */
/**
 * struct event_wlan_csr_rsn_info - Structure holding the
 * RSN information for assoc request
 * @akm_suite: Gives information about akm suites used in assoc request
 * @ucast_cipher: Unicast cipher used in assoc request
 * @mcast_cipher: Multicast cipher used in assoc request
 * @group_mgmt: Requested group mgmt cipher suite
 *
 * This structure will hold the RSN information for assoc request
 */
struct event_wlan_csr_rsn_info {
	uint8_t akm_suite[RSN_OUI_SIZE];
	uint8_t ucast_cipher[RSN_OUI_SIZE];
	uint8_t mcast_cipher[RSN_OUI_SIZE];
	uint8_t group_mgmt[RSN_OUI_SIZE];
};

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_WAKE_LOCK
   ------------------------------------------------------------------------*/
/**
 * struct host_event_wlan_wake_lock - Structure holding the wakelock information
 * @status: Whether the wakelock is taken/released
 * @reason: Reason for taking this wakelock
 * @timeout: Timeout value in case of timed wakelocks
 * @name_len: Length of the name of the wakelock that will follow
 * @name: Name of the wakelock
 *
 * This structure will hold the wakelock information
 */
struct host_event_wlan_wake_lock {
	uint32_t status;
	uint32_t reason;
	uint32_t timeout;
	uint32_t name_len;
	char name[WAKE_LOCK_NAME_LEN];
};

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_LOG_COMPLETE
   ------------------------------------------------------------------------*/
/**
 * struct host_event_wlan_log_complete - Holds log completion details
 * @is_fatal: Indicates if the event is fatal or not
 * @indicator: Source of the bug report - Framework/Host/Firmware
 * @reason_code: Reason for triggering bug report
 * @reserved: Reserved field
 *
 * This structure holds the log completion related information
 */
struct host_event_wlan_log_complete {
	uint32_t is_fatal;
	uint32_t indicator;
	uint32_t reason_code;
	uint32_t reserved;
};

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_STA_KICKOUT
   ------------------------------------------------------------------------*/
/**
 * struct host_event_wlan_kickout - Holds diag event details
 * @reasoncode: Indicates the reasoncode of event
 * @peer_mac: Indicates the peer macaddr
 * @vdev_id: Indicate unique id for identifying the VDEV
 *
 * This structure holds the diag event related information
 */

struct host_event_wlan_kickout {
	uint32_t reasoncode;
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
	uint8_t vdev_id;
};

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_SOFTAP_DATASTALL/EVENT_WLAN_STA_DATASTALL
   ------------------------------------------------------------------------*/
/**
 * struct host_event_wlan_datastall - Holds diag event details
 * @reason: Indicates the reason of event
 *
 * This structure holds the host diag event related information
 */

struct host_event_wlan_datastall {
	uint32_t reason;
};

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_SSR_REINIT_SUBSYSTEM
   ------------------------------------------------------------------------*/
/**
 * struct host_event_wlan_ssr_reinit - Holds diag event details
 * @status: Indicates the status of event
 *
 * This structure holds the host diag event related information
 */

struct host_event_wlan_ssr_reinit {
	uint32_t status;
};

/*-------------------------------------------------------------------------
   Event ID: EVENT_WLAN_SSR_SHUTDOWN_SUBSYSTEM
   ------------------------------------------------------------------------*/
/**
 * struct host_event_wlan_ssr_shutdown - Holds diag event details
 * @status: Indicates the status of event
 *
 * This structure holds the host diag event related information
 */

struct host_event_wlan_ssr_shutdown {
	uint32_t status;
};


/*-------------------------------------------------------------------------
   Function declarations and documentation
   ------------------------------------------------------------------------*/
/**
 * enum host_sta_kickout_events - Enum containing sta kickout subtype
 * @HOST_STA_KICKOUT_REASON_BMISS: Indicate sta got disconnected reason
 *                                 beacon miss
 * @HOST_STA_KICKOUT_REASON_XRETRY: Indicate sta got disconnected reason xretry
 * @HOST_STA_KICKOUT_REASON_UNSPECIFIED: Indicate sta disconnection
 *                                       reason unspecified
 * @HOST_STA_KICKOUT_REASON_KEEP_ALIVE: Indicate sta is disconnected
 *                                      because of keep alive
 * @HOST_STA_KICKOUT_REASON_BTM: BTM request from AP with disassoc imminent
 *                               reason
 *
 * This enum contains the event subtype
 */
enum host_sta_kickout_events {
	HOST_STA_KICKOUT_REASON_BMISS,
	HOST_STA_KICKOUT_REASON_XRETRY,
	HOST_STA_KICKOUT_REASON_UNSPECIFIED,
	HOST_STA_KICKOUT_REASON_KEEP_ALIVE,
	HOST_STA_KICKOUT_REASON_BTM,
};

/*-------------------------------------------------------------------------
   Function declarations and documentation
   ------------------------------------------------------------------------*/
/**
 * enum host_datastall_events - Enum containing datastall subtype
 * @DATA_STALL_NONE: Indicate no data stall
 * @FW_VDEV_PAUSE: Indicate FW vdev Pause
 * @HWSCHED_CMD_FILTER: Indicate HW sched command filter
 * @HWSCHED_CMD_FLUSH: Indicate HW sched command flush
 * @FW_RX_REFILL_FAILED: Indicate FW rx refill failed
 * @FW_RX_FCS_LEN_ERROR: Indicate FW fcs len error
 * @FW_WDOG_ERRORS: Indicate watchdog error
 * @FW_BB_WDOG_ERROR: Indicate BB watchdog error
 * @STA_TX_TIMEOUT: Indicate sta tx timeout
 * @SOFTAP_TX_TIMEOUT: Indicate softap tx timeout
 * @NUD_FAILURE: Indicate NUD Failure
 *
 * This enum contains the event subtype
 */
enum host_datastall_events {
	DATA_STALL_NONE,
	FW_VDEV_PAUSE,
	HWSCHED_CMD_FILTER,
	HWSCHED_CMD_FLUSH,
	FW_RX_REFILL_FAILED,
	FW_RX_FCS_LEN_ERROR,
	FW_WDOG_ERRORS,
	FW_BB_WDOG_ERROR,
	STA_TX_TIMEOUT,
	SOFTAP_TX_TIMEOUT,
	NUD_FAILURE,
};

/*-------------------------------------------------------------------------
   Function declarations and documentation
   ------------------------------------------------------------------------*/
/**
 * enum host_ssr_events - Enum containing ssr subtype
 * @SSR_SUB_SYSTEM_REINIT: Indicate ssr reinit state
 * @SSR_SUB_SYSTEM_SHUTDOWN: Indicate ssr shutdown state
 *
 * This enum contains the event subtype
 */
enum host_ssr_events {
	SSR_SUB_SYSTEM_REINIT,
	SSR_SUB_SYSTEM_SHUTDOWN,
};

/**
 * struct host_event_tdls_teardown - tdls teardown diag event
 * @reason: reason for tear down
 * @peer_mac: peer mac
 *
 * This structure contains tdls teardown diag event info
 */
struct host_event_tdls_teardown {
	uint32_t reason;
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
};

/**
 * struct host_event_tdls_enable_link - tdls enable link event
 * @peer_mac: peer mac
 * @is_off_chan_supported: if off channel supported
 * @is_off_chan_configured: if off channel configured
 * @is_off_chan_established: if off channel established
 *
 * This structure contain tdls enable link diag event info
 */
struct host_event_tdls_enable_link {
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
	uint8_t is_off_chan_supported;
	uint8_t is_off_chan_configured;
	uint8_t is_off_chan_established;
};

/**
 * struct host_event_suspend - suspend/resume state
 * @state: suspend/resume state
 *
 * This structure contains suspend resume diag event info
 */
struct host_event_suspend {
	uint8_t state;
};

/**
 * struct host_event_offload_req - offload state
 * @offload_type: offload type
 * @state: enabled or disabled state
 *
 * This structure contains offload diag event info
 */
struct host_event_offload_req {
	uint8_t offload_type;
	uint8_t state;
};

/**
 * struct host_event_tdls_scan_rejected - scan
 * rejected due to tdls
 * @status: rejected status
 *
 * This structure contains scan rejected due to
 * tdls event info
 */
struct host_event_tdls_scan_rejected {
	uint8_t status;
};

/**
 * struct host_event_tdls_tx_rx_mgmt - for TX RX management frame
 * @event_id: event ID
 * @tx_rx: tx or rx
 * @type: type of frame
 * @action_sub_type: action frame type
 * @peer_mac: peer mac
 *
 * This structure contains tdls TX RX management frame info
 */
struct host_event_tdls_tx_rx_mgmt {
	uint8_t event_id;
	uint8_t tx_rx;
	uint8_t type;
	uint8_t action_sub_type;
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
};

/*-------------------------------------------------------------------------
   Function declarations and documentation
   ------------------------------------------------------------------------*/
/**
 * enum wifi_connectivity_events - Enum containing EAPOL sub type
 * @WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED: EAPOL transmitted
 * @WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED: EAPOL received
 *
 * This enum contains the EAPOL subtype
 */
enum wifi_connectivity_events {
	WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED,
	WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED,
};

/**
 * enum wake_lock_reason - Reason for taking/releasing wakelock
 * @WIFI_POWER_EVENT_WAKELOCK_DRIVER_INIT: Driver initialization
 * @WIFI_POWER_EVENT_WAKELOCK_DRIVER_REINIT: Driver re-initialization
 * @WIFI_POWER_EVENT_WAKELOCK_DRIVER_EXIT: Driver shutdown
 * @WIFI_POWER_EVENT_WAKELOCK_SCAN: Scan request/response handling
 * @WIFI_POWER_EVENT_WAKELOCK_EXT_SCAN: Extended scan request/response handling
 * @WIFI_POWER_EVENT_WAKELOCK_RESUME_WLAN: Driver resume
 * @WIFI_POWER_EVENT_WAKELOCK_ROC: Remain on channel request/response handling
 * @WIFI_POWER_EVENT_WAKELOCK_AUTO_SUSPEND: Auto suspend related handling
 * @WIFI_POWER_EVENT_WAKELOCK_IPA: IPA related handling
 * @WIFI_POWER_EVENT_WAKELOCK_ADD_STA: Addition of STA
 * @WIFI_POWER_EVENT_WAKELOCK_HOLD_RX: Wakelocks taken for receive
 * @WIFI_POWER_EVENT_WAKELOCK_SAP: SoftAP related wakelocks
 * @WIFI_POWER_EVENT_WAKELOCK_WOW: WoW feature related
 * @WIFI_POWER_EVENT_WAKELOCK_PNO: PNO feature related
 * @WIFI_POWER_EVENT_WAKELOCK_DEL_STA: Deletion of a station
 * @WIFI_POWER_EVENT_WAKELOCK_DFS: DFS related wakelocks
 * @WIFI_POWER_EVENT_WAKELOCK_WMI_CMD_RSP: Firmware response
 * @WIFI_POWER_EVENT_WAKELOCK_MISC: Miscellaneous wakelocks
 * @WIFI_POWER_EVENT_WAKELOCK_DHCP: DHCP negotiation under way
 * @WIFI_POWER_EVENT_WAKELOCK_CONNECT: connection in progress
 * @WIFI_POWER_EVENT_WAKELOCK_IFACE_CHANGE_TIMER: iface change timer running
 * @WIFI_POWER_EVENT_WAKELOCK_MONITOR_MODE: Monitor mode wakelock
 *
 * Indicates the reason for which the wakelock was taken/released
 */
enum wake_lock_reason {
	WIFI_POWER_EVENT_WAKELOCK_DRIVER_INIT,
	WIFI_POWER_EVENT_WAKELOCK_DRIVER_REINIT,
	WIFI_POWER_EVENT_WAKELOCK_DRIVER_EXIT,
	WIFI_POWER_EVENT_WAKELOCK_SCAN,
	WIFI_POWER_EVENT_WAKELOCK_EXT_SCAN,
	WIFI_POWER_EVENT_WAKELOCK_RESUME_WLAN,
	WIFI_POWER_EVENT_WAKELOCK_ROC,
	WIFI_POWER_EVENT_WAKELOCK_AUTO_SUSPEND,
	WIFI_POWER_EVENT_WAKELOCK_IPA,
	WIFI_POWER_EVENT_WAKELOCK_ADD_STA,
	WIFI_POWER_EVENT_WAKELOCK_HOLD_RX,
	WIFI_POWER_EVENT_WAKELOCK_SAP,
	WIFI_POWER_EVENT_WAKELOCK_WOW,
	WIFI_POWER_EVENT_WAKELOCK_PNO,
	WIFI_POWER_EVENT_WAKELOCK_DEL_STA,
	WIFI_POWER_EVENT_WAKELOCK_DFS,
	WIFI_POWER_EVENT_WAKELOCK_WMI_CMD_RSP,
	WIFI_POWER_EVENT_WAKELOCK_MISC,
	WIFI_POWER_EVENT_WAKELOCK_DHCP,
	WIFI_POWER_EVENT_WAKELOCK_CONNECT,
	WIFI_POWER_EVENT_WAKELOCK_IFACE_CHANGE_TIMER,
	WIFI_POWER_EVENT_WAKELOCK_MONITOR_MODE,
};

/* The length of interface name should >= IFNAMSIZ */
#define HOST_EVENT_INTF_STR_LEN 16
#define HOST_EVENT_HW_MODE_STR_LEN 12

/**
 * struct host_event_wlan_acs_req - payload for ACS diag event
 * @intf: network interface name for WLAN
 * @hw_mode: hw mode configured by hostapd
 * @bw: channel bandwidth(MHz)
 * @ht: a flag indicating whether HT phy mode is enabled
 * @vht: a flag indicating whether VHT phy mode is enabled
 * @chan_start: starting channel number for ACS scan
 * @chan_end: ending channel number for ACS scan
 *
 * This structure includes all the payload related to ACS request parameters
 */
struct host_event_wlan_acs_req {
	uint8_t intf[HOST_EVENT_INTF_STR_LEN];
	uint8_t hw_mode[HOST_EVENT_HW_MODE_STR_LEN];
	uint16_t bw;
	uint8_t ht;
	uint8_t vht;
	uint16_t chan_start;
	uint16_t chan_end;
};

/**
 * struct host_event_wlan_acs_scan_start - payload for ACS scan request
 * @scan_id: scan request ID
 * @vdev_id: vdev/session ID
 *
 * This structure includes all the payload related to ACS scan request
 * parameters
 */
struct host_event_wlan_acs_scan_start {
	uint32_t scan_id;
	uint8_t vdev_id;
};

#define HOST_EVENT_STATUS_STR_LEN 24

/**
 * struct host_event_wlan_acs_scan_done - payload for ACS scan done event
 * @status: indicating whether ACS scan is successful
 * @vdev_id: vdev/session ID
 * @scan_id: scan request ID
 *
 * This structure includes all the payload related to ACS scan done event
 */
struct host_event_wlan_acs_scan_done {
	uint8_t status[HOST_EVENT_STATUS_STR_LEN];
	uint32_t scan_id;
	uint8_t vdev_id;
};

/**
 * struct host_event_wlan_acs_chan_spectral_weight - payload for spectral
 * weight event indication
 * @chan: channel number
 * @weight: channel weight
 * @rssi: RSSI value obtained after scanning
 * @bss_count: number of BSS detected on this channel
 *
 * This structure includes all the payload related to a channel's weight
 * evaluation result
 */
struct host_event_wlan_acs_chan_spectral_weight {
	uint16_t chan;
	uint16_t weight;
	int32_t rssi;
	uint16_t bss_count;
};

/**
 * struct host_event_wlan_acs_best_chan - payload for ACS best channel event
 * @chan: channel number
 * @weight: channel weight
 *
 * This structure includes all the payload related to the best channel
 * selected after ACS procedure
 */
struct host_event_wlan_acs_best_chan {
	uint16_t chan;
	uint16_t weight;
};

#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* __HOST_DIAG_CORE_EVENT_H */
diff --git a/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_core_log.h b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_core_log.h
new file mode 100644
index 0000000000000000000000000000000000000000..4b72f237e92cede0af7cb0df62ce875dfa7eba56
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_core_log.h
@@ -0,0
+1,294 @@
/*
 * Copyright (c) 2014-2017, 2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#if !defined(__HOST_DIAG_CORE_LOG_H)
#define __HOST_DIAG_CORE_LOG_H

/**=========================================================================

   \file host_diag_core_log.h

   \brief WLAN UTIL host DIAG logs

   Definitions for WLAN UTIL host diag events

   ========================================================================*/

/* $Header$ */

/*--------------------------------------------------------------------------
   Include Files
   ------------------------------------------------------------------------*/
#include "qdf_types.h"
#include "i_host_diag_core_log.h"

/*--------------------------------------------------------------------------
   Preprocessor definitions and constants
   ------------------------------------------------------------------------*/
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/*--------------------------------------------------------------------------
   Preprocessor definitions and constants
   ------------------------------------------------------------------------*/
#define HOST_LOG_MAX_NUM_SSID (21)
#define HOST_LOG_MAX_NUM_BSSID (21)
#define HOST_LOG_MAX_SSID_SIZE (32)
#define HOST_LOG_MAX_BSSID_SIZE (6)
#define HOST_LOG_MAX_NUM_CHANNEL (64)
#define HOST_LOG_MAX_NUM_HO_CANDIDATE_APS (20)
#define HOST_LOG_MAX_WOW_PTRN_SIZE (128)
#define HOST_LOG_MAX_WOW_PTRN_MASK_SIZE (16)
#define VOS_LOG_PKT_LOG_SIZE (2048)
#define HOST_LOG_PKT_LOG_THRESHOLD 40960
#define HOST_LOG_MAX_COLD_BOOT_CAL_DATA_SIZE (2048)

/* Version to be updated whenever format of vos_log_pktlog_info changes */
#define VERSION_LOG_WLAN_PKT_LOG_INFO_C 1
/* Version to be updated whenever format of host_log_cold_boot_cal_data_type
 * changes
 */
#define VERSION_LOG_WLAN_COLD_BOOT_CAL_DATA_C 1

/*---------------------------------------------------------------------------
   This packet contains the scan results of the recent scan operation
   LOG_WLAN_SCAN_C 0x1496
   ---------------------------------------------------------------------------*/
typedef struct {
	log_hdr_type hdr;
	uint8_t eventId;
	uint8_t numSsid;
	uint8_t ssid[HOST_LOG_MAX_NUM_SSID][HOST_LOG_MAX_SSID_SIZE];
	uint8_t bssid[HOST_LOG_MAX_NUM_BSSID][HOST_LOG_MAX_BSSID_SIZE];
	uint8_t totalSsid;
	uint8_t minChnTime;
	uint8_t maxChnTime;
	uint16_t timeBetweenBgScan;
	uint8_t BSSMode;
	uint8_t numChannel;
	uint8_t channels[HOST_LOG_MAX_NUM_CHANNEL];
	uint16_t status;
} host_log_scan_pkt_type;

/*---------------------------------------------------------------------------
   This packet contains the information related to IBSS connection setup
   LOG_WLAN_IBSS_C 0x1497
   ---------------------------------------------------------------------------*/
typedef struct {
	log_hdr_type hdr;
	uint8_t eventId;
	uint8_t channelSetting;
	struct qdf_mac_addr bssid;
	struct qdf_mac_addr peer_macaddr;
	uint8_t ssid[HOST_LOG_MAX_SSID_SIZE];
	uint8_t operatingChannel;
	uint8_t beaconInterval;
	uint8_t status;
} host_log_ibss_pkt_type;

/*---------------------------------------------------------------------------
   This packet contains the information related to 802.11D
   LOG_WLAN_80211D_C 0x1498
   ---------------------------------------------------------------------------*/
typedef struct {
	log_hdr_type hdr;
	uint8_t eventId;
	uint8_t numChannel;
	uint8_t Channels[HOST_LOG_MAX_NUM_CHANNEL];
	uint8_t TxPwr[HOST_LOG_MAX_NUM_CHANNEL];
	uint8_t countryCode[3];
	uint8_t supportMultipleDomain;
} host_log_802_11d_pkt_type;

/*---------------------------------------------------------------------------
   This is a log packet which contains below handoff information:
   - Current AP + RSSI (if already associated)
   - Candidate AP + RSSI (before association and when the list is updated)
   - For each BSSID in candidate list, provide RSSI, QoS and security compatibility
   LOG_WLAN_HANDOFF_C 0x1499
   ---------------------------------------------------------------------------*/
typedef struct {
	uint8_t ssid[9];
	uint8_t bssid[HOST_LOG_MAX_BSSID_SIZE];
	uint8_t channel_id;
	uint32_t qos_score;
	uint32_t sec_score;
	uint32_t rssi_score;
	uint32_t overall_score;
	uint32_t tx_per;        /* represented as a % */
	uint32_t rx_per;        /* represented as a % */

} host_log_ho_ap_info;

typedef struct {
	log_hdr_type hdr;
	uint32_t num_aps;
	host_log_ho_ap_info current_ap_info;
	host_log_ho_ap_info
		candidate_ap_info[HOST_LOG_MAX_NUM_HO_CANDIDATE_APS];
} host_log_ho_pkt_type;

/*---------------------------------------------------------------------------
   This packet contains the information related to the EDCA parameters
   advertised by the AP
   LOG_WLAN_QOS_EDCA_C 0x149A
   ---------------------------------------------------------------------------*/
typedef struct {
	log_hdr_type hdr;
	uint8_t aci_be;
	uint8_t cw_be;
	uint16_t txoplimit_be;
	uint8_t aci_bk;
	uint8_t cw_bk;
	uint16_t txoplimit_bk;
	uint8_t aci_vi;
	uint8_t cw_vi;
	uint16_t txoplimit_vi;
	uint8_t aci_vo;
	uint8_t cw_vo;
	uint16_t txoplimit_vo;
} host_log_qos_edca_pkt_type;

/*---------------------------------------------------------------------------
   This packet contains the total number of beacon received value
   LOG_WLAN_BEACON_UPDATE_C 0x149B
   ---------------------------------------------------------------------------*/
typedef struct {
	log_hdr_type hdr;
	uint32_t bcn_rx_cnt;
} host_log_beacon_update_pkt_type;

/*---------------------------------------------------------------------------
   This packet contains the information related to a WoW pattern value when set
   LOG_WLAN_POWERSAVE_WOW_ADD_PTRN_C 0x149C
   ---------------------------------------------------------------------------*/
typedef struct {
	log_hdr_type hdr;
	uint8_t pattern_id;
	uint8_t pattern_byte_offset;
	uint8_t pattern_size;
	uint8_t pattern[HOST_LOG_MAX_WOW_PTRN_SIZE];
	uint8_t pattern_mask_size;
	uint8_t pattern_mask[HOST_LOG_MAX_WOW_PTRN_MASK_SIZE];
} host_log_powersave_wow_add_ptrn_pkt_type;

/*---------------------------------------------------------------------------
   This packet contains the Tspec info negotiated with the AP for the
   specific AC
   LOG_WLAN_QOS_TSPEC_C 0x14A2
   ---------------------------------------------------------------------------*/
typedef struct {
	log_hdr_type hdr;
	uint8_t tsinfo[3];
	uint16_t nominal_msdu_size;
	uint16_t maximum_msdu_size;
	uint32_t min_service_interval;
	uint32_t max_service_interval;
	uint32_t inactivity_interval;
	uint32_t suspension_interval;
	uint32_t svc_start_time;
	uint32_t min_data_rate;
	uint32_t mean_data_rate;
	uint32_t peak_data_rate;
	uint32_t max_burst_size;
	uint32_t delay_bound;
	uint32_t min_phy_rate;
	uint16_t surplus_bw_allowance;
	uint16_t medium_time;
} host_log_qos_tspec_pkt_type;

/*---------------------------------------------------------------------------
   This packet contains data information when stall detected
   LOG_TRSP_DATA_STALL_C 0x1801
   ---------------------------------------------------------------------------*/

typedef struct {
	char channelName[4];
	uint32_t numDesc;
	uint32_t numFreeDesc;
	uint32_t numRsvdDesc;
	uint32_t headDescOrder;
	uint32_t tailDescOrder;
	uint32_t ctrlRegVal;
	uint32_t statRegVal;
	uint32_t numValDesc;
	uint32_t numInvalDesc;
} host_log_data_stall_channel_type;

typedef struct {
	log_hdr_type hdr;
	uint32_t PowerState;
	uint32_t numFreeBd;
	host_log_data_stall_channel_type dxeChannelInfo[4];
} host_log_data_stall_type;

/*---------------------------------------------------------------------------
   This packet contains the rssi value from BSS descriptor
   LOG_WLAN_RSSI_UPDATE_C 0x1354
   ---------------------------------------------------------------------------*/
typedef struct {
	log_hdr_type hdr;
	int8_t rssi;
} host_log_rssi_pkt_type;

/**
 * struct host_log_pktlog_info - Packet log info
 * @log_hdr: Log header
 * @version: Version of the packet log record format
 * @seq_no: Sequence number of this record
 * @buf_len: Length of the buffer that follows
 * @buf: Buffer containing the packet log info (flexible array member)
 *
 * Structure containing the packet log information
 * LOG_WLAN_PKT_LOG_INFO_C 0x18E0
 */
struct host_log_pktlog_info {
	log_hdr_type log_hdr;
	uint32_t version;
	uint32_t seq_no;
	uint32_t buf_len;
	uint8_t buf[];
};

/**
 * struct host_log_cold_boot_cal_data_type - Cold boot cal log info
 * @hdr: Log header
 * @version: version
 * @flags: Flag to indicate if more data follows
 * @cb_cal_data_len: Length of the cal data
 * @cb_cal_data: Cold boot cal data
 *
 * Structure containing the cold boot calibration data
 * log information
 * LOG_WLAN_COLD_BOOT_CAL_DATA_C 0x1A18
 */
struct host_log_cold_boot_cal_data_type {
	log_hdr_type hdr;
	uint32_t version;
	uint32_t flags;
	uint32_t cb_cal_data_len;
	uint8_t cb_cal_data[HOST_LOG_MAX_COLD_BOOT_CAL_DATA_SIZE];
};

/*-------------------------------------------------------------------------
   Function declarations and documentation
   ------------------------------------------------------------------------*/

#ifdef __cplusplus
}
#endif /* __cplusplus */
+#endif /* __HOST_DIAG_CORE_LOG_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_event_defs.h b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_event_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..ff55ff16c7a218a4b7923f8b7ddfe951636eabf2 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/host_diag_event_defs.h @@ -0,0 +1,920 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef EVENT_DEFS_H +#define EVENT_DEFS_H + +typedef enum { + EVENT_DROP_ID = 0, + + /* Events between 0x1 to 0x674 are not used */ + + /* + * + * EVENT_WLAN_SECURITY + * @ eventId: Event id + * @ authMode: Shows the auth mode + * @ encryptionModeUnicast: Encryption Mode Unicast + * @ encryptionModeMulticast: Encryption Mode Multicast + * @ pmkIDMatch: PMK ID Match + * @ bssid: BSSID < 0 to 5 > + * @ keyId: Key ID + * @ status: Shows the status 0 is Success and 1 is failure + * + * This event is used in SECURITY to send various wlan security modes + * Values for parameters are defined below: + * Event ID: offset: 0 length: 1 + * 5 - Remove Key Req + * 6 - Remove Key Rsp + * 7 - PMKID Candidate Found + * 8 - PMKID Update + * 9 - Mic Error + * 10 - Set UniCast Key Req + * 11 - Set UniCast Key Rsp + * 12 - Set BCast Key Req + * 13 - Set BCast Key Rsp + * + * Auth Mode: offset: 1 length: 1 + * 0 - Open + * 1 - Shared + * 2 - WPA EAP + * 3 - WPA PSK + * 4 - WPA2 EAP + * 5 - WPA2 PSK + * + * Encryption Mode Unicast: offset: 2 length: 1 + * 0 - Open + * 1 - WEP40 + * 2 - WEP104 + * 3 - TKIP + * 4 - AES + * + * Encryption Mode Multicast: offset: 3 length: 1 + * 0 - Open + * 1 - WEP40 + * 2 - WEP104 + * 3 - TKIP + * 4 - AES + * + * ENC_MODE_SMS4: offset: 4 length:1 + * + * PMK ID Match: offset: 5 length: 1 + * 0 - No Match + * 1 - Match + * + * BSSID[0]: offset: 6 length: 1 + * BSSID[1]: offset: 7 length: 1 + * BSSID[2]: offset: 8 length: 1 + * BSSID[3]: offset: 9 length: 1 + * BSSID[4]: offset: 10 length: 1 + * BSSID[5]: offset: 11 length: 1 + * + * Key ID: offset: 12 length: 1 + * Status: offset: 13 length: 1 + * 0 - Success + * 1 - Failure + * + * Supported Feature: wlan security + * + * + */ + + EVENT_WLAN_SECURITY = 0x675, /* 13 byte payload */ + + /* + * + * EVENT_WLAN_STATUS + * @ eventId: Event id + * @ ssid: SSID + * @ bssType: BSS Type + * @ rssi: RSSI + * @ channel: Channel + * @ qosCapability: QoS Capability + * @ authmode: Auth Mode + * @ 
encryptionType: Encryption Type + * @ reason: Reason + * @ reasonDisconnect: Reason Disconnect + * + * This event is used to send wlan status + * Values for parameters are defined below: + * Event ID: offset: 0 length: 1 + * 0 - Connect + * 1 - Disconnect + * + * SSID: offset: 1 length: 32 + * BSS Type: offset: 33 + SSID length, length: 1 + * 0 - None + * 1 - BSS_TYPE_INFRASTRUCTURE + * 2 - BSS_TYPE_INFRA_AP + * 3 - BSS_TYPE_IBSS + * 4 - BSS_TYPE_START_IBSS + * 5 - BSS_TYPE_NDI + * 6 - BSS_TYPE_ANY + * + * RSSI: offset: 34 length: 1 + * Channel: offset: 35 length: 1 + * QoS Capability: offset: 36 length: 1 + * Auth Mode: offset: 37 length: 1 + * 0 - Open + * 1 - Shared + * 2 - WPA EAP + * 3 - WPA PSK + * 4 - WPA2 EAP + * 5 - WPA2 PSK + * 6 - WAPI CERT + * 7 - WAPI PSK + * + * Encryption Type: offset: 38 length: 1 + * 0 - Open + * 1 - WEP40 + * 2 - WEP104 + * 3 - TKIP + * 4 - AES + * + * ENC_MODE_SMS4: offset: 39 length: 1 + * + * Reason: offset: 40 length: 1 + * 0 - Unspecified + * 1 - User Requested + * 2 - Mic Error + * 3 - Diassoc + * 4 - Deauth + * 5 - Handoff + * + * Reason Disconnect: offset: 41 length: 1 + * + * Supported Feature: wlan status + * + * + */ + + EVENT_WLAN_STATUS, /* 15 byte payload */ + + /* Events 0x677 and 0x678 are not used */ + + /* + * + * EVENT_WLAN_QOS + * @ eventId: event id + * @ reasonCode: Reason for event + * + * This event is used to send quality set services + * Values for parameters are defined below: + * Event ID: offset: 0 length: 1 + * 0 - Add TS Req + * 1 - Add TS Rsp + * 2 - Delts + * + * Reason Code: offset: 1 length: 1 + * 0 - Admission Accepted + * 1 - Invalid Params + * 2 - Reserved + * 3 - Refused + * 4 - User Requested + * 5 - Ind From AP + * + * Supported Feature: Qos wlan + * + * + */ + + EVENT_WLAN_QOS = 0x679, /* 2 byte payload */ + + /* + * + * EVENT_WLAN_PE + * @bssid: BSSID + * @ event_type: Event type + * @ sme_state: SME state + * @ mlm_state: MLM state + * @ status: 0 - Success, 1 - Failure < majority 0 is 
success > + * @reason_code: reason for event report + * + * This event is used in PE to send different diag events. + * Values for parameters are defined below: + * + * bssid[0]: offset: 0 length: 1 + * bssid[1]: offset: 1 length: 1 + * bssid[2]: offset: 2 length: 1 + * bssid[3]: offset: 3 length: 1 + * bssid[4]: offset: 4 length: 1 + * bssid[5]: offset: 5 length: 1 + * + * Event Type: offset: 6 length: 2 + * 0 - SCAN REQ EVENT + * 1 - SCAN ABORT IND EVENT + * 2 - SCAN_RSP_EVENT + * 3 - JOIN_REQ_EVENT + * 4 - JOIN_RSP_EVENT + * 5 - SETCONTEXT_REQ_EVENT + * 6 - SETCONTEXT_RSP_EVENT + * 7 - REASSOC_REQ_EVENT + * 8 - REASSOC_RSP_EVENT + * 9 - AUTH_REQ_EVENT + * 10 - AUTH_RSP_EVENT + * 11 - DISASSOC_REQ_EVENT + * 12 - DISASSOC_RSP_EVENT + * 13 - DISASSOC_IND_EVENT + * 14 - DISASSOC_CNF_EVENT + * 15 - DEAUTH_REQ_EVENT + * 16 - DEAUTH_RSP_EVENT + * 17 - DEAUTH_IND_EVENT + * 18 - START_BSS_REQ_EVENT + * 19 - START_BSS_RSP_EVENT + * 20 - AUTH_IND_EVENT + * 21 - ASSOC_IND_EVENT + * 22 - ASSOC_CNF_EVENT + * 23 - REASSOC_IND_EVENT + * 24 - SWITCH_CHL_IND_EVENT + * 25 - SWITCH_CHL_RSP_EVENT + * 26 - STOP_BSS_REQ_EVENT + * 27 - STOP_BSS_RSP_EVENT + * 28 - DEAUTH_CNF_EVENT + * 29 - ADDTS_REQ_EVENT + * 30 - ADDTS_RSP_EVENT + * 31 - DELTS_REQ_EVENT + * 32 - DELTS_RSP_EVENT + * 33 - DELTS_IND_EVENT + * 34 - ENTER_BMPS_REQ_EVENT + * 35 - ENTER_BMPS_RSP_EVENT + * 36 - EXIT_BMPS_REQ_EVENT + * 37 - BMPS_RSP_EVENT + * 38 - EXIT_BMPS_IND_EVENT + * 39 - ENTER_IMPS_REQ_EVENT + * 40 - ENTER_IMPS_RSP_EVENT + * 41 - EXIT_IMPS_REQ_EVENT + * 42 - EXIT_IMPS_RSP_EVENT + * 43 - ENTER_UAPSD_REQ_EVENT + * 44 - ENTER_UAPSD_RSP_EVENT + * 45 - EXIT_UAPSD_REQ_EVENT + * 46 - EXIT_UAPSD_RSP_EVENT + * 47 - WOWL_ADD_BCAST_PTRN_EVENT + * 48 - WOWL_DEL_BCAST_PTRN_EVENT + * 49 - ENTER_WOWL_REQ_EVENT + * 50 - ENTER_WOWL_RSP_EVENT + * 51 - EXIT_WOWL_REQ_EVENT + * 52 - EXIT_WOWL_RSP_EVENT + * 53 - HAL_ADDBA_REQ_EVENT + * 54 - HAL_ADDBA_RSP_EVENT + * 55 - HAL_DELBA_IND_EVENT + * 56 - HB_FAILURE_TIMEOUT + * 57 - 
PRE_AUTH_REQ_EVENT + * 58 - PRE_AUTH_RSP_EVENT + * 59 - PREAUTH_DONE + * 60 - REASSOCIATING + * 61 - CONNECTED + * 62 - ASSOC_REQ_EVENT + * 63 - AUTH_COMP_EVENT + * 64 - ASSOC_COMP_EVENT + * 65 - AUTH_START_EVENT + * 66 - ASSOC_START_EVENT + * 67 - REASSOC_START_EVENT + * 68 - ROAM_AUTH_START_EVENT + * 69 - ROAM_AUTH_COMP_EVENT + * 70 - ROAM_ASSOC_START_EVENT + * 71 - ROAM_ASSOC_COMP_EVENT + * 72 - SCAN_COMPLETE_EVENT + * 73 - SCAN_RESULT_FOUND_EVENT + * 74 - ASSOC_TIMEOUT + * 75 - AUTH_TIMEOUT + * 76 - DEAUTH_FRAME_EVENT + * 77 - DISASSOC_FRAME_EVENT + * + * SME State: offset: 8 length: 2 + * 0 - OFFLINE + * 1 - IDLE + * 2 - SUSPEND + * 3 - WT SCAN + * 4 - WT JOIN + * 5 - WT AUTH + * 6 - WT ASSOC + * 7 - WT REASSOC + * 8 - WT REASSOC LINK FAIL + * 9 - JOIN FAILURE + * 10 - ASSOCIATED + * 11 - REASSOCIATED + * 12 - LINK EST + * 13 - LINK EST WT SCAN + * 14 - WT PRE AUTH + * 15 - WT DISASSOC + * 16 - WT DEAUTH + * 17 - WT START BSS + * 18 - WT STOP BSS + * 19 - NORMAL + * 20 - CHANNEL SCAN + * 21 - NORMAL CHANNEL SCAN + * + * MLM State: offset: 10 legth: 2 + * 0 - MLM OFFLINE + * 1 - MLM IDLE + * 2 - MLM WT PROBE RESP + * 3 - MLM PASSIVE SCAN + * 4 - MLM WT JOIN BEACON + * 5 - MLM JOINED + * 6 - MLM BSS STARTED + * 7 - MLM WT AUTH FRAME + * 8 - MLM WT AUTH FRAME + * 9 - MLM WT AUTH FRAME + * 10 - MLM AUTH RSP TIMEOUT + * 11 - MLM AUTHENTICATED + * 12 - MLM WT ASSOC RSP + * 13 - MLM WT REASSOC RSP + * 14 - MLM ASSOCIATED + * 15 - MLM REASSOCIATED + * 16 - MLM LINK ESTABLISHED + * 17 - MLM WT ASSOC CNF + * 18 - MLM LEARN + * 19 - MLM WT ADD BSS RSP + * 20 - MLM WT DEL BSS RSP + * 21 - MLM WT ADD BSS RSP ASSOC + * 22 - MLM WT ADD BSS RSP REASSOC + * 23 - MLM WT ADD BSS RSP PREASSOC + * 24 - MLM WT ADD STA RSP + * 25 - MLM WT DEL STA RSP + * 26 - MLM WT ASSOC DEL STA RSP + * 27 - MLM WT SET BSS KEY + * 28 - MLM WT SET STA KEY + * 29 - MLM WT SET STA BCASTKEY + * 30 - MLM WT ADDBA RSP + * 31 - MLM WT REMOVE BSS KEY + * 32 - MLM WT REMOVE STA KEY + * 33 - MLM WT SET 
MIMOPS + * + * Status: offset: 12 length: 2 + * Reason Code: offset: 14 length: 2 + * + * Supported Feature: STA + * + * + */ + + EVENT_WLAN_PE, /* 16 byte payload */ + + /* Events between 0x67b to 0x67f are not used */ + + /* + * + * EVENT_WLAN_BRINGUP_STATUS + * @ wlanStatus: Describe wlan status + * @ driverVersion: Driver version between 0 to 9 + * + * This event is used in BRINGUP to send wlan status + * Values for parameters are defined below: + * WLAN Status: offset: 0 length: 2 + * 0 - WLAN Disabled + * 1 - WLAN Enabled + * 2 - Reset Fail + * 3 - Reset Success + * 4 - Device Removed + * 5 - Device Inserted + * 6 - Driver Unloaded + * 7 - Driver Loaded + * + * driverVersion: offset: 2 length: 10 + * + * Supported Feature: Bringup + * + * + */ + + EVENT_WLAN_BRINGUP_STATUS = 0x680, /* 12 byte payload */ + + /* + * + * EVENT_WLAN_POWERSAVE_GENERIC + * @ event_subtype: Event subtype + * @ full_power_request_reason: Full power request reason + * @ pmc_current_state: Pmc current state + * @ enable_disable_powersave_mode: Enable disable powersave mode + * @ winmob_d_power_state: winmob d power state + * @ dtim_period: DTIM period + * @ final_listen_intv: Final listen int + * @ bmps_auto_timer_duration: BMPS auto timer duration + * @ bmps_period: BMPS period + * + * This event is used in POWERSAVE to send wlan status + * Values for parameters are defined below: + * Event Sub Type: offset: 0 length: 1 + * Full Power Req Reason: offset: 1 length: 1 + * PMC Current State: offset: 2 length: 1 + * Enable disable powersave mode: 3 length: 1 + * Winmob D Power State: offset: 4 length: 1 + * DTIM Period: offset:5 length: 1 + * Final Listen INTV: offset:6 length: 2 + * BMPS Auto Timer Duration: 8 length: 2 + * BMPS Period: offset: 10 length:2 + * + * Supported Feature: POWERSAVE GENERIC + * + * + */ + + EVENT_WLAN_POWERSAVE_GENERIC, /* 16 byte payload */ + + /* + * + * EVENT_WLAN_POWERSAVE_WOW + * @ event_subtype: Event subtype + * @ wow_type: Wow type + * @ 
wow_magic_pattern: It will use pattern from 0 to 5 + * @ wow_del_ptrn_id: Wow delete pattern id + * @ wow_wakeup_cause: Wow wakeup cause + * @ wow_wakeup_cause_pbm_ptrn_id: Wow wakeup cause pbm pattern id + * + * This event is used in POWERSAVE WOW to send the wow wakeup pattern, + * cause etc + * Values for parameters are defined below: + * Event Sub Type: offset: 0 length: 1 + * 0 - Enter + * 1 - Exit + * 2 - Del Pattern + * 3 - Wakeup + * + * WOW Type: offset: 1 length: 1 + * 0 - None + * 1 - Magic Pkt Only + * 2 - Byte Match Only + * 3 - Magic Pkt Byte Match + * + * WOW Magic Pattern: offset:2 length: 6 + * WOW Del Pattern ID: offset:8 length: 1 + * WOW Wakeup Cause: offset: 9 length: 1 + * 0 - Magic Pkt Match + * 1 - Ptrn Byte Match + * WOW Wakeup Cause PBM Ptrn ID: offset: 10 length: 1 + * + * Supported Feature: Powersave wow + * + * + */ + + EVENT_WLAN_POWERSAVE_WOW, /* 11 byte payload */ + + /* Events between 0x683 to 0x690 are not used */ + + /* + * + * EVENT_WLAN_BTC + * @ eventId: Event id + * @ btAddr: BT address + * @ connHandle: Connection handle + * @ connStatus: Connection status + * @ linkType: Link Type + * @ scoInterval: Synchronous Connection Oriented interval + * @ scoWindow: Synchronous Connection Oriented window + * @ retransWindow: Retransmission window + * @ mode: Mode + * + * This event is used in Bluetooth to send the btc status + * Values for parameters are defined below: + * Event ID: offset: 0 length: 1 + * 0 - DEVICE SWITCHED ON + * 1 - DEVICE SWITCHED OFF + * 2 - INQUIRY STARTED + * 3 - INQUIRY STOPPED + * 4 - PAGE STARTED + * 5 - PAGE STOPPED + * 6 - CREATE ACL CONNECTION + * 7 - ACL CONNECTION COMPLETE + * 8 - CREATE SYNC CONNECTION + * 9 - SYNC CONNECTION COMPLETE + * 10 - SYNC CONNECTION UPDATED + * 11 - DISCONNECTION COMPLETE + * 12 - MODE CHANGED + * 13 - A2DP STREAM START + * 14 - A2DP STREAM STOP + * + * BT Addr[0]: offset: 1 length: 1 + * BT Addr[1]: offset: 2 length: 1 + * BT Addr[2]: offset: 3 length: 1 + * BT Addr[3]: 
offset: 4 length: 1 + * BT Addr[4]: offset: 5 length: 1 + * BT Addr[5]: offset: 6 length: 1 + * + * Conn Handle: offset: 7 length: 2 + * 65535 - Invalid + * + * Conn Status: offset:9 length: 1 + * 0 - Fail + * 1 - success + * + * Link Type: offset: 10 length: 1 + * 0 - SCO + * 1 - ACL + * 2 - ESCO + * + * Sco Interval: offset: 11 length: 1 + * Sco Window: offset: 12 length: 1 + * Retrans Window: offset: 13 length: 1 + * + * Mode: offset: 14 length: 1 + * 0 - Active + * 1 - Hold + * 2 - Sniff + * 3 - Park + * + * Supported Feature: Bluetooth + * + * + */ + + EVENT_WLAN_BTC = 0x691, /* 15 byte payload */ + + /* + * + * EVENT_WLAN_EAPOL + * @ event_sub_type: 0-Transmitted, 1-Received + * @ eapol_packet_type: 0 - EAP Start, 1 - EAPOL Start, 2 - EAPOL + * Logoff, 3 - EAPOL Key, 4 - EAPOL Encapsulated Alert + * @ eapol_key_info: This field from the driver is in big endian format + * @ eapol_rate: Rate at which the frame is received + * @ dest_addr: Destination address + * * @ src_addr: Source address + * + * This event is used to send Extensible Authentication Protocol + * information + * Values for parameters are defined below: + * event_sub_type: offset: 0 length: 1 + * eapol_packet_type: offset: 1 length: 1 + * eapol_key_info: offset:2 length: 2 + * eapol_rate: offset: 4 length: 2 + * dest_addr[0]: offset: 6 length: 1 + * dest_addr[1]: offset: 7 length: 1 + * dest_addr[2]: offset: 8 length: 1 + * dest_addr[3]: offset: 9 length: 1 + * dest_addr[4]: offset: 10 length: 1 + * dest_addr[5]: offset: 11 length: 1 + * src_addr[0]: offset: 12 length: 1 + * src_addr[1]: offset: 13 length: 1 + * src_addr[2]: offset: 14 length: 1 + * src_addr[3]: offset: 15 length: 1 + * src_addr[4]: offset: 16 length: 1 + * src_addr[5]: offset: 17 length: 1 + * + * Supported Feature: Extensible Authentication Protocol + * + * + */ + + EVENT_WLAN_EAPOL = 0xA8D,/* 18 bytes payload */ + + /* + * + * EVENT_WLAN_WAKE_LOCK + * @ status: Whether the wakelock is taken/released + * @ reason: Reason for 
taking this wakelock + * @ timeout: Timeout value in case of timed wakelocks + * @ name_len: Length of the name of the wakelock that will follow + * @ name: Name of the wakelock + * + * This event is used to send wakelock information + * Values for parameters are defined below: + * status: offset: 0 length: 4 + * reason: offset: 4 length: 4 + * timeout: offset: 8 length: 4 + * name_len: offset: 12 length: 4 + * + * Supported Feature: wlan wakelock + * + * + */ + + EVENT_WLAN_WAKE_LOCK = 0xAA2, /* 96 bytes payload */ + EVENT_WLAN_BEACON_RECEIVED = 0xAA6, /* FW event: 2726 */ + + /* + * + * EVENT_WLAN_LOG_COMPLETE + * @ is_fatal: Indicates if the event is fatal or not + * @ indicator: Source of the bug report - Framework/Host/Firmware + * @ reason_code: Reason for triggering bug report + * @ reserved: Reserved field + * + * This event is used to send log completion related information + * Values for parameters are defined below: + * is_fatal: offset: 0 length: 4 + * indicator: offset: 4 length: 4 + * reason_code: offset: 8 length: 4 + * reserved: offset: 12 length: 4 + * + * Supported Feature: Logging + * + * + */ + + EVENT_WLAN_LOG_COMPLETE = 0xAA7, /* 16 bytes payload */ + + /* + * + * EVENT_WLAN_STATUS_V2 + * @ event_id: Event id + * @ ssid: Network SSID + * @ bssType: BSS Type + * @ rssi: RSSI + * @ channel: Channel Numbers + * @ qosCapability: quality of service capability + * @ authType: Authentication type + * @ encryptionType: Encryption type + * @ reason: Reason for triggering status + * @ reasonDisconnect:Reason for disconnection + * + * This event is used to send varius wlan status + * Values for parameters are defined below: + * eventId: offset: 0 length: 1 + * ssid[0] - ssid[31]: offset: 1 to 32, length: 1 + * bssType: offset: 33 length: 1 + * rssi: offset: 34 length: 1 + * channel: offset: 35 length: 1 + * qosCapability: offset: 36 length: 1 + * authType: offset: 37 length: 1 + * encryptionType: offset: 38 length: 1 + * reason: offset: 39 length: 1 + * 
reasonDisconnect: offset: 40 length: 1 + * + * Supported Feature: Wlan status + * + * + */ + + EVENT_WLAN_STATUS_V2 = 0xAB3, + + /* + * + * EVENT_WLAN_TDLS_TEARDOWN + * @ reason: reason for tear down. + * @peer_mac: Peer mac address + * + * + * This event is sent when TDLS tear down happens. + * + * Supported Feature: TDLS + * + * + */ + EVENT_WLAN_TDLS_TEARDOWN = 0xAB5, + + /* + * + * EVENT_WLAN_TDLS_ENABLE_LINK + * @peer_mac: peer mac + * @is_off_chan_supported: If peer supports off channel + * @is_off_chan_configured: If off channel is configured + * @is_off_chan_established: If off channel is established + * + * + * This event is sent when TDLS enable link happens. + * + * Supported Feature: TDLS + * + * + */ + EVENT_WLAN_TDLS_ENABLE_LINK = 0XAB6, + + /* + * + * EVENT_WLAN_SUSPEND_RESUME + * @ state: suspend/resume state + * + * This event is used to send suspend resume info + * Values for parameters are defined below: + * suspend: offset: 0 length: 1 + * 0 - HDD_WLAN_EARLY_SUSPEND + * 1 - HDD_WLAN_SUSPEND + * 2 - HDD_WLAN_EARLY_RESUME + * 3 - HDD_WLAN_RESUME + * + * Supported Feature: suspend/resume + * + * + */ + + EVENT_WLAN_SUSPEND_RESUME = 0xAB7, + + /* + * + * EVENT_WLAN_OFFLOAD_REQ + * @ offload_type: offload type + * @ state: enabled or disabled state + * + * This event is used to send offload info + * Values for parameters are defined below: + * offloadType: offset: 0 length: 1 + * 0 - SIR_IPV4_ARP_REPLY_OFFLOAD + * 1 - SIR_IPV6_NEIGHBOR_DISCOVERY_OFFLOAD + * 2 - SIR_IPV6_NS_OFFLOAD + * + * enableOrDisable: offset: 1 length: 1 + * 0 - SIR_OFFLOAD_DISABLE + * 1 - SIR_OFFLOAD_ENABLE + * + * Supported Feature: offload + * + * + */ + + EVENT_WLAN_OFFLOAD_REQ = 0xAB8, + + /* + * + * EVENT_TDLS_SCAN_BLOCK + * @status: rejected status + * + * + * This event is sent when scan is rejected due to TDLS. 
+ * + * Supported Feature: TDLS + * + * + */ + EVENT_TDLS_SCAN_BLOCK = 0xAB9, + + /* + * + * EVENT_WLAN_TDLS_TX_RX_MGMT + * @event_id: event id + * @tx_rx: tx or rx + * @type: type of frame + * @action_sub_type: action frame type + * @peer_mac: peer mac + * + * + * This event is sent when TDLS mgmt rx tx happens. + * + * Supported Feature: TDLS + * + * + */ + EVENT_WLAN_TDLS_TX_RX_MGMT = 0xABA, + + /* + * + * EVENT_WLAN_LOW_RESOURCE_FAILURE + * @ WIFI_EVENT_MEMORY_FAILURE: Memory failure + * + * This event is used to send reason why low resource situation + * is observed + * + * Supported Feature: Memory + * + * + */ + + EVENT_WLAN_LOW_RESOURCE_FAILURE = 0xABB, + + /* + * + * EVENT_WLAN_POWERSAVE_WOW_STATS + * @ wow_ucast_wake_up_count: send unicast packet count + * @ wow_bcast_wake_up_count: send broadcast packet count + * @ wow_ipv4_mcast_wake_up_coun: send ipv4 multicast packet count + * @ wow_ipv6_mcast_wake_up_count: send ipv6 multicast packet count + * @ wow_ipv6_mcast_ra_stats: send ipv6 multicast ra packet count + * @ wow_ipv6_mcast_ns_stats: send ipv6 multicast ns packet count + * @ wow_ipv6_mcast_na_stats: send ipv6 multicast na packet count + * @ wow_pno_match_wake_up_count: preferred network offload match count + * @ wow_pno_complete_wake_up_count: preferred network offload complete + * @ wow_gscan_wake_up_count:Reason: send external scan packet count + * @ wow_low_rssi_wake_up_count: send low rssi packet count + * @ wow_rssi_breach_wake_up_count: send rssi breach packet count + * @ wow_icmpv4_count: Send icmpv4 packet count + * @ wow_icmpv6_count: send icmpv6 packet count + * @ wow_oem_response_wake_up_count: Send oem response packet count + * + * This event is used to send wow wakeup stats information + * + * Supported Feature: Wlan powersave wow + * + * + */ + EVENT_WLAN_POWERSAVE_WOW_STATS = 0xB33, + + /* + * + * EVENT_WLAN_STA_KICKOUT + * @reasoncode: Indicates the reasoncode of event + * @peer_macaddr: Indicates the peer macaddr + * @vdev_id: 
Indicate unique id for identifying the VDEV + * + * This event is used to send sta kickout information + * Values for parameters are defined below: + * Reasoncode: offset: 0 length: 4 + * Peer macaddr: offset: 4 length: 6 + * VDEV ID: offset: 10 length 1 + * + * Supported Feature: STA + * + * + */ + + EVENT_WLAN_STA_KICKOUT = 0xB39, + + /* + * + * EVENT_WLAN_STA_DATASTALL + * @reason: Indicates the reason of event + * + * This event is used to send sta datastall information + * Values for parameters are defined below: + * Reason: offset:0 length: 4 + * + * Supported Feature: STA + * + * + */ + + EVENT_WLAN_STA_DATASTALL = 0xB3A, + + /* + * + * EVENT_WLAN_SOFTAP_DATASTALL + * @reason: Indicates the reason of event + * + * This event is used to send SAP datastall information + * Values for parameters are defined below: + * Reason: offset:0 length: 4 + * + * Supported Feature: SAP + * + * + */ + + EVENT_WLAN_SOFTAP_DATASTALL = 0xB3B, + + /* + * + * EVENT_WLAN_SSR_REINIT_SUBSYSTEM + * @status: Indicates the status of event + * + * This event is used to send ssr reinit status + * Values for parameters are defined below: + * Status: offset: 0 length: 4 + * + * Supported Feature: SSR + * + * + */ + + EVENT_WLAN_SSR_REINIT_SUBSYSTEM = 0xB3C, + + /* + * + * EVENT_WLAN_SSR_SHUTDOWN_SUBSYSTEM + * @status: Indicates the status of event + * + * This event is used to send ssr shutdown status + * Values for parameters are defined below: + * Status: offset: 0 length: 4 + * + * Supported Feature: SSR + * + * + */ + + EVENT_WLAN_SSR_SHUTDOWN_SUBSYSTEM = 0xB3D, + EVENT_WLAN_ACS_REQ = 0xC4A, + EVENT_WLAN_ACS_SCAN_START = 0xC4B, + EVENT_WLAN_ACS_SCAN_DONE = 0xC4C, + EVENT_WLAN_ACS_CHANNEL_SPECTRAL_WEIGHT = 0xC4D, + EVENT_WLAN_ACS_BEST_CHANNEL = 0xC4E, + EVENT_WLAN_HOST_MGMT_TX_V2 = 0xC52, + EVENT_WLAN_HOST_MGMT_RX_V2 = 0xC53, + EVENT_WLAN_CONN_STATS_V2 = 0xC56, + + /* + * + * EVENT_WLAN_RSN_INFO + * @akm_suite: Gives information about akm suites used in assoc request + * @ucast_cipher: 
Unicast cipher used in assoc request + * @mcast_cipher: Multi cast cipher used in assoc request + * @group_mgmt: Requested group mgmt cipher suite + * + * This event is used to send RSN information used + * in assoc request. + * + * Supported Feature: STA + * + * + */ + + EVENT_WLAN_RSN_INFO = 0xC5B, + + + EVENT_MAX_ID = 0x0FFF +} event_id_enum_type; + +#endif /* EVENT_DEFS_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/log_codes.h b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/log_codes.h new file mode 100644 index 0000000000000000000000000000000000000000..9f3fe6c0cf671071552aa470b42eca1fe25a0565 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/inc/log_codes.h @@ -0,0 +1,2068 @@ +/* + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef LOG_CODES_H +#define LOG_CODES_H + +/*=========================================================================== + + Log Code Definitions + + General Description + This file contains log code definitions and is shared with the tools. 
+ + ===========================================================================*/ + +/* DO NOT MODIFY THIS FILE WITHOUT PRIOR APPROVAL +** +** Log codes, by design, are a tightly controlled set of values. +** Developers may not create log codes at will. +** +** Request new logs using the following process: +** +** 1. Send email to asw.diag.request requesting log codassignments. +** 2. Identify the log needed by name. +** 3. Provide a brief description for the log. +** +*/ + +/*=========================================================================== + + Edit History + + $Header: //source/qcom/qct/core/services/diag/api/inc/main/latest/log_codes.h#9 $ + + when who what, where, why + -------- --- ---------------------------------------------------------- + 07/30/09 dhao Consolidate log_codes_apps.h + 07/30/09 dhao Add Last log code definition for Equip ID 11 + 06/26/09 dhao Update format the macro + 06/24/09 sar Reverted last change. + 06/24/09 sar Added log code for LOG_MC_STM_C. + 11/02/01 sfh Featurize common NAS log codes for UMTS. + 10/30/01 sfh Added log code for LOG_GPS_FATPATH_INFO_C. + 10/24/01 sfh Added updates for UMTS equipment ID and log codes. + 06/27/01 lad Added multiple equipment ID support. + 05/22/01 sfh Reserved log codes 158 - 168. + 05/21/01 sfh Keep confusing XXX_BASE_C names for backwards compatibility. + 05/16/01 sfh Reserved log code 155. + 05/08/01 sfh Reserved log codes 150 - 154. + 04/06/01 lad Added definitions of base IDs (such as LOG_WCDMA_BASE_C). + This is currently using temporary ID values in the 0x1000 + range. + 02/23/01 lad Created file from DMSS log.h. 
Log codes only + + ===========================================================================*/ +#include + +/* ------------------------------------------------------------------------- + * Data Declarations + * ------------------------------------------------------------------------- */ + +/* ------------------------------------------------------------------------- + * Log equipment IDs. + * The most significant 4 bits of the 16 bit log code is the equipment ID. + * Orignally, the mobile was to have an ID, and base stations and other + * IDs. As QCT technology diversifies, new equipment IDs are assigned to new + * technology areas. 0x2000 and 0x3000 are reserved for legacy reasons, so + * the first + * addition starts at 0x4000. + * ------------------------------------------------------------------------- */ + +#define LOG_1X_BASE_C ((uint16_t) 0x1000) +#define LOG_WCDMA_BASE_C ((uint16_t) 0x4000) +#define LOG_GSM_BASE_C ((uint16_t) 0x5000) +#define LOG_LBS_BASE_C ((uint16_t) 0x6000) +#define LOG_UMTS_BASE_C ((uint16_t) 0x7000) +#define LOG_TDMA_BASE_C ((uint16_t) 0x8000) +#define LOG_DTV_BASE_C ((uint16_t) 0xA000) +#define LOG_APPS_BASE_C ((uint16_t) 0xB000) +#define LOG_LTE_BASE_C ((uint16_t) (0xB000 + 0x0010)) +#define LOG_LTE_LAST_C ((uint16_t) 0xB1FF) +#define LOG_WIMAX_BASE_C ((uint16_t) 0xB400) +#define LOG_DSP_BASE_C ((uint16_t) 0xC000) + +#define LOG_TOOLS_BASE_C ((uint16_t) 0xF000) + +/* LOG_BASE_C is what was used before expanding the use of the equipment ID. + * TODO: Once all targets are using the "core" diag system, this should be + * omitted. */ +#define LOG_BASE_C LOG_1X_BASE_C + +/* ------------------------------------------------------------------------- + * Log Codes + * These codes identify the kind of information contained in a log entry. + * They are used in conjunction with the 'code' field of the log entry + * header. The data types associated with each code are defined below. 
+ * ------------------------------------------------------------------------- */ + +/* The upper 4 bits of the 16 bit log entry code specify which type + * of equipment created the log entry. */ + +/* 0 Mobile Station temporal analyzer entry */ +#define LOG_TA_C (0x0 + LOG_1X_BASE_C) + +/* 1 AGC values and closed loop power control entry */ +#define LOG_AGC_PCTL_C (0x1 + LOG_1X_BASE_C) + +/* 2 Forward link frame rates and types entry */ +#define LOG_F_MUX1_C (0x2 + LOG_1X_BASE_C) + +/* 3 Reverse link frame rates and types entry */ +#define LOG_R_MUX1_C (0x3 + LOG_1X_BASE_C) + +/* 4 Access channel message entry */ +#define LOG_AC_MSG_C (0x4 + LOG_1X_BASE_C) + +/* 5 Reverse link traffic channel message entry */ +#define LOG_R_TC_MSG_C (0x5 + LOG_1X_BASE_C) + +/* 6 Sync channel message entry */ +#define LOG_SC_MSG_C (0x6 + LOG_1X_BASE_C) + +/* 7 Paging channel message entry */ +#define LOG_PC_MSG_C (0x7 + LOG_1X_BASE_C) + +/* 8 Forward link traffic channel message entry */ +#define LOG_F_TC_MSG_C (0x8 + LOG_1X_BASE_C) + +/* 9 Forward link vocoder packet entry */ +#define LOG_VOC_FOR_C (0x9 + LOG_1X_BASE_C) + +/* 10 Reverse link vocoder packet entry */ +#define LOG_VOC_REV_C (0xA + LOG_1X_BASE_C) + +/* 11 Temporal analyzer finger info only */ +#define LOG_FING_C (0xB + LOG_1X_BASE_C) + +/* 12 Searcher pathlog info (Reused old SRCH logtype) */ +#define LOG_SRCH_C (0xC + LOG_1X_BASE_C) + +/* 13 Position and speed information read from ETAK */ +#define LOG_ETAK_C (0xD + LOG_1X_BASE_C) + +/* 14 Markov frame statistics */ +#define LOG_MAR_C (0xE + LOG_1X_BASE_C) + +/* 15 New and improved temporal analyzer searcher info */ +#define LOG_SRCH2_C (0xF + LOG_1X_BASE_C) + +/* 16 The Fujitsu handset information */ +#define LOG_HANDSET_C (0x10 + LOG_1X_BASE_C) + +/* 17 Vocoder bit error rate mask */ +#define LOG_ERRMASK_C (0x11 + LOG_1X_BASE_C) + +/* 18 Analog voice channel information */ +#define LOG_ANALOG_INFO_C (0x12 + LOG_1X_BASE_C) + +/* 19 Access probe information */ 
+#define LOG_ACC_INFO_C (0x13 + LOG_1X_BASE_C) + +/* 20 Position & speed info read from GPS receiver */ +#define LOG_GPS_C (0x14 + LOG_1X_BASE_C) + +/* 21 Test Command information */ +#define LOG_TEST_CMD_C (0x15 + LOG_1X_BASE_C) + +/* 22 Sparse (20ms) AGC / closed loop power control entry */ +#define LOG_S_AGC_PCTL_C (0x16 + LOG_1X_BASE_C) + +/* 23 Notification of a band class change */ +#define LOG_BAND_CHANGE_C (0x17 + LOG_1X_BASE_C) + +/* 24 DM debug messages, if being logged via log services */ +#define LOG_DBG_MSG_C (0x18 + LOG_1X_BASE_C) + +/* 25 General temporal analyzer entry */ +#define LOG_GENRL_TA_C (0x19 + LOG_1X_BASE_C) + +/* 26 General temporal analyzer w/supplemental channels */ +#define LOG_GENRL_TA_SUP_CH_C (0x1A + LOG_1X_BASE_C) + +/* Featurization Removal requested by CMI + #ifdef FEATURE_PLT + */ + +/* 27 Decoder raw bits logging */ +#define LOG_PLT_C (0x1B + LOG_1X_BASE_C) + +/* Featurization Removal requested by CMI + #else + 27 EFS Usage Info - No implementation as yet + #define LOG_EFS_INFO_C (0x1B + LOG_1X_BASE_C) + #endif + */ + +/* 28 Analog Forward Channel */ +#define LOG_ANALOG_FORW_C (0x1C + LOG_1X_BASE_C) + +/* 29 Analog Reverse Channel */ +#define LOG_ANALOG_REVS_C (0x1D + LOG_1X_BASE_C) + +/* 30 Analog Handoff Entry */ +#define LOG_ANALOG_HANDOFF_C (0x1E + LOG_1X_BASE_C) + +/* 31 FM Slot Statistis entry */ +#define LOG_ANALOG_FMSLOT_C (0x1F + LOG_1X_BASE_C) + +/* 32 FOCC Word Sync Count entry */ +#define LOG_ANALOG_WS_COUNT_C (0x20 + LOG_1X_BASE_C) + +/* 33 */ +#define LOG_RLP_PACKET_C (0x21 + LOG_1X_BASE_C) + +/* 34 */ +#define LOG_ASYNC_TCP_SEG_C (0x22 + LOG_1X_BASE_C) + +/* 35 */ +#define LOG_PACKET_DATA_IP_PACKETS_C (0x23 + LOG_1X_BASE_C) + +/* 36 */ +#define LOG_FNBDT_MESSAGE_LOG_C (0x24 + LOG_1X_BASE_C) + +/* Begin IS-2000 LOG features */ + +/* 37 RLP RX Frames logging */ +#define LOG_RLP_RX_FRAMES_C (0x25 + LOG_1X_BASE_C) + +/* 38 RLP TX Frames logging */ +#define LOG_RLP_TX_FRAMES_C (0x26 + LOG_1X_BASE_C) + +/* 39 Reserved 
for additions to RLP frames */ +#define LOG_RLP_RSVD1_C (0x27 + LOG_1X_BASE_C) + +/* 40 Reserved for additions to RLP frames */ +#define LOG_RLP_RSVD2_C (0x28 + LOG_1X_BASE_C) + +/* 41 Forward Link Frame Types logging */ +#define LOG_FWD_FRAME_TYPES_C (0x29 + LOG_1X_BASE_C) + +/* 42 Reverse Link Frame Types logging */ +#define LOG_REV_FRAME_TYPES_C (0x2A + LOG_1X_BASE_C) + +/* 43 Fast Forward Power Control Parameters logging */ +#define LOG_FFWD_PCTRL_C (0x2B + LOG_1X_BASE_C) + +/* 44 Reverse Power Control Parameters logging */ +#define LOG_REV_PCTRL_C (0x2C + LOG_1X_BASE_C) + +/* 45 Searcher and Finger Information logging */ +#define LOG_SRCH_FING_INFO_C (0x2D + LOG_1X_BASE_C) + +/* 46 Service Configuration logging */ +#define LOG_SVC_CONFIG_C (0x2E + LOG_1X_BASE_C) + +/* 47 Active Set Configuration logging */ +#define LOG_ASET_CONFIG_C (0x2F + LOG_1X_BASE_C) + +/* 48 Quick Paging Channel logging */ +#define LOG_QPCH_C (0x30 + LOG_1X_BASE_C) + +/* 49 RLP Statistics logging */ +#define LOG_RLP_STAT_C (0x31 + LOG_1X_BASE_C) + +/* 50 Simple Test Data Service Option logging */ +#define LOG_STDSO_C (0x32 + LOG_1X_BASE_C) + +/* 51 Pilot Phase Measurement results logging */ +#define LOG_SRCH_PPM_RES_C (0x33 + LOG_1X_BASE_C) + +/* 52 Pilot Phase Measurement Data Base logging */ +#define LOG_SRCH_PPM_DB_C (0x34 + LOG_1X_BASE_C) + +/* 53 Pilot Phase Measurement search results logging */ +#define LOG_SRCH_PPM_C (0x35 + LOG_1X_BASE_C) + +/* 54 IS-801 forward link message */ +#define LOG_GPS_FWD_MSG_C (0x36 + LOG_1X_BASE_C) + +/* 55 IS-801 reverse link message */ +#define LOG_GPS_REV_MSG_C (0x37 + LOG_1X_BASE_C) + +/* 56 GPS search session statistics */ +#define LOG_GPS_STATS_MSG_C (0x38 + LOG_1X_BASE_C) + +/* 57 GPS search results */ +#define LOG_GPS_SRCH_PEAKS_MSG_C (0x39 + LOG_1X_BASE_C) + +/* 58 Factory Testmode logging */ +#define LOG_FTM_C (0x3A + LOG_1X_BASE_C) + +/* 59 Multiple Peak Logging */ +#define LOG_SRCH_GPS_MULTI_PEAKS_INFO_C (0x3B + LOG_1X_BASE_C) + +/* 60 
Post processed search results logs */ +#define LOG_SRCH_GPS_POST_PROC_C (0x3C + LOG_1X_BASE_C) + +/* 61 FULL Test Data Service Option logging */ +#define LOG_FTDSO_C (0x3D + LOG_1X_BASE_C) + +/* 62 Bluetooth logging */ +#define LOG_BT_RESERVED_CODES_BASE_C (0x3E + LOG_1X_BASE_C) +/* Keep confusing name for backwards compatibility. */ +#define LOG_BT_BASE_C LOG_BT_RESERVED_CODES_BASE_C + +/* 92 Bluetooth's last log code */ +#define LOG_BT_LAST_C (30 + LOG_BT_RESERVED_CODES_BASE_C) + +/* 93 HDR log codes */ +#define LOG_HDR_RESERVED_CODES_BASE_C (0x5D + LOG_1X_BASE_C) +/* Keep confusing name for backwards compatibility. */ +#define LOG_HDR_BASE_C LOG_HDR_RESERVED_CODES_BASE_C + +/* 143 is HDR's last log code */ +#define LOG_HDR_LAST_C (50 + LOG_HDR_RESERVED_CODES_BASE_C) + +/* 144 IS2000 DCCH Forward link channel */ +#define LOG_FOR_DCCH_MSG_C (0x90 + LOG_1X_BASE_C) +#define LOG_DCCH_FWD_C LOG_FOR_DCCH_MSG_C + +/* 145 IS2000 DCCH Forward link channel */ +#define LOG_REV_DCCH_MSG_C (0x91 + LOG_1X_BASE_C) +#define LOG_DCCH_REV_C LOG_REV_DCCH_MSG_C + +/* 146 IS2000 DCCH Forward link channel */ +#define LOG_ZREX_C (0x92 + LOG_1X_BASE_C) + +/* 147 Active set info logging, similar to ASET_CONFIG, but simpler. 
*/ +#define LOG_ASET_INFO_C (0x93 + LOG_1X_BASE_C) + +/* 148 Pilot Phase Measurement four-shoulder-search resutlts logging */ +#define LOG_SRCH_PPM_4SHOULDER_RES_C (0x94 + LOG_1X_BASE_C) + +/* 149 Extended Pilot Phase Measurement Data Base logging */ +#define LOG_SRCH_EXT_PPM_DB_C (0x95 + LOG_1X_BASE_C) + +/* 150 GPS Visit Parameters */ +#define LOG_GPS_VISIT_PARAMETERS_C (0x96 + LOG_1X_BASE_C) + +/* 151 GPS Measurement */ +#define LOG_GPS_MEASUREMENT_C (0x97 + LOG_1X_BASE_C) + +/* 152 UIM Data */ +#define LOG_UIM_DATA_C (0x98 + LOG_1X_BASE_C) + +/* 153 STDSO plus P2 */ +#define LOG_STDSO_P2_C (0x99 + LOG_1X_BASE_C) + +/* 154 FTDSO plus P2 */ +#define LOG_FTDSO_P2_C (0x9A + LOG_1X_BASE_C) + +/* 155 Search PPM Statistics */ +#define LOG_SRCH_PPM_STATS_C (0x9B + LOG_1X_BASE_C) + +/* 156 PPP Tx Frames */ +#define LOG_PPP_TX_FRAMES_C (0x9C + LOG_1X_BASE_C) + +/* 157 PPP Rx Frames */ +#define LOG_PPP_RX_FRAMES_C (0x9D + LOG_1X_BASE_C) + +/* 158-187 SSL reserved log codes */ +#define LOG_SSL_RESERVED_CODES_BASE_C (0x9E + LOG_1X_BASE_C) +#define LOG_SSL_LAST_C (29 + LOG_SSL_RESERVED_CODES_BASE_C) + +/* 188-199 Puma reserved log codes */ +/* 188 QPCH, version 2 */ +#define LOG_QPCH_VER_2_C (0xBC + LOG_1X_BASE_C) + +/* 189 Enhanced Access Probe */ +#define LOG_EA_PROBE_C (0xBD + LOG_1X_BASE_C) + +/* 190 BCCH Frame Information */ +#define LOG_BCCH_FRAME_INFO_C (0xBE + LOG_1X_BASE_C) + +/* 191 FCCCH Frame Information */ +#define LOG_FCCCH_FRAME_INFO_C (0xBF + LOG_1X_BASE_C) + +/* 192 FDCH Frame Information */ +#define LOG_FDCH_FRAME_INFO_C (0xC0 + LOG_1X_BASE_C) + +/* 193 RDCH Frame Information */ +#define LOG_RDCH_FRAME_INFO_C (0xC1 + LOG_1X_BASE_C) + +/* 194 FFPC Information */ +#define LOG_FFPC_INFO_C (0xC2 + LOG_1X_BASE_C) + +/* 195 RPC Information */ +#define LOG_RPC_INFO_C (0xC3 + LOG_1X_BASE_C) + +/* 196 Searcher and Finger Information */ +#define LOG_SRCH_FING_INFO_VER_2_C (0xC4 + LOG_1X_BASE_C) + +/* 197 Service Configuration, version 2 */ +#define 
LOG_SRV_CONFIG_VER_2_C (0xC5 + LOG_1X_BASE_C) + +/* 198 Active Set Information, version 2 */ +#define LOG_ASET_INFO_VER_2_C (0xC6 + LOG_1X_BASE_C) + +/* 199 Reduced Active Set */ +#define LOG_REDUCED_ASET_INFO_C (0xC7 + LOG_1X_BASE_C) + +/* 200 Search Triage Info */ +#define LOG_SRCH_TRIAGE_INFO_C (0xC8 + LOG_1X_BASE_C) + +/* 201 RDA Frame Information */ +#define LOG_RDA_FRAME_INFO_C (0xC9 + LOG_1X_BASE_C) + +/* 202 gpsOne fatpath information */ +#define LOG_GPS_FATPATH_INFO_C (0xCA + LOG_1X_BASE_C) + +/* 203 Extended AGC */ +#define LOG_EXTENDED_AGC_C (0xCB + LOG_1X_BASE_C) + +/* 204 Transmit AGC */ +#define LOG_TRANSMIT_AGC_C (0xCC + LOG_1X_BASE_C) + +/* 205 I/Q Offset registers */ +#define LOG_IQ_OFFSET_REGISTERS_C (0xCD + LOG_1X_BASE_C) + +/* 206 DACC I/Q Accumulator registers */ +#define LOG_DACC_IQ_ACCUMULATOR_C (0xCE + LOG_1X_BASE_C) + +/* 207 Register polling results */ +#define LOG_REGISTER_POLLING_RESULTS_C (0xCF + LOG_1X_BASE_C) + +/* 208 System arbitration module */ +#define LOG_AT_SAM_C (0xD0 + LOG_1X_BASE_C) + +/* 209 Diablo searcher finger log */ +#define LOG_DIABLO_SRCH_FING_INFO_C (0xD1 + LOG_1X_BASE_C) + +/* 210 log reserved for dandrus */ +#define LOG_SD20_LAST_ACTION_C (0xD2 + LOG_1X_BASE_C) + +/* 211 log reserved for dandrus */ +#define LOG_SD20_LAST_ACTION_HYBRID_C (0xD3 + LOG_1X_BASE_C) + +/* 212 log reserved for dandrus */ +#define LOG_SD20_SS_OBJECT_C (0xD4 + LOG_1X_BASE_C) + +/* 213 log reserved for dandrus */ +#define LOG_SD20_SS_OBJECT_HYBRID_C (0xD5 + LOG_1X_BASE_C) + +/* 214 log reserved for jpinos */ +#define LOG_BCCH_SIGNALING_C (0xD6 + LOG_1X_BASE_C) + +/* 215 log reserved for jpinos */ +#define LOG_REACH_SIGNALING_C (0xD7 + LOG_1X_BASE_C) + +/* 216 log reserved for jpinos */ +#define LOG_FCCCH_SIGNALING_C (0xD8 + LOG_1X_BASE_C) + +/* 217 RDA Frame Information 2 */ +#define LOG_RDA_FRAME_INFO_2_C (0xD9 + LOG_1X_BASE_C) + +/* 218 */ +#define LOG_GPS_BIT_EDGE_RESULTS_C (0xDA + LOG_1X_BASE_C) + +/* 219 */ +#define LOG_PE_DATA_C (0xDB + 
LOG_1X_BASE_C) + +/* 220 */ +#define LOG_PE_PARTIAL_DATA_C (0xDC + LOG_1X_BASE_C) + +/* 221 */ +#define LOG_GPS_SINGLE_PEAK_SRCH_RESULTS_C (0xDD + LOG_1X_BASE_C) + +/* 222 */ +#define LOG_SRCH4_SAMPRAM_C (0xDE + LOG_1X_BASE_C) + +/* 223 */ +#define HDR_AN_PPP_TX_FRAMES (0xDF + LOG_1X_BASE_C) + +/* 224 */ +#define HDR_AN_PPP_RX_FRAMES (0xE0 + LOG_1X_BASE_C) + +/* 225 */ +#define LOG_GPS_SCHEDULER_TRACE_C (0xE1 + LOG_1X_BASE_C) + +/* 226 */ +#define LOG_MPEG4_YUV_FRAME_C (0xE2 + LOG_1X_BASE_C) + +/* 227 */ +#define LOG_MPEG4_CLIP_STATS_C (0xE3 + LOG_1X_BASE_C) + +/* 228 */ +#define LOG_MPEG4_CLIP_STATS_VER2_C (0xE4 + LOG_1X_BASE_C) + +/* 226-241 MMEG reserved. */ +#define LOG_MPEG_RESERVED_CODES_BASE_C (0xF1 + LOG_1X_BASE_C) + +/* 242-274 BREW reserved log range */ +#define LOG_BREW_RESERVED_CODES_BASE_C (0xF2 + LOG_1X_BASE_C) +#define LOG_BREW_LAST_C (32 + LOG_BREW_RESERVED_CODES_BASE_C) + +/* 275-339 PPP Extended Frames */ +#define LOG_PPP_FRAMES_RESERVED_CODES_BASE_C (0x113 + LOG_1X_BASE_C) +#define LOG_PPP_FRAMES_LAST_C (64 + LOG_PPP_FRAMES_RESERVED_CODES_BASE_C) + +#define LOG_PPP_EXT_FRAMED_RX_UM_C (0x113 + LOG_1X_BASE_C) +#define LOG_PPP_EXT_FRAMED_RX_RM_C (0x114 + LOG_1X_BASE_C) +#define LOG_PPP_EXT_FRAMED_RX_AN_C (0x115 + LOG_1X_BASE_C) + +#define LOG_PPP_EXT_FRAMED_TX_UM_C (0x123 + LOG_1X_BASE_C) +#define LOG_PPP_EXT_FRAMED_TX_RM_C (0x124 + LOG_1X_BASE_C) +#define LOG_PPP_EXT_FRAMED_TX_AN_C (0x125 + LOG_1X_BASE_C) + +#define LOG_PPP_EXT_UNFRAMED_RX_UM_C (0x133 + LOG_1X_BASE_C) +#define LOG_PPP_EXT_UNFRAMED_RX_RM_C (0x134 + LOG_1X_BASE_C) +#define LOG_PPP_EXT_UNFRAMED_RX_AN_C (0x135 + LOG_1X_BASE_C) + +#define LOG_PPP_EXT_UNFRAMED_TX_UM_C (0x143 + LOG_1X_BASE_C) +#define LOG_PPP_EXT_UNFRAMED_TX_RM_C (0x144 + LOG_1X_BASE_C) +#define LOG_PPP_EXT_UNFRAMED_TX_AN_C (0x145 + LOG_1X_BASE_C) + +/* 340 LOG_PE_DATA_EXT_C */ +#define LOG_PE_DATA_EXT_C (0x154 + LOG_1X_BASE_C) + +/* REX Subsystem logs */ +#define LOG_MEMDEBUG_C (0x155 + LOG_1X_BASE_C) +#define 
LOG_SYSPROFILE_C (0x156 + LOG_1X_BASE_C) +#define LOG_TASKPROFILE_C (0x157 + LOG_1X_BASE_C) +#define LOG_COREDUMP_C (0x158 + LOG_1X_BASE_C) + +/* 341-349 REX subsystem logs */ +#define LOG_REX_RESERVED_CODES_BASE_C (0x155 + LOG_1X_BASE_C) +#define LOG_REX_LAST_C (8 + LOG_REX_RESERVED_CODES_BASE_C) + +/* 350 LOG_PE_PARTIAL_DATA_EXT_C */ +#define LOG_PE_PARTIAL_DATA_EXT_C (0x15E + LOG_1X_BASE_C) + +/* 351 LOG_DIAG_STRESS_TEST_C */ +#define LOG_DIAG_STRESS_TEST_C (0x15F + LOG_1X_BASE_C) + +/* 352 LOG_WMS_READ_C */ +#define LOG_WMS_READ_C (0x160 + LOG_1X_BASE_C) + +/* 353 Search Triage Info Version 2 */ +#define LOG_SRCH_TRIAGE_INFO2_C (0x161 + LOG_1X_BASE_C) + +/* 354 RLP Rx FDCH Frames */ +#define LOG_RLP_RX_FDCH_FRAMES_C (0x162 + LOG_1X_BASE_C) + +/* 355 RLP Tx FDCH Frames */ +#define LOG_RLP_TX_FDCH_FRAMES_C (0x163 + LOG_1X_BASE_C) + +/* 356-371 QTV subsystem logs */ +#define LOG_QTV_RESERVED_CODES_BASE_C (0x164 + LOG_1X_BASE_C) +#define LOG_QTV_LAST_C (15 + LOG_QTV_RESERVED_CODES_BASE_C) + +/* 372 Searcher 4 1X */ +#define LOG_SRCH4_1X_C (0x174 + LOG_1X_BASE_C) + +/* 373 Searcher sleep statistics */ +#define LOG_SRCH_SLEEP_STATS_C (0x175 + LOG_1X_BASE_C) + +/* 374 Service Configuration, version 3 */ +#define LOG_SRV_CONFIG_VER_3_C (0x176 + LOG_1X_BASE_C) + +/* 375 Searcher 4 HDR */ +#define LOG_SRCH4_HDR_C (0x177 + LOG_1X_BASE_C) + +/* 376 Searcher 4 AFLT */ +#define LOG_SRCH4_AFLT_C (0x178 + LOG_1X_BASE_C) + +/* 377 Enhanced Finger Information */ +#define LOG_ENH_FING_INFO_C (0x179 + LOG_1X_BASE_C) + +/* 378 DV Information */ +#define LOG_DV_INFO_C (0x17A + LOG_1X_BASE_C) + +/* 379 WMS set routes information */ +#define LOG_WMS_SET_ROUTES_C (0x17B + LOG_1X_BASE_C) + +/* 380 FTM Version 2 Logs */ +#define LOG_FTM_VER_2_C (0x17C + LOG_1X_BASE_C) + +/* 381 GPS Multipeak logging */ +#define LOG_SRCH_GPS_MULTI_PEAKS_SIMPLIFIED_INFO_C (0x17D + LOG_1X_BASE_C) + +/* 382 GPS Multipeak logging */ +#define LOG_SRCH_GPS_MULTI_PEAKS_VERBOSE_INFO_C (0x17E + LOG_1X_BASE_C) + 
+/* 383-403 HDR reserved logs */ +#define LOG_HDR_RESERVED_CODES_BASE_2_C (0x17F + LOG_1X_BASE_C) +#define LOG_HDR_LAST_2_C (20 + LOG_HDR_RESERVED_CODES_BASE_2_C) + +/* RLP Rx - PDCH partial MuxPDU5 frames */ +#define LOG_RLP_RX_PDCH_PARTIAL_MUXPDU5_FRAMES_C (0x194 + LOG_1X_BASE_C) + +/* RLP Tx - PDCH partial MuxPDU5 frames */ +#define LOG_RLP_TX_PDCH_PARTIAL_MUXPDU5_FRAMES_C (0x195 + LOG_1X_BASE_C) + +/* RLP Rx internal details */ +#define LOG_RLP_RX_INTERNAL_DETAILS_C (0x196 + LOG_1X_BASE_C) + +/* RLP Tx internal details */ +#define LOG_RLP_TX_INTERNAL_DETAILS_C (0x197 + LOG_1X_BASE_C) + +/* MPEG4 Clip Statistics version 3 */ +#define LOG_MPEG4_CLIP_STATS_VER3_C (0x198 + LOG_1X_BASE_C) + +/* Mobile IP Performance */ +#define LOG_MOBILE_IP_PERFORMANCE_C (0x199 + LOG_1X_BASE_C) + +/* 410-430 Searcher reserved logs */ +#define LOG_SEARCHER_RESERVED_CODES_BASE_C (0x19A + LOG_1X_BASE_C) +#define LOG_SEARCHER_LAST_2_C (21 + LOG_SEARCHER_RESERVED_CODES_BASE_C) + +/* 432-480 QTV reserved logs */ +#define LOG_QTV2_RESERVED_CODES_BASE_C (0x1B0 + LOG_1X_BASE_C) +#define LOG_QTV2_LAST_C (48 + LOG_QTV2_RESERVED_CODES_BASE_C) + +#define LOG_QTV_PDS2_STATS (0x1B6 + LOG_1X_BASE_C) +#define LOG_QTV_PDS2_GET_REQUEST (0x1B7 + LOG_1X_BASE_C) +#define LOG_QTV_PDS2_GET_RESP_HEADER (0x1B8 + LOG_1X_BASE_C) +#define LOG_QTV_PDS2_GET_RESP_PCKT (0x1B9 + LOG_1X_BASE_C) +#define LOG_QTV_CMX_AUDIO_INPUT_DATA_C (0x1BA + LOG_1X_BASE_C) +#define LOG_QTV_RTSP_OPTIONS_C (0x1BB + LOG_1X_BASE_C) +#define LOG_QTV_RTSP_GET_PARAMETER_C (0x1BC + LOG_1X_BASE_C) +#define LOG_QTV_RTSP_SET_PARAMETER_C (0x1BD + LOG_1X_BASE_C) +#define LOG_QTV_VIDEO_BITSTREAM (0x1BE + LOG_1X_BASE_C) +#define LOG_ARM_VIDEO_DECODE_STATS (0x1BF + LOG_1X_BASE_C) +#define LOG_QTV_DSP_SLICE_BUFFER_C (0x1C0 + LOG_1X_BASE_C) +#define LOG_QTV_CMD_LOGGING_C (0x1C1 + LOG_1X_BASE_C) +#define LOG_QTV_AUDIO_FRAME_PTS_INFO_C (0x1C2 + LOG_1X_BASE_C) +#define LOG_QTV_VIDEO_FRAME_DECODE_INFO_C (0x1C3 + LOG_1X_BASE_C) +#define 
LOG_QTV_RTCP_COMPOUND_RR_C (0x1C4 + LOG_1X_BASE_C) +#define LOG_QTV_FRAME_BUFFER_RELEASE_REASON_C (0x1C5 + LOG_1X_BASE_C) +#define LOG_QTV_AUDIO_CHANNEL_SWITCH_FRAME_C (0x1C6 + LOG_1X_BASE_C) +#define LOG_QTV_RTP_DECRYPTED_PKT_C (0x1C7 + LOG_1X_BASE_C) +#define LOG_QTV_PCR_DRIFT_RATE_C (0x1C8 + LOG_1X_BASE_C) + +/* GPS PDSM logs */ +#define LOG_PDSM_POSITION_REPORT_CALLBACK_C (0x1E1 + LOG_1X_BASE_C) +#define LOG_PDSM_PD_EVENT_CALLBACK_C (0x1E2 + LOG_1X_BASE_C) +#define LOG_PDSM_PA_EVENT_CALLBACK_C (0x1E3 + LOG_1X_BASE_C) +#define LOG_PDSM_NOTIFY_VERIFY_REQUEST_C (0x1E4 + LOG_1X_BASE_C) +#define LOG_PDSM_RESERVED1_C (0x1E5 + LOG_1X_BASE_C) +#define LOG_PDSM_RESERVED2_C (0x1E6 + LOG_1X_BASE_C) + +/* Searcher Demodulation Status log */ +#define LOG_SRCH_DEMOD_STATUS_C (0x1E7 + LOG_1X_BASE_C) + +/* Searcher Call Statistics log */ +#define LOG_SRCH_CALL_STATISTICS_C (0x1E8 + LOG_1X_BASE_C) + +/* GPS MS-MPC Forward link */ +#define LOG_MS_MPC_FWD_LINK_C (0x1E9 + LOG_1X_BASE_C) + +/* GPS MS-MPC Reverse link */ +#define LOG_MS_MPC_REV_LINK_C (0x1EA + LOG_1X_BASE_C) + +/* Protocol Services Data */ +#define LOG_DATA_PROTOCOL_LOGGING_C (0x1EB + LOG_1X_BASE_C) + +/* MediaFLO reserved log codes */ +#define LOG_MFLO_RESERVED_CODES_BASE_C (0x1EC + LOG_1X_BASE_C) +#define LOG_MFLO_LAST_C (99 + LOG_MFLO_RESERVED_CODES_BASE_C) + +/* GPS demodulation tracking header info */ +#define LOG_GPS_DEMOD_TRACKING_HEADER_C (0x250 + LOG_1X_BASE_C) + +/* GPS demodulation tracking results */ +#define LOG_GPS_DEMOD_TRACKING_C (0x251 + LOG_1X_BASE_C) + +/* GPS bit edge logs from demod tracking */ +#define LOG_GPS_DEMOD_BIT_EDGE_C (0x252 + LOG_1X_BASE_C) + +/* GPS demodulation soft decisions */ +#define LOG_GPS_DEMOD_SOFT_DECISIONS_C (0x253 + LOG_1X_BASE_C) + +/* GPS post-processed demod tracking results */ +#define LOG_GPS_DEMOD_TRACKING_POST_PROC_C (0x254 + LOG_1X_BASE_C) + +/* GPS subframe log */ +#define LOG_GPS_DEMOD_SUBFRAME_C (0x255 + LOG_1X_BASE_C) + +/* F-CPCCH Quality Information */ 
+#define LOG_F_CPCCH_QUALITY_INFO_C (0x256 + LOG_1X_BASE_C) + +/* Reverse PDCCH/PDCH Frame Information */ +#define LOG_R_PDCCH_R_PDCH_FRAME_INFO_C (0x257 + LOG_1X_BASE_C) + +/* Forward G Channel Information */ +#define LOG_F_GCH_INFO_C (0x258 + LOG_1X_BASE_C) + +/* Forward G Channel Frame Information */ +#define LOG_F_GCH_FRAME_INFO_C (0x259 + LOG_1X_BASE_C) + +/* Forward RC Channel Information */ +#define LOG_F_RCCH_INFO_C (0x25A + LOG_1X_BASE_C) + +/* Forward ACK Channel Information */ +#define LOG_F_ACKCH_INFO_C (0x25B + LOG_1X_BASE_C) + +/* Forward ACK Channel ACKDA Information */ +#define LOG_F_ACKCH_ACKDA_C (0x25C + LOG_1X_BASE_C) + +/* Reverse REQ Channel Information */ +#define LOG_R_REQCH_INFO_C (0x25D + LOG_1X_BASE_C) + +/* Sleep Task Statistics */ +#define LOG_SLEEP_STATS_C (0x25E + LOG_1X_BASE_C) + +/* Sleep controller statistics 1X */ +#define LOG_1X_SLEEP_CONTROLLER_STATS_C (0x25F + LOG_1X_BASE_C) + +/* Sleep controller statistics HDR */ +#define LOG_HDR_SLEEP_CONTROLLER_STATS_C (0x260 + LOG_1X_BASE_C) + +/* Sleep controller statistics GSM */ +#define LOG_GSM_SLEEP_CONTROLLER_STATS_C (0x261 + LOG_1X_BASE_C) + +/* Sleep controller statistics WCDMA */ +#define LOG_WCDMA_SLEEP_CONTROLLER_STATS_C (0x262 + LOG_1X_BASE_C) + +/* Sleep task and controller reserved logs */ +#define LOG_SLEEP_APPS_STATS_C (0x263 + LOG_1X_BASE_C) +#define LOG_SLEEP_STATS_RESERVED2_C (0x264 + LOG_1X_BASE_C) +#define LOG_SLEEP_STATS_RESERVED3_C (0x265 + LOG_1X_BASE_C) + +/* DV Information placeholder channel logs */ +#define LOG_PDCCH_LO_SELECTED_C (0x266 + LOG_1X_BASE_C) +#define LOG_PDCCH_HI_SELECTED_C (0x267 + LOG_1X_BASE_C) +#define LOG_WALSH_SELECTED_C (0x268 + LOG_1X_BASE_C) +#define LOG_PDCH_BE_SELECTED_C (0x269 + LOG_1X_BASE_C) +#define LOG_PDCCH_LLR_SELECTED_C (0x26A + LOG_1X_BASE_C) +#define LOG_CQI_ACK_LO_SELECTED_C (0x26B + LOG_1X_BASE_C) +#define LOG_CQI_ACK_HI_SELECTED_C (0x26C + LOG_1X_BASE_C) +#define LOG_RL_GAIN_SELECTED_C (0x26D + LOG_1X_BASE_C) +#define 
LOG_PDCCH0_SNDA_SELECTED_C (0x26E + LOG_1X_BASE_C) +#define LOG_PDCCH1_SNDA_SELECTED_C (0x26F + LOG_1X_BASE_C) + +/* 624 WMS Message List */ +#define LOG_WMS_MESSAGE_LIST_C (0x270 + LOG_1X_BASE_C) + +/* 625 Multimode Generic SIM Driver Interface */ +#define LOG_MM_GENERIC_SIM_DRIVER_C (0x271 + LOG_1X_BASE_C) + +/* 626 Generic SIM Toolkit Task */ +#define LOG_GENERIC_SIM_TOOLKIT_TASK_C (0x272 + LOG_1X_BASE_C) + +/* 627 Call Manager Phone events log */ +#define LOG_CM_PH_EVENT_C (0x273 + LOG_1X_BASE_C) + +/* 628 WMS Set Message List */ +#define LOG_WMS_SET_MESSAGE_LIST_C (0x274 + LOG_1X_BASE_C) + +/* 629-704 HDR reserved logs */ +#define LOG_HDR_RESERVED_CODES_BASE_3_C (0x275 + LOG_1X_BASE_C) +#define LOG_HDR_LAST_3_C (75 + LOG_HDR_RESERVED_CODES_BASE_3_C) + +/* 705 Call Manager call event log */ +#define LOG_CM_CALL_EVENT_C (0x2C1 + LOG_1X_BASE_C) + +/* 706-738 QVP reserved logs */ +#define LOG_QVP_RESERVED_CODES_BASE_C (0x2C2 + LOG_1X_BASE_C) +#define LOG_QVP_LAST_C (32 + LOG_QVP_RESERVED_CODES_BASE_C) + +/* 739 GPS PE Position Report log */ +#define LOG_GPS_PE_POSITION_REPORT_C (0x2E3 + LOG_1X_BASE_C) + +/* 740 GPS PE Position Report Extended log */ +#define LOG_GPS_PE_POSITION_REPORT_EXT_C (0x2E4 + LOG_1X_BASE_C) + +/* 741 log */ +#define LOG_MDDI_HOST_STATS_C (0x2E5 + LOG_1X_BASE_C) + +/* GPS Decoded Ephemeris */ +#define LOG_GPS_DECODED_EPHEMERIS_C (0x2E6 + LOG_1X_BASE_C) + +/* GPS Decoded Almanac */ +#define LOG_GPS_DECODED_ALMANAC_C (0x2E7 + LOG_1X_BASE_C) + +/* Transceiver Resource Manager */ +#define LOG_TRANSCEIVER_RESOURCE_MGR_C (0x2E8 + LOG_1X_BASE_C) + +/* GPS Position Engine Info */ +#define LOG_GPS_POSITION_ENGINE_INFO_C (0x2E9 + LOG_1X_BASE_C) + +/* 746-810 RAPTOR reserved log range */ +#define LOG_RAPTOR_RESERVED_CODES_BASE_C (0x2EA + LOG_1X_BASE_C) +#define LOG_RAPTOR_LAST_C (64 + LOG_RAPTOR_RESERVED_CODES_BASE_C) + +/* QOS Specification Logging */ + +/* QOS Requested Log */ +#define LOG_QOS_REQUESTED_C (0x32B + LOG_1X_BASE_C) + +/* QOS Granted Log 
*/ +#define LOG_QOS_GRANTED_C (0x32C + LOG_1X_BASE_C) + +/* QOS State Log */ +#define LOG_QOS_STATE_C (0x32D + LOG_1X_BASE_C) + +#define LOG_QOS_MODIFIED_C (0x32E + LOG_1X_BASE_C) + +#define LOG_QDJ_ENQUEUE_C (0x32F + LOG_1X_BASE_C) +#define LOG_QDJ_DEQUEUE_C (0x330 + LOG_1X_BASE_C) +#define LOG_QDJ_UPDATE_C (0x331 + LOG_1X_BASE_C) +#define LOG_QDTX_ENCODER_C (0x332 + LOG_1X_BASE_C) +#define LOG_QDTX_DECODER_C (0x333 + LOG_1X_BASE_C) + +#define LOG_PORT_ASSIGNMENT_STATUS_C (0x334 + LOG_1X_BASE_C) + +/* Protocol Services reserved log codes */ +#define LOG_PS_RESERVED_CODES_BASE_C (0x335 + LOG_1X_BASE_C) +#define LOG_PS_LAST_C (25 + LOG_PS_RESERVED_C) + +#define LOG_PS_STAT_IP_C (0x335 + LOG_1X_BASE_C) +#define LOG_PS_STAT_GLOBAL_IPV4_C (0x335 + LOG_1X_BASE_C) +#define LOG_PS_STAT_GLOBAL_IPV6_C (0x336 + LOG_1X_BASE_C) +#define LOG_PS_STAT_GLOBAL_ICMPV4_C (0x337 + LOG_1X_BASE_C) +#define LOG_PS_STAT_GLOBAL_ICMPV6_C (0x338 + LOG_1X_BASE_C) +#define LOG_PS_STAT_GLOBAL_TCP_C (0x339 + LOG_1X_BASE_C) +#define LOG_PS_STAT_GLOBAL_UDP_C (0x33A + LOG_1X_BASE_C) + +/* Protocol Services describe all TCP instances */ +#define LOG_PS_STAT_DESC_ALL_TCP_INST_C (0x33B + LOG_1X_BASE_C) + +/* Protocol Services describe all memory pool instances */ +#define LOG_PS_STAT_DESC_ALL_MEM_POOL_INST_C (0x33C + LOG_1X_BASE_C) + +/* Protocol Services describe all IFACE instances */ +#define LOG_PS_STAT_DESC_ALL_IFACE_INST_C (0x33D + LOG_1X_BASE_C) + +/* Protocol Services describe all PPP instances */ +#define LOG_PS_STAT_DESC_ALL_PPP_INST_C (0x33E + LOG_1X_BASE_C) + +/* Protocol Services describe all ARP instances */ +#define LOG_PS_STAT_DESC_ALL_ARP_INST_C (0x33F + LOG_1X_BASE_C) + +/* Protocol Services describe delta instance */ +#define LOG_PS_STAT_DESC_DELTA_INST_C (0x340 + LOG_1X_BASE_C) + +/* Protocol Services instance TCP statistics */ +#define LOG_PS_STAT_TCP_INST_C (0x341 + LOG_1X_BASE_C) + +/* Protocol Services instance UDP statistics */ +#define LOG_PS_STAT_UDP_INST_C (0x342 + 
LOG_1X_BASE_C) + +/* Protocol Services instance PPP statistics */ +#define LOG_PS_STAT_PPP_INST_C (0x343 + LOG_1X_BASE_C) + +/* Protocol Services instance IFACE statistics */ +#define LOG_PS_STAT_IFACE_INST_C (0x344 + LOG_1X_BASE_C) + +/* Protocol Services instance memory statistics */ +#define LOG_PS_STAT_MEM_INST_C (0x345 + LOG_1X_BASE_C) + +/* Protocol Services instance flow statistics */ +#define LOG_PS_STAT_FLOW_INST_C (0x346 + LOG_1X_BASE_C) + +/* Protocol Services instance physical link statistics */ +#define LOG_PS_STAT_PHYS_LINK_INST_C (0x347 + LOG_1X_BASE_C) + +/* Protocol Services instance ARP statistics */ +#define LOG_PS_STAT_ARP_INST_C (0x348 + LOG_1X_BASE_C) + +/* Protocol Services instance LLC statistics */ +#define LOG_PS_STAT_LLC_INST_C (0x349 + LOG_1X_BASE_C) + +/* Protocol Services instance IPHC statistics */ +#define LOG_PS_STAT_IPHC_INST_C (0x34A + LOG_1X_BASE_C) + +/* Protocol Services instance ROHC statistics */ +#define LOG_PS_STAT_ROHC_INST_C (0x34B + LOG_1X_BASE_C) + +/* Protocol Services instance RSVP statistics */ +#define LOG_PS_STAT_RSVP_INST_C (0x34C + LOG_1X_BASE_C) + +/* Protocol Services describe all LLC instances */ +#define LOG_PS_STAT_DESC_ALL_LLC_INST_C (0x34D + LOG_1X_BASE_C) + +/* Protocol Services describe all RSVP instances */ +#define LOG_PS_STAT_DESC_ALL_RSVP_INST_C (0x34E + LOG_1X_BASE_C) + +/* Call Manager Serving System event log */ +#define LOG_CM_SS_EVENT_C (0x34F + LOG_1X_BASE_C) + +/* VcTcxo manager’s automatic frequency control log */ +#define LOG_TCXOMGR_AFC_DATA_C (0x350 + LOG_1X_BASE_C) + +/* Clock transactions and general clocks status log */ +#define LOG_CLOCK_C (0x351 + LOG_1X_BASE_C) + +/* GPS search processed peak results and their associated search parameters */ +#define LOG_GPS_PROCESSED_PEAK_C (0x352 + LOG_1X_BASE_C) + +#define LOG_MDSP_LOG_CHUNKS_C (0x353 + LOG_1X_BASE_C) + +/* Periodic RSSI update log */ +#define LOG_WLAN_RSSI_UPDATE_C (0x354 + LOG_1X_BASE_C) + +/* Periodic Link Layer statistics log 
*/ +#define LOG_WLAN_LL_STAT_C (0x355 + LOG_1X_BASE_C) + +/* QOS Extended State Log */ +#define LOG_QOS_STATE_EX_C (0x356 + LOG_1X_BASE_C) + +/* Bluetooth host HCI transmitted data */ +#define LOG_BT_HOST_HCI_TX_C (0x357 + LOG_1X_BASE_C) + +/* Bluetooth host HCI received data */ +#define LOG_BT_HOST_HCI_RX_C (0x358 + LOG_1X_BASE_C) + +/* Internal - GPS PE Position Report Part 3 */ +#define LOG_GPS_PE_POSITION_REPORT_PART3_C (0x359 + LOG_1X_BASE_C) + +/* Extended log code which logs requested QoS */ +#define LOG_QOS_REQUESTED_EX_C (0x35A + LOG_1X_BASE_C) + +/* Extended log code which logs granted QoS */ +#define LOG_QOS_GRANTED_EX_C (0x35B + LOG_1X_BASE_C) + +/* Extended log code which logs modified QoS */ +#define LOG_QOS_MODIFIED_EX_C (0x35C + LOG_1X_BASE_C) + +/* Bus Monitor Profiling Info */ +#define LOG_BUS_MON_PROF_INFO_C (0x35D + LOG_1X_BASE_C) + +/* Pilot Phase Measurement Search results */ +#define LOG_SRCH_PPM_RES_VER_2_C (0x35E + LOG_1X_BASE_C) + +/* Pilot Phase Measurement Data Base */ +#define LOG_SRCH_PPM_DB_VER_2_C (0x35F + LOG_1X_BASE_C) + +/* Pilot Phase Measurement state machine */ +#define LOG_PPM_SM_C (0x360 + LOG_1X_BASE_C) + +/* Robust Header Compression - Compressor */ +#define LOG_ROHC_COMPRESSOR_C (0x361 + LOG_1X_BASE_C) + +/* Robust Header Compression - Decompressor */ +#define LOG_ROHC_DECOMPRESSOR_C (0x362 + LOG_1X_BASE_C) + +/* Robust Header Compression - Feedback Compressor */ +#define LOG_ROHC_FEEDBACK_COMPRESSOR_C (0x363 + LOG_1X_BASE_C) + +/* Robust Header Compression - Feedback Decompressor */ +#define LOG_ROHC_FEEDBACK_DECOMPRESSOR_C (0x364 + LOG_1X_BASE_C) + +/* Bluetooth HCI commands */ +#define LOG_BT_HCI_CMD_C (0x365 + LOG_1X_BASE_C) + +/* Bluetooth HCI events */ +#define LOG_BT_HCI_EV_C (0x366 + LOG_1X_BASE_C) + +/* Bluetooth HCI Transmitted ACL data */ +#define LOG_BT_HCI_TX_ACL_C (0x367 + LOG_1X_BASE_C) + +/* Bluetooth HCI Received ACL data */ +#define LOG_BT_HCI_RX_ACL_C (0x368 + LOG_1X_BASE_C) + +/* Bluetooth SOC H4 Deep 
Sleep */ +#define LOG_BT_SOC_H4DS_C (0x369 + LOG_1X_BASE_C) + +/* UMTS to CDMA Handover Message */ +#define LOG_UMTS_TO_CDMA_HANDOVER_MSG_C (0x36A + LOG_1X_BASE_C) + +/* Graphic Event Data */ +#define LOG_PROFILER_GRAPHIC_DATA_C (0x36B + LOG_1X_BASE_C) + +/* Audio Event Data */ +#define LOG_PROFILER_AUDIO_DATA_C (0x36C + LOG_1X_BASE_C) + +/* GPS Spectral Information */ +#define LOG_GPS_SPECTRAL_INFO_C (0x36D + LOG_1X_BASE_C) + +/* AHB Performance Monitor LOG data */ +#define LOG_APM_C (0x36E + LOG_1X_BASE_C) + +/* GPS Clock Report */ +#define LOG_CONVERGED_GPS_CLOCK_REPORT_C (0x36F + LOG_1X_BASE_C) + +/* GPS Position Report */ +#define LOG_CONVERGED_GPS_POSITION_REPORT_C (0x370 + LOG_1X_BASE_C) + +/* GPS Measurement Report */ +#define LOG_CONVERGED_GPS_MEASUREMENT_REPORT_C (0x371 + LOG_1X_BASE_C) + +/* GPS RF Status Report */ +#define LOG_CONVERGED_GPS_RF_STATUS_REPORT_C (0x372 + LOG_1X_BASE_C) + +/* VOIP To CDMA Handover Message - Obsoleted by 0x138B - 0x138D */ +#define LOG_VOIP_TO_CDMA_HANDOVER_MSG_C (0x373 + LOG_1X_BASE_C) + +/* GPS Prescribed Dwell Result */ +#define LOG_GPS_PRESCRIBED_DWELL_RESULT_C (0x374 + LOG_1X_BASE_C) + +/* CGPS IPC Data */ +#define LOG_CGPS_IPC_DATA_C (0x375 + LOG_1X_BASE_C) + +/* CGPS Non IPC Data */ +#define LOG_CGPS_NON_IPC_DATA_C (0x376 + LOG_1X_BASE_C) + +/* CGPS Session Report */ +#define LOG_CGPS_REP_EVT_LOG_PACKET_C (0x377 + LOG_1X_BASE_C) + +/* CGPS PDSM Get Position */ +#define LOG_CGPS_PDSM_GET_POSITION_C (0x378 + LOG_1X_BASE_C) + +/* CGPS PDSM Set Parameters */ +#define LOG_CGPS_PDSM_SET_PARAMETERS_C (0x379 + LOG_1X_BASE_C) + +/* CGPS PDSM End Session */ +#define LOG_CGPS_PDSM_END_SESSION_C (0x37A + LOG_1X_BASE_C) + +/* CGPS PDSM notify Verify Response */ +#define LOG_CGPS_PDSM_NOTIFY_VERIFY_RESP_C (0x37B + LOG_1X_BASE_C) + +/* CGPS PDSM Position Report Callback */ +#define LOG_CGPS_PDSM_POSITION_REPORT_CALLBACK_C (0x37C + LOG_1X_BASE_C) + +/* CGPS PDSM PD Event Callback */ +#define LOG_CGPS_PDSM_PD_EVENT_CALLBACK_C (0x37D + 
LOG_1X_BASE_C) + +/* CGPS PDSM PA Event Callback */ +#define LOG_CGPS_PDSM_PA_EVENT_CALLBACK_C (0x37E + LOG_1X_BASE_C) + +/* CGPS PDSM notify Verify Request Callback */ +#define LOG_CGPS_PDSM_NOTIFY_VERIFY_REQUEST_C (0x37F + LOG_1X_BASE_C) + +/* CGPS PDSM PD Command Error Callback */ +#define LOG_CGPS_PDSM_PD_CMD_ERR_CALLBACK_C (0x380 + LOG_1X_BASE_C) + +/* CGPS PDSM PA Command Error Callback */ +#define LOG_CGPS_PDSM_PA_CMD_ERR_CALLBACK_C (0x381 + LOG_1X_BASE_C) + +/* CGPS PDSM Position Error */ +#define LOG_CGPS_PDSM_POS_ERROR_C (0x382 + LOG_1X_BASE_C) + +/* CGPS PDSM Extended Status Position Report */ +#define LOG_CGPS_PDSM_EXT_STATUS_POS_REPORT_C (0x383 + LOG_1X_BASE_C) + +/* CGPS PDSM Extended Status NMEA Report */ +#define LOG_CGPS_PDSM_EXT_STATUS_NMEA_REPORT_C (0x384 + LOG_1X_BASE_C) + +/* CGPS PDSM Extended Status Measurement Report */ +#define LOG_CGPS_PDSM_EXT_STATUS_MEAS_REPORT_C (0x385 + LOG_1X_BASE_C) + +/* CGPS Report Server TX Packet */ +#define LOG_CGPS_REP_SVR_TX_LOG_PACKET_C (0x386 + LOG_1X_BASE_C) + +/* CGPS Report Server RX Packet */ +#define LOG_CGPS_REP_SVR_RX_LOG_PACKET_C (0x387 + LOG_1X_BASE_C) + +/* UMTS To CDMA Handover Paging Channel Message */ +#define LOG_UMTS_TO_CDMA_HANDOVER_PCH_MSG_C (0x388 + LOG_1X_BASE_C) + +/* UMTS To CDMA Handover Traffic Channel Message */ +#define LOG_UMTS_TO_CDMA_HANDOVER_TCH_MSG_C (0x389 + LOG_1X_BASE_C) + +/* Converged GPS IQ Report */ +#define LOG_CONVERGED_GPS_IQ_REPORT_C (0x38A + LOG_1X_BASE_C) + +/* VOIP To CDMA Paging Channel Handover Message */ +#define LOG_VOIP_TO_CDMA_PCH_HANDOVER_MSG_C (0x38B + LOG_1X_BASE_C) + +/* VOIP To CDMA Access Channel Handover Message */ +#define LOG_VOIP_TO_CDMA_ACH_HANDOVER_MSG_C (0x38C + LOG_1X_BASE_C) + +/* VOIP To CDMA Forward Traffic Channel Handover Message */ +#define LOG_VOIP_TO_CDMA_FTC_HANDOVER_MSG_C (0x38D + LOG_1X_BASE_C) + +/* QMI reserved logs */ +#define LOG_QMI_RESERVED_CODES_BASE_C (0x38E + LOG_1X_BASE_C) +#define LOG_QMI_LAST_C (32 + 
LOG_QMI_RESERVED_CODES_BASE_C) + +/* QOS Info Code Update Log */ +#define LOG_QOS_INFO_CODE_UPDATE_C (0x3AF + LOG_1X_BASE_C) + +/* Transmit(Uplink) Vocoder PCM Packet Log */ +#define LOG_TX_PCM_PACKET_C (0x3B0 + LOG_1X_BASE_C) + +/* Audio Vocoder Data Paths */ +#define LOG_AUDVOC_DATA_PATHS_PACKET_C (0x3B0 + LOG_1X_BASE_C) + +/* Receive(Downlink) Vocoder PCM Packet Log */ +#define LOG_RX_PCM_PACKET_C (0x3B1 + LOG_1X_BASE_C) + +/* CRC of YUV frame log */ +#define LOG_DEC_CRC_FRAME_C (0x3B2 + LOG_1X_BASE_C) + +/* FLUTE Session Information */ +#define LOG_FLUTE_SESSION_INFO_C (0x3B3 + LOG_1X_BASE_C) + +/* FLUTE ADP File Information */ +#define LOG_FLUTE_ADP_FILE_INFO_C (0x3B4 + LOG_1X_BASE_C) + +/* FLUTE File Request Information */ +#define LOG_FLUTE_FILE_REQ_INFO_C (0x3B5 + LOG_1X_BASE_C) + +/* FLUTE FDT Instance Information */ +#define LOG_FLUTE_FDT_INST_C (0x3B6 + LOG_1X_BASE_C) + +/* FLUTE FDT Information */ +#define LOG_FLUTE_FDT_INFO_C (0x3B7 + LOG_1X_BASE_C) + +/* FLUTE File Log Packet Information */ +#define LOG_FLUTE_FILE_INFO_C (0x3B8 + LOG_1X_BASE_C) + +/* 3G 1X Parameter Overhead Information */ +#define LOG_VOIP_TO_CDMA_3G1X_PARAMETERS_C (0x3B9 + LOG_1X_BASE_C) + +/* CGPS ME Job Info */ +#define LOG_CGPS_ME_JOB_INFO_C (0x3BA + LOG_1X_BASE_C) + +/* CGPS ME SV Lists */ +#define LOG_CPGS_ME_SV_LISTS_C (0x3BB + LOG_1X_BASE_C) + +/* Flexible Profiling Status */ +#define LOG_PROFDIAG_GEN_STATUS_C (0x3BC + LOG_1X_BASE_C) + +/* Flexible Profiling Results */ +#define LOG_PROFDIAG_GEN_PROF_C (0x3BD + LOG_1X_BASE_C) + +/* FLUTE ADP File Content Log Packet Information */ +#define LOG_FLUTE_ADP_FILE_C (0x3BE + LOG_1X_BASE_C) + +/* FLUTE FDT Instance File Content Log Packet Information */ +#define LOG_FLUTE_FDT_INST_FILE_C (0x3BF + LOG_1X_BASE_C) + +/* FLUTE FDT Entries Information */ +#define LOG_FLUTE_FDT_ENTRIES_INFO_C (0x3C0 + LOG_1X_BASE_C) + +/* FLUTE File Contents Log Packet Information */ +#define LOG_FLUTE_FILE_C (0x3C1 + LOG_1X_BASE_C) + +/* CGPS ME 
Time-Transfer Info */ +#define LOG_CGPS_ME_TIME_TRANSFER_INFO_C (0x3C2 + LOG_1X_BASE_C) + +/* CGPS ME UMTS Time-Tagging Info */ +#define LOG_CGPS_ME_UMTS_TIME_TAGGING_INFO_C (0x3C3 + LOG_1X_BASE_C) + +/* CGPS ME Generic Time Estimate Put lnfo */ +#define LOG_CGPS_ME_TIME_EST_PUT_INFO_C (0x3C4 + LOG_1X_BASE_C) + +/* CGPS ME Generic Freq Estimate Put lnfo */ +#define LOG_CGPS_ME_FREQ_EST_PUT_INFO_C (0x3C5 + LOG_1X_BASE_C) + +/* CGPS Slow Clock Report */ +#define LOG_CGPS_SLOW_CLOCK_REPORT_C (0x3C6 + LOG_1X_BASE_C) + +/* Converged GPS Medium Grid */ +#define LOG_CONVERGED_GPS_MEDIUM_GRID_C (0x3C7 + LOG_1X_BASE_C) + +/* Static information about the driver or device */ +#define LOG_SNSD_INFO_C (0x3C8 + LOG_1X_BASE_C) + +/* Dynamic state information about the device or driver */ +#define LOG_SNSD_STATE_C (0x3C9 + LOG_1X_BASE_C) + +/* Data from a driver */ +#define LOG_SNSD_DATA (0x3CA + LOG_1X_BASE_C) +#define LOG_SNSD_DATA_C (0x3CA + LOG_1X_BASE_C) + +/* CGPS Cell DB Cell Change Info */ +#define LOG_CGPS_CELLDB_CELL_CHANGE_INFO_C (0x3CB + LOG_1X_BASE_C) + +/* xScalar YUV frame log */ +#define LOG_DEC_XSCALE_YUV_FRAME_C (0x3CC + LOG_1X_BASE_C) + +/* CRC of xScaled YUV frame log */ +#define LOG_DEC_XSCALE_CRC_FRAME_C (0x3CD + LOG_1X_BASE_C) + +/* CGPS Frequency Estimate Report */ +#define LOG_CGPS_FREQ_EST_REPORT_C (0x3CE + LOG_1X_BASE_C) + +/* GPS DCME Srch Job Completed */ +#define LOG_GPS_DCME_SRCH_JOB_COMPLETED_C (0x3CF + LOG_1X_BASE_C) + +/* CGPS ME Fastscan results */ +#define LOG_CGPS_ME_FASTSCAN_RESULTS_C (0x3D0 + LOG_1X_BASE_C) + +/* XO frequency Estimation log */ +#define LOG_XO_FREQ_EST_C (0x3D1 + LOG_1X_BASE_C) + +/* Tcxomgr field calibration data */ +#define LOG_TCXOMGR_FIELD_CAL_C (0x3D2 + LOG_1X_BASE_C) + +/* UMB Call Processing Connection Attempt */ +#define LOG_UMBCP_CONNECTION_ATTEMPT_C (0x3D3 + LOG_1X_BASE_C) + +/* UMB Call Processing Connection Release */ +#define LOG_UMBCP_CONNECTION_RELEASE_C (0x3D4 + LOG_1X_BASE_C) + +/* UMB Call Processing Page 
Message */ +#define LOG_UMBCP_PAGE_MESSAGE_C (0x3D5 + LOG_1X_BASE_C) + +/* UMB Call Processing OVHD Information */ +#define LOG_UMBCP_OVHD_INFO_C (0x3D6 + LOG_1X_BASE_C) + +/* UMB Call Processing Session Attempt */ +#define LOG_UMBCP_SESSION_ATTEMPT_C (0x3D7 + LOG_1X_BASE_C) + +/* UMB Call Processing Route Information */ +#define LOG_UMBCP_ROUTE_INFO_C (0x3D8 + LOG_1X_BASE_C) + +/* UMB Call Processing State Information */ +#define LOG_UMBCP_STATE_INFO_C (0x3D9 + LOG_1X_BASE_C) + +/* UMB Call Processing SNP */ +#define LOG_UMBCP_SNP_C (0x3DA + LOG_1X_BASE_C) + +/* CGPS Session Early Exit Decision */ +#define LOG_CGPS_SESSION_EARLY_EXIT_DECISION_C (0x3DB + LOG_1X_BASE_C) + +/* GPS RF Linearity Status */ +#define LOG_CGPS_ME_RF_LINEARITY_INFO_C (0x3DC + LOG_1X_BASE_C) + +/* CGPS ME 5ms IQ Sums */ +#define LOG_CGPS_ME_5MS_IQ_SUMS_C (0x3DD + LOG_1X_BASE_C) + +/* CGPS ME 20ms IQ Sums */ +#define LOG_CPGS_ME_20MS_IQ_SUMS_C (0x3DE + LOG_1X_BASE_C) + +/* ROHC Compressor Statistics */ +#define LOG_ROHC_COMPRESSOR_STATS_C (0x3DF + LOG_1X_BASE_C) + +/* ROHC Decompressor Statistics */ +#define LOG_ROHC_DECOMPRESSOR_STATS_C (0x3E0 + LOG_1X_BASE_C) + +/* Sensors - Kalman filter information */ +#define LOG_SENSOR_KF_INFO_C (0x3E1 + LOG_1X_BASE_C) + +/* Sensors - Integrated measurements */ +#define LOG_SENSOR_INT_MEAS_C (0x3E2 + LOG_1X_BASE_C) + +/* Sensors - Bias calibration values */ +#define LOG_SENSOR_BIAS_CALIBRATION_C (0x3E3 + LOG_1X_BASE_C) + +/* Log codes 0x13E4-0x13E7 are not following standard log naming convention */ + +/* DTV ISDB-T Transport Stream Packets */ +#define LOG_DTV_ISDB_TS_PACKETS (0x3E4 + LOG_1X_BASE_C) + +/* DTV ISDB-T PES Packets */ +#define LOG_DTV_ISDB_PES_PACKETS (0x3E5 + LOG_1X_BASE_C) + +/* DTV ISDB-T Sections */ +#define LOG_DTV_ISDB_SECTIONS (0x3E6 + LOG_1X_BASE_C) + +/* DTV ISDB-T Buffering */ +#define LOG_DTV_ISDB_BUFFERING (0x3E7 + LOG_1X_BASE_C) + +/* WLAN System Acquisition and Handoff */ +#define LOG_WLAN_SYS_ACQ_HO_C (0x3E8 + LOG_1X_BASE_C) 
+ +/* WLAN General Configurable Parameters */ +#define LOG_WLAN_GEN_CONFIG_PARAMS_C (0x3E9 + LOG_1X_BASE_C) + +/* UMB Physical Layer Channel and Interference Estimation */ +#define LOG_UMB_PHY_RX_DPICH_CIE_C (0x3EA + LOG_1X_BASE_C) + +/* UMB Physical Layer MMSE/MRC Demodulated Data Symbols (Low) */ +#define LOG_UMB_PHY_RX_DATA_DEMOD_LOW_C (0x3EB + LOG_1X_BASE_C) + +/* UMB Physical Layer MMSE/MRC Demodulated Data Symbols (High) */ +#define LOG_UMB_PHY_RX_DATA_DEMOD_HIGH_C (0x3EC + LOG_1X_BASE_C) + +/* UMB Physical Layer DCH Decoder */ +#define LOG_UMB_PHY_RX_DCH_DECODER_C (0x3ED + LOG_1X_BASE_C) + +/* UMB Physical Layer DCH Statistics */ +#define LOG_UMB_PHY_DCH_STATISTICS_C (0x3EE + LOG_1X_BASE_C) + +/* UMB Physical Layer CqiPich Processing */ +#define LOG_UMB_PHY_RX_CQIPICH_C (0x3EF + LOG_1X_BASE_C) + +/* UMB Physical Layer MIMO/SIMO in CqiPich (High) */ +#define LOG_UMB_PHY_RX_CQIPICH_CHANTAPS_HIGH_C (0x3F0 + LOG_1X_BASE_C) + +/* UMB Physical Layer MIMO/SIMO in CquiPich (Low) */ +#define LOG_UMB_PHY_RX_CQIPICH_CHANTAPS_LOW_C (0x3F1 + LOG_1X_BASE_C) + +/* UMB Physical Layer Time-Domain Channel Taps (High) */ +#define LOG_UMB_PHY_RX_PPICH_CHAN_EST_HIGH_C (0x3F2 + LOG_1X_BASE_C) + +/* UMB Physical Layer Time-Domain Channel Taps (Low) */ +#define LOG_UMB_PHY_RX_PPICH_CHAN_EST_LOW_C (0x3F3 + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator */ +#define LOG_UMB_PHY_TX_PICH_CONFIG_C (0x3F4 + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for R-ACK (High) */ +#define LOG_UMB_PHY_TX_ACK_HIGH_C (0x3F5 + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for R-ACK (Low) */ +#define LOG_UMB_PHY_TX_ACK_LOW_C (0x3F6 + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for R-PICH */ +#define LOG_UMB_PHY_TX_PICH_C (0x3F7 + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for R-ACH (Access) */ +#define LOG_UMB_PHY_TX_ACH_C (0x3F8 + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for R-ODDCCH (High) */ +#define LOG_UMB_PHY_TX_ODCCH_HIGH_C (0x3F9 + 
LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for R-ODDCCH (Low) */ +#define LOG_UMB_PHY_TX_ODCCH_LOW_C (0x3FA + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for R-CDCCH */ +#define LOG_UMB_PHY_TX_RCDCCH_CONFIG_C (0x3FB + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for CQI sent on RCDCCH */ +#define LOG_UMB_PHY_TX_NONFLSS_CQICH_C (0x3FC + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for CQI sent on RCDCCH */ +#define LOG_UMB_PHY_TX_FLSS_CQICH_C (0x3FD + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for PACH sent on RCDCCH */ +#define LOG_UMB_PHY_TX_PAHCH_C (0x3FE + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for REQ sent on RCDCCH */ +#define LOG_UMB_PHY_TX_REQCH_C (0x3FF + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for PSD sent on RCDCCH */ +#define LOG_UMB_PHY_TX_PSDCH_C (0x400 + LOG_1X_BASE_C) + +/* UMB Physical Layer AT Modulator for R-DCH */ +#define LOG_UMB_PHY_TX_DCH_C (0x401 + LOG_1X_BASE_C) + +/* UMB Physical Layer Time/Frequency/RxPower Estimate */ +#define LOG_UMB_PHY_RX_TIME_FREQ_POWER_ESTIMATE_C (0x402 + LOG_1X_BASE_C) + +/* UMB Physical Layer FLCS Processing */ +#define LOG_UMB_PHY_RX_FLCS_PROCESSING_C (0x403 + LOG_1X_BASE_C) + +/* UMB Physical Layer PBCCH Processing */ +#define LOG_UMB_PHY_RX_PBCCH_PROCESSING_C (0x404 + LOG_1X_BASE_C) + +/* UMB Physical Layer SBCCH Processing */ +#define LOG_UMB_PHY_RX_SBCCH_PROCESSING_C (0x405 + LOG_1X_BASE_C) + +/* UMB Physical Layer QPCH Processing */ +#define LOG_UMB_PHY_RX_QPCH_PROCESSING_C (0x406 + LOG_1X_BASE_C) + +/* UMB Physical Layer MRC Demodulated Data Symbols (Preamble SBCCH/QPCH) */ +#define LOG_UMB_PHY_RX_SBCCH_DEMOD_C (0x407 + LOG_1X_BASE_C) + +/* UMB Physical Layer MRC Demodulated Data Symbols (Preamble PBCCH) */ +#define LOG_UMB_PHY_RX_PBCCH_DEMOD_C (0x408 + LOG_1X_BASE_C) + +/* UMB Physical Layer VCQI */ +#define LOG_UMB_PHY_RX_VCQI_C (0x409 + LOG_1X_BASE_C) + +/* UMB Physical Layer Acquisition Algorithm */ +#define 
LOG_UMB_PHY_RX_INITIAL_ACQUISITION_C (0x40A + LOG_1X_BASE_C) + +/* UMB Physical Layer Handoff Search Algorithm */ +#define LOG_UMB_PHY_RX_HANDOFF_SEARCH_C (0x40B + LOG_1X_BASE_C) + +/* UMB RF RFFE Configuration Info */ +#define LOG_UMB_AT_RFFE_CONFG_C (0x40C + LOG_1X_BASE_C) + +/* UMB RF Calibrated Values After Powerup */ +#define LOG_UMB_AT_RFFE_RX_CALIB_C (0x40D + LOG_1X_BASE_C) + +/* UMB RF AGC Block in Acquisition Mode */ +#define LOG_UMB_AT_RFFE_RX_ACQ_C (0x40E + LOG_1X_BASE_C) + +/* UMB RF AGC Block in Idle Mode */ +#define LOG_UMB_AT_RFFE_RX_IDLE_C (0x40F + LOG_1X_BASE_C) + +/* UMB RF AGC Block in Connected Mode */ +#define LOG_UMB_AT_RFFE_RX_CONNECTED_C (0x410 + LOG_1X_BASE_C) + +/* UMB RF AGC Block in Connected Mode (FTM) */ +#define LOG_UMB_AT_RFFE_RX_CONNECTED_FTM_C (0x411 + LOG_1X_BASE_C) + +/* UMB RF Jammer Detector Functionality */ +#define LOG_UMB_AT_RFFE_RX_JAMMER_DETECTOR_FUNCTIONALITY_C (0x412 + LOG_1X_BASE_C) + +/* UMB RF Jammer Detector Response */ +#define LOG_UMB_AT_RFFE_RX_JAMMER_DETECTOR_RESPONSE_C (0x413 + LOG_1X_BASE_C) + +/* UMB RF RFFE TX Power Control */ +#define LOG_UMB_AT_RFFE_TX_BETA_SCALING_C (0x414 + LOG_1X_BASE_C) + +/* UMB Searcher Dump */ +#define LOG_UMB_SEARCHER_DUMP_C (0x415 + LOG_1X_BASE_C) + +/* UMB System Acquire */ +#define LOG_UMB_SYSTEM_ACQUIRE_C (0x416 + LOG_1X_BASE_C) + +/* UMB Set Maintenance */ +#define LOG_UMB_SET_MAINTENANCE_C (0x417 + LOG_1X_BASE_C) + +/* UMB QPCH */ +#define LOG_UMB_QPCH_C (0x418 + LOG_1X_BASE_C) + +/* UMB RLL Forward Partial RP Packet */ +#define LOG_UMB_RLL_FORWARD_PARTIAL_RP_C (0x419 + LOG_1X_BASE_C) + +/* UMB RLL Reverse Partial RP Packet */ +#define LOG_UMB_RLL_REVERSE_PARTIAL_RP_C (0x41A + LOG_1X_BASE_C) + +/* UMB RLL Forward Signal Packet */ +#define LOG_UMB_RLL_FORWARD_SIGNAL_C (0x41B + LOG_1X_BASE_C) + +/* UMB RLL Reverse Signal Packet */ +#define LOG_UMB_RLL_REVERSE_SIGNAL_C (0x41C + LOG_1X_BASE_C) + +/* UMB RLL Forward Statistics */ +#define LOG_UMB_RLL_FORWARD_STATS_C (0x41D + 
LOG_1X_BASE_C) + +/* UMB RLL Reverse Statistics */ +#define LOG_UMB_RLL_REVERSE_STATS_C (0x41E + LOG_1X_BASE_C) + +/* UMB RLL IRTP */ +#define LOG_UMB_RLL_IRTP_C (0x41F + LOG_1X_BASE_C) + +/* UMB AP Forward Link MAC Packets */ +#define LOG_UMB_AP_FL_MAC_PACKET_C (0x420 + LOG_1X_BASE_C) + +/* UMB AP Reverse Link MAC Packets */ +#define LOG_UMB_AP_RL_MAC_PACKET_C (0x421 + LOG_1X_BASE_C) + +/* GPS Performance Statistics log */ +#define LOG_CGPS_PERFORMANCE_STATS_C (0x422 + LOG_1X_BASE_C) + +/* UMB Searcher General Status */ +#define LOG_UMB_SRCH_GENERAL_STATUS_C (0x423 + LOG_1X_BASE_C) + +/* UMB Superframe Scheduler */ +#define LOG_UMB_SUPERFRAME_SCHEDULER_C (0x424 + LOG_1X_BASE_C) + +/* UMB Sector List */ +#define LOG_UMB_SECTOR_LIST_C (0x425 + LOG_1X_BASE_C) + +/* UMB MAC Access Attempt Command */ +#define LOG_UMB_MAC_ACCESS_ATTEMPT_CMD_C (0x426 + LOG_1X_BASE_C) + +/* UMB MAC Access Probe Information */ +#define LOG_UMB_MAC_ACCESS_PROBE_INFO_C (0x427 + LOG_1X_BASE_C) + +/* UMB MAC RTCMAC Package Information */ +#define LOG_UMB_MAC_RTCMAC_PKG_INFO_C (0x428 + LOG_1X_BASE_C) + +/* UMB MAC Super Frame Information */ +#define LOG_UMB_MAC_SI_INFO_C (0x429 + LOG_1X_BASE_C) + +/* UMB MAC Quick Channel Information */ +#define LOG_UMB_MAC_QCI_INFO_C (0x42A + LOG_1X_BASE_C) + +/* UMB MAC Paging Id List */ +#define LOG_UMB_MAC_PAGING_ID_LIST_C (0x42B + LOG_1X_BASE_C) + +/* UMB MAC Quick Paging Channel Information */ +#define LOG_UMB_MAC_QPCH_INFO_C (0x42C + LOG_1X_BASE_C) + +/* UMB MAC FTCMAC Information */ +#define LOG_UMB_MAC_FTCMAC_PKG_INFO_C (0x42D + LOG_1X_BASE_C) + +/* UMB MAC Access Grant Receiving */ +#define LOG_UMB_MAC_ACCESS_GRANT_C (0x42E + LOG_1X_BASE_C) + +/* UMB MAC Generic Debug Log */ +#define LOG_UMB_MAC_GEN_DEBUG_LOG_PKG_C (0x42F + LOG_1X_BASE_C) + +/* CGPS Frequency Bias Estimate */ +#define LOG_CGPS_MC_FREQ_BIAS_EST_C (0x430 + LOG_1X_BASE_C) + +/* UMB MAC Request Report Information Log */ +#define LOG_UMB_MAC_REQCH_REPORT_INFO_C (0x431 + LOG_1X_BASE_C) + 
+/* UMB MAC Reverse Link QoS Token Bucket Information Log */ +#define LOG_UMB_MAC_RLQOS_TOKEN_BUCKET_INFO_C (0x432 + LOG_1X_BASE_C) + +/* UMB MAC Reverse Link QoS Stream Information Log */ +#define LOG_UMB_MAC_RLQOS_STREAM_INFO_C (0x433 + LOG_1X_BASE_C) + +/* UMB MAC Reverse Link QoS Allotment Information Log */ +#define LOG_UMB_MAC_RLQOS_ALLOTMENT_INFO_C (0x434 + LOG_1X_BASE_C) + +/* UMB Searcher Recent State Machine Transactions */ +#define LOG_UMB_SRCH_STM_ACTIVITY_C (0x435 + LOG_1X_BASE_C) + +/* Performance Counters on ARM11 Profiling Information */ +#define LOG_ARM11_PERF_CNT_INFO_C (0x436 + LOG_1X_BASE_C) + +/* Protocol Services describe all flow instances */ +#define LOG_PS_STAT_DESC_ALL_FLOW_INST_C (0x437 + LOG_1X_BASE_C) + +/* Protocol Services describe all physical link instances */ +#define LOG_PS_STAT_DESC_ALL_PHYS_LINK_INST_C (0x438 + LOG_1X_BASE_C) + +/* Protocol Services describe all UDP instances */ +#define LOG_PS_STAT_DESC_ALL_UDP_INST_C (0x439 + LOG_1X_BASE_C) + +/* Searcher 4 Multi-Carrier HDR */ +#define LOG_SRCH4_MC_HDR_C (0x43A + LOG_1X_BASE_C) + +/* Protocol Services describe all IPHC instances */ +#define LOG_PS_STAT_DESC_ALL_IPHC_INST_C (0x43B + LOG_1X_BASE_C) + +/* Protocol Services describe all ROHC instances */ +#define LOG_PS_STAT_DESC_ALL_ROHC_INST_C (0x43C + LOG_1X_BASE_C) + +/* BCast security add program information */ +#define LOG_BCAST_SEC_ADD_PROGRAM_INFO_C (0x43D + LOG_1X_BASE_C) + +/* BCast security add program complete */ +#define LOG_BCAST_SEC_ADD_PROGRAM_COMPLETE_C (0x43E + LOG_1X_BASE_C) + +/* BCast security SDP parse */ +#define LOG_BCAST_SEC_SDP_PARSE_C (0x43F + LOG_1X_BASE_C) + +/* CGPS ME dynamic power optimization status */ +#define LOG_CGPS_ME_DPO_STATUS_C (0x440 + LOG_1X_BASE_C) + +/* CGPS PDSM on demand session start */ +#define LOG_CGPS_PDSM_ON_DEMAND_SESSION_START_C (0x441 + LOG_1X_BASE_C) + +/* CGPS PDSM on demand session stop */ +#define LOG_CGPS_PDSM_ON_DEMAND_SESSION_STOP_C (0x442 + LOG_1X_BASE_C) + +/* CGPS 
PDSM on demand session not started */ +#define LOG_CGPS_PDSM_ON_DEMAND_SESSION_NOT_STARTED_C (0x443 + LOG_1X_BASE_C) + +/* CGPS PDSM extern coarse position inject start */ +#define LOG_CGPS_PDSM_EXTERN_COARSE_POS_INJ_START_C (0x444 + LOG_1X_BASE_C) + +/* DTV ISDB-T TMCC information */ +#define LOG_DTV_ISDB_TMCC_C (0x445 + LOG_1X_BASE_C) + +/* RF development */ +#define LOG_RF_DEV_C (0x446 + LOG_1X_BASE_C) + +/* RF RFM API */ +#define LOG_RF_RFM_API_C (0x447 + LOG_1X_BASE_C) + +/* RF RFM state */ +#define LOG_RF_RFM_STATE_C (0x448 + LOG_1X_BASE_C) + +/* 1X RF Warmup */ +#define LOG_1X_RF_WARMUP_C (0x449 + LOG_1X_BASE_C) + +/* 1X RF power limiting */ +#define LOG_1X_RF_PWR_LMT_C (0x44A + LOG_1X_BASE_C) + +/* 1X RF state */ +#define LOG_1X_RF_STATE_C (0x44B + LOG_1X_BASE_C) + +/* 1X RF sleep */ +#define LOG_1X_RF_SLEEP_C (0x44C + LOG_1X_BASE_C) + +/* 1X RF TX state */ +#define LOG_1X_RF_TX_STATE_C (0x44D + LOG_1X_BASE_C) + +/* 1X RF IntelliCeiver state */ +#define LOG_1X_RF_INT_STATE_C (0x44E + LOG_1X_BASE_C) + +/* 1X RF RX ADC clock */ +#define LOG_1X_RF_RX_ADC_CLK_C (0x44F + LOG_1X_BASE_C) + +/* 1X RF LNA switch point */ +#define LOG_1X_RF_LNA_SWITCHP_C (0x450 + LOG_1X_BASE_C) + +/* 1X RF RX calibration */ +#define LOG_1X_RF_RX_CAL_C (0x451 + LOG_1X_BASE_C) + +/* 1X RF API */ +#define LOG_1X_RF_API_C (0x452 + LOG_1X_BASE_C) + +/* 1X RF RX PLL locking status */ +#define LOG_1X_RF_RX_PLL_LOCK_C (0x453 + LOG_1X_BASE_C) + +/* 1X RF voltage regulator */ +#define LOG_1X_RF_VREG_C (0x454 + LOG_1X_BASE_C) + +/* CGPS DIAG successful fix count */ +#define LOG_CGPS_DIAG_SUCCESSFUL_FIX_COUNT_C (0x455 + LOG_1X_BASE_C) + +/* CGPS MC track dynamic power optimization status */ +#define LOG_CGPS_MC_TRACK_DPO_STATUS_C (0x456 + LOG_1X_BASE_C) + +/* CGPS MC SBAS demodulated bits */ +#define LOG_CGPS_MC_SBAS_DEMOD_BITS_C (0x457 + LOG_1X_BASE_C) + +/* CGPS MC SBAS demodulated soft symbols */ +#define LOG_CGPS_MC_SBAS_DEMOD_SOFT_SYMBOLS_C (0x458 + LOG_1X_BASE_C) + +/* Data Services PPP 
configuration */ +#define LOG_DS_PPP_CONFIG_PARAMS_C (0x459 + LOG_1X_BASE_C) + +/* Data Services physical link configuration */ +#define LOG_DS_PHYS_LINK_CONFIG_PARAMS_C (0x45A + LOG_1X_BASE_C) + +/* Data Services PPP device configuration */ +#define LOG_PS_PPP_DEV_CONFIG_PARAMS_C (0x45B + LOG_1X_BASE_C) + +/* CGPS PDSM GPS state information */ +#define LOG_CGPS_PDSM_GPS_STATE_INFO_C (0x45C + LOG_1X_BASE_C) + +/* CGPS PDSM EXT status GPS state information */ +#define LOG_CGPS_PDSM_EXT_STATUS_GPS_STATE_INFO_C (0x45D + LOG_1X_BASE_C) + +/* CGPS ME Rapid Search Report */ +#define LOG_CGPS_ME_RAPID_SEARCH_REPORT_C (0x45E + LOG_1X_BASE_C) + +/* CGPS PDSM XTRA-T session */ +#define LOG_CGPS_PDSM_XTRA_T_SESSION_C (0x45F + LOG_1X_BASE_C) + +/* CGPS PDSM XTRA-T upload */ +#define LOG_CGPS_PDSM_XTRA_T_UPLOAD_C (0x460 + LOG_1X_BASE_C) + +/* CGPS Wiper Position Report */ +#define LOG_CGPS_WIPER_POSITION_REPORT_C (0x461 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard HTTP Digest Request Info */ +#define LOG_DTV_DVBH_SEC_SC_HTTP_DIGEST_REQ_C (0x462 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard HTTP Digest Response Info */ +#define LOG_DTV_DVBH_SEC_SC_HTTP_DIGEST_RSP_C (0x463 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard Services Registration Request Info */ +#define LOG_DTV_DVBH_SEC_SC_SVC_REG_REQ_C (0x464 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard Services Registration Complete Info */ +#define LOG_DTV_DVBH_SEC_SC_SVC_REG_COMPLETE_C (0x465 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard Services Deregistration Request Info */ +#define LOG_DTV_DVBH_SEC_SC_SVC_DEREG_REQ_C (0x466 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard Services Deregistration Complete Info */ +#define LOG_DTV_DVBH_SEC_SC_SVC_DEREG_COMPLETE_C (0x467 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard LTKM Request Info */ +#define LOG_DTV_DVBH_SEC_SC_LTKM_REQ_C (0x468 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard LTKM Request Complete Info */ +#define 
LOG_DTV_DVBH_SEC_SC_LTKM_REQ_COMPLETE_C (0x469 + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard Program Selection Info */ +#define LOG_DTV_DVBH_SEC_SC_PROG_SEL_C (0x46A + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard Program Selection Complete Info */ +#define LOG_DTV_DVBH_SEC_SC_PROG_SEL_COMPLETE_C (0x46B + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard LTKM */ +#define LOG_DTV_DVBH_SEC_SC_LTKM_C (0x46C + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard LTKM Verification Message */ +#define LOG_DTV_DVBH_SEC_SC_LTKM_VERIFICATION_C (0x46D + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard Parental Control Message */ +#define LOG_DTV_DVBH_SEC_SC_PARENTAL_CTRL_C (0x46E + LOG_1X_BASE_C) + +/* DTV DVBH Security SmartCard STKM */ +#define LOG_DTV_DVBH_SEC_SC_STKM_C (0x46F + LOG_1X_BASE_C) + +/* Protocol Services Statistics Global Socket */ +#define LOG_PS_STAT_GLOBAL_SOCK_C (0x470 + LOG_1X_BASE_C) + +/* MCS Application Manager */ +#define LOG_MCS_APPMGR_C (0x471 + LOG_1X_BASE_C) + +/* MCS MSGR */ +#define LOG_MCS_MSGR_C (0x472 + LOG_1X_BASE_C) + +/* MCS QTF */ +#define LOG_MCS_QTF_C (0x473 + LOG_1X_BASE_C) + +/* Sensors Stationary Detector Output */ +#define LOG_STATIONARY_DETECTOR_OUTPUT_C (0x474 + LOG_1X_BASE_C) + +/* Print out the ppm data portion */ +#define LOG_CGPS_PDSM_EXT_STATUS_MEAS_REPORT_PPM_C (0x475 + LOG_1X_BASE_C) + +/* GNSS Position Report */ +#define LOG_GNSS_POSITION_REPORT_C (0x476 + LOG_1X_BASE_C) + +/* GNSS GPS Measurement Report */ +#define LOG_GNSS_GPS_MEASUREMENT_REPORT_C (0x477 + LOG_1X_BASE_C) + +/* GNSS Clock Report */ +#define LOG_GNSS_CLOCK_REPORT_C (0x478 + LOG_1X_BASE_C) + +/* GNSS Demod Soft Decision */ +#define LOG_GNSS_DEMOD_SOFT_DECISIONS_C (0x479 + LOG_1X_BASE_C) + +/* GNSS ME 5MS IQ sum */ +#define LOG_GNSS_ME_5MS_IQ_SUMS_C (0x47A + LOG_1X_BASE_C) + +/* GNSS CD DB report */ +#define LOG_GNSS_CD_DB_REPORT_C (0x47B + LOG_1X_BASE_C) + +/* GNSS PE WLS position report */ +#define LOG_GNSS_PE_WLS_POSITION_REPORT_C (0x47C + 
LOG_1X_BASE_C) + +/* GNSS PE KF position report */ +#define LOG_GNSS_PE_KF_POSITION_REPORT_C (0x47D + LOG_1X_BASE_C) + +/* GNSS PRX RF HW status report */ +#define LOG_GNSS_PRX_RF_HW_STATUS_REPORT_C (0x47E + LOG_1X_BASE_C) + +/* GNSS DRX RF HW status report */ +#define LOG_GNSS_DRX_RF_HW_STATUS_REPORT_C (0x47F + LOG_1X_BASE_C) + +/* GNSS Glonass Measurement report */ +#define LOG_GNSS_GLONASS_MEASUREMENT_REPORT_C (0x480 + LOG_1X_BASE_C) + +/* GNSS GPS HBW RXD measurement */ +#define LOG_GNSS_GPS_HBW_RXD_MEASUREMENT_C (0x481 + LOG_1X_BASE_C) + +/* GNSS PDSM position report callback */ +#define LOG_GNSS_PDSM_POSITION_REPORT_CALLBACK_C (0x482 + LOG_1X_BASE_C) + +/* ISense Request String */ +#define LOG_ISENSE_REQUEST_STR_C (0x483 + LOG_1X_BASE_C) + +/* ISense Response String */ +#define LOG_ISENSE_RESPONSE_STR_C (0x484 + LOG_1X_BASE_C) + +/* Bluetooth SOC General Log Packet*/ +#define LOG_BT_SOC_GENERAL_C (0x485 + LOG_1X_BASE_C) + +/* QCRil Call Flow */ +#define LOG_QCRIL_CALL_FLOW_C (0x486 + LOG_1X_BASE_C) + +/* CGPS Wideband FFT stats */ +#define LOG_CGPS_WB_FFT_STATS_C (0x487 + LOG_1X_BASE_C) + +/* CGPS Slow Clock Calibration Report*/ +#define LOG_CGPS_SLOW_CLOCK_CALIB_REPORT_C (0x488 + LOG_1X_BASE_C) + +/* SNS GPS TIMESTAMP */ +#define LOG_SNS_GPS_TIMESTAMP_C (0x489 + LOG_1X_BASE_C) + +/* GNSS Search Strategy Task Allocation */ +#define LOG_GNSS_SEARCH_STRATEGY_TASK_ALLOCATION_C (0x48A + LOG_1X_BASE_C) + +/* RF MC STM state */ +#define LOG_1XHDR_MC_STATE_C (0x48B + LOG_1X_BASE_C) + +/* Record in the Sparse Network DB */ +#define LOG_CGPS_SNDB_RECORD_C (0x48C + LOG_1X_BASE_C) + +/* Record removed from the DB */ +#define LOG_CGPS_SNDB_REMOVE_C (0x48D + LOG_1X_BASE_C) + +/* CGPS Reserved */ +#define LOG_GNSS_CC_PERFORMANCE_STATS_C (0x48E + LOG_1X_BASE_C) + +/* GNSS PDSM Set Parameters */ +#define LOG_GNSS_PDSM_SET_PARAMETERS_C (0x48F + LOG_1X_BASE_C) + +/* GNSS PDSM PD Event Callback */ +#define LOG_GNSS_PDSM_PD_EVENT_CALLBACK_C (0x490 + LOG_1X_BASE_C) + +/* GNSS
PDSM PA Event Callback */ +#define LOG_GNSS_PDSM_PA_EVENT_CALLBACK_C (0x491 + LOG_1X_BASE_C) + +/* CGPS Reserved */ +#define LOG_CGPS_RESERVED2_C (0x492 + LOG_1X_BASE_C) + +/* CGPS Reserved */ +#define LOG_CGPS_RESERVED3_C (0x493 + LOG_1X_BASE_C) + +/* GNSS PDSM EXT Status MEAS Report */ +#define LOG_GNSS_PDSM_EXT_STATUS_MEAS_REPORT_C (0x494 + LOG_1X_BASE_C) + +/* GNSS SM Error */ +#define LOG_GNSS_SM_ERROR_C (0x495 + LOG_1X_BASE_C) + +/* WLAN Scan */ +#define LOG_WLAN_SCAN_C (0x496 + LOG_1X_BASE_C) + +/* WLAN IBSS */ +#define LOG_WLAN_IBSS_C (0x497 + LOG_1X_BASE_C) + +/* WLAN 802.11d*/ +#define LOG_WLAN_80211D_C (0x498 + LOG_1X_BASE_C) + +/* WLAN Handoff */ +#define LOG_WLAN_HANDOFF_C (0x499 + LOG_1X_BASE_C) + +/* WLAN QoS EDCA */ +#define LOG_WLAN_QOS_EDCA_C (0x49A + LOG_1X_BASE_C) + +/* WLAN Beacon Update */ +#define LOG_WLAN_BEACON_UPDATE_C (0x49B + LOG_1X_BASE_C) + +/* WLAN Power save wow add pattern */ +#define LOG_WLAN_POWERSAVE_WOW_ADD_PTRN_C (0x49C + LOG_1X_BASE_C) + +/* WLAN WCM link metrics */ +#define LOG_WLAN_WCM_LINKMETRICS_C (0x49D + LOG_1X_BASE_C) + +/* WLAN wps scan complete*/ +#define LOG_WLAN_WPS_SCAN_COMPLETE_C (0x49E + LOG_1X_BASE_C) + +/* WLAN WPS WSC Message */ +#define LOG_WLAN_WPS_WSC_MESSAGE_C (0x49F + LOG_1X_BASE_C) + +/* WLAN WPS credentials */ +#define LOG_WLAN_WPS_CREDENTIALS_C (0x4A0 + LOG_1X_BASE_C) + +/* WLAN Qos TSpec*/ +#define LOG_WLAN_QOS_TSPEC_C (0x4A2 + LOG_1X_BASE_C) + +/* PMIC Vreg Control */ +#define LOG_PM_VREG_CONTROL_C (0x4A3 + LOG_1X_BASE_C) + +/* PMIC Vreg Level */ +#define LOG_PM_VREG_LEVEL_C (0x4A4 + LOG_1X_BASE_C) + +/* PMIC Vreg State */ +#define LOG_PM_VREG_STATE_C (0x4A5 + LOG_1X_BASE_C) + +/* CGPS SM EPH Randomization info */ +#define LOG_CGPS_SM_EPH_RANDOMIZATION_INFO_C (0x4A6 + LOG_1X_BASE_C) + +/* Audio calibration data */ +#define LOG_QACT_DATA_C (0x4A7 + LOG_1X_BASE_C) + +/* Compass 2D Tracked Calibration Set */ +#define LOG_SNS_VCPS_2D_TRACKED_CAL_SET (0x4A8 + LOG_1X_BASE_C) + +/* Compass 3D Tracked 
Calibration Set */ +#define LOG_SNS_VCPS_3D_TRACKED_CAL_SET (0x4A9 + LOG_1X_BASE_C) + +/* Calibration metric */ +#define LOG_SNS_VCPS_CAL_METRIC (0x4AA + LOG_1X_BASE_C) + +/* Accelerometer distance */ +#define LOG_SNS_VCPS_ACCEL_DIST (0x4AB + LOG_1X_BASE_C) + +/* Plane update */ +#define LOG_SNS_VCPS_PLANE_UPDATE (0x4AC + LOG_1X_BASE_C) + +/* Location report */ +#define LOG_SNS_VCPS_LOC_REPORT (0x4AD + LOG_1X_BASE_C) + +/* CM Active subscription */ +#define LOG_CM_PH_EVENT_SUBSCRIPTION_PREF_INFO_C (0x4AE + LOG_1X_BASE_C) + +/* DSDS version of CM call event */ +#define LOG_CM_DS_CALL_EVENT_C (0x4AF + LOG_1X_BASE_C) + +/* Sensors MobiSens Output */ +#define LOG_MOBISENS_OUTPUT_C (0x4B0 + LOG_1X_BASE_C) + +/* Accelerometer Data */ +#define LOG_ACCEL_DATA_C (0x4B1 + LOG_1X_BASE_C) + +/* Accelerometer Compensated Data */ +#define LOG_ACCEL_COMP_DATA_C (0x4B2 + LOG_1X_BASE_C) + +/* Motion State Data */ +#define LOG_MOTION_STATE_DATA_C (0x4B3 + LOG_1X_BASE_C) + +/* Stationary Position Indicator */ +#define LOG_STAT_POS_IND_C (0x4B4 + LOG_1X_BASE_C) + +/* Motion State Features */ +#define LOG_MOTION_STATE_FEATURES_C (0x4B5 + LOG_1X_BASE_C) + +/* Motion State Hard Decision */ +#define LOG_MOTION_STATE_HARD_DECISION_C (0x4B6 + LOG_1X_BASE_C) + +/* Motion State Soft Decision */ +#define LOG_MOTION_STATE_SOFT_DECISION_C (0x4B7 + LOG_1X_BASE_C) + +/* Sensors Software Version */ +#define LOG_SENSORS_SOFTWARE_VERSION_C (0x4B8 + LOG_1X_BASE_C) + +/* MobiSens Stationary Position Indicator Log Packet */ +#define LOG_MOBISENS_SPI_C (0x4B9 + LOG_1X_BASE_C) + +/* XO calibration raw IQ data */ +#define LOG_XO_IQ_DATA_C (0x4BA + LOG_1X_BASE_C) + +/*DTV CMMB Control Table Updated*/ +#define LOG_DTV_CMMB_CONTROL_TABLE_UPDATE ((0x4BB) + LOG_1X_BASE_C) + +/*DTV CMMB Media API Buffering Status*/ +#define LOG_DTV_CMMB_MEDIA_BUFFERING_STATUS ((0x4BC) + LOG_1X_BASE_C) + +/*DTV CMMB Emergency Broadcast Data*/ +#define LOG_DTV_CMMB_CONTROL_EMERGENCY_BCAST ((0x4BD) + LOG_1X_BASE_C) + +/*DTV CMMB
EMM/ECM Data*/ +#define LOG_DTV_CMMB_CAS_EMM_ECM ((0x4BE) + LOG_1X_BASE_C) + +/*DTV CMMB HW Status*/ +#define LOG_DTV_CMMB_HW_PERFORMANCE ((0x4BF) + LOG_1X_BASE_C) + +/*DTV CMMB ESG Program Indication Information*/ +#define LOG_DTV_CMMB_ESG_PROGRAM_INDICATION_INFORMATION ((0x4C0) + LOG_1X_BASE_C) + +/* Sensors - binary output of converted sensor data */ +#define LOG_CONVERTED_SENSOR_DATA_C ((0x4C1) + LOG_1X_BASE_C) + +/* CM Subscription event */ +#define LOG_CM_SUBSCRIPTION_EVENT_C ((0x4C2) + LOG_1X_BASE_C) + +/* Sensor Ambient Light Data */ +#define LOG_SNS_ALS_DATA_C ((0x4C3) + LOG_1X_BASE_C) + +/*Sensor Ambient Light Adaptive Data */ +#define LOG_SNS_ALS_DATA_ADAPTIVE_C ((0x4C4) + LOG_1X_BASE_C) + +/*Sensor Proximity Distance Data */ +#define LOG_SNS_PRX_DIST_DATA_C ((0x4C5) + LOG_1X_BASE_C) + +/*Sensor Proximity Data */ +#define LOG_SNS_PRX_DATA_C ((0x4C6) + LOG_1X_BASE_C) + +#define LOG_GNSS_SBAS_REPORT_C ((0x4C7) + LOG_1X_BASE_C) + +#define LOG_CPU_MONITOR_MODEM_C ((0x4C8) + LOG_1X_BASE_C) + +#define LOG_CPU_MONITOR_APPS_C ((0x4C9) + LOG_1X_BASE_C) + +#define LOG_BLAST_TASKPROFILE_C ((0x4CA) + LOG_1X_BASE_C) + +#define LOG_BLAST_SYSPROFILE_C ((0x4CB) + LOG_1X_BASE_C) + +#define LOG_FM_RADIO_FTM_C ((0x4CC) + LOG_1X_BASE_C) + +#define LOG_FM_RADIO_C ((0x4CD) + LOG_1X_BASE_C) + +#define LOG_UIM_DS_DATA_C ((0x4CE) + LOG_1X_BASE_C) + +#define LOG_QMI_CALL_FLOW_C ((0x4CF) + LOG_1X_BASE_C) + +#define LOG_APR_MODEM_C ((0x4D0) + LOG_1X_BASE_C) + +#define LOG_APR_APPS_C ((0x4D1) + LOG_1X_BASE_C) + +#define LOG_APR_ADSP_C ((0x4D2) + LOG_1X_BASE_C) + +#define LOG_DATA_MUX_RX_RAW_PACKET_C ((0x4D3) + LOG_1X_BASE_C) + +#define LOG_DATA_MUX_TX_RAW_PACKET_C ((0x4D4) + LOG_1X_BASE_C) + +#define LOG_DATA_MUX_RX_FRAME_PACKET_C ((0x4D5) + LOG_1X_BASE_C) + +#define LOG_DATA_MUX_TX_FRAME_PACKET_C ((0x4D6) + LOG_1X_BASE_C) + +#define LOG_CGPS_PDSM_EXT_STATUS_POS_INJ_REQ_INFO_C ((0x4D7) + LOG_1X_BASE_C) + +#define LOG_TEMPERATURE_MONITOR_C ((0x4D8) + LOG_1X_BASE_C) + +#define
LOG_SNS_GESTURES_REST_DETECT_C ((0x4D9) + LOG_1X_BASE_C) + +#define LOG_SNS_GESTURES_ORIENTATION_C ((0x4DA) + LOG_1X_BASE_C) + +#define LOG_SNS_GESTURES_FACING_C ((0x4DB) + LOG_1X_BASE_C) + +#define LOG_SNS_GESTURES_BASIC_C ((0x4DC) + LOG_1X_BASE_C) + +#define LOG_SNS_GESTURES_HINBYE_C ((0x4DD) + LOG_1X_BASE_C) + +#define LOG_GNSS_OEMDRE_MEASUREMENT_REPORT_C ((0x4DE) + LOG_1X_BASE_C) + +#define LOG_GNSS_OEMDRE_POSITION_REPORT_C ((0x4E0) + LOG_1X_BASE_C) + +#define LOG_GNSS_OEMDRE_SVPOLY_REPORT_C ((0x4E1) + LOG_1X_BASE_C) + +#define LOG_GNSS_OEMDRSYNC_C ((0x4E2) + LOG_1X_BASE_C) + +#define LOG_SNS_MGR_EVENT_NOTIFY_C ((0x4E3) + LOG_1X_BASE_C) + +#define LOG_SNS_MGR_EVENT_REGISTER_C ((0x4E4) + LOG_1X_BASE_C) + +#define LOG_GNSS_PDSM_PPM_SESSION_BEGIN_C ((0x4E5) + LOG_1X_BASE_C) + +#define LOG_GNSS_PDSM_PPM_SESSION_PPM_SUSPEND_C ((0x4E6) + LOG_1X_BASE_C) + +#define LOG_GNSS_PDSM_PPM_REPORT_THROTTLED_C ((0x4E7) + LOG_1X_BASE_C) + +#define LOG_GNSS_PDSM_PPM_REPORT_FIRED_C ((0x4E8) + LOG_1X_BASE_C) + +#define LOG_GNSS_PDSM_PPM_SESSION_END_C ((0x4E9) + LOG_1X_BASE_C) + +#define LOG_TRSP_DATA_STALL_C ((0x801) + LOG_1X_BASE_C) + +#define LOG_WLAN_PKT_LOG_INFO_C ((0x8E0) + LOG_1X_BASE_C) + +/* The last defined DMSS log code */ +#define LOG_1X_LAST_C ((0x8E0) + LOG_1X_BASE_C) + +#define LOG_WLAN_COLD_BOOT_CAL_DATA_C ((0xA18) + LOG_1X_BASE_C) + +/* This is only here for old (pre equipment ID update) logging code */ +#define LOG_LAST_C (LOG_1X_LAST_C & 0xFFF) + +/* ------------------------------------------------------------------------- + * APPS LOG definition: + * The max number of 16 log codes is assigned for Apps. + * The last apps log code could be 0xB00F. 
+ * Below definition is consolidated from log_codes_apps.h + * ------------------------------------------------------------------------- */ + +/* ======================== APPS Profiling ======================== */ +#define LOG_APPS_SYSPROFILE_C (0x01 + LOG_APPS_BASE_C) +#define LOG_APPS_TASKPROFILE_C (0x02 + LOG_APPS_BASE_C) + +/* The last defined APPS log code */ +/* Change it to (0x02 + LOG_LTE_LAST_C) to allow LTE log codes */ +#define LOG_APPS_LAST_C (0x02 + LOG_LTE_LAST_C) + +/* ------------------------------------------------------------------------- + * Log Equipment IDs. + * The number is represented by 4 bits. + * ------------------------------------------------------------------------- */ +typedef enum { + LOG_EQUIP_ID_OEM = 0, /* 3rd party OEM (licensee) use */ + LOG_EQUIP_ID_1X = 1, /* Traditional 1X line of products */ + LOG_EQUIP_ID_RSVD2 = 2, + LOG_EQUIP_ID_RSVD3 = 3, + LOG_EQUIP_ID_WCDMA = 4, + LOG_EQUIP_ID_GSM = 5, + LOG_EQUIP_ID_LBS = 6, + LOG_EQUIP_ID_UMTS = 7, + LOG_EQUIP_ID_TDMA = 8, + LOG_EQUIP_ID_BOA = 9, + LOG_EQUIP_ID_DTV = 10, + LOG_EQUIP_ID_APPS = 11, + LOG_EQUIP_ID_DSP = 12, + + LOG_EQUIP_ID_LAST_DEFAULT = LOG_EQUIP_ID_DSP +} log_equip_id_enum_type; + +#define LOG_EQUIP_ID_MAX 0xF /* The equipment ID is 4 bits */ + +/* Note that these are the official values and are used by default in + diagtune.h. 
+ */ +#define LOG_EQUIP_ID_0_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_1_LAST_CODE_DEFAULT LOG_1X_LAST_C +#define LOG_EQUIP_ID_2_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_3_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_4_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_5_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_6_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_7_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_8_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_9_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_10_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_11_LAST_CODE_DEFAULT LOG_LTE_LAST_C +#define LOG_EQUIP_ID_12_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_13_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_14_LAST_CODE_DEFAULT 0 +#define LOG_EQUIP_ID_15_LAST_CODE_DEFAULT 0 + +#endif /* LOG_CODES_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/host_diag_log.c b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/host_diag_log.c new file mode 100644 index 0000000000000000000000000000000000000000..ad1432c66a9716ec3267e965acda203cc711e7bd --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/host_diag_log.c @@ -0,0 +1,388 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/*============================================================================ + FILE: host_diag_log.c + + OVERVIEW: This source file contains definitions for WLAN UTIL diag APIs + + DEPENDENCIES: + ============================================================================*/ + +#include "qdf_types.h" +#include "i_host_diag_core_log.h" +#include "host_diag_core_event.h" +#include "wlan_nlink_common.h" +#include "cds_sched.h" +#include "wlan_ptt_sock_svc.h" +#include "wlan_nlink_srv.h" +#include "cds_api.h" +#include "wlan_ps_wow_diag.h" +#include "qdf_str.h" + +#define PTT_MSG_DIAG_CMDS_TYPE (0x5050) + +#define DIAG_TYPE_LOGS (1) +#define DIAG_TYPE_EVENTS (2) + +#define DIAG_SWAP16(A) ((((uint16_t)(A) & 0xff00) >> 8) | (((uint16_t)(A) & 0x00ff) << 8)) + +typedef struct event_report_s { + uint32_t diag_type; + uint16_t event_id; + uint16_t length; +} event_report_t; + +/**--------------------------------------------------------------------------- + + \brief host_diag_log_set_code() - + + This function sets the logging code in the given log record. + + \param - ptr - Pointer to the log header type. + - code - log code. + \return - None + + --------------------------------------------------------------------------*/ + +void host_diag_log_set_code(void *ptr, uint16_t code) +{ + if (ptr) { + /* All log packets are required to start with 'log_header_type' */ + ((log_hdr_type *) ptr)->code = code; + } +} + +/**--------------------------------------------------------------------------- + + \brief host_diag_log_set_length() - + + This function sets the length field in the given log record. + + \param - ptr - Pointer to the log header type. + - length - log length. 
+ + \return - None + + --------------------------------------------------------------------------*/ + +void host_diag_log_set_length(void *ptr, uint16_t length) +{ + if (ptr) { + /* All log packets are required to start with 'log_header_type' */ + ((log_hdr_type *) ptr)->len = (uint16_t) length; + } +} + +/**--------------------------------------------------------------------------- + + \brief host_diag_log_submit() - + + This function sends the log data to the ptt socket app only if it is registered with the driver. + + \param - ptr - Pointer to the log header type. + + \return - None + + --------------------------------------------------------------------------*/ + +void host_diag_log_submit(void *plog_hdr_ptr) +{ + log_hdr_type *pHdr = (log_hdr_type *) plog_hdr_ptr; + tAniHdr *wmsg = NULL; + uint8_t *pBuf; + uint16_t data_len; + uint16_t total_len; + + if (cds_is_load_or_unload_in_progress()) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, + "%s: Unloading/Loading in Progress. 
Ignore!!!", + __func__); + return; + } + + if (nl_srv_is_initialized() != 0) + return; + + if (cds_is_multicast_logging()) { + data_len = pHdr->len; + + total_len = sizeof(tAniHdr) + sizeof(uint32_t) + data_len; + + pBuf = (uint8_t *) qdf_mem_malloc(total_len); + + if (!pBuf) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "qdf_mem_malloc failed"); + return; + } + + wmsg = (tAniHdr *) pBuf; + wmsg->type = PTT_MSG_DIAG_CMDS_TYPE; + wmsg->length = total_len; + wmsg->length = DIAG_SWAP16(wmsg->length); + pBuf += sizeof(tAniHdr); + + /* Diag Type events or log */ + *(uint32_t *) pBuf = DIAG_TYPE_LOGS; + pBuf += sizeof(uint32_t); + + memcpy(pBuf, pHdr, data_len); + ptt_sock_send_msg_to_app (wmsg, 0, ANI_NL_MSG_PUMAC, + INVALID_PID); + qdf_mem_free((void *)wmsg); + } + return; +} + +/** + * host_diag_log_wlock() - This function is used to send wake lock diag events + * @reason: Reason why the wakelock was taken or released + * @wake_lock_name: Function in which the wakelock was taken or released + * @timeout: Timeout value in case of timed wakelocks + * @status: Status field indicating whether the wake lock was taken/released + * + * This function is used to send wake lock diag events to user space + * + * Return: None + * + */ +void host_diag_log_wlock(uint32_t reason, const char *wake_lock_name, + uint32_t timeout, uint32_t status) +{ + WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, + struct host_event_wlan_wake_lock); + + if ((nl_srv_is_initialized() != 0) || + (cds_is_wakelock_enabled() == false)) + return; + + wlan_diag_event.status = status; + wlan_diag_event.reason = reason; + wlan_diag_event.timeout = timeout; + wlan_diag_event.name_len = strlen(wake_lock_name); + strlcpy(&wlan_diag_event.name[0], + wake_lock_name, + wlan_diag_event.name_len+1); + + WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, EVENT_WLAN_WAKE_LOCK); +} + +/**--------------------------------------------------------------------------- + + \brief host_diag_event_report_payload() - + + This 
function sends the event data to the ptt socket app only if it is + registered with the driver. + + \param - ptr - Pointer to the log header type. + + \return - None + + --------------------------------------------------------------------------*/ + +void host_diag_event_report_payload(uint16_t event_Id, uint16_t length, + void *pPayload) +{ + tAniHdr *wmsg = NULL; + uint8_t *pBuf; + event_report_t *pEvent_report; + uint16_t total_len; + + if (cds_is_load_or_unload_in_progress()) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, + "%s: Unloading/Loading in Progress. Ignore!!!", + __func__); + return; + } + + if (nl_srv_is_initialized() != 0) + return; + + if (cds_is_multicast_logging()) { + total_len = sizeof(tAniHdr) + sizeof(event_report_t) + length; + + pBuf = (uint8_t *) qdf_mem_malloc(total_len); + + if (!pBuf) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "qdf_mem_malloc failed"); + return; + } + wmsg = (tAniHdr *) pBuf; + wmsg->type = PTT_MSG_DIAG_CMDS_TYPE; + wmsg->length = total_len; + wmsg->length = DIAG_SWAP16(wmsg->length); + pBuf += sizeof(tAniHdr); + + pEvent_report = (event_report_t *) pBuf; + pEvent_report->diag_type = DIAG_TYPE_EVENTS; + pEvent_report->event_id = event_Id; + pEvent_report->length = length; + + pBuf += sizeof(event_report_t); + + memcpy(pBuf, pPayload, length); + + if (ptt_sock_send_msg_to_app + (wmsg, 0, ANI_NL_MSG_PUMAC, INVALID_PID) < 0) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "Ptt Socket error sending message to the app!!"); + qdf_mem_free((void *)wmsg); + return; + } + + qdf_mem_free((void *)wmsg); + } + + return; + +} + +/** + * host_log_low_resource_failure() - This function is used to send low + * resource failure event + * @event_sub_type: Reason why the failure was observed + * + * This function is used to send low resource failure events to user space + * + * Return: None + * + */ +void host_log_low_resource_failure(uint8_t event_sub_type) +{ + WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, + 
struct host_event_wlan_low_resource_failure); + + wlan_diag_event.event_sub_type = event_sub_type; + + WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, + EVENT_WLAN_LOW_RESOURCE_FAILURE); +} + +void host_log_rsn_info(uint8_t *ucast_cipher, uint8_t *mcast_cipher, + uint8_t *akm_suite, uint8_t *group_mgmt) +{ + WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, + struct event_wlan_csr_rsn_info); + + qdf_mem_copy(wlan_diag_event.ucast_cipher, ucast_cipher, + RSN_OUI_SIZE); + qdf_mem_copy(wlan_diag_event.mcast_cipher, mcast_cipher, + RSN_OUI_SIZE); + qdf_mem_copy(wlan_diag_event.akm_suite, akm_suite, + RSN_OUI_SIZE); + qdf_mem_copy(wlan_diag_event.group_mgmt, group_mgmt, + RSN_OUI_SIZE); + + WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, + EVENT_WLAN_RSN_INFO); +} + +#ifdef FEATURE_WLAN_DIAG_SUPPORT +/** + * qdf_wow_wakeup_host_event()- send wow wakeup event + * @wow_wakeup_cause: WOW wakeup reason code + * + * This function sends wow wakeup reason code diag event + * + * Return: void. + */ +void qdf_wow_wakeup_host_event(uint8_t wow_wakeup_cause) +{ + WLAN_HOST_DIAG_EVENT_DEF(wowRequest, + host_event_wlan_powersave_wow_payload_type); + qdf_mem_zero(&wowRequest, sizeof(wowRequest)); + + wowRequest.event_subtype = WLAN_WOW_WAKEUP; + wowRequest.wow_wakeup_cause = wow_wakeup_cause; + WLAN_HOST_DIAG_EVENT_REPORT(&wowRequest, + EVENT_WLAN_POWERSAVE_WOW); +} + +void host_log_acs_req_event(uint8_t *intf, const uint8_t *hw_mode, uint16_t bw, + uint8_t ht, uint8_t vht, uint16_t chan_start, + uint16_t chan_end) +{ + WLAN_HOST_DIAG_EVENT_DEF(acs_req, struct host_event_wlan_acs_req); + + qdf_str_lcopy(acs_req.intf, intf, HOST_EVENT_INTF_STR_LEN); + qdf_str_lcopy(acs_req.hw_mode, hw_mode, HOST_EVENT_HW_MODE_STR_LEN); + acs_req.bw = bw; + acs_req.ht = ht; + acs_req.vht = vht; + acs_req.chan_start = chan_start; + acs_req.chan_end = chan_end; + + WLAN_HOST_DIAG_EVENT_REPORT(&acs_req, EVENT_WLAN_ACS_REQ); +} + +void host_log_acs_scan_start(uint32_t scan_id, uint8_t vdev_id) +{ + 
WLAN_HOST_DIAG_EVENT_DEF(acs_scan_start, + struct host_event_wlan_acs_scan_start); + + acs_scan_start.scan_id = scan_id; + acs_scan_start.vdev_id = vdev_id; + + WLAN_HOST_DIAG_EVENT_REPORT(&acs_scan_start, + EVENT_WLAN_ACS_SCAN_START); +} + +void host_log_acs_scan_done(const uint8_t *status, + uint8_t vdev_id, uint32_t scan_id) +{ + WLAN_HOST_DIAG_EVENT_DEF(acs_scan_done, + struct host_event_wlan_acs_scan_done); + + qdf_str_lcopy(acs_scan_done.status, status, HOST_EVENT_STATUS_STR_LEN); + acs_scan_done.vdev_id = vdev_id; + acs_scan_done.scan_id = scan_id; + + WLAN_HOST_DIAG_EVENT_REPORT(&acs_scan_done, EVENT_WLAN_ACS_SCAN_DONE); +} + +void host_log_acs_chan_spect_weight(uint16_t chan, uint16_t weight, + int32_t rssi, uint16_t bss_count) +{ + WLAN_HOST_DIAG_EVENT_DEF( + acs_chan_spect_weight, + struct host_event_wlan_acs_chan_spectral_weight); + + acs_chan_spect_weight.chan = chan; + acs_chan_spect_weight.weight = weight; + acs_chan_spect_weight.rssi = rssi; + acs_chan_spect_weight.bss_count = bss_count; + + WLAN_HOST_DIAG_EVENT_REPORT(&acs_chan_spect_weight, + EVENT_WLAN_ACS_CHANNEL_SPECTRAL_WEIGHT); +} + +void host_log_acs_best_chan(uint16_t chan, uint16_t weight) +{ + WLAN_HOST_DIAG_EVENT_DEF(acs_best_chan, + struct host_event_wlan_acs_best_chan); + + acs_best_chan.chan = chan; + acs_best_chan.weight = weight; + + WLAN_HOST_DIAG_EVENT_REPORT(&acs_best_chan, + EVENT_WLAN_ACS_BEST_CHANNEL); +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/i_host_diag_core_event.h b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/i_host_diag_core_event.h new file mode 100644 index 0000000000000000000000000000000000000000..50240f48132c99613f0aefe6cef606ca9cc1622f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/i_host_diag_core_event.h @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if !defined(__I_HOST_DIAG_CORE_EVENT_H) +#define __I_HOST_DIAG_CORE_EVENT_H + +/**========================================================================= + + \file i_host_diag_core_event.h + + \brief Android specific definitions for WLAN UTIL DIAG events + + ========================================================================*/ + +/* $Header$ */ + +/*-------------------------------------------------------------------------- + Include Files + ------------------------------------------------------------------------*/ +#include +#ifdef FEATURE_WLAN_DIAG_SUPPORT +#include +#endif + +/*-------------------------------------------------------------------------- + Preprocessor definitions and constants + ------------------------------------------------------------------------*/ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#ifdef FEATURE_WLAN_DIAG_SUPPORT + +void host_diag_event_report_payload(uint16_t event_Id, uint16_t length, + void *pPayload); +/*--------------------------------------------------------------------------- + Allocate an event payload holder + ---------------------------------------------------------------------------*/ +#define WLAN_HOST_DIAG_EVENT_DEF(payload_name, payload_type) \ + 
payload_type(payload_name) + +/*--------------------------------------------------------------------------- + Report the event + ---------------------------------------------------------------------------*/ +#define WLAN_HOST_DIAG_EVENT_REPORT(payload_ptr, ev_id) \ + do { \ + host_diag_event_report_payload(ev_id, \ + sizeof(*(payload_ptr)), \ + (void *)(payload_ptr)); \ + } while (0) + +#else /* FEATURE_WLAN_DIAG_SUPPORT */ + +#define WLAN_HOST_DIAG_EVENT_DEF(payload_name, payload_type) +#define WLAN_HOST_DIAG_EVENT_REPORT(payload_ptr, ev_id) + +#endif /* FEATURE_WLAN_DIAG_SUPPORT */ + +/** + * enum auth_timeout_type - authentication timeout type + * @AUTH_FAILURE_TIMEOUT: auth failure timeout + * @AUTH_RESPONSE_TIMEOUT: auth response timeout + */ +enum auth_timeout_type { + AUTH_FAILURE_TIMEOUT, + AUTH_RESPONSE_TIMEOUT, +}; + +/*------------------------------------------------------------------------- + Function declarations and documenation + ------------------------------------------------------------------------*/ +#ifdef FEATURE_WLAN_DIAG_SUPPORT +void host_diag_log_wlock(uint32_t reason, const char *wake_lock_name, + uint32_t timeout, uint32_t status); +#else +static inline void host_diag_log_wlock(uint32_t reason, + const char *wake_lock_name, + uint32_t timeout, uint32_t status) +{ + +} +#endif /* FEATURE_WLAN_DIAG_SUPPORT */ + +#ifdef FEATURE_WLAN_DIAG_SUPPORT +void host_log_low_resource_failure(uint8_t event_sub_type); +#else +static inline void host_log_low_resource_failure(uint8_t event_sub_type) +{ + +} +#endif /* FEATURE_WLAN_DIAG_SUPPORT */ + +#ifdef FEATURE_WLAN_DIAG_SUPPORT +/** + * host_log_rsn_info() - This function is used to send + * requested rsn info in assoc request + * @ucast_cipher: Unicast ciphers used in assoc request + * @mcast_cipher: Group ciphers used in assoc request + * @akm_suite: Gives information about akm suites used in assoc request + * @group_mgmt: Requested group mgmt cipher suite + * + * This function is used to send RSN 
info used in assoc req to user space + * + * Return: None + * + */ +void host_log_rsn_info(uint8_t *ucast_cipher, uint8_t *mcast_cipher, + uint8_t *auth_suite, uint8_t *gp_mgmt_cipher); + +#else +static inline void host_log_rsn_info(uint8_t *ucast_cipher, + uint8_t *mcast_cipher, + uint8_t *auth_suite, + uint8_t *gp_mgmt_cipher) +{ + +} +#endif /* FEATURE_WLAN_DIAG_SUPPORT */ + + +#ifdef FEATURE_WLAN_DIAG_SUPPORT +void qdf_wow_wakeup_host_event(uint8_t wow_wakeup_cause); + +/** + * host_log_acs_req_event() - ACS request event indication + * @intf: network interface name for WLAN + * @hw_mode: hw mode configured by hostapd + * @bw: channel bandwidth (MHz) + * @ht: a flag indicating whether HT phy mode is enabled + * @vht: a flag indicating whether VHT phy mode is enabled + * @chan_start: starting channel number for ACS scan + * @chan_end: ending channel number for ACS scan + * + * Indicates the diag event for ACS request with payload related + * to parameters populated by hostapd + * + * Return: None + */ +void host_log_acs_req_event(uint8_t *intf, const uint8_t *hw_mode, + uint16_t bw, uint8_t ht, uint8_t vht, + uint16_t chan_start, uint16_t chan_end); + +/** + * host_log_acs_scan_start() - ACS scan start event indication + * @scan_id: scan request ID + * @vdev_id: vdev/session ID + * + * Indicates the diag event for ACS scan start request + * + * Return: None + */ +void host_log_acs_scan_start(uint32_t scan_id, uint8_t vdev_id); + +/** + * host_log_acs_scan_done() - ACS scan done event indication + * @status: indicating whether ACS scan is successful + * @vdev_id: vdev/session ID + * @scan_id: scan request ID + * + * Indicates the diag event for ACS scan done + * + * Return: None + */ +void host_log_acs_scan_done(const uint8_t *status, uint8_t vdev_id, + uint32_t scan_id); + +/** + * host_log_acs_chan_spect_weight() - ACS channel spectral weight indication + * weight event indication + * @chan: channel number + * @weight: channel weight + * @rssi: RSSI value 
obtained after scanning + * @bss_count: number of BSS detected on this channel + * + * Indicates a diag event for ACS channel weight evaluation result + * + * Return: None + */ +void host_log_acs_chan_spect_weight(uint16_t chan, uint16_t weight, + int32_t rssi, uint16_t bss_count); + +/** + * host_log_acs_best_chan() - ACS best channel event indication + * @chan: channel number + * @weight: channel weight + * + * Indicates the best channel has been selected after ACS + * + * Return: None + */ +void host_log_acs_best_chan(uint16_t chan, uint16_t weight); + +#else +static inline void qdf_wow_wakeup_host_event(uint8_t wow_wakeup_cause) +{ + return; +} + +static inline void host_log_acs_req_event(uint8_t *intf, uint8_t *hw_mode, + uint16_t bw, uint8_t ht, uint8_t vht, + uint16_t chan_start, + uint16_t chan_end) +{ +} + +static inline void host_log_acs_scan_start(uint8_t *scan_type, + uint8_t *bss_type, uint32_t scan_id, + uint8_t vdev_id) +{ +} + +static inline void host_log_acs_scan_done(const uint8_t *status, + uint8_t vdev_id, uint32_t scan_id) +{ +} + +static inline void host_log_acs_chan_spect_weight(uint16_t chan, + uint16_t weight, int32_t rssi, + uint16_t bss_count) +{ +} + +static inline void host_log_acs_best_chan(uint16_t chan, uint32_t weight) +{ +} +#endif /* FEATURE_WLAN_DIAG_SUPPORT */ +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* __I_HOST_DIAG_CORE_EVENT_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/i_host_diag_core_log.h b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/i_host_diag_core_log.h new file mode 100644 index 0000000000000000000000000000000000000000..7c06d611b872ffb49204e55bd1f0c95fedb5156f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/host_diag_log/src/i_host_diag_core_log.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if !defined(__I_HOST_DIAG_CORE_LOG_H) +#define __I_HOST_DIAG_CORE_LOG_H + +#ifdef FEATURE_WLAN_DIAG_SUPPORT +#include +#endif + +/**========================================================================= + + \file i_host_diag_core_event.h + + \brief android-specific definitions for WLAN UTIL DIAG logs + + ========================================================================*/ + +/* $Header$ */ + +/*-------------------------------------------------------------------------- + Include Files + ------------------------------------------------------------------------*/ +#include +#include + +/*-------------------------------------------------------------------------- + Preprocessor definitions and constants + ------------------------------------------------------------------------*/ +/* FIXME To be removed when DIAG support is added. This definiton should be */ +/* picked from log.h file above. */ +typedef struct { + /* Specifies the length, in bytes of the entry, including this header. 
*/ + uint16_t len; + + /* Specifies the log code for the entry */ + uint16_t code; + + /*Time Stamp lo */ + uint32_t ts_lo; + + /*Time Stamp hi */ + uint32_t ts_hi; +} __packed log_hdr_type; + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#ifdef FEATURE_WLAN_DIAG_SUPPORT +void host_diag_log_set_code(void *ptr, uint16_t code); +void host_diag_log_set_length(void *ptr, uint16_t length); +void host_diag_log_set_timestamp(void *plog_hdr_ptr); +void host_diag_log_submit(void *plog_hdr_ptr); + +/*--------------------------------------------------------------------------- + Allocate an event payload holder + ---------------------------------------------------------------------------*/ + +#define WLAN_HOST_DIAG_LOG_ALLOC(payload_ptr, payload_type, log_code) \ + do { \ + payload_ptr = (payload_type *)qdf_mem_malloc(sizeof(payload_type)); \ + if (payload_ptr) { \ + host_diag_log_set_code(payload_ptr, log_code); \ + host_diag_log_set_length(payload_ptr, sizeof(payload_type)); \ + } \ + } while (0) + +/*--------------------------------------------------------------------------- + Report the event + ---------------------------------------------------------------------------*/ +#define WLAN_HOST_DIAG_LOG_REPORT(payload_ptr) \ + do { \ + if (payload_ptr) { \ + host_diag_log_submit(payload_ptr); \ + qdf_mem_free(payload_ptr); \ + } \ + } while (0) + +/*--------------------------------------------------------------------------- + Free the payload + ---------------------------------------------------------------------------*/ +#define WLAN_HOST_DIAG_LOG_FREE(payload_ptr) \ + do { \ + if (payload_ptr) { \ + qdf_mem_free(payload_ptr); \ + } \ + } while (0) + +#else /* FEATURE_WLAN_DIAG_SUPPORT */ + +#define WLAN_HOST_DIAG_LOG_ALLOC(payload_ptr, payload_type, log_code) +#define WLAN_HOST_DIAG_LOG_REPORT(payload_ptr) +#define WLAN_HOST_DIAG_LOG_FREE(payload_ptr) + +#endif /* FEATURE_WLAN_DIAG_SUPPORT */ + 
+/*------------------------------------------------------------------------- + Function declarations and documenation + ------------------------------------------------------------------------*/ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* __I_HOST_DIAG_CORE_LOG_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/logging/inc/wlan_logging_sock_svc.h b/drivers/staging/qca-wifi-host-cmn/utils/logging/inc/wlan_logging_sock_svc.h new file mode 100644 index 0000000000000000000000000000000000000000..2d38e3fffb64c21559a79c2e7f41441aebc42822 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/logging/inc/wlan_logging_sock_svc.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/****************************************************************************** +* wlan_logging_sock_svc.h +* +******************************************************************************/ + +#ifndef WLAN_LOGGING_SOCK_SVC_H +#define WLAN_LOGGING_SOCK_SVC_H + +#include +#include +#include +#include + +int wlan_logging_sock_init_svc(void); +int wlan_logging_sock_deinit_svc(void); +int wlan_log_to_user(QDF_TRACE_LEVEL log_level, char *to_be_sent, int length); + +#ifdef WLAN_LOGGING_SOCK_SVC_ENABLE +void wlan_logging_set_per_pkt_stats(void); +void wlan_logging_set_fw_flush_complete(void); +void wlan_flush_host_logs_for_fatal(void); +void wlan_logging_set_active(bool active); +void wlan_logging_set_log_to_console(bool log_to_console); +#else +static inline void wlan_flush_host_logs_for_fatal(void) {} +static inline void wlan_logging_set_per_pkt_stats(void) {} +static inline void wlan_logging_set_fw_flush_complete(void) {} +static inline void wlan_logging_set_active(bool active) {} +static inline void wlan_logging_set_log_to_console(bool log_to_console) {} +#endif /* WLAN_LOGGING_SOCK_SVC_ENABLE */ + +#if defined(WLAN_LOGGING_SOCK_SVC_ENABLE) && !defined(REMOVE_PKT_LOG) +void wlan_deregister_txrx_packetdump(void); +void wlan_register_txrx_packetdump(void); +#else +static inline void wlan_deregister_txrx_packetdump(void) {} +static inline void wlan_register_txrx_packetdump(void) {} +#endif + +#if defined(WLAN_LOGGING_SOCK_SVC_ENABLE) && defined(FEATURE_WLAN_DIAG_SUPPORT) +void wlan_report_log_completion(uint32_t is_fatal, + uint32_t indicator, + uint32_t reason_code, + uint8_t ring_id); +#else +static inline void wlan_report_log_completion(uint32_t is_fatal, + uint32_t indicator, + uint32_t reason_code, + uint8_t ring_id) +{ + return; +} + +#endif /* FEATURE_WLAN_DIAG_SUPPORT */ + +#if defined(CONFIG_MCL) && !defined(REMOVE_PKT_LOG) +void wlan_pkt_stats_to_logger_thread(void *pl_hdr, void *pkt_dump, void *data); +#else +static inline +void 
wlan_pkt_stats_to_logger_thread(void *pl_hdr, void *pkt_dump, void *data) +{ +} +#endif + +/** + * enum tx_status - tx status + * @tx_status_ok: successfully sent + acked + * @tx_status_discard: discard - not sent (congestion control) + * @tx_status_no_ack: no_ack - sent, but no ack + * @tx_status_download_fail: download_fail - + * the host could not deliver the tx frame to the target + * @tx_status_peer_del: peer_del - tx completion for + * already deleted peer used for HL case + * + * This enum has tx status types + */ +enum tx_status { + tx_status_ok, + tx_status_discard, + tx_status_no_ack, + tx_status_download_fail, + tx_status_peer_del, +}; + +#endif /* WLAN_LOGGING_SOCK_SVC_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/logging/inc/wlan_roam_debug.h b/drivers/staging/qca-wifi-host-cmn/utils/logging/inc/wlan_roam_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..844da5d82a569b8580df40f89d4aa2b0ca7eeea3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/logging/inc/wlan_roam_debug.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ +/* + * DOC: Roaming debug log operations declarations + */ +#ifndef _WLAN_ROAM_DEBUG_H_ +#define _WLAN_ROAM_DEBUG_H_ + + +#define roam_debug(args ...) \ + QDF_TRACE_DEBUG(QDF_MODULE_ID_ROAM_DEBUG, ## args) + +/** + * struct wlan_roam_debug_rec - roam debug information record definition + * @time: timestamp when record was added + * @operation: identifier for operation, command, event, etc. + * @vdev_id: vdev identifier + * @peer_id: peer_id. Range 0 - 255, 0xffff is invalid peer_id. + * @mac_addr: mac address of peer + * @peer_obj: pointer to peer object + * @arg1: Optional argument #1 + * @arg2: Opttional argument #2 + */ +struct wlan_roam_debug_rec { + uint64_t time; + uint8_t operation; + uint8_t vdev_id; + uint16_t peer_id; + struct qdf_mac_addr mac_addr; + void *peer_obj; + uint32_t arg1; + uint32_t arg2; +}; + +#define WLAN_ROAM_DEBUG_MAX_REC 256 +/** + * struct wlan_roam_debug_info - Buffer to store the wma debug records + * @index: index of the most recent entry in the circular buffer + * @num_max_rec: maximum records stored in the records array + * @rec: array to store wma debug records, used in circular fashion + */ +struct wlan_roam_debug_info { + qdf_atomic_t index; + uint32_t num_max_rec; + struct wlan_roam_debug_rec rec[WLAN_ROAM_DEBUG_MAX_REC]; +}; + +/** + * @DEBUG_PEER_CREATE_SEND: sent peer_create command to firmware + * @DEBUG_PEER_CREATE_RESP: received peer create response + * @DEBUG_PEER_DELETE_SEND: sent peer delete command to firmware + * @DEBUG_PEER_DELETE_RESP: received peer delete response + * @DEBUG_PEER_MAP_EVENT: received peer map event + * @DEBUG_PEER_UNMAP_EVENT: received peer unmap event + * @DEBUG_PEER_UNREF_DELETE: peer reference is decremented + * @DEBUG_DELETING_PEER_OBJ: peer object is deleted + * @DEBUG_ROAM_SYNCH_IND: received roam offload sync indication + * @DEBUG_ROAM_SYNCH_CNF: sent roam offload sync confirmation + * @DEBUG_ROAM_SYNCH_FAIL: received roam sync failure indication + * @DEBUG_ROAM_EVENT: received roam 
event + * @DEBUG_BUS_SUSPEND: host going into suspend mode + * @DEBUG_BUS_RESUME: host operation resumed + */ + +enum peer_debug_op { + DEBUG_PEER_CREATE_SEND = 0, + DEBUG_PEER_CREATE_RESP, + DEBUG_PEER_DELETE_SEND, + DEBUG_PEER_DELETE_RESP, + DEBUG_PEER_MAP_EVENT, + DEBUG_PEER_UNMAP_EVENT, + DEBUG_PEER_UNREF_DELETE, + DEBUG_DELETING_PEER_OBJ, + DEBUG_ROAM_SYNCH_IND, + DEBUG_ROAM_SYNCH_CNF, + DEBUG_ROAM_SYNCH_FAIL, + DEBUG_ROAM_EVENT, + DEBUG_WOW_ROAM_EVENT, + DEBUG_BUS_SUSPEND, + DEBUG_BUS_RESUME, + DEBUG_WOW_REASON, +}; + +#define DEBUG_INVALID_PEER_ID 0xffff +#define DEBUG_INVALID_VDEV_ID 0xff + +/** + * wlan_roam_debug_log() - Add a debug log entry to wlan roam debug records + * @vdev_id: vdev identifier + * @op: operation identifier + * @peer_id: peer id + * @mac_addr: mac address of peer, can be NULL + * @peer_obj: peer object address, can be NULL + * @arg1: extra argument #1 + * @arg2: extra argument #2 + * + * Return: none + */ +void wlan_roam_debug_log(uint8_t vdev_id, uint8_t op, + uint16_t peer_id, void *mac_addr, + void *peer_obj, uint32_t arg1, uint32_t arg2); + +/** + * wlan_roam_debug_dump_table() - Print the roam debug log records + * print all the valid debug records in the order of timestamp + * + * Return: none + */ +void wlan_roam_debug_dump_table(void); +#endif /* _WLAN_ROAM_DEBUG_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/logging/src/wlan_logging_sock_svc.c b/drivers/staging/qca-wifi-host-cmn/utils/logging/src/wlan_logging_sock_svc.c new file mode 100644 index 0000000000000000000000000000000000000000..0df84a413d528f11a7ca3099704fcfc4b3bf491b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/logging/src/wlan_logging_sock_svc.c @@ -0,0 +1,1356 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/****************************************************************************** +* wlan_logging_sock_svc.c +* +******************************************************************************/ + +#ifdef WLAN_LOGGING_SOCK_SVC_ENABLE +#include +#ifdef CONFIG_MCL +#include +#include +#include "cds_utils.h" +#include "csr_api.h" +#include "wlan_hdd_main.h" +#include "wma.h" +#include "ol_txrx_api.h" +#include "pktlog_ac.h" +#endif +#include +#include +#include +#include +#include +#include +#include +#include "host_diag_core_log.h" + +#ifdef CNSS_GENL +#include +#endif + +#define MAX_NUM_PKT_LOG 32 + +#define ALLOWED_LOG_LEVELS_TO_CONSOLE(level) \ + ((QDF_TRACE_LEVEL_FATAL == (level)) || \ + (QDF_TRACE_LEVEL_ERROR == (level)) || \ + (QDF_TRACE_LEVEL_WARN == (level)) || \ + (QDF_TRACE_LEVEL_INFO == (level))) + +#define LOGGING_TRACE(level, args ...) 
\ + QDF_TRACE(QDF_MODULE_ID_HDD, level, ## args) + +/* Global variables */ + +#define ANI_NL_MSG_LOG_TYPE 89 +#define ANI_NL_MSG_READY_IND_TYPE 90 +#ifndef MAX_LOGMSG_COUNT +#define MAX_LOGMSG_COUNT 256 +#endif +#define MAX_LOGMSG_LENGTH 2048 +#define MAX_SKBMSG_LENGTH 4096 +#define MAX_PKTSTATS_LENGTH 2048 +#define MAX_PKTSTATS_BUFF 16 + +#define HOST_LOG_DRIVER_MSG 0x001 +#define HOST_LOG_PER_PKT_STATS 0x002 +#define HOST_LOG_FW_FLUSH_COMPLETE 0x003 +#define DIAG_TYPE_LOGS 1 +#define PTT_MSG_DIAG_CMDS_TYPE 0x5050 + +struct log_msg { + struct list_head node; + unsigned int radio; + unsigned int index; + /* indicates the current filled log length in logbuf */ + unsigned int filled_length; + /* + * Buf to hold the log msg + * tAniHdr + log + */ + char logbuf[MAX_LOGMSG_LENGTH]; +}; + +/** + * struct packet_dump - This data structure contains the + * Tx/Rx packet stats + * @status: Status + * @type: Type + * @driver_ts: driver timestamp + * @fw_ts: fw timestamp + */ +struct packet_dump { + unsigned char status; + unsigned char type; + uint32_t driver_ts; + uint16_t fw_ts; +} __attribute__((__packed__)); + +/** + * struct pkt_stats_msg - This data structure contains the + * pkt stats node for link list + * @node: LinkList node + * @node: Pointer to skb + */ +struct pkt_stats_msg { + struct list_head node; + struct sk_buff *skb; +}; + +struct wlan_logging { + /* Log Fatal and ERROR to console */ + bool log_to_console; + /* Number of buffers to be used for logging */ + uint32_t num_buf; + uint32_t buffer_length; + /* Lock to synchronize access to shared logging resource */ + spinlock_t spin_lock; + /* Holds the free node which can be used for filling logs */ + struct list_head free_list; + /* Holds the filled nodes which needs to be indicated to APP */ + struct list_head filled_list; + /* Wait queue for Logger thread */ + wait_queue_head_t wait_queue; + /* Logger thread */ + struct task_struct *thread; + /* Logging thread sets this variable on exit */ + struct 
completion shutdown_comp; + /* Indicates to logger thread to exit */ + bool exit; + /* Holds number of dropped logs */ + unsigned int drop_count; + /* current logbuf to which the log will be filled to */ + struct log_msg *pcur_node; + /* Event flag used for wakeup and post indication*/ + unsigned long eventFlag; + /* Indicates logger thread is activated */ + bool is_active; + /* Flush completion check */ + bool is_flush_complete; + /* paramaters for pkt stats */ + struct list_head pkt_stat_free_list; + struct list_head pkt_stat_filled_list; + struct pkt_stats_msg *pkt_stats_pcur_node; + unsigned int pkt_stat_drop_cnt; + spinlock_t pkt_stats_lock; + unsigned int pkt_stats_msg_idx; +}; + +static struct wlan_logging gwlan_logging; +static struct log_msg gplog_msg[MAX_LOGMSG_COUNT]; +static struct pkt_stats_msg *gpkt_stats_buffers; + +/* Need to call this with spin_lock acquired */ +static int wlan_queue_logmsg_for_app(void) +{ + char *ptr; + int ret = 0; + ptr = &gwlan_logging.pcur_node->logbuf[sizeof(tAniHdr)]; + ptr[gwlan_logging.pcur_node->filled_length] = '\0'; + + *(unsigned short *)(gwlan_logging.pcur_node->logbuf) = + ANI_NL_MSG_LOG_TYPE; + *(unsigned short *)(gwlan_logging.pcur_node->logbuf + 2) = + gwlan_logging.pcur_node->filled_length; + list_add_tail(&gwlan_logging.pcur_node->node, + &gwlan_logging.filled_list); + + if (!list_empty(&gwlan_logging.free_list)) { + /* Get buffer from free list */ + gwlan_logging.pcur_node = + (struct log_msg *)(gwlan_logging.free_list.next); + list_del_init(gwlan_logging.free_list.next); + } else if (!list_empty(&gwlan_logging.filled_list)) { + /* Get buffer from filled list */ + /* This condition will drop the packet from being + * indicated to app + */ + gwlan_logging.pcur_node = + (struct log_msg *)(gwlan_logging.filled_list.next); + ++gwlan_logging.drop_count; + list_del_init(gwlan_logging.filled_list.next); + ret = 1; + } + + /* Reset the current node values */ + gwlan_logging.pcur_node->filled_length = 0; + return ret; 
+} + +static const char *current_process_name(void) +{ + if (in_irq()) + return "irq"; + + if (in_softirq()) + return "soft_irq"; + + return current->comm; +} + +#ifdef QCA_WIFI_3_0_ADRASTEA +/** + * wlan_add_user_log_time_stamp() - populate firmware and kernel timestamps + * @tbuf: Pointer to time stamp buffer + * @tbuf_sz: Time buffer size + * @ts: Time stamp value + * + * For adrastea time stamp is QTIMER raw tick which will be used by cnss_diag + * to convert it into user visible time stamp. In adrstea FW also uses QTIMER + * raw ticks which is needed to synchronize host and fw log time stamps + * + * Also add logcat timestamp so that driver logs and + * logcat logs can be co-related + * + * For discrete solution e.g rome use system tick and convert it into + * seconds.milli seconds + * + * Return: number of characters written in target buffer not including + * trailing '/0' + */ +static int wlan_add_user_log_time_stamp(char *tbuf, size_t tbuf_sz, uint64_t ts) +{ + char time_buf[20]; + + qdf_get_time_of_the_day_in_hr_min_sec_usec(time_buf, sizeof(time_buf)); + + return scnprintf(tbuf, tbuf_sz, "[%.16s][0x%llx]%s", + current_process_name(), ts, time_buf); +} +#else +static int wlan_add_user_log_time_stamp(char *tbuf, size_t tbuf_sz, uint64_t ts) +{ + uint32_t rem; + char time_buf[20]; + + qdf_get_time_of_the_day_in_hr_min_sec_usec(time_buf, sizeof(time_buf)); + + rem = do_div(ts, QDF_MC_TIMER_TO_SEC_UNIT); + return scnprintf(tbuf, tbuf_sz, "[%.16s][%lu.%06lu]%s", + current_process_name(), (unsigned long)ts, + (unsigned long)rem, time_buf); +} +#endif /* QCA_WIFI_3_0_ADRASTEA */ + +#ifdef CONFIG_MCL +static inline void print_to_console(char *tbuf, char *to_be_sent) +{ + pr_info("%s %s\n", tbuf, to_be_sent); +} +#else +#define print_to_console(str1, str2) +#endif + +int wlan_log_to_user(QDF_TRACE_LEVEL log_level, char *to_be_sent, int length) +{ + char *ptr; + char tbuf[60]; + int tlen; + int total_log_len; + unsigned int *pfilled_length; + bool wake_up_thread = 
false; + unsigned long flags; + uint64_t ts; + + /* if logging isn't up yet, just dump to dmesg */ + if (!gwlan_logging.is_active) { + pr_info("%s\n", to_be_sent); + return 0; + } + + /* Add the current time stamp */ + ts = qdf_get_log_timestamp(); + tlen = wlan_add_user_log_time_stamp(tbuf, sizeof(tbuf), ts); + + /* 1+1 indicate '\n'+'\0' */ + total_log_len = length + tlen + 1 + 1; + + spin_lock_irqsave(&gwlan_logging.spin_lock, flags); + /* wlan logging svc resources are not yet initialized */ + if (!gwlan_logging.pcur_node) { + spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags); + return -EIO; + } + + pfilled_length = &gwlan_logging.pcur_node->filled_length; + + /* Check if we can accommodate more log into current node/buffer */ + if ((MAX_LOGMSG_LENGTH - (*pfilled_length + + sizeof(tAniNlHdr))) < total_log_len) { + wake_up_thread = true; + wlan_queue_logmsg_for_app(); + pfilled_length = &gwlan_logging.pcur_node->filled_length; + } + + ptr = &gwlan_logging.pcur_node->logbuf[sizeof(tAniHdr)]; + + if (unlikely(MAX_LOGMSG_LENGTH < (sizeof(tAniNlHdr) + total_log_len))) { + /* + * Assumption here is that we receive logs which is less than + * MAX_LOGMSG_LENGTH, where we can accommodate the + * tAniNlHdr + [context][timestamp] + log + * If log length is over MAX_LOGMSG_LENGTH, + * the overflow part will be discarded. + */ + length = MAX_LOGMSG_LENGTH - sizeof(tAniNlHdr) - tlen - 2; + /* + * QDF_ASSERT if complete log was not accommodated into + * the available buffer. 
+ */ + QDF_ASSERT(0); + } + + memcpy(&ptr[*pfilled_length], tbuf, tlen); + memcpy(&ptr[*pfilled_length + tlen], to_be_sent, length); + *pfilled_length += tlen + length; + ptr[*pfilled_length] = '\n'; + *pfilled_length += 1; + + spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags); + + /* Wakeup logger thread */ + if (wake_up_thread) { + set_bit(HOST_LOG_DRIVER_MSG, &gwlan_logging.eventFlag); + wake_up_interruptible(&gwlan_logging.wait_queue); + } + + if (gwlan_logging.log_to_console + && ALLOWED_LOG_LEVELS_TO_CONSOLE(log_level)) { + print_to_console(tbuf, to_be_sent); + } + + return 0; +} + +/** + * nl_srv_bcast_host_logs() - Wrapper to send bcast msgs to host logs mcast grp + * @skb: sk buffer pointer + * + * Sends the bcast message to host logs multicast group with generic nl socket + * if CNSS_GENL is enabled. Else, use the legacy netlink socket to send. + * + * Return: zero on success, error code otherwise + */ +#ifdef CNSS_GENL +static int nl_srv_bcast_host_logs(struct sk_buff *skb) +{ + return nl_srv_bcast(skb, CLD80211_MCGRP_HOST_LOGS, ANI_NL_MSG_LOG); +} +#else +static int nl_srv_bcast_host_logs(struct sk_buff *skb) +{ + return nl_srv_bcast(skb); +} +#endif + +#ifndef REMOVE_PKT_LOG +/** + * pkt_stats_fill_headers() - This function adds headers to skb + * @skb: skb to which headers need to be added + * + * Return: 0 on success or Errno on failure + */ +static int pkt_stats_fill_headers(struct sk_buff *skb) +{ + struct host_log_pktlog_info cds_pktlog; + int cds_pkt_size = sizeof(struct host_log_pktlog_info); + tAniNlHdr msg_header; + int extra_header_len, nl_payload_len; + static int nlmsg_seq; + int diag_type; + + qdf_mem_zero(&cds_pktlog, cds_pkt_size); + cds_pktlog.version = VERSION_LOG_WLAN_PKT_LOG_INFO_C; + cds_pktlog.buf_len = skb->len; + cds_pktlog.seq_no = gwlan_logging.pkt_stats_msg_idx++; +#ifdef CONFIG_MCL + host_diag_log_set_code(&cds_pktlog, LOG_WLAN_PKT_LOG_INFO_C); + host_diag_log_set_length(&cds_pktlog.log_hdr, skb->len + + cds_pkt_size); 
+#endif + + if (unlikely(skb_headroom(skb) < cds_pkt_size)) { + pr_err("VPKT [%d]: Insufficient headroom, head[%pK], data[%pK], req[%zu]", + __LINE__, skb->head, skb->data, sizeof(msg_header)); + return -EIO; + } + + qdf_mem_copy(skb_push(skb, cds_pkt_size), + &cds_pktlog, cds_pkt_size); + + if (unlikely(skb_headroom(skb) < sizeof(int))) { + pr_err("VPKT [%d]: Insufficient headroom, head[%pK], data[%pK], req[%zu]", + __LINE__, skb->head, skb->data, sizeof(int)); + return -EIO; + } + + diag_type = DIAG_TYPE_LOGS; + qdf_mem_copy(skb_push(skb, sizeof(int)), &diag_type, sizeof(int)); + + extra_header_len = sizeof(msg_header.radio) + sizeof(tAniHdr) + + sizeof(struct nlmsghdr); + nl_payload_len = extra_header_len + skb->len; + + msg_header.nlh.nlmsg_type = ANI_NL_MSG_PUMAC; + msg_header.nlh.nlmsg_len = nl_payload_len; + msg_header.nlh.nlmsg_flags = NLM_F_REQUEST; + msg_header.nlh.nlmsg_pid = 0; + msg_header.nlh.nlmsg_seq = nlmsg_seq++; + msg_header.radio = 0; + msg_header.wmsg.type = PTT_MSG_DIAG_CMDS_TYPE; + msg_header.wmsg.length = cpu_to_be16(skb->len); + + if (unlikely(skb_headroom(skb) < sizeof(msg_header))) { + pr_err("VPKT [%d]: Insufficient headroom, head[%pK], data[%pK], req[%zu]", + __LINE__, skb->head, skb->data, sizeof(msg_header)); + return -EIO; + } + + qdf_mem_copy(skb_push(skb, sizeof(msg_header)), &msg_header, + sizeof(msg_header)); + + return 0; +} + +/** + * nl_srv_bcast_diag() - Wrapper to send bcast msgs to diag events mcast grp + * @skb: sk buffer pointer + * + * Sends the bcast message to diag events multicast group with generic nl socket + * if CNSS_GENL is enabled. Else, use the legacy netlink socket to send. 
+ * + * Return: zero on success, error code otherwise + */ +static int nl_srv_bcast_diag(struct sk_buff *skb) +{ +#ifdef CNSS_GENL + return nl_srv_bcast(skb, CLD80211_MCGRP_DIAG_EVENTS, ANI_NL_MSG_PUMAC); +#else + return nl_srv_bcast(skb); +#endif +} + +/** + * pktlog_send_per_pkt_stats_to_user() - This function is used to send the per + * packet statistics to the user + * + * This function is used to send the per packet statistics to the user + * + * Return: Success if the message is posted to user + */ +static int pktlog_send_per_pkt_stats_to_user(void) +{ + int ret = -1; + struct pkt_stats_msg *pstats_msg; + unsigned long flags; + struct sk_buff *skb_new = NULL; + static int rate_limit; + bool free_old_skb = false; + + while (!list_empty(&gwlan_logging.pkt_stat_filled_list) + && !gwlan_logging.exit) { + skb_new = dev_alloc_skb(MAX_SKBMSG_LENGTH); + if (skb_new == NULL) { + if (!rate_limit) { + pr_err("%s: dev_alloc_skb() failed for msg size[%d] drop count = %u\n", + __func__, MAX_SKBMSG_LENGTH, + gwlan_logging.drop_count); + } + rate_limit = 1; + ret = -ENOMEM; + break; + } + + spin_lock_irqsave(&gwlan_logging.pkt_stats_lock, flags); + + pstats_msg = (struct pkt_stats_msg *) + (gwlan_logging.pkt_stat_filled_list.next); + list_del_init(gwlan_logging.pkt_stat_filled_list.next); + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, flags); + + ret = pkt_stats_fill_headers(pstats_msg->skb); + if (ret < 0) { + pr_err("%s failed to fill headers %d\n", __func__, ret); + free_old_skb = true; + goto err; + } + ret = nl_srv_bcast_diag(pstats_msg->skb); + if (ret < 0) { + pr_info("%s: Send Failed %d drop_count = %u\n", + __func__, ret, + ++gwlan_logging.pkt_stat_drop_cnt); + } else { + ret = 0; + } +err: + /* + * Free old skb in case or error before assigning new skb + * to the free list. 
+ */ + if (free_old_skb) + dev_kfree_skb(pstats_msg->skb); + + spin_lock_irqsave(&gwlan_logging.pkt_stats_lock, flags); + pstats_msg->skb = skb_new; + list_add_tail(&pstats_msg->node, + &gwlan_logging.pkt_stat_free_list); + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, flags); + ret = 0; + } + + return ret; + +} +#else +static inline +int pktlog_send_per_pkt_stats_to_user(void) +{ + return 0; +} +#endif + +static int send_filled_buffers_to_user(void) +{ + int ret = -1; + struct log_msg *plog_msg; + int payload_len; + int tot_msg_len; + tAniNlHdr *wnl; + struct sk_buff *skb = NULL; + struct nlmsghdr *nlh; + static int nlmsg_seq; + unsigned long flags; + static int rate_limit; + + while (!list_empty(&gwlan_logging.filled_list) + && !gwlan_logging.exit) { + + skb = dev_alloc_skb(MAX_LOGMSG_LENGTH); + if (skb == NULL) { + if (!rate_limit) { + pr_err + ("%s: dev_alloc_skb() failed for msg size[%d] drop count = %u\n", + __func__, MAX_LOGMSG_LENGTH, + gwlan_logging.drop_count); + } + rate_limit = 1; + ret = -ENOMEM; + break; + } + rate_limit = 0; + + spin_lock_irqsave(&gwlan_logging.spin_lock, flags); + + plog_msg = (struct log_msg *) + (gwlan_logging.filled_list.next); + list_del_init(gwlan_logging.filled_list.next); + spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags); + /* 4 extra bytes for the radio idx */ + payload_len = plog_msg->filled_length + + sizeof(wnl->radio) + sizeof(tAniHdr); + + tot_msg_len = NLMSG_SPACE(payload_len); + nlh = nlmsg_put(skb, 0, nlmsg_seq++, + ANI_NL_MSG_LOG, payload_len, NLM_F_REQUEST); + if (NULL == nlh) { + spin_lock_irqsave(&gwlan_logging.spin_lock, flags); + list_add_tail(&plog_msg->node, + &gwlan_logging.free_list); + spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags); + pr_err("%s: drop_count = %u\n", __func__, + ++gwlan_logging.drop_count); + pr_err("%s: nlmsg_put() failed for msg size[%d]\n", + __func__, tot_msg_len); + dev_kfree_skb(skb); + skb = NULL; + ret = -EINVAL; + continue; + } + + wnl = (tAniNlHdr *) nlh; 
+ wnl->radio = plog_msg->radio; + memcpy(&wnl->wmsg, plog_msg->logbuf, + plog_msg->filled_length + sizeof(tAniHdr)); + + spin_lock_irqsave(&gwlan_logging.spin_lock, flags); + list_add_tail(&plog_msg->node, &gwlan_logging.free_list); + spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags); + + ret = nl_srv_bcast_host_logs(skb); + /* print every 64th drop count */ + if (ret < 0 && (!(gwlan_logging.drop_count % 0x40))) { + pr_err("%s: Send Failed %d drop_count = %u\n", + __func__, ret, ++gwlan_logging.drop_count); + } + } + + return ret; +} + +#ifdef FEATURE_WLAN_DIAG_SUPPORT +/** + * wlan_report_log_completion() - Report bug report completion to userspace + * @is_fatal: Type of event, fatal or not + * @indicator: Source of bug report, framework/host/firmware + * @reason_code: Reason for triggering bug report + * @ring_id: Ring id of logging entities + * + * This function is used to report the bug report completion to userspace + * + * Return: None + */ +void wlan_report_log_completion(uint32_t is_fatal, + uint32_t indicator, + uint32_t reason_code, + uint8_t ring_id) +{ + WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, + struct host_event_wlan_log_complete); + + wlan_diag_event.is_fatal = is_fatal; + wlan_diag_event.indicator = indicator; + wlan_diag_event.reason_code = reason_code; + wlan_diag_event.reserved = ring_id; + + WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, EVENT_WLAN_LOG_COMPLETE); +} +#endif + +#ifdef CONFIG_MCL +/** + * send_flush_completion_to_user() - Indicate flush completion to the user + * @ring_id: Ring id of logging entities + * + * This function is used to send the flush completion message to user space + * + * Return: None + */ +static void send_flush_completion_to_user(uint8_t ring_id) +{ + uint32_t is_fatal, indicator, reason_code; + bool recovery_needed; + + cds_get_and_reset_log_completion(&is_fatal, + &indicator, &reason_code, &recovery_needed); + + /* Error on purpose, so that it will get logged in the kmsg */ + 
LOGGING_TRACE(QDF_TRACE_LEVEL_DEBUG, + "%s: Sending flush done to userspace reason code %d", + __func__, reason_code); + + wlan_report_log_completion(is_fatal, indicator, reason_code, ring_id); + + if (recovery_needed) + cds_trigger_recovery(QDF_REASON_UNSPECIFIED); +} +#endif + +/** + * wlan_logging_thread() - The WLAN Logger thread + * @Arg - pointer to the HDD context + * + * This thread logs log message to App registered for the logs. + */ +static int wlan_logging_thread(void *Arg) +{ + int ret_wait_status = 0; + int ret = 0; + unsigned long flags; + + while (!gwlan_logging.exit) { + ret_wait_status = + wait_event_interruptible(gwlan_logging.wait_queue, + (!list_empty + (&gwlan_logging.filled_list) + || test_bit( + HOST_LOG_DRIVER_MSG, + &gwlan_logging.eventFlag) + || test_bit( + HOST_LOG_PER_PKT_STATS, + &gwlan_logging.eventFlag) + || test_bit( + HOST_LOG_FW_FLUSH_COMPLETE, + &gwlan_logging.eventFlag) + || gwlan_logging.exit)); + + if (ret_wait_status == -ERESTARTSYS) { + pr_err + ("%s: wait_event_interruptible returned -ERESTARTSYS", + __func__); + break; + } + + if (gwlan_logging.exit) + break; + + + if (test_and_clear_bit(HOST_LOG_DRIVER_MSG, + &gwlan_logging.eventFlag)) { + ret = send_filled_buffers_to_user(); + if (-ENOMEM == ret) + msleep(200); +#ifdef CONFIG_MCL + if (WLAN_LOG_INDICATOR_HOST_ONLY == + cds_get_log_indicator()) { + send_flush_completion_to_user( + RING_ID_DRIVER_DEBUG); + } +#endif + } + + if (test_and_clear_bit(HOST_LOG_PER_PKT_STATS, + &gwlan_logging.eventFlag)) { + ret = pktlog_send_per_pkt_stats_to_user(); + if (-ENOMEM == ret) + msleep(200); + } + + if (test_and_clear_bit(HOST_LOG_FW_FLUSH_COMPLETE, + &gwlan_logging.eventFlag)) { + /* Flush bit could have been set while we were mid + * way in the logging thread. 
So, need to check other + * buffers like log messages, per packet stats again + * to flush any residual data in them + */ + if (gwlan_logging.is_flush_complete == true) { + gwlan_logging.is_flush_complete = false; +#ifdef CONFIG_MCL + send_flush_completion_to_user( + RING_ID_DRIVER_DEBUG); +#endif + } else { + gwlan_logging.is_flush_complete = true; + /* Flush all current host logs*/ + spin_lock_irqsave(&gwlan_logging.spin_lock, + flags); + wlan_queue_logmsg_for_app(); + spin_unlock_irqrestore(&gwlan_logging.spin_lock, + flags); + set_bit(HOST_LOG_DRIVER_MSG, + &gwlan_logging.eventFlag); + set_bit(HOST_LOG_PER_PKT_STATS, + &gwlan_logging.eventFlag); + set_bit(HOST_LOG_FW_FLUSH_COMPLETE, + &gwlan_logging.eventFlag); + wake_up_interruptible( + &gwlan_logging.wait_queue); + } + } + } + + complete_and_exit(&gwlan_logging.shutdown_comp, 0); + + return 0; +} + +void wlan_logging_set_active(bool active) +{ + gwlan_logging.is_active = active; +} + +void wlan_logging_set_log_to_console(bool log_to_console) +{ + gwlan_logging.log_to_console = log_to_console; +} + +int wlan_logging_sock_init_svc(void) +{ + int i = 0, j, pkt_stats_size; + unsigned long irq_flag; + + spin_lock_init(&gwlan_logging.spin_lock); + spin_lock_init(&gwlan_logging.pkt_stats_lock); + + gwlan_logging.log_to_console = true; + gwlan_logging.num_buf = MAX_LOGMSG_COUNT; + gwlan_logging.buffer_length = MAX_LOGMSG_LENGTH; + + spin_lock_irqsave(&gwlan_logging.spin_lock, irq_flag); + INIT_LIST_HEAD(&gwlan_logging.free_list); + INIT_LIST_HEAD(&gwlan_logging.filled_list); + + for (i = 0; i < gwlan_logging.num_buf; i++) { + list_add(&gplog_msg[i].node, &gwlan_logging.free_list); + gplog_msg[i].index = i; + } + gwlan_logging.pcur_node = (struct log_msg *) + (gwlan_logging.free_list.next); + list_del_init(gwlan_logging.free_list.next); + spin_unlock_irqrestore(&gwlan_logging.spin_lock, irq_flag); + + /* Initialize the pktStats data structure here */ + pkt_stats_size = sizeof(struct pkt_stats_msg); + 
gpkt_stats_buffers = vmalloc(MAX_PKTSTATS_BUFF * pkt_stats_size); + if (!gpkt_stats_buffers) { + pr_err("%s: Could not allocate memory for Pkt stats\n", + __func__); + goto err1; + } + qdf_mem_zero(gpkt_stats_buffers, + MAX_PKTSTATS_BUFF * pkt_stats_size); + + spin_lock_irqsave(&gwlan_logging.pkt_stats_lock, irq_flag); + gwlan_logging.pkt_stats_msg_idx = 0; + INIT_LIST_HEAD(&gwlan_logging.pkt_stat_free_list); + INIT_LIST_HEAD(&gwlan_logging.pkt_stat_filled_list); + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, irq_flag); + + + for (i = 0; i < MAX_PKTSTATS_BUFF; i++) { + gpkt_stats_buffers[i].skb = dev_alloc_skb(MAX_PKTSTATS_LENGTH); + if (gpkt_stats_buffers[i].skb == NULL) { + pr_err("%s: Memory alloc failed for skb", __func__); + /* free previously allocated skb and return */ + for (j = 0; j < i ; j++) + dev_kfree_skb(gpkt_stats_buffers[j].skb); + goto err2; + } + spin_lock_irqsave(&gwlan_logging.pkt_stats_lock, irq_flag); + list_add(&gpkt_stats_buffers[i].node, + &gwlan_logging.pkt_stat_free_list); + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, irq_flag); + } + spin_lock_irqsave(&gwlan_logging.pkt_stats_lock, irq_flag); + gwlan_logging.pkt_stats_pcur_node = (struct pkt_stats_msg *) + (gwlan_logging.pkt_stat_free_list.next); + list_del_init(gwlan_logging.pkt_stat_free_list.next); + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, irq_flag); + /* Pkt Stats intialization done */ + + init_waitqueue_head(&gwlan_logging.wait_queue); + gwlan_logging.exit = false; + clear_bit(HOST_LOG_DRIVER_MSG, &gwlan_logging.eventFlag); + clear_bit(HOST_LOG_PER_PKT_STATS, &gwlan_logging.eventFlag); + clear_bit(HOST_LOG_FW_FLUSH_COMPLETE, &gwlan_logging.eventFlag); + init_completion(&gwlan_logging.shutdown_comp); + gwlan_logging.thread = kthread_create(wlan_logging_thread, NULL, + "wlan_logging_thread"); + if (IS_ERR(gwlan_logging.thread)) { + pr_err("%s: Could not Create LogMsg Thread Controller", + __func__); + goto err3; + } + 
wake_up_process(gwlan_logging.thread); + gwlan_logging.is_active = true; + gwlan_logging.is_flush_complete = false; + + return 0; + +err3: + for (i = 0; i < MAX_PKTSTATS_BUFF; i++) { + if (gpkt_stats_buffers[i].skb) + dev_kfree_skb(gpkt_stats_buffers[i].skb); + } +err2: + spin_lock_irqsave(&gwlan_logging.pkt_stats_lock, irq_flag); + gwlan_logging.pkt_stats_pcur_node = NULL; + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, irq_flag); + vfree(gpkt_stats_buffers); + gpkt_stats_buffers = NULL; +err1: + spin_lock_irqsave(&gwlan_logging.spin_lock, irq_flag); + gwlan_logging.pcur_node = NULL; + spin_unlock_irqrestore(&gwlan_logging.spin_lock, irq_flag); + + return -ENOMEM; +} + +int wlan_logging_sock_deinit_svc(void) +{ + unsigned long irq_flag; + int i; + + if (!gwlan_logging.pcur_node) + return 0; + +#ifdef CONFIG_MCL + INIT_COMPLETION(gwlan_logging.shutdown_comp); +#endif + gwlan_logging.exit = true; + gwlan_logging.is_active = false; +#ifdef CONFIG_MCL + cds_set_multicast_logging(0); +#endif + gwlan_logging.is_flush_complete = false; + clear_bit(HOST_LOG_DRIVER_MSG, &gwlan_logging.eventFlag); + clear_bit(HOST_LOG_PER_PKT_STATS, &gwlan_logging.eventFlag); + clear_bit(HOST_LOG_FW_FLUSH_COMPLETE, &gwlan_logging.eventFlag); + wake_up_interruptible(&gwlan_logging.wait_queue); + wait_for_completion(&gwlan_logging.shutdown_comp); + + spin_lock_irqsave(&gwlan_logging.spin_lock, irq_flag); + gwlan_logging.pcur_node = NULL; + spin_unlock_irqrestore(&gwlan_logging.spin_lock, irq_flag); + + spin_lock_irqsave(&gwlan_logging.pkt_stats_lock, irq_flag); + gwlan_logging.pkt_stats_pcur_node = NULL; + gwlan_logging.pkt_stats_msg_idx = 0; + gwlan_logging.pkt_stat_drop_cnt = 0; + for (i = 0; i < MAX_PKTSTATS_BUFF; i++) { + if (gpkt_stats_buffers[i].skb) + dev_kfree_skb(gpkt_stats_buffers[i].skb); + } + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, irq_flag); + + vfree(gpkt_stats_buffers); + gpkt_stats_buffers = NULL; + + return 0; +} + +/** + * 
wlan_logging_set_per_pkt_stats() - This function triggers per packet logging
+ *
+ * This function is used to send signal to the logger thread for logging per
+ * packet stats
+ *
+ * Return: None
+ *
+ */
+void wlan_logging_set_per_pkt_stats(void)
+{
+	/* no-op while the logging service is inactive */
+	if (gwlan_logging.is_active == false)
+		return;
+
+	set_bit(HOST_LOG_PER_PKT_STATS, &gwlan_logging.eventFlag);
+	wake_up_interruptible(&gwlan_logging.wait_queue);
+}
+
+/*
+ * wlan_logging_set_fw_flush_complete() - FW log flush completion
+ *
+ * This function is used to send signal to the logger thread to indicate
+ * that the flushing of FW logs is complete by the FW
+ *
+ * Return: None
+ *
+ */
+void wlan_logging_set_fw_flush_complete(void)
+{
+	if (gwlan_logging.is_active == false
+#ifdef CONFIG_MCL
+	    || !cds_is_fatal_event_enabled()
+#endif
+	    )
+		return;
+
+	set_bit(HOST_LOG_FW_FLUSH_COMPLETE, &gwlan_logging.eventFlag);
+	wake_up_interruptible(&gwlan_logging.wait_queue);
+}
+
+/**
+ * wlan_flush_host_logs_for_fatal() - Flush host logs
+ *
+ * This function is used to send signal to the logger thread to
+ * Flush the host logs
+ *
+ * Return: None
+ */
+void wlan_flush_host_logs_for_fatal(void)
+{
+	unsigned long flags;
+
+#ifdef CONFIG_MCL
+	if (cds_is_log_report_in_progress()) {
+#endif
+		pr_info("%s:flush all host logs Setting HOST_LOG_POST_MASK\n",
+			__func__);
+		/* queue the partially-filled current buffer so it is flushed */
+		spin_lock_irqsave(&gwlan_logging.spin_lock, flags);
+		wlan_queue_logmsg_for_app();
+		spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags);
+		set_bit(HOST_LOG_DRIVER_MSG, &gwlan_logging.eventFlag);
+		wake_up_interruptible(&gwlan_logging.wait_queue);
+#ifdef CONFIG_MCL
+	}
+#endif
+}
+
+#ifdef CONFIG_MCL
+#ifndef REMOVE_PKT_LOG
+
+/* tx/rx packet-dump record counters; reaching MAX_NUM_PKT_LOG on either
+ * triggers deregistration (see check_txrx_packetdump_count())
+ */
+static uint8_t gtx_count;
+static uint8_t grx_count;
+
+/**
+ * wlan_get_pkt_stats_free_node() - Get the free node for pkt stats
+ *
+ * This function is used to get the free node for pkt stats from
+ * free list/filled list. Caller must hold pkt_stats_lock (see the
+ * call sites in wlan_pkt_stats_to_logger_thread()).
+ *
+ * Return: 1 if a filled buffer had to be recycled (oldest stats
+ *	   dropped), 0 otherwise
+ *
+ */
+static int wlan_get_pkt_stats_free_node(void)
+{
+	int ret = 0;
+
+	
list_add_tail(&gwlan_logging.pkt_stats_pcur_node->node,
+			&gwlan_logging.pkt_stat_filled_list); /* hand current node to logger thread */
+
+	if (!list_empty(&gwlan_logging.pkt_stat_free_list)) {
+		/* Get buffer from free list */
+		gwlan_logging.pkt_stats_pcur_node =
+			(struct pkt_stats_msg *)(gwlan_logging.pkt_stat_free_list.next);
+		list_del_init(gwlan_logging.pkt_stat_free_list.next);
+	} else if (!list_empty(&gwlan_logging.pkt_stat_filled_list)) {
+		/* Get buffer from filled list. This condition will drop the
+		 * packet from being indicated to app
+		 */
+		gwlan_logging.pkt_stats_pcur_node =
+			(struct pkt_stats_msg *)
+			(gwlan_logging.pkt_stat_filled_list.next);
+		++gwlan_logging.pkt_stat_drop_cnt;
+		/* print every 64th drop count */
+		if (
+			cds_is_multicast_logging() &&
+			(!(gwlan_logging.pkt_stat_drop_cnt % 0x40))) {
+			pr_err("%s: drop_count = %u\n",
+			       __func__, gwlan_logging.pkt_stat_drop_cnt);
+		}
+		list_del_init(gwlan_logging.pkt_stat_filled_list.next);
+		ret = 1; /* oldest filled buffer was recycled => stats dropped */
+	}
+
+	/* Reset the skb values, essential if dequeued from filled list */
+	skb_trim(gwlan_logging.pkt_stats_pcur_node->skb, 0);
+	return ret;
+}
+
+/**
+ * wlan_pkt_stats_to_logger_thread() - Add the pkt stats to SKB
+ * @pl_hdr: Pointer to pl_hdr
+ * @pkt_dump: Pointer to pkt_dump
+ * @data: Pointer to data
+ *
+ * This function adds the pktstats hdr and data to current
+ * skb node of free list.
+ * + * Return: None + */ +void wlan_pkt_stats_to_logger_thread(void *pl_hdr, void *pkt_dump, void *data) +{ + struct ath_pktlog_hdr *pktlog_hdr; + struct packet_dump *pkt_stats_dump; + int total_stats_len = 0; + bool wake_up_thread = false; + unsigned long flags; + struct sk_buff *ptr; + int hdr_size; + + pktlog_hdr = (struct ath_pktlog_hdr *)pl_hdr; + + if (pktlog_hdr == NULL) { + pr_err("%s : Invalid pkt_stats_header\n", __func__); + return; + } + + pkt_stats_dump = (struct packet_dump *)pkt_dump; + total_stats_len = sizeof(struct ath_pktlog_hdr) + + pktlog_hdr->size; + + spin_lock_irqsave(&gwlan_logging.pkt_stats_lock, flags); + + if (!gwlan_logging.pkt_stats_pcur_node) { + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, flags); + return; + } + + /* Check if we can accommodate more log into current node/buffer */ + hdr_size = sizeof(struct host_log_pktlog_info) + + sizeof(tAniNlHdr); + if ((total_stats_len + hdr_size) >= + skb_tailroom(gwlan_logging.pkt_stats_pcur_node->skb)) { + wake_up_thread = true; + wlan_get_pkt_stats_free_node(); + } + + ptr = gwlan_logging.pkt_stats_pcur_node->skb; + qdf_mem_copy(skb_put(ptr, + sizeof(struct ath_pktlog_hdr)), + pktlog_hdr, + sizeof(struct ath_pktlog_hdr)); + + if (pkt_stats_dump) { + qdf_mem_copy(skb_put(ptr, + sizeof(struct packet_dump)), + pkt_stats_dump, + sizeof(struct packet_dump)); + pktlog_hdr->size -= sizeof(struct packet_dump); + } + + if (data) + qdf_mem_copy(skb_put(ptr, + pktlog_hdr->size), + data, pktlog_hdr->size); + + if (pkt_stats_dump && pkt_stats_dump->type == STOP_MONITOR) { + wake_up_thread = true; + wlan_get_pkt_stats_free_node(); + } + + spin_unlock_irqrestore(&gwlan_logging.pkt_stats_lock, flags); + + /* Wakeup logger thread */ + if (true == wake_up_thread) { + set_bit(HOST_LOG_PER_PKT_STATS, &gwlan_logging.eventFlag); + wake_up_interruptible(&gwlan_logging.wait_queue); + } +} + +/** + * driver_hal_status_map() - maps driver to hal + * status + * @status: status to be mapped + * + * This 
function is used to map driver to hal status + * + * Return: None + * + */ +static void driver_hal_status_map(uint8_t *status) +{ + switch (*status) { + case tx_status_ok: + *status = TX_PKT_FATE_ACKED; + break; + case tx_status_discard: + *status = TX_PKT_FATE_DRV_DROP_OTHER; + break; + case tx_status_no_ack: + *status = TX_PKT_FATE_SENT; + break; + case tx_status_download_fail: + *status = TX_PKT_FATE_FW_QUEUED; + break; + default: + *status = TX_PKT_FATE_DRV_DROP_OTHER; + break; + } +} + +/* + * send_packetdump() - send packet dump + * @netbuf: netbuf + * @status: status of tx packet + * @vdev_id: virtual device id + * @type: type of packet + * + * This function is used to send packet dump to HAL layer + * using wlan_pkt_stats_to_logger_thread + * + * Return: None + * + */ +static void send_packetdump(qdf_nbuf_t netbuf, uint8_t status, + uint8_t vdev_id, uint8_t type) +{ + struct ath_pktlog_hdr pktlog_hdr = {0}; + struct packet_dump pd_hdr = {0}; + struct hdd_context *hdd_ctx; + struct hdd_adapter *adapter; + + hdd_ctx = (struct hdd_context *)cds_get_context(QDF_MODULE_ID_HDD); + if (!hdd_ctx) + return; + + adapter = hdd_get_adapter_by_vdev(hdd_ctx, vdev_id); + if (!adapter) + return; + + /* Send packet dump only for STA interface */ + if (adapter->device_mode != QDF_STA_MODE) + return; + +#if defined(HELIUMPLUS) + pktlog_hdr.flags |= PKTLOG_HDR_SIZE_16; +#endif + + pktlog_hdr.log_type = PKTLOG_TYPE_PKT_DUMP; + pktlog_hdr.size = sizeof(pd_hdr) + netbuf->len; + + pd_hdr.status = status; + pd_hdr.type = type; + pd_hdr.driver_ts = qdf_get_monotonic_boottime(); + + if ((type == TX_MGMT_PKT) || (type == TX_DATA_PKT)) + gtx_count++; + else if ((type == RX_MGMT_PKT) || (type == RX_DATA_PKT)) + grx_count++; + + wlan_pkt_stats_to_logger_thread(&pktlog_hdr, &pd_hdr, netbuf->data); +} + + +/* + * send_packetdump_monitor() - sends start/stop packet dump indication + * @type: type of packet + * + * This function is used to indicate HAL layer to start/stop monitoring + * of 
packets + * + * Return: None + * + */ +static void send_packetdump_monitor(uint8_t type) +{ + struct ath_pktlog_hdr pktlog_hdr = {0}; + struct packet_dump pd_hdr = {0}; + +#if defined(HELIUMPLUS) + pktlog_hdr.flags |= PKTLOG_HDR_SIZE_16; +#endif + + pktlog_hdr.log_type = PKTLOG_TYPE_PKT_DUMP; + pktlog_hdr.size = sizeof(pd_hdr); + + pd_hdr.type = type; + + LOGGING_TRACE(QDF_TRACE_LEVEL_DEBUG, + "fate Tx-Rx %s: type: %d", __func__, type); + + wlan_pkt_stats_to_logger_thread(&pktlog_hdr, &pd_hdr, NULL); +} + +/** + * wlan_deregister_txrx_packetdump() - tx/rx packet dump + * deregistration + * + * This function is used to deregister tx/rx packet dump callbacks + * with ol, pe and htt layers + * + * Return: None + * + */ +void wlan_deregister_txrx_packetdump(void) +{ + if (gtx_count || grx_count) { + ol_deregister_packetdump_callback(); + wma_deregister_packetdump_callback(); + send_packetdump_monitor(STOP_MONITOR); + csr_packetdump_timer_stop(); + + gtx_count = 0; + grx_count = 0; + } else + LOGGING_TRACE(QDF_TRACE_LEVEL_DEBUG, + "%s: deregistered packetdump already", __func__); +} + +/* + * check_txrx_packetdump_count() - function to check + * tx/rx packet dump global counts + * + * This function is used to check global counts of tx/rx + * packet dump functionality. 
+ * + * Return: 1 if either gtx_count or grx_count reached 32 + * 0 otherwise + * + */ +static bool check_txrx_packetdump_count(void) +{ + if (gtx_count == MAX_NUM_PKT_LOG || + grx_count == MAX_NUM_PKT_LOG) { + LOGGING_TRACE(QDF_TRACE_LEVEL_DEBUG, + "%s gtx_count: %d grx_count: %d deregister packetdump", + __func__, gtx_count, grx_count); + wlan_deregister_txrx_packetdump(); + return 1; + } + return 0; +} + +/* + * tx_packetdump_cb() - tx packet dump callback + * @netbuf: netbuf + * @status: status of tx packet + * @vdev_id: virtual device id + * @type: packet type + * + * This function is used to send tx packet dump to HAL layer + * and deregister packet dump callbacks + * + * Return: None + * + */ +static void tx_packetdump_cb(qdf_nbuf_t netbuf, uint8_t status, + uint8_t vdev_id, uint8_t type) +{ + bool temp; + + temp = check_txrx_packetdump_count(); + if (temp) + return; + + driver_hal_status_map(&status); + send_packetdump(netbuf, status, vdev_id, type); +} + + +/* + * rx_packetdump_cb() - rx packet dump callback + * @netbuf: netbuf + * @status: status of rx packet + * @vdev_id: virtual device id + * @type: packet type + * + * This function is used to send rx packet dump to HAL layer + * and deregister packet dump callbacks + * + * Return: None + * + */ +static void rx_packetdump_cb(qdf_nbuf_t netbuf, uint8_t status, + uint8_t vdev_id, uint8_t type) +{ + bool temp; + + temp = check_txrx_packetdump_count(); + if (temp) + return; + + send_packetdump(netbuf, status, vdev_id, type); +} + + +/** + * wlan_register_txrx_packetdump() - tx/rx packet dump + * registration + * + * This function is used to register tx/rx packet dump callbacks + * with ol, pe and htt layers + * + * Return: None + * + */ +void wlan_register_txrx_packetdump(void) +{ + ol_register_packetdump_callback(tx_packetdump_cb, + rx_packetdump_cb); + wma_register_packetdump_callback(tx_packetdump_cb, + rx_packetdump_cb); + send_packetdump_monitor(START_MONITOR); + + gtx_count = 0; + grx_count = 0; +} 
+#endif /* REMOVE_PKT_LOG */ +#endif /* CONFIG_MCL */ +#endif /* WLAN_LOGGING_SOCK_SVC_ENABLE */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/logging/src/wlan_roam_debug.c b/drivers/staging/qca-wifi-host-cmn/utils/logging/src/wlan_roam_debug.c new file mode 100644 index 0000000000000000000000000000000000000000..13bc8f4169a9bc28ef601d34e1f91cd5e002b088 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/logging/src/wlan_roam_debug.c @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * DOC: Roaming debug log operations routines and global data + */ + +#include +#include +#include +#include +#include +#include +#include +#include "wlan_roam_debug.h" + +/* + * wlan roam debug log is stored in this global structure. It can be accessed + * without requiring any psoc or vdev context. It will be accessible in + * the crash dump without having to dereference complex stack traces. 
 */
+static struct wlan_roam_debug_info global_wlan_roam_debug_table = {
+	{ 0 },
+	WLAN_ROAM_DEBUG_MAX_REC,
+};
+
+/**
+ * wlan_roam_next_debug_log_index() - atomically increment and wrap around index
+ * @index: address of index to increment
+ * @size: wrap around this value
+ *
+ * Return: new value of index
+ */
+static int wlan_roam_next_debug_log_index(qdf_atomic_t *index, int size)
+{
+	int i = qdf_atomic_inc_return(index);
+
+	/* NOTE(review): the wrap-around math mixes @size with the
+	 * WLAN_ROAM_DEBUG_MAX_REC constant; it is only correct when
+	 * size == WLAN_ROAM_DEBUG_MAX_REC (the sole caller passes
+	 * exactly that) — confirm before reusing with another size.
+	 */
+	if (i == WLAN_ROAM_DEBUG_MAX_REC)
+		qdf_atomic_sub(WLAN_ROAM_DEBUG_MAX_REC, index);
+	while (i >= size)
+		i -= WLAN_ROAM_DEBUG_MAX_REC;
+
+	return i;
+}
+
+/**
+ * wlan_roam_debug_log() - Add a debug log entry to wlan roam debug records
+ * @vdev_id: vdev identifier
+ * @op: operation identifier
+ * @peer_id: peer id
+ * @mac_addr: mac address of peer, can be NULL
+ * @peer_obj: peer object address, can be NULL
+ * @arg1: extra argument #1
+ * @arg2: extra argument #2
+ *
+ * Return: none
+ */
+void wlan_roam_debug_log(uint8_t vdev_id, uint8_t op,
+			uint16_t peer_id, void *mac_addr,
+			void *peer_obj, uint32_t arg1, uint32_t arg2)
+{
+	uint32_t i;
+	struct wlan_roam_debug_rec *rec;
+
+	/* claim a slot atomically; the record body below is written
+	 * without a lock — torn records are presumably acceptable for
+	 * this crash-debug log (see the file-header DOC comment)
+	 */
+	i = wlan_roam_next_debug_log_index(
+		&global_wlan_roam_debug_table.index,
+		WLAN_ROAM_DEBUG_MAX_REC);
+	rec = &global_wlan_roam_debug_table.rec[i];
+	rec->time = qdf_get_log_timestamp();
+	rec->operation = op;
+	rec->vdev_id = vdev_id;
+	rec->peer_id = peer_id;
+	if (mac_addr)
+		qdf_mem_copy(rec->mac_addr.bytes, mac_addr,
+			     QDF_MAC_ADDR_SIZE);
+	else
+		qdf_mem_zero(rec->mac_addr.bytes,
+			     QDF_MAC_ADDR_SIZE);
+	rec->peer_obj = peer_obj;
+	rec->arg1 = arg1;
+	rec->arg2 = arg2;
+}
+qdf_export_symbol(wlan_roam_debug_log);
+
+/**
+ * wlan_roam_debug_string() - convert operation value to printable string
+ * @op: operation identifier
+ *
+ * Return: printable string for the operation
+ */
+#ifdef WLAN_DEBUG
+static char *wlan_roam_debug_string(uint32_t op)
+{
+	switch (op) {
+	case DEBUG_PEER_CREATE_SEND:
+		return "peer create send";
+	case 
DEBUG_PEER_CREATE_RESP: + return "peer create resp_event"; + case DEBUG_PEER_DELETE_SEND: + return "peer delete send"; + case DEBUG_PEER_DELETE_RESP: + return "peer delete resp_event"; + case DEBUG_PEER_MAP_EVENT: + return "peer map event"; + case DEBUG_PEER_UNMAP_EVENT: + return "peer unmap event"; + case DEBUG_PEER_UNREF_DELETE: + return "peer unref delete"; + case DEBUG_DELETING_PEER_OBJ: + return "peer obj deleted"; + case DEBUG_ROAM_SYNCH_IND: + return "roam synch ind event"; + case DEBUG_ROAM_SYNCH_CNF: + return "roam sync conf sent"; + case DEBUG_ROAM_SYNCH_FAIL: + return "roam sync fail event"; + case DEBUG_ROAM_EVENT: + return "roam event"; + case DEBUG_WOW_ROAM_EVENT: + return "wow wakeup roam event"; + case DEBUG_BUS_SUSPEND: + return "host suspend"; + case DEBUG_BUS_RESUME: + return "host wakeup"; + case DEBUG_WOW_REASON: + return "wow wakeup reason"; + default: + return "unknown"; + } +} +#endif + +/** + * wlan_roam_debug_dump_table() - Print the wlan roam debug log records + * print all the valid debug records in the order of timestamp + * + * Return: none + */ +void wlan_roam_debug_dump_table(void) +{ + uint32_t i; + int32_t current_index; + struct wlan_roam_debug_rec *dbg_rec; + uint64_t startt = 0; + uint32_t delta; + +#define DEBUG_CLOCK_TICKS_PER_MSEC 19200 + + current_index = qdf_atomic_read(&global_wlan_roam_debug_table.index); + if (current_index < 0) { + roam_debug("No records to dump"); + return; + } + roam_debug("Dumping all records. current index %d", current_index); + + i = current_index; + do { + /* wrap around */ + i = (i + 1) % WLAN_ROAM_DEBUG_MAX_REC; + dbg_rec = &global_wlan_roam_debug_table.rec[i]; + /* skip unused entry */ + if (dbg_rec->time == 0) + continue; + if (startt == 0) + startt = dbg_rec->time; + + /* + * Divide by 19200 == right shift 8 bits, then divide by 75 + * 32 bit computation keeps both 32 and 64 bit compilers happy. + * The value will roll over after approx. 33554 seconds. 
+ */ + delta = (uint32_t) (((dbg_rec->time - startt) >> 8) & + 0xffffffff); + delta = delta / (DEBUG_CLOCK_TICKS_PER_MSEC >> 8); + + roam_debug("index = %5d timestamp = 0x%016llx delta ms = %-12u", + i, dbg_rec->time, delta); + roam_debug("info = %-24s vdev_id = %-3d mac addr = %pM", + wlan_roam_debug_string(dbg_rec->operation), + (int8_t)dbg_rec->vdev_id, dbg_rec->mac_addr.bytes); + roam_debug("peer obj = 0x%pK peer_id = %-4d", dbg_rec->peer_obj, + (int8_t)dbg_rec->peer_id); + roam_debug("arg1 = 0x%-8x arg2 = 0x%-8x", dbg_rec->arg1, + dbg_rec->arg2); + } while (i != current_index); +} +qdf_export_symbol(global_wlan_roam_debug_table); + + + diff --git a/drivers/staging/qca-wifi-host-cmn/utils/nlink/inc/wlan_nlink_common.h b/drivers/staging/qca-wifi-host-cmn/utils/nlink/inc/wlan_nlink_common.h new file mode 100644 index 0000000000000000000000000000000000000000..6b43052389f1b5c1e008b5c6010276da448fb9d7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/nlink/inc/wlan_nlink_common.h @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/*=========================================================================== + \file wlan_nlink_common.h + + Exports and types for the Netlink Service interface. This header file contains + message types and definitions that are shared between the user space service + (e.g. logging service) and WLAN kernel module. + + ===========================================================================*/ + +#ifndef WLAN_NLINK_COMMON_H__ +#define WLAN_NLINK_COMMON_H__ + +#include + +#ifdef __KERNEL__ +#include +#else +#include +#endif + +/*--------------------------------------------------------------------------- + * External Functions + *-------------------------------------------------------------------------*/ + +/*--------------------------------------------------------------------------- + * Preprocessor Definitions and Constants + *-------------------------------------------------------------------------*/ +#define WLAN_NL_MAX_PAYLOAD 5120 /* maximum size for netlink message */ +#define WLAN_NLINK_PROTO_FAMILY NETLINK_USERSOCK +#define WLAN_NLINK_MCAST_GRP_ID 0x01 + +/*--------------------------------------------------------------------------- + * Type Declarations + *-------------------------------------------------------------------------*/ + +/* + * The following enum defines the target service within WLAN driver for which the + * message is intended. Each service along with its counterpart + * in the user space, define a set of messages they recognize. + * Each of these messages will have a header of type tAniMsgHdr defined below. + * Each Netlink message to/from a kernel module will contain only one + * message which is preceded by a tAniMsgHdr. The maximum size (in bytes) of + * a netlink message is assumed to be MAX_PAYLOAD bytes. 
+ * + * +------------+-------+----------+----------+ + * |Netlink hdr | Align |tAniMsgHdr| msg body | + * +------------+-------+----------|----------+ + */ + +/* Message Types */ +#define WLAN_SVC_FW_CRASHED_IND 0x100 +#define WLAN_SVC_LTE_COEX_IND 0x101 +#define WLAN_SVC_WLAN_AUTO_SHUTDOWN_IND 0x102 +#define WLAN_SVC_DFS_CAC_START_IND 0x103 +#define WLAN_SVC_DFS_CAC_END_IND 0x104 +#define WLAN_SVC_DFS_RADAR_DETECT_IND 0x105 +#define WLAN_SVC_WLAN_STATUS_IND 0x106 +#define WLAN_SVC_WLAN_VERSION_IND 0x107 +#define WLAN_SVC_DFS_ALL_CHANNEL_UNAVAIL_IND 0x108 +#define WLAN_SVC_WLAN_TP_IND 0x109 +#define WLAN_SVC_RPS_ENABLE_IND 0x10A +#define WLAN_SVC_WLAN_TP_TX_IND 0x10B +#define WLAN_SVC_WLAN_AUTO_SHUTDOWN_CANCEL_IND 0x10C +#define WLAN_SVC_WLAN_RADIO_INDEX 0x10D +#define WLAN_SVC_FW_SHUTDOWN_IND 0x10E +#define WLAN_SVC_CORE_MINFREQ 0x10F +#define WLAN_SVC_MAX_SSID_LEN 32 +#define WLAN_SVC_MAX_BSSID_LEN 6 +#define WLAN_SVC_MAX_STR_LEN 16 +#define WLAN_SVC_MAX_NUM_CHAN 128 +#define WLAN_SVC_COUNTRY_CODE_LEN 3 + +#define ANI_NL_MSG_BASE 0x10 /* Some arbitrary base */ + +typedef enum eAniNlModuleTypes { + ANI_NL_MSG_PUMAC = ANI_NL_MSG_BASE + 0x01, /* PTT Socket App */ + ANI_NL_MSG_PTT = ANI_NL_MSG_BASE + 0x07, /* Quarky GUI */ + WLAN_NL_MSG_OEM = ANI_NL_MSG_BASE + 0x09, + WLAN_NL_MSG_SVC, + WLAN_NL_MSG_CNSS_DIAG = ANI_NL_MSG_BASE + 0x0B, /* Value needs to be 27 */ + ANI_NL_MSG_LOG, + WLAN_NL_MSG_SPECTRAL_SCAN, + ANI_NL_MSG_MAX +} tAniNlModTypes, tWlanNlModTypes; + +#define WLAN_NL_MSG_BASE ANI_NL_MSG_BASE +#define WLAN_NL_MSG_MAX ANI_NL_MSG_MAX + +/* All Netlink messages must contain this header */ +typedef struct sAniHdr { + unsigned short type; + unsigned short length; +} tAniHdr, tAniMsgHdr; + +typedef struct sAniNlMsg { + struct nlmsghdr nlh; /* Netlink Header */ + int radio; /* unit number of the radio */ + tAniHdr wmsg; /* Airgo Message Header */ +} tAniNlHdr; + +struct radio_index_tlv { + unsigned short type; + unsigned short length; + int radio; +}; + +/** + * 
struct svc_channel_info - Channel information + * @chan_id: Channel ID + * @reserved0: Reserved for padding and future use + * @mhz: Primary 20 MHz channel frequency in MHz + * @band_center_freq1: Center frequency 1 in MHz + * @band_center_freq2: Center frequency 2 in MHz + * @info: Channel info + * @reg_info_1: Regulatory information field 1 which contains + * MIN power, MAX power, reg power and reg class ID + * @reg_info_2: Regulatory information field 2 which contains antennamax + */ +struct svc_channel_info { + uint32_t chan_id; + uint32_t reserved0; + uint32_t mhz; + uint32_t band_center_freq1; + uint32_t band_center_freq2; + uint32_t info; + uint32_t reg_info_1; + uint32_t reg_info_2; +}; + +struct wlan_status_data { + uint8_t lpss_support; + uint8_t is_on; + uint8_t vdev_id; + uint8_t is_connected; + int8_t rssi; + uint8_t ssid_len; + uint8_t country_code[WLAN_SVC_COUNTRY_CODE_LEN]; + uint32_t vdev_mode; + uint32_t freq; + uint32_t numChannels; + uint8_t channel_list[WLAN_SVC_MAX_NUM_CHAN]; + uint8_t ssid[WLAN_SVC_MAX_SSID_LEN]; + uint8_t bssid[WLAN_SVC_MAX_BSSID_LEN]; + struct svc_channel_info channel_info[WLAN_SVC_MAX_NUM_CHAN]; +}; + +struct wlan_version_data { + uint32_t chip_id; + char chip_name[WLAN_SVC_MAX_STR_LEN]; + char chip_from[WLAN_SVC_MAX_STR_LEN]; + char host_version[WLAN_SVC_MAX_STR_LEN]; + char fw_version[WLAN_SVC_MAX_STR_LEN]; +}; + +struct wlan_dfs_info { + uint16_t channel; + uint8_t country_code[WLAN_SVC_COUNTRY_CODE_LEN]; +}; + +/* + * Maximum number of queues supported by WLAN driver. Setting an upper + * limit. Actual number of queues may be smaller than this value. 
+ */ +#define WLAN_SVC_IFACE_NUM_QUEUES 6 + +/** + * struct wlan_rps_data - structure to send RPS info to cnss-daemon + * @ifname: interface name for which the RPS data belongs to + * @num_queues: number of rx queues for which RPS data is being sent + * @cpu_map_list: array of cpu maps for different rx queues supported by + * the wlan driver + * + * The structure specifies the format of data exchanged between wlan + * driver and cnss-daemon. On receipt of the data, cnss-daemon is expected + * to apply the 'cpu_map' for each rx queue belonging to the interface 'ifname' + */ +struct wlan_rps_data { + char ifname[IFNAMSIZ]; + uint16_t num_queues; + uint16_t cpu_map_list[WLAN_SVC_IFACE_NUM_QUEUES]; +}; + +/** + * enum wlan_tp_level - indicates wlan throughput level + * @WLAN_SVC_TP_NONE: used for initialization + * @WLAN_SVC_TP_LOW: used to identify low throughput level + * @WLAN_SVC_TP_MEDIUM: used to identify medium throughput level + * @WLAN_SVC_TP_HIGH: used to identify high throughput level + * + * The different throughput levels are determined on the basis of # of tx and + * rx packets and other threshold values. For example, if the # of total + * packets sent or received by the driver is greater than 500 in the last 100ms + * , the driver has a high throughput requirement. The driver may tweak certain + * system parameters based on the throughput level. + */ +enum wlan_tp_level { + WLAN_SVC_TP_NONE, + WLAN_SVC_TP_LOW, + WLAN_SVC_TP_MEDIUM, + WLAN_SVC_TP_HIGH, +}; + +/** + * struct wlan_core_minfreq - msg to [re]set the min freq of a set of cores + * @magic: signature token: 0xBABA + * @reserved: unused for now + * @coremask: bitmap of cores (16 bits) bit0=CORE0, bit1=CORE1, ... + * coremask is ONLY valid for set command + * valid values: 0xf0, or 0x0f + * @freq: frequency in KH + * > 0: "set to the given frequency" + * == 0: "free; remove the lock" + * + * Msg structure passed by the driver to cnss-daemon. 
+ * + * Semantical Alert: + * There can be only one outstanding lock, even for different masks. + */ +#define WLAN_CORE_MINFREQ_MAGIC 0xBABA +struct wlan_core_minfreq { + uint16_t magic; + uint16_t reserved; + uint16_t coremask; + uint16_t freq; +}; + +/* Indication to enable TCP delayed ack in TPUT indication */ +#define TCP_DEL_ACK_IND (1 << 0) +#define TCP_DEL_ACK_IND_MASK 0x1 +/* Indication to enable TCP advance window scaling in TPUT indication */ +#define TCP_ADV_WIN_SCL (1 << 1) +#define TCP_ADV_WIN_SCL_MASK 0x2 + +/* TCP limit output bytes for low and high TPUT */ +#define TCP_LIMIT_OUTPUT_BYTES_LOW 506072 +#define TCP_LIMIT_OUTPUT_BYTES_HI 4048579 + +/* TCP window scale for low and high TPUT */ +#define WIN_SCALE_LOW 2 +#define WIN_SCALE_HI 1 + +/* TCP DEL ACK value for low and high TPUT */ +#define TCP_DEL_ACK_LOW 0 +#define TCP_DEL_ACK_HI 20 + +/** + * struct wlan_rx_tp_data - msg to TCP delayed ack and advance window scaling + * @level: Throughput level. + * @rx_tp_flags: Bit map of flags, for which this indcation will take + * effect, bit map for TCP_ADV_WIN_SCL and TCP_DEL_ACK_IND. + */ +struct wlan_rx_tp_data { + enum wlan_tp_level level; + uint16_t rx_tp_flags; +}; + +/** + * struct wlan_tx_tp_data - msg to TCP for Tx Dir + * @level: Throughput level. + * @tcp_limit_output: Tcp limit output flag. + * + */ +struct wlan_tx_tp_data { + enum wlan_tp_level level; + bool tcp_limit_output; +}; + +#endif /* WLAN_NLINK_COMMON_H__ */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/nlink/inc/wlan_nlink_srv.h b/drivers/staging/qca-wifi-host-cmn/utils/nlink/inc/wlan_nlink_srv.h new file mode 100644 index 0000000000000000000000000000000000000000..53c2d6e283382b61d98f9460ee187fdd3ea1ed59 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/nlink/inc/wlan_nlink_srv.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/****************************************************************************** +* wlan_nlink_srv.h +* +* wlan_nlink_srv is used to RX/TX Netlink messages from user space to kernel +* modules and vice versa. Kernel modules must register a message handler for a +* message type so that the wlan_nlink_srv can invoke the corresponding msg handler +* whenever a Netlink message of a particular type has been received from an +* application. In the opposite direction, wlan_nlink_srv provides a mechanism +* which kernel modules can use to send Netlink messages to applications. 
+* +******************************************************************************/ + +#ifndef WLAN_NLINK_SRV_H +#define WLAN_NLINK_SRV_H + +#include +#include +#include + +#define INVALID_PID -1 +#define NLINK_MAX_CALLBACKS (WLAN_NL_MSG_MAX - WLAN_NL_MSG_BASE) + +typedef int (*nl_srv_msg_callback)(struct sk_buff *skb); + +int nl_srv_init(void *wiphy); +void nl_srv_exit(void); +int nl_srv_register(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler); +int nl_srv_unregister(tWlanNlModTypes msg_type, + nl_srv_msg_callback msg_handler); + +#ifdef CNSS_GENL +int nl_srv_ucast(struct sk_buff *skb, int dst_pid, int flag, + int app_id, int mcgroup_id); +int nl_srv_bcast(struct sk_buff *skb, int mcgroup_id, int app_id); +#else +int nl_srv_ucast(struct sk_buff *skb, int dst_pid, int flag); +int nl_srv_bcast(struct sk_buff *skb); +#endif + +int nl_srv_is_initialized(void); +void nl_srv_ucast_oem(struct sk_buff *skb, int dst_pid, int flag); + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/utils/nlink/src/wlan_nlink_srv.c b/drivers/staging/qca-wifi-host-cmn/utils/nlink/src/wlan_nlink_srv.c new file mode 100644 index 0000000000000000000000000000000000000000..785b71a544786046bf4be398ce7d460aaae04f93 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/nlink/src/wlan_nlink_srv.c @@ -0,0 +1,800 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/****************************************************************************** +* wlan_nlink_srv.c +* +* This file contains the definitions specific to the wlan_nlink_srv +* +******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define WLAN_CLD80211_MAX_SIZE (SKB_WITH_OVERHEAD(8192UL) - NLMSG_HDRLEN) + +#if defined(CONFIG_CNSS_LOGGER) + +#include + +static int radio_idx = -EINVAL; +static void *wiphy_ptr; +static bool logger_initialized; + +/** + * nl_srv_init() - wrapper function to register to cnss_logger + * @wiphy: the pointer to the wiphy structure + * + * The netlink socket is no longer initialized in the driver itself, instead + * will be initialized in the cnss_logger module, the driver should register + * itself to cnss_logger module to get the radio_index for all the netlink + * operation. (cfg80211 vendor command is using different netlink socket). + * + * The cnss_logger_device_register() use to register the driver with the + * wiphy structure and the module name (debug purpose) and then return the + * radio_index depending on the availibility. 
+ * + * Return: radio index for success and -EINVAL for failure + */ +int nl_srv_init(void *wiphy) +{ + if (logger_initialized) + goto initialized; + + wiphy_ptr = wiphy; + radio_idx = cnss_logger_device_register(wiphy, THIS_MODULE->name); + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "%s: radio_index: %d, wiphy_ptr: %pK", + __func__, radio_idx, wiphy_ptr); + + if (radio_idx >= 0) + logger_initialized = true; + +initialized: + return radio_idx; +} + +/** + * nl_srv_exit() - wrapper function to unregister from cnss_logger + * + * The cnss_logger_device_unregister() use to unregister the driver with + * the radio_index assigned and wiphy structure from cnss_logger. + * + * Return: None + */ +void nl_srv_exit(void) +{ + if (logger_initialized) { + cnss_logger_device_unregister(radio_idx, wiphy_ptr); + radio_idx = -EINVAL; + wiphy_ptr = NULL; + logger_initialized = false; + } +} + +/** + * nl_srv_ucast() - wrapper function to do unicast tx through cnss_logger + * @skb: the socket buffer to send + * @dst_pid: the port id + * @flag: the blocking or nonblocking flag + * + * The nl_srv_is_initialized() is used to do sanity check if the netlink + * service is ready, e.g if the radio_index is assigned properly, if not + * the driver should take the responsibility to free the skb. + * + * The cnss_logger_nl_ucast() use the same parameters to send the socket + * buffers. 
+ * + * Return: the error of the transmission status + */ +int nl_srv_ucast(struct sk_buff *skb, int dst_pid, int flag) +{ + int err = -EINVAL; + + /* sender's pid */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + NETLINK_CB(skb).pid = 0; +#else + NETLINK_CB(skb).portid = 0; +#endif + /* not multicast */ + NETLINK_CB(skb).dst_group = 0; + + if (nl_srv_is_initialized() == 0) { + err = cnss_logger_nl_ucast(skb, dst_pid, flag); + if (err < 0) + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: netlink_unicast to pid[%d] failed, ret[%d]", + dst_pid, err); + } else { + dev_kfree_skb(skb); + } + + return err; +} + +/** + * nl_srv_bcast() - wrapper function to do broadcast tx through cnss_logger + * @skb: the socket buffer to send + * + * The cnss_logger_nl_bcast() is used to transmit the socket buffer. + * + * Return: status of transmission + */ +int nl_srv_bcast(struct sk_buff *skb) +{ + int err = -EINVAL; + int flags = GFP_KERNEL; + + if (in_interrupt() || irqs_disabled() || in_atomic()) + flags = GFP_ATOMIC; + + /* sender's pid */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + NETLINK_CB(skb).pid = 0; +#else + NETLINK_CB(skb).portid = 0; +#endif + /* destination group */ + NETLINK_CB(skb).dst_group = WLAN_NLINK_MCAST_GRP_ID; + + if (nl_srv_is_initialized() == 0) { + err = cnss_logger_nl_bcast(skb, WLAN_NLINK_MCAST_GRP_ID, flags); + if ((err < 0) && (err != -ESRCH)) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: netlink_broadcast failed err = %d", + err); + dev_kfree_skb(skb); + } + } + else + dev_kfree_skb(skb); + return err; +} +qdf_export_symbol(nl_srv_bcast); + +/** + * nl_srv_unregister() - wrapper function to unregister event to cnss_logger + * @msg_type: the message to unregister + * @msg_handler: the message handler + * + * The cnss_logger_event_unregister() is used to unregister the message and + * message handler. 
+ * + * Return: 0 if successfully unregister, otherwise proper error code + */ +int nl_srv_unregister(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler) +{ + int ret = -EINVAL; + + if (nl_srv_is_initialized() != 0) + return ret; + + if ((msg_type >= WLAN_NL_MSG_BASE) && (msg_type < WLAN_NL_MSG_MAX) && + msg_handler != NULL) { + ret = cnss_logger_event_unregister(radio_idx, msg_type, + msg_handler); + } else { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "NLINK: nl_srv_unregister failed for msg_type %d", + msg_type); + ret = -EINVAL; + } + + return ret; +} + +/** + * nl_srv_register() - wrapper function to register event to cnss_logger + * @msg_type: the message to register + * @msg_handler: the message handler + * + * The cnss_logger_event_register() is used to register the message and + * message handler. + * + * Return: 0 if successfully register, otherwise proper error code + */ +int nl_srv_register(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler) +{ + int ret = -EINVAL; + + if (nl_srv_is_initialized() != 0) + return ret; + + if ((msg_type >= WLAN_NL_MSG_BASE) && (msg_type < WLAN_NL_MSG_MAX) && + msg_handler != NULL) { + ret = cnss_logger_event_register(radio_idx, msg_type, + msg_handler); + } else { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "NLINK: nl_srv_register failed for msg_type %d", + msg_type); + ret = -EINVAL; + } + + return ret; +} + +/** + * nl_srv_is_initialized() - check if netlink service is initialized + * + * Return: 0 if it is initialized, otherwise error code + */ +inline int nl_srv_is_initialized(void) +{ + if (logger_initialized) + return 0; + else + return -EPERM; +} +qdf_export_symbol(nl_srv_is_initialized); + +/* + * If MULTI_IF_NAME is not defined, then this is the primary instance of the + * driver and the diagnostics netlink socket will be available. 
If + * MULTI_IF_NAME is defined then this is not the primary instance of the driver + * and the diagnostics netlink socket will not be available since this + * diagnostics netlink socket can only be exposed by one instance of the driver. + */ +#elif defined(CNSS_GENL) +#include +#include +#include +#include + +/* For CNSS_GENL netlink sockets will be initialized by CNSS Kernel Module */ +int nl_srv_init(void *wiphy) +{ + return 0; +} + +void nl_srv_exit(void) +{ +} + +int nl_srv_is_initialized(void) +{ + return 0; +} + +/* Not implemented by CNSS kernel module */ +int nl_srv_register(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler) +{ + return 0; +} + +int nl_srv_unregister(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler) +{ + return 0; +} + + +/** + * nl80211hdr_put() - API to fill genlmsg header + * @skb: Sk buffer + * @portid: Port ID + * @seq: Sequence number + * @flags: Flags + * @cmd: Command id + * + * API to fill genl message header for broadcast events to user space + * + * Return: Pointer to user specific header/payload + */ +static inline void *nl80211hdr_put(struct sk_buff *skb, uint32_t portid, + uint32_t seq, int flags, uint8_t cmd) +{ + struct genl_family *cld80211_fam = cld80211_get_genl_family(); + + return genlmsg_put(skb, portid, seq, cld80211_fam, flags, cmd); +} + +/** + * cld80211_fill_data() - API to fill payload to nl message + * @msg: Sk buffer + * @portid: Port ID + * @seq: Sequence number + * @flags: Flags + * @cmd: Command ID + * @buf: data buffer/payload to be filled + * @len: length of the payload ie. 
@buf + * + * API to fill the payload/data of the nl message to be sent + * + * Return: zero on success + */ +static int cld80211_fill_data(struct sk_buff *msg, uint32_t portid, + uint32_t seq, int flags, uint8_t cmd, + uint8_t *buf, int len) +{ + void *hdr; + struct nlattr *nest; + + hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); + if (!hdr) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "nl80211 hdr put failed"); + return -EPERM; + } + + nest = nla_nest_start(msg, CLD80211_ATTR_VENDOR_DATA); + if (!nest) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "nla_nest_start failed"); + goto nla_put_failure; + } + + if (nla_put(msg, CLD80211_ATTR_DATA, len, buf)) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "nla_put failed"); + goto nla_put_failure; + } + + nla_nest_end(msg, nest); + genlmsg_end(msg, hdr); + + return 0; +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EPERM; +} + +/** + * send_msg_to_cld80211() - API to send message to user space Application + * @mcgroup_id: Multicast group ID + * @pid: Port ID + * @app_id: Application ID + * @buf: Data/payload buffer to be sent + * @len: Length of the data ie. @buf + * + * API to send the nl message to user space application. 
+ * + * Return: zero on success + */ +static int send_msg_to_cld80211(int mcgroup_id, int pid, int app_id, + uint8_t *buf, int len) +{ + struct sk_buff *msg; + struct genl_family *cld80211_fam = cld80211_get_genl_family(); + int status; + int flags = GFP_KERNEL; + + if (in_interrupt() || irqs_disabled() || in_atomic()) + flags = GFP_ATOMIC; + + if (len > NLMSG_DEFAULT_SIZE) { + if (len > WLAN_CLD80211_MAX_SIZE) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "buf size:%d if more than max size: %d", + len, (int) WLAN_CLD80211_MAX_SIZE); + return -ENOMEM; + } + msg = nlmsg_new(WLAN_CLD80211_MAX_SIZE, flags); + } else { + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, flags); + } + if (!msg) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "nlmsg malloc fails"); + return -EPERM; + } + + status = cld80211_fill_data(msg, pid, 0, 0, app_id, buf, len); + if (status) { + nlmsg_free(msg); + return -EPERM; + } + + genlmsg_multicast_netns(cld80211_fam, &init_net, msg, 0, + mcgroup_id, flags); + return 0; +} + +/** + * nl_srv_bcast() - wrapper function to do broadcast events to user space apps + * @skb: the socket buffer to send + * @mcgroup_id: multicast group id + * @app_id: application id + * + * This function is common wrapper to send broadcast events to different + * user space applications. 
+ * + * return: none + */ +int nl_srv_bcast(struct sk_buff *skb, int mcgroup_id, int app_id) +{ + struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data; + void *msg = NLMSG_DATA(nlh); + uint32_t msg_len = nlmsg_len(nlh); + int status; + + status = send_msg_to_cld80211(mcgroup_id, 0, app_id, msg, msg_len); + if (status) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "send msg to cld80211 fails for app id %d", app_id); + dev_kfree_skb(skb); + return -EPERM; + } + + dev_kfree_skb(skb); + return 0; +} +qdf_export_symbol(nl_srv_bcast); + +/** + * nl_srv_ucast() - wrapper function to do unicast events to user space apps + * @skb: the socket buffer to send + * @dst_pid: destination process IF + * @flag: flags + * @app_id: application id + * @mcgroup_id: Multicast group ID + * + * This function is common wrapper to send unicast events to different + * user space applications. This internally used broadcast API with multicast + * group mcgrp_id. This wrapper serves as a common API in both + * new generic netlink infra and legacy implementation. 
+ * + * return: zero on success, error code otherwise + */ +int nl_srv_ucast(struct sk_buff *skb, int dst_pid, int flag, + int app_id, int mcgroup_id) +{ + struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data; + void *msg = NLMSG_DATA(nlh); + uint32_t msg_len = nlmsg_len(nlh); + int status; + + status = send_msg_to_cld80211(mcgroup_id, dst_pid, app_id, + msg, msg_len); + if (status) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "send msg to cld80211 fails for app id %d", app_id); + dev_kfree_skb(skb); + return -EPERM; + } + + dev_kfree_skb(skb); + return 0; +} + +#elif !defined(MULTI_IF_NAME) + +/* Global variables */ +static DEFINE_MUTEX(nl_srv_sem); +static struct sock *nl_srv_sock; +static nl_srv_msg_callback nl_srv_msg_handler[NLINK_MAX_CALLBACKS]; + +/* Forward declaration */ +static void nl_srv_rcv(struct sk_buff *sk); +static void nl_srv_rcv_skb(struct sk_buff *skb); +static void nl_srv_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh); + +/* + * Initialize the netlink service. + * Netlink service is usable after this. + */ +int nl_srv_init(void *wiphy) +{ + int retcode = 0; + struct netlink_kernel_cfg cfg = { + .groups = WLAN_NLINK_MCAST_GRP_ID, + .input = nl_srv_rcv + }; + + nl_srv_sock = netlink_kernel_create(&init_net, WLAN_NLINK_PROTO_FAMILY, + &cfg); + + if (nl_srv_sock != NULL) { + memset(nl_srv_msg_handler, 0, sizeof(nl_srv_msg_handler)); + } else { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR, + "NLINK: netlink_kernel_create failed"); + retcode = -ECONNREFUSED; + } + return retcode; +} + +/* + * Deinit the netlink service. + * Netlink service is unusable after this. + */ +void nl_srv_exit(void) +{ + if (nl_srv_is_initialized() == 0) + netlink_kernel_release(nl_srv_sock); + + nl_srv_sock = NULL; +} + +/* + * Register a message handler for a specified module. + * Each module (e.g. WLAN_NL_MSG_BTC )will register a + * handler to handle messages addressed to it. 
+ */ +int nl_srv_register(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler) +{ + int retcode = 0; + + if ((msg_type >= WLAN_NL_MSG_BASE) && (msg_type < WLAN_NL_MSG_MAX) && + msg_handler != NULL) { + nl_srv_msg_handler[msg_type - WLAN_NL_MSG_BASE] = msg_handler; + } else { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: nl_srv_register failed for msg_type %d", + msg_type); + retcode = -EINVAL; + } + + return retcode; +} + +/* + * Unregister the message handler for a specified module. + */ +int nl_srv_unregister(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler) +{ + int retcode = 0; + + if ((msg_type >= WLAN_NL_MSG_BASE) && (msg_type < WLAN_NL_MSG_MAX) && + (nl_srv_msg_handler[msg_type - WLAN_NL_MSG_BASE] == msg_handler)) { + nl_srv_msg_handler[msg_type - WLAN_NL_MSG_BASE] = NULL; + } else { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: nl_srv_unregister failed for msg_type %d", + msg_type); + retcode = -EINVAL; + } + + return retcode; +} + +/* + * Unicast the message to the process in user space identfied + * by the dst-pid + */ +int nl_srv_ucast(struct sk_buff *skb, int dst_pid, int flag) +{ + int err = -EINVAL; + + NETLINK_CB(skb).portid = 0; /* sender's pid */ + NETLINK_CB(skb).dst_group = 0; /* not multicast */ + + if (nl_srv_sock) { + err = netlink_unicast(nl_srv_sock, skb, dst_pid, flag); + if (err < 0) + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: netlink_unicast to pid[%d] failed, ret[%d]", + dst_pid, err); + } else { + dev_kfree_skb(skb); + } + + return err; +} + +/* + * Broadcast the message. 
Broadcast will return an error if + * there are no listeners + */ +int nl_srv_bcast(struct sk_buff *skb) +{ + int err = -EINVAL; + int flags = GFP_KERNEL; + + if (in_interrupt() || irqs_disabled() || in_atomic()) + flags = GFP_ATOMIC; + + NETLINK_CB(skb).portid = 0; /* sender's pid */ + NETLINK_CB(skb).dst_group = WLAN_NLINK_MCAST_GRP_ID; /* destination group */ + + if (nl_srv_sock) { + err = netlink_broadcast(nl_srv_sock, skb, 0, + WLAN_NLINK_MCAST_GRP_ID, flags); + if ((err < 0) && (err != -ESRCH)) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: netlink_broadcast failed err = %d", + err); + dev_kfree_skb(skb); + } + } else + dev_kfree_skb(skb); + return err; +} +qdf_export_symbol(nl_srv_bcast); + +/* + * Processes the Netlink socket input queue. + * Dequeue skb's from the socket input queue and process + * all the netlink messages in that skb, before moving + * to the next skb. + */ +static void nl_srv_rcv(struct sk_buff *sk) +{ + mutex_lock(&nl_srv_sem); + nl_srv_rcv_skb(sk); + mutex_unlock(&nl_srv_sem); +} + +/* + * Each skb could contain multiple Netlink messages. Process all the + * messages in one skb and discard malformed skb's silently. + */ +static void nl_srv_rcv_skb(struct sk_buff *skb) +{ + struct nlmsghdr *nlh; + + while (skb->len >= NLMSG_SPACE(0)) { + u32 rlen; + + nlh = (struct nlmsghdr *)skb->data; + + if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) { + QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, + "NLINK: Invalid " + "Netlink message: skb[%pK], len[%d], nlhdr[%pK], nlmsg_len[%d]", + skb, skb->len, nlh, nlh->nlmsg_len); + return; + } + + rlen = NLMSG_ALIGN(nlh->nlmsg_len); + if (rlen > skb->len) + rlen = skb->len; + nl_srv_rcv_msg(skb, nlh); + skb_pull(skb, rlen); + } +} + +/* + * Process a netlink message. + * Each netlink message will have a message of type tAniMsgHdr inside. 
+ */
+static void nl_srv_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+	int type;
+
+	/* Only requests are handled by kernel now */
+	if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
+		QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN,
+			  "NLINK: Received Invalid NL Req type [%x]",
+			  nlh->nlmsg_flags);
+		return;
+	}
+
+	type = nlh->nlmsg_type;
+
+	/* Unknown message */
+	if (type < WLAN_NL_MSG_BASE || type >= WLAN_NL_MSG_MAX) {
+		QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN,
+			  "NLINK: Received Invalid NL Msg type [%x]", type);
+		return;
+	}
+
+	/*
+	 * All the messages must at least carry the tAniMsgHdr
+	 * Drop any message with invalid length
+	 */
+	if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(tAniMsgHdr))) {
+		QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN,
+			  "NLINK: Received NL Msg with invalid len[%x]",
+			  nlh->nlmsg_len);
+		return;
+	}
+
+	/* turn type into dispatch table offset */
+	type -= WLAN_NL_MSG_BASE;
+
+	/* dispatch to handler */
+	if (nl_srv_msg_handler[type] != NULL) {
+		(nl_srv_msg_handler[type])(skb);
+	} else {
+		QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN,
+			  "NLINK: No handler for Netlink Msg [0x%X]", type);
+	}
+}
+
+/**
+ * nl_srv_is_initialized() - This function is used to check if the netlink
+ * service is initialized
+ *
+ * This function is used to check if the netlink service is initialized
+ *
+ * Return: Return -EPERM if the service is not initialized
+ *
+ */
+int nl_srv_is_initialized(void)
+{
+	if (nl_srv_sock)
+		return 0;
+
+	return -EPERM;
+}
+qdf_export_symbol(nl_srv_is_initialized);
+
+#else
+
+/* Stub implementations for builds where the in-kernel netlink server is
+ * compiled out (the controlling #ifdef is above this chunk — confirm which
+ * config selects it): registration becomes a no-op and the send paths
+ * simply consume the skb.
+ */
+int nl_srv_init(void *wiphy)
+{
+	return 0;
+}
+
+void nl_srv_exit(void)
+{
+}
+
+int nl_srv_register(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler)
+{
+	return 0;
+}
+
+int nl_srv_unregister(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler)
+{
+	return 0;
+}
+
+int nl_srv_ucast(struct sk_buff *skb, int dst_pid, int flag)
+{
+	dev_kfree_skb(skb);
+	return 0;
+}
+
+int nl_srv_bcast(struct sk_buff *skb)
+{
+
dev_kfree_skb(skb); + return 0; +} +qdf_export_symbol(nl_srv_bcast); + +int nl_srv_is_initialized(void) +{ + return -EPERM; +} +qdf_export_symbol(nl_srv_is_initialized); +#endif + +/** + * nl_srv_ucast_oem() - Wrapper function to send ucast msgs to OEM + * @skb: sk buffer pointer + * @dst_pid: Destination PID + * @flag: flags + * + * Sends the ucast message to OEM with generic nl socket if CNSS_GENL + * is enabled. Else, use the legacy netlink socket to send. + * + * Return: None + */ +#ifdef CNSS_GENL +void nl_srv_ucast_oem(struct sk_buff *skb, int dst_pid, int flag) +{ + nl_srv_ucast(skb, dst_pid, flag, WLAN_NL_MSG_OEM, + CLD80211_MCGRP_OEM_MSGS); +} +#else +void nl_srv_ucast_oem(struct sk_buff *skb, int dst_pid, int flag) +{ + nl_srv_ucast(skb, dst_pid, flag); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog.h b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog.h new file mode 100644 index 0000000000000000000000000000000000000000..ef370ee0863aa0b1468e79668e6d709e38dd671a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2013-2014, 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _PKTLOG_ +#define _PKTLOG_ +#ifndef REMOVE_PKT_LOG + +/** + * @typedef ol_pktlog_dev_handle + * @brief opaque handle for pktlog device object + */ +struct ol_pktlog_dev_t; +typedef struct ol_pktlog_dev_t *ol_pktlog_dev_handle; +#endif /* #ifndef REMOVE_PKT_LOG */ +#endif /* _PKTLOG_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac.h b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac.h new file mode 100644 index 0000000000000000000000000000000000000000..afa5f3d64d5373eac5e3a9fbecf05b16efe5463a --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac.h @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _PKTLOG_AC_H_ +#define _PKTLOG_AC_H_ + +#include "hif.h" +#ifndef REMOVE_PKT_LOG +#include "ol_if_athvar.h" +#include "osdep.h" +#include +#include +#include +#include +#include +#include + +#define NO_REG_FUNCS 4 + +/* Locking interface for pktlog */ +#define PKTLOG_LOCK_INIT(_pl_info) qdf_spinlock_create(&(_pl_info)->log_lock) +#define PKTLOG_LOCK_DESTROY(_pl_info) \ + qdf_spinlock_destroy(&(_pl_info)->log_lock) +#define PKTLOG_LOCK(_pl_info) qdf_spin_lock_bh(&(_pl_info)->log_lock) +#define PKTLOG_UNLOCK(_pl_info) qdf_spin_unlock_bh(&(_pl_info)->log_lock) + +#define PKTLOG_MODE_SYSTEM 1 +#define PKTLOG_MODE_ADAPTER 2 + +/* + * The proc entry starts with magic number and version field which will be + * used by post processing scripts. These fields are not needed by applications + * that do not use these scripts. This is skipped using the offset value. + */ +#define PKTLOG_READ_OFFSET 8 + +/* forward declaration for cdp_pdev */ +struct cdp_pdev; + +/* Opaque softc */ +struct ol_ath_generic_softc_t; +typedef struct ol_ath_generic_softc_t *ol_ath_generic_softc_handle; +extern void pktlog_disable_adapter_logging(struct hif_opaque_softc *scn); +extern int pktlog_alloc_buf(struct hif_opaque_softc *scn); +extern void pktlog_release_buf(struct hif_opaque_softc *scn); + +ssize_t pktlog_read_proc_entry(char *buf, size_t nbytes, loff_t *ppos, + struct ath_pktlog_info *pl_info, bool *read_complete); +A_STATUS wdi_pktlog_unsubscribe(struct cdp_pdev *txrx_pdev, uint32_t log_state); + +struct ol_pl_arch_dep_funcs { + void (*pktlog_init)(struct hif_opaque_softc *scn); + int (*pktlog_enable)(struct hif_opaque_softc *scn, int32_t log_state, + bool ini, uint8_t user, + uint32_t is_iwpriv_command); + int (*pktlog_setsize)(struct hif_opaque_softc *scn, int32_t log_state); + int (*pktlog_disable)(struct hif_opaque_softc *scn); +}; + +struct ol_pl_os_dep_funcs { + int (*pktlog_attach)(struct hif_opaque_softc *scn); + void (*pktlog_detach)(struct hif_opaque_softc *scn); + 
+};
+
+/* Parameters carried with a pktlog-related WMI command */
+struct ath_pktlog_wmi_params {
+	WMI_PKTLOG_EVENT pktlog_event;
+	WMI_CMD_ID cmd_id;
+	bool ini_triggered;
+	uint8_t user_triggered;
+};
+
+extern struct ol_pl_arch_dep_funcs ol_pl_funcs;
+extern struct ol_pl_os_dep_funcs *g_ol_pl_os_dep_funcs;
+
+/* Pktlog handler to save the state of the pktlogs */
+struct pktlog_dev_t {
+	struct ol_pl_arch_dep_funcs *pl_funcs;  /* arch-specific ops table */
+	struct ath_pktlog_info *pl_info;        /* shared pktlog state */
+	ol_ath_generic_softc_handle scn;        /* opaque softc back-pointer */
+	char *name;
+	bool tgt_pktlog_alloced;
+	bool is_pktlog_cb_subscribed;
+	bool mt_pktlog_enabled;
+	uint32_t htc_err_cnt;
+	uint8_t htc_endpoint;
+	void *htc_pdev;
+	bool vendor_cmd_send;
+	uint8_t callback_type;
+};
+
+#define PKTLOG_SYSCTL_SIZE 14
+#define PKTLOG_MAX_SEND_QUEUE_DEPTH 64
+
+/*
+ * Linux specific pktlog state information
+ */
+struct ath_pktlog_info_lnx {
+	struct ath_pktlog_info info;
+	struct ctl_table sysctls[PKTLOG_SYSCTL_SIZE];
+	struct proc_dir_entry *proc_entry;
+	struct ctl_table_header *sysctl_header;
+};
+
+/* 'info' is the first member, so a cast from ath_pktlog_info back to the
+ * Linux wrapper is valid only for objects embedded in ath_pktlog_info_lnx.
+ */
+#define PL_INFO_LNX(_pl_info)   ((struct ath_pktlog_info_lnx *)(_pl_info))
+
+extern struct ol_pktlog_dev_t ol_pl_dev;
+
+/*
+ * WDI related data and functions
+ * Callback function to the WDI events
+ */
+void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data,
+		     u_int16_t peer_id, uint32_t status);
+
+void pktlog_init(struct hif_opaque_softc *scn);
+/* NOTE(review): keep parameter names in prototypes — the last three
+ * parameters (ini, user, is_iwpriv_command per the definition) are unnamed
+ * here.
+ */
+int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
+		  bool, uint8_t, uint32_t);
+int pktlog_setsize(struct hif_opaque_softc *scn, int32_t log_state);
+int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff);
+int pktlog_disable(struct hif_opaque_softc *scn);
+int pktlogmod_init(void *context);
+void pktlogmod_exit(void *context);
+int pktlog_htc_attach(void);
+void pktlog_process_fw_msg(uint32_t *msg_word, uint32_t msg_len);
+void lit_pktlog_callback(void *context, enum WDI_EVENT event, void *log_data,
+			 u_int16_t peer_id, uint32_t status);
+
+#define ol_pktlog_attach(_scn) \
+	do { \
+		if (g_ol_pl_os_dep_funcs) {
\ + g_ol_pl_os_dep_funcs->pktlog_attach(_scn); \ + } \ + } while (0) + +#define ol_pktlog_detach(_scn) \ + do { \ + if (g_ol_pl_os_dep_funcs) { \ + g_ol_pl_os_dep_funcs->pktlog_detach(_scn); \ + } \ + } while (0) + +#else /* REMOVE_PKT_LOG */ +#define ol_pktlog_attach(_scn) ({ (void)_scn; }) +#define ol_pktlog_detach(_scn) ({ (void)_scn; }) +static inline void pktlog_init(struct hif_opaque_softc *scn) +{ + return; +} + +static inline int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state, + bool ini, uint8_t user, + uint32_t is_iwpriv_command) +{ + return 0; +} + +static inline int pktlog_setsize(struct hif_opaque_softc *scn, + int32_t log_state) +{ + return 0; +} + +static inline int pktlog_clearbuff(struct hif_opaque_softc *scn, + bool clear_buff) +{ + return 0; +} + +static inline int pktlog_disable(struct hif_opaque_softc *scn) +{ + return 0; +} + +static inline int pktlog_htc_attach(void) +{ + return 0; +} + +static inline void pktlog_process_fw_msg(uint32_t *msg_word, uint32_t msg_len) +{ } +#endif /* REMOVE_PKT_LOG */ +#endif /* _PKTLOG_AC_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac_api.h b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac_api.h new file mode 100644 index 0000000000000000000000000000000000000000..60226922fff6067302bbe8ebf0a056f0b9eb84eb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac_api.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2012-2014, 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * The file is used to define structures that are shared between + * kernel space and user space pktlog application. + */ + +#ifndef _PKTLOG_AC_API_ +#define _PKTLOG_AC_API_ +#ifndef REMOVE_PKT_LOG + +/** + * @typedef ol_pktlog_dev_handle + * @brief opaque handle for pktlog device object + */ +struct ol_pktlog_dev_t; + +/** + * @typedef hif_opaque_softc_handle + * @brief opaque handle for hif_opaque_softc + */ +struct hif_opaque_softc; +typedef struct hif_opaque_softc *hif_opaque_softc_handle; + +enum pktlog_callback_regtype { + PKTLOG_DEFAULT_CALLBACK_REGISTRATION, + PKTLOG_LITE_CALLBACK_REGISTRATION +}; + +/** + * @typedef net_device_handle + * @brief opaque handle linux phy device object + */ +struct net_device; +typedef struct net_device *net_device_handle; + +struct pktlog_dev_t; + +void pktlog_sethandle(struct pktlog_dev_t **pl_handle, + hif_opaque_softc_handle scn); + +void *get_txrx_context(void); + +struct pktlog_dev_t *get_pktlog_handle(void); +void pktlog_set_callback_regtype(enum pktlog_callback_regtype callback_type); + +/* Packet log state information */ +#ifndef _PKTLOG_INFO +#define _PKTLOG_INFO + +/** + * enum ath_pktlog_state - pktlog status + * @PKTLOG_OPR_IN_PROGRESS : pktlog command in progress + * @PKTLOG_OPR_IN_PROGRESS_READ_START: pktlog read is issued + * @PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED: + * as part of pktlog read, pktlog is disabled + * @PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE: + * as part of read, till pktlog read is complete + * @PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE: + * as part of read, pktlog clear buffer is done + * @PKTLOG_OPR_NOT_IN_PROGRESS: no pktlog command in 
progress
+ */
+enum ath_pktlog_state {
+	PKTLOG_OPR_IN_PROGRESS = 0,
+	PKTLOG_OPR_IN_PROGRESS_READ_START,
+	PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED,
+	PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE,
+	PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE,
+	PKTLOG_OPR_NOT_IN_PROGRESS
+};
+
+/* Shared (OS-independent) pktlog state; embedded as the first member of
+ * struct ath_pktlog_info_lnx on Linux.
+ */
+struct ath_pktlog_info {
+	struct ath_pktlog_buf *buf;
+	uint32_t log_state;
+	uint32_t saved_state;
+	uint32_t options;
+	/* Initial saved state: It will save the log state in pktlog
+	 * open and used in pktlog release after
+	 * pktlog read is complete.
+	 */
+	uint32_t init_saved_state;
+	enum ath_pktlog_state curr_pkt_state;
+
+	/* Size of buffer in bytes */
+	int32_t buf_size;
+	qdf_spinlock_t log_lock;
+	struct mutex pktlog_mutex;
+
+	/* Threshold of TCP SACK packets for triggered stop */
+	int sack_thr;
+
+	/* # of tail packets to log after triggered stop */
+	int tail_length;
+
+	/* throughput threshold in bytes for triggered stop */
+	uint32_t thruput_thresh;
+
+	/* (aggregated or single) packet size in bytes */
+	uint32_t pktlen;
+
+	/* a temporary variable for counting TX throughput only */
+	/* PER threshold for triggered stop, 10 for 10%, range [1, 99] */
+	uint32_t per_thresh;
+
+	/* Phyerr threshold for triggered stop */
+	uint32_t phyerr_thresh;
+
+	/* time period for counting trigger parameters, in millisecond */
+	uint32_t trigger_interval;
+	uint32_t start_time_thruput;
+	uint32_t start_time_per;
+};
+#endif /* _PKTLOG_INFO */
+#else /* REMOVE_PKT_LOG */
+typedef void *pktlog_dev_handle;
+#define pktlog_sethandle(pl_handle, scn) \
+	do { \
+		(void)pl_handle; \
+		(void)scn; \
+	} while (0)
+
+/* NOTE(review): this macro's body references 'scn', which is not one of
+ * its parameters — any expansion fails to compile unless a variable named
+ * scn happens to be in scope. Presumably the parameter list should be
+ * (dev, scn); confirm against callers.
+ */
+#define ol_pl_set_name(dev) \
+	do { \
+		(void)scn; \
+		(void)dev; \
+	} while (0)
+
+#endif /* REMOVE_PKT_LOG */
+#endif /* _PKTLOG_AC_API_ */
diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac_i.h b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac_i.h
new file mode 100644
index
0000000000000000000000000000000000000000..4968f141fb1103a242573b32877e4c1c45f19ba5 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/include/pktlog_ac_i.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _PKTLOG_AC_I_ +#define _PKTLOG_AC_I_ + +#ifndef REMOVE_PKT_LOG + +#include +#include + + +#define PKTLOG_DEFAULT_BUFSIZE (10 * 1024 * 1024) /* 10MB */ +#define PKTLOG_DEFAULT_SACK_THR 3 +#define PKTLOG_DEFAULT_TAIL_LENGTH 100 +#define PKTLOG_DEFAULT_THRUPUT_THRESH (64 * 1024) +#define PKTLOG_DEFAULT_PER_THRESH 30 +#define PKTLOG_DEFAULT_PHYERR_THRESH 300 +#define PKTLOG_DEFAULT_TRIGGER_INTERVAL 500 + +struct ath_pktlog_arg { + struct ath_pktlog_info *pl_info; + uint32_t flags; + uint16_t missed_cnt; +#ifdef HELIUMPLUS + uint8_t log_type; + uint8_t macId; +#else + uint16_t log_type; +#endif + size_t log_size; + uint16_t timestamp; +#ifdef HELIUMPLUS + uint32_t type_specific_data; +#endif + char *buf; +}; + +void pktlog_getbuf_intsafe(struct ath_pktlog_arg *plarg); +char *pktlog_getbuf(struct pktlog_dev_t *pl_dev, + struct ath_pktlog_info *pl_info, + size_t log_size, struct ath_pktlog_hdr *pl_hdr); + +A_STATUS process_tx_info(struct cdp_pdev *pdev, void *data); +A_STATUS process_rx_info(void *pdev, void *data); +A_STATUS process_rx_info_remote(void *pdev, void *data); +A_STATUS process_rate_find(void *pdev, void *data); +A_STATUS process_rate_update(void *pdev, void *data); +A_STATUS process_sw_event(void *pdev, void *data); +int process_pktlog_lite(void *context, void *log_data, uint16_t log_type); +int process_rx_desc_remote(void *pdev, void *data); +#endif /* REMOVE_PKT_LOG */ +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/linux_ac.c b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/linux_ac.c new file mode 100644 index 0000000000000000000000000000000000000000..db50aa9b26f72b1667a059a5093ea01f58cc3317 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/linux_ac.c @@ -0,0 +1,1047 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef REMOVE_PKT_LOG +#ifndef EXPORT_SYMTAB +#define EXPORT_SYMTAB +#endif +#ifndef __KERNEL__ +#define __KERNEL__ +#endif +/* + * Linux specific implementation of Pktlogs for 802.11ac + */ +#include +#include +#include +#include +#include +#include +#include +#include "i_host_diag_core_log.h" +#include "host_diag_core_log.h" +#include "ani_global.h" + +#define PKTLOG_TAG "ATH_PKTLOG" +#define PKTLOG_DEVNAME_SIZE 32 +#define MAX_WLANDEV 1 + +#ifdef MULTI_IF_NAME +#define PKTLOG_PROC_DIR "ath_pktlog" MULTI_IF_NAME +#else +#define PKTLOG_PROC_DIR "ath_pktlog" +#endif + +/* Permissions for creating proc entries */ +#define PKTLOG_PROC_PERM 0444 +#define PKTLOG_PROCSYS_DIR_PERM 0555 +#define PKTLOG_PROCSYS_PERM 0644 + +#ifndef __MOD_INC_USE_COUNT +#define PKTLOG_MOD_INC_USE_COUNT do { \ + if (!try_module_get(THIS_MODULE)) { \ + printk(KERN_WARNING "try_module_get failed\n"); \ + } } while (0) + +#define PKTLOG_MOD_DEC_USE_COUNT module_put(THIS_MODULE) +#else +#define PKTLOG_MOD_INC_USE_COUNT MOD_INC_USE_COUNT +#define PKTLOG_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT +#endif + +static struct ath_pktlog_info *g_pktlog_info; + +static struct proc_dir_entry *g_pktlog_pde; + +static DEFINE_MUTEX(proc_mutex); + +static int 
pktlog_attach(struct hif_opaque_softc *scn);
+static void pktlog_detach(struct hif_opaque_softc *scn);
+static int pktlog_open(struct inode *i, struct file *f);
+static int pktlog_release(struct inode *i, struct file *f);
+static ssize_t pktlog_read(struct file *file, char *buf, size_t nbytes,
+			   loff_t *ppos);
+
+/* NOTE(review): 'label:' initializers are a GCC extension; the C99
+ * '.open = pktlog_open' designated-initializer form is preferred.
+ */
+static struct file_operations pktlog_fops = {
+	open: pktlog_open,
+	release:pktlog_release,
+	read : pktlog_read,
+};
+
+/* Turn off adapter logging by clearing the shared log_state flag */
+void pktlog_disable_adapter_logging(struct hif_opaque_softc *scn)
+{
+	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
+	if (pl_dev)
+		pl_dev->pl_info->log_state = 0;
+}
+
+/* Allocate the page-aligned, page-reserved pktlog ring buffer.
+ * Returns 0 on success, -EINVAL if a buffer already exists or the handle
+ * is missing, -ENOMEM on allocation failure.
+ */
+int pktlog_alloc_buf(struct hif_opaque_softc *scn)
+{
+	uint32_t page_cnt;
+	unsigned long vaddr;
+	struct page *vpg;
+	struct pktlog_dev_t *pl_dev;
+	struct ath_pktlog_info *pl_info;
+	struct ath_pktlog_buf *buffer;
+
+	pl_dev = get_pktlog_handle();
+
+	if (!pl_dev) {
+		printk(PKTLOG_TAG
+		       "%s: Unable to allocate buffer pdev_txrx_handle or pdev_txrx_handle->pl_dev is null\n",
+		       __func__);
+		return -EINVAL;
+	}
+
+	pl_info = pl_dev->pl_info;
+
+	page_cnt = (sizeof(*(pl_info->buf)) + pl_info->buf_size) / PAGE_SIZE;
+
+	qdf_spin_lock_bh(&pl_info->log_lock);
+	if (pl_info->buf != NULL) {
+		qdf_spin_unlock_bh(&pl_info->log_lock);
+		printk(PKTLOG_TAG "Buffer is already in use\n");
+		return -EINVAL;
+	}
+	qdf_spin_unlock_bh(&pl_info->log_lock);
+
+	/* Two extra pages: one consumed by the round-up below, one of
+	 * headroom past page_cnt (integer division above truncates).
+	 */
+	buffer = vmalloc((page_cnt + 2) * PAGE_SIZE);
+	if (buffer == NULL) {
+		printk(PKTLOG_TAG
+		       "%s: Unable to allocate buffer "
+		       "(%d pages)\n", __func__, page_cnt);
+		return -ENOMEM;
+	}
+
+	/* NOTE(review): the original vmalloc() pointer is overwritten here;
+	 * this is only safe because vmalloc() returns page-aligned memory,
+	 * making the rounded pointer equal to the original — otherwise the
+	 * later vfree() in pktlog_release_buf() would free a bogus address.
+	 */
+	buffer = (struct ath_pktlog_buf *)
+		 (((unsigned long)(buffer) + PAGE_SIZE - 1)
+		  & PAGE_MASK);
+
+	/* Mark the pages reserved (needed to mmap them to user space) */
+	for (vaddr = (unsigned long)(buffer);
+	     vaddr < ((unsigned long)(buffer) + (page_cnt * PAGE_SIZE));
+	     vaddr += PAGE_SIZE) {
+		vpg = vmalloc_to_page((const void *)vaddr);
+		SetPageReserved(vpg);
+	}
+
+	/* Re-check under the lock: a racing allocator may have installed a
+	 * buffer after the unlocked window above; drop the old one first.
+	 */
+	qdf_spin_lock_bh(&pl_info->log_lock);
+	if (pl_info->buf != NULL)
+		pktlog_release_buf(scn);
+
+	pl_info->buf = buffer;
+
qdf_spin_unlock_bh(&pl_info->log_lock); + return 0; +} + +void pktlog_release_buf(struct hif_opaque_softc *scn) +{ + unsigned long page_cnt; + unsigned long vaddr; + struct page *vpg; + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info *pl_info; + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("%s: invalid pl_dev handle", __func__); + return; + } + + if (!pl_dev->pl_info) { + qdf_print("%s: invalid pl_dev handle", __func__); + return; + } + + pl_info = pl_dev->pl_info; + + page_cnt = ((sizeof(*(pl_info->buf)) + pl_info->buf_size) / + PAGE_SIZE) + 1; + + for (vaddr = (unsigned long)(pl_info->buf); + vaddr < (unsigned long)(pl_info->buf) + (page_cnt * PAGE_SIZE); + vaddr += PAGE_SIZE) { + vpg = vmalloc_to_page((const void *)vaddr); + ClearPageReserved(vpg); + } + + vfree(pl_info->buf); + pl_info->buf = NULL; +} + +static void pktlog_cleanup(struct ath_pktlog_info *pl_info) +{ + pl_info->log_state = 0; + PKTLOG_LOCK_DESTROY(pl_info); + mutex_destroy(&pl_info->pktlog_mutex); +} + +/* sysctl procfs handler to enable pktlog */ +static int +qdf_sysctl_decl(ath_sysctl_pktlog_enable, ctl, write, filp, buffer, lenp, ppos) +{ + int ret, enable; + ol_ath_generic_softc_handle scn; + struct pktlog_dev_t *pl_dev; + + mutex_lock(&proc_mutex); + scn = (ol_ath_generic_softc_handle) ctl->extra1; + + if (!scn) { + mutex_unlock(&proc_mutex); + printk("%s: Invalid scn context\n", __func__); + ASSERT(0); + return -EINVAL; + } + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + mutex_unlock(&proc_mutex); + printk("%s: Invalid pktlog context\n", __func__); + ASSERT(0); + return -ENODEV; + } + + ctl->data = &enable; + ctl->maxlen = sizeof(enable); + + if (write) { + ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, + lenp, ppos); + if (ret == 0) { + ret = pl_dev->pl_funcs->pktlog_enable( + (struct hif_opaque_softc *)scn, enable, + cds_is_packet_log_enabled(), 0, 1); + } + else + QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_DEBUG, + "Line:%d %s:proc_dointvec 
failed reason %d", + __LINE__, __func__, ret); + } else { + ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, + lenp, ppos); + if (ret) + QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_DEBUG, + "Line:%d %s:proc_dointvec failed reason %d", + __LINE__, __func__, ret); + } + + ctl->data = NULL; + ctl->maxlen = 0; + mutex_unlock(&proc_mutex); + + return ret; +} + +static int get_pktlog_bufsize(struct pktlog_dev_t *pl_dev) +{ + return pl_dev->pl_info->buf_size; +} + +/* sysctl procfs handler to set/get pktlog size */ +static int +qdf_sysctl_decl(ath_sysctl_pktlog_size, ctl, write, filp, buffer, lenp, ppos) +{ + int ret, size; + ol_ath_generic_softc_handle scn; + struct pktlog_dev_t *pl_dev; + + mutex_lock(&proc_mutex); + scn = (ol_ath_generic_softc_handle) ctl->extra1; + + if (!scn) { + mutex_unlock(&proc_mutex); + printk("%s: Invalid scn context\n", __func__); + ASSERT(0); + return -EINVAL; + } + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + mutex_unlock(&proc_mutex); + printk("%s: Invalid pktlog handle\n", __func__); + ASSERT(0); + return -ENODEV; + } + + ctl->data = &size; + ctl->maxlen = sizeof(size); + + if (write) { + ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, + lenp, ppos); + if (ret == 0) + ret = pl_dev->pl_funcs->pktlog_setsize( + (struct hif_opaque_softc *)scn, size); + } else { + size = get_pktlog_bufsize(pl_dev); + ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, + lenp, ppos); + } + + ctl->data = NULL; + ctl->maxlen = 0; + mutex_unlock(&proc_mutex); + + return ret; +} + +/* Register sysctl table */ +static int pktlog_sysctl_register(struct hif_opaque_softc *scn) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_info_lnx *pl_info_lnx; + char *proc_name; + + if (pl_dev) { + pl_info_lnx = PL_INFO_LNX(pl_dev->pl_info); + proc_name = pl_dev->name; + } else { + pl_info_lnx = PL_INFO_LNX(g_pktlog_info); + proc_name = PKTLOG_PROC_SYSTEM; + } + + /* + * Setup the sysctl table for creating the following 
sysctl entries: + * /proc/sys/PKTLOG_PROC_DIR//enable for enabling/disabling + * pktlog + * /proc/sys/PKTLOG_PROC_DIR//size for changing the buffer size + */ + memset(pl_info_lnx->sysctls, 0, sizeof(pl_info_lnx->sysctls)); + pl_info_lnx->sysctls[0].procname = PKTLOG_PROC_DIR; + pl_info_lnx->sysctls[0].mode = PKTLOG_PROCSYS_DIR_PERM; + pl_info_lnx->sysctls[0].child = &pl_info_lnx->sysctls[2]; + + /* [1] is NULL terminator */ + pl_info_lnx->sysctls[2].procname = proc_name; + pl_info_lnx->sysctls[2].mode = PKTLOG_PROCSYS_DIR_PERM; + pl_info_lnx->sysctls[2].child = &pl_info_lnx->sysctls[4]; + + /* [3] is NULL terminator */ + pl_info_lnx->sysctls[4].procname = "enable"; + pl_info_lnx->sysctls[4].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[4].proc_handler = ath_sysctl_pktlog_enable; + pl_info_lnx->sysctls[4].extra1 = scn; + + pl_info_lnx->sysctls[5].procname = "size"; + pl_info_lnx->sysctls[5].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[5].proc_handler = ath_sysctl_pktlog_size; + pl_info_lnx->sysctls[5].extra1 = scn; + + pl_info_lnx->sysctls[6].procname = "options"; + pl_info_lnx->sysctls[6].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[6].proc_handler = proc_dointvec; + pl_info_lnx->sysctls[6].data = &pl_info_lnx->info.options; + pl_info_lnx->sysctls[6].maxlen = sizeof(pl_info_lnx->info.options); + + pl_info_lnx->sysctls[7].procname = "sack_thr"; + pl_info_lnx->sysctls[7].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[7].proc_handler = proc_dointvec; + pl_info_lnx->sysctls[7].data = &pl_info_lnx->info.sack_thr; + pl_info_lnx->sysctls[7].maxlen = sizeof(pl_info_lnx->info.sack_thr); + + pl_info_lnx->sysctls[8].procname = "tail_length"; + pl_info_lnx->sysctls[8].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[8].proc_handler = proc_dointvec; + pl_info_lnx->sysctls[8].data = &pl_info_lnx->info.tail_length; + pl_info_lnx->sysctls[8].maxlen = sizeof(pl_info_lnx->info.tail_length); + + pl_info_lnx->sysctls[9].procname = "thruput_thresh"; + 
pl_info_lnx->sysctls[9].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[9].proc_handler = proc_dointvec; + pl_info_lnx->sysctls[9].data = &pl_info_lnx->info.thruput_thresh; + pl_info_lnx->sysctls[9].maxlen = + sizeof(pl_info_lnx->info.thruput_thresh); + + pl_info_lnx->sysctls[10].procname = "phyerr_thresh"; + pl_info_lnx->sysctls[10].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[10].proc_handler = proc_dointvec; + pl_info_lnx->sysctls[10].data = &pl_info_lnx->info.phyerr_thresh; + pl_info_lnx->sysctls[10].maxlen = + sizeof(pl_info_lnx->info.phyerr_thresh); + + pl_info_lnx->sysctls[11].procname = "per_thresh"; + pl_info_lnx->sysctls[11].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[11].proc_handler = proc_dointvec; + pl_info_lnx->sysctls[11].data = &pl_info_lnx->info.per_thresh; + pl_info_lnx->sysctls[11].maxlen = sizeof(pl_info_lnx->info.per_thresh); + + pl_info_lnx->sysctls[12].procname = "trigger_interval"; + pl_info_lnx->sysctls[12].mode = PKTLOG_PROCSYS_PERM; + pl_info_lnx->sysctls[12].proc_handler = proc_dointvec; + pl_info_lnx->sysctls[12].data = &pl_info_lnx->info.trigger_interval; + pl_info_lnx->sysctls[12].maxlen = + sizeof(pl_info_lnx->info.trigger_interval); + /* [13] is NULL terminator */ + + /* and register everything */ + /* register_sysctl_table changed from 2.6.21 onwards */ + pl_info_lnx->sysctl_header = + register_sysctl_table(pl_info_lnx->sysctls); + + if (!pl_info_lnx->sysctl_header) { + printk("%s: failed to register sysctls!\n", proc_name); + return -EINVAL; + } + + return 0; +} + +/* + * Initialize logging for system or adapter + * Parameter scn should be NULL for system wide logging + */ +static int pktlog_attach(struct hif_opaque_softc *scn) +{ + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info_lnx *pl_info_lnx; + char *proc_name; + struct proc_dir_entry *proc_entry; + + /* Allocate pktlog dev for later use */ + pl_dev = get_pktlog_handle(); + + if (pl_dev != NULL) { + pl_info_lnx = kmalloc(sizeof(*pl_info_lnx), 
GFP_KERNEL); + if (pl_info_lnx == NULL) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Allocation failed for pl_info\n", + __func__); + goto attach_fail1; + } + + pl_dev->pl_info = &pl_info_lnx->info; + pl_dev->name = WLANDEV_BASENAME; + proc_name = pl_dev->name; + + if (!pl_dev->pl_funcs) + pl_dev->pl_funcs = &ol_pl_funcs; + + /* + * Valid for both direct attach and offload architecture + */ + pl_dev->pl_funcs->pktlog_init(scn); + } else { + return -EINVAL; + } + + /* + * initialize log info + * might be good to move to pktlog_init + */ + /* pl_dev->tgt_pktlog_alloced = false; */ + pl_info_lnx->proc_entry = NULL; + pl_info_lnx->sysctl_header = NULL; + + proc_entry = proc_create_data(proc_name, PKTLOG_PROC_PERM, + g_pktlog_pde, &pktlog_fops, + &pl_info_lnx->info); + + if (proc_entry == NULL) { + printk(PKTLOG_TAG "%s: create_proc_entry failed for %s\n", + __func__, proc_name); + goto attach_fail1; + } + + pl_info_lnx->proc_entry = proc_entry; + + if (pktlog_sysctl_register(scn)) { + printk(PKTLOG_TAG "%s: sysctl register failed for %s\n", + __func__, proc_name); + goto attach_fail2; + } + + return 0; + +attach_fail2: + remove_proc_entry(proc_name, g_pktlog_pde); + +attach_fail1: + if (pl_dev) + kfree(pl_dev->pl_info); + + return -EINVAL; +} + +static void pktlog_sysctl_unregister(struct pktlog_dev_t *pl_dev) +{ + struct ath_pktlog_info_lnx *pl_info_lnx; + + if (!pl_dev) { + printk("%s: Invalid pktlog context\n", __func__); + ASSERT(0); + return; + } + + pl_info_lnx = (pl_dev) ? 
PL_INFO_LNX(pl_dev->pl_info) :
+		      PL_INFO_LNX(g_pktlog_info);
+
+	if (pl_info_lnx->sysctl_header) {
+		unregister_sysctl_table(pl_info_lnx->sysctl_header);
+		pl_info_lnx->sysctl_header = NULL;
+	}
+}
+
+/* Tear down proc/sysctl entries and free the pktlog buffer and state */
+static void pktlog_detach(struct hif_opaque_softc *scn)
+{
+	struct ath_pktlog_info *pl_info;
+	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
+
+	if (!pl_dev) {
+		printk("%s: Invalid pktlog context\n", __func__);
+		ASSERT(0);
+		return;
+	}
+
+	pl_info = pl_dev->pl_info;
+	remove_proc_entry(WLANDEV_BASENAME, g_pktlog_pde);
+	pktlog_sysctl_unregister(pl_dev);
+
+	qdf_spin_lock_bh(&pl_info->log_lock);
+
+	/* NOTE(review): pktlog_release_buf() calls vfree(), which must not
+	 * run in atomic context, yet it is invoked here under a bh spinlock
+	 * — verify this path.
+	 */
+	if (pl_info->buf) {
+		pktlog_release_buf(scn);
+		pl_dev->tgt_pktlog_alloced = false;
+	}
+	qdf_spin_unlock_bh(&pl_info->log_lock);
+	pktlog_cleanup(pl_info);
+
+	/* NOTE(review): pl_dev was already dereferenced above, so this
+	 * guard is redundant.
+	 */
+	if (pl_dev) {
+		kfree(pl_info);
+		pl_dev->pl_info = NULL;
+	}
+}
+
+/* proc open handler: records the current log state and disables logging
+ * so the buffer can be read consistently; state is restored on release.
+ */
+static int __pktlog_open(struct inode *i, struct file *f)
+{
+	struct hif_opaque_softc *scn;
+	struct pktlog_dev_t *pl_dev;
+	struct ath_pktlog_info *pl_info;
+	int ret = 0;
+
+	PKTLOG_MOD_INC_USE_COUNT;
+	pl_info = (struct ath_pktlog_info *)
+		  PDE_DATA(f->f_path.dentry->d_inode);
+
+	/* NOTE(review): every early-return below leaks the module reference
+	 * taken by PKTLOG_MOD_INC_USE_COUNT — no matching
+	 * PKTLOG_MOD_DEC_USE_COUNT on these error paths (the decrement only
+	 * happens in __pktlog_release).
+	 */
+	if (!pl_info) {
+		pr_err("%s: pl_info NULL", __func__);
+		return -EINVAL;
+	}
+
+	if (pl_info->curr_pkt_state != PKTLOG_OPR_NOT_IN_PROGRESS) {
+		pr_info("%s: plinfo state (%d) != PKTLOG_OPR_NOT_IN_PROGRESS",
+			__func__, pl_info->curr_pkt_state);
+		return -EBUSY;
+	}
+
+	if (qdf_is_module_state_transitioning()) {
+		pr_info("%s: module transition in progress", __func__);
+		return -EAGAIN;
+	}
+
+	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS_READ_START;
+	scn = cds_get_context(QDF_MODULE_ID_HIF);
+	if (!scn) {
+		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
+		qdf_print("%s: Invalid scn context\n", __func__);
+		ASSERT(0);
+		return -EINVAL;
+	}
+
+	pl_dev = get_pktlog_handle();
+
+	if (!pl_dev) {
+		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
+		qdf_print("%s: Invalid pktlog handle\n", __func__);
+		ASSERT(0);
+		return -ENODEV;
+	}
+
pl_info->init_saved_state = pl_info->log_state; + if (!pl_info->log_state) { + /* Pktlog is already disabled. + * Proceed to read directly. + */ + pl_info->curr_pkt_state = + PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED; + return ret; + } + /* Disbable the pktlog internally. */ + ret = pl_dev->pl_funcs->pktlog_disable(scn); + pl_info->log_state = 0; + pl_info->curr_pkt_state = + PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED; + return ret; +} + +static int pktlog_open(struct inode *i, struct file *f) +{ + int ret; + + qdf_ssr_protect(__func__); + ret = __pktlog_open(i, f); + qdf_ssr_unprotect(__func__); + + return ret; +} + +static int __pktlog_release(struct inode *i, struct file *f) +{ + struct hif_opaque_softc *scn; + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info *pl_info; + int ret = 0; + + PKTLOG_MOD_DEC_USE_COUNT; + + pl_info = (struct ath_pktlog_info *) + PDE_DATA(f->f_path.dentry->d_inode); + + if (!pl_info) + return -EINVAL; + + if (qdf_is_module_state_transitioning()) { + pr_info("%s: module transition in progress", __func__); + return -EAGAIN; + } + + scn = cds_get_context(QDF_MODULE_ID_HIF); + if (!scn) { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: Invalid scn context\n", __func__); + ASSERT(0); + return -EINVAL; + } + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: Invalid pktlog handle\n", __func__); + ASSERT(0); + return -ENODEV; + } + + pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE; + /*clear pktlog buffer.*/ + pktlog_clearbuff(scn, true); + pl_info->log_state = pl_info->init_saved_state; + pl_info->init_saved_state = 0; + + /*Enable pktlog again*/ + ret = pl_dev->pl_funcs->pktlog_enable( + (struct hif_opaque_softc *)scn, pl_info->log_state, + cds_is_packet_log_enabled(), 0, 1); + + if (ret != 0) + pr_warn("%s: pktlog cannot be enabled. 
ret value %d\n", + __func__, ret); + + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + return ret; +} + +static int pktlog_release(struct inode *i, struct file *f) +{ + int ret; + + qdf_ssr_protect(__func__); + ret = __pktlog_release(i, f); + qdf_ssr_unprotect(__func__); + + return ret; +} + +#ifndef MIN +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif + +/** + * pktlog_read_proc_entry() - This function is used to read data from the + * proc entry into the readers buffer + * @buf: Readers buffer + * @nbytes: Number of bytes to read + * @ppos: Offset within the drivers buffer + * @pl_info: Packet log information pointer + * @read_complete: Boolean value indication whether read is complete + * + * This function is used to read data from the proc entry into the readers + * buffer. Its functionality is similar to 'pktlog_read' which does + * copy to user to the user space buffer + * + * Return: Number of bytes read from the buffer + * + */ + ssize_t +pktlog_read_proc_entry(char *buf, size_t nbytes, loff_t *ppos, + struct ath_pktlog_info *pl_info, bool *read_complete) +{ + size_t bufhdr_size; + size_t count = 0, ret_val = 0; + int rem_len; + int start_offset, end_offset; + int fold_offset, ppos_data, cur_rd_offset, cur_wr_offset; + struct ath_pktlog_buf *log_buf; + + qdf_spin_lock_bh(&pl_info->log_lock); + log_buf = pl_info->buf; + + *read_complete = false; + + if (log_buf == NULL) { + *read_complete = true; + qdf_spin_unlock_bh(&pl_info->log_lock); + return 0; + } + + if (*ppos == 0 && pl_info->log_state) { + pl_info->saved_state = pl_info->log_state; + pl_info->log_state = 0; + } + + bufhdr_size = sizeof(log_buf->bufhdr); + + /* copy valid log entries from circular buffer into user space */ + rem_len = nbytes; + count = 0; + + if (*ppos < bufhdr_size) { + count = MIN((bufhdr_size - *ppos), rem_len); + qdf_mem_copy(buf, ((char *)&log_buf->bufhdr) + *ppos, + count); + rem_len -= count; + ret_val += count; + } + + start_offset = log_buf->rd_offset; + 
cur_wr_offset = log_buf->wr_offset; + + if ((rem_len == 0) || (start_offset < 0)) + goto rd_done; + + fold_offset = -1; + cur_rd_offset = start_offset; + + /* Find the last offset and fold-offset if the buffer is folded */ + do { + struct ath_pktlog_hdr *log_hdr; + int log_data_offset; + + log_hdr = (struct ath_pktlog_hdr *) (log_buf->log_data + + cur_rd_offset); + + log_data_offset = cur_rd_offset + sizeof(struct ath_pktlog_hdr); + + if ((fold_offset == -1) + && ((pl_info->buf_size - log_data_offset) + <= log_hdr->size)) + fold_offset = log_data_offset - 1; + + PKTLOG_MOV_RD_IDX(cur_rd_offset, log_buf, pl_info->buf_size); + + if ((fold_offset == -1) && (cur_rd_offset == 0) + && (cur_rd_offset != cur_wr_offset)) + fold_offset = log_data_offset + log_hdr->size - 1; + + end_offset = log_data_offset + log_hdr->size - 1; + } while (cur_rd_offset != cur_wr_offset); + + ppos_data = *ppos + ret_val - bufhdr_size + start_offset; + + if (fold_offset == -1) { + if (ppos_data > end_offset) + goto rd_done; + + count = MIN(rem_len, (end_offset - ppos_data + 1)); + qdf_mem_copy(buf + ret_val, + log_buf->log_data + ppos_data, + count); + ret_val += count; + rem_len -= count; + } else { + if (ppos_data <= fold_offset) { + count = MIN(rem_len, (fold_offset - ppos_data + 1)); + qdf_mem_copy(buf + ret_val, + log_buf->log_data + ppos_data, + count); + ret_val += count; + rem_len -= count; + } + + if (rem_len == 0) + goto rd_done; + + ppos_data = + *ppos + ret_val - (bufhdr_size + + (fold_offset - start_offset + 1)); + + if (ppos_data <= end_offset) { + count = MIN(rem_len, (end_offset - ppos_data + 1)); + qdf_mem_copy(buf + ret_val, + log_buf->log_data + ppos_data, + count); + ret_val += count; + rem_len -= count; + } + } + +rd_done: + if ((ret_val < nbytes) && pl_info->saved_state) { + pl_info->log_state = pl_info->saved_state; + pl_info->saved_state = 0; + } + *ppos += ret_val; + + if (ret_val == 0) { + /* Write pointer might have been updated during the read. 
+ * So, if some data is written into, lets not reset the pointers + * We can continue to read from the offset position + */ + if (cur_wr_offset != log_buf->wr_offset) { + *read_complete = false; + } else { + pl_info->buf->rd_offset = -1; + pl_info->buf->wr_offset = 0; + pl_info->buf->bytes_written = 0; + pl_info->buf->offset = PKTLOG_READ_OFFSET; + *read_complete = true; + } + } + qdf_spin_unlock_bh(&pl_info->log_lock); + return ret_val; +} + +static ssize_t +__pktlog_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos) +{ + size_t bufhdr_size; + size_t count = 0, ret_val = 0; + int rem_len; + int start_offset, end_offset; + int fold_offset, ppos_data, cur_rd_offset; + struct ath_pktlog_info *pl_info; + struct ath_pktlog_buf *log_buf; + + if (qdf_is_module_state_transitioning()) { + pr_info("%s: module transition in progress", __func__); + return -EAGAIN; + } + + pl_info = (struct ath_pktlog_info *) + PDE_DATA(file->f_path.dentry->d_inode); + if (!pl_info) + return 0; + + qdf_spin_lock_bh(&pl_info->log_lock); + log_buf = pl_info->buf; + + if (log_buf == NULL) { + qdf_spin_unlock_bh(&pl_info->log_lock); + return 0; + } + + if (pl_info->log_state) { + /* Read is not allowed when write is going on + * When issuing cat command, ensure to send + * pktlog disable command first. 
+ */ + qdf_spin_unlock_bh(&pl_info->log_lock); + return -EINVAL; + } + + if (*ppos == 0 && pl_info->log_state) { + pl_info->saved_state = pl_info->log_state; + pl_info->log_state = 0; + } + + bufhdr_size = sizeof(log_buf->bufhdr); + + /* copy valid log entries from circular buffer into user space */ + + rem_len = nbytes; + count = 0; + + if (*ppos < bufhdr_size) { + count = QDF_MIN((bufhdr_size - *ppos), rem_len); + qdf_spin_unlock_bh(&pl_info->log_lock); + if (copy_to_user(buf, ((char *)&log_buf->bufhdr) + *ppos, + count)) { + return -EFAULT; + } + rem_len -= count; + ret_val += count; + qdf_spin_lock_bh(&pl_info->log_lock); + } + + start_offset = log_buf->rd_offset; + + if ((rem_len == 0) || (start_offset < 0)) + goto rd_done; + + fold_offset = -1; + cur_rd_offset = start_offset; + + /* Find the last offset and fold-offset if the buffer is folded */ + do { + struct ath_pktlog_hdr *log_hdr; + int log_data_offset; + + log_hdr = (struct ath_pktlog_hdr *)(log_buf->log_data + + cur_rd_offset); + + log_data_offset = cur_rd_offset + sizeof(struct ath_pktlog_hdr); + + if ((fold_offset == -1) + && ((pl_info->buf_size - log_data_offset) + <= log_hdr->size)) + fold_offset = log_data_offset - 1; + + PKTLOG_MOV_RD_IDX(cur_rd_offset, log_buf, pl_info->buf_size); + + if ((fold_offset == -1) && (cur_rd_offset == 0) + && (cur_rd_offset != log_buf->wr_offset)) + fold_offset = log_data_offset + log_hdr->size - 1; + + end_offset = log_data_offset + log_hdr->size - 1; + } while (cur_rd_offset != log_buf->wr_offset); + + ppos_data = *ppos + ret_val - bufhdr_size + start_offset; + + if (fold_offset == -1) { + if (ppos_data > end_offset) + goto rd_done; + + count = QDF_MIN(rem_len, (end_offset - ppos_data + 1)); + qdf_spin_unlock_bh(&pl_info->log_lock); + + if (copy_to_user(buf + ret_val, + log_buf->log_data + ppos_data, count)) { + return -EFAULT; + } + + ret_val += count; + rem_len -= count; + qdf_spin_lock_bh(&pl_info->log_lock); + } else { + if (ppos_data <= fold_offset) { + count = 
QDF_MIN(rem_len, (fold_offset - ppos_data + 1)); + qdf_spin_unlock_bh(&pl_info->log_lock); + if (copy_to_user(buf + ret_val, + log_buf->log_data + ppos_data, + count)) { + return -EFAULT; + } + ret_val += count; + rem_len -= count; + qdf_spin_lock_bh(&pl_info->log_lock); + } + + if (rem_len == 0) + goto rd_done; + + ppos_data = + *ppos + ret_val - (bufhdr_size + + (fold_offset - start_offset + 1)); + + if (ppos_data <= end_offset) { + count = QDF_MIN(rem_len, (end_offset - ppos_data + 1)); + qdf_spin_unlock_bh(&pl_info->log_lock); + if (copy_to_user(buf + ret_val, + log_buf->log_data + ppos_data, + count)) { + return -EFAULT; + } + ret_val += count; + rem_len -= count; + qdf_spin_lock_bh(&pl_info->log_lock); + } + } + +rd_done: + if ((ret_val < nbytes) && pl_info->saved_state) { + pl_info->log_state = pl_info->saved_state; + pl_info->saved_state = 0; + } + *ppos += ret_val; + + qdf_spin_unlock_bh(&pl_info->log_lock); + return ret_val; +} + +static ssize_t +pktlog_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos) +{ + size_t ret; + struct ath_pktlog_info *pl_info; + + pl_info = (struct ath_pktlog_info *) + PDE_DATA(file->f_path.dentry->d_inode); + if (!pl_info) + return 0; + + qdf_ssr_protect(__func__); + mutex_lock(&pl_info->pktlog_mutex); + ret = __pktlog_read(file, buf, nbytes, ppos); + mutex_unlock(&pl_info->pktlog_mutex); + qdf_ssr_unprotect(__func__); + return ret; +} + +int pktlogmod_init(void *context) +{ + int ret; + + /* create the proc directory entry */ + g_pktlog_pde = proc_mkdir(PKTLOG_PROC_DIR, NULL); + + if (g_pktlog_pde == NULL) { + printk(PKTLOG_TAG "%s: proc_mkdir failed\n", __func__); + return -EPERM; + } + + /* Attach packet log */ + ret = pktlog_attach((struct hif_opaque_softc *)context); + + /* If packet log init failed */ + if (ret) + goto attach_fail; + + return ret; + +attach_fail: + remove_proc_entry(PKTLOG_PROC_DIR, NULL); + g_pktlog_pde = NULL; + + return ret; +} + +void pktlogmod_exit(void *context) +{ + if (g_pktlog_pde 
== NULL) + return; + + pktlog_detach((struct hif_opaque_softc *)context); + + /* + * pdev kill needs to be implemented + */ + remove_proc_entry(PKTLOG_PROC_DIR, NULL); +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_ac.c b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_ac.c new file mode 100644 index 0000000000000000000000000000000000000000..9a31ad35a3de9c11b578b39b30dc0f1751801bd3 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_ac.c @@ -0,0 +1,1082 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef REMOVE_PKT_LOG + +#include "qdf_mem.h" +#include "athdefs.h" +#include "pktlog_ac_i.h" +#include "cds_api.h" +#include "wma_types.h" +#include "htc.h" +#include +#include + +wdi_event_subscribe PKTLOG_TX_SUBSCRIBER; +wdi_event_subscribe PKTLOG_RX_SUBSCRIBER; +wdi_event_subscribe PKTLOG_RX_REMOTE_SUBSCRIBER; +wdi_event_subscribe PKTLOG_RCFIND_SUBSCRIBER; +wdi_event_subscribe PKTLOG_RCUPDATE_SUBSCRIBER; +wdi_event_subscribe PKTLOG_SW_EVENT_SUBSCRIBER; +wdi_event_subscribe PKTLOG_LITE_T2H_SUBSCRIBER; +wdi_event_subscribe PKTLOG_LITE_RX_SUBSCRIBER; + +struct ol_pl_arch_dep_funcs ol_pl_funcs = { + .pktlog_init = pktlog_init, + .pktlog_enable = pktlog_enable, + .pktlog_setsize = pktlog_setsize, + .pktlog_disable = pktlog_disable, /* valid for f/w disable */ +}; + +struct pktlog_dev_t pl_dev = { + .pl_funcs = &ol_pl_funcs, +}; + +void pktlog_sethandle(struct pktlog_dev_t **pl_handle, + struct hif_opaque_softc *scn) +{ + pl_dev.scn = (ol_ath_generic_softc_handle) scn; + *pl_handle = &pl_dev; +} + +void pktlog_set_callback_regtype( + enum pktlog_callback_regtype callback_type) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("Invalid pl_dev"); + return; + } + + pl_dev->callback_type = callback_type; +} + +#ifdef CONFIG_MCL +struct pktlog_dev_t *get_pktlog_handle(void) +{ + struct cdp_pdev *pdev_txrx_handle = + cds_get_context(QDF_MODULE_ID_TXRX); + void *soc = cds_get_context(QDF_MODULE_ID_SOC); + + return cdp_get_pldev(soc, pdev_txrx_handle); +} + +/* + * Get current txrx context + */ +void *get_txrx_context(void) +{ + return cds_get_context(QDF_MODULE_ID_TXRX); +} + +#else +/* TODO: Need to 
use WIN implementation to return pktlog_dev handle */ +static inline struct pktlog_dev_t *get_pktlog_handle(void) +{ + return NULL; +} +static struct pktlog_dev_t *get_txrx_context(void) { } +#endif + +static A_STATUS pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types, + WMI_CMD_ID cmd_id, bool ini_triggered, + uint8_t user_triggered) +{ + struct scheduler_msg msg = { 0 }; + QDF_STATUS status; + struct ath_pktlog_wmi_params *param; + + param = qdf_mem_malloc(sizeof(struct ath_pktlog_wmi_params)); + + if (!param) + return A_NO_MEMORY; + + param->cmd_id = cmd_id; + param->pktlog_event = event_types; + param->ini_triggered = ini_triggered; + param->user_triggered = user_triggered; + + msg.type = WMA_PKTLOG_ENABLE_REQ; + msg.bodyptr = param; + msg.bodyval = 0; + + status = scheduler_post_message(QDF_MODULE_ID_WMA, + QDF_MODULE_ID_WMA, + QDF_MODULE_ID_WMA, &msg); + + if (status != QDF_STATUS_SUCCESS) { + qdf_mem_free(param); + return A_ERROR; + } + + return A_OK; +} + +static inline A_STATUS +pktlog_enable_tgt(struct hif_opaque_softc *_scn, uint32_t log_state, + bool ini_triggered, uint8_t user_triggered) +{ + uint32_t types = 0; + + if (log_state & ATH_PKTLOG_TX) + types |= WMI_PKTLOG_EVENT_TX; + + if (log_state & ATH_PKTLOG_RX) + types |= WMI_PKTLOG_EVENT_RX; + + if (log_state & ATH_PKTLOG_RCFIND) + types |= WMI_PKTLOG_EVENT_RCF; + + if (log_state & ATH_PKTLOG_RCUPDATE) + types |= WMI_PKTLOG_EVENT_RCU; + + if (log_state & ATH_PKTLOG_SW_EVENT) + types |= WMI_PKTLOG_EVENT_SW; + + return pktlog_wma_post_msg(types, WMI_PDEV_PKTLOG_ENABLE_CMDID, + ini_triggered, user_triggered); +} + +static inline A_STATUS +wdi_pktlog_subscribe(struct cdp_pdev *cdp_pdev, int32_t log_state) +{ +#ifdef CONFIG_MCL + void *soc = cds_get_context(QDF_MODULE_ID_SOC); +#else + /*TODO: WIN implementation to get soc */ +#endif + + if (!cdp_pdev) { + qdf_print("Invalid pdev in %s\n", __func__); + return A_ERROR; + } + + if (log_state & ATH_PKTLOG_TX) { + if (cdp_wdi_event_sub(soc, cdp_pdev, 
&PKTLOG_TX_SUBSCRIBER, + WDI_EVENT_TX_STATUS)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_RX) { + if (cdp_wdi_event_sub(soc, cdp_pdev, &PKTLOG_RX_SUBSCRIBER, + WDI_EVENT_RX_DESC)) { + return A_ERROR; + } + if (cdp_wdi_event_sub(soc, cdp_pdev, + &PKTLOG_RX_REMOTE_SUBSCRIBER, + WDI_EVENT_RX_DESC_REMOTE)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_RCFIND) { + if (cdp_wdi_event_sub(soc, cdp_pdev, + &PKTLOG_RCFIND_SUBSCRIBER, + WDI_EVENT_RATE_FIND)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_RCUPDATE) { + if (cdp_wdi_event_sub(soc, cdp_pdev, + &PKTLOG_RCUPDATE_SUBSCRIBER, + WDI_EVENT_RATE_UPDATE)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_SW_EVENT) { + if (cdp_wdi_event_sub(soc, cdp_pdev, + &PKTLOG_SW_EVENT_SUBSCRIBER, + WDI_EVENT_SW_EVENT)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_LITE_T2H) { + if (cdp_wdi_event_sub(soc, cdp_pdev, + &PKTLOG_LITE_T2H_SUBSCRIBER, + WDI_EVENT_LITE_T2H)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_LITE_RX) { + if (cdp_wdi_event_sub(soc, cdp_pdev, + &PKTLOG_LITE_RX_SUBSCRIBER, + WDI_EVENT_LITE_RX)) { + return A_ERROR; + } + } + + return A_OK; +} + +void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data, + u_int16_t peer_id, uint32_t status) +{ + switch (event) { + case WDI_EVENT_TX_STATUS: + { + /* + * process TX message + */ + if (process_tx_info(pdev, log_data)) { + qdf_print("Unable to process TX info\n"); + return; + } + break; + } + case WDI_EVENT_RX_DESC: + { + /* + * process RX message for local frames + */ + if (process_rx_info(pdev, log_data)) { + qdf_print("Unable to process RX info\n"); + return; + } + break; + } + case WDI_EVENT_RX_DESC_REMOTE: + { + /* + * process RX message for remote frames + */ + if (process_rx_info_remote(pdev, log_data)) { + qdf_print("Unable to process RX info\n"); + return; + } + break; + } + case WDI_EVENT_RATE_FIND: + { + /* + * process RATE_FIND message + */ + if (process_rate_find(pdev, log_data)) { + 
qdf_print("Unable to process RC_FIND info\n"); + return; + } + break; + } + case WDI_EVENT_RATE_UPDATE: + { + /* + * process RATE_UPDATE message + */ + if (process_rate_update(pdev, log_data)) { + qdf_print("Unable to process RC_UPDATE\n"); + return; + } + break; + } + case WDI_EVENT_SW_EVENT: + { + /* + * process SW EVENT message + */ + if (process_sw_event(pdev, log_data)) { + qdf_print("Unable to process SW_EVENT\n"); + return; + } + break; + } + default: + break; + } +} + +void +lit_pktlog_callback(void *context, enum WDI_EVENT event, void *log_data, + u_int16_t peer_id, uint32_t status) +{ + switch (event) { + case WDI_EVENT_RX_DESC: + { + if (process_rx_desc_remote(context, log_data)) { + qdf_print("Unable to process RX info\n"); + return; + } + break; + } + case WDI_EVENT_LITE_T2H: + { + if (process_pktlog_lite(context, log_data, + PKTLOG_TYPE_LITE_T2H)) { + qdf_print("Unable to process lite_t2h\n"); + return; + } + break; + } + case WDI_EVENT_LITE_RX: + { + if (process_pktlog_lite(context, log_data, + PKTLOG_TYPE_LITE_RX)) { + qdf_print("Unable to process lite_rx\n"); + return; + } + break; + } + default: + break; + } +} + +A_STATUS +wdi_pktlog_unsubscribe(struct cdp_pdev *pdev, uint32_t log_state) +{ +#ifdef CONFIG_MCL + void *soc = cds_get_context(QDF_MODULE_ID_SOC); +#else + /* TODO: WIN implementation to get soc */ +#endif + + if (log_state & ATH_PKTLOG_TX) { + if (cdp_wdi_event_unsub(soc, pdev, + &PKTLOG_TX_SUBSCRIBER, + WDI_EVENT_TX_STATUS)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_RX) { + if (cdp_wdi_event_unsub(soc, pdev, + &PKTLOG_RX_SUBSCRIBER, WDI_EVENT_RX_DESC)) { + return A_ERROR; + } + if (cdp_wdi_event_unsub(soc, pdev, + &PKTLOG_RX_REMOTE_SUBSCRIBER, + WDI_EVENT_RX_DESC_REMOTE)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_RCFIND) { + if (cdp_wdi_event_unsub(soc, pdev, + &PKTLOG_RCFIND_SUBSCRIBER, + WDI_EVENT_RATE_FIND)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_RCUPDATE) { + if 
(cdp_wdi_event_unsub(soc, pdev, + &PKTLOG_RCUPDATE_SUBSCRIBER, + WDI_EVENT_RATE_UPDATE)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_RCUPDATE) { + if (cdp_wdi_event_unsub(soc, pdev, + &PKTLOG_SW_EVENT_SUBSCRIBER, + WDI_EVENT_SW_EVENT)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_LITE_T2H) { + if (cdp_wdi_event_unsub(soc, pdev, + &PKTLOG_LITE_T2H_SUBSCRIBER, + WDI_EVENT_LITE_T2H)) { + return A_ERROR; + } + } + if (log_state & ATH_PKTLOG_LITE_RX) { + if (cdp_wdi_event_unsub(soc, pdev, + &PKTLOG_LITE_RX_SUBSCRIBER, + WDI_EVENT_LITE_RX)) { + return A_ERROR; + } + } + + return A_OK; +} + +int pktlog_disable(struct hif_opaque_softc *scn) +{ + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info *pl_info; + uint8_t save_pktlog_state; + struct cdp_pdev *txrx_pdev = get_txrx_context(); + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("Invalid pl_dev"); + return -EINVAL; + } + + pl_info = pl_dev->pl_info; + + if (!pl_dev->pl_info) { + qdf_print("Invalid pl_info"); + return -EINVAL; + } + + if (!txrx_pdev) { + qdf_print("Invalid cdp_pdev"); + return -EINVAL; + } + + if (pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS || + pl_info->curr_pkt_state == + PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED || + pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE || + pl_info->curr_pkt_state == + PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE) + return -EBUSY; + + save_pktlog_state = pl_info->curr_pkt_state; + pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS; + + if (pktlog_wma_post_msg(0, WMI_PDEV_PKTLOG_DISABLE_CMDID, 0, 0)) { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("Failed to disable pktlog in target\n"); + return -EINVAL; + } + + if (pl_dev->is_pktlog_cb_subscribed && + wdi_pktlog_unsubscribe(txrx_pdev, pl_info->log_state)) { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("Cannot unsubscribe pktlog from the WDI\n"); + return -EINVAL; + } + pl_dev->is_pktlog_cb_subscribed = false; 
+ if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_START) + pl_info->curr_pkt_state = + PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED; + else + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + return 0; +} + +void pktlog_init(struct hif_opaque_softc *scn) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_info *pl_info; + + if (pl_dev == NULL || pl_dev->pl_info == NULL) { + qdf_print("pl_dev or pl_info is invalid\n"); + return; + } + + pl_info = pl_dev->pl_info; + + OS_MEMZERO(pl_info, sizeof(*pl_info)); + PKTLOG_LOCK_INIT(pl_info); + mutex_init(&pl_info->pktlog_mutex); + + pl_info->buf_size = PKTLOG_DEFAULT_BUFSIZE; + pl_info->buf = NULL; + pl_info->log_state = 0; + pl_info->init_saved_state = 0; + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR; + pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH; + pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH; + pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH; + pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH; + pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL; + pl_info->pktlen = 0; + pl_info->start_time_thruput = 0; + pl_info->start_time_per = 0; + pl_dev->vendor_cmd_send = false; + + if (pl_dev->callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) { + PKTLOG_TX_SUBSCRIBER.callback = pktlog_callback; + PKTLOG_RX_SUBSCRIBER.callback = pktlog_callback; + PKTLOG_RX_REMOTE_SUBSCRIBER.callback = pktlog_callback; + PKTLOG_RCFIND_SUBSCRIBER.callback = pktlog_callback; + PKTLOG_RCUPDATE_SUBSCRIBER.callback = pktlog_callback; + PKTLOG_SW_EVENT_SUBSCRIBER.callback = pktlog_callback; + } else if (pl_dev->callback_type == PKTLOG_LITE_CALLBACK_REGISTRATION) { + PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback; + PKTLOG_LITE_RX_SUBSCRIBER.callback = lit_pktlog_callback; + } +} + +static int __pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state, + bool ini_triggered, uint8_t user_triggered, + uint32_t 
is_iwpriv_command) +{ + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info *pl_info; + struct cdp_pdev *cdp_pdev; + int error; + + if (!scn) { + qdf_print("%s: Invalid scn context\n", __func__); + ASSERT(0); + return -EINVAL; + } + + pl_dev = get_pktlog_handle(); + if (!pl_dev) { + qdf_print("%s: Invalid pktlog context\n", __func__); + ASSERT(0); + return -EINVAL; + } + + cdp_pdev = get_txrx_context(); + if (!cdp_pdev) { + qdf_print("%s: Invalid txrx context\n", __func__); + ASSERT(0); + return -EINVAL; + } + + pl_info = pl_dev->pl_info; + if (!pl_info) { + qdf_print("%s: Invalid pl_info context\n", __func__); + ASSERT(0); + return -EINVAL; + } + + if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE) + return -EBUSY; + + pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS; + /* is_iwpriv_command : 0 indicates its a vendor command + * log_state: 0 indicates pktlog disable command + * vendor_cmd_send flag; false means no vendor pktlog enable + * command was sent previously + */ + if (is_iwpriv_command == 0 && log_state == 0 && + pl_dev->vendor_cmd_send == false) { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: pktlog operation not in progress\n", __func__); + return 0; + } + + if (!pl_dev->tgt_pktlog_alloced) { + if (pl_info->buf == NULL) { + error = pktlog_alloc_buf(scn); + + if (error != 0) { + pl_info->curr_pkt_state = + PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: pktlog buff alloc failed\n", + __func__); + return -ENOMEM; + } + + if (!pl_info->buf) { + pl_info->curr_pkt_state = + PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: pktlog buf alloc failed\n", + __func__); + ASSERT(0); + return -ENOMEM; + } + + } + + qdf_spin_lock_bh(&pl_info->log_lock); + pl_info->buf->bufhdr.version = CUR_PKTLOG_VER; + pl_info->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM; + pl_info->buf->wr_offset = 0; + pl_info->buf->rd_offset = -1; + /* These below variables are used by per packet stats*/ + pl_info->buf->bytes_written = 0; + 
pl_info->buf->msg_index = 1; + pl_info->buf->offset = PKTLOG_READ_OFFSET; + qdf_spin_unlock_bh(&pl_info->log_lock); + + pl_info->start_time_thruput = os_get_timestamp(); + pl_info->start_time_per = pl_info->start_time_thruput; + + pl_dev->tgt_pktlog_alloced = true; + } + if (log_state != 0) { + /* WDI subscribe */ + if (!pl_dev->is_pktlog_cb_subscribed) { + error = wdi_pktlog_subscribe(cdp_pdev, log_state); + if (error) { + pl_info->curr_pkt_state = + PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("Unable to subscribe to the WDI %s\n", + __func__); + return -EINVAL; + } + } else { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("Unable to subscribe %d to the WDI %s\n", + log_state, __func__); + return -EINVAL; + } + /* WMI command to enable pktlog on the firmware */ + if (pktlog_enable_tgt(scn, log_state, ini_triggered, + user_triggered)) { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("Device cannot be enabled, %s\n", __func__); + return -EINVAL; + } + pl_dev->is_pktlog_cb_subscribed = true; + + if (is_iwpriv_command == 0) + pl_dev->vendor_cmd_send = true; + } else { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + pl_dev->pl_funcs->pktlog_disable(scn); + if (is_iwpriv_command == 0) + pl_dev->vendor_cmd_send = false; + } + + pl_info->log_state = log_state; + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + return 0; +} + +int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state, + bool ini_triggered, uint8_t user_triggered, + uint32_t is_iwpriv_command) +{ + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info *pl_info; + int err; + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("%s: invalid pl_dev handle", __func__); + return -EINVAL; + } + + pl_info = pl_dev->pl_info; + + if (!pl_info) { + qdf_print("%s: invalid pl_info handle", __func__); + return -EINVAL; + } + + mutex_lock(&pl_info->pktlog_mutex); + err = __pktlog_enable(scn, log_state, ini_triggered, + user_triggered, 
is_iwpriv_command); + mutex_unlock(&pl_info->pktlog_mutex); + return err; +} + +#define ONE_MEGABYTE (1024 * 1024) +#define MAX_ALLOWED_PKTLOG_SIZE (16 * ONE_MEGABYTE) + +static int __pktlog_setsize(struct hif_opaque_softc *scn, int32_t size) +{ + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info *pl_info; + struct cdp_pdev *pdev; + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("%s: invalid pl_dev handle", __func__); + return -EINVAL; + } + + pl_info = pl_dev->pl_info; + + if (!pl_info) { + qdf_print("%s: invalid pl_dev handle", __func__); + return -EINVAL; + } + + pdev = get_txrx_context(); + + if (!pdev) { + qdf_print("%s: invalid pdev handle", __func__); + return -EINVAL; + } + + if (pl_info->curr_pkt_state < PKTLOG_OPR_NOT_IN_PROGRESS) { + qdf_print("%s: pktlog is not configured", __func__); + return -EBUSY; + } + + pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS; + + if (size < ONE_MEGABYTE || size > MAX_ALLOWED_PKTLOG_SIZE) { + qdf_print("%s: Cannot Set Pktlog Buffer size of %d bytes." 
+ "Min required is %d MB and Max allowed is %d MB.\n", + __func__, size, (ONE_MEGABYTE/ONE_MEGABYTE), + (MAX_ALLOWED_PKTLOG_SIZE/ONE_MEGABYTE)); + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: Invalid requested buff size", __func__); + return -EINVAL; + } + + if (size == pl_info->buf_size) { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: Pktlog Buff Size is already of same size.", + __func__); + return 0; + } + + if (pl_info->log_state) { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: Logging should be disabled before changing" + "buffer size.", __func__); + return -EINVAL; + } + + qdf_spin_lock_bh(&pl_info->log_lock); + if (pl_info->buf != NULL) { + if (pl_dev->is_pktlog_cb_subscribed && + wdi_pktlog_unsubscribe(pdev, pl_info->log_state)) { + pl_info->curr_pkt_state = + PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_spin_unlock_bh(&pl_info->log_lock); + qdf_print("Cannot unsubscribe pktlog from the WDI"); + return -EFAULT; + } + pktlog_release_buf(scn); + pl_dev->is_pktlog_cb_subscribed = false; + pl_dev->tgt_pktlog_alloced = false; + } + + if (size != 0) { + qdf_print("%s: New Pktlog Buff Size is %d\n", __func__, size); + pl_info->buf_size = size; + } + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_spin_unlock_bh(&pl_info->log_lock); + return 0; +} + +int pktlog_setsize(struct hif_opaque_softc *scn, int32_t size) +{ + struct pktlog_dev_t *pl_dev; + struct ath_pktlog_info *pl_info; + int status; + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("%s: invalid pl_dev handle", __func__); + return -EINVAL; + } + + pl_info = pl_dev->pl_info; + + if (!pl_info) { + qdf_print("%s: invalid pl_dev handle", __func__); + return -EINVAL; + } + + mutex_lock(&pl_info->pktlog_mutex); + status = __pktlog_setsize(scn, size); + mutex_unlock(&pl_info->pktlog_mutex); + + return status; +} + +int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff) +{ + struct pktlog_dev_t *pl_dev; + 
struct ath_pktlog_info *pl_info; + uint8_t save_pktlog_state; + + pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("%s: invalid pl_dev handle", __func__); + return -EINVAL; + } + + pl_info = pl_dev->pl_info; + + if (!pl_info) { + qdf_print("%s: invalid pl_dev handle", __func__); + return -EINVAL; + } + + if (!clear_buff) + return -EINVAL; + + if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE || + pl_info->curr_pkt_state == + PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE) + return -EBUSY; + + save_pktlog_state = pl_info->curr_pkt_state; + pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS; + + if (pl_info->log_state) { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: Logging should be disabled before clearing " + "pktlog buffer.", __func__); + return -EINVAL; + } + + if (pl_info->buf != NULL) { + if (pl_info->buf_size > 0) { + qdf_debug("pktlog buffer is cleared"); + memset(pl_info->buf, 0, pl_info->buf_size); + pl_dev->is_pktlog_cb_subscribed = false; + pl_dev->tgt_pktlog_alloced = false; + pl_info->buf->rd_offset = -1; + } else { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: pktlog buffer size is not proper. 
" + "Existing Buf size %d", __func__, + pl_info->buf_size); + return -EFAULT; + } + } else { + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + qdf_print("%s: pktlog buff is NULL", __func__); + return -EFAULT; + } + + if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE) + pl_info->curr_pkt_state = + PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE; + else + pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS; + + return 0; +} + +/** + * pktlog_process_fw_msg() - process packetlog message + * @buff: buffer + * + * Return: None + */ +void pktlog_process_fw_msg(uint32_t *buff, uint32_t len) +{ + uint32_t *pl_hdr; + uint32_t log_type; + struct cdp_pdev *pdev = get_txrx_context(); + struct ol_fw_data pl_fw_data; + + if (!pdev) { + qdf_print("%s: txrx_pdev is NULL", __func__); + return; + } + pl_hdr = buff; + pl_fw_data.data = pl_hdr; + pl_fw_data.len = len; + + log_type = + (*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + + if ((log_type == PKTLOG_TYPE_TX_CTRL) + || (log_type == PKTLOG_TYPE_TX_STAT) + || (log_type == PKTLOG_TYPE_TX_MSDU_ID) + || (log_type == PKTLOG_TYPE_TX_FRM_HDR) + || (log_type == PKTLOG_TYPE_TX_VIRT_ADDR)) + wdi_event_handler(WDI_EVENT_TX_STATUS, + pdev, &pl_fw_data); + else if (log_type == PKTLOG_TYPE_RC_FIND) + wdi_event_handler(WDI_EVENT_RATE_FIND, + pdev, &pl_fw_data); + else if (log_type == PKTLOG_TYPE_RC_UPDATE) + wdi_event_handler(WDI_EVENT_RATE_UPDATE, + pdev, &pl_fw_data); + else if (log_type == PKTLOG_TYPE_RX_STAT) + wdi_event_handler(WDI_EVENT_RX_DESC, + pdev, &pl_fw_data); + else if (log_type == PKTLOG_TYPE_SW_EVENT) + wdi_event_handler(WDI_EVENT_SW_EVENT, + pdev, &pl_fw_data); +} + +#if defined(QCA_WIFI_3_0_ADRASTEA) +static inline int pktlog_nbuf_check_sanity(qdf_nbuf_t nbuf) +{ + int rc = 0; /* sane */ + + if ((!nbuf) || + (nbuf->data < nbuf->head) || + ((nbuf->data + skb_headlen(nbuf)) > skb_end_pointer(nbuf))) + rc = -EINVAL; + + return rc; +} +/** + * pktlog_t2h_msg_handler() - Target 
to host message handler + * @context: pdev context + * @pkt: HTC packet + * + * Return: None + */ +static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt) +{ + struct pktlog_dev_t *pdev = (struct pktlog_dev_t *)context; + qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext; + uint32_t *msg_word; + uint32_t msg_len; + + /* check for sanity of the packet, have seen corrupted pkts */ + if (pktlog_nbuf_check_sanity(pktlog_t2h_msg)) { + qdf_print("%s: packet 0x%pK corrupted? Leaking...", + __func__, pktlog_t2h_msg); + /* do not free; may crash! */ + QDF_ASSERT(0); + return; + } + + /* check for successful message reception */ + if (pkt->Status != QDF_STATUS_SUCCESS) { + if (pkt->Status != QDF_STATUS_E_CANCELED) + pdev->htc_err_cnt++; + qdf_nbuf_free(pktlog_t2h_msg); + return; + } + + /* confirm alignment */ + qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0); + + msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg); + msg_len = qdf_nbuf_len(pktlog_t2h_msg); + pktlog_process_fw_msg(msg_word, msg_len); + + qdf_nbuf_free(pktlog_t2h_msg); +} + +/** + * pktlog_tx_resume_handler() - resume callback + * @context: pdev context + * + * Return: None + */ +static void pktlog_tx_resume_handler(void *context) +{ + qdf_print("%s: Not expected", __func__); + qdf_assert(0); +} + +/** + * pktlog_h2t_send_complete() - send complete indication + * @context: pdev context + * @htc_pkt: HTC packet + * + * Return: None + */ +static void pktlog_h2t_send_complete(void *context, HTC_PACKET *htc_pkt) +{ + qdf_print("%s: Not expected", __func__); + qdf_assert(0); +} + +/** + * pktlog_h2t_full() - queue full indication + * @context: pdev context + * @pkt: HTC packet + * + * Return: HTC action + */ +static enum htc_send_full_action pktlog_h2t_full(void *context, HTC_PACKET *pkt) +{ + return HTC_SEND_FULL_KEEP; +} + +/** + * pktlog_htc_connect_service() - create new endpoint for packetlog + * @pdev - pktlog pdev + * + * Return: 0 for success/failure + */ 
+static int pktlog_htc_connect_service(struct pktlog_dev_t *pdev) +{ + struct htc_service_connect_req connect; + struct htc_service_connect_resp response; + QDF_STATUS status; + + qdf_mem_set(&connect, sizeof(connect), 0); + qdf_mem_set(&response, sizeof(response), 0); + + connect.pMetaData = NULL; + connect.MetaDataLength = 0; + connect.EpCallbacks.pContext = pdev; + connect.EpCallbacks.EpTxComplete = pktlog_h2t_send_complete; + connect.EpCallbacks.EpTxCompleteMultiple = NULL; + connect.EpCallbacks.EpRecv = pktlog_t2h_msg_handler; + connect.EpCallbacks.ep_resume_tx_queue = pktlog_tx_resume_handler; + + /* rx buffers currently are provided by HIF, not by EpRecvRefill */ + connect.EpCallbacks.EpRecvRefill = NULL; + connect.EpCallbacks.RecvRefillWaterMark = 1; + /* N/A, fill is done by HIF */ + + connect.EpCallbacks.EpSendFull = pktlog_h2t_full; + /* + * Specify how deep to let a queue get before htc_send_pkt will + * call the EpSendFull function due to excessive send queue depth. + */ + connect.MaxSendQueueDepth = PKTLOG_MAX_SEND_QUEUE_DEPTH; + + /* disable flow control for HTT data message service */ + connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL; + + /* connect to control service */ + connect.service_id = PACKET_LOG_SVC; + + status = htc_connect_service(pdev->htc_pdev, &connect, &response); + + if (status != QDF_STATUS_SUCCESS) { + pdev->mt_pktlog_enabled = false; + return -EIO; /* failure */ + } + + pdev->htc_endpoint = response.Endpoint; + pdev->mt_pktlog_enabled = true; + + return 0; /* success */ +} + +/** + * pktlog_htc_attach() - attach pktlog HTC service + * + * Return: 0 for success/failure + */ +int pktlog_htc_attach(void) +{ + struct pktlog_dev_t *pl_pdev = get_pktlog_handle(); + void *htc_pdev = cds_get_context(QDF_MODULE_ID_HTC); + + if ((!pl_pdev) || (!htc_pdev)) { + qdf_print("Invalid pl_dev or htc_pdev handle"); + return -EINVAL; + } + + pl_pdev->htc_pdev = htc_pdev; + return pktlog_htc_connect_service(pl_pdev); +} +#else 
+int pktlog_htc_attach(void) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + + if (!pl_dev) { + qdf_print("Invalid pl_dev handle"); + return -EINVAL; + } + + pl_dev->mt_pktlog_enabled = false; + return 0; +} +#endif +#endif /* REMOVE_PKT_LOG */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_internal.c b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_internal.c new file mode 100644 index 0000000000000000000000000000000000000000..73986df1851f9b7688e58b7820f6a92911b40b80 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/pktlog/pktlog_internal.c @@ -0,0 +1,1415 @@ +/* + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef REMOVE_PKT_LOG +#include "ol_txrx_types.h" +#include "ol_htt_tx_api.h" +#include "ol_tx_desc.h" +#include "qdf_mem.h" +#include "htt.h" +#include "htt_internal.h" +#include "pktlog_ac_i.h" +#include "wma_api.h" +#include "wlan_logging_sock_svc.h" + +#define TX_DESC_ID_LOW_MASK 0xffff +#define TX_DESC_ID_LOW_SHIFT 0 +#define TX_DESC_ID_HIGH_MASK 0xffff0000 +#define TX_DESC_ID_HIGH_SHIFT 16 + +void pktlog_getbuf_intsafe(struct ath_pktlog_arg *plarg) +{ + struct ath_pktlog_buf *log_buf; + int32_t buf_size; + struct ath_pktlog_hdr *log_hdr; + int32_t cur_wr_offset; + char *log_ptr; + struct ath_pktlog_info *pl_info; + uint16_t log_type; + size_t log_size; + uint32_t flags; +#ifdef HELIUMPLUS + uint8_t mac_id; +#endif + + if (!plarg) { + printk("Invalid parg in %s\n", __func__); + return; + } + + pl_info = plarg->pl_info; +#ifdef HELIUMPLUS + mac_id = plarg->macId; + log_type = plarg->log_type; +#else + log_type = plarg->log_type; +#endif + log_size = plarg->log_size; + log_buf = pl_info->buf; + flags = plarg->flags; + + if (!log_buf) { + printk("Invalid log_buf in %s\n", __func__); + return; + } + + + buf_size = pl_info->buf_size; + cur_wr_offset = log_buf->wr_offset; + /* Move read offset to the next entry if there is a buffer overlap */ + if (log_buf->rd_offset >= 0) { + if ((cur_wr_offset <= log_buf->rd_offset) + && (cur_wr_offset + sizeof(struct ath_pktlog_hdr)) > + log_buf->rd_offset) { + PKTLOG_MOV_RD_IDX(log_buf->rd_offset, log_buf, + buf_size); + } + } else { + log_buf->rd_offset = cur_wr_offset; + } + + log_hdr = (struct ath_pktlog_hdr *)(log_buf->log_data + cur_wr_offset); + + log_hdr->flags = flags; +#ifdef 
HELIUMPLUS + log_hdr->macId = mac_id; + log_hdr->log_type = log_type; +#else + log_hdr->log_type = log_type; +#endif + log_hdr->size = (uint16_t) log_size; + log_hdr->missed_cnt = plarg->missed_cnt; + log_hdr->timestamp = plarg->timestamp; +#ifdef HELIUMPLUS + log_hdr->type_specific_data = plarg->type_specific_data; +#endif + cur_wr_offset += sizeof(*log_hdr); + + if ((buf_size - cur_wr_offset) < log_size) { + while ((cur_wr_offset <= log_buf->rd_offset) + && (log_buf->rd_offset < buf_size)) { + PKTLOG_MOV_RD_IDX(log_buf->rd_offset, log_buf, + buf_size); + } + cur_wr_offset = 0; + } + + while ((cur_wr_offset <= log_buf->rd_offset) + && (cur_wr_offset + log_size) > log_buf->rd_offset) { + PKTLOG_MOV_RD_IDX(log_buf->rd_offset, log_buf, buf_size); + } + + log_ptr = &(log_buf->log_data[cur_wr_offset]); + cur_wr_offset += log_hdr->size; + + log_buf->wr_offset = ((buf_size - cur_wr_offset) >= + sizeof(struct ath_pktlog_hdr)) ? cur_wr_offset : + 0; + + plarg->buf = log_ptr; +} + +char *pktlog_getbuf(struct pktlog_dev_t *pl_dev, + struct ath_pktlog_info *pl_info, + size_t log_size, struct ath_pktlog_hdr *pl_hdr) +{ + struct ath_pktlog_arg plarg = { 0, }; + uint8_t flags = 0; + + plarg.pl_info = pl_info; +#ifdef HELIUMPLUS + plarg.macId = pl_hdr->macId; + plarg.log_type = pl_hdr->log_type; +#else + plarg.log_type = pl_hdr->log_type; +#endif + plarg.log_size = log_size; + plarg.flags = pl_hdr->flags; + plarg.missed_cnt = pl_hdr->missed_cnt; + plarg.timestamp = pl_hdr->timestamp; +#ifdef HELIUMPLUS + plarg.type_specific_data = pl_hdr->type_specific_data; +#endif + if (flags & PHFLAGS_INTERRUPT_CONTEXT) { + /* + * We are already in interrupt context, no need to make it + * intsafe. call the function directly. 
+ */ + pktlog_getbuf_intsafe(&plarg); + } else { + PKTLOG_LOCK(pl_info); + pktlog_getbuf_intsafe(&plarg); + PKTLOG_UNLOCK(pl_info); + } + + return plarg.buf; +} + +static struct txctl_frm_hdr frm_hdr; + +#ifndef HELIUMPLUS +static void process_ieee_hdr(void *data) +{ + uint8_t dir; + struct ieee80211_frame *wh = (struct ieee80211_frame *)(data); + + frm_hdr.framectrl = *(uint16_t *) (wh->i_fc); + frm_hdr.seqctrl = *(uint16_t *) (wh->i_seq); + dir = (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK); + + if (dir == IEEE80211_FC1_DIR_TODS) { + frm_hdr.bssid_tail = + (wh->i_addr1[IEEE80211_ADDR_LEN - 2] << 8) | (wh-> + i_addr1 + [IEEE80211_ADDR_LEN + - 1]); + frm_hdr.sa_tail = + (wh->i_addr2[IEEE80211_ADDR_LEN - 2] << 8) | (wh-> + i_addr2 + [IEEE80211_ADDR_LEN + - 1]); + frm_hdr.da_tail = + (wh->i_addr3[IEEE80211_ADDR_LEN - 2] << 8) | (wh-> + i_addr3 + [IEEE80211_ADDR_LEN + - 1]); + } else if (dir == IEEE80211_FC1_DIR_FROMDS) { + frm_hdr.bssid_tail = + (wh->i_addr2[IEEE80211_ADDR_LEN - 2] << 8) | (wh-> + i_addr2 + [IEEE80211_ADDR_LEN + - 1]); + frm_hdr.sa_tail = + (wh->i_addr3[IEEE80211_ADDR_LEN - 2] << 8) | (wh-> + i_addr3 + [IEEE80211_ADDR_LEN + - 1]); + frm_hdr.da_tail = + (wh->i_addr1[IEEE80211_ADDR_LEN - 2] << 8) | (wh-> + i_addr1 + [IEEE80211_ADDR_LEN + - 1]); + } else { + frm_hdr.bssid_tail = + (wh->i_addr3[IEEE80211_ADDR_LEN - 2] << 8) | (wh-> + i_addr3 + [IEEE80211_ADDR_LEN + - 1]); + frm_hdr.sa_tail = + (wh->i_addr2[IEEE80211_ADDR_LEN - 2] << 8) | (wh-> + i_addr2 + [IEEE80211_ADDR_LEN + - 1]); + frm_hdr.da_tail = + (wh->i_addr1[IEEE80211_ADDR_LEN - 2] << 8) | (wh-> + i_addr1 + [IEEE80211_ADDR_LEN + - 1]); + } +} + +/** + * fill_ieee80211_hdr_data() - fill ieee802.11 data header + * @txrx_pdev: txrx pdev + * @pl_msdu_info: msdu info + * @data: data received from event + * + * Return: none + */ +/* TODO: Platform specific function */ +static void +fill_ieee80211_hdr_data(struct cdp_pdev *pdev, + struct ath_pktlog_msdu_info *pl_msdu_info, void *data) +{ + uint32_t i; + 
uint32_t *htt_tx_desc; + struct ol_tx_desc_t *tx_desc; + uint8_t msdu_id_offset = MSDU_ID_INFO_ID_OFFSET; + uint16_t tx_desc_id; + uint32_t *msdu_id_info = (uint32_t *) + ((void *)data + sizeof(struct ath_pktlog_hdr)); + uint32_t *msdu_id = (uint32_t *) ((char *)msdu_id_info + + msdu_id_offset); + uint8_t *addr, *vap_addr; + uint8_t vdev_id; + qdf_nbuf_t netbuf; + uint32_t len; + struct ol_txrx_pdev_t *txrx_pdev = (struct ol_txrx_pdev_t *)pdev; + + + pl_msdu_info->num_msdu = *msdu_id_info; + pl_msdu_info->priv_size = sizeof(uint32_t) * + pl_msdu_info->num_msdu + sizeof(uint32_t); + + if (pl_msdu_info->num_msdu > MAX_PKT_INFO_MSDU_ID) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Invalid num_msdu count", + __func__); + qdf_assert(0); + return; + } + for (i = 0; i < pl_msdu_info->num_msdu; i++) { + /* + * Handle big endianness + * Increment msdu_id once after retrieving + * lower 16 bits and uppper 16 bits + */ + if (!(i % 2)) { + tx_desc_id = ((*msdu_id & TX_DESC_ID_LOW_MASK) + >> TX_DESC_ID_LOW_SHIFT); + } else { + tx_desc_id = ((*msdu_id & TX_DESC_ID_HIGH_MASK) + >> TX_DESC_ID_HIGH_SHIFT); + msdu_id += 1; + } + if (tx_desc_id >= txrx_pdev->tx_desc.pool_size) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: drop due to invalid msdu id = %x\n", + __func__, tx_desc_id); + return; + } + tx_desc = ol_tx_desc_find(txrx_pdev, tx_desc_id); + qdf_assert(tx_desc); + netbuf = tx_desc->netbuf; + htt_tx_desc = (uint32_t *) tx_desc->htt_tx_desc; + qdf_assert(htt_tx_desc); + + qdf_nbuf_peek_header(netbuf, &addr, &len); + + if (len < (2 * IEEE80211_ADDR_LEN)) { + qdf_print("TX frame does not have a valid address\n"); + return; + } + /* Adding header information for the TX data frames */ + vdev_id = (uint8_t) (*(htt_tx_desc + + HTT_TX_VDEV_ID_WORD) >> + HTT_TX_VDEV_ID_SHIFT) & + HTT_TX_VDEV_ID_MASK; + + vap_addr = wma_get_vdev_address_by_vdev_id(vdev_id); + + frm_hdr.da_tail = (addr[IEEE80211_ADDR_LEN - 2] << 8) | + (addr[IEEE80211_ADDR_LEN - 1]); 
+ frm_hdr.sa_tail = + (addr[2 * IEEE80211_ADDR_LEN - 2] << 8) | + (addr[2 * IEEE80211_ADDR_LEN - 1]); + if (vap_addr) { + frm_hdr.bssid_tail = + (vap_addr[IEEE80211_ADDR_LEN - 2] << 8) | + (vap_addr[IEEE80211_ADDR_LEN - 1]); + } else { + frm_hdr.bssid_tail = 0x0000; + } + pl_msdu_info->priv.msdu_len[i] = *(htt_tx_desc + + HTT_TX_MSDU_LEN_DWORD) + & HTT_TX_MSDU_LEN_MASK; + /* + * Add more information per MSDU + * e.g., protocol information + */ + } + +} +#endif + +#ifdef HELIUMPLUS +A_STATUS process_tx_info(struct cdp_pdev *txrx_pdev, void *data) +{ + /* + * Must include to process different types + * TX_CTL, TX_STATUS, TX_MSDU_ID, TX_FRM_HDR + */ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_info *pl_info; + uint32_t *pl_tgt_hdr; + struct ol_fw_data *fw_data; + uint32_t len; + + if (!txrx_pdev) { + printk("Invalid pdev in %s\n", __func__); + return A_ERROR; + } + + if (!pl_dev) { + pr_err("Invalid pktlog handle in %s\n", __func__); + qdf_assert(pl_dev); + return A_ERROR; + } + + qdf_assert(data); + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s\n", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_tgt_hdr = (uint32_t *)fw_data->data; + /* + * Makes the short words (16 bits) portable b/w little endian + * and big endian + */ + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.flags |= PKTLOG_HDR_SIZE_16; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + 
ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.macId = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MAC_ID_OFFSET) & + ATH_PKTLOG_HDR_MAC_ID_MASK) >> + ATH_PKTLOG_HDR_MAC_ID_SHIFT; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + pl_hdr.type_specific_data = + *(pl_tgt_hdr + ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET); + pl_info = pl_dev->pl_info; + + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + + if (pl_hdr.log_type == PKTLOG_TYPE_TX_CTRL) { + size_t log_size = sizeof(frm_hdr) + pl_hdr.size; + void *txdesc_hdr_ctl = (void *) + pktlog_getbuf(pl_dev, pl_info, log_size, &pl_hdr); + qdf_assert(txdesc_hdr_ctl); + qdf_assert(pl_hdr.size < (370 * sizeof(u_int32_t))); + + qdf_mem_copy(txdesc_hdr_ctl, &frm_hdr, sizeof(frm_hdr)); + qdf_mem_copy((char *)txdesc_hdr_ctl + sizeof(frm_hdr), + ((void *)fw_data->data + + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + pl_hdr.size = log_size; + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, + txdesc_hdr_ctl); + } + + if (pl_hdr.log_type == PKTLOG_TYPE_TX_STAT) { + struct ath_pktlog_tx_status txstat_log; + size_t log_size = pl_hdr.size; + + txstat_log.ds_status = (void *) + pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + qdf_assert(txstat_log.ds_status); + qdf_mem_copy(txstat_log.ds_status, + ((void *)fw_data->data + + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + /* TODO: MCL specific API */ + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, + txstat_log.ds_status); + } + return A_OK; +} + +#else +A_STATUS process_tx_info(struct cdp_pdev *txrx_pdev, void *data) +{ + /* + * Must include to process different types + * TX_CTL, TX_STATUS, 
TX_MSDU_ID, TX_FRM_HDR + */ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_info *pl_info; + uint32_t *pl_tgt_hdr; + struct ol_fw_data *fw_data; + uint32_t len; + + if (!txrx_pdev) { + qdf_print("Invalid pdev in %s\n", __func__); + return A_ERROR; + } + + if (!pl_dev) { + pr_err("Invalid pktlog handle in %s\n", __func__); + qdf_assert(pl_dev); + return A_ERROR; + } + + qdf_assert(data); + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s\n", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_tgt_hdr = (uint32_t *)fw_data->data; + /* + * Makes the short words (16 bits) portable b/w little endian + * and big endian + */ + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + + pl_info = pl_dev->pl_info; + + if (pl_hdr.log_type == PKTLOG_TYPE_TX_FRM_HDR) { + /* Valid only for the TX CTL */ + process_ieee_hdr(fw_data->data + sizeof(pl_hdr)); + } + + if (pl_hdr.log_type == PKTLOG_TYPE_TX_VIRT_ADDR) { + uint32_t desc_id = (uint32_t) *((uint32_t *)(fw_data->data + + 
sizeof(pl_hdr))); + uint32_t vdev_id = desc_id; + + /* if the pkt log msg is for the bcn frame the vdev id + * is piggybacked in desc_id and the MSB of the desc ID + * would be set to FF + */ +#define BCN_DESC_ID 0xFF + if ((desc_id >> 24) == BCN_DESC_ID) { + void *data; + uint32_t buf_size; + + vdev_id &= 0x00FFFFFF; + /* TODO: MCL specific API */ + data = wma_get_beacon_buffer_by_vdev_id(vdev_id, + &buf_size); + if (data) { + /* TODO: platform specific API */ + process_ieee_hdr(data); + qdf_mem_free(data); + } + } else { + /* + * TODO: get the hdr content for mgmt frames from + * Tx mgmt desc pool + */ + } + } + + if (pl_hdr.log_type == PKTLOG_TYPE_TX_CTRL) { + struct ath_pktlog_txctl txctl_log; + size_t log_size = sizeof(txctl_log.priv); + + txctl_log.txdesc_hdr_ctl = (void *)pktlog_getbuf(pl_dev, + pl_info, + log_size, + &pl_hdr); + + if (!txctl_log.txdesc_hdr_ctl) { + printk + ("failed to get buf for txctl_log.txdesc_hdr_ctl\n"); + return A_ERROR; + } + + /* + * frm hdr is currently Valid only for local frames + * Add capability to include the fmr hdr for remote frames + */ + txctl_log.priv.frm_hdr = frm_hdr; + qdf_assert(txctl_log.priv.txdesc_ctl); + qdf_assert(pl_hdr.size < sizeof(txctl_log.priv.txdesc_ctl)); + pl_hdr.size = (pl_hdr.size > sizeof(txctl_log.priv.txdesc_ctl)) + ? 
sizeof(txctl_log.priv.txdesc_ctl) : + pl_hdr.size; + + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + qdf_mem_copy((void *)&txctl_log.priv.txdesc_ctl, + ((void *)fw_data->data + + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + qdf_assert(txctl_log.txdesc_hdr_ctl); + qdf_mem_copy(txctl_log.txdesc_hdr_ctl, &txctl_log.priv, + sizeof(txctl_log.priv)); + pl_hdr.size = log_size; + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, + txctl_log.txdesc_hdr_ctl); + /* Add Protocol information and HT specific information */ + } + + if (pl_hdr.log_type == PKTLOG_TYPE_TX_STAT) { + struct ath_pktlog_tx_status txstat_log; + size_t log_size = pl_hdr.size; + + txstat_log.ds_status = (void *) + pktlog_getbuf(pl_dev, pl_info, log_size, &pl_hdr); + qdf_assert(txstat_log.ds_status); + qdf_mem_copy(txstat_log.ds_status, + ((void *)fw_data->data + + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, + txstat_log.ds_status); + } + + if (pl_hdr.log_type == PKTLOG_TYPE_TX_MSDU_ID) { + struct ath_pktlog_msdu_info pl_msdu_info; + size_t log_size; + + qdf_mem_set(&pl_msdu_info, sizeof(pl_msdu_info), 0); + log_size = sizeof(pl_msdu_info.priv); + + if (pl_dev->mt_pktlog_enabled == false) + fill_ieee80211_hdr_data(txrx_pdev, + &pl_msdu_info, fw_data->data); + + pl_msdu_info.ath_msdu_info = pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + qdf_mem_copy((void *)&pl_msdu_info.priv.msdu_id_info, + ((void *)fw_data->data + + sizeof(struct ath_pktlog_hdr)), + sizeof(pl_msdu_info.priv.msdu_id_info)); + qdf_mem_copy(pl_msdu_info.ath_msdu_info, &pl_msdu_info.priv, + sizeof(pl_msdu_info.priv)); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, + pl_msdu_info.ath_msdu_info); + } + + return A_OK; +} +#endif + +/* TODO: hardware dependent function */ +A_STATUS process_rx_info_remote(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_info *pl_info; + struct 
htt_host_rx_desc_base *rx_desc; + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_rx_info rxstat_log; + size_t log_size; + struct ol_rx_remote_data *r_data = (struct ol_rx_remote_data *)data; + qdf_nbuf_t msdu; + + if (!pdev || !r_data || !pl_dev) { + qdf_print("%s: Invalid handle", __func__); + return A_ERROR; + } + + pl_info = pl_dev->pl_info; + msdu = r_data->msdu; + + while (msdu) { + rx_desc = + (struct htt_host_rx_desc_base *)(qdf_nbuf_data(msdu)) - 1; + log_size = + sizeof(*rx_desc) - sizeof(struct htt_host_fw_desc_base); + + /* + * Construct the pktlog header pl_hdr + * Because desc is DMA'd to the host memory + */ + pl_hdr.flags = (1 << PKTLOG_FLG_FRM_TYPE_REMOTE_S); + pl_hdr.missed_cnt = 0; +#if defined(HELIUMPLUS) + pl_hdr.macId = r_data->mac_id; + pl_hdr.log_type = PKTLOG_TYPE_RX_STAT; + pl_hdr.flags |= PKTLOG_HDR_SIZE_16; +#else + pl_hdr.log_type = PKTLOG_TYPE_RX_STAT; +#endif + pl_hdr.size = sizeof(*rx_desc) - + sizeof(struct htt_host_fw_desc_base); +#if defined(HELIUMPLUS) + pl_hdr.timestamp = + rx_desc->ppdu_end.rx_pkt_end.phy_timestamp_1_lower_32; + pl_hdr.type_specific_data = 0xDEADAA; +#else + pl_hdr.timestamp = rx_desc->ppdu_end.tsf_timestamp; +#endif /* !defined(HELIUMPLUS) */ + rxstat_log.rx_desc = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + qdf_mem_copy(rxstat_log.rx_desc, (void *)rx_desc + + sizeof(struct htt_host_fw_desc_base), pl_hdr.size); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, + rxstat_log.rx_desc); + msdu = qdf_nbuf_next(msdu); + } + return A_OK; +} + +#ifdef HELIUMPLUS +A_STATUS process_rx_info(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_info *pl_info; + struct ath_pktlog_rx_info rxstat_log; + struct ath_pktlog_hdr pl_hdr; + size_t log_size; + uint32_t *pl_tgt_hdr; + struct ol_fw_data *fw_data; + uint32_t len; + + if (!pdev) { + printk("Invalid pdev in %s", __func__); + return A_ERROR; + } + + pl_dev = ((struct ol_txrx_pdev_t *)pdev)->pl_dev; + if 
(!pl_dev) { + printk("Invalid pl_dev in %s", __func__); + return A_ERROR; + } + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s\n", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_info = pl_dev->pl_info; + pl_tgt_hdr = (uint32_t *)fw_data->data; + + qdf_mem_set(&pl_hdr, sizeof(pl_hdr), 0); + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.macId = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MAC_ID_OFFSET) & + ATH_PKTLOG_HDR_MAC_ID_MASK) >> + ATH_PKTLOG_HDR_MAC_ID_SHIFT; + pl_hdr.flags |= PKTLOG_HDR_SIZE_16; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + + log_size = pl_hdr.size; + rxstat_log.rx_desc = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + qdf_mem_copy(rxstat_log.rx_desc, + (void *)fw_data->data + sizeof(struct ath_pktlog_hdr), + pl_hdr.size); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rxstat_log.rx_desc); + + return A_OK; +} + +#else +A_STATUS process_rx_info(void 
*pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_info *pl_info; + struct ath_pktlog_rx_info rxstat_log; + struct ath_pktlog_hdr pl_hdr; + size_t log_size; + uint32_t *pl_tgt_hdr; + struct ol_fw_data *fw_data; + uint32_t len; + + if (!pdev) { + printk("Invalid pdev in %s", __func__); + return A_ERROR; + } + + pl_dev = ((struct ol_txrx_pdev_t *)pdev)->pl_dev; + if (!pl_dev) { + printk("Invalid pl_dev in %s", __func__); + return A_ERROR; + } + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s\n", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_info = pl_dev->pl_info; + pl_tgt_hdr = (uint32_t *)fw_data->data; + qdf_mem_set(&pl_hdr, sizeof(pl_hdr), 0); + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + + log_size = pl_hdr.size; + rxstat_log.rx_desc = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + qdf_mem_copy(rxstat_log.rx_desc, + (void *)fw_data->data + 
sizeof(struct ath_pktlog_hdr), + pl_hdr.size); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rxstat_log.rx_desc); + + return A_OK; +} +#endif + +#ifdef HELIUMPLUS +A_STATUS process_rate_find(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_info *pl_info; + size_t log_size; + uint32_t len; + struct ol_fw_data *fw_data; + + /* + * Will be uncommented when the rate control find + * for pktlog is implemented in the firmware. + * Currently derived from the TX PPDU status + */ + struct ath_pktlog_rc_find rcf_log; + uint32_t *pl_tgt_hdr; + + if (!pdev || !data || !pl_dev) { + qdf_print("%s: Invalid handle", __func__); + return A_ERROR; + } + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s\n", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_tgt_hdr = (uint32_t *)fw_data->data; + /* + * Makes the short words (16 bits) portable b/w little endian + * and big endian + */ + + qdf_mem_set(&pl_hdr, sizeof(pl_hdr), 0); + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.macId = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MAC_ID_OFFSET) & + ATH_PKTLOG_HDR_MAC_ID_MASK) 
>> + ATH_PKTLOG_HDR_MAC_ID_SHIFT; + pl_hdr.flags |= PKTLOG_HDR_SIZE_16; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + pl_info = pl_dev->pl_info; + log_size = pl_hdr.size; + rcf_log.rcFind = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + qdf_mem_copy(rcf_log.rcFind, + ((char *)fw_data->data + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rcf_log.rcFind); + + return A_OK; +} + +#else +A_STATUS process_rate_find(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_info *pl_info; + size_t log_size; + uint32_t len; + struct ol_fw_data *fw_data; + + /* + * Will be uncommented when the rate control find + * for pktlog is implemented in the firmware. 
+ * Currently derived from the TX PPDU status + */ + struct ath_pktlog_rc_find rcf_log; + uint32_t *pl_tgt_hdr; + + if (!pdev || !data || !pl_dev) { + qdf_print("%s: Invalid handle", __func__); + return A_ERROR; + } + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s\n", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_tgt_hdr = (uint32_t *)fw_data->data; + /* + * Makes the short words (16 bits) portable b/w little endian + * and big endian + */ + + qdf_mem_set(&pl_hdr, sizeof(pl_hdr), 0); + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + pl_info = pl_dev->pl_info; + log_size = pl_hdr.size; + rcf_log.rcFind = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + qdf_mem_copy(rcf_log.rcFind, + ((char *)fw_data->data + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rcf_log.rcFind); + + return A_OK; +} +#endif + +#ifdef HELIUMPLUS +A_STATUS 
process_sw_event(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_info *pl_info; + size_t log_size; + uint32_t len; + struct ol_fw_data *fw_data; + + /* + * Will be uncommented when the rate control find + * for pktlog is implemented in the firmware. + * Currently derived from the TX PPDU status + */ + struct ath_pktlog_sw_event sw_event; + uint32_t *pl_tgt_hdr; + + if (!pdev) { + qdf_print("Invalid pdev in %s\n", __func__); + return A_ERROR; + } + if (!data) { + qdf_print("Invalid data in %s\n", __func__); + return A_ERROR; + } + if (!pl_dev) { + qdf_print("Invalid pl_dev in %s", __func__); + return A_ERROR; + } + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s\n", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_tgt_hdr = (uint32_t *)fw_data->data; + /* + * Makes the short words (16 bits) portable b/w little endian + * and big endian + */ + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.macId = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MAC_ID_OFFSET) & + ATH_PKTLOG_HDR_MAC_ID_MASK) >> + ATH_PKTLOG_HDR_MAC_ID_SHIFT; + pl_hdr.size = (*(pl_tgt_hdr + 
ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + + pl_hdr.type_specific_data = + *(pl_tgt_hdr + ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET); + pl_info = pl_dev->pl_info; + log_size = pl_hdr.size; + sw_event.sw_event = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + qdf_mem_copy(sw_event.sw_event, + ((char *)fw_data->data + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, sw_event.sw_event); + + return A_OK; +} + +#else +A_STATUS process_sw_event(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_info *pl_info; + size_t log_size; + uint32_t len; + struct ol_fw_data *fw_data; + + /* + * Will be uncommented when the rate control find + * for pktlog is implemented in the firmware. 
+ * Currently derived from the TX PPDU status + */ + struct ath_pktlog_sw_event sw_event; + uint32_t *pl_tgt_hdr; + + if (!pdev) { + qdf_print("Invalid pdev in %s\n", __func__); + return A_ERROR; + } + if (!data) { + qdf_print("Invalid data in %s\n", __func__); + return A_ERROR; + } + if (!pl_dev) { + qdf_print("Invalid pl_dev in %s", __func__); + return A_ERROR; + } + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s\n", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_tgt_hdr = (uint32_t *)fw_data->data; + /* + * Makes the short words (16 bits) portable b/w little endian + * and big endian + */ + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + pl_info = pl_dev->pl_info; + log_size = pl_hdr.size; + sw_event.sw_event = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + qdf_mem_copy(sw_event.sw_event, + ((char *)fw_data->data + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + + 
cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, sw_event.sw_event); + + return A_OK; +} +#endif + +#ifdef HELIUMPLUS +A_STATUS process_rate_update(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + size_t log_size; + struct ath_pktlog_info *pl_info; + struct ath_pktlog_rc_update rcu_log; + uint32_t *pl_tgt_hdr; + struct ol_fw_data *fw_data; + uint32_t len; + + if (!pdev || !data || !pl_dev) { + qdf_print("%s: Invalid handle", __func__); + return A_ERROR; + } + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s\n", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_tgt_hdr = (uint32_t *)fw_data->data; + /* + * Makes the short words (16 bits) portable b/w little endian + * and big endian + */ + qdf_mem_set(&pl_hdr, sizeof(pl_hdr), 0); + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.macId = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MAC_ID_OFFSET) & + ATH_PKTLOG_HDR_MAC_ID_MASK) >> + ATH_PKTLOG_HDR_MAC_ID_SHIFT; + pl_hdr.flags |= PKTLOG_HDR_SIZE_16; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + 
pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + log_size = pl_hdr.size; + pl_info = pl_dev->pl_info; + + /* + * Will be uncommented when the rate control update + * for pktlog is implemented in the firmware. + * Currently derived from the TX PPDU status + */ + rcu_log.txRateCtrl = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + qdf_mem_copy(rcu_log.txRateCtrl, + ((char *)fw_data->data + + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rcu_log.txRateCtrl); + return A_OK; +} + +#else +A_STATUS process_rate_update(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + size_t log_size; + struct ath_pktlog_info *pl_info; + struct ath_pktlog_rc_update rcu_log; + uint32_t *pl_tgt_hdr; + struct ol_fw_data *fw_data; + uint32_t len; + + if (!pdev || !data || !pl_dev) { + qdf_print("%s: Invalid handle", __func__); + return A_ERROR; + } + + fw_data = (struct ol_fw_data *)data; + len = fw_data->len; + if (len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) || + len < (sizeof(uint32_t) * + (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) { + qdf_print("Invalid msdu len in %s\n", __func__); + qdf_assert(0); + return A_ERROR; + } + + pl_tgt_hdr = (uint32_t *)fw_data->data; + /* + * Makes the short words (16 bits) portable b/w little endian + * and big endian + */ + qdf_mem_set(&pl_hdr, sizeof(pl_hdr), 0); + pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) & + ATH_PKTLOG_HDR_FLAGS_MASK) >> + ATH_PKTLOG_HDR_FLAGS_SHIFT; + pl_hdr.missed_cnt = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_MISSED_CNT_OFFSET) & + 
ATH_PKTLOG_HDR_MISSED_CNT_MASK) >> + ATH_PKTLOG_HDR_MISSED_CNT_SHIFT; + pl_hdr.log_type = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_LOG_TYPE_OFFSET) & + ATH_PKTLOG_HDR_LOG_TYPE_MASK) >> + ATH_PKTLOG_HDR_LOG_TYPE_SHIFT; + pl_hdr.size = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_SIZE_OFFSET) & + ATH_PKTLOG_HDR_SIZE_MASK) >> ATH_PKTLOG_HDR_SIZE_SHIFT; + pl_hdr.timestamp = *(pl_tgt_hdr + ATH_PKTLOG_HDR_TIMESTAMP_OFFSET); + log_size = pl_hdr.size; + pl_info = pl_dev->pl_info; + + /* + * Will be uncommented when the rate control update + * for pktlog is implemented in the firmware. + * Currently derived from the TX PPDU status + */ + rcu_log.txRateCtrl = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) { + qdf_assert(0); + return A_ERROR; + } + qdf_mem_copy(rcu_log.txRateCtrl, + ((char *)fw_data->data + + sizeof(struct ath_pktlog_hdr)), + pl_hdr.size); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rcu_log.txRateCtrl); + return A_OK; +} +#endif + +#ifdef QCA_WIFI_QCA6290 +int process_rx_desc_remote(void *pdev, void *data) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_rx_info rxstat_log; + size_t log_size; + struct ath_pktlog_info *pl_info; + qdf_nbuf_t log_nbuf = (qdf_nbuf_t)data; + + pl_info = pl_dev->pl_info; + qdf_mem_set(&pl_hdr, sizeof(pl_hdr), 0); + pl_hdr.flags = (1 << PKTLOG_FLG_FRM_TYPE_REMOTE_S); + pl_hdr.missed_cnt = 0; + pl_hdr.log_type = 22; /*PKTLOG_TYPE_RX_STATBUF*/ + pl_hdr.size = qdf_nbuf_len(log_nbuf); + pl_hdr.timestamp = 0; + log_size = pl_hdr.size; + rxstat_log.rx_desc = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + + if (rxstat_log.rx_desc == NULL) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG, + "%s: Rx descriptor is NULL", __func__); + return -EFAULT; + } + + qdf_mem_copy(rxstat_log.rx_desc, qdf_nbuf_data(log_nbuf), pl_hdr.size); + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, + rxstat_log.rx_desc); + return 0; +} 
+ +int +process_pktlog_lite(void *context, void *log_data, uint16_t log_type) +{ + struct pktlog_dev_t *pl_dev = get_pktlog_handle(); + struct ath_pktlog_info *pl_info; + struct ath_pktlog_hdr pl_hdr; + struct ath_pktlog_rx_info rxstat_log; + size_t log_size; + qdf_nbuf_t log_nbuf = (qdf_nbuf_t)log_data; + + pl_info = pl_dev->pl_info; + qdf_mem_set(&pl_hdr, sizeof(pl_hdr), 0); + pl_hdr.flags = (1 << PKTLOG_FLG_FRM_TYPE_REMOTE_S); + pl_hdr.missed_cnt = 0; + pl_hdr.log_type = log_type; + pl_hdr.size = qdf_nbuf_len(log_nbuf); + pl_hdr.timestamp = 0; + log_size = pl_hdr.size; + rxstat_log.rx_desc = (void *)pktlog_getbuf(pl_dev, pl_info, + log_size, &pl_hdr); + + if (rxstat_log.rx_desc == NULL) { + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG, + "%s: Rx descriptor is NULL", __func__); + return -EFAULT; + } + + qdf_mem_copy(rxstat_log.rx_desc, qdf_nbuf_data(log_nbuf), pl_hdr.size); + + cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rxstat_log.rx_desc); + return 0; +} +#else +int process_rx_desc_remote(void *pdev, void *data) +{ + return 0; +} +int +process_pktlog_lite(void *context, void *log_data, uint16_t log_type) +{ + return 0; +} +#endif +#endif /*REMOVE_PKT_LOG */ diff --git a/drivers/staging/qca-wifi-host-cmn/utils/ptt/inc/wlan_ptt_sock_svc.h b/drivers/staging/qca-wifi-host-cmn/utils/ptt/inc/wlan_ptt_sock_svc.h new file mode 100644 index 0000000000000000000000000000000000000000..972dabb075074f12b25a72a5889177cb20c957de --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/ptt/inc/wlan_ptt_sock_svc.h @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/****************************************************************************** +* wlan_ptt_sock_svc.h +* +******************************************************************************/ +#ifndef PTT_SOCK_SVC_H +#define PTT_SOCK_SVC_H +#include <wlan_nlink_srv.h> +#include <qdf_types.h> +#include <qdf_status.h> +#include <wlan_nlink_common.h> + +/* + * Quarky Message Format: + * The following is the messaging protocol between Quarky and PTT Socket App. + * The totalMsgLen is the length from Radio till msgBody. The value of Radio + * is always defaulted to 0. The MsgLen is the length from msgId till msgBody. + * The length of the msgBody varies with respect to the MsgId. Buffer space + * for MsgBody is already allocated in the received buffer. 
So in case of READ + * we just need to populate the values in the received message and send it + * back + * +------------+-------+-------+--------+-------+---------+ + * |TotalMsgLen | Radio | MsgId | MsgLen |Status |MsgBody | + * +------------+-------+-------|--------+-------+---------+ + * <------4----><--4---><---2--><---2---><---4--><---------> + */ +/* PTT Socket App Message Ids */ +#define PTT_MSG_READ_REGISTER 0x3040 +#define PTT_MSG_WRITE_REGISTER 0x3041 +#define PTT_MSG_READ_MEMORY 0x3044 +#define PTT_MSG_WRITE_MEMORY 0x3045 +#define PTT_MSG_LOG_DUMP_DBG 0x32A1 +#define PTT_MSG_FTM_CMDS_TYPE 0x4040 +#define ANI_DRIVER_MSG_START 0x0001 +#define ANI_MSG_APP_REG_REQ (ANI_DRIVER_MSG_START + 0) +#define ANI_MSG_APP_REG_RSP (ANI_DRIVER_MSG_START + 1) +#define ANI_MSG_OEM_DATA_REQ (ANI_DRIVER_MSG_START + 2) +#define ANI_MSG_OEM_DATA_RSP (ANI_DRIVER_MSG_START + 3) +#define ANI_MSG_CHANNEL_INFO_REQ (ANI_DRIVER_MSG_START + 4) +#define ANI_MSG_CHANNEL_INFO_RSP (ANI_DRIVER_MSG_START + 5) +#define ANI_MSG_OEM_ERROR (ANI_DRIVER_MSG_START + 6) +#define ANI_MSG_PEER_STATUS_IND (ANI_DRIVER_MSG_START + 7) +#define ANI_MSG_SET_OEM_CAP_REQ (ANI_DRIVER_MSG_START + 8) +#define ANI_MSG_SET_OEM_CAP_RSP (ANI_DRIVER_MSG_START + 9) +#define ANI_MSG_GET_OEM_CAP_REQ (ANI_DRIVER_MSG_START + 10) +#define ANI_MSG_GET_OEM_CAP_RSP (ANI_DRIVER_MSG_START + 11) + +#define ANI_MAX_RADIOS 3 +#define ANI_NL_MSG_OK 0 +#define ANI_NL_MSG_ERROR -1 +#define ANI_NL_MSG_OVERHEAD (NLMSG_SPACE(tAniHdr + 4)) +/* + * Packet Format for READ_REGISTER & WRITE_REGISTER: + * TotalMsgLen : 4 bytes [value=20 bytes] + * Radio : 4 bytes + * MsgId : 2 bytes + * MsgLen : 2 bytes + * Status : 4 bytes + * Address : 4 bytes + * Payload : 4 bytes + */ +/* + * Packet Format for READ_MEMORY & WRITE_MEMORY : + * TotalMsgLen : 4 bytes [value= 20+LEN_PAYLOAD bytes] + * Radio : 4 bytes + * MsgId : 2 bytes + * MsgLen : 2 bytes + * Status : 4 bytes + * Address : 4 bytes + * Length : 4 bytes [LEN_PAYLOAD] + * Payload : 
LEN_PAYLOAD bytes + */ +#ifdef PTT_SOCK_SVC_ENABLE +/** + * ptt_sock_activate_svc() - API to register PTT/PUMAC command handlers + * + * API to register the handler for PTT/PUMAC NL messages. + * + * Return: None + */ +void ptt_sock_activate_svc(void); + +/** + * ptt_sock_deactivate_svc() - API to deregister PTT/PUMAC command handlers + * + * API to deregister the handler for PTT/PUMAC NL messages. + * + * Return: None + */ +void ptt_sock_deactivate_svc(void); +int ptt_sock_send_msg_to_app(tAniHdr *wmsg, int radio, int src_mod, int pid); +#else +static inline void ptt_sock_activate_svc(void) +{ +} + +static inline void ptt_sock_deactivate_svc(void) +{ +} + +static inline int ptt_sock_send_msg_to_app(tAniHdr *wmsg, int radio, + int src_mod, int pid) +{ + return 0; +} +#endif + +/* + * Format of message exchanged between the PTT Socket App in userspace and the + * WLAN Driver, in either direction. Each msg will begin with this header and + * will followed by the Quarky message + */ +struct sAniAppRegReq { + tAniNlModTypes type; /* module id */ + int pid; /* process id */ +}; + +/** + * struct sptt_app_reg_req - PTT register request structure + * @radio: Radio ID + * @wmsg: ANI header + * + * payload structure received as nl data from PTT app/user space + */ +struct sptt_app_reg_req { + int radio; + tAniHdr wmsg; +}; + +struct sAniNlAppRegRsp { + tAniHdr wniHdr; /* Generic WNI msg header */ + struct sAniAppRegReq regReq; /* The original request msg */ + int ret; /* Return code */ +}; +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/utils/ptt/src/wlan_ptt_sock_svc.c b/drivers/staging/qca-wifi-host-cmn/utils/ptt/src/wlan_ptt_sock_svc.c new file mode 100644 index 0000000000000000000000000000000000000000..089f29ef7b871d08cdaa723086ef6104f9507724 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/utils/ptt/src/wlan_ptt_sock_svc.c @@ -0,0 +1,346 @@ +/* + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/****************************************************************************** +* wlan_ptt_sock_svc.c +* +******************************************************************************/ +#ifdef PTT_SOCK_SVC_ENABLE +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CNSS_GENL +#include +#include +#else + +/** ptt Process ID */ +static int32_t ptt_pid = INVALID_PID; +#endif + +#define PTT_SOCK_DEBUG +#ifdef PTT_SOCK_DEBUG +#define PTT_TRACE(level, args ...) QDF_TRACE(QDF_MODULE_ID_QDF, level, ## args) +#else +#define PTT_TRACE(level, args ...) 
+#endif + +#ifdef PTT_SOCK_DEBUG_VERBOSE +/* Utility function to perform a hex dump */ +static void ptt_sock_dump_buf(const unsigned char *pbuf, int cnt) +{ + int i; + + for (i = 0; i < cnt; i++) { + if ((i % 16) == 0) + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, + "\n%pK:", pbuf); + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, " %02X", + *pbuf); + pbuf++; + } + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO, "\n"); +} +#endif + +/** + * nl_srv_ucast_ptt() - Wrapper function to send ucast msgs to PTT + * @skb: sk buffer pointer + * @dst_pid: Destination PID + * @flag: flags + * + * Sends the ucast message to PTT with generic nl socket if CNSS_GENL + * is enabled. Else, use the legacy netlink socket to send. + * + * Return: zero on success, error code otherwise + */ +static int nl_srv_ucast_ptt(struct sk_buff *skb, int dst_pid, int flag) +{ +#ifdef CNSS_GENL + return nl_srv_ucast(skb, dst_pid, flag, ANI_NL_MSG_PUMAC, + CLD80211_MCGRP_DIAG_EVENTS); +#else + return nl_srv_ucast(skb, dst_pid, flag); +#endif +} + +/** + * nl_srv_bcast_ptt() - Wrapper function to send bcast msgs to DIAG mcast group + * @skb: sk buffer pointer + * + * Sends the bcast message to DIAG multicast group with generic nl socket + * if CNSS_GENL is enabled. Else, use the legacy netlink socket to send. + * + * Return: zero on success, error code otherwise + */ +static int nl_srv_bcast_ptt(struct sk_buff *skb) +{ +#ifdef CNSS_GENL + return nl_srv_bcast(skb, CLD80211_MCGRP_DIAG_EVENTS, ANI_NL_MSG_PUMAC); +#else + return nl_srv_bcast(skb); +#endif +} + +/** + * ptt_sock_send_msg_to_app() - Send nl message to user space + * wmsg: Message header + * radio: Unit number of the radio + * src_mod: Message type + * pid: Process ID to which message will be unicast. 
Message + * will be broadcast when PID is INVALID_PID + * + * Utility function to send a netlink message to an application in user space + * + * Return: 0 on success and negative value on failure + */ +int ptt_sock_send_msg_to_app(tAniHdr *wmsg, int radio, int src_mod, int pid) +{ + int err = -1; + int payload_len; + int tot_msg_len; + tAniNlHdr *wnl; + struct sk_buff *skb; + struct nlmsghdr *nlh; + int wmsg_length = be16_to_cpu(wmsg->length); + static int nlmsg_seq; + + if (radio < 0 || radio > ANI_MAX_RADIOS) { + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, "%s: invalid radio id [%d]\n", + __func__, radio); + return -EINVAL; + } + payload_len = wmsg_length + sizeof(wnl->radio) + sizeof(*wmsg); + tot_msg_len = NLMSG_SPACE(payload_len); + skb = dev_alloc_skb(tot_msg_len); + if (skb == NULL) { + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, + "%s: dev_alloc_skb() failed for msg size[%d]\n", + __func__, tot_msg_len); + return -ENOMEM; + } + nlh = + nlmsg_put(skb, pid, nlmsg_seq++, src_mod, payload_len, + NLM_F_REQUEST); + if (NULL == nlh) { + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, + "%s: nlmsg_put() failed for msg size[%d]\n", __func__, + tot_msg_len); + kfree_skb(skb); + return -ENOMEM; + } + wnl = (tAniNlHdr *) nlh; + wnl->radio = radio; + memcpy(&wnl->wmsg, wmsg, wmsg_length); +#ifdef PTT_SOCK_DEBUG_VERBOSE + ptt_sock_dump_buf((const unsigned char *)skb->data, skb->len); +#endif + + if (pid != INVALID_PID) + err = nl_srv_ucast_ptt(skb, pid, MSG_DONTWAIT); + else + err = nl_srv_bcast_ptt(skb); + + if (err) + PTT_TRACE(QDF_TRACE_LEVEL_INFO, + "%s:Failed sending Msg Type [0x%X] to pid[%d]\n", + __func__, be16_to_cpu(wmsg->type), pid); + return err; +} + +#ifndef CNSS_GENL +/* + * Process registration request and send registration response messages + * to the PTT Socket App in user space + */ +static void ptt_sock_proc_reg_req(tAniHdr *wmsg, int radio) +{ + struct sAniAppRegReq *reg_req; + struct sAniNlAppRegRsp rspmsg; + + reg_req = (struct sAniAppRegReq *) (wmsg + 1); + memset((char *)&rspmsg, 
0, sizeof(rspmsg)); + /* send reg response message to the application */ + rspmsg.ret = ANI_NL_MSG_OK; + rspmsg.regReq.type = reg_req->type; + /*Save the pid */ + ptt_pid = reg_req->pid; + rspmsg.regReq.pid = reg_req->pid; + rspmsg.wniHdr.type = cpu_to_be16(ANI_MSG_APP_REG_RSP); + rspmsg.wniHdr.length = cpu_to_be16(sizeof(rspmsg)); + if (ptt_sock_send_msg_to_app((tAniHdr *) &rspmsg.wniHdr, radio, + ANI_NL_MSG_PUMAC, ptt_pid) < 0) { + PTT_TRACE(QDF_TRACE_LEVEL_INFO, + "%s: Error sending ANI_MSG_APP_REG_RSP to pid[%d]\n", + __func__, ptt_pid); + } +} + +/* + * Process all the messages from the PTT Socket App in user space + */ +static void ptt_proc_pumac_msg(struct sk_buff *skb, tAniHdr *wmsg, int radio) +{ + u16 ani_msg_type = be16_to_cpu(wmsg->type); + + switch (ani_msg_type) { + case ANI_MSG_APP_REG_REQ: + PTT_TRACE(QDF_TRACE_LEVEL_INFO, + "%s: Received ANI_MSG_APP_REG_REQ [0x%X]\n", __func__, + ani_msg_type); + ptt_sock_proc_reg_req(wmsg, radio); + break; + default: + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, + "%s: Received Unknown Msg Type[0x%X]\n", __func__, + ani_msg_type); + break; + } +} + +/* + * Process all the Netlink messages from PTT Socket app in user space + */ +static int ptt_sock_rx_nlink_msg(struct sk_buff *skb) +{ + tAniNlHdr *wnl; + int radio; + int type; + + wnl = (tAniNlHdr *) skb->data; + radio = wnl->radio; + type = wnl->nlh.nlmsg_type; + switch (type) { + case ANI_NL_MSG_PUMAC: /* Message from the PTT socket APP */ + PTT_TRACE(QDF_TRACE_LEVEL_INFO, + "%s: Received ANI_NL_MSG_PUMAC Msg [0x%X]\n", + __func__, type); + ptt_proc_pumac_msg(skb, &wnl->wmsg, radio); + break; + default: + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, "%s: Unknown NL Msg [0x%X]\n", + __func__, type); + break; + } + return 0; +} +#endif + +#ifdef CNSS_GENL +/** + * ptt_cmd_handler() - Handler function for PTT commands + * @data: Data to be parsed + * @data_len: Length of the data received + * @ctx: Registered context reference + * @pid: Process id of the user space application + * + * 
This function handles the command from PTT user space application + * + * Return: None + */ +static void ptt_cmd_handler(const void *data, int data_len, void *ctx, int pid) +{ + uint16_t length; + struct sptt_app_reg_req *payload; + struct nlattr *tb[CLD80211_ATTR_MAX + 1]; + + /* + * audit note: it is ok to pass a NULL policy here since a + * length check on the data is added later already + */ + if (wlan_cfg80211_nla_parse(tb, CLD80211_ATTR_MAX, + data, data_len, NULL)) { + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, "Invalid ATTR"); + return; + } + + if (!tb[CLD80211_ATTR_DATA]) { + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, "attr ATTR_DATA failed"); + return; + } + + if (nla_len(tb[CLD80211_ATTR_DATA]) < sizeof(struct sptt_app_reg_req)) { + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, "%s:attr length check fails\n", + __func__); + return; + } + + payload = (struct sptt_app_reg_req *)(nla_data(tb[CLD80211_ATTR_DATA])); + length = be16_to_cpu(payload->wmsg.length); + if ((USHRT_MAX - length) < (sizeof(payload->radio) + sizeof(tAniHdr))) { + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, + "u16 overflow length %d %zu %zu", + length, + sizeof(payload->radio), + sizeof(tAniHdr)); + return; + } + + if (nla_len(tb[CLD80211_ATTR_DATA]) < (length + + sizeof(payload->radio) + + sizeof(tAniHdr))) { + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, "ATTR_DATA len check failed"); + return; + } + + switch (payload->wmsg.type) { + case ANI_MSG_APP_REG_REQ: + ptt_sock_send_msg_to_app(&payload->wmsg, payload->radio, + ANI_NL_MSG_PUMAC, pid); + break; + default: + PTT_TRACE(QDF_TRACE_LEVEL_ERROR, "Unknown msg type %d", + payload->wmsg.type); + break; + } +} + +void ptt_sock_activate_svc(void) +{ + register_cld_cmd_cb(ANI_NL_MSG_PUMAC, ptt_cmd_handler, NULL); + register_cld_cmd_cb(ANI_NL_MSG_PTT, ptt_cmd_handler, NULL); +} + +void ptt_sock_deactivate_svc(void) +{ + deregister_cld_cmd_cb(ANI_NL_MSG_PTT); + deregister_cld_cmd_cb(ANI_NL_MSG_PUMAC); +} +#else + +void ptt_sock_activate_svc(void) +{ + ptt_pid = INVALID_PID; + 
nl_srv_register(ANI_NL_MSG_PUMAC, ptt_sock_rx_nlink_msg); + nl_srv_register(ANI_NL_MSG_PTT, ptt_sock_rx_nlink_msg); +} + +void ptt_sock_deactivate_svc(void) +{ + nl_srv_unregister(ANI_NL_MSG_PTT, ptt_sock_rx_nlink_msg); + nl_srv_unregister(ANI_NL_MSG_PUMAC, ptt_sock_rx_nlink_msg); + ptt_pid = INVALID_PID; +} +#endif +#endif /* PTT_SOCK_SVC_ENABLE */ diff --git a/drivers/staging/qca-wifi-host-cmn/wbuff/inc/wbuff.h b/drivers/staging/qca-wifi-host-cmn/wbuff/inc/wbuff.h new file mode 100644 index 0000000000000000000000000000000000000000..4e7c93fc44eecb894a0824c48b043dbcb2a1e096 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wbuff/inc/wbuff.h @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wbuff.h + * wbuff buffer management APIs + */ + +#ifndef _WBUFF_H +#define _WBUFF_H + +#include +#include + +/* wbuff available pools */ +/* Pool of nbuf size 256 bytes */ +#define WBUFF_POOL_0 0 +/* Pool of nbuf size 512 bytes */ +#define WBUFF_POOL_1 1 +/* Pool of nbuf size 1024 bytes */ +#define WBUFF_POOL_2 2 +/* Pool of nbuf 2048 bytes */ +#define WBUFF_POOL_3 3 + +/** + * struct wbuff_alloc_request - allocation structure for registering each + * pool for wbuff module. 
+ * @slot: pool_slot identifier + * @size: number of buffers for @pool_slot + */ +struct wbuff_alloc_request { + uint8_t slot; + uint16_t size; +}; + +/* Opaque handle for wbuff */ +struct wbuff_mod_handle; + +#ifdef WLAN_FEATURE_WBUFF +/** + * wbuff_module_init() - Initializes the wbuff module + * + * Return: QDF_STATUS_SUCCESS - init success + * QDF_STATUS_E_NOSUPPORT - init failure + */ +QDF_STATUS wbuff_module_init(void); + +/** + * wbuff_module_deinit() - De-initializes the wbuff module + * + * Return: QDF_STATUS_SUCCESS - de-init success + * QDF_STATUS_E_INVAL - de-init failure (wbuff not initialized) + */ +QDF_STATUS wbuff_module_deinit(void); + +/** + * wbuff_module_register() - Registers a module with wbuff + * @req: allocation request from registered module + * @num: number of pools required + * @reserve: nbuf headroom to start with + * @align: alignment for the nbuf + * + * Return: Handle if registration success + * NULL if registration failure + */ +struct wbuff_mod_handle * +wbuff_module_register(struct wbuff_alloc_request *req, uint8_t num, + int reserve, int align); + +/** + * wbuff_module_deregister() - De-registers a module with wbuff + * @hdl: wbuff_handle corresponding to the module + * + * Return: QDF_STATUS_SUCCESS - deregistration success + * QDF_STATUS_E_INVAL - deregistration failure + */ +QDF_STATUS wbuff_module_deregister(struct wbuff_mod_handle *hdl); + +/** + * wbuff_buff_get() - return buffer to the requester + * @handle: wbuff_handle corresponding to the module + * @len: length of buffer requested + * file_name: file from which buffer is requested + * line_num: line number in the file + * + * Return: Network buffer if success + * NULL if failure + */ +qdf_nbuf_t wbuff_buff_get(struct wbuff_mod_handle *hdl, uint32_t len, + uint8_t *file_name, uint32_t line_num); + +/** + * wbuff_buff_put() - put the buffer back to wbuff pool + * @hdl: wbuff_handle corresponding to the module + * @buf: pointer to network buffer + * + * Return: NULL if 
success (buffer consumed) + * @buf if failure (buffer not consumed) + */ +qdf_nbuf_t wbuff_buff_put(qdf_nbuf_t buf); + +#else + +static inline QDF_STATUS wbuff_module_init(void) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static inline QDF_STATUS wbuff_module_deinit(void) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static inline struct wbuff_mod_handle * +wbuff_module_register(struct wbuff_alloc_request *req, uint8_t num, + int reserve, int align) +{ + return NULL; +} + +static inline QDF_STATUS wbuff_module_deregister(struct wbuff_mod_handle *hdl) +{ + return QDF_STATUS_E_NOSUPPORT; +} + +static inline qdf_nbuf_t +wbuff_buff_get(struct wbuff_mod_handle *hdl, uint32_t len, int8_t *file_name, + uint32_t line_num) +{ + return NULL; +} + +static inline qdf_nbuf_t +wbuff_buff_put(qdf_nbuf_t buf) +{ + return buf; +} + +#endif +#endif /* _WBUFF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/wbuff/src/i_wbuff.h b/drivers/staging/qca-wifi-host-cmn/wbuff/src/i_wbuff.h new file mode 100644 index 0000000000000000000000000000000000000000..ce7b69ec718416ac4bd0c97359affc541668e5be --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wbuff/src/i_wbuff.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/** + * DOC: i_wbuff.h + * wbuff private + */ + +#ifndef _I_WBUFF_H +#define _I_WBUFF_H + +#include + +/* Number of modules supported by wbuff */ +#define WBUFF_MAX_MODULES 4 + +/* Number of pools supported per module */ +#define WBUFF_MAX_POOLS 4 + +/* Max buffer size supported by wbuff in bytes */ +#define WBUFF_MAX_BUFFER_SIZE 2048 + +/* wbuff pool buffer lengths in bytes*/ +#define WBUFF_LEN_POOL0 256 +#define WBUFF_LEN_POOL1 512 +#define WBUFF_LEN_POOL2 1024 +#define WBUFF_LEN_POOL3 2048 + +/* wbuff max pool sizes */ +/* Allocation of size 256 bytes */ +#define WBUFF_POOL_0_MAX 256 +/* Allocation of size 512 bytes */ +#define WBUFF_POOL_1_MAX 128 +/* Allocation of size 1024 bytes */ +#define WBUFF_POOL_2_MAX 64 +/* Allocation of size 2048 bytes */ +#define WBUFF_POOL_3_MAX 32 + +#define WBUFF_MSLOT_SHIFT 4 +#define WBUFF_MSLOT_BITMASK 0xF0 + +#define WBUFF_PSLOT_SHIFT 1 +#define WBUFF_PSLOT_BITMASK 0xE + +/* Comparison array for maximum allocation per pool*/ +uint16_t wbuff_alloc_max[WBUFF_MAX_POOLS] = {WBUFF_POOL_0_MAX, + WBUFF_POOL_1_MAX, + WBUFF_POOL_2_MAX, + WBUFF_POOL_3_MAX}; + +/** + * struct wbuff_handle - wbuff handle to the registered module + * @id: the identifier for the registered module. 
+ */ +struct wbuff_handle { + uint8_t id; +}; + +/** + * struct wbuff_module - allocation holder for wbuff registered module + * @registered: To identify whether module is registered + * @pending_returns: Number of buffers pending to be returned to + * wbuff by the module + * @lock: Lock for accessing per module buffer slots + * @handle: wbuff handle for the registered module + * @reserve: nbuf headroom to start with + * @align: alignment for the nbuf + * @pool[]: pools for all available buffers for the module + */ +struct wbuff_module { + bool registered; + uint16_t pending_returns; + qdf_spinlock_t lock; + struct wbuff_handle handle; + int reserve; + int align; + qdf_nbuf_t pool[WBUFF_MAX_POOLS]; +}; + +/** + * struct wbuff_holder - allocation holder for wbuff + * @initialized: to identified whether module is initialized + */ +struct wbuff_holder { + bool initialized; + struct wbuff_module mod[WBUFF_MAX_MODULES]; +}; +#endif /* _WBUFF_H */ diff --git a/drivers/staging/qca-wifi-host-cmn/wbuff/src/wbuff.c b/drivers/staging/qca-wifi-host-cmn/wbuff/src/wbuff.c new file mode 100644 index 0000000000000000000000000000000000000000..b77c2213bea386d1a6d1d7ebab860f93d17c8770 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wbuff/src/wbuff.c @@ -0,0 +1,360 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: wbuff.c + * wbuff buffer management APIs + */ + +#include +#include "i_wbuff.h" + +/** + * Allocation holder array for all wbuff registered modules + */ +struct wbuff_holder wbuff; + +/** + * wbuff_get_pool_slot_from_len() - get pool_slot from length + * @len: length of the buffer + * + * Return: pool slot + */ +static uint8_t wbuff_get_pool_slot_from_len(uint16_t len) +{ + if ((len > 0) && (len <= WBUFF_LEN_POOL0)) + return WBUFF_POOL_0; + else if ((len > WBUFF_LEN_POOL0) && (len <= WBUFF_LEN_POOL1)) + return WBUFF_POOL_1; + else if ((len > WBUFF_LEN_POOL1) && (len <= WBUFF_LEN_POOL2)) + return WBUFF_POOL_2; + else + return WBUFF_POOL_3; +} + +/** + * wbuff_get_len_from_pool_slot() - get len from pool slot + * @pool_slot: wbuff pool_slot + * + * Return: nbuf length from pool slot + */ +static uint32_t wbuff_get_len_from_pool_slot(uint16_t pool_slot) +{ + uint32_t len = 0; + + switch (pool_slot) { + case 0: + len = WBUFF_LEN_POOL0; + break; + case 1: + len = WBUFF_LEN_POOL1; + break; + case 2: + len = WBUFF_LEN_POOL2; + break; + case 3: + len = WBUFF_LEN_POOL3; + break; + default: + len = 0; + } + + return len; +} + +/** + * wbuff_get_free_mod_slot() - get free module slot + * + * Return: module slot + */ +static uint8_t wbuff_get_free_mod_slot(void) +{ + uint8_t mslot = 0; + + for (mslot = 0; mslot < WBUFF_MAX_MODULES; mslot++) { + qdf_spin_lock_bh(&wbuff.mod[mslot].lock); + if (!wbuff.mod[mslot].registered) { + wbuff.mod[mslot].registered = true; + qdf_spin_unlock_bh(&wbuff.mod[mslot].lock); + break; + } + qdf_spin_unlock_bh(&wbuff.mod[mslot].lock); + } + + return mslot; +} + +/** + * 
wbuff_is_valid_alloc_req() - validate alloc request + * @req: allocation request from registered module + * @num: number of pools required + * + * Return: true if valid wbuff_alloc_request + * false if invalid wbuff_alloc_request + */ +static bool wbuff_is_valid_alloc_req(struct wbuff_alloc_request *req, + uint8_t num) +{ + uint16_t psize = 0; + uint8_t alloc = 0, pslot = 0; + + for (alloc = 0; alloc < num; alloc++) { + pslot = req[alloc].slot; + psize = req[alloc].size; + if ((pslot > WBUFF_MAX_POOLS - 1) || + (psize > wbuff_alloc_max[pslot])) + return false; + } + + return true; +} + +/** + * wbuff_prepare_nbuf() - allocate nbuf + * @mslot: module slot + * @pslot: pool slot + * @len: length of the buffer + * @reserve: nbuf headroom to start with + * @align: alignment for the nbuf + * + * Return: nbuf if success + * NULL if failure + */ +static qdf_nbuf_t wbuff_prepare_nbuf(uint8_t mslot, uint8_t pslot, + uint32_t len, int reserve, int align) +{ + qdf_nbuf_t buf; + unsigned long dev_scratch = 0; + + buf = qdf_nbuf_alloc(NULL, roundup(len + reserve, align), reserve, + align, false); + if (!buf) + return NULL; + dev_scratch = mslot; + dev_scratch <<= WBUFF_MSLOT_SHIFT; + dev_scratch |= ((pslot << WBUFF_PSLOT_SHIFT) | 1); + qdf_nbuf_set_dev_scratch(buf, dev_scratch); + + return buf; +} + +/** + * wbuff_is_valid_handle() - validate wbuff handle + * @handle: wbuff handle passed by module + * + * Return: true - valid wbuff_handle + * false - invalid wbuff_handle + */ +static bool wbuff_is_valid_handle(struct wbuff_handle *handle) +{ + if ((handle) && (handle->id < WBUFF_MAX_MODULES) && + (wbuff.mod[handle->id].registered)) + return true; + + return false; +} + +QDF_STATUS wbuff_module_init(void) +{ + struct wbuff_module *mod = NULL; + uint8_t mslot = 0, pslot = 0; + + if (!qdf_nbuf_is_dev_scratch_supported()) { + wbuff.initialized = false; + return QDF_STATUS_E_NOSUPPORT; + } + + for (mslot = 0; mslot < WBUFF_MAX_MODULES; mslot++) { + mod = &wbuff.mod[mslot]; + 
qdf_spinlock_create(&mod->lock); + for (pslot = 0; pslot < WBUFF_MAX_POOLS; pslot++) + mod->pool[pslot] = NULL; + mod->registered = false; + } + wbuff.initialized = true; + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wbuff_module_deinit(void) +{ + struct wbuff_module *mod = NULL; + uint8_t mslot = 0; + + if (!wbuff.initialized) + return QDF_STATUS_E_INVAL; + + wbuff.initialized = false; + for (mslot = 0; mslot < WBUFF_MAX_MODULES; mslot++) { + mod = &wbuff.mod[mslot]; + if (mod->registered) + wbuff_module_deregister((struct wbuff_mod_handle *) + &mod->handle); + qdf_spinlock_destroy(&mod->lock); + } + + return QDF_STATUS_SUCCESS; +} + +struct wbuff_mod_handle * +wbuff_module_register(struct wbuff_alloc_request *req, uint8_t num, + int reserve, int align) +{ + struct wbuff_module *mod = NULL; + qdf_nbuf_t buf = NULL; + uint32_t len = 0; + uint16_t idx = 0, psize = 0; + uint8_t alloc = 0, mslot = 0, pslot = 0; + + if (!wbuff.initialized) + return NULL; + + if ((num == 0) || (num > WBUFF_MAX_POOLS)) + return NULL; + + if (!wbuff_is_valid_alloc_req(req, num)) + return NULL; + + mslot = wbuff_get_free_mod_slot(); + if (mslot == WBUFF_MAX_MODULES) + return NULL; + + mod = &wbuff.mod[mslot]; + + mod->handle.id = mslot; + + for (alloc = 0; alloc < num; alloc++) { + pslot = req[alloc].slot; + psize = req[alloc].size; + len = wbuff_get_len_from_pool_slot(pslot); + /** + * Allocate pool_cnt number of buffers for + * the pool given by pslot + */ + for (idx = 0; idx < psize; idx++) { + buf = wbuff_prepare_nbuf(mslot, pslot, len, reserve, + align); + if (!buf) + continue; + if (!mod->pool[pslot]) { + qdf_nbuf_set_next(buf, NULL); + mod->pool[pslot] = buf; + } else { + qdf_nbuf_set_next(buf, mod->pool[pslot]); + mod->pool[pslot] = buf; + } + } + } + mod->reserve = reserve; + mod->align = align; + + return (struct wbuff_mod_handle *)&mod->handle; +} + +QDF_STATUS wbuff_module_deregister(struct wbuff_mod_handle *hdl) +{ + struct wbuff_handle *handle; + struct wbuff_module *mod = 
NULL; + uint8_t mslot = 0, pslot = 0; + qdf_nbuf_t first = NULL, buf = NULL; + + handle = (struct wbuff_handle *)hdl; + + if ((!wbuff.initialized) || (!wbuff_is_valid_handle(handle))) + return QDF_STATUS_E_INVAL; + + mslot = handle->id; + mod = &wbuff.mod[mslot]; + + qdf_spin_lock_bh(&mod->lock); + for (pslot = 0; pslot < WBUFF_MAX_POOLS; pslot++) { + first = mod->pool[pslot]; + while (first) { + buf = first; + first = qdf_nbuf_next(buf); + qdf_nbuf_free(buf); + } + } + mod->registered = false; + qdf_spin_unlock_bh(&mod->lock); + + return QDF_STATUS_SUCCESS; +} + +qdf_nbuf_t wbuff_buff_get(struct wbuff_mod_handle *hdl, uint32_t len, + uint8_t *file_name, uint32_t line_num) +{ + struct wbuff_handle *handle; + struct wbuff_module *mod = NULL; + uint8_t mslot = 0; + uint8_t pslot = 0; + qdf_nbuf_t buf = NULL; + + handle = (struct wbuff_handle *)hdl; + + if ((!wbuff.initialized) || (!wbuff_is_valid_handle(handle)) || !len || + (len > WBUFF_MAX_BUFFER_SIZE)) + return NULL; + + mslot = handle->id; + pslot = wbuff_get_pool_slot_from_len(len); + mod = &wbuff.mod[mslot]; + + qdf_spin_lock_bh(&mod->lock); + if (mod->pool[pslot]) { + buf = mod->pool[pslot]; + mod->pool[pslot] = qdf_nbuf_next(buf); + mod->pending_returns++; + } + qdf_spin_unlock_bh(&mod->lock); + if (buf) { + qdf_nbuf_set_next(buf, NULL); + qdf_net_buf_debug_update_node(buf, file_name, line_num); + } + + return buf; +} + +qdf_nbuf_t wbuff_buff_put(qdf_nbuf_t buf) +{ + qdf_nbuf_t buffer = buf; + unsigned long slot_info = 0; + uint8_t mslot = 0, pslot = 0; + + if (!wbuff.initialized) + return buffer; + + slot_info = qdf_nbuf_get_dev_scratch(buf); + if (!slot_info) + return buffer; + + mslot = (slot_info & WBUFF_MSLOT_BITMASK) >> WBUFF_MSLOT_SHIFT; + pslot = (slot_info & WBUFF_PSLOT_BITMASK) >> WBUFF_PSLOT_SHIFT; + qdf_nbuf_reset(buffer, wbuff.mod[mslot].reserve, wbuff.mod[mslot]. 
+ align); + qdf_spin_lock_bh(&wbuff.mod[mslot].lock); + if (wbuff.mod[mslot].registered) { + qdf_nbuf_set_next(buffer, wbuff.mod[mslot].pool[pslot]); + wbuff.mod[mslot].pool[pslot] = buffer; + wbuff.mod[mslot].pending_returns--; + buffer = NULL; + } + qdf_spin_unlock_bh(&wbuff.mod[mslot].lock); + + return buffer; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wlan_cfg/wlan_cfg.c b/drivers/staging/qca-wifi-host-cmn/wlan_cfg/wlan_cfg.c new file mode 100644 index 0000000000000000000000000000000000000000..6aa77c3b47fa7dcdc8dd5902790bef28ad0bea88 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wlan_cfg/wlan_cfg.c @@ -0,0 +1,759 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#if defined(CONFIG_HL_SUPPORT) +#include "wlan_tgt_def_config_hl.h" +#else +#include "wlan_tgt_def_config.h" +#endif + +#include "qdf_trace.h" +#include "qdf_mem.h" +#include "wlan_cfg.h" + +/* + * FIX THIS - + * For now, all these configuration parameters are hardcoded. 
+ * Many of these should actually be coming from dts file/ini file + */ + +#ifdef CONFIG_MCL +#define WLAN_CFG_PER_PDEV_RX_RING 0 +#define WLAN_CFG_PER_PDEV_LMAC_RING 0 +#define WLAN_LRO_ENABLE 1 +#ifdef IPA_OFFLOAD +#define WLAN_CFG_TX_RING_SIZE 2048 +#else +#define WLAN_CFG_TX_RING_SIZE 512 +#endif +#define WLAN_CFG_TX_COMP_RING_SIZE 1024 + +/* Tx Descriptor and Tx Extension Descriptor pool sizes */ +#define WLAN_CFG_NUM_TX_DESC 1024 +#define WLAN_CFG_NUM_TX_EXT_DESC 1024 + +/* Interrupt Mitigation - Batch threshold in terms of number of frames */ +#define WLAN_CFG_INT_BATCH_THRESHOLD_TX 1 +#define WLAN_CFG_INT_BATCH_THRESHOLD_RX 1 +#define WLAN_CFG_INT_BATCH_THRESHOLD_OTHER 1 + +/* Interrupt Mitigation - Timer threshold in us */ +#define WLAN_CFG_INT_TIMER_THRESHOLD_TX 8 +#define WLAN_CFG_INT_TIMER_THRESHOLD_RX 8 +#define WLAN_CFG_INT_TIMER_THRESHOLD_OTHER 8 +#endif + +#ifdef CONFIG_WIN +#define WLAN_CFG_PER_PDEV_RX_RING 0 +#define WLAN_CFG_PER_PDEV_LMAC_RING 1 +#define WLAN_LRO_ENABLE 0 + +/* Tx Descriptor and Tx Extension Descriptor pool sizes */ +#define WLAN_CFG_NUM_TX_DESC (32 << 10) +#define WLAN_CFG_NUM_TX_EXT_DESC (8 << 10) + + +/* Interrupt Mitigation - Batch threshold in terms of number of frames */ +#define WLAN_CFG_INT_BATCH_THRESHOLD_TX 256 +#define WLAN_CFG_INT_BATCH_THRESHOLD_RX 128 +#define WLAN_CFG_INT_BATCH_THRESHOLD_OTHER 1 + +/* Interrupt Mitigation - Timer threshold in us */ +#define WLAN_CFG_INT_TIMER_THRESHOLD_TX 1000 +#define WLAN_CFG_INT_TIMER_THRESHOLD_RX 500 +#define WLAN_CFG_INT_TIMER_THRESHOLD_OTHER 1000 + +#define WLAN_CFG_TX_RING_SIZE 512 + +/* Size the completion ring using following 2 parameters + * - NAPI schedule latency (assuming 1 netdev competing for CPU) = 20 ms (2 jiffies) + * - Worst case PPS requirement = 400K PPS + * + * Ring size = 20 * 400 = 8000 + * 8192 is nearest power of 2 + */ +#define WLAN_CFG_TX_COMP_RING_SIZE (8 << 10) +#endif + +/* + * The max allowed size for tx comp ring is 8191. 
+ * This is limitted by h/w ring max size. + * As this is not a power of 2 it does not work with nss offload so the + * nearest available size which is power of 2 is 4096 chosen for nss + */ +#define NSS_TX_COMP_RING_SIZE (4 << 10) + +#define RXDMA_BUF_RING_SIZE 2048 +#define RXDMA_MONITOR_BUF_RING_SIZE 2048 +#define RXDMA_MONITOR_DEST_RING_SIZE 2048 +#define RXDMA_MONITOR_STATUS_RING_SIZE 2048 + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 + +/* Per vdev pools */ +#define WLAN_CFG_NUM_TX_DESC_POOL 3 +#define WLAN_CFG_NUM_TXEXT_DESC_POOL 3 + +#else /* QCA_LL_TX_FLOW_CONTROL_V2 */ + +#ifdef TX_PER_PDEV_DESC_POOL +#define WLAN_CFG_NUM_TX_DESC_POOL MAX_PDEV_CNT +#define WLAN_CFG_NUM_TXEXT_DESC_POOL MAX_PDEV_CNT + +#else /* TX_PER_PDEV_DESC_POOL */ + +#define WLAN_CFG_NUM_TX_DESC_POOL 3 +#define WLAN_CFG_NUM_TXEXT_DESC_POOL 3 + +#endif /* TX_PER_PDEV_DESC_POOL */ +#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ + +#define WLAN_CFG_TX_RING_MASK_0 0x1 +#define WLAN_CFG_TX_RING_MASK_1 0x2 +#define WLAN_CFG_TX_RING_MASK_2 0x4 +#define WLAN_CFG_TX_RING_MASK_3 0x0 + +#define WLAN_CFG_RX_RING_MASK_0 0x1 +#define WLAN_CFG_RX_RING_MASK_1 0x2 +#define WLAN_CFG_RX_RING_MASK_2 0x4 +#define WLAN_CFG_RX_RING_MASK_3 0x8 + +#define WLAN_CFG_RX_MON_RING_MASK_0 0x1 +#define WLAN_CFG_RX_MON_RING_MASK_1 0x2 +#define WLAN_CFG_RX_MON_RING_MASK_2 0x4 +#define WLAN_CFG_RX_MON_RING_MASK_3 0x0 + +#define WLAN_CFG_RX_ERR_RING_MASK_0 0x1 +#define WLAN_CFG_RX_ERR_RING_MASK_1 0x0 +#define WLAN_CFG_RX_ERR_RING_MASK_2 0x0 +#define WLAN_CFG_RX_ERR_RING_MASK_3 0x0 + +#define WLAN_CFG_RX_WBM_REL_RING_MASK_0 0x1 +#define WLAN_CFG_RX_WBM_REL_RING_MASK_1 0x0 +#define WLAN_CFG_RX_WBM_REL_RING_MASK_2 0x0 +#define WLAN_CFG_RX_WBM_REL_RING_MASK_3 0x0 + +#define WLAN_CFG_REO_STATUS_RING_MASK_0 0x1 +#define WLAN_CFG_REO_STATUS_RING_MASK_1 0x0 +#define WLAN_CFG_REO_STATUS_RING_MASK_2 0x0 +#define WLAN_CFG_REO_STATUS_RING_MASK_3 0x0 + +#define WLAN_CFG_RXDMA2HOST_RING_MASK_0 0x1 +#define WLAN_CFG_RXDMA2HOST_RING_MASK_1 0x2 +#define 
WLAN_CFG_RXDMA2HOST_RING_MASK_2 0x4 +#define WLAN_CFG_RXDMA2HOST_RING_MASK_3 0x0 + +#define WLAN_CFG_HOST2RXDMA_RING_MASK_0 0x1 +#define WLAN_CFG_HOST2RXDMA_RING_MASK_1 0x2 +#define WLAN_CFG_HOST2RXDMA_RING_MASK_2 0x4 +#define WLAN_CFG_HOST2RXDMA_RING_MASK_3 0x0 + +#define WLAN_CFG_DP_TX_NUM_POOLS 3 +/* Change this to a lower value to enforce scattered idle list mode */ +#define WLAN_CFG_MAX_ALLOC_SIZE (2 << 20) + +#define WLAN_CFG_MAX_CLIENTS 64 + +#ifdef CONFIG_MCL +#ifdef IPA_OFFLOAD +#define WLAN_CFG_PER_PDEV_TX_RING 0 +#else +#define WLAN_CFG_PER_PDEV_TX_RING 1 +#endif +#else +#define WLAN_CFG_PER_PDEV_TX_RING 0 +#endif + +#define WLAN_CFG_NUM_TCL_DATA_RINGS 3 +#define WLAN_CFG_NUM_REO_DEST_RING 4 + +#define WLAN_CFG_HTT_PKT_TYPE 2 +#define WLAN_CFG_MAX_PEER_ID 64 + +#define WLAN_CFG_RX_DEFRAG_TIMEOUT 100 + +#ifdef CONFIG_MCL +static const int tx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, + WLAN_CFG_TX_RING_MASK_0, + 0, + 0, + 0, + 0, + 0}; + +static const int rx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, + 0, + WLAN_CFG_RX_RING_MASK_0, + 0, + WLAN_CFG_RX_RING_MASK_1, + WLAN_CFG_RX_RING_MASK_2, + WLAN_CFG_RX_RING_MASK_3}; + +static const int rx_mon_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, + 0, + 0, + WLAN_CFG_RX_MON_RING_MASK_0, + WLAN_CFG_RX_MON_RING_MASK_1, + WLAN_CFG_RX_MON_RING_MASK_2, + WLAN_CFG_RX_MON_RING_MASK_3}; +#else +static const int tx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_TX_RING_MASK_0, + WLAN_CFG_TX_RING_MASK_1, + WLAN_CFG_TX_RING_MASK_2, + WLAN_CFG_TX_RING_MASK_3}; + +static const int rx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_RX_RING_MASK_0, + WLAN_CFG_RX_RING_MASK_1, + WLAN_CFG_RX_RING_MASK_2, + WLAN_CFG_RX_RING_MASK_3}; + +static const int rx_mon_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = { + 0, + 0, + 0, + 0, + WLAN_CFG_RX_MON_RING_MASK_0, + WLAN_CFG_RX_MON_RING_MASK_1, + WLAN_CFG_RX_MON_RING_MASK_2}; + +#endif + +static const int rx_err_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = { + 
WLAN_CFG_RX_ERR_RING_MASK_0, + WLAN_CFG_RX_ERR_RING_MASK_1, + WLAN_CFG_RX_ERR_RING_MASK_2, + WLAN_CFG_RX_ERR_RING_MASK_3}; + +static const int rx_wbm_rel_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_RX_WBM_REL_RING_MASK_0, + WLAN_CFG_RX_WBM_REL_RING_MASK_1, + WLAN_CFG_RX_WBM_REL_RING_MASK_2, + WLAN_CFG_RX_WBM_REL_RING_MASK_3}; + +static const int reo_status_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_REO_STATUS_RING_MASK_0, + WLAN_CFG_REO_STATUS_RING_MASK_1, + WLAN_CFG_REO_STATUS_RING_MASK_2, + WLAN_CFG_REO_STATUS_RING_MASK_3}; + +static const int rxdma2host_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_RXDMA2HOST_RING_MASK_0, + WLAN_CFG_RXDMA2HOST_RING_MASK_1, + WLAN_CFG_RXDMA2HOST_RING_MASK_2, + WLAN_CFG_RXDMA2HOST_RING_MASK_3}; + +static const int host2rxdma_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = { + WLAN_CFG_HOST2RXDMA_RING_MASK_0, + WLAN_CFG_HOST2RXDMA_RING_MASK_1, + WLAN_CFG_HOST2RXDMA_RING_MASK_2, + WLAN_CFG_HOST2RXDMA_RING_MASK_3}; + +/** + * struct wlan_cfg_dp_pdev_ctxt - Configuration parameters for pdev (radio) + * @rx_dma_buf_ring_size - Size of RxDMA buffer ring + * @dma_mon_buf_ring_size - Size of RxDMA Monitor buffer ring + * @dma_mon_dest_ring_size - Size of RxDMA Monitor Destination ring + * @dma_mon_status_ring_size - Size of RxDMA Monitor Status ring + */ +struct wlan_cfg_dp_pdev_ctxt { + int rx_dma_buf_ring_size; + int dma_mon_buf_ring_size; + int dma_mon_dest_ring_size; + int dma_mon_status_ring_size; + int num_mac_rings; + int nss_enabled; +}; + +/** + * wlan_cfg_soc_attach() - Allocate and prepare SoC configuration + * + * Return: wlan_cfg_ctx - Handle to Configuration context + */ +struct wlan_cfg_dp_soc_ctxt *wlan_cfg_soc_attach() +{ + int i = 0; + + struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = + qdf_mem_malloc(sizeof(struct wlan_cfg_dp_soc_ctxt)); + + if (wlan_cfg_ctx == NULL) + return NULL; + + wlan_cfg_ctx->num_int_ctxts = WLAN_CFG_INT_NUM_CONTEXTS; + wlan_cfg_ctx->max_clients = WLAN_CFG_MAX_CLIENTS; + 
wlan_cfg_ctx->max_alloc_size = WLAN_CFG_MAX_ALLOC_SIZE; + wlan_cfg_ctx->per_pdev_tx_ring = WLAN_CFG_PER_PDEV_TX_RING; + wlan_cfg_ctx->num_tcl_data_rings = WLAN_CFG_NUM_TCL_DATA_RINGS; + wlan_cfg_ctx->per_pdev_rx_ring = WLAN_CFG_PER_PDEV_RX_RING; + wlan_cfg_ctx->per_pdev_lmac_ring = WLAN_CFG_PER_PDEV_LMAC_RING; + wlan_cfg_ctx->num_reo_dest_rings = WLAN_CFG_NUM_REO_DEST_RING; + wlan_cfg_ctx->num_tx_desc_pool = MAX_TXDESC_POOLS; + wlan_cfg_ctx->num_tx_ext_desc_pool = WLAN_CFG_NUM_TXEXT_DESC_POOL; + wlan_cfg_ctx->num_tx_desc = WLAN_CFG_NUM_TX_DESC; + wlan_cfg_ctx->num_tx_ext_desc = WLAN_CFG_NUM_TX_EXT_DESC; + wlan_cfg_ctx->htt_packet_type = WLAN_CFG_HTT_PKT_TYPE; + wlan_cfg_ctx->max_peer_id = WLAN_CFG_MAX_PEER_ID; + + wlan_cfg_ctx->tx_ring_size = WLAN_CFG_TX_RING_SIZE; + wlan_cfg_ctx->tx_comp_ring_size = WLAN_CFG_TX_COMP_RING_SIZE; + + wlan_cfg_ctx->int_batch_threshold_tx = WLAN_CFG_INT_BATCH_THRESHOLD_TX; + wlan_cfg_ctx->int_timer_threshold_tx = WLAN_CFG_INT_TIMER_THRESHOLD_TX; + wlan_cfg_ctx->int_batch_threshold_rx = WLAN_CFG_INT_BATCH_THRESHOLD_RX; + wlan_cfg_ctx->int_timer_threshold_rx = WLAN_CFG_INT_TIMER_THRESHOLD_RX; + wlan_cfg_ctx->int_batch_threshold_other = + WLAN_CFG_INT_BATCH_THRESHOLD_OTHER; + wlan_cfg_ctx->int_timer_threshold_other = + WLAN_CFG_INT_TIMER_THRESHOLD_OTHER; + + for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) { + wlan_cfg_ctx->int_tx_ring_mask[i] = tx_ring_mask[i]; + wlan_cfg_ctx->int_rx_ring_mask[i] = rx_ring_mask[i]; + wlan_cfg_ctx->int_rx_mon_ring_mask[i] = rx_mon_ring_mask[i]; + wlan_cfg_ctx->int_rx_err_ring_mask[i] = rx_err_ring_mask[i]; + wlan_cfg_ctx->int_rx_wbm_rel_ring_mask[i] = + rx_wbm_rel_ring_mask[i]; + wlan_cfg_ctx->int_reo_status_ring_mask[i] = + reo_status_ring_mask[i]; + wlan_cfg_ctx->int_rxdma2host_ring_mask[i] = + rxdma2host_ring_mask[i]; + wlan_cfg_ctx->int_host2rxdma_ring_mask[i] = + host2rxdma_ring_mask[i]; + } + + /* This is default mapping and can be overridden by HW config + * received from FW */ + 
wlan_cfg_set_hw_macid(wlan_cfg_ctx, 0, 1); + if (MAX_PDEV_CNT > 1) + wlan_cfg_set_hw_macid(wlan_cfg_ctx, 1, 3); + if (MAX_PDEV_CNT > 2) + wlan_cfg_set_hw_macid(wlan_cfg_ctx, 2, 2); + + wlan_cfg_ctx->base_hw_macid = 1; + /*Enable checksum offload by default*/ + wlan_cfg_ctx->tcp_udp_checksumoffload = 1; + + wlan_cfg_ctx->defrag_timeout_check = 1; + wlan_cfg_ctx->rx_defrag_min_timeout = WLAN_CFG_RX_DEFRAG_TIMEOUT; + + return wlan_cfg_ctx; +} + +void wlan_cfg_soc_detach(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx) +{ + qdf_mem_free(wlan_cfg_ctx); +} + +struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_pdev_attach(void) +{ + struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx = + qdf_mem_malloc(sizeof(struct wlan_cfg_dp_pdev_ctxt)); + + if (wlan_cfg_ctx == NULL) + return NULL; + + wlan_cfg_ctx->rx_dma_buf_ring_size = RXDMA_BUF_RING_SIZE; + wlan_cfg_ctx->dma_mon_buf_ring_size = RXDMA_MONITOR_BUF_RING_SIZE; + wlan_cfg_ctx->dma_mon_dest_ring_size = RXDMA_MONITOR_DEST_RING_SIZE; + wlan_cfg_ctx->dma_mon_status_ring_size = RXDMA_MONITOR_STATUS_RING_SIZE; + wlan_cfg_ctx->num_mac_rings = NUM_RXDMA_RINGS_PER_PDEV; + + return wlan_cfg_ctx; +} + +void wlan_cfg_pdev_detach(struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx) +{ + qdf_mem_free(wlan_cfg_ctx); +} + +void wlan_cfg_set_num_contexts(struct wlan_cfg_dp_soc_ctxt *cfg, int num) +{ + cfg->num_int_ctxts = num; +} + +void wlan_cfg_set_max_peer_id(struct wlan_cfg_dp_soc_ctxt *cfg, uint32_t val) +{ + cfg->max_peer_id = val;; +} + +void wlan_cfg_set_tx_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + cfg->int_tx_ring_mask[context] = mask; +} + +void wlan_cfg_set_rx_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + cfg->int_rx_ring_mask[context] = mask; +} + +void wlan_cfg_set_rx_mon_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + cfg->int_rx_mon_ring_mask[context] = mask; +} + +void wlan_cfg_set_rxdma2host_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + 
cfg->int_rxdma2host_ring_mask[context] = mask; +} + +int wlan_cfg_get_rxdma2host_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context) +{ + return cfg->int_rxdma2host_ring_mask[context]; +} + +void wlan_cfg_set_host2rxdma_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + cfg->int_host2rxdma_ring_mask[context] = mask; +} + +int wlan_cfg_get_host2rxdma_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context) +{ + return cfg->int_host2rxdma_ring_mask[context]; +} + +void wlan_cfg_set_hw_macid(struct wlan_cfg_dp_soc_ctxt *cfg, int pdev_idx, + int hw_macid) +{ + qdf_assert_always(pdev_idx < MAX_PDEV_CNT); + cfg->hw_macid[pdev_idx] = hw_macid; +} + +int wlan_cfg_get_hw_macid(struct wlan_cfg_dp_soc_ctxt *cfg, int pdev_idx) +{ + qdf_assert_always(pdev_idx < MAX_PDEV_CNT); + return cfg->hw_macid[pdev_idx]; +} + +int wlan_cfg_get_hw_mac_idx(struct wlan_cfg_dp_soc_ctxt *cfg, int pdev_idx) +{ + qdf_assert_always(pdev_idx < MAX_PDEV_CNT); + return cfg->hw_macid[pdev_idx] - cfg->base_hw_macid; +} + +void wlan_cfg_set_ce_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + cfg->int_ce_ring_mask[context] = mask; +} + +void wlan_cfg_set_rxbuf_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int context, + int mask) +{ + cfg->int_rx_ring_mask[context] = mask; +} + +int wlan_cfg_set_rx_err_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + return cfg->int_rx_err_ring_mask[context] = mask; +} + +int wlan_cfg_set_rx_wbm_rel_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + return cfg->int_rx_wbm_rel_ring_mask[context] = mask; +} + +int wlan_cfg_set_reo_status_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask) +{ + return cfg->int_reo_status_ring_mask[context] = mask; +} + +int wlan_cfg_get_num_contexts(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->num_int_ctxts; +} + +int wlan_cfg_get_tx_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int context) +{ + return 
cfg->int_tx_ring_mask[context]; +} + +int wlan_cfg_get_rx_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int context) +{ + return cfg->int_rx_ring_mask[context]; +} + +int wlan_cfg_get_rx_err_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context) +{ + return cfg->int_rx_err_ring_mask[context]; +} + +int wlan_cfg_get_rx_wbm_rel_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context) +{ + return cfg->int_rx_wbm_rel_ring_mask[context]; +} + +int wlan_cfg_get_reo_status_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context) +{ + return cfg->int_reo_status_ring_mask[context]; +} + +int wlan_cfg_get_rx_mon_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int context) +{ + return cfg->int_rx_mon_ring_mask[context]; +} + +int wlan_cfg_get_ce_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int context) +{ + return cfg->int_ce_ring_mask[context]; +} + +uint32_t wlan_cfg_get_max_clients(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->max_clients; +} + +uint32_t wlan_cfg_max_alloc_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->max_alloc_size; +} + +int wlan_cfg_per_pdev_tx_ring(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->per_pdev_tx_ring; +} + +int wlan_cfg_per_pdev_lmac_ring(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->per_pdev_lmac_ring; +} + +int wlan_cfg_num_tcl_data_rings(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->num_tcl_data_rings; +} + +int wlan_cfg_tx_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->tx_ring_size; +} + +int wlan_cfg_tx_comp_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->tx_comp_ring_size; +} + +int wlan_cfg_per_pdev_rx_ring(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->per_pdev_rx_ring; +} + +int wlan_cfg_num_reo_dest_rings(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->num_reo_dest_rings; +} + +int wlan_cfg_pkt_type(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->htt_packet_type; /*htt_pkt_type_ethernet*/ +} + +int wlan_cfg_get_num_tx_desc_pool(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + 
return cfg->num_tx_desc_pool; +} + +void wlan_cfg_set_num_tx_desc_pool(struct wlan_cfg_dp_soc_ctxt *cfg, int num_pool) +{ + cfg->num_tx_desc_pool = num_pool; +} + +int wlan_cfg_get_num_tx_ext_desc_pool(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->num_tx_ext_desc_pool; +} + +void wlan_cfg_set_num_tx_ext_desc_pool(struct wlan_cfg_dp_soc_ctxt *cfg, int num_pool) +{ + cfg->num_tx_ext_desc_pool = num_pool; +} + +int wlan_cfg_get_num_tx_desc(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->num_tx_desc; +} + +void wlan_cfg_set_num_tx_desc(struct wlan_cfg_dp_soc_ctxt *cfg, int num_desc) +{ + cfg->num_tx_desc = num_desc; +} + +int wlan_cfg_get_num_tx_ext_desc(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->num_tx_ext_desc; +} + +void wlan_cfg_set_num_tx_ext_desc(struct wlan_cfg_dp_soc_ctxt *cfg, int num_ext_desc) +{ + cfg->num_tx_ext_desc = num_ext_desc; +} + +uint32_t wlan_cfg_max_peer_id(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + /* TODO: This should be calculated based on target capabilities */ + return cfg->max_peer_id; +} + +int wlan_cfg_get_dma_mon_buf_ring_size(struct wlan_cfg_dp_pdev_ctxt *cfg) +{ + return cfg->dma_mon_buf_ring_size; +} + +int wlan_cfg_get_dma_mon_dest_ring_size(struct wlan_cfg_dp_pdev_ctxt *cfg) +{ + return cfg->dma_mon_dest_ring_size; +} + +int wlan_cfg_get_dma_mon_stat_ring_size(struct wlan_cfg_dp_pdev_ctxt *cfg) +{ + return cfg->dma_mon_status_ring_size; +} + +int wlan_cfg_get_rx_dma_buf_ring_size(struct wlan_cfg_dp_pdev_ctxt *cfg) +{ + return cfg->rx_dma_buf_ring_size; +} + +int wlan_cfg_get_num_mac_rings(struct wlan_cfg_dp_pdev_ctxt *cfg) +{ + return cfg->num_mac_rings; +} + +bool wlan_cfg_is_lro_enabled(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->lro_enabled; +} + +void wlan_cfg_set_rx_hash(struct wlan_cfg_dp_soc_ctxt *cfg, bool val) +{ + cfg->rx_hash = val; +} + +bool wlan_cfg_is_rx_hash_enabled(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->rx_hash; +} + +int wlan_cfg_get_dp_pdev_nss_enabled(struct wlan_cfg_dp_pdev_ctxt 
*cfg) +{ + return cfg->nss_enabled; +} + +void wlan_cfg_set_dp_pdev_nss_enabled(struct wlan_cfg_dp_pdev_ctxt *cfg, int nss_enabled) +{ + cfg->nss_enabled = nss_enabled; +} + +int wlan_cfg_get_dp_soc_nss_cfg(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->nss_cfg; +} + +void wlan_cfg_set_dp_soc_nss_cfg(struct wlan_cfg_dp_soc_ctxt *cfg, int nss_cfg) +{ + cfg->nss_cfg = nss_cfg; + if (cfg->nss_cfg) + cfg->tx_comp_ring_size = NSS_TX_COMP_RING_SIZE; +} + +int wlan_cfg_get_int_batch_threshold_tx(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->int_batch_threshold_tx; +} + +int wlan_cfg_get_int_timer_threshold_tx(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->int_timer_threshold_tx; +} + +int wlan_cfg_get_int_batch_threshold_rx(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->int_batch_threshold_rx; +} + +int wlan_cfg_get_int_timer_threshold_rx(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->int_timer_threshold_rx; +} + +int wlan_cfg_get_int_batch_threshold_other(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->int_batch_threshold_other; +} + +int wlan_cfg_get_int_timer_threshold_other(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->int_timer_threshold_other; +} + +int wlan_cfg_get_checksum_offload(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->tcp_udp_checksumoffload; +} + +int wlan_cfg_get_rx_defrag_min_timeout(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->rx_defrag_min_timeout; +} + +int wlan_cfg_get_defrag_timeout_check(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->defrag_timeout_check; +} + +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +/** + * wlan_cfg_get_tx_flow_stop_queue_th() - Get flow control stop threshold + * @cfg: config context + * + * Return: stop threshold + */ +int wlan_cfg_get_tx_flow_stop_queue_th(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->tx_flow_stop_queue_threshold; +} + +/** + * wlan_cfg_get_tx_flow_start_queue_offset() - Get flow control start offset + * for TX to resume + * @cfg: config context + * + * Return: stop 
threshold + */ +int wlan_cfg_get_tx_flow_start_queue_offset(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->tx_flow_start_queue_offset; +} +#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ diff --git a/drivers/staging/qca-wifi-host-cmn/wlan_cfg/wlan_cfg.h b/drivers/staging/qca-wifi-host-cmn/wlan_cfg/wlan_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..a0565f7534f949b1993bfe005dc0cf456fe71297 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wlan_cfg/wlan_cfg.h @@ -0,0 +1,713 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __WLAN_CFG_H +#define __WLAN_CFG_H + +/* + * Temporary place holders. 
These should come either from target config + * or platform configuration + */ +#if defined(CONFIG_MCL) +#define MAX_PDEV_CNT 1 +#define WLAN_CFG_INT_NUM_CONTEXTS 7 +/* + * This mask defines how many transmit frames account for 1 NAPI work unit + * 0 means each tx completion is 1 unit + */ +#define DP_TX_NAPI_BUDGET_DIV_MASK 0 + +/* PPDU Stats Configuration - Configure bitmask for enabling tx ppdu tlv's */ +#define DP_PPDU_TXLITE_STATS_BITMASK_CFG 0x1FFF + +#define NUM_RXDMA_RINGS_PER_PDEV 2 +#else +#define MAX_PDEV_CNT 3 +#define WLAN_CFG_INT_NUM_CONTEXTS 7 +/* + * This mask defines how many transmit frames account for 1 NAPI work unit + * 0xFFFF means each 64K tx frame completions account for 1 unit of NAPI budget + */ +#define DP_TX_NAPI_BUDGET_DIV_MASK 0xFFFF + +/* PPDU Stats Configuration - Configure bitmask for enabling tx ppdu tlv's */ +#define DP_PPDU_TXLITE_STATS_BITMASK_CFG 0xFFFF + +#define NUM_RXDMA_RINGS_PER_PDEV 1 +#endif + +/* Tx configuration */ +#define MAX_LINK_DESC_BANKS 8 +#define MAX_TXDESC_POOLS 4 +#define MAX_TCL_DATA_RINGS 4 + +/* Rx configuration */ +#define MAX_RXDESC_POOLS 4 +#define MAX_REO_DEST_RINGS 4 +#define MAX_RX_MAC_RINGS 2 + +/* DP process status */ +#ifdef CONFIG_MCL +#define CONFIG_PROCESS_RX_STATUS 1 +#define CONFIG_PROCESS_TX_STATUS 1 +#else +#define CONFIG_PROCESS_RX_STATUS 0 +#define CONFIG_PROCESS_TX_STATUS 0 +#endif + +/* Miscellaneous configuration */ +#define MAX_IDLE_SCATTER_BUFS 16 +#define DP_MAX_IRQ_PER_CONTEXT 12 +#define DP_MAX_INTERRUPT_CONTEXTS 8 +#define DP_MAX_INTERRUPT_CONTEXTS 8 +#define MAX_HTT_METADATA_LEN 32 +#define MAX_NUM_PEER_ID_PER_PEER 8 +#define DP_MAX_TIDS 17 +#define DP_NON_QOS_TID 16 + +struct wlan_cfg_dp_pdev_ctxt; + +/** + * struct wlan_cfg_dp_soc_ctxt - Configuration parameters for SoC (core TxRx) + * @num_int_ctxts - Number of NAPI/Interrupt contexts to be registered for DP + * @max_clients - Maximum number of peers/stations supported by device + * @max_alloc_size - Maximum allocation size 
for any dynamic memory + * allocation request for this device + * @per_pdev_tx_ring - 0 - TCL ring is not mapped per radio + * 1 - Each TCL ring is mapped to one radio/pdev + * @num_tcl_data_rings - Number of TCL Data rings supported by device + * @per_pdev_rx_ring - 0 - REO ring is not mapped per radio + * 1 - Each REO ring is mapped to one radio/pdev + * @num_tx_desc_pool - Number of Tx Descriptor pools + * @num_tx_ext_desc_pool - Number of Tx MSDU extension Descriptor pools + * @num_tx_desc - Number of Tx Descriptors per pool + * @num_tx_ext_desc - Number of Tx MSDU extension Descriptors per pool + * @max_peer_id - Maximum value of peer id that FW can assign for a client + * @htt_packet_type - Default 802.11 encapsulation type for any VAP created + * @int_tx_ring_mask - Bitmap of Tx interrupts mapped to each NAPI/Intr context + * @int_rx_ring_mask - Bitmap of Rx interrupts mapped to each NAPI/Intr context + * @int_rx_mon_ring_mask - Bitmap of Rx monitor ring interrupts mapped to each + * NAPI/Intr context + * @int_rx_err_ring_mask - Bitmap of Rx err ring interrupts mapped to each + * NAPI/Intr context + * @int_wbm_rel_ring_mask - Bitmap of wbm rel ring interrupts mapped to each + * NAPI/Intr context + * @int_reo_status_ring_mask - Bitmap of reo status ring interrupts mapped to each + * NAPI/Intr context + * @int_ce_ring_mask - Bitmap of CE interrupts mapped to each NAPI/Intr context + * @lro_enabled - enable/disable lro feature + * @rx_hash - Enable hash based steering of rx packets + * @tso_enabled - enable/disable tso feature + * @napi_enabled - enable/disable interrupt mode for reaping tx and rx packets + * @tcp_Udp_Checksumoffload - enable/disable checksum offload + * @nss_cfg - nss configuration + */ +struct wlan_cfg_dp_soc_ctxt { + int num_int_ctxts; + int max_clients; + int max_alloc_size; + int per_pdev_tx_ring; + int num_tcl_data_rings; + int per_pdev_rx_ring; + int per_pdev_lmac_ring; + int num_reo_dest_rings; + int num_tx_desc_pool; + int 
num_tx_ext_desc_pool; + int num_tx_desc; + int num_tx_ext_desc; + int max_peer_id; + int htt_packet_type; + int int_batch_threshold_tx; + int int_timer_threshold_tx; + int int_batch_threshold_rx; + int int_timer_threshold_rx; + int int_batch_threshold_other; + int int_timer_threshold_other; + int tx_ring_size; + int tx_comp_ring_size; + int int_tx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_rx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_rx_mon_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_ce_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_rx_err_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_rx_wbm_rel_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_reo_status_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_rxdma2host_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int int_host2rxdma_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS]; + int hw_macid[MAX_PDEV_CNT]; + int base_hw_macid; + bool lro_enabled; + bool rx_hash; + bool tso_enabled; + bool napi_enabled; + bool tcp_udp_checksumoffload; + bool defrag_timeout_check; + int nss_cfg; +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 + uint32_t tx_flow_stop_queue_threshold; + uint32_t tx_flow_start_queue_offset; +#endif + uint32_t rx_defrag_min_timeout; +}; + +/** + * wlan_cfg_soc_attach() - Attach configuration interface for SoC + * + * Allocates context for Soc configuration parameters, + * Read configuration information from device tree/ini file and + * returns back handle + * + * Return: Handle to configuration context + */ +struct wlan_cfg_dp_soc_ctxt *wlan_cfg_soc_attach(void); + +/** + * wlan_cfg_soc_detach() - Detach soc configuration handle + * @wlan_cfg_ctx: soc configuration handle + * + * De-allocates memory allocated for SoC configuration + * + * Return:none + */ +void wlan_cfg_soc_detach(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/** + * wlan_cfg_pdev_attach() Attach configuration interface for pdev + * + * Allocates context for pdev configuration parameters, + * Read configuration information from device tree/ini file and 
+ * returns back handle + * + * Return: Handle to configuration context + */ +struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_pdev_attach(void); + +/** + * wlan_cfg_pdev_detach() Detach and free pdev configuration handle + * @wlan_cfg_pdev_ctx - PDEV Configuration Handle + * + * Return: void + */ +void wlan_cfg_pdev_detach(struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_pdev_ctx); + +void wlan_cfg_set_num_contexts(struct wlan_cfg_dp_soc_ctxt *cfg, int num); +void wlan_cfg_set_tx_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); +void wlan_cfg_set_rx_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); +void wlan_cfg_set_rx_mon_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); +void wlan_cfg_set_ce_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); +void wlan_cfg_set_rxbuf_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int context, + int mask); +void wlan_cfg_set_max_peer_id(struct wlan_cfg_dp_soc_ctxt *cfg, uint32_t val); + +int wlan_cfg_set_rx_err_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); +int wlan_cfg_set_rx_wbm_rel_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); +int wlan_cfg_set_reo_status_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); +/** + * wlan_cfg_get_num_contexts() - Number of interrupt contexts to be registered + * @wlan_cfg_ctx - Configuration Handle + * + * For WIN, DP_NUM_INTERRUPT_CONTEXTS will be equal to number of CPU cores. + * Each context (for linux it is a NAPI context) will have a tx_ring_mask, + * rx_ring_mask ,and rx_monitor_ring mask to indicate the rings + * that are processed by the handler. 
+ * + * Return: num_contexts + */ +int wlan_cfg_get_num_contexts(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/** + * wlan_cfg_get_tx_ring_mask() - Return Tx interrupt mask mapped to an + * interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_tx_ring_mask[context] + */ +int wlan_cfg_get_tx_ring_mask(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx, + int context); + +/** + * wlan_cfg_get_rx_ring_mask() - Return Rx interrupt mask mapped to an + * interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_rx_ring_mask[context] + */ +int wlan_cfg_get_rx_ring_mask(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx, + int context); + +/** + * wlan_cfg_get_rx_mon_ring_mask() - Return Rx monitor ring interrupt mask + * mapped to an interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_rx_mon_ring_mask[context] + */ +int wlan_cfg_get_rx_mon_ring_mask(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx, + int context); + +/** + * wlan_cfg_set_rxdma2host_ring_mask() - Set rxdma2host ring interrupt mask + * for the given interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + */ +void wlan_cfg_set_rxdma2host_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); + +/** + * wlan_cfg_get_rxdma2host_ring_mask() - Return rxdma2host ring interrupt mask + * mapped to an interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_rxdma2host_ring_mask[context] + */ +int wlan_cfg_get_rxdma2host_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context); + +/** + * wlan_cfg_set_host2rxdma_ring_mask() - Set host2rxdma ring interrupt mask + * for the given interrupt 
context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + */ +void wlan_cfg_set_host2rxdma_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context, int mask); + +/** + * wlan_cfg_get_host2rxdma_ring_mask() - Return host2rxdma ring interrupt mask + * mapped to an interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_host2rxdma_ring_mask[context] + */ +int wlan_cfg_get_host2rxdma_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, + int context); + +/** + * wlan_cfg_set_hw_macid() - Set HW MAC Id for the given PDEV index + * + * @wlan_cfg_ctx - Configuration Handle + * @pdev_idx - Index of SW PDEV + * @hw_macid - HW MAC Id + * + */ +void wlan_cfg_set_hw_macid(struct wlan_cfg_dp_soc_ctxt *cfg, int pdev_idx, + int hw_macid); + +/** + * wlan_cfg_get_hw_macid() - Get HW MAC Id for the given PDEV index + * + * @wlan_cfg_ctx - Configuration Handle + * @pdev_idx - Index of SW PDEV + * + * Return: HW MAC Id + */ +int wlan_cfg_get_hw_macid(struct wlan_cfg_dp_soc_ctxt *cfg, int pdev_idx); + +/** + * wlan_cfg_get_hw_mac_idx() - Get 0 based HW MAC index for the given + * PDEV index + * + * @wlan_cfg_ctx - Configuration Handle + * @pdev_idx - Index of SW PDEV + * + * Return: HW MAC index + */ +int wlan_cfg_get_hw_mac_idx(struct wlan_cfg_dp_soc_ctxt *cfg, int pdev_idx); + +/** + * wlan_cfg_get_rx_err_ring_mask() - Return Rx monitor ring interrupt mask + * mapped to an interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_rx_err_ring_mask[context] + */ +int wlan_cfg_get_rx_err_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int + context); + +/** + * wlan_cfg_get_rx_wbm_rel_ring_mask() - Return Rx monitor ring interrupt mask + * mapped to an interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the 
Interrupt/NAPI context + * + * Return: int_wbm_rel_ring_mask[context] + */ +int wlan_cfg_get_rx_wbm_rel_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int + context); + +/** + * wlan_cfg_get_reo_status_ring_mask() - Return Rx monitor ring interrupt mask + * mapped to an interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_reo_status_ring_mask[context] + */ +int wlan_cfg_get_reo_status_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int + context); + +/** + * wlan_cfg_get_ce_ring_mask() - Return CE ring interrupt mask + * mapped to an interrupt context + * @wlan_cfg_ctx - Configuration Handle + * @context - Numerical ID identifying the Interrupt/NAPI context + * + * Return: int_ce_ring_mask[context] + */ +int wlan_cfg_get_ce_ring_mask(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx, + int context); + +/** + * wlan_cfg_get_max_clients() - Return maximum number of peers/stations + * supported by device + * @wlan_cfg_ctx - Configuration Handle + * + * Return: max_clients + */ +uint32_t wlan_cfg_get_max_clients(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/** + * wlan_cfg_max_alloc_size() - Return Maximum allocation size for any dynamic + * memory allocation request for this device + * @wlan_cfg_ctx - Configuration Handle + * + * Return: max_alloc_size + */ +uint32_t wlan_cfg_max_alloc_size(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_per_pdev_tx_ring() - Return true if Tx rings are mapped as + * one per radio + * @wlan_cfg_ctx - Configuration Handle + * + * Return: per_pdev_tx_ring + */ +int wlan_cfg_per_pdev_tx_ring(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_num_tcl_data_rings() - Number of TCL Data rings supported by device + * @wlan_cfg_ctx + * + * Return: num_tcl_data_rings + */ +int wlan_cfg_num_tcl_data_rings(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_per_pdev_rx_ring() - Return true if Rx rings are mapped as + * one per 
radio + * @wlan_cfg_ctx + * + * Return: per_pdev_rx_ring + */ +int wlan_cfg_per_pdev_rx_ring(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_per_pdev_lmac_ring() - Return true if error rings are mapped as + * one per radio + * @wlan_cfg_ctx + * + * Return: return 1 if per pdev error ring else 0 + */ +int wlan_cfg_per_pdev_lmac_ring(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_num_reo_dest_rings() - Number of REO Data rings supported by device + * @wlan_cfg_ctx - Configuration Handle + * + * Return: num_reo_dest_rings + */ +int wlan_cfg_num_reo_dest_rings(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_pkt_type() - Default 802.11 encapsulation type + * @wlan_cfg_ctx - Configuration Handle + * + * Return: htt_pkt_type_ethernet + */ +int wlan_cfg_pkt_type(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_get_num_tx_desc_pool() - Number of Tx Descriptor pools for the + * device + * @wlan_cfg_ctx - Configuration Handle + * + * Return: num_tx_desc_pool + */ +int wlan_cfg_get_num_tx_desc_pool(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_set_num_tx_desc_pool() - Set the number of Tx Descriptor pools for the + * device + * @wlan_cfg_ctx - Configuration Handle + * @num_pool - Number of pool + */ +void wlan_cfg_set_num_tx_desc_pool(struct wlan_cfg_dp_soc_ctxt *cfg, int num_pool); + +/* + * wlan_cfg_get_num_tx_ext_desc_pool() - Number of Tx MSDU ext Descriptor + * pools + * @wlan_cfg_ctx - Configuration Handle + * + * Return: num_tx_ext_desc_pool + */ +int wlan_cfg_get_num_tx_ext_desc_pool( + struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_set_num_tx_ext_desc_pool() - Set the number of Tx MSDU ext Descriptor + * pools + * @wlan_cfg_ctx - Configuration Handle + * @num_pool - Number of pool + */ +void wlan_cfg_set_num_tx_ext_desc_pool(struct wlan_cfg_dp_soc_ctxt *cfg, int num_pool); + +/* + * wlan_cfg_get_num_tx_desc() - Number of Tx Descriptors per pool + * @wlan_cfg_ctx - 
Configuration Handle + * + * Return: num_tx_desc + */ +int wlan_cfg_get_num_tx_desc(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_set_num_tx_desc() - Set the number of Tx Descriptors per pool + * + * @wlan_cfg_ctx - Configuration Handle + * @num_desc: Number of descriptor + */ +void wlan_cfg_set_num_tx_desc(struct wlan_cfg_dp_soc_ctxt *cfg, int num_desc); + +/* + * wlan_cfg_get_num_tx_ext_desc() - Number of Tx MSDU extension Descriptors + * per pool + * @wlan_cfg_ctx - Configuration Handle + * + * Return: num_tx_ext_desc + */ +int wlan_cfg_get_num_tx_ext_desc(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx); + +/* + * wlan_cfg_set_num_tx_ext_desc() - Set the number of Tx MSDU extension Descriptors + * per pool + * @wlan_cfg_ctx - Configuration Handle + * @num_desc: Number of descriptor + */ +void wlan_cfg_set_num_tx_ext_desc(struct wlan_cfg_dp_soc_ctxt *cfg, int num_ext_desc); + +/* + * wlan_cfg_max_peer_id() - Get maximum peer ID + * @cfg: Configuration Handle + * + * Return: maximum peer ID + */ +uint32_t wlan_cfg_max_peer_id(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_dma_mon_buf_ring_size() - Return Size of monitor buffer ring + * @wlan_cfg_pdev_ctx + * + * Return: dma_mon_buf_ring_size + */ +int wlan_cfg_get_dma_mon_buf_ring_size( + struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_pdev_ctx); + +/* + * wlan_cfg_get_dma_mon_dest_ring_size() - Return Size of RxDMA Monitor + * Destination ring + * @wlan_cfg_pdev_ctx + * + * Return: dma_mon_dest_size + */ +int wlan_cfg_get_dma_mon_dest_ring_size( + struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_pdev_ctx); + +/* + * wlan_cfg_get_dma_mon_stat_ring_size() - Return size of Monitor Status ring + * @wlan_cfg_pdev_ctx + * + * Return: dma_mon_stat_ring_size + */ +int wlan_cfg_get_dma_mon_stat_ring_size( + struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_pdev_ctx); + +/* + * wlan_cfg_get_rx_dma_buf_ring_size() - Return Size of RxDMA buffer ring + * @wlan_cfg_pdev_ctx + * + * Return: rx_dma_buf_ring_size + */ +int 
wlan_cfg_get_rx_dma_buf_ring_size( + struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_pdev_ctx); + +/* + * wlan_cfg_get_num_mac_rings() - Return the number of MAC RX DMA rings + * per pdev + * @wlan_cfg_pdev_ctx + * + * Return: number of mac DMA rings per pdev + */ +int wlan_cfg_get_num_mac_rings(struct wlan_cfg_dp_pdev_ctxt *cfg); + +/* + * wlan_cfg_is_lro_enabled - Return LRO enabled/disabled + * @wlan_cfg_pdev_ctx + * + * Return: true - LRO enabled false - LRO disabled + */ +bool wlan_cfg_is_lro_enabled(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_is_rx_hash_enabled - Return RX hash enabled/disabled + * @wlan_cfg_pdev_ctx + * + * Return: true - enabled false - disabled + */ +bool wlan_cfg_is_rx_hash_enabled(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_set_rx_hash - set rx hash enabled/disabled + * @wlan_cfg_soc_ctx + * @rx_hash + */ +void wlan_cfg_set_rx_hash(struct wlan_cfg_dp_soc_ctxt *cfg, bool rx_hash); + +/* + * wlan_cfg_get_dp_pdev_nss_enabled - Return pdev nss enabled/disabled + * @wlan_cfg_pdev_ctx + * + * Return: 1 - enabled 0 - disabled + */ +int wlan_cfg_get_dp_pdev_nss_enabled(struct wlan_cfg_dp_pdev_ctxt *cfg); + +/* + * wlan_cfg_set_dp_pdev_nss_enabled - set pdev nss enabled/disabled + * @wlan_cfg_pdev_ctx + */ +void wlan_cfg_set_dp_pdev_nss_enabled(struct wlan_cfg_dp_pdev_ctxt *cfg, int nss_enabled); + +/* + * wlan_cfg_get_dp_soc_nss_cfg - Return soc nss config + * @wlan_cfg_pdev_ctx + * + * Return: nss_cfg + */ +int wlan_cfg_get_dp_soc_nss_cfg(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_set_dp_soc_nss_cfg - set soc nss config + * @wlan_cfg_pdev_ctx + * + */ +void wlan_cfg_set_dp_soc_nss_cfg(struct wlan_cfg_dp_soc_ctxt *cfg, int nss_cfg); + +/* + * wlan_cfg_get_int_batch_threshold_tx - Get interrupt mitigation cfg for Tx + * @wlan_cfg_soc_ctx + * + * Return: Batch threshold + */ +int wlan_cfg_get_int_batch_threshold_tx(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_int_timer_threshold_tx - Get interrupt mitigation cfg for 
Tx + * @wlan_cfg_soc_ctx + * + * Return: Timer threshold + */ +int wlan_cfg_get_int_timer_threshold_tx(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_int_batch_threshold_rx - Get interrupt mitigation cfg for Rx + * @wlan_cfg_soc_ctx + * + * Return: Batch threshold + */ +int wlan_cfg_get_int_batch_threshold_rx(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_int_timer_threshold_rx - Get interrupt mitigation cfg for Rx + * @wlan_cfg_soc_ctx + * + * Return: Timer threshold + */ +int wlan_cfg_get_int_timer_threshold_rx(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_int_batch_threshold_other - Get interrupt mitigation cfg for other srngs + * @wlan_cfg_soc_ctx + * + * Return: Batch threshold + */ +int wlan_cfg_get_int_batch_threshold_other(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_int_timer_threshold_other - Get interrupt mitigation cfg for other srngs + * @wlan_cfg_soc_ctx + * + * Return: Timer threshold + */ +int wlan_cfg_get_int_timer_threshold_other(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_get_checksum_offload - Get checksum offload enable or disable status + * @wlan_cfg_soc_ctx + * + * Return: Checksum offload enable or disable + */ +int wlan_cfg_get_checksum_offload(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_tx_ring_size - Get Tx DMA ring size (TCL Data Ring) + * @wlan_cfg_soc_ctx + * + * Return: Tx Ring Size + */ +int wlan_cfg_tx_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg); + +/* + * wlan_cfg_tx_comp_ring_size - Get Tx completion ring size (WBM Ring) + * @wlan_cfg_soc_ctx + * + * Return: Tx Completion ring size + */ +int wlan_cfg_tx_comp_ring_size(struct wlan_cfg_dp_soc_ctxt *cfg); +#ifdef QCA_LL_TX_FLOW_CONTROL_V2 +int wlan_cfg_get_tx_flow_stop_queue_th(struct wlan_cfg_dp_soc_ctxt *cfg); + +int wlan_cfg_get_tx_flow_start_queue_offset(struct wlan_cfg_dp_soc_ctxt *cfg); +#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */ +int wlan_cfg_get_rx_defrag_min_timeout(struct wlan_cfg_dp_soc_ctxt *cfg); + +int 
wlan_cfg_get_defrag_timeout_check(struct wlan_cfg_dp_soc_ctxt *cfg); +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_action_oui_tlv.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_action_oui_tlv.h new file mode 100644 index 0000000000000000000000000000000000000000..efc7fa6e1e5f067dfa6b562bf16a06c60de7a028 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_action_oui_tlv.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _WMI_UNIFIED_ACTION_OUI_TLV_H_ +#define _WMI_UNIFIED_ACTION_OUI_TLV_H_ + +#ifdef WLAN_FEATURE_ACTION_OUI + +#include "wmi.h" +#include "wmi_unified.h" +#include "wmi_unified_api.h" +#include "wmi_unified_param.h" + +/** + * wmi_get_action_oui_info_mask() - convert info mask to firmware specific + * @info_mask: host specific info mask + * + * Return: firmware specific information mask + */ +uint32_t wmi_get_action_oui_info_mask(uint32_t info_mask); + +/** + * wmi_get_action_oui_id() - convert action id to firmware specific + * @action_id: host specific action id + * @id: output pointer to hold converted fw specific action id + * + * Return: true on conversion else failure + */ +bool wmi_get_action_oui_id(enum action_oui_id action_id, + wmi_vendor_oui_action_id *id); + +/** + * wmi_fill_oui_extensions() - populates wmi_vendor_oui_ext array + * @extension: pointer to user supplied action oui extensions + * @no_oui_extns: number of action oui extensions + * @cmd_ext: output pointer to TLV + * + * This function parses the user supplied input data and populates the + * array of variable structures TLV in WMI_PDEV_CONFIG_VENDOR_OUI_ACTION_CMDID + * + * Return: None + */ +void wmi_fill_oui_extensions(struct action_oui_extension *extension, + uint32_t no_oui_extns, + wmi_vendor_oui_ext *cmd_ext); + +/** + * wmi_fill_oui_extensions_buffer() - populates data buffer in action oui cmd + * @extension: pointer to user supplied action oui extensions + * @cmd_ext: pointer to vendor_oui_ext TLV in action oui cmd + * @no_oui_extns: number of action oui extensions + * @rem_var_buf_len: remaining length of buffer to be populated + * @var_buf: output pointer to hold variable length data + * + * This function parses the user supplied input data and populates the variable + * buffer of type array byte TLV in WMI_PDEV_CONFIG_VENDOR_OUI_ACTION_CMDID + * + * Return: QDF_STATUS_SUCCESS for successful fill else QDF_STATUS_E_INVAL + */ +QDF_STATUS 
+wmi_fill_oui_extensions_buffer(struct action_oui_extension *extension, + wmi_vendor_oui_ext *cmd_ext, + uint32_t no_oui_extns, uint32_t rem_var_buf_len, + uint8_t *var_buf); + +/** + * send_action_oui_cmd_tlv() - send action oui cmd to firmware + * @wmi_handle: wmi handler + * @req: pointer to action oui info + * + * Return: QDF_STATUS_SUCCESS on successful transmission else + * QDF_STATUS_E_INVAL or QDF_STATUS_E_NOMEM + */ +QDF_STATUS +send_action_oui_cmd_tlv(wmi_unified_t wmi_handle, + struct action_oui_request *req); + +#endif /* WLAN_FEATURE_ACTION_OUI */ + +#endif /* _WMI_UNIFIED_ACTION_OUI_TLV_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_apf_tlv.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_apf_tlv.h new file mode 100644 index 0000000000000000000000000000000000000000..cbeba3e2d1d3344d50ffcab8730154911ba47281 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_apf_tlv.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _WMI_UNIFIED_APF_TLV_H_ +#define _WMI_UNIFIED_APF_TLV_H_ + +#ifdef FEATURE_WLAN_APF + +#include "wmi_unified.h" +#include "wmi_unified_api.h" +#include "wmi_unified_param.h" + +/** + * wmi_send_set_active_apf_mode_cmd_tlv() - configure active APF mode in FW + * @wmi_handle: the WMI handle + * @vdev_id: the Id of the vdev to apply the configuration to + * @ucast_mode: the active APF mode to configure for unicast packets + * @mcast_bcast_mode: the active APF mode to configure for multicast/broadcast + * packets + * + * Return: QDF status + */ +QDF_STATUS +wmi_send_set_active_apf_mode_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, + enum wmi_host_active_apf_mode ucast_mode, + enum wmi_host_active_apf_mode + mcast_bcast_mode); + +/** + * wmi_send_apf_enable_cmd_tlv() - send cmd to enable/disable APF interpreter + * @wmi_handle: the WMI handle + * @vdev_id: VDEV on which APF interpreter is to be enabled/disabled + * @enable: true: enable, false: disable + * + * Return: QDF status + */ +QDF_STATUS +wmi_send_apf_enable_cmd_tlv(wmi_unified_t wmi_handle, uint32_t vdev_id, + bool enable); + +/** + * wmi_send_apf_write_work_memory_cmd_tlv() - send cmd to write into the APF + * work + * memory + * @wmi_handle: the WMI handle + * @apf_write_params: parameters and buffer pointer for the write + * + * Return: QDF status + */ +QDF_STATUS +wmi_send_apf_write_work_memory_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_apf_write_memory_params + *apf_write_params); + +/** + * wmi_send_apf_read_work_memory_cmd_tlv() - send cmd to read part of APF + * work memory + * @wmi_handle: the WMI handle + * @apf_read_params: contains relative address and length to read from + * + * Return: QDF status + */ +QDF_STATUS +wmi_send_apf_read_work_memory_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_apf_read_memory_params + *apf_read_params); + +/** + * wmi_extract_apf_read_memory_resp_event_tlv() - extract read memory response + * event into the given structure pointer + * 
@wmi_handle: the WMI handle + * @evt_buf: Pointer to the event buffer + * @resp: pointer to memory to extract event parameters into + * + * Return: QDF status + */ +QDF_STATUS +wmi_extract_apf_read_memory_resp_event_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_apf_read_memory_resp_event_params + *resp); +#endif /* FEATURE_WLAN_APF */ + +#endif /* _WMI_UNIFIED_APF_TLV_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h new file mode 100644 index 0000000000000000000000000000000000000000..99268168658f3883e27b4812a00111e27abcc073 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h @@ -0,0 +1,2465 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains the API definitions for the Unified Wireless Module + * Interface (WMI). 
+ */ + +#ifndef _WMI_UNIFIED_API_H_ +#define _WMI_UNIFIED_API_H_ + +#include +#ifdef CONFIG_MCL +#include "wmi.h" +#endif +#include "htc_api.h" +#include "wmi_unified_param.h" +#include "service_ready_param.h" +#include "wlan_objmgr_psoc_obj.h" +#include "wlan_mgmt_txrx_utils_api.h" +#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD +#include "wmi_unified_pmo_api.h" +#endif +#ifdef CONVERGED_P2P_ENABLE +#include "wlan_p2p_public_struct.h" +#endif +#include "wlan_scan_public_structs.h" +#ifdef WLAN_FEATURE_DISA +#include "wlan_disa_public_struct.h" +#endif +#ifdef WLAN_FEATURE_ACTION_OUI +#include "wlan_action_oui_public_struct.h" +#endif +#ifdef WLAN_FEATURE_NAN_CONVERGENCE +#include "nan_public_structs.h" +#endif +#ifdef WLAN_SUPPORT_GREEN_AP +#include "wlan_green_ap_api.h" +#endif +#ifdef WLAN_FEATURE_DSRC +#include "wlan_ocb_public_structs.h" +#endif +#ifdef WLAN_SUPPORT_TWT +#include "wmi_unified_twt_param.h" +#include "wmi_unified_twt_api.h" +#endif + +#ifdef FEATURE_WLAN_EXTSCAN +#include "wmi_unified_extscan_api.h" +#endif + +#ifdef IPA_OFFLOAD +#include "wlan_ipa_public_struct.h" +#endif + +typedef qdf_nbuf_t wmi_buf_t; +#define wmi_buf_data(_buf) qdf_nbuf_data(_buf) + +#define WMI_LOGD(args ...) \ + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, ## args) +#define WMI_LOGI(args ...) \ + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO, ## args) +#define WMI_LOGW(args ...) \ + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_WARN, ## args) +#define WMI_LOGE(args ...) \ + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, ## args) +#define WMI_LOGP(args ...) 
\ + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_FATAL, ## args) + +/* Number of bits to shift to combine 32 bit integer to 64 bit */ +#define WMI_LOWER_BITS_SHIFT_32 0x20 + +#define PHYERROR_MAX_BUFFER_LENGTH 0x7F000000 + +struct wmi_soc; +struct policy_mgr_dual_mac_config; +/** + * struct wmi_ops - service callbacks to upper layer + * @service_ready_cbk: service ready callback + * @service_ready_ext_cbk: service ready ext callback + * @ready_cbk: ready calback + * @wma_process_fw_event_handler_cbk: generic event handler callback + */ +struct wmi_rx_ops { + + int (*wma_process_fw_event_handler_cbk)(void *ctx, + void *ev, uint8_t rx_ctx); +}; + +/** + * enum wmi_target_type - type of supported wmi command + * @WMI_TLV_TARGET: tlv based target + * @WMI_NON_TLV_TARGET: non-tlv based target + * + */ +enum wmi_target_type { + WMI_TLV_TARGET, + WMI_NON_TLV_TARGET, + WMI_MAX_TARGET_TYPE +}; + +/** + * enum wmi_rx_exec_ctx - wmi rx execution context + * @WMI_RX_WORK_CTX: work queue context execution provided by WMI layer + * @WMI_RX_UMAC_CTX: execution context provided by umac layer + * + */ +enum wmi_rx_exec_ctx { + WMI_RX_WORK_CTX, + WMI_RX_UMAC_CTX +}; + +/** + * struct wmi_unified_attach_params - wmi init parameters + * @param osdev : NIC device + * @param target_type : type of supported wmi command + * @param use_cookie : flag to indicate cookie based allocation + * @param ops : handle to wmi ops + * @psoc : objmgr psoc + * @max_commands : max commands + */ +struct wmi_unified_attach_params { + osdev_t osdev; + enum wmi_target_type target_type; + bool use_cookie; + struct wmi_rx_ops *rx_ops; + struct wlan_objmgr_psoc *psoc; + uint16_t max_commands; +}; + +/** + * attach for unified WMI + * + * @param scn_handle : handle to SCN. 
+ * @param params : attach params for WMI + * + */ +void *wmi_unified_attach(void *scn_handle, + struct wmi_unified_attach_params *params); + + + +/** + * wmi_mgmt_cmd_record() - Wrapper function for mgmt command logging macro + * + * @wmi_handle: wmi handle + * @cmd: mgmt command + * @header: pointer to 802.11 header + * @vdev_id: vdev id + * @chanfreq: channel frequency + * + * Return: none + */ +void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd, + void *header, uint32_t vdev_id, uint32_t chanfreq); + +/** + * detach for unified WMI + * + * @param wmi_handle : handle to WMI. + * @return void. + */ +void wmi_unified_detach(struct wmi_unified *wmi_handle); + +/** + * API to sync time between host and firmware + * + * @param wmi_handle : handle to WMI. + * @return void. + */ +void wmi_send_time_stamp_sync_cmd_tlv(void *wmi_hdl); + +void +wmi_unified_remove_work(struct wmi_unified *wmi_handle); + +/** + * generic function to allocate WMI buffer + * + * @param wmi_handle : handle to WMI. + * @param len : length of the buffer + * @return wmi_buf_t. + */ +#ifdef NBUF_MEMORY_DEBUG +#define wmi_buf_alloc(h, l) wmi_buf_alloc_debug(h, l, __FILE__, __LINE__) +wmi_buf_t +wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint16_t len, + uint8_t *file_name, uint32_t line_num); +#else +/** + * wmi_buf_alloc() - generic function to allocate WMI buffer + * @wmi_handle: handle to WMI. + * @len: length of the buffer + * + * Return: return wmi_buf_t or null if memory alloc fails + */ +#define wmi_buf_alloc(wmi_handle, len) \ + wmi_buf_alloc_fl(wmi_handle, len, __func__, __LINE__) + +wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len, + const char *func, uint32_t line); +#endif + +/** + * generic function frees WMI net buffer + * + * @param net_buf : Pointer ot net_buf to be freed + */ +void wmi_buf_free(wmi_buf_t net_buf); + +/** + * generic function to send unified WMI command + * + * @param wmi_handle : handle to WMI. 
+ * @param buf : wmi command buffer + * @param buflen : wmi command buffer length + * @param cmd_id : WMI cmd id + * @return 0 on success and -ve on failure. + */ +QDF_STATUS +wmi_unified_cmd_send(wmi_unified_t wmi_handle, wmi_buf_t buf, uint32_t buflen, + uint32_t cmd_id); + +/** + * wmi_unified_register_event() - WMI event handler + * registration function for converged components + * + * @wmi_handle: handle to WMI. + * @event_id: WMI event ID + * @handler_func: Event handler call back function + * + * @return 0 on success and -ve on failure. + */ +int +wmi_unified_register_event(wmi_unified_t wmi_handle, + uint32_t event_id, + wmi_unified_event_handler handler_func); + +/** + * wmi_unified_register_event_handler() - WMI event handler + * registration function + * + * @wmi_handle: handle to WMI. + * @event_id: WMI event ID + * @handler_func: Event handler call back function + * @rx_ctx: rx event processing context + * + * @return 0 on success and -ve on failure. + */ +int +wmi_unified_register_event_handler(wmi_unified_t wmi_handle, + wmi_conv_event_id event_id, + wmi_unified_event_handler handler_func, + uint8_t rx_ctx); + +/** + * WMI event handler unregister function for converged componets + * + * @param wmi_handle : handle to WMI. + * @param event_id : WMI event ID + * @return 0 on success and -ve on failure. + */ +int +wmi_unified_unregister_event(wmi_unified_t wmi_handle, + uint32_t event_id); + +/** + * WMI event handler unregister function + * + * @param wmi_handle : handle to WMI. + * @param event_id : WMI event ID + * @return 0 on success and -ve on failure. + */ +int +wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle, + wmi_conv_event_id event_id); + +/** + * request wmi to connet its htc service. + * @param wmi_handle : handle to WMI. + * @param htc_handle : handle to HTC. 
+ * @return void + */ +QDF_STATUS +wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle, + void *htc_handle); + +/* + * WMI API to verify the host has enough credits to suspend + * @param wmi_handle : handle to WMI. + */ + +int wmi_is_suspend_ready(wmi_unified_t wmi_handle); + +/** + * WMI API to get updated host_credits + * @param wmi_handle : handle to WMI. + */ + +int wmi_get_host_credits(wmi_unified_t wmi_handle); + +/** + * WMI API to get WMI Pending Commands in the HTC queue + * @param wmi_handle : handle to WMI. + */ + +int wmi_get_pending_cmds(wmi_unified_t wmi_handle); + +/** + * WMI API to set target suspend state + * @param wmi_handle : handle to WMI. + * @param val : suspend state boolean + */ +void wmi_set_target_suspend(wmi_unified_t wmi_handle, bool val); + +/** + * WMI API to set bus suspend state + * @param wmi_handle: handle to WMI. + * @param val: suspend state boolean + */ +void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val); + +/** + * WMI API to set crash injection state + * @param wmi_handle: handle to WMI. + * @param val: crash injection state boolean + */ +void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag); + +/** + * WMI API to set target assert + * @param wmi_handle: handle to WMI. + * @param val: target assert config value. + * + * Return: none. + */ +void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val); + +/** + * generic function to block unified WMI command + * @param wmi_handle : handle to WMI. + * @return 0 on success and -ve on failure. + */ +int +wmi_stop(wmi_unified_t wmi_handle); + +/** + * API to flush all the previous packets associated with the wmi endpoint + * + * @param wmi_handle : handle to WMI. + */ +void +wmi_flush_endpoint(wmi_unified_t wmi_handle); + +/** + * wmi_pdev_id_conversion_enable() - API to enable pdev_id conversion in WMI + * By default pdev_id conversion is not done in WMI. + * This API can be used enable conversion in WMI. 
+ * @param wmi_handle : handle to WMI + * Return none + */ +void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle); + +/** + * API to handle wmi rx event after UMAC has taken care of execution + * context + * + * @param wmi_handle : handle to WMI. + * @param evt_buf : wmi event buffer + */ +void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf); +#ifdef FEATURE_RUNTIME_PM +void +wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, bool val); +bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle); +#else +static inline void +wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, bool val) +{ + return; +} +static inline bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle) +{ + return false; +} +#endif + +void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle); + +void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx); + +/** + * UMAC Callback to process fw event. + * @param wmi_handle : handle to WMI. + * @param evt_buf : wmi event buffer + */ +void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf); +uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle); + + +QDF_STATUS wmi_unified_vdev_create_send(void *wmi_hdl, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct vdev_create_params *param); + +QDF_STATUS wmi_unified_vdev_delete_send(void *wmi_hdl, + uint8_t if_id); + +/** + * wmi_unified_vdev_nss_chain_params_send() - send VDEV nss chain params to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @nss_chains_user_cfg: user configured params to send + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_nss_chain_params_send(void *wmi_hdl, + uint8_t vdev_id, + struct mlme_nss_chains *nss_chains_user_cfg); + +QDF_STATUS wmi_unified_vdev_stop_send(void *wmi_hdl, + uint8_t vdev_id); + +QDF_STATUS wmi_unified_vdev_up_send(void *wmi_hdl, + uint8_t bssid[IEEE80211_ADDR_LEN], + struct vdev_up_params *params); + 
+QDF_STATUS wmi_unified_vdev_down_send(void *wmi_hdl, + uint8_t vdev_id); + +QDF_STATUS wmi_unified_vdev_start_send(void *wmi_hdl, + struct vdev_start_params *req); +/** + * wmi_unified_vdev_set_nac_rssi_send() - send NAC_RSSI command to fw + * @param wmi_handle : handle to WMI + * @param req : pointer to hold nac rssi request data + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_set_nac_rssi_send(void *wmi_hdl, + struct vdev_scan_nac_rssi_params *req); + +QDF_STATUS wmi_unified_hidden_ssid_vdev_restart_send(void *wmi_hdl, + struct hidden_ssid_vdev_restart_params *restart_params); + +QDF_STATUS wmi_unified_vdev_set_param_send(void *wmi_hdl, + struct vdev_set_params *param); + +QDF_STATUS wmi_unified_peer_delete_send(void *wmi_hdl, + uint8_t + peer_addr[IEEE80211_ADDR_LEN], + uint8_t vdev_id); + +QDF_STATUS wmi_unified_peer_unmap_conf_send(void *wmi_hdl, + uint8_t vdev_id, + uint32_t peer_id_cnt, + uint16_t *peer_id_list); + +QDF_STATUS wmi_unified_peer_flush_tids_send(void *wmi_hdl, + uint8_t peer_addr[IEEE80211_ADDR_LEN], + struct peer_flush_params *param); + +QDF_STATUS wmi_set_peer_param_send(void *wmi_hdl, + uint8_t peer_addr[IEEE80211_ADDR_LEN], + struct peer_set_params *param); + +QDF_STATUS wmi_unified_peer_create_send(void *wmi_hdl, + struct peer_create_params *param); + +QDF_STATUS wmi_unified_stats_request_send(void *wmi_hdl, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct stats_request_params *param); + +QDF_STATUS wmi_unified_green_ap_ps_send(void *wmi_hdl, + uint32_t value, uint8_t pdev_id); + +#ifdef FEATURE_WLAN_D0WOW +QDF_STATUS wmi_unified_d0wow_enable_send(void *wmi_hdl, + uint8_t mac_id); +QDF_STATUS wmi_unified_d0wow_disable_send(void *wmi_hdl, + uint8_t mac_id); +#endif + +QDF_STATUS wmi_unified_wow_enable_send(void *wmi_hdl, + struct wow_cmd_params *param, + uint8_t mac_id); + +QDF_STATUS wmi_unified_wow_wakeup_send(void *wmi_hdl); + +QDF_STATUS 
wmi_unified_wow_add_wakeup_event_send(void *wmi_hdl, + struct wow_add_wakeup_params *param); + +QDF_STATUS wmi_unified_wow_add_wakeup_pattern_send(void *wmi_hdl, + struct wow_add_wakeup_pattern_params *param); + +QDF_STATUS wmi_unified_wow_remove_wakeup_pattern_send(void *wmi_hdl, + struct wow_remove_wakeup_pattern_params *param); + +#ifndef CONFIG_MCL +QDF_STATUS wmi_unified_packet_log_enable_send(void *wmi_hdl, + WMI_HOST_PKTLOG_EVENT PKTLOG_EVENT, uint8_t mac_id); +#else +QDF_STATUS wmi_unified_packet_log_enable_send(void *wmi_hdl, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct packet_enable_params *param); +#endif + +QDF_STATUS wmi_unified_packet_log_disable_send(void *wmi_hdl, uint8_t mac_id); + +QDF_STATUS wmi_unified_suspend_send(void *wmi_hdl, + struct suspend_params *param, + uint8_t mac_id); + +QDF_STATUS wmi_unified_resume_send(void *wmi_hdl, + uint8_t mac_id); + +QDF_STATUS +wmi_unified_pdev_param_send(void *wmi_hdl, + struct pdev_params *param, + uint8_t mac_id); + +QDF_STATUS wmi_unified_beacon_tmpl_send_cmd(void *wmi_hdl, + struct beacon_tmpl_params *param); + + +QDF_STATUS wmi_unified_beacon_send_cmd(void *wmi_hdl, + struct beacon_params *param); + +QDF_STATUS wmi_unified_peer_assoc_send(void *wmi_hdl, + struct peer_assoc_params *param); + +QDF_STATUS wmi_unified_sta_ps_cmd_send(void *wmi_hdl, + struct sta_ps_params *param); + +QDF_STATUS wmi_unified_ap_ps_cmd_send(void *wmi_hdl, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct ap_ps_params *param); + +QDF_STATUS wmi_unified_scan_start_cmd_send(void *wmi_hdl, + struct scan_req_params *param); + +QDF_STATUS wmi_unified_scan_stop_cmd_send(void *wmi_hdl, + struct scan_cancel_param *param); + +QDF_STATUS wmi_unified_scan_chan_list_cmd_send(void *wmi_hdl, + struct scan_chan_list_params *param); + + +QDF_STATUS wmi_crash_inject(void *wmi_hdl, + struct crash_inject *param); + +QDF_STATUS wmi_unified_pdev_utf_cmd_send(void *wmi_hdl, + struct pdev_utf_params *param, + uint8_t mac_id); + +#ifdef 
FEATURE_FW_LOG_PARSING +QDF_STATUS wmi_unified_dbglog_cmd_send(void *wmi_hdl, + struct dbglog_params *param); +#else +static inline QDF_STATUS +wmi_unified_dbglog_cmd_send(void *wmi_hdl, + struct dbglog_params *param) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +QDF_STATUS wmi_mgmt_unified_cmd_send(void *wmi_hdl, + struct wmi_mgmt_params *param); + +QDF_STATUS wmi_offchan_data_tx_cmd_send(void *wmi_hdl, + struct wmi_offchan_data_tx_params *param); + +QDF_STATUS wmi_unified_modem_power_state(void *wmi_hdl, + uint32_t param_value); + +QDF_STATUS wmi_unified_set_sta_ps_mode(void *wmi_hdl, + uint32_t vdev_id, uint8_t val); +QDF_STATUS +wmi_unified_set_sta_uapsd_auto_trig_cmd(void *wmi_hdl, + struct sta_uapsd_trig_params *param); + +QDF_STATUS wmi_unified_get_temperature(void *wmi_hdl); + +QDF_STATUS wmi_unified_set_p2pgo_oppps_req(void *wmi_hdl, + struct p2p_ps_params *oppps); + +QDF_STATUS wmi_unified_set_p2pgo_noa_req_cmd(void *wmi_hdl, + struct p2p_ps_params *noa); + +#ifdef CONVERGED_P2P_ENABLE +QDF_STATUS wmi_unified_p2p_lo_start_cmd(void *wmi_hdl, + struct p2p_lo_start *param); + +QDF_STATUS wmi_unified_p2p_lo_stop_cmd(void *wmi_hdl, uint8_t vdev_id); +#endif + +QDF_STATUS wmi_unified_set_smps_params(void *wmi_hdl, uint8_t vdev_id, + int value); + +QDF_STATUS wmi_unified_set_mimops(void *wmi_hdl, uint8_t vdev_id, int value); + +#ifdef WLAN_FEATURE_DSRC +/** + * wmi_unified_ocb_start_timing_advert() - start sending the timing + * advertisement frames on a channel + * @wmi_handle: pointer to the wmi handle + * @timing_advert: pointer to the timing advertisement struct + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_ocb_start_timing_advert(struct wmi_unified *wmi_handle, + struct ocb_timing_advert_param *timing_advert); + +/** + * wmi_unified_ocb_stop_timing_advert() - stop sending the timing + * advertisement frames on a channel + * @wmi_handle: pointer to the wmi handle + * @timing_advert: pointer to 
the timing advertisement struct + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_ocb_stop_timing_advert(struct wmi_unified *wmi_handle, + struct ocb_timing_advert_param *timing_advert); + +/** + * wmi_unified_ocb_set_config() - send the OCB config to the FW + * @wmi_handle: pointer to the wmi handle + * @config: the OCB configuration + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures + */ +QDF_STATUS wmi_unified_ocb_set_config(struct wmi_unified *wmi_handle, + struct ocb_config *config); + +/** + * wmi_unified_ocb_get_tsf_timer() - get ocb tsf timer val + * @wmi_handle: pointer to the wmi handle + * @req: request for tsf timer + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_ocb_get_tsf_timer(struct wmi_unified *wmi_handle, + struct ocb_get_tsf_timer_param *req); + +/** + * wmi_unified_ocb_set_utc_time_cmd() - get ocb tsf timer val + * @wmi_handle: pointer to the wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_ocb_set_utc_time_cmd(struct wmi_unified *wmi_handle, + struct ocb_utc_param *utc); + +/** + * wmi_unified_dcc_get_stats_cmd() - get the DCC channel stats + * @wmi_handle: pointer to the wmi handle + * @get_stats_param: pointer to the dcc stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_dcc_get_stats_cmd(struct wmi_unified *wmi_handle, + struct ocb_dcc_get_stats_param *get_stats_param); + +/** + * wmi_unified_dcc_clear_stats() - command to clear the DCC stats + * @wmi_handle: pointer to the wmi handle + * @clear_stats_param: parameters to the command + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_dcc_clear_stats(struct wmi_unified *wmi_handle, + struct ocb_dcc_clear_stats_param 
*clear_stats_param); + +/** + * wmi_unified_dcc_update_ndl() - command to update the NDL data + * @wmi_handle: pointer to the wmi handle + * @update_ndl_param: pointer to the request parameters + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures + */ +QDF_STATUS wmi_unified_dcc_update_ndl(struct wmi_unified *wmi_handle, + struct ocb_dcc_update_ndl_param *update_ndl_param); + +/** + * wmi_extract_ocb_set_channel_config_resp() - extract status from wmi event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @status: status buffer + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS +wmi_extract_ocb_set_channel_config_resp(struct wmi_unified *wmi_handle, + void *evt_buf, + uint32_t *status); + +/** + * wmi_extract_ocb_tsf_timer() - extract tsf timer from wmi event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @resp: tsf timer + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS wmi_extract_ocb_tsf_timer(struct wmi_unified *wmi_handle, + void *evt_buf, + struct ocb_get_tsf_timer_response *resp); + +/** + * wmi_extract_dcc_update_ndl_resp() - extract NDL update from wmi event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @resp: ndl update status + * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS wmi_extract_dcc_update_ndl_resp(struct wmi_unified *wmi_handle, + void *evt_buf, struct ocb_dcc_update_ndl_response *resp); + +/** + * wmi_extract_dcc_stats() - extract DCC stats from wmi event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @resp: DCC stats + * + * Since length of the response is variable, response buffer will be allocated. + * The caller must free the response buffer. 
+ * + * Return: QDF_STATUS_SUCCESS on success + */ +QDF_STATUS wmi_extract_dcc_stats(struct wmi_unified *wmi_handle, + void *evt_buf, + struct ocb_dcc_get_stats_response **response); +#endif + +QDF_STATUS wmi_unified_lro_config_cmd(void *wmi_hdl, + struct wmi_lro_config_cmd_t *wmi_lro_cmd); + +QDF_STATUS wmi_unified_set_thermal_mgmt_cmd(void *wmi_hdl, + struct thermal_cmd_params *thermal_info); + +QDF_STATUS wmi_unified_peer_rate_report_cmd(void *wmi_hdl, + struct wmi_peer_rate_report_params *rate_report_params); + +QDF_STATUS wmi_unified_set_mcc_channel_time_quota_cmd + (void *wmi_hdl, + uint32_t adapter_1_chan_freq, + uint32_t adapter_1_quota, uint32_t adapter_2_chan_freq); + +QDF_STATUS wmi_unified_set_mcc_channel_time_latency_cmd + (void *wmi_hdl, + uint32_t mcc_channel_freq, uint32_t mcc_channel_time_latency); + +QDF_STATUS wmi_unified_set_enable_disable_mcc_adaptive_scheduler_cmd( + void *wmi_hdl, uint32_t mcc_adaptive_scheduler, + uint32_t pdev_id); + +#ifdef CONFIG_MCL +QDF_STATUS wmi_unified_bcn_buf_ll_cmd(void *wmi_hdl, + wmi_bcn_send_from_host_cmd_fixed_param *param); +#endif + +QDF_STATUS wmi_unified_set_sta_sa_query_param_cmd(void *wmi_hdl, + uint8_t vdev_id, uint32_t max_retries, + uint32_t retry_interval); + + +QDF_STATUS wmi_unified_set_sta_keep_alive_cmd(void *wmi_hdl, + struct sta_params *params); + +QDF_STATUS wmi_unified_vdev_set_gtx_cfg_cmd(void *wmi_hdl, uint32_t if_id, + struct wmi_gtx_config *gtx_info); + +QDF_STATUS wmi_unified_process_update_edca_param(void *wmi_hdl, + uint8_t vdev_id, bool mu_edca_param, + struct wmi_host_wme_vparams wmm_vparams[WMI_MAX_NUM_AC]); + +QDF_STATUS wmi_unified_probe_rsp_tmpl_send_cmd(void *wmi_hdl, + uint8_t vdev_id, + struct wmi_probe_resp_params *probe_rsp_info); + +QDF_STATUS wmi_unified_setup_install_key_cmd(void *wmi_hdl, + struct set_key_params *key_params); + +#ifdef WLAN_FEATURE_DISA +/** + * wmi_unified_encrypt_decrypt_send_cmd() - send encryptdecrypt cmd to fw + * @wmi_hdl: wmi handle + * @params: 
encrypt/decrypt params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_encrypt_decrypt_send_cmd(void *wmi_hdl, + struct disa_encrypt_decrypt_req_params *params); + +/** + * wmi_extract_encrypt_decrypt_resp_params() - + * extract encrypt decrypt resp params from event buffer + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @resp: encrypt decrypt resp params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_extract_encrypt_decrypt_resp_params(void *wmi_hdl, + uint8_t *evt_buf, + struct disa_encrypt_decrypt_resp_params *resp); +#endif + +QDF_STATUS wmi_unified_p2p_go_set_beacon_ie_cmd(void *wmi_hdl, + uint32_t vdev_id, uint8_t *p2p_ie); + + +QDF_STATUS wmi_unified_set_gateway_params_cmd(void *wmi_hdl, + struct gateway_update_req_param *req); + +QDF_STATUS wmi_unified_set_rssi_monitoring_cmd(void *wmi_hdl, + struct rssi_monitor_param *req); + +QDF_STATUS wmi_unified_scan_probe_setoui_cmd(void *wmi_hdl, + struct scan_mac_oui *psetoui); + +#ifdef CONFIG_MCL +QDF_STATUS wmi_unified_roam_scan_offload_mode_cmd(void *wmi_hdl, + wmi_start_scan_cmd_fixed_param *scan_cmd_fp, + struct roam_offload_scan_params *roam_req); +#endif + +/** + * wmi_unified_roam_mawc_params_cmd() - configure roaming MAWC parameters + * @wmi_hdl: wmi handle + * @params: Parameters to be configured + * + * Pass the MAWC(Motion Aided wireless connectivity) related roaming + * parameters from the host to the target + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_roam_mawc_params_cmd(void *wmi_hdl, + struct wmi_mawc_roam_params *params); + +QDF_STATUS wmi_unified_roam_scan_offload_rssi_thresh_cmd(void *wmi_hdl, + struct roam_offload_scan_rssi_params *roam_req); + +QDF_STATUS wmi_unified_roam_scan_filter_cmd(void *wmi_hdl, + struct roam_scan_filter_params *roam_req); + +#ifdef IPA_OFFLOAD +QDF_STATUS 
wmi_unified_ipa_offload_control_cmd(void *wmi_hdl, + struct ipa_uc_offload_control_params *ipa_offload); +#endif + +QDF_STATUS wmi_unified_plm_stop_cmd(void *wmi_hdl, + const struct plm_req_params *plm); + +QDF_STATUS wmi_unified_plm_start_cmd(void *wmi_hdl, + const struct plm_req_params *plm, + uint32_t *gchannel_list); + +QDF_STATUS wmi_unified_pno_stop_cmd(void *wmi_hdl, uint8_t vdev_id); + +#ifdef FEATURE_WLAN_SCAN_PNO +QDF_STATUS wmi_unified_pno_start_cmd(void *wmi_hdl, + struct pno_scan_req_params *pno); +#endif + +QDF_STATUS wmi_unified_nlo_mawc_cmd(void *wmi_hdl, + struct nlo_mawc_params *params); + +QDF_STATUS wmi_unified_set_ric_req_cmd(void *wmi_hdl, void *msg, + uint8_t is_add_ts); + +QDF_STATUS wmi_unified_process_ll_stats_clear_cmd + (void *wmi_hdl, const struct ll_stats_clear_params *clear_req, + uint8_t addr[IEEE80211_ADDR_LEN]); + +QDF_STATUS wmi_unified_process_ll_stats_set_cmd + (void *wmi_hdl, const struct ll_stats_set_params *set_req); + +QDF_STATUS wmi_unified_process_ll_stats_get_cmd + (void *wmi_hdl, const struct ll_stats_get_params *get_req, + uint8_t addr[IEEE80211_ADDR_LEN]); + +/** + * wmi_unified_congestion_request_cmd() - send request to fw to get CCA + * @wmi_hdl: wma handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_congestion_request_cmd(void *wmi_hdl, + uint8_t vdev_id); + +QDF_STATUS wmi_unified_snr_request_cmd(void *wmi_hdl); + +QDF_STATUS wmi_unified_snr_cmd(void *wmi_hdl, uint8_t vdev_id); + +QDF_STATUS wmi_unified_link_status_req_cmd(void *wmi_hdl, + struct link_status_params *link_status); + +#ifdef CONFIG_MCL +QDF_STATUS wmi_unified_process_dhcp_ind(void *wmi_hdl, + wmi_peer_set_param_cmd_fixed_param *ta_dhcp_ind); + +QDF_STATUS wmi_unified_get_link_speed_cmd(void *wmi_hdl, + wmi_mac_addr peer_macaddr); +#endif + +#ifdef WLAN_SUPPORT_GREEN_AP +QDF_STATUS wmi_unified_egap_conf_params_cmd(void *wmi_hdl, + struct wlan_green_ap_egap_params 
*egap_params); +#endif + +QDF_STATUS wmi_unified_fw_profiling_data_cmd(void *wmi_hdl, + uint32_t cmd, uint32_t value1, uint32_t value2); + +QDF_STATUS wmi_unified_wow_timer_pattern_cmd(void *wmi_hdl, uint8_t vdev_id, + uint32_t cookie, uint32_t time); + +QDF_STATUS wmi_unified_nat_keepalive_en_cmd(void *wmi_hdl, uint8_t vdev_id); + +/** + * wmi_unified_wlm_latency_level_cmd() - set latency level + * @wmi_handle: wmi handle + * @param: WLM parameters + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_wlm_latency_level_cmd(void *wmi_hdl, + struct wlm_latency_level_param *param); + +QDF_STATUS wmi_unified_csa_offload_enable(void *wmi_hdl, uint8_t vdev_id); + +#ifdef WLAN_FEATURE_CIF_CFR +/** + * wmi_unified_oem_dma_ring_cfg() - configure OEM DMA rings + * @wmi_handle: wmi handle + * @data_len: len of dma cfg req + * @data: dma cfg req + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_oem_dma_ring_cfg(void *wmi_hdl, + wmi_oem_dma_ring_cfg_req_fixed_param *cfg); +#endif + +/** + * wmi_unified_dbr_ring_cfg: Configure direct buffer rx rings + * @wmi_hdl: WMI handle + * @cfg: pointer to direct buffer rx config request + * + * Return: QDF status of operation + */ +QDF_STATUS wmi_unified_dbr_ring_cfg(void *wmi_hdl, + struct direct_buf_rx_cfg_req *cfg); + +QDF_STATUS wmi_unified_start_oem_data_cmd(void *wmi_hdl, + uint32_t data_len, + uint8_t *data); + +QDF_STATUS wmi_unified_dfs_phyerr_filter_offload_en_cmd(void *wmi_hdl, + bool dfs_phyerr_filter_offload); + +#ifdef CONFIG_MCL +QDF_STATUS wmi_unified_pktlog_wmi_send_cmd(void *wmi_hdl, + WMI_PKTLOG_EVENT pktlog_event, + uint32_t cmd_id, + uint8_t user_triggered); +#endif + +QDF_STATUS wmi_unified_wow_delete_pattern_cmd(void *wmi_hdl, uint8_t ptrn_id, + uint8_t vdev_id); + +QDF_STATUS wmi_unified_host_wakeup_ind_to_fw_cmd(void *wmi_hdl); +QDF_STATUS wmi_unified_del_ts_cmd(void *wmi_hdl, uint8_t vdev_id, + uint8_t ac); + 
+QDF_STATUS wmi_unified_aggr_qos_cmd(void *wmi_hdl, + struct aggr_add_ts_param *aggr_qos_rsp_msg); + +QDF_STATUS wmi_unified_add_ts_cmd(void *wmi_hdl, + struct add_ts_param *msg); + +QDF_STATUS wmi_unified_process_add_periodic_tx_ptrn_cmd(void *wmi_hdl, + struct periodic_tx_pattern * + pAddPeriodicTxPtrnParams, + uint8_t vdev_id); + +QDF_STATUS wmi_unified_process_del_periodic_tx_ptrn_cmd(void *wmi_hdl, + uint8_t vdev_id, + uint8_t pattern_id); + +QDF_STATUS wmi_unified_stats_ext_req_cmd(void *wmi_hdl, + struct stats_ext_params *preq); + +QDF_STATUS wmi_unified_enable_ext_wow_cmd(void *wmi_hdl, + struct ext_wow_params *params); + +QDF_STATUS wmi_unified_set_app_type2_params_in_fw_cmd(void *wmi_hdl, + struct app_type2_params *appType2Params); + +QDF_STATUS wmi_unified_set_auto_shutdown_timer_cmd(void *wmi_hdl, + uint32_t timer_val); + +QDF_STATUS wmi_unified_nan_req_cmd(void *wmi_hdl, + struct nan_req_params *nan_req); + +QDF_STATUS wmi_unified_process_dhcpserver_offload_cmd(void *wmi_hdl, + struct dhcp_offload_info_params *params); + +QDF_STATUS wmi_unified_process_ch_avoid_update_cmd(void *wmi_hdl); + +QDF_STATUS wmi_unified_send_regdomain_info_to_fw_cmd(void *wmi_hdl, + uint32_t reg_dmn, uint16_t regdmn2G, + uint16_t regdmn5G, uint8_t ctl2G, + uint8_t ctl5G); + +QDF_STATUS wmi_unified_set_tdls_offchan_mode_cmd(void *wmi_hdl, + struct tdls_channel_switch_params *chan_switch_params); + +QDF_STATUS wmi_unified_update_fw_tdls_state_cmd(void *wmi_hdl, + void *tdls_param, uint8_t tdls_state); + +QDF_STATUS wmi_unified_update_tdls_peer_state_cmd(void *wmi_hdl, + struct tdls_peer_state_params *peerStateParams, + uint32_t *ch_mhz); + +QDF_STATUS wmi_unified_process_fw_mem_dump_cmd(void *wmi_hdl, + struct fw_dump_req_param *mem_dump_req); + +QDF_STATUS wmi_unified_process_set_ie_info_cmd(void *wmi_hdl, + struct vdev_ie_info_param *ie_info); + +QDF_STATUS wmi_unified_save_fw_version_cmd(void *wmi_hdl, + void *evt_buf); + +QDF_STATUS 
wmi_unified_set_base_macaddr_indicate_cmd(void *wmi_hdl, + uint8_t *custom_addr); + +QDF_STATUS wmi_unified_log_supported_evt_cmd(void *wmi_hdl, + uint8_t *event, + uint32_t len); + +QDF_STATUS wmi_unified_enable_specific_fw_logs_cmd(void *wmi_hdl, + struct wmi_wifi_start_log *start_log); + +QDF_STATUS wmi_unified_flush_logs_to_fw_cmd(void *wmi_hdl); + +QDF_STATUS wmi_unified_pdev_set_pcl_cmd(void *wmi_hdl, + struct wmi_pcl_chan_weights *msg); + +QDF_STATUS wmi_unified_soc_set_hw_mode_cmd(void *wmi_hdl, + uint32_t hw_mode_index); + +QDF_STATUS wmi_unified_pdev_set_dual_mac_config_cmd(void *wmi_hdl, + struct policy_mgr_dual_mac_config *msg); + +QDF_STATUS wmi_unified_set_led_flashing_cmd(void *wmi_hdl, + struct flashing_req_params *flashing); + +QDF_STATUS wmi_unified_app_type1_params_in_fw_cmd(void *wmi_hdl, + struct app_type1_params *app_type1_params); + +QDF_STATUS wmi_unified_set_ssid_hotlist_cmd(void *wmi_hdl, + struct ssid_hotlist_request_params *request); + +QDF_STATUS wmi_unified_roam_synch_complete_cmd(void *wmi_hdl, + uint8_t vdev_id); + +QDF_STATUS wmi_unified_unit_test_cmd(void *wmi_hdl, + struct wmi_unit_test_cmd *wmi_utest); + +QDF_STATUS wmi_unified_roam_invoke_cmd(void *wmi_hdl, + struct wmi_roam_invoke_cmd *roaminvoke, + uint32_t ch_hz); + +QDF_STATUS wmi_unified_roam_scan_offload_cmd(void *wmi_hdl, + uint32_t command, uint32_t vdev_id); + +#ifdef CONFIG_MCL +QDF_STATUS wmi_unified_send_roam_scan_offload_ap_cmd(void *wmi_hdl, + struct ap_profile_params *ap_profile); +#endif + +QDF_STATUS wmi_unified_roam_scan_offload_scan_period(void *wmi_hdl, + uint32_t scan_period, + uint32_t scan_age, + uint32_t vdev_id); + +QDF_STATUS wmi_unified_roam_scan_offload_chan_list_cmd(void *wmi_hdl, + uint8_t chan_count, + uint32_t *chan_list, + uint8_t list_type, uint32_t vdev_id); + +QDF_STATUS wmi_unified_roam_scan_offload_rssi_change_cmd(void *wmi_hdl, + uint32_t vdev_id, + int32_t rssi_change_thresh, + uint32_t bcn_rssi_weight, + uint32_t hirssi_delay_btw_scans); 
+ +/** + * wmi_unified_set_per_roam_config() - set PER roam config in FW + * @wmi_hdl: wmi handle + * @req_buf: per roam config request buffer + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_per_roam_config(void *wmi_hdl, + struct wmi_per_roam_config_req *req_buf); + +#ifdef FEATURE_WLAN_APF +/** + * wmi_unified_set_active_apf_mode_cmd() - config active APF mode in FW + * @wmi: the WMI handle + * @vdev_id: the Id of the vdev to apply the configuration to + * @ucast_mode: the active APF mode to configure for unicast packets + * @mcast_bcast_mode: the active APF mode to configure for multicast/broadcast + * packets + */ +QDF_STATUS +wmi_unified_set_active_apf_mode_cmd(wmi_unified_t wmi, uint8_t vdev_id, + enum wmi_host_active_apf_mode ucast_mode, + enum wmi_host_active_apf_mode + mcast_bcast_mode); + +/** + * wmi_unified_send_apf_enable_cmd() - send apf enable/disable cmd + * @wmi: wmi handle + * @vdev_id: VDEV id + * @enable: true: enable, false: disable + * + * This function passes the apf enable command to fw + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_apf_enable_cmd(wmi_unified_t wmi, + uint32_t vdev_id, bool enable); + +/** + * wmi_unified_send_apf_write_work_memory_cmd() - send cmd to write into the APF + * work memory. 
+ * @wmi: wmi handle + * @write_params: parameters and buffer pointer for the write + * + * This function passes the write apf work mem command to fw + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_apf_write_work_memory_cmd(wmi_unified_t wmi, + struct wmi_apf_write_memory_params *write_params); + +/** + * wmi_unified_send_apf_read_work_memory_cmd() - send cmd to read part of APF + * work memory + * @wmi: wmi handle + * @read_params: contains relative address and length to read from + * + * This function passes the read apf work mem command to fw + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_apf_read_work_memory_cmd(wmi_unified_t wmi, + struct wmi_apf_read_memory_params *read_params); + +/** + * wmi_extract_apf_read_memory_resp_event() - extract read mem resp event + * @wmi: wmi handle + * @evt_buf: Pointer to the event buffer + * @read_mem_evt: pointer to memory to extract event parameters into + * + * This function extracts read mem response event into the given structure ptr + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_extract_apf_read_memory_resp_event(wmi_unified_t wmi, void *evt_buf, + struct wmi_apf_read_memory_resp_event_params + *read_mem_evt); +#endif /* FEATURE_WLAN_APF */ + +QDF_STATUS wmi_unified_stats_request_send(void *wmi_hdl, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct stats_request_params *param); + +QDF_STATUS wmi_unified_pdev_get_tpc_config_cmd_send(void *wmi_hdl, + uint32_t param); + +QDF_STATUS wmi_unified_set_bwf_cmd_send(void *wmi_hdl, + struct set_bwf_params *param); + +QDF_STATUS wmi_send_get_user_position_cmd(void *wmi_hdl, uint32_t value); + +QDF_STATUS wmi_send_get_peer_mumimo_tx_count_cmd(void *wmi_hdl, uint32_t value); + +QDF_STATUS wmi_send_reset_peer_mumimo_tx_count_cmd(void *wmi_hdl, + uint32_t value); + +QDF_STATUS 
wmi_send_pdev_caldata_version_check_cmd(void *wmi_hdl, + uint32_t value); + +QDF_STATUS wmi_unified_send_btcoex_wlan_priority_cmd(void *wmi_hdl, + struct btcoex_cfg_params *param); + +QDF_STATUS wmi_unified_send_btcoex_duty_cycle_cmd(void *wmi_hdl, + struct btcoex_cfg_params *param); + +QDF_STATUS wmi_unified_send_coex_ver_cfg_cmd(void *wmi_hdl, + coex_ver_cfg_t *param); + +QDF_STATUS wmi_unified_send_coex_config_cmd(void *wmi_hdl, + struct coex_config_params *param); + +QDF_STATUS wmi_unified_set_atf_cmd_send(void *wmi_hdl, + struct set_atf_params *param); + +QDF_STATUS wmi_unified_pdev_fips_cmd_send(void *wmi_hdl, + struct fips_params *param); + +QDF_STATUS wmi_unified_wlan_profile_enable_cmd_send(void *wmi_hdl, + struct wlan_profile_params *param); + +QDF_STATUS wmi_unified_wlan_profile_trigger_cmd_send(void *wmi_hdl, + struct wlan_profile_params *param); + +QDF_STATUS wmi_unified_set_chan_cmd_send(void *wmi_hdl, + struct channel_param *param); + +QDF_STATUS wmi_unified_set_ht_ie_cmd_send(void *wmi_hdl, + struct ht_ie_params *param); + +QDF_STATUS wmi_unified_set_vht_ie_cmd_send(void *wmi_hdl, + struct vht_ie_params *param); + +QDF_STATUS wmi_unified_wmm_update_cmd_send(void *wmi_hdl, + struct wmm_update_params *param); + +QDF_STATUS wmi_unified_set_ant_switch_tbl_cmd_send(void *wmi_hdl, + struct ant_switch_tbl_params *param); + +QDF_STATUS wmi_unified_set_ratepwr_table_cmd_send(void *wmi_hdl, + struct ratepwr_table_params *param); + +QDF_STATUS wmi_unified_get_ratepwr_table_cmd_send(void *wmi_hdl); + +QDF_STATUS wmi_unified_set_ctl_table_cmd_send(void *wmi_hdl, + struct ctl_table_params *param); + +QDF_STATUS wmi_unified_set_mimogain_table_cmd_send(void *wmi_hdl, + struct mimogain_table_params *param); + +QDF_STATUS wmi_unified_set_ratepwr_chainmsk_cmd_send(void *wmi_hdl, + struct ratepwr_chainmsk_params *param); + +QDF_STATUS wmi_unified_set_macaddr_cmd_send(void *wmi_hdl, + struct macaddr_params *param); + +QDF_STATUS wmi_unified_pdev_scan_start_cmd_send(void 
*wmi_hdl); + +QDF_STATUS wmi_unified_pdev_scan_end_cmd_send(void *wmi_hdl); + +QDF_STATUS wmi_unified_set_acparams_cmd_send(void *wmi_hdl, + struct acparams_params *param); + +QDF_STATUS wmi_unified_set_vap_dscp_tid_map_cmd_send(void *wmi_hdl, + struct vap_dscp_tid_map_params *param); + +QDF_STATUS wmi_unified_proxy_ast_reserve_cmd_send(void *wmi_hdl, + struct proxy_ast_reserve_params *param); + +QDF_STATUS wmi_unified_pdev_qvit_cmd_send(void *wmi_hdl, + struct pdev_qvit_params *param); + +QDF_STATUS wmi_unified_mcast_group_update_cmd_send(void *wmi_hdl, + struct mcast_group_update_params *param); + +QDF_STATUS wmi_unified_peer_add_wds_entry_cmd_send(void *wmi_hdl, + struct peer_add_wds_entry_params *param); + +QDF_STATUS wmi_unified_peer_del_wds_entry_cmd_send(void *wmi_hdl, + struct peer_del_wds_entry_params *param); + +/** + * wmi_unified_set_bridge_mac_addr_cmd_send() - WMI set bridge mac addr cmd function + * @param wmi_hdl : handle to WMI. + * @param param : pointer to hold bridge mac addr param + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_bridge_mac_addr_cmd_send(void *wmi_hdl, + struct set_bridge_mac_addr_params *param); + + +QDF_STATUS wmi_unified_peer_update_wds_entry_cmd_send(void *wmi_hdl, + struct peer_update_wds_entry_params *param); + +QDF_STATUS wmi_unified_phyerr_enable_cmd_send(void *wmi_hdl); + +QDF_STATUS wmi_unified_phyerr_enable_cmd_send(void *wmi_hdl); + +QDF_STATUS wmi_unified_phyerr_disable_cmd_send(void *wmi_hdl); + +QDF_STATUS wmi_unified_smart_ant_enable_cmd_send(void *wmi_hdl, + struct smart_ant_enable_params *param); + +QDF_STATUS wmi_unified_smart_ant_set_rx_ant_cmd_send(void *wmi_hdl, + struct smart_ant_rx_ant_params *param); + +QDF_STATUS wmi_unified_smart_ant_set_tx_ant_cmd_send(void *wmi_hdl, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct smart_ant_tx_ant_params *param); + +QDF_STATUS wmi_unified_smart_ant_set_training_info_cmd_send(void *wmi_hdl, + uint8_t 
macaddr[IEEE80211_ADDR_LEN], + struct smart_ant_training_info_params *param); + +QDF_STATUS wmi_unified_smart_ant_node_config_cmd_send(void *wmi_hdl, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct smart_ant_node_config_params *param); + +QDF_STATUS wmi_unified_smart_ant_enable_tx_feedback_cmd_send(void *wmi_hdl, + struct smart_ant_enable_tx_feedback_params *param); + +QDF_STATUS wmi_unified_vdev_spectral_configure_cmd_send(void *wmi_hdl, + struct vdev_spectral_configure_params *param); + +QDF_STATUS wmi_unified_vdev_spectral_enable_cmd_send(void *wmi_hdl, + struct vdev_spectral_enable_params *param); + +QDF_STATUS wmi_unified_bss_chan_info_request_cmd_send(void *wmi_hdl, + struct bss_chan_info_request_params *param); + +QDF_STATUS wmi_unified_thermal_mitigation_param_cmd_send(void *wmi_hdl, + struct thermal_mitigation_params *param); + +QDF_STATUS wmi_unified_vdev_set_neighbour_rx_cmd_send(void *wmi_hdl, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct set_neighbour_rx_params *param); + +QDF_STATUS wmi_unified_vdev_set_fwtest_param_cmd_send(void *wmi_hdl, + struct set_fwtest_params *param); + +QDF_STATUS wmi_unified_vdev_config_ratemask_cmd_send(void *wmi_hdl, + struct config_ratemask_params *param); + +/** + * wmi_unified_vdev_set_custom_aggr_size_cmd_send() - WMI set custom aggr + * size command + * @param wmi_hdl : handle to WMI. + * @param param : pointer to hold custom aggr size param + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_set_custom_aggr_size_cmd_send(void *wmi_hdl, + struct set_custom_aggr_size_params *param); + +/** + * wmi_unified_vdev_set_qdepth_thresh_cmd_send() - WMI set qdepth threshold + * @param wmi_hdl : handle to WMI. 
+ * @param param : pointer to hold set qdepth thresh param + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_set_qdepth_thresh_cmd_send(void *wmi_hdl, + struct set_qdepth_thresh_params *param); + +QDF_STATUS wmi_unified_pdev_set_regdomain_cmd_send(void *wmi_hdl, + struct pdev_set_regdomain_params *param); + +QDF_STATUS wmi_unified_set_quiet_mode_cmd_send(void *wmi_hdl, + struct set_quiet_mode_params *param); + +QDF_STATUS wmi_unified_set_beacon_filter_cmd_send(void *wmi_hdl, + struct set_beacon_filter_params *param); + +QDF_STATUS wmi_unified_remove_beacon_filter_cmd_send(void *wmi_hdl, + struct remove_beacon_filter_params *param); + +QDF_STATUS wmi_unified_addba_clearresponse_cmd_send(void *wmi_hdl, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct addba_clearresponse_params *param); + +QDF_STATUS wmi_unified_addba_send_cmd_send(void *wmi_hdl, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct addba_send_params *param); + +QDF_STATUS wmi_unified_delba_send_cmd_send(void *wmi_hdl, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct delba_send_params *param); + +QDF_STATUS wmi_unified_addba_setresponse_cmd_send(void *wmi_hdl, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct addba_setresponse_params *param); + +QDF_STATUS wmi_unified_singleamsdu_cmd_send(void *wmi_hdl, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct singleamsdu_params *param); + +QDF_STATUS wmi_unified_set_qboost_param_cmd_send(void *wmi_hdl, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct set_qboost_params *param); + +QDF_STATUS wmi_unified_mu_scan_cmd_send(void *wmi_hdl, + struct mu_scan_params *param); + +QDF_STATUS wmi_unified_lteu_config_cmd_send(void *wmi_hdl, + struct lteu_config_params *param); + +QDF_STATUS wmi_unified_set_psmode_cmd_send(void *wmi_hdl, + struct set_ps_mode_params *param); + +QDF_STATUS wmi_unified_init_cmd_send(void *wmi_hdl, + struct wmi_init_cmd_param *param); + +bool wmi_service_enabled(void *wmi_hdl, uint32_t 
service_id); + +/** + * wmi_save_service_bitmap() - save service bitmap + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS failure code + */ +QDF_STATUS wmi_save_service_bitmap(void *wmi_hdl, void *evt_buf, + void *bitmap_buf); + +/** + * wmi_save_ext_service_bitmap() - save extended service bitmap + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS failure code + */ +QDF_STATUS wmi_save_ext_service_bitmap(void *wmi_hdl, void *evt_buf, + void *bitmap_buf); + +QDF_STATUS wmi_save_fw_version(void *wmi_hdl, void *evt_buf); + +QDF_STATUS wmi_get_target_cap_from_service_ready(void *wmi_hdl, + void *evt_buf, + struct wlan_psoc_target_capability_info *ev); + +QDF_STATUS wmi_extract_hal_reg_cap(void *wmi_hdl, void *evt_buf, + struct wlan_psoc_hal_reg_capability *hal_reg_cap); + +host_mem_req *wmi_extract_host_mem_req_from_service_ready(void *wmi_hdl, + void *evt_buf, uint8_t *num_entries); + +uint32_t wmi_ready_extract_init_status(void *wmi_hdl, void *ev); + +QDF_STATUS wmi_ready_extract_mac_addr(void *wmi_hdl, + void *ev, uint8_t *macaddr); + +wmi_host_mac_addr *wmi_ready_extract_mac_addr_list(void *wmi_hdl, void *ev, + uint8_t *num_mac_addr); + +/** + * wmi_extract_ready_event_params() - Extract data from ready event apart from + * status, macaddr and version. + * @wmi_handle: Pointer to WMI handle. + * @evt_buf: Pointer to Ready event buffer. + * @ev_param: Pointer to host defined struct to copy the data from event. + * + * Return: QDF_STATUS_SUCCESS on success. 
+ */ +QDF_STATUS wmi_extract_ready_event_params(void *wmi_hdl, + void *evt_buf, struct wmi_host_ready_ev_param *ev_param); + +QDF_STATUS wmi_extract_fw_version(void *wmi_hdl, + void *ev, struct wmi_host_fw_ver *fw_ver); + +QDF_STATUS wmi_extract_fw_abi_version(void *wmi_hdl, + void *ev, struct wmi_host_fw_abi_ver *fw_ver); + +QDF_STATUS wmi_check_and_update_fw_version(void *wmi_hdl, void *ev); + +uint8_t *wmi_extract_dbglog_data_len(void *wmi_hdl, + void *evt_b, uint32_t *len); + +QDF_STATUS wmi_send_ext_resource_config(void *wmi_hdl, + wmi_host_ext_resource_config *ext_cfg); + +QDF_STATUS wmi_unified_nf_dbr_dbm_info_get_cmd_send(void *wmi_hdl, + uint8_t mac_id); + +QDF_STATUS wmi_unified_packet_power_info_get_cmd_send(void *wmi_hdl, + struct packet_power_info_params *param); + +QDF_STATUS wmi_unified_gpio_config_cmd_send(void *wmi_hdl, + struct gpio_config_params *param); + +QDF_STATUS wmi_unified_gpio_output_cmd_send(void *wmi_hdl, + struct gpio_output_params *param); + +QDF_STATUS wmi_unified_rtt_meas_req_test_cmd_send(void *wmi_hdl, + struct rtt_meas_req_test_params *param); + +QDF_STATUS wmi_unified_rtt_meas_req_cmd_send(void *wmi_hdl, + struct rtt_meas_req_params *param); + +QDF_STATUS wmi_unified_rtt_keepalive_req_cmd_send(void *wmi_hdl, + struct rtt_keepalive_req_params *param); + +QDF_STATUS wmi_unified_lci_set_cmd_send(void *wmi_hdl, + struct lci_set_params *param); + +QDF_STATUS wmi_unified_lcr_set_cmd_send(void *wmi_hdl, + struct lcr_set_params *param); + +QDF_STATUS wmi_unified_send_periodic_chan_stats_config_cmd(void *wmi_hdl, + struct periodic_chan_stats_params *param); + +QDF_STATUS +wmi_send_atf_peer_request_cmd(void *wmi_hdl, + struct atf_peer_request_params *param); + +QDF_STATUS +wmi_send_set_atf_grouping_cmd(void *wmi_hdl, + struct atf_grouping_params *param); +/* Extract APIs */ + +QDF_STATUS wmi_extract_wds_addr_event(void *wmi_hdl, + void *evt_buf, uint16_t len, wds_addr_event_t *wds_ev); + +QDF_STATUS wmi_extract_dcs_interference_type(void 
*wmi_hdl, + void *evt_buf, struct wmi_host_dcs_interference_param *param); + +QDF_STATUS wmi_extract_dcs_cw_int(void *wmi_hdl, void *evt_buf, + wmi_host_ath_dcs_cw_int *cw_int); + +QDF_STATUS wmi_extract_dcs_im_tgt_stats(void *wmi_hdl, void *evt_buf, + wmi_host_dcs_im_tgt_stats_t *wlan_stat); + +QDF_STATUS wmi_extract_fips_event_data(void *wmi_hdl, void *evt_buf, + struct wmi_host_fips_event_param *param); + +QDF_STATUS wmi_extract_vdev_start_resp(void *wmi_hdl, void *evt_buf, + wmi_host_vdev_start_resp *vdev_rsp); + +/** + * wmi_extract_vdev_delete_resp - api to extract vdev delete + * response event params + * @wmi_handle: wma handle + * @evt_buf: pointer to event buffer + * @delele_rsp: pointer to hold delete response from firmware + * + * Return: QDF_STATUS_SUCCESS for successful event parse + * else QDF_STATUS_E_INVAL or QDF_STATUS_E_FAILURE + */ +QDF_STATUS wmi_extract_vdev_delete_resp(void *wmi_hdl, void *evt_buf, + struct wmi_host_vdev_delete_resp *delele_rsp); + +QDF_STATUS wmi_extract_tbttoffset_update_params(void *wmi_hdl, void *evt_buf, + uint8_t idx, struct tbttoffset_params *tbtt_param); + +QDF_STATUS wmi_extract_ext_tbttoffset_update_params(void *wmi_hdl, + void *evt_buf, uint8_t idx, + struct tbttoffset_params *tbtt_param); + +QDF_STATUS wmi_extract_tbttoffset_num_vdevs(void *wmi_hdl, void *evt_buf, + uint32_t *num_vdevs); + +QDF_STATUS wmi_extract_ext_tbttoffset_num_vdevs(void *wmi_hdl, void *evt_buf, + uint32_t *num_vdevs); + +QDF_STATUS wmi_extract_mgmt_rx_params(void *wmi_hdl, void *evt_buf, + struct mgmt_rx_event_params *hdr, uint8_t **bufp); + +QDF_STATUS wmi_extract_vdev_stopped_param(void *wmi_hdl, void *evt_buf, + uint32_t *vdev_id); + +QDF_STATUS wmi_extract_vdev_roam_param(void *wmi_hdl, void *evt_buf, + wmi_host_roam_event *ev); + +QDF_STATUS wmi_extract_vdev_scan_ev_param(void *wmi_hdl, void *evt_buf, + struct scan_event *param); + +#ifdef CONVERGED_TDLS_ENABLE +/** + * wmi_extract_vdev_tdls_ev_param - extract vdev tdls param from event 
+ * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold vdev tdls param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_vdev_tdls_ev_param(void *wmi_hdl, void *evt_buf, + struct tdls_event_info *param); +#endif + +QDF_STATUS wmi_extract_mu_ev_param(void *wmi_hdl, void *evt_buf, + wmi_host_mu_report_event *param); + +QDF_STATUS wmi_extract_mu_db_entry(void *wmi_hdl, void *evt_buf, + uint8_t idx, wmi_host_mu_db_entry *param); + +QDF_STATUS wmi_extract_mumimo_tx_count_ev_param(void *wmi_hdl, void *evt_buf, + wmi_host_peer_txmu_cnt_event *param); + +QDF_STATUS wmi_extract_peer_gid_userpos_list_ev_param(void *wmi_hdl, + void *evt_buf, wmi_host_peer_gid_userpos_list_event *param); + +QDF_STATUS wmi_extract_pdev_caldata_version_check_ev_param(void *wmi_hdl, + void *evt_buf, wmi_host_pdev_check_cal_version_event *param); + +QDF_STATUS wmi_extract_pdev_tpc_config_ev_param(void *wmi_hdl, void *evt_buf, + wmi_host_pdev_tpc_config_event *param); + +QDF_STATUS wmi_extract_gpio_input_ev_param(void *wmi_hdl, + void *evt_buf, uint32_t *gpio_num); + +QDF_STATUS wmi_extract_pdev_reserve_ast_ev_param(void *wmi_hdl, + void *evt_buf, struct wmi_host_proxy_ast_reserve_param *param); + +QDF_STATUS wmi_extract_nfcal_power_ev_param(void *wmi_hdl, void *evt_buf, + wmi_host_pdev_nfcal_power_all_channels_event *param); + +QDF_STATUS wmi_extract_pdev_tpc_ev_param(void *wmi_hdl, void *evt_buf, + wmi_host_pdev_tpc_event *param); + +QDF_STATUS wmi_extract_pdev_generic_buffer_ev_param(void *wmi_hdl, + void *evt_buf, + wmi_host_pdev_generic_buffer_event *param); + +QDF_STATUS wmi_extract_mgmt_tx_compl_param(void *wmi_hdl, void *evt_buf, + wmi_host_mgmt_tx_compl_event *param); + +QDF_STATUS wmi_extract_offchan_data_tx_compl_param(void *wmi_hdl, void *evt_buf, + struct wmi_host_offchan_data_tx_compl_event *param); + +QDF_STATUS wmi_extract_pdev_csa_switch_count_status(void *wmi_hdl, + 
void *evt_buf, + struct pdev_csa_switch_count_status *param); + +QDF_STATUS wmi_extract_swba_num_vdevs(void *wmi_hdl, void *evt_buf, + uint32_t *num_vdevs); + +QDF_STATUS wmi_extract_swba_tim_info(void *wmi_hdl, void *evt_buf, + uint32_t idx, wmi_host_tim_info *tim_info); + +QDF_STATUS wmi_extract_swba_noa_info(void *wmi_hdl, void *evt_buf, + uint32_t idx, wmi_host_p2p_noa_info *p2p_desc); + +#ifdef CONVERGED_P2P_ENABLE +QDF_STATUS wmi_extract_p2p_lo_stop_ev_param(void *wmi_hdl, + void *evt_buf, struct p2p_lo_event *param); + +QDF_STATUS wmi_extract_p2p_noa_ev_param(void *wmi_hdl, + void *evt_buf, struct p2p_noa_info *param); + +QDF_STATUS +wmi_send_set_mac_addr_rx_filter_cmd(void *wmi_hdl, + struct p2p_set_mac_filter *param); + +QDF_STATUS +wmi_extract_mac_addr_rx_filter_evt_param(void *wmi_hdl, void *evt_buf, + struct p2p_set_mac_filter_evt *param); +#endif + +QDF_STATUS wmi_extract_peer_sta_ps_statechange_ev(void *wmi_hdl, + void *evt_buf, wmi_host_peer_sta_ps_statechange_event *ev); + +QDF_STATUS wmi_extract_peer_sta_kickout_ev(void *wmi_hdl, void *evt_buf, + wmi_host_peer_sta_kickout_event *ev); + +QDF_STATUS wmi_extract_peer_ratecode_list_ev(void *wmi_hdl, void *evt_buf, + uint8_t *peer_mac, wmi_sa_rate_cap *rate_cap); + +QDF_STATUS wmi_extract_bcnflt_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, wmi_host_bcnflt_stats *bcnflt_stats); + +QDF_STATUS wmi_extract_rtt_hdr(void *wmi_hdl, void *evt_buf, + wmi_host_rtt_event_hdr *ev); + +QDF_STATUS wmi_extract_rtt_ev(void *wmi_hdl, void *evt_buf, + wmi_host_rtt_meas_event *ev, uint8_t *hdump, + uint16_t hdump_len); + +QDF_STATUS wmi_extract_rtt_error_report_ev(void *wmi_hdl, void *evt_buf, + wmi_host_rtt_error_report_event *ev); + +QDF_STATUS wmi_extract_chan_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, wmi_host_chan_stats *chan_stats); + +QDF_STATUS wmi_extract_thermal_stats(void *wmi_hdl, void *evt_buf, + uint32_t *temp, uint32_t *level, uint32_t *pdev_id); + +QDF_STATUS 
wmi_extract_thermal_level_stats(void *wmi_hdl, void *evt_buf, + uint8_t idx, uint32_t *levelcount, uint32_t *dccount); + +QDF_STATUS wmi_extract_comb_phyerr(void *wmi_hdl, void *evt_buf, + uint16_t datalen, uint16_t *buf_offset, + wmi_host_phyerr_t *phyerr); + +QDF_STATUS wmi_extract_single_phyerr(void *wmi_hdl, void *evt_buf, + uint16_t datalen, uint16_t *buf_offset, + wmi_host_phyerr_t *phyerr); + +QDF_STATUS wmi_extract_composite_phyerr(void *wmi_hdl, void *evt_buf, + uint16_t datalen, wmi_host_phyerr_t *phyerr); + +QDF_STATUS wmi_extract_profile_ctx(void *wmi_hdl, void *evt_buf, + wmi_host_wlan_profile_ctx_t *profile_ctx); + +QDF_STATUS wmi_extract_profile_data(void *wmi_hdl, void *evt_buf, uint8_t idx, + wmi_host_wlan_profile_t *profile_data); + +QDF_STATUS wmi_extract_chan_info_event(void *wmi_hdl, void *evt_buf, + wmi_host_chan_info_event *chan_info); + +QDF_STATUS wmi_extract_channel_hopping_event(void *wmi_hdl, void *evt_buf, + wmi_host_pdev_channel_hopping_event *ch_hopping); + +QDF_STATUS wmi_extract_stats_param(void *wmi_hdl, void *evt_buf, + wmi_host_stats_event *stats_param); + +QDF_STATUS wmi_extract_pdev_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, + wmi_host_pdev_stats *pdev_stats); + +QDF_STATUS wmi_extract_unit_test(void *wmi_hdl, void *evt_buf, + wmi_unit_test_event *unit_test, uint32_t maxspace); + +QDF_STATUS wmi_extract_pdev_ext_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, + wmi_host_pdev_ext_stats *pdev_ext_stats); + +QDF_STATUS wmi_extract_peer_extd_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, + wmi_host_peer_extd_stats *peer_extd_stats); + +QDF_STATUS wmi_extract_peer_adv_stats(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_peer_adv_stats + *peer_adv_stats); + +QDF_STATUS wmi_extract_bss_chan_info_event(void *wmi_hdl, void *evt_buf, + wmi_host_pdev_bss_chan_info_event *bss_chan_info); + +QDF_STATUS wmi_extract_inst_rssi_stats_event(void *wmi_hdl, void *evt_buf, + wmi_host_inst_stats_resp 
*inst_rssi_resp); + +QDF_STATUS wmi_extract_peer_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, wmi_host_peer_stats *peer_stats); + +QDF_STATUS wmi_extract_tx_data_traffic_ctrl_ev(void *wmi_hdl, void *evt_buf, + wmi_host_tx_data_traffic_ctrl_event *ev); + +QDF_STATUS wmi_extract_atf_peer_stats_ev(void *wmi_hdl, void *evt_buf, + wmi_host_atf_peer_stats_event *ev); + +QDF_STATUS wmi_extract_atf_token_info_ev(void *wmi_hdl, void *evt_buf, + uint8_t idx, wmi_host_atf_peer_stats_info *atf_token_info); + +QDF_STATUS wmi_extract_vdev_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, wmi_host_vdev_stats *vdev_stats); + +QDF_STATUS wmi_extract_per_chain_rssi_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, struct wmi_host_per_chain_rssi_stats *rssi_stats); + +QDF_STATUS wmi_extract_vdev_extd_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, wmi_host_vdev_extd_stats *vdev_extd_stats); + +QDF_STATUS wmi_extract_bcn_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, wmi_host_bcn_stats *vdev_bcn_stats); + +/** + * wmi_extract_vdev_nac_rssi_stats() - extract NAC_RSSI stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param vdev_extd_stats: Pointer to hold nac rssi stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_vdev_nac_rssi_stats(void *wmi_hdl, void *evt_buf, + struct wmi_host_vdev_nac_rssi_event *vdev_nac_rssi_stats); + +QDF_STATUS wmi_unified_send_power_dbg_cmd(void *wmi_hdl, + struct wmi_power_dbg_params *param); + +QDF_STATUS wmi_unified_send_multiple_vdev_restart_req_cmd(void *wmi_hdl, + struct multiple_vdev_restart_params *param); + +/** + * wmi_unified_send_sar_limit_cmd() - send sar limit cmd to fw + * @wmi_hdl: wmi handle + * @params: sar limit command params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_sar_limit_cmd(void *wmi_hdl, + struct sar_limit_cmd_params 
*params); + +/** + * wmi_unified_get_sar_limit_cmd() - request current SAR limits from FW + * @wmi_hdl: wmi handle + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_unified_get_sar_limit_cmd(void *wmi_hdl); + +/** + * wmi_unified_extract_sar_limit_event() - extract SAR limits from FW event + * @wmi_hdl: wmi handle + * @evt_buf: event buffer received from firmware + * @event: SAR limit event which is to be populated by data extracted from + * the @evt_buf buffer + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_unified_extract_sar_limit_event(void *wmi_hdl, + uint8_t *evt_buf, + struct sar_limit_event *event); + +/** + * wmi_unified_extract_sar2_result_event() - extract SAR limits from FW event + * @handle: wmi handle + * @event: event buffer received from firmware + * @len: length of the event buffer + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_unified_extract_sar2_result_event(void *handle, + uint8_t *event, uint32_t len); + +/** + * wmi_extract_sar_cap_service_ready_ext() - extract SAR cap from + * FW service ready event + * @wmi_hdl: wmi handle + * @evt_buf: event buffer received from firmware + * @ext_param: extended target info + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_extract_sar_cap_service_ready_ext( + void *wmi_hdl, + uint8_t *evt_buf, + struct wlan_psoc_host_service_ext_param *ext_param); + +QDF_STATUS wmi_unified_send_adapt_dwelltime_params_cmd(void *wmi_hdl, + struct wmi_adaptive_dwelltime_params * + wmi_param); +QDF_STATUS wmi_unified_fw_test_cmd(void *wmi_hdl, + struct set_fwtest_params *wmi_fwtest); + +QDF_STATUS wmi_unified_peer_rx_reorder_queue_setup_send(void *wmi_hdl, + struct rx_reorder_queue_setup_params *param); +QDF_STATUS wmi_unified_peer_rx_reorder_queue_remove_send(void *wmi_hdl, + struct rx_reorder_queue_remove_params *param); + +QDF_STATUS wmi_extract_service_ready_ext(void *wmi_hdl, uint8_t 
*evt_buf, + struct wlan_psoc_host_service_ext_param *param); +QDF_STATUS wmi_extract_hw_mode_cap_service_ready_ext( + void *wmi_hdl, + uint8_t *evt_buf, uint8_t hw_mode_idx, + struct wlan_psoc_host_hw_mode_caps *param); +QDF_STATUS wmi_extract_mac_phy_cap_service_ready_ext( + void *wmi_hdl, + uint8_t *evt_buf, + uint8_t hw_mode_id, + uint8_t phy_id, + struct wlan_psoc_host_mac_phy_caps *param); +QDF_STATUS wmi_extract_reg_cap_service_ready_ext( + void *wmi_hdl, + uint8_t *evt_buf, uint8_t phy_idx, + struct wlan_psoc_host_hal_reg_capabilities_ext *param); + +/** + * wmi_extract_dbr_ring_cap_service_ready_ext: Extract direct buffer rx + * capability received through + * extended service ready event + * @wmi_hdl: WMI handle + * @evt_buf: Event buffer + * @idx: Index of the module for which capability is received + * @param: Pointer to direct buffer rx ring cap struct + * + * Return: QDF status of operation + */ +QDF_STATUS wmi_extract_dbr_ring_cap_service_ready_ext( + void *wmi_hdl, + uint8_t *evt_buf, uint8_t idx, + struct wlan_psoc_host_dbr_ring_caps *param); + +/** + * wmi_extract_dbr_buf_release_fixed : Extract direct buffer rx fixed param + * from buffer release event + * @wmi_hdl: WMI handle + * @evt_buf: Event buffer + * @param: Pointer to direct buffer rx response struct + * + * Return: QDF status of operation + */ +QDF_STATUS wmi_extract_dbr_buf_release_fixed( + void *wmi_hdl, + uint8_t *evt_buf, + struct direct_buf_rx_rsp *param); + +/** + * wmi_extract_dbr_buf_release_entry: Extract direct buffer rx buffer tlv + * + * @wmi_hdl: WMI handle + * @evt_buf: Event buffer + * @idx: Index of the module for which capability is received + * @param: Pointer to direct buffer rx entry + * + * Return: QDF status of operation + */ +QDF_STATUS wmi_extract_dbr_buf_release_entry( + void *wmi_hdl, + uint8_t *evt_buf, uint8_t idx, + struct direct_buf_rx_entry *param); + +/** + * wmi_extract_dbr_buf_metadata: Extract direct buffer metadata + * + * @wmi_hdl: WMI handle + * 
@evt_buf: Event buffer + * @idx: Index of the module for which capability is received + * @param: Pointer to direct buffer metadata + * + * Return: QDF status of operation + */ +QDF_STATUS wmi_extract_dbr_buf_metadata( + void *wmi_hdl, + uint8_t *evt_buf, uint8_t idx, + struct direct_buf_rx_metadata *param); + +QDF_STATUS wmi_extract_pdev_utf_event(void *wmi_hdl, + uint8_t *evt_buf, + struct wmi_host_pdev_utf_event *param); + +QDF_STATUS wmi_extract_pdev_qvit_event(void *wmi_hdl, + uint8_t *evt_buf, + struct wmi_host_pdev_qvit_event *param); + +QDF_STATUS wmi_extract_peer_delete_response_event(void *wmi_hdl, + uint8_t *evt_buf, + struct wmi_host_peer_delete_response_event *param); + +QDF_STATUS wmi_extract_chainmask_tables(void *wmi_hdl, uint8_t *evt_buf, + struct wlan_psoc_host_chainmask_table *chainmask_table); +/** + * wmi_unified_dfs_phyerr_offload_en_cmd() - enable dfs phyerr offload + * @wmi_handle: wmi handle + * @pdev_id: pdev id + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_unified_dfs_phyerr_offload_en_cmd(void *wmi_hdl, + uint32_t pdev_id); + +/** + * wmi_unified_dfs_phyerr_offload_dis_cmd() - disable dfs phyerr offload + * @wmi_handle: wmi handle + * @pdev_id: pdev id + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_unified_dfs_phyerr_offload_dis_cmd(void *wmi_hdl, + uint32_t pdev_id); + +QDF_STATUS wmi_unified_set_country_cmd_send(void *wmi_hdl, + struct set_country *param); + +#ifdef WLAN_FEATURE_ACTION_OUI +/** + * wmi_unified_send_action_oui_cmd() - send action oui cmd to fw + * @wmi_hdl: wma handle + * @req: wmi action oui message to be send + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_send_action_oui_cmd(void *wmi_hdl, + struct action_oui_request *req); +#endif /* WLAN_FEATURE_ACTION_OUI */ + +/* + * wmi_unified_set_del_pmkid_cache() - set delete PMKID + * @wmi_hdl: wma handle + * @pmksa: pointer to pmk cache entry + * + * Return: QDF_STATUS_SUCCESS on success and 
QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_del_pmkid_cache(void *wmi_hdl, + struct wmi_unified_pmk_cache *pmksa); + +#if defined(WLAN_FEATURE_FILS_SK) +/* + * wmi_unified_roam_send_hlp_cmd() -send HLP command info + * @wmi_hdl: wma handle + * @req_buf: Pointer to HLP params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_roam_send_hlp_cmd(void *wmi_hdl, + struct hlp_params *req_buf); +#endif + +/** + * wmi_unified_send_request_get_rcpi_cmd() - command to request rcpi value + * @wmi_hdl: wma handle + * @get_rcpi_param: rcpi params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_request_get_rcpi_cmd(void *wmi_hdl, + struct rcpi_req *get_rcpi_param); + +/** + * wmi_extract_rcpi_response_event - api to extract RCPI event params + * @wmi_handle: wma handle + * @evt_buf: pointer to event buffer + * @res: pointer to hold rcpi response from firmware + * + * Return: QDF_STATUS_SUCCESS for successful event parse + * else QDF_STATUS_E_INVAL or QDF_STATUS_E_FAILURE + */ +QDF_STATUS wmi_extract_rcpi_response_event(void *wmi_hdl, void *evt_buf, + struct rcpi_res *res); + +#ifdef WMI_INTERFACE_EVENT_LOGGING +void wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv); + +void wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv); + +void wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv); + +void wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv); + +void wmi_print_event_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv); + +void wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv); + +void wmi_print_mgmt_event_log(wmi_unified_t 
wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv); + +#endif /* WMI_INTERFACE_EVENT_LOGGING */ + +QDF_STATUS wmi_unified_send_dbs_scan_sel_params_cmd(void *wmi_hdl, + struct wmi_dbs_scan_sel_params *wmi_param); + +QDF_STATUS wmi_unified_send_limit_off_chan_cmd(void *wmi_hdl, + struct wmi_limit_off_chan_param *wmi_param); +QDF_STATUS wmi_unified_set_arp_stats_req(void *wmi_hdl, + struct set_arp_stats *req_buf); +QDF_STATUS wmi_unified_get_arp_stats_req(void *wmi_hdl, + struct get_arp_stats *req_buf); + +/** + * wmi_send_bcn_offload_control_cmd - send beacon offload control cmd to fw + * @wmi_hdl: wmi handle + * @bcn_ctrl_param: pointer to bcn_offload_control param + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_send_bcn_offload_control_cmd(void *wmi_hdl, + struct bcn_offload_control *bcn_ctrl_param); +/** + * wmi_unified_send_dump_wds_table_cmd() - WMI function to get list of + * wds entries from FW + * @wmi_hdl: wmi handle + * + * Send WMI_PDEV_WDS_ENTRY_LIST_CMDID parameters to fw. 
+ * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ + +QDF_STATUS wmi_unified_send_dump_wds_table_cmd(void *wmi_hdl); + +/** + * wmi_extract_wds_entry - api to extract wds entry + * @wmi_hdl: wmi handle + * @evt_buf: pointer to event buffer + * @wds_entry: wds entry + * @idx: index to point wds entry in event buffer + * + * Return: QDF_STATUS_SUCCESS for successful event parse + * else QDF_STATUS_E_INVAL or QDF_STATUS_E_FAILURE + */ + +QDF_STATUS wmi_extract_wds_entry(void *wmi_hdl, uint8_t *evt_buf, + struct wdsentry *wds_entry, u_int32_t idx); + +#ifdef WLAN_FEATURE_NAN_CONVERGENCE +/** + * wmi_unified_ndp_initiator_req_cmd_send - api to send initiator request to FW + * @wmi_hdl: wmi handle + * @req: pointer to request buffer + * + * Return: status of operation + */ +QDF_STATUS wmi_unified_ndp_initiator_req_cmd_send(void *wmi_hdl, + struct nan_datapath_initiator_req *req); + +/** + * wmi_unified_ndp_responder_req_cmd_send - api to send responder request to FW + * @wmi_hdl: wmi handle + * @req: pointer to request buffer + * + * Return: status of operation + */ +QDF_STATUS wmi_unified_ndp_responder_req_cmd_send(void *wmi_hdl, + struct nan_datapath_responder_req *req); + +/** + * wmi_unified_ndp_end_req_cmd_send - api to send end request to FW + * @wmi_hdl: wmi handle + * @req: pointer to request buffer + * + * Return: status of operation + */ +QDF_STATUS wmi_unified_ndp_end_req_cmd_send(void *wmi_hdl, + struct nan_datapath_end_req *req); + +/** + * wmi_extract_ndp_initiator_rsp - api to extract initiator rsp from even buffer + * @wmi_hdl: wmi handle + * @data: event buffer + * @rsp: buffer to populate + * + * Return: status of operation + */ +QDF_STATUS wmi_extract_ndp_initiator_rsp(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_initiator_rsp *rsp); + +/** + * wmi_extract_ndp_ind - api to extract ndp indication struct from even buffer + * @wmi_hdl: wmi handle + * @data: event buffer + * @ind: buffer to populate + * + * 
Return: status of operation + */ +QDF_STATUS wmi_extract_ndp_ind(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_indication_event *ind); + +/** + * wmi_extract_ndp_confirm - api to extract ndp confirm struct from event buffer + * @wmi_hdl: wmi handle + * @data: event buffer + * @ev: buffer to populate + * + * Return: status of operation + */ +QDF_STATUS wmi_extract_ndp_confirm(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_confirm_event *ev); + +/** + * wmi_extract_ndp_responder_rsp - api to extract responder rsp from event buffer + * @wmi_hdl: wmi handle + * @data: event buffer + * @rsp: buffer to populate + * + * Return: status of operation + */ +QDF_STATUS wmi_extract_ndp_responder_rsp(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_responder_rsp *rsp); + +/** + * wmi_extract_ndp_end_rsp - api to extract ndp end rsp from event buffer + * @wmi_hdl: wmi handle + * @data: event buffer + * @rsp: buffer to populate + * + * Return: status of operation + */ +QDF_STATUS wmi_extract_ndp_end_rsp(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_end_rsp_event *rsp); + +/** + * wmi_extract_ndp_end_ind - api to extract ndp end indication from event buffer + * @wmi_hdl: wmi handle + * @data: event buffer + * @ind: buffer to populate + * + * Return: status of operation + */ +QDF_STATUS wmi_extract_ndp_end_ind(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_end_indication_event **ind); + +/** + * wmi_extract_ndp_sch_update - api to extract ndp sch update from event buffer + * @wmi_hdl: wmi handle + * @data: event buffer + * @ind: buffer to populate + * + * Return: status of operation + */ +QDF_STATUS wmi_extract_ndp_sch_update(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_sch_update_event *ind); +#endif + +/** + * wmi_unified_send_btm_config() - Send BTM config to fw + * @wmi_hdl: wmi handle + * @params: pointer to wmi_btm_config + * + * Return: QDF_STATUS + */ +QDF_STATUS 
wmi_unified_send_btm_config(void *wmi_hdl, + struct wmi_btm_config *params); + +/** + * wmi_unified_send_bss_load_config() - Send bss load config to fw + * @wmi_hdl: wmi handle + * @params: pointer to wmi_bss_load_config + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_unified_send_bss_load_config(void *wmi_hdl, + struct wmi_bss_load_config *params); + +/** + * wmi_unified_send_obss_detection_cfg_cmd() - WMI function to send obss + * detection configuration to FW. + * @wmi_hdl: wmi handle + * @cfg: obss detection configuration + * + * Send WMI_SAP_OBSS_DETECTION_CFG_CMDID parameters to fw. + * + * Return: QDF_STATUS + */ + +QDF_STATUS wmi_unified_send_obss_detection_cfg_cmd(void *wmi_hdl, + struct wmi_obss_detection_cfg_param *cfg); + +/** + * wmi_unified_extract_obss_detection_info() - WMI function to extract obss + * detection info from FW. + * @wmi_hdl: wmi handle + * @data: event data from firmware + * @info: Pointer to hold obss detection info + * + * This function is used to extract obss info from firmware. + * + * Return: QDF_STATUS + */ + +QDF_STATUS wmi_unified_extract_obss_detection_info(void *wmi_hdl, + uint8_t *data, + struct wmi_obss_detect_info + *info); +/** + * wmi_unified_send_bss_color_change_enable_cmd() - WMI function to send bss + * color change enable to FW. + * @wmi_hdl: wmi handle + * @vdev_id: vdev ID + * @enable: enable or disable color change handling within firmware + * + * Send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID parameters to fw, + * thereby firmware updates bss color when AP announces bss color change. + * + * Return: QDF_STATUS + */ + +QDF_STATUS wmi_unified_send_bss_color_change_enable_cmd(void *wmi_hdl, + uint32_t vdev_id, + bool enable); + +/** + * wmi_unified_send_obss_color_collision_cfg_cmd() - WMI function to send bss + * color collision detection configuration to FW. + * @wmi_hdl: wmi handle + * @cfg: obss color collision detection configuration + * + * Send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID parameters to fw. 
+ * + * Return: QDF_STATUS + */ + +QDF_STATUS wmi_unified_send_obss_color_collision_cfg_cmd(void *wmi_hdl, + struct wmi_obss_color_collision_cfg_param *cfg); + +/** + * wmi_unified_extract_obss_color_collision_info() - WMI function to extract + * obss color collision info from FW. + * @wmi_hdl: wmi handle + * @data: event data from firmware + * @info: Pointer to hold bss color collision info + * + * This function is used to extract bss collision info from firmware. + * + * Return: QDF_STATUS + */ + +QDF_STATUS wmi_unified_extract_obss_color_collision_info(void *wmi_hdl, + uint8_t *data, struct wmi_obss_color_collision_info *info); + +#ifdef WLAN_SUPPORT_GREEN_AP +QDF_STATUS wmi_extract_green_ap_egap_status_info( + void *wmi_hdl, uint8_t *evt_buf, + struct wlan_green_ap_egap_status_info *egap_status_info_params); +#endif + +#ifdef WLAN_SUPPORT_FILS +/** + * wmi_unified_fils_vdev_config_send_cmd() - send FILS config cmd to fw + * @wmi_hdl: wmi handle + * @param: fils config params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS +wmi_unified_fils_vdev_config_send_cmd(void *wmi_hdl, + struct config_fils_params *param); + +/** + * wmi_extract_swfda_vdev_id() - api to extract vdev id + * @wmi_hdl: wmi handle + * @evt_buf: pointer to event buffer + * @vdev_id: pointer to vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_extract_swfda_vdev_id(void *wmi_hdl, void *evt_buf, + uint32_t *vdev_id); + +/** + * wmi_unified_fils_discovery_send_cmd() - send FILS discovery cmd to fw + * @wmi_hdl: wmi handle + * @param: fils discovery params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_unified_fils_discovery_send_cmd(void *wmi_hdl, + struct fd_params *param); +#endif /* WLAN_SUPPORT_FILS */ + +/** + * wmi_unified_send_roam_scan_stats_cmd() - Wrapper to request roam scan stats + * @wmi_hdl: wmi handle + * @params: request params + * + * This function is used to send the roam scan 
stats request command to + * firmware. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_send_roam_scan_stats_cmd(void *wmi_hdl, + struct wmi_roam_scan_stats_req *params); + +/** + * wmi_extract_roam_scan_stats_res_evt() - API to extract roam scan stats res + * @wmi: wmi handle + * @evt_buf: pointer to the event buffer + * @vdev_id: output pointer to hold vdev id + * @res_param: output pointer to hold extracted memory + * + * Return: QDF_STATUS + */ +QDF_STATUS +wmi_extract_roam_scan_stats_res_evt(wmi_unified_t wmi, void *evt_buf, + uint32_t *vdev_id, + struct wmi_roam_scan_stats_res **res_param); +/** + * wmi_unified_offload_11k_cmd() - send 11k offload command + * @wmi_hdl: wmi handle + * @params: 11k offload params + * + * This function passes the 11k offload command params to FW + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_offload_11k_cmd(void *wmi_hdl, + struct wmi_11k_offload_params *params); +/** + * wmi_unified_invoke_neighbor_report_cmd() - send invoke neighbor report cmd + * @wmi_hdl: wmi handle + * @params: invoke neighbor report params + * + * This function passes the invoke neighbor report command to fw + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_invoke_neighbor_report_cmd(void *wmi_hdl, + struct wmi_invoke_neighbor_report_params *params); + +/* wmi_get_ch_width_from_phy_mode() - convert phy mode to channel width + * @wmi_hdl: wmi handle + * @phymode: phy mode + * + * Return: wmi channel width + */ +wmi_host_channel_width wmi_get_ch_width_from_phy_mode(void *wmi_hdl, + WMI_HOST_WLAN_PHY_MODE phymode); + +#ifdef QCA_SUPPORT_CP_STATS +/** + * wmi_extract_cca_stats() - api to extract congestion stats from event buffer + * @wmi_handle: wma handle + * @evt_buf: event buffer + * @datalen: length of buffer + * @stats: buffer to populated after stats extraction + * 
+ * Return: status of operation + */ +QDF_STATUS wmi_extract_cca_stats(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_congestion_stats *stats); +#endif /* QCA_SUPPORT_CP_STATS */ + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +/** + * wmi_unified_dfs_send_avg_params_cmd() - send average radar parameters cmd. + * @wmi_hdl: wmi handle + * @params: radar found params + * + * This function passes the average radar parameters to fw + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_dfs_send_avg_params_cmd(void *wmi_hdl, + struct dfs_radar_found_params *params); + +/** + * wmi_extract_dfs_status_from_fw() - extract host dfs status from fw. + * @wmi_hdl: wmi handle + * @evt_buf: pointer to event buffer + * @dfs_status_check: pointer to the host dfs status + * + * This function extracts the result of host dfs from fw + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_dfs_status_from_fw(void *wmi_hdl, void *evt_buf, + uint32_t *dfs_status_check); +#endif + +void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle, + HTC_PACKET *htc_packet); + +/** + * wmi_unified_send_mws_coex_req_cmd() - WMI function to send coex req cmd + * @wmi_hdl: wmi handle + * @vdev_id: Vdev Id + * @cmd_id: Coex cmd for which info is required + * + * Send wmi coex command to fw. 
+ * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_unified_send_mws_coex_req_cmd(struct wmi_unified *wmi_handle, + uint32_t vdev_id, uint32_t cmd_id); +#endif /* _WMI_UNIFIED_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_dfs_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_dfs_api.h new file mode 100644 index 0000000000000000000000000000000000000000..5a35821b0046b41da7d5556f3fd9a1df2b84e96d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_dfs_api.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: This file contains the API definitions for the Unified Wireless Module + * Interface (WMI) which are specific to DFS module. 
+ */ + +#ifndef _WMI_UNIFIED_DFS_API_H_ +#define _WMI_UNIFIED_DFS_API_H_ + +#include + +/** + * wmi_extract_dfs_cac_complete_event() - function to handle cac complete event + * @handle: wma handle + * @event_buf: event buffer + * @vdev_id: vdev id + * @len: length of buffer + * + * Return: 0 for success or error code + */ +QDF_STATUS wmi_extract_dfs_cac_complete_event(void *wmi_hdl, + uint8_t *evt_buf, + uint32_t *vdev_id, + uint32_t len); + +/** + * wmi_extract_dfs_radar_detection_event() - function to handle radar event + * @handle: wma handle + * @event_buf: event buffer + * @radar_found: radar found event info + * @vdev_id: vdev id + * @len: length of buffer + * + * Return: 0 for success or error code + */ +QDF_STATUS wmi_extract_dfs_radar_detection_event(void *wmi_hdl, + uint8_t *evt_buf, + struct radar_found_info *radar_found, + uint32_t len); + +#ifdef QCA_MCL_DFS_SUPPORT +/** + * wmi_extract_wlan_radar_event_info() - function to handle radar pulse event. + * @wmi_hdl: wmi handle + * @evt_buf: event buffer + * @wlan_radar_event: pointer to radar event info structure + * @len: length of buffer + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_extract_wlan_radar_event_info(void *wmi_hdl, + uint8_t *evt_buf, + struct radar_event_info *wlan_radar_event, + uint32_t len); +#endif +#endif /* _WMI_UNIFIED_DFS_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_extscan_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_extscan_api.h new file mode 100644 index 0000000000000000000000000000000000000000..12e6e93ebd39f3a6ccc49cd06464b17cdf356517 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_extscan_api.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _WMI_UNIFIED_EXTSCAN_API_H_ +#define _WMI_UNIFIED_EXTSCAN_API_H_ + +QDF_STATUS wmi_unified_reset_passpoint_network_list_cmd(void *wmi_hdl, + struct wifi_passpoint_req_param *req); + +QDF_STATUS wmi_unified_set_passpoint_network_list_cmd(void *wmi_hdl, + struct wifi_passpoint_req_param *req); + +QDF_STATUS wmi_unified_set_epno_network_list_cmd(void *wmi_hdl, + struct wifi_enhanced_pno_params *req); + +QDF_STATUS wmi_unified_extscan_get_capabilities_cmd(void *wmi_hdl, + struct extscan_capabilities_params *pgetcapab); + +QDF_STATUS wmi_unified_extscan_get_cached_results_cmd(void *wmi_hdl, + struct extscan_cached_result_params *pcached_results); + +QDF_STATUS wmi_unified_extscan_stop_change_monitor_cmd(void *wmi_hdl, + struct extscan_capabilities_reset_params *reset_req); + +QDF_STATUS wmi_unified_extscan_start_change_monitor_cmd(void *wmi_hdl, + struct extscan_set_sig_changereq_params * + psigchange); + +QDF_STATUS wmi_unified_extscan_stop_hotlist_monitor_cmd(void *wmi_hdl, + struct extscan_bssid_hotlist_reset_params *photlist_reset); + +/** + * wmi_unified_extscan_start_hotlist_monitor_cmd() - start hotlist monitor + * @wmi_hdl: wmi handle + * @params: hotlist params + * + * This function configures hotlist 
monitor to start in fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_extscan_start_hotlist_monitor_cmd(void *wmi_hdl, + struct extscan_bssid_hotlist_set_params *params); + +QDF_STATUS wmi_unified_stop_extscan_cmd(void *wmi_hdl, + struct extscan_stop_req_params *pstopcmd); + +QDF_STATUS wmi_unified_start_extscan_cmd(void *wmi_hdl, + struct wifi_scan_cmd_req_params *pstart); + +#endif /* _WMI_UNIFIED_EXTSCAN_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h new file mode 100644 index 0000000000000000000000000000000000000000..441acdaffa6fdc51607d14056d6e7233b1edad9f --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h @@ -0,0 +1,8800 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains the API definitions for the Unified Wireless Module + * Interface (WMI). 
+ */ + +#ifndef _WMI_UNIFIED_PARAM_H_ +#define _WMI_UNIFIED_PARAM_H_ + +#include +#ifdef CONVERGED_TDLS_ENABLE +#include +#endif + +#define MAC_MAX_KEY_LENGTH 32 +#define MAC_PN_LENGTH 8 +#define MAX_MAC_HEADER_LEN 32 +#define MIN_MAC_HEADER_LEN 24 +#define QOS_CONTROL_LEN 2 + +#define IEEE80211_ADDR_LEN 6 /* size of 802.11 address */ +#define WMI_MAC_MAX_SSID_LENGTH 32 +#define mgmt_tx_dl_frm_len 64 +#define WMI_SMPS_MASK_LOWER_16BITS 0xFF +#define WMI_SMPS_MASK_UPPER_3BITS 0x7 +#define WMI_SMPS_PARAM_VALUE_S 29 +#define WMI_UNIT_TEST_MAX_NUM_ARGS 100 +/* The size of the utc time in bytes. */ +#define WMI_SIZE_UTC_TIME (10) +/* The size of the utc time error in bytes. */ +#define WMI_SIZE_UTC_TIME_ERROR (5) +#define WMI_MCC_MIN_CHANNEL_QUOTA 20 +#define WMI_MCC_MAX_CHANNEL_QUOTA 80 +#define WMI_MCC_MIN_NON_ZERO_CHANNEL_LATENCY 30 +#define WMI_BEACON_TX_BUFFER_SIZE (512) +#define WMI_WIFI_SCANNING_MAC_OUI_LENGTH 3 +#define WMI_EXTSCAN_MAX_SIGNIFICANT_CHANGE_APS 64 +#define WMI_RSSI_THOLD_DEFAULT -300 +#define WMI_NLO_FREQ_THRESH 1000 +#define WMI_SEC_TO_MSEC(sec) (sec * 1000) +#define WMI_MSEC_TO_USEC(msec) (msec * 1000) +#define WMI_ETH_LEN 64 +#define WMI_QOS_NUM_TSPEC_MAX 2 +#define WMI_QOS_NUM_AC_MAX 4 +#define WMI_IPV4_ADDR_LEN 4 +#define WMI_KEEP_ALIVE_NULL_PKT 1 +#define WMI_KEEP_ALIVE_UNSOLICIT_ARP_RSP 2 +#define WMI_MAC_MAX_KEY_LENGTH 32 +#define WMI_KRK_KEY_LEN 16 +#ifdef WLAN_FEATURE_ROAM_OFFLOAD +#define WMI_BTK_KEY_LEN 32 +#define WMI_ROAM_R0KH_ID_MAX_LEN 48 +#define WMI_ROAM_SCAN_PSK_SIZE 32 +#endif +#define WMI_NOISE_FLOOR_DBM_DEFAULT (-96) +#define WMI_EXTSCAN_MAX_HOTLIST_SSIDS 8 +#define WMI_ROAM_MAX_CHANNELS 80 +#ifdef FEATURE_WLAN_EXTSCAN +#define WMI_MAX_EXTSCAN_MSG_SIZE 1536 +#define WMI_EXTSCAN_REST_TIME 100 +#define WMI_EXTSCAN_MAX_SCAN_TIME 50000 +#define WMI_EXTSCAN_BURST_DURATION 150 +#endif +#define WMI_SCAN_NPROBES_DEFAULT (2) +#define WMI_SEC_TO_MSEC(sec) (sec * 1000) /* sec to msec */ +#define WMI_MSEC_TO_USEC(msec) (msec * 1000) /* 
msec to usec */ +#define WMI_NLO_FREQ_THRESH 1000 /* in MHz */ + +#define WMI_SVC_MSG_MAX_SIZE 1536 +#define MAX_UTF_EVENT_LENGTH 2048 +#define MAX_WMI_UTF_LEN 252 +#define MAX_WMI_QVIT_LEN 252 +#define THERMAL_LEVELS 4 +#define WMI_HOST_BCN_FLT_MAX_SUPPORTED_IES 256 +#define WMI_HOST_BCN_FLT_MAX_ELEMS_IE_LIST \ + (WMI_HOST_BCN_FLT_MAX_SUPPORTED_IES/32) +#define LTEU_MAX_BINS 10 +#define ATF_ACTIVED_MAX_CLIENTS 50 +#define ATF_ACTIVED_MAX_ATFGROUPS 8 +#define CTL_5G_SIZE 1536 +#define CTL_2G_SIZE 684 +#define MAX_CTL_SIZE (CTL_5G_SIZE > CTL_2G_SIZE ? CTL_5G_SIZE : CTL_2G_SIZE) +#define IEEE80211_MICBUF_SIZE (8+8) +#define IEEE80211_TID_SIZE 17 +#define WME_NUM_AC 4 +#define SMART_ANT_MODE_SERIAL 0 +#define SMART_ANT_MODE_PARALLEL 1 +#define IEEE80211_WEP_NKID 4 /* number of key ids */ +#define WPI_IV_LEN 16 +#define WMI_SCAN_MAX_NUM_BSSID 10 +#define MAX_CHANS 1023 +#define TARGET_OEM_CONFIGURE_LCI 0x0A +#define RTT_LCI_ALTITUDE_MASK 0x3FFFFFFF +#define TARGET_OEM_CONFIGURE_LCR 0x09 +#define RTT_TIMEOUT_MS 180 +#define MAX_SUPPORTED_RATES 128 +#define WMI_HOST_MAX_BUFFER_SIZE 1712 +#define WMI_HAL_MAX_SANTENNA 4 +#define WMI_HOST_PDEV_VI_PRIORITY_BIT (1<<2) +#define WMI_HOST_PDEV_BEACON_PRIORITY_BIT (1<<4) +#define WMI_HOST_PDEV_MGMT_PRIORITY_BIT (1<<5) +#define WMI_MAX_CMDS 1024 + +#define FIPS_ALIGN 4 +#define FIPS_ALIGNTO(__addr, __to) \ + ((((unsigned long int)(__addr)) + (__to) - 1) & ~((__to) - 1)) +#define FIPS_IS_ALIGNED(__addr, __to) \ + (!(((unsigned long int)(__addr)) & ((__to)-1))) + +#define WMI_HOST_MAX_SERIAL_ANTENNA 2 +#define WMI_SMART_ANT_MAX_RATE_SERIES 2 + +#define WMI_HOST_F_MS(_v, _f) \ + (((_v) & (_f)) >> (_f##_S)) + +#define WMI_HOST_F_RMW(_var, _v, _f) \ + do { \ + (_var) &= ~(_f); \ + (_var) |= (((_v) << (_f##_S)) & (_f)); \ + } while (0) + +/* vdev capabilities bit mask */ +#define WMI_HOST_VDEV_BEACON_SUPPORT 0x1 +#define WMI_HOST_VDEV_WDS_LRN_ENABLED 0x2 +#define WMI_HOST_VDEV_VOW_ENABLED 0x4 +#define 
WMI_HOST_VDEV_IS_BEACON_SUPPORTED(param) \ + ((param) & WMI_HOST_VDEV_BEACON_SUPPORT) +#define WMI_HOST_VDEV_IS_WDS_LRN_ENABLED(param) \ + ((param) & WMI_HOST_VDEV_WDS_LRN_ENABLED) +#define WMI_HOST_VDEV_IS_VOW_ENABLED(param) \ + ((param) & WMI_HOST_VDEV_VOW_ENABLED) + +/* TXBF capabilities masks */ +#define WMI_HOST_TXBF_CONF_SU_TX_BFEE_S 0 +#define WMI_HOST_TXBF_CONF_SU_TX_BFEE_M 0x1 +#define WMI_HOST_TXBF_CONF_SU_TX_BFEE \ + (WMI_HOST_TXBF_CONF_SU_TX_BFEE_M << WMI_HOST_TXBF_CONF_SU_TX_BFEE_S) +#define WMI_HOST_TXBF_CONF_SU_TX_BFEE_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_TXBF_CONF_SU_TX_BFEE) +#define WMI_HOST_TXBF_CONF_SU_TX_BFEE_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_TXBF_CONF_SU_TX_BFEE) + + +#define WMI_HOST_TXBF_CONF_MU_TX_BFEE_S 1 +#define WMI_HOST_TXBF_CONF_MU_TX_BFEE_M 0x1 +#define WMI_HOST_TXBF_CONF_MU_TX_BFEE \ + (WMI_HOST_TXBF_CONF_MU_TX_BFEE_M << WMI_HOST_TXBF_CONF_MU_TX_BFEE_S) +#define WMI_HOST_TXBF_CONF_MU_TX_BFEE_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_TXBF_CONF_MU_TX_BFEE) +#define WMI_HOST_TXBF_CONF_MU_TX_BFEE_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_TXBF_CONF_MU_TX_BFEE) + +#define WMI_HOST_TXBF_CONF_SU_TX_BFER_S 2 +#define WMI_HOST_TXBF_CONF_SU_TX_BFER_M 0x1 +#define WMI_HOST_TXBF_CONF_SU_TX_BFER \ + (WMI_HOST_TXBF_CONF_SU_TX_BFER_M << WMI_HOST_TXBF_CONF_SU_TX_BFER_S) +#define WMI_HOST_TXBF_CONF_SU_TX_BFER_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_TXBF_CONF_SU_TX_BFER) +#define WMI_HOST_TXBF_CONF_SU_TX_BFER_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_TXBF_CONF_SU_TX_BFER) + +#define WMI_HOST_TXBF_CONF_MU_TX_BFER_S 3 +#define WMI_HOST_TXBF_CONF_MU_TX_BFER_M 0x1 +#define WMI_HOST_TXBF_CONF_MU_TX_BFER \ + (WMI_HOST_TXBF_CONF_MU_TX_BFER_M << WMI_HOST_TXBF_CONF_MU_TX_BFER_S) +#define WMI_HOST_TXBF_CONF_MU_TX_BFER_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_TXBF_CONF_MU_TX_BFER) +#define WMI_HOST_TXBF_CONF_MU_TX_BFER_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_TXBF_CONF_MU_TX_BFER) + +#define WMI_HOST_TXBF_CONF_STS_CAP_S 4 +#define 
WMI_HOST_TXBF_CONF_STS_CAP_M 0x7 +#define WMI_HOST_TXBF_CONF_STS_CAP \ + (WMI_HOST_TXBF_CONF_STS_CAP_M << WMI_HOST_TXBF_CONF_STS_CAP_S) +#define WMI_HOST_TXBF_CONF_STS_CAP_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_TXBF_CONF_STS_CAP); +#define WMI_HOST_TXBF_CONF_STS_CAP_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_TXBF_CONF_STS_CAP) + +#define WMI_HOST_TXBF_CONF_IMPLICIT_BF_S 7 +#define WMI_HOST_TXBF_CONF_IMPLICIT_BF_M 0x1 +#define WMI_HOST_TXBF_CONF_IMPLICIT_BF \ + (WMI_HOST_TXBF_CONF_IMPLICIT_BF_M << WMI_HOST_TXBF_CONF_IMPLICIT_BF_S) +#define WMI_HOST_TXBF_CONF_IMPLICIT_BF_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_TXBF_CONF_IMPLICIT_BF) +#define WMI_HOST_TXBF_CONF_IMPLICIT_BF_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_TXBF_CONF_IMPLICIT_BF) + +#define WMI_HOST_TXBF_CONF_BF_SND_DIM_S 8 +#define WMI_HOST_TXBF_CONF_BF_SND_DIM_M 0x7 +#define WMI_HOST_TXBF_CONF_BF_SND_DIM \ + (WMI_HOST_TXBF_CONF_BF_SND_DIM_M << WMI_HOST_TXBF_CONF_BF_SND_DIM_S) +#define WMI_HOST_TXBF_CONF_BF_SND_DIM_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_TXBF_CONF_BF_SND_DIM) +#define WMI_HOST_TXBF_CONF_BF_SND_DIM_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_TXBF_CONF_BF_SND_DIM) + +/* The following WMI_HOST_HEOPS_BSSCOLOR_XXX macros correspond to the + * WMI_HEOPS_COLOR_XXX macros in the FW wmi_unified.h */ +#define WMI_HOST_HEOPS_BSSCOLOR_S 0 +#define WMI_HOST_HEOPS_BSSCOLOR_M 0x3f +#define WMI_HOST_HEOPS_BSSCOLOR \ + (WMI_HOST_HEOPS_BSSCOLOR_M << WMI_HOST_HEOPS_BSSCOLOR_S) +#define WMI_HOST_HEOPS_BSSCOLOR_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HEOPS_BSSCOLOR) +#define WMI_HOST_HEOPS_BSSCOLOR_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HEOPS_BSSCOLOR) + +/* The following WMI_HOST_HEOPS_BSSCOLOR_DISABLE_XXX macros correspond to the + * WMI_HEOPS_BSSCOLORDISABLE_XXX macros in the FW wmi_unified.h */ +#define WMI_HOST_HEOPS_BSSCOLOR_DISABLE_S 30 +#define WMI_HOST_HEOPS_BSSCOLOR_DISABLE_M 0x1 +#define WMI_HOST_HEOPS_BSSCOLOR_DISABLE \ + (WMI_HOST_HEOPS_BSSCOLOR_DISABLE_M << WMI_HOST_HEOPS_BSSCOLOR_DISABLE_S) +#define 
WMI_HOST_HEOPS_BSSCOLOR_DISABLE_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HEOPS_BSSCOLOR_DISABLE) +#define WMI_HOST_HEOPS_BSSCOLOR_DISABLE_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HEOPS_BSSCOLOR_DISABLE) + +/* HE BF capabilities mask */ +#define WMI_HOST_HE_BF_CONF_SU_BFEE_S 0 +#define WMI_HOST_HE_BF_CONF_SU_BFEE_M 0x1 +#define WMI_HOST_HE_BF_CONF_SU_BFEE \ + (WMI_HOST_HE_BF_CONF_SU_BFEE_M << WMI_HOST_HE_BF_CONF_SU_BFEE_S) +#define WMI_HOST_HE_BF_CONF_SU_BFEE_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HE_BF_CONF_SU_BFEE) +#define WMI_HOST_HE_BF_CONF_SU_BFEE_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HE_BF_CONF_SU_BFEE) + +#define WMI_HOST_HE_BF_CONF_SU_BFER_S 1 +#define WMI_HOST_HE_BF_CONF_SU_BFER_M 0x1 +#define WMI_HOST_HE_BF_CONF_SU_BFER \ + (WMI_HOST_HE_BF_CONF_SU_BFER_M << WMI_HOST_HE_BF_CONF_SU_BFER_S) +#define WMI_HOST_HE_BF_CONF_SU_BFER_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HE_BF_CONF_SU_BFER) +#define WMI_HOST_HE_BF_CONF_SU_BFER_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HE_BF_CONF_SU_BFER) + +#define WMI_HOST_HE_BF_CONF_MU_BFEE_S 2 +#define WMI_HOST_HE_BF_CONF_MU_BFEE_M 0x1 +#define WMI_HOST_HE_BF_CONF_MU_BFEE \ + (WMI_HOST_HE_BF_CONF_MU_BFEE_M << WMI_HOST_HE_BF_CONF_MU_BFEE_S) +#define WMI_HOST_HE_BF_CONF_MU_BFEE_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HE_BF_CONF_MU_BFEE) +#define WMI_HOST_HE_BF_CONF_MU_BFEE_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HE_BF_CONF_MU_BFEE) + +#define WMI_HOST_HE_BF_CONF_MU_BFER_S 3 +#define WMI_HOST_HE_BF_CONF_MU_BFER_M 0x1 +#define WMI_HOST_HE_BF_CONF_MU_BFER \ + (WMI_HOST_HE_BF_CONF_MU_BFER_M << WMI_HOST_HE_BF_CONF_MU_BFER_S) +#define WMI_HOST_HE_BF_CONF_MU_BFER_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HE_BF_CONF_MU_BFER) +#define WMI_HOST_HE_BF_CONF_MU_BFER_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HE_BF_CONF_MU_BFER) + +#define WMI_HOST_HE_BF_CONF_DL_OFDMA_S 4 +#define WMI_HOST_HE_BF_CONF_DL_OFDMA_M 0x1 +#define WMI_HOST_HE_BF_CONF_DL_OFDMA \ + (WMI_HOST_HE_BF_CONF_DL_OFDMA_M << WMI_HOST_HE_BF_CONF_DL_OFDMA_S) +#define 
WMI_HOST_HE_BF_CONF_DL_OFDMA_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HE_BF_CONF_DL_OFDMA) +#define WMI_HOST_HE_BF_CONF_DL_OFDMA_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HE_BF_CONF_DL_OFDMA) + +#define WMI_HOST_HE_BF_CONF_UL_OFDMA_S 5 +#define WMI_HOST_HE_BF_CONF_UL_OFDMA_M 0x1 +#define WMI_HOST_HE_BF_CONF_UL_OFDMA \ + (WMI_HOST_HE_BF_CONF_UL_OFDMA_M << WMI_HOST_HE_BF_CONF_UL_OFDMA_S) +#define WMI_HOST_HE_BF_CONF_UL_OFDMA_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HE_BF_CONF_UL_OFDMA) +#define WMI_HOST_HE_BF_CONF_UL_OFDMA_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HE_BF_CONF_UL_OFDMA) + +#define WMI_HOST_HE_BF_CONF_UL_MUMIMO_S 6 +#define WMI_HOST_HE_BF_CONF_UL_MUMIMO_M 0x1 +#define WMI_HOST_HE_BF_CONF_UL_MUMIMO \ + (WMI_HOST_HE_BF_CONF_UL_MUMIMO_M << WMI_HOST_HE_BF_CONF_UL_MUMIMO_S) +#define WMI_HOST_HE_BF_CONF_UL_MUMIMO_GET(x) \ + WMI_HOST_F_MS(x, WMI_HOST_HE_BF_CONF_UL_MUMIMO) +#define WMI_HOST_HE_BF_CONF_UL_MUMIMO_SET(x, z) \ + WMI_HOST_F_RMW(x, z, WMI_HOST_HE_BF_CONF_UL_MUMIMO) + +#define WMI_HOST_TPC_RATE_MAX 160 +#define WMI_HOST_TPC_TX_NUM_CHAIN 4 +#define WMI_HOST_RXG_CAL_CHAN_MAX 8 +#define WMI_HOST_MAX_NUM_CHAINS 8 +#define WMI_MAX_NUM_OF_RATE_THRESH 4 + +#define WMI_HOST_PDEV_MAX_VDEVS 17 + +/* for QC98XX only */ +/*6 modes (A, HT20, HT40, VHT20, VHT40, VHT80) * 3 reg dommains + */ +#define WMI_HOST_NUM_CTLS_5G 18 +/*6 modes (B, G, HT20, HT40, VHT20, VHT40) * 3 reg domains */ +#define WMI_HOST_NUM_CTLS_2G 18 +#define WMI_HOST_NUM_BAND_EDGES_5G 8 +#define WMI_HOST_NUM_BAND_EDGES_2G 4 + +/*Beelinier 5G*/ +#define WMI_HOST_NUM_CTLS_5G_11A 9 +#define WMI_HOST_NUM_BAND_EDGES_5G_11A 25 +#define WMI_HOST_NUM_CTLS_5G_HT20 24 +#define WMI_HOST_NUM_BAND_EDGES_5G_HT20 25 +#define WMI_HOST_NUM_CTLS_5G_HT40 18 +#define WMI_HOST_NUM_BAND_EDGES_5G_HT40 12 +#define WMI_HOST_NUM_CTLS_5G_HT80 18 +#define WMI_HOST_NUM_BAND_EDGES_5G_HT80 6 +#define WMI_HOST_NUM_CTLS_5G_HT160 9 +#define WMI_HOST_NUM_BAND_EDGES_5G_HT160 2 + +/* Beeliner 2G */ +#define WMI_HOST_NUM_CTLS_2G_11B 6 
+#define WMI_HOST_NUM_BAND_EDGES_2G_11B 9 +#define WMI_HOST_NUM_CTLS_2G_20MHZ 30 +#define WMI_HOST_NUM_BAND_EDGES_2G_20MHZ 11 +#define WMI_HOST_NUM_CTLS_2G_40MHZ 18 +#define WMI_HOST_NUM_BAND_EDGES_2G_40MHZ 6 + +/* for QC98XX only */ +#define WMI_HOST_TX_NUM_CHAIN 0x3 +#define WMI_HOST_TPC_REGINDEX_MAX 4 +#define WMI_HOST_ARRAY_GAIN_NUM_STREAMS 2 + +#include "qdf_atomic.h" + +#ifdef BIG_ENDIAN_HOST + /* This API is used in copying in elements to WMI message, + since WMI message uses multilpes of 4 bytes, This API + converts length into multiples of 4 bytes, and performs copy + */ +#define WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(destp, srcp, len) do { \ + int j; \ + u_int32_t *src, *dest; \ + src = (u_int32_t *)srcp; \ + dest = (u_int32_t *)destp; \ + for (j = 0; j < roundup(len, sizeof(u_int32_t))/4; j++) { \ + *(dest+j) = qdf_le32_to_cpu(*(src+j)); \ + } \ +} while (0) +#else + +#define WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(destp, srcp, len) OS_MEMCPY(destp,\ + srcp, len) + +#endif + +/** macro to convert MAC address from WMI word format to char array */ +#define WMI_HOST_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \ + (c_macaddr)[0] = ((pwmi_mac_addr)->mac_addr31to0) & 0xff; \ + (c_macaddr)[1] = (((pwmi_mac_addr)->mac_addr31to0) >> 8) & 0xff; \ + (c_macaddr)[2] = (((pwmi_mac_addr)->mac_addr31to0) >> 16) & 0xff; \ + (c_macaddr)[3] = (((pwmi_mac_addr)->mac_addr31to0) >> 24) & 0xff; \ + (c_macaddr)[4] = ((pwmi_mac_addr)->mac_addr47to32) & 0xff; \ + (c_macaddr)[5] = (((pwmi_mac_addr)->mac_addr47to32) >> 8) & 0xff; \ + } while (0) + +#define TARGET_INIT_STATUS_SUCCESS 0x0 +#define TARGET_INIT_STATUS_GEN_FAILED 0x1 +#define TARGET_GET_INIT_STATUS_REASON(status) ((status) & 0xffff) +#define TARGET_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff) + +#define MAX_ASSOC_IE_LENGTH 1024 +typedef uint32_t TARGET_INIT_STATUS; + +/** + * @brief Opaque handle of wmi structure + */ +struct wmi_unified; +typedef struct wmi_unified *wmi_unified_t; + +typedef void *ol_scn_t; 
+/** + * @wmi_event_handler function prototype + */ +typedef int (*wmi_unified_event_handler)(ol_scn_t scn_handle, + uint8_t *event_buf, uint32_t len); + +/** + * @WMI_HOST_WLAN_PHY_MODE: Host based enum ID for corresponding in + * WLAN_PHY_MODE. This should be consistent with WLAN_PHY_MODE always to avoid + * breaking the WMI + */ +typedef enum { + WMI_HOST_MODE_11A = 0, /* 11a Mode */ + WMI_HOST_MODE_11G = 1, /* 11b/g Mode */ + WMI_HOST_MODE_11B = 2, /* 11b Mode */ + WMI_HOST_MODE_11GONLY = 3, /* 11g only Mode */ + WMI_HOST_MODE_11NA_HT20 = 4, /* 11a HT20 mode */ + WMI_HOST_MODE_11NG_HT20 = 5, /* 11g HT20 mode */ + WMI_HOST_MODE_11NA_HT40 = 6, /* 11a HT40 mode */ + WMI_HOST_MODE_11NG_HT40 = 7, /* 11g HT40 mode */ + WMI_HOST_MODE_11AC_VHT20 = 8, + WMI_HOST_MODE_11AC_VHT40 = 9, + WMI_HOST_MODE_11AC_VHT80 = 10, + WMI_HOST_MODE_11AC_VHT20_2G = 11, + WMI_HOST_MODE_11AC_VHT40_2G = 12, + WMI_HOST_MODE_11AC_VHT80_2G = 13, + WMI_HOST_MODE_11AC_VHT80_80 = 14, + WMI_HOST_MODE_11AC_VHT160 = 15, + WMI_HOST_MODE_11AX_HE20 = 16, + WMI_HOST_MODE_11AX_HE40 = 17, + WMI_HOST_MODE_11AX_HE80 = 18, + WMI_HOST_MODE_11AX_HE80_80 = 19, + WMI_HOST_MODE_11AX_HE160 = 20, + WMI_HOST_MODE_11AX_HE20_2G = 21, + WMI_HOST_MODE_11AX_HE40_2G = 22, + WMI_HOST_MODE_11AX_HE80_2G = 23, + WMI_HOST_MODE_UNKNOWN = 24, + WMI_HOST_MODE_MAX = 24 +} WMI_HOST_WLAN_PHY_MODE; + +typedef enum { + WMI_HOST_VDEV_START_OK = 0, + WMI_HOST_VDEV_START_CHAN_INVALID, + WMI_HOST_VDEV_START_CHAN_BLOCKED, +} WMI_HOST_VDEV_START_STATUS; + +/* + * Needs to be removed and use channel_param based + * on how it is processed + */ +typedef struct { + /** primary 20 MHz channel frequency in mhz */ + uint32_t mhz; + /** Center frequency 1 in MHz*/ + uint32_t band_center_freq1; + /** Center frequency 2 in MHz - valid only for 11acvht 80plus80 mode*/ + uint32_t band_center_freq2; + /** channel info described below */ + uint32_t info; + /** contains min power, max power, reg power and reg class id. 
*/ + uint32_t reg_info_1; + /** contains antennamax */ + uint32_t reg_info_2; +} wmi_host_channel; + +/** + * enum WMI_HOST_REGDMN_MODE: + * @WMI_HOST_REGDMN_MODE_11A: 11a channels + * @WMI_HOST_REGDMN_MODE_TURBO: 11a turbo-only channels + * @WMI_HOST_REGDMN_MODE_11B: 11b channels + * @WMI_HOST_REGDMN_MODE_PUREG: 11g channels (OFDM only) + * @WMI_HOST_REGDMN_MODE_11G: historical + * @WMI_HOST_REGDMN_MODE_108G: 11g+Turbo channels + * @WMI_HOST_REGDMN_MODE_108A: 11a+Turbo channels + * @WMI_HOST_REGDMN_MODE_XR: XR channels + * @WMI_HOST_REGDMN_MODE_11A_HALF_RATE: 11a half rate channels + * @WMI_HOST_REGDMN_MODE_11A_QUARTER_RATE: 11a quarter rate channels + * @WMI_HOST_REGDMN_MODE_11NG_HT20: 11ng HT20 channels + * @WMI_HOST_REGDMN_MODE_11NA_HT20: 11na HT20 channels + * @WMI_HOST_REGDMN_MODE_11NG_HT40PLUS: 11ng HT40+ channels + * @WMI_HOST_REGDMN_MODE_11NG_HT40MINUS: 11ng HT40- channels + * @WMI_HOST_REGDMN_MODE_11NA_HT40PLUS: 11na HT40+ channels + * @WMI_HOST_REGDMN_MODE_11NA_HT40MINUS: 11na HT40- channels + * @WMI_HOST_REGDMN_MODE_11AC_VHT20: 5GHz, VHT20 + * @WMI_HOST_REGDMN_MODE_11AC_VHT40PLUS: 5GHz, VHT40+ channels + * @WMI_HOST_REGDMN_MODE_11AC_VHT40MINUS: 5GHz, VHT40- channels + * @WMI_HOST_REGDMN_MODE_11AC_VHT80: 5GHz, VHT80 channels + * @WMI_HOST_REGDMN_MODE_11AC_VHT160: 5GHz, VHT160 channels + * @WMI_HOST_REGDMN_MODE_11AC_VHT80_80: 5GHz, VHT80+80 channels + * @WMI_HOST_REGDMN_MODE_11AXG_HE20: 11ax 2.4GHz, HE20 channels + * @WMI_HOST_REGDMN_MODE_11AXA_HE20: 11ax 5GHz, HE20 channels + * @WMI_HOST_REGDMN_MODE_11AXG_HE40PLUS: 11ax 2.4GHz, HE40+ channels + * @WMI_HOST_REGDMN_MODE_11AXG_HE40MINUS: 11ax 2.4GHz, HE40- channels + * @WMI_HOST_REGDMN_MODE_11AXA_HE40PLUS: 11ax 5GHz, HE40+ channels + * @WMI_HOST_REGDMN_MODE_11AXA_HE40MINUS: 11ax 5GHz, HE40- channels + * @WMI_HOST_REGDMN_MODE_11AXA_HE80: 11ax 5GHz, HE80 channels + * @WMI_HOST_REGDMN_MODE_11AXA_HE160: 11ax 5GHz, HE160 channels + * @WMI_HOST_REGDMN_MODE_11AXA_HE80_80: 11ax 5GHz, HE80+80 channels + */ +typedef 
enum { + WMI_HOST_REGDMN_MODE_11A = 0x00000001, + WMI_HOST_REGDMN_MODE_TURBO = 0x00000002, + WMI_HOST_REGDMN_MODE_11B = 0x00000004, + WMI_HOST_REGDMN_MODE_PUREG = 0x00000008, + WMI_HOST_REGDMN_MODE_11G = 0x00000008, + WMI_HOST_REGDMN_MODE_108G = 0x00000020, + WMI_HOST_REGDMN_MODE_108A = 0x00000040, + WMI_HOST_REGDMN_MODE_XR = 0x00000100, + WMI_HOST_REGDMN_MODE_11A_HALF_RATE = 0x00000200, + WMI_HOST_REGDMN_MODE_11A_QUARTER_RATE = 0x00000400, + WMI_HOST_REGDMN_MODE_11NG_HT20 = 0x00000800, + WMI_HOST_REGDMN_MODE_11NA_HT20 = 0x00001000, + WMI_HOST_REGDMN_MODE_11NG_HT40PLUS = 0x00002000, + WMI_HOST_REGDMN_MODE_11NG_HT40MINUS = 0x00004000, + WMI_HOST_REGDMN_MODE_11NA_HT40PLUS = 0x00008000, + WMI_HOST_REGDMN_MODE_11NA_HT40MINUS = 0x00010000, + WMI_HOST_REGDMN_MODE_11AC_VHT20 = 0x00020000, + WMI_HOST_REGDMN_MODE_11AC_VHT40PLUS = 0x00040000, + WMI_HOST_REGDMN_MODE_11AC_VHT40MINUS = 0x00080000, + WMI_HOST_REGDMN_MODE_11AC_VHT80 = 0x00100000, + WMI_HOST_REGDMN_MODE_11AC_VHT160 = 0x00200000, + WMI_HOST_REGDMN_MODE_11AC_VHT80_80 = 0x00400000, + WMI_HOST_REGDMN_MODE_11AXG_HE20 = 0x00800000, + WMI_HOST_REGDMN_MODE_11AXA_HE20 = 0x01000000, + WMI_HOST_REGDMN_MODE_11AXG_HE40PLUS = 0x02000000, + WMI_HOST_REGDMN_MODE_11AXG_HE40MINUS = 0x04000000, + WMI_HOST_REGDMN_MODE_11AXA_HE40PLUS = 0x08000000, + WMI_HOST_REGDMN_MODE_11AXA_HE40MINUS = 0x10000000, + WMI_HOST_REGDMN_MODE_11AXA_HE80 = 0x20000000, + WMI_HOST_REGDMN_MODE_11AXA_HE160 = 0x40000000, + WMI_HOST_REGDMN_MODE_11AXA_HE80_80 = 0x80000000, + WMI_HOST_REGDMN_MODE_ALL = 0xffffffff +} WMI_HOST_REGDMN_MODE; + +/** + * enum WMI_HOST_WLAN_BAND_CAPABILITY: Band capability (2.4 GHz, 5 GHz). Maps to + * WLAN_BAND_CAPABILITY used in firmware header file(s). 
+ * @WMI_HOST_WLAN_2G_CAPABILITY: 2.4 GHz capable + * @WMI_HOST_WLAN_5G_CAPABILITY: 5 GHz capable + */ +typedef enum { + WMI_HOST_WLAN_2G_CAPABILITY = 0x1, + WMI_HOST_WLAN_5G_CAPABILITY = 0x2, +} WMI_HOST_WLAN_BAND_CAPABILITY; + +/** + * enum wmi_host_channel_width: Channel operating width. Maps to + * wmi_channel_width used in firmware header file(s). + * @WMI_HOST_CHAN_WIDTH_20: 20 MHz channel operating width + * @WMI_HOST_CHAN_WIDTH_40: 40 MHz channel operating width + * @WMI_HOST_CHAN_WIDTH_80: 80 MHz channel operating width + * @WMI_HOST_CHAN_WIDTH_160: 160 MHz channel operating width + * @WMI_HOST_CHAN_WIDTH_80P80: 80+80 MHz channel operating width + * @WMI_HOST_CHAN_WIDTH_5: 5 MHz channel operating width + * @WMI_HOST_CHAN_WIDTH_10: 10 MHz channel operating width + */ +typedef enum { + WMI_HOST_CHAN_WIDTH_20 = 0, + WMI_HOST_CHAN_WIDTH_40 = 1, + WMI_HOST_CHAN_WIDTH_80 = 2, + WMI_HOST_CHAN_WIDTH_160 = 3, + WMI_HOST_CHAN_WIDTH_80P80 = 4, + WMI_HOST_CHAN_WIDTH_5 = 5, + WMI_HOST_CHAN_WIDTH_10 = 6, +} wmi_host_channel_width; + +#define MAX_NUM_CHAN 128 + +#define ATH_EXPONENT_TO_VALUE(v) ((1< CCK 1 Mbps rate is allowed + * bit 1 -> CCK 2 Mbps rate is allowed + * bit 2 -> CCK 5.5 Mbps rate is allowed + * bit 3 -> CCK 11 Mbps rate is allowed + * bit 4 -> OFDM BPSK modulation, 1/2 coding rate is allowed + * bit 5 -> OFDM BPSK modulation, 3/4 coding rate is allowed + * bit 6 -> OFDM QPSK modulation, 1/2 coding rate is allowed + * bit 7 -> OFDM QPSK modulation, 3/4 coding rate is allowed + * bit 8 -> OFDM 16-QAM modulation, 1/2 coding rate is allowed + * bit 9 -> OFDM 16-QAM modulation, 3/4 coding rate is allowed + * bit 10 -> OFDM 64-QAM modulation, 2/3 coding rate is allowed + * bit 11 -> OFDM 64-QAM modulation, 3/4 coding rate is allowed + * @nss_mask: Spatial streams permitted + * bit 0: if set, Nss = 1 (non-MIMO) is permitted + * bit 1: if set, Nss = 2 (2x2 MIMO) is permitted + * bit 2: if set, Nss = 3 (3x3 MIMO) is permitted + * bit 3: if set, Nss = 4 (4x4 MIMO) 
is permitted + * bit 4: if set, Nss = 5 (5x5 MIMO) is permitted + * bit 5: if set, Nss = 6 (6x6 MIMO) is permitted + * bit 6: if set, Nss = 7 (7x7 MIMO) is permitted + * bit 7: if set, Nss = 8 (8x8 MIMO) is permitted + * If no bits are set, target will choose what NSS type to use + * @retry_limit: Maximum number of retries before ACK + * @chain_mask: Chains to be used for transmission + * @bw_mask: Bandwidth to be used for transmission + * bit 0 -> 5MHz + * bit 1 -> 10MHz + * bit 2 -> 20MHz + * bit 3 -> 40MHz + * bit 4 -> 80MHz + * bit 5 -> 160MHz + * bit 6 -> 80_80MHz + * @preamble_type: Preamble types for transmission + * bit 0: if set, OFDM + * bit 1: if set, CCK + * bit 2: if set, HT + * bit 3: if set, VHT + * bit 4: if set, HE + * @frame_type: Data or Management frame + * Data:1 Mgmt:0 + */ +struct tx_send_params { + uint32_t pwr:8, + mcs_mask:12, + nss_mask:8, + retry_limit:4; + uint32_t chain_mask:8, + bw_mask:7, + preamble_type:5, + frame_type:1, + reserved:11; +}; + +/** + * struct wmi_mgmt_params - wmi mgmt cmd parameters + * @tx_frame: management tx frame + * @frm_len: frame length + * @vdev_id: vdev id + * @chanfreq: channel frequency + * @pdata: frame data + * @desc_id: descriptor id relyaed back by target + * @macaddr: macaddr of peer + * @qdf_ctx: qdf context for qdf_nbuf_map + * @tx_param: TX send parameters + * @tx_params_valid: Flag that indicates if TX params are valid + * @use_6mbps: specify whether management frame to transmit should + * use 6 Mbps rather than 1 Mbps min rate(for 5GHz band or P2P) + * @tx_type: type of management frame (determines what callback to use) + */ +struct wmi_mgmt_params { + void *tx_frame; + uint16_t frm_len; + uint8_t vdev_id; + uint16_t chanfreq; + void *pdata; + uint16_t desc_id; + uint8_t *macaddr; + void *qdf_ctx; + struct tx_send_params tx_param; + bool tx_params_valid; + uint8_t use_6mbps; + uint8_t tx_type; +}; + +/** + * struct wmi_offchan_data_tx_params - wmi offchan data tx cmd parameters + * @tx_frame: 
management tx frame + * @frm_len: frame length + * @vdev_id: vdev id + * @chanfreq: channel frequency + * @pdata: frame data + * @desc_id: descriptor id relyaed back by target + * @macaddr: macaddr of peer + * @qdf_ctx: qdf context for qdf_nbuf_map + * @tx_param: TX send parameters + * @tx_params_valid: Flag that indicates if TX params are valid + */ +struct wmi_offchan_data_tx_params { + void *tx_frame; + uint16_t frm_len; + uint8_t vdev_id; + uint16_t chanfreq; + void *pdata; + uint16_t desc_id; + uint8_t *macaddr; + void *qdf_ctx; + struct tx_send_params tx_param; + bool tx_params_valid; +}; + +/** + * struct p2p_ps_params - P2P powersave related params + * @opp_ps: opportunistic power save + * @ctwindow: CT window + * @count: count + * @duration: duration + * @interval: interval + * @single_noa_duration: single shot noa duration + * @ps_selection: power save selection + * @session_id: session id + */ +struct p2p_ps_params { + uint8_t opp_ps; + uint32_t ctwindow; + uint8_t count; + uint32_t duration; + uint32_t interval; + uint32_t single_noa_duration; + uint8_t ps_selection; + uint8_t session_id; +}; + +#ifndef CONVERGED_TDLS_ENABLE +/** + * struct sta_uapsd_params - uapsd auto trig params + * @wmm_ac: WMM access category from 0 to 3 + * @user_priority: User priority to use in trigger frames + * @service_interval: service interval + * @suspend_interval: suspend interval + * @delay_interval: delay interval + */ +struct sta_uapsd_params { + uint32_t wmm_ac; + uint32_t user_priority; + uint32_t service_interval; + uint32_t suspend_interval; + uint32_t delay_interval; +}; + +/** + * struct ta_uapsd_trig_params - uapsd trigger parameter + * @vdevid: vdev id + * @peer_addr: peer address + * @auto_triggerparam: trigger parameters + * @num_ac: no of access category + */ +struct sta_uapsd_trig_params { + uint32_t vdevid; + uint8_t peer_addr[IEEE80211_ADDR_LEN]; + struct sta_uapsd_params *auto_triggerparam; + uint32_t num_ac; +}; +#endif + +#define WMI_NUM_AC (4) 
+#define WMI_MAX_NUM_AC 4 + + +enum wmi_peer_rate_report_cond_phy_type { + WMI_PEER_RATE_REPORT_COND_11B = 0, + WMI_PEER_RATE_REPORT_COND_11A_G, + WMI_PEER_RATE_REPORT_COND_11N, + WMI_PEER_RATE_REPORT_COND_11AC, + WMI_PEER_RATE_REPORT_COND_MAX_NUM +}; + +/** + * struct report_rate_delta - peer specific parameters + * @percent: percentage + * @delta_min: rate min delta + */ +struct report_rate_delta { + uint32_t percent; /* in unit of 12.5% */ + uint32_t delta_min; /* in unit of Mbps */ +}; + +/** + * struct report_rate_per_phy - per phy report parameters + * @cond_flags: condition flag val + * @delta: rate delta + * @report_rate_threshold: rate threshold + */ +struct report_rate_per_phy { + /* + * PEER_RATE_REPORT_COND_FLAG_DELTA, + * PEER_RATE_REPORT_COND_FLAG_THRESHOLD + * Any of these two conditions or both of + * them can be set. + */ + uint32_t cond_flags; + struct report_rate_delta delta; + /* + * In unit of Mbps. There are at most 4 thresholds + * If the threshold count is less than 4, set zero to + * the one following the last threshold + */ + uint32_t report_rate_threshold[WMI_MAX_NUM_OF_RATE_THRESH]; +}; + +/** + * struct peer_rate_report_params - peer rate report parameters + * @rate_report_enable: enable rate report param + * @backoff_time: backoff time + * @timer_period: timer + * @report_per_phy: report per phy type + */ +struct wmi_peer_rate_report_params { + uint32_t rate_report_enable; + uint32_t backoff_time; /* in unit of msecond */ + uint32_t timer_period; /* in unit of msecond */ + /* + *In the following field, the array index means the phy type, + * please see enum wmi_peer_rate_report_cond_phy_type for detail + */ + struct report_rate_per_phy report_per_phy[ + WMI_PEER_RATE_REPORT_COND_MAX_NUM]; + +}; + +/** + * struct t_thermal_cmd_params - thermal command parameters + * @min_temp: minimum temprature + * @max_temp: maximum temprature + * @thermal_enable: thermal enable + */ +struct thermal_cmd_params { + uint16_t min_temp; + uint16_t 
max_temp; + uint8_t thermal_enable; +}; + +#define WMI_LRO_IPV4_SEED_ARR_SZ 5 +#define WMI_LRO_IPV6_SEED_ARR_SZ 11 + +/** + * struct wmi_lro_config_cmd_t - set LRO init parameters + * @lro_enable: indicates whether lro is enabled + * @tcp_flag: If the TCP flags from the packet do not match + * the values in this field after masking with TCP flags mask + * below, packet is not LRO eligible + * @tcp_flag_mask: field for comparing the TCP values provided + * above with the TCP flags field in the received packet + * @toeplitz_hash_ipv4: contains seed needed to compute the flow id + * 5-tuple toeplitz hash for ipv4 packets + * @toeplitz_hash_ipv6: contains seed needed to compute the flow id + * 5-tuple toeplitz hash for ipv6 packets + */ +struct wmi_lro_config_cmd_t { + uint32_t lro_enable; + uint32_t tcp_flag:9, + tcp_flag_mask:9; + uint32_t toeplitz_hash_ipv4[WMI_LRO_IPV4_SEED_ARR_SZ]; + uint32_t toeplitz_hash_ipv6[WMI_LRO_IPV6_SEED_ARR_SZ]; +}; + +/** + * struct gtx_config_t - GTX config + * @gtx_rt_mask: for HT and VHT rate masks + * @gtx_usrcfg: host request for GTX mask + * @gtx_threshold: PER Threshold (default: 10%) + * @gtx_margin: PER margin (default: 2%) + * @gtx_tcpstep: TCP step (default: 1) + * @gtx_tpcMin: TCP min (default: 5) + * @gtx_bwmask: BW mask (20/40/80/160 Mhz) + */ +struct wmi_gtx_config { + uint32_t gtx_rt_mask[2]; + uint32_t gtx_usrcfg; + uint32_t gtx_threshold; + uint32_t gtx_margin; + uint32_t gtx_tpcstep; + uint32_t gtx_tpcmin; + uint32_t gtx_bwmask; +}; + +/** + * struct wmi_probe_resp_params - send probe response parameters + * @prb_rsp_template_frm: pointer to template probe response template + * @prb_rsp_template_len: length of probe response template + */ +struct wmi_probe_resp_params { + uint8_t *prb_rsp_template_frm; + uint32_t prb_rsp_template_len; +}; + +/* struct set_key_params: structure containing + * installation key parameters + * @vdev_id: vdev id + * @key_len: key length + * @key_idx: key index + * @peer_mac: peer mac 
address + * @key_flags: key flags, 0:pairwise key, 1:group key, 2:static key + * @key_cipher: key cipher based on security mode + * @key_txmic_len: tx mic length + * @key_rxmic_len: rx mic length + * @key_tsc_counter: key tx sc counter + * @key_rsc_counter: key rx sc counter + * @rx_iv: receive IV, applicable only in case of WAPI + * @tx_iv: transmit IV, applicable only in case of WAPI + * @key_data: key data + */ +struct set_key_params { + uint8_t vdev_id; + uint16_t key_len; + uint32_t key_idx; + uint8_t peer_mac[IEEE80211_ADDR_LEN]; + uint32_t key_flags; + uint32_t key_cipher; + uint32_t key_txmic_len; + uint32_t key_rxmic_len; + uint64_t key_tsc_counter; + uint64_t *key_rsc_counter; +#if defined(ATH_SUPPORT_WAPI) || defined(FEATURE_WLAN_WAPI) + uint8_t rx_iv[16]; + uint8_t tx_iv[16]; +#endif + uint8_t key_data[WMI_MAC_MAX_KEY_LENGTH]; +}; + +/** + * struct sta_params - sta keep alive parameters + * @vdev_id: vdev id + * @method: keep alive method + * @timeperiod: time to keep alive + * @hostv4addr: host ipv4 address + * @destv4addr: destination ipv4 address + * @destmac: destination mac address + */ +struct sta_params { + uint8_t vdev_id; + uint32_t method; + uint32_t timeperiod; + uint8_t *hostv4addr; + uint8_t *destv4addr; + uint8_t *destmac; +}; + +/** + * struct gateway_update_req_param - gateway parameter update request + * @request_id: request id + * @session_id: session id + * @max_retries: Max ARP/NS retry attempts + * @timeout: Retry interval + * @ipv4_addr_type: on ipv4 network + * @ipv6_addr_type: on ipv6 network + * @gw_mac_addr: gateway mac addr + * @ipv4_addr: ipv4 addr + * @ipv6_addr: ipv6 addr + */ +struct gateway_update_req_param { + uint32_t request_id; + uint32_t session_id; + uint32_t max_retries; + uint32_t timeout; + uint32_t ipv4_addr_type; + uint32_t ipv6_addr_type; + struct qdf_mac_addr gw_mac_addr; + uint8_t ipv4_addr[QDF_IPV4_ADDR_SIZE]; + uint8_t ipv6_addr[QDF_IPV6_ADDR_SIZE]; +}; + +/** + * struct rssi_monitor_param - rssi 
monitoring + * @request_id: request id + * @session_id: session id + * @min_rssi: minimum rssi + * @max_rssi: maximum rssi + * @control: flag to indicate start or stop + */ +struct rssi_monitor_param { + uint32_t request_id; + uint32_t session_id; + int8_t min_rssi; + int8_t max_rssi; + bool control; +}; + +/** + * struct scan_mac_oui - oui parameters + * @oui: oui parameters + * @vdev_id: interface id + * @enb_probe_req_sno_randomization: control probe req sequence no randomization + * @ie_whitelist: probe req IE whitelist attrs + */ +struct scan_mac_oui { + uint8_t oui[WMI_WIFI_SCANNING_MAC_OUI_LENGTH]; + uint32_t vdev_id; + bool enb_probe_req_sno_randomization; + struct probe_req_whitelist_attr ie_whitelist; +}; + +#define WMI_PASSPOINT_REALM_LEN 256 +#define WMI_PASSPOINT_ROAMING_CONSORTIUM_ID_NUM 16 +#define WMI_PASSPOINT_PLMN_LEN 3 +/** + * struct wifi_passpoint_network_param - passpoint network block + * @id: identifier of this network block + * @realm: null terminated UTF8 encoded realm, 0 if unspecified + * @roaming_consortium_ids: roaming consortium ids to match, 0s if unspecified + * @plmn: mcc/mnc combination as per rules, 0s if unspecified + */ +struct wifi_passpoint_network_param { + uint32_t id; + uint8_t realm[WMI_PASSPOINT_REALM_LEN]; + int64_t roaming_consortium_ids[WMI_PASSPOINT_ROAMING_CONSORTIUM_ID_NUM]; + uint8_t plmn[WMI_PASSPOINT_PLMN_LEN]; +}; + +/** + * struct wifi_passpoint_req_param - passpoint request + * @request_id: request identifier + * @vdev_id: vdev that is the target of the request + * @num_networks: number of valid entries in @networks + * @networks: passpoint networks + */ +struct wifi_passpoint_req_param { + uint32_t request_id; + uint32_t vdev_id; + uint32_t num_networks; + struct wifi_passpoint_network_param networks[]; +}; + +/* struct mobility_domain_info - structure containing + * mobility domain info + * @mdie_present: mobility domain present or not + * @mobility_domain: mobility domain + */ +struct mobility_domain_info 
{ + uint8_t mdie_present; + uint16_t mobility_domain; +}; + +#define WMI_HOST_ROAM_OFFLOAD_NUM_MCS_SET (16) + +/* This TLV will be filled only in case roam offload + * for wpa2-psk/pmkid/ese/11r is enabled */ +typedef struct { + /* + * TLV tag and len; tag equals + * WMITLV_TAG_STRUC_wmi_roam_offload_fixed_param + */ + uint32_t tlv_header; + uint32_t rssi_cat_gap; /* gap for every category bucket */ + uint32_t prefer_5g; /* prefer select 5G candidate */ + uint32_t select_5g_margin; + uint32_t reassoc_failure_timeout; /* reassoc failure timeout */ + uint32_t capability; + uint32_t ht_caps_info; + uint32_t ampdu_param; + uint32_t ht_ext_cap; + uint32_t ht_txbf; + uint32_t asel_cap; + uint32_t qos_enabled; + uint32_t qos_caps; + uint32_t wmm_caps; + /* since this is 4 byte aligned, we don't declare it as tlv array */ + uint32_t mcsset[WMI_HOST_ROAM_OFFLOAD_NUM_MCS_SET >> 2]; + uint32_t ho_delay_for_rx; + uint32_t roam_preauth_retry_count; + uint32_t roam_preauth_no_ack_timeout; +} roam_offload_param; + +#define WMI_FILS_MAX_RRK_LENGTH 64 +#define WMI_FILS_MAX_RIK_LENGTH WMI_FILS_MAX_RRK_LENGTH +#define WMI_FILS_MAX_REALM_LENGTH 256 +#define WMI_FILS_MAX_USERNAME_LENGTH 16 + +/** + * struct roam_fils_params - Roam FILS params + * @username: username + * @username_length: username length + * @next_erp_seq_num: next ERP sequence number + * @rrk: RRK + * @rrk_length: length of @rrk + * @rik: RIK + * @rik_length: length of @rik + * @realm: realm + * @realm_len: length of @realm + */ +struct roam_fils_params { + uint8_t username[WMI_FILS_MAX_USERNAME_LENGTH]; + uint32_t username_length; + uint32_t next_erp_seq_num; + uint8_t rrk[WMI_FILS_MAX_RRK_LENGTH]; + uint32_t rrk_length; + uint8_t rik[WMI_FILS_MAX_RIK_LENGTH]; + uint32_t rik_length; + uint8_t realm[WMI_FILS_MAX_REALM_LENGTH]; + uint32_t realm_len; +}; + +/* struct roam_offload_scan_params - structure + * containing roaming offload scan parameters + * @is_roam_req_valid: flag to tell whether roam req + * is valid or 
NULL + * @mode: stores flags for scan + * @vdev_id: vdev id + * @roam_offload_enabled: flag for offload enable + * @psk_pmk: pre shared key/pairwise master key + * @pmk_len: length of PMK + * @prefer_5ghz: prefer select 5G candidate + * @roam_rssi_cat_gap: gap for every category bucket + * @select_5ghz_margin: select 5 Ghz margin + * @krk: KRK + * @btk: BTK + * @reassoc_failure_timeout: reassoc failure timeout + * @rokh_id_length: r0kh id length + * @rokh_id: r0kh id + * @roam_key_mgmt_offload_enabled: roam offload flag + * @auth_mode: authentication mode + * @fw_okc: use OKC in firmware + * @fw_pmksa_cache: use PMKSA cache in firmware + * @is_ese_assoc: flag to determine ese assoc + * @mdid: mobility domain info + * @roam_offload_params: roam offload tlv params + * @min_delay_btw_roam_scans: Delay btw two scans + * @roam_trigger_reason_bitmask: Roam reason bitmark + * @assoc_ie_length: Assoc IE length + * @assoc_ie: Assoc IE buffer + * @add_fils_tlv: add FILS TLV boolean + * @roam_fils_params: roam fils params + * @rct_validity_timer: duration value for which the entries in + * roam candidate table are valid + */ +struct roam_offload_scan_params { + uint8_t is_roam_req_valid; + uint32_t mode; + uint32_t vdev_id; +#ifdef WLAN_FEATURE_ROAM_OFFLOAD + uint8_t roam_offload_enabled; + uint8_t psk_pmk[WMI_ROAM_SCAN_PSK_SIZE]; + uint32_t pmk_len; + uint8_t prefer_5ghz; + uint8_t roam_rssi_cat_gap; + uint8_t select_5ghz_margin; + uint8_t krk[WMI_KRK_KEY_LEN]; + uint8_t btk[WMI_BTK_KEY_LEN]; + uint32_t reassoc_failure_timeout; + uint32_t rokh_id_length; + uint8_t rokh_id[WMI_ROAM_R0KH_ID_MAX_LEN]; + uint8_t roam_key_mgmt_offload_enabled; + int auth_mode; + bool fw_okc; + bool fw_pmksa_cache; + uint32_t rct_validity_timer; +#endif + uint32_t min_delay_btw_roam_scans; + uint32_t roam_trigger_reason_bitmask; + bool is_ese_assoc; + bool is_11r_assoc; + struct mobility_domain_info mdid; +#ifdef CONFIG_MCL + /* THis is not available in non tlv target. 
+ * please remove this and replace with a host based + * structure */ + roam_offload_param roam_offload_params; +#endif + uint32_t assoc_ie_length; + uint8_t assoc_ie[MAX_ASSOC_IE_LENGTH]; + bool add_fils_tlv; +#ifdef WLAN_FEATURE_FILS_SK + struct roam_fils_params roam_fils_params; +#endif +}; + +/* struct roam_offload_scan_rssi_params - structure containing + * parameters for roam offload scan based on RSSI + * @rssi_thresh: rssi threshold + * @rssi_thresh_diff: difference in rssi threshold + * @hi_rssi_scan_max_count: 5G scan max count + * @hi_rssi_scan_rssi_delta: 5G scan rssi change threshold value + * @hi_rssi_scan_rssi_ub: 5G scan upper bound + * @raise_rssi_thresh_5g: flag to determine penalty and boost thresholds + * @session_id: vdev id + * @penalty_threshold_5g: RSSI threshold below which 5GHz RSSI is penalized + * @boost_threshold_5g: RSSI threshold above which 5GHz RSSI is favored + * @raise_factor_5g: factor by which 5GHz RSSI is boosted + * @drop_factor_5g: factor by which 5GHz RSSI is penalized + * @max_raise_rssi_5g: maximum boost that can be applied to a 5GHz RSSI + * @max_drop_rssi_5g: maximum penalty that can be applied to a 5GHz RSSI + * @good_rssi_threshold: RSSI below which roam is kicked in by background + * scan although rssi is still good + * @roam_earlystop_thres_min: Minimum RSSI threshold value for early stop, + * unit is dB above NF + * @roam_earlystop_thres_max: Maximum RSSI threshold value for early stop, + * unit is dB above NF + * @dense_rssi_thresh_offset: dense roam RSSI threshold difference + * @dense_min_aps_cnt: dense roam minimum APs + * @initial_dense_status: dense status detected by host + * @traffic_threshold: dense roam RSSI threshold + * @bg_scan_bad_rssi_thresh: Bad RSSI threshold to perform bg scan + * @roam_bad_rssi_thresh_offset_2g: Offset from Bad RSSI threshold for 2G to 5G Roam + * @bg_scan_client_bitmap: Bitmap used to identify the client scans to snoop + * @flags: Flags for Background Roaming + * Bit 0 : BG 
roaming enabled when we connect to 2G AP only and roaming to 5G AP only. + * Bit 1-31: Reserved + */ +struct roam_offload_scan_rssi_params { + int8_t rssi_thresh; + uint8_t rssi_thresh_diff; + uint32_t hi_rssi_scan_max_count; + uint32_t hi_rssi_scan_rssi_delta; + int32_t hi_rssi_scan_rssi_ub; + int raise_rssi_thresh_5g; + uint8_t session_id; + uint32_t penalty_threshold_5g; + uint32_t boost_threshold_5g; + uint8_t raise_factor_5g; + uint8_t drop_factor_5g; + int max_raise_rssi_5g; + int max_drop_rssi_5g; + uint32_t good_rssi_threshold; + uint32_t roam_earlystop_thres_min; + uint32_t roam_earlystop_thres_max; + int dense_rssi_thresh_offset; + int dense_min_aps_cnt; + int initial_dense_status; + int traffic_threshold; + int32_t rssi_thresh_offset_5g; + int8_t bg_scan_bad_rssi_thresh; + uint8_t roam_bad_rssi_thresh_offset_2g; + uint32_t bg_scan_client_bitmap; + uint32_t flags; +}; + +/** + * struct ap_profile - Structure ap profile to match candidate + * @flags: flags + * @rssi_threshold: the value of the the candidate AP should higher by this + * threshold than the rssi of the currrently associated AP + * @ssid: ssid vlaue to be matched + * @rsn_authmode: security params to be matched + * @rsn_ucastcipherset: unicast cipher set + * @rsn_mcastcipherset: mcast/group cipher set + * @rsn_mcastmgmtcipherset: mcast/group management frames cipher set + * @rssi_abs_thresh: the value of the candidate AP should higher than this + * absolute RSSI threshold. Zero means no absolute minimum + * RSSI is required. 
units are the offset from the noise + * floor in dB + */ +struct ap_profile { + uint32_t flags; + uint32_t rssi_threshold; + struct mac_ssid ssid; + uint32_t rsn_authmode; + uint32_t rsn_ucastcipherset; + uint32_t rsn_mcastcipherset; + uint32_t rsn_mcastmgmtcipherset; + uint32_t rssi_abs_thresh; +}; + +/** + * struct rssi_scoring - rssi scoring param to sortlist selected AP + * @best_rssi_threshold: Roamable AP RSSI equal or better than this threshold, + * full rssi score 100. Units in dBm. + * @good_rssi_threshold: Below threshold, scoring linear percentage between + * rssi_good_pnt and 100. Units in dBm. + * @bad_rssi_threshold: Between good and bad rssi threshold, scoring linear + * % between rssi_bad_pcnt and rssi_good_pct in dBm. + * @good_rssi_pcnt: Used to assigned scoring percentage of each slot between + * best to good rssi threshold. Units in percentage. + * @bad_rssi_pcnt: Used to assigned scoring percentage of each slot between good + * to bad rssi threshold. Unites in percentage. + * @good_bucket_size : bucket size of slot in good zone + * @bad_bucket_size : bucket size of slot in bad zone + * @rssi_pref_5g_rssi_thresh: Below rssi threshold, 5G AP have given preference + * of band percentage. Units in dBm. + */ +struct rssi_scoring { + int32_t best_rssi_threshold; + int32_t good_rssi_threshold; + int32_t bad_rssi_threshold; + uint32_t good_rssi_pcnt; + uint32_t bad_rssi_pcnt; + uint32_t good_bucket_size; + uint32_t bad_bucket_size; + int32_t rssi_pref_5g_rssi_thresh; +}; + +/** + * struct param_slot_scoring - define % score for differents slots for a + * scoring param. + * @num_slot: number of slots in which the param will be divided. + * Max 15. index 0 is used for 'not_present. Num_slot will + * equally divide 100. 
e.g., if num_slot = 4 slot 0 = 0-25%, slot
+ * 1 = 26-50% slot 2 = 51-75%, slot 3 = 76-100%
+ * @score_pcnt3_to_0: Contains score percentage for slot 0-3
+ * BITS 0-7 :- the scoring pcnt when not present
+ * BITS 8-15 :- SLOT_1
+ * BITS 16-23 :- SLOT_2
+ * BITS 24-31 :- SLOT_3
+ * @score_pcnt7_to_4: Contains score percentage for slot 4-7
+ * BITS 0-7 :- SLOT_4
+ * BITS 8-15 :- SLOT_5
+ * BITS 16-23 :- SLOT_6
+ * BITS 24-31 :- SLOT_7
+ * @score_pcnt11_to_8: Contains score percentage for slot 8-11
+ * BITS 0-7 :- SLOT_8
+ * BITS 8-15 :- SLOT_9
+ * BITS 16-23 :- SLOT_10
+ * BITS 24-31 :- SLOT_11
+ * @score_pcnt15_to_12: Contains score percentage for slot 12-15
+ * BITS 0-7 :- SLOT_12
+ * BITS 8-15 :- SLOT_13
+ * BITS 16-23 :- SLOT_14
+ * BITS 24-31 :- SLOT_15
+ */
+struct param_slot_scoring {
+	uint32_t num_slot;
+	uint32_t score_pcnt3_to_0;
+	uint32_t score_pcnt7_to_4;
+	uint32_t score_pcnt11_to_8;
+	uint32_t score_pcnt15_to_12;
+};
+
+/**
+ * struct scoring_param - scoring param to sortlist selected AP
+ * @disable_bitmap: Each bit will be either allow(0)/disallow(1) to
+ * considered the roam score param.
+ * @rssi_weightage: RSSI weightage out of total score in %
+ * @ht_weightage: HT weightage out of total score in %.
+ * @vht_weightage: VHT weightage out of total score in %.
+ * @he_weightage: 11ax weightage out of total score in %.
+ * @bw_weightage: Bandwidth weightage out of total score in %.
+ * @band_weightage: Band(2G/5G) weightage out of total score in %.
+ * @nss_weightage: NSS(1x1 / 2x2) weightage out of total score in %.
+ * @esp_qbss_weightage: ESP/QBSS weightage out of total score in %.
+ * @beamforming_weightage: Beamforming weightage out of total score in %.
+ * @pcl_weightage: PCL weightage out of total score in %.
+ * @oce_wan_weightage: OCE WAN metrics weightage out of total score in %.
+ * @bw_index_score: channel BW scoring percentage information.
+ * BITS 0-7 :- It contains scoring percentage of 20MHz BW
+ * BITS 8-15 :- It contains scoring percentage of 40MHz BW
+ * BITS 16-23 :- It contains scoring percentage of 80MHz BW
+ * BITS 24-31 :- It contains scoring percentage of 160MHz BW
+ * The value of each index must be 0-100
+ * @band_index_score: band scoring percentage information.
+ * BITS 0-7 :- It contains scoring percentage of 2G
+ * BITS 8-15 :- It contains scoring percentage of 5G
+ * BITS 16-23 :- reserved
+ * BITS 24-31 :- reserved
+ * The value of each index must be 0-100
+ * @nss_index_score: NSS scoring percentage information.
+ * BITS 0-7 :- It contains scoring percentage of 1x1
+ * BITS 8-15 :- It contains scoring percentage of 2x2
+ * BITS 16-23 :- It contains scoring percentage of 3x3
+ * BITS 24-31 :- It contains scoring percentage of 4x4
+ * The value of each index must be 0-100
+ * @roam_score_delta: delta value expected over the roam score of the candidate
+ * ap over the roam score of the current ap
+ * @roam_trigger_bitmap: bitmap of roam triggers on which roam_score_delta
+ * will be applied
+ * @rssi_scoring: RSSI scoring information.
+ * @esp_qbss_scoring: ESP/QBSS scoring percentage information + * @oce_wan_scoring: OCE WAN metrics percentage information +*/ +struct scoring_param { + uint32_t disable_bitmap; + int32_t rssi_weightage; + int32_t ht_weightage; + int32_t vht_weightage; + int32_t he_weightage; + int32_t bw_weightage; + int32_t band_weightage; + int32_t nss_weightage; + int32_t esp_qbss_weightage; + int32_t beamforming_weightage; + int32_t pcl_weightage; + int32_t oce_wan_weightage; + uint32_t bw_index_score; + uint32_t band_index_score; + uint32_t nss_index_score; + uint32_t roam_score_delta; + uint32_t roam_trigger_bitmap; + struct rssi_scoring rssi_scoring; + struct param_slot_scoring esp_qbss_scoring; + struct param_slot_scoring oce_wan_scoring; +}; + +/** + * struct ap_profile_params - ap profile params + * @vdev_id: vdev id + * @profile: ap profile to match candidate + * @param: scoring params to short candidate + */ +struct ap_profile_params { + uint8_t vdev_id; + struct ap_profile profile; + struct scoring_param param; +}; + +/** + * struct wifi_epno_network - enhanced pno network block + * @ssid: ssid + * @rssi_threshold: threshold for considering this SSID as found, required + * granularity for this threshold is 4dBm to 8dBm + * @flags: WIFI_PNO_FLAG_XXX + * @auth_bit_field: auth bit field for matching WPA IE + */ +struct wifi_epno_network_params { + struct mac_ssid ssid; + int8_t rssi_threshold; + uint8_t flags; + uint8_t auth_bit_field; +}; + +/** + * struct wifi_enhanced_pno_params - enhanced pno network params + * @request_id: request id number + * @vdev_id: vdev id + * @min_5ghz_rssi: minimum 5GHz RSSI for a BSSID to be considered + * @min_24ghz_rssi: minimum 2.4GHz RSSI for a BSSID to be considered + * @initial_score_max: maximum score that a network can have before bonuses + * @current_connection_bonus: only report when there is a network's score this + * much higher than the current connection + * @same_network_bonus: score bonus for all n/w with the same network 
flag + * @secure_bonus: score bonus for networks that are not open + * @band_5ghz_bonus: 5GHz RSSI score bonus (applied to all 5GHz networks) + * @num_networks: number of ssids + * @networks: EPNO networks + */ +struct wifi_enhanced_pno_params { + uint32_t request_id; + uint32_t vdev_id; + uint32_t min_5ghz_rssi; + uint32_t min_24ghz_rssi; + uint32_t initial_score_max; + uint32_t current_connection_bonus; + uint32_t same_network_bonus; + uint32_t secure_bonus; + uint32_t band_5ghz_bonus; + uint32_t num_networks; + struct wifi_epno_network_params networks[]; +}; + +enum { + WMI_AP_RX_DATA_OFFLOAD = 0x00, + WMI_STA_RX_DATA_OFFLOAD = 0x01, +}; + +/** + * enum extscan_configuration_flags - extscan config flags + * @WMI_EXTSCAN_LP_EXTENDED_BATCHING: extended batching + */ +enum wmi_extscan_configuration_flags { + WMI_EXTSCAN_LP_EXTENDED_BATCHING = 0x00000001, +}; + +/** + * enum extscan_report_events_type - extscan report events type + * @EXTSCAN_REPORT_EVENTS_BUFFER_FULL: report only when scan history is % full + * @EXTSCAN_REPORT_EVENTS_EACH_SCAN: report a scan completion event after scan + * @EXTSCAN_REPORT_EVENTS_FULL_RESULTS: forward scan results + * (beacons/probe responses + IEs) + * in real time to HAL, in addition to completion events. + * Note: To keep backward compatibility, + * fire completion events regardless of REPORT_EVENTS_EACH_SCAN. 
+ * @EXTSCAN_REPORT_EVENTS_NO_BATCH: controls batching, + * 0 => batching, 1 => no batching + */ +enum wmi_extscan_report_events_type { + WMI_EXTSCAN_REPORT_EVENTS_BUFFER_FULL = 0x00, + WMI_EXTSCAN_REPORT_EVENTS_EACH_SCAN = 0x01, + WMI_EXTSCAN_REPORT_EVENTS_FULL_RESULTS = 0x02, + WMI_EXTSCAN_REPORT_EVENTS_NO_BATCH = 0x04, +}; + +/** + * struct extscan_capabilities_params - ext scan capablities + * @request_id: request_id + * @vdev_id: vdev id + */ +struct extscan_capabilities_params { + uint32_t request_id; + uint8_t vdev_id; +}; + +/** + * struct extscan_capabilities_reset_params - ext scan capablities reset + * parameter + * @request_id: request_id + * @vdev_id: vdev id + */ +struct extscan_capabilities_reset_params { + uint32_t request_id; + uint8_t vdev_id; +}; + +/** + * struct extscan_bssid_hotlist_reset_params - ext scan hotlist reset parameter + * @request_id: request_id + * @vdev_id: vdev id + */ +struct extscan_bssid_hotlist_reset_params { + uint32_t request_id; + uint8_t vdev_id; +}; + +/** + * struct extscan_stop_req_params - ext scan stop parameter + * @request_id: request_id + * @vdev_id: vdev id + */ +struct extscan_stop_req_params { + uint32_t request_id; + uint8_t vdev_id; +}; + +/** + * struct ap_threshold_params - ap threshold parameter + * @bssid: mac address + * @low: low threshold + * @high: high threshold + */ +struct ap_threshold_params { + struct qdf_mac_addr bssid; + int32_t low; + int32_t high; +}; + +/** + * struct extscan_set_sig_changereq_params - ext scan channel parameter + * @request_id: request_id + * @vdev_id: vdev id + * @rssi_sample_size: Number of samples for averaging RSSI + * @lostap_sample_size: Number of missed samples to confirm AP loss + * @min_breaching: Number of APs breaching threshold required for firmware + * @num_ap: no of scanned ap + * @ap: ap threshold parameter + */ +struct extscan_set_sig_changereq_params { + uint32_t request_id; + uint8_t vdev_id; + uint32_t rssi_sample_size; + uint32_t lostap_sample_size; + 
uint32_t min_breaching; + uint32_t num_ap; + struct ap_threshold_params ap[WMI_EXTSCAN_MAX_SIGNIFICANT_CHANGE_APS]; +}; + +/** + * struct extscan_cached_result_params - ext scan cached parameter + * @request_id: request_id + * @vdev_id: vdev id + * @flush: cached results flush + */ +struct extscan_cached_result_params { + uint32_t request_id; + uint8_t vdev_id; + bool flush; +}; + +#define WMI_WLAN_EXTSCAN_MAX_CHANNELS 36 +#define WMI_WLAN_EXTSCAN_MAX_BUCKETS 16 +#define WMI_WLAN_EXTSCAN_MAX_HOTLIST_APS 128 +#define WMI_WLAN_EXTSCAN_MAX_SIGNIFICANT_CHANGE_APS 64 +#define WMI_EXTSCAN_MAX_HOTLIST_SSIDS 8 + +/** + * struct wifi_scan_channelspec_params - wifi scan channel parameter + * @channel: Frequency in MHz + * @dwell_time_ms: dwell time in milliseconds + * @passive: passive scan + * @channel_class: channel class + */ +struct wifi_scan_channelspec_params { + uint32_t channel; + uint32_t dwell_time_ms; + bool passive; + uint8_t channel_class; +}; + +/** + * enum wmi_wifi_band - wifi band + * @WMI_WIFI_BAND_UNSPECIFIED: unspecified band + * @WMI_WIFI_BAND_BG: 2.4 GHz + * @WMI_WIFI_BAND_A: 5 GHz without DFS + * @WMI_WIFI_BAND_ABG: 2.4 GHz + 5 GHz; no DFS + * @WMI_WIFI_BAND_A_DFS_ONLY: 5 GHz DFS only + * @WMI_WIFI_BAND_A_WITH_DFS: 5 GHz with DFS + * @WMI_WIFI_BAND_ABG_WITH_DFS: 2.4 GHz + 5 GHz with DFS + * @WMI_WIFI_BAND_MAX: max range + */ +enum wmi_wifi_band { + WMI_WIFI_BAND_UNSPECIFIED, + WMI_WIFI_BAND_BG = 1, + WMI_WIFI_BAND_A = 2, + WMI_WIFI_BAND_ABG = 3, + WMI_WIFI_BAND_A_DFS_ONLY = 4, + /* 5 is reserved */ + WMI_WIFI_BAND_A_WITH_DFS = 6, + WMI_WIFI_BAND_ABG_WITH_DFS = 7, + /* Keep it last */ + WMI_WIFI_BAND_MAX +}; + +/** + * struct wifi_scan_bucket_params - wifi scan bucket spec + * @bucket: bucket identifier + * @band: wifi band + * @period: Desired period, in millisecond; if this is too + * low, the firmware should choose to generate results as fast as + * it can instead of failing the command byte + * for exponential backoff bucket this is the min_period + 
* @report_events: 0 => normal reporting (reporting rssi history + * only, when rssi history buffer is % full) + * 1 => same as 0 + report a scan completion event after scanning + * this bucket + * 2 => same as 1 + forward scan results + * (beacons/probe responses + IEs) in real time to HAL + * @max_period: if max_period is non zero or different than period, + * then this bucket is an exponential backoff bucket and + * the scan period will grow exponentially as per formula: + * actual_period(N) = period ^ (N/(step_count+1)) to a + * maximum period of max_period + * @exponent: for exponential back off bucket: multiplier: + * new_period = old_period * exponent + * @step_count: for exponential back off bucket, number of scans performed + * at a given period and until the exponent is applied + * @num_channels: channels to scan; these may include DFS channels + * Note that a given channel may appear in multiple buckets + * @min_dwell_time_active: per bucket minimum active dwell time + * @max_dwell_time_active: per bucket maximum active dwell time + * @min_dwell_time_passive: per bucket minimum passive dwell time + * @max_dwell_time_passive: per bucket maximum passive dwell time + * @channels: Channel list + */ +struct wifi_scan_bucket_params { + uint8_t bucket; + enum wmi_wifi_band band; + uint32_t period; + uint32_t report_events; + uint32_t max_period; + uint32_t exponent; + uint32_t step_count; + uint32_t num_channels; + uint32_t min_dwell_time_active; + uint32_t max_dwell_time_active; + uint32_t min_dwell_time_passive; + uint32_t max_dwell_time_passive; + struct wifi_scan_channelspec_params + channels[WMI_WLAN_EXTSCAN_MAX_CHANNELS]; +}; + +/** + * struct wifi_scan_cmd_req_params - wifi scan command request params + * @base_period: base timer period + * @max_ap_per_scan: max ap per scan + * @report_threshold_percent: report threshold + * in %, when buffer is this much full, wake up host + * @report_threshold_num_scans: report threshold number of scans + * in number of 
scans, wake up host after these many scans + * @request_id: request id + * @vdev_id: vdev that is the target of the request + * @num_buckets: number of buckets + * @min_dwell_time_active: per bucket minimum active dwell time + * @max_dwell_time_active: per bucket maximum active dwell time + * @min_dwell_time_passive: per bucket minimum passive dwell time + * @max_dwell_time_passive: per bucket maximum passive dwell time + * @configuration_flags: configuration flags + * @extscan_adaptive_dwell_mode: adaptive dwelltime mode for extscan + * @buckets: buckets array + */ +struct wifi_scan_cmd_req_params { + uint32_t base_period; + uint32_t max_ap_per_scan; + uint32_t report_threshold_percent; + uint32_t report_threshold_num_scans; + uint32_t request_id; + uint8_t vdev_id; + uint32_t num_buckets; + uint32_t min_dwell_time_active; + uint32_t max_dwell_time_active; + uint32_t min_dwell_time_passive; + uint32_t max_dwell_time_passive; + uint32_t configuration_flags; + enum scan_dwelltime_adaptive_mode extscan_adaptive_dwell_mode; + struct wifi_scan_bucket_params buckets[WMI_WLAN_EXTSCAN_MAX_BUCKETS]; +}; + +#define WMI_CFG_VALID_CHANNEL_LIST_LEN 100 +/* Occupied channel list remains static */ +#define WMI_CHANNEL_LIST_STATIC 1 +/* Occupied channel list can be learnt after init */ +#define WMI_CHANNEL_LIST_DYNAMIC_INIT 2 +/* Occupied channel list can be learnt after flush */ +#define WMI_CHANNEL_LIST_DYNAMIC_FLUSH 3 +/* Occupied channel list can be learnt after update */ +#define WMI_CHANNEL_LIST_DYNAMIC_UPDATE 4 + +/** + * struct plm_req_params - plm req parameter + * @diag_token: Dialog token + * @meas_token: measurement token + * @num_bursts: total number of bursts + * @burst_int: burst interval in seconds + * @meas_duration:in TU's,STA goes off-ch + * @burst_len: no of times the STA should cycle through PLM ch list + * @desired_tx_pwr: desired tx power + * @mac_addr: MC dest addr + * @plm_num_ch: channel numbers + * @plm_ch_list: channel list + * @session_id: session id 
+ * @enable: enable/disable + */ +struct plm_req_params { + uint16_t diag_token; + uint16_t meas_token; + uint16_t num_bursts; + uint16_t burst_int; + uint16_t meas_duration; + /* no of times the STA should cycle through PLM ch list */ + uint8_t burst_len; + int8_t desired_tx_pwr; + struct qdf_mac_addr mac_addr; + /* no of channels */ + uint8_t plm_num_ch; + /* channel numbers */ + uint8_t plm_ch_list[WMI_CFG_VALID_CHANNEL_LIST_LEN]; + uint8_t session_id; + bool enable; +}; + +#define MAX_SSID_ALLOWED_LIST 4 +#define MAX_BSSID_AVOID_LIST 16 +#define MAX_BSSID_FAVORED 16 +#define MAX_RSSI_AVOID_BSSID_LIST 10 + +/** + * struct mac_ts_info_tfc - mac ts info parameters + * @burstSizeDefn: burst size + * @reserved: reserved + * @ackPolicy: ack policy + * @psb: psb + * @aggregation: aggregation + * @accessPolicy: access policy + * @direction: direction + * @tsid: direction + * @trafficType: traffic type + */ +struct mac_ts_info_tfc { +#ifndef ANI_LITTLE_BIT_ENDIAN + uint8_t burstSizeDefn:1; + uint8_t reserved:7; +#else + uint8_t reserved:7; + uint8_t burstSizeDefn:1; +#endif + +#ifndef ANI_LITTLE_BIT_ENDIAN + uint16_t ackPolicy:2; + uint16_t userPrio:3; + uint16_t psb:1; + uint16_t aggregation:1; + uint16_t accessPolicy:2; + uint16_t direction:2; + uint16_t tsid:4; + uint16_t trafficType:1; +#else + uint16_t trafficType:1; + uint16_t tsid:4; + uint16_t direction:2; + uint16_t accessPolicy:2; + uint16_t aggregation:1; + uint16_t psb:1; + uint16_t userPrio:3; + uint16_t ackPolicy:2; +#endif +} qdf_packed; + +/** + * struct mac_ts_info_sch - mac ts info schedule parameters + * @rsvd: reserved + * @schedule: schedule bit + */ +struct mac_ts_info_sch { +#ifndef ANI_LITTLE_BIT_ENDIAN + uint8_t rsvd:7; + uint8_t schedule:1; +#else + uint8_t schedule:1; + uint8_t rsvd:7; +#endif +} qdf_packed; + +/** + * struct mac_ts_info_sch - mac ts info schedule parameters + * @traffic: mac tfc parameter + * @schedule: mac schedule parameters + */ +struct mac_ts_info { + struct 
mac_ts_info_tfc traffic; + struct mac_ts_info_sch schedule; +} qdf_packed; + +/** + * struct mac_tspec_ie - mac ts spec + * @type: type + * @length: length + * @tsinfo: tsinfo + * @nomMsduSz: nomMsduSz + * @maxMsduSz: maxMsduSz + * @minSvcInterval: minSvcInterval + * @maxSvcInterval: maxSvcInterval + * @inactInterval: inactInterval + * @suspendInterval: suspendInterval + * @svcStartTime: svcStartTime + * @minDataRate: minDataRate + * @meanDataRate: meanDataRate + * @peakDataRate: peakDataRate + * @maxBurstSz: maxBurstSz + * @delayBound: delayBound + * @minPhyRate: minPhyRate + * @surplusBw: surplusBw + * @mediumTime: mediumTime + */ +struct mac_tspec_ie { + uint8_t type; + uint8_t length; + struct mac_ts_info tsinfo; + uint16_t nomMsduSz; + uint16_t maxMsduSz; + uint32_t minSvcInterval; + uint32_t maxSvcInterval; + uint32_t inactInterval; + uint32_t suspendInterval; + uint32_t svcStartTime; + uint32_t minDataRate; + uint32_t meanDataRate; + uint32_t peakDataRate; + uint32_t maxBurstSz; + uint32_t delayBound; + uint32_t minPhyRate; + uint16_t surplusBw; + uint16_t mediumTime; +} qdf_packed; + +/** + * struct add_ts_param - ADDTS related parameters + * @staIdx: station index + * @tspecIdx: TSPEC handler uniquely identifying a TSPEC for a STA in a BSS + * @tspec: tspec value + * @status: CDF status + * @sessionId: session id + * @tsm_interval: TSM interval period passed from UMAC to WMI + * @setRICparams: RIC parameters + * @sme_session_id: sme session id + */ +struct add_ts_param { + uint16_t staIdx; + uint16_t tspecIdx; + struct mac_tspec_ie tspec; + QDF_STATUS status; + uint8_t sessionId; +#ifdef FEATURE_WLAN_ESE + uint16_t tsm_interval; +#endif /* FEATURE_WLAN_ESE */ +#ifdef WLAN_FEATURE_ROAM_OFFLOAD + uint8_t setRICparams; +#endif /* WLAN_FEATURE_ROAM_OFFLOAD */ + uint8_t sme_session_id; +}; + +/** + * struct delts_req_info - DELTS request parameter + * @tsinfo: ts info + * @tspec: ts spec + * @wmeTspecPresent: wme ts spec flag + * @wsmTspecPresent: wsm ts spec 
flag + * @lleTspecPresent: lle ts spec flag + */ +struct delts_req_info { + struct mac_ts_info tsinfo; + struct mac_tspec_ie tspec; + uint8_t wmeTspecPresent:1; + uint8_t wsmTspecPresent:1; + uint8_t lleTspecPresent:1; +}; + +/** + * struct del_ts_params - DELTS related parameters + * @staIdx: station index + * @tspecIdx: TSPEC identifier uniquely identifying a TSPEC for a STA in a BSS + * @bssId: BSSID + * @sessionId: session id + * @userPrio: user priority + * @delTsInfo: DELTS info + * @setRICparams: RIC parameters + */ +struct del_ts_params { + uint16_t staIdx; + uint16_t tspecIdx; + uint8_t bssId[IEEE80211_ADDR_LEN]; + uint8_t sessionId; + uint8_t userPrio; +#ifdef WLAN_FEATURE_ROAM_OFFLOAD + struct delts_req_info delTsInfo; + uint8_t setRICparams; +#endif /* WLAN_FEATURE_ROAM_OFFLOAD */ +}; + +/** + * struct ll_stats_clear_params - ll stats clear parameter + * @req_id: request id + * @sta_id: sta id + * @stats_clear_mask: stats clear mask + * @stop_req: stop request + */ +struct ll_stats_clear_params { + uint32_t req_id; + uint8_t sta_id; + uint32_t stats_clear_mask; + uint8_t stop_req; +}; + +/** + * struct ll_stats_set_params - ll stats get parameter + * @req_id: request id + * @sta_id: sta id + * @mpdu_size_threshold: mpdu sixe threshold + * @aggressive_statistics_gathering: aggressive_statistics_gathering + */ +struct ll_stats_set_params { + uint32_t req_id; + uint8_t sta_id; + uint32_t mpdu_size_threshold; + uint32_t aggressive_statistics_gathering; +}; + +/** + * struct ll_stats_get_params - ll stats parameter + * @req_id: request id + * @sta_id: sta id + * @param_id_mask: param is mask + */ +struct ll_stats_get_params { + uint32_t req_id; + uint8_t sta_id; + uint32_t param_id_mask; +}; + + +/** + * struct link_status_params - link stats parameter + * @msg_type: message type is same as the request type + * @msg_len: length of the entire request + * @link_status: wme ts spec flag + * @session_id: wsm ts spec flag + */ +struct link_status_params { + 
uint16_t msg_type; + uint16_t msg_len; + uint8_t link_status; + uint8_t session_id; +}; + +/** + * struct dhcp_stop_ind_params - DHCP Stop indication message + * @msgtype: message type is same as the request type + * @msglen: length of the entire request + * @device_mode: Mode of the device(ex:STA, AP) + * @adapter_macaddr: MAC address of the adapter + * @peer_macaddr: MAC address of the connected peer + */ +struct dhcp_stop_ind_params { + uint16_t msgtype; + uint16_t msglen; + uint8_t device_mode; + struct qdf_mac_addr adapter_macaddr; + struct qdf_mac_addr peer_macaddr; +}; + +/** + * struct aggr_add_ts_param - ADDTS parameters + * @staIdx: station index + * @tspecIdx: TSPEC handler uniquely identifying a TSPEC for a STA in a BSS + * @tspec: tspec value + * @status: CDF status + * @sessionId: session id + * @vdev_id: vdev id + */ +struct aggr_add_ts_param { + uint16_t staIdx; + uint16_t tspecIdx; + struct mac_tspec_ie tspec[WMI_QOS_NUM_AC_MAX]; + QDF_STATUS status[WMI_QOS_NUM_AC_MAX]; + uint8_t sessionId; + uint8_t vdev_id; +}; + + +/** + * struct wlm_latency_level_param - WLM parameters + * @wlm_latency_level: wlm latency level to set + * 0 - normal, 1 - moderate, 2 - low, 3 - ultralow + * @wlm_latency_flags: wlm latency flags to set + * |31 12| 11 | 10 |9 8|7 6|5 4|3 2| 1 | 0 | + * +------+------+------+------+------+------+------+-----+-----+ + * | RSVD | SSLP | CSLP | RSVD | Roam | RSVD | DWLT | DFS | SUP | + * +------+-------------+-------------+-------------------------+ + * | WAL | PS | Roam | Scan | + * + * bit 0: Avoid scan request from HLOS if setting + * bit 1: Skip DFS channel SCAN if setting + * bit 2-3: Define policy of dwell time/duration for each foreign channel + * (b2 b3) + * (0 0 ): Default scan dwell time + * (0 1 ): Reserve + * (1 0 ): Shrink off channel dwell time + * (1 1 ): Reserve + * bit 4-5: Reserve for scan + * bit 6-7: Define roaming policy + * (b6 b7) + * (0 0 ): Default roaming behavior, allow roaming in all scenarios + * (0 1 ): 
Disallow all roaming + * (1 0 ): Allow roaming when final bmissed + * (1 1 ): Reserve + * bit 8-9: Reserve for roaming + * bit 10: Disable css power collapse if setting + * bit 11: Disable sys sleep if setting + * bit 12-31: Reserve for future useage + * @vdev_id: vdev id + */ +struct wlm_latency_level_param { + uint16_t wlm_latency_level; + uint32_t wlm_latency_flags; + uint16_t vdev_id; +}; + +#define WMI_MAX_FILTER_TEST_DATA_LEN 8 +#define WMI_MAX_NUM_MULTICAST_ADDRESS 240 +#define WMI_MAX_NUM_FILTERS 20 +#define WMI_MAX_NUM_TESTS_PER_FILTER 10 + +/** + * enum packet_filter_type - packet filter type + * @WMI_RCV_FILTER_TYPE_INVALID: invalid type + * @WMI_RCV_FILTER_TYPE_FILTER_PKT: filter packet type + * @WMI_RCV_FILTER_TYPE_BUFFER_PKT: buffer packet type + * @WMI_RCV_FILTER_TYPE_MAX_ENUM_SIZE: max enum size + */ +enum packet_filter_type { + WMI_RCV_FILTER_TYPE_INVALID, + WMI_RCV_FILTER_TYPE_FILTER_PKT, + WMI_RCV_FILTER_TYPE_BUFFER_PKT, + WMI_RCV_FILTER_TYPE_MAX_ENUM_SIZE +}; + +/** + * enum packet_protocol_type - packet protocol type + * @WMI_FILTER_HDR_TYPE_INVALID: invalid type + * @WMI_FILTER_HDR_TYPE_MAC: mac type + * @WMI_FILTER_HDR_TYPE_ARP: trp type + * @WMI_FILTER_HDR_TYPE_IPV4: ipv4 type + * @WMI_FILTER_HDR_TYPE_IPV6: ipv6 type + * @WMI_FILTER_HDR_TYPE_UDP: udp type + * @WMI_FILTER_HDR_TYPE_MAX: max type + */ +enum packet_protocol_type { + WMI_FILTER_HDR_TYPE_INVALID, + WMI_FILTER_HDR_TYPE_MAC, + WMI_FILTER_HDR_TYPE_ARP, + WMI_FILTER_HDR_TYPE_IPV4, + WMI_FILTER_HDR_TYPE_IPV6, + WMI_FILTER_HDR_TYPE_UDP, + WMI_FILTER_HDR_TYPE_MAX +}; + +/** + * enum packet_filter_comp_type - packet filter comparison type + * @WMI_FILTER_CMP_TYPE_INVALID: invalid type + * @WMI_FILTER_CMP_TYPE_EQUAL: type equal + * @WMI_FILTER_CMP_TYPE_MASK_EQUAL: mask equal + * @WMI_FILTER_CMP_TYPE_NOT_EQUAL: type not equal + * @WMI_FILTER_CMP_TYPE_MASK_NOT_EQUAL: mask not equal + * @WMI_FILTER_CMP_TYPE_MAX: max type + */ +enum packet_filter_comp_type { + WMI_FILTER_CMP_TYPE_INVALID, + 
WMI_FILTER_CMP_TYPE_EQUAL, + WMI_FILTER_CMP_TYPE_MASK_EQUAL, + WMI_FILTER_CMP_TYPE_NOT_EQUAL, + WMI_FILTER_CMP_TYPE_MASK_NOT_EQUAL, + WMI_FILTER_CMP_TYPE_MAX +}; + +/** + * struct rcv_pkt_filter_params - receive packet filter parameters + * @protocolLayer - protocol layer + * @cmpFlag - comparison flag + * @dataLength - data length + * @dataOffset - data offset + * @reserved - resserved + * @compareData - compare data + * @dataMask - data mask + */ +struct rcv_pkt_filter_params { + enum packet_protocol_type protocolLayer; + enum packet_filter_comp_type cmpFlag; + uint16_t dataLength; + uint8_t dataOffset; + uint8_t reserved; + uint8_t compareData[WMI_MAX_FILTER_TEST_DATA_LEN]; + uint8_t dataMask[WMI_MAX_FILTER_TEST_DATA_LEN]; +}; + +/** + * struct rcv_pkt_filter_config - receive packet filter info + * @filterId - filter id + * @filterType - filter type + * @numFieldParams - no of fields + * @coalesceTime - reserved parameter + * @self_macaddr - self mac address + * @bssid - Bssid of the connected AP + * @paramsData - data parameter + */ +struct rcv_pkt_filter_config { + uint8_t filterId; + enum packet_filter_type filterType; + uint32_t numFieldParams; + uint32_t coalesceTime; + struct qdf_mac_addr self_macaddr; + struct qdf_mac_addr bssid; + struct rcv_pkt_filter_params paramsData[WMI_MAX_NUM_TESTS_PER_FILTER]; +}; + +/** + * struct vdev_ie_info_param - IE info + * @vdev_id - vdev for which the IE is being sent + * @ie_id - ID of the IE + * @length - length of the IE data + * @data - IE data + * + * This structure is used to store the IE information. + */ +struct vdev_ie_info_param { + uint32_t vdev_id; + uint32_t ie_id; + uint32_t length; + uint32_t ie_source; + uint32_t band; + uint8_t *data; +}; + +#define WMI_MAX_NUM_FW_SEGMENTS 4 + +/** + * struct fw_dump_seg_req_param - individual segment details + * @seg_id - segment id. + * @seg_start_addr_lo - lower address of the segment. + * @seg_start_addr_hi - higher address of the segment. 
+ * @seg_length - length of the segment. + * @dst_addr_lo - lower address of the destination buffer. + * @dst_addr_hi - higher address of the destination buffer. + * + * This structure carries the information to firmware about the + * individual segments. This structure is part of firmware memory + * dump request. + */ +struct fw_dump_seg_req_param { + uint8_t seg_id; + uint32_t seg_start_addr_lo; + uint32_t seg_start_addr_hi; + uint32_t seg_length; + uint32_t dst_addr_lo; + uint32_t dst_addr_hi; +}; + +/** + * struct fw_dump_req_param - firmware memory dump request details. + * @request_id - request id. + * @num_seg - requested number of segments. + * @fw_dump_seg_req - individual segment information. + * + * This structure carries information about the firmware + * memory dump request. + */ +struct fw_dump_req_param { + uint32_t request_id; + uint32_t num_seg; + struct fw_dump_seg_req_param segment[WMI_MAX_NUM_FW_SEGMENTS]; +}; + +#define WMI_TDLS_MAX_SUPP_CHANNELS 128 +#define WMI_TDLS_MAX_SUPP_OPER_CLASSES 32 +#define WMI_2_4_GHZ_MAX_FREQ 3000 + +/** + * struct tdls_update_ch_params - channel parameters + * @chanId: ID of the channel + * @pwr: power level + * @dfsSet: is dfs supported or not + * @half_rate: is the channel operating at 10MHz + * @quarter_rate: is the channel operating at 5MHz + */ +struct tdls_update_ch_params { + uint8_t chanId; + uint8_t pwr; + bool dfsSet; + bool half_rate; + bool quarter_rate; +}; + +/** + * struct tdls_peer_cap_params - TDLS peer capablities parameters + * @isPeerResponder: is peer responder or not + * @peerUapsdQueue: peer uapsd queue + * @peerMaxSp: peer max SP value + * @peerBuffStaSupport: peer buffer sta supported or not + * @peerOffChanSupport: peer offchannel support + * @peerCurrOperClass: peer current operating class + * @selfCurrOperClass: self current operating class + * @peerChanLen: peer channel length + * @peerChan: peer channel list + * @peerOperClassLen: peer operating class length + * @peerOperClass: peer 
operating class + * @prefOffChanNum: peer offchannel number + * @prefOffChanBandwidth: peer offchannel bandwidth + * @opClassForPrefOffChan: operating class for offchannel + */ +struct tdls_peer_cap_params { + uint8_t isPeerResponder; + uint8_t peerUapsdQueue; + uint8_t peerMaxSp; + uint8_t peerBuffStaSupport; + uint8_t peerOffChanSupport; + uint8_t peerCurrOperClass; + uint8_t selfCurrOperClass; + uint8_t peerChanLen; + struct tdls_update_ch_params peerChan[WMI_TDLS_MAX_SUPP_CHANNELS]; + uint8_t peerOperClassLen; + uint8_t peerOperClass[WMI_TDLS_MAX_SUPP_OPER_CLASSES]; + uint8_t prefOffChanNum; + uint8_t prefOffChanBandwidth; + uint8_t opClassForPrefOffChan; +}; + +/** + * struct tdls_peer_state_params - TDLS peer state parameters + * @vdevId: vdev id + * @peerMacAddr: peer mac address + * @peerCap: peer capabality + */ +struct tdls_peer_state_params { + uint32_t vdevId; + uint8_t peerMacAddr[IEEE80211_ADDR_LEN]; + uint32_t peerState; + struct tdls_peer_cap_params peerCap; +}; + +/** + * struct wmi_tdls_params - TDLS parameters + * @vdev_id: vdev id + * @tdls_state: TDLS state + * @notification_interval_ms: notification inerval + * @tx_discovery_threshold: tx discovery threshold + * @tx_teardown_threshold: tx teardown threashold + * @rssi_teardown_threshold: RSSI teardown threshold + * @rssi_delta: RSSI delta + * @tdls_options: TDLS options + * @peer_traffic_ind_window: raffic indication window + * @peer_traffic_response_timeout: traffic response timeout + * @puapsd_mask: uapsd mask + * @puapsd_inactivity_time: uapsd inactivity time + * @puapsd_rx_frame_threshold: uapsd rx frame threshold + * @teardown_notification_ms: tdls teardown notification interval + * @tdls_peer_kickout_threshold: tdls packet threshold for + * peer kickout operation + */ +struct wmi_tdls_params { + uint32_t vdev_id; + uint32_t tdls_state; + uint32_t notification_interval_ms; + uint32_t tx_discovery_threshold; + uint32_t tx_teardown_threshold; + int32_t rssi_teardown_threshold; + int32_t 
rssi_delta; + uint32_t tdls_options; + uint32_t peer_traffic_ind_window; + uint32_t peer_traffic_response_timeout; + uint32_t puapsd_mask; + uint32_t puapsd_inactivity_time; + uint32_t puapsd_rx_frame_threshold; + uint32_t teardown_notification_ms; + uint32_t tdls_peer_kickout_threshold; +}; + +#ifndef CONVERGED_TDLS_ENABLE +/** + * struct tdls_chan_switch_params - channel switch parameter structure + * @vdev_id: vdev ID + * @peer_mac_addr: Peer mac address + * @tdls_off_ch_bw_offset: Target off-channel bandwitdh offset + * @tdls_off_ch: Target Off Channel + * @oper_class: Operating class for target channel + * @is_responder: Responder or initiator + */ +struct tdls_channel_switch_params { + uint32_t vdev_id; + uint8_t peer_mac_addr[IEEE80211_ADDR_LEN]; + uint16_t tdls_off_ch_bw_offset; + uint8_t tdls_off_ch; + uint8_t tdls_sw_mode; + uint8_t oper_class; + uint8_t is_responder; +}; +#endif + +/** + * struct dhcp_offload_info_params - dhcp offload parameters + * @vdev_id: request data length + * @dhcp_offload_enabled: dhcp offload enabled + * @dhcp_client_num: dhcp client no + * @dhcp_srv_addr: dhcp server ip + */ +struct dhcp_offload_info_params { + uint32_t vdev_id; + bool dhcp_offload_enabled; + uint32_t dhcp_client_num; + uint32_t dhcp_srv_addr; +}; + +/** + * struct nan_req_params - NAN request params + * @request_data_len: request data length + * @request_data: request data + */ +struct nan_req_params { + uint16_t request_data_len; + uint8_t request_data[]; +}; + + +/** + * struct app_type2_params - app type2parameter + * @vdev_id: vdev id + * @rc4_key: rc4 key + * @rc4_key_len: rc4 key length + * @ip_id: NC id + * @ip_device_ip: NC IP address + * @ip_server_ip: Push server IP address + * @tcp_src_port: NC TCP port + * @tcp_dst_port: Push server TCP port + * @tcp_seq: tcp sequence + * @tcp_ack_seq: tcp ack sequence + * @keepalive_init: Initial ping interval + * @keepalive_min: Minimum ping interval + * @keepalive_max: Maximum ping interval + * @keepalive_inc: 
Increment of ping interval + * @gateway_mac: gateway mac address + * @tcp_tx_timeout_val: tcp tx timeout value + * @tcp_rx_timeout_val: tcp rx timeout value + */ +struct app_type2_params { + uint8_t vdev_id; + uint8_t rc4_key[16]; + uint32_t rc4_key_len; + /** ip header parameter */ + uint32_t ip_id; + uint32_t ip_device_ip; + uint32_t ip_server_ip; + /** tcp header parameter */ + uint16_t tcp_src_port; + uint16_t tcp_dst_port; + uint32_t tcp_seq; + uint32_t tcp_ack_seq; + uint32_t keepalive_init; + uint32_t keepalive_min; + uint32_t keepalive_max; + uint32_t keepalive_inc; + struct qdf_mac_addr gateway_mac; + uint32_t tcp_tx_timeout_val; + uint32_t tcp_rx_timeout_val; +}; + +/** + * struct app_type1_params - app type1 parameter + * @vdev_id: vdev id + * @wakee_mac_addr: mac address + * @identification_id: identification id + * @password: password + * @id_length: id length + * @pass_length: password length + */ +struct app_type1_params { + uint8_t vdev_id; + struct qdf_mac_addr wakee_mac_addr; + uint8_t identification_id[8]; + uint8_t password[16]; + uint32_t id_length; + uint32_t pass_length; +}; + +/** + * enum wmi_ext_wow_type - wow type + * @WMI_EXT_WOW_TYPE_APP_TYPE1: only enable wakeup for app type1 + * @WMI_EXT_WOW_TYPE_APP_TYPE2: only enable wakeup for app type2 + * @WMI_EXT_WOW_TYPE_APP_TYPE1_2: enable wakeup for app type1&2 + */ +enum wmi_ext_wow_type { + WMI_EXT_WOW_TYPE_APP_TYPE1, + WMI_EXT_WOW_TYPE_APP_TYPE2, + WMI_EXT_WOW_TYPE_APP_TYPE1_2, +}; + +/** + * struct ext_wow_params - ext wow parameters + * @vdev_id: vdev id + * @type: wow type + * @wakeup_pin_num: wake up gpio no + */ +struct ext_wow_params { + uint8_t vdev_id; + enum wmi_ext_wow_type type; + uint32_t wakeup_pin_num; +}; + +/** + * struct stats_ext_params - ext stats request + * @vdev_id: vdev id + * @request_data_len: request data length + * @request_data: request data + */ +struct stats_ext_params { + uint32_t vdev_id; + uint32_t request_data_len; + uint8_t request_data[]; +}; + +#define 
WMI_PERIODIC_TX_PTRN_MAX_SIZE 1536 +/** + * struct periodic_tx_pattern - periodic tx pattern + * @mac_address: MAC Address for the adapter + * @ucPtrnId: Pattern ID + * @ucPtrnSize: Pattern size + * @usPtrnIntervalMs: in ms + * @ucPattern: Pattern buffer + */ +struct periodic_tx_pattern { + struct qdf_mac_addr mac_address; + uint8_t ucPtrnId; + uint16_t ucPtrnSize; + uint32_t usPtrnIntervalMs; + uint8_t ucPattern[WMI_PERIODIC_TX_PTRN_MAX_SIZE]; +}; + +#define WMI_GTK_OFFLOAD_KEK_BYTES 64 +#define WMI_GTK_OFFLOAD_KCK_BYTES 16 +#define WMI_GTK_OFFLOAD_ENABLE 0 +#define WMI_GTK_OFFLOAD_DISABLE 1 + +/** + * struct flashing_req_params - led flashing parameter + * @reqId: request id + * @pattern_id: pattern identifier. 0: disconnected 1: connected + * @led_x0: led flashing parameter0 + * @led_x1: led flashing parameter1 + */ +struct flashing_req_params { + uint32_t req_id; + uint32_t pattern_id; + uint32_t led_x0; + uint32_t led_x1; +}; + +#define MAX_MEM_CHUNKS 32 +/** + * struct wmi_host_mem_chunk - host memory chunk structure + * @vaddr: Pointer to virtual address + * @paddr: Physical address + * @memctx: qdf memory context for mapped address. + * @len: length of chunk + * @req_id: request id from target + */ +struct wmi_host_mem_chunk { + uint32_t *vaddr; + uint32_t paddr; + qdf_dma_mem_context(memctx); + uint32_t len; + uint32_t req_id; +}; + +/** + * struct wmi_wifi_start_log - Structure to store the params sent to start/ + * stop logging + * @name: Attribute which indicates the type of logging like per packet + * statistics, connectivity etc. 
+ * @verbose_level: Verbose level which can be 0,1,2,3 + * @flag: Flag field for future use + */ +struct wmi_wifi_start_log { + uint32_t ring_id; + uint32_t verbose_level; + uint32_t flag; +}; + +/** + * struct wmi_pcl_list - Format of PCL + * @pcl_list: List of preferred channels + * @weight_list: Weights of the PCL + * @pcl_len: Number of channels in the PCL + */ +struct wmi_pcl_list { + uint8_t pcl_list[128]; + uint8_t weight_list[128]; + uint32_t pcl_len; +}; + +/** + * struct wmi_pcl_chan_weights - Params to get the valid weighed list + * @pcl_list: Preferred channel list already sorted in the order of preference + * @pcl_len: Length of the PCL + * @saved_chan_list: Valid channel list updated as part of + * WMA_UPDATE_CHAN_LIST_REQ + * @saved_num_chan: Length of the valid channel list + * @weighed_valid_list: Weights of the valid channel list. This will have one + * to one mapping with valid_chan_list. FW expects channel order and size to be + * as per the list provided in WMI_SCAN_CHAN_LIST_CMDID. 
+ * @weight_list: Weights assigned by policy manager + */ +struct wmi_pcl_chan_weights { + uint8_t pcl_list[MAX_NUM_CHAN]; + uint32_t pcl_len; + uint8_t saved_chan_list[MAX_NUM_CHAN]; + uint32_t saved_num_chan; + uint8_t weighed_valid_list[MAX_NUM_CHAN]; + uint8_t weight_list[MAX_NUM_CHAN]; +}; + +/** + * struct wmi_hw_mode_params - HW mode params + * @mac0_tx_ss: MAC0 Tx spatial stream + * @mac0_rx_ss: MAC0 Rx spatial stream + * @mac1_tx_ss: MAC1 Tx spatial stream + * @mac1_rx_ss: MAC1 Rx spatial stream + * @mac0_bw: MAC0 bandwidth + * @mac1_bw: MAC1 bandwidth + * @dbs_cap: DBS capabality + * @agile_dfs_cap: Agile DFS capabality + */ +struct wmi_hw_mode_params { + uint8_t mac0_tx_ss; + uint8_t mac0_rx_ss; + uint8_t mac1_tx_ss; + uint8_t mac1_rx_ss; + uint8_t mac0_bw; + uint8_t mac1_bw; + uint8_t dbs_cap; + uint8_t agile_dfs_cap; +}; + +/** + * struct ssid_hotlist_param - param for SSID Hotlist + * @ssid: SSID which is being hotlisted + * @band: Band in which the given SSID should be scanned + * @rssi_low: Low bound on RSSI + * @rssi_high: High bound on RSSI + */ +struct ssid_hotlist_param { + struct mac_ssid ssid; + uint8_t band; + int32_t rssi_low; + int32_t rssi_high; +}; + +/** + * struct rssi_disallow_bssid - Structure holding Rssi based avoid candidate + * @bssid: BSSID of the AP + * @remaining_duration: remaining disallow duration in ms + * @expected_rssi: RSSI at which STA can initate in dBm + */ +struct rssi_disallow_bssid { + struct qdf_mac_addr bssid; + uint32_t remaining_duration; + int8_t expected_rssi; +}; + + +/** + * struct roam_scan_filter_params - Structure holding roaming scan + * parameters + * @op_bitmap: bitmap to determine reason of roaming + * @session_id: vdev id + * @num_bssid_black_list: The number of BSSID's that we should + * avoid connecting to. It is like a + * blacklist of BSSID's. + * @num_ssid_white_list: The number of SSID profiles that are + * in the Whitelist. 
When roaming, we + * consider the BSSID's with this SSID + * also for roaming apart from the connected one's + * @num_bssid_preferred_list: Number of BSSID's which have a preference over + * others + * @bssid_avoid_list: Blacklist SSID's + * @ssid_allowed_list: Whitelist SSID's + * @bssid_favored: Favorable BSSID's + * @bssid_favored_factor: RSSI to be added to this BSSID to prefer it + * @lca_disallow_config_present: LCA [Last Connected AP] disallow config present + * @disallow_duration: How long LCA AP will be disallowed before it + * can be a roaming candidate again, in seconds + * @rssi_channel_penalization:How much RSSI will be penalized if candidate(s) + * are found in the same channel as disallowed AP's, + * in units of db + * @num_disallowed_aps: How many APs the target should maintain in its + * LCA list + * + * This structure holds all the key parameters related to + * initial connection and roaming connections. + */ + +struct roam_scan_filter_params { + uint32_t op_bitmap; + uint8_t session_id; + uint32_t num_bssid_black_list; + uint32_t num_ssid_white_list; + uint32_t num_bssid_preferred_list; + struct qdf_mac_addr bssid_avoid_list[MAX_BSSID_AVOID_LIST]; + struct mac_ssid ssid_allowed_list[MAX_SSID_ALLOWED_LIST]; + struct qdf_mac_addr bssid_favored[MAX_BSSID_FAVORED]; + uint8_t bssid_favored_factor[MAX_BSSID_FAVORED]; + uint8_t lca_disallow_config_present; + uint32_t disallow_duration; + uint32_t rssi_channel_penalization; + uint32_t num_disallowed_aps; + uint32_t num_rssi_rejection_ap; + struct rssi_disallow_bssid rssi_rejection_ap[MAX_RSSI_AVOID_BSSID_LIST]; +}; + +#define WMI_MAX_HLP_IE_LEN 2048 +/** + * struct hlp_params - HLP info params + * @vdev_id: vdev id + * @hlp_ie_len: HLP IE length + * @hlp_ie: HLP IE + */ +struct hlp_params { + uint8_t vdev_id; + uint32_t hlp_ie_len; + uint8_t hlp_ie[WMI_MAX_HLP_IE_LEN]; +}; + +#define WMI_UNIFIED_MAX_PMKID_LEN 16 +#define WMI_UNIFIED_MAX_PMK_LEN 64 + +/** + * struct wmi_unified_pmk_cache - used to set del 
pmkid cache + * @tlv_header: TLV header, TLV tag and len; tag equals WMITLV_TAG_ARRAY_UINT32 + * @pmk_len: PMK len + * for big-endian hosts, manual endian conversion will be needed to keep + * the array values in their original order in spite of the automatic + * byte-swap applied to WMI messages during download + * @pmk: PMK array + * @pmkid_len: PMK ID Len + * @pmkid: PMK ID Array + * @bssid: BSSID + * @ssid: SSID + * @cache_id: PMK Cache ID + * @cat_flag: whether (bssid) or (ssid,cache_id) is valid + * @action_flag: add/delete the entry + */ +struct wmi_unified_pmk_cache { + uint32_t tlv_header; + uint32_t pmk_len; + uint8_t session_id; + uint8_t pmk[WMI_UNIFIED_MAX_PMK_LEN]; + uint32_t pmkid_len; + uint8_t pmkid[WMI_UNIFIED_MAX_PMKID_LEN]; + wmi_host_mac_addr bssid; + struct mac_ssid ssid; + uint32_t cache_id; + uint32_t cat_flag; + uint32_t action_flag; +}; + + +/** + * struct ssid_hotlist_request_params - set SSID hotlist request struct + * @request_id: ID of the request + * @session_id: ID of the session + * @lost_ssid_sample_size: Number of consecutive scans in which the SSID + * must not be seen in order to consider the SSID "lost" + * @ssid_count: Number of valid entries in the @ssids array + * @ssids: Array that defines the SSIDs that are in the hotlist + */ +struct ssid_hotlist_request_params { + uint32_t request_id; + uint8_t session_id; + uint32_t lost_ssid_sample_size; + uint32_t ssid_count; + struct ssid_hotlist_param ssids[WMI_EXTSCAN_MAX_HOTLIST_SSIDS]; +}; + +/** + * struct wmi_unit_test_cmd - unit test command parameters + * @vdev_id: vdev id + * @module_id: module id + * @num_args: number of arguments + * @diag_token: dialog token, which identifies the transaction. 
+ * this number is generated by wifitool and may be used to + * identify the transaction in the event path + * @args: arguments + */ +struct wmi_unit_test_cmd { + uint32_t vdev_id; + uint32_t module_id; + uint32_t num_args; + uint32_t diag_token; + uint32_t args[WMI_UNIT_TEST_MAX_NUM_ARGS]; +}; + +/** + * struct wmi_roam_invoke_cmd - roam invoke command + * @vdev_id: vdev id + * @bssid: mac address + * @channel: channel + * @frame_len: frame length, includes mac header, fixed params and ies + * @frame_buf: buffer containing probe response or beacon + * @is_same_bssid: flag to indicate if roaming is requested for same bssid + */ +struct wmi_roam_invoke_cmd { + uint32_t vdev_id; + uint8_t bssid[IEEE80211_ADDR_LEN]; + uint32_t channel; + uint32_t frame_len; + uint8_t *frame_buf; + uint8_t is_same_bssid; +}; + +/** + * struct extscan_bssid_hotlist_set_params - set hotlist request + * @request_id: request_id + * @vdev_id: vdev id + * @lost_ap_sample_size: number of samples to confirm AP loss + * @num_ap: Number of hotlist APs + * @ap: hotlist APs + */ +struct extscan_bssid_hotlist_set_params { + uint32_t request_id; + uint8_t vdev_id; + uint32_t lost_ap_sample_size; + uint32_t num_ap; + struct ap_threshold_params ap[WMI_WLAN_EXTSCAN_MAX_HOTLIST_APS]; +}; + +/** + * struct host_mem_req - Host memory request parameters requested by target + * @req_id: Request id to identify the request. + * @unit_size: Size of single unit requested. + * @num_unit_info: Memory chunk info + * @num_units: number of units requested. + */ +typedef struct { + uint32_t req_id; + uint32_t unit_size; + uint32_t num_unit_info; + uint32_t num_units; +} host_mem_req; + +#define WMI_HOST_DSCP_MAP_MAX (64) + +/** + * struct wmi_host_ext_resource_config - Extended resource config + * @host_platform_config: Host platform configuration. + * @fw_feature_bitmap: FW feature requested bitmap.
+ */ +typedef struct { + uint32_t host_platform_config; + +#define WMI_HOST_FW_FEATURE_LTEU_SUPPORT 0x0001 +#define WMI_HOST_FW_FEATURE_COEX_GPIO_SUPPORT 0x0002 +#define WMI_HOST_FW_FEATURE_AUX_RADIO_SPECTRAL_INTF 0x0004 +#define WMI_HOST_FW_FEATURE_AUX_RADIO_CHAN_LOAD_INTF 0x0008 +#define WMI_HOST_FW_FEATURE_BSS_CHANNEL_INFO_64 0x0010 +#define WMI_HOST_FW_FEATURE_PEER_STATS 0x0020 +#define WMI_HOST_FW_FEATURE_VDEV_STATS 0x0040 + /** + * @brief fw_feature_bitmask - Enable/Disable features in FW + * @details + * The bits in fw_feature_bitmask are used as shown by the masks below: + * 0x0001 - LTEU Config enable/disable + * 0x0002 - COEX GPIO Config enable/disable + * 0x0004 - Aux Radio enhancement for spectral scan enable/disable + * 0x0008 - Aux Radio enhancement for chan load scan enable/disable + * 0x0010 - BSS channel info stats enable/disable + * The features in question are enabled by setting + * the feature's bit to 1, + * or disabled by setting the feature's bit to 0. + */ + uint32_t fw_feature_bitmap; + + /* WLAN priority GPIO number + * The target uses a GPIO pin to indicate when it is transmitting + * high-priority traffic (e.g. beacon, management, or AC_VI) or + * low-priority traffic (e.g. AC_BE, AC_BK). The HW uses this + * WLAN GPIO pin to determine whether to abort WLAN tx in favor of + * BT activity. + * Which GPIO is used for this WLAN tx traffic priority specification + * varies between platforms, so the host needs to indicate to the + * target which GPIO to use. + */ + uint32_t wlan_priority_gpio; + + /* Host will notify target which coex algorithm has to be + * enabled based on HW, FW capability and device tree config. + * Till now the coex algorithms were target specific. Now the + * same target can choose between multiple coex algorithms + * depending on device tree config on host. 
For backward + * compatibility, version support will have option 0 and will + * rely on FW compile time flags to decide the coex version + * between VERSION_1, VERSION_2 and VERSION_3. Version info is + * mandatory from VERSION_4 onwards for any new coex algorithms. + * + * 0 = no version support + * 1 = COEX_VERSION_1 (3 wire coex) + * 2 = COEX_VERSION_2 (2.5 wire coex) + * 3 = COEX_VERSION_3 (2.5 wire coex+duty cycle) + * 4 = COEX_VERSION_4 (4 wire coex) + */ + uint32_t coex_version; + + /* There are multiple coex implementations on FW to support different + * hardwares. Since the coex algos are mutually exclusive, host will + * use below fields to send GPIO info to FW and these GPIO pins will + * have different usages depending on the feature enabled. This is to + * avoid adding multiple GPIO fields here for different features. + * + * COEX VERSION_4 (4 wire coex) : + * 4 wire coex feature uses 1 common input request line from BT/ZB/ + * Thread which interrupts the WLAN target processor directly, 1 input + * priority line from BT and ZB each, 1 output line to grant access to + * requesting IOT subsystem. WLAN uses the input priority line to + * identify the requesting IOT subsystem. Request is granted based on + * IOT interface priority and WLAN traffic. GPIO pin usage is as below: + * coex_gpio_pin_1 = BT PRIORITY INPUT GPIO + * coex_gpio_pin_2 = ZIGBEE PRIORITY INPUT GPIO + * coex_gpio_pin_3 = GRANT OUTPUT GPIO + * when a BT active interrupt is raised, WLAN reads + * BT and ZB priority input GPIO pins to compare against the coex + * priority table and accordingly sets the grant output GPIO to give + * access to requesting IOT subsystem. 
+ */ + uint32_t coex_gpio_pin_1; + uint32_t coex_gpio_pin_2; + uint32_t coex_gpio_pin_3; + + /* add new members here */ +} wmi_host_ext_resource_config; + +/** + * struct set_neighbour_rx_params - Neighbour RX params + * @vdev_id: vdev id + * @idx: index of param + * @action: action + * @type: Type of param + */ +struct set_neighbour_rx_params { + uint8_t vdev_id; + uint32_t idx; + uint32_t action; + uint32_t type; +}; + +/** + * struct set_fwtest_params - FW test params + * @arg: FW param id + * @value: value + */ +struct set_fwtest_params { + uint32_t arg; + uint32_t value; +}; + +/** + * struct set_custom_aggr_size_params - custom aggr size params + * @vdev_id : vdev id + * @tx_aggr_size : TX aggr size + * @rx_aggr_size : RX aggr size + * @enable_bitmap: Bitmap for aggr size check + */ +struct set_custom_aggr_size_params { + uint32_t vdev_id; + uint32_t tx_aggr_size; + uint32_t rx_aggr_size; + uint32_t ac:2, + aggr_type:1, + tx_aggr_size_disable:1, + rx_aggr_size_disable:1, + tx_ac_enable:1, + reserved:26; +}; + +/** + * enum wmi_host_custom_aggr_type_t: custon aggregate type + * @WMI_HOST_CUSTOM_AGGR_TYPE_AMPDU: A-MPDU aggregation + * @WMI_HOST_CUSTOM_AGGR_TYPE_AMSDU: A-MSDU aggregation + * @WMI_HOST_CUSTOM_AGGR_TYPE_MAX: Max type + */ +enum wmi_host_custom_aggr_type_t { + WMI_HOST_CUSTOM_AGGR_TYPE_AMPDU = 0, + WMI_HOST_CUSTOM_AGGR_TYPE_AMSDU = 1, + WMI_HOST_CUSTOM_AGGR_TYPE_MAX, +}; + +/* + * msduq_update_params - MSDUQ update param structure + * @tid_num: TID number + * @msduq_update_mask: update bit mask + * @qdepth_thresh_value: threshold value for the queue depth + */ + +#define QDEPTH_THRESH_MAX_UPDATES 1 + +typedef struct { + uint32_t tid_num; + uint32_t msduq_update_mask; + uint32_t qdepth_thresh_value; +} msduq_update_params; + +/** + * struct set_qdepth_thresh_params - MSDU Queue Depth Threshold Params + * @vdev_id: vdev id + * @pdev_id: pdev id + * @mac_addr: MAC address + * @num_of_msduq_updates: holds the number of tid updates + */ + +struct 
set_qdepth_thresh_params { + uint32_t pdev_id; + uint32_t vdev_id; + uint8_t mac_addr[IEEE80211_ADDR_LEN]; + uint32_t num_of_msduq_updates; + msduq_update_params update_params[QDEPTH_THRESH_MAX_UPDATES]; +}; + + + +/** + * struct config_ratemask_params - ratemask config parameters + * @vdev_id: vdev id + * @type: Type + * @lower32: Lower 32 bits + * @higher32: Higher 32 bits + */ +struct config_ratemask_params { + uint8_t vdev_id; + uint8_t type; + uint32_t lower32; + uint32_t higher32; +}; + +/** + * struct config_fils_params - FILS config params + * @vdev_id: vdev id + * @fd_period: 0 - Disabled, non-zero - Period in ms (milliseconds) + */ +struct config_fils_params { + uint8_t vdev_id; + uint32_t fd_period; +}; + +/** + * struct peer_add_wds_entry_params - WDS peer entry add params + * @dest_addr: Pointer to destination macaddr + * @peer_addr: Pointer to peer mac addr + * @flags: flags + * @vdev_id: Vdev id + */ +struct peer_add_wds_entry_params { + const uint8_t *dest_addr; + uint8_t *peer_addr; + uint32_t flags; + uint32_t vdev_id; +}; + +/** + * struct peer_del_wds_entry_params - WDS peer entry del params + * @dest_addr: Pointer to destination macaddr + * @vdev_id: Vdev id + */ +struct peer_del_wds_entry_params { + uint8_t *dest_addr; + uint32_t vdev_id; +}; + +/** + * struct set_bridge_mac_addr_params - set bridge MAC addr params + * @bridge_addr: Pointer to bridge macaddr + */ +struct set_bridge_mac_addr_params { + uint8_t *bridge_addr; +}; + +/** + * struct peer_update_wds_entry_params - WDS peer entry update params + * @wds_macaddr: Pointer to destination macaddr + * @peer_macaddr: Pointer to peer mac addr + * @flags: flags + * @vdev_id: Vdev id + */ +struct peer_update_wds_entry_params { + uint8_t *wds_macaddr; + uint8_t *peer_macaddr; + uint32_t flags; + uint32_t vdev_id; +}; + +/** + * struct set_ps_mode_params - PS mode params + * @vdev_id: vdev id + * @psmode: PS mode + */ +struct set_ps_mode_params { + uint8_t vdev_id; + uint8_t psmode; +}; + +/** + *
@struct tt_level_config - Set Thermal throttlling config + * @tmplwm: Temperature low water mark + * @tmphwm: Temperature high water mark + * @dcoffpercent: dc off percentage + * @priority: priority + */ +typedef struct { + uint32_t tmplwm; + uint32_t tmphwm; + uint32_t dcoffpercent; + uint32_t priority; +} tt_level_config; + +/** + * struct thermal_mitigation_params - Thermal mitigation params + * @enable: Enable/Disable Thermal mitigation + * @dc: DC + * @dc_per_event: DC per event + * @tt_level_config: TT level config params + */ +struct thermal_mitigation_params { + uint32_t pdev_id; + uint32_t enable; + uint32_t dc; + uint32_t dc_per_event; + tt_level_config levelconf[THERMAL_LEVELS]; +}; + +/** + * struct smart_ant_enable_params - Smart antenna params + * @enable: Enable/Disable + * @mode: SA mode + * @rx_antenna: RX antenna config + * @gpio_pin : GPIO pin config + * @gpio_func : GPIO function config + */ +struct smart_ant_enable_params { + uint32_t enable; + uint32_t mode; + uint32_t rx_antenna; + uint32_t gpio_pin[WMI_HAL_MAX_SANTENNA]; + uint32_t gpio_func[WMI_HAL_MAX_SANTENNA]; + uint32_t pdev_id; +}; + +/** + * struct smart_ant_rx_ant_params - RX antenna params + * @antenna: RX antenna + */ +struct smart_ant_rx_ant_params { + uint32_t antenna; + uint32_t pdev_id; +}; + +/** + * struct smart_ant_tx_ant_params - TX antenna param + * @antenna_array: Antenna arry + * @vdev_id: VDEV id + */ +struct smart_ant_tx_ant_params { + uint32_t *antenna_array; + uint8_t vdev_id; +}; + +/** + * struct smart_ant_training_info_params - SA training params + * @vdev_id: VDEV id + * @rate_array: Rates array + * @antenna_array: Antenna array + * @numpkts: num packets for training + */ +struct smart_ant_training_info_params { + uint8_t vdev_id; + uint32_t *rate_array; + uint32_t *antenna_array; + uint32_t numpkts; +}; + +/** + * struct smart_ant_node_config_params - SA node config params + * @vdev_id: VDEV id + * @cmd_id: Command id + * @args_count: Arguments count + */ 
+struct smart_ant_node_config_params { + uint8_t vdev_id; + uint32_t cmd_id; + uint16_t args_count; + uint32_t *args_arr; +}; +/** + * struct smart_ant_enable_tx_feedback_params - SA tx feeback params + * @enable: Enable TX feedback for SA + */ +struct smart_ant_enable_tx_feedback_params { + int enable; +}; + +/** + * struct vdev_spectral_configure_params - SPectral config params + * @vdev_id: VDEV id + * @count: count + * @period: period + * @spectral_pri: Spectral priority + * @fft_size: FFT size + * @gc_enable: GC enable + * @restart_enable: restart enabled + * @noise_floor_ref: Noise floor reference + * @init_delay: Init delays + * @nb_tone_thr: NB tone threshold + * @str_bin_thr: STR BIN threshold + * @wb_rpt_mode: WB BIN threshold + * @rssi_rpt_mode: RSSI report mode + * @rssi_thr: RSSI threshold + * @pwr_format: Power format + * @rpt_mode: Report mdoe + * @bin_scale: BIN scale + * @dbm_adj: DBM adjust + * @chn_mask: chain mask + */ +struct vdev_spectral_configure_params { + uint8_t vdev_id; + uint16_t count; + uint16_t period; + uint16_t spectral_pri; + uint16_t fft_size; + uint16_t gc_enable; + uint16_t restart_enable; + uint16_t noise_floor_ref; + uint16_t init_delay; + uint16_t nb_tone_thr; + uint16_t str_bin_thr; + uint16_t wb_rpt_mode; + uint16_t rssi_rpt_mode; + uint16_t rssi_thr; + uint16_t pwr_format; + uint16_t rpt_mode; + uint16_t bin_scale; + uint16_t dbm_adj; + uint16_t chn_mask; +}; + +/** + * struct vdev_spectral_enable_params - Spectral enabled params + * @vdev_id: VDEV id + * @active_valid: Active valid + * @active: active + * @enabled_valid: Enabled valid + * @enabled: enabled + */ +struct vdev_spectral_enable_params { + uint8_t vdev_id; + uint8_t active_valid; + uint8_t active; + uint8_t enabled_valid; + uint8_t enabled; +}; + +/** + * struct pdev_set_regdomain_params - PDEV set reg domain params + * @currentRDinuse: Current Reg domain + * @currentRD2G: Current Reg domain 2G + * @currentRD5G: Current Reg domain 5G + * @ctl_2G: CTL 2G + * 
@ctl_5G: CTL 5G + * @dfsDomain: DFS domain + * @pdev_id: pdev_id + */ +struct pdev_set_regdomain_params { + uint16_t currentRDinuse; + uint16_t currentRD2G; + uint16_t currentRD5G; + uint32_t ctl_2G; + uint32_t ctl_5G; + uint8_t dfsDomain; + uint32_t pdev_id; +}; + +/** + * struct set_quiet_mode_params - Set quiet mode params + * @enabled: Enabled + * @period: Quiet period + * @intval: Quiet interval + * @duration: Quiet duration + * @offset: offset + */ +struct set_quiet_mode_params { + uint8_t enabled; + uint8_t period; + uint16_t intval; + uint16_t duration; + uint16_t offset; +}; + +/** + * struct set_beacon_filter_params - Set beacon filter params + * @vdev_id: VDEV id + * @ie: Pointer to IE fields + */ +struct set_beacon_filter_params { + uint8_t vdev_id; + uint32_t *ie; +}; + +/** + * struct remove_beacon_filter_params - Remove beacon filter params + * @vdev_id: VDEV id + */ +struct remove_beacon_filter_params { + uint8_t vdev_id; +}; + +/** + * struct mgmt_params - Mgmt params + * @vdev_id: vdev id + * @buf_len: length of frame buffer + * @wbuf: frame buffer + */ +struct mgmt_params { + int vdev_id; + uint32_t buf_len; + qdf_nbuf_t wbuf; +}; + +/** + * struct addba_clearresponse_params - Addba clear response params + * @vdev_id: VDEV id + */ +struct addba_clearresponse_params { + uint8_t vdev_id; +}; + +/** + * struct addba_send_params - ADDBA send params + * @vdev_id: vdev id + * @tidno: TID + * @buffersize: buffer size + */ +struct addba_send_params { + uint8_t vdev_id; + uint8_t tidno; + uint16_t buffersize; +}; + +/** + * struct delba_send_params - DELBA send params + * @vdev_id: vdev id + * @tidno: TID + * @initiator: initiator + * @reasoncode: reason code + */ +struct delba_send_params { + uint8_t vdev_id; + uint8_t tidno; + uint8_t initiator; + uint16_t reasoncode; +}; +/** + * struct addba_setresponse_params - Set ADDBA response params + * @vdev_id: vdev id + * @tidno: TID + * @statuscode: status code in response + */ +struct addba_setresponse_params
{ + uint8_t vdev_id; + uint8_t tidno; + uint16_t statuscode; +}; + +/** + * struct singleamsdu_params - Single AMSDU params + * @vdev_id: vdev is + * @tidno: TID + */ +struct singleamsdu_params { + uint8_t vdev_id; + uint8_t tidno; +}; + +/** + * struct set_qbosst_params - Set QBOOST params + * @vdev_id: vdev id + * @value: value + */ +struct set_qboost_params { + uint8_t vdev_id; + uint32_t value; +}; + +/** + * struct mu_scan_params - MU scan params + * @id: id + * @type: type + * @duration: Duration + * @lteu_tx_power: LTEU tx power + */ +struct mu_scan_params { + uint8_t id; + uint8_t type; + uint32_t duration; + uint32_t lteu_tx_power; + uint32_t rssi_thr_bssid; + uint32_t rssi_thr_sta; + uint32_t rssi_thr_sc; + uint32_t plmn_id; + uint32_t alpha_num_bssid; +}; + +/** + * struct lteu_config_params - LTEU config params + * @lteu_gpio_start: start MU/AP scan after GPIO toggle + * @lteu_num_bins: no. of elements in the following arrays + * @use_actual_nf: whether to use the actual NF obtained or a hardcoded one + * @lteu_weight: weights for MU algo + * @lteu_thresh: thresholds for MU algo + * @lteu_gamma: gamma's for MU algo + * @lteu_scan_timeout: timeout in ms to gpio toggle + * @alpha_num_ssid: alpha for num active bssid calculation + * @wifi_tx_power: Wifi Tx power + */ +struct lteu_config_params { + uint8_t lteu_gpio_start; + uint8_t lteu_num_bins; + uint8_t use_actual_nf; + uint32_t lteu_weight[LTEU_MAX_BINS]; + uint32_t lteu_thresh[LTEU_MAX_BINS]; + uint32_t lteu_gamma[LTEU_MAX_BINS]; + uint32_t lteu_scan_timeout; + uint32_t alpha_num_bssid; + uint32_t wifi_tx_power; + uint32_t allow_err_packets; +}; + +struct wmi_macaddr_t { + /** upper 4 bytes of MAC address */ + uint32_t mac_addr31to0; + /** lower 2 bytes of MAC address */ + uint32_t mac_addr47to32; +}; + +/** + * struct atf_peer_info - ATF peer info params + * @peer_macaddr: peer mac addr + * @percentage_peer: percentage of air time for this peer + * @vdev_id: Associated vdev id + * @pdev_id: 
Associated pdev id + */ +typedef struct { + struct wmi_macaddr_t peer_macaddr; + uint32_t percentage_peer; + uint32_t vdev_id; + uint32_t pdev_id; +} atf_peer_info; + +/** + * struct bwf_peer_info_t - BWF peer info params + * @peer_macaddr: peer mac addr + * @throughput: Throughput + * @max_airtime: Max airtime + * @priority: Priority level + * @reserved: Reserved array + * @vdev_id: Associated vdev id + * @pdev_id: Associated pdev id + */ +typedef struct { + struct wmi_macaddr_t peer_macaddr; + uint32_t throughput; + uint32_t max_airtime; + uint32_t priority; + uint32_t reserved[4]; + uint32_t vdev_id; + uint32_t pdev_id; +} bwf_peer_info; + +/** + * struct set_bwf_params - BWF params + * @num_peers: number of peers + * @atf_peer_info: BWF peer info + */ +struct set_bwf_params { + uint32_t num_peers; + bwf_peer_info peer_info[1]; +}; + +/** + * struct atf_peer_ext_info - ATF peer ext info params + * @peer_macaddr: peer mac address + * @group_index: group index + * @atf_index_reserved: ATF index rsvd + * @vdev_id: Associated vdev id + * @pdev_id: Associated pdev id + */ +typedef struct { + struct wmi_macaddr_t peer_macaddr; + uint32_t group_index; + uint32_t atf_index_reserved; + uint16_t vdev_id; + uint16_t pdev_id; +} atf_peer_ext_info; + +/** + * struct set_atf_params - ATF params + * @num_peers: number of peers + * @atf_peer_info: ATF peer info + */ +struct set_atf_params { + uint32_t num_peers; + atf_peer_info peer_info[ATF_ACTIVED_MAX_CLIENTS]; +}; + +/** + * struct atf_peer_request_params - ATF peer req params + * @num_peers: number of peers + * @atf_peer_ext_info: ATF peer ext info + */ +struct atf_peer_request_params { + uint32_t num_peers; + atf_peer_ext_info peer_ext_info[ATF_ACTIVED_MAX_CLIENTS]; +}; + +/** + * struct atf_group_info - ATF group info params + * @percentage_group: Percentage AT for group + * @atf_group_units_reserved: ATF group information + * @pdev_id: Associated pdev id + */ +typedef struct { + uint32_t percentage_group; + uint32_t 
atf_group_units_reserved; + uint32_t pdev_id; +} atf_group_info; + +/** + * struct atf_grouping_params - ATF grouping params + * @num_groups: number of groups + * @group_inf: Group informaition + */ +struct atf_grouping_params { + uint32_t num_groups; + atf_group_info group_info[ATF_ACTIVED_MAX_ATFGROUPS]; +}; + +/** + * struct wlan_profile_params - WLAN profile params + * @param_id: param id + * @profile_id: profile id + * @enable: enable + */ +struct wlan_profile_params { + uint32_t param_id; + uint32_t profile_id; + uint32_t enable; +}; + +/* struct ht_ie_params - HT IE params + * @ie_len: IE length + * @ie_data: pointer to IE data + * @tx_streams: Tx streams supported for this HT IE + * @rx_streams: Rx streams supported for this HT IE + */ +struct ht_ie_params { + uint32_t ie_len; + uint8_t *ie_data; + uint32_t tx_streams; + uint32_t rx_streams; +}; + +/* struct vht_ie_params - VHT IE params + * @ie_len: IE length + * @ie_data: pointer to IE data + * @tx_streams: Tx streams supported for this VHT IE + * @rx_streams: Rx streams supported for this VHT IE + */ +struct vht_ie_params { + uint32_t ie_len; + uint8_t *ie_data; + uint32_t tx_streams; + uint32_t rx_streams; +}; + +/** + * struct wmi_host_wmeParams - WME params + * @wmep_acm: ACM paramete + * @wmep_aifsn: AIFSN parameters + * @wmep_logcwmin: cwmin in exponential form + * @wmep_logcwmax: cwmax in exponential form + * @wmep_txopLimit: txopLimit + * @wmep_noackPolicy: No-Ack Policy: 0=ack, 1=no-ack + */ +struct wmi_host_wmeParams { + u_int8_t wmep_acm; + u_int8_t wmep_aifsn; + u_int8_t wmep_logcwmin; + u_int8_t wmep_logcwmax; + u_int16_t wmep_txopLimit; + u_int8_t wmep_noackPolicy; +}; + +/** + * struct wmm_update_params - WMM update params + * @wmep_array: WME params for each AC + */ +struct wmm_update_params { + struct wmi_host_wmeParams *wmep_array; +}; + +/** + * struct wmi_host_wmevParams - WME params + * @wmep_acm: ACM paramete + * @wmep_aifsn: AIFSN parameters + * @wmep_logcwmin: cwmin in exponential 
form + * @wmep_logcwmax: cwmax in exponential form + * @wmep_txopLimit: txopLimit + * @wmep_noackPolicy: No-Ack Policy: 0=ack, 1=no-ack + */ +struct wmi_host_wme_vparams { + u_int32_t acm; + u_int32_t aifs; + u_int32_t cwmin; + u_int32_t cwmax; + union { + u_int32_t txoplimit; + u_int32_t mu_edca_timer; + }; + u_int32_t noackpolicy; +}; + +/** + * struct ant_switch_tbl_params - Antenna switch table params + * @ant_ctrl_common1: ANtenna control common param 1 + * @ant_ctrl_common2: Antenna control commn param 2 + */ +struct ant_switch_tbl_params { + uint32_t ant_ctrl_common1; + uint32_t ant_ctrl_common2; + uint32_t pdev_id; + uint32_t antCtrlChain; +}; + +/** + * struct ratepwr_table_params - Rate power table params + * @ratepwr_tbl: pointer to rate power table + * @ratepwr_len: rate power table len + */ +struct ratepwr_table_params { + uint8_t *ratepwr_tbl; + uint16_t ratepwr_len; +}; + +/** + * struct ctl_table_params - Ctl table params + * @ctl_array: pointer to ctl array + * @ctl_cmd_len: ctl command length + * @is_acfg_ctl: is acfg_ctl table + */ +struct ctl_table_params { + uint8_t *ctl_array; + uint16_t ctl_cmd_len; + uint32_t target_type; + bool is_2g; + uint32_t ctl_band; + uint32_t pdev_id; +}; + +/** + * struct mimogain_table_params - MIMO gain table params + * @array_gain: pointer to array gain table + * @tbl_len: table length + * @multichain_gain_bypass: bypass multichain gain + */ +struct mimogain_table_params { + uint8_t *array_gain; + uint16_t tbl_len; + bool multichain_gain_bypass; + uint32_t pdev_id; +}; + +/** + * struct ratepwr_chainmask_params - Rate power chainmask params + * @ratepwr_chain_tbl: pointer to ratepwr chain table + * @num_rate: number of rate in table + * @pream_type: preamble type + * @ops: ops + */ +struct ratepwr_chainmsk_params { + uint32_t *ratepwr_chain_tbl; + uint16_t num_rate; + uint8_t pream_type; + uint8_t ops; +}; + +struct macaddr_params { + uint8_t *macaddr; +}; + +/** + * struct acparams_params - acparams config 
structure + * @ac: AC to configure + * @use_rts: Use rts for this AC + * @aggrsize_scaling: Aggregrate size scaling for the AC + * @min_kbps: min kbps req + */ +struct acparams_params { + uint8_t ac; + uint8_t use_rts; + uint8_t aggrsize_scaling; + uint32_t min_kbps; +}; + +/** + * struct vap_dscp_tid_map_params - DSCP tid map params + * @vdev_id: vdev id + * @dscp_to_tid_map: pointer to arry of tid to dscp map table + */ +struct vap_dscp_tid_map_params { + uint8_t vdev_id; + uint32_t *dscp_to_tid_map; +}; + +/** + * struct proxy_ast_reserve_params - Proxy AST reserve params + * @macaddr: macaddr for proxy ast entry + */ +struct proxy_ast_reserve_params { + uint8_t *macaddr; +}; + +/** + * struct fips_params - FIPS params config + * @key: pointer to key + * @key_len: length of key + * @data: pointer data buf + * @data_len: length of data buf + * @mode: mode + * @op: operation + * @pdev_id: pdev_id for identifying the MAC + */ +struct fips_params { + uint8_t *key; + uint32_t key_len; + uint8_t *data; + uint32_t data_len; + uint32_t mode; + uint32_t op; + uint32_t pdev_id; +}; + +/** + * struct mcast_group_update_param - Mcast group table update to target + * @action: Addition/deletion + * @wildcard: iwldcard table entry? 
+ * @mcast_ip_addr: mcast ip address to be updated + * @mcast_ip_addr_bytes: mcast ip addr bytes + * @ucast_mac_addr: ucast peer mac subscribed to mcast ip + * @filter_mode: filter mode + * @nsrcs: number of entries in source list + * @srcs: source mac accpted + * @mask: mask + * @vap_id: vdev id + * @is_action_delete: is delete + * @is_filter_mode_snoop: + * @is_mcast_addr_len: + */ +struct mcast_group_update_params { + int action; + int wildcard; + uint8_t *mcast_ip_addr; + int mcast_ip_addr_bytes; + uint8_t *ucast_mac_addr; + uint8_t filter_mode; + uint8_t nsrcs; + uint8_t *srcs; + uint8_t *mask; + uint8_t vap_id; + bool is_action_delete; + bool is_filter_mode_snoop; + bool is_mcast_addr_len; +}; + +/** + * struct periodic_chan_stats_param - periodic channel stats req param + * @stats_period: stats period update + * @enable: enable/disable + */ +struct periodic_chan_stats_params { + uint32_t stats_period; + bool enable; + uint32_t pdev_id; +}; + +/** + * enum wmi_host_packet_power_rate_flags: packer power rate flags + * @WMI_HOST_FLAG_RTSENA: RTS enabled + * @WMI_HOST_FLAG_CTSENA: CTS enabled + * @WMI_HOST_FLAG_STBC: STBC is set + * @WMI_HOST_FLAG_LDPC: LDPC is set + * @WMI_HOST_FLAG_TXBF: Tx Bf enabled + * @WMI_HOST_FLAG_MU2: MU2 data + * @WMI_HOST_FLAG_MU3: MU3 data + * @WMI_HOST_FLAG_SERIES1: Rate series 1 + * @WMI_HOST_FLAG_SGI: Short gaurd interval + */ +enum wmi_host_packet_power_rate_flags { + WMI_HOST_FLAG_RTSENA = 0x0001, + WMI_HOST_FLAG_CTSENA = 0x0002, + WMI_HOST_FLAG_STBC = 0x0004, + WMI_HOST_FLAG_LDPC = 0x0008, + WMI_HOST_FLAG_TXBF = 0x0010, + WMI_HOST_FLAG_MU2 = 0x0020, + WMI_HOST_FLAG_MU3 = 0x0040, + WMI_HOST_FLAG_SERIES1 = 0x0080, + WMI_HOST_FLAG_SGI = 0x0100, +}; + +/** + * enum wmi_host_su_mu_ofdma_flags: packer power su mu ofdma flags + * @WMI_HOST_FLAG_SU: SU Data + * @WMI_HOST_FLAG_DL_MU_MIMO_AC: DL AC MU data + * @WMI_HOST_FLAG_DL_MU_MIMO_AX: DL AX MU data + * @WMI_HOST_FLAG_DL_OFDMA: DL OFDMA data + * @WMI_HOST_FLAG_UL_OFDMA: UL OFDMA data 
+ * @WMI_HOST_FLAG_UL_MU_MIMO: UL MU data + */ +enum wmi_host_su_mu_ofdma_flags { + WMI_HOST_FLAG_SU = 0x0001, + WMI_HOST_FLAG_DL_MU_MIMO_AC = 0x0002, + WMI_HOST_FLAG_DL_MU_MIMO_AX = 0x0003, + WMI_HOST_FLAG_DL_OFDMA = 0x0004, + WMI_HOST_FLAG_UL_OFDMA = 0x0005, + WMI_HOST_FLAG_UL_MU_MIMO = 0x0006, +}; + +/** + * enum wmi_host_preamble_type: preamble type + * @WMI_HOST_PREAMBLE_OFDM: ofdm rate + * @WMI_HOST_PREAMBLE_CCK: cck rate + * @WMI_HOST_PREAMBLE_HT: ht rate + * @WMI_HOST_PREAMBLE_VHT: vht rate + * @WMI_HOST_PREAMBLE_HE: 11ax he rate + */ +enum wmi_host_preamble_type { + WMI_HOST_PREAMBLE_OFDM = 0, + WMI_HOST_PREAMBLE_CCK = 1, + WMI_HOST_PREAMBLE_HT = 2, + WMI_HOST_PREAMBLE_VHT = 3, + WMI_HOST_PREAMBLE_HE = 4, +}; + +/** + * struct packet_power_info_params - packet power info params + * @chainmask: chain mask + * @chan_width: channel bandwidth + * @rate_flags: rate flags + * @su_mu_ofdma: su/mu/ofdma flags + * @nss: number of spatial streams + * @preamble: preamble + * @hw_rate: + */ +struct packet_power_info_params { + uint16_t chainmask; + uint16_t chan_width; + uint16_t rate_flags; + uint16_t su_mu_ofdma; + uint16_t nss; + uint16_t preamble; + uint16_t hw_rate; + uint32_t pdev_id; +}; + +/** + * WMI_GPIO_CONFIG_CMDID + */ +enum { + WMI_HOST_GPIO_PULL_NONE, + WMI_HOST_GPIO_PULL_UP, + WMI_HOST_GPIO_PULL_DOWN, +}; + +/** + * WMI_GPIO_INTTYPE + */ +enum { + WMI_HOST_GPIO_INTTYPE_DISABLE, + WMI_HOST_GPIO_INTTYPE_RISING_EDGE, + WMI_HOST_GPIO_INTTYPE_FALLING_EDGE, + WMI_HOST_GPIO_INTTYPE_BOTH_EDGE, + WMI_HOST_GPIO_INTTYPE_LEVEL_LOW, + WMI_HOST_GPIO_INTTYPE_LEVEL_HIGH +}; + +/** + * struct wmi_host_gpio_input_event - GPIO input event structure + * @gpio_num: GPIO number which changed state + */ +typedef struct { + uint32_t gpio_num; /* GPIO number which changed state */ +} wmi_host_gpio_input_event; + +/** + * struct gpio_config_params - GPIO config params + * @gpio_num: GPIO number to config + * @input: input/output + * @pull_type: pull type + * @intr_mode: int 
mode + */ +struct gpio_config_params { + uint32_t gpio_num; + uint32_t input; + uint32_t pull_type; + uint32_t intr_mode; +}; + +/** + * struct gpio_output_params - GPIO output params + * @gpio_num: GPIO number to configure + * @set: set/reset + */ +struct gpio_output_params { + uint32_t gpio_num; + uint32_t set; +}; + +/* flags bit 0: to configure wlan priority bitmap */ +#define WMI_HOST_BTCOEX_PARAM_FLAGS_WLAN_PRIORITY_BITMAP_BIT (1<<0) +/* flags bit 1: to configure both period and wlan duration */ +#define WMI_HOST_BTCOEX_PARAM_FLAGS_DUTY_CYCLE_BIT (1<<1) +struct btcoex_cfg_params { + /* WLAN priority bitmask for different frame types */ + uint32_t btcoex_wlan_priority_bitmap; + /* This command is used to configure different btcoex params + * in different situations.The host sets the appropriate bit(s) + * in btcoex_param_flags to indicate which configuration parameters + * are valid within a particular BT coex config message, so that one + * BT configuration parameter can be configured without affecting + * other BT configuration parameters.E.g. if the host wants to + * configure only btcoex_wlan_priority_bitmap it sets only + * WMI_BTCOEX_PARAM_FLAGS_WLAN_PRIORITY_BITMAP_BIT in + * btcoex_param_flags so that firmware will not overwrite + * other params with default value passed in the command. + * Host can also set multiple bits in btcoex_param_flags + * to configure more than one param in single message. + */ + uint32_t btcoex_param_flags; + /* period denotes the total time in milliseconds which WLAN and BT share + * configured percentage for transmission and reception. + */ + uint32_t period; + /* wlan duration is the time in milliseconds given for wlan + * in above period. 
+ */ + uint32_t wlan_duration; +}; + +#define WMI_HOST_COEX_CONFIG_BUF_MAX_LEN 32 /* 128 bytes */ +/** + * coex_ver_cfg_t + * @coex_version: Version for 4 wire coex + * @length: Length of payload buffer based on version + * @config_buf: Payload Buffer + */ +typedef struct { + /* VERSION_4 (4 wire coex) */ + uint32_t coex_version; + + /* No. of uint32_t elements in payload buffer. Will depend on the coex + * version + */ + uint32_t length; + + /* Payload buffer */ + uint32_t config_buf[WMI_HOST_COEX_CONFIG_BUF_MAX_LEN]; +} coex_ver_cfg_t; + +#define WMI_HOST_RTT_REPORT_CFR 0 +#define WMI_HOST_RTT_NO_REPORT_CFR 1 +#define WMI_HOST_RTT_AGGREGATE_REPORT_NON_CFR 2 +/** + * struct rtt_meas_req_test_params + * @peer: peer mac address + * @req_frame_type: RTT request frame type + * @req_bw: requested bandwidth + * @req_preamble: Preamble + * @req_num_req: num of requests + * @req_report_type: report type + * @num_measurements: number of measurements + * @asap_mode: priority + * @lci_requested: LCI requested + * @loc_civ_requested: + * @channel_param: channel param + * @req_id: requested id + */ +struct rtt_meas_req_test_params { + uint8_t peer[IEEE80211_ADDR_LEN]; + int req_frame_type; + int req_bw; + int req_preamble; + int req_num_req; + int req_report_type; + uint32_t num_measurements; + uint32_t asap_mode; + uint32_t lci_requested; + uint32_t loc_civ_requested; + struct channel_param channel; + uint8_t req_id; +}; + +/** + * struct rtt_meas_req_params - RTT measurement request params + * @req_id: Request id + * @vdev_id: vdev id + * @sta_mac_addr: pointer to station mac address + * @spoof_mac_addr: pointer to spoof mac address + * @is_mode_na: 11NA + * @is_mode_ac: AC + * @is_bw_20: 20 + * @is_bw_40: 40 + * @is_bw_80: 80 + * @num_probe_rqst: number of probe request + * @channel_param: channel param + */ +struct rtt_meas_req_params { + uint8_t req_id; + uint8_t vdev_id; + uint8_t *sta_mac_addr; + uint8_t *spoof_mac_addr; + bool is_mode_na; + bool is_mode_ac; + bool 
is_bw_20; + bool is_bw_40; + bool is_bw_80; + uint32_t num_probe_rqst; + struct channel_param channel; +}; + +/** + * struct lci_set_params - LCI params + * @lci_data: pointer to LCI data + * @latitude_unc: latitude + * @latitude_0_12: bits 0 to 1 of latitude + * @latitude_2_33: bits 2 to 33 of latitude + * @longitude_unc: longitude + * @longitude_0_1: bits 0 to 1 of longitude + * @longitude_2_33: bits 2 to 33 of longitude + * @altitude_type: altitude type + * @altitude_unc_0_3: altitude bits 0 - 3 + * @altitude_unc_4_5: altitude bits 4 - 5 + * @altitude: altitude + * @datum: dataum + * @reg_loc_agmt: + * @reg_loc_dse: + * @dep_sta: + * @version: version + */ +struct lci_set_params { + void *lci_data; + uint8_t latitude_unc:6, + latitude_0_1:2; + uint32_t latitude_2_33; + uint8_t longitude_unc:6, + longitude_0_1:2; + uint32_t longitude_2_33; + uint8_t altitude_type:4, + altitude_unc_0_3:4; + uint32_t altitude_unc_4_5:2, + altitude:30; + uint8_t datum:3, + reg_loc_agmt:1, + reg_loc_dse:1, + dep_sta:1, + version:2; + uint8_t *colocated_bss; + int msg_len; +}; + +/** + * struct lcr_set_params - LCR params + * @lcr_data: pointer to lcr data + */ +struct lcr_set_params { + void *lcr_data; + int msg_len; +}; + +/** + * struct rtt_keepalive_req_params - RTT keepalive params + * @macaddr: pointer to macaddress + * @req_id: Request id + * @vdev_id: vdev id + * @stop: start/stop + */ +struct rtt_keepalive_req_params { + uint8_t *macaddr; + uint8_t req_id; + uint8_t vdev_id; + bool stop; +}; + +/** + * struct rx_reorder_queue_setup_params - Reorder queue setup params + * @peer_mac_addr: Peer mac address + * @tid: TID + * @vdev_id: vdev id + * @hw_qdesc_paddr_lo: lower 32 bits of queue desc adddress + * @hw_qdesc_paddr_hi: upper 32 bits of queue desc adddress + * @queue_no: 16-bit number assigned by host for queue + */ +struct rx_reorder_queue_setup_params { + uint8_t *peer_macaddr; + uint16_t tid; + uint16_t vdev_id; + uint32_t hw_qdesc_paddr_lo; + uint32_t hw_qdesc_paddr_hi; 
+ uint16_t queue_no; +}; + +/** + * struct rx_reorder_queue_remove_params - Reorder queue setup params + * @peer_mac_addr: Peer mac address + * @vdev_id: vdev id + * @peer_tid_bitmap: peer tid bitmap + */ +struct rx_reorder_queue_remove_params { + uint8_t *peer_macaddr; + uint16_t vdev_id; + uint32_t peer_tid_bitmap; +}; + +/** + * struct wmi_host_stats_event - Stats event params + * @stats_id: stats id of type wmi_host_stats_event + * @num_pdev_stats: number of pdev stats event structures 0 or 1 + * @num_pdev_ext_stats: number of pdev ext stats event structures + * @num_vdev_stats: number of vdev stats + * @num_peer_stats: number of peer stats event structures 0 or max peers + * @num_bcnflt_stats: number of beacon filter stats + * @num_chan_stats: number of channel stats + * @pdev_id: device id for the radio + * @num_bcn_stats: number of beacon stats + * @num_rssi_stats: number of rssi stats + * @num_peer_adv_stats: number of peer adv stats + * @last_event: specify if the current event is the last event + */ +typedef struct { + wmi_host_stats_id stats_id; + uint32_t num_pdev_stats; + uint32_t num_pdev_ext_stats; + uint32_t num_vdev_stats; + uint32_t num_peer_stats; + uint32_t num_bcnflt_stats; + uint32_t num_chan_stats; + uint32_t pdev_id; + uint32_t num_bcn_stats; + uint32_t num_rssi_stats; + uint32_t num_peer_adv_stats; + uint32_t last_event; +} wmi_host_stats_event; + +/** + * struct wmi_host_peer_extd_stats - peer extd stats event structure + * @peer_macaddr: Peer mac address + * @inactive_time: inactive time in secs + * @peer_chain_rssi: peer rssi + * @rx_duration: RX duration + * @peer_tx_bytes: TX bytes + * @last_tx_rate_code: Tx rate code of last frame + * @last_tx_power: Tx power latest + * @atf_tokens_allocated: atf tokens allocated + * @atf_tokens_utilized: atf tokens utilized + * @reserved: for future use + */ +typedef struct { + wmi_host_mac_addr peer_macaddr; + uint32_t inactive_time; + uint32_t peer_chain_rssi; + uint32_t rx_duration; + uint32_t 
peer_tx_bytes; + uint32_t last_tx_rate_code; + uint32_t last_tx_power; + uint32_t atf_tokens_allocated; + uint32_t atf_tokens_utilized; + uint32_t reserved[4]; +} wmi_host_peer_extd_stats; + +/** + * struct wmi_host_peer_adv_stats - peer adv stats event structure + * @peer_macaddr: mac address + * @fcs_count: fcs count + * @rx_bytes: rx bytes + * @rx_count: rx count + */ +struct wmi_host_peer_adv_stats { + uint8_t peer_macaddr[WLAN_MACADDR_LEN]; + uint32_t fcs_count; + uint64_t rx_bytes; + uint32_t rx_count; +}; + +/** + * struct wmi_host_pdev_ext_stats - peer ext stats structure + * @rx_rssi_comb: RX rssi + * @rx_rssi_chain0: RX rssi chain 0 + * @rx_rssi_chain1: RX rssi chain 1 + * @rx_rssi_chain2: RX rssi chain 2 + * @rx_rssi_chain3: RX rssi chain 3 + * @rx_mcs: RX MCS array + * @tx_mcs: TX MCS array + * @ack_rssi: Ack rssi + */ +typedef struct { + uint32_t rx_rssi_comb; + uint32_t rx_rssi_chain0; + uint32_t rx_rssi_chain1; + uint32_t rx_rssi_chain2; + uint32_t rx_rssi_chain3; + uint32_t rx_mcs[10]; + uint32_t tx_mcs[10]; + uint32_t ack_rssi; +} wmi_host_pdev_ext_stats; + +/** + * struct wmi_host_dbg_tx_stats - Debug stats + * @comp_queued: Num HTT cookies queued to dispatch list + * @comp_delivered: Num HTT cookies dispatched + * @msdu_enqued: Num MSDU queued to WAL + * @mpdu_enqued: Num MPDU queue to WAL + * @wmm_drop: Num MSDUs dropped by WMM limit + * @local_enqued: Num Local frames queued + * @local_freed: Num Local frames done + * @hw_queued: Num queued to HW + * @hw_reaped: Num PPDU reaped from HW + * @underrun: Num underruns + * @hw_paused: HW Paused. 
+ * @tx_abort: Num PPDUs cleaned up in TX abort + * @mpdus_requed: Num MPDUs requed by SW + * @tx_ko: excessive retries + * @tx_xretry: + * @data_rc: data hw rate code + * @self_triggers: Scheduler self triggers + * @sw_retry_failure: frames dropped due to excessive sw retries + * @illgl_rate_phy_err: illegal rate phy errors + * @pdev_cont_xretry: wal pdev continuous xretry + * @pdev_tx_timeout: wal pdev continuous xretry + * @pdev_resets: wal pdev resets + * @stateless_tid_alloc_failure: frames dropped due to non-availability of + * stateless TIDs + * @phy_underrun: PhY/BB underrun + * @txop_ovf: MPDU is more than txop limit + * @seq_posted: Number of Sequences posted + * @seq_failed_queueing: Number of Sequences failed queueing + * @seq_completed: Number of Sequences completed + * @seq_restarted: Number of Sequences restarted + * @mu_seq_posted: Number of MU Sequences posted + * @mpdus_sw_flush: Num MPDUs flushed by SW, HWPAUSED, SW TXABORT + * (Reset,channel change) + * @mpdus_hw_filter: Num MPDUs filtered by HW, all filter condition + * (TTL expired) + * @mpdus_truncated: Num MPDUs truncated by PDG (TXOP, TBTT, + * PPDU_duration based on rate, dyn_bw) + * @mpdus_ack_failed: Num MPDUs that was tried but didn't receive ACK or BA + * @mpdus_expired: Num MPDUs that was dropped du to expiry. 
+ * @mc_drop: Num mc drops + */ +typedef struct { + int32_t comp_queued; + int32_t comp_delivered; + int32_t msdu_enqued; + int32_t mpdu_enqued; + int32_t wmm_drop; + int32_t local_enqued; + int32_t local_freed; + int32_t hw_queued; + int32_t hw_reaped; + int32_t underrun; + uint32_t hw_paused; + int32_t tx_abort; + int32_t mpdus_requed; + uint32_t tx_ko; + uint32_t tx_xretry; + uint32_t data_rc; + uint32_t self_triggers; + uint32_t sw_retry_failure; + uint32_t illgl_rate_phy_err; + uint32_t pdev_cont_xretry; + uint32_t pdev_tx_timeout; + uint32_t pdev_resets; + uint32_t stateless_tid_alloc_failure; + uint32_t phy_underrun; + uint32_t txop_ovf; + uint32_t seq_posted; + uint32_t seq_failed_queueing; + uint32_t seq_completed; + uint32_t seq_restarted; + uint32_t mu_seq_posted; + int32_t mpdus_sw_flush; + int32_t mpdus_hw_filter; + int32_t mpdus_truncated; + int32_t mpdus_ack_failed; + int32_t mpdus_expired; + uint32_t mc_drop; +} wmi_host_dbg_tx_stats; + +/** + * struct wmi_host_dbg_rx_stats - RX Debug stats + * @mid_ppdu_route_change: Cnts any change in ring routing mid-ppdu + * @status_rcvd: Total number of statuses processed + * @r0_frags: Extra frags on rings 0 + * @r1_frags: Extra frags on rings 1 + * @r2_frags: Extra frags on rings 2 + * @r3_frags: Extra frags on rings 3 + * @htt_msdus: MSDUs delivered to HTT + * @htt_mpdus: MPDUs delivered to HTT + * @loc_msdus: MSDUs delivered to local stack + * @loc_mpdus: MPDUS delivered to local stack + * @oversize_amsdu: AMSDUs that have more MSDUs than the status ring size + * @phy_errs: Number of PHY errors + * @phy_err_drop: Number of PHY errors drops + * @mpdu_errs: Number of mpdu errors - FCS, MIC, ENC etc. + * @pdev_rx_timeout: Number of rx inactivity timeouts + * @rx_ovfl_errs: Number of rx overflow errors. 
+ */ +typedef struct { + int32_t mid_ppdu_route_change; + int32_t status_rcvd; + int32_t r0_frags; + int32_t r1_frags; + int32_t r2_frags; + int32_t r3_frags; + int32_t htt_msdus; + int32_t htt_mpdus; + int32_t loc_msdus; + int32_t loc_mpdus; + int32_t oversize_amsdu; + int32_t phy_errs; + int32_t phy_err_drop; + int32_t mpdu_errs; + uint32_t pdev_rx_timeout; + int32_t rx_ovfl_errs; +} wmi_host_dbg_rx_stats; + +/** struct wmi_host_dbg_mem_stats - memory stats + * @iram_free_size: IRAM free size on target + * @dram_free_size: DRAM free size on target + * @sram_free_size: SRAM free size on target + */ +typedef struct { + uint32_t iram_free_size; + uint32_t dram_free_size; + /* Only Non-TLV */ + uint32_t sram_free_size; +} wmi_host_dbg_mem_stats; + +typedef struct { + /* Only TLV */ + int32_t dummy;/* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */ +} wmi_host_dbg_peer_stats; + +/** + * struct wmi_host_dbg_stats - host debug stats + * @tx: TX stats of type wmi_host_dbg_tx_stats + * @rx: RX stats of type wmi_host_dbg_rx_stats + * @mem: Memory stats of type wmi_host_dbg_mem_stats + * @peer: peer stats of type wmi_host_dbg_peer_stats + */ +typedef struct { + wmi_host_dbg_tx_stats tx; + wmi_host_dbg_rx_stats rx; + wmi_host_dbg_mem_stats mem; + wmi_host_dbg_peer_stats peer; +} wmi_host_dbg_stats; + +/** + * struct wmi_host_pdev_stats - PDEV stats + * @chan_nf: Channel noise floor + * @tx_frame_count: TX frame count + * @rx_frame_count: RX frame count + * @rx_clear_count: rx clear count + * @cycle_count: cycle count + * @phy_err_count: Phy error count + * @chan_tx_pwr: Channel Tx Power + * @pdev_stats: WAL dbg stats + * @ackRcvBad: + * @rtsBad: + * @rtsGood: + * @fcsBad: + * @noBeacons: + * @mib_int_count: + */ +typedef struct { + int32_t chan_nf; + uint32_t tx_frame_count; + uint32_t rx_frame_count; + uint32_t rx_clear_count; + uint32_t cycle_count; + uint32_t phy_err_count; + uint32_t chan_tx_pwr; + wmi_host_dbg_stats pdev_stats; + uint32_t ackRcvBad; + uint32_t 
rtsBad; + uint32_t rtsGood; + uint32_t fcsBad; + uint32_t noBeacons; + uint32_t mib_int_count; +} wmi_host_pdev_stats; + + +/** + * struct wmi_unit_test_event - Structure corresponding to WMI Unit test event + * @vdev_id: VDEV ID + * @module_id: MODULE ID + * @diag_token: Diag Token (the number that was generated in the unit-test cmd) + * @flag: flag has 2 bits 0x1 indicates status, and 0x2 indicates done-bit + * @payload_len: payload_len (blindly copied from payload_len field in WMI) + * @buffer_len: actual number of data bytes in the variable data size TLV + * buffer_len is likely to be the nearest multiple of 4 (from + * payload_len). both buffer_len and payload_len need to be + * passed to wifitool so that the driver can be agnostic + * regarding these differences. + * @buffer: data buffer + */ +typedef struct { + uint32_t vdev_id; + uint32_t module_id; + uint32_t diag_token; + uint32_t flag; + uint32_t payload_len; + uint32_t buffer_len; + uint8_t buffer[1]; +} wmi_unit_test_event; + + +/** + * struct wmi_host_snr_info - WMI host Signal to noise ration info + * @bcn_snr: beacon SNR + * @dat_snr: Data frames SNR + */ +typedef struct { + int32_t bcn_snr; + int32_t dat_snr; +} wmi_host_snr_info; + +#define WMI_HOST_MAX_TX_RATE_VALUES 10 /*Max Tx Rates */ +#define WMI_HOST_MAX_RSSI_VALUES 10 /*Max Rssi values */ + +/* The WLAN_MAX_AC macro cannot be changed without breaking + * * WMI compatibility. + * * The maximum value of access category + * */ +#define WMI_HOST_WLAN_MAX_AC 4 + +/* The WMI_HOST_MAX_CHAINS macro cannot be changed without breaking WMI + * compatibility. 
+ * The maximum value of number of chains + */ +#define WMI_HOST_MAX_CHAINS 8 + +/** + * struct wmi_host_vdev_stats - vdev stats structure + * @vdev_id: unique id identifying the VDEV, generated by the caller + * Rest all Only TLV + * @vdev_snr: wmi_host_snr_info + * @tx_frm_cnt: Total number of packets(per AC) that were successfully + * transmitted (with and without retries, + * including multi-cast, broadcast) + * @rx_frm_cnt: Total number of packets that were successfully received + * (after appropriate filter rules including multi-cast, broadcast) + * @multiple_retry_cnt: The number of MSDU packets and MMPDU frames per AC + * that the 802.11 station successfully transmitted after + * more than one retransmission attempt + * @fail_cnt: Total number packets(per AC) failed to transmit + * @rts_fail_cnt: Total number of RTS/CTS sequence failures for transmission + * of a packet + * @rts_succ_cnt: Total number of RTS/CTS sequence success for transmission + * of a packet + * @rx_err_cnt: The receive error count. 
HAL will provide the + * RxP FCS error global + * @rx_discard_cnt: The sum of the receive error count and + * dropped-receive-buffer error count (FCS error) + * @ack_fail_cnt: Total number packets failed transmit because of no + * ACK from the remote entity + * @tx_rate_history:History of last ten transmit rate, in units of 500 kbit/sec + * @bcn_rssi_history: History of last ten Beacon rssi of the connected Bss + */ +typedef struct { + uint32_t vdev_id; + /* Rest all Only TLV */ + wmi_host_snr_info vdev_snr; + uint32_t tx_frm_cnt[WMI_HOST_WLAN_MAX_AC]; + uint32_t rx_frm_cnt; + uint32_t multiple_retry_cnt[WMI_HOST_WLAN_MAX_AC]; + uint32_t fail_cnt[WMI_HOST_WLAN_MAX_AC]; + uint32_t rts_fail_cnt; + uint32_t rts_succ_cnt; + uint32_t rx_err_cnt; + uint32_t rx_discard_cnt; + uint32_t ack_fail_cnt; + uint32_t tx_rate_history[WMI_HOST_MAX_TX_RATE_VALUES]; + uint32_t bcn_rssi_history[WMI_HOST_MAX_RSSI_VALUES]; +} wmi_host_vdev_stats; + +/** + * struct wmi_host_bcn_stats - beacon stats structure + * @vdev_id: unique id identifying the VDEV, generated by the caller + * @tx_bcn_succ_cnt: Total number of beacon frame transmitted successfully + * @tx_bcn_outage_cnt: Total number of failed beacons + */ +typedef struct { + uint32_t vdev_id; + uint32_t tx_bcn_succ_cnt; + uint32_t tx_bcn_outage_cnt; +} wmi_host_bcn_stats; + +/** + * struct wmi_host_vdev_extd_stats - VDEV extended stats + * @vdev_id: unique id identifying the VDEV, generated by the caller + * @ppdu_aggr_cnt: No of Aggrs Queued to HW + * @ppdu_noack: No of PPDU's not Acked includes both aggr and nonaggr's + * @mpdu_queued: No of MPDU/Subframes's queued to HW in Aggregates + * @ppdu_nonaggr_cnt: No of NonAggr/MPDU/Subframes's queued to HW + * in Legacy NonAggregates + * @mpdu_sw_requed: No of MPDU/Subframes's SW requeued includes + * both Aggr and NonAggr + * @mpdu_suc_retry: No of MPDU/Subframes's transmitted Successfully + * after Single/mul HW retry + * @mpdu_suc_multitry: No of MPDU/Subframes's transmitted Success + 
* after Multiple HW retry + * @mpdu_fail_retry: No of MPDU/Subframes's failed transmission + * after Multiple HW retry + * @reserved[13]: for future extensions set to 0x0 + */ +typedef struct { + uint32_t vdev_id; + uint32_t ppdu_aggr_cnt; + uint32_t ppdu_noack; + uint32_t mpdu_queued; + uint32_t ppdu_nonaggr_cnt; + uint32_t mpdu_sw_requed; + uint32_t mpdu_suc_retry; + uint32_t mpdu_suc_multitry; + uint32_t mpdu_fail_retry; + uint32_t reserved[13]; +} wmi_host_vdev_extd_stats; + +/** + * struct wmi_host_vdev_nac_rssi_event - VDEV nac rssi stats + * @vdev_id: unique id identifying the VDEV, generated by the caller + * @last_rssi: rssi + * @avg_rssi: averge rssi + * @rssi_seq_num: rssi sequence number + */ +struct wmi_host_vdev_nac_rssi_event { + uint32_t vdev_id; + uint32_t last_rssi; + uint32_t avg_rssi; + uint32_t rssi_seq_num; +}; + + +/** + * struct wmi_host_per_chain_rssi_stats - VDEV nac rssi stats + * @vdev_id: unique id identifying the VDEV, generated by the caller + * @rssi_avg_beacon: per chain avg rssi for beacon + * @rssi_avg_data: per chain avg rssi for data + * @peer_macaddr: peer macaddr + */ +struct wmi_host_per_chain_rssi_stats { + uint32_t vdev_id; + int32_t rssi_avg_beacon[WMI_HOST_MAX_CHAINS]; + int32_t rssi_avg_data[WMI_HOST_MAX_CHAINS]; + wmi_host_mac_addr peer_macaddr; +}; + +/** + * struct wmi_host_peer_stats - peer stats + * @peer_macaddr: peer MAC address + * @peer_rssi: rssi + * @peer_rssi_seq_num: rssi sequence number + * @peer_tx_rate: last tx data rate used for peer + * @peer_rx_rate: last rx data rate used for peer + * @currentper: Current PER + * @retries: Retries happened during transmission + * @txratecount: Maximum Aggregation Size + * @max4msframelen: Max4msframelen of tx rates used + * @totalsubframes: Total no of subframes + * @txbytes: No of bytes transmitted to the client + * @nobuffs[4]: Packet Loss due to buffer overflows + * @excretries[4]: Packet Loss due to excessive retries + * @peer_rssi_changed: how many times peer's 
RSSI changed by a + * non-negligible amount + */ +typedef struct { + wmi_host_mac_addr peer_macaddr; + uint32_t peer_rssi; + uint32_t peer_rssi_seq_num; + uint32_t peer_tx_rate; + uint32_t peer_rx_rate; + uint32_t currentper; + uint32_t retries; + uint32_t txratecount; + uint32_t max4msframelen; + uint32_t totalsubframes; + uint32_t txbytes; + uint32_t nobuffs[4]; + uint32_t excretries[4]; + uint32_t peer_rssi_changed; +} wmi_host_peer_stats; + +typedef struct { + uint32_t dummy; +} wmi_host_bcnflt_stats; + +/** + * struct wmi_host_chan_stats - WMI chan stats + * @chan_mhz: Primary channel freq of the channel for which stats are sent + * @sampling_period_us: Time spent on the channel + * @rx_clear_count: Aggregate duration over a sampling period for + * which channel activity was observed + * @tx_duration_us: Accumalation of the TX PPDU duration over a sampling period + * @rx_duration_us: Accumalation of the RX PPDU duration over a sampling period + */ +typedef struct { + uint32_t chan_mhz; + uint32_t sampling_period_us; + uint32_t rx_clear_count; + uint32_t tx_duration_us; + uint32_t rx_duration_us; +} wmi_host_chan_stats; + +#define WMI_EVENT_ID_INVALID 0 +/** + * Host based ENUM IDs for events to abstract target enums for event_id + */ +typedef enum { + wmi_service_ready_event_id = 0, + wmi_ready_event_id, + wmi_dbg_msg_event_id, + wmi_scan_event_id, + wmi_echo_event_id, + wmi_update_stats_event_id, + wmi_inst_rssi_stats_event_id, + wmi_vdev_start_resp_event_id, + wmi_vdev_standby_req_event_id, + wmi_vdev_resume_req_event_id, + wmi_vdev_stopped_event_id, + wmi_peer_sta_kickout_event_id, + wmi_host_swba_event_id, + wmi_tbttoffset_update_event_id, + wmi_mgmt_rx_event_id, + wmi_chan_info_event_id, + wmi_phyerr_event_id, + wmi_roam_event_id, + wmi_profile_match, + wmi_debug_print_event_id, + wmi_pdev_qvit_event_id, + wmi_wlan_profile_data_event_id, + wmi_rtt_meas_report_event_id, + wmi_tsf_meas_report_event_id, + wmi_rtt_error_report_event_id, + 
wmi_rtt_keepalive_event_id, + wmi_oem_cap_event_id, + wmi_oem_meas_report_event_id, + wmi_oem_report_event_id, + wmi_nan_event_id, + wmi_wow_wakeup_host_event_id, + wmi_gtk_offload_status_event_id, + wmi_gtk_rekey_fail_event_id, + wmi_dcs_interference_event_id, + wmi_pdev_tpc_config_event_id, + wmi_csa_handling_event_id, + wmi_gpio_input_event_id, + wmi_peer_ratecode_list_event_id, + wmi_generic_buffer_event_id, + wmi_mcast_buf_release_event_id, + wmi_mcast_list_ageout_event_id, + wmi_vdev_get_keepalive_event_id, + wmi_wds_peer_event_id, + wmi_peer_sta_ps_statechg_event_id, + wmi_pdev_fips_event_id, + wmi_tt_stats_event_id, + wmi_pdev_channel_hopping_event_id, + wmi_pdev_ani_cck_level_event_id, + wmi_pdev_ani_ofdm_level_event_id, + wmi_pdev_reserve_ast_entry_event_id, + wmi_pdev_nfcal_power_event_id, + wmi_pdev_tpc_event_id, + wmi_pdev_get_ast_info_event_id, + wmi_pdev_temperature_event_id, + wmi_pdev_nfcal_power_all_channels_event_id, + wmi_pdev_bss_chan_info_event_id, + wmi_mu_report_event_id, + wmi_pdev_utf_event_id, + wmi_pdev_dump_event_id, + wmi_tx_pause_event_id, + wmi_dfs_radar_event_id, + wmi_pdev_l1ss_track_event_id, + wmi_service_ready_ext_event_id, + wmi_vdev_install_key_complete_event_id, + wmi_vdev_mcc_bcn_intvl_change_req_event_id, + wmi_vdev_tsf_report_event_id, + wmi_peer_info_event_id, + wmi_peer_tx_fail_cnt_thr_event_id, + wmi_peer_estimated_linkspeed_event_id, + wmi_peer_state_event_id, + wmi_offload_bcn_tx_status_event_id, + wmi_offload_prob_resp_tx_status_event_id, + wmi_mgmt_tx_completion_event_id, + wmi_tx_delba_complete_event_id, + wmi_tx_addba_complete_event_id, + wmi_ba_rsp_ssn_event_id, + wmi_aggr_state_trig_event_id, + wmi_roam_synch_event_id, + wmi_roam_synch_frame_event_id, + wmi_p2p_disc_event_id, + wmi_p2p_noa_event_id, + wmi_p2p_lo_stop_event_id, + wmi_vdev_add_macaddr_rx_filter_event_id, + wmi_pdev_resume_event_id, + wmi_d0_wow_disable_ack_event_id, + wmi_wow_initial_wakeup_event_id, + wmi_stats_ext_event_id, + 
wmi_iface_link_stats_event_id, + wmi_peer_link_stats_event_id, + wmi_radio_link_stats_link, + wmi_update_fw_mem_dump_event_id, + wmi_diag_event_id_log_supported_event_id, + wmi_nlo_match_event_id, + wmi_nlo_scan_complete_event_id, + wmi_apfind_event_id, + wmi_passpoint_match_event_id, + wmi_chatter_pc_query_event_id, + wmi_pdev_ftm_intg_event_id, + wmi_wlan_freq_avoid_event_id, + wmi_thermal_mgmt_event_id, + wmi_diag_container_event_id, + wmi_host_auto_shutdown_event_id, + wmi_update_whal_mib_stats_event_id, + wmi_update_vdev_rate_stats_event_id, + wmi_diag_event_id, + wmi_unit_test_event_id, + wmi_ocb_set_sched_event_id, + wmi_dbg_mesg_flush_complete_event_id, + wmi_rssi_breach_event_id, + wmi_uploadh_event_id, + wmi_captureh_event_id, + wmi_rfkill_state_change_event_id, + wmi_tdls_peer_event_id, + wmi_batch_scan_enabled_event_id, + wmi_batch_scan_result_event_id, + wmi_lpi_result_event_id, + wmi_lpi_status_event_id, + wmi_lpi_handoff_event_id, + wmi_extscan_start_stop_event_id, + wmi_extscan_operation_event_id, + wmi_extscan_table_usage_event_id, + wmi_extscan_cached_results_event_id, + wmi_extscan_wlan_change_results_event_id, + wmi_extscan_hotlist_match_event_id, + wmi_extscan_capabilities_event_id, + wmi_extscan_hotlist_ssid_match_event_id, + wmi_mdns_stats_event_id, + wmi_sap_ofl_add_sta_event_id, + wmi_sap_ofl_del_sta_event_id, + wmi_ocb_set_config_resp_event_id, + wmi_ocb_get_tsf_timer_resp_event_id, + wmi_dcc_get_stats_resp_event_id, + wmi_dcc_update_ndl_resp_event_id, + wmi_dcc_stats_event_id, + wmi_soc_set_hw_mode_resp_event_id, + wmi_soc_hw_mode_transition_event_id, + wmi_soc_set_dual_mac_config_resp_event_id, + wmi_tx_data_traffic_ctrl_event_id, + wmi_peer_tx_mu_txmit_count_event_id, + wmi_peer_gid_userpos_list_event_id, + wmi_pdev_check_cal_version_event_id, + wmi_atf_peer_stats_event_id, + wmi_peer_delete_response_event_id, + wmi_pdev_csa_switch_count_status_event_id, + wmi_reg_chan_list_cc_event_id, + wmi_offchan_data_tx_completion_event, + 
wmi_dfs_cac_complete_id, + wmi_dfs_radar_detection_event_id, + wmi_ext_tbttoffset_update_event_id, + wmi_11d_new_country_event_id, + wmi_get_arp_stats_req_id, + wmi_service_available_event_id, + wmi_update_rcpi_event_id, + wmi_pdev_wds_entry_list_event_id, + wmi_ndp_initiator_rsp_event_id, + wmi_ndp_indication_event_id, + wmi_ndp_confirm_event_id, + wmi_ndp_responder_rsp_event_id, + wmi_ndp_end_indication_event_id, + wmi_ndp_end_rsp_event_id, + wmi_ndl_schedule_update_event_id, + wmi_oem_response_event_id, + wmi_peer_stats_info_event_id, + wmi_pdev_chip_power_stats_event_id, + wmi_ap_ps_egap_info_event_id, + wmi_peer_assoc_conf_event_id, + wmi_vdev_delete_resp_event_id, + wmi_apf_capability_info_event_id, + wmi_vdev_encrypt_decrypt_data_rsp_event_id, + wmi_report_rx_aggr_failure_event_id, + wmi_pdev_chip_pwr_save_failure_detect_event_id, + wmi_peer_antdiv_info_event_id, + wmi_pdev_set_hw_mode_rsp_event_id, + wmi_pdev_hw_mode_transition_event_id, + wmi_pdev_set_mac_config_resp_event_id, + wmi_coex_bt_activity_event_id, + wmi_mgmt_tx_bundle_completion_event_id, + wmi_radio_tx_power_level_stats_event_id, + wmi_report_stats_event_id, + wmi_dma_buf_release_event_id, + wmi_sap_obss_detection_report_event_id, + wmi_obss_color_collision_report_event_id, + wmi_host_swfda_event_id, + wmi_sar_get_limits_event_id, + wmi_pdev_div_rssi_antid_event_id, +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) + wmi_host_dfs_status_check_event_id, +#endif + wmi_twt_enable_complete_event_id, + wmi_apf_get_vdev_work_memory_resp_event_id, + wmi_roam_scan_stats_event_id, + wmi_wlan_sar2_result_event_id, + wmi_vdev_bcn_reception_stats_event_id, + wmi_roam_blacklist_event_id, + wmi_pdev_cold_boot_cal_event_id, + wmi_vdev_get_mws_coex_state_eventid, + wmi_vdev_get_mws_coex_dpwb_state_eventid, + wmi_vdev_get_mws_coex_tdm_state_eventid, + wmi_vdev_get_mws_coex_idrx_state_eventid, + wmi_vdev_get_mws_coex_antenna_sharing_state_eventid, + 
wmi_coex_report_antenna_isolation_event_id, + wmi_events_max, +} wmi_conv_event_id; + +#define WMI_UNAVAILABLE_PARAM 0 +/** + * Host based ENUM IDs for PDEV params to abstract target enums + */ +typedef enum { + wmi_pdev_param_tx_chain_mask = 0, + wmi_pdev_param_rx_chain_mask, + wmi_pdev_param_txpower_limit2g, + wmi_pdev_param_txpower_limit5g, + wmi_pdev_param_txpower_scale, + wmi_pdev_param_beacon_gen_mode, + wmi_pdev_param_beacon_tx_mode, + wmi_pdev_param_resmgr_offchan_mode, + wmi_pdev_param_protection_mode, + wmi_pdev_param_dynamic_bw, + wmi_pdev_param_non_agg_sw_retry_th, + wmi_pdev_param_agg_sw_retry_th, + wmi_pdev_param_sta_kickout_th, + wmi_pdev_param_ac_aggrsize_scaling, + wmi_pdev_param_ltr_enable, + wmi_pdev_param_ltr_ac_latency_be, + wmi_pdev_param_ltr_ac_latency_bk, + wmi_pdev_param_ltr_ac_latency_vi, + wmi_pdev_param_ltr_ac_latency_vo, + wmi_pdev_param_ltr_ac_latency_timeout, + wmi_pdev_param_ltr_sleep_override, + wmi_pdev_param_ltr_rx_override, + wmi_pdev_param_ltr_tx_activity_timeout, + wmi_pdev_param_l1ss_enable, + wmi_pdev_param_dsleep_enable, + wmi_pdev_param_pcielp_txbuf_flush, + wmi_pdev_param_pcielp_txbuf_watermark, + wmi_pdev_param_pcielp_txbuf_tmo_en, + wmi_pdev_param_pcielp_txbuf_tmo_value, + wmi_pdev_param_pdev_stats_update_period, + wmi_pdev_param_vdev_stats_update_period, + wmi_pdev_param_peer_stats_update_period, + wmi_pdev_param_bcnflt_stats_update_period, + wmi_pdev_param_pmf_qos, + wmi_pdev_param_arp_ac_override, + wmi_pdev_param_dcs, + wmi_pdev_param_ani_enable, + wmi_pdev_param_ani_poll_period, + wmi_pdev_param_ani_listen_period, + wmi_pdev_param_ani_ofdm_level, + wmi_pdev_param_ani_cck_level, + wmi_pdev_param_dyntxchain, + wmi_pdev_param_proxy_sta, + wmi_pdev_param_idle_ps_config, + wmi_pdev_param_power_gating_sleep, + wmi_pdev_param_aggr_burst, + wmi_pdev_param_rx_decap_mode, + wmi_pdev_param_fast_channel_reset, + wmi_pdev_param_burst_dur, + wmi_pdev_param_burst_enable, + wmi_pdev_param_smart_antenna_default_antenna, + 
wmi_pdev_param_igmpmld_override, + wmi_pdev_param_igmpmld_tid, + wmi_pdev_param_antenna_gain, + wmi_pdev_param_rx_filter, + wmi_pdev_set_mcast_to_ucast_tid, + wmi_pdev_param_proxy_sta_mode, + wmi_pdev_param_set_mcast2ucast_mode, + wmi_pdev_param_set_mcast2ucast_buffer, + wmi_pdev_param_remove_mcast2ucast_buffer, + wmi_pdev_peer_sta_ps_statechg_enable, + wmi_pdev_param_igmpmld_ac_override, + wmi_pdev_param_block_interbss, + wmi_pdev_param_set_disable_reset_cmdid, + wmi_pdev_param_set_msdu_ttl_cmdid, + wmi_pdev_param_set_ppdu_duration_cmdid, + wmi_pdev_param_txbf_sound_period_cmdid, + wmi_pdev_param_set_promisc_mode_cmdid, + wmi_pdev_param_set_burst_mode_cmdid, + wmi_pdev_param_en_stats, + wmi_pdev_param_mu_group_policy, + wmi_pdev_param_noise_detection, + wmi_pdev_param_noise_threshold, + wmi_pdev_param_dpd_enable, + wmi_pdev_param_set_mcast_bcast_echo, + wmi_pdev_param_atf_strict_sch, + wmi_pdev_param_atf_sched_duration, + wmi_pdev_param_ant_plzn, + wmi_pdev_param_mgmt_retry_limit, + wmi_pdev_param_sensitivity_level, + wmi_pdev_param_signed_txpower_2g, + wmi_pdev_param_signed_txpower_5g, + wmi_pdev_param_enable_per_tid_amsdu, + wmi_pdev_param_enable_per_tid_ampdu, + wmi_pdev_param_cca_threshold, + wmi_pdev_param_rts_fixed_rate, + wmi_pdev_param_cal_period, + wmi_pdev_param_pdev_reset, + wmi_pdev_param_wapi_mbssid_offset, + wmi_pdev_param_arp_srcaddr, + wmi_pdev_param_arp_dstaddr, + wmi_pdev_param_txpower_decr_db, + wmi_pdev_param_rx_batchmode, + wmi_pdev_param_packet_aggr_delay, + wmi_pdev_param_atf_obss_noise_sch, + wmi_pdev_param_atf_obss_noise_scaling_factor, + wmi_pdev_param_cust_txpower_scale, + wmi_pdev_param_atf_dynamic_enable, + wmi_pdev_param_atf_ssid_group_policy, + wmi_pdev_param_rfkill_enable, + wmi_pdev_param_hw_rfkill_config, + wmi_pdev_param_low_power_rf_enable, + wmi_pdev_param_l1ss_track, + wmi_pdev_param_hyst_en, + wmi_pdev_param_power_collapse_enable, + wmi_pdev_param_led_sys_state, + wmi_pdev_param_led_enable, + 
wmi_pdev_param_audio_over_wlan_latency, + wmi_pdev_param_audio_over_wlan_enable, + wmi_pdev_param_whal_mib_stats_update_enable, + wmi_pdev_param_vdev_rate_stats_update_period, + wmi_pdev_param_cts_cbw, + wmi_pdev_param_wnts_config, + wmi_pdev_param_adaptive_early_rx_enable, + wmi_pdev_param_adaptive_early_rx_min_sleep_slop, + wmi_pdev_param_adaptive_early_rx_inc_dec_step, + wmi_pdev_param_early_rx_fix_sleep_slop, + wmi_pdev_param_bmiss_based_adaptive_bto_enable, + wmi_pdev_param_bmiss_bto_min_bcn_timeout, + wmi_pdev_param_bmiss_bto_inc_dec_step, + wmi_pdev_param_bto_fix_bcn_timeout, + wmi_pdev_param_ce_based_adaptive_bto_enable, + wmi_pdev_param_ce_bto_combo_ce_value, + wmi_pdev_param_tx_chain_mask_2g, + wmi_pdev_param_rx_chain_mask_2g, + wmi_pdev_param_tx_chain_mask_5g, + wmi_pdev_param_rx_chain_mask_5g, + wmi_pdev_param_tx_chain_mask_cck, + wmi_pdev_param_tx_chain_mask_1ss, + wmi_pdev_param_enable_btcoex, + wmi_pdev_param_atf_peer_stats, + wmi_pdev_param_btcoex_cfg, + wmi_pdev_param_mesh_mcast_enable, + wmi_pdev_param_tx_ack_timeout, + wmi_pdev_param_soft_tx_chain_mask, + wmi_pdev_param_cck_tx_enable, + + wmi_pdev_param_max, +} wmi_conv_pdev_params_id; + + +/** + * Host based ENUM IDs for VDEV params to abstract target enums + */ +typedef enum { + wmi_vdev_param_rts_threshold = 0, + wmi_vdev_param_fragmentation_threshold, + wmi_vdev_param_beacon_interval, + wmi_vdev_param_listen_interval, + wmi_vdev_param_multicast_rate, + wmi_vdev_param_mgmt_tx_rate, + wmi_vdev_param_slot_time, + wmi_vdev_param_preamble, + wmi_vdev_param_swba_time, + wmi_vdev_stats_update_period, + wmi_vdev_pwrsave_ageout_time, + wmi_vdev_host_swba_interval, + wmi_vdev_param_dtim_period, + wmi_vdev_oc_scheduler_air_time_limit, + wmi_vdev_param_wds, + wmi_vdev_param_atim_window, + wmi_vdev_param_bmiss_count_max, + wmi_vdev_param_bmiss_first_bcnt, + wmi_vdev_param_bmiss_final_bcnt, + wmi_vdev_param_feature_wmm, + wmi_vdev_param_chwidth, + wmi_vdev_param_chextoffset, + 
wmi_vdev_param_disable_htprotection, + wmi_vdev_param_sta_quickkickout, + wmi_vdev_param_mgmt_rate, + wmi_vdev_param_protection_mode, + wmi_vdev_param_fixed_rate, + wmi_vdev_param_sgi, + wmi_vdev_param_ldpc, + wmi_vdev_param_tx_stbc, + wmi_vdev_param_rx_stbc, + wmi_vdev_param_intra_bss_fwd, + wmi_vdev_param_def_keyid, + wmi_vdev_param_nss, + wmi_vdev_param_bcast_data_rate, + wmi_vdev_param_mcast_data_rate, + wmi_vdev_param_mcast_indicate, + wmi_vdev_param_dhcp_indicate, + wmi_vdev_param_unknown_dest_indicate, + wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs, + wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs, + wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs, + wmi_vdev_param_ap_enable_nawds, + wmi_vdev_param_mcast2ucast_set, + wmi_vdev_param_enable_rtscts, + wmi_vdev_param_rc_num_retries, + wmi_vdev_param_txbf, + wmi_vdev_param_packet_powersave, + wmi_vdev_param_drop_unencry, + wmi_vdev_param_tx_encap_type, + wmi_vdev_param_ap_detect_out_of_sync_sleeping_sta_time_secs, + wmi_vdev_param_cabq_maxdur, + wmi_vdev_param_mfptest_set, + wmi_vdev_param_rts_fixed_rate, + wmi_vdev_param_vht_sgimask, + wmi_vdev_param_vht80_ratemask, + wmi_vdev_param_early_rx_adjust_enable, + wmi_vdev_param_early_rx_tgt_bmiss_num, + wmi_vdev_param_early_rx_bmiss_sample_cycle, + wmi_vdev_param_early_rx_slop_step, + wmi_vdev_param_early_rx_init_slop, + wmi_vdev_param_early_rx_adjust_pause, + wmi_vdev_param_proxy_sta, + wmi_vdev_param_meru_vc, + wmi_vdev_param_rx_decap_type, + wmi_vdev_param_bw_nss_ratemask, + wmi_vdev_param_sensor_ap, + wmi_vdev_param_beacon_rate, + wmi_vdev_param_dtim_enable_cts, + wmi_vdev_param_sta_kickout, + wmi_vdev_param_tx_pwrlimit, + wmi_vdev_param_snr_num_for_cal, + wmi_vdev_param_roam_fw_offload, + wmi_vdev_param_enable_rmc, + wmi_vdev_param_ibss_max_bcn_lost_ms, + wmi_vdev_param_max_rate, + wmi_vdev_param_early_rx_drift_sample, + wmi_vdev_param_set_ibss_tx_fail_cnt_thr, + wmi_vdev_param_ebt_resync_timeout, + 
wmi_vdev_param_aggr_trig_event_enable, + wmi_vdev_param_is_ibss_power_save_allowed, + wmi_vdev_param_is_power_collapse_allowed, + wmi_vdev_param_is_awake_on_txrx_enabled, + wmi_vdev_param_inactivity_cnt, + wmi_vdev_param_txsp_end_inactivity_time_ms, + wmi_vdev_param_dtim_policy, + wmi_vdev_param_ibss_ps_warmup_time_secs, + wmi_vdev_param_ibss_ps_1rx_chain_in_atim_window_enable, + wmi_vdev_param_rx_leak_window, + wmi_vdev_param_stats_avg_factor, + wmi_vdev_param_disconnect_th, + wmi_vdev_param_rtscts_rate, + wmi_vdev_param_mcc_rtscts_protection_enable, + wmi_vdev_param_mcc_broadcast_probe_enable, + wmi_vdev_param_capabilities, + wmi_vdev_param_mgmt_tx_power, + wmi_vdev_param_atf_ssid_sched_policy, + wmi_vdev_param_disable_dyn_bw_rts, + wmi_vdev_param_ampdu_subframe_size_per_ac, + wmi_vdev_param_he_dcm_enable, + wmi_vdev_param_he_bss_color, + wmi_vdev_param_he_range_ext_enable, + wmi_vdev_param_set_hemu_mode, + wmi_vdev_param_set_he_ltf, + wmi_vdev_param_set_heop, + wmi_vdev_param_disable_cabq, + wmi_vdev_param_rate_dropdown_bmap, + wmi_vdev_param_tx_power, + wmi_vdev_param_set_ba_mode, + wmi_vdev_param_autorate_misc_cfg, + wmi_vdev_param_amsdu_subframe_size_per_ac, + + wmi_vdev_param_max, +} wmi_conv_vdev_param_id; + +/** + * Host based ENUM IDs for service bits to abstract target enums + */ +typedef enum { + wmi_service_beacon_offload = 0, + wmi_service_scan_offload, + wmi_service_roam_offload, + wmi_service_bcn_miss_offload, + wmi_service_sta_pwrsave, + wmi_service_sta_advanced_pwrsave, + wmi_service_ap_uapsd, + wmi_service_ap_dfs, + wmi_service_11ac, + wmi_service_blockack, + wmi_service_phyerr, + wmi_service_bcn_filter, + wmi_service_rtt, + wmi_service_ratectrl, + wmi_service_wow, + wmi_service_ratectrl_cache, + wmi_service_iram_tids, + wmi_service_burst, + wmi_service_smart_antenna_sw_support, + wmi_service_gtk_offload, + wmi_service_scan_sch, + wmi_service_csa_offload, + wmi_service_chatter, + wmi_service_coex_freqavoid, + wmi_service_packet_power_save, + 
wmi_service_force_fw_hang, + wmi_service_smart_antenna_hw_support, + wmi_service_gpio, + wmi_sta_uapsd_basic_auto_trig, + wmi_sta_uapsd_var_auto_trig, + wmi_service_sta_keep_alive, + wmi_service_tx_encap, + wmi_service_ap_ps_detect_out_of_sync, + wmi_service_early_rx, + wmi_service_enhanced_proxy_sta, + wmi_service_tt, + wmi_service_atf, + wmi_service_peer_caching, + wmi_service_coex_gpio, + wmi_service_aux_spectral_intf, + wmi_service_aux_chan_load_intf, + wmi_service_bss_channel_info_64, + wmi_service_ext_res_cfg_support, + wmi_service_mesh, + wmi_service_restrt_chnl_support, + wmi_service_roam_scan_offload, + wmi_service_arpns_offload, + wmi_service_nlo, + wmi_service_sta_dtim_ps_modulated_dtim, + wmi_service_sta_smps, + wmi_service_fwtest, + wmi_service_sta_wmmac, + wmi_service_tdls, + wmi_service_mcc_bcn_interval_change, + wmi_service_adaptive_ocs, + wmi_service_ba_ssn_support, + wmi_service_filter_ipsec_natkeepalive, + wmi_service_wlan_hb, + wmi_service_lte_ant_share_support, + wmi_service_batch_scan, + wmi_service_qpower, + wmi_service_plmreq, + wmi_service_thermal_mgmt, + wmi_service_rmc, + wmi_service_mhf_offload, + wmi_service_coex_sar, + wmi_service_bcn_txrate_override, + wmi_service_nan, + wmi_service_l1ss_stat, + wmi_service_estimate_linkspeed, + wmi_service_obss_scan, + wmi_service_tdls_offchan, + wmi_service_tdls_uapsd_buffer_sta, + wmi_service_tdls_uapsd_sleep_sta, + wmi_service_ibss_pwrsave, + wmi_service_lpass, + wmi_service_extscan, + wmi_service_d0wow, + wmi_service_hsoffload, + wmi_service_roam_ho_offload, + wmi_service_rx_full_reorder, + wmi_service_dhcp_offload, + wmi_service_sta_rx_ipa_offload_support, + wmi_service_mdns_offload, + wmi_service_sap_auth_offload, + wmi_service_dual_band_simultaneous_support, + wmi_service_ocb, + wmi_service_ap_arpns_offload, + wmi_service_per_band_chainmask_support, + wmi_service_packet_filter_offload, + wmi_service_mgmt_tx_htt, + wmi_service_mgmt_tx_wmi, + wmi_service_ext_msg, + wmi_service_mawc, + + 
wmi_service_peer_stats, + wmi_service_mesh_11s, + wmi_service_periodic_chan_stat_support, + wmi_service_tx_mode_push_only, + wmi_service_tx_mode_push_pull, + wmi_service_tx_mode_dynamic, + wmi_service_check_cal_version, + wmi_service_btcoex_duty_cycle, + wmi_service_4_wire_coex_support, + wmi_service_multiple_vdev_restart, + wmi_service_peer_assoc_conf, + wmi_service_egap, + wmi_service_sta_pmf_offload, + wmi_service_unified_wow_capability, + wmi_service_enterprise_mesh, + wmi_service_apf_offload, + wmi_service_sync_delete_cmds, + wmi_service_ratectrl_limit_max_min_rates, + wmi_service_nan_data, + wmi_service_nan_rtt, + wmi_service_11ax, + wmi_service_deprecated_replace, + wmi_service_tdls_conn_tracker_in_host_mode, + wmi_service_enhanced_mcast_filter, + wmi_service_half_rate_quarter_rate_support, + wmi_service_vdev_rx_filter, + wmi_service_p2p_listen_offload_support, + wmi_service_mark_first_wakeup_packet, + wmi_service_multiple_mcast_filter_set, + wmi_service_host_managed_rx_reorder, + wmi_service_flash_rdwr_support, + wmi_service_wlan_stats_report, + wmi_service_tx_msdu_id_new_partition_support, + wmi_service_dfs_phyerr_offload, + wmi_service_rcpi_support, + wmi_service_fw_mem_dump_support, + wmi_service_peer_stats_info, + wmi_service_regulatory_db, + wmi_service_11d_offload, + wmi_service_hw_data_filtering, + wmi_service_pkt_routing, + wmi_service_offchan_tx_wmi, + wmi_service_chan_load_info, + wmi_service_extended_nss_support, + wmi_service_ack_timeout, + wmi_service_widebw_scan, + wmi_service_bcn_offload_start_stop_support, + wmi_service_offchan_data_tid_support, + wmi_service_support_dma, + wmi_service_8ss_tx_bfee, + wmi_service_fils_support, + wmi_service_mawc_support, + wmi_service_wow_wakeup_by_timer_pattern, + wmi_service_11k_neighbour_report_support, + wmi_service_ap_obss_detection_offload, + wmi_service_bss_color_offload, + wmi_service_gmac_offload_support, + wmi_service_host_dfs_check_support, + wmi_service_dual_beacon_on_single_mac_scc_support, + 
wmi_service_dual_beacon_on_single_mac_mcc_support, + wmi_service_twt_requestor, + wmi_service_twt_responder, + wmi_service_listen_interval_offload_support, + wmi_service_per_vdev_chain_support, + wmi_service_new_htt_msg_format, + wmi_service_peer_unmap_cnf_support, + wmi_service_beacon_reception_stats, + wmi_service_vdev_latency_config, + wmi_service_sta_plus_sta_support, + wmi_service_tx_compl_tsf64, + wmi_service_three_way_coex_config_legacy, + wmi_services_max, +} wmi_conv_service_ids; +#define WMI_SERVICE_UNAVAILABLE 0xFFFF + +/** + * enum WMI_DBG_PARAM - Debug params + * @WMI_DBGLOG_LOG_LEVEL: Set the loglevel + * @WMI_DBGLOG_VAP_ENABLE: Enable VAP level debug + * @WMI_DBGLOG_VAP_DISABLE: Disable VAP level debug + * @WMI_DBGLOG_MODULE_ENABLE: Enable MODULE level debug + * @WMI_DBGLOG_MODULE_DISABLE: Disable MODULE level debug + * @WMI_DBGLOG_MOD_LOG_LEVEL: Enable MODULE level debug + * @WMI_DBGLOG_TYPE: set type of the debug output + * @WMI_DBGLOG_REPORT_ENABLE: Enable Disable debug + */ +typedef enum { + WMI_DBGLOG_LOG_LEVEL = 0x1, + WMI_DBGLOG_VAP_ENABLE, + WMI_DBGLOG_VAP_DISABLE, + WMI_DBGLOG_MODULE_ENABLE, + WMI_DBGLOG_MODULE_DISABLE, + WMI_DBGLOG_MOD_LOG_LEVEL, + WMI_DBGLOG_TYPE, + WMI_DBGLOG_REPORT_ENABLE +} WMI_DBG_PARAM; + +/** + * struct wmi_host_fw_ver - FW version in non-tlv target + * @sw_version: Versin info + * @sw_version_1: Second dword of version + */ +struct wmi_host_fw_ver { + uint32_t sw_version; + uint32_t sw_version_1; +}; + +/** + * struct wmi_host_fw_abi_ver - FW version in non-tlv target + * @sw_version: Versin info + * @abi_version: ABI version + */ +struct wmi_host_fw_abi_ver { + uint32_t sw_version; + uint32_t abi_version; +}; + +/** + * struct target_resource_config - Resource config sent from host to target + * abstracted out to include union of both configs + * @num_vdevs: Number vdevs configured + * @num_peers: Number of peers + * @num_active_peers: Number of active peers for peer cache + * @num_offload_peers: Number of offload 
peers + * @num_offload_reorder_buffs: number of offload reorder buffs + * @num_peer_keys: number of peer keys + * @num_tids: number of tids + * @ast_skid_limit: AST skid limit + * @tx_chain_mask: TX chain mask + * @rx_chain_mask: RX chain mask + * @rx_timeout_pri: RX reorder timeout per AC + * @rx_decap_mode: RX decap mode + * @scan_max_pending_req: Scan mac pending req + * @bmiss_offload_max_vdev: Beacom miss offload max vdevs + * @roam_offload_max_vdev: Roam offload max vdevs + * @roam_offload_max_ap_profiles: roam offload max ap profiles + * @num_mcast_groups: num mcast groups + * @num_mcast_table_elems: number of macst table elems + * @mcast2ucast_mode: mcast enhance mode + * @tx_dbg_log_size: DBG log buf size + * @num_wds_entries: number of WDS entries + * @dma_burst_size: DMA burst size. + * @mac_aggr_delim: Mac aggr delim + * @rx_skip_defrag_timeout_dup_detection_check: Defrag dup check in host? + * @vow_config: vow configuration + * @gtk_offload_max_vdev: Max vdevs for GTK offload + * @num_msdu_desc: Number of msdu desc + * @max_frag_entries: Max frag entries + * End common + * @max_peer_ext_stats: Max peer EXT stats + * @smart_ant_cap: Smart antenna capabilities + * @BK_Minfree: BIN configuration for BK traffic + * @BE_Minfree: BIN configuration for BE traffic + * @VI_Minfree: BIN configuration for VI traffic + * @VO_Minfree: BIN configuration for VO traffic + * @rx_batchmode: RX batch mode + * @tt_support: Thermal throttling support + * @atf_config: ATF config + * @mgmt_comp_evt_bundle_support: bundle support required for mgmt complete evt + * @tx_msdu_new_partition_id_support: new partiition id support for tx msdu + * @peer_unmap_conf_support: peer unmap conf support in fw + * @iphdr_pad_config: ipheader pad config + * @qwrap_config: Qwrap configuration + * @alloc_frag_desc_for_data_pkt: Frag desc for data + * Added in MCL + * @num_tdls_vdevs: + * @num_tdls_conn_table_entries: + * @beacon_tx_offload_max_vdev: + * @num_multicast_filter_entries: + * 
@num_wow_filters: + * @num_keep_alive_pattern: + * @keep_alive_pattern_size: + * @max_tdls_concurrent_sleep_sta: + * @max_tdls_concurrent_buffer_sta: + * @wmi_send_separate: + * @num_ocb_vdevs: + * @num_ocb_channels: + * @num_ocb_schedules: + * @num_packet_filters: maximum number of packet filter rules to support + * @num_max_sta_vdevs: maximum number of concurrent station vdevs to support + * @num_ns_ext_tuples_cfg: + * @apf_instruction_size: + * @max_bssid_rx_filters: + * @use_pdev_id: + * @max_num_dbs_scan_duty_cycle: max dbs can duty cycle value + * @cce_disable: disable cce component + * @twt_ap_pdev_count: Number of MAC on which AP TWT feature is supported + * @twt_ap_sta_count: Max no of STA with which TWT sessions can be formed + * by the AP + * @three_way_coex_config_legacy_en: enable three way coex legacy feature + */ +typedef struct { + uint32_t num_vdevs; + uint32_t num_peers; + uint32_t num_active_peers; + uint32_t num_offload_peers; + uint32_t num_offload_reorder_buffs; + uint32_t num_peer_keys; + uint32_t num_tids; + uint32_t ast_skid_limit; + uint32_t tx_chain_mask; + uint32_t rx_chain_mask; + uint32_t rx_timeout_pri[4]; + uint32_t rx_decap_mode; + uint32_t scan_max_pending_req; + uint32_t bmiss_offload_max_vdev; + uint32_t roam_offload_max_vdev; + uint32_t roam_offload_max_ap_profiles; + uint32_t num_mcast_groups; + uint32_t num_mcast_table_elems; + uint32_t mcast2ucast_mode; + uint32_t tx_dbg_log_size; + uint32_t num_wds_entries; + uint32_t dma_burst_size; + uint32_t mac_aggr_delim; + uint32_t rx_skip_defrag_timeout_dup_detection_check; + uint32_t vow_config; + uint32_t gtk_offload_max_vdev; + uint32_t num_msdu_desc; /* Number of msdu desc */ + uint32_t max_frag_entries; + uint32_t scheduler_params; + /* End common */ + + /* Added for Beeliner */ + uint32_t max_peer_ext_stats; + uint32_t smart_ant_cap; + uint32_t BK_Minfree; + uint32_t BE_Minfree; + uint32_t VI_Minfree; + uint32_t VO_Minfree; + uint32_t rx_batchmode; + uint32_t tt_support; + 
uint32_t atf_config:1, + mgmt_comp_evt_bundle_support:1, + tx_msdu_new_partition_id_support:1, + new_htt_msg_format:1, + peer_unmap_conf_support:1; + uint32_t iphdr_pad_config; + uint32_t + qwrap_config:16, + alloc_frag_desc_for_data_pkt:16; + + /* Added in MCL */ + uint32_t num_tdls_vdevs; + uint32_t num_tdls_conn_table_entries; + uint32_t beacon_tx_offload_max_vdev; + uint32_t num_multicast_filter_entries; + uint32_t num_wow_filters; + uint32_t num_keep_alive_pattern; + uint32_t keep_alive_pattern_size; + uint32_t max_tdls_concurrent_sleep_sta; + uint32_t max_tdls_concurrent_buffer_sta; + uint32_t wmi_send_separate; + uint32_t num_ocb_vdevs; + uint32_t num_ocb_channels; + uint32_t num_ocb_schedules; + uint32_t num_packet_filters; + uint32_t num_max_sta_vdevs; + uint32_t num_ns_ext_tuples_cfg; + uint32_t apf_instruction_size; + uint32_t max_bssid_rx_filters; + uint32_t use_pdev_id; + uint32_t max_num_dbs_scan_duty_cycle; + bool cce_disable; + uint32_t twt_ap_pdev_count; + uint32_t twt_ap_sta_count; + bool tstamp64_en; + bool three_way_coex_config_legacy_en; +} target_resource_config; + +/** + * struct wds_addr_event - WDS addr event structure + * @event_type: event type add/delete + * @peer_mac: peer mac + * @dest_mac: destination mac address + * @vdev_id: vdev id + */ +typedef struct { + uint32_t event_type[4]; + u_int8_t peer_mac[IEEE80211_ADDR_LEN]; + u_int8_t dest_mac[IEEE80211_ADDR_LEN]; + uint32_t vdev_id; +} wds_addr_event_t; +/** + * Enum replicated for host abstraction with FW + */ +typedef enum { + /* Event respose of START CMD */ + WMI_HOST_VDEV_START_RESP_EVENT = 0, + /* Event respose of RESTART CMD */ + WMI_HOST_VDEV_RESTART_RESP_EVENT, +} WMI_HOST_START_EVENT_PARAM; + +/** + * struct wmi_host_vdev_start_resp - VDEV start response + * @vdev_id: vdev id + * @requestor_id: requestor id that requested the VDEV start request + * @resp_type: Respose of Event type START/RESTART + * @status: status of the response + * @chain_mask: Vdev chain mask + * 
@smps_mode: Vdev mimo power save mode + * @mac_id: mac_id field contains the MAC identifier that the + * VDEV is bound to. The valid range is 0 to (num_macs-1). + * @cfgd_tx_streams: Configured Transmit Streams + * @cfgd_rx_streams: Configured Receive Streams + */ +typedef struct { + uint32_t vdev_id; + uint32_t requestor_id; + WMI_HOST_START_EVENT_PARAM resp_type; + uint32_t status; + uint32_t chain_mask; + uint32_t smps_mode; + uint32_t mac_id; + uint32_t cfgd_tx_streams; + uint32_t cfgd_rx_streams; +} wmi_host_vdev_start_resp; + +/** + * struct wmi_host_vdev_delete_resp - VDEV delete response + * @vdev_id: vdev id + */ +struct wmi_host_vdev_delete_resp { + uint32_t vdev_id; +}; + +/** + * struct wmi_host_roam_event - host roam event param + * @vdev_id: vdev id + * @reason: roam reason + * @rssi: RSSI + */ +typedef struct { + uint32_t vdev_id; + uint32_t reason; + uint32_t rssi; +} wmi_host_roam_event; + +/** + * ENUM wmi_host_scan_event_type - Scan event type + */ +enum wmi_host_scan_event_type { + WMI_HOST_SCAN_EVENT_STARTED = 0x1, + WMI_HOST_SCAN_EVENT_COMPLETED = 0x2, + WMI_HOST_SCAN_EVENT_BSS_CHANNEL = 0x4, + WMI_HOST_SCAN_EVENT_FOREIGN_CHANNEL = 0x8, + WMI_HOST_SCAN_EVENT_DEQUEUED = 0x10, + WMI_HOST_SCAN_EVENT_PREEMPTED = 0x20, + WMI_HOST_SCAN_EVENT_START_FAILED = 0x40, + WMI_HOST_SCAN_EVENT_RESTARTED = 0x80, + WMI_HOST_SCAN_EVENT_FOREIGN_CHANNEL_EXIT = 0x100, + WMI_HOST_SCAN_EVENT_INVALID = 0x200, + WMI_HOST_SCAN_EVENT_GPIO_TIMEOUT = 0x400, + WMI_HOST_SCAN_EVENT_MAX = 0x8000 +}; + +/** + * ENUM wmi_host_scan_completion_reason - Scan completion event type + */ +enum wmi_host_scan_completion_reason { + /** scan related events */ + WMI_HOST_SCAN_REASON_NONE = 0xFF, + WMI_HOST_SCAN_REASON_COMPLETED = 0, + WMI_HOST_SCAN_REASON_CANCELLED = 1, + WMI_HOST_SCAN_REASON_PREEMPTED = 2, + WMI_HOST_SCAN_REASON_TIMEDOUT = 3, + WMI_HOST_SCAN_REASON_INTERNAL_FAILURE = 4, + WMI_HOST_SCAN_REASON_MAX, +}; + +/** + * struct wmi_host_scan_event - Scan event response from target 
+ * @event: event type + * @reason: Reason for event + * @channel_freq: channel frequency + * @requestor: requestor id + * @scan_id: scan id + * @vdev_id: vdev id + */ +typedef struct { + uint32_t event; + uint32_t reason; + uint32_t channel_freq; + uint32_t requestor; + uint32_t scan_id; + uint32_t vdev_id; +} wmi_host_scan_event; + +/** + * struct wmi_host_pdev_reserve_ast_entry_event - Reserve AST entry + * @result: result + */ +typedef struct { + uint32_t result; +} wmi_host_pdev_reserve_ast_entry_event; + +/** + * struct wmi_host_mcast_ageout_entry - mcast aged-out entry + * @grp_addr: IPv4/6 mcast group addr + * @vdev_id: vdev id + */ +typedef struct { + uint8_t grp_addr[16]; + uint32_t vdev_id; +} wmi_host_mcast_ageout_entry; + +/** + * struct wmi_host_mcast_list_ageout_event - List of mcast entry aged-out + * @num_entry: Number of mcast entries timed-out + * @entry: List of wmi_host_mcast_ageout_entry + */ +typedef struct { + uint32_t num_entry; + wmi_host_mcast_ageout_entry entry[1]; +} wmi_host_mcast_list_ageout_event; + +/** + * struct wmi_host_pdev_nfcal_power_all_channels_event - NF cal event data + * @nfdbr: + * chan[0 ~ 7]: {NFCalPower_chain0, NFCalPower_chain1, + * NFCalPower_chain2, NFCalPower_chain3, + * NFCalPower_chain4, NFCalPower_chain5, + * NFCalPower_chain6, NFCalPower_chain7}, + * @nfdbm: + * chan[0 ~ 7]: {NFCalPower_chain0, NFCalPower_chain1, + * NFCalPower_chain2, NFCalPower_chain3, + * NFCalPower_chain4, NFCalPower_chain5, + * NFCalPower_chain6, NFCalPower_chain7}, + * @freqnum: + * chan[0 ~ 7]: frequency number + * @pdev_id: pdev_id + */ +typedef struct { + int8_t nfdbr[WMI_HOST_RXG_CAL_CHAN_MAX * WMI_HOST_MAX_NUM_CHAINS]; + int8_t nfdbm[WMI_HOST_RXG_CAL_CHAN_MAX * WMI_HOST_MAX_NUM_CHAINS]; + uint32_t freqnum[WMI_HOST_RXG_CAL_CHAN_MAX]; + uint32_t pdev_id; +} wmi_host_pdev_nfcal_power_all_channels_event; + +/** + * enum wmi_host_pdev_tpc_event_offset: offsets of TPC events + * @WMI_HOST_TX_POWER_MAX: offset of max tx power + * 
@WMI_HOST_TX_POWER_MIN: offset of min tx power + * @WMI_HOST_TX_POWER_LEN: size of tpc values + */ +enum wmi_host_pdev_tpc_event_offset { + WMI_HOST_TX_POWER_MAX, + WMI_HOST_TX_POWER_MIN, + WMI_HOST_TX_POWER_LEN, +}; + +/** + * struct wmi_host_pdev_tpc_event - WMI host pdev TPC event + * @pdev_id: pdev_id + * @tpc: + */ +typedef struct { + uint32_t pdev_id; + int32_t tpc[WMI_HOST_TX_POWER_LEN]; +} wmi_host_pdev_tpc_event; + +/** + * struct wmi_host_pdev_generic_buffer_event + * @buf_type: Buffer type + * @frag_id: Frag id + * @more_frag: more frags pending + * @buf_len: buffer length + * @buf_info: variable length buffer + */ +typedef struct { + uint32_t buf_type; + uint32_t frag_id; + uint32_t more_frag; + uint32_t buf_len; + uint32_t buf_info[1]; +} wmi_host_pdev_generic_buffer_event; +/** + * Enum for host buffer event + */ +enum { + WMI_HOST_BUFFER_TYPE_RATEPWR_TABLE, + WMI_HOST_BUFFER_TYPE_CTL_TABLE, +}; + +/** + * struct wmi_host_pdev_tpc_config_event - host pdev tpc config event + * @pdev_id: pdev_id + * @regDomain: + * @chanFreq: + * @phyMode: + * @twiceAntennaReduction: + * @twiceMaxRDPower: + * @twiceAntennaGain: + * @powerLimit: + * @rateMax: + * @numTxChain: + * @ctl: + * @flags: + * @maxRegAllowedPower: + * @maxRegAllowedPowerAGCDD: + * @maxRegAllowedPowerAGSTBC: + * @maxRegAllowedPowerAGTXBF: + * @ratesArray: + */ +typedef struct { + uint32_t pdev_id; + uint32_t regDomain; + uint32_t chanFreq; + uint32_t phyMode; + uint32_t twiceAntennaReduction; + uint32_t twiceMaxRDPower; + int32_t twiceAntennaGain; + uint32_t powerLimit; + uint32_t rateMax; + uint32_t numTxChain; + uint32_t ctl; + uint32_t flags; + int8_t maxRegAllowedPower[WMI_HOST_TPC_TX_NUM_CHAIN]; + int8_t maxRegAllowedPowerAGCDD[WMI_HOST_TPC_TX_NUM_CHAIN][WMI_HOST_TPC_TX_NUM_CHAIN]; + int8_t maxRegAllowedPowerAGSTBC[WMI_HOST_TPC_TX_NUM_CHAIN][WMI_HOST_TPC_TX_NUM_CHAIN]; + int8_t maxRegAllowedPowerAGTXBF[WMI_HOST_TPC_TX_NUM_CHAIN][WMI_HOST_TPC_TX_NUM_CHAIN]; + uint8_t 
ratesArray[WMI_HOST_TPC_RATE_MAX]; +} wmi_host_pdev_tpc_config_event; +/** + * Enums for TPC event + */ +typedef enum { + WMI_HOST_TPC_CONFIG_EVENT_FLAG_TABLE_CDD = 0x1, + WMI_HOST_TPC_CONFIG_EVENT_FLAG_TABLE_STBC = 0x2, + WMI_HOST_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF = 0x4, +} WMI_HOST_TPC_CONFIG_EVENT_FLAG; + +/** + * Medium Utilization evaluation algorithms + * These algorithms can be complementary rather than exclusive. + */ +typedef enum { + WMI_HOST_MU_BASIC_ALGO = 0x1, + WMI_HOST_MU_PER_BSSID_ALGO = 0x2, + WMI_HOST_MU_HIDDEN_NODE_ALGO = 0x4, +} WMI_HOST_MU_ALGO_TYPE; +/* max MU alg combinations supported by target */ +#define WMI_HOST_MU_MAX_ALGO_TYPE 3 + +/** + * struct wmi_host_mu_db_entry + * @entry_type: 0=AP, 1=STA, 2=Small Cell(SC) + * @bssid_mac_addr: Transmitter MAC if entry is WiFi node. PLMNID if SC + * @tx_addr: Transmitter MAC if entry is WiFi node. PLMNID if SC + * @avg_duration_us: Avg. duration for which node was transmitting + * @avg_rssi: Avg. RSSI of all TX packets by node. Unit dBm + * @mu_percent: % medium utilization by node + */ +typedef struct { + uint32_t entry_type; + wmi_host_mac_addr bssid_mac_addr; + wmi_host_mac_addr tx_addr; + uint32_t avg_duration_us; + uint32_t avg_rssi; + uint32_t mu_percent; +} wmi_host_mu_db_entry; + +/** + * struct wmi_host_mu_report_event - WMI_MU_REPORT_EVENTID + * @mu_request_id: request id + * @status_reason: MU_STATUS_REASON + * @total_mu: MU_ALG_TYPE combinations + * @num_active_bssid: number of active bssid + * @hidden_node_mu : hidden node algo MU per bin + * @num_TA_entries : No.
of entries found in MU db report + */ +typedef struct { + uint32_t mu_request_id; + uint32_t status_reason; + uint32_t total_mu[WMI_HOST_MU_MAX_ALGO_TYPE]; + uint32_t num_active_bssid; + uint32_t hidden_node_mu[LTEU_MAX_BINS]; + uint32_t num_TA_entries; +} wmi_host_mu_report_event; + +/** + * struct wmi_host_mgmt_tx_compl_event - TX completion event + * @desc_id: from tx_send_cmd + * @status: WMI_MGMT_TX_COMP_STATUS_TYPE + * @pdev_id: pdev_id + * @ppdu_id: ppdu_id + */ +typedef struct { + uint32_t desc_id; + uint32_t status; + uint32_t pdev_id; + uint32_t ppdu_id; +} wmi_host_mgmt_tx_compl_event; + +/** + * struct wmi_host_offchan_data_tx_compl_event - TX completion event + * @desc_id: from tx_send_cmd + * @status: VWMI_MGMT_TX_COMP_STATUS_TYPE + * @pdev_id: pdev_id + */ +struct wmi_host_offchan_data_tx_compl_event { + uint32_t desc_id; + uint32_t status; + uint32_t pdev_id; +}; + +#define WMI_HOST_TIM_BITMAP_ARRAY_SIZE 17 + +/** + * struct wmi_host_tim_info - TIM info in SWBA event + * @tim_len: TIM length + * @tim_mcast: + * @tim_bitmap: TIM bitmap + * @tim_changed: TIM changed + * @tim_num_ps_pending: TIM num PS sta pending + * @vdev_id: Vdev id + */ +typedef struct { + uint32_t tim_len; + uint32_t tim_mcast; + uint32_t tim_bitmap[WMI_HOST_TIM_BITMAP_ARRAY_SIZE]; + uint32_t tim_changed; + uint32_t tim_num_ps_pending; + uint32_t vdev_id; +} wmi_host_tim_info; + +/** + * struct wmi_host_p2p_noa_descriptor - NoA desc in SWBA event + * @type_count: Absence count + * @duration: NoA duration + * @interval: NoA interval + * @start_time: start time + */ +typedef struct { + uint32_t type_count; + uint32_t duration; + uint32_t interval; + uint32_t start_time; +} wmi_host_p2p_noa_descriptor; +/* Maximum number of NOA Descriptors supported */ +#define WMI_HOST_P2P_MAX_NOA_DESCRIPTORS 4 +/** + * struct wmi_host_p2p_noa_info - p2p noa information + * @modified: NoA modified + * @index: Index + * @oppPS: Oppurtunstic ps + * @ctwindow: CT window + * @num_descriptors: number of 
descriptors + * @noa_descriptors: noa descriptors + * @vdev_id: Vdev id + */ +typedef struct { + uint8_t modified; + uint8_t index; + uint8_t oppPS; + uint8_t ctwindow; + uint8_t num_descriptors; + wmi_host_p2p_noa_descriptor + noa_descriptors[WMI_HOST_P2P_MAX_NOA_DESCRIPTORS]; + uint32_t vdev_id; +} wmi_host_p2p_noa_info; + +/** + * struct wmi_host_peer_sta_kickout_event + * @peer_macaddr: peer mac address + * @reason: kickout reason + * @rssi: rssi + * @pdev_id: pdev_id + */ +typedef struct { + uint8_t peer_macaddr[IEEE80211_ADDR_LEN]; + uint32_t reason; + uint32_t rssi; +} wmi_host_peer_sta_kickout_event; + +/** + * struct wmi_host_peer_sta_ps_statechange_event - STA ps state change event + * @peer_macaddr: peer mac address + * @peer_ps_state: peer PS state + * @pdev_id: pdev_id + */ +typedef struct { + uint8_t peer_macaddr[IEEE80211_ADDR_LEN]; + uint32_t peer_ps_state; +} wmi_host_peer_sta_ps_statechange_event; + +/* Maximum CCK, OFDM rates supported */ +#define WMI_SA_MAX_CCK_OFDM_RATES 12 +/* Maximum MCS rates supported; 4 rates in each dword */ +#define WMI_SA_MAX_MCS_RATES 40 +#define WMI_SA_MAX_RATE_COUNTERS 4 +/* Maximum rate series used for transmission */ +#define SA_MAX_RATE_SERIES 2 + +#define SA_MAX_LEGACY_RATE_DWORDS 3 +#define SA_MAX_HT_RATE_DWORDS 10 +#define SA_BYTES_IN_DWORD 4 +#define SA_MASK_BYTE 0xff +/* TODO: ratecode_160 needs to add for future chips */ +/** + * struct wmi_sa_rate_cap - smart antenna rate capabilities + * @pdev_id: pdev_id + * @ratecode_legacy: Rate code array for CCK OFDM + * @ratecode_20: Rate code array for 20MHz BW + * @ratecode_40: Rate code array for 40MHz BW + * @ratecode_80: Rate code array for 80MHz BW + * @ratecount: Max Rate count for each mode + */ +typedef struct { + uint8_t ratecode_legacy[WMI_SA_MAX_CCK_OFDM_RATES]; + uint8_t ratecode_20[WMI_SA_MAX_MCS_RATES]; + uint8_t ratecode_40[WMI_SA_MAX_MCS_RATES]; + uint8_t ratecode_80[WMI_SA_MAX_MCS_RATES]; + uint8_t ratecount[WMI_SA_MAX_RATE_COUNTERS]; +}
wmi_sa_rate_cap; + +/** Preamble types to be used with VDEV fixed rate configuration */ +typedef enum { + WMI_HOST_RATE_PREAMBLE_OFDM, + WMI_HOST_RATE_PREAMBLE_CCK, + WMI_HOST_RATE_PREAMBLE_HT, + WMI_HOST_RATE_PREAMBLE_VHT, + WMI_HOST_RATE_PREAMBLE_HE, +} WMI_HOST_RATE_PREAMBLE; + +#define WMI_HOST_FIXED_RATE_NONE (0xff) + +/** slot time long */ +#define WMI_HOST_VDEV_SLOT_TIME_LONG 0x1 +/** slot time short */ +#define WMI_HOST_VDEV_SLOT_TIME_SHORT 0x2 +/** preamble long */ +#define WMI_HOST_VDEV_PREAMBLE_LONG 0x1 +/** preamble short */ +#define WMI_HOST_VDEV_PREAMBLE_SHORT 0x2 +/** found a better AP */ +#define WMI_HOST_ROAM_REASON_BETTER_AP 0x1 +/** beacon miss detected */ +#define WMI_HOST_ROAM_REASON_BMISS 0x2 +/** deauth/disassoc received */ +#define WMI_HOST_ROAM_REASON_DEAUTH 0x2 +/** connected AP's low rssi condition detected */ +#define WMI_HOST_ROAM_REASON_LOW_RSSI 0x3 +/** found another AP that matches SSID and Security profile in + * WMI_ROAM_AP_PROFILE, found during scan triggered upon FINAL_BMISS + */ +#define WMI_HOST_ROAM_REASON_SUITABLE_AP 0x4 +/** LFR3.0 roaming failed, indicate the disconnection to host */ +#define WMI_HOST_ROAM_REASON_HO_FAILED 0x5 + +/** values for vdev_type */ +#define WMI_HOST_VDEV_TYPE_AP 0x1 +#define WMI_HOST_VDEV_TYPE_STA 0x2 +#define WMI_HOST_VDEV_TYPE_IBSS 0x3 +#define WMI_HOST_VDEV_TYPE_MONITOR 0x4 + +/** values for vdev_subtype */ +#define WMI_HOST_VDEV_SUBTYPE_P2P_DEVICE 0x1 +#define WMI_HOST_VDEV_SUBTYPE_P2P_CLIENT 0x2 +#define WMI_HOST_VDEV_SUBTYPE_P2P_GO 0x3 +#define WMI_HOST_VDEV_SUBTYPE_PROXY_STA 0x4 +#define WMI_HOST_VDEV_SUBTYPE_MESH 0x5 + +#define WMI_HOST_MGMT_TID 17 +/* Disable aging & learning */ +#define WMI_HOST_WDS_FLAG_STATIC 0x1 + +/** + * Peer param enum abstracted from target + */ +typedef enum { + /** mimo powersave state */ + WMI_HOST_PEER_MIMO_PS_STATE = 0x1, + /** enable/disable AMPDU . initial value (enabled) */ + WMI_HOST_PEER_AMPDU = 0x2, + /** authorize/unauthorize peer.
initial value is unauthorized (0) */ + WMI_HOST_PEER_AUTHORIZE = 0x3, + /** peer channel bandwidth */ + WMI_HOST_PEER_CHWIDTH = 0x4, + /** peer NSS */ + WMI_HOST_PEER_NSS = 0x5, + /** USE 4 ADDR */ + WMI_HOST_PEER_USE_4ADDR = 0x6, + /** Enable extended peer stats */ + WMI_HOST_PEER_EXT_STATS_ENABLE = 0x7, + /*Use FIXED Pwr */ + WMI_HOST_PEER_USE_FIXED_PWR = 0x8, + /* Set peer fixed rate */ + WMI_HOST_PEER_PARAM_FIXED_RATE = 0x9, + /* Whitelist peer TIDs */ + WMI_HOST_PEER_SET_MU_WHITELIST = 0xa, + /* set group membership status */ + WMI_HOST_PEER_MEMBERSHIP = 0xb, + WMI_HOST_PEER_USERPOS = 0xc, + WMI_HOST_PEER_CRIT_PROTO_HINT_ENABLED = 0xd, + WMI_HOST_PEER_TX_FAIL_CNT_THR = 0xe, + WMI_HOST_PEER_SET_HW_RETRY_CTS2S = 0xf, + WMI_HOST_PEER_IBSS_ATIM_WINDOW_LENGTH = 0x10, + WMI_HOST_PEER_PHYMODE = 0x11, + WMI_HOST_PEER_SET_MAC_TX_RATE = 0x12, + /* Set default Rx routing */ + WMI_HOST_PEER_SET_DEFAULT_ROUTING = 0x13, + WMI_HOST_PEER_SET_MIN_TX_RATE = 0x14, + /* peer NSS for 160Mhx */ + WMI_HOST_PEER_NSS_VHT160 = 0x15, + /* peer NSS for 160Mhx */ + WMI_HOST_PEER_NSS_VHT80_80 = 0x16, + /* Set SU sounding interval */ + WMI_HOST_PEER_PARAM_SU_TXBF_SOUNDING_INTERVAL = 0x17, + /* Set MU sounding interval */ + WMI_HOST_PEER_PARAM_MU_TXBF_SOUNDING_INTERVAL = 0x18, + /* Enable sounding interval set */ + WMI_HOST_PEER_PARAM_TXBF_SOUNDING_ENABLE = 0x19, + /* Enable MU support */ + WMI_HOST_PEER_PARAM_MU_ENABLE = 0x1a, + /* Enable OFDMA support */ + WMI_HOST_PEER_PARAM_OFDMA_ENABLE = 0x1b, +} PEER_PARAM_ENUM; +#define WMI_HOST_PEER_MIMO_PS_NONE 0x0 +#define WMI_HOST_PEER_MIMO_PS_STATIC 0x1 +#define WMI_HOST_PEER_MIMO_PS_DYNAMIC 0x2 +typedef enum { + HOST_PLATFORM_HIGH_PERF, + HOST_PLATFORM_LOW_PERF, + HOST_PLATFORM_LOW_PERF_NO_FETCH, +} HOST_PLATFORM_TYPE; + +enum wmi_host_sta_ps_mode { + /** enable power save for the given STA VDEV */ + WMI_HOST_STA_PS_MODE_DISABLED = 0, + /** disable power save for a given STA VDEV */ + WMI_HOST_STA_PS_MODE_ENABLED = 1, +}; +enum 
wmi_host_sta_powersave_param { + /** + * Controls how frames are retrievd from AP while STA is sleeping + * + * (see enum wmi_sta_ps_param_rx_wake_policy) + */ + WMI_HOST_STA_PS_PARAM_RX_WAKE_POLICY = 0, + + /** + * The STA will go active after this many TX + * + * (see enum wmi_sta_ps_param_tx_wake_threshold) + */ + WMI_HOST_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1, + + /** + * Number of PS-Poll to send before STA wakes up + * + * (see enum wmi_sta_ps_param_pspoll_count) + * + */ + WMI_HOST_STA_PS_PARAM_PSPOLL_COUNT = 2, + + /** + * TX/RX inactivity time in msec before going to sleep. + * + * The power save SM will monitor tx/rx activity on the VDEV, if no + * activity for the specified msec of the parameter + * the Power save SM will go to sleep. + */ + WMI_HOST_STA_PS_PARAM_INACTIVITY_TIME = 3, + + /** + * Set uapsd configuration. + * + * (see enum wmi_sta_ps_param_uapsd) + */ + WMI_HOST_STA_PS_PARAM_UAPSD = 4, +}; +/* prefix used by scan requestor ids on the host + * replicated here form wmi_unified.h*/ +#define WMI_HOST_P_SCAN_REQUESTOR_ID_PREFIX 0xA000 +/* prefix used by scan request ids generated on the host */ +/* host cycles through the lower 12 bits to generate ids */ +#define WMI_HOST_P_SCAN_REQ_ID_PREFIX 0xA000 + +#define WMI_HOST_RC_DS_FLAG 0x01 /* Dual stream flag */ +#define WMI_HOST_RC_CW40_FLAG 0x02 /* CW 40 */ +#define WMI_HOST_RC_SGI_FLAG 0x04 /* Short Guard Interval */ +#define WMI_HOST_RC_HT_FLAG 0x08 /* HT */ +#define WMI_HOST_RC_RTSCTS_FLAG 0x10 /* RTS-CTS */ +#define WMI_HOST_RC_TX_STBC_FLAG 0x20 /* TX STBC */ +#define WMI_HOST_RC_RX_STBC_FLAG 0xC0 /* RX STBC ,2 bits */ +#define WMI_HOST_RC_RX_STBC_FLAG_S 6 /* RX STBC ,2 bits */ +#define WMI_HOST_RC_WEP_TKIP_FLAG 0x100 /* WEP/TKIP encryption */ +#define WMI_HOST_RC_TS_FLAG 0x200 /* Three stream flag */ +#define WMI_HOST_RC_UAPSD_FLAG 0x400 /* UAPSD Rate Control */ + +/** HT Capabilities*/ +#define WMI_HOST_HT_CAP_ENABLED 0x0001 /* HT Enabled/ disabled */ +/* Short Guard Interval with HT20 */ 
+#define WMI_HOST_HT_CAP_HT20_SGI 0x0002 +#define WMI_HOST_HT_CAP_DYNAMIC_SMPS 0x0004 /* Dynamic MIMO powersave */ +#define WMI_HOST_HT_CAP_TX_STBC 0x0008 /* B3 TX STBC */ +#define WMI_HOST_HT_CAP_TX_STBC_MASK_SHIFT 3 +#define WMI_HOST_HT_CAP_RX_STBC 0x0030 /* B4-B5 RX STBC */ +#define WMI_HOST_HT_CAP_RX_STBC_MASK_SHIFT 4 +#define WMI_HOST_HT_CAP_LDPC 0x0040 /* LDPC supported */ +#define WMI_HOST_HT_CAP_L_SIG_TXOP_PROT 0x0080 /* L-SIG TXOP Protection */ +#define WMI_HOST_HT_CAP_MPDU_DENSITY 0x0700 /* MPDU Density */ +#define WMI_HOST_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8 +#define WMI_HOST_HT_CAP_HT40_SGI 0x0800 +#define WMI_HOST_HT_CAP_RX_LDPC 0x1000 +#define WMI_HOST_HT_CAP_TX_LDPC 0x2000 +#define WMI_HOST_HT_CAP_IBF_BFER 0x4000 + +/* These macros should be used when we wish to advertise STBC support for + * only 1SS or 2SS or 3SS. */ +#define WMI_HOST_HT_CAP_RX_STBC_1SS 0x0010 /* B4-B5 RX STBC */ +#define WMI_HOST_HT_CAP_RX_STBC_2SS 0x0020 /* B4-B5 RX STBC */ +#define WMI_HOST_HT_CAP_RX_STBC_3SS 0x0030 /* B4-B5 RX STBC */ + + +#define WMI_HOST_HT_CAP_DEFAULT_ALL (WMI_HOST_HT_CAP_ENABLED | \ + WMI_HOST_HT_CAP_HT20_SGI | \ + WMI_HOST_HT_CAP_HT40_SGI | \ + WMI_HOST_HT_CAP_TX_STBC | \ + WMI_HOST_HT_CAP_RX_STBC | \ + WMI_HOST_HT_CAP_LDPC) + +/* WMI_HOST_VHT_CAP_* these maps to ieee 802.11ac vht capability information + field. The fields not defined here are not supported, or reserved. + Do not change these masks and if you have to add new one follow the + bitmask as specified by 802.11ac draft. 
+*/ + +#define WMI_HOST_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003 +#define WMI_HOST_VHT_CAP_RX_LDPC 0x00000010 +#define WMI_HOST_VHT_CAP_SGI_80MHZ 0x00000020 +#define WMI_HOST_VHT_CAP_SGI_160MHZ 0x00000040 +#define WMI_HOST_VHT_CAP_TX_STBC 0x00000080 +#define WMI_HOST_VHT_CAP_RX_STBC_MASK 0x00000300 +#define WMI_HOST_VHT_CAP_RX_STBC_MASK_SHIFT 8 +#define WMI_HOST_VHT_CAP_SU_BFER 0x00000800 +#define WMI_HOST_VHT_CAP_SU_BFEE 0x00001000 +#define WMI_HOST_VHT_CAP_MAX_CS_ANT_MASK 0x0000E000 +#define WMI_HOST_VHT_CAP_MAX_CS_ANT_MASK_SHIFT 13 +#define WMI_HOST_VHT_CAP_MAX_SND_DIM_MASK 0x00070000 +#define WMI_HOST_VHT_CAP_MAX_SND_DIM_MASK_SHIFT 16 +#define WMI_HOST_VHT_CAP_MU_BFER 0x00080000 +#define WMI_HOST_VHT_CAP_MU_BFEE 0x00100000 +#define WMI_HOST_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000 +#define WMI_HOST_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIT 23 +#define WMI_HOST_VHT_CAP_RX_FIXED_ANT 0x10000000 +#define WMI_HOST_VHT_CAP_TX_FIXED_ANT 0x20000000 + +#define WMI_HOST_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002 + +/* These macros should be used when we wish to advertise STBC support for + * only 1SS or 2SS or 3SS. 
*/ +#define WMI_HOST_VHT_CAP_RX_STBC_1SS 0x00000100 +#define WMI_HOST_VHT_CAP_RX_STBC_2SS 0x00000200 +#define WMI_HOST_VHT_CAP_RX_STBC_3SS 0x00000300 + +#define WMI_HOST_VHT_CAP_DEFAULT_ALL (WMI_HOST_VHT_CAP_MAX_MPDU_LEN_11454 | \ + WMI_HOST_VHT_CAP_SGI_80MHZ | \ + WMI_HOST_VHT_CAP_TX_STBC | \ + WMI_HOST_VHT_CAP_RX_STBC_MASK | \ + WMI_HOST_VHT_CAP_RX_LDPC | \ + WMI_HOST_VHT_CAP_MAX_AMPDU_LEN_EXP | \ + WMI_HOST_VHT_CAP_RX_FIXED_ANT | \ + WMI_HOST_VHT_CAP_TX_FIXED_ANT) + +/* Interested readers refer to Rx/Tx MCS Map definition as defined in + 802.11ac +*/ +#define WMI_HOST_VHT_MAX_MCS_4_SS_MASK(r, ss) ((3 & (r)) << (((ss) - 1) << 1)) +#define WMI_HOST_VHT_MAX_SUPP_RATE_MASK 0x1fff0000 +#define WMI_HOST_VHT_MAX_SUPP_RATE_MASK_SHIFT 16 + +/** U-APSD configuration of peer station from (re)assoc request and TSPECs */ +enum wmi_host_ap_ps_param_uapsd { + WMI_HOST_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0), + WMI_HOST_AP_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1), + WMI_HOST_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2), + WMI_HOST_AP_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3), + WMI_HOST_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4), + WMI_HOST_AP_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5), + WMI_HOST_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6), + WMI_HOST_AP_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7), +}; +/** U-APSD maximum service period of peer station */ +enum wmi_host_ap_ps_peer_param_max_sp { + WMI_HOST_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0, + WMI_HOST_AP_PS_PEER_PARAM_MAX_SP_2 = 1, + WMI_HOST_AP_PS_PEER_PARAM_MAX_SP_4 = 2, + WMI_HOST_AP_PS_PEER_PARAM_MAX_SP_6 = 3, + + /* keep last! */ + MAX_HOST_WMI_AP_PS_PEER_PARAM_MAX_SP, +}; + +#define WMI_HOST_UAPSD_AC_TYPE_DELI 0 +#define WMI_HOST_UAPSD_AC_TYPE_TRIG 1 + +#define WMI_HOST_UAPSD_AC_BIT_MASK(ac, type) \ + ((type == WMI_HOST_UAPSD_AC_TYPE_DELI) ? 
(1<<(ac<<1)) :\ + (1<<((ac<<1)+1))) + +enum wmi_host_ap_ps_peer_param_wnm_sleep { + WMI_HOST_AP_PS_PEER_PARAM_WNM_SLEEP_ENABLE, + WMI_HOST_AP_PS_PEER_PARAM_WNM_SLEEP_DISABLE, +}; + +enum wmi_host_ap_ps_peer_param { + /** Set uapsd configuration for a given peer. + * + * This will include the delivery and trigger enabled state for every AC. + * The host MLME needs to set this based on AP capability and stations + * request Set in the association request received from the station. + * + * Lower 8 bits of the value specify the UAPSD configuration. + * + * (see enum wmi_ap_ps_param_uapsd) + * The default value is 0. + */ + WMI_HOST_AP_PS_PEER_PARAM_UAPSD = 0, + + /** + * Set the service period for a UAPSD capable station + * + * The service period from wme ie in the (re)assoc request frame. + * + * (see enum wmi_ap_ps_peer_param_max_sp) + */ + WMI_HOST_AP_PS_PEER_PARAM_MAX_SP = 1, + + /** Time in seconds for aging out buffered frames + * for STA in power save */ + WMI_HOST_AP_PS_PEER_PARAM_AGEOUT_TIME = 2, + + /** Specify frame types that are considered SIFS + * RESP trigger frame */ + WMI_HOST_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE = 3, + + /** Specifies the trigger state of TID. 
+ * Valid only for UAPSD frame type */ + WMI_HOST_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD = 4, + + /** Specifies the WNM sleep state of a STA */ + WMI_HOST_AP_PS_PEER_PARAM_WNM_SLEEP = 5, +}; +#define WMI_HOST_RXERR_CRC 0x01 /* CRC error on frame */ +#define WMI_HOST_RXERR_DECRYPT 0x08 /* non-Michael decrypt error */ +#define WMI_HOST_RXERR_MIC 0x10 /* Michael MIC decrypt error */ +#define WMI_HOST_RXERR_KEY_CACHE_MISS 0x20 /* No/incorrect key matter in h/w */ + +enum wmi_host_sta_ps_param_uapsd { + WMI_HOST_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0), + WMI_HOST_STA_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1), + WMI_HOST_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2), + WMI_HOST_STA_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3), + WMI_HOST_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4), + WMI_HOST_STA_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5), + WMI_HOST_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6), + WMI_HOST_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7), +}; + +enum wmi_host_sta_ps_param_rx_wake_policy { + /* Wake up when ever there is an RX activity on the VDEV. In this mode + * the Power save SM(state machine) will come out of sleep by either + * sending null frame (or) a data frame (with PS==0) in response to TIM + * bit set in the received beacon frame from AP. + */ + WMI_HOST_STA_PS_RX_WAKE_POLICY_WAKE = 0, + + /* Here the power save state machine will not wakeup in response to TIM + * bit, instead it will send a PSPOLL (or) UASPD trigger based on UAPSD + * configuration setup by WMISET_PS_SET_UAPSD WMI command. When all + * access categories are delivery-enabled, the station will send a UAPSD + * trigger frame, otherwise it will send a PS-Poll. + */ + WMI_HOST_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1, +}; +enum wmi_host_sta_ps_param_pspoll_count { + WMI_HOST_STA_PS_PSPOLL_COUNT_NO_MAX = 0, + /* Values greater than 0 indicate the maximum numer of PS-Poll frames FW + * will send before waking up. + */ +}; +/** Number of tx frames/beacon that cause the power save SM to wake up. 
+ * + * Value 1 causes the SM to wake up for every TX. Value 0 has a special + * meaning, It will cause the SM to never wake up. This is useful if you want + * to keep the system to sleep all the time for some kind of test mode . host + * can change this parameter any time. It will affect at the next tx frame. + */ +enum wmi_host_sta_ps_param_tx_wake_threshold { + WMI_HOST_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0, + WMI_HOST_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1, + + /* Values greater than one indicate that many TX attempts per beacon + * interval before the STA will wake up + */ +}; +/* + * Transmit power scale factor. + * + */ +typedef enum { + WMI_HOST_TP_SCALE_MAX = 0, /* no scaling (default) */ + WMI_HOST_TP_SCALE_50 = 1, /* 50% of max (-3 dBm) */ + WMI_HOST_TP_SCALE_25 = 2, /* 25% of max (-6 dBm) */ + WMI_HOST_TP_SCALE_12 = 3, /* 12% of max (-9 dBm) */ + WMI_HOST_TP_SCALE_MIN = 4, /* min, but still on */ + WMI_HOST_TP_SCALE_SIZE = 5, /* max num of enum */ +} WMI_HOST_TP_SCALE; +enum { + WMI_HOST_RATEPWR_TABLE_OPS_SET, + WMI_HOST_RATEPWR_TABLE_OPS_GET, +}; +/* reserved up through 0xF */ +/** + * struct wmi_host_dcs_mib_stats - WLAN IM stats from target to host + * Below statistics are sent from target to host periodically. + * These are collected at target as long as target is running + * and target chip is not in sleep. 
+ * @listen_time: + * @reg_tx_frame_cnt: + * @reg_rx_frame_cnt: + * @reg_rxclr_cnt: + * @reg_cycle_cnt: delta cycle count + * @reg_rxclr_ext_cnt: + * @reg_ofdm_phyerr_cnt: + * @reg_cck_phyerr_cnt: CCK err count since last reset, read from register + */ +typedef struct _hp_dcs_mib_stats { + int32_t listen_time; + uint32_t reg_tx_frame_cnt; + uint32_t reg_rx_frame_cnt; + uint32_t reg_rxclr_cnt; + uint32_t reg_cycle_cnt; + uint32_t reg_rxclr_ext_cnt; + uint32_t reg_ofdm_phyerr_cnt; + uint32_t reg_cck_phyerr_cnt; +} wmi_host_dcs_mib_stats_t; + +/** + * struct wmi_host_dcs_im_tgt_stats - DCS IM target stats + * @reg_tsf32: current running TSF from the TSF-1 + * @last_ack_rssi: Known last frame rssi, in case of multiple stations, if + * and at different ranges, this would not gaurantee that + * this is the least rssi. + * @tx_waste_time: Sum of all the failed durations in the last + * one second interval. + * @rx_time: count how many times the hal_rxerr_phy is marked, in this + * time period + * @phyerr_cnt: + * @mib_stats: wmi_host_dcs_mib_stats_t - collected mib stats as explained + * in mib structure + * @chan_nf: Channel noise floor (Units are in dBm) + * @my_bss_rx_cycle_count: BSS rx cycle count + */ +typedef struct _wmi_host_dcs_im_tgt_stats { + uint32_t reg_tsf32; + uint32_t last_ack_rssi; + uint32_t tx_waste_time; + uint32_t rx_time; + uint32_t phyerr_cnt; + wmi_host_dcs_mib_stats_t mib_stats; + uint32_t chan_nf; + uint32_t my_bss_rx_cycle_count; +} wmi_host_dcs_im_tgt_stats_t; + +/** + * Enum for pktlog req + */ +typedef enum { + WMI_HOST_PKTLOG_EVENT_RX = 0x1, + WMI_HOST_PKTLOG_EVENT_TX = 0x2, + WMI_HOST_PKTLOG_EVENT_RCF = 0x4, /* Rate Control Find */ + WMI_HOST_PKTLOG_EVENT_RCU = 0x8, /* Rate Control Update */ + WMI_HOST_PKTLOG_EVENT_DBG_PRINT = 0x10, /* DEBUG prints */ + /* To support Smart Antenna */ + WMI_HOST_PKTLOG_EVENT_SMART_ANTENNA = 0x20, + WMI_HOST_PKTLOG_EVENT_H_INFO = 0x40, + WMI_HOST_PKTLOG_EVENT_STEERING = 0x80, + /* To support Tx data Capture 
*/ + WMI_HOST_PKTLOG_EVENT_TX_DATA_CAPTURE = 0x100, +} WMI_HOST_PKTLOG_EVENT; + +/** + * wmi_host_phyerr + * + */ +#define WMI_HOST_PHY_ERROR_SPECTRAL_SCAN 0x26 +#define WMI_HOST_PHY_ERROR_FALSE_RADAR_EXT 0x24 + +#define WMI_HOST_AR900B_DFS_PHYERR_MASK 0x4 +#define WMI_HOST_AR900B_SPECTRAL_PHYERR_MASK 0x4000000 + +/** + * struct wmi_host_perchain_rssi_info - per chain RSSI info + * @rssi_pri20: RSSI on primary 20 + * @rssi_sec20: RSSI on secondary 20 + * @rssi_sec40: RSSI secondary 40 + * @rssi_sec80: RSSI secondary 80 + */ +typedef struct wmi_host_perchain_rssi_info { + int8_t rssi_pri20; + int8_t rssi_sec20; + int8_t rssi_sec40; + int8_t rssi_sec80; +} wmi_host_perchain_rssi_info_t; + +/** + * struct _wmi_host_rf_info - RF measurement information + * @rssi_comb: RSSI Information + * @pc_rssi_info[4]: For now, we know we are getting information + * for only 4 chains at max. For future extensions + * use a define + * @noise_floor: Noise floor information + */ +typedef struct _wmi_host_rf_info { + int8_t rssi_comb; + wmi_host_perchain_rssi_info_t pc_rssi_info[4]; + int16_t noise_floor[4]; +} wmi_host_rf_info_t; + +/** + * struct _wmi_host_chan_info + * @center_freq1: center frequency 1 in MHz + * @center_freq2: center frequency 2 in MHz -valid only for + * 11ACVHT 80PLUS80 mode + * @chan_width: channel width in MHz + */ +typedef struct _wmi_host_chan_info { + u_int16_t center_freq1; + u_int16_t center_freq2; + u_int8_t chan_width; +} wmi_host_chan_info_t; + +/** + * struct wmi_host_phyerr + * @rf_info: + * @chan_info: + * @tsf64: + * @phy_err_code: + * @tsf_timestamp: + * @bufp: + * @buf_len: + * @phy_err_mask0: + * @phy_err_mask1: + * @pdev_id: pdev_id + */ +typedef struct _wmi_host_phyerr { + wmi_host_rf_info_t rf_info; + wmi_host_chan_info_t chan_info; + uint64_t tsf64; + int32_t phy_err_code; + uint32_t tsf_timestamp; + uint8_t *bufp; + uint32_t buf_len; + uint32_t phy_err_mask0; + uint32_t phy_err_mask1; + uint32_t pdev_id; +} wmi_host_phyerr_t; + +/** + *
struct wmi_host_rtt_event_hdr + * @req_id: request id + * @status: status + * @meas_done: measurement done flag + * @meas_type: measurement type + * @report_type: report type + * @v3_status: v2 status + * @v3_finish: + * @v3_tm_start: + * @num_ap: number of AP + * @result: resuult + * @dest_mac: destination mac + */ +typedef struct { + uint16_t req_id; + uint16_t status:1, + meas_done:1, + meas_type:3, + report_type:3, + v3_status:2, + v3_finish:1, + v3_tm_start:1, + num_ap:4; + uint16_t result; + uint8_t dest_mac[IEEE80211_ADDR_LEN]; +} wmi_host_rtt_event_hdr; + +/** + * struct wmi_host_rtt_meas_event - RTT measurement event + * @chain_mask: + * @bw: + * @rsvd: + * @txrxchain_mask: Bit:0-3:chain mask + * Bit 4-5: band width info + * 00 --Legacy 20, 01 --HT/VHT20 + * 10 --HT/VHT40, 11 -- VHT80 + * @tod: resolution of 0.1ns + * @toa: resolution of 0.1ns + * @t3: + * @t4: + * @rssi0: + * @rssi1: + * @rssi2: + * @rssi3: + */ +typedef struct { + uint32_t chain_mask:3, + bw:2, + rsvd:27; + uint32_t txrxchain_mask; + uint64_t tod; + uint64_t toa; + uint64_t t3; + uint64_t t4; + uint32_t rssi0; + uint32_t rssi1; + uint32_t rssi2; + uint32_t rssi3; +} wmi_host_rtt_meas_event; + +/*----RTT Report event definition ----*/ +typedef enum { + /* rtt cmd header parsing error --terminate */ + WMI_HOST_RTT_COMMAND_HEADER_ERROR = 0, + /* rtt body parsing error -- skip current STA REQ */ + WMI_HOST_RTT_COMMAND_ERROR, + /* rtt no resource -- terminate */ + WMI_HOST_RTT_MODULE_BUSY, + /* STA exceed the support limit -- only server the first n STA */ + WMI_HOST_RTT_TOO_MANY_STA, + /* any allocate failure */ + WMI_HOST_RTT_NO_RESOURCE, + /* can not find vdev with vdev ID - skip current STA REQ */ + WMI_HOST_RTT_VDEV_ERROR, + /* Tx failure -- continiue and measure number */ + WMI_HOST_RTT_TRANSIMISSION_ERROR, + /* wait for first TM timer expire-terminate current STA measurement */ + WMI_HOST_RTT_TM_TIMER_EXPIRE, + /* we do not support RTT measurement with this type of frame */ + 
WMI_HOST_RTT_FRAME_TYPE_NOSUPPORT, + /* whole RTT measurement timer expire-terminate + ** current STA measurement */ + WMI_HOST_RTT_TIMER_EXPIRE, + /* channel swicth failed */ + WMI_HOST_RTT_CHAN_SWITCH_ERROR, + /* TMR trans error, this dest peer will be skipped */ + WMI_HOST_RTT_TMR_TRANS_ERROR, + /* V3 only. If both CFR and Token mismatch, do not report */ + WMI_HOST_RTT_NO_REPORT_BAD_CFR_TOKEN, + /* For First TM, if CFR is bad, then do not report */ + WMI_HOST_RTT_NO_REPORT_FIRST_TM_BAD_CFR, + /* do not allow report type2 mix with type 0, 1 */ + WMI_HOST_RTT_REPORT_TYPE2_MIX, + /* LCI Configuration OK. - Responder only */ + WMI_HOST_RTT_LCI_CFG_OK, + /* LCR configuration OK. - Responder only */ + WMI_HOST_RTT_LCR_CFG_OK, + /* Bad configuration LCI (or) LCR request - Responder only */ + WMI_HOST_RTT_CFG_ERROR, + WMI_HOST_WMI_RTT_REJECT_MAX, +} WMI_HOST_RTT_ERROR_INDICATOR; +typedef struct { + wmi_host_rtt_event_hdr hdr; + WMI_HOST_RTT_ERROR_INDICATOR reject_reason; +} wmi_host_rtt_error_report_event; + +#if defined(AR9888) +typedef enum { + WMI_HOST_PROF_CPU_IDLE, + WMI_HOST_PROF_PPDU_PROC, + WMI_HOST_PROF_PPDU_POST, + WMI_HOST_PROF_HTT_TX_INPUT, + WMI_HOST_PROF_MSDU_ENQ, + WMI_HOST_PROF_PPDU_POST_HAL, + WMI_HOST_PROF_COMPUTE_TX_TIME, + + /* Add new ID's above this. 
 */
	WMI_HOST_PROF_MAX_ID,
} wmi_host_profile_id_t;
#endif

#define WMI_HOST_WLAN_PROFILE_MAX_HIST 3
#define WMI_HOST_WLAN_PROFILE_MAX_BIN_CNT 32

#if defined(AR9888)
#define WMI_HOST_MAX_PROFILE WMI_HOST_PROF_MAX_ID
#else
#define WMI_HOST_MAX_PROFILE WMI_HOST_WLAN_PROFILE_MAX_BIN_CNT
#endif

/**
 * struct wmi_host_wlan_profile - Host profile param
 * @id: profile id
 * @cnt: Count
 * @tot: total time (units presumably us, matching the profile ctx — confirm)
 * @min: minimum
 * @max: maximum
 * @hist_intvl: history interval
 * @hist: profile data history
 */
typedef struct {
	uint32_t id;
	uint32_t cnt;
	uint32_t tot;
	uint32_t min;
	uint32_t max;
	uint32_t hist_intvl;
	uint32_t hist[WMI_HOST_WLAN_PROFILE_MAX_HIST];
} wmi_host_wlan_profile_t;

/**
 * struct wmi_host_wlan_profile_ctx_t - profile context
 * @tot: time in us
 * @tx_msdu_cnt: MSDU TX count
 * @tx_mpdu_cnt: MPDU TX count
 * @tx_ppdu_cnt: PPDU TX count
 * @rx_msdu_cnt: MSDU RX count
 * @rx_mpdu_cnt: MPDU RX count
 * @bin_count: Bin count
 */
typedef struct {
	uint32_t tot;
	uint32_t tx_msdu_cnt;
	uint32_t tx_mpdu_cnt;
	uint32_t tx_ppdu_cnt;
	uint32_t rx_msdu_cnt;
	uint32_t rx_mpdu_cnt;
	uint32_t bin_count;
} wmi_host_wlan_profile_ctx_t;

/**
 * struct wmi_host_chan_info_event - Channel info WMI event
 * @pdev_id: pdev_id
 * @err_code: Error code
 * @freq: Channel freq
 * @cmd_flags: Read flags
 * @noise_floor: Noise Floor value
 * @rx_clear_count: rx clear count
 * @cycle_count: cycle count
 * @chan_tx_pwr_range: channel tx power per range
 * @chan_tx_pwr_tp: channel tx power per throughput
 * @rx_frame_count: rx frame count
 * @rx_11b_mode_data_duration: 11b mode data duration
 * @my_bss_rx_cycle_count: BSS rx cycle count
 * @tx_frame_cnt: tx frame count
 * @mac_clk_mhz: mac clock frequency in MHz
 * @vdev_id: unique id identifying the VDEV
 */
typedef struct {
	uint32_t pdev_id;
	uint32_t err_code;
	uint32_t freq;
	uint32_t cmd_flags;
	uint32_t noise_floor;
	uint32_t rx_clear_count;
	uint32_t cycle_count;
	uint32_t chan_tx_pwr_range;
	uint32_t chan_tx_pwr_tp;
	uint32_t rx_frame_count;
	uint32_t rx_11b_mode_data_duration;
	uint32_t my_bss_rx_cycle_count;
	uint32_t tx_frame_cnt;
	uint32_t mac_clk_mhz;
	uint32_t vdev_id;
} wmi_host_chan_info_event;

/**
 * struct wmi_host_pdev_channel_hopping_event
 * @pdev_id: pdev_id
 * @noise_floor_report_iter: Noise threshold iterations with high values
 * @noise_floor_total_iter: Total noise threshold iterations
 */
typedef struct {
	uint32_t pdev_id;
	uint32_t noise_floor_report_iter;
	uint32_t noise_floor_total_iter;
} wmi_host_pdev_channel_hopping_event;

/**
 * struct wmi_host_pdev_bss_chan_info_event
 * @pdev_id: pdev_id
 * @freq: Units in MHz
 * @noise_floor: units are dBm
 * @rx_clear_count_low: rx clear count, lower 32 bits
 * @rx_clear_count_high: rx clear count, upper 32 bits
 * @cycle_count_low: cycle count, lower 32 bits
 * @cycle_count_high: cycle count, upper 32 bits
 * @tx_cycle_count_low: tx cycle count, lower 32 bits
 * @tx_cycle_count_high: tx cycle count, upper 32 bits
 * @rx_cycle_count_low: rx cycle count, lower 32 bits
 * @rx_cycle_count_high: rx cycle count, upper 32 bits
 * @rx_bss_cycle_count_low: BSS rx cycle count, lower 32 bits
 * @rx_bss_cycle_count_high: BSS rx cycle count, upper 32 bits
 * @reserved: reserved for future use
 */
typedef struct {
	uint32_t pdev_id;
	uint32_t freq;
	uint32_t noise_floor;
	uint32_t rx_clear_count_low;
	uint32_t rx_clear_count_high;
	uint32_t cycle_count_low;
	uint32_t cycle_count_high;
	uint32_t tx_cycle_count_low;
	uint32_t tx_cycle_count_high;
	uint32_t rx_cycle_count_low;
	uint32_t rx_cycle_count_high;
	uint32_t rx_bss_cycle_count_low;
	uint32_t rx_bss_cycle_count_high;
	uint32_t reserved;
} wmi_host_pdev_bss_chan_info_event;

#define WMI_HOST_INST_STATS_INVALID_RSSI 0
/**
 * struct wmi_host_inst_stats_resp
 * @iRSSI: Instantaneous RSSI
 * @peer_macaddr: peer mac address
 * @pdev_id: pdev_id
 */
typedef struct {
	uint32_t iRSSI;
	wmi_host_mac_addr peer_macaddr;
	uint32_t pdev_id;
} wmi_host_inst_stats_resp;

/* Event definition and new structure addition to send event
 * to host to block/unblock tx data traffic based on peer_ast_idx or vdev id
 */
#define WMI_HOST_INVALID_PEER_AST_INDEX 0xffff
#define WMI_HOST_TX_DATA_TRAFFIC_CTRL_BLOCK 0x1
#define WMI_HOST_TX_DATA_TRAFFIC_CTRL_UNBLOCK 0x2
/**
 * struct wmi_host_tx_data_traffic_ctrl_event
 * @peer_ast_idx: For vdev based control, peer_ast_idx will be
 * WMI_INVALID_PEER_AST_INDEX
 * @vdev_id: only applies if peer_ast_idx == INVALID
 * @ctrl_cmd: WMI_TX_DATA_TRAFFIC_CTRL_BLOCK or
 * WMI_TX_DATA_TRAFFIC_CTRL_UNBLOCK
 */
typedef struct {
	uint32_t peer_ast_idx;
	uint32_t vdev_id;
	uint32_t ctrl_cmd;
} wmi_host_tx_data_traffic_ctrl_event;

enum {
	WMI_HOST_ATF_PEER_STATS_DISABLED = 0,
	WMI_HOST_ATF_PEER_STATS_ENABLED = 1,
};

/* Accessors for the packed fields of wmi_host_atf_peer_stats_info:
 * field1 bits 15:0 hold the peer AST index; field2 holds the used
 * (bits 31:16) and unused (bits 15:0) token counts.
 */
#define WMI_HOST_ATF_PEER_STATS_GET_PEER_AST_IDX(token_info) \
	(token_info.field1 & 0xffff)

#define WMI_HOST_ATF_PEER_STATS_GET_USED_TOKENS(token_info) \
	((token_info.field2 & 0xffff0000) >> 16)

#define WMI_HOST_ATF_PEER_STATS_GET_UNUSED_TOKENS(token_info) \
	(token_info.field2 & 0xffff)

#define WMI_HOST_ATF_PEER_STATS_SET_PEER_AST_IDX(token_info, peer_ast_idx) \
	do { \
		token_info.field1 &= 0xffff0000; \
		token_info.field1 |= ((peer_ast_idx) & 0xffff); \
	} while (0)

#define WMI_HOST_ATF_PEER_STATS_SET_USED_TOKENS(token_info, used_token) \
	do { \
		token_info.field2 &= 0x0000ffff; \
		token_info.field2 |= (((used_token) & 0xffff) << 16); \
	} while (0)

#define WMI_HOST_ATF_PEER_STATS_SET_UNUSED_TOKENS(token_info, unused_token) \
	do { \
		token_info.field2 &= 0xffff0000; \
		token_info.field2 |= ((unused_token) & 0xffff); \
	} while (0)

/**
 * struct wmi_host_atf_peer_stats_info
 * @field1: bits 15:0 peer_ast_index WMI_ATF_PEER_STATS_GET_PEER_AST_IDX
 *          bits 31:16 reserved
 * @field2: bits 15:0 used tokens WMI_ATF_PEER_STATS_GET_USED_TOKENS
 *          bits 31:16 unused tokens WMI_ATF_PEER_STATS_GET_UNUSED_TOKENS
 * @field3: for future use
 */
typedef struct {
	uint32_t field1;
	uint32_t field2;
	uint32_t field3;
} wmi_host_atf_peer_stats_info;

/**
 * struct wmi_host_atf_peer_stats_event
 * @pdev_id: pdev_id
 * @num_atf_peers: number of peers in token_info_list
 * @comp_usable_airtime: computed usable airtime in tokens
 * @reserved: reserved for future use
 * @token_info_list: variable-length list of num_atf_peers entries
 *                   (declared as [1]; actual length is num_atf_peers)
 */
typedef struct {
	uint32_t pdev_id;
	uint32_t num_atf_peers;
	uint32_t comp_usable_airtime;
	uint32_t reserved[4];
	wmi_host_atf_peer_stats_info token_info_list[1];
} wmi_host_atf_peer_stats_event;

/**
 * struct wmi_host_ath_dcs_cw_int
 * @channel: either number or freq in mhz
 */
typedef struct {
	uint32_t channel;
} wmi_host_ath_dcs_cw_int;

#define WMI_MAX_POWER_DBG_ARGS 8

/**
 * struct wmi_power_dbg_params - power debug command parameter
 * @pdev_id: subsystem identifier
 * @module_id: parameter id
 * @num_args: number of arguments
 * @args: arguments
 */
struct wmi_power_dbg_params {
	uint32_t pdev_id;
	uint32_t module_id;
	uint32_t num_args;
	uint32_t args[WMI_MAX_POWER_DBG_ARGS];
};

/**
 * struct wmi_adaptive_dwelltime_params - the adaptive dwelltime params
 * @vdev_id: vdev id
 * @is_enabled: Adaptive dwell time is enabled/disabled
 * @dwelltime_mode: global default adaptive dwell mode
 * @lpf_weight: weight to calculate the average low pass
 * filter for channel congestion
 * @passive_mon_intval: intval to monitor wifi activity in passive scan in msec
 * @wifi_act_threshold: % of wifi activity used in passive scan 0-100
 *
 */
struct wmi_adaptive_dwelltime_params {
	uint32_t vdev_id;
	bool is_enabled;
	enum scan_dwelltime_adaptive_mode dwelltime_mode;
	uint8_t lpf_weight;
	uint8_t passive_mon_intval;
	uint8_t wifi_act_threshold;
};

/**
 * struct wmi_per_roam_config - per based roaming parameters
 * @enable: if PER based roaming is enabled/disabled
 * @tx_high_rate_thresh: high rate threshold at which PER based
 *     roam will stop in tx path
 * @rx_high_rate_thresh: high rate threshold at which PER based
 *     roam will stop in rx path
 * @tx_low_rate_thresh: rate below which traffic will be considered
 *     for PER based roaming in Tx path
 * @rx_low_rate_thresh: rate below which traffic will be considered
 *     for PER based roaming in Rx path
 * @tx_rate_thresh_percnt: % above which when traffic is below low_rate_thresh
 *     will be considered for PER based scan in tx path
 * @rx_rate_thresh_percnt: % above which when traffic is below low_rate_thresh
 *     will be considered for PER based scan in rx path
 * @per_rest_time: time for which PER based roam will wait once it
 *     issues a roam scan.
 * @tx_per_mon_time: Minimum time required to be considered as valid scenario
 *     for PER based roam in tx path
 * @rx_per_mon_time: Minimum time required to be considered as valid scenario
 *     for PER based roam in rx path
 * @min_candidate_rssi: Minimum RSSI threshold for candidate AP to be used for
 *     PER based roaming
 */
struct wmi_per_roam_config {
	uint32_t enable;
	uint32_t tx_high_rate_thresh;
	uint32_t rx_high_rate_thresh;
	uint32_t tx_low_rate_thresh;
	uint32_t rx_low_rate_thresh;
	uint32_t tx_rate_thresh_percnt;
	uint32_t rx_rate_thresh_percnt;
	uint32_t per_rest_time;
	uint32_t tx_per_mon_time;
	uint32_t rx_per_mon_time;
	uint32_t min_candidate_rssi;
};

/**
 * struct wmi_per_roam_config_req: PER based roaming config request
 * @vdev_id: vdev id on which config needs to be set
 * @per_config: PER config
 */
struct wmi_per_roam_config_req {
	uint8_t vdev_id;
	struct wmi_per_roam_config per_config;
};

/**
 * struct wmi_fw_dump_seg_req - individual segment details
 * @seg_id: segment id.
 * @seg_start_addr_lo: lower address of the segment.
 * @seg_start_addr_hi: higher address of the segment.
 * @seg_length: length of the segment.
 * @dst_addr_lo: lower address of the destination buffer.
 * @dst_addr_hi: higher address of the destination buffer.
 *
 * This structure carries the information to firmware about the
 * individual segments. This structure is part of firmware memory
 * dump request.
 */
struct wmi_fw_dump_seg_req {
	uint8_t seg_id;
	uint32_t seg_start_addr_lo;
	uint32_t seg_start_addr_hi;
	uint32_t seg_length;
	uint32_t dst_addr_lo;
	uint32_t dst_addr_hi;
};

/**
 * enum wmi_userspace_log_level - Log level at userspace
 * @WMI_LOG_LEVEL_NO_COLLECTION: verbose_level 0 corresponds to no collection
 * @WMI_LOG_LEVEL_NORMAL_COLLECT: verbose_level 1 correspond to normal log
 * level with minimal user impact. This is the default value.
 * @WMI_LOG_LEVEL_ISSUE_REPRO: verbose_level 2 are enabled when user is lazily
 * trying to reproduce a problem, wifi performances and power can be impacted
 * but device should not otherwise be significantly impacted
 * @WMI_LOG_LEVEL_ACTIVE: verbose_level 3+ are used when trying to
 * actively debug a problem
 *
 * Various log levels defined in the userspace for logging applications
 */
enum wmi_userspace_log_level {
	WMI_LOG_LEVEL_NO_COLLECTION,
	WMI_LOG_LEVEL_NORMAL_COLLECT,
	WMI_LOG_LEVEL_ISSUE_REPRO,
	WMI_LOG_LEVEL_ACTIVE,
};

/**
 * enum wmi_host_hw_mode_config_type - HW mode config type replicated from
 * FW header
 * @WMI_HOST_HW_MODE_SINGLE: Only one PHY is active.
 * @WMI_HOST_HW_MODE_DBS: Both PHYs are active in different bands,
 *                        one in 2G and another in 5G.
 * @WMI_HOST_HW_MODE_SBS_PASSIVE: Both PHYs are in passive mode (only rx) in
 *                        same band; no tx allowed.
 * @WMI_HOST_HW_MODE_SBS: Both PHYs are active in the same band.
 *                        Support for both PHYs within one band is planned
 *                        for 5G only(as indicated in WMI_MAC_PHY_CAPABILITIES),
 *                        but could be extended to other bands in the future.
 *                        The separation of the band between the two PHYs needs
 *                        to be communicated separately.
 * @WMI_HOST_HW_MODE_DBS_SBS: 3 PHYs, with 2 on the same band doing SBS
 *                        as in WMI_HW_MODE_SBS, and 3rd on the other band
 * @WMI_HOST_HW_MODE_DBS_OR_SBS: Two PHY with one PHY capable of both 2G and
 *                        5G. It can support SBS (5G + 5G) OR DBS (5G + 2G).
 * @WMI_HOST_HW_MODE_MAX: Max hw_mode_id. Used to indicate invalid mode.
 */
enum wmi_host_hw_mode_config_type {
	WMI_HOST_HW_MODE_SINGLE = 0,
	WMI_HOST_HW_MODE_DBS = 1,
	WMI_HOST_HW_MODE_SBS_PASSIVE = 2,
	WMI_HOST_HW_MODE_SBS = 3,
	WMI_HOST_HW_MODE_DBS_SBS = 4,
	WMI_HOST_HW_MODE_DBS_OR_SBS = 5,
	WMI_HOST_HW_MODE_MAX,
};

/*
 * struct wmi_host_peer_txmu_cnt_event
 * @tx_mu_transmitted - MU-MIMO tx count
 */
typedef struct {
	uint32_t tx_mu_transmitted;
} wmi_host_peer_txmu_cnt_event;

#define MAX_SAR_LIMIT_ROWS_SUPPORTED 64
/**
 * struct sar_limit_cmd_row - sar limits row
 * @band_id: Optional param for frequency band
 *           See %enum wmi_sar_band_id_flags for possible values
 * @chain_id: Optional param for antenna chain id
 * @mod_id: Optional param for modulation scheme
 *          See %enum wmi_sar_mod_id_flags for possible values
 * @limit_value: Mandatory param providing power limits in steps of 0.5 dbm
 * @validity_bitmap: bitmap of valid optional params in sar_limit_cmd_row struct
 *                   See WMI_SAR_*_VALID_MASK for possible values
 */
struct sar_limit_cmd_row {
	uint32_t band_id;
	uint32_t chain_id;
	uint32_t mod_id;
	uint32_t limit_value;
	uint32_t validity_bitmap;
};

/**
 * struct sar_limit_cmd_params - sar limits params
 * @sar_enable: flag to enable SAR
 *              See %enum wmi_sar_feature_state_flags for possible values
 * @num_limit_rows: number of items in sar_limits
 * @commit_limits: indicates firmware to start apply new SAR values
 * @sar_limit_row_list: pointer to array of sar limit rows
 */
struct sar_limit_cmd_params {
	uint32_t sar_enable;
	uint32_t num_limit_rows;
	uint32_t commit_limits;
	struct sar_limit_cmd_row *sar_limit_row_list;
};

/**
 * struct sar_limit_event_row - sar limits row
 * @band_id: Frequency band.
 *           See %enum wmi_sar_band_id_flags for possible values
 * @chain_id: Chain id
 * @mod_id: Modulation scheme
 *          See %enum wmi_sar_mod_id_flags for possible values
 * @limit_value: Power limits in steps of 0.5 dbm that is currently active for
 *     the given @band_id, @chain_id, and @mod_id
 */
struct sar_limit_event_row {
	uint32_t band_id;
	uint32_t chain_id;
	uint32_t mod_id;
	uint32_t limit_value;
};

/**
 * struct sar_limit_event - sar limits params
 * @sar_enable: Current status of SAR enablement.
 *              See %enum wmi_sar_feature_state_flags for possible values
 * @num_limit_rows: number of items in sar_limits
 * @sar_limit_row: array of sar limit rows. Only @num_limit_rows
 *                 should be considered valid.
 */
struct sar_limit_event {
	uint32_t sar_enable;
	uint32_t num_limit_rows;
	struct sar_limit_event_row
		sar_limit_row[MAX_SAR_LIMIT_ROWS_SUPPORTED];
};

/*
 * struct wmi_host_peer_gid_userpos_list_event
 * @usr_list - User list
 */
#define GID_OVERLOAD_GROUP_COUNT 15
typedef struct {
	uint32_t usr_list[GID_OVERLOAD_GROUP_COUNT];
} wmi_host_peer_gid_userpos_list_event;

/**
 * enum rcpi_measurement_type - for identifying type of rcpi measurement
 * @RCPI_MEASUREMENT_TYPE_AVG_MGMT: avg rcpi of mgmt frames
 * @RCPI_MEASUREMENT_TYPE_AVG_DATA: avg rcpi of data frames
 * @RCPI_MEASUREMENT_TYPE_LAST_MGMT: rcpi of last mgmt frame
 * @RCPI_MEASUREMENT_TYPE_LAST_DATA: rcpi of last data frame
 * @RCPI_MEASUREMENT_TYPE_INVALID: invalid rcpi measurement type
 */
enum rcpi_measurement_type {
	RCPI_MEASUREMENT_TYPE_AVG_MGMT = 0x1,
	RCPI_MEASUREMENT_TYPE_AVG_DATA = 0x2,
	RCPI_MEASUREMENT_TYPE_LAST_MGMT = 0x3,
	RCPI_MEASUREMENT_TYPE_LAST_DATA = 0x4,
	RCPI_MEASUREMENT_TYPE_INVALID = 0x5,
};

/**
 * struct rcpi_req - RCPI req parameter
 * @vdev_id: virtual device id
 * @measurement_type: type of rcpi from enum wmi_rcpi_measurement_type
 * @mac_addr: peer mac addr for which measurement is required
 */
struct rcpi_req {
	uint32_t vdev_id;
	enum rcpi_measurement_type measurement_type;
	uint8_t mac_addr[IEEE80211_ADDR_LEN];
};

/**
 * struct rcpi_res - RCPI response parameter
 * @vdev_id: virtual device id
 * @measurement_type: type of rcpi from enum wmi_rcpi_measurement_type
 * @mac_addr: peer mac addr for which measurement is required
 * @rcpi_value: value of RCPI computed by firmware
 */
struct rcpi_res {
	uint32_t vdev_id;
	enum rcpi_measurement_type measurement_type;
	uint8_t mac_addr[IEEE80211_ADDR_LEN];
	int32_t rcpi_value;
};

#define WMI_HOST_BOARD_MCN_STRING_MAX_SIZE 19
#define WMI_HOST_BOARD_MCN_STRING_BUF_SIZE \
	(WMI_HOST_BOARD_MCN_STRING_MAX_SIZE+1) /* null-terminator */

typedef struct {
	uint32_t software_cal_version;
	uint32_t board_cal_version;
	/* board_mcn_detail:
	 * Provide a calibration message string for the host to display.
	 * Note: on a big-endian host, the 4 bytes within each uint32_t portion
	 * of a WMI message will be automatically byteswapped by the copy engine
	 * as the messages are transferred between host and target, to convert
	 * between the target's little-endianness and the host's big-endianness.
	 * Consequently, a big-endian host should manually unswap the bytes
	 * within the board_mcn_detail string buffer to get the bytes back into
	 * the desired natural order.
	 */
	uint8_t board_mcn_detail[WMI_HOST_BOARD_MCN_STRING_BUF_SIZE];
	uint32_t cal_ok; /* filled with CALIBRATION_STATUS enum value */
} wmi_host_pdev_check_cal_version_event;

/**
 * enum WMI_HOST_CALIBRATION_STATUS - Host defined Enums for cal status
 * @WMI_HOST_NO_FEATURE: The board was calibrated with a meta
 *                       which did not have this feature
 * @WMI_HOST_CALIBRATION_OK: The calibration status is OK
 * @WMI_HOST_CALIBRATION_NOT_OK: The calibration status is NOT OK
 */
enum WMI_HOST_CALIBRATION_STATUS {
	WMI_HOST_NO_FEATURE = 0,
	WMI_HOST_CALIBRATION_OK,
	WMI_HOST_CALIBRATION_NOT_OK,
};

/**
 * struct wmi_host_pdev_utf_event - Host defined struct to hold utf event data
 * @data: Pointer to data
 * @datalen: Data length
 * @pdev_id: Pdev_id of data
 *
 */
struct wmi_host_pdev_utf_event {
	uint8_t *data;
	uint16_t datalen;
	uint32_t pdev_id;
};

/**
 * struct wmi_host_utf_seg_header_info - Host defined struct to map seg info in
 * UTF event
 * @len: segment length
 * @msgref: message reference
 * @segment_info: segment info
 * @pdev_id: pdev_id
 *
 */
struct wmi_host_utf_seg_header_info {
	uint32_t len;
	uint32_t msgref;
	uint32_t segment_info;
	uint32_t pdev_id;
};

/**
 * struct wmi_host_pdev_qvit_event - Host defined struct to hold qvit event data
 * @data: Pointer to data
 * @datalen: Data length
 * @pdev_id: Pdev_id of data
 *
 */
struct wmi_host_pdev_qvit_event {
	uint8_t *data;
	uint16_t datalen;
	uint32_t pdev_id;
};

/**
 * struct wmi_host_peer_delete_response_event - Peer Delete response event param
 * @vdev_id: vdev id
 * @mac_address: Peer Mac Address
 *
 */
struct wmi_host_peer_delete_response_event {
	uint32_t vdev_id;
	struct qdf_mac_addr mac_address;
};

/**
 * struct wmi_host_dcs_interference_param
 * @interference_type: Type of DCS Interference
 * @pdev_id: pdev id
 */
struct wmi_host_dcs_interference_param {
	uint32_t interference_type;
	uint32_t pdev_id;
};

/*
 * struct wmi_host_fips_event_param: FIPS event param
 * @pdev_id: pdev id
 * @error_status: Error status: 0 (no err), 1, or OPER_TIMEOUT
 * @data_len: FIPS data length
 * @data: pointer to data
 */
struct wmi_host_fips_event_param {
	uint32_t pdev_id;
	uint32_t error_status;
	uint32_t data_len;
	uint32_t *data;
};

/**
 * struct wmi_host_proxy_ast_reserve_param
 * @pdev_id: pdev id
 * @result: result
 */
struct wmi_host_proxy_ast_reserve_param {
	uint32_t pdev_id;
	uint32_t result;
};

/**
 * struct wmi_host_pdev_band_to_mac - freq range for mac
 * @pdev_id: PDEV ID to identify mac
 * @start_freq: start frequency value
 * @end_freq: end frequency value
 */
struct wmi_host_pdev_band_to_mac {
	uint32_t pdev_id;
	uint32_t start_freq;
	uint32_t end_freq;
};
#define WMI_HOST_MAX_PDEV 3

/**
 * struct wmi_init_cmd_param - INIT command params
 * @res_cfg: pointer to resource config
 * @num_mem_chunks: number of memory chunks
 * @mem_chunks: pointer to memory chunks
 * @hw_mode_id: HW mode index chosen
 * @num_band_to_mac: Number of band to mac setting
 * @band_to_mac: band to mac setting
 */
struct wmi_init_cmd_param {
	target_resource_config *res_cfg;
	uint8_t num_mem_chunks;
	struct wmi_host_mem_chunk *mem_chunks;
	uint32_t hw_mode_id;
	uint32_t num_band_to_mac;
	struct wmi_host_pdev_band_to_mac band_to_mac[WMI_HOST_MAX_PDEV];
};

/**
 * struct pdev_csa_switch_count_status - CSA switch count status event param
 * @pdev_id: Physical device identifier
 * @current_switch_count: Current CSA switch count
 * @num_vdevs: Number of vdevs that need restart
 * @vdev_ids: Array containing the vdev ids that need restart
 */
struct pdev_csa_switch_count_status {
	uint32_t pdev_id;
	uint32_t current_switch_count;
	uint32_t num_vdevs;
	uint32_t *vdev_ids;
};

/**
 * enum wmi_host_active_apf_mode - FW_ACTIVE_APF_MODE, replicated from FW header
 * @WMI_HOST_ACTIVE_APF_DISABLED: APF is disabled for all packets in active mode
 * @WMI_HOST_ACTIVE_APF_ENABLED: APF is enabled for all packets in active mode
 * @WMI_HOST_ACTIVE_APF_ADAPTIVE: APF is enabled for packets up to some
 *                                threshold in active mode
 */
enum wmi_host_active_apf_mode {
	WMI_HOST_ACTIVE_APF_DISABLED = (1 << 1),
	WMI_HOST_ACTIVE_APF_ENABLED = (1 << 2),
	WMI_HOST_ACTIVE_APF_ADAPTIVE = (1 << 3)
};

/**
 * struct coex_config_params - Coex config command params
 * @vdev_id: Virtual AP device identifier
 * @config_type: Configuration type - wmi_coex_config_type enum
 * @config_arg1: Configuration argument based on config type
 * @config_arg2: Configuration argument based on config type
 * @config_arg3: Configuration argument based on config type
 * @config_arg4: Configuration argument based on config type
 * @config_arg5: Configuration argument based on config type
 * @config_arg6: Configuration argument based on config type
 */
struct coex_config_params {
	uint32_t vdev_id;
	uint32_t config_type;
	uint32_t config_arg1;
	uint32_t config_arg2;
	uint32_t config_arg3;
	uint32_t config_arg4;
	uint32_t config_arg5;
	uint32_t config_arg6;
};

#define WMI_HOST_PDEV_ID_SOC 0xFF
#define WMI_HOST_PDEV_ID_0 0
#define WMI_HOST_PDEV_ID_1 1
#define WMI_HOST_PDEV_ID_2 2
#define WMI_HOST_PDEV_ID_INVALID 0xFFFFFFFF

/**
 * struct tbttoffset_params - Tbttoffset event params
 * @vdev_id: Virtual AP device identifier
 * @tbttoffset: Tbttoffset for the virtual AP device
 */
struct tbttoffset_params {
	uint32_t vdev_id;
	uint32_t tbttoffset;
};

#define WMI_SCAN_CLIENT_MAX 7

/**
 * struct wmi_dbs_scan_sel_params - DBS scan selection params
 * @num_clients: Number of scan clients
 * @pdev_id: pdev_id for identifying the MAC
 * @module_id: scan client module id
 * @num_dbs_scans: number of DBS scans
 * @num_non_dbs_scans: number of non-DBS scans
 */
struct wmi_dbs_scan_sel_params {
	uint32_t num_clients;
	uint32_t pdev_id;
	uint32_t module_id[WMI_SCAN_CLIENT_MAX];
	uint32_t num_dbs_scans[WMI_SCAN_CLIENT_MAX];
	uint32_t num_non_dbs_scans[WMI_SCAN_CLIENT_MAX];
};

/**
 * struct wmi_limit_off_chan_param - limit off channel parameters
 * @vdev_id: vdev id
 * @status: status of the command (enable/disable)
 * @max_offchan_time: max off channel time
 * @rest_time: home channel time
 * @skip_dfs_chans: skip dfs channels during scan
 */
struct wmi_limit_off_chan_param {
	uint32_t vdev_id;
	bool status;
	uint32_t max_offchan_time;
	uint32_t rest_time;
	bool skip_dfs_chans;
};

/**
 * struct wmi_mawc_roam_params - Motion Aided wireless connectivity params
 * @vdev_id: VDEV on which the parameters should be applied
 * @enable: MAWC roaming feature enable/disable
 * @traffic_load_threshold: Traffic threshold in kBps for MAWC roaming
 * @best_ap_rssi_threshold: AP RSSI Threshold for MAWC roaming
 * @rssi_stationary_high_adjust: High RSSI adjustment value to suppress scan
 * @rssi_stationary_low_adjust: Low RSSI adjustment value to suppress scan
 */
struct wmi_mawc_roam_params {
	uint8_t vdev_id;
	bool enable;
	uint32_t traffic_load_threshold;
	uint32_t best_ap_rssi_threshold;
	uint8_t rssi_stationary_high_adjust;
	uint8_t rssi_stationary_low_adjust;
};
/**
 * struct wmi_btm_config - BSS Transition Management offload params
 * @vdev_id: VDEV on which the parameters should be applied
 * @btm_offload_config: BTM config
 * @btm_solicited_timeout: Timeout value for waiting BTM request
 * @btm_max_attempt_cnt: Maximum attempt for sending BTM query to ESS
 * @btm_sticky_time: Stick time after roaming to new AP by BTM
 * @disassoc_timer_threshold: threshold value till which the firmware can
 * wait before triggering the roam scan after receiving the disassoc imminent
 */
struct wmi_btm_config {
	uint8_t vdev_id;
	uint32_t btm_offload_config;
	uint32_t btm_solicited_timeout;
	uint32_t btm_max_attempt_cnt;
	uint32_t btm_sticky_time;
	uint32_t disassoc_timer_threshold;
};

/**
 * struct wmi_bss_load_config - BSS load trigger parameters
 * @vdev_id: VDEV on which the parameters should be applied
 * @bss_load_threshold: BSS load threshold after which roam scan should trigger
 * @bss_load_sample_time: sample time over which BSS load is evaluated
 *                        (units presumably ms — confirm against callers)
 */
struct wmi_bss_load_config {
	uint32_t vdev_id;
	uint32_t bss_load_threshold;
	uint32_t bss_load_sample_time;
};

/**
 * struct set_arp_stats - set/reset arp stats
 * @vdev_id: session id
 * @flag: enable/disable stats
 * @pkt_type: type of packet(1 - arp)
 * @ip_addr: subnet ipv4 address in case of encrypted packets
 * @pkt_type_bitmap: pkt bitmap
 * @tcp_src_port: tcp src port for pkt tracking
 * @tcp_dst_port: tcp dst port for pkt tracking
 * @icmp_ipv4: target ipv4 address to track ping packets
 * @reserved: reserved
 */
struct set_arp_stats {
	uint32_t vdev_id;
	uint8_t flag;
	uint8_t pkt_type;
	uint32_t ip_addr;
	uint32_t pkt_type_bitmap;
	uint32_t tcp_src_port;
	uint32_t tcp_dst_port;
	uint32_t icmp_ipv4;
	uint32_t reserved;
};

/**
 * struct get_arp_stats - get arp stats from firmware
 * @pkt_type: packet type(1 - ARP)
 * @vdev_id: session id
 */
struct get_arp_stats {
	uint8_t pkt_type;
	uint32_t vdev_id;
};

/**
 * struct wmi_host_ready_ev_param - Data received in ready event
 * @status: FW init status. Success or Failure.
 * @num_dscp_table: Number of DSCP table supported in FW
 * @num_extra_mac_addr: Extra mac address present in ready event. Used
 *     in DBDC mode to provide multiple mac per pdev.
 * @num_total_peer: Total number of peers FW could allocate. Zero means
 *     FW could allocate num peers requested by host in init.
 *     Otherwise, host need update it max_peer to this value.
 * @num_extra_peer: Number of extra peers created and used within FW. Host
 *     should expect peer_id can be num_total_peer + num_extra_peer
 *     but it can create only up to num_total_peer.
 * @agile_capability: Boolean specification of whether the target supports
 *     agile DFS, by means of using one 80 MHz radio chain for
 *     radar detection, concurrently with using another radio
 *     chain for non-160 MHz regular operation.
 */
struct wmi_host_ready_ev_param {
	uint32_t status;
	uint32_t num_dscp_table;
	uint32_t num_extra_mac_addr;
	uint32_t num_total_peer;
	uint32_t num_extra_peer;
	bool agile_capability;
};

enum bcn_offload_control_param {
	BCN_OFFLD_CTRL_TX_DISABLE = 0,
	BCN_OFFLD_CTRL_TX_ENABLE,
	BCN_OFFLD_CTRL_SWBA_DISABLE,
	BCN_OFFLD_CTRL_SWBA_ENABLE,
};

/**
 * struct bcn_offload_control - Beacon offload control params
 * @vdev_id: vdev identifier of VAP to control beacon tx
 * @bcn_ctrl_op: values from enum bcn_offload_control_param
 */
struct bcn_offload_control {
	uint32_t vdev_id;
	enum bcn_offload_control_param bcn_ctrl_op;
};

/**
 * struct wdsentry - WDS entry structure
 * @peer_mac: peer mac
 * @wds_mac: wds mac address
 * @flags: flags
 */
struct wdsentry {
	u_int8_t peer_mac[IEEE80211_ADDR_LEN];
	u_int8_t wds_mac[IEEE80211_ADDR_LEN];
	uint32_t flags;
};

/* Bit-field shift/mask pairs for the direct buffer rx ring and data
 * addresses; used with the WMI_HOST_F_MS/WMI_HOST_F_RMW accessors.
 */
#define WMI_HOST_DBR_RING_ADDR_LO_S 0
#define WMI_HOST_DBR_RING_ADDR_LO 0xffffffff

#define WMI_HOST_DBR_RING_ADDR_LO_GET(dword) \
	WMI_HOST_F_MS(dword, WMI_HOST_DBR_RING_ADDR_LO)
#define WMI_HOST_DBR_RING_ADDR_LO_SET(dword, val) \
	WMI_HOST_F_RMW(dword, val, WMI_HOST_DBR_RING_ADDR_LO)

#define WMI_HOST_DBR_RING_ADDR_HI_S 0
#define WMI_HOST_DBR_RING_ADDR_HI 0xf

#define WMI_HOST_DBR_RING_ADDR_HI_GET(dword) \
	WMI_HOST_F_MS(dword, WMI_HOST_DBR_RING_ADDR_HI)
#define WMI_HOST_DBR_RING_ADDR_HI_SET(dword, val) \
	WMI_HOST_F_RMW(dword, val, WMI_HOST_DBR_RING_ADDR_HI)

#define WMI_HOST_DBR_DATA_ADDR_LO_S 0
#define WMI_HOST_DBR_DATA_ADDR_LO 0xffffffff

#define WMI_HOST_DBR_DATA_ADDR_LO_GET(dword) \
	WMI_HOST_F_MS(dword, WMI_HOST_DBR_DATA_ADDR_LO)
#define WMI_HOST_DBR_DATA_ADDR_LO_SET(dword, val) \
	WMI_HOST_F_RMW(dword, val, WMI_HOST_DBR_DATA_ADDR_LO)

#define WMI_HOST_DBR_DATA_ADDR_HI_S 0
#define WMI_HOST_DBR_DATA_ADDR_HI 0xf

#define WMI_HOST_DBR_DATA_ADDR_HI_GET(dword) \
	WMI_HOST_F_MS(dword, WMI_HOST_DBR_DATA_ADDR_HI)
#define WMI_HOST_DBR_DATA_ADDR_HI_SET(dword, val) \
	WMI_HOST_F_RMW(dword, val, WMI_HOST_DBR_DATA_ADDR_HI)

#define WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA_S 12
#define WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA 0xfffff

#define WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA_GET(dword) \
	WMI_HOST_F_MS(dword, WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA)
#define WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA_SET(dword, val) \
	WMI_HOST_F_RMW(dword, val, WMI_HOST_DBR_DATA_ADDR_HI_HOST_DATA)

/**
 * struct direct_buf_rx_metadata: direct buffer metadata
 *
 * @noisefloor: noisefloor
 */
struct direct_buf_rx_metadata {
	int32_t noisefloor[WMI_HOST_MAX_NUM_CHAINS];
};

/**
 * struct direct_buf_rx_entry: direct buffer rx release entry structure
 *
 * @paddr_lo: LSB 32-bits of the buffer
 * @paddr_hi: MSB 32-bits of the buffer
 * @len: Length of the buffer
 */
struct direct_buf_rx_entry {
	uint32_t paddr_lo;
	uint32_t paddr_hi;
	uint32_t len;
};

/**
 * struct direct_buf_rx_rsp: direct buffer rx response structure
 *
 * @pdev_id: Index of the pdev for which response is received
 * @mod_id: Index of the module for which response is received
 * @num_buf_release_entry: Number of buffers released through event
 * @num_meta_data_entry: Number of metadata entries released through event
 * @dbr_entries: Pointer to direct buffer rx entry struct
 */
struct direct_buf_rx_rsp {
	uint32_t pdev_id;
	uint32_t mod_id;
	uint32_t num_buf_release_entry;
	uint32_t num_meta_data_entry;
	struct direct_buf_rx_entry *dbr_entries;
};

/**
 * struct direct_buf_rx_cfg_req: direct buffer rx config request structure
 *
 * @pdev_id: Index of the pdev for which response is received
 * @mod_id: Index of the module for which response is received
 * @base_paddr_lo: Lower 32bits of ring base address
 * @base_paddr_hi: Higher 32bits of ring base address
 * @head_idx_paddr_lo: Lower 32bits of head idx register address
 * @head_idx_paddr_hi: Higher 32bits of head idx register address
 * @tail_idx_paddr_hi: Higher 32bits of tail idx register address
 * @tail_idx_paddr_lo: Lower 32bits of tail idx register address
 * @buf_size: Size of the buffer for each pointer in the ring
 * @num_elems: Number of pointers allocated and part of the source ring
 * @event_timeout_ms: timeout in ms after which an event is generated
 *                    (semantics defined by firmware — confirm)
 * @num_resp_per_event: number of responses aggregated per event
 *                      (semantics defined by firmware — confirm)
 */
struct direct_buf_rx_cfg_req {
	uint32_t pdev_id;
	uint32_t mod_id;
	uint32_t base_paddr_lo;
	uint32_t base_paddr_hi;
	uint32_t head_idx_paddr_lo;
	uint32_t head_idx_paddr_hi;
	uint32_t tail_idx_paddr_hi;
	uint32_t tail_idx_paddr_lo;
	uint32_t buf_size;
	uint32_t num_elems;
	uint32_t event_timeout_ms;
	uint32_t num_resp_per_event;
};

/**
 * struct wmi_obss_detection_cfg_param - obss detection cfg
 * @vdev_id: vdev id
 * @obss_detect_period_ms: detection period in ms
 * @obss_11b_ap_detect_mode: detect whether there is 11b ap/ibss
 * @obss_11b_sta_detect_mode: detect whether there is 11b sta
 *                            connected with other APs
 * @obss_11g_ap_detect_mode: detect whether there is 11g AP
 * @obss_11a_detect_mode: detect whether there is legacy 11a traffic
 * @obss_ht_legacy_detect_mode: detect whether there is ap which is
 *                              ht legacy mode
 * @obss_ht_mixed_detect_mode: detect whether there is ap which is ht mixed mode
 * @obss_ht_20mhz_detect_mode: detect whether there is ap which has 20M only
 *                             station
 */
struct wmi_obss_detection_cfg_param {
	uint32_t vdev_id;
	uint32_t obss_detect_period_ms;
	uint32_t obss_11b_ap_detect_mode;
	uint32_t obss_11b_sta_detect_mode;
	uint32_t obss_11g_ap_detect_mode;
	uint32_t obss_11a_detect_mode;
	uint32_t obss_ht_legacy_detect_mode;
	uint32_t obss_ht_mixed_detect_mode;
	uint32_t obss_ht_20mhz_detect_mode;
};

/**
 * enum wmi_obss_detection_reason - obss detection event reasons
 * @OBSS_OFFLOAD_DETECTION_DISABLED: OBSS detection disabled
 * @OBSS_OFFLOAD_DETECTION_PRESENT: OBSS present detection
 * @OBSS_OFFLOAD_DETECTION_ABSENT: OBSS absent detection
+ *
+ * Defines different types of reasons for obss detection event from firmware.
+ */
+enum wmi_obss_detection_reason {
+ OBSS_OFFLOAD_DETECTION_DISABLED = 0,
+ OBSS_OFFLOAD_DETECTION_PRESENT = 1,
+ OBSS_OFFLOAD_DETECTION_ABSENT = 2,
+};
+
+/**
+ * struct wmi_obss_detect_info - OBSS detection info from firmware
+ * @vdev_id: ID of the vdev to which this info belongs.
+ * @reason: Indicate if present or Absent detection,
+ * also if not supported offload for this vdev.
+ * @matched_detection_masks: Detection bit map.
+ * @matched_bssid_addr: MAC address, valid only if info is present detection.
+ */
+struct wmi_obss_detect_info {
+ uint32_t vdev_id;
+ enum wmi_obss_detection_reason reason;
+ uint32_t matched_detection_masks;
+ uint8_t matched_bssid_addr[IEEE80211_ADDR_LEN];
+};
+
+/**
+ * struct wmi_11k_offload_neighbor_report_params - neighbor report offload
+ * trigger params
+ * @time_offset: time offset after 11k offload command to trigger a neighbor
+ * report request (in seconds)
+ * @low_rssi_offset: Offset from rssi threshold to trigger a neighbor
+ * report request (in dBm)
+ * @bmiss_count_trigger: Number of beacon miss events to trigger neighbor
+ * report request
+ * @per_threshold_offset: offset from PER threshold to trigger neighbor
+ * report request (in %)
+ * @neighbor_report_cache_timeout: timeout after which new trigger can enable
+ * sending of a neighbor report request (in seconds)
+ * @max_neighbor_report_req_cap: max number of neighbor report requests that
+ * can be sent to the peer in the current session
+ * @ssid: Current connect SSID info
+ */
+struct wmi_11k_offload_neighbor_report_params {
+ uint32_t time_offset;
+ uint32_t low_rssi_offset;
+ uint32_t bmiss_count_trigger;
+ uint32_t per_threshold_offset;
+ uint32_t neighbor_report_cache_timeout;
+ uint32_t max_neighbor_report_req_cap;
+ struct mac_ssid ssid;
+};
+
+/**
+ * struct wmi_11k_offload_params - offload 11k features to FW
+ * @vdev_id: vdev id
+ * @offload_11k_bitmask: bitmask to specify offloaded features
+ * 
B0: Neighbor Report Request offload + * B1-B31: Reserved + * @neighbor_report_params: neighbor report offload params + */ +struct wmi_11k_offload_params { + uint32_t vdev_id; + uint32_t offload_11k_bitmask; + struct wmi_11k_offload_neighbor_report_params neighbor_report_params; +}; + +/** + * struct wmi_invoke_neighbor_report_params - Invoke neighbor report request + * from IW to FW + * @vdev_id: vdev id + * @send_resp_to_host: bool to send response to host or not + * @ssid: ssid given from the IW command + */ +struct wmi_invoke_neighbor_report_params { + uint32_t vdev_id; + uint32_t send_resp_to_host; + struct mac_ssid ssid; +}; + +/** + * enum wmi_obss_color_collision_evt_type - bss color collision event type + * @OBSS_COLOR_COLLISION_DETECTION_DISABLE: OBSS color detection disabled + * @OBSS_COLOR_COLLISION_DETECTION: OBSS color collision detection + * @OBSS_COLOR_FREE_SLOT_TIMER_EXPIRY: OBSS free slot detection with + * within expiry period + * @OBSS_COLOR_FREE_SLOT_AVAILABLE: OBSS free slot detection + * + * Defines different types of type for obss color collision event type. + */ +enum wmi_obss_color_collision_evt_type { + OBSS_COLOR_COLLISION_DETECTION_DISABLE = 0, + OBSS_COLOR_COLLISION_DETECTION = 1, + OBSS_COLOR_FREE_SLOT_TIMER_EXPIRY = 2, + OBSS_COLOR_FREE_SLOT_AVAILABLE = 3, +}; + +/** + * struct wmi_obss_color_collision_cfg_param - obss color collision cfg + * @vdev_id: vdev id + * @flags: proposed for future use cases, currently not used. + * @evt_type: bss color collision event. + * @current_bss_color: current bss color. + * @detection_period_ms: scan interval for both AP and STA mode. + * @scan_period_ms: scan period for passive scan to detect collision. + * @free_slot_expiry_time_ms: FW to notify host at timer expiry after + * which Host will disable the bss color. 
+ */ +struct wmi_obss_color_collision_cfg_param { + uint32_t vdev_id; + uint32_t flags; + enum wmi_obss_color_collision_evt_type evt_type; + uint32_t current_bss_color; + uint32_t detection_period_ms; + uint32_t scan_period_ms; + uint32_t free_slot_expiry_time_ms; +}; + +/** + * struct wmi_obss_color_collision_info - bss color detection info from firmware + * @vdev_id: ID of the vdev to which this info belongs. + * @evt_type: bss color collision event. + * @obss_color_bitmap_bit0to31: Bit set indicating BSS color present. + * @obss_color_bitmap_bit32to63: Bit set indicating BSS color present. + */ +struct wmi_obss_color_collision_info { + uint32_t vdev_id; + enum wmi_obss_color_collision_evt_type evt_type; + uint32_t obss_color_bitmap_bit0to31; + uint32_t obss_color_bitmap_bit32to63; +}; + +#ifdef QCA_SUPPORT_CP_STATS +/** + * struct wmi_host_congestion_stats - host definition of congestion stats + * @vdev_id: ID of the vdev to which this info belongs. + * @congestion: This field holds the congestion percentage = + * (busy_time/total_time)*100 + * for the interval from when the vdev was started to the current time + * (or the time at which the vdev was stopped). 
+ */ +struct wmi_host_congestion_stats { + uint32_t vdev_id; + uint32_t congestion; +}; +#endif + +#ifdef FEATURE_WLAN_APF +/** + * struct wmi_apf_write_memory_params - Android Packet Filter write memory + * params + * @vdev_id: VDEV on which APF memory is to be written + * @apf_version: APF version number + * @program_len: Length reserved for program in the APF work memory + * @addr_offset: Relative address in APF work memory to start writing + * @length: Size of the write + * @buf: Pointer to the buffer + */ +struct wmi_apf_write_memory_params { + uint8_t vdev_id; + uint32_t apf_version; + uint32_t program_len; + uint32_t addr_offset; + uint32_t length; + uint8_t *buf; +}; + +/** + * struct wmi_apf_read_memory_params - Android Packet Filter read memory params + * @vdev_id: vdev id + * @addr_offset: Relative address in APF work memory to read from + * @length: Size of the memory fetch + */ +struct wmi_apf_read_memory_params { + uint8_t vdev_id; + uint32_t addr_offset; + uint32_t length; +}; + +/** + * struct wmi_apf_read_memory_resp_event_params - Event containing read Android + * Packet Filter memory response + * @vdev_id: vdev id + * @offset: Read memory offset + * @length: Read memory length + * @more_data: Indicates more data to come + * @data: Pointer to the data + */ +struct wmi_apf_read_memory_resp_event_params { + uint32_t vdev_id; + uint32_t offset; + uint32_t length; + bool more_data; + uint8_t *data; +}; +#endif /* FEATURE_WLAN_APF */ + +/* Begin of roam scan stats definitions */ + +#define WMI_ROAM_SCAN_STATS_MAX 5 +#define WMI_ROAM_SCAN_STATS_CANDIDATES_MAX 4 +#define WMI_ROAM_SCAN_STATS_CHANNELS_MAX 50 + +/** + * struct wmi_roam_scan_stats_req - Structure to hold roam scan stats request + * @vdev_id: interface id + */ +struct wmi_roam_scan_stats_req { + uint32_t vdev_id; +}; + +/** + * struct wmi_roam_scan_cand - Roam scan candidates + * @score: score of AP + * @rssi: rssi of the AP + * @freq: center frequency + * @bssid: bssid of AP + */ +struct 
 wmi_roam_scan_cand {
+ uint32_t score;
+ uint32_t rssi;
+ uint32_t freq;
+ uint8_t bssid[QDF_MAC_ADDR_SIZE];
+};
+
+/**
+ * struct wmi_roam_scan_stats_params - Roam scan details
+ * @time_stamp: time at which this roam scan happened
+ * @client_id: id of client which triggered this scan
+ * @num_scan_chans: number of channels that were scanned as part of this scan
+ * @scan_freqs: frequencies of the channels that were scanned
+ * @is_roam_successful: whether a successful roaming happened after this scan
+ * @old_bssid: bssid to which STA is connected just before this scan
+ * @new_bssid: bssid to which STA is roamed to in case of successful roaming
+ * @num_roam_candidates: no.of roam candidates that are being reported
+ * @cand: roam scan candidate details
+ * @trigger_id: reason for triggering this roam or roam scan
+ * @trigger_value: threshold value related to trigger_id
+ */
+struct wmi_roam_scan_stats_params {
+ uint64_t time_stamp;
+ uint32_t client_id;
+ uint32_t num_scan_chans;
+ uint32_t scan_freqs[WMI_ROAM_SCAN_STATS_CHANNELS_MAX];
+ uint32_t is_roam_successful;
+
+ /* Bssid to which STA is connected when the roam scan is triggered */
+ uint8_t old_bssid[QDF_MAC_ADDR_SIZE];
+
+ /*
+ * Bssid to which STA is connected after roaming. Will be valid only
+ * if is_roam_successful is true. 
+ */
+ uint8_t new_bssid[QDF_MAC_ADDR_SIZE];
+
+ /* Number of roam candidates that are being reported in the stats */
+ uint32_t num_roam_candidates;
+ struct wmi_roam_scan_cand cand[WMI_ROAM_SCAN_STATS_CANDIDATES_MAX];
+ uint32_t trigger_id;
+ uint32_t trigger_value;
+};
+
+/**
+ * struct wmi_roam_scan_stats_res - Roam scan stats response from firmware
+ * @num_roam_scans: number of roam scans triggered
+ * @roam_scan: place holder to indicate the array of
+ * wmi_roam_scan_stats_params followed by this structure
+ */
+struct wmi_roam_scan_stats_res {
+ uint32_t num_roam_scans;
+ struct wmi_roam_scan_stats_params roam_scan[0];
+};
+
+/* End of roam scan stats definitions */
+
+/**
+ * struct mws_coex_state - Modem Wireless Subsystem(MWS) coex info
+ * @vdev_id : vdev id
+ * @coex_scheme_bitmap: LTE-WLAN coexistence scheme bitmap
+ * Indicates the final schemes applied for the current Coex scenario.
+ * Bit 0 - TDM policy
+ * Bit 1 - Forced TDM policy
+ * Bit 2 - Dynamic Power Back-off policy
+ * Bit 3 - Channel Avoidance policy
+ * Bit 4 - Static Power Back-off policy. 
+ * @active_conflict_count : active conflict count
+ * @potential_conflict_count: Potential conflict count
+ * @chavd_group0_bitmap : Indicates the WLAN channels to be avoided in
+ * b/w WLAN CH-1 and WLAN CH-14
+ * @chavd_group1_bitmap : Indicates the WLAN channels to be avoided in
+ * WLAN CH-36 and WLAN CH-64
+ * @chavd_group2_bitmap : Indicates the WLAN channels to be avoided in
+ * b/w WLAN CH-100 and WLAN CH-140
+ * @chavd_group3_bitmap : Indicates the WLAN channels to be avoided in
+ * b/w WLAN CH-149 and WLAN CH-165
+ */
+struct mws_coex_state {
+ uint32_t vdev_id;
+ uint32_t coex_scheme_bitmap;
+ uint32_t active_conflict_count;
+ uint32_t potential_conflict_count;
+ uint32_t chavd_group0_bitmap;
+ uint32_t chavd_group1_bitmap;
+ uint32_t chavd_group2_bitmap;
+ uint32_t chavd_group3_bitmap;
+};
+
+/**
+ * struct mws_coex_dpwb_state - Modem Wireless Subsystem(MWS) coex DPWB info
+ * @vdev_id : vdev id
+ * @current_dpwb_state: Current state of the Dynamic Power Back-off SM
+ * @pnp1_value: Tx power to be applied in next Dynamic Power Back-off cycle
+ * @lte_dutycycle: Indicates the duty cycle of current LTE frame
+ * @sinr_wlan_on: LTE SINR value in dB, when WLAN is ON
+ * @sinr_wlan_off: LTE SINR value in dB, when WLAN is OFF
+ * @bler_count: LTE blocks with error for the current block err report.
+ * @block_count: Number of LTE blocks considered for bler count report. 
+ * @wlan_rssi_level: WLAN RSSI level + * @wlan_rssi: WLAN RSSI value in dBm considered in DP backoff algo + * @is_tdm_running: Indicates whether any TDM policy triggered + */ +struct mws_coex_dpwb_state { + uint32_t vdev_id; + int32_t current_dpwb_state; + int32_t pnp1_value; + uint32_t lte_dutycycle; + int32_t sinr_wlan_on; + int32_t sinr_wlan_off; + uint32_t bler_count; + uint32_t block_count; + uint32_t wlan_rssi_level; + int32_t wlan_rssi; + uint32_t is_tdm_running; +}; + +/** + * struct mws_coex_tdm_state - Modem Wireless Subsystem(MWS) coex TDM state info + * @vdev_id: vdev id + * @tdm_policy_bitmap: Time Division Multiplexing (TDM) LTE-Coex Policy type. + * @tdm_sf_bitmap: TDM LTE/WLAN sub-frame bitmap. + */ +struct mws_coex_tdm_state { + uint32_t vdev_id; + uint32_t tdm_policy_bitmap; + uint32_t tdm_sf_bitmap; +}; + +/** + * struct mws_coex_idrx_state - Modem Wireless Subsystem(MWS) coex IDRX state + * @vdev_id: vdev id + * @sub0_techid: SUB0 LTE-coex tech. + * @sub0_policy: SUB0 mitigation policy. + * @sub0_is_link_critical: Set if SUB0 is in link critical state. + * @sub0_static_power: LTE SUB0 imposed static power applied + * to WLAN due to LTE-WLAN coex. + * @sub0_rssi: LTE SUB0 RSSI value in dBm. + * @sub1_techid: SUB1 LTE-coex tech. + * @sub1_policy: SUB1 mitigation policy. + * @sub1_is_link_critical: Set if SUB1 is in link critical state. + * @sub1_static_power: LTE SUB1 imposed static power applied + * to WLAN due to LTE-WLAN coex. + * @sub1_rssi: LTE SUB1 RSSI value in dBm. 
+ */ +struct mws_coex_idrx_state { + uint32_t vdev_id; + uint32_t sub0_techid; + uint32_t sub0_policy; + uint32_t sub0_is_link_critical; + int32_t sub0_static_power; + int32_t sub0_rssi; + uint32_t sub1_techid; + uint32_t sub1_policy; + uint32_t sub1_is_link_critical; + int32_t sub1_static_power; + int32_t sub1_rssi; +}; + +/** + * struct mws_antenna_sharing_info - MWS Antenna sharing Info + * @vdev_id: vdev id + * @coex_flags: BDF values of Coex flags + * @coex_config: BDF values of Coex Antenna sharing config + * @tx_chain_mask: Tx Chain mask value + * @rx_chain_mask: Rx Chain mask value + * @rx_nss: Currently active Rx Spatial streams + * @force_mrc: Forced MRC policy type + * @rssi_type: RSSI value considered for MRC + * @chain0_rssi: RSSI value measured at Chain-0 in dBm + * @chain1_rssi: RSSI value measured at Chain-1 in dBm + * @combined_rssi: RSSI value of two chains combined in dBm + * @imbalance: Absolute imbalance between two Rx chains in dB + * @mrc_threshold: RSSI threshold defined for the above imbalance value in dBm + * @grant_duration: Antenna grant duration to WLAN, in milliseconds + */ +struct mws_antenna_sharing_info { + uint32_t vdev_id; + uint32_t coex_flags; + uint32_t coex_config; + uint32_t tx_chain_mask; + uint32_t rx_chain_mask; + uint32_t rx_nss; + uint32_t force_mrc; + uint32_t rssi_type; + int32_t chain0_rssi; + int32_t chain1_rssi; + int32_t combined_rssi; + uint32_t imbalance; + int32_t mrc_threshold; + uint32_t grant_duration; +}; +#endif /* _WMI_UNIFIED_PARAM_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_pmo_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_pmo_api.h new file mode 100644 index 0000000000000000000000000000000000000000..f47ed1842bff981bdd0b017004d7b0ba4d0551dc --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_pmo_api.h @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: This file contains the API definitions for the Unified Wireless Module + * Interface (WMI) which are specific to Power management offloads (PMO). + */ + +#ifndef _WMI_UNIFIED_PMO_API_H_ +#define _WMI_UNIFIED_PMO_API_H_ + +#include "wlan_pmo_tgt_api.h" +#include "wlan_pmo_arp_public_struct.h" +#include "wlan_pmo_ns_public_struct.h" +#include "wlan_pmo_gtk_public_struct.h" +#include "wlan_pmo_wow_public_struct.h" +#include "wlan_pmo_pkt_filter_public_struct.h" + +/** + * wmi_unified_add_wow_wakeup_event_cmd() - Configures wow wakeup events. + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @bitmap: Event bitmap + * @enable: enable/disable + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_add_wow_wakeup_event_cmd(void *wmi_hdl, + uint32_t vdev_id, + uint32_t *bitmap, + bool enable); + +/** + * wmi_unified_wow_patterns_to_fw_cmd() - Sends WOW patterns to FW. 
+ * @wmi_handle: wmi handle
+ * @vdev_id: vdev id
+ * @ptrn_id: pattern id
+ * @ptrn: pattern
+ * @ptrn_len: pattern length
+ * @ptrn_offset: pattern offset
+ * @mask: mask
+ * @mask_len: mask length
+ * @user: true for user configured pattern and false for default pattern
+ * @default_patterns: default patterns
+ *
+ * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
+ */
+QDF_STATUS wmi_unified_wow_patterns_to_fw_cmd(void *wmi_hdl,
+ uint8_t vdev_id, uint8_t ptrn_id,
+ const uint8_t *ptrn, uint8_t ptrn_len,
+ uint8_t ptrn_offset, const uint8_t *mask,
+ uint8_t mask_len, bool user,
+ uint8_t default_patterns);
+
+/**
+ * wmi_unified_add_clear_mcbc_filter_cmd() - set mcast filter command to fw
+ * @wmi_handle: wmi handle
+ * @vdev_id: vdev id
+ * @multicast_addr: mcast address
+ * @clearList: clear list flag
+ *
+ * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
+ */
+QDF_STATUS wmi_unified_add_clear_mcbc_filter_cmd(void *wmi_hdl,
+ uint8_t vdev_id,
+ struct qdf_mac_addr multicast_addr,
+ bool clearList);
+
+/**
+ * wmi_unified_multiple_add_clear_mcbc_filter_cmd() - send multiple mcast
+ * filter command to fw
+ * @wmi_handle: wmi handle
+ * @vdev_id: vdev id
+ * @mcast_filter_params: mcast filter params
+ *
+ * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
+ */
+QDF_STATUS wmi_unified_multiple_add_clear_mcbc_filter_cmd(void *wmi_hdl,
+ uint8_t vdev_id,
+ struct pmo_mcast_filter_params *filter_param);
+
+/**
+ * wmi_unified_wow_sta_ra_filter_cmd() - set RA filter pattern in fw
+ * @wmi_handle: wmi handle
+ * @vdev_id: vdev id
+ * @default_pattern: default pattern id
+ * @rate_limit_interval: rate limit interval
+ *
+ * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
+ */
+QDF_STATUS wmi_unified_wow_sta_ra_filter_cmd(void *wmi_hdl,
+ uint8_t vdev_id, uint8_t default_pattern,
+ uint16_t rate_limit_interval);
+
+/**
+ * wmi_unified_enable_enhance_multicast_offload_cmd() - enhance mcast offload
+ * @wmi_hdl: wmi handle
+ * @vdev_id: vdev id
+ * 
@action: true for enable else false + * + * To configure enhance multicast offload in to firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_enable_enhance_multicast_offload_cmd( + void *wmi_hdl, uint8_t vdev_id, bool action); + +/** + * wmi_extract_gtk_rsp_event() - extract gtk rsp params from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @gtk_rsp_param: Pointer to gtk rsp parameters + * @ len: len of gtk rsp event + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_gtk_rsp_event(void *wmi_hdl, void *evt_buf, + struct pmo_gtk_rsp_params *gtk_rsp_param, uint32_t len); + +/** + * wmi_unified_process_gtk_offload_getinfo_cmd() - send GTK offload cmd to fw + * @wmi_handle: wmi handle + * @params: GTK offload params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_process_gtk_offload_getinfo_cmd(void *wmi_hdl, + uint8_t vdev_id, + uint64_t offload_req_opcode); + +/** + * wmi_unified_action_frame_patterns_cmd() - send action filter wmi cmd + * @wmi_handle: wmi handler + * @action_params: pointer to action_params + * + * Return: 0 for success, otherwise appropriate error code + */ +QDF_STATUS wmi_unified_action_frame_patterns_cmd(void *wmi_hdl, + struct pmo_action_wakeup_set_params *action_params); + +/** + * wmi_unified_send_gtk_offload_cmd() - send GTK offload command to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @params: GTK offload parameters + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_gtk_offload_cmd(void *wmi_hdl, uint8_t vdev_id, + struct pmo_gtk_req *params, + bool enable_offload, + uint32_t gtk_offload_opcode); + +/** + * wmi_unified_enable_arp_ns_offload_cmd() - enable ARP NS offload + * @wmi_hdl: wmi handle + * @param: offload request + * + * To configure 
ARP NS off load data to firmware + * when target goes to wow mode. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_enable_arp_ns_offload_cmd(void *wmi_hdl, + struct pmo_arp_offload_params *arp_offload_req, + struct pmo_ns_offload_params *ns_offload_req, + uint8_t vdev_id); + +/** + * wmi_unified_conf_hw_filter_cmd() - Configure hardware filter in DTIM mode + * @opaque_wmi: wmi handle + * @req: request parameters to configure to firmware + * + * Return: QDF_STATUS + */ +QDF_STATUS wmi_unified_conf_hw_filter_cmd(void *opaque_wmi, + struct pmo_hw_filter_params *req); + +/** + * wmi_unified_lphb_config_hbenable_cmd() - enable command of LPHB configuration + * @wmi_handle: wmi handle + * @lphb_conf_req: configuration info + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_lphb_config_hbenable_cmd(void *wmi_hdl, + wmi_hb_set_enable_cmd_fixed_param *params); + +/** + * wmi_unified_lphb_config_tcp_params_cmd() - set tcp params of LPHB config req + * @wmi_handle: wmi handle + * @lphb_conf_req: lphb config request + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_lphb_config_tcp_params_cmd(void *wmi_hdl, + wmi_hb_set_tcp_params_cmd_fixed_param *lphb_conf_req); + +/** + * wmi_unified_lphb_config_tcp_pkt_filter_cmd() - config LPHB tcp packet filter + * @wmi_handle: wmi handle + * @lphb_conf_req: lphb config request + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_lphb_config_tcp_pkt_filter_cmd(void *wmi_hdl, + wmi_hb_set_tcp_pkt_filter_cmd_fixed_param *g_hb_tcp_filter_fp); + +/** + * wmi_unified_lphb_config_udp_params_cmd() - configure LPHB udp param command + * @wmi_handle: wmi handle + * @lphb_conf_req: lphb config request + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS 
wmi_unified_lphb_config_udp_params_cmd(void *wmi_hdl, + wmi_hb_set_udp_params_cmd_fixed_param *lphb_conf_req); + +/** + * wmi_unified_lphb_config_udp_pkt_filter_cmd() - configure LPHB udp pkt filter + * @wmi_handle: wmi handle + * @lphb_conf_req: lphb config request + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_lphb_config_udp_pkt_filter_cmd(void *wmi_hdl, + wmi_hb_set_udp_pkt_filter_cmd_fixed_param *lphb_conf_req); + +/** + * wmi_unified_enable_disable_packet_filter_cmd() - enable/disable packet filter + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @enable: Flag to enable/disable packet filter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_enable_disable_packet_filter_cmd(void *wmi_hdl, + uint8_t vdev_id, bool enable); + +/** + * wmi_unified_config_packet_filter_cmd() - configure packet filter in target + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @rcv_filter_param: Packet filter parameters + * @filter_id: Filter id + * @enable: Flag to add/delete packet filter configuration + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_config_packet_filter_cmd(void *wmi_hdl, + uint8_t vdev_id, struct pmo_rcv_pkt_fltr_cfg *rcv_filter_param, + uint8_t filter_id, bool enable); + +#endif /* _WMI_UNIFIED_PMO_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_priv.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_priv.h new file mode 100644 index 0000000000000000000000000000000000000000..14a56368ddfc0671c2c767d2a4f264cd4a892227 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_priv.h @@ -0,0 +1,1934 @@ +/* + * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains the API definitions for the Unified Wireless + * Module Interface (WMI). + */ +#ifndef _WMI_UNIFIED_PRIV_H_ +#define _WMI_UNIFIED_PRIV_H_ +#include +#include "wmi_unified_api.h" +#include "wmi_unified_param.h" +#include "wlan_scan_ucfg_api.h" +#ifdef CONFIG_MCL +#include +#endif +#include "qdf_atomic.h" +#include + +#ifdef CONVERGED_P2P_ENABLE +#include +#endif + +#ifdef DFS_COMPONENT_ENABLE +#include +#endif +#include +#ifdef WLAN_SUPPORT_GREEN_AP +#include "wlan_green_ap_api.h" +#endif + +#ifdef WLAN_FEATURE_NAN_CONVERGENCE +#include "nan_public_structs.h" +#endif + +#ifdef WLAN_SUPPORT_TWT +#include "wmi_unified_twt_param.h" +#endif + +#define WMI_UNIFIED_MAX_EVENT 0x100 + +#ifdef WMI_INTERFACE_EVENT_LOGGING + +#ifndef WMI_EVENT_DEBUG_MAX_ENTRY +#define WMI_EVENT_DEBUG_MAX_ENTRY (1024) +#endif +#define WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH (16) +/* wmi_mgmt commands */ +#ifndef WMI_MGMT_EVENT_DEBUG_MAX_ENTRY +#define WMI_MGMT_EVENT_DEBUG_MAX_ENTRY (256) +#endif +/* wmi diag rx events max buffer */ +#ifndef WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY +#define WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY (256) +#endif + +#define wmi_alert(params...) 
QDF_TRACE_FATAL(QDF_MODULE_ID_WMI, ## params) +#define wmi_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_WMI, ## params) +#define wmi_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_WMI, ## params) +#define wmi_info(params...) QDF_TRACE_INFO(QDF_MODULE_ID_WMI, ## params) +#define wmi_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_WMI, ## params) + +#define wmi_nofl_alert(params...) \ + QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_WMI, ## params) +#define wmi_nofl_err(params...) \ + QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_WMI, ## params) +#define wmi_nofl_warn(params...) \ + QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_WMI, ## params) +#define wmi_nofl_info(params...) \ + QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_WMI, ## params) +#define wmi_nofl_debug(params...) \ + QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_WMI, ## params) + +#define wmi_alert_rl(params...) QDF_TRACE_FATAL_RL(QDF_MODULE_ID_WMI, params) +#define wmi_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_WMI, params) +#define wmi_warn_rl(params...) QDF_TRACE_WARN_RL(QDF_MODULE_ID_WMI, params) +#define wmi_info_rl(params...) QDF_TRACE_INFO_RL(QDF_MODULE_ID_WMI, params) +#define wmi_debug_rl(params...) 
QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_WMI, params) + +/** + * struct wmi_command_debug - WMI command log buffer data type + * @ command - Store WMI Command id + * @ data - Stores WMI command data + * @ time - Time of WMI command handling + */ +struct wmi_command_debug { + uint32_t command; + /*16 bytes of WMI cmd excluding TLV and WMI headers */ + uint32_t data[WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH/sizeof(uint32_t)]; + uint64_t time; +}; + +/** + * struct wmi_event_debug - WMI event log buffer data type + * @ command - Store WMI Event id + * @ data - Stores WMI Event data + * @ time - Time of WMI Event handling + */ +struct wmi_event_debug { + uint32_t event; + /*16 bytes of WMI event data excluding TLV header */ + uint32_t data[WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH/sizeof(uint32_t)]; + uint64_t time; +}; + +/** + * struct wmi_command_header - Type for accessing frame data + * @ type - 802.11 Frame type + * @ subType - 802.11 Frame subtype + * @ protVer - 802.11 Version + */ +struct wmi_command_header { +#ifndef ANI_LITTLE_BIT_ENDIAN + + uint32_t sub_type:4; + uint32_t type:2; + uint32_t prot_ver:2; + +#else + + uint32_t prot_ver:2; + uint32_t type:2; + uint32_t sub_type:4; + +#endif +}; + +/** + * struct wmi_log_buf_t - WMI log buffer information type + * @buf - Refernce to WMI log buffer + * @ length - length of buffer + * @ buf_tail_idx - Tail index of buffer + * @ p_buf_tail_idx - refernce to buffer tail index. 
 * It is added to accommodate
 * unified design since MCL uses global variable for buffer tail index
 * @size: the size of the buffer in number of entries
 */
struct wmi_log_buf_t {
	void *buf;
	uint32_t length;
	uint32_t buf_tail_idx;
	uint32_t *p_buf_tail_idx;
	uint32_t size;
};

/**
 * struct wmi_debug_log_info - Meta data to hold information of all buffers
 * used for WMI logging
 * @wmi_command_log_buf_info: Buffer info for WMI Command log
 * @wmi_command_tx_cmp_log_buf_info: Buffer info for WMI Command Tx completion
 * log
 * @wmi_event_log_buf_info: Buffer info for WMI Event log
 * @wmi_rx_event_log_buf_info: Buffer info for WMI event received log
 * @wmi_mgmt_command_log_buf_info: Buffer info for WMI Management Command log
 * @wmi_mgmt_command_tx_cmp_log_buf_info: Buffer info for WMI Management
 * Command Tx completion log
 * @wmi_mgmt_event_log_buf_info: Buffer info for WMI Management event log
 * @wmi_diag_event_log_buf_info: Buffer info for WMI diag event log
 * @wmi_record_lock: Lock serializing writes into the WMI record buffers
 * @wmi_logging_enable: Enable/Disable state for WMI logging
 * @buf_offset_command: Offset from where WMI command data should be logged
 * @buf_offset_event: Offset from where WMI event data should be logged
 * @wmi_log_debugfs_dir: reference to the debugfs directory holding the WMI
 * log entries
 * @wmi_instance_id: id of this WMI instance — presumably used to distinguish
 * per-instance debugfs entries; confirm against the debugfs init code
 *
 * NOTE(review): an earlier revision of this comment documented a
 * @wmi_id_to_name callback, but no such member exists in the struct below.
 */
struct wmi_debug_log_info {
	struct wmi_log_buf_t wmi_command_log_buf_info;
	struct wmi_log_buf_t wmi_command_tx_cmp_log_buf_info;

	struct wmi_log_buf_t wmi_event_log_buf_info;
	struct wmi_log_buf_t wmi_rx_event_log_buf_info;

	struct wmi_log_buf_t wmi_mgmt_command_log_buf_info;
	struct wmi_log_buf_t wmi_mgmt_command_tx_cmp_log_buf_info;
	struct wmi_log_buf_t wmi_mgmt_event_log_buf_info;
	struct wmi_log_buf_t wmi_diag_event_log_buf_info;

	qdf_spinlock_t wmi_record_lock;
	bool wmi_logging_enable;
	uint32_t buf_offset_command;
	uint32_t buf_offset_event;
	struct dentry *wmi_log_debugfs_dir;
	uint8_t wmi_instance_id;
};

#endif /* WMI_INTERFACE_EVENT_LOGGING */

#ifdef WLAN_OPEN_SOURCE
/**
 * struct fwdebug - state for exporting firmware log data
 * @fwlog_queue: queue of sk_buffs carrying buffered firmware log data
 * @fwlog_completion: completion object — presumably signalled when fwlog
 * data becomes available to a waiting reader; confirm at the usage sites
 * @fwlog_open: whether the fwlog interface is currently open
 */
struct fwdebug {
	struct sk_buff_head fwlog_queue;
	struct completion fwlog_completion;
	A_BOOL fwlog_open;
};
#endif /* WLAN_OPEN_SOURCE */

/**
 * struct wmi_wq_dbg_info - WMI WQ debug info
 * @wd_msg_type_id: wmi event id
 * @wmi_wq: WMI workqueue struct
 * @task: WMI workqueue task struct
 */
struct wmi_wq_dbg_info {
	uint32_t wd_msg_type_id;
	qdf_workqueue_t *wmi_wq;
	qdf_thread_t *task;
};

/*
 * struct wmi_ops - table of function pointers implementing the WMI command
 * send and event extract operations; filled in by the target-specific WMI
 * attach code (definition continues beyond this chunk).
 */
struct wmi_ops {
QDF_STATUS (*send_vdev_create_cmd)(wmi_unified_t wmi_handle,
				 uint8_t macaddr[IEEE80211_ADDR_LEN],
				 struct vdev_create_params *param);

QDF_STATUS (*send_vdev_delete_cmd)(wmi_unified_t wmi_handle,
				 uint8_t if_id);

QDF_STATUS (*send_vdev_nss_chain_params_cmd)(wmi_unified_t wmi_handle,
				 uint8_t vdev_id,
				 struct mlme_nss_chains *user_cfg);

QDF_STATUS (*send_vdev_stop_cmd)(wmi_unified_t wmi,
				 uint8_t vdev_id);

QDF_STATUS (*send_vdev_down_cmd)(wmi_unified_t wmi,
				 uint8_t vdev_id);

QDF_STATUS (*send_vdev_start_cmd)(wmi_unified_t wmi,
				 struct vdev_start_params *req);

QDF_STATUS (*send_vdev_set_nac_rssi_cmd)(wmi_unified_t wmi,
				 struct vdev_scan_nac_rssi_params *req);

QDF_STATUS (*send_hidden_ssid_vdev_restart_cmd)(wmi_unified_t wmi_handle,
				 struct hidden_ssid_vdev_restart_params *restart_params);

QDF_STATUS (*send_peer_flush_tids_cmd)(wmi_unified_t wmi,
				 uint8_t peer_addr[IEEE80211_ADDR_LEN],
				 struct peer_flush_params *param);

QDF_STATUS (*send_peer_delete_cmd)(wmi_unified_t wmi,
				 uint8_t peer_addr[IEEE80211_ADDR_LEN],
				 uint8_t vdev_id);

QDF_STATUS (*send_peer_unmap_conf_cmd)(wmi_unified_t wmi,
				 uint8_t vdev_id,
				 uint32_t peer_id_cnt,
				 uint16_t *peer_id_list);

QDF_STATUS (*send_peer_param_cmd)(wmi_unified_t wmi,
				 uint8_t peer_addr[IEEE80211_ADDR_LEN],
				 struct peer_set_params *param);

QDF_STATUS (*send_vdev_up_cmd)(wmi_unified_t wmi,
				 uint8_t
bssid[IEEE80211_ADDR_LEN], + struct vdev_up_params *params); + +QDF_STATUS (*send_peer_create_cmd)(wmi_unified_t wmi, + struct peer_create_params *param); + +#ifdef WLAN_SUPPORT_GREEN_AP +QDF_STATUS (*send_green_ap_ps_cmd)(wmi_unified_t wmi_handle, + uint32_t value, uint8_t pdev_id); + +QDF_STATUS (*extract_green_ap_egap_status_info)( + uint8_t *evt_buf, + struct wlan_green_ap_egap_status_info *egap_status_info_params); +#endif + +QDF_STATUS +(*send_pdev_utf_cmd)(wmi_unified_t wmi_handle, + struct pdev_utf_params *param, + uint8_t mac_id); + +QDF_STATUS +(*send_pdev_param_cmd)(wmi_unified_t wmi_handle, + struct pdev_params *param, + uint8_t mac_id); + +QDF_STATUS (*send_suspend_cmd)(wmi_unified_t wmi_handle, + struct suspend_params *param, + uint8_t mac_id); + +QDF_STATUS (*send_resume_cmd)(wmi_unified_t wmi_handle, + uint8_t mac_id); + +#ifdef FEATURE_WLAN_D0WOW +QDF_STATUS (*send_d0wow_enable_cmd)(wmi_unified_t wmi_handle, + uint8_t mac_id); +QDF_STATUS (*send_d0wow_disable_cmd)(wmi_unified_t wmi_handle, + uint8_t mac_id); +#endif + +QDF_STATUS (*send_wow_enable_cmd)(wmi_unified_t wmi_handle, + struct wow_cmd_params *param, + uint8_t mac_id); + +QDF_STATUS (*send_set_ap_ps_param_cmd)(wmi_unified_t wmi_handle, + uint8_t *peer_addr, + struct ap_ps_params *param); + +QDF_STATUS (*send_set_sta_ps_param_cmd)(wmi_unified_t wmi_handle, + struct sta_ps_params *param); + +QDF_STATUS (*send_crash_inject_cmd)(wmi_unified_t wmi_handle, + struct crash_inject *param); + +QDF_STATUS +(*send_dbglog_cmd)(wmi_unified_t wmi_handle, + struct dbglog_params *dbglog_param); + +QDF_STATUS (*send_vdev_set_param_cmd)(wmi_unified_t wmi_handle, + struct vdev_set_params *param); + +QDF_STATUS (*send_stats_request_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct stats_request_params *param); + +#ifdef CONFIG_WIN +QDF_STATUS (*send_packet_log_enable_cmd)(wmi_unified_t wmi_handle, + WMI_HOST_PKTLOG_EVENT PKTLOG_EVENT, uint8_t mac_id); +#else +QDF_STATUS 
(*send_packet_log_enable_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct packet_enable_params *param); +#endif + +QDF_STATUS (*send_packet_log_disable_cmd)(wmi_unified_t wmi_handle, + uint8_t mac_id); + +QDF_STATUS (*send_beacon_send_cmd)(wmi_unified_t wmi_handle, + struct beacon_params *param); + +QDF_STATUS (*send_beacon_tmpl_send_cmd)(wmi_unified_t wmi_handle, + struct beacon_tmpl_params *param); + +QDF_STATUS (*send_peer_assoc_cmd)(wmi_unified_t wmi_handle, + struct peer_assoc_params *param); + +QDF_STATUS (*send_scan_start_cmd)(wmi_unified_t wmi_handle, + struct scan_req_params *param); + +QDF_STATUS (*send_scan_stop_cmd)(wmi_unified_t wmi_handle, + struct scan_cancel_param *param); + +QDF_STATUS (*send_scan_chan_list_cmd)(wmi_unified_t wmi_handle, + struct scan_chan_list_params *param); + +QDF_STATUS (*send_mgmt_cmd)(wmi_unified_t wmi_handle, + struct wmi_mgmt_params *param); + +QDF_STATUS (*send_offchan_data_tx_cmd)(wmi_unified_t wmi_handle, + struct wmi_offchan_data_tx_params *param); + +QDF_STATUS (*send_modem_power_state_cmd)(wmi_unified_t wmi_handle, + uint32_t param_value); + +QDF_STATUS (*send_set_sta_ps_mode_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id, uint8_t val); + +QDF_STATUS (*send_get_temperature_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_set_p2pgo_oppps_req_cmd)(wmi_unified_t wmi_handle, + struct p2p_ps_params *oppps); + +QDF_STATUS (*send_set_p2pgo_noa_req_cmd)(wmi_unified_t wmi_handle, + struct p2p_ps_params *noa); + +#ifdef CONVERGED_P2P_ENABLE +QDF_STATUS (*send_p2p_lo_start_cmd)(wmi_unified_t wmi_handle, + struct p2p_lo_start *param); + +QDF_STATUS (*send_p2p_lo_stop_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id); +#endif + +QDF_STATUS (*send_set_smps_params_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, + int value); + +QDF_STATUS (*send_set_mimops_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, int value); + +QDF_STATUS (*send_set_sta_uapsd_auto_trig_cmd)(wmi_unified_t wmi_handle, + 
struct sta_uapsd_trig_params *param); + +#ifdef WLAN_FEATURE_DSRC +QDF_STATUS (*send_ocb_set_utc_time_cmd)(wmi_unified_t wmi_handle, + struct ocb_utc_param *utc); + +QDF_STATUS (*send_ocb_get_tsf_timer_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id); + +QDF_STATUS (*send_ocb_start_timing_advert_cmd)(wmi_unified_t wmi_handle, + struct ocb_timing_advert_param *timing_advert); + +QDF_STATUS (*send_ocb_stop_timing_advert_cmd)(wmi_unified_t wmi_handle, + struct ocb_timing_advert_param *timing_advert); + +QDF_STATUS (*send_dcc_get_stats_cmd)(wmi_unified_t wmi_handle, + struct ocb_dcc_get_stats_param *get_stats_param); + +QDF_STATUS (*send_dcc_clear_stats_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id, uint32_t dcc_stats_bitmap); + +QDF_STATUS (*send_dcc_update_ndl_cmd)(wmi_unified_t wmi_handle, + struct ocb_dcc_update_ndl_param *update_ndl_param); + +QDF_STATUS (*send_ocb_set_config_cmd)(wmi_unified_t wmi_handle, + struct ocb_config *config); +QDF_STATUS (*extract_ocb_chan_config_resp)(wmi_unified_t wmi_hdl, + void *evt_buf, + uint32_t *status); +QDF_STATUS (*extract_ocb_tsf_timer)(wmi_unified_t wmi_hdl, + void *evt_buf, + struct ocb_get_tsf_timer_response *resp); +QDF_STATUS (*extract_dcc_update_ndl_resp)(wmi_unified_t wmi_hdl, + void *evt_buf, struct ocb_dcc_update_ndl_response *resp); +QDF_STATUS (*extract_dcc_stats)(wmi_unified_t wmi_hdl, + void *evt_buf, + struct ocb_dcc_get_stats_response **response); +#endif +QDF_STATUS (*send_lro_config_cmd)(wmi_unified_t wmi_handle, + struct wmi_lro_config_cmd_t *wmi_lro_cmd); + +QDF_STATUS (*send_set_thermal_mgmt_cmd)(wmi_unified_t wmi_handle, + struct thermal_cmd_params *thermal_info); + +QDF_STATUS (*send_peer_rate_report_cmd)(wmi_unified_t wmi_handle, + struct wmi_peer_rate_report_params *rate_report_params); + +QDF_STATUS (*send_set_mcc_channel_time_quota_cmd) + (wmi_unified_t wmi_handle, + uint32_t adapter_1_chan_freq, + uint32_t adapter_1_quota, uint32_t adapter_2_chan_freq); + +QDF_STATUS 
(*send_set_mcc_channel_time_latency_cmd) + (wmi_unified_t wmi_handle, + uint32_t mcc_channel_freq, uint32_t mcc_channel_time_latency); + +QDF_STATUS (*send_set_enable_disable_mcc_adaptive_scheduler_cmd)( + wmi_unified_t wmi_handle, uint32_t mcc_adaptive_scheduler, + uint32_t pdev_id); + +QDF_STATUS (*send_p2p_go_set_beacon_ie_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id, uint8_t *p2p_ie); + +QDF_STATUS (*send_probe_rsp_tmpl_send_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct wmi_probe_resp_params *probe_rsp_info); + +QDF_STATUS (*send_setup_install_key_cmd)(wmi_unified_t wmi_handle, + struct set_key_params *key_params); + +QDF_STATUS (*send_vdev_set_gtx_cfg_cmd)(wmi_unified_t wmi_handle, + uint32_t if_id, + struct wmi_gtx_config *gtx_info); + +QDF_STATUS (*send_set_sta_keep_alive_cmd)(wmi_unified_t wmi_handle, + struct sta_params *params); + +QDF_STATUS (*send_set_sta_sa_query_param_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, uint32_t max_retries, + uint32_t retry_interval); + +QDF_STATUS (*send_set_gateway_params_cmd)(wmi_unified_t wmi_handle, + struct gateway_update_req_param *req); + +QDF_STATUS (*send_set_rssi_monitoring_cmd)(wmi_unified_t wmi_handle, + struct rssi_monitor_param *req); + +QDF_STATUS (*send_scan_probe_setoui_cmd)(wmi_unified_t wmi_handle, + struct scan_mac_oui *psetoui); + +QDF_STATUS (*send_reset_passpoint_network_list_cmd)(wmi_unified_t wmi_handle, + struct wifi_passpoint_req_param *req); + +QDF_STATUS (*send_roam_scan_offload_rssi_thresh_cmd)(wmi_unified_t wmi_handle, + struct roam_offload_scan_rssi_params *roam_req); + +QDF_STATUS (*send_roam_mawc_params_cmd)(wmi_unified_t wmi_handle, + struct wmi_mawc_roam_params *params); + +QDF_STATUS (*send_roam_scan_filter_cmd)(wmi_unified_t wmi_handle, + struct roam_scan_filter_params *roam_req); + +#if defined(WLAN_FEATURE_FILS_SK) +QDF_STATUS (*send_roam_scan_hlp_cmd) (wmi_unified_t wmi_handle, + struct hlp_params *params); +#endif + +QDF_STATUS 
(*send_set_passpoint_network_list_cmd)(wmi_unified_t wmi_handle, + struct wifi_passpoint_req_param *req); + +QDF_STATUS (*send_set_epno_network_list_cmd)(wmi_unified_t wmi_handle, + struct wifi_enhanced_pno_params *req); + +QDF_STATUS (*send_extscan_get_capabilities_cmd)(wmi_unified_t wmi_handle, + struct extscan_capabilities_params *pgetcapab); + +QDF_STATUS (*send_extscan_get_cached_results_cmd)(wmi_unified_t wmi_handle, + struct extscan_cached_result_params *pcached_results); + +QDF_STATUS (*send_extscan_stop_change_monitor_cmd)(wmi_unified_t wmi_handle, + struct extscan_capabilities_reset_params *reset_req); + +QDF_STATUS (*send_extscan_start_change_monitor_cmd)(wmi_unified_t wmi_handle, + struct extscan_set_sig_changereq_params * + psigchange); + +QDF_STATUS (*send_extscan_stop_hotlist_monitor_cmd)(wmi_unified_t wmi_handle, + struct extscan_bssid_hotlist_reset_params *photlist_reset); + +QDF_STATUS (*send_extscan_start_hotlist_monitor_cmd)(wmi_unified_t wmi_handle, + struct extscan_bssid_hotlist_set_params *params); + +QDF_STATUS (*send_stop_extscan_cmd)(wmi_unified_t wmi_handle, + struct extscan_stop_req_params *pstopcmd); + +QDF_STATUS (*send_start_extscan_cmd)(wmi_unified_t wmi_handle, + struct wifi_scan_cmd_req_params *pstart); + +QDF_STATUS (*send_plm_stop_cmd)(wmi_unified_t wmi_handle, + const struct plm_req_params *plm); + +QDF_STATUS (*send_wlm_latency_level_cmd)(wmi_unified_t wmi_handle, + struct wlm_latency_level_param *param); + +QDF_STATUS (*send_plm_start_cmd)(wmi_unified_t wmi_handle, + const struct plm_req_params *plm, + uint32_t *gchannel_list); + +QDF_STATUS (*send_csa_offload_enable_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id); + +QDF_STATUS (*send_pno_stop_cmd)(wmi_unified_t wmi_handle, uint8_t vdev_id); + +QDF_STATUS (*send_pno_start_cmd)(wmi_unified_t wmi_handle, + struct pno_scan_req_params *pno); + +QDF_STATUS (*send_nlo_mawc_cmd)(wmi_unified_t wmi_handle, + struct nlo_mawc_params *params); + +#ifdef IPA_OFFLOAD +QDF_STATUS 
(*send_ipa_offload_control_cmd)(wmi_unified_t wmi_handle, + struct ipa_uc_offload_control_params *ipa_offload); +#endif + +QDF_STATUS (*send_set_ric_req_cmd)(wmi_unified_t wmi_handle, void *msg, + uint8_t is_add_ts); + +QDF_STATUS (*send_process_ll_stats_clear_cmd) + (wmi_unified_t wmi_handle, + const struct ll_stats_clear_params *clear_req, + uint8_t addr[IEEE80211_ADDR_LEN]); + +QDF_STATUS (*send_process_ll_stats_set_cmd) + (wmi_unified_t wmi_handle, const struct ll_stats_set_params *set_req); + +QDF_STATUS (*send_process_ll_stats_get_cmd) + (wmi_unified_t wmi_handle, const struct ll_stats_get_params *get_req, + uint8_t addr[IEEE80211_ADDR_LEN]); + + +QDF_STATUS (*send_congestion_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id); + +QDF_STATUS (*send_snr_request_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_snr_cmd)(wmi_unified_t wmi_handle, uint8_t vdev_id); + +QDF_STATUS (*send_link_status_req_cmd)(wmi_unified_t wmi_handle, + struct link_status_params *link_status); +#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD +QDF_STATUS (*send_add_wow_wakeup_event_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id, + uint32_t *bitmap, + bool enable); + +QDF_STATUS (*send_wow_patterns_to_fw_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, uint8_t ptrn_id, + const uint8_t *ptrn, uint8_t ptrn_len, + uint8_t ptrn_offset, const uint8_t *mask, + uint8_t mask_len, bool user, + uint8_t default_patterns); + +QDF_STATUS (*send_enable_arp_ns_offload_cmd)(wmi_unified_t wmi_handle, + struct pmo_arp_offload_params *arp_offload_req, + struct pmo_ns_offload_params *ns_offload_req, + uint8_t vdev_id); + +QDF_STATUS (*send_conf_hw_filter_cmd)(wmi_unified_t wmi, + struct pmo_hw_filter_params *req); + +QDF_STATUS (*send_enable_enhance_multicast_offload_cmd)( + wmi_unified_t wmi_handle, + uint8_t vdev_id, bool action); + +QDF_STATUS (*send_add_clear_mcbc_filter_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct qdf_mac_addr multicast_addr, + bool clearList); + +QDF_STATUS 
(*send_multiple_add_clear_mcbc_filter_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct pmo_mcast_filter_params *filter_param); + +QDF_STATUS (*send_gtk_offload_cmd)(wmi_unified_t wmi_handle, uint8_t vdev_id, + struct pmo_gtk_req *params, + bool enable_offload, + uint32_t gtk_offload_opcode); + +QDF_STATUS (*send_process_gtk_offload_getinfo_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint64_t offload_req_opcode); + +QDF_STATUS (*send_wow_sta_ra_filter_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, uint8_t default_pattern, + uint16_t rate_limit_interval); + +QDF_STATUS (*send_action_frame_patterns_cmd)(wmi_unified_t wmi_handle, + struct pmo_action_wakeup_set_params *action_params); + +QDF_STATUS (*extract_gtk_rsp_event)(wmi_unified_t wmi_handle, + void *evt_buf, + struct pmo_gtk_rsp_params *gtk_rsp_param, uint32_t len); + +QDF_STATUS (*send_lphb_config_hbenable_cmd)(wmi_unified_t wmi_handle, + wmi_hb_set_enable_cmd_fixed_param *params); + +QDF_STATUS (*send_lphb_config_tcp_params_cmd)(wmi_unified_t wmi_handle, + wmi_hb_set_tcp_params_cmd_fixed_param *lphb_conf_req); + +QDF_STATUS (*send_lphb_config_tcp_pkt_filter_cmd)(wmi_unified_t wmi_handle, + wmi_hb_set_tcp_pkt_filter_cmd_fixed_param *g_hb_tcp_filter_fp); + +QDF_STATUS (*send_lphb_config_udp_params_cmd)(wmi_unified_t wmi_handle, + wmi_hb_set_udp_params_cmd_fixed_param *lphb_conf_req); + +QDF_STATUS (*send_lphb_config_udp_pkt_filter_cmd)(wmi_unified_t wmi_handle, + wmi_hb_set_udp_pkt_filter_cmd_fixed_param *lphb_conf_req); + +QDF_STATUS (*send_enable_disable_packet_filter_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, bool enable); + +QDF_STATUS (*send_config_packet_filter_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, struct pmo_rcv_pkt_fltr_cfg *rcv_filter_param, + uint8_t filter_id, bool enable); +#endif /* end of WLAN_POWER_MANAGEMENT_OFFLOAD */ +#ifdef CONFIG_MCL +QDF_STATUS (*send_process_dhcp_ind_cmd)(wmi_unified_t wmi_handle, + wmi_peer_set_param_cmd_fixed_param *ta_dhcp_ind); + 
+QDF_STATUS (*send_get_link_speed_cmd)(wmi_unified_t wmi_handle, + wmi_mac_addr peer_macaddr); + +QDF_STATUS (*send_bcn_buf_ll_cmd)(wmi_unified_t wmi_handle, + wmi_bcn_send_from_host_cmd_fixed_param * param); + +QDF_STATUS (*send_roam_scan_offload_mode_cmd)(wmi_unified_t wmi_handle, + wmi_start_scan_cmd_fixed_param * scan_cmd_fp, + struct roam_offload_scan_params *roam_req); + +QDF_STATUS (*send_roam_scan_offload_ap_profile_cmd)(wmi_unified_t wmi_handle, + struct ap_profile_params *ap_profile); + +QDF_STATUS (*send_pktlog_wmi_send_cmd)(wmi_unified_t wmi_handle, + WMI_PKTLOG_EVENT pktlog_event, + WMI_CMD_ID cmd_id, uint8_t user_triggered); +#endif + +#ifdef WLAN_SUPPORT_GREEN_AP +QDF_STATUS (*send_egap_conf_params_cmd)(wmi_unified_t wmi_handle, + struct wlan_green_ap_egap_params *egap_params); +#endif + +QDF_STATUS (*send_fw_profiling_cmd)(wmi_unified_t wmi_handle, + uint32_t cmd, uint32_t value1, uint32_t value2); + +QDF_STATUS (*send_nat_keepalive_en_cmd)(wmi_unified_t wmi_handle, uint8_t vdev_id); + +#ifdef WLAN_FEATURE_CIF_CFR +QDF_STATUS (*send_oem_dma_cfg_cmd)(wmi_unified_t wmi_handle, + wmi_oem_dma_ring_cfg_req_fixed_param *cfg); +#endif + +QDF_STATUS (*send_dbr_cfg_cmd)(wmi_unified_t wmi_handle, + struct direct_buf_rx_cfg_req *cfg); + +QDF_STATUS (*send_start_oem_data_cmd)(wmi_unified_t wmi_handle, + uint32_t data_len, + uint8_t *data); + +QDF_STATUS +(*send_dfs_phyerr_filter_offload_en_cmd)(wmi_unified_t wmi_handle, + bool dfs_phyerr_filter_offload); + +QDF_STATUS (*send_wow_delete_pattern_cmd)(wmi_unified_t wmi_handle, uint8_t ptrn_id, + uint8_t vdev_id); + +QDF_STATUS (*send_host_wakeup_ind_to_fw_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_del_ts_cmd)(wmi_unified_t wmi_handle, uint8_t vdev_id, + uint8_t ac); + +QDF_STATUS (*send_aggr_qos_cmd)(wmi_unified_t wmi_handle, + struct aggr_add_ts_param *aggr_qos_rsp_msg); + +QDF_STATUS (*send_add_ts_cmd)(wmi_unified_t wmi_handle, + struct add_ts_param *msg); + +QDF_STATUS 
(*send_process_add_periodic_tx_ptrn_cmd)(wmi_unified_t wmi_handle, + struct periodic_tx_pattern * + pAddPeriodicTxPtrnParams, + uint8_t vdev_id); + +QDF_STATUS (*send_process_del_periodic_tx_ptrn_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint8_t pattern_id); + +QDF_STATUS (*send_stats_ext_req_cmd)(wmi_unified_t wmi_handle, + struct stats_ext_params *preq); + +QDF_STATUS (*send_enable_ext_wow_cmd)(wmi_unified_t wmi_handle, + struct ext_wow_params *params); + +QDF_STATUS (*send_set_app_type2_params_in_fw_cmd)(wmi_unified_t wmi_handle, + struct app_type2_params *appType2Params); + +QDF_STATUS (*send_set_auto_shutdown_timer_cmd)(wmi_unified_t wmi_handle, + uint32_t timer_val); + +QDF_STATUS (*send_nan_req_cmd)(wmi_unified_t wmi_handle, + struct nan_req_params *nan_req); + +QDF_STATUS (*send_process_dhcpserver_offload_cmd)(wmi_unified_t wmi_handle, + struct dhcp_offload_info_params *params); + +QDF_STATUS (*send_process_ch_avoid_update_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_regdomain_info_to_fw_cmd)(wmi_unified_t wmi_handle, + uint32_t reg_dmn, uint16_t regdmn2G, + uint16_t regdmn5G, uint8_t ctl2G, + uint8_t ctl5G); + +QDF_STATUS (*send_set_tdls_offchan_mode_cmd)(wmi_unified_t wmi_handle, + struct tdls_channel_switch_params *chan_switch_params); + +QDF_STATUS (*send_update_fw_tdls_state_cmd)(wmi_unified_t wmi_handle, + void *tdls_param, uint8_t tdls_state); + +QDF_STATUS (*send_update_tdls_peer_state_cmd)(wmi_unified_t wmi_handle, + struct tdls_peer_state_params *peerStateParams, + uint32_t *ch_mhz); + + +QDF_STATUS (*send_process_fw_mem_dump_cmd)(wmi_unified_t wmi_handle, + struct fw_dump_req_param *mem_dump_req); + +QDF_STATUS (*send_process_set_ie_info_cmd)(wmi_unified_t wmi_handle, + struct vdev_ie_info_param *ie_info); + +QDF_STATUS (*save_fw_version_cmd)(wmi_unified_t wmi_handle, void *evt_buf); + +QDF_STATUS (*check_and_update_fw_version_cmd)(wmi_unified_t wmi_hdl, void *ev); + +QDF_STATUS 
(*send_set_base_macaddr_indicate_cmd)(wmi_unified_t wmi_handle, + uint8_t *custom_addr); + +QDF_STATUS (*send_log_supported_evt_cmd)(wmi_unified_t wmi_handle, + uint8_t *event, + uint32_t len); + +QDF_STATUS (*send_enable_specific_fw_logs_cmd)(wmi_unified_t wmi_handle, + struct wmi_wifi_start_log *start_log); + +QDF_STATUS (*send_flush_logs_to_fw_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_pdev_set_pcl_cmd)(wmi_unified_t wmi_handle, + struct wmi_pcl_chan_weights *msg); + +QDF_STATUS (*send_pdev_set_hw_mode_cmd)(wmi_unified_t wmi_handle, + uint32_t hw_mode_index); + +QDF_STATUS (*send_pdev_set_dual_mac_config_cmd)(wmi_unified_t wmi_handle, + struct policy_mgr_dual_mac_config *msg); + +QDF_STATUS (*send_set_led_flashing_cmd)(wmi_unified_t wmi_handle, + struct flashing_req_params *flashing); + +QDF_STATUS (*send_app_type1_params_in_fw_cmd)(wmi_unified_t wmi_handle, + struct app_type1_params *app_type1_params); + +QDF_STATUS (*send_set_ssid_hotlist_cmd)(wmi_unified_t wmi_handle, + struct ssid_hotlist_request_params *request); + +QDF_STATUS (*send_process_roam_synch_complete_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id); + +QDF_STATUS (*send_unit_test_cmd)(wmi_unified_t wmi_handle, + struct wmi_unit_test_cmd *wmi_utest); + +QDF_STATUS (*send_roam_invoke_cmd)(wmi_unified_t wmi_handle, + struct wmi_roam_invoke_cmd *roaminvoke, + uint32_t ch_hz); + +QDF_STATUS (*send_roam_scan_offload_cmd)(wmi_unified_t wmi_handle, + uint32_t command, uint32_t vdev_id); + +QDF_STATUS (*send_roam_scan_offload_scan_period_cmd)(wmi_unified_t wmi_handle, + uint32_t scan_period, + uint32_t scan_age, + uint32_t vdev_id); + +QDF_STATUS (*send_roam_scan_offload_chan_list_cmd)(wmi_unified_t wmi_handle, + uint8_t chan_count, + uint32_t *chan_list, + uint8_t list_type, uint32_t vdev_id); + +QDF_STATUS (*send_roam_scan_offload_rssi_change_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id, + int32_t rssi_change_thresh, + uint32_t bcn_rssi_weight, + uint32_t hirssi_delay_btw_scans); + 
+QDF_STATUS (*send_per_roam_config_cmd)(wmi_unified_t wmi_handle, + struct wmi_per_roam_config_req *req_buf); + +QDF_STATUS (*send_set_arp_stats_req_cmd)(wmi_unified_t wmi_handle, + struct set_arp_stats *req_buf); + +QDF_STATUS (*send_get_arp_stats_req_cmd)(wmi_unified_t wmi_handle, + struct get_arp_stats *req_buf); + +#ifdef FEATURE_WLAN_APF +QDF_STATUS +(*send_set_active_apf_mode_cmd)(wmi_unified_t wmi_handle, uint8_t vdev_id, + enum wmi_host_active_apf_mode ucast_mode, + enum wmi_host_active_apf_mode mcast_bcast_mode); + +QDF_STATUS (*send_apf_enable_cmd)(wmi_unified_t wmi_handle, uint32_t vdev_id, + bool enable); + +QDF_STATUS (*send_apf_write_work_memory_cmd)(wmi_unified_t wmi_handle, + struct wmi_apf_write_memory_params *apf_write_params); + +QDF_STATUS (*send_apf_read_work_memory_cmd)(wmi_unified_t wmi_handle, + struct wmi_apf_read_memory_params *apf_read_params); + +QDF_STATUS (*extract_apf_read_memory_resp_event)(wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_apf_read_memory_resp_event_params *resp); +#endif /* FEATURE_WLAN_APF */ + +QDF_STATUS (*send_pdev_get_tpc_config_cmd)(wmi_unified_t wmi_handle, + uint32_t param); + +QDF_STATUS (*send_set_bwf_cmd)(wmi_unified_t wmi_handle, + struct set_bwf_params *param); + +QDF_STATUS (*send_set_atf_cmd)(wmi_unified_t wmi_handle, + struct set_atf_params *param); + +QDF_STATUS (*send_pdev_fips_cmd)(wmi_unified_t wmi_handle, + struct fips_params *param); + +QDF_STATUS (*send_wlan_profile_enable_cmd)(wmi_unified_t wmi_handle, + struct wlan_profile_params *param); + +QDF_STATUS (*send_wlan_profile_trigger_cmd)(wmi_unified_t wmi_handle, + struct wlan_profile_params *param); + +QDF_STATUS (*send_pdev_set_chan_cmd)(wmi_unified_t wmi_handle, + struct channel_param *param); + +QDF_STATUS (*send_set_ht_ie_cmd)(wmi_unified_t wmi_handle, + struct ht_ie_params *param); + +QDF_STATUS (*send_set_vht_ie_cmd)(wmi_unified_t wmi_handle, + struct vht_ie_params *param); + +QDF_STATUS (*send_wmm_update_cmd)(wmi_unified_t 
wmi_handle, + struct wmm_update_params *param); + +QDF_STATUS (*send_process_update_edca_param_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, bool mu_edca_param, + struct wmi_host_wme_vparams wmm_vparams[WMI_MAX_NUM_AC]); + +QDF_STATUS (*send_set_ant_switch_tbl_cmd)(wmi_unified_t wmi_handle, + struct ant_switch_tbl_params *param); + +QDF_STATUS (*send_set_ratepwr_table_cmd)(wmi_unified_t wmi_handle, + struct ratepwr_table_params *param); + +QDF_STATUS (*send_get_ratepwr_table_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_set_ctl_table_cmd)(wmi_unified_t wmi_handle, + struct ctl_table_params *param); + +QDF_STATUS (*send_set_mimogain_table_cmd)(wmi_unified_t wmi_handle, + struct mimogain_table_params *param); + +QDF_STATUS (*send_set_ratepwr_chainmsk_cmd)(wmi_unified_t wmi_handle, + struct ratepwr_chainmsk_params *param); + +QDF_STATUS (*send_set_macaddr_cmd)(wmi_unified_t wmi_handle, + struct macaddr_params *param); + +QDF_STATUS (*send_pdev_scan_start_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_pdev_scan_end_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_set_acparams_cmd)(wmi_unified_t wmi_handle, + struct acparams_params *param); + +QDF_STATUS (*send_set_vap_dscp_tid_map_cmd)(wmi_unified_t wmi_handle, + struct vap_dscp_tid_map_params *param); + +QDF_STATUS (*send_proxy_ast_reserve_cmd)(wmi_unified_t wmi_handle, + struct proxy_ast_reserve_params *param); + +QDF_STATUS (*send_pdev_qvit_cmd)(wmi_unified_t wmi_handle, + struct pdev_qvit_params *param); + +QDF_STATUS (*send_mcast_group_update_cmd)(wmi_unified_t wmi_handle, + struct mcast_group_update_params *param); + +QDF_STATUS (*send_peer_add_wds_entry_cmd)(wmi_unified_t wmi_handle, + struct peer_add_wds_entry_params *param); + +QDF_STATUS (*send_peer_del_wds_entry_cmd)(wmi_unified_t wmi_handle, + struct peer_del_wds_entry_params *param); + +QDF_STATUS (*send_set_bridge_mac_addr_cmd)(wmi_unified_t wmi_handle, + struct set_bridge_mac_addr_params *param); + +QDF_STATUS 
(*send_peer_update_wds_entry_cmd)(wmi_unified_t wmi_handle, + struct peer_update_wds_entry_params *param); + +QDF_STATUS (*send_phyerr_enable_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_phyerr_disable_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_smart_ant_enable_cmd)(wmi_unified_t wmi_handle, + struct smart_ant_enable_params *param); + +QDF_STATUS (*send_smart_ant_set_rx_ant_cmd)(wmi_unified_t wmi_handle, + struct smart_ant_rx_ant_params *param); + +QDF_STATUS (*send_smart_ant_set_tx_ant_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct smart_ant_tx_ant_params *param); + +QDF_STATUS (*send_smart_ant_set_training_info_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct smart_ant_training_info_params *param); + +QDF_STATUS (*send_smart_ant_set_node_config_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct smart_ant_node_config_params *param); + +QDF_STATUS (*send_smart_ant_enable_tx_feedback_cmd)(wmi_unified_t wmi_handle, + struct smart_ant_enable_tx_feedback_params *param); + +QDF_STATUS (*send_vdev_spectral_configure_cmd)(wmi_unified_t wmi_handle, + struct vdev_spectral_configure_params *param); + +QDF_STATUS (*send_vdev_spectral_enable_cmd)(wmi_unified_t wmi_handle, + struct vdev_spectral_enable_params *param); +QDF_STATUS (*send_set_del_pmkid_cache_cmd) (wmi_unified_t wmi_handle, + struct wmi_unified_pmk_cache *req_buf); + +QDF_STATUS (*send_bss_chan_info_request_cmd)(wmi_unified_t wmi_handle, + struct bss_chan_info_request_params *param); + +QDF_STATUS (*send_thermal_mitigation_param_cmd)(wmi_unified_t wmi_handle, + struct thermal_mitigation_params *param); + +QDF_STATUS (*send_vdev_set_neighbour_rx_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct set_neighbour_rx_params *param); + +QDF_STATUS (*send_vdev_set_fwtest_param_cmd)(wmi_unified_t wmi_handle, + struct set_fwtest_params *param); + +QDF_STATUS 
(*send_vdev_config_ratemask_cmd)(wmi_unified_t wmi_handle, + struct config_ratemask_params *param); + +QDF_STATUS (*send_vdev_set_custom_aggr_size_cmd)(wmi_unified_t wmi_handle, + struct set_custom_aggr_size_params *param); + +QDF_STATUS (*send_vdev_set_qdepth_thresh_cmd)(wmi_unified_t wmi_handle, + struct set_qdepth_thresh_params *param); + +QDF_STATUS (*send_wow_wakeup_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*send_wow_add_wakeup_event_cmd)(wmi_unified_t wmi_handle, + struct wow_add_wakeup_params *param); + +QDF_STATUS (*send_wow_add_wakeup_pattern_cmd)(wmi_unified_t wmi_handle, + struct wow_add_wakeup_pattern_params *param); + +QDF_STATUS (*send_wow_remove_wakeup_pattern_cmd)(wmi_unified_t wmi_handle, + struct wow_remove_wakeup_pattern_params *param); + +QDF_STATUS (*send_pdev_set_regdomain_cmd)(wmi_unified_t wmi_handle, + struct pdev_set_regdomain_params *param); + +QDF_STATUS (*send_set_quiet_mode_cmd)(wmi_unified_t wmi_handle, + struct set_quiet_mode_params *param); + +QDF_STATUS (*send_set_beacon_filter_cmd)(wmi_unified_t wmi_handle, + struct set_beacon_filter_params *param); + +QDF_STATUS (*send_remove_beacon_filter_cmd)(wmi_unified_t wmi_handle, + struct remove_beacon_filter_params *param); +/* +QDF_STATUS (*send_mgmt_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct mgmt_params *param); + */ + +QDF_STATUS (*send_addba_clearresponse_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct addba_clearresponse_params *param); + +QDF_STATUS (*send_addba_send_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct addba_send_params *param); + +QDF_STATUS (*send_delba_send_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct delba_send_params *param); + +QDF_STATUS (*send_addba_setresponse_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct addba_setresponse_params *param); + +QDF_STATUS (*send_singleamsdu_cmd)(wmi_unified_t 
wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct singleamsdu_params *param); + +QDF_STATUS (*send_set_qboost_param_cmd)(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct set_qboost_params *param); + +QDF_STATUS (*send_mu_scan_cmd)(wmi_unified_t wmi_handle, + struct mu_scan_params *param); + +QDF_STATUS (*send_lteu_config_cmd)(wmi_unified_t wmi_handle, + struct lteu_config_params *param); + +QDF_STATUS (*send_set_ps_mode_cmd)(wmi_unified_t wmi_handle, + struct set_ps_mode_params *param); +QDF_STATUS (*save_service_bitmap)(wmi_unified_t wmi_handle, + void *evt_buf, void *bitmap_buf); +QDF_STATUS (*save_ext_service_bitmap)(wmi_unified_t wmi_handle, + void *evt_buf, void *bitmap_buf); +bool (*is_service_enabled)(wmi_unified_t wmi_handle, + uint32_t service_id); +QDF_STATUS (*get_target_cap_from_service_ready)(wmi_unified_t wmi_handle, + void *evt_buf, struct wlan_psoc_target_capability_info *ev); + +QDF_STATUS (*extract_fw_version)(wmi_unified_t wmi_handle, + void *ev, struct wmi_host_fw_ver *fw_ver); + +QDF_STATUS (*extract_fw_abi_version)(wmi_unified_t wmi_handle, + void *ev, struct wmi_host_fw_abi_ver *fw_ver); + +QDF_STATUS (*extract_hal_reg_cap)(wmi_unified_t wmi_handle, void *evt_buf, + struct wlan_psoc_hal_reg_capability *hal_reg_cap); + +host_mem_req * (*extract_host_mem_req)(wmi_unified_t wmi_handle, + void *evt_buf, uint8_t *num_entries); + +QDF_STATUS (*init_cmd_send)(wmi_unified_t wmi_handle, + struct wmi_init_cmd_param *param); + +QDF_STATUS (*save_fw_version)(wmi_unified_t wmi_handle, void *evt_buf); +uint32_t (*ready_extract_init_status)(wmi_unified_t wmi_hdl, void *ev); +QDF_STATUS (*ready_extract_mac_addr)(wmi_unified_t wmi_hdl, void *ev, + uint8_t *macaddr); +wmi_host_mac_addr * (*ready_extract_mac_addr_list)(wmi_unified_t wmi_hdl, + void *ev, uint8_t *num_mac_addr); +QDF_STATUS (*extract_ready_event_params)(wmi_unified_t wmi_handle, + void *evt_buf, struct wmi_host_ready_ev_param *ev_param); + +QDF_STATUS 
(*check_and_update_fw_version)(wmi_unified_t wmi_hdl, void *ev); +uint8_t* (*extract_dbglog_data_len)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *len); +QDF_STATUS (*send_ext_resource_config)(wmi_unified_t wmi_handle, + wmi_host_ext_resource_config *ext_cfg); + +QDF_STATUS (*send_nf_dbr_dbm_info_get_cmd)(wmi_unified_t wmi_handle, + uint8_t mac_id); + +QDF_STATUS (*send_packet_power_info_get_cmd)(wmi_unified_t wmi_handle, + struct packet_power_info_params *param); + +QDF_STATUS (*send_gpio_config_cmd)(wmi_unified_t wmi_handle, + struct gpio_config_params *param); + +QDF_STATUS (*send_gpio_output_cmd)(wmi_unified_t wmi_handle, + struct gpio_output_params *param); + +QDF_STATUS (*send_rtt_meas_req_test_cmd)(wmi_unified_t wmi_handle, + struct rtt_meas_req_test_params *param); + +QDF_STATUS (*send_rtt_meas_req_cmd)(wmi_unified_t wmi_handle, + struct rtt_meas_req_params *param); + +QDF_STATUS (*send_rtt_keepalive_req_cmd)(wmi_unified_t wmi_handle, + struct rtt_keepalive_req_params *param); + +QDF_STATUS (*send_lci_set_cmd)(wmi_unified_t wmi_handle, + struct lci_set_params *param); + +QDF_STATUS (*send_lcr_set_cmd)(wmi_unified_t wmi_handle, + struct lcr_set_params *param); + +QDF_STATUS (*send_periodic_chan_stats_config_cmd)(wmi_unified_t wmi_handle, + struct periodic_chan_stats_params *param); + +QDF_STATUS +(*send_atf_peer_request_cmd)(wmi_unified_t wmi_handle, + struct atf_peer_request_params *param); + +QDF_STATUS +(*send_set_atf_grouping_cmd)(wmi_unified_t wmi_handle, + struct atf_grouping_params *param); + +QDF_STATUS (*send_get_user_position_cmd)(wmi_unified_t wmi_handle, + uint32_t value); + +QDF_STATUS +(*send_reset_peer_mumimo_tx_count_cmd)(wmi_unified_t wmi_handle, + uint32_t value); + +QDF_STATUS (*send_get_peer_mumimo_tx_count_cmd)(wmi_unified_t wmi_handle, + uint32_t value); + +QDF_STATUS +(*send_pdev_caldata_version_check_cmd)(wmi_unified_t wmi_handle, + uint32_t value); + +QDF_STATUS +(*send_btcoex_wlan_priority_cmd)(wmi_unified_t wmi_handle, + 
struct btcoex_cfg_params *param); + +QDF_STATUS +(*send_start_11d_scan_cmd)(wmi_unified_t wmi_handle, + struct reg_start_11d_scan_req *param); + +QDF_STATUS +(*send_stop_11d_scan_cmd)(wmi_unified_t wmi_handle, + struct reg_stop_11d_scan_req *param); + +QDF_STATUS +(*send_btcoex_duty_cycle_cmd)(wmi_unified_t wmi_handle, + struct btcoex_cfg_params *param); + +QDF_STATUS +(*send_coex_ver_cfg_cmd)(wmi_unified_t wmi_handle, coex_ver_cfg_t *param); + +QDF_STATUS +(*send_coex_config_cmd)(wmi_unified_t wmi_handle, + struct coex_config_params *param); + +QDF_STATUS (*send_bcn_offload_control_cmd)(wmi_unified_t wmi_handle, + struct bcn_offload_control *bcn_ctrl_param); + +QDF_STATUS (*extract_wds_addr_event)(wmi_unified_t wmi_handle, + void *evt_buf, uint16_t len, wds_addr_event_t *wds_ev); + +QDF_STATUS (*extract_dcs_interference_type)(wmi_unified_t wmi_handle, + void *evt_buf, struct wmi_host_dcs_interference_param *param); + +QDF_STATUS (*extract_dcs_cw_int)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_ath_dcs_cw_int *cw_int); + +QDF_STATUS (*extract_dcs_im_tgt_stats)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_dcs_im_tgt_stats_t *wlan_stat); + +QDF_STATUS (*extract_fips_event_data)(wmi_unified_t wmi_handle, + void *evt_buf, struct wmi_host_fips_event_param *param); + +QDF_STATUS (*extract_vdev_start_resp)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_vdev_start_resp *vdev_rsp); + +QDF_STATUS (*extract_vdev_delete_resp)(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_vdev_delete_resp *delete_rsp); + +QDF_STATUS (*extract_tbttoffset_update_params)(void *wmi_hdl, void *evt_buf, + uint8_t idx, struct tbttoffset_params *tbtt_param); + +QDF_STATUS (*extract_ext_tbttoffset_update_params)(void *wmi_hdl, void *evt_buf, + uint8_t idx, struct tbttoffset_params *tbtt_param); + +QDF_STATUS (*extract_tbttoffset_num_vdevs)(void *wmi_hdl, void *evt_buf, + uint32_t *num_vdevs); + +QDF_STATUS (*extract_ext_tbttoffset_num_vdevs)(void *wmi_hdl, void *evt_buf, 
+ uint32_t *num_vdevs); + +QDF_STATUS (*extract_mgmt_rx_params)(wmi_unified_t wmi_handle, void *evt_buf, + struct mgmt_rx_event_params *hdr, uint8_t **bufp); + +QDF_STATUS (*extract_vdev_stopped_param)(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t *vdev_id); + +QDF_STATUS (*extract_vdev_roam_param)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_roam_event *param); + +QDF_STATUS (*extract_vdev_scan_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, struct scan_event *param); + +#ifdef CONVERGED_TDLS_ENABLE +QDF_STATUS (*extract_vdev_tdls_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, struct tdls_event_info *param); +#endif + +QDF_STATUS (*extract_mu_ev_param)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_mu_report_event *param); + +QDF_STATUS (*extract_mu_db_entry)(wmi_unified_t wmi_hdl, void *evt_buf, + uint8_t idx, wmi_host_mu_db_entry *param); + +QDF_STATUS (*extract_mumimo_tx_count_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_peer_txmu_cnt_event *param); + +QDF_STATUS (*extract_peer_gid_userpos_list_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_peer_gid_userpos_list_event *param); + +QDF_STATUS (*extract_pdev_caldata_version_check_ev_param)( + wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_pdev_check_cal_version_event *param); + +QDF_STATUS (*extract_pdev_tpc_config_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_pdev_tpc_config_event *param); + +QDF_STATUS (*extract_gpio_input_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t *gpio_num); + +QDF_STATUS (*extract_pdev_reserve_ast_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, struct wmi_host_proxy_ast_reserve_param *param); + +QDF_STATUS (*extract_nfcal_power_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_pdev_nfcal_power_all_channels_event *param); + +QDF_STATUS (*extract_pdev_tpc_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_pdev_tpc_event *param); + +QDF_STATUS 
(*extract_pdev_generic_buffer_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_pdev_generic_buffer_event *param); + +QDF_STATUS (*extract_mgmt_tx_compl_param)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_mgmt_tx_compl_event *param); + +QDF_STATUS (*extract_offchan_data_tx_compl_param)(wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_host_offchan_data_tx_compl_event *param); + +QDF_STATUS (*extract_pdev_csa_switch_count_status)(wmi_unified_t wmi_handle, + void *evt_buf, struct pdev_csa_switch_count_status *param); + +QDF_STATUS (*extract_swba_num_vdevs)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *num_vdevs); + +QDF_STATUS (*extract_swba_tim_info)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t idx, wmi_host_tim_info *tim_info); + +QDF_STATUS (*extract_swba_noa_info)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t idx, wmi_host_p2p_noa_info *p2p_desc); + +#ifdef CONVERGED_P2P_ENABLE +QDF_STATUS (*extract_p2p_lo_stop_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, struct p2p_lo_event *param); + +QDF_STATUS (*extract_p2p_noa_ev_param)(wmi_unified_t wmi_handle, + void *evt_buf, struct p2p_noa_info *param); + +QDF_STATUS (*set_mac_addr_rx_filter)(wmi_unified_t wmi_handle, + struct p2p_set_mac_filter *param); +QDF_STATUS +(*extract_mac_addr_rx_filter_evt_param)(wmi_unified_t wmi_handle, + void *evt_buf, + struct p2p_set_mac_filter_evt *param); +#endif + +QDF_STATUS (*extract_peer_sta_ps_statechange_ev)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_peer_sta_ps_statechange_event *ev); + +QDF_STATUS (*extract_peer_sta_kickout_ev)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_peer_sta_kickout_event *ev); + +QDF_STATUS (*extract_peer_ratecode_list_ev)(wmi_unified_t wmi_handle, + void *evt_buf, uint8_t *peer_mac, wmi_sa_rate_cap *rate_cap); + +QDF_STATUS (*extract_comb_phyerr)(wmi_unified_t wmi_handle, void *evt_buf, + uint16_t datalen, uint16_t *buf_offset, wmi_host_phyerr_t *phyerr); + +QDF_STATUS 
(*extract_single_phyerr)(wmi_unified_t wmi_handle, void *evt_buf, + uint16_t datalen, uint16_t *buf_offset, wmi_host_phyerr_t *phyerr); + +QDF_STATUS (*extract_composite_phyerr)(wmi_unified_t wmi_handle, void *evt_buf, + uint16_t datalen, wmi_host_phyerr_t *phyerr); + +QDF_STATUS (*extract_rtt_hdr)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_rtt_event_hdr *ev); + +QDF_STATUS (*extract_rtt_ev)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_rtt_meas_event *ev, uint8_t *hdump, uint16_t hdump_len); + +QDF_STATUS (*extract_rtt_error_report_ev)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_rtt_error_report_event *ev); + +QDF_STATUS (*extract_all_stats_count)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_stats_event *stats_param); + +QDF_STATUS (*extract_pdev_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_pdev_stats *pdev_stats); + +QDF_STATUS (*extract_unit_test)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_unit_test_event *unit_test, uint32_t maxspace); + +QDF_STATUS (*extract_pdev_ext_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_pdev_ext_stats *pdev_ext_stats); + +QDF_STATUS (*extract_vdev_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_vdev_stats *vdev_stats); + +QDF_STATUS (*extract_per_chain_rssi_stats)(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, + struct wmi_host_per_chain_rssi_stats *rssi_stats); + +QDF_STATUS (*extract_peer_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_peer_stats *peer_stats); + +QDF_STATUS (*extract_bcnflt_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_bcnflt_stats *bcnflt_stats); + +QDF_STATUS (*extract_peer_extd_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_peer_extd_stats *peer_extd_stats); + +QDF_STATUS (*extract_peer_adv_stats)(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_peer_adv_stats + *peer_adv_stats); + 
+QDF_STATUS (*extract_chan_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_chan_stats *chan_stats); + +QDF_STATUS (*extract_thermal_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *temp, uint32_t *level, uint32_t *pdev_id); + +QDF_STATUS (*extract_thermal_level_stats)(wmi_unified_t wmi_handle, + void *evt_buf, uint8_t idx, uint32_t *levelcount, + uint32_t *dccount); + +QDF_STATUS (*extract_profile_ctx)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_wlan_profile_ctx_t *profile_ctx); + +QDF_STATUS (*extract_profile_data)(wmi_unified_t wmi_handle, void *evt_buf, + uint8_t idx, + wmi_host_wlan_profile_t *profile_data); + +QDF_STATUS (*extract_chan_info_event)(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_chan_info_event *chan_info); + +QDF_STATUS (*extract_channel_hopping_event)(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_pdev_channel_hopping_event *ch_hopping); + +QDF_STATUS (*extract_bss_chan_info_event)(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_pdev_bss_chan_info_event *bss_chan_info); + +QDF_STATUS (*extract_inst_rssi_stats_event)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_inst_stats_resp *inst_rssi_resp); + +QDF_STATUS (*extract_tx_data_traffic_ctrl_ev)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_tx_data_traffic_ctrl_event *ev); + +QDF_STATUS (*extract_atf_peer_stats_ev)(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_atf_peer_stats_event *ev); + +QDF_STATUS (*extract_atf_token_info_ev)(wmi_unified_t wmi_handle, + void *evt_buf, + uint8_t idx, + wmi_host_atf_peer_stats_info *atf_token_info); + +QDF_STATUS (*extract_vdev_extd_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, wmi_host_vdev_extd_stats *vdev_extd_stats); + +QDF_STATUS (*extract_vdev_nac_rssi_stats)(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_vdev_nac_rssi_event *vdev_nac_rssi_stats); + +QDF_STATUS (*extract_bcn_stats)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t index, 
wmi_host_bcn_stats *bcn_stats); + +QDF_STATUS (*send_power_dbg_cmd)(wmi_unified_t wmi_handle, + struct wmi_power_dbg_params *param); + +QDF_STATUS (*send_multiple_vdev_restart_req_cmd)(wmi_unified_t wmi_handle, + struct multiple_vdev_restart_params *param); + +QDF_STATUS (*send_adapt_dwelltime_params_cmd)(wmi_unified_t wmi_handle, + struct wmi_adaptive_dwelltime_params *dwelltime_params); + +QDF_STATUS (*send_dbs_scan_sel_params_cmd)(wmi_unified_t wmi_handle, + struct wmi_dbs_scan_sel_params *dbs_scan_params); + +QDF_STATUS (*send_fw_test_cmd)(wmi_unified_t wmi_handle, + struct set_fwtest_params *wmi_fwtest); + +#ifdef WLAN_FEATURE_DISA +QDF_STATUS (*send_encrypt_decrypt_send_cmd)(wmi_unified_t wmi_handle, + struct disa_encrypt_decrypt_req_params *params); + +QDF_STATUS (*extract_encrypt_decrypt_resp_event)(wmi_unified_t wmi_handle, + void *evt_buf, + struct disa_encrypt_decrypt_resp_params *resp); +#endif + +#ifdef WLAN_FEATURE_ACTION_OUI +QDF_STATUS (*send_action_oui_cmd)(wmi_unified_t wmi_handle, + struct action_oui_request *req); +#endif /* WLAN_FEATURE_ACTION_OUI */ + +QDF_STATUS (*send_sar_limit_cmd)(wmi_unified_t wmi_handle, + struct sar_limit_cmd_params *params); + +QDF_STATUS (*get_sar_limit_cmd)(wmi_unified_t wmi_handle); + +QDF_STATUS (*extract_sar_limit_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct sar_limit_event *event); + +QDF_STATUS (*extract_sar2_result_event)(void *handle, + uint8_t *event, + uint32_t len); + +QDF_STATUS (*send_peer_rx_reorder_queue_setup_cmd)(wmi_unified_t wmi_handle, + struct rx_reorder_queue_setup_params *param); + +QDF_STATUS (*send_peer_rx_reorder_queue_remove_cmd)(wmi_unified_t wmi_handle, + struct rx_reorder_queue_remove_params *param); + +QDF_STATUS (*extract_service_ready_ext)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wlan_psoc_host_service_ext_param *param); + +QDF_STATUS (*extract_hw_mode_cap_service_ready_ext)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t hw_mode_idx, + struct 
wlan_psoc_host_hw_mode_caps *param); + +QDF_STATUS (*extract_mac_phy_cap_service_ready_ext)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + uint8_t hw_mode_id, + uint8_t phy_id, + struct wlan_psoc_host_mac_phy_caps *param); + +QDF_STATUS (*extract_reg_cap_service_ready_ext)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t phy_idx, + struct wlan_psoc_host_hal_reg_capabilities_ext *param); + +QDF_STATUS (*extract_dbr_ring_cap_service_ready_ext)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct wlan_psoc_host_dbr_ring_caps *param); + +QDF_STATUS (*extract_sar_cap_service_ready_ext)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wlan_psoc_host_service_ext_param *ext_param); + +QDF_STATUS (*extract_dbr_buf_release_fixed)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct direct_buf_rx_rsp *param); + +QDF_STATUS (*extract_dbr_buf_release_entry)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct direct_buf_rx_entry *param); + +QDF_STATUS (*extract_dbr_buf_metadata)( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, uint8_t idx, + struct direct_buf_rx_metadata *param); + +QDF_STATUS (*extract_pdev_utf_event)(wmi_unified_t wmi_hdl, + uint8_t *evt_buf, + struct wmi_host_pdev_utf_event *param); + +QDF_STATUS (*extract_pdev_qvit_event)(wmi_unified_t wmi_hdl, + uint8_t *evt_buf, + struct wmi_host_pdev_qvit_event *param); + +uint16_t (*wmi_set_htc_tx_tag)(wmi_unified_t wmi_handle, + wmi_buf_t buf, uint32_t cmd_id); + +QDF_STATUS (*extract_peer_delete_response_event)( + wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_host_peer_delete_response_event *param); + +bool (*is_management_record)(uint32_t cmd_id); +bool (*is_diag_event)(uint32_t event_id); +uint8_t *(*wmi_id_to_name)(uint32_t cmd_id); +QDF_STATUS (*send_dfs_phyerr_offload_en_cmd)(wmi_unified_t wmi_handle, + uint32_t pdev_id); +QDF_STATUS (*send_dfs_phyerr_offload_dis_cmd)(wmi_unified_t wmi_handle, + uint32_t pdev_id); +QDF_STATUS 
(*extract_reg_chan_list_update_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct cur_regulatory_info + *reg_info, + uint32_t len); + +QDF_STATUS (*extract_reg_11d_new_country_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct reg_11d_new_country *reg_11d_country, + uint32_t len); + +QDF_STATUS (*extract_reg_ch_avoid_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct ch_avoid_ind_type *ch_avoid_event, + uint32_t len); + +QDF_STATUS (*extract_chainmask_tables)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wlan_psoc_host_chainmask_table *chainmask_table); + +QDF_STATUS (*send_get_rcpi_cmd)(wmi_unified_t wmi_handle, + struct rcpi_req *get_rcpi_param); + +QDF_STATUS (*extract_rcpi_response_event)(wmi_unified_t wmi_handle, + void *evt_buf, + struct rcpi_res *res); + +QDF_STATUS (*extract_dfs_cac_complete_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + uint32_t *vdev_id, + uint32_t len); +QDF_STATUS (*extract_dfs_radar_detection_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct radar_found_info *radar_found, + uint32_t len); +QDF_STATUS (*extract_wlan_radar_event_info)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct radar_event_info *wlan_radar_event, + uint32_t len); + +QDF_STATUS (*send_set_country_cmd)(wmi_unified_t wmi_handle, + struct set_country *param); + +uint32_t (*convert_pdev_id_host_to_target)(uint32_t pdev_id); +uint32_t (*convert_pdev_id_target_to_host)(uint32_t pdev_id); + +/* + * For MCL, convert_pdev_id_host_to_target returns legacy pdev id value. + * But in converged firmware, WMI_SET_CURRENT_COUNTRY_CMDID expects target + * mapping of pdev_id to give only one WMI_REG_CHAN_LIST_CC_EVENTID. + * wmi_pdev_id_conversion_enable cannot be used since it overwrites + * convert_pdev_id_host_to_target which effects legacy cases. 
+ * Below two commands: convert_host_pdev_id_to_target and + * convert_target_pdev_id_to_host should be used for any WMI + * command/event where FW expects target/host mapping of pdev_id respectively. + */ +uint32_t (*convert_host_pdev_id_to_target)(uint32_t pdev_id); +uint32_t (*convert_target_pdev_id_to_host)(uint32_t pdev_id); + +QDF_STATUS (*send_user_country_code_cmd)(wmi_unified_t wmi_handle, + uint8_t pdev_id, struct cc_regdmn_s *rd); +QDF_STATUS (*send_limit_off_chan_cmd)(wmi_unified_t wmi_handle, + struct wmi_limit_off_chan_param *limit_off_chan_param); + +QDF_STATUS (*send_wow_timer_pattern_cmd)(wmi_unified_t wmi_handle, + uint8_t vdev_id, uint32_t cookie, uint32_t time); +QDF_STATUS (*send_wds_entry_list_cmd)(wmi_unified_t wmi_handle); +QDF_STATUS (*extract_wds_entry)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wdsentry *wds_entry, + u_int32_t idx); + +#ifdef WLAN_FEATURE_NAN_CONVERGENCE +QDF_STATUS (*send_ndp_initiator_req_cmd)(wmi_unified_t wmi_handle, + struct nan_datapath_initiator_req *req); +QDF_STATUS (*send_ndp_responder_req_cmd)(wmi_unified_t wmi_handle, + struct nan_datapath_responder_req *req); +QDF_STATUS (*send_ndp_end_req_cmd)(wmi_unified_t wmi_handle, + struct nan_datapath_end_req *req); + +QDF_STATUS (*extract_ndp_initiator_rsp)(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_initiator_rsp *rsp); +QDF_STATUS (*extract_ndp_ind)(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_indication_event *ind); +QDF_STATUS (*extract_ndp_confirm)(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_confirm_event *ev); +QDF_STATUS (*extract_ndp_responder_rsp)(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_responder_rsp *rsp); +QDF_STATUS (*extract_ndp_end_rsp)(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_end_rsp_event *rsp); +QDF_STATUS (*extract_ndp_end_ind)(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_end_indication_event **ind); +QDF_STATUS 
(*extract_ndp_sch_update)(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_sch_update_event *ind); +#endif /* WLAN_FEATURE_NAN_CONVERGENCE */ + +QDF_STATUS (*send_btm_config)(wmi_unified_t wmi_handle, + struct wmi_btm_config *params); + +QDF_STATUS (*send_roam_bss_load_config)(wmi_unified_t wmi_handle, + struct wmi_bss_load_config *params); + +QDF_STATUS (*send_obss_detection_cfg_cmd)(wmi_unified_t wmi_handle, + struct wmi_obss_detection_cfg_param *obss_cfg_param); +QDF_STATUS (*extract_obss_detection_info)(uint8_t *evt_buf, + struct wmi_obss_detect_info *info); + +#ifdef WLAN_SUPPORT_FILS +QDF_STATUS (*send_vdev_fils_enable_cmd)(wmi_unified_t wmi_handle, + struct config_fils_params *param); +QDF_STATUS (*extract_swfda_vdev_id)(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *vdev_id); +QDF_STATUS (*send_fils_discovery_send_cmd)(wmi_unified_t wmi_handle, + struct fd_params *param); +#endif /* WLAN_SUPPORT_FILS */ + +QDF_STATUS +(*send_roam_scan_stats_cmd)(wmi_unified_t wmi_handle, + struct wmi_roam_scan_stats_req *params); + +QDF_STATUS +(*extract_roam_scan_stats_res_evt)(wmi_unified_t wmi_handle, + void *evt_buf, + uint32_t *vdev_id, + struct wmi_roam_scan_stats_res **res_param); + +QDF_STATUS (*send_offload_11k_cmd)(wmi_unified_t wmi_handle, + struct wmi_11k_offload_params *params); + +QDF_STATUS (*send_invoke_neighbor_report_cmd)(wmi_unified_t wmi_handle, + struct wmi_invoke_neighbor_report_params *params); + +void (*wmi_pdev_id_conversion_enable)(wmi_unified_t wmi_handle); +void (*send_time_stamp_sync_cmd)(wmi_unified_t wmi_handle); +void (*wmi_free_allocated_event)(uint32_t cmd_event_id, + void **wmi_cmd_struct_ptr); +int (*wmi_check_and_pad_event)(void *os_handle, void *param_struc_ptr, + uint32_t param_buf_len, + uint32_t wmi_cmd_event_id, + void **wmi_cmd_struct_ptr); +int (*wmi_check_command_params)(void *os_handle, void *param_struc_ptr, + uint32_t param_buf_len, + uint32_t wmi_cmd_event_id); +QDF_STATUS 
(*send_bss_color_change_enable_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id, + bool enable); +QDF_STATUS (*send_obss_color_collision_cfg_cmd)(wmi_unified_t wmi_handle, + struct wmi_obss_color_collision_cfg_param *cfg); +QDF_STATUS (*extract_obss_color_collision_info)(uint8_t *evt_buf, + struct wmi_obss_color_collision_info *info); +#ifdef WLAN_SUPPORT_TWT +QDF_STATUS (*send_twt_enable_cmd)(wmi_unified_t wmi_handle, + struct wmi_twt_enable_param *params); + +QDF_STATUS (*send_twt_disable_cmd)(wmi_unified_t wmi_handle, + struct wmi_twt_disable_param *params); + +QDF_STATUS (*send_twt_add_dialog_cmd)(wmi_unified_t wmi_handle, + struct wmi_twt_add_dialog_param *params); + +QDF_STATUS (*send_twt_del_dialog_cmd)(wmi_unified_t wmi_handle, + struct wmi_twt_del_dialog_param *params); + +QDF_STATUS (*send_twt_pause_dialog_cmd)(wmi_unified_t wmi_handle, + struct wmi_twt_pause_dialog_cmd_param *params); + +QDF_STATUS (*send_twt_resume_dialog_cmd)(wmi_unified_t wmi_handle, + struct wmi_twt_resume_dialog_cmd_param *params); + +QDF_STATUS (*extract_twt_enable_comp_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_enable_complete_event_param *params); + +QDF_STATUS (*extract_twt_disable_comp_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_disable_complete_event *params); + +QDF_STATUS (*extract_twt_add_dialog_comp_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_add_dialog_complete_event_param *params); + +QDF_STATUS (*extract_twt_del_dialog_comp_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_del_dialog_complete_event_param *params); + +QDF_STATUS (*extract_twt_pause_dialog_comp_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_pause_dialog_complete_event_param *params); + +QDF_STATUS (*extract_twt_resume_dialog_comp_event)(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_resume_dialog_complete_event_param *params); +#endif + +#ifdef QCA_SUPPORT_CP_STATS 
+QDF_STATUS (*extract_cca_stats)(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_congestion_stats *stats); +#endif /* QCA_SUPPORT_CP_STATS */ + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS (*send_dfs_average_radar_params_cmd)( + wmi_unified_t wmi_handle, + struct dfs_radar_found_params *params); + +QDF_STATUS (*extract_dfs_status_from_fw)(wmi_unified_t wmi_handle, + void *evt_buf, + uint32_t *dfs_status_check); +#endif +QDF_STATUS (*send_mws_coex_status_req_cmd)(wmi_unified_t wmi_handle, + uint32_t vdev_id, uint32_t cmd_id); +}; + +/* Forward declartion for psoc*/ +struct wlan_objmgr_psoc; + +/** + * struct wmi_init_cmd - Saved wmi INIT command + * @buf: Buffer containing the wmi INIT command + * @buf_len: Length of the buffer + */ +struct wmi_cmd_init { + wmi_buf_t buf; + uint32_t buf_len; +}; + +/** + * @abi_version_0: WMI Major and Minor versions + * @abi_version_1: WMI change revision + * @abi_version_ns_0: ABI version namespace first four dwords + * @abi_version_ns_1: ABI version namespace second four dwords + * @abi_version_ns_2: ABI version namespace third four dwords + * @abi_version_ns_3: ABI version namespace fourth four dwords + */ +struct wmi_host_abi_version { + uint32_t abi_version_0; + uint32_t abi_version_1; + uint32_t abi_version_ns_0; + uint32_t abi_version_ns_1; + uint32_t abi_version_ns_2; + uint32_t abi_version_ns_3; +}; + +struct wmi_unified { + void *scn_handle; /* handle to device */ + osdev_t osdev; /* handle to use OS-independent services */ + struct wbuff_mod_handle *wbuff_handle; /* handle to wbuff */ + qdf_atomic_t pending_cmds; + HTC_ENDPOINT_ID wmi_endpoint_id; + uint16_t max_msg_len; + uint32_t *event_id; + wmi_unified_event_handler *event_handler; + enum wmi_rx_exec_ctx *ctx; + void *htc_handle; + qdf_spinlock_t eventq_lock; + qdf_nbuf_queue_t event_queue; + qdf_work_t rx_event_work; + qdf_workqueue_t *wmi_rx_work_queue; + int wmi_stop_in_progress; + struct wmi_host_abi_version 
fw_abi_version; + struct wmi_host_abi_version final_abi_vers; + uint32_t num_of_diag_events_logs; + uint32_t *events_logs_list; +#ifdef WLAN_OPEN_SOURCE + struct fwdebug dbglog; + struct dentry *debugfs_phy; +#endif /* WLAN_OPEN_SOURCE */ + +#ifdef WMI_INTERFACE_EVENT_LOGGING + struct wmi_debug_log_info log_info; +#endif /*WMI_INTERFACE_EVENT_LOGGING */ + + qdf_atomic_t is_target_suspended; + +#ifdef FEATURE_RUNTIME_PM + qdf_atomic_t runtime_pm_inprogress; +#endif + qdf_atomic_t is_wow_bus_suspended; + bool tag_crash_inject; + bool tgt_force_assert_enable; + enum wmi_target_type target_type; + struct wmi_rx_ops rx_ops; + struct wmi_ops *ops; + bool use_cookie; + bool wmi_stopinprogress; + uint32_t *wmi_events; +#ifndef CONFIG_MCL + uint32_t *pdev_param; + uint32_t *vdev_param; +#endif + uint32_t *services; + struct wmi_soc *soc; + uint16_t wmi_max_cmds; +}; + +#define WMI_MAX_RADIOS 3 +struct wmi_soc { + struct wlan_objmgr_psoc *wmi_psoc; + void *scn_handle; /* handle to device */ + qdf_atomic_t num_pdevs; + enum wmi_target_type target_type; + void *htc_handle; + uint32_t event_id[WMI_UNIFIED_MAX_EVENT]; + wmi_unified_event_handler event_handler[WMI_UNIFIED_MAX_EVENT]; + uint32_t max_event_idx; + enum wmi_rx_exec_ctx ctx[WMI_UNIFIED_MAX_EVENT]; + qdf_spinlock_t ctx_lock; + struct wmi_unified *wmi_pdev[WMI_MAX_RADIOS]; + HTC_ENDPOINT_ID wmi_endpoint_id[WMI_MAX_RADIOS]; + uint16_t max_msg_len[WMI_MAX_RADIOS]; + struct wmi_ops *ops; + const uint32_t *svc_ids; + uint32_t wmi_events[wmi_events_max]; + /* WMI service bitmap received from target */ + uint32_t *wmi_service_bitmap; + uint32_t *wmi_ext_service_bitmap; +#ifndef CONFIG_MCL + uint32_t pdev_param[wmi_pdev_param_max]; + uint32_t vdev_param[wmi_vdev_param_max]; +#endif + uint32_t services[wmi_services_max]; + uint16_t wmi_max_cmds; + +}; + +void wmi_unified_register_module(enum wmi_target_type target_type, + void (*wmi_attach)(wmi_unified_t wmi_handle)); +void wmi_tlv_init(void); +void wmi_non_tlv_init(void); 
+#ifdef WMI_NON_TLV_SUPPORT +/* ONLY_NON_TLV_TARGET:TLV attach dummy function definition for case when + * driver supports only NON-TLV target (WIN mainline) */ +#define wmi_tlv_attach(x) qdf_print("TLV Unavailable\n") +#else +void wmi_tlv_attach(wmi_unified_t wmi_handle); +#endif +void wmi_non_tlv_attach(wmi_unified_t wmi_handle); + +#ifdef FEATURE_WLAN_EXTSCAN +void wmi_extscan_attach_tlv(struct wmi_unified *wmi_handle); +#else +static inline void wmi_extscan_attach_tlv(struct wmi_unified *wmi_handle) +{ +} +#endif + +/** + * wmi_align() - provides word aligned parameter + * @param: parameter to be aligned + * + * Return: word aligned parameter + */ +static inline uint32_t wmi_align(uint32_t param) +{ + return roundup(param, sizeof(uint32_t)); +} + +/** + * wmi_vdev_map_to_vdev_id() - Provides vdev id corresponding to idx + * from vdev map + * @vdev_map: Bitmask containing information of active vdev ids + * @idx: Index referring to the i'th bit set from LSB in vdev map + * + * This API returns the vdev id for the i'th bit set from LSB in vdev map. 
+ * Index runs through 1 from maximum number of vdevs set in the vdev map + * + * Return: vdev id of the vdev object + */ +static inline uint32_t wmi_vdev_map_to_vdev_id(uint32_t vdev_map, + uint32_t idx) +{ + uint32_t vdev_count = 0, vdev_set = 0, vdev_id = WLAN_INVALID_VDEV_ID; + + while (vdev_map) { + vdev_set += (vdev_map & 0x1); + if (vdev_set == (idx+1)) { + vdev_id = vdev_count; + break; + } + vdev_map >>= 1; + vdev_count++; + } + + return vdev_id; +} + +/** + * wmi_vdev_map_to_num_vdevs() - Provides number of vdevs active based on the + * vdev map received from FW + * @vdev_map: Bitmask containing information of active vdev ids + * + * Return: Number of vdevs set in the vdev bit mask + */ +static inline uint32_t wmi_vdev_map_to_num_vdevs(uint32_t vdev_map) +{ + uint32_t num_vdevs = 0; + + while (vdev_map) { + num_vdevs += (vdev_map & 0x1); + vdev_map >>= 1; + } + + return num_vdevs; +} +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_reg_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_reg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..ef147d6d529e4de3e3331bea21000c5732e03495 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_reg_api.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
/**
 * DOC: This file contains the API definitions for the Unified Wireless Module
 * Interface (WMI) which are specific to Regulatory module.
 */

#ifndef _WMI_UNIFIED_REG_API_H_
#define _WMI_UNIFIED_REG_API_H_

#include "reg_services_public_struct.h"
/**
 * wmi_extract_reg_chan_list_update_event() - extract regulatory channel list
 * update event
 * @wmi_hdl: wmi handle
 * @evt_buf: event buffer
 * @reg_info: pointer to the extracted regulatory info
 * @len: length of buffer
 *
 * Return: 0 for success or error code
 */
QDF_STATUS wmi_extract_reg_chan_list_update_event(void *wmi_hdl,
					uint8_t *evt_buf,
					struct cur_regulatory_info
					*reg_info,
					uint32_t len);

/**
 * wmi_unified_send_stop_11d_scan_cmd() - stop 11d scan
 * @wmi_handle: wmi handle
 * @stop_11d_scan: pointer to 11d scan stop req.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error
 */
QDF_STATUS wmi_unified_send_stop_11d_scan_cmd(wmi_unified_t wmi_handle,
		struct reg_stop_11d_scan_req *stop_11d_scan);

/**
 * wmi_unified_send_start_11d_scan_cmd() - start 11d scan
 * @wmi_handle: wmi handle
 * @start_11d_scan: pointer to 11d scan start req.
+ * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_unified_send_start_11d_scan_cmd(wmi_unified_t wmi_handle, + struct reg_start_11d_scan_req *start_11d_scan); + +/** + * wmi_extract_reg_11d_new_cc_event() - function to extract the 11d new country + * @wmi_hdl: wmi handle + * @evt_buf: event buffer + * @reg_11d_new_cc: pointer to new 11d country info + * @len: length of buffer + * + * Return: 0 for success or error code + */ +QDF_STATUS wmi_extract_reg_11d_new_cc_event(void *wmi_hdl, + uint8_t *evt_buf, + struct reg_11d_new_country *reg_11d_new_cc, + uint32_t len); + +/** + * wmi_unified_set_user_country_code_cmd_send() - WMI set country function + * @wmi_handle: wmi handle. + * @pdev_id: Pdev id + * @rd: User country code or regdomain + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_user_country_code_cmd_send(void *wmi_hdl, + uint8_t pdev_id, struct cc_regdmn_s *rd); + +/** + * wmi_extract_reg_ch_avoid_event() - process freq avoid event + * @wmi_hdl: wmi handle. + * @evt_buf: event buffer + * @ch_avoid_ind: buffer pointer to save the event processed data + * @len: length of buffer + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_reg_ch_avoid_event(void *wmi_hdl, + uint8_t *evt_buf, + struct ch_avoid_ind_type *ch_avoid_ind, + uint32_t len); + +#endif /* _WMI_UNIFIED_REG_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_twt_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_twt_api.h new file mode 100644 index 0000000000000000000000000000000000000000..ce5c53b8942475a36ffe03bd3183548d807e73c9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_twt_api.h @@ -0,0 +1,176 @@ + +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to TWT component. + */ + +#ifndef _WMI_UNIFIED_TWT_API_H_ +#define _WMI_UNIFIED_TWT_API_H_ + +#include "wmi_unified_twt_param.h" + + +/** + * wmi_unified_twt_enable_cmd() - Send WMI command to Enable TWT + * @wmi_hdl: wmi handle + * @params: Parameters to be configured + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_twt_enable_cmd(void *wmi_hdl, + struct wmi_twt_enable_param *params); + +/** + * wmi_unified_twt_disable_cmd() - Send WMI command to disable TWT + * @wmi_hdl: wmi handle + * @params: Parameters to be configured + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_twt_disable_cmd(void *wmi_hdl, + struct wmi_twt_disable_param *params); + +/** + * wmi_unified_twt_add_dialog_cmd() - Send WMI command to add TWT dialog + * @wmi_hdl: wmi handle + * @params: Parameters to be configured + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_twt_add_dialog_cmd(void *wmi_hdl, + struct wmi_twt_add_dialog_param *params); + +/** + * wmi_unified_twt_del_dialog_cmd() - Send WMI command to delete TWT dialog + * 
@wmi_hdl: wmi handle + * @params: Parameters to be configured + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_twt_del_dialog_cmd(void *wmi_hdl, + struct wmi_twt_del_dialog_param *params); + +/** + * wmi_unified_twt_pause_dialog_cmd() - Send WMI command to pause TWT dialog + * @wmi_hdl: wmi handle + * @params: Parameters to be configured + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_twt_pause_dialog_cmd(void *wmi_hdl, + struct wmi_twt_pause_dialog_cmd_param *params); + +/** + * wmi_unified_twt_resume_dialog_cmd() - Send WMI command to resume TWT dialog + * @wmi_hdl: wmi handle + * @params: Parameters to be configured + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_twt_resume_dialog_cmd(void *wmi_hdl, + struct wmi_twt_resume_dialog_cmd_param *params); + +/** + * wmi_extract_twt_enable_comp_event() - Extract WMI event params for TWT enable + * completion event + * @wmi_hdl: wmi handle + * @evt_buf: Pointer event buffer + * @params: Parameters to extract + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_twt_enable_comp_event(void *wmi_hdl, + uint8_t *evt_buf, + struct wmi_twt_enable_complete_event_param *params); + +/** + * wmi_extract_twt_disable_comp_event() - Extract WMI event params for TWT + * disable completion event + * @wmi_hdl: wmi handle + * @evt_buf: Pointer event buffer + * @params: Parameters to extract + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_twt_disable_comp_event(void *wmi_hdl, + uint8_t *evt_buf, + struct wmi_twt_disable_complete_event *params); + +/** + * wmi_extract_twt_add_dialog_comp_event() - Extract WMI event params for TWT + * add dialog completion event + * @wmi_hdl: wmi handle + * @evt_buf: Pointer event buffer + * 
@params: Parameters to extract + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_twt_add_dialog_comp_event(void *wmi_hdl, + uint8_t *evt_buf, + struct wmi_twt_add_dialog_complete_event_param *params); + +/** + * wmi_extract_twt_del_dialog_comp_event() - Extract WMI event params for TWT + * delete dialog completion event + * @wmi_hdl: wmi handle + * @evt_buf: Pointer event buffer + * @params: Parameters to extract + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_twt_del_dialog_comp_event(void *wmi_hdl, + uint8_t *evt_buf, + struct wmi_twt_del_dialog_complete_event_param *params); + +/** + * wmi_extract_twt_pause_dialog_comp_event() - Extract WMI event params for TWT + * pause dialog completion event + * @wmi_hdl: wmi handle + * @evt_buf: Pointer event buffer + * @params: Parameters to extract + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_twt_pause_dialog_comp_event(void *wmi_hdl, + uint8_t *evt_buf, + struct wmi_twt_pause_dialog_complete_event_param *params); + +/** + * wmi_extract_twt_resume_dialog_comp_event() - Extract WMI event params for TWT + * resume dialog completion event + * @wmi_hdl: wmi handle + * @evt_buf: Pointer event buffer + * @params: Parameters to extract + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_twt_resume_dialog_comp_event(void *wmi_hdl, + uint8_t *evt_buf, + struct wmi_twt_resume_dialog_complete_event_param *params); + +#ifdef WLAN_SUPPORT_TWT +void wmi_twt_attach_tlv(struct wmi_unified *wmi_handle); +#else +static void wmi_twt_attach_tlv(struct wmi_unified *wmi_handle) +{ + return; +} +#endif + +#endif /* _WMI_UNIFIED_TWT_API_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_twt_param.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_twt_param.h new file 
/*
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file contains the API definitions for the TWT WMI APIs.
 */

#ifndef _WMI_UNIFIED_TWT_PARAM_H_
#define _WMI_UNIFIED_TWT_PARAM_H_

/**
 * struct wmi_twt_enable_param - parameters of the TWT enable command
 * @pdev_id: pdev_id for identifying the MAC.
 * @sta_cong_timer_ms: STA TWT congestion timer TO value in terms of ms
 * @mbss_support: Flag indicating if AP TWT feature supported in
 *                MBSS mode or not.
 * @default_slot_size: This is the default value for the TWT slot setup
 *                by AP (units = microseconds)
 * @congestion_thresh_setup: Minimum congestion required to start setting
 *                up TWT sessions
 * @congestion_thresh_teardown: Minimum congestion below which TWT will be
 *                torn down (in percent of occupied airtime)
 * @congestion_thresh_critical: Threshold above which TWT will not be active
 *                (in percent of occupied airtime)
 * @interference_thresh_teardown: Minimum interference above that TWT
 *                will not be active. The interference parameters use an
 *                abstract method of evaluating interference.
 *                The parameters are in percent, ranging from 0 for no
 *                interference, to 100 for interference extreme enough
 *                to completely block the signal of interest.
 * @interference_thresh_setup: Minimum interference below that TWT session
 *                can be setup. The interference parameters use an
 *                abstract method of evaluating interference.
 *                The parameters are in percent, ranging from 0 for no
 *                interference, to 100 for interference extreme enough
 *                to completely block the signal of interest.
 * @min_no_sta_setup: Minimum no of STA required to start TWT setup
 * @min_no_sta_teardown: Minimum no of STA below which TWT will be torn down
 * @no_of_bcast_mcast_slots: Number of default slot sizes reserved for
 *                BCAST/MCAST delivery
 * @min_no_twt_slots: Minimum no of available slots for TWT to be operational
 * @max_no_sta_twt: Max no of STA with which TWT is possible
 *                (must be <= the wmi_resource_config's twt_ap_sta_count value)
 *
 * The below interval parameters have units of milliseconds.
 * @mode_check_interval: Interval between two successive checks to decide the
 *                mode of TWT. (units = milliseconds)
 * @add_sta_slot_interval: Interval between decisions making to create
 *                TWT slots for STAs. (units = milliseconds)
 * @remove_sta_slot_interval: Interval between decisions making to remove TWT
 *                slot of STAs. (units = milliseconds)
 */
struct wmi_twt_enable_param {
	uint32_t pdev_id;
	uint32_t sta_cong_timer_ms;
	uint32_t mbss_support;
	uint32_t default_slot_size;
	uint32_t congestion_thresh_setup;
	uint32_t congestion_thresh_teardown;
	uint32_t congestion_thresh_critical;
	uint32_t interference_thresh_teardown;
	uint32_t interference_thresh_setup;
	uint32_t min_no_sta_setup;
	uint32_t min_no_sta_teardown;
	uint32_t no_of_bcast_mcast_slots;
	uint32_t min_no_twt_slots;
	uint32_t max_no_sta_twt;
	uint32_t mode_check_interval;
	uint32_t add_sta_slot_interval;
	uint32_t remove_sta_slot_interval;
};

/* status code of enabling TWT
 * WMI_HOST_ENABLE_TWT_STATUS_OK: enabling TWT successfully completed
 * WMI_HOST_ENABLE_TWT_STATUS_ALREADY_ENABLED: TWT already enabled
 * WMI_HOST_ENABLE_TWT_STATUS_NOT_READY: FW not ready for enabling TWT
 * WMI_HOST_ENABLE_TWT_INVALID_PARAM: invalid parameters
 * WMI_HOST_ENABLE_TWT_STATUS_UNKNOWN_ERROR: enabling TWT failed with an
 * unknown reason
 */
enum WMI_HOST_ENABLE_TWT_STATUS {
	WMI_HOST_ENABLE_TWT_STATUS_OK,
	WMI_HOST_ENABLE_TWT_STATUS_ALREADY_ENABLED,
	WMI_HOST_ENABLE_TWT_STATUS_NOT_READY,
	WMI_HOST_ENABLE_TWT_INVALID_PARAM,
	WMI_HOST_ENABLE_TWT_STATUS_UNKNOWN_ERROR,
};

/** struct wmi_twt_enable_complete_event_param:
 * @pdev_id: pdev_id for identifying the MAC.
 * @status: From enum WMI_HOST_ENABLE_TWT_STATUS
 */
struct wmi_twt_enable_complete_event_param {
	uint32_t pdev_id;
	uint32_t status;
};

/** struct wmi_twt_disable_param:
 * @pdev_id: pdev_id for identifying the MAC.
 */
struct wmi_twt_disable_param {
	uint32_t pdev_id;
};

/** struct wmi_twt_disable_complete_event:
 * @pdev_id: pdev_id for identifying the MAC.
 */
struct wmi_twt_disable_complete_event {
	uint32_t pdev_id;
};

/* from IEEE 802.11ah section 9.4.2.200 */
enum WMI_HOST_TWT_COMMAND {
	WMI_HOST_TWT_COMMAND_REQUEST_TWT = 0,
	WMI_HOST_TWT_COMMAND_SUGGEST_TWT = 1,
	WMI_HOST_TWT_COMMAND_DEMAND_TWT = 2,
	WMI_HOST_TWT_COMMAND_TWT_GROUPING = 3,
	WMI_HOST_TWT_COMMAND_ACCEPT_TWT = 4,
	WMI_HOST_TWT_COMMAND_ALTERNATE_TWT = 5,
	WMI_HOST_TWT_COMMAND_DICTATE_TWT = 6,
	WMI_HOST_TWT_COMMAND_REJECT_TWT = 7,
};

/** struct wmi_twt_add_dialog_param -
 * @vdev_id: VDEV identifier
 * @peer_macaddr: peer MAC address when vdev is AP VDEV
 * @dialog_id: dialog_id (TWT dialog ID)
 *             This dialog ID must be unique within its vdev.
 * @wake_intvl_us: TWT Wake Interval in units of us
 * @wake_intvl_mantis: TWT Wake Interval Mantissa
 *                 - wake_intvl_mantis must be <= 0xFFFF
 *                 - wake_intvl_us must be divided evenly by wake_intvl_mantis,
 *                   i.e., wake_intvl_us % wake_intvl_mantis == 0
 *                 - the quotient of wake_intvl_us/wake_intvl_mantis must be
 *                   2 to N-th(0<=N<=31) power,
 *                   i.e., wake_intvl_us/wake_intvl_mantis == 2^N, 0<=N<=31
 * @wake_dura_us: TWT Wake Duration in units of us, must be <= 0xFFFF
 *                wake_dura_us must be divided evenly by 256,
 *                i.e., wake_dura_us % 256 == 0
 * @sp_offset_us: this long time after TWT setup the 1st SP will start.
 * @twt_cmd: cmd from enum WMI_HOST_TWT_COMMAND
 * @flag_bcast: 0 means Individual TWT,
 *              1 means Broadcast TWT
 * @flag_trigger: 0 means non-Trigger-enabled TWT,
 *                1 means Trigger-enabled TWT
 * @flag_flow_type: 0 means announced TWT,
 *                  1 means un-announced TWT
 * @flag_protection: 0 means TWT protection is required,
 *                   1 means TWT protection is not required
 */
struct wmi_twt_add_dialog_param {
	uint32_t vdev_id;
	uint8_t peer_macaddr[6];
	uint32_t dialog_id;
	uint32_t wake_intvl_us;
	uint32_t wake_intvl_mantis;
	uint32_t wake_dura_us;
	uint32_t sp_offset_us;
	enum WMI_HOST_TWT_COMMAND twt_cmd;
	uint32_t
		flag_bcast:1,
		flag_trigger:1,
		flag_flow_type:1,
		flag_protection:1;
};

/* enum - status code of adding TWT dialog
 * WMI_HOST_ADD_TWT_STATUS_OK: adding TWT dialog successfully completed
 * WMI_HOST_ADD_TWT_STATUS_TWT_NOT_ENABLED: TWT not enabled
 * WMI_HOST_ADD_TWT_STATUS_USED_DIALOG_ID: TWT dialog ID is already used
 * WMI_HOST_ADD_TWT_STATUS_INVALID_PARAM: invalid parameters
 * WMI_HOST_ADD_TWT_STATUS_NOT_READY: FW not ready
 * WMI_HOST_ADD_TWT_STATUS_NO_RESOURCE: FW resource exhausted
 * WMI_HOST_ADD_TWT_STATUS_NO_ACK: peer AP/STA did not ACK the
 * request/response frame
 * WMI_HOST_ADD_TWT_STATUS_NO_RESPONSE: peer AP did not send the response frame
 * WMI_HOST_ADD_TWT_STATUS_DENIED: AP did not accept the request
 * WMI_HOST_ADD_TWT_STATUS_UNKNOWN_ERROR: adding TWT dialog failed with
 * an unknown reason
 */
enum WMI_HOST_ADD_TWT_STATUS {
	WMI_HOST_ADD_TWT_STATUS_OK,
	WMI_HOST_ADD_TWT_STATUS_TWT_NOT_ENABLED,
	WMI_HOST_ADD_TWT_STATUS_USED_DIALOG_ID,
	WMI_HOST_ADD_TWT_STATUS_INVALID_PARAM,
	WMI_HOST_ADD_TWT_STATUS_NOT_READY,
	WMI_HOST_ADD_TWT_STATUS_NO_RESOURCE,
	WMI_HOST_ADD_TWT_STATUS_NO_ACK,
	WMI_HOST_ADD_TWT_STATUS_NO_RESPONSE,
	WMI_HOST_ADD_TWT_STATUS_DENIED,
	WMI_HOST_ADD_TWT_STATUS_UNKNOWN_ERROR,
};

/** struct wmi_twt_add_dialog_complete_event_param -
 * @vdev_id: VDEV identifier
 * @dialog_id: TWT dialog ID
 * @status: refer to WMI_HOST_ADD_TWT_STATUS enum
 */
struct wmi_twt_add_dialog_complete_event_param {
	uint32_t vdev_id;
	uint32_t dialog_id;
	uint32_t status;
};

/** struct wmi_twt_del_dialog_param -
 * @vdev_id: VDEV identifier
 * @dialog_id: TWT dialog ID
 */
struct wmi_twt_del_dialog_param {
	uint32_t vdev_id;
	uint32_t dialog_id;
};

/* status code of deleting TWT dialog
 * WMI_HOST_DEL_TWT_STATUS_OK: deleting TWT dialog successfully completed
 * WMI_HOST_DEL_TWT_STATUS_DIALOG_ID_NOT_EXIST: TWT dialog ID does not exist
 * WMI_HOST_DEL_TWT_STATUS_INVALID_PARAM: invalid parameters
 * WMI_HOST_DEL_TWT_STATUS_DIALOG_ID_BUSY: FW is in the process of handling
 * this dialog
 * WMI_HOST_DEL_TWT_STATUS_NO_RESOURCE: FW resource exhausted
 * WMI_HOST_DEL_TWT_STATUS_NO_ACK: peer AP/STA did not ACK the request/response
 * frame
 * WMI_HOST_DEL_TWT_STATUS_UNKNOWN_ERROR: deleting TWT dialog failed with an
 * unknown reason
 */
enum WMI_HOST_DEL_TWT_STATUS {
	WMI_HOST_DEL_TWT_STATUS_OK,
	WMI_HOST_DEL_TWT_STATUS_DIALOG_ID_NOT_EXIST,
	WMI_HOST_DEL_TWT_STATUS_INVALID_PARAM,
	WMI_HOST_DEL_TWT_STATUS_DIALOG_ID_BUSY,
	WMI_HOST_DEL_TWT_STATUS_NO_RESOURCE,
	WMI_HOST_DEL_TWT_STATUS_NO_ACK,
	WMI_HOST_DEL_TWT_STATUS_UNKNOWN_ERROR,
};

/** struct wmi_twt_del_dialog_complete_event_param -
 * @vdev_id: VDEV identifier
 * @dialog_id: TWT dialog ID
 * @status: refer to WMI_HOST_DEL_TWT_STATUS enum
 */
struct wmi_twt_del_dialog_complete_event_param {
	uint32_t vdev_id;
	uint32_t dialog_id;
	uint32_t status;
};

/** struct wmi_twt_pause_dialog_cmd_param -
 * @vdev_id: VDEV identifier
 * @dialog_id: TWT dialog ID
 */
struct wmi_twt_pause_dialog_cmd_param {
	uint32_t vdev_id;
	uint32_t dialog_id;
};

/* enum WMI_HOST_PAUSE_TWT_STATUS - status code of pausing TWT dialog
 * WMI_HOST_PAUSE_TWT_STATUS_OK: pausing TWT dialog successfully completed
 * WMI_HOST_PAUSE_TWT_STATUS_DIALOG_ID_NOT_EXIST: TWT dialog ID does not exist
 * WMI_HOST_PAUSE_TWT_STATUS_INVALID_PARAM: invalid parameters
 * WMI_HOST_PAUSE_TWT_STATUS_DIALOG_ID_BUSY: FW is in the process of handling
 * this dialog
 * WMI_HOST_PAUSE_TWT_STATUS_NO_RESOURCE: FW resource exhausted
 * WMI_HOST_PAUSE_TWT_STATUS_NO_ACK: peer AP/STA did not ACK the
 * request/response frame
 * WMI_HOST_PAUSE_TWT_STATUS_UNKNOWN_ERROR: pausing TWT dialog failed with an
 * unknown reason
 */
enum WMI_HOST_PAUSE_TWT_STATUS {
	WMI_HOST_PAUSE_TWT_STATUS_OK,
	WMI_HOST_PAUSE_TWT_STATUS_DIALOG_ID_NOT_EXIST,
	WMI_HOST_PAUSE_TWT_STATUS_INVALID_PARAM,
	WMI_HOST_PAUSE_TWT_STATUS_DIALOG_ID_BUSY,
	WMI_HOST_PAUSE_TWT_STATUS_NO_RESOURCE,
	WMI_HOST_PAUSE_TWT_STATUS_NO_ACK,
	WMI_HOST_PAUSE_TWT_STATUS_UNKNOWN_ERROR,
};

/** struct wmi_twt_pause_dialog_complete_event_param -
 * @vdev_id: VDEV identifier
 * @dialog_id: TWT dialog ID
 * @status: refer to WMI_HOST_PAUSE_TWT_STATUS
 */
struct wmi_twt_pause_dialog_complete_event_param {
	uint32_t vdev_id;
	uint32_t dialog_id;
	uint32_t status;
};

/** struct wmi_twt_resume_dialog_cmd_param -
 * @vdev_id: VDEV identifier
 * @dialog_id: TWT dialog ID
 * @sp_offset_us: this long time after TWT resumed the 1st SP will start
 */
struct wmi_twt_resume_dialog_cmd_param {
	uint32_t vdev_id;
	uint32_t dialog_id;
	uint32_t sp_offset_us;
};

/* enum WMI_HOST_RESUME_TWT_STATUS - status code of resuming TWT dialog
 * WMI_HOST_RESUME_TWT_STATUS_OK: resuming TWT dialog successfully completed
 * WMI_HOST_RESUME_TWT_STATUS_DIALOG_ID_NOT_EXIST: TWT dialog ID does not exist
 * WMI_HOST_RESUME_TWT_STATUS_INVALID_PARAM: invalid parameters
 * WMI_HOST_RESUME_TWT_STATUS_DIALOG_ID_BUSY: FW is in the process of handling
 * this dialog
 * WMI_HOST_RESUME_TWT_STATUS_NOT_PAUSED: dialog not paused currently
 * WMI_HOST_RESUME_TWT_STATUS_NO_RESOURCE: FW resource exhausted
 * WMI_HOST_RESUME_TWT_STATUS_NO_ACK: peer AP/STA did not ACK the
 * request/response frame
 * WMI_HOST_RESUME_TWT_STATUS_UNKNOWN_ERROR: resuming TWT dialog failed with an
 * unknown reason
 */
enum WMI_HOST_RESUME_TWT_STATUS {
	WMI_HOST_RESUME_TWT_STATUS_OK,
	WMI_HOST_RESUME_TWT_STATUS_DIALOG_ID_NOT_EXIST,
	WMI_HOST_RESUME_TWT_STATUS_INVALID_PARAM,
	WMI_HOST_RESUME_TWT_STATUS_DIALOG_ID_BUSY,
	WMI_HOST_RESUME_TWT_STATUS_NOT_PAUSED,
	WMI_HOST_RESUME_TWT_STATUS_NO_RESOURCE,
	WMI_HOST_RESUME_TWT_STATUS_NO_ACK,
	WMI_HOST_RESUME_TWT_STATUS_UNKNOWN_ERROR,
};

/** struct wmi_twt_resume_dialog_complete_event_param -
 * @vdev_id: VDEV identifier
 * @dialog_id: TWT dialog ID
 * @status: refer to WMI_HOST_RESUME_TWT_STATUS
 */
struct wmi_twt_resume_dialog_complete_event_param {
	uint32_t vdev_id;
	uint32_t dialog_id;
	uint32_t status;
};

#endif /* _WMI_UNIFIED_TWT_PARAM_H_ */
IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Every Product Line or chipset or team can have its own Whitelist table.
 * The following is a list of versions that the present software can support
 * even though its versions are incompatible. Any entry here means that the
 * indicated version does not break WMI compatibility even though it has
 * a minor version change.
 */
#ifndef _WMI_VERSION_WHITELIST_H_
#define _WMI_VERSION_WHITELIST_H_
static wmi_whitelist_version_info version_whitelist[] = {
	/* NOTE(review): entry appears to encode {major, minor, namespace
	 * words, ...} — 0x5F414351/0x00004C4D are the ASCII words of
	 * "QCA_" "ML"; confirm the field order against the declaration of
	 * wmi_whitelist_version_info. */
	{0, 0, 0x5F414351, 0x00004C4D, 0, 0}
	/* Placeholder: Major=0, Minor=0, Namespace="QCA_ML" (Dummy entry) */
};
#endif /* _WMI_VERSION_WHITELIST_H_ */
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_tlv_helper.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_tlv_helper.c
new file mode 100644
index 0000000000000000000000000000000000000000..f01cbd2d55acd3131bf982e6d277d58363a9a569
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_tlv_helper.c
@@ -0,0 +1,1350 @@
/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "wmi_tlv_platform.c"
#include "wmi_tlv_defs.h"
#include "wmi_version.h"
#include "qdf_module.h"

/* Sentinel tlv_order value passed to wmitlv_get_attributes() when the caller
 * only wants the number of TLVs of a command/event, not a specific TLV. */
#define WMITLV_GET_ATTRIB_NUM_TLVS 0xFFFFFFFF

/* Record-header word layout (ATTRB0): bits 0..23 = command/event ID,
 * bits 24..31 = number of TLVs for that ID. */
#define WMITLV_GET_CMDID(val) (val & 0x00FFFFFF)
#define WMITLV_GET_NUM_TLVS(val) ((val >> 24) & 0xFF)

/* Per-TLV attribute word layout (ATTRB1): bits 0..11 = tag ID,
 * bits 12..20 = structure size in bytes, bits 21..29 = array size,
 * bit 30 = variable-length flag. */
#define WMITLV_GET_TAGID(val) (val & 0x00000FFF)
#define WMITLV_GET_TAG_STRUCT_SIZE(val) ((val >> 12) & 0x000001FF)
#define WMITLV_GET_TAG_ARRAY_SIZE(val) ((val >> 21) & 0x000001FF)
#define WMITLV_GET_TAG_VARIED(val) ((val >> 30) & 0x00000001)

#define WMITLV_SET_ATTRB0(id) ((WMITLV_GET_TAG_NUM_TLV_ATTRIB(id) << 24) | \
				(id & 0x00FFFFFF))
#define WMITLV_SET_ATTRB1(tagID, tagStructSize, tagArraySize, tagVaried) \
	(((tagVaried&0x1)<<30) | ((tagArraySize&0x1FF)<<21) | \
	((tagStructSize&0x1FF)<<12) | (tagID&0xFFF))

/* Expanded once per TLV by the WMITLV_TABLE X-macro: emits one ATTRB1 word. */
#define WMITLV_OP_SET_TLV_ATTRIB_macro(param_ptr, param_len, wmi_cmd_event_id, \
	elem_tlv_tag, elem_struc_type, elem_name, var_len, arr_size) \
	WMITLV_SET_ATTRB1(elem_tlv_tag, sizeof(elem_struc_type), arr_size, var_len),

/* Emits one packed record: an ATTRB0 header word followed by one ATTRB1
 * word per TLV of the given command/event ID. */
#define WMITLV_GET_CMD_EVT_ATTRB_LIST(id) \
	WMITLV_SET_ATTRB0(id), \
	WMITLV_TABLE(id,SET_TLV_ATTRIB, NULL, 0)

/* Flat array of variable-length records, one record per WMI command;
 * walked by wmitlv_get_attributes(). */
uint32_t cmd_attr_list[] = {
	WMITLV_ALL_CMD_LIST(WMITLV_GET_CMD_EVT_ATTRB_LIST)
};

/* Same layout as cmd_attr_list, but one record per WMI event. */
uint32_t evt_attr_list[] = {
	WMITLV_ALL_EVT_LIST(WMITLV_GET_CMD_EVT_ATTRB_LIST)
};

#ifdef NO_DYNAMIC_MEM_ALLOC
/* Caller-provided static buffer (and its capacity in TLVs) used instead of
 * heap allocation; see wmitlv_set_static_param_tlv_buf(). */
static wmitlv_cmd_param_info *g_wmi_static_cmd_param_info_buf;
uint32_t g_wmi_static_max_cmd_param_tlvs;
#endif


/**
 * wmitlv_set_static_param_tlv_buf() - tlv helper function
 * @param_tlv_buf: tlv buffer parameter
 * @max_tlvs_accommodated: max no of tlv entries
 *
 *
 * WMI TLV Helper function to set the static cmd_param_tlv structure
 * and number of TLVs that can be accommodated in the structure.
 * This function should be used when dynamic memory allocation is not
 * supported. When dynamic memory allocation is not supported by any
 * component then NO_DYNAMIC_MEM_ALLOC macro has to be defined in respective
 * tlv_platform.c file. And respective component has to allocate
 * cmd_param_tlv structure buffer to accommodate whatever number of TLV's.
 * Both the buffer address and number of TLV's that can be accommodated in
 * the buffer should be sent as arguments to this function.
 *
 * Note: a no-op unless built with NO_DYNAMIC_MEM_ALLOC.
 *
 * Return None
 */
void
wmitlv_set_static_param_tlv_buf(void *param_tlv_buf,
				uint32_t max_tlvs_accommodated)
{
#ifdef NO_DYNAMIC_MEM_ALLOC
	g_wmi_static_cmd_param_info_buf = param_tlv_buf;
	g_wmi_static_max_cmd_param_tlvs = max_tlvs_accommodated;
#endif
}

/**
 * wmitlv_get_attributes() - tlv helper function
 * @is_cmd_id: true to look up cmd_attr_list, false for evt_attr_list
 * @cmd_event_id: command or event id
 * @curr_tlv_order: zero-based TLV position, or WMITLV_GET_ATTRIB_NUM_TLVS
 *                  to only fetch the TLV count into @tlv_attr_ptr
 * @tlv_attr_ptr: output; receives the TLV count and, unless only the count
 *                was requested, the tag id/sizes/varied flag of the TLV
 *
 *
 * WMI TLV Helper functions to find the attributes of the
 * Command/Event TLVs.
 *
 * Return: 0 if success. Return >=1 if failure.
 */
static
uint32_t wmitlv_get_attributes(uint32_t is_cmd_id, uint32_t cmd_event_id,
			       uint32_t curr_tlv_order,
			       wmitlv_attributes_struc *tlv_attr_ptr)
{
	uint32_t i, base_index, num_tlvs, num_entries;
	uint32_t *pAttrArrayList;

	/* Select the packed attribute table for commands or events. */
	if (is_cmd_id) {
		pAttrArrayList = &cmd_attr_list[0];
		num_entries = QDF_ARRAY_SIZE(cmd_attr_list);
	} else {
		pAttrArrayList = &evt_attr_list[0];
		num_entries = QDF_ARRAY_SIZE(evt_attr_list);
	}

	/* The table is a sequence of records: one header word (id + TLV
	 * count) followed by one attribute word per TLV. */
	for (i = 0; i < num_entries; i++) {
		num_tlvs = WMITLV_GET_NUM_TLVS(pAttrArrayList[i]);
		if (WMITLV_GET_CMDID(cmd_event_id) ==
		    WMITLV_GET_CMDID(pAttrArrayList[i])) {
			tlv_attr_ptr->cmd_num_tlv = num_tlvs;
			/* Return success from here when only number of TLVS for
			 * this command/event is required */
			if (curr_tlv_order == WMITLV_GET_ATTRIB_NUM_TLVS) {
				wmi_tlv_print_verbose
					("%s: WMI TLV attribute definitions for %s:0x%x found; num_of_tlvs:%d\n",
					__func__, (is_cmd_id ? "Cmd" : "Evt"),
					cmd_event_id, num_tlvs);
				return 0;
			}

			/* Return failure if tlv_order is more than the expected
			 * number of TLVs */
			if (curr_tlv_order >= num_tlvs) {
				wmi_tlv_print_error
					("%s: ERROR: TLV order %d greater than num_of_tlvs:%d for %s:0x%x\n",
					__func__, curr_tlv_order, num_tlvs,
					(is_cmd_id ? "Cmd" : "Evt"), cmd_event_id);
				return 1;
			}

			base_index = i + 1;	/* index to first TLV attributes */
			wmi_tlv_print_verbose
				("%s: WMI TLV attributes for %s:0x%x tlv[%d]:0x%x\n",
				__func__, (is_cmd_id ? "Cmd" : "Evt"),
				cmd_event_id, curr_tlv_order,
				pAttrArrayList[(base_index + curr_tlv_order)]);
			/* Unpack the requested TLV's attribute word. */
			tlv_attr_ptr->tag_order = curr_tlv_order;
			tlv_attr_ptr->tag_id =
				WMITLV_GET_TAGID(pAttrArrayList
					[(base_index + curr_tlv_order)]);
			tlv_attr_ptr->tag_struct_size =
				WMITLV_GET_TAG_STRUCT_SIZE(pAttrArrayList
					[(base_index +
					curr_tlv_order)]);
			tlv_attr_ptr->tag_varied_size =
				WMITLV_GET_TAG_VARIED(pAttrArrayList
					[(base_index +
					curr_tlv_order)]);
			tlv_attr_ptr->tag_array_size =
				WMITLV_GET_TAG_ARRAY_SIZE(pAttrArrayList
					[(base_index +
					curr_tlv_order)]);
			return 0;
		}
		/* Not this record: skip its attribute words; together with
		 * the loop's i++ this lands on the next record's header. */
		i += num_tlvs;
	}

	wmi_tlv_print_error
		("%s: ERROR: Didn't found WMI TLV attribute definitions for %s:0x%x\n",
		__func__, (is_cmd_id ? "Cmd" : "Evt"), cmd_event_id);
	return 1;
}

/**
 * wmitlv_check_tlv_params() - tlv helper function
 * @os_handle: os context handle
 * @param_struc_ptr: pointer to the start of the TLV buffer to validate
 * @param_buf_len: total length of the TLV buffer in bytes
 * @is_cmd_id: true when validating a command, false for an event
 * @wmi_cmd_event_id: command/event id whose TLV schema is checked against
 *
 *
 * Helper Function to validate the prepared TLV's for
 * a WMI event/command to be sent.
 *
 * Return: 0 if success. Return < 0 if failure.
+ */ +static int +wmitlv_check_tlv_params(void *os_handle, void *param_struc_ptr, + uint32_t param_buf_len, uint32_t is_cmd_id, + uint32_t wmi_cmd_event_id) +{ + wmitlv_attributes_struc attr_struct_ptr; + uint32_t buf_idx = 0; + uint32_t tlv_index = 0; + uint8_t *buf_ptr = (unsigned char *)param_struc_ptr; + uint32_t expected_num_tlvs, expected_tlv_len; + int32_t error = -1; + + /* Get the number of TLVs for this command/event */ + if (wmitlv_get_attributes + (is_cmd_id, wmi_cmd_event_id, WMITLV_GET_ATTRIB_NUM_TLVS, + &attr_struct_ptr) != 0) { + wmi_tlv_print_error + ("%s: ERROR: Couldn't get expected number of TLVs for Cmd=%d\n", + __func__, wmi_cmd_event_id); + goto Error_wmitlv_check_tlv_params; + } + + /* NOTE: the returned number of TLVs is in "attr_struct_ptr.cmd_num_tlv" */ + + expected_num_tlvs = attr_struct_ptr.cmd_num_tlv; + + while ((buf_idx + WMI_TLV_HDR_SIZE) <= param_buf_len) { + uint32_t curr_tlv_tag = + WMITLV_GET_TLVTAG(WMITLV_GET_HDR(buf_ptr)); + uint32_t curr_tlv_len = + WMITLV_GET_TLVLEN(WMITLV_GET_HDR(buf_ptr)); + + if ((buf_idx + WMI_TLV_HDR_SIZE + curr_tlv_len) > param_buf_len) { + wmi_tlv_print_error + ("%s: ERROR: Invalid TLV length for Cmd=%d Tag_order=%d buf_idx=%d Tag:%d Len:%d TotalLen:%d\n", + __func__, wmi_cmd_event_id, tlv_index, buf_idx, + curr_tlv_tag, curr_tlv_len, param_buf_len); + goto Error_wmitlv_check_tlv_params; + } + + /* Get the attributes of the TLV with the given order in "tlv_index" */ + wmi_tlv_OS_MEMZERO(&attr_struct_ptr, + sizeof(wmitlv_attributes_struc)); + if (wmitlv_get_attributes + (is_cmd_id, wmi_cmd_event_id, tlv_index, + &attr_struct_ptr) != 0) { + wmi_tlv_print_error + ("%s: ERROR: No TLV attributes found for Cmd=%d Tag_order=%d\n", + __func__, wmi_cmd_event_id, tlv_index); + goto Error_wmitlv_check_tlv_params; + } + + /* Found the TLV that we wanted */ + wmi_tlv_print_verbose("%s: [tlv %d]: tag=%d, len=%d\n", + __func__, tlv_index, curr_tlv_tag, + curr_tlv_len); + + /* Validating Tag ID order */ + if 
(curr_tlv_tag != attr_struct_ptr.tag_id) { + wmi_tlv_print_error + ("%s: ERROR: TLV has wrong tag in order for Cmd=0x%x. Given=%d, Expected=%d.\n", + __func__, wmi_cmd_event_id, curr_tlv_tag, + attr_struct_ptr.tag_id); + goto Error_wmitlv_check_tlv_params; + } + + /* Validate Tag length */ + /* Array TLVs length checking needs special handling */ + if ((curr_tlv_tag >= WMITLV_TAG_FIRST_ARRAY_ENUM) + && (curr_tlv_tag <= WMITLV_TAG_LAST_ARRAY_ENUM)) { + if (attr_struct_ptr.tag_varied_size == WMITLV_SIZE_FIX) { + /* Array size can't be invalid for fixed size Array TLV */ + if (WMITLV_ARR_SIZE_INVALID == + attr_struct_ptr.tag_array_size) { + wmi_tlv_print_error + ("%s: ERROR: array_size can't be invalid for Array TLV Cmd=0x%x Tag=%d\n", + __func__, wmi_cmd_event_id, + curr_tlv_tag); + goto Error_wmitlv_check_tlv_params; + } + + expected_tlv_len = + attr_struct_ptr.tag_array_size * + attr_struct_ptr.tag_struct_size; + /* Paddding is only required for Byte array Tlvs all other + * array tlv's should be aligned to 4 bytes during their + * definition */ + if (WMITLV_TAG_ARRAY_BYTE == + attr_struct_ptr.tag_id) { + expected_tlv_len = + roundup(expected_tlv_len, + sizeof(uint32_t)); + } + + if (curr_tlv_len != expected_tlv_len) { + wmi_tlv_print_error + ("%s: ERROR: TLV has wrong length for Cmd=0x%x. 
Tag_order=%d Tag=%d, Given_Len:%d Expected_Len=%d.\n", + __func__, wmi_cmd_event_id, + tlv_index, curr_tlv_tag, + curr_tlv_len, expected_tlv_len); + goto Error_wmitlv_check_tlv_params; + } + } else { + /* Array size should be invalid for variable size Array TLV */ + if (WMITLV_ARR_SIZE_INVALID != + attr_struct_ptr.tag_array_size) { + wmi_tlv_print_error + ("%s: ERROR: array_size should be invalid for Array TLV Cmd=0x%x Tag=%d\n", + __func__, wmi_cmd_event_id, + curr_tlv_tag); + goto Error_wmitlv_check_tlv_params; + } + + /* Incase of variable length TLV's, there is no expectation + * on the length field so do whatever checking you can + * depending on the TLV tag if TLV length is non-zero */ + if (curr_tlv_len != 0) { + /* Verify TLV length is aligned to the size of structure */ + if ((curr_tlv_len % + attr_struct_ptr.tag_struct_size) != + 0) { + wmi_tlv_print_error + ("%s: ERROR: TLV length %d for Cmd=0x%x is not aligned to size of structure(%d bytes)\n", + __func__, curr_tlv_len, + wmi_cmd_event_id, + attr_struct_ptr. + tag_struct_size); + goto Error_wmitlv_check_tlv_params; + } + + if (curr_tlv_tag == + WMITLV_TAG_ARRAY_STRUC) { + uint8_t *tlv_buf_ptr = NULL; + uint32_t in_tlv_len; + uint32_t idx; + uint32_t num_of_elems; + + /* Verify length of inner TLVs */ + + num_of_elems = + curr_tlv_len / + attr_struct_ptr. + tag_struct_size; + /* Set tlv_buf_ptr to the first inner TLV address */ + tlv_buf_ptr = + buf_ptr + WMI_TLV_HDR_SIZE; + for (idx = 0; + idx < num_of_elems; + idx++) { + in_tlv_len = + WMITLV_GET_TLVLEN + (WMITLV_GET_HDR + (tlv_buf_ptr)); + if ((in_tlv_len + + WMI_TLV_HDR_SIZE) + != + attr_struct_ptr. + tag_struct_size) { + wmi_tlv_print_error + ("%s: ERROR: TLV has wrong length for Cmd=0x%x. Tag_order=%d Tag=%d, Given_Len:%zu Expected_Len=%d.\n", + __func__, + wmi_cmd_event_id, + tlv_index, + curr_tlv_tag, + (in_tlv_len + + + WMI_TLV_HDR_SIZE), + attr_struct_ptr. 
+ tag_struct_size); + goto Error_wmitlv_check_tlv_params; + } + tlv_buf_ptr += + in_tlv_len + + WMI_TLV_HDR_SIZE; + } + } else + if ((curr_tlv_tag == + WMITLV_TAG_ARRAY_UINT32) + || (curr_tlv_tag == + WMITLV_TAG_ARRAY_BYTE) + || (curr_tlv_tag == + WMITLV_TAG_ARRAY_FIXED_STRUC)) { + /* Nothing to verify here */ + } else { + wmi_tlv_print_error + ("%s ERROR Need to handle the Array tlv %d for variable length for Cmd=0x%x\n", + __func__, + attr_struct_ptr.tag_id, + wmi_cmd_event_id); + goto Error_wmitlv_check_tlv_params; + } + } + } + } else { + /* Non-array TLV. */ + + if ((curr_tlv_len + WMI_TLV_HDR_SIZE) != + attr_struct_ptr.tag_struct_size) { + wmi_tlv_print_error + ("%s: ERROR: TLV has wrong length for Cmd=0x%x. Given=%zu, Expected=%d.\n", + __func__, wmi_cmd_event_id, + (curr_tlv_len + WMI_TLV_HDR_SIZE), + attr_struct_ptr.tag_struct_size); + goto Error_wmitlv_check_tlv_params; + } + } + + /* Check TLV length is aligned to 4 bytes or not */ + if ((curr_tlv_len % sizeof(uint32_t)) != 0) { + wmi_tlv_print_error + ("%s: ERROR: TLV length %d for Cmd=0x%x is not aligned to %zu bytes\n", + __func__, curr_tlv_len, wmi_cmd_event_id, + sizeof(uint32_t)); + goto Error_wmitlv_check_tlv_params; + } + + tlv_index++; + buf_ptr += curr_tlv_len + WMI_TLV_HDR_SIZE; + buf_idx += curr_tlv_len + WMI_TLV_HDR_SIZE; + } + + if (tlv_index != expected_num_tlvs) { + wmi_tlv_print_verbose + ("%s: INFO: Less number of TLVs filled for Cmd=0x%x Filled %d Expected=%d\n", + __func__, wmi_cmd_event_id, tlv_index, expected_num_tlvs); + } + + return 0; +Error_wmitlv_check_tlv_params: + return error; +} + +/** + * wmitlv_check_event_tlv_params() - tlv helper function + * @os_handle: os context handle + * @param_struc_ptr: pointer to tlv structure + * @is_cmd_id: boolean for command attribute + * @wmi_cmd_event_id: command event id + * + * + * Helper Function to vaidate the prepared TLV's for + * an WMI event/command to be sent. + * + * Return: 0 if success. Return < 0 if failure. 
+ */ +int +wmitlv_check_event_tlv_params(void *os_handle, void *param_struc_ptr, + uint32_t param_buf_len, uint32_t wmi_cmd_event_id) +{ + uint32_t is_cmd_id = 0; + + return wmitlv_check_tlv_params + (os_handle, param_struc_ptr, param_buf_len, is_cmd_id, + wmi_cmd_event_id); +} + +/** + * wmitlv_check_command_tlv_params() - tlv helper function + * @os_handle: os context handle + * @param_struc_ptr: pointer to tlv structure + * @is_cmd_id: boolean for command attribute + * @wmi_cmd_event_id: command event id + * + * + * Helper Function to vaidate the prepared TLV's for + * an WMI event/command to be sent. + * + * Return: 0 if success. Return < 0 if failure. + */ +int +wmitlv_check_command_tlv_params(void *os_handle, void *param_struc_ptr, + uint32_t param_buf_len, + uint32_t wmi_cmd_event_id) +{ + uint32_t is_cmd_id = 1; + + return wmitlv_check_tlv_params + (os_handle, param_struc_ptr, param_buf_len, is_cmd_id, + wmi_cmd_event_id); +} +qdf_export_symbol(wmitlv_check_command_tlv_params); + +/** + * wmitlv_check_and_pad_tlvs() - tlv helper function + * @os_handle: os context handle + * @param_buf_len: length of tlv parameter + * @param_struc_ptr: pointer to tlv structure + * @is_cmd_id: boolean for command attribute + * @wmi_cmd_event_id: command event id + * @wmi_cmd_struct_ptr: wmi command structure + * + * + * vaidate the TLV's coming for an event/command and + * also pads data to TLV's if necessary + * + * Return: 0 if success. Return < 0 if failure. 
 */
static int
wmitlv_check_and_pad_tlvs(void *os_handle, void *param_struc_ptr,
			  uint32_t param_buf_len, uint32_t is_cmd_id,
			  uint32_t wmi_cmd_event_id, void **wmi_cmd_struct_ptr)
{
	wmitlv_attributes_struc attr_struct_ptr;
	uint32_t buf_idx = 0;		/* byte offset of current TLV in buffer */
	uint32_t tlv_index = 0;		/* ordinal of current TLV */
	uint32_t num_of_elems = 0;	/* element count of current TLV */
	int tlv_size_diff = 0;		/* on-wire size minus host struct size */
	uint8_t *buf_ptr = (unsigned char *)param_struc_ptr;
	wmitlv_cmd_param_info *cmd_param_tlvs_ptr = NULL;
	uint32_t remaining_expected_tlvs = 0xFFFFFFFF;
	uint32_t len_wmi_cmd_struct_buf;
	uint32_t free_buf_len;		/* bytes left after current TLV header */
	int32_t error = -1;

	/* Get the number of TLVs for this command/event */
	if (wmitlv_get_attributes
		    (is_cmd_id, wmi_cmd_event_id, WMITLV_GET_ATTRIB_NUM_TLVS,
		    &attr_struct_ptr) != 0) {
		wmi_tlv_print_error
			("%s: ERROR: Couldn't get expected number of TLVs for Cmd=%d\n",
			__func__, wmi_cmd_event_id);
		return error;
	}
	/* NOTE: the returned number of TLVs is in "attr_struct_ptr.cmd_num_tlv" */

	if (param_buf_len < WMI_TLV_HDR_SIZE) {
		wmi_tlv_print_error
			("%s: ERROR: Incorrect param buf length passed\n",
			__func__);
		return error;
	}

	/* Create base structure of format wmi_cmd_event_id##_param_tlvs */
	len_wmi_cmd_struct_buf =
		attr_struct_ptr.cmd_num_tlv * sizeof(wmitlv_cmd_param_info);
#ifndef NO_DYNAMIC_MEM_ALLOC
	/* Dynamic memory allocation supported */
	wmi_tlv_os_mem_alloc(os_handle, *wmi_cmd_struct_ptr,
			     len_wmi_cmd_struct_buf);
#else
	/* Dynamic memory allocation is not supported. Use the buffer
	 * g_wmi_static_cmd_param_info_buf, which should be set using
	 * wmi_tlv_set_static_param_tlv_buf(),
	 * for base structure of format wmi_cmd_event_id##_param_tlvs */
	*wmi_cmd_struct_ptr = g_wmi_static_cmd_param_info_buf;
	if (attr_struct_ptr.cmd_num_tlv > g_wmi_static_max_cmd_param_tlvs) {
		/* Error: Expecting more TLVs than accommodated by the static
		 * structure */
		wmi_tlv_print_error
			("%s: Error: Expecting more TLVs that accommodated for static structure. Expected:%d Accomodated:%d\n",
			__func__, attr_struct_ptr.cmd_num_tlv,
			g_wmi_static_max_cmd_param_tlvs);
		return error;
	}
#endif
	if (*wmi_cmd_struct_ptr == NULL) {
		/* Error: unable to alloc memory */
		wmi_tlv_print_error
			("%s: Error: unable to alloc memory (size=%d) for TLV\n",
			__func__, len_wmi_cmd_struct_buf);
		return error;
	}

	cmd_param_tlvs_ptr = (wmitlv_cmd_param_info *) *wmi_cmd_struct_ptr;
	wmi_tlv_OS_MEMZERO(cmd_param_tlvs_ptr, len_wmi_cmd_struct_buf);
	remaining_expected_tlvs = attr_struct_ptr.cmd_num_tlv;

	/* Walk every TLV in the buffer in order, matching it against the
	 * attribute table entry with the same ordinal. */
	while (((buf_idx + WMI_TLV_HDR_SIZE) <= param_buf_len)
	       && (remaining_expected_tlvs)) {
		uint32_t curr_tlv_tag =
			WMITLV_GET_TLVTAG(WMITLV_GET_HDR(buf_ptr));
		uint32_t curr_tlv_len =
			WMITLV_GET_TLVLEN(WMITLV_GET_HDR(buf_ptr));
		int num_padding_bytes = 0;

		/* Reject a TLV whose claimed length overruns the buffer */
		free_buf_len = param_buf_len - (buf_idx + WMI_TLV_HDR_SIZE);
		if (curr_tlv_len > free_buf_len) {
			wmi_tlv_print_error("%s: TLV length overflow",
					    __func__);
			goto Error_wmitlv_check_and_pad_tlvs;
		}

		/* Get the attributes of the TLV with the given order in
		 * "tlv_index" */
		wmi_tlv_OS_MEMZERO(&attr_struct_ptr,
				   sizeof(wmitlv_attributes_struc));
		if (wmitlv_get_attributes
			    (is_cmd_id, wmi_cmd_event_id, tlv_index,
			    &attr_struct_ptr) != 0) {
			wmi_tlv_print_error
				("%s: ERROR: No TLV attributes found for Cmd=%d Tag_order=%d\n",
				__func__, wmi_cmd_event_id, tlv_index);
			goto Error_wmitlv_check_and_pad_tlvs;
		}

		/* Found the TLV that we wanted */
		wmi_tlv_print_verbose("%s: [tlv %d]: tag=%d, len=%d\n",
				      __func__, tlv_index, curr_tlv_tag,
				      curr_tlv_len);

		/* Validating Tag order */
		if (curr_tlv_tag != attr_struct_ptr.tag_id) {
			wmi_tlv_print_error
				("%s: ERROR: TLV has wrong tag in order for Cmd=0x%x. Given=%d, Expected=%d.\n",
				__func__, wmi_cmd_event_id, curr_tlv_tag,
				attr_struct_ptr.tag_id);
			goto Error_wmitlv_check_and_pad_tlvs;
		}

		if ((curr_tlv_tag >= WMITLV_TAG_FIRST_ARRAY_ENUM)
		    && (curr_tlv_tag <= WMITLV_TAG_LAST_ARRAY_ENUM)) {
			/* Current Tag is an array of some kind. */
			/* Skip the TLV header of this array */
			buf_ptr += WMI_TLV_HDR_SIZE;
			buf_idx += WMI_TLV_HDR_SIZE;
		} else {
			/* Non-array TLV: from here on curr_tlv_len includes
			 * the TLV header. */
			curr_tlv_len += WMI_TLV_HDR_SIZE;
		}

		if (attr_struct_ptr.tag_varied_size == WMITLV_SIZE_FIX) {
			/* This TLV is fixed length */
			if (WMITLV_ARR_SIZE_INVALID ==
			    attr_struct_ptr.tag_array_size) {
				tlv_size_diff =
					curr_tlv_len -
					attr_struct_ptr.tag_struct_size;
				num_of_elems =
					(curr_tlv_len > WMI_TLV_HDR_SIZE) ? 1 : 0;
			} else {
				tlv_size_diff =
					curr_tlv_len -
					(attr_struct_ptr.tag_struct_size *
					 attr_struct_ptr.tag_array_size);
				num_of_elems = attr_struct_ptr.tag_array_size;
			}
		} else {
			/* This TLV has a variable number of elements */
			if (WMITLV_TAG_ARRAY_STRUC == attr_struct_ptr.tag_id) {
				uint32_t in_tlv_len = 0;

				if (curr_tlv_len != 0) {
					/* Size of one on-wire element, taken
					 * from the first inner TLV header */
					in_tlv_len =
						WMITLV_GET_TLVLEN(WMITLV_GET_HDR
								  (buf_ptr));
					in_tlv_len += WMI_TLV_HDR_SIZE;
					if (in_tlv_len > curr_tlv_len) {
						wmi_tlv_print_error("%s: Invalid in_tlv_len=%d",
								    __func__,
								    in_tlv_len);
						goto Error_wmitlv_check_and_pad_tlvs;
					}
					tlv_size_diff =
						in_tlv_len -
						attr_struct_ptr.tag_struct_size;
					num_of_elems =
						curr_tlv_len / in_tlv_len;
					wmi_tlv_print_verbose
						("%s: WARN: TLV array of structures in_tlv_len=%d struct_size:%d diff:%d num_of_elems=%d \n",
						__func__, in_tlv_len,
						attr_struct_ptr.tag_struct_size,
						tlv_size_diff, num_of_elems);
				} else {
					tlv_size_diff = 0;
					num_of_elems = 0;
				}
			} else
			if ((WMITLV_TAG_ARRAY_UINT32 ==
			     attr_struct_ptr.tag_id)
			    || (WMITLV_TAG_ARRAY_BYTE ==
				attr_struct_ptr.tag_id)
			    || (WMITLV_TAG_ARRAY_FIXED_STRUC ==
				attr_struct_ptr.tag_id)) {
				/* Primitive/fixed-layout arrays never need
				 * padding; only the element count matters. */
				tlv_size_diff = 0;
				num_of_elems =
					curr_tlv_len /
					attr_struct_ptr.tag_struct_size;
			} else {
				wmi_tlv_print_error
					("%s ERROR Need to handle this tag ID for variable length %d\n",
					__func__, attr_struct_ptr.tag_id);
				goto Error_wmitlv_check_and_pad_tlvs;
			}
		}

		if ((WMITLV_TAG_ARRAY_STRUC == attr_struct_ptr.tag_id) &&
		    (tlv_size_diff != 0)) {
			/* Array-of-structures whose element size differs from
			 * the host structure: every element must be resized. */
			void *new_tlv_buf = NULL;
			uint8_t *tlv_buf_ptr = NULL;
			uint32_t in_tlv_len;
			uint32_t i;

			if (attr_struct_ptr.tag_varied_size == WMITLV_SIZE_FIX) {
				/* This is not allowed. The tag
				 * WMITLV_TAG_ARRAY_STRUC can only be used with
				 * variable-length structure array and should
				 * not have a fixed number of elements
				 * (contradicting). Use
				 * WMITLV_TAG_ARRAY_FIXED_STRUC tag for fixed
				 * size structure array (where structure never
				 * changes without breaking compatibility). */
				wmi_tlv_print_error
					("%s: ERROR: TLV (tag=%d) should be variable-length and not fixed length\n",
					__func__, curr_tlv_tag);
				goto Error_wmitlv_check_and_pad_tlvs;
			}

			/* Warning: Needs to allocate a larger structure and
			 * pad with zeros */
			wmi_tlv_print_verbose
				("%s: WARN: TLV array of structures needs padding. tlv_size_diff=%d\n",
				__func__, tlv_size_diff);

			/* incoming structure length */
			in_tlv_len =
				WMITLV_GET_TLVLEN(WMITLV_GET_HDR(buf_ptr)) +
				WMI_TLV_HDR_SIZE;
#ifndef NO_DYNAMIC_MEM_ALLOC
			/* Allocate a fresh array sized for the host structure
			 * layout and copy each element into it. */
			wmi_tlv_os_mem_alloc(os_handle, new_tlv_buf,
					     (num_of_elems *
					      attr_struct_ptr.tag_struct_size));
			if (new_tlv_buf == NULL) {
				/* Error: unable to alloc memory */
				wmi_tlv_print_error
					("%s: Error: unable to alloc memory (size=%d) for padding the TLV array %d\n",
					__func__,
					(num_of_elems *
					 attr_struct_ptr.tag_struct_size),
					curr_tlv_tag);
				goto Error_wmitlv_check_and_pad_tlvs;
			}

			wmi_tlv_OS_MEMZERO(new_tlv_buf,
					   (num_of_elems *
					    attr_struct_ptr.tag_struct_size));
			tlv_buf_ptr = (uint8_t *) new_tlv_buf;
			for (i = 0; i < num_of_elems; i++) {
				if (tlv_size_diff > 0) {
					/* Incoming structure size is greater
					 * than expected structure size, so
					 * copy the number of bytes equal to
					 * the expected structure size */
					wmi_tlv_OS_MEMCPY(tlv_buf_ptr,
							  (void *)(buf_ptr +
								   i *
								   in_tlv_len),
							  attr_struct_ptr.tag_struct_size);
				} else {
					/* Incoming structure size is smaller
					 * than expected structure size, so
					 * copy the number of bytes equal to
					 * the incoming structure size */
					wmi_tlv_OS_MEMCPY(tlv_buf_ptr,
							  (void *)(buf_ptr +
								   i *
								   in_tlv_len),
							  in_tlv_len);
				}
				tlv_buf_ptr += attr_struct_ptr.tag_struct_size;
			}
#else
			/* No dynamic allocation: resize the elements in place
			 * inside the existing buffer. */
			{
				uint8_t *src_addr;
				uint8_t *dst_addr;
				uint32_t buf_mov_len;

				if (tlv_size_diff < 0) {
					/* Incoming structure size is smaller
					 * than expected size; each element in
					 * the array needs padding. */

					/* Find amount of bytes to be padded
					 * for one element */
					num_padding_bytes = tlv_size_diff * -1;

					/* Move subsequent TLVs by number of
					 * bytes to be padded for all
					 * elements */
					if ((free_buf_len <
					     attr_struct_ptr.tag_struct_size *
					     num_of_elems) ||
					    (param_buf_len <
					     buf_idx + curr_tlv_len +
					     num_padding_bytes * num_of_elems)) {
						wmi_tlv_print_error("%s: Insufficent buffer\n",
								    __func__);
						goto Error_wmitlv_check_and_pad_tlvs;
					} else {
						src_addr =
							buf_ptr + curr_tlv_len;
						dst_addr =
							buf_ptr + curr_tlv_len +
							(num_padding_bytes *
							 num_of_elems);
						buf_mov_len =
							param_buf_len - (buf_idx +
									 curr_tlv_len);

						wmi_tlv_OS_MEMMOVE(dst_addr,
								   src_addr,
								   buf_mov_len);
					}

					/* Move subsequent elements of the
					 * array down by the number of bytes
					 * to be padded for one element and
					 * also set padding bytes to zero */
					tlv_buf_ptr = buf_ptr;
					for (i = 0; i < num_of_elems - 1; i++) {
						src_addr =
							tlv_buf_ptr + in_tlv_len;
						if (i != (num_of_elems - 1)) {
							dst_addr =
								tlv_buf_ptr +
								in_tlv_len +
								num_padding_bytes;
							buf_mov_len =
								curr_tlv_len -
								((i +
								  1) * in_tlv_len);

							wmi_tlv_OS_MEMMOVE
								(dst_addr, src_addr,
								buf_mov_len);
						}

						/* Set the padding bytes to
						 * zeroes */
						wmi_tlv_OS_MEMZERO(src_addr,
								   num_padding_bytes);

						tlv_buf_ptr +=
							attr_struct_ptr.tag_struct_size;
					}
					/* Zero-pad the tail of the last
					 * element */
					src_addr = tlv_buf_ptr + in_tlv_len;
					wmi_tlv_OS_MEMZERO(src_addr,
							   num_padding_bytes);

					/* Update the number of padding bytes
					 * to the total number of bytes padded
					 * for all elements in the array */
					num_padding_bytes =
						num_padding_bytes * num_of_elems;

					new_tlv_buf = buf_ptr;
				} else {
					/* Incoming structure size is greater
					 * than expected size; each element in
					 * the array needs shrinking. */

					/* Find amount of bytes to be shrunk
					 * for one element.
					 * NOTE(review): tlv_size_diff > 0
					 * here, so this yields a NEGATIVE
					 * num_padding_bytes; the buf_ptr
					 * advance at loop end then steps by
					 * less than curr_tlv_len — presumably
					 * intended to land on the shrunk
					 * layout. TODO confirm. */
					num_padding_bytes = tlv_size_diff * -1;

					/* Move subsequent elements of the
					 * array up by the number of bytes to
					 * be shrunk for one element */
					tlv_buf_ptr = buf_ptr;
					for (i = 0; i < (num_of_elems - 1); i++) {
						src_addr =
							tlv_buf_ptr + in_tlv_len;
						dst_addr =
							tlv_buf_ptr + in_tlv_len +
							num_padding_bytes;
						buf_mov_len =
							curr_tlv_len -
							((i + 1) * in_tlv_len);

						wmi_tlv_OS_MEMMOVE(dst_addr,
								   src_addr,
								   buf_mov_len);

						tlv_buf_ptr +=
							attr_struct_ptr.tag_struct_size;
					}

					/* Move subsequent TLVs by the number
					 * of bytes to be shrunk for all
					 * elements */
					if (param_buf_len >
					    (buf_idx + curr_tlv_len)) {
						src_addr =
							buf_ptr + curr_tlv_len;
						dst_addr =
							buf_ptr + curr_tlv_len +
							(num_padding_bytes *
							 num_of_elems);
						buf_mov_len =
							param_buf_len - (buf_idx +
									 curr_tlv_len);

						wmi_tlv_OS_MEMMOVE(dst_addr,
								   src_addr,
								   buf_mov_len);
					}

					/* Update the number of padding bytes
					 * to the total number of bytes shrunk
					 * for all elements in the array */
					num_padding_bytes =
						num_padding_bytes * num_of_elems;

					new_tlv_buf = buf_ptr;
				}
			}
#endif
			cmd_param_tlvs_ptr[tlv_index].tlv_ptr = new_tlv_buf;
			cmd_param_tlvs_ptr[tlv_index].num_elements =
				num_of_elems;
			/* Indicates that buffer is allocated */
			cmd_param_tlvs_ptr[tlv_index].buf_is_allocated = 1;

		} else if (tlv_size_diff >= 0) {
			/* On-wire TLV is at least as big as the host
			 * structure: point straight into the buffer, possibly
			 * truncating the excess. */
			/* Warning: some parameter truncation */
			if (tlv_size_diff > 0) {
				wmi_tlv_print_verbose
					("%s: WARN: TLV truncated. tlv_size_diff=%d, curr_tlv_len=%d\n",
					__func__, tlv_size_diff, curr_tlv_len);
			}
			/* A variable-size TLV with zero length has no payload;
			 * publish NULL instead of a pointer past the header. */
			cmd_param_tlvs_ptr[tlv_index].tlv_ptr =
				(attr_struct_ptr.tag_varied_size
				 && !curr_tlv_len) ? NULL : (void *)buf_ptr;
			cmd_param_tlvs_ptr[tlv_index].num_elements =
				num_of_elems;
			/* Indicates that buffer is not allocated */
			cmd_param_tlvs_ptr[tlv_index].buf_is_allocated = 0;
		} else {
			void *new_tlv_buf = NULL;

			/* Warning: Needs to allocate a larger structure and
			 * pad with zeros */
			wmi_tlv_print_verbose
				("%s: WARN: TLV needs padding. tlv_size_diff=%d\n",
				__func__, tlv_size_diff);
#ifndef NO_DYNAMIC_MEM_ALLOC
			/* Dynamic memory allocation is supported */
			wmi_tlv_os_mem_alloc(os_handle, new_tlv_buf,
					     (curr_tlv_len - tlv_size_diff));
			if (new_tlv_buf == NULL) {
				/* Error: unable to alloc memory */
				wmi_tlv_print_error
					("%s: Error: unable to alloc memory (size=%d) for padding the TLV %d\n",
					__func__, (curr_tlv_len - tlv_size_diff),
					curr_tlv_tag);
				goto Error_wmitlv_check_and_pad_tlvs;
			}

			wmi_tlv_OS_MEMZERO(new_tlv_buf,
					   (curr_tlv_len - tlv_size_diff));
			wmi_tlv_OS_MEMCPY(new_tlv_buf, (void *)buf_ptr,
					  curr_tlv_len);
#else
			/* Dynamic memory allocation is not supported. Padding
			 * has to be done within the existing buffer, assuming
			 * we have enough space to grow */
			{
				/* Note: tlv_size_diff is a value less than
				 * zero */
				/* Move the subsequent TLVs by the amount of
				 * bytes that needs to be padded */
				uint8_t *src_addr;
				uint8_t *dst_addr;
				uint32_t src_len;

				num_padding_bytes = (tlv_size_diff * -1);

				src_addr = buf_ptr + curr_tlv_len;
				dst_addr =
					buf_ptr + curr_tlv_len + num_padding_bytes;
				src_len =
					param_buf_len - (buf_idx + curr_tlv_len);

				wmi_tlv_OS_MEMMOVE(dst_addr, src_addr, src_len);

				/* Set the padding bytes to zeroes */
				wmi_tlv_OS_MEMZERO(src_addr, num_padding_bytes);

				new_tlv_buf = buf_ptr;
			}
#endif
			cmd_param_tlvs_ptr[tlv_index].tlv_ptr = new_tlv_buf;
			cmd_param_tlvs_ptr[tlv_index].num_elements =
				num_of_elems;
			/* Indicates that buffer is allocated */
			cmd_param_tlvs_ptr[tlv_index].buf_is_allocated = 1;
		}

		/* Advance past this TLV (num_padding_bytes accounts for any
		 * in-place grow/shrink performed above) */
		tlv_index++;
		remaining_expected_tlvs--;
		buf_ptr += curr_tlv_len + num_padding_bytes;
		buf_idx += curr_tlv_len + num_padding_bytes;
	}

	return 0;
Error_wmitlv_check_and_pad_tlvs:
	/* Release any per-TLV buffers allocated so far before failing */
	if (is_cmd_id) {
		wmitlv_free_allocated_command_tlvs(wmi_cmd_event_id,
						   wmi_cmd_struct_ptr);
	} else {
		wmitlv_free_allocated_event_tlvs(wmi_cmd_event_id,
						 wmi_cmd_struct_ptr);
	}
	*wmi_cmd_struct_ptr = NULL;
	return error;
}

/**
+ * wmitlv_check_and_pad_event_tlvs() - tlv helper function + * @os_handle: os context handle + * @param_struc_ptr: pointer to tlv structure + * @param_buf_len: length of tlv parameter + * @wmi_cmd_event_id: command event id + * @wmi_cmd_struct_ptr: wmi command structure + * + * + * validate and pad(if necessary) for incoming WMI Event TLVs + * + * Return: 0 if success. Return < 0 if failure. + */ +int +wmitlv_check_and_pad_event_tlvs(void *os_handle, void *param_struc_ptr, + uint32_t param_buf_len, + uint32_t wmi_cmd_event_id, + void **wmi_cmd_struct_ptr) +{ + uint32_t is_cmd_id = 0; + return wmitlv_check_and_pad_tlvs + (os_handle, param_struc_ptr, param_buf_len, is_cmd_id, + wmi_cmd_event_id, wmi_cmd_struct_ptr); +} +qdf_export_symbol(wmitlv_check_and_pad_event_tlvs); + +/** + * wmitlv_check_and_pad_command_tlvs() - tlv helper function + * @os_handle: os context handle + * @param_struc_ptr: pointer to tlv structure + * @param_buf_len: length of tlv parameter + * @wmi_cmd_event_id: command event id + * @wmi_cmd_struct_ptr: wmi command structure + * + * + * validate and pad(if necessary) for incoming WMI Command TLVs + * + * Return: 0 if success. Return < 0 if failure. 
 */
int
wmitlv_check_and_pad_command_tlvs(void *os_handle, void *param_struc_ptr,
				  uint32_t param_buf_len,
				  uint32_t wmi_cmd_event_id,
				  void **wmi_cmd_struct_ptr)
{
	/* Commands are processed with the is_cmd_id flag set. */
	uint32_t is_cmd_id = 1;
	return wmitlv_check_and_pad_tlvs
		       (os_handle, param_struc_ptr, param_buf_len, is_cmd_id,
		       wmi_cmd_event_id, wmi_cmd_struct_ptr);
}

/**
 * wmitlv_free_allocated_tlvs() - tlv helper function
 * @is_cmd_id: boolean to check if cmd or event tlv
 * @cmd_event_id: command or event id
 * @wmi_cmd_struct_ptr: wmi command structure
 *
 * Free any buffers allocated during WMI Event/Command TLV processing
 * (the per-element buffers flagged buf_is_allocated, plus the base
 * param-tlvs structure itself). Clears *wmi_cmd_struct_ptr when dynamic
 * allocation is in use.
 *
 * Return: none
 */
static void wmitlv_free_allocated_tlvs(uint32_t is_cmd_id,
				       uint32_t cmd_event_id,
				       void **wmi_cmd_struct_ptr)
{
	void *ptr = *wmi_cmd_struct_ptr;

	if (!ptr) {
		wmi_tlv_print_error("%s: Nothing to free for CMD/Event 0x%x\n",
				    __func__, cmd_event_id);
		return;
	}
#ifndef NO_DYNAMIC_MEM_ALLOC

/* macro to free the previously allocated memory for this TLV element.
 * Expanded once per element via WMITLV_TABLE when (op == FREE_TLV_ELEM).
 */
#define WMITLV_OP_FREE_TLV_ELEM_macro(param_ptr, param_len, wmi_cmd_event_id, elem_tlv_tag, elem_struc_type, elem_name, var_len, arr_size) \
	if ((((WMITLV_TYPEDEF_STRUCT_PARAMS_TLVS(wmi_cmd_event_id) *)ptr)->WMITLV_FIELD_BUF_IS_ALLOCATED(elem_name)) && \
	    (((WMITLV_TYPEDEF_STRUCT_PARAMS_TLVS(wmi_cmd_event_id) *)ptr)->elem_name != NULL)) \
	{ \
		wmi_tlv_os_mem_free(((WMITLV_TYPEDEF_STRUCT_PARAMS_TLVS(wmi_cmd_event_id) *)ptr)->elem_name); \
	}

/* switch-case body: frees every allocated element of the id's param-tlvs */
#define WMITLV_FREE_TLV_ELEMS(id) \
case id: \
{ \
	WMITLV_TABLE(id, FREE_TLV_ELEM, NULL, 0) \
} \
break;

	if (is_cmd_id) {
		switch (cmd_event_id) {
			WMITLV_ALL_CMD_LIST(WMITLV_FREE_TLV_ELEMS);
		default:
			wmi_tlv_print_error
				("%s: ERROR: Cannot find the TLVs attributes for Cmd=0x%x, %d\n",
				__func__, cmd_event_id, cmd_event_id);
		}
	} else {
		switch (cmd_event_id) {
			WMITLV_ALL_EVT_LIST(WMITLV_FREE_TLV_ELEMS);
		default:
			wmi_tlv_print_error
				("%s: ERROR: Cannot find the TLVs attributes for Cmd=0x%x, %d\n",
				__func__, cmd_event_id, cmd_event_id);
		}
	}

	/* Finally release the base param-tlvs structure */
	wmi_tlv_os_mem_free(*wmi_cmd_struct_ptr);
	*wmi_cmd_struct_ptr = NULL;
#endif

	return;
}

/**
 * wmitlv_free_allocated_command_tlvs() - tlv helper function
 * @cmd_event_id: command or event id
 * @wmi_cmd_struct_ptr: wmi command structure
 *
 * Free any allocated buffers from WMI Command TLV processing.
 *
 * Return: none
 */
void wmitlv_free_allocated_command_tlvs(uint32_t cmd_event_id,
					void **wmi_cmd_struct_ptr)
{
	wmitlv_free_allocated_tlvs(1, cmd_event_id, wmi_cmd_struct_ptr);
}

/**
 * wmitlv_free_allocated_event_tlvs() - tlv helper function
 * @cmd_event_id: command or event id
 * @wmi_cmd_struct_ptr: wmi command structure
 *
 * Free any allocated buffers from WMI Event TLV processing.
 *
 * Return: none
 */
void wmitlv_free_allocated_event_tlvs(uint32_t cmd_event_id,
				      void **wmi_cmd_struct_ptr)
{
	wmitlv_free_allocated_tlvs(0, cmd_event_id, wmi_cmd_struct_ptr);
}
qdf_export_symbol(wmitlv_free_allocated_event_tlvs);

/**
 * wmi_versions_are_compatible() - tlv helper function
 * @vers1: host wmi version
 * @vers2: target wmi version
 *
 * Check if two given wmi versions are compatible: the four namespace words
 * and the combined major/minor word must match; the build version
 * (abi_version_1) is ignored.
 *
 * Return: 1 if compatible, 0 if incompatible
 */
int
wmi_versions_are_compatible(wmi_abi_version *vers1, wmi_abi_version *vers2)
{
	if ((vers1->abi_version_ns_0 != vers2->abi_version_ns_0) ||
	    (vers1->abi_version_ns_1 != vers2->abi_version_ns_1) ||
	    (vers1->abi_version_ns_2 != vers2->abi_version_ns_2) ||
	    (vers1->abi_version_ns_3 != vers2->abi_version_ns_3)) {
		/* The namespaces are different. Incompatible. */
		return 0;
	}

	if (vers1->abi_version_0 != vers2->abi_version_0) {
		/* The major or minor versions are different. Incompatible */
		return 0;
	}
	/* We ignore the build version */
	return 1;
}

/**
 * wmi_versions_can_downgrade() - tlv helper function
 * @num_whitelist: number of entries in @version_whitelist_table
 * @version_whitelist_table: version table
 * @my_vers: host version
 * @opp_vers: target version
 * @out_vers: downgraded version (output)
 *
 * Check if the host wmi version can be downgraded, walking the whitelist one
 * minor version at a time until the target's minor version is reached. The
 * resulting (possibly downgraded) version is written to @out_vers either way.
 *
 * Return: 1 if a compatible version was reached, 0 otherwise
 */
static int
wmi_versions_can_downgrade(int num_whitelist,
			   wmi_whitelist_version_info *version_whitelist_table,
			   wmi_abi_version *my_vers,
			   wmi_abi_version *opp_vers,
			   wmi_abi_version *out_vers)
{
	uint8_t can_try_to_downgrade;
	uint32_t my_major_vers = WMI_VER_GET_MAJOR(my_vers->abi_version_0);
	uint32_t my_minor_vers = WMI_VER_GET_MINOR(my_vers->abi_version_0);
	uint32_t opp_major_vers = WMI_VER_GET_MAJOR(opp_vers->abi_version_0);
	uint32_t opp_minor_vers = WMI_VER_GET_MINOR(opp_vers->abi_version_0);
	uint32_t downgraded_minor_vers;

	if ((my_vers->abi_version_ns_0 != opp_vers->abi_version_ns_0) ||
	    (my_vers->abi_version_ns_1 != opp_vers->abi_version_ns_1) ||
	    (my_vers->abi_version_ns_2 != opp_vers->abi_version_ns_2) ||
	    (my_vers->abi_version_ns_3 != opp_vers->abi_version_ns_3)) {
		/* The namespaces are different. Incompatible. */
		can_try_to_downgrade = false;
	} else if (my_major_vers != opp_major_vers) {
		/* Major version is different. Incompatible and cannot
		 * downgrade. */
		can_try_to_downgrade = false;
	} else {
		/* Same major version. */

		if (my_minor_vers < opp_minor_vers) {
			/* Opposite party is newer. Incompatible and cannot
			 * downgrade. */
			can_try_to_downgrade = false;
		} else if (my_minor_vers > opp_minor_vers) {
			/* Opposite party is older. Check whitelist if we can
			 * downgrade */
			can_try_to_downgrade = true;
		} else {
			/* Same version */
			wmi_tlv_OS_MEMCPY(out_vers, my_vers,
					  sizeof(wmi_abi_version));
			return 1;
		}
	}

	if (!can_try_to_downgrade) {
		wmi_tlv_print_error("%s: Warning: incompatible WMI version.\n",
				    __func__);
		wmi_tlv_OS_MEMCPY(out_vers, my_vers, sizeof(wmi_abi_version));
		return 0;
	}
	/* Try to see if we can downgrade the supported version, one minor
	 * step at a time; each step must be explicitly whitelisted. */
	downgraded_minor_vers = my_minor_vers;
	while (downgraded_minor_vers > opp_minor_vers) {
		uint8_t downgraded = false;
		int i;

		for (i = 0; i < num_whitelist; i++) {
			if (version_whitelist_table[i].major != my_major_vers) {
				continue;	/* skip */
			}
			if ((version_whitelist_table[i].namespace_0 !=
			     my_vers->abi_version_ns_0)
			    || (version_whitelist_table[i].namespace_1 !=
				my_vers->abi_version_ns_1)
			    || (version_whitelist_table[i].namespace_2 !=
				my_vers->abi_version_ns_2)
			    || (version_whitelist_table[i].namespace_3 !=
				my_vers->abi_version_ns_3)) {
				continue;	/* skip */
			}
			if (version_whitelist_table[i].minor ==
			    downgraded_minor_vers) {
				/* Found the next version that I can
				 * downgrade */
				wmi_tlv_print_error
					("%s: Note: found a whitelist entry to downgrade. wh. list ver: %d,%d,0x%x 0x%x 0x%x 0x%x\n",
					__func__, version_whitelist_table[i].major,
					version_whitelist_table[i].minor,
					version_whitelist_table[i].namespace_0,
					version_whitelist_table[i].namespace_1,
					version_whitelist_table[i].namespace_2,
					version_whitelist_table[i].namespace_3);
				downgraded_minor_vers--;
				downgraded = true;
				break;
			}
		}
		if (!downgraded) {
			break;	/* Done since we did not find any whitelist
				 * entry to downgrade the version */
		}
	}
	/* Publish the best version we reached, compatible or not */
	wmi_tlv_OS_MEMCPY(out_vers, my_vers, sizeof(wmi_abi_version));
	out_vers->abi_version_0 =
		WMI_VER_GET_VERSION_0(my_major_vers, downgraded_minor_vers);
	if (downgraded_minor_vers != opp_minor_vers) {
		wmi_tlv_print_error
			("%s: Warning: incompatible WMI version and cannot downgrade.\n",
			__func__);
		return 0;	/* Incompatible */
	} else {
		return 1;	/* Compatible */
	}
}

/**
 * wmi_cmp_and_set_abi_version() - tlv helper function
 * @num_whitelist: number of entries in @version_whitelist_table
 * @version_whitelist_table: version table
 * @my_vers: host version
 * @opp_vers: target version
 * @out_vers: downgraded version
 *
 * This routine will compare and set the WMI ABI version.
 * First, compare my version with the opposite side's version.
 * If incompatible, then check the whitelist to see if our side can downgrade.
 * Finally, fill in the final ABI version into the output, out_vers.
 * Return 0 if the output version is compatible
 * Else return 1 if the output version is incompatible
 *
 * Return: 0 if the output version is compatible else < 0.
 */
int
wmi_cmp_and_set_abi_version(int num_whitelist,
			    wmi_whitelist_version_info *
			    version_whitelist_table,
			    struct _wmi_abi_version *my_vers,
			    struct _wmi_abi_version *opp_vers,
			    struct _wmi_abi_version *out_vers)
{
	wmi_tlv_print_verbose
		("%s: Our WMI Version: Mj=%d, Mn=%d, bd=%d, ns0=0x%x ns1:0x%x ns2:0x%x ns3:0x%x\n",
		__func__, WMI_VER_GET_MAJOR(my_vers->abi_version_0),
		WMI_VER_GET_MINOR(my_vers->abi_version_0), my_vers->abi_version_1,
		my_vers->abi_version_ns_0, my_vers->abi_version_ns_1,
		my_vers->abi_version_ns_2, my_vers->abi_version_ns_3);

	wmi_tlv_print_verbose
		("%s: Opposite side WMI Version: Mj=%d, Mn=%d, bd=%d, ns0=0x%x ns1:0x%x ns2:0x%x ns3:0x%x\n",
		__func__, WMI_VER_GET_MAJOR(opp_vers->abi_version_0),
		WMI_VER_GET_MINOR(opp_vers->abi_version_0),
		opp_vers->abi_version_1, opp_vers->abi_version_ns_0,
		opp_vers->abi_version_ns_1, opp_vers->abi_version_ns_2,
		opp_vers->abi_version_ns_3);

	/* By default, the output version is our version. */
	wmi_tlv_OS_MEMCPY(out_vers, my_vers, sizeof(wmi_abi_version));
	if (!wmi_versions_are_compatible(my_vers, opp_vers)) {
		/* Our host version and the given firmware version are
		 * incompatible. */
		if (wmi_versions_can_downgrade
			    (num_whitelist, version_whitelist_table, my_vers,
			    opp_vers, out_vers)) {
			/* We can downgrade our host versions to match
			 * firmware; out_vers already holds the downgraded
			 * version. */
			wmi_tlv_print_error
				("%s: Host downgraded WMI Versions to match fw. Ret version: Mj=%d, Mn=%d, bd=%d, ns0=0x%x ns1:0x%x ns2:0x%x ns3:0x%x\n",
				__func__,
				WMI_VER_GET_MAJOR(out_vers->abi_version_0),
				WMI_VER_GET_MINOR(out_vers->abi_version_0),
				out_vers->abi_version_1,
				out_vers->abi_version_ns_0,
				out_vers->abi_version_ns_1,
				out_vers->abi_version_ns_2,
				out_vers->abi_version_ns_3);
			return 0;	/* Compatible */
		} else {
			/* Warn: We cannot downgrade our host versions to
			 * match firmware. */
			wmi_tlv_print_error
				("%s: WARN: Host WMI Versions mismatch with fw. Ret version: Mj=%d, Mn=%d, bd=%d, ns0=0x%x ns1:0x%x ns2:0x%x ns3:0x%x\n",
				__func__,
				WMI_VER_GET_MAJOR(out_vers->abi_version_0),
				WMI_VER_GET_MINOR(out_vers->abi_version_0),
				out_vers->abi_version_1,
				out_vers->abi_version_ns_0,
				out_vers->abi_version_ns_1,
				out_vers->abi_version_ns_2,
				out_vers->abi_version_ns_3);

			return 1;	/* Incompatible */
		}
	} else {
		/* We are compatible. Our host version is the output version */
		wmi_tlv_print_verbose
			("%s: Host and FW Compatible WMI Versions. Ret version: Mj=%d, Mn=%d, bd=%d, ns0=0x%x ns1:0x%x ns2:0x%x ns3:0x%x\n",
			__func__, WMI_VER_GET_MAJOR(out_vers->abi_version_0),
			WMI_VER_GET_MINOR(out_vers->abi_version_0),
			out_vers->abi_version_1, out_vers->abi_version_ns_0,
			out_vers->abi_version_ns_1, out_vers->abi_version_ns_2,
			out_vers->abi_version_ns_3);
		return 0;	/* Compatible */
	}
}
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_tlv_platform.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_tlv_platform.c
new file mode 100644
index 0000000000000000000000000000000000000000..8afe92ef916cb3d10eadd90570e963596c9f0f22
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_tlv_platform.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2013-2014, 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * LMAC offload interface functions for WMI TLV Interface + */ + +#include /* qdf_mem_malloc,free, etc. */ +#include +#include "htc_api.h" +#include "wmi.h" + + +/* Following macro definitions use OS or platform specific functions */ +#define dummy_print(fmt, ...) {} +#define wmi_tlv_print_verbose dummy_print +#define wmi_tlv_print_error qdf_print +#define wmi_tlv_OS_MEMCPY OS_MEMCPY +#define wmi_tlv_OS_MEMZERO OS_MEMZERO +#define wmi_tlv_OS_MEMMOVE OS_MEMMOVE + +#ifndef NO_DYNAMIC_MEM_ALLOC +#define wmi_tlv_os_mem_alloc(scn, ptr, numBytes) \ + { \ + (ptr) = qdf_mem_malloc(numBytes); \ + } +#define wmi_tlv_os_mem_free qdf_mem_free +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c new file mode 100644 index 0000000000000000000000000000000000000000..4ffee905bb3dffe620f7434a36b5f0dbdb765bbe --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c @@ -0,0 +1,2715 @@ +/* + * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * Host WMI unified implementation + */ +#include "htc_api.h" +#include "htc_api.h" +#include "wmi_unified_priv.h" +#include "wmi_unified_api.h" +#include "qdf_module.h" +#include "qdf_platform.h" + +#ifndef WMI_NON_TLV_SUPPORT +#include "wmi_tlv_helper.h" +#endif + +#include + +/* This check for CONFIG_WIN temporary added due to redeclaration compilation +error in MCL. Error is caused due to inclusion of wmi.h in wmi_unified_api.h +which gets included here through ol_if_athvar.h. Eventually it is expected that +wmi.h will be removed from wmi_unified_api.h after cleanup, which will need +WMI_CMD_HDR to be defined here. */ +#ifdef CONFIG_WIN +/* Copied from wmi.h */ +#undef MS +#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB) +#undef SM +#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) +#undef WO +#define WO(_f) ((_f##_OFFSET) >> 2) + +#undef GET_FIELD +#define GET_FIELD(_addr, _f) MS(*((uint32_t *)(_addr) + WO(_f)), _f) +#undef SET_FIELD +#define SET_FIELD(_addr, _f, _val) \ + (*((uint32_t *)(_addr) + WO(_f)) = \ + (*((uint32_t *)(_addr) + WO(_f)) & ~_f##_MASK) | SM(_val, _f)) + +#define WMI_GET_FIELD(_msg_buf, _msg_type, _f) \ + GET_FIELD(_msg_buf, _msg_type ## _ ## _f) + +#define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \ + SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val) + +#define WMI_EP_APASS 0x0 +#define WMI_EP_LPASS 0x1 +#define WMI_EP_SENSOR 0x2 + +/* + * * Control Path + * */ +typedef PREPACK struct { + uint32_t commandId:24, + reserved:2, /* used for WMI endpoint ID */ + plt_priv:6; /* platform private */ +} POSTPACK WMI_CMD_HDR; /* used for commands and events */ + +#define WMI_CMD_HDR_COMMANDID_LSB 0 
+#define WMI_CMD_HDR_COMMANDID_MASK 0x00ffffff +#define WMI_CMD_HDR_COMMANDID_OFFSET 0x00000000 +#define WMI_CMD_HDR_WMI_ENDPOINTID_MASK 0x03000000 +#define WMI_CMD_HDR_WMI_ENDPOINTID_OFFSET 24 +#define WMI_CMD_HDR_PLT_PRIV_LSB 24 +#define WMI_CMD_HDR_PLT_PRIV_MASK 0xff000000 +#define WMI_CMD_HDR_PLT_PRIV_OFFSET 0x00000000 +/* end of copy wmi.h */ +#endif /* CONFIG_WIN */ + +#define WMI_MIN_HEAD_ROOM 64 + +/* WBUFF pool sizes for WMI */ +/* Allocation of size 256 bytes */ +#define WMI_WBUFF_POOL_0_SIZE 128 +/* Allocation of size 512 bytes */ +#define WMI_WBUFF_POOL_1_SIZE 16 +/* Allocation of size 1024 bytes */ +#define WMI_WBUFF_POOL_2_SIZE 8 +/* Allocation of size 2048 bytes */ +#define WMI_WBUFF_POOL_3_SIZE 8 + +#ifdef WMI_INTERFACE_EVENT_LOGGING +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)) +/* TODO Cleanup this backported function */ +static int wmi_bp_seq_printf(struct seq_file *m, const char *f, ...) +{ + va_list args; + + va_start(args, f); + seq_vprintf(m, f, args); + va_end(args); + + return 0; +} +#else +#define wmi_bp_seq_printf(m, fmt, ...) 
seq_printf((m), fmt, ##__VA_ARGS__) +#endif + +#ifndef MAX_WMI_INSTANCES +#ifdef CONFIG_MCL +#define MAX_WMI_INSTANCES 1 +#else +#define MAX_WMI_INSTANCES 3 +#endif +#define CUSTOM_MGMT_CMD_DATA_SIZE 4 +#endif + +#ifdef CONFIG_MCL +/* WMI commands */ +uint32_t g_wmi_command_buf_idx = 0; +struct wmi_command_debug wmi_command_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; + +/* WMI commands TX completed */ +uint32_t g_wmi_command_tx_cmp_buf_idx = 0; +struct wmi_command_debug + wmi_command_tx_cmp_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; + +/* WMI events when processed */ +uint32_t g_wmi_event_buf_idx = 0; +struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; + +/* WMI events when queued */ +uint32_t g_wmi_rx_event_buf_idx = 0; +struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY]; +#endif + +#define WMI_COMMAND_RECORD(h, a, b) { \ + if (wmi_log_max_entry <= \ + *(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)) \ + *(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx) = 0;\ + ((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\ + [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)]\ + .command = a; \ + qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ + wmi_command_log_buf_info.buf) \ + [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].data,\ + b, wmi_record_max_length); \ + ((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\ + [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].\ + time = qdf_get_log_timestamp(); \ + (*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))++; \ + h->log_info.wmi_command_log_buf_info.length++; \ +} + +#define WMI_COMMAND_TX_CMP_RECORD(h, a, b) { \ + if (wmi_log_max_entry <= \ + *(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))\ + *(h->log_info.wmi_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx) = 0; \ + ((struct wmi_command_debug *)h->log_info. 
\ + wmi_command_tx_cmp_log_buf_info.buf) \ + [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx)]. \ + command = a; \ + qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ + wmi_command_tx_cmp_log_buf_info.buf) \ + [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx)]. \ + data, b, wmi_record_max_length); \ + ((struct wmi_command_debug *)h->log_info. \ + wmi_command_tx_cmp_log_buf_info.buf) \ + [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx)]. \ + time = qdf_get_log_timestamp(); \ + (*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))++;\ + h->log_info.wmi_command_tx_cmp_log_buf_info.length++; \ +} + +#define WMI_EVENT_RECORD(h, a, b) { \ + if (wmi_log_max_entry <= \ + *(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)) \ + *(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx) = 0;\ + ((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\ + [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)]. \ + event = a; \ + qdf_mem_copy(((struct wmi_event_debug *)h->log_info. \ + wmi_event_log_buf_info.buf) \ + [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].data, b,\ + wmi_record_max_length); \ + ((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\ + [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].time =\ + qdf_get_log_timestamp(); \ + (*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))++; \ + h->log_info.wmi_event_log_buf_info.length++; \ +} + +#define WMI_RX_EVENT_RECORD(h, a, b) { \ + if (wmi_log_max_entry <= \ + *(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))\ + *(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx) = 0;\ + ((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\ + [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ + event = a; \ + qdf_mem_copy(((struct wmi_event_debug *)h->log_info. 
\ + wmi_rx_event_log_buf_info.buf) \ + [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ + data, b, wmi_record_max_length); \ + ((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\ + [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\ + time = qdf_get_log_timestamp(); \ + (*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))++; \ + h->log_info.wmi_rx_event_log_buf_info.length++; \ +} + +#ifdef CONFIG_MCL +uint32_t g_wmi_mgmt_command_buf_idx = 0; +struct +wmi_command_debug wmi_mgmt_command_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; + +/* wmi_mgmt commands TX completed */ +uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0; +struct wmi_command_debug +wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; + +/* wmi_mgmt events when received */ +uint32_t g_wmi_mgmt_rx_event_buf_idx = 0; +struct wmi_event_debug +wmi_mgmt_rx_event_log_buffer[WMI_MGMT_EVENT_DEBUG_MAX_ENTRY]; + +/* wmi_diag events when received */ +uint32_t g_wmi_diag_rx_event_buf_idx = 0; +struct wmi_event_debug +wmi_diag_rx_event_log_buffer[WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY]; +#endif + +#define WMI_MGMT_COMMAND_RECORD(h, a, b) { \ + if (wmi_mgmt_log_max_entry <= \ + *(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)) \ + *(h->log_info.wmi_mgmt_command_log_buf_info. \ + p_buf_tail_idx) = 0; \ + ((struct wmi_command_debug *)h->log_info. \ + wmi_mgmt_command_log_buf_info.buf) \ + [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ + command = a; \ + qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ + wmi_mgmt_command_log_buf_info.buf) \ + [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ + data, b, \ + wmi_record_max_length); \ + ((struct wmi_command_debug *)h->log_info. 
\ + wmi_mgmt_command_log_buf_info.buf) \ + [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\ + time = qdf_get_log_timestamp(); \ + (*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx))++;\ + h->log_info.wmi_mgmt_command_log_buf_info.length++; \ +} + +#define WMI_MGMT_COMMAND_TX_CMP_RECORD(h, a, b) { \ + if (wmi_mgmt_log_max_entry <= \ + *(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx)) \ + *(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx) = 0; \ + ((struct wmi_command_debug *)h->log_info. \ + wmi_mgmt_command_tx_cmp_log_buf_info.buf) \ + [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx)].command = a; \ + qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \ + wmi_mgmt_command_tx_cmp_log_buf_info.buf)\ + [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx)].data, b, \ + wmi_record_max_length); \ + ((struct wmi_command_debug *)h->log_info. \ + wmi_mgmt_command_tx_cmp_log_buf_info.buf) \ + [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx)].time = \ + qdf_get_log_timestamp(); \ + (*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \ + p_buf_tail_idx))++; \ + h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.length++; \ +} + +#define WMI_MGMT_RX_EVENT_RECORD(h, a, b) do { \ + if (wmi_mgmt_log_max_entry <= \ + *(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))\ + *(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx) = 0;\ + ((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\ + [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)]\ + .event = a; \ + qdf_mem_copy(((struct wmi_event_debug *)h->log_info. 
\ + wmi_mgmt_event_log_buf_info.buf) \ + [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\ + data, b, wmi_record_max_length); \ + ((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\ + [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\ + time = qdf_get_log_timestamp(); \ + (*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))++; \ + h->log_info.wmi_mgmt_event_log_buf_info.length++; \ +} while (0); + +#define WMI_DIAG_RX_EVENT_RECORD(h, a, b) do { \ + if (wmi_mgmt_log_max_entry <= \ + *(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))\ + *(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx) = 0;\ + ((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\ + [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)]\ + .event = a; \ + qdf_mem_copy(((struct wmi_event_debug *)h->log_info. \ + wmi_diag_event_log_buf_info.buf) \ + [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\ + data, b, wmi_record_max_length); \ + ((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\ + [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\ + time = qdf_get_log_timestamp(); \ + (*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))++; \ + h->log_info.wmi_diag_event_log_buf_info.length++; \ +} while (0); + +/* These are defined to made it as module param, which can be configured */ +uint32_t wmi_log_max_entry = WMI_EVENT_DEBUG_MAX_ENTRY; +uint32_t wmi_mgmt_log_max_entry = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY; +uint32_t wmi_diag_log_max_entry = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY; +uint32_t wmi_record_max_length = WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH; +uint32_t wmi_display_size = 100; + +/** + * wmi_log_init() - Initialize WMI event logging + * @wmi_handle: WMI handle. 
 *
 * Return: Initialization status
 */
#ifdef CONFIG_MCL
/*
 * MCL build: a single WMI instance; every log ring is pointed at the
 * statically allocated global buffers and their global tail indices, so this
 * variant cannot fail.
 */
static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
{
	struct wmi_log_buf_t *cmd_log_buf =
		&wmi_handle->log_info.wmi_command_log_buf_info;
	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;

	struct wmi_log_buf_t *event_log_buf =
		&wmi_handle->log_info.wmi_event_log_buf_info;
	struct wmi_log_buf_t *rx_event_log_buf =
		&wmi_handle->log_info.wmi_rx_event_log_buf_info;

	struct wmi_log_buf_t *mgmt_cmd_log_buf =
		&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
	struct wmi_log_buf_t *mgmt_event_log_buf =
		&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
	struct wmi_log_buf_t *diag_event_log_buf =
		&wmi_handle->log_info.wmi_diag_event_log_buf_info;

	/* WMI commands */
	cmd_log_buf->length = 0;
	cmd_log_buf->buf_tail_idx = 0;
	cmd_log_buf->buf = wmi_command_log_buffer;
	cmd_log_buf->p_buf_tail_idx = &g_wmi_command_buf_idx;
	cmd_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;

	/* WMI commands TX completed */
	cmd_tx_cmpl_log_buf->length = 0;
	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
	cmd_tx_cmpl_log_buf->buf = wmi_command_tx_cmp_log_buffer;
	cmd_tx_cmpl_log_buf->p_buf_tail_idx = &g_wmi_command_tx_cmp_buf_idx;
	cmd_tx_cmpl_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;

	/* WMI events when processed */
	event_log_buf->length = 0;
	event_log_buf->buf_tail_idx = 0;
	event_log_buf->buf = wmi_event_log_buffer;
	event_log_buf->p_buf_tail_idx = &g_wmi_event_buf_idx;
	event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;

	/* WMI events when queued */
	rx_event_log_buf->length = 0;
	rx_event_log_buf->buf_tail_idx = 0;
	rx_event_log_buf->buf = wmi_rx_event_log_buffer;
	rx_event_log_buf->p_buf_tail_idx = &g_wmi_rx_event_buf_idx;
	rx_event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;

	/* WMI Management commands */
	mgmt_cmd_log_buf->length = 0;
	mgmt_cmd_log_buf->buf_tail_idx = 0;
	mgmt_cmd_log_buf->buf = wmi_mgmt_command_log_buffer;
	mgmt_cmd_log_buf->p_buf_tail_idx = &g_wmi_mgmt_command_buf_idx;
	mgmt_cmd_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY;

	/* WMI Management commands Tx completed*/
	mgmt_cmd_tx_cmp_log_buf->length = 0;
	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
	mgmt_cmd_tx_cmp_log_buf->buf = wmi_mgmt_command_tx_cmp_log_buffer;
	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
		&g_wmi_mgmt_command_tx_cmp_buf_idx;
	mgmt_cmd_tx_cmp_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY;

	/* WMI Management events when received */
	mgmt_event_log_buf->length = 0;
	mgmt_event_log_buf->buf_tail_idx = 0;
	mgmt_event_log_buf->buf = wmi_mgmt_rx_event_log_buffer;
	mgmt_event_log_buf->p_buf_tail_idx = &g_wmi_mgmt_rx_event_buf_idx;
	mgmt_event_log_buf->size = WMI_MGMT_EVENT_DEBUG_MAX_ENTRY;

	/* WMI diag events when received */
	diag_event_log_buf->length = 0;
	diag_event_log_buf->buf_tail_idx = 0;
	diag_event_log_buf->buf = wmi_diag_rx_event_log_buffer;
	diag_event_log_buf->p_buf_tail_idx = &g_wmi_diag_rx_event_buf_idx;
	diag_event_log_buf->size = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;

	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
	wmi_handle->log_info.wmi_logging_enable = 1;

	return QDF_STATUS_SUCCESS;
}
#else
/*
 * Non-MCL build: per-instance rings are heap-allocated and sized by the
 * wmi_*_max_entry module parameters; each ring keeps its tail index in its
 * own buf_tail_idx.
 *
 * NOTE(review): on a partial allocation failure this returns E_NOMEM without
 * freeing the rings already allocated -- presumably the caller is expected to
 * invoke wmi_log_buffer_free() (which frees only non-NULL buffers) on the
 * error path; confirm against the caller before changing.
 */
static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
{
	struct wmi_log_buf_t *cmd_log_buf =
		&wmi_handle->log_info.wmi_command_log_buf_info;
	struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
		&wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;

	struct wmi_log_buf_t *event_log_buf =
		&wmi_handle->log_info.wmi_event_log_buf_info;
	struct wmi_log_buf_t *rx_event_log_buf =
		&wmi_handle->log_info.wmi_rx_event_log_buf_info;

	struct wmi_log_buf_t *mgmt_cmd_log_buf =
		&wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
	struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
		&wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
	struct wmi_log_buf_t *mgmt_event_log_buf =
		&wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
	struct wmi_log_buf_t *diag_event_log_buf =
		&wmi_handle->log_info.wmi_diag_event_log_buf_info;

	wmi_handle->log_info.wmi_logging_enable = 0;

	/* WMI commands */
	cmd_log_buf->length = 0;
	cmd_log_buf->buf_tail_idx = 0;
	cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
		wmi_log_max_entry * sizeof(struct wmi_command_debug));
	cmd_log_buf->size = wmi_log_max_entry;

	if (!cmd_log_buf->buf) {
		qdf_print("no memory for WMI command log buffer..\n");
		return QDF_STATUS_E_NOMEM;
	}
	cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx;

	/* WMI commands TX completed */
	cmd_tx_cmpl_log_buf->length = 0;
	cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
	cmd_tx_cmpl_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
		wmi_log_max_entry * sizeof(struct wmi_command_debug));
	cmd_tx_cmpl_log_buf->size = wmi_log_max_entry;

	if (!cmd_tx_cmpl_log_buf->buf) {
		qdf_print("no memory for WMI Command Tx Complete log buffer..\n");
		return QDF_STATUS_E_NOMEM;
	}
	cmd_tx_cmpl_log_buf->p_buf_tail_idx =
		&cmd_tx_cmpl_log_buf->buf_tail_idx;

	/* WMI events when processed */
	event_log_buf->length = 0;
	event_log_buf->buf_tail_idx = 0;
	event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
		wmi_log_max_entry * sizeof(struct wmi_event_debug));
	event_log_buf->size = wmi_log_max_entry;

	if (!event_log_buf->buf) {
		qdf_print("no memory for WMI Event log buffer..\n");
		return QDF_STATUS_E_NOMEM;
	}
	event_log_buf->p_buf_tail_idx = &event_log_buf->buf_tail_idx;

	/* WMI events when queued */
	rx_event_log_buf->length = 0;
	rx_event_log_buf->buf_tail_idx = 0;
	rx_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
		wmi_log_max_entry * sizeof(struct wmi_event_debug));
	rx_event_log_buf->size = wmi_log_max_entry;

	if (!rx_event_log_buf->buf) {
		qdf_print("no memory for WMI Event Rx log buffer..\n");
		return QDF_STATUS_E_NOMEM;
	}
	rx_event_log_buf->p_buf_tail_idx = &rx_event_log_buf->buf_tail_idx;

	/* WMI Management commands */
	mgmt_cmd_log_buf->length = 0;
	mgmt_cmd_log_buf->buf_tail_idx = 0;
	mgmt_cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
		wmi_mgmt_log_max_entry * sizeof(struct wmi_command_debug));
	mgmt_cmd_log_buf->size = wmi_mgmt_log_max_entry;

	if (!mgmt_cmd_log_buf->buf) {
		qdf_print("no memory for WMI Management Command log buffer..\n");
		return QDF_STATUS_E_NOMEM;
	}
	mgmt_cmd_log_buf->p_buf_tail_idx = &mgmt_cmd_log_buf->buf_tail_idx;

	/* WMI Management commands Tx completed*/
	mgmt_cmd_tx_cmp_log_buf->length = 0;
	mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
	mgmt_cmd_tx_cmp_log_buf->buf = (struct wmi_command_debug *)
		qdf_mem_malloc(
		wmi_mgmt_log_max_entry *
		sizeof(struct wmi_command_debug));
	mgmt_cmd_tx_cmp_log_buf->size = wmi_mgmt_log_max_entry;

	if (!mgmt_cmd_tx_cmp_log_buf->buf) {
		qdf_print("no memory for WMI Management Command Tx complete log buffer..\n");
		return QDF_STATUS_E_NOMEM;
	}
	mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
		&mgmt_cmd_tx_cmp_log_buf->buf_tail_idx;

	/* WMI Management events when received */
	mgmt_event_log_buf->length = 0;
	mgmt_event_log_buf->buf_tail_idx = 0;

	mgmt_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
		wmi_mgmt_log_max_entry *
		sizeof(struct wmi_event_debug));
	mgmt_event_log_buf->size = wmi_mgmt_log_max_entry;

	if (!mgmt_event_log_buf->buf) {
		qdf_print("no memory for WMI Management Event log buffer..\n");
		return QDF_STATUS_E_NOMEM;
	}
	mgmt_event_log_buf->p_buf_tail_idx = &mgmt_event_log_buf->buf_tail_idx;

	/* WMI diag events when received */
	diag_event_log_buf->length = 0;
	diag_event_log_buf->buf_tail_idx = 0;

	diag_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
		wmi_diag_log_max_entry *
		sizeof(struct wmi_event_debug));
	diag_event_log_buf->size = wmi_diag_log_max_entry;

	if (!diag_event_log_buf->buf) {
		qdf_print("no memory for WMI diag event log buffer..\n");
		return QDF_STATUS_E_NOMEM;
	}
	diag_event_log_buf->p_buf_tail_idx = &diag_event_log_buf->buf_tail_idx;

	qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
	wmi_handle->log_info.wmi_logging_enable = 1;

	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * wmi_log_buffer_free() - Free all dynamic allocated buffer memory for
 * event logging
 * @wmi_handle: WMI handle.
 *
 * Return: None
 */
#ifndef CONFIG_MCL
/* Non-MCL: frees every heap ring allocated by wmi_log_init(); buffers that
 * were never allocated (NULL) are skipped. Also disables logging and destroys
 * the record lock.
 */
static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
{
	if (wmi_handle->log_info.wmi_command_log_buf_info.buf)
		qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf);
	if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf)
		qdf_mem_free(
		wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf);
	if (wmi_handle->log_info.wmi_event_log_buf_info.buf)
		qdf_mem_free(wmi_handle->log_info.wmi_event_log_buf_info.buf);
	if (wmi_handle->log_info.wmi_rx_event_log_buf_info.buf)
		qdf_mem_free(
			wmi_handle->log_info.wmi_rx_event_log_buf_info.buf);
	if (wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf)
		qdf_mem_free(
			wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf);
	if (wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf)
		qdf_mem_free(
		wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf);
	if (wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf)
		qdf_mem_free(
			wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf);
	if (wmi_handle->log_info.wmi_diag_event_log_buf_info.buf)
		qdf_mem_free(
			wmi_handle->log_info.wmi_diag_event_log_buf_info.buf);
	wmi_handle->log_info.wmi_logging_enable = 0;
	qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock);
}
#else
/* MCL: rings are static globals, nothing to free. */
static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
{
	/* Do Nothing */
}
#endif

/**
 * wmi_print_cmd_log_buffer() - an output
agnostic wmi command log printer + * @log_buffer: the command log buffer metadata of the buffer to print + * @count: the maximum number of entries to print + * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper + * @print_priv: any data required by the print method, e.g. a file handle + * + * Return: None + */ +static void +wmi_print_cmd_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + static const int data_len = + WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t); + char str[128]; + uint32_t idx; + + if (count > log_buffer->size) + count = log_buffer->size; + if (count > log_buffer->length) + count = log_buffer->length; + + /* subtract count from index, and wrap if necessary */ + idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count; + idx %= log_buffer->size; + + print(print_priv, "Time (seconds) Cmd Id Payload"); + while (count) { + struct wmi_command_debug *cmd_log = (struct wmi_command_debug *) + &((struct wmi_command_debug *)log_buffer->buf)[idx]; + uint64_t secs, usecs; + int len = 0; + int i; + + qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs); + len += scnprintf(str + len, sizeof(str) - len, + "% 8lld.%06lld %6u (0x%06x) ", + secs, usecs, + cmd_log->command, cmd_log->command); + for (i = 0; i < data_len; ++i) { + len += scnprintf(str + len, sizeof(str) - len, + "0x%08x ", cmd_log->data[i]); + } + + print(print_priv, str); + + --count; + ++idx; + if (idx >= log_buffer->size) + idx = 0; + } +} + +/** + * wmi_print_event_log_buffer() - an output agnostic wmi event log printer + * @log_buffer: the event log buffer metadata of the buffer to print + * @count: the maximum number of entries to print + * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper + * @print_priv: any data required by the print method, e.g. 
a file handle + * + * Return: None + */ +static void +wmi_print_event_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + static const int data_len = + WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t); + char str[128]; + uint32_t idx; + + if (count > log_buffer->size) + count = log_buffer->size; + if (count > log_buffer->length) + count = log_buffer->length; + + /* subtract count from index, and wrap if necessary */ + idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count; + idx %= log_buffer->size; + + print(print_priv, "Time (seconds) Event Id Payload"); + while (count) { + struct wmi_event_debug *event_log = (struct wmi_event_debug *) + &((struct wmi_event_debug *)log_buffer->buf)[idx]; + uint64_t secs, usecs; + int len = 0; + int i; + + qdf_log_timestamp_to_secs(event_log->time, &secs, &usecs); + len += scnprintf(str + len, sizeof(str) - len, + "% 8lld.%06lld %6u (0x%06x) ", + secs, usecs, + event_log->event, event_log->event); + for (i = 0; i < data_len; ++i) { + len += scnprintf(str + len, sizeof(str) - len, + "0x%08x ", event_log->data[i]); + } + + print(print_priv, str); + + --count; + ++idx; + if (idx >= log_buffer->size) + idx = 0; + } +} + +inline void +wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + wmi_print_cmd_log_buffer( + &wmi->log_info.wmi_command_log_buf_info, + count, print, print_priv); +} + +inline void +wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + wmi_print_cmd_log_buffer( + &wmi->log_info.wmi_command_tx_cmp_log_buf_info, + count, print, print_priv); +} + +inline void +wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + wmi_print_cmd_log_buffer( + &wmi->log_info.wmi_mgmt_command_log_buf_info, + count, print, print_priv); +} + +inline void +wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, 
uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + wmi_print_cmd_log_buffer( + &wmi->log_info.wmi_mgmt_command_tx_cmp_log_buf_info, + count, print, print_priv); +} + +inline void +wmi_print_event_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + wmi_print_event_log_buffer( + &wmi->log_info.wmi_event_log_buf_info, + count, print, print_priv); +} + +inline void +wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + wmi_print_event_log_buffer( + &wmi->log_info.wmi_rx_event_log_buf_info, + count, print, print_priv); +} + +inline void +wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count, + qdf_abstract_print *print, void *print_priv) +{ + wmi_print_event_log_buffer( + &wmi->log_info.wmi_mgmt_event_log_buf_info, + count, print, print_priv); +} + +#ifdef CONFIG_MCL +const int8_t * const debugfs_dir[MAX_WMI_INSTANCES] = {"WMI0"}; +#else +const int8_t * const debugfs_dir[MAX_WMI_INSTANCES] = {"WMI0", "WMI1", "WMI2"}; +#endif + +/* debugfs routines*/ + +/** + * debug_wmi_##func_base##_show() - debugfs functions to display content of + * command and event buffers. Macro uses max buffer length to display + * buffer when it is wraparound. 
+ * + * @m: debugfs handler to access wmi_handle + * @v: Variable arguments (not used) + * + * Return: Length of characters printed + */ +#define GENERATE_COMMAND_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size) \ + static int debug_wmi_##func_base##_show(struct seq_file *m, \ + void *v) \ + { \ + wmi_unified_t wmi_handle = (wmi_unified_t) m->private; \ + struct wmi_log_buf_t *wmi_log = \ + &wmi_handle->log_info.wmi_##func_base##_buf_info;\ + int pos, nread, outlen; \ + int i; \ + uint64_t secs, usecs; \ + \ + qdf_spin_lock(&wmi_handle->log_info.wmi_record_lock); \ + if (!wmi_log->length) { \ + qdf_spin_unlock(&wmi_handle->log_info.wmi_record_lock);\ + return wmi_bp_seq_printf(m, \ + "no elements to read from ring buffer!\n"); \ + } \ + \ + if (wmi_log->length <= wmi_ring_size) \ + nread = wmi_log->length; \ + else \ + nread = wmi_ring_size; \ + \ + if (*(wmi_log->p_buf_tail_idx) == 0) \ + /* tail can be 0 after wrap-around */ \ + pos = wmi_ring_size - 1; \ + else \ + pos = *(wmi_log->p_buf_tail_idx) - 1; \ + \ + outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\ + qdf_spin_unlock(&wmi_handle->log_info.wmi_record_lock); \ + while (nread--) { \ + struct wmi_command_debug *wmi_record; \ + \ + wmi_record = (struct wmi_command_debug *) \ + &(((struct wmi_command_debug *)wmi_log->buf)[pos]);\ + outlen += wmi_bp_seq_printf(m, "CMD ID = %x\n", \ + (wmi_record->command)); \ + qdf_log_timestamp_to_secs(wmi_record->time, &secs,\ + &usecs); \ + outlen += \ + wmi_bp_seq_printf(m, "CMD TIME = [%llu.%06llu]\n",\ + secs, usecs); \ + outlen += wmi_bp_seq_printf(m, "CMD = "); \ + for (i = 0; i < (wmi_record_max_length/ \ + sizeof(uint32_t)); i++) \ + outlen += wmi_bp_seq_printf(m, "%x ", \ + wmi_record->data[i]); \ + outlen += wmi_bp_seq_printf(m, "\n"); \ + \ + if (pos == 0) \ + pos = wmi_ring_size - 1; \ + else \ + pos--; \ + } \ + return outlen; \ + } \ + +#define GENERATE_EVENT_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size) \ + static int 
debug_wmi_##func_base##_show(struct seq_file *m, \ + void *v) \ + { \ + wmi_unified_t wmi_handle = (wmi_unified_t) m->private; \ + struct wmi_log_buf_t *wmi_log = \ + &wmi_handle->log_info.wmi_##func_base##_buf_info;\ + int pos, nread, outlen; \ + int i; \ + uint64_t secs, usecs; \ + \ + qdf_spin_lock(&wmi_handle->log_info.wmi_record_lock); \ + if (!wmi_log->length) { \ + qdf_spin_unlock(&wmi_handle->log_info.wmi_record_lock);\ + return wmi_bp_seq_printf(m, \ + "no elements to read from ring buffer!\n"); \ + } \ + \ + if (wmi_log->length <= wmi_ring_size) \ + nread = wmi_log->length; \ + else \ + nread = wmi_ring_size; \ + \ + if (*(wmi_log->p_buf_tail_idx) == 0) \ + /* tail can be 0 after wrap-around */ \ + pos = wmi_ring_size - 1; \ + else \ + pos = *(wmi_log->p_buf_tail_idx) - 1; \ + \ + outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\ + qdf_spin_unlock(&wmi_handle->log_info.wmi_record_lock); \ + while (nread--) { \ + struct wmi_event_debug *wmi_record; \ + \ + wmi_record = (struct wmi_event_debug *) \ + &(((struct wmi_event_debug *)wmi_log->buf)[pos]);\ + qdf_log_timestamp_to_secs(wmi_record->time, &secs,\ + &usecs); \ + outlen += wmi_bp_seq_printf(m, "Event ID = %x\n",\ + (wmi_record->event)); \ + outlen += \ + wmi_bp_seq_printf(m, "Event TIME = [%llu.%06llu]\n",\ + secs, usecs); \ + outlen += wmi_bp_seq_printf(m, "CMD = "); \ + for (i = 0; i < (wmi_record_max_length/ \ + sizeof(uint32_t)); i++) \ + outlen += wmi_bp_seq_printf(m, "%x ", \ + wmi_record->data[i]); \ + outlen += wmi_bp_seq_printf(m, "\n"); \ + \ + if (pos == 0) \ + pos = wmi_ring_size - 1; \ + else \ + pos--; \ + } \ + return outlen; \ + } + +GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size); +GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_tx_cmp_log, wmi_display_size); +GENERATE_EVENT_DEBUG_SHOW_FUNCS(event_log, wmi_display_size); +GENERATE_EVENT_DEBUG_SHOW_FUNCS(rx_event_log, wmi_display_size); +GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_log, wmi_display_size); 
+GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_tx_cmp_log, + wmi_display_size); +GENERATE_EVENT_DEBUG_SHOW_FUNCS(mgmt_event_log, wmi_display_size); + +/** + * debug_wmi_enable_show() - debugfs functions to display enable state of + * wmi logging feature. + * + * @m: debugfs handler to access wmi_handle + * @v: Variable arguments (not used) + * + * Return: always 1 + */ +static int debug_wmi_enable_show(struct seq_file *m, void *v) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) m->private; + + return wmi_bp_seq_printf(m, "%d\n", + wmi_handle->log_info.wmi_logging_enable); +} + +/** + * debug_wmi_log_size_show() - debugfs functions to display configured size of + * wmi logging command/event buffer and management command/event buffer. + * + * @m: debugfs handler to access wmi_handle + * @v: Variable arguments (not used) + * + * Return: Length of characters printed + */ +static int debug_wmi_log_size_show(struct seq_file *m, void *v) +{ + + wmi_bp_seq_printf(m, "WMI command/event log max size:%d\n", + wmi_log_max_entry); + return wmi_bp_seq_printf(m, + "WMI management command/events log max size:%d\n", + wmi_mgmt_log_max_entry); +} + +/** + * debug_wmi_##func_base##_write() - debugfs functions to clear + * wmi logging command/event buffer and management command/event buffer. + * + * @file: file handler to access wmi_handle + * @buf: received data buffer + * @count: length of received buffer + * @ppos: Not used + * + * Return: count + */ +#define GENERATE_DEBUG_WRITE_FUNCS(func_base, wmi_ring_size, wmi_record_type)\ + static ssize_t debug_wmi_##func_base##_write(struct file *file, \ + const char __user *buf, \ + size_t count, loff_t *ppos) \ + { \ + int k, ret; \ + wmi_unified_t wmi_handle = \ + ((struct seq_file *)file->private_data)->private;\ + struct wmi_log_buf_t *wmi_log = &wmi_handle->log_info. 
\ + wmi_##func_base##_buf_info; \ + char locbuf[50]; \ + \ + if ((!buf) || (count > 50)) \ + return -EFAULT; \ + \ + if (copy_from_user(locbuf, buf, count)) \ + return -EFAULT; \ + \ + ret = sscanf(locbuf, "%d", &k); \ + if ((ret != 1) || (k != 0)) { \ + qdf_print("Wrong input, echo 0 to clear the wmi buffer\n");\ + return -EINVAL; \ + } \ + \ + qdf_spin_lock(&wmi_handle->log_info.wmi_record_lock); \ + qdf_mem_zero(wmi_log->buf, wmi_ring_size * \ + sizeof(struct wmi_record_type)); \ + wmi_log->length = 0; \ + *(wmi_log->p_buf_tail_idx) = 0; \ + qdf_spin_unlock(&wmi_handle->log_info.wmi_record_lock); \ + \ + return count; \ + } + +GENERATE_DEBUG_WRITE_FUNCS(command_log, wmi_log_max_entry, + wmi_command_debug); +GENERATE_DEBUG_WRITE_FUNCS(command_tx_cmp_log, wmi_log_max_entry, + wmi_command_debug); +GENERATE_DEBUG_WRITE_FUNCS(event_log, wmi_log_max_entry, + wmi_event_debug); +GENERATE_DEBUG_WRITE_FUNCS(rx_event_log, wmi_log_max_entry, + wmi_event_debug); +GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_log, wmi_mgmt_log_max_entry, + wmi_command_debug); +GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_tx_cmp_log, + wmi_mgmt_log_max_entry, wmi_command_debug); +GENERATE_DEBUG_WRITE_FUNCS(mgmt_event_log, wmi_mgmt_log_max_entry, + wmi_event_debug); + +/** + * debug_wmi_enable_write() - debugfs functions to enable/disable + * wmi logging feature. 
+ * + * @file: file handler to access wmi_handle + * @buf: received data buffer + * @count: length of received buffer + * @ppos: Not used + * + * Return: count + */ +static ssize_t debug_wmi_enable_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + wmi_unified_t wmi_handle = + ((struct seq_file *)file->private_data)->private; + int k, ret; + char locbuf[50]; + + if ((!buf) || (count > 50)) + return -EFAULT; + + if (copy_from_user(locbuf, buf, count)) + return -EFAULT; + + ret = sscanf(locbuf, "%d", &k); + if ((ret != 1) || ((k != 0) && (k != 1))) + return -EINVAL; + + wmi_handle->log_info.wmi_logging_enable = k; + return count; +} + +/** + * debug_wmi_log_size_write() - reserved. + * + * @file: file handler to access wmi_handle + * @buf: received data buffer + * @count: length of received buffer + * @ppos: Not used + * + * Return: count + */ +static ssize_t debug_wmi_log_size_write(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + return -EINVAL; +} + +/* Structure to maintain debug information */ +struct wmi_debugfs_info { + const char *name; + struct dentry *de[MAX_WMI_INSTANCES]; + const struct file_operations *ops; +}; + +#define DEBUG_FOO(func_base) { .name = #func_base, \ + .ops = &debug_##func_base##_ops } + +/** + * debug_##func_base##_open() - Open debugfs entry for respective command + * and event buffer. 
+ * + * @inode: node for debug dir entry + * @file: file handler + * + * Return: open status + */ +#define GENERATE_DEBUG_STRUCTS(func_base) \ + static int debug_##func_base##_open(struct inode *inode, \ + struct file *file) \ + { \ + return single_open(file, debug_##func_base##_show, \ + inode->i_private); \ + } \ + \ + \ + static struct file_operations debug_##func_base##_ops = { \ + .open = debug_##func_base##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .write = debug_##func_base##_write, \ + .release = single_release, \ + }; + +GENERATE_DEBUG_STRUCTS(wmi_command_log); +GENERATE_DEBUG_STRUCTS(wmi_command_tx_cmp_log); +GENERATE_DEBUG_STRUCTS(wmi_event_log); +GENERATE_DEBUG_STRUCTS(wmi_rx_event_log); +GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_log); +GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log); +GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log); +GENERATE_DEBUG_STRUCTS(wmi_enable); +GENERATE_DEBUG_STRUCTS(wmi_log_size); + +struct wmi_debugfs_info wmi_debugfs_infos[] = { + DEBUG_FOO(wmi_command_log), + DEBUG_FOO(wmi_command_tx_cmp_log), + DEBUG_FOO(wmi_event_log), + DEBUG_FOO(wmi_rx_event_log), + DEBUG_FOO(wmi_mgmt_command_log), + DEBUG_FOO(wmi_mgmt_command_tx_cmp_log), + DEBUG_FOO(wmi_mgmt_event_log), + DEBUG_FOO(wmi_enable), + DEBUG_FOO(wmi_log_size), +}; + +#define NUM_DEBUG_INFOS (sizeof(wmi_debugfs_infos) / \ + sizeof(wmi_debugfs_infos[0])) + +/** + * wmi_debugfs_create() - Create debug_fs entry for wmi logging. 
+ * + * @wmi_handle: wmi handle + * @par_entry: debug directory entry + * @id: Index to debug info data array + * + * Return: none + */ +static void wmi_debugfs_create(wmi_unified_t wmi_handle, + struct dentry *par_entry, int id) +{ + int i; + + if (par_entry == NULL || (id < 0) || (id >= MAX_WMI_INSTANCES)) + goto out; + + for (i = 0; i < NUM_DEBUG_INFOS; ++i) { + + wmi_debugfs_infos[i].de[id] = debugfs_create_file( + wmi_debugfs_infos[i].name, 0644, par_entry, + wmi_handle, wmi_debugfs_infos[i].ops); + + if (wmi_debugfs_infos[i].de[id] == NULL) { + qdf_print("%s: debug Entry creation failed!\n", + __func__); + goto out; + } + } + + return; + +out: + qdf_print("%s: debug Entry creation failed!\n", __func__); + wmi_log_buffer_free(wmi_handle); + return; +} + +/** + * wmi_debugfs_remove() - Remove debugfs entry for wmi logging. + * @wmi_handle: wmi handle + * @dentry: debugfs directory entry + * @id: Index to debug info data array + * + * Return: none + */ +static void wmi_debugfs_remove(wmi_unified_t wmi_handle) +{ + int i; + struct dentry *dentry = wmi_handle->log_info.wmi_log_debugfs_dir; + int id; + + if (!wmi_handle->log_info.wmi_instance_id) + return; + + id = wmi_handle->log_info.wmi_instance_id - 1; + if (dentry && (!(id < 0) || (id >= MAX_WMI_INSTANCES))) { + for (i = 0; i < NUM_DEBUG_INFOS; ++i) { + if (wmi_debugfs_infos[i].de[id]) + wmi_debugfs_infos[i].de[id] = NULL; + } + } + + if (dentry) + debugfs_remove_recursive(dentry); + + if (wmi_handle->log_info.wmi_instance_id) + wmi_handle->log_info.wmi_instance_id--; +} + +/** + * wmi_debugfs_init() - debugfs functions to create debugfs directory and to + * create debugfs enteries. 
+ * + * @h: wmi handler + * + * Return: init status + */ +static QDF_STATUS wmi_debugfs_init(wmi_unified_t wmi_handle) +{ + int wmi_index = wmi_handle->log_info.wmi_instance_id; + + if (wmi_index < MAX_WMI_INSTANCES) { + wmi_handle->log_info.wmi_log_debugfs_dir = + debugfs_create_dir(debugfs_dir[wmi_index], NULL); + + if (!wmi_handle->log_info.wmi_log_debugfs_dir) { + qdf_print("error while creating debugfs dir for %s\n", + debugfs_dir[wmi_index]); + return QDF_STATUS_E_FAILURE; + } + + wmi_debugfs_create(wmi_handle, + wmi_handle->log_info.wmi_log_debugfs_dir, + wmi_index); + wmi_handle->log_info.wmi_instance_id++; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * wmi_mgmt_cmd_record() - Wrapper function for mgmt command logging macro + * + * @wmi_handle: wmi handle + * @cmd: mgmt command + * @header: pointer to 802.11 header + * @vdev_id: vdev id + * @chanfreq: channel frequency + * + * Return: none + */ +void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd, + void *header, uint32_t vdev_id, uint32_t chanfreq) +{ + + uint32_t data[CUSTOM_MGMT_CMD_DATA_SIZE]; + + data[0] = ((struct wmi_command_header *)header)->type; + data[1] = ((struct wmi_command_header *)header)->sub_type; + data[2] = vdev_id; + data[3] = chanfreq; + + qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); + + WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data); + + qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); +} +#else +/** + * wmi_debugfs_remove() - Remove debugfs entry for wmi logging. 
+ * @wmi_handle: wmi handle + * @dentry: debugfs directory entry + * @id: Index to debug info data array + * + * Return: none + */ +static void wmi_debugfs_remove(wmi_unified_t wmi_handle) { } +void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd, + void *header, uint32_t vdev_id, uint32_t chanfreq) { } +static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) { } +#endif /*WMI_INTERFACE_EVENT_LOGGING */ +qdf_export_symbol(wmi_mgmt_cmd_record); + +int wmi_get_host_credits(wmi_unified_t wmi_handle); +/* WMI buffer APIs */ + +#ifdef NBUF_MEMORY_DEBUG +wmi_buf_t +wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint16_t len, uint8_t *file_name, + uint32_t line_num) +{ + wmi_buf_t wmi_buf; + + if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) { + QDF_ASSERT(0); + return NULL; + } + + wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, file_name, + line_num); + if (!wmi_buf) + wmi_buf = qdf_nbuf_alloc_debug(NULL, + roundup(len + WMI_MIN_HEAD_ROOM, + 4), + WMI_MIN_HEAD_ROOM, 4, false, + file_name, line_num); + + if (!wmi_buf) + return NULL; + + /* Clear the wmi buffer */ + OS_MEMZERO(qdf_nbuf_data(wmi_buf), len); + + /* + * Set the length of the buffer to match the allocation size. 
+ */ + qdf_nbuf_set_pktlen(wmi_buf, len); + + return wmi_buf; +} +qdf_export_symbol(wmi_buf_alloc_debug); + +void wmi_buf_free(wmi_buf_t net_buf) +{ + net_buf = wbuff_buff_put(net_buf); + if (net_buf) + qdf_nbuf_free(net_buf); +} +qdf_export_symbol(wmi_buf_free); +#else +wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len, + const char *func, uint32_t line) +{ + wmi_buf_t wmi_buf; + + if (roundup(len + WMI_MIN_HEAD_ROOM, 4) > wmi_handle->max_msg_len) { + wmi_nofl_err("%s:%d, Invalid len:%d", func, line, len); + QDF_DEBUG_PANIC(); + return NULL; + } + + wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, __FILE__, + __LINE__); + if (!wmi_buf) + wmi_buf = qdf_nbuf_alloc_fl(NULL, roundup(len + + WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4, + false, func, line); + + if (!wmi_buf) + return NULL; + + /* Clear the wmi buffer */ + OS_MEMZERO(qdf_nbuf_data(wmi_buf), len); + + /* + * Set the length of the buffer to match the allocation size. + */ + qdf_nbuf_set_pktlen(wmi_buf, len); + return wmi_buf; +} +qdf_export_symbol(wmi_buf_alloc_fl); + +void wmi_buf_free(wmi_buf_t net_buf) +{ + net_buf = wbuff_buff_put(net_buf); + if (net_buf) + qdf_nbuf_free(net_buf); +} +qdf_export_symbol(wmi_buf_free); +#endif + +/** + * wmi_get_max_msg_len() - get maximum WMI message length + * @wmi_handle: WMI handle. 
+ * + * This function returns the maximum WMI message length + * + * Return: maximum WMI message length + */ +uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle) +{ + return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM; +} +qdf_export_symbol(wmi_get_max_msg_len); + +#ifndef WMI_CMD_STRINGS +#ifdef WLAN_DEBUG +static uint8_t *wmi_id_to_name(uint32_t wmi_command) +{ + return "Invalid WMI cmd"; +} +#endif + +#endif + +#ifdef CONFIG_MCL +static inline void wmi_log_cmd_id(uint32_t cmd_id, uint32_t tag) +{ + WMI_LOGD("Send WMI command:%s command_id:%d htc_tag:%d\n", + wmi_id_to_name(cmd_id), cmd_id, tag); +} + +/** + * wmi_is_pm_resume_cmd() - check if a cmd is part of the resume sequence + * @cmd_id: command to check + * + * Return: true if the command is part of the resume sequence. + */ +static bool wmi_is_pm_resume_cmd(uint32_t cmd_id) +{ + switch (cmd_id) { + case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID: + case WMI_PDEV_RESUME_CMDID: + return true; + + default: + return false; + } +} +#else +static bool wmi_is_pm_resume_cmd(uint32_t cmd_id) +{ + return false; +} +#endif + +/** + * wmi_unified_cmd_send() - WMI command API + * @wmi_handle: handle to wmi + * @buf: wmi buf + * @len: wmi buffer length + * @cmd_id: wmi command id + * + * Note, it is NOT safe to access buf after calling this function! 
+ * + * Return: 0 on success + */ +QDF_STATUS wmi_unified_cmd_send(wmi_unified_t wmi_handle, wmi_buf_t buf, + uint32_t len, uint32_t cmd_id) +{ + HTC_PACKET *pkt; + QDF_STATUS status; + uint16_t htc_tag = 0; + + if (wmi_get_runtime_pm_inprogress(wmi_handle)) { + htc_tag = + (uint16_t)wmi_handle->ops->wmi_set_htc_tx_tag( + wmi_handle, buf, cmd_id); + } else if (qdf_atomic_read(&wmi_handle->is_target_suspended) && + (!wmi_is_pm_resume_cmd(cmd_id))) { + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, + "%s: Target is suspended", __func__); + if (!wmi_handle->wmi_stopinprogress) + QDF_ASSERT(0); + return QDF_STATUS_E_BUSY; + } + if (wmi_handle->wmi_stopinprogress) { + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, + "WMI stop in progress\n"); + return QDF_STATUS_E_INVAL; + } + +#ifndef WMI_NON_TLV_SUPPORT + /* Do sanity check on the TLV parameter structure */ + if (wmi_handle->target_type == WMI_TLV_TARGET) { + void *buf_ptr = (void *)qdf_nbuf_data(buf); + + if (wmi_handle->ops->wmi_check_command_params(NULL, buf_ptr, len, cmd_id) + != 0) { + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, + "\nERROR: %s: Invalid WMI Param Buffer for Cmd:%d", + __func__, cmd_id); + return QDF_STATUS_E_INVAL; + } + } +#endif + + if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) { + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, + "%s, Failed to send cmd %x, no memory", + __func__, cmd_id); + return QDF_STATUS_E_NOMEM; + } + + qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR)); + WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id); + + qdf_atomic_inc(&wmi_handle->pending_cmds); + if (qdf_atomic_read(&wmi_handle->pending_cmds) >= + wmi_handle->wmi_max_cmds) { + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, + "\n%s: hostcredits = %d", __func__, + wmi_get_host_credits(wmi_handle)); + htc_dump_counter_info(wmi_handle->htc_handle); + qdf_atomic_dec(&wmi_handle->pending_cmds); + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, + "%s: MAX %d 
WMI Pending cmds reached.", __func__, + wmi_handle->wmi_max_cmds); + qdf_trigger_self_recovery(); + return QDF_STATUS_E_BUSY; + } + + pkt = qdf_mem_malloc(sizeof(*pkt)); + if (!pkt) { + qdf_atomic_dec(&wmi_handle->pending_cmds); + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, + "%s, Failed to alloc htc packet %x, no memory", + __func__, cmd_id); + return QDF_STATUS_E_NOMEM; + } + + SET_HTC_PACKET_INFO_TX(pkt, + NULL, + qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR), + wmi_handle->wmi_endpoint_id, htc_tag); + + SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf); +#ifdef CONFIG_MCL + wmi_log_cmd_id(cmd_id, htc_tag); +#endif + +#ifdef WMI_INTERFACE_EVENT_LOGGING + if (wmi_handle->log_info.wmi_logging_enable) { + qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); + /* + * Record 16 bytes of WMI cmd data - + * exclude TLV and WMI headers + * + * WMI mgmt command already recorded in wmi_mgmt_cmd_record + */ + if (wmi_handle->ops->is_management_record(cmd_id) == false) { + WMI_COMMAND_RECORD(wmi_handle, cmd_id, + qdf_nbuf_data(buf) + + wmi_handle->log_info.buf_offset_command); + } + qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); + } +#endif + + status = htc_send_pkt(wmi_handle->htc_handle, pkt); + + if (QDF_STATUS_SUCCESS != status) { + qdf_atomic_dec(&wmi_handle->pending_cmds); + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, + "%s %d, htc_send_pkt failed", __func__, __LINE__); + qdf_mem_free(pkt); + return status; + } + + return QDF_STATUS_SUCCESS; +} +qdf_export_symbol(wmi_unified_cmd_send); + +/** + * wmi_unified_get_event_handler_ix() - gives event handler's index + * @wmi_handle: handle to wmi + * @event_id: wmi event id + * + * Return: event handler's index + */ +static int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle, + uint32_t event_id) +{ + uint32_t idx = 0; + int32_t invalid_idx = -1; + struct wmi_soc *soc = wmi_handle->soc; + + for (idx = 0; (idx < soc->max_event_idx && + idx < WMI_UNIFIED_MAX_EVENT); ++idx) { + if 
(wmi_handle->event_id[idx] == event_id && + wmi_handle->event_handler[idx] != NULL) { + return idx; + } + } + + return invalid_idx; +} + +/** + * wmi_unified_register_event() - register wmi event handler + * @wmi_handle: handle to wmi + * @event_id: wmi event id + * @handler_func: wmi event handler function + * + * Return: 0 on success + */ +int wmi_unified_register_event(wmi_unified_t wmi_handle, + uint32_t event_id, + wmi_unified_event_handler handler_func) +{ + uint32_t idx = 0; + uint32_t evt_id; + struct wmi_soc *soc = wmi_handle->soc; + + if (event_id >= wmi_events_max || + wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) { + qdf_print("%s: Event id %d is unavailable\n", + __func__, event_id); + return QDF_STATUS_E_FAILURE; + } + evt_id = wmi_handle->wmi_events[event_id]; + if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) { + qdf_print("%s : event handler already registered 0x%x\n", + __func__, evt_id); + return QDF_STATUS_E_FAILURE; + } + if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) { + qdf_print("%s : no more event handlers 0x%x\n", + __func__, evt_id); + return QDF_STATUS_E_FAILURE; + } + idx = soc->max_event_idx; + wmi_handle->event_handler[idx] = handler_func; + wmi_handle->event_id[idx] = evt_id; + qdf_spin_lock_bh(&soc->ctx_lock); + wmi_handle->ctx[idx] = WMI_RX_UMAC_CTX; + qdf_spin_unlock_bh(&soc->ctx_lock); + soc->max_event_idx++; + + return 0; +} + +/** + * wmi_unified_register_event_handler() - register wmi event handler + * @wmi_handle: handle to wmi + * @event_id: wmi event id + * @handler_func: wmi event handler function + * @rx_ctx: rx execution context for wmi rx events + * + * This API is to support legacy requirements. Will be deprecated in future. 
+ * Return: 0 on success + */ +int wmi_unified_register_event_handler(wmi_unified_t wmi_handle, + wmi_conv_event_id event_id, + wmi_unified_event_handler handler_func, + uint8_t rx_ctx) +{ + uint32_t idx = 0; + uint32_t evt_id; + struct wmi_soc *soc = wmi_handle->soc; + + if (event_id >= wmi_events_max || + wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) { + qdf_print("%s: Event id %d is unavailable\n", + __func__, event_id); + return QDF_STATUS_E_FAILURE; + } + evt_id = wmi_handle->wmi_events[event_id]; + + if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) { + qdf_print("%s : event handler already registered 0x%x\n", + __func__, evt_id); + return QDF_STATUS_E_FAILURE; + } + if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) { + qdf_print("%s : no more event handlers 0x%x\n", + __func__, evt_id); + return QDF_STATUS_E_FAILURE; + } + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + "Registered event handler for event 0x%8x\n", evt_id); + idx = soc->max_event_idx; + wmi_handle->event_handler[idx] = handler_func; + wmi_handle->event_id[idx] = evt_id; + qdf_spin_lock_bh(&soc->ctx_lock); + wmi_handle->ctx[idx] = rx_ctx; + qdf_spin_unlock_bh(&soc->ctx_lock); + soc->max_event_idx++; + + return 0; +} +qdf_export_symbol(wmi_unified_register_event_handler); + +/** + * wmi_unified_unregister_event() - unregister wmi event handler + * @wmi_handle: handle to wmi + * @event_id: wmi event id + * + * Return: 0 on success + */ +int wmi_unified_unregister_event(wmi_unified_t wmi_handle, + uint32_t event_id) +{ + uint32_t idx = 0; + uint32_t evt_id; + struct wmi_soc *soc = wmi_handle->soc; + + if (event_id >= wmi_events_max || + wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) { + qdf_print("%s: Event id %d is unavailable\n", + __func__, event_id); + return QDF_STATUS_E_FAILURE; + } + evt_id = wmi_handle->wmi_events[event_id]; + + idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id); + if (idx == -1) { + qdf_print("%s : event handler is not 
registered: evt id 0x%x\n", + __func__, evt_id); + return QDF_STATUS_E_FAILURE; + } + wmi_handle->event_handler[idx] = NULL; + wmi_handle->event_id[idx] = 0; + --soc->max_event_idx; + wmi_handle->event_handler[idx] = + wmi_handle->event_handler[soc->max_event_idx]; + wmi_handle->event_id[idx] = + wmi_handle->event_id[soc->max_event_idx]; + + return 0; +} + +/** + * wmi_unified_unregister_event_handler() - unregister wmi event handler + * @wmi_handle: handle to wmi + * @event_id: wmi event id + * + * Return: 0 on success + */ +int wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle, + wmi_conv_event_id event_id) +{ + uint32_t idx = 0; + uint32_t evt_id; + struct wmi_soc *soc = wmi_handle->soc; + + if (event_id >= wmi_events_max || + wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) { + qdf_print("%s: Event id %d is unavailable\n", + __func__, event_id); + return QDF_STATUS_E_FAILURE; + } + evt_id = wmi_handle->wmi_events[event_id]; + + idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id); + if (idx == -1) { + qdf_print("%s : event handler is not registered: evt id 0x%x\n", + __func__, evt_id); + return QDF_STATUS_E_FAILURE; + } + wmi_handle->event_handler[idx] = NULL; + wmi_handle->event_id[idx] = 0; + --soc->max_event_idx; + wmi_handle->event_handler[idx] = + wmi_handle->event_handler[soc->max_event_idx]; + wmi_handle->event_id[idx] = + wmi_handle->event_id[soc->max_event_idx]; + + return 0; +} +qdf_export_symbol(wmi_unified_unregister_event_handler); + +/** + * wmi_process_fw_event_default_ctx() - process in default caller context + * @wmi_handle: handle to wmi + * @htc_packet: pointer to htc packet + * @exec_ctx: execution context for wmi fw event + * + * Event process by below function will be in default caller context. + * wmi internally provides rx work thread processing context. 
+ * + * Return: none + */ +static void wmi_process_fw_event_default_ctx(struct wmi_unified *wmi_handle, + HTC_PACKET *htc_packet, uint8_t exec_ctx) +{ + wmi_buf_t evt_buf; + evt_buf = (wmi_buf_t) htc_packet->pPktContext; + +#ifndef CONFIG_MCL + wmi_handle->rx_ops.wma_process_fw_event_handler_cbk + (wmi_handle->scn_handle, evt_buf, exec_ctx); +#else + wmi_handle->rx_ops.wma_process_fw_event_handler_cbk(wmi_handle, + htc_packet, exec_ctx); +#endif + + return; +} + +/** + * wmi_process_fw_event_worker_thread_ctx() - process in worker thread context + * @wmi_handle: handle to wmi + * @htc_packet: pointer to htc packet + * + * Event process by below function will be in worker thread context. + * Use this method for events which are not critical and not + * handled in protocol stack. + * + * Return: none + */ +void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle, + HTC_PACKET *htc_packet) +{ + wmi_buf_t evt_buf; + + evt_buf = (wmi_buf_t) htc_packet->pPktContext; + + qdf_spin_lock_bh(&wmi_handle->eventq_lock); + qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf); + qdf_spin_unlock_bh(&wmi_handle->eventq_lock); + qdf_queue_work(0, wmi_handle->wmi_rx_work_queue, + &wmi_handle->rx_event_work); + + return; +} +qdf_export_symbol(wmi_process_fw_event_worker_thread_ctx); + +/** + * wmi_get_pdev_ep: Get wmi handle based on endpoint + * @soc: handle to wmi soc + * @ep: endpoint id + * + * Return: none + */ +static struct wmi_unified *wmi_get_pdev_ep(struct wmi_soc *soc, + HTC_ENDPOINT_ID ep) +{ + uint32_t i; + + for (i = 0; i < WMI_MAX_RADIOS; i++) + if (soc->wmi_endpoint_id[i] == ep) + break; + + if (i == WMI_MAX_RADIOS) + return NULL; + + return soc->wmi_pdev[i]; +} + +/** + * wmi_mtrace_rx() - Wrappper function for qdf_mtrace api + * @message_id: 32-Bit Wmi message ID + * @vdev_id: Vdev ID + * @data: Actual message contents + * + * This function converts the 32-bit WMI message ID in 15-bit message ID + * format for qdf_mtrace as in qdf_mtrace message 
there are only 15 + * bits reserved for message ID. + * out of these 15-bits, 8-bits (From LSB) specifies the WMI_GRP_ID + * and remaining 7-bits specifies the actual WMI command. With this + * notation there can be maximum 256 groups and each group can have + * max 128 commands can be supported. + * + * Return: None + */ +static void wmi_mtrace_rx(uint32_t message_id, uint16_t vdev_id, uint32_t data) +{ + uint16_t mtrace_message_id; + + mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) | + (QDF_WMI_MTRACE_GRP_ID(message_id) << + QDF_WMI_MTRACE_CMD_NUM_BITS); + qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_WMA, + mtrace_message_id, vdev_id, data); +} + +/** + * wmi_control_rx() - process fw events callbacks + * @ctx: handle to wmi + * @htc_packet: pointer to htc packet + * + * Return: none + */ +static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet) +{ + struct wmi_soc *soc = (struct wmi_soc *) ctx; + struct wmi_unified *wmi_handle; + wmi_buf_t evt_buf; + uint32_t id; + uint32_t idx = 0; + enum wmi_rx_exec_ctx exec_ctx; + + evt_buf = (wmi_buf_t) htc_packet->pPktContext; + + wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint); + if (wmi_handle == NULL) { + qdf_print + ("%s :unable to get wmi_handle to Endpoint %d\n", + __func__, htc_packet->Endpoint); + qdf_nbuf_free(evt_buf); + return; + } + + id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID); + idx = wmi_unified_get_event_handler_ix(wmi_handle, id); + if (qdf_unlikely(idx == A_ERROR)) { + WMI_LOGD("%s :event handler is not registered: event id 0x%x\n", + __func__, id); + qdf_nbuf_free(evt_buf); + return; + } + wmi_mtrace_rx(id, 0xFF, idx); + qdf_spin_lock_bh(&soc->ctx_lock); + exec_ctx = wmi_handle->ctx[idx]; + qdf_spin_unlock_bh(&soc->ctx_lock); + +#ifdef WMI_INTERFACE_EVENT_LOGGING + if (wmi_handle->log_info.wmi_logging_enable) { + uint8_t *data; + data = qdf_nbuf_data(evt_buf); + + qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); + /* Exclude 4 bytes of TLV header */ + if 
(wmi_handle->ops->is_diag_event(id)) { + WMI_DIAG_RX_EVENT_RECORD(wmi_handle, id, + ((uint8_t *) data + + wmi_handle->log_info.buf_offset_event)); + } else if (wmi_handle->ops->is_management_record(id)) { + WMI_MGMT_RX_EVENT_RECORD(wmi_handle, id, + ((uint8_t *) data + + wmi_handle->log_info.buf_offset_event)); + } else { + WMI_RX_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data + + wmi_handle->log_info.buf_offset_event)); + } + qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); + } +#endif + + if (exec_ctx == WMI_RX_WORK_CTX) { + wmi_process_fw_event_worker_thread_ctx + (wmi_handle, htc_packet); + } else if (exec_ctx > WMI_RX_WORK_CTX) { + wmi_process_fw_event_default_ctx + (wmi_handle, htc_packet, exec_ctx); + } else { + qdf_print("%s :Invalid event context %d\n", __func__, exec_ctx); + qdf_nbuf_free(evt_buf); + } + +} + +/** + * wmi_process_fw_event() - process any fw event + * @wmi_handle: wmi handle + * @evt_buf: fw event buffer + * + * This function process fw event in caller context + * + * Return: none + */ +void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf) +{ + __wmi_control_rx(wmi_handle, evt_buf); +} + +/** + * __wmi_control_rx() - process serialize wmi event callback + * @wmi_handle: wmi handle + * @evt_buf: fw event buffer + * + * Return: none + */ +void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf) +{ + uint32_t id; + uint8_t *data; + uint32_t len; + void *wmi_cmd_struct_ptr = NULL; +#ifndef WMI_NON_TLV_SUPPORT + int tlv_ok_status = 0; +#endif + uint32_t idx = 0; + + id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID); + + if (qdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL) + goto end; + + data = qdf_nbuf_data(evt_buf); + len = qdf_nbuf_len(evt_buf); + +#ifndef WMI_NON_TLV_SUPPORT + if (wmi_handle->target_type == WMI_TLV_TARGET) { + /* Validate and pad(if necessary) the TLVs */ + tlv_ok_status = + wmi_handle->ops->wmi_check_and_pad_event(wmi_handle->scn_handle, + data, 
len, id, + &wmi_cmd_struct_ptr); + if (tlv_ok_status != 0) { + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, + "%s: Error: id=0x%d, wmitlv check status=%d\n", + __func__, id, tlv_ok_status); + goto end; + } + } +#endif + + idx = wmi_unified_get_event_handler_ix(wmi_handle, id); + if (idx == A_ERROR) { + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR, + "%s : event handler is not registered: event id 0x%x\n", + __func__, id); + goto end; + } +#ifdef WMI_INTERFACE_EVENT_LOGGING + if (wmi_handle->log_info.wmi_logging_enable) { + qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); + /* Exclude 4 bytes of TLV header */ + if (wmi_handle->ops->is_diag_event(id)) { + /* + * skip diag event logging in WMI event buffer + * as its already logged in WMI RX event buffer + */ + } else if (wmi_handle->ops->is_management_record(id)) { + /* + * skip wmi mgmt event logging in WMI event buffer + * as its already logged in WMI RX event buffer + */ + } else { + WMI_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data + + wmi_handle->log_info.buf_offset_event)); + } + qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); + } +#endif + /* Call the WMI registered event handler */ + if (wmi_handle->target_type == WMI_TLV_TARGET) + wmi_handle->event_handler[idx] (wmi_handle->scn_handle, + wmi_cmd_struct_ptr, len); + else + wmi_handle->event_handler[idx] (wmi_handle->scn_handle, + data, len); + +end: + /* Free event buffer and allocated event tlv */ +#ifndef WMI_NON_TLV_SUPPORT + if (wmi_handle->target_type == WMI_TLV_TARGET) + wmi_handle->ops->wmi_free_allocated_event(id, &wmi_cmd_struct_ptr); +#endif + + qdf_nbuf_free(evt_buf); + +} + +#define WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */ + +static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id) +{ + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: WLAN_BUG_RCA: Message type %x has exceeded its alloted time of %ds", + __func__, msg_type_id, WMI_WQ_WD_TIMEOUT / 1000); +} + +#ifdef CONFIG_SLUB_DEBUG_ON +static 
void wmi_workqueue_watchdog_bite(void *arg) +{ + struct wmi_wq_dbg_info *info = arg; + + wmi_workqueue_watchdog_warn(info->wd_msg_type_id); + qdf_print_thread_trace(info->task); + + QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, + "%s: Going down for WMI WQ Watchdog Bite!", __func__); + QDF_BUG(0); +} +#else +static inline void wmi_workqueue_watchdog_bite(void *arg) +{ + struct wmi_wq_dbg_info *info = arg; + + wmi_workqueue_watchdog_warn(info->wd_msg_type_id); +} +#endif + +/** + * wmi_rx_event_work() - process rx event in rx work queue context + * @arg: opaque pointer to wmi handle + * + * This function process any fw event to serialize it through rx worker thread. + * + * Return: none + */ +static void wmi_rx_event_work(void *arg) +{ + wmi_buf_t buf; + struct wmi_unified *wmi = arg; + qdf_timer_t wd_timer; + struct wmi_wq_dbg_info info; + + /* initialize WMI workqueue watchdog timer */ + qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite, + &info, QDF_TIMER_TYPE_SW); + qdf_spin_lock_bh(&wmi->eventq_lock); + buf = qdf_nbuf_queue_remove(&wmi->event_queue); + qdf_spin_unlock_bh(&wmi->eventq_lock); + while (buf) { + qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT); + info.wd_msg_type_id = + WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID); + info.wmi_wq = wmi->wmi_rx_work_queue; + info.task = qdf_get_current_task(); + __wmi_control_rx(wmi, buf); + qdf_timer_stop(&wd_timer); + qdf_spin_lock_bh(&wmi->eventq_lock); + buf = qdf_nbuf_queue_remove(&wmi->event_queue); + qdf_spin_unlock_bh(&wmi->eventq_lock); + } + qdf_timer_free(&wd_timer); +} + +#ifdef FEATURE_RUNTIME_PM +/** + * wmi_runtime_pm_init() - initialize runtime pm wmi variables + * @wmi_handle: wmi context + */ +static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle) +{ + qdf_atomic_init(&wmi_handle->runtime_pm_inprogress); +} + +/** + * wmi_set_runtime_pm_inprogress() - set runtime pm progress flag + * @wmi_handle: wmi context + * @val: runtime pm progress flag + */ +void 
wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val) +{ + qdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val); +} + +/** + * wmi_get_runtime_pm_inprogress() - get runtime pm progress flag + * @wmi_handle: wmi context + */ +inline bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle) +{ + return qdf_atomic_read(&wmi_handle->runtime_pm_inprogress); +} +#else +static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle) +{ +} +#endif + +/** + * wmi_unified_get_soc_handle: Get WMI SoC handle + * @param wmi_handle: WMI context got from wmi_attach + * + * return: Pointer to Soc handle + */ +void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle) +{ + return wmi_handle->soc; +} + +/** + * wmi_interface_logging_init: Interface looging init + * @param wmi_handle: Pointer to wmi handle object + * + * return: None + */ +#ifdef WMI_INTERFACE_EVENT_LOGGING +static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle) +{ + if (QDF_STATUS_SUCCESS == wmi_log_init(wmi_handle)) { + qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock); + wmi_debugfs_init(wmi_handle); + } +} +#else +static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle) +{ +} +#endif + +/** + * wmi_target_params_init: Target specific params init + * @param wmi_soc: Pointer to wmi soc object + * @param wmi_handle: Pointer to wmi handle object + * + * return: None + */ +#ifndef CONFIG_MCL +static inline void wmi_target_params_init(struct wmi_soc *soc, + struct wmi_unified *wmi_handle) +{ + wmi_handle->pdev_param = soc->pdev_param; + wmi_handle->vdev_param = soc->vdev_param; + wmi_handle->services = soc->services; +} +#else +static inline void wmi_target_params_init(struct wmi_soc *soc, + struct wmi_unified *wmi_handle) +{ + wmi_handle->services = soc->services; +} +#endif + +/** + * wmi_unified_get_pdev_handle: Get WMI SoC handle + * @param wmi_soc: Pointer to wmi soc object + * @param pdev_idx: pdev index + * + * return: Pointer to wmi 
handle or NULL on failure + */ +void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx) +{ + struct wmi_unified *wmi_handle; + + if (pdev_idx >= WMI_MAX_RADIOS) + return NULL; + + if (soc->wmi_pdev[pdev_idx] == NULL) { + wmi_handle = + (struct wmi_unified *) qdf_mem_malloc( + sizeof(struct wmi_unified)); + if (wmi_handle == NULL) { + qdf_print("allocation of wmi handle failed %zu\n", + sizeof(struct wmi_unified)); + return NULL; + } + wmi_handle->scn_handle = soc->scn_handle; + wmi_handle->event_id = soc->event_id; + wmi_handle->event_handler = soc->event_handler; + wmi_handle->ctx = soc->ctx; + wmi_handle->ops = soc->ops; + qdf_spinlock_create(&wmi_handle->eventq_lock); + qdf_nbuf_queue_init(&wmi_handle->event_queue); + + qdf_create_work(0, &wmi_handle->rx_event_work, + wmi_rx_event_work, wmi_handle); + wmi_handle->wmi_rx_work_queue = + qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue"); + if (NULL == wmi_handle->wmi_rx_work_queue) { + WMI_LOGE("failed to create wmi_rx_event_work_queue"); + goto error; + } + wmi_handle->wmi_events = soc->wmi_events; + wmi_target_params_init(soc, wmi_handle); + wmi_interface_logging_init(wmi_handle); + qdf_atomic_init(&wmi_handle->pending_cmds); + qdf_atomic_init(&wmi_handle->is_target_suspended); + wmi_handle->target_type = soc->target_type; + wmi_handle->wmi_max_cmds = soc->wmi_max_cmds; + wmi_handle->soc = soc; + + soc->wmi_pdev[pdev_idx] = wmi_handle; + } else + wmi_handle = soc->wmi_pdev[pdev_idx]; + + wmi_handle->wmi_stopinprogress = 0; + wmi_handle->wmi_endpoint_id = soc->wmi_endpoint_id[pdev_idx]; + wmi_handle->htc_handle = soc->htc_handle; + wmi_handle->max_msg_len = soc->max_msg_len[pdev_idx]; + + return wmi_handle; + +error: + qdf_mem_free(wmi_handle); + + return NULL; +} +qdf_export_symbol(wmi_unified_get_pdev_handle); + +static void (*wmi_attach_register[WMI_MAX_TARGET_TYPE])(wmi_unified_t); + +void wmi_unified_register_module(enum wmi_target_type target_type, + void (*wmi_attach)(wmi_unified_t 
wmi_handle)) +{ + if (target_type < WMI_MAX_TARGET_TYPE) + wmi_attach_register[target_type] = wmi_attach; + + return; +} +qdf_export_symbol(wmi_unified_register_module); + +/** + * wmi_wbuff_register() - register wmi with wbuff + * @wmi_handle: handle to wmi + * + * @Return: void + */ +static void wmi_wbuff_register(struct wmi_unified *wmi_handle) +{ + struct wbuff_alloc_request wbuff_alloc[4]; + + wbuff_alloc[0].slot = WBUFF_POOL_0; + wbuff_alloc[0].size = WMI_WBUFF_POOL_0_SIZE; + wbuff_alloc[1].slot = WBUFF_POOL_1; + wbuff_alloc[1].size = WMI_WBUFF_POOL_1_SIZE; + wbuff_alloc[2].slot = WBUFF_POOL_2; + wbuff_alloc[2].size = WMI_WBUFF_POOL_2_SIZE; + wbuff_alloc[3].slot = WBUFF_POOL_3; + wbuff_alloc[3].size = WMI_WBUFF_POOL_3_SIZE; + + wmi_handle->wbuff_handle = wbuff_module_register(wbuff_alloc, 4, + WMI_MIN_HEAD_ROOM, 4); +} + +/** + * wmi_wbuff_deregister() - deregister wmi with wbuff + * @wmi_handle: handle to wmi + * + * @Return: void + */ +static inline void wmi_wbuff_deregister(struct wmi_unified *wmi_handle) +{ + wbuff_module_deregister(wmi_handle->wbuff_handle); + wmi_handle->wbuff_handle = NULL; +} + +/** + * wmi_unified_attach() - attach for unified WMI + * @scn_handle: handle to SCN + * @osdev: OS device context + * @target_type: TLV or not-TLV based target + * @use_cookie: cookie based allocation enabled/disabled + * @ops: umac rx callbacks + * @psoc: objmgr psoc + * + * @Return: wmi handle. 
+ */ +void *wmi_unified_attach(void *scn_handle, + struct wmi_unified_attach_params *param) +{ + struct wmi_unified *wmi_handle; + struct wmi_soc *soc; + + soc = (struct wmi_soc *) qdf_mem_malloc(sizeof(struct wmi_soc)); + if (soc == NULL) { + qdf_print("Allocation of wmi_soc failed %zu\n", + sizeof(struct wmi_soc)); + return NULL; + } + + wmi_handle = + (struct wmi_unified *) qdf_mem_malloc( + sizeof(struct wmi_unified)); + if (wmi_handle == NULL) { + qdf_mem_free(soc); + qdf_print("allocation of wmi handle failed %zu\n", + sizeof(struct wmi_unified)); + return NULL; + } + wmi_handle->soc = soc; + wmi_handle->event_id = soc->event_id; + wmi_handle->event_handler = soc->event_handler; + wmi_handle->ctx = soc->ctx; + wmi_handle->wmi_events = soc->wmi_events; + wmi_target_params_init(soc, wmi_handle); + wmi_handle->scn_handle = scn_handle; + soc->scn_handle = scn_handle; + qdf_atomic_init(&wmi_handle->pending_cmds); + qdf_atomic_init(&wmi_handle->is_target_suspended); + wmi_runtime_pm_init(wmi_handle); + qdf_spinlock_create(&wmi_handle->eventq_lock); + qdf_nbuf_queue_init(&wmi_handle->event_queue); + qdf_create_work(0, &wmi_handle->rx_event_work, + wmi_rx_event_work, wmi_handle); + wmi_handle->wmi_rx_work_queue = + qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue"); + if (NULL == wmi_handle->wmi_rx_work_queue) { + WMI_LOGE("failed to create wmi_rx_event_work_queue"); + goto error; + } + wmi_interface_logging_init(wmi_handle); + /* Attach mc_thread context processing function */ + wmi_handle->rx_ops.wma_process_fw_event_handler_cbk = + param->rx_ops->wma_process_fw_event_handler_cbk; + wmi_handle->target_type = param->target_type; + soc->target_type = param->target_type; + + if (param->target_type >= WMI_MAX_TARGET_TYPE) + goto error; + + if (wmi_attach_register[param->target_type]) { + wmi_attach_register[param->target_type](wmi_handle); + } else { + WMI_LOGE("wmi attach is not registered"); + goto error; + } + /* Assign target cookie capablity */ + 
wmi_handle->use_cookie = param->use_cookie; + wmi_handle->osdev = param->osdev; + wmi_handle->wmi_stopinprogress = 0; + wmi_handle->wmi_max_cmds = param->max_commands; + soc->wmi_max_cmds = param->max_commands; + /* Increase the ref count once refcount infra is present */ + soc->wmi_psoc = param->psoc; + qdf_spinlock_create(&soc->ctx_lock); + + soc->ops = wmi_handle->ops; + soc->wmi_pdev[0] = wmi_handle; + + wmi_wbuff_register(wmi_handle); + + return wmi_handle; + +error: + qdf_mem_free(soc); + qdf_mem_free(wmi_handle); + + return NULL; +} + +/** + * wmi_unified_detach() - detach for unified WMI + * + * @wmi_handle : handle to wmi. + * + * @Return: none. + */ +void wmi_unified_detach(struct wmi_unified *wmi_handle) +{ + wmi_buf_t buf; + struct wmi_soc *soc; + uint8_t i; + + wmi_wbuff_deregister(wmi_handle); + + soc = wmi_handle->soc; + for (i = 0; i < WMI_MAX_RADIOS; i++) { + if (soc->wmi_pdev[i]) { + qdf_flush_workqueue(0, + soc->wmi_pdev[i]->wmi_rx_work_queue); + qdf_destroy_workqueue(0, + soc->wmi_pdev[i]->wmi_rx_work_queue); + wmi_debugfs_remove(soc->wmi_pdev[i]); + buf = qdf_nbuf_queue_remove( + &soc->wmi_pdev[i]->event_queue); + while (buf) { + qdf_nbuf_free(buf); + buf = qdf_nbuf_queue_remove( + &soc->wmi_pdev[i]->event_queue); + } + + wmi_log_buffer_free(soc->wmi_pdev[i]); + + /* Free events logs list */ + if (soc->wmi_pdev[i]->events_logs_list) + qdf_mem_free( + soc->wmi_pdev[i]->events_logs_list); + + qdf_spinlock_destroy(&soc->wmi_pdev[i]->eventq_lock); + qdf_mem_free(soc->wmi_pdev[i]); + } + } + qdf_spinlock_destroy(&soc->ctx_lock); + + if (soc->wmi_service_bitmap) { + qdf_mem_free(soc->wmi_service_bitmap); + soc->wmi_service_bitmap = NULL; + } + + if (soc->wmi_ext_service_bitmap) { + qdf_mem_free(soc->wmi_ext_service_bitmap); + soc->wmi_ext_service_bitmap = NULL; + } + + /* Decrease the ref count once refcount infra is present */ + soc->wmi_psoc = NULL; + qdf_mem_free(soc); +} + +/** + * wmi_unified_remove_work() - detach for WMI work + * @wmi_handle: 
handle to WMI + * + * A function that does not fully detach WMI, but just remove work + * queue items associated with it. This is used to make sure that + * before any other processing code that may destroy related contexts + * (HTC, etc), work queue processing on WMI has already been stopped. + * + * Return: None + */ +void +wmi_unified_remove_work(struct wmi_unified *wmi_handle) +{ + wmi_buf_t buf; + + qdf_flush_workqueue(0, wmi_handle->wmi_rx_work_queue); + qdf_spin_lock_bh(&wmi_handle->eventq_lock); + buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue); + while (buf) { + qdf_nbuf_free(buf); + buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue); + } + qdf_spin_unlock_bh(&wmi_handle->eventq_lock); +} + +/** + * wmi_htc_tx_complete() - Process htc tx completion + * + * @ctx: handle to wmi + * @htc_packet: pointer to htc packet + * + * @Return: none. + */ +static void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt) +{ + struct wmi_soc *soc = (struct wmi_soc *) ctx; + wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt); + u_int8_t *buf_ptr; + u_int32_t len; + struct wmi_unified *wmi_handle; +#ifdef WMI_INTERFACE_EVENT_LOGGING + uint32_t cmd_id; +#endif + + ASSERT(wmi_cmd_buf); + wmi_handle = wmi_get_pdev_ep(soc, htc_pkt->Endpoint); + if (wmi_handle == NULL) { + WMI_LOGE("%s: Unable to get wmi handle\n", __func__); + QDF_ASSERT(0); + return; + } +#ifdef WMI_INTERFACE_EVENT_LOGGING + if (wmi_handle && wmi_handle->log_info.wmi_logging_enable) { + cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), + WMI_CMD_HDR, COMMANDID); + + qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock); + /* Record 16 bytes of WMI cmd tx complete data + - exclude TLV and WMI headers */ + if (wmi_handle->ops->is_management_record(cmd_id)) { + WMI_MGMT_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id, + qdf_nbuf_data(wmi_cmd_buf) + + wmi_handle->log_info.buf_offset_command); + } else { + WMI_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id, + qdf_nbuf_data(wmi_cmd_buf) + + 
wmi_handle->log_info.buf_offset_command); + } + + qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock); + } +#endif + buf_ptr = (u_int8_t *) wmi_buf_data(wmi_cmd_buf); + len = qdf_nbuf_len(wmi_cmd_buf); + qdf_mem_zero(buf_ptr, len); + wmi_buf_free(wmi_cmd_buf); + qdf_mem_free(htc_pkt); + qdf_atomic_dec(&wmi_handle->pending_cmds); +} + +/** + * wmi_connect_pdev_htc_service() - WMI API to get connect to HTC service + * + * @wmi_handle: handle to WMI. + * @pdev_idx: Pdev index + * + * @Return: status. + */ +static int wmi_connect_pdev_htc_service(struct wmi_soc *soc, + uint32_t pdev_idx) +{ + int status; + struct htc_service_connect_resp response; + struct htc_service_connect_req connect; + + OS_MEMZERO(&connect, sizeof(connect)); + OS_MEMZERO(&response, sizeof(response)); + + /* meta data is unused for now */ + connect.pMetaData = NULL; + connect.MetaDataLength = 0; + /* these fields are the same for all service endpoints */ + connect.EpCallbacks.pContext = soc; + connect.EpCallbacks.EpTxCompleteMultiple = + NULL /* Control path completion ar6000_tx_complete */; + connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */; + connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */; + connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */; + connect.EpCallbacks.EpTxComplete = + wmi_htc_tx_complete /* ar6000_tx_queue_full */; + + /* connect to control service */ + connect.service_id = soc->svc_ids[pdev_idx]; + status = htc_connect_service(soc->htc_handle, &connect, + &response); + + + if (status != EOK) { + qdf_print + ("Failed to connect to WMI CONTROL service status:%d\n", + status); + return status; + } + + soc->wmi_endpoint_id[pdev_idx] = response.Endpoint; + soc->max_msg_len[pdev_idx] = response.MaxMsgLength; + + return 0; +} + +/** + * wmi_unified_connect_htc_service() - WMI API to get connect to HTC service + * + * @wmi_handle: handle to WMI. + * + * @Return: status. 
+ */ +QDF_STATUS +wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle, + void *htc_handle) +{ + uint32_t i; + uint8_t wmi_ep_count; + + wmi_handle->soc->htc_handle = htc_handle; + + wmi_ep_count = htc_get_wmi_endpoint_count(htc_handle); + if (wmi_ep_count > WMI_MAX_RADIOS) + return QDF_STATUS_E_FAULT; + + for (i = 0; i < wmi_ep_count; i++) + wmi_connect_pdev_htc_service(wmi_handle->soc, i); + + wmi_handle->htc_handle = htc_handle; + wmi_handle->wmi_endpoint_id = wmi_handle->soc->wmi_endpoint_id[0]; + wmi_handle->max_msg_len = wmi_handle->soc->max_msg_len[0]; + + return QDF_STATUS_SUCCESS; +} + +/** + * wmi_get_host_credits() - WMI API to get updated host_credits + * + * @wmi_handle: handle to WMI. + * + * @Return: updated host_credits. + */ +int wmi_get_host_credits(wmi_unified_t wmi_handle) +{ + int host_credits = 0; + + htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle, + &host_credits); + return host_credits; +} + +/** + * wmi_get_pending_cmds() - WMI API to get WMI Pending Commands in the HTC + * queue + * + * @wmi_handle: handle to WMI. + * + * @Return: Pending Commands in the HTC queue. + */ +int wmi_get_pending_cmds(wmi_unified_t wmi_handle) +{ + return qdf_atomic_read(&wmi_handle->pending_cmds); +} + +/** + * wmi_set_target_suspend() - WMI API to set target suspend state + * + * @wmi_handle: handle to WMI. + * @val: suspend state boolean. + * + * @Return: none. + */ +void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val) +{ + qdf_atomic_set(&wmi_handle->is_target_suspended, val); +} + +/** + * WMI API to set crash injection state + * @param wmi_handle: handle to WMI. + * @param val: crash injection state boolean. + */ +void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag) +{ + wmi_handle->tag_crash_inject = flag; +} + +/** + * WMI API to set bus suspend state + * @param wmi_handle: handle to WMI. + * @param val: suspend state boolean. 
+ */ +void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val) +{ + qdf_atomic_set(&wmi_handle->is_wow_bus_suspended, val); +} + +void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val) +{ + wmi_handle->tgt_force_assert_enable = val; +} + +/** + * wmi_stop() - generic function to block unified WMI command + * @wmi_handle: handle to WMI. + * + * @Return: success always. + */ +int +wmi_stop(wmi_unified_t wmi_handle) +{ + QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO, + "WMI Stop\n"); + wmi_handle->wmi_stopinprogress = 1; + return 0; +} + +#ifndef CONFIG_MCL +/** + * API to flush all the previous packets associated with the wmi endpoint + * + * @param wmi_handle : handle to WMI. + */ +void +wmi_flush_endpoint(wmi_unified_t wmi_handle) +{ + htc_flush_endpoint(wmi_handle->htc_handle, + wmi_handle->wmi_endpoint_id, 0); +} +qdf_export_symbol(wmi_flush_endpoint); + +/** + * wmi_pdev_id_conversion_enable() - API to enable pdev_id conversion in WMI + * By default pdev_id conversion is not done in WMI. + * This API can be used enable conversion in WMI. + * @param wmi_handle : handle to WMI + * Return none + */ +void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle) +{ + if (wmi_handle->target_type == WMI_TLV_TARGET) + wmi_handle->ops->wmi_pdev_id_conversion_enable(wmi_handle); +} + +#endif diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_action_oui_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_action_oui_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..705f403387c4a0a594876725b11615ae0da4b25c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_action_oui_tlv.c @@ -0,0 +1,289 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wmi_unified_action_oui_tlv.h" + +bool wmi_get_action_oui_id(enum action_oui_id action_id, + wmi_vendor_oui_action_id *id) +{ + switch (action_id) { + case ACTION_OUI_CONNECT_1X1: + *id = WMI_VENDOR_OUI_ACTION_CONNECTION_1X1; + return true; + + case ACTION_OUI_ITO_EXTENSION: + *id = WMI_VENDOR_OUI_ACTION_ITO_EXTENSION; + return true; + + case ACTION_OUI_CCKM_1X1: + *id = WMI_VENDOR_OUI_ACTION_CCKM_1X1; + return true; + + case ACTION_OUI_ITO_ALTERNATE: + *id = WMI_VENDOR_OUI_ACTION_ALT_ITO; + return true; + + case ACTION_OUI_SWITCH_TO_11N_MODE: + *id = WMI_VENDOR_OUI_ACTION_SWITCH_TO_11N_MODE; + return true; + + case ACTION_OUI_CONNECT_1X1_WITH_1_CHAIN: + *id = WMI_VENDOR_OUI_ACTION_CONNECTION_1X1_NUM_TX_RX_CHAINS_1; + return true; + + case ACTION_OUI_DISABLE_AGGRESSIVE_TX: + *id = WMI_VENDOR_OUI_ACTION_DISABLE_AGGRESSIVE_TX; + return true; + + default: + return false; + } +} + +uint32_t wmi_get_action_oui_info_mask(uint32_t info_mask) +{ + uint32_t info_presence = 0; + + if (info_mask & ACTION_OUI_INFO_OUI) + info_presence |= WMI_BEACON_INFO_PRESENCE_OUI_EXT; + + if (info_mask & ACTION_OUI_INFO_MAC_ADDRESS) + info_presence |= WMI_BEACON_INFO_PRESENCE_MAC_ADDRESS; + + if (info_mask & 
ACTION_OUI_INFO_AP_CAPABILITY_NSS) + info_presence |= WMI_BEACON_INFO_PRESENCE_AP_CAPABILITY_NSS; + + if (info_mask & ACTION_OUI_INFO_AP_CAPABILITY_HT) + info_presence |= WMI_BEACON_INFO_PRESENCE_AP_CAPABILITY_HT; + + if (info_mask & ACTION_OUI_INFO_AP_CAPABILITY_VHT) + info_presence |= WMI_BEACON_INFO_PRESENCE_AP_CAPABILITY_VHT; + + if (info_mask & ACTION_OUI_INFO_AP_CAPABILITY_BAND) + info_presence |= WMI_BEACON_INFO_PRESENCE_AP_CAPABILITY_BAND; + + return info_presence; +} + +void wmi_fill_oui_extensions(struct action_oui_extension *extension, + uint32_t no_oui_extns, + wmi_vendor_oui_ext *cmd_ext) +{ + uint32_t i; + uint32_t buffer_length; + + for (i = 0; i < no_oui_extns; i++) { + WMITLV_SET_HDR(&cmd_ext->tlv_header, + WMITLV_TAG_STRUC_wmi_vendor_oui_ext, + WMITLV_GET_STRUCT_TLVLEN(wmi_vendor_oui_ext)); + cmd_ext->info_presence_bit_mask = + wmi_get_action_oui_info_mask(extension->info_mask); + + cmd_ext->oui_header_length = extension->oui_length; + cmd_ext->oui_data_length = extension->data_length; + cmd_ext->mac_address_length = extension->mac_addr_length; + cmd_ext->capability_data_length = + extension->capability_length; + + buffer_length = extension->oui_length + + extension->data_length + + extension->data_mask_length + + extension->mac_addr_length + + extension->mac_mask_length + + extension->capability_length; + + cmd_ext->buf_data_length = buffer_length + 1; + + cmd_ext++; + extension++; + } +} + +QDF_STATUS +wmi_fill_oui_extensions_buffer(struct action_oui_extension *extension, + wmi_vendor_oui_ext *cmd_ext, + uint32_t no_oui_extns, uint32_t rem_var_buf_len, + uint8_t *var_buf) +{ + uint8_t i; + + for (i = 0; i < (uint8_t)no_oui_extns; i++) { + if ((rem_var_buf_len - cmd_ext->buf_data_length) < 0) { + WMI_LOGE(FL("Invalid action oui command length")); + return QDF_STATUS_E_INVAL; + } + + var_buf[0] = i; + var_buf++; + + if (extension->oui_length) { + qdf_mem_copy(var_buf, extension->oui, + extension->oui_length); + var_buf += extension->oui_length; + 
} + + if (extension->data_length) { + qdf_mem_copy(var_buf, extension->data, + extension->data_length); + var_buf += extension->data_length; + } + + if (extension->data_mask_length) { + qdf_mem_copy(var_buf, extension->data_mask, + extension->data_mask_length); + var_buf += extension->data_mask_length; + } + + if (extension->mac_addr_length) { + qdf_mem_copy(var_buf, extension->mac_addr, + extension->mac_addr_length); + var_buf += extension->mac_addr_length; + } + + if (extension->mac_mask_length) { + qdf_mem_copy(var_buf, extension->mac_mask, + extension->mac_mask_length); + var_buf += extension->mac_mask_length; + } + + if (extension->capability_length) { + qdf_mem_copy(var_buf, extension->capability, + extension->capability_length); + var_buf += extension->capability_length; + } + + rem_var_buf_len -= cmd_ext->buf_data_length; + cmd_ext++; + extension++; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +send_action_oui_cmd_tlv(wmi_unified_t wmi_handle, + struct action_oui_request *req) +{ + wmi_pdev_config_vendor_oui_action_fixed_param *cmd; + wmi_vendor_oui_ext *cmd_ext; + wmi_buf_t wmi_buf; + struct action_oui_extension *extension; + uint32_t len; + uint32_t i; + uint8_t *buf_ptr; + uint32_t no_oui_extns; + uint32_t total_no_oui_extns; + uint32_t var_buf_len = 0; + wmi_vendor_oui_action_id action_id; + bool valid; + uint32_t rem_var_buf_len; + QDF_STATUS status; + + if (!req) { + WMI_LOGE(FL("action oui is empty")); + return QDF_STATUS_E_INVAL; + } + + no_oui_extns = req->no_oui_extensions; + total_no_oui_extns = req->total_no_oui_extensions; + + len = sizeof(*cmd); + len += WMI_TLV_HDR_SIZE; /* Array of wmi_vendor_oui_ext structures */ + + if (!no_oui_extns || + no_oui_extns > WMI_MAX_VENDOR_OUI_ACTION_SUPPORTED_PER_ACTION || + (total_no_oui_extns > WMI_VENDOR_OUI_ACTION_MAX_ACTION_ID * + WMI_MAX_VENDOR_OUI_ACTION_SUPPORTED_PER_ACTION)) { + WMI_LOGE(FL("Invalid number of action oui extensions")); + return QDF_STATUS_E_INVAL; + } + + valid = 
wmi_get_action_oui_id(req->action_id, &action_id); + if (!valid) { + WMI_LOGE(FL("Invalid action id")); + return QDF_STATUS_E_INVAL; + } + + len += no_oui_extns * sizeof(*cmd_ext); + len += WMI_TLV_HDR_SIZE; /* Variable length buffer */ + + extension = req->extension; + for (i = 0; i < no_oui_extns; i++) { + var_buf_len += extension->oui_length + + extension->data_length + + extension->data_mask_length + + extension->mac_addr_length + + extension->mac_mask_length + + extension->capability_length; + extension++; + } + + var_buf_len += no_oui_extns; /* to store indexes */ + rem_var_buf_len = var_buf_len; + var_buf_len = (var_buf_len + 3) & ~0x3; + len += var_buf_len; + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE(FL("Failed to allocate wmi buffer")); + return QDF_STATUS_E_FAILURE; + } + + buf_ptr = (uint8_t *)wmi_buf_data(wmi_buf); + cmd = (wmi_pdev_config_vendor_oui_action_fixed_param *)buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_config_vendor_oui_action_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_config_vendor_oui_action_fixed_param)); + + cmd->action_id = action_id; + cmd->total_num_vendor_oui = total_no_oui_extns; + cmd->num_vendor_oui_ext = no_oui_extns; + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + no_oui_extns * sizeof(*cmd_ext)); + buf_ptr += WMI_TLV_HDR_SIZE; + cmd_ext = (wmi_vendor_oui_ext *)buf_ptr; + wmi_fill_oui_extensions(req->extension, no_oui_extns, cmd_ext); + + buf_ptr += no_oui_extns * sizeof(*cmd_ext); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, var_buf_len); + buf_ptr += WMI_TLV_HDR_SIZE; + status = wmi_fill_oui_extensions_buffer(req->extension, + cmd_ext, no_oui_extns, + rem_var_buf_len, buf_ptr); + if (!QDF_IS_STATUS_SUCCESS(status)) { + wmi_buf_free(wmi_buf); + wmi_buf = NULL; + return QDF_STATUS_E_INVAL; + } + + buf_ptr += var_buf_len; + + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_PDEV_CONFIG_VENDOR_OUI_ACTION_CMDID)) { + 
WMI_LOGE(FL("WMI_PDEV_CONFIG_VENDOR_OUI_ACTION send fail")); + wmi_buf_free(wmi_buf); + wmi_buf = NULL; + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_apf_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_apf_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..b687bc088d38404f8130a20b1dbe2cb5c530edc9 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_apf_tlv.c @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "wmi_unified_apf_tlv.h" + +QDF_STATUS wmi_send_set_active_apf_mode_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, + enum wmi_host_active_apf_mode + ucast_mode, + enum wmi_host_active_apf_mode + mcast_bcast_mode) +{ + const WMITLV_TAG_ID tag_id = + WMITLV_TAG_STRUC_wmi_bpf_set_vdev_active_mode_cmd_fixed_param; + const uint32_t tlv_len = WMITLV_GET_STRUCT_TLVLEN( + wmi_bpf_set_vdev_active_mode_cmd_fixed_param); + QDF_STATUS status; + wmi_bpf_set_vdev_active_mode_cmd_fixed_param *cmd; + wmi_buf_t buf; + + WMI_LOGD("Sending WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID(%u, %d, %d)", + vdev_id, ucast_mode, mcast_bcast_mode); + + /* allocate command buffer */ + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + /* set TLV header */ + cmd = (wmi_bpf_set_vdev_active_mode_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, tag_id, tlv_len); + + /* populate data */ + cmd->vdev_id = vdev_id; + cmd->uc_mode = ucast_mode; + cmd->mcbc_mode = mcast_bcast_mode; + + /* send to FW */ + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID:%d", + status); + wmi_buf_free(buf); + return status; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS wmi_send_apf_enable_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, + bool enable) +{ + wmi_bpf_set_vdev_enable_cmd_fixed_param *cmd; + wmi_buf_t buf; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_bpf_set_vdev_enable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_bpf_set_vdev_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_bpf_set_vdev_enable_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + 
cmd->is_enabled = enable; + + if (wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_BPF_SET_VDEV_ENABLE_CMDID)) { + WMI_LOGE("%s: Failed to enable/disable APF interpreter", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wmi_send_apf_write_work_memory_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_apf_write_memory_params + *apf_write_params) +{ + wmi_bpf_set_vdev_work_memory_cmd_fixed_param *cmd; + uint32_t wmi_buf_len; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint32_t aligned_len = 0; + + wmi_buf_len = sizeof(*cmd); + if (apf_write_params->length) { + aligned_len = roundup(apf_write_params->length, + sizeof(A_UINT32)); + + wmi_buf_len += WMI_TLV_HDR_SIZE + aligned_len; + + } + + buf = wmi_buf_alloc(wmi_handle, wmi_buf_len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = wmi_buf_data(buf); + cmd = (wmi_bpf_set_vdev_work_memory_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_bpf_set_vdev_work_memory_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_bpf_set_vdev_work_memory_cmd_fixed_param)); + cmd->vdev_id = apf_write_params->vdev_id; + cmd->bpf_version = apf_write_params->apf_version; + cmd->program_len = apf_write_params->program_len; + cmd->addr_offset = apf_write_params->addr_offset; + cmd->length = apf_write_params->length; + + if (apf_write_params->length) { + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + aligned_len); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, apf_write_params->buf, + apf_write_params->length); + } + + if (wmi_unified_cmd_send(wmi_handle, buf, wmi_buf_len, + WMI_BPF_SET_VDEV_WORK_MEMORY_CMDID)) { + WMI_LOGE("%s: Failed to write APF work memory", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wmi_send_apf_read_work_memory_cmd_tlv(wmi_unified_t wmi_handle, + 
struct wmi_apf_read_memory_params + *apf_read_params) +{ + wmi_bpf_get_vdev_work_memory_cmd_fixed_param *cmd; + wmi_buf_t buf; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_bpf_get_vdev_work_memory_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_bpf_get_vdev_work_memory_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_bpf_get_vdev_work_memory_cmd_fixed_param)); + cmd->vdev_id = apf_read_params->vdev_id; + cmd->addr_offset = apf_read_params->addr_offset; + cmd->length = apf_read_params->length; + + if (wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_BPF_GET_VDEV_WORK_MEMORY_CMDID)) { + WMI_LOGE("%s: Failed to get APF work memory", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +QDF_STATUS +wmi_extract_apf_read_memory_resp_event_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_apf_read_memory_resp_event_params + *resp) +{ + WMI_BPF_GET_VDEV_WORK_MEMORY_RESP_EVENTID_param_tlvs *param_buf; + wmi_bpf_get_vdev_work_memory_resp_evt_fixed_param *data_event; + + param_buf = evt_buf; + if (!param_buf) { + WMI_LOGE("encrypt decrypt resp evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + data_event = param_buf->fixed_param; + + resp->vdev_id = data_event->vdev_id; + resp->offset = data_event->offset; + resp->more_data = data_event->fragment; + + if (data_event->length > param_buf->num_data) { + WMI_LOGE("FW msg data_len %d more than TLV hdr %d", + data_event->length, + param_buf->num_data); + return QDF_STATUS_E_INVAL; + } + + if (data_event->length && param_buf->data) { + resp->length = data_event->length; + resp->data = (uint8_t *)param_buf->data; + } + + return QDF_STATUS_SUCCESS; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_api.c new file 
mode 100644 index 0000000000000000000000000000000000000000..e8fd26d3a1ab4b481938301419c0c3d383f48ede --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_api.c @@ -0,0 +1,7551 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "wmi_unified_priv.h" +#include "wmi_unified_param.h" +#include "qdf_module.h" + +static const wmi_host_channel_width mode_to_width[WMI_HOST_MODE_MAX] = { + [WMI_HOST_MODE_11A] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11G] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11B] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11GONLY] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11NA_HT20] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11NG_HT20] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11AC_VHT20] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11AC_VHT20_2G] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11NA_HT40] = WMI_HOST_CHAN_WIDTH_40, + [WMI_HOST_MODE_11NG_HT40] = WMI_HOST_CHAN_WIDTH_40, + [WMI_HOST_MODE_11AC_VHT40] = WMI_HOST_CHAN_WIDTH_40, + [WMI_HOST_MODE_11AC_VHT40_2G] = WMI_HOST_CHAN_WIDTH_40, + [WMI_HOST_MODE_11AC_VHT80] = WMI_HOST_CHAN_WIDTH_80, + [WMI_HOST_MODE_11AC_VHT80_2G] = WMI_HOST_CHAN_WIDTH_80, +#if CONFIG_160MHZ_SUPPORT + [WMI_HOST_MODE_11AC_VHT80_80] = WMI_HOST_CHAN_WIDTH_80P80, + [WMI_HOST_MODE_11AC_VHT160] = WMI_HOST_CHAN_WIDTH_160, +#endif + +#if SUPPORT_11AX + [WMI_HOST_MODE_11AX_HE20] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11AX_HE40] = WMI_HOST_CHAN_WIDTH_40, + [WMI_HOST_MODE_11AX_HE80] = WMI_HOST_CHAN_WIDTH_80, + [WMI_HOST_MODE_11AX_HE80_80] = WMI_HOST_CHAN_WIDTH_80P80, + [WMI_HOST_MODE_11AX_HE160] = WMI_HOST_CHAN_WIDTH_160, + [WMI_HOST_MODE_11AX_HE20_2G] = WMI_HOST_CHAN_WIDTH_20, + [WMI_HOST_MODE_11AX_HE40_2G] = WMI_HOST_CHAN_WIDTH_40, + [WMI_HOST_MODE_11AX_HE80_2G] = WMI_HOST_CHAN_WIDTH_80, +#endif +}; + +/** + * wmi_unified_vdev_create_send() - send VDEV create command to fw + * @wmi_handle: wmi handle + * @param: pointer to hold vdev create parameter + * @macaddr: vdev mac address + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_create_send(void *wmi_hdl, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct vdev_create_params *param) +{ + wmi_unified_t wmi_handle = 
(wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_vdev_create_cmd) + return wmi_handle->ops->send_vdev_create_cmd(wmi_handle, + macaddr, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_vdev_delete_send() - send VDEV delete command to fw + * @wmi_handle: wmi handle + * @if_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_delete_send(void *wmi_hdl, + uint8_t if_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_vdev_delete_cmd) + return wmi_handle->ops->send_vdev_delete_cmd(wmi_handle, + if_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_vdev_nss_chain_params_send(void *wmi_hdl, + uint8_t vdev_id, + struct mlme_nss_chains *user_cfg) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (wmi_handle->ops->send_vdev_nss_chain_params_cmd) + return wmi_handle->ops->send_vdev_nss_chain_params_cmd( + wmi_handle, + vdev_id, + user_cfg); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_vdev_stop_send() - send vdev stop command to fw + * @wmi: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_stop_send(void *wmi_hdl, + uint8_t vdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_vdev_stop_cmd) + return wmi_handle->ops->send_vdev_stop_cmd(wmi_handle, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_vdev_down_send() - send vdev down command to fw + * @wmi: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_down_send(void *wmi_hdl, uint8_t vdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_vdev_down_cmd) + return wmi_handle->ops->send_vdev_down_cmd(wmi_handle, vdev_id); + + return 
QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_vdev_start_send() - send vdev start command to fw + * @wmi: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_start_send(void *wmi_hdl, + struct vdev_start_params *req) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_vdev_start_cmd) + return wmi_handle->ops->send_vdev_start_cmd(wmi_handle, req); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_vdev_set_nac_rssi_send() - send NAC_RSSI command to fw + * @wmi: wmi handle + * @req: pointer to hold nac rssi request data + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_set_nac_rssi_send(void *wmi_hdl, + struct vdev_scan_nac_rssi_params *req) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_vdev_set_nac_rssi_cmd) + return wmi_handle->ops->send_vdev_set_nac_rssi_cmd(wmi_handle, req); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_hidden_ssid_vdev_restart_send() - restart vdev to set hidden ssid + * @wmi: wmi handle + * @restart_params: vdev restart params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_hidden_ssid_vdev_restart_send(void *wmi_hdl, + struct hidden_ssid_vdev_restart_params *restart_params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_hidden_ssid_vdev_restart_cmd) + return wmi_handle->ops->send_hidden_ssid_vdev_restart_cmd( + wmi_handle, restart_params); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_peer_flush_tids_send() - flush peer tids packets in fw + * @wmi: wmi handle + * @peer_addr: peer mac address + * @param: pointer to hold peer flush tid parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS 
wmi_unified_peer_flush_tids_send(void *wmi_hdl, + uint8_t peer_addr[IEEE80211_ADDR_LEN], + struct peer_flush_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_peer_flush_tids_cmd) + return wmi_handle->ops->send_peer_flush_tids_cmd(wmi_handle, + peer_addr, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_peer_delete_send() - send PEER delete command to fw + * @wmi: wmi handle + * @peer_addr: peer mac addr + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_peer_delete_send(void *wmi_hdl, + uint8_t + peer_addr[IEEE80211_ADDR_LEN], + uint8_t vdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_peer_delete_cmd) + return wmi_handle->ops->send_peer_delete_cmd(wmi_handle, + peer_addr, vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_peer_unmap_conf_send() - send PEER unmap conf command to fw + * @wmi: wmi handle + * @vdev_id: vdev id + * @peer_id_cnt: number of peer id + * @peer_id_list: list of peer ids + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_peer_unmap_conf_send(void *wmi_hdl, + uint8_t vdev_id, + uint32_t peer_id_cnt, + uint16_t *peer_id_list) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (wmi_handle->ops->send_peer_unmap_conf_cmd) + return wmi_handle->ops->send_peer_unmap_conf_cmd(wmi_handle, + vdev_id, peer_id_cnt, peer_id_list); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_set_peer_param() - set peer parameter in fw + * @wmi_ctx: wmi handle + * @peer_addr: peer mac address + * @param : pointer to hold peer set parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_set_peer_param_send(void *wmi_hdl, + uint8_t peer_addr[IEEE80211_ADDR_LEN], + struct peer_set_params *param) +{ + wmi_unified_t wmi_handle = 
(wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_peer_param_cmd) + return wmi_handle->ops->send_peer_param_cmd(wmi_handle, + peer_addr, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_vdev_up_send() - send vdev up command in fw + * @wmi: wmi handle + * @bssid: bssid + * @vdev_up_params: pointer to hold vdev up parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_up_send(void *wmi_hdl, + uint8_t bssid[IEEE80211_ADDR_LEN], + struct vdev_up_params *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_vdev_up_cmd) + return wmi_handle->ops->send_vdev_up_cmd(wmi_handle, bssid, + params); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_peer_create_send() - send peer create command to fw + * @wmi: wmi handle + * @peer_addr: peer mac address + * @peer_type: peer type + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_peer_create_send(void *wmi_hdl, + struct peer_create_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_peer_create_cmd) + return wmi_handle->ops->send_peer_create_cmd(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_peer_rx_reorder_queue_setup_send() - send rx reorder queue + * setup command to fw + * @wmi: wmi handle + * @rx_reorder_queue_setup_params: Rx reorder queue setup parameters + * + * Return: QDF_STATUS for success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_peer_rx_reorder_queue_setup_send(void *wmi_hdl, + struct rx_reorder_queue_setup_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_peer_rx_reorder_queue_setup_cmd) + return wmi_handle->ops->send_peer_rx_reorder_queue_setup_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * 
wmi_unified_peer_rx_reorder_queue_remove_send() - send rx reorder queue + * remove command to fw + * @wmi: wmi handle + * @rx_reorder_queue_remove_params: Rx reorder queue remove parameters + * + * Return: QDF_STATUS for success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_peer_rx_reorder_queue_remove_send(void *wmi_hdl, + struct rx_reorder_queue_remove_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_peer_rx_reorder_queue_remove_cmd) + return wmi_handle->ops->send_peer_rx_reorder_queue_remove_cmd( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_SUPPORT_GREEN_AP +/** + * wmi_unified_green_ap_ps_send() - enable green ap powersave command + * @wmi_handle: wmi handle + * @value: value + * @pdev_id: pdev id to have radio context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_green_ap_ps_send(void *wmi_hdl, + uint32_t value, uint8_t pdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_green_ap_ps_cmd) + return wmi_handle->ops->send_green_ap_ps_cmd(wmi_handle, value, + pdev_id); + + return QDF_STATUS_E_FAILURE; +} +#else +QDF_STATUS wmi_unified_green_ap_ps_send(void *wmi_hdl, + uint32_t value, uint8_t pdev_id) +{ + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_SUPPORT_GREEN_AP */ + +/** + * wmi_unified_pdev_utf_cmd() - send utf command to fw + * @wmi_handle: wmi handle + * @param: pointer to pdev_utf_params + * @mac_id: mac id to have radio context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_pdev_utf_cmd_send(void *wmi_hdl, + struct pdev_utf_params *param, + uint8_t mac_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_pdev_utf_cmd) + return wmi_handle->ops->send_pdev_utf_cmd(wmi_handle, param, + mac_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * 
wmi_unified_pdev_param_send() - set pdev parameters + * @wmi_handle: wmi handle + * @param: pointer to pdev parameter + * @mac_id: radio context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures, + * errno on failure + */ +QDF_STATUS +wmi_unified_pdev_param_send(void *wmi_hdl, + struct pdev_params *param, + uint8_t mac_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_pdev_param_cmd) + return wmi_handle->ops->send_pdev_param_cmd(wmi_handle, param, + mac_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_suspend_send() - WMI suspend function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold suspend parameter + * @mac_id: radio context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_suspend_send(void *wmi_hdl, + struct suspend_params *param, + uint8_t mac_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_suspend_cmd) + return wmi_handle->ops->send_suspend_cmd(wmi_handle, param, + mac_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_resume_send - WMI resume function + * @param wmi_handle : handle to WMI. + * @mac_id: radio context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_resume_send(void *wmi_hdl, + uint8_t mac_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_resume_cmd) + return wmi_handle->ops->send_resume_cmd(wmi_handle, + mac_id); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef FEATURE_WLAN_D0WOW +/** + * wmi_unified_d0wow_enable_send() - WMI d0 wow enable function + * @param wmi_handle: handle to WMI. 
+ * @mac_id: radio context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_d0wow_enable_send(void *wmi_hdl, + uint8_t mac_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_d0wow_enable_cmd) + return wmi_handle->ops->send_d0wow_enable_cmd( + wmi_handle, mac_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_d0wow_disable_send() - WMI d0 wow disable function + * @param wmi_handle: handle to WMI. + * @mac_id: radio context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_d0wow_disable_send(void *wmi_hdl, + uint8_t mac_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_d0wow_disable_cmd) + return wmi_handle->ops->send_d0wow_disable_cmd( + wmi_handle, mac_id); + + return QDF_STATUS_E_FAILURE; +} +#endif + +/** + * wmi_unified_wow_enable_send() - WMI wow enable function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold wow enable parameter + * @mac_id: radio context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_wow_enable_send(void *wmi_hdl, + struct wow_cmd_params *param, + uint8_t mac_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_wow_enable_cmd) + return wmi_handle->ops->send_wow_enable_cmd(wmi_handle, param, + mac_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_wow_wakeup_send() - WMI wow wakeup function + * @param wmi_hdl : handle to WMI. 
+ * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_wow_wakeup_send(void *wmi_hdl) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_wow_wakeup_cmd) + return wmi_handle->ops->send_wow_wakeup_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_wow_add_wakeup_event_send() - WMI wow wakeup function + * @param wmi_handle : handle to WMI. + * @param: pointer to wow wakeup event parameter structure + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_wow_add_wakeup_event_send(void *wmi_hdl, + struct wow_add_wakeup_params *param) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->send_wow_add_wakeup_event_cmd) + return wmi->ops->send_wow_add_wakeup_event_cmd(wmi, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_wow_add_wakeup_pattern_send() - WMI wow wakeup pattern function + * @param wmi_handle : handle to WMI. + * @param: pointer to wow wakeup pattern parameter structure + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_wow_add_wakeup_pattern_send(void *wmi_hdl, + struct wow_add_wakeup_pattern_params *param) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->send_wow_add_wakeup_pattern_cmd) + return wmi->ops->send_wow_add_wakeup_pattern_cmd(wmi, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_wow_remove_wakeup_pattern_send() - WMI wow wakeup pattern function + * @param wmi_handle : handle to WMI. 
+ * @param: pointer to wow wakeup pattern parameter structure + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_wow_remove_wakeup_pattern_send(void *wmi_hdl, + struct wow_remove_wakeup_pattern_params *param) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->send_wow_remove_wakeup_pattern_cmd) + return wmi->ops->send_wow_remove_wakeup_pattern_cmd(wmi, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_ap_ps_cmd_send() - set ap powersave parameters + * @wma_ctx: wma context + * @peer_addr: peer mac address + * @param: pointer to ap_ps parameter structure + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_ap_ps_cmd_send(void *wmi_hdl, + uint8_t *peer_addr, + struct ap_ps_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_ap_ps_param_cmd) + return wmi_handle->ops->send_set_ap_ps_param_cmd(wmi_handle, + peer_addr, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_sta_ps_cmd_send() - set sta powersave parameters + * @wma_ctx: wma context + * @peer_addr: peer mac address + * @param: pointer to sta_ps parameter structure + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_sta_ps_cmd_send(void *wmi_hdl, + struct sta_ps_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_sta_ps_param_cmd) + return wmi_handle->ops->send_set_sta_ps_param_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_crash_inject() - inject fw crash + * @wma_handle: wma handle + * @param: ponirt to crash inject parameter structure + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_crash_inject(void *wmi_hdl, + struct crash_inject *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) 
wmi_hdl; + + if (wmi_handle->ops->send_crash_inject_cmd) + return wmi_handle->ops->send_crash_inject_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef FEATURE_FW_LOG_PARSING +/** + * wmi_unified_dbglog_cmd_send() - set debug log level + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold dbglog level parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_dbglog_cmd_send(void *wmi_hdl, + struct dbglog_params *dbglog_param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_dbglog_cmd) + return wmi_handle->ops->send_dbglog_cmd(wmi_handle, + dbglog_param); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_unified_dbglog_cmd_send); +#endif + +/** + * wmi_unified_vdev_set_param_send() - WMI vdev set parameter function + * @param wmi_handle : handle to WMI. + * @param macaddr : MAC address + * @param param : pointer to hold vdev set parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_set_param_send(void *wmi_hdl, + struct vdev_set_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_vdev_set_param_cmd) + return wmi_handle->ops->send_vdev_set_param_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_stats_request_send() - WMI request stats function + * @param wmi_handle : handle to WMI. 
+ * @param macaddr : MAC address + * @param param : pointer to hold stats request parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_stats_request_send(void *wmi_hdl, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct stats_request_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_stats_request_cmd) + return wmi_handle->ops->send_stats_request_cmd(wmi_handle, + macaddr, param); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef CONFIG_MCL +/** + * wmi_unified_packet_log_enable_send() - WMI request stats function + * @param wmi_handle : handle to WMI. + * @param macaddr : MAC address + * @param param : pointer to hold stats request parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_packet_log_enable_send(void *wmi_hdl, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct packet_enable_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_packet_log_enable_cmd) + return wmi_handle->ops->send_packet_log_enable_cmd(wmi_handle, + macaddr, param); + + return QDF_STATUS_E_FAILURE; +} +#else +/** + * wmi_unified_packet_log_enable_send() - WMI request stats function + * @param wmi_handle : handle to WMI. 
+ * @param macaddr : MAC address + * @param param : pointer to hold stats request parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_packet_log_enable_send(void *wmi_hdl, + WMI_HOST_PKTLOG_EVENT PKTLOG_EVENT, uint8_t mac_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_packet_log_enable_cmd) + return wmi_handle->ops->send_packet_log_enable_cmd(wmi_handle, + PKTLOG_EVENT, mac_id); + + return QDF_STATUS_E_FAILURE; +} + +#endif +/** + * wmi_unified_packet_log_disable__send() - WMI pktlog disable function + * @param wmi_handle : handle to WMI. + * @param PKTLOG_EVENT : packet log event + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_packet_log_disable_send(void *wmi_hdl, uint8_t mac_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_packet_log_disable_cmd) + return wmi_handle->ops->send_packet_log_disable_cmd(wmi_handle, + mac_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_beacon_send_cmd() - WMI beacon send function + * @param wmi_handle : handle to WMI. + * @param macaddr : MAC address + * @param param : pointer to hold beacon send cmd parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_beacon_send_cmd(void *wmi_hdl, + struct beacon_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_beacon_send_cmd) + return wmi_handle->ops->send_beacon_send_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_beacon_tmpl_send_cmd() - WMI beacon send function + * @param wmi_handle : handle to WMI. 
+ * @param macaddr : MAC address + * @param param : pointer to hold beacon send cmd parameter + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_beacon_tmpl_send_cmd(void *wmi_hdl, + struct beacon_tmpl_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_beacon_tmpl_send_cmd) + return wmi_handle->ops->send_beacon_tmpl_send_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} +/** + * wmi_unified_peer_assoc_send() - WMI peer assoc function + * @param wmi_handle : handle to WMI. + * @param macaddr : MAC address + * @param param : pointer to peer assoc parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_peer_assoc_send(void *wmi_hdl, + struct peer_assoc_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_peer_assoc_cmd) + return wmi_handle->ops->send_peer_assoc_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_scan_start_cmd_send() - WMI scan start function + * @param wmi_handle : handle to WMI. + * @param macaddr : MAC address + * @param param : pointer to hold scan start cmd parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_scan_start_cmd_send(void *wmi_hdl, + struct scan_req_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_scan_start_cmd) + return wmi_handle->ops->send_scan_start_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_scan_stop_cmd_send() - WMI scan start function + * @param wmi_handle : handle to WMI. 
+ * @param macaddr : MAC address + * @param param : pointer to hold scan start cmd parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_scan_stop_cmd_send(void *wmi_hdl, + struct scan_cancel_param *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_scan_stop_cmd) + return wmi_handle->ops->send_scan_stop_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_scan_chan_list_cmd_send() - WMI scan channel list function + * @param wmi_handle : handle to WMI. + * @param macaddr : MAC address + * @param param : pointer to hold scan channel list parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_scan_chan_list_cmd_send(void *wmi_hdl, + struct scan_chan_list_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_scan_chan_list_cmd) + return wmi_handle->ops->send_scan_chan_list_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_mgmt_unified_cmd_send() - management cmd over wmi layer + * @wmi_hdl : handle to WMI. + * @param : pointer to hold mgmt cmd parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_mgmt_unified_cmd_send(void *wmi_hdl, + struct wmi_mgmt_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_mgmt_cmd) + return wmi_handle->ops->send_mgmt_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_offchan_data_tx_cmd_send() - Send offchan data tx cmd over wmi layer + * @wmi_hdl : handle to WMI. 
+ * @param : pointer to hold offchan data cmd parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_offchan_data_tx_cmd_send(void *wmi_hdl, + struct wmi_offchan_data_tx_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_offchan_data_tx_cmd) + return wmi_handle->ops->send_offchan_data_tx_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_modem_power_state() - set modem power state to fw + * @wmi_hdl: wmi handle + * @param_value: parameter value + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_modem_power_state(void *wmi_hdl, + uint32_t param_value) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_modem_power_state_cmd) + return wmi_handle->ops->send_modem_power_state_cmd(wmi_handle, + param_value); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_set_sta_ps_mode() - set sta powersave params in fw + * @wmi_hdl: wmi handle + * @vdev_id: vdev id + * @val: value + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure. + */ +QDF_STATUS wmi_unified_set_sta_ps_mode(void *wmi_hdl, + uint32_t vdev_id, uint8_t val) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_sta_ps_mode_cmd) + return wmi_handle->ops->send_set_sta_ps_mode_cmd(wmi_handle, + vdev_id, val); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_set_mimops() - set MIMO powersave + * @wmi_hdl: wmi handle + * @vdev_id: vdev id + * @value: value + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure. 
+ */ +QDF_STATUS wmi_unified_set_mimops(void *wmi_hdl, uint8_t vdev_id, int value) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_mimops_cmd) + return wmi_handle->ops->send_set_mimops_cmd(wmi_handle, + vdev_id, value); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_set_smps_params() - set smps params + * @wmi_hdl: wmi handle + * @vdev_id: vdev id + * @value: value + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure. + */ +QDF_STATUS wmi_unified_set_smps_params(void *wmi_hdl, uint8_t vdev_id, + int value) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_smps_params_cmd) + return wmi_handle->ops->send_set_smps_params_cmd(wmi_handle, + vdev_id, value); + + return QDF_STATUS_E_FAILURE; +} + + +/** + * wmi_set_p2pgo_oppps_req() - send p2p go opp power save request to fw + * @wmi_hdl: wmi handle + * @opps: p2p opp power save parameters + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_p2pgo_oppps_req(void *wmi_hdl, + struct p2p_ps_params *oppps) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_p2pgo_oppps_req_cmd) + return wmi_handle->ops->send_set_p2pgo_oppps_req_cmd(wmi_handle, + oppps); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_set_p2pgo_noa_req_cmd() - send p2p go noa request to fw + * @wmi_hdl: wmi handle + * @noa: p2p power save parameters + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_p2pgo_noa_req_cmd(void *wmi_hdl, + struct p2p_ps_params *noa) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_p2pgo_noa_req_cmd) + return wmi_handle->ops->send_set_p2pgo_noa_req_cmd(wmi_handle, + noa); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef CONVERGED_P2P_ENABLE +/** + * wmi_unified_p2p_lo_start_cmd() - send p2p lo start 
request to fw + * @wmi_hdl: wmi handle + * @param: p2p listen offload start parameters + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_p2p_lo_start_cmd(void *wmi_hdl, + struct p2p_lo_start *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (!wmi_handle) { + WMI_LOGE("wmi handle is null"); + return QDF_STATUS_E_INVAL; + } + + if (wmi_handle->ops->send_p2p_lo_start_cmd) + return wmi_handle->ops->send_p2p_lo_start_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_p2p_lo_stop_cmd() - send p2p lo stop request to fw + * @wmi_hdl: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_p2p_lo_stop_cmd(void *wmi_hdl, uint8_t vdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (!wmi_handle) { + WMI_LOGE("wmi handle is null"); + return QDF_STATUS_E_INVAL; + } + + if (wmi_handle->ops->send_p2p_lo_start_cmd) + return wmi_handle->ops->send_p2p_lo_stop_cmd(wmi_handle, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} +#endif /* End of CONVERGED_P2P_ENABLE */ + +/** + * wmi_get_temperature() - get pdev temperature req + * @wmi_hdl: wmi handle + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure. 
+ */ +QDF_STATUS wmi_unified_get_temperature(void *wmi_hdl) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_get_temperature_cmd) + return wmi_handle->ops->send_get_temperature_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_set_sta_uapsd_auto_trig_cmd() - set uapsd auto trigger command + * @wmi_hdl: wmi handle + * @end_set_sta_ps_mode_cmd: cmd parameter strcture + * + * This function sets the trigger + * uapsd params such as service interval, delay interval + * and suspend interval which will be used by the firmware + * to send trigger frames periodically when there is no + * traffic on the transmit side. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure. + */ +QDF_STATUS +wmi_unified_set_sta_uapsd_auto_trig_cmd(void *wmi_hdl, + struct sta_uapsd_trig_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_sta_uapsd_auto_trig_cmd) + return wmi_handle->ops->send_set_sta_uapsd_auto_trig_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_FEATURE_DSRC +QDF_STATUS wmi_unified_ocb_start_timing_advert(struct wmi_unified *wmi_hdl, + struct ocb_timing_advert_param *timing_advert) +{ + if (wmi_hdl->ops->send_ocb_start_timing_advert_cmd) + return wmi_hdl->ops->send_ocb_start_timing_advert_cmd(wmi_hdl, + timing_advert); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_ocb_stop_timing_advert(struct wmi_unified *wmi_hdl, + struct ocb_timing_advert_param *timing_advert) +{ + if (wmi_hdl->ops->send_ocb_stop_timing_advert_cmd) + return wmi_hdl->ops->send_ocb_stop_timing_advert_cmd(wmi_hdl, + timing_advert); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_ocb_set_utc_time_cmd(struct wmi_unified *wmi_hdl, + struct ocb_utc_param *utc) +{ + if (wmi_hdl->ops->send_ocb_set_utc_time_cmd) + return wmi_hdl->ops->send_ocb_set_utc_time_cmd(wmi_hdl, utc); + + return QDF_STATUS_E_FAILURE; +} + 
+QDF_STATUS wmi_unified_ocb_get_tsf_timer(struct wmi_unified *wmi_hdl, + struct ocb_get_tsf_timer_param *req) +{ + if (wmi_hdl->ops->send_ocb_get_tsf_timer_cmd) + return wmi_hdl->ops->send_ocb_get_tsf_timer_cmd(wmi_hdl, + req->vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_dcc_get_stats_cmd(struct wmi_unified *wmi_hdl, + struct ocb_dcc_get_stats_param *get_stats_param) +{ + if (wmi_hdl->ops->send_dcc_get_stats_cmd) + return wmi_hdl->ops->send_dcc_get_stats_cmd(wmi_hdl, + get_stats_param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_dcc_clear_stats(struct wmi_unified *wmi_hdl, + struct ocb_dcc_clear_stats_param *clear_stats_param) +{ + if (wmi_hdl->ops->send_dcc_clear_stats_cmd) + return wmi_hdl->ops->send_dcc_clear_stats_cmd(wmi_hdl, + clear_stats_param->vdev_id, + clear_stats_param->dcc_stats_bitmap); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_dcc_update_ndl(struct wmi_unified *wmi_hdl, + struct ocb_dcc_update_ndl_param *update_ndl_param) +{ + if (wmi_hdl->ops->send_dcc_update_ndl_cmd) + return wmi_hdl->ops->send_dcc_update_ndl_cmd(wmi_hdl, + update_ndl_param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_ocb_set_config(struct wmi_unified *wmi_hdl, + struct ocb_config *config) +{ + if (wmi_hdl->ops->send_ocb_set_config_cmd) + return wmi_hdl->ops->send_ocb_set_config_cmd(wmi_hdl, + config); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_ocb_set_channel_config_resp(struct wmi_unified *wmi_hdl, + void *evt_buf, + uint32_t *status) +{ + if (wmi_hdl->ops->extract_ocb_chan_config_resp) + return wmi_hdl->ops->extract_ocb_chan_config_resp(wmi_hdl, + evt_buf, + status); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_ocb_tsf_timer(struct wmi_unified *wmi_hdl, + void *evt_buf, + struct ocb_get_tsf_timer_response *resp) +{ + if (wmi_hdl->ops->extract_ocb_tsf_timer) + return wmi_hdl->ops->extract_ocb_tsf_timer(wmi_hdl, + evt_buf, + resp); + + return 
QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_dcc_update_ndl_resp(struct wmi_unified *wmi_hdl, + void *evt_buf, struct ocb_dcc_update_ndl_response *resp) +{ + if (wmi_hdl->ops->extract_dcc_update_ndl_resp) + return wmi_hdl->ops->extract_dcc_update_ndl_resp(wmi_hdl, + evt_buf, + resp); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_dcc_stats(struct wmi_unified *wmi_hdl, + void *evt_buf, + struct ocb_dcc_get_stats_response **resp) +{ + if (wmi_hdl->ops->extract_dcc_stats) + return wmi_hdl->ops->extract_dcc_stats(wmi_hdl, + evt_buf, + resp); + + return QDF_STATUS_E_FAILURE; +} +#endif + +/** + * wmi_unified_set_enable_disable_mcc_adaptive_scheduler_cmd() - control mcc scheduler + * @wmi_handle: wmi handle + * @mcc_adaptive_scheduler: enable/disable + * + * This function enable/disable mcc adaptive scheduler in fw. + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_unified_set_enable_disable_mcc_adaptive_scheduler_cmd( + void *wmi_hdl, uint32_t mcc_adaptive_scheduler, + uint32_t pdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_enable_disable_mcc_adaptive_scheduler_cmd) + return wmi_handle->ops->send_set_enable_disable_mcc_adaptive_scheduler_cmd(wmi_handle, + mcc_adaptive_scheduler, pdev_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_set_mcc_channel_time_latency_cmd() - set MCC channel time latency + * @wmi: wmi handle + * @mcc_channel: mcc channel + * @mcc_channel_time_latency: MCC channel time latency. + * + * Currently used to set time latency for an MCC vdev/adapter using operating + * channel of it and channel number. The info is provided run time using + * iwpriv command: iwpriv setMccLatency . 
+ * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_mcc_channel_time_latency_cmd(void *wmi_hdl, + uint32_t mcc_channel_freq, uint32_t mcc_channel_time_latency) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_mcc_channel_time_latency_cmd) + return wmi_handle->ops->send_set_mcc_channel_time_latency_cmd(wmi_handle, + mcc_channel_freq, + mcc_channel_time_latency); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_set_mcc_channel_time_quota_cmd() - set MCC channel time quota + * @wmi: wmi handle + * @adapter_1_chan_number: adapter 1 channel number + * @adapter_1_quota: adapter 1 quota + * @adapter_2_chan_number: adapter 2 channel number + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_mcc_channel_time_quota_cmd(void *wmi_hdl, + uint32_t adapter_1_chan_freq, + uint32_t adapter_1_quota, uint32_t adapter_2_chan_freq) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_mcc_channel_time_quota_cmd) + return wmi_handle->ops->send_set_mcc_channel_time_quota_cmd(wmi_handle, + adapter_1_chan_freq, + adapter_1_quota, + adapter_2_chan_freq); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_set_thermal_mgmt_cmd() - set thermal mgmt command to fw + * @wmi_handle: Pointer to wmi handle + * @thermal_info: Thermal command information + * + * This function sends the thermal management command + * to the firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_thermal_mgmt_cmd(void *wmi_hdl, + struct thermal_cmd_params *thermal_info) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_thermal_mgmt_cmd) + return wmi_handle->ops->send_set_thermal_mgmt_cmd(wmi_handle, + thermal_info); + + return QDF_STATUS_E_FAILURE; +} + + +/** + * 
wmi_unified_lro_config_cmd() - process the LRO config command + * @wmi: Pointer to wmi handle + * @wmi_lro_cmd: Pointer to LRO configuration parameters + * + * This function sends down the LRO configuration parameters to + * the firmware to enable LRO, sets the TCP flags and sets the + * seed values for the toeplitz hash generation + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_lro_config_cmd(void *wmi_hdl, + struct wmi_lro_config_cmd_t *wmi_lro_cmd) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_lro_config_cmd) + return wmi_handle->ops->send_lro_config_cmd(wmi_handle, + wmi_lro_cmd); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef CONFIG_MCL +/** + * wmi_unified_peer_rate_report_cmd() - process the peer rate report command + * @wmi_hdl: Pointer to wmi handle + * @rate_report_params: Pointer to peer rate report parameters + * + * + * Return: QDF_STATUS_SUCCESS for success otherwise failure + */ +QDF_STATUS wmi_unified_peer_rate_report_cmd(void *wmi_hdl, + struct wmi_peer_rate_report_params *rate_report_params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_peer_rate_report_cmd) + return wmi_handle->ops->send_peer_rate_report_cmd(wmi_handle, + rate_report_params); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_bcn_buf_ll_cmd() - prepare and send beacon buffer to fw for LL + * @wmi_hdl: wmi handle + * @param: bcn ll cmd parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_bcn_buf_ll_cmd(void *wmi_hdl, + wmi_bcn_send_from_host_cmd_fixed_param *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_bcn_buf_ll_cmd) + return wmi_handle->ops->send_bcn_buf_ll_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} +#endif + +/** + * wmi_unified_set_sta_sa_query_param_cmd() - set sta sa query parameters + * 
@wmi_hdl: wmi handle + * @vdev_id: vdev id + * @max_retries: max retries + * @retry_interval: retry interval + * This function sets sta query related parameters in fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ + +QDF_STATUS wmi_unified_set_sta_sa_query_param_cmd(void *wmi_hdl, + uint8_t vdev_id, uint32_t max_retries, + uint32_t retry_interval) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_sta_sa_query_param_cmd) + return wmi_handle->ops->send_set_sta_sa_query_param_cmd(wmi_handle, + vdev_id, max_retries, + retry_interval); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_set_sta_keep_alive_cmd() - set sta keep alive parameters + * @wmi_hdl: wmi handle + * @params: sta keep alive parameter + * + * This function sets keep alive related parameters in fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_sta_keep_alive_cmd(void *wmi_hdl, + struct sta_params *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_sta_keep_alive_cmd) + return wmi_handle->ops->send_set_sta_keep_alive_cmd(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_vdev_set_gtx_cfg_cmd() - set GTX params + * @wmi_hdl: wmi handle + * @if_id: vdev id + * @gtx_info: GTX config params + * + * This function set GTX related params in firmware. 
+ * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_vdev_set_gtx_cfg_cmd(void *wmi_hdl, uint32_t if_id, + struct wmi_gtx_config *gtx_info) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_vdev_set_gtx_cfg_cmd) + return wmi_handle->ops->send_vdev_set_gtx_cfg_cmd(wmi_handle, + if_id, gtx_info); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_process_update_edca_param() - update EDCA params + * @wmi_hdl: wmi handle + * @vdev_id: vdev id. + * @mu_edca_param: mu_edca_param. + * @wmm_vparams: edca parameters + * + * This function updates EDCA parameters to the target + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_process_update_edca_param(void *wmi_hdl, + uint8_t vdev_id, bool mu_edca_param, + struct wmi_host_wme_vparams wmm_vparams[WMI_MAX_NUM_AC]) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_process_update_edca_param_cmd) + return wmi_handle->ops->send_process_update_edca_param_cmd(wmi_handle, + vdev_id, mu_edca_param, wmm_vparams); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_probe_rsp_tmpl_send_cmd() - send probe response template to fw + * @wmi_hdl: wmi handle + * @vdev_id: vdev id + * @probe_rsp_info: probe response info + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_probe_rsp_tmpl_send_cmd(void *wmi_hdl, + uint8_t vdev_id, + struct wmi_probe_resp_params *probe_rsp_info) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_probe_rsp_tmpl_send_cmd) + return wmi_handle->ops->send_probe_rsp_tmpl_send_cmd(wmi_handle, + vdev_id, probe_rsp_info); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_setup_install_key_cmd - send key to install to fw + * @wmi_hdl: wmi handle + * @key_params: key parameters + * + * Return: QDF_STATUS_SUCCESS on 
success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_setup_install_key_cmd(void *wmi_hdl, + struct set_key_params *key_params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_setup_install_key_cmd) + return wmi_handle->ops->send_setup_install_key_cmd(wmi_handle, + key_params); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_p2p_go_set_beacon_ie_cmd() - set beacon IE for p2p go + * @wma_handle: wma handle + * @vdev_id: vdev id + * @p2p_ie: p2p IE + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_p2p_go_set_beacon_ie_cmd(void *wmi_hdl, + uint32_t vdev_id, uint8_t *p2p_ie) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_p2p_go_set_beacon_ie_cmd) + return wmi_handle->ops->send_p2p_go_set_beacon_ie_cmd(wmi_handle, + vdev_id, p2p_ie); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_set_gateway_params_cmd() - set gateway parameters + * @wmi_hdl: wmi handle + * @req: gateway parameter update request structure + * + * This function reads the incoming @req and fill in the destination + * WMI structure and sends down the gateway configs down to the firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures; + * error number otherwise + */ +QDF_STATUS wmi_unified_set_gateway_params_cmd(void *wmi_hdl, + struct gateway_update_req_param *req) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_gateway_params_cmd) + return wmi_handle->ops->send_set_gateway_params_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_set_rssi_monitoring_cmd() - set rssi monitoring + * @wmi_hdl: wmi handle + * @req: rssi monitoring request structure + * + * This function reads the incoming @req and fill in the destination + * WMI structure and send down the rssi monitoring configs down to the firmware + * + * Return: 
QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures; + * error number otherwise + */ +QDF_STATUS wmi_unified_set_rssi_monitoring_cmd(void *wmi_hdl, + struct rssi_monitor_param *req) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_rssi_monitoring_cmd) + return wmi_handle->ops->send_set_rssi_monitoring_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_scan_probe_setoui_cmd() - set scan probe OUI + * @wmi_hdl: wmi handle + * @psetoui: OUI parameters + * + * set scan probe OUI parameters in firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_scan_probe_setoui_cmd(void *wmi_hdl, + struct scan_mac_oui *psetoui) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_scan_probe_setoui_cmd) + return wmi_handle->ops->send_scan_probe_setoui_cmd(wmi_handle, + psetoui); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef CONFIG_MCL +/** + * wmi_unified_roam_scan_offload_mode_cmd() - set roam scan parameters + * @wmi_hdl: wmi handle + * @scan_cmd_fp: scan related parameters + * @roam_req: roam related parameters + * + * This function reads the incoming @roam_req and fill in the destination + * WMI structure and send down the roam scan configs down to the firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_roam_scan_offload_mode_cmd(void *wmi_hdl, + wmi_start_scan_cmd_fixed_param *scan_cmd_fp, + struct roam_offload_scan_params *roam_req) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_roam_scan_offload_mode_cmd) + return wmi_handle->ops->send_roam_scan_offload_mode_cmd( + wmi_handle, scan_cmd_fp, roam_req); + + return QDF_STATUS_E_FAILURE; +} +#endif + +/** + * wmi_unified_roam_scan_offload_rssi_thresh_cmd() - set roam scan rssi + * parameters + * @wmi_hdl: wmi handle + * @roam_req: roam 
rssi related parameters + * + * This function reads the incoming @roam_req and fill in the destination + * WMI structure and send down the roam scan rssi configs down to the firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_roam_scan_offload_rssi_thresh_cmd(void *wmi_hdl, + struct roam_offload_scan_rssi_params + *roam_req) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_roam_scan_offload_rssi_thresh_cmd) + return wmi_handle->ops->send_roam_scan_offload_rssi_thresh_cmd( + wmi_handle, roam_req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_roam_mawc_params_cmd( + void *wmi_hdl, struct wmi_mawc_roam_params *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_roam_mawc_params_cmd) + return wmi_handle->ops->send_roam_mawc_params_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} +/** + * wmi_unified_roam_scan_filter_cmd() - send roam scan whitelist, + * blacklist and preferred list + * @wmi_hdl: wmi handle + * @roam_req: roam scan lists related parameters + * + * This function reads the incoming @roam_req and fill in the destination + * WMI structure and send down the different roam scan lists down to the fw + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_roam_scan_filter_cmd(void *wmi_hdl, + struct roam_scan_filter_params *roam_req) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_roam_scan_filter_cmd) + return wmi_handle->ops->send_roam_scan_filter_cmd( + wmi_handle, roam_req); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef IPA_OFFLOAD +/** wmi_unified_ipa_offload_control_cmd() - ipa offload control parameter + * @wmi_hdl: wmi handle + * @ipa_offload: ipa offload control parameter + * + * Returns: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures, + * error number 
otherwise + */ +QDF_STATUS wmi_unified_ipa_offload_control_cmd(void *wmi_hdl, + struct ipa_uc_offload_control_params *ipa_offload) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (!wmi_handle) + return QDF_STATUS_E_FAILURE; + + if (wmi_handle->ops->send_ipa_offload_control_cmd) + return wmi_handle->ops->send_ipa_offload_control_cmd(wmi_handle, + ipa_offload); + + return QDF_STATUS_E_FAILURE; +} +#endif + + +/** + * wmi_unified_plm_stop_cmd() - plm stop request + * @wmi_hdl: wmi handle + * @plm: plm request parameters + * + * This function request FW to stop PLM. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_plm_stop_cmd(void *wmi_hdl, + const struct plm_req_params *plm) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_plm_stop_cmd) + return wmi_handle->ops->send_plm_stop_cmd(wmi_handle, + plm); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_plm_start_cmd() - plm start request + * @wmi_hdl: wmi handle + * @plm: plm request parameters + * + * This function request FW to start PLM. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_plm_start_cmd(void *wmi_hdl, + const struct plm_req_params *plm, + uint32_t *gchannel_list) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_plm_start_cmd) + return wmi_handle->ops->send_plm_start_cmd(wmi_handle, + plm, gchannel_list); + + return QDF_STATUS_E_FAILURE; +} + +/** + * send_pno_stop_cmd() - PNO stop request + * @wmi_hdl: wmi handle + * @vdev_id: vdev id + * + * This function request FW to stop ongoing PNO operation. 
+ * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_pno_stop_cmd(void *wmi_hdl, uint8_t vdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_pno_stop_cmd) + return wmi_handle->ops->send_pno_stop_cmd(wmi_handle, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_pno_start_cmd() - PNO start request + * @wmi_hdl: wmi handle + * @pno: PNO request + * + * This function request FW to start PNO request. + * Request: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +#ifdef FEATURE_WLAN_SCAN_PNO +QDF_STATUS wmi_unified_pno_start_cmd(void *wmi_hdl, + struct pno_scan_req_params *pno) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_pno_start_cmd) + return wmi_handle->ops->send_pno_start_cmd(wmi_handle, + pno); + + return QDF_STATUS_E_FAILURE; +} +#endif + +/** + * wmi_unified_nlo_mawc_cmd() - NLO MAWC cmd configuration + * @wmi_hdl: wmi handle + * @params: Configuration parameters + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_nlo_mawc_cmd(void *wmi_hdl, + struct nlo_mawc_params *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_nlo_mawc_cmd) + return wmi_handle->ops->send_nlo_mawc_cmd(wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +/* wmi_unified_set_ric_req_cmd() - set ric request element + * @wmi_hdl: wmi handle + * @msg: message + * @is_add_ts: is addts required + * + * This function sets ric request element for 11r roaming. 
+ * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_ric_req_cmd(void *wmi_hdl, void *msg, + uint8_t is_add_ts) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_ric_req_cmd) + return wmi_handle->ops->send_set_ric_req_cmd(wmi_handle, msg, + is_add_ts); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_process_ll_stats_clear_cmd() - clear link layer stats + * @wmi_hdl: wmi handle + * @clear_req: ll stats clear request command params + * @addr: mac address + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_process_ll_stats_clear_cmd(void *wmi_hdl, + const struct ll_stats_clear_params *clear_req, + uint8_t addr[IEEE80211_ADDR_LEN]) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_process_ll_stats_clear_cmd) + return wmi_handle->ops->send_process_ll_stats_clear_cmd(wmi_handle, + clear_req, addr); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_process_ll_stats_get_cmd() - link layer stats get request + * @wmi_hdl:wmi handle + * @get_req:ll stats get request command params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_process_ll_stats_get_cmd(void *wmi_hdl, + const struct ll_stats_get_params *get_req, + uint8_t addr[IEEE80211_ADDR_LEN]) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_process_ll_stats_get_cmd) + return wmi_handle->ops->send_process_ll_stats_get_cmd(wmi_handle, + get_req, addr); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_congestion_request_cmd() - send request to fw to get CCA + * @wmi_hdl: wma handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_congestion_request_cmd(void *wmi_hdl, + uint8_t vdev_id) +{ + wmi_unified_t wmi_handle 
= (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_congestion_cmd) + return wmi_handle->ops->send_congestion_cmd(wmi_handle, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_process_ll_stats_set_cmd() - link layer stats set request + * @wmi_handle: wmi handle + * @set_req: ll stats set request command params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_process_ll_stats_set_cmd(void *wmi_hdl, + const struct ll_stats_set_params *set_req) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_process_ll_stats_set_cmd) + return wmi_handle->ops->send_process_ll_stats_set_cmd(wmi_handle, + set_req); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_snr_request_cmd() - send request to fw to get RSSI stats + * @wmi_handle: wmi handle + * @rssi_req: get RSSI request + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_snr_request_cmd(void *wmi_hdl) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_snr_request_cmd) + return wmi_handle->ops->send_snr_request_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_snr_cmd() - get RSSI from fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_snr_cmd(void *wmi_hdl, uint8_t vdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_snr_cmd) + return wmi_handle->ops->send_snr_cmd(wmi_handle, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_link_status_req_cmd() - process link status request from UMAC + * @wmi_handle: wmi handle + * @link_status: get link params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_link_status_req_cmd(void *wmi_hdl, + struct 
link_status_params *link_status) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_link_status_req_cmd) + return wmi_handle->ops->send_link_status_req_cmd(wmi_handle, + link_status); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_process_dhcp_ind() - process dhcp indication from SME + * @wmi_handle: wmi handle + * @ta_dhcp_ind: DHCP indication parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +#ifdef CONFIG_MCL +QDF_STATUS wmi_unified_process_dhcp_ind(void *wmi_hdl, + wmi_peer_set_param_cmd_fixed_param *ta_dhcp_ind) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_process_dhcp_ind_cmd) + return wmi_handle->ops->send_process_dhcp_ind_cmd(wmi_handle, + ta_dhcp_ind); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_get_link_speed_cmd() -send command to get linkspeed + * @wmi_handle: wmi handle + * @pLinkSpeed: link speed info + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_get_link_speed_cmd(void *wmi_hdl, + wmi_mac_addr peer_macaddr) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_get_link_speed_cmd) + return wmi_handle->ops->send_get_link_speed_cmd(wmi_handle, + peer_macaddr); + + return QDF_STATUS_E_FAILURE; +} +#endif + +#ifdef WLAN_SUPPORT_GREEN_AP +/** + * wmi_unified_egap_conf_params_cmd() - send wmi cmd of egap configuration params + * @wmi_handle: wmi handler + * @egap_params: pointer to egap_params + * + * Return: 0 for success, otherwise appropriate error code + */ +QDF_STATUS wmi_unified_egap_conf_params_cmd(void *wmi_hdl, + struct wlan_green_ap_egap_params *egap_params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_egap_conf_params_cmd) + return wmi_handle->ops->send_egap_conf_params_cmd(wmi_handle, + egap_params); + + return QDF_STATUS_E_FAILURE; +} +#endif + +/** + * 
wmi_unified_fw_profiling_data_cmd() - send FW profiling cmd to WLAN FW + * @wmi_handl: wmi handle + * @cmd: Profiling command index + * @value1: parameter1 value + * @value2: parameter2 value + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_fw_profiling_data_cmd(void *wmi_hdl, + uint32_t cmd, uint32_t value1, uint32_t value2) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_fw_profiling_cmd) + return wmi_handle->ops->send_fw_profiling_cmd(wmi_handle, + cmd, value1, value2); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_wow_timer_pattern_cmd() - set timer pattern tlv, so that firmware + * will wake up host after specified time is elapsed + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @cookie: value to identify reason why host set up wake call. + * @time: time in ms + * + * Return: QDF status + */ +QDF_STATUS wmi_unified_wow_timer_pattern_cmd(void *wmi_hdl, uint8_t vdev_id, + uint32_t cookie, uint32_t time) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_wow_timer_pattern_cmd) + return wmi_handle->ops->send_wow_timer_pattern_cmd(wmi_handle, + vdev_id, cookie, time); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_nat_keepalive_en_cmd() - enable NAT keepalive filter + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_nat_keepalive_en_cmd(void *wmi_hdl, uint8_t vdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_nat_keepalive_en_cmd) + return wmi_handle->ops->send_nat_keepalive_en_cmd(wmi_handle, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_wlm_latency_level_cmd(void *wmi_hdl, + struct wlm_latency_level_param *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if 
(wmi_handle->ops->send_wlm_latency_level_cmd) + return wmi_handle->ops->send_wlm_latency_level_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_csa_offload_enable() - send CSA offload enable command + * @wmi_hdl: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_csa_offload_enable(void *wmi_hdl, uint8_t vdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_csa_offload_enable_cmd) + return wmi_handle->ops->send_csa_offload_enable_cmd(wmi_handle, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_FEATURE_CIF_CFR +QDF_STATUS wmi_unified_oem_dma_ring_cfg(void *wmi_hdl, + wmi_oem_dma_ring_cfg_req_fixed_param *cfg) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_start_oem_data_cmd) + return wmi_handle->ops->send_oem_dma_cfg_cmd(wmi_handle, cfg); + + return QDF_STATUS_E_FAILURE; +} +#endif + +QDF_STATUS wmi_unified_dbr_ring_cfg(void *wmi_hdl, + struct direct_buf_rx_cfg_req *cfg) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_dbr_cfg_cmd) + return wmi_handle->ops->send_dbr_cfg_cmd(wmi_handle, cfg); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_start_oem_data_cmd() - start OEM data request to target + * @wmi_handle: wmi handle + * @startOemDataReq: start request params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_start_oem_data_cmd(void *wmi_hdl, + uint32_t data_len, + uint8_t *data) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_start_oem_data_cmd) + return wmi_handle->ops->send_start_oem_data_cmd(wmi_handle, + data_len, data); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_dfs_phyerr_filter_offload_en_cmd() - enable dfs phyerr filter + * @wmi_handle: wmi handle + * 
@dfs_phyerr_filter_offload: is dfs phyerr filter offload + * + * Send WMI_DFS_PHYERR_FILTER_ENA_CMDID or + * WMI_DFS_PHYERR_FILTER_DIS_CMDID command + * to firmware based on phyerr filtering + * offload status. + * + * Return: 1 success, 0 failure + */ +QDF_STATUS +wmi_unified_dfs_phyerr_filter_offload_en_cmd(void *wmi_hdl, + bool dfs_phyerr_filter_offload) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_dfs_phyerr_filter_offload_en_cmd) + return wmi_handle->ops->send_dfs_phyerr_filter_offload_en_cmd(wmi_handle, + dfs_phyerr_filter_offload); + + return QDF_STATUS_E_FAILURE; +} + +#if !defined(REMOVE_PKT_LOG) +/** + * wmi_unified_pktlog_wmi_send_cmd() - send pktlog enable/disable command to target + * @wmi_handle: wmi handle + * @pktlog_event: pktlog event + * @cmd_id: pktlog cmd id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +#ifdef CONFIG_MCL +QDF_STATUS wmi_unified_pktlog_wmi_send_cmd(void *wmi_hdl, + WMI_PKTLOG_EVENT pktlog_event, + uint32_t cmd_id, + uint8_t user_triggered) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_pktlog_wmi_send_cmd) + return wmi_handle->ops->send_pktlog_wmi_send_cmd(wmi_handle, + pktlog_event, cmd_id, user_triggered); + + return QDF_STATUS_E_FAILURE; +} +#endif +#endif /* REMOVE_PKT_LOG */ + +/** + * wmi_unified_wow_delete_pattern_cmd() - delete wow pattern in target + * @wmi_handle: wmi handle + * @ptrn_id: pattern id + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_wow_delete_pattern_cmd(void *wmi_hdl, uint8_t ptrn_id, + uint8_t vdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_wow_delete_pattern_cmd) + return wmi_handle->ops->send_wow_delete_pattern_cmd(wmi_handle, + ptrn_id, vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_host_wakeup_ind_to_fw_cmd() - send wakeup 
ind to fw + * @wmi_handle: wmi handle + * + * Sends host wakeup indication to FW. On receiving this indication, + * FW will come out of WOW. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_host_wakeup_ind_to_fw_cmd(void *wmi_hdl) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_host_wakeup_ind_to_fw_cmd) + return wmi_handle->ops->send_host_wakeup_ind_to_fw_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_del_ts_cmd() - send DELTS request to fw + * @wmi_handle: wmi handle + * @msg: delts params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_del_ts_cmd(void *wmi_hdl, uint8_t vdev_id, + uint8_t ac) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_del_ts_cmd) + return wmi_handle->ops->send_del_ts_cmd(wmi_handle, + vdev_id, ac); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_aggr_qos_cmd() - send aggr qos request to fw + * @wmi_handle: handle to wmi + * @aggr_qos_rsp_msg - combined struct for all ADD_TS requests. + * + * A function to handle WMI_AGGR_QOS_REQ. This will send out + * ADD_TS requestes to firmware in loop for all the ACs with + * active flow. 
+ * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_aggr_qos_cmd(void *wmi_hdl, + struct aggr_add_ts_param *aggr_qos_rsp_msg) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_aggr_qos_cmd) + return wmi_handle->ops->send_aggr_qos_cmd(wmi_handle, + aggr_qos_rsp_msg); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_add_ts_cmd() - send ADDTS request to fw + * @wmi_handle: wmi handle + * @msg: ADDTS params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_add_ts_cmd(void *wmi_hdl, + struct add_ts_param *msg) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_add_ts_cmd) + return wmi_handle->ops->send_add_ts_cmd(wmi_handle, + msg); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_process_add_periodic_tx_ptrn_cmd - add periodic tx ptrn + * @wmi_handle: wmi handle + * @pAddPeriodicTxPtrnParams: tx ptrn params + * + * Retrun: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_process_add_periodic_tx_ptrn_cmd(void *wmi_hdl, + struct periodic_tx_pattern * + pAddPeriodicTxPtrnParams, + uint8_t vdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_process_add_periodic_tx_ptrn_cmd) + return wmi_handle->ops->send_process_add_periodic_tx_ptrn_cmd(wmi_handle, + pAddPeriodicTxPtrnParams, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_process_del_periodic_tx_ptrn_cmd - del periodic tx ptrn + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @pattern_id: pattern id + * + * Retrun: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_process_del_periodic_tx_ptrn_cmd(void *wmi_hdl, + uint8_t vdev_id, + uint8_t pattern_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if 
(wmi_handle->ops->send_process_del_periodic_tx_ptrn_cmd) + return wmi_handle->ops->send_process_del_periodic_tx_ptrn_cmd(wmi_handle, + vdev_id, + pattern_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_stats_ext_req_cmd() - request ext stats from fw + * @wmi_handle: wmi handle + * @preq: stats ext params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_stats_ext_req_cmd(void *wmi_hdl, + struct stats_ext_params *preq) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_stats_ext_req_cmd) + return wmi_handle->ops->send_stats_ext_req_cmd(wmi_handle, + preq); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_enable_ext_wow_cmd() - enable ext wow in fw + * @wmi_handle: wmi handle + * @params: ext wow params + * + * Return:QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_enable_ext_wow_cmd(void *wmi_hdl, + struct ext_wow_params *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_enable_ext_wow_cmd) + return wmi_handle->ops->send_enable_ext_wow_cmd(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_set_app_type2_params_in_fw_cmd() - set app type2 params in fw + * @wmi_handle: wmi handle + * @appType2Params: app type2 params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_app_type2_params_in_fw_cmd(void *wmi_hdl, + struct app_type2_params *appType2Params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_app_type2_params_in_fw_cmd) + return wmi_handle->ops->send_set_app_type2_params_in_fw_cmd(wmi_handle, + appType2Params); + + return QDF_STATUS_E_FAILURE; + +} + +/** + * wmi_unified_set_auto_shutdown_timer_cmd() - sets auto shutdown timer in firmware + * @wmi_handle: wmi handle + * @timer_val: auto shutdown timer value + * + 
* Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_auto_shutdown_timer_cmd(void *wmi_hdl, + uint32_t timer_val) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_auto_shutdown_timer_cmd) + return wmi_handle->ops->send_set_auto_shutdown_timer_cmd(wmi_handle, + timer_val); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_nan_req_cmd() - to send nan request to target + * @wmi_handle: wmi handle + * @nan_req: request data which will be non-null + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_nan_req_cmd(void *wmi_hdl, + struct nan_req_params *nan_req) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_nan_req_cmd) + return wmi_handle->ops->send_nan_req_cmd(wmi_handle, + nan_req); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_process_dhcpserver_offload_cmd() - enable DHCP server offload + * @wmi_handle: wmi handle + * @pDhcpSrvOffloadInfo: DHCP server offload info + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_process_dhcpserver_offload_cmd(void *wmi_hdl, + struct dhcp_offload_info_params *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_process_dhcpserver_offload_cmd) + return wmi_handle->ops->send_process_dhcpserver_offload_cmd(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_process_ch_avoid_update_cmd() - handles channel avoid update request + * @wmi_handle: wmi handle + * @ch_avoid_update_req: channel avoid update params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_process_ch_avoid_update_cmd(void *wmi_hdl) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_process_ch_avoid_update_cmd) + return 
wmi_handle->ops->send_process_ch_avoid_update_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_send_regdomain_info_to_fw_cmd() - send regdomain info to fw + * @wmi_handle: wmi handle + * @reg_dmn: reg domain + * @regdmn2G: 2G reg domain + * @regdmn5G: 5G reg domain + * @ctl2G: 2G test limit + * @ctl5G: 5G test limit + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_regdomain_info_to_fw_cmd(void *wmi_hdl, + uint32_t reg_dmn, uint16_t regdmn2G, + uint16_t regdmn5G, uint8_t ctl2G, + uint8_t ctl5G) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_regdomain_info_to_fw_cmd) + return wmi_handle->ops->send_regdomain_info_to_fw_cmd(wmi_handle, + reg_dmn, regdmn2G, + regdmn5G, ctl2G, + ctl5G); + + return QDF_STATUS_E_FAILURE; +} + + +/** + * wmi_unified_set_tdls_offchan_mode_cmd() - set tdls off channel mode + * @wmi_handle: wmi handle + * @chan_switch_params: Pointer to tdls channel switch parameter structure + * + * This function sets tdls off channel mode + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures; + * Negative errno otherwise + */ +QDF_STATUS wmi_unified_set_tdls_offchan_mode_cmd(void *wmi_hdl, + struct tdls_channel_switch_params *chan_switch_params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_tdls_offchan_mode_cmd) + return wmi_handle->ops->send_set_tdls_offchan_mode_cmd(wmi_handle, + chan_switch_params); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_update_fw_tdls_state_cmd() - send enable/disable tdls for a vdev + * @wmi_handle: wmi handle + * @pwmaTdlsparams: TDLS params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_update_fw_tdls_state_cmd(void *wmi_hdl, + void *tdls_param, uint8_t tdls_state) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if 
(wmi_handle->ops->send_update_fw_tdls_state_cmd) + return wmi_handle->ops->send_update_fw_tdls_state_cmd(wmi_handle, + tdls_param, tdls_state); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_update_tdls_peer_state_cmd() - update TDLS peer state + * @wmi_handle: wmi handle + * @peerStateParams: TDLS peer state params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_update_tdls_peer_state_cmd(void *wmi_hdl, + struct tdls_peer_state_params *peerStateParams, + uint32_t *ch_mhz) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_update_tdls_peer_state_cmd) + return wmi_handle->ops->send_update_tdls_peer_state_cmd(wmi_handle, + peerStateParams, ch_mhz); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_process_set_ie_info_cmd() - Function to send IE info to firmware + * @wmi_handle: Pointer to WMi handle + * @ie_data: Pointer for ie data + * + * This function sends IE information to firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + * + */ +QDF_STATUS wmi_unified_process_set_ie_info_cmd(void *wmi_hdl, + struct vdev_ie_info_param *ie_info) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_process_set_ie_info_cmd) + return wmi_handle->ops->send_process_set_ie_info_cmd(wmi_handle, + ie_info); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_save_fw_version_cmd() - save fw version + * @wmi_handle: pointer to wmi handle + * @res_cfg: resource config + * @num_mem_chunks: no of mem chunck + * @mem_chunk: pointer to mem chunck structure + * + * This function sends IE information to firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + * + */ +QDF_STATUS wmi_unified_save_fw_version_cmd(void *wmi_hdl, + void *evt_buf) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->save_fw_version_cmd) + return 
wmi_handle->ops->save_fw_version_cmd(wmi_handle, + evt_buf); + + return QDF_STATUS_E_FAILURE; +} + +/** + * send_set_base_macaddr_indicate_cmd() - set base mac address in fw + * @wmi_hdl: wmi handle + * @custom_addr: base mac address + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_base_macaddr_indicate_cmd(void *wmi_hdl, + uint8_t *custom_addr) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_base_macaddr_indicate_cmd) + return wmi_handle->ops->send_set_base_macaddr_indicate_cmd(wmi_handle, + custom_addr); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_log_supported_evt_cmd() - Enable/Disable FW diag/log events + * @wmi_hdl: wmi handle + * @event: Event received from FW + * @len: Length of the event + * + * Enables the low frequency events and disables the high frequency + * events. Bit 17 indicates if the event if low/high frequency. + * 1 - high frequency, 0 - low frequency + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures + */ +QDF_STATUS wmi_unified_log_supported_evt_cmd(void *wmi_hdl, + uint8_t *event, + uint32_t len) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_log_supported_evt_cmd) + return wmi_handle->ops->send_log_supported_evt_cmd(wmi_handle, + event, len); + + return QDF_STATUS_E_FAILURE; +} + +void wmi_send_time_stamp_sync_cmd_tlv(void *wmi_hdl) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + if (wmi_handle->ops->send_time_stamp_sync_cmd) + wmi_handle->ops->send_time_stamp_sync_cmd(wmi_handle); + +} +/** + * wmi_unified_enable_specific_fw_logs_cmd() - Start/Stop logging of diag log id + * @wmi_hdl: wmi handle + * @start_log: Start logging related parameters + * + * Send the command to the FW based on which specific logging of diag + * event/log id can be started/stopped + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for 
failure + */ +QDF_STATUS wmi_unified_enable_specific_fw_logs_cmd(void *wmi_hdl, + struct wmi_wifi_start_log *start_log) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_enable_specific_fw_logs_cmd) + return wmi_handle->ops->send_enable_specific_fw_logs_cmd(wmi_handle, + start_log); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_flush_logs_to_fw_cmd() - Send log flush command to FW + * @wmi_hdl: WMI handle + * + * This function is used to send the flush command to the FW, + * that will flush the fw logs that are residue in the FW + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_flush_logs_to_fw_cmd(void *wmi_hdl) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_flush_logs_to_fw_cmd) + return wmi_handle->ops->send_flush_logs_to_fw_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_pdev_set_pcl_cmd() - Send WMI_SOC_SET_PCL_CMDID to FW + * @wmi_hdl: wmi handle + * @msg: PCL structure containing the PCL and the number of channels + * + * WMI_SOC_SET_PCL_CMDID provides a Preferred Channel List (PCL) to the WLAN + * firmware. The DBS Manager is the consumer of this information in the WLAN + * firmware. The channel list will be used when a Virtual DEVice (VDEV) needs + * to migrate to a new channel without host driver involvement. An example of + * this behavior is Legacy Fast Roaming (LFR 3.0). Generally, the host will + * manage the channel selection without firmware involvement. 
+ * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_pdev_set_pcl_cmd(void *wmi_hdl, + struct wmi_pcl_chan_weights *msg) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_pdev_set_pcl_cmd) + return wmi_handle->ops->send_pdev_set_pcl_cmd(wmi_handle, msg); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_soc_set_hw_mode_cmd() - Send WMI_SOC_SET_HW_MODE_CMDID to FW + * @wmi_hdl: wmi handle + * @msg: Structure containing the following parameters + * + * - hw_mode_index: The HW_Mode field is a enumerated type that is selected + * from the HW_Mode table, which is returned in the WMI_SERVICE_READY_EVENTID. + * + * Provides notification to the WLAN firmware that host driver is requesting a + * HardWare (HW) Mode change. This command is needed to support iHelium in the + * configurations that include the Dual Band Simultaneous (DBS) feature. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_soc_set_hw_mode_cmd(void *wmi_hdl, + uint32_t hw_mode_index) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_pdev_set_hw_mode_cmd) + return wmi_handle->ops->send_pdev_set_hw_mode_cmd(wmi_handle, + hw_mode_index); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_pdev_set_dual_mac_config_cmd() - Set dual mac config to FW + * @wmi_hdl: wmi handle + * @msg: Dual MAC config parameters + * + * Configures WLAN firmware with the dual MAC features + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures. 
+ */ +QDF_STATUS wmi_unified_pdev_set_dual_mac_config_cmd(void *wmi_hdl, + struct policy_mgr_dual_mac_config *msg) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_pdev_set_dual_mac_config_cmd) + return wmi_handle->ops->send_pdev_set_dual_mac_config_cmd(wmi_handle, + msg); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_set_led_flashing_cmd() - set led flashing in fw + * @wmi_hdl: wmi handle + * @flashing: flashing request + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_led_flashing_cmd(void *wmi_hdl, + struct flashing_req_params *flashing) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_led_flashing_cmd) + return wmi_handle->ops->send_set_led_flashing_cmd(wmi_handle, + flashing); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_app_type1_params_in_fw_cmd() - set app type1 params in fw + * @wmi_hdl: wmi handle + * @appType1Params: app type1 params + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_app_type1_params_in_fw_cmd(void *wmi_hdl, + struct app_type1_params *app_type1_params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_app_type1_params_in_fw_cmd) + return wmi_handle->ops->send_app_type1_params_in_fw_cmd(wmi_handle, + app_type1_params); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_set_ssid_hotlist_cmd() - Handle an SSID hotlist set request + * @wmi_hdl: wmi handle + * @request: SSID hotlist set request + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_unified_set_ssid_hotlist_cmd(void *wmi_hdl, + struct ssid_hotlist_request_params *request) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_ssid_hotlist_cmd) + return wmi_handle->ops->send_set_ssid_hotlist_cmd(wmi_handle, + request); 
+ + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_roam_synch_complete_cmd() - roam synch complete command to fw. + * @wmi_hdl: wmi handle + * @vdev_id: vdev id + * + * This function sends roam synch complete event to fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_roam_synch_complete_cmd(void *wmi_hdl, + uint8_t vdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_process_roam_synch_complete_cmd) + return wmi_handle->ops->send_process_roam_synch_complete_cmd(wmi_handle, + vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_fw_test_cmd() - send fw test command to fw. + * @wmi_hdl: wmi handle + * @wmi_fwtest: fw test command + * + * This function sends fw test command to fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_fw_test_cmd(void *wmi_hdl, + struct set_fwtest_params *wmi_fwtest) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_fw_test_cmd) + return wmi_handle->ops->send_fw_test_cmd(wmi_handle, + wmi_fwtest); + + return QDF_STATUS_E_FAILURE; + +} + +/** + * wmi_unified_unit_test_cmd() - send unit test command to fw. + * @wmi_hdl: wmi handle + * @wmi_utest: unit test command + * + * This function send unit test command to fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_unit_test_cmd(void *wmi_hdl, + struct wmi_unit_test_cmd *wmi_utest) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_unit_test_cmd) + return wmi_handle->ops->send_unit_test_cmd(wmi_handle, + wmi_utest); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified__roam_invoke_cmd() - send roam invoke command to fw. + * @wmi_hdl: wmi handle + * @roaminvoke: roam invoke command + * + * Send roam invoke command to fw for fastreassoc. 
+ * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_roam_invoke_cmd(void *wmi_hdl, + struct wmi_roam_invoke_cmd *roaminvoke, + uint32_t ch_hz) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_roam_invoke_cmd) + return wmi_handle->ops->send_roam_invoke_cmd(wmi_handle, + roaminvoke, ch_hz); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_roam_scan_offload_cmd() - set roam offload command + * @wmi_hdl: wmi handle + * @command: command + * @vdev_id: vdev id + * + * This function set roam offload command to fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_roam_scan_offload_cmd(void *wmi_hdl, + uint32_t command, uint32_t vdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_roam_scan_offload_cmd) + return wmi_handle->ops->send_roam_scan_offload_cmd(wmi_handle, + command, vdev_id); + + return QDF_STATUS_E_FAILURE; +} +#ifdef CONFIG_MCL +/** + * wmi_unified_send_roam_scan_offload_ap_cmd() - set roam ap profile in fw + * @wmi_hdl: wmi handle + * @ap_profile: ap profile params + * + * Send WMI_ROAM_AP_PROFILE to firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_roam_scan_offload_ap_cmd(void *wmi_hdl, + struct ap_profile_params *ap_profile) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_roam_scan_offload_ap_profile_cmd) + return wmi_handle->ops->send_roam_scan_offload_ap_profile_cmd( + wmi_handle, ap_profile); + + return QDF_STATUS_E_FAILURE; +} +#endif +/** + * wmi_unified_roam_scan_offload_scan_period() - set roam offload scan period + * @wmi_handle: wmi handle + * @scan_period: scan period + * @scan_age: scan age + * @vdev_id: vdev id + * + * Send WMI_ROAM_SCAN_PERIOD parameters to fw. 
+ * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_roam_scan_offload_scan_period(void *wmi_hdl, + uint32_t scan_period, + uint32_t scan_age, + uint32_t vdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_roam_scan_offload_scan_period_cmd) + return wmi_handle->ops->send_roam_scan_offload_scan_period_cmd(wmi_handle, + scan_period, scan_age, vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_roam_scan_offload_chan_list_cmd() - set roam offload channel list + * @wmi_handle: wmi handle + * @chan_count: channel count + * @chan_list: channel list + * @list_type: list type + * @vdev_id: vdev id + * + * Set roam offload channel list. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_roam_scan_offload_chan_list_cmd(void *wmi_hdl, + uint8_t chan_count, + uint32_t *chan_list, + uint8_t list_type, uint32_t vdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_roam_scan_offload_chan_list_cmd) + return wmi_handle->ops->send_roam_scan_offload_chan_list_cmd(wmi_handle, + chan_count, chan_list, + list_type, vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_roam_scan_offload_rssi_change_cmd() - set roam offload RSSI th + * @wmi_hdl: wmi handle + * @rssi_change_thresh: RSSI Change threshold + * @bcn_rssi_weight: beacon RSSI weight + * @vdev_id: vdev id + * + * Send WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD parameters to fw. 
+ * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_roam_scan_offload_rssi_change_cmd(void *wmi_hdl, + uint32_t vdev_id, + int32_t rssi_change_thresh, + uint32_t bcn_rssi_weight, + uint32_t hirssi_delay_btw_scans) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_roam_scan_offload_rssi_change_cmd) + return wmi_handle->ops->send_roam_scan_offload_rssi_change_cmd(wmi_handle, + vdev_id, rssi_change_thresh, + bcn_rssi_weight, hirssi_delay_btw_scans); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_set_per_roam_config(void *wmi_hdl, + struct wmi_per_roam_config_req *req_buf) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_per_roam_config_cmd) + return wmi_handle->ops->send_per_roam_config_cmd(wmi_handle, + req_buf); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_set_arp_stats_req() - set arp stats request + * @wmi_hdl: wmi handle + * @req_buf: pointer to set_arp_stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_arp_stats_req(void *wmi_hdl, + struct set_arp_stats *req_buf) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_arp_stats_req_cmd) + return wmi_handle->ops->send_set_arp_stats_req_cmd(wmi_handle, + req_buf); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_get_arp_stats_req() - get arp stats request + * @wmi_hdl: wmi handle + * @req_buf: pointer to get_arp_stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_get_arp_stats_req(void *wmi_hdl, + struct get_arp_stats *req_buf) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_get_arp_stats_req_cmd) + return wmi_handle->ops->send_get_arp_stats_req_cmd(wmi_handle, + req_buf); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS 
wmi_unified_set_del_pmkid_cache(void *wmi_hdl, + struct wmi_unified_pmk_cache *req_buf) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_del_pmkid_cache_cmd) + return wmi_handle->ops->send_set_del_pmkid_cache_cmd(wmi_handle, + req_buf); + + return QDF_STATUS_E_FAILURE; +} + +#if defined(WLAN_FEATURE_FILS_SK) +QDF_STATUS wmi_unified_roam_send_hlp_cmd(void *wmi_hdl, + struct hlp_params *req_buf) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_roam_scan_hlp_cmd) + return wmi_handle->ops->send_roam_scan_hlp_cmd(wmi_handle, + req_buf); + + return QDF_STATUS_E_FAILURE; +} +#endif + +#ifdef FEATURE_WLAN_APF +QDF_STATUS +wmi_unified_set_active_apf_mode_cmd(wmi_unified_t wmi, uint8_t vdev_id, + enum wmi_host_active_apf_mode ucast_mode, + enum wmi_host_active_apf_mode + mcast_bcast_mode) +{ + if (wmi->ops->send_set_active_apf_mode_cmd) + return wmi->ops->send_set_active_apf_mode_cmd(wmi, vdev_id, + ucast_mode, + mcast_bcast_mode); + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_apf_enable_cmd(wmi_unified_t wmi, + uint32_t vdev_id, bool enable) +{ + if (wmi->ops->send_apf_enable_cmd) + return wmi->ops->send_apf_enable_cmd(wmi, vdev_id, enable); + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_apf_write_work_memory_cmd(wmi_unified_t wmi, + struct wmi_apf_write_memory_params + *write_params) +{ + if (wmi->ops->send_apf_write_work_memory_cmd) + return wmi->ops->send_apf_write_work_memory_cmd(wmi, + write_params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_send_apf_read_work_memory_cmd(wmi_unified_t wmi, + struct wmi_apf_read_memory_params + *read_params) +{ + if (wmi->ops->send_apf_read_work_memory_cmd) + return wmi->ops->send_apf_read_work_memory_cmd(wmi, + read_params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_apf_read_memory_resp_event(wmi_unified_t wmi, void *evt_buf, + struct wmi_apf_read_memory_resp_event_params + 
*read_mem_evt) +{ + if (wmi->ops->extract_apf_read_memory_resp_event) + return wmi->ops->extract_apf_read_memory_resp_event(wmi, + evt_buf, + read_mem_evt); + + return QDF_STATUS_E_FAILURE; +} +#endif /* FEATURE_WLAN_APF */ + +/** + * wmi_unified_pdev_get_tpc_config_cmd_send() - WMI get tpc config function + * @param wmi_handle : handle to WMI. + * @param param : tpc config param + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_pdev_get_tpc_config_cmd_send(void *wmi_hdl, + uint32_t param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_pdev_get_tpc_config_cmd) + return wmi_handle->ops->send_pdev_get_tpc_config_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_set_bwf_cmd_send() - WMI set bwf function + * @param wmi_handle : handle to WMI. + * @param param : pointer to set bwf param + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_bwf_cmd_send(void *wmi_hdl, + struct set_bwf_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_bwf_cmd) + return wmi_handle->ops->send_set_bwf_cmd(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_set_atf_cmd_send() - WMI set atf function + * @param wmi_handle : handle to WMI. + * @param param : pointer to set atf param + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_atf_cmd_send(void *wmi_hdl, + struct set_atf_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_atf_cmd) + return wmi_handle->ops->send_set_atf_cmd(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_pdev_fips_cmd_send() - WMI pdev fips cmd function + * @param wmi_handle : handle to WMI. 
 * @param param : pointer to hold pdev fips param
 *
 * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
 */
QDF_STATUS wmi_unified_pdev_fips_cmd_send(void *wmi_hdl,
				struct fips_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_pdev_fips_cmd)
		return wmi_handle->ops->send_pdev_fips_cmd(wmi_handle, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_wlan_profile_enable_cmd_send() - send WLAN profile enable cmd
 * @wmi_hdl: handle to WMI
 * @param: WLAN profile parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_wlan_profile_enable_cmd_send(void *wmi_hdl,
				struct wlan_profile_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_wlan_profile_enable_cmd)
		return wmi_handle->ops->send_wlan_profile_enable_cmd(wmi_handle,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_wlan_profile_trigger_cmd_send() - send WLAN profile trigger cmd
 * @wmi_hdl: handle to WMI
 * @param: WLAN profile parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_wlan_profile_trigger_cmd_send(void *wmi_hdl,
				struct wlan_profile_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t) wmi_hdl;

	if (wmi->ops->send_wlan_profile_trigger_cmd)
		return wmi->ops->send_wlan_profile_trigger_cmd(wmi,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_set_chan_cmd_send() - send WMI pdev set-channel command
 * @wmi_hdl: handle to WMI
 * @param: channel parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_set_chan_cmd_send(void *wmi_hdl,
				struct channel_param *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_pdev_set_chan_cmd)
		return wmi_handle->ops->send_pdev_set_chan_cmd(wmi_handle,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_set_ht_ie_cmd_send() - send WMI set HT IE command
 * @wmi_hdl: handle to WMI
 * @param: HT IE parameters
 *
 * NOTE(review): original comment said "set channel" — copy/paste error.
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_set_ht_ie_cmd_send(void *wmi_hdl,
				struct ht_ie_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_set_ht_ie_cmd)
		return wmi_handle->ops->send_set_ht_ie_cmd(wmi_handle, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_set_vht_ie_cmd_send() - send WMI set VHT IE command
 * @wmi_hdl: handle to WMI
 * @param: VHT IE parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_set_vht_ie_cmd_send(void *wmi_hdl,
				struct vht_ie_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_set_vht_ie_cmd)
		return wmi_handle->ops->send_set_vht_ie_cmd(wmi_handle, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_wmm_update_cmd_send() - send WMI WMM update command
 * @wmi_hdl: handle to WMI
 * @param: WMM parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_wmm_update_cmd_send(void *wmi_hdl,
				struct wmm_update_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_wmm_update_cmd)
		return wmi_handle->ops->send_wmm_update_cmd(wmi_handle, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_set_ant_switch_tbl_cmd_send() - send antenna switch table cmd
 * @wmi_hdl: handle to WMI
 * @param: antenna switch table parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_set_ant_switch_tbl_cmd_send(void *wmi_hdl,
				struct ant_switch_tbl_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_set_ant_switch_tbl_cmd)
		return wmi_handle->ops->send_set_ant_switch_tbl_cmd(wmi_handle,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_set_ratepwr_table_cmd_send() - send set rate-power table cmd
 * @wmi_hdl: handle to WMI
 * @param: rate-power table parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_set_ratepwr_table_cmd_send(void *wmi_hdl,
				struct ratepwr_table_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_set_ratepwr_table_cmd)
		return wmi_handle->ops->send_set_ratepwr_table_cmd(wmi_handle,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_get_ratepwr_table_cmd_send() - WMI ratepwr table cmd function
 * @param wmi_handle : handle to WMI.
 *
 * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
 */
QDF_STATUS wmi_unified_get_ratepwr_table_cmd_send(void *wmi_hdl)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_get_ratepwr_table_cmd)
		return wmi_handle->ops->send_get_ratepwr_table_cmd(wmi_handle);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_set_ctl_table_cmd_send() - send set CTL table command
 * @wmi_hdl: handle to WMI
 * @param: CTL table parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_set_ctl_table_cmd_send(void *wmi_hdl,
				struct ctl_table_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_set_ctl_table_cmd)
		return wmi_handle->ops->send_set_ctl_table_cmd(wmi_handle,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_set_mimogain_table_cmd_send() - send set MIMO-gain table cmd
 * @wmi_hdl: handle to WMI
 * @param: MIMO-gain table parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_set_mimogain_table_cmd_send(void *wmi_hdl,
				struct mimogain_table_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_set_mimogain_table_cmd)
		return wmi_handle->ops->send_set_mimogain_table_cmd(wmi_handle,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_set_ratepwr_chainmsk_cmd_send() - send rate-power chainmask cmd
 * @wmi_hdl: handle to WMI
 * @param: rate-power chainmask parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_set_ratepwr_chainmsk_cmd_send(void *wmi_hdl,
				struct ratepwr_chainmsk_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t) wmi_hdl;

	if (wmi->ops->send_set_ratepwr_chainmsk_cmd)
		return wmi->ops->send_set_ratepwr_chainmsk_cmd(wmi, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_set_macaddr_cmd_send() - send set MAC address command
 * @wmi_hdl: handle to WMI
 * @param: MAC address parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_set_macaddr_cmd_send(void *wmi_hdl,
				struct macaddr_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_set_macaddr_cmd)
		return wmi_handle->ops->send_set_macaddr_cmd(wmi_handle, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_pdev_scan_start_cmd_send() - send pdev scan-start command
 * @wmi_hdl: handle to WMI
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_pdev_scan_start_cmd_send(void *wmi_hdl)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_pdev_scan_start_cmd)
		return wmi_handle->ops->send_pdev_scan_start_cmd(wmi_handle);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_pdev_scan_end_cmd_send() - send pdev scan-end command
 * @wmi_hdl: handle to WMI
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_pdev_scan_end_cmd_send(void *wmi_hdl)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_pdev_scan_end_cmd)
		return wmi_handle->ops->send_pdev_scan_end_cmd(wmi_handle);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_set_acparams_cmd_send() - send set AC parameters command
 * @wmi_hdl: handle to WMI
 * @param: AC parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_set_acparams_cmd_send(void *wmi_hdl,
				struct acparams_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_set_acparams_cmd)
		return wmi_handle->ops->send_set_acparams_cmd(wmi_handle,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_set_vap_dscp_tid_map_cmd_send() - send vap DSCP-to-TID map cmd
 * @wmi_hdl: handle to WMI
 * @param: DSCP-to-TID map parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_set_vap_dscp_tid_map_cmd_send(void *wmi_hdl,
				struct vap_dscp_tid_map_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t) wmi_hdl;

	if (wmi->ops->send_set_vap_dscp_tid_map_cmd)
		return wmi->ops->send_set_vap_dscp_tid_map_cmd(wmi, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_proxy_ast_reserve_cmd_send() - WMI proxy ast
 * reserve cmd function
 * @param wmi_handle : handle to WMI.
 * @param param : pointer to hold ast param
 *
 * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
 */
QDF_STATUS wmi_unified_proxy_ast_reserve_cmd_send(void *wmi_hdl,
				struct proxy_ast_reserve_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_proxy_ast_reserve_cmd)
		return wmi_handle->ops->send_proxy_ast_reserve_cmd(wmi_handle,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_pdev_qvit_cmd_send() - send pdev QVIT command
 * @wmi_hdl: handle to WMI
 * @param: QVIT parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_pdev_qvit_cmd_send(void *wmi_hdl,
				struct pdev_qvit_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_pdev_qvit_cmd)
		return wmi_handle->ops->send_pdev_qvit_cmd(wmi_handle, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_mcast_group_update_cmd_send() - send multicast group update cmd
 * @wmi_hdl: handle to WMI
 * @param: multicast group parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_mcast_group_update_cmd_send(void *wmi_hdl,
				struct mcast_group_update_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_mcast_group_update_cmd)
		return wmi_handle->ops->send_mcast_group_update_cmd(wmi_handle,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_peer_add_wds_entry_cmd_send() - send add WDS entry command
 * @wmi_hdl: handle to WMI
 * @param: WDS entry parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_peer_add_wds_entry_cmd_send(void *wmi_hdl,
				struct peer_add_wds_entry_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_peer_add_wds_entry_cmd)
		return wmi_handle->ops->send_peer_add_wds_entry_cmd(wmi_handle,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_peer_del_wds_entry_cmd_send() - send delete WDS entry command
 * @wmi_hdl: handle to WMI
 * @param: WDS entry parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_peer_del_wds_entry_cmd_send(void *wmi_hdl,
				struct peer_del_wds_entry_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_peer_del_wds_entry_cmd)
		return wmi_handle->ops->send_peer_del_wds_entry_cmd(wmi_handle,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_set_bridge_mac_addr_cmd_send() - send set bridge MAC addr cmd
 * @wmi_hdl: handle to WMI
 * @param: bridge MAC address parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_set_bridge_mac_addr_cmd_send(void *wmi_hdl,
				struct set_bridge_mac_addr_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_set_bridge_mac_addr_cmd)
		return wmi_handle->ops->send_set_bridge_mac_addr_cmd(wmi_handle,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_peer_update_wds_entry_cmd_send() - send update WDS entry cmd
 * @wmi_hdl: handle to WMI
 * @param: WDS entry parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_peer_update_wds_entry_cmd_send(void *wmi_hdl,
				struct peer_update_wds_entry_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t) wmi_hdl;

	if (wmi->ops->send_peer_update_wds_entry_cmd)
		return wmi->ops->send_peer_update_wds_entry_cmd(wmi, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_phyerr_enable_cmd_send() - send PHY-error enable command
 * @wmi_hdl: handle to WMI
 *
 * Takes no parameter struct (original comment documented a non-existent
 * @param argument).
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_phyerr_enable_cmd_send(void *wmi_hdl)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_phyerr_enable_cmd)
		return wmi_handle->ops->send_phyerr_enable_cmd(wmi_handle);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_phyerr_disable_cmd_send() - send PHY-error disable command
 * @wmi_hdl: handle to WMI
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_phyerr_disable_cmd_send(void *wmi_hdl)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_phyerr_disable_cmd)
		return wmi_handle->ops->send_phyerr_disable_cmd(wmi_handle);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_smart_ant_enable_cmd_send() - WMI smart ant enable function
 * @param wmi_handle : handle to WMI.
 * @param param : pointer to hold antenna param
 *
 * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
 */
QDF_STATUS wmi_unified_smart_ant_enable_cmd_send(void *wmi_hdl,
				struct smart_ant_enable_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_smart_ant_enable_cmd)
		return wmi_handle->ops->send_smart_ant_enable_cmd(wmi_handle,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_smart_ant_set_rx_ant_cmd_send() - send smart-antenna set RX
 * antenna command
 * @wmi_hdl: handle to WMI
 * @param: RX antenna parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_smart_ant_set_rx_ant_cmd_send(void *wmi_hdl,
				struct smart_ant_rx_ant_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t) wmi_hdl;

	if (wmi->ops->send_smart_ant_set_rx_ant_cmd)
		return wmi->ops->send_smart_ant_set_rx_ant_cmd(wmi, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_smart_ant_set_tx_ant_cmd_send() - send smart-antenna set TX
 * antenna command
 * @wmi_hdl: handle to WMI
 * @macaddr: peer MAC address
 * @param: TX antenna parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_smart_ant_set_tx_ant_cmd_send(void *wmi_hdl,
				uint8_t macaddr[IEEE80211_ADDR_LEN],
				struct smart_ant_tx_ant_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t) wmi_hdl;

	if (wmi->ops->send_smart_ant_set_tx_ant_cmd)
		return wmi->ops->send_smart_ant_set_tx_ant_cmd(wmi, macaddr,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_smart_ant_set_training_info_cmd_send() - send smart-antenna
 * training info command
 * @wmi_hdl: handle to WMI
 * @macaddr: peer MAC address
 * @param: training info parameters
 *
 * NOTE(review): original comment said "set tx antenna" — copy/paste error.
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_smart_ant_set_training_info_cmd_send(void *wmi_hdl,
				uint8_t macaddr[IEEE80211_ADDR_LEN],
				struct smart_ant_training_info_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t) wmi_hdl;

	if (wmi->ops->send_smart_ant_set_training_info_cmd)
		return wmi->ops->send_smart_ant_set_training_info_cmd(wmi,
					macaddr, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_smart_ant_node_config_cmd_send() - send smart-antenna node
 * config command
 * @wmi_hdl: handle to WMI
 * @macaddr: peer MAC address
 * @param: node configuration parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_smart_ant_node_config_cmd_send(void *wmi_hdl,
				uint8_t macaddr[IEEE80211_ADDR_LEN],
				struct smart_ant_node_config_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t) wmi_hdl;

	if (wmi->ops->send_smart_ant_set_node_config_cmd)
		return wmi->ops->send_smart_ant_set_node_config_cmd(wmi,
					macaddr, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_smart_ant_enable_tx_feedback_cmd_send() - send smart-antenna
 * TX feedback enable command
 * @wmi_hdl: handle to WMI
 * @param: TX feedback parameters
 *
 * NOTE(review): original comment said "set tx antenna" — copy/paste error.
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_smart_ant_enable_tx_feedback_cmd_send(void *wmi_hdl,
				struct smart_ant_enable_tx_feedback_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t) wmi_hdl;

	if (wmi->ops->send_smart_ant_enable_tx_feedback_cmd)
		return wmi->ops->send_smart_ant_enable_tx_feedback_cmd(wmi,
					param);

	return QDF_STATUS_E_FAILURE;
}
qdf_export_symbol(wmi_unified_smart_ant_enable_tx_feedback_cmd_send);

/**
 * wmi_unified_vdev_spectral_configure_cmd_send() - send spectral configure cmd
 * @wmi_hdl: handle to WMI
 * @param: spectral configuration parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_vdev_spectral_configure_cmd_send(void *wmi_hdl,
				struct vdev_spectral_configure_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t) wmi_hdl;

	if (wmi->ops->send_vdev_spectral_configure_cmd)
		return wmi->ops->send_vdev_spectral_configure_cmd(wmi, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_vdev_spectral_enable_cmd_send() - send spectral enable cmd
 * @wmi_hdl: handle to WMI
 * @param: spectral enable parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_vdev_spectral_enable_cmd_send(void *wmi_hdl,
				struct vdev_spectral_enable_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t) wmi_hdl;

	if (wmi->ops->send_vdev_spectral_enable_cmd)
		return wmi->ops->send_vdev_spectral_enable_cmd(wmi, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_bss_chan_info_request_cmd_send() - WMI bss chan info request function
 * @param wmi_handle : handle to WMI.
 * @param param : pointer to hold chan info param
 *
 * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
 */
QDF_STATUS wmi_unified_bss_chan_info_request_cmd_send(void *wmi_hdl,
				struct bss_chan_info_request_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t) wmi_hdl;

	if (wmi->ops->send_bss_chan_info_request_cmd)
		return wmi->ops->send_bss_chan_info_request_cmd(wmi, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_thermal_mitigation_param_cmd_send() - send thermal mitigation cmd
 * @wmi_hdl: handle to WMI
 * @param: thermal mitigation parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_thermal_mitigation_param_cmd_send(void *wmi_hdl,
				struct thermal_mitigation_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t) wmi_hdl;

	if (wmi->ops->send_thermal_mitigation_param_cmd)
		return wmi->ops->send_thermal_mitigation_param_cmd(wmi, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_vdev_set_neighbour_rx_cmd_send() - send set neighbour RX cmd
 * @wmi_hdl: handle to WMI
 * @macaddr: neighbour MAC address
 * @param: neighbour RX parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_vdev_set_neighbour_rx_cmd_send(void *wmi_hdl,
				uint8_t macaddr[IEEE80211_ADDR_LEN],
				struct set_neighbour_rx_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t) wmi_hdl;

	if (wmi->ops->send_vdev_set_neighbour_rx_cmd)
		return wmi->ops->send_vdev_set_neighbour_rx_cmd(wmi,
					macaddr, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_vdev_set_fwtest_param_cmd_send() - send FW-test parameter cmd
 * @wmi_hdl: handle to WMI
 * @param: FW-test parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_vdev_set_fwtest_param_cmd_send(void *wmi_hdl,
				struct set_fwtest_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t) wmi_hdl;

	if (wmi->ops->send_vdev_set_fwtest_param_cmd)
		return wmi->ops->send_vdev_set_fwtest_param_cmd(wmi, param);

	return QDF_STATUS_E_FAILURE;
}

#ifdef WLAN_SUPPORT_FILS
/**
 * wmi_unified_fils_discovery_send_cmd() - send FILS discovery frame command
 * @wmi_hdl: handle to WMI
 * @param: FILS discovery parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS
wmi_unified_fils_discovery_send_cmd(void *wmi_hdl, struct fd_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl;

	if (wmi_handle->ops->send_fils_discovery_send_cmd)
		return wmi_handle->ops->send_fils_discovery_send_cmd(wmi_handle,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_fils_vdev_config_send_cmd() - send vdev FILS enable command
 * @wmi_hdl: handle to WMI
 * @param: FILS configuration parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS
wmi_unified_fils_vdev_config_send_cmd(void *wmi_hdl,
				struct config_fils_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t)wmi_hdl;

	if (wmi->ops->send_vdev_fils_enable_cmd)
		return wmi->ops->send_vdev_fils_enable_cmd(wmi, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_extract_swfda_vdev_id() - extract vdev id from a SWFDA event
 * @wmi_hdl: handle to WMI
 * @evt_buf: pointer to the event buffer
 * @vdev_id: output; vdev id extracted from the event
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS
wmi_extract_swfda_vdev_id(void *wmi_hdl, void *evt_buf,
				uint32_t *vdev_id)
{
	wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl;

	if (wmi_handle->ops->extract_swfda_vdev_id)
		return wmi_handle->ops->extract_swfda_vdev_id(wmi_handle,
					evt_buf, vdev_id);

	return QDF_STATUS_E_FAILURE;
}
#endif /* WLAN_SUPPORT_FILS */

/**
 * wmi_unified_vdev_config_ratemask_cmd_send() - send config ratemask command
 * @wmi_hdl: handle to WMI
 * @param: ratemask configuration parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_vdev_config_ratemask_cmd_send(void *wmi_hdl,
				struct config_ratemask_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t) wmi_hdl;

	if (wmi->ops->send_vdev_config_ratemask_cmd)
		return wmi->ops->send_vdev_config_ratemask_cmd(wmi, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_vdev_set_custom_aggr_size_cmd_send() - send custom aggregation
 * size command
 * @wmi_hdl: handle to WMI
 * @param: custom aggregation size parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented (original comment's "QDF_STATUS_R_FAILURE"
 * was a typo)
 */
QDF_STATUS wmi_unified_vdev_set_custom_aggr_size_cmd_send(void *wmi_hdl,
				struct set_custom_aggr_size_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t)wmi_hdl;

	if (wmi->ops->send_vdev_set_custom_aggr_size_cmd)
		return wmi->ops->send_vdev_set_custom_aggr_size_cmd(wmi, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_vdev_set_qdepth_thresh_cmd_send() - send queue-depth threshold
 * command
 * @wmi_hdl: handle to WMI
 * @param: queue-depth threshold parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_vdev_set_qdepth_thresh_cmd_send(void *wmi_hdl,
				struct set_qdepth_thresh_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t)wmi_hdl;

	if (wmi->ops->send_vdev_set_qdepth_thresh_cmd)
		return wmi->ops->send_vdev_set_qdepth_thresh_cmd(wmi, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_pdev_set_regdomain_cmd_send() - WMI set regdomain function
 * @param wmi_handle : handle to WMI.
 * @param param : pointer to hold regdomain param
 *
 * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
 */
QDF_STATUS wmi_unified_pdev_set_regdomain_cmd_send(void *wmi_hdl,
				struct pdev_set_regdomain_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_pdev_set_regdomain_cmd)
		return wmi_handle->ops->send_pdev_set_regdomain_cmd(wmi_handle,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_set_quiet_mode_cmd_send() - send set quiet-mode command
 * @wmi_hdl: handle to WMI
 * @param: quiet-mode parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_set_quiet_mode_cmd_send(void *wmi_hdl,
				struct set_quiet_mode_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_set_quiet_mode_cmd)
		return wmi_handle->ops->send_set_quiet_mode_cmd(wmi_handle,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_set_beacon_filter_cmd_send() - send set beacon filter command
 * @wmi_hdl: handle to WMI
 * @param: beacon filter parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_set_beacon_filter_cmd_send(void *wmi_hdl,
				struct set_beacon_filter_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_set_beacon_filter_cmd)
		return wmi_handle->ops->send_set_beacon_filter_cmd(wmi_handle,
					param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_remove_beacon_filter_cmd_send() - send remove beacon filter cmd
 * @wmi_hdl: handle to WMI
 * @param: beacon filter parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_remove_beacon_filter_cmd_send(void *wmi_hdl,
				struct remove_beacon_filter_params *param)
{
	wmi_unified_t wmi = (wmi_unified_t) wmi_hdl;

	if (wmi->ops->send_remove_beacon_filter_cmd)
		return wmi->ops->send_remove_beacon_filter_cmd(wmi, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_mgmt_cmd_send() - send WMI mgmt command
 * @wmi_hdl: handle to WMI
 * @macaddr: MAC address
 * @param: mgmt parameters
 *
 * Currently compiled out (#if 0); kept for reference.
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
#if 0
QDF_STATUS wmi_unified_mgmt_cmd_send(void *wmi_hdl,
				uint8_t macaddr[IEEE80211_ADDR_LEN],
				struct mgmt_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_mgmt_cmd)
		return wmi_handle->ops->send_mgmt_cmd(wmi_handle,
					macaddr, param);

	return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * wmi_unified_addba_clearresponse_cmd_send() - send ADDBA clear-response cmd
 * @wmi_hdl: handle to WMI
 * @macaddr: peer MAC address
 * @param: ADDBA clear-response parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_addba_clearresponse_cmd_send(void *wmi_hdl,
				uint8_t macaddr[IEEE80211_ADDR_LEN],
				struct addba_clearresponse_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_addba_clearresponse_cmd)
		return wmi_handle->ops->send_addba_clearresponse_cmd(wmi_handle,
					macaddr, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_addba_send_cmd_send() - send ADDBA command
 * @wmi_hdl: handle to WMI
 * @macaddr: peer MAC address
 * @param: ADDBA parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_addba_send_cmd_send(void *wmi_hdl,
				uint8_t macaddr[IEEE80211_ADDR_LEN],
				struct addba_send_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_addba_send_cmd)
		return wmi_handle->ops->send_addba_send_cmd(wmi_handle,
					macaddr, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_delba_send_cmd_send() - send DELBA command
 * @wmi_hdl: handle to WMI
 * @macaddr: peer MAC address
 * @param: DELBA parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_delba_send_cmd_send(void *wmi_hdl,
				uint8_t macaddr[IEEE80211_ADDR_LEN],
				struct delba_send_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_delba_send_cmd)
		return wmi_handle->ops->send_delba_send_cmd(wmi_handle,
					macaddr, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_addba_setresponse_cmd_send() - send ADDBA set-response command
 * @wmi_hdl: handle to WMI
 * @macaddr: peer MAC address
 * @param: ADDBA set-response parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_addba_setresponse_cmd_send(void *wmi_hdl,
				uint8_t macaddr[IEEE80211_ADDR_LEN],
				struct addba_setresponse_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_addba_setresponse_cmd)
		return wmi_handle->ops->send_addba_setresponse_cmd(wmi_handle,
					macaddr, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_singleamsdu_cmd_send() - WMI singleamsdu function
 * @param wmi_handle : handle to WMI.
 * @param macaddr : MAC address
 * @param param : pointer to hold singleamsdu parameter
 *
 * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
 */
QDF_STATUS wmi_unified_singleamsdu_cmd_send(void *wmi_hdl,
				uint8_t macaddr[IEEE80211_ADDR_LEN],
				struct singleamsdu_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_singleamsdu_cmd)
		return wmi_handle->ops->send_singleamsdu_cmd(wmi_handle,
					macaddr, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_set_qboost_param_cmd_send() - send set-qboost command
 * @wmi_hdl: handle to WMI
 * @macaddr: peer MAC address
 * @param: qboost parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_set_qboost_param_cmd_send(void *wmi_hdl,
				uint8_t macaddr[IEEE80211_ADDR_LEN],
				struct set_qboost_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_set_qboost_param_cmd)
		return wmi_handle->ops->send_set_qboost_param_cmd(wmi_handle,
					macaddr, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_mu_scan_cmd_send() - send MU scan command
 * @wmi_hdl: handle to WMI
 * @param: MU scan parameters
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_mu_scan_cmd_send(void *wmi_hdl,
				struct mu_scan_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_mu_scan_cmd)
		return wmi_handle->ops->send_mu_scan_cmd(wmi_handle, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_lteu_config_cmd_send() - send LTE-U configuration command
 * @wmi_hdl: handle to WMI
 * @param: LTE-U configuration parameters
 *
 * NOTE(review): original comment said "set mu scan" — copy/paste error.
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_lteu_config_cmd_send(void *wmi_hdl,
				struct lteu_config_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_lteu_config_cmd)
		return wmi_handle->ops->send_lteu_config_cmd(wmi_handle, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_set_psmode_cmd_send() - send set power-save mode command
 * @wmi_hdl: handle to WMI
 * @param: power-save mode parameters
 *
 * NOTE(review): original comment said "set mu scan" — copy/paste error.
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_set_psmode_cmd_send(void *wmi_hdl,
				struct set_ps_mode_params *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->send_set_ps_mode_cmd)
		return wmi_handle->ops->send_set_ps_mode_cmd(wmi_handle, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_init_cmd_send() - send initialization cmd to fw
 * @wmi_hdl: wmi handle
 * @param: pointer to wmi init param
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_unified_init_cmd_send(void *wmi_hdl,
				struct wmi_init_cmd_param *param)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	if (wmi_handle->ops->init_cmd_send)
		return wmi_handle->ops->init_cmd_send(wmi_handle, param);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_save_service_bitmap() - save service bitmap
 * @wmi_hdl: wmi handle
 * @evt_buf: pointer to event buffer
 * @bitmap_buf: output buffer the bitmap is saved into
 *
 * Return: QDF_STATUS from the ops callback, or QDF_STATUS_E_FAILURE if the
 * callback is not implemented
 */
QDF_STATUS wmi_save_service_bitmap(void *wmi_hdl, void *evt_buf,
				void *bitmap_buf)
{
	struct wmi_unified *wmi_handle = (struct wmi_unified *) wmi_hdl;

	if (wmi_handle->ops->save_service_bitmap) {
		return wmi_handle->ops->save_service_bitmap(wmi_handle, evt_buf,
bitmap_buf); + } + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_save_ext_service_bitmap() - save extended service bitmap + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_save_ext_service_bitmap(void *wmi_hdl, void *evt_buf, + void *bitmap_buf) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *) wmi_hdl; + + if (wmi_handle->ops->save_ext_service_bitmap) { + return wmi_handle->ops->save_ext_service_bitmap(wmi_handle, + evt_buf, bitmap_buf); + } + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_save_fw_version() - Save fw version + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_save_fw_version(void *wmi_hdl, void *evt_buf) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *) wmi_hdl; + + if (wmi_handle->ops->save_fw_version) { + wmi_handle->ops->save_fw_version(wmi_handle, evt_buf); + return 0; + } + return QDF_STATUS_E_FAILURE; + +} + +/** + * wmi_check_and_update_fw_version() - Ready and fw version check + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_check_and_update_fw_version(void *wmi_hdl, void *evt_buf) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *) wmi_hdl; + + if (wmi_handle->ops->check_and_update_fw_version) + return wmi_handle->ops->check_and_update_fw_version(wmi_handle, + evt_buf); + + return QDF_STATUS_E_FAILURE; + +} + +/** + * wmi_service_enabled() - Check if service enabled + * @param wmi_handle: wmi handle + * @param service_id: service identifier + * + * Return: 1 enabled, 0 disabled + */ +bool wmi_service_enabled(void *wmi_hdl, uint32_t service_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if ((service_id < 
wmi_services_max) && + (wmi_handle->services[service_id] != WMI_SERVICE_UNAVAILABLE)) { + if (wmi_handle->ops->is_service_enabled) { + return wmi_handle->ops->is_service_enabled(wmi_handle, + wmi_handle->services[service_id]); + } + } else { + qdf_print("Support not added yet for Service %d\n", service_id); + } + + return false; +} + +/** + * wmi_get_target_cap_from_service_ready() - extract service ready event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to received event buffer + * @param ev: pointer to hold target capability information extracted from even + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_get_target_cap_from_service_ready(void *wmi_hdl, + void *evt_buf, struct wlan_psoc_target_capability_info *ev) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->get_target_cap_from_service_ready) + return wmi->ops->get_target_cap_from_service_ready(wmi, + evt_buf, ev); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_fw_version() - extract fw version + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param fw_ver: Pointer to hold fw version + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_fw_version(void *wmi_hdl, + void *evt_buf, struct wmi_host_fw_ver *fw_ver) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_fw_version) + return wmi_handle->ops->extract_fw_version(wmi_handle, + evt_buf, fw_ver); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_fw_abi_version() - extract fw abi version + * @wmi_handle: wmi handle + * @param evt_buf: Pointer to event buffer + * @param fw_ver: Pointer to hold fw abi version + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_fw_abi_version(void *wmi_hdl, + void *evt_buf, struct wmi_host_fw_abi_ver *fw_ver) +{ + wmi_unified_t wmi_handle = 
(wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_fw_abi_version) + return wmi_handle->ops->extract_fw_abi_version(wmi_handle, + evt_buf, fw_ver); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_hal_reg_cap() - extract HAL registered capabilities + * @wmi_handle: wmi handle + * @param evt_buf: Pointer to event buffer + * @param hal_reg_cap: pointer to hold HAL reg capabilities + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_hal_reg_cap(void *wmi_hdl, void *evt_buf, + struct wlan_psoc_hal_reg_capability *hal_reg_cap) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_hal_reg_cap) + return wmi_handle->ops->extract_hal_reg_cap(wmi_handle, + evt_buf, hal_reg_cap); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_host_mem_req_from_service_ready() - Extract host memory + * request event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param num_entries: pointer to hold number of entries requested + * + * Return: Number of entries requested + */ +host_mem_req *wmi_extract_host_mem_req_from_service_ready(void *wmi_hdl, + void *evt_buf, uint8_t *num_entries) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_host_mem_req) + return wmi_handle->ops->extract_host_mem_req(wmi_handle, + evt_buf, num_entries); + + *num_entries = 0; + return NULL; +} + +/** + * wmi_ready_extract_init_status() - Extract init status from ready event + * @wmi_handle: wmi handle + * @param ev: Pointer to event buffer + * + * Return: ready status + */ +uint32_t wmi_ready_extract_init_status(void *wmi_hdl, void *ev) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->ready_extract_init_status) + return wmi_handle->ops->ready_extract_init_status(wmi_handle, + ev); + + + return 1; + +} + +/** + * wmi_ready_extract_mac_addr() - extract mac address from ready event + * @wmi_handle: 
wmi handle + * @param ev: pointer to event buffer + * @param macaddr: Pointer to hold MAC address + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_ready_extract_mac_addr(void *wmi_hdl, void *ev, uint8_t *macaddr) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->ready_extract_mac_addr) + return wmi_handle->ops->ready_extract_mac_addr(wmi_handle, + ev, macaddr); + + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_ready_extract_mac_addr() - extract MAC address list from ready event + * @wmi_handle: wmi handle + * @param ev: pointer to event buffer + * @param num_mac_addr: Pointer to number of entries + * + * Return: address to start of mac addr list + */ +wmi_host_mac_addr *wmi_ready_extract_mac_addr_list(void *wmi_hdl, void *ev, + uint8_t *num_mac_addr) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->ready_extract_mac_addr_list) + return wmi_handle->ops->ready_extract_mac_addr_list(wmi_handle, + ev, num_mac_addr); + + *num_mac_addr = 0; + + return NULL; +} + +/** + * wmi_extract_ready_params() - Extract data from ready event apart from + * status, macaddr and version. + * @wmi_handle: Pointer to WMI handle. + * @evt_buf: Pointer to Ready event buffer. + * @ev_param: Pointer to host defined struct to copy the data from event. + * + * Return: QDF_STATUS_SUCCESS on success. 
+ */ +QDF_STATUS wmi_extract_ready_event_params(void *wmi_hdl, + void *evt_buf, struct wmi_host_ready_ev_param *ev_param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_ready_event_params) + return wmi_handle->ops->extract_ready_event_params(wmi_handle, + evt_buf, ev_param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_dbglog_data_len() - extract debuglog data length + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param len: length of buffer + * + * Return: length + */ +uint8_t *wmi_extract_dbglog_data_len(void *wmi_hdl, void *evt_buf, + uint32_t *len) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_dbglog_data_len) + return wmi_handle->ops->extract_dbglog_data_len(wmi_handle, + evt_buf, len); + + + return NULL; +} +qdf_export_symbol(wmi_extract_dbglog_data_len); + +/** + * wmi_send_ext_resource_config() - send extended resource configuration + * @wmi_handle: wmi handle + * @param ext_cfg: pointer to extended resource configuration + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_send_ext_resource_config(void *wmi_hdl, + wmi_host_ext_resource_config *ext_cfg) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_ext_resource_config) + return wmi_handle->ops->send_ext_resource_config(wmi_handle, + ext_cfg); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_nf_dbr_dbm_info_get_cmd_send() - WMI request nf info function + * @param wmi_handle : handle to WMI. 
+ * @mac_id: mac_id + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_nf_dbr_dbm_info_get_cmd_send(void *wmi_hdl, + uint8_t mac_id) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->send_nf_dbr_dbm_info_get_cmd) + return wmi->ops->send_nf_dbr_dbm_info_get_cmd(wmi, mac_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_packet_power_info_get_cmd_send() - WMI get packet power info function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold packet power info param + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_packet_power_info_get_cmd_send(void *wmi_hdl, + struct packet_power_info_params *param) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->send_packet_power_info_get_cmd) + return wmi->ops->send_packet_power_info_get_cmd(wmi, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_gpio_config_cmd_send() - WMI gpio config function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold gpio config param + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_gpio_config_cmd_send(void *wmi_hdl, + struct gpio_config_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_gpio_config_cmd) + return wmi_handle->ops->send_gpio_config_cmd(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_gpio_output_cmd_send() - WMI gpio config function + * @param wmi_handle : handle to WMI. 
+ * @param param : pointer to hold gpio config param + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_gpio_output_cmd_send(void *wmi_hdl, + struct gpio_output_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_gpio_output_cmd) + return wmi_handle->ops->send_gpio_output_cmd(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_rtt_meas_req_test_cmd_send() - WMI rtt meas req test function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold rtt meas req test param + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_rtt_meas_req_test_cmd_send(void *wmi_hdl, + struct rtt_meas_req_test_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_rtt_meas_req_test_cmd) + return wmi_handle->ops->send_rtt_meas_req_test_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_rtt_meas_req_cmd_send() - WMI rtt meas req function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold rtt meas req param + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_rtt_meas_req_cmd_send(void *wmi_hdl, + struct rtt_meas_req_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_rtt_meas_req_cmd) + return wmi_handle->ops->send_rtt_meas_req_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_lci_set_cmd_send() - WMI lci set function + * @param wmi_handle : handle to WMI. 
+ * @param param : pointer to hold lci param + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_lci_set_cmd_send(void *wmi_hdl, + struct lci_set_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_lci_set_cmd) + return wmi_handle->ops->send_lci_set_cmd(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_lcr_set_cmd_send() - WMI lcr set function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold lcr param + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_lcr_set_cmd_send(void *wmi_hdl, + struct lcr_set_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_lcr_set_cmd) + return wmi_handle->ops->send_lcr_set_cmd(wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_rtt_keepalive_req_cmd_send() - WMI rtt meas req test function + * @param wmi_handle : handle to WMI. 
+ * @param param : pointer to hold rtt meas req test param + * + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_rtt_keepalive_req_cmd_send(void *wmi_hdl, + struct rtt_keepalive_req_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_rtt_keepalive_req_cmd) + return wmi_handle->ops->send_rtt_keepalive_req_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_send_periodic_chan_stats_config_cmd() - send periodic chan stats cmd + * to fw + * @wmi_handle: wmi handle + * @param: pointer to hold periodic chan stats param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_send_periodic_chan_stats_config_cmd(void *wmi_hdl, + struct periodic_chan_stats_params *param) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->send_periodic_chan_stats_config_cmd) + return wmi->ops->send_periodic_chan_stats_config_cmd(wmi, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_send_atf_peer_request_cmd() - send atf peer request command to fw + * @wmi_handle: wmi handle + * @param: pointer to atf peer request param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_send_atf_peer_request_cmd(void *wmi_hdl, + struct atf_peer_request_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_atf_peer_request_cmd) + return wmi_handle->ops->send_atf_peer_request_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_send_set_atf_grouping_cmd() - send set atf grouping command to fw + * @wmi_handle: wmi handle + * @param: pointer to set atf grouping param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_send_set_atf_grouping_cmd(void *wmi_hdl, + struct atf_grouping_params *param) +{ + wmi_unified_t 
wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_atf_grouping_cmd) + return wmi_handle->ops->send_set_atf_grouping_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; + +} + +/** + * wmi_send_get_user_position_cmd() - send get user position command to fw + * @wmi_handle: wmi handle + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_send_get_user_position_cmd(void *wmi_hdl, uint32_t value) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_get_user_position_cmd) + return wmi_handle->ops->send_get_user_position_cmd(wmi_handle, + value); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_send_get_peer_mumimo_tx_count_cmd() - send get mumio tx count + * command to fw + * @wmi_handle: wmi handle + * @value: user pos value + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_send_get_peer_mumimo_tx_count_cmd(void *wmi_hdl, uint32_t value) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->send_get_peer_mumimo_tx_count_cmd) + return wmi->ops->send_get_peer_mumimo_tx_count_cmd(wmi, value); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_send_reset_peer_mumimo_tx_count_cmd() - send reset peer mumimo + * tx count to fw + * @wmi_handle: wmi handle + * @value: reset tx count value + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS +wmi_send_reset_peer_mumimo_tx_count_cmd(void *wmi_hdl, uint32_t value) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->send_reset_peer_mumimo_tx_count_cmd) + return wmi->ops->send_reset_peer_mumimo_tx_count_cmd(wmi, + value); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_send_pdev_caldata_version_check_cmd() - send reset peer mumimo + * tx count to fw + * @wmi_handle: wmi handle + * @value: value + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ 
+QDF_STATUS +wmi_send_pdev_caldata_version_check_cmd(void *wmi_hdl, uint32_t value) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->send_pdev_caldata_version_check_cmd) + return wmi->ops->send_pdev_caldata_version_check_cmd(wmi, + value); + + return QDF_STATUS_E_FAILURE; +} + +/* Extract - APIs */ +/** + * wmi_extract_wds_addr_event - Extract WDS addr WMI event + * + * @param wmi_handle : handle to WMI. + * @param evt_buf : pointer to event buffer + * @param len : length of the event buffer + * @param wds_ev: pointer to strct to extract + * @return QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_wds_addr_event(void *wmi_hdl, void *evt_buf, + uint16_t len, wds_addr_event_t *wds_ev) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_wds_addr_event) { + return wmi_handle->ops->extract_wds_addr_event(wmi_handle, + evt_buf, len, wds_ev); + } + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_extract_wds_addr_event); + +/** + * wmi_extract_dcs_interference_type() - extract dcs interference type + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold dcs interference param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_dcs_interference_type(void *wmi_hdl, + void *evt_buf, struct wmi_host_dcs_interference_param *param) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_dcs_interference_type) { + return wmi->ops->extract_dcs_interference_type(wmi, + evt_buf, param); + } + return QDF_STATUS_E_FAILURE; +} + +/* + * wmi_extract_dcs_cw_int() - extract dcs cw interference from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param cw_int: Pointer to hold cw interference + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_dcs_cw_int(void 
*wmi_hdl, void *evt_buf, + wmi_host_ath_dcs_cw_int *cw_int) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_dcs_cw_int) { + return wmi_handle->ops->extract_dcs_cw_int(wmi_handle, + evt_buf, cw_int); + } + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_dcs_im_tgt_stats() - extract dcs im target stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param wlan_stat: Pointer to hold wlan stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_dcs_im_tgt_stats(void *wmi_hdl, void *evt_buf, + wmi_host_dcs_im_tgt_stats_t *wlan_stat) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_dcs_im_tgt_stats) { + return wmi_handle->ops->extract_dcs_im_tgt_stats(wmi_handle, + evt_buf, wlan_stat); + } + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_fips_event_data() - extract fips event data + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: pointer to FIPS event param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_fips_event_data(void *wmi_hdl, void *evt_buf, + struct wmi_host_fips_event_param *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_fips_event_data) { + return wmi_handle->ops->extract_fips_event_data(wmi_handle, + evt_buf, param); + } + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_vdev_start_resp() - extract vdev start response + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param vdev_rsp: Pointer to hold vdev response + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_vdev_start_resp(void *wmi_hdl, void *evt_buf, + wmi_host_vdev_start_resp *vdev_rsp) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if 
(wmi_handle->ops->extract_vdev_start_resp) + return wmi_handle->ops->extract_vdev_start_resp(wmi_handle, + evt_buf, vdev_rsp); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_vdev_delete_resp() - extract vdev delete response + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param delete_rsp: Pointer to hold vdev delete response + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_vdev_delete_resp(void *wmi_hdl, void *evt_buf, + struct wmi_host_vdev_delete_resp *delete_rsp) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_vdev_delete_resp) + return wmi_handle->ops->extract_vdev_delete_resp(wmi_handle, + evt_buf, delete_rsp); + + return QDF_STATUS_E_FAILURE; +} + + +/** + * wmi_extract_tbttoffset_num_vdevs() - extract tbtt offset num vdev + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param vdev_map: Pointer to hold num vdev + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_tbttoffset_num_vdevs(void *wmi_hdl, void *evt_buf, + uint32_t *num_vdevs) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_tbttoffset_num_vdevs) + return wmi->ops->extract_tbttoffset_num_vdevs(wmi, + evt_buf, num_vdevs); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_ext_tbttoffset_num_vdevs() - extract ext tbtt offset num vdev + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param vdev_map: Pointer to hold num vdev + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_ext_tbttoffset_num_vdevs(void *wmi_hdl, void *evt_buf, + uint32_t *num_vdevs) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_ext_tbttoffset_num_vdevs) + return wmi->ops->extract_ext_tbttoffset_num_vdevs(wmi, + evt_buf, num_vdevs); + + return 
QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_tbttoffset_update_params() - extract tbtt offset update param + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param idx: Index referring to a vdev + * @param tbtt_param: Pointer to tbttoffset event param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_tbttoffset_update_params(void *wmi_hdl, void *evt_buf, + uint8_t idx, struct tbttoffset_params *tbtt_param) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_tbttoffset_update_params) + return wmi->ops->extract_tbttoffset_update_params(wmi, + evt_buf, idx, tbtt_param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_ext_tbttoffset_update_params() - extract tbtt offset update param + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param idx: Index referring to a vdev + * @param tbtt_param: Pointer to tbttoffset event param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_ext_tbttoffset_update_params(void *wmi_hdl, + void *evt_buf, uint8_t idx, struct tbttoffset_params *tbtt_param) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_ext_tbttoffset_update_params) + return wmi->ops->extract_ext_tbttoffset_update_params(wmi, + evt_buf, idx, tbtt_param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_mgmt_rx_params() - extract management rx params from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param hdr: Pointer to hold header + * @param bufp: Pointer to hold pointer to rx param buffer + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_mgmt_rx_params(void *wmi_hdl, void *evt_buf, + struct mgmt_rx_event_params *hdr, uint8_t **bufp) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if 
(wmi_handle->ops->extract_mgmt_rx_params) + return wmi_handle->ops->extract_mgmt_rx_params(wmi_handle, + evt_buf, hdr, bufp); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_vdev_stopped_param() - extract vdev stop param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param vdev_id: Pointer to hold vdev identifier + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_vdev_stopped_param(void *wmi_hdl, void *evt_buf, + uint32_t *vdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_vdev_stopped_param) + return wmi_handle->ops->extract_vdev_stopped_param(wmi_handle, + evt_buf, vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_vdev_roam_param() - extract vdev roam param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold roam param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_vdev_roam_param(void *wmi_hdl, void *evt_buf, + wmi_host_roam_event *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_vdev_roam_param) + return wmi_handle->ops->extract_vdev_roam_param(wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_vdev_scan_ev_param() - extract vdev scan param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold vdev scan param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_vdev_scan_ev_param(void *wmi_hdl, void *evt_buf, + struct scan_event *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_vdev_scan_ev_param) + return wmi_handle->ops->extract_vdev_scan_ev_param(wmi_handle, + evt_buf, param); + + return 
QDF_STATUS_E_FAILURE; +} + +#ifdef CONVERGED_TDLS_ENABLE +QDF_STATUS wmi_extract_vdev_tdls_ev_param(void *wmi_hdl, void *evt_buf, + struct tdls_event_info *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (wmi_handle->ops->extract_vdev_tdls_ev_param) + return wmi_handle->ops->extract_vdev_tdls_ev_param(wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} +#endif + +/** + * wmi_extract_mu_ev_param() - extract mu param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold mu report + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_mu_ev_param(void *wmi_hdl, void *evt_buf, + wmi_host_mu_report_event *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_mu_ev_param) + return wmi_handle->ops->extract_mu_ev_param(wmi_handle, evt_buf, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_mu_db_entry() - extract mu db entry from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold mu db entry + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_mu_db_entry(void *wmi_hdl, void *evt_buf, + uint8_t idx, wmi_host_mu_db_entry *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_mu_db_entry) + return wmi_handle->ops->extract_mu_db_entry(wmi_handle, evt_buf, + idx, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_mumimo_tx_count_ev_param() - extract mumimo tx count from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold mumimo tx count + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_mumimo_tx_count_ev_param(void *wmi_hdl, void *evt_buf, + 
wmi_host_peer_txmu_cnt_event *param) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_mumimo_tx_count_ev_param) + return wmi->ops->extract_mumimo_tx_count_ev_param(wmi, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_peer_gid_userpos_list_ev_param() - extract peer userpos list + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold peer gid userposition list + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_peer_gid_userpos_list_ev_param(void *wmi_hdl, + void *evt_buf, + wmi_host_peer_gid_userpos_list_event *param) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_peer_gid_userpos_list_ev_param) + return wmi->ops->extract_peer_gid_userpos_list_ev_param(wmi, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_pdev_caldata_version_check_ev_param() - extract caldata + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold caldata version data + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_pdev_caldata_version_check_ev_param(void *wmi_hdl, + void *evt_buf, + wmi_host_pdev_check_cal_version_event *param) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_pdev_caldata_version_check_ev_param) + return wmi->ops->extract_pdev_caldata_version_check_ev_param( + wmi, evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_pdev_tpc_config_ev_param() - extract pdev tpc configuration + * param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold tpc configuration + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_pdev_tpc_config_ev_param(void *wmi_hdl, 
void *evt_buf, + wmi_host_pdev_tpc_config_event *param) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_pdev_tpc_config_ev_param) + return wmi->ops->extract_pdev_tpc_config_ev_param(wmi, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; + +} + +/** + * wmi_extract_gpio_input_ev_param() - extract gpio input param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param gpio_num: Pointer to hold gpio number + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_gpio_input_ev_param(void *wmi_hdl, + void *evt_buf, uint32_t *gpio_num) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_gpio_input_ev_param) + return wmi_handle->ops->extract_gpio_input_ev_param(wmi_handle, + evt_buf, gpio_num); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_pdev_reserve_ast_ev_param() - extract reserve ast entry + * param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold reserve ast entry param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_pdev_reserve_ast_ev_param(void *wmi_hdl, + void *evt_buf, struct wmi_host_proxy_ast_reserve_param *param) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_pdev_reserve_ast_ev_param) + return wmi->ops->extract_pdev_reserve_ast_ev_param(wmi, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_nfcal_power_ev_param() - extract noise floor calibration + * power param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold nf cal power param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_nfcal_power_ev_param(void *wmi_hdl, void *evt_buf, + 
wmi_host_pdev_nfcal_power_all_channels_event *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_nfcal_power_ev_param) + return wmi_handle->ops->extract_nfcal_power_ev_param(wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; + +} + +/** + * wmi_extract_pdev_tpc_ev_param() - extract tpc param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold tpc param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_pdev_tpc_ev_param(void *wmi_hdl, void *evt_buf, + wmi_host_pdev_tpc_event *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_pdev_tpc_ev_param) + return wmi_handle->ops->extract_pdev_tpc_ev_param(wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; + +} + +/** + * wmi_extract_pdev_generic_buffer_ev_param() - extract pdev generic buffer + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to generic buffer param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_pdev_generic_buffer_ev_param(void *wmi_hdl, + void *evt_buf, wmi_host_pdev_generic_buffer_event *param) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_pdev_generic_buffer_ev_param) + return wmi->ops->extract_pdev_generic_buffer_ev_param(wmi, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; + +} + +/** + * wmi_extract_mgmt_tx_compl_param() - extract mgmt tx completion param + * from event + * @wmi_hdl: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to mgmt tx completion param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_mgmt_tx_compl_param(void *wmi_hdl, void *evt_buf, + wmi_host_mgmt_tx_compl_event *param) +{ + wmi_unified_t 
wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_mgmt_tx_compl_param) + return wmi_handle->ops->extract_mgmt_tx_compl_param(wmi_handle, + evt_buf, param); + + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_offchan_data_tx_compl_param() - + * extract offchan data tx completion param from event + * @wmi_hdl: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to offchan data tx completion param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_offchan_data_tx_compl_param(void *wmi_hdl, void *evt_buf, + struct wmi_host_offchan_data_tx_compl_event *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_offchan_data_tx_compl_param) + return wmi_handle->ops->extract_offchan_data_tx_compl_param( + wmi_handle, evt_buf, param); + + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_pdev_csa_switch_count_status() - extract CSA switch count status + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to CSA switch count status param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_pdev_csa_switch_count_status(void *wmi_hdl, + void *evt_buf, + struct pdev_csa_switch_count_status *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_pdev_csa_switch_count_status) + return wmi_handle->ops->extract_pdev_csa_switch_count_status( + wmi_handle, + evt_buf, + param); + + return QDF_STATUS_E_FAILURE; +} + + +/** + * wmi_extract_swba_num_vdevs() - extract swba num vdevs from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param num_vdevs: Pointer to hold num vdevs + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_swba_num_vdevs(void *wmi_hdl, void *evt_buf, + uint32_t 
*num_vdevs) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_swba_num_vdevs) + return wmi_handle->ops->extract_swba_num_vdevs(wmi_handle, + evt_buf, num_vdevs); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_swba_tim_info() - extract swba tim info from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param idx: Index to bcn info + * @param tim_info: Pointer to hold tim info + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_swba_tim_info(void *wmi_hdl, void *evt_buf, + uint32_t idx, wmi_host_tim_info *tim_info) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_swba_tim_info) + return wmi_handle->ops->extract_swba_tim_info(wmi_handle, + evt_buf, idx, tim_info); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_swba_noa_info() - extract swba NoA information from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param idx: Index to bcn info + * @param p2p_desc: Pointer to hold p2p NoA info + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_swba_noa_info(void *wmi_hdl, void *evt_buf, + uint32_t idx, wmi_host_p2p_noa_info *p2p_desc) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_swba_noa_info) + return wmi_handle->ops->extract_swba_noa_info(wmi_handle, + evt_buf, idx, p2p_desc); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef CONVERGED_P2P_ENABLE +/** + * wmi_extract_p2p_lo_stop_ev_param() - extract p2p lo stop param from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to hold listen offload stop param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_p2p_lo_stop_ev_param(void *wmi_hdl, void *evt_buf, + struct p2p_lo_event *param) +{ + 
wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (!wmi_handle) { + WMI_LOGE("wmi handle is null"); + return QDF_STATUS_E_INVAL; + } + + if (wmi_handle->ops->extract_p2p_lo_stop_ev_param) + return wmi_handle->ops->extract_p2p_lo_stop_ev_param( + wmi_handle, evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_p2p_noa_ev_param() - extract p2p noa param from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @param: Pointer to hold p2p noa param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_p2p_noa_ev_param(void *wmi_hdl, void *evt_buf, + struct p2p_noa_info *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (!wmi_handle) { + WMI_LOGE("wmi handle is null"); + return QDF_STATUS_E_INVAL; + } + + if (wmi_handle->ops->extract_p2p_noa_ev_param) + return wmi_handle->ops->extract_p2p_noa_ev_param( + wmi_handle, evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_send_set_mac_addr_rx_filter_cmd(void *wmi_hdl, + struct p2p_set_mac_filter *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (!wmi_handle) { + WMI_LOGE("wmi handle is null"); + return QDF_STATUS_E_INVAL; + } + + if (wmi_handle->ops->set_mac_addr_rx_filter) + return wmi_handle->ops->set_mac_addr_rx_filter( + wmi_handle, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_mac_addr_rx_filter_evt_param(void *wmi_hdl, void *evt_buf, + struct p2p_set_mac_filter_evt *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (!wmi_handle) { + WMI_LOGE("wmi handle is null"); + return QDF_STATUS_E_INVAL; + } + + if (wmi_handle->ops->extract_mac_addr_rx_filter_evt_param) + return wmi_handle->ops->extract_mac_addr_rx_filter_evt_param( + wmi_handle, evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} +#endif + +/** + * wmi_extract_peer_sta_ps_statechange_ev() - extract peer sta ps state + * from event + * 
@wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param ev: Pointer to hold peer param and ps state + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_peer_sta_ps_statechange_ev(void *wmi_hdl, void *evt_buf, + wmi_host_peer_sta_ps_statechange_event *ev) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_peer_sta_ps_statechange_ev) + return wmi->ops->extract_peer_sta_ps_statechange_ev(wmi, + evt_buf, ev); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_peer_sta_kickout_ev() - extract peer sta kickout event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param ev: Pointer to hold peer param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_peer_sta_kickout_ev(void *wmi_hdl, void *evt_buf, + wmi_host_peer_sta_kickout_event *ev) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_peer_sta_kickout_ev) + return wmi_handle->ops->extract_peer_sta_kickout_ev(wmi_handle, + evt_buf, ev); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_peer_ratecode_list_ev() - extract peer ratecode from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param peer_mac: Pointer to hold peer mac address + * @param rate_cap: Pointer to hold ratecode + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_peer_ratecode_list_ev(void *wmi_hdl, void *evt_buf, + uint8_t *peer_mac, wmi_sa_rate_cap *rate_cap) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_peer_ratecode_list_ev) + return wmi->ops->extract_peer_ratecode_list_ev(wmi, + evt_buf, peer_mac, rate_cap); + + return QDF_STATUS_E_FAILURE; + +} + +/** + * wmi_extract_comb_phyerr() - extract comb phy error from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to 
event buffer + * @param datalen: data length of event buffer + * @param buf_offset: Pointer to hold value of current event buffer offset + * post extraction + * @param phyer: Pointer to hold phyerr + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_comb_phyerr(void *wmi_hdl, void *evt_buf, + uint16_t datalen, uint16_t *buf_offset, wmi_host_phyerr_t *phyerr) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_comb_phyerr) + return wmi_handle->ops->extract_comb_phyerr(wmi_handle, + evt_buf, datalen, buf_offset, phyerr); + + return QDF_STATUS_E_FAILURE; + +} + +/** + * wmi_extract_single_phyerr() - extract single phy error from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param datalen: data length of event buffer + * @param buf_offset: Pointer to hold value of current event buffer offset + * post extraction + * @param phyerr: Pointer to hold phyerr + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_single_phyerr(void *wmi_hdl, void *evt_buf, + uint16_t datalen, uint16_t *buf_offset, wmi_host_phyerr_t *phyerr) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_single_phyerr) + return wmi_handle->ops->extract_single_phyerr(wmi_handle, + evt_buf, datalen, buf_offset, phyerr); + + return QDF_STATUS_E_FAILURE; + +} + +/** + * wmi_extract_composite_phyerr() - extract composite phy error from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param datalen: Length of event buffer + * @param phyerr: Pointer to hold phy error + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_composite_phyerr(void *wmi_hdl, void *evt_buf, + uint16_t datalen, wmi_host_phyerr_t *phyerr) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if 
(wmi_handle->ops->extract_composite_phyerr) + return wmi_handle->ops->extract_composite_phyerr(wmi_handle, + evt_buf, datalen, phyerr); + + return QDF_STATUS_E_FAILURE; + +} + +/** + * wmi_extract_stats_param() - extract all stats count from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param stats_param: Pointer to hold stats count + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_stats_param(void *wmi_hdl, void *evt_buf, + wmi_host_stats_event *stats_param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_all_stats_count) + return wmi_handle->ops->extract_all_stats_count(wmi_handle, + evt_buf, stats_param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_pdev_stats() - extract pdev stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into pdev stats + * @param pdev_stats: Pointer to hold pdev stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_pdev_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, wmi_host_pdev_stats *pdev_stats) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_pdev_stats) + return wmi_handle->ops->extract_pdev_stats(wmi_handle, + evt_buf, index, pdev_stats); + + return QDF_STATUS_E_FAILURE; +} + +/** + * extract_unit_test() - extract unit test from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param unit_test: Pointer to hold unit-test header + * @param maxspace: The amount of space in evt_buf + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_unit_test(void *wmi_hdl, void *evt_buf, + wmi_unit_test_event *unit_test, uint32_t maxspace) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_unit_test) 
+ return wmi_handle->ops->extract_unit_test(wmi_handle, + evt_buf, unit_test, maxspace); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_pdev_ext_stats() - extract extended pdev stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into extended pdev stats + * @param pdev_ext_stats: Pointer to hold extended pdev stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_pdev_ext_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, wmi_host_pdev_ext_stats *pdev_ext_stats) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_pdev_ext_stats) + return wmi_handle->ops->extract_pdev_ext_stats(wmi_handle, + evt_buf, index, pdev_ext_stats); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_peer_stats() - extract peer stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into peer stats + * @param peer_stats: Pointer to hold peer stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_peer_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, wmi_host_peer_stats *peer_stats) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_peer_stats) + return wmi_handle->ops->extract_peer_stats(wmi_handle, + evt_buf, index, peer_stats); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_vdev_stats() - extract vdev stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into vdev stats + * @param vdev_stats: Pointer to hold vdev stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_vdev_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, wmi_host_vdev_stats *vdev_stats) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + 
if (wmi_handle->ops->extract_vdev_stats) + return wmi_handle->ops->extract_vdev_stats(wmi_handle, + evt_buf, index, vdev_stats); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_per_chain_rssi_stats() - extract rssi stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into rssi stats + * @param rssi_stats: Pointer to hold rssi stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_per_chain_rssi_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, struct wmi_host_per_chain_rssi_stats *rssi_stats) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_per_chain_rssi_stats) + return wmi_handle->ops->extract_per_chain_rssi_stats(wmi_handle, + evt_buf, index, rssi_stats); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_rtt_hdr() - extract rtt header from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param ev: Pointer to hold rtt header + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_rtt_hdr(void *wmi_hdl, void *evt_buf, + wmi_host_rtt_event_hdr *ev) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_rtt_hdr) + return wmi_handle->ops->extract_rtt_hdr(wmi_handle, + evt_buf, ev); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_bcnflt_stats() - extract bcn fault stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into bcn fault stats + * @param bcnflt_stats: Pointer to hold bcn fault stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_bcnflt_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, wmi_host_bcnflt_stats *bcnflt_stats) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if 
(wmi_handle->ops->extract_bcnflt_stats) + return wmi_handle->ops->extract_bcnflt_stats(wmi_handle, + evt_buf, index, bcnflt_stats); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_rtt_ev() - extract rtt event + * @wmi_handle: wmi handle + * @param evt_buf: Pointer to event buffer + * @param ev: Pointer to hold rtt event + * @param hdump: Pointer to hold hex dump + * @param hdump_len: hex dump length + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_rtt_ev(void *wmi_hdl, void *evt_buf, + wmi_host_rtt_meas_event *ev, uint8_t *hdump, uint16_t hdump_len) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_rtt_ev) + return wmi_handle->ops->extract_rtt_ev(wmi_handle, + evt_buf, ev, hdump, hdump_len); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_peer_extd_stats() - extract extended peer stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into extended peer stats + * @param peer_extd_stats: Pointer to hold extended peer stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_peer_extd_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, wmi_host_peer_extd_stats *peer_extd_stats) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_peer_extd_stats) + return wmi_handle->ops->extract_peer_extd_stats(wmi_handle, + evt_buf, index, peer_extd_stats); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_peer_adv_stats() - extract advance (extd2) peer stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param peer_adv_stats: Pointer to hold extended peer stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_peer_adv_stats(wmi_unified_t wmi_handle, void *evt_buf, + struct 
wmi_host_peer_adv_stats + *peer_adv_stats) +{ + if (wmi_handle->ops->extract_peer_adv_stats) + return wmi_handle->ops->extract_peer_adv_stats(wmi_handle, + evt_buf, peer_adv_stats); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_rtt_error_report_ev() - extract rtt error report from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param wds_ev: Pointer to hold rtt error report + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_rtt_error_report_ev(void *wmi_hdl, void *evt_buf, + wmi_host_rtt_error_report_event *ev) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_rtt_error_report_ev) + return wmi_handle->ops->extract_rtt_error_report_ev(wmi_handle, + evt_buf, ev); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_chan_stats() - extract chan stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into chan stats + * @param chanstats: Pointer to hold chan stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_chan_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, wmi_host_chan_stats *chan_stats) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_chan_stats) + return wmi_handle->ops->extract_chan_stats(wmi_handle, + evt_buf, index, chan_stats); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_thermal_stats() - extract thermal stats from event + * @wmi_handle: wmi handle + * @param evt_buf: Pointer to event buffer + * @param temp: Pointer to hold extracted temperature + * @param level: Pointer to hold extracted level + * @param pdev_id: Pointer to hold extracted pdev_id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_thermal_stats(void *wmi_hdl, void *evt_buf, + uint32_t *temp, 
uint32_t *level, uint32_t *pdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_thermal_stats) + return wmi_handle->ops->extract_thermal_stats(wmi_handle, + evt_buf, temp, level, pdev_id); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_profile_ctx() - extract profile context from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param profile_ctx: Pointer to hold profile context + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_profile_ctx(void *wmi_hdl, void *evt_buf, + wmi_host_wlan_profile_ctx_t *profile_ctx) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_profile_ctx) + return wmi_handle->ops->extract_profile_ctx(wmi_handle, + evt_buf, profile_ctx); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_thermal_level_stats() - extract thermal level stats from + * event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param idx: Index to level stats + * @param levelcount: Pointer to hold levelcount + * @param dccount: Pointer to hold dccount + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_thermal_level_stats(void *wmi_hdl, void *evt_buf, + uint8_t idx, uint32_t *levelcount, uint32_t *dccount) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_thermal_level_stats) + return wmi_handle->ops->extract_thermal_level_stats(wmi_handle, + evt_buf, idx, levelcount, dccount); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_profile_data() - extract profile data from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @idx index: index of profile data + * @param profile_data: Pointer to hold profile data + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS 
wmi_extract_profile_data(void *wmi_hdl, void *evt_buf, uint8_t idx, + wmi_host_wlan_profile_t *profile_data) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_profile_data) + return wmi_handle->ops->extract_profile_data(wmi_handle, + evt_buf, idx, profile_data); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_chan_info_event() - extract chan information from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param chan_info: Pointer to hold chan information + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_chan_info_event(void *wmi_hdl, void *evt_buf, + wmi_host_chan_info_event *chan_info) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_chan_info_event) + return wmi_handle->ops->extract_chan_info_event(wmi_handle, + evt_buf, chan_info); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_channel_hopping_event() - extract channel hopping param + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param ch_hopping: Pointer to hold channel hopping param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_channel_hopping_event(void *wmi_hdl, void *evt_buf, + wmi_host_pdev_channel_hopping_event *ch_hopping) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_channel_hopping_event) + return wmi->ops->extract_channel_hopping_event(wmi, + evt_buf, ch_hopping); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_bss_chan_info_event() - extract bss channel information + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param bss_chan_info: Pointer to hold bss channel information + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_bss_chan_info_event(void 
*wmi_hdl, void *evt_buf, + wmi_host_pdev_bss_chan_info_event *bss_chan_info) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_bss_chan_info_event) + return wmi_handle->ops->extract_bss_chan_info_event(wmi_handle, + evt_buf, bss_chan_info); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_inst_rssi_stats_event() - extract inst rssi stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param inst_rssi_resp: Pointer to hold inst rssi response + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_inst_rssi_stats_event(void *wmi_hdl, void *evt_buf, + wmi_host_inst_stats_resp *inst_rssi_resp) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_inst_rssi_stats_event) + return wmi->ops->extract_inst_rssi_stats_event(wmi, + evt_buf, inst_rssi_resp); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_tx_data_traffic_ctrl_ev() - extract tx data traffic control + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param ev: Pointer to hold data traffic control + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_tx_data_traffic_ctrl_ev(void *wmi_hdl, void *evt_buf, + wmi_host_tx_data_traffic_ctrl_event *ev) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_tx_data_traffic_ctrl_ev) + return wmi->ops->extract_tx_data_traffic_ctrl_ev(wmi, + evt_buf, ev); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_atf_peer_stats_ev() - extract atf peer stats + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param ev: Pointer to hold atf peer stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_atf_peer_stats_ev(void *wmi_hdl, void *evt_buf, + wmi_host_atf_peer_stats_event *ev) 
+{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_atf_peer_stats_ev) + return wmi->ops->extract_atf_peer_stats_ev(wmi, + evt_buf, ev); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_atf_token_info_ev() - extract atf token info + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param idx: Index indicating the peer number + * @param ev: Pointer to hold atf token info + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_atf_token_info_ev(void *wmi_hdl, void *evt_buf, + uint8_t idx, wmi_host_atf_peer_stats_info *ev) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->extract_atf_token_info_ev) + return wmi->ops->extract_atf_token_info_ev(wmi, + evt_buf, idx, ev); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_vdev_extd_stats() - extract extended vdev stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into extended vdev stats + * @param vdev_extd_stats: Pointer to hold extended vdev stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_vdev_extd_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, wmi_host_vdev_extd_stats *vdev_extd_stats) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_vdev_extd_stats) + return wmi_handle->ops->extract_vdev_extd_stats(wmi_handle, + evt_buf, index, vdev_extd_stats); + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_bcn_stats() - extract beacon stats from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @index: Index into beacon stats + * @vdev_bcn_stats: Pointer to hold beacon stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_bcn_stats(void *wmi_hdl, void *evt_buf, + uint32_t index, wmi_host_bcn_stats 
*vdev_bcn_stats) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_bcn_stats) + return wmi_handle->ops->extract_bcn_stats(wmi_handle, + evt_buf, index, vdev_bcn_stats); + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_vdev_nac_rssi_stats() - extract NAC_RSSI stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param vdev_extd_stats: Pointer to hold nac rssi stats + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_extract_vdev_nac_rssi_stats(void *wmi_hdl, void *evt_buf, + struct wmi_host_vdev_nac_rssi_event *vdev_nac_rssi_stats) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_vdev_nac_rssi_stats) + return wmi_handle->ops->extract_vdev_nac_rssi_stats(wmi_handle, + evt_buf, vdev_nac_rssi_stats); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_send_adapt_dwelltime_params_cmd() - send wmi cmd of + * adaptive dwelltime configuration params + * @wma_handle: wma handler + * @dwelltime_params: pointer to dwelltime_params + * + * Return: QDF_STATUS_SUCCESS on success and QDF failure reason code for failure + */ +QDF_STATUS wmi_unified_send_adapt_dwelltime_params_cmd(void *wmi_hdl, + struct wmi_adaptive_dwelltime_params *dwelltime_params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_adapt_dwelltime_params_cmd) + return wmi_handle->ops-> + send_adapt_dwelltime_params_cmd(wmi_handle, + dwelltime_params); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_send_power_dbg_cmd() - send power debug commands + * @wmi_handle: wmi handle + * @param: wmi power debug parameter + * + * Send WMI_POWER_DEBUG_CMDID parameters to fw. 
+ * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_unified_send_power_dbg_cmd(void *wmi_hdl, + struct wmi_power_dbg_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_power_dbg_cmd) + return wmi_handle->ops->send_power_dbg_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_send_multiple_vdev_restart_req_cmd() - send multiple vdev restart + * @wmi_handle: wmi handle + * @param: multiple vdev restart parameter + * + * Send WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID parameters to fw. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_unified_send_multiple_vdev_restart_req_cmd(void *wmi_hdl, + struct multiple_vdev_restart_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_multiple_vdev_restart_req_cmd) + return wmi_handle->ops->send_multiple_vdev_restart_req_cmd( + wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_sar_limit_cmd(void *wmi_hdl, + struct sar_limit_cmd_params *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_sar_limit_cmd) + return wmi_handle->ops->send_sar_limit_cmd( + wmi_handle, + params); + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_get_sar_limit_cmd(void *wmi_hdl) +{ + wmi_unified_t wmi_handle = wmi_hdl; + + if (wmi_handle->ops->get_sar_limit_cmd) + return wmi_handle->ops->get_sar_limit_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extract_sar_limit_event(void *wmi_hdl, + uint8_t *evt_buf, + struct sar_limit_event *event) +{ + wmi_unified_t wmi_handle = wmi_hdl; + + if (wmi_handle->ops->extract_sar_limit_event) + return wmi_handle->ops->extract_sar_limit_event(wmi_handle, + evt_buf, + event); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extract_sar2_result_event(void *handle, + uint8_t 
*event, uint32_t len) +{ + wmi_unified_t wmi_handle = handle; + + if (wmi_handle->ops->extract_sar2_result_event) + return wmi_handle->ops->extract_sar2_result_event(wmi_handle, + event, + len); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_FEATURE_DISA +QDF_STATUS wmi_unified_encrypt_decrypt_send_cmd(void *wmi_hdl, + struct disa_encrypt_decrypt_req_params *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_encrypt_decrypt_send_cmd) + return wmi_handle->ops->send_encrypt_decrypt_send_cmd( + wmi_handle, + params); + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_encrypt_decrypt_resp_params(void *wmi_hdl, + uint8_t *evt_buf, + struct disa_encrypt_decrypt_resp_params *resp) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle->ops->extract_encrypt_decrypt_resp_event) + return wmi_handle->ops->extract_encrypt_decrypt_resp_event( + wmi_handle, evt_buf, resp); + + return QDF_STATUS_E_FAILURE; +} + +#endif + +/* + * wmi_unified_send_btcoex_wlan_priority_cmd() - send btcoex priority commands + * @wmi_handle: wmi handle + * @param : wmi btcoex cfg params + * + * Send WMI_BTCOEX_CFG_CMDID parameters to fw. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_unified_send_btcoex_wlan_priority_cmd(void *wmi_hdl, + struct btcoex_cfg_params *param) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->send_btcoex_wlan_priority_cmd) + return wmi->ops->send_btcoex_wlan_priority_cmd(wmi, + param); + + return QDF_STATUS_E_FAILURE; +} +/** + * wmi_unified_send_btcoex_duty_cycle_cmd() - send btcoex duty cycle commands + * @wmi_handle: wmi handle + * @param: wmi btcoex cfg params + * + * Send WMI_BTCOEX_CFG_CMDID parameters to fw. 
+ * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_unified_send_btcoex_duty_cycle_cmd(void *wmi_hdl, + struct btcoex_cfg_params *param) +{ + wmi_unified_t wmi = (wmi_unified_t) wmi_hdl; + + if (wmi->ops->send_btcoex_duty_cycle_cmd) + return wmi->ops->send_btcoex_duty_cycle_cmd(wmi, + param); + + return QDF_STATUS_E_FAILURE; +} + +/* + * wmi_extract_service_ready_ext() - extract extended service ready + * @wmi_handle: wmi handle + * @param: wmi power debug parameter + * + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_extract_service_ready_ext(void *wmi_hdl, uint8_t *evt_buf, + struct wlan_psoc_host_service_ext_param *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_service_ready_ext) + return wmi_handle->ops->extract_service_ready_ext(wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_sar_cap_service_ready_ext() - + * extract sar cap from service ready event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @ext_param: extended target info + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_extract_sar_cap_service_ready_ext( + void *wmi_hdl, + uint8_t *evt_buf, + struct wlan_psoc_host_service_ext_param *ext_param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_sar_cap_service_ready_ext) + return wmi_handle->ops->extract_sar_cap_service_ready_ext( + wmi_handle, + evt_buf, ext_param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_hw_mode_cap_service_ready_ext() - + * extract HW mode cap from service ready event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold evt buf + * @param hw_mode_idx: hw mode idx should be less than num_mode + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS 
wmi_extract_hw_mode_cap_service_ready_ext( + void *wmi_hdl, + uint8_t *evt_buf, uint8_t hw_mode_idx, + struct wlan_psoc_host_hw_mode_caps *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_hw_mode_cap_service_ready_ext) + return wmi_handle->ops->extract_hw_mode_cap_service_ready_ext( + wmi_handle, + evt_buf, hw_mode_idx, param); + + return QDF_STATUS_E_FAILURE; +} +/** + * wmi_extract_mac_phy_cap_service_ready_ext() - + * extract MAC phy cap from service ready event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param hw_mode_id: hw mode id of hw_mode_caps + * @param phy_id: phy_id within hw_mode_cap + * @param param: pointer to mac phy caps structure to hold the values from event + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_extract_mac_phy_cap_service_ready_ext( + void *wmi_hdl, + uint8_t *evt_buf, + uint8_t hw_mode_id, + uint8_t phy_id, + struct wlan_psoc_host_mac_phy_caps *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_mac_phy_cap_service_ready_ext) + return wmi_handle->ops->extract_mac_phy_cap_service_ready_ext( + wmi_handle, + evt_buf, hw_mode_id, phy_id, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_reg_cap_service_ready_ext() - + * extract REG cap from service ready event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold evt buf + * @param phy_idx: phy idx should be less than num_mode + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_extract_reg_cap_service_ready_ext( + void *wmi_hdl, + uint8_t *evt_buf, uint8_t phy_idx, + struct wlan_psoc_host_hal_reg_capabilities_ext *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_reg_cap_service_ready_ext) + return wmi_handle->ops->extract_reg_cap_service_ready_ext( + wmi_handle, + evt_buf, phy_idx, 
param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_dbr_ring_cap_service_ready_ext( + void *wmi_hdl, + uint8_t *evt_buf, uint8_t idx, + struct wlan_psoc_host_dbr_ring_caps *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_dbr_ring_cap_service_ready_ext) + return wmi_handle->ops->extract_dbr_ring_cap_service_ready_ext( + wmi_handle, + evt_buf, idx, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_dbr_buf_release_fixed( + void *wmi_hdl, + uint8_t *evt_buf, + struct direct_buf_rx_rsp *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_dbr_buf_release_fixed) + return wmi_handle->ops->extract_dbr_buf_release_fixed( + wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_dbr_buf_release_entry( + void *wmi_hdl, + uint8_t *evt_buf, uint8_t idx, + struct direct_buf_rx_entry *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_dbr_buf_release_entry) + return wmi_handle->ops->extract_dbr_buf_release_entry( + wmi_handle, + evt_buf, idx, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_dbr_buf_metadata( + void *wmi_hdl, + uint8_t *evt_buf, uint8_t idx, + struct direct_buf_rx_metadata *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (wmi_handle->ops->extract_dbr_buf_metadata) + return wmi_handle->ops->extract_dbr_buf_metadata( + wmi_handle, + evt_buf, idx, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_pdev_utf_event() - + * extract UTF data from pdev utf event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold evt buf + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_extract_pdev_utf_event(void *wmi_hdl, + uint8_t *evt_buf, + struct wmi_host_pdev_utf_event *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) 
wmi_hdl; + + if (wmi_handle->ops->extract_pdev_utf_event) + return wmi_handle->ops->extract_pdev_utf_event( + wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_pdev_qvit_event() - + * extract UTF data from pdev qvit event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold evt buf + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_extract_pdev_qvit_event(void *wmi_hdl, + uint8_t *evt_buf, + struct wmi_host_pdev_qvit_event *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_pdev_qvit_event) + return wmi_handle->ops->extract_pdev_qvit_event( + wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_send_coex_ver_cfg_cmd() - send coex ver cfg command + * @wmi_handle: wmi handle + * @param: wmi coex ver cfg params + * + * Send WMI_COEX_VERSION_CFG_CMID parameters to fw. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_unified_send_coex_ver_cfg_cmd(void *wmi_hdl, + coex_ver_cfg_t *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_coex_ver_cfg_cmd) + return wmi_handle->ops->send_coex_ver_cfg_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_send_coex_config_cmd() - send coex ver cfg command + * @wmi_handle: wmi handle + * @param: wmi coex cfg cmd params + * + * Send WMI_COEX_CFG_CMD parameters to fw. 
+ * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_unified_send_coex_config_cmd(void *wmi_hdl, + struct coex_config_params *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_coex_config_cmd) + return wmi_handle->ops->send_coex_config_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_request_get_rcpi_cmd(void *wmi_hdl, + struct rcpi_req *get_rcpi_param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_get_rcpi_cmd) + return wmi_handle->ops->send_get_rcpi_cmd(wmi_handle, + get_rcpi_param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_rcpi_response_event(void *wmi_hdl, void *evt_buf, + struct rcpi_res *res) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + struct wmi_ops *ops = wmi_handle->ops; + + if (ops->extract_rcpi_response_event) + return ops->extract_rcpi_response_event(wmi_handle, evt_buf, + res); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_peer_delete_response_event() - + * extract vdev id and peer mac addresse from peer delete response event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold evt buf + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_extract_peer_delete_response_event( + void *wmi_hdl, + uint8_t *evt_buf, + struct wmi_host_peer_delete_response_event *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_peer_delete_response_event) + return wmi_handle->ops->extract_peer_delete_response_event( + wmi_handle, + evt_buf, param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_dfs_phyerr_offload_en_cmd(void *wmi_hdl, + uint32_t pdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_dfs_phyerr_offload_en_cmd) + return 
wmi_handle->ops->send_dfs_phyerr_offload_en_cmd( + wmi_handle, pdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_unified_dfs_phyerr_offload_dis_cmd(void *wmi_hdl, + uint32_t pdev_id) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_dfs_phyerr_offload_dis_cmd) + return wmi_handle->ops->send_dfs_phyerr_offload_dis_cmd( + wmi_handle, pdev_id); + + return QDF_STATUS_E_FAILURE; +} + +/* + * wmi_extract_chainmask_tables_tlv() - extract chain mask tables + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer. + * @chainmask_table: pointer to struct wlan_psoc_host_chainmask_table + * + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_extract_chainmask_tables(void *wmi_hdl, uint8_t *evt_buf, + struct wlan_psoc_host_chainmask_table *chainmask_table) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_chainmask_tables) + return wmi_handle->ops->extract_chainmask_tables(wmi_handle, + evt_buf, chainmask_table); + + return QDF_STATUS_E_FAILURE; +} +/** + * wmi_unified_set_country_cmd_send() - WMI set country function + * @param wmi_handle : handle to WMI. 
+ * @param param : pointer to hold set country cmd parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_country_cmd_send(void *wmi_hdl, + struct set_country *param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_country_cmd) + return wmi_handle->ops->send_set_country_cmd(wmi_handle, + param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_send_dbs_scan_sel_params_cmd() - send wmi cmd of + * DBS scan selection configuration params + * @wma_handle: wma handler + * @dbs_scan_params: pointer to wmi_dbs_scan_sel_params + * + * Return: QDF_STATUS_SUCCESS on success and QDF failure reason code for failure + */ +QDF_STATUS wmi_unified_send_dbs_scan_sel_params_cmd(void *wmi_hdl, + struct wmi_dbs_scan_sel_params *dbs_scan_params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_dbs_scan_sel_params_cmd) + return wmi_handle->ops-> + send_dbs_scan_sel_params_cmd(wmi_handle, + dbs_scan_params); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_FEATURE_ACTION_OUI +QDF_STATUS +wmi_unified_send_action_oui_cmd(void *wmi_hdl, + struct action_oui_request *req) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (wmi_handle->ops->send_action_oui_cmd) + return wmi_handle->ops->send_action_oui_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} +#endif + +/** + * wmi_unified_send_limit_off_chan_cmd() - send wmi cmd of limit off channel + * configuration params + * @wmi_hdl: wmi handler + * @limit_off_chan_param: pointer to wmi_limit_off_chan_param + * + * Return: QDF_STATUS_SUCCESS on success and QDF failure reason code on failure + */ +QDF_STATUS wmi_unified_send_limit_off_chan_cmd(void *wmi_hdl, + struct wmi_limit_off_chan_param *limit_off_chan_param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_limit_off_chan_cmd) + return 
wmi_handle->ops->send_limit_off_chan_cmd(wmi_handle, + limit_off_chan_param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_send_bcn_offload_control_cmd - send beacon ofload control cmd to fw + * @wmi_hdl: wmi handle + * @bcn_ctrl_param: pointer to bcn_offload_control param + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +QDF_STATUS wmi_send_bcn_offload_control_cmd(void *wmi_hdl, + struct bcn_offload_control *bcn_ctrl_param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_bcn_offload_control_cmd) + return wmi_handle->ops->send_bcn_offload_control_cmd(wmi_handle, + bcn_ctrl_param); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_send_wds_entry_list_cmd() - WMI function to get list of + * wds entries from FW + * @wmi_handle: wmi handle + * + * Send WMI_PDEV_WDS_ENTRY_LIST_CMDID parameters to fw. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_unified_send_dump_wds_table_cmd(void *wmi_hdl) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_wds_entry_list_cmd) + return wmi_handle->ops->send_wds_entry_list_cmd(wmi_handle); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_extract_wds_entry - api to extract wds entry + * @wmi_handle: wma handle + * @evt_buf: pointer to event buffer + * @wds_entry: wds entry + * @idx: index to point wds entry in event buffer + * + * Return: QDF_STATUS_SUCCESS for successful event parse + * else QDF_STATUS_E_INVAL or QDF_STATUS_E_FAILURE + */ +QDF_STATUS wmi_extract_wds_entry(void *wmi_hdl, uint8_t *evt_buf, + struct wdsentry *wds_entry, + u_int32_t idx) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_wds_entry) + return wmi_handle->ops->extract_wds_entry(wmi_handle, + evt_buf, wds_entry, idx); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_extract_wds_entry); + +#ifdef WLAN_FEATURE_NAN_CONVERGENCE +QDF_STATUS 
wmi_unified_ndp_initiator_req_cmd_send(void *wmi_hdl, + struct nan_datapath_initiator_req *req) +{ + wmi_unified_t wmi_handle = wmi_hdl; + + if (wmi_handle->ops->send_ndp_initiator_req_cmd) + return wmi_handle->ops->send_ndp_initiator_req_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_ndp_responder_req_cmd_send(void *wmi_hdl, + struct nan_datapath_responder_req *req) +{ + wmi_unified_t wmi_handle = wmi_hdl; + + if (wmi_handle->ops->send_ndp_responder_req_cmd) + return wmi_handle->ops->send_ndp_responder_req_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_ndp_end_req_cmd_send(void *wmi_hdl, + struct nan_datapath_end_req *req) +{ + wmi_unified_t wmi_handle = wmi_hdl; + + if (wmi_handle->ops->send_ndp_end_req_cmd) + return wmi_handle->ops->send_ndp_end_req_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_ndp_initiator_rsp(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_initiator_rsp *rsp) +{ + if (wmi_handle->ops->extract_ndp_initiator_rsp) + return wmi_handle->ops->extract_ndp_initiator_rsp(wmi_handle, + data, rsp); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_ndp_ind(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_indication_event *ind) +{ + if (wmi_handle->ops->extract_ndp_ind) + return wmi_handle->ops->extract_ndp_ind(wmi_handle, + data, ind); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_ndp_confirm(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_confirm_event *ev) +{ + if (wmi_handle->ops->extract_ndp_confirm) + return wmi_handle->ops->extract_ndp_confirm(wmi_handle, + data, ev); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_ndp_responder_rsp(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_responder_rsp *rsp) +{ + if (wmi_handle->ops->extract_ndp_responder_rsp) + return wmi_handle->ops->extract_ndp_responder_rsp(wmi_handle, + data, rsp); + + 
return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_ndp_end_rsp(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_end_rsp_event *rsp) +{ + if (wmi_handle->ops->extract_ndp_end_rsp) + return wmi_handle->ops->extract_ndp_end_rsp(wmi_handle, + data, rsp); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_ndp_end_ind(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_end_indication_event **ind) +{ + if (wmi_handle->ops->extract_ndp_end_ind) + return wmi_handle->ops->extract_ndp_end_ind(wmi_handle, + data, ind); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_ndp_sch_update(wmi_unified_t wmi_handle, uint8_t *data, + struct nan_datapath_sch_update_event *ind) +{ + if (wmi_handle->ops->extract_ndp_sch_update) + return wmi_handle->ops->extract_ndp_sch_update(wmi_handle, + data, ind); + + return QDF_STATUS_E_FAILURE; +} +#endif +QDF_STATUS wmi_unified_send_btm_config(void *wmi_hdl, + struct wmi_btm_config *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_btm_config) + return wmi_handle->ops->send_btm_config(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_bss_load_config(void *wmi_hdl, + struct wmi_bss_load_config *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (wmi_handle->ops->send_roam_bss_load_config) + return wmi_handle->ops->send_roam_bss_load_config(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_obss_detection_cfg_cmd(void *wmi_hdl, + struct wmi_obss_detection_cfg_param *obss_cfg_param) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (wmi_handle->ops->send_obss_detection_cfg_cmd) + return wmi_handle->ops->send_obss_detection_cfg_cmd(wmi_handle, + obss_cfg_param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extract_obss_detection_info(void *wmi_hdl, + uint8_t *data, + struct wmi_obss_detect_info + *info) +{ + wmi_unified_t 
wmi_handle = (wmi_unified_t)wmi_hdl; + + if (wmi_handle->ops->extract_obss_detection_info) + return wmi_handle->ops->extract_obss_detection_info(data, info); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_offload_11k_cmd(void *wmi_hdl, + struct wmi_11k_offload_params *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_offload_11k_cmd) + return wmi_handle->ops->send_offload_11k_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_invoke_neighbor_report_cmd(void *wmi_hdl, + struct wmi_invoke_neighbor_report_params *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_invoke_neighbor_report_cmd) + return wmi_handle->ops->send_invoke_neighbor_report_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef WLAN_SUPPORT_GREEN_AP +QDF_STATUS wmi_extract_green_ap_egap_status_info( + void *wmi_hdl, uint8_t *evt_buf, + struct wlan_green_ap_egap_status_info *egap_status_info_params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (wmi_handle->ops->extract_green_ap_egap_status_info) + return wmi_handle->ops->extract_green_ap_egap_status_info( + evt_buf, egap_status_info_params); + + return QDF_STATUS_E_FAILURE; +} +#endif + +QDF_STATUS wmi_unified_send_bss_color_change_enable_cmd(void *wmi_hdl, + uint32_t vdev_id, + bool enable) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (wmi_handle->ops->send_bss_color_change_enable_cmd) + return wmi_handle->ops->send_bss_color_change_enable_cmd( + wmi_handle, vdev_id, enable); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_obss_color_collision_cfg_cmd(void *wmi_hdl, + struct wmi_obss_color_collision_cfg_param *cfg) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (wmi_handle->ops->send_obss_color_collision_cfg_cmd) + return wmi_handle->ops->send_obss_color_collision_cfg_cmd( + wmi_handle, cfg); + + return 
QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extract_obss_color_collision_info(void *wmi_hdl, + uint8_t *data, struct wmi_obss_color_collision_info *info) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (wmi_handle->ops->extract_obss_color_collision_info) + return wmi_handle->ops->extract_obss_color_collision_info(data, + info); + + return QDF_STATUS_E_FAILURE; +} + +wmi_host_channel_width wmi_get_ch_width_from_phy_mode(void *wmi_hdl, + WMI_HOST_WLAN_PHY_MODE phymode) +{ + /* + * this API does translation between host only strcutres, hence + * does not need separate TLV, non-TLV definitions + */ + + if (phymode >= WMI_HOST_MODE_11A && phymode < WMI_HOST_MODE_MAX) + return mode_to_width[phymode]; + else + return WMI_HOST_CHAN_WIDTH_20; +} + +#ifdef QCA_SUPPORT_CP_STATS +QDF_STATUS wmi_extract_cca_stats(wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_congestion_stats *stats) +{ + if (wmi_handle->ops->extract_cca_stats) + return wmi_handle->ops->extract_cca_stats(wmi_handle, evt_buf, + stats); + + return QDF_STATUS_E_FAILURE; +} +#endif /* QCA_SUPPORT_CP_STATS */ + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +QDF_STATUS +wmi_unified_dfs_send_avg_params_cmd(void *wmi_hdl, + struct dfs_radar_found_params *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (wmi_handle->ops->send_dfs_average_radar_params_cmd) + return wmi_handle->ops->send_dfs_average_radar_params_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_dfs_status_from_fw(void *wmi_hdl, void *evt_buf, + uint32_t *dfs_status_check) +{ + wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (wmi_handle->ops->extract_dfs_status_from_fw) + return wmi_handle->ops->extract_dfs_status_from_fw(wmi_handle, + evt_buf, dfs_status_check); + + return QDF_STATUS_E_FAILURE; +} +#endif + +QDF_STATUS +wmi_unified_send_roam_scan_stats_cmd(void *wmi_hdl, + struct wmi_roam_scan_stats_req *params) +{ + 
wmi_unified_t wmi_handle = (wmi_unified_t)wmi_hdl; + + if (wmi_handle->ops->send_roam_scan_stats_cmd) + return wmi_handle->ops->send_roam_scan_stats_cmd(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS +wmi_extract_roam_scan_stats_res_evt(wmi_unified_t wmi, void *evt_buf, + uint32_t *vdev_id, + struct wmi_roam_scan_stats_res **res_param) +{ + if (wmi->ops->extract_roam_scan_stats_res_evt) + return wmi->ops->extract_roam_scan_stats_res_evt(wmi, + evt_buf, + vdev_id, res_param); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_mws_coex_req_cmd(struct wmi_unified *wmi_handle, + uint32_t vdev_id, + uint32_t cmd_id) +{ + + if (wmi_handle->ops->send_mws_coex_status_req_cmd) + return wmi_handle->ops->send_mws_coex_status_req_cmd(wmi_handle, + vdev_id, cmd_id); + + return QDF_STATUS_E_FAILURE; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_dfs_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_dfs_api.c new file mode 100644 index 0000000000000000000000000000000000000000..57ae8ff706b3184409197a149e9235d09e3bc0eb --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_dfs_api.c @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Implement API's specific to DFS component. + */ + +#include +#include +#include +#include +#include +#include + +QDF_STATUS wmi_extract_dfs_cac_complete_event(void *wmi_hdl, + uint8_t *evt_buf, + uint32_t *vdev_id, + uint32_t len) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle && wmi_handle->ops->extract_dfs_cac_complete_event) + return wmi_handle->ops->extract_dfs_cac_complete_event( + wmi_handle, evt_buf, vdev_id, len); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_extract_dfs_cac_complete_event); + +QDF_STATUS wmi_extract_dfs_radar_detection_event(void *wmi_hdl, + uint8_t *evt_buf, + struct radar_found_info *radar_found, + uint32_t len) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle && wmi_handle->ops->extract_dfs_radar_detection_event) + return wmi_handle->ops->extract_dfs_radar_detection_event( + wmi_handle, evt_buf, radar_found, len); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef QCA_MCL_DFS_SUPPORT +QDF_STATUS wmi_extract_wlan_radar_event_info(void *wmi_hdl, + uint8_t *evt_buf, + struct radar_event_info *wlan_radar_event, + uint32_t len) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle->ops->extract_wlan_radar_event_info) + return wmi_handle->ops->extract_wlan_radar_event_info( + wmi_handle, evt_buf, wlan_radar_event, len); + + return QDF_STATUS_E_FAILURE; +} +#endif +qdf_export_symbol(wmi_extract_dfs_radar_detection_event); diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_extscan_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_extscan_api.c 
new file mode 100644 index 0000000000000000000000000000000000000000..abb131bd55dc116a64e6668367304e5b1c455eb7 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_extscan_api.c @@ -0,0 +1,247 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wmi_unified_priv.h" + +/** + * wmi_unified_reset_passpoint_network_list_cmd() - reset passpoint network list + * @wmi_hdl: wmi handle + * @req: passpoint network request structure + * + * This function sends down WMI command with network id set to wildcard id. 
+ * firmware shall clear all the config entries + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_reset_passpoint_network_list_cmd(void *wmi_hdl, + struct wifi_passpoint_req_param *req) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_reset_passpoint_network_list_cmd) + return wmi_handle->ops->send_reset_passpoint_network_list_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_set_passpoint_network_list_cmd() - set passpoint network list + * @wmi_hdl: wmi handle + * @req: passpoint network request structure + * + * This function reads the incoming @req and fill in the destination + * WMI structure and send down the passpoint configs down to the firmware + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_set_passpoint_network_list_cmd(void *wmi_hdl, + struct wifi_passpoint_req_param *req) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_passpoint_network_list_cmd) + return wmi_handle->ops->send_set_passpoint_network_list_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} + +/** wmi_unified_set_epno_network_list_cmd() - set epno network list + * @wmi_hdl: wmi handle + * @req: epno config params request structure + * + * This function reads the incoming epno config request structure + * and constructs the WMI message to the firmware. 
+ * + * Returns: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failures, + * error number otherwise + */ +QDF_STATUS wmi_unified_set_epno_network_list_cmd(void *wmi_hdl, + struct wifi_enhanced_pno_params *req) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_set_epno_network_list_cmd) + return wmi_handle->ops->send_set_epno_network_list_cmd(wmi_handle, + req); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_extscan_get_capabilities_cmd() - extscan get capabilities + * @wmi_hdl: wmi handle + * @pgetcapab: get capabilities params + * + * This function send request to fw to get extscan capabilities. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_extscan_get_capabilities_cmd(void *wmi_hdl, + struct extscan_capabilities_params *pgetcapab) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_extscan_get_capabilities_cmd) + return wmi_handle->ops->send_extscan_get_capabilities_cmd(wmi_handle, + pgetcapab); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_extscan_get_cached_results_cmd() - extscan get cached results + * @wmi_hdl: wmi handle + * @pcached_results: cached results parameters + * + * This function send request to fw to get cached results. 
+ * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_extscan_get_cached_results_cmd(void *wmi_hdl, + struct extscan_cached_result_params *pcached_results) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_extscan_get_cached_results_cmd) + return wmi_handle->ops->send_extscan_get_cached_results_cmd(wmi_handle, + pcached_results); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_extscan_stop_change_monitor_cmd() - send stop change monitor cmd + * @wmi_hdl: wmi handle + * @reset_req: Reset change request params + * + * This function sends stop change monitor request to fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_extscan_stop_change_monitor_cmd(void *wmi_hdl, + struct extscan_capabilities_reset_params *reset_req) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_extscan_stop_change_monitor_cmd) + return wmi_handle->ops->send_extscan_stop_change_monitor_cmd(wmi_handle, + reset_req); + + return QDF_STATUS_E_FAILURE; +} + + + +/** + * wmi_unified_extscan_start_change_monitor_cmd() - start change monitor cmd + * @wmi_hdl: wmi handle + * @psigchange: change monitor request params + * + * This function sends start change monitor request to fw. 
+ * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_extscan_start_change_monitor_cmd(void *wmi_hdl, + struct extscan_set_sig_changereq_params * + psigchange) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_extscan_start_change_monitor_cmd) + return wmi_handle->ops->send_extscan_start_change_monitor_cmd(wmi_handle, + psigchange); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_extscan_stop_hotlist_monitor_cmd() - stop hotlist monitor + * @wmi_hdl: wmi handle + * @photlist_reset: hotlist reset params + * + * This function configures hotlist monitor to stop in fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +QDF_STATUS wmi_unified_extscan_stop_hotlist_monitor_cmd(void *wmi_hdl, + struct extscan_bssid_hotlist_reset_params *photlist_reset) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_extscan_stop_hotlist_monitor_cmd) + return wmi_handle->ops->send_extscan_stop_hotlist_monitor_cmd(wmi_handle, + photlist_reset); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_extscan_start_hotlist_monitor_cmd(void *wmi_hdl, + struct extscan_bssid_hotlist_set_params *params) +{ + wmi_unified_t wmi_handle = wmi_hdl; + + if (wmi_handle->ops->send_extscan_start_hotlist_monitor_cmd) + return wmi_handle->ops->send_extscan_start_hotlist_monitor_cmd(wmi_handle, + params); + + return QDF_STATUS_E_FAILURE; +} + +/** + * wmi_unified_stop_extscan_cmd() - stop extscan command to fw. + * @wmi_hdl: wmi handle + * @pstopcmd: stop scan command request params + * + * This function sends stop extscan request to fw. + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure. 
 */
QDF_STATUS wmi_unified_stop_extscan_cmd(void *wmi_hdl,
			  struct extscan_stop_req_params *pstopcmd)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	/* Optional backend op; report failure when it is absent. */
	if (wmi_handle->ops->send_stop_extscan_cmd)
		return wmi_handle->ops->send_stop_extscan_cmd(wmi_handle,
			    pstopcmd);

	return QDF_STATUS_E_FAILURE;
}

/**
 * wmi_unified_start_extscan_cmd() - start extscan command to fw.
 * @wmi_hdl: wmi handle
 * @pstart: scan command request params
 *
 * This function sends start extscan request to fw.
 *
 * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure.
 */
QDF_STATUS wmi_unified_start_extscan_cmd(void *wmi_hdl,
			  struct wifi_scan_cmd_req_params *pstart)
{
	wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;

	/* Optional backend op; report failure when it is absent. */
	if (wmi_handle->ops->send_start_extscan_cmd)
		return wmi_handle->ops->send_start_extscan_cmd(wmi_handle,
			    pstart);

	return QDF_STATUS_E_FAILURE;
}
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_extscan_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_extscan_tlv.c
new file mode 100644
index 0000000000000000000000000000000000000000..570f6f46b3d42e7ee38362420e2aa92621ccfe01
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_extscan_tlv.c
@@ -0,0 +1,1110 @@
/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS.
 IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/* NOTE(review): the first #include below lost its header name during
 * extraction (it appears bare); recover the header name from the
 * upstream file before building.
 */
#include
#include "wmi.h"
#include "wmi_unified_priv.h"

/**
 * send_reset_passpoint_network_list_cmd_tlv() - reset passpoint network list
 * @wmi_handle: wmi handle
 * @req: passpoint network request structure
 *
 * This function sends down WMI command with network id set to wildcard id.
 * firmware shall clear all the config entries
 *
 * Return: QDF_STATUS enumeration
 */
static QDF_STATUS send_reset_passpoint_network_list_cmd_tlv
					(wmi_unified_t wmi_handle,
					struct wifi_passpoint_req_param *req)
{
	wmi_passpoint_config_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	uint32_t len;
	int ret;

	len = sizeof(*cmd);
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: Failed allocate wmi buffer", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	cmd = (wmi_passpoint_config_cmd_fixed_param *) wmi_buf_data(buf);

	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_passpoint_config_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_passpoint_config_cmd_fixed_param));
	/* Wildcard id tells the firmware to drop every configured entry. */
	cmd->id = WMI_PASSPOINT_NETWORK_ID_WILDCARD;

	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_PASSPOINT_LIST_CONFIG_CMDID);
	if (ret) {
		/* Caller owns @buf only until a successful send. */
		WMI_LOGE("%s: Failed to send reset passpoint network list wmi cmd",
			 __func__);
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * send_set_passpoint_network_list_cmd_tlv() - set passpoint network list
 * @wmi_handle: wmi handle
 * @req: passpoint network request structure
 *
 * This function reads the incoming @req and fill in the destination
 * WMI structure and send down the passpoint configs down to the firmware
 *
 *
 Return: QDF_STATUS enumeration
 */
static QDF_STATUS send_set_passpoint_network_list_cmd_tlv
					(wmi_unified_t wmi_handle,
					struct wifi_passpoint_req_param *req)
{
	wmi_passpoint_config_cmd_fixed_param *cmd;
	u_int8_t i, j, *bytes;
	wmi_buf_t buf;
	uint32_t len;
	int ret;

	len = sizeof(*cmd);
	/* One WMI command is sent per configured network. */
	for (i = 0; i < req->num_networks; i++) {
		buf = wmi_buf_alloc(wmi_handle, len);
		if (!buf) {
			WMI_LOGE("%s: Failed allocate wmi buffer", __func__);
			return QDF_STATUS_E_NOMEM;
		}

		cmd = (wmi_passpoint_config_cmd_fixed_param *)
			wmi_buf_data(buf);

		WMITLV_SET_HDR(&cmd->tlv_header,
			WMITLV_TAG_STRUC_wmi_passpoint_config_cmd_fixed_param,
			WMITLV_GET_STRUCT_TLVLEN(
				wmi_passpoint_config_cmd_fixed_param));
		cmd->id = req->networks[i].id;
		WMI_LOGD("%s: network id: %u", __func__, cmd->id);
		/* NOTE(review): copy length is strlen(realm)+1 — assumes the
		 * source realm is NUL-terminated and fits cmd->realm; confirm
		 * the caller bounds it to the WMI realm field size.
		 */
		qdf_mem_copy(cmd->realm, req->networks[i].realm,
			strlen(req->networks[i].realm) + 1);
		WMI_LOGD("%s: realm: %s", __func__, cmd->realm);
		for (j = 0; j < PASSPOINT_ROAMING_CONSORTIUM_ID_NUM; j++) {
			bytes = (uint8_t *) &req->networks[i].roaming_consortium_ids[j];
			WMI_LOGD("index: %d rcids: %02x %02x %02x %02x %02x %02x %02x %02x",
				j, bytes[0], bytes[1], bytes[2], bytes[3],
				bytes[4], bytes[5], bytes[6], bytes[7]);

			qdf_mem_copy(&cmd->roaming_consortium_ids[j],
				&req->networks[i].roaming_consortium_ids[j],
				PASSPOINT_ROAMING_CONSORTIUM_ID_LEN);
		}
		qdf_mem_copy(cmd->plmn, req->networks[i].plmn,
				PASSPOINT_PLMN_ID_LEN);
		WMI_LOGD("%s: plmn: %02x:%02x:%02x", __func__,
			cmd->plmn[0], cmd->plmn[1], cmd->plmn[2]);

		ret = wmi_unified_cmd_send(wmi_handle, buf, len,
					   WMI_PASSPOINT_LIST_CONFIG_CMDID);
		if (ret) {
			/* Abort on first failure; earlier entries have
			 * already been sent to firmware.
			 */
			WMI_LOGE("%s: Failed to send set passpoint network list wmi cmd",
				 __func__);
			wmi_buf_free(buf);
			return QDF_STATUS_E_FAILURE;
		}
	}

	return QDF_STATUS_SUCCESS;
}

/** send_set_epno_network_list_cmd_tlv() - set epno network list
 * @wmi_handle: wmi handle
 * @req: epno config params request structure
 *
 * This function
 reads the incoming epno config request structure
 * and constructs the WMI message to the firmware.
 *
 * Returns: 0 on success, error number otherwise
 */
static QDF_STATUS send_set_epno_network_list_cmd_tlv(wmi_unified_t wmi_handle,
		struct wifi_enhanced_pno_params *req)
{
	wmi_nlo_config_cmd_fixed_param *cmd;
	nlo_configured_parameters *nlo_list;
	enlo_candidate_score_params *cand_score_params;
	u_int8_t i, *buf_ptr;
	wmi_buf_t buf;
	uint32_t len;
	QDF_STATUS ret;

	/* Fixed Params */
	len = sizeof(*cmd);
	if (req->num_networks) {
		/* TLV place holder for array of structures
		 * then each nlo_configured_parameters(nlo_list) TLV.
		 */
		len += WMI_TLV_HDR_SIZE;
		len += (sizeof(nlo_configured_parameters)
			* QDF_MIN(req->num_networks, WMI_NLO_MAX_SSIDS));
		/* TLV for array of uint32 channel_list */
		len += WMI_TLV_HDR_SIZE;
		/* TLV for nlo_channel_prediction_cfg */
		len += WMI_TLV_HDR_SIZE;
		/* TLV for candidate score params */
		len += sizeof(enlo_candidate_score_params);
	}

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: Failed allocate wmi buffer", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	cmd = (wmi_nlo_config_cmd_fixed_param *) wmi_buf_data(buf);

	buf_ptr = (u_int8_t *) cmd;
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_nlo_config_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
			       wmi_nlo_config_cmd_fixed_param));
	cmd->vdev_id = req->vdev_id;

	/* set flag to reset if num of networks are 0 */
	cmd->flags = (req->num_networks == 0 ?
		      WMI_NLO_CONFIG_ENLO_RESET : WMI_NLO_CONFIG_ENLO);

	buf_ptr += sizeof(wmi_nlo_config_cmd_fixed_param);

	/* SSID count is clamped to the firmware limit, matching the
	 * allocation above.
	 */
	cmd->no_of_ssids = QDF_MIN(req->num_networks, WMI_NLO_MAX_SSIDS);
	WMI_LOGD("SSID count: %d flags: %d",
		 cmd->no_of_ssids, cmd->flags);

	/* Fill nlo_config only when num_networks are non zero */
	if (cmd->no_of_ssids) {
		/* Fill networks */
		WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
			       cmd->no_of_ssids * sizeof(nlo_configured_parameters));
		buf_ptr += WMI_TLV_HDR_SIZE;

		nlo_list = (nlo_configured_parameters *) buf_ptr;
		for (i = 0; i < cmd->no_of_ssids; i++) {
			WMITLV_SET_HDR(&nlo_list[i].tlv_header,
				       WMITLV_TAG_ARRAY_BYTE,
				       WMITLV_GET_STRUCT_TLVLEN(
					       nlo_configured_parameters));
			/* Copy ssid and its length */
			nlo_list[i].ssid.valid = true;
			nlo_list[i].ssid.ssid.ssid_len =
				req->networks[i].ssid.length;
			qdf_mem_copy(nlo_list[i].ssid.ssid.ssid,
				     req->networks[i].ssid.mac_ssid,
				     nlo_list[i].ssid.ssid.ssid_len);
			WMI_LOGD("index: %d ssid: %.*s len: %d", i,
				 nlo_list[i].ssid.ssid.ssid_len,
				 (char *) nlo_list[i].ssid.ssid.ssid,
				 nlo_list[i].ssid.ssid.ssid_len);

			/* Copy pno flags */
			nlo_list[i].bcast_nw_type.valid = true;
			nlo_list[i].bcast_nw_type.bcast_nw_type =
				req->networks[i].flags;
			WMI_LOGD("PNO flags (%u)",
				 nlo_list[i].bcast_nw_type.bcast_nw_type);

			/* Copy auth bit field */
			nlo_list[i].auth_type.valid = true;
			nlo_list[i].auth_type.auth_type =
				req->networks[i].auth_bit_field;
			WMI_LOGD("Auth bit field (%u)",
				 nlo_list[i].auth_type.auth_type);
		}

		buf_ptr += cmd->no_of_ssids * sizeof(nlo_configured_parameters);
		/* Fill the channel list (empty: firmware picks channels) */
		WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, 0);
		buf_ptr += WMI_TLV_HDR_SIZE;

		/* Fill prediction_param (empty placeholder TLV) */
		WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0);
		buf_ptr += WMI_TLV_HDR_SIZE;

		/* Fill epno candidate score params */
		cand_score_params = (enlo_candidate_score_params *) buf_ptr;
		WMITLV_SET_HDR(buf_ptr,
			       WMITLV_TAG_STRUC_enlo_candidate_score_param,
			       WMITLV_GET_STRUCT_TLVLEN(enlo_candidate_score_params));
		cand_score_params->min5GHz_rssi =
			req->min_5ghz_rssi;
		cand_score_params->min24GHz_rssi =
			req->min_24ghz_rssi;
		cand_score_params->initial_score_max =
			req->initial_score_max;
		cand_score_params->current_connection_bonus =
			req->current_connection_bonus;
		cand_score_params->same_network_bonus =
			req->same_network_bonus;
		cand_score_params->secure_bonus =
			req->secure_bonus;
		cand_score_params->band5GHz_bonus =
			req->band_5ghz_bonus;
		buf_ptr += sizeof(enlo_candidate_score_params);
	}

	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
	if (QDF_IS_STATUS_ERROR(ret)) {
		WMI_LOGE("%s: Failed to send nlo wmi cmd", __func__);
		wmi_buf_free(buf);
		return QDF_STATUS_E_INVAL;
	}

	WMI_LOGD("set ePNO list request sent successfully for vdev %d",
		 req->vdev_id);

	return ret;
}

/**
 * send_extscan_get_capabilities_cmd_tlv() - extscan get capabilities
 * @wmi_handle: wmi handle
 * @pgetcapab: get capabilities params
 *
 * This function send request to fw to get extscan capabilities.
+ * + * Return: CDF status + */ +static QDF_STATUS send_extscan_get_capabilities_cmd_tlv(wmi_unified_t wmi_handle, + struct extscan_capabilities_params *pgetcapab) +{ + wmi_extscan_get_capabilities_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + uint8_t *buf_ptr; + + len = sizeof(*cmd); + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + + cmd = (wmi_extscan_get_capabilities_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extscan_get_capabilities_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extscan_get_capabilities_cmd_fixed_param)); + + cmd->request_id = pgetcapab->request_id; + + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_EXTSCAN_GET_CAPABILITIES_CMDID)) { + WMI_LOGE("%s: failed to command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_extscan_get_cached_results_cmd_tlv() - extscan get cached results + * @wmi_handle: wmi handle + * @pcached_results: cached results parameters + * + * This function send request to fw to get cached results. 
+ * + * Return: CDF status + */ +static QDF_STATUS send_extscan_get_cached_results_cmd_tlv(wmi_unified_t wmi_handle, + struct extscan_cached_result_params *pcached_results) +{ + wmi_extscan_get_cached_results_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + uint8_t *buf_ptr; + + len = sizeof(*cmd); + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + + cmd = (wmi_extscan_get_cached_results_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extscan_get_cached_results_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extscan_get_cached_results_cmd_fixed_param)); + + cmd->request_id = pcached_results->request_id; + cmd->vdev_id = pcached_results->vdev_id; + cmd->control_flags = pcached_results->flush; + + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_EXTSCAN_GET_CACHED_RESULTS_CMDID)) { + WMI_LOGE("%s: failed to command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_extscan_stop_change_monitor_cmd_tlv() - send stop change monitor cmd + * @wmi_handle: wmi handle + * @reset_req: Reset change request params + * + * This function sends stop change monitor request to fw. 
+ * + * Return: CDF status + */ +static QDF_STATUS send_extscan_stop_change_monitor_cmd_tlv + (wmi_unified_t wmi_handle, + struct extscan_capabilities_reset_params *reset_req) +{ + wmi_extscan_configure_wlan_change_monitor_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + uint8_t *buf_ptr; + int change_list = 0; + + len = sizeof(*cmd); + + /* reset significant change tlv is set to 0 */ + len += WMI_TLV_HDR_SIZE; + len += change_list * sizeof(wmi_extscan_wlan_change_bssid_param); + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + + cmd = (wmi_extscan_configure_wlan_change_monitor_cmd_fixed_param *) + buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extscan_configure_wlan_change_monitor_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extscan_configure_wlan_change_monitor_cmd_fixed_param)); + + cmd->request_id = reset_req->request_id; + cmd->vdev_id = reset_req->vdev_id; + cmd->mode = 0; + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + change_list * + sizeof(wmi_extscan_wlan_change_bssid_param)); + buf_ptr += WMI_TLV_HDR_SIZE + (change_list * + sizeof + (wmi_extscan_wlan_change_bssid_param)); + + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID)) { + WMI_LOGE("%s: failed to command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * wmi_get_buf_extscan_change_monitor_cmd() - fill change monitor request + * @wmi_handle: wmi handle + * @psigchange: change monitor request params + * @buf: wmi buffer + * @buf_len: buffer length + * + * This function fills elements of change monitor request buffer. 
 *
 * Return: QDF status
 */
static QDF_STATUS wmi_get_buf_extscan_change_monitor_cmd
			(wmi_unified_t wmi_handle,
			struct extscan_set_sig_changereq_params
			*psigchange, wmi_buf_t *buf, int *buf_len)
{
	wmi_extscan_configure_wlan_change_monitor_cmd_fixed_param *cmd;
	wmi_extscan_wlan_change_bssid_param *dest_chglist;
	uint8_t *buf_ptr;
	int j;
	int len = sizeof(*cmd);
	uint32_t numap = psigchange->num_ap;
	struct ap_threshold_params *src_ap = psigchange->ap;

	/* Bound check before sizing the TLV array below. */
	if (!numap || (numap > WMI_WLAN_EXTSCAN_MAX_SIGNIFICANT_CHANGE_APS)) {
		WMI_LOGE("%s: Invalid number of bssid's", __func__);
		return QDF_STATUS_E_INVAL;
	}
	len += WMI_TLV_HDR_SIZE;
	len += numap * sizeof(wmi_extscan_wlan_change_bssid_param);

	*buf = wmi_buf_alloc(wmi_handle, len);
	if (!*buf) {
		WMI_LOGP("%s: failed to allocate memory for change monitor cmd",
			 __func__);
		return QDF_STATUS_E_FAILURE;
	}
	buf_ptr = (uint8_t *) wmi_buf_data(*buf);
	cmd =
		(wmi_extscan_configure_wlan_change_monitor_cmd_fixed_param *)
		buf_ptr;
	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_extscan_configure_wlan_change_monitor_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN
		(wmi_extscan_configure_wlan_change_monitor_cmd_fixed_param));

	cmd->request_id = psigchange->request_id;
	cmd->vdev_id = psigchange->vdev_id;
	cmd->total_entries = numap;
	/* mode 1 == start monitoring */
	cmd->mode = 1;
	cmd->num_entries_in_page = numap;
	cmd->lost_ap_scan_count = psigchange->lostap_sample_size;
	cmd->max_rssi_samples = psigchange->rssi_sample_size;
	cmd->rssi_averaging_samples = psigchange->rssi_sample_size;
	cmd->max_out_of_range_count = psigchange->min_breaching;

	buf_ptr += sizeof(*cmd);
	WMITLV_SET_HDR(buf_ptr,
		       WMITLV_TAG_ARRAY_STRUC,
		       numap * sizeof(wmi_extscan_wlan_change_bssid_param));
	dest_chglist = (wmi_extscan_wlan_change_bssid_param *)
		       (buf_ptr + WMI_TLV_HDR_SIZE);

	for (j = 0; j < numap; j++) {
		/* NOTE(review): the tag below names the bucket struct while
		 * the length names the bssid struct — looks suspicious;
		 * confirm against the WMI TLV definitions.
		 */
		WMITLV_SET_HDR(dest_chglist,
			       WMITLV_TAG_STRUC_wmi_extscan_bucket_cmd_fixed_param,
			       WMITLV_GET_STRUCT_TLVLEN
			       (wmi_extscan_wlan_change_bssid_param));

		dest_chglist->lower_rssi_limit = src_ap->low;
		dest_chglist->upper_rssi_limit = src_ap->high;
		WMI_CHAR_ARRAY_TO_MAC_ADDR(src_ap->bssid.bytes,
					   &dest_chglist->bssid);

		WMI_LOGD("%s: min_rssi %d", __func__,
			 dest_chglist->lower_rssi_limit);
		dest_chglist++;
		src_ap++;
	}
	buf_ptr += WMI_TLV_HDR_SIZE +
		   (numap * sizeof(wmi_extscan_wlan_change_bssid_param));
	/* Report the full message length back to the caller. */
	*buf_len = len;
	return QDF_STATUS_SUCCESS;
}

/**
 * send_extscan_start_change_monitor_cmd_tlv() - send start change monitor cmd
 * @wmi_handle: wmi handle
 * @psigchange: change monitor request params
 *
 * This function sends start change monitor request to fw.
 *
 * Return: CDF status
 */
static QDF_STATUS send_extscan_start_change_monitor_cmd_tlv
			(wmi_unified_t wmi_handle,
			struct extscan_set_sig_changereq_params *
			psigchange)
{
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
	wmi_buf_t buf;
	int len;


	/* Build the TLV message first; ownership of @buf passes to the
	 * send on success, is freed here on send failure.
	 */
	qdf_status = wmi_get_buf_extscan_change_monitor_cmd(wmi_handle,
			     psigchange, &buf,
			     &len);
	if (qdf_status != QDF_STATUS_SUCCESS) {
		WMI_LOGE("%s: Failed to get buffer for change monitor cmd",
			 __func__);
		return QDF_STATUS_E_FAILURE;
	}
	if (!buf) {
		WMI_LOGE("%s: Failed to get buffer", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	if (wmi_unified_cmd_send(wmi_handle, buf, len,
				 WMI_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID)) {
		WMI_LOGE("%s: failed to send command", __func__);
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * send_extscan_stop_hotlist_monitor_cmd_tlv() - stop hotlist monitor
 * @wmi_handle: wmi handle
 * @photlist_reset: hotlist reset params
 *
 * This function configures hotlist monitor to stop in fw.
+ * + * Return: CDF status + */ +static QDF_STATUS send_extscan_stop_hotlist_monitor_cmd_tlv + (wmi_unified_t wmi_handle, + struct extscan_bssid_hotlist_reset_params *photlist_reset) +{ + wmi_extscan_configure_hotlist_monitor_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + uint8_t *buf_ptr; + int hotlist_entries = 0; + + len = sizeof(*cmd); + + /* reset bssid hotlist with tlv set to 0 */ + len += WMI_TLV_HDR_SIZE; + len += hotlist_entries * sizeof(wmi_extscan_hotlist_entry); + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + cmd = (wmi_extscan_configure_hotlist_monitor_cmd_fixed_param *) + buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extscan_configure_hotlist_monitor_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extscan_configure_hotlist_monitor_cmd_fixed_param)); + + cmd->request_id = photlist_reset->request_id; + cmd->vdev_id = photlist_reset->vdev_id; + cmd->mode = 0; + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + hotlist_entries * sizeof(wmi_extscan_hotlist_entry)); + buf_ptr += WMI_TLV_HDR_SIZE + + (hotlist_entries * sizeof(wmi_extscan_hotlist_entry)); + + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID)) { + WMI_LOGE("%s: failed to command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_stop_extscan_cmd_tlv() - stop extscan command to fw. + * @wmi_handle: wmi handle + * @pstopcmd: stop scan command request params + * + * This function sends stop extscan request to fw. + * + * Return: CDF Status. 
+ */ +static QDF_STATUS send_stop_extscan_cmd_tlv(wmi_unified_t wmi_handle, + struct extscan_stop_req_params *pstopcmd) +{ + wmi_extscan_stop_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + uint8_t *buf_ptr; + + len = sizeof(*cmd); + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + cmd = (wmi_extscan_stop_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extscan_stop_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extscan_stop_cmd_fixed_param)); + + cmd->request_id = pstopcmd->request_id; + cmd->vdev_id = pstopcmd->vdev_id; + + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_EXTSCAN_STOP_CMDID)) { + WMI_LOGE("%s: failed to command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * wmi_get_buf_extscan_start_cmd() - Fill extscan start request + * @wmi_handle: wmi handle + * @pstart: scan command request params + * @buf: event buffer + * @buf_len: length of buffer + * + * This function fills individual elements of extscan request and + * TLV for buckets, channel list. + * + * Return: CDF Status. 
 */
static
QDF_STATUS wmi_get_buf_extscan_start_cmd(wmi_unified_t wmi_handle,
		struct wifi_scan_cmd_req_params *pstart,
		wmi_buf_t *buf, int *buf_len)
{
	wmi_extscan_start_cmd_fixed_param *cmd;
	wmi_extscan_bucket *dest_blist;
	wmi_extscan_bucket_channel *dest_clist;
	struct wifi_scan_bucket_params *src_bucket = pstart->buckets;
	struct wifi_scan_channelspec_params *src_channel = src_bucket->channels;
	struct wifi_scan_channelspec_params save_channel[WMI_WLAN_EXTSCAN_MAX_CHANNELS];

	uint8_t *buf_ptr;
	int i, k, count = 0;
	int len = sizeof(*cmd);
	int nbuckets = pstart->num_buckets;
	int nchannels = 0;

	/* These TLV's are NULL by default */
	uint32_t ie_len_with_pad = 0;
	int num_ssid = 0;
	int num_bssid = 0;
	int ie_len = 0;

	uint32_t base_period = pstart->base_period;

	/* TLV placeholder for ssid_list (NULL) */
	len += WMI_TLV_HDR_SIZE;
	len += num_ssid * sizeof(wmi_ssid);

	/* TLV placeholder for bssid_list (NULL) */
	len += WMI_TLV_HDR_SIZE;
	len += num_bssid * sizeof(wmi_mac_addr);

	/* TLV placeholder for ie_data (NULL) */
	len += WMI_TLV_HDR_SIZE;
	len += ie_len * sizeof(uint32_t);

	/* TLV placeholder for bucket */
	len += WMI_TLV_HDR_SIZE;
	len += nbuckets * sizeof(wmi_extscan_bucket);

	/* TLV channel placeholder */
	len += WMI_TLV_HDR_SIZE;
	/* NOTE(review): nchannels accumulates across buckets but
	 * save_channel[] below is bounded by WMI_WLAN_EXTSCAN_MAX_CHANNELS;
	 * confirm callers cap the total channel count, otherwise
	 * save_channel[count++] can overflow.
	 */
	for (i = 0; i < nbuckets; i++) {
		nchannels += src_bucket->num_channels;
		src_bucket++;
	}

	WMI_LOGD("%s: Total buckets: %d total #of channels is %d",
		 __func__, nbuckets, nchannels);
	len += nchannels * sizeof(wmi_extscan_bucket_channel);
	/* Allocate the memory */
	*buf = wmi_buf_alloc(wmi_handle, len);
	if (!*buf) {
		WMI_LOGP("%s: failed to allocate memory for start extscan cmd",
			 __func__);
		return QDF_STATUS_E_NOMEM;
	}
	buf_ptr = (uint8_t *) wmi_buf_data(*buf);
	cmd = (wmi_extscan_start_cmd_fixed_param *) buf_ptr;
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_extscan_start_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN
		       (wmi_extscan_start_cmd_fixed_param));

	cmd->request_id = pstart->request_id;
	cmd->vdev_id = pstart->vdev_id;
	cmd->base_period = pstart->base_period;
	cmd->num_buckets = nbuckets;
	cmd->configuration_flags = 0;
	if (pstart->configuration_flags & WMI_EXTSCAN_LP_EXTENDED_BATCHING)
		cmd->configuration_flags |= WMI_EXTSCAN_EXTENDED_BATCHING_EN;
	WMI_LOGI("%s: configuration_flags: 0x%x", __func__,
		 cmd->configuration_flags);
#ifdef FEATURE_WLAN_EXTSCAN
	cmd->min_rest_time = WMI_EXTSCAN_REST_TIME;
	cmd->max_rest_time = WMI_EXTSCAN_REST_TIME;
	cmd->max_scan_time = WMI_EXTSCAN_MAX_SCAN_TIME;
	cmd->burst_duration = WMI_EXTSCAN_BURST_DURATION;
#endif

	/* The max dwell time is retrieved from the first channel
	 * of the first bucket and kept common for all channels.
	 */
	cmd->min_dwell_time_active = pstart->min_dwell_time_active;
	cmd->max_dwell_time_active = pstart->max_dwell_time_active;
	cmd->min_dwell_time_passive = pstart->min_dwell_time_passive;
	cmd->max_dwell_time_passive = pstart->max_dwell_time_passive;
	cmd->max_bssids_per_scan_cycle = pstart->max_ap_per_scan;
	cmd->max_table_usage = pstart->report_threshold_percent;
	cmd->report_threshold_num_scans = pstart->report_threshold_num_scans;

	/* Probe cadence derives from the active dwell time. */
	cmd->repeat_probe_time = cmd->max_dwell_time_active /
				 WMI_SCAN_NPROBES_DEFAULT;
	cmd->probe_delay = 0;
	cmd->probe_spacing_time = 0;
	cmd->idle_time = 0;
	cmd->scan_ctrl_flags = WMI_SCAN_ADD_BCAST_PROBE_REQ |
			       WMI_SCAN_ADD_CCK_RATES |
			       WMI_SCAN_ADD_OFDM_RATES |
			       WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ |
			       WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ;
	WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags,
				pstart->extscan_adaptive_dwell_mode);
	cmd->scan_priority = WMI_SCAN_PRIORITY_VERY_LOW;
	cmd->num_ssids = 0;
	cmd->num_bssid = 0;
	cmd->ie_len = 0;
	/* Guard against divide-by-zero when repeat_probe_time is 0. */
	cmd->n_probes = (cmd->repeat_probe_time > 0) ?
			cmd->max_dwell_time_active / cmd->repeat_probe_time : 0;

	buf_ptr += sizeof(*cmd);
	WMITLV_SET_HDR(buf_ptr,
		       WMITLV_TAG_ARRAY_FIXED_STRUC,
		       num_ssid * sizeof(wmi_ssid));
	buf_ptr += WMI_TLV_HDR_SIZE + (num_ssid * sizeof(wmi_ssid));

	WMITLV_SET_HDR(buf_ptr,
		       WMITLV_TAG_ARRAY_FIXED_STRUC,
		       num_bssid * sizeof(wmi_mac_addr));
	buf_ptr += WMI_TLV_HDR_SIZE + (num_bssid * sizeof(wmi_mac_addr));

	ie_len_with_pad = 0;
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE,
		       ie_len_with_pad);
	buf_ptr += WMI_TLV_HDR_SIZE + ie_len_with_pad;

	WMITLV_SET_HDR(buf_ptr,
		       WMITLV_TAG_ARRAY_STRUC,
		       nbuckets * sizeof(wmi_extscan_bucket));
	dest_blist = (wmi_extscan_bucket *)
		     (buf_ptr + WMI_TLV_HDR_SIZE);
	src_bucket = pstart->buckets;

	/* Retrieve scanning information from each bucket and
	 * channels and send it to the target
	 */
	for (i = 0; i < nbuckets; i++) {
		WMITLV_SET_HDR(dest_blist,
			       WMITLV_TAG_STRUC_wmi_extscan_bucket_cmd_fixed_param,
			       WMITLV_GET_STRUCT_TLVLEN(wmi_extscan_bucket));

		dest_blist->bucket_id = src_bucket->bucket;
		dest_blist->base_period_multiplier =
			src_bucket->period / base_period;
		dest_blist->min_period = src_bucket->period;
		dest_blist->max_period = src_bucket->max_period;
		dest_blist->exp_backoff = src_bucket->exponent;
		dest_blist->exp_max_step_count = src_bucket->step_count;
		dest_blist->channel_band = src_bucket->band;
		dest_blist->num_channels = src_bucket->num_channels;
		dest_blist->notify_extscan_events = 0;

		if (src_bucket->report_events &
		    WMI_EXTSCAN_REPORT_EVENTS_EACH_SCAN)
			dest_blist->notify_extscan_events =
				WMI_EXTSCAN_CYCLE_COMPLETED_EVENT |
				WMI_EXTSCAN_CYCLE_STARTED_EVENT;

		if (src_bucket->report_events &
		    WMI_EXTSCAN_REPORT_EVENTS_FULL_RESULTS) {
			/* Full results: forward frames and notify on every
			 * bucket/cycle boundary.
			 */
			dest_blist->forwarding_flags =
				WMI_EXTSCAN_FORWARD_FRAME_TO_HOST;
			dest_blist->notify_extscan_events |=
				WMI_EXTSCAN_BUCKET_COMPLETED_EVENT |
				WMI_EXTSCAN_CYCLE_STARTED_EVENT |
				WMI_EXTSCAN_CYCLE_COMPLETED_EVENT;
		} else {
			dest_blist->forwarding_flags =
				WMI_EXTSCAN_NO_FORWARDING;
		}

		if (src_bucket->report_events &
		    WMI_EXTSCAN_REPORT_EVENTS_NO_BATCH)
			dest_blist->configuration_flags = 0;
		else
			dest_blist->configuration_flags =
				WMI_EXTSCAN_BUCKET_CACHE_RESULTS;

		WMI_LOGI("%s: ntfy_extscan_events:%u cfg_flags:%u fwd_flags:%u",
			 __func__, dest_blist->notify_extscan_events,
			 dest_blist->configuration_flags,
			 dest_blist->forwarding_flags);

		dest_blist->min_dwell_time_active =
			src_bucket->min_dwell_time_active;
		dest_blist->max_dwell_time_active =
			src_bucket->max_dwell_time_active;
		dest_blist->min_dwell_time_passive =
			src_bucket->min_dwell_time_passive;
		dest_blist->max_dwell_time_passive =
			src_bucket->max_dwell_time_passive;
		src_channel = src_bucket->channels;

		/* save the channel info to later populate
		 * the channel TLV
		 */
		for (k = 0; k < src_bucket->num_channels; k++) {
			save_channel[count++].channel = src_channel->channel;
			src_channel++;
		}
		dest_blist++;
		src_bucket++;
	}
	buf_ptr += WMI_TLV_HDR_SIZE + (nbuckets * sizeof(wmi_extscan_bucket));
	WMITLV_SET_HDR(buf_ptr,
		       WMITLV_TAG_ARRAY_STRUC,
		       nchannels * sizeof(wmi_extscan_bucket_channel));
	dest_clist = (wmi_extscan_bucket_channel *)
		     (buf_ptr + WMI_TLV_HDR_SIZE);

	/* Active or passive scan is based on the bucket dwell time
	 * and channel specific active,passive scans are not
	 * supported yet
	 */
	for (i = 0; i < nchannels; i++) {
		WMITLV_SET_HDR(dest_clist,
			       WMITLV_TAG_STRUC_wmi_extscan_bucket_channel_event_fixed_param,
			       WMITLV_GET_STRUCT_TLVLEN
			       (wmi_extscan_bucket_channel));
		dest_clist->channel = save_channel[i].channel;
		dest_clist++;
	}
	buf_ptr += WMI_TLV_HDR_SIZE +
		   (nchannels * sizeof(wmi_extscan_bucket_channel));
	/* Report the total message length back to the caller. */
	*buf_len = len;
	return QDF_STATUS_SUCCESS;
}

/**
 * send_start_extscan_cmd_tlv() - start extscan command to fw.
+ * @wmi_handle: wmi handle + * @pstart: scan command request params + * + * This function sends start extscan request to fw. + * + * Return: CDF Status. + */ +static QDF_STATUS send_start_extscan_cmd_tlv(wmi_unified_t wmi_handle, + struct wifi_scan_cmd_req_params *pstart) +{ + QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; + wmi_buf_t buf; + int len; + + /* Fill individual elements of extscan request and + * TLV for buckets, channel list. + */ + qdf_status = wmi_get_buf_extscan_start_cmd(wmi_handle, + pstart, &buf, &len); + if (qdf_status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: Failed to get buffer for ext scan cmd", __func__); + return QDF_STATUS_E_FAILURE; + } + if (!buf) { + WMI_LOGE("%s:Failed to get buffer for current extscan info", + __func__); + return QDF_STATUS_E_FAILURE; + } + if (wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_EXTSCAN_START_CMDID)) { + WMI_LOGE("%s: failed to send command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** wmi_get_hotlist_entries_per_page() - hotlist entries per page + * @wmi_handle: wmi handle. + * @cmd: size of command structure. + * @per_entry_size: per entry size. + * + * This utility function calculates how many hotlist entries can + * fit in one page. + * + * Return: number of entries + */ +static inline int wmi_get_hotlist_entries_per_page + (wmi_unified_t wmi_handle, + size_t cmd_size, + size_t per_entry_size) +{ + uint32_t avail_space = 0; + int num_entries = 0; + uint16_t max_msg_len = wmi_get_max_msg_len(wmi_handle); + + /* Calculate number of hotlist entries that can + * be passed in wma message request. + */ + avail_space = max_msg_len - cmd_size; + num_entries = avail_space / per_entry_size; + return num_entries; +} + +/** + * send_extscan_start_hotlist_monitor_cmd_tlv() - start hotlist monitor + * @wmi_handle: wmi handle + * @params: hotlist params + * + * This function configures hotlist monitor to start in fw. 
 *
 * Return: QDF status
 */
static QDF_STATUS send_extscan_start_hotlist_monitor_cmd_tlv
			(wmi_unified_t wmi_handle,
			struct extscan_bssid_hotlist_set_params *params)
{
	wmi_extscan_configure_hotlist_monitor_cmd_fixed_param *cmd = NULL;
	wmi_extscan_hotlist_entry *dest_hotlist;
	struct ap_threshold_params *src_ap = params->ap;
	wmi_buf_t buf;
	uint8_t *buf_ptr;

	int j, index = 0;
	int cmd_len = 0;
	int num_entries;
	int min_entries = 0;
	uint32_t numap = params->num_ap;
	/* len = fixed param + one TLV array header; entry bytes added below */
	int len = sizeof(*cmd);

	len += WMI_TLV_HDR_SIZE;
	cmd_len = len;

	/* How many hotlist entries fit in one WMI message page */
	num_entries = wmi_get_hotlist_entries_per_page(wmi_handle,
							cmd_len,
							sizeof(*dest_hotlist));
	/* setbssid hotlist expects the bssid list
	 * to be non zero value
	 */
	if (!numap || (numap > WMI_WLAN_EXTSCAN_MAX_HOTLIST_APS)) {
		WMI_LOGE("Invalid number of APs: %d", numap);
		return QDF_STATUS_E_INVAL;
	}

	/* Split the hot list entry pages and send multiple command
	 * requests if the buffer reaches the maximum request size
	 */
	while (index < numap) {
		/* NOTE(review): after the first page, num_entries holds the
		 * remaining-AP count set at the bottom of this loop, so this
		 * MIN() sizes each follow-up page — confirm intent vs.
		 * QDF_MIN(num_entries, numap - index)
		 */
		min_entries = QDF_MIN(num_entries, numap);
		len += min_entries * sizeof(wmi_extscan_hotlist_entry);
		buf = wmi_buf_alloc(wmi_handle, len);
		if (!buf) {
			WMI_LOGP("%s: wmi_buf_alloc failed", __func__);
			return QDF_STATUS_E_FAILURE;
		}
		buf_ptr = (uint8_t *) wmi_buf_data(buf);
		cmd = (wmi_extscan_configure_hotlist_monitor_cmd_fixed_param *)
			buf_ptr;
		WMITLV_SET_HDR(&cmd->tlv_header,
			WMITLV_TAG_STRUC_wmi_extscan_configure_hotlist_monitor_cmd_fixed_param,
			WMITLV_GET_STRUCT_TLVLEN
			(wmi_extscan_configure_hotlist_monitor_cmd_fixed_param));

		/* Multiple requests are sent until the num_entries_in_page
		 * matches the total_entries
		 */
		cmd->request_id = params->request_id;
		cmd->vdev_id = params->vdev_id;
		cmd->total_entries = numap;
		cmd->mode = 1;
		cmd->num_entries_in_page = min_entries;
		cmd->lost_ap_scan_count = params->lost_ap_sample_size;
		cmd->first_entry_index = index;

		WMI_LOGD("%s: vdev id:%d total_entries: %d num_entries: %d lost_ap_sample_size: %d",
			__func__, cmd->vdev_id, cmd->total_entries,
			cmd->num_entries_in_page,
			cmd->lost_ap_scan_count);

		buf_ptr += sizeof(*cmd);
		WMITLV_SET_HDR(buf_ptr,
			       WMITLV_TAG_ARRAY_STRUC,
			       min_entries * sizeof(wmi_extscan_hotlist_entry));
		dest_hotlist = (wmi_extscan_hotlist_entry *)
			(buf_ptr + WMI_TLV_HDR_SIZE);

		/* Populate bssid, channel info and rssi
		 * for the bssid's that are sent as hotlists.
		 */
		for (j = 0; j < min_entries; j++) {
			/* NOTE(review): the bucket fixed-param tag is used for
			 * hotlist entries here — confirm against the TLV defs
			 * that firmware expects this tag value.
			 */
			WMITLV_SET_HDR(dest_hotlist,
				WMITLV_TAG_STRUC_wmi_extscan_bucket_cmd_fixed_param,
				WMITLV_GET_STRUCT_TLVLEN
				(wmi_extscan_hotlist_entry));

			dest_hotlist->min_rssi = src_ap->low;
			WMI_CHAR_ARRAY_TO_MAC_ADDR(src_ap->bssid.bytes,
						&dest_hotlist->bssid);

			/* NOTE(review): dest_hotlist->channel is logged but
			 * never assigned in this function — it stays whatever
			 * the allocator left in the buffer; verify.
			 */
			WMI_LOGD("%s:channel:%d min_rssi %d",
				__func__, dest_hotlist->channel,
				dest_hotlist->min_rssi);
			WMI_LOGD
				("%s: bssid mac_addr31to0: 0x%x, mac_addr47to32: 0x%x",
				__func__, dest_hotlist->bssid.mac_addr31to0,
				dest_hotlist->bssid.mac_addr47to32);
			dest_hotlist++;
			src_ap++;
		}
		buf_ptr += WMI_TLV_HDR_SIZE +
			(min_entries * sizeof(wmi_extscan_hotlist_entry));

		if (wmi_unified_cmd_send(wmi_handle, buf, len,
			WMI_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID)) {
			WMI_LOGE("%s: failed to send command", __func__);
			wmi_buf_free(buf);
			return QDF_STATUS_E_FAILURE;
		}
		/* Advance paging state; len resets to header-only size */
		index = index + min_entries;
		num_entries = numap - min_entries;
		len = cmd_len;
	}
	return QDF_STATUS_SUCCESS;
}

void wmi_extscan_attach_tlv(wmi_unified_t wmi_handle)
{
	struct wmi_ops *ops = wmi_handle->ops;
	ops->send_reset_passpoint_network_list_cmd =
				send_reset_passpoint_network_list_cmd_tlv;
	ops->send_set_passpoint_network_list_cmd =
				send_set_passpoint_network_list_cmd_tlv;
	ops->send_set_epno_network_list_cmd =
				send_set_epno_network_list_cmd_tlv;
	ops->send_extscan_get_capabilities_cmd =
				send_extscan_get_capabilities_cmd_tlv;
	ops->send_extscan_get_cached_results_cmd =
				send_extscan_get_cached_results_cmd_tlv;
ops->send_extscan_stop_change_monitor_cmd = + send_extscan_stop_change_monitor_cmd_tlv; + ops->send_extscan_start_change_monitor_cmd = + send_extscan_start_change_monitor_cmd_tlv; + ops->send_extscan_stop_hotlist_monitor_cmd = + send_extscan_stop_hotlist_monitor_cmd_tlv; + ops->send_extscan_start_hotlist_monitor_cmd = + send_extscan_start_hotlist_monitor_cmd_tlv; + ops->send_stop_extscan_cmd = send_stop_extscan_cmd_tlv; + ops->send_start_extscan_cmd = send_start_extscan_cmd_tlv; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_non_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_non_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..c18bb20a5394f7994e068ee99bbfa473a1fa3e3c --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_non_tlv.c @@ -0,0 +1,9454 @@ +/* + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wmi_unified_api.h" +#include "wmi_unified_priv.h" +#include "target_type.h" +#include + +#if defined(WMI_NON_TLV_SUPPORT) || defined(WMI_TLV_AND_NON_TLV_SUPPORT) +#include "wmi.h" +#include "wmi_unified.h" +#include + +/* pdev_id is used to distinguish the radio for which event + * is received. 
Since non-tlv target has only one radio, setting + * default pdev_id to one to keep rest of the code using WMI APIs unfiorm. + */ +#define WMI_NON_TLV_DEFAULT_PDEV_ID WMI_HOST_PDEV_ID_0 + +/* HTC service id for WMI */ +static const uint32_t svc_ids[] = {WMI_CONTROL_SVC}; + +/** + * send_vdev_create_cmd_non_tlv() - send VDEV create command to fw + * @wmi_handle: wmi handle + * @param: pointer to hold vdev create parameter + * @macaddr: vdev mac address + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_vdev_create_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct vdev_create_params *param) +{ + wmi_vdev_create_cmd *cmd; + wmi_buf_t buf; + int32_t len = sizeof(wmi_vdev_create_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_create_cmd *)wmi_buf_data(buf); + cmd->vdev_id = param->if_id; + cmd->vdev_type = param->type; + cmd->vdev_subtype = param->subtype; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->vdev_macaddr); + qdf_print("%s: ID = %d Type = %d, Subtype = %d " + "VAP Addr = %02x:%02x:%02x:%02x:%02x:%02x:\n", + __func__, param->if_id, param->type, param->subtype, + macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5]); + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_CREATE_CMDID); +} + +/** + * send_vdev_delete_cmd_non_tlv() - send VDEV delete command to fw + * @wmi_handle: wmi handle + * @if_id: vdev id + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_vdev_delete_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t if_id) +{ + wmi_vdev_delete_cmd *cmd; + wmi_buf_t buf; + int32_t len = sizeof(wmi_vdev_delete_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_delete_cmd *)wmi_buf_data(buf); + cmd->vdev_id = if_id; + qdf_print("%s 
for vap %d (%pK)\n", __func__, if_id, wmi_handle); + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_DELETE_CMDID); +} + +/** + * send_vdev_stop_cmd_non_tlv() - send vdev stop command to fw + * @wmi: wmi handle + * @vdev_id: vdev id + * + * Return: 0 for success or erro code + */ +static QDF_STATUS send_vdev_stop_cmd_non_tlv(wmi_unified_t wmi, + uint8_t vdev_id) +{ + wmi_vdev_stop_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_vdev_stop_cmd); + + buf = wmi_buf_alloc(wmi, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_stop_cmd *)wmi_buf_data(buf); + cmd->vdev_id = vdev_id; + + return wmi_unified_cmd_send(wmi, buf, len, WMI_VDEV_STOP_CMDID); +} + +/** + * send_vdev_down_cmd_non_tlv() - send vdev down command to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_vdev_down_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + wmi_vdev_down_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_vdev_down_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_down_cmd *)wmi_buf_data(buf); + cmd->vdev_id = vdev_id; + qdf_print("%s for vap %d (%pK)\n", __func__, vdev_id, wmi_handle); + return wmi_unified_cmd_send(wmi_handle, buf, len, WMI_VDEV_DOWN_CMDID); +} + +/** + * send_vdev_start_cmd_non_tlv() - send vdev start command to fw + * @wmi: wmi handle + * @vdev_id: vdev id + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_vdev_start_cmd_non_tlv(wmi_unified_t wmi, + struct vdev_start_params *param) +{ + wmi_vdev_start_request_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_vdev_start_request_cmd); + int ret; + + buf = wmi_buf_alloc(wmi, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = 
	(wmi_vdev_start_request_cmd *)wmi_buf_data(buf);
	cmd->vdev_id = param->vdev_id;

	cmd->chan.mhz = param->channel.mhz;

	WMI_SET_CHANNEL_MODE(&cmd->chan, param->channel.phy_mode);

	cmd->chan.band_center_freq1 = param->channel.cfreq1;
	cmd->chan.band_center_freq2 = param->channel.cfreq2;
	cmd->disable_hw_ack = param->disable_hw_ack;

	/* Pack power limits and regulatory class into the channel struct */
	WMI_SET_CHANNEL_MIN_POWER(&cmd->chan, param->channel.minpower);
	WMI_SET_CHANNEL_MAX_POWER(&cmd->chan, param->channel.maxpower);
	WMI_SET_CHANNEL_REG_POWER(&cmd->chan, param->channel.maxregpower);
	WMI_SET_CHANNEL_ANTENNA_MAX(&cmd->chan, param->channel.antennamax);
	WMI_SET_CHANNEL_REG_CLASSID(&cmd->chan, param->channel.reg_class_id);

	/* Translate each optional channel property into its WMI flag */
	if (param->channel.dfs_set)
		WMI_SET_CHANNEL_FLAG(&cmd->chan, WMI_CHAN_FLAG_DFS);

	if (param->channel.dfs_set_cfreq2)
		WMI_SET_CHANNEL_FLAG(&cmd->chan, WMI_CHAN_FLAG_DFS_CFREQ2);

	if (param->channel.set_agile)
		WMI_SET_CHANNEL_FLAG(&cmd->chan, WMI_CHAN_FLAG_AGILE_MODE);

	if (param->channel.half_rate)
		WMI_SET_CHANNEL_FLAG(&cmd->chan, WMI_CHAN_FLAG_HALF);

	if (param->channel.quarter_rate)
		WMI_SET_CHANNEL_FLAG(&cmd->chan, WMI_CHAN_FLAG_QUARTER);

	/* Same payload for both cases; only the command id differs */
	if (param->is_restart) {
		qdf_print("VDEV RESTART\n");
		ret = wmi_unified_cmd_send(wmi, buf, len,
				WMI_VDEV_RESTART_REQUEST_CMDID);
	} else {
		qdf_print("VDEV START\n");
		ret = wmi_unified_cmd_send(wmi, buf, len,
				WMI_VDEV_START_REQUEST_CMDID);
	}
	return ret;

/*
For VDEV_RESTART command, the sequence of code remains the same except the
command sent as WMI_VDEV_RESTART_REQUEST_CMDID instead of START_REQUEST.

In that case, can we introduce a flag that takes in to check if start or
restart and use the same function?? Currently implemented as two separate
functions in OL layer
*/
}

/**
 * send_vdev_set_nac_rssi_cmd_non_tlv() - send set NAC_RSSI command to fw
 * @wmi: wmi handle
 * @param: Pointer to hold nac rssi stats
 *
 * Return: 0 for success or error code
 */
QDF_STATUS send_vdev_set_nac_rssi_cmd_non_tlv(wmi_unified_t wmi,
		struct vdev_scan_nac_rssi_params *param)
{
	wmi_vdev_scan_nac_rssi_config_cmd *cmd;
	wmi_buf_t buf;
	int len = sizeof(wmi_vdev_scan_nac_rssi_config_cmd);

	buf = wmi_buf_alloc(wmi, len);
	if (!buf) {
		qdf_print("%s:wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	cmd = (wmi_vdev_scan_nac_rssi_config_cmd *)wmi_buf_data(buf);
	cmd->vdev_id = param->vdev_id;
	cmd->action = param->action;
	cmd->chan_num = param->chan_num;
	WMI_CHAR_ARRAY_TO_MAC_ADDR(param->bssid_addr, &cmd->bssid_addr);
	WMI_CHAR_ARRAY_TO_MAC_ADDR(param->client_addr, &cmd->client_addr);
	if (wmi_unified_cmd_send(wmi, buf, len, WMI_VDEV_SET_SCAN_NAC_RSSI_CMDID)) {
		qdf_print("%s: ERROR: Host unable to send LOWI request to FW\n", __func__);
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * send_vdev_set_neighbour_rx_cmd_non_tlv() - set neighbour rx param in fw
 * @wmi_handle: wmi handle
 * @macaddr: vdev mac address
 * @param: pointer to hold neighbour rx param
 * Return: 0 for success or error code
 */
static QDF_STATUS send_vdev_set_neighbour_rx_cmd_non_tlv(wmi_unified_t wmi_handle,
			uint8_t macaddr[IEEE80211_ADDR_LEN],
			struct set_neighbour_rx_params *param)
{
	wmi_vdev_filter_nrp_config_cmd *cmd;
	wmi_buf_t buf;
	int len = sizeof(wmi_vdev_filter_nrp_config_cmd);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		qdf_print("%s:wmi_buf_alloc failed\n", __func__);
		/* NOTE(review): sibling senders return E_NOMEM here;
		 * this one returns E_FAILURE — confirm intentional.
		 */
		return QDF_STATUS_E_FAILURE;
	}
	cmd = (wmi_vdev_filter_nrp_config_cmd *)wmi_buf_data(buf);
	cmd->vdev_id = param->vdev_id;
	cmd->bssid_idx = param->idx;
	cmd->action = param->action;
	cmd->type =
param->type; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->addr); + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID); +} + +/** + * send_vdev_set_fwtest_param_cmd_non_tlv() - send fwtest param in fw + * @wmi_handle: wmi handle + * @param: pointer to hold fwtest param + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_vdev_set_fwtest_param_cmd_non_tlv(wmi_unified_t wmi_handle, + struct set_fwtest_params *param) +{ + wmi_fwtest_set_param_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_fwtest_set_param_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_fwtest_set_param_cmd *)wmi_buf_data(buf); + cmd->param_id = param->arg; + cmd->param_value = param->value; + + return wmi_unified_cmd_send(wmi_handle, buf, len, WMI_FWTEST_CMDID); +} + +/** + * send_vdev_config_ratemask_cmd_non_tlv() - config ratemask param in fw + * @wmi_handle: wmi handle + * @param: pointer to hold config ratemask params + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_vdev_config_ratemask_cmd_non_tlv(wmi_unified_t wmi_handle, + struct config_ratemask_params *param) +{ + wmi_vdev_config_ratemask *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_vdev_config_ratemask); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_vdev_config_ratemask *)wmi_buf_data(buf); + cmd->vdev_id = param->vdev_id; + cmd->type = param->type; + cmd->mask_lower32 = param->lower32; + cmd->mask_higher32 = param->higher32; + qdf_print("Setting vdev ratemask vdev id = 0x%X, type = 0x%X," + "mask_l32 = 0x%X mask_h32 = 0x%X\n", + param->vdev_id, param->type, param->lower32, param->higher32); + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_RATEMASK_CMDID); +} + +/** + * send_setup_install_key_cmd_non_tlv() - config 
 security key in fw
 * @wmi_handle: wmi handle
 * @param: pointer to hold key params
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS send_setup_install_key_cmd_non_tlv(wmi_unified_t wmi_handle,
		struct set_key_params *param)
{
	wmi_vdev_install_key_cmd *cmd;
	wmi_buf_t buf;
	/* length depends on ieee key length */
	int len = sizeof(wmi_vdev_install_key_cmd) + param->key_len;
	uint8_t wmi_cipher_type;
	int i;

	wmi_cipher_type = param->key_cipher;

	/* ieee_key length does not have mic keylen */
	if ((wmi_cipher_type == WMI_CIPHER_TKIP) ||
		(wmi_cipher_type == WMI_CIPHER_WAPI))
		len = len + IEEE80211_MICBUF_SIZE;

	/* WMI payloads are 32-bit aligned */
	len = roundup(len, sizeof(u_int32_t));
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		qdf_print("%s:wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_NOMEM;
	}
	cmd = (wmi_vdev_install_key_cmd *)wmi_buf_data(buf);

	cmd->vdev_id = param->vdev_id;
	WMI_CHAR_ARRAY_TO_MAC_ADDR(param->peer_mac, &cmd->peer_macaddr);

	cmd->key_ix = param->key_idx;

	/* If this WEP key is the default xmit key, TX_USAGE flag is enabled */
	cmd->key_flags = param->key_flags;

	cmd->key_len = param->key_len;
	cmd->key_cipher = wmi_cipher_type;
	cmd->key_txmic_len = param->key_txmic_len;
	cmd->key_rxmic_len = param->key_rxmic_len;

	/* target will use the same rsc counter for
	 * various tids from ieee key rsc */
	if ((wmi_cipher_type == WMI_CIPHER_TKIP) ||
		(wmi_cipher_type == WMI_CIPHER_AES_OCB)
		|| (wmi_cipher_type == WMI_CIPHER_AES_CCM)) {
		qdf_mem_copy(&cmd->key_rsc_counter, &param->key_rsc_counter[0],
			sizeof(param->key_rsc_counter[0]));
		qdf_mem_copy(&cmd->key_tsc_counter, &param->key_tsc_counter,
			sizeof(param->key_tsc_counter));
	}

#ifdef ATH_SUPPORT_WAPI
	if (wmi_cipher_type == WMI_CIPHER_WAPI) {
		int j;
		/* For WAPI, TSC and RSC has to be initialized with predefined
		 * value.Here, Indicating TSC, RSC to target as part of set
		 * key message
		 */
		/* since wk_recviv and wk_txiv initialized in reverse order,
		 * Before indicating the Target FW, Reversing TSC and RSC
		 */
		for (i = (WPI_IV_LEN-1), j = 0; i >= 0; i--, j++) {
			cmd->wpi_key_rsc_counter[j] =
				param->rx_iv[i];
			cmd->wpi_key_tsc_counter[j] =
				param->tx_iv[i];
		}

		/* Debug dump of the byte-reversed counters */
		qdf_print("RSC:");
		for (i = 0; i < 16; i++)
			qdf_print("0x%x ",
				*(((uint8_t *)&cmd->wpi_key_rsc_counter)+i));
		qdf_print("\n");

		qdf_print("TSC:");
		for (i = 0; i < 16; i++)
			qdf_print("0x%x ",
				*(((uint8_t *)&cmd->wpi_key_tsc_counter)+i));
		qdf_print("\n");
	}
#endif

	/* for big endian host, copy engine byte_swap is enabled
	 * But the key data content is in network byte order
	 * Need to byte swap the key data content - so when copy engine
	 * does byte_swap - target gets key_data content in the correct order
	 */

	WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(cmd->key_data, param->key_data,
		cmd->key_len);

	return wmi_unified_cmd_send(wmi_handle, buf, len,
			WMI_VDEV_INSTALL_KEY_CMDID);

}

/**
 * send_peer_flush_tids_cmd_non_tlv() - flush peer tids packets in fw
 * @wmi_handle: wmi handle
 * @peer_addr: peer mac address
 * @param: pointer to hold peer flush tid parameter
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS send_peer_flush_tids_cmd_non_tlv(wmi_unified_t wmi_handle,
				uint8_t peer_addr[IEEE80211_ADDR_LEN],
				struct peer_flush_params *param)
{
	wmi_peer_flush_tids_cmd *cmd;
	wmi_buf_t buf;
	int len = sizeof(wmi_peer_flush_tids_cmd);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		qdf_print("%s:wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_NOMEM;
	}
	cmd = (wmi_peer_flush_tids_cmd *)wmi_buf_data(buf);
	WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_addr, &cmd->peer_macaddr);
	cmd->peer_tid_bitmap = param->peer_tid_bitmap;
	cmd->vdev_id = param->vdev_id;
	return wmi_unified_cmd_send(wmi_handle, buf, len,
			WMI_PEER_FLUSH_TIDS_CMDID);
}

/**
 * send_peer_delete_cmd_non_tlv() - send PEER delete command to fw
 *
@wmi_handle: wmi handle + * @peer_addr: peer mac addr + * @vdev_id: vdev id + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_peer_delete_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t + peer_addr[IEEE80211_ADDR_LEN], + uint8_t vdev_id) +{ + wmi_peer_delete_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_peer_delete_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_peer_delete_cmd *)wmi_buf_data(buf); + WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_addr, &cmd->peer_macaddr); + cmd->vdev_id = vdev_id; + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_DELETE_CMDID); +} + +/** + * convert_host_peer_id_to_target_id_non_tlv - convert host peer param_id + * to target id. + * @targ_paramid: Target parameter id to hold the result. + * @peer_param_id: host param id. + * + * Return: QDF_STATUS_SUCCESS for success + * QDF_STATUS_E_NOSUPPORT when the param_id in not supported in tareget + */ +static QDF_STATUS convert_host_peer_id_to_target_id_non_tlv( + uint32_t *targ_paramid, + uint32_t peer_param_id) +{ + switch (peer_param_id) { + case WMI_HOST_PEER_MIMO_PS_STATE: + *targ_paramid = WMI_PEER_MIMO_PS_STATE; + break; + case WMI_HOST_PEER_AMPDU: + *targ_paramid = WMI_PEER_AMPDU; + break; + case WMI_HOST_PEER_AUTHORIZE: + *targ_paramid = WMI_PEER_AUTHORIZE; + break; + case WMI_HOST_PEER_CHWIDTH: + *targ_paramid = WMI_PEER_CHWIDTH; + break; + case WMI_HOST_PEER_NSS: + *targ_paramid = WMI_PEER_NSS; + break; + case WMI_HOST_PEER_USE_4ADDR: + *targ_paramid = WMI_PEER_USE_4ADDR; + break; + case WMI_HOST_PEER_USE_FIXED_PWR: + *targ_paramid = WMI_PEER_USE_FIXED_PWR; + break; + case WMI_HOST_PEER_PARAM_FIXED_RATE: + *targ_paramid = WMI_PEER_PARAM_FIXED_RATE; + break; + case WMI_HOST_PEER_SET_MU_WHITELIST: + *targ_paramid = WMI_PEER_SET_MU_WHITELIST; + break; + case WMI_HOST_PEER_EXT_STATS_ENABLE: + *targ_paramid = WMI_PEER_EXT_STATS_ENABLE; + break; + 
case WMI_HOST_PEER_NSS_VHT160: + *targ_paramid = WMI_PEER_NSS_VHT160; + break; + case WMI_HOST_PEER_NSS_VHT80_80: + *targ_paramid = WMI_PEER_NSS_VHT80_80; + break; + default: + return QDF_STATUS_E_NOSUPPORT; + } + + return QDF_STATUS_SUCCESS; +} +/** + * send_peer_param_cmd_non_tlv() - set peer parameter in fw + * @wmi_handle: wmi handle + * @peer_addr: peer mac address + * @param : pointer to hold peer set parameter + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_peer_param_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t peer_addr[IEEE80211_ADDR_LEN], + struct peer_set_params *param) +{ + wmi_peer_set_param_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_peer_set_param_cmd); + uint32_t param_id; + + if (convert_host_peer_id_to_target_id_non_tlv(¶m_id, + param->param_id) != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_NOSUPPORT; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_peer_set_param_cmd *)wmi_buf_data(buf); + WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_addr, &cmd->peer_macaddr); + cmd->param_id = param_id; + cmd->param_value = param->param_value; + cmd->vdev_id = param->vdev_id; + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_SET_PARAM_CMDID); +} + +/** + * send_vdev_up_cmd_non_tlv() - send vdev up command in fw + * @wmi_handle: wmi handle + * @bssid: bssid + * @vdev_up_params: pointer to hold vdev up parameter + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_vdev_up_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t bssid[IEEE80211_ADDR_LEN], + struct vdev_up_params *param) +{ + wmi_vdev_up_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_vdev_up_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_up_cmd *)wmi_buf_data(buf); + cmd->vdev_id = param->vdev_id; + cmd->vdev_assoc_id = 
param->assoc_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(bssid, &cmd->vdev_bssid); + qdf_print("%s for vap %d (%pK)\n", __func__, param->vdev_id, wmi_handle); + return wmi_unified_cmd_send(wmi_handle, buf, len, WMI_VDEV_UP_CMDID); +} + +/** + * send_peer_create_cmd_non_tlv() - send peer create command to fw + * @wmi_handle: wmi handle + * @param: pointer to hold peer create parameter + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_peer_create_cmd_non_tlv(wmi_unified_t wmi_handle, + struct peer_create_params *param) +{ + wmi_peer_create_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_peer_create_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_peer_create_cmd *)wmi_buf_data(buf); + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->peer_addr, &cmd->peer_macaddr); + cmd->vdev_id = param->vdev_id; + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_CREATE_CMDID); +} + +/** + * send_peer_add_wds_entry_cmd_non_tlv() - send peer add command to fw + * @wmi_handle: wmi handle + * @param: pointer holding peer details + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_peer_add_wds_entry_cmd_non_tlv(wmi_unified_t wmi_handle, + struct peer_add_wds_entry_params *param) +{ + wmi_peer_add_wds_entry_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_peer_add_wds_entry_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_peer_add_wds_entry_cmd *)wmi_buf_data(buf); + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->dest_addr, &cmd->wds_macaddr); + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->peer_addr, &cmd->peer_macaddr); + cmd->flags = param->flags; + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_ADD_WDS_ENTRY_CMDID); +} + +/** + * send_peer_del_wds_entry_cmd_non_tlv() - send peer delete command to fw + * @wmi_handle: wmi handle + * @param: 
pointer holding peer details + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_peer_del_wds_entry_cmd_non_tlv(wmi_unified_t wmi_handle, + struct peer_del_wds_entry_params *param) +{ + wmi_peer_remove_wds_entry_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_peer_remove_wds_entry_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_peer_remove_wds_entry_cmd *)wmi_buf_data(buf); + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->dest_addr, &cmd->wds_macaddr); + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_REMOVE_WDS_ENTRY_CMDID); +} + +/** + * send_set_bridge_mac_addr_cmd_non_tlv() - send set bridge MAC addr command to fw + * @wmi_handle: wmi handle + * @param: pointer holding bridge addr details + * + * Return: 0 for success or error code + */ +QDF_STATUS send_set_bridge_mac_addr_cmd_non_tlv(wmi_unified_t wmi_handle, + struct set_bridge_mac_addr_params *param) +{ + wmi_peer_add_wds_entry_cmd *cmd; + wmi_buf_t buf; + uint8_t null_macaddr[IEEE80211_ADDR_LEN]; + int len = sizeof(wmi_peer_add_wds_entry_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + qdf_mem_zero(null_macaddr, IEEE80211_ADDR_LEN); + cmd = (wmi_peer_add_wds_entry_cmd *)wmi_buf_data(buf); + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->bridge_addr, &cmd->wds_macaddr); + WMI_CHAR_ARRAY_TO_MAC_ADDR(null_macaddr, &cmd->peer_macaddr); + cmd->flags = 0xffffffff; + + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_ADD_WDS_ENTRY_CMDID); +} + +/** + * send_peer_update_wds_entry_cmd_non_tlv() - send peer update command to fw + * @wmi_handle: wmi handle + * @param: pointer holding peer details + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_peer_update_wds_entry_cmd_non_tlv(wmi_unified_t wmi_handle, + struct peer_update_wds_entry_params *param) +{ + 
	wmi_peer_update_wds_entry_cmd *cmd;
	wmi_buf_t buf;
	int len = sizeof(wmi_peer_update_wds_entry_cmd);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		qdf_print("%s: wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	/* wmi_buf_alloc returns zeroed command buffer */
	cmd = (wmi_peer_update_wds_entry_cmd *)wmi_buf_data(buf);
	cmd->flags = (param->flags) ? WMI_WDS_FLAG_STATIC : 0;
	if (param->wds_macaddr)
		WMI_CHAR_ARRAY_TO_MAC_ADDR(param->wds_macaddr,
				&cmd->wds_macaddr);
	if (param->peer_macaddr)
		WMI_CHAR_ARRAY_TO_MAC_ADDR(param->peer_macaddr,
				&cmd->peer_macaddr);
	return wmi_unified_cmd_send(wmi_handle, buf, len,
			WMI_PEER_UPDATE_WDS_ENTRY_CMDID);
}

#ifdef WLAN_SUPPORT_GREEN_AP
/**
 * send_green_ap_ps_cmd_non_tlv() - enable green ap powersave command
 * @wmi_handle: wmi handle
 * @value: value
 * @pdev_id: pdev id to have radio context
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS send_green_ap_ps_cmd_non_tlv(wmi_unified_t wmi_handle,
					uint32_t value, uint8_t pdev_id)
{
	wmi_pdev_green_ap_ps_enable_cmd *cmd;
	wmi_buf_t buf;
	int len = 0;
	int ret;

	len = sizeof(wmi_pdev_green_ap_ps_enable_cmd);
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		qdf_print("%s:wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	cmd = (wmi_pdev_green_ap_ps_enable_cmd *)wmi_buf_data(buf);
	cmd->enable = value;

	/* NOTE(review): pdev_id is accepted but not placed in the command;
	 * non-tlv targets have a single radio — confirm it can be ignored.
	 */
	ret = wmi_unified_cmd_send(wmi_handle,
			buf,
			len,
			WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID);

#ifdef OL_GREEN_AP_DEBUG_CONFIG_INTERACTIONS
	qdf_print("%s: Sent WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID.\n"
			"enable=%u status=%d\n",
			__func__,
			value,
			ret);
#endif /* OL_GREEN_AP_DEBUG_CONFIG_INTERACTIONS */
	return ret;
}
#endif

/**
 * send_pdev_utf_cmd_non_tlv() - send utf command to fw
 * @wmi_handle: wmi handle
 * @param: pointer to pdev_utf_params
 * @mac_id: mac id to have radio context
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS
send_pdev_utf_cmd_non_tlv(wmi_unified_t wmi_handle,
			struct pdev_utf_params *param,
			uint8_t mac_id)
{
	wmi_buf_t buf;
	u_int8_t *cmd;
	int ret = 0;
	/* We can initialize the value and increment.*/
	/* NOTE(review): static counter is shared across calls and radios,
	 * and is not thread-safe — confirm callers serialize UTF downloads.
	 */
	static uint8_t msgref = 1;
	uint8_t segNumber = 0, segInfo, numSegments;
	/* NOTE(review): uint16_t truncates param->len above 65535 bytes —
	 * confirm UTF payloads are bounded well below that.
	 */
	uint16_t chunkLen, totalBytes;
	uint8_t *bufpos;
	struct seg_hdr_info segHdrInfo;

	bufpos = param->utf_payload;
	totalBytes = param->len;
	/* Number of MAX_WMI_UTF_LEN-sized segments, rounded up */
	numSegments = (uint8_t) (totalBytes / MAX_WMI_UTF_LEN);

	if (param->len - (numSegments * MAX_WMI_UTF_LEN))
		numSegments++;

	while (param->len) {
		if (param->len > MAX_WMI_UTF_LEN)
			chunkLen = MAX_WMI_UTF_LEN; /* MAX message.. */
		else
			chunkLen = param->len;

		buf = wmi_buf_alloc(wmi_handle,
				(chunkLen + sizeof(segHdrInfo)));
		if (!buf) {
			qdf_print("%s:wmi_buf_alloc failed\n", __func__);
			return QDF_STATUS_E_FAILURE;
		}

		cmd = (uint8_t *)wmi_buf_data(buf);

		/* Per-segment header: total length, message ref and
		 * "total segments | this segment" packed in one byte.
		 */
		segHdrInfo.len = totalBytes;
		segHdrInfo.msgref = msgref;
		segInfo = ((numSegments << 4) & 0xF0) | (segNumber & 0xF);
		segHdrInfo.segmentInfo = segInfo;

		segNumber++;

		qdf_mem_copy(cmd, &segHdrInfo, sizeof(segHdrInfo));
#ifdef BIG_ENDIAN_HOST
		if (param->is_ar900b) {

			/* for big endian host, copy engine byte_swap is
			 * enable But this ART command frame buffer content is
			 * in network byte order.
			 * Need to byte swap the mgmt frame buffer content - so
			 * when copy engine does byte_swap - target gets buffer
			 * content in the correct order
			 */
			int i;
			uint32_t *destp, *srcp;
			destp = (uint32_t *)(&(cmd[sizeof(segHdrInfo)]));
			srcp = (uint32_t *)bufpos;
			for (i = 0; i < (roundup(chunkLen,
					sizeof(uint32_t)) / 4); i++) {
				*destp = qdf_le32_to_cpu(*srcp);
				destp++; srcp++;
			}
		} else {
			qdf_mem_copy(&cmd[sizeof(segHdrInfo)],
				bufpos, chunkLen);
		}
#else
		qdf_mem_copy(&cmd[sizeof(segHdrInfo)], bufpos, chunkLen);
#endif

		ret = wmi_unified_cmd_send(wmi_handle, buf,
				(chunkLen + sizeof(segHdrInfo)),
				WMI_PDEV_UTF_CMDID);

		/* Abort on first send failure; ret carries the error */
		if (ret != 0)
			break;

		param->len -= chunkLen;
		bufpos += chunkLen;
	}

	msgref++;

	return ret;
}

/**
 * send_pdev_qvit_cmd_non_tlv() - send qvit command to fw
 * @wmi_handle: wmi handle
 * @param: pointer to pdev_qvit_params
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS
send_pdev_qvit_cmd_non_tlv(wmi_unified_t wmi_handle,
			struct pdev_qvit_params *param)
{
	wmi_buf_t buf;
	u_int8_t *cmd;
	int ret = 0;
	/* We can initialize the value and increment.*/
	static u_int8_t msgref = 1;
	u_int8_t segNumber = 0, segInfo, numSegments;
	u_int16_t chunkLen, totalBytes;
	u_int8_t *bufpos;
	QVIT_SEG_HDR_INFO_STRUCT segHdrInfo;

/*
#ifdef QVIT_DEBUG
	qdf_print(KERN_INFO "QVIT: %s: called\n", __func__);
#endif
*/
	bufpos = param->utf_payload;
	totalBytes = param->len;
	numSegments = (totalBytes / MAX_WMI_QVIT_LEN);

	if (param->len - (numSegments * MAX_WMI_QVIT_LEN))
		numSegments++;

	while (param->len) {
		if (param->len > MAX_WMI_QVIT_LEN)
			chunkLen = MAX_WMI_QVIT_LEN; /* MAX message..
*/ + else + chunkLen = param->len; + + buf = wmi_buf_alloc(wmi_handle, + (chunkLen + sizeof(segHdrInfo))); + if (!buf) { + qdf_print(KERN_ERR "QVIT: %s: wmi_buf_alloc failed\n", + __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (u_int8_t *)wmi_buf_data(buf); + + segHdrInfo.len = totalBytes; + segHdrInfo.msgref = msgref; + segInfo = ((numSegments << 4) & 0xF0) | (segNumber & 0xF); + segHdrInfo.segmentInfo = segInfo; + + segNumber++; + + qdf_mem_copy(cmd, &segHdrInfo, sizeof(segHdrInfo)); + qdf_mem_copy(&cmd[sizeof(segHdrInfo)], bufpos, chunkLen); + + ret = wmi_unified_cmd_send(wmi_handle, buf, + (chunkLen + sizeof(segHdrInfo)), + WMI_PDEV_QVIT_CMDID); + if (ret != 0) { + qdf_print + (KERN_ERR "QVIT: %s: wmi_unified_cmd_send failed\n", + __func__); + break; + } + + param->len -= chunkLen; + bufpos += chunkLen; + } + + msgref++; + + return ret; +} + +/** + * send_pdev_param_cmd_non_tlv() - set pdev parameters + * @wmi_handle: wmi handle + * @param: pointer to pdev parameter + * @mac_id: radio context + * + * Return: 0 on success, errno on failure + */ +static QDF_STATUS +send_pdev_param_cmd_non_tlv(wmi_unified_t wmi_handle, + struct pdev_params *param, uint8_t mac_id) +{ + wmi_pdev_set_param_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_pdev_set_param_cmd); + + if ((param->param_id < wmi_pdev_param_max) && + (wmi_handle->pdev_param[param->param_id] + != WMI_UNAVAILABLE_PARAM)) { + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_pdev_set_param_cmd *)wmi_buf_data(buf); + cmd->param_id = wmi_handle->pdev_param[param->param_id]; + cmd->param_value = param->param_value; + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_PARAM_CMDID); + } + return QDF_STATUS_E_FAILURE; +} + +/** + * send_suspend_cmd_non_tlv() - WMI suspend function + * + * @param wmi_handle : handle to WMI. 
+ * @param param : pointer to hold suspend parameter + * @mac_id: radio context + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_suspend_cmd_non_tlv(wmi_unified_t wmi_handle, + struct suspend_params *param, + uint8_t mac_id) +{ + wmi_pdev_suspend_cmd *cmd; + wmi_buf_t wmibuf; + uint32_t len = sizeof(wmi_pdev_suspend_cmd); + + /*send the command to Target to ignore the + * PCIE reset so as to ensure that Host and target + * states are in sync*/ + wmibuf = wmi_buf_alloc(wmi_handle, len); + if (wmibuf == NULL) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_pdev_suspend_cmd *)wmi_buf_data(wmibuf); + if (param->disable_target_intr) + cmd->suspend_opt = WMI_PDEV_SUSPEND_AND_DISABLE_INTR; + else + cmd->suspend_opt = WMI_PDEV_SUSPEND; + + /* + * Flush pending packets in HTC endpoint queue + * + */ + wmi_flush_endpoint(wmi_handle); + + return wmi_unified_cmd_send(wmi_handle, wmibuf, len, + WMI_PDEV_SUSPEND_CMDID); +} + +/** + * send_resume_cmd_non_tlv() - WMI resume function + * + * @param wmi_handle : handle to WMI. + * @mac_id: radio context + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_resume_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t mac_id) +{ + wmi_buf_t wmibuf; + + wmibuf = wmi_buf_alloc(wmi_handle, 0); + if (wmibuf == NULL) + return QDF_STATUS_E_NOMEM; + return wmi_unified_cmd_send(wmi_handle, wmibuf, 0, + WMI_PDEV_RESUME_CMDID); +} + +/** + * send_wow_enable_cmd_non_tlv() - WMI wow enable function + * + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold wow enable parameter + * @mac_id: radio context + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ +static QDF_STATUS send_wow_enable_cmd_non_tlv(wmi_unified_t wmi_handle, + struct wow_cmd_params *param, uint8_t mac_id) +{ + QDF_STATUS res; + wmi_buf_t buf = NULL; + + buf = wmi_buf_alloc(wmi_handle, 4); + if (!buf) { + qdf_print("buf alloc failed\n"); + return QDF_STATUS_E_NOMEM; + } + res = wmi_unified_cmd_send(wmi_handle, buf, 4, WMI_WOW_ENABLE_CMDID); + qdf_print("send_wow_enable result: %d\n", res); + return (res == QDF_STATUS_SUCCESS) ? + QDF_STATUS_SUCCESS : QDF_STATUS_E_FAILURE; +} + +/** + * send_wow_wakeup_cmd_non_tlv() - WMI wow wakeup function + * + * @param wmi_handle : handle to WMI. + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_wow_wakeup_cmd_non_tlv(wmi_unified_t wmi_handle) +{ + QDF_STATUS res; + wmi_buf_t buf = NULL; + + buf = wmi_buf_alloc(wmi_handle, 4); + if (!buf) { + qdf_print("buf alloc failed\n"); + return QDF_STATUS_E_NOMEM; + } + res = wmi_unified_cmd_send(wmi_handle, buf, 4, + WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID); + qdf_print("ol_wow_wakeup result: %d\n", res); + return (res == QDF_STATUS_SUCCESS) ? + QDF_STATUS_SUCCESS : QDF_STATUS_E_FAILURE; +} + +/** + * send_wow_add_wakeup_event_cmd_non_tlv() - WMI wow add wakeup event function + * + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold wow wakeup event parameter + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ +static QDF_STATUS send_wow_add_wakeup_event_cmd_non_tlv(wmi_unified_t wmi_handle, + struct wow_add_wakeup_params *param) +{ + QDF_STATUS res; + WMI_WOW_ADD_DEL_EVT_CMD *cmd; + wmi_buf_t buf = NULL; + int len = sizeof(WMI_WOW_ADD_DEL_EVT_CMD); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("buf alloc failed\n"); + return QDF_STATUS_E_NOMEM; + } + cmd = (WMI_WOW_ADD_DEL_EVT_CMD *)wmi_buf_data(buf); + cmd->is_add = 1; + cmd->event_bitmap = param->type; + res = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID); + return (res == QDF_STATUS_SUCCESS) ? + QDF_STATUS_SUCCESS : QDF_STATUS_E_FAILURE; +} + +/** + * send_wow_add_wakeup_pattern_cmd_non_tlv() - WMI wow add wakeup pattern function + * + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold wow wakeup pattern parameter + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_wow_add_wakeup_pattern_cmd_non_tlv(wmi_unified_t wmi_handle, + struct wow_add_wakeup_pattern_params *param) +{ + WOW_BITMAP_PATTERN_T bitmap_pattern; + uint32_t j; + /* + struct ol_wow_info *wowInfo; + OL_WOW_PATTERN *pattern; + struct ol_ath_softc_net80211 *scn = OL_ATH_SOFTC_NET80211(ic); + */ + QDF_STATUS res; + WMI_WOW_ADD_PATTERN_CMD *cmd; + wmi_buf_t buf = NULL; + int len = sizeof(WMI_WOW_ADD_PATTERN_CMD); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("buf alloc failed\n"); + return QDF_STATUS_E_NOMEM; + } + cmd = (WMI_WOW_ADD_PATTERN_CMD *)wmi_buf_data(buf); + cmd->pattern_id = param->pattern_id; + cmd->pattern_type = WOW_BITMAP_PATTERN; + + for (j = 0; j < WOW_DEFAULT_BITMAP_PATTERN_SIZE; j++) + bitmap_pattern.patternbuf[j] = param->pattern_bytes[j]; + + for (j = 0; j < WOW_DEFAULT_BITMASK_SIZE; j++) + bitmap_pattern.bitmaskbuf[j] = param->mask_bytes[j]; + + bitmap_pattern.pattern_offset = 0; + + cmd->pattern_info.bitmap = bitmap_pattern; + res = wmi_unified_cmd_send(wmi_handle, buf, len, + 
WMI_WOW_ADD_WAKE_PATTERN_CMDID); + + return (res == QDF_STATUS_SUCCESS) ? + QDF_STATUS_SUCCESS : QDF_STATUS_E_FAILURE; +} + +/** + * send_wow_remove_wakeup_pattern_cmd_non_tlv() - WMI wow remove wakeup + * pattern function + * + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold wow wakeup pattern parameter + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_wow_remove_wakeup_pattern_cmd_non_tlv(wmi_unified_t wmi_handle, + struct wow_remove_wakeup_pattern_params *param) +{ + WMI_WOW_DEL_PATTERN_CMD *cmd; + QDF_STATUS res; + wmi_buf_t buf = NULL; + int len = sizeof(WMI_WOW_DEL_PATTERN_CMD); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("buf alloc failed\n"); + return QDF_STATUS_E_NOMEM; + } + cmd = (WMI_WOW_DEL_PATTERN_CMD *)wmi_buf_data(buf); + cmd->pattern_id = param->pattern_id; + cmd->pattern_type = WOW_BITMAP_PATTERN; + res = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WOW_DEL_WAKE_PATTERN_CMDID); + return (res == QDF_STATUS_SUCCESS) ? + QDF_STATUS_SUCCESS : QDF_STATUS_E_FAILURE; +} + +/** + * send_set_ap_ps_param_cmd_non_tlv() - set ap powersave parameters + * @param wmi_handle : handle to WMI. 
+ * @peer_addr: peer mac address + * @param: pointer to ap_ps parameter structure + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_set_ap_ps_param_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t *peer_addr, + struct ap_ps_params *param) +{ + wmi_ap_ps_peer_cmd *cmd; + wmi_buf_t buf; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_ap_ps_peer_cmd *)wmi_buf_data(buf); + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_addr, &cmd->peer_macaddr); + cmd->param = param->param; + cmd->value = param->value; + + return wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_AP_PS_PEER_PARAM_CMDID); +} + +/** + * send_set_sta_ps_param_cmd_non_tlv() - set sta powersave parameters + * @param wmi_handle : handle to WMI. + * @param: pointer to sta_ps parameter structure + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_set_sta_ps_param_cmd_non_tlv(wmi_unified_t wmi_handle, + struct sta_ps_params *param) +{ + wmi_sta_powersave_param_cmd *cmd; + wmi_buf_t buf; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_sta_powersave_param_cmd *)wmi_buf_data(buf); + cmd->vdev_id = param->vdev_id; + cmd->param = param->param; + cmd->value = param->value; + + return wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_STA_POWERSAVE_PARAM_CMDID); +} + +/** + * send_set_ps_mode_cmd_non_tlv() - set powersave mode + * @wmi_handle: wmi handle + * @param: pointer to ps_mode parameter structure + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_set_ps_mode_cmd_non_tlv(wmi_unified_t wmi_handle, + struct set_ps_mode_params *param) +{ + wmi_sta_powersave_mode_cmd *cmd; + wmi_buf_t buf; + int ret; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + qdf_print("%s: 
wmi_buf_alloc failed\n", __func__);
+ return QDF_STATUS_E_FAILURE;
+ }
+ qdf_print("%s:set psmode=%d\n", __func__, param->psmode);
+ cmd = (wmi_sta_powersave_mode_cmd *)wmi_buf_data(buf);
+ cmd->vdev_id = param->vdev_id;
+ cmd->sta_ps_mode = param->psmode;
+
+ ret = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd),
+ WMI_STA_POWERSAVE_MODE_CMDID);
+
+ return ret;
+}
+
+/**
+ * send_crash_inject_cmd_non_tlv() - inject fw crash
+ * @param wmi_handle : handle to WMI.
+ * @param: pointer to crash inject parameter structure
+ *
+ * Return: 0 for success or return error
+ */
+static QDF_STATUS send_crash_inject_cmd_non_tlv(wmi_unified_t wmi_handle,
+ struct crash_inject *param)
+{
+ WMI_FORCE_FW_HANG_CMD *cmd;
+ wmi_buf_t buf;
+ int32_t len = sizeof(WMI_FORCE_FW_HANG_CMD);
+
+ buf = wmi_buf_alloc(wmi_handle, len);
+ if (!buf) {
+ qdf_print("%s:wmi_buf_alloc failed\n", __func__);
+ return QDF_STATUS_E_FAILURE;
+ }
+ cmd = (WMI_FORCE_FW_HANG_CMD *)wmi_buf_data(buf);
+ cmd->type = 1;
+ /* Should this be param->type ? */
+ cmd->delay_time_ms = param->delay_time_ms;
+ return wmi_unified_cmd_send(wmi_handle, buf, len,
+ WMI_FORCE_FW_HANG_CMDID);
+}
+
+/**
+ * send_dbglog_cmd_non_tlv() - set debug log level
+ *
+ * @param wmi_handle : handle to WMI.
+ * @param param : pointer to hold dbglog level parameter
+ * @return QDF_STATUS_SUCCESS on success and -ve on failure.
+ */ +static QDF_STATUS +send_dbglog_cmd_non_tlv(wmi_unified_t wmi_handle, + struct dbglog_params *dbglog_param) +{ + wmi_buf_t osbuf; + WMI_DBGLOG_CFG_CMD *cmd; + QDF_STATUS status; + + osbuf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (osbuf == NULL) + return QDF_STATUS_E_NOMEM; + + qdf_nbuf_put_tail(osbuf, sizeof(*cmd)); + + cmd = (WMI_DBGLOG_CFG_CMD *)(wmi_buf_data(osbuf)); + + qdf_print("wmi_dbg_cfg_send: mod[0]%08x dbgcfg%08x cfgvalid[0] %08x" + " cfgvalid[1] %08x\n", + dbglog_param->module_id_bitmap[0], + dbglog_param->val, dbglog_param->cfgvalid[0], + dbglog_param->cfgvalid[1]); + + cmd->config.cfgvalid[0] = dbglog_param->cfgvalid[0]; + cmd->config.cfgvalid[1] = dbglog_param->cfgvalid[1]; + qdf_mem_copy(&cmd->config.config.mod_id[0], + dbglog_param->module_id_bitmap, + sizeof(cmd->config.config.mod_id)); + cmd->config.config.dbg_config = dbglog_param->val; + + status = wmi_unified_cmd_send(wmi_handle, osbuf, + sizeof(WMI_DBGLOG_CFG_CMD), + WMI_DBGLOG_CFG_CMDID); + + return status; +} + +/** + * send_vdev_set_param_cmd_non_tlv() - WMI vdev set parameter function + * + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold vdev set parameter + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ +static QDF_STATUS send_vdev_set_param_cmd_non_tlv(wmi_unified_t wmi_handle, + struct vdev_set_params *param) +{ + wmi_vdev_set_param_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_vdev_set_param_cmd); + + if ((param->param_id < wmi_vdev_param_max) && + (wmi_handle->vdev_param[param->param_id] != + WMI_UNAVAILABLE_PARAM)) { + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_vdev_set_param_cmd *)wmi_buf_data(buf); + cmd->vdev_id = param->if_id; + cmd->param_id = wmi_handle->vdev_param[param->param_id]; + cmd->param_value = param->param_value; + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_SET_PARAM_CMDID); + } + return QDF_STATUS_E_FAILURE; +} + +/** + * get_stats_id_non_tlv() - Get stats identifier function + * + * @param host_stats_id: host stats identifier value + * @return stats_id based on host_stats_id + */ +static uint32_t get_stats_id_non_tlv(wmi_host_stats_id host_stats_id) +{ + uint32_t stats_id = 0; + + if (host_stats_id & WMI_HOST_REQUEST_PEER_STAT) + stats_id |= WMI_REQUEST_PEER_STAT; + if (host_stats_id & WMI_HOST_REQUEST_AP_STAT) + stats_id |= WMI_REQUEST_AP_STAT; + if (host_stats_id & WMI_HOST_REQUEST_INST_STAT) + stats_id |= WMI_REQUEST_INST_STAT; + if (host_stats_id & WMI_HOST_REQUEST_PEER_EXTD_STAT) + stats_id |= WMI_REQUEST_PEER_EXTD_STAT; + if (host_stats_id & WMI_HOST_REQUEST_NAC_RSSI) + stats_id |= WMI_REQUEST_NAC_RSSI_STAT; + + return stats_id; +} +/** + * send_stats_request_cmd_non_tlv() - WMI request stats function + * + * @param wmi_handle : handle to WMI. + * @param macaddr : MAC address + * @param param : pointer to hold stats request parameter + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ +static QDF_STATUS send_stats_request_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct stats_request_params *param) +{ + wmi_buf_t buf; + wmi_request_stats_cmd *cmd; + uint8_t len = sizeof(wmi_request_stats_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_INVAL; + } + + cmd = (wmi_request_stats_cmd *)wmi_buf_data(buf); + cmd->stats_id = get_stats_id_non_tlv(param->stats_id); + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + cmd->inst_rssi_args.cfg_retry_count = param->rssi_args.cfg_retry_count; + cmd->inst_rssi_args.retry_count = param->rssi_args.retry_count; + + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_REQUEST_STATS_CMDID)) { + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_bss_chan_info_request_cmd_non_tlv() - WMI request bss chan info + * + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold bss chan info request parameter + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_bss_chan_info_request_cmd_non_tlv(wmi_unified_t wmi_handle, + struct bss_chan_info_request_params *param) +{ + wmi_buf_t buf; + wmi_pdev_bss_chan_info_request *cmd; + u_int8_t len = sizeof(wmi_pdev_bss_chan_info_request); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_INVAL; + } + + cmd = (wmi_pdev_bss_chan_info_request *)wmi_buf_data(buf); + cmd->param = param->param; + cmd->reserved = 0; + + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID)) { + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_packet_log_enable_cmd_non_tlv() - WMI request stats function + * + * @param wmi_handle : handle to WMI. 
+ * @param PKTLOG_EVENT : packet log event + * @mac_id: mac id to have radio context + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_packet_log_enable_cmd_non_tlv(wmi_unified_t wmi_handle, + WMI_HOST_PKTLOG_EVENT PKTLOG_EVENT, uint8_t mac_id) +{ + wmi_pdev_pktlog_enable_cmd *cmd; + int len = 0; + wmi_buf_t buf; + + len = sizeof(wmi_pdev_pktlog_enable_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_pdev_pktlog_enable_cmd *)wmi_buf_data(buf); + cmd->evlist = PKTLOG_EVENT; + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_PKTLOG_ENABLE_CMDID)) { + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_packet_log_disable_cmd_non_tlv() - WMI disable packet log send function + * + * @param wmi_handle : handle to WMI. + * @mac_id: mac id to have radio context + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_packet_log_disable_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t mac_id) +{ + int len = 0; + wmi_buf_t buf; + + buf = wmi_buf_alloc(wmi_handle, 0); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_PKTLOG_DISABLE_CMDID)) { + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_beacon_send_cmd_non_tlv() - WMI beacon send function + * + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold beacon send cmd parameter + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ +static QDF_STATUS send_beacon_send_cmd_non_tlv(wmi_unified_t wmi_handle, + struct beacon_params *param) +{ + if (param->is_high_latency) { + wmi_bcn_tx_cmd *cmd; + wmi_buf_t wmi_buf; + int bcn_len = qdf_nbuf_len(param->wbuf); + int len = sizeof(wmi_bcn_tx_hdr) + bcn_len; + + /************************************************************* + * TODO: Once we have the host target transport framework for + * sending management frames this wmi function will be replaced + * with calls to HTT. The buffer will changed to match the right + * format to be used with HTT. + *************************************************************/ + wmi_buf = wmi_buf_alloc(wmi_handle, roundup(len, + sizeof(u_int32_t))); + if (!wmi_buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_bcn_tx_cmd *)wmi_buf_data(wmi_buf); + cmd->hdr.vdev_id = param->vdev_id; + cmd->hdr.buf_len = bcn_len; + +#ifdef BIG_ENDIAN_HOST + { + /* for big endian host, copy engine byte_swap is enabled + * But the beacon buffer content is in network byte + * order Need to byte swap the beacon buffer content - + * so when copy engine does byte_swap - target gets + * buffer content in the correct order + */ + int i; + u_int32_t *destp, *srcp; + destp = (u_int32_t *)cmd->bufp; + srcp = (u_int32_t *)wmi_buf_data(param->wbuf); + for (i = 0; i < (roundup(bcn_len, + sizeof(u_int32_t))/4); i++) { + *destp = qdf_le32_to_cpu(*srcp); + destp++; srcp++; + } + } +#else + qdf_mem_copy(cmd->bufp, wmi_buf_data(param->wbuf), bcn_len); +#endif +#ifdef DEBUG_BEACON + qdf_print("%s frm length %d\n", __func__, bcn_len); +#endif + wmi_unified_cmd_send(wmi_handle, wmi_buf, + roundup(len, sizeof(u_int32_t)), WMI_BCN_TX_CMDID); + } else { + wmi_bcn_send_from_host_cmd_t *cmd; + wmi_buf_t wmi_buf; + int bcn_len = qdf_nbuf_len(param->wbuf); + int len = sizeof(wmi_bcn_send_from_host_cmd_t); + uint32_t dtim_flag = 0; + + /* get the DTIM count */ + + if (param->is_dtim_count_zero) { + dtim_flag 
|= WMI_BCN_SEND_DTIM_ZERO; + if (param->is_bitctl_reqd) { + /* deliver CAB traffic in next DTIM beacon */ + dtim_flag |= WMI_BCN_SEND_DTIM_BITCTL_SET; + } + } + /* Map the beacon buffer to DMA region */ + + wmi_buf = wmi_buf_alloc(wmi_handle, roundup(len, + sizeof(u_int32_t))); + if (!wmi_buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + + cmd = (wmi_bcn_send_from_host_cmd_t *)wmi_buf_data(wmi_buf); + cmd->vdev_id = param->vdev_id; + cmd->data_len = bcn_len; + cmd->frame_ctrl = param->frame_ctrl; + cmd->dtim_flag = dtim_flag; + cmd->frag_ptr = qdf_nbuf_get_frag_paddr(param->wbuf, 0); + cmd->virt_addr = (uintptr_t)param->wbuf; + cmd->bcn_antenna = param->bcn_txant; + wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_PDEV_SEND_BCN_CMDID); + } + return QDF_STATUS_SUCCESS; +} + +#if 0 +/** + * send_bcn_prb_template_cmd_non_tlv() - WMI beacon probe template function + * + * @param wmi_handle : handle to WMI. + * @param macaddr : MAC address + * @param param : pointer to hold beacon prb template cmd parameter + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_bcn_prb_template_cmd_non_tlv(wmi_unified_t wmi_handle, + struct bcn_prb_template_params *param) +{ + wmi_bcn_prb_tmpl_cmd *cmd; + wmi_buf_t buf; + wmi_bcn_prb_info *template; + int len = sizeof(wmi_bcn_prb_tmpl_cmd); + int ret; + + /* + * The target will store this information for use with + * the beacons and probes. 
+ */ + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_bcn_prb_tmpl_cmd *)wmi_buf_data(buf); + cmd->vdev_id = param->vdev_id; + cmd->buf_len = param->buf_len; + template = &cmd->bcn_prb_info; + template->caps = param->caps; + template->erp = param->erp; + + /* TODO: Few more elements to be added and copied to the template + * buffer */ + + /* Send the beacon probe template to the target */ + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_BCN_PRB_TMPL_CMDID); + return ret; +} +#endif + +/** + * send_peer_assoc_cmd_non_tlv() - WMI peer assoc function + * + * @param wmi_handle : handle to WMI. + * @param param : pointer to peer assoc parameter + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_peer_assoc_cmd_non_tlv(wmi_unified_t wmi_handle, + struct peer_assoc_params *param) +{ + wmi_peer_assoc_complete_cmd *cmd; + int len = sizeof(wmi_peer_assoc_complete_cmd); +#ifdef BIG_ENDIAN_HOST + int i; +#endif + + wmi_buf_t buf; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_peer_assoc_complete_cmd *)wmi_buf_data(buf); + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->peer_mac, &cmd->peer_macaddr); + cmd->vdev_id = param->vdev_id; + cmd->peer_new_assoc = param->peer_new_assoc; + cmd->peer_associd = param->peer_associd; + cmd->peer_bw_rxnss_override = 0; + + /* + * The target only needs a subset of the flags maintained in the host. + * Just populate those flags and send it down + */ + cmd->peer_flags = 0; + + if (param->is_pmf_enabled) + cmd->peer_flags |= WMI_PEER_PMF_ENABLED; + + /* + * Do not enable HT/VHT if WMM/wme is disabled for vap. 
+ */ + if (param->is_wme_set) { + + if (param->qos_flag) + cmd->peer_flags |= WMI_PEER_QOS; + if (param->apsd_flag) + cmd->peer_flags |= WMI_PEER_APSD; + if (param->ht_flag) + cmd->peer_flags |= WMI_PEER_HT; + if (param->bw_40) + cmd->peer_flags |= WMI_PEER_40MHZ; + if (param->bw_80) + cmd->peer_flags |= WMI_PEER_80MHZ; + if (param->bw_160) + cmd->peer_flags |= WMI_PEER_160MHZ; + + /* Typically if STBC is enabled for VHT it should be enabled + * for HT as well */ + if (param->stbc_flag) + cmd->peer_flags |= WMI_PEER_STBC; + + /* Typically if LDPC is enabled for VHT it should be enabled + * for HT as well */ + if (param->ldpc_flag) + cmd->peer_flags |= WMI_PEER_LDPC; + + if (param->static_mimops_flag) + cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS; + if (param->dynamic_mimops_flag) + cmd->peer_flags |= WMI_PEER_DYN_MIMOPS; + if (param->spatial_mux_flag) + cmd->peer_flags |= WMI_PEER_SPATIAL_MUX; + if (param->vht_flag) + cmd->peer_flags |= WMI_PEER_VHT; + if (param->vht_ng_flag) + cmd->peer_flags |= WMI_PEER_VHT_2G; + } + /* + * Suppress authorization for all AUTH modes that need 4-way handshake + * (during re-association). + * Authorization will be done for these modes on key installation. 
+ */ + if (param->auth_flag) + cmd->peer_flags |= WMI_PEER_AUTH; + if (param->need_ptk_4_way) + cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; + else + cmd->peer_flags &= ~WMI_PEER_NEED_PTK_4_WAY; + if (param->need_gtk_2_way) + cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY; + /* safe mode bypass the 4-way handshake */ + if (param->safe_mode_enabled) + cmd->peer_flags &= + ~(WMI_PEER_NEED_PTK_4_WAY | WMI_PEER_NEED_GTK_2_WAY); + /* Disable AMSDU for station transmit, if user configures it */ + /* Disable AMSDU for AP transmit to 11n Stations, if user configures + * it */ + if (param->amsdu_disable) + cmd->peer_flags |= WMI_PEER_AMSDU_DISABLE; + cmd->peer_caps = param->peer_caps; + cmd->peer_listen_intval = param->peer_listen_intval; + cmd->peer_ht_caps = param->peer_ht_caps; + cmd->peer_max_mpdu = param->peer_max_mpdu; + cmd->peer_mpdu_density = param->peer_mpdu_density; + cmd->peer_vht_caps = param->peer_vht_caps; + + /* Update peer rate information */ + cmd->peer_rate_caps = param->peer_rate_caps; + cmd->peer_legacy_rates.num_rates = param->peer_legacy_rates.num_rates; + /* NOTE: cmd->peer_legacy_rates.rates is of type uint32_t */ + /* ni->ni_rates.rs_rates is of type u_int8_t */ + /** + * for cmd->peer_legacy_rates.rates: + * rates (each 8bit value) packed into a 32 bit word. + * the rates are filled from least significant byte to most + * significant byte. 
+ */ + qdf_mem_copy(cmd->peer_legacy_rates.rates, + param->peer_legacy_rates.rates, + param->peer_legacy_rates.num_rates); +#ifdef BIG_ENDIAN_HOST + for (i = 0; + i < param->peer_legacy_rates.num_rates/sizeof(uint32_t) + 1; + i++) + cmd->peer_legacy_rates.rates[i] = + qdf_le32_to_cpu(cmd->peer_legacy_rates.rates[i]); +#endif + + cmd->peer_ht_rates.num_rates = param->peer_ht_rates.num_rates; + qdf_mem_copy(cmd->peer_ht_rates.rates, param->peer_ht_rates.rates, + param->peer_ht_rates.num_rates); + +#ifdef BIG_ENDIAN_HOST + for (i = 0; i < param->peer_ht_rates.num_rates/sizeof(uint32_t) + 1; + i++) + cmd->peer_ht_rates.rates[i] = + qdf_le32_to_cpu(cmd->peer_ht_rates.rates[i]); +#endif + + if (param->ht_flag && + (param->peer_ht_rates.num_rates == 0)) { + /* Workaround for EV 116382: The node is marked HT but with + * supported rx mcs set is set to 0. 11n spec mandates MCS0-7 + * for a HT STA. So forcing the supported rx mcs rate to MCS + * 0-7. + * This workaround will be removed once we get clarification + * from WFA regarding this STA behavior + */ + u_int8_t temp_ni_rates[8] = { + 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7}; + cmd->peer_ht_rates.num_rates = 8; + qdf_mem_copy(cmd->peer_ht_rates.rates, temp_ni_rates, + cmd->peer_ht_rates.num_rates); + } + /* Target asserts if node is marked HT and all MCS is set to 0. 
+ Mark the node as non-HT if all the mcs rates are disabled through + iwpriv */ + if (cmd->peer_ht_rates.num_rates == 0) + cmd->peer_flags &= ~WMI_PEER_HT; + + cmd->peer_nss = param->peer_nss; + + if (param->vht_capable) { + wmi_vht_rate_set *mcs; + mcs = &cmd->peer_vht_rates; + mcs->rx_max_rate = param->rx_max_rate; + mcs->rx_mcs_set = param->rx_mcs_set; + mcs->tx_max_rate = param->tx_max_rate; + mcs->tx_mcs_set = param->tx_mcs_set; + mcs->tx_max_mcs_nss = param->tx_max_mcs_nss; + } + + cmd->peer_phymode = param->peer_phymode; + /*Send bandwidth-NSS mapping to FW*/ + cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override; + + return wmi_unified_cmd_send(wmi_handle, buf, len, WMI_PEER_ASSOC_CMDID); +} + +/** + * send_scan_start_cmd_non_tlv() - WMI scan start function + * + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold scan start cmd parameter + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_scan_start_cmd_non_tlv(wmi_unified_t wmi_handle, + struct scan_req_params *param) +{ + wmi_start_scan_cmd *cmd; + wmi_buf_t buf; + wmi_chan_list *chan_list; + wmi_bssid_list *bssid_list; + wmi_ssid_list *ssid_list; + wmi_ie_data *ie_data; + uint32_t *tmp_ptr; + int i, len = sizeof(wmi_start_scan_cmd); + +#ifdef TEST_CODE + len += sizeof(wmi_chan_list) + 3 * sizeof(uint32_t); +#else + if (param->chan_list.num_chan) { + len += sizeof(wmi_chan_list) + (param->chan_list.num_chan - 1) + * sizeof(uint32_t); + } +#endif + if (param->num_ssids) { + len += sizeof(wmi_ssid_list) + (param->num_ssids - 1) + * sizeof(wmi_ssid); + } + if (param->num_bssid) { + len += sizeof(wmi_bssid_list) + (param->num_bssid - 1) + * sizeof(wmi_mac_addr); + } + if (param->extraie.len) { + i = param->extraie.len % sizeof(uint32_t); + if (i) + len += sizeof(uint32_t) - i; + len += 2 * sizeof(uint32_t) + param->extraie.len; + } + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + 
return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_start_scan_cmd *)wmi_buf_data(buf); + OS_MEMZERO(cmd, len); + cmd->vdev_id = param->vdev_id; + cmd->scan_priority = param->scan_priority; + cmd->scan_id = param->scan_id; + cmd->scan_req_id = param->scan_req_id; + + /* Scan events subscription */ + if (param->scan_ev_started) + cmd->notify_scan_events |= WMI_SCAN_EVENT_STARTED; + if (param->scan_ev_completed) + cmd->notify_scan_events |= WMI_SCAN_EVENT_COMPLETED; + if (param->scan_ev_bss_chan) + cmd->notify_scan_events |= WMI_SCAN_EVENT_BSS_CHANNEL; + if (param->scan_ev_foreign_chan) + cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHANNEL; + if (param->scan_ev_dequeued) + cmd->notify_scan_events |= WMI_SCAN_EVENT_DEQUEUED; + if (param->scan_ev_preempted) + cmd->notify_scan_events |= WMI_SCAN_EVENT_PREEMPTED; + if (param->scan_ev_start_failed) + cmd->notify_scan_events |= WMI_SCAN_EVENT_START_FAILED; + if (param->scan_ev_restarted) + cmd->notify_scan_events |= WMI_SCAN_EVENT_RESTARTED; + if (param->scan_ev_foreign_chn_exit) + cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT; + if (param->scan_ev_invalid) + cmd->notify_scan_events |= WMI_SCAN_EVENT_INVALID; + if (param->scan_ev_gpio_timeout) + cmd->notify_scan_events |= WMI_SCAN_EVENT_GPIO_TIMEOUT; + + /** Max. 
active channel dwell time */
+ cmd->dwell_time_active = param->dwell_time_active;
+ /** Passive channel dwell time */
+ cmd->dwell_time_passive = param->dwell_time_passive;
+
+ /** Scan control flags */
+ cmd->scan_ctrl_flags = 0;
+ if (param->scan_f_passive)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+ if (param->scan_f_strict_passive_pch)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN;
+ if (param->scan_f_promisc_mode)
+ cmd->scan_ctrl_flags |= WMI_SCAN_PROMISCOUS_MODE;
+ if (param->scan_f_capture_phy_err)
+ cmd->scan_ctrl_flags |= WMI_SCAN_CAPTURE_PHY_ERROR;
+ if (param->scan_f_half_rate)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_HALF_RATE_SUPPORT;
+ if (param->scan_f_quarter_rate)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT;
+ if (param->scan_f_cck_rates)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
+ if (param->scan_f_chan_stat_evnt)
+ cmd->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ if (param->scan_f_bcast_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_BCAST_PROBE_REQ;
+ if (param->scan_f_offchan_mgmt_tx)
+ cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_MGMT_TX;
+ if (param->scan_f_offchan_data_tx)
+ cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_DATA_TX;
+ /* Always enable ofdm rates */
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
+
+ /** send multiple broadcast probe req with this delay in between */
+ cmd->repeat_probe_time = param->repeat_probe_time;
+ cmd->probe_spacing_time = param->probe_spacing_time;
+ /** delay between channel change and first probe request */
+ cmd->probe_delay = param->probe_delay;
+ /** idle time on channel for which if no traffic is seen
+ then scanner can switch to off channel */
+ cmd->idle_time = param->idle_time;
+ cmd->min_rest_time = param->min_rest_time;
+ /** maximum rest time allowed on bss channel, overwrites
+ * other conditions and changes channel to off channel
+ * even if min beacon count, idle time requirements are not met. 
+ */ + cmd->max_rest_time = param->max_rest_time; + /** maxmimum scan time allowed */ +#if IPQ4019_EMU + cmd->max_scan_time = 0xffffffff; +#else + cmd->max_scan_time = param->max_scan_time; +#endif + tmp_ptr = (uint32_t *) (cmd + 1); +#ifdef TEST_CODE +#define DEFAULT_TIME 150 + cmd->min_rest_time = DEFAULT_TIME; + cmd->idle_time = 10*DEFAULT_TIME; + cmd->max_rest_time = 30*DEFAULT_TIME; + chan_list = (wmi_chan_list *) tmp_ptr; + chan_list->tag = WMI_CHAN_LIST_TAG; + chan_list->num_chan = 4; + chan_list->channel_list[0] = 2412; /* 1 */ + chan_list->channel_list[1] = 2437; /* 6 */ + chan_list->channel_list[2] = 5180; /* 36 */- + chan_list->channel_list[3] = 5680; /* 136 */ + tmp_ptr += (2 + chan_list->num_chan); /* increase by words */- +#else +#define FREQUENCY_THRESH 1000 + if (param->chan_list.num_chan) { + chan_list = (wmi_chan_list *) tmp_ptr; + chan_list->tag = WMI_CHAN_LIST_TAG; + chan_list->num_chan = param->chan_list.num_chan; + for (i = 0; i < param->chan_list.num_chan; ++i) + chan_list->channel_list[i] = + param->chan_list.chan[i].freq; + tmp_ptr += (2 + param->chan_list.num_chan); + } +#endif + if (param->num_ssids) { + ssid_list = (wmi_ssid_list *) tmp_ptr; + ssid_list->tag = WMI_SSID_LIST_TAG; + ssid_list->num_ssids = param->num_ssids; + for (i = 0; i < param->num_ssids; ++i) { + ssid_list->ssids[i].ssid_len = param->ssid[i].length; + WMI_HOST_IF_MSG_COPY_CHAR_ARRAY( + ssid_list->ssids[i].ssid, + param->ssid[i].ssid, + param->ssid[i].length); + } + tmp_ptr += (2 + (sizeof(wmi_ssid) * + param->num_ssids)/sizeof(uint32_t)); + } + if (param->num_bssid) { + bssid_list = (wmi_bssid_list *) tmp_ptr; + bssid_list->tag = WMI_BSSID_LIST_TAG; + bssid_list->num_bssid = param->num_bssid; + for (i = 0; i < param->num_bssid; ++i) { + WMI_CHAR_ARRAY_TO_MAC_ADDR( + &(param->bssid_list[i].bytes[0]), + &bssid_list->bssid_list[i]); + } + tmp_ptr += (2 + (sizeof(wmi_mac_addr) * + param->num_bssid)/sizeof(uint32_t)); + } + if (param->extraie.len) { + ie_data = (wmi_ie_data 
*) tmp_ptr; + ie_data->tag = WMI_IE_TAG; + ie_data->ie_len = param->extraie.len; + WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(ie_data->ie_data, + param->extraie.ptr, param->extraie.len); + } + qdf_print("Sending SCAN START cmd\n"); + return wmi_unified_cmd_send(wmi_handle, buf, len, WMI_START_SCAN_CMDID); +} + +/** + * send_scan_stop_cmd_non_tlv() - WMI scan stop function + * + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold scan start cmd parameter + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_scan_stop_cmd_non_tlv(wmi_unified_t wmi_handle, + struct scan_cancel_param *param) +{ + wmi_stop_scan_cmd *cmd = NULL; + wmi_buf_t buf; + u_int32_t len = sizeof(wmi_stop_scan_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_stop_scan_cmd *)wmi_buf_data(buf); + OS_MEMZERO(cmd, len); + /* scan scheduler is not supportd yet */ + cmd->scan_id = param->scan_id; + cmd->requestor = param->requester; + cmd->vdev_id = param->vdev_id; + + if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) { + /* Cancelling all scans - always match scan id */ + cmd->req_type = WMI_SCAN_STOP_ALL; + } else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) { + /*- + * Cancelling VAP scans - report a match if scan was requested + * by the same VAP trying to cancel it. + */ + cmd->req_type = WMI_SCN_STOP_VAP_ALL; + } else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) { + /*- + * Cancelling specific scan - report a match if specified scan + * id matches the request's scan id. + */ + cmd->req_type = WMI_SCAN_STOP_ONE; + } + + wmi_unified_cmd_send(wmi_handle, buf, len, WMI_STOP_SCAN_CMDID); + + return QDF_STATUS_SUCCESS; +} + +/** + * send_scan_chan_list_cmd_non_tlv() - WMI scan channel list function + * + * @param wmi_handle : handle to WMI. 
+ * @param param : pointer to hold scan channel list parameter + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_scan_chan_list_cmd_non_tlv(wmi_unified_t wmi_handle, + struct scan_chan_list_params *param) +{ + uint32_t i; + wmi_buf_t buf; + wmi_scan_chan_list_cmd *cmd; + int len = sizeof(wmi_scan_chan_list_cmd); + + len = sizeof(wmi_scan_chan_list_cmd) + + sizeof(wmi_channel)*param->nallchans; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_scan_chan_list_cmd *)wmi_buf_data(buf); + cmd->num_scan_chans = param->nallchans; + OS_MEMZERO(cmd->chan_info, sizeof(wmi_channel)*cmd->num_scan_chans); + + + for (i = 0; i < param->nallchans; ++i) { + cmd->chan_info[i].mhz = param->ch_param[i].mhz; + + if (param->ch_param[i].is_chan_passive) + WMI_SET_CHANNEL_FLAG(&(cmd->chan_info[i]), + WMI_CHAN_FLAG_PASSIVE); + + if (param->ch_param[i].allow_vht) + WMI_SET_CHANNEL_FLAG(&(cmd->chan_info[i]), + WMI_CHAN_FLAG_ALLOW_VHT); + else if (param->ch_param[i].allow_ht) + WMI_SET_CHANNEL_FLAG(&(cmd->chan_info[i]), + WMI_CHAN_FLAG_ALLOW_HT); + + cmd->chan_info[i].band_center_freq1 = + param->ch_param[i].cfreq1; + cmd->chan_info[i].band_center_freq2 = + param->ch_param[i].cfreq2; + WMI_SET_CHANNEL_MODE(&cmd->chan_info[i], + param->ch_param[i].phy_mode); + + if (param->ch_param[i].half_rate) + WMI_SET_CHANNEL_FLAG(&(cmd->chan_info[i]), + WMI_CHAN_FLAG_HALF); + if (param->ch_param[i].quarter_rate) + WMI_SET_CHANNEL_FLAG(&(cmd->chan_info[i]), + WMI_CHAN_FLAG_QUARTER); + + /* also fill in power information */ + WMI_SET_CHANNEL_MIN_POWER(&cmd->chan_info[i], + param->ch_param[i].minpower); + WMI_SET_CHANNEL_MAX_POWER(&cmd->chan_info[i], + param->ch_param[i].maxpower); + WMI_SET_CHANNEL_REG_POWER(&cmd->chan_info[i], + param->ch_param[i].maxregpower); + WMI_SET_CHANNEL_ANTENNA_MAX(&cmd->chan_info[i], + param->ch_param[i].antennamax); + 
WMI_SET_CHANNEL_REG_CLASSID(&cmd->chan_info[i], + param->ch_param[i].reg_class_id); + } + + wmi_unified_cmd_send(wmi_handle, buf, len, WMI_SCAN_CHAN_LIST_CMDID); + return QDF_STATUS_SUCCESS; +} + +/** + * send_thermal_mitigation_param_cmd_non_tlv() - WMI scan channel list function + * + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold thermal mitigation param + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_thermal_mitigation_param_cmd_non_tlv(wmi_unified_t wmi_handle, + struct thermal_mitigation_params *param) +{ + wmi_buf_t buf = NULL; + tt_config_t *cmd = NULL; + int error = 0; + int32_t len = 0; + int i = 0; + + len = sizeof(tt_config_t); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (tt_config_t *) wmi_buf_data(buf); + cmd->enable = param->enable; + cmd->dc = param->dc; + cmd->dc_per_event = param->dc_per_event; + for (i = 0; i < THERMAL_LEVELS; i++) { + cmd->levelconf[i].tmplwm = param->levelconf[i].tmplwm; + cmd->levelconf[i].tmphwm = param->levelconf[i].tmphwm; + cmd->levelconf[i].dcoffpercent = + param->levelconf[i].dcoffpercent; + cmd->levelconf[i].prio = param->levelconf[i].priority; + } + + error = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_TT_SET_CONF_CMDID); + return error; +} + +/** + * send_phyerr_enable_cmd_non_tlv() - WMI phyerr enable function + * + * @param wmi_handle : handle to WMI. + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_phyerr_enable_cmd_non_tlv(wmi_unified_t wmi_handle) +{ + wmi_buf_t buf; + + /* + * Passing a NULL pointer to wmi_unified_cmd_send() panics it, + * so let's just use a 32 byte fake array for now. + */ + buf = wmi_buf_alloc(wmi_handle, 32); + if (buf == NULL) { + /* XXX error? 
*/ + return QDF_STATUS_E_NOMEM; + } + + qdf_print("%s: about to send\n", __func__); + if (wmi_unified_cmd_send(wmi_handle, buf, 32, + WMI_PDEV_DFS_ENABLE_CMDID) != QDF_STATUS_SUCCESS) { + qdf_print("%s: send failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_phyerr_disable_cmd_non_tlv() - WMI phyerr disable function + * + * @param wmi_handle : handle to WMI. + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_phyerr_disable_cmd_non_tlv(wmi_unified_t wmi_handle) +{ + wmi_buf_t buf; + + /* + * Passing a NULL pointer to wmi_unified_cmd_send() panics it, + * so let's just use a 32 byte fake array for now. + */ + buf = wmi_buf_alloc(wmi_handle, 32); + if (buf == NULL) { + /* XXX error? */ + return QDF_STATUS_E_NOMEM; + } + + qdf_print("%s: about to send\n", __func__); + if (wmi_unified_cmd_send(wmi_handle, buf, 32, + WMI_PDEV_DFS_DISABLE_CMDID) != QDF_STATUS_SUCCESS) { + qdf_print("%s: send failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_smart_ant_enable_cmd_non_tlv() - WMI smart ant enable function + * + * @param wmi_handle : handle to WMI. + * @param param : pointer to antenna param + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ +static QDF_STATUS send_smart_ant_enable_cmd_non_tlv(wmi_unified_t wmi_handle, + struct smart_ant_enable_params *param) +{ + /* Send WMI COMMAND to Enable */ + wmi_pdev_smart_ant_enable_cmd *cmd; + wmi_buf_t buf; + int len = 0; + int ret; + + len = sizeof(wmi_pdev_smart_ant_enable_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_pdev_smart_ant_enable_cmd *)wmi_buf_data(buf); + cmd->enable = param->enable; + cmd->mode = param->mode; + cmd->rx_antenna = param->rx_antenna; + cmd->tx_default_antenna = param->rx_antenna; + + if (param->mode == SMART_ANT_MODE_SERIAL) { + cmd->gpio_pin[0] = param->gpio_pin[0]; + cmd->gpio_pin[1] = param->gpio_pin[1]; + cmd->gpio_pin[2] = 0; + cmd->gpio_pin[3] = 0; + + cmd->gpio_func[0] = param->gpio_func[0]; + cmd->gpio_func[1] = param->gpio_func[1]; + cmd->gpio_func[2] = 0; + cmd->gpio_func[3] = 0; + + } else if (param->mode == SMART_ANT_MODE_PARALLEL) { + cmd->gpio_pin[0] = param->gpio_pin[0]; + cmd->gpio_pin[1] = param->gpio_pin[1]; + cmd->gpio_pin[2] = param->gpio_pin[2]; + cmd->gpio_pin[3] = param->gpio_pin[3]; + + cmd->gpio_func[0] = param->gpio_func[0]; + cmd->gpio_func[1] = param->gpio_func[1]; + cmd->gpio_func[2] = param->gpio_func[2]; + cmd->gpio_func[3] = param->gpio_func[3]; + } + + ret = wmi_unified_cmd_send(wmi_handle, + buf, + len, + WMI_PDEV_SMART_ANT_ENABLE_CMDID); + + if (ret != 0) { + qdf_print(" %s :WMI Failed\n", __func__); + qdf_print("%s: Failed to send WMI_PDEV_SMART_ANT_ENABLE_CMDID.\n" + "enable:%d mode:%d rx_antenna: 0x%08x PINS: " + "[%d %d %d %d] Func[%d %d %d %d] cmdstatus=%d\n", + __func__, + cmd->enable, + cmd->mode, + cmd->rx_antenna, + cmd->gpio_pin[0], + cmd->gpio_pin[1], + cmd->gpio_pin[2], + cmd->gpio_pin[3], + cmd->gpio_func[0], + cmd->gpio_func[1], + cmd->gpio_func[2], + cmd->gpio_func[3], + ret); + wmi_buf_free(buf); + } + return ret; +} +/** + * send_smart_ant_set_rx_ant_cmd_non_tlv() 
- WMI set rx antenna function + * + * @param wmi_handle : handle to WMI. + * @param param : pointer to rx antenna param + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_smart_ant_set_rx_ant_cmd_non_tlv(wmi_unified_t wmi_handle, + struct smart_ant_rx_ant_params *param) +{ + wmi_pdev_smart_ant_set_rx_antenna_cmd *cmd; + wmi_buf_t buf; + int len = 0; + int ret; + + len = sizeof(wmi_pdev_smart_ant_set_rx_antenna_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_pdev_smart_ant_set_rx_antenna_cmd *)wmi_buf_data(buf); + cmd->rx_antenna = param->antenna; + ret = wmi_unified_cmd_send(wmi_handle, + buf, + len, + WMI_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID); + + if (ret != 0) { + qdf_print(" %s :WMI Failed\n", __func__); + qdf_print("%s: Failed to send WMI_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID.\n" + " rx_antenna: 0x%08x cmdstatus=%d\n", + __func__, + cmd->rx_antenna, + ret); + wmi_buf_free(buf); + } + return ret; +} + +/** + * send_smart_ant_set_tx_ant_cmd_non_tlv() - WMI set tx antenna function + * @param wmi_handle : handle to WMI. + * @param macaddr : vdev mac address + * @param param : pointer to tx antenna param + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ +static QDF_STATUS send_smart_ant_set_tx_ant_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct smart_ant_tx_ant_params *param) +{ + wmi_peer_sant_set_tx_antenna_cmd *cmd; + wmi_buf_t buf; + int len = 0; + int ret; + + len = sizeof(wmi_peer_sant_set_tx_antenna_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_peer_sant_set_tx_antenna_cmd *)wmi_buf_data(buf); + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + + cmd->antenna_series[0] = param->antenna_array[0]; + cmd->antenna_series[1] = param->antenna_array[1]; + ret = wmi_unified_cmd_send(wmi_handle, + buf, + len, + WMI_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID); + + if (ret != 0) { + qdf_print(" %s :WMI Failed\n", __func__); + qdf_print("%s: Failed to send WMI_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID.\n" + " Node: %s tx_antennas: [0x%08x 0x%08x] cmdstatus=%d\n", + __func__, + ether_sprintf(macaddr), + cmd->antenna_series[0], + cmd->antenna_series[1], + ret); + wmi_buf_free(buf); + } + return ret; +} + +/** + * send_smart_ant_set_training_info_cmd_non_tlv() - WMI set smart antenna + * training information function + * @param wmi_handle : handle to WMI. + * @macaddr : vdev mac address + * @param param : pointer to tx antenna param + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ +static QDF_STATUS send_smart_ant_set_training_info_cmd_non_tlv( + wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct smart_ant_training_info_params *param) +{ + wmi_peer_sant_set_train_antenna_cmd *cmd; + wmi_buf_t buf; + int len = 0; + int ret; + + len = sizeof(wmi_peer_sant_set_train_antenna_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_peer_sant_set_train_antenna_cmd *)wmi_buf_data(buf); + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + qdf_mem_copy(&cmd->train_rate_series[0], ¶m->rate_array[0], + (sizeof(uint32_t)*SMART_ANT_MAX_RATE_SERIES)); + qdf_mem_copy(&cmd->train_antenna_series[0], ¶m->antenna_array[0], + (sizeof(uint32_t)*SMART_ANT_MAX_RATE_SERIES)); + cmd->num_pkts = param->numpkts; + ret = wmi_unified_cmd_send(wmi_handle, + buf, + len, + WMI_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID); + + if (ret != 0) { + qdf_print(" %s :WMI Failed\n", __func__); + qdf_print("%s: Failed to Send WMI_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID.\n" + " Train Node: %s rate_array[0x%02x 0x%02x] " + "tx_antennas: [0x%08x 0x%08x] cmdstatus=%d\n", + __func__, + ether_sprintf(macaddr), + cmd->train_rate_series[0], + cmd->train_rate_series[1], + cmd->train_antenna_series[0], + cmd->train_antenna_series[1], + ret); + wmi_buf_free(buf); + } + return ret; +} + +/** + * send_smart_ant_set_node_config_cmd_non_tlv() - WMI set node + * configuration function + * @param wmi_handle : handle to WMI. + * @macaddr : vdev mad address + * @param param : pointer to tx antenna param + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ +static QDF_STATUS send_smart_ant_set_node_config_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct smart_ant_node_config_params *param) +{ + wmi_peer_sant_set_node_config_ops_cmd *cmd; + wmi_buf_t buf; + int len = 0; + int ret; + int i = 0; + + len = sizeof(wmi_peer_sant_set_node_config_ops_cmd); + + if ((param->args_count == 0) || (param->args_count > + (sizeof(cmd->args) / sizeof(cmd->args[0])))) { + qdf_print("%s: Can't send a command with %d arguments\n", + __func__, param->args_count); + return QDF_STATUS_E_FAILURE; + } + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_peer_sant_set_node_config_ops_cmd *)wmi_buf_data(buf); + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + cmd->cmd_id = param->cmd_id; + cmd->args_count = param->args_count; + for (i = 0; i < param->args_count; i++) + cmd->args[i] = param->args_arr[i]; + + ret = wmi_unified_cmd_send(wmi_handle, + buf, + len, + WMI_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID); + + if (ret != 0) { + qdf_print(" %s :WMI Failed\n", __func__); + qdf_print("%s: Sent " + "WMI_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID, cmd_id:" + " 0x%x\n Node: %s cmdstatus=%d\n", + __func__, param->cmd_id, ether_sprintf(macaddr), ret); + } + return ret; +} + +/** + * send_smart_ant_enable_tx_feedback_cmd_non_tlv() - WMI enable smart antenna + * tx feedback function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold enable param + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */
+static QDF_STATUS send_smart_ant_enable_tx_feedback_cmd_non_tlv(
+ wmi_unified_t wmi_handle,
+ struct smart_ant_enable_tx_feedback_params *param)
+{
+ uint32_t types = 0;
+ int len = 0;
+ wmi_buf_t buf;
+ wmi_pdev_pktlog_enable_cmd *cmd;
+
+ if (param->enable == 1) {
+ types |= WMI_PKTLOG_EVENT_TX;
+ types |= WMI_PKTLOG_EVENT_SMART_ANTENNA;
+
+ len = sizeof(wmi_pdev_pktlog_enable_cmd);
+ buf = wmi_buf_alloc(wmi_handle, len);
+ if (!buf) {
+ qdf_print("%s:wmi_buf_alloc failed\n", __func__);
+ return QDF_STATUS_E_FAILURE;
+ }
+ cmd = (wmi_pdev_pktlog_enable_cmd *)wmi_buf_data(buf);
+ cmd->evlist = types;
+ /*enabling the pktlog for smart antenna tx feedback*/
+ if (wmi_unified_cmd_send(wmi_handle, buf, len,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID))
+ return QDF_STATUS_E_FAILURE;
+ return QDF_STATUS_SUCCESS;
+ } else if (param->enable == 0) {
+ buf = wmi_buf_alloc(wmi_handle, 0);
+ if (!buf) {
+ qdf_print("%s:wmi_buf_alloc failed\n", __func__);
+ return QDF_STATUS_E_FAILURE;
+ }
+ if (wmi_unified_cmd_send(wmi_handle, buf, len,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID))
+ return QDF_STATUS_E_FAILURE;
+ return QDF_STATUS_SUCCESS;
+ } else
+ return QDF_STATUS_E_FAILURE;
+}
+
+/**
+ * send_vdev_spectral_configure_cmd_non_tlv() - send VDEV spectral configure
+ * command to fw
+ * @wmi_handle: wmi handle
+ * @param: pointer to hold spectral config parameter
+ *
+ * Return: 0 for success or error code
+ */
+static QDF_STATUS send_vdev_spectral_configure_cmd_non_tlv(wmi_unified_t wmi_handle,
+ struct vdev_spectral_configure_params *param)
+{
+ wmi_vdev_spectral_configure_cmd *cmd;
+ wmi_buf_t buf;
+ int len = 0;
+ int ret;
+
+ len = sizeof(wmi_vdev_spectral_configure_cmd);
+ buf = wmi_buf_alloc(wmi_handle, len);
+ if (!buf) {
+ qdf_print("%s:wmi_buf_alloc failed\n", __func__);
+ return QDF_STATUS_E_NOMEM;
+ }
+
+ cmd = (wmi_vdev_spectral_configure_cmd *)wmi_buf_data(buf);
+
+ cmd->vdev_id = param->vdev_id;
+
+ cmd->spectral_scan_count = param->count;
+ cmd->spectral_scan_period = 
param->period; + cmd->spectral_scan_priority = param->spectral_pri; + cmd->spectral_scan_fft_size = param->fft_size; + cmd->spectral_scan_gc_ena = param->gc_enable; + cmd->spectral_scan_restart_ena = param->restart_enable; + cmd->spectral_scan_noise_floor_ref = param->noise_floor_ref; + cmd->spectral_scan_init_delay = param->init_delay; + cmd->spectral_scan_nb_tone_thr = param->nb_tone_thr; + cmd->spectral_scan_str_bin_thr = param->str_bin_thr; + cmd->spectral_scan_wb_rpt_mode = param->wb_rpt_mode; + cmd->spectral_scan_rssi_rpt_mode = param->rssi_rpt_mode; + cmd->spectral_scan_rssi_thr = param->rssi_thr; + cmd->spectral_scan_pwr_format = param->pwr_format; + cmd->spectral_scan_rpt_mode = param->rpt_mode; + cmd->spectral_scan_bin_scale = param->bin_scale; + cmd->spectral_scan_dBm_adj = param->dbm_adj; + cmd->spectral_scan_chn_mask = param->chn_mask; + + ret = wmi_unified_cmd_send(wmi_handle, + buf, + len, + WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID); +#ifdef OL_SPECTRAL_DEBUG_CONFIG_INTERACTIONS + qdf_print("%s: Sent " + "WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID\n", __func__); + + qdf_print("vdev_id = %u\n" + "spectral_scan_count = %u\n" + "spectral_scan_period = %u\n" + "spectral_scan_priority = %u\n" + "spectral_scan_fft_size = %u\n" + "spectral_scan_gc_ena = %u\n" + "spectral_scan_restart_ena = %u\n" + "spectral_scan_noise_floor_ref = %u\n" + "spectral_scan_init_delay = %u\n" + "spectral_scan_nb_tone_thr = %u\n" + "spectral_scan_str_bin_thr = %u\n" + "spectral_scan_wb_rpt_mode = %u\n" + "spectral_scan_rssi_rpt_mode = %u\n" + "spectral_scan_rssi_thr = %u\n" + "spectral_scan_pwr_format = %u\n" + "spectral_scan_rpt_mode = %u\n" + "spectral_scan_bin_scale = %u\n" + "spectral_scan_dBm_adj = %u\n" + "spectral_scan_chn_mask = %u\n", + param->vdev_id, + param->count, + param->period, + param->spectral_pri, + param->fft_size, + param->gc_enable, + param->restart_enable, + param->noise_floor_ref, + param->init_delay, + param->nb_tone_thr, + param->str_bin_thr, + 
param->wb_rpt_mode, + param->rssi_rpt_mode, + param->rssi_thr, + param->pwr_format, + param->rpt_mode, + param->bin_scale, + param->dbm_adj, + param->chn_mask); + qdf_print("%s: Status: %d\n\n", __func__, ret); +#endif /* OL_SPECTRAL_DEBUG_CONFIG_INTERACTIONS */ + + return ret; +} + +#ifdef WLAN_SUPPORT_FILS +/** + * send_fils_discovery_send_cmd_non_tlv() - WMI FILS Discovery send function + * @wmi_handle: handle to WMI + * @param: pointer to hold FD send cmd parameter + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS error code on failure. + */ +static QDF_STATUS +send_fils_discovery_send_cmd_non_tlv(wmi_unified_t wmi_handle, + struct fd_params *param) +{ + wmi_fd_send_from_host_cmd_t *cmd; + wmi_buf_t wmi_buf; + QDF_STATUS status; + int fd_len = qdf_nbuf_len(param->wbuf); + int len = sizeof(wmi_fd_send_from_host_cmd_t); + + wmi_buf = wmi_buf_alloc(wmi_handle, roundup(len, sizeof(u_int32_t))); + if (!wmi_buf) { + WMI_LOGE("wmi_buf_alloc failed\n"); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_fd_send_from_host_cmd_t *)wmi_buf_data(wmi_buf); + cmd->vdev_id = param->vdev_id; + cmd->data_len = fd_len; + cmd->frag_ptr = qdf_nbuf_get_frag_paddr(param->wbuf, 0); + cmd->frame_ctrl = param->frame_ctrl; + status = wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_PDEV_SEND_FD_CMDID); + if (status != QDF_STATUS_SUCCESS) { + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_vdev_fils_enable_cmd_non_tlv() - enable/Disable FD Frame command to fw + * @wmi_handle: wmi handle + * @param: pointer to hold FILS discovery enable param + * + * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS error code on failure + */ +static QDF_STATUS +send_vdev_fils_enable_cmd_non_tlv(wmi_unified_t wmi_handle, + struct config_fils_params *param) +{ + wmi_enable_fils_cmd *cmd; + wmi_buf_t buf; + QDF_STATUS status; + int len = sizeof(wmi_enable_fils_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + 
WMI_LOGE("wmi_buf_alloc failed\n"); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_enable_fils_cmd *)wmi_buf_data(buf); + cmd->vdev_id = param->vdev_id; + cmd->fd_period = param->fd_period; + WMI_LOGI("Setting FD period to %d vdev id : %d\n", + param->fd_period, param->vdev_id); + + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ENABLE_FILS_CMDID); + if (status != QDF_STATUS_SUCCESS) { + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_swfda_vdev_id_non_tlv() - extract swfda vdev id from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @vdev_id: pointer to hold vdev id + * + * Return: QDF_STATUS_SUCCESS + */ +static QDF_STATUS +extract_swfda_vdev_id_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t *vdev_id) +{ + wmi_host_swfda_event *swfda_event = (wmi_host_swfda_event *)evt_buf; + + *vdev_id = swfda_event->vdev_id; + + return QDF_STATUS_SUCCESS; +} +#endif /* WLAN_SUPPORT_FILS */ + +/** + * send_vdev_spectral_enable_cmd_non_tlv() - send VDEV spectral configure + * command to fw + * @wmi_handle: wmi handle + * @param: pointer to hold spectral enable parameter + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_vdev_spectral_enable_cmd_non_tlv(wmi_unified_t wmi_handle, + struct vdev_spectral_enable_params *param) +{ + wmi_vdev_spectral_enable_cmd *cmd; + wmi_buf_t buf; + int len = 0; + int ret; + + len = sizeof(wmi_vdev_spectral_enable_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_vdev_spectral_enable_cmd *)wmi_buf_data(buf); + + cmd->vdev_id = param->vdev_id; + + if (param->active_valid) { + cmd->trigger_cmd = param->active ? 1 : 2; + /* 1: Trigger, 2: Clear Trigger */ + } else { + cmd->trigger_cmd = 0; /* 0: Ignore */ + } + + if (param->enabled_valid) { + cmd->enable_cmd = param->enabled ? 
1 : 2; + /* 1: Enable 2: Disable */ + } else { + cmd->enable_cmd = 0; /* 0: Ignore */ + } + +#ifdef OL_SPECTRAL_DEBUG_CONFIG_INTERACTIONS + qdf_print + ("%s: Sent WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID\n", __func__); + + qdf_print("vdev_id = %u\n" + "trigger_cmd = %u\n" + "enable_cmd = %u\n", + cmd->vdev_id, + cmd->trigger_cmd, + cmd->enable_cmd); + + qdf_print("%s: Status: %d\n\n", __func__, ret); +#endif /* OL_SPECTRAL_DEBUG_CONFIG_INTERACTIONS */ + + ret = wmi_unified_cmd_send(wmi_handle, + buf, + len, + WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID); + return ret; +} + +/** + * send_pdev_set_regdomain_cmd_non_tlv() - send set regdomain command to fw + * @wmi_handle: wmi handle + * @param: pointer to pdev regdomain params + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_pdev_set_regdomain_cmd_non_tlv(wmi_unified_t wmi_handle, + struct pdev_set_regdomain_params *param) +{ + wmi_pdev_set_regdomain_cmd *cmd; + wmi_buf_t buf; + + int len = sizeof(wmi_pdev_set_regdomain_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_pdev_set_regdomain_cmd *)wmi_buf_data(buf); + + cmd->reg_domain = param->currentRDinuse; + cmd->reg_domain_2G = param->currentRD2G; + cmd->reg_domain_5G = param->currentRD5G; + cmd->conformance_test_limit_2G = param->ctl_2G; + cmd->conformance_test_limit_5G = param->ctl_5G; + cmd->dfs_domain = param->dfsDomain; + + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_REGDOMAIN_CMDID); +} + +/** + * send_set_quiet_mode_cmd_non_tlv() - send set quiet mode command to fw + * @wmi_handle: wmi handle + * @param: pointer to quiet mode params + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_set_quiet_mode_cmd_non_tlv(wmi_unified_t wmi_handle, + struct set_quiet_mode_params *param) +{ + wmi_buf_t buf; + wmi_pdev_set_quiet_cmd *quiet_cmd; + int len = sizeof(wmi_pdev_set_quiet_cmd); + + buf = 
wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + quiet_cmd = (wmi_pdev_set_quiet_cmd *)wmi_buf_data(buf); + quiet_cmd->enabled = param->enabled; + quiet_cmd->period = (param->period)*(param->intval); + quiet_cmd->duration = param->duration; + quiet_cmd->next_start = param->offset; + wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_QUIET_MODE_CMDID); + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_beacon_filter_cmd_non_tlv() - send beacon filter command to fw + * @wmi_handle: wmi handle + * @param: pointer to beacon filter params + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_set_beacon_filter_cmd_non_tlv(wmi_unified_t wmi_handle, + struct set_beacon_filter_params *param) +{ + /* Issue WMI command to set beacon filter */ + int i; + wmi_add_bcn_filter_cmd_t *cmd; + QDF_STATUS res; + wmi_buf_t buf = NULL; + int len = sizeof(wmi_add_bcn_filter_cmd_t); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("buf alloc failed\n"); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_add_bcn_filter_cmd_t *)wmi_buf_data(buf); + cmd->vdev_id = param->vdev_id; + qdf_print("vdev_id: %d\n", cmd->vdev_id); + + for (i = 0; i < BCN_FLT_MAX_ELEMS_IE_LIST; i++) + cmd->ie_map[i] = 0; + + if (param->ie) { + for (i = 0; i < BCN_FLT_MAX_ELEMS_IE_LIST; i++) + cmd->ie_map[i] = param->ie[i]; + } + res = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ADD_BCN_FILTER_CMDID); + return (res == QDF_STATUS_SUCCESS) ? 
+ QDF_STATUS_SUCCESS : QDF_STATUS_E_FAILURE; +} + +/** + * send_remove_beacon_filter_cmd_non_tlv() - send remove beacon filter command + * to fw + * @wmi_handle: wmi handle + * @param: pointer to remove beacon filter params + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_remove_beacon_filter_cmd_non_tlv(wmi_unified_t wmi_handle, + struct remove_beacon_filter_params *param) +{ + wmi_rmv_bcn_filter_cmd_t *cmd; + QDF_STATUS res; + wmi_buf_t buf = NULL; + int len = sizeof(wmi_rmv_bcn_filter_cmd_t); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("buf alloc failed\n"); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_rmv_bcn_filter_cmd_t *)wmi_buf_data(buf); + cmd->vdev_id = param->vdev_id; + res = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_RMV_BCN_FILTER_CMDID); + return (res == QDF_STATUS_SUCCESS) ? + QDF_STATUS_SUCCESS : QDF_STATUS_E_FAILURE; +} + +/** + * send_mgmt_cmd_non_tlv() - send mgmt command to fw + * @wmi_handle: wmi handle + * @param: pointer to mgmt params + * Return: 0 for success or error code + */ +static QDF_STATUS +send_mgmt_cmd_non_tlv(wmi_unified_t wmi_handle, + struct wmi_mgmt_params *param) +{ + wmi_mgmt_tx_cmd *cmd; + wmi_buf_t wmi_buf; + int len = sizeof(wmi_mgmt_tx_hdr) + param->frm_len; + + wmi_buf = wmi_buf_alloc(wmi_handle, roundup(len, sizeof(u_int32_t))); + if (!wmi_buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_mgmt_tx_cmd *)wmi_buf_data(wmi_buf); + cmd->hdr.vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->macaddr, &cmd->hdr.peer_macaddr); + cmd->hdr.buf_len = param->frm_len; + + +#ifdef BIG_ENDIAN_HOST + { + /* for big endian host, copy engine byte_swap is enabled + * But the mgmt frame buffer content is in network byte order + * Need to byte swap the mgmt frame buffer content - so when + * copy engine does byte_swap - target gets buffer content in + * the correct order + */ + int i; + u_int32_t *destp, *srcp; + 
destp = (u_int32_t *)cmd->bufp; + srcp = (u_int32_t *)wmi_buf_data(param->tx_frame); + for (i = 0; i < (roundup(param->frm_len, + sizeof(u_int32_t))/4); i++) { + *destp = qdf_le32_to_cpu(*srcp); + destp++; srcp++; + } + } +#else + qdf_mem_copy(cmd->bufp, wmi_buf_data(param->tx_frame), param->frm_len); +#endif + + /* Send the management frame buffer to the target */ + wmi_unified_cmd_send(wmi_handle, wmi_buf, roundup(len, + sizeof(u_int32_t)), WMI_MGMT_TX_CMDID); + return QDF_STATUS_SUCCESS; +} + +/** + * send_addba_clearresponse_cmd_non_tlv() - send addba clear response command + * to fw + * @wmi_handle: wmi handle + * @param: pointer to addba clearresp params + * @macaddr: vdev mac address + * Return: 0 for success or error code + */ +static QDF_STATUS +send_addba_clearresponse_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct addba_clearresponse_params *param) +{ + wmi_addba_clear_resp_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_addba_clear_resp_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_addba_clear_resp_cmd *)wmi_buf_data(buf); + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + + /* Send the management frame buffer to the target */ + wmi_unified_cmd_send(wmi_handle, buf, len, WMI_ADDBA_CLEAR_RESP_CMDID); + return QDF_STATUS_SUCCESS; +} + +/** + * send_addba_send_cmd_non_tlv() - send addba send command to fw + * @wmi_handle: wmi handle + * @param: pointer to addba send params + * @macaddr: vdev mac address + * Return: 0 for success or error code + */ +static QDF_STATUS +send_addba_send_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct addba_send_params *param) +{ + wmi_addba_send_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_addba_send_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: 
wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_addba_send_cmd *)wmi_buf_data(buf); + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + cmd->tid = param->tidno; + cmd->buffersize = param->buffersize; + + /* Send the management frame buffer to the target */ + wmi_unified_cmd_send(wmi_handle, buf, len, WMI_ADDBA_SEND_CMDID); + return QDF_STATUS_SUCCESS; +} + +/** + * send_delba_send_cmd_non_tlv() - send delba send command to fw + * @wmi_handle: wmi handle + * @param: pointer to delba send params + * @macaddr: vdev mac address + * Return: 0 for success or error code + */ +static QDF_STATUS +send_delba_send_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct delba_send_params *param) +{ + wmi_delba_send_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_delba_send_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_delba_send_cmd *)wmi_buf_data(buf); + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + cmd->tid = param->tidno; + cmd->initiator = param->initiator; + cmd->reasoncode = param->reasoncode; + + /* send the management frame buffer to the target */ + wmi_unified_cmd_send(wmi_handle, buf, len, WMI_DELBA_SEND_CMDID); + return QDF_STATUS_SUCCESS; +} + +/** + * send_addba_setresponse_cmd_non_tlv() - send addba set response command to fw + * @wmi_handle: wmi handle + * @param: pointer to addba setresp params + * @macaddr: vdev mac address + * Return: 0 for success or error code + */ +static QDF_STATUS +send_addba_setresponse_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct addba_setresponse_params *param) +{ + wmi_addba_setresponse_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_addba_setresponse_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + 
qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_addba_setresponse_cmd *)wmi_buf_data(buf); + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + cmd->tid = param->tidno; + cmd->statuscode = param->statuscode; + + /* send the management frame buffer to the target */ + wmi_unified_cmd_send(wmi_handle, buf, len, WMI_ADDBA_SET_RESP_CMDID); + return QDF_STATUS_SUCCESS; +} + +/** + * send_singleamsdu_cmd_non_tlv() - send single amsdu command to fw + * @wmi_handle: wmi handle + * @param: pointer to single amsdu params + * @macaddr: vdev mac address + * Return: 0 for success or error code + */ +static QDF_STATUS +send_singleamsdu_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct singleamsdu_params *param) +{ + wmi_send_singleamsdu_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_send_singleamsdu_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_send_singleamsdu_cmd *)wmi_buf_data(buf); + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + cmd->tid = param->tidno; + + /* send the management frame buffer to the target */ + wmi_unified_cmd_send(wmi_handle, buf, len, WMI_SEND_SINGLEAMSDU_CMDID); + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_qboost_param_cmd_non_tlv() - send set qboost command to fw + * @wmi_handle: wmi handle + * @param: pointer to qboost params + * @macaddr: vdev mac address + * Return: 0 for success or error code + */ +static QDF_STATUS +send_set_qboost_param_cmd_non_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct set_qboost_params *param) +{ + + WMI_QBOOST_CFG_CMD *cmd; + wmi_buf_t buf; + int ret; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + 
} + + cmd = (WMI_QBOOST_CFG_CMD *)wmi_buf_data(buf); + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + cmd->qb_enable = param->value; + + ret = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_QBOOST_CFG_CMDID); + return ret; +} + +/** + * send_mu_scan_cmd_non_tlv() - send mu scan command to fw + * @wmi_handle: wmi handle + * @param: pointer to mu scan params + * Return: 0 for success or error code + */ +static QDF_STATUS +send_mu_scan_cmd_non_tlv(wmi_unified_t wmi_handle, + struct mu_scan_params *param) +{ + wmi_mu_start_cmd *cmd; + wmi_buf_t buf; + + buf = wmi_buf_alloc(wmi_handle, sizeof(wmi_mu_start_cmd)); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_mu_start_cmd *)wmi_buf_data(buf); + cmd->mu_request_id = param->id; + cmd->mu_duration = param->duration; + cmd->mu_type = param->type; + cmd->lteu_tx_power = param->lteu_tx_power; + cmd->rssi_thr_bssid = param->rssi_thr_bssid; + cmd->rssi_thr_sta = param->rssi_thr_sta; + cmd->rssi_thr_sc = param->rssi_thr_sc; + cmd->plmn_id = param->plmn_id; + cmd->alpha_num_bssid = param->alpha_num_bssid; + return wmi_unified_cmd_send(wmi_handle, buf, + sizeof(wmi_mu_start_cmd), + WMI_MU_CAL_START_CMDID); +} + +/** + * send_lteu_config_cmd_non_tlv() - send lteu config command to fw + * @wmi_handle: wmi handle + * @param: pointer to lteu config params + * Return: 0 for success or error code + */ +static QDF_STATUS +send_lteu_config_cmd_non_tlv(wmi_unified_t wmi_handle, + struct lteu_config_params *param) +{ + wmi_set_lteu_config *cmd; + wmi_buf_t buf; + int i; + + buf = wmi_buf_alloc(wmi_handle, sizeof(wmi_set_lteu_config)); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_set_lteu_config *)wmi_buf_data(buf); + cmd->gpio_enable = param->lteu_gpio_start; + cmd->num_lteu_bins = param->lteu_num_bins; + for (i = 0; i < cmd->num_lteu_bins; i++) { + 
cmd->mu_rssi_threshold[i] = param->lteu_thresh[i]; + cmd->mu_weight[i] = param->lteu_weight[i]; + cmd->mu_gamma[i] = param->lteu_gamma[i]; + } + cmd->mu_scan_timeout = param->lteu_scan_timeout; + cmd->alpha_num_bssid = param->alpha_num_bssid; + cmd->use_actual_nf = param->use_actual_nf; + cmd->wifi_tx_power = param->wifi_tx_power; + cmd->allow_err_packets = param->allow_err_packets; + return wmi_unified_cmd_send(wmi_handle, buf, + sizeof(wmi_set_lteu_config), + WMI_SET_LTEU_CONFIG_CMDID); +} + +/** + * send_pdev_get_tpc_config_cmd_non_tlv() - send get tpc config command to fw + * @wmi_handle: wmi handle + * @param: pointer to get tpc config params + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_pdev_get_tpc_config_cmd_non_tlv(wmi_unified_t wmi_handle, + uint32_t param) +{ + wmi_pdev_get_tpc_config_cmd *cmd; + wmi_buf_t buf; + int32_t len = sizeof(wmi_pdev_get_tpc_config_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_pdev_get_tpc_config_cmd *)wmi_buf_data(buf); + cmd->param = param; + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_GET_TPC_CONFIG_CMDID); +} + +/** + * send_set_bwf_cmd_non_tlv() - send set bwf command to fw + * @wmi_handle: wmi handle + * @param: pointer to set bwf param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_set_bwf_cmd_non_tlv(wmi_unified_t wmi_handle, + struct set_bwf_params *param) +{ + struct wmi_bwf_peer_info *peer_info; + wmi_peer_bwf_request *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_peer_bwf_request); + int i, retval = 0; + + len += param->num_peers * sizeof(struct wmi_bwf_peer_info); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_peer_bwf_request *)wmi_buf_data(buf); + qdf_mem_copy(&(cmd->num_peers), &(param->num_peers), sizeof(uint32_t)); + 
peer_info = (struct wmi_bwf_peer_info *)&(cmd->peer_info[0]); + for (i = 0; i < param->num_peers; i++) { + qdf_mem_copy(&(peer_info[i].peer_macaddr), + &(param->peer_info[i].peer_macaddr), + sizeof(wmi_mac_addr)); + peer_info[i].bwf_guaranteed_bandwidth = + param->peer_info[i].throughput; + peer_info[i].bwf_max_airtime = param->peer_info[i].max_airtime; + peer_info[i].bwf_peer_priority = param->peer_info[i].priority; + } + + retval = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_BWF_REQUEST_CMDID); + + if (retval) + wmi_buf_free(buf); + + return retval; +} + +/** + * send_set_atf_cmd_non_tlv() - send set atf command to fw + * @wmi_handle: wmi handle + * @param: pointer to set atf param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_set_atf_cmd_non_tlv(wmi_unified_t wmi_handle, + struct set_atf_params *param) +{ + struct wmi_atf_peer_info *peer_info; + wmi_peer_atf_request *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_peer_atf_request); + int i, retval = 0; + + len += param->num_peers * sizeof(struct wmi_atf_peer_info); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_peer_atf_request *)wmi_buf_data(buf); + qdf_mem_copy(&(cmd->num_peers), &(param->num_peers), sizeof(uint32_t)); + peer_info = (struct wmi_atf_peer_info *)&(cmd->peer_info[0]); + for (i = 0; i < param->num_peers; i++) { + qdf_mem_copy(&(peer_info[i].peer_macaddr), + &(param->peer_info[i].peer_macaddr), + sizeof(wmi_mac_addr)); + peer_info[i].atf_units = param->peer_info[i].percentage_peer; + } +/* qdf_print("wmi_unified_pdev_set_atf peer_num=%d\n", cmd->num_peers); */ + retval = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_ATF_REQUEST_CMDID); + return retval; +} + +/** + * send_atf_peer_request_cmd_non_tlv() - send atf peer request command to fw + * @wmi_handle: wmi handle + * @param: pointer to atf peer request param + * + * Return: 0 for success or 
error code + */ +static QDF_STATUS +send_atf_peer_request_cmd_non_tlv(wmi_unified_t wmi_handle, + struct atf_peer_request_params *param) +{ + struct wmi_atf_peer_ext_info *peer_ext_info; + wmi_peer_atf_ext_request *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_peer_atf_ext_request); + int i, retval = 0; + + len += param->num_peers * sizeof(struct wmi_atf_peer_ext_info); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_peer_atf_ext_request *)wmi_buf_data(buf); + qdf_mem_copy(&(cmd->num_peers), &(param->num_peers), sizeof(uint32_t)); + peer_ext_info = + (struct wmi_atf_peer_ext_info *)&(cmd->peer_ext_info[0]); + for (i = 0; i < param->num_peers; i++) { + qdf_mem_copy(&(peer_ext_info[i].peer_macaddr), + &(param->peer_ext_info[i].peer_macaddr), + sizeof(wmi_mac_addr)); + peer_ext_info[i].atf_groupid = + param->peer_ext_info[i].group_index; + peer_ext_info[i].atf_units_reserved = + param->peer_ext_info[i].atf_index_reserved; + } + retval = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_ATF_EXT_REQUEST_CMDID); + + return retval; +} + +/** + * send_set_atf_grouping_cmd_non_tlv() - send set atf grouping command to fw + * @wmi_handle: wmi handle + * @param: pointer to set atf grouping param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_set_atf_grouping_cmd_non_tlv(wmi_unified_t wmi_handle, + struct atf_grouping_params *param) +{ + struct wmi_atf_group_info *group_info; + wmi_atf_ssid_grp_request *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_atf_ssid_grp_request); + int i, retval = 0; + + len += param->num_groups * sizeof(struct wmi_atf_group_info); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_atf_ssid_grp_request *)wmi_buf_data(buf); + qdf_mem_copy(&(cmd->num_groups), &(param->num_groups), + sizeof(uint32_t)); + group_info = 
(struct wmi_atf_group_info *)&(cmd->group_info[0]); + for (i = 0; i < param->num_groups; i++) { + group_info[i].atf_group_units = + param->group_info[i].percentage_group; + group_info[i].atf_group_units_reserved = + param->group_info[i].atf_group_units_reserved; + } + retval = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ATF_SSID_GROUPING_REQUEST_CMDID); + + return retval; +} + +/** + * send_wlan_profile_enable_cmd_non_tlv() - send wlan profile enable command + * to fw + * @wmi_handle: wmi handle + * @param: pointer to wlan profile param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_wlan_profile_enable_cmd_non_tlv(wmi_unified_t wmi_handle, + struct wlan_profile_params *param) +{ + wmi_buf_t buf; + uint16_t len; + wmi_wlan_profile_enable_profile_id_cmd *cmd; + + len = sizeof(wmi_wlan_profile_enable_profile_id_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_wlan_profile_enable_profile_id_cmd *)wmi_buf_data(buf); + cmd->profile_id = param->profile_id; + cmd->enable = param->enable; + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID); +} + +/** + * send_wlan_profile_trigger_cmd_non_tlv() - send wlan profile trigger command + * to fw + * @wmi_handle: wmi handle + * @param: pointer to wlan profile param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_wlan_profile_trigger_cmd_non_tlv(wmi_unified_t wmi_handle, + struct wlan_profile_params *param) +{ + wmi_buf_t buf; + uint16_t len; + wmi_wlan_profile_trigger_cmd *cmd; + + len = sizeof(wmi_wlan_profile_trigger_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_wlan_profile_trigger_cmd *)wmi_buf_data(buf); + cmd->enable = param->enable; + return wmi_unified_cmd_send(wmi_handle, buf, len, + 
WMI_WLAN_PROFILE_TRIGGER_CMDID); +} + +#ifdef BIG_ENDIAN_HOST +void wmi_host_swap_bytes(void *pv, size_t n) +{ + int noWords; + int i; + uint32_t *wordPtr; + + noWords = n/sizeof(u_int32_t); + wordPtr = (u_int32_t *)pv; + for (i = 0; i < noWords; i++) + *(wordPtr + i) = __cpu_to_le32(*(wordPtr + i)); +} +#define WMI_HOST_SWAPME(x, len) wmi_host_swap_bytes(&x, len); +#endif + +/** + * send_set_ht_ie_cmd_non_tlv() - send ht ie command to fw + * @wmi_handle: wmi handle + * @param: pointer to ht ie param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_set_ht_ie_cmd_non_tlv(wmi_unified_t wmi_handle, + struct ht_ie_params *param) +{ + wmi_pdev_set_ht_ie_cmd *cmd; + wmi_buf_t buf; + /* adjust length to be next multiple of four */ + int len = (param->ie_len + (sizeof(uint32_t) - 1)) & + (~(sizeof(uint32_t) - 1)); + + /* to account for extra four bytes of ie data in the struct */ + len += (sizeof(wmi_pdev_set_ht_ie_cmd) - 4); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_pdev_set_ht_ie_cmd *)wmi_buf_data(buf); + cmd->ie_len = param->ie_len; + qdf_mem_copy(cmd->ie_data, param->ie_data, param->ie_len); +#ifdef BIG_ENDIAN_HOST + WMI_HOST_SWAPME(cmd->ie_data, len-(offsetof(wmi_pdev_set_ht_ie_cmd, + ie_data))); +#endif + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_HT_CAP_IE_CMDID); +} + +/** + * send_set_vht_ie_cmd_non_tlv() - send vht ie command to fw + * @wmi_handle: wmi handle + * @param: pointer to vht ie param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_set_vht_ie_cmd_non_tlv(wmi_unified_t wmi_handle, + struct vht_ie_params *param) +{ + wmi_pdev_set_vht_ie_cmd *cmd; + wmi_buf_t buf; + /* adjust length to be next multiple of four */ + int len = (param->ie_len + (sizeof(u_int32_t) - 1)) & + (~(sizeof(u_int32_t) - 1)); + + /* to account for extra four bytes of ie data in the struct */ + len += 
(sizeof(wmi_pdev_set_vht_ie_cmd) - 4); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_pdev_set_vht_ie_cmd *)wmi_buf_data(buf); + cmd->ie_len = param->ie_len; + qdf_mem_copy(cmd->ie_data, param->ie_data, param->ie_len); +#ifdef BIG_ENDIAN_HOST + WMI_HOST_SWAPME(cmd->ie_data, len-(offsetof(wmi_pdev_set_vht_ie_cmd, + ie_data))); +#endif + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_VHT_CAP_IE_CMDID); +} + +/** + * send_wmm_update_cmd_non_tlv() - send wmm update command to fw + * @wmi_handle: wmi handle + * @param: pointer to wmm update param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_wmm_update_cmd_non_tlv(wmi_unified_t wmi_handle, + struct wmm_update_params *param) +{ + wmi_buf_t buf; + wmi_pdev_set_wmm_params_cmd *cmd; + wmi_wmm_params *wmi_param = 0; + int ac; + int len = sizeof(wmi_pdev_set_wmm_params_cmd); + struct wmi_host_wmeParams *wmep; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_SUCCESS; + } + cmd = (wmi_pdev_set_wmm_params_cmd *)wmi_buf_data(buf); + + for (ac = 0; ac < WME_NUM_AC; ac++) { + wmep = ¶m->wmep_array[ac]; + switch (ac) { + case WMI_HOST_AC_BE: + wmi_param = &cmd->wmm_params_ac_be; + break; + case WMI_HOST_AC_BK: + wmi_param = &cmd->wmm_params_ac_bk; + break; + case WMI_HOST_AC_VI: + wmi_param = &cmd->wmm_params_ac_vi; + break; + case WMI_HOST_AC_VO: + wmi_param = &cmd->wmm_params_ac_vo; + break; + default: + break; + } + + wmi_param->aifs = wmep->wmep_aifsn; + wmi_param->cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); + wmi_param->cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); + wmi_param->txoplimit = ATH_TXOP_TO_US(wmep->wmep_txopLimit); + wmi_param->acm = wmep->wmep_acm; + wmi_param->no_ack = wmep->wmep_noackPolicy; + + } + + wmi_unified_cmd_send(wmi_handle, buf, len, + 
WMI_PDEV_SET_WMM_PARAMS_CMDID); + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_ant_switch_tbl_cmd_non_tlv() - send ant switch tbl cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold ant switch tbl param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_set_ant_switch_tbl_cmd_non_tlv(wmi_unified_t wmi_handle, + struct ant_switch_tbl_params *param) +{ + uint8_t len; + wmi_buf_t buf; + wmi_pdev_set_ant_switch_tbl_cmd *cmd; + + len = sizeof(wmi_pdev_set_ant_switch_tbl_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_pdev_set_ant_switch_tbl_cmd *)wmi_buf_data(buf); + cmd->antCtrlCommon1 = param->ant_ctrl_common1; + cmd->antCtrlCommon2 = param->ant_ctrl_common2; + + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID)) { + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_ratepwr_table_cmd_non_tlv() - send rate power table cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold rate power table param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_set_ratepwr_table_cmd_non_tlv(wmi_unified_t wmi_handle, + struct ratepwr_table_params *param) +{ + uint16_t len; + wmi_buf_t buf; + wmi_pdev_ratepwr_table_cmd *cmd; + + if (!param->ratepwr_tbl) + return QDF_STATUS_E_FAILURE; + + len = sizeof(wmi_pdev_ratepwr_table_cmd); + len += roundup(param->ratepwr_len, sizeof(uint32_t)) - sizeof(uint32_t); + /* already 4 bytes in cmd structure */ + qdf_print("wmi buf len = %d\n", len); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_pdev_ratepwr_table_cmd *)wmi_buf_data(buf); + + cmd->op = RATEPWR_TABLE_OPS_SET; + cmd->ratepwr_len = param->ratepwr_len; + WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(&cmd->ratepwr_tbl[0], + param->ratepwr_tbl, 
param->ratepwr_len); + + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_RATEPWR_TABLE_CMDID)) { + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_get_ratepwr_table_cmd_non_tlv() - send rate power table cmd to fw + * @wmi_handle: wmi handle + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_get_ratepwr_table_cmd_non_tlv(wmi_unified_t wmi_handle) +{ + uint16_t len; + wmi_buf_t buf; + wmi_pdev_ratepwr_table_cmd *cmd; + + len = sizeof(wmi_pdev_ratepwr_table_cmd); + qdf_print("wmi buf len = %d\n", len); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_pdev_ratepwr_table_cmd *)wmi_buf_data(buf); + + cmd->op = RATEPWR_TABLE_OPS_GET; + cmd->ratepwr_len = 0; + cmd->ratepwr_tbl[0] = 0; + + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_RATEPWR_TABLE_CMDID)) { + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_ctl_table_cmd_non_tlv() - send ctl table cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold ctl table param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_set_ctl_table_cmd_non_tlv(wmi_unified_t wmi_handle, + struct ctl_table_params *param) +{ + uint16_t len; + wmi_buf_t buf; + wmi_pdev_set_ctl_table_cmd *cmd; + + if (!param->ctl_array) + return QDF_STATUS_E_FAILURE; + + /* CTL array length check for Beeliner family */ + if (param->target_type == TARGET_TYPE_AR900B || + param->target_type == TARGET_TYPE_QCA9984 || + param->target_type == TARGET_TYPE_IPQ4019 || + param->target_type == TARGET_TYPE_QCA9888) { + if (param->is_2g) { + /* For 2G, CTL array length should be 688*/ + if (param->ctl_cmd_len != + (4 + (WMI_HOST_NUM_CTLS_2G_11B * 2) + + (WMI_HOST_NUM_BAND_EDGES_2G_11B * 3) + + 1 + (WMI_HOST_NUM_CTLS_2G_11B * + WMI_HOST_NUM_BAND_EDGES_2G_11B) + + (WMI_HOST_NUM_CTLS_2G_20MHZ * 2) + + 
(WMI_HOST_NUM_BAND_EDGES_2G_20MHZ * 3) + + 1 + (WMI_HOST_NUM_CTLS_2G_20MHZ * + WMI_HOST_NUM_BAND_EDGES_2G_20MHZ) + + (WMI_HOST_NUM_CTLS_2G_40MHZ * 2) + + (WMI_HOST_NUM_BAND_EDGES_2G_40MHZ * 3) + + (WMI_HOST_NUM_CTLS_2G_40MHZ * + WMI_HOST_NUM_BAND_EDGES_2G_40MHZ) + 4)) { + qdf_print("CTL array len not correct\n"); + return QDF_STATUS_E_FAILURE; + } + } else { + /* For 5G, CTL array length should be 1540 */ + if (param->ctl_cmd_len != + (4 + (WMI_HOST_NUM_CTLS_5G_11A * 2) + + (WMI_HOST_NUM_BAND_EDGES_5G_11A * 3) + + 1 + (WMI_HOST_NUM_CTLS_5G_11A * + WMI_HOST_NUM_BAND_EDGES_5G_11A) + 1 + + (WMI_HOST_NUM_CTLS_5G_HT20 * 2) + + (WMI_HOST_NUM_BAND_EDGES_5G_HT20 * 3) + + 1 + (WMI_HOST_NUM_CTLS_5G_HT20 * + WMI_HOST_NUM_BAND_EDGES_5G_HT20) + + (WMI_HOST_NUM_CTLS_5G_HT40 * 2) + + (WMI_HOST_NUM_BAND_EDGES_5G_HT40 * 3) + + (WMI_HOST_NUM_CTLS_5G_HT40 * + WMI_HOST_NUM_BAND_EDGES_5G_HT40) + + (WMI_HOST_NUM_CTLS_5G_HT80 * 2) + + (WMI_HOST_NUM_BAND_EDGES_5G_HT80 * 3) + + (WMI_HOST_NUM_CTLS_5G_HT80 * + WMI_HOST_NUM_BAND_EDGES_5G_HT80) + + (WMI_HOST_NUM_CTLS_5G_HT160 * 2) + + (WMI_HOST_NUM_BAND_EDGES_5G_HT160 * 3) + + (WMI_HOST_NUM_CTLS_5G_HT160 * + WMI_HOST_NUM_BAND_EDGES_5G_HT160))) { + qdf_print("CTL array len not correct\n"); + return QDF_STATUS_E_FAILURE; + } + } + } else { + if (param->ctl_cmd_len != + WMI_HOST_NUM_CTLS_2G * WMI_HOST_NUM_BAND_EDGES_2G * 2 + + WMI_HOST_NUM_CTLS_5G * WMI_HOST_NUM_BAND_EDGES_5G * 2) { + qdf_print("CTL array len not correct\n"); + return QDF_STATUS_E_FAILURE; + } + } + + len = sizeof(wmi_pdev_set_ctl_table_cmd); + len += roundup(param->ctl_cmd_len, sizeof(uint32_t)) - sizeof(uint32_t); + qdf_print("wmi buf len = %d\n", len); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_pdev_set_ctl_table_cmd *)wmi_buf_data(buf); + + cmd->ctl_len = param->ctl_cmd_len; + WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(&cmd->ctl_info[0], ¶m->ctl_band, + 
sizeof(param->ctl_band)); + WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(&cmd->ctl_info[1], param->ctl_array, + param->ctl_cmd_len - sizeof(param->ctl_band)); + + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_CTL_TABLE_CMDID)) { + qdf_print("%s:Failed to send command\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_mimogain_table_cmd_non_tlv() - send mimogain table cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold mimogain table param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_set_mimogain_table_cmd_non_tlv(wmi_unified_t wmi_handle, + struct mimogain_table_params *param) +{ + uint16_t len; + wmi_buf_t buf; + wmi_pdev_set_mimogain_table_cmd *cmd; + + if (!param->array_gain) + return QDF_STATUS_E_FAILURE; + + /* len must be multiple of a single array gain table */ + if (param->tbl_len % + ((WMI_HOST_TX_NUM_CHAIN-1) * WMI_HOST_TPC_REGINDEX_MAX * + WMI_HOST_ARRAY_GAIN_NUM_STREAMS) != 0) { + qdf_print("Array gain table len not correct\n"); + return QDF_STATUS_E_FAILURE; + } + + len = sizeof(wmi_pdev_set_mimogain_table_cmd); + len += roundup(param->tbl_len, sizeof(uint32_t)) - sizeof(uint32_t); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_pdev_set_mimogain_table_cmd *)wmi_buf_data(buf); + + WMI_MIMOGAIN_ARRAY_GAIN_LEN_SET(cmd->mimogain_info, param->tbl_len); + WMI_MIMOGAIN_MULTI_CHAIN_BYPASS_SET(cmd->mimogain_info, + param->multichain_gain_bypass); + WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(&cmd->arraygain_tbl[0], + param->array_gain, + param->tbl_len); + + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_MIMOGAIN_TABLE_CMDID)) { + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_ratepwr_chainmsk_cmd_non_tlv() - send ratepwr chainmask cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold ratepwr chainmask 
param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_set_ratepwr_chainmsk_cmd_non_tlv(wmi_unified_t wmi_handle, + struct ratepwr_chainmsk_params *param) +{ +#define RC_CCK_OFDM_RATES 0 +#define RC_HT_RATES 1 +#define RC_VHT_RATES 2 + uint16_t len; + wmi_buf_t buf; + wmi_pdev_ratepwr_chainmsk_tbl_cmd *cmd; + + if (!param->ratepwr_chain_tbl) + return QDF_STATUS_E_FAILURE; + + len = sizeof(wmi_pdev_ratepwr_chainmsk_tbl_cmd); + len += roundup(param->num_rate*sizeof(uint32_t), sizeof(uint32_t)); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_pdev_ratepwr_chainmsk_tbl_cmd *)wmi_buf_data(buf); + cmd->op = param->ops; + cmd->pream_type = param->pream_type; + cmd->rate_len = param->num_rate; + + if (param->ops == RATEPWR_CHAINMSK_TABLE_OPS_EN) { + qdf_mem_copy(&cmd->ratepwr_chaintbl[0], + param->ratepwr_chain_tbl, + param->num_rate*sizeof(u_int32_t)); + } + + wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID); + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_macaddr_cmd_non_tlv() - send set macaddr cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold macaddr param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_set_macaddr_cmd_non_tlv(wmi_unified_t wmi_handle, + struct macaddr_params *param) +{ + uint8_t len; + wmi_buf_t buf; + wmi_pdev_set_base_macaddr_cmd *cmd; + + len = sizeof(wmi_pdev_set_base_macaddr_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_pdev_set_base_macaddr_cmd *)wmi_buf_data(buf); + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->macaddr, &cmd->base_macaddr); + + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_BASE_MACADDR_CMDID)) { + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * 
send_pdev_scan_start_cmd_non_tlv() - send pdev scan start cmd to fw
+ * @wmi_handle: wmi handle
+ *
+ * Return: 0 for success or error code
+ */
+static QDF_STATUS
+send_pdev_scan_start_cmd_non_tlv(wmi_unified_t wmi_handle)
+{
+	/*
+	 * this command was added to support host scan engine which is
+	 * deprecated. now the scan engine is in FW and host directly
+	 * issues a scan request to perform scan and provide results back
+	 * to host
+	 */
+	wmi_buf_t buf;
+	wmi_pdev_scan_cmd *cmd;
+	int len = sizeof(wmi_pdev_scan_cmd);
+
+	buf = wmi_buf_alloc(wmi_handle, len);
+	qdf_print("%s:\n", __func__);
+	if (!buf) {
+		qdf_print("%s:wmi_buf_alloc failed\n", __func__);
+		return QDF_STATUS_E_NOMEM;
+	}
+	cmd = (wmi_pdev_scan_cmd *)wmi_buf_data(buf);
+	cmd->scan_start = TRUE;
+	/* actual send is compiled out unless DEPRECATE_WMI is set */
+#if DEPRECATE_WMI
+	wmi_unified_cmd_send(wmi_handle, buf, len, WMI_PDEV_SCAN_CMDID);
+#endif
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * send_pdev_scan_end_cmd_non_tlv() - send pdev scan end cmd to fw
+ * @wmi_handle: wmi handle
+ *
+ * Return: 0 for success or error code
+ */
+static QDF_STATUS
+send_pdev_scan_end_cmd_non_tlv(wmi_unified_t wmi_handle)
+{
+	/*
+	 * this command was added to support host scan engine which is
+	 * deprecated. 
now the scan engine is in FW and host directly issues
+	 * a scan request to perform scan and provide results back to host
+	 */
+	wmi_pdev_scan_cmd *cmd;
+	wmi_buf_t buf;
+	int len = sizeof(wmi_pdev_scan_cmd);
+
+	buf = wmi_buf_alloc(wmi_handle, len);
+	qdf_print("%s:\n", __func__);
+	if (!buf) {
+		qdf_print("%s:wmi_buf_alloc failed\n", __func__);
+		return QDF_STATUS_E_NOMEM;
+	}
+	cmd = (wmi_pdev_scan_cmd *)wmi_buf_data(buf);
+	cmd->scan_start = FALSE;
+	/* actual send is compiled out unless DEPRECATE_WMI is set */
+#if DEPRECATE_WMI
+	wmi_unified_cmd_send(wmi_handle, buf, len, WMI_PDEV_SCAN_CMDID);
+#endif
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * send_set_acparams_cmd_non_tlv() - send acparams cmd to fw
+ * @wmi_handle: wmi handle
+ * @param: pointer to hold acparams
+ *
+ * Return: 0 for success or error code
+ */
+static QDF_STATUS
+send_set_acparams_cmd_non_tlv(wmi_unified_t wmi_handle,
+			struct acparams_params *param)
+{
+	wmi_pdev_set_param_cmd *cmd;
+	wmi_buf_t buf;
+	uint32_t param_value = 0;
+	int len = sizeof(wmi_pdev_set_param_cmd);
+
+	buf = wmi_buf_alloc(wmi_handle, len);
+	if (!buf) {
+		qdf_print("%s:wmi_buf_alloc failed\n", __func__);
+		return QDF_STATUS_E_NOMEM;
+	}
+
+	cmd = (wmi_pdev_set_param_cmd *)wmi_buf_data(buf);
+	cmd->param_id = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING;
+	/* pack AC index into bits 0-7 and scaling factor into bits 8+ */
+	param_value = param->ac;
+	param_value |= (param->aggrsize_scaling << 8);
+	cmd->param_value = param_value;
+
+	wmi_unified_cmd_send(wmi_handle, buf, len, WMI_PDEV_SET_PARAM_CMDID);
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * send_set_vap_dscp_tid_map_cmd_non_tlv() - send vap dscp tid map cmd to fw
+ * @wmi_handle: wmi handle
+ * @param: pointer to hold vap dscp tid map param
+ *
+ * Return: 0 for success or error code
+ */
+static QDF_STATUS
+send_set_vap_dscp_tid_map_cmd_non_tlv(wmi_unified_t wmi_handle,
+			struct vap_dscp_tid_map_params *param)
+{
+	wmi_buf_t buf;
+	wmi_vdev_set_dscp_tid_map_cmd *cmd_vdev;
+	int len_vdev = sizeof(wmi_vdev_set_dscp_tid_map_cmd);
+
+	buf = wmi_buf_alloc(wmi_handle, len_vdev);
+	if (!buf) {
+		
qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd_vdev = (wmi_vdev_set_dscp_tid_map_cmd *)wmi_buf_data(buf); + qdf_mem_copy(cmd_vdev->dscp_to_tid_map, param->dscp_to_tid_map, + sizeof(uint32_t) * WMI_DSCP_MAP_MAX); + + cmd_vdev->vdev_id = param->vdev_id; + + qdf_print("Setting dscp for vap id: %d\n", cmd_vdev->vdev_id); + return wmi_unified_cmd_send(wmi_handle, buf, len_vdev, + WMI_VDEV_SET_DSCP_TID_MAP_CMDID); +} + +/** + * send_proxy_ast_reserve_cmd_non_tlv() - send proxy ast reserve cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold proxy ast reserve param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_proxy_ast_reserve_cmd_non_tlv(wmi_unified_t wmi_handle, + struct proxy_ast_reserve_params *param) +{ + wmi_pdev_reserve_ast_entry_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_pdev_reserve_ast_entry_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_pdev_reserve_ast_entry_cmd *)wmi_buf_data(buf); + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->macaddr, &cmd->mac_addr); + cmd->key_id = 0; + cmd->mcast = 0; + + qdf_print("%s macaddr=%s key_id=%d mcast=%d\n", __func__, + ether_sprintf(param->macaddr), cmd->key_id, cmd->mcast); + + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_RESERVE_AST_ENTRY_CMDID); +} + +/** + * send_pdev_fips_cmd_non_tlv() - send pdev fips cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold pdev fips param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_pdev_fips_cmd_non_tlv(wmi_unified_t wmi_handle, + struct fips_params *param) +{ + wmi_pdev_fips_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_pdev_fips_cmd) + param->data_len; + int retval = 0; + + /* Data length must be multiples of 16 bytes - checked against 0xF - + * and must be less than WMI_SVC_MSG_SIZE - static size of + * wmi_pdev_fips_cmd 
structure + */ + /* do sanity on the input */ + if (!(((param->data_len & 0xF) == 0) && + ((param->data_len > 0) && + (param->data_len < (WMI_HOST_MAX_BUFFER_SIZE - + sizeof(wmi_pdev_fips_cmd)))))) { + return QDF_STATUS_E_INVAL; + } + + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_pdev_fips_cmd *)wmi_buf_data(buf); + if (param->key != NULL && param->data != NULL) { + cmd->key_len = param->key_len; + cmd->data_len = param->data_len; + cmd->fips_cmd = !!(param->op); + +#ifdef BIG_ENDIAN_HOST + { + /****************BE to LE conversion*****************/ + /* Assigning unaligned space to copy the key */ + unsigned char *key_unaligned = qdf_mem_malloc( + sizeof(u_int8_t)*param->key_len + FIPS_ALIGN); + + u_int8_t *key_aligned = NULL; + + unsigned char *data_unaligned = qdf_mem_malloc( + sizeof(u_int8_t)*param->data_len + FIPS_ALIGN); + u_int8_t *data_aligned = NULL; + + int c; + + /* Checking if kmalloc is successful to allocate space */ + if (key_unaligned == NULL) + return QDF_STATUS_SUCCESS; + /* Checking if space is aligned */ + if (!FIPS_IS_ALIGNED(key_unaligned, FIPS_ALIGN)) { + /* align to 4 */ + key_aligned = + (u_int8_t *)FIPS_ALIGNTO(key_unaligned, + FIPS_ALIGN); + } else { + key_aligned = (u_int8_t *)key_unaligned; + } + + /* memset and copy content from key to key aligned */ + OS_MEMSET(key_aligned, 0, param->key_len); + OS_MEMCPY(key_aligned, param->key, param->key_len); + + /* print a hexdump for host debug */ + print_hex_dump(KERN_DEBUG, + "\t Aligned and Copied Key:@@@@ ", + DUMP_PREFIX_NONE, + 16, 1, key_aligned, param->key_len, true); + + /* Checking if kmalloc is successful to allocate space */ + if (data_unaligned == NULL) + return QDF_STATUS_SUCCESS; + /* Checking of space is aligned */ + if (!FIPS_IS_ALIGNED(data_unaligned, FIPS_ALIGN)) { + /* align to 4 */ + data_aligned = + (u_int8_t *)FIPS_ALIGNTO(data_unaligned, + FIPS_ALIGN); + } else 
{ + data_aligned = (u_int8_t *)data_unaligned; + } + + /* memset and copy content from data to data aligned */ + OS_MEMSET(data_aligned, 0, param->data_len); + OS_MEMCPY(data_aligned, param->data, param->data_len); + + /* print a hexdump for host debug */ + print_hex_dump(KERN_DEBUG, + "\t Properly Aligned and Copied Data:@@@@ ", + DUMP_PREFIX_NONE, + 16, 1, data_aligned, param->data_len, true); + + /* converting to little Endian both key_aligned and + * data_aligned*/ + for (c = 0; c < param->key_len/4; c++) { + *((u_int32_t *)key_aligned+c) = + qdf_cpu_to_le32(*((u_int32_t *)key_aligned+c)); + } + for (c = 0; c < param->data_len/4; c++) { + *((u_int32_t *)data_aligned+c) = + qdf_cpu_to_le32(*((u_int32_t *)data_aligned+c)); + } + + /* update endian data to key and data vectors */ + OS_MEMCPY(param->key, key_aligned, param->key_len); + OS_MEMCPY(param->data, data_aligned, param->data_len); + + /* clean up allocated spaces */ + qdf_mem_free(key_unaligned); + key_unaligned = NULL; + key_aligned = NULL; + + qdf_mem_free(data_unaligned); + data_unaligned = NULL; + data_aligned = NULL; + + /*****************************************************/ + } +#endif + qdf_mem_copy(cmd->key, param->key, param->key_len); + qdf_mem_copy(cmd->data, param->data, param->data_len); + + if (param->mode == FIPS_ENGINE_AES_CTR || + param->mode == FIPS_ENGINE_AES_MIC) { + cmd->mode = param->mode; + } else { + cmd->mode = FIPS_ENGINE_AES_CTR; + } + qdf_print(KERN_ERR "Key len = %d, Data len = %d\n", + cmd->key_len, cmd->data_len); + + print_hex_dump(KERN_DEBUG, "Key: ", DUMP_PREFIX_NONE, 16, 1, + cmd->key, cmd->key_len, true); + print_hex_dump(KERN_DEBUG, "Plain text: ", DUMP_PREFIX_NONE, + 16, 1, cmd->data, cmd->data_len, true); + + retval = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_FIPS_CMDID); + qdf_print("%s return value %d\n", __func__, retval); + } else { + qdf_print("\n%s:%d Key or Data is NULL\n", __func__, __LINE__); + retval = -EFAULT; + } + + return retval; +} + +/** + 
* send_pdev_set_chan_cmd_non_tlv() - send pdev set chan cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold set chan param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_pdev_set_chan_cmd_non_tlv(wmi_unified_t wmi_handle, + struct channel_param *param) +{ + wmi_set_channel_cmd *cmd; + wmi_buf_t buf; + int len = sizeof(wmi_set_channel_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_set_channel_cmd *)wmi_buf_data(buf); + + cmd->chan.mhz = param->mhz; + + WMI_SET_CHANNEL_MODE(&cmd->chan, param->phy_mode); + + cmd->chan.band_center_freq1 = param->cfreq1; + cmd->chan.band_center_freq2 = param->cfreq2; + + WMI_SET_CHANNEL_MIN_POWER(&cmd->chan, param->minpower); + WMI_SET_CHANNEL_MAX_POWER(&cmd->chan, param->maxpower); + WMI_SET_CHANNEL_REG_POWER(&cmd->chan, param->maxregpower); + WMI_SET_CHANNEL_ANTENNA_MAX(&cmd->chan, param->antennamax); + WMI_SET_CHANNEL_REG_CLASSID(&cmd->chan, param->reg_class_id); + + if (param->dfs_set) + WMI_SET_CHANNEL_FLAG(&cmd->chan, WMI_CHAN_FLAG_DFS); + + if (param->dfs_set_cfreq2) + WMI_SET_CHANNEL_FLAG(&cmd->chan, WMI_CHAN_FLAG_DFS_CFREQ2); + + if (param->half_rate) + WMI_SET_CHANNEL_FLAG(&cmd->chan, WMI_CHAN_FLAG_HALF); + + if (param->quarter_rate) + WMI_SET_CHANNEL_FLAG(&cmd->chan, WMI_CHAN_FLAG_QUARTER); + + if ((param->phy_mode == MODE_11AC_VHT80_80) || + (param->phy_mode == MODE_11AC_VHT160)) { + qdf_print( + "WMI channel freq=%d, mode=%x band_center_freq1=%d band_center_freq2=%d\n", + cmd->chan.mhz, + WMI_GET_CHANNEL_MODE(&cmd->chan), cmd->chan.band_center_freq1, + cmd->chan.band_center_freq2); + } else { + qdf_print("WMI channel freq=%d, mode=%x band_center_freq1=%d\n" + , cmd->chan.mhz, + WMI_GET_CHANNEL_MODE(&cmd->chan), + cmd->chan.band_center_freq1); + } + + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_CHANNEL_CMDID); +} + +/** + * 
send_mcast_group_update_cmd_non_tlv() - send mcast group update cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold mcast update param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_mcast_group_update_cmd_non_tlv(wmi_unified_t wmi_handle, + struct mcast_group_update_params *param) +{ + wmi_peer_mcast_group_cmd *cmd; + wmi_buf_t buf; + int len; + int offset = 0; + static char dummymask[4] = { 0xFF, 0xFF, 0xFF, 0xFF}; + + len = sizeof(wmi_peer_mcast_group_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_peer_mcast_group_cmd *) wmi_buf_data(buf); + /* confirm the buffer is 4-byte aligned */ + ASSERT((((size_t) cmd) & 0x3) == 0); + OS_MEMZERO(cmd, sizeof(wmi_peer_mcast_group_cmd)); + + cmd->vdev_id = param->vap_id; + /* construct the message assuming our endianness matches the target */ + cmd->flags |= WMI_PEER_MCAST_GROUP_FLAG_ACTION_M & + (param->action << WMI_PEER_MCAST_GROUP_FLAG_ACTION_S); + cmd->flags |= WMI_PEER_MCAST_GROUP_FLAG_WILDCARD_M & + (param->wildcard << WMI_PEER_MCAST_GROUP_FLAG_WILDCARD_S); + if (param->is_action_delete) + cmd->flags |= WMI_PEER_MCAST_GROUP_FLAG_DELETEALL_M; + + if (param->is_mcast_addr_len) + cmd->flags |= WMI_PEER_MCAST_GROUP_FLAG_IPV6_M; + + if (param->is_filter_mode_snoop) + cmd->flags |= WMI_PEER_MCAST_GROUP_FLAG_SRC_FILTER_EXCLUDE_M; + + /* unicast address spec only applies for non-wildcard cases */ + if (!param->wildcard && param->ucast_mac_addr) { + qdf_mem_copy( + &cmd->ucast_mac_addr, + param->ucast_mac_addr, + sizeof(cmd->ucast_mac_addr)); + } + if (param->mcast_ip_addr) { + ASSERT(param->mcast_ip_addr_bytes <= + sizeof(cmd->mcast_ip_addr)); + offset = sizeof(cmd->mcast_ip_addr) - + param->mcast_ip_addr_bytes; + qdf_mem_copy(((u_int8_t *) &cmd->mcast_ip_addr) + offset, + param->mcast_ip_addr, + param->mcast_ip_addr_bytes); + } + if (!param->mask) + param->mask = 
&dummymask[0]; + + qdf_mem_copy(((u_int8_t *) &cmd->mcast_ip_mask) + offset, param->mask, + param->mcast_ip_addr_bytes); + + if (param->srcs && param->nsrcs) { + cmd->num_filter_addr = param->nsrcs; + ASSERT((param->nsrcs * param->mcast_ip_addr_bytes) <= + sizeof(cmd->srcs)); + + qdf_mem_copy(((u_int8_t *) &cmd->filter_addr), param->srcs, + param->nsrcs * param->mcast_ip_addr_bytes); + } + /* now correct for endianness, if necessary */ + /* + * For Little Endian, N/w Stack gives packets in Network byte order and + * issue occurs if both Host and Target happens to be in Little Endian. + * Target when compares IP addresses in packet with MCAST_GROUP_CMDID + * given IP addresses, it fails. Hence swap only mcast_ip_addr + * (16 bytes) for now. + * TODO : filter + */ +/* TBD in OL Layer +#ifdef BIG_ENDIAN_HOST + ol_bytestream_endian_fix( + (u_int32_t *)&cmd->ucast_mac_addr, + (sizeof(*cmd)-4) / sizeof(u_int32_t)); +#else + ol_bytestream_endian_fix( + (u_int32_t *)&cmd->mcast_ip_addr, + (sizeof(cmd->mcast_ip_addr)) / sizeof(u_int32_t)); +#endif Little Endian */ + wmi_unified_cmd_send( + wmi_handle, buf, len, WMI_PEER_MCAST_GROUP_CMDID); + + return QDF_STATUS_SUCCESS; +} + +/** + * send_periodic_chan_stats_config_cmd_non_tlv() - send periodic chan stats cmd + * to fw + * @wmi_handle: wmi handle + * @param: pointer to hold periodic chan stats param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_periodic_chan_stats_config_cmd_non_tlv(wmi_unified_t wmi_handle, + struct periodic_chan_stats_params *param) +{ + wmi_buf_t buf = NULL; + wmi_set_periodic_channel_stats_config *cmd = NULL; + QDF_STATUS error = 0; + int32_t len = 0; + + len = sizeof(wmi_set_periodic_channel_stats_config); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: Unable to allocate merory\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_set_periodic_channel_stats_config *) wmi_buf_data(buf); + cmd->enable = param->enable; + cmd->stats_period = 
param->stats_period; + + error = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_SET_PERIODIC_CHANNEL_STATS_CONFIG); + + if (error) + qdf_print(" %s :WMI Failed\n", __func__); + + return error; +} + +/** + * send_nf_dbr_dbm_info_get_cmd_non_tlv() - send request to get nf to fw + * @wmi_handle: wmi handle + * @mac_id: radio context + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_nf_dbr_dbm_info_get_cmd_non_tlv(wmi_unified_t wmi_handle, uint8_t mac_id) +{ + wmi_buf_t wmibuf; + + wmibuf = wmi_buf_alloc(wmi_handle, 0); + if (wmibuf == NULL) + return QDF_STATUS_E_NOMEM; + + return wmi_unified_cmd_send(wmi_handle, wmibuf, 0, + WMI_PDEV_GET_NFCAL_POWER_CMDID); +} + +/** + * enum packet_power_non_tlv_flags: Target defined + * packet power rate flags + * @WMI_NON_TLV_FLAG_ONE_CHAIN: one chain + * @WMI_NON_TLV_FLAG_TWO_CHAIN: two chain + * @WMI_NON_TLV_FLAG_THREE_CHAIN: three chain + * @WMI_NON_TLV_FLAG_FOUR_CHAIN: four chain + * @WMI_NON_TLV_FLAG_STBC: STBC is set + * @WMI_NON_TLV_FLAG_40MHZ: 40MHz channel width + * @WMI_NON_TLV_FLAG_80MHZ: 80MHz channel width + * @WMI_NON_TLV_FLAG_1600MHZ: 1600MHz channel width + * @WMI_NON_TLV_FLAG_TXBF: Tx Bf enabled + * @WMI_NON_TLV_FLAG_RTSENA: RTS enabled + * @WMI_NON_TLV_FLAG_CTSENA: CTS enabled + * @WMI_NON_TLV_FLAG_LDPC: LDPC is set + * @WMI_NON_TLV_FLAG_SERIES1: Rate series 1 + * @WMI_NON_TLV_FLAG_SGI: Short gaurd interval + * @WMI_NON_TLV_FLAG_MU2: MU2 data + * @WMI_NON_TLV_FLAG_MU3: MU3 data + */ +enum packet_power_non_tlv_flags { + WMI_NON_TLV_FLAG_ONE_CHAIN = 0x0001, + WMI_NON_TLV_FLAG_TWO_CHAIN = 0x0005, + WMI_NON_TLV_FLAG_THREE_CHAIN = 0x0007, + WMI_NON_TLV_FLAG_FOUR_CHAIN = 0x000F, + WMI_NON_TLV_FLAG_STBC = 0x0010, + WMI_NON_TLV_FLAG_40MHZ = 0x0020, + WMI_NON_TLV_FLAG_80MHZ = 0x0040, + WMI_NON_TLV_FLAG_160MHZ = 0x0080, + WMI_NON_TLV_FLAG_TXBF = 0x0100, + WMI_NON_TLV_FLAG_RTSENA = 0x0200, + WMI_NON_TLV_FLAG_CTSENA = 0x0400, + WMI_NON_TLV_FLAG_LDPC = 0x0800, + WMI_NON_TLV_FLAG_SERIES1 = 0x1000, + 
WMI_NON_TLV_FLAG_SGI = 0x2000, + WMI_NON_TLV_FLAG_MU2 = 0x4000, + WMI_NON_TLV_FLAG_MU3 = 0x8000, +}; + +/** + * convert_to_power_info_rate_flags() - convert packet_power_info_params + * to FW understandable format + * @param: pointer to hold packet power info param + * + * @return FW understandable 16 bit rate flags + */ +static uint16_t +convert_to_power_info_rate_flags(struct packet_power_info_params *param) +{ + uint16_t rateflags = 0; + + if (param->chainmask) + rateflags |= (param->chainmask & 0xf); + if (param->chan_width == WMI_HOST_CHAN_WIDTH_40) + rateflags |= WMI_NON_TLV_FLAG_40MHZ; + if (param->chan_width == WMI_HOST_CHAN_WIDTH_80) + rateflags |= WMI_NON_TLV_FLAG_80MHZ; + if (param->chan_width == WMI_HOST_CHAN_WIDTH_160) + rateflags |= WMI_NON_TLV_FLAG_160MHZ; + if (param->rate_flags & WMI_HOST_FLAG_STBC) + rateflags |= WMI_NON_TLV_FLAG_STBC; + if (param->rate_flags & WMI_HOST_FLAG_LDPC) + rateflags |= WMI_NON_TLV_FLAG_LDPC; + if (param->rate_flags & WMI_HOST_FLAG_TXBF) + rateflags |= WMI_NON_TLV_FLAG_TXBF; + if (param->rate_flags & WMI_HOST_FLAG_RTSENA) + rateflags |= WMI_NON_TLV_FLAG_RTSENA; + if (param->rate_flags & WMI_HOST_FLAG_CTSENA) + rateflags |= WMI_NON_TLV_FLAG_CTSENA; + if (param->rate_flags & WMI_HOST_FLAG_SGI) + rateflags |= WMI_NON_TLV_FLAG_SGI; + if (param->rate_flags & WMI_HOST_FLAG_SERIES1) + rateflags |= WMI_NON_TLV_FLAG_SERIES1; + if (param->rate_flags & WMI_HOST_FLAG_MU2) + rateflags |= WMI_NON_TLV_FLAG_MU2; + if (param->rate_flags & WMI_HOST_FLAG_MU3) + rateflags |= WMI_NON_TLV_FLAG_MU3; + + return rateflags; +} + +/** + * send_packet_power_info_get_cmd_non_tlv() - send request to get packet power + * info to fw + * @wmi_handle: wmi handle + * @param: pointer to hold packet power info param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_packet_power_info_get_cmd_non_tlv(wmi_unified_t wmi_handle, + struct packet_power_info_params *param) +{ + wmi_pdev_get_tpc_cmd *cmd; + wmi_buf_t wmibuf; + u_int32_t len = 
sizeof(wmi_pdev_get_tpc_cmd); + + wmibuf = wmi_buf_alloc(wmi_handle, len); + if (wmibuf == NULL) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_pdev_get_tpc_cmd *)wmi_buf_data(wmibuf); + cmd->rate_flags = convert_to_power_info_rate_flags(param); + cmd->nss = param->nss; + cmd->preamble = param->preamble; + cmd->hw_rate = param->hw_rate; + cmd->rsvd = 0x0; + + WMI_LOGD("%s[%d] commandID %d, wmi_pdev_get_tpc_cmd=0x%x," + "rate_flags: 0x%x, nss: %d, preamble: %d, hw_rate: %d\n", + __func__, __LINE__, WMI_PDEV_GET_TPC_CMDID, *((u_int32_t *)cmd), + cmd->rate_flags, cmd->nss, cmd->preamble, cmd->hw_rate); + + return wmi_unified_cmd_send(wmi_handle, wmibuf, len, + WMI_PDEV_GET_TPC_CMDID); +} + +/** + * send_gpio_config_cmd_non_tlv() - send gpio config to fw + * @wmi_handle: wmi handle + * @param: pointer to hold gpio config param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_gpio_config_cmd_non_tlv(wmi_unified_t wmi_handle, + struct gpio_config_params *param) +{ + wmi_gpio_config_cmd *cmd; + wmi_buf_t wmibuf; + u_int32_t len = sizeof(wmi_gpio_config_cmd); + + /* Sanity Checks */ + if (param->pull_type > WMI_GPIO_PULL_DOWN || + param->intr_mode > WMI_GPIO_INTTYPE_LEVEL_HIGH) { + return QDF_STATUS_E_FAILURE; + } + + wmibuf = wmi_buf_alloc(wmi_handle, len); + if (wmibuf == NULL) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_gpio_config_cmd *)wmi_buf_data(wmibuf); + cmd->gpio_num = param->gpio_num; + cmd->input = param->input; + cmd->pull_type = param->pull_type; + cmd->intr_mode = param->intr_mode; + return wmi_unified_cmd_send(wmi_handle, wmibuf, len, + WMI_GPIO_CONFIG_CMDID); +} + +/** + * send_gpio_output_cmd_non_tlv() - send gpio output to fw + * @wmi_handle: wmi handle + * @param: pointer to hold gpio output param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_gpio_output_cmd_non_tlv(wmi_unified_t wmi_handle, + struct gpio_output_params *param) +{ + wmi_gpio_output_cmd *cmd; + wmi_buf_t wmibuf; + u_int32_t len = 
sizeof(wmi_gpio_output_cmd); + + wmibuf = wmi_buf_alloc(wmi_handle, len); + if (wmibuf == NULL) + return QDF_STATUS_E_FAILURE; + + cmd = (wmi_gpio_output_cmd *)wmi_buf_data(wmibuf); + cmd->gpio_num = param->gpio_num; + cmd->set = param->set; + return wmi_unified_cmd_send(wmi_handle, wmibuf, len, + WMI_GPIO_OUTPUT_CMDID); +} + +/* + * send_rtt_meas_req_test_cmd_non_tlv() - send rtt meas req test cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold rtt meas req test param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_rtt_meas_req_test_cmd_non_tlv(wmi_unified_t wmi_handle, + struct rtt_meas_req_test_params *param) +{ + wmi_buf_t buf; + u_int8_t *p; + int ret; + u_int16_t len; + wmi_rtt_measreq_head *head; + wmi_rtt_measreq_body *body; + wmi_channel *w_chan; + + qdf_print("%s: The request ID is: %d\n", __func__, param->req_id); + + len = sizeof(wmi_rtt_measreq_head) + param->req_num_req * + sizeof(wmi_rtt_measreq_body); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("No WMI resource!"); + return QDF_STATUS_E_NOMEM; + } + + p = (u_int8_t *) wmi_buf_data(buf); + qdf_mem_set(p, len, 0); + + head = (wmi_rtt_measreq_head *) p; + WMI_RTT_REQ_ID_SET(head->req_id, param->req_id); + WMI_RTT_SPS_SET(head->req_id, 1); + + WMI_RTT_NUM_STA_SET(head->sta_num, param->req_num_req); + + body = &(head->body[0]); + WMI_RTT_VDEV_ID_SET(body->measure_info, 0); + WMI_RTT_TIMEOUT_SET(body->measure_info, 100); + WMI_RTT_REPORT_TYPE_SET(body->measure_info, param->req_report_type); + WMI_RTT_FRAME_TYPE_SET(body->control_flag, param->req_frame_type); + + WMI_RTT_TX_CHAIN_SET(body->control_flag, 001); + WMI_RTT_QCA_PEER_SET(body->control_flag, 1); + if (param->req_preamble == WMI_RTT_PREAM_LEGACY) + WMI_RTT_MCS_SET(body->control_flag, 3); + else + WMI_RTT_MCS_SET(body->control_flag, 0); + WMI_RTT_RETRIES_SET(body->control_flag, 1); + + /* + qdf_mem_copy(peer, param->mac_addr, 6); + + qdf_print("The mac_addr is" + " 
%.2x:%.2x:%.2x:%.2x:%.2x:%.2x extra=%d\n", + peer[0], peer[1], peer[2], + peer[3], peer[4], peer[5], param->extra); + */ + + /* start from here, embed the first req in each RTT measurement + * Command */ + /*peer[5] = 0x12; + peer[4] = 0x90; + peer[3] = 0x78; + peer[2] = 0x56; + peer[1] = 0x34; + peer[0] = 0x12; +>---*/ + head->channel.mhz = param->channel.mhz; + head->channel.band_center_freq1 = param->channel.cfreq1; + head->channel.band_center_freq2 = param->channel.cfreq2; + + + w_chan = (wmi_channel *)&head->channel; + WMI_SET_CHANNEL_MODE(w_chan, param->channel.phy_mode); + WMI_SET_CHANNEL_MIN_POWER(w_chan, param->channel.minpower); + WMI_SET_CHANNEL_MAX_POWER(w_chan, param->channel.maxpower); + WMI_SET_CHANNEL_REG_POWER(w_chan, param->channel.maxregpower); + WMI_SET_CHANNEL_ANTENNA_MAX(w_chan, param->channel.antennamax); + WMI_SET_CHANNEL_REG_CLASSID(w_chan, param->channel.reg_class_id); + + WMI_CHAR_ARRAY_TO_MAC_ADDR(((u_int8_t *)param->peer), &body->dest_mac); + WMI_CHAR_ARRAY_TO_MAC_ADDR(((u_int8_t *)param->peer), + &body->spoof_bssid); + + WMI_RTT_BW_SET(body->control_flag, param->req_bw); + WMI_RTT_PREAMBLE_SET(body->control_flag, param->req_preamble); + WMI_RTT_MEAS_NUM_SET(body->measure_info, param->num_measurements); + + body->measure_params_1 = 0; + body->measure_params_2 = 0; + + WMI_RTT_ASAP_MODE_SET(body->measure_params_1, param->asap_mode); + WMI_RTT_LCI_REQ_SET(body->measure_params_1, param->lci_requested); + WMI_RTT_LOC_CIV_REQ_SET(body->measure_params_1, + param->loc_civ_requested); + WMI_RTT_NUM_BURST_EXP_SET(body->measure_params_1, 0); + WMI_RTT_BURST_DUR_SET(body->measure_params_1, 15); + WMI_RTT_BURST_PERIOD_SET(body->measure_params_1, 0); + WMI_RTT_TSF_DELTA_VALID_SET(body->measure_params_1, 1); + WMI_RTT_TSF_DELTA_SET(body->measure_params_2, 0); + + /** other requests are same with first request */ + p = (u_int8_t *) body; + while (--param->req_num_req) { + body++; + qdf_mem_copy(body, p, sizeof(wmi_rtt_measreq_body)); + } + + ret = 
wmi_unified_cmd_send(wmi_handle, buf, len, WMI_RTT_MEASREQ_CMDID); + qdf_print("send rtt cmd to FW with length %d and return %d\n", + len, ret); + return QDF_STATUS_SUCCESS; +} + +/** + * send_rtt_meas_req_cmd_non_tlv() - send rtt meas req cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold rtt meas req param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_rtt_meas_req_cmd_non_tlv(wmi_unified_t wmi_handle, + struct rtt_meas_req_params *param) +{ + wmi_buf_t buf; + uint8_t *p; + int ret; + uint16_t len; + uint8_t peer[6]; + uint8_t spoof[6]; + wmi_rtt_measreq_head *head; + wmi_rtt_measreq_body *body; + int req_frame_type, req_preamble; + wmi_channel *w_chan; + + /* Temporarily, hardcoding peer mac address for test purpose will be + * removed once RTT host has been developed for even req_id, like + * 0, 2, 4, there is no channel_swicth for odd req_id, like 1, 3 , 5, + * there is channel switch currently, for both cases, we have 3 req in + * each command please change here if you only have one (or just let + * it be). Even == HC, odd == OC. 
+ */ + if (!(param->req_id & 0x1)) { + len = sizeof(wmi_rtt_measreq_head); + /* + 2 * sizeof(wmi_rtt_measreq_body);*/ + } else { + len = sizeof(wmi_rtt_measreq_head); + /* + 2 * sizeof(wmi_rtt_measreq_body);*/ + } + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("No WMI resource!"); + return QDF_STATUS_E_FAILURE; + } + + p = (uint8_t *) wmi_buf_data(buf); + qdf_mem_set(p, len, 0); + + /* encode header */ + head = (wmi_rtt_measreq_head *) p; + /* head->req_id = req_id;*/ + WMI_RTT_REQ_ID_SET(head->req_id, param->req_id); + /* WMI_RTT_SPS_SET(head->req_id, 1);*/ + + if (!(param->req_id & 0x1)) { /*even req id */ +#ifndef RTT_TEST + /* we actually only have 3 sta to measure + this is used to test over limit request protection + XIN:WMI_RTT_NUM_STA_SET(head->sta_num, 5);*/ +#else + /* XIN:WMI_RTT_NUM_STA_SET(head->sta_num, 2);*/ + WMI_RTT_NUM_STA_SET(head->sta_num, 1); +#endif + WMI_RTT_NUM_STA_SET(head->sta_num, 1); + } else { /* odd req id */ + /* XIN:WMI_RTT_NUM_STA_SET(head->sta_num, 3); */ + WMI_RTT_NUM_STA_SET(head->sta_num, 1); + + } + + req_frame_type = RTT_MEAS_FRAME_NULL; + /* MS(extra, RTT_REQ_FRAME_TYPE);*/ + /* req_bw = //MS(extra, RTT_REQ_BW);*/ + req_preamble = WMI_RTT_PREAM_LEGACY;/*MS(extra, RTT_REQ_PREAMBLE);*/ + + /*encode common parts for each RTT measurement command body + The value here can be overwrite in following each req hardcoding */ + body = &(head->body[0]); + WMI_RTT_VDEV_ID_SET(body->measure_info, param->vdev_id); + WMI_RTT_TIMEOUT_SET(body->measure_info, RTT_TIMEOUT_MS); + WMI_RTT_REPORT_TYPE_SET(body->measure_info, 1); + WMI_RTT_FRAME_TYPE_SET(body->control_flag, req_frame_type); + WMI_RTT_TX_CHAIN_SET(body->control_flag, 001); + WMI_RTT_QCA_PEER_SET(body->control_flag, 1); + if (req_preamble == WMI_RTT_PREAM_LEGACY) + WMI_RTT_MCS_SET(body->control_flag, 3); + else + WMI_RTT_MCS_SET(body->control_flag, 0); + WMI_RTT_RETRIES_SET(body->control_flag, 1); + + if (!(param->req_id & 0x1)) { /* even time */ + 
qdf_mem_copy(peer, param->sta_mac_addr, 6); + } else { /* odd time */ + qdf_mem_copy(peer, param->sta_mac_addr, 6); + } + head->channel.mhz = param->channel.mhz; + head->channel.band_center_freq1 = param->channel.cfreq1; + head->channel.band_center_freq2 = param->channel.cfreq2; + + w_chan = (wmi_channel *)&head->channel; + WMI_SET_CHANNEL_MAX_POWER(w_chan, param->channel.phy_mode); + WMI_SET_CHANNEL_MIN_POWER(w_chan, param->channel.minpower); + WMI_SET_CHANNEL_MAX_POWER(w_chan, param->channel.maxpower); + WMI_SET_CHANNEL_REG_POWER(w_chan, param->channel.maxregpower); + WMI_SET_CHANNEL_ANTENNA_MAX(w_chan, param->channel.antennamax); + WMI_SET_CHANNEL_REG_CLASSID(w_chan, param->channel.reg_class_id); + + if (param->is_mode_na) + WMI_SET_CHANNEL_MODE(w_chan, MODE_11NG_HT20); + else if (param->is_mode_ac) + WMI_SET_CHANNEL_MODE(w_chan, MODE_11NA_HT20); + + if (param->channel.dfs_set) + WMI_SET_CHANNEL_FLAG(w_chan, WMI_CHAN_FLAG_DFS); + + WMI_CHAR_ARRAY_TO_MAC_ADDR(((uint8_t *)peer), &body->dest_mac); + qdf_mem_set(spoof, IEEE80211_ADDR_LEN, 0); + WMI_CHAR_ARRAY_TO_MAC_ADDR(((uint8_t *)param->spoof_mac_addr), + &body->spoof_bssid); + + /** embedded varing part of each request + set Preamble, BW, measurement times */ + if (param->is_bw_20) + WMI_RTT_BW_SET(body->control_flag, WMI_RTT_BW_20); + else if (param->is_bw_40) + WMI_RTT_BW_SET(body->control_flag, WMI_RTT_BW_40); + else if (param->is_bw_80) + WMI_RTT_BW_SET(body->control_flag, WMI_RTT_BW_80); + else + WMI_RTT_BW_SET(body->control_flag, WMI_RTT_BW_20); + WMI_RTT_PREAMBLE_SET(body->control_flag, req_preamble); + WMI_RTT_MEAS_NUM_SET(body->measure_info, param->num_probe_rqst); + + + ret = wmi_unified_cmd_send(wmi_handle, buf, len, WMI_RTT_MEASREQ_CMDID); + qdf_print("send rtt cmd to FW with length %d and return %d\n", + len, ret); + return ret; +} +/** + * send_rtt_keepalive_req_cmd_non_tlv() - send rtt keepalive req cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold rtt keepalive req param + * + * 
Return: 0 for success or error code + */ +static QDF_STATUS +send_rtt_keepalive_req_cmd_non_tlv(wmi_unified_t wmi_handle, + struct rtt_keepalive_req_params *param) +{ + wmi_buf_t buf; + wmi_rtt_keepalive_cmd *cmd; + int ret; + uint16_t len; + uint8_t *ptr; + + len = sizeof(wmi_rtt_keepalive_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("No WMI resource\n"); + return QDF_STATUS_E_FAILURE; + } + ptr = (uint8_t *)wmi_buf_data(buf); + OS_MEMSET(ptr, 0, len); + + cmd = (wmi_rtt_keepalive_cmd *)wmi_buf_data(buf); + + WMI_RTT_REQ_ID_SET(cmd->req_id, param->req_id); + WMI_RTT_KEEPALIVE_ACTION_SET(cmd->req_id, param->stop); + WMI_RTT_VDEV_ID_SET(cmd->probe_info, param->vdev_id); + /* 3ms probe interval by default */ + WMI_RTT_KEEPALIVE_PERIOD_SET(cmd->probe_info, 3); + /* max retry of 50 by default */ + WMI_RTT_TIMEOUT_SET(cmd->probe_info, 20); + /* set frame type */ + WMI_RTT_FRAME_TYPE_SET(cmd->control_flag, RTT_MEAS_FRAME_KEEPALIVE); + + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->macaddr, &cmd->sta_mac); + + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_RTT_KEEPALIVE_CMDID); + qdf_print("send rtt keepalive cmd to FW with length %d and return %d\n" + , len, ret); + param->req_id++; + + return QDF_STATUS_SUCCESS; +} +/** + * send_lci_set_cmd_non_tlv() - send lci cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold lci param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_lci_set_cmd_non_tlv(wmi_unified_t wmi_handle, + struct lci_set_params *param) +{ + wmi_buf_t buf; + uint8_t *p; + wmi_oem_measreq_head *head; + int len; + int colocated_bss_len = 0; + wmi_rtt_lci_cfg_head *rtt_req; + + rtt_req = (wmi_rtt_lci_cfg_head *) param->lci_data; + + len = param->msg_len; + + /* colocated_bss[1] contains num of vaps */ + /* Provide colocated bssid subIE only when there are 2 vaps or more */ + if (param->colocated_bss[1] > 1) { + qdf_print("%s: Adding %d co-located BSSIDs to LCI data\n", + __func__, 
param->colocated_bss[1]); + /* Convert num_vaps to octets: + 6*Num_of_vap + 1 (Max BSSID Indicator field) */ + param->colocated_bss[1] = + (param->colocated_bss[1]*IEEE80211_ADDR_LEN)+1; + colocated_bss_len = param->colocated_bss[1]+2; + qdf_mem_copy(rtt_req->colocated_bssids_info, + param->colocated_bss, + colocated_bss_len); + rtt_req->co_located_bssid_len = colocated_bss_len; + qdf_print("%s: co_located_bssid_len: %d\n", __func__, + param->colocated_bss[1]+2); + } else { + qdf_print("No co-located BSSID was added to LCI data\n"); + } + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("No WMI resource!"); + return QDF_STATUS_E_FAILURE; + } + + p = (uint8_t *) wmi_buf_data(buf); + qdf_mem_set(p, len, 0); + + head = (wmi_oem_measreq_head *)p; + WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(head, param->lci_data, len); + if (wmi_unified_cmd_send(wmi_handle, buf, len, WMI_OEM_REQ_CMDID)) + return QDF_STATUS_E_FAILURE; + + /* Save LCI data in host buffer */ + { + + param->latitude_unc = WMI_RTT_LCI_LAT_UNC_GET( + rtt_req->lci_cfg_param_info); + param->latitude_0_1 = ((uint32_t)(rtt_req->latitude & 0x3)); + param->latitude_2_33 = (uint32_t) + (((uint64_t)(rtt_req->latitude)) >> 2); + param->longitude_unc = + WMI_RTT_LCI_LON_UNC_GET(rtt_req->lci_cfg_param_info); + param->longitude_0_1 = ((uint32_t)(rtt_req->longitude & 0x3)); + param->longitude_2_33 = + (uint32_t)(((uint64_t)(rtt_req->longitude)) >> 2); + param->altitude_type = + WMI_RTT_LCI_ALT_TYPE_GET(rtt_req->altitude_info); + param->altitude_unc_0_3 = + (WMI_RTT_LCI_ALT_UNC_GET(rtt_req->altitude_info) & 0xF); + param->altitude_unc_4_5 = + ((WMI_RTT_LCI_ALT_UNC_GET(rtt_req->altitude_info) >> 4) & + 0x3); + param->altitude = (rtt_req->altitude & RTT_LCI_ALTITUDE_MASK); + param->datum = + WMI_RTT_LCI_DATUM_GET(rtt_req->lci_cfg_param_info); + param->reg_loc_agmt = + WMI_RTT_LCI_REG_LOC_AGMT_GET(rtt_req->lci_cfg_param_info); + param->reg_loc_dse = + WMI_RTT_LCI_REG_LOC_DSE_GET(rtt_req->lci_cfg_param_info); + 
param->dep_sta = + WMI_RTT_LCI_DEP_STA_GET(rtt_req->lci_cfg_param_info); + param->version = + WMI_RTT_LCI_VERSION_GET(rtt_req->lci_cfg_param_info); + + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_lcr_set_cmd_non_tlv() - send lcr cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold lcr param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_lcr_set_cmd_non_tlv(wmi_unified_t wmi_handle, + struct lcr_set_params *param) +{ + wmi_buf_t buf; + uint8_t *p; + wmi_oem_measreq_head *head; + int len; + + len = param->msg_len; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("No WMI resource!"); + return QDF_STATUS_E_FAILURE; + } + + p = (uint8_t *) wmi_buf_data(buf); + qdf_mem_set(p, len, 0); + + head = (wmi_oem_measreq_head *)p; + WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(head, param->lcr_data, len); + + if (wmi_unified_cmd_send(wmi_handle, buf, len, WMI_OEM_REQ_CMDID)) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + + /** + * send_start_oem_data_cmd_non_tlv() - send oem req cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold oem req param + */ +static QDF_STATUS +send_start_oem_data_cmd_non_tlv(wmi_unified_t wmi_handle, + uint32_t data_len, + uint8_t *data) +{ + wmi_buf_t buf; + uint8_t *p; + wmi_oem_measreq_head *head; + + buf = wmi_buf_alloc(wmi_handle, data_len); + if (!buf) { + qdf_print("%s: No WMI resource!\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + p = (uint8_t *) wmi_buf_data(buf); + qdf_mem_set(p, data_len, 0); + + head = (wmi_oem_measreq_head *)p; + WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(head, data, data_len); + + if (wmi_unified_cmd_send(wmi_handle, buf, + data_len, WMI_OEM_REQ_CMDID)) { + qdf_print("%s: ERROR: Host unable to send LOWI request to FW\n", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_get_user_position_cmd_non_tlv() - send cmd get user position from fw + * @wmi_handle: wmi handle + * 
@value: user pos value + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_get_user_position_cmd_non_tlv(wmi_unified_t wmi_handle, uint32_t value) +{ + wmi_buf_t buf; + wmi_peer_gid_userpos_list_cmd *cmd; + + buf = wmi_buf_alloc(wmi_handle, sizeof(wmi_peer_gid_userpos_list_cmd)); + if (!buf) { + qdf_print("No WMI resource!"); + return QDF_STATUS_E_FAILURE; + } + qdf_nbuf_put_tail(buf, sizeof(wmi_peer_gid_userpos_list_cmd)); + cmd = (wmi_peer_gid_userpos_list_cmd *)(wmi_buf_data(buf)); + cmd->aid = value; + + if (wmi_unified_cmd_send(wmi_handle, buf, + sizeof(wmi_peer_gid_userpos_list_cmd), + WMI_PEER_GID_USERPOS_LIST_CMDID)) { + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_reset_peer_mumimo_tx_count_cmd_non_tlv() - send mumimo reset tx count fw + * @wmi_handle: wmi handle + * @value: reset tx count + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_reset_peer_mumimo_tx_count_cmd_non_tlv(wmi_unified_t wmi_handle, + uint32_t value) +{ + wmi_buf_t buf; + wmi_peer_txmu_rstcnt_cmd *cmd; + + buf = wmi_buf_alloc(wmi_handle, sizeof(wmi_peer_txmu_rstcnt_cmd)); + if (!buf) { + qdf_print("No WMI resource!"); + return QDF_STATUS_E_FAILURE; + } + qdf_nbuf_put_tail(buf, sizeof(wmi_peer_txmu_rstcnt_cmd)); + cmd = (wmi_peer_txmu_rstcnt_cmd *)(wmi_buf_data(buf)); + cmd->aid = value; + + if (wmi_unified_cmd_send(wmi_handle, buf, + sizeof(wmi_peer_txmu_rstcnt_cmd), + WMI_PEER_TX_MU_TXMIT_RSTCNT_CMDID)) { + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_get_peer_mumimo_tx_count_cmd_non_tlv() - send cmd to get mumimo tx count from fw + * @wmi_handle: wmi handle + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_get_peer_mumimo_tx_count_cmd_non_tlv(wmi_unified_t wmi_handle, + uint32_t value) +{ + wmi_buf_t buf; + wmi_peer_txmu_cnt_cmd *cmd; + + buf = wmi_buf_alloc(wmi_handle, 
sizeof(wmi_peer_txmu_cnt_cmd)); + if (!buf) { + qdf_print("No WMI resource!"); + return QDF_STATUS_E_FAILURE; + } + qdf_nbuf_put_tail(buf, sizeof(wmi_peer_txmu_cnt_cmd)); + cmd = (wmi_peer_txmu_cnt_cmd *)(wmi_buf_data(buf)); + cmd->aid = value; + + if (wmi_unified_cmd_send(wmi_handle, buf, + sizeof(wmi_peer_txmu_cnt_cmd), + WMI_PEER_TX_MU_TXMIT_COUNT_CMDID)) { + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_pdev_caldata_version_check_cmd_non_tlv() - send caldata check cmd to fw + * @wmi_handle: wmi handle + * @param: reserved param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_pdev_caldata_version_check_cmd_non_tlv(wmi_unified_t wmi_handle, + uint32_t param) +{ + wmi_pdev_check_cal_version_cmd *cmd; + wmi_buf_t buf; + int32_t len = sizeof(wmi_pdev_check_cal_version_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_pdev_check_cal_version_cmd *)wmi_buf_data(buf); + cmd->reserved = param; /* set to 0x0 as expected from FW */ + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_CHECK_CAL_VERSION_CMDID)) { + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_btcoex_wlan_priority_cmd_non_tlv() - send btcoex wlan priority fw + * @wmi_handle: wmi handle + * @param: btcoex config params + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_btcoex_wlan_priority_cmd_non_tlv(wmi_unified_t wmi_handle, + struct btcoex_cfg_params *param) +{ + wmi_buf_t buf; + wmi_btcoex_cfg_cmd *cmd; + int len = sizeof(wmi_btcoex_cfg_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_btcoex_cfg_cmd *) wmi_buf_data(buf); + + cmd->btcoex_wlan_priority_bitmap = param->btcoex_wlan_priority_bitmap; + 
cmd->btcoex_param_flags = param->btcoex_param_flags; + if (wmi_unified_cmd_send(wmi_handle, buf, len, WMI_BTCOEX_CFG_CMDID)) { + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_btcoex_duty_cycle_cmd_non_tlv() - send btcoex wlan priority fw + * @wmi_handle: wmi handle + * @param: period and duration + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_btcoex_duty_cycle_cmd_non_tlv(wmi_unified_t wmi_handle, + struct btcoex_cfg_params *param) +{ + wmi_buf_t buf; + wmi_btcoex_cfg_cmd *cmd; + int len = sizeof(wmi_btcoex_cfg_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_btcoex_cfg_cmd *) wmi_buf_data(buf); + cmd->wlan_duration = param->wlan_duration; + cmd->period = param->period; + cmd->btcoex_param_flags = param->btcoex_param_flags; + if (wmi_unified_cmd_send(wmi_handle, buf, len, WMI_BTCOEX_CFG_CMDID)) { + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_coex_ver_cfg_cmd_non_tlv() - send coex ver cfg + * @wmi_handle: wmi handle + * @param: coex ver and configuration + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_coex_ver_cfg_cmd_non_tlv(wmi_unified_t wmi_handle, coex_ver_cfg_t *param) +{ + wmi_buf_t buf; + coex_ver_cfg_t *cmd; + int len = sizeof(wmi_coex_ver_cfg_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (coex_ver_cfg_t *)wmi_buf_data(buf); + cmd->coex_version = param->coex_version; + cmd->length = param->length; + qdf_mem_copy(cmd->config_buf, param->config_buf, + sizeof(cmd->config_buf)); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_COEX_VERSION_CFG_CMID)) { + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * 
wmi_copy_resource_config_non_tlv() - copy resource configuration function + * @param resource_cfg: pointer to resource configuration + * @param tgt_res_cfg: pointer to target resource configuration + * + * Return: None + */ +static void wmi_copy_resource_config_non_tlv(wmi_resource_config *resource_cfg, + target_resource_config *tgt_res_cfg) +{ + resource_cfg->num_vdevs = tgt_res_cfg->num_vdevs; + resource_cfg->num_peers = tgt_res_cfg->num_peers; + resource_cfg->num_active_peers = tgt_res_cfg->num_active_peers; + resource_cfg->num_offload_peers = tgt_res_cfg->num_offload_peers; + resource_cfg->num_offload_reorder_buffs = + tgt_res_cfg->num_offload_reorder_buffs; + resource_cfg->num_peer_keys = tgt_res_cfg->num_peer_keys; + resource_cfg->num_tids = tgt_res_cfg->num_tids; + resource_cfg->ast_skid_limit = tgt_res_cfg->ast_skid_limit; + resource_cfg->tx_chain_mask = tgt_res_cfg->tx_chain_mask; + resource_cfg->rx_chain_mask = tgt_res_cfg->rx_chain_mask; + resource_cfg->rx_timeout_pri[0] = tgt_res_cfg->rx_timeout_pri[0]; + resource_cfg->rx_timeout_pri[1] = tgt_res_cfg->rx_timeout_pri[1]; + resource_cfg->rx_timeout_pri[2] = tgt_res_cfg->rx_timeout_pri[2]; + resource_cfg->rx_timeout_pri[3] = tgt_res_cfg->rx_timeout_pri[3]; + resource_cfg->rx_decap_mode = tgt_res_cfg->rx_decap_mode; + resource_cfg->scan_max_pending_req = tgt_res_cfg->scan_max_pending_req; + resource_cfg->bmiss_offload_max_vdev = + tgt_res_cfg->bmiss_offload_max_vdev; + resource_cfg->roam_offload_max_vdev = + tgt_res_cfg->roam_offload_max_vdev; + resource_cfg->roam_offload_max_ap_profiles = + tgt_res_cfg->roam_offload_max_ap_profiles; + resource_cfg->num_mcast_groups = tgt_res_cfg->num_mcast_groups; + resource_cfg->num_mcast_table_elems = + tgt_res_cfg->num_mcast_table_elems; + resource_cfg->mcast2ucast_mode = tgt_res_cfg->mcast2ucast_mode; + resource_cfg->tx_dbg_log_size = tgt_res_cfg->tx_dbg_log_size; + resource_cfg->num_wds_entries = tgt_res_cfg->num_wds_entries; + resource_cfg->dma_burst_size = 
tgt_res_cfg->dma_burst_size; + resource_cfg->mac_aggr_delim = tgt_res_cfg->mac_aggr_delim; + resource_cfg->rx_skip_defrag_timeout_dup_detection_check = + tgt_res_cfg->rx_skip_defrag_timeout_dup_detection_check; + resource_cfg->vow_config = tgt_res_cfg->vow_config; + resource_cfg->gtk_offload_max_vdev = tgt_res_cfg->gtk_offload_max_vdev; + resource_cfg->num_msdu_desc = tgt_res_cfg->num_msdu_desc; + resource_cfg->max_frag_entries = tgt_res_cfg->max_frag_entries; + resource_cfg->max_peer_ext_stats = tgt_res_cfg->max_peer_ext_stats; + resource_cfg->smart_ant_cap = tgt_res_cfg->smart_ant_cap; + resource_cfg->BK_Minfree = tgt_res_cfg->BK_Minfree; + resource_cfg->BE_Minfree = tgt_res_cfg->BE_Minfree; + resource_cfg->VI_Minfree = tgt_res_cfg->VI_Minfree; + resource_cfg->VO_Minfree = tgt_res_cfg->VO_Minfree; + resource_cfg->rx_batchmode = tgt_res_cfg->rx_batchmode; + resource_cfg->tt_support = tgt_res_cfg->tt_support; + resource_cfg->atf_config = tgt_res_cfg->atf_config; + resource_cfg->iphdr_pad_config = tgt_res_cfg->iphdr_pad_config; + WMI_SET_QWRAP(resource_cfg, tgt_res_cfg->qwrap_config); + WMI_SET_ALLOC_FRAG(resource_cfg, + tgt_res_cfg->alloc_frag_desc_for_data_pkt); +} + +/** + * init_cmd_send_non_tlv() - send initialization cmd to fw + * @wmi_handle: wmi handle + * @param param: pointer to wmi init param + * + * Return: 0 for success or error code + */ +static QDF_STATUS init_cmd_send_non_tlv(wmi_unified_t wmi_handle, + struct wmi_init_cmd_param *param) +{ + wmi_buf_t buf; + wmi_init_cmd *cmd; + wlan_host_memory_chunk *host_mem_chunks; + uint32_t mem_chunk_len = 0; + uint16_t idx; + int len; + + len = sizeof(*cmd); + mem_chunk_len = (sizeof(wlan_host_memory_chunk) * MAX_MEM_CHUNKS); + buf = wmi_buf_alloc(wmi_handle, len + mem_chunk_len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_init_cmd *) wmi_buf_data(buf); + + wmi_copy_resource_config_non_tlv(&cmd->resource_config, param->res_cfg); + + 
host_mem_chunks = cmd->host_mem_chunks; + for (idx = 0; idx < param->num_mem_chunks; ++idx) { + host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr; + host_mem_chunks[idx].size = param->mem_chunks[idx].len; + host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id; + qdf_print("chunk %d len %d requested , ptr 0x%x\n", + idx, cmd->host_mem_chunks[idx].size, + cmd->host_mem_chunks[idx].ptr); + } + cmd->num_host_mem_chunks = param->num_mem_chunks; + if (param->num_mem_chunks > 1) + len += ((param->num_mem_chunks-1) * + sizeof(wlan_host_memory_chunk)); + + if (wmi_unified_cmd_send(wmi_handle, buf, len, WMI_INIT_CMDID) < 0) { + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_ext_resource_config_non_tlv() - send extended resource configuration + * @wmi_handle: wmi handle + * @param ext_cfg: pointer to extended resource configuration + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_ext_resource_config_non_tlv(wmi_unified_t wmi_handle, + wmi_host_ext_resource_config *ext_cfg) +{ + wmi_buf_t buf; + int len = 0; + wmi_ext_resource_config *cmd_cfg; + +#define PAD_LENGTH 100 + buf = wmi_buf_alloc(wmi_handle, + len + (sizeof(wmi_ext_resource_config) + PAD_LENGTH)); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd_cfg = (wmi_ext_resource_config *)wmi_buf_data(buf); + qdf_mem_copy(cmd_cfg, ext_cfg, sizeof(wmi_ext_resource_config)); + qdf_print("\nSending Ext resource cfg: HOST PLATFORM as %d\n" + "fw_feature_bitmap as %x to TGT\n", + cmd_cfg->host_platform_config, + cmd_cfg->fw_feature_bitmap); + if (wmi_unified_cmd_send(wmi_handle, buf, + sizeof(wmi_ext_resource_config), + WMI_EXT_RESOURCE_CFG_CMDID) < 0) { + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * save_service_bitmap_non_tlv() - save service bitmap + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * 
 * @param bitmap_buf: bitmap buffer for converged legacy support
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS save_service_bitmap_non_tlv(wmi_unified_t wmi_handle,
		void *evt_buf, void *bitmap_buf)
{
	wmi_service_ready_event *ev;
	struct wmi_soc *soc = wmi_handle->soc;

	ev = (wmi_service_ready_event *) evt_buf;

	/* If it is already allocated, use that buffer. This can happen
	 * during target stop/start scenarios where host allocation is skipped.
	 */
	if (!soc->wmi_service_bitmap) {
		soc->wmi_service_bitmap =
			qdf_mem_malloc(WMI_SERVICE_BM_SIZE * sizeof(uint32_t));
		if (!soc->wmi_service_bitmap) {
			WMI_LOGE("Failed memory alloc for service bitmap\n");
			return QDF_STATUS_E_NOMEM;
		}
	}

	qdf_mem_copy(soc->wmi_service_bitmap, ev->wmi_service_bitmap,
			(WMI_SERVICE_BM_SIZE * sizeof(uint32_t)));

	/* Caller may additionally pass its own buffer to receive a copy */
	if (bitmap_buf)
		qdf_mem_copy(bitmap_buf, ev->wmi_service_bitmap,
				(WMI_SERVICE_BM_SIZE * sizeof(uint32_t)));

	return QDF_STATUS_SUCCESS;
}

/**
 * is_service_enabled_non_tlv() - Check if service enabled
 * @param wmi_handle: wmi handle
 * @param service_id: service identifier
 *
 * Return: 1 enabled, 0 disabled
 */
static bool is_service_enabled_non_tlv(wmi_unified_t wmi_handle,
		uint32_t service_id)
{
	struct wmi_soc *soc = wmi_handle->soc;

	/* Bitmap is only valid after save_service_bitmap_non_tlv() ran */
	if (!soc->wmi_service_bitmap) {
		WMI_LOGE("WMI service bit map is not saved yet\n");
		return false;
	}

	return WMI_SERVICE_IS_ENABLED(soc->wmi_service_bitmap,
			service_id);
}

/*
 * copy_ht_cap_info() - accumulate HT capability bits from the target's
 * service-ready word into the host capability struct (OR-merge, does not
 * clear pre-existing bits).
 *
 * NOTE(review): the *_MASK_SHIFT constants are OR-ed in alongside the
 * value masks here — presumably they share the target's bit layout;
 * confirm against the WMI HT capability definitions.
 */
static inline void copy_ht_cap_info(uint32_t ev_target_cap,
		struct wlan_psoc_target_capability_info *cap)
{
	cap->ht_cap_info |= ev_target_cap & (
		WMI_HT_CAP_ENABLED
		| WMI_HT_CAP_HT20_SGI
		| WMI_HT_CAP_DYNAMIC_SMPS
		| WMI_HT_CAP_TX_STBC
		| WMI_HT_CAP_TX_STBC_MASK_SHIFT
		| WMI_HT_CAP_RX_STBC
		| WMI_HT_CAP_RX_STBC_MASK_SHIFT
		| WMI_HT_CAP_LDPC
		| WMI_HT_CAP_L_SIG_TXOP_PROT
		| WMI_HT_CAP_MPDU_DENSITY
		| WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT
		| WMI_HT_CAP_HT40_SGI
		| WMI_HT_CAP_IBF_BFER);
	if (ev_target_cap & WMI_HT_CAP_IBF_BFER)
		cap->ht_cap_info |= WMI_HOST_HT_CAP_IBF_BFER;
}

/**
 * extract_service_ready_non_tlv() - extract service ready event
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to received event buffer
 * @param cap: pointer to hold target capability information extracted from
 *             the event
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS extract_service_ready_non_tlv(wmi_unified_t wmi_handle,
		void *evt_buf,
		struct wlan_psoc_target_capability_info *cap)
{
	wmi_service_ready_event *ev;

	ev = (wmi_service_ready_event *) evt_buf;

	cap->phy_capability = ev->phy_capability;
	cap->max_frag_entry = ev->max_frag_entry;
	cap->num_rf_chains = ev->num_rf_chains;
	copy_ht_cap_info(ev->ht_cap_info, cap);
	cap->vht_cap_info = ev->vht_cap_info;
	cap->vht_supp_mcs = ev->vht_supp_mcs;
	cap->hw_min_tx_power = ev->hw_min_tx_power;
	cap->hw_max_tx_power = ev->hw_max_tx_power;
	cap->sys_cap_info = ev->sys_cap_info;
	cap->min_pkt_size_enable = ev->min_pkt_size_enable;
	cap->max_bcn_ie_size = ev->max_bcn_ie_size;
	cap->fw_version = ev->sw_version;
	cap->fw_version_1 = ev->sw_version_1;
	/* Following caps not received in older fw/hw
	 * Initialize it as zero(default).
	 */
	cap->max_num_scan_channels = 0;
	cap->max_supported_macs = 0;
	cap->wmi_fw_sub_feat_caps = 0;
	cap->txrx_chainmask = 0;
	cap->default_dbs_hw_mode_index = 0;
	cap->num_msdu_desc = 0;

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_fw_version_non_tlv() - extract fw version
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to event buffer
 * @param fw_ver: Pointer to hold fw version
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS extract_fw_version_non_tlv(wmi_unified_t wmi_handle,
		void *evt_buf, struct wmi_host_fw_ver *fw_ver)
{
	wmi_service_ready_event *ev;

	ev = (wmi_service_ready_event *) evt_buf;

	fw_ver->sw_version = ev->sw_version;
	fw_ver->sw_version_1 = ev->sw_version_1;

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_fw_abi_version_non_tlv() - extract fw abi version
 * @wmi_handle: wmi handle
 * @param evt_buf: Pointer to event buffer (a ready event, not service ready)
 * @param fw_ver: Pointer to hold fw abi version
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS extract_fw_abi_version_non_tlv(wmi_unified_t wmi_handle,
		void *evt_buf, struct wmi_host_fw_abi_ver *fw_ver)
{
	wmi_ready_event *ev;

	ev = (wmi_ready_event *) evt_buf;

	fw_ver->sw_version = ev->sw_version;
	fw_ver->abi_version = ev->abi_version;

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_hal_reg_cap_non_tlv() - extract HAL registered capabilities
 * @wmi_handle: wmi handle
 * @param evt_buf: Pointer to event buffer
 * @param cap: pointer to hold HAL reg capabilities
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS extract_hal_reg_cap_non_tlv(wmi_unified_t wmi_handle,
		void *evt_buf,
		struct wlan_psoc_hal_reg_capability *cap)
{
	wmi_service_ready_event *ev;
	u_int32_t wireless_modes_orig = 0;

	ev = (wmi_service_ready_event *) evt_buf;

	qdf_mem_copy(cap, &ev->hal_reg_capabilities,
			sizeof(struct wlan_psoc_hal_reg_capability));

	/* Convert REGDMN_MODE values sent by target to host internal
	 * WMI_HOST_REGDMN_MODE values.
	 *
	 * REGULATORY TODO :
	 * REGDMN_MODE_11AC_VHT*_2G values are not used by the
	 * host currently. Add this in the future if required.
	 */
	wireless_modes_orig = ev->hal_reg_capabilities.wireless_modes;
	cap->wireless_modes = 0;

	if (wireless_modes_orig & REGDMN_MODE_11A)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_11A;

	if (wireless_modes_orig & REGDMN_MODE_TURBO)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_TURBO;

	if (wireless_modes_orig & REGDMN_MODE_11B)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_11B;

	if (wireless_modes_orig & REGDMN_MODE_PUREG)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_PUREG;

	if (wireless_modes_orig & REGDMN_MODE_11G)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_11G;

	if (wireless_modes_orig & REGDMN_MODE_108G)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_108G;

	if (wireless_modes_orig & REGDMN_MODE_108A)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_108A;

	if (wireless_modes_orig & REGDMN_MODE_XR)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_XR;

	if (wireless_modes_orig & REGDMN_MODE_11A_HALF_RATE)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_11A_HALF_RATE;

	if (wireless_modes_orig & REGDMN_MODE_11A_QUARTER_RATE)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_11A_QUARTER_RATE;

	if (wireless_modes_orig & REGDMN_MODE_11NG_HT20)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_11NG_HT20;

	if (wireless_modes_orig & REGDMN_MODE_11NA_HT20)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_11NA_HT20;

	if (wireless_modes_orig & REGDMN_MODE_11NG_HT40PLUS)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_11NG_HT40PLUS;

	if (wireless_modes_orig & REGDMN_MODE_11NG_HT40MINUS)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_11NG_HT40MINUS;

	if (wireless_modes_orig & REGDMN_MODE_11NA_HT40PLUS)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_11NA_HT40PLUS;

	if (wireless_modes_orig & REGDMN_MODE_11NA_HT40MINUS)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_11NA_HT40MINUS;

	if (wireless_modes_orig & REGDMN_MODE_11AC_VHT20)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT20;

	if (wireless_modes_orig & REGDMN_MODE_11AC_VHT40PLUS)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT40PLUS;

	if (wireless_modes_orig & REGDMN_MODE_11AC_VHT40MINUS)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT40MINUS;

	if (wireless_modes_orig & REGDMN_MODE_11AC_VHT80)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT80;

	if (wireless_modes_orig & REGDMN_MODE_11AC_VHT160)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT160;

	if (wireless_modes_orig & REGDMN_MODE_11AC_VHT80_80)
		cap->wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT80_80;

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_host_mem_req_non_tlv() - Extract host memory request event
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to event buffer
 * @param num_entries: pointer to hold number of entries requested
 *
 * Return: pointer to the memory request array inside the event buffer
 *         (not a copy -- valid only while evt_buf is)
 */
static host_mem_req *extract_host_mem_req_non_tlv(wmi_unified_t wmi_handle,
		void *evt_buf, uint8_t *num_entries)
{
	wmi_service_ready_event *ev;

	ev = (wmi_service_ready_event *) evt_buf;

	*num_entries = ev->num_mem_reqs;
	return (host_mem_req *)ev->mem_reqs;
}

/**
 * save_fw_version_in_service_ready_non_tlv() - Save fw version in service
 * ready function
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to event buffer
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS save_fw_version_in_service_ready_non_tlv(
		wmi_unified_t wmi_handle,
		void *evt_buf)
{
	/* Version check and exchange is not present in non-tlv implementation*/
	return QDF_STATUS_SUCCESS;
}

/**
 * ready_check_and_update_fw_version_non_tlv() - Ready and fw version check
 * function
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to event buffer
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS ready_check_and_update_fw_version_non_tlv(
		wmi_unified_t wmi_handle,
		void *evt_buf)
{
	/* Version check and exchange is not present in non-tlv implementation*/
	return QDF_STATUS_SUCCESS;
}

/**
 * ready_extract_init_status_non_tlv() - Extract init status from ready event
 * @wmi_hdl: wmi handle
 * @param evt_buf: Pointer to event buffer
 *
 * Return: ready status reported by the target
 */
static uint32_t ready_extract_init_status_non_tlv(wmi_unified_t wmi_hdl,
		void *evt_buf)
{
	wmi_ready_event *ev = (wmi_ready_event *) evt_buf;
	qdf_print("Version = %d %d status = %d\n", ev->sw_version,
			ev->abi_version, ev->status);
	return ev->status;
}

/**
 * ready_extract_mac_addr_non_tlv() - extract mac address from ready event
 * @wmi_hdl: wmi handle
 * @param evt_buf: pointer to event buffer
 * @param macaddr: Pointer to hold MAC address
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS ready_extract_mac_addr_non_tlv(wmi_unified_t wmi_hdl,
		void *evt_buf,
		uint8_t *macaddr)
{
	wmi_ready_event *ev = (wmi_ready_event *) evt_buf;

	WMI_MAC_ADDR_TO_CHAR_ARRAY(&ev->mac_addr, macaddr);
	return QDF_STATUS_SUCCESS;
}

/**
 * extract_ready_event_params_non_tlv() - Extract data from ready event apart
 * from status, macaddr and version.
 * @wmi_handle: Pointer to WMI handle.
 * @evt_buf: Pointer to Ready event buffer.
 * @ev_param: Pointer to host defined struct to copy the data from event.
 *
 * Return: QDF_STATUS_SUCCESS on success.
 */
static QDF_STATUS extract_ready_event_params_non_tlv(wmi_unified_t wmi_handle,
		void *evt_buf, struct wmi_host_ready_ev_param *ev_param)
{
	wmi_ready_event *ev = (wmi_ready_event *) evt_buf;

	ev_param->status = ev->status;
	ev_param->num_dscp_table = ev->num_dscp_table;
	if (ev->agile_capability)
		ev_param->agile_capability = true;
	else
		ev_param->agile_capability = false;
	/* Following params not present in non-TLV target.
	 * Set Defaults */
	ev_param->num_extra_mac_addr = 0;
	ev_param->num_total_peer = 0;
	ev_param->num_extra_peer = 0;

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_dbglog_data_len_non_tlv() - extract debuglog data length
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to event buffer
 * @param len: in/out length (left untouched -- see below)
 *
 * Return: pointer to the debug-log payload (the event buffer itself)
 */
static uint8_t *extract_dbglog_data_len_non_tlv(wmi_unified_t wmi_handle,
		void *evt_buf,
		uint32_t *len)
{
	/*Len is already valid from event. No need to change it */
	return evt_buf;
}

/**
 * extract_wds_addr_event_non_tlv() - extract wds address from event
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to event buffer
 * @param len: event buffer length in bytes (used for endian fixup)
 * @param wds_ev: Pointer to hold wds address
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS extract_wds_addr_event_non_tlv(wmi_unified_t wmi_handle,
		void *evt_buf,
		uint16_t len, wds_addr_event_t *wds_ev)
{
	wmi_wds_addr_event_t *ev = (wmi_wds_addr_event_t *) evt_buf;
	int i;

#ifdef BIG_ENDIAN_HOST
	{
		/* Byte-swap the event in place, word by word */
		uint8_t *datap = (uint8_t *) ev;
		/*Skip swapping the first long word*/
		datap += sizeof(uint32_t);
		for (i = 0; i < ((len / sizeof(uint32_t))-1);
				i++, datap += sizeof(uint32_t))
			*(uint32_t *)datap =
				qdf_le32_to_cpu(*(uint32_t *)datap);
	}
#endif

	qdf_mem_copy(wds_ev->event_type, ev->event_type,
			sizeof(wds_ev->event_type));
	/* Reassemble the 6-byte MACs from the 32-bit low and 16-bit
	 * high halves carried in the event.
	 */
	for (i = 0; i < 4; i++) {
		wds_ev->peer_mac[i] =
			((u_int8_t *)&(ev->peer_mac.mac_addr31to0))[i];
		wds_ev->dest_mac[i] =
			((u_int8_t *)&(ev->dest_mac.mac_addr31to0))[i];
	}
	for (i = 0; i < 2; i++) {
		wds_ev->peer_mac[4+i] =
			((u_int8_t *)&(ev->peer_mac.mac_addr47to32))[i];
		wds_ev->dest_mac[4+i] =
			((u_int8_t *)&(ev->dest_mac.mac_addr47to32))[i];
	}
	/* vdev_id is not available in legacy. It is required only to get
	 * pdev, hence setting it to zero as legacy as only one pdev.
	 */
	wds_ev->vdev_id = 0;

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_dcs_interference_type_non_tlv() - extract dcs interference type
 * from event
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to event buffer
 * @param param: Pointer to hold dcs interference param
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS extract_dcs_interference_type_non_tlv(
		wmi_unified_t wmi_handle,
		void *evt_buf, struct wmi_host_dcs_interference_param *param)
{
	wmi_dcs_interference_event_t *ev =
		(wmi_dcs_interference_event_t *) evt_buf;

	param->interference_type = ev->interference_type;
	param->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID;

	return QDF_STATUS_SUCCESS;
}

/*
 * extract_dcs_cw_int_non_tlv() - extract dcs cw interference from event
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to event buffer
 * @param cw_int: Pointer to hold cw interference
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS extract_dcs_cw_int_non_tlv(wmi_unified_t wmi_handle,
		void *evt_buf,
		wmi_host_ath_dcs_cw_int *cw_int)
{
	wmi_dcs_interference_event_t *ev =
		(wmi_dcs_interference_event_t *) evt_buf;

	qdf_mem_copy(cw_int, &ev->int_event.cw_int, sizeof(*cw_int));
	return QDF_STATUS_SUCCESS;
}

/**
 * extract_dcs_im_tgt_stats_non_tlv() - extract dcs im target stats from event
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to event buffer
 * @param wlan_stat: Pointer to hold wlan stats
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS extract_dcs_im_tgt_stats_non_tlv(wmi_unified_t wmi_handle,
		void *evt_buf,
		wmi_host_dcs_im_tgt_stats_t *wlan_stat)
{
	wmi_dcs_interference_event_t *ev =
		(wmi_dcs_interference_event_t *) evt_buf;

	qdf_mem_copy(wlan_stat, &ev->int_event.wlan_stat,
			sizeof(wmi_host_dcs_im_tgt_stats_t));
	return QDF_STATUS_SUCCESS;
}

/**
 * extract_fips_event_data_non_tlv() - extract fips event data
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to
 * event buffer
 * @param param: pointer FIPS event params
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS extract_fips_event_data_non_tlv(wmi_unified_t wmi_handle,
		void *evt_buf,
		struct wmi_host_fips_event_param *param)
{
	wmi_pdev_fips_event *event = (wmi_pdev_fips_event *)evt_buf;

	param->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID;
#ifdef BIG_ENDIAN_HOST
	{
		/*****************LE to BE conversion*************************/

		/* Assigning unaligned space to copy the data */
		unsigned char *data_unaligned = qdf_mem_malloc(
			(sizeof(u_int8_t)*event->data_len + FIPS_ALIGN));

		u_int8_t *data_aligned = NULL;
		int c;

		/* Checking if kmalloc does successful allocation */
		if (data_unaligned == NULL)
			return QDF_STATUS_E_FAILURE;

		/* Checking if space is aligned */
		if (!FIPS_IS_ALIGNED(data_unaligned, FIPS_ALIGN)) {
			/* align the data space */
			data_aligned =
			(u_int8_t *)FIPS_ALIGNTO(data_unaligned, FIPS_ALIGN);
		} else {
			data_aligned = (u_int8_t *)data_unaligned;
		}

		/* memset and copy content from data to data aligned */
		OS_MEMSET(data_aligned, 0, event->data_len);
		OS_MEMCPY(data_aligned, event->data, event->data_len);
		/* Endianness to LE (32-bit words, swapped in the aligned
		 * scratch buffer, then written back into the event)
		 */
		for (c = 0; c < event->data_len/4; c++) {
			*((u_int32_t *)data_aligned+c) =
			qdf_le32_to_cpu(*((u_int32_t *)data_aligned+c));
		}

		/* Copy content to event->data */
		OS_MEMCPY(event->data, data_aligned, event->data_len);

		/* clean up allocated space */
		qdf_mem_free(data_unaligned);
		data_aligned = NULL;
		data_unaligned = NULL;

		/*************************************************************/
	}
#endif
	/* param->data points into the event buffer, not a copy */
	param->data = event->data;
	param->data_len = event->data_len;
	param->error_status = event->error_status;

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_vdev_start_resp_non_tlv() - extract vdev start response
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to event buffer
 * @param vdev_rsp: Pointer to hold vdev response
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS extract_vdev_start_resp_non_tlv(wmi_unified_t wmi_handle,
		void *evt_buf,
		wmi_host_vdev_start_resp *vdev_rsp)
{
	wmi_vdev_start_response_event *ev =
		(wmi_vdev_start_response_event *) evt_buf;

	qdf_mem_zero(vdev_rsp, sizeof(*vdev_rsp));

	vdev_rsp->vdev_id = ev->vdev_id;
	vdev_rsp->requestor_id = ev->requestor_id;
	vdev_rsp->resp_type = ev->resp_type;
	vdev_rsp->status = ev->status;

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_tbttoffset_num_vdevs_non_tlv() - extract tbtt offset num vdevs
 * @wmi_hdl: wmi handle
 * @param evt_buf: pointer to event buffer
 * @param num_vdevs: Pointer to hold num vdevs (population count of the
 *                   event's vdev bitmap)
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS extract_tbttoffset_num_vdevs_non_tlv(void *wmi_hdl,
		void *evt_buf,
		uint32_t *num_vdevs)
{
	wmi_tbtt_offset_event *tbtt_offset_event =
		(wmi_tbtt_offset_event *)evt_buf;
	uint32_t vdev_map;

	vdev_map = tbtt_offset_event->vdev_map;
	*num_vdevs = wmi_vdev_map_to_num_vdevs(vdev_map);

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_tbttoffset_update_params_non_tlv() - extract tbtt offset update param
 * @wmi_hdl: wmi handle
 * @param evt_buf: pointer to event buffer
 * @param idx: Index referring to a vdev
 * @param tbtt_param: Pointer to tbttoffset event param
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS extract_tbttoffset_update_params_non_tlv(void *wmi_hdl,
		void *evt_buf, uint8_t idx,
		struct tbttoffset_params *tbtt_param)
{
	wmi_tbtt_offset_event *tbtt_offset_event =
		(wmi_tbtt_offset_event *)evt_buf;
	uint32_t vdev_map;

	vdev_map = tbtt_offset_event->vdev_map;

	/* idx is a position within the set bits of vdev_map; translate
	 * it to the actual vdev id before indexing the offset list.
	 */
	tbtt_param->vdev_id = wmi_vdev_map_to_vdev_id(vdev_map, idx);
	if (tbtt_param->vdev_id == WLAN_INVALID_VDEV_ID)
		return QDF_STATUS_E_INVAL;
	tbtt_param->tbttoffset =
		tbtt_offset_event->tbttoffset_list[tbtt_param->vdev_id];

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_mgmt_rx_params_non_tlv() -
extract management rx params from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param hdr: Pointer to hold header + * @param bufp: Pointer to hold pointer to rx param buffer + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_mgmt_rx_params_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + struct mgmt_rx_event_params *hdr, uint8_t **bufp) +{ + wmi_mgmt_rx_event *ev = (wmi_mgmt_rx_event *)evt_buf; + + hdr->channel = ev->hdr.channel; + hdr->snr = ev->hdr.snr; + hdr->rssi = ev->hdr.snr; + hdr->rate = ev->hdr.rate; + hdr->phy_mode = ev->hdr.phy_mode; + hdr->buf_len = ev->hdr.buf_len; + hdr->status = ev->hdr.status; + hdr->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID; + qdf_mem_copy(hdr->rssi_ctl, ev->hdr.rssi_ctl, sizeof(hdr->rssi_ctl)); + *bufp = ev->bufp; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_vdev_stopped_param_non_tlv() - extract vdev stop param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param vdev_id: Pointer to hold vdev identifier + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_vdev_stopped_param_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint32_t *vdev_id) +{ + wmi_vdev_stopped_event *event = (wmi_vdev_stopped_event *)evt_buf; + + *vdev_id = event->vdev_id; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_vdev_roam_param_non_tlv() - extract vdev roam param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold roam param + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_vdev_roam_param_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_roam_event *param) +{ + wmi_roam_event *evt = (wmi_roam_event *)evt_buf; + + qdf_mem_zero(param, sizeof(*param)); + param->vdev_id = evt->vdev_id; + param->reason = evt->reason; + return QDF_STATUS_SUCCESS; +} + +/** + * extract_vdev_scan_ev_param_non_tlv() - extract vdev scan 
param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold vdev scan param + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_vdev_scan_ev_param_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + struct scan_event *param) +{ + wmi_scan_event *evt = (wmi_scan_event *)evt_buf; + + qdf_mem_zero(param, sizeof(*param)); + switch (evt->event) { + case WMI_SCAN_EVENT_STARTED: + param->type = SCAN_EVENT_TYPE_STARTED; + break; + case WMI_SCAN_EVENT_COMPLETED: + param->type = SCAN_EVENT_TYPE_COMPLETED; + break; + case WMI_SCAN_EVENT_BSS_CHANNEL: + param->type = SCAN_EVENT_TYPE_BSS_CHANNEL; + break; + case WMI_SCAN_EVENT_FOREIGN_CHANNEL: + param->type = SCAN_EVENT_TYPE_FOREIGN_CHANNEL; + break; + case WMI_SCAN_EVENT_DEQUEUED: + param->type = SCAN_EVENT_TYPE_DEQUEUED; + break; + case WMI_SCAN_EVENT_PREEMPTED: + param->type = SCAN_EVENT_TYPE_PREEMPTED; + break; + case WMI_SCAN_EVENT_START_FAILED: + param->type = SCAN_EVENT_TYPE_START_FAILED; + break; + case WMI_SCAN_EVENT_RESTARTED: + param->type = SCAN_EVENT_TYPE_RESTARTED; + break; + case WMI_HOST_SCAN_EVENT_FOREIGN_CHANNEL_EXIT: + param->type = SCAN_EVENT_TYPE_FOREIGN_CHANNEL_EXIT; + break; + case WMI_SCAN_EVENT_INVALID: + param->type = SCAN_EVENT_TYPE_INVALID; + break; + case WMI_SCAN_EVENT_MAX: + default: + param->type = SCAN_EVENT_TYPE_MAX; + break; + }; + + switch (evt->reason) { + case WMI_SCAN_REASON_NONE: + param->reason = SCAN_REASON_NONE; + break; + case WMI_SCAN_REASON_COMPLETED: + param->reason = SCAN_REASON_COMPLETED; + break; + case WMI_SCAN_REASON_CANCELLED: + param->reason = SCAN_REASON_CANCELLED; + break; + case WMI_SCAN_REASON_PREEMPTED: + param->reason = SCAN_REASON_PREEMPTED; + break; + case WMI_SCAN_REASON_TIMEDOUT: + param->reason = SCAN_REASON_TIMEDOUT; + break; + case WMI_SCAN_REASON_INTERNAL_FAILURE: + param->reason = SCAN_REASON_INTERNAL_FAILURE; + break; + case WMI_SCAN_REASON_MAX: + default: + param->reason = 
SCAN_REASON_MAX; + break; + }; + + param->chan_freq = evt->channel_freq; + param->requester = evt->requestor; + param->scan_id = evt->scan_id; + param->vdev_id = evt->vdev_id; + return QDF_STATUS_SUCCESS; +} + +/** + * extract_mu_ev_param_non_tlv() - extract mu param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold mu report + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_mu_ev_param_non_tlv(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_mu_report_event *param) +{ + wmi_mu_report_event *event = (wmi_mu_report_event *)evt_buf; + + param->mu_request_id = event->mu_request_id; + param->status_reason = event->status_reason; + qdf_mem_copy(param->total_mu, event->total_mu, sizeof(param->total_mu)); + param->num_active_bssid = event->num_active_bssid; + qdf_mem_copy(param->hidden_node_mu, event->hidden_node_mu, + sizeof(param->hidden_node_mu)); + param->num_TA_entries = event->num_TA_entries; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_mu_db_entry_non_tlv() - extract mu db entry from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param profile_data: Pointer to hold mu_db_entry + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_mu_db_entry_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint8_t idx, + wmi_host_mu_db_entry *db_entry) +{ + wmi_mu_report_event *event = (wmi_mu_report_event *)evt_buf; + + if (idx > event->num_TA_entries) + return QDF_STATUS_E_INVAL; + + qdf_mem_copy(db_entry, &event->mu_entry[idx], + sizeof(wmi_host_mu_db_entry)); + + return QDF_STATUS_SUCCESS; +} + + +/** + * extract_mumimo_tx_count_ev_param_non_tlv() - extract mumimo tx count from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold mu report + * + * Return: 0 for success or error code + */ +static QDF_STATUS 
extract_mumimo_tx_count_ev_param_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_peer_txmu_cnt_event *param) +{ + wmi_peer_txmu_cnt_event *event = (wmi_peer_txmu_cnt_event *)evt_buf; + + param->tx_mu_transmitted = event->tx_mu_transmitted; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_peer_gid_userpos_list_ev_param_non_tlv() - extract gid user position + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold peer user position list + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_peer_gid_userpos_list_ev_param_non_tlv( + wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_peer_gid_userpos_list_event *param) +{ + wmi_peer_gid_userpos_list_event *event = + (wmi_peer_gid_userpos_list_event *)evt_buf; + + qdf_mem_copy(param->usr_list, event->usr_list, sizeof(param->usr_list)); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_caldata_version_check_ev_param_non_tlv() - extract caldata from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold peer caldata version data + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_pdev_caldata_version_check_ev_param_non_tlv( + wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_pdev_check_cal_version_event *param) +{ + wmi_pdev_check_cal_version_event *event = + (wmi_pdev_check_cal_version_event *)evt_buf; + + param->software_cal_version = event->software_cal_version; + param->board_cal_version = event->board_cal_version; + param->cal_ok = event->cal_ok; + + if (event->board_mcn_detail[WMI_BOARD_MCN_STRING_MAX_SIZE] != '\0') + event->board_mcn_detail[WMI_BOARD_MCN_STRING_MAX_SIZE] = '\0'; + + WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(param->board_mcn_detail, + event->board_mcn_detail, WMI_BOARD_MCN_STRING_BUF_SIZE); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_tpc_config_ev_param_non_tlv() - extract pdev tpc configuration + * 
param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold tpc configuration + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_pdev_tpc_config_ev_param_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_pdev_tpc_config_event *param) +{ + wmi_pdev_tpc_config_event *event = (wmi_pdev_tpc_config_event *)evt_buf; + + param->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID; + param->regDomain = event->regDomain; + param->chanFreq = event->chanFreq; + param->phyMode = event->phyMode; + param->twiceAntennaReduction = event->twiceAntennaReduction; + param->twiceMaxRDPower = event->twiceMaxRDPower; + param->powerLimit = event->powerLimit; + param->rateMax = event->rateMax; + param->numTxChain = event->numTxChain; + param->ctl = event->ctl; + param->flags = event->flags; + + qdf_mem_copy(param->maxRegAllowedPower, event->maxRegAllowedPower, + sizeof(param->maxRegAllowedPower)); + qdf_mem_copy(param->maxRegAllowedPowerAGCDD, + event->maxRegAllowedPowerAGCDD, + sizeof(param->maxRegAllowedPowerAGCDD)); + qdf_mem_copy(param->maxRegAllowedPowerAGSTBC, + event->maxRegAllowedPowerAGSTBC, + sizeof(param->maxRegAllowedPowerAGSTBC)); + qdf_mem_copy(param->maxRegAllowedPowerAGTXBF, + event->maxRegAllowedPowerAGTXBF, + sizeof(param->maxRegAllowedPowerAGTXBF)); + qdf_mem_copy(param->ratesArray, event->ratesArray, + sizeof(param->ratesArray)); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_nfcal_power_ev_param_non_tlv() - extract noise floor calibration + * power param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold nf cal power param + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_nfcal_power_ev_param_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_pdev_nfcal_power_all_channels_event *param) +{ + wmi_pdev_nfcal_power_all_channels_event *event = + 
(wmi_pdev_nfcal_power_all_channels_event *)evt_buf; + + if ((sizeof(event->nfdBr) == sizeof(param->nfdbr)) && + (sizeof(event->nfdBm) == sizeof(param->nfdbm)) && + (sizeof(event->freqNum) == sizeof(param->freqnum))) { + qdf_mem_copy(param->nfdbr, event->nfdBr, sizeof(param->nfdbr)); + qdf_mem_copy(param->nfdbm, event->nfdBm, sizeof(param->nfdbm)); + qdf_mem_copy(param->freqnum, event->freqNum, + sizeof(param->freqnum)); + } else { + WMI_LOGE("%s: %d Failed copy out of bound memory!\n", __func__, __LINE__); + return QDF_STATUS_E_RESOURCES; + } + + param->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_tpc_ev_param_non_tlv() - extract tpc param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold tpc param + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_pdev_tpc_ev_param_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_pdev_tpc_event *param) +{ + wmi_pdev_tpc_event *event = (wmi_pdev_tpc_event *)evt_buf; + + qdf_mem_copy(param->tpc, event->tpc, sizeof(param->tpc)); + param->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_generic_buffer_ev_param_non_tlv() - extract pdev generic buffer + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to generic buffer param + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_pdev_generic_buffer_ev_param_non_tlv( + wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_pdev_generic_buffer_event *param) +{ + wmi_pdev_generic_buffer_event *event = + (wmi_pdev_generic_buffer_event *)evt_buf; + + param->buf_type = event->buf_type; + param->frag_id = event->frag_id; + param->more_frag = event->more_frag; + param->buf_len = event->buf_len; + + qdf_mem_copy(param->buf_info, event->buf_info, event->buf_len); + + return QDF_STATUS_SUCCESS; +} + +/** + * 
extract_gpio_input_ev_param_non_tlv() - extract gpio input param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param gpio_num: Pointer to hold gpio number + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_gpio_input_ev_param_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t *gpio_num) +{ + wmi_gpio_input_event *ev = (wmi_gpio_input_event *) evt_buf; + + *gpio_num = ev->gpio_num; + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_reserve_ast_ev_param_non_tlv() - extract reserve ast entry + * param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param result: Pointer to hold reserve ast entry param + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_pdev_reserve_ast_ev_param_non_tlv( + wmi_unified_t wmi_handle, + void *evt_buf, struct wmi_host_proxy_ast_reserve_param *param) +{ + wmi_pdev_reserve_ast_entry_event *ev = + (wmi_pdev_reserve_ast_entry_event *) evt_buf; + + param->result = ev->result; + param->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_swba_num_vdevs_non_tlv() - extract swba num vdevs from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param num_vdevs: Pointer to hold num vdevs + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_swba_num_vdevs_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint32_t *num_vdevs) +{ + wmi_host_swba_event *swba_event = (wmi_host_swba_event *)evt_buf; + uint32_t vdev_map; + + vdev_map = swba_event->vdev_map; + *num_vdevs = wmi_vdev_map_to_num_vdevs(vdev_map); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_swba_tim_info_non_tlv() - extract swba tim info from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param idx: Index to bcn info + * @param tim_info: Pointer to hold tim info + * + * Return: 0 for success or error code + */ 
+static QDF_STATUS extract_swba_tim_info_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint32_t idx, wmi_host_tim_info *tim_info) +{ + wmi_host_swba_event *swba_event = (wmi_host_swba_event *)evt_buf; + wmi_bcn_info *bcn_info; + uint32_t vdev_map; + + bcn_info = &swba_event->bcn_info[idx]; + vdev_map = swba_event->vdev_map; + + tim_info->vdev_id = wmi_vdev_map_to_vdev_id(vdev_map, idx); + if (tim_info->vdev_id == WLAN_INVALID_VDEV_ID) + return QDF_STATUS_E_INVAL; + tim_info->tim_len = bcn_info->tim_info.tim_len; + tim_info->tim_mcast = bcn_info->tim_info.tim_mcast; + qdf_mem_copy(tim_info->tim_bitmap, bcn_info->tim_info.tim_bitmap, + (sizeof(uint32_t) * WMI_TIM_BITMAP_ARRAY_SIZE)); + tim_info->tim_changed = bcn_info->tim_info.tim_changed; + tim_info->tim_num_ps_pending = bcn_info->tim_info.tim_num_ps_pending; + return QDF_STATUS_SUCCESS; +} + +/** + * extract_swba_noa_info_non_tlv() - extract swba NoA information from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param idx: Index to bcn info + * @param p2p_desc: Pointer to hold p2p NoA info + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_swba_noa_info_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint32_t idx, wmi_host_p2p_noa_info *p2p_desc) +{ + wmi_host_swba_event *swba_event = (wmi_host_swba_event *)evt_buf; + wmi_p2p_noa_info *p2p_noa_info; + wmi_bcn_info *bcn_info; + uint8_t i = 0; + uint32_t vdev_map; + + bcn_info = &swba_event->bcn_info[idx]; + vdev_map = swba_event->vdev_map; + p2p_noa_info = &bcn_info->p2p_noa_info; + + p2p_desc->vdev_id = wmi_vdev_map_to_vdev_id(vdev_map, idx); + if (p2p_desc->vdev_id == WLAN_INVALID_VDEV_ID) + return QDF_STATUS_E_INVAL; + p2p_desc->modified = false; + p2p_desc->num_descriptors = 0; + if (WMI_UNIFIED_NOA_ATTR_IS_MODIFIED(p2p_noa_info)) { + p2p_desc->modified = true; + p2p_desc->index = + (uint8_t) WMI_UNIFIED_NOA_ATTR_INDEX_GET(p2p_noa_info); + p2p_desc->oppPS = + (uint8_t) 
WMI_UNIFIED_NOA_ATTR_OPP_PS_GET(p2p_noa_info); + p2p_desc->ctwindow = + (uint8_t) WMI_UNIFIED_NOA_ATTR_CTWIN_GET(p2p_noa_info); + p2p_desc->num_descriptors = + (uint8_t) WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(p2p_noa_info); + + for (i = 0; i < p2p_desc->num_descriptors; i++) { + p2p_desc->noa_descriptors[i].type_count = + (uint8_t) p2p_noa_info->noa_descriptors[i]. + type_count; + p2p_desc->noa_descriptors[i].duration = + p2p_noa_info->noa_descriptors[i].duration; + p2p_desc->noa_descriptors[i].interval = + p2p_noa_info->noa_descriptors[i].interval; + p2p_desc->noa_descriptors[i].start_time = + p2p_noa_info->noa_descriptors[i].start_time; + } + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_peer_sta_ps_statechange_ev_non_tlv() - extract peer sta ps state + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param ev: Pointer to hold peer param and ps state + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_peer_sta_ps_statechange_ev_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_peer_sta_ps_statechange_event *ev) +{ + wmi_peer_sta_ps_statechange_event *event = + (wmi_peer_sta_ps_statechange_event *)evt_buf; + + WMI_MAC_ADDR_TO_CHAR_ARRAY(&event->peer_macaddr, ev->peer_macaddr); + ev->peer_ps_state = event->peer_ps_state; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_peer_sta_kickout_ev_non_tlv() - extract peer sta kickout event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param ev: Pointer to hold peer param + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_peer_sta_kickout_ev_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_peer_sta_kickout_event *ev) +{ + wmi_peer_sta_kickout_event *kickout_event = + (wmi_peer_sta_kickout_event *)evt_buf; + + WMI_MAC_ADDR_TO_CHAR_ARRAY(&kickout_event->peer_macaddr, + ev->peer_macaddr); + + /**Following not available in legacy wmi*/ + ev->reason = 0; + ev->rssi = 0; + + return 
QDF_STATUS_SUCCESS; +} + +/** + * extract_peer_ratecode_list_ev_non_tlv() - extract peer ratecode from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param peer_mac: Pointer to hold peer mac address + * @param rate_cap: Pointer to hold ratecode + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_peer_ratecode_list_ev_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint8_t *peer_mac, wmi_sa_rate_cap *rate_cap) +{ + wmi_peer_ratecode_list_event_t *rate_event = + (wmi_peer_ratecode_list_event_t *)evt_buf; + int i, htindex, j; + uint8_t shift = 0; + + WMI_MAC_ADDR_TO_CHAR_ARRAY(&rate_event->peer_macaddr, peer_mac); + + htindex = 0; + rate_cap->ratecount[0] = + ((rate_event->peer_rate_info.ratecount) & SA_MASK_BYTE); + rate_cap->ratecount[1] = + ((rate_event->peer_rate_info.ratecount >> 8) & SA_MASK_BYTE); + rate_cap->ratecount[2] = + ((rate_event->peer_rate_info.ratecount >> 16) & SA_MASK_BYTE); + rate_cap->ratecount[3] = + ((rate_event->peer_rate_info.ratecount >> 24) & SA_MASK_BYTE); + + if (rate_cap->ratecount[0]) { + for (i = 0; i < SA_MAX_LEGACY_RATE_DWORDS; i++) { + for (j = 0; j < SA_BYTES_IN_DWORD; j++) { + rate_cap->ratecode_legacy[htindex] = + ((rate_event->peer_rate_info.ratecode_legacy[i] + >> (8*j)) & SA_MASK_BYTE); + htindex++; + } + } + } + + htindex = 0; + for (i = 0; i < SA_MAX_HT_RATE_DWORDS; i++) { + for (j = 0; j < SA_BYTES_IN_DWORD; j++) { + shift = (8*j); + rate_cap->ratecode_20[htindex] = + ((rate_event->peer_rate_info.ratecode_20[i] + >> (shift)) & SA_MASK_BYTE); + rate_cap->ratecode_40[htindex] = + ((rate_event->peer_rate_info.ratecode_40[i] + >> (shift)) & SA_MASK_BYTE); + rate_cap->ratecode_80[htindex] = + ((rate_event->peer_rate_info.ratecode_80[i] + >> (shift)) & SA_MASK_BYTE); + htindex++; + } + } + return QDF_STATUS_SUCCESS; +} + +/** + * extract_rtt_header_internal_non_tlv() - extract internal rtt header from + * event + * @param ev: pointer to internal rtt event header + * 
@param hdr: Pointer to received rtt event header + * + * Return: None + */ +static void extract_rtt_header_internal_non_tlv(wmi_host_rtt_event_hdr *ev, + wmi_rtt_event_hdr *hdr) +{ + ev->req_id = WMI_RTT_REQ_ID_GET(hdr->req_id); + ev->result = (hdr->req_id & 0xffff0000) >> 16; + ev->meas_type = WMI_RTT_REPORT_MEAS_TYPE_GET(hdr->req_id); + ev->report_type = WMI_RTT_REPORT_REPORT_TYPE_GET(hdr->req_id); + ev->v3_status = WMI_RTT_REPORT_V3_STATUS_GET(hdr->req_id); + ev->v3_finish = WMI_RTT_REPORT_V3_FINISH_GET(hdr->req_id); + ev->v3_tm_start = WMI_RTT_REPORT_V3_TM_START_GET(hdr->req_id); + ev->num_ap = WMI_RTT_REPORT_NUM_AP_GET(hdr->req_id); + + WMI_MAC_ADDR_TO_CHAR_ARRAY(&hdr->dest_mac, ev->dest_mac); +} + +/** + * extract_rtt_error_report_ev_non_tlv() - extract rtt error report from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param wds_ev: Pointer to hold rtt error report + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_rtt_error_report_ev_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_rtt_error_report_event *ev) +{ + wmi_rtt_error_report_event *error_report = + (wmi_rtt_error_report_event *) evt_buf; + + extract_rtt_header_internal_non_tlv(&ev->hdr, &error_report->header); + ev->reject_reason = error_report->reject_reason; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_rtt_hdr_non_tlv() - extract rtt header from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param ev: Pointer to hold rtt header + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_rtt_hdr_non_tlv(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_rtt_event_hdr *ev) +{ + wmi_rtt_event_hdr *hdr = (wmi_rtt_event_hdr *) evt_buf; + + extract_rtt_header_internal_non_tlv(ev, hdr); + + return QDF_STATUS_SUCCESS; +} + +/** + * copy_rtt_report_cfr + * @ev: pointer to destination event pointer + * @report_type: report type received in event + * @p: pointer to event 
data + * @hdump: pointer to destination buffer + * @hdump_len: length of dest buffer + * + * Return: Pointer to current offset in p + */ +static uint8_t *copy_rtt_report_cfr(wmi_host_rtt_meas_event *ev, + uint8_t report_type, uint8_t *p, + uint8_t *hdump, int16_t hdump_len) +{ + uint8_t index, i; + uint8_t *tmp, *temp1, *temp2; +#define TONE_LEGACY_20M 53 +#define TONE_VHT_20M 56 +#define TONE_VHT_40M 117 +#define TONE_VHT_80M 242 + int tone_number[4] = { + TONE_LEGACY_20M, TONE_VHT_20M, TONE_VHT_40M, TONE_VHT_80M}; +#define MEM_ALIGN(x) ((((x)<<1)+3) & 0xFFFC) + /* the buffer size of 1 chain for each BW 0-3 */ + u_int16_t bw_size[4] = { + MEM_ALIGN(TONE_LEGACY_20M), + MEM_ALIGN(TONE_VHT_20M), + MEM_ALIGN(TONE_VHT_40M), + MEM_ALIGN(TONE_VHT_80M) + }; + if (hdump == NULL) { + qdf_print("Destination buffer is NULL\n"); + return p; + } + temp1 = temp2 = hdump; + + for (index = 0; index < 4; index++) { + if (ev->chain_mask & (1 << index)) { + if (index == 0) + ev->rssi0 = *((u_int32_t *)p); + if (index == 1) + ev->rssi1 = *((u_int32_t *)p); + if (index == 2) + ev->rssi2 = *((u_int32_t *)p); + if (index == 3) + ev->rssi3 = *((u_int32_t *)p); + + p += sizeof(u_int32_t); + if (report_type == WMI_RTT_REPORT_CFR) { + tmp = p + bw_size[ev->bw]; + ev->txrxchain_mask = tone_number[ev->bw]; + temp2 = temp2 + bw_size[ev->bw]; + for (i = 0; (i < tone_number[ev->bw]); i++) { + qdf_mem_copy(temp1, p, 2); + temp1 += 2; + p += 2; + hdump_len -= 2; + if (hdump_len <= 0) + break; + } + temp1 = temp2; + p = tmp; + } + } + } + return p; +} + +/** + * extract_rtt_ev_non_tlv() - extract rtt event + * @wmi_handle: wmi handle + * @param evt_buf: Pointer to event buffer + * @param ev: Pointer to hold rtt event + * @param hdump: Pointer to hold hex dump + * @param hdump_len: hex dump length + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_rtt_ev_non_tlv(wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_rtt_meas_event *ev, uint8_t *hdump, uint16_t h_len) +{ + 
wmi_rtt_meas_event *body = (wmi_rtt_meas_event *) evt_buf; + uint8_t meas_type, report_type; + uint8_t *p; + int16_t hdump_len = h_len; + + A_TIME64 *time; + + if (body) { + meas_type = WMI_RTT_REPORT_MEAS_TYPE_GET(body->header.req_id); + report_type = + WMI_RTT_REPORT_REPORT_TYPE_GET(body->header.req_id); + + ev->chain_mask = WMI_RTT_REPORT_RX_CHAIN_GET(body->rx_chain); + ev->bw = WMI_RTT_REPORT_RX_BW_GET(body->rx_chain); + /* If report type is not WMI_RTT_REPORT_CFR */ + ev->txrxchain_mask = 0; + + ev->tod = ((u_int64_t) body->tod.time32) << 32; + ev->tod |= body->tod.time0; /*tmp1 is the 64 bit tod*/ + ev->toa = ((u_int64_t) body->toa.time32) << 32; + ev->toa |= body->toa.time0; + + p = (u_int8_t *) (++body); + + /* if the measurement is TMR, we should have T3 and T4 */ + if (meas_type == RTT_MEAS_FRAME_TMR) { + time = (A_TIME64 *) body; + ev->t3 = (u_int64_t) (time->time32) << 32; + ev->t3 |= time->time0; + + time++; + ev->t4 = (u_int64_t)(time->time32) << 32; + ev->t4 |= time->time0; + + p = (u_int8_t *) (++time); + } else { + ev->t3 = 0; + ev->t4 = 0; + } + + ev->rssi0 = 0; + ev->rssi1 = 0; + ev->rssi2 = 0; + ev->rssi3 = 0; + p = copy_rtt_report_cfr(ev, report_type, p, hdump, hdump_len); + } else { + qdf_print("Error!body is NULL\n"); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_thermal_stats_non_tlv() - extract thermal stats from event + * @wmi_handle: wmi handle + * @param evt_buf: Pointer to event buffer + * @param temp: Pointer to hold extracted temperature + * @param level: Pointer to hold extracted level + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_thermal_stats_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint32_t *temp, uint32_t *level, uint32_t *pdev_id) +{ + tt_stats_t *tt_stats_event = NULL; + + tt_stats_event = (tt_stats_t *) evt_buf; + + *pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID; + *temp = tt_stats_event->temp; + *level = tt_stats_event->level; + return 
QDF_STATUS_SUCCESS; +} + +/** + * extract_thermal_level_stats_non_tlv() - extract thermal level stats from + * event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param idx: Index to level stats + * @param levelcount: Pointer to hold levelcount + * @param dccount: Pointer to hold dccount + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_thermal_level_stats_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint8_t idx, uint32_t *levelcount, uint32_t *dccount) +{ + tt_stats_t *tt_stats_event = NULL; + + tt_stats_event = (tt_stats_t *) evt_buf; + + if (idx < TT_LEVELS) { + *levelcount = tt_stats_event->levstats[idx].levelcount; + *dccount = tt_stats_event->levstats[idx].dccount; + return QDF_STATUS_SUCCESS; + } + return QDF_STATUS_E_FAILURE; +} + +/** + * extract_comb_phyerr_non_tlv() - extract comb phy error from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param datalen: data length of event buffer + * @param buf_offset: Pointer to hold value of current event buffer offset + * post extraction + * @param phyer: Pointer to hold phyerr + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_comb_phyerr_non_tlv(wmi_unified_t wmi_handle, void *evt_buf, + uint16_t datalen, uint16_t *buf_offset, + wmi_host_phyerr_t *phyerr) +{ + wmi_comb_phyerr_rx_event *pe; +#if ATH_PHYERR_DEBUG + int i; +#endif /* ATH_PHYERR_DEBUG */ + uint8_t *data; + + data = (uint8_t *) evt_buf; + +#if ATH_PHYERR_DEBUG + qdf_print("%s: data=%pK, datalen=%d\n", __func__, data, datalen); + /* XXX for now */ + + for (i = 0; i < datalen; i++) { + qdf_print("%02X ", data[i]); + if (i % 32 == 31) + qdf_print("\n"); + } + qdf_print("\n"); +#endif /* ATH_PHYERR_DEBUG */ + + /* Ensure it's at least the size of the header */ + if (datalen < sizeof(*pe)) { + return QDF_STATUS_E_FAILURE; + /* XXX what should errors be? 
*/ + } + + pe = (wmi_comb_phyerr_rx_event *) data; +#if ATH_PHYERR_DEBUG + qdf_print("%s: pe->hdr.num_phyerr_events=%d\n", + __func__, + pe->hdr.num_phyerr_events); +#endif /* ATH_PHYERR_DEBUG */ + + /* + * Reconstruct the 64 bit event TSF. This isn't from the MAC, it's + * at the time the event was sent to us, the TSF value will be + * in the future. + */ + phyerr->tsf64 = pe->hdr.tsf_l32; + phyerr->tsf64 |= (((uint64_t) pe->hdr.tsf_u32) << 32); + + *buf_offset = sizeof(pe->hdr); + phyerr->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_single_phyerr_non_tlv() - extract single phy error from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param datalen: data length of event buffer + * @param buf_offset: Pointer to hold value of current event buffer offset + * post extraction + * @param phyerr: Pointer to hold phyerr + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_single_phyerr_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint16_t datalen, uint16_t *buf_offset, + wmi_host_phyerr_t *phyerr) +{ + wmi_single_phyerr_rx_event *ev; +#if ATH_PHYERR_DEBUG + int i; +#endif /* ATH_PHYERR_DEBUG */ + int n = 0; + uint8_t *data; + + phyerr->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID; + + n = (int) *buf_offset; + data = (uint8_t *) evt_buf; + + /* Loop over the bufp, extracting out phyerrors */ + /* + * XXX wmi_unified_comb_phyerr_rx_event.bufp is a char pointer, + * which isn't correct here - what we have received here + * is an array of TLV-style PHY errors. + */ + if (n < datalen) { + /* ensure there's at least space for the header */ + if ((datalen - n) < sizeof(ev->hdr)) { + qdf_print( + "%s: not enough space? (datalen=%d, n=%d, hdr=%zd bytes\n", + __func__, + datalen, + n, + sizeof(ev->hdr)); + return QDF_STATUS_SUCCESS; + } + + /* + * Obtain a pointer to the beginning of the current event. + * data[0] is the beginning of the WMI payload. 
+ */ + ev = (wmi_single_phyerr_rx_event *) &data[n]; + + /* + * Sanity check the buffer length of the event against + * what we currently have. + * + * Since buf_len is 32 bits, we check if it overflows + * a large 32 bit value. It's not 0x7fffffff because + * we increase n by (buf_len + sizeof(hdr)), which would + * in itself cause n to overflow. + * + * If "int" is 64 bits then this becomes a moot point. + */ + if (ev->hdr.buf_len > PHYERROR_MAX_BUFFER_LENGTH) { + qdf_print("%s: buf_len is garbage? (0x%x\n)\n", + __func__, + ev->hdr.buf_len); + return QDF_STATUS_SUCCESS; + } + if (n + ev->hdr.buf_len > datalen) { + qdf_print("%s: buf_len exceeds available space " + "(n=%d, buf_len=%d, datalen=%d\n", + __func__, + n, + ev->hdr.buf_len, + datalen); + return QDF_STATUS_SUCCESS; + } + + phyerr->phy_err_code = WMI_UNIFIED_PHYERRCODE_GET(&ev->hdr); + +#if ATH_PHYERR_DEBUG + qdf_print("%s: len=%d, tsf=0x%08x, rssi = 0x%x/0x%x/0x%x/0x%x, " + "comb rssi = 0x%x, phycode=%d\n", + __func__, + ev->hdr.buf_len, + ev->hdr.tsf_timestamp, + ev->hdr.rssi_chain0, + ev->hdr.rssi_chain1, + ev->hdr.rssi_chain2, + ev->hdr.rssi_chain3, + WMI_UNIFIED_RSSI_COMB_GET(&ev->hdr), + phyerr->phy_err_code); + + /* + * For now, unroll this loop - the chain 'value' field isn't + * a variable but glued together into a macro field definition. + * Grr. 
:-) + */ + qdf_print( + "%s: chain 0: raw=0x%08x; pri20=%d sec20=%d sec40=%d sec80=%d\n", + __func__, + ev->hdr.rssi_chain0, + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, PRI20), + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC20), + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC40), + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC80)); + + qdf_print( + "%s: chain 1: raw=0x%08x: pri20=%d sec20=%d sec40=%d sec80=%d\n", + __func__, + ev->hdr.rssi_chain1, + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, PRI20), + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC20), + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC40), + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC80)); + + qdf_print( + "%s: chain 2: raw=0x%08x: pri20=%d sec20=%d sec40=%d sec80=%d\n", + __func__, + ev->hdr.rssi_chain2, + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, PRI20), + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC20), + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC40), + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC80)); + + qdf_print( + "%s: chain 3: raw=0x%08x: pri20=%d sec20=%d sec40=%d sec80=%d\n", + __func__, + ev->hdr.rssi_chain3, + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, PRI20), + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC20), + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC40), + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC80)); + + + qdf_print( + "%s: freq_info_1=0x%08x, freq_info_2=0x%08x\n", + __func__, ev->hdr.freq_info_1, ev->hdr.freq_info_2); + + /* + * The NF chain values are signed and are negative - hence + * the cast evilness. 
+ */ + qdf_print( + "%s: nfval[1]=0x%08x, nfval[2]=0x%08x, nf=%d/%d/%d/%d, " + "freq1=%d, freq2=%d, cw=%d\n", + __func__, + ev->hdr.nf_list_1, + ev->hdr.nf_list_2, + (int) WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 0), + (int) WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 1), + (int) WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 2), + (int) WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 3), + WMI_UNIFIED_FREQ_INFO_GET(&ev->hdr, 1), + WMI_UNIFIED_FREQ_INFO_GET(&ev->hdr, 2), + WMI_UNIFIED_CHWIDTH_GET(&ev->hdr)); +#endif /* ATH_PHYERR_DEBUG */ + +#if ATH_SUPPORT_DFS + /* + * If required, pass radar events to the dfs pattern matching + * code. + * + * Don't pass radar events with no buffer payload. + */ + phyerr->tsf_timestamp = ev->hdr.tsf_timestamp; + phyerr->bufp = &ev->bufp[0]; + phyerr->buf_len = ev->hdr.buf_len; +#endif /* ATH_SUPPORT_DFS */ + + /* populate the rf info */ + phyerr->rf_info.rssi_comb = + WMI_UNIFIED_RSSI_COMB_GET(&ev->hdr); + +#ifdef WLAN_CONV_SPECTRAL_ENABLE + + /* + * If required, pass spectral events to the spectral module + * + */ + if (phyerr->phy_err_code == WMI_HOST_PHY_ERROR_FALSE_RADAR_EXT + || phyerr->phy_err_code == WMI_HOST_PHY_ERROR_SPECTRAL_SCAN) { + if (ev->hdr.buf_len > 0) { + + /* Initialize the NF values to Zero. 
*/ + phyerr->rf_info.noise_floor[0] = + WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 0); + phyerr->rf_info.noise_floor[1] = + WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 1); + phyerr->rf_info.noise_floor[2] = + WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 2); + phyerr->rf_info.noise_floor[3] = + WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 3); + + /* Need to unroll loop due to macro + * constraints + * chain 0 */ + phyerr->rf_info.pc_rssi_info[0].rssi_pri20 = + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, PRI20); + phyerr->rf_info.pc_rssi_info[0].rssi_sec20 = + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC20); + phyerr->rf_info.pc_rssi_info[0].rssi_sec40 = + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC40); + phyerr->rf_info.pc_rssi_info[0].rssi_sec80 = + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC80); + + /* chain 1 */ + phyerr->rf_info.pc_rssi_info[1].rssi_pri20 = + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, PRI20); + phyerr->rf_info.pc_rssi_info[1].rssi_sec20 = + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC20); + phyerr->rf_info.pc_rssi_info[1].rssi_sec40 = + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC40); + phyerr->rf_info.pc_rssi_info[1].rssi_sec80 = + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC80); + + /* chain 2 */ + phyerr->rf_info.pc_rssi_info[2].rssi_pri20 = + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, PRI20); + phyerr->rf_info.pc_rssi_info[2].rssi_sec20 = + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC20); + phyerr->rf_info.pc_rssi_info[2].rssi_sec40 = + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC40); + phyerr->rf_info.pc_rssi_info[2].rssi_sec80 = + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC80); + + /* chain 3 */ + phyerr->rf_info.pc_rssi_info[3].rssi_pri20 = + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, PRI20); + phyerr->rf_info.pc_rssi_info[3].rssi_sec20 = + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC20); + phyerr->rf_info.pc_rssi_info[3].rssi_sec40 = + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC40); + phyerr->rf_info.pc_rssi_info[3].rssi_sec80 = + WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC80); + + 
phyerr->chan_info.center_freq1 = + WMI_UNIFIED_FREQ_INFO_GET(&ev->hdr, 1); + phyerr->chan_info.center_freq2 = + WMI_UNIFIED_FREQ_INFO_GET(&ev->hdr, 2); + + } + } +#endif /* WLAN_CONV_SPECTRAL_ENABLE */ + + /* + * Advance the buffer pointer to the next PHY error. + * buflen is the length of this payload, so we need to + * advance past the current header _AND_ the payload. + */ + n += sizeof(*ev) + ev->hdr.buf_len; + } + *buf_offset = n; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_composite_phyerr_non_tlv() - extract composite phy error from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param datalen: Length of event buffer + * @param phyerr: Pointer to hold phy error + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_composite_phyerr_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint16_t datalen, wmi_host_phyerr_t *phyerr) +{ + wmi_composite_phyerr_rx_event *pe; + wmi_composite_phyerr_rx_hdr *ph; + + /* Ensure it's at least the size of the header */ + if (datalen < sizeof(*pe)) { + return QDF_STATUS_E_FAILURE; + /* XXX what should errors be? */ + } + + phyerr->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID; + + pe = (wmi_composite_phyerr_rx_event *) evt_buf; + ph = &pe->hdr; + + /* + * Reconstruct the 64 bit event TSF. This isn't from the MAC, it's + * at the time the event was sent to us, the TSF value will be + * in the future. + */ + phyerr->tsf64 = ph->tsf_l32; + phyerr->tsf64 |= (((uint64_t) ph->tsf_u32) << 32); + + phyerr->tsf_timestamp = ph->tsf_timestamp; + phyerr->bufp = &pe->bufp[0]; + phyerr->buf_len = ph->buf_len; + + phyerr->phy_err_mask0 = ph->phy_err_mask0; + phyerr->phy_err_mask1 = ph->phy_err_mask1; + + phyerr->rf_info.rssi_comb = + WMI_UNIFIED_RSSI_COMB_GET(ph); + + /* Handle Spectral PHY Error */ + if ((ph->phy_err_mask0 & WMI_HOST_AR900B_SPECTRAL_PHYERR_MASK)) { +#ifdef WLAN_CONV_SPECTRAL_ENABLE + if (ph->buf_len > 0) { + + /* Initialize the NF values to Zero. 
*/ + phyerr->rf_info.noise_floor[0] = + WMI_UNIFIED_NF_CHAIN_GET(ph, 0); + phyerr->rf_info.noise_floor[1] = + WMI_UNIFIED_NF_CHAIN_GET(ph, 1); + phyerr->rf_info.noise_floor[2] = + WMI_UNIFIED_NF_CHAIN_GET(ph, 2); + phyerr->rf_info.noise_floor[3] = + WMI_UNIFIED_NF_CHAIN_GET(ph, 3); + + /* populate the rf info */ + /* Need to unroll loop due to macro constraints */ + /* chain 0 */ + phyerr->rf_info.pc_rssi_info[0].rssi_pri20 = + WMI_UNIFIED_RSSI_CHAN_GET(ph, 0, PRI20); + phyerr->rf_info.pc_rssi_info[0].rssi_sec20 = + WMI_UNIFIED_RSSI_CHAN_GET(ph, 0, SEC20); + phyerr->rf_info.pc_rssi_info[0].rssi_sec40 = + WMI_UNIFIED_RSSI_CHAN_GET(ph, 0, SEC40); + phyerr->rf_info.pc_rssi_info[0].rssi_sec80 = + WMI_UNIFIED_RSSI_CHAN_GET(ph, 0, SEC80); + + /* chain 1 */ + phyerr->rf_info.pc_rssi_info[1].rssi_pri20 = + WMI_UNIFIED_RSSI_CHAN_GET(ph, 1, PRI20); + phyerr->rf_info.pc_rssi_info[1].rssi_sec20 = + WMI_UNIFIED_RSSI_CHAN_GET(ph, 1, SEC20); + phyerr->rf_info.pc_rssi_info[1].rssi_sec40 = + WMI_UNIFIED_RSSI_CHAN_GET(ph, 1, SEC40); + phyerr->rf_info.pc_rssi_info[1].rssi_sec80 = + WMI_UNIFIED_RSSI_CHAN_GET(ph, 1, SEC80); + + /* chain 2 */ + phyerr->rf_info.pc_rssi_info[2].rssi_pri20 = + WMI_UNIFIED_RSSI_CHAN_GET(ph, 2, PRI20); + phyerr->rf_info.pc_rssi_info[2].rssi_sec20 = + WMI_UNIFIED_RSSI_CHAN_GET(ph, 2, SEC20); + phyerr->rf_info.pc_rssi_info[2].rssi_sec40 = + WMI_UNIFIED_RSSI_CHAN_GET(ph, 2, SEC40); + phyerr->rf_info.pc_rssi_info[2].rssi_sec80 = + WMI_UNIFIED_RSSI_CHAN_GET(ph, 2, SEC80); + + /* chain 3 */ + phyerr->rf_info.pc_rssi_info[3].rssi_pri20 = + WMI_UNIFIED_RSSI_CHAN_GET(ph, 3, PRI20); + phyerr->rf_info.pc_rssi_info[3].rssi_sec20 = + WMI_UNIFIED_RSSI_CHAN_GET(ph, 3, SEC20); + phyerr->rf_info.pc_rssi_info[3].rssi_sec40 = + WMI_UNIFIED_RSSI_CHAN_GET(ph, 3, SEC40); + phyerr->rf_info.pc_rssi_info[3].rssi_sec80 = + WMI_UNIFIED_RSSI_CHAN_GET(ph, 3, SEC80); + + phyerr->chan_info.center_freq1 = + WMI_UNIFIED_FREQ_INFO_GET(ph, 1); + phyerr->chan_info.center_freq2 = + 
WMI_UNIFIED_FREQ_INFO_GET(ph, 2); + + } +#endif /* WLAN_CONV_SPECTRAL_ENABLE */ + + } + return QDF_STATUS_SUCCESS; +} + +/** + * extract_all_stats_counts_non_tlv() - extract all stats count from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param stats_param: Pointer to hold stats count + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_all_stats_counts_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_stats_event *stats_param) +{ + wmi_stats_event *ev = (wmi_stats_event *) evt_buf; + wmi_stats_id stats_id = ev->stats_id; + wmi_host_stats_id nac_rssi_ev = 0; + + if (stats_id & WMI_REQUEST_NAC_RSSI_STAT) { + nac_rssi_ev = WMI_HOST_REQUEST_NAC_RSSI; + stats_id &= ~WMI_REQUEST_NAC_RSSI_STAT; + } + + switch (stats_id) { + case WMI_REQUEST_PEER_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_PEER_STAT; + break; + + case WMI_REQUEST_AP_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_AP_STAT; + break; + + case WMI_REQUEST_INST_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_INST_STAT; + break; + + case WMI_REQUEST_PEER_EXTD_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_PEER_EXTD_STAT; + break; + + case WMI_REQUEST_VDEV_EXTD_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_VDEV_EXTD_STAT; + break; + + case WMI_REQUEST_PDEV_EXT2_STAT: + stats_param->stats_id |= nac_rssi_ev; + break; + default: + stats_param->stats_id = 0; + break; + + } + + stats_param->num_pdev_stats = ev->num_pdev_stats; + stats_param->num_pdev_ext_stats = ev->num_pdev_ext_stats; + stats_param->num_vdev_stats = ev->num_vdev_stats; + stats_param->num_peer_stats = ev->num_peer_stats; + stats_param->num_bcnflt_stats = ev->num_bcnflt_stats; + stats_param->num_chan_stats = 0; + stats_param->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_stats_non_tlv() - extract pdev stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index 
into pdev stats + * @param pdev_stats: Pointer to hold pdev stats + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_pdev_stats_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint32_t index, wmi_host_pdev_stats *pdev_stats) +{ + if (index < ((wmi_stats_event *)evt_buf)->num_pdev_stats) { + + wmi_pdev_stats *ev = + (wmi_pdev_stats *) ((((wmi_stats_event *)evt_buf)->data) + + (index * sizeof(wmi_pdev_stats))); + + /* direct copy possible since wmi_host_pdev_stats is same as + * wmi_pdev_stats for non-tlv */ + /* qdf_mem_copy(pdev_stats, ev, sizeof(wmi_pdev_stats));*/ + + pdev_stats->chan_nf = ev->chan_nf; + pdev_stats->tx_frame_count = ev->tx_frame_count; + pdev_stats->rx_frame_count = ev->rx_frame_count; + pdev_stats->rx_clear_count = ev->rx_clear_count; + pdev_stats->cycle_count = ev->cycle_count; + pdev_stats->phy_err_count = ev->phy_err_count; + pdev_stats->chan_tx_pwr = ev->chan_tx_pwr; + +#define tx_stats (pdev_stats->pdev_stats.tx) +#define ev_tx_stats (ev->pdev_stats.tx) + + /* Tx Stats */ + tx_stats.comp_queued = ev_tx_stats.comp_queued; + tx_stats.comp_delivered = ev_tx_stats.comp_delivered; + tx_stats.msdu_enqued = ev_tx_stats.msdu_enqued; + tx_stats.mpdu_enqued = ev_tx_stats.mpdu_enqued; + tx_stats.wmm_drop = ev_tx_stats.wmm_drop; + tx_stats.local_enqued = ev_tx_stats.local_enqued; + tx_stats.local_freed = ev_tx_stats.local_freed; + tx_stats.hw_queued = ev_tx_stats.hw_queued; + tx_stats.hw_reaped = ev_tx_stats.hw_reaped; + tx_stats.underrun = ev_tx_stats.underrun; + tx_stats.hw_paused = ev_tx_stats.hw_paused; + tx_stats.tx_abort = ev_tx_stats.tx_abort; + tx_stats.mpdus_requed = ev_tx_stats.mpdus_requed; + tx_stats.tx_xretry = ev_tx_stats.tx_xretry; + tx_stats.data_rc = ev_tx_stats.data_rc; + tx_stats.self_triggers = ev_tx_stats.self_triggers; + tx_stats.sw_retry_failure = ev_tx_stats.sw_retry_failure; + tx_stats.illgl_rate_phy_err = ev_tx_stats.illgl_rate_phy_err; + tx_stats.pdev_cont_xretry = ev_tx_stats.pdev_cont_xretry; + 
tx_stats.pdev_tx_timeout = ev_tx_stats.pdev_tx_timeout; + tx_stats.pdev_resets = ev_tx_stats.pdev_resets; + tx_stats.stateless_tid_alloc_failure = + ev_tx_stats.stateless_tid_alloc_failure; + tx_stats.phy_underrun = ev_tx_stats.phy_underrun; + tx_stats.txop_ovf = ev_tx_stats.txop_ovf; + tx_stats.seq_posted = ev_tx_stats.seq_posted; + tx_stats.seq_failed_queueing = ev_tx_stats.seq_failed_queueing; + tx_stats.seq_completed = ev_tx_stats.seq_completed; + tx_stats.seq_restarted = ev_tx_stats.seq_restarted; + tx_stats.mu_seq_posted = ev_tx_stats.mu_seq_posted; + tx_stats.mpdus_sw_flush = ev_tx_stats.mpdus_sw_flush; + tx_stats.mpdus_hw_filter = ev_tx_stats.mpdus_hw_filter; + tx_stats.mpdus_truncated = ev_tx_stats.mpdus_truncated; + tx_stats.mpdus_ack_failed = ev_tx_stats.mpdus_ack_failed; + tx_stats.mpdus_expired = ev_tx_stats.mpdus_expired; + /* Only NON-TLV */ + tx_stats.mc_drop = ev_tx_stats.mc_drop; + /* Only TLV */ + tx_stats.tx_ko = 0; + +#define rx_stats (pdev_stats->pdev_stats.rx) +#define ev_rx_stats (ev->pdev_stats.rx) + + /* Rx Stats */ + rx_stats.mid_ppdu_route_change = + ev_rx_stats.mid_ppdu_route_change; + rx_stats.status_rcvd = ev_rx_stats.status_rcvd; + rx_stats.r0_frags = ev_rx_stats.r0_frags; + rx_stats.r1_frags = ev_rx_stats.r1_frags; + rx_stats.r2_frags = ev_rx_stats.r2_frags; + /* Only TLV */ + rx_stats.r3_frags = 0; + rx_stats.htt_msdus = ev_rx_stats.htt_msdus; + rx_stats.htt_mpdus = ev_rx_stats.htt_mpdus; + rx_stats.loc_msdus = ev_rx_stats.loc_msdus; + rx_stats.loc_mpdus = ev_rx_stats.loc_mpdus; + rx_stats.oversize_amsdu = ev_rx_stats.oversize_amsdu; + rx_stats.phy_errs = ev_rx_stats.phy_errs; + rx_stats.phy_err_drop = ev_rx_stats.phy_err_drop; + rx_stats.mpdu_errs = ev_rx_stats.mpdu_errs; + rx_stats.pdev_rx_timeout = ev_rx_stats.pdev_rx_timeout; + rx_stats.rx_ovfl_errs = ev_rx_stats.rx_ovfl_errs; + + /* mem stats */ + pdev_stats->pdev_stats.mem.iram_free_size = + ev->pdev_stats.mem.iram_free_size; + pdev_stats->pdev_stats.mem.dram_free_size = + 
ev->pdev_stats.mem.dram_free_size; + /* Only Non-TLV */ + pdev_stats->pdev_stats.mem.sram_free_size = + ev->pdev_stats.mem.sram_free_size; + + /* Peer stats */ + /* Only TLV */ + pdev_stats->pdev_stats.peer.dummy = 0; + /* Only NON-TLV */ + pdev_stats->ackRcvBad = ev->ackRcvBad; + pdev_stats->rtsBad = ev->rtsBad; + pdev_stats->rtsGood = ev->rtsGood; + pdev_stats->fcsBad = ev->fcsBad; + pdev_stats->noBeacons = ev->noBeacons; + pdev_stats->mib_int_count = ev->mib_int_count; + + } + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_ext_stats_non_tlv() - extract extended pdev stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into extended pdev stats + * @param pdev_ext_stats: Pointer to hold extended pdev stats + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_pdev_ext_stats_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint32_t index, wmi_host_pdev_ext_stats *pdev_ext_stats) +{ + if (index < ((wmi_stats_event *)evt_buf)->num_pdev_ext_stats) { + wmi_pdev_ext_stats *ev = + (wmi_pdev_ext_stats *) ((((wmi_stats_event *)evt_buf)->data) + + ((((wmi_stats_event *)evt_buf)->num_pdev_stats) * + sizeof(wmi_pdev_stats)) + + (index * sizeof(wmi_pdev_ext_stats))); + /* Copy content to event->data */ + OS_MEMCPY(pdev_ext_stats, ev, sizeof(wmi_pdev_ext_stats)); + + } + return QDF_STATUS_SUCCESS; +} + +/** + * extract_vdev_stats_non_tlv() - extract vdev stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into vdev stats + * @param vdev_stats: Pointer to hold vdev stats + * + * Return: 0 for success or error code + */ + +static QDF_STATUS extract_vdev_stats_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, wmi_host_vdev_stats *vdev_stats) +{ + if (index < ((wmi_stats_event *)evt_buf)->num_vdev_stats) { + wmi_vdev_stats *ev = + (wmi_vdev_stats *) ((((wmi_stats_event *)evt_buf)->data) + + ((((wmi_stats_event 
*)evt_buf)->num_pdev_stats) * + sizeof(wmi_pdev_stats)) + + ((((wmi_stats_event *)evt_buf)->num_pdev_ext_stats) * + sizeof(wmi_pdev_ext_stats)) + + (index * sizeof(wmi_vdev_stats))); + + OS_MEMSET(vdev_stats, 0, sizeof(wmi_host_vdev_stats)); + vdev_stats->vdev_id = ev->vdev_id; + + } + return QDF_STATUS_SUCCESS; +} + +/** + * extract_peer_stats_non_tlv() - extract peer stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into peer stats + * @param peer_stats: Pointer to hold peer stats + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_peer_stats_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, wmi_host_peer_stats *peer_stats) +{ + if (index < ((wmi_stats_event *)evt_buf)->num_peer_stats) { + wmi_peer_stats *ev = + (wmi_peer_stats *) ((((wmi_stats_event *)evt_buf)->data) + + ((((wmi_stats_event *)evt_buf)->num_pdev_stats) * + sizeof(wmi_pdev_stats)) + + ((((wmi_stats_event *)evt_buf)->num_pdev_ext_stats) * + sizeof(wmi_pdev_ext_stats)) + + ((((wmi_stats_event *)evt_buf)->num_vdev_stats) * + sizeof(wmi_vdev_stats)) + + (index * sizeof(wmi_peer_stats))); + + OS_MEMCPY(&(peer_stats->peer_macaddr), &(ev->peer_macaddr), + sizeof(wmi_mac_addr)); + + peer_stats->peer_rssi = ev->peer_rssi; + peer_stats->peer_rssi_seq_num = ev->peer_rssi_seq_num; + peer_stats->peer_tx_rate = ev->peer_tx_rate; + peer_stats->peer_rx_rate = ev->peer_rx_rate; + peer_stats->currentper = ev->currentper; + peer_stats->retries = ev->retries; + peer_stats->txratecount = ev->txratecount; + peer_stats->max4msframelen = ev->max4msframelen; + peer_stats->totalsubframes = ev->totalsubframes; + peer_stats->txbytes = ev->txbytes; + + OS_MEMCPY(peer_stats->nobuffs, ev->nobuffs, + sizeof(peer_stats->nobuffs)); + OS_MEMCPY(peer_stats->excretries, ev->excretries, + sizeof(peer_stats->excretries)); + peer_stats->peer_rssi_changed = ev->peer_rssi_changed; + } + return QDF_STATUS_SUCCESS; +} + +/** + * 
extract_bcnflt_stats_non_tlv() - extract bcn fault stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into bcn fault stats + * @param bcnflt_stats: Pointer to hold bcn fault stats + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_bcnflt_stats_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, wmi_host_bcnflt_stats *bcnflt_stats) +{ + return QDF_STATUS_SUCCESS; +} + +/** + * extract_peer_extd_stats_non_tlv() - extract extended peer stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into extended peer stats + * @param peer_extd_stats: Pointer to hold extended peer stats + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_peer_extd_stats_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, + wmi_host_peer_extd_stats *peer_extd_stats) +{ + uint8_t *pdata = ((wmi_stats_event *)evt_buf)->data; + + if (WMI_REQUEST_PEER_EXTD_STAT & + ((wmi_stats_event *)evt_buf)->stats_id) { + if (index < ((wmi_stats_event *)evt_buf)->num_peer_stats) { + wmi_peer_extd_stats *ev = (wmi_peer_extd_stats *) + ((pdata) + + ((((wmi_stats_event *)evt_buf)->num_pdev_stats) * + sizeof(wmi_pdev_stats)) + + ((((wmi_stats_event *)evt_buf)->num_pdev_ext_stats) * + sizeof(wmi_pdev_ext_stats)) + + ((((wmi_stats_event *)evt_buf)->num_vdev_stats) * + sizeof(wmi_vdev_stats)) + + ((((wmi_stats_event *)evt_buf)->num_peer_stats) * + sizeof(wmi_peer_stats)) + + (index * sizeof(wmi_peer_extd_stats))); + + OS_MEMCPY(peer_extd_stats, ev, + sizeof(wmi_host_peer_extd_stats)); + } + } + return QDF_STATUS_SUCCESS; +} + +/** + * extract_vdev_extd_stats_non_tlv() - extract extended vdev stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into extended vdev stats + * @param vdev_extd_stats: Pointer to hold extended vdev stats + * + * Return: 0 for success or 
error code + */ +static QDF_STATUS extract_vdev_extd_stats_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, + wmi_host_vdev_extd_stats *vdev_extd_stats) +{ + uint8_t *pdata = ((wmi_stats_event *)evt_buf)->data; + + if (WMI_REQUEST_PEER_EXTD_STAT & + ((wmi_stats_event *)evt_buf)->stats_id) { + + if (index < ((wmi_stats_event *)evt_buf)->num_vdev_stats) { + + wmi_vdev_extd_stats *ev = (wmi_vdev_extd_stats *) + ((pdata) + + ((((wmi_stats_event *)evt_buf)->num_pdev_stats) * + sizeof(wmi_pdev_stats)) + + ((((wmi_stats_event *)evt_buf)->num_pdev_ext_stats) * + sizeof(wmi_pdev_ext_stats)) + + ((((wmi_stats_event *)evt_buf)->num_vdev_stats) * + sizeof(wmi_vdev_stats)) + + ((((wmi_stats_event *)evt_buf)->num_peer_stats) * + sizeof(wmi_peer_stats)) + + ((((wmi_stats_event *)evt_buf)->num_peer_stats) * + sizeof(wmi_peer_extd_stats)) + + (index * sizeof(wmi_vdev_extd_stats))); + + OS_MEMCPY(vdev_extd_stats, ev, + sizeof(wmi_host_vdev_extd_stats)); + } + } + return QDF_STATUS_SUCCESS; +} +/** + * extract_vdev_nac_rssi_stats_non_tlv() - extract vdev NAC_RSSI stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param vdev_nac_rssi_event: Pointer to hold vdev NAC_RSSI stats + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_vdev_nac_rssi_stats_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct wmi_host_vdev_nac_rssi_event *vdev_nac_rssi_stats) +{ + uint8_t *pdata = ((wmi_stats_event *)evt_buf)->data; + + if (WMI_REQUEST_NAC_RSSI_STAT & + ((wmi_stats_event *)evt_buf)->stats_id) { + + + struct wmi_host_vdev_nac_rssi_event *ev = (struct wmi_host_vdev_nac_rssi_event *) + ((pdata) + + ((((wmi_stats_event *)evt_buf)->num_pdev_stats) * + sizeof(wmi_pdev_stats)) + + ((((wmi_stats_event *)evt_buf)->num_pdev_ext_stats) * + sizeof(wmi_pdev_ext_stats)) + + ((((wmi_stats_event *)evt_buf)->num_vdev_stats) * + sizeof(wmi_vdev_stats)) + + ((((wmi_stats_event *)evt_buf)->num_peer_stats) * + 
sizeof(wmi_peer_stats)) + + ((((wmi_stats_event *)evt_buf)->num_bcnflt_stats) * + sizeof(wmi_bcnfilter_stats_t)) + + ((WMI_REQUEST_PEER_EXTD_STAT & + ((wmi_stats_event *)evt_buf)->stats_id) ? ((((wmi_stats_event *)evt_buf)->num_peer_stats) * + sizeof(wmi_peer_extd_stats)) : 0) + + ((WMI_REQUEST_VDEV_EXTD_STAT & + ((wmi_stats_event *)evt_buf)->stats_id) ? ((((wmi_stats_event *)evt_buf)->num_vdev_stats)* + sizeof(wmi_vdev_extd_stats)) : 0) + + ((((wmi_stats_event *)evt_buf)->num_pdev_stats) * + (sizeof(wmi_pdev_ext2_stats)))); + + OS_MEMCPY(vdev_nac_rssi_stats, ev, + sizeof(struct wmi_host_vdev_nac_rssi_event)); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_chan_stats_non_tlv() - extract chan stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into chan stats + * @param vdev_extd_stats: Pointer to hold chan stats + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_chan_stats_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint32_t index, wmi_host_chan_stats *chan_stats) +{ + /* Non-TLV doesn't have num_chan_stats */ + return QDF_STATUS_SUCCESS; +} + +/** + * extract_profile_ctx_non_tlv() - extract profile context from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param profile_ctx: Pointer to hold profile context + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_profile_ctx_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_wlan_profile_ctx_t *profile_ctx) +{ + wmi_profile_stats_event *profile_ev = + (wmi_profile_stats_event *)evt_buf; + + qdf_mem_copy(profile_ctx, &(profile_ev->profile_ctx), + sizeof(wmi_host_wlan_profile_ctx_t)); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_profile_data_non_tlv() - extract profile data from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param profile_data: Pointer to hold profile data + * + * Return: 0 for success or 
error code + */ +static QDF_STATUS extract_profile_data_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint8_t idx, + wmi_host_wlan_profile_t *profile_data) +{ + wmi_profile_stats_event *profile_ev = + (wmi_profile_stats_event *)evt_buf; + + if (idx > profile_ev->profile_ctx.bin_count) + return QDF_STATUS_E_INVAL; + + qdf_mem_copy(profile_data, &profile_ev->profile_data[idx], + sizeof(wmi_host_wlan_profile_t)); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_chan_info_event_non_tlv() - extract chan information from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param chan_info: Pointer to hold chan information + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_chan_info_event_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_chan_info_event *chan_info) +{ + wmi_chan_info_event *chan_info_ev = (wmi_chan_info_event *)evt_buf; + + chan_info->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID; + chan_info->err_code = chan_info_ev->err_code; + chan_info->freq = chan_info_ev->freq; + chan_info->cmd_flags = chan_info_ev->cmd_flags; + chan_info->noise_floor = chan_info_ev->noise_floor; + chan_info->rx_clear_count = chan_info_ev->rx_clear_count; + chan_info->cycle_count = chan_info_ev->cycle_count; + chan_info->rx_11b_mode_data_duration = + chan_info_ev->rx_11b_mode_data_duration; + /* ONLY NON-TLV */ + chan_info->chan_tx_pwr_range = chan_info_ev->chan_tx_pwr_range; + chan_info->chan_tx_pwr_tp = chan_info_ev->chan_tx_pwr_tp; + chan_info->rx_frame_count = chan_info_ev->rx_frame_count; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_channel_hopping_event_non_tlv() - extract channel hopping param + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param ch_hopping: Pointer to hold channel hopping param + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_channel_hopping_event_non_tlv( + wmi_unified_t wmi_handle, void *evt_buf, + 
wmi_host_pdev_channel_hopping_event *ch_hopping) +{ + wmi_pdev_channel_hopping_event *event = + (wmi_pdev_channel_hopping_event *)evt_buf; + + ch_hopping->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID; + ch_hopping->noise_floor_report_iter = event->noise_floor_report_iter; + ch_hopping->noise_floor_total_iter = event->noise_floor_total_iter; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_bss_chan_info_event_non_tlv() - extract bss channel information + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param bss_chan_info: Pointer to hold bss channel information + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_bss_chan_info_event_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_pdev_bss_chan_info_event *bss_chan_info) +{ + wmi_pdev_bss_chan_info_event *event = + (wmi_pdev_bss_chan_info_event *)evt_buf; + + bss_chan_info->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID; + bss_chan_info->freq = event->freq; + bss_chan_info->noise_floor = event->noise_floor; + bss_chan_info->rx_clear_count_low = event->rx_clear_count_low; + bss_chan_info->rx_clear_count_high = event->rx_clear_count_high; + bss_chan_info->cycle_count_low = event->cycle_count_low; + bss_chan_info->cycle_count_high = event->cycle_count_high; + bss_chan_info->tx_cycle_count_low = event->tx_cycle_count_low; + bss_chan_info->tx_cycle_count_high = event->tx_cycle_count_high; + bss_chan_info->rx_cycle_count_low = event->rx_cycle_count_low; + bss_chan_info->rx_cycle_count_high = event->rx_cycle_count_high; + bss_chan_info->rx_bss_cycle_count_low = event->rx_bss_cycle_count_low; + bss_chan_info->rx_bss_cycle_count_high = event->rx_bss_cycle_count_high; + bss_chan_info->reserved = event->reserved; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_inst_rssi_stats_event_non_tlv() - extract inst rssi stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param inst_rssi_resp: Pointer to hold inst rssi response 
+ * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_inst_rssi_stats_event_non_tlv( + wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_inst_stats_resp *inst_rssi_resp) +{ + wmi_inst_stats_resp *event = (wmi_inst_stats_resp *)evt_buf; + + inst_rssi_resp->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID; + qdf_mem_copy(inst_rssi_resp, event, sizeof(wmi_inst_stats_resp)); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_tx_data_traffic_ctrl_ev_non_tlv() - extract tx data traffic control + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param ev: Pointer to hold data traffic control + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_tx_data_traffic_ctrl_ev_non_tlv( + wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_tx_data_traffic_ctrl_event *ev) +{ + wmi_tx_data_traffic_ctrl_event *evt = + (wmi_tx_data_traffic_ctrl_event *)evt_buf; + + ev->peer_ast_idx = evt->peer_ast_idx; + ev->vdev_id = evt->vdev_id; + ev->ctrl_cmd = evt->ctrl_cmd; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_atf_peer_stats_ev_non_tlv() - extract atf peer stats + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param ev: Pointer to hold atf stats event data + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_atf_peer_stats_ev_non_tlv( + wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_atf_peer_stats_event *ev) +{ + wmi_atf_peer_stats_event *evt = + (wmi_atf_peer_stats_event *)evt_buf; + + ev->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID; + ev->num_atf_peers = evt->num_atf_peers; + ev->comp_usable_airtime = evt->comp_usable_airtime; + qdf_mem_copy(&ev->reserved[0], &evt->reserved[0], + sizeof(evt->reserved)); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_atf_token_info_ev_non_tlv() - extract atf token info + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @idx: Index indicating the peer number + * 
@param atf_token_info: Pointer to hold atf token info + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_atf_token_info_ev_non_tlv( + wmi_unified_t wmi_handle, void *evt_buf, + uint8_t idx, wmi_host_atf_peer_stats_info *atf_token_info) +{ + wmi_atf_peer_stats_event *evt = + (wmi_atf_peer_stats_event *)evt_buf; + + if (idx > evt->num_atf_peers) + return QDF_STATUS_E_INVAL; + + atf_token_info->field1 = evt->token_info_list[idx].field1; + atf_token_info->field2 = evt->token_info_list[idx].field2; + atf_token_info->field3 = evt->token_info_list[idx].field3; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_utf_event_non_tlv() - extract UTF data info from event + * @wmi_handle: WMI handle + * @param evt_buf: Pointer to event buffer + * @param param: Pointer to hold data + * + * Return : QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_pdev_utf_event_non_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_host_pdev_utf_event *event) +{ + event->data = evt_buf; + event->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_qvit_event_non_tlv() - extract QVIT data info from event + * @wmi_handle: WMI handle + * @param evt_buf: Pointer to event buffer + * @param param: Pointer to hold data + * + * Return : QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_pdev_qvit_event_non_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_host_pdev_qvit_event *event) +{ + event->data = evt_buf; + event->pdev_id = WMI_NON_TLV_DEFAULT_PDEV_ID; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_wds_entry_non_tlv() - extract wds entry from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @wds_entry: wds entry + * @idx: index to point wds entry in event buffer + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_wds_entry_non_tlv(wmi_unified_t wmi_handle, + u_int8_t *evt_buf, + struct 
wdsentry *wds_entry, + u_int32_t idx) +{ + wmi_pdev_wds_entry_dump_event *wds_entry_dump_event = + (wmi_pdev_wds_entry_dump_event *)evt_buf; + + if (idx >= wds_entry_dump_event->num_entries) + return QDF_STATUS_E_INVAL; + qdf_mem_zero(wds_entry, sizeof(struct wdsentry)); + WMI_MAC_ADDR_TO_CHAR_ARRAY( + &(wds_entry_dump_event->wds_entry[idx].peer_macaddr), + wds_entry->peer_mac); + WMI_MAC_ADDR_TO_CHAR_ARRAY( + &(wds_entry_dump_event->wds_entry[idx].wds_macaddr), + wds_entry->wds_mac); + wds_entry->flags = wds_entry_dump_event->wds_entry[idx].flags; + + return QDF_STATUS_SUCCESS; +} + + +static bool is_management_record_non_tlv(uint32_t cmd_id) +{ + switch (cmd_id) { + case WMI_BCN_TX_CMDID: + case WMI_MGMT_TX_CMDID: + case WMI_MGMT_RX_EVENTID: + case WMI_GPIO_OUTPUT_CMDID: + case WMI_HOST_SWBA_EVENTID: + case WMI_PDEV_SEND_BCN_CMDID: + return true; + default: + return false; + } +} + +static bool is_diag_event_non_tlv(uint32_t event_id) +{ + if (WMI_DEBUG_MESG_EVENTID == event_id) + return true; + + return false; +} + +/** + * wmi_set_htc_tx_tag_non_tlv() - set HTC TX tag for WMI commands + * @wmi_handle: WMI handle + * @buf: WMI buffer + * @cmd_id: WMI command Id + * + * Return htc_tx_tag + */ +static uint16_t wmi_set_htc_tx_tag_non_tlv(wmi_unified_t wmi_handle, + wmi_buf_t buf, uint32_t cmd_id) +{ + return 0; +} + +/** + * send_dfs_phyerr_offload_en_cmd_non_tlv() - send dfs phyerr offload en cmd + * @wmi_handle: wmi handle + * @pdev_id: pdev id + * + * Send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID command to firmware. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +static QDF_STATUS send_dfs_phyerr_offload_en_cmd_non_tlv( + wmi_unified_t wmi_handle, + uint32_t pdev_id) +{ + return QDF_STATUS_SUCCESS; +} + +/** + * send_dfs_phyerr_offload_dis_cmd_non_tlv() - send dfs phyerr offload dis cmd + * @wmi_handle: wmi handle + * @pdev_id: pdev id + * + * Send WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID command to firmware. 
+ * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +static QDF_STATUS send_dfs_phyerr_offload_dis_cmd_non_tlv( + wmi_unified_t wmi_handle, + uint32_t pdev_id) +{ + return QDF_STATUS_SUCCESS; +} + +/** + * send_wds_entry_list_cmd_non_tlv() - WMI function to get list of + * wds entries from FW + * + * @param wmi_handle : handle to WMI. + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +QDF_STATUS send_wds_entry_list_cmd_non_tlv(wmi_unified_t wmi_handle) +{ + wmi_buf_t buf; + + /* + * Passing a NULL pointer to wmi_unified_cmd_send() panics it, + * so let's just use a 32 byte fake array for now. + */ + buf = wmi_buf_alloc(wmi_handle, 32); + if (buf == NULL) + return QDF_STATUS_E_NOMEM; + + if (wmi_unified_cmd_send(wmi_handle, buf, 32, + WMI_PDEV_WDS_ENTRY_LIST_CMDID) != QDF_STATUS_SUCCESS) { + qdf_print("%s: send failed\n", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST) +/** + * send_dfs_average_radar_params_cmd_non_tlv() - send average radar params to + * fw. + * @wmi_handle: wmi handle + * @params: pointer to dfs_radar_found_params. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +static QDF_STATUS send_dfs_average_radar_params_cmd_non_tlv( + wmi_unified_t wmi_handle, + struct dfs_radar_found_params *params) +{ + wmi_host_dfs_radar_found_cmd *cmd; + wmi_buf_t buf; + + int len = sizeof(wmi_host_dfs_radar_found_cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGD("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_host_dfs_radar_found_cmd *)wmi_buf_data(buf); + + /* Fill the WMI structure (PRI, duration, SIDX) from + * the radar_found_param structure and then send + * out. 
+ */ + cmd->pri_min_value = params->pri_min; + cmd->pri_max_value = params->pri_max; + cmd->duration_min_value = params->duration_min; + cmd->duration_max_value = params->duration_max; + cmd->sidx_min_value = params->sidx_min; + cmd->sidx_max_value = params->sidx_max; + + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_HOST_DFS_RADAR_FOUND_CMDID)) { + WMI_LOGD("%s:Failed to send WMI command\n", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_dfs_status_from_fw_non_tlv() - extract the result of host dfs check + * from fw + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @fw_dfs_status_code: pointer to the status received from fw. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +static QDF_STATUS +extract_dfs_status_from_fw_non_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint32_t *fw_dfs_status_code) +{ + wmi_host_dfs_status_check_event *ev = + (wmi_host_dfs_status_check_event *)evt_buf; + + if ((ev->status == WMI_HOST_DFS_CHECK_PASSED) || + (ev->status == WMI_HOST_DFS_CHECK_FAILED) || + (ev->status == WMI_HOST_DFS_CHECK_HW_RADAR)) { + *fw_dfs_status_code = ev->status; + return QDF_STATUS_SUCCESS; + } + + WMI_LOGD("%s..Invalid status code : %d received\n", __func__, + ev->status); + + return QDF_STATUS_E_FAILURE; +} +#endif + +/** + * wmi_non_tlv_pdev_id_conversion_enable() - Enable pdev_id conversion + * + * Return None. 
+ */
+void wmi_non_tlv_pdev_id_conversion_enable(wmi_unified_t wmi_handle)
+{
+ /* pdev_id conversion is not implemented for non-TLV targets. */
+ qdf_print("PDEV conversion Not Available");
+}
+
+/* Dispatch table binding the unified WMI ops to their non-TLV handlers. */
+struct wmi_ops non_tlv_ops = {
+ .send_vdev_create_cmd = send_vdev_create_cmd_non_tlv,
+ .send_vdev_delete_cmd = send_vdev_delete_cmd_non_tlv,
+ .send_vdev_down_cmd = send_vdev_down_cmd_non_tlv,
+ .send_peer_flush_tids_cmd = send_peer_flush_tids_cmd_non_tlv,
+ .send_peer_param_cmd = send_peer_param_cmd_non_tlv,
+ .send_vdev_up_cmd = send_vdev_up_cmd_non_tlv,
+ .send_peer_create_cmd = send_peer_create_cmd_non_tlv,
+ .send_peer_delete_cmd = send_peer_delete_cmd_non_tlv,
+#ifdef WLAN_SUPPORT_GREEN_AP
+ .send_green_ap_ps_cmd = send_green_ap_ps_cmd_non_tlv,
+#endif
+ .send_pdev_utf_cmd = send_pdev_utf_cmd_non_tlv,
+ .send_pdev_param_cmd = send_pdev_param_cmd_non_tlv,
+ .send_suspend_cmd = send_suspend_cmd_non_tlv,
+ .send_resume_cmd = send_resume_cmd_non_tlv,
+ .send_wow_enable_cmd = send_wow_enable_cmd_non_tlv,
+ .send_set_ap_ps_param_cmd = send_set_ap_ps_param_cmd_non_tlv,
+ .send_set_sta_ps_param_cmd = send_set_sta_ps_param_cmd_non_tlv,
+ .send_crash_inject_cmd = send_crash_inject_cmd_non_tlv,
+ .send_dbglog_cmd = send_dbglog_cmd_non_tlv,
+ .send_vdev_set_param_cmd = send_vdev_set_param_cmd_non_tlv,
+ .send_stats_request_cmd = send_stats_request_cmd_non_tlv,
+ .send_packet_log_enable_cmd = send_packet_log_enable_cmd_non_tlv,
+ .send_packet_log_disable_cmd = send_packet_log_disable_cmd_non_tlv,
+ .send_beacon_send_cmd = send_beacon_send_cmd_non_tlv,
+ .send_peer_assoc_cmd = send_peer_assoc_cmd_non_tlv,
+ .send_scan_start_cmd = send_scan_start_cmd_non_tlv,
+ .send_scan_stop_cmd = send_scan_stop_cmd_non_tlv,
+ .send_scan_chan_list_cmd = send_scan_chan_list_cmd_non_tlv,
+ .send_pdev_get_tpc_config_cmd = send_pdev_get_tpc_config_cmd_non_tlv,
+ .send_set_atf_cmd = send_set_atf_cmd_non_tlv,
+ .send_atf_peer_request_cmd = send_atf_peer_request_cmd_non_tlv,
+ .send_set_atf_grouping_cmd = send_set_atf_grouping_cmd_non_tlv,
+ .send_set_bwf_cmd = send_set_bwf_cmd_non_tlv,
+ .send_pdev_fips_cmd = send_pdev_fips_cmd_non_tlv,
+ .send_wlan_profile_enable_cmd = send_wlan_profile_enable_cmd_non_tlv,
+ .send_wlan_profile_trigger_cmd = send_wlan_profile_trigger_cmd_non_tlv,
+ .send_pdev_set_chan_cmd = send_pdev_set_chan_cmd_non_tlv,
+ .send_set_ht_ie_cmd = send_set_ht_ie_cmd_non_tlv,
+ .send_set_vht_ie_cmd = send_set_vht_ie_cmd_non_tlv,
+ .send_wmm_update_cmd = send_wmm_update_cmd_non_tlv,
+ .send_set_ant_switch_tbl_cmd = send_set_ant_switch_tbl_cmd_non_tlv,
+ .send_set_ratepwr_table_cmd = send_set_ratepwr_table_cmd_non_tlv,
+ .send_get_ratepwr_table_cmd = send_get_ratepwr_table_cmd_non_tlv,
+ .send_set_ctl_table_cmd = send_set_ctl_table_cmd_non_tlv,
+ .send_set_mimogain_table_cmd = send_set_mimogain_table_cmd_non_tlv,
+ .send_set_ratepwr_chainmsk_cmd = send_set_ratepwr_chainmsk_cmd_non_tlv,
+ .send_set_macaddr_cmd = send_set_macaddr_cmd_non_tlv,
+ .send_pdev_scan_start_cmd = send_pdev_scan_start_cmd_non_tlv,
+ .send_pdev_scan_end_cmd = send_pdev_scan_end_cmd_non_tlv,
+ .send_set_acparams_cmd = send_set_acparams_cmd_non_tlv,
+ .send_set_vap_dscp_tid_map_cmd = send_set_vap_dscp_tid_map_cmd_non_tlv,
+ .send_proxy_ast_reserve_cmd = send_proxy_ast_reserve_cmd_non_tlv,
+ .send_pdev_qvit_cmd = send_pdev_qvit_cmd_non_tlv,
+ .send_mcast_group_update_cmd = send_mcast_group_update_cmd_non_tlv,
+ .send_peer_add_wds_entry_cmd = send_peer_add_wds_entry_cmd_non_tlv,
+ .send_peer_del_wds_entry_cmd = send_peer_del_wds_entry_cmd_non_tlv,
+ .send_set_bridge_mac_addr_cmd = send_set_bridge_mac_addr_cmd_non_tlv,
+ .send_peer_update_wds_entry_cmd =
+ send_peer_update_wds_entry_cmd_non_tlv,
+ .send_phyerr_enable_cmd = send_phyerr_enable_cmd_non_tlv,
+ .send_phyerr_disable_cmd = send_phyerr_disable_cmd_non_tlv,
+ .send_smart_ant_enable_cmd = send_smart_ant_enable_cmd_non_tlv,
+ .send_smart_ant_set_rx_ant_cmd = send_smart_ant_set_rx_ant_cmd_non_tlv,
+ .send_smart_ant_set_tx_ant_cmd = send_smart_ant_set_tx_ant_cmd_non_tlv,
+ .send_smart_ant_set_training_info_cmd =
+ send_smart_ant_set_training_info_cmd_non_tlv,
+ .send_smart_ant_set_node_config_cmd =
+ send_smart_ant_set_node_config_cmd_non_tlv,
+ .send_smart_ant_enable_tx_feedback_cmd =
+ send_smart_ant_enable_tx_feedback_cmd_non_tlv,
+ .send_vdev_spectral_configure_cmd =
+ send_vdev_spectral_configure_cmd_non_tlv,
+ .send_vdev_spectral_enable_cmd =
+ send_vdev_spectral_enable_cmd_non_tlv,
+ .send_bss_chan_info_request_cmd =
+ send_bss_chan_info_request_cmd_non_tlv,
+ .send_thermal_mitigation_param_cmd =
+ send_thermal_mitigation_param_cmd_non_tlv,
+ .send_vdev_start_cmd = send_vdev_start_cmd_non_tlv,
+ .send_vdev_set_nac_rssi_cmd = send_vdev_set_nac_rssi_cmd_non_tlv,
+ .send_vdev_stop_cmd = send_vdev_stop_cmd_non_tlv,
+ .send_vdev_set_neighbour_rx_cmd =
+ send_vdev_set_neighbour_rx_cmd_non_tlv,
+ .send_vdev_set_fwtest_param_cmd =
+ send_vdev_set_fwtest_param_cmd_non_tlv,
+ .send_vdev_config_ratemask_cmd = send_vdev_config_ratemask_cmd_non_tlv,
+ .send_setup_install_key_cmd =
+ send_setup_install_key_cmd_non_tlv,
+ .send_wow_wakeup_cmd = send_wow_wakeup_cmd_non_tlv,
+ .send_wow_add_wakeup_event_cmd = send_wow_add_wakeup_event_cmd_non_tlv,
+ .send_wow_add_wakeup_pattern_cmd =
+ send_wow_add_wakeup_pattern_cmd_non_tlv,
+ .send_wow_remove_wakeup_pattern_cmd =
+ send_wow_remove_wakeup_pattern_cmd_non_tlv,
+ .send_pdev_set_regdomain_cmd =
+ send_pdev_set_regdomain_cmd_non_tlv,
+ .send_set_quiet_mode_cmd = send_set_quiet_mode_cmd_non_tlv,
+ .send_set_beacon_filter_cmd = send_set_beacon_filter_cmd_non_tlv,
+ .send_remove_beacon_filter_cmd = send_remove_beacon_filter_cmd_non_tlv,
+ .send_mgmt_cmd = send_mgmt_cmd_non_tlv,
+ .send_addba_clearresponse_cmd = send_addba_clearresponse_cmd_non_tlv,
+ .send_addba_send_cmd = send_addba_send_cmd_non_tlv,
+ .send_delba_send_cmd = send_delba_send_cmd_non_tlv,
+ .send_addba_setresponse_cmd = send_addba_setresponse_cmd_non_tlv,
+ .send_singleamsdu_cmd = send_singleamsdu_cmd_non_tlv,
+ .send_set_qboost_param_cmd = send_set_qboost_param_cmd_non_tlv,
+ .send_mu_scan_cmd = send_mu_scan_cmd_non_tlv,
+ .send_lteu_config_cmd = send_lteu_config_cmd_non_tlv,
+ .send_set_ps_mode_cmd = send_set_ps_mode_cmd_non_tlv,
+ .init_cmd_send = init_cmd_send_non_tlv,
+ .send_ext_resource_config = send_ext_resource_config_non_tlv,
+#if 0
+ .send_bcn_prb_template_cmd = send_bcn_prb_template_cmd_non_tlv,
+#endif
+ .send_nf_dbr_dbm_info_get_cmd = send_nf_dbr_dbm_info_get_cmd_non_tlv,
+ .send_packet_power_info_get_cmd =
+ send_packet_power_info_get_cmd_non_tlv,
+ .send_gpio_config_cmd = send_gpio_config_cmd_non_tlv,
+ .send_gpio_output_cmd = send_gpio_output_cmd_non_tlv,
+ .send_rtt_meas_req_test_cmd = send_rtt_meas_req_test_cmd_non_tlv,
+ .send_rtt_meas_req_cmd = send_rtt_meas_req_cmd_non_tlv,
+ .send_lci_set_cmd = send_lci_set_cmd_non_tlv,
+ .send_lcr_set_cmd = send_lcr_set_cmd_non_tlv,
+ .send_start_oem_data_cmd = send_start_oem_data_cmd_non_tlv,
+ .send_rtt_keepalive_req_cmd = send_rtt_keepalive_req_cmd_non_tlv,
+ .send_periodic_chan_stats_config_cmd =
+ send_periodic_chan_stats_config_cmd_non_tlv,
+ .send_get_user_position_cmd = send_get_user_position_cmd_non_tlv,
+ .send_reset_peer_mumimo_tx_count_cmd =
+ send_reset_peer_mumimo_tx_count_cmd_non_tlv,
+ .send_get_peer_mumimo_tx_count_cmd =
+ send_get_peer_mumimo_tx_count_cmd_non_tlv,
+ .send_pdev_caldata_version_check_cmd =
+ send_pdev_caldata_version_check_cmd_non_tlv,
+ .send_btcoex_wlan_priority_cmd = send_btcoex_wlan_priority_cmd_non_tlv,
+ .send_btcoex_duty_cycle_cmd = send_btcoex_duty_cycle_cmd_non_tlv,
+ .send_coex_ver_cfg_cmd = send_coex_ver_cfg_cmd_non_tlv,
+
+ .get_target_cap_from_service_ready = extract_service_ready_non_tlv,
+ .extract_fw_version = extract_fw_version_non_tlv,
+ .extract_fw_abi_version = extract_fw_abi_version_non_tlv,
+ .extract_hal_reg_cap = extract_hal_reg_cap_non_tlv,
+ .extract_host_mem_req = extract_host_mem_req_non_tlv,
+ .save_service_bitmap = save_service_bitmap_non_tlv,
+ .is_service_enabled = is_service_enabled_non_tlv,
+ .save_fw_version = save_fw_version_in_service_ready_non_tlv,
+ .check_and_update_fw_version =
+ ready_check_and_update_fw_version_non_tlv,
+ .extract_dbglog_data_len = extract_dbglog_data_len_non_tlv,
+ .ready_extract_init_status = ready_extract_init_status_non_tlv,
+ .ready_extract_mac_addr = ready_extract_mac_addr_non_tlv,
+ .extract_ready_event_params = extract_ready_event_params_non_tlv,
+ .extract_wds_addr_event = extract_wds_addr_event_non_tlv,
+ .extract_dcs_interference_type = extract_dcs_interference_type_non_tlv,
+ .extract_dcs_cw_int = extract_dcs_cw_int_non_tlv,
+ .extract_dcs_im_tgt_stats = extract_dcs_im_tgt_stats_non_tlv,
+ .extract_vdev_start_resp = extract_vdev_start_resp_non_tlv,
+ .extract_tbttoffset_update_params =
+ extract_tbttoffset_update_params_non_tlv,
+ .extract_tbttoffset_num_vdevs =
+ extract_tbttoffset_num_vdevs_non_tlv,
+ .extract_mgmt_rx_params = extract_mgmt_rx_params_non_tlv,
+ .extract_vdev_stopped_param = extract_vdev_stopped_param_non_tlv,
+ .extract_vdev_roam_param = extract_vdev_roam_param_non_tlv,
+ .extract_vdev_scan_ev_param = extract_vdev_scan_ev_param_non_tlv,
+ .extract_mu_ev_param = extract_mu_ev_param_non_tlv,
+ .extract_pdev_tpc_config_ev_param =
+ extract_pdev_tpc_config_ev_param_non_tlv,
+ .extract_nfcal_power_ev_param = extract_nfcal_power_ev_param_non_tlv,
+ .extract_pdev_tpc_ev_param = extract_pdev_tpc_ev_param_non_tlv,
+ .extract_pdev_generic_buffer_ev_param =
+ extract_pdev_generic_buffer_ev_param_non_tlv,
+ .extract_gpio_input_ev_param = extract_gpio_input_ev_param_non_tlv,
+ .extract_pdev_reserve_ast_ev_param =
+ extract_pdev_reserve_ast_ev_param_non_tlv,
+ .extract_swba_num_vdevs = extract_swba_num_vdevs_non_tlv,
+ .extract_swba_tim_info = extract_swba_tim_info_non_tlv,
+ .extract_swba_noa_info = extract_swba_noa_info_non_tlv,
+ .extract_peer_sta_ps_statechange_ev =
+ extract_peer_sta_ps_statechange_ev_non_tlv,
+ .extract_peer_sta_kickout_ev = extract_peer_sta_kickout_ev_non_tlv,
+ .extract_peer_ratecode_list_ev = extract_peer_ratecode_list_ev_non_tlv,
+ .extract_comb_phyerr = extract_comb_phyerr_non_tlv,
+ .extract_single_phyerr = extract_single_phyerr_non_tlv,
+ .extract_composite_phyerr = extract_composite_phyerr_non_tlv,
+ .extract_rtt_hdr = extract_rtt_hdr_non_tlv,
+ .extract_rtt_ev = extract_rtt_ev_non_tlv,
+ .extract_rtt_error_report_ev = extract_rtt_error_report_ev_non_tlv,
+ .extract_all_stats_count = extract_all_stats_counts_non_tlv,
+ .extract_pdev_stats = extract_pdev_stats_non_tlv,
+ .extract_pdev_ext_stats = extract_pdev_ext_stats_non_tlv,
+ .extract_vdev_stats = extract_vdev_stats_non_tlv,
+ .extract_peer_stats = extract_peer_stats_non_tlv,
+ .extract_bcnflt_stats = extract_bcnflt_stats_non_tlv,
+ .extract_peer_extd_stats = extract_peer_extd_stats_non_tlv,
+ .extract_chan_stats = extract_chan_stats_non_tlv,
+ .extract_thermal_stats = extract_thermal_stats_non_tlv,
+ .extract_thermal_level_stats = extract_thermal_level_stats_non_tlv,
+ .extract_profile_ctx = extract_profile_ctx_non_tlv,
+ .extract_profile_data = extract_profile_data_non_tlv,
+ .extract_chan_info_event = extract_chan_info_event_non_tlv,
+ .extract_channel_hopping_event = extract_channel_hopping_event_non_tlv,
+ .extract_bss_chan_info_event = extract_bss_chan_info_event_non_tlv,
+ .extract_inst_rssi_stats_event = extract_inst_rssi_stats_event_non_tlv,
+ .extract_tx_data_traffic_ctrl_ev =
+ extract_tx_data_traffic_ctrl_ev_non_tlv,
+ .extract_vdev_extd_stats = extract_vdev_extd_stats_non_tlv,
+ .extract_vdev_nac_rssi_stats = extract_vdev_nac_rssi_stats_non_tlv,
+ .extract_fips_event_data = extract_fips_event_data_non_tlv,
+ .extract_mumimo_tx_count_ev_param =
+ extract_mumimo_tx_count_ev_param_non_tlv,
+ .extract_peer_gid_userpos_list_ev_param =
+ extract_peer_gid_userpos_list_ev_param_non_tlv,
+ .extract_pdev_caldata_version_check_ev_param =
+ extract_pdev_caldata_version_check_ev_param_non_tlv,
+ .extract_mu_db_entry = extract_mu_db_entry_non_tlv,
+ .extract_atf_peer_stats_ev = extract_atf_peer_stats_ev_non_tlv,
+ .extract_atf_token_info_ev = extract_atf_token_info_ev_non_tlv,
+ .extract_pdev_utf_event = extract_pdev_utf_event_non_tlv,
+ .wmi_set_htc_tx_tag = wmi_set_htc_tx_tag_non_tlv,
+ .is_management_record = is_management_record_non_tlv,
+ .is_diag_event = is_diag_event_non_tlv,
+ .send_dfs_phyerr_offload_en_cmd =
+ send_dfs_phyerr_offload_en_cmd_non_tlv,
+ .send_dfs_phyerr_offload_dis_cmd =
+ send_dfs_phyerr_offload_dis_cmd_non_tlv,
+ .send_wds_entry_list_cmd = send_wds_entry_list_cmd_non_tlv,
+ .extract_wds_entry = extract_wds_entry_non_tlv,
+#ifdef WLAN_SUPPORT_FILS
+ .send_vdev_fils_enable_cmd = send_vdev_fils_enable_cmd_non_tlv,
+ .send_fils_discovery_send_cmd = send_fils_discovery_send_cmd_non_tlv,
+ .extract_swfda_vdev_id = extract_swfda_vdev_id_non_tlv,
+#endif /* WLAN_SUPPORT_FILS */
+ .wmi_pdev_id_conversion_enable = wmi_non_tlv_pdev_id_conversion_enable,
+#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST)
+ .send_dfs_average_radar_params_cmd =
+ send_dfs_average_radar_params_cmd_non_tlv,
+ .extract_dfs_status_from_fw = extract_dfs_status_from_fw_non_tlv,
+#endif
+};
+
+/**
+ * populate_non_tlv_service() - populates wmi services
+ *
+ * @param wmi_service: Pointer to hold wmi_service
+ * Return: None
+ */
+static void populate_non_tlv_service(uint32_t *wmi_service)
+{
+ wmi_service[wmi_service_beacon_offload] = WMI_SERVICE_BEACON_OFFLOAD;
+ wmi_service[wmi_service_scan_offload] = WMI_SERVICE_SCAN_OFFLOAD;
+ wmi_service[wmi_service_roam_offload] = WMI_SERVICE_ROAM_OFFLOAD;
+ wmi_service[wmi_service_bcn_miss_offload] =
+ WMI_SERVICE_BCN_MISS_OFFLOAD;
+ wmi_service[wmi_service_sta_pwrsave] = WMI_SERVICE_STA_PWRSAVE;
+ wmi_service[wmi_service_sta_advanced_pwrsave] =
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE;
+ wmi_service[wmi_service_ap_uapsd] = WMI_SERVICE_AP_UAPSD;
+ wmi_service[wmi_service_ap_dfs] = WMI_SERVICE_AP_DFS;
+ wmi_service[wmi_service_11ac] =
WMI_SERVICE_11AC;
+ wmi_service[wmi_service_blockack] = WMI_SERVICE_BLOCKACK;
+ wmi_service[wmi_service_phyerr] = WMI_SERVICE_PHYERR;
+ wmi_service[wmi_service_bcn_filter] = WMI_SERVICE_BCN_FILTER;
+ wmi_service[wmi_service_rtt] = WMI_SERVICE_RTT;
+ wmi_service[wmi_service_ratectrl] = WMI_SERVICE_RATECTRL;
+ wmi_service[wmi_service_wow] = WMI_SERVICE_WOW;
+ wmi_service[wmi_service_ratectrl_cache] = WMI_SERVICE_RATECTRL_CACHE;
+ wmi_service[wmi_service_iram_tids] = WMI_SERVICE_IRAM_TIDS;
+ wmi_service[wmi_service_burst] = WMI_SERVICE_BURST;
+ wmi_service[wmi_service_smart_antenna_sw_support] =
+ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT;
+ wmi_service[wmi_service_gtk_offload] = WMI_SERVICE_GTK_OFFLOAD;
+ wmi_service[wmi_service_scan_sch] = WMI_SERVICE_SCAN_SCH;
+ wmi_service[wmi_service_csa_offload] = WMI_SERVICE_CSA_OFFLOAD;
+ wmi_service[wmi_service_chatter] = WMI_SERVICE_CHATTER;
+ wmi_service[wmi_service_coex_freqavoid] = WMI_SERVICE_COEX_FREQAVOID;
+ wmi_service[wmi_service_packet_power_save] =
+ WMI_SERVICE_PACKET_POWER_SAVE;
+ wmi_service[wmi_service_force_fw_hang] = WMI_SERVICE_FORCE_FW_HANG;
+ wmi_service[wmi_service_smart_antenna_hw_support] =
+ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT;
+ wmi_service[wmi_service_gpio] = WMI_SERVICE_GPIO;
+ wmi_service[wmi_sta_uapsd_basic_auto_trig] =
+ WMI_STA_UAPSD_BASIC_AUTO_TRIG;
+ wmi_service[wmi_sta_uapsd_var_auto_trig] = WMI_STA_UAPSD_VAR_AUTO_TRIG;
+ wmi_service[wmi_service_sta_keep_alive] = WMI_SERVICE_STA_KEEP_ALIVE;
+ wmi_service[wmi_service_tx_encap] = WMI_SERVICE_TX_ENCAP;
+ wmi_service[wmi_service_ap_ps_detect_out_of_sync] =
+ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC;
+ wmi_service[wmi_service_early_rx] =
+ WMI_SERVICE_EARLY_RX;
+ wmi_service[wmi_service_enhanced_proxy_sta] =
+ WMI_SERVICE_ENHANCED_PROXY_STA;
+ wmi_service[wmi_service_tt] = WMI_SERVICE_TT;
+ wmi_service[wmi_service_atf] = WMI_SERVICE_ATF;
+ wmi_service[wmi_service_peer_caching] = WMI_SERVICE_PEER_CACHING;
+ wmi_service[wmi_service_coex_gpio] = WMI_SERVICE_COEX_GPIO;
+ wmi_service[wmi_service_aux_spectral_intf] =
+ WMI_SERVICE_AUX_SPECTRAL_INTF;
+ wmi_service[wmi_service_aux_chan_load_intf] =
+ WMI_SERVICE_AUX_CHAN_LOAD_INTF;
+ wmi_service[wmi_service_bss_channel_info_64] =
+ WMI_SERVICE_BSS_CHANNEL_INFO_64;
+ wmi_service[wmi_service_ext_res_cfg_support] =
+ WMI_SERVICE_EXT_RES_CFG_SUPPORT;
+ wmi_service[wmi_service_mesh] = WMI_SERVICE_MESH;
+ wmi_service[wmi_service_restrt_chnl_support] =
+ WMI_SERVICE_RESTRT_CHNL_SUPPORT;
+ wmi_service[wmi_service_peer_stats] = WMI_SERVICE_PEER_STATS;
+ wmi_service[wmi_service_mesh_11s] = WMI_SERVICE_MESH_11S;
+ wmi_service[wmi_service_periodic_chan_stat_support] =
+ WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT;
+ wmi_service[wmi_service_tx_mode_push_only] =
+ WMI_SERVICE_TX_MODE_PUSH_ONLY;
+ wmi_service[wmi_service_tx_mode_push_pull] =
+ WMI_SERVICE_TX_MODE_PUSH_PULL;
+ wmi_service[wmi_service_tx_mode_dynamic] = WMI_SERVICE_TX_MODE_DYNAMIC;
+ wmi_service[wmi_service_check_cal_version] =
+ WMI_SERVICE_CHECK_CAL_VERSION;
+ wmi_service[wmi_service_btcoex_duty_cycle] =
+ WMI_SERVICE_BTCOEX_DUTY_CYCLE;
+ wmi_service[wmi_service_4_wire_coex_support] =
+ WMI_SERVICE_4_WIRE_COEX_SUPPORT;
+ wmi_service[wmi_service_extended_nss_support] =
+ WMI_SERVICE_EXTENDED_NSS_SUPPORT;
+
+ /* Most of the remaining services are not offered by non-TLV fw. */
+ wmi_service[wmi_service_roam_scan_offload] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_arpns_offload] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_nlo] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_sta_dtim_ps_modulated_dtim] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_sta_smps] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_fwtest] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_sta_wmmac] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_tdls] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_mcc_bcn_interval_change] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_adaptive_ocs] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_ba_ssn_support] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_filter_ipsec_natkeepalive] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_wlan_hb] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_lte_ant_share_support] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_batch_scan] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_qpower] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_plmreq] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_thermal_mgmt] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_rmc] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_mhf_offload] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_coex_sar] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_bcn_txrate_override] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_nan] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_l1ss_stat] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_estimate_linkspeed] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_obss_scan] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_tdls_offchan] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_tdls_uapsd_buffer_sta] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_tdls_uapsd_sleep_sta] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_ibss_pwrsave] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_lpass] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_extscan] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_d0wow] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_hsoffload] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_roam_ho_offload] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_rx_full_reorder] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_dhcp_offload] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_sta_rx_ipa_offload_support] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_mdns_offload] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_sap_auth_offload] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_dual_band_simultaneous_support] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_ocb] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_ap_arpns_offload] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_per_band_chainmask_support] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_packet_filter_offload] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_mgmt_tx_htt] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_mgmt_tx_wmi] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_ext_msg] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_mawc] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_multiple_vdev_restart] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_peer_assoc_conf] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_egap] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_sta_pmf_offload] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_unified_wow_capability] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_enterprise_mesh] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_apf_offload] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_sync_delete_cmds] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_ratectrl_limit_max_min_rates] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_nan_data] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_nan_rtt] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_11ax] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_deprecated_replace] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_tdls_conn_tracker_in_host_mode] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_enhanced_mcast_filter] =WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_half_rate_quarter_rate_support] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_vdev_rx_filter] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_p2p_listen_offload_support] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_mark_first_wakeup_packet] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_multiple_mcast_filter_set] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_host_managed_rx_reorder] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_flash_rdwr_support] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_wlan_stats_report] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_tx_msdu_id_new_partition_support] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_dfs_phyerr_offload] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_rcpi_support] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_fw_mem_dump_support] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_peer_stats_info] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_regulatory_db] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_11d_offload] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_hw_data_filtering] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_pkt_routing] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_offchan_tx_wmi] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_chan_load_info] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_ack_timeout] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_widebw_scan] = WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_support_dma] =
+ WMI_SERVICE_UNAVAILABLE;
+ wmi_service[wmi_service_host_dfs_check_support] =
+ WMI_SERVICE_HOST_DFS_CHECK_SUPPORT;
+}
+
+/**
+ * populate_non_tlv_event_id() - populates wmi event ids
+ *
+ * @param event_ids: Pointer to hold event ids
+ * Return: None
+ */
+static void populate_non_tlv_events_id(uint32_t *event_ids)
+{
+ event_ids[wmi_service_ready_event_id] = WMI_SERVICE_READY_EVENTID;
+ event_ids[wmi_ready_event_id] = WMI_READY_EVENTID;
+ event_ids[wmi_dbg_msg_event_id] = WMI_DEBUG_MESG_EVENTID;
+ event_ids[wmi_scan_event_id] = WMI_SCAN_EVENTID;
+ event_ids[wmi_echo_event_id] = WMI_ECHO_EVENTID;
+ event_ids[wmi_update_stats_event_id] = WMI_UPDATE_STATS_EVENTID;
event_ids[wmi_inst_rssi_stats_event_id] = WMI_INST_RSSI_STATS_EVENTID;
+ event_ids[wmi_vdev_start_resp_event_id] = WMI_VDEV_START_RESP_EVENTID;
+ event_ids[wmi_vdev_standby_req_event_id] = WMI_VDEV_STANDBY_REQ_EVENTID;
+ event_ids[wmi_vdev_resume_req_event_id] = WMI_VDEV_RESUME_REQ_EVENTID;
+ event_ids[wmi_vdev_stopped_event_id] = WMI_VDEV_STOPPED_EVENTID;
+ event_ids[wmi_peer_sta_kickout_event_id] = WMI_PEER_STA_KICKOUT_EVENTID;
+ event_ids[wmi_host_swba_event_id] = WMI_HOST_SWBA_EVENTID;
+ event_ids[wmi_tbttoffset_update_event_id] =
+ WMI_TBTTOFFSET_UPDATE_EVENTID;
+ event_ids[wmi_mgmt_rx_event_id] = WMI_MGMT_RX_EVENTID;
+ event_ids[wmi_chan_info_event_id] = WMI_CHAN_INFO_EVENTID;
+ event_ids[wmi_phyerr_event_id] = WMI_PHYERR_EVENTID;
+ event_ids[wmi_roam_event_id] = WMI_ROAM_EVENTID;
+ event_ids[wmi_profile_match] = WMI_PROFILE_MATCH;
+ event_ids[wmi_debug_print_event_id] = WMI_DEBUG_PRINT_EVENTID;
+ event_ids[wmi_pdev_qvit_event_id] = WMI_PDEV_QVIT_EVENTID;
+ event_ids[wmi_wlan_profile_data_event_id] =
+ WMI_WLAN_PROFILE_DATA_EVENTID;
+ event_ids[wmi_rtt_meas_report_event_id] =
+ WMI_RTT_MEASUREMENT_REPORT_EVENTID;
+ event_ids[wmi_tsf_meas_report_event_id] =
+ WMI_TSF_MEASUREMENT_REPORT_EVENTID;
+ event_ids[wmi_rtt_error_report_event_id] = WMI_RTT_ERROR_REPORT_EVENTID;
+ event_ids[wmi_rtt_keepalive_event_id] = WMI_RTT_KEEPALIVE_EVENTID;
+ event_ids[wmi_oem_cap_event_id] = WMI_OEM_CAPABILITY_EVENTID;
+ event_ids[wmi_oem_meas_report_event_id] =
+ WMI_OEM_MEASUREMENT_REPORT_EVENTID;
+ event_ids[wmi_oem_report_event_id] = WMI_OEM_ERROR_REPORT_EVENTID;
+ event_ids[wmi_nan_event_id] = WMI_NAN_EVENTID;
+ event_ids[wmi_wow_wakeup_host_event_id] = WMI_WOW_WAKEUP_HOST_EVENTID;
+ event_ids[wmi_gtk_offload_status_event_id] =
+ WMI_GTK_OFFLOAD_STATUS_EVENTID;
+ event_ids[wmi_gtk_rekey_fail_event_id] = WMI_GTK_REKEY_FAIL_EVENTID;
+ event_ids[wmi_dcs_interference_event_id] = WMI_DCS_INTERFERENCE_EVENTID;
+ event_ids[wmi_pdev_tpc_config_event_id] = WMI_PDEV_TPC_CONFIG_EVENTID;
+ event_ids[wmi_csa_handling_event_id] = WMI_CSA_HANDLING_EVENTID;
+ event_ids[wmi_gpio_input_event_id] = WMI_GPIO_INPUT_EVENTID;
+ event_ids[wmi_peer_ratecode_list_event_id] =
+ WMI_PEER_RATECODE_LIST_EVENTID;
+ event_ids[wmi_generic_buffer_event_id] = WMI_GENERIC_BUFFER_EVENTID;
+ event_ids[wmi_mcast_buf_release_event_id] =
+ WMI_MCAST_BUF_RELEASE_EVENTID;
+ event_ids[wmi_mcast_list_ageout_event_id] =
+ WMI_MCAST_LIST_AGEOUT_EVENTID;
+ event_ids[wmi_vdev_get_keepalive_event_id] =
+ WMI_VDEV_GET_KEEPALIVE_EVENTID;
+ event_ids[wmi_wds_peer_event_id] = WMI_WDS_PEER_EVENTID;
+ event_ids[wmi_peer_sta_ps_statechg_event_id] =
+ WMI_PEER_STA_PS_STATECHG_EVENTID;
+ event_ids[wmi_pdev_fips_event_id] = WMI_PDEV_FIPS_EVENTID;
+ event_ids[wmi_tt_stats_event_id] = WMI_TT_STATS_EVENTID;
+ event_ids[wmi_pdev_channel_hopping_event_id] =
+ WMI_PDEV_CHANNEL_HOPPING_EVENTID;
+ event_ids[wmi_pdev_ani_cck_level_event_id] =
+ WMI_PDEV_ANI_CCK_LEVEL_EVENTID;
+ event_ids[wmi_pdev_ani_ofdm_level_event_id] =
+ WMI_PDEV_ANI_OFDM_LEVEL_EVENTID;
+ event_ids[wmi_pdev_reserve_ast_entry_event_id] =
+ WMI_PDEV_RESERVE_AST_ENTRY_EVENTID;
+ event_ids[wmi_pdev_nfcal_power_event_id] = WMI_PDEV_NFCAL_POWER_EVENTID;
+ event_ids[wmi_pdev_tpc_event_id] = WMI_PDEV_TPC_EVENTID;
+ event_ids[wmi_pdev_get_ast_info_event_id] =
+ WMI_PDEV_GET_AST_INFO_EVENTID;
+ event_ids[wmi_pdev_temperature_event_id] = WMI_PDEV_TEMPERATURE_EVENTID;
+ event_ids[wmi_pdev_nfcal_power_all_channels_event_id] =
+ WMI_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID;
+ event_ids[wmi_pdev_bss_chan_info_event_id] =
+ WMI_PDEV_BSS_CHAN_INFO_EVENTID;
+ event_ids[wmi_mu_report_event_id] = WMI_MU_REPORT_EVENTID;
+ event_ids[wmi_tx_data_traffic_ctrl_event_id] =
+ WMI_TX_DATA_TRAFFIC_CTRL_EVENTID;
+ event_ids[wmi_pdev_utf_event_id] = WMI_PDEV_UTF_EVENTID;
+ event_ids[wmi_peer_tx_mu_txmit_count_event_id] =
+ WMI_PEER_TX_MU_TXMIT_COUNT_EVENTID;
+ event_ids[wmi_peer_gid_userpos_list_event_id] =
+ WMI_PEER_GID_USERPOS_LIST_EVENTID;
+ event_ids[wmi_pdev_check_cal_version_event_id] =
+ WMI_PDEV_CHECK_CAL_VERSION_EVENTID;
+ event_ids[wmi_atf_peer_stats_event_id] =
+ WMI_ATF_PEER_STATS_EVENTID;
+ event_ids[wmi_pdev_wds_entry_list_event_id] =
+ WMI_PDEV_WDS_ENTRY_LIST_EVENTID;
+ event_ids[wmi_host_swfda_event_id] = WMI_HOST_SWFDA_EVENTID;
+/* DFS status-check event id is only mapped with partial-offload DFS. */
+#if defined(WLAN_DFS_PARTIAL_OFFLOAD) && defined(HOST_DFS_SPOOF_TEST)
+ event_ids[wmi_host_dfs_status_check_event_id] =
+ WMI_HOST_DFS_STATUS_CHECK_EVENTID;
+#endif
+}
+
+/**
+ * populate_pdev_param_non_tlv() - populates pdev params
+ *
+ * @param pdev_param: Pointer to hold pdev params
+ * Return: None
+ */
+static void populate_pdev_param_non_tlv(uint32_t *pdev_param)
+{
+ pdev_param[wmi_pdev_param_tx_chain_mask] = WMI_PDEV_PARAM_TX_CHAIN_MASK;
+ pdev_param[wmi_pdev_param_rx_chain_mask] = WMI_PDEV_PARAM_RX_CHAIN_MASK;
+ pdev_param[wmi_pdev_param_txpower_limit2g] =
+ WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
+ pdev_param[wmi_pdev_param_txpower_limit5g] =
+ WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
+ pdev_param[wmi_pdev_param_txpower_scale] = WMI_PDEV_PARAM_TXPOWER_SCALE;
+ pdev_param[wmi_pdev_param_beacon_gen_mode] =
+ WMI_PDEV_PARAM_BEACON_GEN_MODE;
+ pdev_param[wmi_pdev_param_beacon_tx_mode] =
+ WMI_PDEV_PARAM_BEACON_TX_MODE;
+ pdev_param[wmi_pdev_param_resmgr_offchan_mode] =
+ WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE;
+ pdev_param[wmi_pdev_param_protection_mode] =
+ WMI_PDEV_PARAM_PROTECTION_MODE;
+ pdev_param[wmi_pdev_param_dynamic_bw] = WMI_PDEV_PARAM_DYNAMIC_BW;
+ pdev_param[wmi_pdev_param_non_agg_sw_retry_th] =
+ WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH;
+ pdev_param[wmi_pdev_param_agg_sw_retry_th] =
+ WMI_PDEV_PARAM_AGG_SW_RETRY_TH;
+ pdev_param[wmi_pdev_param_sta_kickout_th] =
+ WMI_PDEV_PARAM_STA_KICKOUT_TH;
+ pdev_param[wmi_pdev_param_ac_aggrsize_scaling] =
+ WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING;
+ pdev_param[wmi_pdev_param_ltr_enable] = WMI_PDEV_PARAM_LTR_ENABLE;
+ pdev_param[wmi_pdev_param_ltr_ac_latency_be] =
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BE;
pdev_param[wmi_pdev_param_ltr_ac_latency_bk] = + WMI_PDEV_PARAM_LTR_AC_LATENCY_BK; + pdev_param[wmi_pdev_param_ltr_ac_latency_vi] = + WMI_PDEV_PARAM_LTR_AC_LATENCY_VI; + pdev_param[wmi_pdev_param_ltr_ac_latency_vo] = + WMI_PDEV_PARAM_LTR_AC_LATENCY_VO; + pdev_param[wmi_pdev_param_ltr_ac_latency_timeout] = + WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT; + pdev_param[wmi_pdev_param_ltr_sleep_override] = + WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE; + pdev_param[wmi_pdev_param_ltr_rx_override] = + WMI_PDEV_PARAM_LTR_RX_OVERRIDE; + pdev_param[wmi_pdev_param_ltr_tx_activity_timeout] = + WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT; + pdev_param[wmi_pdev_param_l1ss_enable] = WMI_PDEV_PARAM_L1SS_ENABLE; + pdev_param[wmi_pdev_param_dsleep_enable] = WMI_PDEV_PARAM_DSLEEP_ENABLE; + pdev_param[wmi_pdev_param_pcielp_txbuf_flush] = + WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH; + pdev_param[wmi_pdev_param_pcielp_txbuf_watermark] = + WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK; + pdev_param[wmi_pdev_param_pcielp_txbuf_tmo_en] = + WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN; + pdev_param[wmi_pdev_param_pcielp_txbuf_tmo_value] = + WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE; + pdev_param[wmi_pdev_param_pdev_stats_update_period] = + WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD; + pdev_param[wmi_pdev_param_vdev_stats_update_period] = + WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD; + pdev_param[wmi_pdev_param_peer_stats_update_period] = + WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD; + pdev_param[wmi_pdev_param_bcnflt_stats_update_period] = + WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD; + pdev_param[wmi_pdev_param_pmf_qos] = + WMI_PDEV_PARAM_PMF_QOS; + pdev_param[wmi_pdev_param_arp_ac_override] = + WMI_PDEV_PARAM_ARP_AC_OVERRIDE; + pdev_param[wmi_pdev_param_dcs] = + WMI_PDEV_PARAM_DCS; + pdev_param[wmi_pdev_param_ani_enable] = WMI_PDEV_PARAM_ANI_ENABLE; + pdev_param[wmi_pdev_param_ani_poll_period] = + WMI_PDEV_PARAM_ANI_POLL_PERIOD; + pdev_param[wmi_pdev_param_ani_listen_period] = + WMI_PDEV_PARAM_ANI_LISTEN_PERIOD; + 
pdev_param[wmi_pdev_param_ani_ofdm_level] = + WMI_PDEV_PARAM_ANI_OFDM_LEVEL; + pdev_param[wmi_pdev_param_ani_cck_level] = WMI_PDEV_PARAM_ANI_CCK_LEVEL; + pdev_param[wmi_pdev_param_dyntxchain] = WMI_PDEV_PARAM_DYNTXCHAIN; + pdev_param[wmi_pdev_param_proxy_sta] = WMI_PDEV_PARAM_PROXY_STA; + pdev_param[wmi_pdev_param_idle_ps_config] = + WMI_PDEV_PARAM_IDLE_PS_CONFIG; + pdev_param[wmi_pdev_param_power_gating_sleep] = + WMI_PDEV_PARAM_POWER_GATING_SLEEP; + pdev_param[wmi_pdev_param_aggr_burst] = WMI_PDEV_PARAM_AGGR_BURST; + pdev_param[wmi_pdev_param_rx_decap_mode] = WMI_PDEV_PARAM_RX_DECAP_MODE; + pdev_param[wmi_pdev_param_fast_channel_reset] = + WMI_PDEV_PARAM_FAST_CHANNEL_RESET; + pdev_param[wmi_pdev_param_burst_dur] = WMI_PDEV_PARAM_BURST_DUR; + pdev_param[wmi_pdev_param_burst_enable] = WMI_PDEV_PARAM_BURST_ENABLE; + pdev_param[wmi_pdev_param_smart_antenna_default_antenna] = + WMI_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA; + pdev_param[wmi_pdev_param_igmpmld_override] = + WMI_PDEV_PARAM_IGMPMLD_OVERRIDE; + pdev_param[wmi_pdev_param_igmpmld_tid] = + WMI_PDEV_PARAM_IGMPMLD_TID; + pdev_param[wmi_pdev_param_antenna_gain] = WMI_PDEV_PARAM_ANTENNA_GAIN; + pdev_param[wmi_pdev_param_rx_filter] = WMI_PDEV_PARAM_RX_FILTER; + pdev_param[wmi_pdev_set_mcast_to_ucast_tid] = + WMI_PDEV_SET_MCAST_TO_UCAST_TID; + pdev_param[wmi_pdev_param_proxy_sta_mode] = + WMI_PDEV_PARAM_PROXY_STA_MODE; + pdev_param[wmi_pdev_param_set_mcast2ucast_mode] = + WMI_PDEV_PARAM_SET_MCAST2UCAST_MODE; + pdev_param[wmi_pdev_param_set_mcast2ucast_buffer] = + WMI_PDEV_PARAM_SET_MCAST2UCAST_BUFFER; + pdev_param[wmi_pdev_param_remove_mcast2ucast_buffer] = + WMI_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER; + pdev_param[wmi_pdev_peer_sta_ps_statechg_enable] = + WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE; + pdev_param[wmi_pdev_param_igmpmld_ac_override] = + WMI_PDEV_PARAM_IGMPMLD_AC_OVERRIDE; + pdev_param[wmi_pdev_param_block_interbss] = + WMI_PDEV_PARAM_BLOCK_INTERBSS; + pdev_param[wmi_pdev_param_set_disable_reset_cmdid] = + 
WMI_PDEV_PARAM_SET_DISABLE_RESET_CMDID; + pdev_param[wmi_pdev_param_set_msdu_ttl_cmdid] = + WMI_PDEV_PARAM_SET_MSDU_TTL_CMDID; + pdev_param[wmi_pdev_param_set_ppdu_duration_cmdid] = + WMI_PDEV_PARAM_SET_PPDU_DURATION_CMDID; + pdev_param[wmi_pdev_param_txbf_sound_period_cmdid] = + WMI_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID; + pdev_param[wmi_pdev_param_set_promisc_mode_cmdid] = + WMI_PDEV_PARAM_SET_PROMISC_MODE_CMDID; + pdev_param[wmi_pdev_param_set_burst_mode_cmdid] = + WMI_PDEV_PARAM_SET_BURST_MODE_CMDID; + pdev_param[wmi_pdev_param_en_stats] = WMI_PDEV_PARAM_EN_STATS; + pdev_param[wmi_pdev_param_mu_group_policy] = + WMI_PDEV_PARAM_MU_GROUP_POLICY; + pdev_param[wmi_pdev_param_noise_detection] = + WMI_PDEV_PARAM_NOISE_DETECTION; + pdev_param[wmi_pdev_param_noise_threshold] = + WMI_PDEV_PARAM_NOISE_THRESHOLD; + pdev_param[wmi_pdev_param_dpd_enable] = + WMI_PDEV_PARAM_DPD_ENABLE; + pdev_param[wmi_pdev_param_set_mcast_bcast_echo] = + WMI_PDEV_PARAM_SET_MCAST_BCAST_ECHO; + pdev_param[wmi_pdev_param_atf_strict_sch] = + WMI_PDEV_PARAM_ATF_STRICT_SCH; + pdev_param[wmi_pdev_param_atf_sched_duration] = + WMI_PDEV_PARAM_ATF_SCHED_DURATION; + pdev_param[wmi_pdev_param_ant_plzn] = WMI_PDEV_PARAM_ANT_PLZN; + pdev_param[wmi_pdev_param_mgmt_retry_limit] = + WMI_PDEV_PARAM_MGMT_RETRY_LIMIT; + pdev_param[wmi_pdev_param_sensitivity_level] = + WMI_PDEV_PARAM_SENSITIVITY_LEVEL; + pdev_param[wmi_pdev_param_signed_txpower_2g] = + WMI_PDEV_PARAM_SIGNED_TXPOWER_2G; + pdev_param[wmi_pdev_param_signed_txpower_5g] = + WMI_PDEV_PARAM_SIGNED_TXPOWER_5G; + pdev_param[wmi_pdev_param_enable_per_tid_amsdu] = + WMI_PDEV_PARAM_ENABLE_PER_TID_AMSDU; + pdev_param[wmi_pdev_param_enable_per_tid_ampdu] = + WMI_PDEV_PARAM_ENABLE_PER_TID_AMPDU; + pdev_param[wmi_pdev_param_cca_threshold] = WMI_PDEV_PARAM_CCA_THRESHOLD; + pdev_param[wmi_pdev_param_rts_fixed_rate] = + WMI_PDEV_PARAM_RTS_FIXED_RATE; + pdev_param[wmi_pdev_param_cal_period] = WMI_PDEV_PARAM_CAL_PERIOD; + pdev_param[wmi_pdev_param_pdev_reset] = 
WMI_PDEV_PARAM_PDEV_RESET; + pdev_param[wmi_pdev_param_wapi_mbssid_offset] = + WMI_PDEV_PARAM_WAPI_MBSSID_OFFSET; + pdev_param[wmi_pdev_param_arp_srcaddr] = WMI_PDEV_PARAM_ARP_SRCADDR; + pdev_param[wmi_pdev_param_arp_dstaddr] = WMI_PDEV_PARAM_ARP_DSTADDR; + pdev_param[wmi_pdev_param_txpower_decr_db] = + WMI_PDEV_PARAM_TXPOWER_DECR_DB; + pdev_param[wmi_pdev_param_rx_batchmode] = WMI_PDEV_PARAM_RX_BATCHMODE; + pdev_param[wmi_pdev_param_packet_aggr_delay] = + WMI_PDEV_PARAM_PACKET_AGGR_DELAY; + pdev_param[wmi_pdev_param_atf_obss_noise_sch] = + WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCH; + pdev_param[wmi_pdev_param_atf_obss_noise_scaling_factor] = + WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR; + pdev_param[wmi_pdev_param_cust_txpower_scale] = + WMI_PDEV_PARAM_CUST_TXPOWER_SCALE; + pdev_param[wmi_pdev_param_atf_dynamic_enable] = + WMI_PDEV_PARAM_ATF_DYNAMIC_ENABLE; + pdev_param[wmi_pdev_param_atf_ssid_group_policy] = + WMI_PDEV_PARAM_ATF_SSID_GROUP_POLICY; + pdev_param[wmi_pdev_param_enable_btcoex] = + WMI_PDEV_PARAM_ENABLE_BTCOEX; + pdev_param[wmi_pdev_param_atf_peer_stats] = + WMI_PDEV_PARAM_ATF_PEER_STATS; + pdev_param[wmi_pdev_param_tx_ack_timeout] = WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_soft_tx_chain_mask] = + WMI_PDEV_PARAM_SOFT_TX_CHAIN_MASK; + pdev_param[wmi_pdev_param_rfkill_enable] = WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_hw_rfkill_config] = WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_low_power_rf_enable] = WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_l1ss_track] = WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_hyst_en] = WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_power_collapse_enable] = + WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_led_sys_state] = WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_led_enable] = WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_audio_over_wlan_latency] = + WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_audio_over_wlan_enable] = + WMI_UNAVAILABLE_PARAM; + 
pdev_param[wmi_pdev_param_whal_mib_stats_update_enable] = + WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_vdev_rate_stats_update_period] = + WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_cts_cbw] = WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_wnts_config] = WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_adaptive_early_rx_enable] = + WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_adaptive_early_rx_min_sleep_slop] = + WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_adaptive_early_rx_inc_dec_step] = + WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_early_rx_fix_sleep_slop] = + WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_bmiss_based_adaptive_bto_enable] = + WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_bmiss_bto_min_bcn_timeout] = + WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_bmiss_bto_inc_dec_step] = + WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_bto_fix_bcn_timeout] = + WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_ce_based_adaptive_bto_enable] = + WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_ce_bto_combo_ce_value] = + WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_tx_chain_mask_2g] = WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_rx_chain_mask_2g] = WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_tx_chain_mask_5g] = WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_rx_chain_mask_5g] = WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_tx_chain_mask_cck] = WMI_UNAVAILABLE_PARAM; + pdev_param[wmi_pdev_param_tx_chain_mask_1ss] = WMI_UNAVAILABLE_PARAM; +} + +/** + * populate_vdev_param_non_tlv() - populates vdev params + * + * @param vdev_param: Pointer to hold vdev params + * Return: None + */ +static void populate_vdev_param_non_tlv(uint32_t *vdev_param) +{ + vdev_param[wmi_vdev_param_rts_threshold] = WMI_VDEV_PARAM_RTS_THRESHOLD; + vdev_param[wmi_vdev_param_fragmentation_threshold] = + WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD; + vdev_param[wmi_vdev_param_beacon_interval] = + 
WMI_VDEV_PARAM_BEACON_INTERVAL; + vdev_param[wmi_vdev_param_listen_interval] = + WMI_VDEV_PARAM_LISTEN_INTERVAL; + vdev_param[wmi_vdev_param_multicast_rate] = + WMI_VDEV_PARAM_MULTICAST_RATE; + vdev_param[wmi_vdev_param_mgmt_tx_rate] = + WMI_VDEV_PARAM_MGMT_TX_RATE; + vdev_param[wmi_vdev_param_slot_time] = WMI_VDEV_PARAM_SLOT_TIME; + vdev_param[wmi_vdev_param_preamble] = WMI_VDEV_PARAM_PREAMBLE; + vdev_param[wmi_vdev_param_swba_time] = WMI_VDEV_PARAM_SWBA_TIME; + vdev_param[wmi_vdev_stats_update_period] = WMI_VDEV_STATS_UPDATE_PERIOD; + vdev_param[wmi_vdev_pwrsave_ageout_time] = WMI_VDEV_PWRSAVE_AGEOUT_TIME; + vdev_param[wmi_vdev_host_swba_interval] = WMI_VDEV_HOST_SWBA_INTERVAL; + vdev_param[wmi_vdev_param_dtim_period] = WMI_VDEV_PARAM_DTIM_PERIOD; + vdev_param[wmi_vdev_oc_scheduler_air_time_limit] = + WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT; + vdev_param[wmi_vdev_param_wds] = WMI_VDEV_PARAM_WDS; + vdev_param[wmi_vdev_param_atim_window] = WMI_VDEV_PARAM_ATIM_WINDOW; + vdev_param[wmi_vdev_param_bmiss_count_max] = + WMI_VDEV_PARAM_BMISS_COUNT_MAX; + vdev_param[wmi_vdev_param_bmiss_first_bcnt] = + WMI_VDEV_PARAM_BMISS_FIRST_BCNT; + vdev_param[wmi_vdev_param_bmiss_final_bcnt] = + WMI_VDEV_PARAM_BMISS_FINAL_BCNT; + vdev_param[wmi_vdev_param_feature_wmm] = WMI_VDEV_PARAM_FEATURE_WMM; + vdev_param[wmi_vdev_param_chwidth] = WMI_VDEV_PARAM_CHWIDTH; + vdev_param[wmi_vdev_param_chextoffset] = WMI_VDEV_PARAM_CHEXTOFFSET; + vdev_param[wmi_vdev_param_disable_htprotection] = + WMI_VDEV_PARAM_DISABLE_HTPROTECTION; + vdev_param[wmi_vdev_param_sta_quickkickout] = + WMI_VDEV_PARAM_STA_QUICKKICKOUT; + vdev_param[wmi_vdev_param_mgmt_rate] = WMI_VDEV_PARAM_MGMT_RATE; + vdev_param[wmi_vdev_param_protection_mode] = + WMI_VDEV_PARAM_PROTECTION_MODE; + vdev_param[wmi_vdev_param_fixed_rate] = WMI_VDEV_PARAM_FIXED_RATE; + vdev_param[wmi_vdev_param_sgi] = WMI_VDEV_PARAM_SGI; + vdev_param[wmi_vdev_param_ldpc] = WMI_VDEV_PARAM_LDPC; + vdev_param[wmi_vdev_param_tx_stbc] = WMI_VDEV_PARAM_TX_STBC; + 
vdev_param[wmi_vdev_param_rx_stbc] = WMI_VDEV_PARAM_RX_STBC; + vdev_param[wmi_vdev_param_intra_bss_fwd] = WMI_VDEV_PARAM_INTRA_BSS_FWD; + vdev_param[wmi_vdev_param_def_keyid] = WMI_VDEV_PARAM_DEF_KEYID; + vdev_param[wmi_vdev_param_nss] = WMI_VDEV_PARAM_NSS; + vdev_param[wmi_vdev_param_bcast_data_rate] = + WMI_VDEV_PARAM_BCAST_DATA_RATE; + vdev_param[wmi_vdev_param_mcast_data_rate] = + WMI_VDEV_PARAM_MCAST_DATA_RATE; + vdev_param[wmi_vdev_param_mcast_indicate] = + WMI_VDEV_PARAM_MCAST_INDICATE; + vdev_param[wmi_vdev_param_dhcp_indicate] = WMI_VDEV_PARAM_DHCP_INDICATE; + vdev_param[wmi_vdev_param_unknown_dest_indicate] = + WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE; + vdev_param[wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs] = + WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS; + vdev_param[wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs] = + WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS; + vdev_param[wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs] = + WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS; + vdev_param[wmi_vdev_param_ap_enable_nawds] = + WMI_VDEV_PARAM_AP_ENABLE_NAWDS; + vdev_param[wmi_vdev_param_mcast2ucast_set] = + WMI_VDEV_PARAM_MCAST2UCAST_SET; + vdev_param[wmi_vdev_param_enable_rtscts] = WMI_VDEV_PARAM_ENABLE_RTSCTS; + vdev_param[wmi_vdev_param_rc_num_retries] = + WMI_VDEV_PARAM_RC_NUM_RETRIES; + vdev_param[wmi_vdev_param_txbf] = WMI_VDEV_PARAM_TXBF; + vdev_param[wmi_vdev_param_packet_powersave] = + WMI_VDEV_PARAM_PACKET_POWERSAVE; + vdev_param[wmi_vdev_param_drop_unencry] = WMI_VDEV_PARAM_DROP_UNENCRY; + vdev_param[wmi_vdev_param_tx_encap_type] = WMI_VDEV_PARAM_TX_ENCAP_TYPE; + vdev_param[wmi_vdev_param_ap_detect_out_of_sync_sleeping_sta_time_secs] + = WMI_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS; + vdev_param[wmi_vdev_param_cabq_maxdur] = WMI_VDEV_PARAM_CABQ_MAXDUR; + vdev_param[wmi_vdev_param_mfptest_set] = WMI_VDEV_PARAM_MFPTEST_SET; + vdev_param[wmi_vdev_param_rts_fixed_rate] = + 
WMI_VDEV_PARAM_RTS_FIXED_RATE; + vdev_param[wmi_vdev_param_vht_sgimask] = WMI_VDEV_PARAM_VHT_SGIMASK; + vdev_param[wmi_vdev_param_vht80_ratemask] = + WMI_VDEV_PARAM_VHT80_RATEMASK; + vdev_param[wmi_vdev_param_early_rx_adjust_enable] = + WMI_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE; + vdev_param[wmi_vdev_param_early_rx_tgt_bmiss_num] = + WMI_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM; + vdev_param[wmi_vdev_param_early_rx_bmiss_sample_cycle] = + WMI_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE; + vdev_param[wmi_vdev_param_early_rx_slop_step] = + WMI_VDEV_PARAM_EARLY_RX_SLOP_STEP; + vdev_param[wmi_vdev_param_early_rx_init_slop] = + WMI_VDEV_PARAM_EARLY_RX_INIT_SLOP; + vdev_param[wmi_vdev_param_early_rx_adjust_pause] = + WMI_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE; + vdev_param[wmi_vdev_param_proxy_sta] = WMI_VDEV_PARAM_PROXY_STA; + vdev_param[wmi_vdev_param_meru_vc] = WMI_VDEV_PARAM_MERU_VC; + vdev_param[wmi_vdev_param_rx_decap_type] = WMI_VDEV_PARAM_RX_DECAP_TYPE; + vdev_param[wmi_vdev_param_bw_nss_ratemask] = + WMI_VDEV_PARAM_BW_NSS_RATEMASK; + vdev_param[wmi_vdev_param_sensor_ap] = WMI_VDEV_PARAM_SENSOR_AP; + vdev_param[wmi_vdev_param_beacon_rate] = WMI_VDEV_PARAM_BEACON_RATE; + vdev_param[wmi_vdev_param_dtim_enable_cts] = + WMI_VDEV_PARAM_DTIM_ENABLE_CTS; + vdev_param[wmi_vdev_param_sta_kickout] = WMI_VDEV_PARAM_STA_KICKOUT; + vdev_param[wmi_vdev_param_capabilities] = + WMI_VDEV_PARAM_CAPABILITIES; + vdev_param[wmi_vdev_param_mgmt_tx_power] = WMI_VDEV_PARAM_MGMT_TX_POWER; + vdev_param[wmi_vdev_param_tx_power] = WMI_VDEV_PARAM_TX_POWER; + vdev_param[wmi_vdev_param_atf_ssid_sched_policy] = + WMI_VDEV_PARAM_ATF_SSID_SCHED_POLICY; + vdev_param[wmi_vdev_param_disable_dyn_bw_rts] = + WMI_VDEV_PARAM_DISABLE_DYN_BW_RTS; + vdev_param[wmi_vdev_param_ampdu_subframe_size_per_ac] = + WMI_VDEV_PARAM_AMPDU_SUBFRAME_SIZE_PER_AC; + vdev_param[wmi_vdev_param_disable_cabq] = + WMI_VDEV_PARAM_DISABLE_CABQ; + vdev_param[wmi_vdev_param_amsdu_subframe_size_per_ac] = + WMI_VDEV_PARAM_AMSDU_SUBFRAME_SIZE_PER_AC; +} 
+#endif + +/** + * wmi_get_non_tlv_ops() - gives pointer to wmi tlv ops + * + * Return: pointer to wmi tlv ops + */ +void wmi_non_tlv_attach(struct wmi_unified *wmi_handle) +{ +#if defined(WMI_NON_TLV_SUPPORT) || defined(WMI_TLV_AND_NON_TLV_SUPPORT) + wmi_handle->ops = &non_tlv_ops; + wmi_handle->soc->svc_ids = &svc_ids[0]; + populate_non_tlv_service(wmi_handle->services); + populate_non_tlv_events_id(wmi_handle->wmi_events); + populate_pdev_param_non_tlv(wmi_handle->pdev_param); + populate_vdev_param_non_tlv(wmi_handle->vdev_param); + +#ifdef WMI_INTERFACE_EVENT_LOGGING + wmi_handle->log_info.buf_offset_command = 0; + wmi_handle->log_info.buf_offset_event = 0; + /*(uint8 *)(*wmi_id_to_name)(uint32_t cmd_id);*/ +#endif +#else + qdf_print("%s: Not supported\n", __func__); +#endif +} +qdf_export_symbol(wmi_non_tlv_attach); + +/** + * wmi_non_tlv_init() - Initialize WMI NON TLV module by registering Non TLV + * attach routine. + * + * Return: None + */ +void wmi_non_tlv_init(void) +{ + wmi_unified_register_module(WMI_NON_TLV_TARGET, &wmi_non_tlv_attach); +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_ocb_ut.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_ocb_ut.c new file mode 100644 index 0000000000000000000000000000000000000000..0cadf117c790c64bef92f2a48a6bbb77b55ed54b --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_ocb_ut.c @@ -0,0 +1,515 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wmi_unified_api.h" +#include "wmi.h" +#include "wmi_version.h" +#include "wmi_unified_priv.h" +#include +#include "target_if.h" +#include "wma.h" +#include "wlan_ocb_ucfg_api.h" +#include "wlan_ocb_main.h" + +void wmi_ocb_ut_attach(struct wmi_unified *wmi_handle); + +static inline struct wlan_ocb_rx_ops * +target_if_ocb_get_rx_ops(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_objmgr_pdev *pdev; + struct ocb_pdev_obj *pdev_obj; + + pdev = wlan_objmgr_get_pdev_by_id(psoc, 0, + WLAN_OCB_SB_ID); + pdev_obj = (struct ocb_pdev_obj *) + wlan_objmgr_pdev_get_comp_private_obj(pdev, + WLAN_UMAC_COMP_OCB); + return &pdev_obj->ocb_rxops; +} + +/** + * fake_vdev_create_cmd_tlv() - send VDEV create command to fw + * @wmi_handle: wmi handle + * @param: pointer to hold vdev create parameter + * @macaddr: vdev mac address + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS fake_vdev_create_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct vdev_create_params *param) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_vdev_delete_cmd_tlv() - send VDEV delete command to fw + * @wmi_handle: wmi handle + * @if_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS fake_vdev_delete_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t if_id) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_ocb_set_utc_time_cmd_tlv() - send the UTC time to the firmware + * @wmi_handle: pointer to the wmi handle + * @utc: pointer to the UTC time struct + * + * Return: 0 on succes + */ 
+static QDF_STATUS fake_ocb_set_utc_time_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_utc_param *utc) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_ocb_get_tsf_timer_cmd_tlv() - get ocb tsf timer val + * @wmi_handle: pointer to the wmi handle + * @request: pointer to the request + * + * Return: 0 on succes + */ +static QDF_STATUS fake_ocb_get_tsf_timer_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wlan_ocb_rx_ops *ocb_rx_ops; + struct ocb_get_tsf_timer_response response; + ol_scn_t scn = (ol_scn_t) wmi_handle->scn_handle; + + WMI_LOGP("%s : called", __func__); + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + WMI_LOGP("null psoc"); + return -EINVAL; + } + response.vdev_id = vdev_id; + response.timer_high = 0x1234; + response.timer_low = 0x5678; + + ocb_rx_ops = target_if_ocb_get_rx_ops(psoc); + if (ocb_rx_ops->ocb_tsf_timer) { + status = ocb_rx_ops->ocb_tsf_timer(psoc, &response); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGP("ocb_tsf_timer failed."); + return -EINVAL; + } + } else { + WMI_LOGP("No ocb_tsf_timer callback"); + return -EINVAL; + } + return QDF_STATUS_SUCCESS; +} + +/** + * fake_dcc_clear_stats_cmd_tlv() - command to clear the DCC stats + * @wmi_handle: pointer to the wmi handle + * @vdev_id: vdev id + * @dcc_stats_bitmap: dcc status bitmap + * + * Return: 0 on succes + */ +static QDF_STATUS fake_dcc_clear_stats_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, uint32_t dcc_stats_bitmap) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +wmi_dcc_ndl_stats_per_channel chan1_info[2] = { + [0] = {.chan_info = 5860, + .tx_power_datarate = 23 | (10 << 8), + .carrier_sense_est_comm_range = 107 | (198 << 13), + .dcc_stats = 78 | (56 << 8) | (345 << 16), + .packet_stats = 1278 | (789 << 14), + .channel_busy_time = 1389, + }, + [1] = {.chan_info = 5880, + .tx_power_datarate = 53 | (17 << 8), + 
.carrier_sense_est_comm_range = 137 | (198 << 13), + .dcc_stats = 78 | (66 << 8) | (245 << 16), + .packet_stats = 1278 | (889 << 14), + .channel_busy_time = 2389, + }, +}; + +/** + * fake_dcc_get_stats_cmd_tlv() - get the DCC channel stats + * @wmi_handle: pointer to the wmi handle + * @get_stats_param: pointer to the dcc stats + * + * Return: 0 on succes + */ +static QDF_STATUS fake_dcc_get_stats_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_dcc_get_stats_param *get_stats_param) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wlan_ocb_rx_ops *ocb_rx_ops; + ol_scn_t scn = (ol_scn_t) wmi_handle->scn_handle; + struct ocb_dcc_get_stats_response *response; + WMI_LOGP("%s : called", __func__); + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + WMI_LOGP("null psoc"); + return -EINVAL; + } + response = qdf_mem_malloc(sizeof(*response) + 2 * + sizeof(wmi_dcc_ndl_stats_per_channel)); + response->num_channels = 2; + response->channel_stats_array_len = 2 * + sizeof(wmi_dcc_ndl_stats_per_channel); + response->vdev_id = get_stats_param->vdev_id; + response->channel_stats_array = (uint8_t *)response + sizeof(*response); + qdf_mem_copy(response->channel_stats_array, + &chan1_info, + 2 * sizeof(wmi_dcc_ndl_stats_per_channel)); + WMI_LOGP("channel1 freq %d, channel2 freq %d", chan1_info[0].chan_info, + chan1_info[1].chan_info); + ocb_rx_ops = target_if_ocb_get_rx_ops(psoc); + if (ocb_rx_ops->ocb_dcc_stats_indicate) { + status = ocb_rx_ops->ocb_dcc_stats_indicate(psoc, + response, true); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGP("dcc_stats_indicate failed."); + status = -EINVAL; + } else { + status = 0; + } + } else { + WMI_LOGP("No dcc_stats_indicate callback"); + status = -EINVAL; + } + + qdf_mem_free(response); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_dcc_update_ndl_cmd_tlv() - command to update the NDL data + * @wmi_handle: pointer to the wmi handle + * @update_ndl_param: pointer to the request parameters + * + * Return: 0 on 
success + */ +static QDF_STATUS fake_dcc_update_ndl_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_dcc_update_ndl_param *update_ndl_param) +{ + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wlan_ocb_rx_ops *ocb_rx_ops; + ol_scn_t scn = (ol_scn_t) wmi_handle->scn_handle; + struct ocb_dcc_update_ndl_response *resp; + WMI_LOGP("%s : called", __func__); + /* Allocate and populate the response */ + resp = qdf_mem_malloc(sizeof(*resp)); + if (!resp) { + WMI_LOGP("%s:Error allocating memory for the response.", + __func__); + return -ENOMEM; + } + resp->vdev_id = update_ndl_param->vdev_id; + resp->status = 0; + + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + WMI_LOGP("null psoc"); + return -EINVAL; + } + + ocb_rx_ops = target_if_ocb_get_rx_ops(psoc); + if (ocb_rx_ops->ocb_dcc_ndl_update) { + status = ocb_rx_ops->ocb_dcc_ndl_update(psoc, resp); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGP("dcc_ndl_update failed."); + status = -EINVAL; + } else { + status = 0; + } + } else { + WMI_LOGP("No dcc_ndl_update callback"); + status = -EINVAL; + } + + qdf_mem_free(resp); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_ocb_set_config_cmd_tlv() - send the OCB config to the FW + * @wmi_handle: pointer to the wmi handle + * @config: the OCB configuration + * + * Return: 0 on success + */ +static QDF_STATUS fake_ocb_set_config_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_config *config) +{ + u32 i; + QDF_STATUS status; + struct wlan_objmgr_psoc *psoc; + struct wlan_ocb_rx_ops *ocb_rx_ops; + ol_scn_t scn = (ol_scn_t) wmi_handle->scn_handle; + + WMI_LOGP("%s : called", __func__); + WMI_LOGI("%s: vdev_id=%d, channel_count=%d, schedule_size=%d, flag=%x", + __func__, config->vdev_id, config->channel_count, + config->schedule_size, config->flags); + + for (i = 0; i < config->channel_count; i++) { + WMI_LOGI("%s: channel info for channel %d" + " chan_freq=%d, bandwidth=%d, " QDF_MAC_ADDRESS_STR + " max_pwr=%d, min_pwr=%d, reg_pwr=%d, antenna_max=%d, " + 
"flags=%d", __func__, i, config->channels[i].chan_freq, + config->channels[i].bandwidth, + QDF_MAC_ADDR_ARRAY( + config->channels[i].mac_address.bytes), + config->channels[i].max_pwr, + config->channels[i].min_pwr, + config->channels[i].reg_pwr, + config->channels[i].antenna_max, + config->channels[i].flags); + } + + for (i = 0; i < config->schedule_size; i++) { + WMI_LOGI("%s: schedule info for channel %d: " + "chan_fre=%d, total_duration=%d, guard_intreval=%d", + __func__, i, config->schedule[i].chan_freq, + config->schedule[i].total_duration, + config->schedule[i].guard_interval); + } + psoc = target_if_get_psoc_from_scn_hdl(scn); + if (!psoc) { + WMI_LOGP("null psoc"); + return -EINVAL; + } + + ocb_rx_ops = target_if_ocb_get_rx_ops(psoc); + if (ocb_rx_ops->ocb_set_config_status) { + status = ocb_rx_ops->ocb_set_config_status(psoc, 0); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGP("ocb_set_config_status failed."); + return -EINVAL; + } + } else { + WMI_LOGP("No ocb_set_config_status callback"); + return -EINVAL; + } + return QDF_STATUS_SUCCESS; +} + +/** + * fake_ocb_stop_timing_advert_cmd_tlv() - stop sending the + * timing advertisement frames on a channel + * @wmi_handle: pointer to the wmi handle + * @timing_advert: pointer to the timing advertisement struct + * + * Return: 0 on succes + */ +static QDF_STATUS fake_ocb_stop_timing_advert_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_timing_advert_param *timing_advert) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_ocb_start_timing_advert_cmd_tlv() - start sending the + * timing advertisement frames on a channel + * @wmi_handle: pointer to the wmi handle + * @timing_advert: pointer to the timing advertisement struct + * + * Return: 0 on succes + */ +static QDF_STATUS +fake_ocb_start_timing_advert_cmd_tlv(wmi_unified_t wmi_handle, + struct ocb_timing_advert_param *timing_advert) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * 
fake_peer_create_cmd_tlv() - send peer create command to fw + * @wmi: wmi handle + * @peer_addr: peer mac address + * @peer_type: peer type + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS fake_peer_create_cmd_tlv(wmi_unified_t wmi, + struct peer_create_params *param) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_peer_delete_cmd_tlv() - send PEER delete command to fw + * @wmi: wmi handle + * @peer_addr: peer mac addr + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS fake_peer_delete_cmd_tlv(wmi_unified_t wmi, + uint8_t peer_addr[IEEE80211_ADDR_LEN], + uint8_t vdev_id) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_vdev_start_cmd_tlv() - send vdev start request to fw + * @wmi_handle: wmi handle + * @req: vdev start params + * + * Return: QDF status + */ +static QDF_STATUS fake_vdev_start_cmd_tlv(wmi_unified_t wmi_handle, + struct vdev_start_params *req) +{ + tp_wma_handle wma = (tp_wma_handle) wmi_handle->scn_handle; + WMI_LOGP("%s : called", __func__); + WMI_LOGI("%s: vdev_id %d freq %d chanmode %d ch_info is_dfs %d " + "beacon interval %d dtim %d center_chan %d center_freq2 %d " + "max_txpow: 0x%x " + "Tx SS %d, Rx SS %d, ldpc_rx: %d, cac %d, regd %d, HE ops: %d", + __func__, (int)req->vdev_id, req->chan_freq, req->chan_mode, + (int)req->is_dfs, req->beacon_intval, req->dtim_period, + req->band_center_freq1, req->band_center_freq2, + req->max_txpow, + req->preferred_tx_streams, req->preferred_rx_streams, + (int)req->ldpc_rx_enabled, req->cac_duration_ms, + req->regdomain, req->he_ops); + wma_remove_vdev_req(wma, req->vdev_id, + WMA_TARGET_REQ_TYPE_VDEV_START); + wma_vdev_set_mlme_state(wma, req->vdev_id, + WLAN_VDEV_S_RUN); + ucfg_ocb_config_channel(wma->pdev); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_vdev_down_cmd_tlv() - send vdev down command to fw + * @wmi: 
wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS fake_vdev_down_cmd_tlv(wmi_unified_t wmi, uint8_t vdev_id) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_vdev_set_param_cmd_tlv() - WMI vdev set parameter function + * @param wmi_handle : handle to WMI. + * @param macaddr : MAC address + * @param param : pointer to hold vdev set parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS fake_vdev_set_param_cmd_tlv(wmi_unified_t wmi_handle, + struct vdev_set_params *param) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/** + * fake_set_enable_disable_mcc_adaptive_scheduler_cmd_tlv() - + * faked API to enable/disable mcc scheduler + * @wmi_handle: wmi handle + * @mcc_adaptive_scheduler: enable/disable + * + * This function enable/disable mcc adaptive scheduler in fw. + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS fake_set_enable_disable_mcc_adaptive_scheduler_cmd_tlv( + wmi_unified_t wmi_handle, uint32_t mcc_adaptive_scheduler, + uint32_t pdev_id) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +/* + * fake_process_set_ie_info_cmd_tlv() - Function to send IE info to firmware + * @wmi_handle: Pointer to WMi handle + * @ie_data: Pointer for ie data + * + * This function sends IE information to firmware + * + * Return: QDF_STATUS_SUCCESS for success otherwise failure + * + */ +static QDF_STATUS fake_process_set_ie_info_cmd_tlv(wmi_unified_t wmi_handle, + struct vdev_ie_info_param *ie_info) +{ + WMI_LOGP("%s : called", __func__); + return QDF_STATUS_SUCCESS; +} + +void wmi_ocb_ut_attach(struct wmi_unified *wmi_handle) +{ + struct wmi_ops *wmi_ops; + + if (!wmi_handle) { + WMI_LOGP("%s: null wmi handle", __func__); + return; + } + + wmi_ops = wmi_handle->ops; + wmi_ops->send_vdev_create_cmd = fake_vdev_create_cmd_tlv; + wmi_ops->send_vdev_delete_cmd = 
fake_vdev_delete_cmd_tlv; + wmi_ops->send_vdev_down_cmd = fake_vdev_down_cmd_tlv; + wmi_ops->send_vdev_start_cmd = fake_vdev_start_cmd_tlv; + wmi_ops->send_peer_create_cmd = fake_peer_create_cmd_tlv; + wmi_ops->send_peer_delete_cmd = fake_peer_delete_cmd_tlv; + wmi_ops->send_vdev_set_param_cmd = fake_vdev_set_param_cmd_tlv; + wmi_ops->send_ocb_set_utc_time_cmd = fake_ocb_set_utc_time_cmd_tlv; + wmi_ops->send_ocb_get_tsf_timer_cmd = fake_ocb_get_tsf_timer_cmd_tlv; + wmi_ops->send_dcc_clear_stats_cmd = fake_dcc_clear_stats_cmd_tlv; + wmi_ops->send_dcc_get_stats_cmd = fake_dcc_get_stats_cmd_tlv; + wmi_ops->send_dcc_update_ndl_cmd = fake_dcc_update_ndl_cmd_tlv; + wmi_ops->send_ocb_set_config_cmd = fake_ocb_set_config_cmd_tlv; + wmi_ops->send_ocb_stop_timing_advert_cmd = + fake_ocb_stop_timing_advert_cmd_tlv; + wmi_ops->send_ocb_start_timing_advert_cmd = + fake_ocb_start_timing_advert_cmd_tlv; + wmi_ops->send_set_enable_disable_mcc_adaptive_scheduler_cmd = + fake_set_enable_disable_mcc_adaptive_scheduler_cmd_tlv; + wmi_ops->send_process_set_ie_info_cmd = + fake_process_set_ie_info_cmd_tlv; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_pmo_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_pmo_api.c new file mode 100644 index 0000000000000000000000000000000000000000..852cfe1e9583e9d353067f31ac0508d9fda10590 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_pmo_api.c @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to PMO component. + */ + +#include "ol_if_athvar.h" +#include "ol_defines.h" +#include "wmi_unified_priv.h" +#include "wmi_unified_pmo_api.h" +#include "wlan_pmo_hw_filter_public_struct.h" + +QDF_STATUS wmi_unified_add_wow_wakeup_event_cmd(void *wmi_hdl, + uint32_t vdev_id, + uint32_t *bitmap, + bool enable) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *) wmi_hdl; + + if (wmi_handle->ops->send_add_wow_wakeup_event_cmd) + return wmi_handle->ops->send_add_wow_wakeup_event_cmd( + wmi_handle, vdev_id, bitmap, enable); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_wow_patterns_to_fw_cmd(void *wmi_hdl, + uint8_t vdev_id, uint8_t ptrn_id, + const uint8_t *ptrn, uint8_t ptrn_len, + uint8_t ptrn_offset, const uint8_t *mask, + uint8_t mask_len, bool user, + uint8_t default_patterns) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *) wmi_hdl; + + if (wmi_handle->ops->send_wow_patterns_to_fw_cmd) + return wmi_handle->ops->send_wow_patterns_to_fw_cmd(wmi_handle, + vdev_id, ptrn_id, ptrn, + ptrn_len, ptrn_offset, mask, + mask_len, user, default_patterns); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_add_clear_mcbc_filter_cmd(void *wmi_hdl, + uint8_t vdev_id, + struct qdf_mac_addr multicast_addr, + bool clearList) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle->ops->send_add_clear_mcbc_filter_cmd) + return wmi_handle->ops->send_add_clear_mcbc_filter_cmd( + wmi_handle, vdev_id, multicast_addr, clearList); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_multiple_add_clear_mcbc_filter_cmd(void 
*wmi_hdl, + uint8_t vdev_id, + struct pmo_mcast_filter_params *filter_param) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle->ops->send_multiple_add_clear_mcbc_filter_cmd) + return wmi_handle->ops->send_multiple_add_clear_mcbc_filter_cmd( + wmi_handle, vdev_id, filter_param); + + return QDF_STATUS_E_FAILURE; +} + +#ifdef FEATURE_WLAN_RA_FILTERING +QDF_STATUS wmi_unified_wow_sta_ra_filter_cmd(void *wmi_hdl, + uint8_t vdev_id, uint8_t default_pattern, + uint16_t rate_limit_interval) +{ + + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle->ops->send_wow_sta_ra_filter_cmd) + return wmi_handle->ops->send_wow_sta_ra_filter_cmd(wmi_handle, + vdev_id, default_pattern, rate_limit_interval); + + return QDF_STATUS_E_FAILURE; + +} +#endif /* FEATURE_WLAN_RA_FILTERING */ + +QDF_STATUS wmi_unified_enable_enhance_multicast_offload_cmd( + void *wmi_hdl, uint8_t vdev_id, bool action) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + struct wmi_ops *ops; + + ops = wmi_handle->ops; + if (ops && ops->send_enable_enhance_multicast_offload_cmd) + return ops->send_enable_enhance_multicast_offload_cmd( + wmi_handle, vdev_id, action); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_gtk_rsp_event(void *wmi_hdl, void *evt_buf, + struct pmo_gtk_rsp_params *gtk_rsp_param, uint32_t len) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle->ops->extract_gtk_rsp_event) + return wmi_handle->ops->extract_gtk_rsp_event(wmi_handle, + evt_buf, gtk_rsp_param, len); + + return QDF_STATUS_E_FAILURE; +} + + +QDF_STATUS wmi_unified_process_gtk_offload_getinfo_cmd(void *wmi_hdl, + uint8_t vdev_id, + uint64_t offload_req_opcode) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle->ops->send_process_gtk_offload_getinfo_cmd) + return wmi_handle->ops->send_process_gtk_offload_getinfo_cmd( + wmi_handle, vdev_id, offload_req_opcode); + 
+ return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_action_frame_patterns_cmd(void *wmi_hdl, + struct pmo_action_wakeup_set_params *action_params) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle->ops->send_action_frame_patterns_cmd) + return wmi_handle->ops->send_action_frame_patterns_cmd( + wmi_handle, action_params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_send_gtk_offload_cmd(void *wmi_hdl, uint8_t vdev_id, + struct pmo_gtk_req *params, + bool enable_offload, + uint32_t gtk_offload_opcode) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle->ops->send_gtk_offload_cmd) + return wmi_handle->ops->send_gtk_offload_cmd(wmi_handle, + vdev_id, params, enable_offload, + gtk_offload_opcode); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_enable_disable_packet_filter_cmd(void *wmi_hdl, + uint8_t vdev_id, bool enable) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_enable_disable_packet_filter_cmd) + return wmi_handle->ops->send_enable_disable_packet_filter_cmd( + wmi_handle, vdev_id, enable); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_config_packet_filter_cmd(void *wmi_hdl, + uint8_t vdev_id, struct pmo_rcv_pkt_fltr_cfg *rcv_filter_param, + uint8_t filter_id, bool enable) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_config_packet_filter_cmd) + return wmi_handle->ops->send_config_packet_filter_cmd( + wmi_handle, vdev_id, rcv_filter_param, filter_id, enable); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_enable_arp_ns_offload_cmd(void *wmi_hdl, + struct pmo_arp_offload_params *arp_offload_req, + struct pmo_ns_offload_params *ns_offload_req, + uint8_t vdev_id) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle->ops->send_enable_arp_ns_offload_cmd) + return wmi_handle->ops->send_enable_arp_ns_offload_cmd( + 
wmi_handle, + arp_offload_req, ns_offload_req, vdev_id); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_conf_hw_filter_cmd(void *opaque_wmi, + struct pmo_hw_filter_params *req) +{ + struct wmi_unified *wmi = opaque_wmi; + + if (!wmi->ops->send_conf_hw_filter_cmd) + return QDF_STATUS_E_NOSUPPORT; + + return wmi->ops->send_conf_hw_filter_cmd(wmi, req); +} + +#ifdef FEATURE_WLAN_LPHB +QDF_STATUS wmi_unified_lphb_config_hbenable_cmd(void *wmi_hdl, + wmi_hb_set_enable_cmd_fixed_param *params) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle->ops->send_lphb_config_hbenable_cmd) + return wmi_handle->ops->send_lphb_config_hbenable_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_lphb_config_tcp_params_cmd(void *wmi_hdl, + wmi_hb_set_tcp_params_cmd_fixed_param *lphb_conf_req) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle->ops->send_lphb_config_tcp_params_cmd) + return wmi_handle->ops->send_lphb_config_tcp_params_cmd( + wmi_handle, lphb_conf_req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_lphb_config_tcp_pkt_filter_cmd(void *wmi_hdl, + wmi_hb_set_tcp_pkt_filter_cmd_fixed_param *g_hb_tcp_filter_fp) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle->ops->send_lphb_config_tcp_pkt_filter_cmd) + return wmi_handle->ops->send_lphb_config_tcp_pkt_filter_cmd( + wmi_handle, g_hb_tcp_filter_fp); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_lphb_config_udp_params_cmd(void *wmi_hdl, + wmi_hb_set_udp_params_cmd_fixed_param *lphb_conf_req) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle->ops->send_lphb_config_udp_params_cmd) + return wmi_handle->ops->send_lphb_config_udp_params_cmd( + wmi_handle, lphb_conf_req); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_lphb_config_udp_pkt_filter_cmd(void *wmi_hdl, + 
wmi_hb_set_udp_pkt_filter_cmd_fixed_param *lphb_conf_req) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle->ops->send_lphb_config_udp_pkt_filter_cmd) + return wmi_handle->ops->send_lphb_config_udp_pkt_filter_cmd( + wmi_handle, lphb_conf_req); + + return QDF_STATUS_E_FAILURE; +} +#endif /* FEATURE_WLAN_LPHB */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_reg_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_reg_api.c new file mode 100644 index 0000000000000000000000000000000000000000..67ef9b1602180cc38438e7e848f82b223435cc2e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_reg_api.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * DOC: Implement API's specific to Regulatory component. 
+ */ + +#include +#include +#include +#include +#include + +QDF_STATUS wmi_extract_reg_chan_list_update_event(void *wmi_hdl, + uint8_t *evt_buf, + struct cur_regulatory_info + *reg_info, + uint32_t len) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle && wmi_handle->ops->extract_reg_chan_list_update_event) + return wmi_handle->ops->extract_reg_chan_list_update_event + (wmi_handle, + evt_buf, reg_info, len); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_extract_reg_chan_list_update_event); + +/* + * wmi_unified_send_start_11d_scan_cmd() - start 11d scan + * @wmi_handle: wmi handle + * @start_11d_scan: pointer to 11d scan start req. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_unified_send_start_11d_scan_cmd(wmi_unified_t wmi_handle, + struct reg_start_11d_scan_req *start_11d_scan) +{ + if (wmi_handle->ops->send_start_11d_scan_cmd) + return wmi_handle->ops->send_start_11d_scan_cmd(wmi_handle, + start_11d_scan); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_unified_send_start_11d_scan_cmd); + +/* + * wmi_unified_send_stop_11d_scan_cmd() - stop 11d scan + * @wmi_handle: wmi handle + * @stop_11d_scan: pointer to 11d scan stop req. 
+ * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +QDF_STATUS wmi_unified_send_stop_11d_scan_cmd(wmi_unified_t wmi_handle, + struct reg_stop_11d_scan_req *stop_11d_scan) +{ + if (wmi_handle->ops->send_stop_11d_scan_cmd) + return wmi_handle->ops->send_stop_11d_scan_cmd(wmi_handle, + stop_11d_scan); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_unified_send_stop_11d_scan_cmd); + +QDF_STATUS wmi_extract_reg_11d_new_cc_event(void *wmi_hdl, + uint8_t *evt_buf, + struct reg_11d_new_country *reg_11d_new_cc, + uint32_t len) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle && wmi_handle->ops->extract_reg_11d_new_country_event) + return wmi_handle->ops->extract_reg_11d_new_country_event( + wmi_handle, evt_buf, reg_11d_new_cc, len); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_extract_reg_11d_new_cc_event); + +QDF_STATUS wmi_unified_set_user_country_code_cmd_send(void *wmi_hdl, + uint8_t pdev_id, struct cc_regdmn_s *rd) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *) wmi_hdl; + + if (wmi_handle->ops->send_user_country_code_cmd) + return wmi_handle->ops->send_user_country_code_cmd( + wmi_handle, pdev_id, rd); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_unified_set_user_country_code_cmd_send); + +QDF_STATUS wmi_extract_reg_ch_avoid_event(void *wmi_hdl, + uint8_t *evt_buf, + struct ch_avoid_ind_type *ch_avoid_ind, + uint32_t len) +{ + struct wmi_unified *wmi_handle = (struct wmi_unified *)wmi_hdl; + + if (wmi_handle && wmi_handle->ops->extract_reg_ch_avoid_event) + return wmi_handle->ops->extract_reg_ch_avoid_event( + wmi_handle, evt_buf, ch_avoid_ind, len); + + return QDF_STATUS_E_FAILURE; +} +qdf_export_symbol(wmi_extract_reg_ch_avoid_event); diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c new file mode 100644 index 
0000000000000000000000000000000000000000..e8287102415d45315b83a97e6f4b055fb25d2f0e --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c @@ -0,0 +1,24438 @@ +/* + * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "wmi_unified_api.h" +#include "wmi.h" +#include "wmi_version.h" +#include "wmi_unified_priv.h" +#include "wmi_version_whitelist.h" +#include +#include +#include +#include +#ifdef FEATURE_WLAN_APF +#include "wmi_unified_apf_tlv.h" +#endif +#ifdef WLAN_FEATURE_ACTION_OUI +#include "wmi_unified_action_oui_tlv.h" +#endif +#ifdef CONVERGED_P2P_ENABLE +#include "wlan_p2p_public_struct.h" +#endif +#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD +#include "wlan_pmo_hw_filter_public_struct.h" +#endif +#include +#ifdef WLAN_SUPPORT_GREEN_AP +#include "wlan_green_ap_api.h" +#endif + +#ifdef WLAN_FEATURE_NAN_CONVERGENCE +#include "nan_public_structs.h" +#endif +#include "wmi_unified_twt_api.h" + +#ifdef WLAN_POLICY_MGR_ENABLE +#include "wlan_policy_mgr_public_struct.h" +#endif + +#ifdef FEATURE_WLAN_TDLS +#include "wlan_tdls_public_structs.h" +#endif + +/* HTC service ids for WMI for multi-radio */ +static const uint32_t multi_svc_ids[] = {WMI_CONTROL_SVC, + WMI_CONTROL_SVC_WMAC1, + WMI_CONTROL_SVC_WMAC2}; + +/** + * convert_host_pdev_id_to_target_pdev_id() - Convert pdev_id from + * host to target defines. + * @param pdev_id: host pdev_id to be converted. + * Return: target pdev_id after conversion. + */ +static uint32_t convert_host_pdev_id_to_target_pdev_id(uint32_t pdev_id) +{ + switch (pdev_id) { + case WMI_HOST_PDEV_ID_SOC: + return WMI_PDEV_ID_SOC; + case WMI_HOST_PDEV_ID_0: + return WMI_PDEV_ID_1ST; + case WMI_HOST_PDEV_ID_1: + return WMI_PDEV_ID_2ND; + case WMI_HOST_PDEV_ID_2: + return WMI_PDEV_ID_3RD; + } + + QDF_ASSERT(0); + + return WMI_PDEV_ID_SOC; +} + +/** + * convert_target_pdev_id_to_host_pdev_id() - Convert pdev_id from + * target to host defines. + * @param pdev_id: target pdev_id to be converted. + * Return: host pdev_id after conversion. 
+ */ +static uint32_t convert_target_pdev_id_to_host_pdev_id(uint32_t pdev_id) +{ + switch (pdev_id) { + case WMI_PDEV_ID_SOC: + return WMI_HOST_PDEV_ID_SOC; + case WMI_PDEV_ID_1ST: + return WMI_HOST_PDEV_ID_0; + case WMI_PDEV_ID_2ND: + return WMI_HOST_PDEV_ID_1; + case WMI_PDEV_ID_3RD: + return WMI_HOST_PDEV_ID_2; + } + + WMI_LOGE("Invalid pdev_id"); + + return WMI_HOST_PDEV_ID_INVALID; +} + +/** + * wmi_tlv_pdev_id_conversion_enable() - Enable pdev_id conversion + * + * Return None. + */ +static void wmi_tlv_pdev_id_conversion_enable(wmi_unified_t wmi_handle) +{ + wmi_handle->ops->convert_pdev_id_host_to_target = + convert_host_pdev_id_to_target_pdev_id; + wmi_handle->ops->convert_pdev_id_target_to_host = + convert_target_pdev_id_to_host_pdev_id; +} + +/* copy_vdev_create_pdev_id() - copy pdev from host params to target command + * buffer. + * @wmi_handle: pointer to wmi_handle + * @cmd: pointer target vdev create command buffer + * @param: pointer host params for vdev create + * + * Return: None + */ +#ifdef CONFIG_MCL +static inline void copy_vdev_create_pdev_id( + struct wmi_unified *wmi_handle, + wmi_vdev_create_cmd_fixed_param * cmd, + struct vdev_create_params *param) +{ + cmd->pdev_id = WMI_PDEV_ID_SOC; +} +#else +static inline void copy_vdev_create_pdev_id( + struct wmi_unified *wmi_handle, + wmi_vdev_create_cmd_fixed_param * cmd, + struct vdev_create_params *param) +{ + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + param->pdev_id); +} +#endif + +/** + * wmi_mtrace() - Wrappper function for qdf_mtrace api + * @message_id: 32-Bit Wmi message ID + * @vdev_id: Vdev ID + * @data: Actual message contents + * + * This function converts the 32-bit WMI message ID in 15-bit message ID + * format for qdf_mtrace as in qdf_mtrace message there are only 15 + * bits reserved for message ID. + * out of these 15-bits, 8-bits (From MSB) specifies the WMI_GRP_ID + * and remaining 7-bits specifies the actual WMI command. 
With this + * notation there can be maximum 256 groups and each group can have + * max 128 commands can be supported. + * + * Return: None + */ +static void wmi_mtrace(uint32_t message_id, uint16_t vdev_id, uint32_t data) +{ + uint16_t mtrace_message_id; + + mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) | + (QDF_WMI_MTRACE_GRP_ID(message_id) << + QDF_WMI_MTRACE_CMD_NUM_BITS); + qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_TARGET, + mtrace_message_id, vdev_id, data); +} + +/** + * send_vdev_create_cmd_tlv() - send VDEV create command to fw + * @wmi_handle: wmi handle + * @param: pointer to hold vdev create parameter + * @macaddr: vdev mac address + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_vdev_create_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct vdev_create_params *param) +{ + wmi_vdev_create_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + QDF_STATUS ret; + int num_bands = 2; + uint8_t *buf_ptr; + wmi_vdev_txrx_streams *txrx_streams; + + len += (num_bands * sizeof(*txrx_streams) + WMI_TLV_HDR_SIZE); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_create_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_create_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_create_cmd_fixed_param)); + cmd->vdev_id = param->if_id; + cmd->vdev_type = param->type; + cmd->vdev_subtype = param->subtype; + cmd->num_cfg_txrx_streams = num_bands; + copy_vdev_create_pdev_id(wmi_handle, cmd, param); + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->vdev_macaddr); + WMI_LOGD("%s: ID = %d[pdev:%d] VAP Addr = %02x:%02x:%02x:%02x:%02x:%02x", + __func__, param->if_id, cmd->pdev_id, + macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5]); + buf_ptr = (uint8_t *)cmd + sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, 
WMITLV_TAG_ARRAY_STRUC, + (num_bands * sizeof(wmi_vdev_txrx_streams))); + buf_ptr += WMI_TLV_HDR_SIZE; + + WMI_LOGD("%s: type %d, subtype %d, nss_2g %d, nss_5g %d", __func__, + param->type, param->subtype, + param->nss_2g, param->nss_5g); + txrx_streams = (wmi_vdev_txrx_streams *)buf_ptr; + txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G; + txrx_streams->supported_tx_streams = param->nss_2g; + txrx_streams->supported_rx_streams = param->nss_2g; + WMITLV_SET_HDR(&txrx_streams->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_txrx_streams, + WMITLV_GET_STRUCT_TLVLEN(wmi_vdev_txrx_streams)); + + txrx_streams++; + txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G; + txrx_streams->supported_tx_streams = param->nss_5g; + txrx_streams->supported_rx_streams = param->nss_5g; + WMITLV_SET_HDR(&txrx_streams->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_txrx_streams, + WMITLV_GET_STRUCT_TLVLEN(wmi_vdev_txrx_streams)); + wmi_mtrace(WMI_VDEV_CREATE_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, WMI_VDEV_CREATE_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send WMI_VDEV_CREATE_CMDID"); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_vdev_delete_cmd_tlv() - send VDEV delete command to fw + * @wmi_handle: wmi handle + * @if_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_vdev_delete_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t if_id) +{ + wmi_vdev_delete_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS ret; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGP("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_vdev_delete_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_delete_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_delete_cmd_fixed_param)); + cmd->vdev_id = if_id; + wmi_mtrace(WMI_VDEV_DELETE_CMDID, cmd->vdev_id, 0); + ret = 
wmi_unified_cmd_send(wmi_handle, buf, + sizeof(wmi_vdev_delete_cmd_fixed_param), + WMI_VDEV_DELETE_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send WMI_VDEV_DELETE_CMDID"); + wmi_buf_free(buf); + } + WMI_LOGD("%s:vdev id = %d", __func__, if_id); + + return ret; +} + +/** + * send_vdev_nss_chain_params_cmd_tlv() - send VDEV nss chain params to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @nss_chains_user_cfg: user configured nss chain params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS +send_vdev_nss_chain_params_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct mlme_nss_chains *user_cfg) +{ + wmi_vdev_chainmask_config_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS ret; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGP("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_vdev_chainmask_config_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_chainmask_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_chainmask_config_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->disable_rx_mrc_2g = user_cfg->disable_rx_mrc[NSS_CHAINS_BAND_2GHZ]; + cmd->disable_tx_mrc_2g = user_cfg->disable_tx_mrc[NSS_CHAINS_BAND_2GHZ]; + cmd->disable_rx_mrc_5g = user_cfg->disable_rx_mrc[NSS_CHAINS_BAND_5GHZ]; + cmd->disable_tx_mrc_5g = user_cfg->disable_tx_mrc[NSS_CHAINS_BAND_5GHZ]; + cmd->num_rx_chains_2g = user_cfg->num_rx_chains[NSS_CHAINS_BAND_2GHZ]; + cmd->num_tx_chains_2g = user_cfg->num_tx_chains[NSS_CHAINS_BAND_2GHZ]; + cmd->num_rx_chains_5g = user_cfg->num_rx_chains[NSS_CHAINS_BAND_5GHZ]; + cmd->num_tx_chains_5g = user_cfg->num_tx_chains[NSS_CHAINS_BAND_5GHZ]; + cmd->rx_nss_2g = user_cfg->rx_nss[NSS_CHAINS_BAND_2GHZ]; + cmd->tx_nss_2g = user_cfg->tx_nss[NSS_CHAINS_BAND_2GHZ]; + cmd->rx_nss_5g = user_cfg->rx_nss[NSS_CHAINS_BAND_5GHZ]; + cmd->tx_nss_5g = 
user_cfg->tx_nss[NSS_CHAINS_BAND_5GHZ]; + cmd->num_tx_chains_a = user_cfg->num_tx_chains_11a; + cmd->num_tx_chains_b = user_cfg->num_tx_chains_11b; + cmd->num_tx_chains_g = user_cfg->num_tx_chains_11g; + + wmi_mtrace(WMI_VDEV_CHAINMASK_CONFIG_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, + sizeof(wmi_vdev_chainmask_config_cmd_fixed_param), + WMI_VDEV_CHAINMASK_CONFIG_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send WMI_VDEV_CHAINMASK_CONFIG_CMDID"); + wmi_buf_free(buf); + } + WMI_LOGD("%s: vdev_id %d", __func__, vdev_id); + + return ret; +} + +/** + * send_vdev_stop_cmd_tlv() - send vdev stop command to fw + * @wmi: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or erro code + */ +static QDF_STATUS send_vdev_stop_cmd_tlv(wmi_unified_t wmi, + uint8_t vdev_id) +{ + wmi_vdev_stop_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi, len); + if (!buf) { + WMI_LOGP("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_stop_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_stop_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_vdev_stop_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + wmi_mtrace(WMI_VDEV_STOP_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi, buf, len, WMI_VDEV_STOP_CMDID)) { + WMI_LOGP("%s: Failed to send vdev stop command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + WMI_LOGD("%s:vdev id = %d", __func__, vdev_id); + + return 0; +} + +/** + * send_vdev_down_cmd_tlv() - send vdev down command to fw + * @wmi: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_vdev_down_cmd_tlv(wmi_unified_t wmi, uint8_t vdev_id) +{ + wmi_vdev_down_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi, len); + if (!buf) { + 
WMI_LOGP("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_down_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_down_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_vdev_down_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + wmi_mtrace(WMI_VDEV_DOWN_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi, buf, len, WMI_VDEV_DOWN_CMDID)) { + WMI_LOGP("%s: Failed to send vdev down", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + WMI_LOGD("%s: vdev_id %d", __func__, vdev_id); + + return 0; +} + +#ifdef CONFIG_MCL +static inline void copy_channel_info( + wmi_vdev_start_request_cmd_fixed_param * cmd, + wmi_channel *chan, + struct vdev_start_params *req) +{ + chan->mhz = req->chan_freq; + + WMI_SET_CHANNEL_MODE(chan, req->chan_mode); + + chan->band_center_freq1 = req->band_center_freq1; + chan->band_center_freq2 = req->band_center_freq2; + + if (req->is_half_rate) + WMI_SET_CHANNEL_FLAG(chan, WMI_CHAN_FLAG_HALF_RATE); + else if (req->is_quarter_rate) + WMI_SET_CHANNEL_FLAG(chan, WMI_CHAN_FLAG_QUARTER_RATE); + + if (req->is_dfs && req->flag_dfs) { + WMI_SET_CHANNEL_FLAG(chan, req->flag_dfs); + cmd->disable_hw_ack = req->dis_hw_ack; + } + + WMI_SET_CHANNEL_REG_POWER(chan, req->max_txpow); + WMI_SET_CHANNEL_MAX_TX_POWER(chan, req->max_txpow); + +} +#else +static inline void copy_channel_info( + wmi_vdev_start_request_cmd_fixed_param * cmd, + wmi_channel *chan, + struct vdev_start_params *req) +{ + chan->mhz = req->channel.mhz; + + WMI_SET_CHANNEL_MODE(chan, req->channel.phy_mode); + + chan->band_center_freq1 = req->channel.cfreq1; + chan->band_center_freq2 = req->channel.cfreq2; + WMI_LOGI("%s: req->channel.phy_mode: %d ", req->channel.phy_mode); + + if (req->channel.half_rate) + WMI_SET_CHANNEL_FLAG(chan, WMI_CHAN_FLAG_HALF_RATE); + else if (req->channel.quarter_rate) + WMI_SET_CHANNEL_FLAG(chan, WMI_CHAN_FLAG_QUARTER_RATE); + + WMI_LOGI("%s: 
req->channel.dfs_set: %d ", req->channel.dfs_set); + + if (req->channel.dfs_set) { + WMI_SET_CHANNEL_FLAG(chan, WMI_CHAN_FLAG_DFS); + cmd->disable_hw_ack = req->disable_hw_ack; + } + + if (req->channel.dfs_set_cfreq2) + WMI_SET_CHANNEL_FLAG(chan, WMI_CHAN_FLAG_DFS_CFREQ2); + + /* According to firmware both reg power and max tx power + * on set channel power is used and set it to max reg + * power from regulatory. + */ + WMI_SET_CHANNEL_MIN_POWER(chan, req->channel.minpower); + WMI_SET_CHANNEL_MAX_POWER(chan, req->channel.maxpower); + WMI_SET_CHANNEL_REG_POWER(chan, req->channel.maxregpower); + WMI_SET_CHANNEL_ANTENNA_MAX(chan, req->channel.antennamax); + WMI_SET_CHANNEL_REG_CLASSID(chan, req->channel.reg_class_id); + WMI_SET_CHANNEL_MAX_TX_POWER(chan, req->channel.maxregpower); + +} +#endif +/** + * send_vdev_start_cmd_tlv() - send vdev start request to fw + * @wmi_handle: wmi handle + * @req: vdev start params + * + * Return: QDF status + */ +static QDF_STATUS send_vdev_start_cmd_tlv(wmi_unified_t wmi_handle, + struct vdev_start_params *req) +{ + wmi_vdev_start_request_cmd_fixed_param *cmd; + wmi_buf_t buf; + wmi_channel *chan; + int32_t len, ret; + uint8_t *buf_ptr; + + len = sizeof(*cmd) + sizeof(wmi_channel) + WMI_TLV_HDR_SIZE; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_vdev_start_request_cmd_fixed_param *) buf_ptr; + chan = (wmi_channel *) (buf_ptr + sizeof(*cmd)); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_start_request_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_start_request_cmd_fixed_param)); + WMITLV_SET_HDR(&chan->tlv_header, WMITLV_TAG_STRUC_wmi_channel, + WMITLV_GET_STRUCT_TLVLEN(wmi_channel)); + cmd->vdev_id = req->vdev_id; + + /* Fill channel info */ + copy_channel_info(cmd, chan, req); + + cmd->beacon_interval = req->beacon_intval; + cmd->dtim_period = req->dtim_period; 
+ + cmd->bcn_tx_rate = req->bcn_tx_rate_code; + if (req->bcn_tx_rate_code) + cmd->flags |= WMI_UNIFIED_VDEV_START_BCN_TX_RATE_PRESENT; + + if (!req->is_restart) { + if (req->pmf_enabled) + cmd->flags |= WMI_UNIFIED_VDEV_START_PMF_ENABLED; + } + + /* Copy the SSID */ + if (req->ssid.length) { + if (req->ssid.length < sizeof(cmd->ssid.ssid)) + cmd->ssid.ssid_len = req->ssid.length; + else + cmd->ssid.ssid_len = sizeof(cmd->ssid.ssid); + qdf_mem_copy(cmd->ssid.ssid, req->ssid.mac_ssid, + cmd->ssid.ssid_len); + } + + if (req->hidden_ssid) + cmd->flags |= WMI_UNIFIED_VDEV_START_HIDDEN_SSID; + + cmd->flags |= WMI_UNIFIED_VDEV_START_LDPC_RX_ENABLED; + cmd->num_noa_descriptors = req->num_noa_descriptors; + cmd->preferred_rx_streams = req->preferred_rx_streams; + cmd->preferred_tx_streams = req->preferred_tx_streams; + cmd->cac_duration_ms = req->cac_duration_ms; + cmd->regdomain = req->regdomain; + cmd->he_ops = req->he_ops; + + buf_ptr = (uint8_t *) (((uintptr_t) cmd) + sizeof(*cmd) + + sizeof(wmi_channel)); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + cmd->num_noa_descriptors * + sizeof(wmi_p2p_noa_descriptor)); + WMI_LOGI("%s: vdev_id %d freq %d chanmode %d ch_info: 0x%x is_dfs %d " + "beacon interval %d dtim %d center_chan %d center_freq2 %d " + "reg_info_1: 0x%x reg_info_2: 0x%x, req->max_txpow: 0x%x " + "Tx SS %d, Rx SS %d, ldpc_rx: %d, cac %d, regd %d, HE ops: %d" + "req->dis_hw_ack: %d ", __func__, req->vdev_id, + chan->mhz, req->chan_mode, chan->info, + req->is_dfs, req->beacon_intval, cmd->dtim_period, + chan->band_center_freq1, chan->band_center_freq2, + chan->reg_info_1, chan->reg_info_2, req->max_txpow, + req->preferred_tx_streams, req->preferred_rx_streams, + req->ldpc_rx_enabled, req->cac_duration_ms, + req->regdomain, req->he_ops, + req->dis_hw_ack); + + if (req->is_restart) { + wmi_mtrace(WMI_VDEV_RESTART_REQUEST_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_RESTART_REQUEST_CMDID); + } else { + 
wmi_mtrace(WMI_VDEV_START_REQUEST_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_START_REQUEST_CMDID); + } + if (ret) { + WMI_LOGP("%s: Failed to send vdev start command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_hidden_ssid_vdev_restart_cmd_tlv() - restart vdev to set hidden ssid + * @wmi_handle: wmi handle + * @restart_params: vdev restart params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_hidden_ssid_vdev_restart_cmd_tlv(wmi_unified_t wmi_handle, + struct hidden_ssid_vdev_restart_params *restart_params) +{ + wmi_vdev_start_request_cmd_fixed_param *cmd; + wmi_buf_t buf; + wmi_channel *chan; + int32_t len; + uint8_t *buf_ptr; + QDF_STATUS ret = 0; + + len = sizeof(*cmd) + sizeof(wmi_channel) + WMI_TLV_HDR_SIZE; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_vdev_start_request_cmd_fixed_param *) buf_ptr; + chan = (wmi_channel *) (buf_ptr + sizeof(*cmd)); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_start_request_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_start_request_cmd_fixed_param)); + + WMITLV_SET_HDR(&chan->tlv_header, + WMITLV_TAG_STRUC_wmi_channel, + WMITLV_GET_STRUCT_TLVLEN(wmi_channel)); + + cmd->vdev_id = restart_params->session_id; + cmd->ssid.ssid_len = restart_params->ssid_len; + qdf_mem_copy(cmd->ssid.ssid, + restart_params->ssid, + cmd->ssid.ssid_len); + cmd->flags = restart_params->flags; + cmd->requestor_id = restart_params->requestor_id; + cmd->disable_hw_ack = restart_params->disable_hw_ack; + + chan->mhz = restart_params->mhz; + chan->band_center_freq1 = + restart_params->band_center_freq1; + chan->band_center_freq2 = + restart_params->band_center_freq2; + chan->info = restart_params->info; + chan->reg_info_1 
= restart_params->reg_info_1; + chan->reg_info_2 = restart_params->reg_info_2; + + cmd->num_noa_descriptors = 0; + buf_ptr = (uint8_t *) (((uint8_t *) cmd) + sizeof(*cmd) + + sizeof(wmi_channel)); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + cmd->num_noa_descriptors * + sizeof(wmi_p2p_noa_descriptor)); + + wmi_mtrace(WMI_VDEV_RESTART_REQUEST_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_RESTART_REQUEST_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + + +/** + * send_peer_flush_tids_cmd_tlv() - flush peer tids packets in fw + * @wmi: wmi handle + * @peer_addr: peer mac address + * @param: pointer to hold peer flush tid parameter + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_peer_flush_tids_cmd_tlv(wmi_unified_t wmi, + uint8_t peer_addr[IEEE80211_ADDR_LEN], + struct peer_flush_params *param) +{ + wmi_peer_flush_tids_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_peer_flush_tids_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_flush_tids_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_flush_tids_cmd_fixed_param)); + WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_addr, &cmd->peer_macaddr); + cmd->peer_tid_bitmap = param->peer_tid_bitmap; + cmd->vdev_id = param->vdev_id; + WMI_LOGD("%s: peer_addr %pM vdev_id %d and peer bitmap %d", __func__, + peer_addr, param->vdev_id, + param->peer_tid_bitmap); + wmi_mtrace(WMI_PEER_FLUSH_TIDS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi, buf, len, WMI_PEER_FLUSH_TIDS_CMDID)) { + WMI_LOGP("%s: Failed to send flush tid command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +/** + * send_peer_unmap_conf_cmd_tlv() - send 
PEER UNMAP conf command to fw
 * @wmi: wmi handle
 * @vdev_id: vdev id
 * @peer_id_cnt: no. of peer ids
 * @peer_id_list: list of peer ids
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
static QDF_STATUS send_peer_unmap_conf_cmd_tlv(wmi_unified_t wmi,
					       uint8_t vdev_id,
					       uint32_t peer_id_cnt,
					       uint16_t *peer_id_list)
{
	int i;
	wmi_buf_t buf;
	uint8_t *buf_ptr;
	A_UINT32 *peer_ids;
	wmi_peer_unmap_response_cmd_fixed_param *cmd;
	uint32_t peer_id_list_len;
	uint32_t len = sizeof(*cmd);
	QDF_STATUS status;

	/* nothing to confirm without at least one peer id */
	if (!peer_id_cnt || !peer_id_list)
		return QDF_STATUS_E_FAILURE;

	/* TLV array header precedes the peer id list in the message */
	len += WMI_TLV_HDR_SIZE;

	/* each 16-bit host peer id is carried as a 32-bit value on the wire */
	peer_id_list_len = peer_id_cnt * sizeof(A_UINT32);

	len += peer_id_list_len;

	buf = wmi_buf_alloc(wmi, len);

	if (!buf) {
		WMI_LOGP("%s: wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	cmd = (wmi_peer_unmap_response_cmd_fixed_param *)wmi_buf_data(buf);
	buf_ptr = (uint8_t *)wmi_buf_data(buf);

	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_peer_unmap_response_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN
		       (wmi_peer_unmap_response_cmd_fixed_param));

	/* advance past the fixed param to the peer-id array TLV */
	buf_ptr += sizeof(wmi_peer_unmap_response_cmd_fixed_param);

	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32,
		       peer_id_list_len);

	peer_ids = (A_UINT32 *)(buf_ptr + WMI_TLV_HDR_SIZE);

	/* widen each host peer id into the 32-bit wire array */
	for (i = 0; i < peer_id_cnt; i++)
		peer_ids[i] = peer_id_list[i];

	WMI_LOGD("%s: vdev_id %d peer_id_cnt %d", __func__,
		 vdev_id, peer_id_cnt);
	wmi_mtrace(WMI_PEER_UNMAP_RESPONSE_CMDID, vdev_id, 0);
	status = wmi_unified_cmd_send(wmi, buf, len,
				      WMI_PEER_UNMAP_RESPONSE_CMDID);
	if (QDF_IS_STATUS_ERROR(status)) {
		WMI_LOGE("%s: Failed to send peer unmap conf command: Err[%d]",
			 __func__, status);
		/* send failed: the buffer was not consumed, free it here */
		wmi_buf_free(buf);
		return status;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * send_peer_delete_cmd_tlv() - send PEER delete command to fw
 * @wmi: wmi handle
 * @peer_addr: peer mac addr
 * @vdev_id: vdev id
 *
 * Return:
QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_peer_delete_cmd_tlv(wmi_unified_t wmi, + uint8_t peer_addr[IEEE80211_ADDR_LEN], + uint8_t vdev_id) +{ + wmi_peer_delete_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_peer_delete_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_delete_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_delete_cmd_fixed_param)); + WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_addr, &cmd->peer_macaddr); + cmd->vdev_id = vdev_id; + + WMI_LOGD("%s: peer_addr %pM vdev_id %d", __func__, peer_addr, vdev_id); + wmi_mtrace(WMI_PEER_DELETE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi, buf, len, WMI_PEER_DELETE_CMDID)) { + WMI_LOGP("%s: Failed to send peer delete command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +/** + * convert_host_peer_id_to_target_id_tlv - convert host peer param_id + * to target id. + * @targ_paramid: Target parameter id to hold the result. + * @peer_param_id: host param id. 
+ * + * Return: QDF_STATUS_SUCCESS for success + * QDF_STATUS_E_NOSUPPORT when the param_id in not supported in tareget + */ +#ifdef CONFIG_MCL +static QDF_STATUS convert_host_peer_id_to_target_id_tlv( + uint32_t *targ_paramid, + uint32_t peer_param_id) +{ + *targ_paramid = peer_param_id; + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS convert_host_peer_id_to_target_id_tlv( + uint32_t *targ_paramid, + uint32_t peer_param_id) +{ + switch (peer_param_id) { + case WMI_HOST_PEER_MIMO_PS_STATE: + *targ_paramid = WMI_PEER_MIMO_PS_STATE; + break; + case WMI_HOST_PEER_AMPDU: + *targ_paramid = WMI_PEER_AMPDU; + break; + case WMI_HOST_PEER_AUTHORIZE: + *targ_paramid = WMI_PEER_AUTHORIZE; + break; + case WMI_HOST_PEER_CHWIDTH: + *targ_paramid = WMI_PEER_CHWIDTH; + break; + case WMI_HOST_PEER_NSS: + *targ_paramid = WMI_PEER_NSS; + break; + case WMI_HOST_PEER_USE_4ADDR: + *targ_paramid = WMI_PEER_USE_4ADDR; + break; + case WMI_HOST_PEER_MEMBERSHIP: + *targ_paramid = WMI_PEER_MEMBERSHIP; + break; + case WMI_HOST_PEER_USERPOS: + *targ_paramid = WMI_PEER_USERPOS; + break; + case WMI_HOST_PEER_CRIT_PROTO_HINT_ENABLED: + *targ_paramid = WMI_PEER_CRIT_PROTO_HINT_ENABLED; + break; + case WMI_HOST_PEER_TX_FAIL_CNT_THR: + *targ_paramid = WMI_PEER_TX_FAIL_CNT_THR; + break; + case WMI_HOST_PEER_SET_HW_RETRY_CTS2S: + *targ_paramid = WMI_PEER_SET_HW_RETRY_CTS2S; + break; + case WMI_HOST_PEER_IBSS_ATIM_WINDOW_LENGTH: + *targ_paramid = WMI_PEER_IBSS_ATIM_WINDOW_LENGTH; + break; + case WMI_HOST_PEER_PHYMODE: + *targ_paramid = WMI_PEER_PHYMODE; + break; + case WMI_HOST_PEER_USE_FIXED_PWR: + *targ_paramid = WMI_PEER_USE_FIXED_PWR; + break; + case WMI_HOST_PEER_PARAM_FIXED_RATE: + *targ_paramid = WMI_PEER_PARAM_FIXED_RATE; + break; + case WMI_HOST_PEER_SET_MU_WHITELIST: + *targ_paramid = WMI_PEER_SET_MU_WHITELIST; + break; + case WMI_HOST_PEER_SET_MAC_TX_RATE: + *targ_paramid = WMI_PEER_SET_MAX_TX_RATE; + break; + case WMI_HOST_PEER_SET_MIN_TX_RATE: + *targ_paramid = 
WMI_PEER_SET_MIN_TX_RATE; + break; + case WMI_HOST_PEER_SET_DEFAULT_ROUTING: + *targ_paramid = WMI_PEER_SET_DEFAULT_ROUTING; + break; + case WMI_HOST_PEER_NSS_VHT160: + *targ_paramid = WMI_PEER_NSS_VHT160; + break; + case WMI_HOST_PEER_NSS_VHT80_80: + *targ_paramid = WMI_PEER_NSS_VHT80_80; + break; + case WMI_HOST_PEER_PARAM_SU_TXBF_SOUNDING_INTERVAL: + *targ_paramid = WMI_PEER_PARAM_SU_TXBF_SOUNDING_INTERVAL; + break; + case WMI_HOST_PEER_PARAM_MU_TXBF_SOUNDING_INTERVAL: + *targ_paramid = WMI_PEER_PARAM_MU_TXBF_SOUNDING_INTERVAL; + break; + case WMI_HOST_PEER_PARAM_TXBF_SOUNDING_ENABLE: + *targ_paramid = WMI_PEER_PARAM_TXBF_SOUNDING_ENABLE; + break; + case WMI_HOST_PEER_PARAM_MU_ENABLE: + *targ_paramid = WMI_PEER_PARAM_MU_ENABLE; + break; + case WMI_HOST_PEER_PARAM_OFDMA_ENABLE: + *targ_paramid = WMI_PEER_PARAM_OFDMA_ENABLE; + break; + default: + return QDF_STATUS_E_NOSUPPORT; + } + + return QDF_STATUS_SUCCESS; +} +#endif +/** + * send_peer_param_cmd_tlv() - set peer parameter in fw + * @wmi: wmi handle + * @peer_addr: peer mac address + * @param : pointer to hold peer set parameter + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_peer_param_cmd_tlv(wmi_unified_t wmi, + uint8_t peer_addr[IEEE80211_ADDR_LEN], + struct peer_set_params *param) +{ + wmi_peer_set_param_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t err; + uint32_t param_id; + + if (convert_host_peer_id_to_target_id_tlv(¶m_id, + param->param_id) != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_NOSUPPORT; + + buf = wmi_buf_alloc(wmi, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate buffer to send set_param cmd"); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_peer_set_param_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_set_param_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_set_param_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_addr, 
&cmd->peer_macaddr); + cmd->param_id = param_id; + cmd->param_value = param->param_value; + wmi_mtrace(WMI_PEER_SET_PARAM_CMDID, cmd->vdev_id, 0); + err = wmi_unified_cmd_send(wmi, buf, + sizeof(wmi_peer_set_param_cmd_fixed_param), + WMI_PEER_SET_PARAM_CMDID); + if (err) { + WMI_LOGE("Failed to send set_param cmd"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +/** + * send_vdev_up_cmd_tlv() - send vdev up command in fw + * @wmi: wmi handle + * @bssid: bssid + * @vdev_up_params: pointer to hold vdev up parameter + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_vdev_up_cmd_tlv(wmi_unified_t wmi, + uint8_t bssid[IEEE80211_ADDR_LEN], + struct vdev_up_params *params) +{ + wmi_vdev_up_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + WMI_LOGD("%s: VDEV_UP", __func__); + WMI_LOGD("%s: vdev_id %d aid %d bssid %pM", __func__, + params->vdev_id, params->assoc_id, bssid); + buf = wmi_buf_alloc(wmi, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_up_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_up_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_vdev_up_cmd_fixed_param)); + cmd->vdev_id = params->vdev_id; + cmd->vdev_assoc_id = params->assoc_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(bssid, &cmd->vdev_bssid); + wmi_mtrace(WMI_VDEV_UP_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi, buf, len, WMI_VDEV_UP_CMDID)) { + WMI_LOGP("%s: Failed to send vdev up command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +/** + * send_peer_create_cmd_tlv() - send peer create command to fw + * @wmi: wmi handle + * @peer_addr: peer mac address + * @peer_type: peer type + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_peer_create_cmd_tlv(wmi_unified_t wmi, + struct 
peer_create_params *param) +{ + wmi_peer_create_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_peer_create_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_create_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_create_cmd_fixed_param)); + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->peer_addr, &cmd->peer_macaddr); + cmd->peer_type = param->peer_type; + cmd->vdev_id = param->vdev_id; + + wmi_mtrace(WMI_PEER_CREATE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi, buf, len, WMI_PEER_CREATE_CMDID)) { + WMI_LOGP("%s: failed to send WMI_PEER_CREATE_CMDID", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + WMI_LOGD("%s: peer_addr %pM vdev_id %d", __func__, param->peer_addr, + param->vdev_id); + + return 0; +} + +/** + * send_peer_rx_reorder_queue_setup_cmd_tlv() - send rx reorder setup + * command to fw + * @wmi: wmi handle + * @rx_reorder_queue_setup_params: Rx reorder queue setup parameters + * + * Return: 0 for success or error code + */ +static +QDF_STATUS send_peer_rx_reorder_queue_setup_cmd_tlv(wmi_unified_t wmi, + struct rx_reorder_queue_setup_params *param) +{ + wmi_peer_reorder_queue_setup_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_peer_reorder_queue_setup_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_reorder_queue_setup_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_reorder_queue_setup_cmd_fixed_param)); + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->peer_macaddr, &cmd->peer_macaddr); + cmd->vdev_id = param->vdev_id; + cmd->tid = param->tid; + cmd->queue_ptr_lo = param->hw_qdesc_paddr_lo; + 
cmd->queue_ptr_hi = param->hw_qdesc_paddr_hi; + cmd->queue_no = param->queue_no; + + wmi_mtrace(WMI_PEER_REORDER_QUEUE_SETUP_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi, buf, len, + WMI_PEER_REORDER_QUEUE_SETUP_CMDID)) { + WMI_LOGP("%s: fail to send WMI_PEER_REORDER_QUEUE_SETUP_CMDID", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + WMI_LOGD("%s: peer_macaddr %pM vdev_id %d, tid %d\n", __func__, + param->peer_macaddr, param->vdev_id, param->tid); + + return QDF_STATUS_SUCCESS; +} + +/** + * send_peer_rx_reorder_queue_remove_cmd_tlv() - send rx reorder remove + * command to fw + * @wmi: wmi handle + * @rx_reorder_queue_remove_params: Rx reorder queue remove parameters + * + * Return: 0 for success or error code + */ +static +QDF_STATUS send_peer_rx_reorder_queue_remove_cmd_tlv(wmi_unified_t wmi, + struct rx_reorder_queue_remove_params *param) +{ + wmi_peer_reorder_queue_remove_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_peer_reorder_queue_remove_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_reorder_queue_remove_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_reorder_queue_remove_cmd_fixed_param)); + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->peer_macaddr, &cmd->peer_macaddr); + cmd->vdev_id = param->vdev_id; + cmd->tid_mask = param->peer_tid_bitmap; + + wmi_mtrace(WMI_PEER_REORDER_QUEUE_REMOVE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi, buf, len, + WMI_PEER_REORDER_QUEUE_REMOVE_CMDID)) { + WMI_LOGP("%s: fail to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + WMI_LOGD("%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__, + param->peer_macaddr, param->vdev_id, param->peer_tid_bitmap); + + return QDF_STATUS_SUCCESS; +} + +/** + * 
send_peer_add_wds_entry_cmd_tlv() - send peer add command to fw + * @wmi_handle: wmi handle + * @param: pointer holding peer details + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_peer_add_wds_entry_cmd_tlv(wmi_unified_t wmi_handle, + struct peer_add_wds_entry_params *param) +{ + wmi_peer_add_wds_entry_cmd_fixed_param *cmd; + wmi_buf_t buf; + int len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_peer_add_wds_entry_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_add_wds_entry_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_add_wds_entry_cmd_fixed_param)); + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->dest_addr, &cmd->wds_macaddr); + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->peer_addr, &cmd->peer_macaddr); + cmd->flags = (param->flags & WMI_HOST_WDS_FLAG_STATIC) ? WMI_WDS_FLAG_STATIC : 0; + cmd->vdev_id = param->vdev_id; + + wmi_mtrace(WMI_PEER_ADD_WDS_ENTRY_CMDID, cmd->vdev_id, 0); + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_ADD_WDS_ENTRY_CMDID); +} + +/** + * send_peer_del_wds_entry_cmd_tlv() - send peer delete command to fw + * @wmi_handle: wmi handle + * @param: pointer holding peer details + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_peer_del_wds_entry_cmd_tlv(wmi_unified_t wmi_handle, + struct peer_del_wds_entry_params *param) +{ + wmi_peer_remove_wds_entry_cmd_fixed_param *cmd; + wmi_buf_t buf; + int len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_peer_remove_wds_entry_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_remove_wds_entry_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_remove_wds_entry_cmd_fixed_param)); + 
WMI_CHAR_ARRAY_TO_MAC_ADDR(param->dest_addr, &cmd->wds_macaddr); + cmd->vdev_id = param->vdev_id; + wmi_mtrace(WMI_PEER_REMOVE_WDS_ENTRY_CMDID, cmd->vdev_id, 0); + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_REMOVE_WDS_ENTRY_CMDID); +} + +/** + * send_peer_update_wds_entry_cmd_non_tlv() - send peer update command to fw + * @wmi_handle: wmi handle + * @param: pointer holding peer details + * + * Return: 0 for success or error code + */ +static QDF_STATUS send_peer_update_wds_entry_cmd_tlv(wmi_unified_t wmi_handle, + struct peer_update_wds_entry_params *param) +{ + wmi_peer_update_wds_entry_cmd_fixed_param *cmd; + wmi_buf_t buf; + int len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + /* wmi_buf_alloc returns zeroed command buffer */ + cmd = (wmi_peer_update_wds_entry_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_update_wds_entry_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_update_wds_entry_cmd_fixed_param)); + cmd->flags = (param->flags & WMI_HOST_WDS_FLAG_STATIC) ? 
WMI_WDS_FLAG_STATIC : 0; + cmd->vdev_id = param->vdev_id; + if (param->wds_macaddr) + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->wds_macaddr, + &cmd->wds_macaddr); + if (param->peer_macaddr) + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->peer_macaddr, + &cmd->peer_macaddr); + wmi_mtrace(WMI_PEER_UPDATE_WDS_ENTRY_CMDID, cmd->vdev_id, 0); + return wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_UPDATE_WDS_ENTRY_CMDID); +} + +/** + * send_pdev_get_tpc_config_cmd_tlv() - send get tpc config command to fw + * @wmi_handle: wmi handle + * @param: pointer to get tpc config params + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_pdev_get_tpc_config_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t param) +{ + wmi_pdev_get_tpc_config_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(wmi_pdev_get_tpc_config_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_pdev_get_tpc_config_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_get_tpc_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_get_tpc_config_cmd_fixed_param)); + + cmd->param = param; + wmi_mtrace(WMI_PDEV_GET_TPC_CONFIG_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_GET_TPC_CONFIG_CMDID)) { + WMI_LOGE("Send pdev get tpc config cmd failed"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + + } + WMI_LOGD("%s:send success", __func__); + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_SUPPORT_GREEN_AP +/** + * send_green_ap_ps_cmd_tlv() - enable green ap powersave command + * @wmi_handle: wmi handle + * @value: value + * @pdev_id: pdev id to have radio context + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_green_ap_ps_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t value, uint8_t pdev_id) +{ + wmi_pdev_green_ap_ps_enable_cmd_fixed_param *cmd; + 
wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + WMI_LOGD("Set Green AP PS val %d", value); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: Green AP PS Mem Alloc Failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_pdev_green_ap_ps_enable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_green_ap_ps_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_green_ap_ps_enable_cmd_fixed_param)); + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target(pdev_id); + cmd->enable = value; + + wmi_mtrace(WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID)) { + WMI_LOGE("Set Green AP PS param Failed val %d", value); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} +#endif + +/** + * send_pdev_utf_cmd_tlv() - send utf command to fw + * @wmi_handle: wmi handle + * @param: pointer to pdev_utf_params + * @mac_id: mac id to have radio context + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS +send_pdev_utf_cmd_tlv(wmi_unified_t wmi_handle, + struct pdev_utf_params *param, + uint8_t mac_id) +{ + wmi_buf_t buf; + uint8_t *cmd; + /* if param->len is 0 no data is sent, return error */ + QDF_STATUS ret = QDF_STATUS_E_INVAL; + static uint8_t msgref = 1; + uint8_t segNumber = 0, segInfo, numSegments; + uint16_t chunk_len, total_bytes; + uint8_t *bufpos; + struct seg_hdr_info segHdrInfo; + + bufpos = param->utf_payload; + total_bytes = param->len; + ASSERT(total_bytes / MAX_WMI_UTF_LEN == + (uint8_t) (total_bytes / MAX_WMI_UTF_LEN)); + numSegments = (uint8_t) (total_bytes / MAX_WMI_UTF_LEN); + + if (param->len - (numSegments * MAX_WMI_UTF_LEN)) + numSegments++; + + while (param->len) { + if (param->len > MAX_WMI_UTF_LEN) + chunk_len = MAX_WMI_UTF_LEN; /* MAX message */ + else + chunk_len = param->len; + + buf = wmi_buf_alloc(wmi_handle, + 
(chunk_len + sizeof(segHdrInfo) + + WMI_TLV_HDR_SIZE)); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (uint8_t *) wmi_buf_data(buf); + + segHdrInfo.len = total_bytes; + segHdrInfo.msgref = msgref; + segInfo = ((numSegments << 4) & 0xF0) | (segNumber & 0xF); + segHdrInfo.segmentInfo = segInfo; + segHdrInfo.pad = 0; + + WMI_LOGD("%s:segHdrInfo.len = %d, segHdrInfo.msgref = %d," + " segHdrInfo.segmentInfo = %d", + __func__, segHdrInfo.len, segHdrInfo.msgref, + segHdrInfo.segmentInfo); + + WMI_LOGD("%s:total_bytes %d segNumber %d totalSegments %d" + "chunk len %d", __func__, total_bytes, segNumber, + numSegments, chunk_len); + + segNumber++; + + WMITLV_SET_HDR(cmd, WMITLV_TAG_ARRAY_BYTE, + (chunk_len + sizeof(segHdrInfo))); + cmd += WMI_TLV_HDR_SIZE; + memcpy(cmd, &segHdrInfo, sizeof(segHdrInfo)); /* 4 bytes */ + memcpy(&cmd[sizeof(segHdrInfo)], bufpos, chunk_len); + + wmi_mtrace(WMI_PDEV_UTF_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, + (chunk_len + sizeof(segHdrInfo) + + WMI_TLV_HDR_SIZE), + WMI_PDEV_UTF_CMDID); + + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send WMI_PDEV_UTF_CMDID command"); + wmi_buf_free(buf); + break; + } + + param->len -= chunk_len; + bufpos += chunk_len; + } + + msgref++; + + return ret; +} +#ifdef CONFIG_MCL +static inline uint32_t convert_host_pdev_param_tlv(wmi_unified_t wmi_handle, + uint32_t host_param) +{ + return host_param; +} +#else +static inline uint32_t convert_host_pdev_param_tlv(wmi_unified_t wmi_handle, + uint32_t host_param) +{ + if (host_param < wmi_pdev_param_max) + return wmi_handle->pdev_param[host_param]; + + return WMI_UNAVAILABLE_PARAM; +} +#endif +/** + * send_pdev_param_cmd_tlv() - set pdev parameters + * @wmi_handle: wmi handle + * @param: pointer to pdev parameter + * @mac_id: radio context + * + * Return: 0 on success, errno on failure + */ +static QDF_STATUS +send_pdev_param_cmd_tlv(wmi_unified_t wmi_handle, + struct 
pdev_params *param, + uint8_t mac_id) +{ + QDF_STATUS ret; + wmi_pdev_set_param_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(*cmd); + uint32_t pdev_param; + + pdev_param = convert_host_pdev_param_tlv(wmi_handle, param->param_id); + if (pdev_param == WMI_UNAVAILABLE_PARAM) { + WMI_LOGW("%s: Unavailable param %d\n", + __func__, param->param_id); + return QDF_STATUS_E_INVAL; + } + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_pdev_set_param_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_param_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_set_param_cmd_fixed_param)); + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target(mac_id); + cmd->param_id = pdev_param; + cmd->param_value = param->param_value; + WMI_LOGD("Setting pdev param = %x, value = %u", param->param_id, + param->param_value); + wmi_mtrace(WMI_PDEV_SET_PARAM_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_PARAM_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send set param command ret = %d", ret); + wmi_buf_free(buf); + } + return ret; +} + +/** + * send_suspend_cmd_tlv() - WMI suspend function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold suspend parameter + * @mac_id: radio context + * + * Return 0 on success and -ve on failure. 
+ */
+static QDF_STATUS send_suspend_cmd_tlv(wmi_unified_t wmi_handle,
+				struct suspend_params *param,
+				uint8_t mac_id)
+{
+	wmi_pdev_suspend_cmd_fixed_param *cmd;
+	wmi_buf_t wmibuf;
+	uint32_t len = sizeof(*cmd);
+	QDF_STATUS ret;
+
+	/*
+	 * send the command to Target to ignore the
+	 * PCIE reset so as to ensure that Host and target
+	 * states are in sync
+	 */
+	wmibuf = wmi_buf_alloc(wmi_handle, len);
+	if (wmibuf == NULL)
+		return QDF_STATUS_E_NOMEM;
+
+	cmd = (wmi_pdev_suspend_cmd_fixed_param *) wmi_buf_data(wmibuf);
+	WMITLV_SET_HDR(&cmd->tlv_header,
+		       WMITLV_TAG_STRUC_wmi_pdev_suspend_cmd_fixed_param,
+		       WMITLV_GET_STRUCT_TLVLEN
+		       (wmi_pdev_suspend_cmd_fixed_param));
+	/* Optionally keep target interrupts disabled across suspend */
+	if (param->disable_target_intr)
+		cmd->suspend_opt = WMI_PDEV_SUSPEND_AND_DISABLE_INTR;
+	else
+		cmd->suspend_opt = WMI_PDEV_SUSPEND;
+
+	cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target(mac_id);
+
+	wmi_mtrace(WMI_PDEV_SUSPEND_CMDID, NO_SESSION, 0);
+	ret = wmi_unified_cmd_send(wmi_handle, wmibuf, len,
+				   WMI_PDEV_SUSPEND_CMDID);
+	/* Keep status handling consistent with send_resume_cmd_tlv() */
+	if (QDF_IS_STATUS_ERROR(ret)) {
+		wmi_buf_free(wmibuf);
+		WMI_LOGE("Failed to send WMI_PDEV_SUSPEND_CMDID command");
+	}
+
+	return ret;
+}
+
+/**
+ * send_resume_cmd_tlv() - WMI resume function
+ * @param wmi_handle : handle to WMI.
+ * @mac_id: radio context
+ *
+ * Return: 0 on success and -ve on failure.
+ */
+static QDF_STATUS send_resume_cmd_tlv(wmi_unified_t wmi_handle,
+				uint8_t mac_id)
+{
+	wmi_pdev_resume_cmd_fixed_param *cmd;
+	wmi_buf_t buf;
+	uint32_t len = sizeof(*cmd);
+	QDF_STATUS status;
+
+	buf = wmi_buf_alloc(wmi_handle, len);
+	if (!buf)
+		return QDF_STATUS_E_NOMEM;
+
+	/* Only the pdev id needs to be filled in for a resume request */
+	cmd = (wmi_pdev_resume_cmd_fixed_param *) wmi_buf_data(buf);
+	WMITLV_SET_HDR(&cmd->tlv_header,
+		       WMITLV_TAG_STRUC_wmi_pdev_resume_cmd_fixed_param,
+		       WMITLV_GET_STRUCT_TLVLEN
+		       (wmi_pdev_resume_cmd_fixed_param));
+	cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target(mac_id);
+
+	wmi_mtrace(WMI_PDEV_RESUME_CMDID, NO_SESSION, 0);
+	status = wmi_unified_cmd_send(wmi_handle, buf, len,
+				      WMI_PDEV_RESUME_CMDID);
+	if (QDF_IS_STATUS_ERROR(status)) {
+		WMI_LOGE("Failed to send WMI_PDEV_RESUME_CMDID command");
+		wmi_buf_free(buf);
+	}
+
+	return status;
+}
+
+#ifdef FEATURE_WLAN_D0WOW
+/**
+ * send_d0wow_enable_cmd_tlv() - WMI d0 wow enable function
+ * @param wmi_handle: handle to WMI.
+ * @mac_id: radio context
+ *
+ * Return: 0 on success and error code on failure.
+ */
+static QDF_STATUS send_d0wow_enable_cmd_tlv(wmi_unified_t wmi_handle,
+				uint8_t mac_id)
+{
+	wmi_d0_wow_enable_disable_cmd_fixed_param *d0wow_cmd;
+	wmi_buf_t wmi_buf;
+	int32_t cmd_len = sizeof(wmi_d0_wow_enable_disable_cmd_fixed_param);
+	QDF_STATUS ret;
+
+	wmi_buf = wmi_buf_alloc(wmi_handle, cmd_len);
+	if (!wmi_buf) {
+		WMI_LOGE("%s: Failed allocate wmi buffer", __func__);
+		return QDF_STATUS_E_NOMEM;
+	}
+
+	d0wow_cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *)
+			wmi_buf_data(wmi_buf);
+	WMITLV_SET_HDR(&d0wow_cmd->tlv_header,
+		       WMITLV_TAG_STRUC_wmi_d0_wow_enable_disable_cmd_fixed_param,
+		       WMITLV_GET_STRUCT_TLVLEN
+		       (wmi_d0_wow_enable_disable_cmd_fixed_param));
+
+	/* Enable and disable share one fixed param; only 'enable' differs */
+	d0wow_cmd->enable = true;
+
+	wmi_mtrace(WMI_D0_WOW_ENABLE_DISABLE_CMDID, NO_SESSION, 0);
+	ret = wmi_unified_cmd_send(wmi_handle, wmi_buf, cmd_len,
+				   WMI_D0_WOW_ENABLE_DISABLE_CMDID);
+	if (QDF_IS_STATUS_ERROR(ret))
+		wmi_buf_free(wmi_buf);
+
+	return ret;
+}
+
+/**
+ * send_d0wow_disable_cmd_tlv() - WMI d0 wow disable function
+ * @param wmi_handle: handle to WMI.
+ * @mac_id: radio context
+ *
+ * Return: 0 on success and error code on failure.
+ */
+static QDF_STATUS send_d0wow_disable_cmd_tlv(wmi_unified_t wmi_handle,
+				uint8_t mac_id)
+{
+	wmi_d0_wow_enable_disable_cmd_fixed_param *d0wow_cmd;
+	wmi_buf_t wmi_buf;
+	int32_t cmd_len = sizeof(wmi_d0_wow_enable_disable_cmd_fixed_param);
+	QDF_STATUS ret;
+
+	wmi_buf = wmi_buf_alloc(wmi_handle, cmd_len);
+	if (!wmi_buf) {
+		WMI_LOGE("%s: Failed allocate wmi buffer", __func__);
+		return QDF_STATUS_E_NOMEM;
+	}
+
+	d0wow_cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *)
+			wmi_buf_data(wmi_buf);
+	WMITLV_SET_HDR(&d0wow_cmd->tlv_header,
+		       WMITLV_TAG_STRUC_wmi_d0_wow_enable_disable_cmd_fixed_param,
+		       WMITLV_GET_STRUCT_TLVLEN
+		       (wmi_d0_wow_enable_disable_cmd_fixed_param));
+
+	/* Same fixed param as the enable path; clearing 'enable' disables */
+	d0wow_cmd->enable = false;
+
+	wmi_mtrace(WMI_D0_WOW_ENABLE_DISABLE_CMDID, NO_SESSION, 0);
+	ret = wmi_unified_cmd_send(wmi_handle, wmi_buf, cmd_len,
+				   WMI_D0_WOW_ENABLE_DISABLE_CMDID);
+	if (QDF_IS_STATUS_ERROR(ret))
+		wmi_buf_free(wmi_buf);
+
+	return ret;
+}
+#endif
+
+/**
+ * send_wow_enable_cmd_tlv() - WMI wow enable function
+ * @param wmi_handle : handle to WMI.
+ * @param param : pointer to hold wow enable parameter
+ * @mac_id: radio context
+ *
+ * Return: 0 on success and -ve on failure.
+ */ +static QDF_STATUS send_wow_enable_cmd_tlv(wmi_unified_t wmi_handle, + struct wow_cmd_params *param, + uint8_t mac_id) +{ + wmi_wow_enable_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len; + int32_t ret; + + len = sizeof(wmi_wow_enable_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_wow_enable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_wow_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_wow_enable_cmd_fixed_param)); + cmd->enable = param->enable; + if (param->can_suspend_link) + cmd->pause_iface_config = WOW_IFACE_PAUSE_ENABLED; + else + cmd->pause_iface_config = WOW_IFACE_PAUSE_DISABLED; + cmd->flags = param->flags; + + WMI_LOGI("suspend type: %s", + cmd->pause_iface_config == WOW_IFACE_PAUSE_ENABLED ? + "WOW_IFACE_PAUSE_ENABLED" : "WOW_IFACE_PAUSE_DISABLED"); + + wmi_mtrace(WMI_WOW_ENABLE_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WOW_ENABLE_CMDID); + if (ret) + wmi_buf_free(buf); + + return ret; +} + +/** + * send_set_ap_ps_param_cmd_tlv() - set ap powersave parameters + * @wmi_handle: wmi handle + * @peer_addr: peer mac address + * @param: pointer to ap_ps parameter structure + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_set_ap_ps_param_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t *peer_addr, + struct ap_ps_params *param) +{ + wmi_ap_ps_peer_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t err; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate buffer to send set_ap_ps_param cmd"); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_ap_ps_peer_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_ap_ps_peer_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_ap_ps_peer_cmd_fixed_param)); + cmd->vdev_id = 
param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_addr, &cmd->peer_macaddr); + cmd->param = param->param; + cmd->value = param->value; + wmi_mtrace(WMI_AP_PS_PEER_PARAM_CMDID, cmd->vdev_id, 0); + err = wmi_unified_cmd_send(wmi_handle, buf, + sizeof(*cmd), WMI_AP_PS_PEER_PARAM_CMDID); + if (err) { + WMI_LOGE("Failed to send set_ap_ps_param cmd"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +/** + * send_set_sta_ps_param_cmd_tlv() - set sta powersave parameters + * @wmi_handle: wmi handle + * @peer_addr: peer mac address + * @param: pointer to sta_ps parameter structure + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_set_sta_ps_param_cmd_tlv(wmi_unified_t wmi_handle, + struct sta_ps_params *param) +{ + wmi_sta_powersave_param_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: Set Sta Ps param Mem Alloc Failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_sta_powersave_param_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_sta_powersave_param_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_sta_powersave_param_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->param = param->param; + cmd->value = param->value; + + wmi_mtrace(WMI_STA_POWERSAVE_PARAM_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_STA_POWERSAVE_PARAM_CMDID)) { + WMI_LOGE("Set Sta Ps param Failed vdevId %d Param %d val %d", + param->vdev_id, param->param, param->value); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +/** + * send_crash_inject_cmd_tlv() - inject fw crash + * @wmi_handle: wmi handle + * @param: ponirt to crash inject parameter structure + * + * Return: QDF_STATUS_SUCCESS for success or return error + */ +static QDF_STATUS send_crash_inject_cmd_tlv(wmi_unified_t wmi_handle, + struct crash_inject 
*param) +{ + int32_t ret = 0; + WMI_FORCE_FW_HANG_CMD_fixed_param *cmd; + uint16_t len = sizeof(*cmd); + wmi_buf_t buf; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: wmi_buf_alloc failed!", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (WMI_FORCE_FW_HANG_CMD_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_FORCE_FW_HANG_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_FORCE_FW_HANG_CMD_fixed_param)); + cmd->type = param->type; + cmd->delay_time_ms = param->delay_time_ms; + + wmi_mtrace(WMI_FORCE_FW_HANG_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_FORCE_FW_HANG_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send set param command, ret = %d", + __func__, ret); + wmi_buf_free(buf); + } + + return ret; +} + +#ifdef FEATURE_FW_LOG_PARSING +/** + * send_dbglog_cmd_tlv() - set debug log level + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold dbglog level parameter + * + * Return: 0 on success and -ve on failure. 
+ */ + static QDF_STATUS +send_dbglog_cmd_tlv(wmi_unified_t wmi_handle, + struct dbglog_params *dbglog_param) +{ + wmi_buf_t buf; + wmi_debug_log_config_cmd_fixed_param *configmsg; + QDF_STATUS status; + int32_t i; + int32_t len; + int8_t *buf_ptr; + int32_t *module_id_bitmap_array; /* Used to fomr the second tlv */ + + ASSERT(dbglog_param->bitmap_len < MAX_MODULE_ID_BITMAP_WORDS); + + /* Allocate size for 2 tlvs - including tlv hdr space for second tlv */ + len = sizeof(wmi_debug_log_config_cmd_fixed_param) + WMI_TLV_HDR_SIZE + + (sizeof(int32_t) * MAX_MODULE_ID_BITMAP_WORDS); + buf = wmi_buf_alloc(wmi_handle, len); + if (buf == NULL) + return QDF_STATUS_E_NOMEM; + + configmsg = + (wmi_debug_log_config_cmd_fixed_param *) (wmi_buf_data(buf)); + buf_ptr = (int8_t *) configmsg; + WMITLV_SET_HDR(&configmsg->tlv_header, + WMITLV_TAG_STRUC_wmi_debug_log_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_debug_log_config_cmd_fixed_param)); + configmsg->dbg_log_param = dbglog_param->param; + configmsg->value = dbglog_param->val; + /* Filling in the data part of second tlv -- should + * follow first tlv _ WMI_TLV_HDR_SIZE */ + module_id_bitmap_array = (uint32_t *) (buf_ptr + + sizeof + (wmi_debug_log_config_cmd_fixed_param) + + WMI_TLV_HDR_SIZE); + WMITLV_SET_HDR(buf_ptr + sizeof(wmi_debug_log_config_cmd_fixed_param), + WMITLV_TAG_ARRAY_UINT32, + sizeof(uint32_t) * MAX_MODULE_ID_BITMAP_WORDS); + if (dbglog_param->module_id_bitmap) { + for (i = 0; i < dbglog_param->bitmap_len; ++i) { + module_id_bitmap_array[i] = + dbglog_param->module_id_bitmap[i]; + } + } + + wmi_mtrace(WMI_DBGLOG_CFG_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_DBGLOG_CFG_CMDID); + + if (status != QDF_STATUS_SUCCESS) + wmi_buf_free(buf); + + return status; +} +#endif + +#ifdef CONFIG_MCL +static inline uint32_t convert_host_vdev_param_tlv(wmi_unified_t wmi_handle, + uint32_t host_param) +{ + return host_param; +} +#else +static inline uint32_t 
convert_host_vdev_param_tlv(wmi_unified_t wmi_handle, + uint32_t host_param) +{ + if (host_param < wmi_vdev_param_max) + return wmi_handle->vdev_param[host_param]; + + return WMI_UNAVAILABLE_PARAM; +} +#endif +/** + * send_vdev_set_param_cmd_tlv() - WMI vdev set parameter function + * @param wmi_handle : handle to WMI. + * @param macaddr : MAC address + * @param param : pointer to hold vdev set parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_vdev_set_param_cmd_tlv(wmi_unified_t wmi_handle, + struct vdev_set_params *param) +{ + QDF_STATUS ret; + wmi_vdev_set_param_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(*cmd); + uint32_t vdev_param; + + vdev_param = convert_host_vdev_param_tlv(wmi_handle, param->param_id); + if (vdev_param == WMI_UNAVAILABLE_PARAM) { + WMI_LOGW("%s:Vdev param %d not available", __func__, + param->param_id); + return QDF_STATUS_E_INVAL; + + } + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_set_param_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_set_param_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_set_param_cmd_fixed_param)); + cmd->vdev_id = param->if_id; + cmd->param_id = vdev_param; + cmd->param_value = param->param_value; + WMI_LOGD("Setting vdev %d param = %x, value = %u", + cmd->vdev_id, cmd->param_id, cmd->param_value); + wmi_mtrace(WMI_VDEV_SET_PARAM_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_SET_PARAM_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send set param command ret = %d", ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_stats_request_cmd_tlv() - WMI request stats function + * @param wmi_handle : handle to WMI. 
+ * @param macaddr : MAC address + * @param param : pointer to hold stats request parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_stats_request_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct stats_request_params *param) +{ + int32_t ret; + wmi_request_stats_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(wmi_request_stats_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return -QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_request_stats_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_request_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_request_stats_cmd_fixed_param)); + cmd->stats_id = param->stats_id; + cmd->vdev_id = param->vdev_id; + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + param->pdev_id); + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + + WMI_LOGD("STATS REQ STATS_ID:%d VDEV_ID:%d PDEV_ID:%d-->", + cmd->stats_id, cmd->vdev_id, cmd->pdev_id); + + wmi_mtrace(WMI_REQUEST_STATS_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_REQUEST_STATS_CMDID); + + if (ret) { + WMI_LOGE("Failed to send status request to fw =%d", ret); + wmi_buf_free(buf); + } + + return ret; +} + +#ifdef CONFIG_WIN +/** + * send_packet_log_enable_cmd_tlv() - Send WMI command to enable packet-log + * @param wmi_handle : handle to WMI. + * @param PKTLOG_EVENT : packet log event + * @mac_id: mac id to have radio context + * + * Return: 0 on success and -ve on failure. 
+ */ +static QDF_STATUS send_packet_log_enable_cmd_tlv(wmi_unified_t wmi_handle, + WMI_HOST_PKTLOG_EVENT PKTLOG_EVENT, uint8_t mac_id) +{ + int32_t ret; + wmi_pdev_pktlog_enable_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(wmi_pdev_pktlog_enable_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return -QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_pdev_pktlog_enable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_pktlog_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_pktlog_enable_cmd_fixed_param)); + cmd->evlist = PKTLOG_EVENT; + cmd->pdev_id = mac_id; + wmi_mtrace(WMI_PDEV_PKTLOG_ENABLE_CMDID, cmd->pdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_PKTLOG_ENABLE_CMDID); + if (ret) { + WMI_LOGE("Failed to send pktlog enable cmd to FW =%d", ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_packet_log_disable_cmd_tlv() - Send WMI command to disable packet-log + * @param wmi_handle : handle to WMI. + * @mac_id: mac id to have radio context + * + * Return: 0 on success and -ve on failure. 
+ */ +static QDF_STATUS send_packet_log_disable_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t mac_id) +{ + int32_t ret; + wmi_pdev_pktlog_disable_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(wmi_pdev_pktlog_disable_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return -QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_pdev_pktlog_disable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_pktlog_disable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_pktlog_disable_cmd_fixed_param)); + cmd->pdev_id = mac_id; + wmi_mtrace(WMI_PDEV_PKTLOG_DISABLE_CMDID, cmd->pdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_PKTLOG_DISABLE_CMDID); + if (ret) { + WMI_LOGE("Failed to send pktlog disable cmd to FW =%d", ret); + wmi_buf_free(buf); + } + + return ret; +} +#else +/** + * send_packet_log_enable_cmd_tlv() - Send WMI command to enable + * packet-log + * @param wmi_handle : handle to WMI. + * @param macaddr : MAC address + * @param param : pointer to hold stats request parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_packet_log_enable_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct packet_enable_params *param) +{ + return 0; +} +/** + * send_packet_log_disable_cmd_tlv() - Send WMI command to disable + * packet-log + * @param wmi_handle : handle to WMI. + * @mac_id: mac id to have radio context + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_packet_log_disable_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t mac_id) +{ + return 0; +} +#endif + +#define WMI_FW_TIME_STAMP_LOW_MASK 0xffffffff +/** + * send_time_stamp_sync_cmd_tlv() - Send WMI command to + * sync time between bwtween host and firmware + * @param wmi_handle : handle to WMI. 
+ * + * Return: None + */ +static void send_time_stamp_sync_cmd_tlv(wmi_unified_t wmi_handle) +{ + wmi_buf_t buf; + QDF_STATUS status = QDF_STATUS_SUCCESS; + WMI_DBGLOG_TIME_STAMP_SYNC_CMD_fixed_param *time_stamp; + int32_t len; + qdf_time_t time_ms; + + len = sizeof(*time_stamp); + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) { + WMI_LOGP(FL("wmi_buf_alloc failed")); + return; + } + time_stamp = + (WMI_DBGLOG_TIME_STAMP_SYNC_CMD_fixed_param *) + (wmi_buf_data(buf)); + WMITLV_SET_HDR(&time_stamp->tlv_header, + WMITLV_TAG_STRUC_wmi_dbglog_time_stamp_sync_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + WMI_DBGLOG_TIME_STAMP_SYNC_CMD_fixed_param)); + + time_ms = qdf_get_time_of_the_day_ms(); + time_stamp->mode = WMI_TIME_STAMP_SYNC_MODE_MS; + time_stamp->time_stamp_low = time_ms & + WMI_FW_TIME_STAMP_LOW_MASK; + /* + * Send time_stamp_high 0 as the time converted from HR:MIN:SEC:MS to ms + * wont exceed 27 bit + */ + time_stamp->time_stamp_high = 0; + WMI_LOGD(FL("WMA --> DBGLOG_TIME_STAMP_SYNC_CMDID mode %d time_stamp low %d high %d"), + time_stamp->mode, time_stamp->time_stamp_low, + time_stamp->time_stamp_high); + + wmi_mtrace(WMI_DBGLOG_TIME_STAMP_SYNC_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_DBGLOG_TIME_STAMP_SYNC_CMDID); + if (status) { + WMI_LOGE("Failed to send WMI_DBGLOG_TIME_STAMP_SYNC_CMDID command"); + wmi_buf_free(buf); + } + +} + +#ifdef WLAN_SUPPORT_FILS +/** + * extract_swfda_vdev_id_tlv() - extract swfda vdev id from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @vdev_id: pointer to hold vdev id + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_INVAL on failure + */ +static QDF_STATUS +extract_swfda_vdev_id_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t *vdev_id) +{ + WMI_HOST_SWFDA_EVENTID_param_tlvs *param_buf; + wmi_host_swfda_event_fixed_param *swfda_event; + + param_buf = (WMI_HOST_SWFDA_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + 
WMI_LOGE("Invalid swfda event buffer"); + return QDF_STATUS_E_INVAL; + } + swfda_event = param_buf->fixed_param; + *vdev_id = swfda_event->vdev_id; + + return QDF_STATUS_SUCCESS; +} + +/** + * send_vdev_fils_enable_cmd_tlv() - enable/Disable FD Frame command to fw + * @wmi_handle: wmi handle + * @param: pointer to hold FILS discovery enable param + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE on failure + */ +static QDF_STATUS +send_vdev_fils_enable_cmd_tlv(wmi_unified_t wmi_handle, + struct config_fils_params *param) +{ + wmi_enable_fils_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + uint32_t len = sizeof(wmi_enable_fils_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_enable_fils_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_enable_fils_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_enable_fils_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->fd_period = param->fd_period; + WMI_LOGI("Setting FD period to %d vdev id : %d\n", + param->fd_period, param->vdev_id); + + wmi_mtrace(WMI_ENABLE_FILS_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ENABLE_FILS_CMDID); + if (status != QDF_STATUS_SUCCESS) { + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_fils_discovery_send_cmd_tlv() - WMI FILS Discovery send function + * @wmi_handle: wmi handle + * @param: pointer to hold FD send cmd parameter + * + * Return : QDF_STATUS_SUCCESS on success and QDF_STATUS_E_NOMEM on failure. 
+ */ +static QDF_STATUS +send_fils_discovery_send_cmd_tlv(wmi_unified_t wmi_handle, + struct fd_params *param) +{ + QDF_STATUS ret; + wmi_fd_send_from_host_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + qdf_dma_addr_t dma_addr; + + wmi_buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!wmi_buf) { + WMI_LOGE("%s : wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_fd_send_from_host_cmd_fixed_param *)wmi_buf_data(wmi_buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_fd_send_from_host_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_fd_send_from_host_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->data_len = qdf_nbuf_len(param->wbuf); + dma_addr = qdf_nbuf_get_frag_paddr(param->wbuf, 0); + qdf_dmaaddr_to_32s(dma_addr, &cmd->frag_ptr_lo, &cmd->frag_ptr_hi); + cmd->frame_ctrl = param->frame_ctrl; + + wmi_mtrace(WMI_PDEV_SEND_FD_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, wmi_buf, sizeof(*cmd), + WMI_PDEV_SEND_FD_CMDID); + if (ret != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: Failed to send fils discovery frame: %d", + __func__, ret); + wmi_buf_free(wmi_buf); + } + + return ret; +} +#endif /* WLAN_SUPPORT_FILS */ + +static QDF_STATUS send_beacon_send_cmd_tlv(wmi_unified_t wmi_handle, + struct beacon_params *param) +{ + QDF_STATUS ret; + wmi_bcn_send_from_host_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + qdf_dma_addr_t dma_addr; + uint32_t dtim_flag = 0; + + wmi_buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!wmi_buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + if (param->is_dtim_count_zero) { + dtim_flag |= WMI_BCN_SEND_DTIM_ZERO; + if (param->is_bitctl_reqd) { + /* deliver CAB traffic in next DTIM beacon */ + dtim_flag |= WMI_BCN_SEND_DTIM_BITCTL_SET; + } + } + cmd = (wmi_bcn_send_from_host_cmd_fixed_param *) wmi_buf_data(wmi_buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_bcn_send_from_host_cmd_fixed_param, + 
WMITLV_GET_STRUCT_TLVLEN + (wmi_bcn_send_from_host_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->data_len = qdf_nbuf_len(param->wbuf); + cmd->frame_ctrl = param->frame_ctrl; + cmd->dtim_flag = dtim_flag; + dma_addr = qdf_nbuf_get_frag_paddr(param->wbuf, 0); + cmd->frag_ptr_lo = qdf_get_lower_32_bits(dma_addr); +#if defined(HTT_PADDR64) + cmd->frag_ptr_hi = qdf_get_upper_32_bits(dma_addr) & 0x1F; +#endif + cmd->bcn_antenna = param->bcn_txant; + + wmi_mtrace(WMI_PDEV_SEND_BCN_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, + wmi_buf, sizeof(*cmd), WMI_PDEV_SEND_BCN_CMDID); + if (ret != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: Failed to send bcn: %d", __func__, ret); + wmi_buf_free(wmi_buf); + } + + return ret; +} + +/** + * send_beacon_send_tmpl_cmd_tlv() - WMI beacon send function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold beacon send cmd parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_beacon_tmpl_send_cmd_tlv(wmi_unified_t wmi_handle, + struct beacon_tmpl_params *param) +{ + int32_t ret; + wmi_bcn_tmpl_cmd_fixed_param *cmd; + wmi_bcn_prb_info *bcn_prb_info; + wmi_buf_t wmi_buf; + uint8_t *buf_ptr; + uint32_t wmi_buf_len; + + wmi_buf_len = sizeof(wmi_bcn_tmpl_cmd_fixed_param) + + sizeof(wmi_bcn_prb_info) + WMI_TLV_HDR_SIZE + + param->tmpl_len_aligned; + wmi_buf = wmi_buf_alloc(wmi_handle, wmi_buf_len); + if (!wmi_buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + cmd = (wmi_bcn_tmpl_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_bcn_tmpl_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_bcn_tmpl_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->tim_ie_offset = param->tim_ie_offset; + cmd->csa_switch_count_offset = param->csa_switch_count_offset; + cmd->ext_csa_switch_count_offset = param->ext_csa_switch_count_offset; + cmd->buf_len = 
param->tmpl_len; + buf_ptr += sizeof(wmi_bcn_tmpl_cmd_fixed_param); + + bcn_prb_info = (wmi_bcn_prb_info *) buf_ptr; + WMITLV_SET_HDR(&bcn_prb_info->tlv_header, + WMITLV_TAG_STRUC_wmi_bcn_prb_info, + WMITLV_GET_STRUCT_TLVLEN(wmi_bcn_prb_info)); + bcn_prb_info->caps = 0; + bcn_prb_info->erp = 0; + buf_ptr += sizeof(wmi_bcn_prb_info); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, param->tmpl_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, param->frm, param->tmpl_len); + + wmi_mtrace(WMI_BCN_TMPL_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, + wmi_buf, wmi_buf_len, WMI_BCN_TMPL_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send bcn tmpl: %d", __func__, ret); + wmi_buf_free(wmi_buf); + } + + return 0; +} + +#ifdef CONFIG_MCL +static inline void copy_peer_flags_tlv( + wmi_peer_assoc_complete_cmd_fixed_param * cmd, + struct peer_assoc_params *param) +{ + cmd->peer_flags = param->peer_flags; +} +#else +static inline void copy_peer_flags_tlv( + wmi_peer_assoc_complete_cmd_fixed_param * cmd, + struct peer_assoc_params *param) +{ + /* + * The target only needs a subset of the flags maintained in the host. + * Just populate those flags and send it down + */ + cmd->peer_flags = 0; + + /* + * Do not enable HT/VHT if WMM/wme is disabled for vap. 
+ */ + if (param->is_wme_set) { + + if (param->qos_flag) + cmd->peer_flags |= WMI_PEER_QOS; + if (param->apsd_flag) + cmd->peer_flags |= WMI_PEER_APSD; + if (param->ht_flag) + cmd->peer_flags |= WMI_PEER_HT; + if (param->bw_40) + cmd->peer_flags |= WMI_PEER_40MHZ; + if (param->bw_80) + cmd->peer_flags |= WMI_PEER_80MHZ; + if (param->bw_160) + cmd->peer_flags |= WMI_PEER_160MHZ; + + /* Typically if STBC is enabled for VHT it should be enabled + * for HT as well + **/ + if (param->stbc_flag) + cmd->peer_flags |= WMI_PEER_STBC; + + /* Typically if LDPC is enabled for VHT it should be enabled + * for HT as well + **/ + if (param->ldpc_flag) + cmd->peer_flags |= WMI_PEER_LDPC; + + if (param->static_mimops_flag) + cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS; + if (param->dynamic_mimops_flag) + cmd->peer_flags |= WMI_PEER_DYN_MIMOPS; + if (param->spatial_mux_flag) + cmd->peer_flags |= WMI_PEER_SPATIAL_MUX; + if (param->vht_flag) + cmd->peer_flags |= WMI_PEER_VHT; + if (param->he_flag) + cmd->peer_flags |= WMI_PEER_HE; + } + + if (param->is_pmf_enabled) + cmd->peer_flags |= WMI_PEER_PMF; + /* + * Suppress authorization for all AUTH modes that need 4-way handshake + * (during re-association). + * Authorization will be done for these modes on key installation. + */ + if (param->auth_flag) + cmd->peer_flags |= WMI_PEER_AUTH; + if (param->need_ptk_4_way) + cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; + else + cmd->peer_flags &= ~WMI_PEER_NEED_PTK_4_WAY; + if (param->need_gtk_2_way) + cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY; + /* safe mode bypass the 4-way handshake */ + if (param->safe_mode_enabled) + cmd->peer_flags &= + ~(WMI_PEER_NEED_PTK_4_WAY | WMI_PEER_NEED_GTK_2_WAY); + /* Disable AMSDU for station transmit, if user configures it */ + /* Disable AMSDU for AP transmit to 11n Stations, if user configures + * it + * if (param->amsdu_disable) Add after FW support + **/ + + /* Target asserts if node is marked HT and all MCS is set to 0. 
+ * Mark the node as non-HT if all the mcs rates are disabled through + * iwpriv + **/ + if (param->peer_ht_rates.num_rates == 0) + cmd->peer_flags &= ~WMI_PEER_HT; +} +#endif + +#ifdef CONFIG_MCL +static inline void copy_peer_mac_addr_tlv( + wmi_peer_assoc_complete_cmd_fixed_param * cmd, + struct peer_assoc_params *param) +{ + qdf_mem_copy(&cmd->peer_macaddr, ¶m->peer_macaddr, + sizeof(param->peer_macaddr)); +} +#else +static inline void copy_peer_mac_addr_tlv( + wmi_peer_assoc_complete_cmd_fixed_param * cmd, + struct peer_assoc_params *param) +{ + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->peer_mac, &cmd->peer_macaddr); +} +#endif + +/** + * send_peer_assoc_cmd_tlv() - WMI peer assoc function + * @param wmi_handle : handle to WMI. + * @param param : pointer to peer assoc parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_peer_assoc_cmd_tlv(wmi_unified_t wmi_handle, + struct peer_assoc_params *param) +{ + wmi_peer_assoc_complete_cmd_fixed_param *cmd; + wmi_vht_rate_set *mcs; + wmi_he_rate_set *he_mcs; + wmi_buf_t buf; + int32_t len; + uint8_t *buf_ptr; + QDF_STATUS ret; + uint32_t peer_legacy_rates_align; + uint32_t peer_ht_rates_align; + int32_t i; + + + peer_legacy_rates_align = wmi_align(param->peer_legacy_rates.num_rates); + peer_ht_rates_align = wmi_align(param->peer_ht_rates.num_rates); + + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE + + (peer_legacy_rates_align * sizeof(uint8_t)) + + WMI_TLV_HDR_SIZE + + (peer_ht_rates_align * sizeof(uint8_t)) + + sizeof(wmi_vht_rate_set) + + (sizeof(wmi_he_rate_set) * param->peer_he_mcs_count + + WMI_TLV_HDR_SIZE); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_peer_assoc_complete_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_assoc_complete_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + 
(wmi_peer_assoc_complete_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + + cmd->peer_new_assoc = param->peer_new_assoc; + cmd->peer_associd = param->peer_associd; + + copy_peer_flags_tlv(cmd, param); + copy_peer_mac_addr_tlv(cmd, param); + + cmd->peer_rate_caps = param->peer_rate_caps; + cmd->peer_caps = param->peer_caps; + cmd->peer_listen_intval = param->peer_listen_intval; + cmd->peer_ht_caps = param->peer_ht_caps; + cmd->peer_max_mpdu = param->peer_max_mpdu; + cmd->peer_mpdu_density = param->peer_mpdu_density; + cmd->peer_vht_caps = param->peer_vht_caps; + cmd->peer_phymode = param->peer_phymode; + + /* Update 11ax capabilities */ + cmd->peer_he_cap_info = param->peer_he_cap_macinfo; + cmd->peer_he_ops = param->peer_he_ops; + qdf_mem_copy(&cmd->peer_he_cap_phy, ¶m->peer_he_cap_phyinfo, + sizeof(param->peer_he_cap_phyinfo)); + qdf_mem_copy(&cmd->peer_ppet, ¶m->peer_ppet, + sizeof(param->peer_ppet)); + + /* Update peer legacy rate information */ + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + peer_legacy_rates_align); + buf_ptr += WMI_TLV_HDR_SIZE; + cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates; + qdf_mem_copy(buf_ptr, param->peer_legacy_rates.rates, + param->peer_legacy_rates.num_rates); + + /* Update peer HT rate information */ + buf_ptr += peer_legacy_rates_align; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + peer_ht_rates_align); + buf_ptr += WMI_TLV_HDR_SIZE; + cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates; + qdf_mem_copy(buf_ptr, param->peer_ht_rates.rates, + param->peer_ht_rates.num_rates); + + /* VHT Rates */ + buf_ptr += peer_ht_rates_align; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_STRUC_wmi_vht_rate_set, + WMITLV_GET_STRUCT_TLVLEN(wmi_vht_rate_set)); + + cmd->peer_nss = param->peer_nss; + + /* Update bandwidth-NSS mapping */ + cmd->peer_bw_rxnss_override = 0; + cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override; + + mcs = (wmi_vht_rate_set *) buf_ptr; + if (param->vht_capable) 
{ + mcs->rx_max_rate = param->rx_max_rate; + mcs->rx_mcs_set = param->rx_mcs_set; + mcs->tx_max_rate = param->tx_max_rate; + mcs->tx_mcs_set = param->tx_mcs_set; + } + + /* HE Rates */ + cmd->peer_he_mcs = param->peer_he_mcs_count; + buf_ptr += sizeof(wmi_vht_rate_set); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + (param->peer_he_mcs_count * sizeof(wmi_he_rate_set))); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Loop through the HE rate set */ + for (i = 0; i < param->peer_he_mcs_count; i++) { + he_mcs = (wmi_he_rate_set *) buf_ptr; + WMITLV_SET_HDR(he_mcs, WMITLV_TAG_STRUC_wmi_he_rate_set, + WMITLV_GET_STRUCT_TLVLEN(wmi_he_rate_set)); + + he_mcs->rx_mcs_set = param->peer_he_rx_mcs_set[i]; + he_mcs->tx_mcs_set = param->peer_he_tx_mcs_set[i]; + WMI_LOGD("%s:HE idx %d RxMCSmap %x TxMCSmap %x ", __func__, + i, he_mcs->rx_mcs_set, he_mcs->tx_mcs_set); + buf_ptr += sizeof(wmi_he_rate_set); + } + + + WMI_LOGD("%s: vdev_id %d associd %d peer_flags %x rate_caps %x " + "peer_caps %x listen_intval %d ht_caps %x max_mpdu %d " + "nss %d phymode %d peer_mpdu_density %d " + "cmd->peer_vht_caps %x " + "HE cap_info %x ops %x " + "HE phy %x %x %x " + "peer_bw_rxnss_override %x", __func__, + cmd->vdev_id, cmd->peer_associd, cmd->peer_flags, + cmd->peer_rate_caps, cmd->peer_caps, + cmd->peer_listen_intval, cmd->peer_ht_caps, + cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode, + cmd->peer_mpdu_density, + cmd->peer_vht_caps, cmd->peer_he_cap_info, + cmd->peer_he_ops, cmd->peer_he_cap_phy[0], + cmd->peer_he_cap_phy[1], cmd->peer_he_cap_phy[2], + cmd->peer_bw_rxnss_override); + + wmi_mtrace(WMI_PEER_ASSOC_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_ASSOC_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGP("%s: Failed to send peer assoc command ret = %d", + __func__, ret); + wmi_buf_free(buf); + } + + return ret; +} + +/* copy_scan_notify_events() - Helper routine to copy scan notify events + */ +static inline void 
copy_scan_event_cntrl_flags( + wmi_start_scan_cmd_fixed_param * cmd, + struct scan_req_params *param) +{ + + /* Scan events subscription */ + if (param->scan_ev_started) + cmd->notify_scan_events |= WMI_SCAN_EVENT_STARTED; + if (param->scan_ev_completed) + cmd->notify_scan_events |= WMI_SCAN_EVENT_COMPLETED; + if (param->scan_ev_bss_chan) + cmd->notify_scan_events |= WMI_SCAN_EVENT_BSS_CHANNEL; + if (param->scan_ev_foreign_chan) + cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHANNEL; + if (param->scan_ev_dequeued) + cmd->notify_scan_events |= WMI_SCAN_EVENT_DEQUEUED; + if (param->scan_ev_preempted) + cmd->notify_scan_events |= WMI_SCAN_EVENT_PREEMPTED; + if (param->scan_ev_start_failed) + cmd->notify_scan_events |= WMI_SCAN_EVENT_START_FAILED; + if (param->scan_ev_restarted) + cmd->notify_scan_events |= WMI_SCAN_EVENT_RESTARTED; + if (param->scan_ev_foreign_chn_exit) + cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT; + if (param->scan_ev_suspended) + cmd->notify_scan_events |= WMI_SCAN_EVENT_SUSPENDED; + if (param->scan_ev_resumed) + cmd->notify_scan_events |= WMI_SCAN_EVENT_RESUMED; + + /** Set scan control flags */ + cmd->scan_ctrl_flags = 0; + if (param->scan_f_passive) + cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; + if (param->scan_f_strict_passive_pch) + cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN; + if (param->scan_f_promisc_mode) + cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROMISCOUS; + if (param->scan_f_capture_phy_err) + cmd->scan_ctrl_flags |= WMI_SCAN_CAPTURE_PHY_ERROR; + if (param->scan_f_half_rate) + cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_HALF_RATE_SUPPORT; + if (param->scan_f_quarter_rate) + cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT; + if (param->scan_f_cck_rates) + cmd->scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES; + if (param->scan_f_ofdm_rates) + cmd->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES; + if (param->scan_f_chan_stat_evnt) + cmd->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT; + if 
(param->scan_f_filter_prb_req) + cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; + if (param->scan_f_bcast_probe) + cmd->scan_ctrl_flags |= WMI_SCAN_ADD_BCAST_PROBE_REQ; + if (param->scan_f_offchan_mgmt_tx) + cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_MGMT_TX; + if (param->scan_f_offchan_data_tx) + cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_DATA_TX; + if (param->scan_f_force_active_dfs_chn) + cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS; + if (param->scan_f_add_tpc_ie_in_probe) + cmd->scan_ctrl_flags |= WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ; + if (param->scan_f_add_ds_ie_in_probe) + cmd->scan_ctrl_flags |= WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ; + if (param->scan_f_add_spoofed_mac_in_probe) + cmd->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ; + if (param->scan_f_add_rand_seq_in_probe) + cmd->scan_ctrl_flags |= WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ; + if (param->scan_f_en_ie_whitelist_in_probe) + cmd->scan_ctrl_flags |= + WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ; + + /* for adaptive scan mode using 3 bits (21 - 23 bits) */ + WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags, + param->adaptive_dwell_time_mode); +} + +/* scan_copy_ie_buffer() - Copy scan ie_data */ +static inline void scan_copy_ie_buffer(uint8_t *buf_ptr, + struct scan_req_params *params) +{ + qdf_mem_copy(buf_ptr, params->extraie.ptr, params->extraie.len); +} + +/** + * wmi_copy_scan_random_mac() - To copy scan randomization attrs to wmi buffer + * @mac: random mac addr + * @mask: random mac mask + * @mac_addr: wmi random mac + * @mac_mask: wmi random mac mask + * + * Return None. 
+ */ +static inline +void wmi_copy_scan_random_mac(uint8_t *mac, uint8_t *mask, + wmi_mac_addr *mac_addr, wmi_mac_addr *mac_mask) +{ + WMI_CHAR_ARRAY_TO_MAC_ADDR(mac, mac_addr); + WMI_CHAR_ARRAY_TO_MAC_ADDR(mask, mac_mask); +} + +/* + * wmi_fill_vendor_oui() - fill vendor OUIs + * @buf_ptr: pointer to wmi tlv buffer + * @num_vendor_oui: number of vendor OUIs to be filled + * @param_voui: pointer to OUI buffer + * + * This function populates the wmi tlv buffer when vendor specific OUIs are + * present. + * + * Return: None + */ +static inline +void wmi_fill_vendor_oui(uint8_t *buf_ptr, uint32_t num_vendor_oui, + uint32_t *pvoui) +{ + wmi_vendor_oui *voui = NULL; + uint32_t i; + + voui = (wmi_vendor_oui *)buf_ptr; + + for (i = 0; i < num_vendor_oui; i++) { + WMITLV_SET_HDR(&voui[i].tlv_header, + WMITLV_TAG_STRUC_wmi_vendor_oui, + WMITLV_GET_STRUCT_TLVLEN(wmi_vendor_oui)); + voui[i].oui_type_subtype = pvoui[i]; + } +} + +/* + * wmi_fill_ie_whitelist_attrs() - fill IE whitelist attrs + * @ie_bitmap: output pointer to ie bit map in cmd + * @num_vendor_oui: output pointer to num vendor OUIs + * @ie_whitelist: input parameter + * + * This function populates the IE whitelist attrs of scan, pno and + * scan oui commands for ie_whitelist parameter. + * + * Return: None + */ +static inline +void wmi_fill_ie_whitelist_attrs(uint32_t *ie_bitmap, + uint32_t *num_vendor_oui, + struct probe_req_whitelist_attr *ie_whitelist) +{ + uint32_t i = 0; + + for (i = 0; i < PROBE_REQ_BITMAP_LEN; i++) + ie_bitmap[i] = ie_whitelist->ie_bitmap[i]; + + *num_vendor_oui = ie_whitelist->num_vendor_oui; +} + +/** + * send_scan_start_cmd_tlv() - WMI scan start function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold scan start cmd parameter + * + * Return: 0 on success and -ve on failure. 
+ */ +static QDF_STATUS send_scan_start_cmd_tlv(wmi_unified_t wmi_handle, + struct scan_req_params *params) +{ + int32_t ret = 0; + int32_t i; + wmi_buf_t wmi_buf; + wmi_start_scan_cmd_fixed_param *cmd; + uint8_t *buf_ptr; + uint32_t *tmp_ptr; + wmi_ssid *ssid = NULL; + wmi_mac_addr *bssid; + size_t len = sizeof(*cmd); + uint16_t extraie_len_with_pad = 0; + uint8_t phymode_roundup = 0; + struct probe_req_whitelist_attr *ie_whitelist = ¶ms->ie_whitelist; + + /* Length TLV placeholder for array of uint32_t */ + len += WMI_TLV_HDR_SIZE; + /* calculate the length of buffer required */ + if (params->chan_list.num_chan) + len += params->chan_list.num_chan * sizeof(uint32_t); + + /* Length TLV placeholder for array of wmi_ssid structures */ + len += WMI_TLV_HDR_SIZE; + if (params->num_ssids) + len += params->num_ssids * sizeof(wmi_ssid); + + /* Length TLV placeholder for array of wmi_mac_addr structures */ + len += WMI_TLV_HDR_SIZE; + if (params->num_bssid) + len += sizeof(wmi_mac_addr) * params->num_bssid; + + /* Length TLV placeholder for array of bytes */ + len += WMI_TLV_HDR_SIZE; + if (params->extraie.len) + extraie_len_with_pad = + roundup(params->extraie.len, sizeof(uint32_t)); + len += extraie_len_with_pad; + + len += WMI_TLV_HDR_SIZE; /* Length of TLV for array of wmi_vendor_oui */ + if (ie_whitelist->num_vendor_oui) + len += ie_whitelist->num_vendor_oui * sizeof(wmi_vendor_oui); + + len += WMI_TLV_HDR_SIZE; /* Length of TLV for array of scan phymode */ + if (params->scan_f_wide_band) + phymode_roundup = + qdf_roundup(params->chan_list.num_chan * sizeof(uint8_t), + sizeof(uint32_t)); + len += phymode_roundup; + + /* Allocate the memory */ + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGP("%s: failed to allocate memory for start scan cmd", + __func__); + return QDF_STATUS_E_FAILURE; + } + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + cmd = (wmi_start_scan_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + 
WMITLV_TAG_STRUC_wmi_start_scan_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_start_scan_cmd_fixed_param)); + + cmd->scan_id = params->scan_id; + cmd->scan_req_id = params->scan_req_id; + cmd->vdev_id = params->vdev_id; + cmd->scan_priority = params->scan_priority; + + copy_scan_event_cntrl_flags(cmd, params); + + cmd->dwell_time_active = params->dwell_time_active; + cmd->dwell_time_active_2g = params->dwell_time_active_2g; + cmd->dwell_time_passive = params->dwell_time_passive; + cmd->min_rest_time = params->min_rest_time; + cmd->max_rest_time = params->max_rest_time; + cmd->repeat_probe_time = params->repeat_probe_time; + cmd->probe_spacing_time = params->probe_spacing_time; + cmd->idle_time = params->idle_time; + cmd->max_scan_time = params->max_scan_time; + cmd->probe_delay = params->probe_delay; + cmd->burst_duration = params->burst_duration; + cmd->num_chan = params->chan_list.num_chan; + cmd->num_bssid = params->num_bssid; + cmd->num_ssids = params->num_ssids; + cmd->ie_len = params->extraie.len; + cmd->n_probes = params->n_probes; + cmd->scan_ctrl_flags_ext = params->scan_ctrl_flags_ext; + + WMI_LOGD("scan_ctrl_flags_ext = %x", cmd->scan_ctrl_flags_ext); + + if (params->scan_random.randomize) + wmi_copy_scan_random_mac(params->scan_random.mac_addr, + params->scan_random.mac_mask, + &cmd->mac_addr, + &cmd->mac_mask); + + if (ie_whitelist->white_list) + wmi_fill_ie_whitelist_attrs(cmd->ie_bitmap, + &cmd->num_vendor_oui, + ie_whitelist); + + buf_ptr += sizeof(*cmd); + tmp_ptr = (uint32_t *) (buf_ptr + WMI_TLV_HDR_SIZE); + for (i = 0; i < params->chan_list.num_chan; ++i) + tmp_ptr[i] = params->chan_list.chan[i].freq; + + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_UINT32, + (params->chan_list.num_chan * sizeof(uint32_t))); + buf_ptr += WMI_TLV_HDR_SIZE + + (params->chan_list.num_chan * sizeof(uint32_t)); + + if (params->num_ssids > WLAN_SCAN_MAX_NUM_SSID) { + WMI_LOGE("Invalid value for num_ssids %d", params->num_ssids); + goto error; + } + + 
WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_FIXED_STRUC, + (params->num_ssids * sizeof(wmi_ssid))); + + if (params->num_ssids) { + ssid = (wmi_ssid *) (buf_ptr + WMI_TLV_HDR_SIZE); + for (i = 0; i < params->num_ssids; ++i) { + ssid->ssid_len = params->ssid[i].length; + qdf_mem_copy(ssid->ssid, params->ssid[i].ssid, + params->ssid[i].length); + ssid++; + } + } + buf_ptr += WMI_TLV_HDR_SIZE + (params->num_ssids * sizeof(wmi_ssid)); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_FIXED_STRUC, + (params->num_bssid * sizeof(wmi_mac_addr))); + bssid = (wmi_mac_addr *) (buf_ptr + WMI_TLV_HDR_SIZE); + + if (params->num_bssid) { + for (i = 0; i < params->num_bssid; ++i) { + WMI_CHAR_ARRAY_TO_MAC_ADDR( + ¶ms->bssid_list[i].bytes[0], bssid); + bssid++; + } + } + + buf_ptr += WMI_TLV_HDR_SIZE + + (params->num_bssid * sizeof(wmi_mac_addr)); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, extraie_len_with_pad); + if (params->extraie.len) + scan_copy_ie_buffer(buf_ptr + WMI_TLV_HDR_SIZE, + params); + + buf_ptr += WMI_TLV_HDR_SIZE + extraie_len_with_pad; + + /* probe req ie whitelisting */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + ie_whitelist->num_vendor_oui * sizeof(wmi_vendor_oui)); + + buf_ptr += WMI_TLV_HDR_SIZE; + + if (cmd->num_vendor_oui) { + wmi_fill_vendor_oui(buf_ptr, cmd->num_vendor_oui, + ie_whitelist->voui); + buf_ptr += cmd->num_vendor_oui * sizeof(wmi_vendor_oui); + } + + /* Add phy mode TLV if it's a wide band scan */ + if (params->scan_f_wide_band) { + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, phymode_roundup); + buf_ptr = (uint8_t *) (buf_ptr + WMI_TLV_HDR_SIZE); + for (i = 0; i < params->chan_list.num_chan; ++i) + buf_ptr[i] = + WMI_SCAN_CHAN_SET_MODE(params->chan_list.chan[i].phymode); + buf_ptr += phymode_roundup; + } else { + /* Add ZERO legth phy mode TLV */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, 0); + } + + wmi_mtrace(WMI_START_SCAN_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, wmi_buf, + len, 
WMI_START_SCAN_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to start scan: %d", __func__, ret); + wmi_buf_free(wmi_buf); + } + return ret; +error: + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; +} + +/** + * send_scan_stop_cmd_tlv() - WMI scan start function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold scan cancel cmd parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_scan_stop_cmd_tlv(wmi_unified_t wmi_handle, + struct scan_cancel_param *param) +{ + wmi_stop_scan_cmd_fixed_param *cmd; + int ret; + int len = sizeof(*cmd); + wmi_buf_t wmi_buf; + + /* Allocate the memory */ + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGP("%s: failed to allocate memory for stop scan cmd", + __func__); + ret = QDF_STATUS_E_NOMEM; + goto error; + } + + cmd = (wmi_stop_scan_cmd_fixed_param *) wmi_buf_data(wmi_buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_stop_scan_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_stop_scan_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->requestor = param->requester; + cmd->scan_id = param->scan_id; + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + param->pdev_id); + /* stop the scan with the corresponding scan_id */ + if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) { + /* Cancelling all scans */ + cmd->req_type = WMI_SCAN_STOP_ALL; + } else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) { + /* Cancelling VAP scans */ + cmd->req_type = WMI_SCN_STOP_VAP_ALL; + } else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) { + /* Cancelling specific scan */ + cmd->req_type = WMI_SCAN_STOP_ONE; + } else { + WMI_LOGE("%s: Invalid Command : ", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_INVAL; + } + + wmi_mtrace(WMI_STOP_SCAN_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, wmi_buf, + len, WMI_STOP_SCAN_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send stop scan: %d", __func__, ret); + 
wmi_buf_free(wmi_buf); + } + +error: + return ret; +} + +#ifdef CONFIG_MCL +/** + * send_scan_chan_list_cmd_tlv() - WMI scan channel list function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold scan channel list parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_scan_chan_list_cmd_tlv(wmi_unified_t wmi_handle, + struct scan_chan_list_params *chan_list) +{ + wmi_buf_t buf; + QDF_STATUS qdf_status; + wmi_scan_chan_list_cmd_fixed_param *cmd; + int i; + uint8_t *buf_ptr; + wmi_channel_param *chan_info, *tchan_info; + uint16_t len = sizeof(*cmd) + WMI_TLV_HDR_SIZE; + + len += sizeof(wmi_channel) * chan_list->num_scan_chans; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + qdf_status = QDF_STATUS_E_NOMEM; + goto end; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_scan_chan_list_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_scan_chan_list_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_scan_chan_list_cmd_fixed_param)); + + WMI_LOGD("no of channels = %d, len = %d", chan_list->num_scan_chans, len); + + cmd->num_scan_chans = chan_list->num_scan_chans; + WMITLV_SET_HDR((buf_ptr + sizeof(wmi_scan_chan_list_cmd_fixed_param)), + WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_channel) * chan_list->num_scan_chans); + chan_info = (wmi_channel_param *) + (buf_ptr + sizeof(*cmd) + WMI_TLV_HDR_SIZE); + tchan_info = chan_list->chan_info; + + for (i = 0; i < chan_list->num_scan_chans; ++i) { + WMITLV_SET_HDR(&chan_info->tlv_header, + WMITLV_TAG_STRUC_wmi_channel, + WMITLV_GET_STRUCT_TLVLEN(wmi_channel)); + chan_info->mhz = tchan_info->mhz; + chan_info->band_center_freq1 = + tchan_info->band_center_freq1; + chan_info->band_center_freq2 = + tchan_info->band_center_freq2; + chan_info->info = tchan_info->info; + chan_info->reg_info_1 = tchan_info->reg_info_1; + chan_info->reg_info_2 = tchan_info->reg_info_2; + WMI_LOGD("chan[%d] = 
%u", i, chan_info->mhz); + + /*TODO: Set WMI_SET_CHANNEL_MIN_POWER */ + /*TODO: Set WMI_SET_CHANNEL_ANTENNA_MAX */ + /*TODO: WMI_SET_CHANNEL_REG_CLASSID */ + tchan_info++; + chan_info++; + } + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + chan_list->pdev_id); + + wmi_mtrace(WMI_SCAN_CHAN_LIST_CMDID, NO_SESSION, 0); + qdf_status = wmi_unified_cmd_send(wmi_handle, + buf, len, WMI_SCAN_CHAN_LIST_CMDID); + + if (QDF_IS_STATUS_ERROR(qdf_status)) { + WMI_LOGE("Failed to send WMI_SCAN_CHAN_LIST_CMDID"); + wmi_buf_free(buf); + } + +end: + return qdf_status; +} +#else +static QDF_STATUS send_scan_chan_list_cmd_tlv(wmi_unified_t wmi_handle, + struct scan_chan_list_params *chan_list) +{ + wmi_buf_t buf; + QDF_STATUS qdf_status; + wmi_scan_chan_list_cmd_fixed_param *cmd; + int i; + uint8_t *buf_ptr; + wmi_channel *chan_info; + struct channel_param *tchan_info; + uint16_t len = sizeof(*cmd) + WMI_TLV_HDR_SIZE; + + len += sizeof(wmi_channel) * chan_list->nallchans; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + qdf_status = QDF_STATUS_E_NOMEM; + goto end; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_scan_chan_list_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_scan_chan_list_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_scan_chan_list_cmd_fixed_param)); + + WMI_LOGD("no of channels = %d, len = %d", chan_list->nallchans, len); + + if (chan_list->append) + cmd->flags |= APPEND_TO_EXISTING_CHAN_LIST; + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + chan_list->pdev_id); + cmd->num_scan_chans = chan_list->nallchans; + WMITLV_SET_HDR((buf_ptr + sizeof(wmi_scan_chan_list_cmd_fixed_param)), + WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_channel) * chan_list->nallchans); + chan_info = (wmi_channel *) (buf_ptr + sizeof(*cmd) + WMI_TLV_HDR_SIZE); + tchan_info = &(chan_list->ch_param[0]); + + for (i = 0; i < chan_list->nallchans; ++i) { + 
WMITLV_SET_HDR(&chan_info->tlv_header, + WMITLV_TAG_STRUC_wmi_channel, + WMITLV_GET_STRUCT_TLVLEN(wmi_channel)); + chan_info->mhz = tchan_info->mhz; + chan_info->band_center_freq1 = + tchan_info->cfreq1; + chan_info->band_center_freq2 = + tchan_info->cfreq2; + + if (tchan_info->is_chan_passive) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_PASSIVE); + + if (tchan_info->allow_vht) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_ALLOW_VHT); + else if (tchan_info->allow_ht) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_ALLOW_HT); + WMI_SET_CHANNEL_MODE(chan_info, + tchan_info->phy_mode); + + if (tchan_info->half_rate) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_HALF_RATE); + + if (tchan_info->quarter_rate) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_QUARTER_RATE); + + /* also fill in power information */ + WMI_SET_CHANNEL_MIN_POWER(chan_info, + tchan_info->minpower); + WMI_SET_CHANNEL_MAX_POWER(chan_info, + tchan_info->maxpower); + WMI_SET_CHANNEL_REG_POWER(chan_info, + tchan_info->maxregpower); + WMI_SET_CHANNEL_ANTENNA_MAX(chan_info, + tchan_info->antennamax); + WMI_SET_CHANNEL_REG_CLASSID(chan_info, + tchan_info->reg_class_id); + WMI_SET_CHANNEL_MAX_TX_POWER(chan_info, + tchan_info->maxregpower); + + WMI_LOGD("chan[%d] = %u", i, chan_info->mhz); + + tchan_info++; + chan_info++; + } + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + chan_list->pdev_id); + + wmi_mtrace(WMI_SCAN_CHAN_LIST_CMDID, cmd->pdev_id, 0); + qdf_status = wmi_unified_cmd_send( + wmi_handle, + buf, len, WMI_SCAN_CHAN_LIST_CMDID); + + if (QDF_IS_STATUS_ERROR(qdf_status)) { + WMI_LOGE("Failed to send WMI_SCAN_CHAN_LIST_CMDID"); + wmi_buf_free(buf); + } + +end: + return qdf_status; +} +#endif + +/** + * populate_tx_send_params - Populate TX param TLV for mgmt and offchan tx + * + * @bufp: Pointer to buffer + * @param: Pointer to tx param + * + * Return: QDF_STATUS_SUCCESS for success and QDF_STATUS_E_FAILURE for failure + */ +static inline QDF_STATUS 
populate_tx_send_params(uint8_t *bufp, + struct tx_send_params param) +{ + wmi_tx_send_params *tx_param; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (!bufp) { + status = QDF_STATUS_E_FAILURE; + return status; + } + tx_param = (wmi_tx_send_params *)bufp; + WMITLV_SET_HDR(&tx_param->tlv_header, + WMITLV_TAG_STRUC_wmi_tx_send_params, + WMITLV_GET_STRUCT_TLVLEN(wmi_tx_send_params)); + WMI_TX_SEND_PARAM_PWR_SET(tx_param->tx_param_dword0, param.pwr); + WMI_TX_SEND_PARAM_MCS_MASK_SET(tx_param->tx_param_dword0, + param.mcs_mask); + WMI_TX_SEND_PARAM_NSS_MASK_SET(tx_param->tx_param_dword0, + param.nss_mask); + WMI_TX_SEND_PARAM_RETRY_LIMIT_SET(tx_param->tx_param_dword0, + param.retry_limit); + WMI_TX_SEND_PARAM_CHAIN_MASK_SET(tx_param->tx_param_dword1, + param.chain_mask); + WMI_TX_SEND_PARAM_BW_MASK_SET(tx_param->tx_param_dword1, + param.bw_mask); + WMI_TX_SEND_PARAM_PREAMBLE_SET(tx_param->tx_param_dword1, + param.preamble_type); + WMI_TX_SEND_PARAM_FRAME_TYPE_SET(tx_param->tx_param_dword1, + param.frame_type); + + return status; +} + +/** + * send_mgmt_cmd_tlv() - WMI scan start function + * @wmi_handle : handle to WMI. + * @param : pointer to hold mgmt cmd parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_mgmt_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_mgmt_params *param) +{ + wmi_buf_t buf; + wmi_mgmt_tx_send_cmd_fixed_param *cmd; + int32_t cmd_len; + uint64_t dma_addr; + void *qdf_ctx = param->qdf_ctx; + uint8_t *bufp; + QDF_STATUS status = QDF_STATUS_SUCCESS; + int32_t bufp_len = (param->frm_len < mgmt_tx_dl_frm_len) ? 
param->frm_len : + mgmt_tx_dl_frm_len; + + cmd_len = sizeof(wmi_mgmt_tx_send_cmd_fixed_param) + + WMI_TLV_HDR_SIZE + + roundup(bufp_len, sizeof(uint32_t)); + + buf = wmi_buf_alloc(wmi_handle, sizeof(wmi_tx_send_params) + cmd_len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_mgmt_tx_send_cmd_fixed_param *)wmi_buf_data(buf); + bufp = (uint8_t *) cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_mgmt_tx_send_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_mgmt_tx_send_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + + cmd->desc_id = param->desc_id; + cmd->chanfreq = param->chanfreq; + bufp += sizeof(wmi_mgmt_tx_send_cmd_fixed_param); + WMITLV_SET_HDR(bufp, WMITLV_TAG_ARRAY_BYTE, roundup(bufp_len, + sizeof(uint32_t))); + bufp += WMI_TLV_HDR_SIZE; + qdf_mem_copy(bufp, param->pdata, bufp_len); + + status = qdf_nbuf_map_single(qdf_ctx, param->tx_frame, + QDF_DMA_TO_DEVICE); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: wmi buf map failed", __func__); + goto free_buf; + } + + dma_addr = qdf_nbuf_get_frag_paddr(param->tx_frame, 0); + cmd->paddr_lo = (uint32_t)(dma_addr & 0xffffffff); +#if defined(HTT_PADDR64) + cmd->paddr_hi = (uint32_t)((dma_addr >> 32) & 0x1F); +#endif + cmd->frame_len = param->frm_len; + cmd->buf_len = bufp_len; + cmd->tx_params_valid = param->tx_params_valid; + + wmi_mgmt_cmd_record(wmi_handle, WMI_MGMT_TX_SEND_CMDID, + bufp, cmd->vdev_id, cmd->chanfreq); + + bufp += roundup(bufp_len, sizeof(uint32_t)); + if (param->tx_params_valid) { + status = populate_tx_send_params(bufp, param->tx_param); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: Populate TX send params failed", + __func__); + goto unmap_tx_frame; + } + cmd_len += sizeof(wmi_tx_send_params); + } + + wmi_mtrace(WMI_MGMT_TX_SEND_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, cmd_len, + WMI_MGMT_TX_SEND_CMDID)) { + WMI_LOGE("%s: Failed to send mgmt Tx", __func__); + goto 
unmap_tx_frame; + } + return QDF_STATUS_SUCCESS; + +unmap_tx_frame: + qdf_nbuf_unmap_single(qdf_ctx, param->tx_frame, + QDF_DMA_TO_DEVICE); +free_buf: + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; +} + +/** + * send_offchan_data_tx_send_cmd_tlv() - Send off-chan tx data + * @wmi_handle : handle to WMI. + * @param : pointer to offchan data tx cmd parameter + * + * Return: QDF_STATUS_SUCCESS on success and error on failure. + */ +static QDF_STATUS send_offchan_data_tx_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_offchan_data_tx_params *param) +{ + wmi_buf_t buf; + wmi_offchan_data_tx_send_cmd_fixed_param *cmd; + int32_t cmd_len; + uint64_t dma_addr; + void *qdf_ctx = param->qdf_ctx; + uint8_t *bufp; + int32_t bufp_len = (param->frm_len < mgmt_tx_dl_frm_len) ? + param->frm_len : mgmt_tx_dl_frm_len; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + cmd_len = sizeof(wmi_offchan_data_tx_send_cmd_fixed_param) + + WMI_TLV_HDR_SIZE + + roundup(bufp_len, sizeof(uint32_t)); + + buf = wmi_buf_alloc(wmi_handle, sizeof(wmi_tx_send_params) + cmd_len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_offchan_data_tx_send_cmd_fixed_param *) wmi_buf_data(buf); + bufp = (uint8_t *) cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_offchan_data_tx_send_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_offchan_data_tx_send_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + + cmd->desc_id = param->desc_id; + cmd->chanfreq = param->chanfreq; + bufp += sizeof(wmi_offchan_data_tx_send_cmd_fixed_param); + WMITLV_SET_HDR(bufp, WMITLV_TAG_ARRAY_BYTE, roundup(bufp_len, + sizeof(uint32_t))); + bufp += WMI_TLV_HDR_SIZE; + qdf_mem_copy(bufp, param->pdata, bufp_len); + qdf_nbuf_map_single(qdf_ctx, param->tx_frame, QDF_DMA_TO_DEVICE); + dma_addr = qdf_nbuf_get_frag_paddr(param->tx_frame, 0); + cmd->paddr_lo = (uint32_t)(dma_addr & 0xffffffff); +#if defined(HTT_PADDR64) + cmd->paddr_hi = (uint32_t)((dma_addr >> 32) 
& 0x1F); +#endif + cmd->frame_len = param->frm_len; + cmd->buf_len = bufp_len; + cmd->tx_params_valid = param->tx_params_valid; + + wmi_mgmt_cmd_record(wmi_handle, WMI_OFFCHAN_DATA_TX_SEND_CMDID, + bufp, cmd->vdev_id, cmd->chanfreq); + + bufp += roundup(bufp_len, sizeof(uint32_t)); + if (param->tx_params_valid) { + status = populate_tx_send_params(bufp, param->tx_param); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: Populate TX send params failed", + __func__); + goto err1; + } + cmd_len += sizeof(wmi_tx_send_params); + } + + wmi_mtrace(WMI_OFFCHAN_DATA_TX_SEND_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, cmd_len, + WMI_OFFCHAN_DATA_TX_SEND_CMDID)) { + WMI_LOGE("%s: Failed to offchan data Tx", __func__); + goto err1; + } + + return QDF_STATUS_SUCCESS; + +err1: + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; +} + +/** + * send_modem_power_state_cmd_tlv() - set modem power state to fw + * @wmi_handle: wmi handle + * @param_value: parameter value + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_modem_power_state_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t param_value) +{ + QDF_STATUS ret; + wmi_modem_power_state_cmd_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_modem_power_state_cmd_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_modem_power_state_cmd_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_modem_power_state_cmd_param)); + cmd->modem_power_state = param_value; + WMI_LOGD("%s: Setting cmd->modem_power_state = %u", __func__, + param_value); + wmi_mtrace(WMI_MODEM_POWER_STATE_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_MODEM_POWER_STATE_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send notify cmd ret = %d", ret); + wmi_buf_free(buf); + } + 
+ return ret; +} + +/** + * send_set_sta_ps_mode_cmd_tlv() - set sta powersave mode in fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @val: value + * + * Return: QDF_STATUS_SUCCESS for success or error code. + */ +static QDF_STATUS send_set_sta_ps_mode_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, uint8_t val) +{ + wmi_sta_powersave_mode_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + WMI_LOGD("Set Sta Mode Ps vdevId %d val %d", vdev_id, val); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: Set Sta Mode Ps Mem Alloc Failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_sta_powersave_mode_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_sta_powersave_mode_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_sta_powersave_mode_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + if (val) + cmd->sta_ps_mode = WMI_STA_PS_MODE_ENABLED; + else + cmd->sta_ps_mode = WMI_STA_PS_MODE_DISABLED; + + wmi_mtrace(WMI_STA_POWERSAVE_MODE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_STA_POWERSAVE_MODE_CMDID)) { + WMI_LOGE("Set Sta Mode Ps Failed vdevId %d val %d", + vdev_id, val); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + return 0; +} + +/** + * send_set_mimops_cmd_tlv() - set MIMO powersave + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @value: value + * + * Return: QDF_STATUS_SUCCESS for success or error code. 
+ */ +static QDF_STATUS send_set_mimops_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, int value) +{ + QDF_STATUS ret; + wmi_sta_smps_force_mode_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_sta_smps_force_mode_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_sta_smps_force_mode_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_sta_smps_force_mode_cmd_fixed_param)); + + cmd->vdev_id = vdev_id; + + /* WMI_SMPS_FORCED_MODE values do not directly map + * to SM power save values defined in the specification. + * Make sure to send the right mapping. + */ + switch (value) { + case 0: + cmd->forced_mode = WMI_SMPS_FORCED_MODE_NONE; + break; + case 1: + cmd->forced_mode = WMI_SMPS_FORCED_MODE_DISABLED; + break; + case 2: + cmd->forced_mode = WMI_SMPS_FORCED_MODE_STATIC; + break; + case 3: + cmd->forced_mode = WMI_SMPS_FORCED_MODE_DYNAMIC; + break; + default: + WMI_LOGE("%s:INVALID Mimo PS CONFIG", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + WMI_LOGD("Setting vdev %d value = %u", vdev_id, value); + + wmi_mtrace(WMI_STA_SMPS_FORCE_MODE_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_STA_SMPS_FORCE_MODE_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send set Mimo PS ret = %d", ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_set_smps_params_cmd_tlv() - set smps params + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @value: value + * + * Return: QDF_STATUS_SUCCESS for success or error code. 
+ */ +static QDF_STATUS send_set_smps_params_cmd_tlv(wmi_unified_t wmi_handle, uint8_t vdev_id, + int value) +{ + QDF_STATUS ret; + wmi_sta_smps_param_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_sta_smps_param_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_sta_smps_param_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_sta_smps_param_cmd_fixed_param)); + + cmd->vdev_id = vdev_id; + cmd->value = value & WMI_SMPS_MASK_LOWER_16BITS; + cmd->param = + (value >> WMI_SMPS_PARAM_VALUE_S) & WMI_SMPS_MASK_UPPER_3BITS; + + WMI_LOGD("Setting vdev %d value = %x param %x", vdev_id, cmd->value, + cmd->param); + + wmi_mtrace(WMI_STA_SMPS_PARAM_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_STA_SMPS_PARAM_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send set Mimo PS ret = %d", ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_set_p2pgo_noa_req_cmd_tlv() - send p2p go noa request to fw + * @wmi_handle: wmi handle + * @noa: p2p power save parameters + * + * Return: CDF status + */ +static QDF_STATUS send_set_p2pgo_noa_req_cmd_tlv(wmi_unified_t wmi_handle, + struct p2p_ps_params *noa) +{ + wmi_p2p_set_noa_cmd_fixed_param *cmd; + wmi_p2p_noa_descriptor *noa_discriptor; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint16_t len; + QDF_STATUS status; + uint32_t duration; + + WMI_LOGD("%s: Enter", __func__); + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE + sizeof(*noa_discriptor); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + status = QDF_STATUS_E_FAILURE; + goto end; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_p2p_set_noa_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_p2p_set_noa_cmd_fixed_param, + 
WMITLV_GET_STRUCT_TLVLEN + (wmi_p2p_set_noa_cmd_fixed_param)); + duration = (noa->count == 1) ? noa->single_noa_duration : noa->duration; + cmd->vdev_id = noa->session_id; + cmd->enable = (duration) ? true : false; + cmd->num_noa = 1; + + WMITLV_SET_HDR((buf_ptr + sizeof(wmi_p2p_set_noa_cmd_fixed_param)), + WMITLV_TAG_ARRAY_STRUC, sizeof(wmi_p2p_noa_descriptor)); + noa_discriptor = (wmi_p2p_noa_descriptor *) (buf_ptr + + sizeof + (wmi_p2p_set_noa_cmd_fixed_param) + + WMI_TLV_HDR_SIZE); + WMITLV_SET_HDR(&noa_discriptor->tlv_header, + WMITLV_TAG_STRUC_wmi_p2p_noa_descriptor, + WMITLV_GET_STRUCT_TLVLEN(wmi_p2p_noa_descriptor)); + noa_discriptor->type_count = noa->count; + noa_discriptor->duration = duration; + noa_discriptor->interval = noa->interval; + noa_discriptor->start_time = 0; + + WMI_LOGI("SET P2P GO NOA:vdev_id:%d count:%d duration:%d interval:%d", + cmd->vdev_id, noa->count, noa_discriptor->duration, + noa->interval); + wmi_mtrace(WMI_FWTEST_P2P_SET_NOA_PARAM_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_FWTEST_P2P_SET_NOA_PARAM_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send WMI_FWTEST_P2P_SET_NOA_PARAM_CMDID"); + wmi_buf_free(buf); + } + +end: + WMI_LOGD("%s: Exit", __func__); + return status; +} + + +/** + * send_set_p2pgo_oppps_req_cmd_tlv() - send p2p go opp power save request to fw + * @wmi_handle: wmi handle + * @noa: p2p opp power save parameters + * + * Return: CDF status + */ +static QDF_STATUS send_set_p2pgo_oppps_req_cmd_tlv(wmi_unified_t wmi_handle, + struct p2p_ps_params *oppps) +{ + wmi_p2p_set_oppps_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + WMI_LOGD("%s: Enter", __func__); + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + status = QDF_STATUS_E_FAILURE; + goto end; + } + + cmd = (wmi_p2p_set_oppps_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + 
WMITLV_TAG_STRUC_wmi_p2p_set_oppps_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_p2p_set_oppps_cmd_fixed_param)); + cmd->vdev_id = oppps->session_id; + if (oppps->ctwindow) + WMI_UNIFIED_OPPPS_ATTR_ENABLED_SET(cmd); + + WMI_UNIFIED_OPPPS_ATTR_CTWIN_SET(cmd, oppps->ctwindow); + WMI_LOGI("SET P2P GO OPPPS:vdev_id:%d ctwindow:%d", + cmd->vdev_id, oppps->ctwindow); + wmi_mtrace(WMI_P2P_SET_OPPPS_PARAM_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_P2P_SET_OPPPS_PARAM_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send WMI_P2P_SET_OPPPS_PARAM_CMDID"); + wmi_buf_free(buf); + } + +end: + WMI_LOGD("%s: Exit", __func__); + return status; +} + +#ifdef CONVERGED_P2P_ENABLE +/** + * send_p2p_lo_start_cmd_tlv() - send p2p lo start request to fw + * @wmi_handle: wmi handle + * @param: p2p listen offload start parameters + * + * Return: QDF status + */ +static QDF_STATUS send_p2p_lo_start_cmd_tlv(wmi_unified_t wmi_handle, + struct p2p_lo_start *param) +{ + wmi_buf_t buf; + wmi_p2p_lo_start_cmd_fixed_param *cmd; + int32_t len = sizeof(*cmd); + uint8_t *buf_ptr; + QDF_STATUS status; + int device_types_len_aligned; + int probe_resp_len_aligned; + + if (!param) { + WMI_LOGE("lo start param is null"); + return QDF_STATUS_E_INVAL; + } + + WMI_LOGD("%s: vdev_id:%d", __func__, param->vdev_id); + + device_types_len_aligned = + qdf_roundup(param->dev_types_len, + sizeof(uint32_t)); + probe_resp_len_aligned = + qdf_roundup(param->probe_resp_len, + sizeof(uint32_t)); + + len += 2 * WMI_TLV_HDR_SIZE + device_types_len_aligned + + probe_resp_len_aligned; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed to allocate memory for p2p lo start", + __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_p2p_lo_start_cmd_fixed_param *)wmi_buf_data(buf); + buf_ptr = (uint8_t *) wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_p2p_lo_start_cmd_fixed_param, + 
WMITLV_GET_STRUCT_TLVLEN( + wmi_p2p_lo_start_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + cmd->ctl_flags = param->ctl_flags; + cmd->channel = param->freq; + cmd->period = param->period; + cmd->interval = param->interval; + cmd->count = param->count; + cmd->device_types_len = param->dev_types_len; + cmd->prob_resp_len = param->probe_resp_len; + + buf_ptr += sizeof(wmi_p2p_lo_start_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + device_types_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, param->device_types, + param->dev_types_len); + + buf_ptr += device_types_len_aligned; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + probe_resp_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, param->probe_resp_tmplt, + param->probe_resp_len); + + WMI_LOGD("%s: Sending WMI_P2P_LO_START command, channel=%d, period=%d, interval=%d, count=%d", __func__, + cmd->channel, cmd->period, cmd->interval, cmd->count); + + wmi_mtrace(WMI_P2P_LISTEN_OFFLOAD_START_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, + buf, len, + WMI_P2P_LISTEN_OFFLOAD_START_CMDID); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: Failed to send p2p lo start: %d", + __func__, status); + wmi_buf_free(buf); + return status; + } + + WMI_LOGD("%s: Successfully sent WMI_P2P_LO_START", __func__); + + return QDF_STATUS_SUCCESS; +} + +/** + * send_p2p_lo_stop_cmd_tlv() - send p2p lo stop request to fw + * @wmi_handle: wmi handle + * @param: p2p listen offload stop parameters + * + * Return: QDF status + */ +static QDF_STATUS send_p2p_lo_stop_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + wmi_buf_t buf; + wmi_p2p_lo_stop_cmd_fixed_param *cmd; + int32_t len; + QDF_STATUS status; + + WMI_LOGD("%s: vdev_id:%d", __func__, vdev_id); + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: Failed to allocate memory for p2p lo stop", + __func__); + return QDF_STATUS_E_NOMEM; + } + cmd 
= (wmi_p2p_lo_stop_cmd_fixed_param *)wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_p2p_lo_stop_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_p2p_lo_stop_cmd_fixed_param)); + + cmd->vdev_id = vdev_id; + + WMI_LOGD("%s: Sending WMI_P2P_LO_STOP command", __func__); + + wmi_mtrace(WMI_P2P_LISTEN_OFFLOAD_STOP_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, + buf, len, + WMI_P2P_LISTEN_OFFLOAD_STOP_CMDID); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: Failed to send p2p lo stop: %d", + __func__, status); + wmi_buf_free(buf); + return status; + } + + WMI_LOGD("%s: Successfully sent WMI_P2P_LO_STOP", __func__); + + return QDF_STATUS_SUCCESS; +} +#endif /* End of CONVERGED_P2P_ENABLE */ + +/** + * send_get_temperature_cmd_tlv() - get pdev temperature req + * @wmi_handle: wmi handle + * + * Return: QDF_STATUS_SUCCESS for success or error code. + */ +static QDF_STATUS send_get_temperature_cmd_tlv(wmi_unified_t wmi_handle) +{ + wmi_pdev_get_temperature_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len = sizeof(wmi_pdev_get_temperature_cmd_fixed_param); + uint8_t *buf_ptr; + + if (!wmi_handle) { + WMI_LOGE(FL("WMI is closed, can not issue cmd")); + return QDF_STATUS_E_INVAL; + } + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE(FL("wmi_buf_alloc failed")); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + + cmd = (wmi_pdev_get_temperature_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_get_temperature_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_get_temperature_cmd_fixed_param)); + + wmi_mtrace(WMI_PDEV_GET_TEMPERATURE_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_PDEV_GET_TEMPERATURE_CMDID)) { + WMI_LOGE(FL("failed to send get temperature command")); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * 
send_set_sta_uapsd_auto_trig_cmd_tlv() - set uapsd auto trigger command
 * @wmi_handle: wmi handle
 * @param: sta_uapsd_trig_params carrying the vdev id, peer mac address,
 *         the AC count (num_ac) and the per-AC auto trigger parameters
 *
 * This function sets the trigger
 * uapsd params such as service interval, delay interval
 * and suspend interval which will be used by the firmware
 * to send trigger frames periodically when there is no
 * traffic on the transmit side.
 *
 * Return: QDF_STATUS_SUCCESS for success or error code.
 */
static QDF_STATUS send_set_sta_uapsd_auto_trig_cmd_tlv(wmi_unified_t wmi_handle,
				struct sta_uapsd_trig_params *param)
{
	wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
	QDF_STATUS ret;
	/* one wmi_sta_uapsd_auto_trig_param TLV per access category */
	uint32_t param_len = param->num_ac * sizeof(wmi_sta_uapsd_auto_trig_param);
	uint32_t cmd_len = sizeof(*cmd) + param_len + WMI_TLV_HDR_SIZE;
	uint32_t i;
	wmi_buf_t buf;
	uint8_t *buf_ptr;
	struct sta_uapsd_params *uapsd_param;
	wmi_sta_uapsd_auto_trig_param *trig_param;

	buf = wmi_buf_alloc(wmi_handle, cmd_len);
	if (!buf) {
		WMI_LOGE("%s:wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (uint8_t *) wmi_buf_data(buf);
	cmd = (wmi_sta_uapsd_auto_trig_cmd_fixed_param *) buf_ptr;
	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_sta_uapsd_auto_trig_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN
		(wmi_sta_uapsd_auto_trig_cmd_fixed_param));
	cmd->vdev_id = param->vdevid;
	cmd->num_ac = param->num_ac;
	WMI_CHAR_ARRAY_TO_MAC_ADDR(param->peer_addr, &cmd->peer_macaddr);

	/* TLV indicating array of structures to follow */
	buf_ptr += sizeof(*cmd);
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, param_len);

	buf_ptr += WMI_TLV_HDR_SIZE;

	/*
	 * Update tag and length for uapsd auto trigger params (this will take
	 * care of updating tag and length if it is not pre-filled by caller).
	 */
	uapsd_param = (struct sta_uapsd_params *)param->auto_triggerparam;
	trig_param = (wmi_sta_uapsd_auto_trig_param *)buf_ptr;
	for (i = 0; i < param->num_ac; i++) {
		WMITLV_SET_HDR((buf_ptr +
				(i * sizeof(wmi_sta_uapsd_auto_trig_param))),
			       WMITLV_TAG_STRUC_wmi_sta_uapsd_auto_trig_param,
			       WMITLV_GET_STRUCT_TLVLEN
			       (wmi_sta_uapsd_auto_trig_param));
		/* copy host-side params field by field into the TLV */
		trig_param->wmm_ac = uapsd_param->wmm_ac;
		trig_param->user_priority = uapsd_param->user_priority;
		trig_param->service_interval = uapsd_param->service_interval;
		trig_param->suspend_interval = uapsd_param->suspend_interval;
		trig_param->delay_interval = uapsd_param->delay_interval;
		trig_param++;
		uapsd_param++;
	}

	wmi_mtrace(WMI_STA_UAPSD_AUTO_TRIG_CMDID, cmd->vdev_id, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, cmd_len,
				   WMI_STA_UAPSD_AUTO_TRIG_CMDID);
	if (QDF_IS_STATUS_ERROR(ret)) {
		WMI_LOGE("Failed to send set uapsd param ret = %d", ret);
		wmi_buf_free(buf);
	}

	return ret;
}

#ifdef WLAN_FEATURE_DSRC
/**
 * send_ocb_set_utc_time_cmd_tlv() - send the UTC time to the firmware
 * @wmi_handle: pointer to the wmi handle
 * @utc: pointer to the UTC time struct
 *
 * Return: 0 on success
 */
static QDF_STATUS send_ocb_set_utc_time_cmd_tlv(wmi_unified_t wmi_handle,
				struct ocb_utc_param *utc)
{
	QDF_STATUS ret;
	wmi_ocb_set_utc_time_cmd_fixed_param *cmd;
	uint8_t *buf_ptr;
	uint32_t len, i;
	wmi_buf_t buf;

	len = sizeof(*cmd);
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE(FL("wmi_buf_alloc failed"));
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (uint8_t *)wmi_buf_data(buf);
	cmd = (wmi_ocb_set_utc_time_cmd_fixed_param *)buf_ptr;
	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_ocb_set_utc_time_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(wmi_ocb_set_utc_time_cmd_fixed_param));
	cmd->vdev_id = utc->vdev_id;

	/* pack the UTC time and its error bounds byte by byte */
	for (i = 0; i < SIZE_UTC_TIME; i++)
		WMI_UTC_TIME_SET(cmd, i, utc->utc_time[i]);

	for (i = 0; i < SIZE_UTC_TIME_ERROR; i++)
		WMI_TIME_ERROR_SET(cmd, i, utc->time_error[i]);

	wmi_mtrace(WMI_OCB_SET_UTC_TIME_CMDID, cmd->vdev_id, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_OCB_SET_UTC_TIME_CMDID);
	if (QDF_IS_STATUS_ERROR(ret)) {
		WMI_LOGE(FL("Failed to set OCB UTC time"));
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * send_ocb_start_timing_advert_cmd_tlv() - start sending the timing
 *	advertisement frames on a channel
 * @wmi_handle: pointer to the wmi handle
 * @timing_advert: pointer to the timing advertisement struct
 *
 * Return: 0 on success
 */
static QDF_STATUS send_ocb_start_timing_advert_cmd_tlv(wmi_unified_t wmi_handle,
	struct ocb_timing_advert_param *timing_advert)
{
	QDF_STATUS ret;
	wmi_ocb_start_timing_advert_cmd_fixed_param *cmd;
	uint8_t *buf_ptr;
	uint32_t len, len_template;
	wmi_buf_t buf;

	len = sizeof(*cmd) +
		WMI_TLV_HDR_SIZE;

	len_template = timing_advert->template_length;
	/* Add padding to the template if needed (TLV payloads are 4-byte
	 * aligned); the unpadded length is still what is advertised to FW.
	 */
	if (len_template % 4 != 0)
		len_template += 4 - (len_template % 4);
	len += len_template;

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE(FL("wmi_buf_alloc failed"));
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (uint8_t *)wmi_buf_data(buf);
	cmd = (wmi_ocb_start_timing_advert_cmd_fixed_param *)buf_ptr;
	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_ocb_start_timing_advert_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_ocb_start_timing_advert_cmd_fixed_param));
	cmd->vdev_id = timing_advert->vdev_id;
	cmd->repeat_rate = timing_advert->repeat_rate;
	cmd->channel_freq = timing_advert->chan_freq;
	cmd->timestamp_offset = timing_advert->timestamp_offset;
	cmd->time_value_offset = timing_advert->time_value_offset;
	cmd->timing_advert_template_length = timing_advert->template_length;
	buf_ptr += sizeof(*cmd);

	/* Add the timing advert template */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE,
		       len_template);
	qdf_mem_copy(buf_ptr + WMI_TLV_HDR_SIZE,
		     (uint8_t *)timing_advert->template_value,
		     timing_advert->template_length);

	wmi_mtrace(WMI_OCB_START_TIMING_ADVERT_CMDID, cmd->vdev_id, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_OCB_START_TIMING_ADVERT_CMDID);
	if (QDF_IS_STATUS_ERROR(ret)) {
		WMI_LOGE(FL("Failed to start OCB timing advert"));
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * send_ocb_stop_timing_advert_cmd_tlv() - stop sending the timing
 *	advertisement frames on a channel
 * @wmi_handle: pointer to the wmi handle
 * @timing_advert: pointer to the timing advertisement struct
 *
 * Return: 0 on success
 */
static QDF_STATUS send_ocb_stop_timing_advert_cmd_tlv(wmi_unified_t wmi_handle,
	struct ocb_timing_advert_param *timing_advert)
{
	QDF_STATUS ret;
	wmi_ocb_stop_timing_advert_cmd_fixed_param *cmd;
	uint8_t *buf_ptr;
	uint32_t len;
	wmi_buf_t buf;

	len = sizeof(*cmd);
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE(FL("wmi_buf_alloc failed"));
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (uint8_t *)wmi_buf_data(buf);
	cmd = (wmi_ocb_stop_timing_advert_cmd_fixed_param *)buf_ptr;
	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_ocb_stop_timing_advert_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_ocb_stop_timing_advert_cmd_fixed_param));
	cmd->vdev_id = timing_advert->vdev_id;
	cmd->channel_freq = timing_advert->chan_freq;

	wmi_mtrace(WMI_OCB_STOP_TIMING_ADVERT_CMDID, cmd->vdev_id, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_OCB_STOP_TIMING_ADVERT_CMDID);
	if (QDF_IS_STATUS_ERROR(ret)) {
		WMI_LOGE(FL("Failed to stop OCB timing advert"));
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * send_ocb_get_tsf_timer_cmd_tlv() - get ocb tsf timer val
 * @wmi_handle: pointer to the wmi handle
 * @vdev_id: id of the vdev whose TSF timer is queried
 *
 * Return: 0 on success
 */
static QDF_STATUS send_ocb_get_tsf_timer_cmd_tlv(wmi_unified_t wmi_handle,
			uint8_t vdev_id)
{
	QDF_STATUS ret;
	wmi_ocb_get_tsf_timer_cmd_fixed_param *cmd;
	uint8_t *buf_ptr;
	wmi_buf_t buf;
	int32_t len;

	len = sizeof(*cmd);
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE(FL("wmi_buf_alloc failed"));
		return QDF_STATUS_E_NOMEM;
	}
	buf_ptr = (uint8_t *)wmi_buf_data(buf);

	cmd = (wmi_ocb_get_tsf_timer_cmd_fixed_param *)buf_ptr;
	qdf_mem_zero(cmd, len);
	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_ocb_get_tsf_timer_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_ocb_get_tsf_timer_cmd_fixed_param));
	cmd->vdev_id = vdev_id;

	/* Send the WMI command */
	wmi_mtrace(WMI_OCB_GET_TSF_TIMER_CMDID, cmd->vdev_id, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_OCB_GET_TSF_TIMER_CMDID);
	/* If there is an error, set the completion event */
	if (QDF_IS_STATUS_ERROR(ret)) {
		WMI_LOGE(FL("Failed to send WMI message: %d"), ret);
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * send_dcc_get_stats_cmd_tlv() - get the DCC channel stats
 * @wmi_handle: pointer to the wmi handle
 * @get_stats_param: pointer to the dcc stats request parameters
 *
 * Return: 0 on success
 */
static QDF_STATUS send_dcc_get_stats_cmd_tlv(wmi_unified_t wmi_handle,
		     struct ocb_dcc_get_stats_param *get_stats_param)
{
	QDF_STATUS ret;
	wmi_dcc_get_stats_cmd_fixed_param *cmd;
	wmi_dcc_channel_stats_request *channel_stats_array;
	wmi_buf_t buf;
	uint8_t *buf_ptr;
	uint32_t len;
	uint32_t i;

	/* Validate the input: caller's blob must be exactly one request
	 * struct per channel.
	 */
	if (get_stats_param->request_array_len !=
	    get_stats_param->channel_count * sizeof(*channel_stats_array)) {
		WMI_LOGE(FL("Invalid parameter"));
		return QDF_STATUS_E_INVAL;
	}

	/* Allocate memory for the WMI command */
	len = sizeof(*cmd) + WMI_TLV_HDR_SIZE +
		get_stats_param->request_array_len;

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE(FL("wmi_buf_alloc failed"));
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = wmi_buf_data(buf);
	qdf_mem_zero(buf_ptr, len);

	/* Populate the WMI command */
	cmd = (wmi_dcc_get_stats_cmd_fixed_param *)buf_ptr;
	buf_ptr += sizeof(*cmd);

	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_dcc_get_stats_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
			       wmi_dcc_get_stats_cmd_fixed_param));
	cmd->vdev_id = get_stats_param->vdev_id;
	cmd->num_channels = get_stats_param->channel_count;

	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
		       get_stats_param->request_array_len);
	buf_ptr += WMI_TLV_HDR_SIZE;

	/* copy the caller's request array, then stamp each element's TLV
	 * header in case the caller did not pre-fill it
	 */
	channel_stats_array = (wmi_dcc_channel_stats_request *)buf_ptr;
	qdf_mem_copy(channel_stats_array, get_stats_param->request_array,
		     get_stats_param->request_array_len);
	for (i = 0; i < cmd->num_channels; i++)
		WMITLV_SET_HDR(&channel_stats_array[i].tlv_header,
			WMITLV_TAG_STRUC_wmi_dcc_channel_stats_request,
			WMITLV_GET_STRUCT_TLVLEN(
				wmi_dcc_channel_stats_request));

	/* Send the WMI command */
	wmi_mtrace(WMI_DCC_GET_STATS_CMDID, cmd->vdev_id, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_DCC_GET_STATS_CMDID);

	if (QDF_IS_STATUS_ERROR(ret)) {
		WMI_LOGE(FL("Failed to send WMI message: %d"), ret);
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * send_dcc_clear_stats_cmd_tlv() - command to clear the DCC stats
 * @wmi_handle: pointer to the wmi handle
 * @vdev_id: vdev id
 * @dcc_stats_bitmap: bitmap selecting which DCC stats to clear
 *
 * Return: 0 on success
 */
static QDF_STATUS send_dcc_clear_stats_cmd_tlv(wmi_unified_t wmi_handle,
				uint32_t vdev_id, uint32_t dcc_stats_bitmap)
{
	QDF_STATUS ret;
	wmi_dcc_clear_stats_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	uint8_t *buf_ptr;
	uint32_t len;

	/* Allocate memory for the WMI command */
	len = sizeof(*cmd);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE(FL("wmi_buf_alloc failed"));
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = wmi_buf_data(buf);
	qdf_mem_zero(buf_ptr, len);

	/* Populate the WMI command */
	cmd = (wmi_dcc_clear_stats_cmd_fixed_param *)buf_ptr;

	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_dcc_clear_stats_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
			       wmi_dcc_clear_stats_cmd_fixed_param));
	cmd->vdev_id = vdev_id;
	cmd->dcc_stats_bitmap = dcc_stats_bitmap;

	/* Send the WMI command */
	wmi_mtrace(WMI_DCC_CLEAR_STATS_CMDID, cmd->vdev_id, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_DCC_CLEAR_STATS_CMDID);
	if (QDF_IS_STATUS_ERROR(ret)) {
		WMI_LOGE(FL("Failed to send the WMI command"));
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * send_dcc_update_ndl_cmd_tlv() - command to update the NDL data
 * @wmi_handle: pointer to the wmi handle
 * @update_ndl_param: pointer to the request parameters
 *
 * Return: 0 on success
 */
static QDF_STATUS send_dcc_update_ndl_cmd_tlv(wmi_unified_t wmi_handle,
		       struct ocb_dcc_update_ndl_param *update_ndl_param)
{
	QDF_STATUS qdf_status;
	wmi_dcc_update_ndl_cmd_fixed_param *cmd;
	wmi_dcc_ndl_chan *ndl_chan_array;
	wmi_dcc_ndl_active_state_config *ndl_active_state_array;
	uint32_t active_state_count;
	wmi_buf_t buf;
	uint8_t *buf_ptr;
	uint32_t len;
	uint32_t i;

	/* validate the input: the channel list must be exactly one
	 * wmi_dcc_ndl_chan per channel, and the active-state list must match
	 * the total active-state count encoded in those channel entries
	 */
	if (update_ndl_param->dcc_ndl_chan_list_len !=
	    update_ndl_param->channel_count * sizeof(*ndl_chan_array)) {
		WMI_LOGE(FL("Invalid parameter"));
		return QDF_STATUS_E_INVAL;
	}
	active_state_count = 0;
	ndl_chan_array = update_ndl_param->dcc_ndl_chan_list;
	for (i = 0; i < update_ndl_param->channel_count; i++)
		active_state_count +=
			WMI_NDL_NUM_ACTIVE_STATE_GET(&ndl_chan_array[i]);
	if (update_ndl_param->dcc_ndl_active_state_list_len !=
	    active_state_count * sizeof(*ndl_active_state_array)) {
		WMI_LOGE(FL("Invalid parameter"));
		return QDF_STATUS_E_INVAL;
	}

	/* Allocate memory for the WMI command */
	len = sizeof(*cmd) +
		WMI_TLV_HDR_SIZE + update_ndl_param->dcc_ndl_chan_list_len +
		WMI_TLV_HDR_SIZE +
		update_ndl_param->dcc_ndl_active_state_list_len;

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE(FL("wmi_buf_alloc failed"));
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = wmi_buf_data(buf);
	qdf_mem_zero(buf_ptr, len);

	/* Populate the WMI command */
	cmd = (wmi_dcc_update_ndl_cmd_fixed_param *)buf_ptr;
	buf_ptr += sizeof(*cmd);

	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_dcc_update_ndl_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
			       wmi_dcc_update_ndl_cmd_fixed_param));
	cmd->vdev_id = update_ndl_param->vdev_id;
	cmd->num_channel = update_ndl_param->channel_count;

	/* per-channel NDL config array */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
		       update_ndl_param->dcc_ndl_chan_list_len);
	buf_ptr += WMI_TLV_HDR_SIZE;

	ndl_chan_array = (wmi_dcc_ndl_chan *)buf_ptr;
	qdf_mem_copy(ndl_chan_array, update_ndl_param->dcc_ndl_chan_list,
		     update_ndl_param->dcc_ndl_chan_list_len);
	for (i = 0; i < cmd->num_channel; i++)
		WMITLV_SET_HDR(&ndl_chan_array[i].tlv_header,
			WMITLV_TAG_STRUC_wmi_dcc_ndl_chan,
			WMITLV_GET_STRUCT_TLVLEN(
				wmi_dcc_ndl_chan));
	buf_ptr += update_ndl_param->dcc_ndl_chan_list_len;

	/* active-state config array */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
		       update_ndl_param->dcc_ndl_active_state_list_len);
	buf_ptr += WMI_TLV_HDR_SIZE;

	ndl_active_state_array = (wmi_dcc_ndl_active_state_config *) buf_ptr;
	qdf_mem_copy(ndl_active_state_array,
		     update_ndl_param->dcc_ndl_active_state_list,
		     update_ndl_param->dcc_ndl_active_state_list_len);
	for (i = 0; i < active_state_count; i++) {
		WMITLV_SET_HDR(&ndl_active_state_array[i].tlv_header,
			WMITLV_TAG_STRUC_wmi_dcc_ndl_active_state_config,
			WMITLV_GET_STRUCT_TLVLEN(
				wmi_dcc_ndl_active_state_config));
	}
	buf_ptr += update_ndl_param->dcc_ndl_active_state_list_len;

	/* Send the WMI command */
	wmi_mtrace(WMI_DCC_UPDATE_NDL_CMDID, cmd->vdev_id, 0);
	qdf_status = wmi_unified_cmd_send(wmi_handle, buf, len,
					  WMI_DCC_UPDATE_NDL_CMDID);
	/* If there is an error, set the completion event */
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		WMI_LOGE(FL("Failed to send WMI message: %d"), qdf_status);
		wmi_buf_free(buf);
	}

	return
		qdf_status;
}

/**
 * send_ocb_set_config_cmd_tlv() - send the OCB config to the FW
 * @wmi_handle: pointer to the wmi handle
 * @config: the OCB configuration
 *
 * Serializes the full OCB configuration: per-channel wmi_channel and
 * wmi_ocb_channel arrays, WMI_MAX_NUM_AC QoS parameters per channel,
 * optional DCC NDL channel/active-state blobs, and the schedule elements.
 *
 * Return: 0 on success
 */
static QDF_STATUS send_ocb_set_config_cmd_tlv(wmi_unified_t wmi_handle,
					      struct ocb_config *config)
{
	QDF_STATUS ret;
	wmi_ocb_set_config_cmd_fixed_param *cmd;
	wmi_channel *chan;
	wmi_ocb_channel *ocb_chan;
	wmi_qos_parameter *qos_param;
	wmi_dcc_ndl_chan *ndl_chan;
	wmi_dcc_ndl_active_state_config *ndl_active_config;
	wmi_ocb_schedule_element *sched_elem;
	uint8_t *buf_ptr;
	wmi_buf_t buf;
	int32_t len;
	int32_t i, j, active_state_count;

	/*
	 * Validate the dcc_ndl_chan_list_len and count the number of active
	 * states. Validate dcc_ndl_active_state_list_len.
	 */
	active_state_count = 0;
	if (config->dcc_ndl_chan_list_len) {
		if (!config->dcc_ndl_chan_list ||
		    config->dcc_ndl_chan_list_len !=
		    config->channel_count * sizeof(wmi_dcc_ndl_chan)) {
			WMI_LOGE(FL("NDL channel is invalid. List len: %d"),
				 config->dcc_ndl_chan_list_len);
			return QDF_STATUS_E_INVAL;
		}

		for (i = 0, ndl_chan = config->dcc_ndl_chan_list;
		     i < config->channel_count; ++i, ++ndl_chan)
			active_state_count +=
				WMI_NDL_NUM_ACTIVE_STATE_GET(ndl_chan);

		if (active_state_count) {
			if (!config->dcc_ndl_active_state_list ||
			    config->dcc_ndl_active_state_list_len !=
			    active_state_count *
			    sizeof(wmi_dcc_ndl_active_state_config)) {
				WMI_LOGE(FL("NDL active state is invalid."));
				return QDF_STATUS_E_INVAL;
			}
		}
	}

	/* fixed param + six TLV arrays; must match the append order below */
	len = sizeof(*cmd) +
		WMI_TLV_HDR_SIZE + config->channel_count *
			sizeof(wmi_channel) +
		WMI_TLV_HDR_SIZE + config->channel_count *
			sizeof(wmi_ocb_channel) +
		WMI_TLV_HDR_SIZE + config->channel_count *
			sizeof(wmi_qos_parameter) * WMI_MAX_NUM_AC +
		WMI_TLV_HDR_SIZE + config->dcc_ndl_chan_list_len +
		WMI_TLV_HDR_SIZE + active_state_count *
			sizeof(wmi_dcc_ndl_active_state_config) +
		WMI_TLV_HDR_SIZE + config->schedule_size *
			sizeof(wmi_ocb_schedule_element);
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE(FL("wmi_buf_alloc failed"));
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (uint8_t *)wmi_buf_data(buf);
	cmd = (wmi_ocb_set_config_cmd_fixed_param *)buf_ptr;
	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_ocb_set_config_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(wmi_ocb_set_config_cmd_fixed_param));
	cmd->vdev_id = config->vdev_id;
	cmd->channel_count = config->channel_count;
	cmd->schedule_size = config->schedule_size;
	cmd->flags = config->flags;
	buf_ptr += sizeof(*cmd);

	/* Add the wmi_channel info */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
		       config->channel_count*sizeof(wmi_channel));
	buf_ptr += WMI_TLV_HDR_SIZE;
	for (i = 0; i < config->channel_count; i++) {
		chan = (wmi_channel *)buf_ptr;
		WMITLV_SET_HDR(&chan->tlv_header,
			       WMITLV_TAG_STRUC_wmi_channel,
			       WMITLV_GET_STRUCT_TLVLEN(wmi_channel));
		chan->mhz = config->channels[i].chan_freq;
		chan->band_center_freq1 = config->channels[i].chan_freq;
		chan->band_center_freq2 = 0;
		chan->info = 0;

		WMI_SET_CHANNEL_MODE(chan, config->channels[i].ch_mode);
		WMI_SET_CHANNEL_MAX_POWER(chan, config->channels[i].max_pwr);
		WMI_SET_CHANNEL_MIN_POWER(chan, config->channels[i].min_pwr);
		WMI_SET_CHANNEL_MAX_TX_POWER(chan, config->channels[i].max_pwr);
		WMI_SET_CHANNEL_REG_POWER(chan, config->channels[i].reg_pwr);
		WMI_SET_CHANNEL_ANTENNA_MAX(chan,
					    config->channels[i].antenna_max);

		/* sub-10/20 MHz widths are flagged as quarter/half rate */
		if (config->channels[i].bandwidth < 10)
			WMI_SET_CHANNEL_FLAG(chan, WMI_CHAN_FLAG_QUARTER_RATE);
		else if (config->channels[i].bandwidth < 20)
			WMI_SET_CHANNEL_FLAG(chan, WMI_CHAN_FLAG_HALF_RATE);
		buf_ptr += sizeof(*chan);
	}

	/* Add the wmi_ocb_channel info */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
		       config->channel_count*sizeof(wmi_ocb_channel));
	buf_ptr += WMI_TLV_HDR_SIZE;
	for (i = 0; i < config->channel_count; i++) {
		ocb_chan = (wmi_ocb_channel *)buf_ptr;
		WMITLV_SET_HDR(&ocb_chan->tlv_header,
			       WMITLV_TAG_STRUC_wmi_ocb_channel,
			       WMITLV_GET_STRUCT_TLVLEN(wmi_ocb_channel));
		ocb_chan->bandwidth = config->channels[i].bandwidth;
		WMI_CHAR_ARRAY_TO_MAC_ADDR(
					config->channels[i].mac_address.bytes,
					&ocb_chan->mac_address);
		buf_ptr += sizeof(*ocb_chan);
	}

	/* Add the wmi_qos_parameter info */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
		config->channel_count * sizeof(wmi_qos_parameter)*WMI_MAX_NUM_AC);
	buf_ptr += WMI_TLV_HDR_SIZE;
	/* WMI_MAX_NUM_AC parameters for each channel */
	for (i = 0; i < config->channel_count; i++) {
		for (j = 0; j < WMI_MAX_NUM_AC; j++) {
			qos_param = (wmi_qos_parameter *)buf_ptr;
			WMITLV_SET_HDR(&qos_param->tlv_header,
				WMITLV_TAG_STRUC_wmi_qos_parameter,
				WMITLV_GET_STRUCT_TLVLEN(wmi_qos_parameter));
			qos_param->aifsn =
				config->channels[i].qos_params[j].aifsn;
			qos_param->cwmin =
				config->channels[i].qos_params[j].cwmin;
			qos_param->cwmax =
				config->channels[i].qos_params[j].cwmax;
			buf_ptr += sizeof(*qos_param);
		}
	}

	/* Add the wmi_dcc_ndl_chan (per channel) */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
		       config->dcc_ndl_chan_list_len);
	buf_ptr += WMI_TLV_HDR_SIZE;
	if (config->dcc_ndl_chan_list_len) {
		ndl_chan = (wmi_dcc_ndl_chan *)buf_ptr;
		qdf_mem_copy(ndl_chan, config->dcc_ndl_chan_list,
			     config->dcc_ndl_chan_list_len);
		for (i = 0; i < config->channel_count; i++)
			WMITLV_SET_HDR(&(ndl_chan[i].tlv_header),
				WMITLV_TAG_STRUC_wmi_dcc_ndl_chan,
				WMITLV_GET_STRUCT_TLVLEN(wmi_dcc_ndl_chan));
		buf_ptr += config->dcc_ndl_chan_list_len;
	}

	/* Add the wmi_dcc_ndl_active_state_config */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, active_state_count *
		       sizeof(wmi_dcc_ndl_active_state_config));
	buf_ptr += WMI_TLV_HDR_SIZE;
	if (active_state_count) {
		ndl_active_config = (wmi_dcc_ndl_active_state_config *)buf_ptr;
		qdf_mem_copy(ndl_active_config,
			     config->dcc_ndl_active_state_list,
			     active_state_count * sizeof(*ndl_active_config));
		for (i = 0; i < active_state_count; ++i)
			WMITLV_SET_HDR(&(ndl_active_config[i].tlv_header),
				WMITLV_TAG_STRUC_wmi_dcc_ndl_active_state_config,
				WMITLV_GET_STRUCT_TLVLEN(
					wmi_dcc_ndl_active_state_config));
		buf_ptr += active_state_count *
			sizeof(*ndl_active_config);
	}

	/* Add the wmi_ocb_schedule_element info */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
		config->schedule_size * sizeof(wmi_ocb_schedule_element));
	buf_ptr += WMI_TLV_HDR_SIZE;
	for (i = 0; i < config->schedule_size; i++) {
		sched_elem = (wmi_ocb_schedule_element *)buf_ptr;
		WMITLV_SET_HDR(&sched_elem->tlv_header,
			WMITLV_TAG_STRUC_wmi_ocb_schedule_element,
			WMITLV_GET_STRUCT_TLVLEN(wmi_ocb_schedule_element));
		sched_elem->channel_freq = config->schedule[i].chan_freq;
		sched_elem->total_duration = config->schedule[i].total_duration;
		sched_elem->guard_interval = config->schedule[i].guard_interval;
		buf_ptr += sizeof(*sched_elem);
	}


	wmi_mtrace(WMI_OCB_SET_CONFIG_CMDID, cmd->vdev_id, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_OCB_SET_CONFIG_CMDID);
	if (QDF_IS_STATUS_ERROR(ret)) {
		WMI_LOGE("Failed to set OCB config");
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * extract_ocb_channel_config_resp_tlv() - extract ocb channel config resp
 * @wmi_handle: wmi handle
 * @evt_buf: wmi event buffer
 * @status: status buffer
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS extract_ocb_channel_config_resp_tlv(wmi_unified_t wmi_handle,
						      void *evt_buf,
						      uint32_t *status)
{
	WMI_OCB_SET_CONFIG_RESP_EVENTID_param_tlvs *param_tlvs;
	wmi_ocb_set_config_resp_event_fixed_param *fix_param;

	param_tlvs = evt_buf;
	fix_param = param_tlvs->fixed_param;

	*status = fix_param->status;
	return QDF_STATUS_SUCCESS;
}

/**
 * extract_ocb_tsf_timer_tlv() - extract TSF timer from event buffer
 * @wmi_handle: wmi handle
 * @evt_buf: wmi event buffer
 * @resp: response buffer
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS extract_ocb_tsf_timer_tlv(wmi_unified_t wmi_handle,
			void *evt_buf, struct ocb_get_tsf_timer_response *resp)
{
	WMI_OCB_GET_TSF_TIMER_RESP_EVENTID_param_tlvs *param_tlvs;
	wmi_ocb_get_tsf_timer_resp_event_fixed_param *fix_param;

	param_tlvs = evt_buf;
	fix_param = param_tlvs->fixed_param;
	resp->vdev_id = fix_param->vdev_id;
	resp->timer_high = fix_param->tsf_timer_high;
	resp->timer_low = fix_param->tsf_timer_low;

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_ocb_ndl_resp_tlv() - extract NDL update response from event buffer
 * @wmi_handle: wmi handle
 * @evt_buf: wmi event buffer
 * @resp: response buffer
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS extract_ocb_ndl_resp_tlv(wmi_unified_t wmi_handle,
		void *evt_buf, struct ocb_dcc_update_ndl_response *resp)
{
	WMI_DCC_UPDATE_NDL_RESP_EVENTID_param_tlvs *param_tlvs;
	wmi_dcc_update_ndl_resp_event_fixed_param *fix_param;

	param_tlvs = evt_buf;
	fix_param =
		param_tlvs->fixed_param;
	resp->vdev_id = fix_param->vdev_id;
	resp->status = fix_param->status;
	return QDF_STATUS_SUCCESS;
}

/**
 * extract_ocb_dcc_stats_tlv() - extract DCC stats from event buffer
 * @wmi_handle: wmi handle
 * @evt_buf: wmi event buffer
 * @resp: response buffer
 *
 * Since length of stats is variable, buffer for DCC stats will be allocated
 * in this function. The caller must free the buffer.
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS extract_ocb_dcc_stats_tlv(wmi_unified_t wmi_handle,
		void *evt_buf, struct ocb_dcc_get_stats_response **resp)
{
	struct ocb_dcc_get_stats_response *response;
	WMI_DCC_GET_STATS_RESP_EVENTID_param_tlvs *param_tlvs;
	wmi_dcc_get_stats_resp_event_fixed_param *fix_param;

	param_tlvs = (WMI_DCC_GET_STATS_RESP_EVENTID_param_tlvs *)evt_buf;
	fix_param = param_tlvs->fixed_param;

	/* Allocate and populate the response. Reject firmware-supplied
	 * counts that would overflow the message size or exceed the number
	 * of TLVs actually present (untrusted input from FW).
	 */
	if (fix_param->num_channels > ((WMI_SVC_MSG_MAX_SIZE -
	    sizeof(*fix_param)) / sizeof(wmi_dcc_ndl_stats_per_channel)) ||
	    fix_param->num_channels > param_tlvs->num_stats_per_channel_list) {
		WMI_LOGW("%s: too many channels:%d actual:%d", __func__,
			 fix_param->num_channels,
			 param_tlvs->num_stats_per_channel_list);
		*resp = NULL;
		return QDF_STATUS_E_INVAL;
	}
	/* single allocation: header followed by the stats array */
	response = qdf_mem_malloc(sizeof(*response) + fix_param->num_channels *
		sizeof(wmi_dcc_ndl_stats_per_channel));
	*resp = response;
	if (!response)
		return QDF_STATUS_E_NOMEM;

	response->vdev_id = fix_param->vdev_id;
	response->num_channels = fix_param->num_channels;
	response->channel_stats_array_len =
		fix_param->num_channels *
		sizeof(wmi_dcc_ndl_stats_per_channel);
	response->channel_stats_array = ((uint8_t *)response) +
					sizeof(*response);
	qdf_mem_copy(response->channel_stats_array,
		     param_tlvs->stats_per_channel_list,
		     response->channel_stats_array_len);

	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * send_set_enable_disable_mcc_adaptive_scheduler_cmd_tlv() - enable/disable
 *	mcc scheduler
 * @wmi_handle: wmi handle
 * @mcc_adaptive_scheduler: enable/disable
 * @pdev_id: host pdev id (converted to the target id before sending)
 *
 * This function enable/disable mcc adaptive scheduler in fw.
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
static QDF_STATUS send_set_enable_disable_mcc_adaptive_scheduler_cmd_tlv(
		wmi_unified_t wmi_handle, uint32_t mcc_adaptive_scheduler,
		uint32_t pdev_id)
{
	QDF_STATUS ret;
	wmi_buf_t buf = 0;
	wmi_resmgr_adaptive_ocs_enable_disable_cmd_fixed_param *cmd = NULL;
	uint16_t len =
		sizeof(wmi_resmgr_adaptive_ocs_enable_disable_cmd_fixed_param);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGP("%s : wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}
	cmd = (wmi_resmgr_adaptive_ocs_enable_disable_cmd_fixed_param *)
		wmi_buf_data(buf);

	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_resmgr_adaptive_ocs_enable_disable_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN
		(wmi_resmgr_adaptive_ocs_enable_disable_cmd_fixed_param));
	cmd->enable = mcc_adaptive_scheduler;
	cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target(pdev_id);

	wmi_mtrace(WMI_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMDID, NO_SESSION, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMDID);
	if (QDF_IS_STATUS_ERROR(ret)) {
		WMI_LOGP("%s: Failed to send enable/disable MCC"
			 " adaptive scheduler command", __func__);
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * send_set_mcc_channel_time_latency_cmd_tlv() -set MCC channel time latency
 * @wmi_handle: wmi handle
 * @mcc_channel_freq: mcc channel frequency in MHz
 * @mcc_channel_time_latency: MCC channel time latency in ms
 *
 * Currently used to set time latency for an MCC vdev/adapter using operating
 * channel of it and channel number. The info is provided run time using
 * iwpriv command: iwpriv setMccLatency <latency in ms>.
 *
 * Return: CDF status
 */
static QDF_STATUS send_set_mcc_channel_time_latency_cmd_tlv(wmi_unified_t wmi_handle,
	uint32_t mcc_channel_freq, uint32_t mcc_channel_time_latency)
{
	QDF_STATUS ret;
	wmi_buf_t buf = 0;
	wmi_resmgr_set_chan_latency_cmd_fixed_param *cmdTL = NULL;
	uint16_t len = 0;
	uint8_t *buf_ptr = NULL;
	wmi_resmgr_chan_latency chan_latency;
	/* Note: we only support MCC time latency for a single channel */
	uint32_t num_channels = 1;
	uint32_t chan1_freq = mcc_channel_freq;
	uint32_t latency_chan1 = mcc_channel_time_latency;


	/* If 0ms latency is provided, then FW will set to a default.
	 * Otherwise, latency must be at least 30ms.
	 */
	if ((latency_chan1 > 0) &&
	    (latency_chan1 < WMI_MCC_MIN_NON_ZERO_CHANNEL_LATENCY)) {
		WMI_LOGE("%s: Invalid time latency for Channel #1 = %dms "
			 "Minimum is 30ms (or 0 to use default value by "
			 "firmware)", __func__, latency_chan1);
		return QDF_STATUS_E_INVAL;
	}

	/* Set WMI CMD for channel time latency here */
	len = sizeof(wmi_resmgr_set_chan_latency_cmd_fixed_param) +
	      WMI_TLV_HDR_SIZE +  /*Place holder for chan_time_latency array */
	      num_channels * sizeof(wmi_resmgr_chan_latency);
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s : wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}
	buf_ptr = (uint8_t *) wmi_buf_data(buf);
	cmdTL = (wmi_resmgr_set_chan_latency_cmd_fixed_param *)
		wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmdTL->tlv_header,
		WMITLV_TAG_STRUC_wmi_resmgr_set_chan_latency_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN
		(wmi_resmgr_set_chan_latency_cmd_fixed_param));
	cmdTL->num_chans = num_channels;
	/* Update channel time latency information for home channel(s) */
	buf_ptr += sizeof(*cmdTL);
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE,
		       num_channels * sizeof(wmi_resmgr_chan_latency));
	buf_ptr += WMI_TLV_HDR_SIZE;
	chan_latency.chan_mhz = chan1_freq;
	chan_latency.latency = latency_chan1;
	qdf_mem_copy(buf_ptr, &chan_latency, sizeof(chan_latency));
	wmi_mtrace(WMI_RESMGR_SET_CHAN_LATENCY_CMDID, NO_SESSION, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_RESMGR_SET_CHAN_LATENCY_CMDID);
	if (QDF_IS_STATUS_ERROR(ret)) {
		WMI_LOGE("%s: Failed to send MCC Channel Time Latency command",
			 __func__);
		wmi_buf_free(buf);
		QDF_ASSERT(0);
	}

	return ret;
}

/**
 * send_set_mcc_channel_time_quota_cmd_tlv() -set MCC channel time quota
 * @wmi_handle: wmi handle
 * @adapter_1_chan_freq: adapter 1 channel frequency in MHz
 * @adapter_1_quota: adapter 1 quota in percent; adapter 2 implicitly gets
 *	the remainder (100 - quota)
 * @adapter_2_chan_freq: adapter 2 channel frequency in MHz
 *
 * Return: CDF status
 */
static QDF_STATUS send_set_mcc_channel_time_quota_cmd_tlv(wmi_unified_t wmi_handle,
	uint32_t adapter_1_chan_freq,
	uint32_t adapter_1_quota, uint32_t adapter_2_chan_freq)
{
	QDF_STATUS ret;
	wmi_buf_t buf = 0;
	uint16_t len = 0;
	uint8_t *buf_ptr = NULL;
	wmi_resmgr_set_chan_time_quota_cmd_fixed_param *cmdTQ = NULL;
	wmi_resmgr_chan_time_quota chan_quota;
	uint32_t quota_chan1 = adapter_1_quota;
	/* Knowing quota of 1st chan., derive quota for 2nd chan. */
	uint32_t quota_chan2 = 100 - quota_chan1;
	/* Note: setting time quota for MCC requires info for 2 channels */
	uint32_t num_channels = 2;
	uint32_t chan1_freq = adapter_1_chan_freq;
	uint32_t chan2_freq = adapter_2_chan_freq;

	WMI_LOGD("%s: freq1:%dMHz, Quota1:%dms, "
		 "freq2:%dMHz, Quota2:%dms", __func__,
		 chan1_freq, quota_chan1, chan2_freq,
		 quota_chan2);

	/*
	 * Perform sanity check on time quota values provided.
	 */
	if (quota_chan1 < WMI_MCC_MIN_CHANNEL_QUOTA ||
	    quota_chan1 > WMI_MCC_MAX_CHANNEL_QUOTA) {
		WMI_LOGE("%s: Invalid time quota for Channel #1=%dms. Minimum "
			 "is 20ms & maximum is 80ms", __func__, quota_chan1);
		return QDF_STATUS_E_INVAL;
	}
	/* Set WMI CMD for channel time quota here */
	len = sizeof(wmi_resmgr_set_chan_time_quota_cmd_fixed_param) +
	      WMI_TLV_HDR_SIZE + /* Place holder for chan_time_quota array */
	      num_channels * sizeof(wmi_resmgr_chan_time_quota);
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s : wmi_buf_alloc failed", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_NOMEM;
	}
	buf_ptr = (uint8_t *) wmi_buf_data(buf);
	cmdTQ = (wmi_resmgr_set_chan_time_quota_cmd_fixed_param *)
		wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmdTQ->tlv_header,
		WMITLV_TAG_STRUC_wmi_resmgr_set_chan_time_quota_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN
		(wmi_resmgr_set_chan_time_quota_cmd_fixed_param));
	cmdTQ->num_chans = num_channels;

	/* Update channel time quota information for home channel(s) */
	buf_ptr += sizeof(*cmdTQ);
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE,
		       num_channels * sizeof(wmi_resmgr_chan_time_quota));
	buf_ptr += WMI_TLV_HDR_SIZE;
	chan_quota.chan_mhz = chan1_freq;
	chan_quota.channel_time_quota = quota_chan1;
	qdf_mem_copy(buf_ptr, &chan_quota, sizeof(chan_quota));
	/* Construct channel and quota record for the 2nd MCC mode. */
	buf_ptr += sizeof(chan_quota);
	chan_quota.chan_mhz = chan2_freq;
	chan_quota.channel_time_quota = quota_chan2;
	qdf_mem_copy(buf_ptr, &chan_quota, sizeof(chan_quota));

	wmi_mtrace(WMI_RESMGR_SET_CHAN_TIME_QUOTA_CMDID, NO_SESSION, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_RESMGR_SET_CHAN_TIME_QUOTA_CMDID);
	if (QDF_IS_STATUS_ERROR(ret)) {
		WMI_LOGE("Failed to send MCC Channel Time Quota command");
		wmi_buf_free(buf);
		QDF_ASSERT(0);
	}

	return ret;
}

/**
 * send_set_thermal_mgmt_cmd_tlv() - set thermal mgmt command to fw
 * @wmi_handle: Pointer to wmi handle
 * @thermal_info: Thermal command information
 *
 * This function sends the thermal management command
 * to the firmware
 *
 * Return: QDF_STATUS_SUCCESS for success otherwise failure
 */
static QDF_STATUS send_set_thermal_mgmt_cmd_tlv(wmi_unified_t wmi_handle,
				struct thermal_cmd_params *thermal_info)
{
	wmi_thermal_mgmt_cmd_fixed_param *cmd = NULL;
	wmi_buf_t buf = NULL;
	QDF_STATUS status;
	uint32_t len = 0;

	len = sizeof(*cmd);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("Failed to allocate buffer to send set key cmd");
		return QDF_STATUS_E_FAILURE;
	}

	cmd = (wmi_thermal_mgmt_cmd_fixed_param *) wmi_buf_data(buf);

	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_thermal_mgmt_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN
		       (wmi_thermal_mgmt_cmd_fixed_param));

	cmd->lower_thresh_degreeC = thermal_info->min_temp;
	cmd->upper_thresh_degreeC = thermal_info->max_temp;
	cmd->enable = thermal_info->thermal_enable;

	/* NOTE(review): informational message logged at error level;
	 * consider WMI_LOGD here.
	 */
	WMI_LOGE("TM Sending thermal mgmt cmd: low temp %d, upper temp %d, enabled %d",
		 cmd->lower_thresh_degreeC, cmd->upper_thresh_degreeC, cmd->enable);

	wmi_mtrace(WMI_THERMAL_MGMT_CMDID, NO_SESSION, 0);
	status = wmi_unified_cmd_send(wmi_handle, buf, len,
				      WMI_THERMAL_MGMT_CMDID);
	if (QDF_IS_STATUS_ERROR(status)) {
		wmi_buf_free(buf);
		WMI_LOGE("%s:Failed to send thermal mgmt command",
__func__); + } + + return status; +} + + +/** + * send_lro_config_cmd_tlv() - process the LRO config command + * @wmi_handle: Pointer to WMI handle + * @wmi_lro_cmd: Pointer to LRO configuration parameters + * + * This function sends down the LRO configuration parameters to + * the firmware to enable LRO, sets the TCP flags and sets the + * seed values for the toeplitz hash generation + * + * Return: QDF_STATUS_SUCCESS for success otherwise failure + */ +static QDF_STATUS send_lro_config_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_lro_config_cmd_t *wmi_lro_cmd) +{ + wmi_lro_info_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate buffer to send set key cmd"); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_lro_info_cmd_fixed_param *) wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_lro_info_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_lro_info_cmd_fixed_param)); + + cmd->lro_enable = wmi_lro_cmd->lro_enable; + WMI_LRO_INFO_TCP_FLAG_VALS_SET(cmd->tcp_flag_u32, + wmi_lro_cmd->tcp_flag); + WMI_LRO_INFO_TCP_FLAGS_MASK_SET(cmd->tcp_flag_u32, + wmi_lro_cmd->tcp_flag_mask); + cmd->toeplitz_hash_ipv4_0_3 = + wmi_lro_cmd->toeplitz_hash_ipv4[0]; + cmd->toeplitz_hash_ipv4_4_7 = + wmi_lro_cmd->toeplitz_hash_ipv4[1]; + cmd->toeplitz_hash_ipv4_8_11 = + wmi_lro_cmd->toeplitz_hash_ipv4[2]; + cmd->toeplitz_hash_ipv4_12_15 = + wmi_lro_cmd->toeplitz_hash_ipv4[3]; + cmd->toeplitz_hash_ipv4_16 = + wmi_lro_cmd->toeplitz_hash_ipv4[4]; + + cmd->toeplitz_hash_ipv6_0_3 = + wmi_lro_cmd->toeplitz_hash_ipv6[0]; + cmd->toeplitz_hash_ipv6_4_7 = + wmi_lro_cmd->toeplitz_hash_ipv6[1]; + cmd->toeplitz_hash_ipv6_8_11 = + wmi_lro_cmd->toeplitz_hash_ipv6[2]; + cmd->toeplitz_hash_ipv6_12_15 = + wmi_lro_cmd->toeplitz_hash_ipv6[3]; + cmd->toeplitz_hash_ipv6_16_19 = + wmi_lro_cmd->toeplitz_hash_ipv6[4]; + cmd->toeplitz_hash_ipv6_20_23 = + 
wmi_lro_cmd->toeplitz_hash_ipv6[5]; + cmd->toeplitz_hash_ipv6_24_27 = + wmi_lro_cmd->toeplitz_hash_ipv6[6]; + cmd->toeplitz_hash_ipv6_28_31 = + wmi_lro_cmd->toeplitz_hash_ipv6[7]; + cmd->toeplitz_hash_ipv6_32_35 = + wmi_lro_cmd->toeplitz_hash_ipv6[8]; + cmd->toeplitz_hash_ipv6_36_39 = + wmi_lro_cmd->toeplitz_hash_ipv6[9]; + cmd->toeplitz_hash_ipv6_40 = + wmi_lro_cmd->toeplitz_hash_ipv6[10]; + + WMI_LOGD("WMI_LRO_CONFIG: lro_enable %d, tcp_flag 0x%x", + cmd->lro_enable, cmd->tcp_flag_u32); + + wmi_mtrace(WMI_LRO_CONFIG_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + sizeof(*cmd), WMI_LRO_CONFIG_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + wmi_buf_free(buf); + WMI_LOGE("%s:Failed to send WMI_LRO_CONFIG_CMDID", __func__); + } + + return status; +} + +/** + * send_peer_rate_report_cmd_tlv() - process the peer rate report command + * @wmi_handle: Pointer to wmi handle + * @rate_report_params: Pointer to peer rate report parameters + * + * + * Return: QDF_STATUS_SUCCESS for success otherwise failure + */ +static QDF_STATUS send_peer_rate_report_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_peer_rate_report_params *rate_report_params) +{ + wmi_peer_set_rate_report_condition_fixed_param *cmd = NULL; + wmi_buf_t buf = NULL; + QDF_STATUS status = 0; + uint32_t len = 0; + uint32_t i, j; + + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("Failed to alloc buf to peer_set_condition cmd\n"); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_peer_set_rate_report_condition_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR( + &cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_set_rate_report_condition_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_peer_set_rate_report_condition_fixed_param)); + + cmd->enable_rate_report = rate_report_params->rate_report_enable; + cmd->report_backoff_time = rate_report_params->backoff_time; + cmd->report_timer_period = rate_report_params->timer_period; + for (i = 0; i < 
PEER_RATE_REPORT_COND_MAX_NUM; i++) { + cmd->cond_per_phy[i].val_cond_flags = + rate_report_params->report_per_phy[i].cond_flags; + cmd->cond_per_phy[i].rate_delta.min_delta = + rate_report_params->report_per_phy[i].delta.delta_min; + cmd->cond_per_phy[i].rate_delta.percentage = + rate_report_params->report_per_phy[i].delta.percent; + for (j = 0; j < MAX_NUM_OF_RATE_THRESH; j++) { + cmd->cond_per_phy[i].rate_threshold[j] = + rate_report_params->report_per_phy[i]. + report_rate_threshold[j]; + } + } + + WMI_LOGE("%s enable %d backoff_time %d period %d\n", __func__, + cmd->enable_rate_report, + cmd->report_backoff_time, cmd->report_timer_period); + + wmi_mtrace(WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + wmi_buf_free(buf); + WMI_LOGE("%s:Failed to send peer_set_report_cond command", + __func__); + } + return status; +} + +/** + * send_bcn_buf_ll_cmd_tlv() - prepare and send beacon buffer to fw for LL + * @wmi_handle: wmi handle + * @param: bcn ll cmd parameter + * + * Return: QDF_STATUS_SUCCESS for success otherwise failure + */ +static QDF_STATUS send_bcn_buf_ll_cmd_tlv(wmi_unified_t wmi_handle, + wmi_bcn_send_from_host_cmd_fixed_param *param) +{ + wmi_bcn_send_from_host_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + QDF_STATUS ret; + + wmi_buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!wmi_buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_bcn_send_from_host_cmd_fixed_param *) wmi_buf_data(wmi_buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_bcn_send_from_host_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_bcn_send_from_host_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->data_len = param->data_len; + cmd->frame_ctrl = param->frame_ctrl; + cmd->frag_ptr = param->frag_ptr; + cmd->dtim_flag = param->dtim_flag; + + 
wmi_mtrace(WMI_PDEV_SEND_BCN_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, wmi_buf, sizeof(*cmd), + WMI_PDEV_SEND_BCN_CMDID); + + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send WMI_PDEV_SEND_BCN_CMDID command"); + wmi_buf_free(wmi_buf); + } + + return ret; +} + +/** + * send_set_sta_sa_query_param_cmd_tlv() - set sta sa query parameters + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @max_retries: max retries + * @retry_interval: retry interval + * This function sets sta query related parameters in fw. + * + * Return: QDF_STATUS_SUCCESS for success otherwise failure + */ + +static QDF_STATUS send_set_sta_sa_query_param_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, uint32_t max_retries, + uint32_t retry_interval) +{ + wmi_buf_t buf; + WMI_PMF_OFFLOAD_SET_SA_QUERY_CMD_fixed_param *cmd; + int len; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE(FL("wmi_buf_alloc failed")); + return QDF_STATUS_E_FAILURE; + } + + cmd = (WMI_PMF_OFFLOAD_SET_SA_QUERY_CMD_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_PMF_OFFLOAD_SET_SA_QUERY_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_PMF_OFFLOAD_SET_SA_QUERY_CMD_fixed_param)); + + + cmd->vdev_id = vdev_id; + cmd->sa_query_max_retry_count = max_retries; + cmd->sa_query_retry_interval = retry_interval; + + WMI_LOGD(FL("STA sa query: vdev_id:%d interval:%u retry count:%d"), + vdev_id, retry_interval, max_retries); + + wmi_mtrace(WMI_PMF_OFFLOAD_SET_SA_QUERY_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PMF_OFFLOAD_SET_SA_QUERY_CMDID)) { + WMI_LOGE(FL("Failed to offload STA SA Query")); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + WMI_LOGD(FL("Exit :")); + return 0; +} + +/** + * send_set_sta_keep_alive_cmd_tlv() - set sta keep alive parameters + * @wmi_handle: wmi handle + * @params: sta keep alive parameter + * + * This function sets keep alive related 
parameters in fw. + * + * Return: CDF status + */ +static QDF_STATUS send_set_sta_keep_alive_cmd_tlv(wmi_unified_t wmi_handle, + struct sta_params *params) +{ + wmi_buf_t buf; + WMI_STA_KEEPALIVE_CMD_fixed_param *cmd; + WMI_STA_KEEPALVE_ARP_RESPONSE *arp_rsp; + uint8_t *buf_ptr; + int len; + QDF_STATUS ret; + + WMI_LOGD("%s: Enter", __func__); + + len = sizeof(*cmd) + sizeof(*arp_rsp); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("wmi_buf_alloc failed"); + return QDF_STATUS_E_FAILURE; + } + + cmd = (WMI_STA_KEEPALIVE_CMD_fixed_param *) wmi_buf_data(buf); + buf_ptr = (uint8_t *) cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_STA_KEEPALIVE_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_STA_KEEPALIVE_CMD_fixed_param)); + cmd->interval = params->timeperiod; + cmd->enable = (params->timeperiod) ? 1 : 0; + cmd->vdev_id = params->vdev_id; + WMI_LOGD("Keep Alive: vdev_id:%d interval:%u method:%d", params->vdev_id, + params->timeperiod, params->method); + arp_rsp = (WMI_STA_KEEPALVE_ARP_RESPONSE *) (buf_ptr + sizeof(*cmd)); + WMITLV_SET_HDR(&arp_rsp->tlv_header, + WMITLV_TAG_STRUC_WMI_STA_KEEPALVE_ARP_RESPONSE, + WMITLV_GET_STRUCT_TLVLEN(WMI_STA_KEEPALVE_ARP_RESPONSE)); + + if ((params->method == WMI_KEEP_ALIVE_UNSOLICIT_ARP_RSP) || + (params->method == + WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST)) { + if ((NULL == params->hostv4addr) || + (NULL == params->destv4addr) || + (NULL == params->destmac)) { + WMI_LOGE("%s: received null pointer, hostv4addr:%pK " + "destv4addr:%pK destmac:%pK ", __func__, + params->hostv4addr, params->destv4addr, params->destmac); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + cmd->method = params->method; + qdf_mem_copy(&arp_rsp->sender_prot_addr, params->hostv4addr, + WMI_IPV4_ADDR_LEN); + qdf_mem_copy(&arp_rsp->target_prot_addr, params->destv4addr, + WMI_IPV4_ADDR_LEN); + WMI_CHAR_ARRAY_TO_MAC_ADDR(params->destmac, &arp_rsp->dest_mac_addr); + } else { + cmd->method = 
WMI_STA_KEEPALIVE_METHOD_NULL_FRAME; + } + + wmi_mtrace(WMI_STA_KEEPALIVE_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_STA_KEEPALIVE_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to set KeepAlive"); + wmi_buf_free(buf); + } + + WMI_LOGD("%s: Exit", __func__); + return ret; +} + +/** + * send_vdev_set_gtx_cfg_cmd_tlv() - set GTX params + * @wmi_handle: wmi handle + * @if_id: vdev id + * @gtx_info: GTX config params + * + * This function set GTX related params in firmware. + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_vdev_set_gtx_cfg_cmd_tlv(wmi_unified_t wmi_handle, uint32_t if_id, + struct wmi_gtx_config *gtx_info) +{ + wmi_vdev_set_gtx_params_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS ret; + int len = sizeof(wmi_vdev_set_gtx_params_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_set_gtx_params_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_set_gtx_params_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_set_gtx_params_cmd_fixed_param)); + cmd->vdev_id = if_id; + + cmd->gtxRTMask[0] = gtx_info->gtx_rt_mask[0]; + cmd->gtxRTMask[1] = gtx_info->gtx_rt_mask[1]; + cmd->userGtxMask = gtx_info->gtx_usrcfg; + cmd->gtxPERThreshold = gtx_info->gtx_threshold; + cmd->gtxPERMargin = gtx_info->gtx_margin; + cmd->gtxTPCstep = gtx_info->gtx_tpcstep; + cmd->gtxTPCMin = gtx_info->gtx_tpcmin; + cmd->gtxBWMask = gtx_info->gtx_bwmask; + + WMI_LOGD("Setting vdev%d GTX values:htmcs 0x%x, vhtmcs 0x%x, usermask 0x%x, \ + gtxPERThreshold %d, gtxPERMargin %d, gtxTPCstep %d, gtxTPCMin %d, \ + gtxBWMask 0x%x.", if_id, cmd->gtxRTMask[0], cmd->gtxRTMask[1], + cmd->userGtxMask, cmd->gtxPERThreshold, cmd->gtxPERMargin, + cmd->gtxTPCstep, cmd->gtxTPCMin, cmd->gtxBWMask); + + 
wmi_mtrace(WMI_VDEV_SET_GTX_PARAMS_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_SET_GTX_PARAMS_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to set GTX PARAMS"); + wmi_buf_free(buf); + } + return ret; +} + +/** + * send_process_update_edca_param_cmd_tlv() - update EDCA params + * @wmi_handle: wmi handle + * @vdev_id: vdev id. + * @wmm_vparams: edca parameters + * + * This function updates EDCA parameters to the target + * + * Return: CDF Status + */ +static QDF_STATUS send_process_update_edca_param_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, bool mu_edca_param, + struct wmi_host_wme_vparams wmm_vparams[WMI_MAX_NUM_AC]) +{ + uint8_t *buf_ptr; + wmi_buf_t buf; + wmi_vdev_set_wmm_params_cmd_fixed_param *cmd; + wmi_wmm_vparams *wmm_param; + struct wmi_host_wme_vparams *twmm_param; + int len = sizeof(*cmd); + int ac; + + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_vdev_set_wmm_params_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_set_wmm_params_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_set_wmm_params_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->wmm_param_type = mu_edca_param; + + for (ac = 0; ac < WMI_MAX_NUM_AC; ac++) { + wmm_param = (wmi_wmm_vparams *) (&cmd->wmm_params[ac]); + twmm_param = (struct wmi_host_wme_vparams *) (&wmm_vparams[ac]); + WMITLV_SET_HDR(&wmm_param->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_set_wmm_params_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_wmm_vparams)); + wmm_param->cwmin = twmm_param->cwmin; + wmm_param->cwmax = twmm_param->cwmax; + wmm_param->aifs = twmm_param->aifs; + if (mu_edca_param) + wmm_param->mu_edca_timer = twmm_param->mu_edca_timer; + else + wmm_param->txoplimit = twmm_param->txoplimit; + wmm_param->acm = twmm_param->acm; + wmm_param->no_ack = 
twmm_param->noackpolicy; + } + + wmi_mtrace(WMI_VDEV_SET_WMM_PARAMS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_SET_WMM_PARAMS_CMDID)) + goto fail; + + return QDF_STATUS_SUCCESS; + +fail: + wmi_buf_free(buf); + WMI_LOGE("%s: Failed to set WMM Paremeters", __func__); + return QDF_STATUS_E_FAILURE; +} + +/** + * send_probe_rsp_tmpl_send_cmd_tlv() - send probe response template to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @probe_rsp_info: probe response info + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_probe_rsp_tmpl_send_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, + struct wmi_probe_resp_params *probe_rsp_info) +{ + wmi_prb_tmpl_cmd_fixed_param *cmd; + wmi_bcn_prb_info *bcn_prb_info; + wmi_buf_t wmi_buf; + uint32_t tmpl_len, tmpl_len_aligned, wmi_buf_len; + uint8_t *buf_ptr; + QDF_STATUS ret; + + WMI_LOGD(FL("Send probe response template for vdev %d"), vdev_id); + + tmpl_len = probe_rsp_info->prb_rsp_template_len; + tmpl_len_aligned = roundup(tmpl_len, sizeof(uint32_t)); + + wmi_buf_len = sizeof(wmi_prb_tmpl_cmd_fixed_param) + + sizeof(wmi_bcn_prb_info) + WMI_TLV_HDR_SIZE + + tmpl_len_aligned; + + if (wmi_buf_len > WMI_BEACON_TX_BUFFER_SIZE) { + WMI_LOGE(FL("wmi_buf_len: %d > %d. 
Can't send wmi cmd"), + wmi_buf_len, WMI_BEACON_TX_BUFFER_SIZE); + return QDF_STATUS_E_INVAL; + } + + wmi_buf = wmi_buf_alloc(wmi_handle, wmi_buf_len); + if (!wmi_buf) { + WMI_LOGE(FL("wmi_buf_alloc failed")); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + + cmd = (wmi_prb_tmpl_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_prb_tmpl_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_prb_tmpl_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->buf_len = tmpl_len; + buf_ptr += sizeof(wmi_prb_tmpl_cmd_fixed_param); + + bcn_prb_info = (wmi_bcn_prb_info *) buf_ptr; + WMITLV_SET_HDR(&bcn_prb_info->tlv_header, + WMITLV_TAG_STRUC_wmi_bcn_prb_info, + WMITLV_GET_STRUCT_TLVLEN(wmi_bcn_prb_info)); + bcn_prb_info->caps = 0; + bcn_prb_info->erp = 0; + buf_ptr += sizeof(wmi_bcn_prb_info); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, tmpl_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, probe_rsp_info->prb_rsp_template_frm, tmpl_len); + + wmi_mtrace(WMI_PRB_TMPL_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, + wmi_buf, wmi_buf_len, WMI_PRB_TMPL_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE(FL("Failed to send PRB RSP tmpl: %d"), ret); + wmi_buf_free(wmi_buf); + } + + return ret; +} + +#if defined(ATH_SUPPORT_WAPI) || defined(FEATURE_WLAN_WAPI) +#define WPI_IV_LEN 16 + +/** + * wmi_update_wpi_key_counter() - update WAPI tsc and rsc key counters + * + * @dest_tx: destination address of tsc key counter + * @src_tx: source address of tsc key counter + * @dest_rx: destination address of rsc key counter + * @src_rx: source address of rsc key counter + * + * This function copies WAPI tsc and rsc key counters in the wmi buffer. 
+ * + * Return: None + * + */ +static void wmi_update_wpi_key_counter(uint8_t *dest_tx, uint8_t *src_tx, + uint8_t *dest_rx, uint8_t *src_rx) +{ + qdf_mem_copy(dest_tx, src_tx, WPI_IV_LEN); + qdf_mem_copy(dest_rx, src_rx, WPI_IV_LEN); +} +#else +static void wmi_update_wpi_key_counter(uint8_t *dest_tx, uint8_t *src_tx, + uint8_t *dest_rx, uint8_t *src_rx) +{ + return; +} +#endif + +/** + * send_setup_install_key_cmd_tlv() - set key parameters + * @wmi_handle: wmi handle + * @key_params: key parameters + * + * This function fills structure from information + * passed in key_params. + * + * Return: QDF_STATUS_SUCCESS - success + * QDF_STATUS_E_FAILURE - failure + * QDF_STATUS_E_NOMEM - not able to allocate buffer + */ +static QDF_STATUS send_setup_install_key_cmd_tlv(wmi_unified_t wmi_handle, + struct set_key_params *key_params) +{ + wmi_vdev_install_key_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint32_t len; + uint8_t *key_data; + QDF_STATUS status; + + len = sizeof(*cmd) + roundup(key_params->key_len, sizeof(uint32_t)) + + WMI_TLV_HDR_SIZE; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("Failed to allocate buffer to send set key cmd"); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_vdev_install_key_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_install_key_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_install_key_cmd_fixed_param)); + cmd->vdev_id = key_params->vdev_id; + cmd->key_ix = key_params->key_idx; + + + WMI_CHAR_ARRAY_TO_MAC_ADDR(key_params->peer_mac, &cmd->peer_macaddr); + cmd->key_flags |= key_params->key_flags; + cmd->key_cipher = key_params->key_cipher; + if ((key_params->key_txmic_len) && + (key_params->key_rxmic_len)) { + cmd->key_txmic_len = key_params->key_txmic_len; + cmd->key_rxmic_len = key_params->key_rxmic_len; + } +#if defined(ATH_SUPPORT_WAPI) || defined(FEATURE_WLAN_WAPI) + 
wmi_update_wpi_key_counter(cmd->wpi_key_tsc_counter, + key_params->tx_iv, + cmd->wpi_key_rsc_counter, + key_params->rx_iv); +#endif + buf_ptr += sizeof(wmi_vdev_install_key_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + roundup(key_params->key_len, sizeof(uint32_t))); + key_data = (uint8_t *) (buf_ptr + WMI_TLV_HDR_SIZE); + qdf_mem_copy((void *)key_data, + (const void *)key_params->key_data, key_params->key_len); + if (key_params->key_rsc_counter) + qdf_mem_copy(&cmd->key_rsc_counter, key_params->key_rsc_counter, + sizeof(wmi_key_seq_counter)); + cmd->key_len = key_params->key_len; + + wmi_mtrace(WMI_VDEV_INSTALL_KEY_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_INSTALL_KEY_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + qdf_mem_zero(wmi_buf_data(buf), len); + wmi_buf_free(buf); + } + return status; +} + +/** + * send_sar_limit_cmd_tlv() - send sar limit cmd to fw + * @wmi_handle: wmi handle + * @params: sar limit params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_sar_limit_cmd_tlv(wmi_unified_t wmi_handle, + struct sar_limit_cmd_params *sar_limit_params) +{ + wmi_buf_t buf; + QDF_STATUS qdf_status; + wmi_sar_limits_cmd_fixed_param *cmd; + int i; + uint8_t *buf_ptr; + wmi_sar_limit_cmd_row *wmi_sar_rows_list; + struct sar_limit_cmd_row *sar_rows_list; + uint32_t len = sizeof(*cmd) + WMI_TLV_HDR_SIZE; + + len += sizeof(wmi_sar_limit_cmd_row) * sar_limit_params->num_limit_rows; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + qdf_status = QDF_STATUS_E_NOMEM; + goto end; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_sar_limits_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_sar_limits_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_sar_limits_cmd_fixed_param)); + cmd->sar_enable = sar_limit_params->sar_enable; + cmd->commit_limits = 
sar_limit_params->commit_limits; + cmd->num_limit_rows = sar_limit_params->num_limit_rows; + + WMI_LOGD("no of sar rows = %d, len = %d", + sar_limit_params->num_limit_rows, len); + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_sar_limit_cmd_row) * + sar_limit_params->num_limit_rows); + if (cmd->num_limit_rows == 0) + goto send_sar_limits; + + wmi_sar_rows_list = (wmi_sar_limit_cmd_row *) + (buf_ptr + WMI_TLV_HDR_SIZE); + sar_rows_list = sar_limit_params->sar_limit_row_list; + + for (i = 0; i < sar_limit_params->num_limit_rows; i++) { + WMITLV_SET_HDR(&wmi_sar_rows_list->tlv_header, + WMITLV_TAG_STRUC_wmi_sar_limit_cmd_row, + WMITLV_GET_STRUCT_TLVLEN(wmi_sar_limit_cmd_row)); + wmi_sar_rows_list->band_id = sar_rows_list->band_id; + wmi_sar_rows_list->chain_id = sar_rows_list->chain_id; + wmi_sar_rows_list->mod_id = sar_rows_list->mod_id; + wmi_sar_rows_list->limit_value = sar_rows_list->limit_value; + wmi_sar_rows_list->validity_bitmap = + sar_rows_list->validity_bitmap; + WMI_LOGD("row %d, band_id = %d, chain_id = %d, mod_id = %d, limit_value = %d, validity_bitmap = %d", + i, wmi_sar_rows_list->band_id, + wmi_sar_rows_list->chain_id, + wmi_sar_rows_list->mod_id, + wmi_sar_rows_list->limit_value, + wmi_sar_rows_list->validity_bitmap); + sar_rows_list++; + wmi_sar_rows_list++; + } +send_sar_limits: + wmi_mtrace(WMI_SAR_LIMITS_CMDID, NO_SESSION, 0); + qdf_status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_SAR_LIMITS_CMDID); + + if (QDF_IS_STATUS_ERROR(qdf_status)) { + WMI_LOGE("Failed to send WMI_SAR_LIMITS_CMDID"); + wmi_buf_free(buf); + } + +end: + return qdf_status; +} + +static QDF_STATUS get_sar_limit_cmd_tlv(wmi_unified_t wmi_handle) +{ + wmi_sar_get_limits_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + QDF_STATUS status; + + WMI_LOGD(FL("Enter")); + + len = sizeof(*cmd); + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGP(FL("failed to allocate memory for msg")); + return 
QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_sar_get_limits_cmd_fixed_param *)wmi_buf_data(wmi_buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_sar_get_limits_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_sar_get_limits_cmd_fixed_param)); + + cmd->reserved = 0; + + wmi_mtrace(WMI_SAR_GET_LIMITS_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_SAR_GET_LIMITS_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE(FL("Failed to send get SAR limit cmd: %d"), status); + wmi_buf_free(wmi_buf); + } + + WMI_LOGD(FL("Exit")); + + return status; +} + +/** + * wmi_sar2_result_string() - return string conversion of sar2 result + * @result: sar2 result value + * + * This utility function helps log string conversion of sar2 result. + * + * Return: string conversion of sar 2 result, if match found; + * "Unknown response" otherwise. + */ +#ifdef WLAN_DEBUG +static const char *wmi_sar2_result_string(uint32_t result) +{ + switch (result) { + CASE_RETURN_STRING(WMI_SAR2_SUCCESS); + CASE_RETURN_STRING(WMI_SAR2_INVALID_ANTENNA_INDEX); + CASE_RETURN_STRING(WMI_SAR2_INVALID_TABLE_INDEX); + CASE_RETURN_STRING(WMI_SAR2_STATE_ERROR); + CASE_RETURN_STRING(WMI_SAR2_BDF_NO_TABLE); + default: + return "Unknown response"; + } +} +#endif + +/** + * extract_sar2_result_event_tlv() - process sar response event from FW. 
+ * @handle: wma handle + * @event: event buffer + * @len: buffer length + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_sar2_result_event_tlv(void *handle, + uint8_t *event, + uint32_t len) +{ + wmi_sar2_result_event_fixed_param *sar2_fixed_param; + + WMI_SAR2_RESULT_EVENTID_param_tlvs *param_buf = + (WMI_SAR2_RESULT_EVENTID_param_tlvs *) event; + + if (!param_buf) { + WMI_LOGI("Invalid sar2 result event buffer"); + return QDF_STATUS_E_INVAL;; + } + + sar2_fixed_param = param_buf->fixed_param; + if (!sar2_fixed_param) { + WMI_LOGI("Invalid sar2 result event fixed param buffer"); + return QDF_STATUS_E_INVAL;; + } + + WMI_LOGI("SAR2 result: %s", + wmi_sar2_result_string(sar2_fixed_param->result)); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_sar_limit_event_tlv(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct sar_limit_event *event) +{ + wmi_sar_get_limits_event_fixed_param *fixed_param; + WMI_SAR_GET_LIMITS_EVENTID_param_tlvs *param_buf; + wmi_sar_get_limit_event_row *row_in; + struct sar_limit_event_row *row_out; + uint32_t row; + + if (!evt_buf) { + WMI_LOGE(FL("input event is NULL")); + return QDF_STATUS_E_INVAL; + } + if (!event) { + WMI_LOGE(FL("output event is NULL")); + return QDF_STATUS_E_INVAL; + } + + param_buf = (WMI_SAR_GET_LIMITS_EVENTID_param_tlvs *)evt_buf; + + fixed_param = param_buf->fixed_param; + if (!fixed_param) { + WMI_LOGE(FL("Invalid fixed param")); + return QDF_STATUS_E_INVAL; + } + + event->sar_enable = fixed_param->sar_enable; + event->num_limit_rows = fixed_param->num_limit_rows; + + if (event->num_limit_rows > param_buf->num_sar_get_limits) { + WMI_LOGE(FL("Num rows %d exceeds sar_get_limits rows len %d"), + event->num_limit_rows, param_buf->num_sar_get_limits); + return QDF_STATUS_E_INVAL; + } + + if (event->num_limit_rows > MAX_SAR_LIMIT_ROWS_SUPPORTED) { + QDF_ASSERT(0); + WMI_LOGE(FL("Num rows %d exceeds max of %d"), + event->num_limit_rows, + MAX_SAR_LIMIT_ROWS_SUPPORTED); + 
event->num_limit_rows = MAX_SAR_LIMIT_ROWS_SUPPORTED; + } + + row_in = param_buf->sar_get_limits; + if (!row_in) { + WMI_LOGD("sar_get_limits is NULL"); + } else { + row_out = &event->sar_limit_row[0]; + for (row = 0; row < event->num_limit_rows; row++) { + row_out->band_id = row_in->band_id; + row_out->chain_id = row_in->chain_id; + row_out->mod_id = row_in->mod_id; + row_out->limit_value = row_in->limit_value; + row_out++; + row_in++; + } + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_FEATURE_DISA +/** + * send_encrypt_decrypt_send_cmd() - send encrypt/decrypt cmd to fw + * @wmi_handle: wmi handle + * @params: encrypt/decrypt params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static +QDF_STATUS send_encrypt_decrypt_send_cmd_tlv(wmi_unified_t wmi_handle, + struct disa_encrypt_decrypt_req_params *encrypt_decrypt_params) +{ + wmi_vdev_encrypt_decrypt_data_req_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint8_t *buf_ptr; + QDF_STATUS ret; + uint32_t len; + + WMI_LOGD(FL("Send encrypt decrypt cmd")); + + len = sizeof(*cmd) + + roundup(encrypt_decrypt_params->data_len, sizeof(uint32_t)) + + WMI_TLV_HDR_SIZE; + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGP("%s: failed to allocate memory for encrypt/decrypt msg", + __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = wmi_buf_data(wmi_buf); + cmd = (wmi_vdev_encrypt_decrypt_data_req_cmd_fixed_param *)buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_encrypt_decrypt_data_req_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_vdev_encrypt_decrypt_data_req_cmd_fixed_param)); + + cmd->vdev_id = encrypt_decrypt_params->vdev_id; + cmd->key_flag = encrypt_decrypt_params->key_flag; + cmd->key_idx = encrypt_decrypt_params->key_idx; + cmd->key_cipher = encrypt_decrypt_params->key_cipher; + cmd->key_len = encrypt_decrypt_params->key_len; + cmd->key_txmic_len = encrypt_decrypt_params->key_txmic_len; + cmd->key_rxmic_len = 
encrypt_decrypt_params->key_rxmic_len; + + qdf_mem_copy(cmd->key_data, encrypt_decrypt_params->key_data, + encrypt_decrypt_params->key_len); + + qdf_mem_copy(cmd->mac_hdr, encrypt_decrypt_params->mac_header, + MAX_MAC_HEADER_LEN); + + cmd->data_len = encrypt_decrypt_params->data_len; + + if (cmd->data_len) { + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + roundup(encrypt_decrypt_params->data_len, + sizeof(uint32_t))); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, encrypt_decrypt_params->data, + encrypt_decrypt_params->data_len); + } + + /* This conversion is to facilitate data to FW in little endian */ + cmd->pn[5] = encrypt_decrypt_params->pn[0]; + cmd->pn[4] = encrypt_decrypt_params->pn[1]; + cmd->pn[3] = encrypt_decrypt_params->pn[2]; + cmd->pn[2] = encrypt_decrypt_params->pn[3]; + cmd->pn[1] = encrypt_decrypt_params->pn[4]; + cmd->pn[0] = encrypt_decrypt_params->pn[5]; + + wmi_mtrace(WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, + wmi_buf, len, + WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send ENCRYPT DECRYPT cmd: %d", ret); + wmi_buf_free(wmi_buf); + } + + return ret; +} + +/** + * extract_encrypt_decrypt_resp_event_tlv() - extract encrypt decrypt resp + * params from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @resp: Pointer to hold resp parameters + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static +QDF_STATUS extract_encrypt_decrypt_resp_event_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct disa_encrypt_decrypt_resp_params *resp) +{ + WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID_param_tlvs *param_buf; + wmi_vdev_encrypt_decrypt_data_resp_event_fixed_param *data_event; + + param_buf = evt_buf; + if (!param_buf) { + WMI_LOGE("encrypt decrypt resp evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + data_event = param_buf->fixed_param; + + resp->vdev_id = 
data_event->vdev_id; + resp->status = data_event->status; + + if ((data_event->data_length > param_buf->num_enc80211_frame) || + (data_event->data_length > WMI_SVC_MSG_MAX_SIZE - WMI_TLV_HDR_SIZE - + sizeof(*data_event))) { + WMI_LOGE("FW msg data_len %d more than TLV hdr %d", + data_event->data_length, + param_buf->num_enc80211_frame); + return QDF_STATUS_E_INVAL; + } + + resp->data_len = data_event->data_length; + + if (resp->data_len) + resp->data = (uint8_t *)param_buf->enc80211_frame; + + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * send_p2p_go_set_beacon_ie_cmd_tlv() - set beacon IE for p2p go + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @p2p_ie: p2p IE + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_p2p_go_set_beacon_ie_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, uint8_t *p2p_ie) +{ + QDF_STATUS ret; + wmi_p2p_go_set_beacon_ie_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t ie_len, ie_len_aligned, wmi_buf_len; + uint8_t *buf_ptr; + + ie_len = (uint32_t) (p2p_ie[1] + 2); + + /* More than one P2P IE may be included in a single frame. + If multiple P2P IEs are present, the complete P2P attribute + data consists of the concatenation of the P2P Attribute + fields of the P2P IEs. The P2P Attributes field of each + P2P IE may be any length up to the maximum (251 octets). 
+ In this case host sends one P2P IE to firmware so the length + should not exceed more than 251 bytes + */ + if (ie_len > 251) { + WMI_LOGE("%s : invalid p2p ie length %u", __func__, ie_len); + return QDF_STATUS_E_INVAL; + } + + ie_len_aligned = roundup(ie_len, sizeof(uint32_t)); + + wmi_buf_len = + sizeof(wmi_p2p_go_set_beacon_ie_fixed_param) + ie_len_aligned + + WMI_TLV_HDR_SIZE; + + wmi_buf = wmi_buf_alloc(wmi_handle, wmi_buf_len); + if (!wmi_buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + + cmd = (wmi_p2p_go_set_beacon_ie_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_p2p_go_set_beacon_ie_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_p2p_go_set_beacon_ie_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->ie_buf_len = ie_len; + + buf_ptr += sizeof(wmi_p2p_go_set_beacon_ie_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, ie_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, p2p_ie, ie_len); + + WMI_LOGD("%s: Sending WMI_P2P_GO_SET_BEACON_IE", __func__); + + wmi_mtrace(WMI_P2P_GO_SET_BEACON_IE, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, + wmi_buf, wmi_buf_len, + WMI_P2P_GO_SET_BEACON_IE); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send bcn tmpl: %d", ret); + wmi_buf_free(wmi_buf); + } + + WMI_LOGD("%s: Successfully sent WMI_P2P_GO_SET_BEACON_IE", __func__); + return ret; +} + +/** + * send_set_gateway_params_cmd_tlv() - set gateway parameters + * @wmi_handle: wmi handle + * @req: gateway parameter update request structure + * + * This function reads the incoming @req and fill in the destination + * WMI structure and sends down the gateway configs down to the firmware + * + * Return: QDF_STATUS + */ +static QDF_STATUS send_set_gateway_params_cmd_tlv(wmi_unified_t wmi_handle, + struct gateway_update_req_param *req) +{ + wmi_roam_subnet_change_config_fixed_param *cmd; + wmi_buf_t 
buf; + QDF_STATUS ret; + int len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_roam_subnet_change_config_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_subnet_change_config_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_roam_subnet_change_config_fixed_param)); + + cmd->vdev_id = req->session_id; + qdf_mem_copy(&cmd->inet_gw_ip_v4_addr, req->ipv4_addr, + QDF_IPV4_ADDR_SIZE); + qdf_mem_copy(&cmd->inet_gw_ip_v6_addr, req->ipv6_addr, + QDF_IPV6_ADDR_SIZE); + WMI_CHAR_ARRAY_TO_MAC_ADDR(req->gw_mac_addr.bytes, + &cmd->inet_gw_mac_addr); + cmd->max_retries = req->max_retries; + cmd->timeout = req->timeout; + cmd->num_skip_subnet_change_detection_bssid_list = 0; + cmd->flag = 0; + if (req->ipv4_addr_type) + WMI_SET_ROAM_SUBNET_CHANGE_FLAG_IP4_ENABLED(cmd->flag); + + if (req->ipv6_addr_type) + WMI_SET_ROAM_SUBNET_CHANGE_FLAG_IP6_ENABLED(cmd->flag); + + wmi_mtrace(WMI_ROAM_SUBNET_CHANGE_CONFIG_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ROAM_SUBNET_CHANGE_CONFIG_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send gw config parameter to fw, ret: %d", + ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_set_rssi_monitoring_cmd_tlv() - set rssi monitoring + * @wmi_handle: wmi handle + * @req: rssi monitoring request structure + * + * This function reads the incoming @req and fill in the destination + * WMI structure and send down the rssi monitoring configs down to the firmware + * + * Return: 0 on success; error number otherwise + */ +static QDF_STATUS send_set_rssi_monitoring_cmd_tlv(wmi_unified_t wmi_handle, + struct rssi_monitor_param *req) +{ + wmi_rssi_breach_monitor_config_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS ret; + uint32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: 
wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_rssi_breach_monitor_config_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_rssi_breach_monitor_config_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_rssi_breach_monitor_config_fixed_param)); + + cmd->vdev_id = req->session_id; + cmd->request_id = req->request_id; + cmd->lo_rssi_reenable_hysteresis = 0; + cmd->hi_rssi_reenable_histeresis = 0; + cmd->min_report_interval = 0; + cmd->max_num_report = 1; + if (req->control) { + /* enable one threshold for each min/max */ + cmd->enabled_bitmap = 0x09; + cmd->low_rssi_breach_threshold[0] = req->min_rssi; + cmd->hi_rssi_breach_threshold[0] = req->max_rssi; + } else { + cmd->enabled_bitmap = 0; + cmd->low_rssi_breach_threshold[0] = 0; + cmd->hi_rssi_breach_threshold[0] = 0; + } + + wmi_mtrace(WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID"); + wmi_buf_free(buf); + } + + WMI_LOGD("Sent WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID to FW"); + + return ret; +} + +/** + * send_scan_probe_setoui_cmd_tlv() - set scan probe OUI + * @wmi_handle: wmi handle + * @psetoui: OUI parameters + * + * set scan probe OUI parameters in firmware + * + * Return: CDF status + */ +static QDF_STATUS send_scan_probe_setoui_cmd_tlv(wmi_unified_t wmi_handle, + struct scan_mac_oui *psetoui) +{ + wmi_scan_prob_req_oui_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + uint8_t *buf_ptr; + uint32_t *oui_buf; + struct probe_req_whitelist_attr *ie_whitelist = &psetoui->ie_whitelist; + + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE + + ie_whitelist->num_vendor_oui * sizeof(wmi_vendor_oui); + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + buf_ptr 
	= (uint8_t *) wmi_buf_data(wmi_buf);
	cmd = (wmi_scan_prob_req_oui_cmd_fixed_param *) buf_ptr;
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_scan_prob_req_oui_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN
			       (wmi_scan_prob_req_oui_cmd_fixed_param));

	/* pack the 3 OUI bytes into one 32-bit word, big-endian byte order */
	oui_buf = &cmd->prob_req_oui;
	qdf_mem_zero(oui_buf, sizeof(cmd->prob_req_oui));
	*oui_buf = psetoui->oui[0] << 16 | psetoui->oui[1] << 8
		   | psetoui->oui[2];
	WMI_LOGD("%s: wmi:oui received from hdd %08x", __func__,
		 cmd->prob_req_oui);

	cmd->vdev_id = psetoui->vdev_id;
	cmd->flags = WMI_SCAN_PROBE_OUI_SPOOFED_MAC_IN_PROBE_REQ;
	if (psetoui->enb_probe_req_sno_randomization)
		cmd->flags |= WMI_SCAN_PROBE_OUI_RANDOM_SEQ_NO_IN_PROBE_REQ;

	/* Optional IE whitelist: fills cmd->ie_bitmap and num_vendor_oui,
	 * the vendor OUI array TLV below must match num_vendor_oui. */
	if (ie_whitelist->white_list) {
		wmi_fill_ie_whitelist_attrs(cmd->ie_bitmap,
					    &cmd->num_vendor_oui,
					    ie_whitelist);
		cmd->flags |=
			WMI_SCAN_PROBE_OUI_ENABLE_IE_WHITELIST_IN_PROBE_REQ;
	}

	buf_ptr += sizeof(*cmd);
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
		       ie_whitelist->num_vendor_oui * sizeof(wmi_vendor_oui));
	buf_ptr += WMI_TLV_HDR_SIZE;

	if (cmd->num_vendor_oui != 0) {
		wmi_fill_vendor_oui(buf_ptr, cmd->num_vendor_oui,
				    ie_whitelist->voui);
		buf_ptr += cmd->num_vendor_oui * sizeof(wmi_vendor_oui);
	}

	wmi_mtrace(WMI_SCAN_PROB_REQ_OUI_CMDID, cmd->vdev_id, 0);
	if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len,
				 WMI_SCAN_PROB_REQ_OUI_CMDID)) {
		WMI_LOGE("%s: failed to send command", __func__);
		wmi_buf_free(wmi_buf);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

#if defined(WLAN_FEATURE_FILS_SK) && defined(WLAN_FEATURE_ROAM_OFFLOAD)
/**
 * wmi_add_fils_tlv() - Add FILS TLV to roam scan offload command
 * @wmi_handle: wmi handle
 * @roam_req: Roam scan offload params
 * @buf_ptr: command buffer to send
 * @fils_tlv_len: fils tlv length (reserved by the caller; not read here)
 *
 * Copies the FILS/ERP re-authentication material (username, rRK, rIK,
 * realm, sequence number) from @roam_req into a
 * wmi_roam_fils_offload_tlv_param appended at @buf_ptr.
 *
 * Return: Updated buffer pointer
 */
static uint8_t *wmi_add_fils_tlv(wmi_unified_t wmi_handle,
				 struct roam_offload_scan_params *roam_req,
				 uint8_t
				 *buf_ptr, uint32_t fils_tlv_len)
{
	wmi_roam_fils_offload_tlv_param *fils_tlv;
	wmi_erp_info *erp_info;
	struct roam_fils_params *roam_fils_params;

	/* Caller sizes the buffer for this TLV only when add_fils_tlv is
	 * set; emitting nothing here keeps buffer accounting consistent. */
	if (!roam_req->add_fils_tlv)
		return buf_ptr;

	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
		       sizeof(*fils_tlv));
	buf_ptr += WMI_TLV_HDR_SIZE;

	fils_tlv = (wmi_roam_fils_offload_tlv_param *)buf_ptr;
	WMITLV_SET_HDR(&fils_tlv->tlv_header,
		       WMITLV_TAG_STRUC_wmi_roam_fils_offload_tlv_param,
		       WMITLV_GET_STRUCT_TLVLEN
			       (wmi_roam_fils_offload_tlv_param));

	roam_fils_params = &roam_req->roam_fils_params;
	erp_info = (wmi_erp_info *)(&fils_tlv->vdev_erp_info);

	erp_info->username_length = roam_fils_params->username_length;
	qdf_mem_copy(erp_info->username, roam_fils_params->username,
		     erp_info->username_length);

	erp_info->next_erp_seq_num = roam_fils_params->next_erp_seq_num;

	erp_info->rRk_length = roam_fils_params->rrk_length;
	qdf_mem_copy(erp_info->rRk, roam_fils_params->rrk,
		     erp_info->rRk_length);

	erp_info->rIk_length = roam_fils_params->rik_length;
	qdf_mem_copy(erp_info->rIk, roam_fils_params->rik,
		     erp_info->rIk_length);

	erp_info->realm_len = roam_fils_params->realm_len;
	qdf_mem_copy(erp_info->realm, roam_fils_params->realm,
		     erp_info->realm_len);

	buf_ptr += sizeof(*fils_tlv);
	return buf_ptr;
}
#else
/* FILS disabled: no-op that leaves the buffer pointer untouched. */
static inline uint8_t *wmi_add_fils_tlv(wmi_unified_t wmi_handle,
			struct roam_offload_scan_params *roam_req,
			uint8_t *buf_ptr, uint32_t fils_tlv_len)
{
	return buf_ptr;
}
#endif
/**
 * send_roam_scan_offload_mode_cmd_tlv() - send roam scan mode request to fw
 * @wmi_handle: wmi handle
 * @scan_cmd_fp: start scan command ptr
 * @roam_req: roam request param
 *
 * send WMI_ROAM_SCAN_MODE TLV to firmware. It has a piggyback
 * of WMI_ROAM_SCAN_MODE.
 *
 * Return: QDF status
 */
static QDF_STATUS send_roam_scan_offload_mode_cmd_tlv(wmi_unified_t wmi_handle,
				wmi_start_scan_cmd_fixed_param *
				scan_cmd_fp,
				struct roam_offload_scan_params *roam_req)
{
	wmi_buf_t buf = NULL;
	QDF_STATUS status;
	int len;
	uint8_t *buf_ptr;
	wmi_roam_scan_mode_fixed_param *roam_scan_mode_fp;

#ifdef WLAN_FEATURE_ROAM_OFFLOAD
	int auth_mode = roam_req->auth_mode;
	roam_offload_param *req_offload_params =
		&roam_req->roam_offload_params;
	wmi_roam_offload_tlv_param *roam_offload_params;
	wmi_roam_11i_offload_tlv_param *roam_offload_11i;
	wmi_roam_11r_offload_tlv_param *roam_offload_11r;
	wmi_roam_ese_offload_tlv_param *roam_offload_ese;
	wmi_tlv_buf_len_param *assoc_ies;
	uint32_t fils_tlv_len = 0;
#endif /* WLAN_FEATURE_ROAM_OFFLOAD */
	/* Need to create a buf with roam_scan command at
	 * front and piggyback with scan command */
	len = sizeof(wmi_roam_scan_mode_fixed_param) +
#ifdef WLAN_FEATURE_ROAM_OFFLOAD
	      (2 * WMI_TLV_HDR_SIZE) +
#endif /* WLAN_FEATURE_ROAM_OFFLOAD */
	      sizeof(wmi_start_scan_cmd_fixed_param);
#ifdef WLAN_FEATURE_ROAM_OFFLOAD
	WMI_LOGD("auth_mode = %d", auth_mode);
	/* NOTE(review): this length computation must stay in lockstep with
	 * the TLV writes performed later in this function - every branch
	 * here mirrors a corresponding WMITLV_SET_HDR sequence below. */
	if (roam_req->is_roam_req_valid &&
	    roam_req->roam_offload_enabled) {
		len += sizeof(wmi_roam_offload_tlv_param);
		len += WMI_TLV_HDR_SIZE;
		/* One auth-specific TLV (ESE, 11R or 11i) is carried when
		 * auth is not NONE, unless it is plain OPEN without an
		 * 11R (MDIE) or ESE association. */
		if ((auth_mode != WMI_AUTH_NONE) &&
		    ((auth_mode != WMI_AUTH_OPEN) ||
		     (auth_mode == WMI_AUTH_OPEN &&
		      roam_req->mdid.mdie_present &&
		      roam_req->is_11r_assoc) ||
		     roam_req->is_ese_assoc)) {
			len += WMI_TLV_HDR_SIZE;
			if (roam_req->is_ese_assoc)
				len +=
					sizeof(wmi_roam_ese_offload_tlv_param);
			else if (auth_mode == WMI_AUTH_FT_RSNA ||
				 auth_mode == WMI_AUTH_FT_RSNA_PSK ||
				 (auth_mode == WMI_AUTH_OPEN &&
				  roam_req->mdid.mdie_present &&
				  roam_req->is_11r_assoc))
				len +=
					sizeof(wmi_roam_11r_offload_tlv_param);
			else
				len +=
					sizeof(wmi_roam_11i_offload_tlv_param);
		} else {
			len += WMI_TLV_HDR_SIZE;
		}

		/* assoc IE TLV: length struct + hdrs + 32-bit padded IEs */
		len += (sizeof(*assoc_ies) + (2*WMI_TLV_HDR_SIZE)
			+ roundup(roam_req->assoc_ie_length,
				  sizeof(uint32_t)));

		if (roam_req->add_fils_tlv) {
			fils_tlv_len = sizeof(
					wmi_roam_fils_offload_tlv_param);
			len += WMI_TLV_HDR_SIZE + fils_tlv_len;
		}
	} else {
		if (roam_req->is_roam_req_valid)
			WMI_LOGD("%s : roam offload = %d",
				 __func__, roam_req->roam_offload_enabled);
		else
			WMI_LOGD("%s : roam_req is NULL", __func__);
		/* offload disabled: only empty TLV headers are emitted */
		len += (4 * WMI_TLV_HDR_SIZE);
	}
	if (roam_req->is_roam_req_valid &&
	    roam_req->roam_offload_enabled) {
		roam_req->mode = roam_req->mode |
				 WMI_ROAM_SCAN_MODE_ROAMOFFLOAD;
	}
#endif /* WLAN_FEATURE_ROAM_OFFLOAD */

	/* NONE|ROAMOFFLOAD means "stop roaming": send the fixed param only */
	if (roam_req->mode == (WMI_ROAM_SCAN_MODE_NONE
			       |WMI_ROAM_SCAN_MODE_ROAMOFFLOAD))
		len = sizeof(wmi_roam_scan_mode_fixed_param);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s : wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (uint8_t *) wmi_buf_data(buf);
	roam_scan_mode_fp = (wmi_roam_scan_mode_fixed_param *) buf_ptr;
	WMITLV_SET_HDR(&roam_scan_mode_fp->tlv_header,
		       WMITLV_TAG_STRUC_wmi_roam_scan_mode_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN
			       (wmi_roam_scan_mode_fixed_param));

	roam_scan_mode_fp->min_delay_roam_trigger_reason_bitmask =
			roam_req->roam_trigger_reason_bitmask;
	roam_scan_mode_fp->min_delay_btw_scans =
			WMI_SEC_TO_MSEC(roam_req->min_delay_btw_roam_scans);
	roam_scan_mode_fp->roam_scan_mode = roam_req->mode;
	roam_scan_mode_fp->vdev_id = roam_req->vdev_id;
	if (roam_req->mode == (WMI_ROAM_SCAN_MODE_NONE |
			       WMI_ROAM_SCAN_MODE_ROAMOFFLOAD)) {
		roam_scan_mode_fp->flags |=
			WMI_ROAM_SCAN_MODE_FLAG_REPORT_STATUS;
		goto send_roam_scan_mode_cmd;
	}

	/* Fill in scan parameters suitable for roaming scan */
	buf_ptr += sizeof(wmi_roam_scan_mode_fixed_param);

	qdf_mem_copy(buf_ptr, scan_cmd_fp,
		     sizeof(wmi_start_scan_cmd_fixed_param));
	/* Ensure there is no additional IEs */
	scan_cmd_fp->ie_len = 0;
	WMITLV_SET_HDR(buf_ptr,
		       WMITLV_TAG_STRUC_wmi_start_scan_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN
			       (wmi_start_scan_cmd_fixed_param));
#ifdef WLAN_FEATURE_ROAM_OFFLOAD
	buf_ptr += sizeof(wmi_start_scan_cmd_fixed_param);
	if (roam_req->is_roam_req_valid && roam_req->roam_offload_enabled) {
		WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
			       sizeof(wmi_roam_offload_tlv_param));
		buf_ptr += WMI_TLV_HDR_SIZE;
		roam_offload_params = (wmi_roam_offload_tlv_param *) buf_ptr;
		WMITLV_SET_HDR(buf_ptr,
			       WMITLV_TAG_STRUC_wmi_roam_offload_tlv_param,
			       WMITLV_GET_STRUCT_TLVLEN
				       (wmi_roam_offload_tlv_param));
		/* Copy generic roam-offload knobs from the host request */
		roam_offload_params->prefer_5g = roam_req->prefer_5ghz;
		roam_offload_params->rssi_cat_gap = roam_req->roam_rssi_cat_gap;
		roam_offload_params->select_5g_margin =
			roam_req->select_5ghz_margin;
		roam_offload_params->handoff_delay_for_rx =
			req_offload_params->ho_delay_for_rx;
		roam_offload_params->max_mlme_sw_retries =
			req_offload_params->roam_preauth_retry_count;
		roam_offload_params->no_ack_timeout =
			req_offload_params->roam_preauth_no_ack_timeout;
		roam_offload_params->reassoc_failure_timeout =
			roam_req->reassoc_failure_timeout;
		roam_offload_params->roam_candidate_validity_time =
			roam_req->rct_validity_timer;

		/* Fill the capabilities */
		roam_offload_params->capability =
				req_offload_params->capability;
		roam_offload_params->ht_caps_info =
				req_offload_params->ht_caps_info;
		roam_offload_params->ampdu_param =
				req_offload_params->ampdu_param;
		roam_offload_params->ht_ext_cap =
				req_offload_params->ht_ext_cap;
		roam_offload_params->ht_txbf = req_offload_params->ht_txbf;
		roam_offload_params->asel_cap = req_offload_params->asel_cap;
		roam_offload_params->qos_caps = req_offload_params->qos_caps;
		roam_offload_params->qos_enabled =
				req_offload_params->qos_enabled;
		roam_offload_params->wmm_caps = req_offload_params->wmm_caps;
		qdf_mem_copy((uint8_t *)roam_offload_params->mcsset,
			     (uint8_t *)req_offload_params->mcsset,
			     ROAM_OFFLOAD_NUM_MCS_SET);

		buf_ptr += sizeof(wmi_roam_offload_tlv_param);
		/* The TLV's are in the order of 11i, 11R, ESE. Hence,
		 * they are filled in the same order.Depending on the
		 * authentication type, the other mode TLV's are nullified
		 * and only headers are filled.*/
		if ((auth_mode != WMI_AUTH_NONE) &&
		    ((auth_mode != WMI_AUTH_OPEN) ||
		     (auth_mode == WMI_AUTH_OPEN
		      && roam_req->mdid.mdie_present &&
		      roam_req->is_11r_assoc) ||
		     roam_req->is_ese_assoc)) {
			if (roam_req->is_ese_assoc) {
				/* zero-length 11i and 11R headers first */
				WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
					       WMITLV_GET_STRUCT_TLVLEN(0));
				buf_ptr += WMI_TLV_HDR_SIZE;
				WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
					       WMITLV_GET_STRUCT_TLVLEN(0));
				buf_ptr += WMI_TLV_HDR_SIZE;
				WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
					sizeof(wmi_roam_ese_offload_tlv_param));
				buf_ptr += WMI_TLV_HDR_SIZE;
				roam_offload_ese =
					(wmi_roam_ese_offload_tlv_param *) buf_ptr;
				qdf_mem_copy(roam_offload_ese->krk,
					     roam_req->krk,
					     sizeof(roam_req->krk));
				qdf_mem_copy(roam_offload_ese->btk,
					     roam_req->btk,
					     sizeof(roam_req->btk));
				WMITLV_SET_HDR(&roam_offload_ese->tlv_header,
					WMITLV_TAG_STRUC_wmi_roam_ese_offload_tlv_param,
					WMITLV_GET_STRUCT_TLVLEN
						(wmi_roam_ese_offload_tlv_param));
				buf_ptr +=
					sizeof(wmi_roam_ese_offload_tlv_param);
			} else if (auth_mode == WMI_AUTH_FT_RSNA
				   || auth_mode == WMI_AUTH_FT_RSNA_PSK
				   || (auth_mode == WMI_AUTH_OPEN
				       && roam_req->mdid.mdie_present &&
				       roam_req->is_11r_assoc)) {
				/* 11R (FT) roaming: empty 11i header, then
				 * the populated 11R TLV */
				WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
					       0);
				buf_ptr += WMI_TLV_HDR_SIZE;
				WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
					sizeof(wmi_roam_11r_offload_tlv_param));
				buf_ptr += WMI_TLV_HDR_SIZE;
				roam_offload_11r =
					(wmi_roam_11r_offload_tlv_param *) buf_ptr;
				roam_offload_11r->r0kh_id_len =
					roam_req->rokh_id_length;
				qdf_mem_copy(roam_offload_11r->r0kh_id,
					     roam_req->rokh_id,
					     roam_offload_11r->r0kh_id_len);
				qdf_mem_copy(roam_offload_11r->psk_msk,
					     roam_req->psk_pmk,
					     sizeof(roam_req->psk_pmk));
				roam_offload_11r->psk_msk_len =
					roam_req->pmk_len;
				roam_offload_11r->mdie_present =
					roam_req->mdid.mdie_present;
				roam_offload_11r->mdid =
					roam_req->mdid.mobility_domain;
				if (auth_mode == WMI_AUTH_OPEN) {
					/* If FT-Open ensure pmk length
					   and r0khid len are zero */
					roam_offload_11r->r0kh_id_len = 0;
					roam_offload_11r->psk_msk_len = 0;
				}
				WMITLV_SET_HDR(&roam_offload_11r->tlv_header,
					WMITLV_TAG_STRUC_wmi_roam_11r_offload_tlv_param,
					WMITLV_GET_STRUCT_TLVLEN
						(wmi_roam_11r_offload_tlv_param));
				buf_ptr +=
					sizeof(wmi_roam_11r_offload_tlv_param);
				/* trailing empty ESE header */
				WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
					       WMITLV_GET_STRUCT_TLVLEN(0));
				buf_ptr += WMI_TLV_HDR_SIZE;
				WMI_LOGD("psk_msk_len = %d",
					 roam_offload_11r->psk_msk_len);
				if (roam_offload_11r->psk_msk_len)
					QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI,
						QDF_TRACE_LEVEL_DEBUG,
						roam_offload_11r->psk_msk,
						roam_offload_11r->psk_msk_len);
			} else {
				/* default: 11i (RSN/PMK) offload TLV */
				WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
					sizeof(wmi_roam_11i_offload_tlv_param));
				buf_ptr += WMI_TLV_HDR_SIZE;
				roam_offload_11i =
					(wmi_roam_11i_offload_tlv_param *) buf_ptr;

				if (roam_req->roam_key_mgmt_offload_enabled &&
				    roam_req->fw_okc) {
					WMI_SET_ROAM_OFFLOAD_OKC_ENABLED
						(roam_offload_11i->flags);
					WMI_LOGI("LFR3:OKC enabled");
				} else {
					WMI_SET_ROAM_OFFLOAD_OKC_DISABLED
						(roam_offload_11i->flags);
					WMI_LOGI("LFR3:OKC disabled");
				}
				if (roam_req->roam_key_mgmt_offload_enabled &&
				    roam_req->fw_pmksa_cache) {
					WMI_SET_ROAM_OFFLOAD_PMK_CACHE_ENABLED
						(roam_offload_11i->flags);
					WMI_LOGI("LFR3:PMKSA caching enabled");
				} else {
					WMI_SET_ROAM_OFFLOAD_PMK_CACHE_DISABLED
						(roam_offload_11i->flags);
					WMI_LOGI("LFR3:PMKSA caching disabled");
				}

				qdf_mem_copy(roam_offload_11i->pmk,
					     roam_req->psk_pmk,
					     sizeof(roam_req->psk_pmk));
				roam_offload_11i->pmk_len = roam_req->pmk_len;
				WMITLV_SET_HDR(&roam_offload_11i->tlv_header,
					WMITLV_TAG_STRUC_wmi_roam_11i_offload_tlv_param,
					WMITLV_GET_STRUCT_TLVLEN
						(wmi_roam_11i_offload_tlv_param));
				buf_ptr +=
					sizeof(wmi_roam_11i_offload_tlv_param);
				/* trailing empty 11R and ESE headers */
				WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
					       0);
				buf_ptr += WMI_TLV_HDR_SIZE;
				WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
					       0);
				buf_ptr += WMI_TLV_HDR_SIZE;
				WMI_LOGD("pmk_len = %d",
					 roam_offload_11i->pmk_len);
				if (roam_offload_11i->pmk_len)
					QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI,
						QDF_TRACE_LEVEL_DEBUG,
						roam_offload_11i->pmk,
						roam_offload_11i->pmk_len);
			}
		} else {
			/* no auth-specific TLV: three empty struct headers */
			WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
				       WMITLV_GET_STRUCT_TLVLEN(0));
			buf_ptr += WMI_TLV_HDR_SIZE;
			WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
				       WMITLV_GET_STRUCT_TLVLEN(0));
			buf_ptr += WMI_TLV_HDR_SIZE;
			WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
				       WMITLV_GET_STRUCT_TLVLEN(0));
			buf_ptr += WMI_TLV_HDR_SIZE;
		}

		/* assoc IE length descriptor followed by the padded IE bytes */
		WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
			       sizeof(*assoc_ies));
		buf_ptr += WMI_TLV_HDR_SIZE;

		assoc_ies = (wmi_tlv_buf_len_param *) buf_ptr;
		WMITLV_SET_HDR(&assoc_ies->tlv_header,
			       WMITLV_TAG_STRUC_wmi_tlv_buf_len_param,
			       WMITLV_GET_STRUCT_TLVLEN(wmi_tlv_buf_len_param));
		assoc_ies->buf_len = roam_req->assoc_ie_length;

		buf_ptr += sizeof(*assoc_ies);

		WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE,
			       roundup(assoc_ies->buf_len, sizeof(uint32_t)));
		buf_ptr += WMI_TLV_HDR_SIZE;

		if (assoc_ies->buf_len != 0) {
			qdf_mem_copy(buf_ptr, roam_req->assoc_ie,
				     assoc_ies->buf_len);
		}
		buf_ptr += qdf_roundup(assoc_ies->buf_len, sizeof(uint32_t));
		buf_ptr = wmi_add_fils_tlv(wmi_handle, roam_req,
					   buf_ptr, fils_tlv_len);
	} else {
		/* offload disabled: emit only empty TLV headers, matching
		 * the (4 * WMI_TLV_HDR_SIZE) reserved in the len phase */
		WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
			       WMITLV_GET_STRUCT_TLVLEN(0));
		buf_ptr += WMI_TLV_HDR_SIZE;
		WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
			       WMITLV_GET_STRUCT_TLVLEN(0));
		buf_ptr += WMI_TLV_HDR_SIZE;
		WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
			       WMITLV_GET_STRUCT_TLVLEN(0));
		buf_ptr += WMI_TLV_HDR_SIZE;
		WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
			       WMITLV_GET_STRUCT_TLVLEN(0));
		buf_ptr +=
			WMI_TLV_HDR_SIZE;
		WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
			       WMITLV_GET_STRUCT_TLVLEN(0));
		buf_ptr += WMI_TLV_HDR_SIZE;
		WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE,
			       WMITLV_GET_STRUCT_TLVLEN(0));
	}
#endif /* WLAN_FEATURE_ROAM_OFFLOAD */

send_roam_scan_mode_cmd:
	wmi_mtrace(WMI_ROAM_SCAN_MODE, NO_SESSION, 0);
	status = wmi_unified_cmd_send(wmi_handle, buf,
				      len, WMI_ROAM_SCAN_MODE);
	if (QDF_IS_STATUS_ERROR(status)) {
		WMI_LOGE(
		    "wmi_unified_cmd_send WMI_ROAM_SCAN_MODE returned Error %d",
		    status);
		wmi_buf_free(buf);
	}

	return status;
}

/*
 * send_roam_mawc_params_cmd_tlv() - configure MAWC (Motion Aided Wifi
 * Connectivity) roaming in firmware from @params.
 * Returns QDF_STATUS_SUCCESS or a QDF error code.
 */
static QDF_STATUS send_roam_mawc_params_cmd_tlv(wmi_unified_t wmi_handle,
		struct wmi_mawc_roam_params *params)
{
	wmi_buf_t buf = NULL;
	QDF_STATUS status;
	int len;
	uint8_t *buf_ptr;
	wmi_roam_configure_mawc_cmd_fixed_param *wmi_roam_mawc_params;

	len = sizeof(*wmi_roam_mawc_params);
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s : wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (uint8_t *) wmi_buf_data(buf);
	wmi_roam_mawc_params =
		(wmi_roam_configure_mawc_cmd_fixed_param *) buf_ptr;
	WMITLV_SET_HDR(&wmi_roam_mawc_params->tlv_header,
		       WMITLV_TAG_STRUC_wmi_roam_configure_mawc_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN
			       (wmi_roam_configure_mawc_cmd_fixed_param));
	wmi_roam_mawc_params->vdev_id = params->vdev_id;
	/* normalize the host bool to a 0/1 firmware flag */
	if (params->enable)
		wmi_roam_mawc_params->enable = 1;
	else
		wmi_roam_mawc_params->enable = 0;
	wmi_roam_mawc_params->traffic_load_threshold =
		params->traffic_load_threshold;
	wmi_roam_mawc_params->best_ap_rssi_threshold =
		params->best_ap_rssi_threshold;
	wmi_roam_mawc_params->rssi_stationary_high_adjust =
		params->rssi_stationary_high_adjust;
	wmi_roam_mawc_params->rssi_stationary_low_adjust =
		params->rssi_stationary_low_adjust;
	WMI_LOGD(FL("MAWC roam en=%d, vdev=%d, tr=%d, ap=%d, high=%d, low=%d"),
		 wmi_roam_mawc_params->enable, wmi_roam_mawc_params->vdev_id,
		 wmi_roam_mawc_params->traffic_load_threshold,
		 wmi_roam_mawc_params->best_ap_rssi_threshold,
		 wmi_roam_mawc_params->rssi_stationary_high_adjust,
		 wmi_roam_mawc_params->rssi_stationary_low_adjust);

	wmi_mtrace(WMI_ROAM_CONFIGURE_MAWC_CMDID, NO_SESSION, 0);
	status = wmi_unified_cmd_send(wmi_handle, buf,
				      len, WMI_ROAM_CONFIGURE_MAWC_CMDID);
	if (QDF_IS_STATUS_ERROR(status)) {
		WMI_LOGE("WMI_ROAM_CONFIGURE_MAWC_CMDID failed, Error %d",
			 status);
		wmi_buf_free(buf);
		return status;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * send_roam_scan_offload_rssi_thresh_cmd_tlv() - set scan offload
 * rssi threashold
 * @wmi_handle: wmi handle
 * @roam_req: Roaming request buffer
 *
 * Send WMI_ROAM_SCAN_RSSI_THRESHOLD TLV to firmware
 *
 * Return: QDF status
 */
static QDF_STATUS send_roam_scan_offload_rssi_thresh_cmd_tlv(wmi_unified_t wmi_handle,
				struct roam_offload_scan_rssi_params *roam_req)
{
	wmi_buf_t buf = NULL;
	QDF_STATUS status;
	int len;
	uint8_t *buf_ptr;
	wmi_roam_scan_rssi_threshold_fixed_param *rssi_threshold_fp;
	wmi_roam_scan_extended_threshold_param *ext_thresholds = NULL;
	wmi_roam_earlystop_rssi_thres_param *early_stop_thresholds = NULL;
	wmi_roam_dense_thres_param *dense_thresholds = NULL;
	wmi_roam_bg_scan_roaming_param *bg_scan_params = NULL;

	/* fixed param + four sub-TLVs, each preceded by a TLV header */
	len = sizeof(wmi_roam_scan_rssi_threshold_fixed_param);
	len += WMI_TLV_HDR_SIZE; /* TLV for ext_thresholds*/
	len += sizeof(wmi_roam_scan_extended_threshold_param);
	len += WMI_TLV_HDR_SIZE;
	len += sizeof(wmi_roam_earlystop_rssi_thres_param);
	len += WMI_TLV_HDR_SIZE; /* TLV for dense thresholds*/
	len += sizeof(wmi_roam_dense_thres_param);
	len += WMI_TLV_HDR_SIZE; /* TLV for BG Scan*/
	len += sizeof(wmi_roam_bg_scan_roaming_param);
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s : wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (uint8_t *) wmi_buf_data(buf);
	rssi_threshold_fp =
		(wmi_roam_scan_rssi_threshold_fixed_param *) buf_ptr;
	WMITLV_SET_HDR(&rssi_threshold_fp->tlv_header,
		WMITLV_TAG_STRUC_wmi_roam_scan_rssi_threshold_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN
			(wmi_roam_scan_rssi_threshold_fixed_param));
	/* fill in threshold values */
	rssi_threshold_fp->vdev_id = roam_req->session_id;
	rssi_threshold_fp->roam_scan_rssi_thresh = roam_req->rssi_thresh;
	rssi_threshold_fp->roam_rssi_thresh_diff = roam_req->rssi_thresh_diff;
	rssi_threshold_fp->hirssi_scan_max_count =
		roam_req->hi_rssi_scan_max_count;
	rssi_threshold_fp->hirssi_scan_delta =
		roam_req->hi_rssi_scan_rssi_delta;
	rssi_threshold_fp->hirssi_upper_bound = roam_req->hi_rssi_scan_rssi_ub;
	rssi_threshold_fp->rssi_thresh_offset_5g =
		roam_req->rssi_thresh_offset_5g;

	/* extended thresholds: 5 GHz boost/penalty shaping */
	buf_ptr += sizeof(wmi_roam_scan_rssi_threshold_fixed_param);
	WMITLV_SET_HDR(buf_ptr,
		       WMITLV_TAG_ARRAY_STRUC,
		       sizeof(wmi_roam_scan_extended_threshold_param));
	buf_ptr += WMI_TLV_HDR_SIZE;
	ext_thresholds = (wmi_roam_scan_extended_threshold_param *) buf_ptr;

	ext_thresholds->penalty_threshold_5g = roam_req->penalty_threshold_5g;
	/* boost threshold only applies above the default noise floor */
	if (roam_req->raise_rssi_thresh_5g >= WMI_NOISE_FLOOR_DBM_DEFAULT)
		ext_thresholds->boost_threshold_5g =
			roam_req->boost_threshold_5g;

	ext_thresholds->boost_algorithm_5g =
		WMI_ROAM_5G_BOOST_PENALIZE_ALGO_LINEAR;
	ext_thresholds->boost_factor_5g = roam_req->raise_factor_5g;
	ext_thresholds->penalty_algorithm_5g =
		WMI_ROAM_5G_BOOST_PENALIZE_ALGO_LINEAR;
	ext_thresholds->penalty_factor_5g = roam_req->drop_factor_5g;
	ext_thresholds->max_boost_5g = roam_req->max_raise_rssi_5g;
	ext_thresholds->max_penalty_5g = roam_req->max_drop_rssi_5g;
	ext_thresholds->good_rssi_threshold = roam_req->good_rssi_threshold;

	WMITLV_SET_HDR(&ext_thresholds->tlv_header,
		WMITLV_TAG_STRUC_wmi_roam_scan_extended_threshold_param,
		WMITLV_GET_STRUCT_TLVLEN
			(wmi_roam_scan_extended_threshold_param));
	buf_ptr += sizeof(wmi_roam_scan_extended_threshold_param);
	/* early-stop window: abort roam scan once inside [min, max] */
	WMITLV_SET_HDR(buf_ptr,
		       WMITLV_TAG_ARRAY_STRUC,
		       sizeof(wmi_roam_earlystop_rssi_thres_param));
	buf_ptr += WMI_TLV_HDR_SIZE;
	early_stop_thresholds = (wmi_roam_earlystop_rssi_thres_param *) buf_ptr;
	early_stop_thresholds->roam_earlystop_thres_min =
		roam_req->roam_earlystop_thres_min;
	early_stop_thresholds->roam_earlystop_thres_max =
		roam_req->roam_earlystop_thres_max;
	WMITLV_SET_HDR(&early_stop_thresholds->tlv_header,
		WMITLV_TAG_STRUC_wmi_roam_earlystop_rssi_thres_param,
		WMITLV_GET_STRUCT_TLVLEN
			(wmi_roam_earlystop_rssi_thres_param));

	/* dense-environment thresholds */
	buf_ptr += sizeof(wmi_roam_earlystop_rssi_thres_param);
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
		       sizeof(wmi_roam_dense_thres_param));
	buf_ptr += WMI_TLV_HDR_SIZE;
	dense_thresholds = (wmi_roam_dense_thres_param *) buf_ptr;
	dense_thresholds->roam_dense_rssi_thres_offset =
		roam_req->dense_rssi_thresh_offset;
	dense_thresholds->roam_dense_min_aps = roam_req->dense_min_aps_cnt;
	dense_thresholds->roam_dense_traffic_thres =
		roam_req->traffic_threshold;
	dense_thresholds->roam_dense_status = roam_req->initial_dense_status;
	WMITLV_SET_HDR(&dense_thresholds->tlv_header,
		WMITLV_TAG_STRUC_wmi_roam_dense_thres_param,
		WMITLV_GET_STRUCT_TLVLEN
			(wmi_roam_dense_thres_param));

	/* background-scan roaming parameters */
	buf_ptr += sizeof(wmi_roam_dense_thres_param);
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
		       sizeof(wmi_roam_bg_scan_roaming_param));
	buf_ptr += WMI_TLV_HDR_SIZE;
	bg_scan_params = (wmi_roam_bg_scan_roaming_param *) buf_ptr;
	bg_scan_params->roam_bg_scan_bad_rssi_thresh =
		roam_req->bg_scan_bad_rssi_thresh;
	bg_scan_params->roam_bg_scan_client_bitmap =
		roam_req->bg_scan_client_bitmap;
	bg_scan_params->bad_rssi_thresh_offset_2g =
		roam_req->roam_bad_rssi_thresh_offset_2g;
	bg_scan_params->flags = roam_req->flags;
	WMITLV_SET_HDR(&bg_scan_params->tlv_header,
		WMITLV_TAG_STRUC_wmi_roam_bg_scan_roaming_param,
		WMITLV_GET_STRUCT_TLVLEN
			(wmi_roam_bg_scan_roaming_param));

	wmi_mtrace(WMI_ROAM_SCAN_RSSI_THRESHOLD, NO_SESSION, 0);
	status = wmi_unified_cmd_send(wmi_handle, buf,
				      len, WMI_ROAM_SCAN_RSSI_THRESHOLD);
	if (QDF_IS_STATUS_ERROR(status)) {
		WMI_LOGE("cmd WMI_ROAM_SCAN_RSSI_THRESHOLD returned Error %d",
			 status);
		wmi_buf_free(buf);
	}

	return status;
}

/**
 * send_adapt_dwelltime_params_cmd_tlv() - send wmi cmd of adaptive dwelltime
 * configuration params
 * @wma_handle: wma handler
 * @dwelltime_params: pointer to dwelltime_params
 *
 * Return: QDF_STATUS_SUCCESS on success and QDF failure reason code for failure
 */
static
QDF_STATUS send_adapt_dwelltime_params_cmd_tlv(wmi_unified_t wmi_handle,
		struct wmi_adaptive_dwelltime_params *dwelltime_params)
{
	wmi_scan_adaptive_dwell_config_fixed_param *dwell_param;
	wmi_scan_adaptive_dwell_parameters_tlv *cmd;
	wmi_buf_t buf;
	uint8_t *buf_ptr;
	int32_t err;
	int len;

	/* fixed param + one parameters TLV (with its header) */
	len = sizeof(wmi_scan_adaptive_dwell_config_fixed_param);
	len += WMI_TLV_HDR_SIZE; /* TLV for ext_thresholds*/
	len += sizeof(wmi_scan_adaptive_dwell_parameters_tlv);
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s :Failed to allocate buffer to send cmd",
			 __func__);
		return QDF_STATUS_E_NOMEM;
	}
	buf_ptr = (uint8_t *) wmi_buf_data(buf);
	dwell_param = (wmi_scan_adaptive_dwell_config_fixed_param *) buf_ptr;
	WMITLV_SET_HDR(&dwell_param->tlv_header,
		WMITLV_TAG_STRUC_wmi_scan_adaptive_dwell_config_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN
			(wmi_scan_adaptive_dwell_config_fixed_param));

	dwell_param->enable = dwelltime_params->is_enabled;
	buf_ptr += sizeof(wmi_scan_adaptive_dwell_config_fixed_param);
	WMITLV_SET_HDR(buf_ptr,
		       WMITLV_TAG_ARRAY_STRUC,
		       sizeof(wmi_scan_adaptive_dwell_parameters_tlv));
	buf_ptr += WMI_TLV_HDR_SIZE;

	cmd = (wmi_scan_adaptive_dwell_parameters_tlv *) buf_ptr;
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_scan_adaptive_dwell_parameters_tlv,
		       WMITLV_GET_STRUCT_TLVLEN(
				wmi_scan_adaptive_dwell_parameters_tlv));

	cmd->default_adaptive_dwell_mode = dwelltime_params->dwelltime_mode;
	cmd->adapative_lpf_weight = dwelltime_params->lpf_weight;
	cmd->passive_monitor_interval_ms = dwelltime_params->passive_mon_intval;
	cmd->wifi_activity_threshold_pct = dwelltime_params->wifi_act_threshold;
	wmi_mtrace(WMI_SCAN_ADAPTIVE_DWELL_CONFIG_CMDID, NO_SESSION, 0);
	err = wmi_unified_cmd_send(wmi_handle, buf,
				   len, WMI_SCAN_ADAPTIVE_DWELL_CONFIG_CMDID);
	if (err) {
		WMI_LOGE("Failed to send adapt dwelltime cmd err=%d", err);
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * send_dbs_scan_sel_params_cmd_tlv() - send wmi cmd of DBS scan selection
 * configuration params
 * @wmi_handle: wmi handler
 * @dbs_scan_params: pointer to wmi_dbs_scan_sel_params
 *
 * Return: QDF_STATUS_SUCCESS on success and QDF failure reason code for failure
 */
static QDF_STATUS send_dbs_scan_sel_params_cmd_tlv(wmi_unified_t wmi_handle,
		struct wmi_dbs_scan_sel_params *dbs_scan_params)
{
	wmi_scan_dbs_duty_cycle_fixed_param *dbs_scan_param;
	wmi_scan_dbs_duty_cycle_tlv_param *cmd;
	wmi_buf_t buf;
	uint8_t *buf_ptr;
	QDF_STATUS err;
	uint32_t i;
	int len;

	/* fixed param + array header + one per-client TLV each */
	len = sizeof(*dbs_scan_param);
	len += WMI_TLV_HDR_SIZE;
	len += dbs_scan_params->num_clients * sizeof(*cmd);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s:Failed to allocate buffer to send cmd", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (uint8_t *) wmi_buf_data(buf);
	dbs_scan_param = (wmi_scan_dbs_duty_cycle_fixed_param *) buf_ptr;
	WMITLV_SET_HDR(&dbs_scan_param->tlv_header,
		       WMITLV_TAG_STRUC_wmi_scan_dbs_duty_cycle_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN
			       (wmi_scan_dbs_duty_cycle_fixed_param));

	dbs_scan_param->num_clients = dbs_scan_params->num_clients;
	dbs_scan_param->pdev_id = dbs_scan_params->pdev_id;
	buf_ptr += sizeof(*dbs_scan_param);
	WMITLV_SET_HDR(buf_ptr,
		       WMITLV_TAG_ARRAY_STRUC,
		       (sizeof(*cmd) * dbs_scan_params->num_clients));
	/* NOTE(review): the (uint8_t) casts on these pointer increments look
	 * unnecessary (the sizes fit in uint8_t today) - confirm before
	 * touching, as truncation would corrupt the offset if sizes grew. */
	buf_ptr = buf_ptr + (uint8_t) WMI_TLV_HDR_SIZE;

	for (i = 0; i < dbs_scan_params->num_clients; i++) {
		cmd = (wmi_scan_dbs_duty_cycle_tlv_param *) buf_ptr;
		WMITLV_SET_HDR(&cmd->tlv_header,
			WMITLV_TAG_STRUC_wmi_scan_dbs_duty_cycle_param_tlv,
			WMITLV_GET_STRUCT_TLVLEN(
				wmi_scan_dbs_duty_cycle_tlv_param));
		cmd->module_id = dbs_scan_params->module_id[i];
		cmd->num_dbs_scans = dbs_scan_params->num_dbs_scans[i];
		cmd->num_non_dbs_scans = dbs_scan_params->num_non_dbs_scans[i];
		buf_ptr = buf_ptr + (uint8_t) sizeof(*cmd);
	}

	wmi_mtrace(WMI_SET_SCAN_DBS_DUTY_CYCLE_CMDID, NO_SESSION, 0);
	err = wmi_unified_cmd_send(wmi_handle, buf,
				   len, WMI_SET_SCAN_DBS_DUTY_CYCLE_CMDID);
	if (QDF_IS_STATUS_ERROR(err)) {
		WMI_LOGE("Failed to send dbs scan selection cmd err=%d", err);
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * send_roam_scan_filter_cmd_tlv() - Filter to be applied while roaming
 * @wmi_handle: wmi handle
 * @roam_req: Request which contains the filters
 *
 * There are filters such as whitelist, blacklist and preferred
 * list that need to be applied to the scan results to form the
 * probable candidates for roaming.
 *
 * Return: Return success upon successfully passing the
 * parameters to the firmware, otherwise failure.
 */
static QDF_STATUS send_roam_scan_filter_cmd_tlv(wmi_unified_t wmi_handle,
				struct roam_scan_filter_params *roam_req)
{
	wmi_buf_t buf = NULL;
	QDF_STATUS status;
	uint32_t i;
	uint32_t len, blist_len = 0;
	uint8_t *buf_ptr;
	wmi_roam_filter_fixed_param *roam_filter;
	uint8_t *bssid_src_ptr = NULL;
	wmi_mac_addr *bssid_dst_ptr = NULL;
	wmi_ssid *ssid_ptr = NULL;
	uint32_t *bssid_preferred_factor_ptr = NULL;
	wmi_roam_lca_disallow_config_tlv_param *blist_param;
	wmi_roam_rssi_rejection_oce_config_param *rssi_rej;

	/* Sum the fixed param and every variable-length list TLV; the
	 * headers are always present, payloads only when non-empty. */
	len = sizeof(wmi_roam_filter_fixed_param);

	len += WMI_TLV_HDR_SIZE;
	if (roam_req->num_bssid_black_list)
		len += roam_req->num_bssid_black_list * sizeof(wmi_mac_addr);
	len += WMI_TLV_HDR_SIZE;
	if (roam_req->num_ssid_white_list)
		len += roam_req->num_ssid_white_list * sizeof(wmi_ssid);
	len += 2 * WMI_TLV_HDR_SIZE;
	if (roam_req->num_bssid_preferred_list) {
		len += roam_req->num_bssid_preferred_list * sizeof(wmi_mac_addr);
		len += roam_req->num_bssid_preferred_list * sizeof(uint32_t);
	}
	len += WMI_TLV_HDR_SIZE;
	if (roam_req->lca_disallow_config_present) {
		len += sizeof(*blist_param);
		blist_len = sizeof(*blist_param);
	}

	len += WMI_TLV_HDR_SIZE;
	if (roam_req->num_rssi_rejection_ap)
		len += roam_req->num_rssi_rejection_ap * sizeof(*rssi_rej);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s : wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (u_int8_t *) wmi_buf_data(buf);
	roam_filter = (wmi_roam_filter_fixed_param *) buf_ptr;
	WMITLV_SET_HDR(&roam_filter->tlv_header,
		WMITLV_TAG_STRUC_wmi_roam_filter_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(wmi_roam_filter_fixed_param));
	/* fill in fixed values */
	roam_filter->vdev_id = roam_req->session_id;
	roam_filter->flags = 0;
	roam_filter->op_bitmap = roam_req->op_bitmap;
	roam_filter->num_bssid_black_list = roam_req->num_bssid_black_list;
	roam_filter->num_ssid_white_list =
		roam_req->num_ssid_white_list;
	roam_filter->num_bssid_preferred_list =
		roam_req->num_bssid_preferred_list;
	roam_filter->num_rssi_rejection_ap =
		roam_req->num_rssi_rejection_ap;
	buf_ptr += sizeof(wmi_roam_filter_fixed_param);

	/* blacklist BSSIDs */
	WMITLV_SET_HDR((buf_ptr),
		WMITLV_TAG_ARRAY_FIXED_STRUC,
		(roam_req->num_bssid_black_list * sizeof(wmi_mac_addr)));
	bssid_src_ptr = (uint8_t *)&roam_req->bssid_avoid_list;
	bssid_dst_ptr = (wmi_mac_addr *)(buf_ptr + WMI_TLV_HDR_SIZE);
	for (i = 0; i < roam_req->num_bssid_black_list; i++) {
		WMI_CHAR_ARRAY_TO_MAC_ADDR(bssid_src_ptr, bssid_dst_ptr);
		bssid_src_ptr += ATH_MAC_LEN;
		bssid_dst_ptr++;
	}
	buf_ptr += WMI_TLV_HDR_SIZE +
		(roam_req->num_bssid_black_list * sizeof(wmi_mac_addr));
	/* whitelist SSIDs */
	WMITLV_SET_HDR((buf_ptr),
		WMITLV_TAG_ARRAY_FIXED_STRUC,
		(roam_req->num_ssid_white_list * sizeof(wmi_ssid)));
	ssid_ptr = (wmi_ssid *)(buf_ptr + WMI_TLV_HDR_SIZE);
	for (i = 0; i < roam_req->num_ssid_white_list; i++) {
		qdf_mem_copy(&ssid_ptr->ssid,
			     &roam_req->ssid_allowed_list[i].mac_ssid,
			     roam_req->ssid_allowed_list[i].length);
		ssid_ptr->ssid_len = roam_req->ssid_allowed_list[i].length;
		ssid_ptr++;
	}
	buf_ptr += WMI_TLV_HDR_SIZE + (roam_req->num_ssid_white_list *
				       sizeof(wmi_ssid));
	/* preferred BSSIDs plus their per-entry preference factors */
	WMITLV_SET_HDR((buf_ptr),
		WMITLV_TAG_ARRAY_FIXED_STRUC,
		(roam_req->num_bssid_preferred_list * sizeof(wmi_mac_addr)));
	bssid_src_ptr = (uint8_t *)&roam_req->bssid_favored;
	bssid_dst_ptr = (wmi_mac_addr *)(buf_ptr + WMI_TLV_HDR_SIZE);
	for (i = 0; i < roam_req->num_bssid_preferred_list; i++) {
		WMI_CHAR_ARRAY_TO_MAC_ADDR(bssid_src_ptr,
					   (wmi_mac_addr *)bssid_dst_ptr);
		bssid_src_ptr += ATH_MAC_LEN;
		bssid_dst_ptr++;
	}
	buf_ptr += WMI_TLV_HDR_SIZE +
		(roam_req->num_bssid_preferred_list * sizeof(wmi_mac_addr));
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32,
		(roam_req->num_bssid_preferred_list * sizeof(uint32_t)));
	bssid_preferred_factor_ptr = (uint32_t *)(buf_ptr + WMI_TLV_HDR_SIZE);
	for (i = 0; i <
roam_req->num_bssid_preferred_list; i++) { + *bssid_preferred_factor_ptr = + roam_req->bssid_favored_factor[i]; + bssid_preferred_factor_ptr++; + } + buf_ptr += WMI_TLV_HDR_SIZE + + (roam_req->num_bssid_preferred_list * sizeof(uint32_t)); + + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, blist_len); + buf_ptr += WMI_TLV_HDR_SIZE; + if (roam_req->lca_disallow_config_present) { + blist_param = + (wmi_roam_lca_disallow_config_tlv_param *) buf_ptr; + WMITLV_SET_HDR(&blist_param->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_lca_disallow_config_tlv_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_roam_lca_disallow_config_tlv_param)); + + blist_param->disallow_duration = roam_req->disallow_duration; + blist_param->rssi_channel_penalization = + roam_req->rssi_channel_penalization; + blist_param->num_disallowed_aps = roam_req->num_disallowed_aps; + blist_param->disallow_lca_enable_source_bitmap = + (WMI_ROAM_LCA_DISALLOW_SOURCE_PER | + WMI_ROAM_LCA_DISALLOW_SOURCE_BACKGROUND); + buf_ptr += (sizeof(wmi_roam_lca_disallow_config_tlv_param)); + } + + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + (roam_req->num_rssi_rejection_ap * sizeof(*rssi_rej))); + buf_ptr += WMI_TLV_HDR_SIZE; + for (i = 0; i < roam_req->num_rssi_rejection_ap; i++) { + rssi_rej = + (wmi_roam_rssi_rejection_oce_config_param *) buf_ptr; + WMITLV_SET_HDR(&rssi_rej->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_rssi_rejection_oce_config_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_roam_rssi_rejection_oce_config_param)); + WMI_CHAR_ARRAY_TO_MAC_ADDR( + roam_req->rssi_rejection_ap[i].bssid.bytes, + &rssi_rej->bssid); + rssi_rej->remaining_disallow_duration = + roam_req->rssi_rejection_ap[i].remaining_duration; + rssi_rej->requested_rssi = + (int32_t)roam_req->rssi_rejection_ap[i].expected_rssi; + buf_ptr += + (sizeof(wmi_roam_rssi_rejection_oce_config_param)); + } + + wmi_mtrace(WMI_ROAM_FILTER_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_ROAM_FILTER_CMDID); + if 
(QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("cmd WMI_ROAM_FILTER_CMDID returned Error %d", + status); + wmi_buf_free(buf); + } + + return status; +} + +#if defined(WLAN_FEATURE_FILS_SK) +static QDF_STATUS send_roam_scan_send_hlp_cmd_tlv(wmi_unified_t wmi_handle, + struct hlp_params *params) +{ + uint32_t len; + uint8_t *buf_ptr; + wmi_buf_t buf = NULL; + wmi_pdev_update_fils_hlp_pkt_cmd_fixed_param *hlp_params; + + len = sizeof(wmi_pdev_update_fils_hlp_pkt_cmd_fixed_param); + len += WMI_TLV_HDR_SIZE; + len += qdf_roundup(params->hlp_ie_len, sizeof(uint32_t)); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + hlp_params = (wmi_pdev_update_fils_hlp_pkt_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&hlp_params->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_update_fils_hlp_pkt_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_update_fils_hlp_pkt_cmd_fixed_param)); + + hlp_params->vdev_id = params->vdev_id; + hlp_params->size = params->hlp_ie_len; + hlp_params->pkt_type = WMI_FILS_HLP_PKT_TYPE_DHCP_DISCOVER; + + buf_ptr += sizeof(*hlp_params); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + round_up(params->hlp_ie_len, + sizeof(uint32_t))); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, params->hlp_ie, params->hlp_ie_len); + + WMI_LOGD(FL("send FILS HLP pkt vdev %d len %d"), + hlp_params->vdev_id, hlp_params->size); + wmi_mtrace(WMI_PDEV_UPDATE_FILS_HLP_PKT_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_UPDATE_FILS_HLP_PKT_CMDID)) { + WMI_LOGE(FL("Failed to send FILS HLP pkt cmd")); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef IPA_OFFLOAD +/** send_ipa_offload_control_cmd_tlv() - ipa offload control parameter + * @wmi_handle: wmi handle + * @ipa_offload: ipa offload control parameter + * + * Returns: 0 on success, error number 
otherwise
 */
static QDF_STATUS send_ipa_offload_control_cmd_tlv(wmi_unified_t wmi_handle,
		struct ipa_uc_offload_control_params *ipa_offload)
{
	wmi_ipa_offload_enable_disable_cmd_fixed_param *cmd;
	wmi_buf_t wmi_buf;
	uint32_t len;
	u_int8_t *buf_ptr;

	/* Message is a single fixed-param TLV; no variable payload. */
	len = sizeof(*cmd);
	wmi_buf = wmi_buf_alloc(wmi_handle, len);
	if (!wmi_buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed (len=%d)", __func__, len);
		return QDF_STATUS_E_NOMEM;
	}

	WMI_LOGD("%s: offload_type=%d, enable=%d", __func__,
		ipa_offload->offload_type, ipa_offload->enable);

	buf_ptr = (u_int8_t *)wmi_buf_data(wmi_buf);

	cmd = (wmi_ipa_offload_enable_disable_cmd_fixed_param *)buf_ptr;
	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUCT_wmi_ipa_offload_enable_disable_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
		wmi_ipa_offload_enable_disable_cmd_fixed_param));

	/* Copy caller's offload selection verbatim into the command. */
	cmd->offload_type = ipa_offload->offload_type;
	cmd->vdev_id = ipa_offload->vdev_id;
	cmd->enable = ipa_offload->enable;

	wmi_mtrace(WMI_IPA_OFFLOAD_ENABLE_DISABLE_CMDID, cmd->vdev_id, 0);
	/* Buffer is freed locally only on send failure (success path
	 * hands the buffer to the WMI layer, matching sibling senders). */
	if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len,
		WMI_IPA_OFFLOAD_ENABLE_DISABLE_CMDID)) {
		WMI_LOGE("%s: failed to command", __func__);
		wmi_buf_free(wmi_buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * send_plm_stop_cmd_tlv() - plm stop request
 * @wmi_handle: wmi handle
 * @plm: plm request parameters
 *
 * This function request FW to stop PLM.
 *
 * Return: CDF status
 */
static QDF_STATUS send_plm_stop_cmd_tlv(wmi_unified_t wmi_handle,
				 const struct plm_req_params *plm)
{
	wmi_vdev_plmreq_stop_cmd_fixed_param *cmd;
	int32_t len;
	wmi_buf_t buf;
	uint8_t *buf_ptr;
	int ret;

	len = sizeof(*cmd);
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: Failed allocate wmi buffer", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	cmd = (wmi_vdev_plmreq_stop_cmd_fixed_param *) wmi_buf_data(buf);

	buf_ptr = (uint8_t *) cmd;

	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_vdev_plmreq_stop_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN
			(wmi_vdev_plmreq_stop_cmd_fixed_param));

	/* Only the vdev and the measurement token identify the PLM
	 * session to stop; no further payload is needed. */
	cmd->vdev_id = plm->session_id;

	cmd->meas_token = plm->meas_token;
	WMI_LOGD("vdev %d meas token %d", cmd->vdev_id, cmd->meas_token);

	wmi_mtrace(WMI_VDEV_PLMREQ_STOP_CMDID, cmd->vdev_id, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				WMI_VDEV_PLMREQ_STOP_CMDID);
	if (ret) {
		WMI_LOGE("%s: Failed to send plm stop wmi cmd", __func__);
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * send_plm_start_cmd_tlv() - plm start request
 * @wmi_handle: wmi handle
 * @plm: plm request parameters
 *
 * This function request FW to start PLM.
+ * + * Return: CDF status + */ +static QDF_STATUS send_plm_start_cmd_tlv(wmi_unified_t wmi_handle, + const struct plm_req_params *plm, + uint32_t *gchannel_list) +{ + wmi_vdev_plmreq_start_cmd_fixed_param *cmd; + uint32_t *channel_list; + int32_t len; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint8_t count; + int ret; + + /* TLV place holder for channel_list */ + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE; + len += sizeof(uint32_t) * plm->plm_num_ch; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_plmreq_start_cmd_fixed_param *) wmi_buf_data(buf); + + buf_ptr = (uint8_t *) cmd; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_plmreq_start_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_plmreq_start_cmd_fixed_param)); + + cmd->vdev_id = plm->session_id; + + cmd->meas_token = plm->meas_token; + cmd->dialog_token = plm->diag_token; + cmd->number_bursts = plm->num_bursts; + cmd->burst_interval = WMI_SEC_TO_MSEC(plm->burst_int); + cmd->off_duration = plm->meas_duration; + cmd->burst_cycle = plm->burst_len; + cmd->tx_power = plm->desired_tx_pwr; + WMI_CHAR_ARRAY_TO_MAC_ADDR(plm->mac_addr.bytes, &cmd->dest_mac); + cmd->num_chans = plm->plm_num_ch; + + buf_ptr += sizeof(wmi_vdev_plmreq_start_cmd_fixed_param); + + WMI_LOGD("vdev : %d measu token : %d", cmd->vdev_id, cmd->meas_token); + WMI_LOGD("dialog_token: %d", cmd->dialog_token); + WMI_LOGD("number_bursts: %d", cmd->number_bursts); + WMI_LOGD("burst_interval: %d", cmd->burst_interval); + WMI_LOGD("off_duration: %d", cmd->off_duration); + WMI_LOGD("burst_cycle: %d", cmd->burst_cycle); + WMI_LOGD("tx_power: %d", cmd->tx_power); + WMI_LOGD("Number of channels : %d", cmd->num_chans); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (cmd->num_chans * sizeof(uint32_t))); + + buf_ptr += WMI_TLV_HDR_SIZE; + if (cmd->num_chans) { + channel_list = (uint32_t *) buf_ptr; + for (count = 0; count < 
cmd->num_chans; count++) { + channel_list[count] = plm->plm_ch_list[count]; + if (channel_list[count] < WMI_NLO_FREQ_THRESH) + channel_list[count] = + gchannel_list[count]; + WMI_LOGD("Ch[%d]: %d MHz", count, channel_list[count]); + } + buf_ptr += cmd->num_chans * sizeof(uint32_t); + } + + wmi_mtrace(WMI_VDEV_PLMREQ_START_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_PLMREQ_START_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send plm start wmi cmd", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_pno_stop_cmd_tlv() - PNO stop request + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * This function request FW to stop ongoing PNO operation. + * + * Return: CDF status + */ +static QDF_STATUS send_pno_stop_cmd_tlv(wmi_unified_t wmi_handle, uint8_t vdev_id) +{ + wmi_nlo_config_cmd_fixed_param *cmd; + int32_t len = sizeof(*cmd); + wmi_buf_t buf; + uint8_t *buf_ptr; + int ret; + + /* + * TLV place holder for array of structures nlo_configured_parameters + * TLV place holder for array of uint32_t channel_list + * TLV place holder for chnl prediction cfg + */ + len += WMI_TLV_HDR_SIZE + WMI_TLV_HDR_SIZE + WMI_TLV_HDR_SIZE; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_nlo_config_cmd_fixed_param *) wmi_buf_data(buf); + buf_ptr = (uint8_t *) cmd; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_nlo_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_nlo_config_cmd_fixed_param)); + + cmd->vdev_id = vdev_id; + cmd->flags = WMI_NLO_CONFIG_STOP; + buf_ptr += sizeof(*cmd); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + + 
wmi_mtrace(WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID, cmd->vdev_id, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
			WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
	if (ret) {
		WMI_LOGE("%s: Failed to send nlo wmi cmd", __func__);
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * wmi_set_pno_channel_prediction() - Set PNO channel prediction
 * @buf_ptr: Buffer passed by upper layers
 * @pno: Buffer to be sent to the firmware
 *
 * Copy the PNO Channel prediction configuration parameters
 * passed by the upper layers to a WMI format TLV and send it
 * down to the firmware.
 *
 * Return: None
 */
static void wmi_set_pno_channel_prediction(uint8_t *buf_ptr,
		struct pno_scan_req_params *pno)
{
	nlo_channel_prediction_cfg *channel_prediction_cfg =
		(nlo_channel_prediction_cfg *) buf_ptr;
	WMITLV_SET_HDR(&channel_prediction_cfg->tlv_header,
		WMITLV_TAG_ARRAY_BYTE,
		WMITLV_GET_STRUCT_TLVLEN(nlo_channel_prediction_cfg));
	/* Prediction fields are populated only when PNO scan support is
	 * compiled in; otherwise the TLV is sent with zeroed fields. */
#ifdef FEATURE_WLAN_SCAN_PNO
	channel_prediction_cfg->enable = pno->pno_channel_prediction;
	channel_prediction_cfg->top_k_num = pno->top_k_num_of_channels;
	channel_prediction_cfg->stationary_threshold = pno->stationary_thresh;
	channel_prediction_cfg->full_scan_period_ms =
		pno->channel_prediction_full_scan;
#endif
	/* NOTE(review): buf_ptr is a by-value local; this increment has no
	 * effect outside this function — the caller advances its own
	 * pointer by sizeof(nlo_channel_prediction_cfg) after returning. */
	buf_ptr += sizeof(nlo_channel_prediction_cfg);
	WMI_LOGD("enable: %d, top_k_num: %d, stat_thresh: %d, full_scan: %d",
			channel_prediction_cfg->enable,
			channel_prediction_cfg->top_k_num,
			channel_prediction_cfg->stationary_threshold,
			channel_prediction_cfg->full_scan_period_ms);
}

/**
 * send_nlo_mawc_cmd_tlv() - Send MAWC NLO configuration
 * @wmi_handle: wmi handle
 * @params: configuration parameters
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS send_nlo_mawc_cmd_tlv(wmi_unified_t wmi_handle,
		struct nlo_mawc_params *params)
{
	wmi_buf_t buf = NULL;
	QDF_STATUS status;
	int len;
	uint8_t *buf_ptr;
	wmi_nlo_configure_mawc_cmd_fixed_param
*wmi_nlo_mawc_params; + + len = sizeof(*wmi_nlo_mawc_params); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + wmi_nlo_mawc_params = + (wmi_nlo_configure_mawc_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&wmi_nlo_mawc_params->tlv_header, + WMITLV_TAG_STRUC_wmi_nlo_configure_mawc_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_nlo_configure_mawc_cmd_fixed_param)); + wmi_nlo_mawc_params->vdev_id = params->vdev_id; + if (params->enable) + wmi_nlo_mawc_params->enable = 1; + else + wmi_nlo_mawc_params->enable = 0; + wmi_nlo_mawc_params->exp_backoff_ratio = params->exp_backoff_ratio; + wmi_nlo_mawc_params->init_scan_interval = params->init_scan_interval; + wmi_nlo_mawc_params->max_scan_interval = params->max_scan_interval; + WMI_LOGD(FL("MAWC NLO en=%d, vdev=%d, ratio=%d, SCAN init=%d, max=%d"), + wmi_nlo_mawc_params->enable, wmi_nlo_mawc_params->vdev_id, + wmi_nlo_mawc_params->exp_backoff_ratio, + wmi_nlo_mawc_params->init_scan_interval, + wmi_nlo_mawc_params->max_scan_interval); + + wmi_mtrace(WMI_NLO_CONFIGURE_MAWC_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_NLO_CONFIGURE_MAWC_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("WMI_NLO_CONFIGURE_MAWC_CMDID failed, Error %d", + status); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_pno_start_cmd_tlv() - PNO start request + * @wmi_handle: wmi handle + * @pno: PNO request + * + * This function request FW to start PNO request. 
+ * Request: CDF status + */ +static QDF_STATUS send_pno_start_cmd_tlv(wmi_unified_t wmi_handle, + struct pno_scan_req_params *pno) +{ + wmi_nlo_config_cmd_fixed_param *cmd; + nlo_configured_parameters *nlo_list; + uint32_t *channel_list; + int32_t len; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint8_t i; + int ret; + struct probe_req_whitelist_attr *ie_whitelist = &pno->ie_whitelist; + connected_nlo_rssi_params *nlo_relative_rssi; + connected_nlo_bss_band_rssi_pref *nlo_band_rssi; + + /* + * TLV place holder for array nlo_configured_parameters(nlo_list) + * TLV place holder for array of uint32_t channel_list + * TLV place holder for chnnl prediction cfg + * TLV place holder for array of wmi_vendor_oui + * TLV place holder for array of connected_nlo_bss_band_rssi_pref + */ + len = sizeof(*cmd) + + WMI_TLV_HDR_SIZE + WMI_TLV_HDR_SIZE + WMI_TLV_HDR_SIZE + + WMI_TLV_HDR_SIZE + WMI_TLV_HDR_SIZE; + + len += sizeof(uint32_t) * QDF_MIN(pno->networks_list[0].channel_cnt, + WMI_NLO_MAX_CHAN); + len += sizeof(nlo_configured_parameters) * + QDF_MIN(pno->networks_cnt, WMI_NLO_MAX_SSIDS); + len += sizeof(nlo_channel_prediction_cfg); + len += sizeof(enlo_candidate_score_params); + len += sizeof(wmi_vendor_oui) * ie_whitelist->num_vendor_oui; + len += sizeof(connected_nlo_rssi_params); + len += sizeof(connected_nlo_bss_band_rssi_pref); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_nlo_config_cmd_fixed_param *) wmi_buf_data(buf); + + buf_ptr = (uint8_t *) cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_nlo_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_nlo_config_cmd_fixed_param)); + cmd->vdev_id = pno->vdev_id; + cmd->flags = WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN; + +#ifdef FEATURE_WLAN_SCAN_PNO + WMI_SCAN_SET_DWELL_MODE(cmd->flags, + pno->adaptive_dwell_mode); +#endif + /* Current FW does not support min-max range for dwell time */ 
+ cmd->active_dwell_time = pno->active_dwell_time; + cmd->passive_dwell_time = pno->passive_dwell_time; + + if (pno->do_passive_scan) + cmd->flags |= WMI_NLO_CONFIG_SCAN_PASSIVE; + /* Copy scan interval */ + cmd->fast_scan_period = pno->fast_scan_period; + cmd->slow_scan_period = pno->slow_scan_period; + cmd->delay_start_time = WMI_SEC_TO_MSEC(pno->delay_start_time); + cmd->fast_scan_max_cycles = pno->fast_scan_max_cycles; + cmd->scan_backoff_multiplier = pno->scan_backoff_multiplier; + WMI_LOGD("fast_scan_period: %d msec slow_scan_period: %d msec", + cmd->fast_scan_period, cmd->slow_scan_period); + WMI_LOGD("fast_scan_max_cycles: %d", cmd->fast_scan_max_cycles); + + /* mac randomization attributes */ + if (pno->scan_random.randomize) { + cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ | + WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ; + wmi_copy_scan_random_mac(pno->scan_random.mac_addr, + pno->scan_random.mac_mask, + &cmd->mac_addr, + &cmd->mac_mask); + } + + buf_ptr += sizeof(wmi_nlo_config_cmd_fixed_param); + + cmd->no_of_ssids = QDF_MIN(pno->networks_cnt, WMI_NLO_MAX_SSIDS); + WMI_LOGD("SSID count : %d", cmd->no_of_ssids); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + cmd->no_of_ssids * sizeof(nlo_configured_parameters)); + buf_ptr += WMI_TLV_HDR_SIZE; + + nlo_list = (nlo_configured_parameters *) buf_ptr; + for (i = 0; i < cmd->no_of_ssids; i++) { + WMITLV_SET_HDR(&nlo_list[i].tlv_header, + WMITLV_TAG_ARRAY_BYTE, + WMITLV_GET_STRUCT_TLVLEN + (nlo_configured_parameters)); + /* Copy ssid and it's length */ + nlo_list[i].ssid.valid = true; + nlo_list[i].ssid.ssid.ssid_len = + pno->networks_list[i].ssid.length; + qdf_mem_copy(nlo_list[i].ssid.ssid.ssid, + pno->networks_list[i].ssid.ssid, + nlo_list[i].ssid.ssid.ssid_len); + WMI_LOGD("index: %d ssid: %.*s len: %d", i, + nlo_list[i].ssid.ssid.ssid_len, + (char *)nlo_list[i].ssid.ssid.ssid, + nlo_list[i].ssid.ssid.ssid_len); + + /* Copy rssi threshold */ + if (pno->networks_list[i].rssi_thresh && + 
pno->networks_list[i].rssi_thresh > + WMI_RSSI_THOLD_DEFAULT) { + nlo_list[i].rssi_cond.valid = true; + nlo_list[i].rssi_cond.rssi = + pno->networks_list[i].rssi_thresh; + WMI_LOGD("RSSI threshold : %d dBm", + nlo_list[i].rssi_cond.rssi); + } + nlo_list[i].bcast_nw_type.valid = true; + nlo_list[i].bcast_nw_type.bcast_nw_type = + pno->networks_list[i].bc_new_type; + WMI_LOGD("Broadcast NW type (%u)", + nlo_list[i].bcast_nw_type.bcast_nw_type); + } + buf_ptr += cmd->no_of_ssids * sizeof(nlo_configured_parameters); + + /* Copy channel info */ + cmd->num_of_channels = QDF_MIN(pno->networks_list[0].channel_cnt, + WMI_NLO_MAX_CHAN); + WMI_LOGD("Channel count: %d", cmd->num_of_channels); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (cmd->num_of_channels * sizeof(uint32_t))); + buf_ptr += WMI_TLV_HDR_SIZE; + + channel_list = (uint32_t *) buf_ptr; + for (i = 0; i < cmd->num_of_channels; i++) { + channel_list[i] = pno->networks_list[0].channels[i]; + + if (channel_list[i] < WMI_NLO_FREQ_THRESH) + channel_list[i] = + wlan_chan_to_freq(pno-> + networks_list[0].channels[i]); + + WMI_LOGD("Ch[%d]: %d MHz", i, channel_list[i]); + } + buf_ptr += cmd->num_of_channels * sizeof(uint32_t); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(nlo_channel_prediction_cfg)); + buf_ptr += WMI_TLV_HDR_SIZE; + wmi_set_pno_channel_prediction(buf_ptr, pno); + buf_ptr += sizeof(nlo_channel_prediction_cfg); + /** TODO: Discrete firmware doesn't have command/option to configure + * App IE which comes from wpa_supplicant as of part PNO start request. 
+ */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_STRUC_enlo_candidate_score_param, + WMITLV_GET_STRUCT_TLVLEN(enlo_candidate_score_params)); + buf_ptr += sizeof(enlo_candidate_score_params); + + if (ie_whitelist->white_list) { + cmd->flags |= WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ; + wmi_fill_ie_whitelist_attrs(cmd->ie_bitmap, + &cmd->num_vendor_oui, + ie_whitelist); + } + + /* ie white list */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + ie_whitelist->num_vendor_oui * sizeof(wmi_vendor_oui)); + buf_ptr += WMI_TLV_HDR_SIZE; + if (cmd->num_vendor_oui != 0) { + wmi_fill_vendor_oui(buf_ptr, cmd->num_vendor_oui, + ie_whitelist->voui); + buf_ptr += cmd->num_vendor_oui * sizeof(wmi_vendor_oui); + } + + if (pno->relative_rssi_set) + cmd->flags |= WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG; + + /* + * Firmware calculation using connected PNO params: + * New AP's RSSI >= (Connected AP's RSSI + relative_rssi +/- rssi_pref) + * deduction of rssi_pref for chosen band_pref and + * addition of rssi_pref for remaining bands (other than chosen band). + */ + nlo_relative_rssi = (connected_nlo_rssi_params *) buf_ptr; + WMITLV_SET_HDR(&nlo_relative_rssi->tlv_header, + WMITLV_TAG_STRUC_wmi_connected_nlo_rssi_params, + WMITLV_GET_STRUCT_TLVLEN(connected_nlo_rssi_params)); + nlo_relative_rssi->relative_rssi = pno->relative_rssi; + WMI_LOGD("relative_rssi %d", nlo_relative_rssi->relative_rssi); + buf_ptr += sizeof(*nlo_relative_rssi); + + /* + * As of now Kernel and Host supports one band and rssi preference. 
+ * Firmware supports array of band and rssi preferences + */ + cmd->num_cnlo_band_pref = 1; + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + cmd->num_cnlo_band_pref * + sizeof(connected_nlo_bss_band_rssi_pref)); + buf_ptr += WMI_TLV_HDR_SIZE; + + nlo_band_rssi = (connected_nlo_bss_band_rssi_pref *) buf_ptr; + for (i = 0; i < cmd->num_cnlo_band_pref; i++) { + WMITLV_SET_HDR(&nlo_band_rssi[i].tlv_header, + WMITLV_TAG_STRUC_wmi_connected_nlo_bss_band_rssi_pref, + WMITLV_GET_STRUCT_TLVLEN( + connected_nlo_bss_band_rssi_pref)); + nlo_band_rssi[i].band = pno->band_rssi_pref.band; + nlo_band_rssi[i].rssi_pref = pno->band_rssi_pref.rssi; + WMI_LOGI("band_pref %d, rssi_pref %d", + nlo_band_rssi[i].band, + nlo_band_rssi[i].rssi_pref); + } + buf_ptr += cmd->num_cnlo_band_pref * sizeof(*nlo_band_rssi); + + wmi_mtrace(WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send nlo wmi cmd", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/* send_set_ric_req_cmd_tlv() - set ric request element + * @wmi_handle: wmi handle + * @msg: message + * @is_add_ts: is addts required + * + * This function sets ric request element for 11r roaming. 
+ * + * Return: CDF status + */ +static QDF_STATUS send_set_ric_req_cmd_tlv(wmi_unified_t wmi_handle, + void *msg, uint8_t is_add_ts) +{ + wmi_ric_request_fixed_param *cmd; + wmi_ric_tspec *tspec_param; + wmi_buf_t buf; + uint8_t *buf_ptr; + struct mac_tspec_ie *ptspecIE = NULL; + int32_t len = sizeof(wmi_ric_request_fixed_param) + + WMI_TLV_HDR_SIZE + sizeof(wmi_ric_tspec); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + + cmd = (wmi_ric_request_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_ric_request_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_ric_request_fixed_param)); + if (is_add_ts) + cmd->vdev_id = ((struct add_ts_param *) msg)->sme_session_id; + else + cmd->vdev_id = ((struct del_ts_params *) msg)->sessionId; + cmd->num_ric_request = 1; + cmd->is_add_ric = is_add_ts; + + buf_ptr += sizeof(wmi_ric_request_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, sizeof(wmi_ric_tspec)); + + buf_ptr += WMI_TLV_HDR_SIZE; + tspec_param = (wmi_ric_tspec *) buf_ptr; + WMITLV_SET_HDR(&tspec_param->tlv_header, + WMITLV_TAG_STRUC_wmi_ric_tspec, + WMITLV_GET_STRUCT_TLVLEN(wmi_ric_tspec)); + + if (is_add_ts) + ptspecIE = &(((struct add_ts_param *) msg)->tspec); +#ifdef WLAN_FEATURE_ROAM_OFFLOAD + else + ptspecIE = &(((struct del_ts_params *) msg)->delTsInfo.tspec); +#endif + if (ptspecIE) { + /* Fill the tsinfo in the format expected by firmware */ +#ifndef ANI_LITTLE_BIT_ENDIAN + qdf_mem_copy(((uint8_t *) &tspec_param->ts_info) + 1, + ((uint8_t *) &ptspecIE->tsinfo) + 1, 2); +#else + qdf_mem_copy(((uint8_t *) &tspec_param->ts_info), + ((uint8_t *) &ptspecIE->tsinfo) + 1, 2); +#endif /* ANI_LITTLE_BIT_ENDIAN */ + + tspec_param->nominal_msdu_size = ptspecIE->nomMsduSz; + tspec_param->maximum_msdu_size = ptspecIE->maxMsduSz; + tspec_param->min_service_interval = ptspecIE->minSvcInterval; + 
tspec_param->max_service_interval = ptspecIE->maxSvcInterval; + tspec_param->inactivity_interval = ptspecIE->inactInterval; + tspec_param->suspension_interval = ptspecIE->suspendInterval; + tspec_param->svc_start_time = ptspecIE->svcStartTime; + tspec_param->min_data_rate = ptspecIE->minDataRate; + tspec_param->mean_data_rate = ptspecIE->meanDataRate; + tspec_param->peak_data_rate = ptspecIE->peakDataRate; + tspec_param->max_burst_size = ptspecIE->maxBurstSz; + tspec_param->delay_bound = ptspecIE->delayBound; + tspec_param->min_phy_rate = ptspecIE->minPhyRate; + tspec_param->surplus_bw_allowance = ptspecIE->surplusBw; + tspec_param->medium_time = 0; + } + WMI_LOGI("%s: Set RIC Req is_add_ts:%d", __func__, is_add_ts); + + wmi_mtrace(WMI_ROAM_SET_RIC_REQUEST_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ROAM_SET_RIC_REQUEST_CMDID)) { + WMI_LOGP("%s: Failed to send vdev Set RIC Req command", + __func__); + if (is_add_ts) + ((struct add_ts_param *) msg)->status = + QDF_STATUS_E_FAILURE; + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_process_ll_stats_clear_cmd_tlv() - clear link layer stats + * @wmi_handle: wmi handle + * @clear_req: ll stats clear request command params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_process_ll_stats_clear_cmd_tlv(wmi_unified_t wmi_handle, + const struct ll_stats_clear_params *clear_req, + uint8_t addr[IEEE80211_ADDR_LEN]) +{ + wmi_clear_link_stats_cmd_fixed_param *cmd; + int32_t len; + wmi_buf_t buf; + uint8_t *buf_ptr; + int ret; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + qdf_mem_zero(buf_ptr, len); + cmd = (wmi_clear_link_stats_cmd_fixed_param *) buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + 
WMITLV_TAG_STRUC_wmi_clear_link_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_clear_link_stats_cmd_fixed_param)); + + cmd->stop_stats_collection_req = clear_req->stop_req; + cmd->vdev_id = clear_req->sta_id; + cmd->stats_clear_req_mask = clear_req->stats_clear_mask; + + WMI_CHAR_ARRAY_TO_MAC_ADDR(addr, + &cmd->peer_macaddr); + + WMI_LOGD("LINK_LAYER_STATS - Clear Request Params"); + WMI_LOGD("StopReq : %d", cmd->stop_stats_collection_req); + WMI_LOGD("Vdev Id : %d", cmd->vdev_id); + WMI_LOGD("Clear Stat Mask : %d", cmd->stats_clear_req_mask); + /* WMI_LOGD("Peer MAC Addr : %pM", + cmd->peer_macaddr); */ + + wmi_mtrace(WMI_CLEAR_LINK_STATS_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_CLEAR_LINK_STATS_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send clear link stats req", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + WMI_LOGD("Clear Link Layer Stats request sent successfully"); + return QDF_STATUS_SUCCESS; +} + +/** + * send_process_ll_stats_set_cmd_tlv() - link layer stats set request + * @wmi_handle: wmi handle + * @setReq: ll stats set request command params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_process_ll_stats_set_cmd_tlv(wmi_unified_t wmi_handle, + const struct ll_stats_set_params *set_req) +{ + wmi_start_link_stats_cmd_fixed_param *cmd; + int32_t len; + wmi_buf_t buf; + uint8_t *buf_ptr; + int ret; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + qdf_mem_zero(buf_ptr, len); + cmd = (wmi_start_link_stats_cmd_fixed_param *) buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_start_link_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_start_link_stats_cmd_fixed_param)); + + cmd->mpdu_size_threshold = set_req->mpdu_size_threshold; + 
cmd->aggressive_statistics_gathering = + set_req->aggressive_statistics_gathering; + + WMI_LOGD("LINK_LAYER_STATS - Start/Set Request Params"); + WMI_LOGD("MPDU Size Thresh : %d", cmd->mpdu_size_threshold); + WMI_LOGD("Aggressive Gather: %d", cmd->aggressive_statistics_gathering); + + wmi_mtrace(WMI_START_LINK_STATS_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_START_LINK_STATS_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send set link stats request", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_process_ll_stats_get_cmd_tlv() - link layer stats get request + * @wmi_handle:wmi handle + * @get_req:ll stats get request command params + * @addr: mac address + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_process_ll_stats_get_cmd_tlv(wmi_unified_t wmi_handle, + const struct ll_stats_get_params *get_req, + uint8_t addr[IEEE80211_ADDR_LEN]) +{ + wmi_request_link_stats_cmd_fixed_param *cmd; + int32_t len; + wmi_buf_t buf; + uint8_t *buf_ptr; + int ret; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) { + WMI_LOGE("%s: buf allocation failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + qdf_mem_zero(buf_ptr, len); + cmd = (wmi_request_link_stats_cmd_fixed_param *) buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_request_link_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_request_link_stats_cmd_fixed_param)); + + cmd->request_id = get_req->req_id; + cmd->stats_type = get_req->param_id_mask; + cmd->vdev_id = get_req->sta_id; + + WMI_CHAR_ARRAY_TO_MAC_ADDR(addr, + &cmd->peer_macaddr); + + WMI_LOGD("LINK_LAYER_STATS - Get Request Params"); + WMI_LOGD("Request ID : %u", cmd->request_id); + WMI_LOGD("Stats Type : %0x", cmd->stats_type); + WMI_LOGD("Vdev ID : %d", cmd->vdev_id); + WMI_LOGD("Peer MAC Addr : %pM", addr); + + 
wmi_mtrace(WMI_REQUEST_LINK_STATS_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_REQUEST_LINK_STATS_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send get link stats request", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + + +/** + * send_congestion_cmd_tlv() - send request to fw to get CCA + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: CDF status + */ +static QDF_STATUS send_congestion_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + wmi_buf_t buf; + wmi_request_stats_cmd_fixed_param *cmd; + uint8_t len; + uint8_t *buf_ptr; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed to allocate wmi buffer", __func__); + return QDF_STATUS_E_FAILURE; + } + + buf_ptr = wmi_buf_data(buf); + cmd = (wmi_request_stats_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_request_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_request_stats_cmd_fixed_param)); + + cmd->stats_id = WMI_REQUEST_CONGESTION_STAT; + cmd->vdev_id = vdev_id; + WMI_LOGD("STATS REQ VDEV_ID:%d stats_id %d -->", + cmd->vdev_id, cmd->stats_id); + + wmi_mtrace(WMI_REQUEST_STATS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_REQUEST_STATS_CMDID)) { + WMI_LOGE("%s: Failed to send WMI_REQUEST_STATS_CMDID", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_snr_request_cmd_tlv() - send request to fw to get RSSI stats + * @wmi_handle: wmi handle + * @rssi_req: get RSSI request + * + * Return: CDF status + */ +static QDF_STATUS send_snr_request_cmd_tlv(wmi_unified_t wmi_handle) +{ + wmi_buf_t buf; + wmi_request_stats_cmd_fixed_param *cmd; + uint8_t len = sizeof(wmi_request_stats_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return 
QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_request_stats_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_request_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_request_stats_cmd_fixed_param)); + cmd->stats_id = WMI_REQUEST_VDEV_STAT; + wmi_mtrace(WMI_REQUEST_STATS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send + (wmi_handle, buf, len, WMI_REQUEST_STATS_CMDID)) { + WMI_LOGE("Failed to send host stats request to fw"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_snr_cmd_tlv() - get RSSI from fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: CDF status + */ +static QDF_STATUS send_snr_cmd_tlv(wmi_unified_t wmi_handle, uint8_t vdev_id) +{ + wmi_buf_t buf; + wmi_request_stats_cmd_fixed_param *cmd; + uint8_t len = sizeof(wmi_request_stats_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_request_stats_cmd_fixed_param *) wmi_buf_data(buf); + cmd->vdev_id = vdev_id; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_request_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_request_stats_cmd_fixed_param)); + cmd->stats_id = WMI_REQUEST_VDEV_STAT; + wmi_mtrace(WMI_REQUEST_STATS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_REQUEST_STATS_CMDID)) { + WMI_LOGE("Failed to send host stats request to fw"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_link_status_req_cmd_tlv() - process link status request from UMAC + * @wmi_handle: wmi handle + * @link_status: get link params + * + * Return: CDF status + */ +static QDF_STATUS send_link_status_req_cmd_tlv(wmi_unified_t wmi_handle, + struct link_status_params *link_status) +{ + wmi_buf_t buf; + wmi_request_stats_cmd_fixed_param *cmd; + uint8_t len = 
sizeof(wmi_request_stats_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_request_stats_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_request_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_request_stats_cmd_fixed_param)); + cmd->stats_id = WMI_REQUEST_VDEV_RATE_STAT; + cmd->vdev_id = link_status->session_id; + wmi_mtrace(WMI_REQUEST_STATS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_REQUEST_STATS_CMDID)) { + WMI_LOGE("Failed to send WMI link status request to fw"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_process_dhcp_ind_cmd_tlv() - process dhcp indication from SME + * @wmi_handle: wmi handle + * @ta_dhcp_ind: DHCP indication parameter + * + * Return: CDF Status + */ +static QDF_STATUS send_process_dhcp_ind_cmd_tlv(wmi_unified_t wmi_handle, + wmi_peer_set_param_cmd_fixed_param *ta_dhcp_ind) +{ + QDF_STATUS status; + wmi_buf_t buf = NULL; + uint8_t *buf_ptr; + wmi_peer_set_param_cmd_fixed_param *peer_set_param_fp; + int len = sizeof(wmi_peer_set_param_cmd_fixed_param); + + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + peer_set_param_fp = (wmi_peer_set_param_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&peer_set_param_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_set_param_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_set_param_cmd_fixed_param)); + + /* fill in values */ + peer_set_param_fp->vdev_id = ta_dhcp_ind->vdev_id; + peer_set_param_fp->param_id = ta_dhcp_ind->param_id; + peer_set_param_fp->param_value = ta_dhcp_ind->param_value; + qdf_mem_copy(&peer_set_param_fp->peer_macaddr, + &ta_dhcp_ind->peer_macaddr, + 
sizeof(ta_dhcp_ind->peer_macaddr)); + + wmi_mtrace(WMI_PEER_SET_PARAM_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_PEER_SET_PARAM_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("%s: wmi_unified_cmd_send WMI_PEER_SET_PARAM_CMD" + " returned Error %d", __func__, status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_get_link_speed_cmd_tlv() -send command to get linkspeed + * @wmi_handle: wmi handle + * @pLinkSpeed: link speed info + * + * Return: CDF status + */ +static QDF_STATUS send_get_link_speed_cmd_tlv(wmi_unified_t wmi_handle, + wmi_mac_addr peer_macaddr) +{ + wmi_peer_get_estimated_linkspeed_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + uint8_t *buf_ptr; + + len = sizeof(wmi_peer_get_estimated_linkspeed_cmd_fixed_param); + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + + cmd = (wmi_peer_get_estimated_linkspeed_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_get_estimated_linkspeed_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_peer_get_estimated_linkspeed_cmd_fixed_param)); + + /* Copy the peer macaddress to the wma buffer */ + qdf_mem_copy(&cmd->peer_macaddr, + &peer_macaddr, + sizeof(peer_macaddr)); + + + wmi_mtrace(WMI_PEER_GET_ESTIMATED_LINKSPEED_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_PEER_GET_ESTIMATED_LINKSPEED_CMDID)) { + WMI_LOGE("%s: failed to send link speed command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_SUPPORT_GREEN_AP +/** + * send_egap_conf_params_cmd_tlv() - send wmi cmd of egap configuration params + * @wmi_handle: wmi handler + * @egap_params: pointer to egap_params + * + * Return: 0 for success, otherwise appropriate error code + */ +static QDF_STATUS 
send_egap_conf_params_cmd_tlv(wmi_unified_t wmi_handle, + struct wlan_green_ap_egap_params *egap_params) +{ + wmi_ap_ps_egap_param_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t err; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate buffer to send ap_ps_egap cmd"); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_ap_ps_egap_param_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_ap_ps_egap_param_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_ap_ps_egap_param_cmd_fixed_param)); + + cmd->enable = egap_params->host_enable_egap; + cmd->inactivity_time = egap_params->egap_inactivity_time; + cmd->wait_time = egap_params->egap_wait_time; + cmd->flags = egap_params->egap_feature_flags; + wmi_mtrace(WMI_AP_PS_EGAP_PARAM_CMDID, NO_SESSION, 0); + err = wmi_unified_cmd_send(wmi_handle, buf, + sizeof(*cmd), WMI_AP_PS_EGAP_PARAM_CMDID); + if (err) { + WMI_LOGE("Failed to send ap_ps_egap cmd"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * send_fw_profiling_cmd_tlv() - send FW profiling cmd to WLAN FW + * @wmi_handl: wmi handle + * @cmd: Profiling command index + * @value1: parameter1 value + * @value2: parameter2 value + * + * Return: QDF_STATUS_SUCCESS for success else error code + */ +static QDF_STATUS send_fw_profiling_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t cmd, uint32_t value1, uint32_t value2) +{ + wmi_buf_t buf; + int32_t len = 0; + int ret; + wmi_wlan_profile_trigger_cmd_fixed_param *prof_trig_cmd; + wmi_wlan_profile_set_hist_intvl_cmd_fixed_param *hist_intvl_cmd; + wmi_wlan_profile_enable_profile_id_cmd_fixed_param *profile_enable_cmd; + wmi_wlan_profile_get_prof_data_cmd_fixed_param *profile_getdata_cmd; + + switch (cmd) { + case WMI_WLAN_PROFILE_TRIGGER_CMDID: + len = sizeof(wmi_wlan_profile_trigger_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc Failed", 
__func__); + return QDF_STATUS_E_NOMEM; + } + prof_trig_cmd = + (wmi_wlan_profile_trigger_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&prof_trig_cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_wlan_profile_trigger_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_wlan_profile_trigger_cmd_fixed_param)); + prof_trig_cmd->enable = value1; + wmi_mtrace(WMI_WLAN_PROFILE_TRIGGER_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WLAN_PROFILE_TRIGGER_CMDID); + if (ret) { + WMI_LOGE("PROFILE_TRIGGER cmd Failed with value %d", + value1); + wmi_buf_free(buf); + return ret; + } + break; + + case WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID: + len = sizeof(wmi_wlan_profile_get_prof_data_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc Failed", __func__); + return QDF_STATUS_E_NOMEM; + } + profile_getdata_cmd = + (wmi_wlan_profile_get_prof_data_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&profile_getdata_cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_wlan_profile_get_prof_data_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_wlan_profile_get_prof_data_cmd_fixed_param)); + wmi_mtrace(WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID); + if (ret) { + WMI_LOGE("PROFILE_DATA cmd Failed for id %d value %d", + value1, value2); + wmi_buf_free(buf); + return ret; + } + break; + + case WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID: + len = sizeof(wmi_wlan_profile_set_hist_intvl_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc Failed", __func__); + return QDF_STATUS_E_NOMEM; + } + hist_intvl_cmd = + (wmi_wlan_profile_set_hist_intvl_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&hist_intvl_cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_wlan_profile_set_hist_intvl_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + 
(wmi_wlan_profile_set_hist_intvl_cmd_fixed_param)); + hist_intvl_cmd->profile_id = value1; + hist_intvl_cmd->value = value2; + wmi_mtrace(WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID); + if (ret) { + WMI_LOGE("HIST_INTVL cmd Failed for id %d value %d", + value1, value2); + wmi_buf_free(buf); + return ret; + } + break; + + case WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID: + len = + sizeof(wmi_wlan_profile_enable_profile_id_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc Failed", __func__); + return QDF_STATUS_E_NOMEM; + } + profile_enable_cmd = + (wmi_wlan_profile_enable_profile_id_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&profile_enable_cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_wlan_profile_enable_profile_id_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_wlan_profile_enable_profile_id_cmd_fixed_param)); + profile_enable_cmd->profile_id = value1; + profile_enable_cmd->enable = value2; + wmi_mtrace(WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID); + if (ret) { + WMI_LOGE("enable cmd Failed for id %d value %d", + value1, value2); + wmi_buf_free(buf); + return ret; + } + break; + + default: + WMI_LOGD("%s: invalid profiling command", __func__); + break; + } + + return 0; +} + +static QDF_STATUS send_wlm_latency_level_cmd_tlv(wmi_unified_t wmi_handle, + struct wlm_latency_level_param *params) +{ + wmi_wlm_config_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint32_t len = sizeof(*cmd); + static uint32_t ll[4] = {100, 60, 40, 20}; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_wlm_config_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + 
WMITLV_TAG_STRUC_wmi_wlm_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_wlm_config_cmd_fixed_param)); + cmd->vdev_id = params->vdev_id; + cmd->latency_level = params->wlm_latency_level; + cmd->ul_latency = ll[params->wlm_latency_level]; + cmd->dl_latency = ll[params->wlm_latency_level]; + cmd->flags = params->wlm_latency_flags; + wmi_mtrace(WMI_WLM_CONFIG_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WLM_CONFIG_CMDID)) { + WMI_LOGE("%s: Failed to send setting latency config command", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} +/** + * send_nat_keepalive_en_cmd_tlv() - enable NAT keepalive filter + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_nat_keepalive_en_cmd_tlv(wmi_unified_t wmi_handle, uint8_t vdev_id) +{ + WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + WMI_LOGD("%s: vdev_id %d", __func__, vdev_id); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->action = IPSEC_NATKEEPALIVE_FILTER_ENABLE; + wmi_mtrace(WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID)) { + WMI_LOGP("%s: Failed to send NAT keepalive enable command", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +/** + * wmi_unified_csa_offload_enable() - sen CSA offload enable command + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * 
Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_csa_offload_enable_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id) +{ + wmi_csa_offload_enable_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + WMI_LOGD("%s: vdev_id %d", __func__, vdev_id); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_csa_offload_enable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_csa_offload_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_csa_offload_enable_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->csa_offload_enable = WMI_CSA_OFFLOAD_ENABLE; + wmi_mtrace(WMI_CSA_OFFLOAD_ENABLE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_CSA_OFFLOAD_ENABLE_CMDID)) { + WMI_LOGP("%s: Failed to send CSA offload enable command", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return 0; +} + +#ifdef WLAN_FEATURE_CIF_CFR +/** + * send_oem_dma_cfg_cmd_tlv() - configure OEM DMA rings + * @wmi_handle: wmi handle + * @data_len: len of dma cfg req + * @data: dma cfg req + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +static QDF_STATUS send_oem_dma_cfg_cmd_tlv(wmi_unified_t wmi_handle, + wmi_oem_dma_ring_cfg_req_fixed_param *cfg) +{ + wmi_buf_t buf; + uint8_t *cmd; + QDF_STATUS ret; + + WMITLV_SET_HDR(cfg, + WMITLV_TAG_STRUC_wmi_oem_dma_ring_cfg_req_fixed_param, + (sizeof(*cfg) - WMI_TLV_HDR_SIZE)); + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cfg)); + if (!buf) { + WMI_LOGE(FL("wmi_buf_alloc failed")); + return QDF_STATUS_E_FAILURE; + } + + cmd = (uint8_t *) wmi_buf_data(buf); + qdf_mem_copy(cmd, cfg, sizeof(*cfg)); + WMI_LOGI(FL("Sending OEM Data Request to target, data len %lu"), + sizeof(*cfg)); + wmi_mtrace(WMI_OEM_DMA_RING_CFG_REQ_CMDID, NO_SESSION, 0); + ret = 
wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cfg), + WMI_OEM_DMA_RING_CFG_REQ_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE(FL(":wmi cmd send failed")); + wmi_buf_free(buf); + } + + return ret; +} +#endif + +/** + * send_dbr_cfg_cmd_tlv() - configure DMA rings for Direct Buf RX + * @wmi_handle: wmi handle + * @data_len: len of dma cfg req + * @data: dma cfg req + * + * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure + */ +static QDF_STATUS send_dbr_cfg_cmd_tlv(wmi_unified_t wmi_handle, + struct direct_buf_rx_cfg_req *cfg) +{ + wmi_buf_t buf; + wmi_dma_ring_cfg_req_fixed_param *cmd; + QDF_STATUS ret; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE(FL("wmi_buf_alloc failed")); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_dma_ring_cfg_req_fixed_param *)wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_dma_ring_cfg_req_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_dma_ring_cfg_req_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + cfg->pdev_id); + cmd->mod_id = cfg->mod_id; + cmd->base_paddr_lo = cfg->base_paddr_lo; + cmd->base_paddr_hi = cfg->base_paddr_hi; + cmd->head_idx_paddr_lo = cfg->head_idx_paddr_lo; + cmd->head_idx_paddr_hi = cfg->head_idx_paddr_hi; + cmd->tail_idx_paddr_lo = cfg->tail_idx_paddr_lo; + cmd->tail_idx_paddr_hi = cfg->tail_idx_paddr_hi; + cmd->num_elems = cfg->num_elems; + cmd->buf_size = cfg->buf_size; + cmd->num_resp_per_event = cfg->num_resp_per_event; + cmd->event_timeout_ms = cfg->event_timeout_ms; + + WMI_LOGD("%s: wmi_dma_ring_cfg_req_fixed_param pdev id %d mod id %d" + "base paddr lo %x base paddr hi %x head idx paddr lo %x" + "head idx paddr hi %x tail idx paddr lo %x" + "tail idx addr hi %x num elems %d buf size %d num resp %d" + "event timeout %d\n", __func__, cmd->pdev_id, + cmd->mod_id, cmd->base_paddr_lo, cmd->base_paddr_hi, + cmd->head_idx_paddr_lo, cmd->head_idx_paddr_hi, + 
cmd->tail_idx_paddr_lo, cmd->tail_idx_paddr_hi, + cmd->num_elems, cmd->buf_size, cmd->num_resp_per_event, + cmd->event_timeout_ms); + wmi_mtrace(WMI_PDEV_DMA_RING_CFG_REQ_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_DMA_RING_CFG_REQ_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE(FL(":wmi cmd send failed")); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_start_11d_scan_cmd_tlv() - start 11d scan request + * @wmi_handle: wmi handle + * @start_11d_scan: 11d scan start request parameters + * + * This function request FW to start 11d scan. + * + * Return: QDF status + */ +static QDF_STATUS send_start_11d_scan_cmd_tlv(wmi_unified_t wmi_handle, + struct reg_start_11d_scan_req *start_11d_scan) +{ + wmi_11d_scan_start_cmd_fixed_param *cmd; + int32_t len; + wmi_buf_t buf; + int ret; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_11d_scan_start_cmd_fixed_param *)wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_11d_scan_start_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_11d_scan_start_cmd_fixed_param)); + + cmd->vdev_id = start_11d_scan->vdev_id; + cmd->scan_period_msec = start_11d_scan->scan_period_msec; + cmd->start_interval_msec = start_11d_scan->start_interval_msec; + + WMI_LOGD("vdev %d sending 11D scan start req", cmd->vdev_id); + + wmi_mtrace(WMI_11D_SCAN_START_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_11D_SCAN_START_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send start 11d scan wmi cmd", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_stop_11d_scan_cmd_tlv() - stop 11d scan request + * @wmi_handle: wmi handle + * @start_11d_scan: 11d scan stop request parameters + * + * This function request FW to stop 11d scan. 
+ * + * Return: QDF status + */ +static QDF_STATUS send_stop_11d_scan_cmd_tlv(wmi_unified_t wmi_handle, + struct reg_stop_11d_scan_req *stop_11d_scan) +{ + wmi_11d_scan_stop_cmd_fixed_param *cmd; + int32_t len; + wmi_buf_t buf; + int ret; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_11d_scan_stop_cmd_fixed_param *)wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_11d_scan_stop_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_11d_scan_stop_cmd_fixed_param)); + + cmd->vdev_id = stop_11d_scan->vdev_id; + + WMI_LOGD("vdev %d sending 11D scan stop req", cmd->vdev_id); + + wmi_mtrace(WMI_11D_SCAN_STOP_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_11D_SCAN_STOP_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send stop 11d scan wmi cmd", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_start_oem_data_cmd_tlv() - start OEM data request to target + * @wmi_handle: wmi handle + * @startOemDataReq: start request params + * + * Return: CDF status + */ +static QDF_STATUS send_start_oem_data_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t data_len, + uint8_t *data) +{ + wmi_buf_t buf; + uint8_t *cmd; + QDF_STATUS ret; + + buf = wmi_buf_alloc(wmi_handle, + (data_len + WMI_TLV_HDR_SIZE)); + if (!buf) { + WMI_LOGE(FL("wmi_buf_alloc failed")); + return QDF_STATUS_E_FAILURE; + } + + cmd = (uint8_t *) wmi_buf_data(buf); + + WMITLV_SET_HDR(cmd, WMITLV_TAG_ARRAY_BYTE, data_len); + cmd += WMI_TLV_HDR_SIZE; + qdf_mem_copy(cmd, data, + data_len); + + WMI_LOGD(FL("Sending OEM Data Request to target, data len %d"), + data_len); + + wmi_mtrace(WMI_OEM_REQ_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, + (data_len + + WMI_TLV_HDR_SIZE), WMI_OEM_REQ_CMDID); + + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE(FL(":wmi 
cmd send failed")); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_dfs_phyerr_filter_offload_en_cmd_tlv() - enable dfs phyerr filter + * @wmi_handle: wmi handle + * @dfs_phyerr_filter_offload: is dfs phyerr filter offload + * + * Send WMI_DFS_PHYERR_FILTER_ENA_CMDID or + * WMI_DFS_PHYERR_FILTER_DIS_CMDID command + * to firmware based on phyerr filtering + * offload status. + * + * Return: 1 success, 0 failure + */ +static QDF_STATUS +send_dfs_phyerr_filter_offload_en_cmd_tlv(wmi_unified_t wmi_handle, + bool dfs_phyerr_filter_offload) +{ + wmi_dfs_phyerr_filter_ena_cmd_fixed_param *enable_phyerr_offload_cmd; + wmi_dfs_phyerr_filter_dis_cmd_fixed_param *disable_phyerr_offload_cmd; + wmi_buf_t buf; + uint16_t len; + QDF_STATUS ret; + + + if (false == dfs_phyerr_filter_offload) { + WMI_LOGD("%s:Phyerror Filtering offload is Disabled in ini", + __func__); + len = sizeof(*disable_phyerr_offload_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed", __func__); + return 0; + } + disable_phyerr_offload_cmd = + (wmi_dfs_phyerr_filter_dis_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&disable_phyerr_offload_cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_dfs_phyerr_filter_dis_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_dfs_phyerr_filter_dis_cmd_fixed_param)); + + /* + * Send WMI_DFS_PHYERR_FILTER_DIS_CMDID + * to the firmware to disable the phyerror + * filtering offload. 
+ */ + wmi_mtrace(WMI_DFS_PHYERR_FILTER_DIS_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_DFS_PHYERR_FILTER_DIS_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send WMI_DFS_PHYERR_FILTER_DIS_CMDID ret=%d", + __func__, ret); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + WMI_LOGD("%s: WMI_DFS_PHYERR_FILTER_DIS_CMDID Send Success", + __func__); + } else { + WMI_LOGD("%s:Phyerror Filtering offload is Enabled in ini", + __func__); + + len = sizeof(*enable_phyerr_offload_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_FAILURE; + } + + enable_phyerr_offload_cmd = + (wmi_dfs_phyerr_filter_ena_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&enable_phyerr_offload_cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_dfs_phyerr_filter_ena_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_dfs_phyerr_filter_ena_cmd_fixed_param)); + + /* + * Send a WMI_DFS_PHYERR_FILTER_ENA_CMDID + * to the firmware to enable the phyerror + * filtering offload. + */ + wmi_mtrace(WMI_DFS_PHYERR_FILTER_ENA_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_DFS_PHYERR_FILTER_ENA_CMDID); + + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send DFS PHYERR CMD ret=%d", + __func__, ret); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + WMI_LOGD("%s: WMI_DFS_PHYERR_FILTER_ENA_CMDID Send Success", + __func__); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_wow_timer_pattern_cmd_tlv() - set timer pattern tlv, so that firmware + * will wake up host after specified time is elapsed + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @cookie: value to identify reason why host set up wake call. 
+ * @time: time in ms + * + * Return: QDF status + */ +static QDF_STATUS send_wow_timer_pattern_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, uint32_t cookie, uint32_t time) +{ + WMI_WOW_ADD_PATTERN_CMD_fixed_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + int32_t len; + int ret; + + len = sizeof(WMI_WOW_ADD_PATTERN_CMD_fixed_param) + + WMI_TLV_HDR_SIZE + 0 * sizeof(WOW_BITMAP_PATTERN_T) + + WMI_TLV_HDR_SIZE + 0 * sizeof(WOW_IPV4_SYNC_PATTERN_T) + + WMI_TLV_HDR_SIZE + 0 * sizeof(WOW_IPV6_SYNC_PATTERN_T) + + WMI_TLV_HDR_SIZE + 0 * sizeof(WOW_MAGIC_PATTERN_CMD) + + WMI_TLV_HDR_SIZE + 1 * sizeof(uint32_t) + + WMI_TLV_HDR_SIZE + 1 * sizeof(uint32_t); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (WMI_WOW_ADD_PATTERN_CMD_fixed_param *) wmi_buf_data(buf); + buf_ptr = (uint8_t *) cmd; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_WOW_ADD_PATTERN_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_WOW_ADD_PATTERN_CMD_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->pattern_id = cookie, + cmd->pattern_type = WOW_TIMER_PATTERN; + buf_ptr += sizeof(WMI_WOW_ADD_PATTERN_CMD_fixed_param); + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_BITMAP_PATTERN_T but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_IPV4_SYNC_PATTERN_T but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_IPV6_SYNC_PATTERN_T but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_MAGIC_PATTERN_CMD but no data. 
*/ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for pattern_info_timeout, and time value */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, sizeof(uint32_t)); + buf_ptr += WMI_TLV_HDR_SIZE; + *((uint32_t *) buf_ptr) = time; + buf_ptr += sizeof(uint32_t); + + /* Fill TLV for ra_ratelimit_interval. with dummy 0 value */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, sizeof(uint32_t)); + buf_ptr += WMI_TLV_HDR_SIZE; + *((uint32_t *) buf_ptr) = 0; + + WMI_LOGD("%s: send wake timer pattern with time[%d] to fw vdev = %d", + __func__, time, vdev_id); + + wmi_mtrace(WMI_WOW_ADD_WAKE_PATTERN_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WOW_ADD_WAKE_PATTERN_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send wake timer pattern to fw", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#if !defined(REMOVE_PKT_LOG) +/** + * send_pktlog_wmi_send_cmd_tlv() - send pktlog enable/disable command to target + * @wmi_handle: wmi handle + * @pktlog_event: pktlog event + * @cmd_id: pktlog cmd id + * + * Return: CDF status + */ +static QDF_STATUS send_pktlog_wmi_send_cmd_tlv(wmi_unified_t wmi_handle, + WMI_PKTLOG_EVENT pktlog_event, + WMI_CMD_ID cmd_id, uint8_t user_triggered) +{ + WMI_PKTLOG_EVENT PKTLOG_EVENT; + WMI_CMD_ID CMD_ID; + wmi_pdev_pktlog_enable_cmd_fixed_param *cmd; + wmi_pdev_pktlog_disable_cmd_fixed_param *disable_cmd; + int len = 0; + wmi_buf_t buf; + + PKTLOG_EVENT = pktlog_event; + CMD_ID = cmd_id; + + switch (CMD_ID) { + case WMI_PDEV_PKTLOG_ENABLE_CMDID: + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_pdev_pktlog_enable_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_pktlog_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + 
(wmi_pdev_pktlog_enable_cmd_fixed_param)); + cmd->evlist = PKTLOG_EVENT; + cmd->enable = user_triggered ? WMI_PKTLOG_ENABLE_FORCE + : WMI_PKTLOG_ENABLE_AUTO; + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + WMI_HOST_PDEV_ID_SOC); + wmi_mtrace(WMI_PDEV_PKTLOG_ENABLE_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_PKTLOG_ENABLE_CMDID)) { + WMI_LOGE("failed to send pktlog enable cmdid"); + goto wmi_send_failed; + } + break; + case WMI_PDEV_PKTLOG_DISABLE_CMDID: + len = sizeof(*disable_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + disable_cmd = (wmi_pdev_pktlog_disable_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&disable_cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_pktlog_disable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_pktlog_disable_cmd_fixed_param)); + disable_cmd->pdev_id = + wmi_handle->ops->convert_pdev_id_host_to_target( + WMI_HOST_PDEV_ID_SOC); + wmi_mtrace(WMI_PDEV_PKTLOG_DISABLE_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_PKTLOG_DISABLE_CMDID)) { + WMI_LOGE("failed to send pktlog disable cmdid"); + goto wmi_send_failed; + } + break; + default: + WMI_LOGD("%s: invalid PKTLOG command", __func__); + break; + } + + return QDF_STATUS_SUCCESS; + +wmi_send_failed: + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; +} +#endif /* REMOVE_PKT_LOG */ + +/** + * send_wow_delete_pattern_cmd_tlv() - delete wow pattern in target + * @wmi_handle: wmi handle + * @ptrn_id: pattern id + * @vdev_id: vdev id + * + * Return: CDF status + */ +static QDF_STATUS send_wow_delete_pattern_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t ptrn_id, uint8_t vdev_id) +{ + WMI_WOW_DEL_PATTERN_CMD_fixed_param *cmd; + wmi_buf_t buf; + int32_t len; + int ret; + + len = sizeof(WMI_WOW_DEL_PATTERN_CMD_fixed_param); + + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: 
Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (WMI_WOW_DEL_PATTERN_CMD_fixed_param *) wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_WOW_DEL_PATTERN_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + WMI_WOW_DEL_PATTERN_CMD_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->pattern_id = ptrn_id; + cmd->pattern_type = WOW_BITMAP_PATTERN; + + WMI_LOGI("Deleting pattern id: %d vdev id %d in fw", + cmd->pattern_id, vdev_id); + + wmi_mtrace(WMI_WOW_DEL_WAKE_PATTERN_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WOW_DEL_WAKE_PATTERN_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to delete wow ptrn from fw", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_host_wakeup_ind_to_fw_cmd_tlv() - send wakeup ind to fw + * @wmi_handle: wmi handle + * + * Sends host wakeup indication to FW. On receiving this indication, + * FW will come out of WOW. 
+ * + * Return: CDF status + */ +static QDF_STATUS send_host_wakeup_ind_to_fw_cmd_tlv(wmi_unified_t wmi_handle) +{ + wmi_wow_hostwakeup_from_sleep_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; + int32_t len; + int ret; + + len = sizeof(wmi_wow_hostwakeup_from_sleep_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_wow_hostwakeup_from_sleep_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_wow_hostwakeup_from_sleep_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_wow_hostwakeup_from_sleep_cmd_fixed_param)); + + + wmi_mtrace(WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID); + if (ret) { + WMI_LOGE("Failed to send host wakeup indication to fw"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return qdf_status; +} + +/** + * send_del_ts_cmd_tlv() - send DELTS request to fw + * @wmi_handle: wmi handle + * @msg: delts params + * + * Return: CDF status + */ +static QDF_STATUS send_del_ts_cmd_tlv(wmi_unified_t wmi_handle, uint8_t vdev_id, + uint8_t ac) +{ + wmi_vdev_wmm_delts_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_wmm_delts_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_wmm_delts_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_wmm_delts_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->ac = ac; + + WMI_LOGD("Delts vdev:%d, ac:%d, %s:%d", + cmd->vdev_id, cmd->ac, __func__, __LINE__); + wmi_mtrace(WMI_VDEV_WMM_DELTS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + 
WMI_VDEV_WMM_DELTS_CMDID)) { + WMI_LOGP("%s: Failed to send vdev DELTS command", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_aggr_qos_cmd_tlv() - send aggr qos request to fw + * @wmi_handle: handle to wmi + * @aggr_qos_rsp_msg - combined struct for all ADD_TS requests. + * + * A function to handle WMI_AGGR_QOS_REQ. This will send out + * ADD_TS requestes to firmware in loop for all the ACs with + * active flow. + * + * Return: CDF status + */ +static QDF_STATUS send_aggr_qos_cmd_tlv(wmi_unified_t wmi_handle, + struct aggr_add_ts_param *aggr_qos_rsp_msg) +{ + int i = 0; + wmi_vdev_wmm_addts_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + for (i = 0; i < WMI_QOS_NUM_AC_MAX; i++) { + /* if flow in this AC is active */ + if (((1 << i) & aggr_qos_rsp_msg->tspecIdx)) { + /* + * as per implementation of wma_add_ts_req() we + * are not waiting any response from firmware so + * apart from sending ADDTS to firmware just send + * success to upper layers + */ + aggr_qos_rsp_msg->status[i] = QDF_STATUS_SUCCESS; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_wmm_addts_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_wmm_addts_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_wmm_addts_cmd_fixed_param)); + cmd->vdev_id = aggr_qos_rsp_msg->vdev_id; + cmd->ac = + WMI_TID_TO_AC(aggr_qos_rsp_msg->tspec[i].tsinfo. 
+ traffic.userPrio); + cmd->medium_time_us = + aggr_qos_rsp_msg->tspec[i].mediumTime * 32; + cmd->downgrade_type = WMM_AC_DOWNGRADE_DEPRIO; + WMI_LOGD("%s:%d: Addts vdev:%d, ac:%d, mediumTime:%d downgrade_type:%d", + __func__, __LINE__, cmd->vdev_id, cmd->ac, + cmd->medium_time_us, cmd->downgrade_type); + wmi_mtrace(WMI_VDEV_WMM_ADDTS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send + (wmi_handle, buf, len, + WMI_VDEV_WMM_ADDTS_CMDID)) { + WMI_LOGP("%s: Failed to send vdev ADDTS command", + __func__); + aggr_qos_rsp_msg->status[i] = + QDF_STATUS_E_FAILURE; + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + } + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_add_ts_cmd_tlv() - send ADDTS request to fw + * @wmi_handle: wmi handle + * @msg: ADDTS params + * + * Return: CDF status + */ +static QDF_STATUS send_add_ts_cmd_tlv(wmi_unified_t wmi_handle, + struct add_ts_param *msg) +{ + wmi_vdev_wmm_addts_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + msg->status = QDF_STATUS_SUCCESS; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_vdev_wmm_addts_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_wmm_addts_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_wmm_addts_cmd_fixed_param)); + cmd->vdev_id = msg->sme_session_id; + cmd->ac = msg->tspec.tsinfo.traffic.userPrio; + cmd->medium_time_us = msg->tspec.mediumTime * 32; + cmd->downgrade_type = WMM_AC_DOWNGRADE_DROP; + WMI_LOGD("Addts vdev:%d, ac:%d, mediumTime:%d, downgrade_type:%d %s:%d", + cmd->vdev_id, cmd->ac, cmd->medium_time_us, + cmd->downgrade_type, __func__, __LINE__); + wmi_mtrace(WMI_VDEV_WMM_ADDTS_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_WMM_ADDTS_CMDID)) { + WMI_LOGP("%s: Failed to send vdev ADDTS command", __func__); + msg->status = QDF_STATUS_E_FAILURE; + 
wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_process_add_periodic_tx_ptrn_cmd_tlv - add periodic tx ptrn + * @wmi_handle: wmi handle + * @pAddPeriodicTxPtrnParams: tx ptrn params + * + * Retrun: CDF status + */ +static QDF_STATUS send_process_add_periodic_tx_ptrn_cmd_tlv(wmi_unified_t wmi_handle, + struct periodic_tx_pattern * + pAddPeriodicTxPtrnParams, + uint8_t vdev_id) +{ + WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len; + uint8_t *buf_ptr; + uint32_t ptrn_len, ptrn_len_aligned; + int j; + + ptrn_len = pAddPeriodicTxPtrnParams->ucPtrnSize; + ptrn_len_aligned = roundup(ptrn_len, sizeof(uint32_t)); + len = sizeof(WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param) + + WMI_TLV_HDR_SIZE + ptrn_len_aligned; + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + + cmd = (WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param)); + + /* Pass the pattern id to delete for the corresponding vdev id */ + cmd->vdev_id = vdev_id; + cmd->pattern_id = pAddPeriodicTxPtrnParams->ucPtrnId; + cmd->timeout = pAddPeriodicTxPtrnParams->usPtrnIntervalMs; + cmd->length = pAddPeriodicTxPtrnParams->ucPtrnSize; + + /* Pattern info */ + buf_ptr += sizeof(WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, ptrn_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, pAddPeriodicTxPtrnParams->ucPattern, ptrn_len); + for (j = 0; j < pAddPeriodicTxPtrnParams->ucPtrnSize; j++) + WMI_LOGD("%s: Add Ptrn: %02x", __func__, buf_ptr[j] & 0xff); + + WMI_LOGD("%s: Add ptrn id: %d vdev_id: %d", + __func__, 
cmd->pattern_id, cmd->vdev_id); + + wmi_mtrace(WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID)) { + WMI_LOGE("%s: failed to add pattern set state command", + __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_process_del_periodic_tx_ptrn_cmd_tlv - del periodic tx ptrn + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @pattern_id: pattern id + * + * Retrun: CDF status + */ +static QDF_STATUS send_process_del_periodic_tx_ptrn_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint8_t pattern_id) +{ + WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint32_t len = + sizeof(WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param); + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param *) + wmi_buf_data(wmi_buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD_fixed_param)); + + /* Pass the pattern id to delete for the corresponding vdev id */ + cmd->vdev_id = vdev_id; + cmd->pattern_id = pattern_id; + WMI_LOGD("%s: Del ptrn id: %d vdev_id: %d", + __func__, cmd->pattern_id, cmd->vdev_id); + + wmi_mtrace(WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID)) { + WMI_LOGE("%s: failed to send del pattern command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} + +/** + * send_stats_ext_req_cmd_tlv() - request ext stats from fw + * @wmi_handle: wmi handle + * @preq: stats ext params + * + * Return: CDF status + */ +static QDF_STATUS 
send_stats_ext_req_cmd_tlv(wmi_unified_t wmi_handle, + struct stats_ext_params *preq) +{ + QDF_STATUS ret; + wmi_req_stats_ext_cmd_fixed_param *cmd; + wmi_buf_t buf; + size_t len; + uint8_t *buf_ptr; + + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE + preq->request_data_len; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_req_stats_ext_cmd_fixed_param *) buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_req_stats_ext_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_req_stats_ext_cmd_fixed_param)); + cmd->vdev_id = preq->vdev_id; + cmd->data_len = preq->request_data_len; + + WMI_LOGD("%s: The data len value is %u and vdev id set is %u ", + __func__, preq->request_data_len, preq->vdev_id); + + buf_ptr += sizeof(wmi_req_stats_ext_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, cmd->data_len); + + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, preq->request_data, cmd->data_len); + + wmi_mtrace(WMI_REQUEST_STATS_EXT_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_REQUEST_STATS_EXT_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send notify cmd ret = %d", __func__, + ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_enable_ext_wow_cmd_tlv() - enable ext wow in fw + * @wmi_handle: wmi handle + * @params: ext wow params + * + * Return:0 for success or error code + */ +static QDF_STATUS send_enable_ext_wow_cmd_tlv(wmi_unified_t wmi_handle, + struct ext_wow_params *params) +{ + wmi_extwow_enable_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len; + int ret; + + len = sizeof(wmi_extwow_enable_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_extwow_enable_cmd_fixed_param *) wmi_buf_data(buf); + + 
WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extwow_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extwow_enable_cmd_fixed_param)); + + cmd->vdev_id = params->vdev_id; + cmd->type = params->type; + cmd->wakeup_pin_num = params->wakeup_pin_num; + + WMI_LOGD("%s: vdev_id %d type %d Wakeup_pin_num %x", + __func__, cmd->vdev_id, cmd->type, cmd->wakeup_pin_num); + + wmi_mtrace(WMI_EXTWOW_ENABLE_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_EXTWOW_ENABLE_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to set EXTWOW Enable", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; + +} + +/** + * send_app_type1_params_in_fw_cmd_tlv() - set app type1 params in fw + * @wmi_handle: wmi handle + * @app_type1_params: app type1 params + * + * Return: CDF status + */ +static QDF_STATUS send_app_type1_params_in_fw_cmd_tlv(wmi_unified_t wmi_handle, + struct app_type1_params *app_type1_params) +{ + wmi_extwow_set_app_type1_params_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len; + int ret; + + len = sizeof(wmi_extwow_set_app_type1_params_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_extwow_set_app_type1_params_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extwow_set_app_type1_params_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extwow_set_app_type1_params_cmd_fixed_param)); + + cmd->vdev_id = app_type1_params->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(app_type1_params->wakee_mac_addr.bytes, + &cmd->wakee_mac); + qdf_mem_copy(cmd->ident, app_type1_params->identification_id, 8); + cmd->ident_len = app_type1_params->id_length; + qdf_mem_copy(cmd->passwd, app_type1_params->password, 16); + cmd->passwd_len = app_type1_params->pass_length; + + WMI_LOGD("%s: vdev_id %d wakee_mac_addr %pM " + "identification_id 
%.8s id_length %u " + "password %.16s pass_length %u", + __func__, cmd->vdev_id, app_type1_params->wakee_mac_addr.bytes, + cmd->ident, cmd->ident_len, cmd->passwd, cmd->passwd_len); + + wmi_mtrace(WMI_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to set APP TYPE1 PARAMS", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_app_type2_params_in_fw_cmd_tlv() - set app type2 params in fw + * @wmi_handle: wmi handle + * @appType2Params: app type2 params + * + * Return: CDF status + */ +static QDF_STATUS send_set_app_type2_params_in_fw_cmd_tlv(wmi_unified_t wmi_handle, + struct app_type2_params *appType2Params) +{ + wmi_extwow_set_app_type2_params_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len; + int ret; + + len = sizeof(wmi_extwow_set_app_type2_params_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_extwow_set_app_type2_params_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_extwow_set_app_type2_params_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_extwow_set_app_type2_params_cmd_fixed_param)); + + cmd->vdev_id = appType2Params->vdev_id; + + qdf_mem_copy(cmd->rc4_key, appType2Params->rc4_key, 16); + cmd->rc4_key_len = appType2Params->rc4_key_len; + + cmd->ip_id = appType2Params->ip_id; + cmd->ip_device_ip = appType2Params->ip_device_ip; + cmd->ip_server_ip = appType2Params->ip_server_ip; + + cmd->tcp_src_port = appType2Params->tcp_src_port; + cmd->tcp_dst_port = appType2Params->tcp_dst_port; + cmd->tcp_seq = appType2Params->tcp_seq; + cmd->tcp_ack_seq = appType2Params->tcp_ack_seq; + + cmd->keepalive_init = appType2Params->keepalive_init; + cmd->keepalive_min = 
appType2Params->keepalive_min; + cmd->keepalive_max = appType2Params->keepalive_max; + cmd->keepalive_inc = appType2Params->keepalive_inc; + + WMI_CHAR_ARRAY_TO_MAC_ADDR(appType2Params->gateway_mac.bytes, + &cmd->gateway_mac); + cmd->tcp_tx_timeout_val = appType2Params->tcp_tx_timeout_val; + cmd->tcp_rx_timeout_val = appType2Params->tcp_rx_timeout_val; + + WMI_LOGD("%s: vdev_id %d gateway_mac %pM " + "rc4_key %.16s rc4_key_len %u " + "ip_id %x ip_device_ip %x ip_server_ip %x " + "tcp_src_port %u tcp_dst_port %u tcp_seq %u " + "tcp_ack_seq %u keepalive_init %u keepalive_min %u " + "keepalive_max %u keepalive_inc %u " + "tcp_tx_timeout_val %u tcp_rx_timeout_val %u", + __func__, cmd->vdev_id, appType2Params->gateway_mac.bytes, + cmd->rc4_key, cmd->rc4_key_len, + cmd->ip_id, cmd->ip_device_ip, cmd->ip_server_ip, + cmd->tcp_src_port, cmd->tcp_dst_port, cmd->tcp_seq, + cmd->tcp_ack_seq, cmd->keepalive_init, cmd->keepalive_min, + cmd->keepalive_max, cmd->keepalive_inc, + cmd->tcp_tx_timeout_val, cmd->tcp_rx_timeout_val); + + wmi_mtrace(WMI_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to set APP TYPE2 PARAMS", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; + +} + +/** + * send_set_auto_shutdown_timer_cmd_tlv() - sets auto shutdown timer in firmware + * @wmi_handle: wmi handle + * @timer_val: auto shutdown timer value + * + * Return: CDF status + */ +static QDF_STATUS send_set_auto_shutdown_timer_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t timer_val) +{ + QDF_STATUS status; + wmi_buf_t buf = NULL; + uint8_t *buf_ptr; + wmi_host_auto_shutdown_cfg_cmd_fixed_param *wmi_auto_sh_cmd; + int len = sizeof(wmi_host_auto_shutdown_cfg_cmd_fixed_param); + + WMI_LOGD("%s: Set WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID:TIMER_VAL=%d", + __func__, timer_val); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { 
+ WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + wmi_auto_sh_cmd = + (wmi_host_auto_shutdown_cfg_cmd_fixed_param *) buf_ptr; + wmi_auto_sh_cmd->timer_value = timer_val; + + WMITLV_SET_HDR(&wmi_auto_sh_cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_host_auto_shutdown_cfg_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_host_auto_shutdown_cfg_cmd_fixed_param)); + + wmi_mtrace(WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("%s: WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID Err %d", + __func__, status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_nan_req_cmd_tlv() - to send nan request to target + * @wmi_handle: wmi handle + * @nan_req: request data which will be non-null + * + * Return: CDF status + */ +static QDF_STATUS send_nan_req_cmd_tlv(wmi_unified_t wmi_handle, + struct nan_req_params *nan_req) +{ + QDF_STATUS ret; + wmi_nan_cmd_param *cmd; + wmi_buf_t buf; + uint16_t len = sizeof(*cmd); + uint16_t nan_data_len, nan_data_len_aligned; + uint8_t *buf_ptr; + + /* + * <----- cmd ------------><-- WMI_TLV_HDR_SIZE --><--- data ----> + * +------------+----------+-----------------------+--------------+ + * | tlv_header | data_len | WMITLV_TAG_ARRAY_BYTE | nan_req_data | + * +------------+----------+-----------------------+--------------+ + */ + if (!nan_req) { + WMI_LOGE("%s:nan req is not valid", __func__); + return QDF_STATUS_E_FAILURE; + } + nan_data_len = nan_req->request_data_len; + nan_data_len_aligned = roundup(nan_req->request_data_len, + sizeof(uint32_t)); + if (nan_data_len_aligned < nan_req->request_data_len) { + WMI_LOGE("%s: integer overflow while rounding up data_len", + __func__); + return QDF_STATUS_E_FAILURE; + } + + if (nan_data_len_aligned > WMI_SVC_MSG_MAX_SIZE - WMI_TLV_HDR_SIZE) { + WMI_LOGE("%s: wmi_max_msg_size overflow for 
given datalen", + __func__); + return QDF_STATUS_E_FAILURE; + } + + len += WMI_TLV_HDR_SIZE + nan_data_len_aligned; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_nan_cmd_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_nan_cmd_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_nan_cmd_param)); + cmd->data_len = nan_req->request_data_len; + WMI_LOGD("%s: The data len value is %u", + __func__, nan_req->request_data_len); + buf_ptr += sizeof(wmi_nan_cmd_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, nan_data_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + qdf_mem_copy(buf_ptr, nan_req->request_data, cmd->data_len); + + wmi_mtrace(WMI_NAN_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_NAN_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s Failed to send set param command ret = %d", + __func__, ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_process_dhcpserver_offload_cmd_tlv() - enable DHCP server offload + * @wmi_handle: wmi handle + * @params: DHCP server offload info + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS +send_process_dhcpserver_offload_cmd_tlv(wmi_unified_t wmi_handle, + struct dhcp_offload_info_params *params) +{ + wmi_set_dhcp_server_offload_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate buffer to send " + "set_dhcp_server_offload cmd"); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_set_dhcp_server_offload_cmd_fixed_param *) wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_set_dhcp_server_offload_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_set_dhcp_server_offload_cmd_fixed_param)); + cmd->vdev_id = params->vdev_id; + cmd->enable = 
params->dhcp_offload_enabled; + cmd->num_client = params->dhcp_client_num; + cmd->srv_ipv4 = params->dhcp_srv_addr; + cmd->start_lsb = 0; + wmi_mtrace(WMI_SET_DHCP_SERVER_OFFLOAD_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + sizeof(*cmd), + WMI_SET_DHCP_SERVER_OFFLOAD_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send set_dhcp_server_offload cmd"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + WMI_LOGD("Set dhcp server offload to vdevId %d", + params->vdev_id); + + return status; +} + +/** + * send_set_led_flashing_cmd_tlv() - set led flashing in fw + * @wmi_handle: wmi handle + * @flashing: flashing request + * + * Return: CDF status + */ +static QDF_STATUS send_set_led_flashing_cmd_tlv(wmi_unified_t wmi_handle, + struct flashing_req_params *flashing) +{ + wmi_set_led_flashing_cmd_fixed_param *cmd; + QDF_STATUS status; + wmi_buf_t buf; + uint8_t *buf_ptr; + int32_t len = sizeof(wmi_set_led_flashing_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP(FL("wmi_buf_alloc failed")); + return QDF_STATUS_E_NOMEM; + } + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_set_led_flashing_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_set_led_flashing_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_set_led_flashing_cmd_fixed_param)); + cmd->pattern_id = flashing->pattern_id; + cmd->led_x0 = flashing->led_x0; + cmd->led_x1 = flashing->led_x1; + + wmi_mtrace(WMI_PDEV_SET_LED_FLASHING_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_LED_FLASHING_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("%s: wmi_unified_cmd_send WMI_PEER_SET_PARAM_CMD" + " returned Error %d", __func__, status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_process_ch_avoid_update_cmd_tlv() - handles channel avoid update request + * @wmi_handle: wmi handle + * @ch_avoid_update_req: channel avoid 
update params + * + * Return: CDF status + */ +static QDF_STATUS send_process_ch_avoid_update_cmd_tlv(wmi_unified_t wmi_handle) +{ + QDF_STATUS status; + wmi_buf_t buf = NULL; + uint8_t *buf_ptr; + wmi_chan_avoid_update_cmd_param *ch_avoid_update_fp; + int len = sizeof(wmi_chan_avoid_update_cmd_param); + + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + ch_avoid_update_fp = (wmi_chan_avoid_update_cmd_param *) buf_ptr; + WMITLV_SET_HDR(&ch_avoid_update_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_chan_avoid_update_cmd_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_chan_avoid_update_cmd_param)); + + wmi_mtrace(WMI_CHAN_AVOID_UPDATE_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_CHAN_AVOID_UPDATE_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("wmi_unified_cmd_send" + " WMITLV_TABLE_WMI_CHAN_AVOID_UPDATE" + " returned Error %d", status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_pdev_set_regdomain_cmd_tlv() - send set regdomain command to fw + * @wmi_handle: wmi handle + * @param: pointer to pdev regdomain params + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_pdev_set_regdomain_cmd_tlv(wmi_unified_t wmi_handle, + struct pdev_set_regdomain_params *param) +{ + wmi_buf_t buf; + wmi_pdev_set_regdomain_cmd_fixed_param *cmd; + int32_t len = sizeof(*cmd); + + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_pdev_set_regdomain_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_regdomain_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_set_regdomain_cmd_fixed_param)); + + cmd->reg_domain = param->currentRDinuse; + cmd->reg_domain_2G = param->currentRD2G; + cmd->reg_domain_5G = param->currentRD5G; + 
cmd->conformance_test_limit_2G = param->ctl_2G; + cmd->conformance_test_limit_5G = param->ctl_5G; + cmd->dfs_domain = param->dfsDomain; + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + param->pdev_id); + + wmi_mtrace(WMI_PDEV_SET_REGDOMAIN_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_REGDOMAIN_CMDID)) { + WMI_LOGE("%s: Failed to send pdev set regdomain command", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_regdomain_info_to_fw_cmd_tlv() - send regdomain info to fw + * @wmi_handle: wmi handle + * @reg_dmn: reg domain + * @regdmn2G: 2G reg domain + * @regdmn5G: 5G reg domain + * @ctl2G: 2G test limit + * @ctl5G: 5G test limit + * + * Return: none + */ +static QDF_STATUS send_regdomain_info_to_fw_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t reg_dmn, uint16_t regdmn2G, + uint16_t regdmn5G, uint8_t ctl2G, + uint8_t ctl5G) +{ + wmi_buf_t buf; + wmi_pdev_set_regdomain_cmd_fixed_param *cmd; + int32_t len = sizeof(*cmd); + + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_pdev_set_regdomain_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_regdomain_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_set_regdomain_cmd_fixed_param)); + cmd->reg_domain = reg_dmn; + cmd->reg_domain_2G = regdmn2G; + cmd->reg_domain_5G = regdmn5G; + cmd->conformance_test_limit_2G = ctl2G; + cmd->conformance_test_limit_5G = ctl5G; + + wmi_mtrace(WMI_PDEV_SET_REGDOMAIN_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_REGDOMAIN_CMDID)) { + WMI_LOGP("%s: Failed to send pdev set regdomain command", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef FEATURE_WLAN_TDLS +/** + * tdls_get_wmi_offchannel_mode - Get 
WMI tdls off channel mode + * @tdls_sw_mode: tdls_sw_mode + * + * This function returns wmi tdls offchannel mode + * + * Return: enum value of wmi tdls offchannel mode + */ +static uint8_t tdls_get_wmi_offchannel_mode(uint8_t tdls_sw_mode) +{ + uint8_t off_chan_mode; + + switch (tdls_sw_mode) { + case ENABLE_CHANSWITCH: + off_chan_mode = WMI_TDLS_ENABLE_OFFCHANNEL; + break; + + case DISABLE_CHANSWITCH: + off_chan_mode = WMI_TDLS_DISABLE_OFFCHANNEL; + break; + + default: + WMI_LOGD(FL("unknown tdls_sw_mode %d"), tdls_sw_mode); + off_chan_mode = WMI_TDLS_DISABLE_OFFCHANNEL; + } + return off_chan_mode; +} + +/** + * tdls_get_wmi_offchannel_bw - Get WMI tdls off channel Bandwidth + * @tdls_sw_mode: tdls_sw_mode + * + * This function returns wmi tdls offchannel bandwidth + * + * Return: TDLS offchannel bandwidth + */ +static uint8_t tdls_get_wmi_offchannel_bw(uint16_t tdls_off_ch_bw_offset) +{ + uint8_t off_chan_bw; + + switch (tdls_off_ch_bw_offset) { + case BW20: + off_chan_bw = WMI_TDLS_OFFCHAN_20MHZ; + break; + case BW40_LOW_PRIMARY: + case BW40_HIGH_PRIMARY: + off_chan_bw = WMI_TDLS_OFFCHAN_40MHZ; + break; + case BW80: + off_chan_bw = WMI_TDLS_OFFCHAN_80MHZ; + case BWALL: + off_chan_bw = WMI_TDLS_OFFCHAN_160MHZ; + default: + WMI_LOGD(FL("unknown tdls offchannel bw offset %d"), + tdls_off_ch_bw_offset); + off_chan_bw = WMI_TDLS_OFFCHAN_20MHZ; + } + return off_chan_bw; +} + +#else +static uint8_t tdls_get_wmi_offchannel_mode(uint8_t tdls_sw_mode) +{ + return WMI_TDLS_DISABLE_OFFCHANNEL; +} + +static uint8_t tdls_get_wmi_offchannel_bw(uint16_t tdls_off_ch_bw_offset) +{ + return WMI_TDLS_OFFCHAN_20MHZ; +} +#endif + +/** + * send_set_tdls_offchan_mode_cmd_tlv() - set tdls off channel mode + * @wmi_handle: wmi handle + * @chan_switch_params: Pointer to tdls channel switch parameter structure + * + * This function sets tdls off channel mode + * + * Return: 0 on success; Negative errno otherwise + */ +static QDF_STATUS send_set_tdls_offchan_mode_cmd_tlv(wmi_unified_t 
wmi_handle, + struct tdls_channel_switch_params *chan_switch_params) +{ + wmi_tdls_set_offchan_mode_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + u_int16_t len = sizeof(wmi_tdls_set_offchan_mode_cmd_fixed_param); + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE(FL("wmi_buf_alloc failed")); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_tdls_set_offchan_mode_cmd_fixed_param *) + wmi_buf_data(wmi_buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_tdls_set_offchan_mode_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_tdls_set_offchan_mode_cmd_fixed_param)); + + WMI_CHAR_ARRAY_TO_MAC_ADDR(chan_switch_params->peer_mac_addr, + &cmd->peer_macaddr); + cmd->vdev_id = chan_switch_params->vdev_id; + cmd->offchan_mode = + tdls_get_wmi_offchannel_mode(chan_switch_params->tdls_sw_mode); + cmd->is_peer_responder = chan_switch_params->is_responder; + cmd->offchan_num = chan_switch_params->tdls_off_ch; + cmd->offchan_bw_bitmap = + tdls_get_wmi_offchannel_bw( + chan_switch_params->tdls_off_ch_bw_offset); + cmd->offchan_oper_class = chan_switch_params->oper_class; + + WMI_LOGD(FL("Peer MAC Addr mac_addr31to0: 0x%x, mac_addr47to32: 0x%x"), + cmd->peer_macaddr.mac_addr31to0, + cmd->peer_macaddr.mac_addr47to32); + + WMI_LOGD(FL( + "vdev_id: %d, off channel mode: %d, off channel Num: %d, " + "off channel offset: 0x%x, is_peer_responder: %d, operating class: %d" + ), + cmd->vdev_id, + cmd->offchan_mode, + cmd->offchan_num, + cmd->offchan_bw_bitmap, + cmd->is_peer_responder, + cmd->offchan_oper_class); + + wmi_mtrace(WMI_TDLS_SET_OFFCHAN_MODE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_TDLS_SET_OFFCHAN_MODE_CMDID)) { + WMI_LOGP(FL("failed to send tdls off chan command")); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + + return QDF_STATUS_SUCCESS; +} + +/** + * send_update_fw_tdls_state_cmd_tlv() - send enable/disable tdls for a vdev + * @wmi_handle: wmi handle + * @pwmaTdlsparams: TDLS params 
+ * + * Return: 0 for success or error code + */ +static QDF_STATUS send_update_fw_tdls_state_cmd_tlv(wmi_unified_t wmi_handle, + void *tdls_param, uint8_t tdls_state) +{ + wmi_tdls_set_state_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + + struct wmi_tdls_params *wmi_tdls = (struct wmi_tdls_params *) tdls_param; + uint16_t len = sizeof(wmi_tdls_set_state_cmd_fixed_param); + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmai_buf_alloc failed", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_tdls_set_state_cmd_fixed_param *) wmi_buf_data(wmi_buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_tdls_set_state_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_tdls_set_state_cmd_fixed_param)); + cmd->vdev_id = wmi_tdls->vdev_id; + cmd->state = tdls_state; + cmd->notification_interval_ms = wmi_tdls->notification_interval_ms; + cmd->tx_discovery_threshold = wmi_tdls->tx_discovery_threshold; + cmd->tx_teardown_threshold = wmi_tdls->tx_teardown_threshold; + cmd->rssi_teardown_threshold = wmi_tdls->rssi_teardown_threshold; + cmd->rssi_delta = wmi_tdls->rssi_delta; + cmd->tdls_options = wmi_tdls->tdls_options; + cmd->tdls_peer_traffic_ind_window = wmi_tdls->peer_traffic_ind_window; + cmd->tdls_peer_traffic_response_timeout_ms = + wmi_tdls->peer_traffic_response_timeout; + cmd->tdls_puapsd_mask = wmi_tdls->puapsd_mask; + cmd->tdls_puapsd_inactivity_time_ms = wmi_tdls->puapsd_inactivity_time; + cmd->tdls_puapsd_rx_frame_threshold = + wmi_tdls->puapsd_rx_frame_threshold; + cmd->teardown_notification_ms = + wmi_tdls->teardown_notification_ms; + cmd->tdls_peer_kickout_threshold = + wmi_tdls->tdls_peer_kickout_threshold; + + WMI_LOGD("%s: tdls_state: %d, state: %d, " + "notification_interval_ms: %d, " + "tx_discovery_threshold: %d, " + "tx_teardown_threshold: %d, " + "rssi_teardown_threshold: %d, " + "rssi_delta: %d, " + "tdls_options: 0x%x, " + "tdls_peer_traffic_ind_window: %d, " + "tdls_peer_traffic_response_timeout: %d, " + 
"tdls_puapsd_mask: 0x%x, " + "tdls_puapsd_inactivity_time: %d, " + "tdls_puapsd_rx_frame_threshold: %d, " + "teardown_notification_ms: %d, " + "tdls_peer_kickout_threshold: %d", + __func__, tdls_state, cmd->state, + cmd->notification_interval_ms, + cmd->tx_discovery_threshold, + cmd->tx_teardown_threshold, + cmd->rssi_teardown_threshold, + cmd->rssi_delta, + cmd->tdls_options, + cmd->tdls_peer_traffic_ind_window, + cmd->tdls_peer_traffic_response_timeout_ms, + cmd->tdls_puapsd_mask, + cmd->tdls_puapsd_inactivity_time_ms, + cmd->tdls_puapsd_rx_frame_threshold, + cmd->teardown_notification_ms, + cmd->tdls_peer_kickout_threshold); + + wmi_mtrace(WMI_TDLS_SET_STATE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_TDLS_SET_STATE_CMDID)) { + WMI_LOGP("%s: failed to send tdls set state command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + WMI_LOGD("%s: vdev_id %d", __func__, wmi_tdls->vdev_id); + + return QDF_STATUS_SUCCESS; +} + +/** + * send_update_tdls_peer_state_cmd_tlv() - update TDLS peer state + * @wmi_handle: wmi handle + * @peerStateParams: TDLS peer state params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_update_tdls_peer_state_cmd_tlv(wmi_unified_t wmi_handle, + struct tdls_peer_state_params *peerStateParams, + uint32_t *ch_mhz) +{ + wmi_tdls_peer_update_cmd_fixed_param *cmd; + wmi_tdls_peer_capabilities *peer_cap; + wmi_channel *chan_info; + wmi_buf_t wmi_buf; + uint8_t *buf_ptr; + uint32_t i; + int32_t len = sizeof(wmi_tdls_peer_update_cmd_fixed_param) + + sizeof(wmi_tdls_peer_capabilities); + + + len += WMI_TLV_HDR_SIZE + + sizeof(wmi_channel) * peerStateParams->peerCap.peerChanLen; + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_FAILURE; + } + + buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf); + cmd = (wmi_tdls_peer_update_cmd_fixed_param *) buf_ptr; + 
WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_tdls_peer_update_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_tdls_peer_update_cmd_fixed_param)); + + cmd->vdev_id = peerStateParams->vdevId; + WMI_CHAR_ARRAY_TO_MAC_ADDR(peerStateParams->peerMacAddr, + &cmd->peer_macaddr); + + + cmd->peer_state = peerStateParams->peerState; + + WMI_LOGD("%s: vdev_id: %d, peerStateParams->peerMacAddr: %pM, " + "peer_macaddr.mac_addr31to0: 0x%x, " + "peer_macaddr.mac_addr47to32: 0x%x, peer_state: %d", + __func__, cmd->vdev_id, peerStateParams->peerMacAddr, + cmd->peer_macaddr.mac_addr31to0, + cmd->peer_macaddr.mac_addr47to32, cmd->peer_state); + + buf_ptr += sizeof(wmi_tdls_peer_update_cmd_fixed_param); + peer_cap = (wmi_tdls_peer_capabilities *) buf_ptr; + WMITLV_SET_HDR(&peer_cap->tlv_header, + WMITLV_TAG_STRUC_wmi_tdls_peer_capabilities, + WMITLV_GET_STRUCT_TLVLEN(wmi_tdls_peer_capabilities)); + + if ((peerStateParams->peerCap.peerUapsdQueue & 0x08) >> 3) + WMI_SET_TDLS_PEER_VO_UAPSD(peer_cap); + if ((peerStateParams->peerCap.peerUapsdQueue & 0x04) >> 2) + WMI_SET_TDLS_PEER_VI_UAPSD(peer_cap); + if ((peerStateParams->peerCap.peerUapsdQueue & 0x02) >> 1) + WMI_SET_TDLS_PEER_BK_UAPSD(peer_cap); + if (peerStateParams->peerCap.peerUapsdQueue & 0x01) + WMI_SET_TDLS_PEER_BE_UAPSD(peer_cap); + + /* Ack and More Data Ack are sent as 0, so no need to set + * but fill SP + */ + WMI_SET_TDLS_PEER_SP_UAPSD(peer_cap, + peerStateParams->peerCap.peerMaxSp); + + peer_cap->buff_sta_support = + peerStateParams->peerCap.peerBuffStaSupport; + peer_cap->off_chan_support = + peerStateParams->peerCap.peerOffChanSupport; + peer_cap->peer_curr_operclass = + peerStateParams->peerCap.peerCurrOperClass; + /* self curr operclass is not being used and so pass op class for + * preferred off chan in it. 
+ */ + peer_cap->self_curr_operclass = + peerStateParams->peerCap.opClassForPrefOffChan; + peer_cap->peer_chan_len = peerStateParams->peerCap.peerChanLen; + peer_cap->peer_operclass_len = + peerStateParams->peerCap.peerOperClassLen; + + WMI_LOGD("%s: peer_operclass_len: %d", + __func__, peer_cap->peer_operclass_len); + for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++) { + peer_cap->peer_operclass[i] = + peerStateParams->peerCap.peerOperClass[i]; + WMI_LOGD("%s: peer_operclass[%d]: %d", + __func__, i, peer_cap->peer_operclass[i]); + } + + peer_cap->is_peer_responder = peerStateParams->peerCap.isPeerResponder; + peer_cap->pref_offchan_num = peerStateParams->peerCap.prefOffChanNum; + peer_cap->pref_offchan_bw = + peerStateParams->peerCap.prefOffChanBandwidth; + + WMI_LOGD + ("%s: peer_qos: 0x%x, buff_sta_support: %d, off_chan_support: %d, " + "peer_curr_operclass: %d, self_curr_operclass: %d, peer_chan_len: " + "%d, peer_operclass_len: %d, is_peer_responder: %d, pref_offchan_num:" + " %d, pref_offchan_bw: %d", + __func__, peer_cap->peer_qos, peer_cap->buff_sta_support, + peer_cap->off_chan_support, peer_cap->peer_curr_operclass, + peer_cap->self_curr_operclass, peer_cap->peer_chan_len, + peer_cap->peer_operclass_len, peer_cap->is_peer_responder, + peer_cap->pref_offchan_num, peer_cap->pref_offchan_bw); + + /* next fill variable size array of peer chan info */ + buf_ptr += sizeof(wmi_tdls_peer_capabilities); + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_channel) * + peerStateParams->peerCap.peerChanLen); + chan_info = (wmi_channel *) (buf_ptr + WMI_TLV_HDR_SIZE); + + for (i = 0; i < peerStateParams->peerCap.peerChanLen; ++i) { + WMITLV_SET_HDR(&chan_info->tlv_header, + WMITLV_TAG_STRUC_wmi_channel, + WMITLV_GET_STRUCT_TLVLEN(wmi_channel)); + chan_info->mhz = ch_mhz[i]; + chan_info->band_center_freq1 = chan_info->mhz; + chan_info->band_center_freq2 = 0; + + WMI_LOGD("%s: chan[%d] = %u", __func__, i, chan_info->mhz); + + if 
(peerStateParams->peerCap.peerChan[i].dfsSet) { + WMI_SET_CHANNEL_FLAG(chan_info, WMI_CHAN_FLAG_PASSIVE); + WMI_LOGI("chan[%d] DFS[%d]\n", + peerStateParams->peerCap.peerChan[i].chanId, + peerStateParams->peerCap.peerChan[i].dfsSet); + } + + if (chan_info->mhz < WMI_2_4_GHZ_MAX_FREQ) + WMI_SET_CHANNEL_MODE(chan_info, MODE_11G); + else + WMI_SET_CHANNEL_MODE(chan_info, MODE_11A); + + WMI_SET_CHANNEL_MAX_TX_POWER(chan_info, + peerStateParams->peerCap. + peerChan[i].pwr); + + WMI_SET_CHANNEL_REG_POWER(chan_info, + peerStateParams->peerCap.peerChan[i]. + pwr); + WMI_LOGD("Channel TX power[%d] = %u: %d", i, chan_info->mhz, + peerStateParams->peerCap.peerChan[i].pwr); + + chan_info++; + } + + wmi_mtrace(WMI_TDLS_PEER_UPDATE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_TDLS_PEER_UPDATE_CMDID)) { + WMI_LOGE("%s: failed to send tdls peer update state command", + __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + + return QDF_STATUS_SUCCESS; +} + +/* + * send_process_set_ie_info_cmd_tlv() - Function to send IE info to firmware + * @wmi_handle: Pointer to WMi handle + * @ie_data: Pointer for ie data + * + * This function sends IE information to firmware + * + * Return: QDF_STATUS_SUCCESS for success otherwise failure + * + */ +static QDF_STATUS send_process_set_ie_info_cmd_tlv(wmi_unified_t wmi_handle, + struct vdev_ie_info_param *ie_info) +{ + wmi_vdev_set_ie_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint32_t len, ie_len_aligned; + QDF_STATUS ret; + + + ie_len_aligned = roundup(ie_info->length, sizeof(uint32_t)); + /* Allocate memory for the WMI command */ + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE + ie_len_aligned; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE(FL("wmi_buf_alloc failed")); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = wmi_buf_data(buf); + qdf_mem_zero(buf_ptr, len); + + /* Populate the WMI command */ + cmd = (wmi_vdev_set_ie_cmd_fixed_param *)buf_ptr; + + 
WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_set_ie_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_vdev_set_ie_cmd_fixed_param)); + cmd->vdev_id = ie_info->vdev_id; + cmd->ie_id = ie_info->ie_id; + cmd->ie_len = ie_info->length; + cmd->band = ie_info->band; + + WMI_LOGD(FL("IE:%d of size:%d sent for vdev:%d"), ie_info->ie_id, + ie_info->length, ie_info->vdev_id); + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, ie_len_aligned); + buf_ptr += WMI_TLV_HDR_SIZE; + + qdf_mem_copy(buf_ptr, ie_info->data, cmd->ie_len); + + wmi_mtrace(WMI_VDEV_SET_IE_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_SET_IE_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE(FL("Failed to send set IE command ret = %d"), ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_smart_ant_enable_cmd_tlv() - WMI smart ant enable function + * + * @param wmi_handle : handle to WMI. + * @param param : pointer to antenna param + * + * This function sends smart antenna enable command to FW + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ +static QDF_STATUS send_smart_ant_enable_cmd_tlv(wmi_unified_t wmi_handle, + struct smart_ant_enable_params *param) +{ + /* Send WMI COMMAND to Enable */ + wmi_pdev_smart_ant_enable_cmd_fixed_param *cmd; + wmi_pdev_smart_ant_gpio_handle *gpio_param; + wmi_buf_t buf; + uint8_t *buf_ptr; + int len = 0; + QDF_STATUS ret; + int loop = 0; + + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE; + len += WMI_HAL_MAX_SANTENNA * sizeof(wmi_pdev_smart_ant_gpio_handle); + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = wmi_buf_data(buf); + qdf_mem_zero(buf_ptr, len); + cmd = (wmi_pdev_smart_ant_enable_cmd_fixed_param *)buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_smart_ant_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_smart_ant_enable_cmd_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + param->pdev_id); + cmd->enable = param->enable; + cmd->mode = param->mode; + cmd->rx_antenna = param->rx_antenna; + cmd->tx_default_antenna = param->rx_antenna; + + /* TLV indicating array of structures to follow */ + buf_ptr += sizeof(wmi_pdev_smart_ant_enable_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + WMI_HAL_MAX_SANTENNA * + sizeof(wmi_pdev_smart_ant_gpio_handle)); + + buf_ptr += WMI_TLV_HDR_SIZE; + gpio_param = (wmi_pdev_smart_ant_gpio_handle *)buf_ptr; + + for (loop = 0; loop < WMI_HAL_MAX_SANTENNA; loop++) { + WMITLV_SET_HDR(&gpio_param->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_smart_ant_gpio_handle, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_smart_ant_gpio_handle)); + if (param->mode == SMART_ANT_MODE_SERIAL) { + if (loop < WMI_HOST_MAX_SERIAL_ANTENNA) { + gpio_param->gpio_pin = param->gpio_pin[loop]; + gpio_param->gpio_func = param->gpio_func[loop]; + } else { + gpio_param->gpio_pin = 0; + gpio_param->gpio_func = 0; + } + } else if (param->mode == SMART_ANT_MODE_PARALLEL) { + 
gpio_param->gpio_pin = param->gpio_pin[loop]; + gpio_param->gpio_func = param->gpio_func[loop]; + } + /* Setting it to 0 for now */ + gpio_param->pdev_id = + wmi_handle->ops->convert_pdev_id_host_to_target( + param->pdev_id); + gpio_param++; + } + + wmi_mtrace(WMI_PDEV_SMART_ANT_ENABLE_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, + buf, + len, + WMI_PDEV_SMART_ANT_ENABLE_CMDID); + + if (ret != 0) { + WMI_LOGE(" %s :WMI Failed\n", __func__); + WMI_LOGE("enable:%d mode:%d rx_antenna: 0x%08x PINS: [%d %d %d %d] Func[%d %d %d %d] cmdstatus=%d\n", + cmd->enable, + cmd->mode, + cmd->rx_antenna, + param->gpio_pin[0], param->gpio_pin[1], + param->gpio_pin[2], param->gpio_pin[3], + param->gpio_func[0], param->gpio_func[1], + param->gpio_func[2], param->gpio_func[3], + ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_smart_ant_set_rx_ant_cmd_tlv() - WMI set rx antenna function + * + * @param wmi_handle : handle to WMI. + * @param param : pointer to rx antenna param + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ +static QDF_STATUS send_smart_ant_set_rx_ant_cmd_tlv(wmi_unified_t wmi_handle, + struct smart_ant_rx_ant_params *param) +{ + wmi_pdev_smart_ant_set_rx_antenna_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint32_t len; + QDF_STATUS ret; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + WMI_LOGD("%s:\n", __func__); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = wmi_buf_data(buf); + cmd = (wmi_pdev_smart_ant_set_rx_antenna_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_smart_ant_set_rx_antenna_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_smart_ant_set_rx_antenna_cmd_fixed_param)); + cmd->rx_antenna = param->antenna; + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + param->pdev_id); + + wmi_mtrace(WMI_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, + buf, + len, + WMI_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID); + + if (ret != 0) { + WMI_LOGE(" %s :WMI Failed\n", __func__); + WMI_LOGE("%s: rx_antenna: 0x%08x cmdstatus=%d\n", + __func__, + cmd->rx_antenna, + ret); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_set_ctl_table_cmd_tlv() - send ctl table cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold ctl table param + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ +static QDF_STATUS +send_set_ctl_table_cmd_tlv(wmi_unified_t wmi_handle, + struct ctl_table_params *param) +{ + uint16_t len, ctl_tlv_len; + uint8_t *buf_ptr; + wmi_buf_t buf; + wmi_pdev_set_ctl_table_cmd_fixed_param *cmd; + uint32_t *ctl_array; + + if (!param->ctl_array) + return QDF_STATUS_E_FAILURE; + + ctl_tlv_len = WMI_TLV_HDR_SIZE + + roundup(param->ctl_cmd_len, sizeof(uint32_t)); + len = sizeof(*cmd) + ctl_tlv_len; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + buf_ptr = wmi_buf_data(buf); + qdf_mem_zero(buf_ptr, len); + + cmd = (wmi_pdev_set_ctl_table_cmd_fixed_param *)buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_ctl_table_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_set_ctl_table_cmd_fixed_param)); + cmd->ctl_len = param->ctl_cmd_len; + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + param->pdev_id); + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (cmd->ctl_len)); + buf_ptr += WMI_TLV_HDR_SIZE; + ctl_array = (uint32_t *)buf_ptr; + + WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(&ctl_array[0], ¶m->ctl_band, + sizeof(param->ctl_band)); + WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(&ctl_array[1], param->ctl_array, + param->ctl_cmd_len - + sizeof(param->ctl_band)); + + wmi_mtrace(WMI_PDEV_SET_CTL_TABLE_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_CTL_TABLE_CMDID)) { + WMI_LOGE("%s:Failed to send command\n", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_mimogain_table_cmd_tlv() - send mimogain table cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold mimogain table param + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ +static QDF_STATUS +send_set_mimogain_table_cmd_tlv(wmi_unified_t wmi_handle, + struct mimogain_table_params *param) +{ + uint16_t len, table_tlv_len; + wmi_buf_t buf; + uint8_t *buf_ptr; + wmi_pdev_set_mimogain_table_cmd_fixed_param *cmd; + uint32_t *gain_table; + + if (!param->array_gain) + return QDF_STATUS_E_FAILURE; + + /* len must be multiple of a single array gain table */ + if (param->tbl_len % + ((WMI_HOST_TX_NUM_CHAIN-1) * WMI_HOST_TPC_REGINDEX_MAX * + WMI_HOST_ARRAY_GAIN_NUM_STREAMS) != 0) { + WMI_LOGE("Array gain table len not correct\n"); + return QDF_STATUS_E_FAILURE; + } + + table_tlv_len = WMI_TLV_HDR_SIZE + + roundup(param->tbl_len, sizeof(uint32_t)); + len = sizeof(*cmd) + table_tlv_len; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + buf_ptr = wmi_buf_data(buf); + qdf_mem_zero(buf_ptr, len); + + cmd = (wmi_pdev_set_mimogain_table_cmd_fixed_param *)buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_mimogain_table_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_set_mimogain_table_cmd_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + param->pdev_id); + WMI_MIMOGAIN_ARRAY_GAIN_LEN_SET(cmd->mimogain_info, param->tbl_len); + WMI_MIMOGAIN_MULTI_CHAIN_BYPASS_SET(cmd->mimogain_info, + param->multichain_gain_bypass); + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (param->tbl_len)); + buf_ptr += WMI_TLV_HDR_SIZE; + gain_table = (uint32_t *)buf_ptr; + + WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(gain_table, + param->array_gain, + param->tbl_len); + + wmi_mtrace(WMI_PDEV_SET_MIMOGAIN_TABLE_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_MIMOGAIN_TABLE_CMDID)) { + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * enum packet_power_tlv_flags: target defined + * packet power rate flags for TLV + * 
@WMI_TLV_FLAG_ONE_CHAIN: one chain
 * @WMI_TLV_FLAG_TWO_CHAIN: two chain
 * @WMI_TLV_FLAG_THREE_CHAIN: three chain
 * @WMI_TLV_FLAG_FOUR_CHAIN: four chain
 * @WMI_TLV_FLAG_FIVE_CHAIN: five chain
 * @WMI_TLV_FLAG_SIX_CHAIN: six chain
 * @WMI_TLV_FLAG_SEVEN_CHAIN: seven chain
 * @WMI_TLV_FLAG_EIGHT_CHAIN: eight chain
 * @WMI_TLV_FLAG_STBC: STBC is set
 * @WMI_TLV_FLAG_40MHZ: 40MHz chan width
 * @WMI_TLV_FLAG_80MHZ: 80MHz chan width
 * @WMI_TLV_FLAG_160MHZ: 160MHz chan width
 * @WMI_TLV_FLAG_TXBF: Tx Bf enabled
 * @WMI_TLV_FLAG_RTSENA: RTS enabled
 * @WMI_TLV_FLAG_CTSENA: CTS enabled
 * @WMI_TLV_FLAG_LDPC: LDPC is set
 * @WMI_TLV_FLAG_SGI: Short guard interval
 * @WMI_TLV_FLAG_SU: SU Data
 * @WMI_TLV_FLAG_DL_MU_MIMO_AC: DL AC MU data
 * @WMI_TLV_FLAG_DL_MU_MIMO_AX: DL AX MU data
 * @WMI_TLV_FLAG_DL_OFDMA: DL OFDMA data
 * @WMI_TLV_FLAG_UL_OFDMA: UL OFDMA data
 * @WMI_TLV_FLAG_UL_MU_MIMO: UL MU data
 *
 * @WMI_TLV_FLAG_BW_MASK: bandwidth mask
 * @WMI_TLV_FLAG_BW_SHIFT: bandwidth shift
 * @WMI_TLV_FLAG_SU_MU_OFDMA_MASK: su/mu/ofdma mask
 * @WMI_TLV_FLAG_SU_MU_OFDMA_SHIFT: su/mu/ofdma shift
 */
enum packet_power_tlv_flags {
	/* Chain values are masks: N enabled chains = (1 << N) - 1 */
	WMI_TLV_FLAG_ONE_CHAIN = 0x00000001,
	WMI_TLV_FLAG_TWO_CHAIN = 0x00000003,
	WMI_TLV_FLAG_THREE_CHAIN = 0x00000007,
	WMI_TLV_FLAG_FOUR_CHAIN = 0x0000000F,
	WMI_TLV_FLAG_FIVE_CHAIN = 0x0000001F,
	WMI_TLV_FLAG_SIX_CHAIN = 0x0000003F,
	WMI_TLV_FLAG_SEVEN_CHAIN = 0x0000007F,
	/* NOTE(review): 0x8F breaks the (1 << N) - 1 pattern above
	 * (eight chains would be 0xFF) — confirm against the firmware
	 * definition before changing; value must match the target */
	WMI_TLV_FLAG_EIGHT_CHAIN = 0x0000008F,
	WMI_TLV_FLAG_STBC = 0x00000100,
	WMI_TLV_FLAG_40MHZ = 0x00000200,
	WMI_TLV_FLAG_80MHZ = 0x00000300,
	WMI_TLV_FLAG_160MHZ = 0x00000400,
	WMI_TLV_FLAG_TXBF = 0x00000800,
	WMI_TLV_FLAG_RTSENA = 0x00001000,
	WMI_TLV_FLAG_CTSENA = 0x00002000,
	WMI_TLV_FLAG_LDPC = 0x00004000,
	WMI_TLV_FLAG_SGI = 0x00008000,
	WMI_TLV_FLAG_SU = 0x00100000,
	WMI_TLV_FLAG_DL_MU_MIMO_AC = 0x00200000,
	WMI_TLV_FLAG_DL_MU_MIMO_AX = 0x00300000,
	WMI_TLV_FLAG_DL_OFDMA = 0x00400000,
	WMI_TLV_FLAG_UL_OFDMA = 0x00500000,

	WMI_TLV_FLAG_UL_MU_MIMO = 0x00600000,

	/* Field masks/shifts used by convert_to_power_info_rate_flags() */
	WMI_TLV_FLAG_CHAIN_MASK = 0xff,
	WMI_TLV_FLAG_BW_MASK = 0x3,
	WMI_TLV_FLAG_BW_SHIFT = 9,
	WMI_TLV_FLAG_SU_MU_OFDMA_MASK = 0x7,
	WMI_TLV_FLAG_SU_MU_OFDMA_SHIFT = 20,
};

/**
 * convert_to_power_info_rate_flags() - convert packet_power_info_params
 * to FW understandable format
 * @param: pointer to hold packet power info param
 *
 * Packs the host-side chainmask / channel width / su-mu-ofdma fields and
 * the individual WMI_HOST_FLAG_* rate flags into the single 32-bit
 * rate-flags word the firmware expects.
 *
 * @return FW understandable 32 bit rate flags
 */
static uint32_t
convert_to_power_info_rate_flags(struct packet_power_info_params *param)
{
	uint32_t rateflags = 0;

	if (param->chainmask)
		rateflags |=
			(param->chainmask & WMI_TLV_FLAG_CHAIN_MASK);
	if (param->chan_width)
		rateflags |=
			((param->chan_width & WMI_TLV_FLAG_BW_MASK)
			 << WMI_TLV_FLAG_BW_SHIFT);
	if (param->su_mu_ofdma)
		rateflags |=
			((param->su_mu_ofdma & WMI_TLV_FLAG_SU_MU_OFDMA_MASK)
			 << WMI_TLV_FLAG_SU_MU_OFDMA_SHIFT);
	if (param->rate_flags & WMI_HOST_FLAG_STBC)
		rateflags |= WMI_TLV_FLAG_STBC;
	if (param->rate_flags & WMI_HOST_FLAG_LDPC)
		rateflags |= WMI_TLV_FLAG_LDPC;
	if (param->rate_flags & WMI_HOST_FLAG_TXBF)
		rateflags |= WMI_TLV_FLAG_TXBF;
	if (param->rate_flags & WMI_HOST_FLAG_RTSENA)
		rateflags |= WMI_TLV_FLAG_RTSENA;
	if (param->rate_flags & WMI_HOST_FLAG_CTSENA)
		rateflags |= WMI_TLV_FLAG_CTSENA;
	if (param->rate_flags & WMI_HOST_FLAG_SGI)
		rateflags |= WMI_TLV_FLAG_SGI;

	return rateflags;
}

/**
 * send_packet_power_info_get_cmd_tlv() - send request to get packet power
 * info to fw
 * @wmi_handle: wmi handle
 * @param: pointer to hold packet power info param
 *
 * @return QDF_STATUS_SUCCESS on success and -ve on failure.
+ */ +static QDF_STATUS +send_packet_power_info_get_cmd_tlv(wmi_unified_t wmi_handle, + struct packet_power_info_params *param) +{ + wmi_pdev_get_tpc_cmd_fixed_param *cmd; + wmi_buf_t wmibuf; + uint8_t *buf_ptr; + u_int32_t len = sizeof(wmi_pdev_get_tpc_cmd_fixed_param); + + wmibuf = wmi_buf_alloc(wmi_handle, len); + if (wmibuf == NULL) + return QDF_STATUS_E_NOMEM; + + buf_ptr = (uint8_t *)wmi_buf_data(wmibuf); + + cmd = (wmi_pdev_get_tpc_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_get_tpc_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_get_tpc_cmd_fixed_param)); + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + param->pdev_id); + cmd->rate_flags = convert_to_power_info_rate_flags(param); + cmd->nss = param->nss; + cmd->preamble = param->preamble; + cmd->hw_rate = param->hw_rate; + + WMI_LOGI("%s[%d] commandID %d, wmi_pdev_get_tpc_cmd=0x%x," + "rate_flags: 0x%x, nss: %d, preamble: %d, hw_rate: %d\n", + __func__, __LINE__, WMI_PDEV_GET_TPC_CMDID, *((u_int32_t *)cmd), + cmd->rate_flags, cmd->nss, cmd->preamble, cmd->hw_rate); + + wmi_mtrace(WMI_PDEV_GET_TPC_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, wmibuf, len, + WMI_PDEV_GET_TPC_CMDID)) { + WMI_LOGE(FL("Failed to get tpc command\n")); + wmi_buf_free(wmibuf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_vdev_config_ratemask_cmd_tlv() - config ratemask param in fw + * @wmi_handle: wmi handle + * @param: pointer to hold config ratemask params + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ +static QDF_STATUS send_vdev_config_ratemask_cmd_tlv(wmi_unified_t wmi_handle, + struct config_ratemask_params *param) +{ + wmi_vdev_config_ratemask_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_vdev_config_ratemask_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_config_ratemask_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_vdev_config_ratemask_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->type = param->type; + cmd->mask_lower32 = param->lower32; + cmd->mask_higher32 = param->higher32; + WMI_LOGI("Setting vdev ratemask vdev id = 0x%X, type = 0x%X, mask_l32 = 0x%X mask_h32 = 0x%X\n", + param->vdev_id, param->type, param->lower32, param->higher32); + + wmi_mtrace(WMI_VDEV_RATEMASK_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_RATEMASK_CMDID)) { + WMI_LOGE("Seting vdev ratemask failed\n"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * copy_custom_aggr_bitmap() - copies host side bitmap using FW APIs + * @param: param sent from the host side + * @cmd: param to be sent to the fw side + */ +static inline void copy_custom_aggr_bitmap( + struct set_custom_aggr_size_params *param, + wmi_vdev_set_custom_aggr_size_cmd_fixed_param *cmd) +{ + WMI_VDEV_CUSTOM_AGGR_AC_SET(cmd->enable_bitmap, + param->ac); + WMI_VDEV_CUSTOM_AGGR_TYPE_SET(cmd->enable_bitmap, + param->aggr_type); + WMI_VDEV_CUSTOM_TX_AGGR_SZ_DIS_SET(cmd->enable_bitmap, + param->tx_aggr_size_disable); + WMI_VDEV_CUSTOM_RX_AGGR_SZ_DIS_SET(cmd->enable_bitmap, + param->rx_aggr_size_disable); + WMI_VDEV_CUSTOM_TX_AC_EN_SET(cmd->enable_bitmap, + param->tx_ac_enable); +} + +/** + * send_vdev_set_custom_aggr_size_cmd_tlv() - custom aggr size param in fw + * @wmi_handle: wmi handle + * 
@param: pointer to hold custom aggr size params + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS send_vdev_set_custom_aggr_size_cmd_tlv( + wmi_unified_t wmi_handle, + struct set_custom_aggr_size_params *param) +{ + wmi_vdev_set_custom_aggr_size_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_vdev_set_custom_aggr_size_cmd_fixed_param *) + wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_set_custom_aggr_size_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_vdev_set_custom_aggr_size_cmd_fixed_param)); + cmd->vdev_id = param->vdev_id; + cmd->tx_aggr_size = param->tx_aggr_size; + cmd->rx_aggr_size = param->rx_aggr_size; + copy_custom_aggr_bitmap(param, cmd); + + WMI_LOGD("Set custom aggr: vdev id=0x%X, tx aggr size=0x%X " + "rx_aggr_size=0x%X access category=0x%X, agg_type=0x%X " + "tx_aggr_size_disable=0x%X, rx_aggr_size_disable=0x%X " + "tx_ac_enable=0x%X\n", + param->vdev_id, param->tx_aggr_size, param->rx_aggr_size, + param->ac, param->aggr_type, param->tx_aggr_size_disable, + param->rx_aggr_size_disable, param->tx_ac_enable); + + wmi_mtrace(WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID)) { + WMI_LOGE("Seting custom aggregation size failed\n"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_vdev_set_qdepth_thresh_cmd_tlv() - WMI set qdepth threshold + * @param wmi_handle : handle to WMI. + * @param param : pointer to tx antenna param + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ + +static QDF_STATUS send_vdev_set_qdepth_thresh_cmd_tlv(wmi_unified_t wmi_handle, + struct set_qdepth_thresh_params *param) +{ + wmi_peer_tid_msduq_qdepth_thresh_update_cmd_fixed_param *cmd; + wmi_msduq_qdepth_thresh_update *cmd_update; + wmi_buf_t buf; + int32_t len = 0; + int i; + uint8_t *buf_ptr; + QDF_STATUS ret; + + if (param->num_of_msduq_updates > QDEPTH_THRESH_MAX_UPDATES) { + WMI_LOGE("%s: Invalid Update Count!\n", __func__); + return QDF_STATUS_E_INVAL; + } + + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE; + len += (sizeof(wmi_msduq_qdepth_thresh_update) * + param->num_of_msduq_updates); + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + cmd = (wmi_peer_tid_msduq_qdepth_thresh_update_cmd_fixed_param *) + buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_tid_msduq_qdepth_thresh_update_cmd_fixed_param + , WMITLV_GET_STRUCT_TLVLEN( + wmi_peer_tid_msduq_qdepth_thresh_update_cmd_fixed_param)); + + cmd->pdev_id = + wmi_handle->ops->convert_pdev_id_host_to_target(param->pdev_id); + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->mac_addr, &cmd->peer_mac_address); + cmd->num_of_msduq_updates = param->num_of_msduq_updates; + + buf_ptr += sizeof( + wmi_peer_tid_msduq_qdepth_thresh_update_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + param->num_of_msduq_updates * + sizeof(wmi_msduq_qdepth_thresh_update)); + buf_ptr += WMI_TLV_HDR_SIZE; + cmd_update = (wmi_msduq_qdepth_thresh_update *)buf_ptr; + + for (i = 0; i < cmd->num_of_msduq_updates; i++) { + WMITLV_SET_HDR(&cmd_update->tlv_header, + WMITLV_TAG_STRUC_wmi_msduq_qdepth_thresh_update, + WMITLV_GET_STRUCT_TLVLEN( + wmi_msduq_qdepth_thresh_update)); + cmd_update->tid_num = param->update_params[i].tid_num; + cmd_update->msduq_update_mask = + param->update_params[i].msduq_update_mask; + cmd_update->qdepth_thresh_value = 
+ param->update_params[i].qdepth_thresh_value; + WMI_LOGD("Set QDepth Threshold: vdev=0x%X pdev=0x%X, tid=0x%X " + "mac_addr_upper4=%X, mac_addr_lower2:%X," + " update mask=0x%X thresh val=0x%X\n", + cmd->vdev_id, cmd->pdev_id, cmd_update->tid_num, + cmd->peer_mac_address.mac_addr31to0, + cmd->peer_mac_address.mac_addr47to32, + cmd_update->msduq_update_mask, + cmd_update->qdepth_thresh_value); + cmd_update++; + } + + wmi_mtrace(WMI_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMDID, + cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMDID); + + if (ret != 0) { + WMI_LOGE(" %s :WMI Failed\n", __func__); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_set_vap_dscp_tid_map_cmd_tlv() - send vap dscp tid map cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold vap dscp tid map param + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS +send_set_vap_dscp_tid_map_cmd_tlv(wmi_unified_t wmi_handle, + struct vap_dscp_tid_map_params *param) +{ + wmi_buf_t buf; + wmi_vdev_set_dscp_tid_map_cmd_fixed_param *cmd; + int32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_vdev_set_dscp_tid_map_cmd_fixed_param *)wmi_buf_data(buf); + qdf_mem_copy(cmd->dscp_to_tid_map, param->dscp_to_tid_map, + sizeof(uint32_t) * WMI_DSCP_MAP_MAX); + + cmd->vdev_id = param->vdev_id; + cmd->enable_override = 0; + + WMI_LOGI("Setting dscp for vap id: %d\n", cmd->vdev_id); + wmi_mtrace(WMI_VDEV_SET_DSCP_TID_MAP_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_SET_DSCP_TID_MAP_CMDID)) { + WMI_LOGE("Failed to set dscp cmd\n"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_vdev_set_neighbour_rx_cmd_tlv() - set neighbour rx param in fw + * @wmi_handle: wmi handle + * 
@macaddr: vdev mac address
 * @param: pointer to hold neighbour rx param
 *
 * @return QDF_STATUS_SUCCESS on success and -ve on failure.
 */
static QDF_STATUS send_vdev_set_neighbour_rx_cmd_tlv(wmi_unified_t wmi_handle,
					uint8_t macaddr[IEEE80211_ADDR_LEN],
					struct set_neighbour_rx_params *param)
{
	wmi_vdev_filter_nrp_config_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	int32_t len = sizeof(*cmd);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	cmd = (wmi_vdev_filter_nrp_config_cmd_fixed_param *)wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_vdev_filter_nrp_config_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_vdev_filter_nrp_config_cmd_fixed_param));
	/* Filter config: which BSSID slot, add/remove action and entry type
	 * all come from the caller-supplied param block.
	 */
	cmd->vdev_id = param->vdev_id;
	cmd->bssid_idx = param->idx;
	cmd->action = param->action;
	cmd->type = param->type;
	WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->addr);
	cmd->flag = 0;	/* no optional flags used by this path */

	wmi_mtrace(WMI_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID, cmd->vdev_id, 0);
	if (wmi_unified_cmd_send(wmi_handle, buf, len,
			WMI_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID)) {
		WMI_LOGE("Failed to set neighbour rx param\n");
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * send_smart_ant_set_tx_ant_cmd_tlv() - WMI set tx antenna function
 * @param wmi_handle : handle to WMI.
 * @param macaddr : vdev mac address
 * @param param : pointer to tx antenna param
 *
 * @return QDF_STATUS_SUCCESS on success and -ve on failure.
+ */ +static QDF_STATUS send_smart_ant_set_tx_ant_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct smart_ant_tx_ant_params *param) +{ + wmi_peer_smart_ant_set_tx_antenna_cmd_fixed_param *cmd; + wmi_peer_smart_ant_set_tx_antenna_series *ant_tx_series; + wmi_buf_t buf; + int32_t len = 0; + int i; + uint8_t *buf_ptr; + QDF_STATUS ret; + + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE; + len += (WMI_SMART_ANT_MAX_RATE_SERIES) * + sizeof(wmi_peer_smart_ant_set_tx_antenna_series); + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + qdf_mem_zero(buf_ptr, len); + cmd = (wmi_peer_smart_ant_set_tx_antenna_cmd_fixed_param *)buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_smart_ant_set_tx_antenna_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_peer_smart_ant_set_tx_antenna_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + + buf_ptr += sizeof(wmi_peer_smart_ant_set_tx_antenna_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_peer_smart_ant_set_tx_antenna_series)); + buf_ptr += WMI_TLV_HDR_SIZE; + ant_tx_series = (wmi_peer_smart_ant_set_tx_antenna_series *)buf_ptr; + + for (i = 0; i < WMI_SMART_ANT_MAX_RATE_SERIES; i++) { + WMITLV_SET_HDR(&ant_tx_series->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_smart_ant_set_tx_antenna_series, + WMITLV_GET_STRUCT_TLVLEN( + wmi_peer_smart_ant_set_tx_antenna_series)); + ant_tx_series->antenna_series = param->antenna_array[i]; + ant_tx_series++; + } + + wmi_mtrace(WMI_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, + buf, + len, + WMI_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID); + + if (ret != 0) { + WMI_LOGE(" %s :WMI Failed\n", __func__); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_set_ant_switch_tbl_cmd_tlv() - 
send ant switch tbl cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold ant switch tbl param + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS +send_set_ant_switch_tbl_cmd_tlv(wmi_unified_t wmi_handle, + struct ant_switch_tbl_params *param) +{ + uint8_t len; + wmi_buf_t buf; + wmi_pdev_set_ant_switch_tbl_cmd_fixed_param *cmd; + wmi_pdev_set_ant_ctrl_chain *ctrl_chain; + uint8_t *buf_ptr; + + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE; + len += sizeof(wmi_pdev_set_ant_ctrl_chain); + buf = wmi_buf_alloc(wmi_handle, len); + + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + qdf_mem_zero(buf_ptr, len); + cmd = (wmi_pdev_set_ant_switch_tbl_cmd_fixed_param *)buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_ant_switch_tbl_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_set_ant_switch_tbl_cmd_fixed_param)); + + cmd->antCtrlCommon1 = param->ant_ctrl_common1; + cmd->antCtrlCommon2 = param->ant_ctrl_common2; + cmd->mac_id = + wmi_handle->ops->convert_pdev_id_host_to_target(param->pdev_id); + + /* TLV indicating array of structures to follow */ + buf_ptr += sizeof(wmi_pdev_set_ant_switch_tbl_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_pdev_set_ant_ctrl_chain)); + buf_ptr += WMI_TLV_HDR_SIZE; + ctrl_chain = (wmi_pdev_set_ant_ctrl_chain *)buf_ptr; + + ctrl_chain->pdev_id = + wmi_handle->ops->convert_pdev_id_host_to_target(param->pdev_id); + ctrl_chain->antCtrlChain = param->antCtrlChain; + + wmi_mtrace(WMI_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID)) { + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_smart_ant_set_training_info_cmd_tlv() - WMI set smart antenna + * training information function + * @param 
wmi_handle : handle to WMI.
 * @macaddr : vdev mac address
 * @param param : pointer to tx antenna param
 * @return QDF_STATUS_SUCCESS on success and -ve on failure.
 */
static QDF_STATUS send_smart_ant_set_training_info_cmd_tlv(
				wmi_unified_t wmi_handle,
				uint8_t macaddr[IEEE80211_ADDR_LEN],
				struct smart_ant_training_info_params *param)
{
	wmi_peer_smart_ant_set_train_antenna_cmd_fixed_param *cmd;
	wmi_peer_smart_ant_set_train_antenna_param *train_param;
	wmi_buf_t buf;
	uint8_t *buf_ptr;
	int32_t len = 0;
	QDF_STATUS ret;
	int loop;

	/* Fixed param + one train-antenna struct per rate series. */
	len = sizeof(*cmd) + WMI_TLV_HDR_SIZE;
	len += (WMI_SMART_ANT_MAX_RATE_SERIES) *
		sizeof(wmi_peer_smart_ant_set_train_antenna_param);
	buf = wmi_buf_alloc(wmi_handle, len);

	if (!buf) {
		WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (uint8_t *)wmi_buf_data(buf);
	qdf_mem_zero(buf_ptr, len);
	cmd = (wmi_peer_smart_ant_set_train_antenna_cmd_fixed_param *)buf_ptr;

	WMITLV_SET_HDR(&cmd->tlv_header,
	WMITLV_TAG_STRUC_wmi_peer_smart_ant_set_train_antenna_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_peer_smart_ant_set_train_antenna_cmd_fixed_param));

	cmd->vdev_id = param->vdev_id;
	WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr);
	cmd->num_pkts = param->numpkts;

	buf_ptr += sizeof(wmi_peer_smart_ant_set_train_antenna_cmd_fixed_param);
	/* Array TLV length covers the full series array. */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
		       sizeof(wmi_peer_smart_ant_set_train_antenna_param) *
		       WMI_SMART_ANT_MAX_RATE_SERIES);

	buf_ptr += WMI_TLV_HDR_SIZE;
	train_param = (wmi_peer_smart_ant_set_train_antenna_param *)buf_ptr;

	for (loop = 0; loop < WMI_SMART_ANT_MAX_RATE_SERIES; loop++) {
		WMITLV_SET_HDR(&train_param->tlv_header,
		WMITLV_TAG_STRUC_wmi_peer_smart_ant_set_train_antenna_param,
			WMITLV_GET_STRUCT_TLVLEN(
				wmi_peer_smart_ant_set_train_antenna_param));
		train_param->train_rate_series = param->rate_array[loop];
		train_param->train_antenna_series =
					param->antenna_array[loop];
		train_param->rc_flags = 0;
		WMI_LOGI(FL("Series number:%d\n"), loop);
		WMI_LOGI(FL("Rate [0x%02x] Tx_Antenna [0x%08x]\n"),
			 train_param->train_rate_series,
			 train_param->train_antenna_series);
		train_param++;
	}

	wmi_mtrace(WMI_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID, cmd->vdev_id, 0);
	ret = wmi_unified_cmd_send(wmi_handle,
				   buf,
				   len,
				   WMI_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID);

	if (ret != 0) {
		WMI_LOGE(" %s :WMI Failed\n", __func__);
		wmi_buf_free(buf);
		/*
		 * NOTE(review): returns E_FAILURE here rather than ret,
		 * unlike sibling builders which return ret - confirm
		 * intentional.
		 */
		return QDF_STATUS_E_FAILURE;
	}

	return ret;
}

/**
 * send_smart_ant_set_node_config_cmd_tlv() - WMI set node
 * configuration function
 * @param wmi_handle : handle to WMI.
 * @macaddr : vdev mac address
 * @param param : pointer to node config param
 *
 * @return QDF_STATUS_SUCCESS on success and -ve on failure.
 */
static QDF_STATUS send_smart_ant_set_node_config_cmd_tlv(
				wmi_unified_t wmi_handle,
				uint8_t macaddr[IEEE80211_ADDR_LEN],
				struct smart_ant_node_config_params *param)
{
	wmi_peer_smart_ant_set_node_config_ops_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	uint8_t *buf_ptr;
	int32_t len = 0, args_tlv_len;
	int ret;	/* holds QDF_STATUS from wmi_unified_cmd_send() */
	int i = 0;
	uint32_t *node_config_args;

	/* Variable-length tail: one uint32 per caller-supplied argument. */
	args_tlv_len = WMI_TLV_HDR_SIZE + param->args_count * sizeof(uint32_t);
	len = sizeof(*cmd) + args_tlv_len;

	if (param->args_count == 0) {
		WMI_LOGE("%s: Can't send a command with %d arguments\n",
			 __func__, param->args_count);
		return QDF_STATUS_E_FAILURE;
	}

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	cmd = (wmi_peer_smart_ant_set_node_config_ops_cmd_fixed_param *)
						wmi_buf_data(buf);
	buf_ptr = (uint8_t *)cmd;
	WMITLV_SET_HDR(&cmd->tlv_header,
	WMITLV_TAG_STRUC_wmi_peer_smart_ant_set_node_config_ops_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_peer_smart_ant_set_node_config_ops_cmd_fixed_param));
	cmd->vdev_id = param->vdev_id;

	WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr);
	cmd->cmd_id = param->cmd_id;
	cmd->args_count = param->args_count;
	buf_ptr += sizeof(
		wmi_peer_smart_ant_set_node_config_ops_cmd_fixed_param);
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32,
		       (cmd->args_count * sizeof(uint32_t)));
	buf_ptr += WMI_TLV_HDR_SIZE;
	node_config_args = (uint32_t *)buf_ptr;

	for (i = 0; i < param->args_count; i++) {
		node_config_args[i] = param->args_arr[i];
		WMI_LOGI("%d", param->args_arr[i]);
	}

	wmi_mtrace(WMI_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
		   cmd->vdev_id, 0);
	ret = wmi_unified_cmd_send(wmi_handle,
				   buf,
				   len,
				   WMI_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID);

	if (ret != 0) {
		WMI_LOGE("%s: WMI FAILED:Sent cmd_id: 0x%x\n Node: %02x:%02x:%02x:%02x:%02x:%02x cmdstatus=%d\n",
			 __func__, param->cmd_id, macaddr[0],
			 macaddr[1], macaddr[2], macaddr[3],
			 macaddr[4], macaddr[5], ret);
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * send_set_atf_cmd_tlv() - send set atf command to fw
 * @wmi_handle: wmi handle
 * @param: pointer to set atf param
 *
 * @return QDF_STATUS_SUCCESS on success and -ve on failure.
 */
static QDF_STATUS
send_set_atf_cmd_tlv(wmi_unified_t wmi_handle,
		     struct set_atf_params *param)
{
	wmi_atf_peer_info *peer_info;
	wmi_peer_atf_request_fixed_param *cmd;
	wmi_buf_t buf;
	uint8_t *buf_ptr;
	int i;
	int32_t len = 0;
	QDF_STATUS retval;

	/* Fixed param followed by one wmi_atf_peer_info per peer. */
	len = sizeof(*cmd) + WMI_TLV_HDR_SIZE;
	len += param->num_peers * sizeof(wmi_atf_peer_info);
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	buf_ptr = (uint8_t *)wmi_buf_data(buf);
	cmd = (wmi_peer_atf_request_fixed_param *)buf_ptr;
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_peer_atf_request_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
				wmi_peer_atf_request_fixed_param));
	cmd->num_peers = param->num_peers;

	buf_ptr += sizeof(*cmd);
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
		       sizeof(wmi_atf_peer_info) *
		       cmd->num_peers);
	buf_ptr += WMI_TLV_HDR_SIZE;
	peer_info = (wmi_atf_peer_info *)buf_ptr;

	for (i = 0; i < cmd->num_peers; i++) {
		WMITLV_SET_HDR(&peer_info->tlv_header,
			       WMITLV_TAG_STRUC_wmi_atf_peer_info,
			       WMITLV_GET_STRUCT_TLVLEN(
					wmi_atf_peer_info));
		qdf_mem_copy(&(peer_info->peer_macaddr),
			     &(param->peer_info[i].peer_macaddr),
			     sizeof(wmi_mac_addr));
		/* "atf_units" carries the per-peer airtime percentage. */
		peer_info->atf_units = param->peer_info[i].percentage_peer;
		peer_info->vdev_id = param->peer_info[i].vdev_id;
		peer_info->pdev_id =
			wmi_handle->ops->convert_pdev_id_host_to_target(
				param->peer_info[i].pdev_id);
		/*
		 * TLV definition for peer atf request fixed param combines
		 * extension stats. Legacy FW for WIN (Non-TLV) has peer atf
		 * stats and atf extension stats as two different
		 * implementations.
		 * Need to discuss with FW on this.
		 *
		 * peer_info->atf_groupid = param->peer_ext_info[i].group_index;
		 * peer_info->atf_units_reserved =
		 *		param->peer_ext_info[i].atf_index_reserved;
		 */
		peer_info++;
	}

	wmi_mtrace(WMI_PEER_ATF_REQUEST_CMDID, NO_SESSION, 0);
	retval = wmi_unified_cmd_send(wmi_handle, buf, len,
				      WMI_PEER_ATF_REQUEST_CMDID);

	if (retval != QDF_STATUS_SUCCESS) {
		WMI_LOGE("%s : WMI Failed\n", __func__);
		wmi_buf_free(buf);
	}

	return retval;
}

/**
 * send_vdev_set_fwtest_param_cmd_tlv() - send fwtest param in fw
 * @wmi_handle: wmi handle
 * @param: pointer to hold fwtest param
 *
 * @return QDF_STATUS_SUCCESS on success and -ve on failure.
 */
static QDF_STATUS send_vdev_set_fwtest_param_cmd_tlv(wmi_unified_t wmi_handle,
				struct set_fwtest_params *param)
{
	wmi_fwtest_set_param_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	int32_t len = sizeof(*cmd);

	buf = wmi_buf_alloc(wmi_handle, len);

	if (!buf) {
		WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	cmd = (wmi_fwtest_set_param_cmd_fixed_param *)wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_fwtest_set_param_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
				wmi_fwtest_set_param_cmd_fixed_param));
	/* Opaque id/value pair interpreted by firmware test hooks. */
	cmd->param_id = param->arg;
	cmd->param_value = param->value;

	wmi_mtrace(WMI_FWTEST_CMDID, NO_SESSION, 0);
	if (wmi_unified_cmd_send(wmi_handle, buf, len, WMI_FWTEST_CMDID)) {
		WMI_LOGE("Setting FW test param failed\n");
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * send_set_qboost_param_cmd_tlv() - send set qboost command to fw
 * @wmi_handle: wmi handle
 * @param: pointer to qboost params
 * @macaddr: vdev mac address
 *
 * @return QDF_STATUS_SUCCESS on success and -ve on failure.
 */
static QDF_STATUS
send_set_qboost_param_cmd_tlv(wmi_unified_t wmi_handle,
			      uint8_t macaddr[IEEE80211_ADDR_LEN],
			      struct set_qboost_params *param)
{
	WMI_QBOOST_CFG_CMD_fixed_param *cmd;
	wmi_buf_t buf;
	int32_t len;
	QDF_STATUS ret;

	len = sizeof(*cmd);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	cmd = (WMI_QBOOST_CFG_CMD_fixed_param *)wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_WMI_QBOOST_CFG_CMD_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
				WMI_QBOOST_CFG_CMD_fixed_param));
	cmd->vdev_id = param->vdev_id;
	WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr);
	cmd->qb_enable = param->value;

	wmi_mtrace(WMI_QBOOST_CFG_CMDID, cmd->vdev_id, 0);
	/* sizeof(*cmd) == len here (single fixed-param message). */
	ret = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd),
				   WMI_QBOOST_CFG_CMDID);

	if (ret != 0) {
		WMI_LOGE("Setting qboost cmd failed\n");
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * send_gpio_config_cmd_tlv() - send gpio config to fw
 * @wmi_handle: wmi handle
 * @param: pointer to hold gpio config param
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS
send_gpio_config_cmd_tlv(wmi_unified_t wmi_handle,
			 struct gpio_config_params *param)
{
	wmi_gpio_config_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	int32_t len;
	QDF_STATUS ret;

	len = sizeof(*cmd);

	/* Sanity Checks */
	if (param->pull_type > WMI_GPIO_PULL_DOWN ||
	    param->intr_mode > WMI_GPIO_INTTYPE_LEVEL_HIGH) {
		return QDF_STATUS_E_FAILURE;
	}

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	cmd = (wmi_gpio_config_cmd_fixed_param *)wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_gpio_config_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
				wmi_gpio_config_cmd_fixed_param));
	cmd->gpio_num = param->gpio_num;
	cmd->input = param->input;

	cmd->pull_type = param->pull_type;
	cmd->intr_mode = param->intr_mode;

	wmi_mtrace(WMI_GPIO_CONFIG_CMDID, NO_SESSION, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd),
				   WMI_GPIO_CONFIG_CMDID);

	if (ret != 0) {
		WMI_LOGE("Sending GPIO config cmd failed\n");
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * send_gpio_output_cmd_tlv() - send gpio output to fw
 * @wmi_handle: wmi handle
 * @param: pointer to hold gpio output param
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS
send_gpio_output_cmd_tlv(wmi_unified_t wmi_handle,
			 struct gpio_output_params *param)
{
	wmi_gpio_output_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	int32_t len;
	QDF_STATUS ret;

	len = sizeof(*cmd);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	cmd = (wmi_gpio_output_cmd_fixed_param *)wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_gpio_output_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
				wmi_gpio_output_cmd_fixed_param));
	cmd->gpio_num = param->gpio_num;
	cmd->set = param->set;

	wmi_mtrace(WMI_GPIO_OUTPUT_CMDID, NO_SESSION, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd),
				   WMI_GPIO_OUTPUT_CMDID);

	if (ret != 0) {
		WMI_LOGE("Sending GPIO output cmd failed\n");
		wmi_buf_free(buf);
	}

	return ret;

}

/**
 * send_phyerr_disable_cmd_tlv() - WMI phyerr disable function
 *
 * @param wmi_handle : handle to WMI.
 * @return QDF_STATUS_SUCCESS on success and -ve on failure.
 */
static QDF_STATUS send_phyerr_disable_cmd_tlv(wmi_unified_t wmi_handle)
{
	wmi_pdev_dfs_disable_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	QDF_STATUS ret;
	int32_t len;

	len = sizeof(*cmd);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	cmd = (wmi_pdev_dfs_disable_cmd_fixed_param *)wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_pdev_dfs_disable_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
				wmi_pdev_dfs_disable_cmd_fixed_param));
	/* Filling it with WMI_PDEV_ID_SOC for now */
	cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target(
							WMI_HOST_PDEV_ID_SOC);

	wmi_mtrace(WMI_PDEV_DFS_DISABLE_CMDID, NO_SESSION, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd),
				   WMI_PDEV_DFS_DISABLE_CMDID);

	if (ret != 0) {
		WMI_LOGE("Sending PDEV DFS disable cmd failed\n");
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * send_phyerr_enable_cmd_tlv() - WMI phyerr enable function
 *
 * @param wmi_handle : handle to WMI.
 * @return QDF_STATUS_SUCCESS on success and -ve on failure.
 */
static QDF_STATUS send_phyerr_enable_cmd_tlv(wmi_unified_t wmi_handle)
{
	wmi_pdev_dfs_enable_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	QDF_STATUS ret;
	int32_t len;

	len = sizeof(*cmd);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	cmd = (wmi_pdev_dfs_enable_cmd_fixed_param *)wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_pdev_dfs_enable_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
				wmi_pdev_dfs_enable_cmd_fixed_param));
	/* Reserved for future use */
	cmd->reserved0 = 0;
	/*
	 * NOTE(review): unlike the disable path, no pdev_id is set here -
	 * the enable fixed_param appears to carry only reserved0; confirm
	 * against the WMI TLV definition.
	 */

	wmi_mtrace(WMI_PDEV_DFS_ENABLE_CMDID, NO_SESSION, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd),
				   WMI_PDEV_DFS_ENABLE_CMDID);

	if (ret != 0) {
		WMI_LOGE("Sending PDEV DFS enable cmd failed\n");
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * send_periodic_chan_stats_config_cmd_tlv() - send periodic chan stats cmd
 * to fw
 * @wmi_handle: wmi handle
 * @param: pointer to hold periodic chan stats param
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS
send_periodic_chan_stats_config_cmd_tlv(wmi_unified_t wmi_handle,
			struct periodic_chan_stats_params *param)
{
	wmi_set_periodic_channel_stats_config_fixed_param *cmd;
	wmi_buf_t buf;
	QDF_STATUS ret;
	int32_t len;

	len = sizeof(*cmd);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	cmd = (wmi_set_periodic_channel_stats_config_fixed_param *)
					wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmd->tlv_header,
	WMITLV_TAG_STRUC_wmi_set_periodic_channel_stats_config_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_set_periodic_channel_stats_config_fixed_param));
	cmd->enable = param->enable;
	cmd->stats_period = param->stats_period;
	cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target(
							param->pdev_id);

	wmi_mtrace(WMI_SET_PERIODIC_CHANNEL_STATS_CONFIG_CMDID, NO_SESSION, 0);
	/* sizeof(*cmd) == len (single fixed-param message). */
	ret = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd),
				   WMI_SET_PERIODIC_CHANNEL_STATS_CONFIG_CMDID);

	if (ret != 0) {
		WMI_LOGE("Sending periodic chan stats config failed");
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * send_nf_dbr_dbm_info_get_cmd_tlv() - send request to get nf to fw
 * @wmi_handle: wmi handle
 * @mac_id: radio context
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS
send_nf_dbr_dbm_info_get_cmd_tlv(wmi_unified_t wmi_handle, uint8_t mac_id)
{
	wmi_buf_t buf;
	QDF_STATUS ret;
	wmi_pdev_get_nfcal_power_fixed_param *cmd;
	int32_t len = sizeof(*cmd);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (buf == NULL)
		return QDF_STATUS_E_NOMEM;

	cmd = (wmi_pdev_get_nfcal_power_fixed_param *)wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_pdev_get_nfcal_power_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN
		       (wmi_pdev_get_nfcal_power_fixed_param));
	cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target(mac_id);

	wmi_mtrace(WMI_PDEV_GET_NFCAL_POWER_CMDID, NO_SESSION, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_PDEV_GET_NFCAL_POWER_CMDID);
	if (ret != 0) {
		WMI_LOGE("Sending get nfcal power cmd failed\n");
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * send_set_ht_ie_cmd_tlv() - send ht ie command to fw
 * @wmi_handle: wmi handle
 * @param: pointer to ht ie param
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS
send_set_ht_ie_cmd_tlv(wmi_unified_t wmi_handle,
		       struct ht_ie_params *param)
{
	wmi_pdev_set_ht_ie_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	QDF_STATUS ret;
	int32_t len;
	uint8_t *buf_ptr;

	/* IE payload is padded to a 4-byte multiple for the TLV stream. */
	len = sizeof(*cmd) + WMI_TLV_HDR_SIZE +
	      roundup(param->ie_len, sizeof(uint32_t));

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	buf_ptr = (uint8_t
*)wmi_buf_data(buf);
	cmd = (wmi_pdev_set_ht_ie_cmd_fixed_param *)buf_ptr;
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_pdev_set_ht_ie_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
				wmi_pdev_set_ht_ie_cmd_fixed_param));
	cmd->reserved0 = 0;
	cmd->ie_len = param->ie_len;
	cmd->tx_streams = param->tx_streams;
	cmd->rx_streams = param->rx_streams;

	buf_ptr += sizeof(*cmd);
	/* Byte-array TLV carries the raw (unpadded) IE length. */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, cmd->ie_len);
	buf_ptr += WMI_TLV_HDR_SIZE;
	if (param->ie_len)
		WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(buf_ptr, param->ie_data,
						cmd->ie_len);

	wmi_mtrace(WMI_PDEV_SET_HT_CAP_IE_CMDID, NO_SESSION, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_PDEV_SET_HT_CAP_IE_CMDID);

	if (ret != 0) {
		WMI_LOGE("Sending set ht ie cmd failed\n");
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * send_set_vht_ie_cmd_tlv() - send vht ie command to fw
 * @wmi_handle: wmi handle
 * @param: pointer to vht ie param
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS
send_set_vht_ie_cmd_tlv(wmi_unified_t wmi_handle,
			struct vht_ie_params *param)
{
	wmi_pdev_set_vht_ie_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	QDF_STATUS ret;
	int32_t len;
	uint8_t *buf_ptr;

	/* IE payload is padded to a 4-byte multiple for the TLV stream. */
	len = sizeof(*cmd) + WMI_TLV_HDR_SIZE +
	      roundup(param->ie_len, sizeof(uint32_t));

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	buf_ptr = (uint8_t *)wmi_buf_data(buf);
	cmd = (wmi_pdev_set_vht_ie_cmd_fixed_param *)buf_ptr;
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_pdev_set_vht_ie_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
				wmi_pdev_set_vht_ie_cmd_fixed_param));
	cmd->reserved0 = 0;
	cmd->ie_len = param->ie_len;
	cmd->tx_streams = param->tx_streams;
	cmd->rx_streams = param->rx_streams;

	buf_ptr += sizeof(*cmd);
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, cmd->ie_len);
	buf_ptr += WMI_TLV_HDR_SIZE;
	if
(param->ie_len) + WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(buf_ptr, param->ie_data, + cmd->ie_len); + + wmi_mtrace(WMI_PDEV_SET_VHT_CAP_IE_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_VHT_CAP_IE_CMDID); + + if (ret != 0) { + WMI_LOGE("Sending set vht ie cmd failed\n"); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_set_quiet_mode_cmd_tlv() - send set quiet mode command to fw + * @wmi_handle: wmi handle + * @param: pointer to quiet mode params + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_set_quiet_mode_cmd_tlv(wmi_unified_t wmi_handle, + struct set_quiet_mode_params *param) +{ + wmi_pdev_set_quiet_cmd_fixed_param *quiet_cmd; + wmi_buf_t buf; + QDF_STATUS ret; + int32_t len; + + len = sizeof(*quiet_cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + quiet_cmd = (wmi_pdev_set_quiet_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&quiet_cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_quiet_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_set_quiet_cmd_fixed_param)); + quiet_cmd = (wmi_pdev_set_quiet_cmd_fixed_param *)wmi_buf_data(buf); + quiet_cmd->enabled = param->enabled; + quiet_cmd->period = (param->period)*(param->intval); + quiet_cmd->duration = param->duration; + quiet_cmd->next_start = param->offset; + quiet_cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + WMI_HOST_PDEV_ID_SOC); + + wmi_mtrace(WMI_PDEV_SET_QUIET_MODE_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_QUIET_MODE_CMDID); + + if (ret != 0) { + WMI_LOGE("Sending set quiet cmd failed\n"); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_set_bwf_cmd_tlv() - send set bwf command to fw + * @wmi_handle: wmi handle + * @param: pointer to set bwf param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_set_bwf_cmd_tlv(wmi_unified_t 
wmi_handle, + struct set_bwf_params *param) +{ + wmi_bwf_peer_info *peer_info; + wmi_peer_bwf_request_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS retval; + int32_t len; + uint8_t *buf_ptr; + int i; + + len = sizeof(*cmd) + WMI_TLV_HDR_SIZE; + len += param->num_peers * sizeof(wmi_bwf_peer_info); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + buf_ptr = (uint8_t *)wmi_buf_data(buf); + cmd = (wmi_peer_bwf_request_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_peer_bwf_request_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_peer_bwf_request_fixed_param)); + cmd->num_peers = param->num_peers; + + buf_ptr += sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_bwf_peer_info) * + cmd->num_peers); + buf_ptr += WMI_TLV_HDR_SIZE; + peer_info = (wmi_bwf_peer_info *)buf_ptr; + + for (i = 0; i < cmd->num_peers; i++) { + WMITLV_SET_HDR(&peer_info->tlv_header, + WMITLV_TAG_STRUC_wmi_bwf_peer_info, + WMITLV_GET_STRUCT_TLVLEN(wmi_bwf_peer_info)); + peer_info->bwf_guaranteed_bandwidth = + param->peer_info[i].throughput; + peer_info->bwf_max_airtime = + param->peer_info[i].max_airtime; + peer_info->bwf_peer_priority = + param->peer_info[i].priority; + qdf_mem_copy(&peer_info->peer_macaddr, + ¶m->peer_info[i].peer_macaddr, + sizeof(param->peer_info[i].peer_macaddr)); + peer_info->vdev_id = + param->peer_info[i].vdev_id; + peer_info->pdev_id = + wmi_handle->ops->convert_pdev_id_host_to_target( + param->peer_info[i].pdev_id); + peer_info++; + } + + wmi_mtrace(WMI_PEER_BWF_REQUEST_CMDID, NO_SESSION, 0); + retval = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PEER_BWF_REQUEST_CMDID); + + if (retval != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s : WMI Failed\n", __func__); + wmi_buf_free(buf); + } + + return retval; +} + +/** + * send_mcast_group_update_cmd_tlv() - send mcast group update cmd to fw + * @wmi_handle: wmi handle + * @param: 
 pointer to hold mcast update param
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS
send_mcast_group_update_cmd_tlv(wmi_unified_t wmi_handle,
				struct mcast_group_update_params *param)
{
	wmi_peer_mcast_group_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	QDF_STATUS ret;
	int32_t len;
	int offset = 0;
	static char dummymask[4] = { 0xFF, 0xFF, 0xFF, 0xFF};

	len = sizeof(*cmd);
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}
	cmd = (wmi_peer_mcast_group_cmd_fixed_param *)wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_peer_mcast_group_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
				wmi_peer_mcast_group_cmd_fixed_param));
	/* confirm the buffer is 4-byte aligned */
	QDF_ASSERT((((size_t) cmd) & 0x3) == 0);
	qdf_mem_zero(cmd, sizeof(*cmd));

	cmd->vdev_id = param->vap_id;
	/* construct the message assuming our endianness matches the target */
	cmd->flags |= WMI_PEER_MCAST_GROUP_FLAG_ACTION_M &
		(param->action << WMI_PEER_MCAST_GROUP_FLAG_ACTION_S);
	cmd->flags |= WMI_PEER_MCAST_GROUP_FLAG_WILDCARD_M &
		(param->wildcard << WMI_PEER_MCAST_GROUP_FLAG_WILDCARD_S);
	if (param->is_action_delete)
		cmd->flags |= WMI_PEER_MCAST_GROUP_FLAG_DELETEALL_M;

	/* is_mcast_addr_len set => IPv6 (16-byte) group addresses */
	if (param->is_mcast_addr_len)
		cmd->flags |= WMI_PEER_MCAST_GROUP_FLAG_IPV6_M;

	if (param->is_filter_mode_snoop)
		cmd->flags |= WMI_PEER_MCAST_GROUP_FLAG_SRC_FILTER_EXCLUDE_M;

	/* unicast address spec only applies for non-wildcard cases */
	if (!param->wildcard && param->ucast_mac_addr) {
		WMI_CHAR_ARRAY_TO_MAC_ADDR(param->ucast_mac_addr,
					   &cmd->ucast_mac_addr);
	}

	/* Group address is right-aligned inside the fixed-size field;
	 * "offset" computed here is reused for the mask copy below.
	 */
	if (param->mcast_ip_addr) {
		QDF_ASSERT(param->mcast_ip_addr_bytes <=
			   sizeof(cmd->mcast_ip_addr));
		offset = sizeof(cmd->mcast_ip_addr) -
			 param->mcast_ip_addr_bytes;
		qdf_mem_copy(((uint8_t *)&cmd->mcast_ip_addr) + offset,
			     param->mcast_ip_addr,
			     param->mcast_ip_addr_bytes);
	}
	/*
	 * NOTE(review): dummymask is only 4 bytes; if mask is NULL while
	 * mcast_ip_addr_bytes > 4 (IPv6 case) this copy would over-read
	 * the dummy mask - confirm callers always supply a mask for IPv6.
	 */
	if (!param->mask)
		param->mask = &dummymask[0];

	qdf_mem_copy(((uint8_t *)&cmd->mcast_ip_mask) + offset,
		     param->mask,
		     param->mcast_ip_addr_bytes);

	if (param->srcs && param->nsrcs) {
		cmd->num_filter_addr = param->nsrcs;
		QDF_ASSERT((param->nsrcs * param->mcast_ip_addr_bytes) <=
			   sizeof(cmd->filter_addr));

		qdf_mem_copy(((uint8_t *) &cmd->filter_addr), param->srcs,
			     param->nsrcs * param->mcast_ip_addr_bytes);
	}

	wmi_mtrace(WMI_PEER_MCAST_GROUP_CMDID, cmd->vdev_id, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_PEER_MCAST_GROUP_CMDID);

	if (ret != QDF_STATUS_SUCCESS) {
		WMI_LOGE("%s : WMI Failed\n", __func__);
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * send_vdev_spectral_configure_cmd_tlv() - send VDEV spectral configure
 * command to fw
 * @wmi_handle: wmi handle
 * @param: pointer to hold spectral config parameter
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS send_vdev_spectral_configure_cmd_tlv(wmi_unified_t wmi_handle,
			struct vdev_spectral_configure_params *param)
{
	wmi_vdev_spectral_configure_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	QDF_STATUS ret;
	int32_t len;

	len = sizeof(*cmd);
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	cmd = (wmi_vdev_spectral_configure_cmd_fixed_param *)wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_vdev_spectral_configure_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_vdev_spectral_configure_cmd_fixed_param));

	cmd->vdev_id = param->vdev_id;
	cmd->spectral_scan_count = param->count;
	cmd->spectral_scan_period = param->period;
	cmd->spectral_scan_priority = param->spectral_pri;
	cmd->spectral_scan_fft_size = param->fft_size;
	cmd->spectral_scan_gc_ena = param->gc_enable;
	cmd->spectral_scan_restart_ena = param->restart_enable;
	cmd->spectral_scan_noise_floor_ref = param->noise_floor_ref;
cmd->spectral_scan_init_delay = param->init_delay;
	cmd->spectral_scan_nb_tone_thr = param->nb_tone_thr;
	cmd->spectral_scan_str_bin_thr = param->str_bin_thr;
	cmd->spectral_scan_wb_rpt_mode = param->wb_rpt_mode;
	cmd->spectral_scan_rssi_rpt_mode = param->rssi_rpt_mode;
	cmd->spectral_scan_rssi_thr = param->rssi_thr;
	cmd->spectral_scan_pwr_format = param->pwr_format;
	cmd->spectral_scan_rpt_mode = param->rpt_mode;
	cmd->spectral_scan_bin_scale = param->bin_scale;
	cmd->spectral_scan_dBm_adj = param->dbm_adj;
	cmd->spectral_scan_chn_mask = param->chn_mask;

	wmi_mtrace(WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, cmd->vdev_id, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);

	if (ret != 0) {
		WMI_LOGE("Sending set quiet cmd failed\n");
		wmi_buf_free(buf);
	}

	/* NOTE(review): the "Sent ..." info logs below are emitted even when
	 * the send above failed - looks like unconditional debug tracing;
	 * confirm this is intentional.
	 */
	WMI_LOGI("%s: Sent WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID\n",
		 __func__);

	WMI_LOGI("vdev_id = %u\n"
		 "spectral_scan_count = %u\n"
		 "spectral_scan_period = %u\n"
		 "spectral_scan_priority = %u\n"
		 "spectral_scan_fft_size = %u\n"
		 "spectral_scan_gc_ena = %u\n"
		 "spectral_scan_restart_ena = %u\n"
		 "spectral_scan_noise_floor_ref = %u\n"
		 "spectral_scan_init_delay = %u\n"
		 "spectral_scan_nb_tone_thr = %u\n"
		 "spectral_scan_str_bin_thr = %u\n"
		 "spectral_scan_wb_rpt_mode = %u\n"
		 "spectral_scan_rssi_rpt_mode = %u\n"
		 "spectral_scan_rssi_thr = %u\n"
		 "spectral_scan_pwr_format = %u\n"
		 "spectral_scan_rpt_mode = %u\n"
		 "spectral_scan_bin_scale = %u\n"
		 "spectral_scan_dBm_adj = %u\n"
		 "spectral_scan_chn_mask = %u\n",
		 param->vdev_id,
		 param->count,
		 param->period,
		 param->spectral_pri,
		 param->fft_size,
		 param->gc_enable,
		 param->restart_enable,
		 param->noise_floor_ref,
		 param->init_delay,
		 param->nb_tone_thr,
		 param->str_bin_thr,
		 param->wb_rpt_mode,
		 param->rssi_rpt_mode,
		 param->rssi_thr,
		 param->pwr_format,
		 param->rpt_mode,
		 param->bin_scale,
		 param->dbm_adj,
		 param->chn_mask);
	WMI_LOGI("%s: Status: %d\n\n",
		 __func__, ret);

	return ret;
}

/**
 * send_vdev_spectral_enable_cmd_tlv() - send VDEV spectral configure
 * command to fw
 * @wmi_handle: wmi handle
 * @param: pointer to hold spectral enable parameter
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS send_vdev_spectral_enable_cmd_tlv(wmi_unified_t wmi_handle,
			struct vdev_spectral_enable_params *param)
{
	wmi_vdev_spectral_enable_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	QDF_STATUS ret;
	int32_t len;

	len = sizeof(*cmd);
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	cmd = (wmi_vdev_spectral_enable_cmd_fixed_param *)wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_vdev_spectral_enable_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(
			wmi_vdev_spectral_enable_cmd_fixed_param));

	cmd->vdev_id = param->vdev_id;

	/* "valid" flags let the caller update trigger and enable
	 * independently; 0 tells the firmware to leave that field untouched
	 */
	if (param->active_valid) {
		cmd->trigger_cmd = param->active ? 1 : 2;
		/* 1: Trigger, 2: Clear Trigger */
	} else {
		cmd->trigger_cmd = 0; /* 0: Ignore */
	}

	if (param->enabled_valid) {
		cmd->enable_cmd = param->enabled ? 1 : 2;
		/* 1: Enable 2: Disable */
	} else {
		cmd->enable_cmd = 0; /* 0: Ignore */
	}

	wmi_mtrace(WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, cmd->vdev_id, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);

	if (ret != 0) {
		WMI_LOGE("Sending scan enable CMD failed\n");
		wmi_buf_free(buf);
	}

	WMI_LOGI("%s: Sent WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID\n", __func__);

	WMI_LOGI("vdev_id = %u\n"
		 "trigger_cmd = %u\n"
		 "enable_cmd = %u\n",
		 cmd->vdev_id,
		 cmd->trigger_cmd,
		 cmd->enable_cmd);

	WMI_LOGI("%s: Status: %d\n\n", __func__, ret);

	return ret;
}

/**
 * send_thermal_mitigation_param_cmd_tlv() - configure thermal mitigation params
 * @param wmi_handle : handle to WMI.
 * @param param : pointer to hold thermal mitigation param
 *
 * @return QDF_STATUS_SUCCESS on success and -ve on failure.
 */
static QDF_STATUS send_thermal_mitigation_param_cmd_tlv(
		wmi_unified_t wmi_handle,
		struct thermal_mitigation_params *param)
{
	wmi_therm_throt_config_request_fixed_param *tt_conf = NULL;
	wmi_therm_throt_level_config_info *lvl_conf = NULL;
	wmi_buf_t buf = NULL;
	uint8_t *buf_ptr = NULL;
	int error;
	int32_t len;
	int i;

	/* fixed param + TLV array header + one level-config per level */
	len = sizeof(*tt_conf) + WMI_TLV_HDR_SIZE +
			THERMAL_LEVELS * sizeof(wmi_therm_throt_level_config_info);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s:wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}
	tt_conf = (wmi_therm_throt_config_request_fixed_param *) wmi_buf_data(buf);

	/* init fixed params */
	WMITLV_SET_HDR(tt_conf,
		WMITLV_TAG_STRUC_wmi_therm_throt_config_request_fixed_param,
		(WMITLV_GET_STRUCT_TLVLEN(wmi_therm_throt_config_request_fixed_param)));

	tt_conf->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target(
								param->pdev_id);
	tt_conf->enable = param->enable;
	tt_conf->dc = param->dc;
	tt_conf->dc_per_event = param->dc_per_event;
	tt_conf->therm_throt_levels = THERMAL_LEVELS;

	/* pre-increment steps past the fixed-param struct to the TLV array */
	buf_ptr = (uint8_t *) ++tt_conf;
	/* init TLV params */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
			(THERMAL_LEVELS * sizeof(wmi_therm_throt_level_config_info)));

	lvl_conf = (wmi_therm_throt_level_config_info *) (buf_ptr + WMI_TLV_HDR_SIZE);
	for (i = 0; i < THERMAL_LEVELS; i++) {
		WMITLV_SET_HDR(&lvl_conf->tlv_header,
			WMITLV_TAG_STRUC_wmi_therm_throt_level_config_info,
			WMITLV_GET_STRUCT_TLVLEN(wmi_therm_throt_level_config_info));
		lvl_conf->temp_lwm = param->levelconf[i].tmplwm;
		lvl_conf->temp_hwm = param->levelconf[i].tmphwm;
		lvl_conf->dc_off_percent = param->levelconf[i].dcoffpercent;
		lvl_conf->prio = param->levelconf[i].priority;
		lvl_conf++;
	}

	wmi_mtrace(WMI_THERM_THROT_SET_CONF_CMDID, NO_SESSION, 0);
	error = wmi_unified_cmd_send(wmi_handle, buf, len,
				     WMI_THERM_THROT_SET_CONF_CMDID);
	if (QDF_IS_STATUS_ERROR(error)) {
		wmi_buf_free(buf);
		WMI_LOGE("Failed to send WMI_THERM_THROT_SET_CONF_CMDID command");
	}

	return error;
}

/**
 * send_pdev_qvit_cmd_tlv() - send qvit command to fw
 * @wmi_handle: wmi handle
 * @param: pointer to pdev_qvit_params
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS
send_pdev_qvit_cmd_tlv(wmi_unified_t wmi_handle,
		       struct pdev_qvit_params *param)
{
	wmi_buf_t buf;
	QDF_STATUS ret = QDF_STATUS_E_INVAL;
	uint8_t *cmd;
	/* msgref is static so each invocation gets a fresh message id */
	static uint8_t msgref = 1;
	uint8_t segnumber = 0, seginfo, numsegments;
	uint16_t chunk_len, total_bytes;
	uint8_t *bufpos;
	QVIT_SEG_HDR_INFO_STRUCT seghdrinfo;

	bufpos = param->utf_payload;
	total_bytes = param->len;
	/* the segment count must fit in a uint8_t */
	ASSERT(total_bytes / MAX_WMI_QVIT_LEN ==
	       (uint8_t) (total_bytes / MAX_WMI_QVIT_LEN));
	numsegments = (uint8_t) (total_bytes / MAX_WMI_QVIT_LEN);

	if (param->len - (numsegments * MAX_WMI_QVIT_LEN))
		numsegments++;

	/* split the payload into MAX_WMI_QVIT_LEN-sized segments, each sent
	 * as its own WMI command with a segment header
	 */
	while (param->len) {
		if (param->len > MAX_WMI_QVIT_LEN)
			chunk_len = MAX_WMI_QVIT_LEN;	/* MAX message */
		else
			chunk_len = param->len;

		buf = wmi_buf_alloc(wmi_handle,
				    (chunk_len + sizeof(seghdrinfo) +
				     WMI_TLV_HDR_SIZE));
		if (!buf) {
			WMI_LOGE("%s:wmi_buf_alloc failed", __func__);
			return QDF_STATUS_E_NOMEM;
		}

		cmd = (uint8_t *) wmi_buf_data(buf);

		seghdrinfo.len = total_bytes;
		seghdrinfo.msgref = msgref;
		/* high nibble: total segments, low nibble: this segment */
		seginfo = ((numsegments << 4) & 0xF0) | (segnumber & 0xF);
		seghdrinfo.segmentInfo = seginfo;

		segnumber++;

		WMITLV_SET_HDR(cmd, WMITLV_TAG_ARRAY_BYTE,
			       (chunk_len + sizeof(seghdrinfo)));
		cmd += WMI_TLV_HDR_SIZE;
		qdf_mem_copy(cmd, &seghdrinfo, sizeof(seghdrinfo));
		qdf_mem_copy(&cmd[sizeof(seghdrinfo)], bufpos, chunk_len);

		wmi_mtrace(WMI_PDEV_QVIT_CMDID, NO_SESSION, 0);
		ret = wmi_unified_cmd_send(wmi_handle, buf,
					   (chunk_len + sizeof(seghdrinfo) +
					    WMI_TLV_HDR_SIZE),
					   WMI_PDEV_QVIT_CMDID);
		if (ret != 0) {
			WMI_LOGE("Failed to send WMI_PDEV_QVIT_CMDID command");
			wmi_buf_free(buf);
			break;
		}

		param->len -= chunk_len;
		bufpos += chunk_len;
	}
	msgref++;

	return ret;
}

/**
 * send_wmm_update_cmd_tlv() - send wmm update command to fw
 * @wmi_handle: wmi handle
 * @param: pointer to wmm update param
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS
send_wmm_update_cmd_tlv(wmi_unified_t wmi_handle,
			struct wmm_update_params *param)
{
	wmi_pdev_set_wmm_params_cmd_fixed_param *cmd;
	wmi_wmm_params *wmm_param;
	wmi_buf_t buf;
	QDF_STATUS ret;
	int32_t len;
	int ac = 0;
	struct wmi_host_wmeParams *wmep;
	uint8_t *buf_ptr;

	/* fixed param followed by one wmi_wmm_params per access category */
	len = sizeof(*cmd) + (WME_NUM_AC * sizeof(*wmm_param));
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	buf_ptr = (uint8_t *) wmi_buf_data(buf);
	cmd = (wmi_pdev_set_wmm_params_cmd_fixed_param *) buf_ptr;
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_pdev_set_wmm_params_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN
			       (wmi_pdev_set_wmm_params_cmd_fixed_param));

	cmd->reserved0 = WMI_HOST_PDEV_ID_SOC;

	buf_ptr += sizeof(wmi_pdev_set_wmm_params_cmd_fixed_param);

	for (ac = 0; ac < WME_NUM_AC; ac++) {
		wmep = &param->wmep_array[ac];
		wmm_param = (wmi_wmm_params *)buf_ptr;
		WMITLV_SET_HDR(&wmm_param->tlv_header,
			       WMITLV_TAG_STRUC_wmi_wmm_params,
			       WMITLV_GET_STRUCT_TLVLEN(wmi_wmm_params));
		wmm_param->aifs = wmep->wmep_aifsn;
		/* cwmin/cwmax are stored as log2 exponents on the host side */
		wmm_param->cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
		wmm_param->cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
		wmm_param->txoplimit = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
		wmm_param->acm = wmep->wmep_acm;
		wmm_param->no_ack = wmep->wmep_noackPolicy;
		buf_ptr += sizeof(wmi_wmm_params);
	}
	wmi_mtrace(WMI_PDEV_SET_WMM_PARAMS_CMDID, NO_SESSION, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_PDEV_SET_WMM_PARAMS_CMDID);

	if (ret != 0) {
		WMI_LOGE("Sending WMM update CMD failed\n");
		wmi_buf_free(buf);
	}

	return ret;
}

/**
 * send_coex_config_cmd_tlv() - send coex config command to fw
 * @wmi_handle: wmi handle
 * @param: pointer to coex config param
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS
send_coex_config_cmd_tlv(wmi_unified_t wmi_handle,
			 struct coex_config_params *param)
{
	WMI_COEX_CONFIG_CMD_fixed_param *cmd;
	wmi_buf_t buf;
	QDF_STATUS ret;
	int32_t len;

	len = sizeof(*cmd);
	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	cmd = (WMI_COEX_CONFIG_CMD_fixed_param *)wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_WMI_COEX_CONFIG_CMD_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
		       WMI_COEX_CONFIG_CMD_fixed_param));

	/* args are opaque to the host; their meaning depends on config_type */
	cmd->vdev_id = param->vdev_id;
	cmd->config_type = param->config_type;
	cmd->config_arg1 = param->config_arg1;
	cmd->config_arg2 = param->config_arg2;
	cmd->config_arg3 = param->config_arg3;
	cmd->config_arg4 = param->config_arg4;
	cmd->config_arg5 = param->config_arg5;
	cmd->config_arg6 = param->config_arg6;

	wmi_mtrace(WMI_COEX_CONFIG_CMDID, cmd->vdev_id, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_COEX_CONFIG_CMDID);

	if (ret != 0) {
		WMI_LOGE("Sending COEX CONFIG CMD failed\n");
		wmi_buf_free(buf);
	}

	return ret;
}


#ifdef WLAN_SUPPORT_TWT
/* copy host TWT resource limits into the firmware resource config */
static void wmi_copy_twt_resource_config(wmi_resource_config *resource_cfg,
					target_resource_config *tgt_res_cfg)
{
	resource_cfg->twt_ap_pdev_count = tgt_res_cfg->twt_ap_pdev_count;
	resource_cfg->twt_ap_sta_count = tgt_res_cfg->twt_ap_sta_count;
}
#else
/* TWT disabled at build time: advertise zero TWT resources to firmware */
static void wmi_copy_twt_resource_config(wmi_resource_config *resource_cfg,
					target_resource_config *tgt_res_cfg)
{
	resource_cfg->twt_ap_pdev_count = 0;
	resource_cfg->twt_ap_sta_count = 0;
}
#endif

static
void
wmi_copy_resource_config(wmi_resource_config *resource_cfg,
			 target_resource_config *tgt_res_cfg)
{
	/* field-by-field translation of the host resource config into the
	 * firmware's wmi_resource_config layout
	 */
	resource_cfg->num_vdevs = tgt_res_cfg->num_vdevs;
	resource_cfg->num_peers = tgt_res_cfg->num_peers;
	resource_cfg->num_offload_peers = tgt_res_cfg->num_offload_peers;
	resource_cfg->num_offload_reorder_buffs =
			tgt_res_cfg->num_offload_reorder_buffs;
	resource_cfg->num_peer_keys = tgt_res_cfg->num_peer_keys;
	resource_cfg->num_tids = tgt_res_cfg->num_tids;
	resource_cfg->ast_skid_limit = tgt_res_cfg->ast_skid_limit;
	resource_cfg->tx_chain_mask = tgt_res_cfg->tx_chain_mask;
	resource_cfg->rx_chain_mask = tgt_res_cfg->rx_chain_mask;
	resource_cfg->rx_timeout_pri[0] = tgt_res_cfg->rx_timeout_pri[0];
	resource_cfg->rx_timeout_pri[1] = tgt_res_cfg->rx_timeout_pri[1];
	resource_cfg->rx_timeout_pri[2] = tgt_res_cfg->rx_timeout_pri[2];
	resource_cfg->rx_timeout_pri[3] = tgt_res_cfg->rx_timeout_pri[3];
	resource_cfg->rx_decap_mode = tgt_res_cfg->rx_decap_mode;
	resource_cfg->scan_max_pending_req =
			tgt_res_cfg->scan_max_pending_req;
	resource_cfg->bmiss_offload_max_vdev =
			tgt_res_cfg->bmiss_offload_max_vdev;
	resource_cfg->roam_offload_max_vdev =
			tgt_res_cfg->roam_offload_max_vdev;
	resource_cfg->roam_offload_max_ap_profiles =
			tgt_res_cfg->roam_offload_max_ap_profiles;
	resource_cfg->num_mcast_groups = tgt_res_cfg->num_mcast_groups;
	resource_cfg->num_mcast_table_elems =
			tgt_res_cfg->num_mcast_table_elems;
	resource_cfg->mcast2ucast_mode = tgt_res_cfg->mcast2ucast_mode;
	resource_cfg->tx_dbg_log_size = tgt_res_cfg->tx_dbg_log_size;
	resource_cfg->num_wds_entries = tgt_res_cfg->num_wds_entries;
	resource_cfg->dma_burst_size = tgt_res_cfg->dma_burst_size;
	resource_cfg->mac_aggr_delim = tgt_res_cfg->mac_aggr_delim;
	resource_cfg->rx_skip_defrag_timeout_dup_detection_check =
		tgt_res_cfg->rx_skip_defrag_timeout_dup_detection_check;
	resource_cfg->vow_config = tgt_res_cfg->vow_config;
	resource_cfg->gtk_offload_max_vdev = tgt_res_cfg->gtk_offload_max_vdev;
	resource_cfg->num_msdu_desc = tgt_res_cfg->num_msdu_desc;
	resource_cfg->max_frag_entries = tgt_res_cfg->max_frag_entries;
	resource_cfg->num_tdls_vdevs = tgt_res_cfg->num_tdls_vdevs;
	resource_cfg->num_tdls_conn_table_entries =
			tgt_res_cfg->num_tdls_conn_table_entries;
	resource_cfg->beacon_tx_offload_max_vdev =
			tgt_res_cfg->beacon_tx_offload_max_vdev;
	resource_cfg->num_multicast_filter_entries =
			tgt_res_cfg->num_multicast_filter_entries;
	resource_cfg->num_wow_filters =
			tgt_res_cfg->num_wow_filters;
	resource_cfg->num_keep_alive_pattern =
			tgt_res_cfg->num_keep_alive_pattern;
	resource_cfg->keep_alive_pattern_size =
			tgt_res_cfg->keep_alive_pattern_size;
	resource_cfg->max_tdls_concurrent_sleep_sta =
			tgt_res_cfg->max_tdls_concurrent_sleep_sta;
	resource_cfg->max_tdls_concurrent_buffer_sta =
			tgt_res_cfg->max_tdls_concurrent_buffer_sta;
	resource_cfg->wmi_send_separate =
			tgt_res_cfg->wmi_send_separate;
	resource_cfg->num_ocb_vdevs =
			tgt_res_cfg->num_ocb_vdevs;
	resource_cfg->num_ocb_channels =
			tgt_res_cfg->num_ocb_channels;
	resource_cfg->num_ocb_schedules =
			tgt_res_cfg->num_ocb_schedules;
	/* note: host calls this APF, the firmware field is still named BPF */
	resource_cfg->bpf_instruction_size = tgt_res_cfg->apf_instruction_size;
	resource_cfg->max_bssid_rx_filters = tgt_res_cfg->max_bssid_rx_filters;
	resource_cfg->use_pdev_id = tgt_res_cfg->use_pdev_id;
	resource_cfg->max_num_dbs_scan_duty_cycle =
			tgt_res_cfg->max_num_dbs_scan_duty_cycle;
	resource_cfg->sched_params = tgt_res_cfg->scheduler_params;
	resource_cfg->num_packet_filters = tgt_res_cfg->num_packet_filters;
	resource_cfg->num_max_sta_vdevs = tgt_res_cfg->num_max_sta_vdevs;

	/* boolean feature capabilities are packed into the flag1 bitfield */
	if (tgt_res_cfg->atf_config)
		WMI_RSRC_CFG_FLAG_ATF_CONFIG_ENABLE_SET(resource_cfg->flag1, 1);
	if (tgt_res_cfg->mgmt_comp_evt_bundle_support)
		WMI_RSRC_CFG_FLAG_MGMT_COMP_EVT_BUNDLE_SUPPORT_SET(
			resource_cfg->flag1, 1);
	if (tgt_res_cfg->tx_msdu_new_partition_id_support)
		WMI_RSRC_CFG_FLAG_TX_MSDU_ID_NEW_PARTITION_SUPPORT_SET(
			resource_cfg->flag1, 1);
	if (tgt_res_cfg->cce_disable)
		WMI_RSRC_CFG_FLAG_TCL_CCE_DISABLE_SET(resource_cfg->flag1, 1);

	if (tgt_res_cfg->new_htt_msg_format) {
		WMI_RSRC_CFG_FLAG_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN_SET(
			resource_cfg->flag1, 1);
	}

	if (tgt_res_cfg->peer_unmap_conf_support)
		WMI_RSRC_CFG_FLAG_PEER_UNMAP_RESPONSE_SUPPORT_SET(
			resource_cfg->flag1, 1);

	if (tgt_res_cfg->tstamp64_en)
		WMI_RSRC_CFG_FLAG_TX_COMPLETION_TX_TSF64_ENABLE_SET(
			resource_cfg->flag1, 1);

	if (tgt_res_cfg->three_way_coex_config_legacy_en)
		WMI_RSRC_CFG_FLAG_THREE_WAY_COEX_CONFIG_LEGACY_SUPPORT_SET(
			resource_cfg->flag1, 1);

	wmi_copy_twt_resource_config(resource_cfg, tgt_res_cfg);
}

/* copy_hw_mode_id_in_init_cmd() - Helper routine to copy hw_mode in init cmd
 * @wmi_handle: pointer to wmi handle
 * @buf_ptr: pointer to current position in init command buffer
 * @len: pointer to length. This will be updated with current length of cmd
 * @param: point host parameters for init command
 *
 * Return: Updated pointer of buf_ptr.
 */
static inline uint8_t *copy_hw_mode_in_init_cmd(struct wmi_unified *wmi_handle,
		uint8_t *buf_ptr, int *len, struct wmi_init_cmd_param *param)
{
	uint16_t idx;

	/* only append the hw-mode TLVs when the host requests a specific
	 * hw mode; otherwise the init command is left unchanged
	 */
	if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
		wmi_pdev_set_hw_mode_cmd_fixed_param *hw_mode;
		wmi_pdev_band_to_mac *band_to_mac;

		/* hw-mode TLV sits after fixed param, resource config and
		 * the host-memory-chunk array already placed in the buffer
		 */
		hw_mode = (wmi_pdev_set_hw_mode_cmd_fixed_param *)
			(buf_ptr + sizeof(wmi_init_cmd_fixed_param) +
			 sizeof(wmi_resource_config) +
			 WMI_TLV_HDR_SIZE + (param->num_mem_chunks *
				 sizeof(wlan_host_memory_chunk)));

		WMITLV_SET_HDR(&hw_mode->tlv_header,
			WMITLV_TAG_STRUC_wmi_pdev_set_hw_mode_cmd_fixed_param,
			(WMITLV_GET_STRUCT_TLVLEN
			 (wmi_pdev_set_hw_mode_cmd_fixed_param)));

		hw_mode->hw_mode_index = param->hw_mode_id;
		hw_mode->num_band_to_mac = param->num_band_to_mac;

		buf_ptr = (uint8_t *) (hw_mode + 1);
		band_to_mac = (wmi_pdev_band_to_mac *) (buf_ptr +
				WMI_TLV_HDR_SIZE);
		for (idx = 0; idx < param->num_band_to_mac; idx++) {
			WMITLV_SET_HDR(&band_to_mac[idx].tlv_header,
					WMITLV_TAG_STRUC_wmi_pdev_band_to_mac,
					WMITLV_GET_STRUCT_TLVLEN
					(wmi_pdev_band_to_mac));
			band_to_mac[idx].pdev_id =
				wmi_handle->ops->convert_pdev_id_host_to_target(
					param->band_to_mac[idx].pdev_id);
			band_to_mac[idx].start_freq =
				param->band_to_mac[idx].start_freq;
			band_to_mac[idx].end_freq =
				param->band_to_mac[idx].end_freq;
		}
		*len += sizeof(wmi_pdev_set_hw_mode_cmd_fixed_param) +
			(param->num_band_to_mac *
			 sizeof(wmi_pdev_band_to_mac)) +
			WMI_TLV_HDR_SIZE;

		WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
			       (param->num_band_to_mac *
				sizeof(wmi_pdev_band_to_mac)));
	}

	return buf_ptr;
}

/* negotiate the host/firmware WMI ABI version for the INIT command */
static inline void copy_fw_abi_version_tlv(wmi_unified_t wmi_handle,
					   wmi_init_cmd_fixed_param *cmd)
{
	int num_whitelist;
	wmi_abi_version my_vers;

	num_whitelist = sizeof(version_whitelist) /
		sizeof(wmi_whitelist_version_info);
	my_vers.abi_version_0 = WMI_ABI_VERSION_0;
	my_vers.abi_version_1 = WMI_ABI_VERSION_1;
	my_vers.abi_version_ns_0 = WMI_ABI_VERSION_NS_0;
	my_vers.abi_version_ns_1 = WMI_ABI_VERSION_NS_1;
	my_vers.abi_version_ns_2 = WMI_ABI_VERSION_NS_2;
	my_vers.abi_version_ns_3 = WMI_ABI_VERSION_NS_3;

	wmi_cmp_and_set_abi_version(num_whitelist, version_whitelist,
				    &my_vers,
				    (struct _wmi_abi_version *)&wmi_handle->fw_abi_version,
				    &cmd->host_abi_vers);

	qdf_print("%s: INIT_CMD version: %d, %d, 0x%x, 0x%x, 0x%x, 0x%x",
		  __func__,
		  WMI_VER_GET_MAJOR(cmd->host_abi_vers.abi_version_0),
		  WMI_VER_GET_MINOR(cmd->host_abi_vers.abi_version_0),
		  cmd->host_abi_vers.abi_version_ns_0,
		  cmd->host_abi_vers.abi_version_ns_1,
		  cmd->host_abi_vers.abi_version_ns_2,
		  cmd->host_abi_vers.abi_version_ns_3);

	/* Save version sent from host -
	 * Will be used to check ready event
	 */
	qdf_mem_copy(&wmi_handle->final_abi_vers, &cmd->host_abi_vers,
		     sizeof(wmi_abi_version));
}

static QDF_STATUS save_fw_version_cmd_tlv(wmi_unified_t wmi_handle, void *evt_buf)
{
	WMI_SERVICE_READY_EVENTID_param_tlvs *param_buf;
	wmi_service_ready_event_fixed_param *ev;


	param_buf = (WMI_SERVICE_READY_EVENTID_param_tlvs *) evt_buf;

	ev = (wmi_service_ready_event_fixed_param *) param_buf->fixed_param;
	if (!ev)
		return QDF_STATUS_E_FAILURE;

	/*Save fw version from service ready message */
	/*This will be used while sending INIT message */
	qdf_mem_copy(&wmi_handle->fw_abi_version, &ev->fw_abi_vers,
		     sizeof(wmi_handle->fw_abi_version));

	return QDF_STATUS_SUCCESS;
}

/**
 * wmi_unified_save_fw_version_cmd() - save fw version
 * @wmi_handle: pointer to wmi handle
 * @res_cfg: resource config
 * @num_mem_chunks: no of mem chunck
 * @mem_chunk: pointer to mem chunck structure
 *
 * This function sends IE information to firmware
 *
 * Return: QDF_STATUS_SUCCESS for success otherwise failure
 *
 */
static QDF_STATUS check_and_update_fw_version_cmd_tlv(wmi_unified_t wmi_handle,
						      void *evt_buf)
{
	WMI_READY_EVENTID_param_tlvs *param_buf = NULL;
wmi_ready_event_fixed_param *ev = NULL;

	param_buf = (WMI_READY_EVENTID_param_tlvs *) evt_buf;
	ev = param_buf->fixed_param;
	/* compare against the version the host sent in the INIT command */
	if (!wmi_versions_are_compatible((struct _wmi_abi_version *)
					 &wmi_handle->final_abi_vers,
					 &ev->fw_abi_vers)) {
		/*
		 * Error: Our host version and the given firmware version
		 * are incompatible.
		 **/
		WMI_LOGD("%s: Error: Incompatible WMI version."
			 "Host: %d,%d,0x%x 0x%x 0x%x 0x%x, FW: %d,%d,0x%x 0x%x 0x%x 0x%x\n",
			 __func__,
			 WMI_VER_GET_MAJOR(wmi_handle->final_abi_vers.
					   abi_version_0),
			 WMI_VER_GET_MINOR(wmi_handle->final_abi_vers.
					   abi_version_0),
			 wmi_handle->final_abi_vers.abi_version_ns_0,
			 wmi_handle->final_abi_vers.abi_version_ns_1,
			 wmi_handle->final_abi_vers.abi_version_ns_2,
			 wmi_handle->final_abi_vers.abi_version_ns_3,
			 WMI_VER_GET_MAJOR(ev->fw_abi_vers.abi_version_0),
			 WMI_VER_GET_MINOR(ev->fw_abi_vers.abi_version_0),
			 ev->fw_abi_vers.abi_version_ns_0,
			 ev->fw_abi_vers.abi_version_ns_1,
			 ev->fw_abi_vers.abi_version_ns_2,
			 ev->fw_abi_vers.abi_version_ns_3);

		return QDF_STATUS_E_FAILURE;
	}
	/* record the agreed-upon ABI version for later reference */
	qdf_mem_copy(&wmi_handle->final_abi_vers, &ev->fw_abi_vers,
		     sizeof(wmi_abi_version));
	qdf_mem_copy(&wmi_handle->fw_abi_version, &ev->fw_abi_vers,
		     sizeof(wmi_abi_version));

	return QDF_STATUS_SUCCESS;
}

/**
 * send_set_base_macaddr_indicate_cmd_tlv() - set base mac address in fw
 * @wmi_handle: wmi handle
 * @custom_addr: base mac address
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
static QDF_STATUS send_set_base_macaddr_indicate_cmd_tlv(wmi_unified_t wmi_handle,
					 uint8_t *custom_addr)
{
	wmi_pdev_set_base_macaddr_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	int err;

	buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd));
	if (!buf) {
		WMI_LOGE("Failed to allocate buffer to send base macaddr cmd");
		return QDF_STATUS_E_NOMEM;
	}

	cmd = (wmi_pdev_set_base_macaddr_cmd_fixed_param *) wmi_buf_data(buf);
	qdf_mem_zero(cmd, sizeof(*cmd));

	WMITLV_SET_HDR(&cmd->tlv_header,
		   WMITLV_TAG_STRUC_wmi_pdev_set_base_macaddr_cmd_fixed_param,
		   WMITLV_GET_STRUCT_TLVLEN
			   (wmi_pdev_set_base_macaddr_cmd_fixed_param));
	WMI_CHAR_ARRAY_TO_MAC_ADDR(custom_addr, &cmd->base_macaddr);
	cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target(
							WMI_HOST_PDEV_ID_SOC);
	wmi_mtrace(WMI_PDEV_SET_BASE_MACADDR_CMDID, NO_SESSION, 0);
	err = wmi_unified_cmd_send(wmi_handle, buf,
				   sizeof(*cmd),
				   WMI_PDEV_SET_BASE_MACADDR_CMDID);
	if (err) {
		WMI_LOGE("Failed to send set_base_macaddr cmd");
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}

	/* 0 == QDF_STATUS_SUCCESS */
	return 0;
}

/**
 * send_log_supported_evt_cmd_tlv() - Enable/Disable FW diag/log events
 * @handle: wmi handle
 * @event: Event received from FW
 * @len: Length of the event
 *
 * Enables the low frequency events and disables the high frequency
 * events. Bit 17 indicates if the event if low/high frequency.
 * 1 - high frequency, 0 - low frequency
 *
 * Return: 0 on successfully enabling/disabling the events
 */
static QDF_STATUS send_log_supported_evt_cmd_tlv(wmi_unified_t wmi_handle,
		uint8_t *event,
		uint32_t len)
{
	uint32_t num_of_diag_events_logs;
	wmi_diag_event_log_config_fixed_param *cmd;
	wmi_buf_t buf;
	uint8_t *buf_ptr;
	uint32_t *cmd_args, *evt_args;
	uint32_t buf_len, i;

	WMI_DIAG_EVENT_LOG_SUPPORTED_EVENTID_param_tlvs *param_buf;
	wmi_diag_event_log_supported_event_fixed_params *wmi_event;

	WMI_LOGI("Received WMI_DIAG_EVENT_LOG_SUPPORTED_EVENTID");

	param_buf = (WMI_DIAG_EVENT_LOG_SUPPORTED_EVENTID_param_tlvs *) event;
	if (!param_buf) {
		WMI_LOGE("Invalid log supported event buffer");
		return QDF_STATUS_E_INVAL;
	}
	wmi_event = param_buf->fixed_param;
	num_of_diag_events_logs = wmi_event->num_of_diag_events_logs;

	/* reject firmware-supplied counts larger than the actual TLV array */
	if (num_of_diag_events_logs >
	    param_buf->num_diag_events_logs_list) {
		WMI_LOGE("message number of events %d is more than tlv hdr content %d",
			 num_of_diag_events_logs,
			 param_buf->num_diag_events_logs_list);
		return
QDF_STATUS_E_INVAL;
	}

	evt_args = param_buf->diag_events_logs_list;
	if (!evt_args) {
		WMI_LOGE("%s: Event list is empty, num_of_diag_events_logs=%d",
			 __func__, num_of_diag_events_logs);
		return QDF_STATUS_E_INVAL;
	}

	WMI_LOGD("%s: num_of_diag_events_logs=%d",
		 __func__, num_of_diag_events_logs);

	/* Free any previous allocation */
	if (wmi_handle->events_logs_list) {
		qdf_mem_free(wmi_handle->events_logs_list);
		wmi_handle->events_logs_list = NULL;
	}

	/* bound the allocation against the maximum WMI message size */
	if (num_of_diag_events_logs >
	    (WMI_SVC_MSG_MAX_SIZE / sizeof(uint32_t))) {
		WMI_LOGE("%s: excess num of logs:%d", __func__,
			 num_of_diag_events_logs);
		QDF_ASSERT(0);
		return QDF_STATUS_E_INVAL;
	}
	/* Store the event list for run time enable/disable */
	wmi_handle->events_logs_list = qdf_mem_malloc(num_of_diag_events_logs *
						      sizeof(uint32_t));
	if (!wmi_handle->events_logs_list) {
		WMI_LOGE("%s: event log list memory allocation failed",
			 __func__);
		return QDF_STATUS_E_NOMEM;
	}
	wmi_handle->num_of_diag_events_logs = num_of_diag_events_logs;

	/* Prepare the send buffer */
	buf_len = sizeof(*cmd) + WMI_TLV_HDR_SIZE +
		  (num_of_diag_events_logs * sizeof(uint32_t));

	buf = wmi_buf_alloc(wmi_handle, buf_len);
	if (!buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed", __func__);
		qdf_mem_free(wmi_handle->events_logs_list);
		wmi_handle->events_logs_list = NULL;
		return QDF_STATUS_E_NOMEM;
	}

	cmd = (wmi_diag_event_log_config_fixed_param *) wmi_buf_data(buf);
	buf_ptr = (uint8_t *) cmd;

	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_diag_event_log_config_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
				wmi_diag_event_log_config_fixed_param));

	cmd->num_of_diag_events_logs = num_of_diag_events_logs;

	buf_ptr += sizeof(wmi_diag_event_log_config_fixed_param);

	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32,
		       (num_of_diag_events_logs * sizeof(uint32_t)));

	cmd_args = (uint32_t *) (buf_ptr + WMI_TLV_HDR_SIZE);

	/* Populate the events */
	for (i = 0; i < num_of_diag_events_logs; i++) {
		/* Low freq (0) - Enable (1) the event
		 * High freq (1) - Disable (0) the event
		 */
		WMI_DIAG_ID_ENABLED_DISABLED_SET(cmd_args[i],
				!(WMI_DIAG_FREQUENCY_GET(evt_args[i])));
		/* Set the event ID */
		WMI_DIAG_ID_SET(cmd_args[i],
				WMI_DIAG_ID_GET(evt_args[i]));
		/* Set the type */
		WMI_DIAG_TYPE_SET(cmd_args[i],
				  WMI_DIAG_TYPE_GET(evt_args[i]));
		/* Storing the event/log list in WMI */
		wmi_handle->events_logs_list[i] = evt_args[i];
	}

	wmi_mtrace(WMI_DIAG_EVENT_LOG_CONFIG_CMDID, NO_SESSION, 0);
	if (wmi_unified_cmd_send(wmi_handle, buf, buf_len,
				 WMI_DIAG_EVENT_LOG_CONFIG_CMDID)) {
		WMI_LOGE("%s: WMI_DIAG_EVENT_LOG_CONFIG_CMDID failed",
			 __func__);
		wmi_buf_free(buf);
		/* Not clearing events_logs_list, though wmi cmd failed.
		 * Host can still have this list
		 */
		return QDF_STATUS_E_INVAL;
	}

	return 0;
}

/**
 * send_enable_specific_fw_logs_cmd_tlv() - Start/Stop logging of diag log id
 * @wmi_handle: wmi handle
 * @start_log: Start logging related parameters
 *
 * Send the command to the FW based on which specific logging of diag
 * event/log id can be started/stopped
 *
 * Return: None
 */
static QDF_STATUS send_enable_specific_fw_logs_cmd_tlv(wmi_unified_t wmi_handle,
		struct wmi_wifi_start_log *start_log)
{
	wmi_diag_event_log_config_fixed_param *cmd;
	wmi_buf_t buf;
	uint8_t *buf_ptr;
	uint32_t len, count, log_level, i;
	uint32_t *cmd_args;
	uint32_t total_len;
	count = 0;

	if (!wmi_handle->events_logs_list) {
		WMI_LOGD("%s: Not received event/log list from FW, yet",
			 __func__);
		return QDF_STATUS_E_NOMEM;
	}
	/* total_len stores the number of events where BITS 17 and 18 are set.
	 * i.e., events of high frequency (17) and for extended debugging (18)
	 */
	total_len = 0;
	for (i = 0; i < wmi_handle->num_of_diag_events_logs; i++) {
		if ((WMI_DIAG_FREQUENCY_GET(wmi_handle->events_logs_list[i])) &&
		    (WMI_DIAG_EXT_FEATURE_GET(wmi_handle->events_logs_list[i])))
			total_len++;
	}

	len = sizeof(*cmd) + WMI_TLV_HDR_SIZE +
		(total_len * sizeof(uint32_t));

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}
	cmd = (wmi_diag_event_log_config_fixed_param *) wmi_buf_data(buf);
	buf_ptr = (uint8_t *) cmd;

	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_diag_event_log_config_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
				wmi_diag_event_log_config_fixed_param));

	cmd->num_of_diag_events_logs = total_len;

	buf_ptr += sizeof(wmi_diag_event_log_config_fixed_param);

	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32,
		       (total_len * sizeof(uint32_t)));

	cmd_args = (uint32_t *) (buf_ptr + WMI_TLV_HDR_SIZE);

	/* map the host verbose level onto the firmware's enable/disable bit */
	if (start_log->verbose_level >= WMI_LOG_LEVEL_ACTIVE)
		log_level = 1;
	else
		log_level = 0;

	WMI_LOGD("%s: Length:%d, Log_level:%d", __func__, total_len, log_level);
	for (i = 0; i < wmi_handle->num_of_diag_events_logs; i++) {
		uint32_t val = wmi_handle->events_logs_list[i];
		if ((WMI_DIAG_FREQUENCY_GET(val)) &&
		    (WMI_DIAG_EXT_FEATURE_GET(val))) {

			WMI_DIAG_ID_SET(cmd_args[count],
					WMI_DIAG_ID_GET(val));
			WMI_DIAG_TYPE_SET(cmd_args[count],
					  WMI_DIAG_TYPE_GET(val));
			WMI_DIAG_ID_ENABLED_DISABLED_SET(cmd_args[count],
							 log_level);
			WMI_LOGD("%s: Idx:%d, val:%x", __func__, i, val);
			count++;
		}
	}

	wmi_mtrace(WMI_DIAG_EVENT_LOG_CONFIG_CMDID, NO_SESSION, 0);
	if (wmi_unified_cmd_send(wmi_handle, buf, len,
				 WMI_DIAG_EVENT_LOG_CONFIG_CMDID)) {
		WMI_LOGE("%s: WMI_DIAG_EVENT_LOG_CONFIG_CMDID failed",
			 __func__);
		wmi_buf_free(buf);
		return QDF_STATUS_E_INVAL;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 *
send_flush_logs_to_fw_cmd_tlv() - Send log flush command to FW
 * @wmi_handle: WMI handle
 *
 * This function is used to send the flush command to the FW,
 * that will flush the fw logs that are residue in the FW
 *
 * Return: None
 */
static QDF_STATUS send_flush_logs_to_fw_cmd_tlv(wmi_unified_t wmi_handle)
{
	wmi_debug_mesg_flush_fixed_param *cmd;
	wmi_buf_t buf;
	int len = sizeof(*cmd);
	QDF_STATUS ret;

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGP("%s: wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	cmd = (wmi_debug_mesg_flush_fixed_param *) wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_debug_mesg_flush_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(
				wmi_debug_mesg_flush_fixed_param));
	cmd->reserved0 = 0;

	wmi_mtrace(WMI_DEBUG_MESG_FLUSH_CMDID, NO_SESSION, 0);
	ret = wmi_unified_cmd_send(wmi_handle,
				   buf,
				   len,
				   WMI_DEBUG_MESG_FLUSH_CMDID);
	if (QDF_IS_STATUS_ERROR(ret)) {
		WMI_LOGE("Failed to send WMI_DEBUG_MESG_FLUSH_CMDID");
		wmi_buf_free(buf);
		return QDF_STATUS_E_INVAL;
	}
	WMI_LOGD("Sent WMI_DEBUG_MESG_FLUSH_CMDID to FW");

	return ret;
}

/**
 * send_pdev_set_pcl_cmd_tlv() - Send WMI_SOC_SET_PCL_CMDID to FW
 * @wmi_handle: wmi handle
 * @msg: PCL structure containing the PCL and the number of channels
 *
 * WMI_PDEV_SET_PCL_CMDID provides a Preferred Channel List (PCL) to the WLAN
 * firmware. The DBS Manager is the consumer of this information in the WLAN
 * firmware. The channel list will be used when a Virtual DEVice (VDEV) needs
 * to migrate to a new channel without host driver involvement. An example of
 * this behavior is Legacy Fast Roaming (LFR 3.0). Generally, the host will
 * manage the channel selection without firmware involvement.
 *
 * WMI_PDEV_SET_PCL_CMDID will carry only the weight list and not the actual
 * channel list. The weights corresponds to the channels sent in
 * WMI_SCAN_CHAN_LIST_CMDID. The channels from PCL would be having a higher
 * weightage compared to the non PCL channels.
 *
 * Return: Success if the cmd is sent successfully to the firmware
 */
static QDF_STATUS send_pdev_set_pcl_cmd_tlv(wmi_unified_t wmi_handle,
				struct wmi_pcl_chan_weights *msg)
{
	wmi_pdev_set_pcl_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	uint8_t *buf_ptr;
	uint32_t *cmd_args, i, len;
	uint32_t chan_len;

	chan_len = msg->saved_num_chan;

	/* fixed param + TLV header + one weight word per saved channel */
	len = sizeof(*cmd) +
		WMI_TLV_HDR_SIZE + (chan_len * sizeof(uint32_t));

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	cmd = (wmi_pdev_set_pcl_cmd_fixed_param *) wmi_buf_data(buf);
	buf_ptr = (uint8_t *) cmd;
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_pdev_set_pcl_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(wmi_pdev_set_pcl_cmd_fixed_param));

	cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target(
							WMI_HOST_PDEV_ID_SOC);
	cmd->num_chan = chan_len;
	WMI_LOGD("%s: Total chan (PCL) len:%d", __func__, cmd->num_chan);

	buf_ptr += sizeof(wmi_pdev_set_pcl_cmd_fixed_param);
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32,
		       (chan_len * sizeof(uint32_t)));
	cmd_args = (uint32_t *) (buf_ptr + WMI_TLV_HDR_SIZE);
	for (i = 0; i < chan_len ; i++) {
		cmd_args[i] = msg->weighed_valid_list[i];
		WMI_LOGD("%s: chan:%d weight:%d", __func__,
			 msg->saved_chan_list[i], cmd_args[i]);
	}
	wmi_mtrace(WMI_PDEV_SET_PCL_CMDID, NO_SESSION, 0);
	if (wmi_unified_cmd_send(wmi_handle, buf, len,
				 WMI_PDEV_SET_PCL_CMDID)) {
		WMI_LOGE("%s: Failed to send WMI_PDEV_SET_PCL_CMDID", __func__);
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * send_pdev_set_hw_mode_cmd_tlv() - Send WMI_PDEV_SET_HW_MODE_CMDID to FW
 * @wmi_handle: wmi handle
 * @msg: Structure containing the following parameters
 *
 * - hw_mode_index: The HW_Mode field is an enumerated type that is selected
 * from the HW_Mode table, which is returned in the WMI_SERVICE_READY_EVENTID.
 *
 * Provides notification to the WLAN firmware that host driver is requesting a
 * HardWare (HW) Mode change. This command is needed to support iHelium in the
 * configurations that include the Dual Band Simultaneous (DBS) feature.
 *
 * Return: Success if the cmd is sent successfully to the firmware
 */
static QDF_STATUS send_pdev_set_hw_mode_cmd_tlv(wmi_unified_t wmi_handle,
				uint32_t hw_mode_index)
{
	wmi_pdev_set_hw_mode_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	uint32_t len;

	len = sizeof(*cmd);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	cmd = (wmi_pdev_set_hw_mode_cmd_fixed_param *) wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_pdev_set_hw_mode_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN(wmi_pdev_set_hw_mode_cmd_fixed_param));

	cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target(
							WMI_HOST_PDEV_ID_SOC);
	cmd->hw_mode_index = hw_mode_index;
	WMI_LOGI("%s: HW mode index:%d", __func__, cmd->hw_mode_index);

	wmi_mtrace(WMI_PDEV_SET_HW_MODE_CMDID, NO_SESSION, 0);
	if (wmi_unified_cmd_send(wmi_handle, buf, len,
				 WMI_PDEV_SET_HW_MODE_CMDID)) {
		WMI_LOGE("%s: Failed to send WMI_PDEV_SET_HW_MODE_CMDID",
			 __func__);
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_POLICY_MGR_ENABLE
/**
 * send_pdev_set_dual_mac_config_cmd_tlv() - Set dual mac config to FW
 * @wmi_handle: wmi handle
 * @msg: Dual MAC config parameters
 *
 * Configures WLAN firmware with the dual MAC features
 *
 * Return: QDF_STATUS. 0 on success.
+ */ +static +QDF_STATUS send_pdev_set_dual_mac_config_cmd_tlv(wmi_unified_t wmi_handle, + struct policy_mgr_dual_mac_config *msg) +{ + wmi_pdev_set_mac_config_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint32_t len; + + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_pdev_set_mac_config_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_set_mac_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_set_mac_config_cmd_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + WMI_HOST_PDEV_ID_SOC); + cmd->concurrent_scan_config_bits = msg->scan_config; + cmd->fw_mode_config_bits = msg->fw_mode_config; + WMI_LOGD("%s: scan_config:%x fw_mode_config:%x", + __func__, msg->scan_config, msg->fw_mode_config); + + wmi_mtrace(WMI_PDEV_SET_MAC_CONFIG_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_SET_MAC_CONFIG_CMDID)) { + WMI_LOGE("%s: Failed to send WMI_PDEV_SET_MAC_CONFIG_CMDID", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef BIG_ENDIAN_HOST +/** +* fips_conv_data_be() - LE to BE conversion of FIPS ev data +* @param data_len - data length +* @param data - pointer to data +* +* Return: QDF_STATUS - success or error status +*/ +static QDF_STATUS fips_align_data_be(wmi_unified_t wmi_handle, + struct fips_params *param) +{ + unsigned char *key_unaligned, *data_unaligned; + int c; + u_int8_t *key_aligned = NULL; + u_int8_t *data_aligned = NULL; + + /* Assigning unaligned space to copy the key */ + key_unaligned = qdf_mem_malloc( + sizeof(u_int8_t)*param->key_len + FIPS_ALIGN); + data_unaligned = qdf_mem_malloc( + sizeof(u_int8_t)*param->data_len + FIPS_ALIGN); + + /* Checking if kmalloc is successful to allocate space */ + if (key_unaligned == NULL) + return 
QDF_STATUS_SUCCESS; + /* Checking if space is aligned */ + if (!FIPS_IS_ALIGNED(key_unaligned, FIPS_ALIGN)) { + /* align to 4 */ + key_aligned = + (u_int8_t *)FIPS_ALIGNTO(key_unaligned, + FIPS_ALIGN); + } else { + key_aligned = (u_int8_t *)key_unaligned; + } + + /* memset and copy content from key to key aligned */ + OS_MEMSET(key_aligned, 0, param->key_len); + OS_MEMCPY(key_aligned, param->key, param->key_len); + + /* print a hexdump for host debug */ + print_hex_dump(KERN_DEBUG, + "\t Aligned and Copied Key:@@@@ ", + DUMP_PREFIX_NONE, + 16, 1, key_aligned, param->key_len, true); + + /* Checking if kmalloc is successful to allocate space */ + if (data_unaligned == NULL) + return QDF_STATUS_SUCCESS; + /* Checking of space is aligned */ + if (!FIPS_IS_ALIGNED(data_unaligned, FIPS_ALIGN)) { + /* align to 4 */ + data_aligned = + (u_int8_t *)FIPS_ALIGNTO(data_unaligned, + FIPS_ALIGN); + } else { + data_aligned = (u_int8_t *)data_unaligned; + } + + /* memset and copy content from data to data aligned */ + OS_MEMSET(data_aligned, 0, param->data_len); + OS_MEMCPY(data_aligned, param->data, param->data_len); + + /* print a hexdump for host debug */ + print_hex_dump(KERN_DEBUG, + "\t Properly Aligned and Copied Data:@@@@ ", + DUMP_PREFIX_NONE, + 16, 1, data_aligned, param->data_len, true); + + /* converting to little Endian both key_aligned and + * data_aligned*/ + for (c = 0; c < param->key_len/4; c++) { + *((u_int32_t *)key_aligned+c) = + qdf_cpu_to_le32(*((u_int32_t *)key_aligned+c)); + } + for (c = 0; c < param->data_len/4; c++) { + *((u_int32_t *)data_aligned+c) = + qdf_cpu_to_le32(*((u_int32_t *)data_aligned+c)); + } + + /* update endian data to key and data vectors */ + OS_MEMCPY(param->key, key_aligned, param->key_len); + OS_MEMCPY(param->data, data_aligned, param->data_len); + + /* clean up allocated spaces */ + qdf_mem_free(key_unaligned); + key_unaligned = NULL; + key_aligned = NULL; + + qdf_mem_free(data_unaligned); + data_unaligned = NULL; + data_aligned = 
NULL; + + return QDF_STATUS_SUCCESS; +} +#else +/** +* fips_align_data_be() - DUMMY for LE platform +* +* Return: QDF_STATUS - success +*/ +static QDF_STATUS fips_align_data_be(wmi_unified_t wmi_handle, + struct fips_params *param) +{ + return QDF_STATUS_SUCCESS; +} +#endif + + +/** + * send_pdev_fips_cmd_tlv() - send pdev fips cmd to fw + * @wmi_handle: wmi handle + * @param: pointer to hold pdev fips param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_pdev_fips_cmd_tlv(wmi_unified_t wmi_handle, + struct fips_params *param) +{ + wmi_pdev_fips_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + uint32_t len = sizeof(wmi_pdev_fips_cmd_fixed_param); + QDF_STATUS retval = QDF_STATUS_SUCCESS; + + /* Length TLV placeholder for array of bytes */ + len += WMI_TLV_HDR_SIZE; + if (param->data_len) + len += (param->data_len*sizeof(uint8_t)); + + /* + * Data length must be multiples of 16 bytes - checked against 0xF - + * and must be less than WMI_SVC_MSG_SIZE - static size of + * wmi_pdev_fips_cmd structure + */ + + /* do sanity on the input */ + if (!(((param->data_len & 0xF) == 0) && + ((param->data_len > 0) && + (param->data_len < (WMI_HOST_MAX_BUFFER_SIZE - + sizeof(wmi_pdev_fips_cmd_fixed_param)))))) { + return QDF_STATUS_E_INVAL; + } + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_pdev_fips_cmd_fixed_param *)buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_fips_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_fips_cmd_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + param->pdev_id); + if (param->key != NULL && param->data != NULL) { + cmd->key_len = param->key_len; + cmd->data_len = param->data_len; + cmd->fips_cmd = !!(param->op); + + if (fips_align_data_be(wmi_handle, param) != QDF_STATUS_SUCCESS) + return 
QDF_STATUS_E_FAILURE; + + qdf_mem_copy(cmd->key, param->key, param->key_len); + + if (param->mode == FIPS_ENGINE_AES_CTR || + param->mode == FIPS_ENGINE_AES_MIC) { + cmd->mode = param->mode; + } else { + cmd->mode = FIPS_ENGINE_AES_CTR; + } + qdf_print(KERN_ERR "Key len = %d, Data len = %d\n", + cmd->key_len, cmd->data_len); + + print_hex_dump(KERN_DEBUG, "Key: ", DUMP_PREFIX_NONE, 16, 1, + cmd->key, cmd->key_len, true); + buf_ptr += sizeof(*cmd); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, param->data_len); + + buf_ptr += WMI_TLV_HDR_SIZE; + if (param->data_len) + qdf_mem_copy(buf_ptr, + (uint8_t *) param->data, param->data_len); + + print_hex_dump(KERN_DEBUG, "Plain text: ", DUMP_PREFIX_NONE, + 16, 1, buf_ptr, cmd->data_len, true); + + buf_ptr += param->data_len; + + wmi_mtrace(WMI_PDEV_FIPS_CMDID, NO_SESSION, 0); + retval = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_FIPS_CMDID); + qdf_print("%s return value %d\n", __func__, retval); + } else { + qdf_print("\n%s:%d Key or Data is NULL\n", __func__, __LINE__); + wmi_buf_free(buf); + retval = -QDF_STATUS_E_BADMSG; + } + + return retval; +} + +#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD +/** + * send_add_wow_wakeup_event_cmd_tlv() - Configures wow wakeup events. 
+ * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @bitmap: Event bitmap + * @enable: enable/disable + * + * Return: CDF status + */ +static QDF_STATUS send_add_wow_wakeup_event_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, + uint32_t *bitmap, + bool enable) +{ + WMI_WOW_ADD_DEL_EVT_CMD_fixed_param *cmd; + uint16_t len; + wmi_buf_t buf; + int ret; + + len = sizeof(WMI_WOW_ADD_DEL_EVT_CMD_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (WMI_WOW_ADD_DEL_EVT_CMD_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_WOW_ADD_DEL_EVT_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_WOW_ADD_DEL_EVT_CMD_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->is_add = enable; + qdf_mem_copy(&(cmd->event_bitmaps[0]), bitmap, sizeof(uint32_t) * + WMI_WOW_MAX_EVENT_BM_LEN); + + WMI_LOGD("Wakeup pattern 0x%x%x%x%x %s in fw", cmd->event_bitmaps[0], + cmd->event_bitmaps[1], cmd->event_bitmaps[2], + cmd->event_bitmaps[3], enable ? "enabled" : "disabled"); + + wmi_mtrace(WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID); + if (ret) { + WMI_LOGE("Failed to config wow wakeup event"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_wow_patterns_to_fw_cmd_tlv() - Sends WOW patterns to FW. 
+ * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @ptrn_id: pattern id + * @ptrn: pattern + * @ptrn_len: pattern length + * @ptrn_offset: pattern offset + * @mask: mask + * @mask_len: mask length + * @user: true for user configured pattern and false for default pattern + * @default_patterns: default patterns + * + * Return: CDF status + */ +static QDF_STATUS send_wow_patterns_to_fw_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, uint8_t ptrn_id, + const uint8_t *ptrn, uint8_t ptrn_len, + uint8_t ptrn_offset, const uint8_t *mask, + uint8_t mask_len, bool user, + uint8_t default_patterns) +{ + WMI_WOW_ADD_PATTERN_CMD_fixed_param *cmd; + WOW_BITMAP_PATTERN_T *bitmap_pattern; + wmi_buf_t buf; + uint8_t *buf_ptr; + int32_t len; + int ret; + + len = sizeof(WMI_WOW_ADD_PATTERN_CMD_fixed_param) + + WMI_TLV_HDR_SIZE + + 1 * sizeof(WOW_BITMAP_PATTERN_T) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(WOW_IPV4_SYNC_PATTERN_T) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(WOW_IPV6_SYNC_PATTERN_T) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(WOW_MAGIC_PATTERN_CMD) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(uint32_t) + WMI_TLV_HDR_SIZE + 1 * sizeof(uint32_t); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (WMI_WOW_ADD_PATTERN_CMD_fixed_param *) wmi_buf_data(buf); + buf_ptr = (uint8_t *) cmd; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_WOW_ADD_PATTERN_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_WOW_ADD_PATTERN_CMD_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->pattern_id = ptrn_id; + + cmd->pattern_type = WOW_BITMAP_PATTERN; + buf_ptr += sizeof(WMI_WOW_ADD_PATTERN_CMD_fixed_param); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(WOW_BITMAP_PATTERN_T)); + buf_ptr += WMI_TLV_HDR_SIZE; + bitmap_pattern = (WOW_BITMAP_PATTERN_T *) buf_ptr; + + WMITLV_SET_HDR(&bitmap_pattern->tlv_header, + WMITLV_TAG_STRUC_WOW_BITMAP_PATTERN_T, + 
WMITLV_GET_STRUCT_TLVLEN(WOW_BITMAP_PATTERN_T)); + + qdf_mem_copy(&bitmap_pattern->patternbuf[0], ptrn, ptrn_len); + qdf_mem_copy(&bitmap_pattern->bitmaskbuf[0], mask, mask_len); + + bitmap_pattern->pattern_offset = ptrn_offset; + bitmap_pattern->pattern_len = ptrn_len; + + if (bitmap_pattern->pattern_len > WOW_DEFAULT_BITMAP_PATTERN_SIZE) + bitmap_pattern->pattern_len = WOW_DEFAULT_BITMAP_PATTERN_SIZE; + + if (bitmap_pattern->pattern_len > WOW_DEFAULT_BITMASK_SIZE) + bitmap_pattern->pattern_len = WOW_DEFAULT_BITMASK_SIZE; + + bitmap_pattern->bitmask_len = bitmap_pattern->pattern_len; + bitmap_pattern->pattern_id = ptrn_id; + + WMI_LOGD("vdev: %d, ptrn id: %d, ptrn len: %d, ptrn offset: %d user %d", + cmd->vdev_id, cmd->pattern_id, bitmap_pattern->pattern_len, + bitmap_pattern->pattern_offset, user); + WMI_LOGD("Pattern : "); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + &bitmap_pattern->patternbuf[0], + bitmap_pattern->pattern_len); + + WMI_LOGD("Mask : "); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + &bitmap_pattern->bitmaskbuf[0], + bitmap_pattern->pattern_len); + + buf_ptr += sizeof(WOW_BITMAP_PATTERN_T); + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_IPV4_SYNC_PATTERN_T but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_IPV6_SYNC_PATTERN_T but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for WMITLV_TAG_STRUC_WOW_MAGIC_PATTERN_CMD but no data. */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for pattern_info_timeout but no data. 
*/ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, 0); + buf_ptr += WMI_TLV_HDR_SIZE; + + /* Fill TLV for ratelimit_interval with dummy data as this fix elem */ + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, 1 * sizeof(uint32_t)); + buf_ptr += WMI_TLV_HDR_SIZE; + *(uint32_t *) buf_ptr = 0; + + wmi_mtrace(WMI_WOW_ADD_WAKE_PATTERN_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_WOW_ADD_WAKE_PATTERN_CMDID); + if (ret) { + WMI_LOGE("%s: Failed to send wow ptrn to fw", __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * fill_arp_offload_params_tlv() - Fill ARP offload data + * @wmi_handle: wmi handle + * @offload_req: offload request + * @buf_ptr: buffer pointer + * + * To fill ARP offload data to firmware + * when target goes to wow mode. + * + * Return: None + */ +static void fill_arp_offload_params_tlv(wmi_unified_t wmi_handle, + struct pmo_arp_offload_params *offload_req, uint8_t **buf_ptr) +{ + + int i; + WMI_ARP_OFFLOAD_TUPLE *arp_tuple; + bool enable_or_disable = offload_req->enable; + + WMITLV_SET_HDR(*buf_ptr, WMITLV_TAG_ARRAY_STRUC, + (WMI_MAX_ARP_OFFLOADS*sizeof(WMI_ARP_OFFLOAD_TUPLE))); + *buf_ptr += WMI_TLV_HDR_SIZE; + for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) { + arp_tuple = (WMI_ARP_OFFLOAD_TUPLE *)*buf_ptr; + WMITLV_SET_HDR(&arp_tuple->tlv_header, + WMITLV_TAG_STRUC_WMI_ARP_OFFLOAD_TUPLE, + WMITLV_GET_STRUCT_TLVLEN(WMI_ARP_OFFLOAD_TUPLE)); + + /* Fill data for ARP and NS in the first tupple for LA */ + if ((enable_or_disable & PMO_OFFLOAD_ENABLE) && (i == 0)) { + /* Copy the target ip addr and flags */ + arp_tuple->flags = WMI_ARPOFF_FLAGS_VALID; + qdf_mem_copy(&arp_tuple->target_ipaddr, + offload_req->host_ipv4_addr, + WMI_IPV4_ADDR_LEN); + WMI_LOGD("ARPOffload IP4 address: %pI4", + offload_req->host_ipv4_addr); + } + *buf_ptr += sizeof(WMI_ARP_OFFLOAD_TUPLE); + } +} + +#ifdef WLAN_NS_OFFLOAD +/** + * fill_ns_offload_params_tlv() - Fill NS offload data 
+ * @wmi|_handle: wmi handle + * @offload_req: offload request + * @buf_ptr: buffer pointer + * + * To fill NS offload data to firmware + * when target goes to wow mode. + * + * Return: None + */ +static void fill_ns_offload_params_tlv(wmi_unified_t wmi_handle, + struct pmo_ns_offload_params *ns_req, uint8_t **buf_ptr) +{ + + int i; + WMI_NS_OFFLOAD_TUPLE *ns_tuple; + + WMITLV_SET_HDR(*buf_ptr, WMITLV_TAG_ARRAY_STRUC, + (WMI_MAX_NS_OFFLOADS * sizeof(WMI_NS_OFFLOAD_TUPLE))); + *buf_ptr += WMI_TLV_HDR_SIZE; + for (i = 0; i < WMI_MAX_NS_OFFLOADS; i++) { + ns_tuple = (WMI_NS_OFFLOAD_TUPLE *)*buf_ptr; + WMITLV_SET_HDR(&ns_tuple->tlv_header, + WMITLV_TAG_STRUC_WMI_NS_OFFLOAD_TUPLE, + (sizeof(WMI_NS_OFFLOAD_TUPLE) - WMI_TLV_HDR_SIZE)); + + /* + * Fill data only for NS offload in the first ARP tuple for LA + */ + if ((ns_req->enable & PMO_OFFLOAD_ENABLE)) { + ns_tuple->flags |= WMI_NSOFF_FLAGS_VALID; + /* Copy the target/solicitation/remote ip addr */ + if (ns_req->target_ipv6_addr_valid[i]) + qdf_mem_copy(&ns_tuple->target_ipaddr[0], + &ns_req->target_ipv6_addr[i], + sizeof(WMI_IPV6_ADDR)); + qdf_mem_copy(&ns_tuple->solicitation_ipaddr, + &ns_req->self_ipv6_addr[i], + sizeof(WMI_IPV6_ADDR)); + if (ns_req->target_ipv6_addr_ac_type[i]) { + ns_tuple->flags |= + WMI_NSOFF_FLAGS_IS_IPV6_ANYCAST; + } + WMI_LOGD("Index %d NS solicitedIp %pI6, targetIp %pI6", + i, &ns_req->self_ipv6_addr[i], + &ns_req->target_ipv6_addr[i]); + + /* target MAC is optional, check if it is valid, + * if this is not valid, the target will use the known + * local MAC address rather than the tuple + */ + WMI_CHAR_ARRAY_TO_MAC_ADDR( + ns_req->self_macaddr.bytes, + &ns_tuple->target_mac); + if ((ns_tuple->target_mac.mac_addr31to0 != 0) || + (ns_tuple->target_mac.mac_addr47to32 != 0)) { + ns_tuple->flags |= WMI_NSOFF_FLAGS_MAC_VALID; + } + } + *buf_ptr += sizeof(WMI_NS_OFFLOAD_TUPLE); + } +} + + +/** + * fill_nsoffload_ext_tlv() - Fill NS offload ext data + * @wmi: wmi handle + * @offload_req: offload 
request + * @buf_ptr: buffer pointer + * + * To fill extended NS offload extended data to firmware + * when target goes to wow mode. + * + * Return: None + */ +static void fill_nsoffload_ext_tlv(wmi_unified_t wmi_handle, + struct pmo_ns_offload_params *ns_req, uint8_t **buf_ptr) +{ + int i; + WMI_NS_OFFLOAD_TUPLE *ns_tuple; + uint32_t count, num_ns_ext_tuples; + + count = ns_req->num_ns_offload_count; + num_ns_ext_tuples = ns_req->num_ns_offload_count - + WMI_MAX_NS_OFFLOADS; + + /* Populate extended NS offload tuples */ + WMITLV_SET_HDR(*buf_ptr, WMITLV_TAG_ARRAY_STRUC, + (num_ns_ext_tuples * sizeof(WMI_NS_OFFLOAD_TUPLE))); + *buf_ptr += WMI_TLV_HDR_SIZE; + for (i = WMI_MAX_NS_OFFLOADS; i < count; i++) { + ns_tuple = (WMI_NS_OFFLOAD_TUPLE *)*buf_ptr; + WMITLV_SET_HDR(&ns_tuple->tlv_header, + WMITLV_TAG_STRUC_WMI_NS_OFFLOAD_TUPLE, + (sizeof(WMI_NS_OFFLOAD_TUPLE)-WMI_TLV_HDR_SIZE)); + + /* + * Fill data only for NS offload in the first ARP tuple for LA + */ + if ((ns_req->enable & PMO_OFFLOAD_ENABLE)) { + ns_tuple->flags |= WMI_NSOFF_FLAGS_VALID; + /* Copy the target/solicitation/remote ip addr */ + if (ns_req->target_ipv6_addr_valid[i]) + qdf_mem_copy(&ns_tuple->target_ipaddr[0], + &ns_req->target_ipv6_addr[i], + sizeof(WMI_IPV6_ADDR)); + qdf_mem_copy(&ns_tuple->solicitation_ipaddr, + &ns_req->self_ipv6_addr[i], + sizeof(WMI_IPV6_ADDR)); + if (ns_req->target_ipv6_addr_ac_type[i]) { + ns_tuple->flags |= + WMI_NSOFF_FLAGS_IS_IPV6_ANYCAST; + } + WMI_LOGD("Index %d NS solicitedIp %pI6, targetIp %pI6", + i, &ns_req->self_ipv6_addr[i], + &ns_req->target_ipv6_addr[i]); + + /* target MAC is optional, check if it is valid, + * if this is not valid, the target will use the + * known local MAC address rather than the tuple + */ + WMI_CHAR_ARRAY_TO_MAC_ADDR( + ns_req->self_macaddr.bytes, + &ns_tuple->target_mac); + if ((ns_tuple->target_mac.mac_addr31to0 != 0) || + (ns_tuple->target_mac.mac_addr47to32 != 0)) { + ns_tuple->flags |= WMI_NSOFF_FLAGS_MAC_VALID; + } + } + *buf_ptr 
+= sizeof(WMI_NS_OFFLOAD_TUPLE); + } +} +#else +static void fill_ns_offload_params_tlv(wmi_unified_t wmi_handle, + struct pmo_ns_offload_params *ns_req, uint8_t **buf_ptr) +{ +} + +static void fill_nsoffload_ext_tlv(wmi_unified_t wmi_handle, + struct pmo_ns_offload_params *ns_req, uint8_t **buf_ptr) +{ +} +#endif + +/** + * send_enable_arp_ns_offload_cmd_tlv() - enable ARP NS offload + * @wma: wmi handle + * @arp_offload_req: arp offload request + * @ns_offload_req: ns offload request + * @arp_only: flag + * + * To configure ARP NS off load data to firmware + * when target goes to wow mode. + * + * Return: QDF Status + */ +static QDF_STATUS send_enable_arp_ns_offload_cmd_tlv(wmi_unified_t wmi_handle, + struct pmo_arp_offload_params *arp_offload_req, + struct pmo_ns_offload_params *ns_offload_req, + uint8_t vdev_id) +{ + int32_t res; + WMI_SET_ARP_NS_OFFLOAD_CMD_fixed_param *cmd; + uint8_t *buf_ptr; + wmi_buf_t buf; + int32_t len; + uint32_t count = 0, num_ns_ext_tuples = 0; + + count = ns_offload_req->num_ns_offload_count; + + /* + * TLV place holder size for array of NS tuples + * TLV place holder size for array of ARP tuples + */ + len = sizeof(WMI_SET_ARP_NS_OFFLOAD_CMD_fixed_param) + + WMI_TLV_HDR_SIZE + + WMI_MAX_NS_OFFLOADS * sizeof(WMI_NS_OFFLOAD_TUPLE) + + WMI_TLV_HDR_SIZE + + WMI_MAX_ARP_OFFLOADS * sizeof(WMI_ARP_OFFLOAD_TUPLE); + + /* + * If there are more than WMI_MAX_NS_OFFLOADS addresses then allocate + * extra length for extended NS offload tuples which follows ARP offload + * tuples. 
Host needs to fill this structure in following format: + * 2 NS ofload tuples + * 2 ARP offload tuples + * N numbers of extended NS offload tuples if HDD has given more than + * 2 NS offload addresses + */ + if (count > WMI_MAX_NS_OFFLOADS) { + num_ns_ext_tuples = count - WMI_MAX_NS_OFFLOADS; + len += WMI_TLV_HDR_SIZE + num_ns_ext_tuples + * sizeof(WMI_NS_OFFLOAD_TUPLE); + } + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (WMI_SET_ARP_NS_OFFLOAD_CMD_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_SET_ARP_NS_OFFLOAD_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_SET_ARP_NS_OFFLOAD_CMD_fixed_param)); + cmd->flags = 0; + cmd->vdev_id = vdev_id; + cmd->num_ns_ext_tuples = num_ns_ext_tuples; + + WMI_LOGD("ARP NS Offload vdev_id: %d", cmd->vdev_id); + + buf_ptr += sizeof(WMI_SET_ARP_NS_OFFLOAD_CMD_fixed_param); + fill_ns_offload_params_tlv(wmi_handle, ns_offload_req, &buf_ptr); + fill_arp_offload_params_tlv(wmi_handle, arp_offload_req, &buf_ptr); + if (num_ns_ext_tuples) + fill_nsoffload_ext_tlv(wmi_handle, ns_offload_req, &buf_ptr); + + wmi_mtrace(WMI_SET_ARP_NS_OFFLOAD_CMDID, cmd->vdev_id, 0); + res = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_SET_ARP_NS_OFFLOAD_CMDID); + if (res) { + WMI_LOGE("Failed to enable ARP NDP/NSffload"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_enable_enhance_multicast_offload_tlv() - send enhance multicast offload + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @action: true for enable else false + * + * To enable enhance multicast offload to firmware + * when target goes to wow mode. 
+ * + * Return: QDF Status + */ + +static +QDF_STATUS send_enable_enhance_multicast_offload_tlv( + wmi_unified_t wmi_handle, + uint8_t vdev_id, bool action) +{ + QDF_STATUS status; + wmi_buf_t buf; + wmi_config_enhanced_mcast_filter_cmd_fixed_param *cmd; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate buffer to send set key cmd"); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_config_enhanced_mcast_filter_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_config_enhanced_mcast_filter_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_config_enhanced_mcast_filter_cmd_fixed_param)); + + cmd->vdev_id = vdev_id; + cmd->enable = ((action == 0) ? ENHANCED_MCAST_FILTER_DISABLED : + ENHANCED_MCAST_FILTER_ENABLED); + WMI_LOGD("%s: config enhance multicast offload action %d for vdev %d", + __func__, action, vdev_id); + wmi_mtrace(WMI_CONFIG_ENHANCED_MCAST_FILTER_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + sizeof(*cmd), WMI_CONFIG_ENHANCED_MCAST_FILTER_CMDID); + if (status != QDF_STATUS_SUCCESS) { + wmi_buf_free(buf); + WMI_LOGE("%s:Failed to send ENHANCED_MCAST_FILTER_CMDID", + __func__); + } + + return status; +} + +/** + * extract_gtk_rsp_event_tlv() - extract gtk rsp params from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param hdr: Pointer to hold header + * @param bufp: Pointer to hold pointer to rx param buffer + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_gtk_rsp_event_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct pmo_gtk_rsp_params *gtk_rsp_param, uint32_t len) +{ + WMI_GTK_OFFLOAD_STATUS_EVENT_fixed_param *fixed_param; + WMI_GTK_OFFLOAD_STATUS_EVENTID_param_tlvs *param_buf; + + param_buf = (WMI_GTK_OFFLOAD_STATUS_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("gtk param_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + if (len < 
sizeof(WMI_GTK_OFFLOAD_STATUS_EVENT_fixed_param)) { + WMI_LOGE("Invalid length for GTK status"); + return QDF_STATUS_E_INVAL; + } + + fixed_param = (WMI_GTK_OFFLOAD_STATUS_EVENT_fixed_param *) + param_buf->fixed_param; + + if (fixed_param->vdev_id >= WLAN_UMAC_PSOC_MAX_VDEVS) { + wmi_err_rl("Invalid vdev_id %u", fixed_param->vdev_id); + return QDF_STATUS_E_INVAL; + } + + gtk_rsp_param->vdev_id = fixed_param->vdev_id; + gtk_rsp_param->status_flag = QDF_STATUS_SUCCESS; + gtk_rsp_param->refresh_cnt = fixed_param->refresh_cnt; + qdf_mem_copy(>k_rsp_param->replay_counter, + &fixed_param->replay_counter, + GTK_REPLAY_COUNTER_BYTES); + + return QDF_STATUS_SUCCESS; + +} + +#ifdef FEATURE_WLAN_RA_FILTERING +/** + * send_wow_sta_ra_filter_cmd_tlv() - set RA filter pattern in fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * + * Return: CDF status + */ +static QDF_STATUS send_wow_sta_ra_filter_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, uint8_t default_pattern, + uint16_t rate_limit_interval) +{ + + WMI_WOW_ADD_PATTERN_CMD_fixed_param *cmd; + wmi_buf_t buf; + uint8_t *buf_ptr; + int32_t len; + int ret; + + len = sizeof(WMI_WOW_ADD_PATTERN_CMD_fixed_param) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(WOW_BITMAP_PATTERN_T) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(WOW_IPV4_SYNC_PATTERN_T) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(WOW_IPV6_SYNC_PATTERN_T) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(WOW_MAGIC_PATTERN_CMD) + + WMI_TLV_HDR_SIZE + + 0 * sizeof(uint32_t) + WMI_TLV_HDR_SIZE + 1 * sizeof(uint32_t); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (WMI_WOW_ADD_PATTERN_CMD_fixed_param *) wmi_buf_data(buf); + buf_ptr = (uint8_t *) cmd; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_WOW_ADD_PATTERN_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_WOW_ADD_PATTERN_CMD_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->pattern_id = default_pattern, + cmd->pattern_type = 
WOW_IPV6_RA_PATTERN;
	buf_ptr += sizeof(WMI_WOW_ADD_PATTERN_CMD_fixed_param);

	/* Fill TLV for WMITLV_TAG_STRUC_WOW_BITMAP_PATTERN_T but no data. */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0);
	buf_ptr += WMI_TLV_HDR_SIZE;

	/* Fill TLV for WMITLV_TAG_STRUC_WOW_IPV4_SYNC_PATTERN_T but no data. */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0);
	buf_ptr += WMI_TLV_HDR_SIZE;

	/* Fill TLV for WMITLV_TAG_STRUC_WOW_IPV6_SYNC_PATTERN_T but no data. */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0);
	buf_ptr += WMI_TLV_HDR_SIZE;

	/* Fill TLV for WMITLV_TAG_STRUC_WOW_MAGIC_PATTERN_CMD but no data. */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0);
	buf_ptr += WMI_TLV_HDR_SIZE;

	/* Fill TLV for pattern_info_timeout but no data. */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, 0);
	buf_ptr += WMI_TLV_HDR_SIZE;

	/* Fill TLV for ra_ratelimit_interval. */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, sizeof(uint32_t));
	buf_ptr += WMI_TLV_HDR_SIZE;

	*((uint32_t *) buf_ptr) = rate_limit_interval;

	WMI_LOGD("%s: send RA rate limit [%d] to fw vdev = %d", __func__,
		 rate_limit_interval, vdev_id);

	wmi_mtrace(WMI_WOW_ADD_WAKE_PATTERN_CMDID, cmd->vdev_id, 0);
	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_WOW_ADD_WAKE_PATTERN_CMDID);
	if (ret) {
		WMI_LOGE("%s: Failed to send RA rate limit to fw", __func__);
		/* buffer ownership stays with caller on send failure */
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;

}
#endif /* FEATURE_WLAN_RA_FILTERING */

/**
 * send_add_clear_mcbc_filter_cmd_tlv() - set mcast filter command to fw
 * @wmi_handle: wmi handle
 * @vdev_id: vdev id
 * @multicastAddr: mcast address
 * @clearList: clear list flag
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
static QDF_STATUS send_add_clear_mcbc_filter_cmd_tlv(wmi_unified_t wmi_handle,
					uint8_t vdev_id,
					struct qdf_mac_addr multicast_addr,
					bool clearList)
{
	WMI_SET_MCASTBCAST_FILTER_CMD_fixed_param *cmd;
	wmi_buf_t buf;
	int err;

	buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd));
	if (!buf) {
		WMI_LOGE("Failed to allocate buffer to send set_param cmd");
		return QDF_STATUS_E_NOMEM;
	}

	cmd = (WMI_SET_MCASTBCAST_FILTER_CMD_fixed_param *) wmi_buf_data(buf);
	qdf_mem_zero(cmd, sizeof(*cmd));

	WMITLV_SET_HDR(&cmd->tlv_header,
	       WMITLV_TAG_STRUC_WMI_SET_MCASTBCAST_FILTER_CMD_fixed_param,
	       WMITLV_GET_STRUCT_TLVLEN
		       (WMI_SET_MCASTBCAST_FILTER_CMD_fixed_param));
	/* clearList selects delete vs. set for the single filter entry */
	cmd->action =
		(clearList ? WMI_MCAST_FILTER_DELETE : WMI_MCAST_FILTER_SET);
	cmd->vdev_id = vdev_id;
	WMI_CHAR_ARRAY_TO_MAC_ADDR(multicast_addr.bytes, &cmd->mcastbdcastaddr);

	WMI_LOGD("Action:%d; vdev_id:%d; clearList:%d; MCBC MAC Addr: %pM",
		 cmd->action, vdev_id, clearList, multicast_addr.bytes);

	wmi_mtrace(WMI_SET_MCASTBCAST_FILTER_CMDID, cmd->vdev_id, 0);
	err = wmi_unified_cmd_send(wmi_handle, buf,
				   sizeof(*cmd),
				   WMI_SET_MCASTBCAST_FILTER_CMDID);
	if (err) {
		WMI_LOGE("Failed to send set_param cmd");
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * send_multiple_add_clear_mcbc_filter_cmd_tlv() - send multiple mcast filter
 *						   command to fw
 * @wmi_handle: wmi handle
 * @vdev_id: vdev id
 * @mcast_filter_params: mcast filter params
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
static QDF_STATUS send_multiple_add_clear_mcbc_filter_cmd_tlv(
				wmi_unified_t wmi_handle,
				uint8_t vdev_id,
				struct pmo_mcast_filter_params *filter_param)

{
	WMI_SET_MULTIPLE_MCAST_FILTER_CMD_fixed_param *cmd;
	uint8_t *buf_ptr;
	wmi_buf_t buf;
	int err;
	int i;
	uint8_t *mac_addr_src_ptr = NULL;
	wmi_mac_addr *mac_addr_dst_ptr;
	/* fixed param + array TLV header + one wmi_mac_addr per address */
	uint32_t len = sizeof(*cmd) + WMI_TLV_HDR_SIZE +
		sizeof(wmi_mac_addr) * filter_param->multicast_addr_cnt;

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("Failed to allocate memory");
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (uint8_t *) wmi_buf_data(buf);
	cmd = (WMI_SET_MULTIPLE_MCAST_FILTER_CMD_fixed_param *)
		wmi_buf_data(buf);
	qdf_mem_zero(cmd, sizeof(*cmd));

	WMITLV_SET_HDR(&cmd->tlv_header,
	       WMITLV_TAG_STRUC_wmi_set_multiple_mcast_filter_cmd_fixed_param,
	       WMITLV_GET_STRUCT_TLVLEN
		       (WMI_SET_MULTIPLE_MCAST_FILTER_CMD_fixed_param));
	cmd->operation =
		((filter_param->action == 0) ? WMI_MULTIPLE_MCAST_FILTER_DELETE
			: WMI_MULTIPLE_MCAST_FILTER_ADD);
	cmd->vdev_id = vdev_id;
	cmd->num_mcastaddrs = filter_param->multicast_addr_cnt;

	buf_ptr += sizeof(*cmd);
	/* array TLV header is emitted even when the address list is empty */
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_FIXED_STRUC,
		       sizeof(wmi_mac_addr) *
		       filter_param->multicast_addr_cnt);

	if (filter_param->multicast_addr_cnt == 0)
		goto send_cmd;

	/* NOTE(review): assumes multicast_addr is a packed array of
	 * ATH_MAC_LEN-byte addresses — confirm against pmo definition.
	 */
	mac_addr_src_ptr = (uint8_t *)&filter_param->multicast_addr;
	mac_addr_dst_ptr = (wmi_mac_addr *)
			(buf_ptr + WMI_TLV_HDR_SIZE);

	for (i = 0; i < filter_param->multicast_addr_cnt; i++) {
		WMI_CHAR_ARRAY_TO_MAC_ADDR(mac_addr_src_ptr, mac_addr_dst_ptr);
		mac_addr_src_ptr += ATH_MAC_LEN;
		mac_addr_dst_ptr++;
	}

send_cmd:
	wmi_mtrace(WMI_SET_MULTIPLE_MCAST_FILTER_CMDID, cmd->vdev_id, 0);
	err = wmi_unified_cmd_send(wmi_handle, buf,
				   len,
				   WMI_SET_MULTIPLE_MCAST_FILTER_CMDID);
	if (err) {
		WMI_LOGE("Failed to send set_param cmd");
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/* Append the FILS extended-key TLV after the fixed GTK offload params. */
static void
fill_fils_tlv_params(WMI_GTK_OFFLOAD_CMD_fixed_param *cmd,
		     uint8_t vdev_id,
		     struct pmo_gtk_req *params)
{
	uint8_t *buf_ptr;
	wmi_gtk_offload_fils_tlv_param *ext_param;

	buf_ptr = (uint8_t *) cmd + sizeof(*cmd);
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
		       sizeof(*ext_param));
	buf_ptr += WMI_TLV_HDR_SIZE;

	ext_param = (wmi_gtk_offload_fils_tlv_param *)buf_ptr;
	WMITLV_SET_HDR(&ext_param->tlv_header,
		       WMITLV_TAG_STRUC_wmi_gtk_offload_extended_tlv_param,
		       WMITLV_GET_STRUCT_TLVLEN(
				wmi_gtk_offload_fils_tlv_param));
	ext_param->vdev_id = vdev_id;
	/* mirror the opcode flags already set on the fixed param */
	ext_param->flags = cmd->flags;
	ext_param->kek_len =
params->kek_len; + qdf_mem_copy(ext_param->KEK, params->kek, params->kek_len); + qdf_mem_copy(ext_param->KCK, params->kck, + WMI_GTK_OFFLOAD_KCK_BYTES); + qdf_mem_copy(ext_param->replay_counter, ¶ms->replay_counter, + GTK_REPLAY_COUNTER_BYTES); +} + +/** + * send_gtk_offload_cmd_tlv() - send GTK offload command to fw + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @params: GTK offload parameters + * + * Return: CDF status + */ +static +QDF_STATUS send_gtk_offload_cmd_tlv(wmi_unified_t wmi_handle, uint8_t vdev_id, + struct pmo_gtk_req *params, + bool enable_offload, + uint32_t gtk_offload_opcode) +{ + int len; + wmi_buf_t buf; + WMI_GTK_OFFLOAD_CMD_fixed_param *cmd; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + WMI_LOGD("%s Enter", __func__); + + len = sizeof(*cmd); + + if (params->is_fils_connection) + len += WMI_TLV_HDR_SIZE + + sizeof(wmi_gtk_offload_fils_tlv_param); + + /* alloc wmi buffer */ + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("wmi_buf_alloc failed for WMI_GTK_OFFLOAD_CMD"); + status = QDF_STATUS_E_NOMEM; + goto out; + } + + cmd = (WMI_GTK_OFFLOAD_CMD_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_GTK_OFFLOAD_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_GTK_OFFLOAD_CMD_fixed_param)); + + cmd->vdev_id = vdev_id; + + /* Request target to enable GTK offload */ + if (enable_offload == PMO_GTK_OFFLOAD_ENABLE) { + cmd->flags = gtk_offload_opcode; + + /* Copy the keys and replay counter */ + qdf_mem_copy(cmd->KCK, params->kck, PMO_KCK_LEN); + qdf_mem_copy(cmd->KEK, params->kek, PMO_KEK_LEN_LEGACY); + qdf_mem_copy(cmd->replay_counter, ¶ms->replay_counter, + GTK_REPLAY_COUNTER_BYTES); + } else { + cmd->flags = gtk_offload_opcode; + } + if (params->is_fils_connection) + fill_fils_tlv_params(cmd, vdev_id, params); + + WMI_LOGD("VDEVID: %d, GTK_FLAGS: x%x kek len %d", vdev_id, cmd->flags, params->kek_len); + /* send the wmi command */ + wmi_mtrace(WMI_GTK_OFFLOAD_CMDID, cmd->vdev_id, 0); 
+ if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_GTK_OFFLOAD_CMDID)) { + WMI_LOGE("Failed to send WMI_GTK_OFFLOAD_CMDID"); + wmi_buf_free(buf); + status = QDF_STATUS_E_FAILURE; + } + +out: + WMI_LOGD("%s Exit", __func__); + return status; +} + +/** + * send_process_gtk_offload_getinfo_cmd_tlv() - send GTK offload cmd to fw + * @wmi_handle: wmi handle + * @params: GTK offload params + * + * Return: CDF status + */ +static QDF_STATUS send_process_gtk_offload_getinfo_cmd_tlv( + wmi_unified_t wmi_handle, + uint8_t vdev_id, + uint64_t offload_req_opcode) +{ + int len; + wmi_buf_t buf; + WMI_GTK_OFFLOAD_CMD_fixed_param *cmd; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + len = sizeof(*cmd); + + /* alloc wmi buffer */ + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("wmi_buf_alloc failed for WMI_GTK_OFFLOAD_CMD"); + status = QDF_STATUS_E_NOMEM; + goto out; + } + + cmd = (WMI_GTK_OFFLOAD_CMD_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_WMI_GTK_OFFLOAD_CMD_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_GTK_OFFLOAD_CMD_fixed_param)); + + /* Request for GTK offload status */ + cmd->flags = offload_req_opcode; + cmd->vdev_id = vdev_id; + + /* send the wmi command */ + wmi_mtrace(WMI_GTK_OFFLOAD_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_GTK_OFFLOAD_CMDID)) { + WMI_LOGE("Failed to send WMI_GTK_OFFLOAD_CMDID for req info"); + wmi_buf_free(buf); + status = QDF_STATUS_E_FAILURE; + } + +out: + return status; +} + +/** + * send_action_frame_patterns_cmd_tlv() - send wmi cmd of action filter params + * @wmi_handle: wmi handler + * @action_params: pointer to action_params + * + * Return: 0 for success, otherwise appropriate error code + */ +static QDF_STATUS send_action_frame_patterns_cmd_tlv(wmi_unified_t wmi_handle, + struct pmo_action_wakeup_set_params *action_params) +{ + WMI_WOW_SET_ACTION_WAKE_UP_CMD_fixed_param *cmd; + wmi_buf_t buf; + int i; + int32_t err; + uint32_t len = 0, 
*cmd_args; + uint8_t *buf_ptr; + + len = (PMO_SUPPORTED_ACTION_CATE * sizeof(uint32_t)) + + WMI_TLV_HDR_SIZE + sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("Failed to allocate buffer to send action filter cmd"); + return QDF_STATUS_E_NOMEM; + } + cmd = (WMI_WOW_SET_ACTION_WAKE_UP_CMD_fixed_param *) wmi_buf_data(buf); + buf_ptr = (uint8_t *)cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_wow_set_action_wake_up_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + WMI_WOW_SET_ACTION_WAKE_UP_CMD_fixed_param)); + + cmd->vdev_id = action_params->vdev_id; + cmd->operation = action_params->operation; + + for (i = 0; i < MAX_SUPPORTED_ACTION_CATEGORY_ELE_LIST; i++) + cmd->action_category_map[i] = + action_params->action_category_map[i]; + + buf_ptr += sizeof(WMI_WOW_SET_ACTION_WAKE_UP_CMD_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (PMO_SUPPORTED_ACTION_CATE * sizeof(uint32_t))); + buf_ptr += WMI_TLV_HDR_SIZE; + cmd_args = (uint32_t *) buf_ptr; + for (i = 0; i < PMO_SUPPORTED_ACTION_CATE; i++) + cmd_args[i] = action_params->action_per_category[i]; + + wmi_mtrace(WMI_WOW_SET_ACTION_WAKE_UP_CMDID, cmd->vdev_id, 0); + err = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_WOW_SET_ACTION_WAKE_UP_CMDID); + if (err) { + WMI_LOGE("Failed to send ap_ps_egap cmd"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef FEATURE_WLAN_LPHB + +/** + * send_lphb_config_hbenable_cmd_tlv() - enable command of LPHB configuration + * @wmi_handle: wmi handle + * @lphb_conf_req: configuration info + * + * Return: CDF status + */ +static QDF_STATUS send_lphb_config_hbenable_cmd_tlv(wmi_unified_t wmi_handle, + wmi_hb_set_enable_cmd_fixed_param *params) +{ + QDF_STATUS status; + wmi_buf_t buf = NULL; + uint8_t *buf_ptr; + wmi_hb_set_enable_cmd_fixed_param *hb_enable_fp; + int len = sizeof(wmi_hb_set_enable_cmd_fixed_param); + + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + 
WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + hb_enable_fp = (wmi_hb_set_enable_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&hb_enable_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_hb_set_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_hb_set_enable_cmd_fixed_param)); + + /* fill in values */ + hb_enable_fp->vdev_id = params->session; + hb_enable_fp->enable = params->enable; + hb_enable_fp->item = params->item; + hb_enable_fp->session = params->session; + + wmi_mtrace(WMI_HB_SET_ENABLE_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_HB_SET_ENABLE_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("cmd_send WMI_HB_SET_ENABLE returned Error %d", + status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_lphb_config_tcp_params_cmd_tlv() - set tcp params of LPHB configuration + * @wmi_handle: wmi handle + * @lphb_conf_req: lphb config request + * + * Return: CDF status + */ +static QDF_STATUS send_lphb_config_tcp_params_cmd_tlv(wmi_unified_t wmi_handle, + wmi_hb_set_tcp_params_cmd_fixed_param *lphb_conf_req) +{ + QDF_STATUS status; + wmi_buf_t buf = NULL; + uint8_t *buf_ptr; + wmi_hb_set_tcp_params_cmd_fixed_param *hb_tcp_params_fp; + int len = sizeof(wmi_hb_set_tcp_params_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + hb_tcp_params_fp = (wmi_hb_set_tcp_params_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&hb_tcp_params_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_hb_set_tcp_params_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_hb_set_tcp_params_cmd_fixed_param)); + + /* fill in values */ + hb_tcp_params_fp->vdev_id = lphb_conf_req->vdev_id; + hb_tcp_params_fp->srv_ip = lphb_conf_req->srv_ip; + hb_tcp_params_fp->dev_ip = lphb_conf_req->dev_ip; + hb_tcp_params_fp->seq = 
lphb_conf_req->seq;
	hb_tcp_params_fp->src_port = lphb_conf_req->src_port;
	hb_tcp_params_fp->dst_port = lphb_conf_req->dst_port;
	hb_tcp_params_fp->interval = lphb_conf_req->interval;
	hb_tcp_params_fp->timeout = lphb_conf_req->timeout;
	hb_tcp_params_fp->session = lphb_conf_req->session;
	qdf_mem_copy(&hb_tcp_params_fp->gateway_mac,
		     &lphb_conf_req->gateway_mac,
		     sizeof(hb_tcp_params_fp->gateway_mac));

	wmi_mtrace(WMI_HB_SET_TCP_PARAMS_CMDID, NO_SESSION, 0);
	status = wmi_unified_cmd_send(wmi_handle, buf,
				      len, WMI_HB_SET_TCP_PARAMS_CMDID);
	if (QDF_IS_STATUS_ERROR(status)) {
		WMI_LOGE("cmd_send WMI_HB_SET_TCP_PARAMS returned Error %d",
			 status);
		wmi_buf_free(buf);
	}

	return status;
}

/**
 * send_lphb_config_tcp_pkt_filter_cmd_tlv() - configure tcp packet filter cmd
 * @wmi_handle: wmi handle
 * @lphb_conf_req: lphb config request
 *
 * Return: CDF status
 */
static
QDF_STATUS send_lphb_config_tcp_pkt_filter_cmd_tlv(wmi_unified_t wmi_handle,
		wmi_hb_set_tcp_pkt_filter_cmd_fixed_param *g_hb_tcp_filter_fp)
{
	QDF_STATUS status;
	wmi_buf_t buf = NULL;
	uint8_t *buf_ptr;
	wmi_hb_set_tcp_pkt_filter_cmd_fixed_param *hb_tcp_filter_fp;
	int len = sizeof(wmi_hb_set_tcp_pkt_filter_cmd_fixed_param);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s : wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (uint8_t *) wmi_buf_data(buf);
	hb_tcp_filter_fp =
		(wmi_hb_set_tcp_pkt_filter_cmd_fixed_param *) buf_ptr;
	WMITLV_SET_HDR(&hb_tcp_filter_fp->tlv_header,
		WMITLV_TAG_STRUC_wmi_hb_set_tcp_pkt_filter_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN
			(wmi_hb_set_tcp_pkt_filter_cmd_fixed_param));

	/* fill in values */
	hb_tcp_filter_fp->vdev_id = g_hb_tcp_filter_fp->vdev_id;
	hb_tcp_filter_fp->length = g_hb_tcp_filter_fp->length;
	hb_tcp_filter_fp->offset = g_hb_tcp_filter_fp->offset;
	hb_tcp_filter_fp->session = g_hb_tcp_filter_fp->session;
	/* NOTE(review): raw memcpy where the file otherwise uses
	 * qdf_mem_copy — behavior identical, style inconsistency only.
	 */
	memcpy((void *)&hb_tcp_filter_fp->filter,
	       (void *)&g_hb_tcp_filter_fp->filter,
	       WMI_WLAN_HB_MAX_FILTER_SIZE);

	wmi_mtrace(WMI_HB_SET_TCP_PKT_FILTER_CMDID, NO_SESSION, 0);
	status = wmi_unified_cmd_send(wmi_handle, buf,
				      len, WMI_HB_SET_TCP_PKT_FILTER_CMDID);
	if (QDF_IS_STATUS_ERROR(status)) {
		WMI_LOGE("cmd_send WMI_HB_SET_TCP_PKT_FILTER returned Error %d",
			 status);
		wmi_buf_free(buf);
	}

	return status;
}

/**
 * send_lphb_config_udp_params_cmd_tlv() - configure udp param command of LPHB
 * @wmi_handle: wmi handle
 * @lphb_conf_req: lphb config request
 *
 * Return: CDF status
 */
static QDF_STATUS send_lphb_config_udp_params_cmd_tlv(wmi_unified_t wmi_handle,
		wmi_hb_set_udp_params_cmd_fixed_param *lphb_conf_req)
{
	QDF_STATUS status;
	wmi_buf_t buf = NULL;
	uint8_t *buf_ptr;
	wmi_hb_set_udp_params_cmd_fixed_param *hb_udp_params_fp;
	int len = sizeof(wmi_hb_set_udp_params_cmd_fixed_param);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s : wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (uint8_t *) wmi_buf_data(buf);
	hb_udp_params_fp = (wmi_hb_set_udp_params_cmd_fixed_param *) buf_ptr;
	WMITLV_SET_HDR(&hb_udp_params_fp->tlv_header,
		       WMITLV_TAG_STRUC_wmi_hb_set_udp_params_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN
			       (wmi_hb_set_udp_params_cmd_fixed_param));

	/* fill in values */
	hb_udp_params_fp->vdev_id = lphb_conf_req->vdev_id;
	hb_udp_params_fp->srv_ip = lphb_conf_req->srv_ip;
	hb_udp_params_fp->dev_ip = lphb_conf_req->dev_ip;
	hb_udp_params_fp->src_port = lphb_conf_req->src_port;
	hb_udp_params_fp->dst_port = lphb_conf_req->dst_port;
	hb_udp_params_fp->interval = lphb_conf_req->interval;
	hb_udp_params_fp->timeout = lphb_conf_req->timeout;
	hb_udp_params_fp->session = lphb_conf_req->session;
	qdf_mem_copy(&hb_udp_params_fp->gateway_mac,
		     &lphb_conf_req->gateway_mac,
		     sizeof(lphb_conf_req->gateway_mac));

	wmi_mtrace(WMI_HB_SET_UDP_PARAMS_CMDID, NO_SESSION, 0);
	status = wmi_unified_cmd_send(wmi_handle, buf,
				      len, WMI_HB_SET_UDP_PARAMS_CMDID);
	if (QDF_IS_STATUS_ERROR(status)) {
		WMI_LOGE("cmd_send WMI_HB_SET_UDP_PARAMS returned Error %d",
			 status);
		wmi_buf_free(buf);
	}

	return status;
}

/**
 * send_lphb_config_udp_pkt_filter_cmd_tlv() - configure udp pkt filter command
 * @wmi_handle: wmi handle
 * @lphb_conf_req: lphb config request
 *
 * Return: CDF status
 */
static
QDF_STATUS send_lphb_config_udp_pkt_filter_cmd_tlv(wmi_unified_t wmi_handle,
		wmi_hb_set_udp_pkt_filter_cmd_fixed_param *lphb_conf_req)
{
	QDF_STATUS status;
	wmi_buf_t buf = NULL;
	uint8_t *buf_ptr;
	wmi_hb_set_udp_pkt_filter_cmd_fixed_param *hb_udp_filter_fp;
	int len = sizeof(wmi_hb_set_udp_pkt_filter_cmd_fixed_param);

	buf = wmi_buf_alloc(wmi_handle, len);
	if (!buf) {
		WMI_LOGE("%s : wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (uint8_t *) wmi_buf_data(buf);
	hb_udp_filter_fp =
		(wmi_hb_set_udp_pkt_filter_cmd_fixed_param *) buf_ptr;
	WMITLV_SET_HDR(&hb_udp_filter_fp->tlv_header,
		WMITLV_TAG_STRUC_wmi_hb_set_udp_pkt_filter_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN
			(wmi_hb_set_udp_pkt_filter_cmd_fixed_param));

	/* fill in values */
	hb_udp_filter_fp->vdev_id = lphb_conf_req->vdev_id;
	hb_udp_filter_fp->length = lphb_conf_req->length;
	hb_udp_filter_fp->offset = lphb_conf_req->offset;
	hb_udp_filter_fp->session = lphb_conf_req->session;
	memcpy((void *)&hb_udp_filter_fp->filter,
	       (void *)&lphb_conf_req->filter,
	       WMI_WLAN_HB_MAX_FILTER_SIZE);

	wmi_mtrace(WMI_HB_SET_UDP_PKT_FILTER_CMDID, NO_SESSION, 0);
	status = wmi_unified_cmd_send(wmi_handle, buf,
				      len, WMI_HB_SET_UDP_PKT_FILTER_CMDID);
	if (QDF_IS_STATUS_ERROR(status)) {
		WMI_LOGE("cmd_send WMI_HB_SET_UDP_PKT_FILTER returned Error %d",
			 status);
		wmi_buf_free(buf);
	}

	return status;
}
#endif /* FEATURE_WLAN_LPHB */

static QDF_STATUS send_conf_hw_filter_cmd_tlv(wmi_unified_t wmi,
					      struct
pmo_hw_filter_params *req) +{ + QDF_STATUS status; + wmi_hw_data_filter_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + + if (!req) { + WMI_LOGE("req is null"); + return QDF_STATUS_E_INVAL; + } + + wmi_buf = wmi_buf_alloc(wmi, sizeof(*cmd)); + if (!wmi_buf) { + WMI_LOGE(FL("Out of memory")); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_hw_data_filter_cmd_fixed_param *)wmi_buf_data(wmi_buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_hw_data_filter_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_hw_data_filter_cmd_fixed_param)); + cmd->vdev_id = req->vdev_id; + cmd->enable = req->enable; + /* Set all modes in case of disable */ + if (!cmd->enable) + cmd->hw_filter_bitmap = ((uint32_t)~0U); + else + cmd->hw_filter_bitmap = req->mode_bitmap; + + WMI_LOGD("Send %s hw filter mode: 0x%X for vdev id %d", + req->enable ? "enable" : "disable", req->mode_bitmap, + req->vdev_id); + + wmi_mtrace(WMI_HW_DATA_FILTER_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi, wmi_buf, sizeof(*cmd), + WMI_HW_DATA_FILTER_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to configure hw filter"); + wmi_buf_free(wmi_buf); + } + + return status; +} + +/** + * send_enable_disable_packet_filter_cmd_tlv() - enable/disable packet filter + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @enable: Flag to enable/disable packet filter + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_enable_disable_packet_filter_cmd_tlv( + wmi_unified_t wmi_handle, uint8_t vdev_id, bool enable) +{ + int32_t len; + int ret = 0; + wmi_buf_t buf; + WMI_PACKET_FILTER_ENABLE_CMD_fixed_param *cmd; + + len = sizeof(WMI_PACKET_FILTER_ENABLE_CMD_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (WMI_PACKET_FILTER_ENABLE_CMD_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + 
WMITLV_TAG_STRUC_wmi_packet_filter_enable_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + WMI_PACKET_FILTER_ENABLE_CMD_fixed_param)); + + cmd->vdev_id = vdev_id; + if (enable) + cmd->enable = PACKET_FILTER_SET_ENABLE; + else + cmd->enable = PACKET_FILTER_SET_DISABLE; + + WMI_LOGE("%s: Packet filter enable %d for vdev_id %d", + __func__, cmd->enable, vdev_id); + + wmi_mtrace(WMI_PACKET_FILTER_ENABLE_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PACKET_FILTER_ENABLE_CMDID); + if (ret) { + WMI_LOGE("Failed to send packet filter wmi cmd to fw"); + wmi_buf_free(buf); + } + + return ret; +} + +/** + * send_config_packet_filter_cmd_tlv() - configure packet filter in target + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @rcv_filter_param: Packet filter parameters + * @filter_id: Filter id + * @enable: Flag to add/delete packet filter configuration + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS send_config_packet_filter_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t vdev_id, struct pmo_rcv_pkt_fltr_cfg *rcv_filter_param, + uint8_t filter_id, bool enable) +{ + int len, i; + int err = 0; + wmi_buf_t buf; + WMI_PACKET_FILTER_CONFIG_CMD_fixed_param *cmd; + + + /* allocate the memory */ + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("Failed to allocate buffer to send set_param cmd"); + return QDF_STATUS_E_NOMEM; + } + + cmd = (WMI_PACKET_FILTER_CONFIG_CMD_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_packet_filter_config_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (WMI_PACKET_FILTER_CONFIG_CMD_fixed_param)); + + cmd->vdev_id = vdev_id; + cmd->filter_id = filter_id; + if (enable) + cmd->filter_action = PACKET_FILTER_SET_ACTIVE; + else + cmd->filter_action = PACKET_FILTER_SET_INACTIVE; + + if (enable) { + cmd->num_params = QDF_MIN( + WMI_PACKET_FILTER_MAX_CMP_PER_PACKET_FILTER, + rcv_filter_param->num_params); + 
cmd->filter_type = rcv_filter_param->filter_type;
		cmd->coalesce_time = rcv_filter_param->coalesce_time;

		/* copy each comparator; compareData/dataMask are fixed-size
		 * fields so the copy length comes from the destination
		 */
		for (i = 0; i < cmd->num_params; i++) {
			cmd->paramsData[i].proto_type =
				rcv_filter_param->params_data[i].protocol_layer;
			cmd->paramsData[i].cmp_type =
				rcv_filter_param->params_data[i].compare_flag;
			cmd->paramsData[i].data_length =
				rcv_filter_param->params_data[i].data_length;
			cmd->paramsData[i].data_offset =
				rcv_filter_param->params_data[i].data_offset;
			memcpy(&cmd->paramsData[i].compareData,
			       rcv_filter_param->params_data[i].compare_data,
			       sizeof(cmd->paramsData[i].compareData));
			memcpy(&cmd->paramsData[i].dataMask,
			       rcv_filter_param->params_data[i].data_mask,
			       sizeof(cmd->paramsData[i].dataMask));
		}
	}

	WMI_LOGE("Packet filter action %d filter with id: %d, num_params=%d",
		 cmd->filter_action, cmd->filter_id, cmd->num_params);
	/* send the command along with data */
	wmi_mtrace(WMI_PACKET_FILTER_CONFIG_CMDID, cmd->vdev_id, 0);
	err = wmi_unified_cmd_send(wmi_handle, buf, len,
				   WMI_PACKET_FILTER_CONFIG_CMDID);
	if (err) {
		WMI_LOGE("Failed to send pkt_filter cmd");
		wmi_buf_free(buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
#endif /* End of WLAN_POWER_MANAGEMENT_OFFLOAD */

/**
 * send_set_ssid_hotlist_cmd_tlv() - Handle an SSID hotlist set request
 * @wmi_handle: wmi handle
 * @request: SSID hotlist set request
 *
 * Return: QDF_STATUS enumeration
 */
static QDF_STATUS
send_set_ssid_hotlist_cmd_tlv(wmi_unified_t wmi_handle,
			      struct ssid_hotlist_request_params *request)
{
	wmi_extscan_configure_hotlist_ssid_monitor_cmd_fixed_param *cmd;
	wmi_buf_t wmi_buf;
	uint32_t len;
	uint32_t array_size;
	uint8_t *buf_ptr;

	/* length of fixed portion */
	len = sizeof(*cmd);

	/* length of variable portion */
	array_size =
		request->ssid_count * sizeof(wmi_extscan_hotlist_ssid_entry);
	len += WMI_TLV_HDR_SIZE + array_size;

	wmi_buf = wmi_buf_alloc(wmi_handle, len);
	if (!wmi_buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	buf_ptr = (uint8_t *) wmi_buf_data(wmi_buf);
	cmd = (wmi_extscan_configure_hotlist_ssid_monitor_cmd_fixed_param *)
						buf_ptr;
	WMITLV_SET_HDR
		(&cmd->tlv_header,
		 WMITLV_TAG_STRUC_wmi_extscan_configure_hotlist_ssid_monitor_cmd_fixed_param,
		 WMITLV_GET_STRUCT_TLVLEN
			(wmi_extscan_configure_hotlist_ssid_monitor_cmd_fixed_param));

	cmd->request_id = request->request_id;
	cmd->requestor_id = 0;
	cmd->vdev_id = request->session_id;
	cmd->table_id = 0;
	cmd->lost_ap_scan_count = request->lost_ssid_sample_size;
	cmd->total_entries = request->ssid_count;
	cmd->num_entries_in_page = request->ssid_count;
	cmd->first_entry_index = 0;

	buf_ptr += sizeof(*cmd);
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, array_size);

	/* zero SSIDs means "stop monitoring"; otherwise pack one TLV entry
	 * per SSID and start
	 */
	if (request->ssid_count) {
		wmi_extscan_hotlist_ssid_entry *entry;
		int i;

		buf_ptr += WMI_TLV_HDR_SIZE;
		entry = (wmi_extscan_hotlist_ssid_entry *)buf_ptr;
		for (i = 0; i < request->ssid_count; i++) {
			WMITLV_SET_HDR
				(entry,
				 WMITLV_TAG_ARRAY_STRUC,
				 WMITLV_GET_STRUCT_TLVLEN
					(wmi_extscan_hotlist_ssid_entry));
			entry->ssid.ssid_len = request->ssids[i].ssid.length;
			qdf_mem_copy(entry->ssid.ssid,
				     request->ssids[i].ssid.mac_ssid,
				     request->ssids[i].ssid.length);
			entry->band = request->ssids[i].band;
			entry->min_rssi = request->ssids[i].rssi_low;
			entry->max_rssi = request->ssids[i].rssi_high;
			entry++;
		}
		cmd->mode = WMI_EXTSCAN_MODE_START;
	} else {
		cmd->mode = WMI_EXTSCAN_MODE_STOP;
	}

	wmi_mtrace(WMI_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMDID,
		   cmd->vdev_id, 0);
	if (wmi_unified_cmd_send
		(wmi_handle, wmi_buf, len,
		 WMI_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMDID)) {
		WMI_LOGE("%s: failed to send command", __func__);
		wmi_buf_free(wmi_buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * send_process_roam_synch_complete_cmd_tlv() - roam synch complete command to
fw.
 * @wmi_handle: wmi handle
 * @vdev_id: vdev id
 *
 * This function sends roam synch complete event to fw.
 *
 * Return: CDF STATUS
 */
static QDF_STATUS send_process_roam_synch_complete_cmd_tlv(wmi_unified_t wmi_handle,
		uint8_t vdev_id)
{
	wmi_roam_synch_complete_fixed_param *cmd;
	wmi_buf_t wmi_buf;
	uint16_t len;
	len = sizeof(wmi_roam_synch_complete_fixed_param);

	wmi_buf = wmi_buf_alloc(wmi_handle, len);
	if (!wmi_buf) {
		WMI_LOGE("%s: wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}
	cmd = (wmi_roam_synch_complete_fixed_param *) wmi_buf_data(wmi_buf);
	/* fix: removed unused local "buf_ptr" (assigned but never read) */
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_roam_synch_complete_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN
			       (wmi_roam_synch_complete_fixed_param));
	cmd->vdev_id = vdev_id;
	wmi_mtrace(WMI_ROAM_SYNCH_COMPLETE, cmd->vdev_id, 0);
	if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len,
				 WMI_ROAM_SYNCH_COMPLETE)) {
		WMI_LOGP("%s: failed to send roam synch confirmation",
			 __func__);
		wmi_buf_free(wmi_buf);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * send_fw_test_cmd_tlv() - send fw test command to fw.
 * @wmi_handle: wmi handle
 * @wmi_fwtest: fw test command
 *
 * This function sends fw test command to fw.
+ * + * Return: CDF STATUS + */ +static +QDF_STATUS send_fw_test_cmd_tlv(wmi_unified_t wmi_handle, + struct set_fwtest_params *wmi_fwtest) +{ + wmi_fwtest_set_param_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint16_t len; + + len = sizeof(*cmd); + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmai_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_fwtest_set_param_cmd_fixed_param *) wmi_buf_data(wmi_buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_fwtest_set_param_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_fwtest_set_param_cmd_fixed_param)); + cmd->param_id = wmi_fwtest->arg; + cmd->param_value = wmi_fwtest->value; + + wmi_mtrace(WMI_FWTEST_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_FWTEST_CMDID)) { + WMI_LOGP("%s: failed to send fw test command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_unit_test_cmd_tlv() - send unit test command to fw. + * @wmi_handle: wmi handle + * @wmi_utest: unit test command + * + * This function send unit test command to fw. 
+ * + * Return: CDF STATUS + */ +static QDF_STATUS send_unit_test_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_unit_test_cmd *wmi_utest) +{ + wmi_unit_test_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + uint8_t *buf_ptr; + int i; + uint16_t len, args_tlv_len; + uint32_t *unit_test_cmd_args; + + args_tlv_len = + WMI_TLV_HDR_SIZE + wmi_utest->num_args * sizeof(uint32_t); + len = sizeof(wmi_unit_test_cmd_fixed_param) + args_tlv_len; + + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmai_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_unit_test_cmd_fixed_param *) wmi_buf_data(wmi_buf); + buf_ptr = (uint8_t *) cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_unit_test_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_unit_test_cmd_fixed_param)); + cmd->vdev_id = wmi_utest->vdev_id; + cmd->module_id = wmi_utest->module_id; + cmd->num_args = wmi_utest->num_args; + cmd->diag_token = wmi_utest->diag_token; + buf_ptr += sizeof(wmi_unit_test_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (wmi_utest->num_args * sizeof(uint32_t))); + unit_test_cmd_args = (uint32_t *) (buf_ptr + WMI_TLV_HDR_SIZE); + WMI_LOGI("%s: VDEV ID: %d\n", __func__, cmd->vdev_id); + WMI_LOGI("%s: MODULE ID: %d\n", __func__, cmd->module_id); + WMI_LOGI("%s: TOKEN: %d\n", __func__, cmd->diag_token); + WMI_LOGI("%s: %d num of args = ", __func__, wmi_utest->num_args); + for (i = 0; (i < wmi_utest->num_args && i < WMI_UNIT_TEST_MAX_NUM_ARGS); i++) { + unit_test_cmd_args[i] = wmi_utest->args[i]; + WMI_LOGI("%d,", wmi_utest->args[i]); + } + wmi_mtrace(WMI_UNIT_TEST_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_UNIT_TEST_CMDID)) { + WMI_LOGP("%s: failed to send unit test command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_roam_invoke_cmd_tlv() - send roam invoke command to fw. 
+ * @wmi_handle: wma handle + * @roaminvoke: roam invoke command + * + * Send roam invoke command to fw for fastreassoc. + * + * Return: CDF STATUS + */ +static QDF_STATUS send_roam_invoke_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_roam_invoke_cmd *roaminvoke, + uint32_t ch_hz) +{ + wmi_roam_invoke_cmd_fixed_param *cmd; + wmi_buf_t wmi_buf; + u_int8_t *buf_ptr; + u_int16_t len, args_tlv_len; + uint32_t *channel_list; + wmi_mac_addr *bssid_list; + wmi_tlv_buf_len_param *buf_len_tlv; + + /* Host sends only one channel and one bssid */ + args_tlv_len = (4 * WMI_TLV_HDR_SIZE) + sizeof(uint32_t) + + sizeof(wmi_mac_addr) + sizeof(wmi_tlv_buf_len_param) + + roundup(roaminvoke->frame_len, sizeof(uint32_t)); + len = sizeof(wmi_roam_invoke_cmd_fixed_param) + args_tlv_len; + wmi_buf = wmi_buf_alloc(wmi_handle, len); + if (!wmi_buf) { + WMI_LOGE("%s: wmai_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_roam_invoke_cmd_fixed_param *)wmi_buf_data(wmi_buf); + buf_ptr = (u_int8_t *) cmd; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_invoke_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_invoke_cmd_fixed_param)); + cmd->vdev_id = roaminvoke->vdev_id; + cmd->flags |= (1 << WMI_ROAM_INVOKE_FLAG_REPORT_FAILURE); + if (roaminvoke->is_same_bssid) + cmd->flags |= (1 << WMI_ROAM_INVOKE_FLAG_NO_NULL_FRAME_TO_AP); + WMI_LOGD(FL("is_same_bssid flag: %d"), roaminvoke->is_same_bssid); + + if (roaminvoke->frame_len) { + cmd->roam_scan_mode = WMI_ROAM_INVOKE_SCAN_MODE_SKIP; + /* packing 1 beacon/probe_rsp frame with WMI cmd */ + cmd->num_buf = 1; + } else { + cmd->roam_scan_mode = WMI_ROAM_INVOKE_SCAN_MODE_FIXED_CH; + cmd->num_buf = 0; + } + + cmd->roam_ap_sel_mode = 0; + cmd->roam_delay = 0; + cmd->num_chan = 1; + cmd->num_bssid = 1; + + buf_ptr += sizeof(wmi_roam_invoke_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (sizeof(u_int32_t))); + channel_list = (uint32_t *)(buf_ptr + WMI_TLV_HDR_SIZE); + *channel_list = 
ch_hz; + buf_ptr += sizeof(uint32_t) + WMI_TLV_HDR_SIZE; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_FIXED_STRUC, + (sizeof(wmi_mac_addr))); + bssid_list = (wmi_mac_addr *)(buf_ptr + WMI_TLV_HDR_SIZE); + WMI_CHAR_ARRAY_TO_MAC_ADDR(roaminvoke->bssid, bssid_list); + + /* move to next tlv i.e. bcn_prb_buf_list */ + buf_ptr += WMI_TLV_HDR_SIZE + sizeof(wmi_mac_addr); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_FIXED_STRUC, + sizeof(wmi_tlv_buf_len_param)); + + buf_len_tlv = (wmi_tlv_buf_len_param *)(buf_ptr + WMI_TLV_HDR_SIZE); + buf_len_tlv->buf_len = roaminvoke->frame_len; + + /* move to next tlv i.e. bcn_prb_frm */ + buf_ptr += WMI_TLV_HDR_SIZE + sizeof(wmi_tlv_buf_len_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, + roundup(roaminvoke->frame_len, sizeof(uint32_t))); + + /* copy frame after the header */ + qdf_mem_copy(buf_ptr + WMI_TLV_HDR_SIZE, + roaminvoke->frame_buf, + roaminvoke->frame_len); + + WMI_LOGD(FL("bcn/prb_rsp frame, length: %d"), roaminvoke->frame_len); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + buf_ptr + WMI_TLV_HDR_SIZE, + roaminvoke->frame_len); + WMI_LOGD(FL("flag:%d, MODE scn:%d, ap:%d, dly:%d, n_ch:%d, n_bssid:%d"), + cmd->flags, cmd->roam_scan_mode, + cmd->roam_ap_sel_mode, cmd->roam_delay, + cmd->num_chan, cmd->num_bssid); + WMI_LOGD(FL("BSSID: %pM, channel: %d"), roaminvoke->bssid, ch_hz); + + wmi_mtrace(WMI_ROAM_INVOKE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, wmi_buf, len, + WMI_ROAM_INVOKE_CMDID)) { + WMI_LOGP("%s: failed to send roam invoke command", __func__); + wmi_buf_free(wmi_buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_roam_scan_offload_cmd_tlv() - set roam offload command + * @wmi_handle: wmi handle + * @command: command + * @vdev_id: vdev id + * + * This function set roam offload command to fw. 
+ * + * Return: CDF status + */ +static QDF_STATUS send_roam_scan_offload_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t command, uint32_t vdev_id) +{ + QDF_STATUS status; + wmi_roam_scan_cmd_fixed_param *cmd_fp; + wmi_buf_t buf = NULL; + int len; + uint8_t *buf_ptr; + + len = sizeof(wmi_roam_scan_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + + cmd_fp = (wmi_roam_scan_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_scan_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_scan_cmd_fixed_param)); + cmd_fp->vdev_id = vdev_id; + cmd_fp->command_arg = command; + + wmi_mtrace(WMI_ROAM_SCAN_CMD, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_ROAM_SCAN_CMD); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("wmi_unified_cmd_send WMI_ROAM_SCAN_CMD returned Error %d", + status); + goto error; + } + + WMI_LOGI("%s: WMI --> WMI_ROAM_SCAN_CMD", __func__); + return QDF_STATUS_SUCCESS; + +error: + wmi_buf_free(buf); + + return status; +} + +/** + * send_roam_scan_offload_ap_profile_cmd_tlv() - set roam ap profile in fw + * @wmi_handle: wmi handle + * @ap_profile_p: ap profile + * @vdev_id: vdev id + * + * Send WMI_ROAM_AP_PROFILE to firmware + * + * Return: CDF status + */ +static QDF_STATUS send_roam_scan_offload_ap_profile_cmd_tlv(wmi_unified_t wmi_handle, + struct ap_profile_params *ap_profile) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + int len; + uint8_t *buf_ptr; + wmi_roam_ap_profile_fixed_param *roam_ap_profile_fp; + wmi_roam_cnd_scoring_param *score_param; + wmi_ap_profile *profile; + + len = sizeof(wmi_roam_ap_profile_fixed_param) + sizeof(wmi_ap_profile); + len += sizeof(*score_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = 
(uint8_t *) wmi_buf_data(buf); + roam_ap_profile_fp = (wmi_roam_ap_profile_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&roam_ap_profile_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_ap_profile_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_ap_profile_fixed_param)); + /* fill in threshold values */ + roam_ap_profile_fp->vdev_id = ap_profile->vdev_id; + roam_ap_profile_fp->id = 0; + buf_ptr += sizeof(wmi_roam_ap_profile_fixed_param); + + profile = (wmi_ap_profile *)buf_ptr; + WMITLV_SET_HDR(&profile->tlv_header, + WMITLV_TAG_STRUC_wmi_ap_profile, + WMITLV_GET_STRUCT_TLVLEN(wmi_ap_profile)); + profile->flags = ap_profile->profile.flags; + profile->rssi_threshold = ap_profile->profile.rssi_threshold; + profile->ssid.ssid_len = ap_profile->profile.ssid.length; + qdf_mem_copy(profile->ssid.ssid, ap_profile->profile.ssid.mac_ssid, + profile->ssid.ssid_len); + profile->rsn_authmode = ap_profile->profile.rsn_authmode; + profile->rsn_ucastcipherset = ap_profile->profile.rsn_ucastcipherset; + profile->rsn_mcastcipherset = ap_profile->profile.rsn_mcastcipherset; + profile->rsn_mcastmgmtcipherset = + ap_profile->profile.rsn_mcastmgmtcipherset; + profile->rssi_abs_thresh = ap_profile->profile.rssi_abs_thresh; + + WMI_LOGD("AP profile: flags %x rssi_threshold %d ssid:%.*s authmode %d uc cipher %d mc cipher %d mc mgmt cipher %d rssi abs thresh %d", + profile->flags, profile->rssi_threshold, + profile->ssid.ssid_len, ap_profile->profile.ssid.mac_ssid, + profile->rsn_authmode, profile->rsn_ucastcipherset, + profile->rsn_mcastcipherset, profile->rsn_mcastmgmtcipherset, + profile->rssi_abs_thresh); + + buf_ptr += sizeof(wmi_ap_profile); + + score_param = (wmi_roam_cnd_scoring_param *)buf_ptr; + WMITLV_SET_HDR(&score_param->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_cnd_scoring_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_cnd_scoring_param)); + score_param->disable_bitmap = ap_profile->param.disable_bitmap; + score_param->rssi_weightage_pcnt = + ap_profile->param.rssi_weightage; + 
score_param->ht_weightage_pcnt = ap_profile->param.ht_weightage; + score_param->vht_weightage_pcnt = ap_profile->param.vht_weightage; + score_param->he_weightage_pcnt = ap_profile->param.he_weightage; + score_param->bw_weightage_pcnt = ap_profile->param.bw_weightage; + score_param->band_weightage_pcnt = ap_profile->param.band_weightage; + score_param->nss_weightage_pcnt = ap_profile->param.nss_weightage; + score_param->esp_qbss_weightage_pcnt = + ap_profile->param.esp_qbss_weightage; + score_param->beamforming_weightage_pcnt = + ap_profile->param.beamforming_weightage; + score_param->pcl_weightage_pcnt = ap_profile->param.pcl_weightage; + score_param->oce_wan_weightage_pcnt = + ap_profile->param.oce_wan_weightage; + + WMI_LOGD("Score params weightage: disable_bitmap %x rssi %d ht %d vht %d he %d BW %d band %d NSS %d ESP %d BF %d PCL %d OCE WAN %d", + score_param->disable_bitmap, score_param->rssi_weightage_pcnt, + score_param->ht_weightage_pcnt, + score_param->vht_weightage_pcnt, + score_param->he_weightage_pcnt, score_param->bw_weightage_pcnt, + score_param->band_weightage_pcnt, + score_param->nss_weightage_pcnt, + score_param->esp_qbss_weightage_pcnt, + score_param->beamforming_weightage_pcnt, + score_param->pcl_weightage_pcnt, + score_param->oce_wan_weightage_pcnt); + + score_param->bw_scoring.score_pcnt = ap_profile->param.bw_index_score; + score_param->band_scoring.score_pcnt = + ap_profile->param.band_index_score; + score_param->nss_scoring.score_pcnt = + ap_profile->param.nss_index_score; + + WMI_LOGD("Params index score bitmask: bw_index_score %x band_index_score %x nss_index_score %x", + score_param->bw_scoring.score_pcnt, + score_param->band_scoring.score_pcnt, + score_param->nss_scoring.score_pcnt); + + score_param->rssi_scoring.best_rssi_threshold = + (-1) * ap_profile->param.rssi_scoring.best_rssi_threshold; + score_param->rssi_scoring.good_rssi_threshold = + (-1) * ap_profile->param.rssi_scoring.good_rssi_threshold; + 
score_param->rssi_scoring.bad_rssi_threshold = + (-1) * ap_profile->param.rssi_scoring.bad_rssi_threshold; + score_param->rssi_scoring.good_rssi_pcnt = + ap_profile->param.rssi_scoring.good_rssi_pcnt; + score_param->rssi_scoring.bad_rssi_pcnt = + ap_profile->param.rssi_scoring.bad_rssi_pcnt; + score_param->rssi_scoring.good_bucket_size = + ap_profile->param.rssi_scoring.good_bucket_size; + score_param->rssi_scoring.bad_bucket_size = + ap_profile->param.rssi_scoring.bad_bucket_size; + score_param->rssi_scoring.rssi_pref_5g_rssi_thresh = + (-1) * ap_profile->param.rssi_scoring.rssi_pref_5g_rssi_thresh; + + WMI_LOGD("Rssi scoring threshold: best RSSI %d good RSSI %d bad RSSI %d prefer 5g threshold %d", + score_param->rssi_scoring.best_rssi_threshold, + score_param->rssi_scoring.good_rssi_threshold, + score_param->rssi_scoring.bad_rssi_threshold, + score_param->rssi_scoring.rssi_pref_5g_rssi_thresh); + WMI_LOGD("Good RSSI score for each slot %d bad RSSI score for each slot %d good bucket %d bad bucket %d", + score_param->rssi_scoring.good_rssi_pcnt, + score_param->rssi_scoring.bad_rssi_pcnt, + score_param->rssi_scoring.good_bucket_size, + score_param->rssi_scoring.bad_bucket_size); + + score_param->esp_qbss_scoring.num_slot = + ap_profile->param.esp_qbss_scoring.num_slot; + score_param->esp_qbss_scoring.score_pcnt3_to_0 = + ap_profile->param.esp_qbss_scoring.score_pcnt3_to_0; + score_param->esp_qbss_scoring.score_pcnt7_to_4 = + ap_profile->param.esp_qbss_scoring.score_pcnt7_to_4; + score_param->esp_qbss_scoring.score_pcnt11_to_8 = + ap_profile->param.esp_qbss_scoring.score_pcnt11_to_8; + score_param->esp_qbss_scoring.score_pcnt15_to_12 = + ap_profile->param.esp_qbss_scoring.score_pcnt15_to_12; + + WMI_LOGD("ESP QBSS index weight: slots %d weight 0to3 %x weight 4to7 %x weight 8to11 %x weight 12to15 %x", + score_param->esp_qbss_scoring.num_slot, + score_param->esp_qbss_scoring.score_pcnt3_to_0, + score_param->esp_qbss_scoring.score_pcnt7_to_4, + 
score_param->esp_qbss_scoring.score_pcnt11_to_8, + score_param->esp_qbss_scoring.score_pcnt15_to_12); + + score_param->oce_wan_scoring.num_slot = + ap_profile->param.oce_wan_scoring.num_slot; + score_param->oce_wan_scoring.score_pcnt3_to_0 = + ap_profile->param.oce_wan_scoring.score_pcnt3_to_0; + score_param->oce_wan_scoring.score_pcnt7_to_4 = + ap_profile->param.oce_wan_scoring.score_pcnt7_to_4; + score_param->oce_wan_scoring.score_pcnt11_to_8 = + ap_profile->param.oce_wan_scoring.score_pcnt11_to_8; + score_param->oce_wan_scoring.score_pcnt15_to_12 = + ap_profile->param.oce_wan_scoring.score_pcnt15_to_12; + + WMI_LOGD("OCE WAN index weight: slots %d weight 0to3 %x weight 4to7 %x weight 8to11 %x weight 12to15 %x", + score_param->oce_wan_scoring.num_slot, + score_param->oce_wan_scoring.score_pcnt3_to_0, + score_param->oce_wan_scoring.score_pcnt7_to_4, + score_param->oce_wan_scoring.score_pcnt11_to_8, + score_param->oce_wan_scoring.score_pcnt15_to_12); + + score_param->roam_score_delta_pcnt = ap_profile->param.roam_score_delta; + score_param->roam_score_delta_mask = + ap_profile->param.roam_trigger_bitmap; + WMI_LOGD("Roam score delta:%d Roam_trigger_bitmap:%x", + score_param->roam_score_delta_pcnt, + score_param->roam_score_delta_mask); + + wmi_mtrace(WMI_ROAM_AP_PROFILE, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_ROAM_AP_PROFILE); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("wmi_unified_cmd_send WMI_ROAM_AP_PROFILE returned Error %d", + status); + wmi_buf_free(buf); + } + + WMI_LOGD("WMI --> WMI_ROAM_AP_PROFILE and other parameters"); + + return status; +} + +/** + * send_roam_scan_offload_scan_period_cmd_tlv() - set roam offload scan period + * @wmi_handle: wmi handle + * @scan_period: scan period + * @scan_age: scan age + * @vdev_id: vdev id + * + * Send WMI_ROAM_SCAN_PERIOD parameters to fw. 
+ *
+ * Return: CDF status
+ */
+static QDF_STATUS send_roam_scan_offload_scan_period_cmd_tlv(wmi_unified_t wmi_handle,
+					uint32_t scan_period,
+					uint32_t scan_age,
+					uint32_t vdev_id)
+{
+	QDF_STATUS status;
+	wmi_buf_t buf = NULL;
+	int len;
+	uint8_t *buf_ptr;
+	wmi_roam_scan_period_fixed_param *scan_period_fp;
+
+	/* Send scan period values */
+	/* Message is a single fixed-param TLV; no variable-length arrays */
+	len = sizeof(wmi_roam_scan_period_fixed_param);
+	buf = wmi_buf_alloc(wmi_handle, len);
+	if (!buf) {
+		WMI_LOGE("%s : wmi_buf_alloc failed", __func__);
+		return QDF_STATUS_E_NOMEM;
+	}
+
+	buf_ptr = (uint8_t *) wmi_buf_data(buf);
+	scan_period_fp = (wmi_roam_scan_period_fixed_param *) buf_ptr;
+	WMITLV_SET_HDR(&scan_period_fp->tlv_header,
+		       WMITLV_TAG_STRUC_wmi_roam_scan_period_fixed_param,
+		       WMITLV_GET_STRUCT_TLVLEN
+			       (wmi_roam_scan_period_fixed_param));
+	/* fill in scan period values */
+	scan_period_fp->vdev_id = vdev_id;
+	scan_period_fp->roam_scan_period = scan_period; /* caller-supplied, e.g. 20 seconds */
+	scan_period_fp->roam_scan_age = scan_age;
+
+	wmi_mtrace(WMI_ROAM_SCAN_PERIOD, NO_SESSION, 0);
+	status = wmi_unified_cmd_send(wmi_handle, buf,
+				      len, WMI_ROAM_SCAN_PERIOD);
+	if (QDF_IS_STATUS_ERROR(status)) {
+		WMI_LOGE("wmi_unified_cmd_send WMI_ROAM_SCAN_PERIOD returned Error %d",
+			 status);
+		goto error;
+	}
+
+	WMI_LOGI("%s: WMI --> WMI_ROAM_SCAN_PERIOD roam_scan_period=%d, roam_scan_age=%d",
+		 __func__, scan_period, scan_age);
+	return QDF_STATUS_SUCCESS;
+error:
+	/* buf ownership passes to wmi_unified_cmd_send only on success */
+	wmi_buf_free(buf);
+
+	return status;
+}
+
+/**
+ * send_roam_scan_offload_chan_list_cmd_tlv() - set roam offload channel list
+ * @wmi_handle: wmi handle
+ * @chan_count: channel count
+ * @chan_list: channel list
+ * @list_type: list type
+ * @vdev_id: vdev id
+ *
+ * Set roam offload channel list.
+ * + * Return: CDF status + */ +static QDF_STATUS send_roam_scan_offload_chan_list_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t chan_count, + uint32_t *chan_list, + uint8_t list_type, uint32_t vdev_id) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + int len, list_tlv_len; + int i; + uint8_t *buf_ptr; + wmi_roam_chan_list_fixed_param *chan_list_fp; + uint32_t *roam_chan_list_array; + + if (chan_count == 0) { + WMI_LOGD("%s : invalid number of channels %d", __func__, + chan_count); + return QDF_STATUS_E_EMPTY; + } + /* Channel list is a table of 2 TLV's */ + list_tlv_len = WMI_TLV_HDR_SIZE + chan_count * sizeof(uint32_t); + len = sizeof(wmi_roam_chan_list_fixed_param) + list_tlv_len; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + chan_list_fp = (wmi_roam_chan_list_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&chan_list_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_chan_list_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_chan_list_fixed_param)); + chan_list_fp->vdev_id = vdev_id; + chan_list_fp->num_chan = chan_count; + if (chan_count > 0 && list_type == WMI_CHANNEL_LIST_STATIC) { + /* external app is controlling channel list */ + chan_list_fp->chan_list_type = + WMI_ROAM_SCAN_CHAN_LIST_TYPE_STATIC; + } else { + /* umac supplied occupied channel list in LFR */ + chan_list_fp->chan_list_type = + WMI_ROAM_SCAN_CHAN_LIST_TYPE_DYNAMIC; + } + + buf_ptr += sizeof(wmi_roam_chan_list_fixed_param); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (chan_list_fp->num_chan * sizeof(uint32_t))); + roam_chan_list_array = (uint32_t *) (buf_ptr + WMI_TLV_HDR_SIZE); + WMI_LOGD("%s: %d channels = ", __func__, chan_list_fp->num_chan); + for (i = 0; ((i < chan_list_fp->num_chan) && + (i < WMI_ROAM_MAX_CHANNELS)); i++) { + roam_chan_list_array[i] = chan_list[i]; + WMI_LOGD("%d,", roam_chan_list_array[i]); + } + + wmi_mtrace(WMI_ROAM_CHAN_LIST, 
NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_ROAM_CHAN_LIST); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("wmi_unified_cmd_send WMI_ROAM_CHAN_LIST returned Error %d", + status); + goto error; + } + + WMI_LOGD("%s: WMI --> WMI_ROAM_SCAN_CHAN_LIST", __func__); + return QDF_STATUS_SUCCESS; +error: + wmi_buf_free(buf); + + return status; +} + +/** + * send_per_roam_config_cmd_tlv() - set per roaming config to FW + * @wmi_handle: wmi handle + * @req_buf: per roam config buffer + * + * Return: QDF status + */ +static QDF_STATUS send_per_roam_config_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_per_roam_config_req *req_buf) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + int len; + uint8_t *buf_ptr; + wmi_roam_per_config_fixed_param *wmi_per_config; + + len = sizeof(wmi_roam_per_config_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + wmi_per_config = + (wmi_roam_per_config_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&wmi_per_config->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_per_config_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_per_config_fixed_param)); + + /* fill in per roam config values */ + wmi_per_config->vdev_id = req_buf->vdev_id; + + wmi_per_config->enable = req_buf->per_config.enable; + wmi_per_config->high_rate_thresh = + (req_buf->per_config.tx_high_rate_thresh << 16) | + (req_buf->per_config.rx_high_rate_thresh & 0x0000ffff); + wmi_per_config->low_rate_thresh = + (req_buf->per_config.tx_low_rate_thresh << 16) | + (req_buf->per_config.rx_low_rate_thresh & 0x0000ffff); + wmi_per_config->pkt_err_rate_thresh_pct = + (req_buf->per_config.tx_rate_thresh_percnt << 16) | + (req_buf->per_config.rx_rate_thresh_percnt & 0x0000ffff); + wmi_per_config->per_rest_time = req_buf->per_config.per_rest_time; + wmi_per_config->pkt_err_rate_mon_time = + (req_buf->per_config.tx_per_mon_time << 
16) | + (req_buf->per_config.rx_per_mon_time & 0x0000ffff); + wmi_per_config->min_candidate_rssi = + req_buf->per_config.min_candidate_rssi; + + /* Send per roam config parameters */ + wmi_mtrace(WMI_ROAM_PER_CONFIG_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_ROAM_PER_CONFIG_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("WMI_ROAM_PER_CONFIG_CMDID failed, Error %d", + status); + wmi_buf_free(buf); + return status; + } + WMI_LOGD(FL("per roam enable=%d, vdev=%d"), + req_buf->per_config.enable, req_buf->vdev_id); + + return QDF_STATUS_SUCCESS; +} + +/** + * send_roam_scan_offload_rssi_change_cmd_tlv() - set roam offload RSSI th + * @wmi_handle: wmi handle + * @rssi_change_thresh: RSSI Change threshold + * @bcn_rssi_weight: beacon RSSI weight + * @vdev_id: vdev id + * + * Send WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD parameters to fw. + * + * Return: CDF status + */ +static QDF_STATUS send_roam_scan_offload_rssi_change_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, + int32_t rssi_change_thresh, + uint32_t bcn_rssi_weight, + uint32_t hirssi_delay_btw_scans) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + int len; + uint8_t *buf_ptr; + wmi_roam_scan_rssi_change_threshold_fixed_param *rssi_change_fp; + + /* Send rssi change parameters */ + len = sizeof(wmi_roam_scan_rssi_change_threshold_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + rssi_change_fp = + (wmi_roam_scan_rssi_change_threshold_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&rssi_change_fp->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_scan_rssi_change_threshold_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_roam_scan_rssi_change_threshold_fixed_param)); + /* fill in rssi change threshold (hysteresis) values */ + rssi_change_fp->vdev_id = vdev_id; + rssi_change_fp->roam_scan_rssi_change_thresh = rssi_change_thresh; + 
rssi_change_fp->bcn_rssi_weight = bcn_rssi_weight; + rssi_change_fp->hirssi_delay_btw_scans = hirssi_delay_btw_scans; + + wmi_mtrace(WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("wmi_unified_cmd_send WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD returned Error %d", + status); + goto error; + } + + WMI_LOGD(FL("roam_scan_rssi_change_thresh=%d, bcn_rssi_weight=%d"), + rssi_change_thresh, bcn_rssi_weight); + WMI_LOGD(FL("hirssi_delay_btw_scans=%d"), hirssi_delay_btw_scans); + return QDF_STATUS_SUCCESS; +error: + wmi_buf_free(buf); + + return status; +} + +/** + * send_power_dbg_cmd_tlv() - send power debug commands + * @wmi_handle: wmi handle + * @param: wmi power debug parameter + * + * Send WMI_POWER_DEBUG_CMDID parameters to fw. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +static QDF_STATUS send_power_dbg_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_power_dbg_params *param) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + int len, args_tlv_len; + uint8_t *buf_ptr; + uint8_t i; + wmi_pdev_wal_power_debug_cmd_fixed_param *cmd; + uint32_t *cmd_args; + + /* Prepare and send power debug cmd parameters */ + args_tlv_len = WMI_TLV_HDR_SIZE + param->num_args * sizeof(uint32_t); + len = sizeof(*cmd) + args_tlv_len; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_pdev_wal_power_debug_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_wal_power_debug_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_wal_power_debug_cmd_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + param->pdev_id); + cmd->module_id = param->module_id; + cmd->num_args = param->num_args; + buf_ptr += 
sizeof(*cmd); + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_UINT32, + (param->num_args * sizeof(uint32_t))); + cmd_args = (uint32_t *) (buf_ptr + WMI_TLV_HDR_SIZE); + WMI_LOGI("%s: %d num of args = ", __func__, param->num_args); + for (i = 0; (i < param->num_args && i < WMI_MAX_POWER_DBG_ARGS); i++) { + cmd_args[i] = param->args[i]; + WMI_LOGI("%d,", param->args[i]); + } + + wmi_mtrace(WMI_PDEV_WAL_POWER_DEBUG_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_PDEV_WAL_POWER_DEBUG_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("wmi_unified_cmd_send WMI_PDEV_WAL_POWER_DEBUG_CMDID returned Error %d", + status); + goto error; + } + + return QDF_STATUS_SUCCESS; +error: + wmi_buf_free(buf); + + return status; +} + +/** + * send_multiple_vdev_restart_req_cmd_tlv() - send multiple vdev restart req + * @wmi_handle: wmi handle + * @param: wmi multiple vdev restart req param + * + * Send WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID parameters to fw. + * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +static QDF_STATUS send_multiple_vdev_restart_req_cmd_tlv( + wmi_unified_t wmi_handle, + struct multiple_vdev_restart_params *param) +{ + wmi_buf_t buf; + QDF_STATUS qdf_status; + wmi_pdev_multiple_vdev_restart_request_cmd_fixed_param *cmd; + int i; + uint8_t *buf_ptr; + uint32_t *vdev_ids; + wmi_channel *chan_info; + struct channel_param *tchan_info; + uint16_t len = sizeof(*cmd) + WMI_TLV_HDR_SIZE; + + len += sizeof(wmi_channel); + if (param->num_vdevs) + len += sizeof(uint32_t) * param->num_vdevs; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("Failed to allocate memory\n"); + qdf_status = QDF_STATUS_E_NOMEM; + goto end; + } + + buf_ptr = (uint8_t *)wmi_buf_data(buf); + cmd = (wmi_pdev_multiple_vdev_restart_request_cmd_fixed_param *) + buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_multiple_vdev_restart_request_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + 
(wmi_pdev_multiple_vdev_restart_request_cmd_fixed_param)); + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target( + param->pdev_id); + cmd->requestor_id = param->requestor_id; + cmd->disable_hw_ack = param->disable_hw_ack; + cmd->cac_duration_ms = param->cac_duration_ms; + cmd->num_vdevs = param->num_vdevs; + + WMI_LOGI("%s:cmd->pdev_id: %d ,cmd->requestor_id: %d ," + "cmd->disable_hw_ack: %d , cmd->cac_duration_ms:%d ," + " cmd->num_vdevs: %d ", + __func__, cmd->pdev_id, cmd->requestor_id, + cmd->disable_hw_ack, cmd->cac_duration_ms, cmd->num_vdevs); + buf_ptr += sizeof(*cmd); + + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_UINT32, + sizeof(uint32_t) * param->num_vdevs); + vdev_ids = (uint32_t *)(buf_ptr + WMI_TLV_HDR_SIZE); + for (i = 0; i < param->num_vdevs; i++) { + vdev_ids[i] = param->vdev_ids[i]; + } + + buf_ptr += (sizeof(uint32_t) * param->num_vdevs) + WMI_TLV_HDR_SIZE; + + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_STRUC_wmi_channel, + WMITLV_GET_STRUCT_TLVLEN(wmi_channel)); + chan_info = (wmi_channel *)buf_ptr; + tchan_info = &(param->ch_param); + chan_info->mhz = tchan_info->mhz; + chan_info->band_center_freq1 = tchan_info->cfreq1; + chan_info->band_center_freq2 = tchan_info->cfreq2; + if (tchan_info->is_chan_passive) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_PASSIVE); + if (tchan_info->dfs_set) + WMI_SET_CHANNEL_FLAG(chan_info, WMI_CHAN_FLAG_DFS); + + if (tchan_info->allow_vht) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_ALLOW_VHT); + else if (tchan_info->allow_ht) + WMI_SET_CHANNEL_FLAG(chan_info, + WMI_CHAN_FLAG_ALLOW_HT); + WMI_SET_CHANNEL_MODE(chan_info, tchan_info->phy_mode); + WMI_SET_CHANNEL_MIN_POWER(chan_info, tchan_info->minpower); + WMI_SET_CHANNEL_MAX_POWER(chan_info, tchan_info->maxpower); + WMI_SET_CHANNEL_REG_POWER(chan_info, tchan_info->maxregpower); + WMI_SET_CHANNEL_ANTENNA_MAX(chan_info, tchan_info->antennamax); + WMI_SET_CHANNEL_REG_CLASSID(chan_info, tchan_info->reg_class_id); + 
WMI_SET_CHANNEL_MAX_TX_POWER(chan_info, tchan_info->maxregpower); + + WMI_LOGI("%s:tchan_info->is_chan_passive: %d ," + "tchan_info->dfs_set : %d ,tchan_info->allow_vht:%d ," + "tchan_info->allow_ht: %d ,tchan_info->antennamax: %d ," + "tchan_info->phy_mode: %d ,tchan_info->minpower: %d," + "tchan_info->maxpower: %d ,tchan_info->maxregpower: %d ," + "tchan_info->reg_class_id: %d ," + "tchan_info->maxregpower : %d ", __func__, + tchan_info->is_chan_passive, tchan_info->dfs_set, + tchan_info->allow_vht, tchan_info->allow_ht, + tchan_info->antennamax, tchan_info->phy_mode, + tchan_info->minpower, tchan_info->maxpower, + tchan_info->maxregpower, tchan_info->reg_class_id, + tchan_info->maxregpower); + + wmi_mtrace(WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID, NO_SESSION, 0); + qdf_status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID); + + if (QDF_IS_STATUS_ERROR(qdf_status)) { + WMI_LOGE("%s: Failed to send\n", __func__); + wmi_buf_free(buf); + } + +end: + return qdf_status; +} + +/** + * send_dfs_phyerr_offload_en_cmd_tlv() - send dfs phyerr offload enable cmd + * @wmi_handle: wmi handle + * @pdev_id: pdev id + * + * Send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID command to firmware. 
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error
+ */
+static QDF_STATUS send_dfs_phyerr_offload_en_cmd_tlv(wmi_unified_t wmi_handle,
+		uint32_t pdev_id)
+{
+	wmi_pdev_dfs_phyerr_offload_enable_cmd_fixed_param *cmd;
+	wmi_buf_t buf;
+	uint16_t len;
+	QDF_STATUS ret;
+
+	len = sizeof(*cmd);
+	buf = wmi_buf_alloc(wmi_handle, len);
+
+	/* informational log only; buf is not dereferenced before the check */
+	WMI_LOGI("%s: pdev_id=%d", __func__, pdev_id);
+
+	if (!buf) {
+		WMI_LOGE("%s : wmi_buf_alloc failed", __func__);
+		return QDF_STATUS_E_NOMEM;
+	}
+
+	cmd = (wmi_pdev_dfs_phyerr_offload_enable_cmd_fixed_param *)
+		wmi_buf_data(buf);
+
+	WMITLV_SET_HDR(&cmd->tlv_header,
+	WMITLV_TAG_STRUC_wmi_pdev_dfs_phyerr_offload_enable_cmd_fixed_param,
+	WMITLV_GET_STRUCT_TLVLEN(
+		wmi_pdev_dfs_phyerr_offload_enable_cmd_fixed_param));
+
+	/* translate host-side pdev id to the target's numbering */
+	cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target(pdev_id);
+	wmi_mtrace(WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID, NO_SESSION, 0);
+	ret = wmi_unified_cmd_send(wmi_handle, buf, len,
+			WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
+	if (QDF_IS_STATUS_ERROR(ret)) {
+		WMI_LOGE("%s: Failed to send cmd to fw, ret=%d, pdev_id=%d",
+			 __func__, ret, pdev_id);
+		wmi_buf_free(buf);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * send_dfs_phyerr_offload_dis_cmd_tlv() - send dfs phyerr offload disable cmd
+ * @wmi_handle: wmi handle
+ * @pdev_id: pdev id
+ *
+ * Send WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID command to firmware.
+ * + * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error + */ +static QDF_STATUS send_dfs_phyerr_offload_dis_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t pdev_id) +{ + wmi_pdev_dfs_phyerr_offload_disable_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len; + QDF_STATUS ret; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + + WMI_LOGI("%s: pdev_id=%d", __func__, pdev_id); + + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_pdev_dfs_phyerr_offload_disable_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_dfs_phyerr_offload_disable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_dfs_phyerr_offload_disable_cmd_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target(pdev_id); + wmi_mtrace(WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send cmd to fw, ret=%d, pdev_id=%d", + __func__, ret, pdev_id); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * init_cmd_send_tlv() - send initialization cmd to fw + * @wmi_handle: wmi handle + * @param param: pointer to wmi init param + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS init_cmd_send_tlv(wmi_unified_t wmi_handle, + struct wmi_init_cmd_param *param) +{ + wmi_buf_t buf; + wmi_init_cmd_fixed_param *cmd; + uint8_t *buf_ptr; + wmi_resource_config *resource_cfg; + wlan_host_memory_chunk *host_mem_chunks; + uint32_t mem_chunk_len = 0, hw_mode_len = 0; + uint16_t idx; + int len; + QDF_STATUS ret; + + len = sizeof(*cmd) + sizeof(wmi_resource_config) + + WMI_TLV_HDR_SIZE; + mem_chunk_len = (sizeof(wlan_host_memory_chunk) * MAX_MEM_CHUNKS); + + if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) + 
hw_mode_len = sizeof(wmi_pdev_set_hw_mode_cmd_fixed_param) + + WMI_TLV_HDR_SIZE + + (param->num_band_to_mac * sizeof(wmi_pdev_band_to_mac)); + + buf = wmi_buf_alloc(wmi_handle, len + mem_chunk_len + hw_mode_len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_init_cmd_fixed_param *) buf_ptr; + resource_cfg = (wmi_resource_config *) (buf_ptr + sizeof(*cmd)); + + host_mem_chunks = (wlan_host_memory_chunk *) + (buf_ptr + sizeof(*cmd) + sizeof(wmi_resource_config) + + WMI_TLV_HDR_SIZE); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_init_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_init_cmd_fixed_param)); + + wmi_copy_resource_config(resource_cfg, param->res_cfg); + WMITLV_SET_HDR(&resource_cfg->tlv_header, + WMITLV_TAG_STRUC_wmi_resource_config, + WMITLV_GET_STRUCT_TLVLEN(wmi_resource_config)); + + for (idx = 0; idx < param->num_mem_chunks; ++idx) { + WMITLV_SET_HDR(&(host_mem_chunks[idx].tlv_header), + WMITLV_TAG_STRUC_wlan_host_memory_chunk, + WMITLV_GET_STRUCT_TLVLEN + (wlan_host_memory_chunk)); + host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr; + host_mem_chunks[idx].size = param->mem_chunks[idx].len; + host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id; + QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG, + "chunk %d len %d requested ,ptr 0x%x ", + idx, host_mem_chunks[idx].size, + host_mem_chunks[idx].ptr); + } + cmd->num_host_mem_chunks = param->num_mem_chunks; + len += (param->num_mem_chunks * sizeof(wlan_host_memory_chunk)); + + WMITLV_SET_HDR((buf_ptr + sizeof(*cmd) + sizeof(wmi_resource_config)), + WMITLV_TAG_ARRAY_STRUC, + (sizeof(wlan_host_memory_chunk) * + param->num_mem_chunks)); + + /* Fill hw mode id config */ + buf_ptr = copy_hw_mode_in_init_cmd(wmi_handle, buf_ptr, &len, param); + + /* Fill fw_abi_vers */ + copy_fw_abi_version_tlv(wmi_handle, cmd); + + wmi_mtrace(WMI_INIT_CMDID, NO_SESSION, 0); + ret = 
wmi_unified_cmd_send(wmi_handle, buf, len, WMI_INIT_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("wmi_unified_cmd_send WMI_INIT_CMDID returned Error %d", + ret); + wmi_buf_free(buf); + } + + return ret; + +} + +/** + * send_addba_send_cmd_tlv() - send addba send command to fw + * @wmi_handle: wmi handle + * @param: pointer to delba send params + * @macaddr: peer mac address + * + * Send WMI_ADDBA_SEND_CMDID command to firmware + * Return: QDF_STATUS_SUCCESS on success. QDF_STATUS_E** on error + */ +static QDF_STATUS +send_addba_send_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct addba_send_params *param) +{ + wmi_addba_send_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len; + QDF_STATUS ret; + + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_addba_send_cmd_fixed_param *)wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_addba_send_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_addba_send_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + cmd->tid = param->tidno; + cmd->buffersize = param->buffersize; + + wmi_mtrace(WMI_ADDBA_SEND_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, WMI_ADDBA_SEND_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send cmd to fw, ret=%d", __func__, ret); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_delba_send_cmd_tlv() - send delba send command to fw + * @wmi_handle: wmi handle + * @param: pointer to delba send params + * @macaddr: peer mac address + * + * Send WMI_DELBA_SEND_CMDID command to firmware + * Return: QDF_STATUS_SUCCESS on success. 
QDF_STATUS_E** on error + */ +static QDF_STATUS +send_delba_send_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct delba_send_params *param) +{ + wmi_delba_send_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len; + QDF_STATUS ret; + + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_delba_send_cmd_fixed_param *)wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_delba_send_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_delba_send_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + cmd->tid = param->tidno; + cmd->initiator = param->initiator; + cmd->reasoncode = param->reasoncode; + + wmi_mtrace(WMI_DELBA_SEND_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, WMI_DELBA_SEND_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send cmd to fw, ret=%d", __func__, ret); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_addba_clearresponse_cmd_tlv() - send addba clear response command + * to fw + * @wmi_handle: wmi handle + * @param: pointer to addba clearresp params + * @macaddr: peer mac address + * Return: 0 for success or error code + */ +static QDF_STATUS +send_addba_clearresponse_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t macaddr[IEEE80211_ADDR_LEN], + struct addba_clearresponse_params *param) +{ + wmi_addba_clear_resp_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint16_t len; + QDF_STATUS ret; + + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_addba_clear_resp_cmd_fixed_param *)wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_addba_clear_resp_cmd_fixed_param, + 
WMITLV_GET_STRUCT_TLVLEN(wmi_addba_clear_resp_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr); + + wmi_mtrace(WMI_ADDBA_CLEAR_RESP_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, + buf, len, WMI_ADDBA_CLEAR_RESP_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("%s: Failed to send cmd to fw, ret=%d", __func__, ret); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_bcn_offload_control_cmd_tlv - send beacon ofload control cmd to fw + * @wmi_handle: wmi handle + * @bcn_ctrl_param: pointer to bcn_offload_control param + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static +QDF_STATUS send_bcn_offload_control_cmd_tlv(wmi_unified_t wmi_handle, + struct bcn_offload_control *bcn_ctrl_param) +{ + wmi_buf_t buf; + wmi_bcn_offload_ctrl_cmd_fixed_param *cmd; + QDF_STATUS ret; + uint32_t len; + + len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_bcn_offload_ctrl_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_bcn_offload_ctrl_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_bcn_offload_ctrl_cmd_fixed_param)); + cmd->vdev_id = bcn_ctrl_param->vdev_id; + switch (bcn_ctrl_param->bcn_ctrl_op) { + case BCN_OFFLD_CTRL_TX_DISABLE: + cmd->bcn_ctrl_op = WMI_BEACON_CTRL_TX_DISABLE; + break; + case BCN_OFFLD_CTRL_TX_ENABLE: + cmd->bcn_ctrl_op = WMI_BEACON_CTRL_TX_ENABLE; + break; + case BCN_OFFLD_CTRL_SWBA_DISABLE: + cmd->bcn_ctrl_op = WMI_BEACON_CTRL_SWBA_EVENT_DISABLE; + break; + case BCN_OFFLD_CTRL_SWBA_ENABLE: + cmd->bcn_ctrl_op = WMI_BEACON_CTRL_SWBA_EVENT_ENABLE; + break; + default: + WMI_LOGE("WMI_BCN_OFFLOAD_CTRL_CMDID unknown CTRL Operation %d", + bcn_ctrl_param->bcn_ctrl_op); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + break; + } + 
wmi_mtrace(WMI_BCN_OFFLOAD_CTRL_CMDID, cmd->vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_BCN_OFFLOAD_CTRL_CMDID); + + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("WMI_BCN_OFFLOAD_CTRL_CMDID send returned Error %d", + ret); + wmi_buf_free(buf); + } + + return ret; +} + +#ifdef WLAN_FEATURE_NAN_CONVERGENCE +static QDF_STATUS nan_ndp_initiator_req_tlv(wmi_unified_t wmi_handle, + struct nan_datapath_initiator_req *ndp_req) +{ + uint16_t len; + wmi_buf_t buf; + uint8_t *tlv_ptr; + QDF_STATUS status; + wmi_channel *ch_tlv; + wmi_ndp_initiator_req_fixed_param *cmd; + uint32_t passphrase_len, service_name_len; + uint32_t ndp_cfg_len, ndp_app_info_len, pmk_len; + wmi_ndp_transport_ip_param *tcp_ip_param; + + /* + * WMI command expects 4 byte alligned len: + * round up ndp_cfg_len and ndp_app_info_len to 4 bytes + */ + ndp_cfg_len = qdf_roundup(ndp_req->ndp_config.ndp_cfg_len, 4); + ndp_app_info_len = qdf_roundup(ndp_req->ndp_info.ndp_app_info_len, 4); + pmk_len = qdf_roundup(ndp_req->pmk.pmk_len, 4); + passphrase_len = qdf_roundup(ndp_req->passphrase.passphrase_len, 4); + service_name_len = + qdf_roundup(ndp_req->service_name.service_name_len, 4); + /* allocated memory for fixed params as well as variable size data */ + len = sizeof(*cmd) + sizeof(*ch_tlv) + (5 * WMI_TLV_HDR_SIZE) + + ndp_cfg_len + ndp_app_info_len + pmk_len + + passphrase_len + service_name_len; + + if (ndp_req->is_ipv6_addr_present) + len += sizeof(*tcp_ip_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("wmi_buf_alloc failed"); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_ndp_initiator_req_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_ndp_initiator_req_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_ndp_initiator_req_fixed_param)); + cmd->vdev_id = wlan_vdev_get_id(ndp_req->vdev); + cmd->transaction_id = ndp_req->transaction_id; + cmd->service_instance_id = ndp_req->service_instance_id; + 
WMI_CHAR_ARRAY_TO_MAC_ADDR(ndp_req->peer_discovery_mac_addr.bytes, + &cmd->peer_discovery_mac_addr); + + cmd->ndp_cfg_len = ndp_req->ndp_config.ndp_cfg_len; + cmd->ndp_app_info_len = ndp_req->ndp_info.ndp_app_info_len; + cmd->ndp_channel_cfg = ndp_req->channel_cfg; + cmd->nan_pmk_len = ndp_req->pmk.pmk_len; + cmd->nan_csid = ndp_req->ncs_sk_type; + cmd->nan_passphrase_len = ndp_req->passphrase.passphrase_len; + cmd->nan_servicename_len = ndp_req->service_name.service_name_len; + + ch_tlv = (wmi_channel *)&cmd[1]; + WMITLV_SET_HDR(ch_tlv, WMITLV_TAG_STRUC_wmi_channel, + WMITLV_GET_STRUCT_TLVLEN(wmi_channel)); + ch_tlv->mhz = ndp_req->channel; + tlv_ptr = (uint8_t *)&ch_tlv[1]; + + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, ndp_cfg_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], + ndp_req->ndp_config.ndp_cfg, cmd->ndp_cfg_len); + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + ndp_cfg_len; + + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, ndp_app_info_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], + ndp_req->ndp_info.ndp_app_info, cmd->ndp_app_info_len); + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + ndp_app_info_len; + + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, pmk_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], ndp_req->pmk.pmk, + cmd->nan_pmk_len); + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + pmk_len; + + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, passphrase_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], ndp_req->passphrase.passphrase, + cmd->nan_passphrase_len); + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + passphrase_len; + + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, service_name_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], + ndp_req->service_name.service_name, + cmd->nan_servicename_len); + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + service_name_len; + + if (ndp_req->is_ipv6_addr_present) { + tcp_ip_param = (wmi_ndp_transport_ip_param *)tlv_ptr; + WMITLV_SET_HDR(tcp_ip_param, + WMITLV_TAG_STRUC_wmi_ndp_transport_ip_param, + WMITLV_GET_STRUCT_TLVLEN( + 
wmi_ndp_transport_ip_param)); + tcp_ip_param->ipv6_addr_present = true; + qdf_mem_copy(tcp_ip_param->ipv6_intf_addr, + ndp_req->ipv6_addr, WMI_NDP_IPV6_INTF_ADDR_LEN); + } + WMI_LOGD(FL("IPv6 addr present: %d, addr: %pI6"), + ndp_req->is_ipv6_addr_present, ndp_req->ipv6_addr); + + WMI_LOGD("vdev_id = %d, transaction_id: %d, service_instance_id: %d, ch: %d, ch_cfg: %d, csid: %d", + cmd->vdev_id, cmd->transaction_id, cmd->service_instance_id, + ch_tlv->mhz, cmd->ndp_channel_cfg, cmd->nan_csid); + WMI_LOGD("peer mac addr: mac_addr31to0: 0x%x, mac_addr47to32: 0x%x", + cmd->peer_discovery_mac_addr.mac_addr31to0, + cmd->peer_discovery_mac_addr.mac_addr47to32); + + WMI_LOGD("ndp_config len: %d", cmd->ndp_cfg_len); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + ndp_req->ndp_config.ndp_cfg, + ndp_req->ndp_config.ndp_cfg_len); + + WMI_LOGD("ndp_app_info len: %d", cmd->ndp_app_info_len); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + ndp_req->ndp_info.ndp_app_info, + ndp_req->ndp_info.ndp_app_info_len); + + WMI_LOGD("pmk len: %d", cmd->nan_pmk_len); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + ndp_req->pmk.pmk, cmd->nan_pmk_len); + + WMI_LOGD("pass phrase len: %d", cmd->nan_passphrase_len); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + ndp_req->passphrase.passphrase, + cmd->nan_passphrase_len); + + WMI_LOGD("service name len: %d", cmd->nan_servicename_len); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + ndp_req->service_name.service_name, + cmd->nan_servicename_len); + + WMI_LOGD("sending WMI_NDP_INITIATOR_REQ_CMDID(0x%X)", + WMI_NDP_INITIATOR_REQ_CMDID); + + wmi_mtrace(WMI_NDP_INITIATOR_REQ_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_NDP_INITIATOR_REQ_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("WMI_NDP_INITIATOR_REQ_CMDID failed, ret: %d", status); + wmi_buf_free(buf); + } + + return status; +} + +static QDF_STATUS 
nan_ndp_responder_req_tlv(wmi_unified_t wmi_handle, + struct nan_datapath_responder_req *req) +{ + uint16_t len; + wmi_buf_t buf; + uint8_t *tlv_ptr; + QDF_STATUS status; + wmi_ndp_responder_req_fixed_param *cmd; + wmi_ndp_transport_ip_param *tcp_ip_param; + uint32_t passphrase_len, service_name_len; + uint32_t vdev_id = 0, ndp_cfg_len, ndp_app_info_len, pmk_len; + + vdev_id = wlan_vdev_get_id(req->vdev); + WMI_LOGD("vdev_id: %d, transaction_id: %d, ndp_rsp %d, ndp_instance_id: %d, ndp_app_info_len: %d", + vdev_id, req->transaction_id, + req->ndp_rsp, + req->ndp_instance_id, + req->ndp_info.ndp_app_info_len); + + /* + * WMI command expects 4 byte alligned len: + * round up ndp_cfg_len and ndp_app_info_len to 4 bytes + */ + ndp_cfg_len = qdf_roundup(req->ndp_config.ndp_cfg_len, 4); + ndp_app_info_len = qdf_roundup(req->ndp_info.ndp_app_info_len, 4); + pmk_len = qdf_roundup(req->pmk.pmk_len, 4); + passphrase_len = qdf_roundup(req->passphrase.passphrase_len, 4); + service_name_len = + qdf_roundup(req->service_name.service_name_len, 4); + + /* allocated memory for fixed params as well as variable size data */ + len = sizeof(*cmd) + 5*WMI_TLV_HDR_SIZE + ndp_cfg_len + ndp_app_info_len + + pmk_len + passphrase_len + service_name_len; + + if (req->is_ipv6_addr_present || req->is_port_present || + req->is_protocol_present) + len += sizeof(*tcp_ip_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("wmi_buf_alloc failed"); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_ndp_responder_req_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_ndp_responder_req_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_ndp_responder_req_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->transaction_id = req->transaction_id; + cmd->ndp_instance_id = req->ndp_instance_id; + cmd->rsp_code = req->ndp_rsp; + cmd->ndp_cfg_len = req->ndp_config.ndp_cfg_len; + cmd->ndp_app_info_len = req->ndp_info.ndp_app_info_len; + cmd->nan_pmk_len = 
req->pmk.pmk_len; + cmd->nan_csid = req->ncs_sk_type; + cmd->nan_passphrase_len = req->passphrase.passphrase_len; + cmd->nan_servicename_len = req->service_name.service_name_len; + + tlv_ptr = (uint8_t *)&cmd[1]; + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, ndp_cfg_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], + req->ndp_config.ndp_cfg, cmd->ndp_cfg_len); + + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + ndp_cfg_len; + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, ndp_app_info_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], + req->ndp_info.ndp_app_info, + req->ndp_info.ndp_app_info_len); + + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + ndp_app_info_len; + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, pmk_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], req->pmk.pmk, + cmd->nan_pmk_len); + + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + pmk_len; + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, passphrase_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], + req->passphrase.passphrase, + cmd->nan_passphrase_len); + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + passphrase_len; + + WMITLV_SET_HDR(tlv_ptr, WMITLV_TAG_ARRAY_BYTE, service_name_len); + qdf_mem_copy(&tlv_ptr[WMI_TLV_HDR_SIZE], + req->service_name.service_name, + cmd->nan_servicename_len); + + tlv_ptr = tlv_ptr + WMI_TLV_HDR_SIZE + service_name_len; + + if (req->is_ipv6_addr_present || req->is_port_present || + req->is_protocol_present) { + tcp_ip_param = (wmi_ndp_transport_ip_param *)tlv_ptr; + WMITLV_SET_HDR(tcp_ip_param, + WMITLV_TAG_STRUC_wmi_ndp_transport_ip_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_ndp_transport_ip_param)); + tcp_ip_param->ipv6_addr_present = req->is_ipv6_addr_present; + qdf_mem_copy(tcp_ip_param->ipv6_intf_addr, + req->ipv6_addr, WMI_NDP_IPV6_INTF_ADDR_LEN); + + tcp_ip_param->trans_port_present = req->is_port_present; + tcp_ip_param->transport_port = req->port; + + tcp_ip_param->trans_proto_present = req->is_protocol_present; + tcp_ip_param->transport_protocol = req->protocol; + } + WMI_LOGD(FL("IPv6 addr 
present: %d, addr: %pI6"), + req->is_ipv6_addr_present, req->ipv6_addr); + WMI_LOGD(FL("port: %d present: %d"), req->is_port_present, req->port); + WMI_LOGD(FL("protocol: %d present: %d"), + req->is_protocol_present, req->protocol); + + WMI_LOGD("vdev_id = %d, transaction_id: %d, csid: %d", + cmd->vdev_id, cmd->transaction_id, cmd->nan_csid); + + WMI_LOGD("ndp_config len: %d", + req->ndp_config.ndp_cfg_len); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + req->ndp_config.ndp_cfg, + req->ndp_config.ndp_cfg_len); + + WMI_LOGD("ndp_app_info len: %d", + req->ndp_info.ndp_app_info_len); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + req->ndp_info.ndp_app_info, + req->ndp_info.ndp_app_info_len); + + WMI_LOGD("pmk len: %d", cmd->nan_pmk_len); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + req->pmk.pmk, cmd->nan_pmk_len); + + WMI_LOGD("pass phrase len: %d", cmd->nan_passphrase_len); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + req->passphrase.passphrase, + cmd->nan_passphrase_len); + + WMI_LOGD("service name len: %d", cmd->nan_servicename_len); + QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + req->service_name.service_name, + cmd->nan_servicename_len); + + WMI_LOGD("sending WMI_NDP_RESPONDER_REQ_CMDID(0x%X)", + WMI_NDP_RESPONDER_REQ_CMDID); + wmi_mtrace(WMI_NDP_RESPONDER_REQ_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_NDP_RESPONDER_REQ_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("WMI_NDP_RESPONDER_REQ_CMDID failed, ret: %d", status); + wmi_buf_free(buf); + } + return status; +} + +static QDF_STATUS nan_ndp_end_req_tlv(wmi_unified_t wmi_handle, + struct nan_datapath_end_req *req) +{ + uint16_t len; + wmi_buf_t buf; + QDF_STATUS status; + uint32_t ndp_end_req_len, i; + wmi_ndp_end_req *ndp_end_req_lst; + wmi_ndp_end_req_fixed_param *cmd; + + /* len of tlv following fixed param */ + ndp_end_req_len = sizeof(wmi_ndp_end_req) * 
req->num_ndp_instances; + /* above comes out to 4 byte alligned already, no need of padding */ + len = sizeof(*cmd) + ndp_end_req_len + WMI_TLV_HDR_SIZE; + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("Malloc failed"); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_ndp_end_req_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_ndp_end_req_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_ndp_end_req_fixed_param)); + + cmd->transaction_id = req->transaction_id; + + /* set tlv pointer to end of fixed param */ + WMITLV_SET_HDR((uint8_t *)&cmd[1], WMITLV_TAG_ARRAY_STRUC, + ndp_end_req_len); + + ndp_end_req_lst = (wmi_ndp_end_req *)((uint8_t *)&cmd[1] + + WMI_TLV_HDR_SIZE); + for (i = 0; i < req->num_ndp_instances; i++) { + WMITLV_SET_HDR(&ndp_end_req_lst[i], + WMITLV_TAG_ARRAY_FIXED_STRUC, + (sizeof(*ndp_end_req_lst) - WMI_TLV_HDR_SIZE)); + + ndp_end_req_lst[i].ndp_instance_id = req->ndp_ids[i]; + } + + WMI_LOGD("Sending WMI_NDP_END_REQ_CMDID to FW"); + wmi_mtrace(WMI_NDP_END_REQ_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_NDP_END_REQ_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("WMI_NDP_END_REQ_CMDID failed, ret: %d", status); + wmi_buf_free(buf); + } + + return status; +} + +static QDF_STATUS extract_ndp_initiator_rsp_tlv(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_initiator_rsp *rsp) +{ + WMI_NDP_INITIATOR_RSP_EVENTID_param_tlvs *event; + wmi_ndp_initiator_rsp_event_fixed_param *fixed_params; + + event = (WMI_NDP_INITIATOR_RSP_EVENTID_param_tlvs *)data; + fixed_params = event->fixed_param; + + rsp->vdev = + wlan_objmgr_get_vdev_by_id_from_psoc(wmi_handle->soc->wmi_psoc, + fixed_params->vdev_id, + WLAN_NAN_ID); + if (!rsp->vdev) { + WMI_LOGE("vdev is null"); + return QDF_STATUS_E_INVAL; + } + + rsp->transaction_id = fixed_params->transaction_id; + rsp->ndp_instance_id = fixed_params->ndp_instance_id; + rsp->status = fixed_params->rsp_status; + 
rsp->reason = fixed_params->reason_code; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_ndp_ind_tlv(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_indication_event *rsp) +{ + WMI_NDP_INDICATION_EVENTID_param_tlvs *event; + wmi_ndp_indication_event_fixed_param *fixed_params; + size_t total_array_len; + + event = (WMI_NDP_INDICATION_EVENTID_param_tlvs *)data; + fixed_params = + (wmi_ndp_indication_event_fixed_param *)event->fixed_param; + + if (fixed_params->ndp_cfg_len > event->num_ndp_cfg) { + WMI_LOGE("FW message ndp cfg length %d larger than TLV hdr %d", + fixed_params->ndp_cfg_len, event->num_ndp_cfg); + return QDF_STATUS_E_INVAL; + } + + if (fixed_params->ndp_app_info_len > event->num_ndp_app_info) { + WMI_LOGE("FW message ndp app info length %d more than TLV hdr %d", + fixed_params->ndp_app_info_len, + event->num_ndp_app_info); + return QDF_STATUS_E_INVAL; + } + + if (fixed_params->ndp_cfg_len > + (WMI_SVC_MSG_MAX_SIZE - sizeof(*fixed_params))) { + WMI_LOGE("%s: excess wmi buffer: ndp_cfg_len %d", + __func__, fixed_params->ndp_cfg_len); + return QDF_STATUS_E_INVAL; + } + + total_array_len = fixed_params->ndp_cfg_len + + sizeof(*fixed_params); + + if (fixed_params->ndp_app_info_len > + (WMI_SVC_MSG_MAX_SIZE - total_array_len)) { + WMI_LOGE("%s: excess wmi buffer: ndp_cfg_len %d", + __func__, fixed_params->ndp_app_info_len); + return QDF_STATUS_E_INVAL; + } + total_array_len += fixed_params->ndp_app_info_len; + + if (fixed_params->nan_scid_len > + (WMI_SVC_MSG_MAX_SIZE - total_array_len)) { + WMI_LOGE("%s: excess wmi buffer: ndp_cfg_len %d", + __func__, fixed_params->nan_scid_len); + return QDF_STATUS_E_INVAL; + } + + rsp->vdev = + wlan_objmgr_get_vdev_by_id_from_psoc(wmi_handle->soc->wmi_psoc, + fixed_params->vdev_id, + WLAN_NAN_ID); + if (!rsp->vdev) { + WMI_LOGE("vdev is null"); + return QDF_STATUS_E_INVAL; + } + rsp->service_instance_id = fixed_params->service_instance_id; + rsp->ndp_instance_id = 
fixed_params->ndp_instance_id; + rsp->role = fixed_params->self_ndp_role; + rsp->policy = fixed_params->accept_policy; + + WMI_MAC_ADDR_TO_CHAR_ARRAY(&fixed_params->peer_ndi_mac_addr, + rsp->peer_mac_addr.bytes); + WMI_MAC_ADDR_TO_CHAR_ARRAY(&fixed_params->peer_discovery_mac_addr, + rsp->peer_discovery_mac_addr.bytes); + + WMI_LOGD("WMI_NDP_INDICATION_EVENTID(0x%X) received. vdev %d,\n" + "service_instance %d, ndp_instance %d, role %d, policy %d,\n" + "csid: %d, scid_len: %d, peer_addr: %pM, peer_disc_addr: %pM", + WMI_NDP_INDICATION_EVENTID, fixed_params->vdev_id, + fixed_params->service_instance_id, + fixed_params->ndp_instance_id, fixed_params->self_ndp_role, + fixed_params->accept_policy, + fixed_params->nan_csid, fixed_params->nan_scid_len, + rsp->peer_mac_addr.bytes, + rsp->peer_discovery_mac_addr.bytes); + + WMI_LOGD("ndp_cfg - %d bytes", fixed_params->ndp_cfg_len); + + WMI_LOGD("ndp_app_info - %d bytes", + fixed_params->ndp_app_info_len); + + rsp->ncs_sk_type = fixed_params->nan_csid; + if (event->ndp_cfg) { + rsp->ndp_config.ndp_cfg_len = fixed_params->ndp_cfg_len; + if (rsp->ndp_config.ndp_cfg_len > NDP_QOS_INFO_LEN) + rsp->ndp_config.ndp_cfg_len = NDP_QOS_INFO_LEN; + qdf_mem_copy(rsp->ndp_config.ndp_cfg, event->ndp_cfg, + rsp->ndp_config.ndp_cfg_len); + } + + if (event->ndp_app_info) { + rsp->ndp_info.ndp_app_info_len = fixed_params->ndp_app_info_len; + if (rsp->ndp_info.ndp_app_info_len > NDP_APP_INFO_LEN) + rsp->ndp_info.ndp_app_info_len = NDP_APP_INFO_LEN; + qdf_mem_copy(rsp->ndp_info.ndp_app_info, event->ndp_app_info, + rsp->ndp_info.ndp_app_info_len); + } + + if (event->ndp_scid) { + rsp->scid.scid_len = fixed_params->nan_scid_len; + if (rsp->scid.scid_len > NDP_SCID_BUF_LEN) + rsp->scid.scid_len = NDP_SCID_BUF_LEN; + qdf_mem_copy(rsp->scid.scid, event->ndp_scid, + rsp->scid.scid_len); + } + + if (event->ndp_transport_ip_param && + event->num_ndp_transport_ip_param) { + if (event->ndp_transport_ip_param->ipv6_addr_present) { + 
rsp->is_ipv6_addr_present = true; + qdf_mem_copy(rsp->ipv6_addr, + event->ndp_transport_ip_param->ipv6_intf_addr, + WMI_NDP_IPV6_INTF_ADDR_LEN); + } + } + WMI_LOGD(FL("IPv6 addr present: %d, addr: %pI6"), + rsp->is_ipv6_addr_present, rsp->ipv6_addr); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_ndp_confirm_tlv(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_confirm_event *rsp) +{ + uint8_t i; + WMI_HOST_WLAN_PHY_MODE ch_mode; + WMI_NDP_CONFIRM_EVENTID_param_tlvs *event; + wmi_ndp_confirm_event_fixed_param *fixed_params; + size_t total_array_len; + + event = (WMI_NDP_CONFIRM_EVENTID_param_tlvs *) data; + fixed_params = (wmi_ndp_confirm_event_fixed_param *)event->fixed_param; + WMI_LOGD("WMI_NDP_CONFIRM_EVENTID(0x%X) received. vdev %d, ndp_instance %d, rsp_code %d, reason_code: %d, num_active_ndps_on_peer: %d", + WMI_NDP_CONFIRM_EVENTID, fixed_params->vdev_id, + fixed_params->ndp_instance_id, fixed_params->rsp_code, + fixed_params->reason_code, + fixed_params->num_active_ndps_on_peer); + WMI_LOGE("num_ch: %d", fixed_params->num_ndp_channels); + + if (fixed_params->ndp_cfg_len > event->num_ndp_cfg) { + WMI_LOGE("FW message ndp cfg length %d larger than TLV hdr %d", + fixed_params->ndp_cfg_len, event->num_ndp_cfg); + return QDF_STATUS_E_INVAL; + } + + WMI_LOGD("ndp_cfg - %d bytes", fixed_params->ndp_cfg_len); + + if (fixed_params->ndp_app_info_len > event->num_ndp_app_info) { + WMI_LOGE("FW message ndp app info length %d more than TLV hdr %d", + fixed_params->ndp_app_info_len, + event->num_ndp_app_info); + return QDF_STATUS_E_INVAL; + } + + WMI_LOGD("ndp_app_info - %d bytes", + fixed_params->ndp_app_info_len); + + if (fixed_params->ndp_cfg_len > + (WMI_SVC_MSG_MAX_SIZE - sizeof(*fixed_params))) { + WMI_LOGE("%s: excess wmi buffer: ndp_cfg_len %d", + __func__, fixed_params->ndp_cfg_len); + return QDF_STATUS_E_INVAL; + } + + total_array_len = fixed_params->ndp_cfg_len + + sizeof(*fixed_params); + + if (fixed_params->ndp_app_info_len > + 
(WMI_SVC_MSG_MAX_SIZE - total_array_len)) { + WMI_LOGE("%s: excess wmi buffer: ndp_cfg_len %d", + __func__, fixed_params->ndp_app_info_len); + return QDF_STATUS_E_INVAL; + } + if (fixed_params->num_ndp_channels > event->num_ndp_channel_list || + fixed_params->num_ndp_channels > event->num_nss_list) { + WMI_LOGE(FL("NDP Ch count %d greater than NDP Ch TLV len (%d) or NSS TLV len (%d)"), + fixed_params->num_ndp_channels, + event->num_ndp_channel_list, + event->num_nss_list); + return QDF_STATUS_E_INVAL; + } + + rsp->vdev = + wlan_objmgr_get_vdev_by_id_from_psoc(wmi_handle->soc->wmi_psoc, + fixed_params->vdev_id, + WLAN_NAN_ID); + if (!rsp->vdev) { + WMI_LOGE("vdev is null"); + return QDF_STATUS_E_INVAL; + } + rsp->ndp_instance_id = fixed_params->ndp_instance_id; + rsp->rsp_code = fixed_params->rsp_code; + rsp->reason_code = fixed_params->reason_code; + rsp->num_active_ndps_on_peer = fixed_params->num_active_ndps_on_peer; + rsp->num_channels = fixed_params->num_ndp_channels; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&fixed_params->peer_ndi_mac_addr, + rsp->peer_ndi_mac_addr.bytes); + rsp->ndp_info.ndp_app_info_len = fixed_params->ndp_app_info_len; + qdf_mem_copy(rsp->ndp_info.ndp_app_info, event->ndp_app_info, + rsp->ndp_info.ndp_app_info_len); + + if (rsp->num_channels > NAN_CH_INFO_MAX_CHANNELS) { + WMI_LOGE(FL("too many channels")); + rsp->num_channels = NAN_CH_INFO_MAX_CHANNELS; + } + + for (i = 0; i < rsp->num_channels; i++) { + rsp->ch[i].channel = event->ndp_channel_list[i].mhz; + rsp->ch[i].nss = event->nss_list[i]; + ch_mode = WMI_GET_CHANNEL_MODE(&event->ndp_channel_list[i]); + rsp->ch[i].ch_width = wmi_get_ch_width_from_phy_mode(wmi_handle, + ch_mode); + WMI_LOGD(FL("ch: %d, ch_mode: %d, nss: %d"), + rsp->ch[i].channel, + rsp->ch[i].ch_width, + rsp->ch[i].nss); + } + + if (event->ndp_transport_ip_param && + event->num_ndp_transport_ip_param) { + if (event->ndp_transport_ip_param->ipv6_addr_present) { + rsp->is_ipv6_addr_present = true; + qdf_mem_copy(rsp->ipv6_addr, + 
event->ndp_transport_ip_param->ipv6_intf_addr, + WMI_NDP_IPV6_INTF_ADDR_LEN); + } + + if (event->ndp_transport_ip_param->trans_port_present) { + rsp->is_port_present = true; + rsp->port = + event->ndp_transport_ip_param->transport_port; + } + + if (event->ndp_transport_ip_param->trans_proto_present) { + rsp->is_protocol_present = true; + rsp->protocol = + event->ndp_transport_ip_param->transport_protocol; + } + } + WMI_LOGD(FL("IPv6 addr present: %d, addr: %pI6"), + rsp->is_ipv6_addr_present, rsp->ipv6_addr); + WMI_LOGD(FL("port: %d present: %d"), rsp->port, rsp->is_port_present); + WMI_LOGD(FL("protocol: %d present: %d"), + rsp->protocol, rsp->is_protocol_present); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_ndp_responder_rsp_tlv(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_responder_rsp *rsp) +{ + WMI_NDP_RESPONDER_RSP_EVENTID_param_tlvs *event; + wmi_ndp_responder_rsp_event_fixed_param *fixed_params; + + event = (WMI_NDP_RESPONDER_RSP_EVENTID_param_tlvs *)data; + fixed_params = event->fixed_param; + + WMI_LOGD("WMI_NDP_RESPONDER_RSP_EVENTID(0x%X) received. 
vdev_id: %d, peer_mac_addr: %pM,transaction_id: %d, status_code %d, reason_code: %d, create_peer: %d", + WMI_NDP_RESPONDER_RSP_EVENTID, fixed_params->vdev_id, + rsp->peer_mac_addr.bytes, rsp->transaction_id, + rsp->status, rsp->reason, rsp->create_peer); + + rsp->vdev = + wlan_objmgr_get_vdev_by_id_from_psoc(wmi_handle->soc->wmi_psoc, + fixed_params->vdev_id, + WLAN_NAN_ID); + if (!rsp->vdev) { + WMI_LOGE("vdev is null"); + return QDF_STATUS_E_INVAL; + } + rsp->transaction_id = fixed_params->transaction_id; + rsp->reason = fixed_params->reason_code; + rsp->status = fixed_params->rsp_status; + rsp->create_peer = fixed_params->create_peer; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&fixed_params->peer_ndi_mac_addr, + rsp->peer_mac_addr.bytes); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_ndp_end_rsp_tlv(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_end_rsp_event *rsp) +{ + WMI_NDP_END_RSP_EVENTID_param_tlvs *event; + wmi_ndp_end_rsp_event_fixed_param *fixed_params = NULL; + + event = (WMI_NDP_END_RSP_EVENTID_param_tlvs *) data; + fixed_params = (wmi_ndp_end_rsp_event_fixed_param *)event->fixed_param; + WMI_LOGD("WMI_NDP_END_RSP_EVENTID(0x%X) received. 
transaction_id: %d, rsp_status: %d, reason_code: %d", + WMI_NDP_END_RSP_EVENTID, fixed_params->transaction_id, + fixed_params->rsp_status, fixed_params->reason_code); + + rsp->vdev = wlan_objmgr_get_vdev_by_opmode_from_psoc( + wmi_handle->soc->wmi_psoc, QDF_NDI_MODE, WLAN_NAN_ID); + if (!rsp->vdev) { + WMI_LOGE("vdev is null"); + return QDF_STATUS_E_INVAL; + } + rsp->transaction_id = fixed_params->transaction_id; + rsp->reason = fixed_params->reason_code; + rsp->status = fixed_params->rsp_status; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_ndp_end_ind_tlv(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_end_indication_event **rsp) +{ + uint32_t i, buf_size; + wmi_ndp_end_indication *ind; + struct qdf_mac_addr peer_addr; + WMI_NDP_END_INDICATION_EVENTID_param_tlvs *event; + + event = (WMI_NDP_END_INDICATION_EVENTID_param_tlvs *) data; + ind = event->ndp_end_indication_list; + + if (event->num_ndp_end_indication_list == 0) { + WMI_LOGE("Error: Event ignored, 0 ndp instances"); + return QDF_STATUS_E_INVAL; + } + + WMI_LOGD("number of ndp instances = %d", + event->num_ndp_end_indication_list); + + if (event->num_ndp_end_indication_list > ((UINT_MAX - sizeof(**rsp))/ + sizeof((*rsp)->ndp_map[0]))) { + WMI_LOGE("num_ndp_end_ind_list %d too large", + event->num_ndp_end_indication_list); + return QDF_STATUS_E_INVAL; + } + + buf_size = sizeof(**rsp) + event->num_ndp_end_indication_list * + sizeof((*rsp)->ndp_map[0]); + *rsp = qdf_mem_malloc(buf_size); + if (!(*rsp)) { + WMI_LOGE("Failed to allocate memory"); + return QDF_STATUS_E_NOMEM; + } + + (*rsp)->num_ndp_ids = event->num_ndp_end_indication_list; + for (i = 0; i < (*rsp)->num_ndp_ids; i++) { + WMI_MAC_ADDR_TO_CHAR_ARRAY(&ind[i].peer_ndi_mac_addr, + peer_addr.bytes); + WMI_LOGD("ind[%d]: type %d, reason_code %d, instance_id %d num_active %d ", + i, ind[i].type, ind[i].reason_code, + ind[i].ndp_instance_id, + ind[i].num_active_ndps_on_peer); + /* Add each instance entry to the list */ + 
(*rsp)->ndp_map[i].ndp_instance_id = ind[i].ndp_instance_id; + (*rsp)->ndp_map[i].vdev_id = ind[i].vdev_id; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&ind[i].peer_ndi_mac_addr, + (*rsp)->ndp_map[i].peer_ndi_mac_addr.bytes); + (*rsp)->ndp_map[i].num_active_ndp_sessions = + ind[i].num_active_ndps_on_peer; + (*rsp)->ndp_map[i].type = ind[i].type; + (*rsp)->ndp_map[i].reason_code = ind[i].reason_code; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_ndp_sch_update_tlv(wmi_unified_t wmi_handle, + uint8_t *data, struct nan_datapath_sch_update_event *ind) +{ + uint8_t i; + WMI_HOST_WLAN_PHY_MODE ch_mode; + WMI_NDL_SCHEDULE_UPDATE_EVENTID_param_tlvs *event; + wmi_ndl_schedule_update_fixed_param *fixed_params; + + event = (WMI_NDL_SCHEDULE_UPDATE_EVENTID_param_tlvs *)data; + fixed_params = event->fixed_param; + + WMI_LOGD(FL("flags: %d, num_ch: %d, num_ndp_instances: %d"), + fixed_params->flags, fixed_params->num_channels, + fixed_params->num_ndp_instances); + + if (fixed_params->num_channels > event->num_ndl_channel_list || + fixed_params->num_channels > event->num_nss_list) { + WMI_LOGE(FL("Channel count %d greater than NDP Ch list TLV len (%d) or NSS list TLV len (%d)"), + fixed_params->num_channels, + event->num_ndl_channel_list, + event->num_nss_list); + return QDF_STATUS_E_INVAL; + } + if (fixed_params->num_ndp_instances > event->num_ndp_instance_list) { + WMI_LOGE(FL("NDP Instance count %d greater than NDP Instancei TLV len %d"), + fixed_params->num_ndp_instances, + event->num_ndp_instance_list); + return QDF_STATUS_E_INVAL; + } + + ind->vdev = + wlan_objmgr_get_vdev_by_id_from_psoc(wmi_handle->soc->wmi_psoc, + fixed_params->vdev_id, + WLAN_NAN_ID); + if (!ind->vdev) { + WMI_LOGE("vdev is null"); + return QDF_STATUS_E_INVAL; + } + + ind->flags = fixed_params->flags; + ind->num_channels = fixed_params->num_channels; + ind->num_ndp_instances = fixed_params->num_ndp_instances; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&fixed_params->peer_macaddr, + ind->peer_addr.bytes); + + if 
(ind->num_ndp_instances > NDP_NUM_INSTANCE_ID) { + WMI_LOGE(FL("uint32 overflow")); + wlan_objmgr_vdev_release_ref(ind->vdev, WLAN_NAN_ID); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_copy(ind->ndp_instances, event->ndp_instance_list, + sizeof(uint32_t) * ind->num_ndp_instances); + + if (ind->num_channels > NAN_CH_INFO_MAX_CHANNELS) { + WMI_LOGE(FL("too many channels")); + ind->num_channels = NAN_CH_INFO_MAX_CHANNELS; + } + + for (i = 0; i < ind->num_channels; i++) { + ind->ch[i].channel = event->ndl_channel_list[i].mhz; + ind->ch[i].nss = event->nss_list[i]; + ch_mode = WMI_GET_CHANNEL_MODE(&event->ndl_channel_list[i]); + ind->ch[i].ch_width = wmi_get_ch_width_from_phy_mode(wmi_handle, + ch_mode); + WMI_LOGD(FL("ch: %d, ch_mode: %d, nss: %d"), + ind->ch[i].channel, + ind->ch[i].ch_width, + ind->ch[i].nss); + } + + for (i = 0; i < fixed_params->num_ndp_instances; i++) + WMI_LOGD(FL("instance_id[%d]: %d"), + i, event->ndp_instance_list[i]); + + return QDF_STATUS_SUCCESS; +} + +#endif + +#ifdef QCA_SUPPORT_CP_STATS +/** + * extract_cca_stats_tlv - api to extract congestion stats from event buffer + * @wmi_handle: wma handle + * @evt_buf: event buffer + * @out_buff: buffer to populated after stats extraction + * + * Return: status of operation + */ +static QDF_STATUS extract_cca_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct wmi_host_congestion_stats *out_buff) +{ + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + wmi_congestion_stats *congestion_stats; + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *)evt_buf; + congestion_stats = param_buf->congestion_stats; + if (!congestion_stats) { + WMI_LOGD("%s: no cca stats in event buffer", __func__); + return QDF_STATUS_E_INVAL; + } + + out_buff->vdev_id = congestion_stats->vdev_id; + out_buff->congestion = congestion_stats->congestion; + + WMI_LOGD("%s: cca stats event processed", __func__); + return QDF_STATUS_SUCCESS; +} +#endif /* QCA_SUPPORT_CP_STATS */ + +/** + * save_service_bitmap_tlv() - save 
service bitmap + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param bitmap_buf: bitmap buffer, for converged legacy support + * + * Return: QDF_STATUS + */ +static +QDF_STATUS save_service_bitmap_tlv(wmi_unified_t wmi_handle, void *evt_buf, + void *bitmap_buf) +{ + WMI_SERVICE_READY_EVENTID_param_tlvs *param_buf; + struct wmi_soc *soc = wmi_handle->soc; + + param_buf = (WMI_SERVICE_READY_EVENTID_param_tlvs *) evt_buf; + + /* If it is already allocated, use that buffer. This can happen + * during target stop/start scenarios where host allocation is skipped. + */ + if (!soc->wmi_service_bitmap) { + soc->wmi_service_bitmap = + qdf_mem_malloc(WMI_SERVICE_BM_SIZE * sizeof(uint32_t)); + if (!soc->wmi_service_bitmap) { + WMI_LOGE("Failed memory allocation for service bitmap"); + return QDF_STATUS_E_NOMEM; + } + } + + qdf_mem_copy(soc->wmi_service_bitmap, + param_buf->wmi_service_bitmap, + (WMI_SERVICE_BM_SIZE * sizeof(uint32_t))); + + if (bitmap_buf) + qdf_mem_copy(bitmap_buf, + param_buf->wmi_service_bitmap, + (WMI_SERVICE_BM_SIZE * sizeof(uint32_t))); + + return QDF_STATUS_SUCCESS; +} + +/** + * save_ext_service_bitmap_tlv() - save extendend service bitmap + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param bitmap_buf: bitmap buffer, for converged legacy support + * + * Return: QDF_STATUS + */ +static +QDF_STATUS save_ext_service_bitmap_tlv(wmi_unified_t wmi_handle, void *evt_buf, + void *bitmap_buf) +{ + WMI_SERVICE_AVAILABLE_EVENTID_param_tlvs *param_buf; + wmi_service_available_event_fixed_param *ev; + struct wmi_soc *soc = wmi_handle->soc; + + param_buf = (WMI_SERVICE_AVAILABLE_EVENTID_param_tlvs *) evt_buf; + + ev = param_buf->fixed_param; + + /* If it is already allocated, use that buffer. This can happen + * during target stop/start scenarios where host allocation is skipped. 
+ */ + if (!soc->wmi_ext_service_bitmap) { + soc->wmi_ext_service_bitmap = qdf_mem_malloc( + WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(uint32_t)); + if (!soc->wmi_ext_service_bitmap) { + WMI_LOGE("Failed memory allocation for service bitmap"); + return QDF_STATUS_E_NOMEM; + } + } + + qdf_mem_copy(soc->wmi_ext_service_bitmap, + ev->wmi_service_segment_bitmap, + (WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(uint32_t))); + + WMI_LOGD("wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x\n", + soc->wmi_ext_service_bitmap[0], soc->wmi_ext_service_bitmap[1], + soc->wmi_ext_service_bitmap[2], soc->wmi_ext_service_bitmap[3]); + + if (bitmap_buf) + qdf_mem_copy(bitmap_buf, + soc->wmi_ext_service_bitmap, + (WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(uint32_t))); + + return QDF_STATUS_SUCCESS; +} +/** + * is_service_enabled_tlv() - Check if service enabled + * @param wmi_handle: wmi handle + * @param service_id: service identifier + * + * Return: 1 enabled, 0 disabled + */ +static bool is_service_enabled_tlv(wmi_unified_t wmi_handle, + uint32_t service_id) +{ + struct wmi_soc *soc = wmi_handle->soc; + + if (!soc->wmi_service_bitmap) { + WMI_LOGE("WMI service bit map is not saved yet\n"); + return false; + } + + /* if wmi_service_enabled was received with extended bitmap, + * use WMI_SERVICE_EXT_IS_ENABLED to check the services. 
+ */ + if (soc->wmi_ext_service_bitmap) + return WMI_SERVICE_EXT_IS_ENABLED(soc->wmi_service_bitmap, + soc->wmi_ext_service_bitmap, + service_id); + + if (service_id >= WMI_MAX_SERVICE) { + WMI_LOGE("Service id %d but WMI ext service bitmap is NULL", + service_id); + return false; + } + + return WMI_SERVICE_IS_ENABLED(soc->wmi_service_bitmap, + service_id); +} + +static inline void copy_ht_cap_info(uint32_t ev_target_cap, + struct wlan_psoc_target_capability_info *cap) +{ + /* except LDPC all flags are common betwen legacy and here + * also IBFEER is not defined for TLV + */ + cap->ht_cap_info |= ev_target_cap & ( + WMI_HT_CAP_ENABLED + | WMI_HT_CAP_HT20_SGI + | WMI_HT_CAP_DYNAMIC_SMPS + | WMI_HT_CAP_TX_STBC + | WMI_HT_CAP_TX_STBC_MASK_SHIFT + | WMI_HT_CAP_RX_STBC + | WMI_HT_CAP_RX_STBC_MASK_SHIFT + | WMI_HT_CAP_LDPC + | WMI_HT_CAP_L_SIG_TXOP_PROT + | WMI_HT_CAP_MPDU_DENSITY + | WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT + | WMI_HT_CAP_HT40_SGI); + if (ev_target_cap & WMI_HT_CAP_LDPC) + cap->ht_cap_info |= WMI_HOST_HT_CAP_RX_LDPC | + WMI_HOST_HT_CAP_TX_LDPC; +} +/** + * extract_service_ready_tlv() - extract service ready event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to received event buffer + * @param cap: pointer to hold target capability information extracted from even + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_service_ready_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct wlan_psoc_target_capability_info *cap) +{ + WMI_SERVICE_READY_EVENTID_param_tlvs *param_buf; + wmi_service_ready_event_fixed_param *ev; + + + param_buf = (WMI_SERVICE_READY_EVENTID_param_tlvs *) evt_buf; + + ev = (wmi_service_ready_event_fixed_param *) param_buf->fixed_param; + if (!ev) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + cap->phy_capability = ev->phy_capability; + cap->max_frag_entry = ev->max_frag_entry; + cap->num_rf_chains = ev->num_rf_chains; + 
copy_ht_cap_info(ev->ht_cap_info, cap); + cap->vht_cap_info = ev->vht_cap_info; + cap->vht_supp_mcs = ev->vht_supp_mcs; + cap->hw_min_tx_power = ev->hw_min_tx_power; + cap->hw_max_tx_power = ev->hw_max_tx_power; + cap->sys_cap_info = ev->sys_cap_info; + cap->min_pkt_size_enable = ev->min_pkt_size_enable; + cap->max_bcn_ie_size = ev->max_bcn_ie_size; + cap->max_num_scan_channels = ev->max_num_scan_channels; + cap->max_supported_macs = ev->max_supported_macs; + cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps; + cap->txrx_chainmask = ev->txrx_chainmask; + cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index; + cap->num_msdu_desc = ev->num_msdu_desc; + cap->fw_version = ev->fw_build_vers; + /* fw_version_1 is not available in TLV. */ + cap->fw_version_1 = 0; + + return QDF_STATUS_SUCCESS; +} + +/* convert_wireless_modes_tlv() - Convert REGDMN_MODE values sent by target + * to host internal WMI_HOST_REGDMN_MODE values. + * REGULATORY TODO : REGDMN_MODE_11AC_VHT*_2G values are not used by the + * host currently. Add this in the future if required. + * 11AX (Phase II) : 11ax related values are not currently + * advertised separately by FW. As part of phase II regulatory bring-up, + * finalize the advertisement mechanism. + * @target_wireless_mode: target wireless mode received in message + * + * Return: returns the host internal wireless mode. 
+ */ +static inline uint32_t convert_wireless_modes_tlv(uint32_t target_wireless_mode) +{ + + uint32_t wireless_modes = 0; + + if (target_wireless_mode & REGDMN_MODE_11A) + wireless_modes |= WMI_HOST_REGDMN_MODE_11A; + + if (target_wireless_mode & REGDMN_MODE_TURBO) + wireless_modes |= WMI_HOST_REGDMN_MODE_TURBO; + + if (target_wireless_mode & REGDMN_MODE_11B) + wireless_modes |= WMI_HOST_REGDMN_MODE_11B; + + if (target_wireless_mode & REGDMN_MODE_PUREG) + wireless_modes |= WMI_HOST_REGDMN_MODE_PUREG; + + if (target_wireless_mode & REGDMN_MODE_11G) + wireless_modes |= WMI_HOST_REGDMN_MODE_11G; + + if (target_wireless_mode & REGDMN_MODE_108G) + wireless_modes |= WMI_HOST_REGDMN_MODE_108G; + + if (target_wireless_mode & REGDMN_MODE_108A) + wireless_modes |= WMI_HOST_REGDMN_MODE_108A; + + if (target_wireless_mode & REGDMN_MODE_XR) + wireless_modes |= WMI_HOST_REGDMN_MODE_XR; + + if (target_wireless_mode & REGDMN_MODE_11A_HALF_RATE) + wireless_modes |= WMI_HOST_REGDMN_MODE_11A_HALF_RATE; + + if (target_wireless_mode & REGDMN_MODE_11A_QUARTER_RATE) + wireless_modes |= WMI_HOST_REGDMN_MODE_11A_QUARTER_RATE; + + if (target_wireless_mode & REGDMN_MODE_11NG_HT20) + wireless_modes |= WMI_HOST_REGDMN_MODE_11NG_HT20; + + if (target_wireless_mode & REGDMN_MODE_11NA_HT20) + wireless_modes |= WMI_HOST_REGDMN_MODE_11NA_HT20; + + if (target_wireless_mode & REGDMN_MODE_11NG_HT40PLUS) + wireless_modes |= WMI_HOST_REGDMN_MODE_11NG_HT40PLUS; + + if (target_wireless_mode & REGDMN_MODE_11NG_HT40MINUS) + wireless_modes |= WMI_HOST_REGDMN_MODE_11NG_HT40MINUS; + + if (target_wireless_mode & REGDMN_MODE_11NA_HT40PLUS) + wireless_modes |= WMI_HOST_REGDMN_MODE_11NA_HT40PLUS; + + if (target_wireless_mode & REGDMN_MODE_11NA_HT40MINUS) + wireless_modes |= WMI_HOST_REGDMN_MODE_11NA_HT40MINUS; + + if (target_wireless_mode & REGDMN_MODE_11AC_VHT20) + wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT20; + + if (target_wireless_mode & REGDMN_MODE_11AC_VHT40PLUS) + wireless_modes |= 
WMI_HOST_REGDMN_MODE_11AC_VHT40PLUS; + + if (target_wireless_mode & REGDMN_MODE_11AC_VHT40MINUS) + wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT40MINUS; + + if (target_wireless_mode & REGDMN_MODE_11AC_VHT80) + wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT80; + + if (target_wireless_mode & REGDMN_MODE_11AC_VHT160) + wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT160; + + if (target_wireless_mode & REGDMN_MODE_11AC_VHT80_80) + wireless_modes |= WMI_HOST_REGDMN_MODE_11AC_VHT80_80; + + return wireless_modes; +} + +/** + * extract_hal_reg_cap_tlv() - extract HAL registered capabilities + * @wmi_handle: wmi handle + * @param evt_buf: Pointer to event buffer + * @param cap: pointer to hold HAL reg capabilities + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_hal_reg_cap_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct wlan_psoc_hal_reg_capability *cap) +{ + WMI_SERVICE_READY_EVENTID_param_tlvs *param_buf; + + param_buf = (WMI_SERVICE_READY_EVENTID_param_tlvs *) evt_buf; + + qdf_mem_copy(cap, (((uint8_t *)param_buf->hal_reg_capabilities) + + sizeof(uint32_t)), + sizeof(struct wlan_psoc_hal_reg_capability)); + + cap->wireless_modes = convert_wireless_modes_tlv( + param_buf->hal_reg_capabilities->wireless_modes); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_host_mem_req_tlv() - Extract host memory request event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param num_entries: pointer to hold number of entries requested + * + * Return: Number of entries requested + */ +static host_mem_req *extract_host_mem_req_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint8_t *num_entries) +{ + WMI_SERVICE_READY_EVENTID_param_tlvs *param_buf; + wmi_service_ready_event_fixed_param *ev; + + param_buf = (WMI_SERVICE_READY_EVENTID_param_tlvs *) evt_buf; + + ev = (wmi_service_ready_event_fixed_param *) param_buf->fixed_param; + if (!ev) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return 
NULL; + } + + if (ev->num_mem_reqs > param_buf->num_mem_reqs) { + WMI_LOGE("Invalid num_mem_reqs %d:%d", + ev->num_mem_reqs, param_buf->num_mem_reqs); + return NULL; + } + + *num_entries = ev->num_mem_reqs; + + return (host_mem_req *)param_buf->mem_reqs; +} + +/** + * save_fw_version_in_service_ready_tlv() - Save fw version in service + * ready function + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS +save_fw_version_in_service_ready_tlv(wmi_unified_t wmi_handle, void *evt_buf) +{ + WMI_SERVICE_READY_EVENTID_param_tlvs *param_buf; + wmi_service_ready_event_fixed_param *ev; + + + param_buf = (WMI_SERVICE_READY_EVENTID_param_tlvs *) evt_buf; + + ev = (wmi_service_ready_event_fixed_param *) param_buf->fixed_param; + if (!ev) { + qdf_print("%s: wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + /*Save fw version from service ready message */ + /*This will be used while sending INIT message */ + qdf_mem_copy(&wmi_handle->fw_abi_version, &ev->fw_abi_vers, + sizeof(wmi_handle->fw_abi_version)); + + return QDF_STATUS_SUCCESS; +} + +/** + * ready_extract_init_status_tlv() - Extract init status from ready event + * @wmi_handle: wmi handle + * @param evt_buf: Pointer to event buffer + * + * Return: ready status + */ +static uint32_t ready_extract_init_status_tlv(wmi_unified_t wmi_handle, + void *evt_buf) +{ + WMI_READY_EVENTID_param_tlvs *param_buf = NULL; + wmi_ready_event_fixed_param *ev = NULL; + + param_buf = (WMI_READY_EVENTID_param_tlvs *) evt_buf; + ev = param_buf->fixed_param; + + qdf_print("%s:%d\n", __func__, ev->status); + + return ev->status; +} + +/** + * ready_extract_mac_addr_tlv() - extract mac address from ready event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param macaddr: Pointer to hold MAC address + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS 
ready_extract_mac_addr_tlv(wmi_unified_t wmi_hamdle, + void *evt_buf, uint8_t *macaddr) +{ + WMI_READY_EVENTID_param_tlvs *param_buf = NULL; + wmi_ready_event_fixed_param *ev = NULL; + + + param_buf = (WMI_READY_EVENTID_param_tlvs *) evt_buf; + ev = param_buf->fixed_param; + + WMI_MAC_ADDR_TO_CHAR_ARRAY(&ev->mac_addr, macaddr); + + return QDF_STATUS_SUCCESS; +} + +/** + * ready_extract_mac_addr_list_tlv() - extract MAC address list from ready event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param macaddr: Pointer to hold number of MAC addresses + * + * Return: Pointer to addr list + */ +static wmi_host_mac_addr *ready_extract_mac_addr_list_tlv(wmi_unified_t wmi_hamdle, + void *evt_buf, uint8_t *num_mac) +{ + WMI_READY_EVENTID_param_tlvs *param_buf = NULL; + wmi_ready_event_fixed_param *ev = NULL; + + param_buf = (WMI_READY_EVENTID_param_tlvs *) evt_buf; + ev = param_buf->fixed_param; + + *num_mac = ev->num_extra_mac_addr; + + return (wmi_host_mac_addr *) param_buf->mac_addr_list; +} + +/** + * extract_ready_params_tlv() - Extract data from ready event apart from + * status, macaddr and version. + * @wmi_handle: Pointer to WMI handle. + * @evt_buf: Pointer to Ready event buffer. + * @ev_param: Pointer to host defined struct to copy the data from event. + * + * Return: QDF_STATUS_SUCCESS on success. 
+ */ +static QDF_STATUS extract_ready_event_params_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct wmi_host_ready_ev_param *ev_param) +{ + WMI_READY_EVENTID_param_tlvs *param_buf = NULL; + wmi_ready_event_fixed_param *ev = NULL; + + param_buf = (WMI_READY_EVENTID_param_tlvs *) evt_buf; + ev = param_buf->fixed_param; + + ev_param->status = ev->status; + ev_param->num_dscp_table = ev->num_dscp_table; + ev_param->num_extra_mac_addr = ev->num_extra_mac_addr; + ev_param->num_total_peer = ev->num_total_peers; + ev_param->num_extra_peer = ev->num_extra_peers; + /* Agile_cap in ready event is not supported in TLV target */ + ev_param->agile_capability = false; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_dbglog_data_len_tlv() - extract debuglog data length + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * + * Return: length + */ +static uint8_t *extract_dbglog_data_len_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t *len) +{ + WMI_DEBUG_MESG_EVENTID_param_tlvs *param_buf; + + param_buf = (WMI_DEBUG_MESG_EVENTID_param_tlvs *) evt_buf; + + *len = param_buf->num_bufp; + + return param_buf->bufp; +} + +/** + * extract_vdev_start_resp_tlv() - extract vdev start response + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param vdev_rsp: Pointer to hold vdev response + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_vdev_start_resp_tlv(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_vdev_start_resp *vdev_rsp) +{ + WMI_VDEV_START_RESP_EVENTID_param_tlvs *param_buf; + wmi_vdev_start_response_event_fixed_param *ev; + + param_buf = (WMI_VDEV_START_RESP_EVENTID_param_tlvs *) evt_buf; + if (!param_buf) { + qdf_print("Invalid start response event buffer\n"); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + if (!ev) { + qdf_print("Invalid start response event buffer\n"); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_zero(vdev_rsp, sizeof(*vdev_rsp)); 
+ + vdev_rsp->vdev_id = ev->vdev_id; + vdev_rsp->requestor_id = ev->requestor_id; + switch (ev->resp_type) { + case WMI_VDEV_START_RESP_EVENT: + vdev_rsp->resp_type = WMI_HOST_VDEV_START_RESP_EVENT; + break; + case WMI_VDEV_RESTART_RESP_EVENT: + vdev_rsp->resp_type = WMI_HOST_VDEV_RESTART_RESP_EVENT; + break; + default: + qdf_print("Invalid start response event buffer\n"); + break; + }; + vdev_rsp->status = ev->status; + vdev_rsp->chain_mask = ev->chain_mask; + vdev_rsp->smps_mode = ev->smps_mode; + vdev_rsp->mac_id = ev->mac_id; + vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams; + vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_vdev_delete_resp_tlv() - extract vdev delete response + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param delete_rsp: Pointer to hold vdev delete response + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_vdev_delete_resp_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct wmi_host_vdev_delete_resp *delete_rsp) +{ + WMI_VDEV_DELETE_RESP_EVENTID_param_tlvs *param_buf; + wmi_vdev_delete_resp_event_fixed_param *ev; + + param_buf = (WMI_VDEV_DELETE_RESP_EVENTID_param_tlvs *) evt_buf; + if (!param_buf) { + WMI_LOGE("Invalid vdev delete response event buffer\n"); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + if (!ev) { + WMI_LOGE("Invalid vdev delete response event\n"); + return QDF_STATUS_E_INVAL; + } + + qdf_mem_zero(delete_rsp, sizeof(*delete_rsp)); + delete_rsp->vdev_id = ev->vdev_id; + + return QDF_STATUS_SUCCESS; +} + + +/** + * extract_tbttoffset_num_vdevs_tlv() - extract tbtt offset num vdev + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param num_vdevs: Pointer to hold num vdev + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_tbttoffset_num_vdevs_tlv(void *wmi_hdl, + void *evt_buf, uint32_t *num_vdevs) +{ + 
WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *param_buf; + wmi_tbtt_offset_event_fixed_param *tbtt_offset_event; + uint32_t vdev_map; + + param_buf = (WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + qdf_print("Invalid tbtt update ext event buffer\n"); + return QDF_STATUS_E_INVAL; + } + tbtt_offset_event = param_buf->fixed_param; + vdev_map = tbtt_offset_event->vdev_map; + *num_vdevs = wmi_vdev_map_to_num_vdevs(vdev_map); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_ext_tbttoffset_num_vdevs_tlv() - extract ext tbtt offset num vdev + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param num_vdevs: Pointer to hold num vdev + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_ext_tbttoffset_num_vdevs_tlv(void *wmi_hdl, + void *evt_buf, uint32_t *num_vdevs) +{ + WMI_TBTTOFFSET_EXT_UPDATE_EVENTID_param_tlvs *param_buf; + wmi_tbtt_offset_ext_event_fixed_param *tbtt_offset_ext_event; + + param_buf = (WMI_TBTTOFFSET_EXT_UPDATE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + qdf_print("Invalid tbtt update ext event buffer\n"); + return QDF_STATUS_E_INVAL; + } + tbtt_offset_ext_event = param_buf->fixed_param; + + *num_vdevs = tbtt_offset_ext_event->num_vdevs; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_tbttoffset_update_params_tlv() - extract tbtt offset param + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param idx: Index referring to a vdev + * @param tbtt_param: Pointer to tbttoffset event param + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_tbttoffset_update_params_tlv(void *wmi_hdl, + void *evt_buf, uint8_t idx, + struct tbttoffset_params *tbtt_param) +{ + WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *param_buf; + wmi_tbtt_offset_event_fixed_param *tbtt_offset_event; + uint32_t vdev_map; + + param_buf = (WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *) evt_buf; + if (!param_buf) { + 
qdf_print("Invalid tbtt update event buffer\n"); + return QDF_STATUS_E_INVAL; + } + + tbtt_offset_event = param_buf->fixed_param; + vdev_map = tbtt_offset_event->vdev_map; + tbtt_param->vdev_id = wmi_vdev_map_to_vdev_id(vdev_map, idx); + if (tbtt_param->vdev_id == WLAN_INVALID_VDEV_ID) + return QDF_STATUS_E_INVAL; + tbtt_param->tbttoffset = + param_buf->tbttoffset_list[tbtt_param->vdev_id]; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_ext_tbttoffset_update_params_tlv() - extract ext tbtt offset param + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param idx: Index referring to a vdev + * @param tbtt_param: Pointer to tbttoffset event param + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_ext_tbttoffset_update_params_tlv(void *wmi_hdl, + void *evt_buf, uint8_t idx, + struct tbttoffset_params *tbtt_param) +{ + WMI_TBTTOFFSET_EXT_UPDATE_EVENTID_param_tlvs *param_buf; + wmi_tbtt_offset_info *tbtt_offset_info; + + param_buf = (WMI_TBTTOFFSET_EXT_UPDATE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + qdf_print("Invalid tbtt update event buffer\n"); + return QDF_STATUS_E_INVAL; + } + tbtt_offset_info = ¶m_buf->tbtt_offset_info[idx]; + + tbtt_param->vdev_id = tbtt_offset_info->vdev_id; + tbtt_param->tbttoffset = tbtt_offset_info->tbttoffset; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_mgmt_rx_params_tlv() - extract management rx params from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param hdr: Pointer to hold header + * @param bufp: Pointer to hold pointer to rx param buffer + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_mgmt_rx_params_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct mgmt_rx_event_params *hdr, + uint8_t **bufp) +{ + WMI_MGMT_RX_EVENTID_param_tlvs *param_tlvs = NULL; + wmi_mgmt_rx_hdr *ev_hdr = NULL; + int i; + + param_tlvs = (WMI_MGMT_RX_EVENTID_param_tlvs *) evt_buf; + if 
(!param_tlvs) { + WMI_LOGE("Get NULL point message from FW"); + return QDF_STATUS_E_INVAL; + } + + ev_hdr = param_tlvs->hdr; + if (!hdr) { + WMI_LOGE("Rx event is NULL"); + return QDF_STATUS_E_INVAL; + } + + hdr->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host( + ev_hdr->pdev_id); + + hdr->channel = ev_hdr->channel; + hdr->snr = ev_hdr->snr; + hdr->rate = ev_hdr->rate; + hdr->phy_mode = ev_hdr->phy_mode; + hdr->buf_len = ev_hdr->buf_len; + hdr->status = ev_hdr->status; + hdr->flags = ev_hdr->flags; + hdr->rssi = ev_hdr->rssi; + hdr->tsf_delta = ev_hdr->tsf_delta; + for (i = 0; i < ATH_MAX_ANTENNA; i++) + hdr->rssi_ctl[i] = ev_hdr->rssi_ctl[i]; + + *bufp = param_tlvs->bufp; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_vdev_stopped_param_tlv() - extract vdev stop param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param vdev_id: Pointer to hold vdev identifier + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_vdev_stopped_param_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t *vdev_id) +{ + WMI_VDEV_STOPPED_EVENTID_param_tlvs *param_buf; + wmi_vdev_stopped_event_fixed_param *resp_event; + + param_buf = (WMI_VDEV_STOPPED_EVENTID_param_tlvs *) evt_buf; + if (!param_buf) { + WMI_LOGE("Invalid event buffer"); + return QDF_STATUS_E_INVAL; + } + resp_event = param_buf->fixed_param; + *vdev_id = resp_event->vdev_id; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_vdev_roam_param_tlv() - extract vdev roam param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold roam param + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_vdev_roam_param_tlv(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_roam_event *param) +{ + WMI_ROAM_EVENTID_param_tlvs *param_buf; + wmi_roam_event_fixed_param *evt; + + param_buf = (WMI_ROAM_EVENTID_param_tlvs *) evt_buf; + if 
(!param_buf) { + WMI_LOGE("Invalid roam event buffer"); + return QDF_STATUS_E_INVAL; + } + + evt = param_buf->fixed_param; + qdf_mem_zero(param, sizeof(*param)); + + param->vdev_id = evt->vdev_id; + param->reason = evt->reason; + param->rssi = evt->rssi; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_vdev_scan_ev_param_tlv() - extract vdev scan param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold vdev scan param + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_vdev_scan_ev_param_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct scan_event *param) +{ + WMI_SCAN_EVENTID_param_tlvs *param_buf = NULL; + wmi_scan_event_fixed_param *evt = NULL; + + param_buf = (WMI_SCAN_EVENTID_param_tlvs *) evt_buf; + evt = param_buf->fixed_param; + + qdf_mem_zero(param, sizeof(*param)); + + switch (evt->event) { + case WMI_SCAN_EVENT_STARTED: + param->type = SCAN_EVENT_TYPE_STARTED; + break; + case WMI_SCAN_EVENT_COMPLETED: + param->type = SCAN_EVENT_TYPE_COMPLETED; + break; + case WMI_SCAN_EVENT_BSS_CHANNEL: + param->type = SCAN_EVENT_TYPE_BSS_CHANNEL; + break; + case WMI_SCAN_EVENT_FOREIGN_CHANNEL: + param->type = SCAN_EVENT_TYPE_FOREIGN_CHANNEL; + break; + case WMI_SCAN_EVENT_DEQUEUED: + param->type = SCAN_EVENT_TYPE_DEQUEUED; + break; + case WMI_SCAN_EVENT_PREEMPTED: + param->type = SCAN_EVENT_TYPE_PREEMPTED; + break; + case WMI_SCAN_EVENT_START_FAILED: + param->type = SCAN_EVENT_TYPE_START_FAILED; + break; + case WMI_SCAN_EVENT_RESTARTED: + param->type = SCAN_EVENT_TYPE_RESTARTED; + break; + case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT: + param->type = SCAN_EVENT_TYPE_FOREIGN_CHANNEL_EXIT; + break; + case WMI_SCAN_EVENT_MAX: + default: + param->type = SCAN_EVENT_TYPE_MAX; + break; + }; + + switch (evt->reason) { + case WMI_SCAN_REASON_NONE: + param->reason = SCAN_REASON_NONE; + break; + case WMI_SCAN_REASON_COMPLETED: + param->reason = SCAN_REASON_COMPLETED; + 
break; + case WMI_SCAN_REASON_CANCELLED: + param->reason = SCAN_REASON_CANCELLED; + break; + case WMI_SCAN_REASON_PREEMPTED: + param->reason = SCAN_REASON_PREEMPTED; + break; + case WMI_SCAN_REASON_TIMEDOUT: + param->reason = SCAN_REASON_TIMEDOUT; + break; + case WMI_SCAN_REASON_INTERNAL_FAILURE: + param->reason = SCAN_REASON_INTERNAL_FAILURE; + break; + case WMI_SCAN_REASON_SUSPENDED: + param->reason = SCAN_REASON_SUSPENDED; + break; + case WMI_SCAN_REASON_MAX: + param->reason = SCAN_REASON_MAX; + break; + default: + param->reason = SCAN_REASON_MAX; + break; + }; + + param->chan_freq = evt->channel_freq; + param->requester = evt->requestor; + param->scan_id = evt->scan_id; + param->vdev_id = evt->vdev_id; + param->timestamp = evt->tsf_timestamp; + + return QDF_STATUS_SUCCESS; +} + +#ifdef CONVERGED_TDLS_ENABLE +/** + * extract_vdev_tdls_ev_param_tlv() - extract vdev tdls param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold vdev tdls param + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_vdev_tdls_ev_param_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct tdls_event_info *param) +{ + WMI_TDLS_PEER_EVENTID_param_tlvs *param_buf; + wmi_tdls_peer_event_fixed_param *evt; + + param_buf = (WMI_TDLS_PEER_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("%s: NULL param_buf", __func__); + return QDF_STATUS_E_NULL_VALUE; + } + + evt = param_buf->fixed_param; + + qdf_mem_zero(param, sizeof(*param)); + + param->vdev_id = evt->vdev_id; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&evt->peer_macaddr, + param->peermac.bytes); + switch (evt->peer_status) { + case WMI_TDLS_SHOULD_DISCOVER: + param->message_type = TDLS_SHOULD_DISCOVER; + break; + case WMI_TDLS_SHOULD_TEARDOWN: + param->message_type = TDLS_SHOULD_TEARDOWN; + break; + case WMI_TDLS_PEER_DISCONNECTED: + param->message_type = TDLS_PEER_DISCONNECTED; + break; + case WMI_TDLS_CONNECTION_TRACKER_NOTIFICATION: + 
param->message_type = TDLS_CONNECTION_TRACKER_NOTIFY; + break; + default: + WMI_LOGE("%s: Discarding unknown tdls event %d from target", + __func__, evt->peer_status); + return QDF_STATUS_E_INVAL; + }; + + switch (evt->peer_reason) { + case WMI_TDLS_TEARDOWN_REASON_TX: + param->peer_reason = TDLS_TEARDOWN_TX; + break; + case WMI_TDLS_TEARDOWN_REASON_RSSI: + param->peer_reason = TDLS_TEARDOWN_RSSI; + break; + case WMI_TDLS_TEARDOWN_REASON_SCAN: + param->peer_reason = TDLS_TEARDOWN_SCAN; + break; + case WMI_TDLS_DISCONNECTED_REASON_PEER_DELETE: + param->peer_reason = TDLS_DISCONNECTED_PEER_DELETE; + break; + case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT: + param->peer_reason = TDLS_TEARDOWN_PTR_TIMEOUT; + break; + case WMI_TDLS_TEARDOWN_REASON_BAD_PTR: + param->peer_reason = TDLS_TEARDOWN_BAD_PTR; + break; + case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE: + param->peer_reason = TDLS_TEARDOWN_NO_RSP; + break; + case WMI_TDLS_ENTER_BUF_STA: + param->peer_reason = TDLS_PEER_ENTER_BUF_STA; + break; + case WMI_TDLS_EXIT_BUF_STA: + param->peer_reason = TDLS_PEER_EXIT_BUF_STA; + break; + case WMI_TDLS_ENTER_BT_BUSY_MODE: + param->peer_reason = TDLS_ENTER_BT_BUSY; + break; + case WMI_TDLS_EXIT_BT_BUSY_MODE: + param->peer_reason = TDLS_EXIT_BT_BUSY; + break; + case WMI_TDLS_SCAN_STARTED_EVENT: + param->peer_reason = TDLS_SCAN_STARTED; + break; + case WMI_TDLS_SCAN_COMPLETED_EVENT: + param->peer_reason = TDLS_SCAN_COMPLETED; + break; + + default: + WMI_LOGE("%s: unknown reason %d in tdls event %d from target", + __func__, evt->peer_reason, evt->peer_status); + return QDF_STATUS_E_INVAL; + }; + + WMI_LOGD("%s: tdls event, peer: %pM, type: 0x%x, reason: %d, vdev: %d", + __func__, param->peermac.bytes, param->message_type, + param->peer_reason, param->vdev_id); + + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * extract_mgmt_tx_compl_param_tlv() - extract MGMT tx completion event params + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to 
hold MGMT TX completion params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_mgmt_tx_compl_param_tlv(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_mgmt_tx_compl_event *param) +{ + WMI_MGMT_TX_COMPLETION_EVENTID_param_tlvs *param_buf; + wmi_mgmt_tx_compl_event_fixed_param *cmpl_params; + + param_buf = (WMI_MGMT_TX_COMPLETION_EVENTID_param_tlvs *) + evt_buf; + if (!param_buf) { + WMI_LOGE("%s: Invalid mgmt Tx completion event", __func__); + return QDF_STATUS_E_INVAL; + } + cmpl_params = param_buf->fixed_param; + + param->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host( + cmpl_params->pdev_id); + param->desc_id = cmpl_params->desc_id; + param->status = cmpl_params->status; + param->ppdu_id = cmpl_params->ppdu_id; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_offchan_data_tx_compl_param_tlv() - + * extract Offchan data tx completion event params + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold offchan data TX completion params + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_offchan_data_tx_compl_param_tlv( + wmi_unified_t wmi_handle, void *evt_buf, + struct wmi_host_offchan_data_tx_compl_event *param) +{ + WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID_param_tlvs *param_buf; + wmi_offchan_data_tx_compl_event_fixed_param *cmpl_params; + + param_buf = (WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID_param_tlvs *) + evt_buf; + if (!param_buf) { + WMI_LOGE("%s: Invalid offchan data Tx compl event", __func__); + return QDF_STATUS_E_INVAL; + } + cmpl_params = param_buf->fixed_param; + + param->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host( + cmpl_params->pdev_id); + param->desc_id = cmpl_params->desc_id; + param->status = cmpl_params->status; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_csa_switch_count_status_tlv() - extract pdev csa switch count + * status tlv + * @wmi_handle: wmi handle + * 
@param evt_buf: pointer to event buffer + * @param param: Pointer to hold csa switch count status event param + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_pdev_csa_switch_count_status_tlv( + wmi_unified_t wmi_handle, + void *evt_buf, + struct pdev_csa_switch_count_status *param) +{ + WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID_param_tlvs *param_buf; + wmi_pdev_csa_switch_count_status_event_fixed_param *csa_status; + + param_buf = (WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID_param_tlvs *) + evt_buf; + if (!param_buf) { + WMI_LOGE("%s: Invalid CSA status event\n", __func__); + return QDF_STATUS_E_INVAL; + } + + csa_status = param_buf->fixed_param; + + param->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host( + csa_status->pdev_id); + param->current_switch_count = csa_status->current_switch_count; + param->num_vdevs = csa_status->num_vdevs; + param->vdev_ids = param_buf->vdev_ids; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_tpc_config_ev_param_tlv() - extract pdev tpc configuration + * param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold tpc configuration + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_pdev_tpc_config_ev_param_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_pdev_tpc_config_event *param) +{ + wmi_pdev_tpc_config_event_fixed_param *event = + (wmi_pdev_tpc_config_event_fixed_param *)evt_buf; + + if (!event) { + WMI_LOGE("Invalid event buffer"); + return QDF_STATUS_E_INVAL; + } + + param->pdev_id = event->pdev_id; + param->regDomain = event->regDomain; + param->chanFreq = event->chanFreq; + param->phyMode = event->phyMode; + param->twiceAntennaReduction = event->twiceAntennaReduction; + param->twiceMaxRDPower = event->twiceMaxRDPower; + param->powerLimit = event->powerLimit; + param->rateMax = event->rateMax; + param->numTxChain = event->numTxChain; + param->ctl = event->ctl; + 
param->flags = event->flags; + + qdf_mem_copy(param->maxRegAllowedPower, event->maxRegAllowedPower, + sizeof(param->maxRegAllowedPower)); + qdf_mem_copy(param->maxRegAllowedPowerAGCDD, + event->maxRegAllowedPowerAGCDD, + sizeof(param->maxRegAllowedPowerAGCDD)); + qdf_mem_copy(param->maxRegAllowedPowerAGSTBC, + event->maxRegAllowedPowerAGSTBC, + sizeof(param->maxRegAllowedPowerAGSTBC)); + qdf_mem_copy(param->maxRegAllowedPowerAGTXBF, + event->maxRegAllowedPowerAGTXBF, + sizeof(param->maxRegAllowedPowerAGTXBF)); + WMI_LOGD("%s:extract success", __func__); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_swba_num_vdevs_tlv() - extract swba num vdevs from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param num_vdevs: Pointer to hold num vdevs + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_swba_num_vdevs_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t *num_vdevs) +{ + WMI_HOST_SWBA_EVENTID_param_tlvs *param_buf; + wmi_host_swba_event_fixed_param *swba_event; + uint32_t vdev_map; + + param_buf = (WMI_HOST_SWBA_EVENTID_param_tlvs *) evt_buf; + if (!param_buf) { + WMI_LOGE("Invalid swba event buffer"); + return QDF_STATUS_E_INVAL; + } + + swba_event = param_buf->fixed_param; + *num_vdevs = swba_event->num_vdevs; + if (!(*num_vdevs)) { + vdev_map = swba_event->vdev_map; + *num_vdevs = wmi_vdev_map_to_num_vdevs(vdev_map); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_swba_tim_info_tlv() - extract swba tim info from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param idx: Index to bcn info + * @param tim_info: Pointer to hold tim info + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_swba_tim_info_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t idx, wmi_host_tim_info *tim_info) +{ + WMI_HOST_SWBA_EVENTID_param_tlvs *param_buf; + wmi_tim_info *tim_info_ev; + + param_buf = 
(WMI_HOST_SWBA_EVENTID_param_tlvs *) evt_buf; + if (!param_buf) { + WMI_LOGE("Invalid swba event buffer"); + return QDF_STATUS_E_INVAL; + } + + tim_info_ev = ¶m_buf->tim_info[idx]; + + tim_info->tim_len = tim_info_ev->tim_len; + tim_info->tim_mcast = tim_info_ev->tim_mcast; + qdf_mem_copy(tim_info->tim_bitmap, tim_info_ev->tim_bitmap, + (sizeof(uint32_t) * WMI_TIM_BITMAP_ARRAY_SIZE)); + tim_info->tim_changed = tim_info_ev->tim_changed; + tim_info->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending; + tim_info->vdev_id = tim_info_ev->vdev_id; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_swba_noa_info_tlv() - extract swba NoA information from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param idx: Index to bcn info + * @param p2p_desc: Pointer to hold p2p NoA info + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_swba_noa_info_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t idx, wmi_host_p2p_noa_info *p2p_desc) +{ + WMI_HOST_SWBA_EVENTID_param_tlvs *param_buf; + wmi_p2p_noa_info *p2p_noa_info; + uint8_t i = 0; + + param_buf = (WMI_HOST_SWBA_EVENTID_param_tlvs *) evt_buf; + if (!param_buf) { + WMI_LOGE("Invalid swba event buffer"); + return QDF_STATUS_E_INVAL; + } + + p2p_noa_info = ¶m_buf->p2p_noa_info[idx]; + + p2p_desc->modified = false; + p2p_desc->num_descriptors = 0; + if (WMI_UNIFIED_NOA_ATTR_IS_MODIFIED(p2p_noa_info)) { + p2p_desc->modified = true; + p2p_desc->index = + (uint8_t) WMI_UNIFIED_NOA_ATTR_INDEX_GET(p2p_noa_info); + p2p_desc->oppPS = + (uint8_t) WMI_UNIFIED_NOA_ATTR_OPP_PS_GET(p2p_noa_info); + p2p_desc->ctwindow = + (uint8_t) WMI_UNIFIED_NOA_ATTR_CTWIN_GET(p2p_noa_info); + p2p_desc->num_descriptors = + (uint8_t) WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET + (p2p_noa_info); + for (i = 0; i < p2p_desc->num_descriptors; i++) { + p2p_desc->noa_descriptors[i].type_count = + (uint8_t) p2p_noa_info->noa_descriptors[i]. 
+ type_count; + p2p_desc->noa_descriptors[i].duration = + p2p_noa_info->noa_descriptors[i].duration; + p2p_desc->noa_descriptors[i].interval = + p2p_noa_info->noa_descriptors[i].interval; + p2p_desc->noa_descriptors[i].start_time = + p2p_noa_info->noa_descriptors[i].start_time; + } + p2p_desc->vdev_id = p2p_noa_info->vdev_id; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef CONVERGED_P2P_ENABLE +/** + * extract_p2p_noa_ev_param_tlv() - extract p2p noa information from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold p2p noa info + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_p2p_noa_ev_param_tlv( + wmi_unified_t wmi_handle, void *evt_buf, + struct p2p_noa_info *param) +{ + WMI_P2P_NOA_EVENTID_param_tlvs *param_tlvs; + wmi_p2p_noa_event_fixed_param *fixed_param; + uint8_t i; + wmi_p2p_noa_info *wmi_noa_info; + uint8_t *buf_ptr; + uint32_t descriptors; + + param_tlvs = (WMI_P2P_NOA_EVENTID_param_tlvs *) evt_buf; + if (!param_tlvs) { + WMI_LOGE("%s: Invalid P2P NoA event buffer", __func__); + return QDF_STATUS_E_INVAL; + } + + if (!param) { + WMI_LOGE("noa information param is null"); + return QDF_STATUS_E_INVAL; + } + + fixed_param = param_tlvs->fixed_param; + buf_ptr = (uint8_t *) fixed_param; + buf_ptr += sizeof(wmi_p2p_noa_event_fixed_param); + wmi_noa_info = (wmi_p2p_noa_info *) (buf_ptr); + + if (!WMI_UNIFIED_NOA_ATTR_IS_MODIFIED(wmi_noa_info)) { + WMI_LOGE("%s: noa attr is not modified", __func__); + return QDF_STATUS_E_INVAL; + } + + param->vdev_id = fixed_param->vdev_id; + param->index = + (uint8_t) WMI_UNIFIED_NOA_ATTR_INDEX_GET(wmi_noa_info); + param->opps_ps = + (uint8_t) WMI_UNIFIED_NOA_ATTR_OPP_PS_GET(wmi_noa_info); + param->ct_window = + (uint8_t) WMI_UNIFIED_NOA_ATTR_CTWIN_GET(wmi_noa_info); + descriptors = WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(wmi_noa_info); + param->num_desc = (uint8_t) descriptors; + if (param->num_desc > 
WMI_P2P_MAX_NOA_DESCRIPTORS) { + WMI_LOGE("%s: invalid num desc:%d", __func__, + param->num_desc); + return QDF_STATUS_E_INVAL; + } + + WMI_LOGD("%s:index %u, opps_ps %u, ct_window %u, num_descriptors = %u", __func__, + param->index, param->opps_ps, param->ct_window, + param->num_desc); + for (i = 0; i < param->num_desc; i++) { + param->noa_desc[i].type_count = + (uint8_t) wmi_noa_info->noa_descriptors[i]. + type_count; + param->noa_desc[i].duration = + wmi_noa_info->noa_descriptors[i].duration; + param->noa_desc[i].interval = + wmi_noa_info->noa_descriptors[i].interval; + param->noa_desc[i].start_time = + wmi_noa_info->noa_descriptors[i].start_time; + WMI_LOGD("%s:NoA descriptor[%d] type_count %u, duration %u, interval %u, start_time = %u", + __func__, i, param->noa_desc[i].type_count, + param->noa_desc[i].duration, + param->noa_desc[i].interval, + param->noa_desc[i].start_time); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_p2p_lo_stop_ev_param_tlv() - extract p2p lo stop + * information from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold p2p lo stop event information + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_p2p_lo_stop_ev_param_tlv( + wmi_unified_t wmi_handle, void *evt_buf, + struct p2p_lo_event *param) +{ + WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID_param_tlvs *param_tlvs; + wmi_p2p_lo_stopped_event_fixed_param *lo_param; + + param_tlvs = (WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID_param_tlvs *) + evt_buf; + if (!param_tlvs) { + WMI_LOGE("%s: Invalid P2P lo stop event buffer", __func__); + return QDF_STATUS_E_INVAL; + } + + if (!param) { + WMI_LOGE("lo stop event param is null"); + return QDF_STATUS_E_INVAL; + } + + lo_param = param_tlvs->fixed_param; + param->vdev_id = lo_param->vdev_id; + param->reason_code = lo_param->reason; + WMI_LOGD("%s: vdev_id:%d, reason:%d", __func__, + param->vdev_id, param->reason_code); + + return 
QDF_STATUS_SUCCESS; +} + +static QDF_STATUS +send_set_mac_addr_rx_filter_cmd_tlv(wmi_unified_t wmi_handle, + struct p2p_set_mac_filter *param) +{ + wmi_vdev_add_mac_addr_to_rx_filter_cmd_fixed_param *cmd; + uint32_t len; + wmi_buf_t buf; + int ret; + + if (!wmi_handle) { + WMI_LOGE("WMA context is invald!"); + return QDF_STATUS_E_INVAL; + } + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("Failed allocate wmi buffer"); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_vdev_add_mac_addr_to_rx_filter_cmd_fixed_param *) + wmi_buf_data(buf); + + WMITLV_SET_HDR( + &cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_add_mac_addr_to_rx_filter_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_vdev_add_mac_addr_to_rx_filter_cmd_fixed_param)); + + cmd->vdev_id = param->vdev_id; + cmd->freq = param->freq; + WMI_CHAR_ARRAY_TO_MAC_ADDR(param->mac, &cmd->mac_addr); + if (param->set) + cmd->enable = 1; + else + cmd->enable = 0; + WMI_LOGD("set random mac rx vdev %d freq %d set %d %pM", + param->vdev_id, param->freq, param->set, param->mac); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID); + if (ret) { + WMI_LOGE("Failed to send action frame random mac cmd"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_mac_addr_rx_filter_evt_param_tlv( + wmi_unified_t wmi_handle, void *evt_buf, + struct p2p_set_mac_filter_evt *param) +{ + WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENTID_param_tlvs *param_buf; + wmi_vdev_add_mac_addr_to_rx_filter_status_event_fixed_param *event; + + param_buf = + (WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENTID_param_tlvs *) + evt_buf; + if (!param_buf) { + WMI_LOGE("Invalid action frame filter mac event"); + return QDF_STATUS_E_INVAL; + } + event = param_buf->fixed_param; + if (!event) { + WMI_LOGE("Invalid fixed param"); + return QDF_STATUS_E_INVAL; + } + param->vdev_id = event->vdev_id; + param->status = 
event->status; + + return QDF_STATUS_SUCCESS; +} +#endif /* End of CONVERGED_P2P_ENABLE */ + +/** + * extract_peer_sta_kickout_ev_tlv() - extract peer sta kickout event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param ev: Pointer to hold peer param + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_peer_sta_kickout_ev_tlv(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_peer_sta_kickout_event *ev) +{ + WMI_PEER_STA_KICKOUT_EVENTID_param_tlvs *param_buf = NULL; + wmi_peer_sta_kickout_event_fixed_param *kickout_event = NULL; + + param_buf = (WMI_PEER_STA_KICKOUT_EVENTID_param_tlvs *) evt_buf; + kickout_event = param_buf->fixed_param; + + WMI_MAC_ADDR_TO_CHAR_ARRAY(&kickout_event->peer_macaddr, + ev->peer_macaddr); + + ev->reason = kickout_event->reason; + ev->rssi = kickout_event->rssi; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_all_stats_counts_tlv() - extract all stats count from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param stats_param: Pointer to hold stats count + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_all_stats_counts_tlv(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_stats_event *stats_param) +{ + wmi_stats_event_fixed_param *ev; + wmi_per_chain_rssi_stats *rssi_event; + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + uint64_t min_data_len; + + qdf_mem_zero(stats_param, sizeof(*stats_param)); + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *) evt_buf; + ev = (wmi_stats_event_fixed_param *) param_buf->fixed_param; + rssi_event = param_buf->chain_stats; + if (!ev) { + WMI_LOGE("%s: event fixed param NULL\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + if (param_buf->num_data > WMI_SVC_MSG_MAX_SIZE - sizeof(*ev)) { + WMI_LOGE("num_data : %u is invalid", param_buf->num_data); + return QDF_STATUS_E_FAULT; + } + + switch (ev->stats_id) { + case WMI_REQUEST_PEER_STAT: + 
stats_param->stats_id |= WMI_HOST_REQUEST_PEER_STAT; + break; + + case WMI_REQUEST_AP_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_AP_STAT; + break; + + case WMI_REQUEST_PDEV_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_PDEV_STAT; + break; + + case WMI_REQUEST_VDEV_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_VDEV_STAT; + break; + + case WMI_REQUEST_BCNFLT_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_BCNFLT_STAT; + break; + + case WMI_REQUEST_VDEV_RATE_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_VDEV_RATE_STAT; + break; + + case WMI_REQUEST_BCN_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_BCN_STAT; + break; + + case WMI_REQUEST_PEER_EXTD2_STAT: + stats_param->stats_id |= WMI_HOST_REQUEST_PEER_ADV_STATS; + break; + + default: + stats_param->stats_id = 0; + break; + + } + + /* ev->num_*_stats may cause uint32_t overflow, so use uint64_t + * to save total length calculated + */ + min_data_len = + (((uint64_t)ev->num_pdev_stats) * sizeof(wmi_pdev_stats)) + + (((uint64_t)ev->num_vdev_stats) * sizeof(wmi_vdev_stats)) + + (((uint64_t)ev->num_peer_stats) * sizeof(wmi_peer_stats)) + + (((uint64_t)ev->num_bcnflt_stats) * + sizeof(wmi_bcnfilter_stats_t)) + + (((uint64_t)ev->num_chan_stats) * sizeof(wmi_chan_stats)) + + (((uint64_t)ev->num_mib_stats) * sizeof(wmi_mib_stats)) + + (((uint64_t)ev->num_bcn_stats) * sizeof(wmi_bcn_stats)) + + (((uint64_t)ev->num_peer_extd_stats) * + sizeof(wmi_peer_extd_stats)); + if (param_buf->num_data != min_data_len) { + WMI_LOGE("data len: %u isn't same as calculated: %llu", + param_buf->num_data, min_data_len); + return QDF_STATUS_E_FAULT; + } + + stats_param->last_event = ev->last_event; + stats_param->num_pdev_stats = ev->num_pdev_stats; + stats_param->num_pdev_ext_stats = 0; + stats_param->num_vdev_stats = ev->num_vdev_stats; + stats_param->num_peer_stats = ev->num_peer_stats; + stats_param->num_bcnflt_stats = ev->num_bcnflt_stats; + stats_param->num_chan_stats = ev->num_chan_stats; + 
stats_param->num_bcn_stats = ev->num_bcn_stats; + stats_param->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host( + ev->pdev_id); + + /* if chain_stats is not populated */ + if (!param_buf->chain_stats || !param_buf->num_chain_stats) + return QDF_STATUS_SUCCESS; + + if (WMITLV_TAG_STRUC_wmi_per_chain_rssi_stats != + WMITLV_GET_TLVTAG(rssi_event->tlv_header)) + return QDF_STATUS_SUCCESS; + + if (WMITLV_GET_STRUCT_TLVLEN(wmi_per_chain_rssi_stats) != + WMITLV_GET_TLVLEN(rssi_event->tlv_header)) + return QDF_STATUS_SUCCESS; + + if (rssi_event->num_per_chain_rssi_stats >= + WMITLV_GET_TLVLEN(rssi_event->tlv_header)) { + WMI_LOGE("num_per_chain_rssi_stats:%u is out of bounds", + rssi_event->num_per_chain_rssi_stats); + return QDF_STATUS_E_INVAL; + } + stats_param->num_rssi_stats = rssi_event->num_per_chain_rssi_stats; + + /* if peer_adv_stats is not populated */ + if (!param_buf->num_peer_extd2_stats) + return QDF_STATUS_SUCCESS; + + stats_param->num_peer_adv_stats = param_buf->num_peer_extd2_stats; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_tx_stats() - extract pdev tx stats from event + */ +static void extract_pdev_tx_stats(wmi_host_dbg_tx_stats *tx, struct wlan_dbg_tx_stats *tx_stats) +{ + /* Tx Stats */ + tx->comp_queued = tx_stats->comp_queued; + tx->comp_delivered = tx_stats->comp_delivered; + tx->msdu_enqued = tx_stats->msdu_enqued; + tx->mpdu_enqued = tx_stats->mpdu_enqued; + tx->wmm_drop = tx_stats->wmm_drop; + tx->local_enqued = tx_stats->local_enqued; + tx->local_freed = tx_stats->local_freed; + tx->hw_queued = tx_stats->hw_queued; + tx->hw_reaped = tx_stats->hw_reaped; + tx->underrun = tx_stats->underrun; + tx->tx_abort = tx_stats->tx_abort; + tx->mpdus_requed = tx_stats->mpdus_requed; + tx->data_rc = tx_stats->data_rc; + tx->self_triggers = tx_stats->self_triggers; + tx->sw_retry_failure = tx_stats->sw_retry_failure; + tx->illgl_rate_phy_err = tx_stats->illgl_rate_phy_err; + tx->pdev_cont_xretry = tx_stats->pdev_cont_xretry; + 
tx->pdev_tx_timeout = tx_stats->pdev_tx_timeout; + tx->pdev_resets = tx_stats->pdev_resets; + tx->stateless_tid_alloc_failure = tx_stats->stateless_tid_alloc_failure; + tx->phy_underrun = tx_stats->phy_underrun; + tx->txop_ovf = tx_stats->txop_ovf; + + return; +} + + +/** + * extract_pdev_rx_stats() - extract pdev rx stats from event + */ +static void extract_pdev_rx_stats(wmi_host_dbg_rx_stats *rx, struct wlan_dbg_rx_stats *rx_stats) +{ + /* Rx Stats */ + rx->mid_ppdu_route_change = rx_stats->mid_ppdu_route_change; + rx->status_rcvd = rx_stats->status_rcvd; + rx->r0_frags = rx_stats->r0_frags; + rx->r1_frags = rx_stats->r1_frags; + rx->r2_frags = rx_stats->r2_frags; + /* Only TLV */ + rx->r3_frags = 0; + rx->htt_msdus = rx_stats->htt_msdus; + rx->htt_mpdus = rx_stats->htt_mpdus; + rx->loc_msdus = rx_stats->loc_msdus; + rx->loc_mpdus = rx_stats->loc_mpdus; + rx->oversize_amsdu = rx_stats->oversize_amsdu; + rx->phy_errs = rx_stats->phy_errs; + rx->phy_err_drop = rx_stats->phy_err_drop; + rx->mpdu_errs = rx_stats->mpdu_errs; + + return; +} + +/** + * extract_pdev_stats_tlv() - extract pdev stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into pdev stats + * @param pdev_stats: Pointer to hold pdev stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_pdev_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, wmi_host_pdev_stats *pdev_stats) +{ + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + wmi_stats_event_fixed_param *ev_param; + uint8_t *data; + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *) evt_buf; + ev_param = (wmi_stats_event_fixed_param *) param_buf->fixed_param; + + data = param_buf->data; + + if (index < ev_param->num_pdev_stats) { + wmi_pdev_stats *ev = (wmi_pdev_stats *) ((data) + + (index * sizeof(wmi_pdev_stats))); + + pdev_stats->chan_nf = ev->chan_nf; + pdev_stats->tx_frame_count = ev->tx_frame_count; + 
pdev_stats->rx_frame_count = ev->rx_frame_count; + pdev_stats->rx_clear_count = ev->rx_clear_count; + pdev_stats->cycle_count = ev->cycle_count; + pdev_stats->phy_err_count = ev->phy_err_count; + pdev_stats->chan_tx_pwr = ev->chan_tx_pwr; + + extract_pdev_tx_stats(&(pdev_stats->pdev_stats.tx), + &(ev->pdev_stats.tx)); + extract_pdev_rx_stats(&(pdev_stats->pdev_stats.rx), + &(ev->pdev_stats.rx)); + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_unit_test_tlv() - extract unit test data + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param unit_test: pointer to hold unit test data + * @param maxspace: Amount of space in evt_buf + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_unit_test_tlv(wmi_unified_t wmi_handle, + void *evt_buf, wmi_unit_test_event *unit_test, uint32_t maxspace) +{ + WMI_UNIT_TEST_EVENTID_param_tlvs *param_buf; + wmi_unit_test_event_fixed_param *ev_param; + uint32_t num_bufp; + uint32_t copy_size; + uint8_t *bufp; + + param_buf = (WMI_UNIT_TEST_EVENTID_param_tlvs *) evt_buf; + ev_param = param_buf->fixed_param; + bufp = param_buf->bufp; + num_bufp = param_buf->num_bufp; + unit_test->vdev_id = ev_param->vdev_id; + unit_test->module_id = ev_param->module_id; + unit_test->diag_token = ev_param->diag_token; + unit_test->flag = ev_param->flag; + unit_test->payload_len = ev_param->payload_len; + WMI_LOGI("%s:vdev_id:%d mod_id:%d diag_token:%d flag:%d\n", __func__, + ev_param->vdev_id, + ev_param->module_id, + ev_param->diag_token, + ev_param->flag); + WMI_LOGD("%s: Unit-test data given below %d", __func__, num_bufp); + qdf_trace_hex_dump(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG, + bufp, num_bufp); + copy_size = (num_bufp < maxspace) ? 
num_bufp : maxspace; + qdf_mem_copy(unit_test->buffer, bufp, copy_size); + unit_test->buffer_len = copy_size; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_ext_stats_tlv() - extract extended pdev stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into extended pdev stats + * @param pdev_ext_stats: Pointer to hold extended pdev stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_pdev_ext_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, wmi_host_pdev_ext_stats *pdev_ext_stats) +{ + return QDF_STATUS_SUCCESS; +} + +/** + * extract_vdev_stats_tlv() - extract vdev stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into vdev stats + * @param vdev_stats: Pointer to hold vdev stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_vdev_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, wmi_host_vdev_stats *vdev_stats) +{ + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + wmi_stats_event_fixed_param *ev_param; + uint8_t *data; + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *) evt_buf; + ev_param = (wmi_stats_event_fixed_param *) param_buf->fixed_param; + data = (uint8_t *) param_buf->data; + + if (index < ev_param->num_vdev_stats) { + wmi_vdev_stats *ev = (wmi_vdev_stats *) ((data) + + ((ev_param->num_pdev_stats) * + sizeof(wmi_pdev_stats)) + + (index * sizeof(wmi_vdev_stats))); + + vdev_stats->vdev_id = ev->vdev_id; + vdev_stats->vdev_snr.bcn_snr = ev->vdev_snr.bcn_snr; + vdev_stats->vdev_snr.dat_snr = ev->vdev_snr.dat_snr; + + OS_MEMCPY(vdev_stats->tx_frm_cnt, ev->tx_frm_cnt, + sizeof(ev->tx_frm_cnt)); + vdev_stats->rx_frm_cnt = ev->rx_frm_cnt; + OS_MEMCPY(vdev_stats->multiple_retry_cnt, + ev->multiple_retry_cnt, + sizeof(ev->multiple_retry_cnt)); + OS_MEMCPY(vdev_stats->fail_cnt, ev->fail_cnt, + 
sizeof(ev->fail_cnt)); + vdev_stats->rts_fail_cnt = ev->rts_fail_cnt; + vdev_stats->rts_succ_cnt = ev->rts_succ_cnt; + vdev_stats->rx_err_cnt = ev->rx_err_cnt; + vdev_stats->rx_discard_cnt = ev->rx_discard_cnt; + vdev_stats->ack_fail_cnt = ev->ack_fail_cnt; + OS_MEMCPY(vdev_stats->tx_rate_history, ev->tx_rate_history, + sizeof(ev->tx_rate_history)); + OS_MEMCPY(vdev_stats->bcn_rssi_history, ev->bcn_rssi_history, + sizeof(ev->bcn_rssi_history)); + + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_per_chain_rssi_stats_tlv() - api to extract rssi stats from event + * buffer + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @index: Index into vdev stats + * @rssi_stats: Pointer to hold rssi stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_per_chain_rssi_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, + struct wmi_host_per_chain_rssi_stats *rssi_stats) +{ + uint8_t *data; + wmi_rssi_stats *fw_rssi_stats; + wmi_per_chain_rssi_stats *rssi_event; + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + + if (!evt_buf) { + WMI_LOGE("evt_buf is null"); + return QDF_STATUS_E_NULL_VALUE; + } + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *) evt_buf; + rssi_event = param_buf->chain_stats; + + if (index >= rssi_event->num_per_chain_rssi_stats) { + WMI_LOGE("invalid index"); + return QDF_STATUS_E_INVAL; + } + + data = ((uint8_t *)(&rssi_event[1])) + WMI_TLV_HDR_SIZE; + fw_rssi_stats = &((wmi_rssi_stats *)data)[index]; + + rssi_stats->vdev_id = fw_rssi_stats->vdev_id; + qdf_mem_copy(rssi_stats->rssi_avg_beacon, + fw_rssi_stats->rssi_avg_beacon, + sizeof(fw_rssi_stats->rssi_avg_beacon)); + qdf_mem_copy(rssi_stats->rssi_avg_data, + fw_rssi_stats->rssi_avg_data, + sizeof(fw_rssi_stats->rssi_avg_data)); + qdf_mem_copy(&rssi_stats->peer_macaddr, + &fw_rssi_stats->peer_macaddr, + sizeof(fw_rssi_stats->peer_macaddr)); + + return QDF_STATUS_SUCCESS; +} + + + +/** + * extract_bcn_stats_tlv() 
- extract bcn stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into vdev stats + * @param bcn_stats: Pointer to hold bcn stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_bcn_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, wmi_host_bcn_stats *bcn_stats) +{ + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + wmi_stats_event_fixed_param *ev_param; + uint8_t *data; + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *) evt_buf; + ev_param = (wmi_stats_event_fixed_param *) param_buf->fixed_param; + data = (uint8_t *) param_buf->data; + + if (index < ev_param->num_bcn_stats) { + wmi_bcn_stats *ev = (wmi_bcn_stats *) ((data) + + ((ev_param->num_pdev_stats) * sizeof(wmi_pdev_stats)) + + ((ev_param->num_vdev_stats) * sizeof(wmi_vdev_stats)) + + ((ev_param->num_peer_stats) * sizeof(wmi_peer_stats)) + + ((ev_param->num_chan_stats) * sizeof(wmi_chan_stats)) + + ((ev_param->num_mib_stats) * sizeof(wmi_mib_stats)) + + (index * sizeof(wmi_bcn_stats))); + + bcn_stats->vdev_id = ev->vdev_id; + bcn_stats->tx_bcn_succ_cnt = ev->tx_bcn_succ_cnt; + bcn_stats->tx_bcn_outage_cnt = ev->tx_bcn_outage_cnt; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_peer_stats_tlv() - extract peer stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into peer stats + * @param peer_stats: Pointer to hold peer stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_peer_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, wmi_host_peer_stats *peer_stats) +{ + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + wmi_stats_event_fixed_param *ev_param; + uint8_t *data; + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *) evt_buf; + ev_param = (wmi_stats_event_fixed_param *) param_buf->fixed_param; + data = (uint8_t *) param_buf->data; + + if (index 
< ev_param->num_peer_stats) { + wmi_peer_stats *ev = (wmi_peer_stats *) ((data) + + ((ev_param->num_pdev_stats) * sizeof(wmi_pdev_stats)) + + ((ev_param->num_vdev_stats) * sizeof(wmi_vdev_stats)) + + (index * sizeof(wmi_peer_stats))); + + OS_MEMSET(peer_stats, 0, sizeof(wmi_host_peer_stats)); + + OS_MEMCPY(&(peer_stats->peer_macaddr), + &(ev->peer_macaddr), sizeof(wmi_mac_addr)); + + peer_stats->peer_rssi = ev->peer_rssi; + peer_stats->peer_tx_rate = ev->peer_tx_rate; + peer_stats->peer_rx_rate = ev->peer_rx_rate; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_bcnflt_stats_tlv() - extract bcn fault stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into bcn fault stats + * @param bcnflt_stats: Pointer to hold bcn fault stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_bcnflt_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, wmi_host_bcnflt_stats *peer_stats) +{ + return QDF_STATUS_SUCCESS; +} + +/** + * extract_peer_adv_stats_tlv() - extract adv peer stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into extended peer stats + * @param peer_adv_stats: Pointer to hold adv peer stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_peer_adv_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + struct wmi_host_peer_adv_stats + *peer_adv_stats) +{ + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + wmi_peer_extd2_stats *adv_stats; + int i; + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *)evt_buf; + + adv_stats = param_buf->peer_extd2_stats; + if (!adv_stats) { + WMI_LOGD("%s: no peer_adv stats in event buffer", __func__); + return QDF_STATUS_E_INVAL; + } + + for (i = 0; i < param_buf->num_peer_extd2_stats; i++) { + WMI_MAC_ADDR_TO_CHAR_ARRAY(&adv_stats[i].peer_macaddr, + peer_adv_stats[i].peer_macaddr); + 
peer_adv_stats[i].fcs_count = adv_stats[i].rx_fcs_err; + peer_adv_stats[i].rx_bytes = + (uint64_t)adv_stats[i].rx_bytes_u32 << + WMI_LOWER_BITS_SHIFT_32 | + adv_stats[i].rx_bytes_l32; + peer_adv_stats[i].rx_count = adv_stats[i].rx_mpdus; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_peer_extd_stats_tlv() - extract extended peer stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into extended peer stats + * @param peer_extd_stats: Pointer to hold extended peer stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_peer_extd_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, + wmi_host_peer_extd_stats *peer_extd_stats) +{ + return QDF_STATUS_SUCCESS; +} + +/** + * extract_chan_stats_tlv() - extract chan stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param index: Index into chan stats + * @param vdev_extd_stats: Pointer to hold chan stats + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_chan_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint32_t index, wmi_host_chan_stats *chan_stats) +{ + WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf; + wmi_stats_event_fixed_param *ev_param; + uint8_t *data; + + param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *) evt_buf; + ev_param = (wmi_stats_event_fixed_param *) param_buf->fixed_param; + data = (uint8_t *) param_buf->data; + + if (index < ev_param->num_chan_stats) { + wmi_chan_stats *ev = (wmi_chan_stats *) ((data) + + ((ev_param->num_pdev_stats) * sizeof(wmi_pdev_stats)) + + ((ev_param->num_vdev_stats) * sizeof(wmi_vdev_stats)) + + ((ev_param->num_peer_stats) * sizeof(wmi_peer_stats)) + + (index * sizeof(wmi_chan_stats))); + + + /* Non-TLV doesn't have num_chan_stats */ + chan_stats->chan_mhz = ev->chan_mhz; + chan_stats->sampling_period_us = ev->sampling_period_us; + chan_stats->rx_clear_count = 
ev->rx_clear_count; + chan_stats->tx_duration_us = ev->tx_duration_us; + chan_stats->rx_duration_us = ev->rx_duration_us; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_profile_ctx_tlv() - extract profile context from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @idx: profile stats index to extract + * @param profile_ctx: Pointer to hold profile context + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_profile_ctx_tlv(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_wlan_profile_ctx_t *profile_ctx) +{ + return QDF_STATUS_SUCCESS; +} + +/** + * extract_profile_data_tlv() - extract profile data from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param profile_data: Pointer to hold profile data + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_profile_data_tlv(wmi_unified_t wmi_handle, + void *evt_buf, uint8_t idx, wmi_host_wlan_profile_t *profile_data) +{ + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_chan_info_event_tlv() - extract chan information from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param chan_info: Pointer to hold chan information + * + * Return: QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_chan_info_event_tlv(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_chan_info_event *chan_info) +{ + WMI_CHAN_INFO_EVENTID_param_tlvs *param_buf; + wmi_chan_info_event_fixed_param *ev; + + param_buf = (WMI_CHAN_INFO_EVENTID_param_tlvs *) evt_buf; + + ev = (wmi_chan_info_event_fixed_param *) param_buf->fixed_param; + if (!ev) { + WMI_LOGE("%s: Failed to allocmemory\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + chan_info->err_code = ev->err_code; + chan_info->freq = ev->freq; + chan_info->cmd_flags = ev->cmd_flags; + chan_info->noise_floor = ev->noise_floor; + chan_info->rx_clear_count = ev->rx_clear_count; + 
chan_info->cycle_count = ev->cycle_count; + chan_info->tx_frame_cnt = ev->tx_frame_cnt; + chan_info->mac_clk_mhz = ev->mac_clk_mhz; + chan_info->pdev_id = wlan_get_pdev_id_from_vdev_id( + (struct wlan_objmgr_psoc *)wmi_handle->soc->wmi_psoc, + ev->vdev_id, WLAN_SCAN_ID); + chan_info->chan_tx_pwr_range = ev->chan_tx_pwr_range; + chan_info->chan_tx_pwr_tp = ev->chan_tx_pwr_tp; + chan_info->my_bss_rx_cycle_count = ev->my_bss_rx_cycle_count; + chan_info->rx_11b_mode_data_duration = ev->rx_11b_mode_data_duration; + chan_info->tx_frame_cnt = ev->tx_frame_cnt; + chan_info->rx_frame_count = ev->rx_frame_count; + chan_info->mac_clk_mhz = ev->mac_clk_mhz; + chan_info->vdev_id = ev->vdev_id; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_utf_event_tlv() - extract UTF data info from event + * @wmi_handle: WMI handle + * @param evt_buf: Pointer to event buffer + * @param param: Pointer to hold data + * + * Return : QDF_STATUS_SUCCESS for success or error code + */ +static QDF_STATUS extract_pdev_utf_event_tlv(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_host_pdev_utf_event *event) +{ + WMI_PDEV_UTF_EVENTID_param_tlvs *param_buf; + struct wmi_host_utf_seg_header_info *seg_hdr; + + param_buf = (WMI_PDEV_UTF_EVENTID_param_tlvs *)evt_buf; + event->data = param_buf->data; + event->datalen = param_buf->num_data; + + if (event->datalen < sizeof(struct wmi_host_utf_seg_header_info)) { + WMI_LOGE("%s: Invalid datalen: %d ", __func__, event->datalen); + return QDF_STATUS_E_INVAL; + } + seg_hdr = (struct wmi_host_utf_seg_header_info *)param_buf->data; + /* Set pdev_id=1 until FW adds support to include pdev_id */ + event->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host( + seg_hdr->pdev_id); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_chainmask_tables_tlv() - extract chain mask tables from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold evt buf + * + * Return: QDF_STATUS_SUCCESS for 
success or error code + */ +static QDF_STATUS extract_chainmask_tables_tlv(wmi_unified_t wmi_handle, + uint8_t *event, struct wlan_psoc_host_chainmask_table *chainmask_table) +{ + WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf; + WMI_MAC_PHY_CHAINMASK_CAPABILITY *chainmask_caps; + WMI_SOC_MAC_PHY_HW_MODE_CAPS *hw_caps; + uint8_t i = 0, j = 0; + uint32_t num_mac_phy_chainmask_caps = 0; + + param_buf = (WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *) event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + hw_caps = param_buf->soc_hw_mode_caps; + if (!hw_caps) + return QDF_STATUS_E_INVAL; + + if ((!hw_caps->num_chainmask_tables) || + (hw_caps->num_chainmask_tables > PSOC_MAX_CHAINMASK_TABLES) || + (hw_caps->num_chainmask_tables > + param_buf->num_mac_phy_chainmask_combo)) + return QDF_STATUS_E_INVAL; + + chainmask_caps = param_buf->mac_phy_chainmask_caps; + + if (chainmask_caps == NULL) + return QDF_STATUS_E_INVAL; + + for (i = 0; i < hw_caps->num_chainmask_tables; i++) { + if (chainmask_table[i].num_valid_chainmasks > + (UINT_MAX - num_mac_phy_chainmask_caps)) { + wmi_err_rl("integer overflow, num_mac_phy_chainmask_caps:%d, i:%d, um_valid_chainmasks:%d", + num_mac_phy_chainmask_caps, i, + chainmask_table[i].num_valid_chainmasks); + return QDF_STATUS_E_INVAL; + } + num_mac_phy_chainmask_caps += + chainmask_table[i].num_valid_chainmasks; + } + + if (num_mac_phy_chainmask_caps > + param_buf->num_mac_phy_chainmask_caps) { + wmi_err_rl("invalid chainmask caps num, num_mac_phy_chainmask_caps:%d, param_buf->num_mac_phy_chainmask_caps:%d", + num_mac_phy_chainmask_caps, + param_buf->num_mac_phy_chainmask_caps); + return QDF_STATUS_E_INVAL; + } + + for (i = 0; i < hw_caps->num_chainmask_tables; i++) { + + qdf_print("Dumping chain mask combo data for table : %d\n", i); + for (j = 0; j < chainmask_table[i].num_valid_chainmasks; j++) { + + chainmask_table[i].cap_list[j].chainmask = + chainmask_caps->chainmask; + + chainmask_table[i].cap_list[j].supports_chan_width_20 = + 
WMI_SUPPORT_CHAN_WIDTH_20_GET(chainmask_caps->supported_flags);

			chainmask_table[i].cap_list[j].supports_chan_width_40 =
				WMI_SUPPORT_CHAN_WIDTH_40_GET(chainmask_caps->supported_flags);

			chainmask_table[i].cap_list[j].supports_chan_width_80 =
				WMI_SUPPORT_CHAN_WIDTH_80_GET(chainmask_caps->supported_flags);

			chainmask_table[i].cap_list[j].supports_chan_width_160 =
				WMI_SUPPORT_CHAN_WIDTH_160_GET(chainmask_caps->supported_flags);

			chainmask_table[i].cap_list[j].supports_chan_width_80P80 =
				WMI_SUPPORT_CHAN_WIDTH_80P80_GET(chainmask_caps->supported_flags);

			chainmask_table[i].cap_list[j].chain_mask_2G =
				WMI_SUPPORT_CHAIN_MASK_2G_GET(chainmask_caps->supported_flags);

			chainmask_table[i].cap_list[j].chain_mask_5G =
				WMI_SUPPORT_CHAIN_MASK_5G_GET(chainmask_caps->supported_flags);

			chainmask_table[i].cap_list[j].chain_mask_tx =
				WMI_SUPPORT_CHAIN_MASK_TX_GET(chainmask_caps->supported_flags);

			chainmask_table[i].cap_list[j].chain_mask_rx =
				WMI_SUPPORT_CHAIN_MASK_RX_GET(chainmask_caps->supported_flags);

			chainmask_table[i].cap_list[j].supports_aDFS =
				WMI_SUPPORT_CHAIN_MASK_ADFS_GET(chainmask_caps->supported_flags);

			qdf_print("supported_flags: 0x%08x  chainmasks: 0x%08x\n",
					chainmask_caps->supported_flags,
					chainmask_caps->chainmask
				 );
			chainmask_caps++;
		}
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_service_ready_ext_tlv() - extract basic extended service ready params
 * from event
 * @wmi_handle: wmi handle
 * @param event: pointer to event buffer
 * @param param: Pointer to hold evt buf
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
static QDF_STATUS extract_service_ready_ext_tlv(wmi_unified_t wmi_handle,
		uint8_t *event, struct wlan_psoc_host_service_ext_param *param)
{
	WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf;
	wmi_service_ready_ext_event_fixed_param *ev;
	WMI_SOC_MAC_PHY_HW_MODE_CAPS *hw_caps;
	WMI_SOC_HAL_REG_CAPABILITIES *reg_caps;
	WMI_MAC_PHY_CHAINMASK_COMBO *chain_mask_combo;
	uint8_t i = 0;

	param_buf = (WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *) event;
	if (!param_buf)
		return QDF_STATUS_E_INVAL;

	ev = param_buf->fixed_param;
	if (!ev)
		return QDF_STATUS_E_INVAL;

	/* Move this to host based bitmap */
	param->default_conc_scan_config_bits =
				ev->default_conc_scan_config_bits;
	param->default_fw_config_bits = ev->default_fw_config_bits;
	param->he_cap_info = ev->he_cap_info;
	param->mpdu_density = ev->mpdu_density;
	param->max_bssid_rx_filters = ev->max_bssid_rx_filters;
	param->fw_build_vers_ext = ev->fw_build_vers_ext;
	param->num_dbr_ring_caps = param_buf->num_dma_ring_caps;
	qdf_mem_copy(&param->ppet, &ev->ppet, sizeof(param->ppet));

	/* hw mode and reg caps are optional TLVs; default counts to 0 */
	hw_caps = param_buf->soc_hw_mode_caps;
	if (hw_caps)
		param->num_hw_modes = hw_caps->num_hw_modes;
	else
		param->num_hw_modes = 0;

	reg_caps = param_buf->soc_hal_reg_caps;
	if (reg_caps)
		param->num_phy = reg_caps->num_phy;
	else
		param->num_phy = 0;

	if (hw_caps) {
		param->num_chainmask_tables = hw_caps->num_chainmask_tables;
		qdf_print("Num chain mask tables: %d\n", hw_caps->num_chainmask_tables);
	} else
		param->num_chainmask_tables = 0;

	/* Bound the table count by both the host cap and the number of
	 * combo TLVs present before walking the combo array.
	 */
	if (param->num_chainmask_tables > PSOC_MAX_CHAINMASK_TABLES ||
	    param->num_chainmask_tables >
	    param_buf->num_mac_phy_chainmask_combo) {
		wmi_err_rl("num_chainmask_tables is OOB: %u",
			   param->num_chainmask_tables);
		return QDF_STATUS_E_INVAL;
	}
	chain_mask_combo = param_buf->mac_phy_chainmask_combo;

	/* Combo TLV absent: counts above are still valid, so succeed */
	if (chain_mask_combo == NULL)
		return QDF_STATUS_SUCCESS;

	qdf_print("Dumping chain mask combo data\n");

	for (i = 0; i < param->num_chainmask_tables; i++) {

		qdf_print("table_id : %d Num valid chainmasks: %d\n",
				chain_mask_combo->chainmask_table_id,
				chain_mask_combo->num_valid_chainmask
			 );

		param->chainmask_table[i].table_id =
			chain_mask_combo->chainmask_table_id;
		param->chainmask_table[i].num_valid_chainmasks =
			chain_mask_combo->num_valid_chainmask;
		chain_mask_combo++;
	}
	qdf_print("chain mask combo end\n");

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_sar_cap_service_ready_ext_tlv() -
 *       extract SAR cap from service ready event
 * @wmi_handle: wmi handle
 * @event: pointer to event buffer
 * @ext_param: extended target info
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
static QDF_STATUS extract_sar_cap_service_ready_ext_tlv(
			wmi_unified_t wmi_handle,
			uint8_t *event,
			struct wlan_psoc_host_service_ext_param *ext_param)
{
	WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf;
	WMI_SAR_CAPABILITIES *sar_caps;

	param_buf = (WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *) event;

	if (!param_buf)
		return QDF_STATUS_E_INVAL;

	/* SAR caps TLV is optional; report version 0 when absent */
	sar_caps = param_buf->sar_caps;
	if (sar_caps)
		ext_param->sar_version = sar_caps->active_version;
	else
		ext_param->sar_version = 0;

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_hw_mode_cap_service_ready_ext_tlv() -
 *       extract HW mode cap from service ready event
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to event buffer
 * @param param: Pointer to hold evt buf
 * @param hw_mode_idx: hw mode idx should be less than num_mode
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
static QDF_STATUS extract_hw_mode_cap_service_ready_ext_tlv(
			wmi_unified_t wmi_handle,
			uint8_t *event, uint8_t hw_mode_idx,
			struct wlan_psoc_host_hw_mode_caps *param)
{
	WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf;
	WMI_SOC_MAC_PHY_HW_MODE_CAPS *hw_caps;

	param_buf = (WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *) event;
	if (!param_buf)
		return QDF_STATUS_E_INVAL;

	hw_caps = param_buf->soc_hw_mode_caps;
	if (!hw_caps)
		return QDF_STATUS_E_INVAL;

	/* Validate the advertised mode count against the host cap and
	 * the number of hw_mode_caps TLVs actually present.
	 */
	if (!hw_caps->num_hw_modes ||
	    !param_buf->hw_mode_caps ||
	    hw_caps->num_hw_modes > PSOC_MAX_HW_MODE ||
	    hw_caps->num_hw_modes > param_buf->num_hw_mode_caps)
		return QDF_STATUS_E_INVAL;

	if (hw_mode_idx >= hw_caps->num_hw_modes)
		return
QDF_STATUS_E_INVAL;

	param->hw_mode_id = param_buf->hw_mode_caps[hw_mode_idx].hw_mode_id;
	param->phy_id_map = param_buf->hw_mode_caps[hw_mode_idx].phy_id_map;

	param->hw_mode_config_type =
		param_buf->hw_mode_caps[hw_mode_idx].hw_mode_config_type;

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_mac_phy_cap_service_ready_ext_tlv() -
 *       extract MAC phy cap from service ready event
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to event buffer
 * @param param: Pointer to hold evt buf
 * @param hw_mode_idx: hw mode idx should be less than num_mode
 * @param phy_id: phy id within hw_mode
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
static QDF_STATUS extract_mac_phy_cap_service_ready_ext_tlv(
			wmi_unified_t wmi_handle,
			uint8_t *event, uint8_t hw_mode_id, uint8_t phy_id,
			struct wlan_psoc_host_mac_phy_caps *param)
{
	WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf;
	WMI_MAC_PHY_CAPABILITIES *mac_phy_caps;
	WMI_SOC_MAC_PHY_HW_MODE_CAPS *hw_caps;
	uint32_t phy_map;
	uint8_t hw_idx, phy_idx = 0;

	param_buf = (WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *) event;
	if (!param_buf)
		return QDF_STATUS_E_INVAL;

	hw_caps = param_buf->soc_hw_mode_caps;
	if (!hw_caps)
		return QDF_STATUS_E_INVAL;
	if (hw_caps->num_hw_modes > PSOC_MAX_HW_MODE ||
	    hw_caps->num_hw_modes > param_buf->num_hw_mode_caps) {
		wmi_err_rl("invalid num_hw_modes %d, num_hw_mode_caps %d",
			   hw_caps->num_hw_modes, param_buf->num_hw_mode_caps);
		return QDF_STATUS_E_INVAL;
	}

	/* Walk the hw modes preceding the requested one, counting the
	 * phys (set bits in each mode's phy_id_map) so that phy_idx ends
	 * up as the base index of the requested mode's phys inside the
	 * flat mac_phy_caps array.
	 */
	for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) {
		if (hw_mode_id == param_buf->hw_mode_caps[hw_idx].hw_mode_id)
			break;

		phy_map = param_buf->hw_mode_caps[hw_idx].phy_id_map;
		while (phy_map) {
			phy_map >>= 1;
			phy_idx++;
		}
	}

	if (hw_idx == hw_caps->num_hw_modes)
		return QDF_STATUS_E_INVAL;

	phy_idx += phy_id;
	if (phy_idx >= param_buf->num_mac_phy_caps)
		return QDF_STATUS_E_INVAL;

	mac_phy_caps = &param_buf->mac_phy_caps[phy_idx];

	param->hw_mode_id = mac_phy_caps->hw_mode_id;
	param->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host(
							mac_phy_caps->pdev_id);
	param->phy_id = mac_phy_caps->phy_id;
	param->supports_11b =
			WMI_SUPPORT_11B_GET(mac_phy_caps->supported_flags);
	param->supports_11g =
			WMI_SUPPORT_11G_GET(mac_phy_caps->supported_flags);
	param->supports_11a =
			WMI_SUPPORT_11A_GET(mac_phy_caps->supported_flags);
	param->supports_11n =
			WMI_SUPPORT_11N_GET(mac_phy_caps->supported_flags);
	param->supports_11ac =
			WMI_SUPPORT_11AC_GET(mac_phy_caps->supported_flags);
	param->supports_11ax =
			WMI_SUPPORT_11AX_GET(mac_phy_caps->supported_flags);

	param->supported_bands = mac_phy_caps->supported_bands;
	param->ampdu_density = mac_phy_caps->ampdu_density;
	param->max_bw_supported_2G = mac_phy_caps->max_bw_supported_2G;
	param->ht_cap_info_2G = mac_phy_caps->ht_cap_info_2G;
	param->vht_cap_info_2G = mac_phy_caps->vht_cap_info_2G;
	param->vht_supp_mcs_2G = mac_phy_caps->vht_supp_mcs_2G;
	param->he_cap_info_2G = mac_phy_caps->he_cap_info_2G;
	param->he_supp_mcs_2G = mac_phy_caps->he_supp_mcs_2G;
	param->tx_chain_mask_2G = mac_phy_caps->tx_chain_mask_2G;
	param->rx_chain_mask_2G = mac_phy_caps->rx_chain_mask_2G;
	param->max_bw_supported_5G = mac_phy_caps->max_bw_supported_5G;
	param->ht_cap_info_5G = mac_phy_caps->ht_cap_info_5G;
	param->vht_cap_info_5G = mac_phy_caps->vht_cap_info_5G;
	param->vht_supp_mcs_5G = mac_phy_caps->vht_supp_mcs_5G;
	param->he_cap_info_5G = mac_phy_caps->he_cap_info_5G;
	param->he_supp_mcs_5G = mac_phy_caps->he_supp_mcs_5G;
	param->tx_chain_mask_5G = mac_phy_caps->tx_chain_mask_5G;
	param->rx_chain_mask_5G = mac_phy_caps->rx_chain_mask_5G;
	qdf_mem_copy(&param->he_cap_phy_info_2G,
			&mac_phy_caps->he_cap_phy_info_2G,
			sizeof(param->he_cap_phy_info_2G));
	qdf_mem_copy(&param->he_cap_phy_info_5G,
			&mac_phy_caps->he_cap_phy_info_5G,
			sizeof(param->he_cap_phy_info_5G));
	qdf_mem_copy(&param->he_ppet2G, &mac_phy_caps->he_ppet2G,
				 sizeof(param->he_ppet2G));
	qdf_mem_copy(&param->he_ppet5G, &mac_phy_caps->he_ppet5G,
				 sizeof(param->he_ppet5G));
	param->chainmask_table_id = mac_phy_caps->chainmask_table_id;

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_reg_cap_service_ready_ext_tlv() -
 *       extract REG cap from service ready event
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to event buffer
 * @param param: Pointer to hold evt buf
 * @param phy_idx: phy idx should be less than num_mode
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
static QDF_STATUS extract_reg_cap_service_ready_ext_tlv(
			wmi_unified_t wmi_handle,
			uint8_t *event, uint8_t phy_idx,
			struct wlan_psoc_host_hal_reg_capabilities_ext *param)
{
	WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf;
	WMI_SOC_HAL_REG_CAPABILITIES *reg_caps;
	WMI_HAL_REG_CAPABILITIES_EXT *ext_reg_cap;

	param_buf = (WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *) event;
	if (!param_buf)
		return QDF_STATUS_E_INVAL;

	reg_caps = param_buf->soc_hal_reg_caps;
	if (!reg_caps)
		return QDF_STATUS_E_INVAL;

	/* num_phy must be covered by the hal_reg_caps TLV array */
	if (reg_caps->num_phy > param_buf->num_hal_reg_caps)
		return QDF_STATUS_E_INVAL;

	if (phy_idx >= reg_caps->num_phy)
		return QDF_STATUS_E_INVAL;

	if (!param_buf->hal_reg_caps)
		return QDF_STATUS_E_INVAL;

	ext_reg_cap = &param_buf->hal_reg_caps[phy_idx];

	param->phy_id = ext_reg_cap->phy_id;
	param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain;
	param->eeprom_reg_domain_ext = ext_reg_cap->eeprom_reg_domain_ext;
	param->regcap1 = ext_reg_cap->regcap1;
	param->regcap2 = ext_reg_cap->regcap2;
	param->wireless_modes = convert_wireless_modes_tlv(
						ext_reg_cap->wireless_modes);
	param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan;
	param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan;
	param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan;
	param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan;

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS extract_dbr_ring_cap_service_ready_ext_tlv(
wmi_unified_t wmi_handle, + uint8_t *event, uint8_t idx, + struct wlan_psoc_host_dbr_ring_caps *param) +{ + WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf; + WMI_DMA_RING_CAPABILITIES *dbr_ring_caps; + + param_buf = (WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *)event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + dbr_ring_caps = ¶m_buf->dma_ring_caps[idx]; + + param->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host( + dbr_ring_caps->pdev_id); + param->mod_id = dbr_ring_caps->mod_id; + param->ring_elems_min = dbr_ring_caps->ring_elems_min; + param->min_buf_size = dbr_ring_caps->min_buf_size; + param->min_buf_align = dbr_ring_caps->min_buf_align; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_dbr_buf_release_fixed_tlv(wmi_unified_t wmi_handle, + uint8_t *event, struct direct_buf_rx_rsp *param) +{ + WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID_param_tlvs *param_buf; + wmi_dma_buf_release_fixed_param *ev; + + param_buf = (WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID_param_tlvs *)event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + ev = param_buf->fixed_param; + if (!ev) + return QDF_STATUS_E_INVAL; + + param->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host( + ev->pdev_id); + param->mod_id = ev->mod_id; + param->num_buf_release_entry = ev->num_buf_release_entry; + param->num_meta_data_entry = ev->num_meta_data_entry; + WMI_LOGD("%s:pdev id %d mod id %d num buf release entry %d\n", __func__, + param->pdev_id, param->mod_id, param->num_buf_release_entry); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_dbr_buf_release_entry_tlv(wmi_unified_t wmi_handle, + uint8_t *event, uint8_t idx, struct direct_buf_rx_entry *param) +{ + WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID_param_tlvs *param_buf; + wmi_dma_buf_release_entry *entry; + + param_buf = (WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID_param_tlvs *)event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + entry = ¶m_buf->entries[idx]; + + if (!entry) { + WMI_LOGE("%s: Entry is NULL\n", 
__func__); + return QDF_STATUS_E_FAILURE; + } + + WMI_LOGD("%s: paddr_lo[%d] = %x\n", __func__, idx, entry->paddr_lo); + + param->paddr_lo = entry->paddr_lo; + param->paddr_hi = entry->paddr_hi; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_dbr_buf_metadata_tlv( + wmi_unified_t wmi_handle, uint8_t *event, + uint8_t idx, struct direct_buf_rx_metadata *param) +{ + WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID_param_tlvs *param_buf; + wmi_dma_buf_release_spectral_meta_data *entry; + + param_buf = (WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID_param_tlvs *)event; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + entry = ¶m_buf->meta_data[idx]; + + if (!entry) { + WMI_LOGE("%s: Entry is NULL\n", __func__); + return QDF_STATUS_E_FAILURE; + } + + qdf_mem_copy(param->noisefloor, entry->noise_floor, + sizeof(entry->noise_floor)); + return QDF_STATUS_SUCCESS; +} + +/** + * extract_dcs_interference_type_tlv() - extract dcs interference type + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold dcs interference param + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_dcs_interference_type_tlv( + wmi_unified_t wmi_handle, + void *evt_buf, struct wmi_host_dcs_interference_param *param) +{ + WMI_DCS_INTERFERENCE_EVENTID_param_tlvs *param_buf; + + param_buf = (WMI_DCS_INTERFERENCE_EVENTID_param_tlvs *) evt_buf; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + param->interference_type = param_buf->fixed_param->interference_type; + param->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host( + param_buf->fixed_param->pdev_id); + + return QDF_STATUS_SUCCESS; +} + +/* + * extract_dcs_cw_int_tlv() - extract dcs cw interference from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param cw_int: Pointer to hold cw interference + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_dcs_cw_int_tlv(wmi_unified_t wmi_handle, + void 
*evt_buf, + wmi_host_ath_dcs_cw_int *cw_int) +{ + WMI_DCS_INTERFERENCE_EVENTID_param_tlvs *param_buf; + wlan_dcs_cw_int *ev; + + param_buf = (WMI_DCS_INTERFERENCE_EVENTID_param_tlvs *) evt_buf; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + ev = param_buf->cw_int; + + cw_int->channel = ev->channel; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_dcs_im_tgt_stats_tlv() - extract dcs im target stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param wlan_stat: Pointer to hold wlan stats + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_dcs_im_tgt_stats_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_dcs_im_tgt_stats_t *wlan_stat) +{ + WMI_DCS_INTERFERENCE_EVENTID_param_tlvs *param_buf; + wlan_dcs_im_tgt_stats_t *ev; + + param_buf = (WMI_DCS_INTERFERENCE_EVENTID_param_tlvs *) evt_buf; + if (!param_buf) + return QDF_STATUS_E_INVAL; + + ev = param_buf->wlan_stat; + wlan_stat->reg_tsf32 = ev->reg_tsf32; + wlan_stat->last_ack_rssi = ev->last_ack_rssi; + wlan_stat->tx_waste_time = ev->tx_waste_time; + wlan_stat->rx_time = ev->rx_time; + wlan_stat->phyerr_cnt = ev->phyerr_cnt; + wlan_stat->mib_stats.listen_time = ev->listen_time; + wlan_stat->mib_stats.reg_tx_frame_cnt = ev->reg_tx_frame_cnt; + wlan_stat->mib_stats.reg_rx_frame_cnt = ev->reg_rx_frame_cnt; + wlan_stat->mib_stats.reg_rxclr_cnt = ev->reg_rxclr_cnt; + wlan_stat->mib_stats.reg_cycle_cnt = ev->reg_cycle_cnt; + wlan_stat->mib_stats.reg_rxclr_ext_cnt = ev->reg_rxclr_ext_cnt; + wlan_stat->mib_stats.reg_ofdm_phyerr_cnt = ev->reg_ofdm_phyerr_cnt; + wlan_stat->mib_stats.reg_cck_phyerr_cnt = ev->reg_cck_phyerr_cnt; + wlan_stat->chan_nf = ev->chan_nf; + wlan_stat->my_bss_rx_cycle_count = ev->my_bss_rx_cycle_count; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_thermal_stats_tlv() - extract thermal stats from event + * @wmi_handle: wmi handle + * @param evt_buf: Pointer to event buffer + * @param temp: Pointer to hold extracted 
temperature
 * @param level: Pointer to hold extracted level
 * @param pdev_id: Pointer to hold extracted pdev id (host numbering)
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS
extract_thermal_stats_tlv(wmi_unified_t wmi_handle,
		void *evt_buf, uint32_t *temp,
		uint32_t *level, uint32_t *pdev_id)
{
	WMI_THERM_THROT_STATS_EVENTID_param_tlvs *param_buf;
	wmi_therm_throt_stats_event_fixed_param *tt_stats_event;

	param_buf =
		(WMI_THERM_THROT_STATS_EVENTID_param_tlvs *) evt_buf;
	if (!param_buf)
		return QDF_STATUS_E_INVAL;

	tt_stats_event = param_buf->fixed_param;

	*pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host(
						tt_stats_event->pdev_id);
	*temp = tt_stats_event->temp;
	*level = tt_stats_event->level;

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_thermal_level_stats_tlv() - extract thermal level stats from event
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to event buffer
 * @param idx: Index to level stats
 * @param levelcount: Pointer to hold levelcount
 * @param dccount: Pointer to hold dccount
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS
extract_thermal_level_stats_tlv(wmi_unified_t wmi_handle,
		void *evt_buf, uint8_t idx, uint32_t *levelcount,
		uint32_t *dccount)
{
	WMI_THERM_THROT_STATS_EVENTID_param_tlvs *param_buf;
	wmi_therm_throt_level_stats_info *tt_level_info;

	param_buf =
		(WMI_THERM_THROT_STATS_EVENTID_param_tlvs *) evt_buf;
	if (!param_buf)
		return QDF_STATUS_E_INVAL;

	tt_level_info = param_buf->therm_throt_level_stats_info;

	/* Only indices below the host THERMAL_LEVELS cap are valid */
	if (idx < THERMAL_LEVELS) {
		*levelcount = tt_level_info[idx].level_count;
		*dccount = tt_level_info[idx].dc_count;
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_FAILURE;
}
#ifdef BIG_ENDIAN_HOST
/**
 * fips_conv_data_be() - LE to BE conversion of FIPS ev data
 * @param data_len - data length
 * @param data - pointer to data
 *
 * Converts the event payload in place, 32 bits at a time, using a
 * temporary 4-byte-aligned scratch buffer so the word accesses are safe.
 *
 * Return: QDF_STATUS - success or error status
 */
static QDF_STATUS fips_conv_data_be(uint32_t data_len, uint8_t *data)
{
	uint8_t *data_aligned = NULL;
	int c;
	unsigned char *data_unaligned;

	data_unaligned = qdf_mem_malloc(((sizeof(uint8_t) * data_len) +
					FIPS_ALIGN));
	/* Assigning unaligned space to copy the data */
	/* Checking if kmalloc does successful allocation */
	if (data_unaligned == NULL)
		return QDF_STATUS_E_FAILURE;

	/* Checking if space is aligned */
	if (!FIPS_IS_ALIGNED(data_unaligned, FIPS_ALIGN)) {
		/* align the data space */
		data_aligned =
			(uint8_t *)FIPS_ALIGNTO(data_unaligned, FIPS_ALIGN);
	} else {
		data_aligned = (u_int8_t *)data_unaligned;
	}

	/* memset and copy content from data to data aligned */
	OS_MEMSET(data_aligned, 0, data_len);
	OS_MEMCPY(data_aligned, data, data_len);
	/* Endianness to LE */
	for (c = 0; c < data_len/4; c++) {
		*((u_int32_t *)data_aligned + c) =
			qdf_le32_to_cpu(*((u_int32_t *)data_aligned + c));
	}

	/* Copy content to event->data */
	OS_MEMCPY(data, data_aligned, data_len);

	/* clean up allocated space */
	qdf_mem_free(data_unaligned);
	data_aligned = NULL;
	data_unaligned = NULL;

	return QDF_STATUS_SUCCESS;
}
#else
/**
 * fips_conv_data_be() - DUMMY for LE platform
 *
 * Return: QDF_STATUS - success
 */
static QDF_STATUS fips_conv_data_be(uint32_t data_len, uint8_t *data)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * extract_fips_event_data_tlv() - extract fips event data
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to event buffer
 * @param param: pointer FIPS event params
 *
 * Return: 0 for success or error code
 */
static QDF_STATUS extract_fips_event_data_tlv(wmi_unified_t wmi_handle,
		void *evt_buf, struct wmi_host_fips_event_param *param)
{
	WMI_PDEV_FIPS_EVENTID_param_tlvs *param_buf;
	wmi_pdev_fips_event_fixed_param *event;

	param_buf = (WMI_PDEV_FIPS_EVENTID_param_tlvs *) evt_buf;
	event = (wmi_pdev_fips_event_fixed_param *) param_buf->fixed_param;

	/* In-place endian fixup of the payload on BE hosts */
	if (fips_conv_data_be(event->data_len, param_buf->data) !=
							QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_FAILURE;

	param->data = (uint32_t *)param_buf->data;
	param->data_len = event->data_len;
	param->error_status = event->error_status;
	param->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host(
								event->pdev_id);

	return QDF_STATUS_SUCCESS;
}

/**
 * extract_peer_delete_response_event_tlv() - extract peer delete response event
 * @wmi_handle: wmi handle
 * @param evt_buf: pointer to event buffer
 * @param param: Pointer to hold vdev_id and peer mac address
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
static QDF_STATUS extract_peer_delete_response_event_tlv(wmi_unified_t wmi_hdl,
	void *evt_buf, struct wmi_host_peer_delete_response_event *param)
{
	WMI_PEER_DELETE_RESP_EVENTID_param_tlvs *param_buf;
	wmi_peer_delete_resp_event_fixed_param *ev;

	param_buf = (WMI_PEER_DELETE_RESP_EVENTID_param_tlvs *)evt_buf;

	ev = (wmi_peer_delete_resp_event_fixed_param *) param_buf->fixed_param;
	if (!ev) {
		WMI_LOGE("%s: Invalid peer_delete response\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	param->vdev_id = ev->vdev_id;
	WMI_MAC_ADDR_TO_CHAR_ARRAY(&ev->peer_macaddr,
			&param->mac_address.bytes[0]);

	return QDF_STATUS_SUCCESS;
}

/* Whether this command/event id belongs to the management-frame path
 * (used to decide which WMI debug log it is recorded in).
 */
static bool is_management_record_tlv(uint32_t cmd_id)
{
	switch (cmd_id) {
	case WMI_MGMT_TX_SEND_CMDID:
	case WMI_MGMT_TX_COMPLETION_EVENTID:
	case WMI_OFFCHAN_DATA_TX_SEND_CMDID:
	case WMI_MGMT_RX_EVENTID:
		return true;
	default:
		return false;
	}
}

/* Whether this event id is the firmware diag event */
static bool is_diag_event_tlv(uint32_t event_id)
{
	if (WMI_DIAG_EVENTID == event_id)
		return true;

	return false;
}

/* HTC tag for vdev-set-param commands: power-management related params
 * are tagged so HTC can handle them specially while auto-PM is active.
 */
static uint16_t wmi_tag_vdev_set_cmd(wmi_unified_t wmi_hdl, wmi_buf_t buf)
{
	wmi_vdev_set_param_cmd_fixed_param *set_cmd;

	set_cmd = (wmi_vdev_set_param_cmd_fixed_param *)wmi_buf_data(buf);

	switch (set_cmd->param_id) {
	case WMI_VDEV_PARAM_LISTEN_INTERVAL:
case WMI_VDEV_PARAM_DTIM_POLICY: + return HTC_TX_PACKET_TAG_AUTO_PM; + default: + break; + } + + return 0; +} + +static uint16_t wmi_tag_sta_powersave_cmd(wmi_unified_t wmi_hdl, wmi_buf_t buf) +{ + wmi_sta_powersave_param_cmd_fixed_param *ps_cmd; + + ps_cmd = (wmi_sta_powersave_param_cmd_fixed_param *)wmi_buf_data(buf); + + switch (ps_cmd->param) { + case WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD: + case WMI_STA_PS_PARAM_INACTIVITY_TIME: + case WMI_STA_PS_ENABLE_QPOWER: + return HTC_TX_PACKET_TAG_AUTO_PM; + default: + break; + } + + return 0; +} + +static uint16_t wmi_tag_common_cmd(wmi_unified_t wmi_hdl, wmi_buf_t buf, + uint32_t cmd_id) +{ + if (qdf_atomic_read(&wmi_hdl->is_wow_bus_suspended)) + return 0; + + switch (cmd_id) { + case WMI_VDEV_SET_PARAM_CMDID: + return wmi_tag_vdev_set_cmd(wmi_hdl, buf); + case WMI_STA_POWERSAVE_PARAM_CMDID: + return wmi_tag_sta_powersave_cmd(wmi_hdl, buf); + default: + break; + } + + return 0; +} + +static uint16_t wmi_tag_fw_hang_cmd(wmi_unified_t wmi_handle) +{ + uint16_t tag = 0; + + if (qdf_atomic_read(&wmi_handle->is_target_suspended)) { + pr_err("%s: Target is already suspended, Ignore FW Hang Command\n", + __func__); + return tag; + } + + if (wmi_handle->tag_crash_inject) + tag = HTC_TX_PACKET_TAG_AUTO_PM; + + wmi_handle->tag_crash_inject = false; + return tag; +} + +/** + * wmi_set_htc_tx_tag_tlv() - set HTC TX tag for WMI commands + * @wmi_handle: WMI handle + * @buf: WMI buffer + * @cmd_id: WMI command Id + * + * Return htc_tx_tag + */ +static uint16_t wmi_set_htc_tx_tag_tlv(wmi_unified_t wmi_handle, + wmi_buf_t buf, + uint32_t cmd_id) +{ + uint16_t htc_tx_tag = 0; + + switch (cmd_id) { + case WMI_WOW_ENABLE_CMDID: + case WMI_PDEV_SUSPEND_CMDID: + case WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID: + case WMI_WOW_ADD_WAKE_PATTERN_CMDID: + case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID: + case WMI_PDEV_RESUME_CMDID: + case WMI_WOW_DEL_WAKE_PATTERN_CMDID: + case WMI_WOW_SET_ACTION_WAKE_UP_CMDID: +#ifdef FEATURE_WLAN_D0WOW + case 
WMI_D0_WOW_ENABLE_DISABLE_CMDID: +#endif + htc_tx_tag = HTC_TX_PACKET_TAG_AUTO_PM; + break; + case WMI_FORCE_FW_HANG_CMDID: + htc_tx_tag = wmi_tag_fw_hang_cmd(wmi_handle); + break; + case WMI_VDEV_SET_PARAM_CMDID: + case WMI_STA_POWERSAVE_PARAM_CMDID: + htc_tx_tag = wmi_tag_common_cmd(wmi_handle, buf, cmd_id); + default: + break; + } + + return htc_tx_tag; +} + +/** + * extract_channel_hopping_event_tlv() - extract channel hopping param + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param ch_hopping: Pointer to hold channel hopping param + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS extract_channel_hopping_event_tlv( + wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_pdev_channel_hopping_event *ch_hopping) +{ + WMI_PDEV_CHANNEL_HOPPING_EVENTID_param_tlvs *param_buf; + wmi_pdev_channel_hopping_event_fixed_param *event; + + param_buf = (WMI_PDEV_CHANNEL_HOPPING_EVENTID_param_tlvs *)evt_buf; + event = (wmi_pdev_channel_hopping_event_fixed_param *) + param_buf->fixed_param; + + ch_hopping->noise_floor_report_iter = event->noise_floor_report_iter; + ch_hopping->noise_floor_total_iter = event->noise_floor_total_iter; + ch_hopping->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host( + event->pdev_id); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_tpc_ev_param_tlv() - extract tpc param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold tpc param + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ +static QDF_STATUS extract_pdev_tpc_ev_param_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_pdev_tpc_event *param) +{ + WMI_PDEV_TPC_EVENTID_param_tlvs *param_buf; + wmi_pdev_tpc_event_fixed_param *event; + + param_buf = (WMI_PDEV_TPC_EVENTID_param_tlvs *)evt_buf; + event = (wmi_pdev_tpc_event_fixed_param *)param_buf->fixed_param; + + param->pdev_id = wmi_handle->ops->convert_pdev_id_target_to_host( + event->pdev_id); + qdf_mem_copy(param->tpc, param_buf->tpc, sizeof(param->tpc)); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_nfcal_power_ev_param_tlv() - extract noise floor calibration + * power param from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold nf cal power param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +extract_nfcal_power_ev_param_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_pdev_nfcal_power_all_channels_event *param) +{ + WMI_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID_param_tlvs *param_buf; + wmi_pdev_nfcal_power_all_channels_event_fixed_param *event; + wmi_pdev_nfcal_power_all_channels_nfdBr *ch_nfdbr; + wmi_pdev_nfcal_power_all_channels_nfdBm *ch_nfdbm; + wmi_pdev_nfcal_power_all_channels_freqNum *ch_freqnum; + uint32_t i; + + param_buf = + (WMI_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID_param_tlvs *)evt_buf; + event = param_buf->fixed_param; + ch_nfdbr = param_buf->nfdbr; + ch_nfdbm = param_buf->nfdbm; + ch_freqnum = param_buf->freqnum; + + WMI_LOGD("pdev_id[%x], num_nfdbr[%d], num_nfdbm[%d] num_freqnum[%d]\n", + event->pdev_id, param_buf->num_nfdbr, + param_buf->num_nfdbm, param_buf->num_freqnum); + + if (param_buf->num_nfdbr > + WMI_HOST_RXG_CAL_CHAN_MAX * WMI_HOST_MAX_NUM_CHAINS) { + WMI_LOGE("invalid number of nfdBr"); + return QDF_STATUS_E_FAILURE; + } + + if (param_buf->num_nfdbm > + WMI_HOST_RXG_CAL_CHAN_MAX * WMI_HOST_MAX_NUM_CHAINS) { + WMI_LOGE("invalid number of nfdBm"); + return QDF_STATUS_E_FAILURE; + } + + if 
(param_buf->num_freqnum > WMI_HOST_RXG_CAL_CHAN_MAX) { + WMI_LOGE("invalid number of freqNum"); + return QDF_STATUS_E_FAILURE; + } + + for (i = 0; i < param_buf->num_nfdbr; i++) { + param->nfdbr[i] = (int8_t)ch_nfdbr->nfdBr; + param->nfdbm[i] = (int8_t)ch_nfdbm->nfdBm; + ch_nfdbr++; + ch_nfdbm++; + } + + for (i = 0; i < param_buf->num_freqnum; i++) { + param->freqnum[i] = ch_freqnum->freqNum; + ch_freqnum++; + } + + param->pdev_id = wmi_handle->ops-> + convert_pdev_id_target_to_host(event->pdev_id); + + return QDF_STATUS_SUCCESS; +} + + +#ifdef BIG_ENDIAN_HOST +/** + * wds_addr_ev_conv_data_be() - LE to BE conversion of wds addr event + * @param data_len - data length + * @param data - pointer to data + * + * Return: QDF_STATUS - success or error status + */ +static QDF_STATUS wds_addr_ev_conv_data_be(uint16_t data_len, uint8_t *ev) +{ + uint8_t *datap = (uint8_t *)ev; + int i; + /* Skip swapping the first word */ + datap += sizeof(uint32_t); + for (i = 0; i < ((data_len / sizeof(uint32_t))-1); + i++, datap += sizeof(uint32_t)) { + *(uint32_t *)datap = qdf_le32_to_cpu(*(uint32_t *)datap); + } + + return QDF_STATUS_SUCCESS; +} +#else +/** + * wds_addr_ev_conv_data_be() - Dummy operation for LE platforms + * @param data_len - data length + * @param data - pointer to data + * + * Return: QDF_STATUS - success or error status + */ +static QDF_STATUS wds_addr_ev_conv_data_be(uint32_t data_len, uint8_t *ev) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +/** + * extract_wds_addr_event_tlv() - extract wds address from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param wds_ev: Pointer to hold wds address + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ +static QDF_STATUS extract_wds_addr_event_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint16_t len, wds_addr_event_t *wds_ev) +{ + WMI_WDS_PEER_EVENTID_param_tlvs *param_buf; + wmi_wds_addr_event_fixed_param *ev; + int i; + + param_buf = (WMI_WDS_PEER_EVENTID_param_tlvs *)evt_buf; + ev = (wmi_wds_addr_event_fixed_param *)param_buf->fixed_param; + + if (wds_addr_ev_conv_data_be(len, (uint8_t *)ev) != QDF_STATUS_SUCCESS) + return QDF_STATUS_E_FAILURE; + + qdf_mem_copy(wds_ev->event_type, ev->event_type, + sizeof(wds_ev->event_type)); + for (i = 0; i < 4; i++) { + wds_ev->peer_mac[i] = + ((u_int8_t *)&(ev->peer_mac.mac_addr31to0))[i]; + wds_ev->dest_mac[i] = + ((u_int8_t *)&(ev->dest_mac.mac_addr31to0))[i]; + } + for (i = 0; i < 2; i++) { + wds_ev->peer_mac[4+i] = + ((u_int8_t *)&(ev->peer_mac.mac_addr47to32))[i]; + wds_ev->dest_mac[4+i] = + ((u_int8_t *)&(ev->dest_mac.mac_addr47to32))[i]; + } + return QDF_STATUS_SUCCESS; +} + +/** + * extract_peer_sta_ps_statechange_ev_tlv() - extract peer sta ps state + * from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param ev: Pointer to hold peer param and ps state + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. 
+ */ +static QDF_STATUS extract_peer_sta_ps_statechange_ev_tlv(wmi_unified_t wmi_handle, + void *evt_buf, wmi_host_peer_sta_ps_statechange_event *ev) +{ + WMI_PEER_STA_PS_STATECHG_EVENTID_param_tlvs *param_buf; + wmi_peer_sta_ps_statechange_event_fixed_param *event; + + param_buf = (WMI_PEER_STA_PS_STATECHG_EVENTID_param_tlvs *)evt_buf; + event = (wmi_peer_sta_ps_statechange_event_fixed_param *) + param_buf->fixed_param; + + WMI_MAC_ADDR_TO_CHAR_ARRAY(&event->peer_macaddr, ev->peer_macaddr); + ev->peer_ps_state = event->peer_ps_state; + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_inst_rssi_stats_event_tlv() - extract inst rssi stats from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param inst_rssi_resp: Pointer to hold inst rssi response + * + * @return QDF_STATUS_SUCCESS on success and -ve on failure. + */ +static QDF_STATUS extract_inst_rssi_stats_event_tlv( + wmi_unified_t wmi_handle, void *evt_buf, + wmi_host_inst_stats_resp *inst_rssi_resp) +{ + WMI_INST_RSSI_STATS_EVENTID_param_tlvs *param_buf; + wmi_inst_rssi_stats_resp_fixed_param *event; + + param_buf = (WMI_INST_RSSI_STATS_EVENTID_param_tlvs *)evt_buf; + event = (wmi_inst_rssi_stats_resp_fixed_param *)param_buf->fixed_param; + + qdf_mem_copy(&(inst_rssi_resp->peer_macaddr), + &(event->peer_macaddr), sizeof(wmi_mac_addr)); + inst_rssi_resp->iRSSI = event->iRSSI; + + return QDF_STATUS_SUCCESS; +} + +static struct cur_reg_rule +*create_reg_rules_from_wmi(uint32_t num_reg_rules, + wmi_regulatory_rule_struct *wmi_reg_rule) +{ + struct cur_reg_rule *reg_rule_ptr; + uint32_t count; + + reg_rule_ptr = qdf_mem_malloc(num_reg_rules * sizeof(*reg_rule_ptr)); + + if (NULL == reg_rule_ptr) { + WMI_LOGE("memory allocation failure"); + return NULL; + } + + for (count = 0; count < num_reg_rules; count++) { + reg_rule_ptr[count].start_freq = + WMI_REG_RULE_START_FREQ_GET( + wmi_reg_rule[count].freq_info); + reg_rule_ptr[count].end_freq = + WMI_REG_RULE_END_FREQ_GET( + 
wmi_reg_rule[count].freq_info); + reg_rule_ptr[count].max_bw = + WMI_REG_RULE_MAX_BW_GET( + wmi_reg_rule[count].bw_pwr_info); + reg_rule_ptr[count].reg_power = + WMI_REG_RULE_REG_POWER_GET( + wmi_reg_rule[count].bw_pwr_info); + reg_rule_ptr[count].ant_gain = + WMI_REG_RULE_ANTENNA_GAIN_GET( + wmi_reg_rule[count].bw_pwr_info); + reg_rule_ptr[count].flags = + WMI_REG_RULE_FLAGS_GET( + wmi_reg_rule[count].flag_info); + } + + return reg_rule_ptr; +} + +static QDF_STATUS extract_reg_chan_list_update_event_tlv( + wmi_unified_t wmi_handle, uint8_t *evt_buf, + struct cur_regulatory_info *reg_info, uint32_t len) +{ + WMI_REG_CHAN_LIST_CC_EVENTID_param_tlvs *param_buf; + wmi_reg_chan_list_cc_event_fixed_param *chan_list_event_hdr; + wmi_regulatory_rule_struct *wmi_reg_rule; + uint32_t num_2g_reg_rules, num_5g_reg_rules; + + WMI_LOGD("processing regulatory channel list"); + + param_buf = (WMI_REG_CHAN_LIST_CC_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("invalid channel list event buf"); + return QDF_STATUS_E_FAILURE; + } + + chan_list_event_hdr = param_buf->fixed_param; + + reg_info->num_2g_reg_rules = chan_list_event_hdr->num_2g_reg_rules; + reg_info->num_5g_reg_rules = chan_list_event_hdr->num_5g_reg_rules; + num_2g_reg_rules = reg_info->num_2g_reg_rules; + num_5g_reg_rules = reg_info->num_5g_reg_rules; + if ((num_2g_reg_rules > MAX_REG_RULES) || + (num_5g_reg_rules > MAX_REG_RULES) || + (num_2g_reg_rules + num_5g_reg_rules > MAX_REG_RULES) || + (num_2g_reg_rules + num_5g_reg_rules != + param_buf->num_reg_rule_array)) { + wmi_err_rl("Invalid num_2g_reg_rules: %u, num_5g_reg_rules: %u", + num_2g_reg_rules, num_5g_reg_rules); + return QDF_STATUS_E_FAILURE; + } + if (param_buf->num_reg_rule_array > + (WMI_SVC_MSG_MAX_SIZE - sizeof(*chan_list_event_hdr)) / + sizeof(*wmi_reg_rule)) { + wmi_err_rl("Invalid num_reg_rule_array: %u", + param_buf->num_reg_rule_array); + return QDF_STATUS_E_FAILURE; + } + + qdf_mem_copy(reg_info->alpha2, 
&(chan_list_event_hdr->alpha2), + REG_ALPHA2_LEN); + reg_info->dfs_region = chan_list_event_hdr->dfs_region; + reg_info->phybitmap = chan_list_event_hdr->phybitmap; + reg_info->offload_enabled = true; + reg_info->num_phy = chan_list_event_hdr->num_phy; + reg_info->phy_id = chan_list_event_hdr->phy_id; + reg_info->ctry_code = chan_list_event_hdr->country_id; + reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code; + if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_PASS) + reg_info->status_code = REG_SET_CC_STATUS_PASS; + else if (chan_list_event_hdr->status_code == + WMI_REG_CURRENT_ALPHA2_NOT_FOUND) + reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND; + else if (chan_list_event_hdr->status_code == + WMI_REG_INIT_ALPHA2_NOT_FOUND) + reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND; + else if (chan_list_event_hdr->status_code == + WMI_REG_SET_CC_CHANGE_NOT_ALLOWED) + reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED; + else if (chan_list_event_hdr->status_code == + WMI_REG_SET_CC_STATUS_NO_MEMORY) + reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY; + else if (chan_list_event_hdr->status_code == + WMI_REG_SET_CC_STATUS_FAIL) + reg_info->status_code = REG_SET_CC_STATUS_FAIL; + + reg_info->min_bw_2g = chan_list_event_hdr->min_bw_2g; + reg_info->max_bw_2g = chan_list_event_hdr->max_bw_2g; + reg_info->min_bw_5g = chan_list_event_hdr->min_bw_5g; + reg_info->max_bw_5g = chan_list_event_hdr->max_bw_5g; + + WMI_LOGD(FL("num_phys = %u and phy_id = %u"), + reg_info->num_phy, reg_info->phy_id); + + WMI_LOGD("%s:cc %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d", + __func__, reg_info->alpha2, reg_info->dfs_region, + reg_info->min_bw_2g, reg_info->max_bw_2g, + reg_info->min_bw_5g, reg_info->max_bw_5g); + + WMI_LOGD("%s: num_2g_reg_rules %d num_5g_reg_rules %d", __func__, + num_2g_reg_rules, num_5g_reg_rules); + wmi_reg_rule = + (wmi_regulatory_rule_struct *)((uint8_t *)chan_list_event_hdr + + sizeof(wmi_reg_chan_list_cc_event_fixed_param) + + 
WMI_TLV_HDR_SIZE); + reg_info->reg_rules_2g_ptr = create_reg_rules_from_wmi(num_2g_reg_rules, + wmi_reg_rule); + wmi_reg_rule += num_2g_reg_rules; + + reg_info->reg_rules_5g_ptr = create_reg_rules_from_wmi(num_5g_reg_rules, + wmi_reg_rule); + + WMI_LOGD("processed regulatory channel list"); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_reg_11d_new_country_event_tlv( + wmi_unified_t wmi_handle, uint8_t *evt_buf, + struct reg_11d_new_country *reg_11d_country, uint32_t len) +{ + wmi_11d_new_country_event_fixed_param *reg_11d_country_event; + WMI_11D_NEW_COUNTRY_EVENTID_param_tlvs *param_buf; + + param_buf = (WMI_11D_NEW_COUNTRY_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("invalid 11d country event buf"); + return QDF_STATUS_E_FAILURE; + } + + reg_11d_country_event = param_buf->fixed_param; + + qdf_mem_copy(reg_11d_country->alpha2, + ®_11d_country_event->new_alpha2, REG_ALPHA2_LEN); + reg_11d_country->alpha2[REG_ALPHA2_LEN] = '\0'; + + WMI_LOGD("processed 11d country event, new cc %s", + reg_11d_country->alpha2); + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_reg_ch_avoid_event_tlv( + wmi_unified_t wmi_handle, uint8_t *evt_buf, + struct ch_avoid_ind_type *ch_avoid_ind, uint32_t len) +{ + wmi_avoid_freq_ranges_event_fixed_param *afr_fixed_param; + wmi_avoid_freq_range_desc *afr_desc; + uint32_t num_freq_ranges, freq_range_idx; + WMI_WLAN_FREQ_AVOID_EVENTID_param_tlvs *param_buf = + (WMI_WLAN_FREQ_AVOID_EVENTID_param_tlvs *) evt_buf; + + if (!param_buf) { + WMI_LOGE("Invalid channel avoid event buffer"); + return QDF_STATUS_E_INVAL; + } + + afr_fixed_param = param_buf->fixed_param; + if (!afr_fixed_param) { + WMI_LOGE("Invalid channel avoid event fixed param buffer"); + return QDF_STATUS_E_INVAL; + } + + if (!ch_avoid_ind) { + WMI_LOGE("Invalid channel avoid indication buffer"); + return QDF_STATUS_E_INVAL; + } + if (param_buf->num_avd_freq_range < afr_fixed_param->num_freq_ranges) { + WMI_LOGE(FL("no.of freq ranges 
exceeded the limit")); + return QDF_STATUS_E_INVAL; + } + num_freq_ranges = (afr_fixed_param->num_freq_ranges > + CH_AVOID_MAX_RANGE) ? CH_AVOID_MAX_RANGE : + afr_fixed_param->num_freq_ranges; + + WMI_LOGD("Channel avoid event received with %d ranges", + num_freq_ranges); + + ch_avoid_ind->ch_avoid_range_cnt = num_freq_ranges; + afr_desc = (wmi_avoid_freq_range_desc *)(param_buf->avd_freq_range); + for (freq_range_idx = 0; freq_range_idx < num_freq_ranges; + freq_range_idx++) { + ch_avoid_ind->avoid_freq_range[freq_range_idx].start_freq = + afr_desc->start_freq; + ch_avoid_ind->avoid_freq_range[freq_range_idx].end_freq = + afr_desc->end_freq; + WMI_LOGD("range %d tlv id %u, start freq %u, end freq %u", + freq_range_idx, afr_desc->tlv_header, + afr_desc->start_freq, afr_desc->end_freq); + afr_desc++; + } + + return QDF_STATUS_SUCCESS; +} +#ifdef DFS_COMPONENT_ENABLE +/** + * extract_dfs_cac_complete_event_tlv() - extract cac complete event + * @wmi_handle: wma handle + * @evt_buf: event buffer + * @vdev_id: vdev id + * @len: length of buffer + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_dfs_cac_complete_event_tlv(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + uint32_t *vdev_id, + uint32_t len) +{ + WMI_VDEV_DFS_CAC_COMPLETE_EVENTID_param_tlvs *param_tlvs; + wmi_vdev_dfs_cac_complete_event_fixed_param *cac_event; + + param_tlvs = (WMI_VDEV_DFS_CAC_COMPLETE_EVENTID_param_tlvs *) evt_buf; + if (!param_tlvs) { + WMI_LOGE("invalid cac complete event buf"); + return QDF_STATUS_E_FAILURE; + } + + cac_event = param_tlvs->fixed_param; + *vdev_id = cac_event->vdev_id; + WMI_LOGD("processed cac complete event vdev %d", *vdev_id); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_dfs_radar_detection_event_tlv() - extract radar found event + * @wmi_handle: wma handle + * @evt_buf: event buffer + * @radar_found: radar found event info + * @len: length of buffer + * + * Return: 0 for success or error code + */ +static QDF_STATUS 
extract_dfs_radar_detection_event_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct radar_found_info *radar_found, + uint32_t len) +{ + WMI_PDEV_DFS_RADAR_DETECTION_EVENTID_param_tlvs *param_tlv; + wmi_pdev_dfs_radar_detection_event_fixed_param *radar_event; + + param_tlv = (WMI_PDEV_DFS_RADAR_DETECTION_EVENTID_param_tlvs *) evt_buf; + if (!param_tlv) { + WMI_LOGE("invalid radar detection event buf"); + return QDF_STATUS_E_FAILURE; + } + + radar_event = param_tlv->fixed_param; + radar_found->pdev_id = convert_target_pdev_id_to_host_pdev_id( + radar_event->pdev_id); + if (radar_found->pdev_id == WMI_HOST_PDEV_ID_INVALID) + return QDF_STATUS_E_FAILURE; + + radar_found->detection_mode = radar_event->detection_mode; + radar_found->chan_freq = radar_event->chan_freq; + radar_found->chan_width = radar_event->chan_width; + radar_found->detector_id = radar_event->detector_id; + radar_found->segment_id = radar_event->segment_id; + radar_found->timestamp = radar_event->timestamp; + radar_found->is_chirp = radar_event->is_chirp; + radar_found->freq_offset = radar_event->freq_offset; + radar_found->sidx = radar_event->sidx; + + WMI_LOGI("processed radar found event pdev %d," + "Radar Event Info:pdev_id %d,timestamp %d,chan_freq (dur) %d," + "chan_width (RSSI) %d,detector_id (false_radar) %d," + "freq_offset (radar_check) %d,segment_id %d,sidx %d," + "is_chirp %d,detection mode %d\n", + radar_event->pdev_id, radar_found->pdev_id, + radar_event->timestamp, radar_event->chan_freq, + radar_event->chan_width, radar_event->detector_id, + radar_event->freq_offset, radar_event->segment_id, + radar_event->sidx, radar_event->is_chirp, + radar_event->detection_mode); + + return QDF_STATUS_SUCCESS; +} + +#ifdef QCA_MCL_DFS_SUPPORT +/** + * extract_wlan_radar_event_info_tlv() - extract radar pulse event + * @wmi_handle: wma handle + * @evt_buf: event buffer + * @wlan_radar_event: Pointer to struct radar_event_info + * @len: length of buffer + * + * Return: QDF_STATUS + */ 
+static QDF_STATUS extract_wlan_radar_event_info_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct radar_event_info *wlan_radar_event, + uint32_t len) +{ + WMI_DFS_RADAR_EVENTID_param_tlvs *param_tlv; + wmi_dfs_radar_event_fixed_param *radar_event; + + param_tlv = (WMI_DFS_RADAR_EVENTID_param_tlvs *)evt_buf; + if (!param_tlv) { + WMI_LOGE("invalid wlan radar event buf"); + return QDF_STATUS_E_FAILURE; + } + + radar_event = param_tlv->fixed_param; + wlan_radar_event->pulse_is_chirp = radar_event->pulse_is_chirp; + wlan_radar_event->pulse_center_freq = radar_event->pulse_center_freq; + wlan_radar_event->pulse_duration = radar_event->pulse_duration; + wlan_radar_event->rssi = radar_event->rssi; + wlan_radar_event->pulse_detect_ts = radar_event->pulse_detect_ts; + wlan_radar_event->upload_fullts_high = radar_event->upload_fullts_high; + wlan_radar_event->upload_fullts_low = radar_event->upload_fullts_low; + wlan_radar_event->peak_sidx = radar_event->peak_sidx; + wlan_radar_event->delta_peak = radar_event->pulse_delta_peak; + wlan_radar_event->delta_diff = radar_event->pulse_delta_diff; + if (radar_event->pulse_flags & + WMI_DFS_RADAR_PULSE_FLAG_MASK_PSIDX_DIFF_VALID) { + wlan_radar_event->is_psidx_diff_valid = true; + wlan_radar_event->psidx_diff = radar_event->psidx_diff; + } else { + wlan_radar_event->is_psidx_diff_valid = false; + } + + wlan_radar_event->pdev_id = radar_event->pdev_id; + + return QDF_STATUS_SUCCESS; +} +#else +static QDF_STATUS extract_wlan_radar_event_info_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct radar_event_info *wlan_radar_event, + uint32_t len) +{ + return QDF_STATUS_SUCCESS; +} +#endif +#endif + +/** + * send_get_rcpi_cmd_tlv() - send request for rcpi value + * @wmi_handle: wmi handle + * @get_rcpi_param: rcpi params + * + * Return: QDF status + */ +static QDF_STATUS send_get_rcpi_cmd_tlv(wmi_unified_t wmi_handle, + struct rcpi_req *get_rcpi_param) +{ + wmi_buf_t buf; + wmi_request_rcpi_cmd_fixed_param *cmd; + 
uint8_t len = sizeof(wmi_request_rcpi_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed to allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_request_rcpi_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_request_rcpi_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_request_rcpi_cmd_fixed_param)); + + cmd->vdev_id = get_rcpi_param->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(get_rcpi_param->mac_addr, + &cmd->peer_macaddr); + + switch (get_rcpi_param->measurement_type) { + + case RCPI_MEASUREMENT_TYPE_AVG_MGMT: + cmd->measurement_type = WMI_RCPI_MEASUREMENT_TYPE_AVG_MGMT; + break; + + case RCPI_MEASUREMENT_TYPE_AVG_DATA: + cmd->measurement_type = WMI_RCPI_MEASUREMENT_TYPE_AVG_DATA; + break; + + case RCPI_MEASUREMENT_TYPE_LAST_MGMT: + cmd->measurement_type = WMI_RCPI_MEASUREMENT_TYPE_LAST_MGMT; + break; + + case RCPI_MEASUREMENT_TYPE_LAST_DATA: + cmd->measurement_type = WMI_RCPI_MEASUREMENT_TYPE_LAST_DATA; + break; + + default: + /* + * invalid rcpi measurement type, fall back to + * RCPI_MEASUREMENT_TYPE_AVG_MGMT + */ + cmd->measurement_type = WMI_RCPI_MEASUREMENT_TYPE_AVG_MGMT; + break; + } + WMI_LOGD("RCPI REQ VDEV_ID:%d-->", cmd->vdev_id); + wmi_mtrace(WMI_REQUEST_RCPI_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_REQUEST_RCPI_CMDID)) { + + WMI_LOGE("%s: Failed to send WMI_REQUEST_RCPI_CMDID", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_rcpi_response_event_tlv() - Extract RCPI event params + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @res: pointer to hold rcpi response from firmware + * + * Return: QDF_STATUS_SUCCESS for successful event parse + * else QDF_STATUS_E_INVAL or QDF_STATUS_E_FAILURE + */ +static QDF_STATUS +extract_rcpi_response_event_tlv(wmi_unified_t wmi_handle, + void *evt_buf, struct rcpi_res 
*res) +{ + WMI_UPDATE_RCPI_EVENTID_param_tlvs *param_buf; + wmi_update_rcpi_event_fixed_param *event; + + param_buf = (WMI_UPDATE_RCPI_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE(FL("Invalid rcpi event")); + return QDF_STATUS_E_INVAL; + } + + event = param_buf->fixed_param; + res->vdev_id = event->vdev_id; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&event->peer_macaddr, res->mac_addr); + + switch (event->measurement_type) { + + case WMI_RCPI_MEASUREMENT_TYPE_AVG_MGMT: + res->measurement_type = RCPI_MEASUREMENT_TYPE_AVG_MGMT; + break; + + case WMI_RCPI_MEASUREMENT_TYPE_AVG_DATA: + res->measurement_type = RCPI_MEASUREMENT_TYPE_AVG_DATA; + break; + + case WMI_RCPI_MEASUREMENT_TYPE_LAST_MGMT: + res->measurement_type = RCPI_MEASUREMENT_TYPE_LAST_MGMT; + break; + + case WMI_RCPI_MEASUREMENT_TYPE_LAST_DATA: + res->measurement_type = RCPI_MEASUREMENT_TYPE_LAST_DATA; + break; + + default: + WMI_LOGE(FL("Invalid rcpi measurement type from firmware")); + res->measurement_type = RCPI_MEASUREMENT_TYPE_INVALID; + return QDF_STATUS_E_FAILURE; + } + + if (event->status) + return QDF_STATUS_E_FAILURE; + else + return QDF_STATUS_SUCCESS; +} + +/** + * convert_host_pdev_id_to_target_pdev_id_legacy() - Convert pdev_id from + * host to target defines. For legacy there is not conversion + * required. Just return pdev_id as it is. + * @param pdev_id: host pdev_id to be converted. + * Return: target pdev_id after conversion. + */ +static uint32_t convert_host_pdev_id_to_target_pdev_id_legacy( + uint32_t pdev_id) +{ + if (pdev_id == WMI_HOST_PDEV_ID_SOC) + return WMI_PDEV_ID_SOC; + + /*No conversion required*/ + return pdev_id; +} + +/** + * convert_target_pdev_id_to_host_pdev_id_legacy() - Convert pdev_id from + * target to host defines. For legacy there is not conversion + * required. Just return pdev_id as it is. + * @param pdev_id: target pdev_id to be converted. + * Return: host pdev_id after conversion. 
+ */ +static uint32_t convert_target_pdev_id_to_host_pdev_id_legacy( + uint32_t pdev_id) +{ + /*No conversion required*/ + return pdev_id; +} + +/** + * send_set_country_cmd_tlv() - WMI scan channel list function + * @param wmi_handle : handle to WMI. + * @param param : pointer to hold scan channel list parameter + * + * Return: 0 on success and -ve on failure. + */ +static QDF_STATUS send_set_country_cmd_tlv(wmi_unified_t wmi_handle, + struct set_country *params) +{ + wmi_buf_t buf; + QDF_STATUS qdf_status; + wmi_set_current_country_cmd_fixed_param *cmd; + uint16_t len = sizeof(*cmd); + uint8_t pdev_id = params->pdev_id; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + qdf_status = QDF_STATUS_E_NOMEM; + goto end; + } + + cmd = (wmi_set_current_country_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_set_current_country_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_set_current_country_cmd_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_host_pdev_id_to_target(pdev_id); + WMI_LOGD("setting current country to %s and target pdev_id = %u", + params->country, cmd->pdev_id); + + qdf_mem_copy((uint8_t *)&cmd->new_alpha2, params->country, 3); + + wmi_mtrace(WMI_SET_CURRENT_COUNTRY_CMDID, NO_SESSION, 0); + qdf_status = wmi_unified_cmd_send(wmi_handle, + buf, len, WMI_SET_CURRENT_COUNTRY_CMDID); + + if (QDF_IS_STATUS_ERROR(qdf_status)) { + WMI_LOGE("Failed to send WMI_SET_CURRENT_COUNTRY_CMDID"); + wmi_buf_free(buf); + } + +end: + return qdf_status; +} + +#define WMI_REG_COUNTRY_ALPHA_SET(alpha, val0, val1, val2) do { \ + WMI_SET_BITS(alpha, 0, 8, val0); \ + WMI_SET_BITS(alpha, 8, 8, val1); \ + WMI_SET_BITS(alpha, 16, 8, val2); \ + } while (0) + +static QDF_STATUS send_user_country_code_cmd_tlv(wmi_unified_t wmi_handle, + uint8_t pdev_id, struct cc_regdmn_s *rd) +{ + wmi_set_init_country_cmd_fixed_param *cmd; + uint16_t len; + wmi_buf_t buf; + int ret; + + len = 
sizeof(wmi_set_init_country_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + cmd = (wmi_set_init_country_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_set_init_country_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_set_init_country_cmd_fixed_param)); + + cmd->pdev_id = wmi_handle->ops->convert_pdev_id_host_to_target(pdev_id); + + if (rd->flags == CC_IS_SET) { + cmd->countrycode_type = WMI_COUNTRYCODE_COUNTRY_ID; + cmd->country_code.country_id = rd->cc.country_code; + } else if (rd->flags == ALPHA_IS_SET) { + cmd->countrycode_type = WMI_COUNTRYCODE_ALPHA2; + WMI_REG_COUNTRY_ALPHA_SET(cmd->country_code.alpha2, + rd->cc.alpha[0], + rd->cc.alpha[1], + rd->cc.alpha[2]); + } else if (rd->flags == REGDMN_IS_SET) { + cmd->countrycode_type = WMI_COUNTRYCODE_DOMAIN_CODE; + cmd->country_code.domain_code = rd->cc.regdmn_id; + } + + wmi_mtrace(WMI_SET_INIT_COUNTRY_CMDID, NO_SESSION, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_SET_INIT_COUNTRY_CMDID); + if (ret) { + WMI_LOGE("Failed to config wow wakeup event"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_limit_off_chan_cmd_tlv() - send wmi cmd of limit off chan + * configuration params + * @wmi_handle: wmi handler + * @limit_off_chan_param: pointer to wmi_off_chan_param + * + * Return: 0 for success and non zero for failure + */ +static +QDF_STATUS send_limit_off_chan_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_limit_off_chan_param *limit_off_chan_param) +{ + wmi_vdev_limit_offchan_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint32_t len = sizeof(*cmd); + int err; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: failed to allocate memory for limit off chan cmd", + __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_vdev_limit_offchan_cmd_fixed_param 
*)wmi_buf_data(buf); + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_limit_offchan_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_vdev_limit_offchan_cmd_fixed_param)); + + cmd->vdev_id = limit_off_chan_param->vdev_id; + + cmd->flags &= 0; + if (limit_off_chan_param->status) + cmd->flags |= WMI_VDEV_LIMIT_OFFCHAN_ENABLE; + if (limit_off_chan_param->skip_dfs_chans) + cmd->flags |= WMI_VDEV_LIMIT_OFFCHAN_SKIP_DFS; + + cmd->max_offchan_time = limit_off_chan_param->max_offchan_time; + cmd->rest_time = limit_off_chan_param->rest_time; + + WMI_LOGE("%s: vdev_id=%d, flags =%x, max_offchan_time=%d, rest_time=%d", + __func__, cmd->vdev_id, cmd->flags, cmd->max_offchan_time, + cmd->rest_time); + + wmi_mtrace(WMI_VDEV_LIMIT_OFFCHAN_CMDID, cmd->vdev_id, 0); + err = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_VDEV_LIMIT_OFFCHAN_CMDID); + if (QDF_IS_STATUS_ERROR(err)) { + WMI_LOGE("Failed to send limit off chan cmd err=%d", err); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_set_arp_stats_req_cmd_tlv() - send wmi cmd to set arp stats request + * @wmi_handle: wmi handler + * @req_buf: set arp stats request buffer + * + * Return: 0 for success and non zero for failure + */ +static QDF_STATUS send_set_arp_stats_req_cmd_tlv(wmi_unified_t wmi_handle, + struct set_arp_stats *req_buf) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + int len; + uint8_t *buf_ptr; + wmi_vdev_set_arp_stats_cmd_fixed_param *wmi_set_arp; + + len = sizeof(wmi_vdev_set_arp_stats_cmd_fixed_param); + if (req_buf->pkt_type_bitmap) { + len += WMI_TLV_HDR_SIZE; + len += sizeof(wmi_vdev_set_connectivity_check_stats); + } + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + wmi_set_arp = + (wmi_vdev_set_arp_stats_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&wmi_set_arp->tlv_header, + 
WMITLV_TAG_STRUC_wmi_vdev_set_arp_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_set_arp_stats_cmd_fixed_param)); + + /* fill in per roam config values */ + wmi_set_arp->vdev_id = req_buf->vdev_id; + + wmi_set_arp->set_clr = req_buf->flag; + wmi_set_arp->pkt_type = req_buf->pkt_type; + wmi_set_arp->ipv4 = req_buf->ip_addr; + + WMI_LOGD("NUD Stats: vdev_id %u set_clr %u pkt_type:%u ipv4 %u", + wmi_set_arp->vdev_id, wmi_set_arp->set_clr, + wmi_set_arp->pkt_type, wmi_set_arp->ipv4); + + /* + * pkt_type_bitmap should be non-zero to ensure + * presence of additional stats. + */ + if (req_buf->pkt_type_bitmap) { + wmi_vdev_set_connectivity_check_stats *wmi_set_connect_stats; + + buf_ptr += sizeof(wmi_vdev_set_arp_stats_cmd_fixed_param); + WMITLV_SET_HDR(buf_ptr, + WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_vdev_set_connectivity_check_stats)); + buf_ptr += WMI_TLV_HDR_SIZE; + wmi_set_connect_stats = + (wmi_vdev_set_connectivity_check_stats *)buf_ptr; + WMITLV_SET_HDR(&wmi_set_connect_stats->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_set_connectivity_check_stats, + WMITLV_GET_STRUCT_TLVLEN( + wmi_vdev_set_connectivity_check_stats)); + wmi_set_connect_stats->pkt_type_bitmap = + req_buf->pkt_type_bitmap; + wmi_set_connect_stats->tcp_src_port = req_buf->tcp_src_port; + wmi_set_connect_stats->tcp_dst_port = req_buf->tcp_dst_port; + wmi_set_connect_stats->icmp_ipv4 = req_buf->icmp_ipv4; + + WMI_LOGD("Connectivity Stats: pkt_type_bitmap %u tcp_src_port:%u tcp_dst_port %u icmp_ipv4 %u", + wmi_set_connect_stats->pkt_type_bitmap, + wmi_set_connect_stats->tcp_src_port, + wmi_set_connect_stats->tcp_dst_port, + wmi_set_connect_stats->icmp_ipv4); + } + + /* Send per roam config parameters */ + wmi_mtrace(WMI_VDEV_SET_ARP_STAT_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_VDEV_SET_ARP_STAT_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("WMI_SET_ARP_STATS_CMDID failed, Error %d", + status); + goto error; + } + + WMI_LOGD(FL("set arp 
stats flag=%d, vdev=%d"), + req_buf->flag, req_buf->vdev_id); + return QDF_STATUS_SUCCESS; +error: + wmi_buf_free(buf); + + return status; +} + +/** + * send_get_arp_stats_req_cmd_tlv() - send wmi cmd to get arp stats request + * @wmi_handle: wmi handler + * @req_buf: get arp stats request buffer + * + * Return: 0 for success and non zero for failure + */ +static QDF_STATUS send_get_arp_stats_req_cmd_tlv(wmi_unified_t wmi_handle, + struct get_arp_stats *req_buf) +{ + wmi_buf_t buf = NULL; + QDF_STATUS status; + int len; + uint8_t *buf_ptr; + wmi_vdev_get_arp_stats_cmd_fixed_param *get_arp_stats; + + len = sizeof(wmi_vdev_get_arp_stats_cmd_fixed_param); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s : wmi_buf_alloc failed", __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + get_arp_stats = + (wmi_vdev_get_arp_stats_cmd_fixed_param *) buf_ptr; + WMITLV_SET_HDR(&get_arp_stats->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_get_arp_stats_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_get_arp_stats_cmd_fixed_param)); + + /* fill in arp stats req cmd values */ + get_arp_stats->vdev_id = req_buf->vdev_id; + + WMI_LOGI(FL("vdev=%d"), req_buf->vdev_id); + /* Send per roam config parameters */ + wmi_mtrace(WMI_VDEV_GET_ARP_STAT_CMDID, NO_SESSION, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, + len, WMI_VDEV_GET_ARP_STAT_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("WMI_GET_ARP_STATS_CMDID failed, Error %d", + status); + goto error; + } + + return QDF_STATUS_SUCCESS; +error: + wmi_buf_free(buf); + + return status; +} + +/** + * send_set_del_pmkid_cache_cmd_tlv() - send wmi cmd of set del pmkid + * @wmi_handle: wmi handler + * @pmk_info: pointer to PMK cache entry + * @vdev_id: vdev id + * + * Return: 0 for success and non zero for failure + */ +static QDF_STATUS send_set_del_pmkid_cache_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_unified_pmk_cache *pmk_info) +{ + 
+ /* NOTE(review): body of send_set_del_pmkid_cache_cmd_tlv continues below.
+  * A zero pmk_info->pmk_len is treated as a flush-all request (no wmi_pmk_cache
+  * TLV is appended in that case); otherwise exactly one cache entry is sent.
+  */
wmi_pdev_update_pmk_cache_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + uint8_t *buf_ptr; + wmi_pmk_cache *pmksa; + uint32_t len = sizeof(*cmd); + + if (pmk_info->pmk_len) + len += WMI_TLV_HDR_SIZE + sizeof(*pmksa); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: failed to allocate memory for set del pmkid cache", + __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_pdev_update_pmk_cache_cmd_fixed_param *) buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_update_pmk_cache_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_pdev_update_pmk_cache_cmd_fixed_param)); + + cmd->vdev_id = pmk_info->session_id; + + /* If pmk_info->pmk_len is 0, this is a flush request */ + if (!pmk_info->pmk_len) { + cmd->op_flag = WMI_PMK_CACHE_OP_FLAG_FLUSH_ALL; + cmd->num_cache = 0; + goto send_cmd; + } + + cmd->num_cache = 1; + buf_ptr += sizeof(*cmd); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(*pmksa)); + buf_ptr += WMI_TLV_HDR_SIZE; + + pmksa = (wmi_pmk_cache *)buf_ptr; + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_STRUC_wmi_pmk_cache, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pmk_cache)); + pmksa->pmk_len = pmk_info->pmk_len; + qdf_mem_copy(pmksa->pmk, pmk_info->pmk, pmksa->pmk_len); + pmksa->pmkid_len = pmk_info->pmkid_len; + qdf_mem_copy(pmksa->pmkid, pmk_info->pmkid, pmksa->pmkid_len); + qdf_mem_copy(&(pmksa->bssid), &(pmk_info->bssid), sizeof(wmi_mac_addr)); + pmksa->ssid.ssid_len = pmk_info->ssid.length; + qdf_mem_copy(&(pmksa->ssid.ssid), &(pmk_info->ssid.mac_ssid), + pmksa->ssid.ssid_len); + pmksa->cache_id = pmk_info->cache_id; + pmksa->cat_flag = pmk_info->cat_flag; + pmksa->action_flag = pmk_info->action_flag; + +send_cmd: + wmi_mtrace(WMI_PDEV_UPDATE_PMK_CACHE_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_UPDATE_PMK_CACHE_CMDID); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: failed to send set del pmkid 
cache command %d", + __func__, status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_pdev_caldata_version_check_cmd_tlv() - send caldata check cmd to fw + * @wmi_handle: wmi handle + * @param: reserved param + * + * Return: 0 for success or error code + */ +static QDF_STATUS +send_pdev_caldata_version_check_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t param) +{ + wmi_pdev_check_cal_version_cmd_fixed_param *cmd; + wmi_buf_t buf; + int32_t len = sizeof(wmi_pdev_check_cal_version_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_FAILURE; + } + cmd = (wmi_pdev_check_cal_version_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_pdev_check_cal_version_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_pdev_check_cal_version_cmd_fixed_param)); + cmd->pdev_id = param; /* set to 0x0 as expected from FW */ + wmi_mtrace(WMI_PDEV_CHECK_CAL_VERSION_CMDID, NO_SESSION, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_PDEV_CHECK_CAL_VERSION_CMDID)) { + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_pdev_caldata_version_check_ev_param_tlv() - extract caldata from event + * @wmi_handle: wmi handle + * @param evt_buf: pointer to event buffer + * @param param: Pointer to hold peer caldata version data + * + * Return: 0 for success or error code + */ +static QDF_STATUS extract_pdev_caldata_version_check_ev_param_tlv( + wmi_unified_t wmi_handle, + void *evt_buf, + wmi_host_pdev_check_cal_version_event *param) +{ + WMI_PDEV_CHECK_CAL_VERSION_EVENTID_param_tlvs *param_tlvs; + wmi_pdev_check_cal_version_event_fixed_param *event; + + param_tlvs = (WMI_PDEV_CHECK_CAL_VERSION_EVENTID_param_tlvs *) evt_buf; + if (!param_tlvs) { + WMI_LOGE("invalid cal version event buf"); + return QDF_STATUS_E_FAILURE; + } + event = param_tlvs->fixed_param; + if 
/* NOTE(review): continuation of extract_pdev_caldata_version_check_ev_param_tlv
 * — the check below force-terminates the board MCN string before copying it,
 * guarding against a non-NUL-terminated array from firmware.
 */
(event->board_mcn_detail[WMI_BOARD_MCN_STRING_MAX_SIZE] != '\0') + event->board_mcn_detail[WMI_BOARD_MCN_STRING_MAX_SIZE] = '\0'; + WMI_HOST_IF_MSG_COPY_CHAR_ARRAY(param->board_mcn_detail, + event->board_mcn_detail, WMI_BOARD_MCN_STRING_BUF_SIZE); + + param->software_cal_version = event->software_cal_version; + param->board_cal_version = event->board_cal_version; + param->cal_ok = event->cal_status; + + return QDF_STATUS_SUCCESS; +} + +/* + * send_btm_config_cmd_tlv() - Send wmi cmd for BTM config + * @wmi_handle: wmi handle + * @params: pointer to wmi_btm_config + * + * Return: QDF_STATUS + */ +static QDF_STATUS send_btm_config_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_btm_config *params) +{ + + wmi_btm_config_fixed_param *cmd; + wmi_buf_t buf; + uint32_t len; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + qdf_print("%s:wmi_buf_alloc failed\n", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_btm_config_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_btm_config_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_btm_config_fixed_param)); + cmd->vdev_id = params->vdev_id; + cmd->flags = params->btm_offload_config; + cmd->max_attempt_cnt = params->btm_max_attempt_cnt; + cmd->solicited_timeout_ms = params->btm_solicited_timeout; + cmd->stick_time_seconds = params->btm_sticky_time; + cmd->disassoc_timer_threshold = params->disassoc_timer_threshold; + + wmi_mtrace(WMI_ROAM_BTM_CONFIG_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ROAM_BTM_CONFIG_CMDID)) { + WMI_LOGE("%s: failed to send WMI_ROAM_BTM_CONFIG_CMDID", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_roam_bss_load_config_tlv() - send roam load bss trigger configuration + * @wmi_handle: wmi handle + * @parms: pointer to wmi_bss_load_config + * + * This function sends the roam load bss trigger configuration to fw. 
+ * the bss_load_threshold parameter is used to configure the maximum + * bss load percentage, above which the firmware should trigger roaming + * + * Return: QDF status + */ +static QDF_STATUS +send_roam_bss_load_config_tlv(wmi_unified_t wmi_handle, + struct wmi_bss_load_config *params) +{ + wmi_roam_bss_load_config_cmd_fixed_param *cmd; + wmi_buf_t buf; + uint32_t len; + + len = sizeof(*cmd); + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) + return QDF_STATUS_E_NOMEM; + + cmd = (wmi_roam_bss_load_config_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR( + &cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_roam_bss_load_config_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN(wmi_roam_bss_load_config_cmd_fixed_param)); + cmd->vdev_id = params->vdev_id; + cmd->bss_load_threshold = params->bss_load_threshold; + cmd->monitor_time_window = params->bss_load_sample_time; + + wmi_mtrace(WMI_ROAM_BSS_LOAD_CONFIG_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_ROAM_BSS_LOAD_CONFIG_CMDID)) { + WMI_LOGE("%s: failed to send WMI_ROAM_BSS_LOAD_CONFIG_CMDID ", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_obss_detection_cfg_cmd_tlv() - send obss detection + * configurations to firmware. + * @wmi_handle: wmi handle + * @obss_cfg_param: obss detection configurations + * + * Send WMI_SAP_OBSS_DETECTION_CFG_CMDID parameters to fw. 
+ * + * Return: QDF_STATUS + */ +static QDF_STATUS send_obss_detection_cfg_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_obss_detection_cfg_param *obss_cfg_param) +{ + wmi_buf_t buf; + wmi_sap_obss_detection_cfg_cmd_fixed_param *cmd; + uint8_t len = sizeof(wmi_sap_obss_detection_cfg_cmd_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed to allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_sap_obss_detection_cfg_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_sap_obss_detection_cfg_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_sap_obss_detection_cfg_cmd_fixed_param)); + + cmd->vdev_id = obss_cfg_param->vdev_id; + cmd->detect_period_ms = obss_cfg_param->obss_detect_period_ms; + cmd->b_ap_detect_mode = obss_cfg_param->obss_11b_ap_detect_mode; + cmd->b_sta_detect_mode = obss_cfg_param->obss_11b_sta_detect_mode; + cmd->g_ap_detect_mode = obss_cfg_param->obss_11g_ap_detect_mode; + cmd->a_detect_mode = obss_cfg_param->obss_11a_detect_mode; + cmd->ht_legacy_detect_mode = obss_cfg_param->obss_ht_legacy_detect_mode; + cmd->ht_mixed_detect_mode = obss_cfg_param->obss_ht_mixed_detect_mode; + cmd->ht_20mhz_detect_mode = obss_cfg_param->obss_ht_20mhz_detect_mode; + + wmi_mtrace(WMI_SAP_OBSS_DETECTION_CFG_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_SAP_OBSS_DETECTION_CFG_CMDID)) { + WMI_LOGE("Failed to send WMI_SAP_OBSS_DETECTION_CFG_CMDID"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_obss_detection_info_tlv() - Extract obss detection info + * received from firmware. 
+ * @evt_buf: pointer to event buffer + * @obss_detection: Pointer to hold obss detection info + * + * Return: QDF_STATUS + */ +static QDF_STATUS extract_obss_detection_info_tlv(uint8_t *evt_buf, + struct wmi_obss_detect_info + *obss_detection) +{ + WMI_SAP_OBSS_DETECTION_REPORT_EVENTID_param_tlvs *param_buf; + wmi_sap_obss_detection_info_evt_fixed_param *fix_param; + + if (!obss_detection) { + WMI_LOGE("%s: Invalid obss_detection event buffer", __func__); + return QDF_STATUS_E_INVAL; + } + + param_buf = (WMI_SAP_OBSS_DETECTION_REPORT_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("%s: Invalid evt_buf", __func__); + return QDF_STATUS_E_INVAL; + } + + fix_param = param_buf->fixed_param; + obss_detection->vdev_id = fix_param->vdev_id; + obss_detection->matched_detection_masks = + fix_param->matched_detection_masks; + WMI_MAC_ADDR_TO_CHAR_ARRAY(&fix_param->matched_bssid_addr, + &obss_detection->matched_bssid_addr[0]); + switch (fix_param->reason) { + case WMI_SAP_OBSS_DETECTION_EVENT_REASON_NOT_SUPPORT: + obss_detection->reason = OBSS_OFFLOAD_DETECTION_DISABLED; + break; + case WMI_SAP_OBSS_DETECTION_EVENT_REASON_PRESENT_NOTIFY: + obss_detection->reason = OBSS_OFFLOAD_DETECTION_PRESENT; + break; + case WMI_SAP_OBSS_DETECTION_EVENT_REASON_ABSENT_TIMEOUT: + obss_detection->reason = OBSS_OFFLOAD_DETECTION_ABSENT; + break; + default: + WMI_LOGE("%s: Invalid reason %d", __func__, fix_param->reason); + return QDF_STATUS_E_INVAL; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_roam_scan_stats_cmd_tlv() - Send roam scan stats req command to fw + * @wmi_handle: wmi handle + * @params: pointer to request structure + * + * Return: QDF_STATUS + */ +static QDF_STATUS +send_roam_scan_stats_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_roam_scan_stats_req *params) +{ + wmi_buf_t buf; + wmi_request_roam_scan_stats_cmd_fixed_param *cmd; + WMITLV_TAG_ID tag; + uint32_t size; + uint32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + 
WMI_LOGE(FL("Failed to allocate wmi buffer")); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_request_roam_scan_stats_cmd_fixed_param *)wmi_buf_data(buf); + + tag = WMITLV_TAG_STRUC_wmi_request_roam_scan_stats_cmd_fixed_param; + size = WMITLV_GET_STRUCT_TLVLEN( + wmi_request_roam_scan_stats_cmd_fixed_param); + WMITLV_SET_HDR(&cmd->tlv_header, tag, size); + + cmd->vdev_id = params->vdev_id; + + WMI_LOGD(FL("Roam Scan Stats Req vdev_id: %u"), cmd->vdev_id); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_REQUEST_ROAM_SCAN_STATS_CMDID)) { + WMI_LOGE("%s: Failed to send WMI_REQUEST_ROAM_SCAN_STATS_CMDID", + __func__); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_roam_scan_stats_res_evt_tlv() - Extract roam scan stats event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @vdev_id: output pointer to hold vdev id + * @res_param: output pointer to hold the allocated response + * + * Return: QDF_STATUS + */ +static QDF_STATUS +extract_roam_scan_stats_res_evt_tlv(wmi_unified_t wmi_handle, void *evt_buf, + uint32_t *vdev_id, + struct wmi_roam_scan_stats_res **res_param) +{ + WMI_ROAM_SCAN_STATS_EVENTID_param_tlvs *param_buf; + wmi_roam_scan_stats_event_fixed_param *fixed_param; + uint32_t *client_id = NULL; + wmi_roaming_timestamp *timestamp = NULL; + uint32_t *num_channels = NULL; + uint32_t *chan_info = NULL; + wmi_mac_addr *old_bssid = NULL; + uint32_t *is_roaming_success = NULL; + wmi_mac_addr *new_bssid = NULL; + uint32_t *num_roam_candidates = NULL; + wmi_roam_scan_trigger_reason *roam_reason = NULL; + wmi_mac_addr *bssid = NULL; + uint32_t *score = NULL; + uint32_t *channel = NULL; + uint32_t *rssi = NULL; + int chan_idx = 0, cand_idx = 0; + uint32_t total_len; + struct wmi_roam_scan_stats_res *res; + uint32_t i, j; + uint32_t num_scans, scan_param_size; + + *res_param = NULL; + *vdev_id = 0xFF; /* Initialize to invalid vdev id */ + param_buf = 
/* NOTE(review): each optional TLV array below is used only when its element
 * count matches num_scans (or the summed per-scan counts), so a malformed
 * event degrades to partially-filled results rather than OOB reads.
 */
(WMI_ROAM_SCAN_STATS_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE(FL("Invalid roam scan stats event")); + return QDF_STATUS_E_INVAL; + } + + fixed_param = param_buf->fixed_param; + + num_scans = fixed_param->num_roam_scans; + scan_param_size = sizeof(struct wmi_roam_scan_stats_params); + *vdev_id = fixed_param->vdev_id; + if (num_scans > WMI_ROAM_SCAN_STATS_MAX) { + wmi_err_rl("%u exceeded maximum roam scan stats: %u", + num_scans, WMI_ROAM_SCAN_STATS_MAX); + return QDF_STATUS_E_INVAL; + } + + total_len = sizeof(*res) + num_scans * scan_param_size; + + res = qdf_mem_malloc(total_len); + if (!res) { + WMI_LOGE("Failed to allocate roam scan stats response memory"); + return QDF_STATUS_E_NOMEM; + } + + if (!num_scans) { + *res_param = res; + return QDF_STATUS_SUCCESS; + } + + if (param_buf->client_id && + param_buf->num_client_id == num_scans) + client_id = param_buf->client_id; + + if (param_buf->timestamp && + param_buf->num_timestamp == num_scans) + timestamp = param_buf->timestamp; + + if (param_buf->old_bssid && + param_buf->num_old_bssid == num_scans) + old_bssid = param_buf->old_bssid; + + if (param_buf->new_bssid && + param_buf->num_new_bssid == num_scans) + new_bssid = param_buf->new_bssid; + + if (param_buf->is_roaming_success && + param_buf->num_is_roaming_success == num_scans) + is_roaming_success = param_buf->is_roaming_success; + + if (param_buf->roam_reason && + param_buf->num_roam_reason == num_scans) + roam_reason = param_buf->roam_reason; + + if (param_buf->num_channels && + param_buf->num_num_channels == num_scans) { + uint32_t count, chan_info_sum = 0; + + num_channels = param_buf->num_channels; + for (count = 0; count < param_buf->num_num_channels; count++) { + if (param_buf->num_channels[count] > + WMI_ROAM_SCAN_STATS_CHANNELS_MAX) { + wmi_err_rl("%u exceeded max scan channels %u", + param_buf->num_channels[count], + WMI_ROAM_SCAN_STATS_CHANNELS_MAX); + goto error; + } + chan_info_sum += param_buf->num_channels[count]; + } + + if 
(param_buf->chan_info && + param_buf->num_chan_info == chan_info_sum) + chan_info = param_buf->chan_info; + } + + if (param_buf->num_roam_candidates && + param_buf->num_num_roam_candidates == num_scans) { + uint32_t cnt, roam_cand_sum = 0; + + num_roam_candidates = param_buf->num_roam_candidates; + for (cnt = 0; cnt < param_buf->num_num_roam_candidates; cnt++) { + if (param_buf->num_roam_candidates[cnt] > + WMI_ROAM_SCAN_STATS_CANDIDATES_MAX) { + wmi_err_rl("%u exceeded max scan cand %u", + param_buf->num_roam_candidates[cnt], + WMI_ROAM_SCAN_STATS_CANDIDATES_MAX); + goto error; + } + roam_cand_sum += param_buf->num_roam_candidates[cnt]; + } + + if (param_buf->bssid && + param_buf->num_bssid == roam_cand_sum) + bssid = param_buf->bssid; + + if (param_buf->score && + param_buf->num_score == roam_cand_sum) + score = param_buf->score; + + if (param_buf->channel && + param_buf->num_channel == roam_cand_sum) + channel = param_buf->channel; + + if (param_buf->rssi && + param_buf->num_rssi == roam_cand_sum) + rssi = param_buf->rssi; + } + + res->num_roam_scans = num_scans; + for (i = 0; i < num_scans; i++) { + struct wmi_roam_scan_stats_params *roam = &res->roam_scan[i]; + + /* NOTE(review): upper32bit << 31 looks suspicious for composing a
+  * 64-bit timestamp (one would expect << 32) — confirm against the
+  * wmi_roaming_timestamp definition before changing.
+  */ + if (timestamp) + roam->time_stamp = timestamp[i].lower32bit | + (timestamp[i].upper32bit << 31); + + if (client_id) + roam->client_id = client_id[i]; + + if (num_channels) { + roam->num_scan_chans = num_channels[i]; + if (chan_info) { + for (j = 0; j < num_channels[i]; j++) + roam->scan_freqs[j] = + chan_info[chan_idx++]; + } + } + + if (is_roaming_success) + roam->is_roam_successful = is_roaming_success[i]; + + if (roam_reason) { + roam->trigger_id = roam_reason[i].trigger_id; + roam->trigger_value = roam_reason[i].trigger_value; + } + + if (num_roam_candidates) { + roam->num_roam_candidates = num_roam_candidates[i]; + + for (j = 0; j < num_roam_candidates[i]; j++) { + if (score) + roam->cand[j].score = score[cand_idx]; + if (rssi) + roam->cand[j].rssi = rssi[cand_idx]; + if (channel) + 
roam->cand[j].freq = + channel[cand_idx]; + + if (bssid) + WMI_MAC_ADDR_TO_CHAR_ARRAY( + &bssid[cand_idx], + roam->cand[j].bssid); + + cand_idx++; + } + } + + if (old_bssid) + WMI_MAC_ADDR_TO_CHAR_ARRAY(&old_bssid[i], + roam->old_bssid); + + if (new_bssid) + WMI_MAC_ADDR_TO_CHAR_ARRAY(&new_bssid[i], + roam->new_bssid); + } + + *res_param = res; + + return QDF_STATUS_SUCCESS; +error: + qdf_mem_free(res); + return QDF_STATUS_E_FAILURE; +} + +/** + * send_offload_11k_cmd_tlv() - send wmi cmd with 11k offload params + * @wmi_handle: wmi handler + * @params: pointer to 11k offload params + * + * Return: 0 for success and non zero for failure + */ +static QDF_STATUS send_offload_11k_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_11k_offload_params *params) +{ + wmi_11k_offload_report_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + uint8_t *buf_ptr; + wmi_neighbor_report_11k_offload_tlv_param + *neighbor_report_offload_params; + wmi_neighbor_report_offload *neighbor_report_offload; + + uint32_t len = sizeof(*cmd); + + if (params->offload_11k_bitmask & + WMI_11K_OFFLOAD_BITMAP_NEIGHBOR_REPORT_REQ) + len += WMI_TLV_HDR_SIZE + + sizeof(wmi_neighbor_report_11k_offload_tlv_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s: failed to allocate memory for 11k offload params", + __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_11k_offload_report_fixed_param *) buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_offload_11k_report_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_11k_offload_report_fixed_param)); + + cmd->vdev_id = params->vdev_id; + cmd->offload_11k = params->offload_11k_bitmask; + + if (params->offload_11k_bitmask & + WMI_11K_OFFLOAD_BITMAP_NEIGHBOR_REPORT_REQ) { + buf_ptr += sizeof(wmi_11k_offload_report_fixed_param); + + WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, + sizeof(wmi_neighbor_report_11k_offload_tlv_param)); + buf_ptr += WMI_TLV_HDR_SIZE; + + 
/* NOTE(review): continuation of send_offload_11k_cmd_tlv — fills the optional
 * neighbor-report TLV appended only when the neighbor-report bit is set in
 * the offload bitmask.
 */
neighbor_report_offload_params = + (wmi_neighbor_report_11k_offload_tlv_param *)buf_ptr; + WMITLV_SET_HDR(&neighbor_report_offload_params->tlv_header, + WMITLV_TAG_STRUC_wmi_neighbor_report_offload_tlv_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_neighbor_report_11k_offload_tlv_param)); + + neighbor_report_offload = &neighbor_report_offload_params-> + neighbor_rep_ofld_params; + + neighbor_report_offload->time_offset = + params->neighbor_report_params.time_offset; + neighbor_report_offload->low_rssi_offset = + params->neighbor_report_params.low_rssi_offset; + neighbor_report_offload->bmiss_count_trigger = + params->neighbor_report_params.bmiss_count_trigger; + neighbor_report_offload->per_threshold_offset = + params->neighbor_report_params.per_threshold_offset; + neighbor_report_offload->neighbor_report_cache_timeout = + params->neighbor_report_params. + neighbor_report_cache_timeout; + neighbor_report_offload->max_neighbor_report_req_cap = + params->neighbor_report_params. + max_neighbor_report_req_cap; + neighbor_report_offload->ssid.ssid_len = + params->neighbor_report_params.ssid.length; + qdf_mem_copy(neighbor_report_offload->ssid.ssid, + &params->neighbor_report_params.ssid.mac_ssid, + neighbor_report_offload->ssid.ssid_len); + } + + wmi_mtrace(WMI_11K_OFFLOAD_REPORT_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_11K_OFFLOAD_REPORT_CMDID); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: failed to send 11k offload command %d", + __func__, status); + wmi_buf_free(buf); + } + + return status; +} + +/** + * send_invoke_neighbor_report_cmd_tlv() - send invoke 11k neighbor report + * command + * @wmi_handle: wmi handler + * @params: pointer to neighbor report invoke params + * + * Return: 0 for success and non zero for failure + */ +static QDF_STATUS send_invoke_neighbor_report_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_invoke_neighbor_report_params *params) +{ + wmi_11k_offload_invoke_neighbor_report_fixed_param *cmd; + 
wmi_buf_t buf; + QDF_STATUS status; + uint8_t *buf_ptr; + uint32_t len = sizeof(*cmd); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGP("%s:failed to allocate memory for neighbor invoke cmd", + __func__); + return QDF_STATUS_E_NOMEM; + } + + buf_ptr = (uint8_t *) wmi_buf_data(buf); + cmd = (wmi_11k_offload_invoke_neighbor_report_fixed_param *) buf_ptr; + + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_invoke_neighbor_report_fixed_param, + WMITLV_GET_STRUCT_TLVLEN( + wmi_11k_offload_invoke_neighbor_report_fixed_param)); + + cmd->vdev_id = params->vdev_id; + cmd->flags = params->send_resp_to_host; + + cmd->ssid.ssid_len = params->ssid.length; + qdf_mem_copy(cmd->ssid.ssid, + &params->ssid.mac_ssid, + cmd->ssid.ssid_len); + + wmi_mtrace(WMI_11K_INVOKE_NEIGHBOR_REPORT_CMDID, cmd->vdev_id, 0); + status = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_11K_INVOKE_NEIGHBOR_REPORT_CMDID); + if (status != QDF_STATUS_SUCCESS) { + WMI_LOGE("%s: failed to send invoke neighbor report command %d", + __func__, status); + wmi_buf_free(buf); + } + + return status; +} + +#ifdef WLAN_SUPPORT_GREEN_AP +static QDF_STATUS extract_green_ap_egap_status_info_tlv( + uint8_t *evt_buf, + struct wlan_green_ap_egap_status_info *egap_status_info_params) +{ + WMI_AP_PS_EGAP_INFO_EVENTID_param_tlvs *param_buf; + wmi_ap_ps_egap_info_event_fixed_param *egap_info_event; + wmi_ap_ps_egap_info_chainmask_list *chainmask_event; + + param_buf = (WMI_AP_PS_EGAP_INFO_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("Invalid EGAP Info status event buffer"); + return QDF_STATUS_E_INVAL; + } + + egap_info_event = (wmi_ap_ps_egap_info_event_fixed_param *) + param_buf->fixed_param; + chainmask_event = (wmi_ap_ps_egap_info_chainmask_list *) + param_buf->chainmask_list; + + if (!egap_info_event || !chainmask_event) { + WMI_LOGE("Invalid EGAP Info event or chainmask event"); + return QDF_STATUS_E_INVAL; + } + + egap_status_info_params->status = egap_info_event->status; + 
/* NOTE(review): remainder of extract_green_ap_egap_status_info_tlv, then the
 * BSS color change enable sender.
 */
egap_status_info_params->mac_id = chainmask_event->mac_id; + egap_status_info_params->tx_chainmask = chainmask_event->tx_chainmask; + egap_status_info_params->rx_chainmask = chainmask_event->rx_chainmask; + + return QDF_STATUS_SUCCESS; +} +#endif + +/* + * send_bss_color_change_enable_cmd_tlv() - Send command to enable or disable of + * updating bss color change within firmware when AP announces bss color change. + * @wmi_handle: wmi handle + * @vdev_id: vdev ID + * @enable: enable bss color change within firmware + * + * Send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID parameters to fw. + * + * Return: QDF_STATUS + */ +static QDF_STATUS send_bss_color_change_enable_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, + bool enable) +{ + wmi_buf_t buf; + wmi_bss_color_change_enable_fixed_param *cmd; + uint8_t len = sizeof(wmi_bss_color_change_enable_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed to allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_bss_color_change_enable_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_bss_color_change_enable_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_bss_color_change_enable_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->enable = enable; + wmi_mtrace(WMI_BSS_COLOR_CHANGE_ENABLE_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_BSS_COLOR_CHANGE_ENABLE_CMDID)) { + WMI_LOGE("Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID"); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/** + * send_obss_color_collision_cfg_cmd_tlv() - send bss color detection + * configurations to firmware. + * @wmi_handle: wmi handle + * @cfg_param: obss detection configurations + * + * Send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID parameters to fw. 
+ * + * Return: QDF_STATUS + */ +static QDF_STATUS send_obss_color_collision_cfg_cmd_tlv( + wmi_unified_t wmi_handle, + struct wmi_obss_color_collision_cfg_param *cfg_param) +{ + wmi_buf_t buf; + wmi_obss_color_collision_det_config_fixed_param *cmd; + uint8_t len = sizeof(wmi_obss_color_collision_det_config_fixed_param); + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed to allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_obss_color_collision_det_config_fixed_param *)wmi_buf_data( + buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_obss_color_collision_det_config_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_obss_color_collision_det_config_fixed_param)); + cmd->vdev_id = cfg_param->vdev_id; + cmd->flags = cfg_param->flags; + cmd->current_bss_color = cfg_param->current_bss_color; + cmd->detection_period_ms = cfg_param->detection_period_ms; + cmd->scan_period_ms = cfg_param->scan_period_ms; + cmd->free_slot_expiry_time_ms = cfg_param->free_slot_expiry_time_ms; + + switch (cfg_param->evt_type) { + case OBSS_COLOR_COLLISION_DETECTION_DISABLE: + cmd->evt_type = WMI_BSS_COLOR_COLLISION_DISABLE; + break; + case OBSS_COLOR_COLLISION_DETECTION: + cmd->evt_type = WMI_BSS_COLOR_COLLISION_DETECTION; + break; + case OBSS_COLOR_FREE_SLOT_TIMER_EXPIRY: + cmd->evt_type = WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY; + break; + case OBSS_COLOR_FREE_SLOT_AVAILABLE: + cmd->evt_type = WMI_BSS_COLOR_FREE_SLOT_AVAILABLE; + break; + default: + WMI_LOGE("%s: invalid event type: %d", + __func__, cfg_param->evt_type); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + wmi_mtrace(WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID, cmd->vdev_id, 0); + if (wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID)) { + WMI_LOGE("%s: Sending OBSS color det cmd failed, vdev_id: %d", + __func__, cfg_param->vdev_id); + wmi_buf_free(buf); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; 
/* NOTE(review): below — BSS color collision event extractor, then the PHY
 * error extractors (combined header first, then per-error records).
 */
+} + +/** + * extract_obss_color_collision_info_tlv() - Extract bss color collision info + * received from firmware. + * @evt_buf: pointer to event buffer + * @info: Pointer to hold bss collision info + * + * Return: QDF_STATUS + */ +static QDF_STATUS extract_obss_color_collision_info_tlv(uint8_t *evt_buf, + struct wmi_obss_color_collision_info *info) +{ + WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID_param_tlvs *param_buf; + wmi_obss_color_collision_evt_fixed_param *fix_param; + + if (!info) { + WMI_LOGE("%s: Invalid obss color buffer", __func__); + return QDF_STATUS_E_INVAL; + } + + param_buf = (WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID_param_tlvs *) + evt_buf; + if (!param_buf) { + WMI_LOGE("%s: Invalid evt_buf", __func__); + return QDF_STATUS_E_INVAL; + } + + fix_param = param_buf->fixed_param; + info->vdev_id = fix_param->vdev_id; + info->obss_color_bitmap_bit0to31 = fix_param->bss_color_bitmap_bit0to31; + info->obss_color_bitmap_bit32to63 = + fix_param->bss_color_bitmap_bit32to63; + + switch (fix_param->evt_type) { + case WMI_BSS_COLOR_COLLISION_DISABLE: + info->evt_type = OBSS_COLOR_COLLISION_DETECTION_DISABLE; + break; + case WMI_BSS_COLOR_COLLISION_DETECTION: + info->evt_type = OBSS_COLOR_COLLISION_DETECTION; + break; + case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY: + info->evt_type = OBSS_COLOR_FREE_SLOT_TIMER_EXPIRY; + break; + case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE: + info->evt_type = OBSS_COLOR_FREE_SLOT_AVAILABLE; + break; + default: + WMI_LOGE("%s: invalid event type: %d, vdev_id: %d", + __func__, fix_param->evt_type, fix_param->vdev_id); + return QDF_STATUS_E_FAILURE; + } + + return QDF_STATUS_SUCCESS; +} + +/* + * extract_comb_phyerr_tlv() - extract comb phy error from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @datalen: data length of event buffer + * @buf_offset: Pointer to hold value of current event buffer offset + * post extraction + * @phyerr: Pointer to hold phyerr + * + * Return: QDF_STATUS + */ +static QDF_STATUS 
extract_comb_phyerr_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint16_t datalen, + uint16_t *buf_offset, + wmi_host_phyerr_t *phyerr) +{ + WMI_PHYERR_EVENTID_param_tlvs *param_tlvs; + wmi_comb_phyerr_rx_hdr *pe_hdr; + + param_tlvs = (WMI_PHYERR_EVENTID_param_tlvs *)evt_buf; + if (!param_tlvs) { + WMI_LOGD("%s: Received null data from FW", __func__); + return QDF_STATUS_E_FAILURE; + } + + pe_hdr = param_tlvs->hdr; + if (!pe_hdr) { + WMI_LOGD("%s: Received Data PE Header is NULL", __func__); + return QDF_STATUS_E_FAILURE; + } + + /* Ensure it's at least the size of the header */ + if (datalen < sizeof(*pe_hdr)) { + WMI_LOGD("%s: Expected minimum size %zu, received %d", + __func__, sizeof(*pe_hdr), datalen); + return QDF_STATUS_E_FAILURE; + } + + phyerr->pdev_id = wmi_handle->ops-> + convert_pdev_id_target_to_host(pe_hdr->pdev_id); + phyerr->tsf64 = pe_hdr->tsf_l32; + phyerr->tsf64 |= (((uint64_t)pe_hdr->tsf_u32) << 32); + phyerr->bufp = param_tlvs->bufp; + + if (pe_hdr->buf_len > param_tlvs->num_bufp) { + WMI_LOGD("Invalid buf_len %d, num_bufp %d", + pe_hdr->buf_len, param_tlvs->num_bufp); + return QDF_STATUS_E_FAILURE; + } + + phyerr->buf_len = pe_hdr->buf_len; + phyerr->phy_err_mask0 = pe_hdr->rsPhyErrMask0; + phyerr->phy_err_mask1 = pe_hdr->rsPhyErrMask1; + *buf_offset = sizeof(*pe_hdr) + sizeof(uint32_t); + + return QDF_STATUS_SUCCESS; +} + +/** + * extract_single_phyerr_tlv() - extract single phy error from event + * @wmi_handle: wmi handle + * @evt_buf: pointer to event buffer + * @datalen: data length of event buffer + * @buf_offset: Pointer to hold value of current event buffer offset + * post extraction + * @phyerr: Pointer to hold phyerr + * + * Return: QDF_STATUS + */ +static QDF_STATUS extract_single_phyerr_tlv(wmi_unified_t wmi_handle, + void *evt_buf, + uint16_t datalen, + uint16_t *buf_offset, + wmi_host_phyerr_t *phyerr) +{ + wmi_single_phyerr_rx_event *ev; + uint16_t n = *buf_offset; + uint8_t *data = (uint8_t *)evt_buf; + + if (n < datalen) { 
+ if ((datalen - n) < sizeof(ev->hdr)) { + WMI_LOGD("%s: Not enough space. len=%d, n=%d, hdr=%zu", + __func__, datalen, n, sizeof(ev->hdr)); + return QDF_STATUS_E_FAILURE; + } + + /* + * Obtain a pointer to the beginning of the current event. + * data[0] is the beginning of the WMI payload. + */ + ev = (wmi_single_phyerr_rx_event *)&data[n]; + + /* + * Sanity check the buffer length of the event against + * what we currently have. + * + * Since buf_len is 32 bits, we check if it overflows + * a large 32 bit value. It's not 0x7fffffff because + * we increase n by (buf_len + sizeof(hdr)), which would + * in itself cause n to overflow. + * + * If "int" is 64 bits then this becomes a moot point. + */ + if (ev->hdr.buf_len > PHYERROR_MAX_BUFFER_LENGTH) { + WMI_LOGD("%s: buf_len is garbage 0x%x", + __func__, ev->hdr.buf_len); + return QDF_STATUS_E_FAILURE; + } + + if ((n + ev->hdr.buf_len) > datalen) { + WMI_LOGD("%s: len exceeds n=%d, buf_len=%d, datalen=%d", + __func__, n, ev->hdr.buf_len, datalen); + return QDF_STATUS_E_FAILURE; + } + + phyerr->phy_err_code = WMI_UNIFIED_PHYERRCODE_GET(&ev->hdr); + phyerr->tsf_timestamp = ev->hdr.tsf_timestamp; + phyerr->bufp = &ev->bufp[0]; + phyerr->buf_len = ev->hdr.buf_len; + phyerr->rf_info.rssi_comb = WMI_UNIFIED_RSSI_COMB_GET(&ev->hdr); + + /* + * Advance the buffer pointer to the next PHY error. + * buflen is the length of this payload, so we need to + * advance past the current header _AND_ the payload. + */ + n += sizeof(*ev) + ev->hdr.buf_len; + } + *buf_offset = n; + + return QDF_STATUS_SUCCESS; +} + +#ifdef WLAN_MWS_INFO_DEBUGFS +/** + * send_mws_coex_status_req_cmd_tlv() - send coex cmd to fw + * + * @wmi_handle: wmi handle + * @vdev_id: vdev id + * @cmd_id: Coex command id + * + * Send WMI_VDEV_GET_MWS_COEX_INFO_CMDID to fw. 
+ * + * Return: QDF_STATUS + */ +static QDF_STATUS send_mws_coex_status_req_cmd_tlv(wmi_unified_t wmi_handle, + uint32_t vdev_id, + uint32_t cmd_id) +{ + wmi_buf_t buf; + wmi_vdev_get_mws_coex_info_cmd_fixed_param *cmd; + uint16_t len = sizeof(*cmd); + int ret; + + buf = wmi_buf_alloc(wmi_handle, len); + if (!buf) { + WMI_LOGE("%s: Failed to allocate wmi buffer", __func__); + return QDF_STATUS_E_NOMEM; + } + + cmd = (wmi_vdev_get_mws_coex_info_cmd_fixed_param *)wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_vdev_get_mws_coex_info_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_vdev_get_mws_coex_info_cmd_fixed_param)); + cmd->vdev_id = vdev_id; + cmd->cmd_id = cmd_id; + wmi_mtrace(WMI_VDEV_GET_MWS_COEX_INFO_CMDID, vdev_id, 0); + ret = wmi_unified_cmd_send(wmi_handle, buf, len, + WMI_VDEV_GET_MWS_COEX_INFO_CMDID); + if (QDF_IS_STATUS_ERROR(ret)) { + WMI_LOGE("Failed to send set param command ret = %d", ret); + wmi_buf_free(buf); + } + return ret; +} +#endif + +struct wmi_ops tlv_ops = { + .send_vdev_create_cmd = send_vdev_create_cmd_tlv, + .send_vdev_delete_cmd = send_vdev_delete_cmd_tlv, + .send_vdev_nss_chain_params_cmd = send_vdev_nss_chain_params_cmd_tlv, + .send_vdev_down_cmd = send_vdev_down_cmd_tlv, + .send_vdev_start_cmd = send_vdev_start_cmd_tlv, + .send_hidden_ssid_vdev_restart_cmd = + send_hidden_ssid_vdev_restart_cmd_tlv, + .send_peer_flush_tids_cmd = send_peer_flush_tids_cmd_tlv, + .send_peer_param_cmd = send_peer_param_cmd_tlv, + .send_vdev_up_cmd = send_vdev_up_cmd_tlv, + .send_vdev_stop_cmd = send_vdev_stop_cmd_tlv, + .send_peer_create_cmd = send_peer_create_cmd_tlv, + .send_peer_delete_cmd = send_peer_delete_cmd_tlv, + .send_peer_unmap_conf_cmd = send_peer_unmap_conf_cmd_tlv, + .send_peer_rx_reorder_queue_setup_cmd = + send_peer_rx_reorder_queue_setup_cmd_tlv, + .send_peer_rx_reorder_queue_remove_cmd = + send_peer_rx_reorder_queue_remove_cmd_tlv, + .send_peer_add_wds_entry_cmd = send_peer_add_wds_entry_cmd_tlv, + 
/* NOTE(review): tlv_ops dispatch table — maps generic wmi_ops hooks to their
 * TLV-encoded implementations; table continues beyond this chunk.
 */
.send_peer_del_wds_entry_cmd = send_peer_del_wds_entry_cmd_tlv, + .send_peer_update_wds_entry_cmd = send_peer_update_wds_entry_cmd_tlv, + .send_pdev_utf_cmd = send_pdev_utf_cmd_tlv, + .send_pdev_param_cmd = send_pdev_param_cmd_tlv, + .send_pdev_get_tpc_config_cmd = send_pdev_get_tpc_config_cmd_tlv, + .send_suspend_cmd = send_suspend_cmd_tlv, + .send_resume_cmd = send_resume_cmd_tlv, +#ifdef FEATURE_WLAN_D0WOW + .send_d0wow_enable_cmd = send_d0wow_enable_cmd_tlv, + .send_d0wow_disable_cmd = send_d0wow_disable_cmd_tlv, +#endif + .send_wow_enable_cmd = send_wow_enable_cmd_tlv, + .send_set_ap_ps_param_cmd = send_set_ap_ps_param_cmd_tlv, + .send_set_sta_ps_param_cmd = send_set_sta_ps_param_cmd_tlv, + .send_crash_inject_cmd = send_crash_inject_cmd_tlv, +#ifdef FEATURE_FW_LOG_PARSING + .send_dbglog_cmd = send_dbglog_cmd_tlv, +#endif + .send_vdev_set_param_cmd = send_vdev_set_param_cmd_tlv, + .send_stats_request_cmd = send_stats_request_cmd_tlv, + .send_packet_log_enable_cmd = send_packet_log_enable_cmd_tlv, + .send_time_stamp_sync_cmd = send_time_stamp_sync_cmd_tlv, + .send_packet_log_disable_cmd = send_packet_log_disable_cmd_tlv, + .send_beacon_send_cmd = send_beacon_send_cmd_tlv, + .send_beacon_tmpl_send_cmd = send_beacon_tmpl_send_cmd_tlv, + .send_peer_assoc_cmd = send_peer_assoc_cmd_tlv, + .send_scan_start_cmd = send_scan_start_cmd_tlv, + .send_scan_stop_cmd = send_scan_stop_cmd_tlv, + .send_scan_chan_list_cmd = send_scan_chan_list_cmd_tlv, + .send_mgmt_cmd = send_mgmt_cmd_tlv, + .send_offchan_data_tx_cmd = send_offchan_data_tx_cmd_tlv, + .send_modem_power_state_cmd = send_modem_power_state_cmd_tlv, + .send_set_sta_ps_mode_cmd = send_set_sta_ps_mode_cmd_tlv, + .send_set_sta_uapsd_auto_trig_cmd = + send_set_sta_uapsd_auto_trig_cmd_tlv, + .send_get_temperature_cmd = send_get_temperature_cmd_tlv, + .send_set_p2pgo_oppps_req_cmd = send_set_p2pgo_oppps_req_cmd_tlv, + .send_set_p2pgo_noa_req_cmd = send_set_p2pgo_noa_req_cmd_tlv, +#ifdef CONVERGED_P2P_ENABLE + 
.send_p2p_lo_start_cmd = send_p2p_lo_start_cmd_tlv, + .send_p2p_lo_stop_cmd = send_p2p_lo_stop_cmd_tlv, +#endif + .send_set_smps_params_cmd = send_set_smps_params_cmd_tlv, + .send_set_mimops_cmd = send_set_mimops_cmd_tlv, +#ifdef WLAN_FEATURE_DSRC + .send_ocb_set_utc_time_cmd = send_ocb_set_utc_time_cmd_tlv, + .send_ocb_get_tsf_timer_cmd = send_ocb_get_tsf_timer_cmd_tlv, + .send_dcc_clear_stats_cmd = send_dcc_clear_stats_cmd_tlv, + .send_dcc_get_stats_cmd = send_dcc_get_stats_cmd_tlv, + .send_dcc_update_ndl_cmd = send_dcc_update_ndl_cmd_tlv, + .send_ocb_set_config_cmd = send_ocb_set_config_cmd_tlv, + .send_ocb_stop_timing_advert_cmd = send_ocb_stop_timing_advert_cmd_tlv, + .send_ocb_start_timing_advert_cmd = + send_ocb_start_timing_advert_cmd_tlv, + .extract_ocb_chan_config_resp = extract_ocb_channel_config_resp_tlv, + .extract_ocb_tsf_timer = extract_ocb_tsf_timer_tlv, + .extract_dcc_update_ndl_resp = extract_ocb_ndl_resp_tlv, + .extract_dcc_stats = extract_ocb_dcc_stats_tlv, +#endif + .send_set_enable_disable_mcc_adaptive_scheduler_cmd = + send_set_enable_disable_mcc_adaptive_scheduler_cmd_tlv, + .send_set_mcc_channel_time_latency_cmd = + send_set_mcc_channel_time_latency_cmd_tlv, + .send_set_mcc_channel_time_quota_cmd = + send_set_mcc_channel_time_quota_cmd_tlv, + .send_set_thermal_mgmt_cmd = send_set_thermal_mgmt_cmd_tlv, + .send_lro_config_cmd = send_lro_config_cmd_tlv, + .send_peer_rate_report_cmd = send_peer_rate_report_cmd_tlv, + .send_set_sta_sa_query_param_cmd = send_set_sta_sa_query_param_cmd_tlv, + .send_set_sta_keep_alive_cmd = send_set_sta_keep_alive_cmd_tlv, + .send_vdev_set_gtx_cfg_cmd = send_vdev_set_gtx_cfg_cmd_tlv, + .send_probe_rsp_tmpl_send_cmd = + send_probe_rsp_tmpl_send_cmd_tlv, + .send_p2p_go_set_beacon_ie_cmd = + send_p2p_go_set_beacon_ie_cmd_tlv, + .send_setup_install_key_cmd = + send_setup_install_key_cmd_tlv, + .send_set_gateway_params_cmd = + send_set_gateway_params_cmd_tlv, + .send_set_rssi_monitoring_cmd = + 
send_set_rssi_monitoring_cmd_tlv, + .send_scan_probe_setoui_cmd = + send_scan_probe_setoui_cmd_tlv, + .send_roam_scan_offload_rssi_thresh_cmd = + send_roam_scan_offload_rssi_thresh_cmd_tlv, + .send_roam_mawc_params_cmd = send_roam_mawc_params_cmd_tlv, + .send_roam_scan_filter_cmd = + send_roam_scan_filter_cmd_tlv, +#ifdef IPA_OFFLOAD + .send_ipa_offload_control_cmd = + send_ipa_offload_control_cmd_tlv, +#endif + .send_plm_stop_cmd = send_plm_stop_cmd_tlv, + .send_plm_start_cmd = send_plm_start_cmd_tlv, + .send_pno_stop_cmd = send_pno_stop_cmd_tlv, + .send_pno_start_cmd = send_pno_start_cmd_tlv, + .send_nlo_mawc_cmd = send_nlo_mawc_cmd_tlv, + .send_set_ric_req_cmd = send_set_ric_req_cmd_tlv, + .send_process_ll_stats_clear_cmd = send_process_ll_stats_clear_cmd_tlv, + .send_process_ll_stats_set_cmd = send_process_ll_stats_set_cmd_tlv, + .send_process_ll_stats_get_cmd = send_process_ll_stats_get_cmd_tlv, + .send_congestion_cmd = send_congestion_cmd_tlv, + .send_snr_request_cmd = send_snr_request_cmd_tlv, + .send_snr_cmd = send_snr_cmd_tlv, + .send_link_status_req_cmd = send_link_status_req_cmd_tlv, +#ifdef WLAN_POWER_MANAGEMENT_OFFLOAD + .send_add_wow_wakeup_event_cmd = send_add_wow_wakeup_event_cmd_tlv, + .send_wow_patterns_to_fw_cmd = send_wow_patterns_to_fw_cmd_tlv, + .send_enable_arp_ns_offload_cmd = send_enable_arp_ns_offload_cmd_tlv, + .send_add_clear_mcbc_filter_cmd = send_add_clear_mcbc_filter_cmd_tlv, + .send_multiple_add_clear_mcbc_filter_cmd = + send_multiple_add_clear_mcbc_filter_cmd_tlv, + .send_conf_hw_filter_cmd = send_conf_hw_filter_cmd_tlv, + .send_gtk_offload_cmd = send_gtk_offload_cmd_tlv, + .send_process_gtk_offload_getinfo_cmd = + send_process_gtk_offload_getinfo_cmd_tlv, + .send_enable_enhance_multicast_offload_cmd = + send_enable_enhance_multicast_offload_tlv, + .extract_gtk_rsp_event = extract_gtk_rsp_event_tlv, +#ifdef FEATURE_WLAN_RA_FILTERING + .send_wow_sta_ra_filter_cmd = send_wow_sta_ra_filter_cmd_tlv, +#endif + 
.send_action_frame_patterns_cmd = send_action_frame_patterns_cmd_tlv, + .send_lphb_config_hbenable_cmd = send_lphb_config_hbenable_cmd_tlv, + .send_lphb_config_tcp_params_cmd = send_lphb_config_tcp_params_cmd_tlv, + .send_lphb_config_tcp_pkt_filter_cmd = + send_lphb_config_tcp_pkt_filter_cmd_tlv, + .send_lphb_config_udp_params_cmd = send_lphb_config_udp_params_cmd_tlv, + .send_lphb_config_udp_pkt_filter_cmd = + send_lphb_config_udp_pkt_filter_cmd_tlv, + .send_enable_disable_packet_filter_cmd = + send_enable_disable_packet_filter_cmd_tlv, + .send_config_packet_filter_cmd = send_config_packet_filter_cmd_tlv, +#endif /* End of WLAN_POWER_MANAGEMENT_OFFLOAD */ +#ifdef CONFIG_MCL + .send_process_dhcp_ind_cmd = send_process_dhcp_ind_cmd_tlv, + .send_get_link_speed_cmd = send_get_link_speed_cmd_tlv, + .send_bcn_buf_ll_cmd = send_bcn_buf_ll_cmd_tlv, + .send_roam_scan_offload_mode_cmd = + send_roam_scan_offload_mode_cmd_tlv, +#ifndef REMOVE_PKT_LOG + .send_pktlog_wmi_send_cmd = send_pktlog_wmi_send_cmd_tlv, +#endif + .send_roam_scan_offload_ap_profile_cmd = + send_roam_scan_offload_ap_profile_cmd_tlv, +#endif +#ifdef WLAN_SUPPORT_GREEN_AP + .send_egap_conf_params_cmd = send_egap_conf_params_cmd_tlv, + .send_green_ap_ps_cmd = send_green_ap_ps_cmd_tlv, + .extract_green_ap_egap_status_info = + extract_green_ap_egap_status_info_tlv, +#endif + .send_fw_profiling_cmd = send_fw_profiling_cmd_tlv, + .send_csa_offload_enable_cmd = send_csa_offload_enable_cmd_tlv, + .send_nat_keepalive_en_cmd = send_nat_keepalive_en_cmd_tlv, + .send_wlm_latency_level_cmd = send_wlm_latency_level_cmd_tlv, + .send_start_oem_data_cmd = send_start_oem_data_cmd_tlv, +#ifdef WLAN_FEATURE_CIF_CFR + .send_oem_dma_cfg_cmd = send_oem_dma_cfg_cmd_tlv, +#endif + .send_dbr_cfg_cmd = send_dbr_cfg_cmd_tlv, + .send_dfs_phyerr_filter_offload_en_cmd = + send_dfs_phyerr_filter_offload_en_cmd_tlv, + .send_wow_delete_pattern_cmd = send_wow_delete_pattern_cmd_tlv, + .send_host_wakeup_ind_to_fw_cmd = 
send_host_wakeup_ind_to_fw_cmd_tlv, + .send_del_ts_cmd = send_del_ts_cmd_tlv, + .send_aggr_qos_cmd = send_aggr_qos_cmd_tlv, + .send_add_ts_cmd = send_add_ts_cmd_tlv, + .send_process_add_periodic_tx_ptrn_cmd = + send_process_add_periodic_tx_ptrn_cmd_tlv, + .send_process_del_periodic_tx_ptrn_cmd = + send_process_del_periodic_tx_ptrn_cmd_tlv, + .send_stats_ext_req_cmd = send_stats_ext_req_cmd_tlv, + .send_enable_ext_wow_cmd = send_enable_ext_wow_cmd_tlv, + .send_set_app_type2_params_in_fw_cmd = + send_set_app_type2_params_in_fw_cmd_tlv, + .send_set_auto_shutdown_timer_cmd = + send_set_auto_shutdown_timer_cmd_tlv, + .send_nan_req_cmd = send_nan_req_cmd_tlv, + .send_process_dhcpserver_offload_cmd = + send_process_dhcpserver_offload_cmd_tlv, + .send_set_led_flashing_cmd = send_set_led_flashing_cmd_tlv, + .send_process_ch_avoid_update_cmd = + send_process_ch_avoid_update_cmd_tlv, + .send_pdev_set_regdomain_cmd = + send_pdev_set_regdomain_cmd_tlv, + .send_regdomain_info_to_fw_cmd = send_regdomain_info_to_fw_cmd_tlv, + .send_set_tdls_offchan_mode_cmd = send_set_tdls_offchan_mode_cmd_tlv, + .send_update_fw_tdls_state_cmd = send_update_fw_tdls_state_cmd_tlv, + .send_update_tdls_peer_state_cmd = send_update_tdls_peer_state_cmd_tlv, + .send_process_set_ie_info_cmd = send_process_set_ie_info_cmd_tlv, + .save_fw_version_cmd = save_fw_version_cmd_tlv, + .check_and_update_fw_version = + check_and_update_fw_version_cmd_tlv, + .send_set_base_macaddr_indicate_cmd = + send_set_base_macaddr_indicate_cmd_tlv, + .send_log_supported_evt_cmd = send_log_supported_evt_cmd_tlv, + .send_enable_specific_fw_logs_cmd = + send_enable_specific_fw_logs_cmd_tlv, + .send_flush_logs_to_fw_cmd = send_flush_logs_to_fw_cmd_tlv, + .send_pdev_set_pcl_cmd = send_pdev_set_pcl_cmd_tlv, + .send_pdev_set_hw_mode_cmd = send_pdev_set_hw_mode_cmd_tlv, +#ifdef WLAN_POLICY_MGR_ENABLE + .send_pdev_set_dual_mac_config_cmd = + send_pdev_set_dual_mac_config_cmd_tlv, +#endif + .send_app_type1_params_in_fw_cmd = + 
send_app_type1_params_in_fw_cmd_tlv, + .send_set_ssid_hotlist_cmd = send_set_ssid_hotlist_cmd_tlv, + .send_process_roam_synch_complete_cmd = + send_process_roam_synch_complete_cmd_tlv, + .send_unit_test_cmd = send_unit_test_cmd_tlv, + .send_roam_invoke_cmd = send_roam_invoke_cmd_tlv, + .send_roam_scan_offload_cmd = send_roam_scan_offload_cmd_tlv, + .send_roam_scan_offload_scan_period_cmd = + send_roam_scan_offload_scan_period_cmd_tlv, + .send_roam_scan_offload_chan_list_cmd = + send_roam_scan_offload_chan_list_cmd_tlv, + .send_roam_scan_offload_rssi_change_cmd = + send_roam_scan_offload_rssi_change_cmd_tlv, +#ifdef FEATURE_WLAN_APF + .send_set_active_apf_mode_cmd = wmi_send_set_active_apf_mode_cmd_tlv, + .send_apf_enable_cmd = wmi_send_apf_enable_cmd_tlv, + .send_apf_write_work_memory_cmd = + wmi_send_apf_write_work_memory_cmd_tlv, + .send_apf_read_work_memory_cmd = + wmi_send_apf_read_work_memory_cmd_tlv, + .extract_apf_read_memory_resp_event = + wmi_extract_apf_read_memory_resp_event_tlv, +#endif /* FEATURE_WLAN_APF */ + .send_adapt_dwelltime_params_cmd = + send_adapt_dwelltime_params_cmd_tlv, + .send_dbs_scan_sel_params_cmd = + send_dbs_scan_sel_params_cmd_tlv, + .init_cmd_send = init_cmd_send_tlv, + .send_smart_ant_enable_cmd = send_smart_ant_enable_cmd_tlv, + .send_smart_ant_set_rx_ant_cmd = send_smart_ant_set_rx_ant_cmd_tlv, + .send_set_ctl_table_cmd = send_set_ctl_table_cmd_tlv, + .send_set_mimogain_table_cmd = send_set_mimogain_table_cmd_tlv, + .send_packet_power_info_get_cmd = send_packet_power_info_get_cmd_tlv, + .send_vdev_config_ratemask_cmd = send_vdev_config_ratemask_cmd_tlv, + .send_vdev_set_custom_aggr_size_cmd = + send_vdev_set_custom_aggr_size_cmd_tlv, + .send_vdev_set_qdepth_thresh_cmd = + send_vdev_set_qdepth_thresh_cmd_tlv, + .send_set_vap_dscp_tid_map_cmd = send_set_vap_dscp_tid_map_cmd_tlv, + .send_vdev_set_neighbour_rx_cmd = send_vdev_set_neighbour_rx_cmd_tlv, + .send_smart_ant_set_tx_ant_cmd = send_smart_ant_set_tx_ant_cmd_tlv, + 
.send_set_ant_switch_tbl_cmd = send_set_ant_switch_tbl_cmd_tlv, + .send_smart_ant_set_training_info_cmd = + send_smart_ant_set_training_info_cmd_tlv, + .send_smart_ant_set_node_config_cmd = + send_smart_ant_set_node_config_cmd_tlv, + .send_set_atf_cmd = send_set_atf_cmd_tlv, + .send_vdev_set_fwtest_param_cmd = send_vdev_set_fwtest_param_cmd_tlv, + .send_set_qboost_param_cmd = send_set_qboost_param_cmd_tlv, + .send_gpio_config_cmd = send_gpio_config_cmd_tlv, + .send_gpio_output_cmd = send_gpio_output_cmd_tlv, + .send_phyerr_disable_cmd = send_phyerr_disable_cmd_tlv, + .send_phyerr_enable_cmd = send_phyerr_enable_cmd_tlv, + .send_periodic_chan_stats_config_cmd = + send_periodic_chan_stats_config_cmd_tlv, + .send_nf_dbr_dbm_info_get_cmd = send_nf_dbr_dbm_info_get_cmd_tlv, + .send_set_ht_ie_cmd = send_set_ht_ie_cmd_tlv, + .send_set_vht_ie_cmd = send_set_vht_ie_cmd_tlv, + .send_set_quiet_mode_cmd = send_set_quiet_mode_cmd_tlv, + .send_set_bwf_cmd = send_set_bwf_cmd_tlv, + .send_mcast_group_update_cmd = send_mcast_group_update_cmd_tlv, + .send_vdev_spectral_configure_cmd = + send_vdev_spectral_configure_cmd_tlv, + .send_vdev_spectral_enable_cmd = + send_vdev_spectral_enable_cmd_tlv, + .send_thermal_mitigation_param_cmd = + send_thermal_mitigation_param_cmd_tlv, + .send_pdev_qvit_cmd = send_pdev_qvit_cmd_tlv, + .send_wmm_update_cmd = send_wmm_update_cmd_tlv, + .send_process_update_edca_param_cmd = + send_process_update_edca_param_cmd_tlv, + .send_coex_config_cmd = send_coex_config_cmd_tlv, + .send_set_country_cmd = send_set_country_cmd_tlv, + .send_bcn_offload_control_cmd = send_bcn_offload_control_cmd_tlv, + .send_addba_send_cmd = send_addba_send_cmd_tlv, + .send_delba_send_cmd = send_delba_send_cmd_tlv, + .send_addba_clearresponse_cmd = send_addba_clearresponse_cmd_tlv, + .get_target_cap_from_service_ready = extract_service_ready_tlv, + .extract_hal_reg_cap = extract_hal_reg_cap_tlv, + .extract_host_mem_req = extract_host_mem_req_tlv, + .save_service_bitmap = 
save_service_bitmap_tlv, + .save_ext_service_bitmap = save_ext_service_bitmap_tlv, + .is_service_enabled = is_service_enabled_tlv, + .save_fw_version = save_fw_version_in_service_ready_tlv, + .ready_extract_init_status = ready_extract_init_status_tlv, + .ready_extract_mac_addr = ready_extract_mac_addr_tlv, + .ready_extract_mac_addr_list = ready_extract_mac_addr_list_tlv, + .extract_ready_event_params = extract_ready_event_params_tlv, + .extract_dbglog_data_len = extract_dbglog_data_len_tlv, + .extract_vdev_start_resp = extract_vdev_start_resp_tlv, + .extract_vdev_delete_resp = extract_vdev_delete_resp_tlv, + .extract_tbttoffset_update_params = + extract_tbttoffset_update_params_tlv, + .extract_ext_tbttoffset_update_params = + extract_ext_tbttoffset_update_params_tlv, + .extract_tbttoffset_num_vdevs = + extract_tbttoffset_num_vdevs_tlv, + .extract_ext_tbttoffset_num_vdevs = + extract_ext_tbttoffset_num_vdevs_tlv, + .extract_mgmt_rx_params = extract_mgmt_rx_params_tlv, + .extract_vdev_stopped_param = extract_vdev_stopped_param_tlv, + .extract_vdev_roam_param = extract_vdev_roam_param_tlv, + .extract_vdev_scan_ev_param = extract_vdev_scan_ev_param_tlv, +#ifdef CONVERGED_TDLS_ENABLE + .extract_vdev_tdls_ev_param = extract_vdev_tdls_ev_param_tlv, +#endif + .extract_mgmt_tx_compl_param = extract_mgmt_tx_compl_param_tlv, + .extract_swba_num_vdevs = extract_swba_num_vdevs_tlv, + .extract_swba_tim_info = extract_swba_tim_info_tlv, + .extract_swba_noa_info = extract_swba_noa_info_tlv, +#ifdef CONVERGED_P2P_ENABLE + .extract_p2p_noa_ev_param = extract_p2p_noa_ev_param_tlv, + .extract_p2p_lo_stop_ev_param = + extract_p2p_lo_stop_ev_param_tlv, + .set_mac_addr_rx_filter = send_set_mac_addr_rx_filter_cmd_tlv, + .extract_mac_addr_rx_filter_evt_param = + extract_mac_addr_rx_filter_evt_param_tlv, +#endif + .extract_offchan_data_tx_compl_param = + extract_offchan_data_tx_compl_param_tlv, + .extract_peer_sta_kickout_ev = extract_peer_sta_kickout_ev_tlv, + .extract_all_stats_count = 
extract_all_stats_counts_tlv, + .extract_pdev_stats = extract_pdev_stats_tlv, + .extract_unit_test = extract_unit_test_tlv, + .extract_pdev_ext_stats = extract_pdev_ext_stats_tlv, + .extract_vdev_stats = extract_vdev_stats_tlv, + .extract_per_chain_rssi_stats = extract_per_chain_rssi_stats_tlv, + .extract_peer_stats = extract_peer_stats_tlv, + .extract_bcn_stats = extract_bcn_stats_tlv, + .extract_bcnflt_stats = extract_bcnflt_stats_tlv, + .extract_peer_extd_stats = extract_peer_extd_stats_tlv, + .extract_peer_adv_stats = extract_peer_adv_stats_tlv, + .extract_chan_stats = extract_chan_stats_tlv, + .extract_profile_ctx = extract_profile_ctx_tlv, + .extract_profile_data = extract_profile_data_tlv, + .extract_chan_info_event = extract_chan_info_event_tlv, + .extract_channel_hopping_event = extract_channel_hopping_event_tlv, + .send_fw_test_cmd = send_fw_test_cmd_tlv, +#ifdef WLAN_FEATURE_DISA + .send_encrypt_decrypt_send_cmd = + send_encrypt_decrypt_send_cmd_tlv, + .extract_encrypt_decrypt_resp_event = + extract_encrypt_decrypt_resp_event_tlv, +#endif + .send_sar_limit_cmd = send_sar_limit_cmd_tlv, + .get_sar_limit_cmd = get_sar_limit_cmd_tlv, + .extract_sar_limit_event = extract_sar_limit_event_tlv, + .extract_sar2_result_event = extract_sar2_result_event_tlv, + .send_power_dbg_cmd = send_power_dbg_cmd_tlv, + .send_multiple_vdev_restart_req_cmd = + send_multiple_vdev_restart_req_cmd_tlv, + .extract_service_ready_ext = extract_service_ready_ext_tlv, + .extract_hw_mode_cap_service_ready_ext = + extract_hw_mode_cap_service_ready_ext_tlv, + .extract_mac_phy_cap_service_ready_ext = + extract_mac_phy_cap_service_ready_ext_tlv, + .extract_reg_cap_service_ready_ext = + extract_reg_cap_service_ready_ext_tlv, + .extract_dbr_ring_cap_service_ready_ext = + extract_dbr_ring_cap_service_ready_ext_tlv, + .extract_sar_cap_service_ready_ext = + extract_sar_cap_service_ready_ext_tlv, + .extract_dbr_buf_release_fixed = extract_dbr_buf_release_fixed_tlv, + 
.extract_dbr_buf_release_entry = extract_dbr_buf_release_entry_tlv, + .extract_dbr_buf_metadata = extract_dbr_buf_metadata_tlv, + .extract_pdev_utf_event = extract_pdev_utf_event_tlv, + .wmi_set_htc_tx_tag = wmi_set_htc_tx_tag_tlv, + .extract_dcs_interference_type = extract_dcs_interference_type_tlv, + .extract_dcs_cw_int = extract_dcs_cw_int_tlv, + .extract_dcs_im_tgt_stats = extract_dcs_im_tgt_stats_tlv, + .extract_fips_event_data = extract_fips_event_data_tlv, + .send_pdev_fips_cmd = send_pdev_fips_cmd_tlv, + .extract_peer_delete_response_event = + extract_peer_delete_response_event_tlv, + .is_management_record = is_management_record_tlv, + .is_diag_event = is_diag_event_tlv, + .extract_pdev_csa_switch_count_status = + extract_pdev_csa_switch_count_status_tlv, + .extract_pdev_tpc_ev_param = extract_pdev_tpc_ev_param_tlv, + .extract_pdev_tpc_config_ev_param = + extract_pdev_tpc_config_ev_param_tlv, + .extract_nfcal_power_ev_param = extract_nfcal_power_ev_param_tlv, + .extract_wds_addr_event = extract_wds_addr_event_tlv, + .extract_peer_sta_ps_statechange_ev = + extract_peer_sta_ps_statechange_ev_tlv, + .extract_inst_rssi_stats_event = extract_inst_rssi_stats_event_tlv, + .send_per_roam_config_cmd = send_per_roam_config_cmd_tlv, +#ifdef WLAN_FEATURE_ACTION_OUI + .send_action_oui_cmd = send_action_oui_cmd_tlv, +#endif + .send_dfs_phyerr_offload_en_cmd = send_dfs_phyerr_offload_en_cmd_tlv, + .send_dfs_phyerr_offload_dis_cmd = send_dfs_phyerr_offload_dis_cmd_tlv, + .extract_reg_chan_list_update_event = + extract_reg_chan_list_update_event_tlv, + .extract_chainmask_tables = + extract_chainmask_tables_tlv, + .extract_thermal_stats = extract_thermal_stats_tlv, + .extract_thermal_level_stats = extract_thermal_level_stats_tlv, + .send_get_rcpi_cmd = send_get_rcpi_cmd_tlv, + .extract_rcpi_response_event = extract_rcpi_response_event_tlv, +#ifdef DFS_COMPONENT_ENABLE + .extract_dfs_cac_complete_event = extract_dfs_cac_complete_event_tlv, + .extract_dfs_radar_detection_event 
= + extract_dfs_radar_detection_event_tlv, + .extract_wlan_radar_event_info = extract_wlan_radar_event_info_tlv, +#endif + .convert_pdev_id_host_to_target = + convert_host_pdev_id_to_target_pdev_id_legacy, + .convert_pdev_id_target_to_host = + convert_target_pdev_id_to_host_pdev_id_legacy, + + .convert_host_pdev_id_to_target = + convert_host_pdev_id_to_target_pdev_id, + .convert_target_pdev_id_to_host = + convert_target_pdev_id_to_host_pdev_id, + + .send_start_11d_scan_cmd = send_start_11d_scan_cmd_tlv, + .send_stop_11d_scan_cmd = send_stop_11d_scan_cmd_tlv, + .extract_reg_11d_new_country_event = + extract_reg_11d_new_country_event_tlv, + .send_user_country_code_cmd = send_user_country_code_cmd_tlv, + .send_limit_off_chan_cmd = + send_limit_off_chan_cmd_tlv, + .extract_reg_ch_avoid_event = + extract_reg_ch_avoid_event_tlv, + .send_pdev_caldata_version_check_cmd = + send_pdev_caldata_version_check_cmd_tlv, + .extract_pdev_caldata_version_check_ev_param = + extract_pdev_caldata_version_check_ev_param_tlv, + .send_set_arp_stats_req_cmd = send_set_arp_stats_req_cmd_tlv, + .send_get_arp_stats_req_cmd = send_get_arp_stats_req_cmd_tlv, + .send_set_del_pmkid_cache_cmd = send_set_del_pmkid_cache_cmd_tlv, +#if defined(WLAN_FEATURE_FILS_SK) + .send_roam_scan_hlp_cmd = send_roam_scan_send_hlp_cmd_tlv, +#endif + .send_wow_timer_pattern_cmd = send_wow_timer_pattern_cmd_tlv, +#ifdef WLAN_FEATURE_NAN_CONVERGENCE + .send_ndp_initiator_req_cmd = nan_ndp_initiator_req_tlv, + .send_ndp_responder_req_cmd = nan_ndp_responder_req_tlv, + .send_ndp_end_req_cmd = nan_ndp_end_req_tlv, + .extract_ndp_initiator_rsp = extract_ndp_initiator_rsp_tlv, + .extract_ndp_ind = extract_ndp_ind_tlv, + .extract_ndp_confirm = extract_ndp_confirm_tlv, + .extract_ndp_responder_rsp = extract_ndp_responder_rsp_tlv, + .extract_ndp_end_rsp = extract_ndp_end_rsp_tlv, + .extract_ndp_end_ind = extract_ndp_end_ind_tlv, + .extract_ndp_sch_update = extract_ndp_sch_update_tlv, +#endif + .send_btm_config = 
send_btm_config_cmd_tlv, + .send_roam_bss_load_config = send_roam_bss_load_config_tlv, + .send_obss_detection_cfg_cmd = send_obss_detection_cfg_cmd_tlv, + .extract_obss_detection_info = extract_obss_detection_info_tlv, +#ifdef WLAN_SUPPORT_FILS + .send_vdev_fils_enable_cmd = send_vdev_fils_enable_cmd_tlv, + .extract_swfda_vdev_id = extract_swfda_vdev_id_tlv, + .send_fils_discovery_send_cmd = send_fils_discovery_send_cmd_tlv, +#endif /* WLAN_SUPPORT_FILS */ + .send_offload_11k_cmd = send_offload_11k_cmd_tlv, + .send_invoke_neighbor_report_cmd = send_invoke_neighbor_report_cmd_tlv, + .wmi_pdev_id_conversion_enable = wmi_tlv_pdev_id_conversion_enable, + .wmi_free_allocated_event = wmitlv_free_allocated_event_tlvs, + .wmi_check_and_pad_event = wmitlv_check_and_pad_event_tlvs, + .wmi_check_command_params = wmitlv_check_command_tlv_params, + .send_bss_color_change_enable_cmd = + send_bss_color_change_enable_cmd_tlv, + .send_obss_color_collision_cfg_cmd = + send_obss_color_collision_cfg_cmd_tlv, + .extract_obss_color_collision_info = + extract_obss_color_collision_info_tlv, + .extract_comb_phyerr = extract_comb_phyerr_tlv, + .extract_single_phyerr = extract_single_phyerr_tlv, +#ifdef QCA_SUPPORT_CP_STATS + .extract_cca_stats = extract_cca_stats_tlv, +#endif + .send_roam_scan_stats_cmd = send_roam_scan_stats_cmd_tlv, + .extract_roam_scan_stats_res_evt = extract_roam_scan_stats_res_evt_tlv, +#ifdef WLAN_MWS_INFO_DEBUGFS + .send_mws_coex_status_req_cmd = send_mws_coex_status_req_cmd_tlv, +#endif +}; + +/** + * populate_tlv_event_id() - populates wmi event ids + * + * @param event_ids: Pointer to hold event ids + * Return: None + */ +static void populate_tlv_events_id(uint32_t *event_ids) +{ + event_ids[wmi_service_ready_event_id] = WMI_SERVICE_READY_EVENTID; + event_ids[wmi_ready_event_id] = WMI_READY_EVENTID; + event_ids[wmi_scan_event_id] = WMI_SCAN_EVENTID; + event_ids[wmi_pdev_tpc_config_event_id] = WMI_PDEV_TPC_CONFIG_EVENTID; + event_ids[wmi_chan_info_event_id] = 
WMI_CHAN_INFO_EVENTID; + event_ids[wmi_phyerr_event_id] = WMI_PHYERR_EVENTID; + event_ids[wmi_pdev_dump_event_id] = WMI_PDEV_DUMP_EVENTID; + event_ids[wmi_tx_pause_event_id] = WMI_TX_PAUSE_EVENTID; + event_ids[wmi_dfs_radar_event_id] = WMI_DFS_RADAR_EVENTID; + event_ids[wmi_pdev_l1ss_track_event_id] = WMI_PDEV_L1SS_TRACK_EVENTID; + event_ids[wmi_pdev_temperature_event_id] = WMI_PDEV_TEMPERATURE_EVENTID; + event_ids[wmi_service_ready_ext_event_id] = + WMI_SERVICE_READY_EXT_EVENTID; + event_ids[wmi_vdev_start_resp_event_id] = WMI_VDEV_START_RESP_EVENTID; + event_ids[wmi_vdev_stopped_event_id] = WMI_VDEV_STOPPED_EVENTID; + event_ids[wmi_vdev_install_key_complete_event_id] = + WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID; + event_ids[wmi_vdev_mcc_bcn_intvl_change_req_event_id] = + WMI_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID; + + event_ids[wmi_vdev_tsf_report_event_id] = WMI_VDEV_TSF_REPORT_EVENTID; + event_ids[wmi_peer_sta_kickout_event_id] = WMI_PEER_STA_KICKOUT_EVENTID; + event_ids[wmi_peer_info_event_id] = WMI_PEER_INFO_EVENTID; + event_ids[wmi_peer_tx_fail_cnt_thr_event_id] = + WMI_PEER_TX_FAIL_CNT_THR_EVENTID; + event_ids[wmi_peer_estimated_linkspeed_event_id] = + WMI_PEER_ESTIMATED_LINKSPEED_EVENTID; + event_ids[wmi_peer_state_event_id] = WMI_PEER_STATE_EVENTID; + event_ids[wmi_peer_delete_response_event_id] = + WMI_PEER_DELETE_RESP_EVENTID; + event_ids[wmi_mgmt_rx_event_id] = WMI_MGMT_RX_EVENTID; + event_ids[wmi_host_swba_event_id] = WMI_HOST_SWBA_EVENTID; + event_ids[wmi_tbttoffset_update_event_id] = + WMI_TBTTOFFSET_UPDATE_EVENTID; + event_ids[wmi_ext_tbttoffset_update_event_id] = + WMI_TBTTOFFSET_EXT_UPDATE_EVENTID; + event_ids[wmi_offload_bcn_tx_status_event_id] = + WMI_OFFLOAD_BCN_TX_STATUS_EVENTID; + event_ids[wmi_offload_prob_resp_tx_status_event_id] = + WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID; + event_ids[wmi_mgmt_tx_completion_event_id] = + WMI_MGMT_TX_COMPLETION_EVENTID; + event_ids[wmi_pdev_nfcal_power_all_channels_event_id] = + 
WMI_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID; + event_ids[wmi_tx_delba_complete_event_id] = + WMI_TX_DELBA_COMPLETE_EVENTID; + event_ids[wmi_tx_addba_complete_event_id] = + WMI_TX_ADDBA_COMPLETE_EVENTID; + event_ids[wmi_ba_rsp_ssn_event_id] = WMI_BA_RSP_SSN_EVENTID; + + event_ids[wmi_aggr_state_trig_event_id] = WMI_AGGR_STATE_TRIG_EVENTID; + + event_ids[wmi_roam_event_id] = WMI_ROAM_EVENTID; + event_ids[wmi_profile_match] = WMI_PROFILE_MATCH; + + event_ids[wmi_roam_synch_event_id] = WMI_ROAM_SYNCH_EVENTID; + event_ids[wmi_roam_synch_frame_event_id] = WMI_ROAM_SYNCH_FRAME_EVENTID; + + event_ids[wmi_p2p_disc_event_id] = WMI_P2P_DISC_EVENTID; + + event_ids[wmi_p2p_noa_event_id] = WMI_P2P_NOA_EVENTID; + event_ids[wmi_p2p_lo_stop_event_id] = + WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID; + event_ids[wmi_vdev_add_macaddr_rx_filter_event_id] = + WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENTID; + event_ids[wmi_pdev_resume_event_id] = WMI_PDEV_RESUME_EVENTID; + event_ids[wmi_wow_wakeup_host_event_id] = WMI_WOW_WAKEUP_HOST_EVENTID; + event_ids[wmi_d0_wow_disable_ack_event_id] = + WMI_D0_WOW_DISABLE_ACK_EVENTID; + event_ids[wmi_wow_initial_wakeup_event_id] = + WMI_WOW_INITIAL_WAKEUP_EVENTID; + + event_ids[wmi_rtt_meas_report_event_id] = + WMI_RTT_MEASUREMENT_REPORT_EVENTID; + event_ids[wmi_tsf_meas_report_event_id] = + WMI_TSF_MEASUREMENT_REPORT_EVENTID; + event_ids[wmi_rtt_error_report_event_id] = WMI_RTT_ERROR_REPORT_EVENTID; + event_ids[wmi_stats_ext_event_id] = WMI_STATS_EXT_EVENTID; + event_ids[wmi_iface_link_stats_event_id] = WMI_IFACE_LINK_STATS_EVENTID; + event_ids[wmi_peer_link_stats_event_id] = WMI_PEER_LINK_STATS_EVENTID; + event_ids[wmi_radio_link_stats_link] = WMI_RADIO_LINK_STATS_EVENTID; + event_ids[wmi_diag_event_id_log_supported_event_id] = + WMI_DIAG_EVENT_LOG_SUPPORTED_EVENTID; + event_ids[wmi_nlo_match_event_id] = WMI_NLO_MATCH_EVENTID; + event_ids[wmi_nlo_scan_complete_event_id] = + WMI_NLO_SCAN_COMPLETE_EVENTID; + event_ids[wmi_apfind_event_id] = 
WMI_APFIND_EVENTID; + event_ids[wmi_passpoint_match_event_id] = WMI_PASSPOINT_MATCH_EVENTID; + + event_ids[wmi_gtk_offload_status_event_id] = + WMI_GTK_OFFLOAD_STATUS_EVENTID; + event_ids[wmi_gtk_rekey_fail_event_id] = WMI_GTK_REKEY_FAIL_EVENTID; + event_ids[wmi_csa_handling_event_id] = WMI_CSA_HANDLING_EVENTID; + event_ids[wmi_chatter_pc_query_event_id] = WMI_CHATTER_PC_QUERY_EVENTID; + + event_ids[wmi_echo_event_id] = WMI_ECHO_EVENTID; + + event_ids[wmi_pdev_utf_event_id] = WMI_PDEV_UTF_EVENTID; + + event_ids[wmi_dbg_msg_event_id] = WMI_DEBUG_MESG_EVENTID; + event_ids[wmi_update_stats_event_id] = WMI_UPDATE_STATS_EVENTID; + event_ids[wmi_debug_print_event_id] = WMI_DEBUG_PRINT_EVENTID; + event_ids[wmi_dcs_interference_event_id] = WMI_DCS_INTERFERENCE_EVENTID; + event_ids[wmi_pdev_qvit_event_id] = WMI_PDEV_QVIT_EVENTID; + event_ids[wmi_wlan_profile_data_event_id] = + WMI_WLAN_PROFILE_DATA_EVENTID; + event_ids[wmi_pdev_ftm_intg_event_id] = WMI_PDEV_FTM_INTG_EVENTID; + event_ids[wmi_wlan_freq_avoid_event_id] = WMI_WLAN_FREQ_AVOID_EVENTID; + event_ids[wmi_vdev_get_keepalive_event_id] = + WMI_VDEV_GET_KEEPALIVE_EVENTID; + event_ids[wmi_thermal_mgmt_event_id] = WMI_THERMAL_MGMT_EVENTID; + + event_ids[wmi_diag_container_event_id] = + WMI_DIAG_DATA_CONTAINER_EVENTID; + + event_ids[wmi_host_auto_shutdown_event_id] = + WMI_HOST_AUTO_SHUTDOWN_EVENTID; + + event_ids[wmi_update_whal_mib_stats_event_id] = + WMI_UPDATE_WHAL_MIB_STATS_EVENTID; + + /*update ht/vht info based on vdev (rx and tx NSS and preamble) */ + event_ids[wmi_update_vdev_rate_stats_event_id] = + WMI_UPDATE_VDEV_RATE_STATS_EVENTID; + + event_ids[wmi_diag_event_id] = WMI_DIAG_EVENTID; + event_ids[wmi_unit_test_event_id] = WMI_UNIT_TEST_EVENTID; + + /** Set OCB Sched Response, deprecated */ + event_ids[wmi_ocb_set_sched_event_id] = WMI_OCB_SET_SCHED_EVENTID; + + event_ids[wmi_dbg_mesg_flush_complete_event_id] = + WMI_DEBUG_MESG_FLUSH_COMPLETE_EVENTID; + event_ids[wmi_rssi_breach_event_id] = 
WMI_RSSI_BREACH_EVENTID; + + /* GPIO Event */ + event_ids[wmi_gpio_input_event_id] = WMI_GPIO_INPUT_EVENTID; + event_ids[wmi_uploadh_event_id] = WMI_UPLOADH_EVENTID; + + event_ids[wmi_captureh_event_id] = WMI_CAPTUREH_EVENTID; + event_ids[wmi_rfkill_state_change_event_id] = + WMI_RFKILL_STATE_CHANGE_EVENTID; + + /* TDLS Event */ + event_ids[wmi_tdls_peer_event_id] = WMI_TDLS_PEER_EVENTID; + + event_ids[wmi_batch_scan_enabled_event_id] = + WMI_BATCH_SCAN_ENABLED_EVENTID; + event_ids[wmi_batch_scan_result_event_id] = + WMI_BATCH_SCAN_RESULT_EVENTID; + /* OEM Event */ + event_ids[wmi_oem_cap_event_id] = WMI_OEM_CAPABILITY_EVENTID; + event_ids[wmi_oem_meas_report_event_id] = + WMI_OEM_MEASUREMENT_REPORT_EVENTID; + event_ids[wmi_oem_report_event_id] = WMI_OEM_ERROR_REPORT_EVENTID; + + /* NAN Event */ + event_ids[wmi_nan_event_id] = WMI_NAN_EVENTID; + + /* LPI Event */ + event_ids[wmi_lpi_result_event_id] = WMI_LPI_RESULT_EVENTID; + event_ids[wmi_lpi_status_event_id] = WMI_LPI_STATUS_EVENTID; + event_ids[wmi_lpi_handoff_event_id] = WMI_LPI_HANDOFF_EVENTID; + + /* ExtScan events */ + event_ids[wmi_extscan_start_stop_event_id] = + WMI_EXTSCAN_START_STOP_EVENTID; + event_ids[wmi_extscan_operation_event_id] = + WMI_EXTSCAN_OPERATION_EVENTID; + event_ids[wmi_extscan_table_usage_event_id] = + WMI_EXTSCAN_TABLE_USAGE_EVENTID; + event_ids[wmi_extscan_cached_results_event_id] = + WMI_EXTSCAN_CACHED_RESULTS_EVENTID; + event_ids[wmi_extscan_wlan_change_results_event_id] = + WMI_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID; + event_ids[wmi_extscan_hotlist_match_event_id] = + WMI_EXTSCAN_HOTLIST_MATCH_EVENTID; + event_ids[wmi_extscan_capabilities_event_id] = + WMI_EXTSCAN_CAPABILITIES_EVENTID; + event_ids[wmi_extscan_hotlist_ssid_match_event_id] = + WMI_EXTSCAN_HOTLIST_SSID_MATCH_EVENTID; + + /* mDNS offload events */ + event_ids[wmi_mdns_stats_event_id] = WMI_MDNS_STATS_EVENTID; + + /* SAP Authentication offload events */ + event_ids[wmi_sap_ofl_add_sta_event_id] = 
WMI_SAP_OFL_ADD_STA_EVENTID; + event_ids[wmi_sap_ofl_del_sta_event_id] = WMI_SAP_OFL_DEL_STA_EVENTID; + + /** Out-of-context-of-bss (OCB) events */ + event_ids[wmi_ocb_set_config_resp_event_id] = + WMI_OCB_SET_CONFIG_RESP_EVENTID; + event_ids[wmi_ocb_get_tsf_timer_resp_event_id] = + WMI_OCB_GET_TSF_TIMER_RESP_EVENTID; + event_ids[wmi_dcc_get_stats_resp_event_id] = + WMI_DCC_GET_STATS_RESP_EVENTID; + event_ids[wmi_dcc_update_ndl_resp_event_id] = + WMI_DCC_UPDATE_NDL_RESP_EVENTID; + event_ids[wmi_dcc_stats_event_id] = WMI_DCC_STATS_EVENTID; + /* System-On-Chip events */ + event_ids[wmi_soc_set_hw_mode_resp_event_id] = + WMI_SOC_SET_HW_MODE_RESP_EVENTID; + event_ids[wmi_soc_hw_mode_transition_event_id] = + WMI_SOC_HW_MODE_TRANSITION_EVENTID; + event_ids[wmi_soc_set_dual_mac_config_resp_event_id] = + WMI_SOC_SET_DUAL_MAC_CONFIG_RESP_EVENTID; + event_ids[wmi_pdev_fips_event_id] = WMI_PDEV_FIPS_EVENTID; + event_ids[wmi_pdev_csa_switch_count_status_event_id] = + WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID; + event_ids[wmi_reg_chan_list_cc_event_id] = WMI_REG_CHAN_LIST_CC_EVENTID; + event_ids[wmi_inst_rssi_stats_event_id] = WMI_INST_RSSI_STATS_EVENTID; + event_ids[wmi_pdev_tpc_config_event_id] = WMI_PDEV_TPC_CONFIG_EVENTID; + event_ids[wmi_peer_sta_ps_statechg_event_id] = + WMI_PEER_STA_PS_STATECHG_EVENTID; + event_ids[wmi_pdev_channel_hopping_event_id] = + WMI_PDEV_CHANNEL_HOPPING_EVENTID; + event_ids[wmi_offchan_data_tx_completion_event] = + WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID; + event_ids[wmi_dfs_cac_complete_id] = WMI_VDEV_DFS_CAC_COMPLETE_EVENTID; + event_ids[wmi_dfs_radar_detection_event_id] = + WMI_PDEV_DFS_RADAR_DETECTION_EVENTID; + event_ids[wmi_tt_stats_event_id] = WMI_THERM_THROT_STATS_EVENTID; + event_ids[wmi_11d_new_country_event_id] = WMI_11D_NEW_COUNTRY_EVENTID; + event_ids[wmi_pdev_tpc_event_id] = WMI_PDEV_TPC_EVENTID; + event_ids[wmi_get_arp_stats_req_id] = WMI_VDEV_GET_ARP_STAT_EVENTID; + event_ids[wmi_service_available_event_id] = + 
WMI_SERVICE_AVAILABLE_EVENTID; + event_ids[wmi_update_rcpi_event_id] = WMI_UPDATE_RCPI_EVENTID; + event_ids[wmi_pdev_check_cal_version_event_id] = WMI_PDEV_CHECK_CAL_VERSION_EVENTID; + /* NDP events */ + event_ids[wmi_ndp_initiator_rsp_event_id] = + WMI_NDP_INITIATOR_RSP_EVENTID; + event_ids[wmi_ndp_indication_event_id] = WMI_NDP_INDICATION_EVENTID; + event_ids[wmi_ndp_confirm_event_id] = WMI_NDP_CONFIRM_EVENTID; + event_ids[wmi_ndp_responder_rsp_event_id] = + WMI_NDP_RESPONDER_RSP_EVENTID; + event_ids[wmi_ndp_end_indication_event_id] = + WMI_NDP_END_INDICATION_EVENTID; + event_ids[wmi_ndp_end_rsp_event_id] = WMI_NDP_END_RSP_EVENTID; + event_ids[wmi_ndl_schedule_update_event_id] = + WMI_NDL_SCHEDULE_UPDATE_EVENTID; + + event_ids[wmi_oem_response_event_id] = WMI_OEM_RESPONSE_EVENTID; + event_ids[wmi_peer_stats_info_event_id] = WMI_PEER_STATS_INFO_EVENTID; + event_ids[wmi_pdev_chip_power_stats_event_id] = + WMI_PDEV_CHIP_POWER_STATS_EVENTID; + event_ids[wmi_ap_ps_egap_info_event_id] = WMI_AP_PS_EGAP_INFO_EVENTID; + event_ids[wmi_peer_assoc_conf_event_id] = WMI_PEER_ASSOC_CONF_EVENTID; + event_ids[wmi_vdev_delete_resp_event_id] = WMI_VDEV_DELETE_RESP_EVENTID; + event_ids[wmi_apf_capability_info_event_id] = + WMI_BPF_CAPABILIY_INFO_EVENTID; + event_ids[wmi_vdev_encrypt_decrypt_data_rsp_event_id] = + WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID; + event_ids[wmi_report_rx_aggr_failure_event_id] = + WMI_REPORT_RX_AGGR_FAILURE_EVENTID; + event_ids[wmi_pdev_chip_pwr_save_failure_detect_event_id] = + WMI_PDEV_CHIP_POWER_SAVE_FAILURE_DETECTED_EVENTID; + event_ids[wmi_peer_antdiv_info_event_id] = WMI_PEER_ANTDIV_INFO_EVENTID; + event_ids[wmi_pdev_set_hw_mode_rsp_event_id] = + WMI_PDEV_SET_HW_MODE_RESP_EVENTID; + event_ids[wmi_pdev_hw_mode_transition_event_id] = + WMI_PDEV_HW_MODE_TRANSITION_EVENTID; + event_ids[wmi_pdev_set_mac_config_resp_event_id] = + WMI_PDEV_SET_MAC_CONFIG_RESP_EVENTID; + event_ids[wmi_coex_bt_activity_event_id] = + WMI_WLAN_COEX_BT_ACTIVITY_EVENTID; + 
event_ids[wmi_mgmt_tx_bundle_completion_event_id] = + WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID; + event_ids[wmi_radio_tx_power_level_stats_event_id] = + WMI_RADIO_TX_POWER_LEVEL_STATS_EVENTID; + event_ids[wmi_report_stats_event_id] = WMI_REPORT_STATS_EVENTID; + event_ids[wmi_dma_buf_release_event_id] = + WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID; + event_ids[wmi_sap_obss_detection_report_event_id] = + WMI_SAP_OBSS_DETECTION_REPORT_EVENTID; + event_ids[wmi_host_swfda_event_id] = WMI_HOST_SWFDA_EVENTID; + event_ids[wmi_sar_get_limits_event_id] = WMI_SAR_GET_LIMITS_EVENTID; + event_ids[wmi_obss_color_collision_report_event_id] = + WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID; + event_ids[wmi_pdev_div_rssi_antid_event_id] = + WMI_PDEV_DIV_RSSI_ANTID_EVENTID; + event_ids[wmi_twt_enable_complete_event_id] = + WMI_TWT_ENABLE_COMPLETE_EVENTID; + event_ids[wmi_apf_get_vdev_work_memory_resp_event_id] = + WMI_BPF_GET_VDEV_WORK_MEMORY_RESP_EVENTID; + event_ids[wmi_wlan_sar2_result_event_id] = WMI_SAR2_RESULT_EVENTID; + event_ids[wmi_roam_scan_stats_event_id] = WMI_ROAM_SCAN_STATS_EVENTID; + event_ids[wmi_vdev_bcn_reception_stats_event_id] = + WMI_VDEV_BCN_RECEPTION_STATS_EVENTID; + event_ids[wmi_roam_blacklist_event_id] = WMI_ROAM_BLACKLIST_EVENTID; + event_ids[wmi_pdev_cold_boot_cal_event_id] = + WMI_PDEV_COLD_BOOT_CAL_DATA_EVENTID; +#ifdef WLAN_MWS_INFO_DEBUGFS + event_ids[wmi_vdev_get_mws_coex_state_eventid] = + WMI_VDEV_GET_MWS_COEX_STATE_EVENTID; + event_ids[wmi_vdev_get_mws_coex_dpwb_state_eventid] = + WMI_VDEV_GET_MWS_COEX_DPWB_STATE_EVENTID; + event_ids[wmi_vdev_get_mws_coex_tdm_state_eventid] = + WMI_VDEV_GET_MWS_COEX_TDM_STATE_EVENTID; + event_ids[wmi_vdev_get_mws_coex_idrx_state_eventid] = + WMI_VDEV_GET_MWS_COEX_IDRX_STATE_EVENTID; + event_ids[wmi_vdev_get_mws_coex_antenna_sharing_state_eventid] = + WMI_VDEV_GET_MWS_COEX_ANTENNA_SHARING_STATE_EVENTID; +#endif + event_ids[wmi_coex_report_antenna_isolation_event_id] = + WMI_COEX_REPORT_ANTENNA_ISOLATION_EVENTID; +} + +/** + * 
populate_tlv_service() - populates wmi services
 *
 * @param wmi_service: Pointer to hold wmi_service
 * Return: None
 */
static void populate_tlv_service(uint32_t *wmi_service)
{
	/*
	 * Translate host-abstracted service enum indices into the TLV
	 * firmware's WMI_SERVICE_* bit numbers.  Entries the TLV target
	 * does not implement are set to WMI_SERVICE_UNAVAILABLE further
	 * below so lookups fail explicitly rather than aliasing bit 0.
	 */
	wmi_service[wmi_service_beacon_offload] = WMI_SERVICE_BEACON_OFFLOAD;
	wmi_service[wmi_service_ack_timeout] = WMI_SERVICE_ACK_TIMEOUT;
	wmi_service[wmi_service_scan_offload] = WMI_SERVICE_SCAN_OFFLOAD;
	wmi_service[wmi_service_roam_scan_offload] =
				WMI_SERVICE_ROAM_SCAN_OFFLOAD;
	wmi_service[wmi_service_bcn_miss_offload] =
				WMI_SERVICE_BCN_MISS_OFFLOAD;
	wmi_service[wmi_service_sta_pwrsave] = WMI_SERVICE_STA_PWRSAVE;
	wmi_service[wmi_service_sta_advanced_pwrsave] =
				WMI_SERVICE_STA_ADVANCED_PWRSAVE;
	wmi_service[wmi_service_ap_uapsd] = WMI_SERVICE_AP_UAPSD;
	wmi_service[wmi_service_ap_dfs] = WMI_SERVICE_AP_DFS;
	wmi_service[wmi_service_11ac] = WMI_SERVICE_11AC;
	wmi_service[wmi_service_blockack] = WMI_SERVICE_BLOCKACK;
	wmi_service[wmi_service_phyerr] = WMI_SERVICE_PHYERR;
	wmi_service[wmi_service_bcn_filter] = WMI_SERVICE_BCN_FILTER;
	wmi_service[wmi_service_rtt] = WMI_SERVICE_RTT;
	wmi_service[wmi_service_wow] = WMI_SERVICE_WOW;
	wmi_service[wmi_service_ratectrl_cache] = WMI_SERVICE_RATECTRL_CACHE;
	wmi_service[wmi_service_iram_tids] = WMI_SERVICE_IRAM_TIDS;
	wmi_service[wmi_service_arpns_offload] = WMI_SERVICE_ARPNS_OFFLOAD;
	wmi_service[wmi_service_nlo] = WMI_SERVICE_NLO;
	wmi_service[wmi_service_gtk_offload] = WMI_SERVICE_GTK_OFFLOAD;
	wmi_service[wmi_service_scan_sch] = WMI_SERVICE_SCAN_SCH;
	wmi_service[wmi_service_csa_offload] = WMI_SERVICE_CSA_OFFLOAD;
	wmi_service[wmi_service_chatter] = WMI_SERVICE_CHATTER;
	wmi_service[wmi_service_coex_freqavoid] = WMI_SERVICE_COEX_FREQAVOID;
	wmi_service[wmi_service_packet_power_save] =
				WMI_SERVICE_PACKET_POWER_SAVE;
	wmi_service[wmi_service_force_fw_hang] = WMI_SERVICE_FORCE_FW_HANG;
	wmi_service[wmi_service_gpio] = WMI_SERVICE_GPIO;
	wmi_service[wmi_service_sta_dtim_ps_modulated_dtim] =
				WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM;
	wmi_service[wmi_sta_uapsd_basic_auto_trig] =
				WMI_STA_UAPSD_BASIC_AUTO_TRIG;
	wmi_service[wmi_sta_uapsd_var_auto_trig] = WMI_STA_UAPSD_VAR_AUTO_TRIG;
	wmi_service[wmi_service_sta_keep_alive] = WMI_SERVICE_STA_KEEP_ALIVE;
	wmi_service[wmi_service_tx_encap] = WMI_SERVICE_TX_ENCAP;
	wmi_service[wmi_service_ap_ps_detect_out_of_sync] =
				WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC;
	wmi_service[wmi_service_early_rx] = WMI_SERVICE_EARLY_RX;
	wmi_service[wmi_service_sta_smps] = WMI_SERVICE_STA_SMPS;
	wmi_service[wmi_service_fwtest] = WMI_SERVICE_FWTEST;
	wmi_service[wmi_service_sta_wmmac] = WMI_SERVICE_STA_WMMAC;
	wmi_service[wmi_service_tdls] = WMI_SERVICE_TDLS;
	wmi_service[wmi_service_burst] = WMI_SERVICE_BURST;
	wmi_service[wmi_service_mcc_bcn_interval_change] =
				WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE;
	wmi_service[wmi_service_adaptive_ocs] = WMI_SERVICE_ADAPTIVE_OCS;
	wmi_service[wmi_service_ba_ssn_support] = WMI_SERVICE_BA_SSN_SUPPORT;
	wmi_service[wmi_service_filter_ipsec_natkeepalive] =
				WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE;
	wmi_service[wmi_service_wlan_hb] = WMI_SERVICE_WLAN_HB;
	wmi_service[wmi_service_lte_ant_share_support] =
				WMI_SERVICE_LTE_ANT_SHARE_SUPPORT;
	wmi_service[wmi_service_batch_scan] = WMI_SERVICE_BATCH_SCAN;
	wmi_service[wmi_service_qpower] = WMI_SERVICE_QPOWER;
	wmi_service[wmi_service_plmreq] = WMI_SERVICE_PLMREQ;
	wmi_service[wmi_service_thermal_mgmt] = WMI_SERVICE_THERMAL_MGMT;
	wmi_service[wmi_service_rmc] = WMI_SERVICE_RMC;
	wmi_service[wmi_service_mhf_offload] = WMI_SERVICE_MHF_OFFLOAD;
	wmi_service[wmi_service_coex_sar] = WMI_SERVICE_COEX_SAR;
	wmi_service[wmi_service_bcn_txrate_override] =
				WMI_SERVICE_BCN_TXRATE_OVERRIDE;
	wmi_service[wmi_service_nan] = WMI_SERVICE_NAN;
	wmi_service[wmi_service_l1ss_stat] = WMI_SERVICE_L1SS_STAT;
	wmi_service[wmi_service_estimate_linkspeed] =
				WMI_SERVICE_ESTIMATE_LINKSPEED;
	wmi_service[wmi_service_obss_scan] = WMI_SERVICE_OBSS_SCAN;
	wmi_service[wmi_service_tdls_offchan] = WMI_SERVICE_TDLS_OFFCHAN;
	wmi_service[wmi_service_tdls_uapsd_buffer_sta] =
				WMI_SERVICE_TDLS_UAPSD_BUFFER_STA;
	wmi_service[wmi_service_tdls_uapsd_sleep_sta] =
				WMI_SERVICE_TDLS_UAPSD_SLEEP_STA;
	wmi_service[wmi_service_ibss_pwrsave] = WMI_SERVICE_IBSS_PWRSAVE;
	wmi_service[wmi_service_lpass] = WMI_SERVICE_LPASS;
	wmi_service[wmi_service_extscan] = WMI_SERVICE_EXTSCAN;
	wmi_service[wmi_service_d0wow] = WMI_SERVICE_D0WOW;
	wmi_service[wmi_service_hsoffload] = WMI_SERVICE_HSOFFLOAD;
	wmi_service[wmi_service_roam_ho_offload] = WMI_SERVICE_ROAM_HO_OFFLOAD;
	wmi_service[wmi_service_rx_full_reorder] = WMI_SERVICE_RX_FULL_REORDER;
	wmi_service[wmi_service_dhcp_offload] = WMI_SERVICE_DHCP_OFFLOAD;
	wmi_service[wmi_service_sta_rx_ipa_offload_support] =
				WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT;
	wmi_service[wmi_service_mdns_offload] = WMI_SERVICE_MDNS_OFFLOAD;
	wmi_service[wmi_service_sap_auth_offload] =
				WMI_SERVICE_SAP_AUTH_OFFLOAD;
	wmi_service[wmi_service_dual_band_simultaneous_support] =
				WMI_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT;
	wmi_service[wmi_service_ocb] = WMI_SERVICE_OCB;
	wmi_service[wmi_service_ap_arpns_offload] =
				WMI_SERVICE_AP_ARPNS_OFFLOAD;
	wmi_service[wmi_service_per_band_chainmask_support] =
				WMI_SERVICE_PER_BAND_CHAINMASK_SUPPORT;
	wmi_service[wmi_service_packet_filter_offload] =
				WMI_SERVICE_PACKET_FILTER_OFFLOAD;
	wmi_service[wmi_service_mgmt_tx_htt] = WMI_SERVICE_MGMT_TX_HTT;
	wmi_service[wmi_service_mgmt_tx_wmi] = WMI_SERVICE_MGMT_TX_WMI;
	wmi_service[wmi_service_ext_msg] = WMI_SERVICE_EXT_MSG;
	wmi_service[wmi_service_mawc] = WMI_SERVICE_MAWC;
	wmi_service[wmi_service_multiple_vdev_restart] =
				WMI_SERVICE_MULTIPLE_VDEV_RESTART;

	/* Services this TLV target does not implement */
	wmi_service[wmi_service_roam_offload] = WMI_SERVICE_UNAVAILABLE;
	wmi_service[wmi_service_ratectrl] = WMI_SERVICE_UNAVAILABLE;
	wmi_service[wmi_service_smart_antenna_sw_support] =
				WMI_SERVICE_UNAVAILABLE;
wmi_service[wmi_service_smart_antenna_hw_support] = + WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_enhanced_proxy_sta] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_tt] = WMI_SERVICE_THERM_THROT; + wmi_service[wmi_service_atf] = WMI_SERVICE_ATF; + wmi_service[wmi_service_peer_caching] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_coex_gpio] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_aux_spectral_intf] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_aux_chan_load_intf] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_bss_channel_info_64] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_ext_res_cfg_support] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_mesh] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_restrt_chnl_support] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_peer_stats] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_mesh_11s] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_periodic_chan_stat_support] = + WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT; + wmi_service[wmi_service_tx_mode_push_only] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_tx_mode_push_pull] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_tx_mode_dynamic] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_btcoex_duty_cycle] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_4_wire_coex_support] = WMI_SERVICE_UNAVAILABLE; + wmi_service[wmi_service_mesh] = WMI_SERVICE_ENTERPRISE_MESH; + wmi_service[wmi_service_peer_assoc_conf] = WMI_SERVICE_PEER_ASSOC_CONF; + wmi_service[wmi_service_egap] = WMI_SERVICE_EGAP; + wmi_service[wmi_service_sta_pmf_offload] = WMI_SERVICE_STA_PMF_OFFLOAD; + wmi_service[wmi_service_unified_wow_capability] = + WMI_SERVICE_UNIFIED_WOW_CAPABILITY; + wmi_service[wmi_service_enterprise_mesh] = WMI_SERVICE_ENTERPRISE_MESH; + wmi_service[wmi_service_apf_offload] = WMI_SERVICE_BPF_OFFLOAD; + wmi_service[wmi_service_sync_delete_cmds] = + WMI_SERVICE_SYNC_DELETE_CMDS; + 
	wmi_service[wmi_service_ratectrl_limit_max_min_rates] =
				WMI_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES;
	wmi_service[wmi_service_nan_data] = WMI_SERVICE_NAN_DATA;
	wmi_service[wmi_service_nan_rtt] = WMI_SERVICE_NAN_RTT;
	wmi_service[wmi_service_11ax] = WMI_SERVICE_11AX;
	wmi_service[wmi_service_deprecated_replace] =
				WMI_SERVICE_DEPRECATED_REPLACE;
	wmi_service[wmi_service_tdls_conn_tracker_in_host_mode] =
				WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE;
	wmi_service[wmi_service_enhanced_mcast_filter] =
				WMI_SERVICE_ENHANCED_MCAST_FILTER;
	wmi_service[wmi_service_half_rate_quarter_rate_support] =
				WMI_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT;
	wmi_service[wmi_service_vdev_rx_filter] = WMI_SERVICE_VDEV_RX_FILTER;
	wmi_service[wmi_service_p2p_listen_offload_support] =
				WMI_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT;
	wmi_service[wmi_service_mark_first_wakeup_packet] =
				WMI_SERVICE_MARK_FIRST_WAKEUP_PACKET;
	wmi_service[wmi_service_multiple_mcast_filter_set] =
				WMI_SERVICE_MULTIPLE_MCAST_FILTER_SET;
	wmi_service[wmi_service_host_managed_rx_reorder] =
				WMI_SERVICE_HOST_MANAGED_RX_REORDER;
	wmi_service[wmi_service_flash_rdwr_support] =
				WMI_SERVICE_FLASH_RDWR_SUPPORT;
	wmi_service[wmi_service_wlan_stats_report] =
				WMI_SERVICE_WLAN_STATS_REPORT;
	wmi_service[wmi_service_tx_msdu_id_new_partition_support] =
				WMI_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT;
	wmi_service[wmi_service_dfs_phyerr_offload] =
				WMI_SERVICE_DFS_PHYERR_OFFLOAD;
	wmi_service[wmi_service_rcpi_support] = WMI_SERVICE_RCPI_SUPPORT;
	wmi_service[wmi_service_fw_mem_dump_support] =
				WMI_SERVICE_FW_MEM_DUMP_SUPPORT;
	wmi_service[wmi_service_peer_stats_info] = WMI_SERVICE_PEER_STATS_INFO;
	wmi_service[wmi_service_regulatory_db] = WMI_SERVICE_REGULATORY_DB;
	wmi_service[wmi_service_11d_offload] = WMI_SERVICE_11D_OFFLOAD;
	wmi_service[wmi_service_hw_data_filtering] =
				WMI_SERVICE_HW_DATA_FILTERING;
	wmi_service[wmi_service_pkt_routing] = WMI_SERVICE_PKT_ROUTING;
	wmi_service[wmi_service_offchan_tx_wmi] = WMI_SERVICE_OFFCHAN_TX_WMI;
	wmi_service[wmi_service_chan_load_info] = WMI_SERVICE_CHAN_LOAD_INFO;
	wmi_service[wmi_service_extended_nss_support] =
				WMI_SERVICE_EXTENDED_NSS_SUPPORT;
	/* Wide-bandwidth scan capability rides on the phymode-scan bit */
	wmi_service[wmi_service_widebw_scan] = WMI_SERVICE_SCAN_PHYMODE_SUPPORT;
	wmi_service[wmi_service_bcn_offload_start_stop_support] =
				WMI_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT;
	wmi_service[wmi_service_offchan_data_tid_support] =
				WMI_SERVICE_OFFCHAN_DATA_TID_SUPPORT;
	wmi_service[wmi_service_support_dma] =
				WMI_SERVICE_SUPPORT_DIRECT_DMA;
	wmi_service[wmi_service_8ss_tx_bfee] = WMI_SERVICE_8SS_TX_BFEE;
	wmi_service[wmi_service_fils_support] = WMI_SERVICE_FILS_SUPPORT;
	wmi_service[wmi_service_mawc_support] = WMI_SERVICE_MAWC_SUPPORT;
	wmi_service[wmi_service_wow_wakeup_by_timer_pattern] =
				WMI_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN;
	wmi_service[wmi_service_11k_neighbour_report_support] =
				WMI_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT;
	wmi_service[wmi_service_ap_obss_detection_offload] =
				WMI_SERVICE_AP_OBSS_DETECTION_OFFLOAD;
	wmi_service[wmi_service_bss_color_offload] =
				WMI_SERVICE_BSS_COLOR_OFFLOAD;
	wmi_service[wmi_service_gmac_offload_support] =
				WMI_SERVICE_GMAC_OFFLOAD_SUPPORT;
	wmi_service[wmi_service_dual_beacon_on_single_mac_scc_support] =
				WMI_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT;
	wmi_service[wmi_service_dual_beacon_on_single_mac_mcc_support] =
				WMI_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT;
	/* TWT requestor/responder map to the STA/AP TWT firmware bits */
	wmi_service[wmi_service_twt_requestor] = WMI_SERVICE_STA_TWT;
	wmi_service[wmi_service_twt_responder] = WMI_SERVICE_AP_TWT;
	wmi_service[wmi_service_listen_interval_offload_support] =
				WMI_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT;
	wmi_service[wmi_service_per_vdev_chain_support] =
				WMI_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT;
	wmi_service[wmi_service_new_htt_msg_format] =
				WMI_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN;
	wmi_service[wmi_service_peer_unmap_cnf_support] =
				WMI_SERVICE_PEER_UNMAP_RESPONSE_SUPPORT;
	wmi_service[wmi_service_beacon_reception_stats] =
				WMI_SERVICE_BEACON_RECEPTION_STATS;
	wmi_service[wmi_service_vdev_latency_config] =
				WMI_SERVICE_VDEV_LATENCY_CONFIG;
	wmi_service[wmi_service_sta_plus_sta_support] =
				WMI_SERVICE_STA_PLUS_STA_SUPPORT;
	wmi_service[wmi_service_tx_compl_tsf64] =
				WMI_SERVICE_TX_COMPL_TSF64;
	wmi_service[wmi_service_three_way_coex_config_legacy] =
				WMI_SERVICE_THREE_WAY_COEX_CONFIG_LEGACY;
}

#ifndef CONFIG_MCL

/**
 * populate_pdev_param_tlv() - populates pdev params
 *
 * @param pdev_param: Pointer to hold pdev params
 * Return: None
 */
static void populate_pdev_param_tlv(uint32_t *pdev_param)
{
	/*
	 * Translate host-abstracted pdev param enum indices into the TLV
	 * firmware's WMI_PDEV_PARAM_* IDs; params with no TLV equivalent
	 * are mapped to WMI_UNAVAILABLE_PARAM further below.
	 */
	pdev_param[wmi_pdev_param_tx_chain_mask] = WMI_PDEV_PARAM_TX_CHAIN_MASK;
	pdev_param[wmi_pdev_param_rx_chain_mask] = WMI_PDEV_PARAM_RX_CHAIN_MASK;
	pdev_param[wmi_pdev_param_txpower_limit2g] =
			WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
	pdev_param[wmi_pdev_param_txpower_limit5g] =
			WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
	pdev_param[wmi_pdev_param_txpower_scale] = WMI_PDEV_PARAM_TXPOWER_SCALE;
	pdev_param[wmi_pdev_param_beacon_gen_mode] =
			WMI_PDEV_PARAM_BEACON_GEN_MODE;
	pdev_param[wmi_pdev_param_beacon_tx_mode] =
			WMI_PDEV_PARAM_BEACON_TX_MODE;
	pdev_param[wmi_pdev_param_resmgr_offchan_mode] =
			WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE;
	pdev_param[wmi_pdev_param_protection_mode] =
			WMI_PDEV_PARAM_PROTECTION_MODE;
	pdev_param[wmi_pdev_param_dynamic_bw] = WMI_PDEV_PARAM_DYNAMIC_BW;
	pdev_param[wmi_pdev_param_non_agg_sw_retry_th] =
			WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH;
	pdev_param[wmi_pdev_param_agg_sw_retry_th] =
			WMI_PDEV_PARAM_AGG_SW_RETRY_TH;
	pdev_param[wmi_pdev_param_sta_kickout_th] =
			WMI_PDEV_PARAM_STA_KICKOUT_TH;
	pdev_param[wmi_pdev_param_ac_aggrsize_scaling] =
			WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING;
	pdev_param[wmi_pdev_param_ltr_enable] = WMI_PDEV_PARAM_LTR_ENABLE;
	pdev_param[wmi_pdev_param_ltr_ac_latency_be] =
			WMI_PDEV_PARAM_LTR_AC_LATENCY_BE;
	pdev_param[wmi_pdev_param_ltr_ac_latency_bk] =
			WMI_PDEV_PARAM_LTR_AC_LATENCY_BK;
	pdev_param[wmi_pdev_param_ltr_ac_latency_vi] =
			WMI_PDEV_PARAM_LTR_AC_LATENCY_VI;
	pdev_param[wmi_pdev_param_ltr_ac_latency_vo] =
			WMI_PDEV_PARAM_LTR_AC_LATENCY_VO;
	pdev_param[wmi_pdev_param_ltr_ac_latency_timeout] =
			WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT;
	pdev_param[wmi_pdev_param_ltr_sleep_override] =
			WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE;
	pdev_param[wmi_pdev_param_ltr_rx_override] =
			WMI_PDEV_PARAM_LTR_RX_OVERRIDE;
	pdev_param[wmi_pdev_param_ltr_tx_activity_timeout] =
			WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT;
	pdev_param[wmi_pdev_param_l1ss_enable] = WMI_PDEV_PARAM_L1SS_ENABLE;
	pdev_param[wmi_pdev_param_dsleep_enable] = WMI_PDEV_PARAM_DSLEEP_ENABLE;
	pdev_param[wmi_pdev_param_pcielp_txbuf_flush] =
			WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH;
	pdev_param[wmi_pdev_param_pcielp_txbuf_watermark] =
			WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK;
	pdev_param[wmi_pdev_param_pcielp_txbuf_tmo_en] =
			WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN;
	pdev_param[wmi_pdev_param_pcielp_txbuf_tmo_value] =
			WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE;
	pdev_param[wmi_pdev_param_pdev_stats_update_period] =
			WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD;
	pdev_param[wmi_pdev_param_vdev_stats_update_period] =
			WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD;
	pdev_param[wmi_pdev_param_peer_stats_update_period] =
			WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD;
	pdev_param[wmi_pdev_param_bcnflt_stats_update_period] =
			WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD;
	pdev_param[wmi_pdev_param_pmf_qos] = WMI_PDEV_PARAM_PMF_QOS;
	pdev_param[wmi_pdev_param_arp_ac_override] =
			WMI_PDEV_PARAM_ARP_AC_OVERRIDE;
	pdev_param[wmi_pdev_param_dcs] = WMI_PDEV_PARAM_DCS;
	pdev_param[wmi_pdev_param_ani_enable] = WMI_PDEV_PARAM_ANI_ENABLE;
	pdev_param[wmi_pdev_param_ani_poll_period] =
			WMI_PDEV_PARAM_ANI_POLL_PERIOD;
	pdev_param[wmi_pdev_param_ani_listen_period] =
			WMI_PDEV_PARAM_ANI_LISTEN_PERIOD;
	pdev_param[wmi_pdev_param_ani_ofdm_level] =
			WMI_PDEV_PARAM_ANI_OFDM_LEVEL;
	pdev_param[wmi_pdev_param_ani_cck_level] = WMI_PDEV_PARAM_ANI_CCK_LEVEL;
	pdev_param[wmi_pdev_param_dyntxchain] = WMI_PDEV_PARAM_DYNTXCHAIN;
	pdev_param[wmi_pdev_param_proxy_sta] = WMI_PDEV_PARAM_PROXY_STA;
	pdev_param[wmi_pdev_param_idle_ps_config] =
			WMI_PDEV_PARAM_IDLE_PS_CONFIG;
	pdev_param[wmi_pdev_param_power_gating_sleep] =
			WMI_PDEV_PARAM_POWER_GATING_SLEEP;
	pdev_param[wmi_pdev_param_rfkill_enable] = WMI_PDEV_PARAM_RFKILL_ENABLE;
	pdev_param[wmi_pdev_param_burst_dur] = WMI_PDEV_PARAM_BURST_DUR;
	pdev_param[wmi_pdev_param_burst_enable] = WMI_PDEV_PARAM_BURST_ENABLE;
	pdev_param[wmi_pdev_param_hw_rfkill_config] =
			WMI_PDEV_PARAM_HW_RFKILL_CONFIG;
	pdev_param[wmi_pdev_param_low_power_rf_enable] =
			WMI_PDEV_PARAM_LOW_POWER_RF_ENABLE;
	pdev_param[wmi_pdev_param_l1ss_track] = WMI_PDEV_PARAM_L1SS_TRACK;
	pdev_param[wmi_pdev_param_hyst_en] = WMI_PDEV_PARAM_HYST_EN;
	pdev_param[wmi_pdev_param_power_collapse_enable] =
			WMI_PDEV_PARAM_POWER_COLLAPSE_ENABLE;
	pdev_param[wmi_pdev_param_led_sys_state] = WMI_PDEV_PARAM_LED_SYS_STATE;
	pdev_param[wmi_pdev_param_led_enable] = WMI_PDEV_PARAM_LED_ENABLE;
	pdev_param[wmi_pdev_param_audio_over_wlan_latency] =
			WMI_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY;
	pdev_param[wmi_pdev_param_audio_over_wlan_enable] =
			WMI_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE;
	pdev_param[wmi_pdev_param_whal_mib_stats_update_enable] =
			WMI_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE;
	pdev_param[wmi_pdev_param_vdev_rate_stats_update_period] =
			WMI_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD;
	pdev_param[wmi_pdev_param_cts_cbw] = WMI_PDEV_PARAM_CTS_CBW;
	pdev_param[wmi_pdev_param_wnts_config] = WMI_PDEV_PARAM_WNTS_CONFIG;
	pdev_param[wmi_pdev_param_adaptive_early_rx_enable] =
			WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_ENABLE;
	pdev_param[wmi_pdev_param_adaptive_early_rx_min_sleep_slop] =
			WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_MIN_SLEEP_SLOP;
	pdev_param[wmi_pdev_param_adaptive_early_rx_inc_dec_step] =
			WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_INC_DEC_STEP;
	pdev_param[wmi_pdev_param_early_rx_fix_sleep_slop] =
			WMI_PDEV_PARAM_EARLY_RX_FIX_SLEEP_SLOP;
	pdev_param[wmi_pdev_param_bmiss_based_adaptive_bto_enable] =
			WMI_PDEV_PARAM_BMISS_BASED_ADAPTIVE_BTO_ENABLE;
	pdev_param[wmi_pdev_param_bmiss_bto_min_bcn_timeout] =
			WMI_PDEV_PARAM_BMISS_BTO_MIN_BCN_TIMEOUT;
	pdev_param[wmi_pdev_param_bmiss_bto_inc_dec_step] =
			WMI_PDEV_PARAM_BMISS_BTO_INC_DEC_STEP;
	pdev_param[wmi_pdev_param_bto_fix_bcn_timeout] =
			WMI_PDEV_PARAM_BTO_FIX_BCN_TIMEOUT;
	pdev_param[wmi_pdev_param_ce_based_adaptive_bto_enable] =
			WMI_PDEV_PARAM_CE_BASED_ADAPTIVE_BTO_ENABLE;
	pdev_param[wmi_pdev_param_ce_bto_combo_ce_value] =
			WMI_PDEV_PARAM_CE_BTO_COMBO_CE_VALUE;
	pdev_param[wmi_pdev_param_tx_chain_mask_2g] =
			WMI_PDEV_PARAM_TX_CHAIN_MASK_2G;
	pdev_param[wmi_pdev_param_rx_chain_mask_2g] =
			WMI_PDEV_PARAM_RX_CHAIN_MASK_2G;
	pdev_param[wmi_pdev_param_tx_chain_mask_5g] =
			WMI_PDEV_PARAM_TX_CHAIN_MASK_5G;
	pdev_param[wmi_pdev_param_rx_chain_mask_5g] =
			WMI_PDEV_PARAM_RX_CHAIN_MASK_5G;
	pdev_param[wmi_pdev_param_tx_chain_mask_cck] =
			WMI_PDEV_PARAM_TX_CHAIN_MASK_CCK;
	pdev_param[wmi_pdev_param_tx_chain_mask_1ss] =
			WMI_PDEV_PARAM_TX_CHAIN_MASK_1SS;
	pdev_param[wmi_pdev_param_rx_filter] = WMI_PDEV_PARAM_RX_FILTER;
	pdev_param[wmi_pdev_set_mcast_to_ucast_tid] =
			WMI_PDEV_SET_MCAST_TO_UCAST_TID;
	pdev_param[wmi_pdev_param_mgmt_retry_limit] =
			WMI_PDEV_PARAM_MGMT_RETRY_LIMIT;
	pdev_param[wmi_pdev_param_aggr_burst] = WMI_PDEV_PARAM_AGGR_BURST;
	pdev_param[wmi_pdev_peer_sta_ps_statechg_enable] =
			WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE;
	pdev_param[wmi_pdev_param_proxy_sta_mode] =
			WMI_PDEV_PARAM_PROXY_STA_MODE;
	pdev_param[wmi_pdev_param_mu_group_policy] =
			WMI_PDEV_PARAM_MU_GROUP_POLICY;
	pdev_param[wmi_pdev_param_noise_detection] =
			WMI_PDEV_PARAM_NOISE_DETECTION;
	pdev_param[wmi_pdev_param_noise_threshold] =
			WMI_PDEV_PARAM_NOISE_THRESHOLD;
	pdev_param[wmi_pdev_param_dpd_enable] = WMI_PDEV_PARAM_DPD_ENABLE;
	pdev_param[wmi_pdev_param_set_mcast_bcast_echo] =
			WMI_PDEV_PARAM_SET_MCAST_BCAST_ECHO;
	pdev_param[wmi_pdev_param_atf_strict_sch] =
			WMI_PDEV_PARAM_ATF_STRICT_SCH;
	pdev_param[wmi_pdev_param_atf_sched_duration] =
			WMI_PDEV_PARAM_ATF_SCHED_DURATION;
	pdev_param[wmi_pdev_param_ant_plzn] = WMI_PDEV_PARAM_ANT_PLZN;
	pdev_param[wmi_pdev_param_sensitivity_level] =
			WMI_PDEV_PARAM_SENSITIVITY_LEVEL;
	pdev_param[wmi_pdev_param_signed_txpower_2g] =
			WMI_PDEV_PARAM_SIGNED_TXPOWER_2G;
	pdev_param[wmi_pdev_param_signed_txpower_5g] =
			WMI_PDEV_PARAM_SIGNED_TXPOWER_5G;
	pdev_param[wmi_pdev_param_enable_per_tid_amsdu] =
			WMI_PDEV_PARAM_ENABLE_PER_TID_AMSDU;
	pdev_param[wmi_pdev_param_enable_per_tid_ampdu] =
			WMI_PDEV_PARAM_ENABLE_PER_TID_AMPDU;
	pdev_param[wmi_pdev_param_cca_threshold] =
			WMI_PDEV_PARAM_CCA_THRESHOLD;
	pdev_param[wmi_pdev_param_rts_fixed_rate] =
			WMI_PDEV_PARAM_RTS_FIXED_RATE;
	/* No TLV equivalent for cal period; marked unavailable */
	pdev_param[wmi_pdev_param_cal_period] = WMI_UNAVAILABLE_PARAM;
	pdev_param[wmi_pdev_param_pdev_reset] = WMI_PDEV_PARAM_PDEV_RESET;
	pdev_param[wmi_pdev_param_wapi_mbssid_offset] =
			WMI_PDEV_PARAM_WAPI_MBSSID_OFFSET;
	pdev_param[wmi_pdev_param_arp_srcaddr] =
			WMI_PDEV_PARAM_ARP_DBG_SRCADDR;
	pdev_param[wmi_pdev_param_arp_dstaddr] =
			WMI_PDEV_PARAM_ARP_DBG_DSTADDR;
	pdev_param[wmi_pdev_param_txpower_decr_db] =
			WMI_PDEV_PARAM_TXPOWER_DECR_DB;
	pdev_param[wmi_pdev_param_rx_batchmode] = WMI_UNAVAILABLE_PARAM;
	pdev_param[wmi_pdev_param_packet_aggr_delay] = WMI_UNAVAILABLE_PARAM;
	pdev_param[wmi_pdev_param_atf_obss_noise_sch] =
			WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCH;
	pdev_param[wmi_pdev_param_atf_obss_noise_scaling_factor] =
			WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR;
	pdev_param[wmi_pdev_param_cust_txpower_scale] =
			WMI_PDEV_PARAM_CUST_TXPOWER_SCALE;
	pdev_param[wmi_pdev_param_atf_dynamic_enable] =
			WMI_PDEV_PARAM_ATF_DYNAMIC_ENABLE;
	pdev_param[wmi_pdev_param_atf_ssid_group_policy] =
			WMI_UNAVAILABLE_PARAM;
	pdev_param[wmi_pdev_param_igmpmld_override] = WMI_UNAVAILABLE_PARAM;
	pdev_param[wmi_pdev_param_igmpmld_tid] = WMI_UNAVAILABLE_PARAM;
	pdev_param[wmi_pdev_param_antenna_gain] = WMI_PDEV_PARAM_ANTENNA_GAIN;
	pdev_param[wmi_pdev_param_block_interbss] =
			WMI_PDEV_PARAM_BLOCK_INTERBSS;
	pdev_param[wmi_pdev_param_set_disable_reset_cmdid] =
			WMI_PDEV_PARAM_SET_DISABLE_RESET_CMDID;
	pdev_param[wmi_pdev_param_set_msdu_ttl_cmdid] =
			WMI_PDEV_PARAM_SET_MSDU_TTL_CMDID;
	pdev_param[wmi_pdev_param_txbf_sound_period_cmdid] =
			WMI_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID;
	pdev_param[wmi_pdev_param_set_burst_mode_cmdid] =
			WMI_PDEV_PARAM_SET_BURST_MODE_CMDID;
	pdev_param[wmi_pdev_param_en_stats] = WMI_PDEV_PARAM_EN_STATS;
	pdev_param[wmi_pdev_param_mesh_mcast_enable] =
			WMI_PDEV_PARAM_MESH_MCAST_ENABLE;
	pdev_param[wmi_pdev_param_set_promisc_mode_cmdid] =
			WMI_PDEV_PARAM_SET_PROMISC_MODE_CMDID;
	pdev_param[wmi_pdev_param_set_ppdu_duration_cmdid] =
			WMI_PDEV_PARAM_SET_PPDU_DURATION_CMDID;
	pdev_param[wmi_pdev_param_igmpmld_ac_override] =
			WMI_PDEV_PARAM_IGMPMLD_AC_OVERRIDE;
	pdev_param[wmi_pdev_param_remove_mcast2ucast_buffer] =
			WMI_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER;
	pdev_param[wmi_pdev_param_set_mcast2ucast_buffer] =
			WMI_PDEV_PARAM_SET_MCAST2UCAST_BUFFER;
	pdev_param[wmi_pdev_param_set_mcast2ucast_mode] =
			WMI_PDEV_PARAM_SET_MCAST2UCAST_MODE;
	pdev_param[wmi_pdev_param_smart_antenna_default_antenna] =
			WMI_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA;
	pdev_param[wmi_pdev_param_fast_channel_reset] =
			WMI_PDEV_PARAM_FAST_CHANNEL_RESET;
	pdev_param[wmi_pdev_param_rx_decap_mode] = WMI_PDEV_PARAM_RX_DECAP_MODE;
	pdev_param[wmi_pdev_param_tx_ack_timeout] = WMI_PDEV_PARAM_ACK_TIMEOUT;
	pdev_param[wmi_pdev_param_cck_tx_enable] = WMI_PDEV_PARAM_CCK_TX_ENABLE;
}

/**
 * populate_vdev_param_tlv() - populates vdev params
 *
 * @param vdev_param: Pointer to hold vdev params
 * 
Return: None
 */
static void populate_vdev_param_tlv(uint32_t *vdev_param)
{
	/*
	 * Translate host-abstracted vdev param enum indices into the TLV
	 * firmware's WMI_VDEV_PARAM_* IDs.
	 */
	vdev_param[wmi_vdev_param_rts_threshold] = WMI_VDEV_PARAM_RTS_THRESHOLD;
	vdev_param[wmi_vdev_param_fragmentation_threshold] =
			WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD;
	vdev_param[wmi_vdev_param_beacon_interval] =
			WMI_VDEV_PARAM_BEACON_INTERVAL;
	vdev_param[wmi_vdev_param_listen_interval] =
			WMI_VDEV_PARAM_LISTEN_INTERVAL;
	vdev_param[wmi_vdev_param_multicast_rate] =
			WMI_VDEV_PARAM_MULTICAST_RATE;
	vdev_param[wmi_vdev_param_mgmt_tx_rate] = WMI_VDEV_PARAM_MGMT_TX_RATE;
	vdev_param[wmi_vdev_param_slot_time] = WMI_VDEV_PARAM_SLOT_TIME;
	vdev_param[wmi_vdev_param_preamble] = WMI_VDEV_PARAM_PREAMBLE;
	vdev_param[wmi_vdev_param_swba_time] = WMI_VDEV_PARAM_SWBA_TIME;
	vdev_param[wmi_vdev_stats_update_period] = WMI_VDEV_STATS_UPDATE_PERIOD;
	vdev_param[wmi_vdev_pwrsave_ageout_time] = WMI_VDEV_PWRSAVE_AGEOUT_TIME;
	vdev_param[wmi_vdev_host_swba_interval] = WMI_VDEV_HOST_SWBA_INTERVAL;
	vdev_param[wmi_vdev_param_dtim_period] = WMI_VDEV_PARAM_DTIM_PERIOD;
	vdev_param[wmi_vdev_oc_scheduler_air_time_limit] =
			WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT;
	vdev_param[wmi_vdev_param_wds] = WMI_VDEV_PARAM_WDS;
	vdev_param[wmi_vdev_param_atim_window] = WMI_VDEV_PARAM_ATIM_WINDOW;
	vdev_param[wmi_vdev_param_bmiss_count_max] =
			WMI_VDEV_PARAM_BMISS_COUNT_MAX;
	vdev_param[wmi_vdev_param_bmiss_first_bcnt] =
			WMI_VDEV_PARAM_BMISS_FIRST_BCNT;
	vdev_param[wmi_vdev_param_bmiss_final_bcnt] =
			WMI_VDEV_PARAM_BMISS_FINAL_BCNT;
	vdev_param[wmi_vdev_param_feature_wmm] = WMI_VDEV_PARAM_FEATURE_WMM;
	vdev_param[wmi_vdev_param_chwidth] = WMI_VDEV_PARAM_CHWIDTH;
	vdev_param[wmi_vdev_param_chextoffset] = WMI_VDEV_PARAM_CHEXTOFFSET;
	vdev_param[wmi_vdev_param_disable_htprotection] =
			WMI_VDEV_PARAM_DISABLE_HTPROTECTION;
	vdev_param[wmi_vdev_param_sta_quickkickout] =
			WMI_VDEV_PARAM_STA_QUICKKICKOUT;
	vdev_param[wmi_vdev_param_mgmt_rate] = WMI_VDEV_PARAM_MGMT_RATE;
	vdev_param[wmi_vdev_param_protection_mode] =
			WMI_VDEV_PARAM_PROTECTION_MODE;
	vdev_param[wmi_vdev_param_fixed_rate] = WMI_VDEV_PARAM_FIXED_RATE;
	vdev_param[wmi_vdev_param_sgi] = WMI_VDEV_PARAM_SGI;
	vdev_param[wmi_vdev_param_ldpc] = WMI_VDEV_PARAM_LDPC;
	vdev_param[wmi_vdev_param_tx_stbc] = WMI_VDEV_PARAM_TX_STBC;
	vdev_param[wmi_vdev_param_rx_stbc] = WMI_VDEV_PARAM_RX_STBC;
	vdev_param[wmi_vdev_param_intra_bss_fwd] = WMI_VDEV_PARAM_INTRA_BSS_FWD;
	vdev_param[wmi_vdev_param_def_keyid] = WMI_VDEV_PARAM_DEF_KEYID;
	vdev_param[wmi_vdev_param_nss] = WMI_VDEV_PARAM_NSS;
	vdev_param[wmi_vdev_param_bcast_data_rate] =
			WMI_VDEV_PARAM_BCAST_DATA_RATE;
	vdev_param[wmi_vdev_param_mcast_data_rate] =
			WMI_VDEV_PARAM_MCAST_DATA_RATE;
	vdev_param[wmi_vdev_param_mcast_indicate] =
			WMI_VDEV_PARAM_MCAST_INDICATE;
	vdev_param[wmi_vdev_param_dhcp_indicate] =
			WMI_VDEV_PARAM_DHCP_INDICATE;
	vdev_param[wmi_vdev_param_unknown_dest_indicate] =
			WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE;
	vdev_param[wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs] =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS;
	vdev_param[wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs] =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS;
	vdev_param[wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs] =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS;
	vdev_param[wmi_vdev_param_ap_enable_nawds] =
			WMI_VDEV_PARAM_AP_ENABLE_NAWDS;
	vdev_param[wmi_vdev_param_enable_rtscts] = WMI_VDEV_PARAM_ENABLE_RTSCTS;
	vdev_param[wmi_vdev_param_txbf] = WMI_VDEV_PARAM_TXBF;
	vdev_param[wmi_vdev_param_packet_powersave] =
			WMI_VDEV_PARAM_PACKET_POWERSAVE;
	vdev_param[wmi_vdev_param_drop_unencry] = WMI_VDEV_PARAM_DROP_UNENCRY;
	vdev_param[wmi_vdev_param_tx_encap_type] = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
	vdev_param[wmi_vdev_param_ap_detect_out_of_sync_sleeping_sta_time_secs] =
		WMI_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS;
	vdev_param[wmi_vdev_param_early_rx_adjust_enable] =
			WMI_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE;
	vdev_param[wmi_vdev_param_early_rx_tgt_bmiss_num] =
			WMI_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM;
	vdev_param[wmi_vdev_param_early_rx_bmiss_sample_cycle] =
			WMI_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE;
	vdev_param[wmi_vdev_param_early_rx_slop_step] =
			WMI_VDEV_PARAM_EARLY_RX_SLOP_STEP;
	vdev_param[wmi_vdev_param_early_rx_init_slop] =
			WMI_VDEV_PARAM_EARLY_RX_INIT_SLOP;
	vdev_param[wmi_vdev_param_early_rx_adjust_pause] =
			WMI_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE;
	vdev_param[wmi_vdev_param_tx_pwrlimit] = WMI_VDEV_PARAM_TX_PWRLIMIT;
	vdev_param[wmi_vdev_param_snr_num_for_cal] =
			WMI_VDEV_PARAM_SNR_NUM_FOR_CAL;
	vdev_param[wmi_vdev_param_roam_fw_offload] =
			WMI_VDEV_PARAM_ROAM_FW_OFFLOAD;
	vdev_param[wmi_vdev_param_enable_rmc] = WMI_VDEV_PARAM_ENABLE_RMC;
	vdev_param[wmi_vdev_param_ibss_max_bcn_lost_ms] =
			WMI_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS;
	vdev_param[wmi_vdev_param_max_rate] = WMI_VDEV_PARAM_MAX_RATE;
	vdev_param[wmi_vdev_param_early_rx_drift_sample] =
			WMI_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE;
	vdev_param[wmi_vdev_param_set_ibss_tx_fail_cnt_thr] =
			WMI_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR;
	vdev_param[wmi_vdev_param_ebt_resync_timeout] =
			WMI_VDEV_PARAM_EBT_RESYNC_TIMEOUT;
	vdev_param[wmi_vdev_param_aggr_trig_event_enable] =
			WMI_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE;
	vdev_param[wmi_vdev_param_is_ibss_power_save_allowed] =
			WMI_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED;
	vdev_param[wmi_vdev_param_is_power_collapse_allowed] =
			WMI_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED;
	vdev_param[wmi_vdev_param_is_awake_on_txrx_enabled] =
			WMI_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED;
	vdev_param[wmi_vdev_param_inactivity_cnt] =
			WMI_VDEV_PARAM_INACTIVITY_CNT;
	vdev_param[wmi_vdev_param_txsp_end_inactivity_time_ms] =
			WMI_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS;
	vdev_param[wmi_vdev_param_dtim_policy] = WMI_VDEV_PARAM_DTIM_POLICY;
	vdev_param[wmi_vdev_param_ibss_ps_warmup_time_secs] =
			WMI_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS;
	vdev_param[wmi_vdev_param_ibss_ps_1rx_chain_in_atim_window_enable] =
		WMI_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE;
	vdev_param[wmi_vdev_param_rx_leak_window] =
			WMI_VDEV_PARAM_RX_LEAK_WINDOW;
	vdev_param[wmi_vdev_param_stats_avg_factor] =
			WMI_VDEV_PARAM_STATS_AVG_FACTOR;
	vdev_param[wmi_vdev_param_disconnect_th] = WMI_VDEV_PARAM_DISCONNECT_TH;
	vdev_param[wmi_vdev_param_rtscts_rate] = WMI_VDEV_PARAM_RTSCTS_RATE;
	vdev_param[wmi_vdev_param_mcc_rtscts_protection_enable] =
			WMI_VDEV_PARAM_MCC_RTSCTS_PROTECTION_ENABLE;
	vdev_param[wmi_vdev_param_mcc_broadcast_probe_enable] =
			WMI_VDEV_PARAM_MCC_BROADCAST_PROBE_ENABLE;
	vdev_param[wmi_vdev_param_mgmt_tx_power] = WMI_VDEV_PARAM_MGMT_TX_POWER;
	vdev_param[wmi_vdev_param_beacon_rate] = WMI_VDEV_PARAM_BEACON_RATE;
	vdev_param[wmi_vdev_param_rx_decap_type] = WMI_VDEV_PARAM_RX_DECAP_TYPE;
	/* 11ax (HE) parameters */
	vdev_param[wmi_vdev_param_he_dcm_enable] = WMI_VDEV_PARAM_HE_DCM;
	vdev_param[wmi_vdev_param_he_range_ext_enable] =
			WMI_VDEV_PARAM_HE_RANGE_EXT;
	vdev_param[wmi_vdev_param_he_bss_color] = WMI_VDEV_PARAM_BSS_COLOR;
	vdev_param[wmi_vdev_param_set_hemu_mode] = WMI_VDEV_PARAM_SET_HEMU_MODE;
	vdev_param[wmi_vdev_param_set_heop] = WMI_VDEV_PARAM_HEOPS_0_31;
	vdev_param[wmi_vdev_param_sensor_ap] = WMI_VDEV_PARAM_SENSOR_AP;
	vdev_param[wmi_vdev_param_dtim_enable_cts] =
			WMI_VDEV_PARAM_DTIM_ENABLE_CTS;
	vdev_param[wmi_vdev_param_atf_ssid_sched_policy] =
			WMI_VDEV_PARAM_ATF_SSID_SCHED_POLICY;
	vdev_param[wmi_vdev_param_disable_dyn_bw_rts] =
			WMI_VDEV_PARAM_DISABLE_DYN_BW_RTS;
	vdev_param[wmi_vdev_param_mcast2ucast_set] =
			WMI_VDEV_PARAM_MCAST2UCAST_SET;
	vdev_param[wmi_vdev_param_rc_num_retries] =
			WMI_VDEV_PARAM_RC_NUM_RETRIES;
	vdev_param[wmi_vdev_param_cabq_maxdur] = WMI_VDEV_PARAM_CABQ_MAXDUR;
	vdev_param[wmi_vdev_param_mfptest_set] = WMI_VDEV_PARAM_MFPTEST_SET;
	vdev_param[wmi_vdev_param_rts_fixed_rate] =
			WMI_VDEV_PARAM_RTS_FIXED_RATE;
	vdev_param[wmi_vdev_param_vht_sgimask] = WMI_VDEV_PARAM_VHT_SGIMASK;
	vdev_param[wmi_vdev_param_vht80_ratemask] =
			WMI_VDEV_PARAM_VHT80_RATEMASK;
	vdev_param[wmi_vdev_param_proxy_sta] = WMI_VDEV_PARAM_PROXY_STA;
	vdev_param[wmi_vdev_param_bw_nss_ratemask] =
			WMI_VDEV_PARAM_BW_NSS_RATEMASK;
	vdev_param[wmi_vdev_param_set_he_ltf] =
			WMI_VDEV_PARAM_HE_LTF;
	vdev_param[wmi_vdev_param_rate_dropdown_bmap] =
			WMI_VDEV_PARAM_RATE_DROPDOWN_BMAP;
	vdev_param[wmi_vdev_param_set_ba_mode] =
			WMI_VDEV_PARAM_BA_MODE;
	vdev_param[wmi_vdev_param_capabilities] =
			WMI_VDEV_PARAM_CAPABILITIES;
	vdev_param[wmi_vdev_param_autorate_misc_cfg] =
			WMI_VDEV_PARAM_AUTORATE_MISC_CFG;
}
#endif

/**
 * populate_target_defines_tlv() - Populate target defines and params
 * @wmi_handle: pointer to wmi handle
 *
 * Return: None
 */
#ifndef CONFIG_MCL
static void populate_target_defines_tlv(struct wmi_unified *wmi_handle)
{
	/* Fill the per-handle pdev/vdev param translation tables */
	populate_pdev_param_tlv(wmi_handle->pdev_param);
	populate_vdev_param_tlv(wmi_handle->vdev_param);
}
#else
/* MCL builds do not use the per-handle translation tables; no-op stub */
static void populate_target_defines_tlv(struct wmi_unified *wmi_handle)
{ }
#endif

/**
 * wmi_ocb_ut_attach() - Attach OCB test framework
 * @wmi_handle: wmi handle
 *
 * Return: None
 */
#ifdef WLAN_OCB_UT
void wmi_ocb_ut_attach(struct wmi_unified *wmi_handle);
#else
/* No-op when the OCB unit-test framework is compiled out */
static inline void wmi_ocb_ut_attach(struct wmi_unified *wmi_handle)
{
	return;
}
#endif

/**
 * wmi_tlv_attach() - Attach TLV APIs
 *
 * Return: None
 */
void wmi_tlv_attach(wmi_unified_t wmi_handle)
{
	wmi_handle->ops = &tlv_ops;
	wmi_ocb_ut_attach(wmi_handle);
	wmi_handle->soc->svc_ids = &multi_svc_ids[0];
#ifdef WMI_INTERFACE_EVENT_LOGGING
	/* Skip saving WMI_CMD_HDR and TLV HDR */
	wmi_handle->log_info.buf_offset_command = 8;
	/* WMI_CMD_HDR is already stripped, skip saving TLV HDR */
	wmi_handle->log_info.buf_offset_event = 4;
#endif
populate_tlv_events_id(wmi_handle->wmi_events); + populate_tlv_service(wmi_handle->services); + populate_target_defines_tlv(wmi_handle); + wmi_twt_attach_tlv(wmi_handle); + wmi_extscan_attach_tlv(wmi_handle); +} +qdf_export_symbol(wmi_tlv_attach); + +/** + * wmi_tlv_init() - Initialize WMI TLV module by registering TLV attach routine + * + * Return: None + */ +void wmi_tlv_init(void) +{ + wmi_unified_register_module(WMI_TLV_TARGET, &wmi_tlv_attach); +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_twt_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_twt_api.c new file mode 100644 index 0000000000000000000000000000000000000000..8047c9b304072a079d31d754b0bba48f0d128766 --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_twt_api.c @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/** + * DOC: Implement API's specific to TWT component. 
+ */ + +#include "wmi_unified_priv.h" +#include "wmi_unified_twt_api.h" + + +QDF_STATUS wmi_unified_twt_enable_cmd(void *wmi_hdl, + struct wmi_twt_enable_param *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_twt_enable_cmd) + return wmi_handle->ops->send_twt_enable_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_twt_disable_cmd(void *wmi_hdl, + struct wmi_twt_disable_param *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_twt_disable_cmd) + return wmi_handle->ops->send_twt_disable_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_twt_add_dialog_cmd(void *wmi_hdl, + struct wmi_twt_add_dialog_param *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_twt_add_dialog_cmd) + return wmi_handle->ops->send_twt_add_dialog_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_twt_del_dialog_cmd(void *wmi_hdl, + struct wmi_twt_del_dialog_param *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_twt_del_dialog_cmd) + return wmi_handle->ops->send_twt_del_dialog_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_twt_pause_dialog_cmd(void *wmi_hdl, + struct wmi_twt_pause_dialog_cmd_param *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_twt_pause_dialog_cmd) + return wmi_handle->ops->send_twt_pause_dialog_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_unified_twt_resume_dialog_cmd(void *wmi_hdl, + struct wmi_twt_resume_dialog_cmd_param *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->send_twt_resume_dialog_cmd) + return wmi_handle->ops->send_twt_resume_dialog_cmd( + wmi_handle, params); + + return QDF_STATUS_E_FAILURE; +} + 
+QDF_STATUS wmi_extract_twt_enable_comp_event(void *wmi_hdl, + uint8_t *evt_buf, + struct wmi_twt_enable_complete_event_param *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_twt_enable_comp_event) + return wmi_handle->ops->extract_twt_enable_comp_event( + wmi_handle, evt_buf, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_twt_disable_comp_event(void *wmi_hdl, + uint8_t *evt_buf, + struct wmi_twt_disable_complete_event *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_twt_disable_comp_event) + return wmi_handle->ops->extract_twt_disable_comp_event( + wmi_handle, evt_buf, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_twt_add_dialog_comp_event(void *wmi_hdl, + uint8_t *evt_buf, + struct wmi_twt_add_dialog_complete_event_param *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_twt_add_dialog_comp_event) + return wmi_handle->ops->extract_twt_add_dialog_comp_event( + wmi_handle, evt_buf, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_twt_del_dialog_comp_event(void *wmi_hdl, + uint8_t *evt_buf, + struct wmi_twt_del_dialog_complete_event_param *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_twt_del_dialog_comp_event) + return wmi_handle->ops->extract_twt_del_dialog_comp_event( + wmi_handle, evt_buf, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_twt_pause_dialog_comp_event(void *wmi_hdl, + uint8_t *evt_buf, + struct wmi_twt_pause_dialog_complete_event_param *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_twt_pause_dialog_comp_event) + return wmi_handle->ops->extract_twt_pause_dialog_comp_event( + wmi_handle, evt_buf, params); + + return QDF_STATUS_E_FAILURE; +} + +QDF_STATUS wmi_extract_twt_resume_dialog_comp_event(void *wmi_hdl, + 
uint8_t *evt_buf, + struct wmi_twt_resume_dialog_complete_event_param *params) +{ + wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl; + + if (wmi_handle->ops->extract_twt_resume_dialog_comp_event) + return wmi_handle->ops->extract_twt_resume_dialog_comp_event( + wmi_handle, evt_buf, params); + + return QDF_STATUS_E_FAILURE; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_twt_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_twt_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..66ba9e8177d3ed1eb3f89601316cbe6aa4384d5d --- /dev/null +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_twt_tlv.c @@ -0,0 +1,408 @@ + +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include "wmi.h" +#include "wmi_unified_priv.h" +#include "wmi_unified_twt_param.h" +#include "wmi_unified_twt_api.h" + +static QDF_STATUS send_twt_enable_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_twt_enable_param *params) +{ + wmi_twt_enable_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_twt_enable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_twt_enable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_twt_enable_cmd_fixed_param)); + + cmd->pdev_id = + wmi_handle->ops->convert_pdev_id_host_to_target( + params->pdev_id); + cmd->sta_cong_timer_ms = params->sta_cong_timer_ms; + cmd->mbss_support = params->mbss_support; + cmd->default_slot_size = params->default_slot_size; + cmd->congestion_thresh_setup = params->congestion_thresh_setup; + cmd->congestion_thresh_teardown = params->congestion_thresh_teardown; + cmd->congestion_thresh_critical = params->congestion_thresh_critical; + cmd->interference_thresh_teardown = + params->interference_thresh_teardown; + cmd->interference_thresh_setup = params->interference_thresh_setup; + cmd->min_no_sta_setup = params->min_no_sta_setup; + cmd->min_no_sta_teardown = params->min_no_sta_teardown; + cmd->no_of_bcast_mcast_slots = params->no_of_bcast_mcast_slots; + cmd->min_no_twt_slots = params->min_no_twt_slots; + cmd->max_no_sta_twt = params->max_no_sta_twt; + cmd->mode_check_interval = params->mode_check_interval; + cmd->add_sta_slot_interval = params->add_sta_slot_interval; + cmd->remove_sta_slot_interval = params->remove_sta_slot_interval; + + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_TWT_ENABLE_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send WMI_TWT_ENABLE_CMDID"); + wmi_buf_free(buf); + } + + return status; +} + + +static QDF_STATUS 
send_twt_disable_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_twt_disable_param *params) +{ + wmi_twt_disable_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_twt_disable_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_twt_disable_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_twt_disable_cmd_fixed_param)); + + cmd->pdev_id = + wmi_handle->ops->convert_pdev_id_host_to_target( + params->pdev_id); + + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_TWT_DISABLE_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send WMI_TWT_DISABLE_CMDID"); + wmi_buf_free(buf); + } + + return status; +} + +static QDF_STATUS send_twt_add_dialog_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_twt_add_dialog_param *params) +{ + wmi_twt_add_dialog_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_twt_add_dialog_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_twt_add_dialog_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_twt_add_dialog_cmd_fixed_param)); + + cmd->vdev_id = params->vdev_id; + WMI_CHAR_ARRAY_TO_MAC_ADDR(params->peer_macaddr, &cmd->peer_macaddr); + cmd->dialog_id = params->dialog_id; + cmd->wake_intvl_us = params->wake_intvl_us; + cmd->wake_intvl_mantis = params->wake_intvl_mantis; + cmd->wake_dura_us = params->wake_dura_us; + cmd->sp_offset_us = params->sp_offset_us; + TWT_FLAGS_SET_CMD(cmd->flags, params->twt_cmd); + TWT_FLAGS_SET_BROADCAST(cmd->flags, params->flag_bcast); + TWT_FLAGS_SET_TRIGGER(cmd->flags, params->flag_trigger); + TWT_FLAGS_SET_FLOW_TYPE(cmd->flags, params->flag_flow_type); + 
TWT_FLAGS_SET_PROTECTION(cmd->flags, params->flag_protection); + + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_TWT_ADD_DIALOG_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send WMI_TWT_ADD_DIALOG_CMDID"); + wmi_buf_free(buf); + } + + return status; +} + +static QDF_STATUS send_twt_del_dialog_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_twt_del_dialog_param *params) +{ + wmi_twt_del_dialog_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_twt_del_dialog_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_twt_del_dialog_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_twt_del_dialog_cmd_fixed_param)); + + cmd->vdev_id = params->vdev_id; + cmd->dialog_id = params->dialog_id; + + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_TWT_DEL_DIALOG_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send WMI_TWT_DEL_DIALOG_CMDID"); + wmi_buf_free(buf); + } + + return status; +} + +static QDF_STATUS send_twt_pause_dialog_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_twt_pause_dialog_cmd_param *params) +{ + wmi_twt_pause_dialog_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_twt_pause_dialog_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_twt_pause_dialog_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_twt_pause_dialog_cmd_fixed_param)); + + cmd->vdev_id = params->vdev_id; + cmd->dialog_id = params->dialog_id; + + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_TWT_PAUSE_DIALOG_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send 
WMI_TWT_PAUSE_DIALOG_CMDID"); + wmi_buf_free(buf); + } + + return status; +} + +static QDF_STATUS send_twt_resume_dialog_cmd_tlv(wmi_unified_t wmi_handle, + struct wmi_twt_resume_dialog_cmd_param *params) +{ + wmi_twt_resume_dialog_cmd_fixed_param *cmd; + wmi_buf_t buf; + QDF_STATUS status; + + buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd)); + if (!buf) { + WMI_LOGE("Failed to allocate memory"); + return QDF_STATUS_E_FAILURE; + } + + cmd = (wmi_twt_resume_dialog_cmd_fixed_param *) wmi_buf_data(buf); + WMITLV_SET_HDR(&cmd->tlv_header, + WMITLV_TAG_STRUC_wmi_twt_resume_dialog_cmd_fixed_param, + WMITLV_GET_STRUCT_TLVLEN + (wmi_twt_resume_dialog_cmd_fixed_param)); + + cmd->vdev_id = params->vdev_id; + cmd->dialog_id = params->dialog_id; + cmd->sp_offset_us = params->sp_offset_us; + + status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd), + WMI_TWT_RESUME_DIALOG_CMDID); + if (QDF_IS_STATUS_ERROR(status)) { + WMI_LOGE("Failed to send WMI_TWT_RESUME_DIALOG_CMDID"); + wmi_buf_free(buf); + } + + return status; +} + +static QDF_STATUS extract_twt_enable_comp_event_tlv(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_enable_complete_event_param *params) +{ + WMI_TWT_ENABLE_COMPLETE_EVENTID_param_tlvs *param_buf; + wmi_twt_enable_complete_event_fixed_param *ev; + + param_buf = (WMI_TWT_ENABLE_COMPLETE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + + params->pdev_id = + wmi_handle->ops->convert_pdev_id_target_to_host(ev->pdev_id); + params->status = ev->status; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_twt_disable_comp_event_tlv(wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_disable_complete_event *params) +{ + WMI_TWT_DISABLE_COMPLETE_EVENTID_param_tlvs *param_buf; + wmi_twt_disable_complete_event_fixed_param *ev; + + param_buf = (WMI_TWT_DISABLE_COMPLETE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + 
WMI_LOGE("evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + +#if 0 + params->pdev_id = + wmi_handle->ops->convert_pdev_id_target_to_host(ev->pdev_id); + params->status = ev->status; +#endif + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_twt_add_dialog_comp_event_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_add_dialog_complete_event_param *params) +{ + WMI_TWT_ADD_DIALOG_COMPLETE_EVENTID_param_tlvs *param_buf; + wmi_twt_add_dialog_complete_event_fixed_param *ev; + + param_buf = (WMI_TWT_ADD_DIALOG_COMPLETE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + + params->vdev_id = ev->vdev_id; + params->status = ev->status; + params->dialog_id = ev->dialog_id; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_twt_del_dialog_comp_event_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_del_dialog_complete_event_param *params) +{ + WMI_TWT_DEL_DIALOG_COMPLETE_EVENTID_param_tlvs *param_buf; + wmi_twt_del_dialog_complete_event_fixed_param *ev; + + param_buf = (WMI_TWT_DEL_DIALOG_COMPLETE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + + params->vdev_id = ev->vdev_id; + params->dialog_id = ev->dialog_id; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_twt_pause_dialog_comp_event_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_pause_dialog_complete_event_param *params) +{ + WMI_TWT_PAUSE_DIALOG_COMPLETE_EVENTID_param_tlvs *param_buf; + wmi_twt_pause_dialog_complete_event_fixed_param *ev; + + param_buf = (WMI_TWT_PAUSE_DIALOG_COMPLETE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + + params->vdev_id = ev->vdev_id; + params->status = 
ev->status; + params->dialog_id = ev->dialog_id; + + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS extract_twt_resume_dialog_comp_event_tlv( + wmi_unified_t wmi_handle, + uint8_t *evt_buf, + struct wmi_twt_resume_dialog_complete_event_param *params) +{ + WMI_TWT_RESUME_DIALOG_COMPLETE_EVENTID_param_tlvs *param_buf; + wmi_twt_resume_dialog_complete_event_fixed_param *ev; + + param_buf = + (WMI_TWT_RESUME_DIALOG_COMPLETE_EVENTID_param_tlvs *)evt_buf; + if (!param_buf) { + WMI_LOGE("evt_buf is NULL"); + return QDF_STATUS_E_INVAL; + } + + ev = param_buf->fixed_param; + + params->vdev_id = ev->vdev_id; + params->status = ev->status; + params->dialog_id = ev->dialog_id; + + return QDF_STATUS_SUCCESS; +} + +void wmi_twt_attach_tlv(wmi_unified_t wmi_handle) +{ + struct wmi_ops *ops = wmi_handle->ops; + + ops->send_twt_enable_cmd = send_twt_enable_cmd_tlv; + ops->send_twt_disable_cmd = send_twt_disable_cmd_tlv; + ops->send_twt_add_dialog_cmd = send_twt_add_dialog_cmd_tlv; + ops->send_twt_del_dialog_cmd = send_twt_del_dialog_cmd_tlv; + ops->send_twt_pause_dialog_cmd = send_twt_pause_dialog_cmd_tlv; + ops->send_twt_resume_dialog_cmd = send_twt_resume_dialog_cmd_tlv; + ops->extract_twt_enable_comp_event = extract_twt_enable_comp_event_tlv; + ops->extract_twt_disable_comp_event = + extract_twt_disable_comp_event_tlv; + ops->extract_twt_add_dialog_comp_event = + extract_twt_add_dialog_comp_event_tlv; + ops->extract_twt_del_dialog_comp_event = + extract_twt_del_dialog_comp_event_tlv; + ops->extract_twt_pause_dialog_comp_event = + extract_twt_pause_dialog_comp_event_tlv; + ops->extract_twt_resume_dialog_comp_event = + extract_twt_resume_dialog_comp_event_tlv; +}